author     Michelle McDaniel <adiaaida@gmail.com>    2016-08-09 13:15:05 -0700
committer  Michelle McDaniel <adiaaida@gmail.com>    2016-08-11 09:53:41 -0700
commit     36a2b906c008cd3693a9ab5aef7b4402addd6c74 (patch)
tree       27333c6f26304490169825ae1c17484534246dc6
parent     ab7d6a8df73d3d89210a778338feaa9fedf4146a (diff)
Reformat jit sources with clang-tidy and format
This change is the result of running clang-tidy and clang-format on jit sources.
-rw-r--r--  src/jit/.clang-format | 4
-rwxr-xr-x  src/jit/_typeinfo.h | 412
-rw-r--r--  src/jit/alloc.cpp | 97
-rw-r--r--  src/jit/alloc.h | 8
-rw-r--r--  src/jit/arraystack.h | 51
-rw-r--r--  src/jit/assertionprop.cpp | 2304
-rw-r--r--  src/jit/bitset.cpp | 85
-rw-r--r--  src/jit/bitset.h | 89
-rw-r--r--  src/jit/bitsetasshortlong.h | 422
-rw-r--r--  src/jit/bitsetasuint64.h | 53
-rw-r--r--  src/jit/bitsetasuint64inclass.h | 137
-rw-r--r--  src/jit/bitsetops.h | 1
-rw-r--r--  src/jit/bitvec.h | 22
-rw-r--r--  src/jit/block.cpp | 358
-rw-r--r--  src/jit/block.h | 872
-rw-r--r--  src/jit/blockset.h | 34
-rwxr-xr-x  src/jit/codegen.h | 1167
-rw-r--r--  src/jit/codegenarm.cpp | 1078
-rw-r--r--  src/jit/codegenarm64.cpp | 5788
-rw-r--r--  src/jit/codegenclassic.h | 1020
-rwxr-xr-x  src/jit/codegencommon.cpp | 4087
-rw-r--r--  src/jit/codegeninterface.h | 378
-rw-r--r--  src/jit/codegenlegacy.cpp | 14194
-rw-r--r--  src/jit/codegenlinear.h | 239
-rwxr-xr-x  src/jit/codegenxarch.cpp | 3677
-rw-r--r--  src/jit/compiler.cpp | 5892
-rw-r--r--  src/jit/compiler.h | 8768
-rw-r--r--  src/jit/compiler.hpp | 2219
-rw-r--r--  src/jit/compilerbitsettraits.h | 38
-rw-r--r--  src/jit/compilerbitsettraits.hpp | 43
-rw-r--r--  src/jit/compmemkind.h | 2
-rw-r--r--  src/jit/copyprop.cpp | 41
-rw-r--r--  src/jit/dataflow.h | 4
-rw-r--r--  src/jit/decomposelongs.cpp | 518
-rw-r--r--  src/jit/decomposelongs.h | 7
-rw-r--r--  src/jit/delayload.cpp | 1
-rw-r--r--  src/jit/disasm.cpp | 1205
-rw-r--r--  src/jit/disasm.h | 199
-rw-r--r--  src/jit/earlyprop.cpp | 131
-rwxr-xr-x  src/jit/ee_il_dll.cpp | 776
-rw-r--r--  src/jit/ee_il_dll.hpp | 189
-rw-r--r--  src/jit/eeinterface.cpp | 62
-rw-r--r--  src/jit/emit.cpp | 3314
-rw-r--r--  src/jit/emit.h | 2242
-rw-r--r--  src/jit/emitarm.cpp | 7735
-rw-r--r--  src/jit/emitarm.h | 701
-rw-r--r--  src/jit/emitarm64.cpp | 10996
-rw-r--r--  src/jit/emitarm64.h | 1405
-rw-r--r--  src/jit/emitdef.h | 4
-rw-r--r--  src/jit/emitfmts.h | 2
-rw-r--r--  src/jit/emitfmtsarm64.h | 283
-rw-r--r--  src/jit/emitinl.h | 228
-rw-r--r--  src/jit/emitpub.h | 225
-rw-r--r--  src/jit/emitxarch.cpp | 8022
-rw-r--r--  src/jit/emitxarch.h | 798
-rw-r--r--  src/jit/error.cpp | 148
-rw-r--r--  src/jit/error.h | 234
-rw-r--r--  src/jit/flowgraph.cpp | 7520
-rw-r--r--  src/jit/fp.h | 56
-rw-r--r--  src/jit/gcencode.cpp | 2130
-rw-r--r--  src/jit/gcinfo.cpp | 336
-rw-r--r--  src/jit/gentree.cpp | 12943
-rw-r--r--  src/jit/gentree.h | 3665
-rw-r--r--  src/jit/gschecks.cpp | 390
-rw-r--r--  src/jit/hashbv.cpp | 960
-rw-r--r--  src/jit/hashbv.h | 267
-rw-r--r--  src/jit/host.h | 25
-rw-r--r--  src/jit/hostallocator.h | 4
-rw-r--r--  src/jit/importer.cpp | 13683
-rw-r--r--  src/jit/inline.cpp | 396
-rw-r--r--  src/jit/inline.h | 254
-rw-r--r--  src/jit/inlinepolicy.cpp | 706
-rw-r--r--  src/jit/inlinepolicy.h | 136
-rw-r--r--  src/jit/instr.cpp | 2515
-rw-r--r--  src/jit/instr.h | 4
-rw-r--r--  src/jit/instrs.h | 2
-rw-r--r--  src/jit/instrsarm.h | 36
-rw-r--r--  src/jit/instrsarm64.h | 30
-rw-r--r--  src/jit/jit.h | 700
-rw-r--r--  src/jit/jitconfig.cpp | 315
-rw-r--r--  src/jit/jitconfig.h | 53
-rw-r--r--  src/jit/jitconfigvalues.h | 206
-rw-r--r--  src/jit/jiteh.cpp | 1046
-rw-r--r--  src/jit/jiteh.h | 113
-rw-r--r--  src/jit/jitgcinfo.h | 359
-rw-r--r--  src/jit/jitpch.h | 1
-rw-r--r--  src/jit/jitstd.h | 2
-rw-r--r--  src/jit/jitstd/allocator.h | 2
-rw-r--r--  src/jit/jitstd/list.h | 24
-rw-r--r--  src/jit/jitstd/vector.h | 2
-rw-r--r--  src/jit/jittelemetry.cpp | 107
-rw-r--r--  src/jit/jittelemetry.h | 4
-rw-r--r--  src/jit/lclvars.cpp | 2708
-rw-r--r--  src/jit/liveness.cpp | 982
-rw-r--r--  src/jit/loopcloning.cpp | 166
-rw-r--r--  src/jit/loopcloning.h | 277
-rw-r--r--  src/jit/loopcloningopts.h | 2
-rw-r--r--  src/jit/lower.cpp | 1341
-rw-r--r--  src/jit/lower.h | 148
-rw-r--r--  src/jit/lowerarm.cpp | 15
-rw-r--r--  src/jit/lowerarm64.cpp | 1266
-rw-r--r--  src/jit/lowerxarch.cpp | 2275
-rw-r--r--  src/jit/lsra.cpp | 4594
-rw-r--r--  src/jit/lsra.h | 1057
-rwxr-xr-x  src/jit/morph.cpp | 10505
-rw-r--r--  src/jit/nodeinfo.h | 102
-rw-r--r--  src/jit/objectalloc.cpp | 19
-rw-r--r--  src/jit/objectalloc.h | 22
-rw-r--r--  src/jit/opcode.h | 10
-rw-r--r--  src/jit/optcse.cpp | 1261
-rw-r--r--  src/jit/optimizer.cpp | 4100
-rw-r--r--  src/jit/phase.h | 14
-rw-r--r--  src/jit/rangecheck.cpp | 364
-rw-r--r--  src/jit/rangecheck.h | 238
-rw-r--r--  src/jit/rationalize.cpp | 723
-rw-r--r--  src/jit/rationalize.h | 56
-rw-r--r--  src/jit/regalloc.cpp | 5677
-rw-r--r--  src/jit/regalloc.h | 85
-rw-r--r--  src/jit/register_arg_convention.cpp | 37
-rw-r--r--  src/jit/register_arg_convention.h | 31
-rw-r--r--  src/jit/registerfp.cpp | 945
-rw-r--r--  src/jit/registerfp.h | 21
-rw-r--r--  src/jit/regpair.h | 624
-rw-r--r--  src/jit/regset.cpp | 1384
-rw-r--r--  src/jit/regset.h | 447
-rw-r--r--  src/jit/scopeinfo.cpp | 568
-rw-r--r--  src/jit/sharedfloat.cpp | 263
-rw-r--r--  src/jit/simd.cpp | 944
-rw-r--r--  src/jit/simd.h | 35
-rw-r--r--  src/jit/simdcodegenxarch.cpp | 705
-rw-r--r--  src/jit/sm.cpp | 116
-rw-r--r--  src/jit/sm.h | 62
-rw-r--r--  src/jit/smallhash.h | 108
-rw-r--r--  src/jit/smcommon.cpp | 225
-rw-r--r--  src/jit/smcommon.h | 33
-rw-r--r--  src/jit/smdata.cpp | 11
-rw-r--r--  src/jit/smopenum.h | 16
-rw-r--r--  src/jit/smweights.cpp | 514
-rw-r--r--  src/jit/ssabuilder.cpp | 399
-rw-r--r--  src/jit/ssabuilder.h | 22
-rw-r--r--  src/jit/ssaconfig.h | 31
-rw-r--r--  src/jit/ssarenamestate.cpp | 32
-rw-r--r--  src/jit/ssarenamestate.h | 35
-rw-r--r--  src/jit/stackfp.cpp | 2716
-rw-r--r--  src/jit/target.h | 413
-rw-r--r--  src/jit/targetamd64.cpp | 4
-rw-r--r--  src/jit/targetarm.cpp | 4
-rw-r--r--  src/jit/targetarm64.cpp | 4
-rw-r--r--  src/jit/targetx86.cpp | 4
-rw-r--r--  src/jit/tinyarray.h | 42
-rw-r--r--  src/jit/titypes.h | 17
-rw-r--r--  src/jit/typeinfo.cpp | 151
-rw-r--r--  src/jit/typelist.h | 28
-rw-r--r--  src/jit/unwind.cpp | 54
-rw-r--r--  src/jit/unwind.h | 406
-rw-r--r--  src/jit/unwindamd64.cpp | 488
-rw-r--r--  src/jit/unwindarm.cpp | 674
-rw-r--r--  src/jit/unwindarm64.cpp | 241
-rw-r--r--  src/jit/utils.cpp | 1180
-rw-r--r--  src/jit/utils.h | 199
-rw-r--r--  src/jit/valuenum.cpp | 4224
-rw-r--r--  src/jit/valuenum.h | 602
-rw-r--r--  src/jit/valuenumfuncs.h | 5
-rw-r--r--  src/jit/valuenumtype.h | 58
-rw-r--r--  src/jit/varset.h | 105
-rw-r--r--  src/jit/vartype.h | 196
-rw-r--r--  src/jit/x86_instrs.h | 5
167 files changed, 100746 insertions, 98690 deletions
diff --git a/src/jit/.clang-format b/src/jit/.clang-format
index 756dbff197..1e3930f737 100644
--- a/src/jit/.clang-format
+++ b/src/jit/.clang-format
@@ -33,10 +33,10 @@ BraceWrapping:
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Allman
BreakBeforeTernaryOperators: true
-BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120
CommentPragmas: '^ IWYU pragma:'
-ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
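
Note: the two .clang-format changes above switch constructor initializer lists to an "all on one line, or one per line with a leading comma" style. A minimal, hypothetical illustration (not from the jit sources) of the layout these settings produce when the list does not fit within the 120-column limit:

// Hypothetical example only: initializer-list layout once
// BreakConstructorInitializersBeforeComma and
// ConstructorInitializerAllOnOneLineOrOnePerLine are both true.
class WidePrologExample
{
public:
    WidePrologExample(int first, int second, int third)
        : m_first(first)
        , m_second(second)
        , m_third(third)
    {
    }

private:
    int m_first;
    int m_second;
    int m_third;
};

Short lists that still fit on one line stay there, which is why small constructors in the hunks below (for example "typeInfo() : m_flags(TI_ERROR)") remain single-line.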
diff --git a/src/jit/_typeinfo.h b/src/jit/_typeinfo.h
index c560ec7ac2..08273adc8d 100755
--- a/src/jit/_typeinfo.h
+++ b/src/jit/_typeinfo.h
@@ -26,8 +26,8 @@ enum ti_types
{
#define DEF_TI(ti, nm) ti,
#include "titypes.h"
-#undef DEF_TI
- TI_ONLY_ENUM = TI_METHOD, //Enum values above this are completely described by the enumeration
+#undef DEF_TI
+ TI_ONLY_ENUM = TI_METHOD, // Enum values above this are completely described by the enumeration
TI_COUNT
};
@@ -44,12 +44,11 @@ enum ti_types
namespace
{
#endif // _MSC_VER
- SELECTANY const char* g_ti_type_names_map[] =
- {
+SELECTANY const char* g_ti_type_names_map[] = {
#define DEF_TI(ti, nm) nm,
#include "titypes.h"
#undef DEF_TI
- };
+};
#ifdef _MSC_VER
}
#endif // _MSC_VER
@@ -60,12 +59,11 @@ namespace
namespace
{
#endif // _MSC_VER
- SELECTANY const ti_types g_jit_types_map[] =
- {
-#define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) verType,
+SELECTANY const ti_types g_jit_types_map[] = {
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) verType,
#include "typelist.h"
-#undef DEF_TP
- };
+#undef DEF_TP
+};
#ifdef _MSC_VER
}
#endif // _MSC_VER
@@ -80,7 +78,7 @@ inline const char* tiType2Str(ti_types type)
#endif // DEBUG
// typeInfo does not care about distinction between signed/unsigned
-// This routine converts all unsigned types to signed ones
+// This routine converts all unsigned types to signed ones
inline ti_types varType2tiType(var_types type)
{
assert(g_jit_types_map[TYP_BYTE] == TI_BYTE);
@@ -88,7 +86,7 @@ inline ti_types varType2tiType(var_types type)
assert(g_jit_types_map[TYP_UINT] == TI_INT);
assert(g_jit_types_map[TYP_FLOAT] == TI_FLOAT);
assert(g_jit_types_map[TYP_BYREF] == TI_ERROR);
- assert(g_jit_types_map[type] != TI_ERROR);
+ assert(g_jit_types_map[type] != TI_ERROR);
return g_jit_types_map[type];
}
@@ -96,33 +94,33 @@ inline ti_types varType2tiType(var_types type)
namespace
{
#endif // _MSC_VER
- SELECTANY const ti_types g_ti_types_map[CORINFO_TYPE_COUNT] =
- { // see the definition of enum CorInfoType in file inc/corinfo.h
- TI_ERROR, // CORINFO_TYPE_UNDEF = 0x0,
- TI_ERROR, // CORINFO_TYPE_VOID = 0x1,
- TI_BYTE, // CORINFO_TYPE_BOOL = 0x2,
- TI_SHORT, // CORINFO_TYPE_CHAR = 0x3,
- TI_BYTE, // CORINFO_TYPE_BYTE = 0x4,
- TI_BYTE, // CORINFO_TYPE_UBYTE = 0x5,
- TI_SHORT, // CORINFO_TYPE_SHORT = 0x6,
- TI_SHORT, // CORINFO_TYPE_USHORT = 0x7,
- TI_INT, // CORINFO_TYPE_INT = 0x8,
- TI_INT, // CORINFO_TYPE_UINT = 0x9,
- TI_LONG, // CORINFO_TYPE_LONG = 0xa,
- TI_LONG, // CORINFO_TYPE_ULONG = 0xb,
- TI_I_IMPL, // CORINFO_TYPE_NATIVEINT = 0xc,
- TI_I_IMPL, // CORINFO_TYPE_NATIVEUINT = 0xd,
- TI_FLOAT, // CORINFO_TYPE_FLOAT = 0xe,
- TI_DOUBLE, // CORINFO_TYPE_DOUBLE = 0xf,
- TI_REF, // CORINFO_TYPE_STRING = 0x10,
- TI_ERROR, // CORINFO_TYPE_PTR = 0x11,
- TI_ERROR, // CORINFO_TYPE_BYREF = 0x12,
- TI_STRUCT, // CORINFO_TYPE_VALUECLASS = 0x13,
- TI_REF, // CORINFO_TYPE_CLASS = 0x14,
- TI_STRUCT, // CORINFO_TYPE_REFANY = 0x15,
- TI_REF, // CORINFO_TYPE_VAR = 0x16,
- };
-#ifdef _MSC_VER
+SELECTANY const ti_types g_ti_types_map[CORINFO_TYPE_COUNT] = {
+ // see the definition of enum CorInfoType in file inc/corinfo.h
+ TI_ERROR, // CORINFO_TYPE_UNDEF = 0x0,
+ TI_ERROR, // CORINFO_TYPE_VOID = 0x1,
+ TI_BYTE, // CORINFO_TYPE_BOOL = 0x2,
+ TI_SHORT, // CORINFO_TYPE_CHAR = 0x3,
+ TI_BYTE, // CORINFO_TYPE_BYTE = 0x4,
+ TI_BYTE, // CORINFO_TYPE_UBYTE = 0x5,
+ TI_SHORT, // CORINFO_TYPE_SHORT = 0x6,
+ TI_SHORT, // CORINFO_TYPE_USHORT = 0x7,
+ TI_INT, // CORINFO_TYPE_INT = 0x8,
+ TI_INT, // CORINFO_TYPE_UINT = 0x9,
+ TI_LONG, // CORINFO_TYPE_LONG = 0xa,
+ TI_LONG, // CORINFO_TYPE_ULONG = 0xb,
+ TI_I_IMPL, // CORINFO_TYPE_NATIVEINT = 0xc,
+ TI_I_IMPL, // CORINFO_TYPE_NATIVEUINT = 0xd,
+ TI_FLOAT, // CORINFO_TYPE_FLOAT = 0xe,
+ TI_DOUBLE, // CORINFO_TYPE_DOUBLE = 0xf,
+ TI_REF, // CORINFO_TYPE_STRING = 0x10,
+ TI_ERROR, // CORINFO_TYPE_PTR = 0x11,
+ TI_ERROR, // CORINFO_TYPE_BYREF = 0x12,
+ TI_STRUCT, // CORINFO_TYPE_VALUECLASS = 0x13,
+ TI_REF, // CORINFO_TYPE_CLASS = 0x14,
+ TI_STRUCT, // CORINFO_TYPE_REFANY = 0x15,
+ TI_REF, // CORINFO_TYPE_VAR = 0x16,
+};
+#ifdef _MSC_VER
}
#endif // _MSC_VER
@@ -132,11 +130,11 @@ inline ti_types JITtype2tiType(CorInfoType type)
{
// spot check to make certain enumerations have not changed
- assert(g_ti_types_map[CORINFO_TYPE_CLASS] == TI_REF);
- assert(g_ti_types_map[CORINFO_TYPE_BYREF] == TI_ERROR);
- assert(g_ti_types_map[CORINFO_TYPE_DOUBLE] == TI_DOUBLE);
+ assert(g_ti_types_map[CORINFO_TYPE_CLASS] == TI_REF);
+ assert(g_ti_types_map[CORINFO_TYPE_BYREF] == TI_ERROR);
+ assert(g_ti_types_map[CORINFO_TYPE_DOUBLE] == TI_DOUBLE);
assert(g_ti_types_map[CORINFO_TYPE_VALUECLASS] == TI_STRUCT);
- assert(g_ti_types_map[CORINFO_TYPE_STRING] == TI_REF);
+ assert(g_ti_types_map[CORINFO_TYPE_STRING] == TI_REF);
type = CorInfoType(type & CORINFO_TYPE_MASK); // strip off modifiers
@@ -148,7 +146,7 @@ inline ti_types JITtype2tiType(CorInfoType type)
/*****************************************************************************
* Declares the typeInfo class, which represents the type of an entity on the
- * stack, in a local variable or an argument.
+ * stack, in a local variable or an argument.
*
* Flags: LLLLLLLLLLLLLLLLffffffffffTTTTTT
*
@@ -164,12 +162,12 @@ inline ti_types JITtype2tiType(CorInfoType type)
* (including arrays and null objref)
* TI_STRUCT - VALUE type, use m_cls for the actual type
*
- * NOTE carefully that BYREF info is not stored here. You will never see a
- * TI_BYREF in this component. For example, the type component
+ * NOTE carefully that BYREF info is not stored here. You will never see a
+ * TI_BYREF in this component. For example, the type component
* of a "byref TI_INT" is TI_FLAG_BYREF | TI_INT.
*
* NOTE carefully that Generic Type Variable info is
- * only stored here in part. Values of type "T" (e.g "!0" in ILASM syntax),
+ * only stored here in part. Values of type "T" (e.g "!0" in ILASM syntax),
* i.e. some generic variable type, appear only when verifying generic
* code. They come in two flavours: unboxed and boxed. Unboxed
* is the norm, e.g. a local, field or argument of type T. Boxed
@@ -180,7 +178,7 @@ inline ti_types JITtype2tiType(CorInfoType type)
*
* (TI_REF, <type-variable-type-handle>) == boxed type variable
*
- * (TI_REF, <type-variable-type-handle>)
+ * (TI_REF, <type-variable-type-handle>)
* + TI_FLAG_GENERIC_TYPE_VAR == unboxed type variable
*
* Using TI_REF for these may seem odd but using TI_STRUCT means the
@@ -192,53 +190,52 @@ inline ti_types JITtype2tiType(CorInfoType type)
*
*/
- // TI_COUNT is less than or equal to TI_FLAG_DATA_MASK
-
-#define TI_FLAG_DATA_BITS 6
-#define TI_FLAG_DATA_MASK ((1 << TI_FLAG_DATA_BITS)-1)
+// TI_COUNT is less than or equal to TI_FLAG_DATA_MASK
- // Flag indicating this item is uninitialized
- // Note that if UNINIT and BYREF are both set,
- // it means byref (uninit x) - i.e. we are pointing to an uninit <something>
+#define TI_FLAG_DATA_BITS 6
+#define TI_FLAG_DATA_MASK ((1 << TI_FLAG_DATA_BITS) - 1)
-#define TI_FLAG_UNINIT_OBJREF 0x00000040
+// Flag indicating this item is uninitialized
+// Note that if UNINIT and BYREF are both set,
+// it means byref (uninit x) - i.e. we are pointing to an uninit <something>
- // Flag indicating this item is a byref <something>
+#define TI_FLAG_UNINIT_OBJREF 0x00000040
-#define TI_FLAG_BYREF 0x00000080
+// Flag indicating this item is a byref <something>
- // This item is a byref generated using the readonly. prefix
- // to a ldelema or Address function on an array type. The
- // runtime type check is ignored in these cases, but the
- // resulting byref can only be used in order to perform a
- // constraint call.
+#define TI_FLAG_BYREF 0x00000080
-#define TI_FLAG_BYREF_READONLY 0x00000100
+// This item is a byref generated using the readonly. prefix
+// to a ldelema or Address function on an array type. The
+// runtime type check is ignored in these cases, but the
+// resulting byref can only be used in order to perform a
+// constraint call.
- // This item is the MSIL 'I' type which is pointer-sized
- // (different size depending on platform) but which on ALL platforms
- // is implicitly convertible with a 32-bit int but not with a 64-bit one.
+#define TI_FLAG_BYREF_READONLY 0x00000100
- // Note: this flag is currently used only in 64-bit systems to annotate
- // native int types. In 32 bits, since you can transparently coalesce int32
- // and native-int and both are the same size, JIT32 had no need to model
- // native-ints as a separate entity. For 64-bit though, since they have
- // different size, it's important to discern between a long and a native int
- // since conversions between them are not verifiable.
-#define TI_FLAG_NATIVE_INT 0x00000200
+// This item is the MSIL 'I' type which is pointer-sized
+// (different size depending on platform) but which on ALL platforms
+// is implicitly convertible with a 32-bit int but not with a 64-bit one.
- // This item contains the 'this' pointer (used for tracking)
+// Note: this flag is currently used only in 64-bit systems to annotate
+// native int types. In 32 bits, since you can transparently coalesce int32
+// and native-int and both are the same size, JIT32 had no need to model
+// native-ints as a separate entity. For 64-bit though, since they have
+// different size, it's important to discern between a long and a native int
+// since conversions between them are not verifiable.
+#define TI_FLAG_NATIVE_INT 0x00000200
-#define TI_FLAG_THIS_PTR 0x00001000
+// This item contains the 'this' pointer (used for tracking)
+#define TI_FLAG_THIS_PTR 0x00001000
-// This item is a byref to something which has a permanent home
-// (e.g. a static field, or instance field of an object in GC heap, as
-// opposed to the stack or a local variable). TI_FLAG_BYREF must also be
+// This item is a byref to something which has a permanent home
+// (e.g. a static field, or instance field of an object in GC heap, as
+// opposed to the stack or a local variable). TI_FLAG_BYREF must also be
// set. This information is useful for tail calls and return byrefs.
//
// Instructions that generate a permanent home byref:
-//
+//
// ldelema
// ldflda of a ref object or another permanent home byref
// array element address Get() helper
@@ -248,28 +245,25 @@ inline ti_types JITtype2tiType(CorInfoType type)
#define TI_FLAG_BYREF_PERMANENT_HOME 0x00002000
-
// This is for use when verifying generic code.
// This indicates that the type handle is really an unboxed
// generic type variable (e.g. the result of loading an argument
// of type T in a class List<T>). Without this flag
// the same type handle indicates a boxed generic value,
-// e.g. the result of a "box T" instruction.
-#define TI_FLAG_GENERIC_TYPE_VAR 0x00004000
+// e.g. the result of a "box T" instruction.
+#define TI_FLAG_GENERIC_TYPE_VAR 0x00004000
- // Number of bits local var # is shifted
+// Number of bits local var # is shifted
-#define TI_FLAG_LOCAL_VAR_SHIFT 16
-#define TI_FLAG_LOCAL_VAR_MASK 0xFFFF0000
+#define TI_FLAG_LOCAL_VAR_SHIFT 16
+#define TI_FLAG_LOCAL_VAR_MASK 0xFFFF0000
- // Field info uses the same space as the local info
+// Field info uses the same space as the local info
-#define TI_FLAG_FIELD_SHIFT TI_FLAG_LOCAL_VAR_SHIFT
-#define TI_FLAG_FIELD_MASK TI_FLAG_LOCAL_VAR_MASK
+#define TI_FLAG_FIELD_SHIFT TI_FLAG_LOCAL_VAR_SHIFT
+#define TI_FLAG_FIELD_MASK TI_FLAG_LOCAL_VAR_MASK
-#define TI_ALL_BYREF_FLAGS (TI_FLAG_BYREF| \
- TI_FLAG_BYREF_READONLY | \
- TI_FLAG_BYREF_PERMANENT_HOME)
+#define TI_ALL_BYREF_FLAGS (TI_FLAG_BYREF | TI_FLAG_BYREF_READONLY | TI_FLAG_BYREF_PERMANENT_HOME)
/*****************************************************************************
* A typeInfo can be one of several types:
@@ -278,11 +272,11 @@ inline ti_types JITtype2tiType(CorInfoType type)
* - An array (m_cls describes the array type)
* - A byref (byref flag set, otherwise the same as the above),
* - A Function Pointer (m_method)
- * - A byref local variable (byref and byref local flags set), can be
+ * - A byref local variable (byref and byref local flags set), can be
* uninitialized
*
- * The reason that there can be 2 types of byrefs (general byrefs, and byref
- * locals) is that byref locals initially point to uninitialized items.
+ * The reason that there can be 2 types of byrefs (general byrefs, and byref
+ * locals) is that byref locals initially point to uninitialized items.
* Therefore these byrefs must be tracked specialy.
*/
@@ -291,31 +285,33 @@ class typeInfo
private:
union {
- struct {
- ti_types type : 6;
- unsigned uninitobj : 1; // used
- unsigned byref : 1; // used
- unsigned byref_readonly : 1; // used
- unsigned nativeInt : 1; // used
- unsigned : 2; // unused
- unsigned thisPtr : 1; // used
- unsigned thisPermHome : 1; // used
+ struct
+ {
+ ti_types type : 6;
+ unsigned uninitobj : 1; // used
+ unsigned byref : 1; // used
+ unsigned byref_readonly : 1; // used
+ unsigned nativeInt : 1; // used
+ unsigned : 2; // unused
+ unsigned thisPtr : 1; // used
+ unsigned thisPermHome : 1; // used
unsigned generic_type_var : 1; // used
- } m_bits;
-
- DWORD m_flags;
+ } m_bits;
+
+ DWORD m_flags;
};
-
- union {
- CORINFO_CLASS_HANDLE m_cls;
- // Valid only for type TI_METHOD
+
+ union {
+ CORINFO_CLASS_HANDLE m_cls;
+ // Valid only for type TI_METHOD
CORINFO_METHOD_HANDLE m_method;
};
- template<typename T>
+ template <typename T>
static bool isInvalidHandle(const T handle)
{
- static_assert(std::is_same<T, CORINFO_CLASS_HANDLE>::value || std::is_same<T, CORINFO_METHOD_HANDLE>::value, "");
+ static_assert(std::is_same<T, CORINFO_CLASS_HANDLE>::value || std::is_same<T, CORINFO_METHOD_HANDLE>::value,
+ "");
#ifdef _HOST_64BIT_
return handle == reinterpret_cast<T>(0xcccccccccccccccc);
#else
@@ -324,49 +320,51 @@ private:
}
public:
- typeInfo():m_flags(TI_ERROR)
+ typeInfo() : m_flags(TI_ERROR)
{
m_cls = NO_CLASS_HANDLE;
}
- typeInfo(ti_types tiType)
- {
+ typeInfo(ti_types tiType)
+ {
assert((tiType >= TI_BYTE) && (tiType <= TI_NULL));
assert(tiType <= TI_FLAG_DATA_MASK);
- m_flags = (DWORD) tiType;
- m_cls = NO_CLASS_HANDLE;
+ m_flags = (DWORD)tiType;
+ m_cls = NO_CLASS_HANDLE;
}
- typeInfo(var_types varType)
- {
- m_flags = (DWORD) varType2tiType(varType);
- m_cls = NO_CLASS_HANDLE;
+ typeInfo(var_types varType)
+ {
+ m_flags = (DWORD)varType2tiType(varType);
+ m_cls = NO_CLASS_HANDLE;
}
static typeInfo nativeInt()
{
typeInfo result = typeInfo(TI_I_IMPL);
-#ifdef _TARGET_64BIT_
+#ifdef _TARGET_64BIT_
result.m_flags |= TI_FLAG_NATIVE_INT;
#endif
return result;
}
- typeInfo(ti_types tiType, CORINFO_CLASS_HANDLE cls, bool typeVar = false)
+ typeInfo(ti_types tiType, CORINFO_CLASS_HANDLE cls, bool typeVar = false)
{
assert(tiType == TI_STRUCT || tiType == TI_REF);
- assert(cls != 0 && !isInvalidHandle(cls));
+ assert(cls != nullptr && !isInvalidHandle(cls));
m_flags = tiType;
- if (typeVar)
+ if (typeVar)
+ {
m_flags |= TI_FLAG_GENERIC_TYPE_VAR;
- m_cls = cls;
+ }
+ m_cls = cls;
}
typeInfo(CORINFO_METHOD_HANDLE method)
{
- assert(method != 0 && !isInvalidHandle(method));
- m_flags = TI_METHOD;
+ assert(method != nullptr && !isInvalidHandle(method));
+ m_flags = TI_METHOD;
m_method = method;
}
@@ -376,7 +374,7 @@ public:
#endif // VERBOSE_VERIFY
#endif // DEBUG
-public:
+public:
// Note that we specifically ignore the permanent byref here. The rationale is that
// the type system doesn't know about this (it's jit only), ie, signatures don't specify if
// a byref is safe, so they are fully equivalent for the jit, except for the RET instruction
@@ -384,8 +382,9 @@ public:
// the bit
static bool AreEquivalent(const typeInfo& li, const typeInfo& ti)
{
- DWORD allFlags = TI_FLAG_DATA_MASK|TI_FLAG_BYREF|TI_FLAG_BYREF_READONLY|TI_FLAG_GENERIC_TYPE_VAR|TI_FLAG_UNINIT_OBJREF;
-#ifdef _TARGET_64BIT_
+ DWORD allFlags = TI_FLAG_DATA_MASK | TI_FLAG_BYREF | TI_FLAG_BYREF_READONLY | TI_FLAG_GENERIC_TYPE_VAR |
+ TI_FLAG_UNINIT_OBJREF;
+#ifdef _TARGET_64BIT_
allFlags |= TI_FLAG_NATIVE_INT;
#endif // _TARGET_64BIT_
@@ -395,11 +394,16 @@ public:
}
unsigned type = li.m_flags & TI_FLAG_DATA_MASK;
- assert(TI_ERROR < TI_ONLY_ENUM); // TI_ERROR looks like it needs more than enum. This optimises the success case a bit
- if (type > TI_ONLY_ENUM)
+ assert(TI_ERROR <
+ TI_ONLY_ENUM); // TI_ERROR looks like it needs more than enum. This optimises the success case a bit
+ if (type > TI_ONLY_ENUM)
+ {
return true;
+ }
if (type == TI_ERROR)
- return false; // TI_ERROR != TI_ERROR
+ {
+ return false; // TI_ERROR != TI_ERROR
+ }
assert(li.m_cls != NO_CLASS_HANDLE && ti.m_cls != NO_CLASS_HANDLE);
return li.m_cls == ti.m_cls;
}
@@ -415,29 +419,29 @@ public:
// native int flag set.
static bool AreEquivalentModuloNativeInt(const typeInfo& verTi, const typeInfo& nodeTi)
{
- if (AreEquivalent(verTi, nodeTi)) return true;
+ if (AreEquivalent(verTi, nodeTi))
+ {
+ return true;
+ }
#ifdef _TARGET_64BIT_
- return (nodeTi.IsType(TI_I_IMPL) && tiCompatibleWith(0, verTi, typeInfo::nativeInt(), true)) ||
- (verTi.IsType(TI_I_IMPL) && tiCompatibleWith(0, typeInfo::nativeInt(), nodeTi, true));
+ return (nodeTi.IsType(TI_I_IMPL) && tiCompatibleWith(nullptr, verTi, typeInfo::nativeInt(), true)) ||
+ (verTi.IsType(TI_I_IMPL) && tiCompatibleWith(nullptr, typeInfo::nativeInt(), nodeTi, true));
#else // _TARGET_64BIT_
return false;
#endif // !_TARGET_64BIT_
-
}
#endif // DEBUG
+ static BOOL tiMergeToCommonParent(COMP_HANDLE CompHnd, typeInfo* pDest, const typeInfo* pSrc, bool* changed);
+ static BOOL tiCompatibleWith(COMP_HANDLE CompHnd,
+ const typeInfo& child,
+ const typeInfo& parent,
+ bool normalisedForStack);
- static BOOL tiMergeToCommonParent (COMP_HANDLE CompHnd, typeInfo *pDest,
- const typeInfo *pSrc,
- bool* changed) ;
- static BOOL tiCompatibleWith (COMP_HANDLE CompHnd, const typeInfo& child,
- const typeInfo& parent,
- bool normalisedForStack) ;
-
- static BOOL tiMergeCompatibleWith (COMP_HANDLE CompHnd, const typeInfo& child,
- const typeInfo& parent,
- bool normalisedForStack) ;
-
+ static BOOL tiMergeCompatibleWith(COMP_HANDLE CompHnd,
+ const typeInfo& child,
+ const typeInfo& parent,
+ bool normalisedForStack);
/////////////////////////////////////////////////////////////////////////
// Operations
@@ -459,14 +463,14 @@ public:
assert(IsByRef());
m_flags |= TI_FLAG_BYREF_PERMANENT_HOME;
}
-
+
void SetIsReadonlyByRef()
{
assert(IsByRef());
m_flags |= TI_FLAG_BYREF_READONLY;
}
- // Set that this item is uninitialized.
+ // Set that this item is uninitialized.
void SetUninitialisedObjRef()
{
assert((IsObjRef() && IsThisPtr()));
@@ -487,7 +491,8 @@ public:
typeInfo& DereferenceByRef()
{
- if (!IsByRef()) {
+ if (!IsByRef())
+ {
m_flags = TI_ERROR;
INDEBUG(m_cls = NO_CLASS_HANDLE);
}
@@ -511,16 +516,16 @@ public:
{
switch (GetType())
{
- case TI_BYTE:
- case TI_SHORT:
- m_flags = TI_INT;
- break;
-
- case TI_FLOAT:
- m_flags = TI_DOUBLE;
- break;
- default:
- break;
+ case TI_BYTE:
+ case TI_SHORT:
+ m_flags = TI_INT;
+ break;
+
+ case TI_FLOAT:
+ m_flags = TI_DOUBLE;
+ break;
+ default:
+ break;
}
return (*this);
}
@@ -529,26 +534,26 @@ public:
// Getters
/////////////////////////////////////////////////////////////////////////
- CORINFO_CLASS_HANDLE GetClassHandle() const
+ CORINFO_CLASS_HANDLE GetClassHandle() const
{
return m_cls;
}
- CORINFO_CLASS_HANDLE GetClassHandleForValueClass() const
+ CORINFO_CLASS_HANDLE GetClassHandleForValueClass() const
{
assert(IsType(TI_STRUCT));
assert(m_cls != NO_CLASS_HANDLE);
return m_cls;
}
- CORINFO_CLASS_HANDLE GetClassHandleForObjRef() const
+ CORINFO_CLASS_HANDLE GetClassHandleForObjRef() const
{
assert(IsType(TI_REF));
assert(m_cls != NO_CLASS_HANDLE);
return m_cls;
}
- CORINFO_METHOD_HANDLE GetMethod() const
+ CORINFO_METHOD_HANDLE GetMethod() const
{
assert(GetType() == TI_METHOD);
return m_method;
@@ -556,30 +561,33 @@ public:
// If FEATURE_CORECLR is enabled, GetMethod can be called
// before the pointer type is known to be a method pointer type.
- CORINFO_METHOD_HANDLE GetMethod2() const
+ CORINFO_METHOD_HANDLE GetMethod2() const
{
return m_method;
- }
-
+ }
+
// Get this item's type
// If primitive, returns the primitive type (TI_*)
// If not primitive, returns:
- // - TI_ERROR if a byref anything
+ // - TI_ERROR if a byref anything
// - TI_REF if a class or array or null or a generic type variable
// - TI_STRUCT if a value class
ti_types GetType() const
{
if (m_flags & TI_FLAG_BYREF)
+ {
return TI_ERROR;
+ }
// objref/array/null (objref), value class, ptr, primitive
- return (ti_types)(m_flags & TI_FLAG_DATA_MASK);
+ return (ti_types)(m_flags & TI_FLAG_DATA_MASK);
}
- BOOL IsType(ti_types type) const {
+ BOOL IsType(ti_types type) const
+ {
assert(type != TI_ERROR);
- return (m_flags & (TI_FLAG_DATA_MASK|TI_FLAG_BYREF|TI_FLAG_BYREF_READONLY|
- TI_FLAG_BYREF_PERMANENT_HOME|TI_FLAG_GENERIC_TYPE_VAR)) == DWORD(type);
+ return (m_flags & (TI_FLAG_DATA_MASK | TI_FLAG_BYREF | TI_FLAG_BYREF_READONLY | TI_FLAG_BYREF_PERMANENT_HOME |
+ TI_FLAG_GENERIC_TYPE_VAR)) == DWORD(type);
}
// Returns whether this is an objref
@@ -623,13 +631,13 @@ public:
BOOL IsStruct() const
{
- return IsType(TI_STRUCT);
+ return IsType(TI_STRUCT);
}
-
+
// A byref value class is NOT a value class
BOOL IsValueClass() const
{
- return (IsStruct() || IsPrimitiveType());
+ return (IsStruct() || IsPrimitiveType());
}
// Does not return true for primitives. Will return true for value types that behave
@@ -637,8 +645,7 @@ public:
BOOL IsValueClassWithClsHnd() const
{
if ((GetType() == TI_STRUCT) ||
- (m_cls && GetType() != TI_REF &&
- GetType() != TI_METHOD &&
+ (m_cls && GetType() != TI_REF && GetType() != TI_METHOD &&
GetType() != TI_ERROR)) // necessary because if byref bit is set, we return TI_ERROR)
{
return TRUE;
@@ -650,34 +657,31 @@ public:
}
// Returns whether this is an integer or real number
- // NOTE: Use NormaliseToPrimitiveType() if you think you may have a
- // System.Int32 etc., because those types are not considered number
+ // NOTE: Use NormaliseToPrimitiveType() if you think you may have a
+ // System.Int32 etc., because those types are not considered number
// types by this function.
BOOL IsNumberType() const
{
ti_types Type = GetType();
- // I1, I2, Boolean, character etc. cannot exist plainly -
+ // I1, I2, Boolean, character etc. cannot exist plainly -
// everything is at least an I4
- return (Type == TI_INT ||
- Type == TI_LONG ||
- Type == TI_DOUBLE);
+ return (Type == TI_INT || Type == TI_LONG || Type == TI_DOUBLE);
}
// Returns whether this is an integer
- // NOTE: Use NormaliseToPrimitiveType() if you think you may have a
- // System.Int32 etc., because those types are not considered number
+ // NOTE: Use NormaliseToPrimitiveType() if you think you may have a
+ // System.Int32 etc., because those types are not considered number
// types by this function.
BOOL IsIntegerType() const
{
ti_types Type = GetType();
- // I1, I2, Boolean, character etc. cannot exist plainly -
+ // I1, I2, Boolean, character etc. cannot exist plainly -
// everything is at least an I4
- return (Type == TI_INT ||
- Type == TI_LONG);
+ return (Type == TI_INT || Type == TI_LONG);
}
// Returns true whether this is an integer or a native int.
@@ -695,7 +699,7 @@ public:
return AreEquivalent(*this, nativeInt());
}
- // Returns whether this is a primitive type (not a byref, objref,
+ // Returns whether this is a primitive type (not a byref, objref,
// array, null, value class, invalid value)
// May Need to normalise first (m/r/I4 --> I4)
BOOL IsPrimitiveType() const
@@ -703,11 +707,7 @@ public:
DWORD Type = GetType();
// boolean, char, u1,u2 never appear on the operand stack
- return (Type == TI_BYTE ||
- Type == TI_SHORT ||
- Type == TI_INT ||
- Type == TI_LONG ||
- Type == TI_FLOAT ||
+ return (Type == TI_BYTE || Type == TI_SHORT || Type == TI_INT || Type == TI_LONG || Type == TI_FLOAT ||
Type == TI_DOUBLE);
}
@@ -719,7 +719,7 @@ public:
// must be for a local which is an object type (i.e. has a slot >= 0)
// for primitive locals, use the liveness bitmap instead
- // Note that this works if the error is 'Byref'
+ // Note that this works if the error is 'Byref'
BOOL IsDead() const
{
return (m_flags & (TI_FLAG_DATA_MASK)) == TI_ERROR;
@@ -732,34 +732,30 @@ public:
private:
// used to make functions that return typeinfo efficient.
- typeInfo(DWORD flags, CORINFO_CLASS_HANDLE cls)
+ typeInfo(DWORD flags, CORINFO_CLASS_HANDLE cls)
{
m_cls = cls;
m_flags = flags;
}
-
+
friend typeInfo ByRef(const typeInfo& ti);
friend typeInfo DereferenceByRef(const typeInfo& ti);
friend typeInfo NormaliseForStack(const typeInfo& ti);
};
-inline
-typeInfo NormaliseForStack(const typeInfo& ti)
+inline typeInfo NormaliseForStack(const typeInfo& ti)
{
return typeInfo(ti).NormaliseForStack();
}
-// given ti make a byref to that type.
-inline
-typeInfo ByRef(const typeInfo& ti)
+// given ti make a byref to that type.
+inline typeInfo ByRef(const typeInfo& ti)
{
return typeInfo(ti).MakeByRef();
}
-
// given ti which is a byref, return the type it points at
-inline
-typeInfo DereferenceByRef(const typeInfo& ti)
+inline typeInfo DereferenceByRef(const typeInfo& ti)
{
return typeInfo(ti).DereferenceByRef();
}
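
The _typeinfo.h hunks above only reflow comments and whitespace; the flag layout ("LLLLLLLLLLLLLLLLffffffffffTTTTTT") and the TI_FLAG_* values are unchanged. As a reading aid, here is a small standalone C++ sketch of how such a flag word is decoded; the constants are copied from the header, while the program itself is illustrative only and not part of the jit:

#include <cstdint>
#include <cstdio>

// Values reproduced from _typeinfo.h for a self-contained demo.
static const uint32_t TI_FLAG_DATA_BITS       = 6;
static const uint32_t TI_FLAG_DATA_MASK       = (1u << TI_FLAG_DATA_BITS) - 1;
static const uint32_t TI_FLAG_BYREF           = 0x00000080;
static const uint32_t TI_FLAG_LOCAL_VAR_SHIFT = 16;
static const uint32_t TI_FLAG_LOCAL_VAR_MASK  = 0xFFFF0000;

int main()
{
    // A hypothetical flag word: local var #5, byref, with ti_types value 8 in the low bits.
    uint32_t flags = (5u << TI_FLAG_LOCAL_VAR_SHIFT) | TI_FLAG_BYREF | 8u;

    uint32_t type   = flags & TI_FLAG_DATA_MASK;                                   // the TTTTTT bits
    bool     byref  = (flags & TI_FLAG_BYREF) != 0;                                // one of the f flag bits
    uint32_t lclNum = (flags & TI_FLAG_LOCAL_VAR_MASK) >> TI_FLAG_LOCAL_VAR_SHIFT; // the LLLL... bits

    printf("type=%u byref=%d local=%u\n", type, (int)byref, lclNum);
    return 0;
}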
diff --git a/src/jit/alloc.cpp b/src/jit/alloc.cpp
index d09bbe641f..5c5f712a3f 100644
--- a/src/jit/alloc.cpp
+++ b/src/jit/alloc.cpp
@@ -20,15 +20,17 @@ private:
enum
{
POOLED_ALLOCATOR_NOTINITIALIZED = 0,
- POOLED_ALLOCATOR_IN_USE = 1,
- POOLED_ALLOCATOR_AVAILABLE = 2,
- POOLED_ALLOCATOR_SHUTDOWN = 3,
+ POOLED_ALLOCATOR_IN_USE = 1,
+ POOLED_ALLOCATOR_AVAILABLE = 2,
+ POOLED_ALLOCATOR_SHUTDOWN = 3,
};
static PooledAllocator s_pooledAllocator;
- static LONG s_pooledAllocatorState;
+ static LONG s_pooledAllocatorState;
- PooledAllocator() : ArenaAllocator() {}
+ PooledAllocator() : ArenaAllocator()
+ {
+ }
PooledAllocator(IEEMemoryManager* memoryManager);
PooledAllocator(const PooledAllocator& other) = delete;
@@ -61,7 +63,7 @@ bool ArenaAllocator::bypassHostAllocator()
// knobs for ensuring that we do not have buffer overruns in the JIT.
return JitConfig.JitDirectAlloc() != 0;
-#else // defined(DEBUG)
+#else // defined(DEBUG)
return false;
#endif // !defined(DEBUG)
}
@@ -115,16 +117,16 @@ ArenaAllocator& ArenaAllocator::operator=(ArenaAllocator&& other)
assert(!isInitialized());
m_memoryManager = other.m_memoryManager;
- m_firstPage = other.m_firstPage;
- m_lastPage = other.m_lastPage;
- m_nextFreeByte = other.m_nextFreeByte;
- m_lastFreeByte = other.m_lastFreeByte;
+ m_firstPage = other.m_firstPage;
+ m_lastPage = other.m_lastPage;
+ m_nextFreeByte = other.m_nextFreeByte;
+ m_lastFreeByte = other.m_lastFreeByte;
other.m_memoryManager = nullptr;
- other.m_firstPage = nullptr;
- other.m_lastPage = nullptr;
- other.m_nextFreeByte = nullptr;
- other.m_lastFreeByte = nullptr;
+ other.m_firstPage = nullptr;
+ other.m_lastPage = nullptr;
+ other.m_nextFreeByte = nullptr;
+ other.m_lastFreeByte = nullptr;
return *this;
}
@@ -196,12 +198,12 @@ void* ArenaAllocator::allocateNewPage(size_t size, bool canThrow)
}
// Append the new page to the end of the list
- newPage->m_next = nullptr;
+ newPage->m_next = nullptr;
newPage->m_pageBytes = pageSize;
- newPage->m_previous = m_lastPage;
- newPage->m_usedBytes = 0; // m_usedBytes is meaningless until a new page is allocated.
- // Instead of letting it contain garbage (so to confuse us),
- // set it to zero.
+ newPage->m_previous = m_lastPage;
+ newPage->m_usedBytes = 0; // m_usedBytes is meaningless until a new page is allocated.
+ // Instead of letting it contain garbage (so to confuse us),
+ // set it to zero.
if (m_lastPage != nullptr)
{
@@ -230,7 +232,7 @@ void ArenaAllocator::destroy()
assert(isInitialized());
// Free all of the allocated pages
- for (PageDescriptor* page = m_firstPage, *next; page != nullptr; page = next)
+ for (PageDescriptor *page = m_firstPage, *next; page != nullptr; page = next)
{
next = page->m_next;
freeHostMemory(page);
@@ -238,10 +240,10 @@ void ArenaAllocator::destroy()
// Clear out the allocator's fields
m_memoryManager = nullptr;
- m_firstPage = nullptr;
- m_lastPage = nullptr;
- m_nextFreeByte = nullptr;
- m_lastFreeByte = nullptr;
+ m_firstPage = nullptr;
+ m_lastPage = nullptr;
+ m_nextFreeByte = nullptr;
+ m_lastFreeByte = nullptr;
}
// The debug version of the allocator may allocate directly from the
@@ -277,7 +279,7 @@ void* ArenaAllocator::allocateHostMemory(size_t size)
{
return ClrAllocInProcessHeap(0, S_SIZE_T(size));
}
-#else // defined(DEBUG)
+#else // defined(DEBUG)
return m_memoryManager->ClrVirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE);
#endif // !defined(DEBUG)
}
@@ -301,7 +303,7 @@ void ArenaAllocator::freeHostMemory(void* block)
{
ClrFreeInProcessHeap(0, block);
}
-#else // defined(DEBUG)
+#else // defined(DEBUG)
m_memoryManager->ClrVirtualFree(block, 0, MEM_RELEASE);
#endif // !defined(DEBUG)
}
@@ -334,16 +336,16 @@ void* ArenaAllocator::allocateMemory(size_t size)
if (JitConfig.ShouldInjectFault() != 0)
{
- // Force the underlying memory allocator (either the OS or the CLR hoster)
+ // Force the underlying memory allocator (either the OS or the CLR hoster)
// to allocate the memory. Any fault injection will kick in.
void* p = ClrAllocInProcessHeap(0, S_SIZE_T(1));
if (p != nullptr)
{
ClrFreeInProcessHeap(0, p);
}
- else
+ else
{
- NOMEM(); // Throw!
+ NOMEM(); // Throw!
}
}
@@ -419,9 +421,7 @@ size_t ArenaAllocator::getTotalBytesUsed()
// subsystem.
void ArenaAllocator::startup()
{
- s_defaultPageSize = bypassHostAllocator()
- ? (size_t)MIN_PAGE_SIZE
- : (size_t)DEFAULT_PAGE_SIZE;
+ s_defaultPageSize = bypassHostAllocator() ? (size_t)MIN_PAGE_SIZE : (size_t)DEFAULT_PAGE_SIZE;
}
//------------------------------------------------------------------------
@@ -433,13 +433,12 @@ void ArenaAllocator::shutdown()
}
PooledAllocator PooledAllocator::s_pooledAllocator;
-LONG PooledAllocator::s_pooledAllocatorState = POOLED_ALLOCATOR_NOTINITIALIZED;
+LONG PooledAllocator::s_pooledAllocatorState = POOLED_ALLOCATOR_NOTINITIALIZED;
//------------------------------------------------------------------------
// PooledAllocator::PooledAllocator:
// Constructs a `PooledAllocator`.
-PooledAllocator::PooledAllocator(IEEMemoryManager* memoryManager)
- : ArenaAllocator(memoryManager)
+PooledAllocator::PooledAllocator(IEEMemoryManager* memoryManager) : ArenaAllocator(memoryManager)
{
}
@@ -448,7 +447,7 @@ PooledAllocator::PooledAllocator(IEEMemoryManager* memoryManager)
// Move-assigns a `PooledAllocator`.
PooledAllocator& PooledAllocator::operator=(PooledAllocator&& other)
{
- *((ArenaAllocator*)this) = std::move((ArenaAllocator&&)other);
+ *((ArenaAllocator*)this) = std::move((ArenaAllocator &&)other);
return *this;
}
@@ -514,18 +513,18 @@ ArenaAllocator* PooledAllocator::getPooledAllocator(IEEMemoryManager* memoryMana
return &s_pooledAllocator;
case POOLED_ALLOCATOR_NOTINITIALIZED:
+ {
+ PooledAllocator allocator(memoryManager);
+ if (allocator.allocateNewPage(0, false) == nullptr)
{
- PooledAllocator allocator(memoryManager);
- if (allocator.allocateNewPage(0, false) == nullptr)
- {
- // Failed to grab the initial memory page.
- InterlockedExchange(&s_pooledAllocatorState, POOLED_ALLOCATOR_NOTINITIALIZED);
- return nullptr;
- }
-
- s_pooledAllocator = std::move(allocator);
+ // Failed to grab the initial memory page.
+ InterlockedExchange(&s_pooledAllocatorState, POOLED_ALLOCATOR_NOTINITIALIZED);
+ return nullptr;
}
+ s_pooledAllocator = std::move(allocator);
+ }
+
return &s_pooledAllocator;
default:
@@ -546,7 +545,7 @@ void PooledAllocator::destroy()
assert(m_firstPage != nullptr);
// Free all but the first allocated page
- for (PageDescriptor* page = m_firstPage->m_next, *next; page != nullptr; page = next)
+ for (PageDescriptor *page = m_firstPage->m_next, *next; page != nullptr; page = next)
{
next = page->m_next;
freeHostMemory(page);
@@ -554,9 +553,9 @@ void PooledAllocator::destroy()
// Reset the relevant state to point back to the first byte of the first page
m_firstPage->m_next = nullptr;
- m_lastPage = m_firstPage;
- m_nextFreeByte = m_firstPage->m_contents;
- m_lastFreeByte = (BYTE*)m_firstPage + m_firstPage->m_pageBytes;
+ m_lastPage = m_firstPage;
+ m_nextFreeByte = m_firstPage->m_contents;
+ m_lastFreeByte = (BYTE*)m_firstPage + m_firstPage->m_pageBytes;
assert(getTotalBytesAllocated() == s_defaultPageSize);
diff --git a/src/jit/alloc.h b/src/jit/alloc.h
index e5aa29251c..a769341378 100644
--- a/src/jit/alloc.h
+++ b/src/jit/alloc.h
@@ -33,7 +33,7 @@ protected:
enum
{
DEFAULT_PAGE_SIZE = 16 * OS_page_size,
- MIN_PAGE_SIZE = sizeof(PageDescriptor)
+ MIN_PAGE_SIZE = sizeof(PageDescriptor)
};
static size_t s_defaultPageSize;
@@ -44,7 +44,7 @@ protected:
PageDescriptor* m_lastPage;
// These two pointers (when non-null) will always point into 'm_lastPage'.
- BYTE* m_nextFreeByte;
+ BYTE* m_nextFreeByte;
BYTE* m_lastFreeByte;
bool isInitialized();
@@ -69,7 +69,7 @@ public:
#if defined(DEBUG)
void* allocateMemory(size_t sz);
-#else // defined(DEBUG)
+#else // defined(DEBUG)
inline void* allocateMemory(size_t size)
{
void* block = m_nextFreeByte;
@@ -87,7 +87,7 @@ public:
size_t getTotalBytesAllocated();
size_t getTotalBytesUsed();
- static bool bypassHostAllocator();
+ static bool bypassHostAllocator();
static size_t getDefaultPageSize();
static void startup();
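
The release-build ArenaAllocator::allocateMemory shown in the alloc.h hunk above is a plain bump allocation: hand out m_nextFreeByte, advance it, and fall back to a slower path only when the current page runs out. A simplified, self-contained sketch of that scheme (this is not the real ArenaAllocator, and the size rounding is an assumption added for illustration):

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Minimal bump allocator over one fixed page, mirroring the
// m_nextFreeByte / m_lastFreeByte pointers used by ArenaAllocator.
struct BumpPage
{
    uint8_t* nextFreeByte;
    uint8_t* lastFreeByte;

    void* allocate(size_t size)
    {
        // Round up to pointer-size alignment (illustrative choice, not taken from the jit).
        size = (size + sizeof(void*) - 1) & ~(sizeof(void*) - 1);

        uint8_t* block = nextFreeByte;
        nextFreeByte += size;
        if (nextFreeByte > lastFreeByte)
        {
            return nullptr; // the real allocator would allocate a new page here
        }
        return block;
    }
};

int main()
{
    const size_t pageBytes = 4096;
    uint8_t*     page      = static_cast<uint8_t*>(malloc(pageBytes));

    BumpPage arena{page, page + pageBytes};
    void* a = arena.allocate(24);  // consecutive chunks carved from the same page
    void* b = arena.allocate(100);

    free(page);
    return (a != nullptr && b != nullptr) ? 0 : 1;
}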
diff --git a/src/jit/arraystack.h b/src/jit/arraystack.h
index 4d433dd5a8..1692294fcb 100644
--- a/src/jit/arraystack.h
+++ b/src/jit/arraystack.h
@@ -9,21 +9,21 @@ template <class T>
class ArrayStack
{
static const int builtinSize = 8;
-
+
public:
- ArrayStack(Compiler *comp, int initialSize = builtinSize)
+ ArrayStack(Compiler* comp, int initialSize = builtinSize)
{
compiler = comp;
if (initialSize > builtinSize)
{
maxIndex = initialSize;
- data = new(compiler, CMK_ArrayStack) T[initialSize];
+ data = new (compiler, CMK_ArrayStack) T[initialSize];
}
else
{
maxIndex = builtinSize;
- data = builtinData;
+ data = builtinData;
}
tosIndex = 0;
@@ -32,8 +32,10 @@ public:
void Push(T item)
{
if (tosIndex == maxIndex)
+ {
Realloc();
-
+ }
+
data[tosIndex] = item;
tosIndex++;
}
@@ -43,9 +45,9 @@ public:
// get a new chunk 2x the size of the old one
// and copy over
T* oldData = data;
- noway_assert(maxIndex*2 > maxIndex);
- data = new(compiler, CMK_ArrayStack) T[maxIndex*2];
- for (int i=0; i<maxIndex; i++)
+ noway_assert(maxIndex * 2 > maxIndex);
+ data = new (compiler, CMK_ArrayStack) T[maxIndex * 2];
+ for (int i = 0; i < maxIndex; i++)
{
data[i] = oldData[i];
}
@@ -56,19 +58,21 @@ public:
void ReverseTop(int number)
{
if (number < 2)
+ {
return;
+ }
assert(number <= tosIndex);
- int start = tosIndex - number;
+ int start = tosIndex - number;
int offset = 0;
- while (offset < number/2)
+ while (offset < number / 2)
{
- T temp;
- int index = start+offset;
- int otherIndex = tosIndex - 1 - offset;
- temp = data[index];
- data[index] = data[otherIndex];
+ T temp;
+ int index = start + offset;
+ int otherIndex = tosIndex - 1 - offset;
+ temp = data[index];
+ data[index] = data[otherIndex];
data[otherIndex] = temp;
offset++;
@@ -85,13 +89,13 @@ public:
T Top()
{
assert(tosIndex > 0);
- return data[tosIndex-1];
+ return data[tosIndex - 1];
}
T& TopRef()
{
assert(tosIndex > 0);
- return data[tosIndex-1];
+ return data[tosIndex - 1];
}
// return the i'th from the top
@@ -133,13 +137,10 @@ public:
}
private:
- Compiler *compiler; // needed for allocation
- int tosIndex; // first free location
- int maxIndex;
- T* data;
+ Compiler* compiler; // needed for allocation
+ int tosIndex; // first free location
+ int maxIndex;
+ T* data;
// initial allocation
- T builtinData[builtinSize];
+ T builtinData[builtinSize];
};
-
-
-
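
ArrayStack::ReverseTop above reverses the order of the top 'number' entries in place, leaving the rest of the stack untouched. The same index arithmetic on a std::vector, as a standalone check of what the loop does (illustrative only, not jit code):

#include <cassert>
#include <utility>
#include <vector>

// Reverse the top 'number' elements of a stack stored in a vector (back() is the top),
// using the same start/offset/otherIndex arithmetic as ArrayStack::ReverseTop.
static void reverseTop(std::vector<int>& stack, int number)
{
    if (number < 2)
    {
        return;
    }
    assert(number <= (int)stack.size());

    int tosIndex = (int)stack.size();
    int start    = tosIndex - number;
    int offset   = 0;
    while (offset < number / 2)
    {
        int index      = start + offset;
        int otherIndex = tosIndex - 1 - offset;
        std::swap(stack[index], stack[otherIndex]);
        offset++;
    }
}

int main()
{
    std::vector<int> stack = {1, 2, 3, 4, 5}; // 5 is the top of the stack
    reverseTop(stack, 3);                     // reverses the top three entries: 3, 4, 5
    assert((stack == std::vector<int>{1, 2, 5, 4, 3}));
    return 0;
}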
diff --git a/src/jit/assertionprop.cpp b/src/jit/assertionprop.cpp
index e859cd7bd9..a85f7311bf 100644
--- a/src/jit/assertionprop.cpp
+++ b/src/jit/assertionprop.cpp
@@ -16,24 +16,22 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#pragma hdrstop
#endif
-
/*****************************************************************************
*
* Helper passed to Compiler::fgWalkTreePre() to find the Asgn node for optAddCopies()
*/
/* static */
-Compiler::fgWalkResult Compiler::optAddCopiesCallback(GenTreePtr * pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::optAddCopiesCallback(GenTreePtr* pTree, fgWalkData* data)
{
GenTreePtr tree = *pTree;
if (tree->OperKind() & GTK_ASGOP)
{
GenTreePtr op1 = tree->gtOp.gtOp1;
- Compiler * comp = data->compiler;
+ Compiler* comp = data->compiler;
- if ((op1->gtOper == GT_LCL_VAR) &&
- (op1->gtLclVarCommon.gtLclNum == comp->optAddCopyLclNum))
+ if ((op1->gtOper == GT_LCL_VAR) && (op1->gtLclVarCommon.gtLclNum == comp->optAddCopyLclNum))
{
comp->optAddCopyAsgnNode = tree;
return WALK_ABORT;
@@ -42,20 +40,21 @@ Compiler::fgWalkResult Compiler::optAddCopiesCallback(GenTreePtr * pTree, fgWal
return WALK_CONTINUE;
}
-
/*****************************************************************************
*
* Add new copies before Assertion Prop.
*/
-void Compiler::optAddCopies()
+void Compiler::optAddCopies()
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("\n*************** In optAddCopies()\n\n");
+ }
if (verboseTrees)
{
printf("Blocks/Trees at start of phase\n");
@@ -64,20 +63,22 @@ void Compiler::optAddCopies()
#endif
// Don't add any copies if we have reached the tracking limit.
- if (lvaHaveManyLocals())
+ if (lvaHaveManyLocals())
+ {
return;
+ }
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
- var_types typ = varDsc->TypeGet();
+ var_types typ = varDsc->TypeGet();
- // We only add copies for non temp local variables
+ // We only add copies for non temp local variables
// that have a single def and that can possibly be enregistered
if (varDsc->lvIsTemp || !varDsc->lvSingleDef || !varTypeCanReg(typ))
+ {
continue;
+ }
/* For lvNormalizeOnLoad(), we need to add a cast to the copy-assignment
like "copyLclNum = int(varDsc)" and optAssertionGen() only
@@ -85,10 +86,14 @@ void Compiler::optAddCopies()
the cast is generated in fgMorphSmpOpAsg. This boils down to not having
a copy until optAssertionGen handles this*/
if (varDsc->lvNormalizeOnLoad() || varDsc->lvNormalizeOnStore() || typ == TYP_BOOL)
+ {
continue;
+ }
if (varTypeIsSmall(varDsc->TypeGet()) || typ == TYP_BOOL)
+ {
continue;
+ }
// If locals must be initialized to zero, that initialization counts as a second definition.
// VB in particular allows usage of variables not explicitly initialized.
@@ -96,13 +101,15 @@ void Compiler::optAddCopies()
// as C# sets InitLocals all the time starting in Whidbey.
if (!varDsc->lvIsParam && info.compInitMem)
+ {
continue;
+ }
// On x86 we may want to add a copy for an incoming double parameter
// because we can ensure that the copy we make is double aligned
// where as we can never ensure the alignment of an incoming double parameter
- //
- // On all other platforms we will never need to make a copy
+ //
+ // On all other platforms we will never need to make a copy
// for an incoming double parameter
bool isFloatParam = false;
@@ -112,37 +119,40 @@ void Compiler::optAddCopies()
#endif
if (!isFloatParam && !varDsc->lvVolatileHint)
+ {
continue;
+ }
// We don't want to add a copy for a variable that is part of a struct
if (varDsc->lvIsStructField)
+ {
continue;
+ }
// We require that the weighted ref count be significant.
- if (varDsc->lvRefCntWtd <= (BB_LOOP_WEIGHT*BB_UNITY_WEIGHT/2))
+ if (varDsc->lvRefCntWtd <= (BB_LOOP_WEIGHT * BB_UNITY_WEIGHT / 2))
+ {
continue;
+ }
// For parameters, we only want to add a copy for the heavier-than-average
// uses instead of adding a copy to cover every single use.
// 'paramImportantUseDom' is the set of blocks that dominate the
- // heavier-than-average uses of a parameter.
+ // heavier-than-average uses of a parameter.
// Initial value is all blocks.
BlockSet BLOCKSET_INIT_NOCOPY(paramImportantUseDom, BlockSetOps::MakeFull(this));
// This will be threshold for determining heavier-than-average uses
- unsigned paramAvgWtdRefDiv2 = (varDsc->lvRefCntWtd + varDsc->lvRefCnt/2) / (varDsc->lvRefCnt*2);
+ unsigned paramAvgWtdRefDiv2 = (varDsc->lvRefCntWtd + varDsc->lvRefCnt / 2) / (varDsc->lvRefCnt * 2);
bool paramFoundImportantUse = false;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("Trying to add a copy for V%02u %s, avg_wtd = %s\n",
- lclNum,
- varDsc->lvIsParam ? "an arg"
- : "a local",
- refCntWtd2str(paramAvgWtdRefDiv2));
+ printf("Trying to add a copy for V%02u %s, avg_wtd = %s\n", lclNum,
+ varDsc->lvIsParam ? "an arg" : "a local", refCntWtd2str(paramAvgWtdRefDiv2));
}
#endif
@@ -162,17 +172,17 @@ void Compiler::optAddCopies()
while (iter.NextElem(this, &blkNum))
{
/* Find the block 'blkNum' */
- BasicBlock * block = fgFirstBB;
+ BasicBlock* block = fgFirstBB;
while (block && (block->bbNum != blkNum))
{
block = block->bbNext;
}
noway_assert(block && (block->bbNum == blkNum));
- bool importantUseInBlock = (varDsc->lvIsParam) && (block->getBBWeight(this) > paramAvgWtdRefDiv2);
- bool isPreHeaderBlock = ((block->bbFlags & BBF_LOOP_PREHEADER) != 0);
- BlockSet BLOCKSET_INIT_NOCOPY(blockDom, BlockSetOps::UninitVal());
- BlockSet BLOCKSET_INIT_NOCOPY(blockDomSub0, BlockSetOps::UninitVal());
+ bool importantUseInBlock = (varDsc->lvIsParam) && (block->getBBWeight(this) > paramAvgWtdRefDiv2);
+ bool isPreHeaderBlock = ((block->bbFlags & BBF_LOOP_PREHEADER) != 0);
+ BlockSet BLOCKSET_INIT_NOCOPY(blockDom, BlockSetOps::UninitVal());
+ BlockSet BLOCKSET_INIT_NOCOPY(blockDomSub0, BlockSetOps::UninitVal());
if (block->bbIDom == nullptr && isPreHeaderBlock)
{
@@ -202,17 +212,19 @@ void Compiler::optAddCopies()
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf(" Referenced in BB%02u, bbWeight is %s",
- blkNum,
- refCntWtd2str(block->getBBWeight(this)));
+ printf(" Referenced in BB%02u, bbWeight is %s", blkNum, refCntWtd2str(block->getBBWeight(this)));
if (isDominatedByFirstBB)
+ {
printf(", which is dominated by BB01");
+ }
if (importantUseInBlock)
+ {
printf(", ImportantUse");
+ }
printf("\n");
}
@@ -223,7 +235,8 @@ void Compiler::optAddCopies()
if (importantUseInBlock)
{
paramFoundImportantUse = true;
- BlockSetOps::IntersectionD(this, paramImportantUseDom, blockDomSub0); // Clear blocks that do not dominate
+ BlockSetOps::IntersectionD(this, paramImportantUseDom,
+ blockDomSub0); // Clear blocks that do not dominate
}
}
@@ -231,7 +244,9 @@ void Compiler::optAddCopies()
if (varDsc->lvIsParam)
{
if (!paramFoundImportantUse)
+ {
continue;
+ }
}
// For us to add a new copy:
@@ -239,38 +254,41 @@ void Compiler::optAddCopies()
// or a lvVolatile variable that is always reached from the first BB
// and we have at least one block available in paramImportantUseDom
//
- bool doCopy = (isFloatParam || (isDominatedByFirstBB && varDsc->lvVolatileHint))
- && !BlockSetOps::IsEmpty(this, paramImportantUseDom);
-
+ bool doCopy = (isFloatParam || (isDominatedByFirstBB && varDsc->lvVolatileHint)) &&
+ !BlockSetOps::IsEmpty(this, paramImportantUseDom);
+
// Under stress mode we expand the number of candidates
- // to include parameters of any type
+ // to include parameters of any type
// or any variable that is always reached from the first BB
//
if (compStressCompile(STRESS_GENERIC_VARN, 30))
{
// Ensure that we preserve the invariants required by the subsequent code.
if (varDsc->lvIsParam || isDominatedByFirstBB)
+ {
doCopy = true;
+ }
}
if (!doCopy)
+ {
continue;
+ }
GenTreePtr stmt;
- unsigned copyLclNum = lvaGrabTemp(false DEBUGARG("optAddCopies"));
+ unsigned copyLclNum = lvaGrabTemp(false DEBUGARG("optAddCopies"));
// Because lvaGrabTemp may have reallocated the lvaTable, ensure varDsc
// is still in sync with lvaTable[lclNum];
varDsc = &lvaTable[lclNum];
-
+
// Set lvType on the new Temp Lcl Var
lvaTable[copyLclNum].lvType = typ;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("\n Finding the best place to insert the assignment V%02i=V%02i\n",
- copyLclNum, lclNum);
+ printf("\n Finding the best place to insert the assignment V%02i=V%02i\n", copyLclNum, lclNum);
}
#endif
@@ -287,19 +305,17 @@ void Compiler::optAddCopies()
/* dominates all the uses of the local variable */
/* Our default is to use the first block */
- BasicBlock * bestBlock = fgFirstBB;
- unsigned bestWeight = bestBlock->getBBWeight(this);
- BasicBlock * block = bestBlock;
+ BasicBlock* bestBlock = fgFirstBB;
+ unsigned bestWeight = bestBlock->getBBWeight(this);
+ BasicBlock* block = bestBlock;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf(" Starting at BB%02u, bbWeight is %s",
- block->bbNum,
+ printf(" Starting at BB%02u, bbWeight is %s", block->bbNum,
refCntWtd2str(block->getBBWeight(this)));
- printf(", bestWeight is %s\n",
- refCntWtd2str(bestWeight));
+ printf(", bestWeight is %s\n", refCntWtd2str(bestWeight));
}
#endif
@@ -317,36 +333,38 @@ void Compiler::optAddCopies()
noway_assert(block && (block->bbNum == blkNum));
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf(" Considering BB%02u, bbWeight is %s",
- block->bbNum,
+ printf(" Considering BB%02u, bbWeight is %s", block->bbNum,
refCntWtd2str(block->getBBWeight(this)));
- printf(", bestWeight is %s\n",
- refCntWtd2str(bestWeight));
+ printf(", bestWeight is %s\n", refCntWtd2str(bestWeight));
}
#endif
- // Does this block have a smaller bbWeight value?
+ // Does this block have a smaller bbWeight value?
if (block->getBBWeight(this) > bestWeight)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("bbWeight too high\n");
+ }
#endif
continue;
}
// Don't use blocks that are exception handlers because
- // inserting a new first statement will interface with
+ // inserting a new first statement will interface with
// the CATCHARG
if (handlerGetsXcptnObj(block->bbCatchTyp))
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("Catch block\n");
+ }
#endif
continue;
}
@@ -357,12 +375,15 @@ void Compiler::optAddCopies()
if (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)
{
#if FEATURE_EH_FUNCLETS
- // With funclets, this is only used for BBJ_CALLFINALLY/BBJ_ALWAYS pairs. For x86, it is also used as the "final step" block for leaving finallys.
+ // With funclets, this is only used for BBJ_CALLFINALLY/BBJ_ALWAYS pairs. For x86, it is also used
+ // as the "final step" block for leaving finallys.
assert((block->bbPrev != nullptr) && block->bbPrev->isBBCallAlwaysPair());
#endif // FEATURE_EH_FUNCLETS
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("Internal EH BBJ_ALWAYS block\n");
+ }
#endif
continue;
}
@@ -372,8 +393,10 @@ void Compiler::optAddCopies()
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("new bestBlock\n");
+ }
#endif
bestBlock = block;
@@ -386,19 +409,26 @@ void Compiler::optAddCopies()
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf(" Insert copy at the %s of BB%02u\n",
- (BlockSetOps::IsEmpty(this, paramImportantUseDom) || BlockSetOps::IsMember(this, varDsc->lvRefBlks, bestBlock->bbNum)) ?
- "start" : "end",
- bestBlock->bbNum);
+ (BlockSetOps::IsEmpty(this, paramImportantUseDom) ||
+ BlockSetOps::IsMember(this, varDsc->lvRefBlks, bestBlock->bbNum))
+ ? "start"
+ : "end",
+ bestBlock->bbNum);
}
#endif
- if (BlockSetOps::IsEmpty(this, paramImportantUseDom) || BlockSetOps::IsMember(this, varDsc->lvRefBlks, bestBlock->bbNum))
+ if (BlockSetOps::IsEmpty(this, paramImportantUseDom) ||
+ BlockSetOps::IsMember(this, varDsc->lvRefBlks, bestBlock->bbNum))
+ {
stmt = fgInsertStmtAtBeg(bestBlock, copyAsgn);
+ }
else
+ {
stmt = fgInsertStmtNearEnd(bestBlock, copyAsgn);
+ }
/* Increment its lvRefCnt and lvRefCntWtd */
lvaTable[lclNum].incRefCnts(fgFirstBB->getBBWeight(this), this);
@@ -414,26 +444,21 @@ void Compiler::optAddCopies()
stmt = varDsc->lvDefStmt;
noway_assert(stmt->gtOper == GT_STMT);
- optAddCopyLclNum = lclNum; // in
- optAddCopyAsgnNode = nullptr; // out
-
- fgWalkTreePre(&stmt->gtStmt.gtStmtExpr,
- Compiler::optAddCopiesCallback,
- (void *) this,
- false);
+ optAddCopyLclNum = lclNum; // in
+ optAddCopyAsgnNode = nullptr; // out
+
+ fgWalkTreePre(&stmt->gtStmt.gtStmtExpr, Compiler::optAddCopiesCallback, (void*)this, false);
noway_assert(optAddCopyAsgnNode);
GenTreePtr tree = optAddCopyAsgnNode;
GenTreePtr op1 = tree->gtOp.gtOp1;
- noway_assert( tree && op1 &&
- (tree->OperKind() & GTK_ASGOP) &&
- (op1->gtOper == GT_LCL_VAR) &&
- (op1->gtLclVarCommon.gtLclNum == lclNum));
+ noway_assert(tree && op1 && (tree->OperKind() & GTK_ASGOP) && (op1->gtOper == GT_LCL_VAR) &&
+ (op1->gtLclVarCommon.gtLclNum == lclNum));
/* TODO-Review: BB_UNITY_WEIGHT is not the correct block weight */
- unsigned blockWeight = BB_UNITY_WEIGHT;
+ unsigned blockWeight = BB_UNITY_WEIGHT;
/* Increment its lvRefCnt and lvRefCntWtd twice */
lvaTable[copyLclNum].incRefCnts(blockWeight, this);
@@ -441,7 +466,7 @@ void Compiler::optAddCopies()
/* Assign the old expression into the new temp */
- GenTreePtr newAsgn = gtNewTempAssign(copyLclNum, tree->gtOp.gtOp2);
+ GenTreePtr newAsgn = gtNewTempAssign(copyLclNum, tree->gtOp.gtOp2);
/* Copy the new temp to op1 */
@@ -452,15 +477,15 @@ void Compiler::optAddCopies()
tree->gtBashToNOP();
tree->ChangeOper(GT_COMMA);
- tree->gtOp.gtOp1 = newAsgn;
- tree->gtOp.gtOp2 = copyAsgn;
+ tree->gtOp.gtOp1 = newAsgn;
+ tree->gtOp.gtOp2 = copyAsgn;
- tree->gtFlags |= ( newAsgn->gtFlags & GTF_ALL_EFFECT);
- tree->gtFlags |= (copyAsgn->gtFlags & GTF_ALL_EFFECT);
+ tree->gtFlags |= (newAsgn->gtFlags & GTF_ALL_EFFECT);
+ tree->gtFlags |= (copyAsgn->gtFlags & GTF_ALL_EFFECT);
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nIntroducing a new copy for V%02u\n", lclNum);
gtDispTree(stmt->gtStmt.gtStmtExpr);
@@ -477,14 +502,14 @@ void Compiler::optAddCopies()
// lclNum - The local var id.
//
// Return Value:
-// The dependent assertions (assertions using the value of the local var)
+// The dependent assertions (assertions using the value of the local var)
// of the local var.
-//
+//
-ASSERT_TP& Compiler::GetAssertionDep(unsigned lclNum)
+ASSERT_TP& Compiler::GetAssertionDep(unsigned lclNum)
{
ExpandArray<ASSERT_TP>& dep = *optAssertionDep;
- if (dep[lclNum] == NULL)
+ if (dep[lclNum] == nullptr)
{
dep[lclNum] = optNewEmptyAssertSet();
}
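GetAssertionDep allocates the per-local dependency set lazily: the set for a local is created the first time that local is queried. A minimal standalone sketch of the same pattern, with hypothetical names (the real code grows an ExpandArray and allocates a JIT arena bit vector via optNewEmptyAssertSet, not std::set):

    #include <set>
    #include <vector>

    using AssertSet = std::set<unsigned>; // stand-in for ASSERT_TP; the JIT uses a bit vector

    static std::vector<AssertSet*> g_assertionDep; // stand-in for the ExpandArray<ASSERT_TP> above

    AssertSet& GetAssertionDepSketch(unsigned lclNum)
    {
        if (lclNum >= g_assertionDep.size())
        {
            g_assertionDep.resize(lclNum + 1, nullptr); // grow on demand, like ExpandArray
        }
        if (g_assertionDep[lclNum] == nullptr)
        {
            g_assertionDep[lclNum] = new AssertSet(); // created only on first query for this local
        }
        return *g_assertionDep[lclNum];
    }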
@@ -496,11 +521,11 @@ ASSERT_TP& Compiler::GetAssertionDep(unsigned lclNum)
* Initialize the assertion prop bitset traits and the default bitsets.
*/
-void Compiler::optAssertionTraitsInit(AssertionIndex assertionCount)
+void Compiler::optAssertionTraitsInit(AssertionIndex assertionCount)
{
apTraits = new (getAllocator()) BitVecTraits(assertionCount, this);
- apFull = BitVecOps::UninitVal();
- apEmpty = BitVecOps::UninitVal();
+ apFull = BitVecOps::UninitVal();
+ apEmpty = BitVecOps::UninitVal();
BitVecOps::AssignNoCopy(apTraits, apFull, BitVecOps::MakeFull(apTraits));
BitVecOps::AssignNoCopy(apTraits, apEmpty, BitVecOps::MakeEmpty(apTraits));
}
@@ -510,22 +535,23 @@ void Compiler::optAssertionTraitsInit(AssertionIndex assertionCou
* Initialize the assertion prop tracking logic.
*/
-void Compiler::optAssertionInit(bool isLocalProp)
-{
- // Use a function countFunc to determine a proper maximum assertion count for the
- // method being compiled. The function is linear to the IL size for small and
+void Compiler::optAssertionInit(bool isLocalProp)
+{
+ // Use a function countFunc to determine a proper maximum assertion count for the
+ // method being compiled. The function is linear to the IL size for small and
// moderate methods. For large methods, considering throughput impact, we track no
// more than 64 assertions.
// Note this tracks at most only 256 assertions.
- static const AssertionIndex countFunc[] = { 64, 128, 256, 64 };
- static const unsigned lowerBound = 0;
- static const unsigned upperBound = sizeof(countFunc) / sizeof(countFunc[0]) - 1;
- const unsigned codeSize = info.compILCodeSize / 512;
- optMaxAssertionCount = countFunc[isLocalProp ? lowerBound : min(upperBound, codeSize)];
+ static const AssertionIndex countFunc[] = {64, 128, 256, 64};
+ static const unsigned lowerBound = 0;
+ static const unsigned upperBound = sizeof(countFunc) / sizeof(countFunc[0]) - 1;
+ const unsigned codeSize = info.compILCodeSize / 512;
+ optMaxAssertionCount = countFunc[isLocalProp ? lowerBound : min(upperBound, codeSize)];
- optLocalAssertionProp = isLocalProp;
+ optLocalAssertionProp = isLocalProp;
optAssertionTabPrivate = new (getAllocator()) AssertionDsc[optMaxAssertionCount];
- optComplementaryAssertionMap = new (getAllocator()) AssertionIndex[optMaxAssertionCount](); // zero-inited (NO_ASSERTION_INDEX.)
+ optComplementaryAssertionMap =
+ new (getAllocator()) AssertionIndex[optMaxAssertionCount](); // zero-inited (NO_ASSERTION_INDEX.)
assert(NO_ASSERTION_INDEX == 0);
if (!isLocalProp)
@@ -533,19 +559,19 @@ void Compiler::optAssertionInit(bool isLocalProp)
optValueNumToAsserts = new (getAllocator()) ValueNumToAssertsMap(getAllocator());
}
- if (optAssertionDep == NULL)
+ if (optAssertionDep == nullptr)
{
optAssertionDep = new (getAllocator()) ExpandArray<ASSERT_TP>(getAllocator(), max(1, lvaCount));
}
optAssertionTraitsInit(optMaxAssertionCount);
- optAssertionCount = 0;
+ optAssertionCount = 0;
optAssertionPropagated = false;
- bbJtrueAssertionOut = NULL;
+ bbJtrueAssertionOut = nullptr;
}
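The cap computed in optAssertionInit is a table lookup on the IL size. A standalone sketch of the arithmetic, assuming a hypothetical helper and driver (only the {64, 128, 256, 64} table and the divide-by-512 bucketing come from the code above):

    #include <algorithm>
    #include <cstdio>

    static unsigned MaxAssertionCount(unsigned ilCodeSize, bool isLocalProp)
    {
        static const unsigned countFunc[] = {64, 128, 256, 64};
        const unsigned upperBound = sizeof(countFunc) / sizeof(countFunc[0]) - 1; // == 3
        const unsigned bucket     = ilCodeSize / 512;                             // 512 IL bytes per bucket
        return countFunc[isLocalProp ? 0 : std::min(upperBound, bucket)];
    }

    int main()
    {
        printf("%u\n", MaxAssertionCount(100, true));   // 64  : local prop always uses the first entry
        printf("%u\n", MaxAssertionCount(100, false));  // 64  : small method, bucket 0
        printf("%u\n", MaxAssertionCount(700, false));  // 128 : bucket 1
        printf("%u\n", MaxAssertionCount(1100, false)); // 256 : bucket 2
        printf("%u\n", MaxAssertionCount(9000, false)); // 64  : large method, clamped to the last entry
        return 0;
    }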
#ifdef DEBUG
-void Compiler::optPrintAssertion(AssertionDsc* curAssertion, AssertionIndex assertionIndex /* =0 */)
+void Compiler::optPrintAssertion(AssertionDsc* curAssertion, AssertionIndex assertionIndex /* =0 */)
{
if (curAssertion->op1.kind == O1K_EXACT_TYPE)
{
@@ -563,13 +589,12 @@ void Compiler::optPrintAssertion(AssertionDsc* curAssertion, AssertionIndex ass
{
printf("Copy ");
}
- else if ((curAssertion->op2.kind == O2K_CONST_INT) ||
- (curAssertion->op2.kind == O2K_CONST_LONG) ||
- (curAssertion->op2.kind == O2K_CONST_DOUBLE) )
+ else if ((curAssertion->op2.kind == O2K_CONST_INT) || (curAssertion->op2.kind == O2K_CONST_LONG) ||
+ (curAssertion->op2.kind == O2K_CONST_DOUBLE))
{
printf("Constant ");
}
- else if (curAssertion->op2.kind == O2K_SUBRANGE)
+ else if (curAssertion->op2.kind == O2K_SUBRANGE)
{
printf("Subrange ");
}
@@ -588,9 +613,8 @@ void Compiler::optPrintAssertion(AssertionDsc* curAssertion, AssertionIndex ass
printf("(" STR_VN "%x," STR_VN "%x) ", curAssertion->op1.vn, curAssertion->op2.vn);
}
- if ((curAssertion->op1.kind == O1K_LCLVAR) ||
- (curAssertion->op1.kind == O1K_EXACT_TYPE) ||
- (curAssertion->op1.kind == O1K_SUBTYPE) )
+ if ((curAssertion->op1.kind == O1K_LCLVAR) || (curAssertion->op1.kind == O1K_EXACT_TYPE) ||
+ (curAssertion->op1.kind == O1K_SUBTYPE))
{
printf("V%02u", curAssertion->op1.lcl.lclNum);
if (curAssertion->op1.lcl.ssaNum != SsaConfig::RESERVED_SSA_NUM)
@@ -638,9 +662,13 @@ void Compiler::optPrintAssertion(AssertionDsc* curAssertion, AssertionIndex ass
else if (curAssertion->assertionKind == OAK_EQUAL)
{
if (curAssertion->op1.kind == O1K_LCLVAR)
+ {
printf(" == ");
+ }
else
+ {
printf(" is ");
+ }
}
else if (curAssertion->assertionKind == OAK_NO_THROW)
{
@@ -649,9 +677,13 @@ void Compiler::optPrintAssertion(AssertionDsc* curAssertion, AssertionIndex ass
else if (curAssertion->assertionKind == OAK_NOT_EQUAL)
{
if (curAssertion->op1.kind == O1K_LCLVAR)
+ {
printf(" != ");
+ }
else
+ {
printf(" is not ");
+ }
}
else
{
@@ -662,97 +694,106 @@ void Compiler::optPrintAssertion(AssertionDsc* curAssertion, AssertionIndex ass
{
switch (curAssertion->op2.kind)
{
- case O2K_LCLVAR_COPY:
- printf("V%02u", curAssertion->op2.lcl.lclNum);
- if (curAssertion->op1.lcl.ssaNum != SsaConfig::RESERVED_SSA_NUM)
- {
- printf(".%02u", curAssertion->op1.lcl.ssaNum);
- }
- break;
-
- case O2K_CONST_INT:
- case O2K_IND_CNS_INT:
- if (curAssertion->op1.kind == O1K_EXACT_TYPE)
- {
- printf("Exact Type MT(%08X)", dspPtr(curAssertion->op2.u1.iconVal));
- assert(curAssertion->op2.u1.iconFlags != 0);
- }
- else if (curAssertion->op1.kind == O1K_SUBTYPE)
- {
- printf("MT(%08X)", dspPtr(curAssertion->op2.u1.iconVal));
- assert(curAssertion->op2.u1.iconFlags != 0);
- }
- else if (curAssertion->op1.kind == O1K_ARRLEN_OPER_BND)
- {
- assert(!optLocalAssertionProp);
- vnStore->vnDump(this, curAssertion->op2.vn);
- }
- else if (curAssertion->op1.kind == O1K_ARRLEN_LOOP_BND)
- {
- assert(!optLocalAssertionProp);
- vnStore->vnDump(this, curAssertion->op2.vn);
- }
- else if (curAssertion->op1.kind == O1K_CONSTANT_LOOP_BND)
- {
- assert(!optLocalAssertionProp);
- vnStore->vnDump(this, curAssertion->op2.vn);
- }
- else
- {
- var_types op1Type;
+ case O2K_LCLVAR_COPY:
+ printf("V%02u", curAssertion->op2.lcl.lclNum);
+ if (curAssertion->op1.lcl.ssaNum != SsaConfig::RESERVED_SSA_NUM)
+ {
+ printf(".%02u", curAssertion->op1.lcl.ssaNum);
+ }
+ break;
- if (curAssertion->op1.kind == O1K_VALUE_NUMBER)
+ case O2K_CONST_INT:
+ case O2K_IND_CNS_INT:
+ if (curAssertion->op1.kind == O1K_EXACT_TYPE)
+ {
+ printf("Exact Type MT(%08X)", dspPtr(curAssertion->op2.u1.iconVal));
+ assert(curAssertion->op2.u1.iconFlags != 0);
+ }
+ else if (curAssertion->op1.kind == O1K_SUBTYPE)
+ {
+ printf("MT(%08X)", dspPtr(curAssertion->op2.u1.iconVal));
+ assert(curAssertion->op2.u1.iconFlags != 0);
+ }
+ else if (curAssertion->op1.kind == O1K_ARRLEN_OPER_BND)
{
- op1Type = vnStore->TypeOfVN(curAssertion->op1.vn);
+ assert(!optLocalAssertionProp);
+ vnStore->vnDump(this, curAssertion->op2.vn);
+ }
+ else if (curAssertion->op1.kind == O1K_ARRLEN_LOOP_BND)
+ {
+ assert(!optLocalAssertionProp);
+ vnStore->vnDump(this, curAssertion->op2.vn);
+ }
+ else if (curAssertion->op1.kind == O1K_CONSTANT_LOOP_BND)
+ {
+ assert(!optLocalAssertionProp);
+ vnStore->vnDump(this, curAssertion->op2.vn);
}
else
{
- unsigned lclNum = curAssertion->op1.lcl.lclNum; assert(lclNum < lvaCount);
- LclVarDsc * varDsc = lvaTable + lclNum;
- op1Type = varDsc->lvType;
+ var_types op1Type;
+
+ if (curAssertion->op1.kind == O1K_VALUE_NUMBER)
+ {
+ op1Type = vnStore->TypeOfVN(curAssertion->op1.vn);
+ }
+ else
+ {
+ unsigned lclNum = curAssertion->op1.lcl.lclNum;
+ assert(lclNum < lvaCount);
+ LclVarDsc* varDsc = lvaTable + lclNum;
+ op1Type = varDsc->lvType;
+ }
+
+ if (op1Type == TYP_REF)
+ {
+ assert(curAssertion->op2.u1.iconVal == 0);
+ printf("null");
+ }
+ else
+ {
+ if ((curAssertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK) != 0)
+ {
+ printf("[%08p]", dspPtr(curAssertion->op2.u1.iconVal));
+ }
+ else
+ {
+ printf("%d", curAssertion->op2.u1.iconVal);
+ }
+ }
}
+ break;
- if (op1Type == TYP_REF)
+ case O2K_CONST_LONG:
+ printf("0x%016llx", curAssertion->op2.lconVal);
+ break;
+
+ case O2K_CONST_DOUBLE:
+ if (*((__int64*)&curAssertion->op2.dconVal) == (__int64)I64(0x8000000000000000))
{
- assert(curAssertion->op2.u1.iconVal == 0);
- printf("null");
+ printf("-0.00000");
}
else
{
- if ((curAssertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK) != 0)
- printf("[%08p]", dspPtr(curAssertion->op2.u1.iconVal));
- else
- printf("%d", curAssertion->op2.u1.iconVal);
+ printf("%#lg", curAssertion->op2.dconVal);
}
- }
- break;
-
- case O2K_CONST_LONG:
- printf("0x%016llx", curAssertion->op2.lconVal);
- break;
-
- case O2K_CONST_DOUBLE:
- if (*((__int64 *)&curAssertion->op2.dconVal) == (__int64)I64(0x8000000000000000))
- printf("-0.00000");
- else
- printf("%#lg", curAssertion->op2.dconVal);
- break;
-
- case O2K_SUBRANGE:
- printf("[%d..%d]", curAssertion->op2.u2.loBound, curAssertion->op2.u2.hiBound);
- break;
-
- default:
- printf("?op2.kind?");
- break;
+ break;
+
+ case O2K_SUBRANGE:
+ printf("[%d..%d]", curAssertion->op2.u2.loBound, curAssertion->op2.u2.hiBound);
+ break;
+
+ default:
+ printf("?op2.kind?");
+ break;
}
}
if (assertionIndex > 0)
{
printf(" index=#%02u, mask=", assertionIndex);
-
- // This is an hack to reuse a known empty set in order to display
+
+        // This is a hack to reuse a known empty set in order to display
// a single bit mask.
BitVecOps::AddElemD(apTraits, apEmpty, assertionIndex - 1);
printf("%s", BitVecOps::ToString(apTraits, apEmpty));
@@ -768,7 +809,7 @@ void Compiler::optPrintAssertion(AssertionDsc* curAssertion, AssertionIndex ass
* is NO_ASSERTION_INDEX and "optAssertionCount" is the last valid index.
*
*/
-Compiler::AssertionDsc * Compiler::optGetAssertion(AssertionIndex assertIndex)
+Compiler::AssertionDsc* Compiler::optGetAssertion(AssertionIndex assertIndex)
{
assert(NO_ASSERTION_INDEX == 0);
noway_assert(assertIndex != NO_ASSERTION_INDEX);
@@ -803,24 +844,25 @@ Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1, GenTreePtr
 * unsuccessful, assertion->assertionKind will be OAK_INVALID. If we are
* successful in creating the assertion we call optAddAssertion which adds
* the assertion to our assertion table.
- *
+ *
* If we are able to create the assertion the return value is the
* assertionIndex for this assertion otherwise the return value is
* NO_ASSERTION_INDEX and we could not create the assertion.
*
*/
-Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1, GenTreePtr op2,
- optAssertionKind assertionKind,
- AssertionDsc* assertion)
+Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1,
+ GenTreePtr op2,
+ optAssertionKind assertionKind,
+ AssertionDsc* assertion)
{
memset(assertion, 0, sizeof(AssertionDsc));
//
// If we cannot create an assertion using op1 and op2 then the assertionKind
// must be OAK_INVALID, so we initialize it to OAK_INVALID and only change it
// to a valid assertion when everything is good.
- //
+ //
assertion->assertionKind = OAK_INVALID;
- bool haveArgs = false;
+ bool haveArgs = false;
var_types toType;
if (op1->gtOper == GT_ARR_BOUNDS_CHECK)
@@ -828,10 +870,10 @@ Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1, GenTreePtr
if (assertionKind == OAK_NO_THROW)
{
GenTreeBoundsChk* arrBndsChk = op1->AsBoundsChk();
- assertion->assertionKind = assertionKind;
- assertion->op1.kind = O1K_ARR_BND;
- assertion->op1.bnd.vnIdx = arrBndsChk->gtIndex->gtVNPair.GetConservative();
- assertion->op1.bnd.vnLen = arrBndsChk->gtArrLen->gtVNPair.GetConservative();
+ assertion->assertionKind = assertionKind;
+ assertion->op1.kind = O1K_ARR_BND;
+ assertion->op1.bnd.vnIdx = arrBndsChk->gtIndex->gtVNPair.GetConservative();
+ assertion->op1.bnd.vnLen = arrBndsChk->gtArrLen->gtVNPair.GetConservative();
goto DONE_ASSERTION;
}
}
@@ -843,27 +885,27 @@ Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1, GenTreePtr
{
if (op2->gtOper != GT_LIST)
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
- op1 = op1->gtOp.gtOp1;
- op2 = op2->gtOp.gtOp1;
+ op1 = op1->gtOp.gtOp1;
+ op2 = op2->gtOp.gtOp1;
haveArgs = true;
}
//
// Are we trying to make a non-null assertion?
- //
+ //
if (op2 == nullptr)
{
assert(haveArgs == false);
//
        // Must be an OAK_NOT_EQUAL assertion
- //
+ //
noway_assert(assertionKind == OAK_NOT_EQUAL);
//
// Set op1 to the instance pointer of the indirection
- //
+ //
ssize_t offset = 0;
while ((op1->gtOper == GT_ADD) && (op1->gtType == TYP_BYREF))
@@ -886,32 +928,32 @@ Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1, GenTreePtr
if (fgIsBigOffset(offset) || op1->gtOper != GT_LCL_VAR)
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
- unsigned lclNum = op1->gtLclVarCommon.gtLclNum; noway_assert(lclNum < lvaCount);
- LclVarDsc * lclVar = &lvaTable[lclNum];
+ unsigned lclNum = op1->gtLclVarCommon.gtLclNum;
+ noway_assert(lclNum < lvaCount);
+ LclVarDsc* lclVar = &lvaTable[lclNum];
ValueNum vn;
//
- // We only perform null-checks on GC refs
+ // We only perform null-checks on GC refs
// so only make non-null assertions about GC refs
- //
+ //
if (lclVar->TypeGet() != TYP_REF)
{
if (optLocalAssertionProp || (lclVar->TypeGet() != TYP_BYREF))
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
-
+
vn = op1->gtVNPair.GetConservative();
VNFuncApp funcAttr;
// Try to get value number corresponding to the GC ref of the indirection
- while(vnStore->GetVNFunc(vn, &funcAttr) &&
- (funcAttr.m_func == (VNFunc)GT_ADD) &&
- (vnStore->TypeOfVN(vn) == TYP_BYREF))
+ while (vnStore->GetVNFunc(vn, &funcAttr) && (funcAttr.m_func == (VNFunc)GT_ADD) &&
+ (vnStore->TypeOfVN(vn) == TYP_BYREF))
{
if (vnStore->IsVNConstant(funcAttr.m_args[1]))
{
@@ -931,26 +973,26 @@ Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1, GenTreePtr
if (fgIsBigOffset(offset) || (vnStore->TypeOfVN(vn) != TYP_REF))
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
assertion->op1.kind = O1K_VALUE_NUMBER;
}
else
{
- // If the local variable has its address exposed then bail
+ // If the local variable has its address exposed then bail
if (lclVar->lvAddrExposed)
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
- assertion->op1.kind = O1K_LCLVAR;
+ assertion->op1.kind = O1K_LCLVAR;
assertion->op1.lcl.lclNum = lclNum;
assertion->op1.lcl.ssaNum = op1->AsLclVarCommon()->GetSsaNum();
- vn = op1->gtVNPair.GetConservative();
+ vn = op1->gtVNPair.GetConservative();
}
- assertion->op1.vn = vn;
+ assertion->op1.vn = vn;
assertion->assertionKind = assertionKind;
assertion->op2.kind = O2K_CONST_INT;
assertion->op2.vn = ValueNumStore::VNForNull();
@@ -958,35 +1000,36 @@ Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1, GenTreePtr
assertion->op2.u1.iconFlags = 0;
#ifdef _TARGET_64BIT_
assertion->op2.u1.iconFlags |= 1; // Signify that this is really TYP_LONG
-#endif // _TARGET_64BIT_
+#endif // _TARGET_64BIT_
}
//
// Are we making an assertion about a local variable?
- //
+ //
else if (op1->gtOper == GT_LCL_VAR)
{
- unsigned lclNum = op1->gtLclVarCommon.gtLclNum; noway_assert(lclNum < lvaCount);
- LclVarDsc * lclVar = &lvaTable[lclNum];
+ unsigned lclNum = op1->gtLclVarCommon.gtLclNum;
+ noway_assert(lclNum < lvaCount);
+ LclVarDsc* lclVar = &lvaTable[lclNum];
- // If the local variable has its address exposed then bail
+ // If the local variable has its address exposed then bail
if (lclVar->lvAddrExposed)
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
if (haveArgs)
{
//
// Must either be an OAK_EQUAL or an OAK_NOT_EQUAL assertion
- //
+ //
if ((assertionKind != OAK_EQUAL) && (assertionKind != OAK_NOT_EQUAL))
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
if (op2->gtOper == GT_IND)
{
- op2 = op2->gtOp.gtOp1;
+ op2 = op2->gtOp.gtOp1;
assertion->op2.kind = O2K_IND_CNS_INT;
}
else
@@ -996,14 +1039,14 @@ Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1, GenTreePtr
if (op2->gtOper != GT_CNS_INT)
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
//
// TODO-CQ: Check for Sealed class and change kind to O1K_EXACT_TYPE
// And consider the special cases, like CORINFO_FLG_SHAREDINST or CORINFO_FLG_VARIANCE
- // where a class can be sealed, but they don't behave as exact types because casts to
- // non-base types sometimes still succeed.
+ // where a class can be sealed, but they don't behave as exact types because casts to
+ // non-base types sometimes still succeed.
//
assertion->op1.kind = O1K_SUBTYPE;
assertion->op1.lcl.lclNum = lclNum;
@@ -1015,7 +1058,7 @@ Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1, GenTreePtr
//
// Ok everything has been set and the assertion looks good
- //
+ //
assertion->assertionKind = assertionKind;
}
else // !haveArgs
@@ -1026,56 +1069,56 @@ Compiler::AssertionIndex Compiler::optCreateAssertion(GenTreePtr op1, GenTreePtr
op2 = op2->gtOp.gtOp2;
}
- assertion->op1.kind = O1K_LCLVAR;
+ assertion->op1.kind = O1K_LCLVAR;
assertion->op1.lcl.lclNum = lclNum;
- assertion->op1.vn = op1->gtVNPair.GetConservative();
+ assertion->op1.vn = op1->gtVNPair.GetConservative();
assertion->op1.lcl.ssaNum = op1->AsLclVarCommon()->GetSsaNum();
switch (op2->gtOper)
{
- optOp2Kind op2Kind;
- //
- // No Assertion
- //
- default:
- goto DONE_ASSERTION; // Don't make an assertion
+ optOp2Kind op2Kind;
+ //
+ // No Assertion
+ //
+ default:
+ goto DONE_ASSERTION; // Don't make an assertion
- //
- // Constant Assertions
- //
- case GT_CNS_INT:
- op2Kind = O2K_CONST_INT;
- goto CNS_COMMON;
+ //
+ // Constant Assertions
+ //
+ case GT_CNS_INT:
+ op2Kind = O2K_CONST_INT;
+ goto CNS_COMMON;
- case GT_CNS_LNG:
- op2Kind = O2K_CONST_LONG;
- goto CNS_COMMON;
+ case GT_CNS_LNG:
+ op2Kind = O2K_CONST_LONG;
+ goto CNS_COMMON;
- case GT_CNS_DBL:
- op2Kind = O2K_CONST_DOUBLE;
- goto CNS_COMMON;
+ case GT_CNS_DBL:
+ op2Kind = O2K_CONST_DOUBLE;
+ goto CNS_COMMON;
-CNS_COMMON:
+ CNS_COMMON:
{
//
// Must either be an OAK_EQUAL or an OAK_NOT_EQUAL assertion
- //
+ //
if ((assertionKind != OAK_EQUAL) && (assertionKind != OAK_NOT_EQUAL))
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
- // If the LclVar is a TYP_LONG then we only make
+ // If the LclVar is a TYP_LONG then we only make
// assertions where op2 is also TYP_LONG
- //
+ //
if ((lclVar->TypeGet() == TYP_LONG) && (op2->TypeGet() != TYP_LONG))
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
- assertion->op2.kind = op2Kind;
+ assertion->op2.kind = op2Kind;
assertion->op2.lconVal = 0;
- assertion->op2.vn = op2->gtVNPair.GetConservative();
+ assertion->op2.vn = op2->gtVNPair.GetConservative();
if (op2->gtOper == GT_CNS_INT)
{
@@ -1083,7 +1126,7 @@ CNS_COMMON:
// Do not Constant-Prop large constants for ARM
if (!codeGen->validImmForMov(op2->gtIntCon.gtIconVal))
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
#endif // _TARGET_ARM_
assertion->op2.u1.iconVal = op2->gtIntCon.gtIconVal;
@@ -1103,224 +1146,232 @@ CNS_COMMON:
{
noway_assert(op2->gtOper == GT_CNS_DBL);
/* If we have an NaN value then don't record it */
- if (_isnan(op2->gtDblCon.gtDconVal))
+ if (_isnan(op2->gtDblCon.gtDconVal))
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
assertion->op2.dconVal = op2->gtDblCon.gtDconVal;
}
//
// Ok everything has been set and the assertion looks good
- //
+ //
assertion->assertionKind = assertionKind;
}
break;
- //
- // Copy Assertions
- //
- case GT_LCL_VAR:
+ //
+ // Copy Assertions
+ //
+ case GT_LCL_VAR:
{
//
// Must either be an OAK_EQUAL or an OAK_NOT_EQUAL assertion
- //
+ //
if ((assertionKind != OAK_EQUAL) && (assertionKind != OAK_NOT_EQUAL))
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
- unsigned lclNum2 = op2->gtLclVarCommon.gtLclNum; noway_assert(lclNum2 < lvaCount);
- LclVarDsc * lclVar2 = &lvaTable[lclNum2];
+ unsigned lclNum2 = op2->gtLclVarCommon.gtLclNum;
+ noway_assert(lclNum2 < lvaCount);
+ LclVarDsc* lclVar2 = &lvaTable[lclNum2];
// If the two locals are the same then bail
if (lclNum == lclNum2)
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
-
+
// If the types are different then bail */
if (lclVar->lvType != lclVar2->lvType)
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
- // If the local variable has its address exposed then bail
+ // If the local variable has its address exposed then bail
if (lclVar2->lvAddrExposed)
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
- assertion->op2.kind = O2K_LCLVAR_COPY;
+ assertion->op2.kind = O2K_LCLVAR_COPY;
assertion->op2.lcl.lclNum = lclNum2;
- assertion->op2.vn = op2->gtVNPair.GetConservative();
+ assertion->op2.vn = op2->gtVNPair.GetConservative();
assertion->op2.lcl.ssaNum = op2->AsLclVarCommon()->GetSsaNum();
-
+
//
// Ok everything has been set and the assertion looks good
- //
+ //
assertion->assertionKind = assertionKind;
}
break;
- // Subrange Assertions
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GT:
- case GT_GE:
+ // Subrange Assertions
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GT:
+ case GT_GE:
- /* Assigning the result of a RELOP, we can add a boolean subrange assertion */
+ /* Assigning the result of a RELOP, we can add a boolean subrange assertion */
- toType = TYP_BOOL;
- goto SUBRANGE_COMMON;
+ toType = TYP_BOOL;
+ goto SUBRANGE_COMMON;
- case GT_CLS_VAR:
+ case GT_CLS_VAR:
- /* Assigning the result of an indirection into a LCL_VAR, see if we can add a subrange assertion */
+ /* Assigning the result of an indirection into a LCL_VAR, see if we can add a subrange assertion */
- toType = op2->gtType;
- goto SUBRANGE_COMMON;
+ toType = op2->gtType;
+ goto SUBRANGE_COMMON;
- case GT_ARR_ELEM:
+ case GT_ARR_ELEM:
- /* Assigning the result of an indirection into a LCL_VAR, see if we can add a subrange assertion */
+ /* Assigning the result of an indirection into a LCL_VAR, see if we can add a subrange assertion */
- toType = op2->gtType;
- goto SUBRANGE_COMMON;
+ toType = op2->gtType;
+ goto SUBRANGE_COMMON;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
- /* Assigning the result of an indirection into a LCL_VAR, see if we can add a subrange assertion */
+ /* Assigning the result of an indirection into a LCL_VAR, see if we can add a subrange assertion */
- toType = op2->gtType;
- goto SUBRANGE_COMMON;
+ toType = op2->gtType;
+ goto SUBRANGE_COMMON;
- case GT_IND:
+ case GT_IND:
- /* Assigning the result of an indirection into a LCL_VAR, see if we can add a subrange assertion */
+ /* Assigning the result of an indirection into a LCL_VAR, see if we can add a subrange assertion */
- toType = op2->gtType;
- goto SUBRANGE_COMMON;
+ toType = op2->gtType;
+ goto SUBRANGE_COMMON;
- case GT_CAST:
+ case GT_CAST:
{
if (lvaTable[lclNum].lvIsStructField && lvaTable[lclNum].lvNormalizeOnLoad())
- {
+ {
// Keep the cast on small struct fields.
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
toType = op2->CastToType();
-SUBRANGE_COMMON:
+ SUBRANGE_COMMON:
if ((assertionKind != OAK_SUBRANGE) && (assertionKind != OAK_EQUAL))
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
if (varTypeIsFloating(op1->TypeGet()))
{
- // We don't make assertions on a cast from floating point
+ // We don't make assertions on a cast from floating point
goto DONE_ASSERTION;
}
-
+
switch (toType)
{
- case TYP_BOOL:
- case TYP_BYTE:
- case TYP_UBYTE:
- case TYP_SHORT:
- case TYP_USHORT:
- case TYP_CHAR:
+ case TYP_BOOL:
+ case TYP_BYTE:
+ case TYP_UBYTE:
+ case TYP_SHORT:
+ case TYP_USHORT:
+ case TYP_CHAR:
#ifdef _TARGET_64BIT_
- case TYP_UINT:
- case TYP_INT:
+ case TYP_UINT:
+ case TYP_INT:
#endif // _TARGET_64BIT_
- assertion->op2.u2.loBound = AssertionDsc::GetLowerBoundForIntegralType(toType);
- assertion->op2.u2.hiBound = AssertionDsc::GetUpperBoundForIntegralType(toType);
- break;
+ assertion->op2.u2.loBound = AssertionDsc::GetLowerBoundForIntegralType(toType);
+ assertion->op2.u2.hiBound = AssertionDsc::GetUpperBoundForIntegralType(toType);
+ break;
- default:
- goto DONE_ASSERTION; // Don't make an assertion
+ default:
+ goto DONE_ASSERTION; // Don't make an assertion
}
- assertion->op2.kind = O2K_SUBRANGE;
+ assertion->op2.kind = O2K_SUBRANGE;
assertion->assertionKind = OAK_SUBRANGE;
}
break;
}
} // else // !haveArgs
- } // if (op1->gtOper == GT_LCL_VAR)
+ } // if (op1->gtOper == GT_LCL_VAR)
//
- // Are we making an IsType assertion?
- //
+ // Are we making an IsType assertion?
+ //
else if (op1->gtOper == GT_IND)
{
op1 = op1->gtOp.gtOp1;
//
// Is this an indirection of a local variable?
- //
+ //
if (op1->gtOper == GT_LCL_VAR)
{
- unsigned lclNum = op1->gtLclVarCommon.gtLclNum; noway_assert(lclNum < lvaCount);
- LclVarDsc * lclVar = &lvaTable[lclNum];
+ unsigned lclNum = op1->gtLclVarCommon.gtLclNum;
+ noway_assert(lclNum < lvaCount);
+ LclVarDsc* lclVar = &lvaTable[lclNum];
- // If the local variable has its address exposed then bail
+ // If the local variable has its address exposed then bail
if (fgExcludeFromSsa(lclNum))
{
goto DONE_ASSERTION;
}
- // If we have an typeHnd indirection then op1 must be a TYP_REF
+ // If we have an typeHnd indirection then op1 must be a TYP_REF
// and the indirection must produce a TYP_I
- //
+ //
if (op1->gtType != TYP_REF)
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
-
- assertion->op1.kind = O1K_EXACT_TYPE;
+
+ assertion->op1.kind = O1K_EXACT_TYPE;
assertion->op1.lcl.lclNum = lclNum;
- assertion->op1.vn = op1->gtVNPair.GetConservative();
+ assertion->op1.vn = op1->gtVNPair.GetConservative();
assertion->op1.lcl.ssaNum = op1->AsLclVarCommon()->GetSsaNum();
- assert(assertion->op1.lcl.ssaNum == SsaConfig::RESERVED_SSA_NUM || assertion->op1.vn == lvaTable[lclNum].GetPerSsaData(assertion->op1.lcl.ssaNum)->m_vnPair.GetConservative());
+ assert(assertion->op1.lcl.ssaNum == SsaConfig::RESERVED_SSA_NUM ||
+ assertion->op1.vn ==
+ lvaTable[lclNum].GetPerSsaData(assertion->op1.lcl.ssaNum)->m_vnPair.GetConservative());
- ssize_t cnsValue = 0;
+ ssize_t cnsValue = 0;
unsigned iconFlags = 0;
// Ngen case
if (op2->gtOper == GT_IND)
{
if (!optIsTreeKnownIntValue(!optLocalAssertionProp, op2->gtOp.gtOp1, &cnsValue, &iconFlags))
{
- goto DONE_ASSERTION; // Don't make an assertion
+ goto DONE_ASSERTION; // Don't make an assertion
}
- assertion->assertionKind = assertionKind;
- assertion->op2.kind = O2K_IND_CNS_INT;
+ assertion->assertionKind = assertionKind;
+ assertion->op2.kind = O2K_IND_CNS_INT;
assertion->op2.u1.iconVal = cnsValue;
- assertion->op2.vn = op2->gtOp.gtOp1->gtVNPair.GetConservative();
+ assertion->op2.vn = op2->gtOp.gtOp1->gtVNPair.GetConservative();
/* iconFlags should only contain bits in GTF_ICON_HDL_MASK */
assert((iconFlags & ~GTF_ICON_HDL_MASK) == 0);
assertion->op2.u1.iconFlags = iconFlags;
#ifdef _TARGET_64BIT_
if (op2->gtOp.gtOp1->TypeGet() == TYP_LONG)
+ {
assertion->op2.u1.iconFlags |= 1; // Signify that this is really TYP_LONG
+ }
#endif // _TARGET_64BIT_
}
// JIT case
else if (optIsTreeKnownIntValue(!optLocalAssertionProp, op2, &cnsValue, &iconFlags))
{
- assertion->assertionKind = assertionKind;
- assertion->op2.kind = O2K_IND_CNS_INT;
+ assertion->assertionKind = assertionKind;
+ assertion->op2.kind = O2K_IND_CNS_INT;
assertion->op2.u1.iconVal = cnsValue;
- assertion->op2.vn = op2->gtVNPair.GetConservative();
+ assertion->op2.vn = op2->gtVNPair.GetConservative();
/* iconFlags should only contain bits in GTF_ICON_HDL_MASK */
assert((iconFlags & ~GTF_ICON_HDL_MASK) == 0);
assertion->op2.u1.iconFlags = iconFlags;
#ifdef _TARGET_64BIT_
if (op2->TypeGet() == TYP_LONG)
+ {
assertion->op2.u1.iconFlags |= 1; // Signify that this is really TYP_LONG
+ }
#endif // _TARGET_64BIT_
}
else
@@ -1338,10 +1389,8 @@ DONE_ASSERTION:
if (!optLocalAssertionProp)
{
- if ((assertion->op1.vn == ValueNumStore::NoVN) ||
- (assertion->op2.vn == ValueNumStore::NoVN) ||
- (assertion->op1.vn == ValueNumStore::VNForVoid()) ||
- (assertion->op2.vn == ValueNumStore::VNForVoid()))
+ if ((assertion->op1.vn == ValueNumStore::NoVN) || (assertion->op2.vn == ValueNumStore::NoVN) ||
+ (assertion->op1.vn == ValueNumStore::VNForVoid()) || (assertion->op2.vn == ValueNumStore::VNForVoid()))
{
return NO_ASSERTION_INDEX;
}
@@ -1355,8 +1404,7 @@ DONE_ASSERTION:
// Now add the assertion to our assertion table
noway_assert(assertion->op1.kind != O1K_INVALID);
- noway_assert(assertion->op1.kind == O1K_ARR_BND ||
- assertion->op2.kind != O2K_INVALID);
+ noway_assert(assertion->op1.kind == O1K_ARR_BND || assertion->op2.kind != O2K_INVALID);
return optAddAssertion(assertion);
}
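The OAK_SUBRANGE cases earlier in optCreateAssertion record a [loBound..hiBound] pair for the cast target type. As a standalone reminder of what those subranges cover, here is a sketch that assumes the bounds are the natural value ranges of the small integral types (the authoritative values come from AssertionDsc::GetLowerBoundForIntegralType and GetUpperBoundForIntegralType):

    #include <cstdio>

    int main()
    {
        struct Range { const char* name; long long lo, hi; };
        const Range ranges[] = {
            {"TYP_BOOL",   0,      1},
            {"TYP_BYTE",   -128,   127},
            {"TYP_UBYTE",  0,      255},
            {"TYP_SHORT",  -32768, 32767},
            {"TYP_USHORT", 0,      65535}, // TYP_CHAR shares this range; TYP_INT/TYP_UINT also qualify on 64-bit targets
        };
        for (const Range& r : ranges)
        {
            printf("%-10s [%lld .. %lld]\n", r.name, r.lo, r.hi);
        }
        return 0;
    }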
@@ -1375,7 +1423,7 @@ bool Compiler::optIsTreeKnownIntValue(bool vnBased, GenTreePtr tree, ssize_t* pC
if (tree->OperGet() == GT_CNS_INT)
{
*pConstant = tree->gtIntCon.IconValue();
- *pFlags = tree->GetIconHandleFlag();
+ *pFlags = tree->GetIconHandleFlag();
return true;
}
#ifdef _TARGET_64BIT_
@@ -1384,7 +1432,7 @@ bool Compiler::optIsTreeKnownIntValue(bool vnBased, GenTreePtr tree, ssize_t* pC
else if (tree->OperGet() == GT_CNS_LNG)
{
*pConstant = tree->gtLngCon.gtLconVal;
- *pFlags = tree->GetIconHandleFlag();
+ *pFlags = tree->GetIconHandleFlag();
return true;
}
#endif
@@ -1397,19 +1445,19 @@ bool Compiler::optIsTreeKnownIntValue(bool vnBased, GenTreePtr tree, ssize_t* pC
return false;
}
- ValueNum vn = tree->gtVNPair.GetConservative();
+ ValueNum vn = tree->gtVNPair.GetConservative();
var_types vnType = vnStore->TypeOfVN(vn);
if (vnType == TYP_INT)
{
*pConstant = vnStore->ConstantValue<int>(vn);
- *pFlags = vnStore->IsVNHandle(vn) ? vnStore->GetHandleFlags(vn) : 0;
+ *pFlags = vnStore->IsVNHandle(vn) ? vnStore->GetHandleFlags(vn) : 0;
return true;
}
#ifdef _TARGET_64BIT_
else if (vnType == TYP_LONG)
{
*pConstant = vnStore->ConstantValue<INT64>(vn);
- *pFlags = vnStore->IsVNHandle(vn) ? vnStore->GetHandleFlags(vn) : 0;
+ *pFlags = vnStore->IsVNHandle(vn) ? vnStore->GetHandleFlags(vn) : 0;
return true;
}
#endif
@@ -1418,7 +1466,7 @@ bool Compiler::optIsTreeKnownIntValue(bool vnBased, GenTreePtr tree, ssize_t* pC
#ifdef DEBUG
/*****************************************************************************
- *
+ *
* Print the assertions related to a VN for all VNs.
*
*/
@@ -1426,8 +1474,8 @@ void Compiler::optPrintVnAssertionMapping()
{
printf("\nVN Assertion Mapping\n");
printf("---------------------\n");
- for (ValueNumToAssertsMap::KeyIterator ki = optValueNumToAsserts->Begin();
- !ki.Equal(optValueNumToAsserts->End()); ++ki)
+ for (ValueNumToAssertsMap::KeyIterator ki = optValueNumToAsserts->Begin(); !ki.Equal(optValueNumToAsserts->End());
+ ++ki)
{
printf("(%d => ", ki.Get());
printf("%s)\n", BitVecOps::ToString(apTraits, ki.GetValue()));
@@ -1456,21 +1504,21 @@ void Compiler::optAddVnAssertionMapping(ValueNum vn, AssertionIndex index)
* Statically if we know that this assertion's VN involves a NaN don't bother
* wasting an assertion table slot.
*/
-bool Compiler::optAssertionVnInvolvesNan(AssertionDsc* assertion)
+bool Compiler::optAssertionVnInvolvesNan(AssertionDsc* assertion)
{
if (optLocalAssertionProp)
{
return false;
}
- static const int SZ = 2;
- ValueNum vns[SZ] = { assertion->op1.vn, assertion->op2.vn };
+ static const int SZ = 2;
+ ValueNum vns[SZ] = {assertion->op1.vn, assertion->op2.vn};
for (int i = 0; i < SZ; ++i)
{
if (vnStore->IsVNConstant(vns[i]))
{
var_types type = vnStore->TypeOfVN(vns[i]);
- if ((type == TYP_FLOAT && _isnan(vnStore->ConstantValue<float >(vns[i])) != 0) ||
+ if ((type == TYP_FLOAT && _isnan(vnStore->ConstantValue<float>(vns[i])) != 0) ||
(type == TYP_DOUBLE && _isnan(vnStore->ConstantValue<double>(vns[i])) != 0))
{
return true;
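The loop above refuses to record an assertion whose value numbers include a NaN constant. A short standalone illustration of why, not JIT code: NaN compares unequal to everything, including itself, so an ==/!= assertion involving it could never be proven or used soundly.

    #include <cmath>
    #include <cstdio>

    int main()
    {
        double x = std::nan("");
        printf("%d %d\n", x == x, x != x); // prints "0 1": NaN is unequal even to itself
        return 0;
    }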
@@ -1482,14 +1530,14 @@ bool Compiler::optAssertionVnInvolvesNan(AssertionDsc* assertion)
/*****************************************************************************
*
- * Given an assertion add it to the assertion table
- *
- * If it is already in the assertion table return the assertionIndex that
+ * Given an assertion add it to the assertion table
+ *
+ * If it is already in the assertion table return the assertionIndex that
* we use to refer to this element.
- * Otherwise add it to the assertion table ad return the assertionIndex that
+ * Otherwise add it to the assertion table and return the assertionIndex that
* we use to refer to this element.
* If we need to add to the table and the table is full return the value zero
- */
+ */
Compiler::AssertionIndex Compiler::optAddAssertion(AssertionDsc* newAssertion)
{
noway_assert(newAssertion->assertionKind != OAK_INVALID);
@@ -1521,11 +1569,11 @@ Compiler::AssertionIndex Compiler::optAddAssertion(AssertionDsc* newAssertion)
optAssertionTabPrivate[optAssertionCount] = *newAssertion;
optAssertionCount++;
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("GenTreeNode creates assertion:\n");
- gtDispTree(optAssertionPropCurrentTree, 0, nullptr, true);
+ gtDispTree(optAssertionPropCurrentTree, nullptr, nullptr, true);
printf(optLocalAssertionProp ? "In BB%02u New Local " : "In BB%02u New Global ", compCurBB->bbNum);
optPrintAssertion(newAssertion, optAssertionCount);
}
@@ -1543,7 +1591,7 @@ Compiler::AssertionIndex Compiler::optAddAssertion(AssertionDsc* newAssertion)
{
lclNum = newAssertion->op2.lcl.lclNum;
BitVecOps::AddElemD(apTraits, GetAssertionDep(lclNum), optAssertionCount - 1);
- }
+ }
}
else
// If global assertion prop, then add it to the dependents map.
@@ -1569,57 +1617,57 @@ void Compiler::optDebugCheckAssertion(AssertionDsc* assertion)
assert(assertion->op2.kind < O2K_COUNT);
// It would be good to check that op1.vn and op2.vn are valid value numbers.
- switch(assertion->op1.kind)
+ switch (assertion->op1.kind)
{
- case O1K_LCLVAR:
- case O1K_EXACT_TYPE:
- case O1K_SUBTYPE:
- assert(assertion->op1.lcl.lclNum < lvaCount);
- assert(optLocalAssertionProp ||
- ((assertion->op1.lcl.ssaNum - SsaConfig::UNINIT_SSA_NUM) < lvaTable[assertion->op1.lcl.lclNum].lvNumSsaNames));
- break;
- case O1K_ARR_BND:
- // It would be good to check that bnd.vnIdx and bnd.vnLen are valid value numbers.
- break;
- case O1K_ARRLEN_OPER_BND:
- case O1K_ARRLEN_LOOP_BND:
- case O1K_CONSTANT_LOOP_BND:
- case O1K_VALUE_NUMBER:
- assert(!optLocalAssertionProp);
- break;
- default:
- break;
- }
- switch (assertion->op2.kind)
- {
- case O2K_IND_CNS_INT:
- case O2K_CONST_INT:
- {
- // The only flags that can be set are those in the GTF_ICON_HDL_MASK, or bit 0, which is
- // used to indicate a long constant.
- assert((assertion->op2.u1.iconFlags & ~(GTF_ICON_HDL_MASK|1)) == 0);
- switch (assertion->op1.kind)
- {
+ case O1K_LCLVAR:
case O1K_EXACT_TYPE:
case O1K_SUBTYPE:
- assert(assertion->op2.u1.iconFlags != 0);
+ assert(assertion->op1.lcl.lclNum < lvaCount);
+ assert(optLocalAssertionProp || ((assertion->op1.lcl.ssaNum - SsaConfig::UNINIT_SSA_NUM) <
+ lvaTable[assertion->op1.lcl.lclNum].lvNumSsaNames));
break;
- case O1K_LCLVAR:
case O1K_ARR_BND:
- assert((lvaTable[assertion->op1.lcl.lclNum].lvType != TYP_REF) || (assertion->op2.u1.iconVal == 0));
+ // It would be good to check that bnd.vnIdx and bnd.vnLen are valid value numbers.
break;
+ case O1K_ARRLEN_OPER_BND:
+ case O1K_ARRLEN_LOOP_BND:
+ case O1K_CONSTANT_LOOP_BND:
case O1K_VALUE_NUMBER:
- assert((vnStore->TypeOfVN(assertion->op1.vn) != TYP_REF) || (assertion->op2.u1.iconVal == 0));
+ assert(!optLocalAssertionProp);
break;
default:
break;
- }
}
- break;
-
- default:
- // for all other 'assertion->op2.kind' values we don't check anything
+ switch (assertion->op2.kind)
+ {
+ case O2K_IND_CNS_INT:
+ case O2K_CONST_INT:
+ {
+ // The only flags that can be set are those in the GTF_ICON_HDL_MASK, or bit 0, which is
+ // used to indicate a long constant.
+ assert((assertion->op2.u1.iconFlags & ~(GTF_ICON_HDL_MASK | 1)) == 0);
+ switch (assertion->op1.kind)
+ {
+ case O1K_EXACT_TYPE:
+ case O1K_SUBTYPE:
+ assert(assertion->op2.u1.iconFlags != 0);
+ break;
+ case O1K_LCLVAR:
+ case O1K_ARR_BND:
+ assert((lvaTable[assertion->op1.lcl.lclNum].lvType != TYP_REF) || (assertion->op2.u1.iconVal == 0));
+ break;
+ case O1K_VALUE_NUMBER:
+ assert((vnStore->TypeOfVN(assertion->op1.vn) != TYP_REF) || (assertion->op2.u1.iconVal == 0));
+ break;
+ default:
+ break;
+ }
+ }
break;
+
+ default:
+ // for all other 'assertion->op2.kind' values we don't check anything
+ break;
}
}
@@ -1633,7 +1681,7 @@ void Compiler::optDebugCheckAssertion(AssertionDsc* assertion)
void Compiler::optDebugCheckAssertions(AssertionIndex index)
{
AssertionIndex start = (index == NO_ASSERTION_INDEX) ? 1 : index;
- AssertionIndex end = (index == NO_ASSERTION_INDEX) ? optAssertionCount : index;
+ AssertionIndex end = (index == NO_ASSERTION_INDEX) ? optAssertionCount : index;
for (AssertionIndex ind = start; ind <= end; ++ind)
{
AssertionDsc* assertion = optGetAssertion(ind);
@@ -1658,11 +1706,10 @@ void Compiler::optCreateComplementaryAssertion(AssertionIndex assertionIndex, Ge
}
AssertionDsc& candidateAssertion = *optGetAssertion(assertionIndex);
- if (candidateAssertion.op1.kind == O1K_ARRLEN_OPER_BND ||
- candidateAssertion.op1.kind == O1K_ARRLEN_LOOP_BND ||
+ if (candidateAssertion.op1.kind == O1K_ARRLEN_OPER_BND || candidateAssertion.op1.kind == O1K_ARRLEN_LOOP_BND ||
candidateAssertion.op1.kind == O1K_CONSTANT_LOOP_BND)
{
- AssertionDsc dsc = candidateAssertion;
+ AssertionDsc dsc = candidateAssertion;
dsc.assertionKind = dsc.assertionKind == OAK_EQUAL ? OAK_NOT_EQUAL : OAK_EQUAL;
optAddAssertion(&dsc);
return;
@@ -1698,7 +1745,9 @@ void Compiler::optCreateComplementaryAssertion(AssertionIndex assertionIndex, Ge
* for the operands.
*/
-Compiler::AssertionIndex Compiler::optCreateJtrueAssertions(GenTreePtr op1, GenTreePtr op2, Compiler::optAssertionKind assertionKind)
+Compiler::AssertionIndex Compiler::optCreateJtrueAssertions(GenTreePtr op1,
+ GenTreePtr op2,
+ Compiler::optAssertionKind assertionKind)
{
AssertionDsc candidateAssertion;
AssertionIndex assertionIndex = optCreateAssertion(op1, op2, assertionKind, &candidateAssertion);
@@ -1725,17 +1774,17 @@ Compiler::AssertionIndex Compiler::optCreateJTrueBoundsAssertion(GenTreePtr tree
// Cases where op1 holds the condition with array arithmetic and op2 is 0.
// Loop condition like: "i < a.len +/-k == 0"
// Assertion: "i < a.len +/- k == 0"
- if (vnStore->IsVNArrLenArithBound(vn) &&
+ if (vnStore->IsVNArrLenArithBound(vn) &&
op2->gtVNPair.GetConservative() == vnStore->VNZeroForType(op2->TypeGet()) &&
(relop->gtOper == GT_EQ || relop->gtOper == GT_NE))
{
AssertionDsc dsc;
- dsc.assertionKind = relop->gtOper == GT_EQ ? OAK_EQUAL : OAK_NOT_EQUAL;
- dsc.op1.kind = O1K_ARRLEN_OPER_BND;
- dsc.op1.vn = vn;
- dsc.op2.kind = O2K_CONST_INT;
- dsc.op2.vn = vnStore->VNZeroForType(op2->TypeGet());
- dsc.op2.u1.iconVal = 0;
+ dsc.assertionKind = relop->gtOper == GT_EQ ? OAK_EQUAL : OAK_NOT_EQUAL;
+ dsc.op1.kind = O1K_ARRLEN_OPER_BND;
+ dsc.op1.vn = vn;
+ dsc.op2.kind = O2K_CONST_INT;
+ dsc.op2.vn = vnStore->VNZeroForType(op2->TypeGet());
+ dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = 0;
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
@@ -1744,17 +1793,17 @@ Compiler::AssertionIndex Compiler::optCreateJTrueBoundsAssertion(GenTreePtr tree
// Cases where op1 holds the condition array length and op2 is 0.
// Loop condition like: "i < a.len == 0"
// Assertion: "i < a.len == false"
- else if (vnStore->IsVNArrLenBound(vn) &&
- (op2->gtVNPair.GetConservative() == vnStore->VNZeroForType(op2->TypeGet())) &&
- (relop->gtOper == GT_EQ || relop->gtOper == GT_NE))
+ else if (vnStore->IsVNArrLenBound(vn) &&
+ (op2->gtVNPair.GetConservative() == vnStore->VNZeroForType(op2->TypeGet())) &&
+ (relop->gtOper == GT_EQ || relop->gtOper == GT_NE))
{
AssertionDsc dsc;
- dsc.assertionKind = relop->gtOper == GT_EQ ? OAK_EQUAL : OAK_NOT_EQUAL;
- dsc.op1.kind = O1K_ARRLEN_LOOP_BND;
- dsc.op1.vn = vn;
- dsc.op2.kind = O2K_CONST_INT;
- dsc.op2.vn = vnStore->VNZeroForType(op2->TypeGet());
- dsc.op2.u1.iconVal = 0;
+ dsc.assertionKind = relop->gtOper == GT_EQ ? OAK_EQUAL : OAK_NOT_EQUAL;
+ dsc.op1.kind = O1K_ARRLEN_LOOP_BND;
+ dsc.op1.vn = vn;
+ dsc.op2.kind = O2K_CONST_INT;
+ dsc.op2.vn = vnStore->VNZeroForType(op2->TypeGet());
+ dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = 0;
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
@@ -1766,12 +1815,12 @@ Compiler::AssertionIndex Compiler::optCreateJTrueBoundsAssertion(GenTreePtr tree
else if (vnStore->IsVNArrLenBound(relop->gtVNPair.GetConservative()))
{
AssertionDsc dsc;
- dsc.assertionKind = OAK_NOT_EQUAL;
- dsc.op1.kind = O1K_ARRLEN_LOOP_BND;
- dsc.op1.vn = relop->gtVNPair.GetConservative();
- dsc.op2.kind = O2K_CONST_INT;
- dsc.op2.vn = vnStore->VNZeroForType(TYP_INT);
- dsc.op2.u1.iconVal = 0;
+ dsc.assertionKind = OAK_NOT_EQUAL;
+ dsc.op1.kind = O1K_ARRLEN_LOOP_BND;
+ dsc.op1.vn = relop->gtVNPair.GetConservative();
+ dsc.op2.kind = O2K_CONST_INT;
+ dsc.op2.vn = vnStore->VNZeroForType(TYP_INT);
+ dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = 0;
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
@@ -1781,16 +1830,16 @@ Compiler::AssertionIndex Compiler::optCreateJTrueBoundsAssertion(GenTreePtr tree
// Loop condition like: "i < 100 == 0"
// Assertion: "i < 100 == false"
else if (vnStore->IsVNConstantBound(vn) &&
- (op2->gtVNPair.GetConservative() == vnStore->VNZeroForType(op2->TypeGet())) &&
- (relop->gtOper == GT_EQ || relop->gtOper == GT_NE))
+ (op2->gtVNPair.GetConservative() == vnStore->VNZeroForType(op2->TypeGet())) &&
+ (relop->gtOper == GT_EQ || relop->gtOper == GT_NE))
{
AssertionDsc dsc;
- dsc.assertionKind = relop->gtOper == GT_EQ ? OAK_EQUAL : OAK_NOT_EQUAL;
- dsc.op1.kind = O1K_CONSTANT_LOOP_BND;
- dsc.op1.vn = vn;
- dsc.op2.kind = O2K_CONST_INT;
- dsc.op2.vn = vnStore->VNZeroForType(op2->TypeGet());
- dsc.op2.u1.iconVal = 0;
+ dsc.assertionKind = relop->gtOper == GT_EQ ? OAK_EQUAL : OAK_NOT_EQUAL;
+ dsc.op1.kind = O1K_CONSTANT_LOOP_BND;
+ dsc.op1.vn = vn;
+ dsc.op2.kind = O2K_CONST_INT;
+ dsc.op2.vn = vnStore->VNZeroForType(op2->TypeGet());
+ dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = 0;
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
@@ -1802,12 +1851,12 @@ Compiler::AssertionIndex Compiler::optCreateJTrueBoundsAssertion(GenTreePtr tree
else if (vnStore->IsVNConstantBound(relop->gtVNPair.GetConservative()))
{
AssertionDsc dsc;
- dsc.assertionKind = OAK_NOT_EQUAL;
- dsc.op1.kind = O1K_CONSTANT_LOOP_BND;
- dsc.op1.vn = relop->gtVNPair.GetConservative();
- dsc.op2.kind = O2K_CONST_INT;
- dsc.op2.vn = vnStore->VNZeroForType(TYP_INT);
- dsc.op2.u1.iconVal = 0;
+ dsc.assertionKind = OAK_NOT_EQUAL;
+ dsc.op1.kind = O1K_CONSTANT_LOOP_BND;
+ dsc.op1.vn = relop->gtVNPair.GetConservative();
+ dsc.op2.kind = O2K_CONST_INT;
+ dsc.op2.vn = vnStore->VNZeroForType(TYP_INT);
+ dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = 0;
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
@@ -1849,16 +1898,16 @@ Compiler::AssertionIndex Compiler::optAssertionGenJtrue(GenTreePtr tree)
// Find assertion kind.
switch (relop->gtOper)
{
- case GT_EQ:
- assertionKind = OAK_EQUAL;
- break;
- case GT_NE:
- assertionKind = OAK_NOT_EQUAL;
- break;
- default:
- // TODO-CQ: add other relop operands. Disabled for now to measure perf
- // and not occupy assertion table slots. We'll add them when used.
- return NO_ASSERTION_INDEX;
+ case GT_EQ:
+ assertionKind = OAK_EQUAL;
+ break;
+ case GT_NE:
+ assertionKind = OAK_NOT_EQUAL;
+ break;
+ default:
+ // TODO-CQ: add other relop operands. Disabled for now to measure perf
+ // and not occupy assertion table slots. We'll add them when used.
+ return NO_ASSERTION_INDEX;
}
// Check for op1 or op2 to be lcl var and if so, keep it in op1.
@@ -1867,8 +1916,8 @@ Compiler::AssertionIndex Compiler::optAssertionGenJtrue(GenTreePtr tree)
jitstd::swap(op1, op2);
}
// If op1 is lcl and op2 is const or lcl, create assertion.
- if ((op1->gtOper == GT_LCL_VAR) &&
- ((op2->OperKind() & GTK_CONST) || (op2->gtOper == GT_LCL_VAR))) // Fix for Dev10 851483
+ if ((op1->gtOper == GT_LCL_VAR) &&
+ ((op2->OperKind() & GTK_CONST) || (op2->gtOper == GT_LCL_VAR))) // Fix for Dev10 851483
{
return optCreateJtrueAssertions(op1, op2, assertionKind);
}
@@ -1903,7 +1952,7 @@ Compiler::AssertionIndex Compiler::optAssertionGenJtrue(GenTreePtr tree)
{
return NO_ASSERTION_INDEX;
}
-
+
op2 = op1->gtCall.gtCallLateArgs->gtOp.gtOp2;
op1 = op1->gtCall.gtCallLateArgs;
@@ -1936,9 +1985,7 @@ Compiler::AssertionIndex Compiler::optAssertionGenPhiDefn(GenTreePtr tree)
// Try to find if all phi arguments are known to be non-null.
bool isNonNull = true;
- for (GenTreeArgList* args = phi->gtOp.gtOp1->AsArgList();
- args != nullptr;
- args = args->Rest())
+ for (GenTreeArgList* args = phi->gtOp.gtOp1->AsArgList(); args != nullptr; args = args->Rest())
{
if (!vnStore->IsKnownNonNull(args->Current()->gtVNPair.GetConservative()))
{
@@ -1959,9 +2006,9 @@ Compiler::AssertionIndex Compiler::optAssertionGenPhiDefn(GenTreePtr tree)
*
* If this statement creates a value assignment or assertion
* then assign an index to the given value assignment by adding
- * it to the lookup table, if necessary.
+ * it to the lookup table, if necessary.
*/
-void Compiler::optAssertionGen(GenTreePtr tree)
+void Compiler::optAssertionGen(GenTreePtr tree)
{
tree->ClearAssertion();
@@ -1974,84 +2021,84 @@ void Compiler::optAssertionGen(GenTreePtr tree)
optAssertionPropCurrentTree = tree;
#endif
- // For most of the assertions that we create below
+ // For most of the assertions that we create below
// the assertion is true after the tree is processed
- bool assertionProven = true;
- AssertionIndex assertionIndex = NO_ASSERTION_INDEX;
+ bool assertionProven = true;
+ AssertionIndex assertionIndex = NO_ASSERTION_INDEX;
switch (tree->gtOper)
{
- case GT_ASG:
- // VN takes care of non local assertions for assignments and data flow.
- if (optLocalAssertionProp)
- {
- assertionIndex = optCreateAssertion(tree->gtOp.gtOp1, tree->gtOp.gtOp2, OAK_EQUAL);
- }
- else
- {
- assertionIndex = optAssertionGenPhiDefn(tree);
- }
- break;
+ case GT_ASG:
+ // VN takes care of non local assertions for assignments and data flow.
+ if (optLocalAssertionProp)
+ {
+ assertionIndex = optCreateAssertion(tree->gtOp.gtOp1, tree->gtOp.gtOp2, OAK_EQUAL);
+ }
+ else
+ {
+ assertionIndex = optAssertionGenPhiDefn(tree);
+ }
+ break;
- case GT_IND:
- case GT_NULLCHECK:
+ case GT_IND:
+ case GT_NULLCHECK:
// An indirection can create a non-null assertion
- case GT_ARR_LENGTH:
- // An array length can create a non-null assertion
- assertionIndex = optCreateAssertion(tree->gtOp.gtOp1, nullptr, OAK_NOT_EQUAL);
- break;
+ case GT_ARR_LENGTH:
+ // An array length can create a non-null assertion
+ assertionIndex = optCreateAssertion(tree->gtOp.gtOp1, nullptr, OAK_NOT_EQUAL);
+ break;
- case GT_ARR_BOUNDS_CHECK:
- if (!optLocalAssertionProp)
- {
- assertionIndex = optCreateAssertion(tree, nullptr, OAK_NO_THROW);
- }
- break;
+ case GT_ARR_BOUNDS_CHECK:
+ if (!optLocalAssertionProp)
+ {
+ assertionIndex = optCreateAssertion(tree, nullptr, OAK_NO_THROW);
+ }
+ break;
- case GT_ARR_ELEM:
- // An array element reference can create a non-null assertion
- assertionIndex = optCreateAssertion(tree->gtArrElem.gtArrObj, nullptr, OAK_NOT_EQUAL);
- break;
+ case GT_ARR_ELEM:
+ // An array element reference can create a non-null assertion
+ assertionIndex = optCreateAssertion(tree->gtArrElem.gtArrObj, nullptr, OAK_NOT_EQUAL);
+ break;
- case GT_CALL:
- // A virtual call can create a non-null assertion. We transform some virtual calls into non-virtual calls
- // with a GTF_CALL_NULLCHECK flag set.
- if ((tree->gtFlags & GTF_CALL_NULLCHECK) ||
- ((tree->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
- {
- // Retrieve the 'this' arg
- GenTreePtr thisArg = gtGetThisArg(tree);
-#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
- if (thisArg == nullptr)
+ case GT_CALL:
+ // A virtual call can create a non-null assertion. We transform some virtual calls into non-virtual calls
+ // with a GTF_CALL_NULLCHECK flag set.
+ if ((tree->gtFlags & GTF_CALL_NULLCHECK) || ((tree->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
{
- // For tail calls we lose the this pointer in the argument list but that's OK because a null check
- // was made explicit, so we get the assertion when we walk the GT_IND in the argument list.
- noway_assert(tree->gtCall.IsTailCall());
- break;
- }
+ // Retrieve the 'this' arg
+ GenTreePtr thisArg = gtGetThisArg(tree);
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+ if (thisArg == nullptr)
+ {
+ // For tail calls we lose the this pointer in the argument list but that's OK because a null check
+ // was made explicit, so we get the assertion when we walk the GT_IND in the argument list.
+ noway_assert(tree->gtCall.IsTailCall());
+ break;
+ }
#endif // _TARGET_X86_ || _TARGET_AMD64_ || _TARGET_ARM_
- noway_assert(thisArg != nullptr);
- assertionIndex = optCreateAssertion(thisArg, nullptr, OAK_NOT_EQUAL);
- }
- break;
+ noway_assert(thisArg != nullptr);
+ assertionIndex = optCreateAssertion(thisArg, nullptr, OAK_NOT_EQUAL);
+ }
+ break;
- case GT_CAST:
- // We only create this assertion for global assertion prop
- if (!optLocalAssertionProp)
- {
- // This represets an assertion that we would like to prove to be true. It is not actually a true assertion.
- // If we can prove this assertion true then we can eliminate this cast.
- assertionIndex = optCreateAssertion(tree->gtOp.gtOp1, tree, OAK_SUBRANGE);
- assertionProven = false;
- }
- break;
+ case GT_CAST:
+ // We only create this assertion for global assertion prop
+ if (!optLocalAssertionProp)
+ {
+                // This represents an assertion that we would like to prove to be true. It is not actually a true
+ // assertion.
+ // If we can prove this assertion true then we can eliminate this cast.
+ assertionIndex = optCreateAssertion(tree->gtOp.gtOp1, tree, OAK_SUBRANGE);
+ assertionProven = false;
+ }
+ break;
- case GT_JTRUE:
- assertionIndex = optAssertionGenJtrue(tree);
- break;
+ case GT_JTRUE:
+ assertionIndex = optAssertionGenJtrue(tree);
+ break;
- default:
- // All other gtOper node kinds, leave 'assertionIndex' = NO_ASSERTION_INDEX
- break;
+ default:
+ // All other gtOper node kinds, leave 'assertionIndex' = NO_ASSERTION_INDEX
+ break;
}
// For global assertion prop we must store the assertion number in the tree node
@@ -2073,14 +2120,14 @@ void Compiler::optMapComplementary(AssertionIndex assertionIndex, AssertionIndex
return;
}
optComplementaryAssertionMap[assertionIndex] = index;
- optComplementaryAssertionMap[index] = assertionIndex;
+ optComplementaryAssertionMap[index] = assertionIndex;
}
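optMapComplementary records the pairing in both directions so that either member of an (OAK_EQUAL, OAK_NOT_EQUAL) pair finds the other in O(1); optFindComplementary below only falls back to a linear scan when no mapping has been recorded yet. A minimal standalone sketch of that bookkeeping, with hypothetical names and index 0 standing in for NO_ASSERTION_INDEX:

    #include <algorithm>
    #include <vector>

    static std::vector<unsigned> g_complementMap; // value 0 means "no complement recorded"

    void MapComplementarySketch(unsigned assertionIndex, unsigned index)
    {
        if (assertionIndex == 0 || index == 0)
        {
            return; // 0 plays the role of NO_ASSERTION_INDEX: nothing to record
        }
        const unsigned needed = std::max(assertionIndex, index) + 1;
        if (g_complementMap.size() < needed)
        {
            g_complementMap.resize(needed, 0);
        }
        g_complementMap[assertionIndex] = index;          // record the link in both directions,
        g_complementMap[index]          = assertionIndex; // as the JIT code above does
    }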
/*****************************************************************************
*
- * Given an assertion index, return the assertion index of the complementary
+ * Given an assertion index, return the assertion index of the complementary
* assertion or 0 if one does not exist.
- */
+ */
Compiler::AssertionIndex Compiler::optFindComplementary(AssertionIndex assertIndex)
{
if (assertIndex == NO_ASSERTION_INDEX)
@@ -2101,7 +2148,8 @@ Compiler::AssertionIndex Compiler::optFindComplementary(AssertionIndex assertInd
return index;
}
- optAssertionKind complementaryAssertionKind = (inputAssertion->assertionKind == OAK_EQUAL) ? OAK_NOT_EQUAL : OAK_EQUAL;
+ optAssertionKind complementaryAssertionKind =
+ (inputAssertion->assertionKind == OAK_EQUAL) ? OAK_NOT_EQUAL : OAK_EQUAL;
for (AssertionIndex index = 1; index <= optAssertionCount; ++index)
{
// Make sure assertion kinds are complementary and op1, op2 kinds match.
@@ -2123,7 +2171,9 @@ Compiler::AssertionIndex Compiler::optFindComplementary(AssertionIndex assertInd
* if one such assertion could not be found in "assertions."
*/
-Compiler::AssertionIndex Compiler::optAssertionIsSubrange(GenTreePtr tree, var_types toType, ASSERT_VALARG_TP assertions)
+Compiler::AssertionIndex Compiler::optAssertionIsSubrange(GenTreePtr tree,
+ var_types toType,
+ ASSERT_VALARG_TP assertions)
{
if (!optLocalAssertionProp && BitVecOps::IsEmpty(apTraits, assertions))
{
@@ -2133,9 +2183,10 @@ Compiler::AssertionIndex Compiler::optAssertionIsSubrange(GenTreePtr tree, var_t
for (AssertionIndex index = 1; index <= optAssertionCount; index++)
{
AssertionDsc* curAssertion = optGetAssertion(index);
- if ((optLocalAssertionProp || BitVecOps::IsMember(apTraits, assertions, index - 1)) && // either local prop or use propagated assertions
- (curAssertion->assertionKind == OAK_SUBRANGE) &&
- (curAssertion->op1.kind == O1K_LCLVAR))
+ if ((optLocalAssertionProp ||
+ BitVecOps::IsMember(apTraits, assertions, index - 1)) && // either local prop or use propagated assertions
+ (curAssertion->assertionKind == OAK_SUBRANGE) &&
+ (curAssertion->op1.kind == O1K_LCLVAR))
{
// For local assertion prop use comparison on locals, and use comparison on vns for global prop.
bool isEqual = optLocalAssertionProp ? (curAssertion->op1.lcl.lclNum == tree->AsLclVarCommon()->GetLclNum())
@@ -2148,30 +2199,30 @@ Compiler::AssertionIndex Compiler::optAssertionIsSubrange(GenTreePtr tree, var_t
// Make sure the toType is within current assertion's bounds.
switch (toType)
{
- case TYP_BYTE:
- case TYP_UBYTE:
- case TYP_SHORT:
- case TYP_USHORT:
- case TYP_CHAR:
- if ((curAssertion->op2.u2.loBound < AssertionDsc::GetLowerBoundForIntegralType(toType)) ||
- (curAssertion->op2.u2.hiBound > AssertionDsc::GetUpperBoundForIntegralType(toType)))
- {
- continue;
- }
- break;
+ case TYP_BYTE:
+ case TYP_UBYTE:
+ case TYP_SHORT:
+ case TYP_USHORT:
+ case TYP_CHAR:
+ if ((curAssertion->op2.u2.loBound < AssertionDsc::GetLowerBoundForIntegralType(toType)) ||
+ (curAssertion->op2.u2.hiBound > AssertionDsc::GetUpperBoundForIntegralType(toType)))
+ {
+ continue;
+ }
+ break;
- case TYP_UINT:
- if (curAssertion->op2.u2.loBound < AssertionDsc::GetLowerBoundForIntegralType(toType))
- {
- continue;
- }
- break;
+ case TYP_UINT:
+ if (curAssertion->op2.u2.loBound < AssertionDsc::GetLowerBoundForIntegralType(toType))
+ {
+ continue;
+ }
+ break;
- case TYP_INT:
- break;
+ case TYP_INT:
+ break;
- default:
- continue;
+ default:
+ continue;
}
return index;
}
@@ -2188,7 +2239,9 @@ Compiler::AssertionIndex Compiler::optAssertionIsSubrange(GenTreePtr tree, var_t
* could not be found, then it returns NO_ASSERTION_INDEX.
*
*/
-Compiler::AssertionIndex Compiler::optAssertionIsSubtype(GenTreePtr tree, GenTreePtr methodTableArg, ASSERT_VALARG_TP assertions)
+Compiler::AssertionIndex Compiler::optAssertionIsSubtype(GenTreePtr tree,
+ GenTreePtr methodTableArg,
+ ASSERT_VALARG_TP assertions)
{
if (!optLocalAssertionProp && BitVecOps::IsEmpty(apTraits, assertions))
{
@@ -2196,15 +2249,14 @@ Compiler::AssertionIndex Compiler::optAssertionIsSubtype(GenTreePtr tree, GenTre
}
for (AssertionIndex index = 1; index <= optAssertionCount; index++)
{
- if (!optLocalAssertionProp && !BitVecOps::IsMember(apTraits, assertions, index - 1))
+ if (!optLocalAssertionProp && !BitVecOps::IsMember(apTraits, assertions, index - 1))
{
continue;
}
AssertionDsc* curAssertion = optGetAssertion(index);
if (curAssertion->assertionKind != OAK_EQUAL ||
- (curAssertion->op1.kind != O1K_SUBTYPE &&
- curAssertion->op1.kind != O1K_EXACT_TYPE))
+ (curAssertion->op1.kind != O1K_SUBTYPE && curAssertion->op1.kind != O1K_EXACT_TYPE))
{
continue;
}
@@ -2228,9 +2280,9 @@ Compiler::AssertionIndex Compiler::optAssertionIsSubtype(GenTreePtr tree, GenTre
{
continue;
}
-
- ssize_t methodTableVal = 0;
- unsigned iconFlags = 0;
+
+ ssize_t methodTableVal = 0;
+ unsigned iconFlags = 0;
if (!optIsTreeKnownIntValue(!optLocalAssertionProp, methodTableArg, &methodTableVal, &iconFlags))
{
continue;
@@ -2259,10 +2311,10 @@ Compiler::AssertionIndex Compiler::optAssertionIsSubtype(GenTreePtr tree, GenTre
// Returns nullptr when no transformation is possible.
//
// Description:
-// Transforms a tree node if its result evaluates to a constant. The
+// Transforms a tree node if its result evaluates to a constant. The
// transformation can be a "ChangeOper" to a constant or a new constant node
// with extracted side-effects.
-//
+//
// Before replacing or substituting the "tree" with a constant, extracts any
// side effects from the "tree" and creates a comma separated side effect list
// and then appends the transformed node at the end of the list.
@@ -2300,11 +2352,11 @@ GenTreePtr Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTreePtr stmt,
return nullptr;
}
- GenTreePtr newTree = tree;
+ GenTreePtr newTree = tree;
GenTreePtr sideEffList = nullptr;
switch (vnStore->TypeOfVN(vnCns))
{
- case TYP_FLOAT:
+ case TYP_FLOAT:
{
float value = vnStore->ConstantValue<float>(vnCns);
@@ -2314,7 +2366,7 @@ GenTreePtr Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTreePtr stmt,
newTree = optPrepareTreeForReplacement(tree, tree);
tree->ChangeOperConst(GT_CNS_INT);
tree->gtIntCon.gtIconVal = *(reinterpret_cast<int*>(&value));
- tree->gtVNPair = ValueNumPair(vnLib, vnCns);
+ tree->gtVNPair = ValueNumPair(vnLib, vnCns);
}
else
{
@@ -2324,12 +2376,12 @@ GenTreePtr Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTreePtr stmt,
newTree = optPrepareTreeForReplacement(tree, tree);
tree->ChangeOperConst(GT_CNS_DBL);
tree->gtDblCon.gtDconVal = value;
- tree->gtVNPair = ValueNumPair(vnLib, vnCns);
+ tree->gtVNPair = ValueNumPair(vnLib, vnCns);
}
break;
}
- case TYP_DOUBLE:
+ case TYP_DOUBLE:
{
double value = vnStore->ConstantValue<double>(vnCns);
@@ -2349,12 +2401,12 @@ GenTreePtr Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTreePtr stmt,
newTree = optPrepareTreeForReplacement(tree, tree);
tree->ChangeOperConst(GT_CNS_DBL);
tree->gtDblCon.gtDconVal = value;
- tree->gtVNPair = ValueNumPair(vnLib, vnCns);
+ tree->gtVNPair = ValueNumPair(vnLib, vnCns);
}
break;
}
- case TYP_LONG:
+ case TYP_LONG:
{
INT64 value = vnStore->ConstantValue<INT64>(vnCns);
#ifdef _TARGET_64BIT_
@@ -2366,9 +2418,9 @@ GenTreePtr Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTreePtr stmt,
if (!opts.compReloc)
#endif
{
- newTree = gtNewIconHandleNode(value, vnStore->GetHandleFlags(vnCns));
+ newTree = gtNewIconHandleNode(value, vnStore->GetHandleFlags(vnCns));
newTree->gtVNPair = ValueNumPair(vnLib, vnCns);
- newTree = optPrepareTreeForReplacement(tree, newTree);
+ newTree = optPrepareTreeForReplacement(tree, newTree);
}
}
else
@@ -2376,56 +2428,58 @@ GenTreePtr Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTreePtr stmt,
{
switch (tree->TypeGet())
{
- case TYP_INT:
- // Implicit assignment conversion to smaller integer
- newTree = optPrepareTreeForReplacement(tree, tree);
- tree->ChangeOperConst(GT_CNS_INT);
- tree->gtIntCon.gtIconVal = (int) value;
- tree->gtVNPair = ValueNumPair(vnLib, vnCns);
- break;
-
- case TYP_LONG:
- // Same type no conversion required
- newTree = optPrepareTreeForReplacement(tree, tree);
- tree->ChangeOperConst(GT_CNS_NATIVELONG);
- tree->gtIntConCommon.SetLngValue(value);
- tree->gtVNPair = ValueNumPair(vnLib, vnCns);
- break;
-
- case TYP_FLOAT:
- // No implicit conversions from long to float and value numbering will
- // not propagate through memory reinterpretations of different size.
- unreached();
- break;
-
- case TYP_DOUBLE:
- // Same sized reinterpretation of bits to double
- newTree = optPrepareTreeForReplacement(tree, tree);
- tree->ChangeOperConst(GT_CNS_DBL);
- tree->gtDblCon.gtDconVal = *(reinterpret_cast<double*>(&value));
- tree->gtVNPair = ValueNumPair(vnLib, vnCns);
- break;
-
- default:
- return nullptr;
+ case TYP_INT:
+ // Implicit assignment conversion to smaller integer
+ newTree = optPrepareTreeForReplacement(tree, tree);
+ tree->ChangeOperConst(GT_CNS_INT);
+ tree->gtIntCon.gtIconVal = (int)value;
+ tree->gtVNPair = ValueNumPair(vnLib, vnCns);
+ break;
+
+ case TYP_LONG:
+ // Same type no conversion required
+ newTree = optPrepareTreeForReplacement(tree, tree);
+ tree->ChangeOperConst(GT_CNS_NATIVELONG);
+ tree->gtIntConCommon.SetLngValue(value);
+ tree->gtVNPair = ValueNumPair(vnLib, vnCns);
+ break;
+
+ case TYP_FLOAT:
+ // No implicit conversions from long to float and value numbering will
+ // not propagate through memory reinterpretations of different size.
+ unreached();
+ break;
+
+ case TYP_DOUBLE:
+ // Same sized reinterpretation of bits to double
+ newTree = optPrepareTreeForReplacement(tree, tree);
+ tree->ChangeOperConst(GT_CNS_DBL);
+ tree->gtDblCon.gtDconVal = *(reinterpret_cast<double*>(&value));
+ tree->gtVNPair = ValueNumPair(vnLib, vnCns);
+ break;
+
+ default:
+ return nullptr;
}
}
}
break;
- case TYP_REF:
- if (tree->TypeGet() != TYP_REF)
- return nullptr;
+ case TYP_REF:
+ if (tree->TypeGet() != TYP_REF)
+ {
+ return nullptr;
+ }
- assert(vnStore->ConstantValue<size_t>(vnCns) == 0);
- newTree = optPrepareTreeForReplacement(tree, tree);
- tree->ChangeOperConst(GT_CNS_INT);
- tree->gtIntCon.gtIconVal = 0;
- tree->ClearIconHandleMask();
- tree->gtVNPair = ValueNumPair(vnLib, vnCns);
- break;
+ assert(vnStore->ConstantValue<size_t>(vnCns) == 0);
+ newTree = optPrepareTreeForReplacement(tree, tree);
+ tree->ChangeOperConst(GT_CNS_INT);
+ tree->gtIntCon.gtIconVal = 0;
+ tree->ClearIconHandleMask();
+ tree->gtVNPair = ValueNumPair(vnLib, vnCns);
+ break;
- case TYP_INT:
+ case TYP_INT:
{
int value = vnStore->ConstantValue<int>(vnCns);
#ifndef _TARGET_64BIT_
@@ -2437,9 +2491,9 @@ GenTreePtr Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTreePtr stmt,
if (!opts.compReloc)
#endif
{
- newTree = gtNewIconHandleNode(value, vnStore->GetHandleFlags(vnCns));
+ newTree = gtNewIconHandleNode(value, vnStore->GetHandleFlags(vnCns));
newTree->gtVNPair = ValueNumPair(vnLib, vnCns);
- newTree = optPrepareTreeForReplacement(tree, newTree);
+ newTree = optPrepareTreeForReplacement(tree, newTree);
}
}
else
@@ -2447,47 +2501,47 @@ GenTreePtr Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTreePtr stmt,
{
switch (tree->TypeGet())
{
- case TYP_REF:
- case TYP_INT:
- // Same type no conversion required
- newTree = optPrepareTreeForReplacement(tree, tree);
- tree->ChangeOperConst(GT_CNS_INT);
- tree->gtIntCon.gtIconVal = value;
- tree->ClearIconHandleMask();
- tree->gtVNPair = ValueNumPair(vnLib, vnCns);
- break;
-
- case TYP_LONG:
- // Implicit assignment conversion to larger integer
- newTree = optPrepareTreeForReplacement(tree, tree);
- tree->ChangeOperConst(GT_CNS_NATIVELONG);
- tree->gtIntConCommon.SetLngValue(value);
- tree->gtVNPair = ValueNumPair(vnLib, vnCns);
- break;
-
- case TYP_FLOAT:
- // Same sized reinterpretation of bits to float
- newTree = optPrepareTreeForReplacement(tree, tree);
- tree->ChangeOperConst(GT_CNS_DBL);
- tree->gtDblCon.gtDconVal = *(reinterpret_cast<float*>(&value));
- tree->gtVNPair = ValueNumPair(vnLib, vnCns);
- break;
-
- case TYP_DOUBLE:
- // No implicit conversions from int to double and value numbering will
- // not propagate through memory reinterpretations of different size.
- unreached();
- break;
+ case TYP_REF:
+ case TYP_INT:
+ // Same type no conversion required
+ newTree = optPrepareTreeForReplacement(tree, tree);
+ tree->ChangeOperConst(GT_CNS_INT);
+ tree->gtIntCon.gtIconVal = value;
+ tree->ClearIconHandleMask();
+ tree->gtVNPair = ValueNumPair(vnLib, vnCns);
+ break;
+
+ case TYP_LONG:
+ // Implicit assignment conversion to larger integer
+ newTree = optPrepareTreeForReplacement(tree, tree);
+ tree->ChangeOperConst(GT_CNS_NATIVELONG);
+ tree->gtIntConCommon.SetLngValue(value);
+ tree->gtVNPair = ValueNumPair(vnLib, vnCns);
+ break;
+
+ case TYP_FLOAT:
+ // Same sized reinterpretation of bits to float
+ newTree = optPrepareTreeForReplacement(tree, tree);
+ tree->ChangeOperConst(GT_CNS_DBL);
+ tree->gtDblCon.gtDconVal = *(reinterpret_cast<float*>(&value));
+ tree->gtVNPair = ValueNumPair(vnLib, vnCns);
+ break;
+
+ case TYP_DOUBLE:
+ // No implicit conversions from int to double and value numbering will
+ // not propagate through memory reinterpretations of different size.
+ unreached();
+ break;
- default:
- return nullptr;
+ default:
+ return nullptr;
}
}
}
break;
- default:
- return nullptr;
+ default:
+ return nullptr;
}
return newTree;
}
@@ -2497,7 +2551,9 @@ GenTreePtr Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTreePtr stmt,
* Perform constant propagation on a tree given the "curAssertion" is true at the point of the "tree."
*
*/
-GenTreePtr Compiler::optConstantAssertionProp(AssertionDsc* curAssertion, GenTreePtr tree, GenTreePtr stmt DEBUGARG(AssertionIndex index))
+GenTreePtr Compiler::optConstantAssertionProp(AssertionDsc* curAssertion,
+ GenTreePtr tree,
+ GenTreePtr stmt DEBUGARG(AssertionIndex index))
{
unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
@@ -2512,90 +2568,91 @@ GenTreePtr Compiler::optConstantAssertionProp(AssertionDsc* curAssertion, GenTre
// Typically newTree == tree and we are updating the node in place
switch (curAssertion->op2.kind)
{
- case O2K_CONST_DOUBLE:
- // There could be a positive zero and a negative zero, so don't propagate zeroes.
- if (curAssertion->op2.dconVal == 0.0)
- {
- return nullptr;
- }
- newTree->ChangeOperConst(GT_CNS_DBL);
- newTree->gtDblCon.gtDconVal = curAssertion->op2.dconVal;
- break;
-
- case O2K_CONST_LONG:
- if (newTree->gtType == TYP_LONG)
- {
- newTree->ChangeOperConst(GT_CNS_NATIVELONG);
- newTree->gtIntConCommon.SetLngValue(curAssertion->op2.lconVal);
- }
- else
- {
- newTree->ChangeOperConst(GT_CNS_INT);
- newTree->gtIntCon.gtIconVal = (int) curAssertion->op2.lconVal;
- newTree->gtType=TYP_INT;
- }
- break;
+ case O2K_CONST_DOUBLE:
+ // There could be a positive zero and a negative zero, so don't propagate zeroes.
+ if (curAssertion->op2.dconVal == 0.0)
+ {
+ return nullptr;
+ }
+ newTree->ChangeOperConst(GT_CNS_DBL);
+ newTree->gtDblCon.gtDconVal = curAssertion->op2.dconVal;
+ break;
- case O2K_CONST_INT:
- if (curAssertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK)
- {
- // Here we have to allocate a new 'large' node to replace the old one
- newTree = gtNewIconHandleNode(curAssertion->op2.u1.iconVal,
- curAssertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK);
- }
- else
- {
- bool isArrIndex = ((tree->gtFlags & GTF_VAR_ARR_INDEX) != 0);
- newTree->ChangeOperConst(GT_CNS_INT);
- newTree->gtIntCon.gtIconVal = curAssertion->op2.u1.iconVal;
- newTree->ClearIconHandleMask();
- // If we're doing an array index address, assume any constant propagated contributes to the index.
- if (isArrIndex)
+ case O2K_CONST_LONG:
+ if (newTree->gtType == TYP_LONG)
{
- newTree->gtIntCon.gtFieldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
+ newTree->ChangeOperConst(GT_CNS_NATIVELONG);
+ newTree->gtIntConCommon.SetLngValue(curAssertion->op2.lconVal);
}
- newTree->gtFlags &= ~GTF_VAR_ARR_INDEX;
- }
+ else
+ {
+ newTree->ChangeOperConst(GT_CNS_INT);
+ newTree->gtIntCon.gtIconVal = (int)curAssertion->op2.lconVal;
+ newTree->gtType = TYP_INT;
+ }
+ break;
- // Constant ints are of type TYP_INT, not any of the short forms.
- if (varTypeIsIntegral(newTree->TypeGet()))
- {
-#ifdef _TARGET_64BIT_
- var_types newType = (var_types)((curAssertion->op2.u1.iconFlags & 1) ? TYP_LONG : TYP_INT);
- if (newTree->TypeGet() != newType)
+ case O2K_CONST_INT:
+ if (curAssertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK)
{
- noway_assert(newTree->gtType != TYP_REF);
- newTree->gtType = newType;
+ // Here we have to allocate a new 'large' node to replace the old one
+ newTree = gtNewIconHandleNode(curAssertion->op2.u1.iconVal,
+ curAssertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK);
}
-#else
- if (newTree->TypeGet() != TYP_INT)
+ else
{
- noway_assert(newTree->gtType != TYP_REF && newTree->gtType != TYP_LONG);
- newTree->gtType = TYP_INT;
+ bool isArrIndex = ((tree->gtFlags & GTF_VAR_ARR_INDEX) != 0);
+ newTree->ChangeOperConst(GT_CNS_INT);
+ newTree->gtIntCon.gtIconVal = curAssertion->op2.u1.iconVal;
+ newTree->ClearIconHandleMask();
+ // If we're doing an array index address, assume any constant propagated contributes to the index.
+ if (isArrIndex)
+ {
+ newTree->gtIntCon.gtFieldSeq =
+ GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
+ }
+ newTree->gtFlags &= ~GTF_VAR_ARR_INDEX;
}
+
+ // Constant ints are of type TYP_INT, not any of the short forms.
+ if (varTypeIsIntegral(newTree->TypeGet()))
+ {
+#ifdef _TARGET_64BIT_
+ var_types newType = (var_types)((curAssertion->op2.u1.iconFlags & 1) ? TYP_LONG : TYP_INT);
+ if (newTree->TypeGet() != newType)
+ {
+ noway_assert(newTree->gtType != TYP_REF);
+ newTree->gtType = newType;
+ }
+#else
+ if (newTree->TypeGet() != TYP_INT)
+ {
+ noway_assert(newTree->gtType != TYP_REF && newTree->gtType != TYP_LONG);
+ newTree->gtType = TYP_INT;
+ }
#endif
- }
- break;
-
- default:
- return nullptr;
+ }
+ break;
+
+ default:
+ return nullptr;
}
if (!optLocalAssertionProp)
{
- assert(newTree->OperIsConst()); // We should have a simple Constant node for newTree
- assert(vnStore->IsVNConstant(curAssertion->op2.vn)); // The value number stored for op2 should be a valid
- // VN representing the constant
- newTree->gtVNPair.SetBoth(curAssertion->op2.vn); // Set the ValueNumPair to the constant VN from op2
- // of the assertion
+ assert(newTree->OperIsConst()); // We should have a simple Constant node for newTree
+ assert(vnStore->IsVNConstant(curAssertion->op2.vn)); // The value number stored for op2 should be a valid
+ // VN representing the constant
+ newTree->gtVNPair.SetBoth(curAssertion->op2.vn); // Set the ValueNumPair to the constant VN from op2
+ // of the assertion
}
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
printf("\nAssertion prop in BB%02u:\n", compCurBB->bbNum);
optPrintAssertion(curAssertion, index);
- gtDispTree(newTree, 0, nullptr, true);
+ gtDispTree(newTree, nullptr, nullptr, true);
}
#endif
if (lvaLocalVarRefCounted)
@@ -2614,19 +2671,19 @@ GenTreePtr Compiler::optConstantAssertionProp(AssertionDsc* curAssertion, GenTre
*/
bool Compiler::optAssertionProp_LclVarTypeCheck(GenTreePtr tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc)
{
- /*
- Small struct field locals are stored using the exact width and loaded widened
+ /*
+ Small struct field locals are stored using the exact width and loaded widened
(i.e. lvNormalizeOnStore==false lvNormalizeOnLoad==true),
because the field locals might end up embedded in the parent struct local with the exact width.
-
+
In other words, a store to a short field local should always be done using an exact width store
-
+
[00254538] 0x0009 ------------ const int 0x1234
[002545B8] 0x000B -A--G--NR--- = short
[00254570] 0x000A D------N---- lclVar short V43 tmp40
- mov word ptr [L_043], 0x1234
-
+ mov word ptr [L_043], 0x1234
+
Now, if we copy prop, say a short field local V43, to another short local V34
for the following tree:
@@ -2635,21 +2692,21 @@ bool Compiler::optAssertionProp_LclVarTypeCheck(GenTreePtr tree, LclVarDsc* lclV
[04E196DC] 0x0001 D------N---- lclVar int V36 tmp33
We will end with this tree:
-
+
[04E18650] 0x0001 ------------ lclVar int V43 tmp40
[04E19714] 0x0002 -A-----NR--- = int
[04E196DC] 0x0001 D------N---- lclVar int V36 tmp33 EAX
- And eventually causing a fetch of 4-byte out from [L_043] :(
+ And eventually causing a fetch of 4-byte out from [L_043] :(
mov EAX, dword ptr [L_043]
- The following check is to make sure we only perform the copy prop
+ The following check is to make sure we only perform the copy prop
when we don't retrieve the wider value.
- */
+ */
if (copyVarDsc->lvIsStructField)
{
- var_types varType = (var_types) copyVarDsc->lvType;
+ var_types varType = (var_types)copyVarDsc->lvType;
// Make sure we don't retrieve the wider value.
return !varTypeIsSmall(varType) || (varType == tree->TypeGet());
}
@@ -2664,7 +2721,9 @@ bool Compiler::optAssertionProp_LclVarTypeCheck(GenTreePtr tree, LclVarDsc* lclV
* the "curAssertion."
*
*/
-GenTreePtr Compiler::optCopyAssertionProp(AssertionDsc* curAssertion, GenTreePtr tree, GenTreePtr stmt DEBUGARG(AssertionIndex index))
+GenTreePtr Compiler::optCopyAssertionProp(AssertionDsc* curAssertion,
+ GenTreePtr tree,
+ GenTreePtr stmt DEBUGARG(AssertionIndex index))
{
const AssertionDsc::AssertionDscOp1& op1 = curAssertion->op1;
const AssertionDsc::AssertionDscOp2& op2 = curAssertion->op2;
@@ -2687,7 +2746,7 @@ GenTreePtr Compiler::optCopyAssertionProp(AssertionDsc* curAssertion, GenTreePtr
// Extract the ssaNum of the matching lclNum.
unsigned ssaNum = (op1.lcl.lclNum == lclNum) ? op1.lcl.ssaNum : op2.lcl.ssaNum;
copySsaNum = (op1.lcl.lclNum == lclNum) ? op2.lcl.ssaNum : op1.lcl.ssaNum;
-
+
if (ssaNum != tree->AsLclVarCommon()->GetSsaNum())
{
return nullptr;
@@ -2695,7 +2754,7 @@ GenTreePtr Compiler::optCopyAssertionProp(AssertionDsc* curAssertion, GenTreePtr
}
LclVarDsc* copyVarDsc = &lvaTable[copyLclNum];
- LclVarDsc* lclVarDsc = &lvaTable[lclNum];
+ LclVarDsc* lclVarDsc = &lvaTable[lclNum];
// Make sure the types are compatible.
if (!optAssertionProp_LclVarTypeCheck(tree, lclVarDsc, copyVarDsc))
@@ -2718,12 +2777,12 @@ GenTreePtr Compiler::optCopyAssertionProp(AssertionDsc* curAssertion, GenTreePtr
}
tree->gtLclVarCommon.SetLclNum(copyLclNum);
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
printf("\nAssertion prop in BB%02u:\n", compCurBB->bbNum);
optPrintAssertion(curAssertion, index);
- gtDispTree(tree, 0, nullptr, true);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
@@ -2731,7 +2790,6 @@ GenTreePtr Compiler::optCopyAssertionProp(AssertionDsc* curAssertion, GenTreePtr
return optAssertionProp_Update(tree, tree, stmt);
}
-
/*****************************************************************************
*
* Given a tree consisting of just a LclVar and a set of available assertions
@@ -2750,18 +2808,21 @@ GenTreePtr Compiler::optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, const
{
return nullptr;
}
-
+
BitVecOps::Iter iter(apTraits, assertions);
- unsigned index = 0;
+ unsigned index = 0;
while (iter.NextElem(apTraits, &index))
{
index++;
- if (index > optAssertionCount) break;
+ if (index > optAssertionCount)
+ {
+ break;
+ }
// See if the variable is equal to a constant or another variable.
AssertionDsc* curAssertion = optGetAssertion((AssertionIndex)index);
if (curAssertion->assertionKind != OAK_EQUAL || curAssertion->op1.kind != O1K_LCLVAR)
{
- continue;
+ continue;
}
// Copy prop.
@@ -2785,7 +2846,7 @@ GenTreePtr Compiler::optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, const
}
// Constant prop (for local assertion prop.)
// The case where the tree type could be different than the LclVar type is caused by
- // gtFoldExpr, specifically the case of a cast, where the fold operation changes the type of the LclVar
+ // gtFoldExpr, specifically the case of a cast, where the fold operation changes the type of the LclVar
// node. In such a case it is not safe to perform the substitution since later on the JIT will assert mismatching
// types between trees.
else if (curAssertion->op1.lcl.lclNum == tree->gtLclVarCommon.GetLclNum() &&
@@ -2812,15 +2873,14 @@ GenTreePtr Compiler::optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, const
return nullptr;
}
-
/*****************************************************************************
*
* Given a set of "assertions" to search, find an assertion that matches
 * op1Kind and lclNum, op2Kind and the constant value, and is either an equal
 * or a not-equal assertion.
*/
-Compiler::AssertionIndex Compiler::optLocalAssertionIsEqualOrNotEqual(optOp1Kind op1Kind, unsigned lclNum,
- optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions)
+Compiler::AssertionIndex Compiler::optLocalAssertionIsEqualOrNotEqual(
+ optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions)
{
noway_assert((op1Kind == O1K_LCLVAR) || (op1Kind == O1K_EXACT_TYPE) || (op1Kind == O1K_SUBTYPE));
noway_assert((op2Kind == O2K_CONST_INT) || (op2Kind == O2K_IND_CNS_INT));
@@ -2832,19 +2892,18 @@ Compiler::AssertionIndex Compiler::optLocalAssertionIsEqualOrNotEqual(optOp1Kind
for (AssertionIndex index = 1; index <= optAssertionCount; ++index)
{
AssertionDsc* curAssertion = optGetAssertion(index);
- if (optLocalAssertionProp || BitVecOps::IsMember(apTraits, assertions, index - 1))
+ if (optLocalAssertionProp || BitVecOps::IsMember(apTraits, assertions, index - 1))
{
if ((curAssertion->assertionKind != OAK_EQUAL) && (curAssertion->assertionKind != OAK_NOT_EQUAL))
{
continue;
}
- if ((curAssertion->op1.kind == op1Kind) &&
- (curAssertion->op1.lcl.lclNum == lclNum) &&
+ if ((curAssertion->op1.kind == op1Kind) && (curAssertion->op1.lcl.lclNum == lclNum) &&
(curAssertion->op2.kind == op2Kind))
{
bool constantIsEqual = (curAssertion->op2.u1.iconVal == cnsVal);
- bool assertionIsEqual = (curAssertion->assertionKind == OAK_EQUAL);
+ bool assertionIsEqual = (curAssertion->assertionKind == OAK_EQUAL);
if (constantIsEqual || assertionIsEqual)
{
@@ -2862,18 +2921,23 @@ Compiler::AssertionIndex Compiler::optLocalAssertionIsEqualOrNotEqual(optOp1Kind
* "op1" == "op2" or "op1" != "op2." Does a value number based comparison.
*
*/
-Compiler::AssertionIndex Compiler::optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTreePtr op1, GenTreePtr op2)
+Compiler::AssertionIndex Compiler::optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions,
+ GenTreePtr op1,
+ GenTreePtr op2)
{
if (BitVecOps::IsEmpty(apTraits, assertions))
{
return NO_ASSERTION_INDEX;
}
BitVecOps::Iter iter(apTraits, assertions);
- unsigned index = 0;
+ unsigned index = 0;
while (iter.NextElem(apTraits, &index))
{
index++;
- if (index > optAssertionCount) break;
+ if (index > optAssertionCount)
+ {
+ break;
+ }
AssertionDsc* curAssertion = optGetAssertion((AssertionIndex)index);
if ((curAssertion->assertionKind != OAK_EQUAL && curAssertion->assertionKind != OAK_NOT_EQUAL))
{
@@ -2889,12 +2953,11 @@ Compiler::AssertionIndex Compiler::optGlobalAssertionIsEqualOrNotEqual(ASSERT_VA
return NO_ASSERTION_INDEX;
}
-
/*****************************************************************************
*
* Given a tree consisting of a RelOp and a set of available assertions
* we try to propagate an assertion and modify the RelOp tree if we can.
- * We pass in the root of the tree via 'stmt', for local copy prop 'stmt' will be nullptr
+ * We pass in the root of the tree via 'stmt', for local copy prop 'stmt' will be nullptr
* Returns the modified tree, or nullptr if no assertion prop took place
*/
@@ -2928,13 +2991,15 @@ GenTreePtr Compiler::optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, const G
* perform Value numbering based relop assertion propagation on the tree.
*
*/
-GenTreePtr Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, const GenTreePtr tree, const GenTreePtr stmt)
+GenTreePtr Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions,
+ const GenTreePtr tree,
+ const GenTreePtr stmt)
{
assert(tree->OperGet() == GT_EQ || tree->OperGet() == GT_NE);
GenTreePtr newTree = tree;
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
if (op1->gtOper != GT_LCL_VAR)
{
@@ -2956,9 +3021,9 @@ GenTreePtr Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, c
// If the assertion involves "op2" and it is a constant, then check if "op1" also has a constant value.
if (vnStore->IsVNConstant(op2->gtVNPair.GetConservative()))
{
- ValueNum vnCns = op2->gtVNPair.GetConservative();
+ ValueNum vnCns = op2->gtVNPair.GetConservative();
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nVN relop based constant assertion prop in BB%02u:\n", compCurBB->bbNum);
printf("Assertion index=#%02u: ", index);
@@ -2990,7 +3055,7 @@ GenTreePtr Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, c
{
printf("??unknown\n");
}
- gtDispTree(tree, 0, nullptr, true);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
// Decrement the ref counts, before we change the oper.
@@ -3034,25 +3099,24 @@ GenTreePtr Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, c
noway_assert(vnStore->ConstantValue<size_t>(vnCns) == 0);
op1->gtIntCon.gtIconVal = 0;
}
- else
+ else
{
noway_assert(!"unknown type in Global_RelOp");
}
- op1->gtVNPair.SetBoth(vnCns); // Preserve the ValueNumPair, as ChangeOperConst/SetOper will clear it.
+ op1->gtVNPair.SetBoth(vnCns); // Preserve the ValueNumPair, as ChangeOperConst/SetOper will clear it.
}
// If the assertion involves "op2" and "op1" is also a local var, then just morph the tree.
else if (op2->gtOper == GT_LCL_VAR)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nVN relop based copy assertion prop in BB%02u:\n", compCurBB->bbNum);
- printf("Assertion index=#%02u: V%02d.%02d %s V%02d.%02d\n", index,
- op1->gtLclVar.gtLclNum, op1->gtLclVar.gtSsaNum,
- (curAssertion->assertionKind == OAK_EQUAL) ? "==" : "!=",
+ printf("Assertion index=#%02u: V%02d.%02d %s V%02d.%02d\n", index, op1->gtLclVar.gtLclNum,
+ op1->gtLclVar.gtSsaNum, (curAssertion->assertionKind == OAK_EQUAL) ? "==" : "!=",
op2->gtLclVar.gtLclNum, op2->gtLclVar.gtSsaNum);
- gtDispTree(tree, 0, nullptr, true);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
lvaTable[op1->gtLclVar.gtLclNum].decRefCnts(compCurBB->getBBWeight(this), this);
@@ -3063,7 +3127,7 @@ GenTreePtr Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, c
{
// Note we can't trust the OAK_EQUAL as the value could end up being a NaN
// violating the assertion. However, we create OAK_EQUAL assertions for floating
- // point only on JTrue nodes, so if the condition held earlier, it will hold
+ // point only on JTrue nodes, so if the condition held earlier, it will hold
// now. We don't create OAK_EQUAL assertion on floating point from GT_ASG
// because we depend on value num which would constant prop the NaN.
lvaTable[op2->gtLclVar.gtLclNum].decRefCnts(compCurBB->getBBWeight(this), this);
@@ -3097,7 +3161,7 @@ GenTreePtr Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, c
#ifdef DEBUG
if (verbose)
{
- gtDispTree(newTree, 0, nullptr, true);
+ gtDispTree(newTree, nullptr, nullptr, true);
}
#endif
@@ -3110,7 +3174,9 @@ GenTreePtr Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, c
* perform local variable name based relop assertion propagation on the tree.
*
*/
-GenTreePtr Compiler::optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, const GenTreePtr tree, const GenTreePtr stmt)
+GenTreePtr Compiler::optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions,
+ const GenTreePtr tree,
+ const GenTreePtr stmt)
{
assert(tree->OperGet() == GT_EQ || tree->OperGet() == GT_NE);
@@ -3129,18 +3195,21 @@ GenTreePtr Compiler::optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, co
return nullptr;
}
- optOp1Kind op1Kind = O1K_LCLVAR;
- optOp2Kind op2Kind = O2K_CONST_INT;
- ssize_t cnsVal = op2->gtIntCon.gtIconVal;
- var_types cmpType = op1->TypeGet();
+ optOp1Kind op1Kind = O1K_LCLVAR;
+ optOp2Kind op2Kind = O2K_CONST_INT;
+ ssize_t cnsVal = op2->gtIntCon.gtIconVal;
+ var_types cmpType = op1->TypeGet();
- // Don't try to fold/optimize Floating Compares; there are multiple zero values.
+ // Don't try to fold/optimize Floating Compares; there are multiple zero values.
if (varTypeIsFloating(cmpType))
+ {
return nullptr;
+ }
// Find an equal or not equal assertion about op1 var.
- unsigned lclNum = op1->gtLclVarCommon.gtLclNum; noway_assert(lclNum < lvaCount);
- AssertionIndex index = optLocalAssertionIsEqualOrNotEqual(op1Kind, lclNum, op2Kind, cnsVal, assertions);
+ unsigned lclNum = op1->gtLclVarCommon.gtLclNum;
+ noway_assert(lclNum < lvaCount);
+ AssertionIndex index = optLocalAssertionIsEqualOrNotEqual(op1Kind, lclNum, op2Kind, cnsVal, assertions);
if (index == NO_ASSERTION_INDEX)
{
@@ -3149,8 +3218,8 @@ GenTreePtr Compiler::optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, co
AssertionDsc* curAssertion = optGetAssertion(index);
- bool assertionKindIsEqual = (curAssertion->assertionKind == OAK_EQUAL);
- bool constantIsEqual = false;
+ bool assertionKindIsEqual = (curAssertion->assertionKind == OAK_EQUAL);
+ bool constantIsEqual = false;
if (genTypeSize(cmpType) == TARGET_POINTER_SIZE)
{
@@ -3171,22 +3240,23 @@ GenTreePtr Compiler::optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, co
noway_assert(constantIsEqual || assertionKindIsEqual);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("\nAssertion prop for index #%02u in BB%02u:\n",
- index, compCurBB->bbNum);
- gtDispTree(tree, 0, nullptr, true);
- }
+ printf("\nAssertion prop for index #%02u in BB%02u:\n", index, compCurBB->bbNum);
+ gtDispTree(tree, nullptr, nullptr, true);
+ }
#endif
// Return either CNS_INT 0 or CNS_INT 1.
bool foldResult = (constantIsEqual == assertionKindIsEqual);
if (tree->gtOper == GT_NE)
- foldResult = !foldResult;
+ {
+ foldResult = !foldResult;
+ }
op2->gtIntCon.gtIconVal = foldResult;
- op2->gtType = TYP_INT;
+ op2->gtType = TYP_INT;
return optAssertionProp_Update(op2, tree, stmt);
}
@@ -3234,7 +3304,7 @@ GenTreePtr Compiler::optAssertionProp_Cast(ASSERT_VALARG_TP assertions, const Ge
{
// For normalize on load variables it must be a narrowing cast to remove
if (genTypeSize(toType) > genTypeSize(varDsc->TypeGet()))
- {
+ {
// Can we just remove the GTF_OVERFLOW flag?
if ((tree->gtFlags & GTF_OVERFLOW) == 0)
{
@@ -3243,14 +3313,14 @@ GenTreePtr Compiler::optAssertionProp_Cast(ASSERT_VALARG_TP assertions, const Ge
else
{
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("\nSubrange prop for index #%02u in BB%02u:\n", index, compCurBB->bbNum);
- gtDispTree(tree, 0, nullptr, true);
+ printf("\nSubrange prop for index #%02u in BB%02u:\n", index, compCurBB->bbNum);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
- tree->gtFlags &= ~GTF_OVERFLOW; // This cast cannot overflow
+ tree->gtFlags &= ~GTF_OVERFLOW; // This cast cannot overflow
return optAssertionProp_Update(tree, tree, stmt);
}
}
@@ -3261,8 +3331,8 @@ GenTreePtr Compiler::optAssertionProp_Cast(ASSERT_VALARG_TP assertions, const Ge
//
// Where the lclvar is known to be in the range of [0..MAX_UINT]
//
- // A load of a 32-bit unsigned int is the same as a load of a 32-bit signed int
- //
+ // A load of a 32-bit unsigned int is the same as a load of a 32-bit signed int
+ //
if (toType == TYP_UINT)
{
toType = TYP_INT;
@@ -3274,18 +3344,17 @@ GenTreePtr Compiler::optAssertionProp_Cast(ASSERT_VALARG_TP assertions, const Ge
while (tmp->gtOper == GT_COMMA)
{
tmp->gtType = toType;
- tmp = tmp->gtOp.gtOp2;
+ tmp = tmp->gtOp.gtOp2;
}
noway_assert(tmp == lcl);
tmp->gtType = toType;
}
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("\nSubrange prop for index #%02u in BB%02u:\n",
- index, compCurBB->bbNum);
- gtDispTree(tree, 0, nullptr, true);
+ printf("\nSubrange prop for index #%02u in BB%02u:\n", index, compCurBB->bbNum);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
return optAssertionProp_Update(op1, tree, stmt);
@@ -3298,14 +3367,12 @@ GenTreePtr Compiler::optAssertionProp_Cast(ASSERT_VALARG_TP assertions, const Ge
* Given a tree with an array bounds check node, eliminate it because it was
* checked already in the program.
*/
-GenTreePtr Compiler::optAssertionProp_Comma(ASSERT_VALARG_TP assertions,
- const GenTreePtr tree,
- const GenTreePtr stmt)
+GenTreePtr Compiler::optAssertionProp_Comma(ASSERT_VALARG_TP assertions, const GenTreePtr tree, const GenTreePtr stmt)
{
// Remove the bounds check as part of the GT_COMMA node since we need the parent pointer to remove nodes.
// When processing visits the bounds check, it sets the throw kind to None if the check is redundant.
- if ((tree->gtGetOp1()->OperGet() == GT_ARR_BOUNDS_CHECK)
- && ((tree->gtGetOp1()->gtFlags & GTF_ARR_BOUND_INBND) != 0))
+ if ((tree->gtGetOp1()->OperGet() == GT_ARR_BOUNDS_CHECK) &&
+ ((tree->gtGetOp1()->gtFlags & GTF_ARR_BOUND_INBND) != 0))
{
optRemoveRangeCheck(tree, stmt, true, GTF_ASG, true /* force remove */);
return optAssertionProp_Update(tree, tree, stmt);
@@ -3333,10 +3400,10 @@ GenTreePtr Compiler::optAssertionProp_Ind(ASSERT_VALARG_TP assertions, const Gen
}
// Check for add of a constant.
- GenTreePtr op1 = tree->gtOp.gtOp1;
- if ((op1->gtOper == GT_ADD) && (op1->gtOp.gtOp2->gtOper == GT_CNS_INT))
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ if ((op1->gtOper == GT_ADD) && (op1->gtOp.gtOp2->gtOper == GT_CNS_INT))
{
- op1 = op1->gtOp.gtOp1;
+ op1 = op1->gtOp.gtOp1;
}
if (op1->gtOper != GT_LCL_VAR)
@@ -3347,24 +3414,24 @@ GenTreePtr Compiler::optAssertionProp_Ind(ASSERT_VALARG_TP assertions, const Gen
unsigned lclNum = op1->gtLclVarCommon.gtLclNum;
#ifdef DEBUG
- bool vnBased = false;
- AssertionIndex index = NO_ASSERTION_INDEX;
+ bool vnBased = false;
+ AssertionIndex index = NO_ASSERTION_INDEX;
#endif
if (optAssertionIsNonNull(op1, assertions DEBUGARG(&vnBased) DEBUGARG(&index)))
{
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
(vnBased) ? printf("\nVN based non-null prop in BB%02u:\n", compCurBB->bbNum)
: printf("\nNon-null prop for index #%02u in BB%02u:\n", index, compCurBB->bbNum);
- gtDispTree(tree, 0, nullptr, true);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
tree->gtFlags &= ~GTF_EXCEPT;
// Set this flag to prevent reordering
- tree->gtFlags |= GTF_ORDER_SIDEEFF;
-
+ tree->gtFlags |= GTF_ORDER_SIDEEFF;
+
return optAssertionProp_Update(tree, tree, stmt);
}
@@ -3381,7 +3448,9 @@ GenTreePtr Compiler::optAssertionProp_Ind(ASSERT_VALARG_TP assertions, const Gen
* Note: If both VN and assertion table yield a matching assertion, "pVnBased"
* is only set and the return value is "NO_ASSERTION_INDEX."
*/
-bool Compiler::optAssertionIsNonNull(GenTreePtr op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex))
+bool Compiler::optAssertionIsNonNull(GenTreePtr op,
+ ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)
+ DEBUGARG(AssertionIndex* pIndex))
{
bool vnBased = (!optLocalAssertionProp && vnStore->IsKnownNonNull(op->gtVNPair.GetConservative()));
#ifdef DEBUG
@@ -3422,11 +3491,14 @@ Compiler::AssertionIndex Compiler::optAssertionIsNonNullInternal(GenTreePtr op,
// Check each assertion to find if we have a vn == or != null assertion.
BitVecOps::Iter iter(apTraits, assertions);
- unsigned index = 0;
+ unsigned index = 0;
while (iter.NextElem(apTraits, &index))
{
index++;
- if (index > optAssertionCount) break;
+ if (index > optAssertionCount)
+ {
+ break;
+ }
AssertionDsc* curAssertion = optGetAssertion((AssertionIndex)index);
if (curAssertion->assertionKind != OAK_NOT_EQUAL)
{
@@ -3446,11 +3518,10 @@ Compiler::AssertionIndex Compiler::optAssertionIsNonNullInternal(GenTreePtr op,
for (AssertionIndex index = 1; index <= optAssertionCount; index++)
{
AssertionDsc* curAssertion = optGetAssertion(index);
- if ((curAssertion->assertionKind == OAK_NOT_EQUAL) && // kind
- (curAssertion->op1.kind == O1K_LCLVAR) && // op1
- (curAssertion->op2.kind == O2K_CONST_INT) && // op2
- (curAssertion->op1.lcl.lclNum == lclNum) &&
- (curAssertion->op2.u1.iconVal == 0))
+ if ((curAssertion->assertionKind == OAK_NOT_EQUAL) && // kind
+ (curAssertion->op1.kind == O1K_LCLVAR) && // op1
+ (curAssertion->op2.kind == O2K_CONST_INT) && // op2
+ (curAssertion->op1.lcl.lclNum == lclNum) && (curAssertion->op2.u1.iconVal == 0))
{
return index;
}
@@ -3465,7 +3536,9 @@ Compiler::AssertionIndex Compiler::optAssertionIsNonNullInternal(GenTreePtr op,
* Returns the modified tree, or nullptr if no assertion prop took place.
*
*/
-GenTreePtr Compiler::optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, const GenTreePtr tree, const GenTreePtr stmt)
+GenTreePtr Compiler::optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions,
+ const GenTreePtr tree,
+ const GenTreePtr stmt)
{
assert(tree->gtOper == GT_CALL);
if ((tree->gtFlags & GTF_CALL_NULLCHECK) == 0)
@@ -3480,17 +3553,17 @@ GenTreePtr Compiler::optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, c
}
#ifdef DEBUG
- bool vnBased = false;
- AssertionIndex index = NO_ASSERTION_INDEX;
+ bool vnBased = false;
+ AssertionIndex index = NO_ASSERTION_INDEX;
#endif
if (optAssertionIsNonNull(op1, assertions DEBUGARG(&vnBased) DEBUGARG(&index)))
{
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
(vnBased) ? printf("\nVN based non-null prop in BB%02u:\n", compCurBB->bbNum)
: printf("\nNon-null prop for index #%02u in BB%02u:\n", index, compCurBB->bbNum);
- gtDispTree(tree, 0, nullptr, true);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
tree->gtFlags &= ~GTF_CALL_NULLCHECK;
@@ -3518,7 +3591,7 @@ GenTreePtr Compiler::optAssertionProp_Call(ASSERT_VALARG_TP assertions, const Ge
assert(tree->gtOper == GT_CALL);
if (optNonNullAssertionProp_Call(assertions, tree, stmt))
- {
+ {
return optAssertionProp_Update(tree, tree, stmt);
}
else if (!optLocalAssertionProp && (tree->gtCall.gtCallType == CT_HELPER))
@@ -3544,12 +3617,11 @@ GenTreePtr Compiler::optAssertionProp_Call(ASSERT_VALARG_TP assertions, const Ge
unsigned index = optAssertionIsSubtype(arg1, arg2, assertions);
if (index != NO_ASSERTION_INDEX)
{
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("\nDid VN based subtype prop for index #%02u in BB%02u:\n",
- index, compCurBB->bbNum);
- gtDispTree(tree, 0, nullptr, true);
+ printf("\nDid VN based subtype prop for index #%02u in BB%02u:\n", index, compCurBB->bbNum);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
GenTreePtr list = nullptr;
@@ -3562,10 +3634,9 @@ GenTreePtr Compiler::optAssertionProp_Call(ASSERT_VALARG_TP assertions, const Ge
return optAssertionProp_Update(arg1, tree, stmt);
}
- }
+ }
}
-
return nullptr;
}
@@ -3584,11 +3655,14 @@ GenTreePtr Compiler::optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, const
assert(tree->gtOper == GT_ARR_BOUNDS_CHECK);
BitVecOps::Iter iter(apTraits, assertions);
- unsigned index = 0;
+ unsigned index = 0;
while (iter.NextElem(apTraits, &index))
{
index++;
- if (index > optAssertionCount) break;
+ if (index > optAssertionCount)
+ {
+ break;
+ }
// If it is not a nothrow assertion, skip.
AssertionDsc* curAssertion = optGetAssertion((AssertionIndex)index);
if (!curAssertion->IsBoundsCheckNoThrow())
@@ -3598,10 +3672,10 @@ GenTreePtr Compiler::optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, const
GenTreeBoundsChk* arrBndsChk = tree->AsBoundsChk();
- // Set 'isRedundant' to true if we can determine that 'arrBndsChk' can be
+ // Set 'isRedundant' to true if we can determine that 'arrBndsChk' can be
// classified as a redundant bounds check using 'curAssertion'
bool isRedundant = false;
-#ifdef DEBUG
+#ifdef DEBUG
const char* dbgMsg = "Not Set";
#endif
@@ -3615,17 +3689,17 @@ GenTreePtr Compiler::optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, const
if (curAssertion->op1.bnd.vnIdx == vnCurIdx)
{
isRedundant = true;
-#ifdef DEBUG
+#ifdef DEBUG
dbgMsg = "a[i] followed by a[i]";
#endif
}
- // Are we using zero as the index?
+ // Are we using zero as the index?
// It can always be considered as redundant with any previous value
// a[*] followed by a[0]
else if (vnCurIdx == vnStore->VNZeroForType(arrBndsChk->gtIndex->TypeGet()))
{
isRedundant = true;
-#ifdef DEBUG
+#ifdef DEBUG
dbgMsg = "a[*] followed by a[0]";
#endif
}
@@ -3649,7 +3723,7 @@ GenTreePtr Compiler::optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, const
if (index2 >= 0 && index1 >= index2)
{
isRedundant = true;
-#ifdef DEBUG
+#ifdef DEBUG
dbgMsg = "a[K1] followed by a[K2], with K2 >= 0 and K1 >= K2";
#endif
}
@@ -3662,14 +3736,16 @@ GenTreePtr Compiler::optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, const
}
if (!isRedundant)
+ {
continue;
+ }
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("\nVN based redundant (%s) bounds check assertion prop for index #%02u in BB%02u:\n",
- dbgMsg, index, compCurBB->bbNum);
- gtDispTree(tree, 0, nullptr, true);
+ printf("\nVN based redundant (%s) bounds check assertion prop for index #%02u in BB%02u:\n", dbgMsg, index,
+ compCurBB->bbNum);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
@@ -3705,7 +3781,7 @@ GenTreePtr Compiler::optAssertionProp_Update(const GenTreePtr newTree, const Gen
// locate our parent node and update it so that it points to newTree
if (newTree != tree)
{
- GenTreePtr * link = gtFindLink(stmt, tree);
+ GenTreePtr* link = gtFindLink(stmt, tree);
#ifdef DEBUG
if (link == nullptr)
{
@@ -3733,13 +3809,12 @@ GenTreePtr Compiler::optAssertionProp_Update(const GenTreePtr newTree, const Gen
}
// Record that we propagated the assertion.
- optAssertionPropagated = true;
+ optAssertionPropagated = true;
optAssertionPropagatedCurrentStmt = true;
return newTree;
}
-
/*****************************************************************************
*
* Given a tree and a set of available assertions we try to propagate an
@@ -3749,49 +3824,47 @@ GenTreePtr Compiler::optAssertionProp_Update(const GenTreePtr newTree, const Gen
* Returns the modified tree, or nullptr if no assertion prop took place.
*/
-GenTreePtr Compiler::optAssertionProp(ASSERT_VALARG_TP assertions,
- const GenTreePtr tree,
- const GenTreePtr stmt)
+GenTreePtr Compiler::optAssertionProp(ASSERT_VALARG_TP assertions, const GenTreePtr tree, const GenTreePtr stmt)
{
switch (tree->gtOper)
{
- case GT_LCL_VAR:
- return optAssertionProp_LclVar(assertions, tree, stmt);
+ case GT_LCL_VAR:
+ return optAssertionProp_LclVar(assertions, tree, stmt);
- case GT_IND:
- case GT_NULLCHECK:
- return optAssertionProp_Ind(assertions, tree, stmt);
+ case GT_IND:
+ case GT_NULLCHECK:
+ return optAssertionProp_Ind(assertions, tree, stmt);
- case GT_ARR_BOUNDS_CHECK:
- return optAssertionProp_BndsChk(assertions, tree, stmt);
+ case GT_ARR_BOUNDS_CHECK:
+ return optAssertionProp_BndsChk(assertions, tree, stmt);
- case GT_COMMA:
- return optAssertionProp_Comma(assertions, tree, stmt);
+ case GT_COMMA:
+ return optAssertionProp_Comma(assertions, tree, stmt);
- case GT_CAST:
- return optAssertionProp_Cast(assertions, tree, stmt);
+ case GT_CAST:
+ return optAssertionProp_Cast(assertions, tree, stmt);
- case GT_CALL:
- return optAssertionProp_Call(assertions, tree, stmt);
+ case GT_CALL:
+ return optAssertionProp_Call(assertions, tree, stmt);
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GT:
- case GT_GE:
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GT:
+ case GT_GE:
- return optAssertionProp_RelOp(assertions, tree, stmt);
+ return optAssertionProp_RelOp(assertions, tree, stmt);
- default:
- return nullptr;
- }
+ default:
+ return nullptr;
+ }
}
//------------------------------------------------------------------------
-// optImpliedAssertions: Given a tree node that makes an assertion this
-// method computes the set of implied assertions
-// that are also true. The updated assertions are
+// optImpliedAssertions: Given a tree node that makes an assertion this
+// method computes the set of implied assertions
+// that are also true. The updated assertions are
// maintained on the Compiler object.
//
// Arguments:
@@ -3808,7 +3881,7 @@ void Compiler::optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& ac
if (!BitVecOps::IsEmpty(apTraits, activeAssertions))
{
const ASSERT_TP mappedAssertions = optGetVnMappedAssertions(curAssertion->op1.vn);
- if (mappedAssertions == NULL)
+ if (mappedAssertions == nullptr)
{
return;
}
@@ -3818,7 +3891,7 @@ void Compiler::optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& ac
if (curAssertion->op2.kind == O2K_LCLVAR_COPY)
{
const ASSERT_TP op2Assertions = optGetVnMappedAssertions(curAssertion->op2.vn);
- if (op2Assertions != NULL)
+ if (op2Assertions != nullptr)
{
BitVecOps::UnionD(apTraits, chkAssertions, op2Assertions);
}
@@ -3832,11 +3905,14 @@ void Compiler::optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& ac
// Check each assertion in chkAssertions to see if it can be applied to curAssertion
BitVecOps::Iter chkIter(apTraits, chkAssertions);
- unsigned chkIndex = 0;
+ unsigned chkIndex = 0;
while (chkIter.NextElem(apTraits, &chkIndex))
{
chkIndex++;
- if (chkIndex > optAssertionCount) break;
+ if (chkIndex > optAssertionCount)
+ {
+ break;
+ }
if (chkIndex == assertionIndex)
{
continue;
@@ -3852,14 +3928,12 @@ void Compiler::optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& ac
{
optImpliedByCopyAssertion(iterAssertion, curAssertion, activeAssertions);
}
-
}
}
// Is curAssertion a constant assignment of a 32-bit integer?
// (i.e. GT_LCL_VAR X == GT_CNS_INT)
- else if ((curAssertion->assertionKind == OAK_EQUAL) &&
- (curAssertion->op1.kind == O1K_LCLVAR) &&
- (curAssertion->op2.kind == O2K_CONST_INT))
+ else if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) &&
+ (curAssertion->op2.kind == O2K_CONST_INT))
{
optImpliedByConstAssertion(curAssertion, activeAssertions);
}
@@ -3867,7 +3941,7 @@ void Compiler::optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& ac
/*****************************************************************************
*
- * Given a set of active assertions this method computes the set
+ * Given a set of active assertions this method computes the set
* of non-Null implied assertions that are also true
*/
@@ -3880,11 +3954,14 @@ void Compiler::optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions)
// Check each assertion in activeAssertions to see if it can be applied to constAssertion
BitVecOps::Iter chkIter(apTraits, activeAssertions);
- unsigned chkIndex = 0;
+ unsigned chkIndex = 0;
while (chkIter.NextElem(apTraits, &chkIndex))
{
chkIndex++;
- if (chkIndex > optAssertionCount) break;
+ if (chkIndex > optAssertionCount)
+ {
+ break;
+ }
// chkAssertion must be Type/Subtype is equal assertion
AssertionDsc* chkAssertion = optGetAssertion((AssertionIndex)chkIndex);
if ((chkAssertion->op1.kind != O1K_SUBTYPE && chkAssertion->op1.kind != O1K_EXACT_TYPE) ||
@@ -3907,8 +3984,7 @@ void Compiler::optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions)
// impAssertion must be a Non Null assertion on lclNum
if ((impAssertion->assertionKind != OAK_NOT_EQUAL) ||
((impAssertion->op1.kind != O1K_LCLVAR) && (impAssertion->op1.kind != O1K_VALUE_NUMBER)) ||
- (impAssertion->op2.kind != O2K_CONST_INT) ||
- (impAssertion->op1.vn != chkAssertion->op1.vn))
+ (impAssertion->op2.kind != O2K_CONST_INT) || (impAssertion->op1.vn != chkAssertion->op1.vn))
{
continue;
}
@@ -3917,11 +3993,11 @@ void Compiler::optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions)
if (!BitVecOps::IsMember(apTraits, activeAssertions, impIndex - 1))
{
BitVecOps::AddElemD(apTraits, activeAssertions, impIndex - 1);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("\nCompiler::optImpliedByTypeOfAssertions: %s Assertion #%02d, implies assertion #%02d",
- (chkAssertion->op1.kind == O1K_SUBTYPE) ? "Subtype" : "Exact-type", chkIndex, impIndex);
+ (chkAssertion->op1.kind == O1K_SUBTYPE) ? "Subtype" : "Exact-type", chkIndex, impIndex);
}
#endif
}
@@ -3933,7 +4009,7 @@ void Compiler::optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions)
}
//------------------------------------------------------------------------
-// optGetVnMappedAssertions: Given a value number, get the assertions
+// optGetVnMappedAssertions: Given a value number, get the assertions
// we have about the value number.
//
// Arguments:
@@ -3955,31 +4031,34 @@ ASSERT_VALRET_TP Compiler::optGetVnMappedAssertions(ValueNum vn)
/*****************************************************************************
*
- * Given a const assertion this method computes the set of implied assertions
+ * Given a const assertion this method computes the set of implied assertions
* that are also true
*/
void Compiler::optImpliedByConstAssertion(AssertionDsc* constAssertion, ASSERT_TP& result)
-{
+{
noway_assert(constAssertion->assertionKind == OAK_EQUAL);
- noway_assert(constAssertion->op1.kind == O1K_LCLVAR);
+ noway_assert(constAssertion->op1.kind == O1K_LCLVAR);
noway_assert(constAssertion->op2.kind == O2K_CONST_INT);
ssize_t iconVal = constAssertion->op2.u1.iconVal;
const ASSERT_TP chkAssertions = optGetVnMappedAssertions(constAssertion->op1.vn);
- if (chkAssertions == NULL || BitVecOps::IsEmpty(apTraits, chkAssertions))
+ if (chkAssertions == nullptr || BitVecOps::IsEmpty(apTraits, chkAssertions))
{
return;
}
// Check each assertion in chkAssertions to see if it can be applied to constAssertion
BitVecOps::Iter chkIter(apTraits, chkAssertions);
- unsigned chkIndex = 0;
+ unsigned chkIndex = 0;
while (chkIter.NextElem(apTraits, &chkIndex))
{
chkIndex++;
- if (chkIndex > optAssertionCount) break;
+ if (chkIndex > optAssertionCount)
+ {
+ break;
+ }
// The impAssertion must be different from the const assertion.
AssertionDsc* impAssertion = optGetAssertion((AssertionIndex)chkIndex);
if (impAssertion == constAssertion)
@@ -3996,40 +4075,37 @@ void Compiler::optImpliedByConstAssertion(AssertionDsc* constAssertion, ASSERT_T
bool usable = false;
switch (impAssertion->op2.kind)
{
- case O2K_SUBRANGE:
- // Is the const assertion's constant, within implied assertion's bounds?
- usable = ((iconVal >= impAssertion->op2.u2.loBound) && (iconVal <= impAssertion->op2.u2.hiBound));
- break;
+ case O2K_SUBRANGE:
+ // Is the const assertion's constant, within implied assertion's bounds?
+ usable = ((iconVal >= impAssertion->op2.u2.loBound) && (iconVal <= impAssertion->op2.u2.hiBound));
+ break;
- case O2K_CONST_INT:
- // Is the const assertion's constant equal/not equal to the implied assertion?
- usable = ((impAssertion->assertionKind == OAK_EQUAL) && (impAssertion->op2.u1.iconVal == iconVal)) ||
- ((impAssertion->assertionKind == OAK_NOT_EQUAL) && (impAssertion->op2.u1.iconVal != iconVal));
- break;
+ case O2K_CONST_INT:
+ // Is the const assertion's constant equal/not equal to the implied assertion?
+ usable = ((impAssertion->assertionKind == OAK_EQUAL) && (impAssertion->op2.u1.iconVal == iconVal)) ||
+ ((impAssertion->assertionKind == OAK_NOT_EQUAL) && (impAssertion->op2.u1.iconVal != iconVal));
+ break;
- default:
- // leave 'usable' = false;
- break;
+ default:
+ // leave 'usable' = false;
+ break;
}
if (usable)
{
BitVecOps::AddElemD(apTraits, result, chkIndex - 1);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
AssertionDsc* firstAssertion = optGetAssertion(1);
printf("\nCompiler::optImpliedByConstAssertion: constAssertion #%02d , implies assertion #%02d",
- (constAssertion - firstAssertion) + 1,
- (impAssertion - firstAssertion) + 1);
+ (constAssertion - firstAssertion) + 1, (impAssertion - firstAssertion) + 1);
}
#endif
}
}
}
-
-
/*****************************************************************************
*
* Given a copy assertion and a dependent assertion this method computes the
@@ -4072,8 +4148,7 @@ void Compiler::optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionD
}
}
- if (copyAssertLclNum == BAD_VAR_NUM ||
- copyAssertSsaNum == SsaConfig::RESERVED_SSA_NUM)
+ if (copyAssertLclNum == BAD_VAR_NUM || copyAssertSsaNum == SsaConfig::RESERVED_SSA_NUM)
{
return;
}
@@ -4081,8 +4156,7 @@ void Compiler::optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionD
// Get the depAssert's lcl/ssa nums.
unsigned depAssertLclNum = BAD_VAR_NUM;
unsigned depAssertSsaNum = SsaConfig::RESERVED_SSA_NUM;
- if ((depAssertion->op1.kind == O1K_LCLVAR) &&
- (depAssertion->op2.kind == O2K_LCLVAR_COPY))
+ if ((depAssertion->op1.kind == O1K_LCLVAR) && (depAssertion->op2.kind == O2K_LCLVAR_COPY))
{
if ((depAssertion->op1.lcl.lclNum == copyAssertion->op1.lcl.lclNum) ||
(depAssertion->op1.lcl.lclNum == copyAssertion->op2.lcl.lclNum))
@@ -4098,16 +4172,14 @@ void Compiler::optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionD
}
}
- if (depAssertLclNum == BAD_VAR_NUM ||
- depAssertSsaNum == SsaConfig::RESERVED_SSA_NUM)
+ if (depAssertLclNum == BAD_VAR_NUM || depAssertSsaNum == SsaConfig::RESERVED_SSA_NUM)
{
return;
}
// Is depAssertion a constant assignment of a 32-bit integer?
// (i.e. GT_LCL_VAR X == GT_CNS_INT)
- bool depIsConstAssertion = ((depAssertion->assertionKind == OAK_EQUAL) &&
- (depAssertion->op1.kind == O1K_LCLVAR) &&
+ bool depIsConstAssertion = ((depAssertion->assertionKind == OAK_EQUAL) && (depAssertion->op1.kind == O1K_LCLVAR) &&
(depAssertion->op2.kind == O2K_CONST_INT));
// Search the assertion table for an assertion on op1 that matches depAssertion
@@ -4121,79 +4193,87 @@ void Compiler::optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionD
{
continue;
}
-
+
if (!AssertionDsc::SameKind(depAssertion, impAssertion))
{
continue;
}
- bool op1MatchesCopy = (copyAssertLclNum == impAssertion->op1.lcl.lclNum) && (copyAssertSsaNum == impAssertion->op1.lcl.ssaNum);
+ bool op1MatchesCopy =
+ (copyAssertLclNum == impAssertion->op1.lcl.lclNum) && (copyAssertSsaNum == impAssertion->op1.lcl.ssaNum);
bool usable = false;
switch (impAssertion->op2.kind)
{
- case O2K_SUBRANGE:
- usable = op1MatchesCopy && ((impAssertion->op2.u2.loBound <= depAssertion->op2.u2.loBound) &&
- (impAssertion->op2.u2.hiBound >= depAssertion->op2.u2.hiBound));
- break;
+ case O2K_SUBRANGE:
+ usable = op1MatchesCopy && ((impAssertion->op2.u2.loBound <= depAssertion->op2.u2.loBound) &&
+ (impAssertion->op2.u2.hiBound >= depAssertion->op2.u2.hiBound));
+ break;
- case O2K_CONST_LONG:
- usable = op1MatchesCopy && (impAssertion->op2.lconVal == depAssertion->op2.lconVal);
- break;
+ case O2K_CONST_LONG:
+ usable = op1MatchesCopy && (impAssertion->op2.lconVal == depAssertion->op2.lconVal);
+ break;
- case O2K_CONST_DOUBLE:
- // Exact memory match because of positive and negative zero
- usable = op1MatchesCopy && (memcmp(&impAssertion->op2.dconVal, &depAssertion->op2.dconVal, sizeof(double)) == 0);
- break;
+ case O2K_CONST_DOUBLE:
+ // Exact memory match because of positive and negative zero
+ usable = op1MatchesCopy &&
+ (memcmp(&impAssertion->op2.dconVal, &depAssertion->op2.dconVal, sizeof(double)) == 0);
+ break;
- case O2K_IND_CNS_INT:
- // This is the ngen case where we have a GT_IND of an address.
- noway_assert((impAssertion->op1.kind == O1K_EXACT_TYPE) || (impAssertion->op1.kind == O1K_SUBTYPE));
+ case O2K_IND_CNS_INT:
+ // This is the ngen case where we have a GT_IND of an address.
+ noway_assert((impAssertion->op1.kind == O1K_EXACT_TYPE) || (impAssertion->op1.kind == O1K_SUBTYPE));
- __fallthrough;
+ __fallthrough;
- case O2K_CONST_INT:
- usable = op1MatchesCopy && (impAssertion->op2.u1.iconVal == depAssertion->op2.u1.iconVal);
- break;
+ case O2K_CONST_INT:
+ usable = op1MatchesCopy && (impAssertion->op2.u1.iconVal == depAssertion->op2.u1.iconVal);
+ break;
- case O2K_LCLVAR_COPY:
- // Check if op1 of impAssertion matches copyAssertion and also op2 of impAssertion matches depAssertion.
- if (op1MatchesCopy && (depAssertLclNum == impAssertion->op2.lcl.lclNum && depAssertSsaNum == impAssertion->op2.lcl.ssaNum))
- {
- usable = true;
- }
- else
- {
- // Otherwise, op2 of impAssertion should match copyAssertion and also op1 of impAssertion matches depAssertion.
- usable = ((copyAssertLclNum == impAssertion->op2.lcl.lclNum && copyAssertSsaNum == impAssertion->op2.lcl.ssaNum) &&
- (depAssertLclNum == impAssertion->op1.lcl.lclNum && depAssertSsaNum == impAssertion->op1.lcl.ssaNum));
- }
- break;
+ case O2K_LCLVAR_COPY:
+ // Check if op1 of impAssertion matches copyAssertion and also op2 of impAssertion matches depAssertion.
+ if (op1MatchesCopy && (depAssertLclNum == impAssertion->op2.lcl.lclNum &&
+ depAssertSsaNum == impAssertion->op2.lcl.ssaNum))
+ {
+ usable = true;
+ }
+ else
+ {
+ // Otherwise, op2 of impAssertion should match copyAssertion and also op1 of impAssertion matches
+ // depAssertion.
+ usable = ((copyAssertLclNum == impAssertion->op2.lcl.lclNum &&
+ copyAssertSsaNum == impAssertion->op2.lcl.ssaNum) &&
+ (depAssertLclNum == impAssertion->op1.lcl.lclNum &&
+ depAssertSsaNum == impAssertion->op1.lcl.ssaNum));
+ }
+ break;
- default:
- // leave 'usable' = false;
- break;
+ default:
+ // leave 'usable' = false;
+ break;
}
-
+
if (usable)
{
BitVecOps::AddElemD(apTraits, result, impIndex - 1);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
AssertionDsc* firstAssertion = optGetAssertion(1);
- printf("\nCompiler::optImpliedByCopyAssertion: copyAssertion #%02d and depAssertion #%02d, implies assertion #%02d",
- (copyAssertion - firstAssertion) + 1, (depAssertion - firstAssertion) + 1, (impAssertion - firstAssertion) + 1);
+ printf("\nCompiler::optImpliedByCopyAssertion: copyAssertion #%02d and depAssertion #%02d, implies "
+ "assertion #%02d",
+ (copyAssertion - firstAssertion) + 1, (depAssertion - firstAssertion) + 1,
+ (impAssertion - firstAssertion) + 1);
}
#endif
- // If the depAssertion is a const assertion then any other assertions that it implies could also imply a subrange assertion.
+ // If the depAssertion is a const assertion then any other assertions that it implies could also imply a
+ // subrange assertion.
if (depIsConstAssertion)
{
optImpliedByConstAssertion(impAssertion, result);
}
}
-
}
}
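
As an aside on the O2K_CONST_DOUBLE case above: IEEE 754 treats +0.0 and -0.0 as equal under ==, so only a byte-wise comparison keeps the two constant assertions distinct. A minimal standalone illustration (not part of the commit; the variable names are made up):

#include <cassert>
#include <cstring>

int main()
{
    double posZero = 0.0;
    double negZero = -0.0;

    // Arithmetic comparison cannot distinguish the two zeroes...
    assert(posZero == negZero);

    // ...but their bit patterns differ in the sign bit, which memcmp sees.
    assert(std::memcmp(&posZero, &negZero, sizeof(double)) != 0);
    return 0;
}
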
@@ -4207,13 +4287,13 @@ void Compiler::optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionD
class AssertionPropFlowCallback
{
private:
- ASSERT_TP preMergeOut;
- ASSERT_TP preMergeJumpDestOut;
+ ASSERT_TP preMergeOut;
+ ASSERT_TP preMergeJumpDestOut;
ASSERT_TP* mJumpDestOut;
ASSERT_TP* mJumpDestGen;
- Compiler* m_pCompiler;
+ Compiler* m_pCompiler;
BitVecTraits* apTraits;
public:
@@ -4230,8 +4310,8 @@ public:
// At the start of the merge function of the dataflow equations, initialize premerge state (to detect change.)
void StartMerge(BasicBlock* block)
{
- JITDUMP("AssertionPropCallback::StartMerge: BB%02d in -> %s\n",
- block->bbNum, BitVecOps::ToString(apTraits, block->bbAssertionIn));
+ JITDUMP("AssertionPropCallback::StartMerge: BB%02d in -> %s\n", block->bbNum,
+ BitVecOps::ToString(apTraits, block->bbAssertionIn));
BitVecOps::Assign(apTraits, preMergeOut, block->bbAssertionOut);
BitVecOps::Assign(apTraits, preMergeJumpDestOut, mJumpDestOut[block->bbNum]);
}
@@ -4239,11 +4319,12 @@ public:
// During merge, perform the actual merging of the predecessor's (since this is a forward analysis) dataflow flags.
void Merge(BasicBlock* block, BasicBlock* predBlock, flowList* preds)
{
- ASSERT_TP pAssertionOut = ((predBlock->bbJumpKind == BBJ_COND) && (predBlock->bbJumpDest == block))
- ? mJumpDestOut[predBlock->bbNum] : predBlock->bbAssertionOut;
- JITDUMP("AssertionPropCallback::Merge : BB%02d in -> %s, predBlock BB%02d out -> %s\n",
- block->bbNum, BitVecOps::ToString(apTraits, block->bbAssertionIn),
- predBlock->bbNum, BitVecOps::ToString(apTraits, predBlock->bbAssertionOut));
+ ASSERT_TP pAssertionOut = ((predBlock->bbJumpKind == BBJ_COND) && (predBlock->bbJumpDest == block))
+ ? mJumpDestOut[predBlock->bbNum]
+ : predBlock->bbAssertionOut;
+ JITDUMP("AssertionPropCallback::Merge : BB%02d in -> %s, predBlock BB%02d out -> %s\n", block->bbNum,
+ BitVecOps::ToString(apTraits, block->bbAssertionIn), predBlock->bbNum,
+ BitVecOps::ToString(apTraits, predBlock->bbAssertionOut));
BitVecOps::IntersectionD(apTraits, block->bbAssertionIn, pAssertionOut);
}
@@ -4252,7 +4333,7 @@ public:
{
JITDUMP("AssertionPropCallback::EndMerge : BB%02d in -> %s\n\n", block->bbNum,
BitVecOps::ToString(apTraits, block->bbAssertionIn));
-
+
// PERF: eliminate this tmp by passing in a OperationTree (AST) to the bitset,
// so the expr tree is operated on a single bit level. See "expression templates."
ASSERT_TP tmp = BitVecOps::MakeCopy(apTraits, block->bbAssertionIn);
@@ -4264,26 +4345,24 @@ public:
BitVecOps::IntersectionD(apTraits, mJumpDestOut[block->bbNum], tmp);
bool changed = (!BitVecOps::Equal(apTraits, preMergeOut, block->bbAssertionOut) ||
- !BitVecOps::Equal(apTraits, preMergeJumpDestOut, mJumpDestOut[block->bbNum]));
+ !BitVecOps::Equal(apTraits, preMergeJumpDestOut, mJumpDestOut[block->bbNum]));
if (changed)
{
JITDUMP("AssertionPropCallback::Changed : BB%02d before out -> %s; after out -> %s;\n"
- "\t\tjumpDest before out -> %s; jumpDest after out -> %s;\n\n",
- block->bbNum,
- BitVecOps::ToString(apTraits, preMergeOut),
- BitVecOps::ToString(apTraits, block->bbAssertionOut),
- BitVecOps::ToString(apTraits, preMergeJumpDestOut),
- BitVecOps::ToString(apTraits, mJumpDestOut[block->bbNum]));
+ "\t\tjumpDest before out -> %s; jumpDest after out -> %s;\n\n",
+ block->bbNum, BitVecOps::ToString(apTraits, preMergeOut),
+ BitVecOps::ToString(apTraits, block->bbAssertionOut),
+ BitVecOps::ToString(apTraits, preMergeJumpDestOut),
+ BitVecOps::ToString(apTraits, mJumpDestOut[block->bbNum]));
}
else
{
- JITDUMP("AssertionPropCallback::Unchanged : BB%02d out -> %s; \t\tjumpDest out -> %s\n\n",
- block->bbNum,
- BitVecOps::ToString(apTraits, block->bbAssertionOut),
- BitVecOps::ToString(apTraits, mJumpDestOut[block->bbNum]));
+ JITDUMP("AssertionPropCallback::Unchanged : BB%02d out -> %s; \t\tjumpDest out -> %s\n\n", block->bbNum,
+ BitVecOps::ToString(apTraits, block->bbAssertionOut),
+ BitVecOps::ToString(apTraits, mJumpDestOut[block->bbNum]));
}
-
+
return changed;
}
};
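
The callback above plugs into a forward dataflow solver: a block's incoming assertion set is the intersection of its predecessors' outgoing sets (using the jump-destination set for the taken edge of a conditional branch), and iteration continues until no OUT set changes. A simplified sketch of that shape, with the conditional-edge handling omitted; the types and names here are illustrative, not the JIT's:

#include <bitset>
#include <cstddef>
#include <vector>

constexpr std::size_t kMaxAssertions = 64;
using AssertSet = std::bitset<kMaxAssertions>;

struct Block
{
    std::vector<int> preds;
    AssertSet        gen, in, out;
};

void ForwardAnalysis(std::vector<Block>& blocks)
{
    // Optimistic start: assume every assertion is available everywhere.
    for (Block& b : blocks)
    {
        b.in.set();
        b.out.set();
    }

    bool changed = true;
    while (changed)
    {
        changed = false;
        for (Block& b : blocks)
        {
            AssertSet in; // stays empty for a block with no predecessors
            if (!b.preds.empty())
            {
                in.set();
                for (int p : b.preds)
                {
                    in &= blocks[p].out; // the merge step: set intersection
                }
            }
            AssertSet out = in | b.gen;
            if (in != b.in || out != b.out)
            {
                b.in    = in;
                b.out   = out;
                changed = true;
            }
        }
    }
}
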
@@ -4306,7 +4385,7 @@ ASSERT_TP* Compiler::optComputeAssertionGen()
{
ASSERT_TP* jumpDestGen = fgAllocateTypeForEachBlk<ASSERT_TP>();
- ASSERT_TP valueGen = BitVecOps::MakeEmpty(apTraits);
+ ASSERT_TP valueGen = BitVecOps::MakeEmpty(apTraits);
ASSERT_TP jumpDestValueGen = BitVecOps::MakeEmpty(apTraits);
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
@@ -4325,20 +4404,20 @@ ASSERT_TP* Compiler::optComputeAssertionGen()
{
// Store whatever we have accumulated into jumpDest edge's valueGen.
if (tree->gtOper == GT_JTRUE)
- {
+ {
BitVecOps::Assign(apTraits, jumpDestValueGen, valueGen);
}
- if (!tree->HasAssertion())
+ if (!tree->HasAssertion())
{
continue;
}
// For regular trees, just update valueGen. For GT_JTRUE, for false part,
// update valueGen and true part update jumpDestValueGen.
- AssertionIndex assertionIndex[2] = {
- (AssertionIndex)tree->GetAssertion(),
- (tree->OperGet() == GT_JTRUE) ? optFindComplementary((AssertionIndex)tree->GetAssertion()) : 0
- };
+ AssertionIndex assertionIndex[2] = {(AssertionIndex)tree->GetAssertion(),
+ (tree->OperGet() == GT_JTRUE)
+ ? optFindComplementary((AssertionIndex)tree->GetAssertion())
+ : 0};
for (unsigned i = 0; i < 2; ++i)
{
@@ -4356,17 +4435,17 @@ ASSERT_TP* Compiler::optComputeAssertionGen()
BitVecOps::Assign(apTraits, block->bbAssertionGen, valueGen);
BitVecOps::Assign(apTraits, jumpDestGen[block->bbNum], jumpDestValueGen);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("\nBB%02u valueGen = %s", block->bbNum, BitVecOps::ToString(apTraits, valueGen));
if (block->bbJumpKind == BBJ_COND)
{
- printf(" => BB%02u valueGen = %s,", block->bbJumpDest->bbNum, BitVecOps::ToString(apTraits, jumpDestValueGen));
+ printf(" => BB%02u valueGen = %s,", block->bbJumpDest->bbNum,
+ BitVecOps::ToString(apTraits, jumpDestValueGen));
}
}
#endif
-
}
return jumpDestGen;
}
@@ -4387,9 +4466,9 @@ ASSERT_TP* Compiler::optInitAssertionDataflowFlags()
// assertions (note that at this point we are not creating any new assertions).
// Also note that assertion indices start from 1.
ASSERT_TP apValidFull = optNewEmptyAssertSet();
- for (int i = 1; i <= optAssertionCount; i++)
+ for (int i = 1; i <= optAssertionCount; i++)
{
- BitVecOps::AddElemD(apTraits, apValidFull, i-1);
+ BitVecOps::AddElemD(apTraits, apValidFull, i - 1);
}
// Initially estimate the OUT sets to everything except killed expressions
@@ -4400,10 +4479,10 @@ ASSERT_TP* Compiler::optInitAssertionDataflowFlags()
// edges.
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
- block->bbAssertionIn = optNewEmptyAssertSet();
+ block->bbAssertionIn = optNewEmptyAssertSet();
if (!bbIsHandlerBeg(block))
{
- BitVecOps::Assign(apTraits, block->bbAssertionIn, apValidFull);
+ BitVecOps::Assign(apTraits, block->bbAssertionIn, apValidFull);
}
block->bbAssertionGen = optNewEmptyAssertSet();
block->bbAssertionOut = optNewEmptyAssertSet();
@@ -4420,14 +4499,13 @@ ASSERT_TP* Compiler::optInitAssertionDataflowFlags()
// Callback data for the VN based constant prop visitor.
struct VNAssertionPropVisitorInfo
{
- Compiler* pThis;
- GenTreePtr stmt;
+ Compiler* pThis;
+ GenTreePtr stmt;
BasicBlock* block;
VNAssertionPropVisitorInfo(Compiler* pThis, BasicBlock* block, GenTreePtr stmt)
- : pThis(pThis)
- , stmt(stmt)
- , block(block)
- { }
+ : pThis(pThis), stmt(stmt), block(block)
+ {
+ }
};
//------------------------------------------------------------------------------
@@ -4436,7 +4514,7 @@ struct VNAssertionPropVisitorInfo
// replaced with a comma separated list of side effects + a new tree.
//
// Note:
-// The old and new trees may be the same. In this case, the tree will be
+// The old and new trees may be the same. In this case, the tree will be
// appended to the side-effect list (if present) and returned.
//
// Arguments:
@@ -4502,7 +4580,7 @@ GenTreePtr Compiler::optPrepareTreeForReplacement(GenTreePtr oldTree, GenTreePtr
// block - The block that contains the JTrue.
// stmt - The JTrue stmt which can be evaluated to a constant.
// tree - The JTrue node whose relop evaluates to 0 or non-zero value.
-//
+//
// Return Value:
// The jmpTrue tree node that has relop of the form "0 =/!= 0".
// If "tree" evaluates to "true" relop is "0 == 0". Else relop is "0 != 0".
@@ -4553,12 +4631,12 @@ GenTreePtr Compiler::optVNConstantPropOnJTrue(BasicBlock* block, GenTreePtr stmt
GenTreePtr newStmt;
if (sideEffList->OperGet() == GT_COMMA)
{
- newStmt = fgInsertStmtNearEnd(block, sideEffList->gtGetOp1());
+ newStmt = fgInsertStmtNearEnd(block, sideEffList->gtGetOp1());
sideEffList = sideEffList->gtGetOp2();
}
else
{
- newStmt = fgInsertStmtNearEnd(block, sideEffList);
+ newStmt = fgInsertStmtNearEnd(block, sideEffList);
sideEffList = nullptr;
}
fgMorphBlockStmt(block, newStmt DEBUGARG(__FUNCTION__));
@@ -4567,16 +4645,16 @@ GenTreePtr Compiler::optVNConstantPropOnJTrue(BasicBlock* block, GenTreePtr stmt
}
// Transform the relop's operands to be both zeroes.
- ValueNum vnZero = vnStore->VNZeroForType(TYP_INT);
- relop->gtOp.gtOp1 = gtNewIconNode(0);
+ ValueNum vnZero = vnStore->VNZeroForType(TYP_INT);
+ relop->gtOp.gtOp1 = gtNewIconNode(0);
relop->gtOp.gtOp1->gtVNPair = ValueNumPair(vnZero, vnZero);
- relop->gtOp.gtOp2 = gtNewIconNode(0);
+ relop->gtOp.gtOp2 = gtNewIconNode(0);
relop->gtOp.gtOp2->gtVNPair = ValueNumPair(vnZero, vnZero);
// Update the oper and restore the value numbers.
- ValueNum vnCns = relop->gtVNPair.GetConservative();
- ValueNum vnLib = relop->gtVNPair.GetLiberal();
- bool evalsToTrue = vnStore->CoercedConstantValue<INT64>(vnCns) != 0;
+ ValueNum vnCns = relop->gtVNPair.GetConservative();
+ ValueNum vnLib = relop->gtVNPair.GetLiberal();
+ bool evalsToTrue = vnStore->CoercedConstantValue<INT64>(vnCns) != 0;
relop->SetOper(evalsToTrue ? GT_EQ : GT_NE);
relop->gtVNPair = ValueNumPair(vnLib, vnCns);
@@ -4594,7 +4672,7 @@ GenTreePtr Compiler::optVNConstantPropOnJTrue(BasicBlock* block, GenTreePtr stmt
// tree - The currently visited tree node.
// stmt - The statement node in which the "tree" is present.
// block - The block that contains the statement that contains the tree.
-//
+//
// Return Value:
// Returns the standard visitor walk result.
//
@@ -4614,44 +4692,61 @@ Compiler::fgWalkResult Compiler::optVNConstantPropCurStmt(BasicBlock* block, Gen
switch (tree->OperGet())
{
- // Make sure we have an R-value.
- case GT_ADD: case GT_SUB: case GT_DIV: case GT_MOD: case GT_UDIV: case GT_UMOD: case GT_MULHI:
- case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GE: case GT_GT:
- case GT_OR: case GT_XOR: case GT_AND:
- case GT_LSH: case GT_RSH: case GT_RSZ:
- case GT_NEG: case GT_CHS: case GT_CAST:
- case GT_INTRINSIC:
- break;
+ // Make sure we have an R-value.
+ case GT_ADD:
+ case GT_SUB:
+ case GT_DIV:
+ case GT_MOD:
+ case GT_UDIV:
+ case GT_UMOD:
+ case GT_MULHI:
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_NEG:
+ case GT_CHS:
+ case GT_CAST:
+ case GT_INTRINSIC:
+ break;
- case GT_JTRUE:
- break;
+ case GT_JTRUE:
+ break;
- case GT_MUL:
- // Don't transform long multiplies.
- if (tree->gtFlags & GTF_MUL_64RSLT)
- {
- return WALK_SKIP_SUBTREES;
- }
- break;
+ case GT_MUL:
+ // Don't transform long multiplies.
+ if (tree->gtFlags & GTF_MUL_64RSLT)
+ {
+ return WALK_SKIP_SUBTREES;
+ }
+ break;
- case GT_LCL_VAR:
- // Make sure the local variable is an R-value.
- if ((tree->gtFlags & (GTF_VAR_DEF | GTF_DONT_CSE)))
- {
- return WALK_CONTINUE;
- }
+ case GT_LCL_VAR:
+ // Make sure the local variable is an R-value.
+ if ((tree->gtFlags & (GTF_VAR_DEF | GTF_DONT_CSE)))
+ {
+ return WALK_CONTINUE;
+ }
#if FEATURE_ANYCSE
- // Let's not conflict with CSE (to save the movw/movt).
- if (lclNumIsCSE(tree->AsLclVarCommon()->GetLclNum()))
- {
- return WALK_CONTINUE;
- }
-#endif
- break;
+ // Let's not conflict with CSE (to save the movw/movt).
+ if (lclNumIsCSE(tree->AsLclVarCommon()->GetLclNum()))
+ {
+ return WALK_CONTINUE;
+ }
+#endif
+ break;
- default:
- // Unknown node, continue to walk.
- return WALK_CONTINUE;
+ default:
+ // Unknown node, continue to walk.
+ return WALK_CONTINUE;
}
// Perform the constant propagation
@@ -4683,7 +4778,7 @@ Compiler::fgWalkResult Compiler::optVNConstantPropCurStmt(BasicBlock* block, Gen
// block - The block that contains the statement that contains the tree.
// stmt - The statement node in which the "tree" is present.
// tree - The currently visited tree node.
-//
+//
// Return Value:
// None.
//
@@ -4694,7 +4789,7 @@ Compiler::fgWalkResult Compiler::optVNConstantPropCurStmt(BasicBlock* block, Gen
//
void Compiler::optVnNonNullPropCurStmt(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree)
{
- ASSERT_TP empty = BitVecOps::MakeEmpty(apTraits);
+ ASSERT_TP empty = BitVecOps::MakeEmpty(apTraits);
GenTreePtr newTree = nullptr;
if (tree->OperGet() == GT_CALL)
{
@@ -4729,8 +4824,8 @@ void Compiler::optVnNonNullPropCurStmt(BasicBlock* block, GenTreePtr stmt, GenTr
/* static */
Compiler::fgWalkResult Compiler::optVNAssertionPropCurStmtVisitor(GenTreePtr* ppTree, fgWalkData* data)
{
- VNAssertionPropVisitorInfo* pData = (VNAssertionPropVisitorInfo*) data->pCallbackData;
- Compiler* pThis = pData->pThis;
+ VNAssertionPropVisitorInfo* pData = (VNAssertionPropVisitorInfo*)data->pCallbackData;
+ Compiler* pThis = pData->pThis;
pThis->optVnNonNullPropCurStmt(pData->block, pData->stmt, *ppTree);
@@ -4790,7 +4885,7 @@ void Compiler::optAssertionPropMain()
return;
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** In optAssertionPropMain()\n");
printf("Blocks/Trees at start of phase\n");
@@ -4850,7 +4945,7 @@ void Compiler::optAssertionPropMain()
}
}
- if (!optAssertionCount)
+ if (!optAssertionCount)
{
return;
}
@@ -4860,11 +4955,11 @@ void Compiler::optAssertionPropMain()
#endif
// Allocate the bits for the predicate sensitive dataflow analysis
- bbJtrueAssertionOut = optInitAssertionDataflowFlags();
+ bbJtrueAssertionOut = optInitAssertionDataflowFlags();
ASSERT_TP* jumpDestGen = optComputeAssertionGen();
// Modified dataflow algorithm for available expressions.
- DataFlow flow(this);
+ DataFlow flow(this);
AssertionPropFlowCallback ap(this, bbJtrueAssertionOut, jumpDestGen);
flow.ForwardAnalysis(ap);
@@ -4874,9 +4969,8 @@ void Compiler::optAssertionPropMain()
optImpliedByTypeOfAssertions(block->bbAssertionIn);
}
-
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("\n");
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
@@ -4887,7 +4981,7 @@ void Compiler::optAssertionPropMain()
if (block->bbJumpKind == BBJ_COND)
{
printf(" => BB%02u", block->bbJumpDest->bbNum);
- printf(" valueOut= %s", BitVecOps::ToString(apTraits, bbJtrueAssertionOut[block->bbNum]));
+ printf(" valueOut= %s", BitVecOps::ToString(apTraits, bbJtrueAssertionOut[block->bbNum]));
}
}
printf("\n");
@@ -4907,7 +5001,7 @@ void Compiler::optAssertionPropMain()
}
// Make the current basic block address available globally.
- compCurBB = block;
+ compCurBB = block;
fgRemoveRestOfBlock = false;
// Walk the statement trees in this basic block
@@ -4928,13 +5022,13 @@ void Compiler::optAssertionPropMain()
// removes the current stmt.
GenTreePtr prev = (stmt == block->firstStmt()) ? nullptr : stmt->gtPrev;
- optAssertionPropagatedCurrentStmt = false; // set to true if a assertion propagation took place
- // and thus we must morph, set order, re-link
+ optAssertionPropagatedCurrentStmt = false; // set to true if a assertion propagation took place
+ // and thus we must morph, set order, re-link
for (GenTreePtr tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
{
JITDUMP("Propagating %s assertions for BB%02d, stmt [%06d], tree [%06d], tree -> %d\n",
- BitVecOps::ToString(apTraits, assertions),
- block->bbNum, dspTreeID(stmt), dspTreeID(tree), tree->GetAssertion());
+ BitVecOps::ToString(apTraits, assertions), block->bbNum, dspTreeID(stmt), dspTreeID(tree),
+ tree->GetAssertion());
GenTreePtr newTree = optAssertionProp(assertions, tree, stmt);
if (newTree)
@@ -4947,7 +5041,7 @@ void Compiler::optAssertionPropMain()
GenTreeLclVarCommon* lclVarTree = nullptr;
// If this tree makes an assertion - make it available.
- if (tree->HasAssertion())
+ if (tree->HasAssertion())
{
BitVecOps::AddElemD(apTraits, assertions, tree->GetAssertion() - 1);
@@ -4956,7 +5050,7 @@ void Compiler::optAssertionPropMain()
}
}
- if (optAssertionPropagatedCurrentStmt)
+ if (optAssertionPropagatedCurrentStmt)
{
#ifdef DEBUG
if (verbose)
@@ -4978,18 +5072,18 @@ void Compiler::optAssertionPropMain()
// Check if propagation removed statements starting from current stmt.
// If so, advance to the next good statement.
- GenTreePtr nextStmt = (prev == nullptr) ? block->firstStmt() : prev->gtNext;
- stmt = (stmt == nextStmt) ? stmt->gtNext : nextStmt;
+ GenTreePtr nextStmt = (prev == nullptr) ? block->firstStmt() : prev->gtNext;
+ stmt = (stmt == nextStmt) ? stmt->gtNext : nextStmt;
}
- optAssertionPropagatedCurrentStmt = false; // clear it back as we are done with stmts.
+ optAssertionPropagatedCurrentStmt = false; // clear it back as we are done with stmts.
}
-
+
#ifdef DEBUG
fgDebugCheckBBlist();
fgDebugCheckLinks();
#endif
- // Assertion propagation may have changed the reference counts
+ // Assertion propagation may have changed the reference counts
// We need to resort the variable table
if (optAssertionPropagated)
diff --git a/src/jit/bitset.cpp b/src/jit/bitset.cpp
index 3b9135c730..90ef253199 100644
--- a/src/jit/bitset.cpp
+++ b/src/jit/bitset.cpp
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
@@ -20,32 +19,26 @@ unsigned BitSetSupport::BitCountTable[16] = { 0, 1, 1, 2,
// clang-format on
#ifdef DEBUG
-template<typename BitSetType,
- unsigned Uniq,
- typename Env,
- typename BitSetTraits>
+template <typename BitSetType, unsigned Uniq, typename Env, typename BitSetTraits>
void BitSetSupport::RunTests(Env env)
{
- typedef BitSetOps<BitSetType,
- Uniq,
- Env,
- BitSetTraits> LclBitSetOps;
+ typedef BitSetOps<BitSetType, Uniq, Env, BitSetTraits> LclBitSetOps;
// The tests require that the Size is at least 52...
assert(BitSetTraits::GetSize(env) > 51);
BitSetType bs1;
LclBitSetOps::AssignNoCopy(env, bs1, LclBitSetOps::MakeEmpty(env));
- unsigned bs1bits[] = { 0, 10, 44, 45 };
+ unsigned bs1bits[] = {0, 10, 44, 45};
LclBitSetOps::AddElemD(env, bs1, bs1bits[0]);
LclBitSetOps::AddElemD(env, bs1, bs1bits[1]);
LclBitSetOps::AddElemD(env, bs1, bs1bits[2]);
LclBitSetOps::AddElemD(env, bs1, bs1bits[3]);
typename LclBitSetOps::Iter bsi(env, bs1);
- unsigned bitNum = 0;
- unsigned k = 0;
+ unsigned bitNum = 0;
+ unsigned k = 0;
while (bsi.NextElem(env, &bitNum))
{
assert(bitNum == bs1bits[k]);
@@ -59,17 +52,17 @@ void BitSetSupport::RunTests(Env env)
BitSetType bs2;
LclBitSetOps::AssignNoCopy(env, bs2, LclBitSetOps::MakeEmpty(env));
- unsigned bs2bits[] = { 0, 10, 50, 51 };
+ unsigned bs2bits[] = {0, 10, 50, 51};
LclBitSetOps::AddElemD(env, bs2, bs2bits[0]);
LclBitSetOps::AddElemD(env, bs2, bs2bits[1]);
LclBitSetOps::AddElemD(env, bs2, bs2bits[2]);
LclBitSetOps::AddElemD(env, bs2, bs2bits[3]);
- unsigned unionBits[] = { 0, 10, 44, 45, 50, 51 };
+ unsigned unionBits[] = {0, 10, 44, 45, 50, 51};
BitSetType bsU12;
LclBitSetOps::AssignNoCopy(env, bsU12, LclBitSetOps::Union(env, bs1, bs2));
- k = 0;
- bsi = typename LclBitSetOps::Iter(env, bsU12);
+ k = 0;
+ bsi = typename LclBitSetOps::Iter(env, bsU12);
bitNum = 0;
while (bsi.NextElem(env, &bitNum))
{
@@ -78,9 +71,9 @@ void BitSetSupport::RunTests(Env env)
}
assert(k == 6);
- k = 0;
+ k = 0;
typename LclBitSetOps::Iter bsiL = typename LclBitSetOps::Iter(env, bsU12);
- bitNum = 0;
+ bitNum = 0;
while (bsiL.NextElem(env, &bitNum))
{
assert(bitNum == unionBits[k]);
@@ -88,11 +81,11 @@ void BitSetSupport::RunTests(Env env)
}
assert(k == 6);
- unsigned intersectionBits[] = { 0, 10 };
+ unsigned intersectionBits[] = {0, 10};
BitSetType bsI12;
LclBitSetOps::AssignNoCopy(env, bsI12, LclBitSetOps::Intersection(env, bs1, bs2));
- k = 0;
- bsi = typename LclBitSetOps::Iter(env, bsI12);
+ k = 0;
+ bsi = typename LclBitSetOps::Iter(env, bsI12);
bitNum = 0;
while (bsi.NextElem(env, &bitNum))
{
@@ -105,65 +98,81 @@ void BitSetSupport::RunTests(Env env)
class TestBitSetTraits
{
public:
- static IAllocator* GetAllocator(IAllocator* alloc) { return alloc; }
- static unsigned GetSize(IAllocator* alloc) { return 64; }
+ static IAllocator* GetAllocator(IAllocator* alloc)
+ {
+ return alloc;
+ }
+ static unsigned GetSize(IAllocator* alloc)
+ {
+ return 64;
+ }
static unsigned GetArrSize(IAllocator* alloc, unsigned elemSize)
{
assert(elemSize == sizeof(size_t));
- return (64/8)/sizeof(size_t);
+ return (64 / 8) / sizeof(size_t);
+ }
+ static unsigned GetEpoch(IAllocator* alloc)
+ {
+ return 0;
}
- static unsigned GetEpoch(IAllocator* alloc) { return 0; }
};
void BitSetSupport::TestSuite(IAllocator* env)
{
BitSetSupport::RunTests<UINT64, BSUInt64, IAllocator*, TestBitSetTraits>(env);
BitSetSupport::RunTests<BitSetShortLongRep, BSShortLong, IAllocator*, TestBitSetTraits>(env);
- BitSetSupport::RunTests<BitSetUint64<IAllocator*, TestBitSetTraits>, BSUInt64Class, IAllocator*, TestBitSetTraits>(env);
+ BitSetSupport::RunTests<BitSetUint64<IAllocator*, TestBitSetTraits>, BSUInt64Class, IAllocator*, TestBitSetTraits>(
+ env);
}
#endif
-const char* BitSetSupport::OpNames[BitSetSupport::BSOP_NUMOPS] =
- {
+const char* BitSetSupport::OpNames[BitSetSupport::BSOP_NUMOPS] = {
#define BSOPNAME(x) #x,
#include "bitsetops.h"
#undef BSOPNAME
- };
+};
void BitSetSupport::BitSetOpCounter::RecordOp(BitSetSupport::Operation op)
{
- OpCounts[op]++; TotalOps++;
+ OpCounts[op]++;
+ TotalOps++;
if ((TotalOps % 1000000) == 0)
{
- if (OpOutputFile == NULL)
+ if (OpOutputFile == nullptr)
{
OpOutputFile = fopen(m_fileName, "a");
}
fprintf(OpOutputFile, "@ %d total ops.\n", TotalOps);
unsigned OpOrder[BSOP_NUMOPS];
- bool OpOrdered[BSOP_NUMOPS];
+ bool OpOrdered[BSOP_NUMOPS];
// First sort by total operations (into an index permutation array, using a simple n^2 sort).
- for (unsigned k = 0; k < BitSetSupport::BSOP_NUMOPS; k++) OpOrdered[k] = false;
for (unsigned k = 0; k < BitSetSupport::BSOP_NUMOPS; k++)
{
- bool candSet = false;
- unsigned cand = 0;
+ OpOrdered[k] = false;
+ }
+ for (unsigned k = 0; k < BitSetSupport::BSOP_NUMOPS; k++)
+ {
+ bool candSet = false;
+ unsigned cand = 0;
unsigned candInd = 0;
for (unsigned j = 0; j < BitSetSupport::BSOP_NUMOPS; j++)
{
- if (OpOrdered[j]) continue;
+ if (OpOrdered[j])
+ {
+ continue;
+ }
if (!candSet || OpCounts[j] > cand)
{
candInd = j;
- cand = OpCounts[j];
+ cand = OpCounts[j];
candSet = true;
}
}
assert(candSet);
- OpOrder[k] = candInd;
+ OpOrder[k] = candInd;
OpOrdered[candInd] = true;
}
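
The n^2 sort above builds a permutation (OpOrder) rather than moving the counters themselves: each pass finds the largest not-yet-placed count and records its index. A minimal standalone version of the same idea (illustrative names, not from the commit):

#include <cassert>
#include <cstddef>
#include <vector>

// Returns indices of 'counts' ordered from largest to smallest count,
// without reordering 'counts' itself (a simple O(n^2) selection sort).
std::vector<std::size_t> SortIndicesByCountDescending(const std::vector<unsigned>& counts)
{
    const std::size_t        n = counts.size();
    std::vector<bool>        placed(n, false);
    std::vector<std::size_t> order(n);

    for (std::size_t k = 0; k < n; k++)
    {
        std::size_t best    = 0;
        bool        bestSet = false;
        for (std::size_t j = 0; j < n; j++)
        {
            if (placed[j])
            {
                continue;
            }
            if (!bestSet || counts[j] > counts[best])
            {
                best    = j;
                bestSet = true;
            }
        }
        order[k]     = best;
        placed[best] = true;
    }
    return order;
}

int main()
{
    std::vector<unsigned>    counts = {5, 42, 7};
    std::vector<std::size_t> order  = SortIndicesByCountDescending(counts);
    assert(order[0] == 1 && order[1] == 2 && order[2] == 0);
    return 0;
}
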
diff --git a/src/jit/bitset.h b/src/jit/bitset.h
index d11eb72413..4ecb2fc0d4 100644
--- a/src/jit/bitset.h
+++ b/src/jit/bitset.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
// A set of integers in the range [0..N], for some given N.
/*****************************************************************************/
@@ -15,10 +14,7 @@
class BitSetSupport
{
#ifdef DEBUG
- template<typename BitSetType,
- unsigned Brand,
- typename Env,
- typename BitSetTraits>
+ template <typename BitSetType, unsigned Brand, typename Env, typename BitSetTraits>
static void RunTests(Env env);
#endif
@@ -29,12 +25,12 @@ public:
static unsigned BitCountTable[16];
// Returns the number of 1 bits in the binary representation of "u".
- template<typename T>
+ template <typename T>
static unsigned CountBitsInIntegral(T u)
{
unsigned res = 0;
// We process "u" in 4-bit nibbles, hence the "*2" below.
- for (int i = 0; i < sizeof(T)*2; i++)
+ for (int i = 0; i < sizeof(T) * 2; i++)
{
res += BitCountTable[u & 0xf];
u >>= 4;
@@ -58,12 +54,13 @@ public:
class BitSetOpCounter
{
- unsigned TotalOps;
- unsigned OpCounts[BSOP_NUMOPS];
+ unsigned TotalOps;
+ unsigned OpCounts[BSOP_NUMOPS];
const char* m_fileName;
- FILE* OpOutputFile;
- public:
- BitSetOpCounter(const char* fileName) : TotalOps(0), m_fileName(fileName), OpOutputFile(NULL)
+ FILE* OpOutputFile;
+
+ public:
+ BitSetOpCounter(const char* fileName) : TotalOps(0), m_fileName(fileName), OpOutputFile(nullptr)
{
for (unsigned i = 0; i < BSOP_NUMOPS; i++)
{
@@ -75,15 +72,15 @@ public:
};
};
-template <> FORCEINLINE
-unsigned BitSetSupport::CountBitsInIntegral<unsigned>(unsigned c)
+template <>
+FORCEINLINE unsigned BitSetSupport::CountBitsInIntegral<unsigned>(unsigned c)
{
// Make sure we're 32 bit.
assert(sizeof(unsigned) == 4);
- c = (c & 0x55555555) + ((c >> 1) & 0x55555555);
- c = (c & 0x33333333) + ((c >> 2) & 0x33333333);
- c = (c & 0x0f0f0f0f) + ((c >> 4) & 0x0f0f0f0f);
- c = (c & 0x00ff00ff) + ((c >> 8) & 0x00ff00ff);
+ c = (c & 0x55555555) + ((c >> 1) & 0x55555555);
+ c = (c & 0x33333333) + ((c >> 2) & 0x33333333);
+ c = (c & 0x0f0f0f0f) + ((c >> 4) & 0x0f0f0f0f);
+ c = (c & 0x00ff00ff) + ((c >> 8) & 0x00ff00ff);
c = (c & 0x0000ffff) + ((c >> 16) & 0x0000ffff);
return c;
}
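
The specialization above is the classic SWAR population count: each step adds adjacent fields in place, doubling the field width (1-bit, 2-bit, 4-bit, 8-bit, 16-bit) until the whole count sits in the low half. A small standalone check against a naive loop (illustrative, not part of the commit):

#include <cassert>
#include <cstdint>

// Same pairwise-accumulation scheme as the specialization above.
unsigned PopCountSwar(std::uint32_t c)
{
    c = (c & 0x55555555) + ((c >> 1) & 0x55555555);
    c = (c & 0x33333333) + ((c >> 2) & 0x33333333);
    c = (c & 0x0f0f0f0f) + ((c >> 4) & 0x0f0f0f0f);
    c = (c & 0x00ff00ff) + ((c >> 8) & 0x00ff00ff);
    c = (c & 0x0000ffff) + ((c >> 16) & 0x0000ffff);
    return c;
}

// Naive reference: count bits one at a time.
unsigned PopCountNaive(std::uint32_t c)
{
    unsigned res = 0;
    for (; c != 0; c >>= 1)
    {
        res += c & 1;
    }
    return res;
}

int main()
{
    assert(PopCountSwar(0x00000000u) == 0);
    assert(PopCountSwar(0xFFFFFFFFu) == 32);
    assert(PopCountSwar(0x80000001u) == 2);
    assert(PopCountSwar(0xDEADBEEFu) == PopCountNaive(0xDEADBEEFu));
    return 0;
}
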
@@ -112,14 +109,14 @@ unsigned BitSetSupport::CountBitsInIntegral<unsigned>(unsigned c)
// function, which makes a copy of the referent data structure in the indirect case, and an
// "AssignNoCopy" version, which does not, and instead introduces sharing in the indirect case.
// Obviously, the latter should be used with care.
-//
+//
// (Orthogonally, there are also further versions of assignment that differ in whether the "rhs"
// argument may be uninitialized. The normal assignment operation requires the "rhs" argument not be
// uninitialized; "AssignNoCopy" has the same requirement. The "AssignAllowUninitRhs" version allows
// the "rhs" to be the uninit value, and sets the "lhs" to be uninitialized in that case.)
// This class has static methods that provide the operations on BitSets.
-//
+//
// An instantiation requires:
// typename BitSetType: the representation type of this kind of BitSet.
//
@@ -141,7 +138,7 @@ unsigned BitSetSupport::CountBitsInIntegral<unsigned>(unsigned c)
// "GetSize". A given BitSet implementation must call
// this with only one constant value. Thus, and "Env"
// may compute this result when GetSize changes.
-//
+//
// static unsigned GetEpoch(Env): the current epoch.
//
// (For many instantiations, BitSetValueArgType and BitSetValueRetType will be the same as BitSetType; in cases where
@@ -154,18 +151,15 @@ unsigned BitSetSupport::CountBitsInIntegral<unsigned>(unsigned c)
// yielded member.
//
// Finally, it should export two further types:
-//
+//
// ValArgType: the type used to pass a BitSet as a by-value argument.
// RetValType: the type that should be used to return a BitSet.
-//
+//
// For many instantiations, these can be identical to BitSetTypes. When the representation type is a class,
// however, ValArgType may need to be "const BitSetType&", and RetValArg may need to be a helper class, if the
// class hides default copy constructors and assignment operators to detect erroneous usage.
//
-template<typename BitSetType,
- unsigned Brand,
- typename Env,
- typename BitSetTraits>
+template <typename BitSetType, unsigned Brand, typename Env, typename BitSetTraits>
class BitSetOps
{
#if 0
@@ -278,25 +272,22 @@ class BitSetOps
typename ValArgType;
typename RetValType;
-#endif // 0 -- the above is #if'd out, since it's really just an extended comment on what an instantiation
+#endif // 0 -- the above is #if'd out, since it's really just an extended comment on what an instantiation
// should provide.
};
-template<typename BitSetType,
- unsigned Brand,
- typename Env,
- typename BitSetTraits,
- typename BitSetValueArgType,
- typename BitSetValueRetType,
- typename BaseIter>
+template <typename BitSetType,
+ unsigned Brand,
+ typename Env,
+ typename BitSetTraits,
+ typename BitSetValueArgType,
+ typename BitSetValueRetType,
+ typename BaseIter>
class BitSetOpsWithCounter
{
- typedef BitSetOps<BitSetType,
- Brand,
- Env,
- BitSetTraits> BSO;
+ typedef BitSetOps<BitSetType, Brand, Env, BitSetTraits> BSO;
- public:
+public:
static BitSetValueRetType UninitVal()
{
return BSO::UninitVal();
@@ -433,13 +424,17 @@ class BitSetOpsWithCounter
}
#endif
- class Iter {
+ class Iter
+ {
BaseIter m_iter;
- public:
- Iter(Env env, BitSetValueArgType bs) : m_iter(env, bs) {}
+ public:
+ Iter(Env env, BitSetValueArgType bs) : m_iter(env, bs)
+ {
+ }
- bool NextElem(Env env, unsigned* pElem) {
+ bool NextElem(Env env, unsigned* pElem)
+ {
BitSetTraits::GetOpCounter(env)->RecordOp(BitSetSupport::BSOP_NextBit);
return m_iter.NextElem(env, pElem);
}
@@ -448,12 +443,10 @@ class BitSetOpsWithCounter
// We define symbolic names for the various bitset implementations available, to allow choices between them.
-#define BSUInt64 0
-#define BSShortLong 1
+#define BSUInt64 0
+#define BSShortLong 1
#define BSUInt64Class 2
-
-
/*****************************************************************************/
#endif // _BITSET_H_
/*****************************************************************************/
diff --git a/src/jit/bitsetasshortlong.h b/src/jit/bitsetasshortlong.h
index d81872eba3..ec437e189c 100644
--- a/src/jit/bitsetasshortlong.h
+++ b/src/jit/bitsetasshortlong.h
@@ -2,9 +2,8 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
// A set of integers in the range [0..N], for some N defined by the "Env" (via "BitSetTraits").
-//
+//
// Represented as a pointer-sized item. If N bits can fit in this item, the representation is "direct"; otherwise,
// the item is a pointer to an array of K size_t's, where K is the number of size_t's necessary to hold N bits.
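
In other words, the representation choice depends only on the size N supplied by the traits: on a typical 64-bit host, N of up to 64 bits is stored directly in the pointer-sized word, and anything larger goes through a heap array of K words. A quick sketch of that arithmetic (hypothetical sizes, not taken from the commit):

#include <cstddef>
#include <iostream>

int main()
{
    const std::size_t bitsInSizeT = sizeof(std::size_t) * 8; // 64 on a typical 64-bit host

    for (std::size_t n : {std::size_t(1), std::size_t(64), std::size_t(65), std::size_t(200)})
    {
        if (n <= bitsInSizeT)
        {
            std::cout << "N = " << n << ": direct, held in the pointer-sized word itself\n";
        }
        else
        {
            std::size_t k = (n + bitsInSizeT - 1) / bitsInSizeT; // K = ceil(N / bits per word)
            std::cout << "N = " << n << ": array of K = " << k << " size_t words\n";
        }
    }
    return 0;
}
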
@@ -16,14 +15,15 @@
typedef size_t* BitSetShortLongRep;
-template<typename Env, typename BitSetTraits>
-class BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>
+template <typename Env, typename BitSetTraits>
+class BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>
{
public:
typedef BitSetShortLongRep Rep;
+
private:
static const unsigned BitsInSizeT = sizeof(size_t) * BitSetSupport::BitsInByte;
@@ -48,15 +48,14 @@ private:
static BitSetShortLongRep MakeFullArrayBits(Env env);
static bool IsMemberLong(Env env, BitSetShortLongRep bs, unsigned i);
static bool EqualLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2);
- static bool IsSubsetLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2);
- static bool IsEmptyIntersectionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2);
+ static bool IsSubsetLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2);
+ static bool IsEmptyIntersectionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2);
static void IntersectionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2);
#ifdef DEBUG
static const char* ToStringLong(Env env, BitSetShortLongRep bs);
#endif
public:
-
inline static BitSetShortLongRep UninitVal()
{
return nullptr;
@@ -108,7 +107,6 @@ public:
}
}
-
static void AssignNoCopy(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs)
{
lhs = rhs;
@@ -118,7 +116,7 @@ public:
{
if (IsShort(env))
{
- bs = (BitSetShortLongRep)0;
+ bs = (BitSetShortLongRep) nullptr;
}
else
{
@@ -154,7 +152,7 @@ public:
static bool IsEmpty(Env env, BitSetShortLongRep bs)
{
- if (IsShort(env))
+ if (IsShort(env))
{
return bs == nullptr;
}
@@ -167,7 +165,7 @@ public:
static unsigned Count(Env env, BitSetShortLongRep bs)
{
- if (IsShort(env))
+ if (IsShort(env))
{
return BitSetSupport::CountBitsInIntegral(size_t(bs));
}
@@ -195,7 +193,7 @@ public:
UnionD(env, res, bs2);
return res;
}
-
+
static void DiffD(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2)
{
if (IsShort(env))
@@ -220,8 +218,8 @@ public:
if (IsShort(env))
{
size_t mask = ((size_t)1) << i;
- mask = ~mask;
- bs = (BitSetShortLongRep)(((size_t)bs) & mask);
+ mask = ~mask;
+ bs = (BitSetShortLongRep)(((size_t)bs) & mask);
}
else
{
@@ -242,7 +240,7 @@ public:
if (IsShort(env))
{
size_t mask = ((size_t)1) << i;
- bs = (BitSetShortLongRep)(((size_t)bs) | mask);
+ bs = (BitSetShortLongRep)(((size_t)bs) | mask);
}
else
{
@@ -334,14 +332,14 @@ public:
if (IsShort(env))
{
assert(sizeof(BitSetShortLongRep) == sizeof(size_t));
- IAllocator* alloc = BitSetTraits::GetDebugOnlyAllocator(env);
- const int CharsForSizeT = sizeof(size_t)*2;
- char * res = nullptr;
- const int ShortAllocSize = CharsForSizeT + 4;
- res = (char*)alloc->Alloc(ShortAllocSize);
- size_t bits = (size_t)bs;
- unsigned remaining = ShortAllocSize;
- char* ptr = res;
+ IAllocator* alloc = BitSetTraits::GetDebugOnlyAllocator(env);
+ const int CharsForSizeT = sizeof(size_t) * 2;
+ char* res = nullptr;
+ const int ShortAllocSize = CharsForSizeT + 4;
+ res = (char*)alloc->Alloc(ShortAllocSize);
+ size_t bits = (size_t)bs;
+ unsigned remaining = ShortAllocSize;
+ char* ptr = res;
if (sizeof(size_t) == sizeof(int64_t))
{
sprintf_s(ptr, remaining, "%016llX", bits);
@@ -397,29 +395,29 @@ public:
class Iter
{
BitSetShortLongRep m_bs; // The BitSet that we're iterating over.
- size_t m_bits; // The "current" bits remaining to be iterated over.
+ size_t m_bits; // The "current" bits remaining to be iterated over.
// In the "short" case, these are all the remaining bits.
- // In the "long" case, these are remaining bits in element "m_index";
+ // In the "long" case, these are remaining bits in element "m_index";
// these and the bits in the remaining elements comprise the remaining bits.
- unsigned m_index; // If "m_bs" uses the long (indirect) representation, the current index in the array.
+ unsigned m_index; // If "m_bs" uses the long (indirect) representation, the current index in the array.
// the index of the element in A(bs) that is currently being iterated.
- unsigned m_bitNum; // The number of bits that have already been iterated over (set or clear). If you
- // add this to the bit number of the next bit in "m_bits", you get the proper bit number of that
- // bit in "m_bs".
+ unsigned m_bitNum; // The number of bits that have already been iterated over (set or clear). If you
+ // add this to the bit number of the next bit in "m_bits", you get the proper bit number of that
+ // bit in "m_bs".
public:
- Iter(Env env, const BitSetShortLongRep& bs): m_bs(bs), m_bitNum(0)
+ Iter(Env env, const BitSetShortLongRep& bs) : m_bs(bs), m_bitNum(0)
{
if (BitSetOps::IsShort(env))
{
m_index = 0;
- m_bits = (size_t)bs;
+ m_bits = (size_t)bs;
}
else
{
assert(bs != BitSetOps::UninitVal());
m_index = 0;
- m_bits = bs[0];
+ m_bits = bs[0];
}
}
@@ -431,7 +429,7 @@ public:
for (;;)
{
DWORD nextBit;
- BOOL hasBit;
+ BOOL hasBit;
#ifdef _HOST_64BIT_
static_assert_no_msg(sizeof(size_t) == 8);
hasBit = BitScanForward64(&nextBit, m_bits);
@@ -445,7 +443,9 @@ public:
{
*pElem = m_bitNum + nextBit;
m_bitNum += nextBit + 1;
- m_bits >>= nextBit; m_bits >>= 1; // Have to do these separately -- if we have 0x80000000, nextBit == 31, and shifting by 32 bits does nothing.
+ m_bits >>= nextBit;
+ m_bits >>= 1; // Have to do these separately -- if we have 0x80000000, nextBit == 31, and shifting
+ // by 32 bits does nothing.
return true;
}
else
@@ -458,10 +458,13 @@ public:
else
{
m_index++;
- if (m_index == len) return false;
+ if (m_index == len)
+ {
+ return false;
+ }
// Otherwise...
m_bitNum = m_index * sizeof(size_t) * BitSetSupport::BitsInByte;
- m_bits = m_bs[m_index];
+ m_bits = m_bs[m_index];
continue;
}
}
@@ -475,73 +478,72 @@ public:
typedef size_t* RetValType;
};
-template<typename Env, typename BitSetTraits>
-void
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- AssignLong(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs)
+template <typename Env, typename BitSetTraits>
+void BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::AssignLong(Env env, BitSetShortLongRep& lhs, BitSetShortLongRep rhs)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
- for (unsigned i = 0; i < len; i++) lhs[i] = rhs[i];
+ for (unsigned i = 0; i < len; i++)
+ {
+ lhs[i] = rhs[i];
+ }
}
-template<typename Env, typename BitSetTraits>
-BitSetShortLongRep
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- MakeSingletonLong(Env env, unsigned bitNum)
+template <typename Env, typename BitSetTraits>
+BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::MakeSingletonLong(Env env, unsigned bitNum)
{
assert(!IsShort(env));
- BitSetShortLongRep res = MakeEmptyArrayBits(env);
- unsigned index = bitNum/BitsInSizeT;
- res[index] = ((size_t)1) << (bitNum % BitsInSizeT);
+ BitSetShortLongRep res = MakeEmptyArrayBits(env);
+ unsigned index = bitNum / BitsInSizeT;
+ res[index] = ((size_t)1) << (bitNum % BitsInSizeT);
return res;
}
-template<typename Env, typename BitSetTraits>
-BitSetShortLongRep
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- MakeCopyLong(Env env, BitSetShortLongRep bs)
+template <typename Env, typename BitSetTraits>
+BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::MakeCopyLong(Env env, BitSetShortLongRep bs)
{
assert(!IsShort(env));
BitSetShortLongRep res = MakeUninitArrayBits(env);
- unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
- for (unsigned i = 0; i < len; i++) res[i] = bs[i];
+ unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
+ for (unsigned i = 0; i < len; i++)
+ {
+ res[i] = bs[i];
+ }
return res;
}
-template<typename Env, typename BitSetTraits>
-bool
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- IsEmptyLong(Env env, BitSetShortLongRep bs)
+template <typename Env, typename BitSetTraits>
+bool BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::IsEmptyLong(Env env, BitSetShortLongRep bs)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
for (unsigned i = 0; i < len; i++)
{
- if (bs[i] != 0) return false;
+ if (bs[i] != 0)
+ {
+ return false;
+ }
}
return true;
}
-template<typename Env, typename BitSetTraits>
-unsigned
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- CountLong(Env env, BitSetShortLongRep bs)
+template <typename Env, typename BitSetTraits>
+unsigned BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::CountLong(Env env, BitSetShortLongRep bs)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
@@ -553,68 +555,64 @@ BitSetOps</*BitSetType*/BitSetShortLongRep,
return res;
}
-template<typename Env, typename BitSetTraits>
-void
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- UnionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2)
+template <typename Env, typename BitSetTraits>
+void BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::UnionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
- for (unsigned i = 0; i < len; i++) bs1[i] |= bs2[i];
+ for (unsigned i = 0; i < len; i++)
+ {
+ bs1[i] |= bs2[i];
+ }
}
-template<typename Env, typename BitSetTraits>
-void
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- DiffDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2)
+template <typename Env, typename BitSetTraits>
+void BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::DiffDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
- for (unsigned i = 0; i < len; i++) bs1[i] &= ~bs2[i];
+ for (unsigned i = 0; i < len; i++)
+ {
+ bs1[i] &= ~bs2[i];
+ }
}
-template<typename Env, typename BitSetTraits>
-void
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- AddElemDLong(Env env, BitSetShortLongRep& bs, unsigned i)
+template <typename Env, typename BitSetTraits>
+void BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::AddElemDLong(Env env, BitSetShortLongRep& bs, unsigned i)
{
assert(!IsShort(env));
- unsigned index = i/BitsInSizeT;
- size_t mask = ((size_t)1) << (i % BitsInSizeT);
+ unsigned index = i / BitsInSizeT;
+ size_t mask = ((size_t)1) << (i % BitsInSizeT);
bs[index] |= mask;
}
-template<typename Env, typename BitSetTraits>
-void
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- RemoveElemDLong(Env env, BitSetShortLongRep& bs, unsigned i)
+template <typename Env, typename BitSetTraits>
+void BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::RemoveElemDLong(Env env, BitSetShortLongRep& bs, unsigned i)
{
assert(!IsShort(env));
- unsigned index = i/BitsInSizeT;
- size_t mask = ((size_t)1) << (i % BitsInSizeT);
- mask = ~mask;
+ unsigned index = i / BitsInSizeT;
+ size_t mask = ((size_t)1) << (i % BitsInSizeT);
+ mask = ~mask;
bs[index] &= mask;
}
-template<typename Env, typename BitSetTraits>
-void
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- ClearDLong(Env env, BitSetShortLongRep& bs)
+template <typename Env, typename BitSetTraits>
+void BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::ClearDLong(Env env, BitSetShortLongRep& bs)
{
assert(!IsShort(env));
// Recall that ClearD does *not* require "bs" to be of the current epoch.
@@ -622,163 +620,169 @@ BitSetOps</*BitSetType*/BitSetShortLongRep,
bs = MakeEmptyArrayBits(env);
}
-template<typename Env, typename BitSetTraits>
-BitSetShortLongRep
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- MakeUninitArrayBits(Env env)
+template <typename Env, typename BitSetTraits>
+BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::MakeUninitArrayBits(Env env)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
assert(len > 1); // Or else would not require an array.
- return (BitSetShortLongRep)(BitSetTraits::GetAllocator(env)->Alloc(len*sizeof(size_t)));
+ return (BitSetShortLongRep)(BitSetTraits::GetAllocator(env)->Alloc(len * sizeof(size_t)));
}
-template<typename Env, typename BitSetTraits>
-BitSetShortLongRep
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- MakeEmptyArrayBits(Env env)
+template <typename Env, typename BitSetTraits>
+BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::MakeEmptyArrayBits(Env env)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
assert(len > 1); // Or else would not require an array.
- BitSetShortLongRep res = (BitSetShortLongRep)(BitSetTraits::GetAllocator(env)->Alloc(len*sizeof(size_t)));
- for (unsigned i = 0; i < len; i++) res[i] = 0;
+ BitSetShortLongRep res = (BitSetShortLongRep)(BitSetTraits::GetAllocator(env)->Alloc(len * sizeof(size_t)));
+ for (unsigned i = 0; i < len; i++)
+ {
+ res[i] = 0;
+ }
return res;
}
-template<typename Env, typename BitSetTraits>
-BitSetShortLongRep
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- MakeFullArrayBits(Env env)
+template <typename Env, typename BitSetTraits>
+BitSetShortLongRep BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::MakeFullArrayBits(Env env)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
assert(len > 1); // Or else would not require an array.
- BitSetShortLongRep res = (BitSetShortLongRep)(BitSetTraits::GetAllocator(env)->Alloc(len*sizeof(size_t)));
- for (unsigned i = 0; i < len-1; i++) res[i] = size_t(-1);
+ BitSetShortLongRep res = (BitSetShortLongRep)(BitSetTraits::GetAllocator(env)->Alloc(len * sizeof(size_t)));
+ for (unsigned i = 0; i < len - 1; i++)
+ {
+ res[i] = size_t(-1);
+ }
// Start with all ones, shift in zeros in the last elem.
- unsigned lastElemBits = (BitSetTraits::GetSize(env)-1) % BitsInSizeT + 1;
- res[len-1] = (size_t(-1) >> (BitsInSizeT - lastElemBits));
+ unsigned lastElemBits = (BitSetTraits::GetSize(env) - 1) % BitsInSizeT + 1;
+ res[len - 1] = (size_t(-1) >> (BitsInSizeT - lastElemBits));
return res;
}
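
The expression (GetSize(env) - 1) % BitsInSizeT + 1 above is what keeps the last array element correct when the set size is an exact multiple of the word width: a plain modulo would report 0 remaining bits and leave the last word empty. Worked numbers for a 64-bit word (illustrative only, not from the commit):

#include <cassert>
#include <cstdint>

int main()
{
    const unsigned bitsInSizeT = 64;

    // N = 70 bits -> two words; the last word keeps only (70 - 1) % 64 + 1 = 6 bits.
    unsigned lastElemBits = (70 - 1) % bitsInSizeT + 1;
    assert(lastElemBits == 6);
    assert((std::uint64_t(-1) >> (bitsInSizeT - lastElemBits)) == 0x3F); // low 6 bits set

    // N = 128 bits -> an exact multiple of 64; "% ... + 1" yields a completely full
    // last word, where a plain "N % 64" would have produced 0 bits (and a wrong, empty word).
    lastElemBits = (128 - 1) % bitsInSizeT + 1;
    assert(lastElemBits == 64);
    return 0;
}
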
-template<typename Env, typename BitSetTraits>
-bool
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- IsMemberLong(Env env, BitSetShortLongRep bs, unsigned i)
+template <typename Env, typename BitSetTraits>
+bool BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::IsMemberLong(Env env, BitSetShortLongRep bs, unsigned i)
{
assert(!IsShort(env));
- unsigned index = i/BitsInSizeT;
+ unsigned index = i / BitsInSizeT;
unsigned bitInElem = (i % BitsInSizeT);
- size_t mask = ((size_t)1) << bitInElem;
+ size_t mask = ((size_t)1) << bitInElem;
return (bs[index] & mask) != 0;
}
-template<typename Env, typename BitSetTraits>
-void
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- IntersectionDLong(Env env, BitSetShortLongRep& bs1, BitSetShortLongRep bs2)
+template <typename Env, typename BitSetTraits>
+void BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::IntersectionDLong(Env env,
+ BitSetShortLongRep& bs1,
+ BitSetShortLongRep bs2)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
- for (unsigned i = 0; i < len; i++) bs1[i] &= bs2[i];
+ for (unsigned i = 0; i < len; i++)
+ {
+ bs1[i] &= bs2[i];
+ }
}
-template<typename Env, typename BitSetTraits>
-bool
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- IsEmptyIntersectionLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2)
+template <typename Env, typename BitSetTraits>
+bool BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::IsEmptyIntersectionLong(Env env,
+ BitSetShortLongRep bs1,
+ BitSetShortLongRep bs2)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
for (unsigned i = 0; i < len; i++)
{
- if ((bs1[i] & bs2[i]) != 0) return false;
+ if ((bs1[i] & bs2[i]) != 0)
+ {
+ return false;
+ }
}
return true;
}
-template<typename Env, typename BitSetTraits>
-bool
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- EqualLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2)
+template <typename Env, typename BitSetTraits>
+bool BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::EqualLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
for (unsigned i = 0; i < len; i++)
{
- if (bs1[i] != bs2[i]) return false;
+ if (bs1[i] != bs2[i])
+ {
+ return false;
+ }
}
return true;
}
-template<typename Env, typename BitSetTraits>
-bool
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- IsSubsetLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2)
+template <typename Env, typename BitSetTraits>
+bool BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::IsSubsetLong(Env env, BitSetShortLongRep bs1, BitSetShortLongRep bs2)
{
assert(!IsShort(env));
unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
for (unsigned i = 0; i < len; i++)
{
- if ((bs1[i] & bs2[i]) != bs1[i]) return false;
+ if ((bs1[i] & bs2[i]) != bs1[i])
+ {
+ return false;
+ }
}
return true;
}
#ifdef DEBUG
-template<typename Env, typename BitSetTraits>
-const char*
-BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>::
- ToStringLong(Env env, BitSetShortLongRep bs)
+template <typename Env, typename BitSetTraits>
+const char* BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>::ToStringLong(Env env, BitSetShortLongRep bs)
{
assert(!IsShort(env));
- unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
- const int CharsForSizeT = sizeof(size_t)*2;
- unsigned allocSz = len * CharsForSizeT + 4;
- unsigned remaining = allocSz;
- IAllocator* alloc = BitSetTraits::GetDebugOnlyAllocator(env);
- char * res = (char*)alloc->Alloc(allocSz);
- char * temp = res;
+ unsigned len = BitSetTraits::GetArrSize(env, sizeof(size_t));
+ const int CharsForSizeT = sizeof(size_t) * 2;
+ unsigned allocSz = len * CharsForSizeT + 4;
+ unsigned remaining = allocSz;
+ IAllocator* alloc = BitSetTraits::GetDebugOnlyAllocator(env);
+ char* res = (char*)alloc->Alloc(allocSz);
+ char* temp = res;
for (unsigned i = len; 0 < i; i--)
{
- size_t bits = bs[i-1];
+ size_t bits = bs[i - 1];
for (unsigned bytesDone = 0; bytesDone < sizeof(size_t); bytesDone += sizeof(unsigned))
{
unsigned bits0 = (unsigned)bits;
sprintf_s(temp, remaining, "%08X", bits0);
temp += 8;
remaining -= 8;
- bytesDone += 4; assert(sizeof(unsigned) == 4);
+ bytesDone += 4;
+ assert(sizeof(unsigned) == 4);
// Doing this twice by 16, rather than once by 32, avoids warnings when size_t == unsigned.
- bits = bits >> 16; bits = bits >> 16;
+ bits = bits >> 16;
+ bits = bits >> 16;
}
}
return res;
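
The split shifts above (the nextBit-then-1 pair in the iterator and the twice-by-16 pair here) sidestep the C++ rule that shifting by the full width of the type is undefined and draws compiler warnings; shifting twice keeps each shift count strictly smaller than the width. A standalone illustration (not part of the commit):

#include <cstdint>
#include <iostream>

int main()
{
    std::uint32_t bits    = 0x80000000u;
    unsigned      nextBit = 31; // the only set bit

    // Undefined behavior (and a warning): shift count equal to the type width.
    // std::uint32_t bad = bits >> 32;

    // Safe: two shifts, each strictly less than 32.
    bits >>= nextBit;
    bits >>= 1;
    std::cout << bits << "\n"; // prints 0 -- the set has been fully consumed
    return 0;
}
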
diff --git a/src/jit/bitsetasuint64.h b/src/jit/bitsetasuint64.h
index 0eacca9df1..150f7e9d61 100644
--- a/src/jit/bitsetasuint64.h
+++ b/src/jit/bitsetasuint64.h
@@ -2,29 +2,28 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#ifndef bitSetAsUint64_DEFINED
#define bitSetAsUint64_DEFINED 1
#include "bitset.h"
-template<typename Env, typename BitSetTraits>
-class BitSetOps</*BitSetType*/UINT64,
- /*Brand*/BSUInt64,
- /*Env*/Env,
- /*BitSetTraits*/BitSetTraits>
+template <typename Env, typename BitSetTraits>
+class BitSetOps</*BitSetType*/ UINT64,
+ /*Brand*/ BSUInt64,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>
{
public:
typedef UINT64 Rep;
+
private:
static UINT64 Singleton(unsigned bitNum)
{
- assert(bitNum < sizeof(UINT64)*BitSetSupport::BitsInByte);
+ assert(bitNum < sizeof(UINT64) * BitSetSupport::BitsInByte);
return (UINT64)1 << bitNum;
}
public:
-
static void Assign(Env env, UINT64& lhs, UINT64 rhs)
{
lhs = rhs;
@@ -99,7 +98,7 @@ public:
static UINT64 RemoveElem(Env env, UINT64 bs1, unsigned i)
{
- return bs1 & ~Singleton(i);
+ return bs1 & ~Singleton(i);
}
static void AddElemD(Env env, UINT64& bs1, unsigned i)
@@ -153,7 +152,7 @@ public:
static UINT64 MakeFull(Env env)
{
unsigned sz = BitSetTraits::GetSize(env);
- if (sz == sizeof(UINT64)*8)
+ if (sz == sizeof(UINT64) * 8)
{
return UINT64(-1);
}
@@ -166,23 +165,25 @@ public:
#ifdef DEBUG
static const char* ToString(Env env, UINT64 bs)
{
- IAllocator* alloc = BitSetTraits::GetDebugOnlyAllocator(env);
- const int CharsForUINT64 = sizeof(UINT64)*2;
- char * res = NULL;
- const int AllocSize = CharsForUINT64 + 4;
- res = (char*)alloc->Alloc(AllocSize);
- UINT64 bits = bs;
- unsigned remaining = AllocSize;
- char* ptr = res;
+ IAllocator* alloc = BitSetTraits::GetDebugOnlyAllocator(env);
+ const int CharsForUINT64 = sizeof(UINT64) * 2;
+ char* res = NULL;
+ const int AllocSize = CharsForUINT64 + 4;
+ res = (char*)alloc->Alloc(AllocSize);
+ UINT64 bits = bs;
+ unsigned remaining = AllocSize;
+ char* ptr = res;
for (unsigned bytesDone = 0; bytesDone < sizeof(UINT64); bytesDone += sizeof(unsigned))
{
unsigned bits0 = (unsigned)bits;
sprintf_s(ptr, remaining, "%08X", bits0);
ptr += 8;
remaining -= 8;
- bytesDone += 4; assert(sizeof(unsigned) == 4);
+ bytesDone += 4;
+ assert(sizeof(unsigned) == 4);
// Doing this twice by 16, rather than once by 32, avoids warnings when size_t == unsigned.
- bits = bits >> 16; bits = bits >> 16;
+ bits = bits >> 16;
+ bits = bits >> 16;
}
return res;
}
@@ -201,16 +202,22 @@ public:
class Iter
{
UINT64 m_bits;
- public:
- Iter(Env env, const UINT64& bits) : m_bits(bits) {}
+ public:
+ Iter(Env env, const UINT64& bits) : m_bits(bits)
+ {
+ }
bool NextElem(Env env, unsigned* pElem)
{
if (m_bits)
{
unsigned bitNum = *pElem;
- while ((m_bits & 0x1) == 0) { bitNum++; m_bits >>= 1; }
+ while ((m_bits & 0x1) == 0)
+ {
+ bitNum++;
+ m_bits >>= 1;
+ }
*pElem = bitNum;
m_bits &= ~0x1;
return true;
diff --git a/src/jit/bitsetasuint64inclass.h b/src/jit/bitsetasuint64inclass.h
index a5df174d3f..be92624613 100644
--- a/src/jit/bitsetasuint64inclass.h
+++ b/src/jit/bitsetasuint64inclass.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#ifndef bitSetAsUint64InClass_DEFINED
#define bitSetAsUint64InClass_DEFINED 1
@@ -10,21 +9,23 @@
#include "bitsetasuint64.h"
#include "stdmacros.h"
-template<typename Env, typename BitSetTraits>
+template <typename Env, typename BitSetTraits>
class BitSetUint64ValueRetType;
-template<typename Env, typename BitSetTraits>
+template <typename Env, typename BitSetTraits>
class BitSetUint64Iter;
-template<typename Env, typename BitSetTraits>
+template <typename Env, typename BitSetTraits>
class BitSetUint64
{
public:
- typedef BitSetUint64<Env,BitSetTraits> Rep;
+ typedef BitSetUint64<Env, BitSetTraits> Rep;
+
private:
- friend class BitSetOps</*BitSetType*/BitSetUint64<Env,BitSetTraits>,
- /*Brand*/BSUInt64Class,
- /*Env*/Env, /*BitSetTraits*/BitSetTraits>;
+ friend class BitSetOps</*BitSetType*/ BitSetUint64<Env, BitSetTraits>,
+ /*Brand*/ BSUInt64Class,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>;
friend class BitSetUint64ValueRetType<Env, BitSetTraits>;
friend class BitSetUint64Iter<Env, BitSetTraits>;
@@ -46,11 +47,11 @@ private:
#ifdef DEBUG
// In debug, make sure we don't have any public assignment, by making this private.
- BitSetUint64& operator=(const BitSetUint64& bs)
+ BitSetUint64& operator=(const BitSetUint64& bs)
{
- m_bits = bs.m_bits;
+ m_bits = bs.m_bits;
m_epoch = bs.m_epoch;
- return (*this);
+ return (*this);
}
#endif // DEBUG
@@ -58,7 +59,7 @@ private:
{
return m_bits == bs.m_bits
#ifdef DEBUG
- && m_epoch == bs.m_epoch
+ && m_epoch == bs.m_epoch
#endif
;
}
@@ -71,24 +72,23 @@ private:
// int argument, and just make copy constructor defined here visible.
public:
#endif
- BitSetUint64(const BitSetUint64& bs) : m_bits(bs.m_bits)
+ BitSetUint64(const BitSetUint64& bs)
+ : m_bits(bs.m_bits)
#ifdef DEBUG
, m_epoch(bs.m_epoch)
#endif
- {}
+ {
+ }
#ifdef DEBUG
public:
// But we add a public constructor that's *almost* the default constructor.
- BitSetUint64(const BitSetUint64& bs, int xxx) : m_bits(bs.m_bits)
- , m_epoch(bs.m_epoch)
- {}
+ BitSetUint64(const BitSetUint64& bs, int xxx) : m_bits(bs.m_bits), m_epoch(bs.m_epoch)
+ {
+ }
#endif
private:
-
-
-
// Return the number of bits set in the BitSet.
inline unsigned Count(Env env) const
{
@@ -112,7 +112,6 @@ private:
return res;
}
-
inline void RemoveElemD(Env env, unsigned i)
{
CheckEpoch(env);
@@ -127,7 +126,6 @@ private:
return res;
}
-
inline void AddElemD(Env env, unsigned i)
{
CheckEpoch(env);
@@ -222,89 +220,105 @@ private:
return Uint64BitSetOps::ToString(env, m_bits);
}
- public:
-
+public:
// Uninint
- BitSetUint64() :
- m_bits(0)
+ BitSetUint64()
+ : m_bits(0)
#ifdef DEBUG
, m_epoch(UINT32_MAX) // Undefined.
#endif
- {}
+ {
+ }
- BitSetUint64(Env env, bool full = false) :
- m_bits(0)
+ BitSetUint64(Env env, bool full = false)
+ : m_bits(0)
#ifdef DEBUG
, m_epoch(BitSetTraits::GetEpoch(env))
#endif
{
- if (full) m_bits = Uint64BitSetOps::MakeFull(env);
+ if (full)
+ {
+ m_bits = Uint64BitSetOps::MakeFull(env);
+ }
}
inline BitSetUint64(const BitSetUint64ValueRetType<Env, BitSetTraits>& rt);
- BitSetUint64(Env env, unsigned bitNum) :
- m_bits(Uint64BitSetOps::MakeSingleton(env, bitNum))
+ BitSetUint64(Env env, unsigned bitNum)
+ : m_bits(Uint64BitSetOps::MakeSingleton(env, bitNum))
#ifdef DEBUG
, m_epoch(BitSetTraits::GetEpoch(env))
#endif
{
assert(bitNum < BitSetTraits::GetSize(env));
}
-
-
};
-template<typename Env, typename BitSetTraits>
+template <typename Env, typename BitSetTraits>
class BitSetUint64ValueRetType
{
friend class BitSetUint64<Env, BitSetTraits>;
BitSetUint64<Env, BitSetTraits> m_bs;
+
public:
- BitSetUint64ValueRetType(const BitSetUint64<Env, BitSetTraits>& bs) : m_bs(bs) {}
+ BitSetUint64ValueRetType(const BitSetUint64<Env, BitSetTraits>& bs) : m_bs(bs)
+ {
+ }
};
-template<typename Env, typename BitSetTraits>
-BitSetUint64<Env, BitSetTraits>::BitSetUint64(const BitSetUint64ValueRetType<Env, BitSetTraits>& rt) : m_bits(rt.m_bs.m_bits)
+template <typename Env, typename BitSetTraits>
+BitSetUint64<Env, BitSetTraits>::BitSetUint64(const BitSetUint64ValueRetType<Env, BitSetTraits>& rt)
+ : m_bits(rt.m_bs.m_bits)
#ifdef DEBUG
, m_epoch(rt.m_bs.m_epoch)
#endif
-{}
-
+{
+}
-// You *can* clear a bit after it's been iterated. But you shouldn't otherwise mutate the
+// You *can* clear a bit after it's been iterated. But you shouldn't otherwise mutate the
// bitset during bit iteration.
-template<typename Env, typename BitSetTraits>
+template <typename Env, typename BitSetTraits>
class BitSetUint64Iter
{
- UINT64 m_bits;
- unsigned m_bitNum;
+ UINT64 m_bits;
+ unsigned m_bitNum;
+
public:
- BitSetUint64Iter(Env env, const BitSetUint64<Env, BitSetTraits>& bs): m_bits(bs.m_bits), m_bitNum(0) {}
+ BitSetUint64Iter(Env env, const BitSetUint64<Env, BitSetTraits>& bs) : m_bits(bs.m_bits), m_bitNum(0)
+ {
+ }
bool NextElem(Env env, unsigned* pElem)
{
static const unsigned UINT64_SIZE = 64;
-
+
if ((m_bits & 0x1) != 0)
{
- *pElem = m_bitNum; m_bitNum++; m_bits >>= 1; return true;
- }
- else
+ *pElem = m_bitNum;
+ m_bitNum++;
+ m_bits >>= 1;
+ return true;
+ }
+ else
{
// Skip groups of 4 zeros -- an optimization for sparse bitsets.
while (m_bitNum < UINT64_SIZE && (m_bits & 0xf) == 0)
{
- m_bitNum += 4; m_bits >>= 4;
+ m_bitNum += 4;
+ m_bits >>= 4;
}
while (m_bitNum < UINT64_SIZE && (m_bits & 0x1) == 0)
{
- m_bitNum += 1; m_bits >>= 1;
+ m_bitNum += 1;
+ m_bits >>= 1;
}
if (m_bitNum < UINT64_SIZE)
{
- *pElem = m_bitNum; m_bitNum++; m_bits >>= 1; return true;
+ *pElem = m_bitNum;
+ m_bitNum++;
+ m_bits >>= 1;
+ return true;
}
else
{
@@ -314,19 +328,20 @@ public:
}
};
-template<typename Env, typename BitSetTraits>
-class BitSetOps</*BitSetType*/BitSetUint64<Env,BitSetTraits>,
- /*Brand*/BSUInt64Class,
- /*Env*/Env, /*BitSetTraits*/BitSetTraits>
+template <typename Env, typename BitSetTraits>
+class BitSetOps</*BitSetType*/ BitSetUint64<Env, BitSetTraits>,
+ /*Brand*/ BSUInt64Class,
+ /*Env*/ Env,
+ /*BitSetTraits*/ BitSetTraits>
{
- typedef BitSetUint64<Env,BitSetTraits> BST;
- typedef const BitSetUint64<Env,BitSetTraits>& BSTValArg;
- typedef BitSetUint64ValueRetType<Env,BitSetTraits> BSTRetVal;
+ typedef BitSetUint64<Env, BitSetTraits> BST;
+ typedef const BitSetUint64<Env, BitSetTraits>& BSTValArg;
+ typedef BitSetUint64ValueRetType<Env, BitSetTraits> BSTRetVal;
public:
static BSTRetVal UninitVal()
{
- return BitSetUint64<Env,BitSetTraits>();
+ return BitSetUint64<Env, BitSetTraits>();
}
static bool MayBeUninit(BSTValArg bs)
@@ -467,7 +482,7 @@ public:
static BSTRetVal MakeFull(Env env)
{
- return BST(env, /*full*/true);
+ return BST(env, /*full*/ true);
}
#ifdef DEBUG
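
The iterator reformatted above walks the set bits of a single UINT64 by repeatedly shifting, skipping four zero bits at a time as a sparse-set optimization. A minimal, self-contained sketch of that same pattern follows; it is an illustration only, not part of this patch, and the IterateBits helper name is made up for the example.

#include <cstdint>
#include <cstdio>

// Hypothetical standalone helper -- mirrors the skip-4-zeros loop in
// BitSetUint64Iter::NextElem above; not part of the JIT sources.
// Visits each set bit of 'bits' in ascending order.
template <typename Visitor>
void IterateBits(uint64_t bits, Visitor visit)
{
    unsigned bitNum = 0;
    while (bits != 0)
    {
        // Skip groups of 4 zeros -- a cheap win when the set is sparse.
        while ((bits & 0xf) == 0)
        {
            bitNum += 4;
            bits >>= 4;
        }
        // Walk up to the next set bit.
        while ((bits & 0x1) == 0)
        {
            bitNum += 1;
            bits >>= 1;
        }
        visit(bitNum);
        // Consume the bit just visited and continue.
        bitNum += 1;
        bits >>= 1;
    }
}

int main()
{
    // Prints: 0 2 48 63
    IterateBits(0x8001000000000005ULL, [](unsigned b) { printf("%u ", b); });
    printf("\n");
    return 0;
}
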
diff --git a/src/jit/bitsetops.h b/src/jit/bitsetops.h
index 1c1bca00b9..edf39eaf56 100644
--- a/src/jit/bitsetops.h
+++ b/src/jit/bitsetops.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
BSOPNAME(BSOP_Assign)
BSOPNAME(BSOP_AssignAllowUninitRhs)
BSOPNAME(BSOP_AssignNocopy)
diff --git a/src/jit/bitvec.h b/src/jit/bitvec.h
index dfeddaa779..4db211ba0a 100644
--- a/src/jit/bitvec.h
+++ b/src/jit/bitvec.h
@@ -14,7 +14,7 @@
//
// BitVecTraits traits(size, pCompiler);
// BitVec bitvec = BitVecOps::MakeEmpty(&traits);
-//
+//
// and call functions like so:
//
// BitVecOps::AddElemD(&traits, bitvec, 10);
@@ -25,17 +25,17 @@
#include "compilerbitsettraits.h"
#include "bitsetasshortlong.h"
-typedef BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/BitVecTraits*,
- /*BitSetTraits*/BitVecTraits>
- BitVecOps;
+typedef BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ BitVecTraits*,
+ /*BitSetTraits*/ BitVecTraits>
+ BitVecOps;
-typedef BitSetShortLongRep BitVec;
+typedef BitSetShortLongRep BitVec;
// These types should be used as the types for BitVec arguments and return values, respectively.
-typedef BitVecOps::ValArgType BitVec_ValArg_T;
-typedef BitVecOps::RetValType BitVec_ValRet_T;
+typedef BitVecOps::ValArgType BitVec_ValArg_T;
+typedef BitVecOps::RetValType BitVec_ValRet_T;
// Initialize "_varName" to "_initVal." Copies contents, not references; if "_varName" is uninitialized, allocates a
// set for it (using "_traits" for any necessary allocation), and copies the contents of "_initVal" into it.
@@ -49,8 +49,8 @@ typedef BitVecOps::RetValType BitVec_ValRet_T;
// Use this to initialize an iterator "_iterName" to iterate over a BitVec "_bitVec".
// "_bitNum" will be an unsigned variable to which we assign the elements of "_bitVec".
-#define BITVEC_ITER_INIT(_traits, _iterName, _bitVec, _bitNum) \
- unsigned _bitNum = 0; \
+#define BITVEC_ITER_INIT(_traits, _iterName, _bitVec, _bitNum) \
+ unsigned _bitNum = 0; \
BitVecOps::Iter _iterName(_traits, _bitVec)
#endif // _BITVEC_INCLUDED_
diff --git a/src/jit/block.cpp b/src/jit/block.cpp
index 47498265dc..37c85dd39d 100644
--- a/src/jit/block.cpp
+++ b/src/jit/block.cpp
@@ -20,10 +20,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
flowList* ShuffleHelper(unsigned hash, flowList* res)
{
flowList* head = res;
- for (flowList* prev = NULL; res != NULL; prev = res, res = res->flNext)
+ for (flowList *prev = nullptr; res != nullptr; prev = res, res = res->flNext)
{
unsigned blkHash = (hash ^ (res->flBlock->bbNum << 16) ^ res->flBlock->bbNum);
- if (((blkHash % 1879) & 1) && prev != NULL)
+ if (((blkHash % 1879) & 1) && prev != nullptr)
{
// Swap res with head.
prev->flNext = head;
@@ -51,26 +51,26 @@ unsigned SsaStressHashHelper()
}
#endif
-EHSuccessorIter::EHSuccessorIter(Compiler* comp, BasicBlock* block) :
- m_comp(comp),
- m_block(block),
- m_curRegSucc(NULL),
- m_curTry(comp->ehGetBlockExnFlowDsc(block)),
- m_remainingRegSuccs(block->NumSucc(comp))
+EHSuccessorIter::EHSuccessorIter(Compiler* comp, BasicBlock* block)
+ : m_comp(comp)
+ , m_block(block)
+ , m_curRegSucc(nullptr)
+ , m_curTry(comp->ehGetBlockExnFlowDsc(block))
+ , m_remainingRegSuccs(block->NumSucc(comp))
{
// If "block" is a "leave helper" block (the empty BBJ_ALWAYS block that pairs with a
// preceding BBJ_CALLFINALLY block to implement a "leave" IL instruction), then no exceptions
// can occur within it, so clear m_curTry if it's non-null.
- if (m_curTry != NULL)
+ if (m_curTry != nullptr)
{
BasicBlock* beforeBlock = block->bbPrev;
- if (beforeBlock != NULL && beforeBlock->isBBCallAlwaysPair())
+ if (beforeBlock != nullptr && beforeBlock->isBBCallAlwaysPair())
{
- m_curTry = NULL;
+ m_curTry = nullptr;
}
}
- if (m_curTry == NULL && m_remainingRegSuccs > 0)
+ if (m_curTry == nullptr && m_remainingRegSuccs > 0)
{
// Examine the successors to see if any are the start of try blocks.
FindNextRegSuccTry();
@@ -79,7 +79,7 @@ EHSuccessorIter::EHSuccessorIter(Compiler* comp, BasicBlock* block) :
void EHSuccessorIter::FindNextRegSuccTry()
{
- assert(m_curTry == NULL);
+ assert(m_curTry == nullptr);
// Must now consider the next regular successor, if any.
while (m_remainingRegSuccs > 0)
@@ -94,7 +94,9 @@ void EHSuccessorIter::FindNextRegSuccTry()
// If the try region started by "m_curRegSucc" (represented by newTryIndex) contains m_block,
// we've already yielded its handler, as one of the EH handler successors of m_block itself.
if (m_comp->bbInExnFlowRegions(newTryIndex, m_block))
+ {
continue;
+ }
// Otherwise, consider this try.
m_curTry = m_comp->ehGetDsc(newTryIndex);
@@ -105,24 +107,24 @@ void EHSuccessorIter::FindNextRegSuccTry()
void EHSuccessorIter::operator++(void)
{
- assert(m_curTry != NULL);
+ assert(m_curTry != nullptr);
if (m_curTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)
{
m_curTry = m_comp->ehGetDsc(m_curTry->ebdEnclosingTryIndex);
// If we've gone over into considering try's containing successors,
// then the enclosing try must have the successor as its first block.
- if (m_curRegSucc == NULL || m_curTry->ebdTryBeg == m_curRegSucc)
+ if (m_curRegSucc == nullptr || m_curTry->ebdTryBeg == m_curRegSucc)
{
return;
}
// Otherwise, give up, try the next regular successor.
- m_curTry = NULL;
+ m_curTry = nullptr;
}
else
{
- m_curTry = NULL;
+ m_curTry = nullptr;
}
// We've exhausted all try blocks.
@@ -132,14 +134,14 @@ void EHSuccessorIter::operator++(void)
BasicBlock* EHSuccessorIter::operator*()
{
- assert(m_curTry != NULL);
+ assert(m_curTry != nullptr);
return m_curTry->ExFlowBlock();
}
flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
{
BlockToFlowListMap* ehPreds = GetBlockToEHPreds();
- flowList* res;
+ flowList* res;
if (ehPreds->Lookup(blk, &res))
{
return res;
@@ -150,21 +152,22 @@ flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
if (bbIsExFlowBlock(blk, &tryIndex))
{
// Find the first block of the try.
- EHblkDsc* ehblk = ehGetDsc(tryIndex);
+ EHblkDsc* ehblk = ehGetDsc(tryIndex);
BasicBlock* tryStart = ehblk->ebdTryBeg;
- for (flowList* tryStartPreds = tryStart->bbPreds; tryStartPreds != nullptr; tryStartPreds = tryStartPreds->flNext)
+ for (flowList* tryStartPreds = tryStart->bbPreds; tryStartPreds != nullptr;
+ tryStartPreds = tryStartPreds->flNext)
{
res = new (this, CMK_FlowList) flowList(tryStartPreds->flBlock, res);
#if MEASURE_BLOCK_SIZE
- genFlowNodeCnt += 1;
+ genFlowNodeCnt += 1;
genFlowNodeSize += sizeof(flowList);
#endif // MEASURE_BLOCK_SIZE
}
// Now add all blocks handled by this handler (except for second blocks of BBJ_CALLFINALLY/BBJ_ALWAYS pairs;
// these cannot cause transfer to the handler...)
- BasicBlock* prevBB = NULL;
+ BasicBlock* prevBB = nullptr;
// TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via
// something like:
@@ -172,14 +175,14 @@ flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
// (plus adding in any filter blocks outside the try whose exceptions are handled here).
// That doesn't work, however: funclets have caused us to sometimes split the body of a try into
// more than one sequence of contiguous blocks. We need to find a better way to do this.
- for (BasicBlock* bb = fgFirstBB; bb != NULL; prevBB = bb, bb = bb->bbNext)
+ for (BasicBlock *bb = fgFirstBB; bb != nullptr; prevBB = bb, bb = bb->bbNext)
{
- if (bbInExnFlowRegions(tryIndex, bb) && (prevBB == NULL || !prevBB->isBBCallAlwaysPair()))
+ if (bbInExnFlowRegions(tryIndex, bb) && (prevBB == nullptr || !prevBB->isBBCallAlwaysPair()))
{
res = new (this, CMK_FlowList) flowList(bb, res);
#if MEASURE_BLOCK_SIZE
- genFlowNodeCnt += 1;
+ genFlowNodeCnt += 1;
genFlowNodeSize += sizeof(flowList);
#endif // MEASURE_BLOCK_SIZE
}
@@ -198,7 +201,6 @@ flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
return res;
}
-
#ifdef DEBUG
//------------------------------------------------------------------------
@@ -212,7 +214,8 @@ void BasicBlock::dspBlockILRange()
}
else
{
- printf("[???" "..");
+ printf("[???"
+ "..");
}
if (bbCodeOffsEnd != BAD_IL_OFFSET)
@@ -223,7 +226,8 @@ void BasicBlock::dspBlockILRange()
else
{
// brace-matching editor workaround for following line: (
- printf("???" ")");
+ printf("???"
+ ")");
}
}
@@ -232,38 +236,126 @@ void BasicBlock::dspBlockILRange()
//
void BasicBlock::dspFlags()
{
- if (bbFlags & BBF_VISITED) printf("v ");
- if (bbFlags & BBF_MARKED) printf("m ");
- if (bbFlags & BBF_CHANGED) printf("! ");
- if (bbFlags & BBF_REMOVED) printf("del ");
- if (bbFlags & BBF_DONT_REMOVE) printf("keep ");
- if (bbFlags & BBF_IMPORTED) printf("i ");
- if (bbFlags & BBF_INTERNAL) printf("internal ");
- if (bbFlags & BBF_FAILED_VERIFICATION) printf("failV ");
- if (bbFlags & BBF_TRY_BEG) printf("try ");
- if (bbFlags & BBF_NEEDS_GCPOLL) printf("poll ");
- if (bbFlags & BBF_RUN_RARELY) printf("rare ");
- if (bbFlags & BBF_LOOP_HEAD) printf("Loop ");
- if (bbFlags & BBF_LOOP_CALL0) printf("Loop0 ");
- if (bbFlags & BBF_LOOP_CALL1) printf("Loop1 ");
- if (bbFlags & BBF_HAS_LABEL) printf("label ");
- if (bbFlags & BBF_JMP_TARGET) printf("target ");
- if (bbFlags & BBF_HAS_JMP) printf("jmp ");
- if (bbFlags & BBF_GC_SAFE_POINT) printf("gcsafe ");
- if (bbFlags & BBF_FUNCLET_BEG) printf("flet ");
- if (bbFlags & BBF_HAS_IDX_LEN) printf("idxlen ");
- if (bbFlags & BBF_HAS_NEWARRAY) printf("new[] ");
- if (bbFlags & BBF_HAS_NEWOBJ) printf("newobj ");
+ if (bbFlags & BBF_VISITED)
+ {
+ printf("v ");
+ }
+ if (bbFlags & BBF_MARKED)
+ {
+ printf("m ");
+ }
+ if (bbFlags & BBF_CHANGED)
+ {
+ printf("! ");
+ }
+ if (bbFlags & BBF_REMOVED)
+ {
+ printf("del ");
+ }
+ if (bbFlags & BBF_DONT_REMOVE)
+ {
+ printf("keep ");
+ }
+ if (bbFlags & BBF_IMPORTED)
+ {
+ printf("i ");
+ }
+ if (bbFlags & BBF_INTERNAL)
+ {
+ printf("internal ");
+ }
+ if (bbFlags & BBF_FAILED_VERIFICATION)
+ {
+ printf("failV ");
+ }
+ if (bbFlags & BBF_TRY_BEG)
+ {
+ printf("try ");
+ }
+ if (bbFlags & BBF_NEEDS_GCPOLL)
+ {
+ printf("poll ");
+ }
+ if (bbFlags & BBF_RUN_RARELY)
+ {
+ printf("rare ");
+ }
+ if (bbFlags & BBF_LOOP_HEAD)
+ {
+ printf("Loop ");
+ }
+ if (bbFlags & BBF_LOOP_CALL0)
+ {
+ printf("Loop0 ");
+ }
+ if (bbFlags & BBF_LOOP_CALL1)
+ {
+ printf("Loop1 ");
+ }
+ if (bbFlags & BBF_HAS_LABEL)
+ {
+ printf("label ");
+ }
+ if (bbFlags & BBF_JMP_TARGET)
+ {
+ printf("target ");
+ }
+ if (bbFlags & BBF_HAS_JMP)
+ {
+ printf("jmp ");
+ }
+ if (bbFlags & BBF_GC_SAFE_POINT)
+ {
+ printf("gcsafe ");
+ }
+ if (bbFlags & BBF_FUNCLET_BEG)
+ {
+ printf("flet ");
+ }
+ if (bbFlags & BBF_HAS_IDX_LEN)
+ {
+ printf("idxlen ");
+ }
+ if (bbFlags & BBF_HAS_NEWARRAY)
+ {
+ printf("new[] ");
+ }
+ if (bbFlags & BBF_HAS_NEWOBJ)
+ {
+ printf("newobj ");
+ }
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- if (bbFlags & BBF_FINALLY_TARGET) printf("ftarget ");
+ if (bbFlags & BBF_FINALLY_TARGET)
+ printf("ftarget ");
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- if (bbFlags & BBF_BACKWARD_JUMP) printf("bwd ");
- if (bbFlags & BBF_RETLESS_CALL) printf("retless ");
- if (bbFlags & BBF_LOOP_PREHEADER) printf("LoopPH ");
- if (bbFlags & BBF_COLD) printf("cold ");
- if (bbFlags & BBF_PROF_WEIGHT) printf("IBC ");
- if (bbFlags & BBF_FORWARD_SWITCH) printf("fswitch ");
- if (bbFlags & BBF_KEEP_BBJ_ALWAYS) printf("KEEP ");
+ if (bbFlags & BBF_BACKWARD_JUMP)
+ {
+ printf("bwd ");
+ }
+ if (bbFlags & BBF_RETLESS_CALL)
+ {
+ printf("retless ");
+ }
+ if (bbFlags & BBF_LOOP_PREHEADER)
+ {
+ printf("LoopPH ");
+ }
+ if (bbFlags & BBF_COLD)
+ {
+ printf("cold ");
+ }
+ if (bbFlags & BBF_PROF_WEIGHT)
+ {
+ printf("IBC ");
+ }
+ if (bbFlags & BBF_FORWARD_SWITCH)
+ {
+ printf("fswitch ");
+ }
+ if (bbFlags & BBF_KEEP_BBJ_ALWAYS)
+ {
+ printf("KEEP ");
+ }
}
/*****************************************************************************
@@ -272,7 +364,7 @@ void BasicBlock::dspFlags()
* Returns the number of characters printed.
*/
-unsigned BasicBlock::dspPreds()
+unsigned BasicBlock::dspPreds()
{
unsigned count = 0;
for (flowList* pred = bbPreds; pred != nullptr; pred = pred->flNext)
@@ -308,7 +400,7 @@ unsigned BasicBlock::dspPreds()
* Returns the number of characters printed.
*/
-unsigned BasicBlock::dspCheapPreds()
+unsigned BasicBlock::dspCheapPreds()
{
unsigned count = 0;
for (BasicBlockList* pred = bbCheapPreds; pred != nullptr; pred = pred->next)
@@ -337,13 +429,13 @@ unsigned BasicBlock::dspCheapPreds()
* Returns the count of successors.
*/
-unsigned BasicBlock::dspSuccs(Compiler* compiler)
+unsigned BasicBlock::dspSuccs(Compiler* compiler)
{
unsigned numSuccs = NumSucc(compiler);
- unsigned count = 0;
+ unsigned count = 0;
for (unsigned i = 0; i < numSuccs; i++)
{
- printf("%s", (count == 0) ? "" : ",");
+ printf("%s", (count == 0) ? "" : ",");
printf("BB%02u", GetSucc(i, compiler)->bbNum);
count++;
}
@@ -351,83 +443,84 @@ unsigned BasicBlock::dspSuccs(Compiler* compiler)
}
// Display a compact representation of the bbJumpKind, that is, where this block branches.
-// This is similar to code in Compiler::fgTableDispBasicBlock(), but doesn't have that code's requirements to align things strictly.
-void BasicBlock::dspJumpKind()
+// This is similar to code in Compiler::fgTableDispBasicBlock(), but doesn't have that code's requirements to align
+// things strictly.
+void BasicBlock::dspJumpKind()
{
switch (bbJumpKind)
{
- case BBJ_EHFINALLYRET:
- printf(" (finret)");
- break;
+ case BBJ_EHFINALLYRET:
+ printf(" (finret)");
+ break;
- case BBJ_EHFILTERRET:
- printf(" (fltret)");
- break;
+ case BBJ_EHFILTERRET:
+ printf(" (fltret)");
+ break;
- case BBJ_EHCATCHRET:
- printf(" -> BB%02u (cret)", bbJumpDest->bbNum);
- break;
+ case BBJ_EHCATCHRET:
+ printf(" -> BB%02u (cret)", bbJumpDest->bbNum);
+ break;
- case BBJ_THROW:
- printf(" (throw)");
- break;
+ case BBJ_THROW:
+ printf(" (throw)");
+ break;
- case BBJ_RETURN:
- printf(" (return)");
- break;
+ case BBJ_RETURN:
+ printf(" (return)");
+ break;
- case BBJ_NONE:
- // For fall-through blocks, print nothing.
- break;
+ case BBJ_NONE:
+ // For fall-through blocks, print nothing.
+ break;
- case BBJ_ALWAYS:
- if (bbFlags & BBF_KEEP_BBJ_ALWAYS)
- {
- printf(" -> BB%02u (ALWAYS)", bbJumpDest->bbNum);
- }
- else
- {
- printf(" -> BB%02u (always)", bbJumpDest->bbNum);
- }
- break;
+ case BBJ_ALWAYS:
+ if (bbFlags & BBF_KEEP_BBJ_ALWAYS)
+ {
+ printf(" -> BB%02u (ALWAYS)", bbJumpDest->bbNum);
+ }
+ else
+ {
+ printf(" -> BB%02u (always)", bbJumpDest->bbNum);
+ }
+ break;
- case BBJ_LEAVE:
- printf(" -> BB%02u (leave)", bbJumpDest->bbNum);
- break;
+ case BBJ_LEAVE:
+ printf(" -> BB%02u (leave)", bbJumpDest->bbNum);
+ break;
- case BBJ_CALLFINALLY:
- printf(" -> BB%02u (callf)", bbJumpDest->bbNum);
- break;
+ case BBJ_CALLFINALLY:
+ printf(" -> BB%02u (callf)", bbJumpDest->bbNum);
+ break;
- case BBJ_COND:
- printf(" -> BB%02u (cond)", bbJumpDest->bbNum);
- break;
+ case BBJ_COND:
+ printf(" -> BB%02u (cond)", bbJumpDest->bbNum);
+ break;
- case BBJ_SWITCH:
- printf(" ->");
+ case BBJ_SWITCH:
+ printf(" ->");
- unsigned jumpCnt;
- jumpCnt = bbJumpSwt->bbsCount;
- BasicBlock** jumpTab;
- jumpTab = bbJumpSwt->bbsDstTab;
- do
- {
- printf("%cBB%02u",
- (jumpTab == bbJumpSwt->bbsDstTab) ? ' ' : ',',
- (*jumpTab)->bbNum);
- }
- while (++jumpTab, --jumpCnt);
+ unsigned jumpCnt;
+ jumpCnt = bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab;
+ jumpTab = bbJumpSwt->bbsDstTab;
+ do
+ {
+ printf("%cBB%02u", (jumpTab == bbJumpSwt->bbsDstTab) ? ' ' : ',', (*jumpTab)->bbNum);
+ } while (++jumpTab, --jumpCnt);
- printf(" (switch)");
- break;
+ printf(" (switch)");
+ break;
- default:
- unreached();
- break;
+ default:
+ unreached();
+ break;
}
}
-void BasicBlock::dspBlockHeader(Compiler* compiler, bool showKind /*= true*/, bool showFlags /*= false*/, bool showPreds /*= true*/)
+void BasicBlock::dspBlockHeader(Compiler* compiler,
+ bool showKind /*= true*/,
+ bool showFlags /*= false*/,
+ bool showPreds /*= true*/)
{
printf("BB%02u ", bbNum);
dspBlockILRange();
@@ -470,8 +563,8 @@ void BasicBlock::CloneBlockState(Compiler* compiler, BasicBlock* to, const Basic
{
assert(to->bbTreeList == nullptr);
- to->bbFlags = from->bbFlags;
- to->bbWeight = from->bbWeight;
+ to->bbFlags = from->bbFlags;
+ to->bbWeight = from->bbWeight;
BlockSetOps::AssignAllowUninitRhs(compiler, to->bbReach, from->bbReach);
to->copyEHRegion(from);
to->bbCatchTyp = from->bbCatchTyp;
@@ -483,17 +576,18 @@ void BasicBlock::CloneBlockState(Compiler* compiler, BasicBlock* to, const Basic
to->bbCodeOffsEnd = from->bbCodeOffsEnd;
VarSetOps::AssignAllowUninitRhs(compiler, to->bbScope, from->bbScope);
#if FEATURE_STACK_FP_X87
- to->bbFPStateX87 = from->bbFPStateX87;
+ to->bbFPStateX87 = from->bbFPStateX87;
#endif // FEATURE_STACK_FP_X87
- to->bbNatLoopNum = from->bbNatLoopNum;
+ to->bbNatLoopNum = from->bbNatLoopNum;
#ifdef DEBUG
to->bbLoopNum = from->bbLoopNum;
to->bbTgtStkDepth = from->bbTgtStkDepth;
#endif // DEBUG
- for (GenTreePtr fromStmt = from->bbTreeList; fromStmt != NULL; fromStmt = fromStmt->gtNext)
+ for (GenTreePtr fromStmt = from->bbTreeList; fromStmt != nullptr; fromStmt = fromStmt->gtNext)
{
- compiler->fgInsertStmtAtEnd(to, compiler->fgNewStmtFromTree(compiler->gtCloneExpr(fromStmt->gtStmt.gtStmtExpr)));
+ compiler->fgInsertStmtAtEnd(to,
+ compiler->fgNewStmtFromTree(compiler->gtCloneExpr(fromStmt->gtStmt.gtStmtExpr)));
}
}
@@ -510,7 +604,9 @@ void BasicBlock::CloneBlockState(Compiler* compiler, BasicBlock* to, const Basic
GenTreeStmt* BasicBlock::firstStmt()
{
if (bbTreeList == nullptr)
+ {
return nullptr;
+ }
return bbTreeList->AsStmt();
}
@@ -530,7 +626,9 @@ GenTreeStmt* BasicBlock::firstStmt()
GenTreeStmt* BasicBlock::lastStmt()
{
if (bbTreeList == nullptr)
+ {
return nullptr;
+ }
GenTree* result = bbTreeList->gtPrev;
assert(result && result->gtNext == nullptr);
@@ -554,7 +652,9 @@ GenTreeStmt* BasicBlock::lastStmt()
GenTreeStmt* BasicBlock::lastTopLevelStmt()
{
if (bbTreeList == nullptr)
+ {
return nullptr;
+ }
GenTreePtr stmt = lastStmt();
@@ -635,5 +735,3 @@ unsigned PtrKeyFuncs<BasicBlock>::GetHashCode(const BasicBlock* ptr)
#endif
return ptr->bbNum;
}
-
-
diff --git a/src/jit/block.h b/src/jit/block.h
index 0ab291235c..021121edba 100644
--- a/src/jit/block.h
+++ b/src/jit/block.h
@@ -17,7 +17,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#define _BLOCK_H_
/*****************************************************************************/
-#include "vartype.h" // For "var_types.h"
+#include "vartype.h" // For "var_types.h"
#include "_typeinfo.h"
/*****************************************************************************/
@@ -32,19 +32,18 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
#if LARGE_EXPSET
-typedef unsigned __int64 EXPSET_TP;
-#define EXPSET_SZ 64
+typedef unsigned __int64 EXPSET_TP;
+#define EXPSET_SZ 64
#else
-typedef unsigned int EXPSET_TP;
-#define EXPSET_SZ 32
+typedef unsigned int EXPSET_TP;
+#define EXPSET_SZ 32
#endif
-#define EXPSET_ALL ((EXPSET_TP)0-1)
-
-typedef BitVec ASSERT_TP;
-typedef BitVec_ValArg_T ASSERT_VALARG_TP;
-typedef BitVec_ValRet_T ASSERT_VALRET_TP;
+#define EXPSET_ALL ((EXPSET_TP)0 - 1)
+typedef BitVec ASSERT_TP;
+typedef BitVec_ValArg_T ASSERT_VALARG_TP;
+typedef BitVec_ValRet_T ASSERT_VALRET_TP;
/*****************************************************************************
*
@@ -52,38 +51,38 @@ typedef BitVec_ValRet_T ASSERT_VALRET_TP;
* of the following enumeration.
*/
-DECLARE_TYPED_ENUM(BBjumpKinds,BYTE)
+DECLARE_TYPED_ENUM(BBjumpKinds, BYTE)
{
- BBJ_EHFINALLYRET, // block ends with 'endfinally' (for finally or fault)
- BBJ_EHFILTERRET, // block ends with 'endfilter'
- BBJ_EHCATCHRET, // block ends with a leave out of a catch (only #if FEATURE_EH_FUNCLETS)
- BBJ_THROW, // block ends with 'throw'
- BBJ_RETURN, // block ends with 'ret'
-
- BBJ_NONE, // block flows into the next one (no jump)
-
- BBJ_ALWAYS, // block always jumps to the target
- BBJ_LEAVE, // block always jumps to the target, maybe out of guarded
- // region. Used temporarily until importing
- BBJ_CALLFINALLY, // block always calls the target finally
- BBJ_COND, // block conditionally jumps to the target
- BBJ_SWITCH, // block ends with a switch statement
-
- BBJ_COUNT
+ BBJ_EHFINALLYRET, // block ends with 'endfinally' (for finally or fault)
+ BBJ_EHFILTERRET, // block ends with 'endfilter'
+ BBJ_EHCATCHRET, // block ends with a leave out of a catch (only #if FEATURE_EH_FUNCLETS)
+ BBJ_THROW, // block ends with 'throw'
+ BBJ_RETURN, // block ends with 'ret'
+
+ BBJ_NONE, // block flows into the next one (no jump)
+
+ BBJ_ALWAYS, // block always jumps to the target
+ BBJ_LEAVE, // block always jumps to the target, maybe out of guarded
+ // region. Used temporarily until importing
+ BBJ_CALLFINALLY, // block always calls the target finally
+ BBJ_COND, // block conditionally jumps to the target
+ BBJ_SWITCH, // block ends with a switch statement
+
+ BBJ_COUNT
}
-END_DECLARE_TYPED_ENUM(BBjumpKinds,BYTE)
+END_DECLARE_TYPED_ENUM(BBjumpKinds, BYTE)
-struct GenTree;
-struct GenTreeStmt;
-struct BasicBlock;
-class Compiler;
-class typeInfo;
-struct BasicBlockList;
-struct flowList;
-struct EHblkDsc;
+struct GenTree;
+struct GenTreeStmt;
+struct BasicBlock;
+class Compiler;
+class typeInfo;
+struct BasicBlockList;
+struct flowList;
+struct EHblkDsc;
#if FEATURE_STACK_FP_X87
-struct FlatFPStateX87;
+struct FlatFPStateX87;
#endif
/*****************************************************************************
@@ -98,16 +97,17 @@ struct FlatFPStateX87;
* switches with just a default case to a BBJ_ALWAYS branch, and a switch with just two cases to a BBJ_COND.
* However, in debuggable code, we might not do that, so bbsCount might be 1.
*/
-struct BBswtDesc
+struct BBswtDesc
{
- unsigned bbsCount; // count of cases (includes 'default' if bbsHasDefault)
- BasicBlock** bbsDstTab; // case label table address
- bool bbsHasDefault;
+ unsigned bbsCount; // count of cases (includes 'default' if bbsHasDefault)
+ BasicBlock** bbsDstTab; // case label table address
+ bool bbsHasDefault;
- BBswtDesc()
- : bbsHasDefault(true) {}
+ BBswtDesc() : bbsHasDefault(true)
+ {
+ }
- void removeDefault()
+ void removeDefault()
{
assert(bbsHasDefault);
assert(bbsCount > 0);
@@ -115,7 +115,7 @@ struct BBswtDesc
bbsCount--;
}
- BasicBlock* getDefault()
+ BasicBlock* getDefault()
{
assert(bbsHasDefault);
assert(bbsCount > 0);
@@ -125,27 +125,27 @@ struct BBswtDesc
struct StackEntry
{
- GenTree* val;
- typeInfo seTypeInfo;
+ GenTree* val;
+ typeInfo seTypeInfo;
};
/*****************************************************************************/
enum ThisInitState
{
- TIS_Bottom, // We don't know anything about the 'this' pointer.
- TIS_Uninit, // The 'this' pointer for this constructor is known to be uninitialized.
- TIS_Init, // The 'this' pointer for this constructor is known to be initialized.
- TIS_Top, // This results from merging the state of two blocks one with TIS_Unint and the other with TIS_Init.
- // We use this in fault blocks to prevent us from accessing the 'this' pointer, but otherwise
- // allowing the fault block to generate code.
+ TIS_Bottom, // We don't know anything about the 'this' pointer.
+ TIS_Uninit, // The 'this' pointer for this constructor is known to be uninitialized.
+ TIS_Init, // The 'this' pointer for this constructor is known to be initialized.
+ TIS_Top, // This results from merging the state of two blocks one with TIS_Unint and the other with TIS_Init.
+ // We use this in fault blocks to prevent us from accessing the 'this' pointer, but otherwise
+ // allowing the fault block to generate code.
};
struct EntryState
{
- ThisInitState thisInitialized : 8; // used to track whether the this ptr is initialized (we could use
- // fewer bits here)
- unsigned esStackDepth : 24; // size of esStack
- StackEntry* esStack; // ptr to stack
+ ThisInitState thisInitialized : 8; // used to track whether the this ptr is initialized (we could use
+ // fewer bits here)
+ unsigned esStackDepth : 24; // size of esStack
+ StackEntry* esStack; // ptr to stack
};
// This encapsulates the "exception handling" successors of a block. That is,
@@ -191,8 +191,10 @@ class EHSuccessorIter
public:
// Returns the standard "end" iterator.
- EHSuccessorIter() :
- m_comp(NULL), m_block(NULL), m_curRegSucc(NULL), m_curTry(NULL), m_remainingRegSuccs(0) {}
+ EHSuccessorIter()
+ : m_comp(nullptr), m_block(nullptr), m_curRegSucc(nullptr), m_curTry(nullptr), m_remainingRegSuccs(0)
+ {
+ }
// Initializes the iterator to represent the EH successors of "block".
EHSuccessorIter(Compiler* comp, BasicBlock* block);
@@ -209,8 +211,7 @@ public:
bool operator==(const EHSuccessorIter& ehsi)
{
// Ignore the compiler; we'll assume that's the same.
- return m_curTry == ehsi.m_curTry
- && m_remainingRegSuccs == ehsi.m_remainingRegSuccs;
+ return m_curTry == ehsi.m_curTry && m_remainingRegSuccs == ehsi.m_remainingRegSuccs;
}
bool operator!=(const EHSuccessorIter& ehsi)
@@ -234,16 +235,20 @@ class AllSuccessorIter
inline bool CurTryIsBlkCallFinallyTarget();
public:
- inline AllSuccessorIter() { }
+ inline AllSuccessorIter()
+ {
+ }
// Initializes "this" to iterate over all successors of "block."
inline AllSuccessorIter(Compiler* comp, BasicBlock* block);
// Used for constructing an appropriate "end" iter. Should be called with
// the number of normal successors of the block being iterated.
- AllSuccessorIter(unsigned numSuccs) : m_normSucc(numSuccs), m_numNormSuccs(numSuccs), m_ehIter() {}
+ AllSuccessorIter(unsigned numSuccs) : m_normSucc(numSuccs), m_numNormSuccs(numSuccs), m_ehIter()
+ {
+ }
- // Go on to the next successor.
+ // Go on to the next successor.
inline void operator++(void);
// Requires that "this" is not equal to the standard "end" iterator. Returns the
@@ -254,8 +259,7 @@ public:
// and "m_block" fields.
bool operator==(const AllSuccessorIter& asi)
{
- return m_normSucc == asi.m_normSucc
- && m_ehIter == asi.m_ehIter;
+ return m_normSucc == asi.m_normSucc && m_ehIter == asi.m_ehIter;
}
bool operator!=(const AllSuccessorIter& asi)
@@ -271,104 +275,106 @@ public:
struct BasicBlock
{
- BasicBlock* bbNext; // next BB in ascending PC offset order
- BasicBlock* bbPrev;
-
- void setNext(BasicBlock* next)
- {
- bbNext = next;
- if (next)
- next->bbPrev = this;
+ BasicBlock* bbNext; // next BB in ascending PC offset order
+ BasicBlock* bbPrev;
+
+ void setNext(BasicBlock* next)
+ {
+ bbNext = next;
+ if (next)
+ {
+ next->bbPrev = this;
+ }
}
- unsigned bbNum; // the block's number
- unsigned bbPostOrderNum; // the block's post order number in the graph.
- unsigned bbRefs; // number of blocks that can reach here, either by fall-through or a branch. If this falls to zero, the block is unreachable.
-
- unsigned bbFlags; // see BBF_xxxx below
-
-#define BBF_VISITED 0x00000001 // BB visited during optimizations
-#define BBF_MARKED 0x00000002 // BB marked during optimizations
-#define BBF_CHANGED 0x00000004 // input/output of this block has changed
-#define BBF_REMOVED 0x00000008 // BB has been removed from bb-list
-
-#define BBF_DONT_REMOVE 0x00000010 // BB should not be removed during flow graph optimizations
-#define BBF_IMPORTED 0x00000020 // BB byte-code has been imported
-#define BBF_INTERNAL 0x00000040 // BB has been added by the compiler
-#define BBF_FAILED_VERIFICATION 0x00000080 // BB has verification exception
-
-#define BBF_TRY_BEG 0x00000100 // BB starts a 'try' block
-#define BBF_FUNCLET_BEG 0x00000200 // BB is the beginning of a funclet
-#define BBF_HAS_NULLCHECK 0x00000400 // BB contains a null check
-#define BBF_NEEDS_GCPOLL 0x00000800 // This BB is the source of a back edge and needs a GC Poll
-
-#define BBF_RUN_RARELY 0x00001000 // BB is rarely run (catch clauses, blocks with throws etc)
-#define BBF_LOOP_HEAD 0x00002000 // BB is the head of a loop
-#define BBF_LOOP_CALL0 0x00004000 // BB starts a loop that sometimes won't call
-#define BBF_LOOP_CALL1 0x00008000 // BB starts a loop that will always call
-
-#define BBF_HAS_LABEL 0x00010000 // BB needs a label
-#define BBF_JMP_TARGET 0x00020000 // BB is a target of an implicit/explicit jump
-#define BBF_HAS_JMP 0x00040000 // BB executes a JMP instruction (instead of return)
-#define BBF_GC_SAFE_POINT 0x00080000 // BB has a GC safe point (a call). More abstractly, BB does not
- // require a (further) poll -- this may be because this BB has a
- // call, or, in some cases, because the BB occurs in a loop, and
- // we've determined that all paths in the loop body leading to BB
- // include a call.
-#define BBF_HAS_VTABREF 0x00100000 // BB contains reference of vtable
-#define BBF_HAS_IDX_LEN 0x00200000 // BB contains simple index or length expressions on an array local var.
-#define BBF_HAS_NEWARRAY 0x00400000 // BB contains 'new' of an array
-#define BBF_HAS_NEWOBJ 0x00800000 // BB contains 'new' of an object type.
+ unsigned bbNum; // the block's number
+ unsigned bbPostOrderNum; // the block's post order number in the graph.
+ unsigned bbRefs; // number of blocks that can reach here, either by fall-through or a branch. If this falls to zero,
+ // the block is unreachable.
+
+ unsigned bbFlags; // see BBF_xxxx below
+
+#define BBF_VISITED 0x00000001 // BB visited during optimizations
+#define BBF_MARKED 0x00000002 // BB marked during optimizations
+#define BBF_CHANGED 0x00000004 // input/output of this block has changed
+#define BBF_REMOVED 0x00000008 // BB has been removed from bb-list
+
+#define BBF_DONT_REMOVE 0x00000010 // BB should not be removed during flow graph optimizations
+#define BBF_IMPORTED 0x00000020 // BB byte-code has been imported
+#define BBF_INTERNAL 0x00000040 // BB has been added by the compiler
+#define BBF_FAILED_VERIFICATION 0x00000080 // BB has verification exception
+
+#define BBF_TRY_BEG 0x00000100 // BB starts a 'try' block
+#define BBF_FUNCLET_BEG 0x00000200 // BB is the beginning of a funclet
+#define BBF_HAS_NULLCHECK 0x00000400 // BB contains a null check
+#define BBF_NEEDS_GCPOLL 0x00000800 // This BB is the source of a back edge and needs a GC Poll
+
+#define BBF_RUN_RARELY 0x00001000 // BB is rarely run (catch clauses, blocks with throws etc)
+#define BBF_LOOP_HEAD 0x00002000 // BB is the head of a loop
+#define BBF_LOOP_CALL0 0x00004000 // BB starts a loop that sometimes won't call
+#define BBF_LOOP_CALL1 0x00008000 // BB starts a loop that will always call
+
+#define BBF_HAS_LABEL 0x00010000 // BB needs a label
+#define BBF_JMP_TARGET 0x00020000 // BB is a target of an implicit/explicit jump
+#define BBF_HAS_JMP 0x00040000 // BB executes a JMP instruction (instead of return)
+#define BBF_GC_SAFE_POINT 0x00080000 // BB has a GC safe point (a call). More abstractly, BB does not
+ // require a (further) poll -- this may be because this BB has a
+ // call, or, in some cases, because the BB occurs in a loop, and
+ // we've determined that all paths in the loop body leading to BB
+ // include a call.
+#define BBF_HAS_VTABREF 0x00100000 // BB contains reference of vtable
+#define BBF_HAS_IDX_LEN 0x00200000 // BB contains simple index or length expressions on an array local var.
+#define BBF_HAS_NEWARRAY 0x00400000 // BB contains 'new' of an array
+#define BBF_HAS_NEWOBJ 0x00800000 // BB contains 'new' of an object type.
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
-#define BBF_FINALLY_TARGET 0x01000000 // BB is the target of a finally return: where a finally will return during
- // non-exceptional flow. Because the ARM calling sequence for calling a
- // finally explicitly sets the return address to the finally target and jumps
- // to the finally, instead of using a call instruction, ARM needs this to
- // generate correct code at the finally target, to allow for proper stack
- // unwind from within a non-exceptional call to a finally.
-#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
-#define BBF_BACKWARD_JUMP 0x02000000 // BB is surrounded by a backward jump/switch arc
-#define BBF_RETLESS_CALL 0x04000000 // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired
- // BBJ_ALWAYS); see isBBCallAlwaysPair().
-#define BBF_LOOP_PREHEADER 0x08000000 // BB is a loop preheader block
-
-#define BBF_COLD 0x10000000 // BB is cold
-#define BBF_PROF_WEIGHT 0x20000000 // BB weight is computed from profile data
-#define BBF_FORWARD_SWITCH 0x40000000 // Aux flag used in FP codegen to know if a jmptable entry has been forwarded
-#define BBF_KEEP_BBJ_ALWAYS 0x80000000 // A special BBJ_ALWAYS block, used by EH code generation. Keep the jump kind
- // as BBJ_ALWAYS. Used for the paired BBJ_ALWAYS block following the
- // BBJ_CALLFINALLY block, as well as, on x86, the final step block out of a
- // finally.
-
- bool isRunRarely() { return ((bbFlags & BBF_RUN_RARELY) != 0); }
- bool isLoopHead() { return ((bbFlags & BBF_LOOP_HEAD) != 0); }
+#define BBF_FINALLY_TARGET 0x01000000 // BB is the target of a finally return: where a finally will return during
+ // non-exceptional flow. Because the ARM calling sequence for calling a
+ // finally explicitly sets the return address to the finally target and jumps
+ // to the finally, instead of using a call instruction, ARM needs this to
+ // generate correct code at the finally target, to allow for proper stack
+ // unwind from within a non-exceptional call to a finally.
+#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
+#define BBF_BACKWARD_JUMP 0x02000000 // BB is surrounded by a backward jump/switch arc
+#define BBF_RETLESS_CALL 0x04000000 // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired
+ // BBJ_ALWAYS); see isBBCallAlwaysPair().
+#define BBF_LOOP_PREHEADER 0x08000000 // BB is a loop preheader block
+
+#define BBF_COLD 0x10000000 // BB is cold
+#define BBF_PROF_WEIGHT 0x20000000 // BB weight is computed from profile data
+#define BBF_FORWARD_SWITCH 0x40000000 // Aux flag used in FP codegen to know if a jmptable entry has been forwarded
+#define BBF_KEEP_BBJ_ALWAYS 0x80000000 // A special BBJ_ALWAYS block, used by EH code generation. Keep the jump kind
+ // as BBJ_ALWAYS. Used for the paired BBJ_ALWAYS block following the
+ // BBJ_CALLFINALLY block, as well as, on x86, the final step block out of a
+ // finally.
+
+ bool isRunRarely()
+ {
+ return ((bbFlags & BBF_RUN_RARELY) != 0);
+ }
+ bool isLoopHead()
+ {
+ return ((bbFlags & BBF_LOOP_HEAD) != 0);
+ }
// Flags to update when two blocks are compacted
-#define BBF_COMPACT_UPD (BBF_CHANGED | \
- BBF_GC_SAFE_POINT | BBF_HAS_JMP | \
- BBF_NEEDS_GCPOLL | \
- BBF_HAS_IDX_LEN | BBF_BACKWARD_JUMP | \
- BBF_HAS_NEWARRAY | BBF_HAS_NEWOBJ)
+#define BBF_COMPACT_UPD \
+ (BBF_CHANGED | BBF_GC_SAFE_POINT | BBF_HAS_JMP | BBF_NEEDS_GCPOLL | BBF_HAS_IDX_LEN | BBF_BACKWARD_JUMP | \
+ BBF_HAS_NEWARRAY | BBF_HAS_NEWOBJ)
// Flags a block should not have had before it is split.
-#define BBF_SPLIT_NONEXIST (BBF_CHANGED | \
- BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | \
- BBF_RETLESS_CALL | \
- BBF_LOOP_PREHEADER | \
- BBF_COLD | \
- BBF_FORWARD_SWITCH)
+#define BBF_SPLIT_NONEXIST \
+ (BBF_CHANGED | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_RETLESS_CALL | BBF_LOOP_PREHEADER | \
+ BBF_COLD | BBF_FORWARD_SWITCH)
// Flags lost by the top block when a block is split.
// Note, this is a conservative guess.
// For example, the top block might or might not have BBF_GC_SAFE_POINT,
// but we assume it does not have BBF_GC_SAFE_POINT any more.
-#define BBF_SPLIT_LOST (BBF_GC_SAFE_POINT | \
- BBF_HAS_JMP | \
- BBF_KEEP_BBJ_ALWAYS)
+#define BBF_SPLIT_LOST (BBF_GC_SAFE_POINT | BBF_HAS_JMP | BBF_KEEP_BBJ_ALWAYS)
// Flags gained by the bottom block when a block is split.
// Note, this is a conservative guess.
@@ -377,41 +383,44 @@ struct BasicBlock
// TODO: Should BBF_RUN_RARELY be added to BBF_SPLIT_GAINED ?
-#define BBF_SPLIT_GAINED (BBF_DONT_REMOVE | BBF_HAS_LABEL | \
- BBF_HAS_JMP | BBF_BACKWARD_JUMP | \
- BBF_HAS_IDX_LEN | BBF_HAS_NEWARRAY | \
- BBF_PROF_WEIGHT | BBF_HAS_NEWOBJ | \
- BBF_KEEP_BBJ_ALWAYS)
+#define BBF_SPLIT_GAINED \
+ (BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_HAS_JMP | BBF_BACKWARD_JUMP | BBF_HAS_IDX_LEN | BBF_HAS_NEWARRAY | \
+ BBF_PROF_WEIGHT | BBF_HAS_NEWOBJ | BBF_KEEP_BBJ_ALWAYS)
#ifndef __GNUC__ // GCC doesn't like C_ASSERT at global scope
-static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0);
-static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_GAINED) == 0);
+ static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0);
+ static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_GAINED) == 0);
#endif
#ifdef DEBUG
- void dspFlags(); // Print the flags
- unsigned dspCheapPreds(); // Print the predecessors (bbCheapPreds)
- unsigned dspPreds(); // Print the predecessors (bbPreds)
- unsigned dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH regions are printed: see NumSucc() for details.
- void dspJumpKind(); // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.).
- void dspBlockHeader(Compiler* compiler, bool showKind = true, bool showFlags = false, bool showPreds = true); // Print a simple basic block header for various output, including a list of predecessors and successors.
-#endif // DEBUG
-
-typedef unsigned weight_t; // Type used to hold block and edge weights
- // Note that for CLR v2.0 and earlier our
- // block weights were stored using unsigned shorts
-
-#define BB_UNITY_WEIGHT 100 // how much a normal execute once block weights
-#define BB_LOOP_WEIGHT 8 // how much more loops are weighted
-#define BB_ZERO_WEIGHT 0
-#define BB_MAX_WEIGHT ULONG_MAX // we're using an 'unsigned' for the weight
-#define BB_VERY_HOT_WEIGHT 256 // how many average hits a BB has (per BBT scenario run) for this block
- // to be considered as very hot
-
- weight_t bbWeight; // The dynamic execution weight of this block
+ void dspFlags(); // Print the flags
+ unsigned dspCheapPreds(); // Print the predecessors (bbCheapPreds)
+ unsigned dspPreds(); // Print the predecessors (bbPreds)
+ unsigned dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH
+ // regions are printed: see NumSucc() for details.
+ void dspJumpKind(); // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.).
+ void dspBlockHeader(Compiler* compiler,
+ bool showKind = true,
+ bool showFlags = false,
+ bool showPreds = true); // Print a simple basic block header for various output, including a
+ // list of predecessors and successors.
+#endif // DEBUG
+
+ typedef unsigned weight_t; // Type used to hold block and edge weights
+ // Note that for CLR v2.0 and earlier our
+ // block weights were stored using unsigned shorts
+
+#define BB_UNITY_WEIGHT 100 // how much a normal execute once block weights
+#define BB_LOOP_WEIGHT 8 // how much more loops are weighted
+#define BB_ZERO_WEIGHT 0
+#define BB_MAX_WEIGHT ULONG_MAX // we're using an 'unsigned' for the weight
+#define BB_VERY_HOT_WEIGHT 256 // how many average hits a BB has (per BBT scenario run) for this block
+ // to be considered as very hot
+
+ weight_t bbWeight; // The dynamic execution weight of this block
// getBBWeight -- get the normalized weight of this block
- unsigned getBBWeight(Compiler* comp);
+ unsigned getBBWeight(Compiler* comp);
// setBBWeight -- if the block weight is not derived from a profile, then set the weight to the input
// weight, but make sure to not overflow BB_MAX_WEIGHT
@@ -438,9 +447,7 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
{
this->bbFlags |= BBF_PROF_WEIGHT;
// Check if the multiplication by BB_UNITY_WEIGHT will overflow.
- this->bbWeight = (weight <= BB_MAX_WEIGHT / BB_UNITY_WEIGHT)
- ? weight * BB_UNITY_WEIGHT
- : BB_MAX_WEIGHT;
+ this->bbWeight = (weight <= BB_MAX_WEIGHT / BB_UNITY_WEIGHT) ? weight * BB_UNITY_WEIGHT : BB_MAX_WEIGHT;
}
// this block will inherit the same weight and relevant bbFlags as bSrc
@@ -472,8 +479,7 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
// going to inherit. Since the number isn't exact, clear the BBF_PROF_WEIGHT flag.
void inheritWeightPercentage(BasicBlock* bSrc, unsigned percentage)
{
- assert(0 <= percentage &&
- percentage < 100);
+ assert(0 <= percentage && percentage < 100);
// Check for overflow
if (bSrc->bbWeight * 100 <= bSrc->bbWeight)
@@ -513,8 +519,8 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
{
if (this->bbWeight == BB_ZERO_WEIGHT)
{
- this->bbFlags &= ~BBF_RUN_RARELY; // Clear any RarelyRun flag
- this->bbFlags &= ~BBF_PROF_WEIGHT; // Clear any profile-derived flag
+ this->bbFlags &= ~BBF_RUN_RARELY; // Clear any RarelyRun flag
+ this->bbFlags &= ~BBF_PROF_WEIGHT; // Clear any profile-derived flag
this->bbWeight = 1;
}
}
@@ -570,14 +576,13 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
}
}
- BBjumpKinds bbJumpKind; // jump (if any) at the end of this block
+ BBjumpKinds bbJumpKind; // jump (if any) at the end of this block
/* The following union describes the jump target(s) of this block */
- union
- {
- unsigned bbJumpOffs; // PC offset (temporary only)
- BasicBlock* bbJumpDest; // basic block
- BBswtDesc* bbJumpSwt; // switch descriptor
+ union {
+ unsigned bbJumpOffs; // PC offset (temporary only)
+ BasicBlock* bbJumpDest; // basic block
+ BBswtDesc* bbJumpSwt; // switch descriptor
};
// NumSucc() gives the number of successors, and GetSucc() allows one to iterate over them.
@@ -603,10 +608,10 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
// instead of 2.
//
// Returns the number of successors of "this".
- unsigned NumSucc(Compiler* comp = NULL);
+ unsigned NumSucc(Compiler* comp = nullptr);
// Returns the "i"th successor. Requires (0 <= i < NumSucc()).
- BasicBlock* GetSucc(unsigned i, Compiler* comp = NULL);
+ BasicBlock* GetSucc(unsigned i, Compiler* comp = nullptr);
BasicBlock* GetUniquePred(Compiler* comp);
@@ -617,12 +622,12 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
return bbRefs;
}
- GenTree* bbTreeList; // the body of the block
- EntryState* bbEntryState; // verifier tracked state of all entries in stack.
+ GenTree* bbTreeList; // the body of the block
+ EntryState* bbEntryState; // verifier tracked state of all entries in stack.
-#define NO_BASE_TMP UINT_MAX // base# to use when we have none
- unsigned bbStkTempsIn; // base# for input stack temps
- unsigned bbStkTempsOut; // base# for output stack temps
+#define NO_BASE_TMP UINT_MAX // base# to use when we have none
+ unsigned bbStkTempsIn; // base# for input stack temps
+ unsigned bbStkTempsOut; // base# for output stack temps
#define MAX_XCPTN_INDEX (USHRT_MAX - 1)
@@ -631,11 +636,11 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
// index, into the compHndBBtab table, of innermost 'try' clause containing the BB (used for raising exceptions).
// Stored as index + 1; 0 means "no try index".
- unsigned short bbTryIndex;
+ unsigned short bbTryIndex;
// index, into the compHndBBtab table, of innermost handler (filter, catch, fault/finally) containing the BB.
// Stored as index + 1; 0 means "no handler index".
- unsigned short bbHndIndex;
+ unsigned short bbHndIndex;
// Given two EH indices that are either bbTryIndex or bbHndIndex (or related), determine if index1 might be more
// deeply nested than index2. Both index1 and index2 are in the range [0..compHndBBtabCount], where 0 means
@@ -667,42 +672,76 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
}
// catch type: class token of handler, or one of BBCT_*. Only set on first block of catch handler.
- unsigned bbCatchTyp;
-
- bool hasTryIndex() const { return bbTryIndex != 0; }
- bool hasHndIndex() const { return bbHndIndex != 0; }
- unsigned getTryIndex() const { assert(bbTryIndex != 0); return bbTryIndex - 1; }
- unsigned getHndIndex() const { assert(bbHndIndex != 0); return bbHndIndex - 1; }
- void setTryIndex(unsigned val) { bbTryIndex = (unsigned short)(val + 1); assert(bbTryIndex != 0); }
- void setHndIndex(unsigned val) { bbHndIndex = (unsigned short)(val + 1); assert(bbHndIndex != 0); }
- void clearTryIndex() { bbTryIndex = 0; }
- void clearHndIndex() { bbHndIndex = 0; }
-
- void copyEHRegion(const BasicBlock* from)
+ unsigned bbCatchTyp;
+
+ bool hasTryIndex() const
+ {
+ return bbTryIndex != 0;
+ }
+ bool hasHndIndex() const
+ {
+ return bbHndIndex != 0;
+ }
+ unsigned getTryIndex() const
+ {
+ assert(bbTryIndex != 0);
+ return bbTryIndex - 1;
+ }
+ unsigned getHndIndex() const
+ {
+ assert(bbHndIndex != 0);
+ return bbHndIndex - 1;
+ }
+ void setTryIndex(unsigned val)
+ {
+ bbTryIndex = (unsigned short)(val + 1);
+ assert(bbTryIndex != 0);
+ }
+ void setHndIndex(unsigned val)
+ {
+ bbHndIndex = (unsigned short)(val + 1);
+ assert(bbHndIndex != 0);
+ }
+ void clearTryIndex()
+ {
+ bbTryIndex = 0;
+ }
+ void clearHndIndex()
+ {
+ bbHndIndex = 0;
+ }
+
+ void copyEHRegion(const BasicBlock* from)
{
bbTryIndex = from->bbTryIndex;
bbHndIndex = from->bbHndIndex;
}
- static bool sameTryRegion(const BasicBlock* blk1, const BasicBlock* blk2) { return blk1->bbTryIndex == blk2->bbTryIndex; }
- static bool sameHndRegion(const BasicBlock* blk1, const BasicBlock* blk2) { return blk1->bbHndIndex == blk2->bbHndIndex; }
- static bool sameEHRegion (const BasicBlock* blk1, const BasicBlock* blk2) { return sameTryRegion(blk1,blk2) && sameHndRegion(blk1,blk2); }
+ static bool sameTryRegion(const BasicBlock* blk1, const BasicBlock* blk2)
+ {
+ return blk1->bbTryIndex == blk2->bbTryIndex;
+ }
+ static bool sameHndRegion(const BasicBlock* blk1, const BasicBlock* blk2)
+ {
+ return blk1->bbHndIndex == blk2->bbHndIndex;
+ }
+ static bool sameEHRegion(const BasicBlock* blk1, const BasicBlock* blk2)
+ {
+ return sameTryRegion(blk1, blk2) && sameHndRegion(blk1, blk2);
+ }
// Some non-zero value that will not collide with real tokens for bbCatchTyp
-#define BBCT_NONE 0x00000000
-#define BBCT_FAULT 0xFFFFFFFC
-#define BBCT_FINALLY 0xFFFFFFFD
-#define BBCT_FILTER 0xFFFFFFFE
-#define BBCT_FILTER_HANDLER 0xFFFFFFFF
-#define handlerGetsXcptnObj(hndTyp) ((hndTyp) != BBCT_NONE && \
- (hndTyp) != BBCT_FAULT && \
- (hndTyp) != BBCT_FINALLY )
+#define BBCT_NONE 0x00000000
+#define BBCT_FAULT 0xFFFFFFFC
+#define BBCT_FINALLY 0xFFFFFFFD
+#define BBCT_FILTER 0xFFFFFFFE
+#define BBCT_FILTER_HANDLER 0xFFFFFFFF
+#define handlerGetsXcptnObj(hndTyp) ((hndTyp) != BBCT_NONE && (hndTyp) != BBCT_FAULT && (hndTyp) != BBCT_FINALLY)
// TODO-Cleanup: Get rid of bbStkDepth and use bbStackDepthOnEntry() instead
- union
- {
- unsigned short bbStkDepth; // stack depth on entry
- unsigned short bbFPinVars; // number of inner enregistered FP vars
+ union {
+ unsigned short bbStkDepth; // stack depth on entry
+ unsigned short bbFPinVars; // number of inner enregistered FP vars
};
// Basic block predecessor lists. Early in compilation, some phases might need to compute "cheap" predecessor
@@ -711,63 +750,62 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
// in 'bbPreds', and then maintained throughout compilation. 'fgComputePredsDone' will be 'true' after the
// full predecessor lists are created. See the comment at fgComputeCheapPreds() to see how those differ from
// the "full" variant.
- union
- {
- BasicBlockList* bbCheapPreds; // ptr to list of cheap predecessors (used before normal preds are computed)
- flowList* bbPreds; // ptr to list of predecessors
+ union {
+ BasicBlockList* bbCheapPreds; // ptr to list of cheap predecessors (used before normal preds are computed)
+ flowList* bbPreds; // ptr to list of predecessors
};
- BlockSet bbReach; // Set of all blocks that can reach this one
- BasicBlock* bbIDom; // Represent the closest dominator to this block (called the Immediate
- // Dominator) used to compute the dominance tree.
- unsigned bbDfsNum; // The index of this block in DFS reverse post order
- // relative to the flow graph.
+ BlockSet bbReach; // Set of all blocks that can reach this one
+ BasicBlock* bbIDom; // Represent the closest dominator to this block (called the Immediate
+ // Dominator) used to compute the dominance tree.
+ unsigned bbDfsNum; // The index of this block in DFS reverse post order
+ // relative to the flow graph.
#if ASSERTION_PROP
// A set of blocks which dominate this one *except* the normal entry block. This is lazily initialized
// and used only by Assertion Prop, intersected with fgEnterBlks!
- BlockSet bbDoms;
+ BlockSet bbDoms;
#endif
- IL_OFFSET bbCodeOffs; // IL offset of the beginning of the block
- IL_OFFSET bbCodeOffsEnd; // IL offset past the end of the block. Thus, the [bbCodeOffs..bbCodeOffsEnd)
- // range is not inclusive of the end offset. The count of IL bytes in the block
- // is bbCodeOffsEnd - bbCodeOffs, assuming neither are BAD_IL_OFFSET.
+ IL_OFFSET bbCodeOffs; // IL offset of the beginning of the block
+ IL_OFFSET bbCodeOffsEnd; // IL offset past the end of the block. Thus, the [bbCodeOffs..bbCodeOffsEnd)
+ // range is not inclusive of the end offset. The count of IL bytes in the block
+ // is bbCodeOffsEnd - bbCodeOffs, assuming neither are BAD_IL_OFFSET.
#ifdef DEBUG
- void dspBlockILRange(); // Display the block's IL range as [XXX...YYY), where XXX and YYY might be "???" for BAD_IL_OFFSET.
-#endif // DEBUG
+ void dspBlockILRange(); // Display the block's IL range as [XXX...YYY), where XXX and YYY might be "???" for
+ // BAD_IL_OFFSET.
+#endif // DEBUG
- VARSET_TP bbVarUse; // variables used by block (before an assignment)
- VARSET_TP bbVarDef; // variables assigned by block (before a use)
- VARSET_TP bbVarTmp; // TEMP: only used by FP enregistering code!
+ VARSET_TP bbVarUse; // variables used by block (before an assignment)
+ VARSET_TP bbVarDef; // variables assigned by block (before a use)
+ VARSET_TP bbVarTmp; // TEMP: only used by FP enregistering code!
- VARSET_TP bbLiveIn; // variables live on entry
- VARSET_TP bbLiveOut; // variables live on exit
+ VARSET_TP bbLiveIn; // variables live on entry
+ VARSET_TP bbLiveOut; // variables live on exit
// Use, def, live in/out information for the implicit "Heap" variable.
- unsigned bbHeapUse: 1;
- unsigned bbHeapDef: 1;
- unsigned bbHeapLiveIn: 1;
- unsigned bbHeapLiveOut: 1;
- unsigned bbHeapHavoc: 1; // If true, at some point the block does an operation that leaves the heap
- // in an unknown state. (E.g., unanalyzed call, store through unknown
- // pointer...)
+ unsigned bbHeapUse : 1;
+ unsigned bbHeapDef : 1;
+ unsigned bbHeapLiveIn : 1;
+ unsigned bbHeapLiveOut : 1;
+ unsigned bbHeapHavoc : 1; // If true, at some point the block does an operation that leaves the heap
+ // in an unknown state. (E.g., unanalyzed call, store through unknown
+ // pointer...)
// We want to make phi functions for the special implicit var "Heap". But since this is not a real
// lclVar, and thus has no local #, we can't use a GenTreePhiArg. Instead, we use this struct.
struct HeapPhiArg
{
- bool m_isSsaNum; // If true, the phi arg is an SSA # for an internal try block heap state, being
- // added to the phi of a catch block. If false, it's a pred block.
- union
- {
- BasicBlock* m_predBB; // Predecessor block from which the SSA # flows.
- unsigned m_ssaNum; // SSA# for internal block heap state.
+ bool m_isSsaNum; // If true, the phi arg is an SSA # for an internal try block heap state, being
+ // added to the phi of a catch block. If false, it's a pred block.
+ union {
+ BasicBlock* m_predBB; // Predecessor block from which the SSA # flows.
+ unsigned m_ssaNum; // SSA# for internal block heap state.
};
HeapPhiArg* m_nextArg; // Next arg in the list, else NULL.
- unsigned GetSsaNum()
+ unsigned GetSsaNum()
{
if (m_isSsaNum)
{
@@ -780,22 +818,28 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
}
}
- HeapPhiArg(BasicBlock* predBB, HeapPhiArg* nextArg = NULL) : m_isSsaNum(false), m_predBB(predBB), m_nextArg(nextArg) {}
- HeapPhiArg(unsigned ssaNum, HeapPhiArg* nextArg = NULL) : m_isSsaNum(true), m_ssaNum(ssaNum), m_nextArg(nextArg) {}
+ HeapPhiArg(BasicBlock* predBB, HeapPhiArg* nextArg = nullptr)
+ : m_isSsaNum(false), m_predBB(predBB), m_nextArg(nextArg)
+ {
+ }
+ HeapPhiArg(unsigned ssaNum, HeapPhiArg* nextArg = nullptr)
+ : m_isSsaNum(true), m_ssaNum(ssaNum), m_nextArg(nextArg)
+ {
+ }
void* operator new(size_t sz, class Compiler* comp);
};
- static HeapPhiArg* EmptyHeapPhiDef; // Special value (0x1, FWIW) to represent a to-be-filled in Phi arg list
- // for Heap.
- HeapPhiArg* bbHeapSsaPhiFunc; // If the "in" Heap SSA var is not a phi definition, this value is NULL.
- // Otherwise, it is either the special value EmptyHeapPhiDefn, to indicate
- // that Heap needs a phi definition on entry, or else it is the linked list
- // of the phi arguments.
- unsigned bbHeapSsaNumIn; // The SSA # of "Heap" on entry to the block.
- unsigned bbHeapSsaNumOut; // The SSA # of "Heap" on exit from the block.
+ static HeapPhiArg* EmptyHeapPhiDef; // Special value (0x1, FWIW) to represent a to-be-filled in Phi arg list
+ // for Heap.
+ HeapPhiArg* bbHeapSsaPhiFunc; // If the "in" Heap SSA var is not a phi definition, this value is NULL.
+ // Otherwise, it is either the special value EmptyHeapPhiDefn, to indicate
+ // that Heap needs a phi definition on entry, or else it is the linked list
+ // of the phi arguments.
+ unsigned bbHeapSsaNumIn; // The SSA # of "Heap" on entry to the block.
+ unsigned bbHeapSsaNumOut; // The SSA # of "Heap" on exit from the block.
#ifdef DEBUGGING_SUPPORT
- VARSET_TP bbScope; // variables in scope over the block
+ VARSET_TP bbScope; // variables in scope over the block
#endif
void InitVarSets(class Compiler* comp);
@@ -806,55 +850,50 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
* thus we can union them since the two operations are completely disjunct.
*/
- union
- {
- EXPSET_TP bbCseGen; // CSEs computed by block
+ union {
+ EXPSET_TP bbCseGen; // CSEs computed by block
#if ASSERTION_PROP
- ASSERT_TP bbAssertionGen; // value assignments computed by block
+ ASSERT_TP bbAssertionGen; // value assignments computed by block
#endif
};
- union
- {
+ union {
#if ASSERTION_PROP
- ASSERT_TP bbAssertionKill; // value assignments killed by block
+ ASSERT_TP bbAssertionKill; // value assignments killed by block
#endif
};
- union
- {
- EXPSET_TP bbCseIn; // CSEs available on entry
+ union {
+ EXPSET_TP bbCseIn; // CSEs available on entry
#if ASSERTION_PROP
- ASSERT_TP bbAssertionIn; // value assignments available on entry
+ ASSERT_TP bbAssertionIn; // value assignments available on entry
#endif
};
- union
- {
- EXPSET_TP bbCseOut; // CSEs available on exit
+ union {
+ EXPSET_TP bbCseOut; // CSEs available on exit
#if ASSERTION_PROP
- ASSERT_TP bbAssertionOut; // value assignments available on exit
+ ASSERT_TP bbAssertionOut; // value assignments available on exit
#endif
};
-
- void* bbEmitCookie;
+ void* bbEmitCookie;
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- void* bbUnwindNopEmitCookie;
+ void* bbUnwindNopEmitCookie;
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
#ifdef VERIFIER
- stackDesc bbStackIn; // stack descriptor for input
- stackDesc bbStackOut; // stack descriptor for output
+ stackDesc bbStackIn; // stack descriptor for input
+ stackDesc bbStackOut; // stack descriptor for output
- verTypeVal* bbTypesIn; // list of variable types on input
- verTypeVal* bbTypesOut; // list of variable types on output
-#endif // VERIFIER
+ verTypeVal* bbTypesIn; // list of variable types on input
+ verTypeVal* bbTypesOut; // list of variable types on output
+#endif // VERIFIER
#if FEATURE_STACK_FP_X87
- FlatFPStateX87* bbFPStateX87; // State of FP stack on entry to the basic block
-#endif // FEATURE_STACK_FP_X87
+ FlatFPStateX87* bbFPStateX87; // State of FP stack on entry to the basic block
+#endif // FEATURE_STACK_FP_X87
/* The following fields used for loop detection */
@@ -864,55 +903,54 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
// This is the label a loop gets as part of the second, reachability-based
// loop discovery mechanism. This is apparently only used for debugging.
// We hope we'll eventually just have one loop-discovery mechanism, and this will go away.
- unsigned char bbLoopNum; // set to 'n' for a loop #n header
-#endif // DEBUG
+ unsigned char bbLoopNum; // set to 'n' for a loop #n header
+#endif // DEBUG
- unsigned char bbNatLoopNum; // Index, in optLoopTable, of most-nested loop that contains this block,
- // or else NOT_IN_LOOP if this block is not in a loop.
+ unsigned char bbNatLoopNum; // Index, in optLoopTable, of most-nested loop that contains this block,
+ // or else NOT_IN_LOOP if this block is not in a loop.
-#define MAX_LOOP_NUM 16 // we're using a 'short' for the mask
-#define LOOP_MASK_TP unsigned // must be big enough for a mask
+#define MAX_LOOP_NUM 16 // we're using a 'short' for the mask
+#define LOOP_MASK_TP unsigned // must be big enough for a mask
+//-------------------------------------------------------------------------
- //-------------------------------------------------------------------------
-
-#if MEASURE_BLOCK_SIZE
- static size_t s_Size;
- static size_t s_Count;
+#if MEASURE_BLOCK_SIZE
+ static size_t s_Size;
+ static size_t s_Count;
#endif // MEASURE_BLOCK_SIZE
- bool bbFallsThrough();
+ bool bbFallsThrough();
// Our slop fraction is 1/128 of the block weight rounded off
- static weight_t GetSlopFraction(weight_t weightBlk)
+ static weight_t GetSlopFraction(weight_t weightBlk)
{
- return ((weightBlk + 64) /128);
+ return ((weightBlk + 64) / 128);
}
// Given an the edge b1 -> b2, calculate the slop fraction by
// using the higher of the two block weights
- static weight_t GetSlopFraction(BasicBlock* b1, BasicBlock* b2)
+ static weight_t GetSlopFraction(BasicBlock* b1, BasicBlock* b2)
{
return GetSlopFraction(max(b1->bbWeight, b2->bbWeight));
}
#ifdef DEBUG
- unsigned bbTgtStkDepth; // Native stack depth on entry (for throw-blocks)
- static unsigned s_nMaxTrees; // The max # of tree nodes in any BB
+ unsigned bbTgtStkDepth; // Native stack depth on entry (for throw-blocks)
+ static unsigned s_nMaxTrees; // The max # of tree nodes in any BB
- unsigned bbStmtNum; // The statement number of the first stmt in this block
+ unsigned bbStmtNum; // The statement number of the first stmt in this block
// This is used in integrity checks. We semi-randomly pick a traversal stamp, label all blocks
// in the BB list with that stamp (in this field); then we can tell if (e.g.) predecessors are
// still in the BB list by whether they have the same stamp (with high probability).
- unsigned bbTraversalStamp;
+ unsigned bbTraversalStamp;
#endif // DEBUG
- ThisInitState bbThisOnEntry();
- unsigned bbStackDepthOnEntry();
- void bbSetStack(void* stackBuffer);
- StackEntry* bbStackOnEntry();
- void bbSetRunRarely();
+ ThisInitState bbThisOnEntry();
+ unsigned bbStackDepthOnEntry();
+ void bbSetStack(void* stackBuffer);
+ StackEntry* bbStackOnEntry();
+ void bbSetRunRarely();
// "bbNum" is one-based (for unknown reasons); it is sometimes useful to have the corresponding
// zero-based number for use as an array index.
@@ -921,44 +959,60 @@ typedef unsigned weight_t; // Type used to hold block and edge weigh
assert(bbNum > 0);
return bbNum - 1;
}
-
+
GenTreeStmt* firstStmt();
GenTreeStmt* lastStmt();
GenTreeStmt* lastTopLevelStmt();
bool containsStatement(GenTree* statement);
- bool endsWithJmpMethod(Compiler *comp);
+ bool endsWithJmpMethod(Compiler* comp);
- bool endsWithTailCall(Compiler* comp, bool fastTailCallsOnly, bool tailCallsConvertibleToLoopOnly, GenTree** tailCall);
+ bool endsWithTailCall(Compiler* comp,
+ bool fastTailCallsOnly,
+ bool tailCallsConvertibleToLoopOnly,
+ GenTree** tailCall);
- bool endsWithTailCallOrJmp(Compiler *comp,
- bool fastTailCallsOnly = false);
+ bool endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly = false);
- bool endsWithTailCallConvertibleToLoop(Compiler *comp, GenTree** tailCall);
+ bool endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall);
// Returns the first statement in the statement list of "this" that is
// not an SSA definition (a lcl = phi(...) assignment).
GenTreeStmt* FirstNonPhiDef();
- GenTree* FirstNonPhiDefOrCatchArgAsg();
+ GenTree* FirstNonPhiDefOrCatchArgAsg();
- BasicBlock() :
+ BasicBlock()
+ :
#if ASSERTION_PROP
- BLOCKSET_INIT_NOCOPY(bbDoms, BlockSetOps::UninitVal()),
+ BLOCKSET_INIT_NOCOPY(bbDoms, BlockSetOps::UninitVal())
+ ,
#endif // ASSERTION_PROP
- VARSET_INIT_NOCOPY(bbLiveIn, VarSetOps::UninitVal()),
- VARSET_INIT_NOCOPY(bbLiveOut, VarSetOps::UninitVal())
+ VARSET_INIT_NOCOPY(bbLiveIn, VarSetOps::UninitVal())
+ , VARSET_INIT_NOCOPY(bbLiveOut, VarSetOps::UninitVal())
{
}
private:
- EHSuccessorIter StartEHSuccs(Compiler* comp) { return EHSuccessorIter(comp, this); }
- EHSuccessorIter EndEHSuccs() { return EHSuccessorIter(); }
+ EHSuccessorIter StartEHSuccs(Compiler* comp)
+ {
+ return EHSuccessorIter(comp, this);
+ }
+ EHSuccessorIter EndEHSuccs()
+ {
+ return EHSuccessorIter();
+ }
friend struct EHSuccs;
- AllSuccessorIter StartAllSuccs(Compiler* comp) { return AllSuccessorIter(comp, this); }
- AllSuccessorIter EndAllSuccs(Compiler* comp) { return AllSuccessorIter(NumSucc(comp)); }
+ AllSuccessorIter StartAllSuccs(Compiler* comp)
+ {
+ return AllSuccessorIter(comp, this);
+ }
+ AllSuccessorIter EndAllSuccs(Compiler* comp)
+ {
+ return AllSuccessorIter(NumSucc(comp));
+ }
friend struct AllSuccs;
@@ -968,11 +1022,20 @@ public:
{
Compiler* m_comp;
BasicBlock* m_block;
+
public:
- EHSuccs(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) {}
+ EHSuccs(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block)
+ {
+ }
- EHSuccessorIter begin() { return m_block->StartEHSuccs(m_comp); }
- EHSuccessorIter end() { return EHSuccessorIter(); }
+ EHSuccessorIter begin()
+ {
+ return m_block->StartEHSuccs(m_comp);
+ }
+ EHSuccessorIter end()
+ {
+ return EHSuccessorIter();
+ }
};
EHSuccs GetEHSuccs(Compiler* comp)
@@ -984,11 +1047,20 @@ public:
{
Compiler* m_comp;
BasicBlock* m_block;
+
public:
- AllSuccs(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) {}
+ AllSuccs(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block)
+ {
+ }
- AllSuccessorIter begin() { return m_block->StartAllSuccs(m_comp); }
- AllSuccessorIter end() { return AllSuccessorIter(m_block->NumSucc(m_comp)); }
+ AllSuccessorIter begin()
+ {
+ return m_block->StartAllSuccs(m_comp);
+ }
+ AllSuccessorIter end()
+ {
+ return AllSuccessorIter(m_block->NumSucc(m_comp));
+ }
};
AllSuccs GetAllSuccs(Compiler* comp)
@@ -1062,29 +1134,27 @@ typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, BasicBlock*, JitS
struct BasicBlockList
{
- BasicBlockList* next; // The next BasicBlock in the list, nullptr for end of list.
- BasicBlock* block; // The BasicBlock of interest.
-
- BasicBlockList()
- : next(NULL)
- , block(NULL)
- {}
-
- BasicBlockList(BasicBlock* blk, BasicBlockList* rest)
- : next(rest)
- , block(blk)
- {}
+ BasicBlockList* next; // The next BasicBlock in the list, nullptr for end of list.
+ BasicBlock* block; // The BasicBlock of interest.
+
+ BasicBlockList() : next(nullptr), block(nullptr)
+ {
+ }
+
+ BasicBlockList(BasicBlock* blk, BasicBlockList* rest) : next(rest), block(blk)
+ {
+ }
};
struct flowList
{
- flowList* flNext; // The next BasicBlock in the list, nullptr for end of list.
- BasicBlock* flBlock; // The BasicBlock of interest.
+ flowList* flNext; // The next BasicBlock in the list, nullptr for end of list.
+ BasicBlock* flBlock; // The BasicBlock of interest.
- BasicBlock::weight_t flEdgeWeightMin;
- BasicBlock::weight_t flEdgeWeightMax;
+ BasicBlock::weight_t flEdgeWeightMin;
+ BasicBlock::weight_t flEdgeWeightMax;
- unsigned flDupCount; // The count of duplicate "edges" (use only for switch stmts)
+ unsigned flDupCount; // The count of duplicate "edges" (use only for switch stmts)
// These two methods are used to set new values for flEdgeWeightMin and flEdgeWeightMax
// they are used only during the computation of the edge weights
@@ -1094,30 +1164,23 @@ struct flowList
bool setEdgeWeightMinChecked(BasicBlock::weight_t newWeight, BasicBlock::weight_t slop, bool* wbUsedSlop);
bool setEdgeWeightMaxChecked(BasicBlock::weight_t newWeight, BasicBlock::weight_t slop, bool* wbUsedSlop);
- flowList()
- : flNext(NULL)
- , flBlock(NULL)
- , flEdgeWeightMin(0)
- , flEdgeWeightMax(0)
- , flDupCount(0)
- {}
+ flowList() : flNext(nullptr), flBlock(nullptr), flEdgeWeightMin(0), flEdgeWeightMax(0), flDupCount(0)
+ {
+ }
flowList(BasicBlock* blk, flowList* rest)
- : flNext(rest)
- , flBlock(blk)
- , flEdgeWeightMin(0)
- , flEdgeWeightMax(0)
- , flDupCount(0)
- {}
+ : flNext(rest), flBlock(blk), flEdgeWeightMin(0), flEdgeWeightMax(0), flDupCount(0)
+ {
+ }
};
// This enum represents a pre/post-visit action state to emulate a depth-first
// spanning tree traversal of a tree or graph.
enum DfsStackState
{
- DSS_Invalid, // The initialized, invalid error state
- DSS_Pre, // The DFS pre-order (first visit) traversal state
- DSS_Post // The DFS post-order (last visit) traversal state
+ DSS_Invalid, // The initialized, invalid error state
+ DSS_Pre, // The DFS pre-order (first visit) traversal state
+ DSS_Post // The DFS post-order (last visit) traversal state
};
// These structs represents an entry in a stack used to emulate a non-recursive
@@ -1125,42 +1188,38 @@ enum DfsStackState
// block pointer or a block number depending on which is more useful.
struct DfsBlockEntry
{
- DfsStackState dfsStackState; // The pre/post traversal action for this entry
- BasicBlock* dfsBlock; // The corresponding block for the action
-
- DfsBlockEntry()
- : dfsStackState(DSS_Invalid)
- , dfsBlock(nullptr)
- {}
-
- DfsBlockEntry(DfsStackState state, BasicBlock* basicBlock)
- : dfsStackState(state)
- , dfsBlock(basicBlock)
- {}
+ DfsStackState dfsStackState; // The pre/post traversal action for this entry
+ BasicBlock* dfsBlock; // The corresponding block for the action
+
+ DfsBlockEntry() : dfsStackState(DSS_Invalid), dfsBlock(nullptr)
+ {
+ }
+
+ DfsBlockEntry(DfsStackState state, BasicBlock* basicBlock) : dfsStackState(state), dfsBlock(basicBlock)
+ {
+ }
};
struct DfsNumEntry
{
- DfsStackState dfsStackState; // The pre/post traversal action for this entry
- unsigned dfsNum; // The corresponding block number for the action
-
- DfsNumEntry()
- : dfsStackState(DSS_Invalid)
- , dfsNum(0)
- {}
-
- DfsNumEntry(DfsStackState state, unsigned bbNum)
- : dfsStackState(state)
- , dfsNum(bbNum)
- {}
+ DfsStackState dfsStackState; // The pre/post traversal action for this entry
+ unsigned dfsNum; // The corresponding block number for the action
+
+ DfsNumEntry() : dfsStackState(DSS_Invalid), dfsNum(0)
+ {
+ }
+
+ DfsNumEntry(DfsStackState state, unsigned bbNum) : dfsStackState(state), dfsNum(bbNum)
+ {
+ }
};
/*****************************************************************************/
-extern BasicBlock* __cdecl verAllocBasicBlock();
+extern BasicBlock* __cdecl verAllocBasicBlock();
-#ifdef DEBUG
-extern void __cdecl verDispBasicBlocks();
+#ifdef DEBUG
+extern void __cdecl verDispBasicBlocks();
#endif
/*****************************************************************************
@@ -1169,14 +1228,10 @@ extern void __cdecl verDispBasicBlocks();
* emitter to convert a basic block to its corresponding emitter cookie.
*/
-void* emitCodeGetCookie(BasicBlock* block);
+void* emitCodeGetCookie(BasicBlock* block);
AllSuccessorIter::AllSuccessorIter(Compiler* comp, BasicBlock* block)
- : m_comp(comp),
- m_blk(block),
- m_normSucc(0),
- m_numNormSuccs(block->NumSucc(comp)),
- m_ehIter(comp, block)
+ : m_comp(comp), m_blk(block), m_normSucc(0), m_numNormSuccs(block->NumSucc(comp)), m_ehIter(comp, block)
{
if (CurTryIsBlkCallFinallyTarget())
{
@@ -1186,8 +1241,7 @@ AllSuccessorIter::AllSuccessorIter(Compiler* comp, BasicBlock* block)
bool AllSuccessorIter::CurTryIsBlkCallFinallyTarget()
{
- return (m_blk->bbJumpKind == BBJ_CALLFINALLY) &&
- (m_ehIter != EHSuccessorIter()) &&
+ return (m_blk->bbJumpKind == BBJ_CALLFINALLY) && (m_ehIter != EHSuccessorIter()) &&
(m_blk->bbJumpDest == (*m_ehIter));
}
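
As a quick standalone illustration (not part of the patch) of the edge-weight slop arithmetic reformatted in the block.h hunks above: GetSlopFraction takes a block weight and returns roughly 1/128 of it, rounded to nearest, and the two-block overload feeds it the heavier endpoint of the edge. The values below are made up for the example.

    #include <algorithm>
    #include <cassert>

    typedef unsigned weight_t; // mirrors the block/edge weight type above

    // Same computation as BasicBlock::GetSlopFraction(weight_t): 1/128 of the
    // weight, with the +64 giving round-to-nearest in integer division.
    static weight_t GetSlopFraction(weight_t weightBlk)
    {
        return (weightBlk + 64) / 128;
    }

    int main()
    {
        weight_t w1 = 1000, w2 = 900; // hypothetical bbWeight values
        // The edge form uses the heavier endpoint: max(1000, 900) = 1000,
        // so the slop is (1000 + 64) / 128 == 8.
        assert(GetSlopFraction(std::max(w1, w2)) == 8);
        return 0;
    }
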
diff --git a/src/jit/blockset.h b/src/jit/blockset.h
index b47dcd6c04..c8e27eabe8 100644
--- a/src/jit/blockset.h
+++ b/src/jit/blockset.h
@@ -16,7 +16,7 @@
// 0th bit to avoid having to do "bbNum - 1" calculations everywhere (at the BlockSet call
// sites). This makes reading the code easier, and avoids potential problems of forgetting
// to do a "- 1" somewhere.
-//
+//
// Basic blocks can be renumbered during compilation, so it is important to not mix
// BlockSets created before and after a renumbering. Every time the blocks are renumbered
// creates a different "epoch", during which the basic block numbers are stable.
@@ -25,27 +25,25 @@
#include "compilerbitsettraits.h"
#include "bitsetasshortlong.h"
-class BlockSetOps : public BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Compiler*,
- /*BitSetTraits*/BasicBlockBitSetTraits>
+class BlockSetOps : public BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Compiler*,
+ /*BitSetTraits*/ BasicBlockBitSetTraits>
{
public:
// Specialize BlockSetOps::MakeFull(). Since we number basic blocks from one, we remove bit zero from
// the block set. Otherwise, IsEmpty() would never return true.
- static
- BitSetShortLongRep
- MakeFull(Compiler* env)
+ static BitSetShortLongRep MakeFull(Compiler* env)
{
BitSetShortLongRep retval;
// First, make a full set using the BitSetOps::MakeFull
- retval = BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/BSShortLong,
- /*Env*/Compiler*,
- /*BitSetTraits*/BasicBlockBitSetTraits>::MakeFull(env);
-
+ retval = BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ BSShortLong,
+ /*Env*/ Compiler*,
+ /*BitSetTraits*/ BasicBlockBitSetTraits>::MakeFull(env);
+
// Now, remove element zero, since we number basic blocks starting at one, and index the set with the
// basic block number. If we left this, then IsEmpty() would never return true.
BlockSetOps::RemoveElemD(env, retval, 0);
@@ -54,11 +52,11 @@ public:
}
};
-typedef BitSetShortLongRep BlockSet;
+typedef BitSetShortLongRep BlockSet;
// These types should be used as the types for BlockSet arguments and return values, respectively.
-typedef BlockSetOps::ValArgType BlockSet_ValArg_T;
-typedef BlockSetOps::RetValType BlockSet_ValRet_T;
+typedef BlockSetOps::ValArgType BlockSet_ValArg_T;
+typedef BlockSetOps::RetValType BlockSet_ValRet_T;
// Initialize "_varName" to "_initVal." Copies contents, not references; if "_varName" is uninitialized, allocates a
// var set for it (using "_comp" for any necessary allocation), and copies the contents of "_initVal" into it.
@@ -72,8 +70,8 @@ typedef BlockSetOps::RetValType BlockSet_ValRet_T;
// Use this to initialize an iterator "_iterName" to iterate over a BlockSet "_blockSet".
// "_blockNum" will be an unsigned variable to which we assign the elements of "_blockSet".
-#define BLOCKSET_ITER_INIT(_comp, _iterName, _blockSet, _blockNum) \
- unsigned _blockNum = 0; \
+#define BLOCKSET_ITER_INIT(_comp, _iterName, _blockSet, _blockNum) \
+ unsigned _blockNum = 0; \
BlockSetOps::Iter _iterName(_comp, _blockSet)
#endif // _BLOCKSET_INCLUDED_
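
For context on the BLOCKSET_ITER_INIT macro reformatted just above, here is a sketch of the typical call pattern (not part of the patch). It assumes the Iter::NextElem(env, &elem) shape exposed by the BitSetOps iterators in these sources, and the comp/reachable names are hypothetical.

    // Hypothetical fragment: 'comp' is a Compiler*, 'reachable' is a BlockSet
    // built during the current block-numbering epoch. The macro declares both
    // the unsigned cursor 'bbNum' and the iterator 'iter'.
    BLOCKSET_ITER_INIT(comp, iter, reachable, bbNum);
    while (iter.NextElem(comp, &bbNum))
    {
        // bbNum is a one-based basic block number; bit 0 is never set because
        // the MakeFull() specialization above removes element zero.
    }
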
diff --git a/src/jit/codegen.h b/src/jit/codegen.h
index 32937bdc8c..884a5ffa8c 100755
--- a/src/jit/codegen.h
+++ b/src/jit/codegen.h
@@ -6,7 +6,7 @@
// This class contains all the data & functionality for code generation
// of a method, except for the target-specific elements, which are
// primarily in the Target class.
-//
+//
#ifndef _CODEGEN_H_
#define _CODEGEN_H_
@@ -16,10 +16,9 @@
#include "jitgcinfo.h"
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) || defined(_TARGET_ARM_)
-#define FOREACH_REGISTER_FILE(file)\
- for ((file) = &(this->intRegState); \
- (file) != NULL; \
- (file) = ((file) == &(this->intRegState)) ? &(this->floatRegState) : NULL )
+#define FOREACH_REGISTER_FILE(file) \
+ for ((file) = &(this->intRegState); (file) != NULL; \
+ (file) = ((file) == &(this->intRegState)) ? &(this->floatRegState) : NULL)
#else
#define FOREACH_REGISTER_FILE(file) (file) = &(this->intRegState);
#endif
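
As a standalone sketch (not part of the patch) of what the reformatted FOREACH_REGISTER_FILE macro does on targets that define both register files: it visits intRegState first, then floatRegState, and stops once the cursor advances to NULL. The mock type below is invented purely for illustration.

    #include <cstddef>
    #include <cstdio>

    struct RegState
    {
        const char* name;
    };

    // Same shape as the macro above: walk intRegState, then floatRegState.
    #define FOREACH_REGISTER_FILE(file) for ((file) = &(this->intRegState); (file) != NULL; (file) = ((file) == &(this->intRegState)) ? &(this->floatRegState) : NULL)

    struct CodeGenMock
    {
        RegState intRegState   = {"int"};
        RegState floatRegState = {"float"};

        void walkRegisterFiles()
        {
            RegState* file;
            FOREACH_REGISTER_FILE(file)
            {
                std::printf("%s register file\n", file->name); // "int", then "float"
            }
        }
    };

    int main()
    {
        CodeGenMock cg;
        cg.walkRegisterFiles();
        return 0;
    }
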
@@ -30,72 +29,84 @@ class CodeGen : public CodeGenInterface
friend class DisAssembler;
public:
-
// This could use further abstraction
- CodeGen(Compiler * theCompiler);
+ CodeGen(Compiler* theCompiler);
- virtual void genGenerateCode(void * * codePtr, ULONG * nativeSizeOfCode);
+ virtual void genGenerateCode(void** codePtr, ULONG* nativeSizeOfCode);
// TODO-Cleanup: Abstract out the part of this that finds the addressing mode, and
// move it to Lower
- virtual bool genCreateAddrMode(GenTreePtr addr,
- int mode,
- bool fold,
- regMaskTP regMask,
- bool * revPtr,
- GenTreePtr * rv1Ptr,
- GenTreePtr * rv2Ptr,
+ virtual bool genCreateAddrMode(GenTreePtr addr,
+ int mode,
+ bool fold,
+ regMaskTP regMask,
+ bool* revPtr,
+ GenTreePtr* rv1Ptr,
+ GenTreePtr* rv2Ptr,
#if SCALED_ADDR_MODES
- unsigned * mulPtr,
+ unsigned* mulPtr,
#endif
- unsigned * cnsPtr,
- bool nogen = false);
+ unsigned* cnsPtr,
+ bool nogen = false);
// This should move to CodeGenClassic.h after genCreateAddrMode() is no longer dependent upon it
- void genIncRegBy (regNumber reg,
- ssize_t ival,
- GenTreePtr tree,
- var_types dstType = TYP_INT,
- bool ovfl = false);
+ void genIncRegBy(regNumber reg, ssize_t ival, GenTreePtr tree, var_types dstType = TYP_INT, bool ovfl = false);
private:
-
#if defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
// Bit masks used in negating a float or double number.
// The below gentrees encapsulate the data offset to the bitmasks as GT_CLS_VAR nodes.
- // This is to avoid creating more than one data constant for these bitmasks when a
+ // This is to avoid creating more than one data constant for these bitmasks when a
// method has more than one GT_NEG operation on floating point values.
- GenTreePtr negBitmaskFlt;
- GenTreePtr negBitmaskDbl;
+ GenTreePtr negBitmaskFlt;
+ GenTreePtr negBitmaskDbl;
// Bit masks used in computing Math.Abs() of a float or double number.
- GenTreePtr absBitmaskFlt;
- GenTreePtr absBitmaskDbl;
+ GenTreePtr absBitmaskFlt;
+ GenTreePtr absBitmaskDbl;
// Bit mask used in U8 -> double conversion to adjust the result.
- GenTreePtr u8ToDblBitmask;
+ GenTreePtr u8ToDblBitmask;
// Generates SSE2 code for the given tree as "Operand BitWiseOp BitMask"
- void genSSE2BitwiseOp(GenTreePtr treeNode);
+ void genSSE2BitwiseOp(GenTreePtr treeNode);
#endif // defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
- void genPrepForCompiler ();
+ void genPrepForCompiler();
- void genPrepForEHCodegen ();
+ void genPrepForEHCodegen();
- inline RegState *regStateForType(var_types t) { return varTypeIsFloating(t) ? &floatRegState : &intRegState; }
- inline RegState *regStateForReg(regNumber reg) { return genIsValidFloatReg(reg) ? &floatRegState : &intRegState; }
+ inline RegState* regStateForType(var_types t)
+ {
+ return varTypeIsFloating(t) ? &floatRegState : &intRegState;
+ }
+ inline RegState* regStateForReg(regNumber reg)
+ {
+ return genIsValidFloatReg(reg) ? &floatRegState : &intRegState;
+ }
- regNumber genFramePointerReg() { if (isFramePointerUsed())
- return REG_FPBASE;
- else
- return REG_SPBASE; }
+ regNumber genFramePointerReg()
+ {
+ if (isFramePointerUsed())
+ {
+ return REG_FPBASE;
+ }
+ else
+ {
+ return REG_SPBASE;
+ }
+ }
- enum CompareKind { CK_SIGNED, CK_UNSIGNED, CK_LOGICAL };
+ enum CompareKind
+ {
+ CK_SIGNED,
+ CK_UNSIGNED,
+ CK_LOGICAL
+ };
static emitJumpKind genJumpKindForOper(genTreeOps cmp, CompareKind compareKind);
- // For a given compare oper tree, returns the conditions to use with jmp/set in 'jmpKind' array.
+ // For a given compare oper tree, returns the conditions to use with jmp/set in 'jmpKind' array.
// The corresponding elements of jmpToTrueLabel indicate whether the target of the jump is to the
- // 'true' label or a 'false' label.
+ // 'true' label or a 'false' label.
//
// 'true' label corresponds to jump target of the current basic block i.e. the target to
// branch to on compare condition being true. 'false' label corresponds to the target to
@@ -104,30 +115,30 @@ private:
#if !defined(_TARGET_64BIT_)
static void genJumpKindsForTreeLongHi(GenTreePtr cmpTree, emitJumpKind jmpKind[2]);
-#endif //!defined(_TARGET_64BIT_)
+#endif //! defined(_TARGET_64BIT_)
- static bool genShouldRoundFP();
+ static bool genShouldRoundFP();
- GenTreeIndir indirForm(var_types type, GenTree *base);
+ GenTreeIndir indirForm(var_types type, GenTree* base);
- GenTreeIntCon intForm(var_types type, ssize_t value);
+ GenTreeIntCon intForm(var_types type, ssize_t value);
- void genRangeCheck(GenTree *node);
+ void genRangeCheck(GenTree* node);
- void genLockedInstructions(GenTree *node);
+ void genLockedInstructions(GenTree* node);
//-------------------------------------------------------------------------
// Register-related methods
- void rsInit();
+ void rsInit();
#ifdef REG_OPT_RSVD
// On some targets such as the ARM we may need to have an extra reserved register
// that is used when addressing stack based locals and stack based temps.
- // This method returns the regNumber that should be used when an extra register
+ // This method returns the regNumber that should be used when an extra register
// is needed to access the stack based locals and stack based temps.
- //
- regNumber rsGetRsvdReg()
+ //
+ regNumber rsGetRsvdReg()
{
// We should have already added this register to the mask
// of reserved registers in regSet.rdMaskResvd
@@ -137,9 +148,9 @@ private:
}
#endif // REG_OPT_RSVD
- regNumber findStkLclInReg(unsigned lclNum)
+ regNumber findStkLclInReg(unsigned lclNum)
{
-#ifdef DEBUG
+#ifdef DEBUG
genInterruptibleUsed = true;
#endif
return regTracker.rsLclIsInReg(lclNum);
@@ -147,33 +158,31 @@ private:
//-------------------------------------------------------------------------
-
- bool genUseBlockInit; // true if we plan to block-initialize the local stack frame
- unsigned genInitStkLclCnt; // The count of local variables that we need to zero init
+ bool genUseBlockInit; // true if we plan to block-initialize the local stack frame
+ unsigned genInitStkLclCnt; // The count of local variables that we need to zero init
// Keeps track of how many bytes we've pushed on the processor's stack.
//
- unsigned genStackLevel;
+ unsigned genStackLevel;
#if STACK_PROBES
// Stack Probes
- bool genNeedPrologStackProbe;
+ bool genNeedPrologStackProbe;
- void genGenerateStackProbe();
+ void genGenerateStackProbe();
#endif
- regMaskTP genNewLiveRegMask (GenTreePtr first, GenTreePtr second);
+ regMaskTP genNewLiveRegMask(GenTreePtr first, GenTreePtr second);
// During codegen, determine the LiveSet after tree.
// Preconditions: must be called during codegen, when compCurLife and
// compCurLifeTree are being maintained, and tree must occur in the current
// statement.
- VARSET_VALRET_TP genUpdateLiveSetForward(GenTreePtr tree);
+ VARSET_VALRET_TP genUpdateLiveSetForward(GenTreePtr tree);
//-------------------------------------------------------------------------
-
- void genReportEH();
+ void genReportEH();
// Allocates storage for the GC info, writes the GC info into that storage, records the address of the
// GC info of the method with the EE, and returns a pointer to the "info" portion (just post-header) of
@@ -183,88 +192,76 @@ private:
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef JIT32_GCENCODER
- void* genCreateAndStoreGCInfo (unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
- void* genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
-#else // !JIT32_GCENCODER
- void genCreateAndStoreGCInfo (unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
- void genCreateAndStoreGCInfoX64 (unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr));
+ void* genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
+ void* genCreateAndStoreGCInfoJIT32(unsigned codeSize,
+ unsigned prologSize,
+ unsigned epilogSize DEBUGARG(void* codePtr));
+#else // !JIT32_GCENCODER
+ void genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
+ void genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr));
#endif // !JIT32_GCENCODER
/**************************************************************************
* PROTECTED
*************************************************************************/
-protected :
-
+protected:
// the current (pending) label ref, a label which has been referenced but not yet seen
- BasicBlock* genPendingCallLabel;
+ BasicBlock* genPendingCallLabel;
#ifdef DEBUG
// Last instr we have displayed for dspInstrs
- unsigned genCurDispOffset;
+ unsigned genCurDispOffset;
- static const char *genInsName(instruction ins);
+ static const char* genInsName(instruction ins);
#endif // DEBUG
//-------------------------------------------------------------------------
// JIT-time constants for use in multi-dimensional array code generation.
- unsigned genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension);
- unsigned genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension);
+ unsigned genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension);
+ unsigned genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension);
-#ifdef DEBUG
- static
- const char * genSizeStr (emitAttr size);
+#ifdef DEBUG
+ static const char* genSizeStr(emitAttr size);
- void genStressRegs (GenTreePtr tree);
+ void genStressRegs(GenTreePtr tree);
#endif // DEBUG
- void genCodeForBBlist ();
+ void genCodeForBBlist();
public:
#ifndef LEGACY_BACKEND
// genSpillVar is called by compUpdateLifeVar in the !LEGACY_BACKEND case
- void genSpillVar (GenTreePtr tree);
+ void genSpillVar(GenTreePtr tree);
#endif // !LEGACY_BACKEND
protected:
#ifndef LEGACY_BACKEND
- void genEmitHelperCall (unsigned helper,
- int argSize,
- emitAttr retSize,
- regNumber callTarget = REG_NA);
+ void genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTarget = REG_NA);
#else
- void genEmitHelperCall (unsigned helper,
- int argSize,
- emitAttr retSize);
+ void genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize);
#endif
- void genGCWriteBarrier (GenTreePtr tree,
- GCInfo::WriteBarrierForm wbf);
+ void genGCWriteBarrier(GenTreePtr tree, GCInfo::WriteBarrierForm wbf);
- BasicBlock * genCreateTempLabel ();
+ BasicBlock* genCreateTempLabel();
- void genDefineTempLabel (BasicBlock * label);
+ void genDefineTempLabel(BasicBlock* label);
- void genAdjustSP (ssize_t delta);
+ void genAdjustSP(ssize_t delta);
- void genExitCode (BasicBlock * block);
+ void genExitCode(BasicBlock* block);
//-------------------------------------------------------------------------
- GenTreePtr genMakeConst (const void * cnsAddr,
- var_types cnsType,
- GenTreePtr cnsTree,
- bool dblAlign);
+ GenTreePtr genMakeConst(const void* cnsAddr, var_types cnsType, GenTreePtr cnsTree, bool dblAlign);
//-------------------------------------------------------------------------
- void genJumpToThrowHlpBlk(emitJumpKind jumpKind,
- SpecialCodeKind codeKind,
- GenTreePtr failBlk = NULL);
-
- void genCheckOverflow (GenTreePtr tree);
+ void genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKind, GenTreePtr failBlk = nullptr);
+ void genCheckOverflow(GenTreePtr tree);
//-------------------------------------------------------------------------
//
@@ -276,99 +273,76 @@ protected:
// Prolog functions and data (there are a few exceptions for more generally used things)
//
- void genEstablishFramePointer(int delta, bool reportUnwindData);
- void genFnPrologCalleeRegArgs(regNumber xtraReg,
- bool * pXtraRegClobbered,
- RegState *regState);
- void genEnregisterIncomingStackArgs();
- void genCheckUseBlockInit();
+ void genEstablishFramePointer(int delta, bool reportUnwindData);
+ void genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbered, RegState* regState);
+ void genEnregisterIncomingStackArgs();
+ void genCheckUseBlockInit();
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
- void genClearStackVec3ArgUpperBits();
-#endif //FEATURE_UNIX_AMD64_STRUCT_PASSING && FEATURE_SIMD
+ void genClearStackVec3ArgUpperBits();
+#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING && FEATURE_SIMD
#if defined(_TARGET_ARM64_)
- bool genInstrWithConstant(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- ssize_t imm,
- regNumber tmpReg,
- bool inUnwindRegion = false);
-
- void genStackPointerAdjustment(ssize_t spAdjustment,
- regNumber tmpReg,
- bool* pTmpRegIsZero);
-
- void genPrologSaveRegPair(regNumber reg1,
- regNumber reg2,
- int spOffset,
- int spDelta,
- bool lastSavedWasPreviousPair,
- regNumber tmpReg,
- bool* pTmpRegIsZero);
-
- void genPrologSaveReg(regNumber reg1,
- int spOffset,
- int spDelta,
- regNumber tmpReg,
- bool* pTmpRegIsZero);
-
- void genEpilogRestoreRegPair(regNumber reg1,
- regNumber reg2,
- int spOffset,
- int spDelta,
- regNumber tmpReg,
- bool* pTmpRegIsZero);
-
- void genEpilogRestoreReg(regNumber reg1,
- int spOffset,
- int spDelta,
- regNumber tmpReg,
- bool* pTmpRegIsZero);
-
- void genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
- int lowestCalleeSavedOffset,
- int spDelta);
-
- void genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask,
- int lowestCalleeSavedOffset,
- int spDelta);
-
- void genPushCalleeSavedRegisters(regNumber initReg,
- bool * pInitRegZeroed);
+ bool genInstrWithConstant(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ ssize_t imm,
+ regNumber tmpReg,
+ bool inUnwindRegion = false);
+
+ void genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg, bool* pTmpRegIsZero);
+
+ void genPrologSaveRegPair(regNumber reg1,
+ regNumber reg2,
+ int spOffset,
+ int spDelta,
+ bool lastSavedWasPreviousPair,
+ regNumber tmpReg,
+ bool* pTmpRegIsZero);
+
+ void genPrologSaveReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
+
+ void genEpilogRestoreRegPair(
+ regNumber reg1, regNumber reg2, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
+
+ void genEpilogRestoreReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
+
+ void genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask, int lowestCalleeSavedOffset, int spDelta);
+
+ void genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask, int lowestCalleeSavedOffset, int spDelta);
+
+ void genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed);
#else
- void genPushCalleeSavedRegisters();
+ void genPushCalleeSavedRegisters();
#endif
- void genAllocLclFrame (unsigned frameSize,
- regNumber initReg,
- bool * pInitRegZeroed,
- regMaskTP maskArgRegsLiveIn);
+ void genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn);
#if defined(_TARGET_ARM_)
- void genPushFltRegs (regMaskTP regMask);
- void genPopFltRegs (regMaskTP regMask);
- regMaskTP genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskCalleeSavedFloat);
+ void genPushFltRegs(regMaskTP regMask);
+ void genPopFltRegs(regMaskTP regMask);
+ regMaskTP genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskCalleeSavedFloat);
- regMaskTP genJmpCallArgMask();
+ regMaskTP genJmpCallArgMask();
- void genFreeLclFrame (unsigned frameSize,
- /* IN OUT */ bool* pUnwindStarted,
- bool jmpEpilog);
+ void genFreeLclFrame(unsigned frameSize,
+ /* IN OUT */ bool* pUnwindStarted,
+ bool jmpEpilog);
- bool genUsedPopToReturn; // True if we use the pop into PC to return,
- // False if we didn't and must branch to LR to return.
+ bool genUsedPopToReturn; // True if we use the pop into PC to return,
+ // False if we didn't and must branch to LR to return.
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
- // funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the same.
+ // funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
+ // same.
struct FuncletFrameInfoDsc
{
- regMaskTP fiSaveRegs; // Set of registers saved in the funclet prolog (includes LR)
- unsigned fiFunctionCallerSPtoFPdelta; // Delta between caller SP and the frame pointer
- unsigned fiSpDelta; // Stack pointer delta
- unsigned fiPSP_slot_SP_offset; // PSP slot offset from SP
- int fiPSP_slot_CallerSP_offset; // PSP slot offset from Caller SP
+ regMaskTP fiSaveRegs; // Set of registers saved in the funclet prolog (includes LR)
+ unsigned fiFunctionCallerSPtoFPdelta; // Delta between caller SP and the frame pointer
+ unsigned fiSpDelta; // Stack pointer delta
+ unsigned fiPSP_slot_SP_offset; // PSP slot offset from SP
+ int fiPSP_slot_CallerSP_offset; // PSP slot offset from Caller SP
};
FuncletFrameInfoDsc genFuncletInfo;
@@ -376,18 +350,20 @@ protected:
#elif defined(_TARGET_ARM64_)
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
- // funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the same.
+ // funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
+ // same.
struct FuncletFrameInfoDsc
{
- regMaskTP fiSaveRegs; // Set of callee-saved registers saved in the funclet prolog (includes LR)
- int fiFunction_CallerSP_to_FP_delta; // Delta between caller SP and the frame pointer in the parent function (negative)
- int fiSP_to_FPLR_save_delta; // FP/LR register save offset from SP (positive)
- int fiSP_to_PSP_slot_delta; // PSP slot offset from SP (positive)
- int fiSP_to_CalleeSave_delta; // First callee-saved register slot offset from SP (positive)
- int fiCallerSP_to_PSP_slot_delta; // PSP slot offset from Caller SP (negative)
- int fiFrameType; // Funclet frame types are numbered. See genFuncletProlog() for details.
- int fiSpDelta1; // Stack pointer delta 1 (negative)
- int fiSpDelta2; // Stack pointer delta 2 (negative)
+ regMaskTP fiSaveRegs; // Set of callee-saved registers saved in the funclet prolog (includes LR)
+ int fiFunction_CallerSP_to_FP_delta; // Delta between caller SP and the frame pointer in the parent function
+ // (negative)
+ int fiSP_to_FPLR_save_delta; // FP/LR register save offset from SP (positive)
+ int fiSP_to_PSP_slot_delta; // PSP slot offset from SP (positive)
+ int fiSP_to_CalleeSave_delta; // First callee-saved register slot offset from SP (positive)
+ int fiCallerSP_to_PSP_slot_delta; // PSP slot offset from Caller SP (negative)
+ int fiFrameType; // Funclet frame types are numbered. See genFuncletProlog() for details.
+ int fiSpDelta1; // Stack pointer delta 1 (negative)
+ int fiSpDelta2; // Stack pointer delta 2 (negative)
};
FuncletFrameInfoDsc genFuncletInfo;
@@ -395,12 +371,13 @@ protected:
#elif defined(_TARGET_AMD64_)
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
- // funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the same.
+ // funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
+ // same.
struct FuncletFrameInfoDsc
{
- unsigned fiFunction_InitialSP_to_FP_delta; // Delta between Initial-SP and the frame pointer
- unsigned fiSpDelta; // Stack pointer delta
- int fiPSP_slot_InitialSP_offset; // PSP slot offset from Initial-SP
+ unsigned fiFunction_InitialSP_to_FP_delta; // Delta between Initial-SP and the frame pointer
+ unsigned fiSpDelta; // Stack pointer delta
+ int fiPSP_slot_InitialSP_offset; // PSP slot offset from Initial-SP
};
FuncletFrameInfoDsc genFuncletInfo;
@@ -410,62 +387,46 @@ protected:
#if defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
// Save/Restore callee saved float regs to stack
- void genPreserveCalleeSavedFltRegs(unsigned lclFrameSize);
- void genRestoreCalleeSavedFltRegs(unsigned lclFrameSize);
+ void genPreserveCalleeSavedFltRegs(unsigned lclFrameSize);
+ void genRestoreCalleeSavedFltRegs(unsigned lclFrameSize);
#endif // _TARGET_XARCH_ && FEATURE_STACK_FP_X87
#if !FEATURE_STACK_FP_X87
- void genZeroInitFltRegs (const regMaskTP& initFltRegs,
- const regMaskTP& initDblRegs,
- const regNumber& initReg);
+ void genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& initDblRegs, const regNumber& initReg);
#endif // !FEATURE_STACK_FP_X87
- regNumber genGetZeroReg (regNumber initReg,
- bool* pInitRegZeroed);
+ regNumber genGetZeroReg(regNumber initReg, bool* pInitRegZeroed);
- void genZeroInitFrame (int untrLclHi,
- int untrLclLo,
- regNumber initReg,
- bool * pInitRegZeroed);
+ void genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed);
- void genReportGenericContextArg (regNumber initReg,
- bool * pInitRegZeroed);
+ void genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed);
- void genSetGSSecurityCookie (regNumber initReg,
- bool * pInitRegZeroed);
+ void genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed);
- void genFinalizeFrame();
+ void genFinalizeFrame();
#ifdef PROFILING_SUPPORTED
- void genProfilingEnterCallback (regNumber initReg,
- bool * pInitRegZeroed);
- void genProfilingLeaveCallback(unsigned helper = CORINFO_HELP_PROF_FCN_LEAVE);
+ void genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed);
+ void genProfilingLeaveCallback(unsigned helper = CORINFO_HELP_PROF_FCN_LEAVE);
#endif // PROFILING_SUPPORTED
- void genPrologPadForReJit();
-
- void genEmitCall(int callType,
- CORINFO_METHOD_HANDLE methHnd,
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
- void* addr
- X86_ARG(ssize_t argSize),
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
- IL_OFFSETX ilOffset,
- regNumber base = REG_NA,
- bool isJump = false,
- bool isNoGC = false);
-
- void genEmitCall(int callType,
- CORINFO_METHOD_HANDLE methHnd,
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
- GenTreeIndir* indir
- X86_ARG(ssize_t argSize),
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
- IL_OFFSETX ilOffset);
+ void genPrologPadForReJit();
+
+ void genEmitCall(int callType,
+ CORINFO_METHOD_HANDLE methHnd,
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) void* addr X86_ARG(ssize_t argSize),
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
+ IL_OFFSETX ilOffset,
+ regNumber base = REG_NA,
+ bool isJump = false,
+ bool isNoGC = false);
+ void genEmitCall(int callType,
+ CORINFO_METHOD_HANDLE methHnd,
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) GenTreeIndir* indir X86_ARG(ssize_t argSize),
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
+ IL_OFFSETX ilOffset);
//
// Epilog functions
@@ -473,16 +434,16 @@ protected:
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(_TARGET_ARM_)
- bool genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog);
+ bool genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog);
#endif
#if defined(_TARGET_ARM64_)
- void genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog);
+ void genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog);
#else // !defined(_TARGET_ARM64_)
- void genPopCalleeSavedRegisters(bool jmpEpilog = false);
+ void genPopCalleeSavedRegisters(bool jmpEpilog = false);
#endif // !defined(_TARGET_ARM64_)
@@ -490,280 +451,258 @@ protected:
// Common or driving functions
//
- void genReserveProlog (BasicBlock* block); // currently unused
- void genReserveEpilog (BasicBlock* block);
- void genFnProlog ();
- void genFnEpilog (BasicBlock* block);
+ void genReserveProlog(BasicBlock* block); // currently unused
+ void genReserveEpilog(BasicBlock* block);
+ void genFnProlog();
+ void genFnEpilog(BasicBlock* block);
#if FEATURE_EH_FUNCLETS
- void genReserveFuncletProlog (BasicBlock* block);
- void genReserveFuncletEpilog (BasicBlock* block);
- void genFuncletProlog (BasicBlock* block);
- void genFuncletEpilog ();
- void genCaptureFuncletPrologEpilogInfo();
+ void genReserveFuncletProlog(BasicBlock* block);
+ void genReserveFuncletEpilog(BasicBlock* block);
+ void genFuncletProlog(BasicBlock* block);
+ void genFuncletEpilog();
+ void genCaptureFuncletPrologEpilogInfo();
- void genSetPSPSym (regNumber initReg,
- bool * pInitRegZeroed);
+ void genSetPSPSym(regNumber initReg, bool* pInitRegZeroed);
- void genUpdateCurrentFunclet (BasicBlock * block);
+ void genUpdateCurrentFunclet(BasicBlock* block);
#else // FEATURE_EH_FUNCLETS
// This is a no-op when there are no funclets!
- void genUpdateCurrentFunclet(BasicBlock * block) { return; }
+ void genUpdateCurrentFunclet(BasicBlock* block)
+ {
+ return;
+ }
#endif // FEATURE_EH_FUNCLETS
- void genGeneratePrologsAndEpilogs();
+ void genGeneratePrologsAndEpilogs();
#if defined(DEBUG) && defined(_TARGET_ARM64_)
- void genArm64EmitterUnitTests();
+ void genArm64EmitterUnitTests();
#endif
#if defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_)
- void genAmd64EmitterUnitTests();
+ void genAmd64EmitterUnitTests();
#endif
- //-------------------------------------------------------------------------
- //
- // End prolog/epilog generation
- //
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
+//
+// End prolog/epilog generation
+//
+//-------------------------------------------------------------------------
/*****************************************************************************/
#ifdef DEBUGGING_SUPPORT
/*****************************************************************************/
#ifdef DEBUG
- void genIPmappingDisp(unsigned mappingNum, Compiler::IPmappingDsc* ipMapping);
- void genIPmappingListDisp();
+ void genIPmappingDisp(unsigned mappingNum, Compiler::IPmappingDsc* ipMapping);
+ void genIPmappingListDisp();
#endif // DEBUG
- void genIPmappingAdd (IL_OFFSETX offset,
- bool isLabel);
- void genIPmappingAddToFront(IL_OFFSETX offset);
- void genIPmappingGen ();
+ void genIPmappingAdd(IL_OFFSETX offset, bool isLabel);
+ void genIPmappingAddToFront(IL_OFFSETX offset);
+ void genIPmappingGen();
- void genEnsureCodeEmitted (IL_OFFSETX offsx);
+ void genEnsureCodeEmitted(IL_OFFSETX offsx);
//-------------------------------------------------------------------------
// scope info for the variables
- void genSetScopeInfo (unsigned which,
- UNATIVE_OFFSET startOffs,
- UNATIVE_OFFSET length,
- unsigned varNum,
- unsigned LVnum,
- bool avail,
- Compiler::siVarLoc & loc);
-
- void genSetScopeInfo ();
+ void genSetScopeInfo(unsigned which,
+ UNATIVE_OFFSET startOffs,
+ UNATIVE_OFFSET length,
+ unsigned varNum,
+ unsigned LVnum,
+ bool avail,
+ Compiler::siVarLoc& loc);
- void genRemoveBBsection(BasicBlock *head, BasicBlock *tail);
-protected :
-
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX ScopeInfo XX
-XX XX
-XX Keeps track of the scopes during code-generation. XX
-XX This is used to translate the local-variable debugging information XX
-XX from IL offsets to native code offsets. XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
+ void genSetScopeInfo();
-/*****************************************************************************/
-/*****************************************************************************
- * ScopeInfo
- *
- * This class is called during code gen at block-boundaries, and when the
- * set of live variables changes. It keeps track of the scope of the variables
- * in terms of the native code PC.
- */
+ void genRemoveBBsection(BasicBlock* head, BasicBlock* tail);
+protected:
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX ScopeInfo XX
+ XX XX
+ XX Keeps track of the scopes during code-generation. XX
+ XX This is used to translate the local-variable debugging information XX
+ XX from IL offsets to native code offsets. XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
+
+ /*****************************************************************************/
+ /*****************************************************************************
+ * ScopeInfo
+ *
+ * This class is called during code gen at block-boundaries, and when the
+ * set of live variables changes. It keeps track of the scope of the variables
+ * in terms of the native code PC.
+ */
public:
+ void siInit();
- void siInit ();
-
- void siBeginBlock (BasicBlock* block);
+ void siBeginBlock(BasicBlock* block);
- void siEndBlock (BasicBlock* block);
+ void siEndBlock(BasicBlock* block);
- virtual void siUpdate ();
+ virtual void siUpdate();
- void siCheckVarScope (unsigned varNum, IL_OFFSET offs);
+ void siCheckVarScope(unsigned varNum, IL_OFFSET offs);
- void siCloseAllOpenScopes();
+ void siCloseAllOpenScopes();
#ifdef DEBUG
- void siDispOpenScopes();
+ void siDispOpenScopes();
#endif
-
/**************************************************************************
* PROTECTED
*************************************************************************/
-protected :
-
+protected:
struct siScope
{
- emitLocation scStartLoc; // emitter location of start of scope
- emitLocation scEndLoc; // emitter location of end of scope
+ emitLocation scStartLoc; // emitter location of start of scope
+ emitLocation scEndLoc; // emitter location of end of scope
- unsigned scVarNum; // index into lvaTable
- unsigned scLVnum; // 'which' in eeGetLVinfo()
+ unsigned scVarNum; // index into lvaTable
+ unsigned scLVnum; // 'which' in eeGetLVinfo()
- unsigned scStackLevel; // Only for stk-vars
- bool scAvailable :1; // It has a home / Home recycled - TODO-Cleanup: it appears this is unused (always true)
+ unsigned scStackLevel; // Only for stk-vars
+ bool scAvailable : 1; // It has a home / Home recycled - TODO-Cleanup: it appears this is unused (always true)
- siScope * scPrev;
- siScope * scNext;
+ siScope* scPrev;
+ siScope* scNext;
};
- siScope siOpenScopeList, siScopeList,
- * siOpenScopeLast, * siScopeLast;
+ siScope siOpenScopeList, siScopeList, *siOpenScopeLast, *siScopeLast;
- unsigned siScopeCnt;
+ unsigned siScopeCnt;
- VARSET_TP siLastLife; // Life at last call to siUpdate()
+ VARSET_TP siLastLife; // Life at last call to siUpdate()
// Tracks the last entry for each tracked register variable
- siScope * siLatestTrackedScopes[lclMAX_TRACKED];
+ siScope* siLatestTrackedScopes[lclMAX_TRACKED];
- IL_OFFSET siLastEndOffs; // IL offset of the (exclusive) end of the last block processed
+ IL_OFFSET siLastEndOffs; // IL offset of the (exclusive) end of the last block processed
#if FEATURE_EH_FUNCLETS
- bool siInFuncletRegion; // Have we seen the start of the funclet region?
-#endif // FEATURE_EH_FUNCLETS
+ bool siInFuncletRegion; // Have we seen the start of the funclet region?
+#endif // FEATURE_EH_FUNCLETS
// Functions
- siScope * siNewScope (unsigned LVnum,
- unsigned varNum);
+ siScope* siNewScope(unsigned LVnum, unsigned varNum);
- void siRemoveFromOpenScopeList(siScope * scope);
+ void siRemoveFromOpenScopeList(siScope* scope);
- void siEndTrackedScope (unsigned varIndex);
+ void siEndTrackedScope(unsigned varIndex);
- void siEndScope (unsigned varNum);
+ void siEndScope(unsigned varNum);
- void siEndScope (siScope * scope);
+ void siEndScope(siScope* scope);
#ifdef DEBUG
- bool siVerifyLocalVarTab ();
+ bool siVerifyLocalVarTab();
#endif
#ifdef LATE_DISASM
public:
-
/* virtual */
- const char* siRegVarName (size_t offs,
- size_t size,
- unsigned reg);
+ const char* siRegVarName(size_t offs, size_t size, unsigned reg);
/* virtual */
- const char* siStackVarName (size_t offs,
- size_t size,
- unsigned reg,
- unsigned stkOffs);
+ const char* siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs);
#endif // LATE_DISASM
-public :
-
-
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX PrologScopeInfo XX
-XX XX
-XX We need special handling in the prolog block, as the parameter variables XX
-XX may not be in the same position described by genLclVarTable - they all XX
-XX start out on the stack XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
-
-
-public :
-
- void psiBegProlog ();
+public:
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX PrologScopeInfo XX
+ XX XX
+ XX We need special handling in the prolog block, as the parameter variables XX
+ XX may not be in the same position described by genLclVarTable - they all XX
+ XX start out on the stack XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
- void psiAdjustStackLevel(unsigned size);
+public:
+ void psiBegProlog();
- void psiMoveESPtoEBP ();
+ void psiAdjustStackLevel(unsigned size);
- void psiMoveToReg (unsigned varNum,
- regNumber reg = REG_NA,
- regNumber otherReg = REG_NA);
+ void psiMoveESPtoEBP();
- void psiMoveToStack (unsigned varNum);
+ void psiMoveToReg(unsigned varNum, regNumber reg = REG_NA, regNumber otherReg = REG_NA);
- void psiEndProlog ();
+ void psiMoveToStack(unsigned varNum);
+ void psiEndProlog();
/**************************************************************************
* PROTECTED
*************************************************************************/
-protected :
-
- struct psiScope
+protected:
+ struct psiScope
{
- emitLocation scStartLoc; // emitter location of start of scope
- emitLocation scEndLoc; // emitter location of end of scope
+ emitLocation scStartLoc; // emitter location of start of scope
+ emitLocation scEndLoc; // emitter location of end of scope
- unsigned scSlotNum; // index into lclVarTab
- unsigned scLVnum; // 'which' in eeGetLVinfo()
+ unsigned scSlotNum; // index into lclVarTab
+ unsigned scLVnum; // 'which' in eeGetLVinfo()
- bool scRegister;
+ bool scRegister;
- union
- {
+ union {
struct
{
- regNumberSmall scRegNum;
-
+ regNumberSmall scRegNum;
+
// Used for:
// - "other half" of long var on architectures with 32 bit size registers - x86.
- // - for System V structs it stores the second register
+ // - for System V structs it stores the second register
// used to pass a register passed struct.
- regNumberSmall scOtherReg;
+ regNumberSmall scOtherReg;
} u1;
struct
{
- regNumberSmall scBaseReg;
- NATIVE_OFFSET scOffset;
+ regNumberSmall scBaseReg;
+ NATIVE_OFFSET scOffset;
} u2;
};
- psiScope * scPrev;
- psiScope * scNext;
+ psiScope* scPrev;
+ psiScope* scNext;
};
- psiScope psiOpenScopeList, psiScopeList,
- * psiOpenScopeLast, * psiScopeLast;
+ psiScope psiOpenScopeList, psiScopeList, *psiOpenScopeLast, *psiScopeLast;
- unsigned psiScopeCnt;
+ unsigned psiScopeCnt;
// Implementation Functions
- psiScope * psiNewPrologScope(unsigned LVnum,
- unsigned slotNum);
+ psiScope* psiNewPrologScope(unsigned LVnum, unsigned slotNum);
- void psiEndPrologScope(psiScope * scope);
+ void psiEndPrologScope(psiScope* scope);
- void psSetScopeOffset(psiScope* newScope, LclVarDsc * lclVarDsc1);
+ void psSetScopeOffset(psiScope* newScope, LclVarDsc* lclVarDsc1);
/*****************************************************************************
* TrnslLocalVarInfo
@@ -774,333 +713,218 @@ protected :
#ifdef DEBUG
-
struct TrnslLocalVarInfo
{
- unsigned tlviVarNum;
- unsigned tlviLVnum;
- VarName tlviName;
- UNATIVE_OFFSET tlviStartPC;
- size_t tlviLength;
- bool tlviAvailable;
- Compiler::siVarLoc tlviVarLoc;
+ unsigned tlviVarNum;
+ unsigned tlviLVnum;
+ VarName tlviName;
+ UNATIVE_OFFSET tlviStartPC;
+ size_t tlviLength;
+ bool tlviAvailable;
+ Compiler::siVarLoc tlviVarLoc;
};
// Array of scopes of LocalVars in terms of native code
- TrnslLocalVarInfo * genTrnslLocalVarInfo;
- unsigned genTrnslLocalVarCount;
+ TrnslLocalVarInfo* genTrnslLocalVarInfo;
+ unsigned genTrnslLocalVarCount;
#endif
/*****************************************************************************/
#endif // DEBUGGING_SUPPORT
/*****************************************************************************/
-
-
#ifndef LEGACY_BACKEND
#include "codegenlinear.h"
#else // LEGACY_BACKEND
#include "codegenclassic.h"
#endif // LEGACY_BACKEND
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX Instruction XX
+ XX XX
+ XX The interface to generate a machine-instruction. XX
+ XX Currently specific to x86 XX
+ XX TODO-Cleanup: Consider factoring this out of CodeGen XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
+public:
+ void instInit();
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX Instruction XX
-XX XX
-XX The interface to generate a machine-instruction. XX
-XX Currently specific to x86 XX
-XX TODO-Cleanup: Consider factoring this out of CodeGen XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
+ regNumber genGetZeroRegister();
+ void instGen(instruction ins);
+#ifdef _TARGET_XARCH_
+ void instNop(unsigned size);
+#endif
-public :
+ void inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock);
- void instInit();
+ void inst_SET(emitJumpKind condition, regNumber reg);
- regNumber genGetZeroRegister();
+ void inst_RV(instruction ins, regNumber reg, var_types type, emitAttr size = EA_UNKNOWN);
- void instGen (instruction ins);
-#ifdef _TARGET_XARCH_
- void instNop (unsigned size);
-#endif
+ void inst_RV_RV(instruction ins,
+ regNumber reg1,
+ regNumber reg2,
+ var_types type = TYP_I_IMPL,
+ emitAttr size = EA_UNKNOWN,
+ insFlags flags = INS_FLAGS_DONT_CARE);
+
+ void inst_RV_RV_RV(instruction ins,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ emitAttr size,
+ insFlags flags = INS_FLAGS_DONT_CARE);
+
+ void inst_IV(instruction ins, int val);
+ void inst_IV_handle(instruction ins, int val);
+ void inst_FS(instruction ins, unsigned stk = 0);
+
+ void inst_RV_IV(instruction ins, regNumber reg, ssize_t val, emitAttr size, insFlags flags = INS_FLAGS_DONT_CARE);
+
+ void inst_ST_RV(instruction ins, TempDsc* tmp, unsigned ofs, regNumber reg, var_types type);
+ void inst_ST_IV(instruction ins, TempDsc* tmp, unsigned ofs, int val, var_types type);
+
+ void inst_SA_RV(instruction ins, unsigned ofs, regNumber reg, var_types type);
+ void inst_SA_IV(instruction ins, unsigned ofs, int val, var_types type);
+
+ void inst_RV_ST(
+ instruction ins, regNumber reg, TempDsc* tmp, unsigned ofs, var_types type, emitAttr size = EA_UNKNOWN);
+ void inst_FS_ST(instruction ins, emitAttr size, TempDsc* tmp, unsigned ofs);
+
+ void instEmit_indCall(GenTreePtr call,
+ size_t argSize,
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize));
+
+ void instEmit_RM(instruction ins, GenTreePtr tree, GenTreePtr addr, unsigned offs);
+
+ void instEmit_RM_RV(instruction ins, emitAttr size, GenTreePtr tree, regNumber reg, unsigned offs);
+
+ void instEmit_RV_RM(instruction ins, emitAttr size, regNumber reg, GenTreePtr tree, unsigned offs);
+
+ void instEmit_RV_RIA(instruction ins, regNumber reg1, regNumber reg2, unsigned offs);
+
+ void inst_TT(instruction ins, GenTreePtr tree, unsigned offs = 0, int shfv = 0, emitAttr size = EA_UNKNOWN);
- void inst_JMP (emitJumpKind jmp,
- BasicBlock * tgtBlock);
-
- void inst_SET (emitJumpKind condition,
- regNumber reg);
-
- void inst_RV (instruction ins,
- regNumber reg,
- var_types type,
- emitAttr size = EA_UNKNOWN);
-
- void inst_RV_RV (instruction ins,
- regNumber reg1,
- regNumber reg2,
- var_types type = TYP_I_IMPL,
- emitAttr size = EA_UNKNOWN,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void inst_RV_RV_RV (instruction ins,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- emitAttr size,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void inst_IV (instruction ins,
- int val);
- void inst_IV_handle (instruction ins,
- int val);
- void inst_FS (instruction ins, unsigned stk = 0);
-
- void inst_RV_IV (instruction ins,
- regNumber reg,
- ssize_t val,
- emitAttr size,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void inst_ST_RV (instruction ins,
- TempDsc * tmp,
- unsigned ofs,
- regNumber reg,
- var_types type);
- void inst_ST_IV (instruction ins,
- TempDsc * tmp,
- unsigned ofs,
- int val,
- var_types type);
-
- void inst_SA_RV (instruction ins,
- unsigned ofs,
- regNumber reg,
- var_types type);
- void inst_SA_IV (instruction ins,
- unsigned ofs,
- int val,
- var_types type);
-
- void inst_RV_ST (instruction ins,
- regNumber reg,
- TempDsc * tmp,
- unsigned ofs,
- var_types type,
- emitAttr size = EA_UNKNOWN);
- void inst_FS_ST (instruction ins,
- emitAttr size,
- TempDsc * tmp,
- unsigned ofs);
-
- void instEmit_indCall(GenTreePtr call,
- size_t argSize,
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize));
-
- void instEmit_RM (instruction ins,
- GenTreePtr tree,
- GenTreePtr addr,
- unsigned offs);
-
- void instEmit_RM_RV (instruction ins,
- emitAttr size,
- GenTreePtr tree,
- regNumber reg,
- unsigned offs);
-
- void instEmit_RV_RM (instruction ins,
- emitAttr size,
- regNumber reg,
- GenTreePtr tree,
- unsigned offs);
-
- void instEmit_RV_RIA (instruction ins,
- regNumber reg1,
- regNumber reg2,
- unsigned offs);
-
- void inst_TT (instruction ins,
- GenTreePtr tree,
- unsigned offs = 0,
- int shfv = 0,
- emitAttr size = EA_UNKNOWN);
-
- void inst_TT_RV (instruction ins,
- GenTreePtr tree,
- regNumber reg,
- unsigned offs = 0,
- emitAttr size = EA_UNKNOWN,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void inst_TT_IV (instruction ins,
- GenTreePtr tree,
- ssize_t val,
- unsigned offs = 0,
- emitAttr size = EA_UNKNOWN,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void inst_RV_AT (instruction ins,
- emitAttr size,
- var_types type,
- regNumber reg,
- GenTreePtr tree,
- unsigned offs = 0,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void inst_AT_IV (instruction ins,
- emitAttr size,
- GenTreePtr baseTree,
- int icon,
- unsigned offs = 0);
-
- void inst_RV_TT (instruction ins,
- regNumber reg,
- GenTreePtr tree,
- unsigned offs = 0,
- emitAttr size = EA_UNKNOWN,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void inst_RV_TT_IV (instruction ins,
- regNumber reg,
- GenTreePtr tree,
- int val);
-
- void inst_FS_TT (instruction ins,
- GenTreePtr tree);
-
- void inst_RV_SH (instruction ins,
- emitAttr size,
- regNumber reg,
- unsigned val,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void inst_TT_SH (instruction ins,
- GenTreePtr tree,
- unsigned val, unsigned offs = 0);
-
- void inst_RV_CL (instruction ins, regNumber reg, var_types type = TYP_I_IMPL);
-
- void inst_TT_CL (instruction ins,
- GenTreePtr tree, unsigned offs = 0);
+ void inst_TT_RV(instruction ins,
+ GenTreePtr tree,
+ regNumber reg,
+ unsigned offs = 0,
+ emitAttr size = EA_UNKNOWN,
+ insFlags flags = INS_FLAGS_DONT_CARE);
+
+ void inst_TT_IV(instruction ins,
+ GenTreePtr tree,
+ ssize_t val,
+ unsigned offs = 0,
+ emitAttr size = EA_UNKNOWN,
+ insFlags flags = INS_FLAGS_DONT_CARE);
+
+ void inst_RV_AT(instruction ins,
+ emitAttr size,
+ var_types type,
+ regNumber reg,
+ GenTreePtr tree,
+                    NYI("Unimplemented GT_CAST:int <--> int with overflow");
+ insFlags flags = INS_FLAGS_DONT_CARE);
+
+ void inst_AT_IV(instruction ins, emitAttr size, GenTreePtr baseTree, int icon, unsigned offs = 0);
+
+ void inst_RV_TT(instruction ins,
+ regNumber reg,
+ GenTreePtr tree,
+ unsigned offs = 0,
+ emitAttr size = EA_UNKNOWN,
+ insFlags flags = INS_FLAGS_DONT_CARE);
+
+ void inst_RV_TT_IV(instruction ins, regNumber reg, GenTreePtr tree, int val);
+
+ void inst_FS_TT(instruction ins, GenTreePtr tree);
+
+ void inst_RV_SH(instruction ins, emitAttr size, regNumber reg, unsigned val, insFlags flags = INS_FLAGS_DONT_CARE);
+
+ void inst_TT_SH(instruction ins, GenTreePtr tree, unsigned val, unsigned offs = 0);
+
+ void inst_RV_CL(instruction ins, regNumber reg, var_types type = TYP_I_IMPL);
+
+ void inst_TT_CL(instruction ins, GenTreePtr tree, unsigned offs = 0);
#if defined(_TARGET_XARCH_)
- void inst_RV_RV_IV (instruction ins,
- emitAttr size,
- regNumber reg1,
- regNumber reg2,
- unsigned ival);
+ void inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival);
#endif
- void inst_RV_RR (instruction ins,
- emitAttr size,
- regNumber reg1,
- regNumber reg2);
+ void inst_RV_RR(instruction ins, emitAttr size, regNumber reg1, regNumber reg2);
- void inst_RV_ST (instruction ins,
- emitAttr size,
- regNumber reg,
- GenTreePtr tree);
+ void inst_RV_ST(instruction ins, emitAttr size, regNumber reg, GenTreePtr tree);
- void inst_mov_RV_ST (regNumber reg,
- GenTreePtr tree);
+ void inst_mov_RV_ST(regNumber reg, GenTreePtr tree);
- void instGetAddrMode (GenTreePtr addr,
- regNumber * baseReg,
- unsigned * indScale,
- regNumber * indReg,
- unsigned * cns);
+ void instGetAddrMode(GenTreePtr addr, regNumber* baseReg, unsigned* indScale, regNumber* indReg, unsigned* cns);
- void inst_set_SV_var (GenTreePtr tree);
+ void inst_set_SV_var(GenTreePtr tree);
#ifdef _TARGET_ARM_
- bool arm_Valid_Imm_For_Instr(instruction ins,
- ssize_t imm,
- insFlags flags);
- bool arm_Valid_Disp_For_LdSt(ssize_t disp, var_types type);
- bool arm_Valid_Imm_For_Alu (ssize_t imm);
- bool arm_Valid_Imm_For_Mov (ssize_t imm);
- bool arm_Valid_Imm_For_Small_Mov(regNumber reg,
- ssize_t imm,
- insFlags flags);
- bool arm_Valid_Imm_For_Add (ssize_t imm, insFlags flag);
- bool arm_Valid_Imm_For_Add_SP(ssize_t imm);
- bool arm_Valid_Imm_For_BL (ssize_t addr);
-
- bool ins_Writes_Dest (instruction ins);
+ bool arm_Valid_Imm_For_Instr(instruction ins, ssize_t imm, insFlags flags);
+ bool arm_Valid_Disp_For_LdSt(ssize_t disp, var_types type);
+ bool arm_Valid_Imm_For_Alu(ssize_t imm);
+ bool arm_Valid_Imm_For_Mov(ssize_t imm);
+ bool arm_Valid_Imm_For_Small_Mov(regNumber reg, ssize_t imm, insFlags flags);
+ bool arm_Valid_Imm_For_Add(ssize_t imm, insFlags flag);
+ bool arm_Valid_Imm_For_Add_SP(ssize_t imm);
+ bool arm_Valid_Imm_For_BL(ssize_t addr);
+
+ bool ins_Writes_Dest(instruction ins);
#endif
- bool isMoveIns(instruction ins);
- instruction ins_Move_Extend (var_types srcType,
- bool srcInReg);
-
- instruction ins_Copy (var_types dstType);
- instruction ins_CopyIntToFloat (var_types srcType, var_types dstTyp);
- instruction ins_CopyFloatToInt (var_types srcType, var_types dstTyp);
- static instruction ins_FloatStore (var_types type=TYP_DOUBLE);
- static instruction ins_FloatCopy (var_types type=TYP_DOUBLE);
- instruction ins_FloatConv (var_types to, var_types from);
- instruction ins_FloatCompare (var_types type);
- instruction ins_MathOp (genTreeOps oper, var_types type);
- instruction ins_FloatSqrt (var_types type);
+ bool isMoveIns(instruction ins);
+ instruction ins_Move_Extend(var_types srcType, bool srcInReg);
- void instGen_Return(unsigned stkArgSize);
+ instruction ins_Copy(var_types dstType);
+ instruction ins_CopyIntToFloat(var_types srcType, var_types dstTyp);
+ instruction ins_CopyFloatToInt(var_types srcType, var_types dstTyp);
+ static instruction ins_FloatStore(var_types type = TYP_DOUBLE);
+ static instruction ins_FloatCopy(var_types type = TYP_DOUBLE);
+ instruction ins_FloatConv(var_types to, var_types from);
+ instruction ins_FloatCompare(var_types type);
+ instruction ins_MathOp(genTreeOps oper, var_types type);
+ instruction ins_FloatSqrt(var_types type);
- void instGen_MemoryBarrier();
+ void instGen_Return(unsigned stkArgSize);
- void instGen_Set_Reg_To_Zero(emitAttr size,
- regNumber reg,
- insFlags flags = INS_FLAGS_DONT_CARE);
+ void instGen_MemoryBarrier();
+ void instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags = INS_FLAGS_DONT_CARE);
- void instGen_Set_Reg_To_Imm (emitAttr size,
- regNumber reg,
- ssize_t imm,
- insFlags flags = INS_FLAGS_DONT_CARE);
+ void instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm, insFlags flags = INS_FLAGS_DONT_CARE);
- void instGen_Compare_Reg_To_Zero(emitAttr size,
- regNumber reg);
+ void instGen_Compare_Reg_To_Zero(emitAttr size, regNumber reg);
- void instGen_Compare_Reg_To_Reg (emitAttr size,
- regNumber reg1,
- regNumber reg2);
+ void instGen_Compare_Reg_To_Reg(emitAttr size, regNumber reg1, regNumber reg2);
- void instGen_Compare_Reg_To_Imm (emitAttr size,
- regNumber reg,
- ssize_t imm);
+ void instGen_Compare_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm);
- void instGen_Load_Reg_From_Lcl (var_types srcType,
- regNumber dstReg,
- int varNum,
- int offs);
+ void instGen_Load_Reg_From_Lcl(var_types srcType, regNumber dstReg, int varNum, int offs);
- void instGen_Store_Reg_Into_Lcl (var_types dstType,
- regNumber srcReg,
- int varNum,
- int offs);
+ void instGen_Store_Reg_Into_Lcl(var_types dstType, regNumber srcReg, int varNum, int offs);
- void instGen_Store_Imm_Into_Lcl (var_types dstType,
- emitAttr sizeAttr,
- ssize_t imm,
- int varNum,
- int offs,
- regNumber regToUse = REG_NA);
+ void instGen_Store_Imm_Into_Lcl(
+ var_types dstType, emitAttr sizeAttr, ssize_t imm, int varNum, int offs, regNumber regToUse = REG_NA);
-#ifdef DEBUG
- void __cdecl instDisp(instruction ins, bool noNL, const char *fmt, ...);
+#ifdef DEBUG
+ void __cdecl instDisp(instruction ins, bool noNL, const char* fmt, ...);
#endif
#ifdef _TARGET_XARCH_
- instruction genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue);
+ instruction genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue);
#endif // _TARGET_XARCH_
-
};
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -1120,8 +944,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* a tree (which has been made addressable).
*/
-inline
-void CodeGen::inst_FS_TT(instruction ins, GenTreePtr tree)
+inline void CodeGen::inst_FS_TT(instruction ins, GenTreePtr tree)
{
assert(instIsFP(ins));
@@ -1136,11 +959,9 @@ void CodeGen::inst_FS_TT(instruction ins, GenTreePtr tree)
* Generate a "shift reg, cl" instruction.
*/
-inline
-void CodeGen::inst_RV_CL(instruction ins, regNumber reg, var_types type)
+inline void CodeGen::inst_RV_CL(instruction ins, regNumber reg, var_types type)
{
inst_RV(ins, reg, type);
}
-
#endif // _CODEGEN_H_
diff --git a/src/jit/codegenarm.cpp b/src/jit/codegenarm.cpp
index 4b5e40fe0a..e4df26d4e9 100644
--- a/src/jit/codegenarm.cpp
+++ b/src/jit/codegenarm.cpp
@@ -27,7 +27,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "gcinfoencoder.h"
#endif
-
// Get the register assigned to the given node
regNumber CodeGenInterface::genGetAssignedReg(GenTreePtr tree)
@@ -35,7 +34,6 @@ regNumber CodeGenInterface::genGetAssignedReg(GenTreePtr tree)
return tree->gtRegNum;
}
-
//------------------------------------------------------------------------
// genSpillVar: Spill a local variable
//
@@ -48,31 +46,31 @@ regNumber CodeGenInterface::genGetAssignedReg(GenTreePtr tree)
// Assumptions:
// The lclVar must be a register candidate (lvRegCandidate)
-void CodeGen::genSpillVar(GenTreePtr tree)
+void CodeGen::genSpillVar(GenTreePtr tree)
{
- regMaskTP regMask;
- unsigned varNum = tree->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &(compiler->lvaTable[varNum]);
+ regMaskTP regMask;
+ unsigned varNum = tree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
// We don't actually need to spill if it is already living in memory
bool needsSpill = ((tree->gtFlags & GTF_VAR_DEF) == 0 && varDsc->lvIsInReg());
if (needsSpill)
{
bool restoreRegVar = false;
- if (tree->gtOper == GT_REG_VAR)
+ if (tree->gtOper == GT_REG_VAR)
{
tree->SetOper(GT_LCL_VAR);
restoreRegVar = true;
}
// mask off the flag to generate the right spill code, then bring it back
- tree->gtFlags &= ~GTF_REG_VAL;
+ tree->gtFlags &= ~GTF_REG_VAL;
instruction storeIns = ins_Store(tree->TypeGet());
if (varTypeIsMultiReg(tree))
{
- assert(varDsc->lvRegNum == genRegPairLo(tree->gtRegPair));
+ assert(varDsc->lvRegNum == genRegPairLo(tree->gtRegPair));
assert(varDsc->lvOtherReg == genRegPairHi(tree->gtRegPair));
regNumber regLo = genRegPairLo(tree->gtRegPair);
regNumber regHi = genRegPairHi(tree->gtRegPair);
@@ -84,7 +82,7 @@ void CodeGen::genSpillVar(GenTreePtr tree)
assert(varDsc->lvRegNum == tree->gtRegNum);
inst_TT_RV(storeIns, tree, tree->gtRegNum);
}
- tree->gtFlags |= GTF_REG_VAL;
+ tree->gtFlags |= GTF_REG_VAL;
if (restoreRegVar)
{
@@ -108,10 +106,9 @@ void CodeGen::genSpillVar(GenTreePtr tree)
#endif
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
-
}
- tree->gtFlags &= ~GTF_SPILL;
+ tree->gtFlags &= ~GTF_SPILL;
varDsc->lvRegNum = REG_STK;
if (varTypeIsMultiReg(tree))
{
@@ -120,22 +117,18 @@ void CodeGen::genSpillVar(GenTreePtr tree)
}
// inline
-void CodeGenInterface::genUpdateVarReg(LclVarDsc * varDsc, GenTreePtr tree)
+void CodeGenInterface::genUpdateVarReg(LclVarDsc* varDsc, GenTreePtr tree)
{
assert(tree->OperIsScalarLocal() || (tree->gtOper == GT_COPY));
varDsc->lvRegNum = tree->gtRegNum;
}
-
/*****************************************************************************
*
* Generate code that will set the given register to the integer constant.
*/
-void CodeGen::genSetRegToIcon(regNumber reg,
- ssize_t val,
- var_types type,
- insFlags flags)
+void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
{
// Reg cannot be a FP reg
assert(!genIsValidFloatReg(reg));
@@ -156,14 +149,13 @@ void CodeGen::genSetRegToIcon(regNumber reg,
}
}
-
/*****************************************************************************
*
* Generate code to check that the GS cookie wasn't thrashed by a buffer
* overrun. If pushReg is true, preserve all registers around code sequence.
* Otherwise, ECX maybe modified.
*/
-void CodeGen::genEmitGSCookieCheck(bool pushReg)
+void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
NYI("ARM genEmitGSCookieCheck is not yet implemented for protojit");
}
@@ -173,18 +165,18 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
* Generate code for all the basic blocks in the function.
*/
-void CodeGen::genCodeForBBlist()
+void CodeGen::genCodeForBBlist()
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- unsigned savedStkLvl;
+ unsigned savedStkLvl;
-#ifdef DEBUG
- genInterruptibleUsed = true;
- unsigned stmtNum = 0;
- unsigned totalCostEx = 0;
- unsigned totalCostSz = 0;
+#ifdef DEBUG
+ genInterruptibleUsed = true;
+ unsigned stmtNum = 0;
+ unsigned totalCostEx = 0;
+ unsigned totalCostSz = 0;
// You have to be careful if you create basic blocks from now on
compiler->fgSafeBasicBlockCreation = false;
@@ -205,7 +197,8 @@ void CodeGen::genCodeForBBlist()
// Prepare the blocks for exception handling codegen: mark the blocks that needs labels.
genPrepForEHCodegen();
- assert(!compiler->fgFirstBBScratch || compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
+ assert(!compiler->fgFirstBBScratch ||
+ compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
/* Initialize the spill tracking logic */
@@ -229,7 +222,7 @@ void CodeGen::genCodeForBBlist()
/* If we have any pinvoke calls, we might potentially trash everything */
if (compiler->info.compCallUnmanaged)
{
- noway_assert(isFramePointerUsed()); // Setup of Pinvoke frame currently requires an EBP style frame
+ noway_assert(isFramePointerUsed()); // Setup of Pinvoke frame currently requires an EBP style frame
regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE);
}
@@ -242,18 +235,16 @@ void CodeGen::genCodeForBBlist()
/* If any arguments live in registers, mark those regs as such */
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
/* Is this variable a parameter assigned to a register? */
- if (!varDsc->lvIsParam || !varDsc->lvRegister)
+ if (!varDsc->lvIsParam || !varDsc->lvRegister)
continue;
/* Is the argument live on entry to the method? */
- if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
+ if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
continue;
/* Is this a floating-point argument? */
@@ -280,12 +271,10 @@ void CodeGen::genCodeForBBlist()
*
*/
- BasicBlock * block;
- BasicBlock * lblk; /* previous block */
+ BasicBlock* block;
+ BasicBlock* lblk; /* previous block */
- for (lblk = NULL, block = compiler->fgFirstBB;
- block != NULL;
- lblk = block, block = block->bbNext)
+ for (lblk = NULL, block = compiler->fgFirstBB; block != NULL; lblk = block, block = block->bbNext)
{
#ifdef DEBUG
if (compiler->verbose)
@@ -311,14 +300,14 @@ void CodeGen::genCodeForBBlist()
// change? We cleared them out above. Maybe we should just not clear them out, but update the ones that change
// here. That would require handling the changes in recordVarLocationsAtStartOfBB().
- regMaskTP newLiveRegSet = RBM_NONE;
+ regMaskTP newLiveRegSet = RBM_NONE;
regMaskTP newRegGCrefSet = RBM_NONE;
regMaskTP newRegByrefSet = RBM_NONE;
VARSET_ITER_INIT(compiler, iter, block->bbLiveIn, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
if (varDsc->lvIsInReg())
{
@@ -374,7 +363,7 @@ void CodeGen::genCodeForBBlist()
{
assert(block->bbFlags & BBF_JMP_TARGET);
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("\nEmitting finally target NOP predecessor for BB%02u\n", block->bbNum);
@@ -386,10 +375,9 @@ void CodeGen::genCodeForBBlist()
// block starts an EH region. If we pointed the existing bbEmitCookie here, then the NOP
// would be executed, which we would prefer not to do.
- block->bbUnwindNopEmitCookie = getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur);
-
+ block->bbUnwindNopEmitCookie =
+ getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+
instGen(INS_nop);
}
#endif // defined(_TARGET_ARM_)
@@ -404,22 +392,20 @@ void CodeGen::genCodeForBBlist()
}
#endif
-#ifdef DEBUG
- if (compiler->opts.dspCode)
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
printf("\n L_M%03u_BB%02u:\n", Compiler::s_compMethodsCount, block->bbNum);
#endif
block->bbEmitCookie = NULL;
- if (block->bbFlags & (BBF_JMP_TARGET|BBF_HAS_LABEL))
+ if (block->bbFlags & (BBF_JMP_TARGET | BBF_HAS_LABEL))
{
/* Mark a label and update the current set of live GC refs */
- block->bbEmitCookie = getEmitter()->emitAddLabel(
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- /*isFinally*/block->bbFlags & BBF_FINALLY_TARGET);
+ block->bbEmitCookie =
+ getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur,
+ /*isFinally*/ block->bbFlags & BBF_FINALLY_TARGET);
}
if (block == compiler->fgFirstColdBlock)
@@ -445,13 +431,13 @@ void CodeGen::genCodeForBBlist()
#if !FEATURE_FIXED_OUT_ARGS
/* Check for inserted throw blocks and adjust genStackLevel */
- if (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block))
+ if (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block))
{
noway_assert(block->bbFlags & BBF_JMP_TARGET);
genStackLevel = compiler->fgThrowHlpBlkStkLevel(block) * sizeof(int);
- if (genStackLevel)
+ if (genStackLevel)
{
NYI("Need emitMarkStackLvl()");
}
@@ -469,9 +455,9 @@ void CodeGen::genCodeForBBlist()
// BBF_INTERNAL blocks don't correspond to any single IL instruction.
if (compiler->opts.compDbgInfo && (block->bbFlags & BBF_INTERNAL) && block != compiler->fgFirstBB)
- genIPmappingAdd((IL_OFFSETX) ICorDebugInfo::NO_MAPPING, true);
+ genIPmappingAdd((IL_OFFSETX)ICorDebugInfo::NO_MAPPING, true);
- bool firstMapping = true;
+ bool firstMapping = true;
#endif // DEBUGGING_SUPPORT
/*---------------------------------------------------------------------
@@ -496,7 +482,7 @@ void CodeGen::genCodeForBBlist()
continue;
/* Get hold of the statement tree */
- GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
+ GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
#if defined(DEBUGGING_SUPPORT)
@@ -515,13 +501,11 @@ void CodeGen::genCodeForBBlist()
noway_assert(stmt->gtStmt.gtStmtLastILoffs <= compiler->info.compILCodeSize ||
stmt->gtStmt.gtStmtLastILoffs == BAD_IL_OFFSET);
- if (compiler->opts.dspCode && compiler->opts.dspInstrs &&
- stmt->gtStmt.gtStmtLastILoffs != BAD_IL_OFFSET)
+ if (compiler->opts.dspCode && compiler->opts.dspInstrs && stmt->gtStmt.gtStmtLastILoffs != BAD_IL_OFFSET)
{
while (genCurDispOffset <= stmt->gtStmt.gtStmtLastILoffs)
{
- genCurDispOffset +=
- dumpSingleInstr(compiler->info.compCode, genCurDispOffset, "> ");
+ genCurDispOffset += dumpSingleInstr(compiler->info.compCode, genCurDispOffset, "> ");
}
}
@@ -530,12 +514,13 @@ void CodeGen::genCodeForBBlist()
{
printf("\nGenerating BB%02u, stmt %u\t\t", block->bbNum, stmtNum);
printf("Holding variables: ");
- dspRegMask(regSet.rsMaskVars); printf("\n\n");
+ dspRegMask(regSet.rsMaskVars);
+ printf("\n\n");
compiler->gtDispTree(compiler->opts.compDbgInfo ? stmt : tree);
printf("\n");
}
totalCostEx += (stmt->gtCostEx * block->getBBWeight(compiler));
- totalCostSz += stmt->gtCostSz;
+ totalCostSz += stmt->gtCostSz;
#endif // DEBUG
// Traverse the tree in linear order, generating code for each node in the
@@ -549,10 +534,8 @@ void CodeGen::genCodeForBBlist()
curPossiblyEmbeddedStmt = stmt->AsStmt();
compiler->compCurLifeTree = NULL;
- compiler->compCurStmt = stmt;
- for (GenTreePtr treeNode = stmt->gtStmt.gtStmtList;
- treeNode != NULL;
- treeNode = treeNode->gtNext)
+ compiler->compCurStmt = stmt;
+ for (GenTreePtr treeNode = stmt->gtStmt.gtStmtList; treeNode != NULL; treeNode = treeNode->gtNext)
{
genCodeForTreeNode(treeNode);
@@ -580,7 +563,7 @@ void CodeGen::genCodeForBBlist()
#ifdef DEBUG
/* Make sure we didn't bungle pointer register tracking */
- regMaskTP ptrRegs = (gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur);
+ regMaskTP ptrRegs = (gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur);
regMaskTP nonVarPtrRegs = ptrRegs & ~regSet.rsMaskVars;
// If return is a GC-type, clear it. Note that if a common
@@ -588,9 +571,8 @@ void CodeGen::genCodeForBBlist()
// even though we might return a ref. We can't use the compRetType
// as the determiner because something we are tracking as a byref
// might be used as a return value of a int function (which is legal)
- if (tree->gtOper == GT_RETURN &&
- (varTypeIsGC(compiler->info.compRetType) ||
- (tree->gtOp.gtOp1 != 0 && varTypeIsGC(tree->gtOp.gtOp1->TypeGet()))))
+ if (tree->gtOper == GT_RETURN && (varTypeIsGC(compiler->info.compRetType) ||
+ (tree->gtOp.gtOp1 != 0 && varTypeIsGC(tree->gtOp.gtOp1->TypeGet()))))
{
nonVarPtrRegs &= ~RBM_INTRET;
}
@@ -599,14 +581,13 @@ void CodeGen::genCodeForBBlist()
// harmless "inc" instruction (does not interfere with the exception
// object).
- if ((compiler->opts.eeFlags & CORJIT_FLG_BBINSTR) &&
- (stmt == block->bbTreeList) &&
+ if ((compiler->opts.eeFlags & CORJIT_FLG_BBINSTR) && (stmt == block->bbTreeList) &&
handlerGetsXcptnObj(block->bbCatchTyp))
{
nonVarPtrRegs &= ~RBM_EXCEPTION_OBJECT;
}
- if (nonVarPtrRegs)
+ if (nonVarPtrRegs)
{
printf("Regset after tree=");
Compiler::printTreeID(tree);
@@ -624,7 +605,7 @@ void CodeGen::genCodeForBBlist()
noway_assert(nonVarPtrRegs == 0);
- for (GenTree * node = stmt->gtStmt.gtStmtList; node; node=node->gtNext)
+ for (GenTree* node = stmt->gtStmt.gtStmtList; node; node = node->gtNext)
{
assert(!(node->gtFlags & GTF_SPILL));
}
@@ -639,7 +620,7 @@ void CodeGen::genCodeForBBlist()
} //-------- END-FOR each statement-tree of the current block ---------
-#ifdef DEBUGGING_SUPPORT
+#ifdef DEBUGGING_SUPPORT
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
@@ -660,7 +641,7 @@ void CodeGen::genCodeForBBlist()
are at the end of the method. It would be nice if we could fix
our code so that this throw block will no longer be necessary. */
- //noway_assert(block->bbCodeOffsEnd != compiler->info.compILCodeSize);
+ // noway_assert(block->bbCodeOffsEnd != compiler->info.compILCodeSize);
siCloseAllOpenScopes();
}
@@ -680,8 +661,8 @@ void CodeGen::genCodeForBBlist()
VARSET_ITER_INIT(compiler, extraLiveVarIter, extraLiveVars, extraLiveVarIndex);
while (extraLiveVarIter.NextElem(compiler, &extraLiveVarIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[extraLiveVarIndex];
- LclVarDsc * varDsc = compiler->lvaTable + varNum;
+ unsigned varNum = compiler->lvaTrackedToVarNum[extraLiveVarIndex];
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
assert(!varDsc->lvIsRegCandidate());
}
#endif
@@ -706,46 +687,45 @@ void CodeGen::genCodeForBBlist()
// Note: we may be generating a few too many NOPs for the case of call preceding an epilog. Technically,
// if the next block is a BBJ_RETURN, an epilog will be generated, but there may be some instructions
// generated before the OS epilog starts, such as a GS cookie check.
- if ((block->bbNext == nullptr) ||
- !BasicBlock::sameEHRegion(block, block->bbNext))
+ if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
{
// We only need the NOP if we're not going to generate any more code as part of the block end.
switch (block->bbJumpKind)
{
- case BBJ_ALWAYS:
- case BBJ_THROW:
- case BBJ_CALLFINALLY:
- case BBJ_EHCATCHRET:
+ case BBJ_ALWAYS:
+ case BBJ_THROW:
+ case BBJ_CALLFINALLY:
+ case BBJ_EHCATCHRET:
// We're going to generate more code below anyway, so no need for the NOP.
- case BBJ_RETURN:
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- // These are the "epilog follows" case, handled in the emitter.
-
- break;
-
- case BBJ_NONE:
- if (block->bbNext == nullptr)
- {
- // Call immediately before the end of the code; we should never get here .
- instGen(INS_BREAKPOINT); // This should never get executed
- }
- else
- {
- // We need the NOP
- instGen(INS_nop);
- }
- break;
-
- case BBJ_COND:
- case BBJ_SWITCH:
+ case BBJ_RETURN:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ // These are the "epilog follows" case, handled in the emitter.
+
+ break;
+
+ case BBJ_NONE:
+ if (block->bbNext == nullptr)
+ {
+                    // Call immediately before the end of the code; we should never get here.
+ instGen(INS_BREAKPOINT); // This should never get executed
+ }
+ else
+ {
+ // We need the NOP
+ instGen(INS_nop);
+ }
+ break;
+
+ case BBJ_COND:
+ case BBJ_SWITCH:
// These can't have a call as the last instruction!
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
}
}
@@ -755,59 +735,59 @@ void CodeGen::genCodeForBBlist()
switch (block->bbJumpKind)
{
- case BBJ_ALWAYS:
- inst_JMP(EJ_jmp, block->bbJumpDest);
- break;
+ case BBJ_ALWAYS:
+ inst_JMP(EJ_jmp, block->bbJumpDest);
+ break;
- case BBJ_RETURN:
- genExitCode(block);
- break;
+ case BBJ_RETURN:
+ genExitCode(block);
+ break;
- case BBJ_THROW:
- // If we have a throw at the end of a function or funclet, we need to emit another instruction
- // afterwards to help the OS unwinder determine the correct context during unwind.
- // We insert an unexecuted breakpoint instruction in several situations
- // following a throw instruction:
- // 1. If the throw is the last instruction of the function or funclet. This helps
- // the OS unwinder determine the correct context during an unwind from the
- // thrown exception.
- // 2. If this is this is the last block of the hot section.
- // 3. If the subsequent block is a special throw block.
- // 4. On AMD64, if the next block is in a different EH region.
- if ((block->bbNext == NULL)
+ case BBJ_THROW:
+ // If we have a throw at the end of a function or funclet, we need to emit another instruction
+ // afterwards to help the OS unwinder determine the correct context during unwind.
+ // We insert an unexecuted breakpoint instruction in several situations
+ // following a throw instruction:
+ // 1. If the throw is the last instruction of the function or funclet. This helps
+ // the OS unwinder determine the correct context during an unwind from the
+ // thrown exception.
+                //    2. If this is the last block of the hot section.
+ // 3. If the subsequent block is a special throw block.
+ // 4. On AMD64, if the next block is in a different EH region.
+ if ((block->bbNext == NULL)
#if FEATURE_EH_FUNCLETS
- || (block->bbNext->bbFlags & BBF_FUNCLET_BEG)
+ || (block->bbNext->bbFlags & BBF_FUNCLET_BEG)
#endif // FEATURE_EH_FUNCLETS
#ifdef _TARGET_AMD64_
- || !BasicBlock::sameEHRegion(block, block->bbNext)
+ || !BasicBlock::sameEHRegion(block, block->bbNext)
#endif // _TARGET_AMD64_
- || (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext))
- || block->bbNext == compiler->fgFirstColdBlock
- )
- {
- instGen(INS_BREAKPOINT); // This should never get executed
- }
+ || (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext)) ||
+ block->bbNext == compiler->fgFirstColdBlock)
+ {
+ instGen(INS_BREAKPOINT); // This should never get executed
+ }
- break;
+ break;
- case BBJ_CALLFINALLY:
+ case BBJ_CALLFINALLY:
- // Now set REG_LR to the address of where the finally funclet should
- // return to directly.
+ // Now set REG_LR to the address of where the finally funclet should
+ // return to directly.
- BasicBlock * bbFinallyRet; bbFinallyRet = NULL;
+ BasicBlock* bbFinallyRet;
+ bbFinallyRet = NULL;
- // We don't have retless calls, since we use the BBJ_ALWAYS to point at a NOP pad where
- // we would have otherwise created retless calls.
- assert(block->isBBCallAlwaysPair());
+ // We don't have retless calls, since we use the BBJ_ALWAYS to point at a NOP pad where
+ // we would have otherwise created retless calls.
+ assert(block->isBBCallAlwaysPair());
- assert(block->bbNext != NULL);
- assert(block->bbNext->bbJumpKind == BBJ_ALWAYS);
- assert(block->bbNext->bbJumpDest != NULL);
- assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
+ assert(block->bbNext != NULL);
+ assert(block->bbNext->bbJumpKind == BBJ_ALWAYS);
+ assert(block->bbNext->bbJumpDest != NULL);
+ assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
- bbFinallyRet = block->bbNext->bbJumpDest;
- bbFinallyRet->bbFlags |= BBF_JMP_TARGET;
+ bbFinallyRet = block->bbNext->bbJumpDest;
+ bbFinallyRet->bbFlags |= BBF_JMP_TARGET;
#if 0
// TODO-ARM-CQ:
@@ -818,85 +798,73 @@ void CodeGen::genCodeForBBlist()
EA_4BYTE,
bbFinallyRet,
REG_LR);
-#else // !0
- // Load the address where the finally funclet should return into LR.
- // The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do
- // the return.
- getEmitter()->emitIns_R_L (INS_movw,
- EA_4BYTE_DSP_RELOC,
- bbFinallyRet,
- REG_LR);
- getEmitter()->emitIns_R_L (INS_movt,
- EA_4BYTE_DSP_RELOC,
- bbFinallyRet,
- REG_LR);
+#else // !0
+ // Load the address where the finally funclet should return into LR.
+ // The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do
+ // the return.
+ getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
+ getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
#endif // !0
- // Jump to the finally BB
- inst_JMP(EJ_jmp, block->bbJumpDest);
+ // Jump to the finally BB
+ inst_JMP(EJ_jmp, block->bbJumpDest);
- // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
- // jump target using bbJumpDest - that is already used to point
- // to the finally block. So just skip past the BBJ_ALWAYS unless the
- // block is RETLESS.
- if ( !(block->bbFlags & BBF_RETLESS_CALL) )
- {
- assert(block->isBBCallAlwaysPair());
+ // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
+ // jump target using bbJumpDest - that is already used to point
+ // to the finally block. So just skip past the BBJ_ALWAYS unless the
+ // block is RETLESS.
+ if (!(block->bbFlags & BBF_RETLESS_CALL))
+ {
+ assert(block->isBBCallAlwaysPair());
- lblk = block;
- block = block->bbNext;
- }
- break;
+ lblk = block;
+ block = block->bbNext;
+ }
+ break;
#ifdef _TARGET_ARM_
- case BBJ_EHCATCHRET:
- // set r0 to the address the VM should return to after the catch
- getEmitter()->emitIns_R_L (INS_movw,
- EA_4BYTE_DSP_RELOC,
- block->bbJumpDest,
- REG_R0);
- getEmitter()->emitIns_R_L (INS_movt,
- EA_4BYTE_DSP_RELOC,
- block->bbJumpDest,
- REG_R0);
+ case BBJ_EHCATCHRET:
+ // set r0 to the address the VM should return to after the catch
+ getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_R0);
+ getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_R0);
- __fallthrough;
+ __fallthrough;
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- genReserveFuncletEpilog(block);
- break;
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ genReserveFuncletEpilog(block);
+ break;
#elif defined(_TARGET_AMD64_)
- case BBJ_EHCATCHRET:
- // Set EAX to the address the VM should return to after the catch.
- // Generate a RIP-relative
- // lea reg, [rip + disp32] ; the RIP is implicit
- // which will be position-indepenent.
- // TODO-ARM-Bug?: For ngen, we need to generate a reloc for the displacement (maybe EA_PTR_DSP_RELOC).
- getEmitter()->emitIns_R_L(INS_lea, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
- __fallthrough;
-
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- genReserveFuncletEpilog(block);
- break;
+ case BBJ_EHCATCHRET:
+ // Set EAX to the address the VM should return to after the catch.
+ // Generate a RIP-relative
+ // lea reg, [rip + disp32] ; the RIP is implicit
+                // which will be position-independent.
+ // TODO-ARM-Bug?: For ngen, we need to generate a reloc for the displacement (maybe EA_PTR_DSP_RELOC).
+ getEmitter()->emitIns_R_L(INS_lea, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
+ __fallthrough;
+
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ genReserveFuncletEpilog(block);
+ break;
#endif // _TARGET_AMD64_
- case BBJ_NONE:
- case BBJ_COND:
- case BBJ_SWITCH:
- break;
+ case BBJ_NONE:
+ case BBJ_COND:
+ case BBJ_SWITCH:
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
-#ifdef DEBUG
+#ifdef DEBUG
compiler->compCurBB = 0;
#endif
@@ -913,12 +881,11 @@ void CodeGen::genCodeForBBlist()
compiler->tmpEnd();
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("\n# ");
- printf("totalCostEx = %6d, totalCostSz = %5d ",
- totalCostEx, totalCostSz);
+ printf("totalCostEx = %6d, totalCostSz = %5d ", totalCostEx, totalCostSz);
printf("%s\n", compiler->info.compFullName);
}
#endif
@@ -926,8 +893,7 @@ void CodeGen::genCodeForBBlist()
// return the child that has the same reg as the dst (if any)
// other child returned (out param) in 'other'
-GenTree *
-sameRegAsDst(GenTree *tree, GenTree *&other /*out*/)
+GenTree* sameRegAsDst(GenTree* tree, GenTree*& other /*out*/)
{
if (tree->gtRegNum == REG_NA)
{
@@ -956,17 +922,14 @@ sameRegAsDst(GenTree *tree, GenTree *&other /*out*/)
// move an immediate value into an integer register
-void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
- regNumber reg,
- ssize_t imm,
- insFlags flags)
+void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm, insFlags flags)
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
if (!compiler->opts.compReloc)
{
- size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
+ size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
}
if ((imm == 0) && !EA_IS_RELOC(size))
@@ -995,16 +958,16 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
* specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
* genProduceReg() on the target register.
*/
-void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree)
+void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree)
{
switch (tree->gtOper)
{
- case GT_CNS_INT:
+ case GT_CNS_INT:
{
// relocatable values tend to come down as a CNS_INT of native int type
// so the line between these two opcodes is kind of blurry
- GenTreeIntConCommon* con = tree->AsIntConCommon();
- ssize_t cnsVal = con->IconValue();
+ GenTreeIntConCommon* con = tree->AsIntConCommon();
+ ssize_t cnsVal = con->IconValue();
bool needReloc = compiler->opts.compReloc && tree->IsIconHandle();
if (needReloc)
@@ -1019,14 +982,14 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types tar
}
break;
- case GT_CNS_DBL:
+ case GT_CNS_DBL:
{
NYI("GT_CNS_DBL");
}
break;
- default:
- unreached();
+ default:
+ unreached();
}
}
@@ -1036,12 +999,11 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types tar
* Preconditions: All operands have been evaluated
*
*/
-void
-CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
+void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
{
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter *emit = getEmitter();
+ emitter* emit = getEmitter();
JITDUMP("Generating: ");
DISPNODE(treeNode);
@@ -1055,39 +1017,38 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
switch (treeNode->gtOper)
{
- case GT_CNS_INT:
- case GT_CNS_DBL:
- genSetRegToConst(targetReg, targetType, treeNode);
- genProduceReg(treeNode);
- break;
+ case GT_CNS_INT:
+ case GT_CNS_DBL:
+ genSetRegToConst(targetReg, targetType, treeNode);
+ genProduceReg(treeNode);
+ break;
- case GT_NEG:
- case GT_NOT:
+ case GT_NEG:
+ case GT_NOT:
{
NYI("GT_NEG and GT_NOT");
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_OR:
- case GT_XOR:
- case GT_AND:
- assert(varTypeIsIntegralOrI(treeNode));
- __fallthrough;
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
+ assert(varTypeIsIntegralOrI(treeNode));
+ __fallthrough;
- case GT_ADD:
- case GT_SUB:
+ case GT_ADD:
+ case GT_SUB:
{
const genTreeOps oper = treeNode->OperGet();
- if ((oper == GT_ADD || oper == GT_SUB) &&
- treeNode->gtOverflow())
+ if ((oper == GT_ADD || oper == GT_SUB) && treeNode->gtOverflow())
{
// This is also checked in the importer.
NYI("Overflow not yet implemented");
}
- GenTreePtr op1 = treeNode->gtGetOp1();
- GenTreePtr op2 = treeNode->gtGetOp2();
+ GenTreePtr op1 = treeNode->gtGetOp1();
+ GenTreePtr op2 = treeNode->gtGetOp2();
instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
@@ -1137,50 +1098,50 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
regNumber r = emit->emitInsBinary(ins, emitTypeSize(treeNode), dst, src);
noway_assert(r == targetReg);
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- genCodeForShift(treeNode);
- // genCodeForShift() calls genProduceReg()
- break;
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ genCodeForShift(treeNode);
+ // genCodeForShift() calls genProduceReg()
+ break;
- case GT_CAST:
- // Cast is never contained (?)
- noway_assert(targetReg != REG_NA);
+ case GT_CAST:
+ // Cast is never contained (?)
+ noway_assert(targetReg != REG_NA);
- // Overflow conversions from float/double --> int types go through helper calls.
- if (treeNode->gtOverflow() && !varTypeIsFloating(treeNode->gtOp.gtOp1))
- NYI("Unimplmented GT_CAST:int <--> int with overflow");
+ // Overflow conversions from float/double --> int types go through helper calls.
+ if (treeNode->gtOverflow() && !varTypeIsFloating(treeNode->gtOp.gtOp1))
+ NYI("Unimplmented GT_CAST:int <--> int with overflow");
- if (varTypeIsFloating(targetType) && varTypeIsFloating(treeNode->gtOp.gtOp1))
- {
- // Casts float/double <--> double/float
- genFloatToFloatCast(treeNode);
- }
- else if (varTypeIsFloating(treeNode->gtOp.gtOp1))
- {
- // Casts float/double --> int32/int64
- genFloatToIntCast(treeNode);
- }
- else if (varTypeIsFloating(targetType))
- {
- // Casts int32/uint32/int64/uint64 --> float/double
- genIntToFloatCast(treeNode);
- }
- else
- {
- // Casts int <--> int
- genIntToIntCast(treeNode);
- }
- // The per-case functions call genProduceReg()
- break;
+ if (varTypeIsFloating(targetType) && varTypeIsFloating(treeNode->gtOp.gtOp1))
+ {
+ // Casts float/double <--> double/float
+ genFloatToFloatCast(treeNode);
+ }
+ else if (varTypeIsFloating(treeNode->gtOp.gtOp1))
+ {
+ // Casts float/double --> int32/int64
+ genFloatToIntCast(treeNode);
+ }
+ else if (varTypeIsFloating(targetType))
+ {
+ // Casts int32/uint32/int64/uint64 --> float/double
+ genIntToFloatCast(treeNode);
+ }
+ else
+ {
+ // Casts int <--> int
+ genIntToIntCast(treeNode);
+ }
+ // The per-case functions call genProduceReg()
+ break;
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
{
- GenTreeLclVarCommon *lcl = treeNode->AsLclVarCommon();
+ GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon();
// lcl_vars are not defs
assert((treeNode->gtFlags & GTF_VAR_DEF) == 0);
@@ -1197,14 +1158,15 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
if (!treeNode->InReg() && !(treeNode->gtFlags & GTF_SPILLED))
{
assert(!isRegCandidate);
- emit->emitIns_R_S(ins_Load(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode->gtRegNum, lcl->gtLclNum, 0);
+ emit->emitIns_R_S(ins_Load(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode->gtRegNum,
+ lcl->gtLclNum, 0);
genProduceReg(treeNode);
}
}
break;
- case GT_LCL_FLD_ADDR:
- case GT_LCL_VAR_ADDR:
+ case GT_LCL_FLD_ADDR:
+ case GT_LCL_VAR_ADDR:
{
// Address of a local var. This by itself should never be allocated a register.
// If it is worth storing the address in a register then it should be cse'ed into
@@ -1214,25 +1176,25 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
inst_RV_TT(INS_lea, targetReg, treeNode, 0, EA_BYREF);
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
{
NYI_IF(targetType == TYP_STRUCT, "GT_LCL_FLD: struct load local field not supported");
NYI_IF(treeNode->gtRegNum == REG_NA, "GT_LCL_FLD: load local field not into a register is not supported");
- emitAttr size = emitTypeSize(targetType);
- unsigned offs = treeNode->gtLclFld.gtLclOffs;
+ emitAttr size = emitTypeSize(targetType);
+ unsigned offs = treeNode->gtLclFld.gtLclOffs;
unsigned varNum = treeNode->gtLclVarCommon.gtLclNum;
assert(varNum < compiler->lvaCount);
emit->emitIns_R_S(ins_Move_Extend(targetType, treeNode->InReg()), size, targetReg, varNum, offs);
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_STORE_LCL_FLD:
+ case GT_STORE_LCL_FLD:
{
NYI_IF(targetType == TYP_STRUCT, "GT_STORE_LCL_FLD: struct store local field not supported");
noway_assert(!treeNode->InReg());
@@ -1243,7 +1205,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_STORE_LCL_VAR:
+ case GT_STORE_LCL_VAR:
{
NYI_IF(targetType == TYP_STRUCT, "struct store local not supported");
@@ -1276,19 +1238,19 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_RETFILT:
- // A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in
- // the return register, if it's not already there. The processing is the same as GT_RETURN.
- if (targetType != TYP_VOID)
- {
- // For filters, the IL spec says the result is type int32. Further, the only specified legal values
- // are 0 or 1, with the use of other values "undefined".
- assert(targetType == TYP_INT);
- }
+ case GT_RETFILT:
+ // A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in
+ // the return register, if it's not already there. The processing is the same as GT_RETURN.
+ if (targetType != TYP_VOID)
+ {
+ // For filters, the IL spec says the result is type int32. Further, the only specified legal values
+ // are 0 or 1, with the use of other values "undefined".
+ assert(targetType == TYP_INT);
+ }
- __fallthrough;
+ __fallthrough;
- case GT_RETURN:
+ case GT_RETURN:
{
GenTreePtr op1 = treeNode->gtOp.gtOp1;
if (targetType == TYP_VOID)
@@ -1310,71 +1272,71 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_LEA:
+ case GT_LEA:
{
// if we are here, it is the case where there is an LEA that cannot
// be folded into a parent instruction
- GenTreeAddrMode *lea = treeNode->AsAddrMode();
+ GenTreeAddrMode* lea = treeNode->AsAddrMode();
genLeaInstruction(lea);
}
// genLeaInstruction calls genProduceReg()
break;
- case GT_IND:
- emit->emitInsMov(ins_Load(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode);
- genProduceReg(treeNode);
- break;
+ case GT_IND:
+ emit->emitInsMov(ins_Load(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode);
+ genProduceReg(treeNode);
+ break;
- case GT_MUL:
+ case GT_MUL:
{
NYI("GT_MUL");
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_MOD:
- case GT_UDIV:
- case GT_UMOD:
- // We shouldn't be seeing GT_MOD on float/double args as it should get morphed into a
- // helper call by front-end. Similarly we shouldn't be seeing GT_UDIV and GT_UMOD
- // on float/double args.
- noway_assert(!varTypeIsFloating(treeNode));
- __fallthrough;
+ case GT_MOD:
+ case GT_UDIV:
+ case GT_UMOD:
+ // We shouldn't be seeing GT_MOD on float/double args as it should get morphed into a
+ // helper call by front-end. Similarly we shouldn't be seeing GT_UDIV and GT_UMOD
+ // on float/double args.
+ noway_assert(!varTypeIsFloating(treeNode));
+ __fallthrough;
- case GT_DIV:
+ case GT_DIV:
{
NYI("GT_DIV");
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_INTRINSIC:
+ case GT_INTRINSIC:
{
NYI("GT_INTRINSIC");
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
{
// TODO-ARM-CQ: Check if we can use the currently set flags.
// TODO-ARM-CQ: Check for the case where we can simply transfer the carry bit to a register
// (signed < or >= where targetReg != REG_NA)
- GenTreeOp *tree = treeNode->AsOp();
- GenTreePtr op1 = tree->gtOp1->gtEffectiveVal();
- GenTreePtr op2 = tree->gtOp2->gtEffectiveVal();
+ GenTreeOp* tree = treeNode->AsOp();
+ GenTreePtr op1 = tree->gtOp1->gtEffectiveVal();
+ GenTreePtr op2 = tree->gtOp2->gtEffectiveVal();
genConsumeIfReg(op1);
genConsumeIfReg(op2);
instruction ins = INS_cmp;
- emitAttr cmpAttr;
+ emitAttr cmpAttr;
if (varTypeIsFloating(op1))
{
NYI("Floating point compare");
@@ -1382,20 +1344,20 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
bool isUnordered = ((treeNode->gtFlags & GTF_RELOP_NAN_UN) != 0);
switch (tree->OperGet())
{
- case GT_EQ:
- ins = INS_beq;
- case GT_NE:
- ins = INS_bne;
- case GT_LT:
- ins = isUnordered ? INS_blt : INS_blo;
- case GT_LE:
- ins = isUnordered ? INS_ble : INS_bls;
- case GT_GE:
- ins = isUnordered ? INS_bpl : INS_bge;
- case GT_GT:
- ins = isUnordered ? INS_bhi : INS_bgt;
- default:
- unreached();
+ case GT_EQ:
+ ins = INS_beq;
+ case GT_NE:
+ ins = INS_bne;
+ case GT_LT:
+ ins = isUnordered ? INS_blt : INS_blo;
+ case GT_LE:
+ ins = isUnordered ? INS_ble : INS_bls;
+ case GT_GE:
+ ins = isUnordered ? INS_bpl : INS_bge;
+ case GT_GT:
+ ins = isUnordered ? INS_bhi : INS_bgt;
+ default:
+ unreached();
}
}
else
@@ -1410,9 +1372,9 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
else
{
- var_types cmpType = TYP_INT;
- bool op1Is64Bit = (varTypeIsLong(op1Type) || op1Type == TYP_REF);
- bool op2Is64Bit = (varTypeIsLong(op2Type) || op2Type == TYP_REF);
+ var_types cmpType = TYP_INT;
+ bool op1Is64Bit = (varTypeIsLong(op1Type) || op1Type == TYP_REF);
+ bool op2Is64Bit = (varTypeIsLong(op2Type) || op2Type == TYP_REF);
NYI_IF(op1Is64Bit || op2Is64Bit, "Long compare");
assert(!op1->isContainedMemoryOp() || op1Type == op2Type);
assert(!op2->isContainedMemoryOp() || op1Type == op2Type);
@@ -1430,9 +1392,9 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_JTRUE:
+ case GT_JTRUE:
{
- GenTree *cmp = treeNode->gtOp.gtOp1->gtEffectiveVal();
+ GenTree* cmp = treeNode->gtOp.gtOp1->gtEffectiveVal();
assert(cmp->OperIsCompare());
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
@@ -1442,18 +1404,18 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
CompareKind compareKind = ((cmp->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
emitJumpKind jmpKind = genJumpKindForOper(cmp->gtOper, compareKind);
- BasicBlock * jmpTarget = compiler->compCurBB->bbJumpDest;
+ BasicBlock* jmpTarget = compiler->compCurBB->bbJumpDest;
inst_JMP(jmpKind, jmpTarget);
}
break;
- case GT_RETURNTRAP:
+ case GT_RETURNTRAP:
{
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
- GenTree *data = treeNode->gtOp.gtOp1->gtEffectiveVal();
+ GenTree* data = treeNode->gtOp.gtOp1->gtEffectiveVal();
genConsumeIfReg(data);
GenTreeIntCon cns = intForm(TYP_INT, 0);
emit->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
@@ -1469,18 +1431,19 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_STOREIND:
+ case GT_STOREIND:
{
NYI("GT_STOREIND");
}
break;
- case GT_COPY:
+ case GT_COPY:
{
assert(treeNode->gtOp.gtOp1->IsLocal());
- GenTreeLclVarCommon* lcl = treeNode->gtOp.gtOp1->AsLclVarCommon();
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
- inst_RV_RV(ins_Move_Extend(targetType, true), targetReg, genConsumeReg(treeNode->gtOp.gtOp1), targetType, emitTypeSize(targetType));
+ GenTreeLclVarCommon* lcl = treeNode->gtOp.gtOp1->AsLclVarCommon();
+ LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
+ inst_RV_RV(ins_Move_Extend(targetType, true), targetReg, genConsumeReg(treeNode->gtOp.gtOp1), targetType,
+ emitTypeSize(targetType));
// The old location is dying
genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(treeNode->gtOp.gtOp1));
@@ -1492,15 +1455,15 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// The new location is going live
genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(treeNode));
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_LIST:
- case GT_ARGPLACE:
- // Nothing to do
- break;
+ case GT_LIST:
+ case GT_ARGPLACE:
+ // Nothing to do
+ break;
- case GT_PUTARG_STK:
+ case GT_PUTARG_STK:
{
NYI_IF(targetType == TYP_STRUCT, "GT_PUTARG_STK: struct support not implemented");
@@ -1518,23 +1481,23 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
if (data->isContained())
{
emit->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), compiler->lvaOutgoingArgSpaceVar,
- argOffset,
- (int) data->AsIntConCommon()->IconValue());
+ argOffset, (int)data->AsIntConCommon()->IconValue());
}
else
{
genConsumeReg(data);
- emit->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->gtRegNum, compiler->lvaOutgoingArgSpaceVar, argOffset);
+ emit->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->gtRegNum,
+ compiler->lvaOutgoingArgSpaceVar, argOffset);
}
}
break;
- case GT_PUTARG_REG:
+ case GT_PUTARG_REG:
{
NYI_IF(targetType == TYP_STRUCT, "GT_PUTARG_REG: struct support not implemented");
// commas show up here commonly, as part of a nullchk operation
- GenTree *op1 = treeNode->gtOp.gtOp1->gtEffectiveVal();
+ GenTree* op1 = treeNode->gtOp.gtOp1->gtEffectiveVal();
// If child node is not already in the register we need, move it
genConsumeReg(op1);
if (treeNode->gtRegNum != op1->gtRegNum)
@@ -1542,56 +1505,56 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
inst_RV_RV(ins_Move_Extend(targetType, true), treeNode->gtRegNum, op1->gtRegNum, targetType);
}
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_CALL:
- genCallInstruction(treeNode);
- break;
+ case GT_CALL:
+ genCallInstruction(treeNode);
+ break;
- case GT_LOCKADD:
- case GT_XCHG:
- case GT_XADD:
- genLockedInstructions(treeNode);
- break;
+ case GT_LOCKADD:
+ case GT_XCHG:
+ case GT_XADD:
+ genLockedInstructions(treeNode);
+ break;
- case GT_CMPXCHG:
+ case GT_CMPXCHG:
{
NYI("GT_CMPXCHG");
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_RELOAD:
- // do nothing - reload is just a marker.
- // The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
- // into the register specified in this node.
- break;
+ case GT_RELOAD:
+ // do nothing - reload is just a marker.
+ // The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
+ // into the register specified in this node.
+ break;
- case GT_NOP:
- break;
+ case GT_NOP:
+ break;
- case GT_NO_OP:
- NYI("GT_NO_OP");
- break;
+ case GT_NO_OP:
+ NYI("GT_NO_OP");
+ break;
- case GT_ARR_BOUNDS_CHECK:
- genRangeCheck(treeNode);
- break;
+ case GT_ARR_BOUNDS_CHECK:
+ genRangeCheck(treeNode);
+ break;
- case GT_PHYSREG:
- if (treeNode->gtRegNum != treeNode->AsPhysReg()->gtSrcReg)
- {
- inst_RV_RV(INS_mov, treeNode->gtRegNum, treeNode->AsPhysReg()->gtSrcReg, targetType);
+ case GT_PHYSREG:
+ if (treeNode->gtRegNum != treeNode->AsPhysReg()->gtSrcReg)
+ {
+ inst_RV_RV(INS_mov, treeNode->gtRegNum, treeNode->AsPhysReg()->gtSrcReg, targetType);
- genTransferRegGCState(treeNode->gtRegNum, treeNode->AsPhysReg()->gtSrcReg);
- }
- break;
+ genTransferRegGCState(treeNode->gtRegNum, treeNode->AsPhysReg()->gtSrcReg);
+ }
+ break;
- case GT_PHYSREGDST:
- break;
+ case GT_PHYSREGDST:
+ break;
- case GT_NULLCHECK:
+ case GT_NULLCHECK:
{
assert(!treeNode->gtOp.gtOp1->isContained());
regNumber reg = genConsumeReg(treeNode->gtOp.gtOp1);
@@ -1599,33 +1562,33 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_CATCH_ARG:
+ case GT_CATCH_ARG:
- noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
+ noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
- /* Catch arguments get passed in a register. genCodeForBBlist()
- would have marked it as holding a GC object, but not used. */
+ /* Catch arguments get passed in a register. genCodeForBBlist()
+ would have marked it as holding a GC object, but not used. */
- noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
- genConsumeReg(treeNode);
- break;
+ noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
+ genConsumeReg(treeNode);
+ break;
- case GT_PINVOKE_PROLOG:
- noway_assert(((gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
+ case GT_PINVOKE_PROLOG:
+ noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
- // the runtime side requires the codegen here to be consistent
- emit->emitDisableRandomNops();
- break;
+ // the runtime side requires the codegen here to be consistent
+ emit->emitDisableRandomNops();
+ break;
- case GT_LABEL:
- genPendingCallLabel = genCreateTempLabel();
- treeNode->gtLabel.gtLabBB = genPendingCallLabel;
- emit->emitIns_R_L(INS_lea, EA_PTRSIZE, genPendingCallLabel, treeNode->gtRegNum);
- break;
+ case GT_LABEL:
+ genPendingCallLabel = genCreateTempLabel();
+ treeNode->gtLabel.gtLabBB = genPendingCallLabel;
+ emit->emitIns_R_L(INS_lea, EA_PTRSIZE, genPendingCallLabel, treeNode->gtRegNum);
+ break;
- default:
+ default:
{
-#ifdef DEBUG
+#ifdef DEBUG
char message[256];
sprintf(message, "NYI: Unimplemented node type %s\n", GenTree::NodeName(treeNode->OperGet()));
notYetImplemented(message, __FILE__, __LINE__);
@@ -1639,41 +1602,38 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// generate code for the locked operations:
// GT_LOCKADD, GT_XCHG, GT_XADD
-void
-CodeGen::genLockedInstructions(GenTree* treeNode)
+void CodeGen::genLockedInstructions(GenTree* treeNode)
{
NYI("genLockedInstructions");
}
-
// generate code for GT_ARR_BOUNDS_CHECK node
-void
-CodeGen::genRangeCheck(GenTreePtr oper)
+void CodeGen::genRangeCheck(GenTreePtr oper)
{
noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
- GenTreePtr arrLen = bndsChk->gtArrLen->gtEffectiveVal();
- GenTreePtr arrIdx = bndsChk->gtIndex->gtEffectiveVal();
- GenTreePtr arrRef = NULL;
- int lenOffset = 0;
+ GenTreePtr arrLen = bndsChk->gtArrLen->gtEffectiveVal();
+ GenTreePtr arrIdx = bndsChk->gtIndex->gtEffectiveVal();
+ GenTreePtr arrRef = NULL;
+ int lenOffset = 0;
- GenTree *src1, *src2;
+ GenTree * src1, *src2;
emitJumpKind jmpKind;
if (arrIdx->isContainedIntOrIImmed())
{
- // To encode using a cmp immediate, we place the
+ // To encode using a cmp immediate, we place the
// constant operand in the second position
- src1 = arrLen;
- src2 = arrIdx;
- jmpKind = genJumpKindForOper(GT_LE, CK_UNSIGNED);
+ src1 = arrLen;
+ src2 = arrIdx;
+ jmpKind = genJumpKindForOper(GT_LE, CK_UNSIGNED);
}
else
{
- src1 = arrIdx;
- src2 = arrLen;
- jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED);
+ src1 = arrIdx;
+ src2 = arrLen;
+ jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED);
}
genConsumeIfReg(src1);
@@ -1681,19 +1641,18 @@ CodeGen::genRangeCheck(GenTreePtr oper)
getEmitter()->emitInsBinary(INS_cmp, emitAttr(TYP_INT), src1, src2);
genJumpToThrowHlpBlk(jmpKind, SCK_RNGCHK_FAIL, bndsChk->gtIndRngFailBB);
-
}
// make a temporary indir we can feed to pattern matching routines
// in cases where we don't want to instantiate all the indirs that happen
//
-GenTreeIndir CodeGen::indirForm(var_types type, GenTree *base)
+GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
{
GenTreeIndir i(GT_IND, type, base, nullptr);
i.gtRegNum = REG_NA;
// has to be nonnull (because contained nodes can't be the last in block)
// but don't want it to be a valid pointer
- i.gtNext = (GenTree *)(-1);
+ i.gtNext = (GenTree*)(-1);
return i;
}
@@ -1706,11 +1665,10 @@ GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
i.gtRegNum = REG_NA;
// has to be nonnull (because contained nodes can't be the last in block)
// but don't want it to be a valid pointer
- i.gtNext = (GenTree *)(-1);
+ i.gtNext = (GenTree*)(-1);
return i;
}
-
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins;
@@ -1720,19 +1678,42 @@ instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
switch (oper)
{
- case GT_ADD: ins = INS_add; break;
- case GT_AND: ins = INS_AND; break;
- case GT_MUL: ins = INS_MUL; break;
- case GT_LSH: ins = INS_SHIFT_LEFT_LOGICAL; break;
- case GT_NEG: ins = INS_rsb; break;
- case GT_NOT: ins = INS_NOT; break;
- case GT_OR: ins = INS_OR; break;
- case GT_RSH: ins = INS_SHIFT_RIGHT_ARITHM; break;
- case GT_RSZ: ins = INS_SHIFT_RIGHT_LOGICAL; break;
- case GT_SUB: ins = INS_sub; break;
- case GT_XOR: ins = INS_XOR; break;
- default: unreached();
- break;
+ case GT_ADD:
+ ins = INS_add;
+ break;
+ case GT_AND:
+ ins = INS_AND;
+ break;
+ case GT_MUL:
+ ins = INS_MUL;
+ break;
+ case GT_LSH:
+ ins = INS_SHIFT_LEFT_LOGICAL;
+ break;
+ case GT_NEG:
+ ins = INS_rsb;
+ break;
+ case GT_NOT:
+ ins = INS_NOT;
+ break;
+ case GT_OR:
+ ins = INS_OR;
+ break;
+ case GT_RSH:
+ ins = INS_SHIFT_RIGHT_ARITHM;
+ break;
+ case GT_RSZ:
+ ins = INS_SHIFT_RIGHT_LOGICAL;
+ break;
+ case GT_SUB:
+ ins = INS_sub;
+ break;
+ case GT_XOR:
+ ins = INS_XOR;
+ break;
+ default:
+ unreached();
+ break;
}
return ins;
}
@@ -1752,7 +1733,7 @@ void CodeGen::genCodeForShift(GenTreePtr tree)
NYI("genCodeForShift");
}
-void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
+void CodeGen::genUnspillRegIfNeeded(GenTree* tree)
{
regNumber dstReg = tree->gtRegNum;
@@ -1773,8 +1754,8 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
unspillTree->SetInReg();
- GenTreeLclVarCommon* lcl = unspillTree->AsLclVarCommon();
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
+ GenTreeLclVarCommon* lcl = unspillTree->AsLclVarCommon();
+ LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
// TODO-Review: We would like to call:
// genUpdateRegLife(varDsc, /*isBorn*/ true, /*isDying*/ false DEBUGARG(tree));
@@ -1797,7 +1778,7 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tV%02u in reg ", lcl->gtLclNum);
@@ -1814,11 +1795,8 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
{
TempDsc* t = regSet.rsUnspillInPlace(unspillTree, unspillTree->gtRegNum);
compiler->tmpRlsTemp(t);
- getEmitter()->emitIns_R_S(ins_Load(unspillTree->gtType),
- emitActualTypeSize(unspillTree->gtType),
- dstReg,
- t->tdTempNum(),
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(unspillTree->gtType), emitActualTypeSize(unspillTree->gtType), dstReg,
+ t->tdTempNum(), 0);
unspillTree->SetInReg();
}
@@ -1827,9 +1805,8 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
}
}
-
// do liveness update for a subnode that is being consumed by codegen
-regNumber CodeGen::genConsumeReg(GenTree *tree)
+regNumber CodeGen::genConsumeReg(GenTree* tree)
{
genUnspillRegIfNeeded(tree);
@@ -1844,8 +1821,8 @@ regNumber CodeGen::genConsumeReg(GenTree *tree)
if (genIsRegCandidateLocal(tree))
{
- GenTreeLclVarCommon *lcl = tree->AsLclVarCommon();
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()];
+ GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
+ LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()];
if (varDsc->lvRegNum == tree->gtRegNum && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
@@ -1879,7 +1856,7 @@ void CodeGen::genConsumeAddress(GenTree* addr)
}
// do liveness update for a subnode that is being consumed by codegen
-void CodeGen::genConsumeAddrMode(GenTreeAddrMode *addr)
+void CodeGen::genConsumeAddrMode(GenTreeAddrMode* addr)
{
if (addr->Base())
genConsumeReg(addr->Base());
@@ -1887,9 +1864,8 @@ void CodeGen::genConsumeAddrMode(GenTreeAddrMode *addr)
genConsumeReg(addr->Index());
}
-
// do liveness update for register produced by the current node in codegen
-void CodeGen::genProduceReg(GenTree *tree)
+void CodeGen::genProduceReg(GenTree* tree)
{
if (tree->gtFlags & GTF_SPILL)
{
@@ -1914,8 +1890,7 @@ void CodeGen::genProduceReg(GenTree *tree)
// If we've produced a register, mark it as a pointer, as needed.
// Except in the case of a dead definition of a lclVar.
- if (tree->gtHasReg() &&
- (!tree->IsLocal() || (tree->gtFlags & GTF_VAR_DEATH) == 0))
+ if (tree->gtHasReg() && (!tree->IsLocal() || (tree->gtFlags & GTF_VAR_DEATH) == 0))
{
gcInfo.gcMarkRegPtrVal(tree->gtRegNum, tree->TypeGet());
}
@@ -1925,21 +1900,21 @@ void CodeGen::genProduceReg(GenTree *tree)
// transfer gc/byref status of src reg to dst reg
void CodeGen::genTransferRegGCState(regNumber dst, regNumber src)
{
- regMaskTP srcMask = genRegMask(src);
- regMaskTP dstMask = genRegMask(dst);
-
- if (gcInfo.gcRegGCrefSetCur & srcMask)
- {
- gcInfo.gcMarkRegSetGCref(dstMask);
- }
- else if (gcInfo.gcRegByrefSetCur & srcMask)
- {
- gcInfo.gcMarkRegSetByref(dstMask);
- }
- else
- {
- gcInfo.gcMarkRegSetNpt(dstMask);
- }
+ regMaskTP srcMask = genRegMask(src);
+ regMaskTP dstMask = genRegMask(dst);
+
+ if (gcInfo.gcRegGCrefSetCur & srcMask)
+ {
+ gcInfo.gcMarkRegSetGCref(dstMask);
+ }
+ else if (gcInfo.gcRegByrefSetCur & srcMask)
+ {
+ gcInfo.gcMarkRegSetByref(dstMask);
+ }
+ else
+ {
+ gcInfo.gcMarkRegSetNpt(dstMask);
+ }
}
// Produce code for a GT_CALL node
@@ -1949,17 +1924,17 @@ void CodeGen::genCallInstruction(GenTreePtr node)
}
// produce code for a GT_LEA subnode
-void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
+void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
if (lea->Base() && lea->Index())
{
- regNumber baseReg = genConsumeReg(lea->Base());
+ regNumber baseReg = genConsumeReg(lea->Base());
regNumber indexReg = genConsumeReg(lea->Index());
- getEmitter()->emitIns_R_ARX (INS_lea, EA_BYREF, lea->gtRegNum, baseReg, indexReg, lea->gtScale, lea->gtOffset);
+ getEmitter()->emitIns_R_ARX(INS_lea, EA_BYREF, lea->gtRegNum, baseReg, indexReg, lea->gtScale, lea->gtOffset);
}
else if (lea->Base())
{
- getEmitter()->emitIns_R_AR (INS_lea, EA_BYREF, lea->gtRegNum, genConsumeReg(lea->Base()), lea->gtOffset);
+ getEmitter()->emitIns_R_AR(INS_lea, EA_BYREF, lea->gtRegNum, genConsumeReg(lea->Base()), lea->gtOffset);
}
genProduceReg(lea);
@@ -1987,8 +1962,7 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
// For a signed convert from byte, the source must be in a byte-addressable register.
// Neither the source nor target type can be a floating point type.
//
-void
-CodeGen::genIntToIntCast(GenTreePtr treeNode)
+void CodeGen::genIntToIntCast(GenTreePtr treeNode)
{
NYI("Cast");
}
@@ -2007,8 +1981,7 @@ CodeGen::genIntToIntCast(GenTreePtr treeNode)
// The treeNode must have an assigned register.
// The cast is between float and double.
//
-void
-CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
+void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
{
NYI("Cast");
}
@@ -2027,8 +2000,7 @@ CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
// The treeNode must have an assigned register.
// SrcType= int32/uint32/int64/uint64 and DstType=float/double.
//
-void
-CodeGen::genIntToFloatCast(GenTreePtr treeNode)
+void CodeGen::genIntToFloatCast(GenTreePtr treeNode)
{
NYI("Cast");
}
@@ -2047,8 +2019,7 @@ CodeGen::genIntToFloatCast(GenTreePtr treeNode)
// The treeNode must have an assigned register.
// SrcType=float/double and DstType= int32/uint32/int64/uint64
//
-void
-CodeGen::genFloatToIntCast(GenTreePtr treeNode)
+void CodeGen::genFloatToIntCast(GenTreePtr treeNode)
{
NYI("Cast");
}
@@ -2071,12 +2042,14 @@ CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigne
#endif
}
-// TODO-ARM-Cleanup: It seems that the ARM JIT (classic and otherwise) uses this method, so it seems to be inappropriately named?
+// TODO-ARM-Cleanup: It seems that the ARM JIT (classic and otherwise) uses this method, so it seems to be
+// inappropriately named?
-void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
+void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
{
- IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
- GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC) GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
+ IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
+ GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
+ GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
@@ -2091,23 +2064,24 @@ void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsig
gcInfoEncoder->Build();
- //GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
- //let's save the values anyway for debugging purposes
+ // GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
+ // let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
- compiler->compInfoBlkSize = 0; //not exposed by the GCEncoder interface
+ compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
/*****************************************************************************
* Emit a call to a helper function.
*/
-void CodeGen::genEmitHelperCall(unsigned helper,
- int argSize,
- emitAttr retSize
+void CodeGen::genEmitHelperCall(unsigned helper,
+ int argSize,
+ emitAttr retSize
#ifndef LEGACY_BACKEND
- ,regNumber callTargetReg /*= REG_NA */
-#endif // !LEGACY_BACKEND
- )
+ ,
+ regNumber callTargetReg /*= REG_NA */
+#endif // !LEGACY_BACKEND
+ )
{
NYI("Helper call");
}
@@ -2120,13 +2094,13 @@ void CodeGen::genEmitHelperCall(unsigned helper,
* Called for every scope info piece to record by the main genSetScopeInfo()
*/
-void CodeGen::genSetScopeInfo (unsigned which,
- UNATIVE_OFFSET startOffs,
- UNATIVE_OFFSET length,
- unsigned varNum,
- unsigned LVnum,
- bool avail,
- Compiler::siVarLoc& varLoc)
+void CodeGen::genSetScopeInfo(unsigned which,
+ UNATIVE_OFFSET startOffs,
+ UNATIVE_OFFSET length,
+ unsigned varNum,
+ unsigned LVnum,
+ bool avail,
+ Compiler::siVarLoc& varLoc)
{
/* We need to do some mapping while reporting back these variables */
@@ -2147,15 +2121,15 @@ void CodeGen::genSetScopeInfo (unsigned which,
// Hang on to this compiler->info.
- TrnslLocalVarInfo &tlvi = genTrnslLocalVarInfo[which];
+ TrnslLocalVarInfo& tlvi = genTrnslLocalVarInfo[which];
- tlvi.tlviVarNum = ilVarNum;
- tlvi.tlviLVnum = LVnum;
- tlvi.tlviName = name;
- tlvi.tlviStartPC = startOffs;
- tlvi.tlviLength = length;
- tlvi.tlviAvailable = avail;
- tlvi.tlviVarLoc = varLoc;
+ tlvi.tlviVarNum = ilVarNum;
+ tlvi.tlviLVnum = LVnum;
+ tlvi.tlviName = name;
+ tlvi.tlviStartPC = startOffs;
+ tlvi.tlviLength = length;
+ tlvi.tlviAvailable = avail;
+ tlvi.tlviVarLoc = varLoc;
#endif // DEBUG
diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp
index 188f646e05..8ed2fcdbec 100644
--- a/src/jit/codegenarm64.cpp
+++ b/src/jit/codegenarm64.cpp
@@ -57,53 +57,53 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// Return Value:
// returns true if the immediate was too large and tmpReg was used and modified.
//
-bool CodeGen::genInstrWithConstant(instruction ins,
- emitAttr attr,
- regNumber reg1,
+bool CodeGen::genInstrWithConstant(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
regNumber reg2,
- ssize_t imm,
+ ssize_t imm,
regNumber tmpReg,
bool inUnwindRegion /* = false */)
{
- bool immFitsInIns = false;
- emitAttr size = EA_SIZE(attr);
+ bool immFitsInIns = false;
+ emitAttr size = EA_SIZE(attr);
// reg1 is usually a dest register
// reg2 is always source register
- assert(tmpReg != reg2); // regTmp can not match any source register
+ assert(tmpReg != reg2); // regTmp can not match any source register
- switch (ins)
+ switch (ins)
{
- case INS_add:
- case INS_sub:
- if (imm < 0)
- {
- imm = -imm;
- ins = (ins == INS_add) ? INS_sub : INS_add;
- }
- immFitsInIns = emitter::emitIns_valid_imm_for_add(imm, size);
- break;
+ case INS_add:
+ case INS_sub:
+ if (imm < 0)
+ {
+ imm = -imm;
+ ins = (ins == INS_add) ? INS_sub : INS_add;
+ }
+ immFitsInIns = emitter::emitIns_valid_imm_for_add(imm, size);
+ break;
- case INS_strb:
- case INS_strh:
- case INS_str:
- // reg1 is a source register for store instructions
- assert(tmpReg != reg1); // regTmp can not match any source register
- immFitsInIns = emitter::emitIns_valid_imm_for_ldst_offset(imm, size);
- break;
+ case INS_strb:
+ case INS_strh:
+ case INS_str:
+ // reg1 is a source register for store instructions
+ assert(tmpReg != reg1); // regTmp can not match any source register
+ immFitsInIns = emitter::emitIns_valid_imm_for_ldst_offset(imm, size);
+ break;
- case INS_ldrsb:
- case INS_ldrsh:
- case INS_ldrsw:
- case INS_ldrb:
- case INS_ldrh:
- case INS_ldr:
- immFitsInIns = emitter::emitIns_valid_imm_for_ldst_offset(imm, size);
- break;
+ case INS_ldrsb:
+ case INS_ldrsh:
+ case INS_ldrsw:
+ case INS_ldrb:
+ case INS_ldrh:
+ case INS_ldr:
+ immFitsInIns = emitter::emitIns_valid_imm_for_ldst_offset(imm, size);
+ break;
- default:
- assert(!"Unexpected instruction in genInstrWithConstant");
- break;
+ default:
+ assert(!"Unexpected instruction in genInstrWithConstant");
+ break;
}
if (immFitsInIns)
@@ -122,7 +122,7 @@ bool CodeGen::genInstrWithConstant(instruction ins,
instGen_Set_Reg_To_Imm(size, tmpReg, imm);
regTracker.rsTrackRegTrash(tmpReg);
- // when we are in an unwind code region
+ // when we are in an unwind code region
// we record the extra instructions using unwindPadding()
if (inUnwindRegion)
{
@@ -164,10 +164,11 @@ void CodeGen::genStackPointerAdjustment(ssize_t spDelta, regNumber tmpReg, bool*
}
}
- // spDelta is negative in the prolog, positive in the epilog, but we always tell the unwind codes the positive value.
- ssize_t spDeltaAbs = abs(spDelta);
- unsigned unwindSpDelta = (unsigned) spDeltaAbs;
- assert((ssize_t)unwindSpDelta == spDeltaAbs); // make sure that it fits in a unsigned
+ // spDelta is negative in the prolog, positive in the epilog, but we always tell the unwind codes the positive
+ // value.
+ ssize_t spDeltaAbs = abs(spDelta);
+ unsigned unwindSpDelta = (unsigned)spDeltaAbs;
+ assert((ssize_t)unwindSpDelta == spDeltaAbs); // make sure that it fits in a unsigned
compiler->unwindAllocStack(unwindSpDelta);
}
@@ -203,7 +204,7 @@ void CodeGen::genPrologSaveRegPair(regNumber reg1,
{
assert(spOffset >= 0);
assert(spDelta <= 0);
- assert((spDelta % 16) == 0); // SP changes must be 16-byte aligned
+ assert((spDelta % 16) == 0); // SP changes must be 16-byte aligned
assert(genIsValidFloatReg(reg1) == genIsValidFloatReg(reg2)); // registers must be both general-purpose, or both
// FP/SIMD
@@ -222,7 +223,8 @@ void CodeGen::genPrologSaveRegPair(regNumber reg1,
}
else // (spDelta < -512))
{
- // We need to do SP adjustment separately from the store; we can't fold in a pre-indexed addressing and the non-zero offset.
+ // We need to do SP adjustment separately from the store; we can't fold in a pre-indexed addressing and the
+ // non-zero offset.
// generate sub SP,SP,imm
genStackPointerAdjustment(spDelta, tmpReg, pTmpRegIsZero);
@@ -235,10 +237,11 @@ void CodeGen::genPrologSaveRegPair(regNumber reg1,
// 64-bit STP offset range: -512 to 504, multiple of 8.
assert(spOffset <= 504);
getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
-
+
if (lastSavedWasPreviousPair)
{
- // This works as long as we've only been saving pairs, in order, and we've saved the previous one just before this one.
+ // This works as long as we've only been saving pairs, in order, and we've saved the previous one just
+ // before this one.
compiler->unwindSaveNext();
}
else
@@ -268,11 +271,7 @@ void CodeGen::genPrologSaveRegPair(regNumber reg1,
// Return Value:
// None.
-void CodeGen::genPrologSaveReg(regNumber reg1,
- int spOffset,
- int spDelta,
- regNumber tmpReg,
- bool* pTmpRegIsZero)
+void CodeGen::genPrologSaveReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta <= 0);
@@ -293,7 +292,8 @@ void CodeGen::genPrologSaveReg(regNumber reg1,
//------------------------------------------------------------------------
// genEpilogRestoreRegPair: This is the opposite of genPrologSaveRegPair(), run in the epilog instead of the prolog.
// The stack pointer adjustment, if requested, is done after the register restore, using post-index addressing.
-// The caller must ensure that we can use the LDP instruction, and that spOffset will be in the legal range for that instruction.
+// The caller must ensure that we can use the LDP instruction, and that spOffset will be in the legal range for that
+// instruction.
//
// Arguments:
// reg1 - First register of pair to restore.
@@ -308,12 +308,8 @@ void CodeGen::genPrologSaveReg(regNumber reg1,
// Return Value:
// None.
-void CodeGen::genEpilogRestoreRegPair(regNumber reg1,
- regNumber reg2,
- int spOffset,
- int spDelta,
- regNumber tmpReg,
- bool* pTmpRegIsZero)
+void CodeGen::genEpilogRestoreRegPair(
+ regNumber reg1, regNumber reg2, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta >= 0);
@@ -363,11 +359,7 @@ void CodeGen::genEpilogRestoreRegPair(regNumber reg1,
// Return Value:
// None.
-void CodeGen::genEpilogRestoreReg(regNumber reg1,
- int spOffset,
- int spDelta,
- regNumber tmpReg,
- bool* pTmpRegIsZero)
+void CodeGen::genEpilogRestoreReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta >= 0);
@@ -413,9 +405,7 @@ void CodeGen::genEpilogRestoreReg(regNumber reg1,
// Return Value:
// None.
-void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
- int lowestCalleeSavedOffset,
- int spDelta)
+void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask, int lowestCalleeSavedOffset, int spDelta)
{
assert(spDelta <= 0);
unsigned regsToSaveCount = genCountBits(regsToSaveMask);
@@ -431,7 +421,7 @@ void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
}
assert((spDelta % 16) == 0);
- assert((regsToSaveMask & RBM_FP) == 0); // we never save FP here
+ assert((regsToSaveMask & RBM_FP) == 0); // we never save FP here
assert(regsToSaveCount <= genCountBits(RBM_CALLEE_SAVED | RBM_LR)); // We also save LR, even though it is not in
// RBM_CALLEE_SAVED.
@@ -440,9 +430,9 @@ void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
int spOffset = lowestCalleeSavedOffset; // this is the offset *after* we change SP.
- unsigned intRegsToSaveCount = genCountBits(maskSaveRegsInt);
+ unsigned intRegsToSaveCount = genCountBits(maskSaveRegsInt);
unsigned floatRegsToSaveCount = genCountBits(maskSaveRegsFloat);
- bool isPairSave = false;
+ bool isPairSave = false;
#ifdef DEBUG
bool isRegsToSaveCountOdd = ((intRegsToSaveCount + floatRegsToSaveCount) % 2 != 0);
#endif
@@ -456,13 +446,12 @@ void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
// If this is the first store that needs to change SP (spDelta != 0),
// then the offset must be 8 to account for alignment for the odd count
// or it must be 0 for the even count.
- assert((spDelta == 0) ||
- (isRegsToSaveCountOdd && spOffset == REGSIZE_BYTES) ||
+ assert((spDelta == 0) || (isRegsToSaveCountOdd && spOffset == REGSIZE_BYTES) ||
(!isRegsToSaveCountOdd && spOffset == 0));
- isPairSave = (intRegsToSaveCount >= 2);
+ isPairSave = (intRegsToSaveCount >= 2);
regMaskTP reg1Mask = genFindLowestBit(maskSaveRegsInt);
- regNumber reg1 = genRegNumFromMask(reg1Mask);
+ regNumber reg1 = genRegNumFromMask(reg1Mask);
maskSaveRegsInt &= ~reg1Mask;
intRegsToSaveCount -= 1;
@@ -471,7 +460,7 @@ void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
// We can use a STP instruction.
regMaskTP reg2Mask = genFindLowestBit(maskSaveRegsInt);
- regNumber reg2 = genRegNumFromMask(reg2Mask);
+ regNumber reg2 = genRegNumFromMask(reg2Mask);
assert((reg2 == REG_NEXT(reg1)) || (reg2 == REG_LR));
maskSaveRegsInt &= ~reg2Mask;
intRegsToSaveCount -= 1;
@@ -483,7 +472,7 @@ void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
// lastSavedWasPair = true;
spOffset += 2 * REGSIZE_BYTES;
- }
+ }
else
{
// No register pair; we use a STR instruction.
@@ -508,13 +497,12 @@ void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
// If this is the first store that needs to change SP (spDelta != 0),
// then the offset must be 8 to account for alignment for the odd count
// or it must be 0 for the even count.
- assert((spDelta == 0) ||
- (isRegsToSaveCountOdd && spOffset == REGSIZE_BYTES) ||
+ assert((spDelta == 0) || (isRegsToSaveCountOdd && spOffset == REGSIZE_BYTES) ||
(!isRegsToSaveCountOdd && spOffset == 0));
- isPairSave = (floatRegsToSaveCount >= 2);
+ isPairSave = (floatRegsToSaveCount >= 2);
regMaskTP reg1Mask = genFindLowestBit(maskSaveRegsFloat);
- regNumber reg1 = genRegNumFromMask(reg1Mask);
+ regNumber reg1 = genRegNumFromMask(reg1Mask);
maskSaveRegsFloat &= ~reg1Mask;
floatRegsToSaveCount -= 1;
@@ -523,7 +511,7 @@ void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
// We can use a STP instruction.
regMaskTP reg2Mask = genFindLowestBit(maskSaveRegsFloat);
- regNumber reg2 = genRegNumFromMask(reg2Mask);
+ regNumber reg2 = genRegNumFromMask(reg2Mask);
assert(reg2 == REG_NEXT(reg1));
maskSaveRegsFloat &= ~reg2Mask;
floatRegsToSaveCount -= 1;
@@ -552,7 +540,6 @@ void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
assert(floatRegsToSaveCount == 0);
}
-
//------------------------------------------------------------------------
// genRestoreCalleeSavedRegistersHelp: Restore the callee-saved registers in 'regsToRestoreMask' from the stack frame
// in the function or funclet epilog. This exactly reverses the actions of genSaveCalleeSavedRegistersHelp().
@@ -584,9 +571,7 @@ void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask,
// Return Value:
// None.
-void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask,
- int lowestCalleeSavedOffset,
- int spDelta)
+void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask, int lowestCalleeSavedOffset, int spDelta)
{
assert(spDelta >= 0);
unsigned regsToRestoreCount = genCountBits(regsToRestoreMask);
@@ -603,19 +588,22 @@ void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask,
assert((spDelta % 16) == 0);
assert((regsToRestoreMask & RBM_FP) == 0); // we never restore FP here
- assert(regsToRestoreCount <= genCountBits(RBM_CALLEE_SAVED | RBM_LR)); // We also save LR, even though it is not in RBM_CALLEE_SAVED.
+ assert(regsToRestoreCount <=
+ genCountBits(RBM_CALLEE_SAVED | RBM_LR)); // We also save LR, even though it is not in RBM_CALLEE_SAVED.
regMaskTP maskRestoreRegsFloat = regsToRestoreMask & RBM_ALLFLOAT;
regMaskTP maskRestoreRegsInt = regsToRestoreMask & ~maskRestoreRegsFloat;
assert(REGSIZE_BYTES == FPSAVE_REGSIZE_BYTES);
- int spOffset = lowestCalleeSavedOffset + regsToRestoreCount * REGSIZE_BYTES; // Point past the end, to start. We predecrement to find the offset to load from.
-
- unsigned floatRegsToRestoreCount = genCountBits(maskRestoreRegsFloat);
- unsigned intRegsToRestoreCount = genCountBits(maskRestoreRegsInt);
- int stackDelta = 0;
- bool isPairRestore = false;
- bool thisIsTheLastRestoreInstruction = false;
+ int spOffset = lowestCalleeSavedOffset + regsToRestoreCount * REGSIZE_BYTES; // Point past the end, to start. We
+ // predecrement to find the offset to
+ // load from.
+
+ unsigned floatRegsToRestoreCount = genCountBits(maskRestoreRegsFloat);
+ unsigned intRegsToRestoreCount = genCountBits(maskRestoreRegsInt);
+ int stackDelta = 0;
+ bool isPairRestore = false;
+ bool thisIsTheLastRestoreInstruction = false;
#ifdef DEBUG
bool isRegsToRestoreCountOdd = ((floatRegsToRestoreCount + intRegsToRestoreCount) % 2 != 0);
#endif
@@ -623,13 +611,12 @@ void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask,
// We want to restore in the opposite order we saved, so the unwind codes match. Be careful to handle odd numbers of
// callee-saved registers properly.
-
// Restore the floating-point/SIMD registers
while (maskRestoreRegsFloat != RBM_NONE)
{
thisIsTheLastRestoreInstruction = (floatRegsToRestoreCount <= 2) && (maskRestoreRegsInt == RBM_NONE);
- isPairRestore = (floatRegsToRestoreCount % 2) == 0;
+ isPairRestore = (floatRegsToRestoreCount % 2) == 0;
// Update stack delta only if it is the last restore (the first save).
if (thisIsTheLastRestoreInstruction)
@@ -651,19 +638,18 @@ void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask,
// If this is the last restore (the first save) that needs to change SP (stackDelta != 0),
// then the offset must be 8 to account for alignment for the odd count
// or it must be 0 for the even count.
- assert((stackDelta == 0) ||
- (isRegsToRestoreCountOdd && spOffset == FPSAVE_REGSIZE_BYTES) ||
+ assert((stackDelta == 0) || (isRegsToRestoreCountOdd && spOffset == FPSAVE_REGSIZE_BYTES) ||
(!isRegsToRestoreCountOdd && spOffset == 0));
regMaskTP reg2Mask = genFindHighestBit(maskRestoreRegsFloat);
- regNumber reg2 = genRegNumFromMask(reg2Mask);
+ regNumber reg2 = genRegNumFromMask(reg2Mask);
maskRestoreRegsFloat &= ~reg2Mask;
floatRegsToRestoreCount -= 1;
if (isPairRestore)
{
regMaskTP reg1Mask = genFindHighestBit(maskRestoreRegsFloat);
- regNumber reg1 = genRegNumFromMask(reg1Mask);
+ regNumber reg1 = genRegNumFromMask(reg1Mask);
maskRestoreRegsFloat &= ~reg1Mask;
floatRegsToRestoreCount -= 1;
@@ -682,7 +668,7 @@ void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask,
while (maskRestoreRegsInt != RBM_NONE)
{
thisIsTheLastRestoreInstruction = (intRegsToRestoreCount <= 2);
- isPairRestore = (intRegsToRestoreCount % 2) == 0;
+ isPairRestore = (intRegsToRestoreCount % 2) == 0;
// Update stack delta only if it is the last restore (the first save).
if (thisIsTheLastRestoreInstruction)
@@ -701,18 +687,18 @@ void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask,
// If this is the last restore (the first save) that needs to change SP (stackDelta != 0),
// then the offset must be 8 to account for alignment for the odd count
// or it must be 0 for the even count.
- assert((stackDelta == 0) || (isRegsToRestoreCountOdd && spOffset == REGSIZE_BYTES)
- || (!isRegsToRestoreCountOdd && spOffset == 0));
+ assert((stackDelta == 0) || (isRegsToRestoreCountOdd && spOffset == REGSIZE_BYTES) ||
+ (!isRegsToRestoreCountOdd && spOffset == 0));
regMaskTP reg2Mask = genFindHighestBit(maskRestoreRegsInt);
- regNumber reg2 = genRegNumFromMask(reg2Mask);
+ regNumber reg2 = genRegNumFromMask(reg2Mask);
maskRestoreRegsInt &= ~reg2Mask;
intRegsToRestoreCount -= 1;
if (isPairRestore)
{
regMaskTP reg1Mask = genFindHighestBit(maskRestoreRegsInt);
- regNumber reg1 = genRegNumFromMask(reg1Mask);
+ regNumber reg1 = genRegNumFromMask(reg1Mask);
maskRestoreRegsInt &= ~reg1Mask;
intRegsToRestoreCount -= 1;
@@ -912,10 +898,10 @@ void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask,
*/
// clang-format on
-void CodeGen::genFuncletProlog(BasicBlock* block)
+void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
printf("*************** In genFuncletProlog()\n");
#endif
@@ -955,8 +941,8 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
if (genFuncletInfo.fiFrameType == 1)
{
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR,
- REG_SPBASE, genFuncletInfo.fiSpDelta1, INS_OPTS_PRE_INDEX);
+ getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, genFuncletInfo.fiSpDelta1,
+ INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
assert(genFuncletInfo.fiSpDelta2 == 0);
@@ -973,21 +959,21 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
assert(genFuncletInfo.fiSpDelta2 == 0);
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR,
- REG_SPBASE, genFuncletInfo.fiSP_to_FPLR_save_delta);
+ getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
+ genFuncletInfo.fiSP_to_FPLR_save_delta);
compiler->unwindSaveRegPair(REG_FP, REG_LR, genFuncletInfo.fiSP_to_FPLR_save_delta);
}
else
{
assert(genFuncletInfo.fiFrameType == 3);
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
- genFuncletInfo.fiSpDelta1, INS_OPTS_PRE_INDEX);
+ getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, genFuncletInfo.fiSpDelta1,
+ INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
lowestCalleeSavedOffset += genFuncletInfo.fiSpDelta2; // We haven't done the second adjustment of SP yet.
}
maskSaveRegsInt &= ~(RBM_LR | RBM_FP); // We've saved these now
-
+
genSaveCalleeSavedRegistersHelp(maskSaveRegsInt | maskSaveRegsFloat, lowestCalleeSavedOffset, 0);
if (genFuncletInfo.fiFrameType == 3)
@@ -1010,37 +996,41 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
// X2 is scratch when we have a large constant offset
// Load the CallerSP of the main function (stored in the PSP of the dynamically containing funclet or function)
- genInstrWithConstant(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_R1, REG_R1, genFuncletInfo.fiCallerSP_to_PSP_slot_delta, REG_R2, false);
+ genInstrWithConstant(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_R1, REG_R1,
+ genFuncletInfo.fiCallerSP_to_PSP_slot_delta, REG_R2, false);
regTracker.rsTrackRegTrash(REG_R1);
// Store the PSP value (aka CallerSP)
- genInstrWithConstant(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_R1, REG_SPBASE, genFuncletInfo.fiSP_to_PSP_slot_delta, REG_R2, false);
+ genInstrWithConstant(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_R1, REG_SPBASE,
+ genFuncletInfo.fiSP_to_PSP_slot_delta, REG_R2, false);
// re-establish the frame pointer
- genInstrWithConstant(INS_add, EA_PTRSIZE, REG_FPBASE, REG_R1, genFuncletInfo.fiFunction_CallerSP_to_FP_delta, REG_R2, false);
+ genInstrWithConstant(INS_add, EA_PTRSIZE, REG_FPBASE, REG_R1, genFuncletInfo.fiFunction_CallerSP_to_FP_delta,
+ REG_R2, false);
}
- else // This is a non-filter funclet
+ else // This is a non-filter funclet
{
// X3 is scratch, X2 can also become scratch
-
+
// compute the CallerSP, given the frame pointer. x3 is scratch.
- genInstrWithConstant(INS_add, EA_PTRSIZE, REG_R3, REG_FPBASE, -genFuncletInfo.fiFunction_CallerSP_to_FP_delta, REG_R2, false);
+ genInstrWithConstant(INS_add, EA_PTRSIZE, REG_R3, REG_FPBASE, -genFuncletInfo.fiFunction_CallerSP_to_FP_delta,
+ REG_R2, false);
regTracker.rsTrackRegTrash(REG_R3);
- genInstrWithConstant(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_R3, REG_SPBASE, genFuncletInfo.fiSP_to_PSP_slot_delta, REG_R2, false);
+ genInstrWithConstant(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_R3, REG_SPBASE,
+ genFuncletInfo.fiSP_to_PSP_slot_delta, REG_R2, false);
}
}
-
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
-void CodeGen::genFuncletEpilog()
+void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
printf("*************** In genFuncletEpilog()\n");
#endif
@@ -1065,7 +1055,7 @@ void CodeGen::genFuncletEpilog()
maskRestoreRegsInt &= ~(RBM_LR | RBM_FP); // We restore FP/LR at the end
int lowestCalleeSavedOffset = genFuncletInfo.fiSP_to_CalleeSave_delta;
-
+
if (genFuncletInfo.fiFrameType == 3)
{
// Note that genFuncletInfo.fiSpDelta2 is always a negative value
@@ -1079,11 +1069,11 @@ void CodeGen::genFuncletEpilog()
regMaskTP regsToRestoreMask = maskRestoreRegsInt | maskRestoreRegsFloat;
genRestoreCalleeSavedRegistersHelp(regsToRestoreMask, lowestCalleeSavedOffset, 0);
-
+
if (genFuncletInfo.fiFrameType == 1)
{
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR,
- REG_SPBASE, -genFuncletInfo.fiSpDelta1, INS_OPTS_POST_INDEX);
+ getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -genFuncletInfo.fiSpDelta1,
+ INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
assert(genFuncletInfo.fiSpDelta2 == 0);
@@ -1091,8 +1081,8 @@ void CodeGen::genFuncletEpilog()
}
else if (genFuncletInfo.fiFrameType == 2)
{
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR,
- REG_SPBASE, genFuncletInfo.fiSP_to_FPLR_save_delta);
+ getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
+ genFuncletInfo.fiSP_to_FPLR_save_delta);
compiler->unwindSaveRegPair(REG_FP, REG_LR, genFuncletInfo.fiSP_to_FPLR_save_delta);
// fiFrameType==2 constraints:
@@ -1108,8 +1098,8 @@ void CodeGen::genFuncletEpilog()
{
assert(genFuncletInfo.fiFrameType == 3);
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR,
- REG_SPBASE, -genFuncletInfo.fiSpDelta1, INS_OPTS_POST_INDEX);
+ getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -genFuncletInfo.fiSpDelta1,
+ INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
@@ -1119,7 +1109,6 @@ void CodeGen::genFuncletEpilog()
compiler->unwindEndEpilog();
}
-
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
@@ -1130,7 +1119,7 @@ void CodeGen::genFuncletEpilog()
* See genFuncletProlog() for more information about the prolog/epilog sequences.
*/
-void CodeGen::genCaptureFuncletPrologEpilogInfo()
+void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
return;
@@ -1145,7 +1134,7 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
assert((rsMaskSaveRegs & RBM_LR) != 0);
assert((rsMaskSaveRegs & RBM_FP) != 0);
- unsigned saveRegsCount = genCountBits(rsMaskSaveRegs);
+ unsigned saveRegsCount = genCountBits(rsMaskSaveRegs);
unsigned saveRegsPlusPSPSize = saveRegsCount * REGSIZE_BYTES + /* PSPSym */ REGSIZE_BYTES;
if (compiler->info.compIsVarArgs)
{
@@ -1167,27 +1156,27 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
if (maxFuncletFrameSizeAligned <= 512)
{
- unsigned funcletFrameSize = saveRegsPlusPSPSize + compiler->lvaOutgoingArgSpaceSize;
+ unsigned funcletFrameSize = saveRegsPlusPSPSize + compiler->lvaOutgoingArgSpaceSize;
unsigned funcletFrameSizeAligned = (unsigned)roundUp(funcletFrameSize, STACK_ALIGN);
assert(funcletFrameSizeAligned <= maxFuncletFrameSizeAligned);
unsigned funcletFrameAlignmentPad = funcletFrameSizeAligned - funcletFrameSize;
assert((funcletFrameAlignmentPad == 0) || (funcletFrameAlignmentPad == REGSIZE_BYTES));
- SP_to_FPLR_save_delta = compiler->lvaOutgoingArgSpaceSize;
- SP_to_PSP_slot_delta = SP_to_FPLR_save_delta + 2 /* FP, LR */ * REGSIZE_BYTES + funcletFrameAlignmentPad;
+ SP_to_FPLR_save_delta = compiler->lvaOutgoingArgSpaceSize;
+ SP_to_PSP_slot_delta = SP_to_FPLR_save_delta + 2 /* FP, LR */ * REGSIZE_BYTES + funcletFrameAlignmentPad;
CallerSP_to_PSP_slot_delta = -(int)(saveRegsPlusPSPSize - 2 /* FP, LR */ * REGSIZE_BYTES);
if (compiler->lvaOutgoingArgSpaceSize == 0)
{
- genFuncletInfo.fiFrameType = 1;
+ genFuncletInfo.fiFrameType = 1;
}
else
{
- genFuncletInfo.fiFrameType = 2;
+ genFuncletInfo.fiFrameType = 2;
}
- genFuncletInfo.fiSpDelta1 = -(int)funcletFrameSizeAligned;
- genFuncletInfo.fiSpDelta2 = 0;
+ genFuncletInfo.fiSpDelta1 = -(int)funcletFrameSizeAligned;
+ genFuncletInfo.fiSpDelta2 = 0;
assert(genFuncletInfo.fiSpDelta1 + genFuncletInfo.fiSpDelta2 == -(int)funcletFrameSizeAligned);
}
@@ -1197,12 +1186,13 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
assert((saveRegsPlusPSPAlignmentPad == 0) || (saveRegsPlusPSPAlignmentPad == REGSIZE_BYTES));
SP_to_FPLR_save_delta = outgoingArgSpaceAligned;
- SP_to_PSP_slot_delta = SP_to_FPLR_save_delta + 2 /* FP, LR */ * REGSIZE_BYTES + saveRegsPlusPSPAlignmentPad;
- CallerSP_to_PSP_slot_delta = -(int)(saveRegsPlusPSPSizeAligned - 2 /* FP, LR */ * REGSIZE_BYTES - saveRegsPlusPSPAlignmentPad);
+ SP_to_PSP_slot_delta = SP_to_FPLR_save_delta + 2 /* FP, LR */ * REGSIZE_BYTES + saveRegsPlusPSPAlignmentPad;
+ CallerSP_to_PSP_slot_delta =
+ -(int)(saveRegsPlusPSPSizeAligned - 2 /* FP, LR */ * REGSIZE_BYTES - saveRegsPlusPSPAlignmentPad);
- genFuncletInfo.fiFrameType = 3;
- genFuncletInfo.fiSpDelta1 = -(int)saveRegsPlusPSPSizeAligned;
- genFuncletInfo.fiSpDelta2 = -(int)outgoingArgSpaceAligned;
+ genFuncletInfo.fiFrameType = 3;
+ genFuncletInfo.fiSpDelta1 = -(int)saveRegsPlusPSPSizeAligned;
+ genFuncletInfo.fiSpDelta2 = -(int)outgoingArgSpaceAligned;
assert(genFuncletInfo.fiSpDelta1 + genFuncletInfo.fiSpDelta2 == -(int)maxFuncletFrameSizeAligned);
}
@@ -1220,7 +1210,9 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
printf("\n");
printf("Funclet prolog / epilog info\n");
- printf(" Save regs: "); dspRegMask(genFuncletInfo.fiSaveRegs); printf("\n");
+ printf(" Save regs: ");
+ dspRegMask(genFuncletInfo.fiSaveRegs);
+ printf("\n");
printf(" Function CallerSP-to-FP delta: %d\n", genFuncletInfo.fiFunction_CallerSP_to_FP_delta);
printf(" SP to FP/LR save location delta: %d\n", genFuncletInfo.fiSP_to_FPLR_save_delta);
printf(" SP to PSP slot delta: %d\n", genFuncletInfo.fiSP_to_PSP_slot_delta);
@@ -1230,9 +1222,10 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
printf(" SP delta 1: %d\n", genFuncletInfo.fiSpDelta1);
printf(" SP delta 2: %d\n", genFuncletInfo.fiSpDelta2);
- if (CallerSP_to_PSP_slot_delta != compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for debugging
+ if (CallerSP_to_PSP_slot_delta != compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for debugging
{
- printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n", compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
+ printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n",
+ compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
}
}
#endif // DEBUG
@@ -1242,7 +1235,9 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
assert(genFuncletInfo.fiSP_to_CalleeSave_delta >= 0);
assert(genFuncletInfo.fiCallerSP_to_PSP_slot_delta <= 0);
assert(compiler->lvaPSPSym != BAD_VAR_NUM);
- assert(genFuncletInfo.fiCallerSP_to_PSP_slot_delta == compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and funclet!
+ assert(genFuncletInfo.fiCallerSP_to_PSP_slot_delta ==
+ compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and
+ // funclet!
}
/*
@@ -1274,10 +1269,10 @@ regNumber CodeGenInterface::genGetAssignedReg(GenTreePtr tree)
// Assumptions:
// The lclVar must be a register candidate (lvRegCandidate)
-void CodeGen::genSpillVar(GenTreePtr tree)
+void CodeGen::genSpillVar(GenTreePtr tree)
{
- unsigned varNum = tree->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &(compiler->lvaTable[varNum]);
+ unsigned varNum = tree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
assert(varDsc->lvIsRegCandidate());
@@ -1287,26 +1282,25 @@ void CodeGen::genSpillVar(GenTreePtr tree)
{
var_types lclTyp = varDsc->TypeGet();
if (varDsc->lvNormalizeOnStore())
- lclTyp = genActualType(lclTyp);
+ lclTyp = genActualType(lclTyp);
emitAttr size = emitTypeSize(lclTyp);
bool restoreRegVar = false;
- if (tree->gtOper == GT_REG_VAR)
+ if (tree->gtOper == GT_REG_VAR)
{
tree->SetOper(GT_LCL_VAR);
restoreRegVar = true;
}
// mask off the flag to generate the right spill code, then bring it back
- tree->gtFlags &= ~GTF_REG_VAL;
+ tree->gtFlags &= ~GTF_REG_VAL;
instruction storeIns = ins_Store(tree->TypeGet(), compiler->isSIMDTypeLocalAligned(varNum));
-
assert(varDsc->lvRegNum == tree->gtRegNum);
inst_TT_RV(storeIns, tree, tree->gtRegNum, 0, size);
- tree->gtFlags |= GTF_REG_VAL;
+ tree->gtFlags |= GTF_REG_VAL;
if (restoreRegVar)
{
@@ -1330,10 +1324,9 @@ void CodeGen::genSpillVar(GenTreePtr tree)
#endif
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
-
}
- tree->gtFlags &= ~GTF_SPILL;
+ tree->gtFlags &= ~GTF_SPILL;
varDsc->lvRegNum = REG_STK;
if (varTypeIsMultiReg(tree))
{
@@ -1342,13 +1335,12 @@ void CodeGen::genSpillVar(GenTreePtr tree)
}
// inline
-void CodeGenInterface::genUpdateVarReg(LclVarDsc * varDsc, GenTreePtr tree)
+void CodeGenInterface::genUpdateVarReg(LclVarDsc* varDsc, GenTreePtr tree)
{
assert(tree->OperIsScalarLocal() || (tree->gtOper == GT_COPY));
varDsc->lvRegNum = tree->gtRegNum;
}
-
/*****************************************************************************/
/*****************************************************************************/
@@ -1357,10 +1349,7 @@ void CodeGenInterface::genUpdateVarReg(LclVarDsc * varDsc, GenTre
* Generate code that will set the given register to the integer constant.
*/
-void CodeGen::genSetRegToIcon(regNumber reg,
- ssize_t val,
- var_types type,
- insFlags flags)
+void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
{
// Reg cannot be a FP reg
assert(!genIsValidFloatReg(reg));
@@ -1373,7 +1362,6 @@ void CodeGen::genSetRegToIcon(regNumber reg,
instGen_Set_Reg_To_Imm(emitActualTypeSize(type), reg, val, flags);
}
-
/*****************************************************************************
*
* Generate code to check that the GS cookie wasn't thrashed by a buffer
@@ -1381,21 +1369,21 @@ void CodeGen::genSetRegToIcon(regNumber reg,
* and this works fine in the case of tail calls
* Implementation Note: pushReg = true, in case of tail calls.
*/
-void CodeGen::genEmitGSCookieCheck(bool pushReg)
+void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
// Make sure that the return register is reported as live GC-ref so that any GC that kicks in while
// executing GS cookie check will not collect the object pointed to by REG_INTRET (R0).
if (!pushReg && (compiler->info.compRetType == TYP_REF))
- gcInfo.gcRegGCrefSetCur |= RBM_INTRET;
+ gcInfo.gcRegGCrefSetCur |= RBM_INTRET;
regNumber regGSConst = REG_TMP_0;
regNumber regGSValue = REG_TMP_1;
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
- // load the GS cookie constant into a reg
+ // load the GS cookie constant into a reg
//
genSetRegToIcon(regGSConst, compiler->gsGlobalSecurityCookieVal, TYP_I_IMPL);
}
@@ -1410,8 +1398,8 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
// Compare with the GC cookie constant
getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regGSConst, regGSValue);
- BasicBlock *gsCheckBlk = genCreateTempLabel();
- emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ BasicBlock* gsCheckBlk = genCreateTempLabel();
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
inst_JMP(jmpEqual, gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
@@ -1422,18 +1410,18 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
* Generate code for all the basic blocks in the function.
*/
-void CodeGen::genCodeForBBlist()
+void CodeGen::genCodeForBBlist()
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- unsigned savedStkLvl;
+ unsigned savedStkLvl;
-#ifdef DEBUG
- genInterruptibleUsed = true;
- unsigned stmtNum = 0;
- UINT64 totalCostEx = 0;
- UINT64 totalCostSz = 0;
+#ifdef DEBUG
+ genInterruptibleUsed = true;
+ unsigned stmtNum = 0;
+ UINT64 totalCostEx = 0;
+ UINT64 totalCostSz = 0;
// You have to be careful if you create basic blocks from now on
compiler->fgSafeBasicBlockCreation = false;
@@ -1454,7 +1442,8 @@ void CodeGen::genCodeForBBlist()
// Prepare the blocks for exception handling codegen: mark the blocks that needs labels.
genPrepForEHCodegen();
- assert(!compiler->fgFirstBBScratch || compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
+ assert(!compiler->fgFirstBBScratch ||
+ compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
/* Initialize the spill tracking logic */
@@ -1486,18 +1475,16 @@ void CodeGen::genCodeForBBlist()
/* If any arguments live in registers, mark those regs as such */
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
/* Is this variable a parameter assigned to a register? */
- if (!varDsc->lvIsParam || !varDsc->lvRegister)
+ if (!varDsc->lvIsParam || !varDsc->lvRegister)
continue;
/* Is the argument live on entry to the method? */
- if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
+ if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
continue;
/* Is this a floating-point argument? */
@@ -1524,12 +1511,10 @@ void CodeGen::genCodeForBBlist()
*
*/
- BasicBlock * block;
- BasicBlock * lblk; /* previous block */
+ BasicBlock* block;
+ BasicBlock* lblk; /* previous block */
- for (lblk = NULL, block = compiler->fgFirstBB;
- block != NULL;
- lblk = block, block = block->bbNext)
+ for (lblk = NULL, block = compiler->fgFirstBB; block != NULL; lblk = block, block = block->bbNext)
{
#ifdef DEBUG
if (compiler->verbose)
@@ -1555,7 +1540,7 @@ void CodeGen::genCodeForBBlist()
// change? We cleared them out above. Maybe we should just not clear them out, but update the ones that change
// here. That would require handling the changes in recordVarLocationsAtStartOfBB().
- regMaskTP newLiveRegSet = RBM_NONE;
+ regMaskTP newLiveRegSet = RBM_NONE;
regMaskTP newRegGCrefSet = RBM_NONE;
regMaskTP newRegByrefSet = RBM_NONE;
#ifdef DEBUG
@@ -1565,8 +1550,8 @@ void CodeGen::genCodeForBBlist()
VARSET_ITER_INIT(compiler, iter, block->bbLiveIn, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
if (varDsc->lvIsInReg())
{
@@ -1653,21 +1638,19 @@ void CodeGen::genCodeForBBlist()
}
#endif
-#ifdef DEBUG
- if (compiler->opts.dspCode)
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
printf("\n L_M%03u_BB%02u:\n", Compiler::s_compMethodsCount, block->bbNum);
#endif
block->bbEmitCookie = NULL;
- if (block->bbFlags & (BBF_JMP_TARGET|BBF_HAS_LABEL))
+ if (block->bbFlags & (BBF_JMP_TARGET | BBF_HAS_LABEL))
{
/* Mark a label and update the current set of live GC refs */
- block->bbEmitCookie = getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- FALSE);
+ block->bbEmitCookie = getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, FALSE);
}
if (block == compiler->fgFirstColdBlock)
@@ -1700,14 +1683,14 @@ void CodeGen::genCodeForBBlist()
siBeginBlock(block);
// BBF_INTERNAL blocks don't correspond to any single IL instruction.
- if (compiler->opts.compDbgInfo &&
- (block->bbFlags & BBF_INTERNAL) &&
- !compiler->fgBBisScratch(block)) // If the block is the distinguished first scratch block, then no need to emit a NO_MAPPING entry, immediately after the prolog.
+ if (compiler->opts.compDbgInfo && (block->bbFlags & BBF_INTERNAL) &&
+ !compiler->fgBBisScratch(block)) // If the block is the distinguished first scratch block, then no need to
+ // emit a NO_MAPPING entry, immediately after the prolog.
{
- genIPmappingAdd((IL_OFFSETX) ICorDebugInfo::NO_MAPPING, true);
+ genIPmappingAdd((IL_OFFSETX)ICorDebugInfo::NO_MAPPING, true);
}
- bool firstMapping = true;
+ bool firstMapping = true;
#endif // DEBUGGING_SUPPORT
/*---------------------------------------------------------------------
@@ -1729,7 +1712,7 @@ void CodeGen::genCodeForBBlist()
continue;
/* Get hold of the statement tree */
- GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
+ GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
#if defined(DEBUGGING_SUPPORT)
@@ -1748,13 +1731,11 @@ void CodeGen::genCodeForBBlist()
noway_assert(stmt->gtStmt.gtStmtLastILoffs <= compiler->info.compILCodeSize ||
stmt->gtStmt.gtStmtLastILoffs == BAD_IL_OFFSET);
- if (compiler->opts.dspCode && compiler->opts.dspInstrs &&
- stmt->gtStmt.gtStmtLastILoffs != BAD_IL_OFFSET)
+ if (compiler->opts.dspCode && compiler->opts.dspInstrs && stmt->gtStmt.gtStmtLastILoffs != BAD_IL_OFFSET)
{
while (genCurDispOffset <= stmt->gtStmt.gtStmtLastILoffs)
{
- genCurDispOffset +=
- dumpSingleInstr(compiler->info.compCode, genCurDispOffset, "> ");
+ genCurDispOffset += dumpSingleInstr(compiler->info.compCode, genCurDispOffset, "> ");
}
}
@@ -1763,7 +1744,8 @@ void CodeGen::genCodeForBBlist()
{
printf("\nGenerating BB%02u, stmt %u\t\t", block->bbNum, stmtNum);
printf("Holding variables: ");
- dspRegMask(regSet.rsMaskVars); printf("\n\n");
+ dspRegMask(regSet.rsMaskVars);
+ printf("\n\n");
if (compiler->verboseTrees)
{
compiler->gtDispTree(compiler->opts.compDbgInfo ? stmt : tree);
@@ -1771,17 +1753,15 @@ void CodeGen::genCodeForBBlist()
}
}
totalCostEx += ((UINT64)stmt->gtCostEx * block->getBBWeight(compiler));
- totalCostSz += (UINT64) stmt->gtCostSz;
+ totalCostSz += (UINT64)stmt->gtCostSz;
#endif // DEBUG
// Traverse the tree in linear order, generating code for each node in the
// tree as we encounter it
compiler->compCurLifeTree = NULL;
- compiler->compCurStmt = stmt;
- for (GenTreePtr treeNode = stmt->gtStmt.gtStmtList;
- treeNode != NULL;
- treeNode = treeNode->gtNext)
+ compiler->compCurStmt = stmt;
+ for (GenTreePtr treeNode = stmt->gtStmt.gtStmtList; treeNode != NULL; treeNode = treeNode->gtNext)
{
genCodeForTreeNode(treeNode);
if (treeNode->gtHasReg() && treeNode->gtLsraInfo.isLocalDefUse)
@@ -1795,7 +1775,7 @@ void CodeGen::genCodeForBBlist()
#ifdef DEBUG
/* Make sure we didn't bungle pointer register tracking */
- regMaskTP ptrRegs = (gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur);
+ regMaskTP ptrRegs = (gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur);
regMaskTP nonVarPtrRegs = ptrRegs & ~regSet.rsMaskVars;
// If return is a GC-type, clear it. Note that if a common
@@ -1803,9 +1783,8 @@ void CodeGen::genCodeForBBlist()
// even though we might return a ref. We can't use the compRetType
// as the determiner because something we are tracking as a byref
// might be used as a return value of a int function (which is legal)
- if (tree->gtOper == GT_RETURN &&
- (varTypeIsGC(compiler->info.compRetType) ||
- (tree->gtOp.gtOp1 != 0 && varTypeIsGC(tree->gtOp.gtOp1->TypeGet()))))
+ if (tree->gtOper == GT_RETURN && (varTypeIsGC(compiler->info.compRetType) ||
+ (tree->gtOp.gtOp1 != 0 && varTypeIsGC(tree->gtOp.gtOp1->TypeGet()))))
{
nonVarPtrRegs &= ~RBM_INTRET;
}
@@ -1814,14 +1793,13 @@ void CodeGen::genCodeForBBlist()
// harmless "inc" instruction (does not interfere with the exception
// object).
- if ((compiler->opts.eeFlags & CORJIT_FLG_BBINSTR) &&
- (stmt == block->bbTreeList) &&
+ if ((compiler->opts.eeFlags & CORJIT_FLG_BBINSTR) && (stmt == block->bbTreeList) &&
handlerGetsXcptnObj(block->bbCatchTyp))
{
nonVarPtrRegs &= ~RBM_EXCEPTION_OBJECT;
}
- if (nonVarPtrRegs)
+ if (nonVarPtrRegs)
{
printf("Regset after tree=");
compiler->printTreeID(tree);
@@ -1839,7 +1817,7 @@ void CodeGen::genCodeForBBlist()
noway_assert(nonVarPtrRegs == 0);
- for (GenTree * node = stmt->gtStmt.gtStmtList; node; node=node->gtNext)
+ for (GenTree* node = stmt->gtStmt.gtStmtList; node; node = node->gtNext)
{
assert(!(node->gtFlags & GTF_SPILL));
}
@@ -1865,7 +1843,7 @@ void CodeGen::genCodeForBBlist()
}
#endif // defined(DEBUG) && defined(_TARGET_ARM64_)
-#ifdef DEBUGGING_SUPPORT
+#ifdef DEBUGGING_SUPPORT
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
@@ -1886,7 +1864,7 @@ void CodeGen::genCodeForBBlist()
are at the end of the method. It would be nice if we could fix
our code so that this throw block will no longer be necessary. */
- //noway_assert(block->bbCodeOffsEnd != compiler->info.compILCodeSize);
+ // noway_assert(block->bbCodeOffsEnd != compiler->info.compILCodeSize);
siCloseAllOpenScopes();
}
@@ -1906,8 +1884,8 @@ void CodeGen::genCodeForBBlist()
VARSET_ITER_INIT(compiler, extraLiveVarIter, extraLiveVars, extraLiveVarIndex);
while (extraLiveVarIter.NextElem(compiler, &extraLiveVarIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[extraLiveVarIndex];
- LclVarDsc * varDsc = compiler->lvaTable + varNum;
+ unsigned varNum = compiler->lvaTrackedToVarNum[extraLiveVarIndex];
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
assert(!varDsc->lvIsRegCandidate());
}
#endif
@@ -1980,121 +1958,118 @@ void CodeGen::genCodeForBBlist()
switch (block->bbJumpKind)
{
- case BBJ_ALWAYS:
- inst_JMP(EJ_jmp, block->bbJumpDest);
- break;
-
- case BBJ_RETURN:
- genExitCode(block);
- break;
+ case BBJ_ALWAYS:
+ inst_JMP(EJ_jmp, block->bbJumpDest);
+ break;
- case BBJ_THROW:
- // If we have a throw at the end of a function or funclet, we need to emit another instruction
- // afterwards to help the OS unwinder determine the correct context during unwind.
- // We insert an unexecuted breakpoint instruction in several situations
- // following a throw instruction:
- // 1. If the throw is the last instruction of the function or funclet. This helps
- // the OS unwinder determine the correct context during an unwind from the
- // thrown exception.
- // 2. If this is this is the last block of the hot section.
- // 3. If the subsequent block is a special throw block.
- // 4. On AMD64, if the next block is in a different EH region.
- if ((block->bbNext == NULL)
- || (block->bbNext->bbFlags & BBF_FUNCLET_BEG)
- || !BasicBlock::sameEHRegion(block, block->bbNext)
- || (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext))
- || block->bbNext == compiler->fgFirstColdBlock
- )
- {
- instGen(INS_BREAKPOINT); // This should never get executed
- }
+ case BBJ_RETURN:
+ genExitCode(block);
+ break;
- break;
+ case BBJ_THROW:
+ // If we have a throw at the end of a function or funclet, we need to emit another instruction
+ // afterwards to help the OS unwinder determine the correct context during unwind.
+ // We insert an unexecuted breakpoint instruction in several situations
+ // following a throw instruction:
+ // 1. If the throw is the last instruction of the function or funclet. This helps
+ // the OS unwinder determine the correct context during an unwind from the
+ // thrown exception.
+        //    2. If this is the last block of the hot section.
+ // 3. If the subsequent block is a special throw block.
+ // 4. On AMD64, if the next block is in a different EH region.
+ if ((block->bbNext == NULL) || (block->bbNext->bbFlags & BBF_FUNCLET_BEG) ||
+ !BasicBlock::sameEHRegion(block, block->bbNext) ||
+ (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext)) ||
+ block->bbNext == compiler->fgFirstColdBlock)
+ {
+ instGen(INS_BREAKPOINT); // This should never get executed
+ }
- case BBJ_CALLFINALLY:
+ break;
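
The four cases listed in the comment above reduce to a single predicate over the throwing block and its successor. A minimal standalone sketch of that decision, using a hypothetical Block struct in place of the JIT's BasicBlock (field and function names here are illustrative, not the real bbNext/bbFlags/fgIsThrowHlpBlk API):

// Sketch of the "breakpoint after throw" decision described above.
#include <cstdio>

struct Block
{
    Block* next;          // stand-in for bbNext
    bool   funcletBegin;  // stand-in for BBF_FUNCLET_BEG
    int    ehRegion;      // stand-in for the EH region identity
    bool   isThrowHelper; // stand-in for fgIsThrowHlpBlk(...)
};

// True when an unexecuted breakpoint should follow the throw, mirroring
// conditions 1-4 above (frame-pointer and cold-section details simplified).
bool needsBreakpointAfterThrow(const Block* block, const Block* firstColdBlock, bool framePointerUsed)
{
    const Block* next = block->next;
    return (next == nullptr) ||                          // 1. last block of the function/funclet
           next->funcletBegin ||                         // 1. (funclet boundary)
           (next == firstColdBlock) ||                   // 2. last block of the hot section
           (!framePointerUsed && next->isThrowHelper) || // 3. next block is a special throw block
           (block->ehRegion != next->ehRegion);          // 4. next block is in a different EH region
}

int main()
{
    Block throwBlock{nullptr, false, 0, false};
    printf("%d\n", needsBreakpointAfterThrow(&throwBlock, nullptr, true)); // prints 1
}
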
- // Generate a call to the finally, like this:
- // mov x0,qword ptr [fp + 10H] // Load x0 with PSPSym
- // bl finally-funclet
- // b finally-return // Only for non-retless finally calls
- // The 'b' can be a NOP if we're going to the next block.
+ case BBJ_CALLFINALLY:
- getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_R0, compiler->lvaPSPSym, 0);
- getEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest);
+ // Generate a call to the finally, like this:
+ // mov x0,qword ptr [fp + 10H] // Load x0 with PSPSym
+ // bl finally-funclet
+ // b finally-return // Only for non-retless finally calls
+ // The 'b' can be a NOP if we're going to the next block.
- if (block->bbFlags & BBF_RETLESS_CALL)
- {
- // We have a retless call, and the last instruction generated was a call.
- // If the next block is in a different EH region (or is the end of the code
- // block), then we need to generate a breakpoint here (since it will never
- // get executed) to get proper unwind behavior.
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_R0, compiler->lvaPSPSym, 0);
+ getEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest);
- if ((block->bbNext == nullptr) ||
- !BasicBlock::sameEHRegion(block, block->bbNext))
+ if (block->bbFlags & BBF_RETLESS_CALL)
{
- instGen(INS_BREAKPOINT); // This should never get executed
- }
- }
- else
- {
- // Because of the way the flowgraph is connected, the liveness info for this one instruction
- // after the call is not (can not be) correct in cases where a variable has a last use in the
- // handler. So turn off GC reporting for this single instruction.
- getEmitter()->emitDisableGC();
+ // We have a retless call, and the last instruction generated was a call.
+ // If the next block is in a different EH region (or is the end of the code
+ // block), then we need to generate a breakpoint here (since it will never
+ // get executed) to get proper unwind behavior.
- // Now go to where the finally funclet needs to return to.
- if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
- {
- // Fall-through.
- // TODO-ARM64-CQ: Can we get rid of this instruction, and just have the call return directly
- // to the next instruction? This would depend on stack walking from within the finally
- // handler working without this instruction being in this special EH region.
- instGen(INS_nop);
+ if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
+ {
+ instGen(INS_BREAKPOINT); // This should never get executed
+ }
}
else
{
- inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
- }
+ // Because of the way the flowgraph is connected, the liveness info for this one instruction
+ // after the call is not (can not be) correct in cases where a variable has a last use in the
+ // handler. So turn off GC reporting for this single instruction.
+ getEmitter()->emitDisableGC();
- getEmitter()->emitEnableGC();
- }
+ // Now go to where the finally funclet needs to return to.
+ if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
+ {
+ // Fall-through.
+ // TODO-ARM64-CQ: Can we get rid of this instruction, and just have the call return directly
+ // to the next instruction? This would depend on stack walking from within the finally
+ // handler working without this instruction being in this special EH region.
+ instGen(INS_nop);
+ }
+ else
+ {
+ inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
+ }
- // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
- // jump target using bbJumpDest - that is already used to point
- // to the finally block. So just skip past the BBJ_ALWAYS unless the
- // block is RETLESS.
- if ( !(block->bbFlags & BBF_RETLESS_CALL) )
- {
- assert(block->isBBCallAlwaysPair());
+ getEmitter()->emitEnableGC();
+ }
- lblk = block;
- block = block->bbNext;
- }
- break;
+ // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
+ // jump target using bbJumpDest - that is already used to point
+ // to the finally block. So just skip past the BBJ_ALWAYS unless the
+ // block is RETLESS.
+ if (!(block->bbFlags & BBF_RETLESS_CALL))
+ {
+ assert(block->isBBCallAlwaysPair());
- case BBJ_EHCATCHRET:
- // For long address (default): `adrp + add` will be emitted.
- // For short address (proven later): `adr` will be emitted.
- getEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
+ lblk = block;
+ block = block->bbNext;
+ }
+ break;
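
For reference, the BBJ_CALLFINALLY sequence described in the comment above (load the PSPSym into x0, bl to the finally funclet, then either a breakpoint, a nop, or a branch to the finally-return target) can be written out as a plan. A rough sketch that only prints the intended instructions; the emitDisableGC/emitEnableGC bracketing of the post-call instruction is omitted, and the operand names are made up for the illustration:

// Prints the instruction plan for a call to a finally funclet, mirroring the
// retless / fall-through / explicit-branch cases handled above.
#include <cstdio>

void planCallFinally(bool retless, bool nextInSameEHRegion, bool returnTargetIsFallThrough)
{
    printf("ldr  x0, [fp, #pspSymOffset]  ; pass the PSPSym to the funclet\n");
    printf("bl   finally-funclet\n");
    if (retless)
    {
        // Nothing returns here; a breakpoint is needed only if the following
        // code is in a different EH region (or there is no following code).
        if (!nextInSameEHRegion)
            printf("brk  #0                       ; never executed, aids unwinding\n");
    }
    else if (returnTargetIsFallThrough)
    {
        printf("nop                           ; finally returns to the next instruction\n");
    }
    else
    {
        printf("b    finally-return           ; jump to where the finally returns to\n");
    }
}

int main()
{
    planCallFinally(false, true, false); // ordinary call-finally with an explicit return branch
    planCallFinally(true, false, false); // retless call followed by a different EH region
}
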
- __fallthrough;
+ case BBJ_EHCATCHRET:
+ // For long address (default): `adrp + add` will be emitted.
+ // For short address (proven later): `adr` will be emitted.
+ getEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- genReserveFuncletEpilog(block);
- break;
+ __fallthrough;
- case BBJ_NONE:
- case BBJ_COND:
- case BBJ_SWITCH:
- break;
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ genReserveFuncletEpilog(block);
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ case BBJ_NONE:
+ case BBJ_COND:
+ case BBJ_SWITCH:
+ break;
+
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
-#ifdef DEBUG
+#ifdef DEBUG
compiler->compCurBB = 0;
#endif
@@ -2111,12 +2086,11 @@ void CodeGen::genCodeForBBlist()
compiler->tmpEnd();
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("\n# ");
- printf("totalCostEx = %6d, totalCostSz = %5d ",
- totalCostEx, totalCostSz);
+ printf("totalCostEx = %6d, totalCostSz = %5d ", totalCostEx, totalCostSz);
printf("%s\n", compiler->info.compFullName);
}
#endif
@@ -2125,8 +2099,7 @@ void CodeGen::genCodeForBBlist()
// return the child that has the same reg as the dst (if any)
// other child returned (out param) in 'other'
// TODO-Cleanup: move to CodeGenCommon.cpp
-GenTree *
-sameRegAsDst(GenTree *tree, GenTree *&other /*out*/)
+GenTree* sameRegAsDst(GenTree* tree, GenTree*& other /*out*/)
{
if (tree->gtRegNum == REG_NA)
{
@@ -2155,16 +2128,13 @@ sameRegAsDst(GenTree *tree, GenTree *&other /*out*/)
// move an immediate value into an integer register
-void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
- regNumber reg,
- ssize_t imm,
- insFlags flags)
+void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm, insFlags flags)
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
if (!compiler->opts.compReloc)
{
- size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
+ size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
}
if (EA_IS_RELOC(size))
@@ -2187,10 +2157,11 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
getEmitter()->emitIns_R_I(INS_mov, size, reg, (imm & 0xffff));
getEmitter()->emitIns_R_I_I(INS_movk, size, reg, ((imm >> 16) & 0xffff), 16, INS_OPTS_LSL);
- if ((size == EA_8BYTE) && ((imm >> 32) != 0)) // Sometimes the upper 32 bits are zero and the first mov has zero-ed them
+ if ((size == EA_8BYTE) &&
+ ((imm >> 32) != 0)) // Sometimes the upper 32 bits are zero and the first mov has zero-ed them
{
getEmitter()->emitIns_R_I_I(INS_movk, EA_8BYTE, reg, ((imm >> 32) & 0xffff), 32, INS_OPTS_LSL);
- if ((imm >> 48) != 0) // Frequently the upper 16 bits are zero and the first mov has zero-ed them
+ if ((imm >> 48) != 0) // Frequently the upper 16 bits are zero and the first mov has zero-ed them
{
getEmitter()->emitIns_R_I_I(INS_movk, EA_8BYTE, reg, ((imm >> 48) & 0xffff), 48, INS_OPTS_LSL);
}
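
The halfword-at-a-time materialization above loads bits 0-15 with mov, patches bits 16-31 with movk, and only emits the higher movk instructions when those chunks are non-zero (the initial mov already zeroed them). A standalone sketch of that plan, printing instructions instead of calling the emitter (register name and mnemonics are illustrative only):

// Models the mov/movk sequence used above to build a 64-bit immediate.
#include <cstdint>
#include <cstdio>

void planSetRegToImm64(uint64_t imm)
{
    printf("mov  x0, #0x%04llx\n", (unsigned long long)(imm & 0xffff));
    printf("movk x0, #0x%04llx, lsl #16\n", (unsigned long long)((imm >> 16) & 0xffff));
    if ((imm >> 32) != 0) // the upper 32 bits are often zero already
    {
        printf("movk x0, #0x%04llx, lsl #32\n", (unsigned long long)((imm >> 32) & 0xffff));
        if ((imm >> 48) != 0) // and the top 16 bits even more often
        {
            printf("movk x0, #0x%04llx, lsl #48\n", (unsigned long long)((imm >> 48) & 0xffff));
        }
    }
}

int main()
{
    planSetRegToImm64(0x12345678ULL);         // two instructions
    planSetRegToImm64(0x0000123456789abcULL); // three instructions
    planSetRegToImm64(0xfedcba9876543210ULL); // four instructions
}
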
@@ -2212,16 +2183,16 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
* specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
* genProduceReg() on the target register.
*/
-void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree)
+void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree)
{
switch (tree->gtOper)
{
- case GT_CNS_INT:
+ case GT_CNS_INT:
{
// relocatable values tend to come down as a CNS_INT of native int type
// so the line between these two opcodes is kind of blurry
- GenTreeIntConCommon* con = tree->AsIntConCommon();
- ssize_t cnsVal = con->IconValue();
+ GenTreeIntConCommon* con = tree->AsIntConCommon();
+ ssize_t cnsVal = con->IconValue();
bool needReloc = compiler->opts.compReloc && tree->IsIconHandle();
if (needReloc)
@@ -2236,12 +2207,12 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types tar
}
break;
- case GT_CNS_DBL:
+ case GT_CNS_DBL:
{
- emitter *emit = getEmitter();
- emitAttr size = emitTypeSize(tree);
- GenTreeDblCon *dblConst = tree->AsDblCon();
- double constValue = dblConst->gtDblCon.gtDconVal;
+ emitter* emit = getEmitter();
+ emitAttr size = emitTypeSize(tree);
+ GenTreeDblCon* dblConst = tree->AsDblCon();
+ double constValue = dblConst->gtDblCon.gtDconVal;
// Make sure we use "movi reg, 0x00" only for positive zero (0.0) and not for negative zero (-0.0)
if (*(__int64*)&constValue == 0)
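
The test above depends on +0.0 being the only double whose IEEE-754 encoding is all zero bits; -0.0 has the sign bit set and must not take the "movi reg, 0x00" path. A small sketch of the same check, using memcpy instead of the pointer cast so the bit inspection stays well-defined:

// Only positive zero has an all-zero bit pattern; negative zero does not.
#include <cstdint>
#include <cstdio>
#include <cstring>

bool isPositiveZero(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    return bits == 0; // -0.0 is 0x8000000000000000, so it fails this test
}

int main()
{
    printf("%d %d %d\n", isPositiveZero(0.0), isPositiveZero(-0.0), isPositiveZero(1.0)); // 1 0 0
}
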
@@ -2259,7 +2230,7 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types tar
{
// Get a temp integer register to compute long address.
regMaskTP addrRegMask = tree->gtRsvdRegs;
- regNumber addrReg = genRegNumFromMask(addrRegMask);
+ regNumber addrReg = genRegNumFromMask(addrRegMask);
noway_assert(addrReg != REG_NA);
// We must load the FP constant from the constant pool
@@ -2272,12 +2243,11 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types tar
}
break;
- default:
- unreached();
+ default:
+ unreached();
}
}
-
// Generate code to get the high N bits of a N*N=2N bit multiplication result
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
@@ -2323,7 +2293,7 @@ void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
inst_RV_RV(INS_mov, targetReg, REG_RDX, targetType);
}
-#else // !0
+#else // !0
NYI("genCodeForMulHi");
#endif // !0
}
@@ -2339,22 +2309,16 @@ void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
// This method is expected to have called genConsumeOperands() before calling it.
void CodeGen::genCodeForBinary(GenTree* treeNode)
{
- const genTreeOps oper = treeNode->OperGet();
- regNumber targetReg = treeNode->gtRegNum;
- var_types targetType = treeNode->TypeGet();
- emitter *emit = getEmitter();
+ const genTreeOps oper = treeNode->OperGet();
+ regNumber targetReg = treeNode->gtRegNum;
+ var_types targetType = treeNode->TypeGet();
+ emitter* emit = getEmitter();
- assert (oper == GT_ADD ||
- oper == GT_SUB ||
- oper == GT_MUL ||
- oper == GT_DIV ||
- oper == GT_UDIV ||
- oper == GT_AND ||
- oper == GT_OR ||
- oper == GT_XOR);
+ assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL || oper == GT_DIV || oper == GT_UDIV || oper == GT_AND ||
+ oper == GT_OR || oper == GT_XOR);
- GenTreePtr op1 = treeNode->gtGetOp1();
- GenTreePtr op2 = treeNode->gtGetOp2();
+ GenTreePtr op1 = treeNode->gtGetOp1();
+ GenTreePtr op2 = treeNode->gtGetOp2();
instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
@@ -2376,8 +2340,7 @@ void CodeGen::genCodeForBinary(GenTree* treeNode)
// Returns true if the 'treeNode" is a GT_RETURN node of type struct.
// Otherwise returns false.
//
-bool
-CodeGen::isStructReturn(GenTreePtr treeNode)
+bool CodeGen::isStructReturn(GenTreePtr treeNode)
{
// This method could be called for 'treeNode' of GT_RET_FILT or GT_RETURN.
// For the GT_RET_FILT, the return is always
@@ -2398,8 +2361,7 @@ CodeGen::isStructReturn(GenTreePtr treeNode)
//
// Assumption:
// op1 of GT_RETURN node is either GT_LCL_VAR or multi-reg GT_CALL
-void
-CodeGen::genStructReturn(GenTreePtr treeNode)
+void CodeGen::genStructReturn(GenTreePtr treeNode)
{
assert(treeNode->OperGet() == GT_RETURN);
assert(isStructReturn(treeNode));
@@ -2407,17 +2369,17 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
if (op1->OperGet() == GT_LCL_VAR)
{
- GenTreeLclVarCommon* lclVar = op1->AsLclVarCommon();
- LclVarDsc* varDsc = &(compiler->lvaTable[lclVar->gtLclNum]);
- var_types lclType = genActualType(varDsc->TypeGet());
+ GenTreeLclVarCommon* lclVar = op1->AsLclVarCommon();
+ LclVarDsc* varDsc = &(compiler->lvaTable[lclVar->gtLclNum]);
+ var_types lclType = genActualType(varDsc->TypeGet());
// Currently only multireg TYP_STRUCT types such as HFA's and 16-byte structs are supported
// In the future we could have FEATURE_SIMD types like TYP_SIMD16
- assert(lclType == TYP_STRUCT);
+ assert(lclType == TYP_STRUCT);
assert(varDsc->lvIsMultiRegRet);
- ReturnTypeDesc retTypeDesc;
- unsigned regCount;
+ ReturnTypeDesc retTypeDesc;
+ unsigned regCount;
retTypeDesc.InitializeStructReturnType(compiler, varDsc->lvVerTypeInfo.GetClassHandle());
regCount = retTypeDesc.GetReturnRegCount();
@@ -2425,12 +2387,12 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
assert(regCount >= 2);
assert(op1->isContained());
- // Copy var on stack into ABI return registers
+ // Copy var on stack into ABI return registers
int offset = 0;
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc.GetReturnRegType(i);
- regNumber reg = retTypeDesc.GetABIReturnReg(i);
+ regNumber reg = retTypeDesc.GetABIReturnReg(i);
getEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), reg, lclVar->gtLclNum, offset);
offset += genTypeSize(type);
}
@@ -2441,27 +2403,27 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
genConsumeRegs(op1);
- GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
- GenTreeCall* call = actualOp1->AsCall();
+ GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
+ GenTreeCall* call = actualOp1->AsCall();
- ReturnTypeDesc* pRetTypeDesc;
- unsigned regCount;
- unsigned matchingCount = 0;
+ ReturnTypeDesc* pRetTypeDesc;
+ unsigned regCount;
+ unsigned matchingCount = 0;
pRetTypeDesc = call->GetReturnTypeDesc();
regCount = pRetTypeDesc->GetReturnRegCount();
- var_types regType [MAX_RET_REG_COUNT];
- regNumber returnReg [MAX_RET_REG_COUNT];
- regNumber allocatedReg[MAX_RET_REG_COUNT];
- regMaskTP srcRegsMask = 0;
- regMaskTP dstRegsMask = 0;
- bool needToShuffleRegs = false; // Set to true if we have to move any registers
+ var_types regType[MAX_RET_REG_COUNT];
+ regNumber returnReg[MAX_RET_REG_COUNT];
+ regNumber allocatedReg[MAX_RET_REG_COUNT];
+ regMaskTP srcRegsMask = 0;
+ regMaskTP dstRegsMask = 0;
+ bool needToShuffleRegs = false; // Set to true if we have to move any registers
for (unsigned i = 0; i < regCount; ++i)
{
- regType[i] = pRetTypeDesc->GetReturnRegType(i);
- returnReg[i] = pRetTypeDesc->GetABIReturnReg(i);
+ regType[i] = pRetTypeDesc->GetReturnRegType(i);
+ returnReg[i] = pRetTypeDesc->GetABIReturnReg(i);
regNumber reloadReg = REG_NA;
if (op1->IsCopyOrReload())
@@ -2491,7 +2453,7 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
//
srcRegsMask |= genRegMask(allocatedReg[i]);
dstRegsMask |= genRegMask(returnReg[i]);
-
+
needToShuffleRegs = true;
}
}
@@ -2501,7 +2463,7 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
assert(matchingCount < regCount);
unsigned remainingRegCount = regCount - matchingCount;
- regMaskTP extraRegMask = treeNode->gtRsvdRegs;
+ regMaskTP extraRegMask = treeNode->gtRsvdRegs;
while (remainingRegCount > 0)
{
@@ -2512,7 +2474,7 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
regMaskTP dstMask;
regNumber srcReg;
regNumber dstReg;
- var_types curType = TYP_UNKNOWN;
+ var_types curType = TYP_UNKNOWN;
regNumber freeUpReg = REG_NA;
if (availableMask == 0)
@@ -2520,14 +2482,14 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
// Circular register dependencies
// So just free up the lowest register in dstRegsMask by moving it to the 'extra' register
- assert(dstRegsMask == srcRegsMask); // this has to be true for us to reach here
- assert(extraRegMask != 0); // we require an 'extra' register
- assert((extraRegMask & ~dstRegsMask) != 0); // it can't be part of dstRegsMask
+ assert(dstRegsMask == srcRegsMask); // this has to be true for us to reach here
+ assert(extraRegMask != 0); // we require an 'extra' register
+ assert((extraRegMask & ~dstRegsMask) != 0); // it can't be part of dstRegsMask
availableMask = extraRegMask & ~dstRegsMask;
regMaskTP srcMask = genFindLowestBit(srcRegsMask);
- freeUpReg = genRegNumFromMask(srcMask);
+ freeUpReg = genRegNumFromMask(srcMask);
}
dstMask = genFindLowestBit(availableMask);
@@ -2541,19 +2503,19 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
srcReg = freeUpReg;
// Find the 'srcReg' and set 'curType', change allocatedReg[] to dstReg
- // and add the new register mask bit to srcRegsMask
+ // and add the new register mask bit to srcRegsMask
//
for (unsigned i = 0; i < regCount; ++i)
{
if (allocatedReg[i] == srcReg)
{
- curType = regType[i];
+ curType = regType[i];
allocatedReg[i] = dstReg;
srcRegsMask |= genRegMask(dstReg);
}
}
}
- else // The normal case
+ else // The normal case
{
// Find the 'srcReg' and set 'curType'
//
@@ -2563,7 +2525,7 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
{
srcReg = allocatedReg[i];
curType = regType[i];
- }
+ }
}
             // After we perform this move we will have one less register to set up
remainingRegCount--;
@@ -2576,15 +2538,13 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
srcRegsMask &= ~genRegMask(srcReg);
dstRegsMask &= ~genRegMask(dstReg);
- } // while (remainingRegCount > 0)
+ } // while (remainingRegCount > 0)
- } // (needToShuffleRegs)
-
- } // op1 must be multi-reg GT_CALL
+ } // (needToShuffleRegs)
+ } // op1 must be multi-reg GT_CALL
}
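
The shuffle loop above moves each value returned by the call into its ABI return register: it keeps picking a destination register that is not still serving as a pending source, and when the remaining assignments form a cycle it parks one value in the reserved 'extra' register to break the cycle. A simplified standalone model of that algorithm, using small integers as registers and ignoring the types and register masks that the real code tracks:

// Moves each value from src[i] to dst[i]; 'spare' must not be a destination.
#include <cstdio>
#include <map>
#include <vector>

void shuffle(std::vector<int> src, const std::vector<int>& dst, int spare, std::map<int, int>& regs)
{
    auto pending = [&]() {
        for (size_t i = 0; i < src.size(); ++i)
            if (src[i] != dst[i])
                return true;
        return false;
    };
    while (pending())
    {
        bool moved = false;
        for (size_t i = 0; i < src.size(); ++i)
        {
            if (src[i] == dst[i])
                continue;
            bool dstIsPendingSource = false;
            for (size_t j = 0; j < src.size(); ++j)
                if (j != i && src[j] == dst[i])
                    dstIsPendingSource = true;
            if (!dstIsPendingSource) // the normal case: the destination register is free
            {
                printf("mov r%d <- r%d\n", dst[i], src[i]);
                regs[dst[i]] = regs[src[i]];
                src[i]       = dst[i];
                moved        = true;
            }
        }
        if (!moved) // circular dependency: park one source in the spare register
        {
            for (size_t i = 0; i < src.size(); ++i)
            {
                if (src[i] != dst[i])
                {
                    printf("mov r%d <- r%d\n", spare, src[i]);
                    regs[spare] = regs[src[i]];
                    src[i]      = spare;
                    break;
                }
            }
        }
    }
}

int main()
{
    // Value A sits in r1 but must be returned in r0, and B sits in r0 but must
    // be returned in r1: a two-element cycle that needs the spare register r9.
    std::map<int, int> regs = {{0, 'B'}, {1, 'A'}, {9, 0}};
    shuffle({1, 0}, {0, 1}, /*spare*/ 9, regs);
    printf("r0=%c r1=%c\n", regs[0], regs[1]); // r0=A r1=B
}
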
-
//------------------------------------------------------------------------
// genReturn: Generates code for return statement.
// In case of struct return, delegates to the genStructReturn method.
@@ -2595,12 +2555,11 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
// Return Value:
// None
//
-void
-CodeGen::genReturn(GenTreePtr treeNode)
+void CodeGen::genReturn(GenTreePtr treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
- GenTreePtr op1 = treeNode->gtGetOp1();
- var_types targetType = treeNode->TypeGet();
+ GenTreePtr op1 = treeNode->gtGetOp1();
+ var_types targetType = treeNode->TypeGet();
#ifdef DEBUG
if (targetType == TYP_VOID)
@@ -2628,16 +2587,16 @@ CodeGen::genReturn(GenTreePtr treeNode)
{
if (op1->OperGet() == GT_LCL_VAR)
{
- GenTreeLclVarCommon *lcl = op1->AsLclVarCommon();
- bool isRegCandidate = compiler->lvaTable[lcl->gtLclNum].lvIsRegCandidate();
+ GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
+ bool isRegCandidate = compiler->lvaTable[lcl->gtLclNum].lvIsRegCandidate();
if (isRegCandidate && ((op1->gtFlags & GTF_SPILLED) == 0))
{
assert(op1->InReg());
// We may need to generate a zero-extending mov instruction to load the value from this GT_LCL_VAR
- unsigned lclNum = lcl->gtLclNum;
- LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
+ unsigned lclNum = lcl->gtLclNum;
+ LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
var_types op1Type = genActualType(op1->TypeGet());
var_types lclType = genActualType(varDsc->TypeGet());
@@ -2677,17 +2636,16 @@ CodeGen::genReturn(GenTreePtr treeNode)
* Preconditions: All operands have been evaluated
*
*/
-void
-CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
+void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
{
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter *emit = getEmitter();
+ emitter* emit = getEmitter();
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
- unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
+ unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
printf("Generating: ");
compiler->gtDispTree(treeNode, nullptr, nullptr, true);
}
@@ -2712,46 +2670,46 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
switch (treeNode->gtOper)
{
- case GT_START_NONGC:
- getEmitter()->emitDisableGC();
- break;
+ case GT_START_NONGC:
+ getEmitter()->emitDisableGC();
+ break;
- case GT_PROF_HOOK:
- // We should be seeing this only if profiler hook is needed
- noway_assert(compiler->compIsProfilerHookNeeded());
+ case GT_PROF_HOOK:
+ // We should be seeing this only if profiler hook is needed
+ noway_assert(compiler->compIsProfilerHookNeeded());
#ifdef PROFILING_SUPPORTED
- // Right now this node is used only for tail calls. In future if
- // we intend to use it for Enter or Leave hooks, add a data member
- // to this node indicating the kind of profiler hook. For example,
- // helper number can be used.
- genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
+ // Right now this node is used only for tail calls. In future if
+ // we intend to use it for Enter or Leave hooks, add a data member
+ // to this node indicating the kind of profiler hook. For example,
+ // helper number can be used.
+ genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
- break;
+ break;
- case GT_LCLHEAP:
- genLclHeap(treeNode);
- break;
+ case GT_LCLHEAP:
+ genLclHeap(treeNode);
+ break;
- case GT_CNS_INT:
- case GT_CNS_DBL:
- genSetRegToConst(targetReg, targetType, treeNode);
- genProduceReg(treeNode);
- break;
+ case GT_CNS_INT:
+ case GT_CNS_DBL:
+ genSetRegToConst(targetReg, targetType, treeNode);
+ genProduceReg(treeNode);
+ break;
- case GT_NOT:
- assert(!varTypeIsFloating(targetType));
+ case GT_NOT:
+ assert(!varTypeIsFloating(targetType));
- __fallthrough;
+ __fallthrough;
- case GT_NEG:
+ case GT_NEG:
{
instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
assert(!treeNode->isContained());
// The dst can only be a register.
- assert(targetReg != REG_NA);
+ assert(targetReg != REG_NA);
GenTreePtr operand = treeNode->gtGetOp1();
assert(!operand->isContained());
@@ -2760,173 +2718,174 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
getEmitter()->emitIns_R_R(ins, emitTypeSize(treeNode), targetReg, operandReg);
}
- genProduceReg(treeNode);
- break;
-
- case GT_DIV:
- case GT_UDIV:
- genConsumeOperands(treeNode->AsOp());
+ genProduceReg(treeNode);
+ break;
- if (varTypeIsFloating(targetType))
- {
- // Floating point divide never raises an exception
- genCodeForBinary(treeNode);
- }
- else // an integer divide operation
- {
- GenTreePtr divisorOp = treeNode->gtGetOp2();
- emitAttr size = EA_ATTR(genTypeSize(genActualType(treeNode->TypeGet())));
+ case GT_DIV:
+ case GT_UDIV:
+ genConsumeOperands(treeNode->AsOp());
- if (divisorOp->IsIntegralConst(0))
+ if (varTypeIsFloating(targetType))
{
- // We unconditionally throw a divide by zero exception
- genJumpToThrowHlpBlk(EJ_jmp, SCK_DIV_BY_ZERO);
-
- // We still need to call genProduceReg
- genProduceReg(treeNode);
+ // Floating point divide never raises an exception
+ genCodeForBinary(treeNode);
}
- else // the divisor is not the constant zero
+ else // an integer divide operation
{
- regNumber divisorReg = divisorOp->gtRegNum;
+ GenTreePtr divisorOp = treeNode->gtGetOp2();
+ emitAttr size = EA_ATTR(genTypeSize(genActualType(treeNode->TypeGet())));
- // Generate the require runtime checks for GT_DIV or GT_UDIV
- if (treeNode->gtOper == GT_DIV)
+ if (divisorOp->IsIntegralConst(0))
{
- BasicBlock* sdivLabel = genCreateTempLabel();
+ // We unconditionally throw a divide by zero exception
+ genJumpToThrowHlpBlk(EJ_jmp, SCK_DIV_BY_ZERO);
- // Two possible exceptions:
- // (AnyVal / 0) => DivideByZeroException
- // (MinInt / -1) => ArithmeticException
- //
- bool checkDividend = true;
+ // We still need to call genProduceReg
+ genProduceReg(treeNode);
+ }
+ else // the divisor is not the constant zero
+ {
+ regNumber divisorReg = divisorOp->gtRegNum;
- // Do we have an immediate for the 'divisorOp'?
- //
- if (divisorOp->IsCnsIntOrI())
+                    // Generate the required runtime checks for GT_DIV or GT_UDIV
+ if (treeNode->gtOper == GT_DIV)
{
- GenTreeIntConCommon* intConstTree = divisorOp->AsIntConCommon();
- ssize_t intConstValue = intConstTree->IconValue();
- assert(intConstValue != 0); // already checked above by IsIntegralConst(0))
- if (intConstValue != -1)
- {
- checkDividend = false; // We statically know that the dividend is not -1
- }
- }
- else // insert check for divison by zero
- {
- // Check if the divisor is zero throw a DivideByZeroException
- emit->emitIns_R_I(INS_cmp, size, divisorReg, 0);
- emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpEqual, SCK_DIV_BY_ZERO);
- }
-
- if (checkDividend)
- {
- // Check if the divisor is not -1 branch to 'sdivLabel'
- emit->emitIns_R_I(INS_cmp, size, divisorReg, -1);
-
- emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
- inst_JMP(jmpNotEqual, sdivLabel);
- // If control flow continues past here the 'divisorReg' is known to be -1
+ BasicBlock* sdivLabel = genCreateTempLabel();
- regNumber dividendReg = treeNode->gtGetOp1()->gtRegNum;
- // At this point the divisor is known to be -1
+ // Two possible exceptions:
+ // (AnyVal / 0) => DivideByZeroException
+ // (MinInt / -1) => ArithmeticException
//
- // Issue the 'adds zr, dividendReg, dividendReg' instruction
- // this will set both the Z and V flags only when dividendReg is MinInt
+ bool checkDividend = true;
+
+ // Do we have an immediate for the 'divisorOp'?
//
- emit->emitIns_R_R_R(INS_adds, size, REG_ZR, dividendReg, dividendReg);
- inst_JMP(jmpNotEqual, sdivLabel); // goto sdiv if the Z flag is clear
- genJumpToThrowHlpBlk(EJ_vs, SCK_ARITH_EXCPN); // if the V flags is set throw
- // ArithmeticException
+ if (divisorOp->IsCnsIntOrI())
+ {
+ GenTreeIntConCommon* intConstTree = divisorOp->AsIntConCommon();
+ ssize_t intConstValue = intConstTree->IconValue();
+ assert(intConstValue != 0); // already checked above by IsIntegralConst(0))
+ if (intConstValue != -1)
+ {
+ checkDividend = false; // We statically know that the dividend is not -1
+ }
+ }
+                    else // insert check for division by zero
+ {
+ // Check if the divisor is zero throw a DivideByZeroException
+ emit->emitIns_R_I(INS_cmp, size, divisorReg, 0);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpEqual, SCK_DIV_BY_ZERO);
+ }
- genDefineTempLabel(sdivLabel);
+ if (checkDividend)
+ {
+ // Check if the divisor is not -1 branch to 'sdivLabel'
+ emit->emitIns_R_I(INS_cmp, size, divisorReg, -1);
+
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ inst_JMP(jmpNotEqual, sdivLabel);
+ // If control flow continues past here the 'divisorReg' is known to be -1
+
+ regNumber dividendReg = treeNode->gtGetOp1()->gtRegNum;
+ // At this point the divisor is known to be -1
+ //
+ // Issue the 'adds zr, dividendReg, dividendReg' instruction
+ // this will set both the Z and V flags only when dividendReg is MinInt
+ //
+ emit->emitIns_R_R_R(INS_adds, size, REG_ZR, dividendReg, dividendReg);
+ inst_JMP(jmpNotEqual, sdivLabel); // goto sdiv if the Z flag is clear
+                        genJumpToThrowHlpBlk(EJ_vs, SCK_ARITH_EXCPN); // if the V flag is set throw
+ // ArithmeticException
+
+ genDefineTempLabel(sdivLabel);
+ }
+ genCodeForBinary(treeNode); // Generate the sdiv instruction
}
- genCodeForBinary(treeNode); // Generate the sdiv instruction
- }
- else // (treeNode->gtOper == GT_UDIV)
- {
- // Only one possible exception
- // (AnyVal / 0) => DivideByZeroException
- //
- // Note that division by the constant 0 was already checked for above by the op2->IsIntegralConst(0) check
- //
- if (!divisorOp->IsCnsIntOrI())
+ else // (treeNode->gtOper == GT_UDIV)
{
- // divisorOp is not a constant, so it could be zero
+ // Only one possible exception
+ // (AnyVal / 0) => DivideByZeroException
+ //
+ // Note that division by the constant 0 was already checked for above by the
+ // op2->IsIntegralConst(0) check
//
- emit->emitIns_R_I(INS_cmp, size, divisorReg, 0);
- emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpEqual, SCK_DIV_BY_ZERO);
+ if (!divisorOp->IsCnsIntOrI())
+ {
+ // divisorOp is not a constant, so it could be zero
+ //
+ emit->emitIns_R_I(INS_cmp, size, divisorReg, 0);
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpEqual, SCK_DIV_BY_ZERO);
+ }
+ genCodeForBinary(treeNode);
}
- genCodeForBinary(treeNode);
}
}
- }
- break;
+ break;
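
For reference, the guards emitted above for a signed divide can be modelled in plain C++: a non-constant divisor is checked for zero, and MinInt / -1 must raise an arithmetic exception. (The ARM64 trick is 'adds zr, dividendReg, dividendReg', which produces a zero result with overflow, i.e. Z and V both set, only when the dividend is MinInt.) A sketch that uses exceptions where the real code branches to throw-helper blocks:

// Software model of the divide-by-zero and overflow checks inserted before sdiv.
#include <climits>
#include <cstdio>
#include <stdexcept>

int checkedSDiv(int dividend, int divisor)
{
    if (divisor == 0)
        throw std::runtime_error("DivideByZeroException");
    if (divisor == -1 && dividend == INT_MIN)
        throw std::runtime_error("ArithmeticException"); // INT_MIN / -1 overflows
    return dividend / divisor;
}

int main()
{
    printf("%d\n", checkedSDiv(10, 3)); // 3
    try { checkedSDiv(INT_MIN, -1); } catch (const std::exception& e) { printf("%s\n", e.what()); }
    try { checkedSDiv(1, 0); }        catch (const std::exception& e) { printf("%s\n", e.what()); }
}
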
- case GT_OR:
- case GT_XOR:
- case GT_AND:
- assert(varTypeIsIntegralOrI(treeNode));
- __fallthrough;
- case GT_ADD:
- case GT_SUB:
- case GT_MUL:
- genConsumeOperands(treeNode->AsOp());
- genCodeForBinary(treeNode);
- break;
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
+ assert(varTypeIsIntegralOrI(treeNode));
+ __fallthrough;
+ case GT_ADD:
+ case GT_SUB:
+ case GT_MUL:
+ genConsumeOperands(treeNode->AsOp());
+ genCodeForBinary(treeNode);
+ break;
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROR:
- genCodeForShift(treeNode);
- // genCodeForShift() calls genProduceReg()
- break;
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROR:
+ genCodeForShift(treeNode);
+ // genCodeForShift() calls genProduceReg()
+ break;
- case GT_CAST:
- if (varTypeIsFloating(targetType) && varTypeIsFloating(treeNode->gtOp.gtOp1))
- {
- // Casts float/double <--> double/float
- genFloatToFloatCast(treeNode);
- }
- else if (varTypeIsFloating(treeNode->gtOp.gtOp1))
- {
- // Casts float/double --> int32/int64
- genFloatToIntCast(treeNode);
- }
- else if (varTypeIsFloating(targetType))
- {
- // Casts int32/uint32/int64/uint64 --> float/double
- genIntToFloatCast(treeNode);
- }
- else
- {
- // Casts int <--> int
- genIntToIntCast(treeNode);
- }
- // The per-case functions call genProduceReg()
- break;
+ case GT_CAST:
+ if (varTypeIsFloating(targetType) && varTypeIsFloating(treeNode->gtOp.gtOp1))
+ {
+ // Casts float/double <--> double/float
+ genFloatToFloatCast(treeNode);
+ }
+ else if (varTypeIsFloating(treeNode->gtOp.gtOp1))
+ {
+ // Casts float/double --> int32/int64
+ genFloatToIntCast(treeNode);
+ }
+ else if (varTypeIsFloating(targetType))
+ {
+ // Casts int32/uint32/int64/uint64 --> float/double
+ genIntToFloatCast(treeNode);
+ }
+ else
+ {
+ // Casts int <--> int
+ genIntToIntCast(treeNode);
+ }
+ // The per-case functions call genProduceReg()
+ break;
- case GT_LCL_FLD_ADDR:
- case GT_LCL_VAR_ADDR:
- // Address of a local var. This by itself should never be allocated a register.
- // If it is worth storing the address in a register then it should be cse'ed into
- // a temp and that would be allocated a register.
- noway_assert(targetType == TYP_BYREF);
- noway_assert(!treeNode->InReg());
+ case GT_LCL_FLD_ADDR:
+ case GT_LCL_VAR_ADDR:
+ // Address of a local var. This by itself should never be allocated a register.
+ // If it is worth storing the address in a register then it should be cse'ed into
+ // a temp and that would be allocated a register.
+ noway_assert(targetType == TYP_BYREF);
+ noway_assert(!treeNode->InReg());
- inst_RV_TT(INS_lea, targetReg, treeNode, 0, EA_BYREF);
- genProduceReg(treeNode);
- break;
+ inst_RV_TT(INS_lea, targetReg, treeNode, 0, EA_BYREF);
+ genProduceReg(treeNode);
+ break;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
{
GenTreeLclVarCommon* varNode = treeNode->AsLclVarCommon();
assert(varNode->gtLclNum < compiler->lvaCount);
- unsigned varNum = varNode->gtLclNum;
- LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
+ unsigned varNum = varNode->gtLclNum;
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
if (targetType == TYP_STRUCT)
{
@@ -2934,7 +2893,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
emitAttr size = emitTypeSize(targetType);
- noway_assert(targetType != TYP_STRUCT);
+ noway_assert(targetType != TYP_STRUCT);
noway_assert(targetReg != REG_NA);
unsigned offset = treeNode->gtLclFld.gtLclOffs;
@@ -2959,13 +2918,14 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
{
GenTreeLclVarCommon* varNode = treeNode->AsLclVarCommon();
- unsigned varNum = varNode->gtLclNum; assert(varNum < compiler->lvaCount);
- LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
- bool isRegCandidate = varDsc->lvIsRegCandidate();
+ unsigned varNum = varNode->gtLclNum;
+ assert(varNum < compiler->lvaCount);
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
+ bool isRegCandidate = varDsc->lvIsRegCandidate();
// lcl_vars are not defs
assert((treeNode->gtFlags & GTF_VAR_DEF) == 0);
@@ -2996,7 +2956,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_STORE_LCL_FLD:
+ case GT_STORE_LCL_FLD:
{
noway_assert(targetType != TYP_STRUCT);
@@ -3008,7 +2968,8 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
noway_assert(targetReg == REG_NA);
GenTreeLclVarCommon* varNode = treeNode->AsLclVarCommon();
- unsigned varNum = varNode->gtLclNum; assert(varNum < compiler->lvaCount);
+ unsigned varNum = varNode->gtLclNum;
+ assert(varNum < compiler->lvaCount);
LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
// Ensure that lclVar nodes are typed correctly.
@@ -3033,7 +2994,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
instruction ins = ins_Store(targetType);
- emitAttr attr = emitTypeSize(targetType);
+ emitAttr attr = emitTypeSize(targetType);
attr = emit->emitInsAdjustLoadStoreAttr(ins, attr);
@@ -3041,15 +3002,16 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
genUpdateLife(varNode);
- varDsc->lvRegNum = REG_STK;
+ varDsc->lvRegNum = REG_STK;
}
break;
- case GT_STORE_LCL_VAR:
+ case GT_STORE_LCL_VAR:
{
GenTreeLclVarCommon* varNode = treeNode->AsLclVarCommon();
- unsigned varNum = varNode->gtLclNum; assert(varNum < compiler->lvaCount);
+ unsigned varNum = varNode->gtLclNum;
+ assert(varNum < compiler->lvaCount);
LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
unsigned offset = 0;
@@ -3082,11 +3044,11 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
assert(dataReg != REG_NA);
- if (targetReg == REG_NA) // store into stack based LclVar
+ if (targetReg == REG_NA) // store into stack based LclVar
{
inst_set_SV_var(varNode);
- instruction ins = ins_Store(targetType);
+ instruction ins = ins_Store(targetType);
emitAttr attr = emitTypeSize(targetType);
attr = emit->emitInsAdjustLoadStoreAttr(ins, attr);
@@ -3097,7 +3059,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
varDsc->lvRegNum = REG_STK;
}
- else // store into register (i.e move into register)
+ else // store into register (i.e move into register)
{
if (dataReg != targetReg)
{
@@ -3110,81 +3072,81 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_RETFILT:
- // A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in
- // the return register, if it's not already there. The processing is the same as GT_RETURN.
- if (targetType != TYP_VOID)
- {
- // For filters, the IL spec says the result is type int32. Further, the only specified legal values
- // are 0 or 1, with the use of other values "undefined".
- assert(targetType == TYP_INT);
- }
+ case GT_RETFILT:
+ // A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in
+ // the return register, if it's not already there. The processing is the same as GT_RETURN.
+ if (targetType != TYP_VOID)
+ {
+ // For filters, the IL spec says the result is type int32. Further, the only specified legal values
+ // are 0 or 1, with the use of other values "undefined".
+ assert(targetType == TYP_INT);
+ }
- __fallthrough;
+ __fallthrough;
- case GT_RETURN:
- genReturn(treeNode);
- break;
+ case GT_RETURN:
+ genReturn(treeNode);
+ break;
- case GT_LEA:
+ case GT_LEA:
{
// if we are here, it is the case where there is an LEA that cannot
// be folded into a parent instruction
- GenTreeAddrMode *lea = treeNode->AsAddrMode();
+ GenTreeAddrMode* lea = treeNode->AsAddrMode();
genLeaInstruction(lea);
}
// genLeaInstruction calls genProduceReg()
break;
- case GT_IND:
- genConsumeAddress(treeNode->AsIndir()->Addr());
- emit->emitInsLoadStoreOp(ins_Load(targetType), emitTypeSize(treeNode), targetReg, treeNode->AsIndir());
- genProduceReg(treeNode);
- break;
+ case GT_IND:
+ genConsumeAddress(treeNode->AsIndir()->Addr());
+ emit->emitInsLoadStoreOp(ins_Load(targetType), emitTypeSize(treeNode), targetReg, treeNode->AsIndir());
+ genProduceReg(treeNode);
+ break;
- case GT_MULHI:
- genCodeForMulHi(treeNode->AsOp());
- genProduceReg(treeNode);
- break;
+ case GT_MULHI:
+ genCodeForMulHi(treeNode->AsOp());
+ genProduceReg(treeNode);
+ break;
- case GT_MOD:
- case GT_UMOD:
- // Integer MOD should have been morphed into a sequence of sub, mul, div in fgMorph.
- //
- // We shouldn't be seeing GT_MOD on float/double as it is morphed into a helper call by front-end.
- noway_assert(!"Codegen for GT_MOD/GT_UMOD");
- break;
+ case GT_MOD:
+ case GT_UMOD:
+ // Integer MOD should have been morphed into a sequence of sub, mul, div in fgMorph.
+ //
+ // We shouldn't be seeing GT_MOD on float/double as it is morphed into a helper call by front-end.
+ noway_assert(!"Codegen for GT_MOD/GT_UMOD");
+ break;
- case GT_INTRINSIC:
- genIntrinsic(treeNode);
- break;
+ case GT_INTRINSIC:
+ genIntrinsic(treeNode);
+ break;
#ifdef FEATURE_SIMD
- case GT_SIMD:
- genSIMDIntrinsic(treeNode->AsSIMD());
- break;
+ case GT_SIMD:
+ genSIMDIntrinsic(treeNode->AsSIMD());
+ break;
#endif // FEATURE_SIMD
- case GT_CKFINITE:
- genCkfinite(treeNode);
- break;
+ case GT_CKFINITE:
+ genCkfinite(treeNode);
+ break;
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
{
// TODO-ARM64-CQ: Check if we can use the currently set flags.
// TODO-ARM64-CQ: Check for the case where we can simply transfer the carry bit to a register
// (signed < or >= where targetReg != REG_NA)
- GenTreeOp* tree = treeNode->AsOp();
- GenTreePtr op1 = tree->gtOp1;
- GenTreePtr op2 = tree->gtOp2;
- var_types op1Type = op1->TypeGet();
- var_types op2Type = op2->TypeGet();
+ GenTreeOp* tree = treeNode->AsOp();
+ GenTreePtr op1 = tree->gtOp1;
+ GenTreePtr op2 = tree->gtOp2;
+ var_types op1Type = op1->TypeGet();
+ var_types op2Type = op2->TypeGet();
assert(!op1->isContainedMemoryOp());
assert(!op2->isContainedMemoryOp());
@@ -3192,13 +3154,13 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
genConsumeOperands(tree);
emitAttr cmpSize = EA_UNKNOWN;
-
+
if (varTypeIsFloating(op1Type))
{
assert(varTypeIsFloating(op2Type));
- assert(!op1->isContained());
+ assert(!op1->isContained());
assert(op1Type == op2Type);
- cmpSize = EA_ATTR(genTypeSize(op1Type));
+ cmpSize = EA_ATTR(genTypeSize(op1Type));
if (op2->IsIntegralConst(0))
{
@@ -3206,7 +3168,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
else
{
- assert(!op2->isContained());
+ assert(!op2->isContained());
emit->emitIns_R_R(INS_fcmp, cmpSize, op1->gtRegNum, op2->gtRegNum);
}
}
@@ -3216,8 +3178,8 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// We don't support swapping op1 and op2 to generate cmp reg, imm
assert(!op1->isContainedIntOrIImmed());
- // TODO-ARM64-CQ: the second register argument of a CMP can be sign/zero
- // extended as part of the instruction (using "CMP (extended register)").
+ // TODO-ARM64-CQ: the second register argument of a CMP can be sign/zero
+ // extended as part of the instruction (using "CMP (extended register)").
// We should use that if possible, swapping operands
// (and reversing the condition) if necessary.
unsigned op1Size = genTypeSize(op1Type);
@@ -3265,16 +3227,16 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_JTRUE:
+ case GT_JTRUE:
{
- GenTree *cmp = treeNode->gtOp.gtOp1->gtEffectiveVal();
+ GenTree* cmp = treeNode->gtOp.gtOp1->gtEffectiveVal();
assert(cmp->OperIsCompare());
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
// Get the "kind" and type of the comparison. Note that whether it is an unsigned cmp
// is governed by a flag NOT by the inherent type of the node
emitJumpKind jumpKind[2];
- bool branchToTrueLabel[2];
+ bool branchToTrueLabel[2];
genJumpKindsForTree(cmp, jumpKind, branchToTrueLabel);
assert(jumpKind[0] != EJ_NONE);
@@ -3291,12 +3253,12 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_RETURNTRAP:
+ case GT_RETURNTRAP:
{
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
- GenTree *data = treeNode->gtOp.gtOp1;
+ GenTree* data = treeNode->gtOp.gtOp1;
genConsumeRegs(data);
emit->emitIns_R_I(INS_cmp, EA_4BYTE, data->gtRegNum, 0);
@@ -3311,10 +3273,10 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_STOREIND:
+ case GT_STOREIND:
{
- GenTree* data = treeNode->gtOp.gtOp2;
- GenTree* addr = treeNode->gtOp.gtOp1;
+ GenTree* data = treeNode->gtOp.gtOp2;
+ GenTree* addr = treeNode->gtOp.gtOp1;
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(treeNode, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
@@ -3363,10 +3325,10 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
else // A normal store, not a WriteBarrier store
{
- bool reverseOps = ((treeNode->gtFlags & GTF_REVERSE_OPS) != 0);
- bool dataIsUnary = false;
- GenTree* nonRMWsrc = nullptr;
- // We must consume the operands in the proper execution order,
+ bool reverseOps = ((treeNode->gtFlags & GTF_REVERSE_OPS) != 0);
+ bool dataIsUnary = false;
+ GenTree* nonRMWsrc = nullptr;
+ // We must consume the operands in the proper execution order,
// so that liveness is updated appropriately.
if (!reverseOps)
{
@@ -3383,7 +3345,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
genConsumeAddress(addr);
}
- regNumber dataReg = REG_NA;
+ regNumber dataReg = REG_NA;
if (data->isContainedIntOrIImmed())
{
assert(data->IsIntegralConst(0));
@@ -3395,28 +3357,28 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
dataReg = data->gtRegNum;
}
- emit->emitInsLoadStoreOp(ins_Store(targetType), emitTypeSize(treeNode), dataReg, treeNode->AsIndir());
+ emit->emitInsLoadStoreOp(ins_Store(targetType), emitTypeSize(treeNode), dataReg, treeNode->AsIndir());
}
}
break;
- case GT_COPY:
- // This is handled at the time we call genConsumeReg() on the GT_COPY
- break;
+ case GT_COPY:
+ // This is handled at the time we call genConsumeReg() on the GT_COPY
+ break;
- case GT_SWAP:
+ case GT_SWAP:
{
// Swap is only supported for lclVar operands that are enregistered
// We do not consume or produce any registers. Both operands remain enregistered.
// However, the gc-ness may change.
assert(genIsRegCandidateLocal(treeNode->gtOp.gtOp1) && genIsRegCandidateLocal(treeNode->gtOp.gtOp2));
- GenTreeLclVarCommon* lcl1 = treeNode->gtOp.gtOp1->AsLclVarCommon();
- LclVarDsc* varDsc1 = &(compiler->lvaTable[lcl1->gtLclNum]);
- var_types type1 = varDsc1->TypeGet();
- GenTreeLclVarCommon* lcl2 = treeNode->gtOp.gtOp2->AsLclVarCommon();
- LclVarDsc* varDsc2 = &(compiler->lvaTable[lcl2->gtLclNum]);
- var_types type2 = varDsc2->TypeGet();
+ GenTreeLclVarCommon* lcl1 = treeNode->gtOp.gtOp1->AsLclVarCommon();
+ LclVarDsc* varDsc1 = &(compiler->lvaTable[lcl1->gtLclNum]);
+ var_types type1 = varDsc1->TypeGet();
+ GenTreeLclVarCommon* lcl2 = treeNode->gtOp.gtOp2->AsLclVarCommon();
+ LclVarDsc* varDsc2 = &(compiler->lvaTable[lcl2->gtLclNum]);
+ var_types type2 = varDsc2->TypeGet();
// We must have both int or both fp regs
assert(!varTypeIsFloating(type1) || varTypeIsFloating(type2));
@@ -3424,9 +3386,9 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// FP swap is not yet implemented (and should have NYI'd in LSRA)
assert(!varTypeIsFloating(type1));
- regNumber oldOp1Reg = lcl1->gtRegNum;
+ regNumber oldOp1Reg = lcl1->gtRegNum;
regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
- regNumber oldOp2Reg = lcl2->gtRegNum;
+ regNumber oldOp2Reg = lcl2->gtRegNum;
regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
// We don't call genUpdateVarReg because we don't have a tree node with the new register.
@@ -3447,8 +3409,8 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// Update the gcInfo.
// Manually remove these regs for the gc sets (mostly to avoid confusing duplicative dump output)
- gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask|oldOp2RegMask);
- gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask|oldOp2RegMask);
+ gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
+ gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
// gcMarkRegPtrVal will do the appropriate thing for non-gc types.
// It will also dump the updates.
@@ -3457,94 +3419,94 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_LIST:
- case GT_ARGPLACE:
- // Nothing to do
- break;
+ case GT_LIST:
+ case GT_ARGPLACE:
+ // Nothing to do
+ break;
- case GT_PUTARG_STK:
- genPutArgStk(treeNode);
- break;
+ case GT_PUTARG_STK:
+ genPutArgStk(treeNode);
+ break;
- case GT_PUTARG_REG:
- assert(targetType != TYP_STRUCT); // Any TYP_STRUCT register args should have been removed by
- // fgMorphMultiregStructArg
- // We have a normal non-Struct targetType
- {
- GenTree *op1 = treeNode->gtOp.gtOp1;
- // If child node is not already in the register we need, move it
- genConsumeReg(op1);
- if (targetReg != op1->gtRegNum)
+ case GT_PUTARG_REG:
+ assert(targetType != TYP_STRUCT); // Any TYP_STRUCT register args should have been removed by
+ // fgMorphMultiregStructArg
+ // We have a normal non-Struct targetType
{
- inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
+ GenTree* op1 = treeNode->gtOp.gtOp1;
+ // If child node is not already in the register we need, move it
+ genConsumeReg(op1);
+ if (targetReg != op1->gtRegNum)
+ {
+ inst_RV_RV(ins_Copy(targetType), targetReg, op1->gtRegNum, targetType);
+ }
}
- }
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_CALL:
- genCallInstruction(treeNode);
- break;
+ case GT_CALL:
+ genCallInstruction(treeNode);
+ break;
- case GT_JMP:
- genJmpMethod(treeNode);
- break;
+ case GT_JMP:
+ genJmpMethod(treeNode);
+ break;
- case GT_LOCKADD:
- case GT_XCHG:
- case GT_XADD:
- genLockedInstructions(treeNode);
- break;
+ case GT_LOCKADD:
+ case GT_XCHG:
+ case GT_XADD:
+ genLockedInstructions(treeNode);
+ break;
- case GT_MEMORYBARRIER:
- instGen_MemoryBarrier();
- break;
+ case GT_MEMORYBARRIER:
+ instGen_MemoryBarrier();
+ break;
- case GT_CMPXCHG:
- NYI("GT_CMPXCHG");
- break;
+ case GT_CMPXCHG:
+ NYI("GT_CMPXCHG");
+ break;
- case GT_RELOAD:
- // do nothing - reload is just a marker.
- // The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
- // into the register specified in this node.
- break;
+ case GT_RELOAD:
+ // do nothing - reload is just a marker.
+ // The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
+ // into the register specified in this node.
+ break;
- case GT_NOP:
- break;
+ case GT_NOP:
+ break;
- case GT_NO_OP:
- if (treeNode->gtFlags & GTF_NO_OP_NO)
- {
- noway_assert(!"GTF_NO_OP_NO should not be set");
- }
- else
- {
- instGen(INS_nop);
- }
- break;
+ case GT_NO_OP:
+ if (treeNode->gtFlags & GTF_NO_OP_NO)
+ {
+ noway_assert(!"GTF_NO_OP_NO should not be set");
+ }
+ else
+ {
+ instGen(INS_nop);
+ }
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- genRangeCheck(treeNode);
- break;
+ genRangeCheck(treeNode);
+ break;
- case GT_PHYSREG:
- if (targetReg != treeNode->AsPhysReg()->gtSrcReg)
- {
- inst_RV_RV(ins_Copy(targetType), targetReg, treeNode->AsPhysReg()->gtSrcReg, targetType);
+ case GT_PHYSREG:
+ if (targetReg != treeNode->AsPhysReg()->gtSrcReg)
+ {
+ inst_RV_RV(ins_Copy(targetType), targetReg, treeNode->AsPhysReg()->gtSrcReg, targetType);
- genTransferRegGCState(targetReg, treeNode->AsPhysReg()->gtSrcReg);
- }
- genProduceReg(treeNode);
- break;
+ genTransferRegGCState(targetReg, treeNode->AsPhysReg()->gtSrcReg);
+ }
+ genProduceReg(treeNode);
+ break;
- case GT_PHYSREGDST:
- break;
+ case GT_PHYSREGDST:
+ break;
- case GT_NULLCHECK:
+ case GT_NULLCHECK:
{
assert(!treeNode->gtOp.gtOp1->isContained());
regNumber reg = genConsumeReg(treeNode->gtOp.gtOp1);
@@ -3552,38 +3514,38 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_CATCH_ARG:
+ case GT_CATCH_ARG:
- noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
+ noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
- /* Catch arguments get passed in a register. genCodeForBBlist()
- would have marked it as holding a GC object, but not used. */
+ /* Catch arguments get passed in a register. genCodeForBBlist()
+ would have marked it as holding a GC object, but not used. */
- noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
- genConsumeReg(treeNode);
- break;
+ noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
+ genConsumeReg(treeNode);
+ break;
- case GT_PINVOKE_PROLOG:
- noway_assert(((gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
+ case GT_PINVOKE_PROLOG:
+ noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
- // the runtime side requires the codegen here to be consistent
- emit->emitDisableRandomNops();
- break;
+ // the runtime side requires the codegen here to be consistent
+ emit->emitDisableRandomNops();
+ break;
- case GT_LABEL:
- genPendingCallLabel = genCreateTempLabel();
- treeNode->gtLabel.gtLabBB = genPendingCallLabel;
+ case GT_LABEL:
+ genPendingCallLabel = genCreateTempLabel();
+ treeNode->gtLabel.gtLabBB = genPendingCallLabel;
- // For long address (default): `adrp + add` will be emitted.
- // For short address (proven later): `adr` will be emitted.
- emit->emitIns_R_L(INS_adr, EA_PTRSIZE, genPendingCallLabel, targetReg);
- break;
+ // For long address (default): `adrp + add` will be emitted.
+ // For short address (proven later): `adr` will be emitted.
+ emit->emitIns_R_L(INS_adr, EA_PTRSIZE, genPendingCallLabel, targetReg);
+ break;
- case GT_COPYOBJ:
- genCodeForCpObj(treeNode->AsCpObj());
- break;
+ case GT_COPYOBJ:
+ genCodeForCpObj(treeNode->AsCpObj());
+ break;
- case GT_COPYBLK:
+ case GT_COPYBLK:
{
GenTreeCpBlk* cpBlkOp = treeNode->AsCpBlk();
if (cpBlkOp->gtBlkOpGcUnsafe)
@@ -3593,14 +3555,14 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
switch (cpBlkOp->gtBlkOpKind)
{
- case GenTreeBlkOp::BlkOpKindHelper:
- genCodeForCpBlk(cpBlkOp);
- break;
- case GenTreeBlkOp::BlkOpKindUnroll:
- genCodeForCpBlkUnroll(cpBlkOp);
- break;
- default:
- unreached();
+ case GenTreeBlkOp::BlkOpKindHelper:
+ genCodeForCpBlk(cpBlkOp);
+ break;
+ case GenTreeBlkOp::BlkOpKindUnroll:
+ genCodeForCpBlkUnroll(cpBlkOp);
+ break;
+ default:
+ unreached();
}
if (cpBlkOp->gtBlkOpGcUnsafe)
{
@@ -3609,46 +3571,46 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_INITBLK:
+ case GT_INITBLK:
{
GenTreeInitBlk* initBlkOp = treeNode->AsInitBlk();
switch (initBlkOp->gtBlkOpKind)
{
- case GenTreeBlkOp::BlkOpKindHelper:
- genCodeForInitBlk(initBlkOp);
- break;
- case GenTreeBlkOp::BlkOpKindUnroll:
- genCodeForInitBlkUnroll(initBlkOp);
- break;
- default:
- unreached();
+ case GenTreeBlkOp::BlkOpKindHelper:
+ genCodeForInitBlk(initBlkOp);
+ break;
+ case GenTreeBlkOp::BlkOpKindUnroll:
+ genCodeForInitBlkUnroll(initBlkOp);
+ break;
+ default:
+ unreached();
}
}
break;
- case GT_JMPTABLE:
- genJumpTable(treeNode);
- break;
+ case GT_JMPTABLE:
+ genJumpTable(treeNode);
+ break;
- case GT_SWITCH_TABLE:
- genTableBasedSwitch(treeNode);
- break;
-
- case GT_ARR_INDEX:
- genCodeForArrIndex(treeNode->AsArrIndex());
- break;
+ case GT_SWITCH_TABLE:
+ genTableBasedSwitch(treeNode);
+ break;
- case GT_ARR_OFFSET:
- genCodeForArrOffset(treeNode->AsArrOffs());
- break;
+ case GT_ARR_INDEX:
+ genCodeForArrIndex(treeNode->AsArrIndex());
+ break;
- case GT_CLS_VAR_ADDR:
- NYI("GT_CLS_VAR_ADDR");
- break;
+ case GT_ARR_OFFSET:
+ genCodeForArrOffset(treeNode->AsArrOffs());
+ break;
- default:
+ case GT_CLS_VAR_ADDR:
+ NYI("GT_CLS_VAR_ADDR");
+ break;
+
+ default:
{
-#ifdef DEBUG
+#ifdef DEBUG
char message[256];
sprintf(message, "Unimplemented node type %s\n", GenTree::NodeName(treeNode->OperGet()));
#endif
@@ -3671,8 +3633,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// The child of store is a multi-reg call node.
// genProduceReg() on treeNode is made by caller of this routine.
//
-void
-CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
+void CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
{
assert(treeNode->OperGet() == GT_STORE_LCL_VAR);
@@ -3681,20 +3642,20 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
// Assumption: current ARM64 implementation requires that a multi-reg struct
// var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
- // being struct promoted.
- unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
+ // being struct promoted.
+ unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
noway_assert(varDsc->lvIsMultiRegRet);
- GenTree* op1 = treeNode->gtGetOp1();
- GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
- GenTreeCall* call = actualOp1->AsCall();
+ GenTree* op1 = treeNode->gtGetOp1();
+ GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
+ GenTreeCall* call = actualOp1->AsCall();
assert(call->HasMultiRegRetVal());
genConsumeRegs(op1);
ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = pRetTypeDesc->GetReturnRegCount();
+ unsigned regCount = pRetTypeDesc->GetReturnRegCount();
if (treeNode->gtRegNum != REG_NA)
{
@@ -3709,7 +3670,7 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = pRetTypeDesc->GetReturnRegType(i);
- regNumber reg = call->GetRegNumByIdx(i);
+ regNumber reg = call->GetRegNumByIdx(i);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
@@ -3730,47 +3691,46 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
}
}
-
-
/***********************************************************************************************
* Generate code for localloc
*/
-void
-CodeGen::genLclHeap(GenTreePtr tree)
+void CodeGen::genLclHeap(GenTreePtr tree)
{
assert(tree->OperGet() == GT_LCLHEAP);
-
+
GenTreePtr size = tree->gtOp.gtOp1;
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
- regNumber targetReg = tree->gtRegNum;
- regMaskTP tmpRegsMask = tree->gtRsvdRegs;
- regNumber regCnt = REG_NA;
- regNumber pspSymReg = REG_NA;
- var_types type = genActualType(size->gtType);
- emitAttr easz = emitTypeSize(type);
- BasicBlock* endLabel = nullptr;
- BasicBlock* loop = nullptr;
- unsigned stackAdjustment = 0;
-
+ regNumber targetReg = tree->gtRegNum;
+ regMaskTP tmpRegsMask = tree->gtRsvdRegs;
+ regNumber regCnt = REG_NA;
+ regNumber pspSymReg = REG_NA;
+ var_types type = genActualType(size->gtType);
+ emitAttr easz = emitTypeSize(type);
+ BasicBlock* endLabel = nullptr;
+ BasicBlock* loop = nullptr;
+ unsigned stackAdjustment = 0;
+
#ifdef DEBUG
// Verify ESP
if (compiler->opts.compStackCheckOnRet)
{
- noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC && compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister && compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
+ noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
- BasicBlock * esp_check = genCreateTempLabel();
- emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ BasicBlock* esp_check = genCreateTempLabel();
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
#endif
- noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
- noway_assert(genStackLevel == 0); // Can't have anything on the stack
-
+ noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
+ noway_assert(genStackLevel == 0); // Can't have anything on the stack
+
// Whether method has PSPSym.
bool hasPspSym;
#if FEATURE_EH_FUNCLETS
@@ -3784,7 +3744,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
if (size->IsCnsIntOrI())
{
// If size is a constant, then it must be contained.
- assert(size->isContained());
+ assert(size->isContained());
// If amount is zero then return null in targetReg
amount = size->gtIntCon.gtIconVal;
@@ -3795,7 +3755,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
}
    // 'amount' is the total number of bytes to localloc to properly STACK_ALIGN
- amount = AlignUp(amount, STACK_ALIGN);
+ amount = AlignUp(amount, STACK_ALIGN);
}
else
{
@@ -3810,7 +3770,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
// If the method has no PSPSym and compInitMem=true, we can reuse targetReg as regcnt,
// since we don't need any internal registers.
if (!hasPspSym && compiler->info.compInitMem)
- {
+ {
assert(genCountBits(tmpRegsMask) == 0);
regCnt = targetReg;
}
@@ -3826,12 +3786,12 @@ CodeGen::genLclHeap(GenTreePtr tree)
// Align to STACK_ALIGN
// regCnt will be the total number of bytes to localloc
- inst_RV_IV(INS_add, regCnt, (STACK_ALIGN - 1), emitActualTypeSize(type));
+ inst_RV_IV(INS_add, regCnt, (STACK_ALIGN - 1), emitActualTypeSize(type));
inst_RV_IV(INS_AND, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
}
stackAdjustment = 0;
-#if FEATURE_EH_FUNCLETS
+#if FEATURE_EH_FUNCLETS
// If we have PSPsym, then need to re-locate it after localloc.
if (hasPspSym)
{
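
Both size paths above round the allocation up to STACK_ALIGN, either with AlignUp on the constant or with the add/AND instruction pair on regCnt. As a quick, standalone illustration of that rounding (a hedged C++ sketch, not part of the commit; RoundUpToStackAlign is a made-up name), 40 bytes become 48 under a 16-byte alignment:

#include <cassert>
#include <cstddef>

// Round 'size' up to the next multiple of 'alignment' (a power of two),
// mirroring the emitted pair: add regCnt, (align-1) then AND regCnt, ~(align-1).
static size_t RoundUpToStackAlign(size_t size, size_t alignment)
{
    return (size + alignment - 1) & ~(alignment - 1);
}

int main()
{
    assert(RoundUpToStackAlign(40, 16) == 48); // rounds up to the next boundary
    assert(RoundUpToStackAlign(48, 16) == 48); // an already-aligned size is unchanged
    return 0;
}
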
@@ -3846,8 +3806,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
}
#endif
-
-#if FEATURE_FIXED_OUT_ARGS
+#if FEATURE_FIXED_OUT_ARGS
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
//
@@ -3863,7 +3822,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
    // Nothing to pop off from the stack, but PSPSym needs to be relocated with SP padded.
// iv) Method has neither PSPSym nor out-going arg area.
    // Nothing needs to be popped off the stack nor relocated.
- if (compiler->lvaOutgoingArgSpaceSize > 0)
+ if (compiler->lvaOutgoingArgSpaceSize > 0)
{
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
// aligned
@@ -3873,7 +3832,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
#endif
if (size->IsCnsIntOrI())
- {
+ {
// We should reach here only for non-zero, constant size allocations.
assert(amount > 0);
@@ -3888,12 +3847,12 @@ CodeGen::genLclHeap(GenTreePtr tree)
getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, REG_SPBASE, -16, INS_OPTS_PRE_INDEX);
cntStackAlignedWidthItems -= 1;
}
-
+
goto ALLOC_DONE;
}
- else if (!compiler->info.compInitMem && (amount < compiler->eeGetPageSize())) // must be < not <=
- {
- // Since the size is a page or less, simply adjust the SP value
+ else if (!compiler->info.compInitMem && (amount < compiler->eeGetPageSize())) // must be < not <=
+ {
+ // Since the size is a page or less, simply adjust the SP value
// The SP might already be in the guard page, must touch it BEFORE
// the alloc, not after.
// ldr wz, [SP, #0]
@@ -3909,7 +3868,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
// Since size is a constant, regCnt is not yet initialized.
assert(regCnt == REG_NA);
if (!hasPspSym && compiler->info.compInitMem)
- {
+ {
assert(genCountBits(tmpRegsMask) == 0);
regCnt = targetReg;
}
@@ -3920,7 +3879,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
tmpRegsMask &= ~regCntMask;
regCnt = genRegNumFromMask(regCntMask);
}
- genSetRegToIcon(regCnt, amount, ((int)amount == amount)? TYP_INT : TYP_LONG);
+ genSetRegToIcon(regCnt, amount, ((int)amount == amount) ? TYP_INT : TYP_LONG);
}
if (compiler->info.compInitMem)
@@ -3930,7 +3889,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
// At this point 'regCnt' is set to the total number of bytes to locAlloc.
// Since we have to zero out the allocated memory AND ensure that RSP is always valid
// by tickling the pages, we will just push 0's on the stack.
- //
+ //
// Note: regCnt is guaranteed to be even on Amd64 since STACK_ALIGN/TARGET_POINTER_SIZE = 2
// and localloc size is a multiple of STACK_ALIGN.
@@ -3961,7 +3920,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
//
// Another subtlety is that you don't want SP to be exactly on the
// boundary of the guard page because PUSH is predecrement, thus
- // call setup would not touch the guard page but just beyond it
+ // call setup would not touch the guard page but just beyond it
//
// Note that we go through a few hoops so that SP never points to
// illegal pages at any time during the ticking process
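
The comments above describe why the variable-size path probes ("tickles") each page on the way down instead of moving SP in one step. A simplified user-mode sketch of the same idea (ProbeStackPages and its parameters are invented for illustration; the real code emits ARM64 loads/stores against SP, not C++):

#include <cstddef>

// Touch every page between the current stack pointer and the final, lower one
// before committing the move, so SP never skips past an untouched guard page.
static void ProbeStackPages(volatile char* currentSp, size_t allocSize, size_t pageSize)
{
    volatile char* finalSp = currentSp - allocSize;
    while (currentSp - pageSize > finalSp)
    {
        currentSp -= pageSize;
        *currentSp = 0; // the probe: touch the page (the JIT also zero-inits here when compInitMem is set)
    }
    // only after every intervening page has been touched would SP be set to finalSp
}
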
@@ -3982,18 +3941,18 @@ CodeGen::genLclHeap(GenTreePtr tree)
// mov SP, regCnt
//
- // Setup the regTmp
+ // Setup the regTmp
assert(tmpRegsMask != RBM_NONE);
assert(genCountBits(tmpRegsMask) == 1);
regNumber regTmp = genRegNumFromMask(tmpRegsMask);
- BasicBlock* loop = genCreateTempLabel();
- BasicBlock* done = genCreateTempLabel();
+ BasicBlock* loop = genCreateTempLabel();
+ BasicBlock* done = genCreateTempLabel();
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
getEmitter()->emitIns_R_R_R(INS_subs, EA_PTRSIZE, regCnt, REG_SPBASE, regCnt);
- inst_JMP(EJ_vc, loop); // branch if the V flag is not set
+ inst_JMP(EJ_vc, loop); // branch if the V flag is not set
// Overflow, set regCnt to lowest possible value
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
@@ -4009,7 +3968,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
getEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
emitJumpKind jmpLTU = genJumpKindForOper(GT_LT, CK_UNSIGNED);
inst_JMP(jmpLTU, done);
-
+
// Update SP to be at the next page of stack that we will tickle
getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt);
@@ -4021,17 +3980,17 @@ CodeGen::genLclHeap(GenTreePtr tree)
// Now just move the final value to SP
getEmitter()->emitIns_R_R(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt);
- }
+ }
ALLOC_DONE:
// Re-adjust SP to allocate PSPSym and out-going arg area
- if (stackAdjustment != 0)
+ if (stackAdjustment != 0)
{
assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
assert(stackAdjustment > 0);
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, (int) stackAdjustment);
+ getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, (int)stackAdjustment);
-#if FEATURE_EH_FUNCLETS
+#if FEATURE_EH_FUNCLETS
// Write PSPSym to its new location.
if (hasPspSym)
{
@@ -4042,7 +4001,7 @@ ALLOC_DONE:
// Return the stackalloc'ed address in result register.
// TargetReg = RSP + stackAdjustment.
//
- getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, targetReg, REG_SPBASE, (int) stackAdjustment);
+ getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, targetReg, REG_SPBASE, (int)stackAdjustment);
}
else // stackAdjustment == 0
{
@@ -4069,7 +4028,9 @@ BAILOUT:
// Update new ESP
if (compiler->opts.compStackCheckOnRet)
{
- noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC && compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister && compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
+ noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, targetReg, compiler->lvaReturnEspCheck, 0);
}
#endif
@@ -4078,7 +4039,7 @@ BAILOUT:
}
// Generate code for InitBlk by performing a loop unroll
-// Preconditions:
+// Preconditions:
// a) Both the size and fill byte value are integer constants.
// b) The size of the struct to initialize is smaller than INITBLK_UNROLL_LIMIT bytes.
void CodeGen::genCodeForInitBlkUnroll(GenTreeInitBlk* initBlkNode)
@@ -4112,7 +4073,7 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeInitBlk* initBlkNode)
// which needs to be the new register.
regNumber valReg = initVal->gtRegNum;
initVal = initVal->gtSkipReloadOrCopy();
-#else // !0
+#else // !0
NYI("genCodeForInitBlkUnroll");
#endif // !0
}
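
genCodeForInitBlkUnroll is still NYI for ARM64 here, but the preconditions above say what the unrolled form handles: a constant fill value and a constant size under INITBLK_UNROLL_LIMIT. A rough C++ picture of what unrolling an init block means (illustrative only; InitBlock16 is a made-up helper and assumes dst has at least 16 bytes):

#include <cstdint>
#include <cstring>

// Initialize a small fixed-size block with back-to-back stores instead of a
// loop or a memset call, which is what an unrolled InitBlk expansion emits.
static void InitBlock16(void* dst, uint8_t fill)
{
    uint64_t pattern = 0x0101010101010101ULL * fill; // replicate the fill byte across 8 bytes
    std::memcpy(static_cast<char*>(dst) + 0, &pattern, 8);
    std::memcpy(static_cast<char*>(dst) + 8, &pattern, 8);
}
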
@@ -4125,8 +4086,8 @@ void CodeGen::genCodeForInitBlk(GenTreeInitBlk* initBlkNode)
{
// Make sure we got the arguments of the initblk operation in the right registers
GenTreePtr blockSize = initBlkNode->Size();
- GenTreePtr dstAddr = initBlkNode->Dest();
- GenTreePtr initVal = initBlkNode->InitVal();
+ GenTreePtr dstAddr = initBlkNode->Dest();
+ GenTreePtr initVal = initBlkNode->InitVal();
#ifdef DEBUG
assert(!dstAddr->isContained());
@@ -4150,13 +4111,12 @@ void CodeGen::genCodeForInitBlk(GenTreeInitBlk* initBlkNode)
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
}
-
// Generate code for a load from some address + offset
// base: tree node which can be either a local address or arbitrary node
// offset: distance from the base from which to load
void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset)
{
- emitter *emit = getEmitter();
+ emitter* emit = getEmitter();
if (base->OperIsLocalAddr())
{
@@ -4188,12 +4148,11 @@ void CodeGen::genCodeForStoreOffset(instruction ins, emitAttr size, regNumber sr
{
emit->emitIns_AR_R(ins, size, src, base->gtRegNum, offset);
}
-#else // !0
+#else // !0
NYI("genCodeForStoreOffset");
#endif // !0
}
-
// Generates CpBlk code by performing a loop unroll
// Preconditions:
// The size argument of the CpBlk node is a constant and <= 64 bytes.
@@ -4271,7 +4230,7 @@ void CodeGen::genCodeForCpBlkUnroll(GenTreeCpBlk* cpBlkNode)
genCodeForStoreOffset(INS_mov, EA_1BYTE, tmpReg, dstAddr, offset);
}
}
-#else // !0
+#else // !0
NYI("genCodeForCpBlkUnroll");
#endif // !0
}
@@ -4282,7 +4241,7 @@ void CodeGen::genCodeForCpBlkUnroll(GenTreeCpBlk* cpBlkNode)
// slots that don't contain GC pointers. The generated code will look like:
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
-//
+//
// In the case of a GC-Pointer we'll call the ByRef write barrier helper
// which happens to use the same registers as the previous call to maintain
// the same register requirements and register killsets:
@@ -4300,7 +4259,7 @@ void CodeGen::genCodeForCpBlkUnroll(GenTreeCpBlk* cpBlkNode)
void CodeGen::genCodeForCpObj(GenTreeCpObj* cpObjNode)
{
// Make sure we got the arguments of the cpobj operation in the right registers
- GenTreePtr clsTok = cpObjNode->ClsTok();
+ GenTreePtr clsTok = cpObjNode->ClsTok();
GenTreePtr dstAddr = cpObjNode->Dest();
GenTreePtr srcAddr = cpObjNode->Source();
@@ -4333,7 +4292,7 @@ void CodeGen::genCodeForCpObj(GenTreeCpObj* cpObjNode)
#endif // DEBUG
unsigned slots = cpObjNode->gtSlots;
- emitter *emit = getEmitter();
+ emitter* emit = getEmitter();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
@@ -4341,14 +4300,16 @@ void CodeGen::genCodeForCpObj(GenTreeCpObj* cpObjNode)
// TODO-ARM64-CQ: Consider using LDP/STP to save codesize.
while (slots > 0)
{
- emit->emitIns_R_R_I(INS_ldr, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
- emit->emitIns_R_R_I(INS_str, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
+ emit->emitIns_R_R_I(INS_ldr, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
+ INS_OPTS_POST_INDEX);
+ emit->emitIns_R_R_I(INS_str, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
+ INS_OPTS_POST_INDEX);
slots--;
}
}
else
{
- BYTE* gcPtrs = cpObjNode->gtGcPtrs;
+ BYTE* gcPtrs = cpObjNode->gtGcPtrs;
unsigned gcPtrCount = cpObjNode->gtGcPtrCount;
unsigned i = 0;
@@ -4356,17 +4317,19 @@ void CodeGen::genCodeForCpObj(GenTreeCpObj* cpObjNode)
{
switch (gcPtrs[i])
{
- case TYPE_GC_NONE:
-            // TODO-ARM64-CQ: Consider using LDP/STP to save codesize in case of contiguous NON-GC slots.
- emit->emitIns_R_R_I(INS_ldr, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
- emit->emitIns_R_R_I(INS_str, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
- break;
+ case TYPE_GC_NONE:
+                // TODO-ARM64-CQ: Consider using LDP/STP to save codesize in case of contiguous NON-GC slots.
+ emit->emitIns_R_R_I(INS_ldr, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
+ INS_OPTS_POST_INDEX);
+ emit->emitIns_R_R_I(INS_str, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
+ INS_OPTS_POST_INDEX);
+ break;
- default:
-            // We have a GC pointer, call the write barrier.
- genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
- gcPtrCount--;
- break;
+ default:
+                // We have a GC pointer, call the write barrier.
+ genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
+ gcPtrCount--;
+ break;
}
++i;
}
@@ -4386,9 +4349,9 @@ void CodeGen::genCodeForCpObj(GenTreeCpObj* cpObjNode)
void CodeGen::genCodeForCpBlk(GenTreeCpBlk* cpBlkNode)
{
// Make sure we got the arguments of the cpblk operation in the right registers
- GenTreePtr blockSize = cpBlkNode->Size();
- GenTreePtr dstAddr = cpBlkNode->Dest();
- GenTreePtr srcAddr = cpBlkNode->Source();
+ GenTreePtr blockSize = cpBlkNode->Size();
+ GenTreePtr dstAddr = cpBlkNode->Dest();
+ GenTreePtr srcAddr = cpBlkNode->Source();
assert(!dstAddr->isContained());
assert(!srcAddr->isContained());
@@ -4406,19 +4369,17 @@ void CodeGen::genCodeForCpBlk(GenTreeCpBlk* cpBlkNode)
#endif // 0
genConsumeRegAndCopy(blockSize, REG_ARG_2);
- genConsumeRegAndCopy(srcAddr, REG_ARG_1);
- genConsumeRegAndCopy(dstAddr, REG_ARG_0);
+ genConsumeRegAndCopy(srcAddr, REG_ARG_1);
+ genConsumeRegAndCopy(dstAddr, REG_ARG_0);
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
}
-
// generate code to do a switch statement based on a table of ip-relative offsets
-void
-CodeGen::genTableBasedSwitch(GenTree* treeNode)
+void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
genConsumeOperands(treeNode->AsOp());
- regNumber idxReg = treeNode->gtOp.gtOp1->gtRegNum;
+ regNumber idxReg = treeNode->gtOp.gtOp1->gtRegNum;
regNumber baseReg = treeNode->gtOp.gtOp2->gtRegNum;
regNumber tmpReg = genRegNumFromMask(treeNode->gtRsvdRegs);
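
genTableBasedSwitch consumes an index register and the jump table's base address and branches through an entry holding an ip-relative offset. A minimal sketch of that address arithmetic (hypothetical names; whether the entries are relative to the table base or to some other anchor is an assumption here, since the hunk only shows the register setup):

#include <cstdint>

// Recover a branch target from a table of 32-bit relative offsets: read the
// selected entry and add it back to the base address the offsets were taken from.
static uintptr_t SwitchTargetFromTable(const int32_t* tableBase, unsigned index)
{
    return reinterpret_cast<uintptr_t>(tableBase) + static_cast<intptr_t>(tableBase[index]);
}
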
@@ -4436,8 +4397,7 @@ CodeGen::genTableBasedSwitch(GenTree* treeNode)
}
// emits the table and an instruction to get the address of the first element
-void
-CodeGen::genJumpTable(GenTree* treeNode)
+void CodeGen::genJumpTable(GenTree* treeNode)
{
noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
assert(treeNode->OperGet() == GT_JMPTABLE);
@@ -4453,7 +4413,7 @@ CodeGen::genJumpTable(GenTree* treeNode)
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", Compiler::s_compMethodsCount, jmpTabBase);
- for (unsigned i = 0; i<jumpCount; i++)
+ for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
noway_assert(target->bbFlags & BBF_JMP_TARGET);
@@ -4468,20 +4428,14 @@ CodeGen::genJumpTable(GenTree* treeNode)
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
- getEmitter()->emitIns_R_C(INS_adr,
- emitTypeSize(TYP_I_IMPL),
- treeNode->gtRegNum,
- REG_NA,
- compiler->eeFindJitDataOffs(jmpTabBase),
- 0);
+ getEmitter()->emitIns_R_C(INS_adr, emitTypeSize(TYP_I_IMPL), treeNode->gtRegNum, REG_NA,
+ compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
-
// generate code for the locked operations:
// GT_LOCKADD, GT_XCHG, GT_XADD
-void
-CodeGen::genLockedInstructions(GenTree* treeNode)
+void CodeGen::genLockedInstructions(GenTree* treeNode)
{
#if 0
GenTree* data = treeNode->gtOp.gtOp2;
@@ -4536,30 +4490,28 @@ CodeGen::genLockedInstructions(GenTree* treeNode)
{
genProduceReg(treeNode);
}
-#else // !0
+#else // !0
NYI("genLockedInstructions");
#endif // !0
}
-
// generate code for BoundsCheck nodes
-void
-CodeGen::genRangeCheck(GenTreePtr oper)
+void CodeGen::genRangeCheck(GenTreePtr oper)
{
#ifdef FEATURE_SIMD
noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK || oper->OperGet() == GT_SIMD_CHK);
-#else // !FEATURE_SIMD
+#else // !FEATURE_SIMD
noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
#endif // !FEATURE_SIMD
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
- GenTreePtr arrLen = bndsChk->gtArrLen;
- GenTreePtr arrIndex = bndsChk->gtIndex;
- GenTreePtr arrRef = NULL;
- int lenOffset = 0;
+ GenTreePtr arrLen = bndsChk->gtArrLen;
+ GenTreePtr arrIndex = bndsChk->gtIndex;
+ GenTreePtr arrRef = NULL;
+ int lenOffset = 0;
- GenTree *src1, *src2;
+ GenTree * src1, *src2;
emitJumpKind jmpKind;
genConsumeRegs(arrLen);
@@ -4567,16 +4519,16 @@ CodeGen::genRangeCheck(GenTreePtr oper)
if (arrIndex->isContainedIntOrIImmed())
{
- // To encode using a cmp immediate, we place the
+ // To encode using a cmp immediate, we place the
// constant operand in the second position
- src1 = arrLen;
- src2 = arrIndex;
+ src1 = arrLen;
+ src2 = arrIndex;
jmpKind = genJumpKindForOper(GT_LE, CK_UNSIGNED);
}
else
{
- src1 = arrIndex;
- src2 = arrLen;
+ src1 = arrIndex;
+ src2 = arrLen;
jmpKind = genJumpKindForOper(GT_GE, CK_UNSIGNED);
}
@@ -4612,8 +4564,7 @@ CodeGen::genRangeCheck(GenTreePtr oper)
// TODO-Cleanup: move to CodeGenCommon.cpp
// static
-unsigned
-CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension)
+unsigned CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension)
{
// Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * (dimension + rank);
@@ -4633,8 +4584,7 @@ CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigne
// TODO-Cleanup: move to CodeGenCommon.cpp
// static
-unsigned
-CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension)
+unsigned CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension)
{
// Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * dimension;
@@ -4651,48 +4601,48 @@ CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsi
// None.
//
-void
-CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
+void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
- emitter * emit = getEmitter();
- GenTreePtr arrObj = arrIndex->ArrObj();
- GenTreePtr indexNode = arrIndex->IndexExpr();
- regNumber arrReg = genConsumeReg(arrObj);
- regNumber indexReg = genConsumeReg(indexNode);
- regNumber tgtReg = arrIndex->gtRegNum; noway_assert(tgtReg != REG_NA);
+ emitter* emit = getEmitter();
+ GenTreePtr arrObj = arrIndex->ArrObj();
+ GenTreePtr indexNode = arrIndex->IndexExpr();
+ regNumber arrReg = genConsumeReg(arrObj);
+ regNumber indexReg = genConsumeReg(indexNode);
+ regNumber tgtReg = arrIndex->gtRegNum;
+ noway_assert(tgtReg != REG_NA);
// We will use a temp register to load the lower bound and dimension size values
- //
- regMaskTP tmpRegsMask = arrIndex->gtRsvdRegs; // there will be two bits set
- tmpRegsMask &= ~genRegMask(tgtReg); // remove the bit for 'tgtReg' from 'tmpRegsMask'
+ //
+ regMaskTP tmpRegsMask = arrIndex->gtRsvdRegs; // there will be two bits set
+ tmpRegsMask &= ~genRegMask(tgtReg); // remove the bit for 'tgtReg' from 'tmpRegsMask'
- regMaskTP tmpRegMask = genFindLowestBit(tmpRegsMask); // set tmpRegMsk to a one-bit mask
- regNumber tmpReg = genRegNumFromMask(tmpRegMask); // set tmpReg from that mask
+ regMaskTP tmpRegMask = genFindLowestBit(tmpRegsMask); // set tmpRegMsk to a one-bit mask
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask); // set tmpReg from that mask
noway_assert(tmpReg != REG_NA);
assert(tgtReg != tmpReg);
- unsigned dim = arrIndex->gtCurrDim;
- unsigned rank = arrIndex->gtArrRank;
- var_types elemType = arrIndex->gtArrElemType;
- unsigned offset;
+ unsigned dim = arrIndex->gtCurrDim;
+ unsigned rank = arrIndex->gtArrRank;
+ var_types elemType = arrIndex->gtArrElemType;
+ unsigned offset;
offset = genOffsetOfMDArrayLowerBound(elemType, rank, dim);
- emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
+ emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
emit->emitIns_R_R_R(INS_sub, EA_4BYTE, tgtReg, indexReg, tmpReg);
offset = genOffsetOfMDArrayDimensionSize(elemType, rank, dim);
- emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
+ emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg);
-
+
emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
genJumpToThrowHlpBlk(jmpGEU, SCK_RNGCHK_FAIL);
-
+
genProduceReg(arrIndex);
}
//------------------------------------------------------------------------
-// genCodeForArrOffset: Generates code to compute the flattened array offset for
+// genCodeForArrOffset: Generates code to compute the flattened array offset for
// one dimension of an array reference:
// result = (prevDimOffset * dimSize) + effectiveIndex
// where dimSize is obtained from the arrObj operand
@@ -4707,8 +4657,7 @@ CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
// dimSize and effectiveIndex are always non-negative, the former by design,
// and the latter because it has been normalized to be zero-based.
-void
-CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
+void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
GenTreePtr offsetNode = arrOffset->gtOffset;
GenTreePtr indexNode = arrOffset->gtIndex;
@@ -4718,20 +4667,24 @@ CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
if (!offsetNode->IsIntegralConst(0))
{
- emitter * emit = getEmitter();
- GenTreePtr arrObj = arrOffset->gtArrObj;
- regNumber arrReg = genConsumeReg(arrObj); noway_assert(arrReg != REG_NA);
- regNumber offsetReg = genConsumeReg(offsetNode); noway_assert(offsetReg != REG_NA);
- regNumber indexReg = genConsumeReg(indexNode); noway_assert(indexReg != REG_NA);
- regMaskTP tmpRegMask = arrOffset->gtRsvdRegs;
- regNumber tmpReg = genRegNumFromMask(tmpRegMask); noway_assert(tmpReg != REG_NA);
- unsigned dim = arrOffset->gtCurrDim;
- unsigned rank = arrOffset->gtArrRank;
- var_types elemType = arrOffset->gtArrElemType;
- unsigned offset = genOffsetOfMDArrayDimensionSize(elemType, rank, dim);
+ emitter* emit = getEmitter();
+ GenTreePtr arrObj = arrOffset->gtArrObj;
+ regNumber arrReg = genConsumeReg(arrObj);
+ noway_assert(arrReg != REG_NA);
+ regNumber offsetReg = genConsumeReg(offsetNode);
+ noway_assert(offsetReg != REG_NA);
+ regNumber indexReg = genConsumeReg(indexNode);
+ noway_assert(indexReg != REG_NA);
+ regMaskTP tmpRegMask = arrOffset->gtRsvdRegs;
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ noway_assert(tmpReg != REG_NA);
+ unsigned dim = arrOffset->gtCurrDim;
+ unsigned rank = arrOffset->gtArrRank;
+ var_types elemType = arrOffset->gtArrElemType;
+ unsigned offset = genOffsetOfMDArrayDimensionSize(elemType, rank, dim);
// Load tmpReg with the dimension size
- emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
+ emit->emitIns_R_R_I(ins_Load(TYP_INT), EA_8BYTE, tmpReg, arrReg, offset); // a 4 BYTE sign extending load
// Evaluate tgtReg = offsetReg*dim_size + indexReg.
emit->emitIns_R_R_R_R(INS_madd, EA_4BYTE, tgtReg, tmpReg, offsetReg, indexReg);
@@ -4751,13 +4704,13 @@ CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
// in cases where we don't want to instantiate all the indirs that happen
//
// TODO-Cleanup: move to CodeGenCommon.cpp
-GenTreeIndir CodeGen::indirForm(var_types type, GenTree *base)
+GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
{
GenTreeIndir i(GT_IND, type, base, nullptr);
i.gtRegNum = REG_NA;
// has to be nonnull (because contained nodes can't be the last in block)
// but don't want it to be a valid pointer
- i.gtNext = (GenTree *)(-1);
+ i.gtNext = (GenTree*)(-1);
return i;
}
@@ -4771,11 +4724,10 @@ GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
i.gtRegNum = REG_NA;
// has to be nonnull (because contained nodes can't be the last in block)
// but don't want it to be a valid pointer
- i.gtNext = (GenTree *)(-1);
+ i.gtNext = (GenTree*)(-1);
return i;
}
-
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins = INS_brk;
@@ -4784,79 +4736,79 @@ instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
switch (oper)
{
- case GT_ADD:
- ins = INS_fadd;
- break;
- case GT_SUB:
- ins = INS_fsub;
- break;
- case GT_MUL:
- ins = INS_fmul;
- break;
- case GT_DIV:
- ins = INS_fdiv;
- break;
- case GT_NEG:
- ins = INS_fneg;
- break;
+ case GT_ADD:
+ ins = INS_fadd;
+ break;
+ case GT_SUB:
+ ins = INS_fsub;
+ break;
+ case GT_MUL:
+ ins = INS_fmul;
+ break;
+ case GT_DIV:
+ ins = INS_fdiv;
+ break;
+ case GT_NEG:
+ ins = INS_fneg;
+ break;
- default:
- NYI("Unhandled oper in genGetInsForOper() - float");
- unreached();
- break;
+ default:
+ NYI("Unhandled oper in genGetInsForOper() - float");
+ unreached();
+ break;
}
}
else
{
switch (oper)
{
- case GT_ADD:
- ins = INS_add;
- break;
- case GT_AND:
- ins = INS_and;
- break;
- case GT_DIV:
- ins = INS_sdiv;
- break;
- case GT_UDIV:
- ins = INS_udiv;
- break;
- case GT_MUL:
- ins = INS_mul;
- break;
- case GT_LSH:
- ins = INS_lsl;
- break;
- case GT_NEG:
- ins = INS_neg;
- break;
- case GT_NOT:
- ins = INS_mvn;
- break;
- case GT_OR:
- ins = INS_orr;
- break;
- case GT_ROR:
- ins = INS_ror;
- break;
- case GT_RSH:
- ins = INS_asr;
- break;
- case GT_RSZ:
- ins = INS_lsr;
- break;
- case GT_SUB:
- ins = INS_sub;
- break;
- case GT_XOR:
- ins = INS_eor;
- break;
+ case GT_ADD:
+ ins = INS_add;
+ break;
+ case GT_AND:
+ ins = INS_and;
+ break;
+ case GT_DIV:
+ ins = INS_sdiv;
+ break;
+ case GT_UDIV:
+ ins = INS_udiv;
+ break;
+ case GT_MUL:
+ ins = INS_mul;
+ break;
+ case GT_LSH:
+ ins = INS_lsl;
+ break;
+ case GT_NEG:
+ ins = INS_neg;
+ break;
+ case GT_NOT:
+ ins = INS_mvn;
+ break;
+ case GT_OR:
+ ins = INS_orr;
+ break;
+ case GT_ROR:
+ ins = INS_ror;
+ break;
+ case GT_RSH:
+ ins = INS_asr;
+ break;
+ case GT_RSZ:
+ ins = INS_lsr;
+ break;
+ case GT_SUB:
+ ins = INS_sub;
+ break;
+ case GT_XOR:
+ ins = INS_eor;
+ break;
- default:
- NYI("Unhandled oper in genGetInsForOper() - integer");
- unreached();
- break;
+ default:
+ NYI("Unhandled oper in genGetInsForOper() - integer");
+ unreached();
+ break;
}
}
return ins;
@@ -4874,16 +4826,16 @@ instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
//
void CodeGen::genCodeForShift(GenTreePtr tree)
{
- var_types targetType = tree->TypeGet();
- genTreeOps oper = tree->OperGet();
- instruction ins = genGetInsForOper(oper, targetType);
- emitAttr size = emitTypeSize(tree);
+ var_types targetType = tree->TypeGet();
+ genTreeOps oper = tree->OperGet();
+ instruction ins = genGetInsForOper(oper, targetType);
+ emitAttr size = emitTypeSize(tree);
assert(tree->gtRegNum != REG_NA);
GenTreePtr operand = tree->gtGetOp1();
genConsumeReg(operand);
-
+
GenTreePtr shiftBy = tree->gtGetOp2();
if (!shiftBy->IsCnsIntOrI())
{
@@ -4892,9 +4844,9 @@ void CodeGen::genCodeForShift(GenTreePtr tree)
}
else
{
- unsigned immWidth = emitter::getBitWidth(size); // immWidth will be set to 32 or 64
- ssize_t shiftByImm = shiftBy->gtIntCon.gtIconVal & (immWidth-1);
-
+ unsigned immWidth = emitter::getBitWidth(size); // immWidth will be set to 32 or 64
+ ssize_t shiftByImm = shiftBy->gtIntCon.gtIconVal & (immWidth - 1);
+
getEmitter()->emitIns_R_R_I(ins, size, tree->gtRegNum, operand->gtRegNum, shiftByImm);
}
@@ -4902,7 +4854,7 @@ void CodeGen::genCodeForShift(GenTreePtr tree)
}
// TODO-Cleanup: move to CodeGenCommon.cpp
-void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
+void CodeGen::genUnspillRegIfNeeded(GenTree* tree)
{
regNumber dstReg = tree->gtRegNum;
@@ -4919,13 +4871,13 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
// Reset spilled flag, since we are going to load a local variable from its home location.
unspillTree->gtFlags &= ~GTF_SPILLED;
- GenTreeLclVarCommon* lcl = unspillTree->AsLclVarCommon();
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
+ GenTreeLclVarCommon* lcl = unspillTree->AsLclVarCommon();
+ LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
var_types targetType = unspillTree->gtType;
instruction ins = ins_Load(targetType, compiler->isSIMDTypeLocalAligned(lcl->gtLclNum));
emitAttr attr = emitTypeSize(targetType);
- emitter * emit = getEmitter();
+ emitter* emit = getEmitter();
// Fixes Issue #3326
attr = emit->emitInsAdjustLoadStoreAttr(ins, attr);
@@ -4960,7 +4912,7 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tV%02u in reg ", lcl->gtLclNum);
@@ -4978,28 +4930,28 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
}
else if (unspillTree->IsMultiRegCall())
{
- GenTreeCall* call = unspillTree->AsCall();
- ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = pRetTypeDesc->GetReturnRegCount();
- GenTreeCopyOrReload* reloadTree = nullptr;
+ GenTreeCall* call = unspillTree->AsCall();
+ ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
+ unsigned regCount = pRetTypeDesc->GetReturnRegCount();
+ GenTreeCopyOrReload* reloadTree = nullptr;
if (tree->OperGet() == GT_RELOAD)
{
reloadTree = tree->AsCopyOrReload();
}
// In case of multi-reg call node, GTF_SPILLED flag on it indicates that
- // one or more of its result regs are spilled. Call node needs to be
+ // one or more of its result regs are spilled. Call node needs to be
// queried to know which specific result regs to be unspilled.
for (unsigned i = 0; i < regCount; ++i)
{
unsigned flags = call->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILLED) != 0)
{
- var_types dstType = pRetTypeDesc->GetReturnRegType(i);
+ var_types dstType = pRetTypeDesc->GetReturnRegType(i);
regNumber unspillTreeReg = call->GetRegNumByIdx(i);
if (reloadTree != nullptr)
- {
+ {
dstReg = reloadTree->GetRegNumByIdx(i);
if (dstReg == REG_NA)
{
@@ -5012,13 +4964,10 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
}
TempDsc* t = regSet.rsUnspillInPlace(call, unspillTreeReg, i);
- getEmitter()->emitIns_R_S(ins_Load(dstType),
- emitActualTypeSize(dstType),
- dstReg,
- t->tdTempNum(),
+ getEmitter()->emitIns_R_S(ins_Load(dstType), emitActualTypeSize(dstType), dstReg, t->tdTempNum(),
0);
compiler->tmpRlsTemp(t);
- gcInfo.gcMarkRegPtrVal(dstReg, dstType);
+ gcInfo.gcMarkRegPtrVal(dstReg, dstType);
}
}
@@ -5028,24 +4977,21 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
else
{
TempDsc* t = regSet.rsUnspillInPlace(unspillTree, unspillTree->gtRegNum);
- getEmitter()->emitIns_R_S(ins_Load(unspillTree->gtType),
- emitActualTypeSize(unspillTree->TypeGet()),
- dstReg,
- t->tdTempNum(),
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(unspillTree->gtType), emitActualTypeSize(unspillTree->TypeGet()), dstReg,
+ t->tdTempNum(), 0);
compiler->tmpRlsTemp(t);
unspillTree->gtFlags &= ~GTF_SPILLED;
unspillTree->SetInReg();
gcInfo.gcMarkRegPtrVal(dstReg, unspillTree->TypeGet());
- }
+ }
}
}
// Do Liveness update for a subnode that is being consumed by codegen
// including the logic for reload in case it is needed and also takes care
// of locating the value in the desired register.
-void CodeGen::genConsumeRegAndCopy(GenTree *tree, regNumber needReg)
+void CodeGen::genConsumeRegAndCopy(GenTree* tree, regNumber needReg)
{
regNumber treeReg = genConsumeReg(tree);
if (treeReg != needReg)
@@ -5119,14 +5065,14 @@ void CodeGen::genRegCopy(GenTree* treeNode)
// Do liveness update for a subnode that is being consumed by codegen.
// TODO-Cleanup: move to CodeGenCommon.cpp
-regNumber CodeGen::genConsumeReg(GenTree *tree)
+regNumber CodeGen::genConsumeReg(GenTree* tree)
{
if (tree->OperGet() == GT_COPY)
{
genRegCopy(tree);
}
// Handle the case where we have a lclVar that needs to be copied before use (i.e. because it
- // interferes with one of the other sources (or the target, if it's a "delayed use" register)).
+ // interferes with one of the other sources (or the target, if it's a "delayed use" register)).
// TODO-Cleanup: This is a special copyReg case in LSRA - consider eliminating these and
// always using GT_COPY to make the lclVar location explicit.
// Note that we have to do this before calling genUpdateLife because otherwise if we spill it
@@ -5137,8 +5083,8 @@ regNumber CodeGen::genConsumeReg(GenTree *tree)
// because if it's on the stack it will always get reloaded into tree->gtRegNum).
if (genIsRegCandidateLocal(tree))
{
- GenTreeLclVarCommon *lcl = tree->AsLclVarCommon();
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()];
+ GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
+ LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()];
if ((varDsc->lvRegNum != REG_STK) && (varDsc->lvRegNum != tree->gtRegNum))
{
inst_RV_RV(ins_Copy(tree->TypeGet()), tree->gtRegNum, varDsc->lvRegNum);
@@ -5158,8 +5104,8 @@ regNumber CodeGen::genConsumeReg(GenTree *tree)
if (genIsRegCandidateLocal(tree))
{
- GenTreeLclVarCommon *lcl = tree->AsLclVarCommon();
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()];
+ GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
+ LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()];
assert(varDsc->lvLRACandidate);
if ((tree->gtFlags & GTF_VAR_DEATH) != 0)
@@ -5196,7 +5142,7 @@ void CodeGen::genConsumeAddress(GenTree* addr)
// do liveness update for a subnode that is being consumed by codegen
// TODO-Cleanup: move to CodeGenCommon.cpp
-void CodeGen::genConsumeAddrMode(GenTreeAddrMode *addr)
+void CodeGen::genConsumeAddrMode(GenTreeAddrMode* addr)
{
if (addr->Base())
genConsumeReg(addr->Base());
@@ -5248,12 +5194,12 @@ void CodeGen::genConsumeRegs(GenTree* tree)
void CodeGen::genConsumeOperands(GenTreeOp* tree)
{
- GenTree* firstOp = tree->gtOp1;
+ GenTree* firstOp = tree->gtOp1;
GenTree* secondOp = tree->gtOp2;
if ((tree->gtFlags & GTF_REVERSE_OPS) != 0)
{
assert(secondOp != nullptr);
- firstOp = secondOp;
+ firstOp = secondOp;
secondOp = tree->gtOp1;
}
if (firstOp != nullptr)
@@ -5268,7 +5214,7 @@ void CodeGen::genConsumeOperands(GenTreeOp* tree)
// do liveness update for register produced by the current node in codegen
// TODO-Cleanup: move to CodeGenCommon.cpp
-void CodeGen::genProduceReg(GenTree *tree)
+void CodeGen::genProduceReg(GenTree* tree)
{
if (tree->gtFlags & GTF_SPILL)
{
@@ -5276,7 +5222,8 @@ void CodeGen::genProduceReg(GenTree *tree)
{
// Store local variable to its home location.
tree->gtFlags &= ~GTF_REG_VAL;
- inst_TT_RV(ins_Store(tree->gtType, compiler->isSIMDTypeLocalAligned(tree->gtLclVarCommon.gtLclNum)), tree, tree->gtRegNum);
+ inst_TT_RV(ins_Store(tree->gtType, compiler->isSIMDTypeLocalAligned(tree->gtLclVarCommon.gtLclNum)), tree,
+ tree->gtRegNum);
}
else
{
@@ -5301,8 +5248,7 @@ void CodeGen::genProduceReg(GenTree *tree)
// the register wouldn't be relevant.
// 2. The register candidate local is going dead. There's no point to mark
// the register as live, with a GC pointer, if the variable is dead.
- if (!genIsRegCandidateLocal(tree) ||
- ((tree->gtFlags & GTF_VAR_DEATH) == 0))
+ if (!genIsRegCandidateLocal(tree) || ((tree->gtFlags & GTF_VAR_DEATH) == 0))
{
gcInfo.gcMarkRegPtrVal(tree->gtRegNum, tree->TypeGet());
}
@@ -5314,54 +5260,42 @@ void CodeGen::genProduceReg(GenTree *tree)
// TODO-Cleanup: move to CodeGenCommon.cpp
void CodeGen::genTransferRegGCState(regNumber dst, regNumber src)
{
- regMaskTP srcMask = genRegMask(src);
- regMaskTP dstMask = genRegMask(dst);
-
- if (gcInfo.gcRegGCrefSetCur & srcMask)
- {
- gcInfo.gcMarkRegSetGCref(dstMask);
- }
- else if (gcInfo.gcRegByrefSetCur & srcMask)
- {
- gcInfo.gcMarkRegSetByref(dstMask);
- }
- else
- {
- gcInfo.gcMarkRegSetNpt(dstMask);
- }
-}
+ regMaskTP srcMask = genRegMask(src);
+ regMaskTP dstMask = genRegMask(dst);
+ if (gcInfo.gcRegGCrefSetCur & srcMask)
+ {
+ gcInfo.gcMarkRegSetGCref(dstMask);
+ }
+ else if (gcInfo.gcRegByrefSetCur & srcMask)
+ {
+ gcInfo.gcMarkRegSetByref(dstMask);
+ }
+ else
+ {
+ gcInfo.gcMarkRegSetNpt(dstMask);
+ }
+}
// generates an ip-relative call or indirect call via reg ('call reg')
// pass in 'addr' for a relative call or 'base' for an indirect register call
-// methHnd - optional, only used for pretty printing
+// methHnd - optional, only used for pretty printing
// retSize - emitter type of return for GC purposes, should be EA_BYREF, EA_GCREF, or EA_PTRSIZE(not GC)
// TODO-Cleanup: move to CodeGenCommon.cpp
void CodeGen::genEmitCall(int callType,
CORINFO_METHOD_HANDLE methHnd,
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
- void* addr,
- emitAttr retSize,
- emitAttr secondRetSize,
- IL_OFFSETX ilOffset,
- regNumber base,
- bool isJump,
- bool isNoGC)
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) void* addr,
+ emitAttr retSize,
+ emitAttr secondRetSize,
+ IL_OFFSETX ilOffset,
+ regNumber base,
+ bool isJump,
+ bool isNoGC)
{
-
- getEmitter()->emitIns_Call(emitter::EmitCallType(callType),
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- addr,
- 0,
- retSize,
- secondRetSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- base, REG_NA, 0, 0,
- isJump,
+
+ getEmitter()->emitIns_Call(emitter::EmitCallType(callType), methHnd, INDEBUG_LDISASM_COMMA(sigInfo) addr, 0,
+ retSize, secondRetSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset, base, REG_NA, 0, 0, isJump,
emitter::emitNoGChelper(compiler->eeGetHelperNum(methHnd)));
}
@@ -5371,44 +5305,32 @@ void CodeGen::genEmitCall(int callType,
// TODO-Cleanup: move to CodeGenCommon.cpp
void CodeGen::genEmitCall(int callType,
CORINFO_METHOD_HANDLE methHnd,
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
- GenTreeIndir* indir,
- emitAttr retSize,
- emitAttr secondRetSize,
- IL_OFFSETX ilOffset)
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) GenTreeIndir* indir,
+ emitAttr retSize,
+ emitAttr secondRetSize,
+ IL_OFFSETX ilOffset)
{
genConsumeAddress(indir->Addr());
- getEmitter()->emitIns_Call(emitter::EmitCallType(callType),
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- nullptr,
- 0,
- retSize,
- secondRetSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- indir->Base() ? indir->Base()->gtRegNum : REG_NA,
- indir->Index() ? indir->Index()->gtRegNum : REG_NA,
- indir->Scale(),
- indir->Offset());
+ getEmitter()->emitIns_Call(emitter::EmitCallType(callType), methHnd, INDEBUG_LDISASM_COMMA(sigInfo) nullptr, 0,
+ retSize, secondRetSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset, indir->Base() ? indir->Base()->gtRegNum : REG_NA,
+ indir->Index() ? indir->Index()->gtRegNum : REG_NA, indir->Scale(), indir->Offset());
}
// Produce code for a GT_CALL node
void CodeGen::genCallInstruction(GenTreePtr node)
{
- GenTreeCall *call = node->AsCall();
+ GenTreeCall* call = node->AsCall();
assert(call->gtOper == GT_CALL);
- gtCallTypes callType = (gtCallTypes)call->gtCallType;
+ gtCallTypes callType = (gtCallTypes)call->gtCallType;
- IL_OFFSETX ilOffset = BAD_IL_OFFSET;
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET;
// all virtuals should have been expanded into a control expression
- assert (!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
+ assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
// Consume all the arg regs
for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
@@ -5419,16 +5341,16 @@ void CodeGen::genCallInstruction(GenTreePtr node)
fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode->gtSkipReloadOrCopy());
assert(curArgTabEntry);
-
+
if (curArgTabEntry->regNum == REG_STK)
continue;
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_LIST)
{
- GenTreeArgList* argListPtr = argNode->AsArgList();
- unsigned iterationNum = 0;
- regNumber argReg = curArgTabEntry->regNum;
+ GenTreeArgList* argListPtr = argNode->AsArgList();
+ unsigned iterationNum = 0;
+ regNumber argReg = curArgTabEntry->regNum;
for (; argListPtr != nullptr; argListPtr = argListPtr->Rest(), iterationNum++)
{
GenTreePtr putArgRegNode = argListPtr->gtOp.gtOp1;
@@ -5438,7 +5360,8 @@ void CodeGen::genCallInstruction(GenTreePtr node)
if (putArgRegNode->gtRegNum != argReg)
{
- inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg, putArgRegNode->gtRegNum);
+ inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg,
+ putArgRegNode->gtRegNum);
}
argReg = genRegArgNext(argReg);
@@ -5454,9 +5377,9 @@ void CodeGen::genCallInstruction(GenTreePtr node)
}
}
- // In the case of a varargs call,
+ // In the case of a varargs call,
// the ABI dictates that if we have floating point args,
- // we must pass the enregistered arguments in both the
+ // we must pass the enregistered arguments in both the
    // integer and floating point registers, so let's do that.
if (call->IsVarargs() && varTypeIsFloating(argNode))
{
@@ -5473,18 +5396,18 @@ void CodeGen::genCallInstruction(GenTreePtr node)
// Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper method.
CORINFO_METHOD_HANDLE methHnd;
- GenTree* target = call->gtControlExpr;
+ GenTree* target = call->gtControlExpr;
if (callType == CT_INDIRECT)
{
assert(target == nullptr);
- target = call->gtCall.gtCallAddr;
+ target = call->gtCall.gtCallAddr;
methHnd = nullptr;
}
else
{
methHnd = call->gtCallMethHnd;
}
-
+
CORINFO_SIG_INFO* sigInfo = nullptr;
#ifdef DEBUG
// Pass the call signature information down into the emitter so the emitter can associate
@@ -5515,7 +5438,7 @@ void CodeGen::genCallInstruction(GenTreePtr node)
return;
}
-    // For a pinvoke to unmanaged code we emit a label to clear
+    // For a pinvoke to unmanaged code we emit a label to clear
// the GC pointer state before the callsite.
// We can't utilize the typical lazy killing of GC pointers
// at (or inside) the callsite.
@@ -5525,21 +5448,20 @@ void CodeGen::genCallInstruction(GenTreePtr node)
}
// Determine return value size(s).
- ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
- emitAttr retSize = EA_PTRSIZE;
- emitAttr secondRetSize = EA_UNKNOWN;
+ ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
+ emitAttr retSize = EA_PTRSIZE;
+ emitAttr secondRetSize = EA_UNKNOWN;
if (call->HasMultiRegRetVal())
{
- retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
+ retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
secondRetSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(1));
}
else
{
assert(!varTypeIsStruct(call));
- if (call->gtType == TYP_REF ||
- call->gtType == TYP_ARRAY)
+ if (call->gtType == TYP_REF || call->gtType == TYP_ARRAY)
{
retSize = EA_GCREF;
}
@@ -5559,40 +5481,35 @@ void CodeGen::genCallInstruction(GenTreePtr node)
(void)compiler->genCallSite2ILOffsetMap->Lookup(call, &ilOffset);
}
#endif // DEBUGGING_SUPPORT
-
+
if (target != nullptr)
{
// For Arm64 a call target can not be a contained indirection
assert(!target->isContainedIndir());
-
+
// We have already generated code for gtControlExpr evaluating it into a register.
// We just need to emit "call reg" in this case.
//
assert(genIsValidIntReg(target->gtRegNum));
- genEmitCall(emitter::EC_INDIR_R,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- nullptr, //addr
- retSize,
- secondRetSize,
- ilOffset,
- genConsumeReg(target));
+ genEmitCall(emitter::EC_INDIR_R, methHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) nullptr, // addr
+ retSize, secondRetSize, ilOffset, genConsumeReg(target));
}
else
{
// Generate a direct call to a non-virtual user defined or helper method
assert(callType == CT_HELPER || callType == CT_USER_FUNC);
-
- void *addr = nullptr;
+
+ void* addr = nullptr;
if (callType == CT_HELPER)
- {
+ {
// Direct call to a helper method.
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
- void *pAddr = nullptr;
- addr = compiler->compGetHelperFtn(helperNum, (void **)&pAddr);
+ void* pAddr = nullptr;
+ addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
if (addr == nullptr)
{
@@ -5602,7 +5519,7 @@ void CodeGen::genCallInstruction(GenTreePtr node)
else
{
// Direct call to a non-virtual user function.
- CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
+ CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
if (call->IsSameThis())
{
aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
@@ -5636,12 +5553,7 @@ void CodeGen::genCallInstruction(GenTreePtr node)
REG_IP0);
#else
// Non-virtual direct call to known addresses
- genEmitCall(emitter::EC_FUNC_TOKEN,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- addr,
- retSize,
- secondRetSize,
+ genEmitCall(emitter::EC_FUNC_TOKEN, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) addr, retSize, secondRetSize,
ilOffset);
#endif
}
@@ -5679,8 +5591,8 @@ void CodeGen::genCallInstruction(GenTreePtr node)
// to regs allocated to call node.
for (unsigned i = 0; i < regCount; ++i)
{
- var_types regType = pRetTypeDesc->GetReturnRegType(i);
- returnReg = pRetTypeDesc->GetABIReturnReg(i);
+ var_types regType = pRetTypeDesc->GetReturnRegType(i);
+ returnReg = pRetTypeDesc->GetABIReturnReg(i);
regNumber allocatedReg = call->GetRegNumByIdx(i);
if (returnReg != allocatedReg)
{
@@ -5689,7 +5601,7 @@ void CodeGen::genCallInstruction(GenTreePtr node)
}
}
else
- {
+ {
if (varTypeIsFloating(returnType))
{
returnReg = REG_FLOATRET;
@@ -5702,9 +5614,9 @@ void CodeGen::genCallInstruction(GenTreePtr node)
if (call->gtRegNum != returnReg)
{
inst_RV_RV(ins_Copy(returnType), call->gtRegNum, returnReg, returnType);
- }
+ }
}
-
+
genProduceReg(call);
}
@@ -5726,16 +5638,16 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
assert(compiler->compJmpOpUsed);
// If no arguments, nothing to do
- if (compiler->info.compArgsCount == 0)
+ if (compiler->info.compArgsCount == 0)
{
return;
}
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
- unsigned varNum;
- LclVarDsc* varDsc;
-
+ unsigned varNum;
+ LclVarDsc* varDsc;
+
// First move any en-registered stack arguments back to the stack.
// At the same time any reg arg not in correct reg is moved back to its stack location.
//
@@ -5749,10 +5661,10 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
if (varDsc->lvPromoted)
{
- noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
- varDsc = compiler->lvaTable + fieldVarNum;
+ varDsc = compiler->lvaTable + fieldVarNum;
}
noway_assert(varDsc->lvIsParam);
@@ -5761,7 +5673,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
        // Skip reg args which are already in their right register for the jmp call.
// If not, we will spill such args to their stack locations.
//
- // If we need to generate a tail call profiler hook, then spill all
+ // If we need to generate a tail call profiler hook, then spill all
// arg regs to free them up for the callback.
if (!compiler->compIsProfilerHookNeeded() && (varDsc->lvRegNum == varDsc->lvArgReg))
continue;
@@ -5778,7 +5690,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
assert(varDsc->lvRegNum != REG_STK);
assert(varDsc->TypeGet() != TYP_STRUCT);
var_types storeType = genActualType(varDsc->TypeGet());
- emitAttr storeSize = emitActualTypeSize(storeType);
+ emitAttr storeSize = emitActualTypeSize(storeType);
getEmitter()->emitIns_S_R(ins_Store(storeType), storeSize, varDsc->lvRegNum, varNum, 0);
@@ -5793,7 +5705,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varNum);
}
}
-
+
#ifdef PROFILING_SUPPORTED
// At this point all arg regs are free.
// Emit tail call profiler callback.
@@ -5801,22 +5713,22 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
#endif
// Next move any un-enregistered register arguments back to their register.
- regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
- unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
+ regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
+ unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
for (varNum = 0; (varNum < compiler->info.compArgsCount); varNum++)
{
varDsc = compiler->lvaTable + varNum;
if (varDsc->lvPromoted)
{
- noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
- varDsc = compiler->lvaTable + fieldVarNum;
+ varDsc = compiler->lvaTable + fieldVarNum;
}
noway_assert(varDsc->lvIsParam);
// Skip if arg not passed in a register.
- if (!varDsc->lvIsRegArg)
+ if (!varDsc->lvIsRegArg)
continue;
// Register argument
@@ -5824,7 +5736,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
// Is register argument already in the right register?
// If not load it from its stack location.
- regNumber argReg = varDsc->lvArgReg; // incoming arg register
+ regNumber argReg = varDsc->lvArgReg; // incoming arg register
regNumber argRegNext = REG_NA;
if (varDsc->lvRegNum != argReg)
@@ -5913,9 +5825,9 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
if (remainingIntArgMask != RBM_NONE)
{
getEmitter()->emitDisableGC();
- for (int argNum = 0, argOffset=0; argNum < MAX_REG_ARG; ++argNum)
+ for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
- regNumber argReg = intArgRegs[argNum];
+ regNumber argReg = intArgRegs[argNum];
regMaskTP argRegMask = genRegMask(argReg);
if ((remainingIntArgMask & argRegMask) != 0)
@@ -5932,11 +5844,11 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
}
// produce code for a GT_LEA subnode
-void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
+void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
genConsumeOperands(lea);
- emitter *emit = getEmitter();
- emitAttr size = emitTypeSize(lea);
+ emitter* emit = getEmitter();
+ emitAttr size = emitTypeSize(lea);
unsigned offset = lea->gtOffset;
// In ARM64 we can only load addresses of the form:
@@ -5955,9 +5867,9 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
// produce LEAs that are a 1:1 relationship to the ARM64 architecture.
if (lea->Base() && lea->Index())
{
- GenTree* memBase = lea->Base();
- GenTree* index = lea->Index();
- unsigned offset = lea->gtOffset;
+ GenTree* memBase = lea->Base();
+ GenTree* index = lea->Index();
+ unsigned offset = lea->gtOffset;
DWORD lsl;
@@ -5969,7 +5881,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
if (offset != 0)
{
regMaskTP tmpRegMask = lea->gtRsvdRegs;
- regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask);
noway_assert(tmpReg != REG_NA);
if (emitter::emitIns_valid_imm_for_add(offset, EA_8BYTE))
@@ -5977,21 +5889,23 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
if (lsl > 0)
{
// Generate code to set tmpReg = base + index*scale
- emit->emitIns_R_R_R_I(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum, lsl, INS_OPTS_LSL);
+ emit->emitIns_R_R_R_I(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum, lsl,
+ INS_OPTS_LSL);
}
- else // no scale
+ else // no scale
{
// Generate code to set tmpReg = base + index
emit->emitIns_R_R_R(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum);
}
// Then compute target reg from [tmpReg + offset]
- emit->emitIns_R_R_I(INS_add, size, lea->gtRegNum, tmpReg, offset);;
+ emit->emitIns_R_R_I(INS_add, size, lea->gtRegNum, tmpReg, offset);
+ ;
}
else // large offset
{
// First load/store tmpReg with the large offset constant
- instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
+ instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then add the base register
// rd = rd + base
emit->emitIns_R_R_R(INS_add, EA_PTRSIZE, tmpReg, tmpReg, memBase->gtRegNum);
@@ -6007,7 +5921,8 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
if (lsl > 0)
{
// Then compute target reg from [base + index*scale]
- emit->emitIns_R_R_R_I(INS_add, size, lea->gtRegNum, memBase->gtRegNum, index->gtRegNum, lsl, INS_OPTS_LSL);
+ emit->emitIns_R_R_R_I(INS_add, size, lea->gtRegNum, memBase->gtRegNum, index->gtRegNum, lsl,
+ INS_OPTS_LSL);
}
else
{
@@ -6018,7 +5933,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
}
else if (lea->Base())
{
- GenTree* memBase = lea->Base();
+ GenTree* memBase = lea->Base();
if (emitter::emitIns_valid_imm_for_add(offset, EA_8BYTE))
{
@@ -6027,7 +5942,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
// Then compute target reg from [memBase + offset]
emit->emitIns_R_R_I(INS_add, size, lea->gtRegNum, memBase->gtRegNum, offset);
}
- else // offset is zero
+ else // offset is zero
{
emit->emitIns_R_R(INS_mov, size, lea->gtRegNum, memBase->gtRegNum);
}
@@ -6036,7 +5951,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
{
// We require a tmpReg to hold the offset
regMaskTP tmpRegMask = lea->gtRsvdRegs;
- regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask);
noway_assert(tmpReg != REG_NA);
// First load tmpReg with the large offset constant
@@ -6048,7 +5963,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
}
else if (lea->Index())
{
- // If we encounter a GT_LEA node without a base it means it came out
+ // If we encounter a GT_LEA node without a base it means it came out
// when attempting to optimize an arbitrary arithmetic expression during lower.
// This is currently disabled in ARM64 since we need to adjust lower to account
// for the simpler instructions ARM64 supports.
@@ -6070,18 +5985,16 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
// jmpToTrueLabel[2] - (output) On Arm64 both branches will always branch to the true label
//
// Return Value:
-// Sets the proper values into the array elements of jmpKind[] and jmpToTrueLabel[]
+// Sets the proper values into the array elements of jmpKind[] and jmpToTrueLabel[]
//
// Assumptions:
// At least one conditional branch instruction will be returned.
-// Typically only one conditional branch is needed
+// Typically only one conditional branch is needed
// and the second jmpKind[] value is set to EJ_NONE
//-------------------------------------------------------------------------------------------
// static
-void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree,
- emitJumpKind jmpKind[2],
- bool jmpToTrueLabel[2])
+void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2])
{
// On Arm64 both branches will always branch to the true label
jmpToTrueLabel[0] = true;
@@ -6091,14 +6004,14 @@ void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree,
if (!varTypeIsFloating(cmpTree->gtOp.gtOp1->gtEffectiveVal()))
{
CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
- jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, compareKind);
- jmpKind[1] = EJ_NONE;
+ jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, compareKind);
+ jmpKind[1] = EJ_NONE;
}
- else // We have a Floating Point Compare operation
+ else // We have a Floating Point Compare operation
{
assert(cmpTree->OperIsCompare());
- // For details on this mapping, see the ARM64 Condition Code
+ // For details on this mapping, see the ARM64 Condition Code
// table at section C1.2.3 in the ARMV8 architecture manual
//
@@ -6110,77 +6023,77 @@ void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree,
// Must branch if we have an NaN, unordered
switch (cmpTree->gtOper)
{
- case GT_EQ:
- jmpKind[0] = EJ_eq; // branch or set when equal (and no NaN's)
- jmpKind[1] = EJ_vs; // branch or set when we have a NaN
- break;
+ case GT_EQ:
+ jmpKind[0] = EJ_eq; // branch or set when equal (and no NaN's)
+ jmpKind[1] = EJ_vs; // branch or set when we have a NaN
+ break;
- case GT_NE:
- jmpKind[0] = EJ_ne; // branch or set when not equal (or have NaN's)
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_NE:
+ jmpKind[0] = EJ_ne; // branch or set when not equal (or have NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_LT:
- jmpKind[0] = EJ_lt; // branch or set when less than (or have NaN's)
- jmpKind[1] = EJ_NONE;
- break;
-
- case GT_LE:
- jmpKind[0] = EJ_le; // branch or set when less than or equal (or have NaN's)
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_LT:
+ jmpKind[0] = EJ_lt; // branch or set when less than (or have NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_GT:
- jmpKind[0] = EJ_hi; // branch or set when greater than (or have NaN's)
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_LE:
+ jmpKind[0] = EJ_le; // branch or set when less than or equal (or have NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_GE:
- jmpKind[0] = EJ_hs; // branch or set when greater than or equal (or have NaN's)
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_GT:
+ jmpKind[0] = EJ_hi; // branch or set when greater than (or have NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
- default:
- unreached();
+ case GT_GE:
+ jmpKind[0] = EJ_hs; // branch or set when greater than or equal (or have NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ default:
+ unreached();
}
}
- else // ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) == 0)
+ else // ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) == 0)
{
// Do not branch if we have an NaN, unordered
switch (cmpTree->gtOper)
{
- case GT_EQ:
- jmpKind[0] = EJ_eq; // branch or set when equal (and no NaN's)
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_EQ:
+ jmpKind[0] = EJ_eq; // branch or set when equal (and no NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_NE:
- jmpKind[0] = EJ_gt; // branch or set when greater than (and no NaN's)
- jmpKind[1] = EJ_lo; // branch or set when less than (and no NaN's)
- break;
+ case GT_NE:
+ jmpKind[0] = EJ_gt; // branch or set when greater than (and no NaN's)
+ jmpKind[1] = EJ_lo; // branch or set when less than (and no NaN's)
+ break;
- case GT_LT:
- jmpKind[0] = EJ_lo; // branch or set when less than (and no NaN's)
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_LT:
+ jmpKind[0] = EJ_lo; // branch or set when less than (and no NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_LE:
- jmpKind[0] = EJ_ls; // branch or set when less than or equal (and no NaN's)
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_LE:
+ jmpKind[0] = EJ_ls; // branch or set when less than or equal (and no NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_GT:
- jmpKind[0] = EJ_gt; // branch or set when greater than (and no NaN's)
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_GT:
+ jmpKind[0] = EJ_gt; // branch or set when greater than (and no NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_GE:
- jmpKind[0] = EJ_ge; // branch or set when greater than or equal (and no NaN's)
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_GE:
+ jmpKind[0] = EJ_ge; // branch or set when greater than or equal (and no NaN's)
+ jmpKind[1] = EJ_NONE;
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
}
}
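
For reference, the two condition tables above can be modeled in portable C++: after an ARM64 fcmp the VS condition is true exactly when the comparison is unordered (either operand is a NaN). A minimal sketch, with the emitJumpKind values only in comments (this is not the emitter API):

#include <cmath>
#include <cstdio>

static bool isUnordered(double a, double b) { return std::isnan(a) || std::isnan(b); }

// GT_EQ with GTF_RELOP_NAN_UN: branch when equal OR unordered
// (jmpKind[0] = EJ_eq, jmpKind[1] = EJ_vs).
static bool beqUnordered(double a, double b) { return (a == b) || isUnordered(a, b); }

// GT_NE without GTF_RELOP_NAN_UN: branch only when strictly greater or strictly
// less (jmpKind[0] = EJ_gt, jmpKind[1] = EJ_lo); a NaN makes both conditions false.
static bool bneOrdered(double a, double b) { return (a > b) || (a < b); }

int main()
{
    double nan = std::nan("");
    printf("beq.un(1,1)=%d beq.un(1,NaN)=%d\n", beqUnordered(1, 1), beqUnordered(1, nan)); // 1 1
    printf("bne(1,2)=%d    bne(1,NaN)=%d\n", bneOrdered(1, 2), bneOrdered(1, nan));        // 1 0
    return 0;
}
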
@@ -6203,7 +6116,7 @@ void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree,
void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
{
emitJumpKind jumpKind[2];
- bool branchToTrueLabel[2];
+ bool branchToTrueLabel[2];
genJumpKindsForTree(tree, jumpKind, branchToTrueLabel);
assert(jumpKind[0] != EJ_NONE);
@@ -6214,32 +6127,32 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
//
if (jumpKind[1] != EJ_NONE)
{
- emitter * emit = getEmitter();
- bool ordered = ((tree->gtFlags & GTF_RELOP_NAN_UN) == 0);
- insCond secondCond;
+ emitter* emit = getEmitter();
+ bool ordered = ((tree->gtFlags & GTF_RELOP_NAN_UN) == 0);
+ insCond secondCond;
- // The only ones that require two operations are the
+ // The only ones that require two operations are the
// floating point compare operations of BEQ or BNE.UN
//
if (tree->gtOper == GT_EQ)
{
// This must be an ordered comparison.
assert(ordered);
- assert(jumpKind[1] == EJ_vs); // We complement this value
- secondCond = INS_COND_VC; // for the secondCond
+ assert(jumpKind[1] == EJ_vs); // We complement this value
+ secondCond = INS_COND_VC; // for the secondCond
}
else // gtOper == GT_NE
{
- // This must be BNE.UN (unordered comparison)
+ // This must be BNE.UN (unordered comparison)
assert((tree->gtOper == GT_NE) && !ordered);
- assert(jumpKind[1] == EJ_lo); // We complement this value
- secondCond = INS_COND_HS; // for the secondCond
+ assert(jumpKind[1] == EJ_lo); // We complement this value
+ secondCond = INS_COND_HS; // for the secondCond
}
// The second instruction is a 'csinc' instruction that either selects the previous dstReg
// or increments the ZR register, which produces a 1 result.
- emit->emitIns_R_R_R_COND(INS_csinc, EA_8BYTE, dstReg, dstReg, REG_ZR, secondCond);
+ emit->emitIns_R_R_R_COND(INS_csinc, EA_8BYTE, dstReg, dstReg, REG_ZR, secondCond);
}
}
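
The csinc instruction computes Xd = cond ? Xn : (Xm + 1), so with Xn = dstReg and Xm = ZR it either keeps the value produced by the first instruction or forces the result to 1. A small C++ model of the two-step sequence, assuming the first instruction materializes jumpKind[0] into dstReg (a sketch, not the emitter API):

#include <cstdint>
#include <cstdio>

// csinc Xd, Xn, Xm, cond
static uint64_t csinc(bool cond, uint64_t xn, uint64_t xm) { return cond ? xn : xm + 1; }

int main()
{
    const uint64_t ZR = 0;                // the zero register
    bool equal = false, unordered = true; // e.g. one operand was a NaN
    uint64_t dst = equal ? 1 : 0;         // first instruction: set dst from jumpKind[0] (eq)
    // second instruction: csinc dst, dst, zr, VC -- on an unordered compare (VS),
    // overwrite dst with ZR + 1, i.e. force the result to 1
    dst = csinc(/* VC = */ !unordered, dst, ZR);
    printf("dst = %llu\n", (unsigned long long)dst); // prints 1
    return 0;
}
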
@@ -6266,18 +6179,18 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
assert(treeNode->OperGet() == GT_CAST);
GenTreePtr castOp = treeNode->gtCast.CastOp();
- emitter * emit = getEmitter();
+ emitter* emit = getEmitter();
- var_types dstType = treeNode->CastToType();
- var_types srcType = genActualType(castOp->TypeGet());
- emitAttr movSize = emitActualTypeSize(dstType);
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = genActualType(castOp->TypeGet());
+ emitAttr movSize = emitActualTypeSize(dstType);
bool movRequired = false;
regNumber targetReg = treeNode->gtRegNum;
regNumber sourceReg = castOp->gtRegNum;
// For Long to Int conversion we will have a reserved integer register to hold the immediate mask
- regNumber tmpReg = (treeNode->gtRsvdRegs == RBM_NONE) ? REG_NA : genRegNumFromMask(treeNode->gtRsvdRegs);
+ regNumber tmpReg = (treeNode->gtRsvdRegs == RBM_NONE) ? REG_NA : genRegNumFromMask(treeNode->gtRsvdRegs);
assert(genIsValidIntReg(targetReg));
assert(genIsValidIntReg(sourceReg));
@@ -6308,7 +6221,7 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
// cast to TYP_ULONG:
// We use a mov with size=EA_4BYTE
// which will zero out the upper bits
- movSize = EA_4BYTE;
+ movSize = EA_4BYTE;
movRequired = true;
}
}
@@ -6377,8 +6290,8 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
// If we need to treat a signed type as unsigned
if ((treeNode->gtFlags & GTF_UNSIGNED) != 0)
{
- extendType = genUnsignedType(srcType);
- movSize = emitTypeSize(extendType);
+ extendType = genUnsignedType(srcType);
+ movSize = emitTypeSize(extendType);
movRequired = true;
}
else
@@ -6388,12 +6301,12 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
extendType = srcType;
if (srcType == TYP_UINT)
{
- // If we are casting from a smaller type to
+ // If we are casting from a smaller type to
// a larger type, then we need to make sure the
            // higher 4 bytes are zero to guarantee the correct value.
// Therefore using a mov with EA_4BYTE in place of EA_8BYTE
// will zero the upper bits
- movSize = EA_4BYTE;
+ movSize = EA_4BYTE;
movRequired = true;
}
}
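
The EA_4BYTE mov is sufficient here because an ARM64 write to a W register zeroes the upper 32 bits of the corresponding X register; a uint32_t to uint64_t conversion in C++ gives the same guarantee (a sketch of the effect, not the emitter API):

#include <cstdint>
#include <cstdio>

int main()
{
    uint64_t srcReg  = 0xDEADBEEF00000000ULL | 0x80000001u; // stale bits in the upper half
    uint32_t low32   = static_cast<uint32_t>(srcReg);       // what "mov wD, wS" reads
    uint64_t widened = low32;                                // upper 32 bits are guaranteed zero
    printf("%016llx\n", (unsigned long long)widened);        // prints 0000000080000001
    return 0;
}
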
@@ -6415,7 +6328,7 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
assert(!emit->emitInsIsLoad(ins));
if ((ins != INS_mov) || movRequired || (targetReg != sourceReg))
- {
+ {
emit->emitIns_R_R(ins, movSize, targetReg, sourceReg);
}
@@ -6436,8 +6349,7 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
// The treeNode must have an assigned register.
// The cast is between float and double or vice versa.
//
-void
-CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
+void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
{
// float <--> double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
@@ -6446,12 +6358,12 @@ CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
regNumber targetReg = treeNode->gtRegNum;
assert(genIsValidFloatReg(targetReg));
- GenTreePtr op1 = treeNode->gtOp.gtOp1;
- assert(!op1->isContained()); // Cannot be contained
- assert(genIsValidFloatReg(op1->gtRegNum)); // Must be a valid float reg.
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ assert(!op1->isContained()); // Cannot be contained
+ assert(genIsValidFloatReg(op1->gtRegNum)); // Must be a valid float reg.
- var_types dstType = treeNode->CastToType();
- var_types srcType = op1->TypeGet();
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
genConsumeOperands(treeNode->AsOp());
@@ -6461,8 +6373,8 @@ CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
if (srcType != dstType)
{
- insOpts cvtOption = (srcType == TYP_FLOAT) ? INS_OPTS_S_TO_D // convert Single to Double
- : INS_OPTS_D_TO_S; // convert Double to Single
+ insOpts cvtOption = (srcType == TYP_FLOAT) ? INS_OPTS_S_TO_D // convert Single to Double
+ : INS_OPTS_D_TO_S; // convert Double to Single
getEmitter()->emitIns_R_R(INS_fcvt, emitTypeSize(treeNode), treeNode->gtRegNum, op1->gtRegNum, cvtOption);
}
@@ -6489,8 +6401,7 @@ CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
// The treeNode must have an assigned register.
// SrcType= int32/uint32/int64/uint64 and DstType=float/double.
//
-void
-CodeGen::genIntToFloatCast(GenTreePtr treeNode)
+void CodeGen::genIntToFloatCast(GenTreePtr treeNode)
{
// int type --> float/double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
@@ -6499,12 +6410,12 @@ CodeGen::genIntToFloatCast(GenTreePtr treeNode)
regNumber targetReg = treeNode->gtRegNum;
assert(genIsValidFloatReg(targetReg));
- GenTreePtr op1 = treeNode->gtOp.gtOp1;
- assert(!op1->isContained()); // Cannot be contained
- assert(genIsValidIntReg(op1->gtRegNum)); // Must be a valid int reg.
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ assert(!op1->isContained()); // Cannot be contained
+ assert(genIsValidIntReg(op1->gtRegNum)); // Must be a valid int reg.
- var_types dstType = treeNode->CastToType();
- var_types srcType = op1->TypeGet();
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = op1->TypeGet();
assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
// force the srcType to unsigned if GT_UNSIGNED flag is set
@@ -6514,14 +6425,14 @@ CodeGen::genIntToFloatCast(GenTreePtr treeNode)
}
    // We should never see a srcType whose size is neither EA_4BYTE nor EA_8BYTE
- // For conversions from small types (byte/sbyte/int16/uint16) to float/double,
- // we expect the front-end or lowering phase to have generated two levels of cast.
+ // For conversions from small types (byte/sbyte/int16/uint16) to float/double,
+ // we expect the front-end or lowering phase to have generated two levels of cast.
//
emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
- noway_assert((srcSize == EA_4BYTE) ||(srcSize == EA_8BYTE));
+ noway_assert((srcSize == EA_4BYTE) || (srcSize == EA_8BYTE));
- instruction ins = varTypeIsUnsigned(srcType) ? INS_ucvtf : INS_scvtf;
- insOpts cvtOption = INS_OPTS_NONE; // invalid value
+ instruction ins = varTypeIsUnsigned(srcType) ? INS_ucvtf : INS_scvtf;
+ insOpts cvtOption = INS_OPTS_NONE; // invalid value
if (dstType == TYP_DOUBLE)
{
@@ -6570,8 +6481,7 @@ CodeGen::genIntToFloatCast(GenTreePtr treeNode)
// The treeNode must have an assigned register.
// SrcType=float/double and DstType= int32/uint32/int64/uint64
//
-void
-CodeGen::genFloatToIntCast(GenTreePtr treeNode)
+void CodeGen::genFloatToIntCast(GenTreePtr treeNode)
{
// we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by front-end.
@@ -6579,29 +6489,29 @@ CodeGen::genFloatToIntCast(GenTreePtr treeNode)
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->gtRegNum;
- assert(genIsValidIntReg(targetReg)); // Must be a valid int reg.
+ assert(genIsValidIntReg(targetReg)); // Must be a valid int reg.
- GenTreePtr op1 = treeNode->gtOp.gtOp1;
- assert(!op1->isContained()); // Cannot be contained
- assert(genIsValidFloatReg(op1->gtRegNum)); // Must be a valid float reg.
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ assert(!op1->isContained()); // Cannot be contained
+ assert(genIsValidFloatReg(op1->gtRegNum)); // Must be a valid float reg.
- var_types dstType = treeNode->CastToType();
- var_types srcType = op1->TypeGet();
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
    // We should never see a dstType whose size is neither EA_4BYTE nor EA_8BYTE
- // For conversions to small types (byte/sbyte/int16/uint16) from float/double,
- // we expect the front-end or lowering phase to have generated two levels of cast.
+ // For conversions to small types (byte/sbyte/int16/uint16) from float/double,
+ // we expect the front-end or lowering phase to have generated two levels of cast.
//
emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
- noway_assert((dstSize == EA_4BYTE) ||(dstSize == EA_8BYTE));
+ noway_assert((dstSize == EA_4BYTE) || (dstSize == EA_8BYTE));
- instruction ins = INS_fcvtzs; // default to sign converts
- insOpts cvtOption = INS_OPTS_NONE; // invalid value
+ instruction ins = INS_fcvtzs; // default to sign converts
+ insOpts cvtOption = INS_OPTS_NONE; // invalid value
if (varTypeIsUnsigned(dstType))
{
- ins = INS_fcvtzu; // use unsigned converts
+ ins = INS_fcvtzu; // use unsigned converts
}
if (srcType == TYP_DOUBLE)
@@ -6631,7 +6541,7 @@ CodeGen::genFloatToIntCast(GenTreePtr treeNode)
}
genConsumeOperands(treeNode->AsOp());
-
+
getEmitter()->emitIns_R_R(ins, dstSize, treeNode->gtRegNum, op1->gtRegNum, cvtOption);
genProduceReg(treeNode);
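
Both fcvtzs and fcvtzu convert with rounding toward zero; C++'s built-in float-to-integer conversion has the same behavior for in-range values, which makes the choice above easy to check (a sketch, in-range inputs assumed):

#include <cstdint>
#include <cstdio>

int main()
{
    int64_t  s = static_cast<int64_t>(-3.9); // fcvtzs: truncates toward zero, gives -3
    uint64_t u = static_cast<uint64_t>(3.9); // fcvtzu: unsigned destination, gives 3
    printf("%lld %llu\n", (long long)s, (unsigned long long)u);
    return 0;
}
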
@@ -6648,25 +6558,24 @@ CodeGen::genFloatToIntCast(GenTreePtr treeNode)
//
// Assumptions:
// GT_CKFINITE node has reserved an internal register.
-//
+//
// TODO-ARM64-CQ - mark the operand as contained if known to be in
// memory (e.g. field or an array element).
//
-void
-CodeGen::genCkfinite(GenTreePtr treeNode)
+void CodeGen::genCkfinite(GenTreePtr treeNode)
{
assert(treeNode->OperGet() == GT_CKFINITE);
- GenTreePtr op1 = treeNode->gtOp.gtOp1;
- var_types targetType = treeNode->TypeGet();
- int expMask = (targetType == TYP_FLOAT) ? 0x7F8 : 0x7FF; // Bit mask to extract exponent.
- int shiftAmount = targetType == TYP_FLOAT ? 20 : 52;
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ var_types targetType = treeNode->TypeGet();
+ int expMask = (targetType == TYP_FLOAT) ? 0x7F8 : 0x7FF; // Bit mask to extract exponent.
+ int shiftAmount = targetType == TYP_FLOAT ? 20 : 52;
- emitter * emit = getEmitter();
+ emitter* emit = getEmitter();
// Extract exponent into a register.
regNumber intReg = genRegNumFromMask(treeNode->gtRsvdRegs);
- regNumber fpReg = genConsumeReg(op1);
+ regNumber fpReg = genConsumeReg(op1);
assert(intReg != REG_NA);
emit->emitIns_R_R(ins_Copy(targetType), emitTypeSize(treeNode), intReg, fpReg);
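
For the double case (shiftAmount = 52, expMask = 0x7FF) the check above can be reproduced in standalone C++: an all-ones exponent field marks Infinity or NaN, which is exactly what GT_CKFINITE must throw on (an illustrative model, double case only):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

static bool isFiniteDouble(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));       // fmov intReg, fpReg
    int exponent = (int)((bits >> 52) & 0x7FF); // shift by shiftAmount, mask with expMask
    return exponent != 0x7FF;                   // 0x7FF encodes Inf/NaN
}

int main()
{
    printf("%d %d %d\n", isFiniteDouble(1.5),
           isFiniteDouble(std::numeric_limits<double>::infinity()),
           isFiniteDouble(std::nan(""))); // prints 1 0 0
    return 0;
}
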
@@ -6699,7 +6608,6 @@ int CodeGenInterface::genSPtoFPdelta()
return delta;
}
-
//---------------------------------------------------------------------
// genTotalFrameSize - return the total size of the stack frame, including local size,
// callee-saved register size, etc.
@@ -6718,14 +6626,12 @@ int CodeGenInterface::genTotalFrameSize()
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
int totalFrameSize = (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) +
- compiler->compCalleeRegsPushed * REGSIZE_BYTES +
- compiler->compLclFrameSize;
+ compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
}
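
A small sketch of that arithmetic, assuming the ARM64 values MAX_REG_ARG = 8 and REGSIZE_BYTES = 8:

#include <cassert>
#include <cstdio>

static int totalFrameSize(bool isVarArgs, int calleeRegsPushed, int lclFrameSize)
{
    const int MAX_REG_ARG = 8, REGSIZE_BYTES = 8;
    int size = (isVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) + calleeRegsPushed * REGSIZE_BYTES + lclFrameSize;
    assert(size >= 0);
    return size;
}

int main()
{
    // e.g. a non-varargs frame saving fp/lr plus four other callee-saved registers, 48 bytes of locals
    printf("%d\n", totalFrameSize(false, 6, 48)); // prints 96
    return 0;
}
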
-
//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
@@ -6744,7 +6650,6 @@ int CodeGenInterface::genCallerSPtoFPdelta()
return callerSPtoFPdelta;
}
-
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
@@ -6760,7 +6665,6 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta()
return callerSPtoSPdelta;
}
-
//---------------------------------------------------------------------
// genIntrinsic - generate code for a given intrinsic
//
@@ -6770,9 +6674,8 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta()
// Return value:
// None
//
-void
-CodeGen::genIntrinsic(GenTreePtr treeNode)
-{
+void CodeGen::genIntrinsic(GenTreePtr treeNode)
+{
// Both operand and its result must be of the same floating point type.
GenTreePtr srcNode = treeNode->gtOp.gtOp1;
assert(varTypeIsFloating(srcNode));
@@ -6780,28 +6683,28 @@ CodeGen::genIntrinsic(GenTreePtr treeNode)
// Right now only Abs/Round/Sqrt are treated as math intrinsics.
//
- switch(treeNode->gtIntrinsic.gtIntrinsicId)
+ switch (treeNode->gtIntrinsic.gtIntrinsicId)
{
- case CORINFO_INTRINSIC_Abs:
- genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_fabs, emitTypeSize(treeNode), treeNode, srcNode);
- break;
+ case CORINFO_INTRINSIC_Abs:
+ genConsumeOperands(treeNode->AsOp());
+ getEmitter()->emitInsBinary(INS_fabs, emitTypeSize(treeNode), treeNode, srcNode);
+ break;
- case CORINFO_INTRINSIC_Round:
- genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_frintn, emitTypeSize(treeNode), treeNode, srcNode);
- break;
+ case CORINFO_INTRINSIC_Round:
+ genConsumeOperands(treeNode->AsOp());
+ getEmitter()->emitInsBinary(INS_frintn, emitTypeSize(treeNode), treeNode, srcNode);
+ break;
- case CORINFO_INTRINSIC_Sqrt:
- genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(INS_fsqrt, emitTypeSize(treeNode), treeNode, srcNode);
- break;
+ case CORINFO_INTRINSIC_Sqrt:
+ genConsumeOperands(treeNode->AsOp());
+ getEmitter()->emitInsBinary(INS_fsqrt, emitTypeSize(treeNode), treeNode, srcNode);
+ break;
- default:
- assert(!"genIntrinsic: Unsupported intrinsic");
- unreached();
+ default:
+ assert(!"genIntrinsic: Unsupported intrinsic");
+ unreached();
}
-
+
genProduceReg(treeNode);
}
@@ -6819,18 +6722,18 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
assert(treeNode->OperGet() == GT_PUTARG_STK);
var_types targetType = treeNode->TypeGet();
GenTreePtr source = treeNode->gtOp.gtOp1;
- emitter * emit = getEmitter();
+ emitter* emit = getEmitter();
- // This is the varNum for our store operations,
+ // This is the varNum for our store operations,
// typically this is the varNum for the Outgoing arg space
// When we are generating a tail call it will be the varNum for arg0
- unsigned varNumOut;
- unsigned argOffsetMax; // Records the maximum size of this area for assert checks
+ unsigned varNumOut;
+ unsigned argOffsetMax; // Records the maximum size of this area for assert checks
// This is the varNum for our load operations,
// only used when we have a multireg struct with a LclVar source
- unsigned varNumInp = BAD_VAR_NUM;
-
+ unsigned varNumInp = BAD_VAR_NUM;
+
// Get argument offset to use with 'varNumOut'
// Here we cross check that argument offset hasn't changed from lowering to codegen since
// we are storing arg slot number in GT_PUTARG_STK node in lowering phase.
@@ -6840,7 +6743,7 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(treeNode->AsPutArgStk()->gtCall, treeNode);
assert(curArgTabEntry);
assert(argOffsetOut == (curArgTabEntry->slotNum * TARGET_POINTER_SIZE));
-#endif // DEBUG
+#endif // DEBUG
#if FEATURE_FASTTAILCALL
bool putInIncomingArgArea = treeNode->AsPutArgStk()->putInIncomingArgArea;
@@ -6867,14 +6770,14 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
}
else
{
- varNumOut = compiler->lvaOutgoingArgSpaceVar;
+ varNumOut = compiler->lvaOutgoingArgSpaceVar;
argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
}
bool isStruct = (targetType == TYP_STRUCT) || (source->OperGet() == GT_LIST);
- if (!isStruct) // a normal non-Struct argument
+ if (!isStruct) // a normal non-Struct argument
{
- instruction storeIns = ins_Store(targetType);
+ instruction storeIns = ins_Store(targetType);
emitAttr storeAttr = emitTypeSize(targetType);
// If it is contained then source must be the integer constant zero
@@ -6890,11 +6793,11 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
emit->emitIns_S_R(storeIns, storeAttr, source->gtRegNum, varNumOut, argOffsetOut);
}
argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
-        assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing area
+        assert(argOffsetOut <= argOffsetMax);  // We can't write beyond the outgoing area
}
- else // We have some kind of a struct argument
+ else // We have some kind of a struct argument
{
- assert(source->isContained()); // We expect that this node was marked as contained in LowerArm64
+ assert(source->isContained()); // We expect that this node was marked as contained in LowerArm64
if (source->OperGet() == GT_LIST)
{
@@ -6912,31 +6815,32 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
var_types type = nextArgNode->TypeGet();
emitAttr attr = emitTypeSize(type);
- // Emit store instructions to store the registers produced by the GT_LIST into the outgoing argument area
+ // Emit store instructions to store the registers produced by the GT_LIST into the outgoing argument
+ // area
emit->emitIns_S_R(ins_Store(type), attr, reg, varNumOut, argOffsetOut);
argOffsetOut += EA_SIZE_IN_BYTES(attr);
-            assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing area
+            assert(argOffsetOut <= argOffsetMax);  // We can't write beyond the outgoing area
}
}
- else // We must have a GT_OBJ or a GT_LCL_VAR
+ else // We must have a GT_OBJ or a GT_LCL_VAR
{
noway_assert((source->OperGet() == GT_LCL_VAR) || (source->OperGet() == GT_OBJ));
var_types targetType = source->TypeGet();
- noway_assert(varTypeIsStruct(targetType));
+ noway_assert(varTypeIsStruct(targetType));
// We will copy this struct to the stack, possibly using a ldp instruction
// Setup loReg and hiReg from the internal registers that we reserved in lower.
//
- regNumber loReg = REG_NA;
- regNumber hiReg = REG_NA;
- regNumber addrReg = REG_NA;
-
+ regNumber loReg = REG_NA;
+ regNumber hiReg = REG_NA;
+ regNumber addrReg = REG_NA;
+
// In lowerArm64/TreeNodeInfoInitPutArgStk we have reserved two internal integer registers
genGetRegPairFromMask(treeNode->gtRsvdRegs, &loReg, &hiReg);
- GenTreeLclVarCommon* varNode = nullptr;
- GenTreePtr addrNode = nullptr;
+ GenTreeLclVarCommon* varNode = nullptr;
+ GenTreePtr addrNode = nullptr;
if (source->OperGet() == GT_LCL_VAR)
{
@@ -6954,7 +6858,7 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
{
// We have a GT_OBJ(GT_LCL_VAR_ADDR)
//
- // We will treat this case the same as above
+ // We will treat this case the same as above
                // (i.e. if we just had this GT_LCL_VAR directly as the source)
                // so update 'source' to point to this GT_LCL_VAR_ADDR node
// and continue to the codegen for the LCL_VAR node below
@@ -6968,9 +6872,9 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
// the xor ensures that only one of the two is setup, not both
assert((varNode != nullptr) ^ (addrNode != nullptr));
- BYTE gcPtrs[MAX_ARG_REG_COUNT] = {}; // TYPE_GC_NONE = 0
- BYTE* structGcLayout = &gcPtrs[0]; // The GC layout for the struct
- unsigned gcPtrCount; // The count of GC pointers in the struct
+ BYTE gcPtrs[MAX_ARG_REG_COUNT] = {}; // TYPE_GC_NONE = 0
+ BYTE* structGcLayout = &gcPtrs[0]; // The GC layout for the struct
+ unsigned gcPtrCount; // The count of GC pointers in the struct
int structSize;
bool isHfa;
@@ -6982,16 +6886,16 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
LclVarDsc* varDsc = &compiler->lvaTable[varNumInp];
assert(varDsc->lvType == TYP_STRUCT);
- assert(varDsc->lvOnFrame); // This struct also must live in the stack frame
- assert(!varDsc->lvRegister); // And it can't live in a register (SIMD)
-
- structSize = varDsc->lvSize(); // This yields the roundUp size, but that is fine
- // as that is how much stack is allocated for this LclVar
- isHfa = varDsc->lvIsHfa();
- gcPtrCount = varDsc->lvStructGcCount;
- structGcLayout = varDsc->lvGcLayout;
+ assert(varDsc->lvOnFrame); // This struct also must live in the stack frame
+ assert(!varDsc->lvRegister); // And it can't live in a register (SIMD)
+
+ structSize = varDsc->lvSize(); // This yields the roundUp size, but that is fine
+ // as that is how much stack is allocated for this LclVar
+ isHfa = varDsc->lvIsHfa();
+ gcPtrCount = varDsc->lvStructGcCount;
+ structGcLayout = varDsc->lvGcLayout;
}
- else // addrNode is used
+ else // addrNode is used
{
assert(addrNode != nullptr);
@@ -7006,8 +6910,8 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
gcPtrCount = compiler->info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);
}
- bool hasGCpointers = (gcPtrCount > 0); // true if there are any GC pointers in the struct
-
+ bool hasGCpointers = (gcPtrCount > 0); // true if there are any GC pointers in the struct
+
// If we have an HFA we can't have any GC pointers,
            // if not then the max size for the struct is 16 bytes
if (isHfa)
@@ -7044,9 +6948,9 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
// TODO-ARM64-CQ: Implement support for using a ldp instruction with a varNum (see emitIns_R_S)
//
- int remainingSize = structSize;
- unsigned structOffset = 0;
- unsigned nextIndex = 0;
+ int remainingSize = structSize;
+ unsigned structOffset = 0;
+ unsigned nextIndex = 0;
while (remainingSize >= 2 * TARGET_POINTER_SIZE)
{
@@ -7057,7 +6961,7 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
{
// We have GC pointers, so use two ldr instructions
//
- // We must do it this way because we can't currently pass or track
+ // We must do it this way because we can't currently pass or track
// two different emitAttr values for a ldp instruction.
// Make sure that the first load instruction does not overwrite the addrReg.
@@ -7068,25 +6972,28 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
{
                    // Load from our varNumInp source
emit->emitIns_R_S(ins_Load(type0), emitTypeSize(type0), loReg, varNumInp, 0);
- emit->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), hiReg, varNumInp, TARGET_POINTER_SIZE);
+ emit->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), hiReg, varNumInp,
+ TARGET_POINTER_SIZE);
}
else
{
// Load from our address expression source
emit->emitIns_R_R_I(ins_Load(type0), emitTypeSize(type0), loReg, addrReg, structOffset);
- emit->emitIns_R_R_I(ins_Load(type1), emitTypeSize(type1), hiReg, addrReg, structOffset + TARGET_POINTER_SIZE);
+ emit->emitIns_R_R_I(ins_Load(type1), emitTypeSize(type1), hiReg, addrReg,
+ structOffset + TARGET_POINTER_SIZE);
}
}
else // loReg == addrReg
{
- assert(varNode == nullptr); // because addrReg is REG_NA when varNode is non-null
+ assert(varNode == nullptr); // because addrReg is REG_NA when varNode is non-null
assert(hiReg != addrReg);
// Load from our address expression source
- emit->emitIns_R_R_I(ins_Load(type1), emitTypeSize(type1), hiReg, addrReg, structOffset + TARGET_POINTER_SIZE);
+ emit->emitIns_R_R_I(ins_Load(type1), emitTypeSize(type1), hiReg, addrReg,
+ structOffset + TARGET_POINTER_SIZE);
emit->emitIns_R_R_I(ins_Load(type0), emitTypeSize(type0), loReg, addrReg, structOffset);
}
}
- else // our struct has no GC pointers
+ else // our struct has no GC pointers
{
if (varNode != nullptr)
{
@@ -7096,7 +7003,7 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
}
else
{
- // Use a ldp instruction
+ // Use a ldp instruction
// Load from our address expression source
emit->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, loReg, hiReg, addrReg, structOffset);
@@ -7105,11 +7012,12 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
// Emit two store instructions to store the two registers into the outgoing argument area
emit->emitIns_S_R(ins_Store(type0), emitTypeSize(type0), loReg, varNumOut, argOffsetOut);
- emit->emitIns_S_R(ins_Store(type1), emitTypeSize(type1), hiReg, varNumOut, argOffsetOut + TARGET_POINTER_SIZE);
- argOffsetOut += (2 * TARGET_POINTER_SIZE); // We stored 16-bytes of the struct
-            assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing area
+ emit->emitIns_S_R(ins_Store(type1), emitTypeSize(type1), hiReg, varNumOut,
+ argOffsetOut + TARGET_POINTER_SIZE);
+ argOffsetOut += (2 * TARGET_POINTER_SIZE); // We stored 16-bytes of the struct
+            assert(argOffsetOut <= argOffsetMax);      // We can't write beyond the outgoing area
- remainingSize -= (2 * TARGET_POINTER_SIZE); // We loaded 16-bytes of the struct
+ remainingSize -= (2 * TARGET_POINTER_SIZE); // We loaded 16-bytes of the struct
structOffset += (2 * TARGET_POINTER_SIZE);
nextIndex += 2;
}
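
A plain C++ sketch of the chunking done by this loop and the tail handling that follows: move the struct 16 bytes at a time (one ldp, or two ldr instructions when GC pointers force different emitAttr values), then fall back to 8- and 4-byte pieces. This assumes the rounded-up struct size is a multiple of 4 and is not the emitter API:

#include <cstdint>
#include <cstdio>
#include <cstring>

static void copyStructToOutgoingArea(void* outArea, const void* src, int structSize)
{
    uint8_t*       dst = static_cast<uint8_t*>(outArea);
    const uint8_t* s   = static_cast<const uint8_t*>(src);
    int            off = 0;

    while (structSize - off >= 16) // two pointer-sized loads + two stores per iteration
    {
        std::memcpy(dst + off, s + off, 16);
        off += 16;
    }
    while (structSize - off >= 8) // 8-byte tail
    {
        std::memcpy(dst + off, s + off, 8);
        off += 8;
    }
    while (structSize - off >= 4) // 4-byte tail
    {
        std::memcpy(dst + off, s + off, 4);
        off += 4;
    }
}

int main()
{
    char src[24] = "struct-payload-example!";
    char out[24] = {};
    copyStructToOutgoingArea(out, src, sizeof(src));
    printf("%s\n", out); // prints struct-payload-example!
    return 0;
}
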
@@ -7120,7 +7028,7 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
// str x2, [sp, #16]
// str w3, [sp, #24]
//
- // When the first instruction has a loReg that is the same register as the addrReg,
+ // When the first instruction has a loReg that is the same register as the addrReg,
            // we set deferLoad to true and issue the instructions in the reverse order
// ldr x3, [x2, #8]
// ldr x2, [x2]
@@ -7130,11 +7038,11 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
var_types nextType = compiler->getJitGCType(gcPtrs[nextIndex]);
emitAttr nextAttr = emitTypeSize(nextType);
- regNumber curReg = loReg;
+ regNumber curReg = loReg;
- bool deferLoad = false;
- var_types deferType = TYP_UNKNOWN;
- emitAttr deferAttr = EA_PTRSIZE;
+ bool deferLoad = false;
+ var_types deferType = TYP_UNKNOWN;
+ emitAttr deferAttr = EA_PTRSIZE;
int deferOffset = 0;
while (remainingSize > 0)
@@ -7145,12 +7053,12 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
if ((curReg == addrReg) && (remainingSize != 0))
{
- deferLoad = true;
- deferType = nextType;
- deferAttr = emitTypeSize(nextType);
+ deferLoad = true;
+ deferType = nextType;
+ deferAttr = emitTypeSize(nextType);
deferOffset = structOffset;
}
- else // the typical case
+ else // the typical case
{
if (varNode != nullptr)
{
@@ -7165,7 +7073,7 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
// Emit a store instruction to store the register into the outgoing argument area
emit->emitIns_S_R(ins_Store(nextType), nextAttr, curReg, varNumOut, argOffsetOut);
argOffsetOut += EA_SIZE_IN_BYTES(nextAttr);
-                    assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing area
+                    assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing area
}
curReg = hiReg;
structOffset += TARGET_POINTER_SIZE;
@@ -7175,7 +7083,7 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
}
else // (remainingSize < TARGET_POINTER_SIZE)
{
- int loadSize = remainingSize;
+ int loadSize = remainingSize;
remainingSize = 0;
// We should never have to do a non-pointer sized load when we have a LclVar source
@@ -7199,10 +7107,10 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
noway_assert(loadSize == 4);
}
- instruction loadIns = ins_Load(loadType);
+ instruction loadIns = ins_Load(loadType);
emitAttr loadAttr = emitAttr(loadSize);
- // When deferLoad is false, curReg can be the same as addrReg
+ // When deferLoad is false, curReg can be the same as addrReg
// because the last instruction is allowed to overwrite addrReg.
//
noway_assert(!deferLoad || (curReg != addrReg));
@@ -7212,7 +7120,7 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
// Emit a store instruction to store the register into the outgoing argument area
emit->emitIns_S_R(ins_Store(loadType), loadAttr, curReg, varNumOut, argOffsetOut);
argOffsetOut += EA_SIZE_IN_BYTES(loadAttr);
-            assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing area
+            assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing area
}
}
@@ -7229,28 +7137,28 @@ void CodeGen::genPutArgStk(GenTreePtr treeNode)
// Emit a store instruction to store the register into the outgoing argument area
emit->emitIns_S_R(ins_Store(nextType), nextAttr, curReg, varNumOut, argOffsetOut);
argOffsetOut += EA_SIZE_IN_BYTES(nextAttr);
-            assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing area
+            assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing area
}
}
}
}
-
/*****************************************************************************
*
* Create and record GC Info for the function.
*/
-void
-CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
+void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
+ unsigned prologSize,
+ unsigned epilogSize DEBUGARG(void* codePtr))
{
genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
}
-void
-CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
+void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
{
- IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
- GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC) GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
+ IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
+ GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
+ GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder != nullptr);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
@@ -7280,22 +7188,23 @@ CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUG
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
- preservedAreaSize += REGSIZE_BYTES;
+ preservedAreaSize += REGSIZE_BYTES;
preservedAreaSize += 1; // bool for synchronized methods
}
- // Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the frame
+ // Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
+ // frame
gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
- }
+ }
#endif
-
+
gcInfoEncoder->Build();
- //GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
- //let's save the values anyway for debugging purposes
+ // GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
+ // let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
- compiler->compInfoBlkSize = 0; //not exposed by the GCEncoder interface
+ compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
/*****************************************************************************
@@ -7303,17 +7212,14 @@ CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUG
*
*/
-void CodeGen::genEmitHelperCall(unsigned helper,
- int argSize,
- emitAttr retSize,
- regNumber callTargetReg /*= REG_NA */)
+void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg /*= REG_NA */)
{
void* addr = nullptr;
void* pAddr = nullptr;
- emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
- addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
- regNumber callTarget = REG_NA;
+ emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
+ addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
+ regNumber callTarget = REG_NA;
if (addr == nullptr)
{
@@ -7331,7 +7237,7 @@ void CodeGen::genEmitHelperCall(unsigned helper,
}
regMaskTP callTargetMask = genRegMask(callTargetReg);
- regMaskTP callKillSet = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
+ regMaskTP callKillSet = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
// assert that all registers in callTargetMask are in the callKillSet
noway_assert((callTargetMask & callKillSet) == callTargetMask);
@@ -7344,22 +7250,14 @@ void CodeGen::genEmitHelperCall(unsigned helper,
callType = emitter::EC_INDIR_R;
}
- getEmitter()->emitIns_Call(callType,
- compiler->eeFindHelper(helper),
- INDEBUG_LDISASM_COMMA(nullptr)
- addr,
- argSize,
- retSize,
- EA_UNKNOWN,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- BAD_IL_OFFSET, /* IL offset */
- callTarget, /* ireg */
- REG_NA, 0, 0, /* xreg, xmul, disp */
- false, /* isJump */
+ getEmitter()->emitIns_Call(callType, compiler->eeFindHelper(helper), INDEBUG_LDISASM_COMMA(nullptr) addr, argSize,
+ retSize, EA_UNKNOWN, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, BAD_IL_OFFSET, /* IL offset */
+ callTarget, /* ireg */
+ REG_NA, 0, 0, /* xreg, xmul, disp */
+ false, /* isJump */
emitter::emitNoGChelper(helper));
-
+
regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
regTracker.rsTrashRegSet(killMask);
regTracker.rsTrashRegsForGCInterruptability();
@@ -7374,13 +7272,13 @@ void CodeGen::genEmitHelperCall(unsigned helper,
*/
// TODO-Cleanup: move to CodeGenCommon.cpp
-void CodeGen::genSetScopeInfo (unsigned which,
- UNATIVE_OFFSET startOffs,
- UNATIVE_OFFSET length,
- unsigned varNum,
- unsigned LVnum,
- bool avail,
- Compiler::siVarLoc& varLoc)
+void CodeGen::genSetScopeInfo(unsigned which,
+ UNATIVE_OFFSET startOffs,
+ UNATIVE_OFFSET length,
+ unsigned varNum,
+ unsigned LVnum,
+ bool avail,
+ Compiler::siVarLoc& varLoc)
{
/* We need to do some mapping while reporting back these variables */
@@ -7401,15 +7299,15 @@ void CodeGen::genSetScopeInfo (unsigned which,
// Hang on to this compiler->info.
- TrnslLocalVarInfo &tlvi = genTrnslLocalVarInfo[which];
+ TrnslLocalVarInfo& tlvi = genTrnslLocalVarInfo[which];
- tlvi.tlviVarNum = ilVarNum;
- tlvi.tlviLVnum = LVnum;
- tlvi.tlviName = name;
- tlvi.tlviStartPC = startOffs;
- tlvi.tlviLength = length;
- tlvi.tlviAvailable = avail;
- tlvi.tlviVarLoc = varLoc;
+ tlvi.tlviVarNum = ilVarNum;
+ tlvi.tlviLVnum = LVnum;
+ tlvi.tlviName = name;
+ tlvi.tlviStartPC = startOffs;
+ tlvi.tlviLength = length;
+ tlvi.tlviAvailable = avail;
+ tlvi.tlviVarLoc = varLoc;
#endif // DEBUG
@@ -7417,7 +7315,6 @@ void CodeGen::genSetScopeInfo (unsigned which,
}
#endif // DEBUGGING_SUPPORT
-
/*****************************************************************************
* Unit testing of the ARM64 emitter: generate a bunch of instructions into the prolog
* (it's as good a place as any), then use COMPlus_JitLateDisasm=* to see if the late
@@ -7429,7 +7326,7 @@ void CodeGen::genSetScopeInfo (unsigned which,
//#define ALL_ARM64_EMITTER_UNIT_TESTS
#if defined(DEBUG)
-void CodeGen::genArm64EmitterUnitTests()
+void CodeGen::genArm64EmitterUnitTests()
{
if (!verbose)
{
@@ -7445,7 +7342,7 @@ void CodeGen::genArm64EmitterUnitTests()
// Mark the "fake" instructions in the output.
printf("*************** In genArm64EmitterUnitTests()\n");
- emitter* theEmitter = getEmitter();
+ emitter* theEmitter = getEmitter();
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// We use this:
@@ -7459,20 +7356,20 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
// ldr/str Xt, [reg]
- theEmitter->emitIns_R_R(INS_ldr, EA_8BYTE, REG_R8, REG_R9);
- theEmitter->emitIns_R_R(INS_ldrb, EA_1BYTE, REG_R8, REG_R9);
- theEmitter->emitIns_R_R(INS_ldrh, EA_2BYTE, REG_R8, REG_R9);
- theEmitter->emitIns_R_R(INS_str, EA_8BYTE, REG_R8, REG_R9);
- theEmitter->emitIns_R_R(INS_strb, EA_1BYTE, REG_R8, REG_R9);
- theEmitter->emitIns_R_R(INS_strh, EA_2BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_ldr, EA_8BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_ldrb, EA_1BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_ldrh, EA_2BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_str, EA_8BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_strb, EA_1BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_strh, EA_2BYTE, REG_R8, REG_R9);
// ldr/str Wt, [reg]
- theEmitter->emitIns_R_R(INS_ldr, EA_4BYTE, REG_R8, REG_R9);
- theEmitter->emitIns_R_R(INS_ldrb, EA_1BYTE, REG_R8, REG_R9);
- theEmitter->emitIns_R_R(INS_ldrh, EA_2BYTE, REG_R8, REG_R9);
- theEmitter->emitIns_R_R(INS_str, EA_4BYTE, REG_R8, REG_R9);
- theEmitter->emitIns_R_R(INS_strb, EA_1BYTE, REG_R8, REG_R9);
- theEmitter->emitIns_R_R(INS_strh, EA_2BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_ldr, EA_4BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_ldrb, EA_1BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_ldrh, EA_2BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_str, EA_4BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_strb, EA_1BYTE, REG_R8, REG_R9);
+ theEmitter->emitIns_R_R(INS_strh, EA_2BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrsb, EA_4BYTE, REG_R8, REG_R9); // target Wt
theEmitter->emitIns_R_R(INS_ldrsh, EA_4BYTE, REG_R8, REG_R9); // target Wt
@@ -7480,42 +7377,42 @@ void CodeGen::genArm64EmitterUnitTests()
theEmitter->emitIns_R_R(INS_ldrsh, EA_8BYTE, REG_R8, REG_R9); // target Xt
theEmitter->emitIns_R_R(INS_ldrsw, EA_8BYTE, REG_R8, REG_R9); // target Xt
- theEmitter->emitIns_R_R_I(INS_ldurb, EA_4BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_ldurh, EA_4BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_sturb, EA_4BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_sturh, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_ldurb, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_ldurh, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_sturb, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_sturh, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursb, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursb, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursh, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursh, EA_8BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_ldur, EA_4BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_stur, EA_4BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_stur, EA_8BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_ldur, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_stur, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_stur, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursw, EA_8BYTE, REG_R8, REG_R9, 1);
// SP and ZR tests
- theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_R8, REG_SP, 1);
- theEmitter->emitIns_R_R_I(INS_ldurb, EA_8BYTE, REG_ZR, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_ldurh, EA_8BYTE, REG_ZR, REG_SP, 1);
+ theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_R8, REG_SP, 1);
+ theEmitter->emitIns_R_R_I(INS_ldurb, EA_8BYTE, REG_ZR, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_ldurh, EA_8BYTE, REG_ZR, REG_SP, 1);
// scaled
- theEmitter->emitIns_R_R_I(INS_ldrb, EA_1BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_ldrh, EA_2BYTE, REG_R8, REG_R9, 2);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 4);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 8);
+ theEmitter->emitIns_R_R_I(INS_ldrb, EA_1BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_ldrh, EA_2BYTE, REG_R8, REG_R9, 2);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 4);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 8);
// pre-/post-indexed (unscaled)
- theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 1, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 1, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 1, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_PRE_INDEX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
- // Compares
+ // Compares
//
genDefineTempLabel(genCreateTempLabel());
@@ -7553,22 +7450,21 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_R1, REG_R12);
- theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_R2, REG_R13);
- theEmitter->emitIns_R_R(INS_rbit, EA_8BYTE, REG_R3, REG_R14);
- theEmitter->emitIns_R_R(INS_rev, EA_8BYTE, REG_R4, REG_R15);
- theEmitter->emitIns_R_R(INS_rev16, EA_8BYTE, REG_R5, REG_R0);
- theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_R6, REG_R1);
-
- theEmitter->emitIns_R_R(INS_cls, EA_4BYTE, REG_R7, REG_R2);
- theEmitter->emitIns_R_R(INS_clz, EA_4BYTE, REG_R8, REG_R3);
- theEmitter->emitIns_R_R(INS_rbit, EA_4BYTE, REG_R9, REG_R4);
- theEmitter->emitIns_R_R(INS_rev, EA_4BYTE, REG_R10, REG_R5);
+ theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_R1, REG_R12);
+ theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_R2, REG_R13);
+ theEmitter->emitIns_R_R(INS_rbit, EA_8BYTE, REG_R3, REG_R14);
+ theEmitter->emitIns_R_R(INS_rev, EA_8BYTE, REG_R4, REG_R15);
+ theEmitter->emitIns_R_R(INS_rev16, EA_8BYTE, REG_R5, REG_R0);
+ theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_R6, REG_R1);
+
+ theEmitter->emitIns_R_R(INS_cls, EA_4BYTE, REG_R7, REG_R2);
+ theEmitter->emitIns_R_R(INS_clz, EA_4BYTE, REG_R8, REG_R3);
+ theEmitter->emitIns_R_R(INS_rbit, EA_4BYTE, REG_R9, REG_R4);
+ theEmitter->emitIns_R_R(INS_rev, EA_4BYTE, REG_R10, REG_R5);
theEmitter->emitIns_R_R(INS_rev16, EA_4BYTE, REG_R11, REG_R6);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
-
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_I
@@ -7577,48 +7473,48 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
// mov reg, imm(i16,hw)
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000000000001234);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000000043210000);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000567800000000);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x8765000000000000);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFFFFFFFFFF1234);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFFFFFF4321FFFF);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFF5678FFFFFFFF);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x8765FFFFFFFFFFFF);
-
- theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x00001234);
- theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x87650000);
- theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0xFFFF1234);
- theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x4567FFFF);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000000000001234);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000000043210000);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000567800000000);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x8765000000000000);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFFFFFFFFFF1234);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFFFFFF4321FFFF);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFF5678FFFFFFFF);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x8765FFFFFFFFFFFF);
+
+ theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x00001234);
+ theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x87650000);
+ theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0xFFFF1234);
+ theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x4567FFFF);
// mov reg, imm(N,r,s)
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x00FFFFF000000000);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x6666666666666666);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_SP, 0x7FFF00007FFF0000);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x5555555555555555);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xE003E003E003E003);
- theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0707070707070707);
-
- theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x00FFFFF0);
- theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x66666666);
- theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x03FFC000);
- theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x55555555);
- theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0xE003E003);
- theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x07070707);
-
- theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0xE003E003E003E003);
- theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x00FFFFF000000000);
- theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x6666666666666666);
- theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x0707070707070707);
- theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x7FFF00007FFF0000);
- theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x5555555555555555);
-
- theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0xE003E003);
- theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x00FFFFF0);
- theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x66666666);
- theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x07070707);
- theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0xFFF00000);
- theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x55555555);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x00FFFFF000000000);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x6666666666666666);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_SP, 0x7FFF00007FFF0000);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x5555555555555555);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xE003E003E003E003);
+ theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0707070707070707);
+
+ theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x00FFFFF0);
+ theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x66666666);
+ theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x03FFC000);
+ theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x55555555);
+ theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0xE003E003);
+ theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x07070707);
+
+ theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0xE003E003E003E003);
+ theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x00FFFFF000000000);
+ theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x6666666666666666);
+ theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x0707070707070707);
+ theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x7FFF00007FFF0000);
+ theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x5555555555555555);
+
+ theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0xE003E003);
+ theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x00FFFFF0);
+ theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x66666666);
+ theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x07070707);
+ theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0xFFF00000);
+ theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x55555555);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -7630,27 +7526,27 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
// tst reg, reg
- theEmitter->emitIns_R_R(INS_tst, EA_8BYTE, REG_R7, REG_R10);
+ theEmitter->emitIns_R_R(INS_tst, EA_8BYTE, REG_R7, REG_R10);
// mov reg, reg
- theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_R7, REG_R10);
- theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_R8, REG_SP);
- theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_R7, REG_R10);
+ theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_R8, REG_SP);
+ theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_SP, REG_R9);
- theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_R5, REG_R11);
- theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_R4, REG_R12);
+ theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_R5, REG_R11);
+ theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_R4, REG_R12);
theEmitter->emitIns_R_R(INS_negs, EA_8BYTE, REG_R3, REG_R13);
- theEmitter->emitIns_R_R(INS_mov, EA_4BYTE, REG_R7, REG_R10);
- theEmitter->emitIns_R_R(INS_mvn, EA_4BYTE, REG_R5, REG_R11);
- theEmitter->emitIns_R_R(INS_neg, EA_4BYTE, REG_R4, REG_R12);
+ theEmitter->emitIns_R_R(INS_mov, EA_4BYTE, REG_R7, REG_R10);
+ theEmitter->emitIns_R_R(INS_mvn, EA_4BYTE, REG_R5, REG_R11);
+ theEmitter->emitIns_R_R(INS_neg, EA_4BYTE, REG_R4, REG_R12);
theEmitter->emitIns_R_R(INS_negs, EA_4BYTE, REG_R3, REG_R13);
theEmitter->emitIns_R_R(INS_sxtb, EA_8BYTE, REG_R7, REG_R10);
theEmitter->emitIns_R_R(INS_sxth, EA_8BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_sxtw, EA_8BYTE, REG_R4, REG_R12);
- theEmitter->emitIns_R_R(INS_uxtb, EA_8BYTE, REG_R3, REG_R13); // map to Wt
- theEmitter->emitIns_R_R(INS_uxth, EA_8BYTE, REG_R2, REG_R14); // map to Wt
+ theEmitter->emitIns_R_R(INS_uxtb, EA_8BYTE, REG_R3, REG_R13); // map to Wt
+ theEmitter->emitIns_R_R(INS_uxth, EA_8BYTE, REG_R2, REG_R14); // map to Wt
theEmitter->emitIns_R_R(INS_sxtb, EA_4BYTE, REG_R7, REG_R10);
theEmitter->emitIns_R_R(INS_sxth, EA_4BYTE, REG_R5, REG_R11);
@@ -7667,8 +7563,8 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
// mov reg, imm(i16,hw)
- theEmitter->emitIns_R_I_I(INS_mov, EA_8BYTE, REG_R8, 0x1234, 0, INS_OPTS_LSL);
- theEmitter->emitIns_R_I_I(INS_mov, EA_8BYTE, REG_R8, 0x4321, 16, INS_OPTS_LSL);
+ theEmitter->emitIns_R_I_I(INS_mov, EA_8BYTE, REG_R8, 0x1234, 0, INS_OPTS_LSL);
+ theEmitter->emitIns_R_I_I(INS_mov, EA_8BYTE, REG_R8, 0x4321, 16, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movk, EA_8BYTE, REG_R8, 0x4321, 16, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movn, EA_8BYTE, REG_R8, 0x5678, 32, INS_OPTS_LSL);
@@ -7687,106 +7583,106 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R_R_I(INS_lsl, EA_8BYTE, REG_R0, REG_R0, 1);
- theEmitter->emitIns_R_R_I(INS_lsl, EA_4BYTE, REG_R9, REG_R3, 18);
- theEmitter->emitIns_R_R_I(INS_lsr, EA_8BYTE, REG_R7, REG_R0, 37);
- theEmitter->emitIns_R_R_I(INS_lsr, EA_4BYTE, REG_R0, REG_R1, 2);
- theEmitter->emitIns_R_R_I(INS_asr, EA_8BYTE, REG_R2, REG_R3, 53);
- theEmitter->emitIns_R_R_I(INS_asr, EA_4BYTE, REG_R9, REG_R3, 18);
+ theEmitter->emitIns_R_R_I(INS_lsl, EA_8BYTE, REG_R0, REG_R0, 1);
+ theEmitter->emitIns_R_R_I(INS_lsl, EA_4BYTE, REG_R9, REG_R3, 18);
+ theEmitter->emitIns_R_R_I(INS_lsr, EA_8BYTE, REG_R7, REG_R0, 37);
+ theEmitter->emitIns_R_R_I(INS_lsr, EA_4BYTE, REG_R0, REG_R1, 2);
+ theEmitter->emitIns_R_R_I(INS_asr, EA_8BYTE, REG_R2, REG_R3, 53);
+ theEmitter->emitIns_R_R_I(INS_asr, EA_4BYTE, REG_R9, REG_R3, 18);
- theEmitter->emitIns_R_R_I(INS_and, EA_8BYTE, REG_R2, REG_R3, 0x5555555555555555);
+ theEmitter->emitIns_R_R_I(INS_and, EA_8BYTE, REG_R2, REG_R3, 0x5555555555555555);
theEmitter->emitIns_R_R_I(INS_ands, EA_8BYTE, REG_R1, REG_R5, 0x6666666666666666);
- theEmitter->emitIns_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, 0x0707070707070707);
- theEmitter->emitIns_R_R_I(INS_orr, EA_8BYTE, REG_SP, REG_R3, 0xFFFC000000000000);
+ theEmitter->emitIns_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, 0x0707070707070707);
+ theEmitter->emitIns_R_R_I(INS_orr, EA_8BYTE, REG_SP, REG_R3, 0xFFFC000000000000);
theEmitter->emitIns_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, 0xE003E003);
- theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 31);
- theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 32);
- theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 63);
-
- theEmitter->emitIns_R_R_I(INS_ror, EA_4BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_ror, EA_4BYTE, REG_R8, REG_R9, 31);
-
- theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
- theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, -1);
- theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfff);
- theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, -0xfff);
- theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0x1000);
- theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
- theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
- theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
-
- theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
- theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, -1);
- theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfff);
- theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, -0xfff);
- theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0x1000);
- theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
- theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
- theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
-
- theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
- theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, -1);
- theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfff);
- theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, -0xfff);
- theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0x1000);
- theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
- theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
- theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
-
- theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
- theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, -1);
- theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfff);
- theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, -0xfff);
- theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0x1000);
- theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
- theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
- theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
-
- theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
- theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, -1);
- theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfff);
- theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, -0xfff);
- theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0x1000);
- theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
- theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
- theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
-
- theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
- theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, -1);
- theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfff);
- theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, -0xfff);
- theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0x1000);
- theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
- theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
- theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
-
- theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
- theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, -1);
- theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfff);
- theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, -0xfff);
- theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0x1000);
- theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
- theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
- theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
-
- theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
- theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, -1);
- theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfff);
- theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, -0xfff);
- theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0x1000);
- theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
- theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
- theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
+ theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 31);
+ theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 32);
+ theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 63);
+
+ theEmitter->emitIns_R_R_I(INS_ror, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_ror, EA_4BYTE, REG_R8, REG_R9, 31);
+
+ theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
+ theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, -1);
+ theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfff);
+ theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, -0xfff);
+ theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0x1000);
+ theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
+ theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
+ theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
+
+ theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
+ theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, -1);
+ theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfff);
+ theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, -0xfff);
+ theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0x1000);
+ theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
+ theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
+ theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
+
+ theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
+ theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, -1);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfff);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, -0xfff);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0x1000);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
+
+ theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
+ theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, -1);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfff);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, -0xfff);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0x1000);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
+ theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
+
+ theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
+ theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, -1);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfff);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, -0xfff);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0x1000);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
+
+ theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
+ theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, -1);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfff);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, -0xfff);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0x1000);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
+ theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
+
+ theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
+ theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, -1);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfff);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, -0xfff);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0x1000);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
+
+ theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
+ theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, -1);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfff);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, -0xfff);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0x1000);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
+ theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -7796,59 +7692,61 @@ void CodeGen::genArm64EmitterUnitTests()
//
// cmp
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0);
// CMP (shifted register)
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 31, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 32, INS_OPTS_LSR);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 33, INS_OPTS_ASR);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 31, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 32, INS_OPTS_LSR);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 33, INS_OPTS_ASR);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 21, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 22, INS_OPTS_LSR);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 23, INS_OPTS_ASR);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 21, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 22, INS_OPTS_LSR);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 23, INS_OPTS_ASR);
// TST (shifted register)
- theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 31, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 32, INS_OPTS_LSR);
- theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 33, INS_OPTS_ASR);
- theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 34, INS_OPTS_ROR);
+ theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 31, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 32, INS_OPTS_LSR);
+ theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 33, INS_OPTS_ASR);
+ theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 34, INS_OPTS_ROR);
- theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 21, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 22, INS_OPTS_LSR);
- theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 23, INS_OPTS_ASR);
- theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 24, INS_OPTS_ROR);
+ theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 21, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 22, INS_OPTS_LSR);
+ theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 23, INS_OPTS_ASR);
+ theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 24, INS_OPTS_ROR);
// CMP (extended register)
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTB);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTH);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTW); // "cmp x8, x9, UXTW"; msdis disassembles this "cmp x8,x9", which looks like an msdis issue.
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTX);
-
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTB);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTH);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTB);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTH);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTW); // "cmp x8, x9, UXTW"; msdis
+ // disassembles this "cmp x8,x9",
+ // which looks like an msdis issue.
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTX);
+
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTB);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTH);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTX);
// CMP 64-bit (extended register) and left shift
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_UXTB);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 2, INS_OPTS_UXTH);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 3, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 4, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_UXTB);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 2, INS_OPTS_UXTH);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 3, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 4, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_SXTB);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 2, INS_OPTS_SXTH);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 3, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 4, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_SXTB);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 2, INS_OPTS_SXTH);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 3, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 4, INS_OPTS_SXTX);
// CMP 32-bit (extended register) and left shift
- theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTB);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 2, INS_OPTS_UXTH);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 4, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTB);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 2, INS_OPTS_UXTH);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 4, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTB);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 2, INS_OPTS_SXTH);
- theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 4, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTB);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 2, INS_OPTS_SXTH);
+ theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 4, INS_OPTS_SXTW);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -7859,51 +7757,51 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R_R_R(INS_lsl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_lsr, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_asr, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_ror, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_adc, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_adcs, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_sbc, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_sbcs, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_udiv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_sdiv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_mneg, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_smull, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_lsl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_lsr, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_asr, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_ror, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_adc, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_adcs, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_sbc, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_sbcs, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_udiv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_sdiv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_mneg, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_smull, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smnegl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_smulh, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_umull, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_smulh, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_umull, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umnegl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_umulh, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_lslv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_lsrv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_asrv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_rorv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
-
- theEmitter->emitIns_R_R_R(INS_lsl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_lsr, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_asr, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_ror, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_adc, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_adcs, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_sbc, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_sbcs, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_udiv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_sdiv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_mul, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_mneg, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_smull, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_umulh, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_lslv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_lsrv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_asrv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_rorv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
+
+ theEmitter->emitIns_R_R_R(INS_lsl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_lsr, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_asr, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_ror, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_adc, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_adcs, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_sbc, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_sbcs, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_udiv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_sdiv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_mul, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_mneg, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_smull, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smnegl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_smulh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_umull, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_smulh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_umull, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umnegl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_umulh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_lslv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_lsrv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_asrv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
- theEmitter->emitIns_R_R_R(INS_rorv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_umulh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_lslv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_lsrv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_asrv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
+ theEmitter->emitIns_R_R_R(INS_rorv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -7914,29 +7812,29 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R_R_I_I(INS_sbfm, EA_8BYTE, REG_R2, REG_R3, 4, 39);
- theEmitter->emitIns_R_R_I_I(INS_bfm, EA_8BYTE, REG_R1, REG_R5, 20, 23);
- theEmitter->emitIns_R_R_I_I(INS_ubfm, EA_8BYTE, REG_R8, REG_R9, 36, 7);
+ theEmitter->emitIns_R_R_I_I(INS_sbfm, EA_8BYTE, REG_R2, REG_R3, 4, 39);
+ theEmitter->emitIns_R_R_I_I(INS_bfm, EA_8BYTE, REG_R1, REG_R5, 20, 23);
+ theEmitter->emitIns_R_R_I_I(INS_ubfm, EA_8BYTE, REG_R8, REG_R9, 36, 7);
- theEmitter->emitIns_R_R_I_I(INS_sbfiz, EA_8BYTE, REG_R2, REG_R3, 7, 37);
- theEmitter->emitIns_R_R_I_I(INS_bfi, EA_8BYTE, REG_R1, REG_R5, 23, 21);
- theEmitter->emitIns_R_R_I_I(INS_ubfiz, EA_8BYTE, REG_R8, REG_R9, 39, 5);
+ theEmitter->emitIns_R_R_I_I(INS_sbfiz, EA_8BYTE, REG_R2, REG_R3, 7, 37);
+ theEmitter->emitIns_R_R_I_I(INS_bfi, EA_8BYTE, REG_R1, REG_R5, 23, 21);
+ theEmitter->emitIns_R_R_I_I(INS_ubfiz, EA_8BYTE, REG_R8, REG_R9, 39, 5);
- theEmitter->emitIns_R_R_I_I(INS_sbfx, EA_8BYTE, REG_R2, REG_R3, 10, 24);
+ theEmitter->emitIns_R_R_I_I(INS_sbfx, EA_8BYTE, REG_R2, REG_R3, 10, 24);
theEmitter->emitIns_R_R_I_I(INS_bfxil, EA_8BYTE, REG_R1, REG_R5, 26, 16);
- theEmitter->emitIns_R_R_I_I(INS_ubfx, EA_8BYTE, REG_R8, REG_R9, 42, 8);
+ theEmitter->emitIns_R_R_I_I(INS_ubfx, EA_8BYTE, REG_R8, REG_R9, 42, 8);
- theEmitter->emitIns_R_R_I_I(INS_sbfm, EA_4BYTE, REG_R2, REG_R3, 4, 19);
- theEmitter->emitIns_R_R_I_I(INS_bfm, EA_4BYTE, REG_R1, REG_R5, 10, 13);
- theEmitter->emitIns_R_R_I_I(INS_ubfm, EA_4BYTE, REG_R8, REG_R9, 16, 7);
+ theEmitter->emitIns_R_R_I_I(INS_sbfm, EA_4BYTE, REG_R2, REG_R3, 4, 19);
+ theEmitter->emitIns_R_R_I_I(INS_bfm, EA_4BYTE, REG_R1, REG_R5, 10, 13);
+ theEmitter->emitIns_R_R_I_I(INS_ubfm, EA_4BYTE, REG_R8, REG_R9, 16, 7);
- theEmitter->emitIns_R_R_I_I(INS_sbfiz, EA_4BYTE, REG_R2, REG_R3, 5, 17);
- theEmitter->emitIns_R_R_I_I(INS_bfi, EA_4BYTE, REG_R1, REG_R5, 13, 11);
- theEmitter->emitIns_R_R_I_I(INS_ubfiz, EA_4BYTE, REG_R8, REG_R9, 19, 5);
+ theEmitter->emitIns_R_R_I_I(INS_sbfiz, EA_4BYTE, REG_R2, REG_R3, 5, 17);
+ theEmitter->emitIns_R_R_I_I(INS_bfi, EA_4BYTE, REG_R1, REG_R5, 13, 11);
+ theEmitter->emitIns_R_R_I_I(INS_ubfiz, EA_4BYTE, REG_R8, REG_R9, 19, 5);
- theEmitter->emitIns_R_R_I_I(INS_sbfx, EA_4BYTE, REG_R2, REG_R3, 3, 14);
- theEmitter->emitIns_R_R_I_I(INS_bfxil, EA_4BYTE, REG_R1, REG_R5, 11, 9);
- theEmitter->emitIns_R_R_I_I(INS_ubfx, EA_4BYTE, REG_R8, REG_R9, 22, 8);
+ theEmitter->emitIns_R_R_I_I(INS_sbfx, EA_4BYTE, REG_R2, REG_R3, 3, 14);
+ theEmitter->emitIns_R_R_I_I(INS_bfxil, EA_4BYTE, REG_R1, REG_R5, 11, 9);
+ theEmitter->emitIns_R_R_I_I(INS_ubfx, EA_4BYTE, REG_R8, REG_R9, 22, 8);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -7948,30 +7846,30 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
// ADD (extended register)
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTB);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTH);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTB);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTH);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTB);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTH);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTB);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTH);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTX);
// ADD (extended register) and left shift
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTB);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTH);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTB);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTH);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTB);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTH);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTB);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTH);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTX);
// ADD (shifted register)
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 31, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 32, INS_OPTS_LSR);
- theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 33, INS_OPTS_ASR);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 31, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 32, INS_OPTS_LSR);
+ theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 33, INS_OPTS_ASR);
// EXTR (extract field from register pair)
theEmitter->emitIns_R_R_R_I(INS_extr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 1);
@@ -7983,67 +7881,67 @@ void CodeGen::genArm64EmitterUnitTests()
theEmitter->emitIns_R_R_R_I(INS_extr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 31);
// SUB (extended register)
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTB);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTH);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTB);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTH);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTB);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTH);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTB);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTH);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTX);
// SUB (extended register) and left shift
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTB);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTH);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTB);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTH);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTB);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTH);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTB);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTH);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTX);
// SUB (shifted register)
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 27, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 28, INS_OPTS_LSR);
- theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 29, INS_OPTS_ASR);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 27, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 28, INS_OPTS_LSR);
+ theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 29, INS_OPTS_ASR);
// bit operations
- theEmitter->emitIns_R_R_R_I(INS_and, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_ands, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_orr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_bic, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_bics, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_eon, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_orn, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
-
- theEmitter->emitIns_R_R_R_I(INS_and, EA_8BYTE, REG_R8, REG_R9, REG_R10, 1, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_I(INS_ands, EA_8BYTE, REG_R8, REG_R9, REG_R10, 2, INS_OPTS_LSR);
- theEmitter->emitIns_R_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, REG_R10, 3, INS_OPTS_ASR);
- theEmitter->emitIns_R_R_R_I(INS_orr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_ROR);
- theEmitter->emitIns_R_R_R_I(INS_bic, EA_8BYTE, REG_R8, REG_R9, REG_R10, 5, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_I(INS_bics, EA_8BYTE, REG_R8, REG_R9, REG_R10, 6, INS_OPTS_LSR);
- theEmitter->emitIns_R_R_R_I(INS_eon, EA_8BYTE, REG_R8, REG_R9, REG_R10, 7, INS_OPTS_ASR);
- theEmitter->emitIns_R_R_R_I(INS_orn, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8, INS_OPTS_ROR);
-
- theEmitter->emitIns_R_R_R_I(INS_and, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_eor, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_orr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_bic, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_bics, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_eon, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_orn, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
-
- theEmitter->emitIns_R_R_R_I(INS_and, EA_4BYTE, REG_R8, REG_R9, REG_R10, 1, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, REG_R10, 2, INS_OPTS_LSR);
- theEmitter->emitIns_R_R_R_I(INS_eor, EA_4BYTE, REG_R8, REG_R9, REG_R10, 3, INS_OPTS_ASR);
- theEmitter->emitIns_R_R_R_I(INS_orr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_ROR);
- theEmitter->emitIns_R_R_R_I(INS_bic, EA_4BYTE, REG_R8, REG_R9, REG_R10, 5, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_I(INS_bics, EA_4BYTE, REG_R8, REG_R9, REG_R10, 6, INS_OPTS_LSR);
- theEmitter->emitIns_R_R_R_I(INS_eon, EA_4BYTE, REG_R8, REG_R9, REG_R10, 7, INS_OPTS_ASR);
- theEmitter->emitIns_R_R_R_I(INS_orn, EA_4BYTE, REG_R8, REG_R9, REG_R10, 8, INS_OPTS_ROR);
+ theEmitter->emitIns_R_R_R_I(INS_and, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ands, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_orr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_bic, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_bics, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_eon, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_orn, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+
+ theEmitter->emitIns_R_R_R_I(INS_and, EA_8BYTE, REG_R8, REG_R9, REG_R10, 1, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_I(INS_ands, EA_8BYTE, REG_R8, REG_R9, REG_R10, 2, INS_OPTS_LSR);
+ theEmitter->emitIns_R_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, REG_R10, 3, INS_OPTS_ASR);
+ theEmitter->emitIns_R_R_R_I(INS_orr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_ROR);
+ theEmitter->emitIns_R_R_R_I(INS_bic, EA_8BYTE, REG_R8, REG_R9, REG_R10, 5, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_I(INS_bics, EA_8BYTE, REG_R8, REG_R9, REG_R10, 6, INS_OPTS_LSR);
+ theEmitter->emitIns_R_R_R_I(INS_eon, EA_8BYTE, REG_R8, REG_R9, REG_R10, 7, INS_OPTS_ASR);
+ theEmitter->emitIns_R_R_R_I(INS_orn, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8, INS_OPTS_ROR);
+
+ theEmitter->emitIns_R_R_R_I(INS_and, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_eor, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_orr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_bic, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_bics, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_eon, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_orn, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
+
+ theEmitter->emitIns_R_R_R_I(INS_and, EA_4BYTE, REG_R8, REG_R9, REG_R10, 1, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, REG_R10, 2, INS_OPTS_LSR);
+ theEmitter->emitIns_R_R_R_I(INS_eor, EA_4BYTE, REG_R8, REG_R9, REG_R10, 3, INS_OPTS_ASR);
+ theEmitter->emitIns_R_R_R_I(INS_orr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_ROR);
+ theEmitter->emitIns_R_R_R_I(INS_bic, EA_4BYTE, REG_R8, REG_R9, REG_R10, 5, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_I(INS_bics, EA_4BYTE, REG_R8, REG_R9, REG_R10, 6, INS_OPTS_LSR);
+ theEmitter->emitIns_R_R_R_I(INS_eon, EA_4BYTE, REG_R8, REG_R9, REG_R10, 7, INS_OPTS_ASR);
+ theEmitter->emitIns_R_R_R_I(INS_orn, EA_4BYTE, REG_R8, REG_R9, REG_R10, 8, INS_OPTS_ROR);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -8052,46 +7950,46 @@ void CodeGen::genArm64EmitterUnitTests()
// R_R_R_I -- load/store pair
//
- theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8);
- theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8);
-
- theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
- theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 8);
- theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 8);
-
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
-
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 16);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 16);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
-
- theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16);
- theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8);
+ theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8);
+
+ theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
+ theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 8);
+ theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 8);
+
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
+
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 16);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 16);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
+
+ theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16);
+ theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
// SP and ZR tests
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_ZR, REG_R1, REG_SP, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R0, REG_ZR, REG_SP, 16);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_R1, REG_SP, 0);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R0, REG_ZR, REG_SP, 16);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_SP, 16, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_R8, 16, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_ZR, REG_R1, REG_SP, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R0, REG_ZR, REG_SP, 16);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_R1, REG_SP, 0);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R0, REG_ZR, REG_SP, 16);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_SP, 16, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_R8, 16, INS_OPTS_PRE_INDEX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -8103,51 +8001,51 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
// LDR (register)
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
-
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
-
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
-
- theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
+
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
+
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
+
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
@@ -8160,7 +8058,7 @@ void CodeGen::genArm64EmitterUnitTests()
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
@@ -8178,47 +8076,47 @@ void CodeGen::genArm64EmitterUnitTests()
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
// STR (register)
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
-
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
-
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
-
- theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
+
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
+
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
+
+ theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -8229,19 +8127,19 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R_R_R_R(INS_madd, EA_4BYTE, REG_R0, REG_R12, REG_R27, REG_R10);
- theEmitter->emitIns_R_R_R_R(INS_msub, EA_4BYTE, REG_R1, REG_R13, REG_R28, REG_R11);
- theEmitter->emitIns_R_R_R_R(INS_smaddl, EA_4BYTE, REG_R2, REG_R14, REG_R0, REG_R12);
- theEmitter->emitIns_R_R_R_R(INS_smsubl, EA_4BYTE, REG_R3, REG_R15, REG_R1, REG_R13);
- theEmitter->emitIns_R_R_R_R(INS_umaddl, EA_4BYTE, REG_R4, REG_R19, REG_R2, REG_R14);
- theEmitter->emitIns_R_R_R_R(INS_umsubl, EA_4BYTE, REG_R5, REG_R20, REG_R3, REG_R15);
+ theEmitter->emitIns_R_R_R_R(INS_madd, EA_4BYTE, REG_R0, REG_R12, REG_R27, REG_R10);
+ theEmitter->emitIns_R_R_R_R(INS_msub, EA_4BYTE, REG_R1, REG_R13, REG_R28, REG_R11);
+ theEmitter->emitIns_R_R_R_R(INS_smaddl, EA_4BYTE, REG_R2, REG_R14, REG_R0, REG_R12);
+ theEmitter->emitIns_R_R_R_R(INS_smsubl, EA_4BYTE, REG_R3, REG_R15, REG_R1, REG_R13);
+ theEmitter->emitIns_R_R_R_R(INS_umaddl, EA_4BYTE, REG_R4, REG_R19, REG_R2, REG_R14);
+ theEmitter->emitIns_R_R_R_R(INS_umsubl, EA_4BYTE, REG_R5, REG_R20, REG_R3, REG_R15);
- theEmitter->emitIns_R_R_R_R(INS_madd, EA_8BYTE, REG_R6, REG_R21, REG_R4, REG_R19);
- theEmitter->emitIns_R_R_R_R(INS_msub, EA_8BYTE, REG_R7, REG_R22, REG_R5, REG_R20);
- theEmitter->emitIns_R_R_R_R(INS_smaddl, EA_8BYTE, REG_R8, REG_R23, REG_R6, REG_R21);
- theEmitter->emitIns_R_R_R_R(INS_smsubl, EA_8BYTE, REG_R9, REG_R24, REG_R7, REG_R22);
- theEmitter->emitIns_R_R_R_R(INS_umaddl, EA_8BYTE, REG_R10, REG_R25, REG_R8, REG_R23);
- theEmitter->emitIns_R_R_R_R(INS_umsubl, EA_8BYTE, REG_R11, REG_R26, REG_R9, REG_R24);
+ theEmitter->emitIns_R_R_R_R(INS_madd, EA_8BYTE, REG_R6, REG_R21, REG_R4, REG_R19);
+ theEmitter->emitIns_R_R_R_R(INS_msub, EA_8BYTE, REG_R7, REG_R22, REG_R5, REG_R20);
+ theEmitter->emitIns_R_R_R_R(INS_smaddl, EA_8BYTE, REG_R8, REG_R23, REG_R6, REG_R21);
+ theEmitter->emitIns_R_R_R_R(INS_smsubl, EA_8BYTE, REG_R9, REG_R24, REG_R7, REG_R22);
+ theEmitter->emitIns_R_R_R_R(INS_umaddl, EA_8BYTE, REG_R10, REG_R25, REG_R8, REG_R23);
+ theEmitter->emitIns_R_R_R_R(INS_umsubl, EA_8BYTE, REG_R11, REG_R26, REG_R9, REG_R24);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -8315,19 +8213,19 @@ void CodeGen::genArm64EmitterUnitTests()
// csinc reg, reg, reg, cond
// csinv reg, reg, reg, cond
// csneg reg, reg, reg, cond
- theEmitter->emitIns_R_R_R_COND(INS_csel, EA_8BYTE, REG_R0, REG_R4, REG_R8, INS_COND_EQ); // eq
+ theEmitter->emitIns_R_R_R_COND(INS_csel, EA_8BYTE, REG_R0, REG_R4, REG_R8, INS_COND_EQ); // eq
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_4BYTE, REG_R1, REG_R5, REG_R9, INS_COND_NE); // ne
theEmitter->emitIns_R_R_R_COND(INS_csinv, EA_4BYTE, REG_R2, REG_R6, REG_R0, INS_COND_HS); // hs
theEmitter->emitIns_R_R_R_COND(INS_csneg, EA_8BYTE, REG_R3, REG_R7, REG_R1, INS_COND_LO); // lo
- theEmitter->emitIns_R_R_R_COND(INS_csel, EA_4BYTE, REG_R4, REG_R8, REG_R2, INS_COND_MI); // mi
+ theEmitter->emitIns_R_R_R_COND(INS_csel, EA_4BYTE, REG_R4, REG_R8, REG_R2, INS_COND_MI); // mi
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_8BYTE, REG_R5, REG_R9, REG_R3, INS_COND_PL); // pl
theEmitter->emitIns_R_R_R_COND(INS_csinv, EA_8BYTE, REG_R6, REG_R0, REG_R4, INS_COND_VS); // vs
theEmitter->emitIns_R_R_R_COND(INS_csneg, EA_4BYTE, REG_R7, REG_R1, REG_R5, INS_COND_VC); // vc
- theEmitter->emitIns_R_R_R_COND(INS_csel, EA_8BYTE, REG_R8, REG_R2, REG_R6, INS_COND_HI); // hi
+ theEmitter->emitIns_R_R_R_COND(INS_csel, EA_8BYTE, REG_R8, REG_R2, REG_R6, INS_COND_HI); // hi
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_4BYTE, REG_R9, REG_R3, REG_R7, INS_COND_LS); // ls
theEmitter->emitIns_R_R_R_COND(INS_csinv, EA_4BYTE, REG_R0, REG_R4, REG_R8, INS_COND_GE); // ge
theEmitter->emitIns_R_R_R_COND(INS_csneg, EA_8BYTE, REG_R2, REG_R5, REG_R9, INS_COND_LT); // lt
- theEmitter->emitIns_R_R_R_COND(INS_csel, EA_4BYTE, REG_R2, REG_R6, REG_R0, INS_COND_GT); // gt
+ theEmitter->emitIns_R_R_R_COND(INS_csel, EA_4BYTE, REG_R2, REG_R6, REG_R0, INS_COND_GT); // gt
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_8BYTE, REG_R3, REG_R7, REG_R1, INS_COND_LE); // le
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -8337,84 +8235,84 @@ void CodeGen::genArm64EmitterUnitTests()
//
// ccmp reg1, reg2, nzcv, cond
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, REG_R3, INS_FLAGS_V, INS_COND_EQ); // eq
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, REG_R2, INS_FLAGS_C, INS_COND_NE); // ne
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, REG_R1, INS_FLAGS_Z, INS_COND_HS); // hs
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, REG_R0, INS_FLAGS_N, INS_COND_LO); // lo
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, REG_R3, INS_FLAGS_CV, INS_COND_MI); // mi
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, REG_R2, INS_FLAGS_ZV, INS_COND_PL); // pl
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, REG_R1, INS_FLAGS_ZC, INS_COND_VS); // vs
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, REG_R0, INS_FLAGS_NV, INS_COND_VC); // vc
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, REG_R3, INS_FLAGS_NC, INS_COND_HI); // hi
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, REG_R2, INS_FLAGS_NZ, INS_COND_LS); // ls
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, REG_R3, INS_FLAGS_V, INS_COND_EQ); // eq
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, REG_R2, INS_FLAGS_C, INS_COND_NE); // ne
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, REG_R1, INS_FLAGS_Z, INS_COND_HS); // hs
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, REG_R0, INS_FLAGS_N, INS_COND_LO); // lo
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, REG_R3, INS_FLAGS_CV, INS_COND_MI); // mi
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, REG_R2, INS_FLAGS_ZV, INS_COND_PL); // pl
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, REG_R1, INS_FLAGS_ZC, INS_COND_VS); // vs
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, REG_R0, INS_FLAGS_NV, INS_COND_VC); // vc
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, REG_R3, INS_FLAGS_NC, INS_COND_HI); // hi
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, REG_R2, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R9, REG_R1, INS_FLAGS_NONE, INS_COND_GE); // ge
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, REG_R0, INS_FLAGS_NZV, INS_COND_LT); // lt
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, REG_R3, INS_FLAGS_NZC, INS_COND_GT); // gt
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, REG_R0, INS_FLAGS_NZV, INS_COND_LT); // lt
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, REG_R3, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, REG_R2, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmp reg1, imm, nzcv, cond
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, 3, INS_FLAGS_V, INS_COND_EQ); // eq
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, 2, INS_FLAGS_C, INS_COND_NE); // ne
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, 1, INS_FLAGS_Z, INS_COND_HS); // hs
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, 0, INS_FLAGS_N, INS_COND_LO); // lo
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, 31, INS_FLAGS_CV, INS_COND_MI); // mi
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, 28, INS_FLAGS_ZV, INS_COND_PL); // pl
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, 25, INS_FLAGS_ZC, INS_COND_VS); // vs
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, 22, INS_FLAGS_NV, INS_COND_VC); // vc
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, 19, INS_FLAGS_NC, INS_COND_HI); // hi
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, 16, INS_FLAGS_NZ, INS_COND_LS); // ls
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, 3, INS_FLAGS_V, INS_COND_EQ); // eq
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, 2, INS_FLAGS_C, INS_COND_NE); // ne
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, 1, INS_FLAGS_Z, INS_COND_HS); // hs
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, 0, INS_FLAGS_N, INS_COND_LO); // lo
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, 31, INS_FLAGS_CV, INS_COND_MI); // mi
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, 28, INS_FLAGS_ZV, INS_COND_PL); // pl
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, 25, INS_FLAGS_ZC, INS_COND_VS); // vs
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, 22, INS_FLAGS_NV, INS_COND_VC); // vc
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, 19, INS_FLAGS_NC, INS_COND_HI); // hi
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, 16, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R9, 13, INS_FLAGS_NONE, INS_COND_GE); // ge
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, 10, INS_FLAGS_NZV, INS_COND_LT); // lt
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, 7, INS_FLAGS_NZC, INS_COND_GT); // gt
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, 4, INS_FLAGS_NZCV, INS_COND_LE); // le
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, 10, INS_FLAGS_NZV, INS_COND_LT); // lt
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, 7, INS_FLAGS_NZC, INS_COND_GT); // gt
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, 4, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmp reg1, imm, nzcv, cond -- encoded as ccmn
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, -3, INS_FLAGS_V, INS_COND_EQ); // eq
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, -2, INS_FLAGS_C, INS_COND_NE); // ne
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, -1, INS_FLAGS_Z, INS_COND_HS); // hs
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, -5, INS_FLAGS_N, INS_COND_LO); // lo
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, -31, INS_FLAGS_CV, INS_COND_MI); // mi
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, -28, INS_FLAGS_ZV, INS_COND_PL); // pl
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, -25, INS_FLAGS_ZC, INS_COND_VS); // vs
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, -22, INS_FLAGS_NV, INS_COND_VC); // vc
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, -19, INS_FLAGS_NC, INS_COND_HI); // hi
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, -16, INS_FLAGS_NZ, INS_COND_LS); // ls
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, -3, INS_FLAGS_V, INS_COND_EQ); // eq
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, -2, INS_FLAGS_C, INS_COND_NE); // ne
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, -1, INS_FLAGS_Z, INS_COND_HS); // hs
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, -5, INS_FLAGS_N, INS_COND_LO); // lo
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, -31, INS_FLAGS_CV, INS_COND_MI); // mi
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, -28, INS_FLAGS_ZV, INS_COND_PL); // pl
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, -25, INS_FLAGS_ZC, INS_COND_VS); // vs
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, -22, INS_FLAGS_NV, INS_COND_VC); // vc
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, -19, INS_FLAGS_NC, INS_COND_HI); // hi
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, -16, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R9, -13, INS_FLAGS_NONE, INS_COND_GE); // ge
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, -10, INS_FLAGS_NZV, INS_COND_LT); // lt
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, -7, INS_FLAGS_NZC, INS_COND_GT); // gt
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, -4, INS_FLAGS_NZCV, INS_COND_LE); // le
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, -10, INS_FLAGS_NZV, INS_COND_LT); // lt
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, -7, INS_FLAGS_NZC, INS_COND_GT); // gt
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, -4, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmn reg1, reg2, nzcv, cond
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R9, REG_R3, INS_FLAGS_V, INS_COND_EQ); // eq
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R8, REG_R2, INS_FLAGS_C, INS_COND_NE); // ne
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R7, REG_R1, INS_FLAGS_Z, INS_COND_HS); // hs
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R6, REG_R0, INS_FLAGS_N, INS_COND_LO); // lo
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R5, REG_R3, INS_FLAGS_CV, INS_COND_MI); // mi
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R4, REG_R2, INS_FLAGS_ZV, INS_COND_PL); // pl
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R3, REG_R1, INS_FLAGS_ZC, INS_COND_VS); // vs
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R2, REG_R0, INS_FLAGS_NV, INS_COND_VC); // vc
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R1, REG_R3, INS_FLAGS_NC, INS_COND_HI); // hi
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R0, REG_R2, INS_FLAGS_NZ, INS_COND_LS); // ls
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R9, REG_R3, INS_FLAGS_V, INS_COND_EQ); // eq
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R8, REG_R2, INS_FLAGS_C, INS_COND_NE); // ne
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R7, REG_R1, INS_FLAGS_Z, INS_COND_HS); // hs
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R6, REG_R0, INS_FLAGS_N, INS_COND_LO); // lo
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R5, REG_R3, INS_FLAGS_CV, INS_COND_MI); // mi
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R4, REG_R2, INS_FLAGS_ZV, INS_COND_PL); // pl
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R3, REG_R1, INS_FLAGS_ZC, INS_COND_VS); // vs
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R2, REG_R0, INS_FLAGS_NV, INS_COND_VC); // vc
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R1, REG_R3, INS_FLAGS_NC, INS_COND_HI); // hi
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R0, REG_R2, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R9, REG_R1, INS_FLAGS_NONE, INS_COND_GE); // ge
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R8, REG_R0, INS_FLAGS_NZV, INS_COND_LT); // lt
- theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R7, REG_R3, INS_FLAGS_NZC, INS_COND_GT); // gt
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R8, REG_R0, INS_FLAGS_NZV, INS_COND_LT); // lt
+ theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R7, REG_R3, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R6, REG_R2, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmn reg1, imm, nzcv, cond
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R9, 3, INS_FLAGS_V, INS_COND_EQ); // eq
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R8, 2, INS_FLAGS_C, INS_COND_NE); // ne
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R7, 1, INS_FLAGS_Z, INS_COND_HS); // hs
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R6, 0, INS_FLAGS_N, INS_COND_LO); // lo
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R5, 31, INS_FLAGS_CV, INS_COND_MI); // mi
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R4, 28, INS_FLAGS_ZV, INS_COND_PL); // pl
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R3, 25, INS_FLAGS_ZC, INS_COND_VS); // vs
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R2, 22, INS_FLAGS_NV, INS_COND_VC); // vc
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R1, 19, INS_FLAGS_NC, INS_COND_HI); // hi
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R0, 16, INS_FLAGS_NZ, INS_COND_LS); // ls
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R9, 3, INS_FLAGS_V, INS_COND_EQ); // eq
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R8, 2, INS_FLAGS_C, INS_COND_NE); // ne
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R7, 1, INS_FLAGS_Z, INS_COND_HS); // hs
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R6, 0, INS_FLAGS_N, INS_COND_LO); // lo
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R5, 31, INS_FLAGS_CV, INS_COND_MI); // mi
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R4, 28, INS_FLAGS_ZV, INS_COND_PL); // pl
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R3, 25, INS_FLAGS_ZC, INS_COND_VS); // vs
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R2, 22, INS_FLAGS_NV, INS_COND_VC); // vc
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R1, 19, INS_FLAGS_NC, INS_COND_HI); // hi
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R0, 16, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R9, 13, INS_FLAGS_NONE, INS_COND_GE); // ge
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R8, 10, INS_FLAGS_NZV, INS_COND_LT); // lt
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R7, 7, INS_FLAGS_NZC, INS_COND_GT); // gt
- theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R6, 4, INS_FLAGS_NZCV, INS_COND_LE); // le
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R8, 10, INS_FLAGS_NZV, INS_COND_LT); // lt
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R7, 7, INS_FLAGS_NZC, INS_COND_GT); // gt
+ theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R6, 4, INS_FLAGS_NZCV, INS_COND_LE); // le
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -8425,7 +8323,7 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R(INS_br, EA_PTRSIZE, REG_R8);
+ theEmitter->emitIns_R(INS_br, EA_PTRSIZE, REG_R8);
theEmitter->emitIns_R(INS_blr, EA_PTRSIZE, REG_R9);
theEmitter->emitIns_R(INS_ret, EA_PTRSIZE, REG_R8);
theEmitter->emitIns_R(INS_ret, EA_PTRSIZE, REG_LR);
@@ -8474,165 +8372,165 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
// ldr/str Vt, [reg]
- theEmitter->emitIns_R_R(INS_ldr, EA_8BYTE, REG_V1, REG_R9);
- theEmitter->emitIns_R_R(INS_str, EA_8BYTE, REG_V2, REG_R8);
- theEmitter->emitIns_R_R(INS_ldr, EA_4BYTE, REG_V3, REG_R7);
- theEmitter->emitIns_R_R(INS_str, EA_4BYTE, REG_V4, REG_R6);
- theEmitter->emitIns_R_R(INS_ldr, EA_2BYTE, REG_V5, REG_R5);
- theEmitter->emitIns_R_R(INS_str, EA_2BYTE, REG_V6, REG_R4);
- theEmitter->emitIns_R_R(INS_ldr, EA_1BYTE, REG_V7, REG_R3);
- theEmitter->emitIns_R_R(INS_str, EA_1BYTE, REG_V8, REG_R2);
- theEmitter->emitIns_R_R(INS_ldr, EA_16BYTE, REG_V9, REG_R1);
- theEmitter->emitIns_R_R(INS_str, EA_16BYTE, REG_V10, REG_R0);
+ theEmitter->emitIns_R_R(INS_ldr, EA_8BYTE, REG_V1, REG_R9);
+ theEmitter->emitIns_R_R(INS_str, EA_8BYTE, REG_V2, REG_R8);
+ theEmitter->emitIns_R_R(INS_ldr, EA_4BYTE, REG_V3, REG_R7);
+ theEmitter->emitIns_R_R(INS_str, EA_4BYTE, REG_V4, REG_R6);
+ theEmitter->emitIns_R_R(INS_ldr, EA_2BYTE, REG_V5, REG_R5);
+ theEmitter->emitIns_R_R(INS_str, EA_2BYTE, REG_V6, REG_R4);
+ theEmitter->emitIns_R_R(INS_ldr, EA_1BYTE, REG_V7, REG_R3);
+ theEmitter->emitIns_R_R(INS_str, EA_1BYTE, REG_V8, REG_R2);
+ theEmitter->emitIns_R_R(INS_ldr, EA_16BYTE, REG_V9, REG_R1);
+ theEmitter->emitIns_R_R(INS_str, EA_16BYTE, REG_V10, REG_R0);
// ldr/str Vt, [reg+cns] -- scaled
- theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 2);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 4);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 8);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 16);
-
- theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V7, REG_R10, 1);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V7, REG_R10, 2);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V7, REG_R10, 4);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V7, REG_R10, 8);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V7, REG_R10, 16);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 2);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 4);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 8);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 16);
+
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V7, REG_R10, 1);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V7, REG_R10, 2);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V7, REG_R10, 4);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V7, REG_R10, 8);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V7, REG_R10, 16);
// ldr/str Vt, [reg],cns -- post-indexed (unscaled)
// ldr/str Vt, [reg+cns]! -- pre-indexed (unscaled)
- theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
-
- theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
-
- theEmitter->emitIns_R_R_I(INS_str, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_I(INS_str, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_I(INS_str, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_I(INS_str, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_I(INS_str, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
-
- theEmitter->emitIns_R_R_I(INS_str, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_I(INS_str, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_I(INS_str, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_I(INS_str, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_I(INS_str, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
-
- theEmitter->emitIns_R_R_I(INS_ldur, EA_1BYTE, REG_V8, REG_R9, 2);
- theEmitter->emitIns_R_R_I(INS_ldur, EA_2BYTE, REG_V8, REG_R9, 3);
- theEmitter->emitIns_R_R_I(INS_ldur, EA_4BYTE, REG_V8, REG_R9, 5);
- theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_V8, REG_R9, 9);
- theEmitter->emitIns_R_R_I(INS_ldur, EA_16BYTE, REG_V8, REG_R9, 17);
-
- theEmitter->emitIns_R_R_I(INS_stur, EA_1BYTE, REG_V7, REG_R10, 2);
- theEmitter->emitIns_R_R_I(INS_stur, EA_2BYTE, REG_V7, REG_R10, 3);
- theEmitter->emitIns_R_R_I(INS_stur, EA_4BYTE, REG_V7, REG_R10, 5);
- theEmitter->emitIns_R_R_I(INS_stur, EA_8BYTE, REG_V7, REG_R10, 9);
- theEmitter->emitIns_R_R_I(INS_stur, EA_16BYTE, REG_V7, REG_R10, 17);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
+
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+
+ theEmitter->emitIns_R_R_I(INS_str, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_I(INS_str, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_I(INS_str, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_I(INS_str, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_I(INS_str, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
+
+ theEmitter->emitIns_R_R_I(INS_str, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_I(INS_str, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_I(INS_str, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_I(INS_str, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_I(INS_str, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
+
+ theEmitter->emitIns_R_R_I(INS_ldur, EA_1BYTE, REG_V8, REG_R9, 2);
+ theEmitter->emitIns_R_R_I(INS_ldur, EA_2BYTE, REG_V8, REG_R9, 3);
+ theEmitter->emitIns_R_R_I(INS_ldur, EA_4BYTE, REG_V8, REG_R9, 5);
+ theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_V8, REG_R9, 9);
+ theEmitter->emitIns_R_R_I(INS_ldur, EA_16BYTE, REG_V8, REG_R9, 17);
+
+ theEmitter->emitIns_R_R_I(INS_stur, EA_1BYTE, REG_V7, REG_R10, 2);
+ theEmitter->emitIns_R_R_I(INS_stur, EA_2BYTE, REG_V7, REG_R10, 3);
+ theEmitter->emitIns_R_R_I(INS_stur, EA_4BYTE, REG_V7, REG_R10, 5);
+ theEmitter->emitIns_R_R_I(INS_stur, EA_8BYTE, REG_V7, REG_R10, 9);
+ theEmitter->emitIns_R_R_I(INS_stur, EA_16BYTE, REG_V7, REG_R10, 17);
// load/store pair
- theEmitter->emitIns_R_R_R (INS_ldnp, EA_8BYTE, REG_V0, REG_V1, REG_R10);
- theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_V1, REG_V2, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_V2, REG_V3, REG_R10, 8);
- theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_V3, REG_V4, REG_R10, 24);
-
- theEmitter->emitIns_R_R_R (INS_ldnp, EA_4BYTE, REG_V4, REG_V5, REG_SP);
- theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_V5, REG_V6, REG_SP, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_V6, REG_V7, REG_SP, 4);
- theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_V7, REG_V8, REG_SP, 12);
-
- theEmitter->emitIns_R_R_R (INS_ldnp, EA_16BYTE, REG_V8, REG_V9, REG_R10);
- theEmitter->emitIns_R_R_R_I(INS_stnp, EA_16BYTE, REG_V9, REG_V10, REG_R10, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_16BYTE, REG_V10, REG_V11, REG_R10, 16);
- theEmitter->emitIns_R_R_R_I(INS_stnp, EA_16BYTE, REG_V11, REG_V12, REG_R10, 48);
-
- theEmitter->emitIns_R_R_R (INS_ldp, EA_8BYTE, REG_V0, REG_V1, REG_R10);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V1, REG_V2, REG_SP, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V2, REG_V3, REG_SP, 8);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V3, REG_V4, REG_R10, 16);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V4, REG_V5, REG_R10, 24, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V5, REG_V6, REG_SP, 32, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V6, REG_V7, REG_SP, 40, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V7, REG_V8, REG_R10, 48, INS_OPTS_PRE_INDEX);
-
- theEmitter->emitIns_R_R_R (INS_ldp, EA_4BYTE, REG_V0, REG_V1, REG_R10);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V1, REG_V2, REG_SP, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V2, REG_V3, REG_SP, 4);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V3, REG_V4, REG_R10, 8);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V4, REG_V5, REG_R10, 12, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V5, REG_V6, REG_SP, 16, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V6, REG_V7, REG_SP, 20, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V7, REG_V8, REG_R10, 24, INS_OPTS_PRE_INDEX);
-
- theEmitter->emitIns_R_R_R (INS_ldp, EA_16BYTE, REG_V0, REG_V1, REG_R10);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V1, REG_V2, REG_SP, 0);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V2, REG_V3, REG_SP, 16);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V3, REG_V4, REG_R10, 32);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V4, REG_V5, REG_R10, 48, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V5, REG_V6, REG_SP, 64, INS_OPTS_POST_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V6, REG_V7, REG_SP, 80, INS_OPTS_PRE_INDEX);
- theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V7, REG_V8, REG_R10, 96, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_R(INS_ldnp, EA_8BYTE, REG_V0, REG_V1, REG_R10);
+ theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_V1, REG_V2, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_V2, REG_V3, REG_R10, 8);
+ theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_V3, REG_V4, REG_R10, 24);
+
+ theEmitter->emitIns_R_R_R(INS_ldnp, EA_4BYTE, REG_V4, REG_V5, REG_SP);
+ theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_V5, REG_V6, REG_SP, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_V6, REG_V7, REG_SP, 4);
+ theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_V7, REG_V8, REG_SP, 12);
+
+ theEmitter->emitIns_R_R_R(INS_ldnp, EA_16BYTE, REG_V8, REG_V9, REG_R10);
+ theEmitter->emitIns_R_R_R_I(INS_stnp, EA_16BYTE, REG_V9, REG_V10, REG_R10, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_16BYTE, REG_V10, REG_V11, REG_R10, 16);
+ theEmitter->emitIns_R_R_R_I(INS_stnp, EA_16BYTE, REG_V11, REG_V12, REG_R10, 48);
+
+ theEmitter->emitIns_R_R_R(INS_ldp, EA_8BYTE, REG_V0, REG_V1, REG_R10);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V1, REG_V2, REG_SP, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V2, REG_V3, REG_SP, 8);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V3, REG_V4, REG_R10, 16);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V4, REG_V5, REG_R10, 24, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V5, REG_V6, REG_SP, 32, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V6, REG_V7, REG_SP, 40, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V7, REG_V8, REG_R10, 48, INS_OPTS_PRE_INDEX);
+
+ theEmitter->emitIns_R_R_R(INS_ldp, EA_4BYTE, REG_V0, REG_V1, REG_R10);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V1, REG_V2, REG_SP, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V2, REG_V3, REG_SP, 4);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V3, REG_V4, REG_R10, 8);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V4, REG_V5, REG_R10, 12, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V5, REG_V6, REG_SP, 16, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V6, REG_V7, REG_SP, 20, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V7, REG_V8, REG_R10, 24, INS_OPTS_PRE_INDEX);
+
+ theEmitter->emitIns_R_R_R(INS_ldp, EA_16BYTE, REG_V0, REG_V1, REG_R10);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V1, REG_V2, REG_SP, 0);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V2, REG_V3, REG_SP, 16);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V3, REG_V4, REG_R10, 32);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V4, REG_V5, REG_R10, 48, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V5, REG_V6, REG_SP, 64, INS_OPTS_POST_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V6, REG_V7, REG_SP, 80, INS_OPTS_PRE_INDEX);
+ theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V7, REG_V8, REG_R10, 96, INS_OPTS_PRE_INDEX);
// LDR (register)
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V1, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 3);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
-
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V1, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 2);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
-
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V1, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 4);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 4);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 4);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 4);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 4);
-
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V1, REG_SP, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 1);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
-
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V1, REG_R7, REG_R9);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V2, REG_SP, REG_R9, INS_OPTS_SXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_UXTW);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V4, REG_SP, REG_R9, INS_OPTS_SXTX);
- theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V1, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 3);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
+
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V1, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 2);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
+
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V1, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 4);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 4);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 4);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 4);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 4);
+
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V1, REG_SP, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 1);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
+
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V1, REG_R7, REG_R9);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V2, REG_SP, REG_R9, INS_OPTS_SXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_UXTW);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V4, REG_SP, REG_R9, INS_OPTS_SXTX);
+ theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_UXTX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -8642,79 +8540,79 @@ void CodeGen::genArm64EmitterUnitTests()
//
// mov vector to vector
- theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_mov, EA_16BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_mov, EA_16BYTE, REG_V2, REG_V3);
- theEmitter->emitIns_R_R(INS_mov, EA_4BYTE, REG_V12, REG_V13);
- theEmitter->emitIns_R_R(INS_mov, EA_2BYTE, REG_V14, REG_V15);
- theEmitter->emitIns_R_R(INS_mov, EA_1BYTE, REG_V16, REG_V17);
+ theEmitter->emitIns_R_R(INS_mov, EA_4BYTE, REG_V12, REG_V13);
+ theEmitter->emitIns_R_R(INS_mov, EA_2BYTE, REG_V14, REG_V15);
+ theEmitter->emitIns_R_R(INS_mov, EA_1BYTE, REG_V16, REG_V17);
// mov vector to general
- theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_R0, REG_V4);
- theEmitter->emitIns_R_R(INS_mov, EA_4BYTE, REG_R1, REG_V5);
- theEmitter->emitIns_R_R(INS_mov, EA_2BYTE, REG_R2, REG_V6);
- theEmitter->emitIns_R_R(INS_mov, EA_1BYTE, REG_R3, REG_V7);
+ theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_R0, REG_V4);
+ theEmitter->emitIns_R_R(INS_mov, EA_4BYTE, REG_R1, REG_V5);
+ theEmitter->emitIns_R_R(INS_mov, EA_2BYTE, REG_R2, REG_V6);
+ theEmitter->emitIns_R_R(INS_mov, EA_1BYTE, REG_R3, REG_V7);
// mov general to vector
- theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_V8, REG_R4);
- theEmitter->emitIns_R_R(INS_mov, EA_4BYTE, REG_V9, REG_R5);
- theEmitter->emitIns_R_R(INS_mov, EA_2BYTE, REG_V10, REG_R6);
- theEmitter->emitIns_R_R(INS_mov, EA_1BYTE, REG_V11, REG_R7);
+ theEmitter->emitIns_R_R(INS_mov, EA_8BYTE, REG_V8, REG_R4);
+ theEmitter->emitIns_R_R(INS_mov, EA_4BYTE, REG_V9, REG_R5);
+ theEmitter->emitIns_R_R(INS_mov, EA_2BYTE, REG_V10, REG_R6);
+ theEmitter->emitIns_R_R(INS_mov, EA_1BYTE, REG_V11, REG_R7);
// mov vector[index] to vector
- theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_V2, REG_V3, 3);
- theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_V4, REG_V5, 7);
- theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_V6, REG_V7, 15);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_V2, REG_V3, 3);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_V4, REG_V5, 7);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_V6, REG_V7, 15);
// mov to general from vector[index]
- theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_R8, REG_V16, 1);
- theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_R9, REG_V17, 2);
- theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_R10, REG_V18, 3);
- theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_R11, REG_V19, 4);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_R8, REG_V16, 1);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_R9, REG_V17, 2);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_R10, REG_V18, 3);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_R11, REG_V19, 4);
// mov to vector[index] from general
- theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_V20, REG_R12, 1);
- theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_V21, REG_R13, 2);
- theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_V22, REG_R14, 6);
- theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_V23, REG_R15, 8);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_V20, REG_R12, 1);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_V21, REG_R13, 2);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_V22, REG_R14, 6);
+ theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_V23, REG_R15, 8);
// mov vector[index] to vector[index2]
- theEmitter->emitIns_R_R_I_I(INS_mov, EA_8BYTE, REG_V8, REG_V9, 1, 0);
- theEmitter->emitIns_R_R_I_I(INS_mov, EA_4BYTE, REG_V10, REG_V11, 2, 1);
- theEmitter->emitIns_R_R_I_I(INS_mov, EA_2BYTE, REG_V12, REG_V13, 5, 2);
- theEmitter->emitIns_R_R_I_I(INS_mov, EA_1BYTE, REG_V14, REG_V15, 12, 3);
+ theEmitter->emitIns_R_R_I_I(INS_mov, EA_8BYTE, REG_V8, REG_V9, 1, 0);
+ theEmitter->emitIns_R_R_I_I(INS_mov, EA_4BYTE, REG_V10, REG_V11, 2, 1);
+ theEmitter->emitIns_R_R_I_I(INS_mov, EA_2BYTE, REG_V12, REG_V13, 5, 2);
+ theEmitter->emitIns_R_R_I_I(INS_mov, EA_1BYTE, REG_V14, REG_V15, 12, 3);
//////////////////////////////////////////////////////////////////////////////////
- // mov/dup scalar
- theEmitter->emitIns_R_R_I(INS_dup, EA_8BYTE, REG_V24, REG_V25, 1);
- theEmitter->emitIns_R_R_I(INS_dup, EA_4BYTE, REG_V26, REG_V27, 3);
- theEmitter->emitIns_R_R_I(INS_dup, EA_2BYTE, REG_V28, REG_V29, 7);
- theEmitter->emitIns_R_R_I(INS_dup, EA_1BYTE, REG_V30, REG_V31, 15);
+ // mov/dup scalar
+ theEmitter->emitIns_R_R_I(INS_dup, EA_8BYTE, REG_V24, REG_V25, 1);
+ theEmitter->emitIns_R_R_I(INS_dup, EA_4BYTE, REG_V26, REG_V27, 3);
+ theEmitter->emitIns_R_R_I(INS_dup, EA_2BYTE, REG_V28, REG_V29, 7);
+ theEmitter->emitIns_R_R_I(INS_dup, EA_1BYTE, REG_V30, REG_V31, 15);
// mov/ins vector element
- theEmitter->emitIns_R_R_I_I(INS_ins, EA_8BYTE, REG_V0, REG_V1, 0, 1);
- theEmitter->emitIns_R_R_I_I(INS_ins, EA_4BYTE, REG_V2, REG_V3, 2, 2);
- theEmitter->emitIns_R_R_I_I(INS_ins, EA_2BYTE, REG_V4, REG_V5, 4, 3);
- theEmitter->emitIns_R_R_I_I(INS_ins, EA_1BYTE, REG_V6, REG_V7, 8, 4);
+ theEmitter->emitIns_R_R_I_I(INS_ins, EA_8BYTE, REG_V0, REG_V1, 0, 1);
+ theEmitter->emitIns_R_R_I_I(INS_ins, EA_4BYTE, REG_V2, REG_V3, 2, 2);
+ theEmitter->emitIns_R_R_I_I(INS_ins, EA_2BYTE, REG_V4, REG_V5, 4, 3);
+ theEmitter->emitIns_R_R_I_I(INS_ins, EA_1BYTE, REG_V6, REG_V7, 8, 4);
// umov to general from vector element
- theEmitter->emitIns_R_R_I(INS_umov, EA_8BYTE, REG_R0, REG_V8, 1);
- theEmitter->emitIns_R_R_I(INS_umov, EA_4BYTE, REG_R1, REG_V9, 2);
- theEmitter->emitIns_R_R_I(INS_umov, EA_2BYTE, REG_R2, REG_V10, 4);
- theEmitter->emitIns_R_R_I(INS_umov, EA_1BYTE, REG_R3, REG_V11, 8);
+ theEmitter->emitIns_R_R_I(INS_umov, EA_8BYTE, REG_R0, REG_V8, 1);
+ theEmitter->emitIns_R_R_I(INS_umov, EA_4BYTE, REG_R1, REG_V9, 2);
+ theEmitter->emitIns_R_R_I(INS_umov, EA_2BYTE, REG_R2, REG_V10, 4);
+ theEmitter->emitIns_R_R_I(INS_umov, EA_1BYTE, REG_R3, REG_V11, 8);
// ins to vector element from general
- theEmitter->emitIns_R_R_I(INS_ins, EA_8BYTE, REG_V12, REG_R4, 1);
- theEmitter->emitIns_R_R_I(INS_ins, EA_4BYTE, REG_V13, REG_R5, 3);
- theEmitter->emitIns_R_R_I(INS_ins, EA_2BYTE, REG_V14, REG_R6, 7);
- theEmitter->emitIns_R_R_I(INS_ins, EA_1BYTE, REG_V15, REG_R7, 15);
+ theEmitter->emitIns_R_R_I(INS_ins, EA_8BYTE, REG_V12, REG_R4, 1);
+ theEmitter->emitIns_R_R_I(INS_ins, EA_4BYTE, REG_V13, REG_R5, 3);
+ theEmitter->emitIns_R_R_I(INS_ins, EA_2BYTE, REG_V14, REG_R6, 7);
+ theEmitter->emitIns_R_R_I(INS_ins, EA_1BYTE, REG_V15, REG_R7, 15);
// smov to general from vector element
- theEmitter->emitIns_R_R_I(INS_smov, EA_4BYTE, REG_R5, REG_V17, 2);
- theEmitter->emitIns_R_R_I(INS_smov, EA_2BYTE, REG_R6, REG_V18, 4);
- theEmitter->emitIns_R_R_I(INS_smov, EA_1BYTE, REG_R7, REG_V19, 8);
+ theEmitter->emitIns_R_R_I(INS_smov, EA_4BYTE, REG_R5, REG_V17, 2);
+ theEmitter->emitIns_R_R_I(INS_smov, EA_2BYTE, REG_R6, REG_V18, 4);
+ theEmitter->emitIns_R_R_I(INS_smov, EA_1BYTE, REG_R7, REG_V19, 8);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -8724,68 +8622,68 @@ void CodeGen::genArm64EmitterUnitTests()
//
// movi imm8 (vector)
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V0, 0x00, INS_OPTS_8B);
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V1, 0xFF, INS_OPTS_8B);
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V2, 0x00, INS_OPTS_16B);
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V3, 0xFF, INS_OPTS_16B);
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V0, 0x00, INS_OPTS_8B);
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V1, 0xFF, INS_OPTS_8B);
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V2, 0x00, INS_OPTS_16B);
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V3, 0xFF, INS_OPTS_16B);
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V4, 0x007F, INS_OPTS_4H);
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V5, 0x7F00, INS_OPTS_4H); // LSL 8
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V6, 0x003F, INS_OPTS_8H);
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V7, 0x3F00, INS_OPTS_8H); // LSL 8
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V4, 0x007F, INS_OPTS_4H);
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V5, 0x7F00, INS_OPTS_4H); // LSL 8
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V6, 0x003F, INS_OPTS_8H);
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V7, 0x3F00, INS_OPTS_8H); // LSL 8
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V8, 0x1F, INS_OPTS_2S);
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V9, 0x1F00, INS_OPTS_2S); // LSL 8
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V10, 0x1F0000, INS_OPTS_2S); // LSL 16
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V11, 0x1F000000, INS_OPTS_2S); // LSL 24
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V8, 0x1F, INS_OPTS_2S);
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V9, 0x1F00, INS_OPTS_2S); // LSL 8
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V10, 0x1F0000, INS_OPTS_2S); // LSL 16
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V11, 0x1F000000, INS_OPTS_2S); // LSL 24
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V12, 0x1FFF, INS_OPTS_2S); // MSL 8
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V13, 0x1FFFFF, INS_OPTS_2S); // MSL 16
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V12, 0x1FFF, INS_OPTS_2S); // MSL 8
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V13, 0x1FFFFF, INS_OPTS_2S); // MSL 16
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V14, 0x37, INS_OPTS_4S);
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V15, 0x3700, INS_OPTS_4S); // LSL 8
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V16, 0x370000, INS_OPTS_4S); // LSL 16
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V17, 0x37000000, INS_OPTS_4S); // LSL 24
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V14, 0x37, INS_OPTS_4S);
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V15, 0x3700, INS_OPTS_4S); // LSL 8
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V16, 0x370000, INS_OPTS_4S); // LSL 16
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V17, 0x37000000, INS_OPTS_4S); // LSL 24
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V18, 0x37FF, INS_OPTS_4S); // MSL 8
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V19, 0x37FFFF, INS_OPTS_4S); // MSL 16
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V18, 0x37FF, INS_OPTS_4S); // MSL 8
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V19, 0x37FFFF, INS_OPTS_4S); // MSL 16
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V20, 0xFF80, INS_OPTS_4H); // mvni
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V21, 0xFFC0, INS_OPTS_8H); // mvni
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V20, 0xFF80, INS_OPTS_4H); // mvni
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V21, 0xFFC0, INS_OPTS_8H); // mvni
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V22, 0xFFFFFFE0, INS_OPTS_2S); // mvni
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V23, 0xFFFFF0FF, INS_OPTS_4S); // mvni LSL 8
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V24, 0xFFF8FFFF, INS_OPTS_2S); // mvni LSL 16
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V25, 0xFCFFFFFF, INS_OPTS_4S); // mvni LSL 24
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V22, 0xFFFFFFE0, INS_OPTS_2S); // mvni
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V23, 0xFFFFF0FF, INS_OPTS_4S); // mvni LSL 8
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V24, 0xFFF8FFFF, INS_OPTS_2S); // mvni LSL 16
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V25, 0xFCFFFFFF, INS_OPTS_4S); // mvni LSL 24
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V26, 0xFFFFFE00, INS_OPTS_2S); // mvni MSL 8
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V27, 0xFFFC0000, INS_OPTS_4S); // mvni MSL 16
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V26, 0xFFFFFE00, INS_OPTS_2S); // mvni MSL 8
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V27, 0xFFFC0000, INS_OPTS_4S); // mvni MSL 16
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V28, 0x00FF00FF00FF00FF, INS_OPTS_1D);
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V29, 0x00FFFF0000FFFF00, INS_OPTS_2D);
- theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V30, 0xFF000000FF000000);
- theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V31, 0x0, INS_OPTS_2D);
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V28, 0x00FF00FF00FF00FF, INS_OPTS_1D);
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V29, 0x00FFFF0000FFFF00, INS_OPTS_2D);
+ theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V30, 0xFF000000FF000000);
+ theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V31, 0x0, INS_OPTS_2D);
- theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
- theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
- theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
- theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
+ theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
+ theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
+ theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
+ theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
- theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
- theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
- theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
- theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
+ theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
+ theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
+ theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
+ theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
- theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V8, 0x42FF, INS_OPTS_2S); // MSL 8
- theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V9, 0x42FFFF, INS_OPTS_2S); // MSL 16
+ theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V8, 0x42FF, INS_OPTS_2S); // MSL 8
+ theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V9, 0x42FFFF, INS_OPTS_2S); // MSL 16
- theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
- theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
- theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
- theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
+ theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
+ theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
+ theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
+ theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
- theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V14, 0x5DFF, INS_OPTS_4S); // MSL 8
- theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V15, 0x5DFFFF, INS_OPTS_4S); // MSL 16
+ theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V14, 0x5DFF, INS_OPTS_4S); // MSL 8
+ theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V15, 0x5DFFFF, INS_OPTS_4S); // MSL 16
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
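
The movi/mvni immediates above exercise the AArch64 SIMD modified-immediate encodings: LSL #8/#16/#24 place the 8-bit pattern in a higher byte, while MSL #8/#16 shift it left and fill the vacated low bits with ones (so 0x1F with MSL #8 becomes 0x1FFF, and mvni simply encodes the bitwise inverse of such a pattern, as the comments note). A minimal sketch of that expansion, purely illustrative and not part of the emitter (the helper name expandMovImm32 is made up):

#include <cstdint>

// Expand an 8-bit movi immediate for a 32-bit lane, mirroring the LSL/MSL
// comments in the unit tests above. 'msl' selects the "shifting ones" form.
static uint32_t expandMovImm32(uint8_t imm8, unsigned shift, bool msl)
{
    uint32_t value = static_cast<uint32_t>(imm8) << shift;
    if (msl)
    {
        value |= (1u << shift) - 1; // MSL fills the vacated low bits with ones
    }
    return value;
}

// expandMovImm32(0x1F, 8,  true)  == 0x00001FFF  (MSL 8)
// expandMovImm32(0x1F, 16, true)  == 0x001FFFFF  (MSL 16)
// expandMovImm32(0x37, 24, false) == 0x37000000  (LSL 24)
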
@@ -8794,35 +8692,35 @@ void CodeGen::genArm64EmitterUnitTests()
// R_I orr/bic vector immediate
//
- theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
- theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
- theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
- theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
+ theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
+ theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
+ theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
+ theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
- theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
- theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
- theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
- theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
+ theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
+ theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
+ theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
+ theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
- theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
- theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
- theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
- theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
+ theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
+ theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
+ theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
+ theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
- theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
- theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
- theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
- theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
+ theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
+ theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
+ theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
+ theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
- theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
- theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
- theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
- theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
+ theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
+ theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
+ theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
+ theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
- theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
- theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
- theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
- theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
+ theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
+ theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
+ theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
+ theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -8832,38 +8730,38 @@ void CodeGen::genArm64EmitterUnitTests()
//
// fmov imm8 (scalar)
- theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V14, 1.0);
- theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V15, -1.0);
- theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V0, 2.0); // encodes imm8 == 0
- theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V16, 10.0);
- theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V17, -10.0);
- theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V18, 31); // Largest encodable value
- theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V19, -31);
- theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V20, 1.25);
- theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V21, -1.25);
- theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V22, 0.125); // Smallest encodable value
- theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V23, -0.125);
+ theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V14, 1.0);
+ theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V15, -1.0);
+ theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V0, 2.0); // encodes imm8 == 0
+ theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V16, 10.0);
+ theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V17, -10.0);
+ theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V18, 31); // Largest encodable value
+ theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V19, -31);
+ theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V20, 1.25);
+ theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V21, -1.25);
+ theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V22, 0.125); // Smallest encodable value
+ theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V23, -0.125);
// fmov imm8 (vector)
- theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V0, 2.0, INS_OPTS_2S);
- theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V24, 1.0, INS_OPTS_2S);
- theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V25, 1.0, INS_OPTS_4S);
- theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V26, 1.0, INS_OPTS_2D);
- theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V27, -10.0, INS_OPTS_2S);
- theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V28, -10.0, INS_OPTS_4S);
- theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V29, -10.0, INS_OPTS_2D);
- theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V30, 31.0, INS_OPTS_2S);
- theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V31, 31.0, INS_OPTS_4S);
- theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V0, 31.0, INS_OPTS_2D);
- theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V1, -0.125, INS_OPTS_2S);
- theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V2, -0.125, INS_OPTS_4S);
- theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V3, -0.125, INS_OPTS_2D);
+ theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V0, 2.0, INS_OPTS_2S);
+ theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V24, 1.0, INS_OPTS_2S);
+ theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V25, 1.0, INS_OPTS_4S);
+ theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V26, 1.0, INS_OPTS_2D);
+ theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V27, -10.0, INS_OPTS_2S);
+ theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V28, -10.0, INS_OPTS_4S);
+ theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V29, -10.0, INS_OPTS_2D);
+ theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V30, 31.0, INS_OPTS_2S);
+ theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V31, 31.0, INS_OPTS_4S);
+ theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V0, 31.0, INS_OPTS_2D);
+ theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V1, -0.125, INS_OPTS_2S);
+ theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V2, -0.125, INS_OPTS_4S);
+ theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V3, -0.125, INS_OPTS_2D);
// fcmp with 0.0
- theEmitter->emitIns_R_F(INS_fcmp, EA_8BYTE, REG_V12, 0.0);
- theEmitter->emitIns_R_F(INS_fcmp, EA_4BYTE, REG_V13, 0.0);
- theEmitter->emitIns_R_F(INS_fcmpe, EA_8BYTE, REG_V14, 0.0);
- theEmitter->emitIns_R_F(INS_fcmpe, EA_4BYTE, REG_V15, 0.0);
+ theEmitter->emitIns_R_F(INS_fcmp, EA_8BYTE, REG_V12, 0.0);
+ theEmitter->emitIns_R_F(INS_fcmp, EA_4BYTE, REG_V13, 0.0);
+ theEmitter->emitIns_R_F(INS_fcmpe, EA_8BYTE, REG_V14, 0.0);
+ theEmitter->emitIns_R_F(INS_fcmpe, EA_4BYTE, REG_V15, 0.0);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
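
The fmov imm8 tests rely on the fact that the 8-bit floating-point immediate can only represent values of the form ±(n/16) × 2^r with n in 16..31 and r in -3..4, which is why 31.0 and 0.125 are flagged above as the largest and smallest encodable magnitudes and 2.0 encodes imm8 == 0. A standalone check of that property (the name isEncodableFmovImm is invented for illustration and is not an emitter API):

#include <cmath>

// True if 'value' is representable as an AArch64 fmov (immediate) imm8,
// i.e. +/- (n/16) * 2^r with 16 <= n <= 31 and -3 <= r <= 4.
static bool isEncodableFmovImm(double value)
{
    double mag = std::fabs(value);
    for (int r = -3; r <= 4; ++r)
    {
        for (int n = 16; n <= 31; ++n)
        {
            if (mag == (n / 16.0) * std::ldexp(1.0, r)) // both factors are exact in binary
            {
                return true;
            }
        }
    }
    return false;
}

// isEncodableFmovImm(31.0)  -> true   (largest magnitude)
// isEncodableFmovImm(0.125) -> true   (smallest magnitude)
// isEncodableFmovImm(0.1)   -> false  (must be materialized another way)
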
@@ -8873,38 +8771,38 @@ void CodeGen::genArm64EmitterUnitTests()
//
// fmov to vector to vector
- theEmitter->emitIns_R_R(INS_fmov, EA_8BYTE, REG_V0, REG_V2);
- theEmitter->emitIns_R_R(INS_fmov, EA_4BYTE, REG_V1, REG_V3);
+ theEmitter->emitIns_R_R(INS_fmov, EA_8BYTE, REG_V0, REG_V2);
+ theEmitter->emitIns_R_R(INS_fmov, EA_4BYTE, REG_V1, REG_V3);
// fmov to vector to general
- theEmitter->emitIns_R_R(INS_fmov, EA_8BYTE, REG_R0, REG_V4);
- theEmitter->emitIns_R_R(INS_fmov, EA_4BYTE, REG_R1, REG_V5);
+ theEmitter->emitIns_R_R(INS_fmov, EA_8BYTE, REG_R0, REG_V4);
+ theEmitter->emitIns_R_R(INS_fmov, EA_4BYTE, REG_R1, REG_V5);
// using the optional conversion specifier
- theEmitter->emitIns_R_R(INS_fmov, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_D_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fmov, EA_4BYTE, REG_R3, REG_V7, INS_OPTS_S_TO_4BYTE);
-
+ theEmitter->emitIns_R_R(INS_fmov, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_D_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fmov, EA_4BYTE, REG_R3, REG_V7, INS_OPTS_S_TO_4BYTE);
+
// fmov to general to vector
- theEmitter->emitIns_R_R(INS_fmov, EA_8BYTE, REG_V8, REG_R4);
- theEmitter->emitIns_R_R(INS_fmov, EA_4BYTE, REG_V9, REG_R5);
- // using the optional conversion specifier
- theEmitter->emitIns_R_R(INS_fmov, EA_8BYTE, REG_V10, REG_R6, INS_OPTS_8BYTE_TO_D);
- theEmitter->emitIns_R_R(INS_fmov, EA_4BYTE, REG_V11, REG_R7, INS_OPTS_4BYTE_TO_S);
+ theEmitter->emitIns_R_R(INS_fmov, EA_8BYTE, REG_V8, REG_R4);
+ theEmitter->emitIns_R_R(INS_fmov, EA_4BYTE, REG_V9, REG_R5);
+ // using the optional conversion specifier
+ theEmitter->emitIns_R_R(INS_fmov, EA_8BYTE, REG_V10, REG_R6, INS_OPTS_8BYTE_TO_D);
+ theEmitter->emitIns_R_R(INS_fmov, EA_4BYTE, REG_V11, REG_R7, INS_OPTS_4BYTE_TO_S);
// fcmp/fcmpe
- theEmitter->emitIns_R_R(INS_fcmp, EA_8BYTE, REG_V8, REG_V16);
- theEmitter->emitIns_R_R(INS_fcmp, EA_4BYTE, REG_V9, REG_V17);
- theEmitter->emitIns_R_R(INS_fcmpe, EA_8BYTE, REG_V10, REG_V18);
- theEmitter->emitIns_R_R(INS_fcmpe, EA_4BYTE, REG_V11, REG_V19);
+ theEmitter->emitIns_R_R(INS_fcmp, EA_8BYTE, REG_V8, REG_V16);
+ theEmitter->emitIns_R_R(INS_fcmp, EA_4BYTE, REG_V9, REG_V17);
+ theEmitter->emitIns_R_R(INS_fcmpe, EA_8BYTE, REG_V10, REG_V18);
+ theEmitter->emitIns_R_R(INS_fcmpe, EA_4BYTE, REG_V11, REG_V19);
// fcvt
- theEmitter->emitIns_R_R(INS_fcvt, EA_8BYTE, REG_V24, REG_V25, INS_OPTS_S_TO_D); // Single to Double
- theEmitter->emitIns_R_R(INS_fcvt, EA_4BYTE, REG_V26, REG_V27, INS_OPTS_D_TO_S); // Double to Single
+ theEmitter->emitIns_R_R(INS_fcvt, EA_8BYTE, REG_V24, REG_V25, INS_OPTS_S_TO_D); // Single to Double
+ theEmitter->emitIns_R_R(INS_fcvt, EA_4BYTE, REG_V26, REG_V27, INS_OPTS_D_TO_S); // Double to Single
- theEmitter->emitIns_R_R(INS_fcvt, EA_4BYTE, REG_V1, REG_V2, INS_OPTS_H_TO_S);
- theEmitter->emitIns_R_R(INS_fcvt, EA_8BYTE, REG_V3, REG_V4, INS_OPTS_H_TO_D);
+ theEmitter->emitIns_R_R(INS_fcvt, EA_4BYTE, REG_V1, REG_V2, INS_OPTS_H_TO_S);
+ theEmitter->emitIns_R_R(INS_fcvt, EA_8BYTE, REG_V3, REG_V4, INS_OPTS_H_TO_D);
- theEmitter->emitIns_R_R(INS_fcvt, EA_2BYTE, REG_V5, REG_V6, INS_OPTS_S_TO_H);
- theEmitter->emitIns_R_R(INS_fcvt, EA_2BYTE, REG_V7, REG_V8, INS_OPTS_D_TO_H);
+ theEmitter->emitIns_R_R(INS_fcvt, EA_2BYTE, REG_V5, REG_V6, INS_OPTS_S_TO_H);
+ theEmitter->emitIns_R_R(INS_fcvt, EA_2BYTE, REG_V7, REG_V8, INS_OPTS_D_TO_H);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -8914,192 +8812,192 @@ void CodeGen::genArm64EmitterUnitTests()
//
// fcvtas scalar
- theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_V2, REG_V3);
// fcvtas scalar to general
- theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
-
- // fcvtas vector
- theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
+
+ // fcvtas vector
+ theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtas, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtas, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtau scalar
- theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_V2, REG_V3);
// fcvtau scalar to general
- theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
-
- // fcvtau vector
- theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
+
+ // fcvtau vector
+ theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtau, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtau, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtms scalar
- theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_V2, REG_V3);
// fcvtms scalar to general
- theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
-
- // fcvtms vector
- theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
+
+ // fcvtms vector
+ theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtms, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtms, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtmu scalar
- theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_V2, REG_V3);
// fcvtmu scalar to general
- theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
-
- // fcvtmu vector
- theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
+
+ // fcvtmu vector
+ theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtns scalar
- theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_V2, REG_V3);
// fcvtns scalar to general
- theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
-
- // fcvtns vector
- theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
+
+ // fcvtns vector
+ theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtns, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtns, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtnu scalar
- theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_V2, REG_V3);
// fcvtnu scalar to general
- theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
-
- // fcvtnu vector
- theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
+
+ // fcvtnu vector
+ theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtps scalar
- theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_V2, REG_V3);
// fcvtps scalar to general
- theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
-
- // fcvtps vector
- theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
+
+ // fcvtps vector
+ theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtps, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtps, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtpu scalar
- theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_V2, REG_V3);
// fcvtpu scalar to general
- theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
-
- // fcvtpu vector
- theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
+
+ // fcvtpu vector
+ theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtzs scalar
- theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_V2, REG_V3);
// fcvtzs scalar to general
- theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
-
- // fcvtzs vector
- theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
+
+ // fcvtzs vector
+ theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtzu scalar
- theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_V2, REG_V3);
// fcvtzu scalar to general
- theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
- theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
- theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
-
- // fcvtzu vector
- theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
+ theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
+
+ // fcvtzu vector
+ theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// scvtf scalar
- theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V2, REG_V3);
// scvtf scalar from general
- theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V4, REG_R0, INS_OPTS_4BYTE_TO_S);
- theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V5, REG_R1, INS_OPTS_8BYTE_TO_S);
- theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V6, REG_R2, INS_OPTS_4BYTE_TO_D);
- theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V7, REG_R3, INS_OPTS_8BYTE_TO_D);
-
- // scvtf vector
- theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V4, REG_R0, INS_OPTS_4BYTE_TO_S);
+ theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V5, REG_R1, INS_OPTS_8BYTE_TO_S);
+ theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V6, REG_R2, INS_OPTS_4BYTE_TO_D);
+ theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V7, REG_R3, INS_OPTS_8BYTE_TO_D);
+
+ // scvtf vector
+ theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_scvtf, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_scvtf, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// ucvtf scalar
- theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V2, REG_V3);
// ucvtf scalar from general
- theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V4, REG_R0, INS_OPTS_4BYTE_TO_S);
- theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V5, REG_R1, INS_OPTS_8BYTE_TO_S);
- theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V6, REG_R2, INS_OPTS_4BYTE_TO_D);
- theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V7, REG_R3, INS_OPTS_8BYTE_TO_D);
-
- // ucvtf vector
- theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V4, REG_R0, INS_OPTS_4BYTE_TO_S);
+ theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V5, REG_R1, INS_OPTS_8BYTE_TO_S);
+ theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V6, REG_R2, INS_OPTS_4BYTE_TO_D);
+ theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V7, REG_R3, INS_OPTS_8BYTE_TO_D);
+
+ // ucvtf vector
+ theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ucvtf, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ucvtf, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
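
The fcvt* variants above differ only in rounding mode and destination signedness: the trailing letter selects ties-away (a), toward minus infinity (m), ties-to-even (n), toward plus infinity (p) or toward zero (z), with s/u choosing a signed or unsigned integer result, and scvtf/ucvtf converting in the opposite direction. A small sketch of the scalar float-to-int semantics using portable C++ rounding, ignoring saturation and NaN handling (the helper name referenceFcvt is hypothetical):

#include <cmath>
#include <cstdint>

// Reference behaviour of the scalar float->int conversions exercised above.
// std::nearbyint models the 'n' case assuming the default round-to-nearest-even mode.
static int64_t referenceFcvt(double x, char mode)
{
    switch (mode)
    {
        case 'a': return static_cast<int64_t>(std::round(x));     // fcvtas: ties away from zero
        case 'm': return static_cast<int64_t>(std::floor(x));     // fcvtms: toward -infinity
        case 'n': return static_cast<int64_t>(std::nearbyint(x)); // fcvtns: ties to even
        case 'p': return static_cast<int64_t>(std::ceil(x));      // fcvtps: toward +infinity
        default:  return static_cast<int64_t>(std::trunc(x));     // fcvtzs: toward zero
    }
}

// referenceFcvt(1.5, 'a') == 2, referenceFcvt(2.5, 'n') == 2,
// referenceFcvt(-1.5, 'm') == -2, referenceFcvt(-1.5, 'z') == -1
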
@@ -9111,110 +9009,110 @@ void CodeGen::genArm64EmitterUnitTests()
//
// fabs scalar
- theEmitter->emitIns_R_R(INS_fabs, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fabs, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fabs, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fabs, EA_8BYTE, REG_V2, REG_V3);
- // fabs vector
- theEmitter->emitIns_R_R(INS_fabs, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
- theEmitter->emitIns_R_R(INS_fabs, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
- theEmitter->emitIns_R_R(INS_fabs, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
+ // fabs vector
+ theEmitter->emitIns_R_R(INS_fabs, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fabs, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
+ theEmitter->emitIns_R_R(INS_fabs, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// fneg scalar
- theEmitter->emitIns_R_R(INS_fneg, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fneg, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fneg, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fneg, EA_8BYTE, REG_V2, REG_V3);
- // fneg vector
- theEmitter->emitIns_R_R(INS_fneg, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
- theEmitter->emitIns_R_R(INS_fneg, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
- theEmitter->emitIns_R_R(INS_fneg, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
+ // fneg vector
+ theEmitter->emitIns_R_R(INS_fneg, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_fneg, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
+ theEmitter->emitIns_R_R(INS_fneg, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// fsqrt scalar
- theEmitter->emitIns_R_R(INS_fsqrt, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_fsqrt, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_fsqrt, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_fsqrt, EA_8BYTE, REG_V2, REG_V3);
- // fsqrt vector
- theEmitter->emitIns_R_R(INS_fsqrt, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
+ // fsqrt vector
+ theEmitter->emitIns_R_R(INS_fsqrt, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fsqrt, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fsqrt, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
genDefineTempLabel(genCreateTempLabel());
// abs scalar
- theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V2, REG_V3);
- // abs vector
- theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
- theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
- theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
- theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
- theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
- theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V16, REG_V17, INS_OPTS_2D);
+ // abs vector
+ theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
+ theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
+ theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V16, REG_V17, INS_OPTS_2D);
// neg scalar
- theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V2, REG_V3);
-
- // neg vector
- theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
- theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
- theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
- theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
- theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
- theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V16, REG_V17, INS_OPTS_2D);
-
- // mvn vector
- theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_V4, REG_V5);
- theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_mvn, EA_16BYTE, REG_V8, REG_V9);
- theEmitter->emitIns_R_R(INS_mvn, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_16B);
-
- // cnt vector
- theEmitter->emitIns_R_R(INS_cnt, EA_8BYTE, REG_V22, REG_V23, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_cnt, EA_16BYTE, REG_V24, REG_V25, INS_OPTS_16B);
+ theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V2, REG_V3);
+
+ // neg vector
+ theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
+ theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
+ theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V16, REG_V17, INS_OPTS_2D);
+
+ // mvn vector
+ theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_V4, REG_V5);
+ theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_mvn, EA_16BYTE, REG_V8, REG_V9);
+ theEmitter->emitIns_R_R(INS_mvn, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_16B);
+
+ // cnt vector
+ theEmitter->emitIns_R_R(INS_cnt, EA_8BYTE, REG_V22, REG_V23, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_cnt, EA_16BYTE, REG_V24, REG_V25, INS_OPTS_16B);
// not vector (the same encoding as mvn)
- theEmitter->emitIns_R_R(INS_not, EA_8BYTE, REG_V12, REG_V13);
- theEmitter->emitIns_R_R(INS_not, EA_8BYTE, REG_V14, REG_V15, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_not, EA_16BYTE, REG_V16, REG_V17);
- theEmitter->emitIns_R_R(INS_not, EA_16BYTE, REG_V18, REG_V19, INS_OPTS_16B);
-
- // cls vector
- theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
- theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
- theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
- theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
- theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
-
- // clz vector
- theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
- theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
- theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
- theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
- theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
-
- // rbit vector
- theEmitter->emitIns_R_R(INS_rbit, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_rbit, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
-
- // rev16 vector
- theEmitter->emitIns_R_R(INS_rev16, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_rev16, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
-
- // rev32 vector
- theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_rev32, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
- theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R(INS_not, EA_8BYTE, REG_V12, REG_V13);
+ theEmitter->emitIns_R_R(INS_not, EA_8BYTE, REG_V14, REG_V15, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_not, EA_16BYTE, REG_V16, REG_V17);
+ theEmitter->emitIns_R_R(INS_not, EA_16BYTE, REG_V18, REG_V19, INS_OPTS_16B);
+
+ // cls vector
+ theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
+ theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
+
+ // clz vector
+ theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
+ theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
+
+ // rbit vector
+ theEmitter->emitIns_R_R(INS_rbit, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_rbit, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
+
+ // rev16 vector
+ theEmitter->emitIns_R_R(INS_rev16, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_rev16, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
+
+ // rev32 vector
+ theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_rev32, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_rev32, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
- // rev64 vector
- theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_rev64, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
- theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
+ // rev64 vector
+ theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_rev64, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_rev64, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
- theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_rev64, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
#endif
@@ -9225,65 +9123,65 @@ void CodeGen::genArm64EmitterUnitTests()
//
// frinta scalar
- theEmitter->emitIns_R_R(INS_frinta, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_frinta, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_frinta, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_frinta, EA_8BYTE, REG_V2, REG_V3);
- // frinta vector
- theEmitter->emitIns_R_R(INS_frinta, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
+ // frinta vector
+ theEmitter->emitIns_R_R(INS_frinta, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frinta, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frinta, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frinti scalar
- theEmitter->emitIns_R_R(INS_frinti, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_frinti, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_frinti, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_frinti, EA_8BYTE, REG_V2, REG_V3);
- // frinti vector
- theEmitter->emitIns_R_R(INS_frinti, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
+ // frinti vector
+ theEmitter->emitIns_R_R(INS_frinti, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frinti, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frinti, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintm scalar
- theEmitter->emitIns_R_R(INS_frintm, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_frintm, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_frintm, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_frintm, EA_8BYTE, REG_V2, REG_V3);
- // frintm vector
- theEmitter->emitIns_R_R(INS_frintm, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
+ // frintm vector
+ theEmitter->emitIns_R_R(INS_frintm, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintm, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintm, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintn scalar
- theEmitter->emitIns_R_R(INS_frintn, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_frintn, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_frintn, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_frintn, EA_8BYTE, REG_V2, REG_V3);
- // frintn vector
- theEmitter->emitIns_R_R(INS_frintn, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
+ // frintn vector
+ theEmitter->emitIns_R_R(INS_frintn, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintn, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintn, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintp scalar
- theEmitter->emitIns_R_R(INS_frintp, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_frintp, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_frintp, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_frintp, EA_8BYTE, REG_V2, REG_V3);
- // frintp vector
- theEmitter->emitIns_R_R(INS_frintp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
+ // frintp vector
+ theEmitter->emitIns_R_R(INS_frintp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintx scalar
- theEmitter->emitIns_R_R(INS_frintx, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_frintx, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_frintx, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_frintx, EA_8BYTE, REG_V2, REG_V3);
- // frintx vector
- theEmitter->emitIns_R_R(INS_frintx, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
+ // frintx vector
+ theEmitter->emitIns_R_R(INS_frintx, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintx, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintx, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintz scalar
- theEmitter->emitIns_R_R(INS_frintz, EA_4BYTE, REG_V0, REG_V1);
- theEmitter->emitIns_R_R(INS_frintz, EA_8BYTE, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R(INS_frintz, EA_4BYTE, REG_V0, REG_V1);
+ theEmitter->emitIns_R_R(INS_frintz, EA_8BYTE, REG_V2, REG_V3);
- // frintz vector
- theEmitter->emitIns_R_R(INS_frintz, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
+ // frintz vector
+ theEmitter->emitIns_R_R(INS_frintz, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintz, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintz, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
@@ -9296,71 +9194,71 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R_R_R(INS_fadd, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
- theEmitter->emitIns_R_R_R(INS_fadd, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
- theEmitter->emitIns_R_R_R(INS_fadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_fadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_R(INS_fadd, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
+ theEmitter->emitIns_R_R_R(INS_fadd, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R(INS_fadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_fadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
-
- theEmitter->emitIns_R_R_R(INS_fsub, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
- theEmitter->emitIns_R_R_R(INS_fsub, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
- theEmitter->emitIns_R_R_R(INS_fsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_fsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
+
+ theEmitter->emitIns_R_R_R(INS_fsub, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
+ theEmitter->emitIns_R_R_R(INS_fsub, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R(INS_fsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_fsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
-
- theEmitter->emitIns_R_R_R(INS_fdiv, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
- theEmitter->emitIns_R_R_R(INS_fdiv, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
- theEmitter->emitIns_R_R_R(INS_fdiv, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_fdiv, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
+
+ theEmitter->emitIns_R_R_R(INS_fdiv, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
+ theEmitter->emitIns_R_R_R(INS_fdiv, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R(INS_fdiv, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_fdiv, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fdiv, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
-
- theEmitter->emitIns_R_R_R(INS_fmax, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
- theEmitter->emitIns_R_R_R(INS_fmax, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
- theEmitter->emitIns_R_R_R(INS_fmax, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_fmax, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
+
+ theEmitter->emitIns_R_R_R(INS_fmax, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
+ theEmitter->emitIns_R_R_R(INS_fmax, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R(INS_fmax, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_fmax, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmax, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
-
- theEmitter->emitIns_R_R_R(INS_fmin, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
- theEmitter->emitIns_R_R_R(INS_fmin, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
- theEmitter->emitIns_R_R_R(INS_fmin, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_fmin, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
+
+ theEmitter->emitIns_R_R_R(INS_fmin, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
+ theEmitter->emitIns_R_R_R(INS_fmin, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R(INS_fmin, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_fmin, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmin, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
-
- // fabd
- theEmitter->emitIns_R_R_R(INS_fabd, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
- theEmitter->emitIns_R_R_R(INS_fabd, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
- theEmitter->emitIns_R_R_R(INS_fabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_fabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
+
+ // fabd
+ theEmitter->emitIns_R_R_R(INS_fabd, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
+ theEmitter->emitIns_R_R_R(INS_fabd, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R(INS_fabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_fabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fabd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
-
+
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R_R_R(INS_fmul, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
- theEmitter->emitIns_R_R_R(INS_fmul, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
- theEmitter->emitIns_R_R_R(INS_fmul, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_fmul, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_R(INS_fmul, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
+ theEmitter->emitIns_R_R_R(INS_fmul, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R(INS_fmul, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_fmul, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmul, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
- theEmitter->emitIns_R_R_R_I(INS_fmul, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by elem 4BYTE
- theEmitter->emitIns_R_R_R_I(INS_fmul, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by elem 8BYTE
- theEmitter->emitIns_R_R_R_I(INS_fmul, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_fmul, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
- theEmitter->emitIns_R_R_R_I(INS_fmul, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_R_I(INS_fmul, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by elem 4BYTE
+ theEmitter->emitIns_R_R_R_I(INS_fmul, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by elem 8BYTE
+ theEmitter->emitIns_R_R_R_I(INS_fmul, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_fmul, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_R_I(INS_fmul, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
- theEmitter->emitIns_R_R_R(INS_fmulx, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
- theEmitter->emitIns_R_R_R(INS_fmulx, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
- theEmitter->emitIns_R_R_R(INS_fmulx, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_fmulx, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_R(INS_fmulx, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
+ theEmitter->emitIns_R_R_R(INS_fmulx, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R(INS_fmulx, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_fmulx, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmulx, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
-
- theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by elem 4BYTE
- theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by elem 8BYTE
- theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
- theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
- theEmitter->emitIns_R_R_R(INS_fnmul, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
- theEmitter->emitIns_R_R_R(INS_fnmul, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by elem 4BYTE
+ theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by elem 8BYTE
+ theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
+
+ theEmitter->emitIns_R_R_R(INS_fnmul, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
+ theEmitter->emitIns_R_R_R(INS_fnmul, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
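
The four-operand forms above (emitIns_R_R_R_I with a trailing index) are the by-element multiplies: the immediate selects one lane of the last source register, e.g. fmul Vd.4S, Vn.4S, Vm.S[2] for the INS_OPTS_4S case with index 2. A lane-level sketch of what the 4S by-element form computes (function and parameter names are illustrative only):

#include <cstddef>

// Reference semantics of "fmul Vd.4S, Vn.4S, Vm.S[idx]": every lane of Vn is
// multiplied by the single lane of Vm selected by the immediate index.
static void fmulByElement4S(float dst[4], const float vn[4], const float vm[4], size_t idx)
{
    const float scalar = vm[idx];
    for (size_t lane = 0; lane < 4; ++lane)
    {
        dst[lane] = vn[lane] * scalar;
    }
}
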
@@ -9372,238 +9270,238 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
// 'sshr' scalar
- theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V8, REG_V9, 63);
// 'sshr' vector
- theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'ssra' scalar
- theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V8, REG_V9, 63);
// 'ssra' vector
- theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'srshr' scalar
- theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V8, REG_V9, 63);
// 'srshr' vector
- theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'srsra' scalar
- theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V8, REG_V9, 63);
// 'srsra' vector
- theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'shl' scalar
- theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V8, REG_V9, 63);
// 'shl' vector
- theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'ushr' scalar
- theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V8, REG_V9, 63);
// 'ushr' vector
- theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'usra' scalar
- theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V8, REG_V9, 63);
// 'usra' vector
- theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'urshr' scalar
- theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V8, REG_V9, 63);
// 'urshr' vector
- theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'ursra' scalar
- theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V8, REG_V9, 63);
     // 'ursra' vector
- theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'sri' scalar
- theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V8, REG_V9, 63);
// 'sri' vector
- theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'sli' scalar
- theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V0, REG_V1, 1);
- theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V2, REG_V3, 14);
- theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V4, REG_V5, 27);
- theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V6, REG_V7, 40);
- theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V8, REG_V9, 63);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V0, REG_V1, 1);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V2, REG_V3, 14);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V4, REG_V5, 27);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V6, REG_V7, 40);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V8, REG_V9, 63);
// 'sli' vector
- theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
- theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
- theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
- theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
+ theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// 'sshll' vector
- theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
// 'ushll' vector
- theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
// 'shrn' vector
- theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
// 'rshrn' vector
- theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
- theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
- theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
- theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
// 'sxtl' vector
- theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
- theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
- theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
- theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
+ theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
+ theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
+ theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// 'uxtl' vector
- theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
- theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
- theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
- theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
- theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
+ theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
+ theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
+ theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
+ theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
+ theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -9617,59 +9515,59 @@ void CodeGen::genArm64EmitterUnitTests()
// Specifying an Arrangement is optional
//
- theEmitter->emitIns_R_R_R(INS_and, EA_8BYTE, REG_V6, REG_V7, REG_V8);
- theEmitter->emitIns_R_R_R(INS_bic, EA_8BYTE, REG_V9, REG_V10, REG_V11);
- theEmitter->emitIns_R_R_R(INS_eor, EA_8BYTE, REG_V12, REG_V13, REG_V14);
- theEmitter->emitIns_R_R_R(INS_orr, EA_8BYTE, REG_V15, REG_V16, REG_V17);
- theEmitter->emitIns_R_R_R(INS_orn, EA_8BYTE, REG_V18, REG_V19, REG_V20);
+ theEmitter->emitIns_R_R_R(INS_and, EA_8BYTE, REG_V6, REG_V7, REG_V8);
+ theEmitter->emitIns_R_R_R(INS_bic, EA_8BYTE, REG_V9, REG_V10, REG_V11);
+ theEmitter->emitIns_R_R_R(INS_eor, EA_8BYTE, REG_V12, REG_V13, REG_V14);
+ theEmitter->emitIns_R_R_R(INS_orr, EA_8BYTE, REG_V15, REG_V16, REG_V17);
+ theEmitter->emitIns_R_R_R(INS_orn, EA_8BYTE, REG_V18, REG_V19, REG_V20);
theEmitter->emitIns_R_R_R(INS_and, EA_16BYTE, REG_V21, REG_V22, REG_V23);
theEmitter->emitIns_R_R_R(INS_bic, EA_16BYTE, REG_V24, REG_V25, REG_V26);
theEmitter->emitIns_R_R_R(INS_eor, EA_16BYTE, REG_V27, REG_V28, REG_V29);
theEmitter->emitIns_R_R_R(INS_orr, EA_16BYTE, REG_V30, REG_V31, REG_V0);
- theEmitter->emitIns_R_R_R(INS_orn, EA_16BYTE, REG_V1, REG_V2, REG_V3);
+ theEmitter->emitIns_R_R_R(INS_orn, EA_16BYTE, REG_V1, REG_V2, REG_V3);
- theEmitter->emitIns_R_R_R(INS_bsl, EA_8BYTE, REG_V4, REG_V5, REG_V6);
- theEmitter->emitIns_R_R_R(INS_bit, EA_8BYTE, REG_V7, REG_V8, REG_V9);
- theEmitter->emitIns_R_R_R(INS_bif, EA_8BYTE, REG_V10, REG_V11, REG_V12);
+ theEmitter->emitIns_R_R_R(INS_bsl, EA_8BYTE, REG_V4, REG_V5, REG_V6);
+ theEmitter->emitIns_R_R_R(INS_bit, EA_8BYTE, REG_V7, REG_V8, REG_V9);
+ theEmitter->emitIns_R_R_R(INS_bif, EA_8BYTE, REG_V10, REG_V11, REG_V12);
theEmitter->emitIns_R_R_R(INS_bsl, EA_16BYTE, REG_V13, REG_V14, REG_V15);
theEmitter->emitIns_R_R_R(INS_bit, EA_16BYTE, REG_V16, REG_V17, REG_V18);
theEmitter->emitIns_R_R_R(INS_bif, EA_16BYTE, REG_V19, REG_V20, REG_V21);
- // Default Arrangement as per the ARM64 manual
+ // Default Arrangement as per the ARM64 manual
//
- theEmitter->emitIns_R_R_R(INS_and, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_bic, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_eor, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_orr, EA_8BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_orn, EA_8BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_and, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_bic, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_eor, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_orr, EA_8BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_orn, EA_8BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_and, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bic, EA_16BYTE, REG_V24, REG_V25, REG_V26, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_eor, EA_16BYTE, REG_V27, REG_V28, REG_V29, INS_OPTS_16B);
- theEmitter->emitIns_R_R_R(INS_orr, EA_16BYTE, REG_V30, REG_V31, REG_V0, INS_OPTS_16B);
- theEmitter->emitIns_R_R_R(INS_orn, EA_16BYTE, REG_V1, REG_V2, REG_V3, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_R(INS_orr, EA_16BYTE, REG_V30, REG_V31, REG_V0, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_R(INS_orn, EA_16BYTE, REG_V1, REG_V2, REG_V3, INS_OPTS_16B);
- theEmitter->emitIns_R_R_R(INS_bsl, EA_8BYTE, REG_V4, REG_V5, REG_V6, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_bit, EA_8BYTE, REG_V7, REG_V8, REG_V9, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_bif, EA_8BYTE, REG_V10, REG_V11, REG_V12, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_bsl, EA_8BYTE, REG_V4, REG_V5, REG_V6, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_bit, EA_8BYTE, REG_V7, REG_V8, REG_V9, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_bif, EA_8BYTE, REG_V10, REG_V11, REG_V12, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_bsl, EA_16BYTE, REG_V13, REG_V14, REG_V15, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bit, EA_16BYTE, REG_V16, REG_V17, REG_V18, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bif, EA_16BYTE, REG_V19, REG_V20, REG_V21, INS_OPTS_16B);
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V0, REG_V1, REG_V2); // scalar 8BYTE
- theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V0, REG_V1, REG_V2); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_2D);
- theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V1, REG_V2, REG_V3); // scalar 8BYTE
- theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V4, REG_V5, REG_V6, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V7, REG_V8, REG_V9, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V10, REG_V11, REG_V12, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V1, REG_V2, REG_V3); // scalar 8BYTE
+ theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V4, REG_V5, REG_V6, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V7, REG_V8, REG_V9, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V10, REG_V11, REG_V12, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V13, REG_V14, REG_V15, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V16, REG_V17, REG_V18, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V19, REG_V20, REG_V21, INS_OPTS_4S);
@@ -9677,38 +9575,38 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
- // saba vector
- theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
- theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
-
- // sabd vector
- theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
- theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
-
- // uaba vector
- theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
- theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
-
- // uabd vector
- theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
- theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
-
+ // saba vector
+ theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
+
+ // sabd vector
+ theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
+
+ // uaba vector
+ theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
+
+ // uabd vector
+ theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
+
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
@@ -9718,57 +9616,57 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
- theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
- theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
+ theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
- theEmitter->emitIns_R_R_R(INS_pmul, EA_8BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_8B);
+ theEmitter->emitIns_R_R_R(INS_pmul, EA_8BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_pmul, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_16B);
// 'mul' vector by elem
- theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V14, REG_V15, REG_V18, 1, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V16, REG_V17, REG_V13, 3, INS_OPTS_4S);
- theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
// 'mla' vector by elem
- theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V14, REG_V15, REG_V18, 1, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V16, REG_V17, REG_V13, 3, INS_OPTS_4S);
- theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
// 'mls' vector by elem
- theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
- theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
+ theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V14, REG_V15, REG_V18, 1, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V16, REG_V17, REG_V13, 3, INS_OPTS_4S);
- theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
- theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
+ theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -9779,25 +9677,25 @@ void CodeGen::genArm64EmitterUnitTests()
genDefineTempLabel(genCreateTempLabel());
- theEmitter->emitIns_R_R_R(INS_fmla, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_fmla, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_R(INS_fmla, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_fmla, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmla, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
-
- theEmitter->emitIns_R_R_R_I(INS_fmla, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by elem 4BYTE
- theEmitter->emitIns_R_R_R_I(INS_fmla, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by elem 8BYTE
- theEmitter->emitIns_R_R_R_I(INS_fmla, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_fmla, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
- theEmitter->emitIns_R_R_R_I(INS_fmla, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
-
- theEmitter->emitIns_R_R_R(INS_fmls, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R(INS_fmls, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
+
+ theEmitter->emitIns_R_R_R_I(INS_fmla, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by elem 4BYTE
+ theEmitter->emitIns_R_R_R_I(INS_fmla, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by elem 8BYTE
+ theEmitter->emitIns_R_R_R_I(INS_fmla, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_fmla, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_R_I(INS_fmla, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
+
+ theEmitter->emitIns_R_R_R(INS_fmls, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R(INS_fmls, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmls, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
-
- theEmitter->emitIns_R_R_R_I(INS_fmls, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by elem 4BYTE
- theEmitter->emitIns_R_R_R_I(INS_fmls, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by elem 8BYTE
- theEmitter->emitIns_R_R_R_I(INS_fmls, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
- theEmitter->emitIns_R_R_R_I(INS_fmls, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
- theEmitter->emitIns_R_R_R_I(INS_fmls, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
+
+ theEmitter->emitIns_R_R_R_I(INS_fmls, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by elem 4BYTE
+ theEmitter->emitIns_R_R_R_I(INS_fmls, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by elem 8BYTE
+ theEmitter->emitIns_R_R_R_I(INS_fmls, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
+ theEmitter->emitIns_R_R_R_I(INS_fmls, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
+ theEmitter->emitIns_R_R_R_I(INS_fmls, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
@@ -9806,15 +9704,15 @@ void CodeGen::genArm64EmitterUnitTests()
     // R_R_R_R floating point operations, one dest, and three sources
//
- theEmitter->emitIns_R_R_R_R(INS_fmadd, EA_4BYTE, REG_V0, REG_V8, REG_V16, REG_V24);
- theEmitter->emitIns_R_R_R_R(INS_fmsub, EA_4BYTE, REG_V1, REG_V9, REG_V17, REG_V25);
- theEmitter->emitIns_R_R_R_R(INS_fnmadd, EA_4BYTE, REG_V2, REG_V10, REG_V18, REG_V26);
- theEmitter->emitIns_R_R_R_R(INS_fnmsub, EA_4BYTE, REG_V3, REG_V11, REG_V19, REG_V27);
+ theEmitter->emitIns_R_R_R_R(INS_fmadd, EA_4BYTE, REG_V0, REG_V8, REG_V16, REG_V24);
+ theEmitter->emitIns_R_R_R_R(INS_fmsub, EA_4BYTE, REG_V1, REG_V9, REG_V17, REG_V25);
+ theEmitter->emitIns_R_R_R_R(INS_fnmadd, EA_4BYTE, REG_V2, REG_V10, REG_V18, REG_V26);
+ theEmitter->emitIns_R_R_R_R(INS_fnmsub, EA_4BYTE, REG_V3, REG_V11, REG_V19, REG_V27);
- theEmitter->emitIns_R_R_R_R(INS_fmadd, EA_8BYTE, REG_V4, REG_V12, REG_V20, REG_V28);
- theEmitter->emitIns_R_R_R_R(INS_fmsub, EA_8BYTE, REG_V5, REG_V13, REG_V21, REG_V29);
- theEmitter->emitIns_R_R_R_R(INS_fnmadd, EA_8BYTE, REG_V6, REG_V14, REG_V22, REG_V30);
- theEmitter->emitIns_R_R_R_R(INS_fnmsub, EA_8BYTE, REG_V7, REG_V15, REG_V23, REG_V31);
+ theEmitter->emitIns_R_R_R_R(INS_fmadd, EA_8BYTE, REG_V4, REG_V12, REG_V20, REG_V28);
+ theEmitter->emitIns_R_R_R_R(INS_fmsub, EA_8BYTE, REG_V5, REG_V13, REG_V21, REG_V29);
+ theEmitter->emitIns_R_R_R_R(INS_fnmadd, EA_8BYTE, REG_V6, REG_V14, REG_V22, REG_V30);
+ theEmitter->emitIns_R_R_R_R(INS_fnmsub, EA_8BYTE, REG_V7, REG_V15, REG_V23, REG_V31);
#endif
diff --git a/src/jit/codegenclassic.h b/src/jit/codegenclassic.h
index 46a70aade2..bc35562e0a 100644
--- a/src/jit/codegenclassic.h
+++ b/src/jit/codegenclassic.h
@@ -6,7 +6,7 @@
// This file contains the members of CodeGen that are defined and used
// only by the "classic" JIT backend. It is included by CodeGen.h in the
// definition of the CodeGen class.
-//
+//
#ifndef _CODEGENCLASSIC_H_
#define _CODEGENCLASSIC_H_
@@ -14,744 +14,592 @@
#ifdef LEGACY_BACKEND // Not necessary (it's this way in the #include location), but helpful to IntelliSense
public:
-
- regNumber genIsEnregisteredIntVariable(GenTreePtr tree);
-
- void sched_AM (instruction ins,
- emitAttr size,
- regNumber ireg,
- bool rdst,
- GenTreePtr tree,
- unsigned offs,
- bool cons = false,
- int cval = 0,
- insFlags flags = INS_FLAGS_DONT_CARE);
+regNumber genIsEnregisteredIntVariable(GenTreePtr tree);
+
+void sched_AM(instruction ins,
+ emitAttr size,
+ regNumber ireg,
+ bool rdst,
+ GenTreePtr tree,
+ unsigned offs,
+ bool cons = false,
+ int cval = 0,
+ insFlags flags = INS_FLAGS_DONT_CARE);
protected:
-
#if FEATURE_STACK_FP_X87
- VARSET_TP genFPregVars; // mask corresponding to genFPregCnt
- unsigned genFPdeadRegCnt; // The dead unpopped part of genFPregCnt
-#endif // FEATURE_STACK_FP_X87
-
-
- //-------------------------------------------------------------------------
+VARSET_TP genFPregVars; // mask corresponding to genFPregCnt
+unsigned genFPdeadRegCnt; // The dead unpopped part of genFPregCnt
+#endif // FEATURE_STACK_FP_X87
- void genSetRegToIcon (regNumber reg,
- ssize_t val,
- var_types type = TYP_INT,
- insFlags flags = INS_FLAGS_DONT_CARE);
+//-------------------------------------------------------------------------
- regNumber genGetRegSetToIcon (ssize_t val,
- regMaskTP regBest = 0,
- var_types type = TYP_INT);
- void genDecRegBy (regNumber reg,
- ssize_t ival,
- GenTreePtr tree);
+void genSetRegToIcon(regNumber reg, ssize_t val, var_types type = TYP_INT, insFlags flags = INS_FLAGS_DONT_CARE);
- void genMulRegBy (regNumber reg,
- ssize_t ival,
- GenTreePtr tree,
- var_types dstType = TYP_INT,
- bool ovfl = false);
+regNumber genGetRegSetToIcon(ssize_t val, regMaskTP regBest = 0, var_types type = TYP_INT);
+void genDecRegBy(regNumber reg, ssize_t ival, GenTreePtr tree);
- //-------------------------------------------------------------------------
+void genMulRegBy(regNumber reg, ssize_t ival, GenTreePtr tree, var_types dstType = TYP_INT, bool ovfl = false);
- bool genRegTrashable (regNumber reg,
- GenTreePtr tree);
+//-------------------------------------------------------------------------
- //
- // Prolog functions and data (there are a few exceptions for more generally used things)
- //
+bool genRegTrashable(regNumber reg, GenTreePtr tree);
- regMaskTP genPInvokeMethodProlog(regMaskTP initRegs);
-
- void genPInvokeMethodEpilog();
-
- regNumber genPInvokeCallProlog(LclVarDsc* varDsc,
- int argSize,
- CORINFO_METHOD_HANDLE methodToken,
- BasicBlock* returnLabel);
-
- void genPInvokeCallEpilog(LclVarDsc* varDsc,
- regMaskTP retVal);
-
- regNumber genLclHeap (GenTreePtr size);
+//
+// Prolog functions and data (there are a few exceptions for more generally used things)
+//
- void genSinglePush ();
+regMaskTP genPInvokeMethodProlog(regMaskTP initRegs);
- void genSinglePop ();
+void genPInvokeMethodEpilog();
+regNumber genPInvokeCallProlog(LclVarDsc* varDsc,
+ int argSize,
+ CORINFO_METHOD_HANDLE methodToken,
+ BasicBlock* returnLabel);
- void genDyingVars (VARSET_VALARG_TP beforeSet,
- VARSET_VALARG_TP afterSet);
+void genPInvokeCallEpilog(LclVarDsc* varDsc, regMaskTP retVal);
- bool genContainsVarDeath (GenTreePtr from, GenTreePtr to, unsigned varNum);
+regNumber genLclHeap(GenTreePtr size);
- void genComputeReg (GenTreePtr tree,
- regMaskTP needReg,
- RegSet::ExactReg mustReg,
- RegSet::KeepReg keepReg,
- bool freeOnly = false);
+void genSinglePush();
- void genCompIntoFreeReg (GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg);
+void genSinglePop();
- void genReleaseReg (GenTreePtr tree);
+void genDyingVars(VARSET_VALARG_TP beforeSet, VARSET_VALARG_TP afterSet);
- void genRecoverReg (GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg);
+bool genContainsVarDeath(GenTreePtr from, GenTreePtr to, unsigned varNum);
- void genMoveRegPairHalf (GenTreePtr tree,
- regNumber dst,
- regNumber src,
- int off = 0);
+void genComputeReg(
+ GenTreePtr tree, regMaskTP needReg, RegSet::ExactReg mustReg, RegSet::KeepReg keepReg, bool freeOnly = false);
- void genMoveRegPair (GenTreePtr tree,
- regMaskTP needReg,
- regPairNo newPair);
+void genCompIntoFreeReg(GenTreePtr tree, regMaskTP needReg, RegSet::KeepReg keepReg);
- void genComputeRegPair (GenTreePtr tree,
- regPairNo needRegPair,
- regMaskTP avoidReg,
- RegSet::KeepReg keepReg,
- bool freeOnly = false);
+void genReleaseReg(GenTreePtr tree);
- void genCompIntoFreeRegPair(GenTreePtr tree,
- regMaskTP avoidReg,
- RegSet::KeepReg keepReg);
+void genRecoverReg(GenTreePtr tree, regMaskTP needReg, RegSet::KeepReg keepReg);
- void genComputeAddressable(GenTreePtr tree,
- regMaskTP addrReg,
- RegSet::KeepReg keptReg,
- regMaskTP needReg,
- RegSet::KeepReg keepReg,
- bool freeOnly = false);
+void genMoveRegPairHalf(GenTreePtr tree, regNumber dst, regNumber src, int off = 0);
- void genReleaseRegPair (GenTreePtr tree);
+void genMoveRegPair(GenTreePtr tree, regMaskTP needReg, regPairNo newPair);
- void genRecoverRegPair (GenTreePtr tree,
- regPairNo regPair,
- RegSet::KeepReg keepReg);
+void genComputeRegPair(
+ GenTreePtr tree, regPairNo needRegPair, regMaskTP avoidReg, RegSet::KeepReg keepReg, bool freeOnly = false);
- void genEvalIntoFreeRegPair(GenTreePtr tree,
- regPairNo regPair,
- regMaskTP avoidReg);
+void genCompIntoFreeRegPair(GenTreePtr tree, regMaskTP avoidReg, RegSet::KeepReg keepReg);
- void genMakeRegPairAvailable(regPairNo regPair);
+void genComputeAddressable(GenTreePtr tree,
+ regMaskTP addrReg,
+ RegSet::KeepReg keptReg,
+ regMaskTP needReg,
+ RegSet::KeepReg keepReg,
+ bool freeOnly = false);
- bool genMakeIndAddrMode (GenTreePtr addr,
- GenTreePtr oper,
- bool forLea,
- regMaskTP regMask,
- RegSet::KeepReg keepReg,
- regMaskTP* useMaskPtr,
- bool deferOp = false);
+void genReleaseRegPair(GenTreePtr tree);
- regMaskTP genMakeRvalueAddressable(GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg,
- bool forLoadStore,
- bool smallOK = false);
+void genRecoverRegPair(GenTreePtr tree, regPairNo regPair, RegSet::KeepReg keepReg);
- regMaskTP genMakeAddressable (GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg,
- bool smallOK = false,
- bool deferOK = false);
+void genEvalIntoFreeRegPair(GenTreePtr tree, regPairNo regPair, regMaskTP avoidReg);
- regMaskTP genMakeAddrArrElem (GenTreePtr arrElem,
- GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg);
+void genMakeRegPairAvailable(regPairNo regPair);
- regMaskTP genMakeAddressable2 (GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg,
- bool forLoadStore,
- bool smallOK = false,
- bool deferOK = false,
- bool evalSideEffs = false);
+bool genMakeIndAddrMode(GenTreePtr addr,
+ GenTreePtr oper,
+ bool forLea,
+ regMaskTP regMask,
+ RegSet::KeepReg keepReg,
+ regMaskTP* useMaskPtr,
+ bool deferOp = false);
- bool genStillAddressable (GenTreePtr tree);
+regMaskTP genMakeRvalueAddressable(
+ GenTreePtr tree, regMaskTP needReg, RegSet::KeepReg keepReg, bool forLoadStore, bool smallOK = false);
+regMaskTP genMakeAddressable(
+ GenTreePtr tree, regMaskTP needReg, RegSet::KeepReg keepReg, bool smallOK = false, bool deferOK = false);
- regMaskTP genRestoreAddrMode (GenTreePtr addr,
- GenTreePtr tree,
- bool lockPhase);
+regMaskTP genMakeAddrArrElem(GenTreePtr arrElem, GenTreePtr tree, regMaskTP needReg, RegSet::KeepReg keepReg);
- regMaskTP genRestAddressable (GenTreePtr tree,
- regMaskTP addrReg,
- regMaskTP lockMask);
+regMaskTP genMakeAddressable2(GenTreePtr tree,
+ regMaskTP needReg,
+ RegSet::KeepReg keepReg,
+ bool forLoadStore,
+ bool smallOK = false,
+ bool deferOK = false,
+ bool evalSideEffs = false);
- regMaskTP genKeepAddressable (GenTreePtr tree,
- regMaskTP addrReg,
- regMaskTP avoidMask = RBM_NONE);
+bool genStillAddressable(GenTreePtr tree);
- void genDoneAddressable (GenTreePtr tree,
- regMaskTP addrReg,
- RegSet::KeepReg keptReg);
+regMaskTP genRestoreAddrMode(GenTreePtr addr, GenTreePtr tree, bool lockPhase);
- GenTreePtr genMakeAddrOrFPstk (GenTreePtr tree,
- regMaskTP* regMaskPtr,
- bool roundResult);
+regMaskTP genRestAddressable(GenTreePtr tree, regMaskTP addrReg, regMaskTP lockMask);
- void genEmitGSCookieCheck(bool pushReg);
+regMaskTP genKeepAddressable(GenTreePtr tree, regMaskTP addrReg, regMaskTP avoidMask = RBM_NONE);
- void genEvalSideEffects (GenTreePtr tree);
+void genDoneAddressable(GenTreePtr tree, regMaskTP addrReg, RegSet::KeepReg keptReg);
+GenTreePtr genMakeAddrOrFPstk(GenTreePtr tree, regMaskTP* regMaskPtr, bool roundResult);
- void genCondJump (GenTreePtr cond,
- BasicBlock* destTrue = NULL,
- BasicBlock* destFalse = NULL,
- bool bStackFPFixup = true);
+void genEmitGSCookieCheck(bool pushReg);
+void genEvalSideEffects(GenTreePtr tree);
- emitJumpKind genCondSetFlags (GenTreePtr cond);
+void genCondJump(GenTreePtr cond, BasicBlock* destTrue = NULL, BasicBlock* destFalse = NULL, bool bStackFPFixup = true);
+emitJumpKind genCondSetFlags(GenTreePtr cond);
- void genJCC (genTreeOps cmp,
- BasicBlock* block,
- var_types type);
+void genJCC(genTreeOps cmp, BasicBlock* block, var_types type);
- void genJccLongHi (genTreeOps cmp,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse,
- bool unsOper = false);
+void genJccLongHi(genTreeOps cmp, BasicBlock* jumpTrue, BasicBlock* jumpFalse, bool unsOper = false);
- void genJccLongLo (genTreeOps cmp,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse);
+void genJccLongLo(genTreeOps cmp, BasicBlock* jumpTrue, BasicBlock* jumpFalse);
- void genCondJumpLng (GenTreePtr cond,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse,
- bool bFPTransition = false);
+void genCondJumpLng(GenTreePtr cond, BasicBlock* jumpTrue, BasicBlock* jumpFalse, bool bFPTransition = false);
- bool genUse_fcomip();
+bool genUse_fcomip();
- void genTableSwitch (regNumber reg,
- unsigned jumpCnt,
- BasicBlock** jumpTab);
+void genTableSwitch(regNumber reg, unsigned jumpCnt, BasicBlock** jumpTab);
- regMaskTP WriteBarrier (GenTreePtr tgt,
- GenTreePtr assignVal,
- regMaskTP addrReg);
+regMaskTP WriteBarrier(GenTreePtr tgt, GenTreePtr assignVal, regMaskTP addrReg);
- void genCodeForTreeConst (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg = RBM_NONE);
+void genCodeForTreeConst(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg = RBM_NONE);
- void genCodeForTreeLeaf (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg = RBM_NONE);
+void genCodeForTreeLeaf(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg = RBM_NONE);
- // If "tree" is a comma node, generates code for the left comma arguments,
- // in order, returning the first right argument in the list that is not
- // a comma node.
- GenTreePtr genCodeForCommaTree (GenTreePtr tree);
+// If "tree" is a comma node, generates code for the left comma arguments,
+// in order, returning the first right argument in the list that is not
+// a comma node.
+GenTreePtr genCodeForCommaTree(GenTreePtr tree);
- void genCodeForTreeLeaf_GT_JMP (GenTreePtr tree);
+void genCodeForTreeLeaf_GT_JMP(GenTreePtr tree);
- static Compiler::fgWalkPreFn fgIsVarAssignedTo;
+static Compiler::fgWalkPreFn fgIsVarAssignedTo;
- void genCodeForQmark (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForQmark(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- bool genCodeForQmarkWithCMOV (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+bool genCodeForQmarkWithCMOV(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
#ifdef _TARGET_XARCH_
- void genCodeForMultEAX (GenTreePtr tree);
+void genCodeForMultEAX(GenTreePtr tree);
#endif
#ifdef _TARGET_ARM_
- void genCodeForMult64 (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForMult64(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
#endif
- void genCodeForTreeSmpBinArithLogOp (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForTreeSmpBinArithLogOp(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForTreeSmpBinArithLogAsgOp (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForTreeSmpBinArithLogAsgOp(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForUnsignedMod (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForUnsignedMod(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForSignedMod (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForSignedMod(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForUnsignedDiv (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForUnsignedDiv(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForSignedDiv (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForSignedDiv(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForGeneralDivide (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForGeneralDivide(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForAsgShift (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForAsgShift(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForShift (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForShift(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForRelop (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+void genCodeForRelop(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForCopyObj (GenTreePtr tree,
- regMaskTP destReg);
+void genCodeForCopyObj(GenTreePtr tree, regMaskTP destReg);
- void genCodeForBlkOp (GenTreePtr tree,
- regMaskTP destReg);
+void genCodeForBlkOp(GenTreePtr tree, regMaskTP destReg);
- void genCodeForTreeSmpOp (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg = RBM_NONE);
+void genCodeForTreeSmpOp(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg = RBM_NONE);
- regNumber genIntegerCast(GenTree* tree, regMaskTP needReg, regMaskTP bestReg);
-
- void genCodeForNumericCast(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg);
+regNumber genIntegerCast(GenTree* tree, regMaskTP needReg, regMaskTP bestReg);
- void genCodeForTreeSmpOp_GT_ADDR (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg = RBM_NONE);
+void genCodeForNumericCast(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg);
- void genCodeForTreeSmpOpAsg (GenTreePtr tree);
+void genCodeForTreeSmpOp_GT_ADDR(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg = RBM_NONE);
- void genCodeForTreeSmpOpAsg_DONE_ASSG(GenTreePtr tree,
- regMaskTP addrReg,
- regNumber reg,
- bool ovfl);
+void genCodeForTreeSmpOpAsg(GenTreePtr tree);
- void genCodeForTreeSpecialOp (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg = RBM_NONE);
+void genCodeForTreeSmpOpAsg_DONE_ASSG(GenTreePtr tree, regMaskTP addrReg, regNumber reg, bool ovfl);
- void genCodeForTree (GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg = RBM_NONE);
+void genCodeForTreeSpecialOp(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg = RBM_NONE);
- void genCodeForTree_DONE_LIFE (GenTreePtr tree,
- regNumber reg)
- {
- /* We've computed the value of 'tree' into 'reg' */
+void genCodeForTree(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg = RBM_NONE);
- assert(reg != 0xFEEFFAAFu);
- assert(!IsUninitialized(reg));
+void genCodeForTree_DONE_LIFE(GenTreePtr tree, regNumber reg)
+{
+ /* We've computed the value of 'tree' into 'reg' */
- genMarkTreeInReg(tree, reg);
- }
+ assert(reg != 0xFEEFFAAFu);
+ assert(!IsUninitialized(reg));
- void genCodeForTree_DONE_LIFE (GenTreePtr tree,
- regPairNo regPair)
- {
- /* We've computed the value of 'tree' into 'regPair' */
+ genMarkTreeInReg(tree, reg);
+}
- genMarkTreeInRegPair(tree, regPair);
- }
+void genCodeForTree_DONE_LIFE(GenTreePtr tree, regPairNo regPair)
+{
+ /* We've computed the value of 'tree' into 'regPair' */
- void genCodeForTree_DONE (GenTreePtr tree,
- regNumber reg)
- {
- /* Check whether this subtree has freed up any variables */
+ genMarkTreeInRegPair(tree, regPair);
+}
- genUpdateLife(tree);
+void genCodeForTree_DONE(GenTreePtr tree, regNumber reg)
+{
+ /* Check whether this subtree has freed up any variables */
- genCodeForTree_DONE_LIFE(tree, reg);
- }
+ genUpdateLife(tree);
- void genCodeForTree_REG_VAR1 (GenTreePtr tree)
- {
- /* Value is already in a register */
+ genCodeForTree_DONE_LIFE(tree, reg);
+}
- regNumber reg = tree->gtRegNum;
+void genCodeForTree_REG_VAR1(GenTreePtr tree)
+{
+ /* Value is already in a register */
- gcInfo.gcMarkRegPtrVal(reg, tree->TypeGet());
+ regNumber reg = tree->gtRegNum;
- genCodeForTree_DONE(tree, reg);
- }
+ gcInfo.gcMarkRegPtrVal(reg, tree->TypeGet());
- void genCodeForTreeLng (GenTreePtr tree,
- regMaskTP needReg,
- regMaskTP avoidReg);
+ genCodeForTree_DONE(tree, reg);
+}
- regPairNo genCodeForLongModInt(GenTreePtr tree,
- regMaskTP needReg);
+void genCodeForTreeLng(GenTreePtr tree, regMaskTP needReg, regMaskTP avoidReg);
- unsigned genRegCountForLiveIntEnregVars(GenTreePtr tree);
+regPairNo genCodeForLongModInt(GenTreePtr tree, regMaskTP needReg);
+
+unsigned genRegCountForLiveIntEnregVars(GenTreePtr tree);
#ifdef _TARGET_ARM_
- void genStoreFromFltRetRegs (GenTreePtr tree);
- void genLoadIntoFltRetRegs (GenTreePtr tree);
- void genLdStFltRetRegsPromotedVar (LclVarDsc* varDsc, bool isLoadIntoFltReg);
+void genStoreFromFltRetRegs(GenTreePtr tree);
+void genLoadIntoFltRetRegs(GenTreePtr tree);
+void genLdStFltRetRegsPromotedVar(LclVarDsc* varDsc, bool isLoadIntoFltReg);
#endif
#if CPU_HAS_FP_SUPPORT
- void genRoundFpExpression(GenTreePtr op,
- var_types type = TYP_UNDEF);
- void genCodeForTreeFlt (GenTreePtr tree,
- regMaskTP needReg = RBM_ALLFLOAT,
- regMaskTP bestReg = RBM_NONE);
+void genRoundFpExpression(GenTreePtr op, var_types type = TYP_UNDEF);
+void genCodeForTreeFlt(GenTreePtr tree, regMaskTP needReg = RBM_ALLFLOAT, regMaskTP bestReg = RBM_NONE);
#endif
// FP stuff
-#include "fp.h"
+#include "fp.h"
- void genCodeForJumpTable (GenTreePtr tree);
- void genCodeForSwitchTable (GenTreePtr tree);
- void genCodeForSwitch (GenTreePtr tree);
+void genCodeForJumpTable(GenTreePtr tree);
+void genCodeForSwitchTable(GenTreePtr tree);
+void genCodeForSwitch(GenTreePtr tree);
- regMaskTP genPushRegs (regMaskTP regs,
- regMaskTP* byrefRegs,
- regMaskTP* noRefRegs);
- void genPopRegs (regMaskTP regs,
- regMaskTP byrefRegs,
- regMaskTP noRefRegs);
+regMaskTP genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs);
+void genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs);
- size_t genPushArgList (GenTreePtr call);
+size_t genPushArgList(GenTreePtr call);
#ifdef _TARGET_ARM_
- // We are generating code for a promoted struct local variable. Fill the next slot (register or
- // 4-byte stack slot) with one or more field variables of the promoted struct local -- or 2 such slots
- // if the next field is a 64-bit value.
- // The arguments are:
- // "arg" is the current argument node.
- //
- // "curArgTabEntry" arg table entry pointer for "arg".
- //
- // "promotedStructLocalVarDesc" describes the struct local being copied, assumed non-NULL.
- //
- // "fieldSize" is somewhat misnamed; it must be the element in the struct's GC layout describing the next slot
- // of the struct -- it will be EA_4BYTE, EA_GCREF, or EA_BYREF.
- //
- // "*pNextPromotedStructFieldVar" must be the the local variable number of the next field variable to copy;
- // this location will be updated by the call to reflect the bytes that are copied.
- //
- // "*pBytesOfNextSlotOfCurPromotedStruct" must be the number of bytes within the struct local at which the next
- // slot to be copied starts. This location will be updated by the call to reflect the bytes that are copied.
- //
- // "*pCurRegNum" must be the current argument register number, and will be updated if argument registers are filled.
- //
- // "argOffset" must be the offset of the next slot to be filled in the outgoing argument area, if the argument is to be
- // put in the outgoing arg area of the stack (or else should be INT_MAX if the next slot to be filled is a register).
- // (Strictly speaking, after the addition of "argOffsetOfFirstStackSlot", this arg is redundant, and is only used
- // in assertions, and could be removed.)
- //
- // "fieldOffsetOfFirstStackSlot" must be the offset within the promoted struct local of the first slot that should be
- // copied to the outgoing argument area -- non-zero only in the case of a struct that spans registers and stack slots.
- //
- // "argOffsetOfFirstStackSlot" must be the 4-byte-aligned offset of the first offset in the outgoing argument area which could
- // contain part of the struct. (Explicit alignment may mean it doesn't actually contain part of the struct.)
- //
- // "*deadFieldVarRegs" is an out parameter, the set of registers containing promoted field variables that become dead after
- // this (implicit) use.
- //
- // "*pRegTmp" -- if a temporary register is needed, and this is not REG_STK, uses that register. Otherwise, if it is REG_STK,
- // allocates a register, uses it, and sets "*pRegTmp" to the allocated register.
- //
- // Returns "true" iff it filled two slots with an 8-byte value.
- bool genFillSlotFromPromotedStruct(GenTreePtr arg,
- fgArgTabEntryPtr curArgTabEntry,
- LclVarDsc* promotedStructLocalVarDesc,
- emitAttr fieldSize,
- unsigned* pNextPromotedStructFieldVar, // IN/OUT
- unsigned* pBytesOfNextSlotOfCurPromotedStruct, // IN/OUT
- regNumber* pCurRegNum, // IN/OUT
- int argOffset,
- int fieldOffsetOfFirstStackSlot,
- int argOffsetOfFirstStackSlot,
- regMaskTP* deadFieldVarRegs, // OUT
- regNumber* pRegTmp); // IN/OUT
-
+// We are generating code for a promoted struct local variable. Fill the next slot (register or
+// 4-byte stack slot) with one or more field variables of the promoted struct local -- or 2 such slots
+// if the next field is a 64-bit value.
+// The arguments are:
+// "arg" is the current argument node.
+//
+// "curArgTabEntry" arg table entry pointer for "arg".
+//
+// "promotedStructLocalVarDesc" describes the struct local being copied, assumed non-NULL.
+//
+// "fieldSize" is somewhat misnamed; it must be the element in the struct's GC layout describing the next slot
+// of the struct -- it will be EA_4BYTE, EA_GCREF, or EA_BYREF.
+//
+// "*pNextPromotedStructFieldVar" must be the the local variable number of the next field variable to copy;
+// this location will be updated by the call to reflect the bytes that are copied.
+//
+// "*pBytesOfNextSlotOfCurPromotedStruct" must be the number of bytes within the struct local at which the next
+// slot to be copied starts. This location will be updated by the call to reflect the bytes that are copied.
+//
+// "*pCurRegNum" must be the current argument register number, and will be updated if argument registers are filled.
+//
+// "argOffset" must be the offset of the next slot to be filled in the outgoing argument area, if the argument is to
+// be
+// put in the outgoing arg area of the stack (or else should be INT_MAX if the next slot to be filled is a
+// register).
+// (Strictly speaking, after the addition of "argOffsetOfFirstStackSlot", this arg is redundant, and is only used
+// in assertions, and could be removed.)
+//
+// "fieldOffsetOfFirstStackSlot" must be the offset within the promoted struct local of the first slot that should be
+// copied to the outgoing argument area -- non-zero only in the case of a struct that spans registers and stack
+// slots.
+//
+// "argOffsetOfFirstStackSlot" must be the 4-byte-aligned offset of the first offset in the outgoing argument area
+// which could
+// contain part of the struct. (Explicit alignment may mean it doesn't actually contain part of the struct.)
+//
+// "*deadFieldVarRegs" is an out parameter, the set of registers containing promoted field variables that become dead
+// after
+// this (implicit) use.
+//
+// "*pRegTmp" -- if a temporary register is needed, and this is not REG_STK, uses that register. Otherwise, if it is
+// REG_STK,
+// allocates a register, uses it, and sets "*pRegTmp" to the allocated register.
+//
+// Returns "true" iff it filled two slots with an 8-byte value.
+bool genFillSlotFromPromotedStruct(GenTreePtr arg,
+ fgArgTabEntryPtr curArgTabEntry,
+ LclVarDsc* promotedStructLocalVarDesc,
+ emitAttr fieldSize,
+ unsigned* pNextPromotedStructFieldVar, // IN/OUT
+ unsigned* pBytesOfNextSlotOfCurPromotedStruct, // IN/OUT
+ regNumber* pCurRegNum, // IN/OUT
+ int argOffset,
+ int fieldOffsetOfFirstStackSlot,
+ int argOffsetOfFirstStackSlot,
+ regMaskTP* deadFieldVarRegs, // OUT
+ regNumber* pRegTmp); // IN/OUT
#endif // _TARGET_ARM_
- // Requires that "curr" is a cpblk. If the RHS is a promoted struct local,
- // then returns a regMaskTP representing the set of registers holding
- // fieldVars of the RHS that go dead with this use (as determined by the live set
- // of cpBlk).
- regMaskTP genFindDeadFieldRegs(GenTreePtr cpBlk);
+// Requires that "curr" is a cpblk. If the RHS is a promoted struct local,
+// then returns a regMaskTP representing the set of registers holding
+// fieldVars of the RHS that go dead with this use (as determined by the live set
+// of cpBlk).
+regMaskTP genFindDeadFieldRegs(GenTreePtr cpBlk);
- void SetupLateArgs (GenTreePtr call);
+void SetupLateArgs(GenTreePtr call);
#ifdef _TARGET_ARM_
- void PushMkRefAnyArg (GenTreePtr mkRefAnyTree,
- fgArgTabEntryPtr curArgTabEntry,
- regMaskTP regNeedMask);
+void PushMkRefAnyArg(GenTreePtr mkRefAnyTree, fgArgTabEntryPtr curArgTabEntry, regMaskTP regNeedMask);
#endif // _TARGET_ARM_
- regMaskTP genLoadIndirectCallTarget(GenTreePtr call);
+regMaskTP genLoadIndirectCallTarget(GenTreePtr call);
- regMaskTP genCodeForCall (GenTreePtr call,
- bool valUsed);
+regMaskTP genCodeForCall(GenTreePtr call, bool valUsed);
- GenTreePtr genGetAddrModeBase (GenTreePtr tree);
+GenTreePtr genGetAddrModeBase(GenTreePtr tree);
- GenTreePtr genIsAddrMode (GenTreePtr tree,
- GenTreePtr* indxPtr);
-private:
-
- bool genIsLocalLastUse (GenTreePtr tree);
+GenTreePtr genIsAddrMode(GenTreePtr tree, GenTreePtr* indxPtr);
- bool genIsRegCandidateLocal(GenTreePtr tree);
-
- //=========================================================================
- // Debugging support
- //=========================================================================
+private:
+bool genIsLocalLastUse(GenTreePtr tree);
+bool genIsRegCandidateLocal(GenTreePtr tree);
+//=========================================================================
+// Debugging support
+//=========================================================================
#if FEATURE_STACK_FP_X87
- /*
- XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- XX XX
- XX Flat FP model XX
- XX XX
- XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- */
-
- bool StackFPIsSameAsFloat (double d);
- bool FlatFPSameRegisters (FlatFPStateX87* pState, regMaskTP mask);
-
- // FlatFPStateX87_ functions are the actual verbs to do stuff
- // like doing a transition, loading register, etc. It's also
- // responsible for emitting the x87 code to do so. We keep
- // them in Compiler because we don't want to store a pointer to the
- // emitter.
- void FlatFPX87_Kill (FlatFPStateX87* pState, unsigned iVirtual);
- void FlatFPX87_PushVirtual (FlatFPStateX87* pState, unsigned iRegister, bool bEmitCode = true);
- unsigned FlatFPX87_Pop (FlatFPStateX87* pState, bool bEmitCode = true);
- unsigned FlatFPX87_Top (FlatFPStateX87* pState, bool bEmitCode = true);
- void FlatFPX87_Unload (FlatFPStateX87* pState, unsigned iVirtual, bool bEmitCode = true);
-#endif
-
- // Codegen functions. This is the API that codegen will use
- regMaskTP genPushArgumentStackFP (GenTreePtr arg);
- void genRoundFpExpressionStackFP (GenTreePtr op, var_types type = TYP_UNDEF);
- void genCodeForTreeStackFP_Const (GenTreePtr tree);
- void genCodeForTreeStackFP_Leaf (GenTreePtr tree);
- void genCodeForTreeStackFP_SmpOp (GenTreePtr tree);
- void genCodeForTreeStackFP_Special (GenTreePtr tree);
- void genCodeForTreeStackFP_Cast (GenTreePtr tree);
- void genCodeForTreeStackFP (GenTreePtr tree);
- void genCondJumpFltStackFP (GenTreePtr cond,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse,
- bool bDoTransition = true);
- void genCondJumpFloat (GenTreePtr cond,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse);
- void genCondJumpLngStackFP (GenTreePtr cond,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse);
-
- void genFloatConst(GenTree* tree, RegSet::RegisterPreference* pref);
- void genFloatLeaf(GenTree* tree, RegSet::RegisterPreference* pref);
- void genFloatSimple(GenTree* tree, RegSet::RegisterPreference* pref);
- void genFloatMath(GenTree* tree, RegSet::RegisterPreference* pref);
- void genFloatCheckFinite(GenTree* tree, RegSet::RegisterPreference* pref);
- void genLoadFloat(GenTreePtr tree, regNumber reg);
- void genFloatAssign(GenTree* tree);
- void genFloatArith(GenTree* tree, RegSet::RegisterPreference* pref);
- void genFloatAsgArith(GenTree* tree);
-
- regNumber genAssignArithFloat(genTreeOps oper,
- GenTreePtr dst, regNumber dstreg,
- GenTreePtr src, regNumber srcreg);
-
-
- GenTreePtr genMakeAddressableFloat(GenTreePtr tree,
- regMaskTP* regMaskIntPtr, regMaskTP* regMaskFltPtr,
- bool bCollapseConstantDoubles = true);
-
- void genCodeForTreeFloat(GenTreePtr tree,
- RegSet::RegisterPreference* pref = NULL);
-
- void genCodeForTreeFloat(GenTreePtr tree,
- regMaskTP needReg, regMaskTP bestReg);
-
- regNumber genArithmFloat(genTreeOps oper,
- GenTreePtr dst, regNumber dstreg,
- GenTreePtr src, regNumber srcreg,
- bool bReverse);
- void genCodeForTreeCastFloat(GenTreePtr tree, RegSet::RegisterPreference* pref);
- void genCodeForTreeCastToFloat(GenTreePtr tree, RegSet::RegisterPreference* pref);
- void genCodeForTreeCastFromFloat(GenTreePtr tree, RegSet::RegisterPreference* pref);
- void genKeepAddressableFloat(GenTreePtr tree, regMaskTP* regMaskIntPtr, regMaskTP* regMaskFltPtr);
- void genDoneAddressableFloat(GenTreePtr tree, regMaskTP addrRegInt, regMaskTP addrRegFlt, RegSet::KeepReg keptReg);
- void genComputeAddressableFloat(GenTreePtr tree, regMaskTP addrRegInt, regMaskTP addrRegFlt, RegSet::KeepReg keptReg, regMaskTP needReg, RegSet::KeepReg keepReg, bool freeOnly = false);
- void genRoundFloatExpression(GenTreePtr op, var_types type);
-
+/*
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XX XX
+XX Flat FP model XX
+XX XX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+*/
+
+bool StackFPIsSameAsFloat(double d);
+bool FlatFPSameRegisters(FlatFPStateX87* pState, regMaskTP mask);
+
+// FlatFPStateX87_ functions are the actual verbs to do stuff
+// like doing a transition, loading register, etc. It's also
+// responsible for emitting the x87 code to do so. We keep
+// them in Compiler because we don't want to store a pointer to the
+// emitter.
+void FlatFPX87_Kill(FlatFPStateX87* pState, unsigned iVirtual);
+void FlatFPX87_PushVirtual(FlatFPStateX87* pState, unsigned iRegister, bool bEmitCode = true);
+unsigned FlatFPX87_Pop(FlatFPStateX87* pState, bool bEmitCode = true);
+unsigned FlatFPX87_Top(FlatFPStateX87* pState, bool bEmitCode = true);
+void FlatFPX87_Unload(FlatFPStateX87* pState, unsigned iVirtual, bool bEmitCode = true);
+#endif
+// Codegen functions. This is the API that codegen will use
+regMaskTP genPushArgumentStackFP(GenTreePtr arg);
+void genRoundFpExpressionStackFP(GenTreePtr op, var_types type = TYP_UNDEF);
+void genCodeForTreeStackFP_Const(GenTreePtr tree);
+void genCodeForTreeStackFP_Leaf(GenTreePtr tree);
+void genCodeForTreeStackFP_SmpOp(GenTreePtr tree);
+void genCodeForTreeStackFP_Special(GenTreePtr tree);
+void genCodeForTreeStackFP_Cast(GenTreePtr tree);
+void genCodeForTreeStackFP(GenTreePtr tree);
+void genCondJumpFltStackFP(GenTreePtr cond, BasicBlock* jumpTrue, BasicBlock* jumpFalse, bool bDoTransition = true);
+void genCondJumpFloat(GenTreePtr cond, BasicBlock* jumpTrue, BasicBlock* jumpFalse);
+void genCondJumpLngStackFP(GenTreePtr cond, BasicBlock* jumpTrue, BasicBlock* jumpFalse);
+
+void genFloatConst(GenTree* tree, RegSet::RegisterPreference* pref);
+void genFloatLeaf(GenTree* tree, RegSet::RegisterPreference* pref);
+void genFloatSimple(GenTree* tree, RegSet::RegisterPreference* pref);
+void genFloatMath(GenTree* tree, RegSet::RegisterPreference* pref);
+void genFloatCheckFinite(GenTree* tree, RegSet::RegisterPreference* pref);
+void genLoadFloat(GenTreePtr tree, regNumber reg);
+void genFloatAssign(GenTree* tree);
+void genFloatArith(GenTree* tree, RegSet::RegisterPreference* pref);
+void genFloatAsgArith(GenTree* tree);
+
+regNumber genAssignArithFloat(genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg);
+
+GenTreePtr genMakeAddressableFloat(GenTreePtr tree,
+ regMaskTP* regMaskIntPtr,
+ regMaskTP* regMaskFltPtr,
+ bool bCollapseConstantDoubles = true);
+
+void genCodeForTreeFloat(GenTreePtr tree, RegSet::RegisterPreference* pref = NULL);
+
+void genCodeForTreeFloat(GenTreePtr tree, regMaskTP needReg, regMaskTP bestReg);
+
+regNumber genArithmFloat(
+ genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg, bool bReverse);
+void genCodeForTreeCastFloat(GenTreePtr tree, RegSet::RegisterPreference* pref);
+void genCodeForTreeCastToFloat(GenTreePtr tree, RegSet::RegisterPreference* pref);
+void genCodeForTreeCastFromFloat(GenTreePtr tree, RegSet::RegisterPreference* pref);
+void genKeepAddressableFloat(GenTreePtr tree, regMaskTP* regMaskIntPtr, regMaskTP* regMaskFltPtr);
+void genDoneAddressableFloat(GenTreePtr tree, regMaskTP addrRegInt, regMaskTP addrRegFlt, RegSet::KeepReg keptReg);
+void genComputeAddressableFloat(GenTreePtr tree,
+ regMaskTP addrRegInt,
+ regMaskTP addrRegFlt,
+ RegSet::KeepReg keptReg,
+ regMaskTP needReg,
+ RegSet::KeepReg keepReg,
+ bool freeOnly = false);
+void genRoundFloatExpression(GenTreePtr op, var_types type);
#if FEATURE_STACK_FP_X87
- // Assumes then block will be generated before else block.
- struct QmarkStateStackFP
- {
- FlatFPStateX87 stackState;
- };
+// Assumes then block will be generated before else block.
+struct QmarkStateStackFP
+{
+ FlatFPStateX87 stackState;
+};
- void genQMarkRegVarTransition (GenTreePtr nextNode, VARSET_VALARG_TP liveset);
- void genQMarkBeforeElseStackFP (QmarkStateStackFP* pState, VARSET_VALARG_TP varsetCond, GenTreePtr nextNode);
- void genQMarkAfterElseBlockStackFP (QmarkStateStackFP* pState, VARSET_VALARG_TP varsetCond, GenTreePtr nextNode);
- void genQMarkAfterThenBlockStackFP (QmarkStateStackFP* pState);
+void genQMarkRegVarTransition(GenTreePtr nextNode, VARSET_VALARG_TP liveset);
+void genQMarkBeforeElseStackFP(QmarkStateStackFP* pState, VARSET_VALARG_TP varsetCond, GenTreePtr nextNode);
+void genQMarkAfterElseBlockStackFP(QmarkStateStackFP* pState, VARSET_VALARG_TP varsetCond, GenTreePtr nextNode);
+void genQMarkAfterThenBlockStackFP(QmarkStateStackFP* pState);
#endif
- GenTreePtr genMakeAddressableStackFP (GenTreePtr tree, regMaskTP* regMaskIntPtr, regMaskTP* regMaskFltPtr, bool bCollapseConstantDoubles = true);
- void genKeepAddressableStackFP (GenTreePtr tree, regMaskTP* regMaskIntPtr, regMaskTP* regMaskFltPtr);
- void genDoneAddressableStackFP (GenTreePtr tree, regMaskTP addrRegInt, regMaskTP addrRegFlt, RegSet::KeepReg keptReg);
+GenTreePtr genMakeAddressableStackFP(GenTreePtr tree,
+ regMaskTP* regMaskIntPtr,
+ regMaskTP* regMaskFltPtr,
+ bool bCollapseConstantDoubles = true);
+void genKeepAddressableStackFP(GenTreePtr tree, regMaskTP* regMaskIntPtr, regMaskTP* regMaskFltPtr);
+void genDoneAddressableStackFP(GenTreePtr tree, regMaskTP addrRegInt, regMaskTP addrRegFlt, RegSet::KeepReg keptReg);
+void genCodeForTreeStackFP_Asg(GenTreePtr tree);
+void genCodeForTreeStackFP_AsgArithm(GenTreePtr tree);
+void genCodeForTreeStackFP_Arithm(GenTreePtr tree);
+void genCodeForTreeStackFP_DONE(GenTreePtr tree, regNumber reg);
+void genCodeForTreeFloat_DONE(GenTreePtr tree, regNumber reg);
- void genCodeForTreeStackFP_Asg (GenTreePtr tree);
- void genCodeForTreeStackFP_AsgArithm (GenTreePtr tree);
- void genCodeForTreeStackFP_Arithm (GenTreePtr tree);
- void genCodeForTreeStackFP_DONE (GenTreePtr tree, regNumber reg);
- void genCodeForTreeFloat_DONE (GenTreePtr tree, regNumber reg);
-
- void genSetupStateStackFP (BasicBlock* block);
- regMaskTP genRegMaskFromLivenessStackFP (VARSET_VALARG_TP varset);
-
- // bReverse means make op1 addressable and codegen for op2.
- // If op1 or op2 are comma expressions, will do code-gen for their non-last comma parts,
- // and set op1 and op2 to the remaining non-comma expressions.
- void genSetupForOpStackFP (GenTreePtr& op1, GenTreePtr& op2,
- bool bReverse,
- bool bMakeOp1Addressable,
- bool bOp1ReadOnly,
- bool bOp2ReadOnly);
+void genSetupStateStackFP(BasicBlock* block);
+regMaskTP genRegMaskFromLivenessStackFP(VARSET_VALARG_TP varset);
+// bReverse means make op1 addressable and codegen for op2.
+// If op1 or op2 are comma expressions, will do code-gen for their non-last comma parts,
+// and set op1 and op2 to the remaining non-comma expressions.
+void genSetupForOpStackFP(
+ GenTreePtr& op1, GenTreePtr& op2, bool bReverse, bool bMakeOp1Addressable, bool bOp1ReadOnly, bool bOp2ReadOnly);
#if FEATURE_STACK_FP_X87
-#ifdef DEBUG
- bool ConsistentAfterStatementStackFP ();
+#ifdef DEBUG
+bool ConsistentAfterStatementStackFP();
#endif
private:
- void SpillTempsStackFP (regMaskTP canSpillMask);
- void SpillForCallStackFP ();
- void UnspillRegVarsStackFp ();
-
- // Transition API. Takes care of the stack matching of basicblock boundaries
- void genCodeForPrologStackFP ();
- void genCodeForEndBlockTransitionStackFP (BasicBlock* block);
-
- void genCodeForBBTransitionStackFP (BasicBlock* pDst);
- void genCodeForTransitionStackFP (FlatFPStateX87* pSrc, FlatFPStateX87* pDst);
- void genCodeForTransitionFromMask (FlatFPStateX87* pSrc, regMaskTP mask, bool bEmitCode = true);
- BasicBlock* genTransitionBlockStackFP (FlatFPStateX87* pState, BasicBlock* pFrom, BasicBlock* pTarget);
-
- // This is the API codegen will use to emit virtual fp code. In theory, nobody above this API
- // should know about x87 instructions.
-
- int genNumberTemps ();
- void genDiscardStackFP (GenTreePtr tree);
- void genRegRenameWithMasks (regNumber dstReg, regNumber srcReg);
- void genRegVarBirthStackFP (GenTreePtr tree);
- void genRegVarBirthStackFP (LclVarDsc* varDsc);
- void genRegVarDeathStackFP (GenTreePtr tree);
- void genRegVarDeathStackFP (LclVarDsc* varDsc);
- void genLoadStackFP (GenTreePtr tree, regNumber reg);
- void genMovStackFP (GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg);
- bool genCompInsStackFP (GenTreePtr tos, GenTreePtr other);
- regNumber genArithmStackFP (genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg, bool bReverse);
- regNumber genAsgArithmStackFP (genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg);
- void genCondJmpInsStackFP (emitJumpKind jumpKind,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse,
- bool bDoTransition = true);
- void genTableSwitchStackFP (regNumber reg,
- unsigned jumpCnt,
- BasicBlock** jumpTab);
-
- void JitDumpFPState ();
-#else // !FEATURE_STACK_FP_X87
- void SpillForCallRegisterFP (regMaskTP noSpillMask);
+void SpillTempsStackFP(regMaskTP canSpillMask);
+void SpillForCallStackFP();
+void UnspillRegVarsStackFp();
+
+// Transition API. Takes care of the stack matching of basicblock boundaries
+void genCodeForPrologStackFP();
+void genCodeForEndBlockTransitionStackFP(BasicBlock* block);
+
+void genCodeForBBTransitionStackFP(BasicBlock* pDst);
+void genCodeForTransitionStackFP(FlatFPStateX87* pSrc, FlatFPStateX87* pDst);
+void genCodeForTransitionFromMask(FlatFPStateX87* pSrc, regMaskTP mask, bool bEmitCode = true);
+BasicBlock* genTransitionBlockStackFP(FlatFPStateX87* pState, BasicBlock* pFrom, BasicBlock* pTarget);
+
+// This is the API codegen will use to emit virtual fp code. In theory, nobody above this API
+// should know about x87 instructions.
+
+int genNumberTemps();
+void genDiscardStackFP(GenTreePtr tree);
+void genRegRenameWithMasks(regNumber dstReg, regNumber srcReg);
+void genRegVarBirthStackFP(GenTreePtr tree);
+void genRegVarBirthStackFP(LclVarDsc* varDsc);
+void genRegVarDeathStackFP(GenTreePtr tree);
+void genRegVarDeathStackFP(LclVarDsc* varDsc);
+void genLoadStackFP(GenTreePtr tree, regNumber reg);
+void genMovStackFP(GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg);
+bool genCompInsStackFP(GenTreePtr tos, GenTreePtr other);
+regNumber genArithmStackFP(
+ genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg, bool bReverse);
+regNumber genAsgArithmStackFP(genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg);
+void genCondJmpInsStackFP(emitJumpKind jumpKind,
+ BasicBlock* jumpTrue,
+ BasicBlock* jumpFalse,
+ bool bDoTransition = true);
+void genTableSwitchStackFP(regNumber reg, unsigned jumpCnt, BasicBlock** jumpTab);
+
+void JitDumpFPState();
+#else // !FEATURE_STACK_FP_X87
+void SpillForCallRegisterFP(regMaskTP noSpillMask);
#endif // !FEATURE_STACK_FP_X87
- // When bOnlyNoMemAccess = true, the load will be generated only for constant loading that doesn't
- // involve memory accesses (i.e., fldz for positive zero, or fld1 for 1). Will return true if the function
- // did the load
- bool genConstantLoadStackFP (GenTreePtr tree,
- bool bOnlyNoMemAccess = false);
- void genEndOfStatement ();
-
+// When bOnlyNoMemAccess = true, the load will be generated only for constant loading that doesn't
+// involve memory accesses (i.e., fldz for positive zero, or fld1 for 1). Will return true if the function
+// did the load
+bool genConstantLoadStackFP(GenTreePtr tree, bool bOnlyNoMemAccess = false);
+void genEndOfStatement();
#if FEATURE_STACK_FP_X87
- struct genRegVarDiesInSubTreeData
- {
- regNumber reg;
- bool result;
- };
- static Compiler::fgWalkPreFn genRegVarDiesInSubTreeWorker;
- bool genRegVarDiesInSubTree (GenTreePtr tree, regNumber reg);
+struct genRegVarDiesInSubTreeData
+{
+ regNumber reg;
+ bool result;
+};
+static Compiler::fgWalkPreFn genRegVarDiesInSubTreeWorker;
+bool genRegVarDiesInSubTree(GenTreePtr tree, regNumber reg);
#endif // FEATURE_STACK_FP_X87
- // Float spill
- void UnspillFloat (RegSet::SpillDsc* spillDsc);
- void UnspillFloat (GenTreePtr tree);
- void UnspillFloat (LclVarDsc* varDsc);
- void UnspillFloatMachineDep (RegSet::SpillDsc* spillDsc);
- void UnspillFloatMachineDep (RegSet::SpillDsc* spillDsc, bool useSameReg);
- void RemoveSpillDsc (RegSet::SpillDsc* spillDsc);
+// Float spill
+void UnspillFloat(RegSet::SpillDsc* spillDsc);
+void UnspillFloat(GenTreePtr tree);
+void UnspillFloat(LclVarDsc* varDsc);
+void UnspillFloatMachineDep(RegSet::SpillDsc* spillDsc);
+void UnspillFloatMachineDep(RegSet::SpillDsc* spillDsc, bool useSameReg);
+void RemoveSpillDsc(RegSet::SpillDsc* spillDsc);
-protected :
- struct genLivenessSet
+protected:
+struct genLivenessSet
+{
+ VARSET_TP liveSet;
+ VARSET_TP varPtrSet;
+ regMaskSmall maskVars;
+ regMaskSmall gcRefRegs;
+ regMaskSmall byRefRegs;
+
+ genLivenessSet()
+ : VARSET_INIT_NOCOPY(liveSet, VarSetOps::UninitVal()), VARSET_INIT_NOCOPY(varPtrSet, VarSetOps::UninitVal())
{
- VARSET_TP liveSet;
- VARSET_TP varPtrSet;
- regMaskSmall maskVars;
- regMaskSmall gcRefRegs;
- regMaskSmall byRefRegs;
-
- genLivenessSet() : VARSET_INIT_NOCOPY(liveSet, VarSetOps::UninitVal()),
- VARSET_INIT_NOCOPY(varPtrSet, VarSetOps::UninitVal())
- {}
- };
-
- void saveLiveness (genLivenessSet* ls);
- void restoreLiveness (genLivenessSet* ls);
- void checkLiveness (genLivenessSet* ls);
- void unspillLiveness (genLivenessSet* ls);
-
- //-------------------------------------------------------------------------
- //
- // If we know that the flags register is set to a value that corresponds
- // to the current value of a register or variable, the following values
- // record that information.
- //
-
- emitLocation genFlagsEqLoc;
- regNumber genFlagsEqReg;
- unsigned genFlagsEqVar;
-
- void genFlagsEqualToNone ();
- void genFlagsEqualToReg (GenTreePtr tree, regNumber reg);
- void genFlagsEqualToVar (GenTreePtr tree, unsigned var);
- bool genFlagsAreReg (regNumber reg);
- bool genFlagsAreVar (unsigned var);
+ }
+};
+
+void saveLiveness(genLivenessSet* ls);
+void restoreLiveness(genLivenessSet* ls);
+void checkLiveness(genLivenessSet* ls);
+void unspillLiveness(genLivenessSet* ls);
+
+//-------------------------------------------------------------------------
+//
+// If we know that the flags register is set to a value that corresponds
+// to the current value of a register or variable, the following values
+// record that information.
+//
+
+emitLocation genFlagsEqLoc;
+regNumber genFlagsEqReg;
+unsigned genFlagsEqVar;
+
+void genFlagsEqualToNone();
+void genFlagsEqualToReg(GenTreePtr tree, regNumber reg);
+void genFlagsEqualToVar(GenTreePtr tree, unsigned var);
+bool genFlagsAreReg(regNumber reg);
+bool genFlagsAreVar(unsigned var);
#endif // LEGACY_BACKEND
#endif // _CODEGENCLASSIC_H_
-
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index e3b95a3fae..888f07f7ed 100755
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -30,32 +30,28 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
-const BYTE genTypeSizes[] =
-{
- #define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) sz,
- #include "typelist.h"
- #undef DEF_TP
+const BYTE genTypeSizes[] = {
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) sz,
+#include "typelist.h"
+#undef DEF_TP
};
-const BYTE genTypeAlignments[] =
-{
- #define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) al,
- #include "typelist.h"
- #undef DEF_TP
+const BYTE genTypeAlignments[] = {
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) al,
+#include "typelist.h"
+#undef DEF_TP
};
-const BYTE genTypeStSzs[] =
-{
- #define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) st,
- #include "typelist.h"
- #undef DEF_TP
+const BYTE genTypeStSzs[] = {
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) st,
+#include "typelist.h"
+#undef DEF_TP
};
-const BYTE genActualTypes[] =
-{
- #define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) jitType,
- #include "typelist.h"
- #undef DEF_TP
+const BYTE genActualTypes[] = {
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) jitType,
+#include "typelist.h"
+#undef DEF_TP
};
void CodeGenInterface::setFramePointerRequiredEH(bool value)
@@ -73,7 +69,9 @@ void CodeGenInterface::setFramePointerRequiredEH(bool value)
#ifdef DEBUG
if (verbose)
+ {
printf("Method has EH, marking method as fully interruptible\n");
+ }
#endif
m_cgInterruptible = true;
@@ -82,23 +80,20 @@ void CodeGenInterface::setFramePointerRequiredEH(bool value)
}
/*****************************************************************************/
-CodeGenInterface *getCodeGenerator(Compiler *comp)
+CodeGenInterface* getCodeGenerator(Compiler* comp)
{
return new (comp, CMK_Codegen) CodeGen(comp);
}
// CodeGen constructor
-CodeGenInterface::CodeGenInterface(Compiler* theCompiler) :
- gcInfo(theCompiler),
- regSet(theCompiler, gcInfo),
- compiler(theCompiler)
+CodeGenInterface::CodeGenInterface(Compiler* theCompiler)
+ : gcInfo(theCompiler), regSet(theCompiler, gcInfo), compiler(theCompiler)
{
}
/*****************************************************************************/
-CodeGen::CodeGen(Compiler * theCompiler) :
- CodeGenInterface(theCompiler)
+CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler)
{
#if defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
negBitmaskFlt = nullptr;
@@ -109,16 +104,16 @@ CodeGen::CodeGen(Compiler * theCompiler) :
#endif // defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
regTracker.rsTrackInit(compiler, &regSet);
- gcInfo.regSet = &regSet;
- m_cgEmitter = new (compiler->getAllocator()) emitter();
+ gcInfo.regSet = &regSet;
+ m_cgEmitter = new (compiler->getAllocator()) emitter();
m_cgEmitter->codeGen = this;
- m_cgEmitter->gcInfo = &gcInfo;
+ m_cgEmitter->gcInfo = &gcInfo;
#ifdef DEBUG
setVerbose(compiler->verbose);
#endif // DEBUG
- compiler->tmpInit ();
+ compiler->tmpInit();
#ifdef DEBUG
#if defined(_TARGET_X86_) && defined(LEGACY_BACKEND)
@@ -126,7 +121,7 @@ CodeGen::CodeGen(Compiler * theCompiler) :
// are large. For ARM, this doesn't interact well with our decision about whether to use
// R10 or not as a reserved register.
if (regSet.rsStressRegs())
- compiler->tmpIntSpillMax = (SCHAR_MAX/sizeof(int));
+ compiler->tmpIntSpillMax = (SCHAR_MAX / sizeof(int));
#endif // defined(_TARGET_X86_) && defined(LEGACY_BACKEND)
#endif // DEBUG
@@ -143,9 +138,9 @@ CodeGen::CodeGen(Compiler * theCompiler) :
getDisAssembler().disInit(compiler);
#endif
-#ifdef DEBUG
- genTempLiveChg = true;
- genTrnslLocalVarCount = 0;
+#ifdef DEBUG
+ genTempLiveChg = true;
+ genTrnslLocalVarCount = 0;
// Shouldn't be used before it is set in genFnProlog()
compiler->compCalleeRegsPushed = UninitializedWord<unsigned>();
@@ -177,28 +172,27 @@ CodeGen::CodeGen(Compiler * theCompiler) :
    /* Assume that we are not fully interruptible */
- genInterruptible = false;
-#ifdef DEBUG
+ genInterruptible = false;
+#ifdef DEBUG
genInterruptibleUsed = false;
- genCurDispOffset = (unsigned) -1;
+ genCurDispOffset = (unsigned)-1;
#endif
}
void CodeGenInterface::genMarkTreeInReg(GenTreePtr tree, regNumber reg)
{
- tree->gtRegNum = reg;
- tree->gtFlags |= GTF_REG_VAL;
+ tree->gtRegNum = reg;
+ tree->gtFlags |= GTF_REG_VAL;
}
#if CPU_LONG_USES_REGPAIR
void CodeGenInterface::genMarkTreeInRegPair(GenTreePtr tree, regPairNo regPair)
{
tree->gtRegPair = regPair;
- tree->gtFlags |= GTF_REG_VAL;
+ tree->gtFlags |= GTF_REG_VAL;
}
#endif
-
#if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
//---------------------------------------------------------------------
@@ -218,14 +212,12 @@ int CodeGenInterface::genTotalFrameSize()
{
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
- int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES +
- compiler->compLclFrameSize;
+ int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
}
-
//---------------------------------------------------------------------
// genSPtoFPdelta - return the offset from SP to the frame pointer.
// This number is going to be positive, since SP must be at the lowest
@@ -245,7 +237,6 @@ int CodeGenInterface::genSPtoFPdelta()
return delta;
}
-
//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
@@ -260,7 +251,7 @@ int CodeGenInterface::genCallerSPtoFPdelta()
#if defined(_TARGET_ARM_)
// On ARM, we first push the prespill registers, then store LR, then R11 (FP), and point R11 at the saved R11.
- callerSPtoFPdelta -= genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
+ callerSPtoFPdelta -= genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
callerSPtoFPdelta -= 2 * REGSIZE_BYTES;
#elif defined(_TARGET_X86_)
// Thanks to ebp chaining, the difference between ebp-based addresses
@@ -276,7 +267,6 @@ int CodeGenInterface::genCallerSPtoFPdelta()
return callerSPtoFPdelta;
}
-
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
@@ -287,11 +277,11 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta()
int callerSPtoSPdelta = 0;
#if defined(_TARGET_ARM_)
- callerSPtoSPdelta -= genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
+ callerSPtoSPdelta -= genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
callerSPtoSPdelta -= genTotalFrameSize();
#elif defined(_TARGET_X86_)
callerSPtoSPdelta -= genTotalFrameSize();
- callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
+ callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
// compCalleeRegsPushed does not account for the frame pointer
// TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
@@ -315,20 +305,20 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta()
// inline
// static
-bool CodeGen::genShouldRoundFP()
+bool CodeGen::genShouldRoundFP()
{
RoundLevel roundLevel = getRoundFloatLevel();
switch (roundLevel)
{
- case ROUND_NEVER:
- case ROUND_CMP_CONST:
- case ROUND_CMP:
- return false;
+ case ROUND_NEVER:
+ case ROUND_CMP_CONST:
+ case ROUND_CMP:
+ return false;
- default:
- assert(roundLevel == ROUND_ALWAYS);
- return true;
+ default:
+ assert(roundLevel == ROUND_ALWAYS);
+ return true;
}
}
@@ -337,10 +327,10 @@ bool CodeGen::genShouldRoundFP()
* Initialize some global variables.
*/
-void CodeGen::genPrepForCompiler()
+void CodeGen::genPrepForCompiler()
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
/* Figure out which non-register variables hold pointers */
@@ -353,26 +343,23 @@ void CodeGen::genPrepForCompiler()
VarSetOps::AssignNoCopy(compiler, compiler->raRegVarsMask, VarSetOps::MakeEmpty(compiler));
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
- if (varDsc->lvTracked
+ if (varDsc->lvTracked
#ifndef LEGACY_BACKEND
- || varDsc->lvIsRegCandidate()
+ || varDsc->lvIsRegCandidate()
#endif // !LEGACY_BACKEND
- )
+ )
{
if (varDsc->lvRegister
#if FEATURE_STACK_FP_X87
&& !varDsc->IsFloatRegType()
#endif
- )
+ )
{
VarSetOps::AddElemD(compiler, compiler->raRegVarsMask, varDsc->lvVarIndex);
}
- else if (compiler->lvaIsGCTracked(varDsc) &&
- (!varDsc->lvIsParam || varDsc->lvIsRegArg) )
+ else if (compiler->lvaIsGCTracked(varDsc) && (!varDsc->lvIsParam || varDsc->lvIsRegArg))
{
VarSetOps::AddElemD(compiler, gcInfo.gcTrkStkPtrLcls, varDsc->lvVarIndex);
}
@@ -385,7 +372,6 @@ void CodeGen::genPrepForCompiler()
#endif
}
-
/*****************************************************************************
* To report exception handling information to the VM, we need the size of the exception
* handling regions. To compute that, we need to emit labels for the beginning block of
@@ -395,7 +381,7 @@ void CodeGen::genPrepForCompiler()
* The beginning blocks of the EH regions already should have this flag set.
*
* No blocks should be added or removed after this.
- *
+ *
 * This code is closely coupled with genReportEH() in the sense that any block
* that this procedure has determined it needs to have a label has to be selected
* using the same logic both here and in genReportEH(), so basically any time there is
@@ -403,18 +389,17 @@ void CodeGen::genPrepForCompiler()
* methods 'in sync'.
*/
-void CodeGen::genPrepForEHCodegen()
+void CodeGen::genPrepForEHCodegen()
{
assert(!compiler->fgSafeBasicBlockCreation);
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
+ EHblkDsc* HBtab;
+ EHblkDsc* HBtabEnd;
bool anyFinallys = false;
for (HBtab = compiler->compHndBBtab, HBtabEnd = compiler->compHndBBtab + compiler->compHndBBtabCount;
- HBtab < HBtabEnd;
- HBtab++)
+ HBtab < HBtabEnd; HBtab++)
{
assert(HBtab->ebdTryBeg->bbFlags & BBF_HAS_LABEL);
assert(HBtab->ebdHndBeg->bbFlags & BBF_HAS_LABEL);
@@ -462,36 +447,33 @@ void CodeGen::genPrepForEHCodegen()
bbToLabel->bbFlags |= BBF_HAS_LABEL;
}
} // block is BBJ_CALLFINALLY
- } // for each block
- } // if (anyFinallys)
-#endif // _TARGET_AMD64_
+ } // for each block
+ } // if (anyFinallys)
+#endif // _TARGET_AMD64_
}
-
-void
-CodeGenInterface::genUpdateLife (GenTreePtr tree)
+void CodeGenInterface::genUpdateLife(GenTreePtr tree)
{
- compiler->compUpdateLife</*ForCodeGen*/true>(tree);
+ compiler->compUpdateLife</*ForCodeGen*/ true>(tree);
}
-void
-CodeGenInterface::genUpdateLife (VARSET_VALARG_TP newLife)
+void CodeGenInterface::genUpdateLife(VARSET_VALARG_TP newLife)
{
- compiler->compUpdateLife</*ForCodeGen*/true>(newLife);
+ compiler->compUpdateLife</*ForCodeGen*/ true>(newLife);
}
// Returns the liveSet after tree has executed.
// "tree" MUST occur in the current statement, AFTER the most recent
// update of compiler->compCurLifeTree and compiler->compCurLife.
//
-VARSET_VALRET_TP CodeGen::genUpdateLiveSetForward(GenTreePtr tree)
+VARSET_VALRET_TP CodeGen::genUpdateLiveSetForward(GenTreePtr tree)
{
- VARSET_TP VARSET_INIT(compiler, startLiveSet, compiler->compCurLife);
+ VARSET_TP VARSET_INIT(compiler, startLiveSet, compiler->compCurLife);
GenTreePtr startNode;
assert(tree != compiler->compCurLifeTree);
- if (compiler->compCurLifeTree == NULL)
+ if (compiler->compCurLifeTree == nullptr)
{
- assert(compiler->compCurStmt != NULL);
+ assert(compiler->compCurStmt != nullptr);
startNode = compiler->compCurStmt->gtStmt.gtStmtList;
}
else
@@ -507,20 +489,19 @@ VARSET_VALRET_TP CodeGen::genUpdateLiveSetForward(GenTreePtr tree)
// 1. "first" must occur after compiler->compCurLifeTree in execution order for the current statement
// 2. "second" must occur after "first" in the current statement
//
-regMaskTP
-CodeGen::genNewLiveRegMask(GenTreePtr first, GenTreePtr second)
+regMaskTP CodeGen::genNewLiveRegMask(GenTreePtr first, GenTreePtr second)
{
// First, compute the liveset after "first"
VARSET_TP firstLiveSet = genUpdateLiveSetForward(first);
// Now, update the set forward from "first" to "second"
- VARSET_TP secondLiveSet = compiler->fgUpdateLiveSet(firstLiveSet, first->gtNext, second);
- regMaskTP newLiveMask = genLiveMask(VarSetOps::Diff(compiler, secondLiveSet, firstLiveSet));
+ VARSET_TP secondLiveSet = compiler->fgUpdateLiveSet(firstLiveSet, first->gtNext, second);
+ regMaskTP newLiveMask = genLiveMask(VarSetOps::Diff(compiler, secondLiveSet, firstLiveSet));
return newLiveMask;
}
// Return the register mask for the given register variable
// inline
-regMaskTP CodeGenInterface::genGetRegMask(const LclVarDsc * varDsc)
+regMaskTP CodeGenInterface::genGetRegMask(const LclVarDsc* varDsc)
{
regMaskTP regMask = RBM_NONE;
@@ -543,24 +524,22 @@ regMaskTP CodeGenInterface::genGetRegMask(const LclVarDsc * varDsc)
// Return the register mask for the given lclVar or regVar tree node
// inline
-regMaskTP CodeGenInterface::genGetRegMask(GenTreePtr tree)
+regMaskTP CodeGenInterface::genGetRegMask(GenTreePtr tree)
{
- assert (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_REG_VAR);
+ assert(tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_REG_VAR);
- regMaskTP regMask = RBM_NONE;
- const LclVarDsc * varDsc = compiler->lvaTable + tree->gtLclVarCommon.gtLclNum;
+ regMaskTP regMask = RBM_NONE;
+ const LclVarDsc* varDsc = compiler->lvaTable + tree->gtLclVarCommon.gtLclNum;
if (varDsc->lvPromoted)
{
- for (unsigned i = varDsc->lvFieldLclStart;
- i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
- ++i)
- {
- noway_assert(compiler->lvaTable[i].lvIsStructField);
- if (compiler->lvaTable[i].lvIsInReg())
- {
+ for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
+ {
+ noway_assert(compiler->lvaTable[i].lvIsStructField);
+ if (compiler->lvaTable[i].lvIsInReg())
+ {
regMask |= genGetRegMask(&compiler->lvaTable[i]);
}
- }
+ }
}
else if (varDsc->lvIsInReg())
{
@@ -579,40 +558,38 @@ regMaskTP CodeGenInterface::genGetRegMask(GenTreePtr tree)
// pLoReg: the address of where to write the first register
// pHiReg: the address of where to write the second register
//
-void CodeGenInterface::genGetRegPairFromMask(regMaskTP regPairMask, regNumber* pLoReg, regNumber* pHiReg)
+void CodeGenInterface::genGetRegPairFromMask(regMaskTP regPairMask, regNumber* pLoReg, regNumber* pHiReg)
{
assert(genCountBits(regPairMask) == 2);
- regMaskTP loMask = genFindLowestBit(regPairMask); // set loMask to a one-bit mask
- regMaskTP hiMask = regPairMask - loMask; // set hiMask to the other bit that was in tmpRegMask
+ regMaskTP loMask = genFindLowestBit(regPairMask); // set loMask to a one-bit mask
+ regMaskTP hiMask = regPairMask - loMask; // set hiMask to the other bit that was in tmpRegMask
- regNumber loReg = genRegNumFromMask(loMask); // set loReg from loMask
- regNumber hiReg = genRegNumFromMask(hiMask); // set hiReg from hiMask
+ regNumber loReg = genRegNumFromMask(loMask); // set loReg from loMask
+ regNumber hiReg = genRegNumFromMask(hiMask); // set hiReg from hiMask
*pLoReg = loReg;
*pHiReg = hiReg;
}
-
// The given lclVar is either going live (being born) or dying.
// It might be both going live and dying (that is, it is a dead store) under MinOpts.
// Update regSet.rsMaskVars accordingly.
// inline
-void CodeGenInterface::genUpdateRegLife(const LclVarDsc * varDsc, bool isBorn, bool isDying
- DEBUGARG(GenTreePtr tree))
+void CodeGenInterface::genUpdateRegLife(const LclVarDsc* varDsc, bool isBorn, bool isDying DEBUGARG(GenTreePtr tree))
{
#if FEATURE_STACK_FP_X87
// The stack fp reg vars are handled elsewhere
- if (varTypeIsFloating(varDsc->TypeGet())) return;
+ if (varTypeIsFloating(varDsc->TypeGet()))
+ return;
#endif
regMaskTP regMask = genGetRegMask(varDsc);
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
- printf("\t\t\t\t\t\t\tV%02u in reg ",
- (varDsc - compiler->lvaTable));
+ printf("\t\t\t\t\t\t\tV%02u in reg ", (varDsc - compiler->lvaTable));
varDsc->PrintVarReg();
printf(" is becoming %s ", (isDying) ? "dead" : "live");
Compiler::printTreeID(tree);
@@ -634,13 +611,13 @@ void CodeGenInterface::genUpdateRegLife(const LclVarDsc * varDsc,
}
}
-// Gets a register mask that represents the kill set for a helper call since
+// Gets a register mask that represents the kill set for a helper call since
// not all JIT Helper calls follow the standard ABI on the target architecture.
//
-// TODO-CQ: Currently this list is incomplete (not all helper calls are
+// TODO-CQ: Currently this list is incomplete (not all helper calls are
// enumerated) and not 100% accurate (some killsets are bigger than
// what they really are).
-// There's some work to be done in several places in the JIT to
+// There's some work to be done in several places in the JIT to
// accurately track the registers that are getting killed by
// helper calls:
// a) LSRA needs several changes to accommodate more precise killsets
@@ -652,54 +629,53 @@ void CodeGenInterface::genUpdateRegLife(const LclVarDsc * varDsc,
// both in CodeGenAmd64.cpp and emitx86.cpp.
//
// The best solution for this problem would be to try to centralize
-// the killset information in a single place but then make the
+// the killset information in a single place but then make the
// corresponding changes so every code generation phase is in sync
// about this.
-//
+//
// The interim solution is to only add known helper calls that don't
// follow the AMD64 ABI and actually trash registers that are supposed to be non-volatile.
regMaskTP Compiler::compHelperCallKillSet(CorInfoHelpFunc helper)
{
- switch(helper)
+ switch (helper)
{
- case CORINFO_HELP_ASSIGN_BYREF:
+ case CORINFO_HELP_ASSIGN_BYREF:
#if defined(_TARGET_AMD64_)
- return RBM_RSI|RBM_RDI|RBM_CALLEE_TRASH;
+ return RBM_RSI | RBM_RDI | RBM_CALLEE_TRASH;
#elif defined(_TARGET_ARM64_)
- return RBM_CALLEE_TRASH_NOGC;
+ return RBM_CALLEE_TRASH_NOGC;
#else
- NYI("Model kill set for CORINFO_HELP_ASSIGN_BYREF on target arch");
- return RBM_CALLEE_TRASH;
+ NYI("Model kill set for CORINFO_HELP_ASSIGN_BYREF on target arch");
+ return RBM_CALLEE_TRASH;
#endif
- case CORINFO_HELP_PROF_FCN_ENTER:
+ case CORINFO_HELP_PROF_FCN_ENTER:
#ifdef _TARGET_AMD64_
- return RBM_PROFILER_ENTER_TRASH;
+ return RBM_PROFILER_ENTER_TRASH;
#else
- unreached();
+ unreached();
#endif
- case CORINFO_HELP_PROF_FCN_LEAVE:
- case CORINFO_HELP_PROF_FCN_TAILCALL:
+ case CORINFO_HELP_PROF_FCN_LEAVE:
+ case CORINFO_HELP_PROF_FCN_TAILCALL:
#ifdef _TARGET_AMD64_
- return RBM_PROFILER_LEAVE_TRASH;
+ return RBM_PROFILER_LEAVE_TRASH;
#else
- unreached();
+ unreached();
#endif
- case CORINFO_HELP_STOP_FOR_GC:
- return RBM_STOP_FOR_GC_TRASH;
+ case CORINFO_HELP_STOP_FOR_GC:
+ return RBM_STOP_FOR_GC_TRASH;
- case CORINFO_HELP_INIT_PINVOKE_FRAME:
- return RBM_INIT_PINVOKE_FRAME_TRASH;
+ case CORINFO_HELP_INIT_PINVOKE_FRAME:
+ return RBM_INIT_PINVOKE_FRAME_TRASH;
- default:
- return RBM_CALLEE_TRASH;
+ default:
+ return RBM_CALLEE_TRASH;
}
}
-
-//
-// Gets a register mask that represents the kill set for "NO GC" helper calls since
+//
+// Gets a register mask that represents the kill set for "NO GC" helper calls since
// not all JIT Helper calls follow the standard ABI on the target architecture.
//
// Note: This list may not be complete and defaults to the default NOGC registers.
@@ -710,41 +686,44 @@ regMaskTP Compiler::compNoGCHelperCallKillSet(CorInfoHelpFunc helper)
#ifdef _TARGET_AMD64_
switch (helper)
{
- case CORINFO_HELP_PROF_FCN_ENTER:
- return RBM_PROFILER_ENTER_TRASH;
+ case CORINFO_HELP_PROF_FCN_ENTER:
+ return RBM_PROFILER_ENTER_TRASH;
- case CORINFO_HELP_PROF_FCN_LEAVE:
- case CORINFO_HELP_PROF_FCN_TAILCALL:
- return RBM_PROFILER_LEAVE_TRASH;
+ case CORINFO_HELP_PROF_FCN_LEAVE:
+ case CORINFO_HELP_PROF_FCN_TAILCALL:
+ return RBM_PROFILER_LEAVE_TRASH;
- case CORINFO_HELP_ASSIGN_BYREF:
- // this helper doesn't trash RSI and RDI
- return RBM_CALLEE_TRASH_NOGC & ~(RBM_RSI | RBM_RDI);
+ case CORINFO_HELP_ASSIGN_BYREF:
+ // this helper doesn't trash RSI and RDI
+ return RBM_CALLEE_TRASH_NOGC & ~(RBM_RSI | RBM_RDI);
- default:
- return RBM_CALLEE_TRASH_NOGC;
+ default:
+ return RBM_CALLEE_TRASH_NOGC;
}
#else
return RBM_CALLEE_TRASH_NOGC;
#endif
}
-
-// Update liveness (always var liveness, i.e., compCurLife, and also, if "ForCodeGen" is true, reg liveness, i.e., regSet.rsMaskVars as well)
+// Update liveness (always var liveness, i.e., compCurLife, and also, if "ForCodeGen" is true, reg liveness, i.e.,
+// regSet.rsMaskVars as well)
// if the given lclVar (or indir(addr(local)))/regVar node is going live (being born) or dying.
-template<bool ForCodeGen>
+template <bool ForCodeGen>
void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
{
GenTreePtr indirAddrLocal = fgIsIndirOfAddrOfLocal(tree);
- assert(tree->OperIsNonPhiLocal() || indirAddrLocal != NULL);
+ assert(tree->OperIsNonPhiLocal() || indirAddrLocal != nullptr);
// Get the local var tree -- if "tree" is "Ldobj(addr(x))", or "ind(addr(x))" this is "x", else it's "tree".
GenTreePtr lclVarTree = indirAddrLocal;
- if (lclVarTree == NULL) lclVarTree = tree;
+ if (lclVarTree == nullptr)
+ {
+ lclVarTree = tree;
+ }
unsigned int lclNum = lclVarTree->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = lvaTable + lclNum;
+ LclVarDsc* varDsc = lvaTable + lclNum;
-#ifdef DEBUG
+#ifdef DEBUG
#if !defined(_TARGET_AMD64_) // no addr nodes on AMD and experimenting with with encountering vars in 'random' order
// Struct fields are not traversed in a consistent order, so ignore them when
// verifying that we see the var nodes in execution order
@@ -759,11 +738,10 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
{
assert(indirAddrLocal != NULL);
}
- else if (tree->gtNext != NULL
- && tree->gtNext->gtOper == GT_ADDR
- && ((tree->gtNext->gtNext == NULL || !tree->gtNext->gtNext->OperIsIndir())))
+ else if (tree->gtNext != NULL && tree->gtNext->gtOper == GT_ADDR &&
+ ((tree->gtNext->gtNext == NULL || !tree->gtNext->gtNext->OperIsIndir())))
{
- assert(tree->IsLocal()); // Can only take the address of a local.
+ assert(tree->IsLocal()); // Can only take the address of a local.
// The ADDR might occur in a context where the address it contributes is eventually
// dereferenced, so we can't say that this is not a use or def.
}
@@ -801,13 +779,18 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
// check lvPromoted, for the case where the fields are being
// tracked.
if (!varDsc->lvTracked && !varDsc->lvPromoted)
+ {
return;
+ }
- bool isBorn = ((tree->gtFlags & GTF_VAR_DEF) != 0 &&
- (tree->gtFlags & GTF_VAR_USEASG) == 0); // if it's "x <op>= ..." then variable "x" must have had a previous, original, site to be born.
+ bool isBorn = ((tree->gtFlags & GTF_VAR_DEF) != 0 && (tree->gtFlags & GTF_VAR_USEASG) == 0); // if it's "x <op>=
+ // ..." then variable
+ // "x" must have had a
+ // previous, original,
+ // site to be born.
bool isDying = ((tree->gtFlags & GTF_VAR_DEATH) != 0);
#ifndef LEGACY_BACKEND
- bool spill = ((tree->gtFlags & GTF_SPILL) != 0);
+ bool spill = ((tree->gtFlags & GTF_SPILL) != 0);
#endif // !LEGACY_BACKEND
#ifndef LEGACY_BACKEND
@@ -819,8 +802,9 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
if (isBorn || isDying)
{
- bool hasDeadTrackedFieldVars = false; // If this is true, then, for a LDOBJ(ADDR(<promoted struct local>)),
- VARSET_TP* deadTrackedFieldVars = NULL; // *deadTrackedFieldVars indicates which tracked field vars are dying.
+ bool hasDeadTrackedFieldVars = false; // If this is true, then, for a LDOBJ(ADDR(<promoted struct local>)),
+ VARSET_TP* deadTrackedFieldVars =
+ nullptr; // *deadTrackedFieldVars indicates which tracked field vars are dying.
VARSET_TP VARSET_INIT_NOCOPY(varDeltaSet, VarSetOps::MakeEmpty(this));
if (varDsc->lvTracked)
@@ -834,7 +818,7 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
codeGen->genUpdateVarReg(varDsc, tree);
}
#endif // !LEGACY_BACKEND
- if (varDsc->lvIsInReg()
+ if (varDsc->lvIsInReg()
#ifndef LEGACY_BACKEND
&& tree->gtRegNum != REG_NA
#endif // !LEGACY_BACKEND
@@ -852,9 +836,9 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
}
else if (varDsc->lvPromoted)
{
- if (indirAddrLocal != NULL && isDying)
+ if (indirAddrLocal != nullptr && isDying)
{
- assert(!isBorn); // GTF_VAR_DEATH only set for LDOBJ last use.
+ assert(!isBorn); // GTF_VAR_DEATH only set for LDOBJ last use.
hasDeadTrackedFieldVars = GetPromotedStructDeathVars()->Lookup(indirAddrLocal, &deadTrackedFieldVars);
if (hasDeadTrackedFieldVars)
{
@@ -862,14 +846,12 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
}
}
- for (unsigned i = varDsc->lvFieldLclStart;
- i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
- ++i)
- {
- LclVarDsc * fldVarDsc = &(lvaTable[i]);
+ for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
+ {
+ LclVarDsc* fldVarDsc = &(lvaTable[i]);
noway_assert(fldVarDsc->lvIsStructField);
- if (fldVarDsc->lvTracked)
- {
+ if (fldVarDsc->lvTracked)
+ {
unsigned fldVarIndex = fldVarDsc->lvVarIndex;
noway_assert(fldVarIndex < lvaTrackedCount);
if (!hasDeadTrackedFieldVars)
@@ -882,7 +864,10 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
if (fldVarDsc->lvIsInReg())
{
#ifndef LEGACY_BACKEND
- if (isBorn) codeGen->genUpdateVarReg(fldVarDsc, tree);
+ if (isBorn)
+ {
+ codeGen->genUpdateVarReg(fldVarDsc, tree);
+ }
#endif // !LEGACY_BACKEND
codeGen->genUpdateRegLife(fldVarDsc, isBorn, isDying DEBUGARG(tree));
}
@@ -899,7 +884,10 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
if (lvaTable[i].lvIsInReg())
{
#ifndef LEGACY_BACKEND
- if (isBorn) codeGen->genUpdateVarReg(fldVarDsc, tree);
+ if (isBorn)
+ {
+ codeGen->genUpdateVarReg(fldVarDsc, tree);
+ }
#endif // !LEGACY_BACKEND
codeGen->genUpdateRegLife(fldVarDsc, isBorn, isDying DEBUGARG(tree));
}
@@ -911,7 +899,7 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
#endif // !LEGACY_BACKEND
}
}
- }
+ }
}
// First, update the live set
@@ -921,7 +909,7 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
// through a qmark/colon tree, we may encounter multiple last-use nodes.
// assert (VarSetOps::IsSubset(compiler, regVarDeltaSet, newLife));
VarSetOps::DiffD(this, newLife, varDeltaSet);
- if (pLastUseVars != NULL)
+ if (pLastUseVars != nullptr)
{
VarSetOps::Assign(this, *pLastUseVars, varDeltaSet);
}
@@ -929,7 +917,7 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
else
{
// This shouldn't be in newLife, unless this is debug code, in which
- // case we keep vars live everywhere, OR the variable is address-exposed,
+ // case we keep vars live everywhere, OR the variable is address-exposed,
// OR this block is part of a try block, in which case it may be live at the handler
// Could add a check that, if it's in newLife, that it's also in
// fgGetHandlerLiveVars(compCurBB), but seems excessive
@@ -941,7 +929,7 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
}
}
- if (!VarSetOps::Equal(this, compCurLife, newLife))
+ if (!VarSetOps::Equal(this, compCurLife, newLife))
{
#ifdef DEBUG
if (verbose)
@@ -960,10 +948,13 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
{
#ifndef LEGACY_BACKEND
- // Only add vars to the gcInfo.gcVarPtrSetCur if they are currently on stack, since the gcInfo.gcTrkStkPtrLcls
+ // Only add vars to the gcInfo.gcVarPtrSetCur if they are currently on stack, since the
+ // gcInfo.gcTrkStkPtrLcls
// includes all TRACKED vars that EVER live on the stack (i.e. are not always in a register).
- VARSET_TP VARSET_INIT_NOCOPY(gcTrkStkDeltaSet, VarSetOps::Intersection(this, codeGen->gcInfo.gcTrkStkPtrLcls, stackVarDeltaSet));
- if (!VarSetOps::IsEmpty(this, gcTrkStkDeltaSet))
+ VARSET_TP VARSET_INIT_NOCOPY(gcTrkStkDeltaSet,
+ VarSetOps::Intersection(this, codeGen->gcInfo.gcTrkStkPtrLcls,
+ stackVarDeltaSet));
+ if (!VarSetOps::IsEmpty(this, gcTrkStkDeltaSet))
{
#ifdef DEBUG
if (verbose)
@@ -997,7 +988,8 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
#ifdef DEBUG
if (verbose)
{
- VARSET_TP VARSET_INIT_NOCOPY(gcVarPtrSetNew, VarSetOps::Intersection(this, newLife, codeGen->gcInfo.gcTrkStkPtrLcls));
+ VARSET_TP VARSET_INIT_NOCOPY(gcVarPtrSetNew,
+ VarSetOps::Intersection(this, newLife, codeGen->gcInfo.gcTrkStkPtrLcls));
if (!VarSetOps::Equal(this, codeGen->gcInfo.gcVarPtrSetCur, gcVarPtrSetNew))
{
printf("\t\t\t\t\t\t\tGCvars: ");
@@ -1009,7 +1001,8 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
}
#endif // DEBUG
- VarSetOps::AssignNoCopy(this, codeGen->gcInfo.gcVarPtrSetCur, VarSetOps::Intersection(this, newLife, codeGen->gcInfo.gcTrkStkPtrLcls));
+ VarSetOps::AssignNoCopy(this, codeGen->gcInfo.gcVarPtrSetCur,
+ VarSetOps::Intersection(this, newLife, codeGen->gcInfo.gcTrkStkPtrLcls));
#endif // LEGACY_BACKEND
@@ -1044,12 +1037,12 @@ void Compiler::compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars)
// Need an explicit instantiation.
template void Compiler::compUpdateLifeVar<false>(GenTreePtr tree, VARSET_TP* pLastUseVars);
-template<bool ForCodeGen>
-void Compiler::compChangeLife(VARSET_VALARG_TP newLife DEBUGARG(GenTreePtr tree))
+template <bool ForCodeGen>
+void Compiler::compChangeLife(VARSET_VALARG_TP newLife DEBUGARG(GenTreePtr tree))
{
- LclVarDsc * varDsc;
+ LclVarDsc* varDsc;
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
if (tree != nullptr)
@@ -1097,7 +1090,8 @@ void Compiler::compChangeLife(VARSET_VALARG_TP newLife DEBUGARG(G
VarSetOps::IntersectionD(this, deadSet, raRegVarsMask);
VarSetOps::IntersectionD(this, bornSet, raRegVarsMask);
// And all gcTrkStkPtrLcls that are now live will be on the stack
- VarSetOps::AssignNoCopy(this, codeGen->gcInfo.gcVarPtrSetCur, VarSetOps::Intersection(this, newLife, codeGen->gcInfo.gcTrkStkPtrLcls));
+ VarSetOps::AssignNoCopy(this, codeGen->gcInfo.gcVarPtrSetCur,
+ VarSetOps::Intersection(this, newLife, codeGen->gcInfo.gcTrkStkPtrLcls));
#endif // LEGACY_BACKEND
VarSetOps::Assign(this, compCurLife, newLife);
@@ -1109,9 +1103,9 @@ void Compiler::compChangeLife(VARSET_VALARG_TP newLife DEBUGARG(G
while (deadIter.NextElem(this, &deadVarIndex))
{
unsigned varNum = lvaTrackedToVarNum[deadVarIndex];
- varDsc = lvaTable + varNum;
- bool isGCRef = (varDsc->TypeGet() == TYP_REF);
- bool isByRef = (varDsc->TypeGet() == TYP_BYREF);
+ varDsc = lvaTable + varNum;
+ bool isGCRef = (varDsc->TypeGet() == TYP_REF);
+ bool isByRef = (varDsc->TypeGet() == TYP_BYREF);
if (varDsc->lvIsInReg())
{
@@ -1144,11 +1138,11 @@ void Compiler::compChangeLife(VARSET_VALARG_TP newLife DEBUGARG(G
while (bornIter.NextElem(this, &bornVarIndex))
{
unsigned varNum = lvaTrackedToVarNum[bornVarIndex];
- varDsc = lvaTable + varNum;
- bool isGCRef = (varDsc->TypeGet() == TYP_REF);
- bool isByRef = (varDsc->TypeGet() == TYP_BYREF);
+ varDsc = lvaTable + varNum;
+ bool isGCRef = (varDsc->TypeGet() == TYP_REF);
+ bool isByRef = (varDsc->TypeGet() == TYP_BYREF);
- if (varDsc->lvIsInReg())
+ if (varDsc->lvIsInReg())
{
#ifndef LEGACY_BACKEND
#ifdef DEBUG
@@ -1190,7 +1184,7 @@ template void Compiler::compChangeLife<true>(VARSET_VALARG_TP newLife DEBUGARG(G
/*****************************************************************************
*
- * Get the mask of integer registers that contain 'live' enregistered
+ * Get the mask of integer registers that contain 'live' enregistered
* local variables after "tree".
*
* The output is the mask of integer registers that are currently
@@ -1201,63 +1195,63 @@ regMaskTP CodeGenInterface::genLiveMask(GenTreePtr tree)
regMaskTP liveMask = regSet.rsMaskVars;
GenTreePtr nextNode;
- if (compiler->compCurLifeTree == NULL)
+ if (compiler->compCurLifeTree == nullptr)
{
- assert(compiler->compCurStmt != NULL);
+ assert(compiler->compCurStmt != nullptr);
nextNode = compiler->compCurStmt->gtStmt.gtStmtList;
}
else
{
nextNode = compiler->compCurLifeTree->gtNext;
}
-
+
// Theoretically, we should always be able to find "tree" by walking
// forward in execution order. But unfortunately, there is at least
// one case (addressing) where a node may be evaluated out of order
// So, we have to handle that case
bool outOfOrder = false;
- for ( ;
- nextNode != tree->gtNext;
- nextNode = nextNode->gtNext)
+ for (; nextNode != tree->gtNext; nextNode = nextNode->gtNext)
{
- if (nextNode == NULL)
+ if (nextNode == nullptr)
{
outOfOrder = true;
break;
}
if (nextNode->gtOper == GT_LCL_VAR || nextNode->gtOper == GT_REG_VAR)
{
- bool isBorn = ((tree->gtFlags & GTF_VAR_DEF) != 0 &&
- (tree->gtFlags & GTF_VAR_USEASG) == 0);
+ bool isBorn = ((tree->gtFlags & GTF_VAR_DEF) != 0 && (tree->gtFlags & GTF_VAR_USEASG) == 0);
bool isDying = ((nextNode->gtFlags & GTF_VAR_DEATH) != 0);
if (isBorn || isDying)
{
regMaskTP regMask = genGetRegMask(nextNode);
if (regMask != RBM_NONE)
{
- if (isBorn) liveMask |= regMask;
- else liveMask &= ~(regMask);
+ if (isBorn)
+ {
+ liveMask |= regMask;
+ }
+ else
+ {
+ liveMask &= ~(regMask);
+ }
}
}
}
}
if (outOfOrder)
{
- assert(compiler->compCurLifeTree != NULL);
+ assert(compiler->compCurLifeTree != nullptr);
liveMask = regSet.rsMaskVars;
// We were unable to find "tree" by traversing forward. We must now go
// backward from compiler->compCurLifeTree instead. We have to start with compiler->compCurLifeTree,
// since regSet.rsMaskVars reflects its completed execution
- for (nextNode = compiler->compCurLifeTree;
- nextNode != tree;
- nextNode = nextNode->gtPrev)
+ for (nextNode = compiler->compCurLifeTree; nextNode != tree; nextNode = nextNode->gtPrev)
{
- assert(nextNode != NULL);
+ assert(nextNode != nullptr);
if (nextNode->gtOper == GT_LCL_VAR || nextNode->gtOper == GT_REG_VAR)
{
- bool isBorn = ((tree->gtFlags & GTF_VAR_DEF) != 0 &&
- (tree->gtFlags & GTF_VAR_USEASG) == 0);
+ bool isBorn = ((tree->gtFlags & GTF_VAR_DEF) != 0 && (tree->gtFlags & GTF_VAR_USEASG) == 0);
bool isDying = ((nextNode->gtFlags & GTF_VAR_DEATH) != 0);
if (isBorn || isDying)
{
@@ -1266,8 +1260,14 @@ regMaskTP CodeGenInterface::genLiveMask(GenTreePtr tree)
{
// We're going backward - so things born are removed
// and vice versa
- if (isBorn) liveMask &= ~(regMask);
- else liveMask |= regMask;
+ if (isBorn)
+ {
+ liveMask &= ~(regMask);
+ }
+ else
+ {
+ liveMask |= regMask;
+ }
}
}
}
@@ -1278,7 +1278,7 @@ regMaskTP CodeGenInterface::genLiveMask(GenTreePtr tree)
/*****************************************************************************
*
- * Get the mask of integer registers that contain 'live' enregistered
+ * Get the mask of integer registers that contain 'live' enregistered
* local variables.
* The input is a liveSet which contains a set of local
@@ -1292,11 +1292,15 @@ regMaskTP CodeGenInterface::genLiveMask(VARSET_VALARG_TP liveSet)
{
// Check for the zero LiveSet mask
if (VarSetOps::IsEmpty(compiler, liveSet))
+ {
return RBM_NONE;
+ }
- // set if our liveSet matches the one we have cached: genLastLiveSet -> genLastLiveMask
+ // set if our liveSet matches the one we have cached: genLastLiveSet -> genLastLiveMask
if (VarSetOps::Equal(compiler, liveSet, genLastLiveSet))
- return genLastLiveMask;
+ {
+ return genLastLiveMask;
+ }
regMaskTP liveMask = 0;
@@ -1306,16 +1310,20 @@ regMaskTP CodeGenInterface::genLiveMask(VARSET_VALARG_TP liveSet)
// If the variable is not enregistered, then it can't contribute to the liveMask
if (!VarSetOps::IsMember(compiler, compiler->raRegVarsMask, varIndex))
+ {
continue;
+ }
// Find the variable in compiler->lvaTable
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- LclVarDsc *varDsc = compiler->lvaTable + varNum;
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
#if !FEATURE_FP_REGALLOC
// If the variable is a floating point type, then it can't contribute to the liveMask
if (varDsc->IsFloatRegType())
+ {
continue;
+ }
#endif
noway_assert(compiler->lvaTable[varNum].lvRegister);
@@ -1328,10 +1336,12 @@ regMaskTP CodeGenInterface::genLiveMask(VARSET_VALARG_TP liveSet)
else
{
regBit = genRegMask(varDsc->lvRegNum);
-
+
// For longs we may have two regs
- if (isRegPairType(varDsc->lvType) && varDsc->lvOtherReg != REG_STK)
+ if (isRegPairType(varDsc->lvType) && varDsc->lvOtherReg != REG_STK)
+ {
regBit |= genRegMask(varDsc->lvOtherReg);
+ }
}
noway_assert(regBit != 0);
@@ -1343,7 +1353,7 @@ regMaskTP CodeGenInterface::genLiveMask(VARSET_VALARG_TP liveSet)
liveMask |= regBit;
}
- // cache the last mapping between gtLiveSet -> liveMask
+ // cache the last mapping between gtLiveSet -> liveMask
VarSetOps::Assign(compiler, genLastLiveSet, liveSet);
genLastLiveMask = liveMask;
@@ -1354,44 +1364,32 @@ regMaskTP CodeGenInterface::genLiveMask(VARSET_VALARG_TP liveSet)
*
* Generate a spill.
*/
-void CodeGenInterface::spillReg(var_types type, TempDsc* tmp, regNumber reg)
+void CodeGenInterface::spillReg(var_types type, TempDsc* tmp, regNumber reg)
{
- getEmitter()->emitIns_S_R(ins_Store(type),
- emitActualTypeSize(type),
- reg,
- tmp->tdTempNum(),
- 0);
+ getEmitter()->emitIns_S_R(ins_Store(type), emitActualTypeSize(type), reg, tmp->tdTempNum(), 0);
}
/*****************************************************************************
*
* Generate a reload.
*/
-void CodeGenInterface::reloadReg(var_types type, TempDsc* tmp, regNumber reg)
+void CodeGenInterface::reloadReg(var_types type, TempDsc* tmp, regNumber reg)
{
- getEmitter()->emitIns_R_S(ins_Load(type),
- emitActualTypeSize(type),
- reg,
- tmp->tdTempNum(),
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(type), emitActualTypeSize(type), reg, tmp->tdTempNum(), 0);
}
#ifdef LEGACY_BACKEND
#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
-void CodeGenInterface::reloadFloatReg(var_types type, TempDsc* tmp, regNumber reg)
+void CodeGenInterface::reloadFloatReg(var_types type, TempDsc* tmp, regNumber reg)
{
var_types tmpType = tmp->tdTempType();
- getEmitter()->emitIns_R_S(ins_FloatLoad(type),
- emitActualTypeSize(tmpType),
- reg,
- tmp->tdTempNum(),
- 0);
+ getEmitter()->emitIns_R_S(ins_FloatLoad(type), emitActualTypeSize(tmpType), reg, tmp->tdTempNum(), 0);
}
#endif
#endif // LEGACY_BACKEND
// inline
-regNumber CodeGenInterface::genGetThisArgReg(GenTreePtr call)
+regNumber CodeGenInterface::genGetThisArgReg(GenTreePtr call)
{
noway_assert(call->IsCall());
return REG_ARG_0;
@@ -1405,7 +1403,7 @@ regNumber CodeGenInterface::genGetThisArgReg(GenTreePtr call)
//
// Return Value:
// TempDsc corresponding to tree
-TempDsc* CodeGenInterface::getSpillTempDsc(GenTree* tree)
+TempDsc* CodeGenInterface::getSpillTempDsc(GenTree* tree)
{
// tree must be in spilled state.
assert((tree->gtFlags & GTF_SPILLED) != 0);
@@ -1434,7 +1432,7 @@ TempDsc* CodeGenInterface::getSpillTempDsc(GenTree* tree)
//
unsigned short CodeGenInterface::genAddrRelocTypeHint(size_t addr)
{
- return compiler->eeGetRelocTypeHint((void *)addr);
+ return compiler->eeGetRelocTypeHint((void*)addr);
}
#endif //_TARGET_AMD64_
@@ -1453,7 +1451,7 @@ bool CodeGenInterface::genDataIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
{
#ifdef _TARGET_AMD64_
return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32;
-#else
+#else
// x86: PC-relative addressing is available only for control flow instructions (jmp and call)
return false;
#endif
@@ -1474,7 +1472,7 @@ bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
{
#ifdef _TARGET_AMD64_
return genAddrRelocTypeHint(addr) == IMAGE_REL_BASED_REL32;
-#else
+#else
// x86: PC-relative addressing is available only for control flow instructions (jmp and call)
return true;
#endif
@@ -1513,7 +1511,7 @@ bool CodeGenInterface::genCodeIndirAddrNeedsReloc(size_t addr)
}
#ifdef _TARGET_AMD64_
- // If code addr could be encoded as 32-bit offset relative to IP, we need to record a relocation.
+ // If code addr could be encoded as 32-bit offset relative to IP, we need to record a relocation.
if (genCodeIndirAddrCanBeEncodedAsPCRelOffset(addr))
{
return true;
@@ -1522,8 +1520,8 @@ bool CodeGenInterface::genCodeIndirAddrNeedsReloc(size_t addr)
// It could be possible that the code indir addr could be encoded as 32-bit displacement relative
// to zero. But we don't need to emit a relocation in that case.
return false;
-#else //_TARGET_X86_
- // On x86 there is need for recording relocations during jitting,
+#else //_TARGET_X86_
+ // On x86 there is need for recording relocations during jitting,
// because all addrs fit within 32-bits.
return false;
#endif //_TARGET_X86_
@@ -1549,15 +1547,14 @@ bool CodeGenInterface::genCodeAddrNeedsReloc(size_t addr)
// By default all direct code addresses go through relocation so that VM will setup
// a jump stub if addr cannot be encoded as pc-relative offset.
return true;
-#else //_TARGET_X86_
- // On x86 there is no need for recording relocations during jitting,
+#else //_TARGET_X86_
+ // On x86 there is no need for recording relocations during jitting,
// because all addrs fit within 32-bits.
return false;
#endif //_TARGET_X86_
}
#endif //_TARGET_XARCH_
-
/*****************************************************************************
*
* The following can be used to create basic blocks that serve as labels for
@@ -1566,20 +1563,20 @@ bool CodeGenInterface::genCodeAddrNeedsReloc(size_t addr)
*/
// inline
-BasicBlock * CodeGen::genCreateTempLabel()
+BasicBlock* CodeGen::genCreateTempLabel()
{
#ifdef DEBUG
// These blocks don't affect FP
compiler->fgSafeBasicBlockCreation = true;
#endif
- BasicBlock * block = compiler->bbNewBasicBlock(BBJ_NONE);
+ BasicBlock* block = compiler->bbNewBasicBlock(BBJ_NONE);
#ifdef DEBUG
compiler->fgSafeBasicBlockCreation = false;
#endif
- block->bbFlags |= BBF_JMP_TARGET|BBF_HAS_LABEL;
+ block->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
// Use coldness of current block, as this label will
// be contained in it.
@@ -1588,24 +1585,25 @@ BasicBlock * CodeGen::genCreateTempLabel()
#ifdef DEBUG
block->bbTgtStkDepth = genStackLevel / sizeof(int);
#endif
- return block;
+ return block;
}
-
// inline
-void CodeGen::genDefineTempLabel(BasicBlock *label)
+void CodeGen::genDefineTempLabel(BasicBlock* label)
{
-#ifdef DEBUG
- if (compiler->opts.dspCode) printf("\n L_M%03u_BB%02u:\n", Compiler::s_compMethodsCount, label->bbNum);
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
+ {
+ printf("\n L_M%03u_BB%02u:\n", Compiler::s_compMethodsCount, label->bbNum);
+ }
#endif
- label->bbEmitCookie = getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur);
-
+ label->bbEmitCookie =
+ getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+
/* gcInfo.gcRegGCrefSetCur does not account for redundant load-suppression
of GC vars, and the emitter will not know about */
-
+
regTracker.rsTrackRegClrPtr();
}
@@ -1616,11 +1614,11 @@ void CodeGen::genDefineTempLabel(BasicBlock *label)
* return value) are used at this point.
*/
-void CodeGen::genAdjustSP(ssize_t delta)
+void CodeGen::genAdjustSP(ssize_t delta)
{
#ifdef _TARGET_X86_
- if (delta == sizeof(int))
- inst_RV (INS_pop, REG_ECX, TYP_INT);
+ if (delta == sizeof(int))
+ inst_RV(INS_pop, REG_ECX, TYP_INT);
else
#endif
inst_RV_IV(INS_add, REG_SPBASE, delta, EA_PTRSIZE);
@@ -1629,7 +1627,7 @@ void CodeGen::genAdjustSP(ssize_t delta)
#ifdef _TARGET_ARM_
// return size
// alignmentWB is out param
-unsigned CodeGenInterface::InferOpSizeAlign(GenTreePtr op, unsigned *alignmentWB)
+unsigned CodeGenInterface::InferOpSizeAlign(GenTreePtr op, unsigned* alignmentWB)
{
unsigned alignment = 0;
unsigned opSize = 0;
@@ -1638,7 +1636,7 @@ unsigned CodeGenInterface::InferOpSizeAlign(GenTreePtr op, unsigned *alignmentWB
{
opSize = InferStructOpSizeAlign(op, &alignment);
}
- else
+ else
{
alignment = genTypeAlignments[op->TypeGet()];
opSize = genTypeSizes[op->TypeGet()];
@@ -1652,7 +1650,7 @@ unsigned CodeGenInterface::InferOpSizeAlign(GenTreePtr op, unsigned *alignmentWB
}
// return size
// alignmentWB is out param
-unsigned CodeGenInterface::InferStructOpSizeAlign(GenTreePtr op, unsigned *alignmentWB)
+unsigned CodeGenInterface::InferStructOpSizeAlign(GenTreePtr op, unsigned* alignmentWB)
{
unsigned alignment = 0;
unsigned opSize = 0;
@@ -1665,13 +1663,13 @@ unsigned CodeGenInterface::InferStructOpSizeAlign(GenTreePtr op, unsigned *align
if (op->gtOper == GT_OBJ)
{
CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->gtClass;
- opSize = compiler->info.compCompHnd->getClassSize(clsHnd);
+ opSize = compiler->info.compCompHnd->getClassSize(clsHnd);
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else if (op->gtOper == GT_LCL_VAR)
{
- unsigned varNum = op->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = compiler->lvaTable + varNum;
+ unsigned varNum = op->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
assert(varDsc->lvType == TYP_STRUCT);
opSize = varDsc->lvSize();
if (varDsc->lvStructDoubleAlign)
@@ -1691,14 +1689,15 @@ unsigned CodeGenInterface::InferStructOpSizeAlign(GenTreePtr op, unsigned *align
{
if (op2->IsIconHandle(GTF_ICON_CLASS_HDL))
{
- CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE) op2->gtIntCon.gtIconVal;
- opSize = roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
- alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
+ CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)op2->gtIntCon.gtIconVal;
+ opSize = roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
+ alignment =
+ roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else
{
- opSize = op2->gtIntCon.gtIconVal;
- GenTreePtr op1 = op->gtOp.gtOp1;
+ opSize = op2->gtIntCon.gtIconVal;
+ GenTreePtr op1 = op->gtOp.gtOp1;
assert(op1->OperGet() == GT_LIST);
GenTreePtr dstAddr = op1->gtOp.gtOp1;
if (dstAddr->OperGet() == GT_ADDR)
@@ -1788,18 +1787,18 @@ unsigned CodeGenInterface::InferStructOpSizeAlign(GenTreePtr op, unsigned *align
* form an address mode later on.
*/
-bool CodeGen::genCreateAddrMode(GenTreePtr addr,
- int mode,
- bool fold,
- regMaskTP regMask,
- bool * revPtr,
- GenTreePtr * rv1Ptr,
- GenTreePtr * rv2Ptr,
+bool CodeGen::genCreateAddrMode(GenTreePtr addr,
+ int mode,
+ bool fold,
+ regMaskTP regMask,
+ bool* revPtr,
+ GenTreePtr* rv1Ptr,
+ GenTreePtr* rv2Ptr,
#if SCALED_ADDR_MODES
- unsigned * mulPtr,
+ unsigned* mulPtr,
#endif
- unsigned * cnsPtr,
- bool nogen)
+ unsigned* cnsPtr,
+ bool nogen)
{
#ifndef LEGACY_BACKEND
assert(nogen == true);
@@ -1833,31 +1832,35 @@ bool CodeGen::genCreateAddrMode(GenTreePtr addr,
/* All indirect address modes require the address to be an addition */
- if (addr->gtOper != GT_ADD)
+ if (addr->gtOper != GT_ADD)
+ {
return false;
+ }
// Can't use indirect addressing mode as we need to check for overflow.
// Also, can't use 'lea' as it doesn't set the flags.
if (addr->gtOverflow())
+ {
return false;
+ }
- GenTreePtr rv1 = 0;
- GenTreePtr rv2 = 0;
+ GenTreePtr rv1 = nullptr;
+ GenTreePtr rv2 = nullptr;
- GenTreePtr op1;
- GenTreePtr op2;
+ GenTreePtr op1;
+ GenTreePtr op2;
- ssize_t cns;
+ ssize_t cns;
#if SCALED_ADDR_MODES
- unsigned mul;
+ unsigned mul;
#endif
- GenTreePtr tmp;
+ GenTreePtr tmp;
/* What order are the sub-operands to be evaluated */
- if (addr->gtFlags & GTF_REVERSE_OPS)
+ if (addr->gtFlags & GTF_REVERSE_OPS)
{
op1 = addr->gtOp.gtOp2;
op2 = addr->gtOp.gtOp1;
@@ -1868,7 +1871,7 @@ bool CodeGen::genCreateAddrMode(GenTreePtr addr,
op2 = addr->gtOp.gtOp2;
}
- bool rev = false; // Is op2 first in the evaluation order?
+ bool rev = false; // Is op2 first in the evaluation order?
/*
A complex address mode can combine the following operands:
@@ -1903,21 +1906,23 @@ AGAIN:
#ifdef LEGACY_BACKEND
/* Check both operands as far as being register variables */
- if (mode != -1)
+ if (mode != -1)
{
- if (op1->gtOper == GT_LCL_VAR) genMarkLclVar(op1);
- if (op2->gtOper == GT_LCL_VAR) genMarkLclVar(op2);
+ if (op1->gtOper == GT_LCL_VAR)
+ genMarkLclVar(op1);
+ if (op2->gtOper == GT_LCL_VAR)
+ genMarkLclVar(op2);
}
#endif // LEGACY_BACKEND
/* Special case: keep constants as 'op2' */
- if (op1->IsCnsIntOrI())
+ if (op1->IsCnsIntOrI())
{
// Presumably op2 is assumed to not be a constant (shouldn't happen if we've done constant folding)?
tmp = op1;
- op1 = op2;
- op2 = tmp;
+ op1 = op2;
+ op2 = tmp;
}
/* Check for an addition of a constant */
@@ -1933,10 +1938,9 @@ AGAIN:
if ((op1->gtFlags & GTF_REG_VAL) && mode == 1 && !nogen)
{
- regNumber reg1 = op1->gtRegNum;
+ regNumber reg1 = op1->gtRegNum;
- if ((regMask == 0 || (regMask & genRegMask(reg1))) &&
- genRegTrashable(reg1, addr))
+ if ((regMask == 0 || (regMask & genRegMask(reg1))) && genRegTrashable(reg1, addr))
{
// In case genMarkLclVar(op1) bashed it above and it is
// the last use of the variable.
@@ -1961,48 +1965,52 @@ AGAIN:
switch (op1->gtOper)
{
- case GT_ADD:
+ case GT_ADD:
- if (op1->gtOverflow())
- break;
+ if (op1->gtOverflow())
+ {
+ break;
+ }
- op2 = op1->gtOp.gtOp2;
- op1 = op1->gtOp.gtOp1;
+ op2 = op1->gtOp.gtOp2;
+ op1 = op1->gtOp.gtOp1;
- goto AGAIN;
+ goto AGAIN;
#if SCALED_ADDR_MODES && !defined(_TARGET_ARM64_)
- // TODO-ARM64-CQ: For now we don't try to create a scaled index on ARM64.
- case GT_MUL:
- if (op1->gtOverflow())
- return false; // Need overflow check
+ // TODO-ARM64-CQ: For now we don't try to create a scaled index on ARM64.
+ case GT_MUL:
+ if (op1->gtOverflow())
+ {
+ return false; // Need overflow check
+ }
- __fallthrough;
+ __fallthrough;
- case GT_LSH:
+ case GT_LSH:
- mul = op1->GetScaledIndex();
- if (mul)
- {
- /* We can use "[mul*rv2 + icon]" */
+ mul = op1->GetScaledIndex();
+ if (mul)
+ {
+ /* We can use "[mul*rv2 + icon]" */
- rv1 = 0;
- rv2 = op1->gtOp.gtOp1;
+ rv1 = nullptr;
+ rv2 = op1->gtOp.gtOp1;
- goto FOUND_AM;
- }
- break;
+ goto FOUND_AM;
+ }
+ break;
#endif
- default:
- break;
+ default:
+ break;
}
}
/* The best we can do is "[rv1 + icon]" */
rv1 = op1;
- rv2 = 0;
+ rv2 = nullptr;
goto FOUND_AM;
}
@@ -2010,7 +2018,7 @@ AGAIN:
/* op2 is not a constant. So keep on trying.
Does op1 or op2 already sit in a register? */
- if (op1->gtFlags & GTF_REG_VAL)
+ if (op1->gtFlags & GTF_REG_VAL)
{
/* op1 is sitting in a register */
}
@@ -2019,8 +2027,8 @@ AGAIN:
/* op2 is sitting in a register. Keep the enregistered value as op1 */
tmp = op1;
- op1 = op2;
- op2 = tmp;
+ op1 = op2;
+ op2 = tmp;
noway_assert(rev == false);
rev = true;
@@ -2032,162 +2040,178 @@ AGAIN:
switch (op1->gtOper)
{
#ifndef _TARGET_ARM64_
- // TODO-ARM64-CQ: For now we don't try to create a scaled index on ARM64.
- case GT_ADD:
+ // TODO-ARM64-CQ: For now we don't try to create a scaled index on ARM64.
+ case GT_ADD:
- if (op1->gtOverflow())
- break;
+ if (op1->gtOverflow())
+ {
+ break;
+ }
- if (op1->gtOp.gtOp2->IsIntCnsFitsInI32() && FitsIn<INT32>(cns + op1->gtOp.gtOp2->gtIntCon.gtIconVal))
- {
- cns += op1->gtOp.gtOp2->gtIntCon.gtIconVal;
- op1 = op1->gtOp.gtOp1;
+ if (op1->gtOp.gtOp2->IsIntCnsFitsInI32() && FitsIn<INT32>(cns + op1->gtOp.gtOp2->gtIntCon.gtIconVal))
+ {
+ cns += op1->gtOp.gtOp2->gtIntCon.gtIconVal;
+ op1 = op1->gtOp.gtOp1;
- goto AGAIN;
- }
+ goto AGAIN;
+ }
- break;
+ break;
#if SCALED_ADDR_MODES
- case GT_MUL:
+ case GT_MUL:
- if (op1->gtOverflow())
- break;
+ if (op1->gtOverflow())
+ {
+ break;
+ }
- __fallthrough;
+ __fallthrough;
- case GT_LSH:
+ case GT_LSH:
- mul = op1->GetScaledIndex();
- if (mul)
- {
- /* 'op1' is a scaled value */
+ mul = op1->GetScaledIndex();
+ if (mul)
+ {
+ /* 'op1' is a scaled value */
- rv1 = op2;
- rv2 = op1->gtOp.gtOp1;
+ rv1 = op2;
+ rv2 = op1->gtOp.gtOp1;
- int argScale;
- while ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (argScale = rv2->GetScaledIndex()) != 0)
- {
- if (jitIsScaleIndexMul(argScale * mul))
- {
- mul = mul * argScale;
- rv2 = rv2->gtOp.gtOp1;
- }
- else
+ int argScale;
+ while ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (argScale = rv2->GetScaledIndex()) != 0)
{
- break;
+ if (jitIsScaleIndexMul(argScale * mul))
+ {
+ mul = mul * argScale;
+ rv2 = rv2->gtOp.gtOp1;
+ }
+ else
+ {
+ break;
+ }
}
- }
- noway_assert(rev == false);
- rev = true;
+ noway_assert(rev == false);
+ rev = true;
- goto FOUND_AM;
- }
- break;
+ goto FOUND_AM;
+ }
+ break;
#endif // SCALED_ADDR_MODES
#endif // !_TARGET_ARM64_
- case GT_NOP:
+ case GT_NOP:
- if (!nogen)
- break;
+ if (!nogen)
+ {
+ break;
+ }
- op1 = op1->gtOp.gtOp1;
- goto AGAIN;
+ op1 = op1->gtOp.gtOp1;
+ goto AGAIN;
- case GT_COMMA:
+ case GT_COMMA:
- if (!nogen)
- break;
+ if (!nogen)
+ {
+ break;
+ }
- op1 = op1->gtOp.gtOp2;
- goto AGAIN;
+ op1 = op1->gtOp.gtOp2;
+ goto AGAIN;
- default:
- break;
+ default:
+ break;
}
noway_assert(op2);
switch (op2->gtOper)
{
#ifndef _TARGET_ARM64_
- // TODO-ARM64-CQ: For now we don't try to create a scaled index on ARM64.
- case GT_ADD:
+ // TODO-ARM64-CQ: For now we don't try to create a scaled index on ARM64.
+ case GT_ADD:
- if (op2->gtOverflow())
- break;
+ if (op2->gtOverflow())
+ {
+ break;
+ }
- if (op2->gtOp.gtOp2->IsIntCnsFitsInI32() && FitsIn<INT32>(cns + op2->gtOp.gtOp2->gtIntCon.gtIconVal))
- {
- cns += op2->gtOp.gtOp2->gtIntCon.gtIconVal;
- op2 = op2->gtOp.gtOp1;
+ if (op2->gtOp.gtOp2->IsIntCnsFitsInI32() && FitsIn<INT32>(cns + op2->gtOp.gtOp2->gtIntCon.gtIconVal))
+ {
+ cns += op2->gtOp.gtOp2->gtIntCon.gtIconVal;
+ op2 = op2->gtOp.gtOp1;
- goto AGAIN;
- }
+ goto AGAIN;
+ }
- break;
+ break;
#if SCALED_ADDR_MODES
- case GT_MUL:
+ case GT_MUL:
- if (op2->gtOverflow())
- break;
+ if (op2->gtOverflow())
+ {
+ break;
+ }
- __fallthrough;
+ __fallthrough;
- case GT_LSH:
+ case GT_LSH:
- mul = op2->GetScaledIndex();
- if (mul)
- {
- // 'op2' is a scaled value...is it's argument also scaled?
- int argScale;
- rv2 = op2->gtOp.gtOp1;
- while ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (argScale = rv2->GetScaledIndex()) != 0)
+ mul = op2->GetScaledIndex();
+ if (mul)
{
- if (jitIsScaleIndexMul(argScale * mul))
- {
- mul = mul * argScale;
- rv2 = rv2->gtOp.gtOp1;
- }
- else
+ // 'op2' is a scaled value...is it's argument also scaled?
+ int argScale;
+ rv2 = op2->gtOp.gtOp1;
+ while ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (argScale = rv2->GetScaledIndex()) != 0)
{
- break;
+ if (jitIsScaleIndexMul(argScale * mul))
+ {
+ mul = mul * argScale;
+ rv2 = rv2->gtOp.gtOp1;
+ }
+ else
+ {
+ break;
+ }
}
- }
- rv1 = op1;
+ rv1 = op1;
- goto FOUND_AM;
- }
- break;
+ goto FOUND_AM;
+ }
+ break;
#endif // SCALED_ADDR_MODES
#endif // !_TARGET_ARM64_
- case GT_NOP:
+ case GT_NOP:
- if (!nogen)
- break;
+ if (!nogen)
+ {
+ break;
+ }
- op2 = op2->gtOp.gtOp1;
- goto AGAIN;
+ op2 = op2->gtOp.gtOp1;
+ goto AGAIN;
- case GT_COMMA:
+ case GT_COMMA:
- if (!nogen)
- break;
+ if (!nogen)
+ {
+ break;
+ }
- op2 = op2->gtOp.gtOp2;
- goto AGAIN;
+ op2 = op2->gtOp.gtOp2;
+ goto AGAIN;
- default:
- break;
+ default:
+ break;
}
goto ADD_OP12;
@@ -2202,58 +2226,62 @@ AGAIN:
// TODO-ARM64-CQ: For now we don't try to create a scaled index on ARM64.
switch (op2->gtOper)
{
- case GT_ADD:
+ case GT_ADD:
- if (op2->gtOverflow())
- break;
+ if (op2->gtOverflow())
+ {
+ break;
+ }
- if (op2->gtOp.gtOp2->IsIntCnsFitsInI32() && FitsIn<INT32>(cns + op2->gtOp.gtOp2->gtIntCon.gtIconVal))
- {
- cns += op2->gtOp.gtOp2->gtIntCon.gtIconVal;
- op2 = op2->gtOp.gtOp1;
- goto AGAIN;
- }
+ if (op2->gtOp.gtOp2->IsIntCnsFitsInI32() && FitsIn<INT32>(cns + op2->gtOp.gtOp2->gtIntCon.gtIconVal))
+ {
+ cns += op2->gtOp.gtOp2->gtIntCon.gtIconVal;
+ op2 = op2->gtOp.gtOp1;
+ goto AGAIN;
+ }
- break;
+ break;
#if SCALED_ADDR_MODES
- case GT_MUL:
+ case GT_MUL:
- if (op2->gtOverflow())
- break;
+ if (op2->gtOverflow())
+ {
+ break;
+ }
- __fallthrough;
+ __fallthrough;
- case GT_LSH:
+ case GT_LSH:
- mul = op2->GetScaledIndex();
- if (mul)
- {
- rv1 = op1;
- rv2 = op2->gtOp.gtOp1;
- int argScale;
- while ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (argScale = rv2->GetScaledIndex()) != 0)
+ mul = op2->GetScaledIndex();
+ if (mul)
{
- if (jitIsScaleIndexMul(argScale * mul))
- {
- mul = mul * argScale;
- rv2 = rv2->gtOp.gtOp1;
- }
- else
+ rv1 = op1;
+ rv2 = op2->gtOp.gtOp1;
+ int argScale;
+ while ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (argScale = rv2->GetScaledIndex()) != 0)
{
- break;
+ if (jitIsScaleIndexMul(argScale * mul))
+ {
+ mul = mul * argScale;
+ rv2 = rv2->gtOp.gtOp1;
+ }
+ else
+ {
+ break;
+ }
}
- }
- goto FOUND_AM;
- }
- break;
+ goto FOUND_AM;
+ }
+ break;
#endif // SCALED_ADDR_MODES
- default:
- break;
+ default:
+ break;
}
#endif // !_TARGET_ARM64_
@@ -2272,37 +2300,38 @@ FOUND_AM:
#ifdef LEGACY_BACKEND
/* Check for register variables */
- if (mode != -1)
+ if (mode != -1)
{
- if (rv1 && rv1->gtOper == GT_LCL_VAR) genMarkLclVar(rv1);
- if (rv2 && rv2->gtOper == GT_LCL_VAR) genMarkLclVar(rv2);
+ if (rv1 && rv1->gtOper == GT_LCL_VAR)
+ genMarkLclVar(rv1);
+ if (rv2 && rv2->gtOper == GT_LCL_VAR)
+ genMarkLclVar(rv2);
}
#endif // LEGACY_BACKEND
- if (rv2)
+ if (rv2)
{
/* Make sure a GC address doesn't end up in 'rv2' */
- if (varTypeIsGC(rv2->TypeGet()))
+ if (varTypeIsGC(rv2->TypeGet()))
{
noway_assert(rv1 && !varTypeIsGC(rv1->TypeGet()));
tmp = rv1;
- rv1 = rv2;
- rv2 = tmp;
+ rv1 = rv2;
+ rv2 = tmp;
rev = !rev;
}
/* Special case: constant array index (that is range-checked) */
- if (fold)
+ if (fold)
{
- ssize_t tmpMul;
- GenTreePtr index;
+ ssize_t tmpMul;
+ GenTreePtr index;
- if ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) &&
- (rv2->gtOp.gtOp2->IsCnsIntOrI()))
+ if ((rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) && (rv2->gtOp.gtOp2->IsCnsIntOrI()))
{
/* For valuetype arrays where we can't use the scaled address
mode, rv2 will point to the scaled index. So we have to do
@@ -2310,13 +2339,15 @@ FOUND_AM:
tmpMul = compiler->optGetArrayRefScaleAndIndex(rv2, &index DEBUGARG(false));
if (mul)
+ {
tmpMul *= mul;
+ }
}
else
{
/* May be a simple array. rv2 will points to the actual index */
- index = rv2;
+ index = rv2;
tmpMul = mul;
}
@@ -2328,7 +2359,10 @@ FOUND_AM:
#if SCALED_ADDR_MODES
/* Scale the index if necessary */
- if (tmpMul) ixv *= tmpMul;
+ if (tmpMul)
+ {
+ ixv *= tmpMul;
+ }
#endif
if (FitsIn<INT32>(cns + ixv))
@@ -2341,7 +2375,7 @@ FOUND_AM:
/* There is no scaled operand any more */
mul = 0;
#endif
- rv2 = 0;
+ rv2 = nullptr;
}
}
}
@@ -2362,7 +2396,7 @@ FOUND_AM:
#endif
*cnsPtr = (unsigned)cns;
- return true;
+ return true;
}
/*****************************************************************************
@@ -2375,65 +2409,61 @@ FOUND_AM:
*/
// static
-emitJumpKind CodeGen::genJumpKindForOper(genTreeOps cmp, CompareKind compareKind)
+emitJumpKind CodeGen::genJumpKindForOper(genTreeOps cmp, CompareKind compareKind)
{
- const static
- BYTE genJCCinsSigned[] =
- {
+ const static BYTE genJCCinsSigned[] = {
#if defined(_TARGET_XARCH_)
- EJ_je, // GT_EQ
- EJ_jne, // GT_NE
- EJ_jl, // GT_LT
- EJ_jle, // GT_LE
- EJ_jge, // GT_GE
- EJ_jg, // GT_GT
+ EJ_je, // GT_EQ
+ EJ_jne, // GT_NE
+ EJ_jl, // GT_LT
+ EJ_jle, // GT_LE
+ EJ_jge, // GT_GE
+ EJ_jg, // GT_GT
#elif defined(_TARGET_ARMARCH_)
- EJ_eq, // GT_EQ
- EJ_ne, // GT_NE
- EJ_lt, // GT_LT
- EJ_le, // GT_LE
- EJ_ge, // GT_GE
- EJ_gt, // GT_GT
+ EJ_eq, // GT_EQ
+ EJ_ne, // GT_NE
+ EJ_lt, // GT_LT
+ EJ_le, // GT_LE
+ EJ_ge, // GT_GE
+ EJ_gt, // GT_GT
#endif
};
- const static
- BYTE genJCCinsUnsigned[] = /* unsigned comparison */
+ const static BYTE genJCCinsUnsigned[] = /* unsigned comparison */
{
#if defined(_TARGET_XARCH_)
- EJ_je, // GT_EQ
- EJ_jne, // GT_NE
- EJ_jb, // GT_LT
- EJ_jbe, // GT_LE
- EJ_jae, // GT_GE
- EJ_ja, // GT_GT
+ EJ_je, // GT_EQ
+ EJ_jne, // GT_NE
+ EJ_jb, // GT_LT
+ EJ_jbe, // GT_LE
+ EJ_jae, // GT_GE
+ EJ_ja, // GT_GT
#elif defined(_TARGET_ARMARCH_)
- EJ_eq, // GT_EQ
- EJ_ne, // GT_NE
- EJ_lo, // GT_LT
- EJ_ls, // GT_LE
- EJ_hs, // GT_GE
- EJ_hi, // GT_GT
+ EJ_eq, // GT_EQ
+ EJ_ne, // GT_NE
+ EJ_lo, // GT_LT
+ EJ_ls, // GT_LE
+ EJ_hs, // GT_GE
+ EJ_hi, // GT_GT
#endif
};
- const static
- BYTE genJCCinsLogical[] = /* logical operation */
+ const static BYTE genJCCinsLogical[] = /* logical operation */
{
#if defined(_TARGET_XARCH_)
- EJ_je, // GT_EQ (Z == 1)
- EJ_jne, // GT_NE (Z == 0)
- EJ_js, // GT_LT (S == 1)
- EJ_NONE, // GT_LE
- EJ_jns, // GT_GE (S == 0)
- EJ_NONE, // GT_GT
+ EJ_je, // GT_EQ (Z == 1)
+ EJ_jne, // GT_NE (Z == 0)
+ EJ_js, // GT_LT (S == 1)
+ EJ_NONE, // GT_LE
+ EJ_jns, // GT_GE (S == 0)
+ EJ_NONE, // GT_GT
#elif defined(_TARGET_ARMARCH_)
- EJ_eq, // GT_EQ (Z == 1)
- EJ_ne, // GT_NE (Z == 0)
- EJ_mi, // GT_LT (N == 1)
- EJ_NONE, // GT_LE
- EJ_pl, // GT_GE (N == 0)
- EJ_NONE, // GT_GT
+ EJ_eq, // GT_EQ (Z == 1)
+ EJ_ne, // GT_NE (Z == 0)
+ EJ_mi, // GT_LT (N == 1)
+ EJ_NONE, // GT_LE
+ EJ_pl, // GT_GE (N == 0)
+ EJ_NONE, // GT_GT
#endif
};
@@ -2504,7 +2534,7 @@ emitJumpKind CodeGen::genJumpKindForOper(genTreeOps cmp, CompareKind co
* for speed there might be multiple exit points).
*/
-void CodeGen::genExitCode(BasicBlock * block)
+void CodeGen::genExitCode(BasicBlock* block)
{
#ifdef DEBUGGING_SUPPORT
/* Just wrote the first instruction of the epilog - inform debugger
@@ -2512,7 +2542,7 @@ void CodeGen::genExitCode(BasicBlock * block)
that this is ok */
// For non-optimized debuggable code, there is only one epilog.
- genIPmappingAdd((IL_OFFSETX) ICorDebugInfo::EPILOG, true);
+ genIPmappingAdd((IL_OFFSETX)ICorDebugInfo::EPILOG, true);
#endif // DEBUGGING_SUPPORT
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
@@ -2526,14 +2556,13 @@ void CodeGen::genExitCode(BasicBlock * block)
// The GS cookie check created a temp label that has no live
// incoming GC registers, we need to fix that
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
/* Figure out which register parameters hold pointers */
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount && varDsc->lvIsRegArg;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount && varDsc->lvIsRegArg;
+ varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
@@ -2548,7 +2577,6 @@ void CodeGen::genExitCode(BasicBlock * block)
genReserveEpilog(block);
}
-
/*****************************************************************************
*
* Generate code for an out-of-line exception.
@@ -2556,16 +2584,14 @@ void CodeGen::genExitCode(BasicBlock * block)
* For non-dbg code, we share the helper blocks created by fgAddCodeRef().
*/
-void CodeGen::genJumpToThrowHlpBlk(emitJumpKind jumpKind,
- SpecialCodeKind codeKind,
- GenTreePtr failBlk)
+void CodeGen::genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKind, GenTreePtr failBlk)
{
if (!compiler->opts.compDbgCode)
{
/* For non-debuggable code, find and use the helper block for
raising the exception. The block may be shared by other trees too. */
- BasicBlock * tgtBlk;
+ BasicBlock* tgtBlk;
if (failBlk)
{
@@ -2573,14 +2599,17 @@ void CodeGen::genJumpToThrowHlpBlk(emitJumpKind jumpKind,
noway_assert(failBlk->gtOper == GT_LABEL);
tgtBlk = failBlk->gtLabel.gtLabBB;
- noway_assert(tgtBlk == compiler->fgFindExcptnTarget(codeKind, compiler->bbThrowIndex(compiler->compCurBB))->acdDstBlk);
+ noway_assert(
+ tgtBlk ==
+ compiler->fgFindExcptnTarget(codeKind, compiler->bbThrowIndex(compiler->compCurBB))->acdDstBlk);
}
else
{
/* Find the helper-block which raises the exception. */
- Compiler::AddCodeDsc * add = compiler->fgFindExcptnTarget(codeKind, compiler->bbThrowIndex(compiler->compCurBB));
- PREFIX_ASSUME_MSG((add != NULL), ("ERROR: failed to find exception throw block"));
+ Compiler::AddCodeDsc* add =
+ compiler->fgFindExcptnTarget(codeKind, compiler->bbThrowIndex(compiler->compCurBB));
+ PREFIX_ASSUME_MSG((add != nullptr), ("ERROR: failed to find exception throw block"));
tgtBlk = add->acdDstBlk;
}
@@ -2595,7 +2624,7 @@ void CodeGen::genJumpToThrowHlpBlk(emitJumpKind jumpKind,
/* The code to throw the exception will be generated inline, and
we will jump around it in the normal non-exception case */
- BasicBlock * tgtBlk = nullptr;
+ BasicBlock* tgtBlk = nullptr;
emitJumpKind reverseJumpKind = emitter::emitReverseJumpKind(jumpKind);
if (reverseJumpKind != jumpKind)
{
@@ -2621,12 +2650,12 @@ void CodeGen::genJumpToThrowHlpBlk(emitJumpKind jumpKind,
*/
// inline
-void CodeGen::genCheckOverflow(GenTreePtr tree)
+void CodeGen::genCheckOverflow(GenTreePtr tree)
{
// Overflow-check should be asked for this tree
noway_assert(tree->gtOverflow());
- const var_types type = tree->TypeGet();
+ const var_types type = tree->TypeGet();
// Overflow checks can only occur for the non-small types: (i.e. TYP_INT,TYP_LONG)
noway_assert(!varTypeIsSmall(type));
@@ -2641,7 +2670,7 @@ void CodeGen::genCheckOverflow(GenTreePtr tree)
else
#endif
{
- bool isUnsignedOverflow = ((tree->gtFlags & GTF_UNSIGNED) != 0);
+ bool isUnsignedOverflow = ((tree->gtFlags & GTF_UNSIGNED) != 0);
#if defined(_TARGET_XARCH_)
@@ -2655,7 +2684,7 @@ void CodeGen::genCheckOverflow(GenTreePtr tree)
{
if ((tree->OperGet() != GT_SUB) && (tree->gtOper != GT_ASG_SUB))
{
- jumpKind = EJ_hs;
+ jumpKind = EJ_hs;
}
}
@@ -2677,7 +2706,7 @@ void CodeGen::genCheckOverflow(GenTreePtr tree)
*
*/
-void CodeGen::genUpdateCurrentFunclet(BasicBlock * block)
+void CodeGen::genUpdateCurrentFunclet(BasicBlock* block)
{
if (block->bbFlags & BBF_FUNCLET_BEG)
{
@@ -2713,30 +2742,28 @@ void CodeGen::genUpdateCurrentFunclet(BasicBlock * block)
}
#endif // FEATURE_EH_FUNCLETS
-
/*****************************************************************************
*
* Generate code for the function.
*/
-void CodeGen::genGenerateCode(void * * codePtr,
- ULONG * nativeSizeOfCode)
+void CodeGen::genGenerateCode(void** codePtr, ULONG* nativeSizeOfCode)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** In genGenerateCode()\n");
compiler->fgDispBasicBlocks(compiler->verboseTrees);
}
#endif
- unsigned codeSize;
- unsigned prologSize;
- unsigned epilogSize;
+ unsigned codeSize;
+ unsigned prologSize;
+ unsigned epilogSize;
- void * consPtr;
+ void* consPtr;
-#ifdef DEBUG
+#ifdef DEBUG
genInterruptibleUsed = true;
#if STACK_PROBES
@@ -2758,28 +2785,39 @@ void CodeGen::genGenerateCode(void * * codePtr,
#ifdef DEBUG
if (compiler->opts.disAsmSpilled && regSet.rsNeededSpillReg)
+ {
compiler->opts.disAsm = true;
+ }
- if (compiler->opts.disAsm)
+ if (compiler->opts.disAsm)
{
printf("; Assembly listing for method %s\n", compiler->info.compFullName);
printf("; Emitting ");
if (compiler->compCodeOpt() == Compiler::SMALL_CODE)
+ {
printf("SMALL_CODE");
+ }
else if (compiler->compCodeOpt() == Compiler::FAST_CODE)
+ {
printf("FAST_CODE");
+ }
else
+ {
printf("BLENDED_CODE");
+ }
printf(" for ");
if (compiler->info.genCPU == CPU_X86)
+ {
printf("generic X86 CPU");
+ }
else if (compiler->info.genCPU == CPU_X86_PENTIUM_4)
+ {
printf("Pentium 4");
-
+ }
else if (compiler->info.genCPU == CPU_X64)
{
if (compiler->canUseAVX())
@@ -2793,36 +2831,54 @@ void CodeGen::genGenerateCode(void * * codePtr,
}
else if (compiler->info.genCPU == CPU_ARM)
+ {
printf("generic ARM CPU");
+ }
printf("\n");
if ((compiler->opts.compFlags & CLFLG_MAXOPT) == CLFLG_MAXOPT)
+ {
printf("; optimized code\n");
+ }
else if (compiler->opts.compDbgCode)
+ {
printf("; debuggable code\n");
+ }
else if (compiler->opts.MinOpts())
- printf("; compiler->opts.MinOpts() is true\n");
- else
+ {
+ printf("; compiler->opts.MinOpts() is true\n");
+ }
+ else
+ {
printf("; unknown optimization flags\n");
+ }
#if DOUBLE_ALIGN
if (compiler->genDoubleAlign())
printf("; double-aligned frame\n");
- else
+ else
#endif
printf("; %s based frame\n", isFramePointerUsed() ? STR_FPBASE : STR_SPBASE);
if (genInterruptible)
+ {
printf("; fully interruptible\n");
+ }
else
+ {
printf("; partially interruptible\n");
+ }
if (compiler->fgHaveProfileData())
+ {
printf("; with IBC profile data\n");
+ }
if (compiler->fgProfileData_ILSizeMismatch)
+ {
printf("; discarded IBC profile data due to mismatch in ILSize\n");
+ }
}
#endif // DEBUG
@@ -2846,11 +2902,9 @@ void CodeGen::genGenerateCode(void * * codePtr,
// and thus saved on the frame).
// Compute the maximum estimated spill temp size.
- unsigned maxTmpSize = sizeof(double) + sizeof(float)
- + sizeof(__int64)+ sizeof(void*);
+ unsigned maxTmpSize = sizeof(double) + sizeof(float) + sizeof(__int64) + sizeof(void*);
- maxTmpSize += (compiler->tmpDoubleSpillMax * sizeof(double)) +
- (compiler->tmpIntSpillMax * sizeof(int));
+ maxTmpSize += (compiler->tmpDoubleSpillMax * sizeof(double)) + (compiler->tmpIntSpillMax * sizeof(int));
#ifdef DEBUG
@@ -2868,12 +2922,12 @@ void CodeGen::genGenerateCode(void * * codePtr,
}
// JIT uses 2 passes when assigning stack variable (i.e. args, temps, and locals) locations in varDsc->lvStkOffs.
- // During the 1st pass (in genGenerateCode), it estimates the maximum possible size for stack temps
+ // During the 1st pass (in genGenerateCode), it estimates the maximum possible size for stack temps
// and put it in maxTmpSize. Then it calculates the varDsc->lvStkOffs for each variable based on this estimation.
- // However during stress mode, we might spill more temps on the stack, which might grow the
+ // However during stress mode, we might spill more temps on the stack, which might grow the
// size of the temp area.
// This might cause varDsc->lvStkOffs to change during the 2nd pass (in emitEndCodeGen).
- // If the change of varDsc->lvStkOffs crosses the threshold for the instruction size,
+ // If the change of varDsc->lvStkOffs crosses the threshold for the instruction size,
// we will then have a mismatched estimated code size (during the 1st pass) and the actual emitted code size
// (during the 2nd pass).
// Also, if STRESS_UNSAFE_BUFFER_CHECKS is turned on, we might reorder the stack variable locations,
@@ -2893,13 +2947,13 @@ void CodeGen::genGenerateCode(void * * codePtr,
/* Estimate the offsets of locals/arguments and size of frame */
- unsigned lclSize = compiler->lvaFrameSize(Compiler::TENTATIVE_FRAME_LAYOUT);
+ unsigned lclSize = compiler->lvaFrameSize(Compiler::TENTATIVE_FRAME_LAYOUT);
#ifdef DEBUG
//
// Display the local frame offsets that we have tentatively decided upon
- //
- if (verbose)
+ //
+ if (verbose)
{
compiler->lvaTableDump();
}
@@ -2907,16 +2961,18 @@ void CodeGen::genGenerateCode(void * * codePtr,
#endif // LEGACY_BACKEND
-
getEmitter()->emitBegFN(isFramePointerUsed()
#if defined(DEBUG)
- , (compiler->compCodeOpt() != Compiler::SMALL_CODE) && !(compiler->opts.eeFlags & CORJIT_FLG_PREJIT)
+ ,
+ (compiler->compCodeOpt() != Compiler::SMALL_CODE) &&
+ !(compiler->opts.eeFlags & CORJIT_FLG_PREJIT)
#endif
#ifdef LEGACY_BACKEND
- , lclSize
+ ,
+ lclSize
#endif // LEGACY_BACKEND
- , maxTmpSize
- );
+ ,
+ maxTmpSize);
/* Now generate code for the function */
genCodeForBBlist();
@@ -2925,7 +2981,7 @@ void CodeGen::genGenerateCode(void * * codePtr,
#ifdef DEBUG
// After code generation, dump the frame layout again. It should be the same as before code generation, if code
// generation hasn't touched it (it shouldn't!).
- if (verbose)
+ if (verbose)
{
compiler->lvaTableDump();
}
@@ -2965,8 +3021,7 @@ void CodeGen::genGenerateCode(void * * codePtr,
// especially that caused by enabling JIT stress.
if (!JitConfig.JitNoForceFallback())
{
- if (JitConfig.JitForceFallback() ||
- compiler->compStressCompile(Compiler::STRESS_GENERIC_VARN, 5) )
+ if (JitConfig.JitForceFallback() || compiler->compStressCompile(Compiler::STRESS_GENERIC_VARN, 5))
{
NO_WAY_NOASSERT("Stress failure");
}
@@ -2983,7 +3038,7 @@ void CodeGen::genGenerateCode(void * * codePtr,
#if DISPLAY_SIZES
- size_t dataSize = getEmitter()->emitDataSize();
+ size_t dataSize = getEmitter()->emitDataSize();
#endif // DISPLAY_SIZES
@@ -3010,29 +3065,22 @@ void CodeGen::genGenerateCode(void * * codePtr,
compiler->EndPhase(PHASE_GENERATE_CODE);
- codeSize = getEmitter()->emitEndCodeGen( compiler,
- trackedStackPtrsContig,
- genInterruptible,
- genFullPtrRegMap,
- (compiler->info.compRetType == TYP_REF),
- compiler->compHndBBtabCount,
- &prologSize,
- &epilogSize,
- codePtr,
- &coldCodePtr,
- &consPtr);
+ codeSize = getEmitter()->emitEndCodeGen(compiler, trackedStackPtrsContig, genInterruptible, genFullPtrRegMap,
+ (compiler->info.compRetType == TYP_REF), compiler->compHndBBtabCount,
+ &prologSize, &epilogSize, codePtr, &coldCodePtr, &consPtr);
compiler->EndPhase(PHASE_EMIT_CODE);
#ifdef DEBUG
- if (compiler->opts.disAsm)
+ if (compiler->opts.disAsm)
{
- printf("; Total bytes of code %d, prolog size %d for method %s\n", codeSize, prologSize, compiler->info.compFullName);
+ printf("; Total bytes of code %d, prolog size %d for method %s\n", codeSize, prologSize,
+ compiler->info.compFullName);
printf("; ============================================================\n");
- printf(""); // in our logic this causes a flush
+ printf(""); // in our logic this causes a flush
}
- if (verbose)
+ if (verbose)
{
printf("*************** After end code gen, before unwindEmit()\n");
getEmitter()->emitDispIGlist(true);
@@ -3043,13 +3091,13 @@ void CodeGen::genGenerateCode(void * * codePtr,
/* Check our max stack level. Needed for fgAddCodeRef().
We need to relax the assert as our estimation won't include code-gen
stack changes (which we know don't affect fgAddCodeRef()) */
- noway_assert(getEmitter()->emitMaxStackDepth <= (compiler->fgPtrArgCntMax +
- compiler->compHndBBtabCount + // Return address for locally-called finallys
- genTypeStSz(TYP_LONG) + // longs/doubles may be transferred via stack, etc
- (compiler->compTailCallUsed?4:0))); // CORINFO_HELP_TAILCALL args
+ noway_assert(getEmitter()->emitMaxStackDepth <=
+ (compiler->fgPtrArgCntMax + compiler->compHndBBtabCount + // Return address for locally-called finallys
+ genTypeStSz(TYP_LONG) + // longs/doubles may be transferred via stack, etc
+ (compiler->compTailCallUsed ? 4 : 0))); // CORINFO_HELP_TAILCALL args
#endif
- *nativeSizeOfCode = codeSize;
+ *nativeSizeOfCode = codeSize;
compiler->info.compNativeCodeSize = (UNATIVE_OFFSET)codeSize;
// printf("%6u bytes of code generated for %s.%s\n", codeSize, compiler->info.compFullName);
@@ -3108,47 +3156,61 @@ void CodeGen::genGenerateCode(void * * codePtr,
#ifdef JIT32_GCENCODER
#ifdef DEBUG
- void* infoPtr =
-#endif // DEBUG
+ void* infoPtr =
+#endif // DEBUG
#endif
// Create and store the GC info for this method.
genCreateAndStoreGCInfo(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
-#ifdef DEBUG
+#ifdef DEBUG
FILE* dmpf = jitstdout;
compiler->opts.dmpHex = false;
- if (!strcmp(compiler->info.compMethodName, "<name of method you want the hex dump for"))
+ if (!strcmp(compiler->info.compMethodName, "<name of method you want the hex dump for"))
{
- FILE* codf;
- errno_t ec = fopen_s(&codf, "C:\\JIT.COD", "at"); // NOTE: file append mode
- if (ec != 0)
+ FILE* codf;
+ errno_t ec = fopen_s(&codf, "C:\\JIT.COD", "at"); // NOTE: file append mode
+ if (ec != 0)
{
assert(codf);
- dmpf = codf;
+ dmpf = codf;
compiler->opts.dmpHex = true;
}
}
- if (compiler->opts.dmpHex)
+ if (compiler->opts.dmpHex)
{
- size_t consSize = getEmitter()->emitDataSize();
- size_t infoSize = compiler->compInfoBlkSize;
+ size_t consSize = getEmitter()->emitDataSize();
+ size_t infoSize = compiler->compInfoBlkSize;
fprintf(dmpf, "Generated code for %s:\n", compiler->info.compFullName);
fprintf(dmpf, "\n");
- if (codeSize) fprintf(dmpf, " Code at %p [%04X bytes]\n", dspPtr(*codePtr), codeSize);
- if (consSize) fprintf(dmpf, " Const at %p [%04X bytes]\n", dspPtr(consPtr), consSize);
+ if (codeSize)
+ {
+ fprintf(dmpf, " Code at %p [%04X bytes]\n", dspPtr(*codePtr), codeSize);
+ }
+ if (consSize)
+ {
+ fprintf(dmpf, " Const at %p [%04X bytes]\n", dspPtr(consPtr), consSize);
+ }
#ifdef JIT32_GCENCODER
- if (infoSize) fprintf(dmpf, " Info at %p [%04X bytes]\n", dspPtr(infoPtr), infoSize);
+ if (infoSize)
+ fprintf(dmpf, " Info at %p [%04X bytes]\n", dspPtr(infoPtr), infoSize);
#endif // JIT32_GCENCODER
fprintf(dmpf, "\n");
- if (codeSize) hexDump(dmpf, "Code" , (BYTE*)*codePtr, codeSize);
- if (consSize) hexDump(dmpf, "Const", (BYTE*)consPtr, consSize);
+ if (codeSize)
+ {
+ hexDump(dmpf, "Code", (BYTE*)*codePtr, codeSize);
+ }
+ if (consSize)
+ {
+ hexDump(dmpf, "Const", (BYTE*)consPtr, consSize);
+ }
#ifdef JIT32_GCENCODER
- if (infoSize) hexDump(dmpf, "Info" , (BYTE*)infoPtr, infoSize);
+ if (infoSize)
+ hexDump(dmpf, "Info", (BYTE*)infoPtr, infoSize);
#endif // JIT32_GCENCODER
fflush(dmpf);
@@ -3161,7 +3223,6 @@ void CodeGen::genGenerateCode(void * * codePtr,
#endif // DEBUG
-
/* Tell the emitter that we're done with this function */
getEmitter()->emitEndFN();
@@ -3185,16 +3246,17 @@ void CodeGen::genGenerateCode(void * * codePtr,
compiler->EndPhase(PHASE_EMIT_GCEH);
}
-
/*****************************************************************************
*
* Report EH clauses to the VM
*/
-void CodeGen::genReportEH()
+void CodeGen::genReportEH()
{
- if (compiler->compHndBBtabCount == 0)
+ if (compiler->compHndBBtabCount == 0)
+ {
return;
+ }
#ifdef DEBUG
if (compiler->opts.dspEHTable)
@@ -3203,21 +3265,21 @@ void CodeGen::genReportEH()
}
#endif // DEBUG
- unsigned XTnum;
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
+ EHblkDsc* HBtabEnd;
unsigned EHCount = compiler->compHndBBtabCount;
#if FEATURE_EH_FUNCLETS
- // Count duplicated clauses. This uses the same logic as below, where we actually generate them for reporting to the VM.
+ // Count duplicated clauses. This uses the same logic as below, where we actually generate them for reporting to the
+ // VM.
unsigned duplicateClauseCount = 0;
unsigned enclosingTryIndex;
- for (XTnum = 0;
- XTnum < compiler->compHndBBtabCount;
- XTnum++)
+ for (XTnum = 0; XTnum < compiler->compHndBBtabCount; XTnum++)
{
- for (enclosingTryIndex = compiler->ehTrueEnclosingTryIndexIL(XTnum); // find the true enclosing try index, ignoring 'mutual protect' trys
+ for (enclosingTryIndex = compiler->ehTrueEnclosingTryIndexIL(XTnum); // find the true enclosing try index,
+ // ignoring 'mutual protect' trys
enclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX;
enclosingTryIndex = compiler->ehGetEnclosingTryIndex(enclosingTryIndex))
{
@@ -3227,7 +3289,7 @@ void CodeGen::genReportEH()
EHCount += duplicateClauseCount;
#if FEATURE_EH_CALLFINALLY_THUNKS
- unsigned clonedFinallyCount = 0;
+ unsigned clonedFinallyCount = 0;
// We don't keep track of how many cloned finally there are. So, go through and count.
// We do a quick pass first through the EH table to see if there are any try/finally
@@ -3235,8 +3297,7 @@ void CodeGen::genReportEH()
bool anyFinallys = false;
for (HBtab = compiler->compHndBBtab, HBtabEnd = compiler->compHndBBtab + compiler->compHndBBtabCount;
- HBtab < HBtabEnd;
- HBtab++)
+ HBtab < HBtabEnd; HBtab++)
{
if (HBtab->HasFinallyHandler())
{
@@ -3266,16 +3327,15 @@ void CodeGen::genReportEH()
#if FEATURE_EH_FUNCLETS
#if FEATURE_EH_CALLFINALLY_THUNKS
printf("%d EH table entries, %d duplicate clauses, %d cloned finallys, %d total EH entries reported to VM\n",
- compiler->compHndBBtabCount, duplicateClauseCount, clonedFinallyCount, EHCount);
+ compiler->compHndBBtabCount, duplicateClauseCount, clonedFinallyCount, EHCount);
assert(compiler->compHndBBtabCount + duplicateClauseCount + clonedFinallyCount == EHCount);
-#else // !FEATURE_EH_CALLFINALLY_THUNKS
+#else // !FEATURE_EH_CALLFINALLY_THUNKS
printf("%d EH table entries, %d duplicate clauses, %d total EH entries reported to VM\n",
- compiler->compHndBBtabCount, duplicateClauseCount, EHCount);
+ compiler->compHndBBtabCount, duplicateClauseCount, EHCount);
assert(compiler->compHndBBtabCount + duplicateClauseCount == EHCount);
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
-#else // !FEATURE_EH_FUNCLETS
- printf("%d EH table entries, %d total EH entries reported to VM\n",
- compiler->compHndBBtabCount, EHCount);
+#else // !FEATURE_EH_FUNCLETS
+ printf("%d EH table entries, %d total EH entries reported to VM\n", compiler->compHndBBtabCount, EHCount);
assert(compiler->compHndBBtabCount == EHCount);
#endif // !FEATURE_EH_FUNCLETS
}
@@ -3287,16 +3347,17 @@ void CodeGen::genReportEH()
XTnum = 0; // This is the index we pass to the VM
for (HBtab = compiler->compHndBBtab, HBtabEnd = compiler->compHndBBtab + compiler->compHndBBtabCount;
- HBtab < HBtabEnd;
- HBtab++)
+ HBtab < HBtabEnd; HBtab++)
{
- UNATIVE_OFFSET tryBeg, tryEnd, hndBeg, hndEnd, hndTyp;
+ UNATIVE_OFFSET tryBeg, tryEnd, hndBeg, hndEnd, hndTyp;
tryBeg = compiler->ehCodeOffset(HBtab->ebdTryBeg);
hndBeg = compiler->ehCodeOffset(HBtab->ebdHndBeg);
- tryEnd = (HBtab->ebdTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize : compiler->ehCodeOffset(HBtab->ebdTryLast->bbNext);
- hndEnd = (HBtab->ebdHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize : compiler->ehCodeOffset(HBtab->ebdHndLast->bbNext);
+ tryEnd = (HBtab->ebdTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
+ : compiler->ehCodeOffset(HBtab->ebdTryLast->bbNext);
+ hndEnd = (HBtab->ebdHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
+ : compiler->ehCodeOffset(HBtab->ebdHndLast->bbNext);
if (HBtab->HasFilter())
{
@@ -3330,7 +3391,7 @@ void CodeGen::genReportEH()
#if FEATURE_EH_FUNCLETS
// Now output duplicated clauses.
- //
+ //
// If a funclet has been created by moving a handler out of a try region that it was originally nested
// within, then we need to report a "duplicate" clause representing the fact that an exception in that
// handler can be caught by the 'try' it has been moved out of. This is because the original 'try' region
@@ -3463,24 +3524,24 @@ void CodeGen::genReportEH()
{
unsigned reportedDuplicateClauseCount = 0; // How many duplicated clauses have we reported?
unsigned XTnum2;
- for (XTnum2 = 0, HBtab = compiler->compHndBBtab;
- XTnum2 < compiler->compHndBBtabCount;
- XTnum2++ , HBtab++)
+ for (XTnum2 = 0, HBtab = compiler->compHndBBtab; XTnum2 < compiler->compHndBBtabCount; XTnum2++, HBtab++)
{
unsigned enclosingTryIndex;
EHblkDsc* fletTab = compiler->ehGetDsc(XTnum2);
- for (enclosingTryIndex = compiler->ehTrueEnclosingTryIndexIL(XTnum2); // find the true enclosing try index, ignoring 'mutual protect' trys
+ for (enclosingTryIndex = compiler->ehTrueEnclosingTryIndexIL(XTnum2); // find the true enclosing try index,
+ // ignoring 'mutual protect' trys
enclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX;
enclosingTryIndex = compiler->ehGetEnclosingTryIndex(enclosingTryIndex))
{
// The funclet we moved out is nested in a try region, so create a new EH descriptor for the funclet
// that will have the enclosing try protecting the funclet.
- noway_assert(XTnum2 < enclosingTryIndex); // the enclosing region must be less nested, and hence have a greater EH table index
+ noway_assert(XTnum2 < enclosingTryIndex); // the enclosing region must be less nested, and hence have a
+ // greater EH table index
- EHblkDsc* encTab = compiler->ehGetDsc(enclosingTryIndex);
+ EHblkDsc* encTab = compiler->ehGetDsc(enclosingTryIndex);
// The try region is the handler of the funclet. Note that for filters, we don't protect the
// filter region, only the filter handler region. This is because exceptions in filters never
@@ -3489,16 +3550,18 @@ void CodeGen::genReportEH()
BasicBlock* bbTryBeg = fletTab->ebdHndBeg;
BasicBlock* bbTryLast = fletTab->ebdHndLast;
- BasicBlock* bbHndBeg = encTab->ebdHndBeg; // The handler region is the same as the enclosing try
+ BasicBlock* bbHndBeg = encTab->ebdHndBeg; // The handler region is the same as the enclosing try
BasicBlock* bbHndLast = encTab->ebdHndLast;
- UNATIVE_OFFSET tryBeg, tryEnd, hndBeg, hndEnd, hndTyp;
+ UNATIVE_OFFSET tryBeg, tryEnd, hndBeg, hndEnd, hndTyp;
tryBeg = compiler->ehCodeOffset(bbTryBeg);
hndBeg = compiler->ehCodeOffset(bbHndBeg);
- tryEnd = (bbTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize : compiler->ehCodeOffset(bbTryLast->bbNext);
- hndEnd = (bbHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize : compiler->ehCodeOffset(bbHndLast->bbNext);
+ tryEnd = (bbTryLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
+ : compiler->ehCodeOffset(bbTryLast->bbNext);
+ hndEnd = (bbHndLast == compiler->fgLastBB) ? compiler->info.compNativeCodeSize
+ : compiler->ehCodeOffset(bbHndLast->bbNext);
if (encTab->HasFilter())
{
@@ -3518,13 +3581,14 @@ void CodeGen::genReportEH()
// Note that the JIT-EE interface reuses the CORINFO_EH_CLAUSE type, even though the names of
// the fields aren't really accurate. For example, we set "TryLength" to the offset of the
- // instruction immediately after the 'try' body. So, it really could be more accurately named "TryEndOffset".
+ // instruction immediately after the 'try' body. So, it really could be more accurately named
+ // "TryEndOffset".
CORINFO_EH_CLAUSE clause;
- clause.ClassToken = hndTyp; /* filter offset is passed back here for filter-based exception handlers */
- clause.Flags = flags;
- clause.TryOffset = tryBeg;
- clause.TryLength = tryEnd;
+ clause.ClassToken = hndTyp; /* filter offset is passed back here for filter-based exception handlers */
+ clause.Flags = flags;
+ clause.TryOffset = tryBeg;
+ clause.TryLength = tryEnd;
clause.HandlerOffset = hndBeg;
clause.HandlerLength = hndEnd;
@@ -3538,11 +3602,13 @@ void CodeGen::genReportEH()
#ifndef DEBUG
if (duplicateClauseCount == reportedDuplicateClauseCount)
- break; // we've reported all of them; no need to continue looking
+ {
+ break; // we've reported all of them; no need to continue looking
+ }
#endif // !DEBUG
} // for each 'true' enclosing 'try'
- } // for each EH table entry
+ } // for each EH table entry
assert(duplicateClauseCount == reportedDuplicateClauseCount);
} // if (duplicateClauseCount > 0)
@@ -3555,7 +3621,7 @@ void CodeGen::genReportEH()
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
- UNATIVE_OFFSET hndBeg, hndEnd;
+ UNATIVE_OFFSET hndBeg, hndEnd;
hndBeg = compiler->ehCodeOffset(block);
@@ -3577,8 +3643,8 @@ void CodeGen::genReportEH()
}
CORINFO_EH_CLAUSE clause;
- clause.ClassToken = 0; // unused
- clause.Flags = (CORINFO_EH_CLAUSE_FLAGS)(CORINFO_EH_CLAUSE_FINALLY | COR_ILEXCEPTION_CLAUSE_DUPLICATED);
+ clause.ClassToken = 0; // unused
+ clause.Flags = (CORINFO_EH_CLAUSE_FLAGS)(CORINFO_EH_CLAUSE_FINALLY | COR_ILEXCEPTION_CLAUSE_DUPLICATED);
clause.TryOffset = hndBeg;
clause.TryLength = hndBeg;
clause.HandlerOffset = hndBeg;
@@ -3594,13 +3660,15 @@ void CodeGen::genReportEH()
#ifndef DEBUG
if (clonedFinallyCount == reportedClonedFinallyCount)
- break; // we're done; no need to keep looking
-#endif // !DEBUG
+ {
+ break; // we're done; no need to keep looking
+ }
+#endif // !DEBUG
} // block is BBJ_CALLFINALLY
- } // for each block
+ } // for each block
assert(clonedFinallyCount == reportedClonedFinallyCount);
- } // if (anyFinallys)
+ } // if (anyFinallys)
#endif // FEATURE_EH_CALLFINALLY_THUNKS
#endif // FEATURE_EH_FUNCLETS
@@ -3608,13 +3676,12 @@ void CodeGen::genReportEH()
assert(XTnum == EHCount);
}
-void CodeGen::genGCWriteBarrier(GenTreePtr tgt, GCInfo::WriteBarrierForm wbf)
+void CodeGen::genGCWriteBarrier(GenTreePtr tgt, GCInfo::WriteBarrierForm wbf)
{
#ifndef LEGACY_BACKEND
noway_assert(tgt->gtOper == GT_STOREIND);
-#else // LEGACY_BACKEND
- noway_assert(tgt->gtOper == GT_IND ||
- tgt->gtOper == GT_CLS_VAR); // enforced by gcIsWriteBarrierCandidate
+#else // LEGACY_BACKEND
+ noway_assert(tgt->gtOper == GT_IND || tgt->gtOper == GT_CLS_VAR); // enforced by gcIsWriteBarrierCandidate
#endif // LEGACY_BACKEND
/* Call the proper vm helper */
@@ -3626,7 +3693,7 @@ void CodeGen::genGCWriteBarrier(GenTreePtr tgt, GCInfo::WriteBarr
}
else
#endif
- if (tgt->gtOper != GT_CLS_VAR)
+ if (tgt->gtOper != GT_CLS_VAR)
{
if (wbf != GCInfo::WBF_BarrierUnchecked) // This overrides the tests below.
{
@@ -3640,18 +3707,17 @@ void CodeGen::genGCWriteBarrier(GenTreePtr tgt, GCInfo::WriteBarr
}
}
}
- assert(((helper == CORINFO_HELP_ASSIGN_REF_ENSURE_NONHEAP) && (wbf == GCInfo::WBF_NoBarrier_CheckNotHeapInDebug))
- ||
- ((helper == CORINFO_HELP_CHECKED_ASSIGN_REF) && (wbf == GCInfo::WBF_BarrierChecked || wbf == GCInfo::WBF_BarrierUnknown))
- ||
- ((helper == CORINFO_HELP_ASSIGN_REF) && (wbf == GCInfo::WBF_BarrierUnchecked || wbf == GCInfo::WBF_BarrierUnknown)));
-
-
+ assert(((helper == CORINFO_HELP_ASSIGN_REF_ENSURE_NONHEAP) && (wbf == GCInfo::WBF_NoBarrier_CheckNotHeapInDebug)) ||
+ ((helper == CORINFO_HELP_CHECKED_ASSIGN_REF) &&
+ (wbf == GCInfo::WBF_BarrierChecked || wbf == GCInfo::WBF_BarrierUnknown)) ||
+ ((helper == CORINFO_HELP_ASSIGN_REF) &&
+ (wbf == GCInfo::WBF_BarrierUnchecked || wbf == GCInfo::WBF_BarrierUnknown)));
#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
// We classify the "tgt" trees as follows:
- // If "tgt" is of the form (where [ x ] indicates an optional x, and { x1, ..., xn } means "one of the x_i forms"):
- // IND [-> ADDR -> IND] -> { GT_LCL_VAR, GT_REG_VAR, ADD({GT_LCL_VAR, GT_REG_VAR}, X), ADD(X, (GT_LCL_VAR, GT_REG_VAR)) }
+ // If "tgt" is of the form (where [ x ] indicates an optional x, and { x1, ..., xn } means "one of the x_i forms"):
+ // IND [-> ADDR -> IND] -> { GT_LCL_VAR, GT_REG_VAR, ADD({GT_LCL_VAR, GT_REG_VAR}, X), ADD(X, (GT_LCL_VAR,
+ // GT_REG_VAR)) }
// then let "v" be the GT_LCL_VAR or GT_REG_VAR.
// * If "v" is the return buffer argument, classify as CWBKind_RetBuf.
// * If "v" is another by-ref argument, classify as CWBKind_ByRefArg.
@@ -3675,7 +3741,7 @@ void CodeGen::genGCWriteBarrier(GenTreePtr tgt, GCInfo::WriteBarr
}
else if (indArg->gtOper == GT_ADD)
{
- if (indArg->gtOp.gtOp1->gtOper == GT_LCL_VAR || indArg->gtOp.gtOp1->gtOper == GT_REG_VAR)
+ if (indArg->gtOp.gtOp1->gtOper == GT_LCL_VAR || indArg->gtOp.gtOp1->gtOper == GT_REG_VAR)
{
lcl = indArg->gtOp.gtOp1;
}
@@ -3686,25 +3752,25 @@ void CodeGen::genGCWriteBarrier(GenTreePtr tgt, GCInfo::WriteBarr
}
if (lcl != NULL)
{
- wbKind = CWBKind_OtherByRefLocal; // Unclassified local variable.
+ wbKind = CWBKind_OtherByRefLocal; // Unclassified local variable.
unsigned lclNum = 0;
if (lcl->gtOper == GT_LCL_VAR)
lclNum = lcl->gtLclVarCommon.gtLclNum;
- else
+ else
{
assert(lcl->gtOper == GT_REG_VAR);
lclNum = lcl->gtRegVar.gtLclNum;
}
if (lclNum == compiler->info.compRetBuffArg)
{
- wbKind = CWBKind_RetBuf; // Ret buff. Can happen if the struct exceeds the size limit.
+ wbKind = CWBKind_RetBuf; // Ret buff. Can happen if the struct exceeds the size limit.
}
else
{
LclVarDsc* varDsc = &compiler->lvaTable[lclNum];
if (varDsc->lvIsParam && varDsc->lvType == TYP_BYREF)
{
- wbKind = CWBKind_ByRefArg; // Out (or in/out) arg
+ wbKind = CWBKind_ByRefArg; // Out (or in/out) arg
}
}
}
@@ -3729,23 +3795,23 @@ void CodeGen::genGCWriteBarrier(GenTreePtr tgt, GCInfo::WriteBarr
#endif // DEBUG
#endif // 0
genStackLevel += 4;
- inst_IV(INS_push, wbKind);
+ inst_IV(INS_push, wbKind);
genEmitHelperCall(helper,
- 4, // argSize
- EA_PTRSIZE); // retSize
+ 4, // argSize
+ EA_PTRSIZE); // retSize
genStackLevel -= 4;
}
else
{
genEmitHelperCall(helper,
- 0, // argSize
- EA_PTRSIZE); // retSize
+ 0, // argSize
+ EA_PTRSIZE); // retSize
}
-#else // !FEATURE_COUNT_GC_WRITE_BARRIERS
+#else // !FEATURE_COUNT_GC_WRITE_BARRIERS
genEmitHelperCall(helper,
- 0, // argSize
- EA_PTRSIZE); // retSize
+ 0, // argSize
+ EA_PTRSIZE); // retSize
#endif // !FEATURE_COUNT_GC_WRITE_BARRIERS
}
@@ -3759,7 +3825,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-
/*****************************************************************************
*
* Generates code for moving incoming register arguments to their
@@ -3768,15 +3833,15 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
- bool* pXtraRegClobbered,
- RegState* regState)
+void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbered, RegState* regState)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In genFnPrologCalleeRegArgs() for %s regs\n", regState->rsIsFloat ? "float" : "int");
+ }
#endif
#ifdef _TARGET_ARM64_
@@ -3786,14 +3851,14 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// No need further action.
return;
}
-#endif
+#endif
- unsigned argMax; // maximum argNum value plus 1, (including the RetBuffArg)
- unsigned argNum; // current argNum, always in [0..argMax-1]
- unsigned fixedRetBufIndex; // argNum value used by the fixed return buffer argument (ARM64)
- unsigned regArgNum; // index into the regArgTab[] table
- regMaskTP regArgMaskLive = regState->rsCalleeRegArgMaskLiveIn;
- bool doingFloat = regState->rsIsFloat;
+ unsigned argMax; // maximum argNum value plus 1, (including the RetBuffArg)
+ unsigned argNum; // current argNum, always in [0..argMax-1]
+ unsigned fixedRetBufIndex; // argNum value used by the fixed return buffer argument (ARM64)
+ unsigned regArgNum; // index into the regArgTab[] table
+ regMaskTP regArgMaskLive = regState->rsCalleeRegArgMaskLiveIn;
+ bool doingFloat = regState->rsIsFloat;
// We should be generating the prolog block when we are called
assert(compiler->compGeneratingProlog);
@@ -3804,22 +3869,22 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// If a method has 3 args (and no fixed return buffer) then argMax is 3 and valid indexes are 0,1,2
// If a method has a fixed return buffer (on ARM64) then argMax gets set to 9 and valid index are 0-8
//
- // The regArgTab can always have unused entries,
+ // The regArgTab can always have unused entries,
// for example if an architecture always increments the arg register number but uses either
- // an integer register or a floating point register to hold the next argument
+ // an integer register or a floating point register to hold the next argument
// then with a mix of float and integer args you could have:
//
// sampleMethod(int i, float x, int j, float y, int k, float z);
- // r0, r2 and r4 as valid integer arguments with argMax as 5
+ // r0, r2 and r4 as valid integer arguments with argMax as 5
// and f1, f3 and f5 and valid floating point arguments with argMax as 6
- // The first one is doingFloat==false and the second one is doingFloat==true
+ // The first one is doingFloat==false and the second one is doingFloat==true
//
// If a fixed return buffer (in r8) was also present then the first one would become:
// r0, r2, r4 and r8 as valid integer arguments with argMax as 9
//
- argMax = regState->rsCalleeRegArgCount;
- fixedRetBufIndex = (unsigned)-1; // Invalid value
+ argMax = regState->rsCalleeRegArgCount;
+ fixedRetBufIndex = (unsigned)-1; // Invalid value
// If necessary we will select a correct xtraReg for circular floating point args later.
if (doingFloat)
@@ -3827,14 +3892,14 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
xtraReg = REG_NA;
noway_assert(argMax <= MAX_FLOAT_REG_ARG);
}
- else // we are doing the integer registers
+ else // we are doing the integer registers
{
noway_assert(argMax <= MAX_REG_ARG);
if (hasFixedRetBuffReg())
{
fixedRetBufIndex = theFixedRetBuffArgNum();
// We have an additional integer register argument when hasFixedRetBuffReg() is true
- argMax = fixedRetBufIndex+1;
+ argMax = fixedRetBufIndex + 1;
assert(argMax == (MAX_REG_ARG + 1));
}
}
@@ -3852,19 +3917,19 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
//
struct regArgElem
{
- unsigned varNum; // index into compiler->lvaTable[] for this register argument
+ unsigned varNum; // index into compiler->lvaTable[] for this register argument
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- var_types type; // the Jit type of this regArgTab entry
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- unsigned trashBy; // index into this regArgTab[] table of the register that will be copied to this register.
- // That is, for regArgTab[x].trashBy = y, argument register number 'y' will be copied to
- // argument register number 'x'. Only used when circular = true.
- char slot; // 0 means the register is not used for a register argument
- // 1 means the first part of a register argument
- // 2, 3 or 4 means the second,third or fourth part of a multireg argument
- bool stackArg; // true if the argument gets homed to the stack
- bool processed; // true after we've processed the argument (and it is in its final location)
- bool circular; // true if this register participates in a circular dependency loop.
+ var_types type; // the Jit type of this regArgTab entry
+#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+ unsigned trashBy; // index into this regArgTab[] table of the register that will be copied to this register.
+ // That is, for regArgTab[x].trashBy = y, argument register number 'y' will be copied to
+ // argument register number 'x'. Only used when circular = true.
+ char slot; // 0 means the register is not used for a register argument
+ // 1 means the first part of a register argument
+ // 2, 3 or 4 means the second,third or fourth part of a multireg argument
+ bool stackArg; // true if the argument gets homed to the stack
+ bool processed; // true after we've processed the argument (and it is in its final location)
+ bool circular; // true if this register participates in a circular dependency loop.
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -3872,31 +3937,31 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// the type of the lclVar in ways that are not ascertainable from lvType.
// So, for that case we retain the type of the register in the regArgTab.
- var_types getRegType(Compiler* compiler)
+ var_types getRegType(Compiler* compiler)
{
- return type; // UNIX_AMD64 implementation
+ return type; // UNIX_AMD64 implementation
}
#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
// In other cases, we simply use the type of the lclVar to determine the type of the register.
- var_types getRegType(Compiler* compiler)
+ var_types getRegType(Compiler* compiler)
{
LclVarDsc varDsc = compiler->lvaTable[varNum];
// Check if this is an HFA register arg and return the HFA type
if (varDsc.lvIsHfaRegArg())
+ {
return varDsc.GetHfaType();
+ }
return varDsc.lvType;
}
#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
- } regArgTab[max(MAX_REG_ARG+1, MAX_FLOAT_REG_ARG)] = {};
+ } regArgTab[max(MAX_REG_ARG + 1, MAX_FLOAT_REG_ARG)] = {};
- unsigned varNum;
- LclVarDsc* varDsc;
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++, varDsc++)
+ unsigned varNum;
+ LclVarDsc* varDsc;
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
// Is this variable a register arg?
if (!varDsc->lvIsParam)
@@ -3916,20 +3981,21 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
//
if (varDsc->lvPromoted || varDsc->lvIsStructField)
{
- LclVarDsc * parentVarDsc = varDsc;
+ LclVarDsc* parentVarDsc = varDsc;
if (varDsc->lvIsStructField)
{
assert(!varDsc->lvPromoted);
- parentVarDsc = &compiler->lvaTable[varDsc->lvParentLcl];
- }
+ parentVarDsc = &compiler->lvaTable[varDsc->lvParentLcl];
+ }
Compiler::lvaPromotionType promotionType = compiler->lvaGetPromotionType(parentVarDsc);
if (promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT)
{
- noway_assert(parentVarDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(parentVarDsc->lvFieldCnt == 1); // We only handle one field here
- // For register arguments that are independent promoted structs we put the promoted field varNum in the regArgTab[]
+ // For register arguments that are independent promoted structs we put the promoted field varNum in the
+ // regArgTab[]
if (varDsc->lvPromoted)
{
continue;
@@ -3937,7 +4003,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
}
else
{
- // For register arguments that are not independent promoted structs we put the parent struct varNum in the regArgTab[]
+ // For register arguments that are not independent promoted structs we put the parent struct varNum in
+ // the regArgTab[]
if (varDsc->lvIsStructField)
{
continue;
@@ -3994,10 +4061,10 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
//
// Assumption 2:
// RyuJit backend is making another implicit assumption that Vector3 type args when passed in
- // registers or on stack, the upper most 4-bytes will be zero.
+ // registers or on stack, the upper most 4-bytes will be zero.
//
// For P/Invoke return and Reverse P/Invoke argument passing, native compiler doesn't guarantee
- // that upper 4-bytes of a Vector3 type struct is zero initialized and hence assumption 2 is
+ // that upper 4-bytes of a Vector3 type struct is zero initialized and hence assumption 2 is
// invalid.
//
// RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12
@@ -4006,8 +4073,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// there is no need to clear upper 4-bytes of Vector3 type args.
//
// RyuJIT x64 Unix: arguments are treated as passed by value and read/writen as if TYP_SIMD16.
- // Vector3 return values are returned two return registers and Caller assembles them into a
- // single xmm reg. Hence RyuJIT explicitly generates code to clears upper 4-bytes of Vector3
+ // Vector3 return values are returned two return registers and Caller assembles them into a
+ // single xmm reg. Hence RyuJIT explicitly generates code to clears upper 4-bytes of Vector3
// type args in prolog and Vector3 type return value of a call
if (varDsc->lvType == TYP_SIMD12)
@@ -4019,11 +4086,11 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
{
regType = compiler->GetEightByteType(structDesc, slotCounter);
}
-
+
regArgNum = genMapRegNumToRegArgNum(regNum, regType);
-
+
if ((!doingFloat && (structDesc.IsIntegralSlot(slotCounter))) ||
- (doingFloat && (structDesc.IsSseSlot(slotCounter))))
+ (doingFloat && (structDesc.IsSseSlot(slotCounter))))
{
// Store the reg for the first slot.
if (slots == 0)
@@ -4033,10 +4100,12 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// Bingo - add it to our table
noway_assert(regArgNum < argMax);
- noway_assert(regArgTab[regArgNum].slot == 0); // we better not have added it already (there better not be multiple vars representing this argument register)
+ noway_assert(regArgTab[regArgNum].slot == 0); // we better not have added it already (there better
+ // not be multiple vars representing this argument
+ // register)
regArgTab[regArgNum].varNum = varNum;
- regArgTab[regArgNum].slot = (char)(slotCounter + 1);
- regArgTab[regArgNum].type = regType;
+ regArgTab[regArgNum].slot = (char)(slotCounter + 1);
+ regArgTab[regArgNum].type = regType;
slots++;
}
}
@@ -4055,7 +4124,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
regArgNum = genMapRegNumToRegArgNum(varDsc->lvArgReg, regType);
noway_assert(regArgNum < argMax);
- // We better not have added it already (there better not be multiple vars representing this argument register)
+ // We better not have added it already (there better not be multiple vars representing this argument
+ // register)
noway_assert(regArgTab[regArgNum].slot == 0);
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
@@ -4064,7 +4134,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
regArgTab[regArgNum].varNum = varNum;
- regArgTab[regArgNum].slot = 1;
+ regArgTab[regArgNum].slot = 1;
slots = 1;
@@ -4084,20 +4154,21 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
slots = 2;
}
- // Note that regArgNum+1 represents an argument index not an actual argument register.
+ // Note that regArgNum+1 represents an argument index not an actual argument register.
// see genMapRegArgNumToRegNum(unsigned argNum, var_types type)
// This is the setup for the rest of a multireg struct arg
- for (int i = 1; i<slots; i++)
+ for (int i = 1; i < slots; i++)
{
noway_assert((regArgNum + i) < argMax);
- // We better not have added it already (there better not be multiple vars representing this argument register)
+ // We better not have added it already (there better not be multiple vars representing this argument
+ // register)
noway_assert(regArgTab[regArgNum + i].slot == 0);
regArgTab[regArgNum + i].varNum = varNum;
- regArgTab[regArgNum + i].slot = (char)(i+1);
+ regArgTab[regArgNum + i].slot = (char)(i + 1);
}
}
#endif // FEATURE_MULTIREG_ARGS
@@ -4109,29 +4180,29 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
if (lclSize > REGSIZE_BYTES)
{
unsigned maxRegArgNum = doingFloat ? MAX_FLOAT_REG_ARG : MAX_REG_ARG;
- slots = lclSize / REGSIZE_BYTES;
+ slots = lclSize / REGSIZE_BYTES;
if (regArgNum + slots > maxRegArgNum)
{
slots = maxRegArgNum - regArgNum;
}
}
- C_ASSERT((char)MAX_REG_ARG == MAX_REG_ARG);
+ C_ASSERT((char)MAX_REG_ARG == MAX_REG_ARG);
assert(slots < INT8_MAX);
- for (char i = 1; i < slots; i ++)
+ for (char i = 1; i < slots; i++)
{
regArgTab[regArgNum + i].varNum = varNum;
- regArgTab[regArgNum + i].slot = i + 1;
+ regArgTab[regArgNum + i].slot = i + 1;
}
#endif // _TARGET_ARM_
- for (int i = 0; i < slots; i ++)
+ for (int i = 0; i < slots; i++)
{
- regType = regArgTab[regArgNum + i].getRegType(compiler);
+ regType = regArgTab[regArgNum + i].getRegType(compiler);
regNumber regNum = genMapRegArgNumToRegNum(regArgNum + i, regType);
#if !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// lvArgReg could be INT or FLOAT reg. So the following assertion doesn't hold.
- // The type of the register depends on the classification of the first eightbyte
+ // The type of the register depends on the classification of the first eightbyte
// of the struct. For information on classification refer to the System V x86_64 ABI at:
// http://www.x86-64.org/documentation/abi.pdf
@@ -4154,16 +4225,17 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// For LSRA, it may not be in regArgMaskLive if it has a zero
// refcnt. This is in contrast with the non-LSRA case in which all
// non-tracked args are assumed live on entry.
- noway_assert((varDsc->lvRefCnt == 0) ||
- (varDsc->lvType == TYP_STRUCT) ||
+ noway_assert((varDsc->lvRefCnt == 0) || (varDsc->lvType == TYP_STRUCT) ||
(varDsc->lvAddrExposed && compiler->info.compIsVarArgs));
-#else // LEGACY_BACKEND
- noway_assert(varDsc->lvType == TYP_STRUCT || (varDsc->lvAddrExposed && (compiler->info.compIsVarArgs || compiler->opts.compUseSoftFP)));
+#else // LEGACY_BACKEND
+ noway_assert(
+ varDsc->lvType == TYP_STRUCT ||
+ (varDsc->lvAddrExposed && (compiler->info.compIsVarArgs || compiler->opts.compUseSoftFP)));
#endif // LEGACY_BACKEND
#endif // !_TARGET_X86_
- }
+ }
// Mark it as processed and be done with it
- regArgTab[regArgNum+i].processed = true;
+ regArgTab[regArgNum + i].processed = true;
goto NON_DEP;
}
@@ -4179,16 +4251,15 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
noway_assert(regArgMaskLive & genRegMask(regNum));
}
- regArgTab[regArgNum+i].processed = false;
+ regArgTab[regArgNum + i].processed = false;
/* mark stack arguments since we will take care of those first */
- regArgTab[regArgNum+i].stackArg = (varDsc->lvIsInReg()) ? false : true;
+ regArgTab[regArgNum + i].stackArg = (varDsc->lvIsInReg()) ? false : true;
/* If it goes on the stack or in a register that doesn't hold
* an argument anymore -> CANNOT form a circular dependency */
- if (varDsc->lvIsInReg() &&
- (genRegMask(regNum) & regArgMaskLive))
+ if (varDsc->lvIsInReg() && (genRegMask(regNum) & regArgMaskLive))
{
/* will trash another argument -> possible dependency
* We may need several passes after the table is constructed
@@ -4202,13 +4273,11 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
}
#if !defined(_TARGET_64BIT_)
- if ((i == 1) && varTypeIsStruct(varDsc) &&
- (varDsc->lvOtherReg == regNum))
+ if ((i == 1) && varTypeIsStruct(varDsc) && (varDsc->lvOtherReg == regNum))
{
goto NON_DEP;
}
- if ((i == 1) && (genActualType(varDsc->TypeGet()) == TYP_LONG) &&
- (varDsc->lvOtherReg == regNum))
+ if ((i == 1) && (genActualType(varDsc->TypeGet()) == TYP_LONG) && (varDsc->lvOtherReg == regNum))
{
goto NON_DEP;
}
@@ -4219,13 +4288,13 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
goto NON_DEP;
}
#endif // !defined(_TARGET_64BIT_)
- regArgTab[regArgNum+i].circular = true;
+ regArgTab[regArgNum + i].circular = true;
}
else
{
NON_DEP:
- regArgTab[regArgNum+i].circular = false;
-
+ regArgTab[regArgNum + i].circular = false;
+
/* mark the argument register as free */
regArgMaskLive &= ~genRegMask(regNum);
}
@@ -4236,7 +4305,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
* A circular dependency is a set of registers R1, R2, ..., Rn
* such that R1->R2 (that is, R1 needs to be moved to R2), R2->R3, ..., Rn->R1 */
- bool change = true;
+ bool change = true;
if (regArgMaskLive)
{
/* Possible circular dependencies still exist; the previous pass was not enough
@@ -4270,7 +4339,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
noway_assert(!regArgTab[argNum].stackArg);
var_types regType = regArgTab[argNum].getRegType(compiler);
- regNumber regNum = genMapRegArgNumToRegNum(argNum, regType);
+ regNumber regNum = genMapRegArgNumToRegNum(argNum, regType);
regNumber destRegNum = REG_NA;
if (regArgTab[argNum].slot == 1)
@@ -4287,12 +4356,11 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
assert((varDsc->lvType == TYP_SIMD12) || (varDsc->lvType == TYP_SIMD16));
regArgMaskLive &= ~genRegMask(regNum);
regArgTab[argNum].circular = false;
- change = true;
+ change = true;
continue;
}
#elif !defined(_TARGET_64BIT_)
- else if (regArgTab[argNum].slot == 2 &&
- genActualType(varDsc->TypeGet()) == TYP_LONG)
+ else if (regArgTab[argNum].slot == 2 && genActualType(varDsc->TypeGet()) == TYP_LONG)
{
destRegNum = varDsc->lvOtherReg;
}
@@ -4314,8 +4382,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
else
{
/* argument goes to a free register */
- regArgTab[argNum].circular = false;
- change = true;
+ regArgTab[argNum].circular = false;
+ change = true;
/* mark the argument register as free */
regArgMaskLive &= ~genRegMask(regNum);
@@ -4343,7 +4411,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef LEGACY_BACKEND
- noway_assert(((regArgMaskLive & RBM_FLTARG_REGS) == 0) && "Homing of float argument registers with circular dependencies not implemented.");
+ noway_assert(((regArgMaskLive & RBM_FLTARG_REGS) == 0) &&
+ "Homing of float argument registers with circular dependencies not implemented.");
#endif // LEGACY_BACKEND
/* Now move the arguments to their locations.
@@ -4353,7 +4422,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
regArgMaskLive = regState->rsCalleeRegArgMaskLiveIn; // reset the live in to what it was at the start
for (argNum = 0; argNum < argMax; argNum++)
{
- emitAttr size;
+ emitAttr size;
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// If this is the wrong register file, just continue.
@@ -4377,7 +4446,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
continue;
}
- varNum = regArgTab[argNum].varNum; noway_assert(varNum < compiler->lvaCount);
+ varNum = regArgTab[argNum].varNum;
+ noway_assert(varNum < compiler->lvaCount);
varDsc = compiler->lvaTable + varNum;
#ifndef _TARGET_64BIT_
@@ -4393,7 +4463,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
continue;
}
}
- else
+ else
#endif // !_TARGET_64BIT_
{
// If not a stack arg go to the next one
@@ -4414,7 +4484,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
}
#endif
- noway_assert(regArgTab[argNum].circular == false);
+ noway_assert(regArgTab[argNum].circular == false);
noway_assert(varDsc->lvIsParam);
noway_assert(varDsc->lvIsRegArg);
@@ -4422,11 +4492,11 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
(varDsc->lvType == TYP_LONG && varDsc->lvOtherReg == REG_STK && regArgTab[argNum].slot == 2));
var_types storeType = TYP_UNDEF;
- unsigned slotSize = TARGET_POINTER_SIZE;
+ unsigned slotSize = TARGET_POINTER_SIZE;
if (varTypeIsStruct(varDsc))
{
- storeType = TYP_I_IMPL; // Default store type for a struct type is a pointer sized integer
+ storeType = TYP_I_IMPL; // Default store type for a struct type is a pointer sized integer
#if FEATURE_MULTIREG_ARGS
// Must be <= MAX_PASS_MULTIREG_BYTES or else it wouldn't be passed in registers
noway_assert(varDsc->lvSize() <= MAX_PASS_MULTIREG_BYTES);
@@ -4439,14 +4509,14 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
#ifdef _TARGET_ARM_
// On ARM32 the storeType for HFA args is always TYP_FLOAT
storeType = TYP_FLOAT;
- slotSize = (unsigned)emitActualTypeSize(storeType);
-#else // _TARGET_ARM64_
+ slotSize = (unsigned)emitActualTypeSize(storeType);
+#else // _TARGET_ARM64_
storeType = genActualType(varDsc->GetHfaType());
- slotSize = (unsigned)emitActualTypeSize(storeType);
+ slotSize = (unsigned)emitActualTypeSize(storeType);
#endif // _TARGET_ARM64_
}
}
- else // Not a struct type
+ else // Not a struct type
{
storeType = genActualType(varDsc->TypeGet());
}
@@ -4456,7 +4526,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
#endif //_TARGET_X86_
regNumber srcRegNum = genMapRegArgNumToRegNum(argNum, storeType);
-
+
// Stack argument - if the ref count is 0 don't care about it
if (!varDsc->lvOnFrame)
@@ -4468,23 +4538,20 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// Since slot is typically 1, baseOffset is typically 0
int baseOffset = (regArgTab[argNum].slot - 1) * slotSize;
- getEmitter()->emitIns_S_R(ins_Store(storeType),
- size,
- srcRegNum,
- varNum,
- baseOffset);
+ getEmitter()->emitIns_S_R(ins_Store(storeType), size, srcRegNum, varNum, baseOffset);
#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
// Check if we are writing past the end of the struct
if (varTypeIsStruct(varDsc))
{
- assert(varDsc->lvSize() >= baseOffset+(unsigned)size);
+ assert(varDsc->lvSize() >= baseOffset + (unsigned)size);
}
#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
-
if (regArgTab[argNum].slot == 1)
+ {
psiMoveToStack(varNum);
+ }
}
/* mark the argument as processed */
@@ -4495,7 +4562,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
#if defined(_TARGET_ARM_)
if (storeType == TYP_DOUBLE)
{
- regArgTab[argNum+1].processed = true;
+ regArgTab[argNum + 1].processed = true;
regArgMaskLive &= ~genRegMask(REG_NEXT(srcRegNum));
}
#endif
@@ -4504,11 +4571,11 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
/* Process any circular dependencies */
if (regArgMaskLive)
{
- unsigned begReg, destReg, srcReg;
- unsigned varNumDest, varNumSrc;
- LclVarDsc * varDscDest;
- LclVarDsc * varDscSrc;
- instruction insCopy = INS_mov;
+ unsigned begReg, destReg, srcReg;
+ unsigned varNumDest, varNumSrc;
+ LclVarDsc* varDscDest;
+ LclVarDsc* varDscSrc;
+ instruction insCopy = INS_mov;
if (doingFloat)
{
@@ -4517,8 +4584,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// Compute xtraReg here when we have a float argument
assert(xtraReg == REG_NA);
- regMaskTP fpAvailMask;
-
+ regMaskTP fpAvailMask;
+
fpAvailMask = RBM_FLT_CALLEE_TRASH & ~regArgMaskLive;
#if defined(FEATURE_HFA)
fpAvailMask &= RBM_ALLDOUBLE;
@@ -4544,7 +4611,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// We pick the lowest avail register number
regMaskTP tempMask = genFindLowestBit(fpAvailMask);
- xtraReg = genRegNumFromMask(tempMask);
+ xtraReg = genRegNumFromMask(tempMask);
#elif defined(_TARGET_X86_)
// This case shouldn't occur on x86 since NYI gets converted to an assert
NYI("Homing circular FP registers via xtraReg");
@@ -4572,22 +4639,23 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
}
destReg = begReg = argNum;
- srcReg = regArgTab[argNum].trashBy;
+ srcReg = regArgTab[argNum].trashBy;
- varNumDest = regArgTab[destReg].varNum;
+ varNumDest = regArgTab[destReg].varNum;
noway_assert(varNumDest < compiler->lvaCount);
varDscDest = compiler->lvaTable + varNumDest;
noway_assert(varDscDest->lvIsParam && varDscDest->lvIsRegArg);
noway_assert(srcReg < argMax);
- varNumSrc = regArgTab[srcReg].varNum; noway_assert(varNumSrc < compiler->lvaCount);
+ varNumSrc = regArgTab[srcReg].varNum;
+ noway_assert(varNumSrc < compiler->lvaCount);
varDscSrc = compiler->lvaTable + varNumSrc;
noway_assert(varDscSrc->lvIsParam && varDscSrc->lvIsRegArg);
- emitAttr size = EA_PTRSIZE;
+ emitAttr size = EA_PTRSIZE;
#ifdef _TARGET_XARCH_
- //
+ //
// The following code relies upon the target architecture having an
// 'xchg' instruction which directly swaps the values held in two registers.
// On the ARM architecture we do not have such an instruction.
@@ -4596,7 +4664,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
{
/* only 2 registers form the circular dependency - use "xchg" */
- varNum = regArgTab[argNum].varNum; noway_assert(varNum < compiler->lvaCount);
+ varNum = regArgTab[argNum].varNum;
+ noway_assert(varNum < compiler->lvaCount);
varDsc = compiler->lvaTable + varNum;
noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg);
@@ -4609,17 +4678,14 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
* have to "swap" the registers in the GC reg pointer mask
*/
- if (varTypeGCtype(varDscSrc->TypeGet()) !=
- varTypeGCtype(varDscDest->TypeGet()))
+ if (varTypeGCtype(varDscSrc->TypeGet()) != varTypeGCtype(varDscDest->TypeGet()))
{
size = EA_GCREF;
}
noway_assert(varDscDest->lvArgReg == varDscSrc->lvRegNum);
- getEmitter()->emitIns_R_R(INS_xchg, size,
- varDscSrc->lvRegNum,
- varDscSrc->lvArgReg);
+ getEmitter()->emitIns_R_R(INS_xchg, size, varDscSrc->lvRegNum, varDscSrc->lvArgReg);
regTracker.rsTrackRegTrash(varDscSrc->lvRegNum);
regTracker.rsTrackRegTrash(varDscSrc->lvArgReg);
@@ -4630,7 +4696,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
regArgMaskLive &= ~genRegMask(varDscSrc->lvArgReg);
regArgMaskLive &= ~genRegMask(varDscDest->lvArgReg);
- psiMoveToReg(varNumSrc );
+ psiMoveToReg(varNumSrc);
psiMoveToReg(varNumDest);
}
else
@@ -4650,8 +4716,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
break;
}
iter = regArgTab[iter].trashBy;
- }
- while (iter != begReg);
+ } while (iter != begReg);
// We may treat doubles as floats for ARM because we could have partial circular
// dependencies of a float with a lo/hi part of the double. We mark the
@@ -4666,7 +4731,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
}
#endif // _TARGET_ARM_
- if (destMemType == TYP_REF)
+ if (destMemType == TYP_REF)
{
size = EA_GCREF;
}
@@ -4678,7 +4743,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
{
size = EA_8BYTE;
}
- else if (destMemType == TYP_FLOAT)
+ else if (destMemType == TYP_FLOAT)
{
size = EA_4BYTE;
}
@@ -4689,7 +4754,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
regNumber begRegNum = genMapRegArgNumToRegNum(begReg, destMemType);
- getEmitter()->emitIns_R_R (insCopy, size, xtraReg, begRegNum);
+ getEmitter()->emitIns_R_R(insCopy, size, xtraReg, begRegNum);
regTracker.rsTrackRegCopy(xtraReg, begRegNum);
@@ -4712,18 +4777,18 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
/* mark 'src' as processed */
noway_assert(srcReg < argMax);
- regArgTab[srcReg].processed = true;
+ regArgTab[srcReg].processed = true;
#ifdef _TARGET_ARM_
if (size == EA_8BYTE)
- regArgTab[srcReg+1].processed = true;
+ regArgTab[srcReg + 1].processed = true;
#endif
regArgMaskLive &= ~genMapArgNumToRegMask(srcReg, destMemType);
/* move to the next pair */
destReg = srcReg;
- srcReg = regArgTab[srcReg].trashBy;
+ srcReg = regArgTab[srcReg].trashBy;
- varDscDest = varDscSrc;
+ varDscDest = varDscSrc;
destMemType = varDscDest->TypeGet();
#ifdef _TARGET_ARM_
if (!cycleAllDouble && destMemType == TYP_DOUBLE)
@@ -4731,16 +4796,16 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
destMemType = TYP_FLOAT;
}
#endif
- varNumSrc = regArgTab[srcReg].varNum;
+ varNumSrc = regArgTab[srcReg].varNum;
noway_assert(varNumSrc < compiler->lvaCount);
varDscSrc = compiler->lvaTable + varNumSrc;
noway_assert(varDscSrc->lvIsParam && varDscSrc->lvIsRegArg);
- if (destMemType == TYP_REF)
+ if (destMemType == TYP_REF)
{
size = EA_GCREF;
}
- else if (destMemType == TYP_DOUBLE)
+ else if (destMemType == TYP_DOUBLE)
{
size = EA_8BYTE;
}
@@ -4766,10 +4831,10 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
/* mark the beginning register as processed */
- regArgTab[srcReg].processed = true;
+ regArgTab[srcReg].processed = true;
#ifdef _TARGET_ARM_
if (size == EA_8BYTE)
- regArgTab[srcReg+1].processed = true;
+ regArgTab[srcReg + 1].processed = true;
#endif
regArgMaskLive &= ~genMapArgNumToRegMask(srcReg, destMemType);
}
@@ -4785,15 +4850,20 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
{
/* If already processed go to the next one */
if (regArgTab[argNum].processed)
+ {
continue;
+ }
- if (regArgTab[argNum].slot == 0) // Not a register argument
+ if (regArgTab[argNum].slot == 0)
+ { // Not a register argument
continue;
+ }
- varNum = regArgTab[argNum].varNum; noway_assert(varNum < compiler->lvaCount);
- varDsc = compiler->lvaTable + varNum;
+ varNum = regArgTab[argNum].varNum;
+ noway_assert(varNum < compiler->lvaCount);
+ varDsc = compiler->lvaTable + varNum;
var_types regType = regArgTab[argNum].getRegType(compiler);
- regNumber regNum = genMapRegArgNumToRegNum(argNum, regType);
+ regNumber regNum = genMapRegArgNumToRegNum(argNum, regType);
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
if (regType == TYP_UNDEF)
@@ -4806,7 +4876,6 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
}
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-
noway_assert(varDsc->lvIsParam && varDsc->lvIsRegArg);
#ifndef _TARGET_64BIT_
#ifndef _TARGET_ARM_
@@ -4815,14 +4884,14 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// idea of how to ignore it.
// On Arm, a long can be passed in register
- noway_assert(genTypeSize(genActualType(varDsc->TypeGet())) == sizeof(void *));
+ noway_assert(genTypeSize(genActualType(varDsc->TypeGet())) == sizeof(void*));
#endif
#endif //_TARGET_64BIT_
noway_assert(varDsc->lvIsInReg() && !regArgTab[argNum].circular);
/* Register argument - hopefully it stays in the same register */
- regNumber destRegNum = REG_NA;
+ regNumber destRegNum = REG_NA;
var_types destMemType = varDsc->TypeGet();
if (regArgTab[argNum].slot == 1)
@@ -4838,17 +4907,15 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
#endif // _TARGET_ARM_
}
#ifndef _TARGET_64BIT_
- else if (regArgTab[argNum].slot == 2 &&
- genActualType(destMemType) == TYP_LONG)
+ else if (regArgTab[argNum].slot == 2 && genActualType(destMemType) == TYP_LONG)
{
#ifndef LEGACY_BACKEND
- assert(genActualType(varDsc->TypeGet()) == TYP_LONG
- || genActualType(varDsc->TypeGet()) == TYP_DOUBLE);
+ assert(genActualType(varDsc->TypeGet()) == TYP_LONG || genActualType(varDsc->TypeGet()) == TYP_DOUBLE);
if (genActualType(varDsc->TypeGet()) == TYP_DOUBLE)
{
- destRegNum = regNum;
+ destRegNum = regNum;
}
- else
+ else
#endif // !LEGACY_BACKEND
destRegNum = varDsc->lvOtherReg;
@@ -4878,7 +4945,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
assert(argNum > 0);
assert(regArgTab[argNum - 1].slot == 1);
-
+
if (!regArgTab[argNum - 1].processed)
{
// The first half of the double hasn't been processed; try to be processed at the same time
@@ -4892,7 +4959,7 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
// it as a single to finish the shuffling.
destMemType = TYP_FLOAT;
- destRegNum = REG_NEXT(varDsc->lvRegNum);
+ destRegNum = REG_NEXT(varDsc->lvRegNum);
}
#endif // !_TARGET_64BIT_
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
@@ -4925,12 +4992,14 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
#endif
if (destMask & regArgMaskLive)
+ {
continue;
+ }
/* Move it to the new register */
emitAttr size = emitActualTypeSize(destMemType);
-
+
getEmitter()->emitIns_R_R(ins_Copy(destMemType), size, destRegNum, regNum);
psiMoveToReg(varNum);
@@ -4950,12 +5019,10 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
}
#endif
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
- if (varTypeIsStruct(varDsc) &&
- argNum < (argMax - 1) &&
- regArgTab[argNum+1].slot == 2)
+ if (varTypeIsStruct(varDsc) && argNum < (argMax - 1) && regArgTab[argNum + 1].slot == 2)
{
- argRegCount = 2;
- int nextArgNum = argNum + 1;
+ argRegCount = 2;
+ int nextArgNum = argNum + 1;
regNumber nextRegNum = genMapRegArgNumToRegNum(nextArgNum, regArgTab[nextArgNum].getRegType(compiler));
noway_assert(regArgTab[nextArgNum].varNum == varNum);
// Emit a shufpd with a 0 immediate, which preserves the 0th element of the dest reg
@@ -4974,12 +5041,12 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
assert(!regArgTab[nextArgNum].processed);
regArgTab[nextArgNum].processed = true;
regNumber nextRegNum = genMapRegArgNumToRegNum(nextArgNum, regArgTab[nextArgNum].getRegType(compiler));
- regArgMaskLive &= ~genRegMask(nextRegNum);
+ regArgMaskLive &= ~genRegMask(nextRegNum);
}
#endif // FEATURE_MULTIREG_ARGS
}
-
- noway_assert(regArgMaskLiveSave != regArgMaskLive); // if it doesn't change, we have an infinite loop
+
+ noway_assert(regArgMaskLiveSave != regArgMaskLive); // if it doesn't change, we have an infinite loop
}
}
#ifdef _PREFAST_
@@ -4989,48 +5056,54 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg,
/*****************************************************************************
* If any incoming stack arguments live in registers, load them.
*/
-void CodeGen::genEnregisterIncomingStackArgs()
+void CodeGen::genEnregisterIncomingStackArgs()
{
#ifdef DEBUG
if (verbose)
+ {
printf("*************** In genEnregisterIncomingStackArgs()\n");
+ }
#endif
assert(compiler->compGeneratingProlog);
unsigned varNum = 0;
- for (LclVarDsc * varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (LclVarDsc *varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
/* Is this variable a parameter? */
- if (!varDsc->lvIsParam)
+ if (!varDsc->lvIsParam)
+ {
continue;
+ }
/* If it's a register argument then it's already been taken care of.
- But, on Arm when under a profiler, we would have prespilled a register argument
+ But, on Arm when under a profiler, we would have prespilled a register argument
and hence here we need to load it from its prespilled location.
*/
bool isPrespilledForProfiling = false;
#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED)
- isPrespilledForProfiling = compiler->compIsProfilerHookNeeded() &&
- compiler->lvaIsPreSpilled(varNum, regSet.rsMaskPreSpillRegs(false));
+ isPrespilledForProfiling =
+ compiler->compIsProfilerHookNeeded() && compiler->lvaIsPreSpilled(varNum, regSet.rsMaskPreSpillRegs(false));
#endif
- if (varDsc->lvIsRegArg && !isPrespilledForProfiling)
+ if (varDsc->lvIsRegArg && !isPrespilledForProfiling)
+ {
continue;
+ }
/* Has the parameter been assigned to a register? */
if (!varDsc->lvIsInReg())
+ {
continue;
+ }
var_types type = genActualType(varDsc->TypeGet());
#if FEATURE_STACK_FP_X87
- // Floating point locals are loaded onto the x86-FPU in the next section
+ // Floating point locals are loaded onto the x86-FPU in the next section
if (varTypeIsFloating(type))
continue;
#endif
@@ -5038,7 +5111,9 @@ void CodeGen::genEnregisterIncomingStackArgs()
/* Is the variable dead on entry */
if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
+ {
continue;
+ }
/* Load the incoming parameter into the register */
@@ -5052,8 +5127,8 @@ void CodeGen::genEnregisterIncomingStackArgs()
if (type == TYP_LONG)
{
regPairNo regPair = varDsc->lvArgInitRegPair;
- regNum = genRegPairLo(regPair);
- otherReg = genRegPairHi(regPair);
+ regNum = genRegPairLo(regPair);
+ otherReg = genRegPairHi(regPair);
}
else
#endif // _TARGET_ARM
@@ -5061,7 +5136,7 @@ void CodeGen::genEnregisterIncomingStackArgs()
regNum = varDsc->lvArgInitReg;
otherReg = REG_NA;
}
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
regNum = varDsc->lvRegNum;
if (type == TYP_LONG)
{
@@ -5080,22 +5155,14 @@ void CodeGen::genEnregisterIncomingStackArgs()
{
/* long - at least the low half must be enregistered */
- getEmitter()->emitIns_R_S(ins_Load(TYP_INT),
- EA_4BYTE,
- regNum,
- varNum,
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(TYP_INT), EA_4BYTE, regNum, varNum, 0);
regTracker.rsTrackRegTrash(regNum);
/* Is the upper half also enregistered? */
if (otherReg != REG_STK)
{
- getEmitter()->emitIns_R_S(ins_Load(TYP_INT),
- EA_4BYTE,
- otherReg,
- varNum,
- sizeof(int));
+ getEmitter()->emitIns_R_S(ins_Load(TYP_INT), EA_4BYTE, otherReg, varNum, sizeof(int));
regTracker.rsTrackRegTrash(otherReg);
}
}
@@ -5104,11 +5171,7 @@ void CodeGen::genEnregisterIncomingStackArgs()
{
/* Loading a single register - this is the easy/common case */
- getEmitter()->emitIns_R_S(ins_Load(type),
- emitTypeSize(type),
- regNum,
- varNum,
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), regNum, varNum, 0);
regTracker.rsTrackRegTrash(regNum);
}
@@ -5116,10 +5179,9 @@ void CodeGen::genEnregisterIncomingStackArgs()
}
}
-
/*-------------------------------------------------------------------------
*
- * We have to decide whether we're going to use block initialization
+ * We have to decide whether we're going to use block initialization
* in the prolog before we assign final stack offsets. This is because
* when using block initialization we may need additional callee-saved
* registers which need to be saved on the frame, thus increasing the
@@ -5139,22 +5201,24 @@ void CodeGen::genCheckUseBlockInit()
{
#ifndef LEGACY_BACKEND // this is called before codegen in RyuJIT backend
assert(!compiler->compGeneratingProlog);
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
assert(compiler->compGeneratingProlog);
#endif // LEGACY_BACKEND
- unsigned initStkLclCnt = 0; // The number of int-sized stack local variables that need to be initialized (variables larger than int count for more than 1).
- unsigned largeGcStructs = 0; // The number of "large" structs with GC pointers. Used as part of the heuristic to determine whether to use block init.
+ unsigned initStkLclCnt = 0; // The number of int-sized stack local variables that need to be initialized (variables
+ // larger than int count for more than 1).
+ unsigned largeGcStructs = 0; // The number of "large" structs with GC pointers. Used as part of the heuristic to
+ // determine whether to use block init.
- unsigned varNum;
- LclVarDsc * varDsc;
-
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ unsigned varNum;
+ LclVarDsc* varDsc;
+
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
- if (varDsc->lvIsParam)
+ if (varDsc->lvIsParam)
+ {
continue;
+ }
if (!varDsc->lvIsInReg() && !varDsc->lvOnFrame)
{
@@ -5162,22 +5226,30 @@ void CodeGen::genCheckUseBlockInit()
continue;
}
- if (varNum == compiler->lvaInlinedPInvokeFrameVar || varNum == compiler->lvaStubArgumentVar)
+ if (varNum == compiler->lvaInlinedPInvokeFrameVar || varNum == compiler->lvaStubArgumentVar)
+ {
continue;
+ }
#if FEATURE_FIXED_OUT_ARGS
if (varNum == compiler->lvaPInvokeFrameRegSaveVar)
+ {
continue;
+ }
if (varNum == compiler->lvaOutgoingArgSpaceVar)
+ {
continue;
+ }
#endif
#if FEATURE_EH_FUNCLETS
// There's no need to force 0-initialization of the PSPSym, it will be
// initialized with a real value in the prolog
- if (varNum == compiler->lvaPSPSym)
+ if (varNum == compiler->lvaPSPSym)
+ {
continue;
-#endif
+ }
+#endif
if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
@@ -5186,15 +5258,17 @@ void CodeGen::genCheckUseBlockInit()
// field locals.
continue;
}
-
- if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet()) || (varDsc->lvStructGcCount > 0) || varDsc->lvMustInit)
+
+ if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet()) || (varDsc->lvStructGcCount > 0) ||
+ varDsc->lvMustInit)
{
if (varDsc->lvTracked)
{
/* For uninitialized use of tracked variables, the liveness
* will bubble to the top (compiler->fgFirstBB) in fgInterBlockLocalVarLiveness()
*/
- if (varDsc->lvMustInit || VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
+ if (varDsc->lvMustInit ||
+ VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
{
/* This var must be initialized */
@@ -5219,8 +5293,7 @@ void CodeGen::genCheckUseBlockInit()
else
{
// Var is partially enregistered
- noway_assert(genTypeSize(varDsc->TypeGet()) > sizeof(int) &&
- varDsc->lvOtherReg == REG_STK);
+ noway_assert(genTypeSize(varDsc->TypeGet()) > sizeof(int) && varDsc->lvOtherReg == REG_STK);
initStkLclCnt += genTypeStSz(TYP_INT);
}
}
@@ -5236,11 +5309,11 @@ void CodeGen::genCheckUseBlockInit()
// TODO-1stClassStructs
// This is here to duplicate previous behavior, where TYP_SIMD8 locals
// were not being re-typed correctly.
- if ((!varDsc->lvTracked || (varDsc->lvType == TYP_STRUCT) || (varDsc->lvType == TYP_SIMD8)) &&
-#else // !FEATURE_SIMD
- if ((!varDsc->lvTracked || (varDsc->lvType == TYP_STRUCT)) &&
+ if ((!varDsc->lvTracked || (varDsc->lvType == TYP_STRUCT) || (varDsc->lvType == TYP_SIMD8)) &&
+#else // !FEATURE_SIMD
+ if ((!varDsc->lvTracked || (varDsc->lvType == TYP_STRUCT)) &&
#endif // !FEATURE_SIMD
- varDsc->lvOnFrame &&
+ varDsc->lvOnFrame &&
(!varDsc->lvIsTemp || varTypeIsGC(varDsc->TypeGet()) || (varDsc->lvStructGcCount > 0)))
{
varDsc->lvMustInit = true;
@@ -5253,13 +5326,15 @@ void CodeGen::genCheckUseBlockInit()
/* Ignore if not a pointer variable or value class with a GC field */
- if (!compiler->lvaTypeIsGC(varNum))
+ if (!compiler->lvaTypeIsGC(varNum))
+ {
continue;
+ }
#if CAN_DISABLE_DFA
/* If we don't know lifetimes of variables, must be conservative */
- if (compiler->opts.MinOpts())
+ if (compiler->opts.MinOpts())
{
varDsc->lvMustInit = true;
noway_assert(!varDsc->lvRegister);
@@ -5268,29 +5343,35 @@ void CodeGen::genCheckUseBlockInit()
#endif // CAN_DISABLE_DFA
{
if (!varDsc->lvTracked)
+ {
varDsc->lvMustInit = true;
+ }
}
/* Is this a 'must-init' stack pointer local? */
- if (varDsc->lvMustInit && varDsc->lvOnFrame)
+ if (varDsc->lvMustInit && varDsc->lvOnFrame)
+ {
initStkLclCnt += varDsc->lvStructGcCount;
+ }
if ((compiler->lvaLclSize(varNum) > (3 * sizeof(void*))) && (largeGcStructs <= 4))
+ {
largeGcStructs++;
+ }
}
/* Don't forget about spill temps that hold pointers */
- if (!TRACK_GC_TEMP_LIFETIMES)
+ if (!TRACK_GC_TEMP_LIFETIMES)
{
assert(compiler->tmpAllFree());
- for (TempDsc* tempThis = compiler->tmpListBeg();
- tempThis != nullptr;
- tempThis = compiler->tmpListNxt(tempThis))
+ for (TempDsc* tempThis = compiler->tmpListBeg(); tempThis != nullptr; tempThis = compiler->tmpListNxt(tempThis))
{
- if (varTypeIsGC(tempThis->tdTempType()))
+ if (varTypeIsGC(tempThis->tdTempType()))
+ {
initStkLclCnt++;
+ }
}
}
@@ -5306,9 +5387,9 @@ void CodeGen::genCheckUseBlockInit()
we waste all the other slots. Really need to compute the correct
and compare that against zeroing the slots individually */
- genUseBlockInit = (genInitStkLclCnt > (largeGcStructs + 4));
+ genUseBlockInit = (genInitStkLclCnt > (largeGcStructs + 4));
- if (genUseBlockInit)
+ if (genUseBlockInit)
{
regMaskTP maskCalleeRegArgMask = intRegState.rsCalleeRegArgMaskLiveIn;
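The heuristic above turns on block initialization only when the number of int-sized stack slots that must be zeroed exceeds the count of large GC structs plus four. A minimal sketch of that decision, using illustrative names (UseBlockInit, initSlotCount and largeGcStructCount are not identifiers from this source):

// Sketch of the block-init decision in genCheckUseBlockInit: prefer a single
// block clear only when enough int-sized slots need zeroing; each "large"
// GC struct raises the threshold, biasing against block init.
static bool UseBlockInit(unsigned initSlotCount, unsigned largeGcStructCount)
{
    return initSlotCount > (largeGcStructCount + 4);
}

When the heuristic fires, the code that follows additionally marks extra callee-saved registers as modified (for example R4-R6 on ARM) so they are available to the zero-initialization sequence.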
@@ -5339,7 +5420,7 @@ void CodeGen::genCheckUseBlockInit()
{
regSet.rsSetRegsModified(RBM_R13);
}
-#else // !UNIX_AMD64_ABI
+#else // !UNIX_AMD64_ABI
if (maskCalleeRegArgMask & RBM_ECX)
{
regSet.rsSetRegsModified(RBM_ESI);
@@ -5353,9 +5434,9 @@ void CodeGen::genCheckUseBlockInit()
#endif // _TARGET_XARCH_
#ifdef _TARGET_ARM_
- //
- // On the Arm if we are using a block init to initialize, then we
- // must force spill R4/R5/R6 so that we can use them during
+ //
+ // On the Arm if we are using a block init to initialize, then we
+ // must force spill R4/R5/R6 so that we can use them during
// zero-initialization process.
//
int forceSpillRegCount = genCountBits(maskCalleeRegArgMask & ~regSet.rsMaskPreSpillRegs(false)) - 1;
@@ -5369,17 +5450,15 @@ void CodeGen::genCheckUseBlockInit()
}
}
-
/*-----------------------------------------------------------------------------
*
* Push any callee-saved registers we have used
*/
#if defined(_TARGET_ARM64_)
-void CodeGen::genPushCalleeSavedRegisters(regNumber initReg,
- bool * pInitRegZeroed)
+void CodeGen::genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed)
#else
-void CodeGen::genPushCalleeSavedRegisters()
+void CodeGen::genPushCalleeSavedRegisters()
#endif
{
assert(compiler->compGeneratingProlog);
@@ -5394,7 +5473,7 @@ void CodeGen::genPushCalleeSavedRegisters()
#endif
#if ETW_EBP_FRAMED
- if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
+ if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
@@ -5403,7 +5482,9 @@ void CodeGen::genPushCalleeSavedRegisters()
#ifdef _TARGET_XARCH_
// On X86/X64 we have already pushed the FP (frame-pointer) prior to calling this method
if (isFramePointerUsed())
+ {
rsPushRegs &= ~RBM_FPBASE;
+ }
#endif
#ifdef _TARGET_ARMARCH_
@@ -5430,7 +5511,7 @@ void CodeGen::genPushCalleeSavedRegisters()
// Given the limited benefit from this optimization (<10k for mscorlib NGen image), the extra complexity
// is not worth it.
//
- rsPushRegs |= RBM_LR; // We must save the return address (in the LR register)
+ rsPushRegs |= RBM_LR; // We must save the return address (in the LR register)
regSet.rsMaskCalleeSaved = rsPushRegs;
#endif // _TARGET_ARMARCH_
@@ -5439,8 +5520,7 @@ void CodeGen::genPushCalleeSavedRegisters()
if (compiler->compCalleeRegsPushed != genCountBits(rsPushRegs))
{
printf("Error: unexpected number of callee-saved registers to push. Expected: %d. Got: %d ",
- compiler->compCalleeRegsPushed,
- genCountBits(rsPushRegs));
+ compiler->compCalleeRegsPushed, genCountBits(rsPushRegs));
dspRegMask(rsPushRegs);
printf("\n");
assert(compiler->compCalleeRegsPushed == genCountBits(rsPushRegs));
@@ -5495,9 +5575,9 @@ void CodeGen::genPushCalleeSavedRegisters()
assert(maskSaveRegsFloat == RBM_NONE);
}
- int frameType = 0; // This number is arbitrary, is defined below, and corresponds to one of the frame styles we
- // generate based on various sizes.
- int calleeSaveSPDelta = 0;
+ int frameType = 0; // This number is arbitrary, is defined below, and corresponds to one of the frame styles we
+ // generate based on various sizes.
+ int calleeSaveSPDelta = 0;
int calleeSaveSPDeltaUnaligned = 0;
if (isFramePointerUsed())
@@ -5514,7 +5594,7 @@ void CodeGen::genPushCalleeSavedRegisters()
// Generate:
// stp fp,lr,[sp,#-framesz]!
//
- // The (totalFrameSize < 512) condition ensures that both the predecrement
+ // The (totalFrameSize < 512) condition ensures that both the predecrement
// and the postincrement of SP can occur with STP.
//
// After saving callee-saved registers, we establish the frame pointer with:
@@ -5523,10 +5603,11 @@ void CodeGen::genPushCalleeSavedRegisters()
frameType = 1;
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -totalFrameSize, INS_OPTS_PRE_INDEX);
+ getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -totalFrameSize,
+ INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, -totalFrameSize);
- maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
+ maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
offset = (int)compiler->compLclFrameSize + 2 * REGSIZE_BYTES; // 2 for FP/LR
}
else if (totalFrameSize <= 512)
@@ -5551,10 +5632,11 @@ void CodeGen::genPushCalleeSavedRegisters()
getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
compiler->unwindAllocStack(totalFrameSize);
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize);
+ getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
+ compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize);
- maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
+ maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
offset = (int)compiler->compLclFrameSize + 2 * REGSIZE_BYTES; // 2 for FP/LR
}
else
@@ -5619,13 +5701,15 @@ void CodeGen::genPushCalleeSavedRegisters()
frameType = 3;
- calleeSaveSPDeltaUnaligned = totalFrameSize - compiler->compLclFrameSize - 2 * REGSIZE_BYTES; // 2 for FP, LR which we'll save later.
+ calleeSaveSPDeltaUnaligned =
+ totalFrameSize - compiler->compLclFrameSize - 2 * REGSIZE_BYTES; // 2 for FP, LR which we'll save later.
assert(calleeSaveSPDeltaUnaligned >= 0);
assert((calleeSaveSPDeltaUnaligned % 8) == 0); // It better at least be 8 byte aligned.
calleeSaveSPDelta = AlignUp((UINT)calleeSaveSPDeltaUnaligned, STACK_ALIGN);
offset = calleeSaveSPDelta - calleeSaveSPDeltaUnaligned;
- assert((offset == 0) || (offset == REGSIZE_BYTES)); // At most one alignment slot between SP and where we store the callee-saved registers.
+ assert((offset == 0) || (offset == REGSIZE_BYTES)); // At most one alignment slot between SP and where we
+ // store the callee-saved registers.
// We'll take care of these later, but callee-saved regs code shouldn't see them.
maskSaveRegsInt &= ~(RBM_FP | RBM_LR);
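In the frameType 3 path above, the callee-saved area size is 8-byte aligned by construction and is then rounded up to the stack alignment, so at most one register-sized padding slot can appear between SP and the saved registers. A small sketch of that arithmetic, assuming ARM64's REGSIZE_BYTES (8) and STACK_ALIGN (16); AlignCalleeSaveArea is an illustrative name, not the JIT's AlignUp helper:

#include <cassert>

// Round an 8-byte-aligned callee-save area up to 16-byte stack alignment.
// The padding this introduces can only be 0 or one 8-byte slot.
static unsigned AlignCalleeSaveArea(unsigned unalignedSize)
{
    assert((unalignedSize % 8) == 0);
    unsigned aligned = (unalignedSize + 15) & ~15u; // AlignUp(unalignedSize, STACK_ALIGN)
    assert((aligned - unalignedSize == 0) || (aligned - unalignedSize == 8));
    return aligned;
}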
@@ -5682,7 +5766,8 @@ void CodeGen::genPushCalleeSavedRegisters()
{
int remainingFrameSz = totalFrameSize - calleeSaveSPDelta;
assert(remainingFrameSz > 0);
- assert((remainingFrameSz % 16) == 0); // this is guaranteed to be 16-byte aligned because each component -- totalFrameSize and calleeSaveSPDelta -- is 16-byte aligned.
+ assert((remainingFrameSz % 16) == 0); // this is guaranteed to be 16-byte aligned because each component --
+ // totalFrameSize and calleeSaveSPDelta -- is 16-byte aligned.
if (compiler->lvaOutgoingArgSpaceSize >= 504)
{
@@ -5690,14 +5775,15 @@ void CodeGen::genPushCalleeSavedRegisters()
// If compiler->lvaOutgoingArgSpaceSize is not aligned, we need to align the SP adjustment.
assert(remainingFrameSz > (int)compiler->lvaOutgoingArgSpaceSize);
int spAdjustment2Unaligned = remainingFrameSz - compiler->lvaOutgoingArgSpaceSize;
- int spAdjustment2 = (int)roundUp((size_t)spAdjustment2Unaligned, STACK_ALIGN);
- int alignmentAdjustment2 = spAdjustment2 - spAdjustment2Unaligned;
+ int spAdjustment2 = (int)roundUp((size_t)spAdjustment2Unaligned, STACK_ALIGN);
+ int alignmentAdjustment2 = spAdjustment2 - spAdjustment2Unaligned;
assert((alignmentAdjustment2 == 0) || (alignmentAdjustment2 == 8));
genPrologSaveRegPair(REG_FP, REG_LR, alignmentAdjustment2, -spAdjustment2, false, initReg, pInitRegZeroed);
offset += spAdjustment2;
- // Now subtract off the #outsz (or the rest of the #outsz if it was unaligned, and the above "sub" included some of it)
+ // Now subtract off the #outsz (or the rest of the #outsz if it was unaligned, and the above "sub" included
+ // some of it)
int spAdjustment3 = compiler->lvaOutgoingArgSpaceSize - alignmentAdjustment2;
assert(spAdjustment3 > 0);
@@ -5711,7 +5797,8 @@ void CodeGen::genPushCalleeSavedRegisters()
}
else
{
- genPrologSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize, -remainingFrameSz, false, initReg, pInitRegZeroed);
+ genPrologSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize, -remainingFrameSz, false, initReg,
+ pInitRegZeroed);
offset += remainingFrameSz;
getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize);
@@ -5734,7 +5821,9 @@ void CodeGen::genPushCalleeSavedRegisters()
compiler->unwindPush(reg);
if (!doubleAlignOrFramePointerUsed())
+ {
psiAdjustStackLevel(REGSIZE_BYTES);
+ }
rsPushRegs &= ~regBit;
}
@@ -5745,22 +5834,20 @@ void CodeGen::genPushCalleeSavedRegisters()
#endif // _TARGET_*
}
-
/*-----------------------------------------------------------------------------
*
* Probe the stack and allocate the local stack frame: subtract from SP.
* On ARM64, this only does the probing; allocating the frame is done when callee-saved registers are saved.
*/
-void CodeGen::genAllocLclFrame(unsigned frameSize,
- regNumber initReg,
- bool * pInitRegZeroed,
- regMaskTP maskArgRegsLiveIn)
+void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn)
{
assert(compiler->compGeneratingProlog);
- if (frameSize == 0)
+ if (frameSize == 0)
+ {
return;
+ }
const size_t pageSize = compiler->eeGetPageSize();
@@ -5774,9 +5861,9 @@ void CodeGen::genAllocLclFrame(unsigned frameSize,
// Frame size is the same as register size.
inst_RV(INS_push, REG_EAX, TYP_I_IMPL);
}
- else
+ else
#endif // _TARGET_XARCH_
- if (frameSize < pageSize)
+ if (frameSize < pageSize)
{
#ifndef _TARGET_ARM64_
// Frame size is (0x0008..0x1000)
@@ -5792,21 +5879,19 @@ void CodeGen::genAllocLclFrame(unsigned frameSize,
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, -(ssize_t)pageSize);
getEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, initReg, REG_SPBASE, initReg);
regTracker.rsTrackRegTrash(initReg);
- *pInitRegZeroed = false; // The initReg does not contain zero
+ *pInitRegZeroed = false; // The initReg does not contain zero
#else
- getEmitter()->emitIns_AR_R(INS_TEST, EA_PTRSIZE,
- REG_EAX, REG_SPBASE, -(int)pageSize);
+ getEmitter()->emitIns_AR_R(INS_TEST, EA_PTRSIZE, REG_EAX, REG_SPBASE, -(int)pageSize);
#endif
if (frameSize >= 0x2000)
- {
+ {
#if CPU_LOAD_STORE_ARCH
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, -2 * (ssize_t)pageSize);
getEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, initReg, REG_SPBASE, initReg);
regTracker.rsTrackRegTrash(initReg);
#else
- getEmitter()->emitIns_AR_R(INS_TEST, EA_PTRSIZE,
- REG_EAX, REG_SPBASE, -2 * (int)pageSize);
+ getEmitter()->emitIns_AR_R(INS_TEST, EA_PTRSIZE, REG_EAX, REG_SPBASE, -2 * (int)pageSize);
#endif
}
@@ -5821,7 +5906,6 @@ void CodeGen::genAllocLclFrame(unsigned frameSize,
inst_RV_IV(INS_sub, REG_SPBASE, frameSize, EA_PTRSIZE);
#endif
#endif // !_TARGET_ARM64_
-
}
else
{
@@ -5855,9 +5939,10 @@ void CodeGen::genAllocLclFrame(unsigned frameSize,
#if CPU_LOAD_STORE_ARCH
// TODO-ARM64-Bug?: set the availMask properly!
- regMaskTP availMask = (regSet.rsGetModifiedRegsMask() & RBM_ALLINT) | RBM_R12 | RBM_LR; // Set of available registers
- availMask &= ~maskArgRegsLiveIn; // Remove all of the incoming argument registers as they are currently live
- availMask &= ~genRegMask(initReg); // Remove the pre-calculated initReg
+ regMaskTP availMask =
+ (regSet.rsGetModifiedRegsMask() & RBM_ALLINT) | RBM_R12 | RBM_LR; // Set of available registers
+ availMask &= ~maskArgRegsLiveIn; // Remove all of the incoming argument registers as they are currently live
+ availMask &= ~genRegMask(initReg); // Remove the pre-calculated initReg
regNumber rOffset = initReg;
regNumber rLimit;
@@ -5867,16 +5952,17 @@ void CodeGen::genAllocLclFrame(unsigned frameSize,
// We pick the next lowest register number for rTemp
noway_assert(availMask != RBM_NONE);
tempMask = genFindLowestBit(availMask);
- rTemp = genRegNumFromMask(tempMask);
+ rTemp = genRegNumFromMask(tempMask);
availMask &= ~tempMask;
// We pick the next lowest register number for rLimit
noway_assert(availMask != RBM_NONE);
tempMask = genFindLowestBit(availMask);
- rLimit = genRegNumFromMask(tempMask);
+ rLimit = genRegNumFromMask(tempMask);
availMask &= ~tempMask;
- // TODO-LdStArch-Bug?: review this. The first time we load from [sp+0] which will always succeed. That doesn't make sense.
+ // TODO-LdStArch-Bug?: review this. The first time we load from [sp+0] which will always succeed. That doesn't
+ // make sense.
// TODO-ARM64-CQ: we could probably use ZR on ARM64 instead of rTemp.
//
// mov rLimit, -frameSize
@@ -5885,7 +5971,7 @@ void CodeGen::genAllocLclFrame(unsigned frameSize,
// sub rOffset, 0x1000 // Note that 0x1000 on ARM32 uses the funky Thumb immediate encoding
// cmp rOffset, rLimit
// jge loop
- noway_assert((ssize_t)(int)frameSize == (ssize_t)frameSize); // make sure framesize safely fits within an int
+ noway_assert((ssize_t)(int)frameSize == (ssize_t)frameSize); // make sure framesize safely fits within an int
instGen_Set_Reg_To_Imm(EA_PTRSIZE, rLimit, -(int)frameSize);
getEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, rTemp, REG_SPBASE, rOffset);
regTracker.rsTrackRegTrash(rTemp);
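The ldr/sub/cmp/jge sequence sketched in the comment above touches one location in every page between the current SP and the bottom of the new frame, so the OS can grow the stack guard one page at a time. A rough C++ model of that runtime behavior (ProbeStackPages is an illustrative name; the generated code keeps everything in registers and uses ldr or test to do the touch):

#include <cstddef>

// Touch one byte in each page of [sp - frameSize, sp]; 'sp' must point at the
// top of a region with at least frameSize accessible bytes below it. Mirrors
// the probe loop in the comment above, starting from [sp + 0].
static void ProbeStackPages(volatile const char* sp, std::size_t frameSize, std::size_t pageSize)
{
    for (std::size_t probed = 0; probed <= frameSize; probed += pageSize)
    {
        (void)sp[-(std::ptrdiff_t)probed]; // like "ldr rTemp, [sp+rOffset]"
    }
}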
@@ -5924,23 +6010,23 @@ void CodeGen::genAllocLclFrame(unsigned frameSize,
// jge loop 2
getEmitter()->emitIns_R_ARR(INS_TEST, EA_PTRSIZE, initReg, REG_SPBASE, initReg, 0);
- inst_RV_IV(INS_sub, initReg, pageSize, EA_PTRSIZE);
- inst_RV_IV(INS_cmp, initReg, -((ssize_t)frameSize), EA_PTRSIZE);
+ inst_RV_IV(INS_sub, initReg, pageSize, EA_PTRSIZE);
+ inst_RV_IV(INS_cmp, initReg, -((ssize_t)frameSize), EA_PTRSIZE);
int bytesForBackwardJump;
#ifdef _TARGET_AMD64_
- assert((initReg == REG_EAX) || (initReg == REG_EBP)); // We use RBP as initReg for EH funclets.
+ assert((initReg == REG_EAX) || (initReg == REG_EBP)); // We use RBP as initReg for EH funclets.
bytesForBackwardJump = ((initReg == REG_EAX) ? -18 : -20);
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
assert(initReg == REG_EAX);
bytesForBackwardJump = -15;
#endif // !_TARGET_AMD64_
- inst_IV(INS_jge, bytesForBackwardJump); // Branch backwards to start of loop
+ inst_IV(INS_jge, bytesForBackwardJump); // Branch backwards to start of loop
#endif // !CPU_LOAD_STORE_ARCH
- *pInitRegZeroed = false; // The initReg does not contain zero
+ *pInitRegZeroed = false; // The initReg does not contain zero
#ifdef _TARGET_XARCH_
if (pushedStubParam)
@@ -5963,56 +6049,55 @@ void CodeGen::genAllocLclFrame(unsigned frameSize,
// sub esp, frameSize 6
inst_RV_IV(INS_sub, REG_SPBASE, frameSize, EA_PTRSIZE);
#endif
-
}
#ifndef _TARGET_ARM64_
compiler->unwindAllocStack(frameSize);
if (!doubleAlignOrFramePointerUsed())
+ {
psiAdjustStackLevel(frameSize);
+ }
#endif // !_TARGET_ARM64_
-
}
-
#if defined(_TARGET_ARM_)
-void CodeGen::genPushFltRegs(regMaskTP regMask)
+void CodeGen::genPushFltRegs(regMaskTP regMask)
{
-    assert(regMask != 0); // Don't call unless we have some registers to push
-    assert((regMask & RBM_ALLFLOAT) == regMask); // Only floating point registers should be in regMask
+    assert(regMask != 0);                        // Don't call unless we have some registers to push
+    assert((regMask & RBM_ALLFLOAT) == regMask); // Only floating point registers should be in regMask
- regNumber lowReg = genRegNumFromMask(genFindLowestBit(regMask));
- int slots = genCountBits(regMask);
- // regMask should be contiguously set
- regMaskTP tmpMask = ((regMask >> lowReg) + 1); // tmpMask should have a single bit set
+ regNumber lowReg = genRegNumFromMask(genFindLowestBit(regMask));
+ int slots = genCountBits(regMask);
+ // regMask should be contiguously set
+ regMaskTP tmpMask = ((regMask >> lowReg) + 1); // tmpMask should have a single bit set
assert((tmpMask & (tmpMask - 1)) == 0);
- assert(lowReg == REG_F16); // Currently we expect to start at F16 in the unwind codes
+ assert(lowReg == REG_F16); // Currently we expect to start at F16 in the unwind codes
// Our calling convention requires that we only use vpush for TYP_DOUBLE registers
noway_assert(floatRegCanHoldType(lowReg, TYP_DOUBLE));
noway_assert((slots % 2) == 0);
- getEmitter()->emitIns_R_I(INS_vpush, EA_8BYTE, lowReg, slots/2);
+ getEmitter()->emitIns_R_I(INS_vpush, EA_8BYTE, lowReg, slots / 2);
}
-void CodeGen::genPopFltRegs(regMaskTP regMask)
+void CodeGen::genPopFltRegs(regMaskTP regMask)
{
-    assert(regMask != 0); // Don't call unless we have some registers to pop
-    assert((regMask & RBM_ALLFLOAT) == regMask); // Only floating point registers should be in regMask
+    assert(regMask != 0);                        // Don't call unless we have some registers to pop
+    assert((regMask & RBM_ALLFLOAT) == regMask); // Only floating point registers should be in regMask
- regNumber lowReg = genRegNumFromMask(genFindLowestBit(regMask));
- int slots = genCountBits(regMask);
- // regMask should be contiguously set
- regMaskTP tmpMask = ((regMask >> lowReg) + 1); // tmpMask should have a single bit set
+ regNumber lowReg = genRegNumFromMask(genFindLowestBit(regMask));
+ int slots = genCountBits(regMask);
+ // regMask should be contiguously set
+ regMaskTP tmpMask = ((regMask >> lowReg) + 1); // tmpMask should have a single bit set
assert((tmpMask & (tmpMask - 1)) == 0);
// Our calling convention requires that we only use vpop for TYP_DOUBLE registers
noway_assert(floatRegCanHoldType(lowReg, TYP_DOUBLE));
noway_assert((slots % 2) == 0);
- getEmitter()->emitIns_R_I(INS_vpop, EA_8BYTE, lowReg, slots/2);
+ getEmitter()->emitIns_R_I(INS_vpop, EA_8BYTE, lowReg, slots / 2);
}
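Both helpers assert that regMask is a single contiguous run of float registers: shifting the mask right by its lowest register number and adding one must leave exactly one bit set. A standalone sketch of the same test on a plain 64-bit mask (IsContiguousMaskFrom is an illustrative name, not a JIT helper):

// True when 'mask' is one contiguous run of set bits and 'lowBit' is the
// position of its lowest set bit, e.g. mask 0x70 with lowBit 4.
static bool IsContiguousMaskFrom(unsigned long long mask, unsigned lowBit)
{
    if (mask == 0)
    {
        return false;
    }
    unsigned long long t = (mask >> lowBit) + 1;
    return (t & (t - 1)) == 0; // power of two <=> the run was contiguous
}

Contiguity is what allows a single vpush/vpop with a count of slots / 2 double registers to cover the whole range.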
/*-----------------------------------------------------------------------------
@@ -6048,7 +6133,7 @@ void CodeGen::genFreeLclFrame(unsigned frameSize, /* IN OUT */ bool* pUnwindStar
{
assert(compiler->compGeneratingEpilog);
- if (frameSize == 0)
+ if (frameSize == 0)
return;
// Add 'frameSize' to SP.
@@ -6071,7 +6156,7 @@ void CodeGen::genFreeLclFrame(unsigned frameSize, /* IN OUT */ bool* pUnwindStar
}
getEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, frameSize, INS_FLAGS_DONT_CARE);
- }
+ }
else
{
regMaskTP grabMask = RBM_INT_CALLEE_TRASH;
@@ -6083,7 +6168,7 @@ void CodeGen::genFreeLclFrame(unsigned frameSize, /* IN OUT */ bool* pUnwindStar
#ifndef LEGACY_BACKEND
regNumber tmpReg;
tmpReg = REG_TMP_0;
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
regNumber tmpReg = regSet.rsGrabReg(grabMask);
#endif // LEGACY_BACKEND
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, frameSize);
@@ -6122,20 +6207,20 @@ regMaskTP CodeGen::genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskC
if (maskCalleeSavedFloat != RBM_NONE)
return RBM_NONE;
- // Allocate space for small frames by pushing extra registers. It generates smaller and faster code
+ // Allocate space for small frames by pushing extra registers. It generates smaller and faster code
    // than that extra sub sp,XXX/add sp,XXX.
- // R0 and R1 may be used by return value. Keep things simple and just skip the optimization
+ // R0 and R1 may be used by return value. Keep things simple and just skip the optimization
// for the 3*REGSIZE_BYTES and 4*REGSIZE_BYTES cases. They are less common and they have more
// significant negative side-effects (more memory bus traffic).
switch (frameSize)
{
- case REGSIZE_BYTES:
- return RBM_R3;
- case 2*REGSIZE_BYTES:
- return RBM_R2|RBM_R3;
- default:
- return RBM_NONE;
- }
+ case REGSIZE_BYTES:
+ return RBM_R3;
+ case 2 * REGSIZE_BYTES:
+ return RBM_R2 | RBM_R3;
+ default:
+ return RBM_NONE;
+ }
}
#endif // _TARGET_ARM_
@@ -6251,7 +6336,7 @@ void CodeGen::genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP&
#if defined(_TARGET_ARM_)
-bool CodeGen::genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog)
+bool CodeGen::genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
@@ -6272,7 +6357,7 @@ bool CodeGen::genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmp
return false;
}
-void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
+void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
@@ -6292,7 +6377,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
if (!jmpEpilog)
{
- regMaskTP maskStackAlloc = genStackAllocRegisterMask(compiler->compLclFrameSize, maskPopRegsFloat);
+ regMaskTP maskStackAlloc = genStackAllocRegisterMask(compiler->compLclFrameSize, maskPopRegsFloat);
maskPopRegsInt |= maskStackAlloc;
}
@@ -6301,13 +6386,12 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
assert(!regSet.rsRegsModified(RBM_FPBASE));
maskPopRegsInt |= RBM_FPBASE;
}
-
+
if (genCanUsePopToReturn(maskPopRegsInt, jmpEpilog))
{
- maskPopRegsInt |= RBM_PC;
+ maskPopRegsInt |= RBM_PC;
// Record the fact that we use a pop to the PC to perform the return
genUsedPopToReturn = true;
-
}
else
{
@@ -6323,7 +6407,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
#elif defined(_TARGET_ARM64_)
-void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog)
+void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
@@ -6334,15 +6418,16 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilo
rsRestoreRegs |= RBM_FPBASE;
}
- rsRestoreRegs |= RBM_LR; // We must save/restore the return address (in the LR register)
+ rsRestoreRegs |= RBM_LR; // We must save/restore the return address (in the LR register)
regMaskTP regsToRestoreMask = rsRestoreRegs;
int totalFrameSize = genTotalFrameSize();
- int calleeSaveSPOffset; // This will be the starting place for restoring the callee-saved registers, in decreasing order.
- int frameType = 0; // An indicator of what type of frame we are popping.
- int calleeSaveSPDelta = 0;
+ int calleeSaveSPOffset; // This will be the starting place for restoring the callee-saved registers, in decreasing
+ // order.
+ int frameType = 0; // An indicator of what type of frame we are popping.
+ int calleeSaveSPDelta = 0;
int calleeSaveSPDeltaUnaligned = 0;
if (isFramePointerUsed())
@@ -6360,7 +6445,8 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilo
regsToRestoreMask &= ~(RBM_FP | RBM_LR); // We'll restore FP/LR at the end, and post-index SP.
- // Compute callee save SP offset which is at the top of local frame while the FP/LR is saved at the bottom of stack.
+ // Compute callee save SP offset which is at the top of local frame while the FP/LR is saved at the bottom
+ // of stack.
calleeSaveSPOffset = compiler->compLclFrameSize + 2 * REGSIZE_BYTES;
}
else if (totalFrameSize <= 512)
@@ -6370,20 +6456,23 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilo
{
// Restore sp from fp
// sub sp, fp, #outsz
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, compiler->lvaOutgoingArgSpaceSize);
+ getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE,
+ compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSetFrameReg(REG_FPBASE, compiler->lvaOutgoingArgSpaceSize);
}
regsToRestoreMask &= ~(RBM_FP | RBM_LR); // We'll restore FP/LR at the end, and post-index SP.
- // Compute callee save SP offset which is at the top of local frame while the FP/LR is saved at the bottom of stack.
+ // Compute callee save SP offset which is at the top of local frame while the FP/LR is saved at the bottom
+ // of stack.
calleeSaveSPOffset = compiler->compLclFrameSize + 2 * REGSIZE_BYTES;
}
else
{
frameType = 3;
- calleeSaveSPDeltaUnaligned = totalFrameSize - compiler->compLclFrameSize - 2 * REGSIZE_BYTES; // 2 for FP, LR which we'll restore later.
+ calleeSaveSPDeltaUnaligned = totalFrameSize - compiler->compLclFrameSize -
+ 2 * REGSIZE_BYTES; // 2 for FP, LR which we'll restore later.
assert(calleeSaveSPDeltaUnaligned >= 0);
assert((calleeSaveSPDeltaUnaligned % 8) == 0); // It better at least be 8 byte aligned.
calleeSaveSPDelta = AlignUp((UINT)calleeSaveSPDeltaUnaligned, STACK_ALIGN);
@@ -6399,13 +6488,14 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilo
// If compiler->lvaOutgoingArgSpaceSize is not aligned, we need to align the SP adjustment.
assert(remainingFrameSz > (int)compiler->lvaOutgoingArgSpaceSize);
int spAdjustment2Unaligned = remainingFrameSz - compiler->lvaOutgoingArgSpaceSize;
- int spAdjustment2 = (int)roundUp((size_t)spAdjustment2Unaligned, STACK_ALIGN);
- int alignmentAdjustment2 = spAdjustment2 - spAdjustment2Unaligned;
+ int spAdjustment2 = (int)roundUp((size_t)spAdjustment2Unaligned, STACK_ALIGN);
+ int alignmentAdjustment2 = spAdjustment2 - spAdjustment2Unaligned;
assert((alignmentAdjustment2 == 0) || (alignmentAdjustment2 == REGSIZE_BYTES));
if (compiler->compLocallocUsed)
{
- // Restore sp from fp. No need to update sp after this since we've set up fp before adjusting sp in prolog.
+ // Restore sp from fp. No need to update sp after this since we've set up fp before adjusting sp in
+ // prolog.
// sub sp, fp, #alignmentAdjustment2
getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, alignmentAdjustment2);
compiler->unwindSetFrameReg(REG_FPBASE, alignmentAdjustment2);
@@ -6432,7 +6522,8 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilo
{
// Restore sp from fp
// sub sp, fp, #outsz
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, compiler->lvaOutgoingArgSpaceSize);
+ getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE,
+ compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSetFrameReg(REG_FPBASE, compiler->lvaOutgoingArgSpaceSize);
}
@@ -6441,7 +6532,8 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilo
// add sp,sp,#remainingFrameSz ; might need to load this constant in a scratch register if
// ; it's large
- genEpilogRestoreRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize, remainingFrameSz, REG_IP0, nullptr);
+ genEpilogRestoreRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize, remainingFrameSz, REG_IP0,
+ nullptr);
}
// Unlike frameType=1 or frameType=2 that restore SP at the end,
@@ -6465,7 +6557,8 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilo
// Generate:
// ldp fp,lr,[sp],#framesz
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, totalFrameSize, INS_OPTS_POST_INDEX);
+ getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, totalFrameSize,
+ INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, -totalFrameSize);
}
else if (frameType == 2)
@@ -6474,7 +6567,8 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilo
// ldr fp,lr,[sp,#outsz]
// add sp,sp,#framesz
- getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize);
+ getEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
+ compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize);
getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
@@ -6492,33 +6586,33 @@ void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilo
#elif defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
-void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
+void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
unsigned popCount = 0;
- if (regSet.rsRegsModified(RBM_EBX))
+ if (regSet.rsRegsModified(RBM_EBX))
{
popCount++;
inst_RV(INS_pop, REG_EBX, TYP_I_IMPL);
}
- if (regSet.rsRegsModified(RBM_FPBASE))
+ if (regSet.rsRegsModified(RBM_FPBASE))
{
// EBP cannot be directly modified for EBP frame and double-aligned frames
assert(!doubleAlignOrFramePointerUsed());
-
+
popCount++;
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
#ifndef UNIX_AMD64_ABI
// For System V AMD64 calling convention ESI and EDI are volatile registers.
- if (regSet.rsRegsModified(RBM_ESI))
+ if (regSet.rsRegsModified(RBM_ESI))
{
popCount++;
inst_RV(INS_pop, REG_ESI, TYP_I_IMPL);
}
- if (regSet.rsRegsModified(RBM_EDI))
+ if (regSet.rsRegsModified(RBM_EDI))
{
popCount++;
inst_RV(INS_pop, REG_EDI, TYP_I_IMPL);
@@ -6526,39 +6620,39 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
#endif // !defined(UNIX_AMD64_ABI)
#ifdef _TARGET_AMD64_
- if (regSet.rsRegsModified(RBM_R12))
+ if (regSet.rsRegsModified(RBM_R12))
{
popCount++;
inst_RV(INS_pop, REG_R12, TYP_I_IMPL);
}
- if (regSet.rsRegsModified(RBM_R13))
+ if (regSet.rsRegsModified(RBM_R13))
{
popCount++;
inst_RV(INS_pop, REG_R13, TYP_I_IMPL);
}
- if (regSet.rsRegsModified(RBM_R14))
+ if (regSet.rsRegsModified(RBM_R14))
{
popCount++;
inst_RV(INS_pop, REG_R14, TYP_I_IMPL);
}
- if (regSet.rsRegsModified(RBM_R15))
+ if (regSet.rsRegsModified(RBM_R15))
{
popCount++;
inst_RV(INS_pop, REG_R15, TYP_I_IMPL);
}
#endif // _TARGET_AMD64_
- //Amd64/x86 doesn't support push/pop of xmm registers.
- //These will get saved to stack separately after allocating
- //space on stack in prolog sequence. PopCount is essentially
- //tracking the count of integer registers pushed.
+ // Amd64/x86 doesn't support push/pop of xmm registers.
+ // These will get saved to stack separately after allocating
+ // space on stack in prolog sequence. PopCount is essentially
+ // tracking the count of integer registers pushed.
noway_assert(compiler->compCalleeRegsPushed == popCount);
}
#elif defined(_TARGET_X86_)
-void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
+void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
@@ -6569,7 +6663,7 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
each takes one byte of machine code.
*/
- if (regSet.rsRegsModified(RBM_FPBASE))
+ if (regSet.rsRegsModified(RBM_FPBASE))
{
// EBP cannot be directly modified for EBP frame and double-aligned frames
noway_assert(!doubleAlignOrFramePointerUsed());
@@ -6577,17 +6671,17 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
popCount++;
}
- if (regSet.rsRegsModified(RBM_EBX))
+ if (regSet.rsRegsModified(RBM_EBX))
{
popCount++;
inst_RV(INS_pop, REG_EBX, TYP_I_IMPL);
}
- if (regSet.rsRegsModified(RBM_ESI))
+ if (regSet.rsRegsModified(RBM_ESI))
{
popCount++;
inst_RV(INS_pop, REG_ESI, TYP_I_IMPL);
}
- if (regSet.rsRegsModified(RBM_EDI))
+ if (regSet.rsRegsModified(RBM_EDI))
{
popCount++;
inst_RV(INS_pop, REG_EDI, TYP_I_IMPL);
@@ -6599,12 +6693,11 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog)
// We need a register with value zero. Zero the initReg, if necessary, and set *pInitRegZeroed if so.
// Return the register to use. On ARM64, we never touch the initReg, and always just return REG_ZR.
-regNumber CodeGen::genGetZeroReg(regNumber initReg,
- bool* pInitRegZeroed)
+regNumber CodeGen::genGetZeroReg(regNumber initReg, bool* pInitRegZeroed)
{
#ifdef _TARGET_ARM64_
return REG_ZR;
-#else // !_TARGET_ARM64_
+#else // !_TARGET_ARM64_
if (*pInitRegZeroed == false)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, initReg);
@@ -6619,19 +6712,18 @@ regNumber CodeGen::genGetZeroReg(regNumber initReg,
* Do we have any untracked pointer locals at all,
* or do we need to initialize memory for locspace?
*
- * untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init code will end initializing memory (not inclusive).
- * untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will start zero initializing memory.
- * initReg - A scratch register (that gets set to zero on some platforms).
+ * untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init code will end
+ * initializing memory (not inclusive).
+ * untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will start zero
+ * initializing memory.
+ * initReg - A scratch register (that gets set to zero on some platforms).
* pInitRegZeroed - Sets a flag that tells the callee whether or not the initReg register got zeroed.
*/
-void CodeGen::genZeroInitFrame(int untrLclHi,
- int untrLclLo,
- regNumber initReg,
- bool * pInitRegZeroed)
+void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
- if (genUseBlockInit)
+ if (genUseBlockInit)
{
assert(untrLclHi > untrLclLo);
#ifdef _TARGET_ARMARCH_
@@ -6667,18 +6759,20 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
*/
regNumber rAddr;
- regNumber rCnt = REG_NA; // Invalid
+ regNumber rCnt = REG_NA; // Invalid
regMaskTP regMask;
- regMaskTP availMask = regSet.rsGetModifiedRegsMask() | RBM_INT_CALLEE_TRASH; // Set of available registers
- availMask &= ~intRegState.rsCalleeRegArgMaskLiveIn; // Remove all of the incoming argument registers as they are currently live
- availMask &= ~genRegMask(initReg); // Remove the pre-calculated initReg as we will zero it and maybe use it for a large constant.
+ regMaskTP availMask = regSet.rsGetModifiedRegsMask() | RBM_INT_CALLEE_TRASH; // Set of available registers
+ availMask &= ~intRegState.rsCalleeRegArgMaskLiveIn; // Remove all of the incoming argument registers as they are
+ // currently live
+ availMask &= ~genRegMask(initReg); // Remove the pre-calculated initReg as we will zero it and maybe use it for
+ // a large constant.
#if defined(_TARGET_ARM_)
if (compiler->compLocallocUsed)
{
- availMask &= ~RBM_SAVED_LOCALLOC_SP; // Remove the register reserved when we have a localloc frame
+ availMask &= ~RBM_SAVED_LOCALLOC_SP; // Remove the register reserved when we have a localloc frame
}
regNumber rZero1; // We're going to use initReg for rZero1
@@ -6687,26 +6781,27 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
// We pick the next lowest register number for rZero2
noway_assert(availMask != RBM_NONE);
regMask = genFindLowestBit(availMask);
- rZero2 = genRegNumFromMask(regMask);
+ rZero2 = genRegNumFromMask(regMask);
availMask &= ~regMask;
- assert((genRegMask(rZero2) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); // rZero2 is not a live incoming argument reg
+ assert((genRegMask(rZero2) & intRegState.rsCalleeRegArgMaskLiveIn) ==
+ 0); // rZero2 is not a live incoming argument reg
// We pick the next lowest register number for rAddr
noway_assert(availMask != RBM_NONE);
regMask = genFindLowestBit(availMask);
- rAddr = genRegNumFromMask(regMask);
+ rAddr = genRegNumFromMask(regMask);
availMask &= ~regMask;
#else // !define(_TARGET_ARM_)
- regNumber rZero1 = REG_ZR;
- rAddr = initReg;
- *pInitRegZeroed = false;
+ regNumber rZero1 = REG_ZR;
+ rAddr = initReg;
+ *pInitRegZeroed = false;
#endif // !defined(_TARGET_ARM_)
- bool useLoop = false;
- unsigned uCntBytes = untrLclHi - untrLclLo;
+ bool useLoop = false;
+ unsigned uCntBytes = untrLclHi - untrLclLo;
assert((uCntBytes % sizeof(int)) == 0); // The smallest stack slot is always 4 bytes.
unsigned uCntSlots = uCntBytes / REGSIZE_BYTES; // How many register sized stack slots we're going to use.
@@ -6722,14 +6817,15 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
// We pick the next lowest register number for rCnt
noway_assert(availMask != RBM_NONE);
regMask = genFindLowestBit(availMask);
- rCnt = genRegNumFromMask(regMask);
+ rCnt = genRegNumFromMask(regMask);
availMask &= ~regMask;
}
- assert((genRegMask(rAddr) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); // rAddr is not a live incoming argument reg
+ assert((genRegMask(rAddr) & intRegState.rsCalleeRegArgMaskLiveIn) ==
+ 0); // rAddr is not a live incoming argument reg
#if defined(_TARGET_ARM_)
if (arm_Valid_Imm_For_Add(untrLclLo, INS_FLAGS_DONT_CARE))
-#else // !_TARGET_ARM_
+#else // !_TARGET_ARM_
if (emitter::emitIns_valid_imm_for_add(untrLclLo, EA_PTRSIZE))
#endif // !_TARGET_ARM_
{
@@ -6746,14 +6842,15 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
if (useLoop)
{
noway_assert(uCntSlots >= 2);
- assert((genRegMask(rCnt) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); // rCnt is not a live incoming argument reg
+ assert((genRegMask(rCnt) & intRegState.rsCalleeRegArgMaskLiveIn) ==
+ 0); // rCnt is not a live incoming argument reg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, rCnt, (ssize_t)uCntSlots / 2);
}
#if defined(_TARGET_ARM_)
rZero1 = genGetZeroReg(initReg, pInitRegZeroed);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, rZero2);
- ssize_t stmImm = (ssize_t) (genRegMask(rZero1) | genRegMask(rZero2));
+ ssize_t stmImm = (ssize_t)(genRegMask(rZero1) | genRegMask(rZero2));
#endif // _TARGET_ARM_
if (!useLoop)
@@ -6762,19 +6859,21 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
{
#ifdef _TARGET_ARM_
getEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm);
-#else // !_TARGET_ARM_
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, rAddr, 2 * REGSIZE_BYTES, INS_OPTS_POST_INDEX);
+#else // !_TARGET_ARM_
+ getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, rAddr, 2 * REGSIZE_BYTES,
+ INS_OPTS_POST_INDEX);
#endif // !_TARGET_ARM_
uCntBytes -= REGSIZE_BYTES * 2;
}
}
- else // useLoop is true
+ else // useLoop is true
{
#ifdef _TARGET_ARM_
getEmitter()->emitIns_R_I(INS_stm, EA_PTRSIZE, rAddr, stmImm); // zero stack slots
getEmitter()->emitIns_R_I(INS_sub, EA_PTRSIZE, rCnt, 1, INS_FLAGS_SET);
-#else // !_TARGET_ARM_
- getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, rAddr, 2 * REGSIZE_BYTES, INS_OPTS_POST_INDEX); // zero stack slots
+#else // !_TARGET_ARM_
+ getEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, rAddr, 2 * REGSIZE_BYTES,
+ INS_OPTS_POST_INDEX); // zero stack slots
getEmitter()->emitIns_R_R_I(INS_subs, EA_PTRSIZE, rCnt, rCnt, 1);
#endif // !_TARGET_ARM_
getEmitter()->emitIns_J(INS_bhi, NULL, -3);
@@ -6785,7 +6884,7 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
{
#ifdef _TARGET_ARM_
getEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, rZero1, rAddr, 0);
-#else // _TARGET_ARM_
+#else // _TARGET_ARM_
if ((uCntBytes - REGSIZE_BYTES) == 0)
{
getEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_ZR, rAddr, 0);
@@ -6834,7 +6933,7 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
inst_RV_RV(INS_mov, REG_R13, REG_RDI);
regTracker.rsTrackRegTrash(REG_R13);
}
-#else // !UNIX_AMD64_ABI
+#else // !UNIX_AMD64_ABI
// For register arguments we may have to save ECX
if (intRegState.rsCalleeRegArgMaskLiveIn & RBM_ECX)
{
@@ -6842,20 +6941,16 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
inst_RV_RV(INS_mov, REG_ESI, REG_ECX);
regTracker.rsTrackRegTrash(REG_ESI);
}
-#endif // !UNIX_AMD64_ABI
+#endif // !UNIX_AMD64_ABI
noway_assert((intRegState.rsCalleeRegArgMaskLiveIn & RBM_EAX) == 0);
- getEmitter()->emitIns_R_AR(INS_lea,
- EA_PTRSIZE,
- REG_EDI,
- genFramePointerReg(),
- untrLclLo);
+ getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_EDI, genFramePointerReg(), untrLclLo);
regTracker.rsTrackRegTrash(REG_EDI);
inst_RV_IV(INS_mov, REG_ECX, (untrLclHi - untrLclLo) / sizeof(int), EA_4BYTE);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EAX);
- instGen (INS_r_stosd);
+ instGen(INS_r_stosd);
#ifdef UNIX_AMD64_ABI
// Move back the argument registers
@@ -6863,12 +6958,12 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
{
inst_RV_RV(INS_mov, REG_RCX, REG_R12);
}
-
+
if (intRegState.rsCalleeRegArgMaskLiveIn & RBM_RDI)
{
inst_RV_RV(INS_mov, REG_RDI, REG_R13);
}
-#else // !UNIX_AMD64_ABI
+#else // !UNIX_AMD64_ABI
// Move back the argument registers
if (intRegState.rsCalleeRegArgMaskLiveIn & RBM_ECX)
{
@@ -6879,23 +6974,23 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
#else // _TARGET_*
#error Unsupported or unset target architecture
#endif // _TARGET_*
-
}
else if (genInitStkLclCnt > 0)
{
- assert((genRegMask(initReg) & intRegState.rsCalleeRegArgMaskLiveIn) == 0); // initReg is not a live incoming argument reg
+ assert((genRegMask(initReg) & intRegState.rsCalleeRegArgMaskLiveIn) ==
+ 0); // initReg is not a live incoming argument reg
/* Initialize any lvMustInit vars on the stack */
- LclVarDsc * varDsc;
- unsigned varNum;
+ LclVarDsc* varDsc;
+ unsigned varNum;
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
- if (!varDsc->lvMustInit)
+ if (!varDsc->lvMustInit)
+ {
continue;
+ }
// TODO-Review: I'm not sure that we're correctly handling the mustInit case for
// partially-enregistered vars in the case where we don't use a block init.
@@ -6905,17 +7000,15 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
// or when compInitMem is true
// or when in debug code
- noway_assert(varTypeIsGC(varDsc->TypeGet()) ||
- (varDsc->TypeGet() == TYP_STRUCT) ||
- compiler->info.compInitMem ||
- compiler->opts.compDbgCode);
+ noway_assert(varTypeIsGC(varDsc->TypeGet()) || (varDsc->TypeGet() == TYP_STRUCT) ||
+ compiler->info.compInitMem || compiler->opts.compDbgCode);
#ifdef _TARGET_64BIT_
if (!varDsc->lvOnFrame)
{
continue;
}
-#else // !_TARGET_64BIT_
+#else // !_TARGET_64BIT_
if (varDsc->lvRegister)
{
if (varDsc->lvOnFrame)
@@ -6926,25 +7019,26 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
noway_assert(compiler->info.compInitMem);
- getEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, genGetZeroReg(initReg, pInitRegZeroed), varNum, sizeof(int));
+ getEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, genGetZeroReg(initReg, pInitRegZeroed),
+ varNum, sizeof(int));
}
continue;
}
#endif // !_TARGET_64BIT_
- if ((varDsc->TypeGet() == TYP_STRUCT) &&
- !compiler->info.compInitMem &&
+ if ((varDsc->TypeGet() == TYP_STRUCT) && !compiler->info.compInitMem &&
(varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
// We only initialize the GC variables in the TYP_STRUCT
const unsigned slots = (unsigned)compiler->lvaLclSize(varNum) / REGSIZE_BYTES;
- const BYTE * gcPtrs = compiler->lvaGetGcLayout(varNum);
+ const BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
for (unsigned i = 0; i < slots; i++)
{
if (gcPtrs[i] != TYPE_GC_NONE)
{
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, genGetZeroReg(initReg, pInitRegZeroed), varNum, i * REGSIZE_BYTES);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE,
+ genGetZeroReg(initReg, pInitRegZeroed), varNum, i * REGSIZE_BYTES);
}
}
}
@@ -6972,15 +7066,16 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
}
}
- if (!TRACK_GC_TEMP_LIFETIMES)
+ if (!TRACK_GC_TEMP_LIFETIMES)
{
assert(compiler->tmpAllFree());
- for (TempDsc* tempThis = compiler->tmpListBeg();
- tempThis != nullptr;
- tempThis = compiler->tmpListNxt(tempThis))
+ for (TempDsc* tempThis = compiler->tmpListBeg(); tempThis != nullptr;
+ tempThis = compiler->tmpListNxt(tempThis))
{
- if (!varTypeIsGC(tempThis->tdTempType()))
+ if (!varTypeIsGC(tempThis->tdTempType()))
+ {
continue;
+ }
// printf("initialize untracked spillTmp [EBP-%04X]\n", stkOffs);
@@ -6990,7 +7085,6 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
}
}
-
/*-----------------------------------------------------------------------------
*
* Save the generic context argument.
@@ -7000,8 +7094,7 @@ void CodeGen::genZeroInitFrame(int untrLclHi,
* ICodeManager::GetParamTypeArg().
*/
-void CodeGen::genReportGenericContextArg(regNumber initReg,
- bool* pInitRegZeroed)
+void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
@@ -7022,17 +7115,18 @@ void CodeGen::genReportGenericContextArg(regNumber initReg,
unsigned contextArg = reportArg ? compiler->info.compTypeCtxtArg : compiler->info.compThisArg;
noway_assert(contextArg != BAD_VAR_NUM);
- LclVarDsc * varDsc = &compiler->lvaTable[contextArg];
+ LclVarDsc* varDsc = &compiler->lvaTable[contextArg];
// We are still in the prolog and compiler->info.compTypeCtxtArg has not been
// moved to its final home location. So we need to use it from the
// incoming location.
-
+
regNumber reg;
bool isPrespilledForProfiling = false;
#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED)
- isPrespilledForProfiling = compiler->compIsProfilerHookNeeded() && compiler->lvaIsPreSpilled(contextArg, regSet.rsMaskPreSpillRegs(false));
+ isPrespilledForProfiling =
+ compiler->compIsProfilerHookNeeded() && compiler->lvaIsPreSpilled(contextArg, regSet.rsMaskPreSpillRegs(false));
#endif
// Load from the argument register only if it is not prespilled.
@@ -7048,7 +7142,8 @@ void CodeGen::genReportGenericContextArg(regNumber initReg,
// lvStkOffs is always valid for incoming stack-arguments, even if the argument
// will become enregistered.
// On Arm compiler->compArgSize doesn't include r11 and lr sizes and hence we need to add 2*REGSIZE_BYTES
- noway_assert((2*REGSIZE_BYTES <= varDsc->lvStkOffs) && (size_t(varDsc->lvStkOffs) < compiler->compArgSize+2*REGSIZE_BYTES));
+ noway_assert((2 * REGSIZE_BYTES <= varDsc->lvStkOffs) &&
+ (size_t(varDsc->lvStkOffs) < compiler->compArgSize + 2 * REGSIZE_BYTES));
#else
// lvStkOffs is always valid for incoming stack-arguments, even if the argument
// will become enregistered.
@@ -7058,48 +7153,41 @@ void CodeGen::genReportGenericContextArg(regNumber initReg,
// We will just use the initReg since it is an available register
// and we are probably done using it anyway...
- reg = initReg;
+ reg = initReg;
*pInitRegZeroed = false;
// mov reg, [compiler->info.compTypeCtxtArg]
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, reg,
- genFramePointerReg(), varDsc->lvStkOffs);
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(), varDsc->lvStkOffs);
regTracker.rsTrackRegTrash(reg);
}
#if CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- reg,
- genFramePointerReg(),
+ getEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
compiler->lvaCachedGenericContextArgOffset());
-#else // CPU_LOAD_STORE_ARCH
+#else // CPU_LOAD_STORE_ARCH
// mov [ebp-lvaCachedGenericContextArgOffset()], reg
- getEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- reg,
- genFramePointerReg(),
+ getEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(),
compiler->lvaCachedGenericContextArgOffset());
#endif // !CPU_LOAD_STORE_ARCH
}
-
/*-----------------------------------------------------------------------------
*
* Set the "GS" security cookie in the prolog.
*/
-void CodeGen::genSetGSSecurityCookie(regNumber initReg,
- bool * pInitRegZeroed)
+void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->getNeedsGSSecurityCookie())
+ {
return;
+ }
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
- if (compiler->gsGlobalSecurityCookieAddr == NULL)
+ if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
#ifdef _TARGET_AMD64_
// eax = #GlobalSecurityCookieVal64; [frame.GSSecurityCookie] = eax
@@ -7111,7 +7199,7 @@ void CodeGen::genSetGSSecurityCookie(regNumber initReg,
compiler->lvaGSSecurityCookie, 0, initReg);
#endif
}
- else
+ else
{
regNumber reg;
#ifdef _TARGET_XARCH_
@@ -7132,15 +7220,13 @@ void CodeGen::genSetGSSecurityCookie(regNumber initReg,
#else
// mov reg, dword ptr [compiler->gsGlobalSecurityCookieAddr]
// mov dword ptr [frame.GSSecurityCookie], reg
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC,
- reg, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
+ getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, reg, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
regTracker.rsTrackRegTrash(reg);
#endif
getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, compiler->lvaGSSecurityCookie, 0);
}
}
-
#ifdef PROFILING_SUPPORTED
/*-----------------------------------------------------------------------------
@@ -7148,19 +7234,20 @@ void CodeGen::genSetGSSecurityCookie(regNumber initReg,
* Generate the profiling function enter callback.
*/
-void CodeGen::genProfilingEnterCallback(regNumber initReg,
- bool *pInitRegZeroed)
+void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
// Give profiler a chance to back out of hooking this method
if (!compiler->compIsProfilerHookNeeded())
+ {
return;
+ }
#ifndef LEGACY_BACKEND
#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) // No profiling for System V systems yet.
- unsigned varNum;
- LclVarDsc* varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
// Since the method needs to make a profiler callback, it should have out-going arg space allocated.
noway_assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
@@ -7184,19 +7271,19 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg,
continue;
}
- var_types storeType = varDsc->lvaArgType();
- regNumber argReg = varDsc->lvArgReg;
+ var_types storeType = varDsc->lvaArgType();
+ regNumber argReg = varDsc->lvArgReg;
getEmitter()->emitIns_S_R(ins_Store(storeType), emitTypeSize(storeType), argReg, varNum, 0);
}
}
// Emit profiler EnterCallback(ProfilerMethHnd, caller's SP)
- // RCX = ProfilerMethHnd
+ // RCX = ProfilerMethHnd
if (compiler->compProfilerMethHndIndirected)
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of a pointer.
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
+ getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
@@ -7217,26 +7304,26 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg,
// 1) Here we can query caller's SP offset since prolog will be generated after final frame layout.
// 2) caller's SP relative offset to FramePointer will be negative. We need to add absolute value
// of that offset to FramePointer to obtain caller's SP value.
- assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
+ assert(compiler->lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
- getEmitter()->emitIns_R_AR (INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
+ getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
// Can't have a call until we have enough padding for rejit
genPrologPadForReJit();
- // This will emit either
- // "call ip-relative 32-bit offset" or
+ // This will emit either
+ // "call ip-relative 32-bit offset" or
// "mov rax, helper addr; call rax"
- genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN);
+ genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN);
// TODO-AMD64-CQ: Rather than reloading, see if this could be optimized by combining with prolog
// generation logic that moves args around as required by first BB entry point conditions
// computed by LSRA. Code pointers for investigating this further: genFnPrologCalleeRegArgs()
// and genEnregisterIncomingStackArgs().
- //
+ //
// Now reload arg registers from home locations.
// Vararg methods:
- // - we need to reload only known (i.e. fixed) reg args.
+ // - we need to reload only known (i.e. fixed) reg args.
// - if floating point type, also reload it into corresponding integer reg
for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
@@ -7247,15 +7334,15 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg,
continue;
}
- var_types loadType = varDsc->lvaArgType();
- regNumber argReg = varDsc->lvArgReg;
+ var_types loadType = varDsc->lvaArgType();
+ regNumber argReg = varDsc->lvArgReg;
getEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
#if FEATURE_VARARG
if (compiler->info.compIsVarArgs && varTypeIsFloating(loadType))
{
- regNumber intArgReg = compiler->getCallArgIntRegister(argReg);
- instruction ins = ins_CopyFloatToInt(loadType, TYP_LONG);
+ regNumber intArgReg = compiler->getCallArgIntRegister(argReg);
+ instruction ins = ins_CopyFloatToInt(loadType, TYP_LONG);
inst_RV_RV(ins, argReg, intArgReg, loadType);
}
#endif // FEATURE_VARARG
@@ -7271,9 +7358,9 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg,
NYI("RyuJIT: Emit Profiler Enter callback");
#endif
-#else //LEGACY_BACKEND
+#else // LEGACY_BACKEND
- unsigned saveStackLvl2 = genStackLevel;
+ unsigned saveStackLvl2 = genStackLevel;
#if defined(_TARGET_X86_)
// Important note: when you change enter probe layout, you must also update SKIP_ENTER_PROF_CALLBACK()
@@ -7295,17 +7382,17 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg,
regNumber argReg = regSet.rsGrabReg(RBM_PROFILER_ENTER_ARG);
noway_assert(argReg == REG_PROFILER_ENTER_ARG);
regSet.rsLockReg(RBM_PROFILER_ENTER_ARG);
-
+
if (compiler->compProfilerMethHndIndirected)
{
getEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, argReg, (ssize_t)compiler->compProfilerMethHnd);
regTracker.rsTrackRegTrash(argReg);
}
else
- {
+ {
instGen_Set_Reg_To_Imm(EA_4BYTE, argReg, (ssize_t)compiler->compProfilerMethHnd);
}
-#else // _TARGET_*
+#else // _TARGET_*
NYI("Pushing the profilerHandle & caller's sp for the profiler callout and locking registers");
#endif // _TARGET_*
@@ -7314,12 +7401,12 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg,
//
genPrologPadForReJit();
- // This will emit either
- // "call ip-relative 32-bit offset" or
+ // This will emit either
+ // "call ip-relative 32-bit offset" or
// "mov rax, helper addr; call rax"
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER,
- 0, // argSize. Again, we have to lie about it
- EA_UNKNOWN); // retSize
+ 0, // argSize. Again, we have to lie about it
+ EA_UNKNOWN); // retSize
#if defined(_TARGET_X86_)
//
@@ -7337,28 +7424,29 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg,
{
*pInitRegZeroed = false;
}
-#else // _TARGET_*
+#else // _TARGET_*
NYI("Pushing the profilerHandle & caller's sp for the profiler callout and locking registers");
#endif // _TARGET_*
/* Restore the stack level */
genStackLevel = saveStackLvl2;
-#endif //LEGACY_BACKEND
+#endif // LEGACY_BACKEND
}
-
/*****************************************************************************
*
* Generates Leave profiler hook.
* Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
*/
-void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORINFO_HELP_PROF_FCN_LEAVE*/)
-{
+void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORINFO_HELP_PROF_FCN_LEAVE*/)
+{
// Only hook if profiler says it's okay.
if (!compiler->compIsProfilerHookNeeded())
+ {
return;
+ }
compiler->info.compProfilerCallback = true;
@@ -7382,7 +7470,7 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORIN
// At this point return value is computed and stored in RAX or XMM0.
// On Amd64, Leave callback preserves the return register. We keep
- // RAX alive by not reporting as trashed by helper call. Also note
+ // RAX alive by not reporting as trashed by helper call. Also note
// that GC cannot kick-in while executing inside profiler callback,
// which is a requirement of profiler as well since it needs to examine
// return value which could be an obj ref.
@@ -7392,7 +7480,7 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORIN
{
// Profiler hooks enabled during Ngen time.
// Profiler handle needs to be accessed through an indirection of an address.
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
+ getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
else
{
@@ -7406,7 +7494,7 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORIN
{
instGen_Set_Reg_To_Imm(EA_8BYTE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
- }
+ }
// RDX = caller's SP
// TODO-AMD64-Cleanup: Once we start doing codegen after final frame layout, retain the "if" portion
@@ -7416,14 +7504,14 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORIN
// Caller's SP relative offset to FramePointer will be negative. We need to add absolute
// value of that offset to FramePointer to obtain caller's SP value.
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
- getEmitter()->emitIns_R_AR (INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
+ getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_ARG_1, genFramePointerReg(), -callerSPOffset);
}
else
{
// If we are here means that it is a tentative frame layout during which we
- // cannot use caller's SP offset since it is an estimate. For now we require the
+ // cannot use caller's SP offset since it is an estimate. For now we require the
// method to have at least a single arg so that we can use it to obtain caller's
- // SP.
+ // SP.
LclVarDsc* varDsc = compiler->lvaTable;
NYI_IF((varDsc == nullptr) || !varDsc->lvIsParam, "Profiler ELT callback for a method without any params");
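A minimal sketch (not from the commit) of the arithmetic behind the "lea REG_ARG_1, [FP - callerSPOffset]" emitted above: lvaToCallerSPRelativeOffset() reports a negative FP-relative offset, so subtracting it adds its absolute value and recovers the caller's SP. The helper name is hypothetical.

    #include <cstdint>

    // callerSPOffset is the (negative) caller-SP offset relative to the frame pointer.
    static uintptr_t CallerSPFromFramePointer(uintptr_t framePointer, int callerSPOffset)
    {
        // FP + |callerSPOffset| == FP - callerSPOffset whenever callerSPOffset <= 0
        return framePointer - static_cast<intptr_t>(callerSPOffset);
    }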
@@ -7431,13 +7519,13 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORIN
getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_ARG_1, 0, 0);
}
- // We can use any callee trash register (other than RAX, RCX, RDX) for call target.
- // We use R8 here. This will emit either
- // "call ip-relative 32-bit offset" or
+ // We can use any callee trash register (other than RAX, RCX, RDX) for call target.
+ // We use R8 here. This will emit either
+ // "call ip-relative 32-bit offset" or
// "mov r8, helper addr; call r8"
genEmitHelperCall(helper, 0, EA_UNKNOWN, REG_ARG_2);
-#else //!_TARGET_AMD64_
+#else //!_TARGET_AMD64_
NYI("RyuJIT: Emit Profiler Leave callback");
#endif // _TARGET_*
@@ -7459,8 +7547,8 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORIN
genSinglePush();
genEmitHelperCall(CORINFO_HELP_PROF_FCN_LEAVE,
- sizeof(int) * 1, // argSize
- EA_UNKNOWN); // retSize
+ sizeof(int) * 1, // argSize
+ EA_UNKNOWN); // retSize
//
// Adjust the number of stack slots used by this managed method if necessary.
@@ -7474,9 +7562,10 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORIN
// Push the profilerHandle
//
- // We could optimize register usage based on return value is int/long/void. But to keep it simple we will lock RBM_PROFILER_RET_USED always.
+ // We could optimize register usage based on return value is int/long/void. But to keep it simple we will lock
+ // RBM_PROFILER_RET_USED always.
regNumber scratchReg = regSet.rsGrabReg(RBM_PROFILER_RET_SCRATCH);
- noway_assert(scratchReg == REG_PROFILER_RET_SCRATCH);
+ noway_assert(scratchReg == REG_PROFILER_RET_SCRATCH);
regSet.rsLockReg(RBM_PROFILER_RET_USED);
// Contract between JIT and Profiler Leave callout on arm:
@@ -7484,54 +7573,56 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORIN
// Return size > 4 and <= 8: <REG_PROFILER_RET_SCRATCH,r1> will contain return value.
// Floating point or double or HFA return values will be in s0-s15 in case of non-vararg methods.
// It is assumed that profiler Leave callback doesn't trash registers r1,REG_PROFILER_RET_SCRATCH and s0-s15.
- //
- // In the following cases r0 doesn't contain a return value and hence need not be preserved before emitting Leave callback.
- bool r0Trashed;
+ //
+ // In the following cases r0 doesn't contain a return value and hence need not be preserved before emitting Leave
+ // callback.
+ bool r0Trashed;
emitAttr attr = EA_UNKNOWN;
if (compiler->info.compRetType == TYP_VOID ||
- (!compiler->info.compIsVarArgs && (varTypeIsFloating(compiler->info.compRetType) || compiler->IsHfa(compiler->info.compMethodInfo->args.retTypeClass))))
+ (!compiler->info.compIsVarArgs && (varTypeIsFloating(compiler->info.compRetType) ||
+ compiler->IsHfa(compiler->info.compMethodInfo->args.retTypeClass))))
{
r0Trashed = false;
}
else
{
// Has a return value and r0 is in use. For emitting Leave profiler callout we would need r0 for passing
- // profiler handle. Therefore, r0 is moved to REG_PROFILER_RETURN_SCRATCH as per contract.
+ // profiler handle. Therefore, r0 is moved to REG_PROFILER_RETURN_SCRATCH as per contract.
if (RBM_ARG_0 & gcInfo.gcRegGCrefSetCur)
{
attr = EA_GCREF;
gcInfo.gcMarkRegSetGCref(RBM_PROFILER_RET_SCRATCH);
- }
+ }
else if (RBM_ARG_0 & gcInfo.gcRegByrefSetCur)
{
attr = EA_BYREF;
gcInfo.gcMarkRegSetByref(RBM_PROFILER_RET_SCRATCH);
}
- else
+ else
{
attr = EA_4BYTE;
}
-
+
getEmitter()->emitIns_R_R(INS_mov, attr, REG_PROFILER_RET_SCRATCH, REG_ARG_0);
regTracker.rsTrackRegTrash(REG_PROFILER_RET_SCRATCH);
gcInfo.gcMarkRegSetNpt(RBM_ARG_0);
r0Trashed = true;
}
-
+
if (compiler->compProfilerMethHndIndirected)
{
getEmitter()->emitIns_R_AI(INS_ldr, EA_PTR_DSP_RELOC, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
regTracker.rsTrackRegTrash(REG_ARG_0);
}
else
- {
+ {
instGen_Set_Reg_To_Imm(EA_4BYTE, REG_ARG_0, (ssize_t)compiler->compProfilerMethHnd);
}
genEmitHelperCall(CORINFO_HELP_PROF_FCN_LEAVE,
- 0, // argSize
- EA_UNKNOWN); // retSize
+ 0, // argSize
+ EA_UNKNOWN); // retSize
// Restore state that existed before profiler callback
if (r0Trashed)
@@ -7542,7 +7633,7 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORIN
}
regSet.rsUnlockReg(RBM_PROFILER_RET_USED);
-#else // _TARGET_*
+#else // _TARGET_*
NYI("Pushing the profilerHandle & caller's sp for the profiler callout and locking them");
#endif // _TARGET_*
@@ -7554,14 +7645,13 @@ void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORIN
#endif // PROFILING_SUPPORTED
-
/*****************************************************************************
Esp frames :
----------
-
+
These instructions are just a reordering of the instructions used today.
-
+
push ebp
push esi
push edi
@@ -7574,17 +7664,17 @@ pop edi
pop esi
pop ebp
ret
-
+
Ebp frames :
----------
-
+
The epilog does "add esp, LOCALS_SIZE" instead of "mov ebp, esp".
Everything else is similar, though in a different order.
-
+
The security object will no longer be at a fixed offset. However, the
offset can still be determined by looking up the GC-info and determining
how many callee-saved registers are pushed.
-
+
push ebp
mov ebp, esp
push esi
@@ -7599,15 +7689,15 @@ pop esi
(mov esp, ebp if there are no callee-saved registers)
pop ebp
ret
-
+
Double-aligned frame :
--------------------
-
+
LOCALS_SIZE_ADJUSTED needs to include an unused DWORD if an odd number
-of callee-saved registers are pushed on the stack so that the locals
-themselves are qword-aligned. The instructions are the same as today,
+of callee-saved registers are pushed on the stack so that the locals
+themselves are qword-aligned. The instructions are the same as today,
just in a different order.
-
+
push ebp
mov ebp, esp
and esp, 0xFFFFFFFC
@@ -7624,14 +7714,14 @@ pop ebp
mov esp, ebp
pop ebp
ret
-
+
localloc (with ebp) frames :
--------------------------
-
-The instructions are the same as today, just in a different order.
-Also, today the epilog does "lea esp, [ebp-LOCALS_SIZE-calleeSavedRegsPushedSize]"
+
+The instructions are the same as today, just in a different order.
+Also, today the epilog does "lea esp, [ebp-LOCALS_SIZE-calleeSavedRegsPushedSize]"
which will change to "lea esp, [ebp-calleeSavedRegsPushedSize]".
-
+
push ebp
mov ebp, esp
push esi
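A minimal sketch (not from the commit) of the padding rule described under "Double-aligned frame" above: when an odd number of 4-byte callee-saved register pushes precedes the locals, one unused DWORD keeps the locals area itself qword-aligned. The helper name is hypothetical.

    // Computes LOCALS_SIZE_ADJUSTED as described in the comment above.
    static unsigned AdjustedLocalsSize(unsigned localsSize, unsigned calleeSavedRegsPushed)
    {
        const unsigned dwordSize = 4;                     // size of one x86 register push
        bool needsPad = (calleeSavedRegsPushed % 2) != 0; // odd push count misaligns the locals
        return localsSize + (needsPad ? dwordSize : 0);
    }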
@@ -7684,35 +7774,28 @@ void CodeGen::genPrologPadForReJit()
#endif
}
-
/*****************************************************************************
*
* Reserve space for a function prolog.
*/
-void CodeGen::genReserveProlog(BasicBlock* block)
+void CodeGen::genReserveProlog(BasicBlock* block)
{
- assert(block != NULL);
+ assert(block != nullptr);
JITDUMP("Reserving prolog IG for block BB%02u\n", block->bbNum);
/* Nothing is live on entry to the prolog */
- getEmitter()->emitCreatePlaceholderIG(IGPT_PROLOG,
- block,
- VarSetOps::MakeEmpty(compiler),
- 0,
- 0,
- false);
+ getEmitter()->emitCreatePlaceholderIG(IGPT_PROLOG, block, VarSetOps::MakeEmpty(compiler), 0, 0, false);
}
-
/*****************************************************************************
*
* Reserve space for a function epilog.
*/
-void CodeGen::genReserveEpilog(BasicBlock* block)
+void CodeGen::genReserveEpilog(BasicBlock* block)
{
VARSET_TP VARSET_INIT(compiler, gcrefVarsArg, getEmitter()->emitThisGCrefVars);
regMaskTP gcrefRegsArg = gcInfo.gcRegGCrefSetCur;
@@ -7722,7 +7805,7 @@ void CodeGen::genReserveEpilog(BasicBlock* block)
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
- if (genFullPtrRegMap && !jmpEpilog)
+ if (genFullPtrRegMap && !jmpEpilog)
{
if (varTypeIsGC(compiler->info.compRetNativeType))
{
@@ -7732,26 +7815,25 @@ void CodeGen::genReserveEpilog(BasicBlock* block)
switch (compiler->info.compRetNativeType)
{
- case TYP_REF: gcrefRegsArg |= RBM_INTRET; break;
- case TYP_BYREF: byrefRegsArg |= RBM_INTRET; break;
- default: break;
+ case TYP_REF:
+ gcrefRegsArg |= RBM_INTRET;
+ break;
+ case TYP_BYREF:
+ byrefRegsArg |= RBM_INTRET;
+ break;
+ default:
+ break;
}
}
}
JITDUMP("Reserving epilog IG for block BB%02u\n", block->bbNum);
- assert(block != NULL);
- bool last = (block->bbNext == NULL);
- getEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG,
- block,
- gcrefVarsArg,
- gcrefRegsArg,
- byrefRegsArg,
- last);
+ assert(block != nullptr);
+ bool last = (block->bbNext == nullptr);
+ getEmitter()->emitCreatePlaceholderIG(IGPT_EPILOG, block, gcrefVarsArg, gcrefRegsArg, byrefRegsArg, last);
}
-
#if FEATURE_EH_FUNCLETS
/*****************************************************************************
@@ -7759,9 +7841,9 @@ void CodeGen::genReserveEpilog(BasicBlock* block)
* Reserve space for a funclet prolog.
*/
-void CodeGen::genReserveFuncletProlog(BasicBlock* block)
+void CodeGen::genReserveFuncletProlog(BasicBlock* block)
{
- assert(block != NULL);
+ assert(block != nullptr);
/* Currently, no registers are live on entry to the prolog, except maybe
the exception object. There might be some live stack vars, but they
@@ -7780,45 +7862,35 @@ void CodeGen::genReserveFuncletProlog(BasicBlock* block)
JITDUMP("Reserving funclet prolog IG for block BB%02u\n", block->bbNum);
- getEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_PROLOG,
- block,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- false);
+ getEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_PROLOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, false);
}
-
/*****************************************************************************
*
* Reserve space for a funclet epilog.
*/
-void CodeGen::genReserveFuncletEpilog(BasicBlock* block)
+void CodeGen::genReserveFuncletEpilog(BasicBlock* block)
{
- assert(block != NULL);
+ assert(block != nullptr);
JITDUMP("Reserving funclet epilog IG for block BB%02u\n", block->bbNum);
- bool last = (block->bbNext == NULL);
- getEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG,
- block,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- last);
+ bool last = (block->bbNext == nullptr);
+ getEmitter()->emitCreatePlaceholderIG(IGPT_FUNCLET_EPILOG, block, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, last);
}
#endif // FEATURE_EH_FUNCLETS
-
/*****************************************************************************
* Finalize the frame size and offset assignments.
*
* No changes can be made to the modified register set after this, since that can affect how many
* callee-saved registers get saved.
*/
-void CodeGen::genFinalizeFrame()
+void CodeGen::genFinalizeFrame()
{
JITDUMP("Finalizing stack frame\n");
@@ -7874,9 +7946,9 @@ void CodeGen::genFinalizeFrame()
#ifdef _TARGET_AMD64_
// On x64 we always save exactly RBP, RSI and RDI for EnC.
regMaskTP okRegs = (RBM_CALLEE_TRASH | RBM_FPBASE | RBM_RSI | RBM_RDI);
- regSet.rsSetRegsModified(RBM_RSI|RBM_RDI);
+ regSet.rsSetRegsModified(RBM_RSI | RBM_RDI);
noway_assert((regSet.rsGetModifiedRegsMask() & ~okRegs) == 0);
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
// On x86 we save all callee saved regs so the saved reg area size is consistent
regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE);
#endif // !_TARGET_AMD64_
@@ -7885,7 +7957,7 @@ void CodeGen::genFinalizeFrame()
/* If we have any pinvoke calls, we might potentially trash everything */
if (compiler->info.compCallUnmanaged)
{
- noway_assert(isFramePointerUsed()); // Setup of Pinvoke frame currently requires an EBP style frame
+ noway_assert(isFramePointerUsed()); // Setup of Pinvoke frame currently requires an EBP style frame
regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE);
}
@@ -7908,7 +7980,7 @@ void CodeGen::genFinalizeFrame()
//
maskCalleeRegsPushed |= RBM_FPBASE;
- // This assert check that we are not using REG_FP
+ // This assert check that we are not using REG_FP
// as both the frame pointer and as a codegen register
//
assert(!regSet.rsRegsModified(RBM_FPBASE));
@@ -7923,7 +7995,8 @@ void CodeGen::genFinalizeFrame()
regMaskTP maskPushRegsFloat = maskCalleeRegsPushed & RBM_ALLFLOAT;
regMaskTP maskPushRegsInt = maskCalleeRegsPushed & ~maskPushRegsFloat;
- if ((maskPushRegsFloat != RBM_NONE) || (compiler->opts.MinOpts() && (regSet.rsMaskResvd & maskCalleeRegsPushed & RBM_OPT_RSVD)))
+ if ((maskPushRegsFloat != RBM_NONE) ||
+ (compiler->opts.MinOpts() && (regSet.rsMaskResvd & maskCalleeRegsPushed & RBM_OPT_RSVD)))
{
// Here we try to keep stack double-aligned before the vpush
if ((genCountBits(regSet.rsMaskPreSpillRegs(true) | maskPushRegsInt) % 2) != 0)
@@ -7935,7 +8008,7 @@ void CodeGen::genFinalizeFrame()
}
if (extraPushedReg < REG_R11)
{
- maskPushRegsInt |= genRegMask(extraPushedReg);
+ maskPushRegsInt |= genRegMask(extraPushedReg);
regSet.rsSetRegsModified(genRegMask(extraPushedReg));
}
}
@@ -7948,18 +8021,18 @@ void CodeGen::genFinalizeFrame()
//
if (maskPushRegsFloat != RBM_NONE)
{
- regMaskTP contiguousMask = genRegMaskFloat(REG_F16, TYP_DOUBLE);
+ regMaskTP contiguousMask = genRegMaskFloat(REG_F16, TYP_DOUBLE);
while (maskPushRegsFloat > contiguousMask)
{
contiguousMask <<= 2;
- contiguousMask |= genRegMaskFloat(REG_F16, TYP_DOUBLE);
+ contiguousMask |= genRegMaskFloat(REG_F16, TYP_DOUBLE);
}
if (maskPushRegsFloat != contiguousMask)
{
- regMaskTP maskExtraRegs = contiguousMask - maskPushRegsFloat;
- maskPushRegsFloat |= maskExtraRegs;
+ regMaskTP maskExtraRegs = contiguousMask - maskPushRegsFloat;
+ maskPushRegsFloat |= maskExtraRegs;
regSet.rsSetRegsModified(maskExtraRegs);
- maskCalleeRegsPushed |= maskExtraRegs;
+ maskCalleeRegsPushed |= maskExtraRegs;
}
}
#endif // _TARGET_ARM_
@@ -7987,7 +8060,7 @@ void CodeGen::genFinalizeFrame()
/* Assign the final offsets to things living on the stack frame */
compiler->lvaAssignFrameOffsets(Compiler::FINAL_FRAME_LAYOUT);
-
+
/* We want to make sure that the prolog size calculated here is accurate
(that is instructions will not shrink because of conservative stack
frame approximations). We do this by filling in the correct size
@@ -7997,8 +8070,10 @@ void CodeGen::genFinalizeFrame()
getEmitter()->emitMaxTmpSize = compiler->tmpSize;
#ifdef DEBUG
- if (compiler->opts.dspCode || compiler->opts.disAsm || compiler->opts.disAsm2 || verbose)
+ if (compiler->opts.dspCode || compiler->opts.disAsm || compiler->opts.disAsm2 || verbose)
+ {
compiler->lvaTableDump();
+ }
#endif
}
@@ -8009,7 +8084,7 @@ void CodeGen::genFinalizeFrame()
// delta - the offset to add to the current stack pointer to establish the frame pointer
// reportUnwindData - true if establishing the frame pointer should be reported in the OS unwind data.
-void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData)
+void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData)
{
assert(compiler->compGeneratingProlog);
@@ -8023,7 +8098,8 @@ void CodeGen::genEstablishFramePointer(int delta, bool reportUnwi
else
{
getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
- // We don't update prolog scope info (there is no function to handle lea), but that is currently dead code anyway.
+ // We don't update prolog scope info (there is no function to handle lea), but that is currently dead code
+ // anyway.
}
if (reportUnwindData)
@@ -8046,8 +8122,6 @@ void CodeGen::genEstablishFramePointer(int delta, bool reportUnwi
#endif
}
-
-
/*****************************************************************************
*
* Generates code for a function prolog.
@@ -8069,20 +8143,22 @@ void CodeGen::genEstablishFramePointer(int delta, bool reportUnwi
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-void CodeGen::genFnProlog()
+void CodeGen::genFnProlog()
{
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
compiler->funSetCurrentFunc(0);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In genFnProlog()\n");
+ }
#endif
-#ifdef DEBUG
+#ifdef DEBUG
genInterruptibleUsed = true;
#endif
@@ -8100,11 +8176,14 @@ void CodeGen::genFnProlog()
#ifdef DEBUGGING_SUPPORT
// Do this so we can put the prolog instruction group ahead of
// other instruction groups
- genIPmappingAddToFront( (IL_OFFSETX) ICorDebugInfo::PROLOG );
-#endif //DEBUGGING_SUPPORT
+ genIPmappingAddToFront((IL_OFFSETX)ICorDebugInfo::PROLOG);
+#endif // DEBUGGING_SUPPORT
-#ifdef DEBUG
- if (compiler->opts.dspCode) printf("\n__prolog:\n");
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
+ {
+ printf("\n__prolog:\n");
+ }
#endif
#ifdef DEBUGGING_SUPPORT
@@ -8136,7 +8215,7 @@ void CodeGen::genFnProlog()
// We cannot force 0-initialization of the PSPSym
// as it will overwrite the real value
- if (compiler->lvaPSPSym != BAD_VAR_NUM)
+ if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
LclVarDsc* varDsc = &compiler->lvaTable[compiler->lvaPSPSym];
assert(!varDsc->lvMustInit);
@@ -8150,40 +8229,38 @@ void CodeGen::genFnProlog()
* and untracked pointer variables.
* Also find which registers will need to be zero-initialized.
*
- * 'initRegs': - Generally, enregistered variables should not need to be
+ * 'initRegs': - Generally, enregistered variables should not need to be
* zero-inited. They only need to be zero-inited when they
* have a possibly uninitialized read on some control
* flow path. Apparently some of the IL_STUBs that we
* generate have this property.
*/
- int untrLclLo = +INT_MAX;
- int untrLclHi = -INT_MAX;
+ int untrLclLo = +INT_MAX;
+ int untrLclHi = -INT_MAX;
// 'hasUntrLcl' is true if there are any stack locals which must be init'ed.
// Note that they may be tracked, but simply not allocated to a register.
- bool hasUntrLcl = false;
+ bool hasUntrLcl = false;
- int GCrefLo = +INT_MAX;
- int GCrefHi = -INT_MAX;
- bool hasGCRef = false;
+ int GCrefLo = +INT_MAX;
+ int GCrefHi = -INT_MAX;
+ bool hasGCRef = false;
- regMaskTP initRegs = RBM_NONE; // Registers which must be init'ed.
- regMaskTP initFltRegs = RBM_NONE; // FP registers which must be init'ed.
- regMaskTP initDblRegs = RBM_NONE;
+ regMaskTP initRegs = RBM_NONE; // Registers which must be init'ed.
+ regMaskTP initFltRegs = RBM_NONE; // FP registers which must be init'ed.
+ regMaskTP initDblRegs = RBM_NONE;
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
- if (varDsc->lvIsParam && !varDsc->lvIsRegArg)
+ if (varDsc->lvIsParam && !varDsc->lvIsRegArg)
{
continue;
}
- if (!varDsc->lvIsInReg() && !varDsc->lvOnFrame)
+ if (!varDsc->lvIsInReg() && !varDsc->lvOnFrame)
{
noway_assert(varDsc->lvRefCnt == 0);
continue;
@@ -8198,22 +8275,28 @@ void CodeGen::genFnProlog()
if (compiler->lvaTypeIsGC(varNum) && varDsc->lvTrackedNonStruct() && varDsc->lvOnFrame)
{
// For fields of PROMOTION_TYPE_DEPENDENT type of promotion, they should have been
- // taken care of by the parent struct.
- if (!compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
+ // taken care of by the parent struct.
+ if (!compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
hasGCRef = true;
-
+
if (loOffs < GCrefLo)
+ {
GCrefLo = loOffs;
+ }
if (hiOffs > GCrefHi)
+ {
GCrefHi = hiOffs;
+ }
}
}
/* For lvMustInit vars, gather pertinent info */
- if (!varDsc->lvMustInit)
+ if (!varDsc->lvMustInit)
+ {
continue;
+ }
if (varDsc->lvIsInReg())
{
@@ -8254,24 +8337,28 @@ void CodeGen::genFnProlog()
hasUntrLcl = true;
- if (loOffs < untrLclLo)
+ if (loOffs < untrLclLo)
+ {
untrLclLo = loOffs;
- if (hiOffs > untrLclHi)
+ }
+ if (hiOffs > untrLclHi)
+ {
untrLclHi = hiOffs;
+ }
}
}
/* Don't forget about spill temps that hold pointers */
- if (!TRACK_GC_TEMP_LIFETIMES)
+ if (!TRACK_GC_TEMP_LIFETIMES)
{
assert(compiler->tmpAllFree());
- for (TempDsc* tempThis = compiler->tmpListBeg();
- tempThis != nullptr;
- tempThis = compiler->tmpListNxt(tempThis))
+ for (TempDsc* tempThis = compiler->tmpListBeg(); tempThis != nullptr; tempThis = compiler->tmpListNxt(tempThis))
{
- if (!varTypeIsGC(tempThis->tdTempType()))
+ if (!varTypeIsGC(tempThis->tdTempType()))
+ {
continue;
+ }
signed int loOffs = tempThis->tdTempOffs();
signed int hiOffs = loOffs + TARGET_POINTER_SIZE;
@@ -8290,22 +8377,25 @@ void CodeGen::genFnProlog()
hasUntrLcl = true;
if (loOffs < untrLclLo)
+ {
untrLclLo = loOffs;
+ }
if (hiOffs > untrLclHi)
+ {
untrLclHi = hiOffs;
+ }
}
}
assert((genInitStkLclCnt > 0) == hasUntrLcl);
-
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- if (genInitStkLclCnt > 0)
+ if (genInitStkLclCnt > 0)
{
- printf("Found %u lvMustInit stk vars, frame offsets %d through %d\n",
- genInitStkLclCnt, -untrLclLo, -untrLclHi);
+ printf("Found %u lvMustInit stk vars, frame offsets %d through %d\n", genInitStkLclCnt, -untrLclLo,
+ -untrLclHi);
}
}
#endif
@@ -8321,10 +8411,10 @@ void CodeGen::genFnProlog()
/* Choose the register to use for zero initialization */
- regNumber initReg = REG_SCRATCH; // Unless we find a better register below
- bool initRegZeroed = false;
- regMaskTP excludeMask = intRegState.rsCalleeRegArgMaskLiveIn;
- regMaskTP tempMask;
+ regNumber initReg = REG_SCRATCH; // Unless we find a better register below
+ bool initRegZeroed = false;
+ regMaskTP excludeMask = intRegState.rsCalleeRegArgMaskLiveIn;
+ regMaskTP tempMask;
// We should not use the special PINVOKE registers as the initReg
// since they are trashed by the jithelper call to setup the PINVOKE frame
@@ -8341,7 +8431,7 @@ void CodeGen::genFnProlog()
// We also must exclude the register used by compLvFrameListRoot when it is enregistered
//
- LclVarDsc * varDsc = &compiler->lvaTable[compiler->info.compLvFrameListRoot];
+ LclVarDsc* varDsc = &compiler->lvaTable[compiler->info.compLvFrameListRoot];
if (varDsc->lvRegister)
{
excludeMask |= genRegMask(varDsc->lvRegNum);
@@ -8365,7 +8455,7 @@ void CodeGen::genFnProlog()
// because the loop's backwards branch depends upon the size of EAX encodings
assert(initReg == REG_EAX);
}
- else
+ else
#endif // _TARGET_XARCH_
{
tempMask = initRegs & ~excludeMask & ~regSet.rsMaskResvd;
@@ -8375,18 +8465,18 @@ void CodeGen::genFnProlog()
// We will use one of the registers that we were planning to zero init anyway.
// We pick the lowest register number.
tempMask = genFindLowestBit(tempMask);
- initReg = genRegNumFromMask(tempMask);
+ initReg = genRegNumFromMask(tempMask);
}
// Next we prefer to use one of the unused argument registers.
// If they aren't available we use one of the caller-saved integer registers.
- else
+ else
{
tempMask = regSet.rsGetModifiedRegsMask() & RBM_ALLINT & ~excludeMask & ~regSet.rsMaskResvd;
if (tempMask != RBM_NONE)
{
// We pick the lowest register number
tempMask = genFindLowestBit(tempMask);
- initReg = genRegNumFromMask(tempMask);
+ initReg = genRegNumFromMask(tempMask);
}
}
}
@@ -8396,11 +8486,11 @@ void CodeGen::genFnProlog()
#if defined(_TARGET_AMD64_)
// If we are a varargs call, in order to set up the arguments correctly this
// must be done in a 2 step process. As per the x64 ABI:
- // a) The caller sets up the argument shadow space (just before the return
+ // a) The caller sets up the argument shadow space (just before the return
// address, 4 pointer sized slots).
- // b) The callee is responsible to home the arguments on the shadow space
+ // b) The callee is responsible to home the arguments on the shadow space
// provided by the caller.
- // This way, the varargs iterator will be able to retrieve the
+ // This way, the varargs iterator will be able to retrieve the
// call arguments properly since both the arg regs and the stack allocated
// args will be contiguous.
if (compiler->info.compIsVarArgs)
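A minimal sketch (not from the commit) of the Win-x64 shadow space the two-step varargs comment above refers to: the caller reserves four pointer-sized slots just below the return address, and a varargs callee homes RCX/RDX/R8/R9 into them so that register args and stack args form one contiguous run for the argument iterator. The constant names are hypothetical.

    #include <cstddef>

    static const std::size_t kShadowSlotCount = 4;                                  // RCX, RDX, R8, R9 homes
    static const std::size_t kShadowSlotSize  = sizeof(void*);                      // 8 bytes on x64
    static const std::size_t kShadowSpaceSize = kShadowSlotCount * kShadowSlotSize; // 32 bytes reserved by the caller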
@@ -8424,9 +8514,9 @@ void CodeGen::genFnProlog()
#endif // _TARGET_ARM_
#ifdef _TARGET_XARCH_
- if (doubleAlignOrFramePointerUsed())
+ if (doubleAlignOrFramePointerUsed())
{
- inst_RV (INS_push, REG_FPBASE, TYP_REF);
+ inst_RV(INS_push, REG_FPBASE, TYP_REF);
compiler->unwindPush(REG_FPBASE);
psiAdjustStackLevel(REGSIZE_BYTES);
@@ -8435,10 +8525,10 @@ void CodeGen::genFnProlog()
#endif // !_TARGET_AMD64_
#if DOUBLE_ALIGN
- if (compiler->genDoubleAlign())
+ if (compiler->genDoubleAlign())
{
noway_assert(isFramePointerUsed() == false);
- noway_assert(!regSet.rsRegsModified(RBM_FPBASE)); /* Trashing EBP is out. */
+ noway_assert(!regSet.rsRegsModified(RBM_FPBASE)); /* Trashing EBP is out. */
inst_RV_IV(INS_AND, REG_SPBASE, -8, EA_PTRSIZE);
}
@@ -8450,14 +8540,14 @@ void CodeGen::genFnProlog()
// Probe large frames now, if necessary, since genPushCalleeSavedRegisters() will allocate the frame.
genAllocLclFrame(compiler->compLclFrameSize, initReg, &initRegZeroed, intRegState.rsCalleeRegArgMaskLiveIn);
genPushCalleeSavedRegisters(initReg, &initRegZeroed);
-#else // !_TARGET_ARM64_
+#else // !_TARGET_ARM64_
genPushCalleeSavedRegisters();
#endif // !_TARGET_ARM64_
#ifdef _TARGET_ARM_
- bool needToEstablishFP = false;
- int afterLclFrameSPtoFPdelta = 0;
- if (doubleAlignOrFramePointerUsed())
+ bool needToEstablishFP = false;
+ int afterLclFrameSPtoFPdelta = 0;
+ if (doubleAlignOrFramePointerUsed())
{
needToEstablishFP = true;
@@ -8465,7 +8555,7 @@ void CodeGen::genFnProlog()
// This makes the prolog and epilog match, giving us smaller unwind data. If the frame size is
// too big, we go ahead and do it here.
- int SPtoFPdelta = (compiler->compCalleeRegsPushed - 2) * REGSIZE_BYTES;
+ int SPtoFPdelta = (compiler->compCalleeRegsPushed - 2) * REGSIZE_BYTES;
afterLclFrameSPtoFPdelta = SPtoFPdelta + compiler->compLclFrameSize;
if (!arm_Valid_Imm_For_Add_SP(afterLclFrameSPtoFPdelta))
{
@@ -8487,7 +8577,8 @@ void CodeGen::genFnProlog()
regMaskTP maskStackAlloc = RBM_NONE;
#ifdef _TARGET_ARM_
- maskStackAlloc = genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED);
+ maskStackAlloc =
+ genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED);
#endif // _TARGET_ARM_
if (maskStackAlloc == RBM_NONE)
@@ -8496,7 +8587,7 @@ void CodeGen::genFnProlog()
}
#endif // !_TARGET_ARM64_
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
#ifdef _TARGET_ARM_
if (compiler->compLocallocUsed)
@@ -8512,23 +8603,23 @@ void CodeGen::genFnProlog()
genPreserveCalleeSavedFltRegs(compiler->compLclFrameSize);
#endif // defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
-#ifdef _TARGET_AMD64_
+#ifdef _TARGET_AMD64_
// Establish the AMD64 frame pointer after the OS-reported prolog.
- if (doubleAlignOrFramePointerUsed())
+ if (doubleAlignOrFramePointerUsed())
{
bool reportUnwindData = compiler->compLocallocUsed || compiler->opts.compDbgEnC;
genEstablishFramePointer(compiler->codeGen->genSPtoFPdelta(), reportUnwindData);
}
#endif //_TARGET_AMD64_
- //-------------------------------------------------------------------------
- //
- // This is the end of the OS-reported prolog for purposes of unwinding
- //
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
+//
+// This is the end of the OS-reported prolog for purposes of unwinding
+//
+//-------------------------------------------------------------------------
#ifdef _TARGET_ARM_
- if (needToEstablishFP)
+ if (needToEstablishFP)
{
genEstablishFramePointer(afterLclFrameSPtoFPdelta, /*reportUnwindData*/ false);
needToEstablishFP = false; // nobody uses this later, but set it anyway, just to be explicit
@@ -8538,15 +8629,12 @@ void CodeGen::genFnProlog()
if (compiler->info.compPublishStubParam)
{
#if CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- REG_SECRET_STUB_PARAM,
- genFramePointerReg(),
+ getEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SECRET_STUB_PARAM, genFramePointerReg(),
compiler->lvaTable[compiler->lvaStubArgumentVar].lvStkOffs);
#else
// mov [lvaStubArgumentVar], EAX
- getEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SECRET_STUB_PARAM,
- genFramePointerReg(), compiler->lvaTable[compiler->lvaStubArgumentVar].lvStkOffs);
+ getEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SECRET_STUB_PARAM, genFramePointerReg(),
+ compiler->lvaTable[compiler->lvaStubArgumentVar].lvStkOffs);
#endif
assert(intRegState.rsCalleeRegArgMaskLiveIn & RBM_SECRET_STUB_PARAM);
@@ -8572,7 +8660,7 @@ void CodeGen::genFnProlog()
//
// Zero out the frame as needed
//
-
+
genZeroInitFrame(untrLclHi, untrLclLo, initReg, &initRegZeroed);
#if FEATURE_EH_FUNCLETS
@@ -8585,11 +8673,11 @@ void CodeGen::genFnProlog()
if (compiler->ehNeedsShadowSPslots() && !compiler->info.compInitMem)
{
/*
- // size/speed option?
+ // size/speed option?
getEmitter()->emitIns_I_ARR(INS_mov, EA_PTRSIZE, 0,
REG_EBP, REG_NA, -compiler->lvaShadowSPfirstOffs);
*/
-
+
// The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
unsigned filterEndOffsetSlotOffs = compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*));
@@ -8602,7 +8690,8 @@ void CodeGen::genFnProlog()
initRegZeroed = true;
}
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar, firstSlotOffs);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, initReg, compiler->lvaShadowSPslotsVar,
+ firstSlotOffs);
}
#endif // !FEATURE_EH_FUNCLETS
@@ -8621,8 +8710,9 @@ void CodeGen::genFnProlog()
// The local variable representing the security object must be on the stack frame
// and must be 0 initialized.
noway_assert((compiler->lvaSecurityObject == BAD_VAR_NUM) ||
- (compiler->lvaTable[compiler->lvaSecurityObject].lvOnFrame && compiler->lvaTable[compiler->lvaSecurityObject].lvMustInit));
-
+ (compiler->lvaTable[compiler->lvaSecurityObject].lvOnFrame &&
+ compiler->lvaTable[compiler->lvaSecurityObject].lvMustInit));
+
// Initialize any "hidden" slots/locals
if (compiler->compLocallocUsed)
@@ -8646,7 +8736,7 @@ void CodeGen::genFnProlog()
#endif // PROFILING_SUPPORTED
- if (!genInterruptible)
+ if (!genInterruptible)
{
/*-------------------------------------------------------------------------
*
@@ -8661,22 +8751,22 @@ void CodeGen::genFnProlog()
genPrologPadForReJit();
getEmitter()->emitMarkPrologEnd();
}
-
+
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) && defined(FEATURE_SIMD)
// The unused bits of Vector3 arguments must be cleared
// since native compiler doesn't initize the upper bits to zeros.
//
// TODO-Cleanup: This logic can be implemented in
- // genFnPrologCalleeRegArgs() for argument registers and
+ // genFnPrologCalleeRegArgs() for argument registers and
// genEnregisterIncomingStackArgs() for stack arguments.
genClearStackVec3ArgUpperBits();
-#endif //FEATURE_UNIX_AMD64_STRUCT_PASSING && FEATURE_SIMD
+#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING && FEATURE_SIMD
/*-----------------------------------------------------------------------------
* Take care of register arguments first
*/
- RegState *regState;
+ RegState* regState;
#ifndef LEGACY_BACKEND
// Update the arg initial register locations.
@@ -8691,8 +8781,8 @@ void CodeGen::genFnProlog()
// we will use xtraReg (initReg) and set the xtraRegClobbered flag,
// if we don't need to use the xtraReg then this flag will stay false
//
- regNumber xtraReg;
- bool xtraRegClobbered = false;
+ regNumber xtraReg;
+ bool xtraRegClobbered = false;
if (genRegMask(initReg) & RBM_ARG_REGS)
{
@@ -8700,7 +8790,7 @@ void CodeGen::genFnProlog()
}
else
{
- xtraReg = REG_SCRATCH;
+ xtraReg = REG_SCRATCH;
initRegZeroed = false;
}
@@ -8713,7 +8803,7 @@ void CodeGen::genFnProlog()
}
}
- // Home the incoming arguments
+ // Home the incoming arguments
genEnregisterIncomingStackArgs();
/* Initialize any must-init registers variables now */
@@ -8722,7 +8812,7 @@ void CodeGen::genFnProlog()
{
regMaskTP regMask = 0x1;
- for (regNumber reg = REG_INT_FIRST; reg <= REG_INT_LAST; reg = REG_NEXT(reg), regMask<<=1)
+ for (regNumber reg = REG_INT_FIRST; reg <= REG_INT_LAST; reg = REG_NEXT(reg), regMask <<= 1)
{
if (regMask & initRegs)
{
@@ -8749,7 +8839,7 @@ void CodeGen::genFnProlog()
// If initReg is not in initRegs then we will use REG_SCRATCH
if ((genRegMask(initReg) & initRegs) == 0)
{
- initReg = REG_SCRATCH;
+ initReg = REG_SCRATCH;
initRegZeroed = false;
}
@@ -8763,7 +8853,7 @@ void CodeGen::genFnProlog()
}
#endif // _TARGET_ARM_
- genZeroInitFltRegs(initFltRegs, initDblRegs, initReg);
+ genZeroInitFltRegs(initFltRegs, initDblRegs, initReg);
}
#endif // !FEATURE_STACK_FP_X87
@@ -8775,14 +8865,14 @@ void CodeGen::genFnProlog()
genCodeForPrologStackFP();
#endif
-//-----------------------------------------------------------------------------
+ //-----------------------------------------------------------------------------
//
// Increase the prolog size here only if fully interruptible.
// And again make sure it's big enough for ReJIT
//
- if (genInterruptible)
+ if (genInterruptible)
{
genPrologPadForReJit();
getEmitter()->emitMarkPrologEnd();
@@ -8790,10 +8880,12 @@ void CodeGen::genFnProlog()
#ifdef DEBUGGING_SUPPORT
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
+ {
psiEndProlog();
+ }
#endif
- if (hasGCRef)
+ if (hasGCRef)
{
getEmitter()->emitSetFrameRangeGCRs(GCrefLo, GCrefHi);
}
@@ -8803,9 +8895,11 @@ void CodeGen::genFnProlog()
noway_assert(GCrefHi == -INT_MAX);
}
-#ifdef DEBUG
- if (compiler->opts.dspCode)
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
+ {
printf("\n");
+ }
#endif
#ifdef _TARGET_X86_
@@ -8823,26 +8917,25 @@ void CodeGen::genFnProlog()
noway_assert(compiler->info.compArgsCount > 0);
// MOV EAX, <VARARGS HANDLE>
- getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, compiler->info.compArgsCount-1, 0);
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, compiler->info.compArgsCount - 1, 0);
regTracker.rsTrackRegTrash(REG_EAX);
// MOV EAX, [EAX]
getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, REG_EAX, 0);
- //EDX might actually be holding something here. So make sure to only use EAX for this code
- //sequence.
+ // EDX might actually be holding something here. So make sure to only use EAX for this code
+ // sequence.
- LclVarDsc * lastArg = &compiler->lvaTable[compiler->info.compArgsCount-1];
+ LclVarDsc* lastArg = &compiler->lvaTable[compiler->info.compArgsCount - 1];
noway_assert(!lastArg->lvRegister);
signed offset = lastArg->lvStkOffs;
assert(offset != BAD_STK_OFFS);
noway_assert(lastArg->lvFramePointerBased);
// LEA EAX, &<VARARGS HANDLE> + EAX
- getEmitter()->emitIns_R_ARR(INS_lea, EA_PTRSIZE, REG_EAX,
- genFramePointerReg(), REG_EAX, offset);
+ getEmitter()->emitIns_R_ARR(INS_lea, EA_PTRSIZE, REG_EAX, genFramePointerReg(), REG_EAX, offset);
- if (varDsc->lvRegister)
+ if (varDsc->lvRegister)
{
if (varDsc->lvRegNum != REG_EAX)
{
@@ -8854,7 +8947,6 @@ void CodeGen::genFnProlog()
{
getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_EAX, argsStartVar, 0);
}
-
}
#endif // _TARGET_X86_
@@ -8887,10 +8979,10 @@ void CodeGen::genFnProlog()
#if defined(_TARGET_ARM_)
-void CodeGen::genFnEpilog(BasicBlock* block)
+void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
printf("*************** In genFnEpilog()\n");
#endif
@@ -8900,10 +8992,11 @@ void CodeGen::genFnEpilog(BasicBlock* block)
gcInfo.gcRegGCrefSetCur = getEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = getEmitter()->emitInitByrefRegs;
-#ifdef DEBUG
- if (compiler->opts.dspCode) printf("\n__epilog:\n");
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
+ printf("\n__epilog:\n");
- if (verbose)
+ if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
@@ -8924,7 +9017,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
// localloc, the sequence might look something like this:
// movw r3, 0x38e0
// add sp, r3
- // pop {r4,r5,r6,r10,r11,pc}
+ // pop {r4,r5,r6,r10,r11,pc}
// In this case, the "movw" should not be part of the unwind codes, since it will
// be a NOP, and it is a waste to start with a NOP. Note that calling unwindBegEpilog()
// also sets the current location as the beginning offset of the epilog, so every
@@ -8948,7 +9041,9 @@ void CodeGen::genFnEpilog(BasicBlock* block)
compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0);
}
- if (jmpEpilog || genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) == RBM_NONE)
+ if (jmpEpilog ||
+ genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) ==
+ RBM_NONE)
{
genFreeLclFrame(compiler->compLclFrameSize, &unwindStarted, jmpEpilog);
}
@@ -8978,7 +9073,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
noway_assert(block->bbTreeList);
// We better not have used a pop PC to return otherwise this will be unreachable code
- noway_assert(!genUsedPopToReturn);
+ noway_assert(!genUsedPopToReturn);
/* figure out what jump we have */
@@ -8990,74 +9085,69 @@ void CodeGen::genFnEpilog(BasicBlock* block)
jmpNode = jmpNode->gtStmt.gtStmtExpr;
noway_assert(jmpNode->gtOper == GT_JMP);
- CORINFO_METHOD_HANDLE methHnd = (CORINFO_METHOD_HANDLE)jmpNode->gtVal.gtVal1;
+ CORINFO_METHOD_HANDLE methHnd = (CORINFO_METHOD_HANDLE)jmpNode->gtVal.gtVal1;
- CORINFO_CONST_LOOKUP addrInfo;
- void * addr;
- regNumber indCallReg;
- emitter::EmitCallType callType;
+ CORINFO_CONST_LOOKUP addrInfo;
+ void* addr;
+ regNumber indCallReg;
+ emitter::EmitCallType callType;
compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo);
switch (addrInfo.accessType)
{
- case IAT_VALUE:
- if (arm_Valid_Imm_For_BL((ssize_t)addrInfo.addr))
- {
- // Simple direct call
- callType = emitter::EC_FUNC_TOKEN;
- addr = addrInfo.addr;
- indCallReg = REG_NA;
- break;
- }
+ case IAT_VALUE:
+ if (arm_Valid_Imm_For_BL((ssize_t)addrInfo.addr))
+ {
+ // Simple direct call
+ callType = emitter::EC_FUNC_TOKEN;
+ addr = addrInfo.addr;
+ indCallReg = REG_NA;
+ break;
+ }
- // otherwise the target address doesn't fit in an immediate
- // so we have to burn a register...
- __fallthrough;
+ // otherwise the target address doesn't fit in an immediate
+ // so we have to burn a register...
+ __fallthrough;
- case IAT_PVALUE:
- // Load the address into a register, load indirect and call through a register
- // We have to use R12 since we assume the argument registers are in use
- callType = emitter::EC_INDIR_R;
- indCallReg = REG_R12;
- addr = NULL;
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
- if (addrInfo.accessType == IAT_PVALUE)
- {
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
- regTracker.rsTrackRegTrash(indCallReg);
- }
- break;
+ case IAT_PVALUE:
+ // Load the address into a register, load indirect and call through a register
+ // We have to use R12 since we assume the argument registers are in use
+ callType = emitter::EC_INDIR_R;
+ indCallReg = REG_R12;
+ addr = NULL;
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
+ if (addrInfo.accessType == IAT_PVALUE)
+ {
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
+ regTracker.rsTrackRegTrash(indCallReg);
+ }
+ break;
- case IAT_PPVALUE:
- default:
- NO_WAY("Unsupported JMP indirection");
+ case IAT_PPVALUE:
+ default:
+ NO_WAY("Unsupported JMP indirection");
}
/* Simply emit a jump to the methodHnd. This is similar to a call so we can use
* the same descriptor with some minor adjustments.
*/
- getEmitter()->emitIns_Call(callType,
- methHnd,
- INDEBUG_LDISASM_COMMA(nullptr)
- addr,
- 0, // argSize
- EA_UNKNOWN, // retSize
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- BAD_IL_OFFSET, // IL offset
- indCallReg, // ireg
- REG_NA, // xreg
- 0, // xmul
- 0, // disp
- true); // isJump
+ getEmitter()->emitIns_Call(callType, methHnd, INDEBUG_LDISASM_COMMA(nullptr) addr,
+ 0, // argSize
+ EA_UNKNOWN, // retSize
+ gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur,
+ BAD_IL_OFFSET, // IL offset
+ indCallReg, // ireg
+ REG_NA, // xreg
+ 0, // xmul
+ 0, // disp
+ true); // isJump
}
else
{
if (!genUsedPopToReturn)
{
- // If we did not use a pop to return, then we did a "pop {..., lr}" instead of "pop {..., pc}",
+ // If we did not use a pop to return, then we did a "pop {..., lr}" instead of "pop {..., pc}",
// so we need a "bx lr" instruction to return from the function.
inst_RV(INS_bx, REG_LR, TYP_I_IMPL);
compiler->unwindBranch16();
@@ -9069,7 +9159,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
#elif defined(_TARGET_ARM64_)
-void CodeGen::genFnEpilog(BasicBlock* block)
+void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
@@ -9082,10 +9172,11 @@ void CodeGen::genFnEpilog(BasicBlock* block)
gcInfo.gcRegGCrefSetCur = getEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = getEmitter()->emitInitByrefRegs;
-#ifdef DEBUG
- if (compiler->opts.dspCode) printf("\n__epilog:\n");
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
+ printf("\n__epilog:\n");
- if (verbose)
+ if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
@@ -9125,7 +9216,8 @@ void CodeGen::genFnEpilog(BasicBlock* block)
noway_assert((jmpNode->gtOper != GT_JMP) || (jmpStmt->gtNext == nullptr));
// Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp
- noway_assert((jmpNode->gtOper == GT_JMP) || ((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
+ noway_assert((jmpNode->gtOper == GT_JMP) ||
+ ((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
// The next block is associated with this "if" stmt
if (jmpNode->gtOper == GT_JMP)
@@ -9146,18 +9238,13 @@ void CodeGen::genFnEpilog(BasicBlock* block)
// Simply emit a jump to the methodHnd. This is similar to a call so we can use
// the same descriptor with some minor adjustments.
- getEmitter()->emitIns_Call(callType,
- methHnd,
- INDEBUG_LDISASM_COMMA(nullptr)
- addrInfo.addr,
- 0, // argSize
- EA_UNKNOWN, // retSize
- EA_UNKNOWN, // secondRetSize
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
+ getEmitter()->emitIns_Call(callType, methHnd, INDEBUG_LDISASM_COMMA(nullptr) addrInfo.addr,
+ 0, // argSize
+ EA_UNKNOWN, // retSize
+ EA_UNKNOWN, // secondRetSize
+ gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur,
BAD_IL_OFFSET, REG_NA, REG_NA, 0, 0, /* iloffset, ireg, xreg, xmul, disp */
- true); /* isJump */
+ true); /* isJump */
}
#if FEATURE_FASTTAILCALL
else
@@ -9168,7 +9255,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
// Do we need a special encoding for stack walker like rex.w prefix for x64?
getEmitter()->emitIns_R(INS_br, emitTypeSize(TYP_I_IMPL), REG_IP0);
}
-#endif //FEATURE_FASTTAILCALL
+#endif // FEATURE_FASTTAILCALL
}
else
{
@@ -9181,11 +9268,13 @@ void CodeGen::genFnEpilog(BasicBlock* block)
#elif defined(_TARGET_XARCH_)
-void CodeGen::genFnEpilog(BasicBlock* block)
+void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In genFnEpilog()\n");
+ }
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
@@ -9194,19 +9283,21 @@ void CodeGen::genFnEpilog(BasicBlock* block)
gcInfo.gcRegGCrefSetCur = getEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = getEmitter()->emitInitByrefRegs;
- noway_assert(!compiler->opts.MinOpts() || isFramePointerUsed()); // FPO not allowed with minOpts
+ noway_assert(!compiler->opts.MinOpts() || isFramePointerUsed()); // FPO not allowed with minOpts
-#ifdef DEBUG
+#ifdef DEBUG
genInterruptibleUsed = true;
#endif
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
-#ifdef DEBUG
- if (compiler->opts.dspCode)
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
+ {
printf("\n__epilog:\n");
+ }
- if (verbose)
+ if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
@@ -9222,26 +9313,26 @@ void CodeGen::genFnEpilog(BasicBlock* block)
#if !FEATURE_STACK_FP_X87
// Restore float registers that were saved to stack before SP is modified.
- genRestoreCalleeSavedFltRegs(compiler->compLclFrameSize);
+ genRestoreCalleeSavedFltRegs(compiler->compLclFrameSize);
#endif // !FEATURE_STACK_FP_X87
/* Compute the size in bytes we've pushed/popped */
- if (!doubleAlignOrFramePointerUsed())
+ if (!doubleAlignOrFramePointerUsed())
{
// We have an ESP frame */
-
+
noway_assert(compiler->compLocallocUsed == false); // Only used with frame-pointer
/* Get rid of our local variables */
- if (compiler->compLclFrameSize)
+ if (compiler->compLclFrameSize)
{
#ifdef _TARGET_X86_
/* Add 'compiler->compLclFrameSize' to ESP */
/* Use pop ECX to increment ESP by 4, unless compiler->compJmpOpUsed is true */
- if ( (compiler->compLclFrameSize == sizeof(void*)) && !compiler->compJmpOpUsed )
+ if ((compiler->compLclFrameSize == sizeof(void*)) && !compiler->compJmpOpUsed)
{
inst_RV(INS_pop, REG_ECX, TYP_I_IMPL);
regTracker.rsTrackRegTrash(REG_ECX);
@@ -9260,7 +9351,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
else
{
noway_assert(doubleAlignOrFramePointerUsed());
-
+
/* Tear down the stack frame */
bool needMovEspEbp = false;
@@ -9300,13 +9391,13 @@ void CodeGen::genFnEpilog(BasicBlock* block)
// AMD64 can't use "mov esp, ebp", according to the ABI specification describing epilogs. So,
// do an LEA to "pop off" the frame allocation.
needLea = true;
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
// We will just generate "mov esp, ebp" and be done with it.
needMovEspEbp = true;
#endif // !_TARGET_AMD64_
}
}
- else if (compiler->compLclFrameSize == 0)
+ else if (compiler->compLclFrameSize == 0)
{
// do nothing before popping the callee-saved registers
}
@@ -9320,7 +9411,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
#endif // _TARGET_X86
else
{
- // We need to make ESP point to the callee-saved registers
+ // We need to make ESP point to the callee-saved registers
needLea = true;
}
@@ -9330,7 +9421,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
#ifdef _TARGET_AMD64_
// lea esp, [ebp + compiler->compLclFrameSize - genSPtoFPdelta]
- //
+ //
// Case 1: localloc not used.
// genSPToFPDelta = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize
// offset = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
@@ -9340,14 +9431,14 @@ void CodeGen::genFnEpilog(BasicBlock* block)
// genSPToFPDelta = Min(240, (int)compiler->lvaOutgoingArgSpaceSize)
// Offset = Amount to be aded to RBP to point at callee saved int regs.
offset = genSPtoFPdelta() - compiler->compLclFrameSize;
-
+
// Offset should fit within a byte if localloc is not used.
if (!compiler->compLocallocUsed)
{
noway_assert(offset < UCHAR_MAX);
}
#else
- // lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
+ // lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
offset = compiler->compCalleeRegsPushed * REGSIZE_BYTES;
noway_assert(offset < UCHAR_MAX); // the offset fits in a byte
#endif
@@ -9364,7 +9455,7 @@ void CodeGen::genFnEpilog(BasicBlock* block)
#ifdef _TARGET_AMD64_
assert(!needMovEspEbp); // "mov esp, ebp" is not allowed in AMD64 epilogs
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
if (needMovEspEbp)
{
// mov esp, ebp
@@ -9376,34 +9467,35 @@ void CodeGen::genFnEpilog(BasicBlock* block)
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
}
- getEmitter()->emitStartExitSeq(); // Mark the start of the "return" sequence
+ getEmitter()->emitStartExitSeq(); // Mark the start of the "return" sequence
/* Check if this a special return block i.e.
* CEE_JMP instruction */
- if (jmpEpilog)
+ if (jmpEpilog)
{
noway_assert(block->bbJumpKind == BBJ_RETURN);
noway_assert(block->bbTreeList);
// figure out what jump we have
- GenTreePtr jmpStmt = block->lastTopLevelStmt();
+ GenTreePtr jmpStmt = block->lastTopLevelStmt();
noway_assert(jmpStmt && (jmpStmt->gtOper == GT_STMT));
#if !FEATURE_FASTTAILCALL
- // x86
+ // x86
noway_assert(jmpStmt->gtNext == nullptr);
GenTreePtr jmpNode = jmpStmt->gtStmt.gtStmtExpr;
- noway_assert(jmpNode->gtOper == GT_JMP);
-#else
- // amd64
+ noway_assert(jmpNode->gtOper == GT_JMP);
+#else
+ // amd64
// If jmpNode is GT_JMP then gtNext must be null.
// If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts.
GenTreePtr jmpNode = jmpStmt->gtStmt.gtStmtExpr;
- noway_assert((jmpNode->gtOper != GT_JMP) || (jmpStmt->gtNext == nullptr));
+ noway_assert((jmpNode->gtOper != GT_JMP) || (jmpStmt->gtNext == nullptr));
// Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp
- noway_assert((jmpNode->gtOper == GT_JMP) || ((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
+ noway_assert((jmpNode->gtOper == GT_JMP) ||
+ ((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
// The next block is associated with this "if" stmt
if (jmpNode->gtOper == GT_JMP)
@@ -9420,48 +9512,43 @@ void CodeGen::genFnEpilog(BasicBlock* block)
NO_WAY("Unsupported JMP indirection");
}
- const emitter::EmitCallType callType = (addrInfo.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN :
- emitter::EC_FUNC_TOKEN_INDIR;
+ const emitter::EmitCallType callType =
+ (addrInfo.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN : emitter::EC_FUNC_TOKEN_INDIR;
// Simply emit a jump to the methodHnd. This is similar to a call so we can use
// the same descriptor with some minor adjustments.
- getEmitter()->emitIns_Call(callType,
- methHnd,
- INDEBUG_LDISASM_COMMA(nullptr)
- addrInfo.addr,
+ getEmitter()->emitIns_Call(callType, methHnd, INDEBUG_LDISASM_COMMA(nullptr) addrInfo.addr,
0, // argSize
EA_UNKNOWN // retSize
FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(EA_UNKNOWN), // secondRetSize
gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- BAD_IL_OFFSET, REG_NA, REG_NA, 0, 0, /* iloffset, ireg, xreg, xmul, disp */
- true); /* isJump */
+ gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, BAD_IL_OFFSET, REG_NA, REG_NA,
+ 0, 0, /* iloffset, ireg, xreg, xmul, disp */
+ true); /* isJump */
}
#if FEATURE_FASTTAILCALL
else
{
- #ifdef _TARGET_AMD64_
+#ifdef _TARGET_AMD64_
// Fast tail call.
// Call target = RAX.
// Stack walker requires that a register indirect tail call be rex.w prefixed.
getEmitter()->emitIns_R(INS_rex_jmp, emitTypeSize(TYP_I_IMPL), REG_RAX);
- #else
+#else
assert(!"Fast tail call as epilog+jmp");
unreached();
- #endif //_TARGET_AMD64_
+#endif //_TARGET_AMD64_
}
-#endif //FEATURE_FASTTAILCALL
-
+#endif // FEATURE_FASTTAILCALL
}
else
{
- unsigned stkArgSize = 0; // Zero on all platforms except x86
+ unsigned stkArgSize = 0; // Zero on all platforms except x86
#if defined(_TARGET_X86_)
- noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * sizeof(void *));
- stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * sizeof(void *);
+ noway_assert(compiler->compArgSize >= intRegState.rsCalleeRegArgCount * sizeof(void*));
+ stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * sizeof(void*);
noway_assert(compiler->compArgSize < 0x10000); // "ret" only has 2 byte operand
@@ -9567,9 +9654,9 @@ void CodeGen::genFnEpilog(BasicBlock* block)
* | incoming |
* | arguments |
* +=======================+ <---- Caller's SP
- * |Callee saved registers |
+ * |Callee saved registers |
* |-----------------------|
- * |Pre-spill regs space | // This is only necessary to keep the PSP slot at the same offset
+ * |Pre-spill regs space | // This is only necessary to keep the PSP slot at the same offset
* | | // in function and funclet
* |-----------------------|
* | PSP slot |
@@ -9577,18 +9664,18 @@ void CodeGen::genFnEpilog(BasicBlock* block)
* ~ possible 4 byte pad ~
* ~ for alignment ~
* |-----------------------|
- * | Outgoing arg space |
+ * | Outgoing arg space |
* |-----------------------| <---- Ambient SP
- * | | |
- * ~ | Stack grows ~
- * | | downward |
+ * | | |
+ * ~ | Stack grows ~
+ * | | downward |
* V
*/
-void CodeGen::genFuncletProlog(BasicBlock* block)
+void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
printf("*************** In genFuncletProlog()\n");
#endif
@@ -9604,7 +9691,7 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
regMaskTP maskPushRegsFloat = genFuncletInfo.fiSaveRegs & RBM_ALLFLOAT;
regMaskTP maskPushRegsInt = genFuncletInfo.fiSaveRegs & ~maskPushRegsFloat;
- regMaskTP maskStackAlloc = genStackAllocRegisterMask(genFuncletInfo.fiSpDelta, maskPushRegsFloat);
+ regMaskTP maskStackAlloc = genStackAllocRegisterMask(genFuncletInfo.fiSpDelta, maskPushRegsFloat);
maskPushRegsInt |= maskStackAlloc;
assert(FitsIn<int>(maskPushRegsInt));
@@ -9633,8 +9720,8 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
maskArgRegsLiveIn = RBM_R0;
}
- regNumber initReg = REG_R3; // R3 is never live on entry to a funclet, so it can be trashed
- bool initRegZeroed = false;
+ regNumber initReg = REG_R3; // R3 is never live on entry to a funclet, so it can be trashed
+ bool initRegZeroed = false;
if (maskStackAlloc == RBM_NONE)
{
@@ -9648,30 +9735,34 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
{
// This is the first block of a filter
- getEmitter()->emitIns_R_R_I(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_R1, REG_R1, genFuncletInfo.fiPSP_slot_CallerSP_offset);
+ getEmitter()->emitIns_R_R_I(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_R1, REG_R1,
+ genFuncletInfo.fiPSP_slot_CallerSP_offset);
regTracker.rsTrackRegTrash(REG_R1);
- getEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_R1, REG_SPBASE, genFuncletInfo.fiPSP_slot_SP_offset);
- getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_FPBASE, REG_R1, genFuncletInfo.fiFunctionCallerSPtoFPdelta);
+ getEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_R1, REG_SPBASE,
+ genFuncletInfo.fiPSP_slot_SP_offset);
+ getEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_FPBASE, REG_R1,
+ genFuncletInfo.fiFunctionCallerSPtoFPdelta);
}
else
{
// This is a non-filter funclet
- getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_R3, REG_FPBASE, genFuncletInfo.fiFunctionCallerSPtoFPdelta);
+ getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_R3, REG_FPBASE,
+ genFuncletInfo.fiFunctionCallerSPtoFPdelta);
regTracker.rsTrackRegTrash(REG_R3);
- getEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_R3, REG_SPBASE, genFuncletInfo.fiPSP_slot_SP_offset);
+ getEmitter()->emitIns_R_R_I(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_R3, REG_SPBASE,
+ genFuncletInfo.fiPSP_slot_SP_offset);
}
}
-
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
-void CodeGen::genFuncletEpilog()
+void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
printf("*************** In genFuncletEpilog()\n");
#endif
@@ -9682,7 +9773,7 @@ void CodeGen::genFuncletEpilog()
// this:
// movw r3, 0x38e0
// add sp, r3
- // pop {r4,r5,r6,r10,r11,pc}
+ // pop {r4,r5,r6,r10,r11,pc}
// where the "movw" shouldn't be part of the unwind codes. See genFnEpilog() for more details.
bool unwindStarted = false;
@@ -9693,7 +9784,7 @@ void CodeGen::genFuncletEpilog()
regMaskTP maskPopRegsFloat = genFuncletInfo.fiSaveRegs & RBM_ALLFLOAT;
regMaskTP maskPopRegsInt = genFuncletInfo.fiSaveRegs & ~maskPopRegsFloat;
- regMaskTP maskStackAlloc = genStackAllocRegisterMask(genFuncletInfo.fiSpDelta, maskPopRegsFloat);
+ regMaskTP maskStackAlloc = genStackAllocRegisterMask(genFuncletInfo.fiSpDelta, maskPopRegsFloat);
maskPopRegsInt |= maskStackAlloc;
if (maskStackAlloc == RBM_NONE)
@@ -9709,7 +9800,7 @@ void CodeGen::genFuncletEpilog()
}
maskPopRegsInt &= ~RBM_LR;
- maskPopRegsInt |= RBM_PC;
+ maskPopRegsInt |= RBM_PC;
if (maskPopRegsFloat != RBM_NONE)
{
@@ -9718,13 +9809,12 @@ void CodeGen::genFuncletEpilog()
}
assert(FitsIn<int>(maskPopRegsInt));
- inst_IV(INS_pop, (int)maskPopRegsInt);
+ inst_IV(INS_pop, (int)maskPopRegsInt);
compiler->unwindPopMaskInt(maskPopRegsInt);
compiler->unwindEndEpilog();
}
-
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
@@ -9735,40 +9825,43 @@ void CodeGen::genFuncletEpilog()
* See genFuncletProlog() for more information about the prolog/epilog sequences.
*/
-void CodeGen::genCaptureFuncletPrologEpilogInfo()
+void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (compiler->ehAnyFunclets())
{
assert(isFramePointerUsed());
- assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT); // The frame size and offsets must be finalized
+ assert(compiler->lvaDoneFrameLayout ==
+ Compiler::FINAL_FRAME_LAYOUT); // The frame size and offsets must be finalized
// Frame pointer doesn't point at the end, it points at the pushed r11. So, instead
// of adding the number of callee-saved regs to CallerSP, we add 1 for lr and 1 for r11
// (plus the "pre spill regs"). Note that we assume r12 and r13 aren't saved
// (also assumed in genFnProlog()).
assert((regSet.rsMaskCalleeSaved & (RBM_R12 | RBM_R13)) == 0);
- unsigned preSpillRegArgSize = genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
+ unsigned preSpillRegArgSize = genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
genFuncletInfo.fiFunctionCallerSPtoFPdelta = preSpillRegArgSize + 2 * REGSIZE_BYTES;
regMaskTP rsMaskSaveRegs = regSet.rsMaskCalleeSaved;
- unsigned saveRegsCount = genCountBits(rsMaskSaveRegs);
- unsigned saveRegsSize = saveRegsCount * REGSIZE_BYTES; // bytes of regs we're saving
+ unsigned saveRegsCount = genCountBits(rsMaskSaveRegs);
+ unsigned saveRegsSize = saveRegsCount * REGSIZE_BYTES; // bytes of regs we're saving
assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
- unsigned funcletFrameSize = preSpillRegArgSize + saveRegsSize + REGSIZE_BYTES /* PSP slot */ + compiler->lvaOutgoingArgSpaceSize;
+ unsigned funcletFrameSize =
+ preSpillRegArgSize + saveRegsSize + REGSIZE_BYTES /* PSP slot */ + compiler->lvaOutgoingArgSpaceSize;
- unsigned funcletFrameSizeAligned = roundUp(funcletFrameSize, STACK_ALIGN);
+ unsigned funcletFrameSizeAligned = roundUp(funcletFrameSize, STACK_ALIGN);
unsigned funcletFrameAlignmentPad = funcletFrameSizeAligned - funcletFrameSize;
- unsigned spDelta = funcletFrameSizeAligned - saveRegsSize;
+ unsigned spDelta = funcletFrameSizeAligned - saveRegsSize;
unsigned PSP_slot_SP_offset = compiler->lvaOutgoingArgSpaceSize + funcletFrameAlignmentPad;
- int PSP_slot_CallerSP_offset = -(int)(funcletFrameSize - compiler->lvaOutgoingArgSpaceSize); // NOTE: it's negative!
+ int PSP_slot_CallerSP_offset =
+ -(int)(funcletFrameSize - compiler->lvaOutgoingArgSpaceSize); // NOTE: it's negative!
/* Now save it for future use */
- genFuncletInfo.fiSaveRegs = rsMaskSaveRegs;
- genFuncletInfo.fiSpDelta = spDelta;
- genFuncletInfo.fiPSP_slot_SP_offset = PSP_slot_SP_offset;
- genFuncletInfo.fiPSP_slot_CallerSP_offset = PSP_slot_CallerSP_offset;
+ genFuncletInfo.fiSaveRegs = rsMaskSaveRegs;
+ genFuncletInfo.fiSpDelta = spDelta;
+ genFuncletInfo.fiPSP_slot_SP_offset = PSP_slot_SP_offset;
+ genFuncletInfo.fiPSP_slot_CallerSP_offset = PSP_slot_CallerSP_offset;
#ifdef DEBUG
if (verbose)
@@ -9776,19 +9869,26 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
printf("\n");
printf("Funclet prolog / epilog info\n");
printf(" Function CallerSP-to-FP delta: %d\n", genFuncletInfo.fiFunctionCallerSPtoFPdelta);
- printf(" Save regs: "); dspRegMask(rsMaskSaveRegs); printf("\n");
+ printf(" Save regs: ");
+ dspRegMask(rsMaskSaveRegs);
+ printf("\n");
printf(" SP delta: %d\n", genFuncletInfo.fiSpDelta);
printf(" PSP slot SP offset: %d\n", genFuncletInfo.fiPSP_slot_SP_offset);
printf(" PSP slot Caller SP offset: %d\n", genFuncletInfo.fiPSP_slot_CallerSP_offset);
- if (PSP_slot_CallerSP_offset != compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for debugging
- printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n", compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
+ if (PSP_slot_CallerSP_offset !=
+ compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for debugging
+ printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n",
+ compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
}
#endif // DEBUG
assert(PSP_slot_CallerSP_offset < 0);
assert(compiler->lvaPSPSym != BAD_VAR_NUM);
- assert(PSP_slot_CallerSP_offset == compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and funclet!
+ assert(PSP_slot_CallerSP_offset == compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); // same offset
+ // used in main
+ // function and
+ // funclet!
}
}
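The ARM funclet-info capture above boils down to a little frame arithmetic: pre-spilled argument registers, pushed callee-saved registers, the PSP slot, outgoing argument space, then alignment. A self-contained sketch of the same computation, using assumed example inputs and the usual ARM32 sizes (REGSIZE_BYTES = 4, STACK_ALIGN = 8):

    #include <cassert>
    #include <cstdio>

    // Round 'size' up to a multiple of 'align' (a power of two), mirroring roundUp().
    static unsigned roundUp(unsigned size, unsigned align)
    {
        return (size + align - 1) & ~(align - 1);
    }

    int main()
    {
        const unsigned REGSIZE_BYTES = 4; // ARM32
        const unsigned STACK_ALIGN   = 8; // ARM32

        // Assumed example inputs.
        unsigned preSpillRegArgSize   = 4 * REGSIZE_BYTES; // r0-r3 pre-spilled by the prolog
        unsigned saveRegsCount        = 5;                 // callee-saved regs pushed, including lr
        unsigned outgoingArgSpaceSize = 8;

        // CallerSP-to-FP delta: the pre-spill area plus the pushed lr and r11.
        unsigned callerSPtoFPdelta = preSpillRegArgSize + 2 * REGSIZE_BYTES;

        unsigned saveRegsSize     = saveRegsCount * REGSIZE_BYTES;
        unsigned funcletFrameSize = preSpillRegArgSize + saveRegsSize + REGSIZE_BYTES /* PSP slot */
                                    + outgoingArgSpaceSize;

        unsigned funcletFrameSizeAligned  = roundUp(funcletFrameSize, STACK_ALIGN);
        unsigned funcletFrameAlignmentPad = funcletFrameSizeAligned - funcletFrameSize;
        unsigned spDelta                  = funcletFrameSizeAligned - saveRegsSize;

        unsigned PSP_slot_SP_offset       = outgoingArgSpaceSize + funcletFrameAlignmentPad;
        int      PSP_slot_CallerSP_offset = -(int)(funcletFrameSize - outgoingArgSpaceSize); // negative

        assert(PSP_slot_CallerSP_offset < 0);
        printf("CallerSP-to-FP=%u spDelta=%u PSP@[SP+%u] PSP@[CallerSP%d]\n", callerSPtoFPdelta, spDelta,
               PSP_slot_SP_offset, PSP_slot_CallerSP_offset);
        return 0;
    }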
@@ -9849,11 +9949,11 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
* | incoming |
* | arguments |
* +=======================+ <---- Caller's SP
- * | Return address |
+ * | Return address |
* |-----------------------|
- * | Saved EBP |
+ * | Saved EBP |
* |-----------------------|
- * |Callee saved registers |
+ * |Callee saved registers |
* |-----------------------|
* ~ possible 8 byte pad ~
* ~ for alignment ~
@@ -9862,9 +9962,9 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
* |-----------------------|
* | Outgoing arg space | // this only exists if the function makes a call
* |-----------------------| <---- Initial SP
- * | | |
- * ~ | Stack grows ~
- * | | downward |
+ * | | |
+ * ~ | Stack grows ~
+ * | | downward |
* V
*
* TODO-AMD64-Bug?: the frame pointer should really point to the PSP slot (the debugger seems to assume this
@@ -9873,15 +9973,17 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
* "FRAMEPTR OFFSETS" for details.
*/
-void CodeGen::genFuncletProlog(BasicBlock* block)
+void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In genFuncletProlog()\n");
+ }
#endif
assert(!regSet.rsRegsModified(RBM_FPBASE));
- assert(block != NULL);
+ assert(block != nullptr);
assert(block->bbFlags & BBF_FUNCLET_BEG);
assert(isFramePointerUsed());
@@ -9903,7 +10005,7 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
compiler->unwindPush(REG_FPBASE);
// Callee saved int registers are pushed to stack.
- genPushCalleeSavedRegisters();
+ genPushCalleeSavedRegisters();
regMaskTP maskArgRegsLiveIn;
if ((block->bbCatchTyp == BBCT_FINALLY) || (block->bbCatchTyp == BBCT_FAULT))
@@ -9915,9 +10017,8 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
maskArgRegsLiveIn = RBM_ARG_0 | RBM_ARG_2;
}
-
- regNumber initReg = REG_EBP; // We already saved EBP, so it can be trashed
- bool initRegZeroed = false;
+ regNumber initReg = REG_EBP; // We already saved EBP, so it can be trashed
+ bool initRegZeroed = false;
genAllocLclFrame(genFuncletInfo.fiSpDelta, initReg, &initRegZeroed, maskArgRegsLiveIn);
@@ -9936,14 +10037,14 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
if (genFuncletInfo.fiFunction_InitialSP_to_FP_delta != 0)
{
- getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_FPBASE, genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
+ getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_FPBASE, REG_FPBASE,
+ genFuncletInfo.fiFunction_InitialSP_to_FP_delta);
}
// We've modified EBP, but not really. Say that we haven't...
regSet.rsRemoveRegsModified(RBM_FPBASE);
}
-
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
@@ -9951,11 +10052,13 @@ void CodeGen::genFuncletProlog(BasicBlock* block)
* Note that we don't do anything with unwind codes, because AMD64 only cares about unwind codes for the prolog.
*/
-void CodeGen::genFuncletEpilog()
+void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In genFuncletEpilog()\n");
+ }
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
@@ -9963,70 +10066,75 @@ void CodeGen::genFuncletEpilog()
// Restore callee saved XMM regs from their stack slots before modifying SP
// to position at callee saved int regs.
genRestoreCalleeSavedFltRegs(genFuncletInfo.fiSpDelta);
- inst_RV_IV(INS_add, REG_SPBASE, genFuncletInfo.fiSpDelta, EA_PTRSIZE);
+ inst_RV_IV(INS_add, REG_SPBASE, genFuncletInfo.fiSpDelta, EA_PTRSIZE);
genPopCalleeSavedRegisters();
inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
instGen_Return(0);
}
-
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
*/
-void CodeGen::genCaptureFuncletPrologEpilogInfo()
+void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
+ {
return;
+ }
// Note that compLclFrameSize can't be used (nor can we call functions that depend on it),
// because we're not going to allocate the same size frame as the parent.
assert(isFramePointerUsed());
- assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT); // The frame size and offsets must be finalized
- assert(compiler->compCalleeFPRegsSavedMask != (regMaskTP)-1); // The float registers to be preserved is finalized
+ assert(compiler->lvaDoneFrameLayout ==
+ Compiler::FINAL_FRAME_LAYOUT); // The frame size and offsets must be finalized
+ assert(compiler->compCalleeFPRegsSavedMask != (regMaskTP)-1); // The float registers to be preserved is finalized
// Even though lvaToInitialSPRelativeOffset() depends on compLclFrameSize,
// that's ok, because we're figuring out an offset in the parent frame.
- genFuncletInfo.fiFunction_InitialSP_to_FP_delta = compiler->lvaToInitialSPRelativeOffset(0, true); // trick to find the Initial-SP-relative offset of the frame pointer.
+ genFuncletInfo.fiFunction_InitialSP_to_FP_delta =
+ compiler->lvaToInitialSPRelativeOffset(0, true); // trick to find the Initial-SP-relative offset of the frame
+ // pointer.
assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
#ifndef UNIX_AMD64_ABI
// No 4 slots for outgoing params on the stack for System V systems.
- assert((compiler->lvaOutgoingArgSpaceSize == 0) || (compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES))); // On AMD64, we always have 4 outgoing argument slots if there are any calls in the function.
+ // On AMD64, we always have 4 outgoing argument slots if there are any calls in the function.
+ assert((compiler->lvaOutgoingArgSpaceSize == 0) || (compiler->lvaOutgoingArgSpaceSize >= (4 * REGSIZE_BYTES)));
#endif // UNIX_AMD64_ABI
unsigned offset = compiler->lvaOutgoingArgSpaceSize;
- genFuncletInfo.fiPSP_slot_InitialSP_offset = offset;
+ genFuncletInfo.fiPSP_slot_InitialSP_offset = offset;
// How much stack do we allocate in the funclet?
- // We need to 16-byte align the stack.
-
- unsigned totalFrameSize = REGSIZE_BYTES // return address
- + REGSIZE_BYTES // pushed EBP
- + (compiler->compCalleeRegsPushed * REGSIZE_BYTES); // pushed callee-saved int regs, not including EBP
+ // We need to 16-byte align the stack.
+
+ unsigned totalFrameSize =
+ REGSIZE_BYTES // return address
+ + REGSIZE_BYTES // pushed EBP
+ + (compiler->compCalleeRegsPushed * REGSIZE_BYTES); // pushed callee-saved int regs, not including EBP
// Entire 128-bits of XMM register is saved to stack due to ABI encoding requirement.
// Copying entire XMM register to/from memory will be performant if SP is aligned at XMM_REGSIZE_BYTES boundary.
unsigned calleeFPRegsSavedSize = genCountBits(compiler->compCalleeFPRegsSavedMask) * XMM_REGSIZE_BYTES;
- unsigned FPRegsPad = (calleeFPRegsSavedSize > 0) ? AlignmentPad(totalFrameSize, XMM_REGSIZE_BYTES) : 0;
+ unsigned FPRegsPad = (calleeFPRegsSavedSize > 0) ? AlignmentPad(totalFrameSize, XMM_REGSIZE_BYTES) : 0;
- totalFrameSize += FPRegsPad // Padding before pushing entire xmm regs
- + calleeFPRegsSavedSize // pushed callee-saved float regs
- // below calculated 'pad' will go here
- + REGSIZE_BYTES // PSPSym
- + compiler->lvaOutgoingArgSpaceSize // outgoing arg space
- ;
+ totalFrameSize += FPRegsPad // Padding before pushing entire xmm regs
+ + calleeFPRegsSavedSize // pushed callee-saved float regs
+ // below calculated 'pad' will go here
+ + REGSIZE_BYTES // PSPSym
+ + compiler->lvaOutgoingArgSpaceSize // outgoing arg space
+ ;
unsigned pad = AlignmentPad(totalFrameSize, 16);
- genFuncletInfo.fiSpDelta = FPRegsPad // Padding to align SP on XMM_REGSIZE_BYTES boundary
- + calleeFPRegsSavedSize // Callee saved xmm regs
- + pad
- + REGSIZE_BYTES // PSPSym
- + compiler->lvaOutgoingArgSpaceSize // outgoing arg space
- ;
+ genFuncletInfo.fiSpDelta = FPRegsPad // Padding to align SP on XMM_REGSIZE_BYTES boundary
+ + calleeFPRegsSavedSize // Callee saved xmm regs
+ + pad + REGSIZE_BYTES // PSPSym
+ + compiler->lvaOutgoingArgSpaceSize // outgoing arg space
+ ;
#ifdef DEBUG
if (verbose)
@@ -10040,7 +10148,9 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
#endif // DEBUG
assert(compiler->lvaPSPSym != BAD_VAR_NUM);
- assert(genFuncletInfo.fiPSP_slot_InitialSP_offset == compiler->lvaGetInitialSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and funclet!
+ assert(genFuncletInfo.fiPSP_slot_InitialSP_offset ==
+ compiler->lvaGetInitialSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and
+ // funclet!
}
#elif defined(_TARGET_ARM64_)
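The AMD64 version in the hunk above does the equivalent bookkeeping with totalFrameSize and AlignmentPad. A hedged, standalone rendering of that arithmetic; the register counts and outgoing-arg size below are invented example values, and REGSIZE_BYTES / XMM_REGSIZE_BYTES are taken as 8 and 16:

    #include <cstdio>

    // Bytes needed to bring 'size' up to a multiple of 'align', mirroring AlignmentPad().
    static unsigned alignmentPad(unsigned size, unsigned align)
    {
        return (align - (size % align)) % align;
    }

    int main()
    {
        const unsigned REGSIZE_BYTES     = 8;
        const unsigned XMM_REGSIZE_BYTES = 16;

        // Assumed example inputs.
        unsigned calleeIntRegsPushed  = 3;  // pushed callee-saved int regs, not including RBP
        unsigned calleeXmmRegsSaved   = 2;  // callee-saved xmm regs spilled into the frame
        unsigned outgoingArgSpaceSize = 32; // 4 slots, since the funclet makes calls

        unsigned totalFrameSize = REGSIZE_BYTES                          // return address
                                  + REGSIZE_BYTES                        // pushed RBP
                                  + calleeIntRegsPushed * REGSIZE_BYTES; // pushed callee-saved int regs

        unsigned calleeFPRegsSavedSize = calleeXmmRegsSaved * XMM_REGSIZE_BYTES;
        unsigned FPRegsPad = (calleeFPRegsSavedSize > 0) ? alignmentPad(totalFrameSize, XMM_REGSIZE_BYTES) : 0;

        totalFrameSize += FPRegsPad + calleeFPRegsSavedSize // xmm save area, aligned for movaps
                          + REGSIZE_BYTES                   // PSPSym
                          + outgoingArgSpaceSize;           // outgoing arg space

        unsigned pad = alignmentPad(totalFrameSize, 16);    // keep SP 16-byte aligned

        unsigned fiSpDelta = FPRegsPad + calleeFPRegsSavedSize + pad
                             + REGSIZE_BYTES                 // PSPSym
                             + outgoingArgSpaceSize;         // outgoing arg space

        printf("totalFrameSize=%u pad=%u fiSpDelta=%u\n", totalFrameSize, pad, fiSpDelta);
        return 0;
    }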
@@ -10054,29 +10164,27 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
* Generates code for an EH funclet prolog.
*/
-void CodeGen::genFuncletProlog(BasicBlock* block)
+void CodeGen::genFuncletProlog(BasicBlock* block)
{
NYI("Funclet prolog");
}
-
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
-void CodeGen::genFuncletEpilog()
+void CodeGen::genFuncletEpilog()
{
NYI("Funclet epilog");
}
-
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
*/
-void CodeGen::genCaptureFuncletPrologEpilogInfo()
+void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (compiler->ehAnyFunclets())
{
@@ -10086,7 +10194,6 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
#endif // _TARGET_*
-
/*-----------------------------------------------------------------------------
*
* Set the main function PSPSym value in the frame.
@@ -10159,16 +10266,17 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo()
* correctly reported, the PSPSym could be omitted in some cases.)
***********************************
*/
-void CodeGen::genSetPSPSym(regNumber initReg,
- bool * pInitRegZeroed)
+void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->ehNeedsPSPSym())
+ {
return;
+ }
- noway_assert(isFramePointerUsed()); // We need an explicit frame pointer
- assert(compiler->lvaPSPSym != BAD_VAR_NUM); // We should have created the PSPSym variable
+ noway_assert(isFramePointerUsed()); // We need an explicit frame pointer
+ assert(compiler->lvaPSPSym != BAD_VAR_NUM); // We should have created the PSPSym variable
#if defined(_TARGET_ARM_)
@@ -10182,7 +10290,7 @@ void CodeGen::genSetPSPSym(regNumber initReg,
int SPtoCallerSPdelta = -genCallerSPtoInitialSPdelta();
- int callerSPOffs;
+ int callerSPOffs;
regNumber regBase;
if (arm_Valid_Imm_For_Add_SP(SPtoCallerSPdelta))
@@ -10190,7 +10298,7 @@ void CodeGen::genSetPSPSym(regNumber initReg,
// use the "add <reg>, sp, imm" form
callerSPOffs = SPtoCallerSPdelta;
- regBase = REG_SPBASE;
+ regBase = REG_SPBASE;
}
else
{
@@ -10200,13 +10308,13 @@ void CodeGen::genSetPSPSym(regNumber initReg,
noway_assert(arm_Valid_Imm_For_Add(FPtoCallerSPdelta, INS_FLAGS_DONT_CARE));
callerSPOffs = FPtoCallerSPdelta;
- regBase = REG_FPBASE;
+ regBase = REG_FPBASE;
}
// We will just use the initReg since it is an available register
// and we are probably done using it anyway...
regNumber regTmp = initReg;
- *pInitRegZeroed = false;
+ *pInitRegZeroed = false;
getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, regTmp, regBase, callerSPOffs);
getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
@@ -10218,7 +10326,7 @@ void CodeGen::genSetPSPSym(regNumber initReg,
// We will just use the initReg since it is an available register
// and we are probably done using it anyway...
regNumber regTmp = initReg;
- *pInitRegZeroed = false;
+ *pInitRegZeroed = false;
getEmitter()->emitIns_R_R_Imm(INS_add, EA_PTRSIZE, regTmp, REG_SPBASE, SPtoCallerSPdelta);
getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
@@ -10243,16 +10351,15 @@ void CodeGen::genSetPSPSym(regNumber initReg,
#endif // FEATURE_EH_FUNCLETS
-
/*****************************************************************************
*
* Generates code for all the function and funclet prologs and epilogs.
*/
-void CodeGen::genGeneratePrologsAndEpilogs()
+void CodeGen::genGeneratePrologsAndEpilogs()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** Before prolog / epilog generation\n");
getEmitter()->emitDispIGlist(false);
@@ -10302,7 +10409,7 @@ void CodeGen::genGeneratePrologsAndEpilogs()
getEmitter()->emitFinishPrologEpilogGeneration();
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** After prolog / epilog generation\n");
getEmitter()->emitDispIGlist(false);
@@ -10310,7 +10417,6 @@ void CodeGen::genGeneratePrologsAndEpilogs()
#endif
}
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -10331,14 +10437,11 @@ void CodeGen::genGenerateStackProbe()
// Why does the EE need such a deep probe? It should just need a couple
// of bytes, to set up a frame in the unmanaged code..
- static_assert_no_msg(
- CORINFO_STACKPROBE_DEPTH + JIT_RESERVED_STACK < compiler->eeGetPageSize());
+ static_assert_no_msg(CORINFO_STACKPROBE_DEPTH + JIT_RESERVED_STACK < compiler->eeGetPageSize());
JITDUMP("Emitting stack probe:\n");
- getEmitter()->emitIns_AR_R(INS_TEST, EA_PTRSIZE,
- REG_EAX, REG_SPBASE,
- -(CORINFO_STACKPROBE_DEPTH+JIT_RESERVED_STACK));
-
+ getEmitter()->emitIns_AR_R(INS_TEST, EA_PTRSIZE, REG_EAX, REG_SPBASE,
+ -(CORINFO_STACKPROBE_DEPTH + JIT_RESERVED_STACK));
}
#endif // STACK_PROBES
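The probe emitted above touches [SP - (CORINFO_STACKPROBE_DEPTH + JIT_RESERVED_STACK)] so the guard page is committed before the EE's unmanaged helper builds its frame; the static_assert keeps that depth within a single page, since one probe can only commit one page. A tiny sketch of the invariant with assumed placeholder constants (the real ones come from corinfo.h and the VM):

    #include <cassert>
    #include <cstdio>

    int main()
    {
        // Assumed placeholder values; the real constants come from corinfo.h and the VM.
        const unsigned CORINFO_STACKPROBE_DEPTH = 0x600;
        const unsigned JIT_RESERVED_STACK       = 0x200;
        const unsigned pageSize                 = 0x1000;

        // A single "test eax, [esp - depth]" can only commit one page, so the total
        // probe depth must stay below the page size, as the static_assert above requires.
        int probeOffset = -(int)(CORINFO_STACKPROBE_DEPTH + JIT_RESERVED_STACK);
        assert(CORINFO_STACKPROBE_DEPTH + JIT_RESERVED_STACK < pageSize);

        printf("test eax, [esp%d]\n", probeOffset);
        return 0;
    }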
@@ -10347,29 +10450,34 @@ void CodeGen::genGenerateStackProbe()
* Record the constant and return a tree node that yields its address.
*/
-GenTreePtr CodeGen::genMakeConst(const void * cnsAddr,
- var_types cnsType,
- GenTreePtr cnsTree,
- bool dblAlign)
-{
+GenTreePtr CodeGen::genMakeConst(const void* cnsAddr, var_types cnsType, GenTreePtr cnsTree, bool dblAlign)
+{
// Assign the constant an offset in the data section
- UNATIVE_OFFSET cnsSize = genTypeSize(cnsType);
- UNATIVE_OFFSET cnum = getEmitter()->emitDataConst(cnsAddr, cnsSize, dblAlign);
+ UNATIVE_OFFSET cnsSize = genTypeSize(cnsType);
+ UNATIVE_OFFSET cnum = getEmitter()->emitDataConst(cnsAddr, cnsSize, dblAlign);
-#ifdef DEBUG
- if (compiler->opts.dspCode)
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
{
printf(" @%s%02u ", "CNS", cnum);
switch (cnsType)
{
- case TYP_INT : printf("DD %d \n", *(int *)cnsAddr); break;
- case TYP_LONG : printf("DQ %lld\n", *(__int64 *)cnsAddr); break;
- case TYP_FLOAT : printf("DF %f \n", *(float *)cnsAddr); break;
- case TYP_DOUBLE: printf("DQ %lf\n", *(double *)cnsAddr); break;
+ case TYP_INT:
+ printf("DD %d \n", *(int*)cnsAddr);
+ break;
+ case TYP_LONG:
+ printf("DQ %lld\n", *(__int64*)cnsAddr);
+ break;
+ case TYP_FLOAT:
+ printf("DF %f \n", *(float*)cnsAddr);
+ break;
+ case TYP_DOUBLE:
+ printf("DQ %lf\n", *(double*)cnsAddr);
+ break;
- default:
- noway_assert(!"unexpected constant type");
+ default:
+ noway_assert(!"unexpected constant type");
}
}
#endif
@@ -10377,7 +10485,7 @@ GenTreePtr CodeGen::genMakeConst(const void * cnsAddr,
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
-
+
return new (compiler, GT_CLS_VAR) GenTreeClsVar(cnsType, compiler->eeFindJitDataOffs(cnum), nullptr);
}
@@ -10390,27 +10498,29 @@ GenTreePtr CodeGen::genMakeConst(const void * cnsAddr,
// lclFrameSize - Fixed frame size excluding callee pushed int regs.
// non-funclet: this will be compLclFrameSize.
// funclet frames: this will be FuncletInfo.fiSpDelta.
-void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize)
+void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize)
{
- regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
+ regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
// Only callee saved floating point registers should be in regMask
- assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
+ assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
// fast path return
- if (regMask == RBM_NONE)
+ if (regMask == RBM_NONE)
+ {
return;
+ }
#ifdef _TARGET_AMD64_
unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
- unsigned offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
+ unsigned offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
// Offset is 16-byte aligned since we use movaps for preserving xmm regs.
assert((offset % 16) == 0);
instruction copyIns = ins_Copy(TYP_FLOAT);
-#else // !_TARGET_AMD64_
- unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES;
- instruction copyIns = INS_movupd;
+#else // !_TARGET_AMD64_
+ unsigned offset = lclFrameSize - XMM_REGSIZE_BYTES;
+ instruction copyIns = INS_movupd;
#endif // !_TARGET_AMD64_
for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg))
@@ -10420,10 +10530,9 @@ void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize
{
// ABI requires us to preserve lower 128-bits of YMM register.
getEmitter()->emitIns_AR_R(copyIns,
- EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be EA_16BYTE
- reg,
- REG_SPBASE,
- offset);
+ EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
+ // EA_16BYTE
+ reg, REG_SPBASE, offset);
compiler->unwindSaveReg(reg, offset);
regMask &= ~regBit;
offset -= XMM_REGSIZE_BYTES;
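The loop above stores each callee-saved 128-bit register at a descending, 16-byte-aligned offset from SP; on AMD64 an extra 8 bytes of padding is inserted when an even number of integer registers was pushed, so the first slot is movaps-friendly. A standalone sketch of the offset sequence with assumed example inputs:

    #include <cassert>
    #include <cstdio>

    int main()
    {
        const unsigned REGSIZE_BYTES     = 8;
        const unsigned XMM_REGSIZE_BYTES = 16;

        // Assumed example inputs.
        unsigned lclFrameSize        = 0x90;  // fixed frame size, excluding pushed int regs
        bool     intRegCountEven     = false; // an odd number of callee-saved int regs was pushed here
        unsigned calleeSavedXmmCount = 3;

        // AMD64: add 8 bytes of padding when an even number of int regs was pushed so the
        // first xmm slot lands on a 16-byte boundary (movaps requires the alignment).
        unsigned firstFPRegPadding = intRegCountEven ? REGSIZE_BYTES : 0;
        unsigned offset            = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
        assert((offset % 16) == 0);

        for (unsigned i = 0; i < calleeSavedXmmCount; i++)
        {
            printf("save xmm slot %u at [rsp+0x%x]\n", i, offset);
            offset -= XMM_REGSIZE_BYTES;
        }
        return 0;
    }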
@@ -10432,7 +10541,7 @@ void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize
#ifdef FEATURE_AVX_SUPPORT
// Just before restoring float registers issue a Vzeroupper to zero out upper 128-bits of all YMM regs.
- // This is to avoid penalty if this routine is using AVX-256 and now returning to a routine that is
+ // This is to avoid penalty if this routine is using AVX-256 and now returning to a routine that is
// using SSE2.
if (compiler->getFloatingPointInstructionSet() == InstructionSet_AVX)
{
@@ -10449,39 +10558,41 @@ void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize
// lclFrameSize - Fixed frame size excluding callee pushed int regs.
// non-funclet: this will be compLclFrameSize.
// funclet frames: this will be FuncletInfo.fiSpDelta.
-void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize)
+void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize)
{
- regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
-
+ regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
+
// Only callee saved floating point registers should be in regMask
- assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
+ assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
// fast path return
- if (regMask == RBM_NONE)
+ if (regMask == RBM_NONE)
+ {
return;
+ }
#ifdef _TARGET_AMD64_
- unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
- instruction copyIns = ins_Copy(TYP_FLOAT);
-#else // !_TARGET_AMD64_
- unsigned firstFPRegPadding = 0;
- instruction copyIns = INS_movupd;
+ unsigned firstFPRegPadding = compiler->lvaIsCalleeSavedIntRegCountEven() ? REGSIZE_BYTES : 0;
+ instruction copyIns = ins_Copy(TYP_FLOAT);
+#else // !_TARGET_AMD64_
+ unsigned firstFPRegPadding = 0;
+ instruction copyIns = INS_movupd;
#endif // !_TARGET_AMD64_
- unsigned offset;
+ unsigned offset;
regNumber regBase;
if (compiler->compLocallocUsed)
{
// localloc frame: use frame pointer relative offset
assert(isFramePointerUsed());
- regBase = REG_FPBASE;
- offset = lclFrameSize - genSPtoFPdelta() - firstFPRegPadding - XMM_REGSIZE_BYTES;
+ regBase = REG_FPBASE;
+ offset = lclFrameSize - genSPtoFPdelta() - firstFPRegPadding - XMM_REGSIZE_BYTES;
}
else
{
regBase = REG_SPBASE;
- offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
- }
+ offset = lclFrameSize - firstFPRegPadding - XMM_REGSIZE_BYTES;
+ }
#ifdef _TARGET_AMD64_
// Offset is 16-byte aligned since we use movaps for restoring xmm regs
@@ -10490,7 +10601,7 @@ void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize)
#ifdef FEATURE_AVX_SUPPORT
// Just before restoring float registers issue a Vzeroupper to zero out upper 128-bits of all YMM regs.
- // This is to avoid penalty if this routine is using AVX-256 and now returning to a routine that is
+ // This is to avoid penalty if this routine is using AVX-256 and now returning to a routine that is
// using SSE2.
if (compiler->getFloatingPointInstructionSet() == InstructionSet_AVX)
{
@@ -10505,10 +10616,9 @@ void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize)
{
// ABI requires us to restore lower 128-bits of YMM register.
getEmitter()->emitIns_R_AR(copyIns,
- EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be EA_16BYTE
- reg,
- regBase,
- offset);
+ EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
+ // EA_16BYTE
+ reg, regBase, offset);
regMask &= ~regBit;
offset -= XMM_REGSIZE_BYTES;
}
@@ -10518,7 +10628,7 @@ void CodeGen::genRestoreCalleeSavedFltRegs(unsigned lclFrameSize)
//-----------------------------------------------------------------------------------
// IsMultiRegPassedType: Returns true if the type is returned in multiple registers
-//
+//
// Arguments:
// hClass - type handle
//
@@ -10533,14 +10643,14 @@ bool Compiler::IsMultiRegPassedType(CORINFO_CLASS_HANDLE hClass)
}
structPassingKind howToPassStruct;
- var_types returnType = getArgTypeForStruct(hClass, &howToPassStruct);
-
+ var_types returnType = getArgTypeForStruct(hClass, &howToPassStruct);
+
return (returnType == TYP_STRUCT);
}
//-----------------------------------------------------------------------------------
// IsMultiRegReturnedType: Returns true if the type is returned in multiple registers
-//
+//
// Arguments:
// hClass - type handle
//
@@ -10555,8 +10665,8 @@ bool Compiler::IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass)
}
structPassingKind howToReturnStruct;
- var_types returnType = getReturnTypeForStruct(hClass, &howToReturnStruct);
-
+ var_types returnType = getReturnTypeForStruct(hClass, &howToReturnStruct);
+
return (returnType == TYP_STRUCT);
}
@@ -10622,7 +10732,7 @@ var_types Compiler::GetHfaType(CORINFO_CLASS_HANDLE hClass)
// the double precision registers and for that reason each
// double register is considered to be two single registers.
// Thus for ARM32 an HFA of 4 doubles this function will return 8.
-// On ARM64 given an HFA of 4 singles or 4 doubles this function will
+// On ARM64 given an HFA of 4 singles or 4 doubles this function will
// return 4 for both.
// Arguments:
// hClass: the class handle of a HFA struct
@@ -10634,10 +10744,10 @@ unsigned Compiler::GetHfaCount(CORINFO_CLASS_HANDLE hClass)
// A HFA of doubles is twice as large as an HFA of singles for ARM32
// (i.e. uses twice the number of single precision registers)
return info.compCompHnd->getClassSize(hClass) / REGSIZE_BYTES;
-#else // _TARGET_ARM64_
- var_types hfaType = GetHfaType(hClass);
- unsigned classSize = info.compCompHnd->getClassSize(hClass);
- // Note that the retail build issues a warning about a potential division by zero without the Max function
+#else // _TARGET_ARM64_
+ var_types hfaType = GetHfaType(hClass);
+ unsigned classSize = info.compCompHnd->getClassSize(hClass);
+ // Note that the retail build issues a warning about a potential division by zero without the Max function
unsigned elemSize = Max((unsigned)1, EA_SIZE_IN_BYTES(emitActualTypeSize(hfaType)));
return classSize / elemSize;
#endif // _TARGET_ARM64_
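The ARM32/ARM64 split described in the comments above can be seen with a concrete HFA of four doubles: ARM32 charges two single-precision registers per double, while ARM64 counts elements. A small sketch (the sizes below are the usual ones, stated as assumptions):

    #include <cstdio>

    int main()
    {
        // Assumed example: an HFA containing 4 doubles (32 bytes).
        unsigned classSize   = 32;
        unsigned hfaElemSize = 8; // sizeof(double)

        // ARM32: each double occupies two single-precision registers -> 32 / 4 = 8.
        unsigned arm32Count = classSize / 4 /* REGSIZE_BYTES on ARM32 */;

        // ARM64: count the actual elements -> 32 / 8 = 4 (an HFA of 4 singles also gives 4).
        unsigned elemSize   = (hfaElemSize > 1) ? hfaElemSize : 1; // division-by-zero guard, as above
        unsigned arm64Count = classSize / elemSize;

        printf("ARM32 HFA count: %u, ARM64 HFA count: %u\n", arm32Count, arm64Count);
        return 0;
    }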
@@ -10656,12 +10766,7 @@ unsigned Compiler::GetHfaCount(CORINFO_CLASS_HANDLE hClass)
//
instruction CodeGen::genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue)
{
- assert(ins == INS_rcl ||
- ins == INS_rcr ||
- ins == INS_rol ||
- ins == INS_ror ||
- ins == INS_shl ||
- ins == INS_shr ||
+ assert(ins == INS_rcl || ins == INS_rcr || ins == INS_rol || ins == INS_ror || ins == INS_shl || ins == INS_shr ||
ins == INS_sar);
// Which format should we use?
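"Which format" refers to the two xarch encodings for shifting by a constant: the dedicated shift-by-1 form (e.g. shl r/m, 1, opcode D1 /4) is a byte shorter than the shift-by-imm8 form (C1 /4 ib), so a constant of 1 gets the short form. A hedged standalone sketch of that selection; the enum and helper below are illustrative stand-ins, not the JIT's real instruction identifiers:

    #include <cstdio>

    // Illustrative stand-ins; the JIT keeps distinct instruction entries for the
    // shift-by-1 and shift-by-imm8 encodings of each shift/rotate.
    enum ShiftForm
    {
        SHIFT_BY_ONE, // e.g. "shl r/m, 1"    (opcode D1 /4) - one byte shorter
        SHIFT_BY_IMM  // e.g. "shl r/m, imm8" (opcode C1 /4 ib)
    };

    static ShiftForm mapShiftByConstant(int shiftByValue)
    {
        return (shiftByValue == 1) ? SHIFT_BY_ONE : SHIFT_BY_IMM;
    }

    int main()
    {
        printf("shift by 1 -> %s\n", (mapShiftByConstant(1) == SHIFT_BY_ONE) ? "1-form" : "imm8-form");
        printf("shift by 5 -> %s\n", (mapShiftByConstant(5) == SHIFT_BY_ONE) ? "1-form" : "imm8-form");
        return 0;
    }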
@@ -10718,8 +10823,7 @@ instruction CodeGen::genMapShiftInsToShiftByConstantIns(instruction ins, int shi
// the first stack passed argument from the caller. This is done by iterating over
// all the lvParam variables and finding the first with lvArgReg equals to REG_STK.
//
-unsigned
-CodeGen::getFirstArgWithStackSlot()
+unsigned CodeGen::getFirstArgWithStackSlot()
{
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARM64_)
unsigned baseVarNum = 0;
@@ -10778,14 +10882,18 @@ CodeGen::getFirstArgWithStackSlot()
* have been finalized.
*/
-void CodeGen::genSetScopeInfo()
+void CodeGen::genSetScopeInfo()
{
if (!compiler->opts.compScopeInfo)
+ {
return;
+ }
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In genSetScopeInfo()\n");
+ }
#endif
if (compiler->info.compVarScopesCount == 0)
@@ -10796,17 +10904,19 @@ void CodeGen::genSetScopeInfo()
}
noway_assert(compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0));
- noway_assert(psiOpenScopeList.scNext == NULL);
+ noway_assert(psiOpenScopeList.scNext == nullptr);
- unsigned i;
- unsigned scopeCnt = siScopeCnt + psiScopeCnt;
+ unsigned i;
+ unsigned scopeCnt = siScopeCnt + psiScopeCnt;
compiler->eeSetLVcount(scopeCnt);
#ifdef DEBUG
- genTrnslLocalVarCount = scopeCnt;
+ genTrnslLocalVarCount = scopeCnt;
if (scopeCnt)
+ {
genTrnslLocalVarInfo = new (compiler, CMK_DebugOnly) TrnslLocalVarInfo[scopeCnt];
+ }
#endif
// Record the scopes found for the parameters over the prolog.
@@ -10814,18 +10924,16 @@ void CodeGen::genSetScopeInfo()
// have the same info in the prolog block as is given by compiler->lvaTable.
// eg. A register parameter is actually on the stack, before it is loaded to reg.
- CodeGen::psiScope * scopeP;
+ CodeGen::psiScope* scopeP;
- for (i=0, scopeP = psiScopeList.scNext;
- i < psiScopeCnt;
- i++, scopeP = scopeP->scNext)
+ for (i = 0, scopeP = psiScopeList.scNext; i < psiScopeCnt; i++, scopeP = scopeP->scNext)
{
noway_assert(scopeP != nullptr);
noway_assert(scopeP->scStartLoc.Valid());
noway_assert(scopeP->scEndLoc.Valid());
- UNATIVE_OFFSET startOffs = scopeP->scStartLoc.CodeOffset(getEmitter());
- UNATIVE_OFFSET endOffs = scopeP->scEndLoc.CodeOffset(getEmitter());
+ UNATIVE_OFFSET startOffs = scopeP->scStartLoc.CodeOffset(getEmitter());
+ UNATIVE_OFFSET endOffs = scopeP->scEndLoc.CodeOffset(getEmitter());
unsigned varNum = scopeP->scSlotNum;
noway_assert(startOffs <= endOffs);
@@ -10843,34 +10951,30 @@ void CodeGen::genSetScopeInfo()
endOffs++;
}
- Compiler::siVarLoc varLoc;
+ Compiler::siVarLoc varLoc;
if (scopeP->scRegister)
{
- varLoc.vlType = Compiler::VLT_REG;
- varLoc.vlReg.vlrReg = (regNumber) scopeP->u1.scRegNum;
+ varLoc.vlType = Compiler::VLT_REG;
+ varLoc.vlReg.vlrReg = (regNumber)scopeP->u1.scRegNum;
}
else
{
varLoc.vlType = Compiler::VLT_STK;
- varLoc.vlStk.vlsBaseReg = (regNumber) scopeP->u2.scBaseReg;
+ varLoc.vlStk.vlsBaseReg = (regNumber)scopeP->u2.scBaseReg;
varLoc.vlStk.vlsOffset = scopeP->u2.scOffset;
}
- genSetScopeInfo(i,
- startOffs, endOffs-startOffs, varNum, scopeP->scLVnum,
- true, varLoc);
+ genSetScopeInfo(i, startOffs, endOffs - startOffs, varNum, scopeP->scLVnum, true, varLoc);
}
// Record the scopes for the rest of the method.
// Check that the LocalVarInfo scopes look OK
- noway_assert(siOpenScopeList.scNext == NULL);
+ noway_assert(siOpenScopeList.scNext == nullptr);
- CodeGen::siScope * scopeL;
+ CodeGen::siScope* scopeL;
- for (i=0, scopeL = siScopeList.scNext;
- i < siScopeCnt;
- i++, scopeL = scopeL->scNext)
+ for (i = 0, scopeL = siScopeList.scNext; i < siScopeCnt; i++, scopeL = scopeL->scNext)
{
noway_assert(scopeL != nullptr);
noway_assert(scopeL->scStartLoc.Valid());
@@ -10878,29 +10982,29 @@ void CodeGen::genSetScopeInfo()
// Find the start and end IP
- UNATIVE_OFFSET startOffs = scopeL->scStartLoc.CodeOffset(getEmitter());
- UNATIVE_OFFSET endOffs = scopeL->scEndLoc.CodeOffset(getEmitter());
+ UNATIVE_OFFSET startOffs = scopeL->scStartLoc.CodeOffset(getEmitter());
+ UNATIVE_OFFSET endOffs = scopeL->scEndLoc.CodeOffset(getEmitter());
noway_assert(scopeL->scStartLoc != scopeL->scEndLoc);
// For stack vars, find the base register, and offset
- regNumber baseReg;
- signed offset = compiler->lvaTable[scopeL->scVarNum].lvStkOffs;
+ regNumber baseReg;
+ signed offset = compiler->lvaTable[scopeL->scVarNum].lvStkOffs;
if (!compiler->lvaTable[scopeL->scVarNum].lvFramePointerBased)
{
- baseReg = REG_SPBASE;
- offset += scopeL->scStackLevel;
+ baseReg = REG_SPBASE;
+ offset += scopeL->scStackLevel;
}
else
{
- baseReg = REG_FPBASE;
+ baseReg = REG_FPBASE;
}
// Now fill in the varLoc
- Compiler::siVarLoc varLoc;
+ Compiler::siVarLoc varLoc;
// TODO-Review: This only works for always-enregistered variables. With LSRA, a variable might be in a register
// for part of its lifetime, or in different registers for different parts of its lifetime.
@@ -10911,85 +11015,86 @@ void CodeGen::genSetScopeInfo()
var_types type = genActualType(compiler->lvaTable[scopeL->scVarNum].TypeGet());
switch (type)
{
- case TYP_INT:
- case TYP_REF:
- case TYP_BYREF:
+ case TYP_INT:
+ case TYP_REF:
+ case TYP_BYREF:
#ifdef _TARGET_64BIT_
- case TYP_LONG:
+ case TYP_LONG:
#endif // _TARGET_64BIT_
- varLoc.vlType = Compiler::VLT_REG;
- varLoc.vlReg.vlrReg = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
- break;
+ varLoc.vlType = Compiler::VLT_REG;
+ varLoc.vlReg.vlrReg = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
+ break;
#ifndef _TARGET_64BIT_
- case TYP_LONG:
-#if!CPU_HAS_FP_SUPPORT
- case TYP_DOUBLE:
+ case TYP_LONG:
+#if !CPU_HAS_FP_SUPPORT
+ case TYP_DOUBLE:
#endif
- if (compiler->lvaTable[scopeL->scVarNum].lvOtherReg != REG_STK)
- {
- varLoc.vlType = Compiler::VLT_REG_REG;
- varLoc.vlRegReg.vlrrReg1 = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
- varLoc.vlRegReg.vlrrReg2 = compiler->lvaTable[scopeL->scVarNum].lvOtherReg;
- }
- else
- {
- varLoc.vlType = Compiler::VLT_REG_STK;
- varLoc.vlRegStk.vlrsReg = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
- varLoc.vlRegStk.vlrsStk.vlrssBaseReg = baseReg;
- if (!isFramePointerUsed() && varLoc.vlRegStk.vlrsStk.vlrssBaseReg == REG_SPBASE) {
- varLoc.vlRegStk.vlrsStk.vlrssBaseReg = (regNumber) ICorDebugInfo::REGNUM_AMBIENT_SP;
+ if (compiler->lvaTable[scopeL->scVarNum].lvOtherReg != REG_STK)
+ {
+ varLoc.vlType = Compiler::VLT_REG_REG;
+ varLoc.vlRegReg.vlrrReg1 = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
+ varLoc.vlRegReg.vlrrReg2 = compiler->lvaTable[scopeL->scVarNum].lvOtherReg;
}
- varLoc.vlRegStk.vlrsStk.vlrssOffset = offset + sizeof(int);
- }
- break;
+ else
+ {
+ varLoc.vlType = Compiler::VLT_REG_STK;
+ varLoc.vlRegStk.vlrsReg = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
+ varLoc.vlRegStk.vlrsStk.vlrssBaseReg = baseReg;
+ if (!isFramePointerUsed() && varLoc.vlRegStk.vlrsStk.vlrssBaseReg == REG_SPBASE)
+ {
+ varLoc.vlRegStk.vlrsStk.vlrssBaseReg = (regNumber)ICorDebugInfo::REGNUM_AMBIENT_SP;
+ }
+ varLoc.vlRegStk.vlrsStk.vlrssOffset = offset + sizeof(int);
+ }
+ break;
#endif // !_TARGET_64BIT_
#ifdef _TARGET_64BIT_
- case TYP_FLOAT:
- case TYP_DOUBLE:
- // TODO-AMD64-Bug: ndp\clr\src\inc\corinfo.h has a definition of RegNum that only goes up to R15,
- // so no XMM registers can get debug information.
- varLoc.vlType = Compiler::VLT_REG_FP;
- varLoc.vlReg.vlrReg = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
- break;
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ // TODO-AMD64-Bug: ndp\clr\src\inc\corinfo.h has a definition of RegNum that only goes up to R15,
+ // so no XMM registers can get debug information.
+ varLoc.vlType = Compiler::VLT_REG_FP;
+ varLoc.vlReg.vlrReg = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
+ break;
#else // !_TARGET_64BIT_
#if CPU_HAS_FP_SUPPORT
- case TYP_FLOAT:
- case TYP_DOUBLE:
- if (isFloatRegType(type))
- {
- varLoc.vlType = Compiler::VLT_FPSTK;
- varLoc.vlFPstk.vlfReg = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
- }
- break;
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ if (isFloatRegType(type))
+ {
+ varLoc.vlType = Compiler::VLT_FPSTK;
+ varLoc.vlFPstk.vlfReg = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
+ }
+ break;
#endif // CPU_HAS_FP_SUPPORT
#endif // !_TARGET_64BIT_
#ifdef FEATURE_SIMD
- case TYP_SIMD8:
- case TYP_SIMD12:
- case TYP_SIMD16:
- case TYP_SIMD32:
- varLoc.vlType = Compiler::VLT_REG_FP;
-
- // TODO-AMD64-Bug: ndp\clr\src\inc\corinfo.h has a definition of RegNum that only goes up to R15,
- // so no XMM registers can get debug information.
- //
- // Note: Need to initialize vlrReg field, otherwise during jit dump hitting an assert
- // in eeDispVar() --> getRegName() that regNumber is valid.
- varLoc.vlReg.vlrReg = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
- break;
+ case TYP_SIMD8:
+ case TYP_SIMD12:
+ case TYP_SIMD16:
+ case TYP_SIMD32:
+ varLoc.vlType = Compiler::VLT_REG_FP;
+
+ // TODO-AMD64-Bug: ndp\clr\src\inc\corinfo.h has a definition of RegNum that only goes up to R15,
+ // so no XMM registers can get debug information.
+ //
+ // Note: Need to initialize vlrReg field, otherwise during jit dump hitting an assert
+ // in eeDispVar() --> getRegName() that regNumber is valid.
+ varLoc.vlReg.vlrReg = compiler->lvaTable[scopeL->scVarNum].lvRegNum;
+ break;
#endif // FEATURE_SIMD
- default:
- noway_assert(!"Invalid type");
+ default:
+ noway_assert(!"Invalid type");
}
}
else
@@ -10998,69 +11103,70 @@ void CodeGen::genSetScopeInfo()
LclVarDsc* varDsc = compiler->lvaTable + scopeL->scVarNum;
switch (genActualType(varDsc->TypeGet()))
{
- case TYP_INT:
- case TYP_REF:
- case TYP_BYREF:
- case TYP_FLOAT:
- case TYP_STRUCT:
- case TYP_BLK: //Needed because of the TYP_BLK stress mode
+ case TYP_INT:
+ case TYP_REF:
+ case TYP_BYREF:
+ case TYP_FLOAT:
+ case TYP_STRUCT:
+ case TYP_BLK: // Needed because of the TYP_BLK stress mode
#ifdef FEATURE_SIMD
- case TYP_SIMD8:
- case TYP_SIMD12:
- case TYP_SIMD16:
- case TYP_SIMD32:
+ case TYP_SIMD8:
+ case TYP_SIMD12:
+ case TYP_SIMD16:
+ case TYP_SIMD32:
#endif
#ifdef _TARGET_64BIT_
- case TYP_LONG:
- case TYP_DOUBLE:
+ case TYP_LONG:
+ case TYP_DOUBLE:
#endif // _TARGET_64BIT_
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
- // In the AMD64 ABI we are supposed to pass a struct by reference when its
- // size is not 1, 2, 4 or 8 bytes in size. During fgMorph, the compiler modifies
- // the IR to comply with the ABI and therefore changes the type of the lclVar
- // that holds the struct from TYP_STRUCT to TYP_BYREF but it gives us a hint that
- // this is still a struct by setting the lvIsTemp flag.
- // The same is true for ARM64 and structs > 16 bytes.
- // (See Compiler::fgMarkImplicitByRefArgs in Morph.cpp for further detail)
- // Now, the VM expects a special enum for these types of local vars: VLT_STK_BYREF
- // to accommodate for this situation.
- if (varDsc->lvType == TYP_BYREF && varDsc->lvIsTemp)
- {
- assert(varDsc->lvIsParam);
- varLoc.vlType = Compiler::VLT_STK_BYREF;
- }
- else
+ // In the AMD64 ABI we are supposed to pass a struct by reference when its
+ // size is not 1, 2, 4 or 8 bytes in size. During fgMorph, the compiler modifies
+ // the IR to comply with the ABI and therefore changes the type of the lclVar
+ // that holds the struct from TYP_STRUCT to TYP_BYREF but it gives us a hint that
+ // this is still a struct by setting the lvIsTemp flag.
+ // The same is true for ARM64 and structs > 16 bytes.
+ // (See Compiler::fgMarkImplicitByRefArgs in Morph.cpp for further detail)
+ // Now, the VM expects a special enum for these types of local vars: VLT_STK_BYREF
+ // to accommodate for this situation.
+ if (varDsc->lvType == TYP_BYREF && varDsc->lvIsTemp)
+ {
+ assert(varDsc->lvIsParam);
+ varLoc.vlType = Compiler::VLT_STK_BYREF;
+ }
+ else
#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
- {
- varLoc.vlType = Compiler::VLT_STK;
- }
- varLoc.vlStk.vlsBaseReg = baseReg;
- varLoc.vlStk.vlsOffset = offset;
- if (!isFramePointerUsed() && varLoc.vlStk.vlsBaseReg == REG_SPBASE) {
- varLoc.vlStk.vlsBaseReg = (regNumber) ICorDebugInfo::REGNUM_AMBIENT_SP;
- }
- break;
+ {
+ varLoc.vlType = Compiler::VLT_STK;
+ }
+ varLoc.vlStk.vlsBaseReg = baseReg;
+ varLoc.vlStk.vlsOffset = offset;
+ if (!isFramePointerUsed() && varLoc.vlStk.vlsBaseReg == REG_SPBASE)
+ {
+ varLoc.vlStk.vlsBaseReg = (regNumber)ICorDebugInfo::REGNUM_AMBIENT_SP;
+ }
+ break;
#ifndef _TARGET_64BIT_
- case TYP_LONG:
- case TYP_DOUBLE:
- varLoc.vlType = Compiler::VLT_STK2;
- varLoc.vlStk2.vls2BaseReg = baseReg;
- varLoc.vlStk2.vls2Offset = offset;
- if (!isFramePointerUsed() && varLoc.vlStk2.vls2BaseReg == REG_SPBASE) {
- varLoc.vlStk2.vls2BaseReg = (regNumber) ICorDebugInfo::REGNUM_AMBIENT_SP;
- }
- break;
+ case TYP_LONG:
+ case TYP_DOUBLE:
+ varLoc.vlType = Compiler::VLT_STK2;
+ varLoc.vlStk2.vls2BaseReg = baseReg;
+ varLoc.vlStk2.vls2Offset = offset;
+ if (!isFramePointerUsed() && varLoc.vlStk2.vls2BaseReg == REG_SPBASE)
+ {
+ varLoc.vlStk2.vls2BaseReg = (regNumber)ICorDebugInfo::REGNUM_AMBIENT_SP;
+ }
+ break;
#endif // !_TARGET_64BIT_
- default:
- noway_assert(!"Invalid type");
+ default:
+ noway_assert(!"Invalid type");
}
}
- genSetScopeInfo(psiScopeCnt + i,
- startOffs, endOffs-startOffs, scopeL->scVarNum, scopeL->scLVnum,
- scopeL->scAvailable, varLoc);
+ genSetScopeInfo(psiScopeCnt + i, startOffs, endOffs - startOffs, scopeL->scVarNum, scopeL->scLVnum,
+ scopeL->scAvailable, varLoc);
}
compiler->eeSetLVdone();
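The scope-reporting loops above reduce to one decision per variable: enregistered vars are reported as VLT_REG, stack vars as VLT_STK with a base register and offset, and when the method has no frame pointer an SP-based location is reported against the debugger's ambient-SP pseudo-register. A hedged standalone mock of the stack-var case; the types and the AMBIENT_SP value below are illustrative only:

    #include <cstdio>

    // Illustrative stand-ins for the real Compiler::siVarLoc / ICorDebugInfo types.
    enum VarLocType { VLT_REG, VLT_STK };
    const int REG_SPBASE        = 4;   // rsp/esp
    const int REG_FPBASE        = 5;   // rbp/ebp
    const int REGNUM_AMBIENT_SP = 100; // debugger pseudo-register (assumed value)

    struct VarLoc
    {
        VarLocType type;
        int        reg;    // VLT_REG: the register; VLT_STK: the base register
        int        offset; // VLT_STK only
    };

    static VarLoc describeStackVar(int baseReg, int offset, bool framePointerUsed)
    {
        VarLoc loc;
        loc.type   = VLT_STK;
        loc.reg    = baseReg;
        loc.offset = offset;
        // Without a frame pointer, SP-relative locations are reported against the
        // ambient SP so the debugger can adjust for the current stack level.
        if (!framePointerUsed && loc.reg == REG_SPBASE)
        {
            loc.reg = REGNUM_AMBIENT_SP;
        }
        return loc;
    }

    int main()
    {
        VarLoc loc = describeStackVar(REG_SPBASE, 0x18, /* framePointerUsed */ false);
        printf("VLT_STK base=%d offset=0x%x\n", loc.reg, loc.offset);
        return 0;
    }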
@@ -11076,9 +11182,7 @@ void CodeGen::genSetScopeInfo()
*/
/* virtual */
-const char* CodeGen::siRegVarName(size_t offs,
- size_t size,
- unsigned reg)
+const char* CodeGen::siRegVarName(size_t offs, size_t size, unsigned reg)
{
if (!compiler->opts.compScopeInfo)
return nullptr;
@@ -11086,21 +11190,16 @@ const char* CodeGen::siRegVarName(size_t offs,
if (compiler->info.compVarScopesCount == 0)
return nullptr;
- noway_assert(genTrnslLocalVarCount==0 || genTrnslLocalVarInfo);
+ noway_assert(genTrnslLocalVarCount == 0 || genTrnslLocalVarInfo);
- for (unsigned i=0; i<genTrnslLocalVarCount; i++)
+ for (unsigned i = 0; i < genTrnslLocalVarCount; i++)
{
- if ( (genTrnslLocalVarInfo[i].tlviVarLoc.vlIsInReg((regNumber)reg))
- && (genTrnslLocalVarInfo[i].tlviAvailable == true)
- && (genTrnslLocalVarInfo[i].tlviStartPC <= offs+size)
- && (genTrnslLocalVarInfo[i].tlviStartPC
- + genTrnslLocalVarInfo[i].tlviLength > offs)
- )
+ if ((genTrnslLocalVarInfo[i].tlviVarLoc.vlIsInReg((regNumber)reg)) &&
+ (genTrnslLocalVarInfo[i].tlviAvailable == true) && (genTrnslLocalVarInfo[i].tlviStartPC <= offs + size) &&
+ (genTrnslLocalVarInfo[i].tlviStartPC + genTrnslLocalVarInfo[i].tlviLength > offs))
{
- return genTrnslLocalVarInfo[i].tlviName ?
- compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL;
+ return genTrnslLocalVarInfo[i].tlviName ? compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL;
}
-
}
return NULL;
@@ -11113,10 +11212,7 @@ const char* CodeGen::siRegVarName(size_t offs,
*/
/* virtual */
-const char* CodeGen::siStackVarName(size_t offs,
- size_t size,
- unsigned reg,
- unsigned stkOffs)
+const char* CodeGen::siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs)
{
if (!compiler->opts.compScopeInfo)
return nullptr;
@@ -11124,19 +11220,15 @@ const char* CodeGen::siStackVarName(size_t offs,
if (compiler->info.compVarScopesCount == 0)
return nullptr;
- noway_assert(genTrnslLocalVarCount==0 || genTrnslLocalVarInfo);
+ noway_assert(genTrnslLocalVarCount == 0 || genTrnslLocalVarInfo);
- for (unsigned i=0; i<genTrnslLocalVarCount; i++)
+ for (unsigned i = 0; i < genTrnslLocalVarCount; i++)
{
- if ( (genTrnslLocalVarInfo[i].tlviVarLoc.vlIsOnStk((regNumber)reg, stkOffs))
- && (genTrnslLocalVarInfo[i].tlviAvailable == true)
- && (genTrnslLocalVarInfo[i].tlviStartPC <= offs+size)
- && (genTrnslLocalVarInfo[i].tlviStartPC
- + genTrnslLocalVarInfo[i].tlviLength > offs)
- )
+ if ((genTrnslLocalVarInfo[i].tlviVarLoc.vlIsOnStk((regNumber)reg, stkOffs)) &&
+ (genTrnslLocalVarInfo[i].tlviAvailable == true) && (genTrnslLocalVarInfo[i].tlviStartPC <= offs + size) &&
+ (genTrnslLocalVarInfo[i].tlviStartPC + genTrnslLocalVarInfo[i].tlviLength > offs))
{
- return genTrnslLocalVarInfo[i].tlviName ?
- compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL;
+ return genTrnslLocalVarInfo[i].tlviName ? compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL;
}
}
@@ -11153,7 +11245,7 @@ const char* CodeGen::siStackVarName(size_t offs,
* Display a IPmappingDsc. Pass -1 as mappingNum to not display a mapping number.
*/
-void CodeGen::genIPmappingDisp(unsigned mappingNum, Compiler::IPmappingDsc* ipMapping)
+void CodeGen::genIPmappingDisp(unsigned mappingNum, Compiler::IPmappingDsc* ipMapping)
{
if (mappingNum != unsigned(-1))
{
@@ -11194,14 +11286,12 @@ void CodeGen::genIPmappingDisp(unsigned mappingNum, Compiler::IPm
printf("\n");
}
-void CodeGen::genIPmappingListDisp()
+void CodeGen::genIPmappingListDisp()
{
- unsigned mappingNum = 0;
+ unsigned mappingNum = 0;
Compiler::IPmappingDsc* ipMapping;
- for (ipMapping = compiler->genIPmappingList;
- ipMapping != nullptr;
- ipMapping = ipMapping->ipmdNext)
+ for (ipMapping = compiler->genIPmappingList; ipMapping != nullptr; ipMapping = ipMapping->ipmdNext)
{
genIPmappingDisp(mappingNum, ipMapping);
++mappingNum;
@@ -11217,63 +11307,65 @@ void CodeGen::genIPmappingListDisp()
* Record the instr offset as being at the current code gen position.
*/
-void CodeGen::genIPmappingAdd(IL_OFFSETX offsx, bool isLabel)
+void CodeGen::genIPmappingAdd(IL_OFFSETX offsx, bool isLabel)
{
if (!compiler->opts.compDbgInfo)
+ {
return;
+ }
assert(offsx != BAD_IL_OFFSET);
switch ((int)offsx) // Need the cast since offs is unsigned and the case statements are comparing to signed.
{
- case ICorDebugInfo::PROLOG:
- case ICorDebugInfo::EPILOG:
- break;
+ case ICorDebugInfo::PROLOG:
+ case ICorDebugInfo::EPILOG:
+ break;
- default:
+ default:
- if (offsx != ICorDebugInfo::NO_MAPPING)
- {
- noway_assert(jitGetILoffs(offsx) <= compiler->info.compILCodeSize);
- }
+ if (offsx != ICorDebugInfo::NO_MAPPING)
+ {
+ noway_assert(jitGetILoffs(offsx) <= compiler->info.compILCodeSize);
+ }
- // Ignore this one if it's the same IL offset as the last one we saw.
- // Note that we'll let through two identical IL offsets if the flag bits
- // differ, or two identical "special" mappings (e.g., PROLOG).
- if ((compiler->genIPmappingLast != nullptr) &&
- (offsx == compiler->genIPmappingLast->ipmdILoffsx))
- {
- JITDUMP("genIPmappingAdd: ignoring duplicate IL offset 0x%x\n", offsx);
- return;
- }
- break;
+ // Ignore this one if it's the same IL offset as the last one we saw.
+ // Note that we'll let through two identical IL offsets if the flag bits
+ // differ, or two identical "special" mappings (e.g., PROLOG).
+ if ((compiler->genIPmappingLast != nullptr) && (offsx == compiler->genIPmappingLast->ipmdILoffsx))
+ {
+ JITDUMP("genIPmappingAdd: ignoring duplicate IL offset 0x%x\n", offsx);
+ return;
+ }
+ break;
}
/* Create a mapping entry and append it to the list */
- Compiler::IPmappingDsc* addMapping = (Compiler::IPmappingDsc *)compiler->compGetMem(sizeof(*addMapping), CMK_DebugInfo);
+ Compiler::IPmappingDsc* addMapping =
+ (Compiler::IPmappingDsc*)compiler->compGetMem(sizeof(*addMapping), CMK_DebugInfo);
addMapping->ipmdNativeLoc.CaptureLocation(getEmitter());
- addMapping->ipmdILoffsx = offsx;
- addMapping->ipmdIsLabel = isLabel;
- addMapping->ipmdNext = nullptr;
+ addMapping->ipmdILoffsx = offsx;
+ addMapping->ipmdIsLabel = isLabel;
+ addMapping->ipmdNext = nullptr;
if (compiler->genIPmappingList != nullptr)
{
assert(compiler->genIPmappingLast != nullptr);
assert(compiler->genIPmappingLast->ipmdNext == nullptr);
- compiler->genIPmappingLast->ipmdNext = addMapping;
+ compiler->genIPmappingLast->ipmdNext = addMapping;
}
else
{
assert(compiler->genIPmappingLast == nullptr);
- compiler->genIPmappingList = addMapping;
+ compiler->genIPmappingList = addMapping;
}
- compiler->genIPmappingLast = addMapping;
+ compiler->genIPmappingLast = addMapping;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Added IP mapping: ");
genIPmappingDisp(unsigned(-1), addMapping);
@@ -11281,52 +11373,54 @@ void CodeGen::genIPmappingAdd(IL_OFFSETX offsx, bool isLabel)
#endif // DEBUG
}
-
/*****************************************************************************
*
* Prepend an IPmappingDsc struct to the list that we're maintaining
* for the debugger.
* Record the instr offset as being at the current code gen position.
*/
-void CodeGen::genIPmappingAddToFront(IL_OFFSETX offsx)
+void CodeGen::genIPmappingAddToFront(IL_OFFSETX offsx)
{
if (!compiler->opts.compDbgInfo)
+ {
return;
+ }
assert(offsx != BAD_IL_OFFSET);
assert(compiler->compGeneratingProlog); // We only ever do this during prolog generation.
switch ((int)offsx) // Need the cast since offs is unsigned and the case statements are comparing to signed.
{
- case ICorDebugInfo::NO_MAPPING:
- case ICorDebugInfo::PROLOG:
- case ICorDebugInfo::EPILOG:
- break;
+ case ICorDebugInfo::NO_MAPPING:
+ case ICorDebugInfo::PROLOG:
+ case ICorDebugInfo::EPILOG:
+ break;
- default:
- noway_assert(jitGetILoffs(offsx) <= compiler->info.compILCodeSize);
- break;
+ default:
+ noway_assert(jitGetILoffs(offsx) <= compiler->info.compILCodeSize);
+ break;
}
/* Create a mapping entry and prepend it to the list */
- Compiler::IPmappingDsc* addMapping = (Compiler::IPmappingDsc *)compiler->compGetMem(sizeof(*addMapping), CMK_DebugInfo);
+ Compiler::IPmappingDsc* addMapping =
+ (Compiler::IPmappingDsc*)compiler->compGetMem(sizeof(*addMapping), CMK_DebugInfo);
addMapping->ipmdNativeLoc.CaptureLocation(getEmitter());
- addMapping->ipmdILoffsx = offsx;
- addMapping->ipmdIsLabel = true;
- addMapping->ipmdNext = nullptr;
+ addMapping->ipmdILoffsx = offsx;
+ addMapping->ipmdIsLabel = true;
+ addMapping->ipmdNext = nullptr;
- addMapping->ipmdNext = compiler->genIPmappingList;
+ addMapping->ipmdNext = compiler->genIPmappingList;
compiler->genIPmappingList = addMapping;
- if (compiler->genIPmappingLast == nullptr)
+ if (compiler->genIPmappingLast == nullptr)
{
- compiler->genIPmappingLast = addMapping;
+ compiler->genIPmappingLast = addMapping;
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Added IP mapping to front: ");
genIPmappingDisp(unsigned(-1), addMapping);
@@ -11337,13 +11431,13 @@ void CodeGen::genIPmappingAddToFront(IL_OFFSETX offsx)
/*****************************************************************************/
C_ASSERT(IL_OFFSETX(ICorDebugInfo::NO_MAPPING) != IL_OFFSETX(BAD_IL_OFFSET));
-C_ASSERT(IL_OFFSETX(ICorDebugInfo::PROLOG) != IL_OFFSETX(BAD_IL_OFFSET));
-C_ASSERT(IL_OFFSETX(ICorDebugInfo::EPILOG) != IL_OFFSETX(BAD_IL_OFFSET));
+C_ASSERT(IL_OFFSETX(ICorDebugInfo::PROLOG) != IL_OFFSETX(BAD_IL_OFFSET));
+C_ASSERT(IL_OFFSETX(ICorDebugInfo::EPILOG) != IL_OFFSETX(BAD_IL_OFFSET));
-C_ASSERT(IL_OFFSETX(BAD_IL_OFFSET) > MAX_IL_OFFSET);
+C_ASSERT(IL_OFFSETX(BAD_IL_OFFSET) > MAX_IL_OFFSET);
C_ASSERT(IL_OFFSETX(ICorDebugInfo::NO_MAPPING) > MAX_IL_OFFSET);
-C_ASSERT(IL_OFFSETX(ICorDebugInfo::PROLOG) > MAX_IL_OFFSET);
-C_ASSERT(IL_OFFSETX(ICorDebugInfo::EPILOG) > MAX_IL_OFFSET);
+C_ASSERT(IL_OFFSETX(ICorDebugInfo::PROLOG) > MAX_IL_OFFSET);
+C_ASSERT(IL_OFFSETX(ICorDebugInfo::EPILOG) > MAX_IL_OFFSET);
//------------------------------------------------------------------------
// jitGetILoffs: Returns the IL offset portion of the IL_OFFSETX type.
@@ -11356,19 +11450,19 @@ C_ASSERT(IL_OFFSETX(ICorDebugInfo::EPILOG) > MAX_IL_OFFSET);
// Return Value:
// The IL offset.
-IL_OFFSET jitGetILoffs(IL_OFFSETX offsx)
+IL_OFFSET jitGetILoffs(IL_OFFSETX offsx)
{
assert(offsx != BAD_IL_OFFSET);
switch ((int)offsx) // Need the cast since offs is unsigned and the case statements are comparing to signed.
{
- case ICorDebugInfo::NO_MAPPING:
- case ICorDebugInfo::PROLOG:
- case ICorDebugInfo::EPILOG:
- unreached();
+ case ICorDebugInfo::NO_MAPPING:
+ case ICorDebugInfo::PROLOG:
+ case ICorDebugInfo::EPILOG:
+ unreached();
- default:
- return IL_OFFSET(offsx & ~IL_OFFSETX_BITS);
+ default:
+ return IL_OFFSET(offsx & ~IL_OFFSETX_BITS);
}
}
@@ -11382,19 +11476,19 @@ IL_OFFSET jitGetILoffs(IL_OFFSETX offsx)
// Return Value:
// The IL offset.
-IL_OFFSET jitGetILoffsAny(IL_OFFSETX offsx)
+IL_OFFSET jitGetILoffsAny(IL_OFFSETX offsx)
{
assert(offsx != BAD_IL_OFFSET);
switch ((int)offsx) // Need the cast since offs is unsigned and the case statements are comparing to signed.
{
- case ICorDebugInfo::NO_MAPPING:
- case ICorDebugInfo::PROLOG:
- case ICorDebugInfo::EPILOG:
- return IL_OFFSET(offsx);
+ case ICorDebugInfo::NO_MAPPING:
+ case ICorDebugInfo::PROLOG:
+ case ICorDebugInfo::EPILOG:
+ return IL_OFFSET(offsx);
- default:
- return IL_OFFSET(offsx & ~IL_OFFSETX_BITS);
+ default:
+ return IL_OFFSET(offsx & ~IL_OFFSETX_BITS);
}
}
@@ -11408,19 +11502,19 @@ IL_OFFSET jitGetILoffsAny(IL_OFFSETX offsx)
// Return Value:
// 'true' if the stack empty bit is set; 'false' otherwise.
-bool jitIsStackEmpty(IL_OFFSETX offsx)
+bool jitIsStackEmpty(IL_OFFSETX offsx)
{
assert(offsx != BAD_IL_OFFSET);
switch ((int)offsx) // Need the cast since offs is unsigned and the case statements are comparing to signed.
{
- case ICorDebugInfo::NO_MAPPING:
- case ICorDebugInfo::PROLOG:
- case ICorDebugInfo::EPILOG:
- return true;
+ case ICorDebugInfo::NO_MAPPING:
+ case ICorDebugInfo::PROLOG:
+ case ICorDebugInfo::EPILOG:
+ return true;
- default:
- return (offsx & IL_OFFSETX_STKBIT) == 0;
+ default:
+ return (offsx & IL_OFFSETX_STKBIT) == 0;
}
}
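All three helpers above decode the same packing: an IL_OFFSETX is a raw IL offset with a couple of flag bits OR'd into its high bits (stack-not-empty and call-instruction), plus a few out-of-range special values (PROLOG, EPILOG, NO_MAPPING). A standalone sketch with assumed bit positions; the real masks are defined in jit.h:

    #include <cassert>
    #include <cstdio>

    typedef unsigned IL_OFFSETX;

    // Assumed example bit assignments; the actual values are defined in jit.h.
    const IL_OFFSETX IL_OFFSETX_STKBIT             = 0x80000000u; // set when the IL stack is NOT empty
    const IL_OFFSETX IL_OFFSETX_CALLINSTRUCTIONBIT = 0x40000000u; // set on call-instruction sequence points
    const IL_OFFSETX IL_OFFSETX_BITS               = IL_OFFSETX_STKBIT | IL_OFFSETX_CALLINSTRUCTIONBIT;

    int main()
    {
        IL_OFFSETX offsx = 0x24 | IL_OFFSETX_CALLINSTRUCTIONBIT; // IL offset 0x24, marked as a call

        unsigned ilOffs      = offsx & ~IL_OFFSETX_BITS;                     // jitGetILoffs
        bool     stackEmpty  = (offsx & IL_OFFSETX_STKBIT) == 0;             // jitIsStackEmpty
        bool     isCallInstr = (offsx & IL_OFFSETX_CALLINSTRUCTIONBIT) != 0; // jitIsCallInstruction

        assert(ilOffs == 0x24 && stackEmpty && isCallInstr);
        printf("IL offset 0x%x, stack empty: %d, call: %d\n", ilOffs, stackEmpty, isCallInstr);
        return 0;
    }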
@@ -11434,39 +11528,47 @@ bool jitIsStackEmpty(IL_OFFSETX offsx)
// Return Value:
// 'true' if the call instruction bit is set; 'false' otherwise.
-bool jitIsCallInstruction(IL_OFFSETX offsx)
+bool jitIsCallInstruction(IL_OFFSETX offsx)
{
assert(offsx != BAD_IL_OFFSET);
switch ((int)offsx) // Need the cast since offs is unsigned and the case statements are comparing to signed.
{
- case ICorDebugInfo::NO_MAPPING:
- case ICorDebugInfo::PROLOG:
- case ICorDebugInfo::EPILOG:
- return false;
+ case ICorDebugInfo::NO_MAPPING:
+ case ICorDebugInfo::PROLOG:
+ case ICorDebugInfo::EPILOG:
+ return false;
- default:
- return (offsx & IL_OFFSETX_CALLINSTRUCTIONBIT) != 0;
+ default:
+ return (offsx & IL_OFFSETX_CALLINSTRUCTIONBIT) != 0;
}
}
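The four helpers above all decode the same packed IL_OFFSETX layout: the raw IL offset sits in the low bits, and the stack-empty and call-instruction flags are folded into the high bits covered by IL_OFFSETX_BITS. A minimal standalone sketch of that round trip, using illustrative flag values rather than the real constants from the jit headers:

#include <cassert>
#include <cstdint>

typedef uint32_t IL_OFFSET;
typedef uint32_t IL_OFFSETX;

// Illustrative values only; the real constants are defined elsewhere in the jit.
const IL_OFFSETX IL_OFFSETX_STKBIT             = 0x80000000; // stack is non-empty at this offset
const IL_OFFSETX IL_OFFSETX_CALLINSTRUCTIONBIT = 0x40000000; // offset marks a call instruction
const IL_OFFSETX IL_OFFSETX_BITS               = IL_OFFSETX_STKBIT | IL_OFFSETX_CALLINSTRUCTIONBIT;

int main()
{
    IL_OFFSETX packed = 0x20 | IL_OFFSETX_CALLINSTRUCTIONBIT; // IL offset 0x20, at a call site

    assert(IL_OFFSET(packed & ~IL_OFFSETX_BITS) == 0x20);  // what jitGetILoffs / jitGetILoffsAny return
    assert((packed & IL_OFFSETX_STKBIT) == 0);             // jitIsStackEmpty -> true
    assert((packed & IL_OFFSETX_CALLINSTRUCTIONBIT) != 0); // jitIsCallInstruction -> true
    return 0;
}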
/*****************************************************************************/
-void CodeGen::genEnsureCodeEmitted(IL_OFFSETX offsx)
+void CodeGen::genEnsureCodeEmitted(IL_OFFSETX offsx)
{
if (!compiler->opts.compDbgCode)
+ {
return;
+ }
if (offsx == BAD_IL_OFFSET)
+ {
return;
+ }
    /* If other IL offsets were reported, skip */
if (compiler->genIPmappingLast == nullptr)
+ {
return;
+ }
if (compiler->genIPmappingLast->ipmdILoffsx != offsx)
+ {
return;
+ }
/* offsx was the last reported offset. Make sure that we generated native code */
@@ -11481,17 +11583,21 @@ void CodeGen::genEnsureCodeEmitted(IL_OFFSETX offsx)
* Shut down the IP-mapping logic, report the info to the EE.
*/
-void CodeGen::genIPmappingGen()
+void CodeGen::genIPmappingGen()
{
if (!compiler->opts.compDbgInfo)
+ {
return;
+ }
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In genIPmappingGen()\n");
+ }
#endif
- if (compiler->genIPmappingList == nullptr)
+ if (compiler->genIPmappingList == nullptr)
{
compiler->eeSetLIcount(0);
compiler->eeSetLIdone();
@@ -11505,15 +11611,13 @@ void CodeGen::genIPmappingGen()
/* First count the number of distinct mapping records */
- mappingCnt = 0;
- lastNativeOfs = UNATIVE_OFFSET(~0);
+ mappingCnt = 0;
+ lastNativeOfs = UNATIVE_OFFSET(~0);
- for (prevMapping = nullptr,
- tmpMapping = compiler->genIPmappingList;
- tmpMapping != nullptr;
+ for (prevMapping = nullptr, tmpMapping = compiler->genIPmappingList; tmpMapping != nullptr;
tmpMapping = tmpMapping->ipmdNext)
{
- IL_OFFSETX srcIP = tmpMapping->ipmdILoffsx;
+ IL_OFFSETX srcIP = tmpMapping->ipmdILoffsx;
// Managed RetVal - since new sequence points are emitted to identify IL calls,
// make sure that those are not filtered and do not interfere with filtering of
@@ -11526,11 +11630,11 @@ void CodeGen::genIPmappingGen()
UNATIVE_OFFSET nextNativeOfs = tmpMapping->ipmdNativeLoc.CodeOffset(getEmitter());
- if (nextNativeOfs != lastNativeOfs)
+ if (nextNativeOfs != lastNativeOfs)
{
mappingCnt++;
lastNativeOfs = nextNativeOfs;
- prevMapping = tmpMapping;
+ prevMapping = tmpMapping;
continue;
}
@@ -11553,10 +11657,9 @@ void CodeGen::genIPmappingGen()
// Leave prevMapping unchanged as tmpMapping is no longer valid
tmpMapping->ipmdNativeLoc.Init();
}
- else if (srcIP == (IL_OFFSETX)ICorDebugInfo::EPILOG ||
- srcIP == 0)
+ else if (srcIP == (IL_OFFSETX)ICorDebugInfo::EPILOG || srcIP == 0)
{
- //counting for special cases: see below
+ // counting for special cases: see below
mappingCnt++;
prevMapping = tmpMapping;
}
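The counting loop above keeps only the first mapping per distinct native offset (plus the EPILOG / IL-offset-0 special cases), so several IL offsets that land on the same native offset collapse into one debug-info record. A small self-contained sketch of that filtering, with made-up offsets:

#include <cassert>
#include <utility>
#include <vector>

int main()
{
    // (native offset, IL offset) pairs in emission order; the first two share a native offset.
    std::vector<std::pair<unsigned, unsigned>> mappings = {{0x10, 2}, {0x10, 5}, {0x18, 9}};

    unsigned mappingCnt    = 0;
    unsigned lastNativeOfs = ~0u;
    for (const auto& m : mappings)
    {
        if (m.first != lastNativeOfs) // only the first record at each native offset counts
        {
            mappingCnt++;
            lastNativeOfs = m.first;
        }
    }

    assert(mappingCnt == 2); // the duplicate at native offset 0x10 was filtered out
    return 0;
}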
@@ -11589,31 +11692,30 @@ void CodeGen::genIPmappingGen()
/* Now tell them about the mappings */
- mappingCnt = 0;
- lastNativeOfs = UNATIVE_OFFSET(~0);
+ mappingCnt = 0;
+ lastNativeOfs = UNATIVE_OFFSET(~0);
- for (tmpMapping = compiler->genIPmappingList;
- tmpMapping != nullptr;
- tmpMapping = tmpMapping->ipmdNext)
+ for (tmpMapping = compiler->genIPmappingList; tmpMapping != nullptr; tmpMapping = tmpMapping->ipmdNext)
{
// Do we have to skip this record ?
if (!tmpMapping->ipmdNativeLoc.Valid())
+ {
continue;
+ }
UNATIVE_OFFSET nextNativeOfs = tmpMapping->ipmdNativeLoc.CodeOffset(getEmitter());
- IL_OFFSETX srcIP = tmpMapping->ipmdILoffsx;
+ IL_OFFSETX srcIP = tmpMapping->ipmdILoffsx;
if (jitIsCallInstruction(srcIP))
{
compiler->eeSetLIinfo(mappingCnt++, nextNativeOfs, jitGetILoffs(srcIP), jitIsStackEmpty(srcIP), true);
}
- else if (nextNativeOfs != lastNativeOfs)
+ else if (nextNativeOfs != lastNativeOfs)
{
compiler->eeSetLIinfo(mappingCnt++, nextNativeOfs, jitGetILoffsAny(srcIP), jitIsStackEmpty(srcIP), false);
lastNativeOfs = nextNativeOfs;
}
- else if (srcIP == (IL_OFFSETX)ICorDebugInfo::EPILOG ||
- srcIP == 0)
+ else if (srcIP == (IL_OFFSETX)ICorDebugInfo::EPILOG || srcIP == 0)
{
// For the special case of an IL instruction with no body
// followed by the epilog (say ret void immediately preceding
@@ -11670,7 +11772,6 @@ void CodeGen::genIPmappingGen()
#endif // DEBUGGING_SUPPORT
/*****************************************************************************/
-
/*============================================================================
*
* These are empty stubs to help the late dis-assembler to compile
@@ -11684,18 +11785,13 @@ void CodeGen::genIPmappingGen()
#if !defined(DEBUGGING_SUPPORT) || !defined(DEBUG)
/* virtual */
-const char* CodeGen::siRegVarName(size_t offs,
- size_t size,
- unsigned reg)
+const char* CodeGen::siRegVarName(size_t offs, size_t size, unsigned reg)
{
return NULL;
}
/* virtual */
-const char* CodeGen::siStackVarName(size_t offs,
- size_t size,
- unsigned reg,
- unsigned stkOffs)
+const char* CodeGen::siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs)
{
return NULL;
}
@@ -11704,4 +11800,3 @@ const char* CodeGen::siStackVarName(size_t offs,
#endif // !defined(DEBUGGING_SUPPORT) || !defined(DEBUG)
#endif // defined(LATE_DISASM)
/*****************************************************************************/
-
diff --git a/src/jit/codegeninterface.h b/src/jit/codegeninterface.h
index d321b5719a..f5eec89d33 100644
--- a/src/jit/codegeninterface.h
+++ b/src/jit/codegeninterface.h
@@ -16,7 +16,7 @@
// GC encoder. It is distinct from CodeGenInterface so that it can be
// included in the Compiler object, and avoid an extra indirection when
// accessed from members of Compiler.
-//
+//
#ifndef _CODEGEN_INTERFACE_H_
#define _CODEGEN_INTERFACE_H_
@@ -35,158 +35,175 @@ class emitter;
struct RegState
{
- regMaskTP rsCalleeRegArgMaskLiveIn; // mask of register arguments (live on entry to method)
+ regMaskTP rsCalleeRegArgMaskLiveIn; // mask of register arguments (live on entry to method)
#ifdef LEGACY_BACKEND
- unsigned rsCurRegArgNum; // current argument number (for caller)
+ unsigned rsCurRegArgNum; // current argument number (for caller)
#endif
- unsigned rsCalleeRegArgCount; // total number of incoming register arguments of this kind (int or float)
- bool rsIsFloat; // true for float argument registers, false for integer argument registers
+ unsigned rsCalleeRegArgCount; // total number of incoming register arguments of this kind (int or float)
+ bool rsIsFloat; // true for float argument registers, false for integer argument registers
};
//-------------------- CodeGenInterface ---------------------------------
// interface to hide the full CodeGen implementation from rest of Compiler
-CodeGenInterface *getCodeGenerator(Compiler *comp);
+CodeGenInterface* getCodeGenerator(Compiler* comp);
class CodeGenInterface
{
friend class emitter;
public:
- CodeGenInterface(Compiler *theCompiler);
- virtual void genGenerateCode (void * * codePtr, ULONG * nativeSizeOfCode) = 0;
+ CodeGenInterface(Compiler* theCompiler);
+ virtual void genGenerateCode(void** codePtr, ULONG* nativeSizeOfCode) = 0;
#ifndef LEGACY_BACKEND
// genSpillVar is called by compUpdateLifeVar in the RyuJIT backend case.
// TODO-Cleanup: We should handle the spill directly in CodeGen, rather than
// calling it from compUpdateLifeVar. Then this can be non-virtual.
- virtual void genSpillVar (GenTreePtr tree) = 0;
+ virtual void genSpillVar(GenTreePtr tree) = 0;
#endif // !LEGACY_BACKEND
//-------------------------------------------------------------------------
// The following property indicates whether to align loops.
// (Used to avoid effects of loop alignment when diagnosing perf issues.)
- __declspec(property(get=doAlignLoops,put=setAlignLoops)) bool genAlignLoops;
- bool doAlignLoops() { return m_genAlignLoops; }
- void setAlignLoops(bool value) { m_genAlignLoops = value; }
+ __declspec(property(get = doAlignLoops, put = setAlignLoops)) bool genAlignLoops;
+ bool doAlignLoops()
+ {
+ return m_genAlignLoops;
+ }
+ void setAlignLoops(bool value)
+ {
+ m_genAlignLoops = value;
+ }
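The genAlignLoops declaration above relies on the MSVC-style __declspec(property) extension: reads and writes that use field syntax are rewritten by the compiler into calls to the named get/put accessors. A minimal sketch of the pattern (it needs MSVC, or clang with Microsoft extensions enabled; the names below are illustrative):

struct PropertyDemo
{
    __declspec(property(get = getFlag, put = setFlag)) bool flag;
    bool getFlag()
    {
        return m_flag;
    }
    void setFlag(bool value)
    {
        m_flag = value;
    }
    bool m_flag = false;
};

// Usage: field-style accesses dispatch to the accessors.
//   PropertyDemo d;
//   d.flag = true;   // compiles to d.setFlag(true)
//   bool b = d.flag; // compiles to d.getFlag()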
// TODO-Cleanup: Abstract out the part of this that finds the addressing mode, and
// move it to Lower
- virtual bool genCreateAddrMode(GenTreePtr addr,
- int mode,
- bool fold,
- regMaskTP regMask,
- bool * revPtr,
- GenTreePtr * rv1Ptr,
- GenTreePtr * rv2Ptr,
+ virtual bool genCreateAddrMode(GenTreePtr addr,
+ int mode,
+ bool fold,
+ regMaskTP regMask,
+ bool* revPtr,
+ GenTreePtr* rv1Ptr,
+ GenTreePtr* rv2Ptr,
#if SCALED_ADDR_MODES
- unsigned * mulPtr,
+ unsigned* mulPtr,
#endif
- unsigned * cnsPtr,
- bool nogen = false) = 0;
+ unsigned* cnsPtr,
+ bool nogen = false) = 0;
- void genCalcFrameSize ();
+ void genCalcFrameSize();
- GCInfo gcInfo;
+ GCInfo gcInfo;
- RegSet regSet;
- RegState intRegState;
- RegState floatRegState;
+ RegSet regSet;
+ RegState intRegState;
+ RegState floatRegState;
// TODO-Cleanup: The only reason that regTracker needs to live in CodeGenInterface is that
// in RegSet::rsUnspillOneReg, it needs to mark the new register as "trash"
- RegTracker regTracker;
+ RegTracker regTracker;
+
public:
- void trashReg(regNumber reg) { regTracker.rsTrackRegTrash(reg); }
+ void trashReg(regNumber reg)
+ {
+ regTracker.rsTrackRegTrash(reg);
+ }
protected:
- Compiler* compiler;
- bool m_genAlignLoops;
+ Compiler* compiler;
+ bool m_genAlignLoops;
private:
- static const
- BYTE instInfo[INS_count];
+ static const BYTE instInfo[INS_count];
- #define INST_FP 0x01 // is it a FP instruction?
+#define INST_FP 0x01 // is it a FP instruction?
public:
- static
- bool instIsFP (instruction ins);
+ static bool instIsFP(instruction ins);
//-------------------------------------------------------------------------
// Liveness-related fields & methods
public:
- void genUpdateRegLife (const LclVarDsc * varDsc,
- bool isBorn,
- bool isDying
- DEBUGARG( GenTreePtr tree));
+ void genUpdateRegLife(const LclVarDsc* varDsc, bool isBorn, bool isDying DEBUGARG(GenTreePtr tree));
#ifndef LEGACY_BACKEND
- void genUpdateVarReg (LclVarDsc * varDsc,
- GenTreePtr tree);
+ void genUpdateVarReg(LclVarDsc* varDsc, GenTreePtr tree);
#endif // !LEGACY_BACKEND
protected:
#ifdef DEBUG
- VARSET_TP genTempOldLife;
- bool genTempLiveChg;
+ VARSET_TP genTempOldLife;
+ bool genTempLiveChg;
#endif
- VARSET_TP genLastLiveSet; // A one element map (genLastLiveSet-> genLastLiveMask)
- regMaskTP genLastLiveMask; // these two are used in genLiveMask
-
+ VARSET_TP genLastLiveSet; // A one element map (genLastLiveSet-> genLastLiveMask)
+ regMaskTP genLastLiveMask; // these two are used in genLiveMask
- regMaskTP genGetRegMask (const LclVarDsc * varDsc);
- regMaskTP genGetRegMask (GenTreePtr tree);
+ regMaskTP genGetRegMask(const LclVarDsc* varDsc);
+ regMaskTP genGetRegMask(GenTreePtr tree);
- void genUpdateLife (GenTreePtr tree);
- void genUpdateLife (VARSET_VALARG_TP newLife);
+ void genUpdateLife(GenTreePtr tree);
+ void genUpdateLife(VARSET_VALARG_TP newLife);
- regMaskTP genLiveMask (GenTreePtr tree);
- regMaskTP genLiveMask (VARSET_VALARG_TP liveSet);
-
- void genGetRegPairFromMask(regMaskTP regPairMask, regNumber* pLoReg, regNumber* pHiReg);
+ regMaskTP genLiveMask(GenTreePtr tree);
+ regMaskTP genLiveMask(VARSET_VALARG_TP liveSet);
+ void genGetRegPairFromMask(regMaskTP regPairMask, regNumber* pLoReg, regNumber* pHiReg);
// The following property indicates whether the current method sets up
// an explicit stack frame or not.
private:
- PhasedVar<bool> m_cgFramePointerUsed;
+ PhasedVar<bool> m_cgFramePointerUsed;
+
public:
- bool isFramePointerUsed() const { return m_cgFramePointerUsed; }
- void setFramePointerUsed(bool value) { m_cgFramePointerUsed = value; }
- void resetFramePointerUsedWritePhase() { m_cgFramePointerUsed.ResetWritePhase(); }
+ bool isFramePointerUsed() const
+ {
+ return m_cgFramePointerUsed;
+ }
+ void setFramePointerUsed(bool value)
+ {
+ m_cgFramePointerUsed = value;
+ }
+ void resetFramePointerUsedWritePhase()
+ {
+ m_cgFramePointerUsed.ResetWritePhase();
+ }
// The following property indicates whether the current method requires
// an explicit frame. Does not prohibit double alignment of the stack.
private:
- PhasedVar<bool> m_cgFrameRequired;
-public:
- bool isFrameRequired() const { return m_cgFrameRequired; }
- void setFrameRequired(bool value) { m_cgFrameRequired = value; }
+ PhasedVar<bool> m_cgFrameRequired;
public:
+ bool isFrameRequired() const
+ {
+ return m_cgFrameRequired;
+ }
+ void setFrameRequired(bool value)
+ {
+ m_cgFrameRequired = value;
+ }
- int genCallerSPtoFPdelta();
- int genCallerSPtoInitialSPdelta();
- int genSPtoFPdelta();
- int genTotalFrameSize();
+public:
+ int genCallerSPtoFPdelta();
+ int genCallerSPtoInitialSPdelta();
+ int genSPtoFPdelta();
+ int genTotalFrameSize();
- regNumber genGetThisArgReg (GenTreePtr call);
+ regNumber genGetThisArgReg(GenTreePtr call);
#ifdef _TARGET_XARCH_
#ifdef _TARGET_AMD64_
// There are no reloc hints on x86
- unsigned short genAddrRelocTypeHint(size_t addr);
+ unsigned short genAddrRelocTypeHint(size_t addr);
#endif
- bool genDataIndirAddrCanBeEncodedAsPCRelOffset(size_t addr);
- bool genCodeIndirAddrCanBeEncodedAsPCRelOffset(size_t addr);
- bool genCodeIndirAddrCanBeEncodedAsZeroRelOffset(size_t addr);
- bool genCodeIndirAddrNeedsReloc(size_t addr);
- bool genCodeAddrNeedsReloc(size_t addr);
+ bool genDataIndirAddrCanBeEncodedAsPCRelOffset(size_t addr);
+ bool genCodeIndirAddrCanBeEncodedAsPCRelOffset(size_t addr);
+ bool genCodeIndirAddrCanBeEncodedAsZeroRelOffset(size_t addr);
+ bool genCodeIndirAddrNeedsReloc(size_t addr);
+ bool genCodeAddrNeedsReloc(size_t addr);
#endif
-
- // If both isFramePointerRequired() and isFrameRequired() are false, the method is eligible
+ // If both isFramePointerRequired() and isFrameRequired() are false, the method is eligible
// for Frame-Pointer-Omission (FPO).
// The following property indicates whether the current method requires
@@ -194,13 +211,20 @@ public:
// accessible relative to the Frame Pointer. Prohibits double alignment
// of the stack.
private:
- PhasedVar<bool> m_cgFramePointerRequired;
+ PhasedVar<bool> m_cgFramePointerRequired;
+
public:
- bool isFramePointerRequired() const { return m_cgFramePointerRequired; }
- void setFramePointerRequired(bool value) { m_cgFramePointerRequired = value; }
- void setFramePointerRequiredEH(bool value);
+ bool isFramePointerRequired() const
+ {
+ return m_cgFramePointerRequired;
+ }
+ void setFramePointerRequired(bool value)
+ {
+ m_cgFramePointerRequired = value;
+ }
+ void setFramePointerRequiredEH(bool value);
- void setFramePointerRequiredGCInfo(bool value)
+ void setFramePointerRequiredGCInfo(bool value)
{
#ifdef JIT32_GCENCODER
m_cgFramePointerRequired = value;
@@ -208,150 +232,175 @@ public:
}
#if DOUBLE_ALIGN
-// The following property indicates whether we are going to double-align the frame.
-// Arguments are accessed relative to the Frame Pointer (EBP), and
-// locals are accessed relative to the Stack Pointer (ESP).
+    // The following property indicates whether we are going to double-align the frame.
+ // Arguments are accessed relative to the Frame Pointer (EBP), and
+ // locals are accessed relative to the Stack Pointer (ESP).
public:
- bool doDoubleAlign() const { return m_cgDoubleAlign; }
- void setDoubleAlign(bool value) { m_cgDoubleAlign = value; }
- bool doubleAlignOrFramePointerUsed() const { return isFramePointerUsed() || doDoubleAlign(); }
+ bool doDoubleAlign() const
+ {
+ return m_cgDoubleAlign;
+ }
+ void setDoubleAlign(bool value)
+ {
+ m_cgDoubleAlign = value;
+ }
+ bool doubleAlignOrFramePointerUsed() const
+ {
+ return isFramePointerUsed() || doDoubleAlign();
+ }
+
private:
- bool m_cgDoubleAlign;
-#else // !DOUBLE_ALIGN
+ bool m_cgDoubleAlign;
+#else // !DOUBLE_ALIGN
public:
- bool doubleAlignOrFramePointerUsed() const { return isFramePointerUsed(); }
+ bool doubleAlignOrFramePointerUsed() const
+ {
+ return isFramePointerUsed();
+ }
#endif // !DOUBLE_ALIGN
-
-#ifdef DEBUG
+#ifdef DEBUG
// The following is used to make sure the value of 'genInterruptible' isn't
// changed after it's been used by any logic that depends on its value.
public:
- bool isGCTypeFixed() { return genInterruptibleUsed; }
+ bool isGCTypeFixed()
+ {
+ return genInterruptibleUsed;
+ }
+
protected:
- bool genInterruptibleUsed;
+ bool genInterruptibleUsed;
#endif
public:
-
#if FEATURE_STACK_FP_X87
- FlatFPStateX87 compCurFPState;
- unsigned genFPregCnt; // count of current FP reg. vars (including dead but unpopped ones)
+ FlatFPStateX87 compCurFPState;
+ unsigned genFPregCnt; // count of current FP reg. vars (including dead but unpopped ones)
- void SetRegVarFloat (regNumber reg, var_types type, LclVarDsc* varDsc);
+ void SetRegVarFloat(regNumber reg, var_types type, LclVarDsc* varDsc);
- void inst_FN (instruction ins, unsigned stk);
+ void inst_FN(instruction ins, unsigned stk);
// Keeps track of the current level of the FP coprocessor stack
// (excluding FP reg. vars).
    // Do not use directly; instead use the processor-agnostic accessor
    // methods below.
//
- unsigned genFPstkLevel;
+ unsigned genFPstkLevel;
- void genResetFPstkLevel (unsigned newValue = 0);
- unsigned genGetFPstkLevel ();
- FlatFPStateX87* FlatFPAllocFPState (FlatFPStateX87* pInitFrom = 0);
+ void genResetFPstkLevel(unsigned newValue = 0);
+ unsigned genGetFPstkLevel();
+ FlatFPStateX87* FlatFPAllocFPState(FlatFPStateX87* pInitFrom = 0);
- void genIncrementFPstkLevel(unsigned inc = 1);
- void genDecrementFPstkLevel(unsigned dec = 1);
+ void genIncrementFPstkLevel(unsigned inc = 1);
+ void genDecrementFPstkLevel(unsigned dec = 1);
- static const char* regVarNameStackFP (regNumber reg);
+ static const char* regVarNameStackFP(regNumber reg);
// FlatFPStateX87_ functions are the actual verbs to do stuff
    // like doing a transition, loading a register, etc. They are also
    // responsible for emitting the x87 code to do so. We keep
// them in Compiler because we don't want to store a pointer to the
// emitter.
- void FlatFPX87_MoveToTOS (FlatFPStateX87* pState, unsigned iVirtual, bool bEmitCode = true);
- void FlatFPX87_SwapStack (FlatFPStateX87* pState, unsigned i, unsigned j, bool bEmitCode = true);
+ void FlatFPX87_MoveToTOS(FlatFPStateX87* pState, unsigned iVirtual, bool bEmitCode = true);
+ void FlatFPX87_SwapStack(FlatFPStateX87* pState, unsigned i, unsigned j, bool bEmitCode = true);
#endif // FEATURE_STACK_FP_X87
#ifndef LEGACY_BACKEND
- regNumber genGetAssignedReg (GenTreePtr tree);
+ regNumber genGetAssignedReg(GenTreePtr tree);
#endif // !LEGACY_BACKEND
#ifdef LEGACY_BACKEND
// Changes GT_LCL_VAR nodes to GT_REG_VAR nodes if possible.
- bool genMarkLclVar (GenTreePtr tree);
+ bool genMarkLclVar(GenTreePtr tree);
- void genBashLclVar (GenTreePtr tree,
- unsigned varNum,
- LclVarDsc* varDsc);
+ void genBashLclVar(GenTreePtr tree, unsigned varNum, LclVarDsc* varDsc);
#endif // LEGACY_BACKEND
public:
- unsigned InferStructOpSizeAlign (GenTreePtr op,
- unsigned * alignmentWB);
- unsigned InferOpSizeAlign (GenTreePtr op,
- unsigned * alignmentWB);
+ unsigned InferStructOpSizeAlign(GenTreePtr op, unsigned* alignmentWB);
+ unsigned InferOpSizeAlign(GenTreePtr op, unsigned* alignmentWB);
- void genMarkTreeInReg (GenTreePtr tree, regNumber reg);
+ void genMarkTreeInReg(GenTreePtr tree, regNumber reg);
#if CPU_LONG_USES_REGPAIR
- void genMarkTreeInRegPair (GenTreePtr tree, regPairNo regPair);
+ void genMarkTreeInRegPair(GenTreePtr tree, regPairNo regPair);
#endif
// Methods to abstract target information
- bool validImmForInstr (instruction ins, ssize_t val, insFlags flags = INS_FLAGS_DONT_CARE);
- bool validDispForLdSt (ssize_t disp, var_types type);
- bool validImmForAdd (ssize_t imm, insFlags flags);
- bool validImmForAlu (ssize_t imm);
- bool validImmForMov (ssize_t imm);
- bool validImmForBL (ssize_t addr);
+ bool validImmForInstr(instruction ins, ssize_t val, insFlags flags = INS_FLAGS_DONT_CARE);
+ bool validDispForLdSt(ssize_t disp, var_types type);
+ bool validImmForAdd(ssize_t imm, insFlags flags);
+ bool validImmForAlu(ssize_t imm);
+ bool validImmForMov(ssize_t imm);
+ bool validImmForBL(ssize_t addr);
- instruction ins_Load (var_types srcType, bool aligned = false);
- instruction ins_Store (var_types dstType, bool aligned = false);
- static instruction ins_FloatLoad (var_types type=TYP_DOUBLE);
+ instruction ins_Load(var_types srcType, bool aligned = false);
+ instruction ins_Store(var_types dstType, bool aligned = false);
+ static instruction ins_FloatLoad(var_types type = TYP_DOUBLE);
// Methods for spilling - used by RegSet
- void spillReg (var_types type, TempDsc* tmp, regNumber reg);
- void reloadReg (var_types type, TempDsc* tmp, regNumber reg);
- void reloadFloatReg (var_types type, TempDsc* tmp, regNumber reg);
+ void spillReg(var_types type, TempDsc* tmp, regNumber reg);
+ void reloadReg(var_types type, TempDsc* tmp, regNumber reg);
+ void reloadFloatReg(var_types type, TempDsc* tmp, regNumber reg);
#ifdef LEGACY_BACKEND
- void SpillFloat (regNumber reg, bool bIsCall = false);
+ void SpillFloat(regNumber reg, bool bIsCall = false);
#endif // LEGACY_BACKEND
// The following method is used by xarch emitter for handling contained tree temps.
- TempDsc* getSpillTempDsc(GenTree* tree);
+ TempDsc* getSpillTempDsc(GenTree* tree);
public:
- emitter* getEmitter() { return m_cgEmitter; }
+ emitter* getEmitter()
+ {
+ return m_cgEmitter;
+ }
+
protected:
- emitter* m_cgEmitter;
+ emitter* m_cgEmitter;
#ifdef LATE_DISASM
public:
- DisAssembler& getDisAssembler() { return m_cgDisAsm; }
+ DisAssembler& getDisAssembler()
+ {
+ return m_cgDisAsm;
+ }
+
protected:
- DisAssembler m_cgDisAsm;
+ DisAssembler m_cgDisAsm;
#endif // LATE_DISASM
public:
-
#ifdef DEBUG
- void setVerbose(bool value) { verbose = value; }
- bool verbose;
+ void setVerbose(bool value)
+ {
+ verbose = value;
+ }
+ bool verbose;
#ifdef LEGACY_BACKEND
// Stress mode
- int genStressFloat ();
- regMaskTP genStressLockedMaskFloat ();
+ int genStressFloat();
+ regMaskTP genStressLockedMaskFloat();
#endif // LEGACY_BACKEND
#endif // DEBUG
-
-
-
// The following is set to true if we've determined that the current method
// is to be fully interruptible.
//
public:
- __declspec(property(get = getInterruptible, put=setInterruptible)) bool genInterruptible;
- bool getInterruptible() { return m_cgInterruptible; }
- void setInterruptible(bool value) { m_cgInterruptible = value; }
+ __declspec(property(get = getInterruptible, put = setInterruptible)) bool genInterruptible;
+ bool getInterruptible()
+ {
+ return m_cgInterruptible;
+ }
+ void setInterruptible(bool value)
+ {
+ m_cgInterruptible = value;
+ }
+
private:
- bool m_cgInterruptible;
+ bool m_cgInterruptible;
// The following will be set to true if we've determined that we need to
// generate a full-blown pointer register map for the current method.
@@ -360,33 +409,30 @@ private:
// for fully interruptible methods)
//
public:
- __declspec(property(get = doFullPtrRegMap, put=setFullPtrRegMap)) bool genFullPtrRegMap;
- bool doFullPtrRegMap() { return m_cgFullPtrRegMap; }
- void setFullPtrRegMap(bool value) { m_cgFullPtrRegMap = value; }
+ __declspec(property(get = doFullPtrRegMap, put = setFullPtrRegMap)) bool genFullPtrRegMap;
+ bool doFullPtrRegMap()
+ {
+ return m_cgFullPtrRegMap;
+ }
+ void setFullPtrRegMap(bool value)
+ {
+ m_cgFullPtrRegMap = value;
+ }
+
private:
- bool m_cgFullPtrRegMap;
+ bool m_cgFullPtrRegMap;
#ifdef DEBUGGING_SUPPORT
public:
- virtual void siUpdate () = 0;
+ virtual void siUpdate() = 0;
#endif // DEBUGGING_SUPPORT
#ifdef LATE_DISASM
public:
+ virtual const char* siRegVarName(size_t offs, size_t size, unsigned reg) = 0;
- virtual
- const char* siRegVarName (size_t offs,
- size_t size,
- unsigned reg) = 0;
-
- virtual
- const char* siStackVarName (size_t offs,
- size_t size,
- unsigned reg,
- unsigned stkOffs) = 0;
+ virtual const char* siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs) = 0;
#endif // LATE_DISASM
-
};
-
#endif // _CODEGEN_INTERFACE_H_
diff --git a/src/jit/codegenlegacy.cpp b/src/jit/codegenlegacy.cpp
index 78edd0cc6a..feb4540907 100644
--- a/src/jit/codegenlegacy.cpp
+++ b/src/jit/codegenlegacy.cpp
@@ -33,7 +33,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "gcinfoencoder.h"
#endif
-
/*****************************************************************************
*
* Determine what variables die between beforeSet and afterSet, and
@@ -41,13 +40,12 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* compiler->compCurLife, gcInfo.gcVarPtrSetCur, regSet.rsMaskVars, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur
*/
-void CodeGen::genDyingVars(VARSET_VALARG_TP beforeSet,
- VARSET_VALARG_TP afterSet)
+void CodeGen::genDyingVars(VARSET_VALARG_TP beforeSet, VARSET_VALARG_TP afterSet)
{
- unsigned varNum;
- LclVarDsc * varDsc;
- regMaskTP regBit;
- VARSET_TP VARSET_INIT_NOCOPY(deadSet, VarSetOps::Diff(compiler, beforeSet, afterSet));
+ unsigned varNum;
+ LclVarDsc* varDsc;
+ regMaskTP regBit;
+ VARSET_TP VARSET_INIT_NOCOPY(deadSet, VarSetOps::Diff(compiler, beforeSet, afterSet));
if (VarSetOps::IsEmpty(compiler, deadSet))
return;
@@ -75,7 +73,7 @@ void CodeGen::genDyingVars(VARSET_VALARG_TP beforeSet,
if (!varDsc->lvRegister)
{
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tV%02u,T%02u is a dyingVar\n", varNum, varDsc->lvVarIndex);
@@ -85,7 +83,7 @@ void CodeGen::genDyingVars(VARSET_VALARG_TP beforeSet,
}
#if !FEATURE_FP_REGALLOC
- // We don't do FP-enreg of vars whose liveness changes in GTF_COLON_COND
+ // We don't do FP-enreg of vars whose liveness changes in GTF_COLON_COND
if (!varDsc->IsFloatRegType())
#endif
{
@@ -98,17 +96,18 @@ void CodeGen::genDyingVars(VARSET_VALARG_TP beforeSet,
else
{
regBit = genRegMask(varDsc->lvRegNum);
- if (isRegPairType(varDsc->lvType) && varDsc->lvOtherReg != REG_STK)
+ if (isRegPairType(varDsc->lvType) && varDsc->lvOtherReg != REG_STK)
regBit |= genRegMask(varDsc->lvOtherReg);
}
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
- printf("\t\t\t\t\t\t\tV%02u,T%02u in reg %s is a dyingVar\n", varNum, varDsc->lvVarIndex, compiler->compRegVarName(varDsc->lvRegNum));
+ printf("\t\t\t\t\t\t\tV%02u,T%02u in reg %s is a dyingVar\n", varNum, varDsc->lvVarIndex,
+ compiler->compRegVarName(varDsc->lvRegNum));
}
#endif
- noway_assert((regSet.rsMaskVars & regBit) != 0);
+ noway_assert((regSet.rsMaskVars & regBit) != 0);
regSet.RemoveMaskVars(regBit);
@@ -125,17 +124,16 @@ void CodeGen::genDyingVars(VARSET_VALARG_TP beforeSet,
* Change the given enregistered local variable node to a register variable node
*/
-void CodeGenInterface::genBashLclVar(GenTreePtr tree, unsigned varNum,
- LclVarDsc* varDsc)
+void CodeGenInterface::genBashLclVar(GenTreePtr tree, unsigned varNum, LclVarDsc* varDsc)
{
noway_assert(tree->gtOper == GT_LCL_VAR);
noway_assert(varDsc->lvRegister);
- if (isRegPairType(varDsc->lvType))
+ if (isRegPairType(varDsc->lvType))
{
/* Check for the case of a variable that was narrowed to an int */
- if (isRegPairType(tree->gtType))
+ if (isRegPairType(tree->gtType))
{
genMarkTreeInRegPair(tree, gen2regs2pair(varDsc->lvRegNum, varDsc->lvOtherReg));
return;
@@ -153,20 +151,19 @@ void CodeGenInterface::genBashLclVar(GenTreePtr tree, unsigned
unsigned livenessFlags = (tree->gtFlags & GTF_LIVENESS_MASK);
- ValueNumPair vnp = tree->gtVNPair; // Save the ValueNumPair
+ ValueNumPair vnp = tree->gtVNPair; // Save the ValueNumPair
tree->SetOper(GT_REG_VAR);
- tree->gtVNPair = vnp; // Preserve the ValueNumPair, as SetOper will clear it.
+ tree->gtVNPair = vnp; // Preserve the ValueNumPair, as SetOper will clear it.
- tree->gtFlags |= livenessFlags;
- tree->gtFlags |= GTF_REG_VAL;
- tree->gtRegNum = varDsc->lvRegNum;
- tree->gtRegVar.gtRegNum = varDsc->lvRegNum;
+ tree->gtFlags |= livenessFlags;
+ tree->gtFlags |= GTF_REG_VAL;
+ tree->gtRegNum = varDsc->lvRegNum;
+ tree->gtRegVar.gtRegNum = varDsc->lvRegNum;
tree->gtRegVar.SetLclNum(varNum);
-
}
// inline
-void CodeGen::saveLiveness(genLivenessSet * ls)
+void CodeGen::saveLiveness(genLivenessSet* ls)
{
VarSetOps::Assign(compiler, ls->liveSet, compiler->compCurLife);
VarSetOps::Assign(compiler, ls->varPtrSet, gcInfo.gcVarPtrSetCur);
@@ -176,7 +173,7 @@ void CodeGen::saveLiveness(genLivenessSet * ls)
}
// inline
-void CodeGen::restoreLiveness(genLivenessSet * ls)
+void CodeGen::restoreLiveness(genLivenessSet* ls)
{
VarSetOps::Assign(compiler, compiler->compCurLife, ls->liveSet);
VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, ls->varPtrSet);
@@ -186,20 +183,20 @@ void CodeGen::restoreLiveness(genLivenessSet * ls)
}
// inline
-void CodeGen::checkLiveness(genLivenessSet * ls)
+void CodeGen::checkLiveness(genLivenessSet* ls)
{
assert(VarSetOps::Equal(compiler, compiler->compCurLife, ls->liveSet));
assert(VarSetOps::Equal(compiler, gcInfo.gcVarPtrSetCur, ls->varPtrSet));
- assert(regSet.rsMaskVars == ls->maskVars);
+ assert(regSet.rsMaskVars == ls->maskVars);
assert(gcInfo.gcRegGCrefSetCur == ls->gcRefRegs);
assert(gcInfo.gcRegByrefSetCur == ls->byRefRegs);
}
// inline
-bool CodeGenInterface::genMarkLclVar(GenTreePtr tree)
+bool CodeGenInterface::genMarkLclVar(GenTreePtr tree)
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
assert(tree->gtOper == GT_LCL_VAR);
@@ -209,7 +206,7 @@ bool CodeGenInterface::genMarkLclVar(GenTreePtr tree)
assert(varNum < compiler->lvaCount);
varDsc = compiler->lvaTable + varNum;
- if (varDsc->lvRegister)
+ if (varDsc->lvRegister)
{
genBashLclVar(tree, varNum, varDsc);
return true;
@@ -221,66 +218,65 @@ bool CodeGenInterface::genMarkLclVar(GenTreePtr tree)
}
// inline
-GenTreePtr CodeGen::genGetAddrModeBase(GenTreePtr tree)
+GenTreePtr CodeGen::genGetAddrModeBase(GenTreePtr tree)
{
- bool rev;
- unsigned mul;
- unsigned cns;
- GenTreePtr adr;
- GenTreePtr idx;
-
- if (genCreateAddrMode(tree, // address
- 0, // mode
- false, // fold
- RBM_NONE, // reg mask
- &rev, // reverse ops
- &adr, // base addr
- &idx, // index val
+ bool rev;
+ unsigned mul;
+ unsigned cns;
+ GenTreePtr adr;
+ GenTreePtr idx;
+
+ if (genCreateAddrMode(tree, // address
+ 0, // mode
+ false, // fold
+ RBM_NONE, // reg mask
+ &rev, // reverse ops
+ &adr, // base addr
+ &idx, // index val
#if SCALED_ADDR_MODES
- &mul, // scaling
+ &mul, // scaling
#endif
- &cns, // displacement
- true)) // don't generate code
- return adr;
+ &cns, // displacement
+ true)) // don't generate code
+ return adr;
else
- return NULL;
+ return NULL;
}
// inline
-void CodeGen::genSinglePush()
+void CodeGen::genSinglePush()
{
genStackLevel += sizeof(void*);
}
// inline
-void CodeGen::genSinglePop()
+void CodeGen::genSinglePop()
{
genStackLevel -= sizeof(void*);
}
-
#if FEATURE_STACK_FP_X87
// inline
-void CodeGenInterface::genResetFPstkLevel(unsigned newValue /* = 0 */)
+void CodeGenInterface::genResetFPstkLevel(unsigned newValue /* = 0 */)
{
genFPstkLevel = newValue;
}
// inline
-unsigned CodeGenInterface::genGetFPstkLevel()
+unsigned CodeGenInterface::genGetFPstkLevel()
{
return genFPstkLevel;
}
// inline
-void CodeGenInterface::genIncrementFPstkLevel(unsigned inc /* = 1 */)
+void CodeGenInterface::genIncrementFPstkLevel(unsigned inc /* = 1 */)
{
noway_assert((inc == 0) || genFPstkLevel + inc > genFPstkLevel);
genFPstkLevel += inc;
}
// inline
-void CodeGenInterface::genDecrementFPstkLevel(unsigned dec /* = 1 */)
+void CodeGenInterface::genDecrementFPstkLevel(unsigned dec /* = 1 */)
{
noway_assert((dec == 0) || genFPstkLevel - dec < genFPstkLevel);
genFPstkLevel -= dec;
@@ -293,18 +289,15 @@ void CodeGenInterface::genDecrementFPstkLevel(unsigned dec /* = 1 */)
* Generate code that will set the given register to the integer constant.
*/
-void CodeGen::genSetRegToIcon(regNumber reg,
- ssize_t val,
- var_types type,
- insFlags flags)
+void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
{
- noway_assert(type != TYP_REF || val== NULL);
+ noway_assert(type != TYP_REF || val == NULL);
/* Does the reg already hold this constant? */
- if (!regTracker.rsIconIsInReg(val, reg))
+ if (!regTracker.rsIconIsInReg(val, reg))
{
- if (val == 0)
+ if (val == 0)
{
instGen_Set_Reg_To_Zero(emitActualTypeSize(type), reg, flags);
}
@@ -336,11 +329,7 @@ void CodeGen::genSetRegToIcon(regNumber reg,
if (delta == (signed char)delta)
{
/* use an lea instruction to set reg */
- getEmitter()->emitIns_R_AR (INS_lea,
- emitTypeSize(type),
- reg,
- srcReg,
- (int)delta);
+ getEmitter()->emitIns_R_AR(INS_lea, emitTypeSize(type), reg, srcReg, (int)delta);
constantLoaded = true;
}
#elif defined(_TARGET_ARM_)
@@ -348,8 +337,10 @@ void CodeGen::genSetRegToIcon(regNumber reg,
That is, the value we need is 'regS + delta'.
       We want to generate one of the following instructions, listed in order of preference:
- adds regD, delta ; 2 bytes. if regD == regS, regD is a low register, and 0<=delta<=255
- subs regD, delta ; 2 bytes. if regD == regS, regD is a low register, and -255<=delta<=0
+ adds regD, delta ; 2 bytes. if regD == regS, regD is a low register, and
+ 0<=delta<=255
+ subs regD, delta ; 2 bytes. if regD == regS, regD is a low register, and
+ -255<=delta<=0
adds regD, regS, delta ; 2 bytes. if regD and regS are low registers and 0<=delta<=7
subs regD, regS, delta ; 2 bytes. if regD and regS are low registers and -7<=delta<=0
mov regD, icon ; 4 bytes. icon is a wacky Thumb 12-bit immediate.
@@ -365,15 +356,17 @@ void CodeGen::genSetRegToIcon(regNumber reg,
0x80000000. In this case, delta will be 1.
*/
- bool useAdd = false;
+ bool useAdd = false;
regMaskTP regMask = genRegMask(reg);
regMaskTP srcRegMask = genRegMask(srcReg);
- if ((flags != INS_FLAGS_NOT_SET) && (reg == srcReg) && (regMask & RBM_LOW_REGS) && (unsigned_abs(delta) <= 255))
+ if ((flags != INS_FLAGS_NOT_SET) && (reg == srcReg) && (regMask & RBM_LOW_REGS) &&
+ (unsigned_abs(delta) <= 255))
{
useAdd = true;
}
- else if ((flags != INS_FLAGS_NOT_SET) && (regMask & RBM_LOW_REGS) && (srcRegMask & RBM_LOW_REGS) && (unsigned_abs(delta) <= 7))
+ else if ((flags != INS_FLAGS_NOT_SET) && (regMask & RBM_LOW_REGS) && (srcRegMask & RBM_LOW_REGS) &&
+ (unsigned_abs(delta) <= 7))
{
useAdd = true;
}
@@ -388,12 +381,7 @@ void CodeGen::genSetRegToIcon(regNumber reg,
if (useAdd)
{
- getEmitter()->emitIns_R_R_I (INS_add,
- EA_4BYTE,
- reg,
- srcReg,
- delta,
- flags);
+ getEmitter()->emitIns_R_R_I(INS_add, EA_4BYTE, reg, srcReg, delta, flags);
constantLoaded = true;
}
#else
@@ -411,10 +399,9 @@ void CodeGen::genSetRegToIcon(regNumber reg,
inst_RV_IV(INS_OR, reg, val, emitActualTypeSize(type));
}
else
- /* For SMALL_CODE it is smaller to push a small immediate and
- then pop it into the dest register */
- if ((compiler->compCodeOpt() == Compiler::SMALL_CODE) &&
- val == (signed char)val)
+ /* For SMALL_CODE it is smaller to push a small immediate and
+ then pop it into the dest register */
+ if ((compiler->compCodeOpt() == Compiler::SMALL_CODE) && val == (signed char)val)
{
/* "mov" has no s(sign)-bit and so always takes 6 bytes,
whereas push+pop takes 2+1 bytes */
@@ -426,7 +413,7 @@ void CodeGen::genSetRegToIcon(regNumber reg,
genSinglePop();
}
else
-#endif // _TARGET_X86_
+#endif // _TARGET_X86_
{
instGen_Set_Reg_To_Imm(emitActualTypeSize(type), reg, val, flags);
}
@@ -449,9 +436,7 @@ void CodeGen::genSetRegToIcon(regNumber reg,
*
*/
-regNumber CodeGen::genGetRegSetToIcon(ssize_t val,
- regMaskTP regBest /* = 0 */,
- var_types type /* = TYP_INT */)
+regNumber CodeGen::genGetRegSetToIcon(ssize_t val, regMaskTP regBest /* = 0 */, var_types type /* = TYP_INT */)
{
regNumber regCns;
#if REDUNDANT_LOAD
@@ -459,7 +444,7 @@ regNumber CodeGen::genGetRegSetToIcon(ssize_t val,
// Is there already a register with zero that we can use?
regCns = regTracker.rsIconIsInReg(val);
- if (regCns == REG_NA)
+ if (regCns == REG_NA)
#endif
{
// If not, grab a register to hold the constant, preferring
@@ -474,8 +459,6 @@ regNumber CodeGen::genGetRegSetToIcon(ssize_t val,
return regCns;
}
-
-
/*****************************************************************************/
/*****************************************************************************
*
@@ -483,50 +466,46 @@ regNumber CodeGen::genGetRegSetToIcon(ssize_t val,
* 'tree' is the resulting tree
*/
-void CodeGen::genIncRegBy(regNumber reg,
- ssize_t ival,
- GenTreePtr tree,
- var_types dstType,
- bool ovfl)
+void CodeGen::genIncRegBy(regNumber reg, ssize_t ival, GenTreePtr tree, var_types dstType, bool ovfl)
{
- bool setFlags = (tree!=NULL) && tree->gtSetFlags();
+ bool setFlags = (tree != NULL) && tree->gtSetFlags();
#ifdef _TARGET_XARCH_
/* First check to see if we can generate inc or dec instruction(s) */
/* But avoid inc/dec on P4 in general for fast code or inside loops for blended code */
if (!ovfl && !compiler->optAvoidIncDec(compiler->compCurBB->getBBWeight(compiler)))
{
- emitAttr size = emitTypeSize(dstType);
+ emitAttr size = emitTypeSize(dstType);
switch (ival)
{
- case 2:
- inst_RV(INS_inc, reg, dstType, size);
- __fallthrough;
- case 1:
- inst_RV(INS_inc, reg, dstType, size);
+ case 2:
+ inst_RV(INS_inc, reg, dstType, size);
+ __fallthrough;
+ case 1:
+ inst_RV(INS_inc, reg, dstType, size);
- goto UPDATE_LIVENESS;
+ goto UPDATE_LIVENESS;
- case -2:
- inst_RV(INS_dec, reg, dstType, size);
- __fallthrough;
- case -1:
- inst_RV(INS_dec, reg, dstType, size);
+ case -2:
+ inst_RV(INS_dec, reg, dstType, size);
+ __fallthrough;
+ case -1:
+ inst_RV(INS_dec, reg, dstType, size);
- goto UPDATE_LIVENESS;
+ goto UPDATE_LIVENESS;
}
}
#endif
- insFlags flags = setFlags ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ insFlags flags = setFlags ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
inst_RV_IV(INS_add, reg, ival, emitActualTypeSize(dstType), flags);
#ifdef _TARGET_XARCH_
UPDATE_LIVENESS:
#endif
- if (setFlags)
+ if (setFlags)
genFlagsEqualToReg(tree, reg);
regTracker.rsTrackRegTrash(reg);
@@ -544,7 +523,6 @@ UPDATE_LIVENESS:
}
}
-
/*****************************************************************************
*
* Subtract the given constant from the specified register.
@@ -555,11 +533,10 @@ UPDATE_LIVENESS:
* 'tree' is the resulting tree.
*/
-void CodeGen::genDecRegBy(regNumber reg,
- ssize_t ival,
- GenTreePtr tree)
+void CodeGen::genDecRegBy(regNumber reg, ssize_t ival, GenTreePtr tree)
{
- noway_assert((tree->gtFlags & GTF_OVERFLOW) && ((tree->gtFlags & GTF_UNSIGNED) || ival == ((tree->gtType == TYP_INT) ? INT32_MIN : SSIZE_T_MIN)));
+ noway_assert((tree->gtFlags & GTF_OVERFLOW) &&
+ ((tree->gtFlags & GTF_UNSIGNED) || ival == ((tree->gtType == TYP_INT) ? INT32_MIN : SSIZE_T_MIN)));
noway_assert(tree->gtType == TYP_INT || tree->gtType == TYP_I_IMPL);
regTracker.rsTrackRegTrash(reg);
@@ -567,7 +544,7 @@ void CodeGen::genDecRegBy(regNumber reg,
noway_assert(!varTypeIsGC(tree->TypeGet()));
gcInfo.gcMarkRegSetNpt(genRegMask(reg));
- insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
inst_RV_IV(INS_sub, reg, ival, emitActualTypeSize(tree->TypeGet()), flags);
if (tree->gtSetFlags())
@@ -585,11 +562,7 @@ void CodeGen::genDecRegBy(regNumber reg,
* 'tree' is the resulting tree
*/
-void CodeGen::genMulRegBy(regNumber reg,
- ssize_t ival,
- GenTreePtr tree,
- var_types dstType,
- bool ovfl)
+void CodeGen::genMulRegBy(regNumber reg, ssize_t ival, GenTreePtr tree, var_types dstType, bool ovfl)
{
noway_assert(genActualType(dstType) == TYP_INT || genActualType(dstType) == TYP_I_IMPL);
@@ -600,10 +573,10 @@ void CodeGen::genMulRegBy(regNumber reg,
genMarkTreeInReg(tree, reg);
}
- bool use_shift = false;
- unsigned shift_by = 0;
+ bool use_shift = false;
+ unsigned shift_by = 0;
- if ((dstType >= TYP_INT) && !ovfl && (ival > 0) && ((ival & (ival-1)) == 0))
+ if ((dstType >= TYP_INT) && !ovfl && (ival > 0) && ((ival & (ival - 1)) == 0))
{
use_shift = true;
BitScanForwardPtr((ULONG*)&shift_by, (ULONG)ival);
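The (ival & (ival - 1)) == 0 test above recognizes a positive power-of-two multiplier, and BitScanForwardPtr then yields the shift amount, so a multiply by 8 is strength-reduced to a left shift by 3. A standalone sketch, with a portable bit scan standing in for BitScanForwardPtr:

#include <cassert>

int main()
{
    long     ival      = 8;
    bool     use_shift = (ival > 0) && ((ival & (ival - 1)) == 0); // exactly one bit set
    unsigned shift_by  = 0;
    while (((ival >> shift_by) & 1) == 0) // portable stand-in for BitScanForwardPtr
    {
        shift_by++;
    }

    assert(use_shift && (shift_by == 3)); // reg * 8  ==>  reg << 3
    return 0;
}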
@@ -613,15 +586,15 @@ void CodeGen::genMulRegBy(regNumber reg,
{
if (shift_by != 0)
{
- insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
inst_RV_SH(INS_SHIFT_LEFT_LOGICAL, emitTypeSize(dstType), reg, shift_by, flags);
- if (tree->gtSetFlags())
+ if (tree->gtSetFlags())
genFlagsEqualToReg(tree, reg);
}
}
else
{
- instruction ins;
+ instruction ins;
#ifdef _TARGET_XARCH_
ins = getEmitter()->inst3opImulForReg(reg);
#else
@@ -636,7 +609,7 @@ void CodeGen::genMulRegBy(regNumber reg,
/*****************************************************************************/
/*****************************************************************************
*
- * Compute the value 'tree' into a register that's in 'needReg'
+ * Compute the value 'tree' into a register that's in 'needReg'
* (or any free register if 'needReg' is RBM_NONE).
*
* Note that 'needReg' is just a recommendation unless mustReg==RegSet::EXACT_REG.
@@ -645,37 +618,26 @@ void CodeGen::genMulRegBy(regNumber reg,
* If you require that the register returned is trashable, pass true for 'freeOnly'.
*/
-void CodeGen::genComputeReg(GenTreePtr tree,
- regMaskTP needReg,
- RegSet::ExactReg mustReg,
- RegSet::KeepReg keepReg,
- bool freeOnly)
+void CodeGen::genComputeReg(
+ GenTreePtr tree, regMaskTP needReg, RegSet::ExactReg mustReg, RegSet::KeepReg keepReg, bool freeOnly)
{
noway_assert(tree->gtType != TYP_VOID);
-
- regNumber reg;
- regNumber rg2;
+
+ regNumber reg;
+ regNumber rg2;
#if FEATURE_STACK_FP_X87
- noway_assert(genActualType(tree->gtType) == TYP_INT ||
- genActualType(tree->gtType) == TYP_I_IMPL ||
- genActualType(tree->gtType) == TYP_REF ||
- tree->gtType == TYP_BYREF);
+ noway_assert(genActualType(tree->gtType) == TYP_INT || genActualType(tree->gtType) == TYP_I_IMPL ||
+ genActualType(tree->gtType) == TYP_REF || tree->gtType == TYP_BYREF);
#elif defined(_TARGET_ARM_)
- noway_assert(genActualType(tree->gtType) == TYP_INT ||
- genActualType(tree->gtType) == TYP_I_IMPL ||
- genActualType(tree->gtType) == TYP_REF ||
- tree->gtType == TYP_BYREF ||
- genActualType(tree->gtType) == TYP_FLOAT ||
- genActualType(tree->gtType) == TYP_DOUBLE ||
+ noway_assert(genActualType(tree->gtType) == TYP_INT || genActualType(tree->gtType) == TYP_I_IMPL ||
+ genActualType(tree->gtType) == TYP_REF || tree->gtType == TYP_BYREF ||
+ genActualType(tree->gtType) == TYP_FLOAT || genActualType(tree->gtType) == TYP_DOUBLE ||
genActualType(tree->gtType) == TYP_STRUCT);
#else
- noway_assert(genActualType(tree->gtType) == TYP_INT ||
- genActualType(tree->gtType) == TYP_I_IMPL ||
- genActualType(tree->gtType) == TYP_REF ||
- tree->gtType == TYP_BYREF ||
- genActualType(tree->gtType) == TYP_FLOAT ||
- genActualType(tree->gtType) == TYP_DOUBLE);
+ noway_assert(genActualType(tree->gtType) == TYP_INT || genActualType(tree->gtType) == TYP_I_IMPL ||
+ genActualType(tree->gtType) == TYP_REF || tree->gtType == TYP_BYREF ||
+ genActualType(tree->gtType) == TYP_FLOAT || genActualType(tree->gtType) == TYP_DOUBLE);
#endif
/* Generate the value, hopefully into the right register */
@@ -696,7 +658,7 @@ void CodeGen::genComputeReg(GenTreePtr tree,
/* Did the value end up in an acceptable register? */
- if ((mustReg == RegSet::EXACT_REG) && needReg && !(genRegMask(reg) & needReg))
+ if ((mustReg == RegSet::EXACT_REG) && needReg && !(genRegMask(reg) & needReg))
{
/* Not good enough to satisfy the caller's orders */
@@ -714,12 +676,12 @@ void CodeGen::genComputeReg(GenTreePtr tree,
{
/* Do we have to end up with a free register? */
- if (!freeOnly)
+ if (!freeOnly)
goto REG_OK;
/* Did we luck out and the value got computed into an unused reg? */
- if (genRegMask(reg) & regSet.rsRegMaskFree())
+ if (genRegMask(reg) & regSet.rsRegMaskFree())
goto REG_OK;
/* Register already in use, so spill previous value */
@@ -738,11 +700,11 @@ void CodeGen::genComputeReg(GenTreePtr tree,
{
/* OK, let's find a trashable home for the value */
- regMaskTP rv1RegUsed;
+ regMaskTP rv1RegUsed;
- regSet.rsLockReg (genRegMask(reg), &rv1RegUsed);
+ regSet.rsLockReg(genRegMask(reg), &rv1RegUsed);
rg2 = regSet.rsPickReg(needReg);
- regSet.rsUnlockReg(genRegMask(reg), rv1RegUsed);
+ regSet.rsUnlockReg(genRegMask(reg), rv1RegUsed);
}
}
@@ -769,7 +731,7 @@ REG_OK:
/* Does the caller want us to mark the register as used? */
- if (keepReg == RegSet::KEEP_REG)
+ if (keepReg == RegSet::KEEP_REG)
{
/* In case we're computing a value into a register variable */
@@ -788,9 +750,7 @@ REG_OK:
*/
// inline
-void CodeGen::genCompIntoFreeReg(GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg)
+void CodeGen::genCompIntoFreeReg(GenTreePtr tree, regMaskTP needReg, RegSet::KeepReg keepReg)
{
genComputeReg(tree, needReg, RegSet::ANY_REG, keepReg, true);
}
@@ -801,9 +761,9 @@ void CodeGen::genCompIntoFreeReg(GenTreePtr tree,
* register (but also make sure the value is presently in a register).
*/
-void CodeGen::genReleaseReg(GenTreePtr tree)
+void CodeGen::genReleaseReg(GenTreePtr tree)
{
- if (tree->gtFlags & GTF_SPILLED)
+ if (tree->gtFlags & GTF_SPILLED)
{
/* The register has been spilled -- reload it */
@@ -822,11 +782,9 @@ void CodeGen::genReleaseReg(GenTreePtr tree)
* where tree will be recovered to, so we disallow keepReg==RegSet::FREE_REG for GC type trees.
*/
-void CodeGen::genRecoverReg(GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg)
+void CodeGen::genRecoverReg(GenTreePtr tree, regMaskTP needReg, RegSet::KeepReg keepReg)
{
- if (tree->gtFlags & GTF_SPILLED)
+ if (tree->gtFlags & GTF_SPILLED)
{
/* The register has been spilled -- reload it */
@@ -838,16 +796,16 @@ void CodeGen::genRecoverReg(GenTreePtr tree,
/* We need the tree in another register. So move it there */
noway_assert(tree->gtFlags & GTF_REG_VAL);
- regNumber oldReg = tree->gtRegNum;
+ regNumber oldReg = tree->gtRegNum;
/* Pick an acceptable register */
- regNumber reg = regSet.rsGrabReg(needReg);
+ regNumber reg = regSet.rsGrabReg(needReg);
/* Copy the value */
inst_RV_RV(INS_mov, reg, oldReg, tree->TypeGet());
- tree->gtRegNum = reg;
+ tree->gtRegNum = reg;
gcInfo.gcMarkRegPtrVal(tree);
regSet.rsMarkRegUsed(tree);
@@ -858,7 +816,7 @@ void CodeGen::genRecoverReg(GenTreePtr tree,
/* Free the register if the caller desired so */
- if (keepReg == RegSet::FREE_REG)
+ if (keepReg == RegSet::FREE_REG)
{
regSet.rsMarkRegFree(genRegMask(tree->gtRegNum));
// Can't use RegSet::FREE_REG on a GC type
@@ -870,19 +828,15 @@ void CodeGen::genRecoverReg(GenTreePtr tree,
}
}
-
/*****************************************************************************
*
* Move one half of a register pair to its new regPair(half).
*/
// inline
-void CodeGen::genMoveRegPairHalf(GenTreePtr tree,
- regNumber dst,
- regNumber src,
- int off)
+void CodeGen::genMoveRegPairHalf(GenTreePtr tree, regNumber dst, regNumber src, int off)
{
- if (src == REG_STK)
+ if (src == REG_STK)
{
// handle long to unsigned long overflow casts
while (tree->gtOper == GT_CAST)
@@ -913,16 +867,14 @@ void CodeGen::genMoveRegPairHalf(GenTreePtr tree,
* assume that the current register pair is marked as used and free it.
*/
-void CodeGen::genMoveRegPair(GenTreePtr tree,
- regMaskTP needReg,
- regPairNo newPair)
+void CodeGen::genMoveRegPair(GenTreePtr tree, regMaskTP needReg, regPairNo newPair)
{
- regPairNo oldPair;
+ regPairNo oldPair;
- regNumber oldLo;
- regNumber oldHi;
- regNumber newLo;
- regNumber newHi;
+ regNumber oldLo;
+ regNumber oldHi;
+ regNumber newLo;
+ regNumber newHi;
/* Either a target set or a specific pair may be requested */
@@ -930,42 +882,41 @@ void CodeGen::genMoveRegPair(GenTreePtr tree,
/* Get hold of the current pair */
- oldPair = tree->gtRegPair; noway_assert(oldPair != newPair);
+ oldPair = tree->gtRegPair;
+ noway_assert(oldPair != newPair);
/* Are we supposed to move to a specific pair? */
- if (newPair != REG_PAIR_NONE)
+ if (newPair != REG_PAIR_NONE)
{
- regMaskTP oldMask = genRegPairMask(oldPair);
- regMaskTP loMask = genRegMask(genRegPairLo(newPair));
- regMaskTP hiMask = genRegMask(genRegPairHi(newPair));
- regMaskTP overlap = oldMask & (loMask|hiMask);
+ regMaskTP oldMask = genRegPairMask(oldPair);
+ regMaskTP loMask = genRegMask(genRegPairLo(newPair));
+ regMaskTP hiMask = genRegMask(genRegPairHi(newPair));
+ regMaskTP overlap = oldMask & (loMask | hiMask);
/* First lock any registers that are in both pairs */
- noway_assert((regSet.rsMaskUsed & overlap) == overlap);
- noway_assert((regSet.rsMaskLock & overlap) == 0);
- regSet.rsMaskLock |= overlap;
+ noway_assert((regSet.rsMaskUsed & overlap) == overlap);
+ noway_assert((regSet.rsMaskLock & overlap) == 0);
+ regSet.rsMaskLock |= overlap;
/* Make sure any additional registers we need are free */
- if ((loMask & regSet.rsMaskUsed) != 0 &&
- (loMask & oldMask ) == 0)
+ if ((loMask & regSet.rsMaskUsed) != 0 && (loMask & oldMask) == 0)
{
regSet.rsGrabReg(loMask);
}
- if ((hiMask & regSet.rsMaskUsed) != 0 &&
- (hiMask & oldMask ) == 0)
+ if ((hiMask & regSet.rsMaskUsed) != 0 && (hiMask & oldMask) == 0)
{
regSet.rsGrabReg(hiMask);
}
/* Unlock those registers we have temporarily locked */
- noway_assert((regSet.rsMaskUsed & overlap) == overlap);
- noway_assert((regSet.rsMaskLock & overlap) == overlap);
- regSet.rsMaskLock -= overlap;
+ noway_assert((regSet.rsMaskUsed & overlap) == overlap);
+ noway_assert((regSet.rsMaskLock & overlap) == overlap);
+ regSet.rsMaskLock -= overlap;
/* We can now free the old pair */
@@ -979,18 +930,13 @@ void CodeGen::genMoveRegPair(GenTreePtr tree,
}
// If grabbed pair is the same as old one we're done
- if (newPair==oldPair)
+ if (newPair == oldPair)
{
- noway_assert(
- (oldLo = genRegPairLo(oldPair),
- oldHi = genRegPairHi(oldPair),
- newLo = genRegPairLo(newPair),
- newHi = genRegPairHi(newPair),
- newLo != REG_STK && newHi != REG_STK));
+ noway_assert((oldLo = genRegPairLo(oldPair), oldHi = genRegPairHi(oldPair), newLo = genRegPairLo(newPair),
+ newHi = genRegPairHi(newPair), newLo != REG_STK && newHi != REG_STK));
return;
}
-
/* Move the values from the old pair into the new one */
oldLo = genRegPairLo(oldPair);
@@ -1002,7 +948,7 @@ void CodeGen::genMoveRegPair(GenTreePtr tree,
/* Careful - the register pairs might overlap */
- if (newLo == oldLo)
+ if (newLo == oldLo)
{
/* The low registers are identical, just move the upper half */
@@ -1013,7 +959,7 @@ void CodeGen::genMoveRegPair(GenTreePtr tree,
{
/* The low registers are different, are the upper ones the same? */
- if (newHi == oldHi)
+ if (newHi == oldHi)
{
/* Just move the lower half, then */
genMoveRegPairHalf(tree, newLo, oldLo, 0);
@@ -1022,11 +968,11 @@ void CodeGen::genMoveRegPair(GenTreePtr tree,
{
/* Both sets are different - is there an overlap? */
- if (newLo == oldHi)
+ if (newLo == oldHi)
{
/* Are high and low simply swapped ? */
- if (newHi == oldLo)
+ if (newHi == oldLo)
{
#ifdef _TARGET_ARM_
/* Let's use XOR swap to reduce register pressure. */
@@ -1058,7 +1004,7 @@ void CodeGen::genMoveRegPair(GenTreePtr tree,
/* Record the fact that we're switching to another pair */
- tree->gtRegPair = newPair;
+ tree->gtRegPair = newPair;
}
/*****************************************************************************
@@ -1070,18 +1016,15 @@ void CodeGen::genMoveRegPair(GenTreePtr tree,
* value ends up in as being used.
*/
-void CodeGen::genComputeRegPair(GenTreePtr tree,
- regPairNo needRegPair,
- regMaskTP avoidReg,
- RegSet::KeepReg keepReg,
- bool freeOnly)
+void CodeGen::genComputeRegPair(
+ GenTreePtr tree, regPairNo needRegPair, regMaskTP avoidReg, RegSet::KeepReg keepReg, bool freeOnly)
{
- regMaskTP regMask;
- regPairNo regPair;
- regMaskTP tmpMask;
- regMaskTP tmpUsedMask;
- regNumber rLo;
- regNumber rHi;
+ regMaskTP regMask;
+ regPairNo regPair;
+ regMaskTP tmpMask;
+ regMaskTP tmpUsedMask;
+ regNumber rLo;
+ regNumber rHi;
noway_assert(isRegPairType(tree->gtType));
@@ -1115,8 +1058,8 @@ void CodeGen::genComputeRegPair(GenTreePtr tree,
regPair = tree->gtRegPair;
tmpMask = genRegPairMask(regPair);
- rLo = genRegPairLo(regPair);
- rHi = genRegPairHi(regPair);
+ rLo = genRegPairLo(regPair);
+ rHi = genRegPairHi(regPair);
/* At least one half is in a real register */
@@ -1124,9 +1067,9 @@ void CodeGen::genComputeRegPair(GenTreePtr tree,
/* Did the value end up in an acceptable register pair? */
- if (needRegPair != REG_PAIR_NONE)
+ if (needRegPair != REG_PAIR_NONE)
{
- if (needRegPair != regPair)
+ if (needRegPair != regPair)
{
/* This is a workaround. If we specify a regPair for genMoveRegPair */
/* it expects the source pair being marked as used */
@@ -1134,11 +1077,11 @@ void CodeGen::genComputeRegPair(GenTreePtr tree,
genMoveRegPair(tree, 0, needRegPair);
}
}
- else if (freeOnly)
+ else if (freeOnly)
{
/* Do we have to end up with a free register pair?
Something might have gotten freed up above */
- bool mustMoveReg=false;
+ bool mustMoveReg = false;
regMask = regSet.rsRegMaskFree() & ~avoidReg;
@@ -1161,7 +1104,7 @@ void CodeGen::genComputeRegPair(GenTreePtr tree,
/* Did the value end up in a free register pair? */
- if (mustMoveReg)
+ if (mustMoveReg)
{
/* We'll have to move the value to a free (trashable) pair */
genMoveRegPair(tree, regMask, REG_PAIR_NONE);
@@ -1173,8 +1116,8 @@ void CodeGen::genComputeRegPair(GenTreePtr tree,
noway_assert(!freeOnly);
/* it is possible to have tmpMask also in the regSet.rsMaskUsed */
- tmpUsedMask = tmpMask & regSet.rsMaskUsed;
- tmpMask &= ~regSet.rsMaskUsed;
+ tmpUsedMask = tmpMask & regSet.rsMaskUsed;
+ tmpMask &= ~regSet.rsMaskUsed;
/* Make sure that the value is in "real" registers*/
if (rLo == REG_STK)
@@ -1219,7 +1162,7 @@ void CodeGen::genComputeRegPair(GenTreePtr tree,
/* Does the caller want us to mark the register as used? */
- if (keepReg == RegSet::KEEP_REG)
+ if (keepReg == RegSet::KEEP_REG)
{
/* In case we're computing a value into a register variable */
@@ -1238,9 +1181,7 @@ void CodeGen::genComputeRegPair(GenTreePtr tree,
*/
// inline
-void CodeGen::genCompIntoFreeRegPair(GenTreePtr tree,
- regMaskTP avoidReg,
- RegSet::KeepReg keepReg)
+void CodeGen::genCompIntoFreeRegPair(GenTreePtr tree, regMaskTP avoidReg, RegSet::KeepReg keepReg)
{
genComputeRegPair(tree, REG_PAIR_NONE, avoidReg, keepReg, true);
}
@@ -1252,9 +1193,9 @@ void CodeGen::genCompIntoFreeRegPair(GenTreePtr tree,
* pair).
*/
-void CodeGen::genReleaseRegPair(GenTreePtr tree)
+void CodeGen::genReleaseRegPair(GenTreePtr tree)
{
- if (tree->gtFlags & GTF_SPILLED)
+ if (tree->gtFlags & GTF_SPILLED)
{
/* The register has been spilled -- reload it */
@@ -1272,11 +1213,9 @@ void CodeGen::genReleaseRegPair(GenTreePtr tree)
* if 'keepReg' is 0, free the register pair.
*/
-void CodeGen::genRecoverRegPair(GenTreePtr tree,
- regPairNo regPair,
- RegSet::KeepReg keepReg)
+void CodeGen::genRecoverRegPair(GenTreePtr tree, regPairNo regPair, RegSet::KeepReg keepReg)
{
- if (tree->gtFlags & GTF_SPILLED)
+ if (tree->gtFlags & GTF_SPILLED)
{
regMaskTP regMask;
@@ -1292,7 +1231,7 @@ void CodeGen::genRecoverRegPair(GenTreePtr tree,
/* Does the caller insist on the value being in a specific place? */
- if (regPair != REG_PAIR_NONE && regPair != tree->gtRegPair)
+ if (regPair != REG_PAIR_NONE && regPair != tree->gtRegPair)
{
/* No good -- we'll have to move the value to a new place */
@@ -1300,7 +1239,7 @@ void CodeGen::genRecoverRegPair(GenTreePtr tree,
/* Mark the pair as used if appropriate */
- if (keepReg == RegSet::KEEP_REG)
+ if (keepReg == RegSet::KEEP_REG)
regSet.rsMarkRegPairUsed(tree);
return;
@@ -1308,7 +1247,7 @@ void CodeGen::genRecoverRegPair(GenTreePtr tree,
/* Free the register pair if the caller desired so */
- if (keepReg == RegSet::FREE_REG)
+ if (keepReg == RegSet::FREE_REG)
regSet.rsMarkRegFree(genRegPairMask(tree->gtRegPair));
}
@@ -1319,7 +1258,7 @@ void CodeGen::genRecoverRegPair(GenTreePtr tree,
*/
// inline
-void CodeGen::genEvalIntoFreeRegPair(GenTreePtr tree, regPairNo regPair, regMaskTP avoidReg)
+void CodeGen::genEvalIntoFreeRegPair(GenTreePtr tree, regPairNo regPair, regMaskTP avoidReg)
{
genComputeRegPair(tree, regPair, avoidReg, RegSet::KEEP_REG);
genRecoverRegPair(tree, regPair, RegSet::FREE_REG);
@@ -1334,17 +1273,17 @@ void CodeGen::genEvalIntoFreeRegPair(GenTreePtr tree, regPairNo regPair,
*/
// inline
-void CodeGen::genMakeRegPairAvailable(regPairNo regPair)
+void CodeGen::genMakeRegPairAvailable(regPairNo regPair)
{
/* Make sure the target of the store is available */
- regNumber regLo = genRegPairLo(regPair);
- regNumber regHi = genRegPairHi(regPair);
+ regNumber regLo = genRegPairLo(regPair);
+ regNumber regHi = genRegPairHi(regPair);
- if ((regHi != REG_STK) && (regSet.rsMaskUsed & genRegMask(regHi)))
+ if ((regHi != REG_STK) && (regSet.rsMaskUsed & genRegMask(regHi)))
regSet.rsSpillReg(regHi);
- if ((regLo != REG_STK) && (regSet.rsMaskUsed & genRegMask(regLo)))
+ if ((regLo != REG_STK) && (regSet.rsMaskUsed & genRegMask(regLo)))
regSet.rsSpillReg(regLo);
}
@@ -1353,12 +1292,12 @@ void CodeGen::genMakeRegPairAvailable(regPairNo regPair)
*
* Return true if the given tree 'addr' can be computed via an addressing mode,
* such as "[ebx+esi*4+20]". If the expression isn't an address mode already
- * try to make it so (but we don't try 'too hard' to accomplish this).
+ * try to make it so (but we don't try 'too hard' to accomplish this).
*
* If we end up needing a register (or two registers) to hold some part(s) of the
* address, we return the use register mask via '*useMaskPtr'.
*
- * If keepReg==RegSet::KEEP_REG, the registers (viz. *useMaskPtr) will be marked as
+ * If keepReg==RegSet::KEEP_REG, the registers (viz. *useMaskPtr) will be marked as
* in use. The caller would then be responsible for calling
* regSet.rsMarkRegFree(*useMaskPtr).
*
@@ -1366,66 +1305,60 @@ void CodeGen::genMakeRegPairAvailable(regPairNo regPair)
* calling genDoneAddressable(addr, *useMaskPtr, RegSet::FREE_REG);
*/
-bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
- GenTreePtr oper,
- bool forLea,
- regMaskTP regMask,
- RegSet::KeepReg keepReg,
- regMaskTP * useMaskPtr,
- bool deferOK)
+bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
+ GenTreePtr oper,
+ bool forLea,
+ regMaskTP regMask,
+ RegSet::KeepReg keepReg,
+ regMaskTP* useMaskPtr,
+ bool deferOK)
{
if (addr->gtOper == GT_ARR_ELEM)
{
- regMaskTP regs = genMakeAddrArrElem(addr, oper, RBM_ALLINT, keepReg);
- *useMaskPtr = regs;
+ regMaskTP regs = genMakeAddrArrElem(addr, oper, RBM_ALLINT, keepReg);
+ *useMaskPtr = regs;
return true;
}
- bool rev;
- GenTreePtr rv1;
- GenTreePtr rv2;
- bool operIsArrIndex; // is oper an array index
- GenTreePtr scaledIndex; // If scaled addressing mode can't be used
+ bool rev;
+ GenTreePtr rv1;
+ GenTreePtr rv2;
+ bool operIsArrIndex; // is oper an array index
+ GenTreePtr scaledIndex; // If scaled addressing mode can't be used
+
+ regMaskTP anyMask = RBM_ALLINT;
- regMaskTP anyMask = RBM_ALLINT;
+ unsigned cns;
+ unsigned mul;
- unsigned cns;
- unsigned mul;
+ GenTreePtr tmp;
+ int ixv = INT_MAX; // unset value
- GenTreePtr tmp;
- int ixv = INT_MAX; // unset value
-
- GenTreePtr scaledIndexVal;
+ GenTreePtr scaledIndexVal;
- regMaskTP newLiveMask;
- regMaskTP rv1Mask;
- regMaskTP rv2Mask;
+ regMaskTP newLiveMask;
+ regMaskTP rv1Mask;
+ regMaskTP rv2Mask;
/* Deferred address mode forming NYI for x86 */
-
noway_assert(deferOK == false);
- noway_assert(oper == NULL
- || ((oper->OperIsIndir() || oper->OperIsAtomicOp())
- &&
- ((oper->gtOper == GT_CMPXCHG && oper->gtCmpXchg.gtOpLocation == addr)
- || oper->gtOp.gtOp1 == addr)));
+ noway_assert(oper == NULL ||
+ ((oper->OperIsIndir() || oper->OperIsAtomicOp()) &&
+ ((oper->gtOper == GT_CMPXCHG && oper->gtCmpXchg.gtOpLocation == addr) || oper->gtOp.gtOp1 == addr)));
operIsArrIndex = (oper != nullptr && oper->OperGet() == GT_IND && (oper->gtFlags & GTF_IND_ARR_INDEX) != 0);
if (addr->gtOper == GT_LEA)
{
- rev = (addr->gtFlags & GTF_REVERSE_OPS) != 0;
- GenTreeAddrMode * lea = addr->AsAddrMode();
- rv1 = lea->Base();
- rv2 = lea->Index();
- mul = lea->gtScale;
- cns = lea->gtOffset;
+ rev = (addr->gtFlags & GTF_REVERSE_OPS) != 0;
+ GenTreeAddrMode* lea = addr->AsAddrMode();
+ rv1 = lea->Base();
+ rv2 = lea->Index();
+ mul = lea->gtScale;
+ cns = lea->gtOffset;
- if (rv1 != NULL &&
- rv2 == NULL &&
- cns == 0 &&
- (rv1->gtFlags & GTF_REG_VAL) != 0)
+ if (rv1 != NULL && rv2 == NULL && cns == 0 && (rv1->gtFlags & GTF_REG_VAL) != 0)
{
scaledIndex = NULL;
goto YES;
@@ -1433,58 +1366,56 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
}
else
{
- // NOTE: FOR NOW THIS ISN'T APPROPRIATELY INDENTED - THIS IS TO MAKE IT
- // EASIER TO MERGE
+ // NOTE: FOR NOW THIS ISN'T APPROPRIATELY INDENTED - THIS IS TO MAKE IT
+ // EASIER TO MERGE
- /* Is the complete address already sitting in a register? */
+ /* Is the complete address already sitting in a register? */
- if ((addr->gtFlags & GTF_REG_VAL) ||
- (addr->gtOper == GT_LCL_VAR && genMarkLclVar(addr)))
- {
- genUpdateLife(addr);
+ if ((addr->gtFlags & GTF_REG_VAL) || (addr->gtOper == GT_LCL_VAR && genMarkLclVar(addr)))
+ {
+ genUpdateLife(addr);
- rv1 = addr;
- rv2 = scaledIndex = 0;
- cns = 0;
+ rv1 = addr;
+ rv2 = scaledIndex = 0;
+ cns = 0;
- goto YES;
- }
+ goto YES;
+ }
- /* Is it an absolute address */
+ /* Is it an absolute address */
- if (addr->IsCnsIntOrI())
- {
- rv1 = rv2 = scaledIndex = 0;
- // along this code path cns is never used, so place a BOGUS value in it as proof
- // cns = addr->gtIntCon.gtIconVal;
- cns = UINT_MAX;
+ if (addr->IsCnsIntOrI())
+ {
+ rv1 = rv2 = scaledIndex = 0;
+ // along this code path cns is never used, so place a BOGUS value in it as proof
+ // cns = addr->gtIntCon.gtIconVal;
+ cns = UINT_MAX;
- goto YES;
- }
+ goto YES;
+ }
- /* Is there a chance of forming an address mode? */
+ /* Is there a chance of forming an address mode? */
- if (!genCreateAddrMode(addr, forLea ? 1 : 0, false, regMask, &rev, &rv1, &rv2, &mul, &cns))
- {
- /* This better not be an array index */
- noway_assert(!operIsArrIndex);
+ if (!genCreateAddrMode(addr, forLea ? 1 : 0, false, regMask, &rev, &rv1, &rv2, &mul, &cns))
+ {
+ /* This better not be an array index */
+ noway_assert(!operIsArrIndex);
- return false;
- }
- // THIS IS THE END OF THE INAPPROPRIATELY INDENTED SECTION
+ return false;
+ }
+ // THIS IS THE END OF THE INAPPROPRIATELY INDENTED SECTION
}
- /* For scaled array access, RV2 may not be pointing to the index of the
- array if the CPU does not support the needed scaling factor. We will
- make it point to the actual index, and scaledIndex will point to
- the scaled value */
+ /* For scaled array access, RV2 may not be pointing to the index of the
+ array if the CPU does not support the needed scaling factor. We will
+ make it point to the actual index, and scaledIndex will point to
+ the scaled value */
- scaledIndex = NULL;
+ scaledIndex = NULL;
scaledIndexVal = NULL;
- if (operIsArrIndex && rv2 != NULL
- && (rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH)
- && rv2->gtOp.gtOp2->IsIntCnsFitsInI32())
+ if (operIsArrIndex && rv2 != NULL && (rv2->gtOper == GT_MUL || rv2->gtOper == GT_LSH) &&
+ rv2->gtOp.gtOp2->IsIntCnsFitsInI32())
{
scaledIndex = rv2;
compiler->optGetArrayRefScaleAndIndex(scaledIndex, &scaledIndexVal DEBUGARG(true));
@@ -1494,10 +1425,10 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
/* Has the address already been computed? */
- if (addr->gtFlags & GTF_REG_VAL)
+ if (addr->gtFlags & GTF_REG_VAL)
{
- if (forLea)
- return true;
+ if (forLea)
+ return true;
rv1 = addr;
rv2 = NULL;
@@ -1529,7 +1460,7 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
// Trivial case : Is either rv1 or rv2 a NULL ?
- if (!rv2)
+ if (!rv2)
{
/* A single operand, make sure it's in a register */
@@ -1559,13 +1490,12 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
noway_assert(rv1 && rv2);
-
/* If we have to check a constant array index, compare it against
the array dimension (see below) but then fold the index with a
scaling factor (if any) and additional offset (if any).
*/
- if (rv2->gtOper == GT_CNS_INT || (scaledIndex != NULL && scaledIndexVal->gtOper == GT_CNS_INT))
+ if (rv2->gtOper == GT_CNS_INT || (scaledIndex != NULL && scaledIndexVal->gtOper == GT_CNS_INT))
{
if (scaledIndex != NULL)
{
@@ -1578,7 +1508,7 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
/* Get hold of the index value and see if it's a constant */
- if (rv2->IsIntCnsFitsInI32())
+ if (rv2->IsIntCnsFitsInI32())
{
ixv = (int)rv2->gtIntCon.gtIconVal;
// Maybe I should just set "fold" true in the call to genMakeAddressable above.
@@ -1599,7 +1529,7 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
/* Add the scaled index into the added value */
- if (mul)
+ if (mul)
cns += ixv * mul;
else
cns += ixv;
@@ -1612,11 +1542,11 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
}
}
- if (rv1->gtFlags & GTF_REG_VAL)
+ if (rv1->gtFlags & GTF_REG_VAL)
{
/* op1 already in register - how about op2? */
- if (rv2->gtFlags & GTF_REG_VAL)
+ if (rv2->gtFlags & GTF_REG_VAL)
{
/* Great - both operands are in registers already. Just update
the liveness and we are done. */
@@ -1671,7 +1601,7 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
{
// Free up rv2 in the right fashion (it might be re-marked if keepReg)
regSet.rsMarkRegUsed(rv1, oper);
- regSet.rsLockUsedReg (genRegMask(rv1->gtRegNum));
+ regSet.rsLockUsedReg(genRegMask(rv1->gtRegNum));
genReleaseReg(rv2);
regSet.rsUnlockUsedReg(genRegMask(rv1->gtRegNum));
genReleaseReg(rv1);
@@ -1687,19 +1617,19 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
goto DONE_REGS;
}
- if (forLea && !cns)
- return false;
+ if (forLea && !cns)
+ return false;
/* Make sure we preserve the correct operand order */
- if (rev)
+ if (rev)
{
/* Generate the second operand first */
// Determine what registers go live between rv2 and rv1
newLiveMask = genNewLiveRegMask(rv2, rv1);
- rv2Mask = regMask & ~newLiveMask;
+ rv2Mask = regMask & ~newLiveMask;
rv2Mask &= ~rv1->gtRsvdRegs;
if (rv2Mask == RBM_NONE)
@@ -1709,7 +1639,7 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
// so ignore the regMask hint, but try to avoid using
// the registers in newLiveMask and the rv1->gtRsvdRegs
//
- rv2Mask = RBM_ALLINT & ~newLiveMask;
+ rv2Mask = RBM_ALLINT & ~newLiveMask;
rv2Mask = regSet.rsMustExclude(rv2Mask, rv1->gtRsvdRegs);
}
@@ -1726,7 +1656,7 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
/* Free up both operands in the right order (they might be
re-marked as used below)
*/
- regSet.rsLockUsedReg (genRegMask(rv1->gtRegNum));
+ regSet.rsLockUsedReg(genRegMask(rv1->gtRegNum));
genReleaseReg(rv2);
regSet.rsUnlockUsedReg(genRegMask(rv1->gtRegNum));
genReleaseReg(rv1);
@@ -1738,9 +1668,9 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
// Determine what registers go live between rv1 and rv2
newLiveMask = genNewLiveRegMask(rv1, rv2);
- rv1Mask = regMask & ~newLiveMask;
+ rv1Mask = regMask & ~newLiveMask;
rv1Mask &= ~rv2->gtRsvdRegs;
-
+
if (rv1Mask == RBM_NONE)
{
// The regMask hint cannot be honored
@@ -1784,35 +1714,37 @@ bool CodeGen::genMakeIndAddrMode(GenTreePtr addr,
/* Even though we have not explicitly marked rv2 as used,
rv2->gtRegNum may be used if rv2 is a multi-use or
an enregistered variable. */
- regMaskTP rv2Used;
- regSet.rsLockReg (genRegMask(rv2->gtRegNum), &rv2Used);
+ regMaskTP rv2Used;
+ regSet.rsLockReg(genRegMask(rv2->gtRegNum), &rv2Used);
/* Check for special case both rv1 and rv2 are the same register */
if (rv2Used != genRegMask(rv1->gtRegNum))
{
genReleaseReg(rv1);
- regSet.rsUnlockReg(genRegMask(rv2->gtRegNum), rv2Used);
+ regSet.rsUnlockReg(genRegMask(rv2->gtRegNum), rv2Used);
}
else
{
- regSet.rsUnlockReg(genRegMask(rv2->gtRegNum), rv2Used);
+ regSet.rsUnlockReg(genRegMask(rv2->gtRegNum), rv2Used);
genReleaseReg(rv1);
}
}
}
- /*-------------------------------------------------------------------------
- *
- * At this point, both rv1 and rv2 (if present) are in registers
- *
- */
+/*-------------------------------------------------------------------------
+ *
+ * At this point, both rv1 and rv2 (if present) are in registers
+ *
+ */
DONE_REGS:
/* We must verify that 'rv1' and 'rv2' are both sitting in registers */
- if (rv1 && !(rv1->gtFlags & GTF_REG_VAL)) return false;
- if (rv2 && !(rv2->gtFlags & GTF_REG_VAL)) return false;
+ if (rv1 && !(rv1->gtFlags & GTF_REG_VAL))
+ return false;
+ if (rv2 && !(rv2->gtFlags & GTF_REG_VAL))
+ return false;
YES:
@@ -1821,8 +1753,7 @@ YES:
// needs to know that it has to call rsFreeReg(reg1) twice. We can't do
// that currently as we return a single mask in useMaskPtr.
- if ((keepReg == RegSet::KEEP_REG) && oper && rv1 && rv2 &&
- (rv1->gtFlags & rv2->gtFlags & GTF_REG_VAL))
+ if ((keepReg == RegSet::KEEP_REG) && oper && rv1 && rv2 && (rv1->gtFlags & rv2->gtFlags & GTF_REG_VAL))
{
if (rv1->gtRegNum == rv2->gtRegNum)
{
@@ -1833,7 +1764,7 @@ YES:
/* Check either register operand to see if it needs to be saved */
- if (rv1)
+ if (rv1)
{
noway_assert(rv1->gtFlags & GTF_REG_VAL);
@@ -1849,7 +1780,7 @@ YES:
}
}
- if (rv2)
+ if (rv2)
{
noway_assert(rv2->gtFlags & GTF_REG_VAL);
@@ -1857,19 +1788,19 @@ YES:
regSet.rsMarkRegUsed(rv2, oper);
}
- if (deferOK)
+ if (deferOK)
{
noway_assert(!scaledIndex);
- return true;
+ return true;
}
/* Compute the set of registers the address depends on */
- regMaskTP useMask = RBM_NONE;
+ regMaskTP useMask = RBM_NONE;
if (rv1)
{
- if (rv1->gtFlags & GTF_SPILLED)
+ if (rv1->gtFlags & GTF_SPILLED)
regSet.rsUnspillReg(rv1, 0, RegSet::KEEP_REG);
noway_assert(rv1->gtFlags & GTF_REG_VAL);
@@ -1878,13 +1809,13 @@ YES:
if (rv2)
{
- if (rv2->gtFlags & GTF_SPILLED)
+ if (rv2->gtFlags & GTF_SPILLED)
{
if (rv1)
{
- regMaskTP lregMask = genRegMask(rv1->gtRegNum);
- regMaskTP used;
-
+ regMaskTP lregMask = genRegMask(rv1->gtRegNum);
+ regMaskTP used;
+
regSet.rsLockReg(lregMask, &used);
regSet.rsUnspillReg(rv2, 0, RegSet::KEEP_REG);
regSet.rsUnlockReg(lregMask, used);
@@ -1908,14 +1839,14 @@ YES:
* 'oper' is an array bounds check (a GT_ARR_BOUNDS_CHECK node).
*/
-void CodeGen::genRangeCheck(GenTreePtr oper)
+void CodeGen::genRangeCheck(GenTreePtr oper)
{
noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
- GenTreePtr arrLen = bndsChk->gtArrLen;
- GenTreePtr arrRef = NULL;
- int lenOffset = 0;
+ GenTreePtr arrLen = bndsChk->gtArrLen;
+ GenTreePtr arrRef = NULL;
+ int lenOffset = 0;
// If "arrLen" is a ARR_LENGTH operation, get the array whose length that takes in a register.
// Otherwise, if the length is not a constant, get it (the length, not the arr reference) in
@@ -1924,7 +1855,7 @@ void CodeGen::genRangeCheck(GenTreePtr oper)
if (arrLen->OperGet() == GT_ARR_LENGTH)
{
GenTreeArrLen* arrLenExact = arrLen->AsArrLen();
- lenOffset = arrLenExact->ArrLenOffset();
+ lenOffset = arrLenExact->ArrLenOffset();
#if !CPU_LOAD_STORE_ARCH && !defined(_TARGET_64BIT_)
// We always load the length into a register on ARM and x64.
@@ -1950,7 +1881,7 @@ void CodeGen::genRangeCheck(GenTreePtr oper)
}
#endif
- // If we didn't find one of the special forms above, generate code to evaluate the array length to a register.
+ // If we didn't find one of the special forms above, generate code to evaluate the array length to a register.
if (arrRef == NULL)
{
// (Unless it's a constant.)
@@ -1966,7 +1897,7 @@ void CodeGen::genRangeCheck(GenTreePtr oper)
/* Is the array index a constant value? */
GenTreePtr index = bndsChk->gtIndex;
- if (!index->IsCnsIntOrI())
+ if (!index->IsCnsIntOrI())
{
// No, it's not a constant.
genCodeForTree(index, RBM_ALLINT);
@@ -1983,12 +1914,14 @@ void CodeGen::genRangeCheck(GenTreePtr oper)
noway_assert(index->gtFlags & GTF_REG_VAL);
noway_assert(regSet.rsMaskUsed & genRegMask(index->gtRegNum));
- noway_assert(index->TypeGet() == TYP_I_IMPL || (varTypeIsIntegral(index->TypeGet()) && !varTypeIsLong(index->TypeGet())));
+ noway_assert(index->TypeGet() == TYP_I_IMPL ||
+ (varTypeIsIntegral(index->TypeGet()) && !varTypeIsLong(index->TypeGet())));
var_types indxType = index->TypeGet();
- if (indxType != TYP_I_IMPL) indxType = TYP_INT;
+ if (indxType != TYP_I_IMPL)
+ indxType = TYP_INT;
if (arrRef != NULL)
- { // _TARGET_X86_ or X64 when we have a TYP_INT (32-bit) index expression in the index register
+ { // _TARGET_X86_ or X64 when we have a TYP_INT (32-bit) index expression in the index register
/* Generate "cmp index, [arrRef+LenOffs]" */
inst_RV_AT(INS_cmp, emitTypeSize(indxType), indxType, index->gtRegNum, arrRef, lenOffset);
@@ -2015,7 +1948,7 @@ void CodeGen::genRangeCheck(GenTreePtr oper)
bool indIsInt = true;
#ifdef _TARGET_64BIT_
- int ixv = 0;
+ int ixv = 0;
ssize_t ixvFull = index->AsIntConCommon()->IconValue();
if (ixvFull > INT32_MAX)
{
@@ -2027,10 +1960,10 @@ void CodeGen::genRangeCheck(GenTreePtr oper)
}
#else
ssize_t ixvFull = index->AsIntConCommon()->IconValue();
- int ixv = (int)ixvFull;
+ int ixv = (int)ixvFull;
#endif
if (arrRef != NULL && indIsInt)
- { // _TARGET_X86_ or X64 when we have a TYP_INT (32-bit) index expression in the index register
+ { // _TARGET_X86_ or X64 when we have a TYP_INT (32-bit) index expression in the index register
/* Generate "cmp [arrRef+LenOffs], ixv" */
inst_AT_IV(INS_cmp, EA_4BYTE, arrRef, ixv, lenOffset);
// Generate "jbe <fail_label>"
@@ -2052,7 +1985,7 @@ void CodeGen::genRangeCheck(GenTreePtr oper)
}
else
{
- /* Generate "cmp arrLen, ixv" */
+ /* Generate "cmp arrLen, ixv" */
inst_RV_IV(INS_cmp, arrLen->gtRegNum, ixv, EA_4BYTE);
// Generate "jbe <fail_label>"
emitJumpKind jmpLEU = genJumpKindForOper(GT_LE, CK_UNSIGNED);
@@ -2070,7 +2003,7 @@ void CodeGen::genRangeCheck(GenTreePtr oper)
regSet.rsMarkRegFree(arrLen->gtRegNum, arrLen);
}
- if (!index->IsCnsIntOrI())
+ if (!index->IsCnsIntOrI())
{
regSet.rsMarkRegFree(index->gtRegNum, index);
}
@@ -2085,11 +2018,8 @@ void CodeGen::genRangeCheck(GenTreePtr oper)
*/
// inline
-regMaskTP CodeGen::genMakeRvalueAddressable(GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg,
- bool forLoadStore,
- bool smallOK)
+regMaskTP CodeGen::genMakeRvalueAddressable(
+ GenTreePtr tree, regMaskTP needReg, RegSet::KeepReg keepReg, bool forLoadStore, bool smallOK)
{
regNumber reg;
@@ -2114,10 +2044,9 @@ regMaskTP CodeGen::genMakeRvalueAddressable(GenTreePtr tree,
/*****************************************************************************/
-
-bool CodeGen::genIsLocalLastUse (GenTreePtr tree)
+bool CodeGen::genIsLocalLastUse(GenTreePtr tree)
{
- const LclVarDsc * varDsc = &compiler->lvaTable[tree->gtLclVarCommon.gtLclNum];
+ const LclVarDsc* varDsc = &compiler->lvaTable[tree->gtLclVarCommon.gtLclNum];
noway_assert(tree->OperGet() == GT_LCL_VAR);
noway_assert(varDsc->lvTracked);
@@ -2125,7 +2054,6 @@ bool CodeGen::genIsLocalLastUse (GenTreePtr tree)
return ((tree->gtFlags & GTF_VAR_DEATH) != 0);
}
-
/*****************************************************************************
*
* This is genMakeAddressable(GT_ARR_ELEM).
@@ -2139,10 +2067,7 @@ bool CodeGen::genIsLocalLastUse (GenTreePtr tree)
* where to look for the offset to use.
*/
-regMaskTP CodeGen::genMakeAddrArrElem(GenTreePtr arrElem,
- GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg)
+regMaskTP CodeGen::genMakeAddrArrElem(GenTreePtr arrElem, GenTreePtr tree, regMaskTP needReg, RegSet::KeepReg keepReg)
{
noway_assert(arrElem->gtOper == GT_ARR_ELEM);
noway_assert(!tree || tree->gtOper == GT_IND || tree == arrElem);
@@ -2160,15 +2085,15 @@ regMaskTP CodeGen::genMakeAddrArrElem(GenTreePtr arrElem,
       applies to all types of tree nodes except for GT_ARR_ELEM.
*/
- GenTreePtr arrObj = arrElem->gtArrElem.gtArrObj;
- unsigned rank = arrElem->gtArrElem.gtArrRank;
- var_types elemType = arrElem->gtArrElem.gtArrElemType;
- regMaskTP addrReg = RBM_NONE;
- regMaskTP regNeed = RBM_ALLINT;
+ GenTreePtr arrObj = arrElem->gtArrElem.gtArrObj;
+ unsigned rank = arrElem->gtArrElem.gtArrRank;
+ var_types elemType = arrElem->gtArrElem.gtArrElemType;
+ regMaskTP addrReg = RBM_NONE;
+ regMaskTP regNeed = RBM_ALLINT;
#if FEATURE_WRITE_BARRIER && !NOGC_WRITE_BARRIERS
// In CodeGen::WriteBarrier we set up ARG_1 followed by ARG_0
- // since the arrObj participates in the lea/add instruction
+ // since the arrObj participates in the lea/add instruction
// that computes ARG_0 we should avoid putting it in ARG_1
//
if (varTypeIsGC(elemType))
@@ -2188,9 +2113,7 @@ regMaskTP CodeGen::genMakeAddrArrElem(GenTreePtr arrElem,
// it can be collected from here on. This is not an issue for locals that are
    // in a register, as they get marked as used and will be tracked.
// The bug that caused this is #100776. (untracked vars?)
- if (arrObj->OperGet() == GT_LCL_VAR &&
- compiler->optIsTrackedLocal(arrObj) &&
- genIsLocalLastUse(arrObj) &&
+ if (arrObj->OperGet() == GT_LCL_VAR && compiler->optIsTrackedLocal(arrObj) && genIsLocalLastUse(arrObj) &&
!genMarkLclVar(arrObj))
{
genCodeForTree(arrObj, regNeed);
@@ -2199,13 +2122,11 @@ regMaskTP CodeGen::genMakeAddrArrElem(GenTreePtr arrElem,
}
else
{
- addrReg = genMakeAddressable2(arrObj,
- regNeed,
- RegSet::KEEP_REG,
- true, // forLoadStore
- false, // smallOK
- false, // deferOK
- true); // evalSideEffs
+ addrReg = genMakeAddressable2(arrObj, regNeed, RegSet::KEEP_REG,
+ true, // forLoadStore
+ false, // smallOK
+ false, // deferOK
+ true); // evalSideEffs
}
unsigned dim;
@@ -2217,19 +2138,19 @@ regMaskTP CodeGen::genMakeAddrArrElem(GenTreePtr arrElem,
addrReg = genKeepAddressable(arrObj, addrReg);
genComputeAddressable(arrObj, addrReg, RegSet::KEEP_REG, regNeed, RegSet::KEEP_REG);
- regNumber arrReg = arrObj->gtRegNum;
- regMaskTP arrRegMask = genRegMask(arrReg);
- regMaskTP indRegMask = RBM_ALLINT & ~arrRegMask;
+ regNumber arrReg = arrObj->gtRegNum;
+ regMaskTP arrRegMask = genRegMask(arrReg);
+ regMaskTP indRegMask = RBM_ALLINT & ~arrRegMask;
regSet.rsLockUsedReg(arrRegMask);
/* Now process all the indices, do the range check, and compute
the offset of the element */
- regNumber accReg = DUMMY_INIT(REG_CORRUPT); // accumulates the offset calculation
+ regNumber accReg = DUMMY_INIT(REG_CORRUPT); // accumulates the offset calculation
for (dim = 0; dim < rank; dim++)
{
- GenTreePtr index = arrElem->gtArrElem.gtArrInds[dim];
+ GenTreePtr index = arrElem->gtArrElem.gtArrInds[dim];
/* Get the index into a free register (other than the register holding the array) */
@@ -2238,42 +2159,24 @@ regMaskTP CodeGen::genMakeAddrArrElem(GenTreePtr arrElem,
#if CPU_LOAD_STORE_ARCH
/* Subtract the lower bound, and do the range check */
- regNumber valueReg = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(arrReg) & ~genRegMask(index->gtRegNum));
- getEmitter()->emitIns_R_AR(
- INS_ldr, EA_4BYTE,
- valueReg,
- arrReg,
- compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * (dim + rank));
+ regNumber valueReg = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(arrReg) & ~genRegMask(index->gtRegNum));
+ getEmitter()->emitIns_R_AR(INS_ldr, EA_4BYTE, valueReg, arrReg,
+ compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * (dim + rank));
regTracker.rsTrackRegTrash(valueReg);
- getEmitter()->emitIns_R_R(
- INS_sub, EA_4BYTE,
- index->gtRegNum,
- valueReg);
+ getEmitter()->emitIns_R_R(INS_sub, EA_4BYTE, index->gtRegNum, valueReg);
regTracker.rsTrackRegTrash(index->gtRegNum);
- getEmitter()->emitIns_R_AR(
- INS_ldr, EA_4BYTE,
- valueReg,
- arrReg,
- compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * dim);
- getEmitter()->emitIns_R_R(
- INS_cmp, EA_4BYTE,
- index->gtRegNum,
- valueReg);
+ getEmitter()->emitIns_R_AR(INS_ldr, EA_4BYTE, valueReg, arrReg,
+ compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * dim);
+ getEmitter()->emitIns_R_R(INS_cmp, EA_4BYTE, index->gtRegNum, valueReg);
#else
/* Subtract the lower bound, and do the range check */
- getEmitter()->emitIns_R_AR(
- INS_sub, EA_4BYTE,
- index->gtRegNum,
- arrReg,
- compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * (dim + rank));
+ getEmitter()->emitIns_R_AR(INS_sub, EA_4BYTE, index->gtRegNum, arrReg,
+ compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * (dim + rank));
regTracker.rsTrackRegTrash(index->gtRegNum);
- getEmitter()->emitIns_R_AR(
- INS_cmp, EA_4BYTE,
- index->gtRegNum,
- arrReg,
- compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * dim);
+ getEmitter()->emitIns_R_AR(INS_cmp, EA_4BYTE, index->gtRegNum, arrReg,
+ compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * dim);
#endif
emitJumpKind jmpGEU = genJumpKindForOper(GT_GE, CK_UNSIGNED);
genJumpToThrowHlpBlk(jmpGEU, SCK_RNGCHK_FAIL);
@@ -2293,22 +2196,13 @@ regMaskTP CodeGen::genMakeAddrArrElem(GenTreePtr arrElem,
noway_assert(accReg != DUMMY_INIT(REG_CORRUPT));
#if CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_R_AR(
- INS_ldr, EA_4BYTE,
- valueReg,
- arrReg,
- compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * dim);
+ getEmitter()->emitIns_R_AR(INS_ldr, EA_4BYTE, valueReg, arrReg,
+ compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * dim);
regTracker.rsTrackRegTrash(valueReg);
- getEmitter()->emitIns_R_R(
- INS_MUL, EA_4BYTE,
- accReg,
- valueReg);
+ getEmitter()->emitIns_R_R(INS_MUL, EA_4BYTE, accReg, valueReg);
#else
- getEmitter()->emitIns_R_AR(
- INS_MUL, EA_4BYTE,
- accReg,
- arrReg,
- compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * dim);
+ getEmitter()->emitIns_R_AR(INS_MUL, EA_4BYTE, accReg, arrReg,
+ compiler->eeGetArrayDataOffset(elemType) + sizeof(int) * dim);
#endif
inst_RV_RV(INS_add, accReg, index->gtRegNum);
@@ -2336,7 +2230,7 @@ regMaskTP CodeGen::genMakeAddrArrElem(GenTreePtr arrElem,
/* We mark the addressability registers on arrObj and gtArrInds[0].
instGetAddrMode() knows to work with this. */
- regSet.rsMarkRegUsed(arrObj, tree);
+ regSet.rsMarkRegUsed(arrObj, tree);
regSet.rsMarkRegUsed(arrElem->gtArrElem.gtArrInds[0], tree);
}
@@ -2379,18 +2273,15 @@ regMaskTP CodeGen::genMakeAddrArrElem(GenTreePtr arrElem,
* to free the addressability registers.
*/
-regMaskTP CodeGen::genMakeAddressable(GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg,
- bool smallOK,
- bool deferOK)
+regMaskTP CodeGen::genMakeAddressable(
+ GenTreePtr tree, regMaskTP needReg, RegSet::KeepReg keepReg, bool smallOK, bool deferOK)
{
- GenTreePtr addr = NULL;
- regMaskTP regMask;
+ GenTreePtr addr = NULL;
+ regMaskTP regMask;
/* Is the value simply sitting in a register? */
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
genUpdateLife(tree);
@@ -2402,7 +2293,6 @@ regMaskTP CodeGen::genMakeAddressable(GenTreePtr tree,
// TODO: since stack temps are always addressable. This would require
// TODO: recording the fact that a particular tree is in a stack temp.
-
/* byte/char/short operand -- is this acceptable to the caller? */
if (varTypeIsSmall(tree->TypeGet()) && !smallOK)
@@ -2413,85 +2303,79 @@ regMaskTP CodeGen::genMakeAddressable(GenTreePtr tree,
switch (tree->gtOper)
{
- case GT_LCL_FLD:
-
- // We only use GT_LCL_FLD for lvDoNotEnregister vars, so we don't have
- // to worry about it being enregistered.
- noway_assert(compiler->lvaTable[tree->gtLclFld.gtLclNum].lvRegister == 0);
-
- genUpdateLife(tree);
- return 0;
-
+ case GT_LCL_FLD:
- case GT_LCL_VAR:
+ // We only use GT_LCL_FLD for lvDoNotEnregister vars, so we don't have
+ // to worry about it being enregistered.
+ noway_assert(compiler->lvaTable[tree->gtLclFld.gtLclNum].lvRegister == 0);
- if (!genMarkLclVar(tree))
- {
genUpdateLife(tree);
return 0;
- }
- __fallthrough; // it turns out the variable lives in a register
+ case GT_LCL_VAR:
- case GT_REG_VAR:
+ if (!genMarkLclVar(tree))
+ {
+ genUpdateLife(tree);
+ return 0;
+ }
- genUpdateLife(tree);
+ __fallthrough; // it turns out the variable lives in a register
- goto GOT_VAL;
+ case GT_REG_VAR:
+
+ genUpdateLife(tree);
- case GT_CLS_VAR:
+ goto GOT_VAL;
- return 0;
+ case GT_CLS_VAR:
- case GT_CNS_INT:
+ return 0;
+
+ case GT_CNS_INT:
#ifdef _TARGET_64BIT_
- // Non-relocs will be sign extended, so we don't have to enregister
- // constants that are equivalent to a sign-extended int.
- // Relocs can be left alone if they are RIP-relative.
- if ((genTypeSize(tree->TypeGet()) > 4) && (!tree->IsIntCnsFitsInI32() ||
- (tree->IsIconHandle() &&
- (IMAGE_REL_BASED_REL32 != compiler->eeGetRelocTypeHint((void*)tree->gtIntCon.gtIconVal)))))
- {
- break;
- }
+ // Non-relocs will be sign extended, so we don't have to enregister
+ // constants that are equivalent to a sign-extended int.
+ // Relocs can be left alone if they are RIP-relative.
+ if ((genTypeSize(tree->TypeGet()) > 4) &&
+ (!tree->IsIntCnsFitsInI32() ||
+ (tree->IsIconHandle() &&
+ (IMAGE_REL_BASED_REL32 != compiler->eeGetRelocTypeHint((void*)tree->gtIntCon.gtIconVal)))))
+ {
+ break;
+ }
#endif // _TARGET_64BIT_
- __fallthrough;
-
- case GT_CNS_LNG:
- case GT_CNS_DBL:
- // For MinOpts, we don't do constant folding, so we have
- // constants showing up in places we don't like.
- // force them into a register now to prevent that.
- if (compiler->opts.OptEnabled(CLFLG_CONSTANTFOLD))
- return 0;
- break;
+ __fallthrough;
+ case GT_CNS_LNG:
+ case GT_CNS_DBL:
+ // For MinOpts, we don't do constant folding, so we have
+ // constants showing up in places we don't like.
+ // force them into a register now to prevent that.
+ if (compiler->opts.OptEnabled(CLFLG_CONSTANTFOLD))
+ return 0;
+ break;
- case GT_IND:
- case GT_NULLCHECK:
+ case GT_IND:
+ case GT_NULLCHECK:
- /* Try to make the address directly addressable */
+ /* Try to make the address directly addressable */
- if (genMakeIndAddrMode(tree->gtOp.gtOp1,
- tree,
- false, /* not for LEA */
- needReg,
- keepReg,
- &regMask,
- deferOK))
- {
- genUpdateLife(tree);
- return regMask;
- }
+ if (genMakeIndAddrMode(tree->gtOp.gtOp1, tree, false, /* not for LEA */
+ needReg, keepReg, &regMask, deferOK))
+ {
+ genUpdateLife(tree);
+ return regMask;
+ }
- /* No good, we'll have to load the address into a register */
+ /* No good, we'll have to load the address into a register */
- addr = tree;
- tree = tree->gtOp.gtOp1;
- break;
+ addr = tree;
+ tree = tree->gtOp.gtOp1;
+ break;
- default:
- break;
+ default:
+ break;
}
EVAL_TREE:
@@ -2504,7 +2388,7 @@ GOT_VAL:
noway_assert(tree->gtFlags & GTF_REG_VAL);
- if (isRegPairType(tree->gtType))
+ if (isRegPairType(tree->gtType))
{
/* Are we supposed to hang on to the register? */
@@ -2523,7 +2407,7 @@ GOT_VAL:
regMask = genRegMask(tree->gtRegNum);
}
- return regMask;
+ return regMask;
}
/*****************************************************************************
@@ -2534,19 +2418,19 @@ GOT_VAL:
* freeOnly - target register needs to be a scratch register
*/
-void CodeGen::genComputeAddressable(GenTreePtr tree,
- regMaskTP addrReg,
- RegSet::KeepReg keptReg,
- regMaskTP needReg,
- RegSet::KeepReg keepReg,
- bool freeOnly)
+void CodeGen::genComputeAddressable(GenTreePtr tree,
+ regMaskTP addrReg,
+ RegSet::KeepReg keptReg,
+ regMaskTP needReg,
+ RegSet::KeepReg keepReg,
+ bool freeOnly)
{
noway_assert(genStillAddressable(tree));
noway_assert(varTypeIsIntegralOrI(tree->TypeGet()));
genDoneAddressable(tree, addrReg, keptReg);
- regNumber reg;
+ regNumber reg;
if (tree->gtFlags & GTF_REG_VAL)
{
@@ -2588,13 +2472,13 @@ void CodeGen::genComputeAddressable(GenTreePtr tree,
* Should be similar to genMakeAddressable() but gives more control.
*/
-regMaskTP CodeGen::genMakeAddressable2(GenTreePtr tree,
- regMaskTP needReg,
- RegSet::KeepReg keepReg,
- bool forLoadStore,
- bool smallOK,
- bool deferOK,
- bool evalSideEffs)
+regMaskTP CodeGen::genMakeAddressable2(GenTreePtr tree,
+ regMaskTP needReg,
+ RegSet::KeepReg keepReg,
+ bool forLoadStore,
+ bool smallOK,
+ bool deferOK,
+ bool evalSideEffs)
{
bool evalToReg = false;
@@ -2613,7 +2497,7 @@ regMaskTP CodeGen::genMakeAddressable2(GenTreePtr tree,
noway_assert(tree->gtFlags & GTF_REG_VAL);
- if (isRegPairType(tree->gtType))
+ if (isRegPairType(tree->gtType))
{
/* Are we supposed to hang on to the register? */
@@ -2645,14 +2529,14 @@ regMaskTP CodeGen::genMakeAddressable2(GenTreePtr tree,
*/
// inline
-bool CodeGen::genStillAddressable(GenTreePtr tree)
+bool CodeGen::genStillAddressable(GenTreePtr tree)
{
/* Has the value (or one or more of its sub-operands) been spilled? */
- if (tree->gtFlags & (GTF_SPILLED|GTF_SPILLED_OPER))
- return false;
+ if (tree->gtFlags & (GTF_SPILLED | GTF_SPILLED_OPER))
+ return false;
- return true;
+ return true;
}
/*****************************************************************************
@@ -2661,19 +2545,17 @@ bool CodeGen::genStillAddressable(GenTreePtr tree)
* argument indicates whether we're in the 'lock' or 'reload' phase.
*/
-regMaskTP CodeGen::genRestoreAddrMode(GenTreePtr addr,
- GenTreePtr tree,
- bool lockPhase)
+regMaskTP CodeGen::genRestoreAddrMode(GenTreePtr addr, GenTreePtr tree, bool lockPhase)
{
- regMaskTP regMask = RBM_NONE;
+ regMaskTP regMask = RBM_NONE;
/* Have we found a spilled value? */
- if (tree->gtFlags & GTF_SPILLED)
+ if (tree->gtFlags & GTF_SPILLED)
{
/* Do nothing if we're locking, otherwise reload and lock */
- if (!lockPhase)
+ if (!lockPhase)
{
/* Unspill the register */
@@ -2693,33 +2575,33 @@ regMaskTP CodeGen::genRestoreAddrMode(GenTreePtr addr,
regSet.rsMaskLock |= regMask;
}
- return regMask;
+ return regMask;
}
/* Is this sub-tree sitting in a register? */
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
regMask = genRegMask(tree->gtRegNum);
/* Lock the register if we're in the locking phase */
- if (lockPhase)
+ if (lockPhase)
regSet.rsMaskLock |= regMask;
}
else
{
/* Process any sub-operands of this node */
- unsigned kind = tree->OperKind();
+ unsigned kind = tree->OperKind();
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
/* Unary/binary operator */
- if (tree->gtOp.gtOp1)
+ if (tree->gtOp.gtOp1)
regMask |= genRestoreAddrMode(addr, tree->gtOp.gtOp1, lockPhase);
- if (tree->gtGetOp2())
+ if (tree->gtGetOp2())
regMask |= genRestoreAddrMode(addr, tree->gtOp.gtOp2, lockPhase);
}
else if (tree->gtOper == GT_ARR_ELEM)
@@ -2727,7 +2609,7 @@ regMaskTP CodeGen::genRestoreAddrMode(GenTreePtr addr,
/* gtArrObj is the array-object and gtArrInds[0] is marked with the register
which holds the offset-calculation */
- regMask |= genRestoreAddrMode(addr, tree->gtArrElem.gtArrObj, lockPhase);
+ regMask |= genRestoreAddrMode(addr, tree->gtArrElem.gtArrObj, lockPhase);
regMask |= genRestoreAddrMode(addr, tree->gtArrElem.gtArrInds[0], lockPhase);
}
else if (tree->gtOper == GT_CMPXCHG)
@@ -2738,11 +2620,11 @@ regMaskTP CodeGen::genRestoreAddrMode(GenTreePtr addr,
{
/* Must be a leaf/constant node */
- noway_assert(kind & (GTK_LEAF|GTK_CONST));
+ noway_assert(kind & (GTK_LEAF | GTK_CONST));
}
}
- return regMask;
+ return regMask;
}
/*****************************************************************************
@@ -2753,19 +2635,17 @@ regMaskTP CodeGen::genRestoreAddrMode(GenTreePtr addr,
* registers).
*/
-regMaskTP CodeGen::genRestAddressable(GenTreePtr tree,
- regMaskTP addrReg,
- regMaskTP lockMask)
+regMaskTP CodeGen::genRestAddressable(GenTreePtr tree, regMaskTP addrReg, regMaskTP lockMask)
{
noway_assert((regSet.rsMaskLock & lockMask) == lockMask);
/* Is this a 'simple' register spill? */
- if (tree->gtFlags & GTF_SPILLED)
+ if (tree->gtFlags & GTF_SPILLED)
{
/* The mask must match the original register/regpair */
- if (isRegPairType(tree->gtType))
+ if (isRegPairType(tree->gtType))
{
noway_assert(addrReg == genRegPairMask(tree->gtRegPair));
@@ -2782,15 +2662,15 @@ regMaskTP CodeGen::genRestAddressable(GenTreePtr tree,
addrReg = genRegMask(tree->gtRegNum);
}
- noway_assert((regSet.rsMaskLock & lockMask) == lockMask);
- regSet.rsMaskLock -= lockMask;
+ noway_assert((regSet.rsMaskLock & lockMask) == lockMask);
+ regSet.rsMaskLock -= lockMask;
- return addrReg;
+ return addrReg;
}
/* We have a complex address mode with some of its sub-operands spilled */
- noway_assert((tree->gtFlags & GTF_REG_VAL ) == 0);
+ noway_assert((tree->gtFlags & GTF_REG_VAL) == 0);
noway_assert((tree->gtFlags & GTF_SPILLED_OPER) != 0);
/*
@@ -2806,17 +2686,17 @@ regMaskTP CodeGen::genRestAddressable(GenTreePtr tree,
3. Unlock all the registers.
*/
- addrReg = genRestoreAddrMode(tree, tree, true);
- addrReg |= genRestoreAddrMode(tree, tree, false);
+ addrReg = genRestoreAddrMode(tree, tree, true);
+ addrReg |= genRestoreAddrMode(tree, tree, false);
/* Unlock all registers that the address mode uses */
lockMask |= addrReg;
- noway_assert((regSet.rsMaskLock & lockMask) == lockMask);
- regSet.rsMaskLock -= lockMask;
+ noway_assert((regSet.rsMaskLock & lockMask) == lockMask);
+ regSet.rsMaskLock -= lockMask;
- return addrReg;
+ return addrReg;
}
/*****************************************************************************
@@ -2829,15 +2709,13 @@ regMaskTP CodeGen::genRestAddressable(GenTreePtr tree,
* the address (these will be marked as used on exit).
*/
-regMaskTP CodeGen::genKeepAddressable(GenTreePtr tree,
- regMaskTP addrReg,
- regMaskTP avoidMask)
+regMaskTP CodeGen::genKeepAddressable(GenTreePtr tree, regMaskTP addrReg, regMaskTP avoidMask)
{
/* Is the operand still addressable? */
- tree = tree->gtEffectiveVal(/*commaOnly*/true); // Strip off commas for this purpose.
+ tree = tree->gtEffectiveVal(/*commaOnly*/ true); // Strip off commas for this purpose.
- if (!genStillAddressable(tree))
+ if (!genStillAddressable(tree))
{
if (avoidMask)
{
@@ -2851,10 +2729,10 @@ regMaskTP CodeGen::genKeepAddressable(GenTreePtr tree,
addrReg = genRestAddressable(tree, addrReg, avoidMask);
- noway_assert((regSet.rsMaskLock & avoidMask) == 0);
+ noway_assert((regSet.rsMaskLock & avoidMask) == 0);
}
- return addrReg;
+ return addrReg;
}
/*****************************************************************************
@@ -2866,9 +2744,7 @@ regMaskTP CodeGen::genKeepAddressable(GenTreePtr tree,
* by genMakeAddressable().
*/
-void CodeGen::genDoneAddressable(GenTreePtr tree,
- regMaskTP addrReg,
- RegSet::KeepReg keptReg)
+void CodeGen::genDoneAddressable(GenTreePtr tree, regMaskTP addrReg, RegSet::KeepReg keptReg)
{
if (keptReg == RegSet::FREE_REG)
{
@@ -2876,7 +2752,7 @@ void CodeGen::genDoneAddressable(GenTreePtr tree,
// ie. There may be a pending use in a higher-up tree.
addrReg &= ~regSet.rsMaskUsed;
-
+
/* addrReg was not marked as used. So just reset its GC info */
if (addrReg)
{
@@ -2901,48 +2777,41 @@ void CodeGen::genDoneAddressable(GenTreePtr tree,
* to evaluate into the FP stack, we do this and return zero.
*/
-GenTreePtr CodeGen::genMakeAddrOrFPstk(GenTreePtr tree,
- regMaskTP * regMaskPtr,
- bool roundResult)
+GenTreePtr CodeGen::genMakeAddrOrFPstk(GenTreePtr tree, regMaskTP* regMaskPtr, bool roundResult)
{
*regMaskPtr = 0;
switch (tree->gtOper)
{
- case GT_LCL_VAR:
- case GT_LCL_FLD:
- case GT_CLS_VAR:
- return tree;
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
+ case GT_CLS_VAR:
+ return tree;
- case GT_CNS_DBL:
- if (tree->gtType == TYP_FLOAT)
- {
- float f = forceCastToFloat(tree->gtDblCon.gtDconVal);
- return genMakeConst(&f, TYP_FLOAT, tree, false);
- }
- return genMakeConst(&tree->gtDblCon.gtDconVal, tree->gtType, tree, true);
+ case GT_CNS_DBL:
+ if (tree->gtType == TYP_FLOAT)
+ {
+ float f = forceCastToFloat(tree->gtDblCon.gtDconVal);
+ return genMakeConst(&f, TYP_FLOAT, tree, false);
+ }
+ return genMakeConst(&tree->gtDblCon.gtDconVal, tree->gtType, tree, true);
- case GT_IND:
- case GT_NULLCHECK:
+ case GT_IND:
+ case GT_NULLCHECK:
- /* Try to make the address directly addressable */
+ /* Try to make the address directly addressable */
- if (genMakeIndAddrMode(tree->gtOp.gtOp1,
- tree,
- false, /* not for LEA */
- 0,
- RegSet::FREE_REG,
- regMaskPtr,
- false))
- {
- genUpdateLife(tree);
- return tree;
- }
+ if (genMakeIndAddrMode(tree->gtOp.gtOp1, tree, false, /* not for LEA */
+ 0, RegSet::FREE_REG, regMaskPtr, false))
+ {
+ genUpdateLife(tree);
+ return tree;
+ }
- break;
+ break;
- default:
- break;
+ default:
+ break;
}
#if FEATURE_STACK_FP_X87
/* We have no choice but to compute the value 'tree' onto the FP stack */
@@ -2952,14 +2821,13 @@ GenTreePtr CodeGen::genMakeAddrOrFPstk(GenTreePtr tree,
return 0;
}
-
/*****************************************************************************/
/*****************************************************************************
*
* Display a string literal value (debug only).
*/
-#ifdef DEBUG
+#ifdef DEBUG
#endif
/*****************************************************************************
@@ -2970,17 +2838,17 @@ GenTreePtr CodeGen::genMakeAddrOrFPstk(GenTreePtr tree,
*
* TODO-ARM-Bug?: pushReg is not implemented (is it needed for ARM?)
*/
-void CodeGen::genEmitGSCookieCheck(bool pushReg)
+void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
// Make sure that EAX didn't die in the return expression
if (!pushReg && (compiler->info.compRetType == TYP_REF))
gcInfo.gcRegGCrefSetCur |= RBM_INTRET;
// Add cookie check code for unsafe buffers
- BasicBlock *gsCheckBlk;
- regMaskTP byrefPushedRegs = RBM_NONE;
- regMaskTP norefPushedRegs = RBM_NONE;
- regMaskTP pushedRegs = RBM_NONE;
+ BasicBlock* gsCheckBlk;
+ regMaskTP byrefPushedRegs = RBM_NONE;
+ regMaskTP norefPushedRegs = RBM_NONE;
+ regMaskTP pushedRegs = RBM_NONE;
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
@@ -2992,30 +2860,24 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
#if CPU_LOAD_STORE_ARCH
regNumber reg = regSet.rsGrabReg(RBM_ALLINT);
- getEmitter()->emitIns_R_S(ins_Load(TYP_INT), EA_4BYTE,
- reg,
- compiler->lvaGSSecurityCookie, 0);
+ getEmitter()->emitIns_R_S(ins_Load(TYP_INT), EA_4BYTE, reg, compiler->lvaGSSecurityCookie, 0);
regTracker.rsTrackRegTrash(reg);
- if (arm_Valid_Imm_For_Alu(compiler->gsGlobalSecurityCookieVal) ||
+ if (arm_Valid_Imm_For_Alu(compiler->gsGlobalSecurityCookieVal) ||
arm_Valid_Imm_For_Alu(~compiler->gsGlobalSecurityCookieVal))
{
- getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE,
- reg,
- compiler->gsGlobalSecurityCookieVal);
+ getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, reg, compiler->gsGlobalSecurityCookieVal);
}
else
{
// Load CookieVal into a register
regNumber immReg = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(reg));
instGen_Set_Reg_To_Imm(EA_4BYTE, immReg, compiler->gsGlobalSecurityCookieVal);
- getEmitter()->emitIns_R_R(INS_cmp, EA_4BYTE,
- reg, immReg);
+ getEmitter()->emitIns_R_R(INS_cmp, EA_4BYTE, reg, immReg);
}
#else
- getEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE,
- compiler->lvaGSSecurityCookie, 0,
- (int)compiler->gsGlobalSecurityCookieVal);
+ getEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
+ (int)compiler->gsGlobalSecurityCookieVal);
#endif
}
else
@@ -3023,37 +2885,38 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
regNumber regGSCheck;
regMaskTP regMaskGSCheck;
#if CPU_LOAD_STORE_ARCH
- regGSCheck = regSet.rsGrabReg(RBM_ALLINT);
+ regGSCheck = regSet.rsGrabReg(RBM_ALLINT);
regMaskGSCheck = genRegMask(regGSCheck);
#else
// Don't pick the 'this' register
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaTable[compiler->info.compThisArg].lvRegister &&
(compiler->lvaTable[compiler->info.compThisArg].lvRegNum == REG_ECX))
{
- regGSCheck = REG_EDX;
+ regGSCheck = REG_EDX;
regMaskGSCheck = RBM_EDX;
}
else
{
- regGSCheck = REG_ECX;
+ regGSCheck = REG_ECX;
regMaskGSCheck = RBM_ECX;
}
// NGen case
- if (pushReg && (regMaskGSCheck & (regSet.rsMaskUsed|regSet.rsMaskVars|regSet.rsMaskLock)))
+ if (pushReg && (regMaskGSCheck & (regSet.rsMaskUsed | regSet.rsMaskVars | regSet.rsMaskLock)))
{
pushedRegs = genPushRegs(regMaskGSCheck, &byrefPushedRegs, &norefPushedRegs);
}
else
{
- noway_assert((regMaskGSCheck & (regSet.rsMaskUsed|regSet.rsMaskVars|regSet.rsMaskLock)) == 0);
+ noway_assert((regMaskGSCheck & (regSet.rsMaskUsed | regSet.rsMaskVars | regSet.rsMaskLock)) == 0);
}
#endif
#if defined(_TARGET_ARM_)
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSCheck, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, regGSCheck, regGSCheck,0);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, regGSCheck, regGSCheck, 0);
#else
- getEmitter()->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTR_DSP_RELOC, regGSCheck, FLD_GLOBAL_DS, (ssize_t)compiler->gsGlobalSecurityCookieAddr);
+ getEmitter()->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTR_DSP_RELOC, regGSCheck, FLD_GLOBAL_DS,
+ (ssize_t)compiler->gsGlobalSecurityCookieAddr);
#endif // !_TARGET_ARM_
regTracker.rsTrashRegSet(regMaskGSCheck);
#ifdef _TARGET_ARM_
@@ -3066,7 +2929,7 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
#endif
}
- gsCheckBlk = genCreateTempLabel();
+ gsCheckBlk = genCreateTempLabel();
emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
inst_JMP(jmpEqual, gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
@@ -3075,21 +2938,20 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
genPopRegs(pushedRegs, byrefPushedRegs, norefPushedRegs);
}
-
/*****************************************************************************
*
* Generate any side effects within the given expression tree.
*/
-void CodeGen::genEvalSideEffects(GenTreePtr tree)
+void CodeGen::genEvalSideEffects(GenTreePtr tree)
{
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
AGAIN:
/* Does this sub-tree contain any side-effects? */
- if (tree->gtFlags & GTF_SIDE_EFFECT)
+ if (tree->gtFlags & GTF_SIDE_EFFECT)
{
#if FEATURE_STACK_FP_X87
/* Remember the current FP stack level */
@@ -3099,7 +2961,7 @@ AGAIN:
{
regMaskTP addrReg = genMakeAddressable(tree, RBM_ALLINT, RegSet::KEEP_REG, true, false);
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
gcInfo.gcMarkRegPtrVal(tree);
genDoneAddressable(tree, addrReg, RegSet::KEEP_REG);
@@ -3107,8 +2969,7 @@ AGAIN:
// GTF_IND_RNGCHK trees have already de-referenced the pointer, and so
// do not need an additional null-check
/* Do this only if the GTF_EXCEPT or GTF_IND_VOLATILE flag is set on the indir */
- else if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0 &&
- ((tree->gtFlags & GTF_EXCEPT) | GTF_IND_VOLATILE))
+ else if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0 && ((tree->gtFlags & GTF_EXCEPT) | GTF_IND_VOLATILE))
{
/* Compare against any register to do null-check */
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -3119,7 +2980,8 @@ AGAIN:
#elif CPU_LOAD_STORE_ARCH
if (varTypeIsFloating(tree->TypeGet()))
{
- genComputeAddressableFloat(tree, addrReg, RBM_NONE, RegSet::KEEP_REG, RBM_ALLFLOAT, RegSet::FREE_REG);
+ genComputeAddressableFloat(tree, addrReg, RBM_NONE, RegSet::KEEP_REG, RBM_ALLFLOAT,
+ RegSet::FREE_REG);
}
else
{
@@ -3128,7 +2990,7 @@ AGAIN:
#ifdef _TARGET_ARM_
if (tree->gtFlags & GTF_IND_VOLATILE)
{
- // Emit a memory barrier instruction after the load
+ // Emit a memory barrier instruction after the load
instGen_MemoryBarrier();
}
#endif
@@ -3145,7 +3007,7 @@ AGAIN:
{
/* Generate the expression and throw it away */
genCodeForTree(tree, RBM_ALL(tree->TypeGet()));
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
gcInfo.gcMarkRegPtrVal(tree);
}
@@ -3154,7 +3016,7 @@ AGAIN:
/* If the tree computed a value on the FP stack, pop the stack */
if (genNumberTemps() > iTemps)
{
- noway_assert(genNumberTemps() == iTemps+1);
+ noway_assert(genNumberTemps() == iTemps + 1);
genDiscardStackFP(tree);
}
#endif
@@ -3170,18 +3032,17 @@ AGAIN:
/* Is this a constant or leaf node? */
- if (kind & (GTK_CONST|GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
{
#if FEATURE_STACK_FP_X87
- if (tree->IsRegVar() && isFloatRegType(tree->gtType) &&
- tree->IsRegVarDeath())
+ if (tree->IsRegVar() && isFloatRegType(tree->gtType) && tree->IsRegVarDeath())
{
genRegVarDeathStackFP(tree);
FlatFPX87_Unload(&compCurFPState, tree->gtRegNum);
}
#endif
genUpdateLife(tree);
- gcInfo.gcMarkRegPtrVal (tree);
+ gcInfo.gcMarkRegPtrVal(tree);
return;
}
@@ -3189,7 +3050,7 @@ AGAIN:
noway_assert(kind & GTK_SMPOP);
- if (tree->gtGetOp2())
+ if (tree->gtGetOp2())
{
genEvalSideEffects(tree->gtOp.gtOp1);
@@ -3199,7 +3060,7 @@ AGAIN:
else
{
tree = tree->gtOp.gtOp1;
- if (tree)
+ if (tree)
goto AGAIN;
}
}
@@ -3216,148 +3077,132 @@ AGAIN:
* RBM_NONE if a write-barrier is not needed.
*/
-regMaskTP CodeGen::WriteBarrier(GenTreePtr tgt,
- GenTreePtr assignVal,
- regMaskTP tgtAddrReg)
+regMaskTP CodeGen::WriteBarrier(GenTreePtr tgt, GenTreePtr assignVal, regMaskTP tgtAddrReg)
{
noway_assert(assignVal->gtFlags & GTF_REG_VAL);
GCInfo::WriteBarrierForm wbf = gcInfo.gcIsWriteBarrierCandidate(tgt, assignVal);
- if (wbf == GCInfo::WBF_NoBarrier)
+ if (wbf == GCInfo::WBF_NoBarrier)
return RBM_NONE;
- regMaskTP resultRegMask = RBM_NONE;
+ regMaskTP resultRegMask = RBM_NONE;
#if FEATURE_WRITE_BARRIER
- regNumber reg = assignVal->gtRegNum;
+ regNumber reg = assignVal->gtRegNum;
#if defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS
#ifdef DEBUG
if (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug) // This one is always a call to a C++ method.
{
#endif
- const static int regToHelper[2][8] =
- {
- // If the target is known to be in managed memory
- {
- CORINFO_HELP_ASSIGN_REF_EAX,
- CORINFO_HELP_ASSIGN_REF_ECX,
- -1,
- CORINFO_HELP_ASSIGN_REF_EBX,
- -1,
- CORINFO_HELP_ASSIGN_REF_EBP,
- CORINFO_HELP_ASSIGN_REF_ESI,
- CORINFO_HELP_ASSIGN_REF_EDI,
- },
-
- // Don't know if the target is in managed memory
- {
- CORINFO_HELP_CHECKED_ASSIGN_REF_EAX,
- CORINFO_HELP_CHECKED_ASSIGN_REF_ECX,
- -1,
- CORINFO_HELP_CHECKED_ASSIGN_REF_EBX,
- -1,
- CORINFO_HELP_CHECKED_ASSIGN_REF_EBP,
- CORINFO_HELP_CHECKED_ASSIGN_REF_ESI,
- CORINFO_HELP_CHECKED_ASSIGN_REF_EDI,
- },
- };
-
- noway_assert(regToHelper[0][REG_EAX] == CORINFO_HELP_ASSIGN_REF_EAX);
- noway_assert(regToHelper[0][REG_ECX] == CORINFO_HELP_ASSIGN_REF_ECX);
- noway_assert(regToHelper[0][REG_EBX] == CORINFO_HELP_ASSIGN_REF_EBX);
- noway_assert(regToHelper[0][REG_ESP] == -1 );
- noway_assert(regToHelper[0][REG_EBP] == CORINFO_HELP_ASSIGN_REF_EBP);
- noway_assert(regToHelper[0][REG_ESI] == CORINFO_HELP_ASSIGN_REF_ESI);
- noway_assert(regToHelper[0][REG_EDI] == CORINFO_HELP_ASSIGN_REF_EDI);
-
- noway_assert(regToHelper[1][REG_EAX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EAX);
- noway_assert(regToHelper[1][REG_ECX] == CORINFO_HELP_CHECKED_ASSIGN_REF_ECX);
- noway_assert(regToHelper[1][REG_EBX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBX);
- noway_assert(regToHelper[1][REG_ESP] == -1 );
- noway_assert(regToHelper[1][REG_EBP] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBP);
- noway_assert(regToHelper[1][REG_ESI] == CORINFO_HELP_CHECKED_ASSIGN_REF_ESI);
- noway_assert(regToHelper[1][REG_EDI] == CORINFO_HELP_CHECKED_ASSIGN_REF_EDI);
-
- noway_assert((reg != REG_ESP) && (reg != REG_WRITE_BARRIER));
+ const static int regToHelper[2][8] = {
+ // If the target is known to be in managed memory
+ {
+ CORINFO_HELP_ASSIGN_REF_EAX, CORINFO_HELP_ASSIGN_REF_ECX, -1, CORINFO_HELP_ASSIGN_REF_EBX, -1,
+ CORINFO_HELP_ASSIGN_REF_EBP, CORINFO_HELP_ASSIGN_REF_ESI, CORINFO_HELP_ASSIGN_REF_EDI,
+ },
- /*
- Generate the following code:
+ // Don't know if the target is in managed memory
+ {
+ CORINFO_HELP_CHECKED_ASSIGN_REF_EAX, CORINFO_HELP_CHECKED_ASSIGN_REF_ECX, -1,
+ CORINFO_HELP_CHECKED_ASSIGN_REF_EBX, -1, CORINFO_HELP_CHECKED_ASSIGN_REF_EBP,
+ CORINFO_HELP_CHECKED_ASSIGN_REF_ESI, CORINFO_HELP_CHECKED_ASSIGN_REF_EDI,
+ },
+ };
- lea edx, tgt
- call write_barrier_helper_reg
+ noway_assert(regToHelper[0][REG_EAX] == CORINFO_HELP_ASSIGN_REF_EAX);
+ noway_assert(regToHelper[0][REG_ECX] == CORINFO_HELP_ASSIGN_REF_ECX);
+ noway_assert(regToHelper[0][REG_EBX] == CORINFO_HELP_ASSIGN_REF_EBX);
+ noway_assert(regToHelper[0][REG_ESP] == -1);
+ noway_assert(regToHelper[0][REG_EBP] == CORINFO_HELP_ASSIGN_REF_EBP);
+ noway_assert(regToHelper[0][REG_ESI] == CORINFO_HELP_ASSIGN_REF_ESI);
+ noway_assert(regToHelper[0][REG_EDI] == CORINFO_HELP_ASSIGN_REF_EDI);
- First grab the RBM_WRITE_BARRIER register for the target address.
- */
+ noway_assert(regToHelper[1][REG_EAX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EAX);
+ noway_assert(regToHelper[1][REG_ECX] == CORINFO_HELP_CHECKED_ASSIGN_REF_ECX);
+ noway_assert(regToHelper[1][REG_EBX] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBX);
+ noway_assert(regToHelper[1][REG_ESP] == -1);
+ noway_assert(regToHelper[1][REG_EBP] == CORINFO_HELP_CHECKED_ASSIGN_REF_EBP);
+ noway_assert(regToHelper[1][REG_ESI] == CORINFO_HELP_CHECKED_ASSIGN_REF_ESI);
+ noway_assert(regToHelper[1][REG_EDI] == CORINFO_HELP_CHECKED_ASSIGN_REF_EDI);
- regNumber rg1;
- bool trashOp1;
+ noway_assert((reg != REG_ESP) && (reg != REG_WRITE_BARRIER));
- if ((tgtAddrReg & RBM_WRITE_BARRIER) == 0)
- {
- rg1 = regSet.rsGrabReg(RBM_WRITE_BARRIER);
+ /*
+ Generate the following code:
- regSet.rsMaskUsed |= RBM_WRITE_BARRIER;
- regSet.rsMaskLock |= RBM_WRITE_BARRIER;
+ lea edx, tgt
+ call write_barrier_helper_reg
- trashOp1 = false;
- }
- else
- {
- rg1 = REG_WRITE_BARRIER;
+ First grab the RBM_WRITE_BARRIER register for the target address.
+ */
- trashOp1 = true;
- }
+ regNumber rg1;
+ bool trashOp1;
- noway_assert(rg1 == REG_WRITE_BARRIER);
+ if ((tgtAddrReg & RBM_WRITE_BARRIER) == 0)
+ {
+ rg1 = regSet.rsGrabReg(RBM_WRITE_BARRIER);
- /* Generate "lea EDX, [addr-mode]" */
+ regSet.rsMaskUsed |= RBM_WRITE_BARRIER;
+ regSet.rsMaskLock |= RBM_WRITE_BARRIER;
- noway_assert(tgt->gtType == TYP_REF);
- tgt->gtType = TYP_BYREF;
- inst_RV_TT(INS_lea, rg1, tgt, 0, EA_BYREF);
+ trashOp1 = false;
+ }
+ else
+ {
+ rg1 = REG_WRITE_BARRIER;
- /* Free up anything that was tied up by the LHS */
- genDoneAddressable(tgt, tgtAddrReg, RegSet::KEEP_REG);
+ trashOp1 = true;
+ }
- // In case "tgt" was a comma:
- tgt = tgt->gtEffectiveVal();
+ noway_assert(rg1 == REG_WRITE_BARRIER);
- regTracker.rsTrackRegTrash(rg1);
- gcInfo.gcMarkRegSetNpt(genRegMask(rg1));
- gcInfo.gcMarkRegPtrVal(rg1, TYP_BYREF);
+ /* Generate "lea EDX, [addr-mode]" */
+ noway_assert(tgt->gtType == TYP_REF);
+ tgt->gtType = TYP_BYREF;
+ inst_RV_TT(INS_lea, rg1, tgt, 0, EA_BYREF);
- /* Call the proper vm helper */
+ /* Free up anything that was tied up by the LHS */
+ genDoneAddressable(tgt, tgtAddrReg, RegSet::KEEP_REG);
- // enforced by gcIsWriteBarrierCandidate
- noway_assert(tgt->gtOper == GT_IND ||
- tgt->gtOper == GT_CLS_VAR);
+ // In case "tgt" was a comma:
+ tgt = tgt->gtEffectiveVal();
- unsigned tgtAnywhere = 0;
- if ((tgt->gtOper == GT_IND) &&
- ((tgt->gtFlags & GTF_IND_TGTANYWHERE) || (tgt->gtOp.gtOp1->TypeGet() == TYP_I_IMPL)))
- {
- tgtAnywhere = 1;
- }
+ regTracker.rsTrackRegTrash(rg1);
+ gcInfo.gcMarkRegSetNpt(genRegMask(rg1));
+ gcInfo.gcMarkRegPtrVal(rg1, TYP_BYREF);
- int helper = regToHelper[tgtAnywhere][reg];
- resultRegMask = genRegMask(reg);
+ /* Call the proper vm helper */
- gcInfo.gcMarkRegSetNpt(RBM_WRITE_BARRIER); // byref EDX is killed in the call
+ // enforced by gcIsWriteBarrierCandidate
+ noway_assert(tgt->gtOper == GT_IND || tgt->gtOper == GT_CLS_VAR);
- genEmitHelperCall(helper,
- 0, // argSize
- EA_PTRSIZE); // retSize
+ unsigned tgtAnywhere = 0;
+ if ((tgt->gtOper == GT_IND) &&
+ ((tgt->gtFlags & GTF_IND_TGTANYWHERE) || (tgt->gtOp.gtOp1->TypeGet() == TYP_I_IMPL)))
+ {
+ tgtAnywhere = 1;
+ }
- if (!trashOp1)
- {
- regSet.rsMaskUsed &= ~RBM_WRITE_BARRIER;
- regSet.rsMaskLock &= ~RBM_WRITE_BARRIER;
- }
+ int helper = regToHelper[tgtAnywhere][reg];
+ resultRegMask = genRegMask(reg);
- return resultRegMask;
+ gcInfo.gcMarkRegSetNpt(RBM_WRITE_BARRIER); // byref EDX is killed in the call
+
+ genEmitHelperCall(helper,
+ 0, // argSize
+ EA_PTRSIZE); // retSize
+
+ if (!trashOp1)
+ {
+ regSet.rsMaskUsed &= ~RBM_WRITE_BARRIER;
+ regSet.rsMaskLock &= ~RBM_WRITE_BARRIER;
+ }
+
+ return resultRegMask;
#ifdef DEBUG
}
@@ -3367,120 +3212,120 @@ regMaskTP CodeGen::WriteBarrier(GenTreePtr tgt,
#if defined(DEBUG) || !(defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS)
{
- /*
- Generate the following code (or its equivalent on the given target):
+ /*
+ Generate the following code (or its equivalent on the given target):
- mov arg1, srcReg
- lea arg0, tgt
- call write_barrier_helper
+ mov arg1, srcReg
+ lea arg0, tgt
+ call write_barrier_helper
- First, setup REG_ARG_1 with the GC ref that we are storing via the Write Barrier
- */
+ First, setup REG_ARG_1 with the GC ref that we are storing via the Write Barrier
+ */
- if (reg != REG_ARG_1)
- {
- // We may need to spill whatever is in the ARG_1 register
- //
- if ((regSet.rsMaskUsed & RBM_ARG_1) != 0)
+ if (reg != REG_ARG_1)
{
- regSet.rsSpillReg(REG_ARG_1);
- }
+ // We may need to spill whatever is in the ARG_1 register
+ //
+ if ((regSet.rsMaskUsed & RBM_ARG_1) != 0)
+ {
+ regSet.rsSpillReg(REG_ARG_1);
+ }
- inst_RV_RV(INS_mov, REG_ARG_1, reg, TYP_REF);
- }
- resultRegMask = RBM_ARG_1;
+ inst_RV_RV(INS_mov, REG_ARG_1, reg, TYP_REF);
+ }
+ resultRegMask = RBM_ARG_1;
- regTracker.rsTrackRegTrash(REG_ARG_1);
- gcInfo.gcMarkRegSetNpt(REG_ARG_1);
- gcInfo.gcMarkRegSetGCref(RBM_ARG_1); // gcref in ARG_1
+ regTracker.rsTrackRegTrash(REG_ARG_1);
+ gcInfo.gcMarkRegSetNpt(REG_ARG_1);
+ gcInfo.gcMarkRegSetGCref(RBM_ARG_1); // gcref in ARG_1
- bool free_arg1 = false;
- if ((regSet.rsMaskUsed & RBM_ARG_1) == 0)
- {
- regSet.rsMaskUsed |= RBM_ARG_1;
- free_arg1 = true;
- }
+ bool free_arg1 = false;
+ if ((regSet.rsMaskUsed & RBM_ARG_1) == 0)
+ {
+ regSet.rsMaskUsed |= RBM_ARG_1;
+ free_arg1 = true;
+ }
- // Then we setup REG_ARG_0 with the target address to store into via the Write Barrier
+ // Then we setup REG_ARG_0 with the target address to store into via the Write Barrier
- /* Generate "lea R0, [addr-mode]" */
+ /* Generate "lea R0, [addr-mode]" */
- noway_assert(tgt->gtType == TYP_REF);
- tgt->gtType = TYP_BYREF;
+ noway_assert(tgt->gtType == TYP_REF);
+ tgt->gtType = TYP_BYREF;
- tgtAddrReg = genKeepAddressable(tgt, tgtAddrReg);
+ tgtAddrReg = genKeepAddressable(tgt, tgtAddrReg);
- // We may need to spill whatever is in the ARG_0 register
- //
- if (((tgtAddrReg & RBM_ARG_0) == 0) && // tgtAddrReg does not contain REG_ARG_0
- ((regSet.rsMaskUsed & RBM_ARG_0) != 0) && // and regSet.rsMaskUsed contains REG_ARG_0
- (reg != REG_ARG_0)) // unless REG_ARG_0 contains the REF value being written, which we're finished with.
- {
- regSet.rsSpillReg(REG_ARG_0);
- }
+ // We may need to spill whatever is in the ARG_0 register
+ //
+ if (((tgtAddrReg & RBM_ARG_0) == 0) && // tgtAddrReg does not contain REG_ARG_0
+ ((regSet.rsMaskUsed & RBM_ARG_0) != 0) && // and regSet.rsMaskUsed contains REG_ARG_0
+ (reg != REG_ARG_0)) // unless REG_ARG_0 contains the REF value being written, which we're finished with.
+ {
+ regSet.rsSpillReg(REG_ARG_0);
+ }
- inst_RV_TT(INS_lea, REG_ARG_0, tgt, 0, EA_BYREF);
+ inst_RV_TT(INS_lea, REG_ARG_0, tgt, 0, EA_BYREF);
- /* Free up anything that was tied up by the LHS */
- genDoneAddressable(tgt, tgtAddrReg, RegSet::KEEP_REG);
+ /* Free up anything that was tied up by the LHS */
+ genDoneAddressable(tgt, tgtAddrReg, RegSet::KEEP_REG);
- regTracker.rsTrackRegTrash(REG_ARG_0);
- gcInfo.gcMarkRegSetNpt(REG_ARG_0);
- gcInfo.gcMarkRegSetByref(RBM_ARG_0); // byref in ARG_0
+ regTracker.rsTrackRegTrash(REG_ARG_0);
+ gcInfo.gcMarkRegSetNpt(REG_ARG_0);
+ gcInfo.gcMarkRegSetByref(RBM_ARG_0); // byref in ARG_0
#ifdef _TARGET_ARM_
#if NOGC_WRITE_BARRIERS
- // Finally, we may be required to spill whatever is in the further argument registers
- // trashed by the call. The write barrier trashes some further registers --
- // either the standard volatile var set, or, if we're using assembly barriers, a more specialized set.
+ // Finally, we may be required to spill whatever is in the further argument registers
+ // trashed by the call. The write barrier trashes some further registers --
+ // either the standard volatile var set, or, if we're using assembly barriers, a more specialized set.
- regMaskTP volatileRegsTrashed = RBM_CALLEE_TRASH_NOGC;
+ regMaskTP volatileRegsTrashed = RBM_CALLEE_TRASH_NOGC;
#else
- regMaskTP volatileRegsTrashed = RBM_CALLEE_TRASH;
+ regMaskTP volatileRegsTrashed = RBM_CALLEE_TRASH;
#endif
- // Spill any other registers trashed by the write barrier call and currently in use.
- regMaskTP mustSpill = (volatileRegsTrashed & regSet.rsMaskUsed & ~(RBM_ARG_0|RBM_ARG_1));
- if (mustSpill) regSet.rsSpillRegs(mustSpill);
+ // Spill any other registers trashed by the write barrier call and currently in use.
+ regMaskTP mustSpill = (volatileRegsTrashed & regSet.rsMaskUsed & ~(RBM_ARG_0 | RBM_ARG_1));
+ if (mustSpill)
+ regSet.rsSpillRegs(mustSpill);
#endif // _TARGET_ARM_
- bool free_arg0 = false;
- if ((regSet.rsMaskUsed & RBM_ARG_0) == 0)
- {
- regSet.rsMaskUsed |= RBM_ARG_0;
- free_arg0 = true;
- }
+ bool free_arg0 = false;
+ if ((regSet.rsMaskUsed & RBM_ARG_0) == 0)
+ {
+ regSet.rsMaskUsed |= RBM_ARG_0;
+ free_arg0 = true;
+ }
- // genEmitHelperCall might need to grab a register
- // so don't let it spill one of the arguments
- //
- regMaskTP reallyUsedRegs = RBM_NONE;
- regSet.rsLockReg(RBM_ARG_0|RBM_ARG_1, &reallyUsedRegs);
+ // genEmitHelperCall might need to grab a register
+ // so don't let it spill one of the arguments
+ //
+ regMaskTP reallyUsedRegs = RBM_NONE;
+ regSet.rsLockReg(RBM_ARG_0 | RBM_ARG_1, &reallyUsedRegs);
- genGCWriteBarrier(tgt, wbf);
+ genGCWriteBarrier(tgt, wbf);
- regSet.rsUnlockReg(RBM_ARG_0|RBM_ARG_1, reallyUsedRegs);
- gcInfo.gcMarkRegSetNpt(RBM_ARG_0 | RBM_ARG_1); // byref ARG_0 and reg ARG_1 are killed by the call
+ regSet.rsUnlockReg(RBM_ARG_0 | RBM_ARG_1, reallyUsedRegs);
+ gcInfo.gcMarkRegSetNpt(RBM_ARG_0 | RBM_ARG_1); // byref ARG_0 and reg ARG_1 are killed by the call
- if (free_arg0)
- {
- regSet.rsMaskUsed &= ~RBM_ARG_0;
- }
- if (free_arg1)
- {
- regSet.rsMaskUsed &= ~RBM_ARG_1;
- }
+ if (free_arg0)
+ {
+ regSet.rsMaskUsed &= ~RBM_ARG_0;
+ }
+ if (free_arg1)
+ {
+ regSet.rsMaskUsed &= ~RBM_ARG_1;
+ }
- return resultRegMask;
+ return resultRegMask;
}
#endif // defined(DEBUG) || !(defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS)
-#else // !FEATURE_WRITE_BARRIER
+#else // !FEATURE_WRITE_BARRIER
NYI("FEATURE_WRITE_BARRIER unimplemented");
- return resultRegMask;
+ return resultRegMask;
#endif // !FEATURE_WRITE_BARRIER
-
}
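
Context for the hunk above: the x86 NOGC write-barrier path selects its helper from a table indexed by the register holding the new GC value and by whether the destination may lie outside the GC heap. The sketch below restates that selection only; the table shape, bounds, and names here are placeholders, not the JIT's actual regToHelper table or CORINFO helper IDs.

    // Illustrative sketch, not the JIT's code: the [tgtAnywhere][reg] helper
    // lookup performed above.
    int pickWriteBarrierHelperSketch(bool indirTargetAnywhere, bool targetIsNativeInt,
                                     int reg, const int regToHelper[2][8])
    {
        // Use the "checked" helper variants when the destination might not be
        // in the GC heap: an indirection flagged GTF_IND_TGTANYWHERE, or an
        // address that is only a native int rather than a byref.
        int tgtAnywhere = (indirTargetAnywhere || targetIsNativeInt) ? 1 : 0;
        return regToHelper[tgtAnywhere][reg];
    }
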
#ifdef _TARGET_X86_
@@ -3490,56 +3335,53 @@ regMaskTP CodeGen::WriteBarrier(GenTreePtr tgt,
* of two long values have been compared.
*/
-void CodeGen::genJccLongHi(genTreeOps cmp,
- BasicBlock * jumpTrue,
- BasicBlock * jumpFalse,
- bool isUnsigned )
+void CodeGen::genJccLongHi(genTreeOps cmp, BasicBlock* jumpTrue, BasicBlock* jumpFalse, bool isUnsigned)
{
if (cmp != GT_NE)
{
- jumpFalse->bbFlags |= BBF_JMP_TARGET|BBF_HAS_LABEL;
+ jumpFalse->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
}
switch (cmp)
{
- case GT_EQ:
- inst_JMP(EJ_jne, jumpFalse);
- break;
+ case GT_EQ:
+ inst_JMP(EJ_jne, jumpFalse);
+ break;
- case GT_NE:
- inst_JMP(EJ_jne, jumpTrue);
- break;
+ case GT_NE:
+ inst_JMP(EJ_jne, jumpTrue);
+ break;
- case GT_LT:
- case GT_LE:
- if (isUnsigned)
- {
- inst_JMP(EJ_ja , jumpFalse);
- inst_JMP(EJ_jb , jumpTrue);
- }
- else
- {
- inst_JMP(EJ_jg , jumpFalse);
- inst_JMP(EJ_jl , jumpTrue);
- }
- break;
+ case GT_LT:
+ case GT_LE:
+ if (isUnsigned)
+ {
+ inst_JMP(EJ_ja, jumpFalse);
+ inst_JMP(EJ_jb, jumpTrue);
+ }
+ else
+ {
+ inst_JMP(EJ_jg, jumpFalse);
+ inst_JMP(EJ_jl, jumpTrue);
+ }
+ break;
- case GT_GE:
- case GT_GT:
- if (isUnsigned)
- {
- inst_JMP(EJ_jb , jumpFalse);
- inst_JMP(EJ_ja , jumpTrue);
- }
- else
- {
- inst_JMP(EJ_jl , jumpFalse);
- inst_JMP(EJ_jg , jumpTrue);
- }
- break;
+ case GT_GE:
+ case GT_GT:
+ if (isUnsigned)
+ {
+ inst_JMP(EJ_jb, jumpFalse);
+ inst_JMP(EJ_ja, jumpTrue);
+ }
+ else
+ {
+ inst_JMP(EJ_jl, jumpFalse);
+ inst_JMP(EJ_jg, jumpTrue);
+ }
+ break;
- default:
- noway_assert(!"expected a comparison operator");
+ default:
+ noway_assert(!"expected a comparison operator");
}
}
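
genJccLongHi above and genJccLongLo below together implement 64-bit compares on a 32-bit target: the high words are compared first and decide strict orderings outright, and only when they are equal does the low-word compare run, always as an unsigned compare. A minimal sketch of that decomposition, written as a plain function for clarity rather than as the emitted jump sequences:

    // Illustrative only: the two-step long compare behind the jump kinds above.
    bool longLessThanSketch(unsigned hi1, unsigned lo1, unsigned hi2, unsigned lo2, bool isUnsigned)
    {
        if (hi1 != hi2)
        {
            // High words decide it; this is the only place signedness matters
            // (the ja/jb vs. jg/jl split in genJccLongHi).
            return isUnsigned ? (hi1 < hi2) : ((int)hi1 < (int)hi2);
        }
        // Equal high words: low words compare as unsigned values, which is why
        // genJccLongLo always uses the unsigned jump kinds (jb/jbe/jae/ja).
        return lo1 < lo2;
    }
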
@@ -3549,38 +3391,36 @@ void CodeGen::genJccLongHi(genTreeOps cmp,
* of two long values have been compared.
*/
-void CodeGen::genJccLongLo(genTreeOps cmp,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse)
+void CodeGen::genJccLongLo(genTreeOps cmp, BasicBlock* jumpTrue, BasicBlock* jumpFalse)
{
switch (cmp)
{
- case GT_EQ:
- inst_JMP(EJ_je , jumpTrue);
- break;
+ case GT_EQ:
+ inst_JMP(EJ_je, jumpTrue);
+ break;
- case GT_NE:
- inst_JMP(EJ_jne, jumpTrue);
- break;
+ case GT_NE:
+ inst_JMP(EJ_jne, jumpTrue);
+ break;
- case GT_LT:
- inst_JMP(EJ_jb , jumpTrue);
- break;
+ case GT_LT:
+ inst_JMP(EJ_jb, jumpTrue);
+ break;
- case GT_LE:
- inst_JMP(EJ_jbe, jumpTrue);
- break;
+ case GT_LE:
+ inst_JMP(EJ_jbe, jumpTrue);
+ break;
- case GT_GE:
- inst_JMP(EJ_jae, jumpTrue);
- break;
+ case GT_GE:
+ inst_JMP(EJ_jae, jumpTrue);
+ break;
- case GT_GT:
- inst_JMP(EJ_ja , jumpTrue);
- break;
+ case GT_GT:
+ inst_JMP(EJ_ja, jumpTrue);
+ break;
- default:
- noway_assert(!"expected comparison");
+ default:
+ noway_assert(!"expected comparison");
}
}
#elif defined(_TARGET_ARM_)
@@ -3590,10 +3430,7 @@ void CodeGen::genJccLongLo(genTreeOps cmp,
* of two long values have been compared.
*/
-void CodeGen::genJccLongHi(genTreeOps cmp,
- BasicBlock * jumpTrue,
- BasicBlock * jumpFalse,
- bool isUnsigned)
+void CodeGen::genJccLongHi(genTreeOps cmp, BasicBlock* jumpTrue, BasicBlock* jumpFalse, bool isUnsigned)
{
if (cmp != GT_NE)
{
@@ -3602,44 +3439,44 @@ void CodeGen::genJccLongHi(genTreeOps cmp,
switch (cmp)
{
- case GT_EQ:
- inst_JMP(EJ_ne, jumpFalse);
- break;
+ case GT_EQ:
+ inst_JMP(EJ_ne, jumpFalse);
+ break;
- case GT_NE:
- inst_JMP(EJ_ne, jumpTrue);
- break;
+ case GT_NE:
+ inst_JMP(EJ_ne, jumpTrue);
+ break;
- case GT_LT:
- case GT_LE:
- if (isUnsigned)
- {
- inst_JMP(EJ_hi, jumpFalse);
- inst_JMP(EJ_lo, jumpTrue);
- }
- else
- {
- inst_JMP(EJ_gt, jumpFalse);
- inst_JMP(EJ_lt, jumpTrue);
- }
- break;
+ case GT_LT:
+ case GT_LE:
+ if (isUnsigned)
+ {
+ inst_JMP(EJ_hi, jumpFalse);
+ inst_JMP(EJ_lo, jumpTrue);
+ }
+ else
+ {
+ inst_JMP(EJ_gt, jumpFalse);
+ inst_JMP(EJ_lt, jumpTrue);
+ }
+ break;
- case GT_GE:
- case GT_GT:
- if (isUnsigned)
- {
- inst_JMP(EJ_lo, jumpFalse);
- inst_JMP(EJ_hi, jumpTrue);
- }
- else
- {
- inst_JMP(EJ_lt, jumpFalse);
- inst_JMP(EJ_gt, jumpTrue);
- }
- break;
+ case GT_GE:
+ case GT_GT:
+ if (isUnsigned)
+ {
+ inst_JMP(EJ_lo, jumpFalse);
+ inst_JMP(EJ_hi, jumpTrue);
+ }
+ else
+ {
+ inst_JMP(EJ_lt, jumpFalse);
+ inst_JMP(EJ_gt, jumpTrue);
+ }
+ break;
- default:
- noway_assert(!"expected a comparison operator");
+ default:
+ noway_assert(!"expected a comparison operator");
}
}
@@ -3649,38 +3486,36 @@ void CodeGen::genJccLongHi(genTreeOps cmp,
* of two long values have been compared.
*/
-void CodeGen::genJccLongLo(genTreeOps cmp,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse)
+void CodeGen::genJccLongLo(genTreeOps cmp, BasicBlock* jumpTrue, BasicBlock* jumpFalse)
{
switch (cmp)
{
- case GT_EQ:
- inst_JMP(EJ_eq, jumpTrue);
- break;
+ case GT_EQ:
+ inst_JMP(EJ_eq, jumpTrue);
+ break;
- case GT_NE:
- inst_JMP(EJ_ne, jumpTrue);
- break;
+ case GT_NE:
+ inst_JMP(EJ_ne, jumpTrue);
+ break;
- case GT_LT:
- inst_JMP(EJ_lo, jumpTrue);
- break;
+ case GT_LT:
+ inst_JMP(EJ_lo, jumpTrue);
+ break;
- case GT_LE:
- inst_JMP(EJ_ls, jumpTrue);
- break;
+ case GT_LE:
+ inst_JMP(EJ_ls, jumpTrue);
+ break;
- case GT_GE:
- inst_JMP(EJ_hs, jumpTrue);
- break;
+ case GT_GE:
+ inst_JMP(EJ_hs, jumpTrue);
+ break;
- case GT_GT:
- inst_JMP(EJ_hi, jumpTrue);
- break;
+ case GT_GT:
+ inst_JMP(EJ_hi, jumpTrue);
+ break;
- default:
- noway_assert(!"expected comparison");
+ default:
+ noway_assert(!"expected comparison");
}
}
#endif
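
genCondJumpLng, which follows, special-cases comparisons of a long against the constants 0 and -1: a 64-bit pair is zero exactly when the OR of its halves is zero, and all-ones exactly when the AND of its halves is all-ones, so a single OR or AND sets the flags without a full compare. A one-line sketch of each identity (illustrative, not the emitted code):

    // Illustrative only: the identities behind the INS_OR / INS_AND special
    // cases in genCondJumpLng.
    bool longIsZeroSketch(unsigned lo, unsigned hi)     { return (lo | hi) == 0; }
    bool longIsMinusOneSketch(unsigned lo, unsigned hi) { return (lo & hi) == 0xFFFFFFFFu; }
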
@@ -3689,50 +3524,47 @@ void CodeGen::genJccLongLo(genTreeOps cmp,
* Called by genCondJump() for TYP_LONG.
*/
-void CodeGen::genCondJumpLng(GenTreePtr cond,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse,
- bool bFPTransition)
+void CodeGen::genCondJumpLng(GenTreePtr cond, BasicBlock* jumpTrue, BasicBlock* jumpFalse, bool bFPTransition)
{
noway_assert(jumpTrue && jumpFalse);
noway_assert((cond->gtFlags & GTF_REVERSE_OPS) == false); // Done in genCondJump()
noway_assert(cond->gtOp.gtOp1->gtType == TYP_LONG);
- GenTreePtr op1 = cond->gtOp.gtOp1;
- GenTreePtr op2 = cond->gtOp.gtOp2;
- genTreeOps cmp = cond->OperGet();
+ GenTreePtr op1 = cond->gtOp.gtOp1;
+ GenTreePtr op2 = cond->gtOp.gtOp2;
+ genTreeOps cmp = cond->OperGet();
- regMaskTP addrReg;
+ regMaskTP addrReg;
/* Are we comparing against a constant? */
- if (op2->gtOper == GT_CNS_LNG)
+ if (op2->gtOper == GT_CNS_LNG)
{
- __int64 lval = op2->gtLngCon.gtLconVal;
- regNumber rTmp;
+ __int64 lval = op2->gtLngCon.gtLconVal;
+ regNumber rTmp;
// We're "done" evaluating op2; let's strip any commas off op1 before we
// evaluate it.
op1 = genCodeForCommaTree(op1);
/* We can generate better code for some special cases */
- instruction ins = INS_invalid;
- bool useIncToSetFlags = false;
- bool specialCaseCmp = false;
+ instruction ins = INS_invalid;
+ bool useIncToSetFlags = false;
+ bool specialCaseCmp = false;
if (cmp == GT_EQ)
{
if (lval == 0)
{
/* op1 == 0 */
- ins = INS_OR;
+ ins = INS_OR;
useIncToSetFlags = false;
specialCaseCmp = true;
}
else if (lval == -1)
{
/* op1 == -1 */
- ins = INS_AND;
+ ins = INS_AND;
useIncToSetFlags = true;
specialCaseCmp = true;
}
@@ -3742,14 +3574,14 @@ void CodeGen::genCondJumpLng(GenTreePtr cond,
if (lval == 0)
{
/* op1 != 0 */
- ins = INS_OR;
+ ins = INS_OR;
useIncToSetFlags = false;
specialCaseCmp = true;
}
else if (lval == -1)
{
/* op1 != -1 */
- ins = INS_AND;
+ ins = INS_AND;
useIncToSetFlags = true;
specialCaseCmp = true;
}
@@ -3761,7 +3593,7 @@ void CodeGen::genCondJumpLng(GenTreePtr cond,
addrReg = genMakeRvalueAddressable(op1, 0, RegSet::KEEP_REG, false, true);
- regMaskTP tmpMask = regSet.rsRegMaskCanGrab();
+ regMaskTP tmpMask = regSet.rsRegMaskCanGrab();
insFlags flags = useIncToSetFlags ? INS_FLAGS_DONT_CARE : INS_FLAGS_SET;
if (op1->gtFlags & GTF_REG_VAL)
@@ -3797,9 +3629,8 @@ void CodeGen::genCondJumpLng(GenTreePtr cond,
/* Set the flags using INS_AND | INS_OR */
inst_RV_TT(ins, rTmp, op1, 4, EA_4BYTE, flags);
}
-
}
- else // op1 is not GTF_REG_VAL
+ else // op1 is not GTF_REG_VAL
{
rTmp = regSet.rsGrabReg(tmpMask);
@@ -3841,19 +3672,18 @@ void CodeGen::genCondJumpLng(GenTreePtr cond,
/* Compare the high part first */
- int ival = (int)(lval >> 32);
+ int ival = (int)(lval >> 32);
/* Comparing a register against 0 is easier */
- if (!ival && (op1->gtFlags & GTF_REG_VAL)
- && (rTmp = genRegPairHi(op1->gtRegPair)) != REG_STK )
+ if (!ival && (op1->gtFlags & GTF_REG_VAL) && (rTmp = genRegPairHi(op1->gtRegPair)) != REG_STK)
{
/* Generate 'test rTmp, rTmp' */
instGen_Compare_Reg_To_Zero(emitTypeSize(op1->TypeGet()), rTmp); // set flags
}
else
{
- if (!(op1->gtFlags & GTF_REG_VAL) && (op1->gtOper == GT_CNS_LNG))
+ if (!(op1->gtFlags & GTF_REG_VAL) && (op1->gtOper == GT_CNS_LNG))
{
/* Special case: comparison of two constants */
// Needed as gtFoldExpr() doesn't fold longs
@@ -3881,14 +3711,14 @@ void CodeGen::genCondJumpLng(GenTreePtr cond,
if (bFPTransition)
{
jumpTrue = genTransitionBlockStackFP(&compCurFPState, compiler->compCurBB, jumpTrue);
- }
+ }
#endif
/* Generate the appropriate jumps */
- if (cond->gtFlags & GTF_UNSIGNED)
- genJccLongHi(cmp, jumpTrue, jumpFalse, true);
+ if (cond->gtFlags & GTF_UNSIGNED)
+ genJccLongHi(cmp, jumpTrue, jumpFalse, true);
else
- genJccLongHi(cmp, jumpTrue, jumpFalse);
+ genJccLongHi(cmp, jumpTrue, jumpFalse);
/* Compare the low part second */
@@ -3896,21 +3726,20 @@ void CodeGen::genCondJumpLng(GenTreePtr cond,
/* Comparing a register against 0 is easier */
- if (!ival && (op1->gtFlags & GTF_REG_VAL)
- && (rTmp = genRegPairLo(op1->gtRegPair)) != REG_STK)
+ if (!ival && (op1->gtFlags & GTF_REG_VAL) && (rTmp = genRegPairLo(op1->gtRegPair)) != REG_STK)
{
/* Generate 'test rTmp, rTmp' */
instGen_Compare_Reg_To_Zero(emitTypeSize(op1->TypeGet()), rTmp); // set flags
}
else
{
- if (!(op1->gtFlags & GTF_REG_VAL) && (op1->gtOper == GT_CNS_LNG))
+ if (!(op1->gtFlags & GTF_REG_VAL) && (op1->gtOper == GT_CNS_LNG))
{
/* Special case: comparison of two constants */
// Needed as gtFoldExpr() doesn't fold longs
noway_assert(addrReg == 0);
- int op1_loword = (int) op1->gtLngCon.gtLconVal;
+ int op1_loword = (int)op1->gtLngCon.gtLconVal;
/* get the constant operand into a register */
rTmp = genGetRegSetToIcon(op1_loword);
@@ -3981,7 +3810,7 @@ void CodeGen::genCondJumpLng(GenTreePtr cond,
inst_RV_TT(INS_cmp, genRegPairHi(regPair), op2, 4);
- if (cond->gtFlags & GTF_UNSIGNED)
+ if (cond->gtFlags & GTF_UNSIGNED)
genJccLongHi(cmp, jumpTrue, jumpFalse, true);
else
genJccLongHi(cmp, jumpTrue, jumpFalse);
@@ -4006,7 +3835,6 @@ void CodeGen::genCondJumpLng(GenTreePtr cond,
}
}
-
/*****************************************************************************
* gen_fcomp_FN, gen_fcomp_FS_TT, gen_fcompp_FS
* Called by genCondJumpFlt() to generate the fcomp instruction appropriate
@@ -4029,7 +3857,7 @@ void CodeGen::genCondJumpLng(GenTreePtr cond,
* already placed its result in the EFLAGS register.
*/
-bool CodeGen::genUse_fcomip()
+bool CodeGen::genUse_fcomip()
{
return compiler->opts.compUseFCOMI;
}
@@ -4045,72 +3873,70 @@ bool CodeGen::genUse_fcomip()
* Returns the flags the following jump/set instruction should use.
*/
-emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
+emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
{
noway_assert(cond->OperIsCompare());
noway_assert(varTypeIsI(genActualType(cond->gtOp.gtOp1->gtType)));
- GenTreePtr op1 = cond->gtOp.gtOp1;
- GenTreePtr op2 = cond->gtOp.gtOp2;
- genTreeOps cmp = cond->OperGet();
+ GenTreePtr op1 = cond->gtOp.gtOp1;
+ GenTreePtr op2 = cond->gtOp.gtOp2;
+ genTreeOps cmp = cond->OperGet();
- if (cond->gtFlags & GTF_REVERSE_OPS)
+ if (cond->gtFlags & GTF_REVERSE_OPS)
{
/* Don't forget to modify the condition as well */
cond->gtOp.gtOp1 = op2;
cond->gtOp.gtOp2 = op1;
- cond->SetOper (GenTree::SwapRelop(cmp));
- cond->gtFlags &= ~GTF_REVERSE_OPS;
+ cond->SetOper(GenTree::SwapRelop(cmp));
+ cond->gtFlags &= ~GTF_REVERSE_OPS;
/* Get hold of the new values */
- cmp = cond->OperGet();
- op1 = cond->gtOp.gtOp1;
- op2 = cond->gtOp.gtOp2;
+ cmp = cond->OperGet();
+ op1 = cond->gtOp.gtOp1;
+ op2 = cond->gtOp.gtOp2;
}
// Note that op1's type may get bashed. So save it early
- var_types op1Type = op1->TypeGet();
- bool unsignedCmp = (cond->gtFlags & GTF_UNSIGNED) != 0;
- emitAttr size = EA_UNKNOWN;
+ var_types op1Type = op1->TypeGet();
+ bool unsignedCmp = (cond->gtFlags & GTF_UNSIGNED) != 0;
+ emitAttr size = EA_UNKNOWN;
+
+ regMaskTP regNeed;
+ regMaskTP addrReg1 = RBM_NONE;
+ regMaskTP addrReg2 = RBM_NONE;
+ emitJumpKind jumpKind = EJ_COUNT; // Initialize with an invalid value
- regMaskTP regNeed;
- regMaskTP addrReg1 = RBM_NONE;
- regMaskTP addrReg2 = RBM_NONE;
- emitJumpKind jumpKind = EJ_COUNT; // Initialize with an invalid value
+ bool byteCmp;
+ bool shortCmp;
- bool byteCmp;
- bool shortCmp;
-
regMaskTP newLiveMask;
regNumber op1Reg;
/* Are we comparing against a constant? */
- if (op2->IsCnsIntOrI())
+ if (op2->IsCnsIntOrI())
{
- ssize_t ival = op2->gtIntConCommon.IconValue();
+ ssize_t ival = op2->gtIntConCommon.IconValue();
/* unsigned less than comparisons with 1 ('< 1' )
should be transformed into '== 0' to potentially
suppress a tst instruction.
*/
- if ((ival == 1) && (cmp == GT_LT) && unsignedCmp)
+ if ((ival == 1) && (cmp == GT_LT) && unsignedCmp)
{
op2->gtIntCon.gtIconVal = ival = 0;
- cond->gtOper = cmp = GT_EQ;
+ cond->gtOper = cmp = GT_EQ;
}
/* Comparisons against 0 can be easier */
- if (ival == 0)
+ if (ival == 0)
{
// if we can safely change the comparison to unsigned we do so
- if (!unsignedCmp &&
- varTypeIsSmall(op1->TypeGet()) &&
- varTypeIsUnsigned(op1->TypeGet()))
+ if (!unsignedCmp && varTypeIsSmall(op1->TypeGet()) && varTypeIsUnsigned(op1->TypeGet()))
{
unsignedCmp = true;
}
@@ -4128,18 +3954,18 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
/* Is this a simple zero/non-zero test? */
- if (cmp == GT_EQ || cmp == GT_NE)
+ if (cmp == GT_EQ || cmp == GT_NE)
{
/* Is the operand an "AND" operation? */
- if (op1->gtOper == GT_AND)
+ if (op1->gtOper == GT_AND)
{
- GenTreePtr an1 = op1->gtOp.gtOp1;
- GenTreePtr an2 = op1->gtOp.gtOp2;
+ GenTreePtr an1 = op1->gtOp.gtOp1;
+ GenTreePtr an2 = op1->gtOp.gtOp2;
/* Check for the case "expr & icon" */
- if (an2->IsIntCnsFitsInI32())
+ if (an2->IsIntCnsFitsInI32())
{
int iVal = (int)an2->gtIntCon.gtIconVal;
@@ -4147,18 +3973,18 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
switch (an1->gtType)
{
- case TYP_BOOL:
- case TYP_BYTE:
- if (iVal & 0xffffff00)
- goto NO_TEST_FOR_AND;
- break;
- case TYP_CHAR:
- case TYP_SHORT:
- if (iVal & 0xffff0000)
- goto NO_TEST_FOR_AND;
- break;
- default:
- break;
+ case TYP_BOOL:
+ case TYP_BYTE:
+ if (iVal & 0xffffff00)
+ goto NO_TEST_FOR_AND;
+ break;
+ case TYP_CHAR:
+ case TYP_SHORT:
+ if (iVal & 0xffff0000)
+ goto NO_TEST_FOR_AND;
+ break;
+ default:
+ break;
}
if (an1->IsCnsIntOrI())
@@ -4177,7 +4003,7 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
genComputeAddressable(an1, addrReg1, RegSet::KEEP_REG, RBM_NONE, RegSet::KEEP_REG);
if (arm_Valid_Imm_For_Alu(iVal))
{
- inst_RV_IV(INS_TEST,an1->gtRegNum, iVal, emitActualTypeSize(an1->gtType));
+ inst_RV_IV(INS_TEST, an1->gtRegNum, iVal, emitActualTypeSize(an1->gtType));
}
else
{
@@ -4195,7 +4021,8 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
// Check to see if we can use a smaller immediate.
if ((an1->gtFlags & GTF_REG_VAL) && ((iVal & 0x0000FFFF) == iVal))
{
- var_types testType = (var_types)(((iVal & 0x000000FF) == iVal) ? TYP_UBYTE : TYP_USHORT);
+ var_types testType =
+ (var_types)(((iVal & 0x000000FF) == iVal) ? TYP_UBYTE : TYP_USHORT);
#if CPU_HAS_BYTE_REGS
// if we don't have byte-able register, switch to the 2-byte form
if ((testType == TYP_UBYTE) && !(genRegMask(an1->gtRegNum) & RBM_BYTE_REGS))
@@ -4213,12 +4040,9 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
}
}
-
goto DONE;
- NO_TEST_FOR_AND:
- ;
-
+ NO_TEST_FOR_AND:;
}
// TODO: Check for other cases that can generate 'test',
@@ -4234,7 +4058,7 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
{
/*
Unsigned comparison to 0. Using this table:
-
+
----------------------------------------------------
| Comparison | Flags Checked | Instruction Used |
----------------------------------------------------
@@ -4250,34 +4074,58 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
----------------------------------------------------
| > 0 | ZF = 0 | jne |
----------------------------------------------------
- */
+ */
switch (cmp)
{
#ifdef _TARGET_ARM_
- case GT_EQ: jumpKind = EJ_eq; break;
- case GT_NE: jumpKind = EJ_ne; break;
- case GT_LT: jumpKind = EJ_NONE; break;
- case GT_LE: jumpKind = EJ_eq; break;
- case GT_GE: jumpKind = EJ_NONE; break;
- case GT_GT: jumpKind = EJ_ne; break;
+ case GT_EQ:
+ jumpKind = EJ_eq;
+ break;
+ case GT_NE:
+ jumpKind = EJ_ne;
+ break;
+ case GT_LT:
+ jumpKind = EJ_NONE;
+ break;
+ case GT_LE:
+ jumpKind = EJ_eq;
+ break;
+ case GT_GE:
+ jumpKind = EJ_NONE;
+ break;
+ case GT_GT:
+ jumpKind = EJ_ne;
+ break;
#elif defined(_TARGET_X86_)
- case GT_EQ: jumpKind = EJ_je; break;
- case GT_NE: jumpKind = EJ_jne; break;
- case GT_LT: jumpKind = EJ_NONE; break;
- case GT_LE: jumpKind = EJ_je; break;
- case GT_GE: jumpKind = EJ_NONE; break;
- case GT_GT: jumpKind = EJ_jne; break;
+ case GT_EQ:
+ jumpKind = EJ_je;
+ break;
+ case GT_NE:
+ jumpKind = EJ_jne;
+ break;
+ case GT_LT:
+ jumpKind = EJ_NONE;
+ break;
+ case GT_LE:
+ jumpKind = EJ_je;
+ break;
+ case GT_GE:
+ jumpKind = EJ_NONE;
+ break;
+ case GT_GT:
+ jumpKind = EJ_jne;
+ break;
#endif // TARGET
- default:
- noway_assert(!"Unexpected comparison OpCode");
- break;
+ default:
+ noway_assert(!"Unexpected comparison OpCode");
+ break;
}
}
else
{
/*
Signed comparison to 0. Using this table:
-
+
-----------------------------------------------------
| Comparison | Flags Checked | Instruction Used |
-----------------------------------------------------
@@ -4298,31 +4146,55 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
switch (cmp)
{
#ifdef _TARGET_ARM_
- case GT_EQ: jumpKind = EJ_eq; break;
- case GT_NE: jumpKind = EJ_ne; break;
- case GT_LT: jumpKind = EJ_mi; break;
- case GT_LE: jumpKind = EJ_NONE; break;
- case GT_GE: jumpKind = EJ_pl; break;
- case GT_GT: jumpKind = EJ_NONE; break;
+ case GT_EQ:
+ jumpKind = EJ_eq;
+ break;
+ case GT_NE:
+ jumpKind = EJ_ne;
+ break;
+ case GT_LT:
+ jumpKind = EJ_mi;
+ break;
+ case GT_LE:
+ jumpKind = EJ_NONE;
+ break;
+ case GT_GE:
+ jumpKind = EJ_pl;
+ break;
+ case GT_GT:
+ jumpKind = EJ_NONE;
+ break;
#elif defined(_TARGET_X86_)
- case GT_EQ: jumpKind = EJ_je; break;
- case GT_NE: jumpKind = EJ_jne; break;
- case GT_LT: jumpKind = EJ_js; break;
- case GT_LE: jumpKind = EJ_NONE; break;
- case GT_GE: jumpKind = EJ_jns; break;
- case GT_GT: jumpKind = EJ_NONE; break;
+ case GT_EQ:
+ jumpKind = EJ_je;
+ break;
+ case GT_NE:
+ jumpKind = EJ_jne;
+ break;
+ case GT_LT:
+ jumpKind = EJ_js;
+ break;
+ case GT_LE:
+ jumpKind = EJ_NONE;
+ break;
+ case GT_GE:
+ jumpKind = EJ_jns;
+ break;
+ case GT_GT:
+ jumpKind = EJ_NONE;
+ break;
#endif // TARGET
- default:
- noway_assert(!"Unexpected comparison OpCode");
- break;
+ default:
+ noway_assert(!"Unexpected comparison OpCode");
+ break;
}
assert(jumpKind == genJumpKindForOper(cmp, CK_LOGICAL));
}
- assert(jumpKind != EJ_COUNT); // Ensure that it was assigned a valid value above
+ assert(jumpKind != EJ_COUNT); // Ensure that it was assigned a valid value above
/* Is the value a simple local variable? */
- if (op1->gtOper == GT_LCL_VAR)
+ if (op1->gtOper == GT_LCL_VAR)
{
/* Is the flags register set to the value? */
@@ -4352,7 +4224,7 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
}
}
- if (flags)
+ if (flags)
{
if (jumpKind != EJ_NONE)
{
@@ -4362,9 +4234,9 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
/* Is the value in a register? */
- if (op1->gtFlags & GTF_REG_VAL)
+ if (op1->gtFlags & GTF_REG_VAL)
{
- regNumber reg = op1->gtRegNum;
+ regNumber reg = op1->gtRegNum;
/* With a 'test' we can do any signed test or any test for equality */
@@ -4396,7 +4268,6 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
{
bool smallOk = true;
-
/* make sure that constant is not out of op1's range
if it is, we need to perform an int with int comparison
and therefore, we set smallOk to false, so op1 gets loaded
@@ -4407,45 +4278,63 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
* comparison, we can use smallOk. But we don't know which
* flags will be needed. This probably doesn't happen often.
*/
- var_types gtType=op1->TypeGet();
+ var_types gtType = op1->TypeGet();
switch (gtType)
{
- case TYP_BYTE: if (ival != (signed char )ival) smallOk = false; break;
- case TYP_BOOL:
- case TYP_UBYTE: if (ival != (unsigned char )ival) smallOk = false; break;
+ case TYP_BYTE:
+ if (ival != (signed char)ival)
+ smallOk = false;
+ break;
+ case TYP_BOOL:
+ case TYP_UBYTE:
+ if (ival != (unsigned char)ival)
+ smallOk = false;
+ break;
- case TYP_SHORT: if (ival != (signed short)ival) smallOk = false; break;
- case TYP_CHAR: if (ival != (unsigned short)ival) smallOk = false; break;
+ case TYP_SHORT:
+ if (ival != (signed short)ival)
+ smallOk = false;
+ break;
+ case TYP_CHAR:
+ if (ival != (unsigned short)ival)
+ smallOk = false;
+ break;
#ifdef _TARGET_64BIT_
- case TYP_INT: if (!FitsIn<INT32>(ival)) smallOk = false; break;
- case TYP_UINT: if (!FitsIn<UINT32>(ival)) smallOk = false; break;
+ case TYP_INT:
+ if (!FitsIn<INT32>(ival))
+ smallOk = false;
+ break;
+ case TYP_UINT:
+ if (!FitsIn<UINT32>(ival))
+ smallOk = false;
+ break;
#endif // _TARGET_64BIT_
- default: break;
+ default:
+ break;
}
- if (smallOk && // constant is in op1's range
- !unsignedCmp && // signed comparison
- varTypeIsSmall(gtType) && // smalltype var
- varTypeIsUnsigned(gtType)) // unsigned type
+ if (smallOk && // constant is in op1's range
+ !unsignedCmp && // signed comparison
+ varTypeIsSmall(gtType) && // smalltype var
+ varTypeIsUnsigned(gtType)) // unsigned type
{
unsignedCmp = true;
}
/* Make the comparand addressable */
addrReg1 = genMakeRvalueAddressable(op1, RBM_NONE, RegSet::KEEP_REG, false, smallOk);
-
}
-// #if defined(DEBUGGING_SUPPORT)
+ // #if defined(DEBUGGING_SUPPORT)
/* Special case: comparison of two constants */
// Needed if Importer doesn't call gtFoldExpr()
- if (!(op1->gtFlags & GTF_REG_VAL) && (op1->IsCnsIntOrI()))
+ if (!(op1->gtFlags & GTF_REG_VAL) && (op1->IsCnsIntOrI()))
{
// noway_assert(compiler->opts.MinOpts() || compiler->opts.compDbgCode);
@@ -4458,7 +4347,7 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
addrReg1 = genRegMask(op1->gtRegNum);
}
-// #endif
+ // #endif
/* Compare the operand against the constant */
@@ -4489,7 +4378,7 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
noway_assert(op1->gtOper != GT_CNS_INT);
- if (op2->gtOper == GT_LCL_VAR)
+ if (op2->gtOper == GT_LCL_VAR)
genMarkLclVar(op2);
assert(((addrReg1 | addrReg2) & regSet.rsMaskUsed) == (addrReg1 | addrReg2));
@@ -4497,7 +4386,7 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
/* Are we comparing against a register? */
- if (op2->gtFlags & GTF_REG_VAL)
+ if (op2->gtFlags & GTF_REG_VAL)
{
/* Make the comparands addressable and mark as used */
@@ -4506,7 +4395,7 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
/* Is the size of the comparison byte/char/short ? */
- if (varTypeIsSmall(op1->TypeGet()))
+ if (varTypeIsSmall(op1->TypeGet()))
{
/* Is op2 sitting in an appropriate register? */
@@ -4544,7 +4433,7 @@ emitJumpKind CodeGen::genCondSetFlags(GenTreePtr cond)
goto DONE;
-NO_SMALL_CMP:
+ NO_SMALL_CMP:
// op1 has been made addressable and is marked as in use
// op2 is un-generated
@@ -4555,7 +4444,7 @@ NO_SMALL_CMP:
regNumber reg1 = regSet.rsPickReg();
noway_assert(varTypeIsSmall(op1->TypeGet()));
- instruction ins = ins_Move_Extend(op1->TypeGet(), (op1->gtFlags & GTF_REG_VAL)!=0);
+ instruction ins = ins_Move_Extend(op1->TypeGet(), (op1->gtFlags & GTF_REG_VAL) != 0);
// regSet.rsPickReg can cause one of the trees within this address mode to get spilled
// so we need to make sure it is still valid. Note that at this point, reg1 is
@@ -4582,8 +4471,8 @@ NO_SMALL_CMP:
}
// We come here if op2 is not enregistered or not in a "good" register.
-
- assert(addrReg1 == 0);
+
+ assert(addrReg1 == 0);
// Determine what registers go live between op1 and op2
newLiveMask = genNewLiveRegMask(op1, op2);
@@ -4600,12 +4489,12 @@ NO_SMALL_CMP:
#if CPU_HAS_BYTE_REGS
// if necessary setup regNeed to select just the byte-able registers
- if (byteCmp)
+ if (byteCmp)
regNeed = regSet.rsNarrowHint(RBM_BYTE_REGS, regNeed);
#endif // CPU_HAS_BYTE_REGS
// Compute the first comparand into some register, regNeed here is simply a hint because RegSet::ANY_REG is used.
- //
+ //
genComputeReg(op1, regNeed, RegSet::ANY_REG, RegSet::FREE_REG);
noway_assert(op1->gtFlags & GTF_REG_VAL);
@@ -4617,7 +4506,7 @@ NO_SMALL_CMP:
#if CPU_HAS_BYTE_REGS
// if necessary setup regNeed to select just the byte-able registers
- if (byteCmp)
+ if (byteCmp)
regNeed &= RBM_BYTE_REGS;
#endif // CPU_HAS_BYTE_REGS
@@ -4627,13 +4516,12 @@ NO_SMALL_CMP:
// avoid selecting op2 reserved regs, as using them will force a spill temp to be used.
regNeed = regSet.rsMustExclude(regNeed, op2->gtRsvdRegs);
- // Did we end up in an acceptable register?
+ // Did we end up in an acceptable register?
// and do we have an acceptable free register available to grab?
//
- if ( ((genRegMask(op1Reg) & regNeed) == 0) &&
- ((regSet.rsRegMaskFree() & regNeed) != 0) )
+ if (((genRegMask(op1Reg) & regNeed) == 0) && ((regSet.rsRegMaskFree() & regNeed) != 0))
{
- // Grab an acceptable register
+ // Grab an acceptable register
regNumber newReg = regSet.rsGrabReg(regNeed);
noway_assert(op1Reg != newReg);
@@ -4662,7 +4550,7 @@ NO_SMALL_CMP:
/* Mark the register as 'used' */
regSet.rsMarkRegUsed(op1);
-
+
addrReg1 = genRegMask(op1Reg);
assert(((addrReg1 | addrReg2) & regSet.rsMaskUsed) == (addrReg1 | addrReg2));
@@ -4682,7 +4570,7 @@ DONE_OP1:
#if CPU_HAS_BYTE_REGS
// if necessary setup regNeed to select just the byte-able registers
- if (byteCmp)
+ if (byteCmp)
regNeed &= RBM_BYTE_REGS;
#endif // CPU_HAS_BYTE_REGS
@@ -4738,7 +4626,7 @@ DONE_OP1:
inst_RV_TT(INS_cmp, op1->gtRegNum, op2, 0, size);
DONE:
-
+
jumpKind = genJumpKindForOper(cmp, unsignedCmp ? CK_UNSIGNED : CK_SIGNED);
DONE_FLAGS: // We have determined what jumpKind to use
@@ -4752,7 +4640,7 @@ DONE_FLAGS: // We have determined what jumpKind to use
genDoneAddressable(op1, addrReg1, RegSet::KEEP_REG);
genDoneAddressable(op2, addrReg2, RegSet::KEEP_REG);
- noway_assert(jumpKind != EJ_COUNT); // Ensure that it was assigned a valid value
+ noway_assert(jumpKind != EJ_COUNT); // Ensure that it was assigned a valid value
return jumpKind;
}
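
The two tables inside genCondSetFlags compress to a small rule for compares against zero: equality tests are unchanged, signed orderings only need the sign flag, and the unsigned orderings either are statically decided or collapse to an equality test. The sketch below restates both tables in one function using the x86 jump names above; it is illustrative, not the JIT's code.

    // Illustrative only: jump-kind selection for a compare against zero once
    // the flags have been set. EJ_NONE marks the rows the tables above leave
    // unresolved: unsigned "< 0" / ">= 0" are statically decided, and signed
    // "<= 0" / "> 0" need more than a single flag check.
    emitJumpKind zeroCompareJumpKindSketch(genTreeOps cmp, bool isUnsigned)
    {
        switch (cmp)
        {
            case GT_EQ: return EJ_je;
            case GT_NE: return EJ_jne;
            case GT_LT: return isUnsigned ? EJ_NONE : EJ_js;  // sign bit set
            case GT_GE: return isUnsigned ? EJ_NONE : EJ_jns; // sign bit clear
            case GT_LE: return isUnsigned ? EJ_je : EJ_NONE;  // unsigned "<= 0" means "== 0"
            case GT_GT: return isUnsigned ? EJ_jne : EJ_NONE; // unsigned "> 0" means "!= 0"
            default:    unreached();
        }
    }
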
@@ -4765,20 +4653,16 @@ DONE_FLAGS: // We have determined what jumpKind to use
* the given relational operator yields 'true'.
*/
-void CodeGen::genCondJump(GenTreePtr cond,
- BasicBlock *destTrue,
- BasicBlock *destFalse,
- bool bStackFPFixup
- )
+void CodeGen::genCondJump(GenTreePtr cond, BasicBlock* destTrue, BasicBlock* destFalse, bool bStackFPFixup)
{
- BasicBlock * jumpTrue;
- BasicBlock * jumpFalse;
+ BasicBlock* jumpTrue;
+ BasicBlock* jumpFalse;
- GenTreePtr op1 = cond->gtOp.gtOp1;
- GenTreePtr op2 = cond->gtOp.gtOp2;
- genTreeOps cmp = cond->OperGet();
+ GenTreePtr op1 = cond->gtOp.gtOp1;
+ GenTreePtr op2 = cond->gtOp.gtOp2;
+ genTreeOps cmp = cond->OperGet();
- if (destTrue)
+ if (destTrue)
{
jumpTrue = destTrue;
jumpFalse = destFalse;
@@ -4794,80 +4678,78 @@ void CodeGen::genCondJump(GenTreePtr cond,
noway_assert(cond->OperIsCompare());
/* Make sure the more expensive operand is 'op1' */
- noway_assert( (cond->gtFlags & GTF_REVERSE_OPS) == 0 );
+ noway_assert((cond->gtFlags & GTF_REVERSE_OPS) == 0);
- if (cond->gtFlags & GTF_REVERSE_OPS) // TODO: note that this is now dead code, since the above is a noway_assert()
+ if (cond->gtFlags & GTF_REVERSE_OPS) // TODO: note that this is now dead code, since the above is a noway_assert()
{
/* Don't forget to modify the condition as well */
cond->gtOp.gtOp1 = op2;
cond->gtOp.gtOp2 = op1;
- cond->SetOper (GenTree::SwapRelop(cmp));
- cond->gtFlags &= ~GTF_REVERSE_OPS;
+ cond->SetOper(GenTree::SwapRelop(cmp));
+ cond->gtFlags &= ~GTF_REVERSE_OPS;
/* Get hold of the new values */
- cmp = cond->OperGet();
- op1 = cond->gtOp.gtOp1;
- op2 = cond->gtOp.gtOp2;
+ cmp = cond->OperGet();
+ op1 = cond->gtOp.gtOp1;
+ op2 = cond->gtOp.gtOp2;
}
/* What is the type of the operand? */
switch (genActualType(op1->gtType))
{
- case TYP_INT:
- case TYP_REF:
- case TYP_BYREF:
- emitJumpKind jumpKind;
+ case TYP_INT:
+ case TYP_REF:
+ case TYP_BYREF:
+ emitJumpKind jumpKind;
- // Check if we can use the currently set flags. Else set them
+ // Check if we can use the currently set flags. Else set them
- jumpKind = genCondSetFlags(cond);
+ jumpKind = genCondSetFlags(cond);
#if FEATURE_STACK_FP_X87
- if (bStackFPFixup)
- {
- genCondJmpInsStackFP(jumpKind,
- jumpTrue,
- jumpFalse);
- }
- else
+ if (bStackFPFixup)
+ {
+ genCondJmpInsStackFP(jumpKind, jumpTrue, jumpFalse);
+ }
+ else
#endif
- {
- /* Generate the conditional jump */
- inst_JMP(jumpKind, jumpTrue);
- }
+ {
+ /* Generate the conditional jump */
+ inst_JMP(jumpKind, jumpTrue);
+ }
- return;
+ return;
- case TYP_LONG:
+ case TYP_LONG:
#if FEATURE_STACK_FP_X87
- if (bStackFPFixup)
- {
- genCondJumpLngStackFP(cond, jumpTrue, jumpFalse);
- }
- else
+ if (bStackFPFixup)
+ {
+ genCondJumpLngStackFP(cond, jumpTrue, jumpFalse);
+ }
+ else
#endif
- {
- genCondJumpLng(cond, jumpTrue, jumpFalse);
- }
- return;
+ {
+ genCondJumpLng(cond, jumpTrue, jumpFalse);
+ }
+ return;
- case TYP_FLOAT:
- case TYP_DOUBLE:
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
#if FEATURE_STACK_FP_X87
- genCondJumpFltStackFP(cond, jumpTrue, jumpFalse, bStackFPFixup);
+ genCondJumpFltStackFP(cond, jumpTrue, jumpFalse, bStackFPFixup);
#else
- genCondJumpFloat(cond, jumpTrue, jumpFalse);
+ genCondJumpFloat(cond, jumpTrue, jumpFalse);
#endif
- return;
+ return;
- default:
+ default:
#ifdef DEBUG
- compiler->gtDispTree(cond);
+ compiler->gtDispTree(cond);
#endif
- unreached(); // unexpected/unsupported 'jtrue' operands type
+ unreached(); // unexpected/unsupported 'jtrue' operands type
}
}
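
Both genCondSetFlags and genCondJump above handle GTF_REVERSE_OPS by swapping the operands and replacing the relop with GenTree::SwapRelop of itself. The effect of that swap is simple to state: equality operators are unchanged and the orderings flip direction. A sketch of the mapping (illustrative; the real helper lives on GenTree):

    // Illustrative only: what swapping the operands of a relational operator
    // does to the operator itself ("a < b" becomes "b > a", and so on).
    genTreeOps swappedRelopSketch(genTreeOps cmp)
    {
        switch (cmp)
        {
            case GT_EQ: return GT_EQ; // symmetric
            case GT_NE: return GT_NE; // symmetric
            case GT_LT: return GT_GT;
            case GT_LE: return GT_GE;
            case GT_GE: return GT_LE;
            case GT_GT: return GT_LT;
            default:    unreached();
        }
    }
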
@@ -4877,7 +4759,7 @@ void CodeGen::genCondJump(GenTreePtr cond,
#ifdef DEBUG
-void CodeGen::genStressRegs(GenTreePtr tree)
+void CodeGen::genStressRegs(GenTreePtr tree)
{
if (regSet.rsStressRegs() < 2)
return;
@@ -4893,13 +4775,14 @@ void CodeGen::genStressRegs(GenTreePtr tree)
for (regNum = REG_FIRST, regBit = 1; regNum < REG_COUNT; regNum = REG_NEXT(regNum), regBit <<= 1)
{
- if ((spillRegs & regBit) && (regSet.rsUsedTree[regNum] != NULL) && (genTypeSize(regSet.rsUsedTree[regNum]->TypeGet()) > 0))
+ if ((spillRegs & regBit) && (regSet.rsUsedTree[regNum] != NULL) &&
+ (genTypeSize(regSet.rsUsedTree[regNum]->TypeGet()) > 0))
{
regSet.rsSpillReg(regNum);
spillRegs &= regSet.rsMaskUsed;
- if (!spillRegs)
+ if (!spillRegs)
break;
}
}
@@ -4913,8 +4796,8 @@ void CodeGen::genStressRegs(GenTreePtr tree)
/* It is sometimes reasonable to expect that calling genCodeForTree()
on certain trees won't spill anything */
- if ((compiler->compCurStmt == compiler->compCurBB->bbTreeList) &&
- (compiler->compCurBB->bbCatchTyp) && handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp))
+ if ((compiler->compCurStmt == compiler->compCurBB->bbTreeList) && (compiler->compCurBB->bbCatchTyp) &&
+ handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp))
{
trashRegs &= ~(RBM_EXCEPTION_OBJECT);
}
@@ -4929,8 +4812,8 @@ void CodeGen::genStressRegs(GenTreePtr tree)
if (tree->gtType == TYP_INT && tree->OperIsSimple())
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
if (op1 && (op1->gtFlags & GTF_REG_VAL))
trashRegs &= ~genRegMask(op1->gtRegNum);
if (op2 && (op2->gtFlags & GTF_REG_VAL))
@@ -4941,7 +4824,7 @@ void CodeGen::genStressRegs(GenTreePtr tree)
{
if (compiler->info.compCallUnmanaged)
{
- LclVarDsc * varDsc = &compiler->lvaTable[compiler->info.compLvFrameListRoot];
+ LclVarDsc* varDsc = &compiler->lvaTable[compiler->info.compLvFrameListRoot];
if (varDsc->lvRegister)
trashRegs &= ~genRegMask(varDsc->lvRegNum);
}
@@ -4955,7 +4838,7 @@ void CodeGen::genStressRegs(GenTreePtr tree)
// This is obviously false for ARM, but this function is never called.
for (regNumber reg = REG_INT_FIRST; reg <= REG_INT_LAST; reg = REG_NEXT(reg))
{
- regMaskTP regMask = genRegMask(reg);
+ regMaskTP regMask = genRegMask(reg);
if (regSet.rsRegsModified(regMask & trashRegs))
genSetRegToIcon(reg, 0);
@@ -4964,22 +4847,19 @@ void CodeGen::genStressRegs(GenTreePtr tree)
#endif // DEBUG
-
/*****************************************************************************
*
* Generate code for a GTK_CONST tree
*/
-void CodeGen::genCodeForTreeConst(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForTreeConst(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
noway_assert(tree->IsCnsIntOrI());
- ssize_t ival = tree->gtIntConCommon.IconValue();
- regMaskTP needReg = destReg;
- regNumber reg;
- bool needReloc = compiler->opts.compReloc && tree->IsIconHandle();
+ ssize_t ival = tree->gtIntConCommon.IconValue();
+ regMaskTP needReg = destReg;
+ regNumber reg;
+ bool needReloc = compiler->opts.compReloc && tree->IsIconHandle();
#if REDUNDANT_LOAD
@@ -5011,14 +4891,14 @@ void CodeGen::genCodeForTreeConst(GenTreePtr tree,
/* Is the constant already in register? If so, use this register */
reg = regTracker.rsIconIsInReg(ival);
- if (reg != REG_NA)
+ if (reg != REG_NA)
goto REG_LOADED;
}
}
#endif // REDUNDANT_LOAD
- reg = regSet.rsPickReg(needReg, bestReg);
+ reg = regSet.rsPickReg(needReg, bestReg);
/* If the constant is a handle, we need a reloc to be applied to it */
@@ -5034,10 +4914,10 @@ void CodeGen::genCodeForTreeConst(GenTreePtr tree,
REG_LOADED:
-#ifdef DEBUG
+#ifdef DEBUG
/* Special case: GT_CNS_INT - Restore the current live set if it was changed */
- if (!genTempLiveChg)
+ if (!genTempLiveChg)
{
VarSetOps::Assign(compiler, compiler->compCurLife, genTempOldLife);
genTempLiveChg = true;
@@ -5048,216 +4928,214 @@ REG_LOADED:
genCodeForTree_DONE(tree, reg);
}
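
Under REDUNDANT_LOAD, genCodeForTreeConst first asks the register tracker whether the constant is already sitting in some register (rsIconIsInReg) and reuses it instead of re-materializing the immediate; only otherwise does it pick a register and load the value. The sketch below restates that decision; gating the reuse on "no relocation needed" is an assumption about the part of the function elided from this hunk, on the reasoning that a handle which must be relocated cannot be satisfied by a tracked copy.

    // Illustrative only: the reuse-or-load decision for an integer constant.
    // 'regHoldingIval' stands for the result of regTracker.rsIconIsInReg(ival);
    // the reloc gating is an assumption, not shown in the hunk above.
    regNumber regForIconSketch(bool needReloc, regNumber regHoldingIval, regNumber pickedReg)
    {
        if (!needReloc && (regHoldingIval != REG_NA))
        {
            return regHoldingIval; // reuse the tracked copy; no immediate load emitted
        }
        return pickedReg; // regSet.rsPickReg(needReg, bestReg), followed by the load
    }
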
-
/*****************************************************************************
*
* Generate code for a GTK_LEAF tree
*/
-void CodeGen::genCodeForTreeLeaf(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForTreeLeaf(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
- genTreeOps oper = tree->OperGet();
- regNumber reg = DUMMY_INIT(REG_CORRUPT);
- regMaskTP regs = regSet.rsMaskUsed;
- regMaskTP needReg = destReg;
- size_t size;
+ genTreeOps oper = tree->OperGet();
+ regNumber reg = DUMMY_INIT(REG_CORRUPT);
+ regMaskTP regs = regSet.rsMaskUsed;
+ regMaskTP needReg = destReg;
+ size_t size;
noway_assert(tree->OperKind() & GTK_LEAF);
switch (oper)
{
- case GT_REG_VAR:
- NO_WAY("GT_REG_VAR should have been caught above");
- break;
+ case GT_REG_VAR:
+ NO_WAY("GT_REG_VAR should have been caught above");
+ break;
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
- /* Does the variable live in a register? */
+ /* Does the variable live in a register? */
- if (genMarkLclVar(tree))
- {
- genCodeForTree_REG_VAR1(tree);
- return;
- }
+ if (genMarkLclVar(tree))
+ {
+ genCodeForTree_REG_VAR1(tree);
+ return;
+ }
#if REDUNDANT_LOAD
- /* Is the local variable already in register? */
+ /* Is the local variable already in register? */
- reg = findStkLclInReg(tree->gtLclVarCommon.gtLclNum);
+ reg = findStkLclInReg(tree->gtLclVarCommon.gtLclNum);
- if (reg != REG_NA)
- {
- /* Use the register the variable happens to be in */
- regMaskTP regMask = genRegMask(reg);
+ if (reg != REG_NA)
+ {
+ /* Use the register the variable happens to be in */
+ regMaskTP regMask = genRegMask(reg);
- // If the register that it was in isn't one of the needRegs
- // then try to move it into a needReg register
+ // If the register that it was in isn't one of the needRegs
+ // then try to move it into a needReg register
- if (((regMask & needReg) == 0) && (regSet.rsRegMaskCanGrab() & needReg))
- {
- regNumber rg2 = reg;
- reg = regSet.rsPickReg(needReg, bestReg);
- if (reg != rg2)
+ if (((regMask & needReg) == 0) && (regSet.rsRegMaskCanGrab() & needReg))
{
- regMask = genRegMask(reg);
- inst_RV_RV(INS_mov, reg, rg2, tree->TypeGet());
+ regNumber rg2 = reg;
+ reg = regSet.rsPickReg(needReg, bestReg);
+ if (reg != rg2)
+ {
+ regMask = genRegMask(reg);
+ inst_RV_RV(INS_mov, reg, rg2, tree->TypeGet());
+ }
}
- }
- gcInfo.gcMarkRegPtrVal (reg, tree->TypeGet());
- regTracker.rsTrackRegLclVar(reg, tree->gtLclVarCommon.gtLclNum);
- break;
- }
+ gcInfo.gcMarkRegPtrVal(reg, tree->TypeGet());
+ regTracker.rsTrackRegLclVar(reg, tree->gtLclVarCommon.gtLclNum);
+ break;
+ }
#endif
- goto MEM_LEAF;
+ goto MEM_LEAF;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
- // We only use GT_LCL_FLD for lvDoNotEnregister vars, so we don't have
- // to worry about it being enregistered.
- noway_assert(compiler->lvaTable[tree->gtLclFld.gtLclNum].lvRegister == 0);
- goto MEM_LEAF;
+ // We only use GT_LCL_FLD for lvDoNotEnregister vars, so we don't have
+ // to worry about it being enregistered.
+ noway_assert(compiler->lvaTable[tree->gtLclFld.gtLclNum].lvRegister == 0);
+ goto MEM_LEAF;
- case GT_CLS_VAR:
+ case GT_CLS_VAR:
- MEM_LEAF:
+ MEM_LEAF:
- /* Pick a register for the value */
+ /* Pick a register for the value */
- reg = regSet.rsPickReg(needReg, bestReg);
+ reg = regSet.rsPickReg(needReg, bestReg);
- /* Load the variable into the register */
+ /* Load the variable into the register */
- size = genTypeSize(tree->gtType);
+ size = genTypeSize(tree->gtType);
- if (size < EA_4BYTE)
- {
- instruction ins = ins_Move_Extend(tree->TypeGet(), (tree->gtFlags & GTF_REG_VAL)!=0);
- inst_RV_TT(ins, reg, tree, 0);
+ if (size < EA_4BYTE)
+ {
+ instruction ins = ins_Move_Extend(tree->TypeGet(), (tree->gtFlags & GTF_REG_VAL) != 0);
+ inst_RV_TT(ins, reg, tree, 0);
- /* We've now "promoted" the tree-node to TYP_INT */
+ /* We've now "promoted" the tree-node to TYP_INT */
- tree->gtType = TYP_INT;
- }
- else
- {
- inst_RV_TT(INS_mov, reg, tree, 0);
- }
+ tree->gtType = TYP_INT;
+ }
+ else
+ {
+ inst_RV_TT(INS_mov, reg, tree, 0);
+ }
- regTracker.rsTrackRegTrash(reg);
+ regTracker.rsTrackRegTrash(reg);
- gcInfo.gcMarkRegPtrVal (reg, tree->TypeGet());
+ gcInfo.gcMarkRegPtrVal(reg, tree->TypeGet());
- switch (oper)
- {
- case GT_CLS_VAR:
- regTracker.rsTrackRegClsVar(reg, tree);
- break;
- case GT_LCL_VAR:
- regTracker.rsTrackRegLclVar(reg, tree->gtLclVarCommon.gtLclNum);
- break;
- case GT_LCL_FLD:
- break;
- default: noway_assert(!"Unexpected oper");
- }
+ switch (oper)
+ {
+ case GT_CLS_VAR:
+ regTracker.rsTrackRegClsVar(reg, tree);
+ break;
+ case GT_LCL_VAR:
+ regTracker.rsTrackRegLclVar(reg, tree->gtLclVarCommon.gtLclNum);
+ break;
+ case GT_LCL_FLD:
+ break;
+ default:
+ noway_assert(!"Unexpected oper");
+ }
#ifdef _TARGET_ARM_
- if (tree->gtFlags & GTF_IND_VOLATILE)
- {
- // Emit a memory barrier instruction after the load
- instGen_MemoryBarrier();
- }
+ if (tree->gtFlags & GTF_IND_VOLATILE)
+ {
+ // Emit a memory barrier instruction after the load
+ instGen_MemoryBarrier();
+ }
#endif
- break;
+ break;
- case GT_NO_OP:
- // The VM does certain things with actual NOP instructions
- // so generate something small that has no effect, but isn't
- // a typical NOP
- if (tree->gtFlags & GTF_NO_OP_NO)
- {
+ case GT_NO_OP:
+ // The VM does certain things with actual NOP instructions
+ // so generate something small that has no effect, but isn't
+ // a typical NOP
+ if (tree->gtFlags & GTF_NO_OP_NO)
+ {
#ifdef _TARGET_XARCH_
- // The VM expects 0x66 0x90 for a 2-byte NOP, not 0x90 0x90
- instGen(INS_nop);
- instGen(INS_nop);
-#elif defined (_TARGET_ARM_)
- // The VM isn't checking yet, when it does, hopefully it will
- // get fooled by the wider variant.
- instGen(INS_nopw);
+ // The VM expects 0x66 0x90 for a 2-byte NOP, not 0x90 0x90
+ instGen(INS_nop);
+ instGen(INS_nop);
+#elif defined(_TARGET_ARM_)
+ // The VM isn't checking yet, when it does, hopefully it will
+ // get fooled by the wider variant.
+ instGen(INS_nopw);
#else
- NYI("Non-nop NO_OP");
-#endif
- }
- else
- {
- instGen(INS_nop);
- }
- reg = REG_STK;
- break;
+ NYI("Non-nop NO_OP");
+#endif
+ }
+ else
+ {
+ instGen(INS_nop);
+ }
+ reg = REG_STK;
+ break;
#if !FEATURE_EH_FUNCLETS
- case GT_END_LFIN:
-
- /* Have to clear the shadowSP of the nesting level which
- encloses the finally */
-
- unsigned finallyNesting;
- finallyNesting = (unsigned)tree->gtVal.gtVal1;
- noway_assert(tree->gtVal.gtVal1 < compiler->compHndBBtabCount); //assert we didn't truncate with the cast above.
- noway_assert(finallyNesting < compiler->compHndBBtabCount);
-
- // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
- unsigned filterEndOffsetSlotOffs;
- PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) > sizeof(void*)); //below doesn't underflow.
- filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*)));
-
- unsigned curNestingSlotOffs;
- curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*));
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0,
- compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
- reg = REG_STK;
- break;
+ case GT_END_LFIN:
+
+ /* Have to clear the shadowSP of the nesting level which
+ encloses the finally */
+
+ unsigned finallyNesting;
+ finallyNesting = (unsigned)tree->gtVal.gtVal1;
+ noway_assert(tree->gtVal.gtVal1 <
+ compiler->compHndBBtabCount); // assert we didn't truncate with the cast above.
+ noway_assert(finallyNesting < compiler->compHndBBtabCount);
+
+ // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
+ unsigned filterEndOffsetSlotOffs;
+ PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
+ sizeof(void*)); // below doesn't underflow.
+ filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*)));
+
+ unsigned curNestingSlotOffs;
+ curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*));
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
+ reg = REG_STK;
+ break;
#endif // !FEATURE_EH_FUNCLETS
- case GT_CATCH_ARG:
+ case GT_CATCH_ARG:
- noway_assert(compiler->compCurBB->bbCatchTyp && handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
+ noway_assert(compiler->compCurBB->bbCatchTyp && handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
- /* Catch arguments get passed in a register. genCodeForBBlist()
- would have marked it as holding a GC object, but not used. */
+ /* Catch arguments get passed in a register. genCodeForBBlist()
+ would have marked it as holding a GC object, but not used. */
- noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
- reg = REG_EXCEPTION_OBJECT;
- break;
+ noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
+ reg = REG_EXCEPTION_OBJECT;
+ break;
- case GT_JMP:
- genCodeForTreeLeaf_GT_JMP(tree);
- return;
+ case GT_JMP:
+ genCodeForTreeLeaf_GT_JMP(tree);
+ return;
- case GT_MEMORYBARRIER:
- // Emit the memory barrier instruction
- instGen_MemoryBarrier();
- reg = REG_STK;
- break;
+ case GT_MEMORYBARRIER:
+ // Emit the memory barrier instruction
+ instGen_MemoryBarrier();
+ reg = REG_STK;
+ break;
- default:
+ default:
#ifdef DEBUG
- compiler->gtDispTree(tree);
+ compiler->gtDispTree(tree);
#endif
- noway_assert(!"unexpected leaf");
+ noway_assert(!"unexpected leaf");
}
noway_assert(reg != DUMMY_INIT(REG_CORRUPT));
genCodeForTree_DONE(tree, reg);
}
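
The GT_END_LFIN case above clears the shadow-SP slot for the nesting level that encloses the finally. The slot arithmetic is easy to lose in the reflowed lines: the last pointer-sized slot of lvaShadowSPslotsVar is reserved for ICodeManager::FixContext(ppEndRegion), and each finally nesting level owns the slot (finallyNesting + 1) entries below that. A small sketch of the offset computation (illustrative only):

    // Illustrative only: offset of the shadow-SP slot cleared for GT_END_LFIN.
    unsigned endLfinSlotOffsetSketch(unsigned shadowSPslotsSizeInBytes, unsigned finallyNesting)
    {
        // The last slot is reserved for ICodeManager::FixContext(ppEndRegion).
        unsigned filterEndOffsetSlotOffs = shadowSPslotsSizeInBytes - (unsigned)sizeof(void*);
        return filterEndOffsetSlotOffs - ((finallyNesting + 1) * (unsigned)sizeof(void*));
    }
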
-
-GenTreePtr CodeGen::genCodeForCommaTree (GenTreePtr tree)
+GenTreePtr CodeGen::genCodeForCommaTree(GenTreePtr tree)
{
while (tree->OperGet() == GT_COMMA)
{
@@ -5275,7 +5153,7 @@ GenTreePtr CodeGen::genCodeForCommaTree (GenTreePtr tree)
* Generate code for the a leaf node of type GT_JMP
*/
-void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
+void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
{
noway_assert(compiler->compCurBB->bbFlags & BBF_HAS_JMP);
@@ -5283,7 +5161,7 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
if (compiler->compIsProfilerHookNeeded())
{
/* fire the event at the call site */
- unsigned saveStackLvl2 = genStackLevel;
+ unsigned saveStackLvl2 = genStackLevel;
compiler->info.compProfilerCallback = true;
@@ -5293,11 +5171,14 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
//
regMaskTP byrefPushedRegs;
regMaskTP norefPushedRegs;
- regMaskTP pushedArgRegs = genPushRegs(RBM_ARG_REGS & (regSet.rsMaskUsed|regSet.rsMaskVars|regSet.rsMaskLock), &byrefPushedRegs, &norefPushedRegs);
+ regMaskTP pushedArgRegs =
+ genPushRegs(RBM_ARG_REGS & (regSet.rsMaskUsed | regSet.rsMaskVars | regSet.rsMaskLock), &byrefPushedRegs,
+ &norefPushedRegs);
if (compiler->compProfilerMethHndIndirected)
{
- getEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA, (ssize_t)compiler->compProfilerMethHnd);
+ getEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA,
+ (ssize_t)compiler->compProfilerMethHnd);
}
else
{
@@ -5306,8 +5187,8 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
genSinglePush();
genEmitHelperCall(CORINFO_HELP_PROF_FCN_TAILCALL,
- sizeof(int) * 1, // argSize
- EA_UNKNOWN); // retSize
+ sizeof(int) * 1, // argSize
+ EA_UNKNOWN); // retSize
//
// Adjust the number of stack slots used by this managed method if necessary.
@@ -5320,7 +5201,7 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
genPopRegs(pushedArgRegs, byrefPushedRegs, norefPushedRegs);
#elif _TARGET_ARM_
// For GT_JMP nodes we have added r0 as a used register, when under arm profiler, to evaluate GT_JMP node.
- // To emit tailcall callback we need r0 to pass profiler handle. Any free register could be used as call target.
+ // To emit tailcall callback we need r0 to pass profiler handle. Any free register could be used as call target.
regNumber argReg = regSet.rsGrabReg(RBM_PROFILER_JMP_USED);
noway_assert(argReg == REG_PROFILER_JMP_ARG);
regSet.rsLockReg(RBM_PROFILER_JMP_USED);
@@ -5331,18 +5212,18 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
regTracker.rsTrackRegTrash(argReg);
}
else
- {
+ {
instGen_Set_Reg_To_Imm(EA_4BYTE, argReg, (ssize_t)compiler->compProfilerMethHnd);
}
genEmitHelperCall(CORINFO_HELP_PROF_FCN_TAILCALL,
- 0, // argSize
- EA_UNKNOWN); // retSize
+ 0, // argSize
+ EA_UNKNOWN); // retSize
regSet.rsUnlockReg(RBM_PROFILER_JMP_USED);
-#else
+#else
NYI("Pushing the profilerHandle & caller's sp for the profiler callout and locking 'arguments'");
-#endif //_TARGET_X86_
+#endif //_TARGET_X86_
/* Restore the stack level */
genStackLevel = saveStackLvl2;
@@ -5366,18 +5247,16 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
// arguments, which is safe because RegAlloc prevents that by
// not enregistering any RegArgs when a JMP opcode is used.
- if (compiler->info.compArgsCount == 0)
+ if (compiler->info.compArgsCount == 0)
{
return;
}
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
// First move any enregistered stack arguments back to the stack
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->info.compArgsCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
noway_assert(varDsc->lvIsParam);
if (varDsc->lvIsRegArg || !varDsc->lvRegister)
@@ -5392,31 +5271,20 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
{
/* long - at least the low half must be enregistered */
- getEmitter()->emitIns_S_R(ins_Store(TYP_INT),
- EA_4BYTE,
- varDsc->lvRegNum,
- varNum,
- 0);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, varDsc->lvRegNum, varNum, 0);
/* Is the upper half also enregistered? */
if (varDsc->lvOtherReg != REG_STK)
{
- getEmitter()->emitIns_S_R(ins_Store(TYP_INT),
- EA_4BYTE,
- varDsc->lvOtherReg,
- varNum,
- sizeof(int));
+ getEmitter()->emitIns_S_R(ins_Store(TYP_INT), EA_4BYTE, varDsc->lvOtherReg, varNum, sizeof(int));
}
}
else
#endif // _TARGET_64BIT_
{
- getEmitter()->emitIns_S_R(ins_Store(varDsc->TypeGet()),
- emitTypeSize(varDsc->TypeGet()),
- varDsc->lvRegNum,
- varNum,
- 0);
+ getEmitter()->emitIns_S_R(ins_Store(varDsc->TypeGet()), emitTypeSize(varDsc->TypeGet()), varDsc->lvRegNum,
+ varNum, 0);
}
}
@@ -5425,13 +5293,11 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
#endif
// Next move any un-enregistered register arguments back to their register
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->info.compArgsCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->info.compArgsCount; varNum++, varDsc++)
{
/* Is this variable a register arg? */
- if (!varDsc->lvIsRegArg)
+ if (!varDsc->lvIsRegArg)
continue;
/* Register argument */
@@ -5447,20 +5313,13 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
{
/* long - at least the low half must be enregistered */
- getEmitter()->emitIns_R_S(ins_Load(TYP_INT),
- EA_4BYTE,
- varDsc->lvArgReg,
- varNum,
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(TYP_INT), EA_4BYTE, varDsc->lvArgReg, varNum, 0);
regTracker.rsTrackRegTrash(varDsc->lvArgReg);
/* Also assume the upper half also enregistered */
- getEmitter()->emitIns_R_S(ins_Load(TYP_INT),
- EA_4BYTE,
- genRegArgNext(varDsc->lvArgReg),
- varNum,
- sizeof(int));
+ getEmitter()->emitIns_R_S(ins_Load(TYP_INT), EA_4BYTE, genRegArgNext(varDsc->lvArgReg), varNum,
+ sizeof(int));
regTracker.rsTrackRegTrash(genRegArgNext(varDsc->lvArgReg));
#ifdef _TARGET_ARM_
@@ -5471,7 +5330,7 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
else
#endif // _TARGET_64BIT_
#ifdef _TARGET_ARM_
- if (varDsc->lvIsHfaRegArg())
+ if (varDsc->lvIsHfaRegArg())
{
const var_types elemType = varDsc->GetHfaType();
const instruction loadOp = ins_Load(elemType);
@@ -5481,11 +5340,7 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
for (unsigned ofs = 0; ofs < maxSize; ofs += (unsigned)size)
{
- getEmitter()->emitIns_R_S(loadOp,
- size,
- argReg,
- varNum,
- ofs);
+ getEmitter()->emitIns_R_S(loadOp, size, argReg, varNum, ofs);
assert(genIsValidFloatReg(argReg)); // we don't use register tracking for FP
argReg = regNextOfType(argReg, elemType);
}
@@ -5500,11 +5355,7 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
for (unsigned ofs = 0; ofs < maxSize; ofs += (unsigned)size)
{
- getEmitter()->emitIns_R_S(loadOp,
- size,
- argReg,
- varNum,
- ofs);
+ getEmitter()->emitIns_R_S(loadOp, size, argReg, varNum, ofs);
regTracker.rsTrackRegTrash(argReg);
fixedArgsMask |= genRegMask(argReg);
@@ -5515,9 +5366,9 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
else
#endif //_TARGET_ARM_
{
- var_types loadType = varDsc->TypeGet();
- regNumber argReg = varDsc->lvArgReg; // incoming arg register
- bool twoParts = false;
+ var_types loadType = varDsc->TypeGet();
+ regNumber argReg = varDsc->lvArgReg; // incoming arg register
+ bool twoParts = false;
if (compiler->info.compIsVarArgs && isFloatRegType(loadType))
{
@@ -5529,11 +5380,7 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
assert(isValidIntArgReg(argReg));
}
- getEmitter()->emitIns_R_S(ins_Load(loadType),
- emitTypeSize(loadType),
- argReg,
- varNum,
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
regTracker.rsTrackRegTrash(argReg);
#ifdef _TARGET_ARM_
@@ -5544,11 +5391,7 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
argReg = genRegArgNext(argReg);
assert(isValidIntArgReg(argReg));
- getEmitter()->emitIns_R_S(ins_Load(loadType),
- emitTypeSize(loadType),
- argReg,
- varNum,
- REGSIZE_BYTES);
+ getEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, REGSIZE_BYTES);
regTracker.rsTrackRegTrash(argReg);
#ifdef _TARGET_ARM_
@@ -5587,7 +5430,7 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
// Skip the 'vararg cookie.'
regDeclArgs = REG_NEXT(regDeclArgs);
- // Also add offset for the vararg cookie.
+ // Also add offset for the vararg cookie.
int offset = REGSIZE_BYTES;
// Load all the variable arguments in registers back to their registers.
@@ -5610,12 +5453,11 @@ void CodeGen::genCodeForTreeLeaf_GT_JMP(GenTreePtr tree)
* passed in pCallBackData. If the variable is assigned to, return
* Compiler::WALK_ABORT. Otherwise return Compiler::WALK_CONTINUE.
*/
-Compiler::fgWalkResult CodeGen::fgIsVarAssignedTo(GenTreePtr *pTree, Compiler::fgWalkData *data)
+Compiler::fgWalkResult CodeGen::fgIsVarAssignedTo(GenTreePtr* pTree, Compiler::fgWalkData* data)
{
GenTreePtr tree = *pTree;
- if ((tree->OperIsAssignment()) &&
- (tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR) &&
- (tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum == (unsigned) (size_t)data->pCallbackData))
+ if ((tree->OperIsAssignment()) && (tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR) &&
+ (tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum == (unsigned)(size_t)data->pCallbackData))
{
return Compiler::WALK_ABORT;
}
@@ -5623,11 +5465,10 @@ Compiler::fgWalkResult CodeGen::fgIsVarAssignedTo(GenTreePtr *pTree, Compiler::f
return Compiler::WALK_CONTINUE;
}
-
regNumber CodeGen::genIsEnregisteredIntVariable(GenTreePtr tree)
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
if (tree->gtOper == GT_LCL_VAR)
{
@@ -5637,7 +5478,7 @@ regNumber CodeGen::genIsEnregisteredIntVariable(GenTreePtr tree)
noway_assert(varNum < compiler->lvaCount);
varDsc = compiler->lvaTable + varNum;
- if (!varDsc->IsFloatRegType() && varDsc->lvRegister)
+ if (!varDsc->IsFloatRegType() && varDsc->lvRegister)
{
return varDsc->lvRegNum;
}
@@ -5647,12 +5488,12 @@ regNumber CodeGen::genIsEnregisteredIntVariable(GenTreePtr tree)
}
// inline
-void CodeGen::unspillLiveness(genLivenessSet * ls)
+void CodeGen::unspillLiveness(genLivenessSet* ls)
{
// Only try to unspill the registers that are missing from the currentLiveRegs
//
- regMaskTP cannotSpillMask = ls->maskVars | ls->gcRefRegs | ls->byRefRegs;
- regMaskTP currentLiveRegs = regSet.rsMaskVars | gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur;
+ regMaskTP cannotSpillMask = ls->maskVars | ls->gcRefRegs | ls->byRefRegs;
+ regMaskTP currentLiveRegs = regSet.rsMaskVars | gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur;
cannotSpillMask &= ~currentLiveRegs;
// Typically this will always be true and we will return
@@ -5667,8 +5508,8 @@ void CodeGen::unspillLiveness(genLivenessSet * ls)
if ((cannotSpillMask & genRegMask(reg)) == 0)
continue;
- RegSet::SpillDsc * spill = regSet.rsSpillDesc[reg];
-
+ RegSet::SpillDsc* spill = regSet.rsSpillDesc[reg];
+
// Was it spilled, if not then skip it.
//
if (!spill)
@@ -5685,23 +5526,21 @@ void CodeGen::unspillLiveness(genLivenessSet * ls)
* Generate code for a qmark colon
*/
-void CodeGen::genCodeForQmark(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForQmark(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
- regNumber reg;
- regMaskTP regs = regSet.rsMaskUsed;
- regMaskTP needReg = destReg;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
+ regNumber reg;
+ regMaskTP regs = regSet.rsMaskUsed;
+ regMaskTP needReg = destReg;
noway_assert(compiler->compQmarkUsed);
noway_assert(tree->gtOper == GT_QMARK);
noway_assert(op1->OperIsCompare());
noway_assert(op2->gtOper == GT_COLON);
- GenTreePtr thenNode = op2->AsColon()->ThenNode();
- GenTreePtr elseNode = op2->AsColon()->ElseNode();
+ GenTreePtr thenNode = op2->AsColon()->ThenNode();
+ GenTreePtr elseNode = op2->AsColon()->ElseNode();
/* If elseNode is a Nop node you must reverse the
thenNode and elseNode prior to reaching here!
@@ -5736,12 +5575,12 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
or the 'lab_done' label
*/
- BasicBlock * lab_true;
- BasicBlock * lab_false;
- BasicBlock * lab_done;
+ BasicBlock* lab_true;
+ BasicBlock* lab_false;
+ BasicBlock* lab_done;
- genLivenessSet entryLiveness;
- genLivenessSet exitLiveness;
+ genLivenessSet entryLiveness;
+ genLivenessSet exitLiveness;
lab_true = genCreateTempLabel();
lab_false = genCreateTempLabel();
@@ -5754,7 +5593,7 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
#ifdef DEBUG
regMaskTP spillMask = regSet.rsMaskUsedFloat | regSet.rsMaskLockedFloat | regSet.rsMaskRegVarFloat;
- // spillMask should be the whole FP stack
+ // spillMask should be the whole FP stack
noway_assert(compCurFPState.m_uStackSize == genCountBits(spillMask));
#endif
@@ -5778,7 +5617,7 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
before spilling regSet.rsMaskUsed */
regMaskTP rsAdditionalCandidates = regSet.rsMaskUsed & regSet.rsMaskVars;
- regMaskTP rsAdditional = RBM_NONE;
+ regMaskTP rsAdditional = RBM_NONE;
// For each multi-use of an enregistered variable, we need to determine if
// it can get spilled inside the qmark colon. This can only happen if
@@ -5798,7 +5637,8 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
{
Compiler::printTreeID(tree);
printf(": Qmark-Colon additional spilling candidates are ");
- dspRegMask(rsAdditionalCandidates); printf("\n");
+ dspRegMask(rsAdditionalCandidates);
+ printf("\n");
}
#endif
@@ -5807,17 +5647,17 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
VARSET_TP VARSET_INIT(compiler, rsLiveNow, compiler->compCurLife);
VARSET_TP VARSET_INIT_NOCOPY(rsLiveAfter, compiler->fgUpdateLiveSet(compiler->compCurLife,
- compiler->compCurLifeTree,
- tree));
+ compiler->compCurLifeTree, tree));
- VARSET_TP VARSET_INIT_NOCOPY(regVarLiveNow, VarSetOps::Intersection(compiler, compiler->raRegVarsMask, rsLiveNow));
+ VARSET_TP VARSET_INIT_NOCOPY(regVarLiveNow,
+ VarSetOps::Intersection(compiler, compiler->raRegVarsMask, rsLiveNow));
VARSET_ITER_INIT(compiler, iter, regVarLiveNow, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
// Find the variable in compiler->lvaTable
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- LclVarDsc *varDsc = compiler->lvaTable + varNum;
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
#if !FEATURE_FP_REGALLOC
if (varDsc->IsFloatRegType())
@@ -5837,7 +5677,7 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
regBit = genRegMask(varDsc->lvRegNum);
// For longs we may need to spill both regs
- if (isRegPairType(varDsc->lvType) && varDsc->lvOtherReg != REG_STK)
+ if (isRegPairType(varDsc->lvType) && varDsc->lvOtherReg != REG_STK)
regBit |= genRegMask(varDsc->lvOtherReg);
}
@@ -5855,7 +5695,8 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
if (!(op2->gtFlags & GTF_ASG))
continue;
- if (compiler->fgWalkTreePre(&op2, CodeGen::fgIsVarAssignedTo, (void *)(size_t)varNum) == Compiler::WALK_ABORT)
+ if (compiler->fgWalkTreePre(&op2, CodeGen::fgIsVarAssignedTo, (void*)(size_t)varNum) ==
+ Compiler::WALK_ABORT)
{
// Variable was assigned to, so we need to spill it.
@@ -5865,7 +5706,8 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
{
Compiler::printTreeID(tree);
printf(": Qmark-Colon candidate ");
- dspRegMask(regBit); printf("\n");
+ dspRegMask(regBit);
+ printf("\n");
printf(" is assigned to inside colon and will be spilled\n");
}
#endif
@@ -5881,7 +5723,8 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
{
Compiler::printTreeID(tree);
printf(": Qmark-Colon candidate ");
- dspRegMask(regBit); printf("\n");
+ dspRegMask(regBit);
+ printf("\n");
printf(" is alive at end of colon and will be spilled\n");
}
#endif
@@ -5893,10 +5736,10 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
{
Compiler::printTreeID(tree);
printf(": Qmark-Colon approved additional spilling candidates are ");
- dspRegMask(rsAdditional); printf("\n");
+ dspRegMask(rsAdditional);
+ printf("\n");
}
#endif
-
}
noway_assert((rsAdditionalCandidates | rsAdditional) == rsAdditionalCandidates);
@@ -5904,7 +5747,8 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
// We only need to spill registers that are modified by the qmark tree, as specified in tree->gtUsedRegs.
// If we ever need to use and spill a register while generating code that is not in tree->gtUsedRegs,
// we will have unbalanced spills and generate bad code.
- regMaskTP rsSpill = ((regSet.rsMaskUsed & ~(regSet.rsMaskVars|regSet.rsMaskResvd)) | rsAdditional) & tree->gtUsedRegs;
+ regMaskTP rsSpill =
+ ((regSet.rsMaskUsed & ~(regSet.rsMaskVars | regSet.rsMaskResvd)) | rsAdditional) & tree->gtUsedRegs;
#ifdef DEBUG
// Under register stress, regSet.rsPickReg() ignores the recommended registers and always picks
@@ -5930,7 +5774,7 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
// regSet.rsSpillRegs() will assert if we try to spill any enregistered variables.
// So, pretend there aren't any, and spill them anyway. This will only occur
// if rsAdditional is non-empty.
- regMaskTP rsTemp = regSet.rsMaskVars;
+ regMaskTP rsTemp = regSet.rsMaskVars;
regSet.ClearMaskVars();
regSet.rsSpillRegs(rsSpill);
@@ -5944,11 +5788,9 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
}
}
-
// Generate the conditional jump but without doing any StackFP fixups.
genCondJump(op1, lab_true, lab_false, false);
-
/* Save the current liveness, register status, and GC pointers */
/* This is the liveness information upon entry */
/* to both the then and else parts of the qmark */
@@ -5969,14 +5811,14 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
#if FEATURE_STACK_FP_X87
// Store fpstate
- QmarkStateStackFP tempFPState;
- bool bHasFPUState = !compCurFPState.IsEmpty();
+ QmarkStateStackFP tempFPState;
+ bool bHasFPUState = !compCurFPState.IsEmpty();
genQMarkBeforeElseStackFP(&tempFPState, tree->gtQmark.gtElseLiveSet, op1->gtNext);
#endif
/* Does the operator yield a value? */
- if (tree->gtType == TYP_VOID)
+ if (tree->gtType == TYP_VOID)
{
/* Generate the code for the else part of the qmark */
@@ -5993,18 +5835,18 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
/* Is there a 'then' part? */
- if (thenNode->IsNothingNode())
+ if (thenNode->IsNothingNode())
{
#if FEATURE_STACK_FP_X87
if (bHasFPUState)
{
// We had FP state on entry just after the condition, so potentially, the else
// node may have to do transition work.
- lab_done = genCreateTempLabel();
+ lab_done = genCreateTempLabel();
/* Generate jmp lab_done */
- inst_JMP (EJ_jmp, lab_done);
+ inst_JMP(EJ_jmp, lab_done);
/* No 'then' - just generate the 'lab_true' label */
@@ -6024,11 +5866,11 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
}
else
{
- lab_done = genCreateTempLabel();
+ lab_done = genCreateTempLabel();
/* Generate jmp lab_done */
- inst_JMP (EJ_jmp, lab_done);
+ inst_JMP(EJ_jmp, lab_done);
/* Restore the liveness that we had upon entry of the then part of the qmark */
@@ -6101,13 +5943,13 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
saveLiveness(&exitLiveness);
/* Generate jmp lab_done */
- lab_done = genCreateTempLabel();
+ lab_done = genCreateTempLabel();
#ifdef DEBUG
// We will use this to assert we don't emit instructions if we decide not to
// do the jmp
unsigned emittedInstructions = getEmitter()->emitInsCount;
- bool bSkippedJump = false;
+ bool bSkippedJump = false;
#endif
// We would like to know here if the else node is really going to generate
// code, as if it isn't, we're generating here a jump to the next instruction.
@@ -6118,7 +5960,7 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
#if FEATURE_STACK_FP_X87
!bHasFPUState && // If there is no FPU state, we won't need an x87 transition
#endif
- genIsEnregisteredIntVariable(thenNode) == reg)
+ genIsEnregisteredIntVariable(thenNode) == reg)
{
#ifdef DEBUG
// For the moment, fix this easy case (enregistered else node), which
@@ -6129,7 +5971,7 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
}
else
{
- inst_JMP (EJ_jmp, lab_done);
+ inst_JMP(EJ_jmp, lab_done);
}
/* Restore the liveness that we had upon entry of the else part of the qmark */
@@ -6171,8 +6013,7 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
#endif
#ifdef DEBUG
- noway_assert(bSkippedJump == false ||
- getEmitter()->emitInsCount == emittedInstructions);
+ noway_assert(bSkippedJump == false || getEmitter()->emitInsCount == emittedInstructions);
#endif
/* Define the "result" label */
@@ -6187,11 +6028,9 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
genUpdateLife(tree);
genMarkTreeInReg(tree, reg);
-
}
}
-
/*****************************************************************************
*
* Generate code for a qmark colon using the CMOV instruction. It's OK
@@ -6199,19 +6038,17 @@ void CodeGen::genCodeForQmark(GenTreePtr tree,
* genCodeForQmark to implement it using branches).
*/
-bool CodeGen::genCodeForQmarkWithCMOV(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+bool CodeGen::genCodeForQmarkWithCMOV(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
#ifdef _TARGET_XARCH_
- GenTreePtr cond = tree->gtOp.gtOp1;
- GenTreePtr colon = tree->gtOp.gtOp2;
+ GenTreePtr cond = tree->gtOp.gtOp1;
+ GenTreePtr colon = tree->gtOp.gtOp2;
// Warning: this naming of the local vars is backwards!
- GenTreePtr thenNode = colon->gtOp.gtOp1;
- GenTreePtr elseNode = colon->gtOp.gtOp2;
- GenTreePtr alwaysNode, predicateNode;
- regNumber reg;
- regMaskTP needReg = destReg;
+ GenTreePtr thenNode = colon->gtOp.gtOp1;
+ GenTreePtr elseNode = colon->gtOp.gtOp2;
+ GenTreePtr alwaysNode, predicateNode;
+ regNumber reg;
+ regMaskTP needReg = destReg;
noway_assert(tree->gtOper == GT_QMARK);
noway_assert(cond->OperIsCompare());
@@ -6233,24 +6070,21 @@ bool CodeGen::genCodeForQmarkWithCMOV(GenTreePtr tree,
/* thenNode better be a local or a constant */
- if ((thenNode->OperGet() != GT_CNS_INT) &&
- (thenNode->OperGet() != GT_LCL_VAR))
+ if ((thenNode->OperGet() != GT_CNS_INT) && (thenNode->OperGet() != GT_LCL_VAR))
{
return false;
}
/* elseNode better be a local or a constant or nothing */
- if ((elseNode->OperGet() != GT_CNS_INT) &&
- (elseNode->OperGet() != GT_LCL_VAR))
+ if ((elseNode->OperGet() != GT_CNS_INT) && (elseNode->OperGet() != GT_LCL_VAR))
{
return false;
}
/* can't handle two constants here */
- if ((thenNode->OperGet() == GT_CNS_INT) &&
- (elseNode->OperGet() == GT_CNS_INT))
+ if ((thenNode->OperGet() == GT_CNS_INT) && (elseNode->OperGet() == GT_CNS_INT))
{
return false;
}
@@ -6273,7 +6107,7 @@ bool CodeGen::genCodeForQmarkWithCMOV(GenTreePtr tree,
alwaysNode = elseNode;
predicateNode = thenNode;
- reverseCond = true;
+ reverseCond = true;
}
else
{
@@ -6287,7 +6121,7 @@ bool CodeGen::genCodeForQmarkWithCMOV(GenTreePtr tree,
// the value of the variable in the predicate node).
// This assert is just paranoid (we've already asserted it above)
- assert (predicateNode->OperGet() == GT_LCL_VAR);
+ assert(predicateNode->OperGet() == GT_LCL_VAR);
if ((predicateNode->gtFlags & GTF_VAR_DEATH) != 0)
{
return false;
@@ -6321,7 +6155,7 @@ bool CodeGen::genCodeForQmarkWithCMOV(GenTreePtr tree,
// Record the chosen register
- reg = alwaysNode->gtRegNum;
+ reg = alwaysNode->gtRegNum;
}
regNumber regPredicate = REG_NA;
@@ -6343,28 +6177,9 @@ bool CodeGen::genCodeForQmarkWithCMOV(GenTreePtr tree,
}
#endif
- const static
- instruction EJtoCMOV[] =
- {
- INS_nop,
- INS_nop,
- INS_cmovo,
- INS_cmovno,
- INS_cmovb,
- INS_cmovae,
- INS_cmove,
- INS_cmovne,
- INS_cmovbe,
- INS_cmova,
- INS_cmovs,
- INS_cmovns,
- INS_cmovpe,
- INS_cmovpo,
- INS_cmovl,
- INS_cmovge,
- INS_cmovle,
- INS_cmovg
- };
+ const static instruction EJtoCMOV[] = {INS_nop, INS_nop, INS_cmovo, INS_cmovno, INS_cmovb, INS_cmovae,
+ INS_cmove, INS_cmovne, INS_cmovbe, INS_cmova, INS_cmovs, INS_cmovns,
+ INS_cmovpe, INS_cmovpo, INS_cmovl, INS_cmovge, INS_cmovle, INS_cmovg};
noway_assert((unsigned)jumpKind < (sizeof(EJtoCMOV) / sizeof(EJtoCMOV[0])));
instruction cmov_ins = EJtoCMOV[jumpKind];
@@ -6395,31 +6210,30 @@ bool CodeGen::genCodeForQmarkWithCMOV(GenTreePtr tree,
#endif
}
-
#ifdef _TARGET_XARCH_
-void CodeGen::genCodeForMultEAX(GenTreePtr tree)
+void CodeGen::genCodeForMultEAX(GenTreePtr tree)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
- bool ovfl = tree->gtOverflow();
- regNumber reg = DUMMY_INIT(REG_CORRUPT);
- regMaskTP addrReg;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
+ bool ovfl = tree->gtOverflow();
+ regNumber reg = DUMMY_INIT(REG_CORRUPT);
+ regMaskTP addrReg;
noway_assert(tree->OperGet() == GT_MUL);
/* We'll evaluate 'op1' first */
-
+
regMaskTP op1Mask = regSet.rsMustExclude(RBM_EAX, op2->gtRsvdRegs);
-
+
/* Generate the op1 into op1Mask and hold on to it. freeOnly=true */
-
+
genComputeReg(op1, op1Mask, RegSet::ANY_REG, RegSet::KEEP_REG, true);
noway_assert(op1->gtFlags & GTF_REG_VAL);
// If op2 is a constant we need to load the constant into a register
if (op2->OperKind() & GTK_CONST)
{
- genCodeForTree(op2, RBM_EDX); // since EDX is going to be spilled anyway
+ genCodeForTree(op2, RBM_EDX); // since EDX is going to be spilled anyway
noway_assert(op2->gtFlags & GTF_REG_VAL);
regSet.rsMarkRegUsed(op2);
addrReg = genRegMask(op2->gtRegNum);
@@ -6440,17 +6254,16 @@ void CodeGen::genCodeForMultEAX(GenTreePtr tree)
// For 8 bit operations, we need to pick byte addressable registers
- if (ovfl && varTypeIsByte(tree->TypeGet()) &&
- !(genRegMask(reg) & RBM_BYTE_REGS))
+ if (ovfl && varTypeIsByte(tree->TypeGet()) && !(genRegMask(reg) & RBM_BYTE_REGS))
{
- regNumber byteReg = regSet.rsGrabReg(RBM_BYTE_REGS);
+ regNumber byteReg = regSet.rsGrabReg(RBM_BYTE_REGS);
inst_RV_RV(INS_mov, byteReg, reg);
-
+
regTracker.rsTrackRegTrash(byteReg);
- regSet.rsMarkRegFree (genRegMask(reg));
-
- reg = byteReg;
+ regSet.rsMarkRegFree(genRegMask(reg));
+
+ reg = byteReg;
op1->gtRegNum = reg;
regSet.rsMarkRegUsed(op1);
}
@@ -6474,7 +6287,7 @@ void CodeGen::genCodeForMultEAX(GenTreePtr tree)
/* Compute the new value */
noway_assert(op1->gtRegNum == REG_EAX);
-
+
// Make sure Edx is free (unless used by op2 itself)
bool op2Released = false;
@@ -6490,25 +6303,25 @@ void CodeGen::genCodeForMultEAX(GenTreePtr tree)
regSet.rsGrabReg(RBM_EDX);
op2Released = true;
-
+
/* keepReg==RegSet::FREE_REG so that the other multi-used trees
don't get marked as unspilled as well. */
regSet.rsUnspillReg(op2, RBM_EDX, RegSet::FREE_REG);
}
- instruction ins;
+ instruction ins;
if (tree->gtFlags & GTF_UNSIGNED)
ins = INS_mulEAX;
else
ins = INS_imulEAX;
-
+
inst_TT(ins, op2, 0, 0, opSize);
-
+
/* Both EAX and EDX are now trashed */
-
- regTracker.rsTrackRegTrash (REG_EAX);
- regTracker.rsTrackRegTrash (REG_EDX);
+
+ regTracker.rsTrackRegTrash(REG_EAX);
+ regTracker.rsTrackRegTrash(REG_EDX);
/* Free up anything that was tied up by the operand */
@@ -6529,18 +6342,16 @@ void CodeGen::genCodeForMultEAX(GenTreePtr tree)
if (ovfl)
genCheckOverflow(tree);
-
+
genCodeForTree_DONE(tree, reg);
}
#endif // _TARGET_XARCH_
#ifdef _TARGET_ARM_
-void CodeGen::genCodeForMult64(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForMult64(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
noway_assert(tree->OperGet() == GT_MUL);
@@ -6571,7 +6382,7 @@ void CodeGen::genCodeForMult64(GenTreePtr tree,
regHi = regSet.rsPickReg(destReg & ~genRegMask(regLo));
regSet.rsUnlockReg(genRegMask(regLo));
- instruction ins;
+ instruction ins;
if (tree->gtFlags & GTF_UNSIGNED)
ins = INS_umull;
else
@@ -6588,7 +6399,7 @@ void CodeGen::genCodeForMult64(GenTreePtr tree,
// Keep regLo [and regHi] locked while generating code for the gtOverflow() case
//
regSet.rsLockReg(genRegMask(regLo));
-
+
if (tree->gtFlags & GTF_MUL_64RSLT)
regSet.rsLockReg(genRegMask(regHi));
@@ -6614,7 +6425,7 @@ void CodeGen::genCodeForMult64(GenTreePtr tree,
regSet.rsUnlockReg(genRegMask(regHi));
}
- genUpdateLife(tree);
+ genUpdateLife(tree);
if (tree->gtFlags & GTF_MUL_64RSLT)
genMarkTreeInRegPair(tree, gen2regs2pair(regLo, regHi));
@@ -6623,16 +6434,13 @@ void CodeGen::genCodeForMult64(GenTreePtr tree,
}
#endif // _TARGET_ARM_
-
/*****************************************************************************
*
* Generate code for a simple binary arithmetic or logical operator.
* Handles GT_AND, GT_OR, GT_XOR, GT_ADD, GT_SUB, GT_MUL.
*/
-void CodeGen::genCodeForTreeSmpBinArithLogOp(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForTreeSmpBinArithLogOp(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
instruction ins;
genTreeOps oper = tree->OperGet();
@@ -6648,25 +6456,43 @@ void CodeGen::genCodeForTreeSmpBinArithLogOp(GenTreePtr tree,
bool isArith;
switch (oper)
{
- case GT_AND: ins = INS_AND; isArith = false; break;
- case GT_OR : ins = INS_OR ; isArith = false; break;
- case GT_XOR: ins = INS_XOR; isArith = false; break;
- case GT_ADD: ins = INS_add; isArith = true; break;
- case GT_SUB: ins = INS_sub; isArith = true; break;
- case GT_MUL: ins = INS_MUL; isArith = true; break;
- default:
- unreached();
+ case GT_AND:
+ ins = INS_AND;
+ isArith = false;
+ break;
+ case GT_OR:
+ ins = INS_OR;
+ isArith = false;
+ break;
+ case GT_XOR:
+ ins = INS_XOR;
+ isArith = false;
+ break;
+ case GT_ADD:
+ ins = INS_add;
+ isArith = true;
+ break;
+ case GT_SUB:
+ ins = INS_sub;
+ isArith = true;
+ break;
+ case GT_MUL:
+ ins = INS_MUL;
+ isArith = true;
+ break;
+ default:
+ unreached();
}
#ifdef _TARGET_XARCH_
/* Special case: try to use the 3 operand form "imul reg, op1, icon" */
- if ((oper == GT_MUL) &&
- op2->IsIntCnsFitsInI32() && // op2 is a constant that fits in a sign-extended 32-bit immediate
- !op1->IsCnsIntOrI() && // op1 is not a constant
- (tree->gtFlags & GTF_MUL_64RSLT) == 0 && // tree not marked with MUL_64RSLT
- !varTypeIsByte(treeType) && // No encoding for say "imul al,al,imm"
- !tree->gtOverflow() ) // 3 operand imul doesn't set flags
+ if ((oper == GT_MUL) &&
+ op2->IsIntCnsFitsInI32() && // op2 is a constant that fits in a sign-extended 32-bit immediate
+ !op1->IsCnsIntOrI() && // op1 is not a constant
+ (tree->gtFlags & GTF_MUL_64RSLT) == 0 && // tree not marked with MUL_64RSLT
+ !varTypeIsByte(treeType) && // No encoding for say "imul al,al,imm"
+ !tree->gtOverflow()) // 3 operand imul doesn't set flags
{
/* Make the first operand addressable */
@@ -6674,7 +6500,7 @@ void CodeGen::genCodeForTreeSmpBinArithLogOp(GenTreePtr tree,
/* Grab a register for the target */
- reg = regSet.rsPickReg(needReg, bestReg);
+ reg = regSet.rsPickReg(needReg, bestReg);
#if LEA_AVAILABLE
/* Compute the value into the target: reg=op1*op2_icon */
@@ -6690,7 +6516,8 @@ void CodeGen::genCodeForTreeSmpBinArithLogOp(GenTreePtr tree,
inst_RV_TT(INS_mov, reg, op1, 0, emitActualTypeSize(op1->TypeGet()));
regSrc = reg;
}
- getEmitter()->emitIns_R_ARX(INS_lea, emitActualTypeSize(treeType), reg, regSrc, regSrc, (op2->gtIntCon.gtIconVal & -2), 0);
+ getEmitter()->emitIns_R_ARX(INS_lea, emitActualTypeSize(treeType), reg, regSrc, regSrc,
+ (op2->gtIntCon.gtIconVal & -2), 0);
}
else
#endif // LEA_AVAILABLE
@@ -6724,137 +6551,132 @@ void CodeGen::genCodeForTreeSmpBinArithLogOp(GenTreePtr tree,
/* We record the accurate (small) types in trees only we need to
* check for overflow. Otherwise we record genActualType()
*/
-
+
noway_assert(ovfl || (treeType == genActualType(treeType)));
-
+
#if LEA_AVAILABLE
-
+
/* Can we use an 'lea' to compute the result?
Can't use 'lea' for overflow as it doesn't set flags
Can't use 'lea' unless we have at least two free registers */
{
- bool bEnoughRegs = genRegCountForLiveIntEnregVars(tree) + // Live intreg variables
- genCountBits(regSet.rsMaskLock) + // Locked registers
- 2 // We will need two regisers
- <= genCountBits(RBM_ALLINT & ~(doubleAlignOrFramePointerUsed() ? RBM_FPBASE : 0));
-
+ bool bEnoughRegs = genRegCountForLiveIntEnregVars(tree) + // Live intreg variables
+ genCountBits(regSet.rsMaskLock) + // Locked registers
+ 2 // We will need two regisers
+ <= genCountBits(RBM_ALLINT & ~(doubleAlignOrFramePointerUsed() ? RBM_FPBASE : 0));
+
regMaskTP regs = RBM_NONE; // OUT argument
- if (!ovfl &&
- bEnoughRegs &&
- genMakeIndAddrMode(tree, NULL, true, needReg, RegSet::FREE_REG, &regs, false))
+ if (!ovfl && bEnoughRegs && genMakeIndAddrMode(tree, NULL, true, needReg, RegSet::FREE_REG, &regs, false))
{
emitAttr size;
-
+
/* Is the value now computed in some register? */
-
- if (tree->gtFlags & GTF_REG_VAL)
+
+ if (tree->gtFlags & GTF_REG_VAL)
{
genCodeForTree_REG_VAR1(tree);
return;
}
-
+
/* If we can reuse op1/2's register directly, and 'tree' is
a simple expression (ie. not in scaled index form),
might as well just use "add" instead of "lea" */
-
+
// However, if we're in a context where we want to evaluate "tree" into a specific
// register different from the reg we'd use in this optimization, then it doesn't
// make sense to do the "add", since we'd also have to do a "mov."
- if (op1->gtFlags & GTF_REG_VAL)
+ if (op1->gtFlags & GTF_REG_VAL)
{
reg = op1->gtRegNum;
-
- if ((genRegMask(reg) & regSet.rsRegMaskFree()) &&
- (genRegMask(reg) & needReg))
+
+ if ((genRegMask(reg) & regSet.rsRegMaskFree()) && (genRegMask(reg) & needReg))
{
if (op2->gtFlags & GTF_REG_VAL)
{
/* Simply add op2 to the register */
-
+
inst_RV_TT(INS_add, reg, op2, 0, emitTypeSize(treeType), flags);
-
- if (tree->gtSetFlags())
+
+ if (tree->gtSetFlags())
genFlagsEqualToReg(tree, reg);
-
+
goto DONE_LEA_ADD;
}
else if (op2->OperGet() == GT_CNS_INT)
{
/* Simply add op2 to the register */
-
+
genIncRegBy(reg, op2->gtIntCon.gtIconVal, tree, treeType);
-
+
goto DONE_LEA_ADD;
}
}
}
-
- if (op2->gtFlags & GTF_REG_VAL)
+
+ if (op2->gtFlags & GTF_REG_VAL)
{
reg = op2->gtRegNum;
-
- if ((genRegMask(reg) & regSet.rsRegMaskFree()) &&
- (genRegMask(reg) & needReg))
+
+ if ((genRegMask(reg) & regSet.rsRegMaskFree()) && (genRegMask(reg) & needReg))
{
if (op1->gtFlags & GTF_REG_VAL)
{
/* Simply add op1 to the register */
-
+
inst_RV_TT(INS_add, reg, op1, 0, emitTypeSize(treeType), flags);
-
- if (tree->gtSetFlags())
+
+ if (tree->gtSetFlags())
genFlagsEqualToReg(tree, reg);
-
+
goto DONE_LEA_ADD;
}
}
}
-
+
// The expression either requires a scaled-index form, or the
// op1 or op2's register can't be targeted, this can be
// caused when op1 or op2 are enregistered variables.
-
- reg = regSet.rsPickReg(needReg, bestReg);
+
+ reg = regSet.rsPickReg(needReg, bestReg);
size = emitActualTypeSize(treeType);
-
+
/* Generate "lea reg, [addr-mode]" */
-
+
inst_RV_AT(INS_lea, size, treeType, reg, tree, 0, flags);
-
+
#ifndef _TARGET_XARCH_
// Don't call genFlagsEqualToReg on x86/x64
// as it does not set the flags
- if (tree->gtSetFlags())
+ if (tree->gtSetFlags())
genFlagsEqualToReg(tree, reg);
#endif
-
-DONE_LEA_ADD:
+
+ DONE_LEA_ADD:
/* The register has been trashed now */
regTracker.rsTrackRegTrash(reg);
-
+
genDoneAddressable(tree, regs, RegSet::FREE_REG);
-
+
/* The following could be an 'inner' pointer!!! */
-
+
noway_assert(treeType == TYP_BYREF || !varTypeIsGC(treeType));
-
+
if (treeType == TYP_BYREF)
{
genUpdateLife(tree);
-
+
gcInfo.gcMarkRegSetNpt(genRegMask(reg)); // in case "reg" was a TYP_GCREF before
gcInfo.gcMarkRegPtrVal(reg, TYP_BYREF);
}
-
+
genCodeForTree_DONE(tree, reg);
return;
}
}
-
+
#endif // LEA_AVAILABLE
-
- noway_assert((varTypeIsGC(treeType) == false) ||
- (treeType == TYP_BYREF && (ins == INS_add || ins == INS_sub)));
+
+ noway_assert((varTypeIsGC(treeType) == false) || (treeType == TYP_BYREF && (ins == INS_add || ins == INS_sub)));
}
/* The following makes an assumption about gtSetEvalOrder(this) */
@@ -6863,7 +6685,7 @@ DONE_LEA_ADD:
/* Compute a useful register mask */
needReg = regSet.rsMustExclude(needReg, op2->gtRsvdRegs);
- needReg = regSet.rsNarrowHint (needReg, regSet.rsRegMaskFree());
+ needReg = regSet.rsNarrowHint(needReg, regSet.rsRegMaskFree());
// Determine what registers go live between op1 and op2
// Don't bother checking if op1 is already in a register.
@@ -6876,7 +6698,7 @@ DONE_LEA_ADD:
regMaskTP newLiveMask = genNewLiveRegMask(op1, op2);
if (newLiveMask)
{
- needReg = regSet.rsNarrowHint (needReg, ~newLiveMask);
+ needReg = regSet.rsNarrowHint(needReg, ~newLiveMask);
}
}
@@ -6891,30 +6713,30 @@ DONE_LEA_ADD:
/* Special case: small_val & small_mask */
- if ( varTypeIsSmall(op1->TypeGet()) &&
- op2->IsCnsIntOrI() &&
- oper == GT_AND)
+ if (varTypeIsSmall(op1->TypeGet()) && op2->IsCnsIntOrI() && oper == GT_AND)
{
- size_t and_val = op2->gtIntCon.gtIconVal;
- size_t andMask;
- var_types typ = op1->TypeGet();
+ size_t and_val = op2->gtIntCon.gtIconVal;
+ size_t andMask;
+ var_types typ = op1->TypeGet();
switch (typ)
{
- case TYP_BOOL:
- case TYP_BYTE:
- case TYP_UBYTE:
- andMask = 0x000000FF;
- break;
- case TYP_SHORT:
- case TYP_CHAR:
- andMask = 0x0000FFFF;
- break;
- default: noway_assert(!"unexpected type"); return;
+ case TYP_BOOL:
+ case TYP_BYTE:
+ case TYP_UBYTE:
+ andMask = 0x000000FF;
+ break;
+ case TYP_SHORT:
+ case TYP_CHAR:
+ andMask = 0x0000FFFF;
+ break;
+ default:
+ noway_assert(!"unexpected type");
+ return;
}
// Is the 'and_val' completely contained within the bits found in 'andMask'
- if ((and_val & ~andMask) == 0)
+ if ((and_val & ~andMask) == 0)
{
// We must use unsigned instructions when loading op1
if (varTypeIsByte(typ))
@@ -6924,7 +6746,7 @@ DONE_LEA_ADD:
else // varTypeIsShort(typ)
{
assert(varTypeIsShort(typ));
- op1->gtType = TYP_CHAR;
+ op1->gtType = TYP_CHAR;
}
/* Generate the first operand into a scratch register */
@@ -6936,16 +6758,15 @@ DONE_LEA_ADD:
regNumber op1Reg = op1->gtRegNum;
- // Did we end up in an acceptable register?
+ // Did we end up in an acceptable register?
// and do we have an acceptable free register available to grab?
//
- if ( ((genRegMask(op1Reg) & needReg) == 0) &&
- ((regSet.rsRegMaskFree() & needReg) != 0) )
+ if (((genRegMask(op1Reg) & needReg) == 0) && ((regSet.rsRegMaskFree() & needReg) != 0))
{
// See if we can pick a register from bestReg
bestReg &= needReg;
- // Grab an acceptable register
+ // Grab an acceptable register
regNumber newReg;
if ((bestReg & regSet.rsRegMaskFree()) != 0)
newReg = regSet.rsGrabReg(bestReg);
@@ -6978,7 +6799,7 @@ DONE_LEA_ADD:
regSet.rsMarkRegUsed(op1);
reg = op1->gtRegNum;
- if (and_val != andMask) // Does the "and" mask only cover some of the bits?
+ if (and_val != andMask) // Does the "and" mask only cover some of the bits?
{
/* "and" the value */
@@ -6987,7 +6808,8 @@ DONE_LEA_ADD:
#ifdef DEBUG
/* Update the live set of register variables */
- if (compiler->opts.varNames) genUpdateLife(tree);
+ if (compiler->opts.varNames)
+ genUpdateLife(tree);
#endif
/* Now we can update the register pointer information */
@@ -7002,7 +6824,7 @@ DONE_LEA_ADD:
#ifdef _TARGET_XARCH_
- // Do we have to use the special "imul" instruction
+ // Do we have to use the special "imul" instruction
// which has eax as the implicit operand ?
//
bool multEAX = false;
@@ -7034,7 +6856,7 @@ DONE_LEA_ADD:
}
}
- if (multEAX)
+ if (multEAX)
{
noway_assert(oper == GT_MUL);
@@ -7056,13 +6878,13 @@ DONE_LEA_ADD:
}
else if (ovfl)
{
- // We always must use the 32x32 => 64 bit multiply
+ // We always must use the 32x32 => 64 bit multiply
// to detect overflow
mult64 = true;
}
}
- if (mult64)
+ if (mult64)
{
noway_assert(oper == GT_MUL);
@@ -7085,7 +6907,7 @@ DONE_LEA_ADD:
/* Compute a useful register mask */
needReg = regSet.rsMustExclude(needReg, op2->gtRsvdRegs);
- needReg = regSet.rsNarrowHint (needReg, regSet.rsRegMaskFree());
+ needReg = regSet.rsNarrowHint(needReg, regSet.rsRegMaskFree());
#if CPU_HAS_BYTE_REGS
/* 8-bit operations can only be done in the byte-regs */
@@ -7093,16 +6915,15 @@ DONE_LEA_ADD:
needReg = regSet.rsNarrowHint(RBM_BYTE_REGS, needReg);
#endif // CPU_HAS_BYTE_REGS
- // Did we end up in an acceptable register?
+ // Did we end up in an acceptable register?
// and do we have an acceptable free register available to grab?
//
- if ( ((genRegMask(op1Reg) & needReg) == 0) &&
- ((regSet.rsRegMaskFree() & needReg) != 0) )
+ if (((genRegMask(op1Reg) & needReg) == 0) && ((regSet.rsRegMaskFree() & needReg) != 0))
{
// See if we can pick a register from bestReg
bestReg &= needReg;
- // Grab an acceptable register
+ // Grab an acceptable register
regNumber newReg;
if ((bestReg & regSet.rsRegMaskFree()) != 0)
newReg = regSet.rsGrabReg(bestReg);
@@ -7139,7 +6960,7 @@ DONE_LEA_ADD:
bool isSmallConst = false;
#ifdef _TARGET_ARM_
- if ((op2->gtOper == GT_CNS_INT) && arm_Valid_Imm_For_Instr(ins, op2->gtIntCon.gtIconVal, INS_FLAGS_DONT_CARE))
+ if ((op2->gtOper == GT_CNS_INT) && arm_Valid_Imm_For_Instr(ins, op2->gtIntCon.gtIconVal, INS_FLAGS_DONT_CARE))
{
isSmallConst = true;
}
@@ -7150,42 +6971,35 @@ DONE_LEA_ADD:
#if CPU_LOAD_STORE_ARCH
genRecoverReg(op1, RBM_ALLINT, RegSet::KEEP_REG);
-#else // !CPU_LOAD_STORE_ARCH
+#else // !CPU_LOAD_STORE_ARCH
/* Is op1 spilled and op2 in a register? */
- if ((op1->gtFlags & GTF_SPILLED) &&
- (op2->gtFlags & GTF_REG_VAL) &&
- (ins != INS_sub) )
+ if ((op1->gtFlags & GTF_SPILLED) && (op2->gtFlags & GTF_REG_VAL) && (ins != INS_sub))
{
- noway_assert(ins == INS_add ||
- ins == INS_MUL ||
- ins == INS_AND ||
- ins == INS_OR ||
- ins == INS_XOR);
+ noway_assert(ins == INS_add || ins == INS_MUL || ins == INS_AND || ins == INS_OR || ins == INS_XOR);
// genMakeRvalueAddressable(GT_LCL_VAR) shouldn't spill anything
noway_assert(op2->gtOper != GT_LCL_VAR ||
varTypeIsSmall(compiler->lvaTable[op2->gtLclVarCommon.gtLclNum].TypeGet()));
- reg = op2->gtRegNum;
+ reg = op2->gtRegNum;
regMaskTP regMask = genRegMask(reg);
/* Is the register holding op2 available? */
- if (regMask & regSet.rsMaskVars)
+ if (regMask & regSet.rsMaskVars)
{
}
else
{
/* Get the temp we spilled into. */
- TempDsc * temp = regSet.rsUnspillInPlace(op1, op1->gtRegNum);
+ TempDsc* temp = regSet.rsUnspillInPlace(op1, op1->gtRegNum);
/* For 8bit operations, we need to make sure that op2 is
in a byte-addressable registers */
- if (varTypeIsByte(treeType) &&
- !(regMask & RBM_BYTE_REGS))
+ if (varTypeIsByte(treeType) && !(regMask & RBM_BYTE_REGS))
{
regNumber byteReg = regSet.rsGrabReg(RBM_BYTE_REGS);
@@ -7196,12 +7010,12 @@ DONE_LEA_ADD:
RBM_BYTE_REGS, and regSet.rsGrabReg() will only spill its args */
noway_assert(op2->gtFlags & GTF_REG_VAL);
- regSet.rsUnlockReg (regMask);
+ regSet.rsUnlockReg(regMask);
regSet.rsMarkRegFree(regMask);
- reg = byteReg;
- regMask = genRegMask(reg);
- op2->gtRegNum = reg;
+ reg = byteReg;
+ regMask = genRegMask(reg);
+ op2->gtRegNum = reg;
regSet.rsMarkRegUsed(op2);
}
@@ -7223,13 +7037,13 @@ DONE_LEA_ADD:
* we can use the flags
*/
- if (tree->gtSetFlags())
+ if (tree->gtSetFlags())
{
genFlagsEqualToReg(tree, reg);
}
/* The result is where the second operand is sitting. Mark result reg as free */
- regSet.rsMarkRegFree(genRegMask(reg)) ;
+ regSet.rsMarkRegFree(genRegMask(reg));
gcInfo.gcMarkRegPtrVal(reg, treeType);
@@ -7248,17 +7062,16 @@ DONE_LEA_ADD:
// For 8 bit operations, we need to pick byte addressable registers
- if (varTypeIsByte(treeType) &&
- !(genRegMask(reg) & RBM_BYTE_REGS))
+ if (varTypeIsByte(treeType) && !(genRegMask(reg) & RBM_BYTE_REGS))
{
- regNumber byteReg = regSet.rsGrabReg(RBM_BYTE_REGS);
+ regNumber byteReg = regSet.rsGrabReg(RBM_BYTE_REGS);
inst_RV_RV(INS_mov, byteReg, reg);
regTracker.rsTrackRegTrash(byteReg);
- regSet.rsMarkRegFree (genRegMask(reg));
+ regSet.rsMarkRegFree(genRegMask(reg));
- reg = byteReg;
+ reg = byteReg;
op1->gtRegNum = reg;
regSet.rsMarkRegUsed(op1);
}
@@ -7283,26 +7096,24 @@ DONE_LEA_ADD:
/* Compute the new value */
- if (isArith &&
- !op2->InReg() &&
- (op2->OperKind() & GTK_CONST)
+ if (isArith && !op2->InReg() && (op2->OperKind() & GTK_CONST)
#if !CPU_HAS_FP_SUPPORT
- && (treeType == TYP_INT || treeType == TYP_I_IMPL)
+ && (treeType == TYP_INT || treeType == TYP_I_IMPL)
#endif
- )
+ )
{
- ssize_t ival = op2->gtIntCon.gtIconVal;
+ ssize_t ival = op2->gtIntCon.gtIconVal;
- if (oper == GT_ADD)
+ if (oper == GT_ADD)
{
genIncRegBy(reg, ival, tree, treeType, ovfl);
}
else if (oper == GT_SUB)
{
- if (ovfl &&
- ((tree->gtFlags & GTF_UNSIGNED) ||
- (ival == ((treeType == TYP_INT) ? INT32_MIN : SSIZE_T_MIN))) // -0x80000000 == 0x80000000. Therefore we can't use -ival.
- )
+ if (ovfl && ((tree->gtFlags & GTF_UNSIGNED) ||
+ (ival == ((treeType == TYP_INT) ? INT32_MIN : SSIZE_T_MIN))) // -0x80000000 == 0x80000000.
+ // Therefore we can't use -ival.
+ )
{
/* For unsigned overflow, we have to use INS_sub to set
the flags correctly */
@@ -7329,12 +7140,12 @@ DONE_LEA_ADD:
{
noway_assert(genRegMask(reg) & RBM_BYTE_REGS);
- regNumber op2reg = op2->gtRegNum;
- regMaskTP op2regMask = genRegMask(op2reg);
+ regNumber op2reg = op2->gtRegNum;
+ regMaskTP op2regMask = genRegMask(op2reg);
if (!(op2regMask & RBM_BYTE_REGS))
{
- regNumber byteReg = regSet.rsGrabReg(RBM_BYTE_REGS);
+ regNumber byteReg = regSet.rsGrabReg(RBM_BYTE_REGS);
inst_RV_RV(INS_mov, byteReg, op2reg);
regTracker.rsTrackRegTrash(byteReg);
@@ -7363,7 +7174,7 @@ DONE_LEA_ADD:
/* 'add'/'sub' set all CC flags, others only ZF+SF */
- if (tree->gtSetFlags())
+ if (tree->gtSetFlags())
genFlagsEqualToReg(tree, reg);
genReleaseReg(op1);
@@ -7380,39 +7191,51 @@ CHK_OVF:
genCodeForTree_DONE(tree, reg);
}
-
/*****************************************************************************
*
* Generate code for a simple binary arithmetic or logical assignment operator: x <op>= y.
* Handles GT_ASG_AND, GT_ASG_OR, GT_ASG_XOR, GT_ASG_ADD, GT_ASG_SUB.
*/
-void CodeGen::genCodeForTreeSmpBinArithLogAsgOp(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForTreeSmpBinArithLogAsgOp(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
- instruction ins;
- const genTreeOps oper = tree->OperGet();
- const var_types treeType = tree->TypeGet();
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
- insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
- regNumber reg = DUMMY_INIT(REG_CORRUPT);
- regMaskTP needReg = destReg;
- regMaskTP addrReg;
+ instruction ins;
+ const genTreeOps oper = tree->OperGet();
+ const var_types treeType = tree->TypeGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
+ insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ regNumber reg = DUMMY_INIT(REG_CORRUPT);
+ regMaskTP needReg = destReg;
+ regMaskTP addrReg;
/* Figure out what instruction to generate */
bool isArith;
switch (oper)
{
- case GT_ASG_AND: ins = INS_AND; isArith = false; break;
- case GT_ASG_OR : ins = INS_OR ; isArith = false; break;
- case GT_ASG_XOR: ins = INS_XOR; isArith = false; break;
- case GT_ASG_ADD: ins = INS_add; isArith = true; break;
- case GT_ASG_SUB: ins = INS_sub; isArith = true; break;
- default:
- unreached();
+ case GT_ASG_AND:
+ ins = INS_AND;
+ isArith = false;
+ break;
+ case GT_ASG_OR:
+ ins = INS_OR;
+ isArith = false;
+ break;
+ case GT_ASG_XOR:
+ ins = INS_XOR;
+ isArith = false;
+ break;
+ case GT_ASG_ADD:
+ ins = INS_add;
+ isArith = true;
+ break;
+ case GT_ASG_SUB:
+ ins = INS_sub;
+ isArith = true;
+ break;
+ default:
+ unreached();
}
bool ovfl = false;
@@ -7426,8 +7249,7 @@ void CodeGen::genCodeForTreeSmpBinArithLogAsgOp(GenTreePtr tree,
// We can't use += with overflow if the value cannot be changed
// in case of an overflow-exception which the "+" might cause
noway_assert(!ovfl ||
- ((op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_LCL_FLD) &&
- !compiler->compCurBB->hasTryIndex()));
+ ((op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_LCL_FLD) && !compiler->compCurBB->hasTryIndex()));
/* Do not allow overflow instructions with refs/byrefs */
@@ -7438,122 +7260,126 @@ void CodeGen::genCodeForTreeSmpBinArithLogAsgOp(GenTreePtr tree,
/* Is the second operand a constant? */
- if (op2->IsIntCnsFitsInI32())
+ if (op2->IsIntCnsFitsInI32())
{
- int ival = (int)op2->gtIntCon.gtIconVal;
+ int ival = (int)op2->gtIntCon.gtIconVal;
/* What is the target of the assignment? */
switch (op1->gtOper)
{
- case GT_REG_VAR:
+ case GT_REG_VAR:
-REG_VAR4:
+ REG_VAR4:
- reg = op1->gtRegVar.gtRegNum;
+ reg = op1->gtRegVar.gtRegNum;
- /* No registers are needed for addressing */
+ /* No registers are needed for addressing */
- addrReg = RBM_NONE;
+ addrReg = RBM_NONE;
#if !CPU_LOAD_STORE_ARCH
-INCDEC_REG:
-#endif
- /* We're adding a constant to a register */
-
- if (oper == GT_ASG_ADD)
- genIncRegBy(reg, ival, tree, treeType, ovfl);
- else if (ovfl &&
- ((tree->gtFlags & GTF_UNSIGNED) || ival == ((treeType == TYP_INT) ? INT32_MIN : SSIZE_T_MIN)) // -0x80000000 == 0x80000000. Therefore we can't use -ival.
- )
- /* For unsigned overflow, we have to use INS_sub to set
- the flags correctly */
- genDecRegBy(reg, ival, tree);
- else
- genIncRegBy(reg, -ival, tree, treeType, ovfl);
+ INCDEC_REG:
+#endif
+ /* We're adding a constant to a register */
+
+ if (oper == GT_ASG_ADD)
+ genIncRegBy(reg, ival, tree, treeType, ovfl);
+ else if (ovfl && ((tree->gtFlags & GTF_UNSIGNED) ||
+ ival == ((treeType == TYP_INT) ? INT32_MIN : SSIZE_T_MIN)) // -0x80000000 ==
+ // 0x80000000.
+ // Therefore we can't
+ // use -ival.
+ )
+ /* For unsigned overflow, we have to use INS_sub to set
+ the flags correctly */
+ genDecRegBy(reg, ival, tree);
+ else
+ genIncRegBy(reg, -ival, tree, treeType, ovfl);
- break;
+ break;
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
- /* Does the variable live in a register? */
+ /* Does the variable live in a register? */
- if (genMarkLclVar(op1))
- goto REG_VAR4;
+ if (genMarkLclVar(op1))
+ goto REG_VAR4;
- __fallthrough;
+ __fallthrough;
- default:
+ default:
- /* Make the target addressable for load/store */
- addrReg = genMakeAddressable2(op1, needReg, RegSet::KEEP_REG, true, true);
+ /* Make the target addressable for load/store */
+ addrReg = genMakeAddressable2(op1, needReg, RegSet::KEEP_REG, true, true);
- #if !CPU_LOAD_STORE_ARCH
- // For CPU_LOAD_STORE_ARCH, we always load from memory then store to memory
+#if !CPU_LOAD_STORE_ARCH
+ // For CPU_LOAD_STORE_ARCH, we always load from memory then store to memory
- /* For small types with overflow check, we need to
- sign/zero extend the result, so we need it in a reg */
+ /* For small types with overflow check, we need to
+ sign/zero extend the result, so we need it in a reg */
- if (ovfl && genTypeSize(treeType) < sizeof(int))
- #endif // !CPU_LOAD_STORE_ARCH
- {
- // Load op1 into a reg
+ if (ovfl && genTypeSize(treeType) < sizeof(int))
+#endif // !CPU_LOAD_STORE_ARCH
+ {
+ // Load op1 into a reg
- reg = regSet.rsGrabReg(RBM_ALLINT & ~addrReg);
+ reg = regSet.rsGrabReg(RBM_ALLINT & ~addrReg);
- inst_RV_TT(INS_mov, reg, op1);
+ inst_RV_TT(INS_mov, reg, op1);
- // Issue the add/sub and the overflow check
+ // Issue the add/sub and the overflow check
- inst_RV_IV(ins, reg, ival, emitActualTypeSize(treeType), flags);
- regTracker.rsTrackRegTrash(reg);
+ inst_RV_IV(ins, reg, ival, emitActualTypeSize(treeType), flags);
+ regTracker.rsTrackRegTrash(reg);
- if (ovfl)
- {
- genCheckOverflow(tree);
- }
+ if (ovfl)
+ {
+ genCheckOverflow(tree);
+ }
- /* Store the (sign/zero extended) result back to
- the stack location of the variable */
+ /* Store the (sign/zero extended) result back to
+ the stack location of the variable */
- inst_TT_RV(ins_Store(op1->TypeGet()), op1, reg);
+ inst_TT_RV(ins_Store(op1->TypeGet()), op1, reg);
- break;
- }
+ break;
+ }
#if !CPU_LOAD_STORE_ARCH
- else
- {
- /* Add/subtract the new value into/from the target */
-
- if (op1->gtFlags & GTF_REG_VAL)
+ else
{
- reg = op1->gtRegNum;
- goto INCDEC_REG;
- }
+ /* Add/subtract the new value into/from the target */
- /* Special case: inc/dec (up to P3, or for small code, or blended code outside loops) */
- if (!ovfl && (ival == 1 || ival == -1) && !compiler->optAvoidIncDec(compiler->compCurBB->getBBWeight(compiler)))
- {
- noway_assert(oper == GT_ASG_SUB || oper == GT_ASG_ADD);
- if (oper == GT_ASG_SUB)
- ival = -ival;
+ if (op1->gtFlags & GTF_REG_VAL)
+ {
+ reg = op1->gtRegNum;
+ goto INCDEC_REG;
+ }
- ins = (ival > 0) ? INS_inc : INS_dec;
- inst_TT(ins, op1);
- }
- else
- {
- inst_TT_IV(ins, op1, ival);
- }
+ /* Special case: inc/dec (up to P3, or for small code, or blended code outside loops) */
+ if (!ovfl && (ival == 1 || ival == -1) &&
+ !compiler->optAvoidIncDec(compiler->compCurBB->getBBWeight(compiler)))
+ {
+ noway_assert(oper == GT_ASG_SUB || oper == GT_ASG_ADD);
+ if (oper == GT_ASG_SUB)
+ ival = -ival;
- if ((op1->gtOper == GT_LCL_VAR) && (!ovfl || treeType == TYP_INT))
- {
- if (tree->gtSetFlags())
- genFlagsEqualToVar(tree, op1->gtLclVarCommon.gtLclNum);
- }
+ ins = (ival > 0) ? INS_inc : INS_dec;
+ inst_TT(ins, op1);
+ }
+ else
+ {
+ inst_TT_IV(ins, op1, ival);
+ }
- break;
- }
-#endif // !CPU_LOAD_STORE_ARCH
+ if ((op1->gtOper == GT_LCL_VAR) && (!ovfl || treeType == TYP_INT))
+ {
+ if (tree->gtSetFlags())
+ genFlagsEqualToVar(tree, op1->gtLclVarCommon.gtLclNum);
+ }
+
+ break;
+ }
+#endif // !CPU_LOAD_STORE_ARCH
} // end switch (op1->gtOper)
genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
@@ -7561,7 +7387,7 @@ INCDEC_REG:
genCodeForTreeSmpOpAsg_DONE_ASSG(tree, addrReg, reg, ovfl);
return;
} // end if (op2->IsIntCnsFitsInI32())
- } // end if (isArith)
+ } // end if (isArith)
noway_assert(!varTypeIsGC(treeType) || ins == INS_sub || ins == INS_add);
@@ -7569,149 +7395,146 @@ INCDEC_REG:
switch (op1->gtOper)
{
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
- /* Does the target variable live in a register? */
+ /* Does the target variable live in a register? */
- if (!genMarkLclVar(op1))
- break;
+ if (!genMarkLclVar(op1))
+ break;
- __fallthrough;
+ __fallthrough;
- case GT_REG_VAR:
+ case GT_REG_VAR:
- /* Get hold of the target register */
+ /* Get hold of the target register */
- reg = op1->gtRegVar.gtRegNum;
+ reg = op1->gtRegVar.gtRegNum;
- /* Make sure the target of the store is available */
+ /* Make sure the target of the store is available */
- if (regSet.rsMaskUsed & genRegMask(reg))
- {
- regSet.rsSpillReg(reg);
- }
+ if (regSet.rsMaskUsed & genRegMask(reg))
+ {
+ regSet.rsSpillReg(reg);
+ }
- /* Make the RHS addressable */
+ /* Make the RHS addressable */
- addrReg = genMakeRvalueAddressable(op2, 0, RegSet::KEEP_REG, false);
+ addrReg = genMakeRvalueAddressable(op2, 0, RegSet::KEEP_REG, false);
- /* Compute the new value into the target register */
- CLANG_FORMAT_COMMENT_ANCHOR;
+ /* Compute the new value into the target register */
+ CLANG_FORMAT_COMMENT_ANCHOR;
#if CPU_HAS_BYTE_REGS
- // Fix 383833 X86 ILGEN
- regNumber reg2;
- if ((op2->gtFlags & GTF_REG_VAL) != 0)
- {
- reg2 = op2->gtRegNum;
- }
- else
- {
- reg2 = REG_STK;
- }
-
- // We can only generate a byte ADD,SUB,OR,AND operation when reg and reg2 are both BYTE registers
- // when op2 is in memory then reg2==REG_STK and we will need to force op2 into a register
- //
- if (varTypeIsByte(treeType) &&
- (((genRegMask(reg) & RBM_BYTE_REGS) == 0) || ((genRegMask(reg2) & RBM_BYTE_REGS) == 0)))
- {
- // We will force op2 into a register (via sign/zero extending load)
- // for the cases where op2 is in memory and thus could have
- // an unmapped page just beyond its location
- //
- if ((op2->OperIsIndir() || (op2->gtOper == GT_CLS_VAR)) && varTypeIsSmall(op2->TypeGet()))
+ // Fix 383833 X86 ILGEN
+ regNumber reg2;
+ if ((op2->gtFlags & GTF_REG_VAL) != 0)
{
- genCodeForTree(op2, 0);
- assert((op2->gtFlags & GTF_REG_VAL) != 0);
+ reg2 = op2->gtRegNum;
+ }
+ else
+ {
+ reg2 = REG_STK;
}
- inst_RV_TT(ins, reg, op2, 0, EA_4BYTE, flags);
-
- bool canOmit = false;
-
- if (varTypeIsUnsigned(treeType))
+ // We can only generate a byte ADD,SUB,OR,AND operation when reg and reg2 are both BYTE registers
+ // when op2 is in memory then reg2==REG_STK and we will need to force op2 into a register
+ //
+ if (varTypeIsByte(treeType) &&
+ (((genRegMask(reg) & RBM_BYTE_REGS) == 0) || ((genRegMask(reg2) & RBM_BYTE_REGS) == 0)))
{
- // When op2 is a byte sized constant we can omit the zero extend instruction
- if ((op2->gtOper == GT_CNS_INT) &&
- ((op2->gtIntCon.gtIconVal & 0xFF) == op2->gtIntCon.gtIconVal))
+ // We will force op2 into a register (via sign/zero extending load)
+ // for the cases where op2 is in memory and thus could have
+ // an unmapped page just beyond its location
+ //
+ if ((op2->OperIsIndir() || (op2->gtOper == GT_CLS_VAR)) && varTypeIsSmall(op2->TypeGet()))
{
- canOmit = true;
+ genCodeForTree(op2, 0);
+ assert((op2->gtFlags & GTF_REG_VAL) != 0);
}
- }
- else // treeType is signed
- {
- // When op2 is a positive 7-bit or smaller constant
- // we can omit the sign extension sequence.
- if ((op2->gtOper == GT_CNS_INT) &&
- ((op2->gtIntCon.gtIconVal & 0x7F) == op2->gtIntCon.gtIconVal))
+
+ inst_RV_TT(ins, reg, op2, 0, EA_4BYTE, flags);
+
+ bool canOmit = false;
+
+ if (varTypeIsUnsigned(treeType))
{
- canOmit = true;
+ // When op2 is a byte sized constant we can omit the zero extend instruction
+ if ((op2->gtOper == GT_CNS_INT) && ((op2->gtIntCon.gtIconVal & 0xFF) == op2->gtIntCon.gtIconVal))
+ {
+ canOmit = true;
+ }
}
- }
-
- if (!canOmit)
- {
- // If reg is a byte reg then we can use a movzx/movsx instruction
- //
- if ((genRegMask(reg) & RBM_BYTE_REGS) != 0)
+ else // treeType is signed
{
- instruction extendIns = ins_Move_Extend(treeType, true);
- inst_RV_RV(extendIns, reg, reg, treeType, emitTypeSize(treeType));
+ // When op2 is a positive 7-bit or smaller constant
+ // we can omit the sign extension sequence.
+ if ((op2->gtOper == GT_CNS_INT) && ((op2->gtIntCon.gtIconVal & 0x7F) == op2->gtIntCon.gtIconVal))
+ {
+ canOmit = true;
+ }
}
- else // we can't encode a movzx/movsx instruction
+
+ if (!canOmit)
{
- if (varTypeIsUnsigned(treeType))
+ // If reg is a byte reg then we can use a movzx/movsx instruction
+ //
+ if ((genRegMask(reg) & RBM_BYTE_REGS) != 0)
{
- // otherwise, we must zero the upper 24 bits of 'reg'
- inst_RV_IV(INS_AND, reg, 0xFF, EA_4BYTE);
+ instruction extendIns = ins_Move_Extend(treeType, true);
+ inst_RV_RV(extendIns, reg, reg, treeType, emitTypeSize(treeType));
}
- else // treeType is signed
+ else // we can't encode a movzx/movsx instruction
{
- // otherwise, we must sign extend the result in the non-byteable register 'reg'
- // We will shift the register left 24 bits, thus putting the sign-bit into the high bit
- // then we do an arithmetic shift back 24 bits which propagate the sign bit correctly.
- //
- inst_RV_SH(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, reg, 24);
- inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, reg, 24);
+ if (varTypeIsUnsigned(treeType))
+ {
+ // otherwise, we must zero the upper 24 bits of 'reg'
+ inst_RV_IV(INS_AND, reg, 0xFF, EA_4BYTE);
+ }
+ else // treeType is signed
+ {
+ // otherwise, we must sign extend the result in the non-byteable register 'reg'
+ // We will shift the register left 24 bits, thus putting the sign-bit into the high bit
+ // then we do an arithmetic shift back 24 bits which propagate the sign bit correctly.
+ //
+ inst_RV_SH(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, reg, 24);
+ inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, reg, 24);
+ }
}
}
}
- }
- else
+ else
#endif // CPU_HAS_BYTE_REGS
- {
- inst_RV_TT(ins, reg, op2, 0, emitTypeSize(treeType), flags);
- }
+ {
+ inst_RV_TT(ins, reg, op2, 0, emitTypeSize(treeType), flags);
+ }
- /* The zero flag is now equal to the register value */
+ /* The zero flag is now equal to the register value */
- if (tree->gtSetFlags())
- genFlagsEqualToReg(tree, reg);
+ if (tree->gtSetFlags())
+ genFlagsEqualToReg(tree, reg);
- /* Remember that we trashed the target */
+ /* Remember that we trashed the target */
- regTracker.rsTrackRegTrash(reg);
+ regTracker.rsTrackRegTrash(reg);
- /* Free up anything that was tied up by the RHS */
+ /* Free up anything that was tied up by the RHS */
- genDoneAddressable(op2, addrReg, RegSet::KEEP_REG);
+ genDoneAddressable(op2, addrReg, RegSet::KEEP_REG);
- genCodeForTreeSmpOpAsg_DONE_ASSG(tree, addrReg, reg, ovfl);
- return;
+ genCodeForTreeSmpOpAsg_DONE_ASSG(tree, addrReg, reg, ovfl);
+ return;
- default:
- break;
+ default:
+ break;
} // end switch (op1->gtOper)
#if !CPU_LOAD_STORE_ARCH
/* Special case: "x ^= -1" is actually "not(x)" */
- if (oper == GT_ASG_XOR)
+ if (oper == GT_ASG_XOR)
{
- if (op2->gtOper == GT_CNS_INT &&
- op2->gtIntCon.gtIconVal == -1)
+ if (op2->gtOper == GT_CNS_INT && op2->gtIntCon.gtIconVal == -1)
{
addrReg = genMakeAddressable(op1, RBM_ALLINT, RegSet::KEEP_REG, true);
inst_TT(INS_NOT, op1);
@@ -7726,14 +7549,13 @@ INCDEC_REG:
/* Setup target mask for op2 (byte-regs for small operands) */
unsigned needMask;
- needMask = (varTypeIsByte(treeType)) ? RBM_BYTE_REGS
- : RBM_ALLINT;
+ needMask = (varTypeIsByte(treeType)) ? RBM_BYTE_REGS : RBM_ALLINT;
/* Is the second operand a constant? */
- if (op2->IsIntCnsFitsInI32())
+ if (op2->IsIntCnsFitsInI32())
{
- int ival = (int)op2->gtIntCon.gtIconVal;
+ int ival = (int)op2->gtIntCon.gtIconVal;
/* Make the target addressable */
addrReg = genMakeAddressable(op1, needReg, RegSet::FREE_REG, true);
@@ -7748,7 +7570,7 @@ INCDEC_REG:
/* Is the value or the address to be computed first? */
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
/* Compute the new value into a register */
@@ -7815,7 +7637,7 @@ INCDEC_REG:
/* Free up anything that was tied up either side */
regSet.rsUnlockUsedReg(addrReg);
genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
- genReleaseReg (op2);
+ genReleaseReg(op2);
}
else
{
@@ -7834,8 +7656,8 @@ INCDEC_REG:
regSet.rsLockUsedReg(addrReg);
#if !CPU_LOAD_STORE_ARCH
- // For CPU_LOAD_STORE_ARCH, we always load from memory then store to memory
-
+ // For CPU_LOAD_STORE_ARCH, we always load from memory then store to memory
+
/* For small types with overflow check, we need to
sign/zero extend the result, so we need it in a reg */
@@ -7871,21 +7693,18 @@ INCDEC_REG:
genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
regSet.rsUnlockUsedReg(genRegMask(op2->gtRegNum));
- genReleaseReg (op2);
+ genReleaseReg(op2);
}
genCodeForTreeSmpOpAsg_DONE_ASSG(tree, addrReg, reg, ovfl);
}
-
/*****************************************************************************
*
* Generate code for GT_UMOD.
*/
-void CodeGen::genCodeForUnsignedMod(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForUnsignedMod(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
assert(tree->OperGet() == GT_UMOD);
@@ -7898,14 +7717,14 @@ void CodeGen::genCodeForUnsignedMod(GenTreePtr tree,
/* Is this a division by an integer constant? */
noway_assert(op2);
- if (compiler->fgIsUnsignedModOptimizable(op2))
+ if (compiler->fgIsUnsignedModOptimizable(op2))
{
/* Generate the operand into some register */
genCompIntoFreeReg(op1, needReg, RegSet::FREE_REG);
noway_assert(op1->gtFlags & GTF_REG_VAL);
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
/* Generate the appropriate sequence */
size_t ival = op2->gtIntCon.gtIconVal - 1;
@@ -7922,15 +7741,12 @@ void CodeGen::genCodeForUnsignedMod(GenTreePtr tree,
genCodeForGeneralDivide(tree, destReg, bestReg);
}
-
/*****************************************************************************
*
* Generate code for GT_MOD.
*/
-void CodeGen::genCodeForSignedMod(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForSignedMod(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
assert(tree->OperGet() == GT_MOD);
@@ -7943,17 +7759,17 @@ void CodeGen::genCodeForSignedMod(GenTreePtr tree,
/* Is this a division by an integer constant? */
noway_assert(op2);
- if (compiler->fgIsSignedModOptimizable(op2))
+ if (compiler->fgIsSignedModOptimizable(op2))
{
- ssize_t ival = op2->gtIntCon.gtIconVal;
- BasicBlock * skip = genCreateTempLabel();
+ ssize_t ival = op2->gtIntCon.gtIconVal;
+ BasicBlock* skip = genCreateTempLabel();
/* Generate the operand into some register */
genCompIntoFreeReg(op1, needReg, RegSet::FREE_REG);
noway_assert(op1->gtFlags & GTF_REG_VAL);
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
/* Generate the appropriate sequence */
@@ -7979,9 +7795,9 @@ void CodeGen::genCodeForSignedMod(GenTreePtr tree,
}
else
{
- inst_RV_IV (INS_OR, reg, (int)ival, emitActualTypeSize(treeType));
+ inst_RV_IV(INS_OR, reg, (int)ival, emitActualTypeSize(treeType));
}
- genIncRegBy(reg, 1, NULL, treeType);
+ genIncRegBy(reg, 1, NULL, treeType);
/* Define the 'skip' label and we're done */
@@ -7994,15 +7810,12 @@ void CodeGen::genCodeForSignedMod(GenTreePtr tree,
genCodeForGeneralDivide(tree, destReg, bestReg);
}
-
/*****************************************************************************
*
* Generate code for GT_UDIV.
*/
-void CodeGen::genCodeForUnsignedDiv(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForUnsignedDiv(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
assert(tree->OperGet() == GT_UDIV);
@@ -8015,9 +7828,9 @@ void CodeGen::genCodeForUnsignedDiv(GenTreePtr tree,
/* Is this a division by an integer constant? */
noway_assert(op2);
- if (compiler->fgIsUnsignedDivOptimizable(op2))
+ if (compiler->fgIsUnsignedDivOptimizable(op2))
{
- size_t ival = op2->gtIntCon.gtIconVal;
+ size_t ival = op2->gtIntCon.gtIconVal;
/* Division by 1 must be handled elsewhere */
@@ -8028,7 +7841,7 @@ void CodeGen::genCodeForUnsignedDiv(GenTreePtr tree,
genCompIntoFreeReg(op1, needReg, RegSet::FREE_REG);
noway_assert(op1->gtFlags & GTF_REG_VAL);
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
/* Generate "shr reg, log2(value)" */
@@ -8045,15 +7858,12 @@ void CodeGen::genCodeForUnsignedDiv(GenTreePtr tree,
genCodeForGeneralDivide(tree, destReg, bestReg);
}
-
/*****************************************************************************
*
* Generate code for GT_DIV.
*/
-void CodeGen::genCodeForSignedDiv(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForSignedDiv(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
assert(tree->OperGet() == GT_DIV);
@@ -8066,7 +7876,7 @@ void CodeGen::genCodeForSignedDiv(GenTreePtr tree,
/* Is this a division by an integer constant? */
noway_assert(op2);
- if (compiler->fgIsSignedDivOptimizable(op2))
+ if (compiler->fgIsSignedDivOptimizable(op2))
{
ssize_t ival_s = op2->gtIntConCommon.IconValue();
assert(ival_s > 0); // Postcondition of compiler->fgIsSignedDivOptimizable...
@@ -8076,14 +7886,14 @@ void CodeGen::genCodeForSignedDiv(GenTreePtr tree,
noway_assert(ival != 1);
- BasicBlock * onNegDivisee = genCreateTempLabel();
+ BasicBlock* onNegDivisee = genCreateTempLabel();
/* Generate the operand into some register */
genCompIntoFreeReg(op1, needReg, RegSet::FREE_REG);
noway_assert(op1->gtFlags & GTF_REG_VAL);
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
if (ival == 2)
{
@@ -8108,7 +7918,7 @@ void CodeGen::genCodeForSignedDiv(GenTreePtr tree,
/* The result is the same as the operand */
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
}
else
{
@@ -8127,7 +7937,7 @@ void CodeGen::genCodeForSignedDiv(GenTreePtr tree,
emitJumpKind jmpGEL = genJumpKindForOper(GT_GE, CK_LOGICAL);
inst_JMP(jmpGEL, onNegDivisee);
- inst_RV_IV(INS_add, reg, (int)ival-1, emitActualTypeSize(treeType));
+ inst_RV_IV(INS_add, reg, (int)ival - 1, emitActualTypeSize(treeType));
/* Define the 'onNegDivisee' label and we're done */
@@ -8143,7 +7953,7 @@ void CodeGen::genCodeForSignedDiv(GenTreePtr tree,
/* The result is the same as the operand */
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
}
genCodeForTree_DONE(tree, reg);
@@ -8153,20 +7963,15 @@ void CodeGen::genCodeForSignedDiv(GenTreePtr tree,
genCodeForGeneralDivide(tree, destReg, bestReg);
}
-
/*****************************************************************************
*
* Generate code for a general divide. Handles the general case for GT_UMOD, GT_MOD, GT_UDIV, GT_DIV
* (if op2 is not a power of 2 constant).
*/
-void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForGeneralDivide(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
- assert(tree->OperGet() == GT_UMOD ||
- tree->OperGet() == GT_MOD ||
- tree->OperGet() == GT_UDIV ||
+ assert(tree->OperGet() == GT_UMOD || tree->OperGet() == GT_MOD || tree->OperGet() == GT_UDIV ||
tree->OperGet() == GT_DIV);
GenTreePtr op1 = tree->gtOp.gtOp1;
@@ -8186,11 +7991,11 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
/* Which operand are we supposed to evaluate first? */
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
/* We'll evaluate 'op2' first */
- gotOp1 = false;
+ gotOp1 = false;
destReg &= ~op1->gtRsvdRegs;
/* Also if op1 is an enregistered LCL_VAR then exclude its register as well */
@@ -8199,7 +8004,7 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
unsigned varNum = op1->gtLclVarCommon.gtLclNum;
noway_assert(varNum < compiler->lvaCount);
LclVarDsc* varDsc = compiler->lvaTable + varNum;
- if (varDsc->lvRegister)
+ if (varDsc->lvRegister)
{
destReg &= ~genRegMask(varDsc->lvRegNum);
}
@@ -8215,7 +8020,7 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
if (RBM_EAX & op2->gtRsvdRegs)
op1Mask = RBM_ALLINT & ~op2->gtRsvdRegs;
else
- op1Mask = RBM_EAX; // EAX would be ideal
+ op1Mask = RBM_EAX; // EAX would be ideal
/* Generate the dividend into EAX and hold on to it. freeOnly=true */
@@ -8224,14 +8029,14 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
/* We want to avoid using EAX or EDX for the second operand */
- destReg = regSet.rsMustExclude(destReg, RBM_EAX|RBM_EDX);
+ destReg = regSet.rsMustExclude(destReg, RBM_EAX | RBM_EDX);
/* Make the second operand addressable */
op2 = genCodeForCommaTree(op2);
/* Special case: if op2 is a local var we are done */
- if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_LCL_FLD)
+ if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_LCL_FLD)
{
if ((op2->gtFlags & GTF_REG_VAL) == 0)
addrReg = genMakeRvalueAddressable(op2, destReg, RegSet::KEEP_REG, false);
@@ -8248,7 +8053,7 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
/* Make sure we have the dividend in EAX */
- if (gotOp1)
+ if (gotOp1)
{
/* We've previously computed op1 into EAX */
@@ -8285,7 +8090,7 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
/* Perform the division */
if (oper == GT_UMOD || oper == GT_UDIV)
- inst_TT(INS_UNSIGNED_DIVIDE, op2);
+ inst_TT(INS_UNSIGNED_DIVIDE, op2);
else
inst_TT(INS_SIGNED_DIVIDE, op2);
@@ -8303,17 +8108,16 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
/* Both EAX and EDX are now trashed */
- regTracker.rsTrackRegTrash (REG_EAX);
- regTracker.rsTrackRegTrash (REG_EDX);
+ regTracker.rsTrackRegTrash(REG_EAX);
+ regTracker.rsTrackRegTrash(REG_EDX);
/* Figure out which register the result is in */
- reg = (oper == GT_DIV || oper == GT_UDIV) ? REG_EAX
- : REG_EDX;
+ reg = (oper == GT_DIV || oper == GT_UDIV) ? REG_EAX : REG_EDX;
/* Don't forget to mark the first operand as using EAX and EDX */
- op1->gtRegNum = reg;
+ op1->gtRegNum = reg;
genCodeForTree_DONE(tree, reg);
@@ -8321,11 +8125,11 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
/* Which operand are we supposed to evaluate first? */
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
/* We'll evaluate 'op2' first */
- gotOp1 = false;
+ gotOp1 = false;
destReg &= ~op1->gtRsvdRegs;
/* Also if op1 is an enregistered LCL_VAR then exclude its register as well */
@@ -8334,7 +8138,7 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
unsigned varNum = op1->gtLclVarCommon.gtLclNum;
noway_assert(varNum < compiler->lvaCount);
LclVarDsc* varDsc = compiler->lvaTable + varNum;
- if (varDsc->lvRegister)
+ if (varDsc->lvRegister)
{
destReg &= ~genRegMask(varDsc->lvRegNum);
}
@@ -8344,7 +8148,7 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
{
/* We'll evaluate 'op1' first */
- gotOp1 = true;
+ gotOp1 = true;
regMaskTP op1Mask = RBM_ALLINT & ~op2->gtRsvdRegs;
/* Generate the dividend into a register and hold on to it. */
@@ -8359,7 +8163,7 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
noway_assert(op2->gtFlags & GTF_REG_VAL);
addrReg = genRegMask(op2->gtRegNum);
- if (gotOp1)
+ if (gotOp1)
{
// Recover op1 if spilled
genRecoverReg(op1, RBM_NONE, RegSet::KEEP_REG);
@@ -8383,7 +8187,7 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
ins = INS_sdiv;
getEmitter()->emitIns_R_R_R(ins, EA_4BYTE, reg, op1->gtRegNum, op2->gtRegNum);
-
+
if (oper == GT_UMOD || oper == GT_MOD)
{
getEmitter()->emitIns_R_R_R(INS_mul, EA_4BYTE, reg, op2->gtRegNum, reg);
@@ -8400,37 +8204,38 @@ void CodeGen::genCodeForGeneralDivide(GenTreePtr tree,
#endif
}
-
/*****************************************************************************
*
* Generate code for an assignment shift (x <op>= ). Handles GT_ASG_LSH, GT_ASG_RSH, GT_ASG_RSZ.
*/
-void CodeGen::genCodeForAsgShift(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForAsgShift(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
- assert(tree->OperGet() == GT_ASG_LSH ||
- tree->OperGet() == GT_ASG_RSH ||
- tree->OperGet() == GT_ASG_RSZ);
-
- const genTreeOps oper = tree->OperGet();
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
- const var_types treeType = tree->TypeGet();
- insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
- regMaskTP needReg = destReg;
- regNumber reg;
- instruction ins;
- regMaskTP addrReg;
+ assert(tree->OperGet() == GT_ASG_LSH || tree->OperGet() == GT_ASG_RSH || tree->OperGet() == GT_ASG_RSZ);
+
+ const genTreeOps oper = tree->OperGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
+ const var_types treeType = tree->TypeGet();
+ insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ regMaskTP needReg = destReg;
+ regNumber reg;
+ instruction ins;
+ regMaskTP addrReg;
switch (oper)
{
- case GT_ASG_LSH: ins = INS_SHIFT_LEFT_LOGICAL; break;
- case GT_ASG_RSH: ins = INS_SHIFT_RIGHT_ARITHM; break;
- case GT_ASG_RSZ: ins = INS_SHIFT_RIGHT_LOGICAL; break;
- default:
- unreached();
+ case GT_ASG_LSH:
+ ins = INS_SHIFT_LEFT_LOGICAL;
+ break;
+ case GT_ASG_RSH:
+ ins = INS_SHIFT_RIGHT_ARITHM;
+ break;
+ case GT_ASG_RSZ:
+ ins = INS_SHIFT_RIGHT_LOGICAL;
+ break;
+ default:
+ unreached();
}
noway_assert(!varTypeIsGC(treeType));
@@ -8438,7 +8243,7 @@ void CodeGen::genCodeForAsgShift(GenTreePtr tree,
/* Shifts by a constant amount are easier */
- if (op2->IsCnsIntOrI())
+ if (op2->IsCnsIntOrI())
{
/* Make the target addressable */
@@ -8446,13 +8251,11 @@ void CodeGen::genCodeForAsgShift(GenTreePtr tree,
/* Are we shifting a register left by 1 bit? */
- if ((oper == GT_ASG_LSH) &&
- (op2->gtIntCon.gtIconVal == 1) &&
- (op1->gtFlags & GTF_REG_VAL))
+ if ((oper == GT_ASG_LSH) && (op2->gtIntCon.gtIconVal == 1) && (op1->gtFlags & GTF_REG_VAL))
{
/* The target lives in a register */
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
/* "add reg, reg" is cheaper than "shl reg, 1" */
@@ -8493,7 +8296,7 @@ void CodeGen::genCodeForAsgShift(GenTreePtr tree,
/* If the target is a register, it has a new value */
- if (op1->gtFlags & GTF_REG_VAL)
+ if (op1->gtFlags & GTF_REG_VAL)
regTracker.rsTrackRegTrash(op1->gtRegNum);
genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
@@ -8509,7 +8312,7 @@ void CodeGen::genCodeForAsgShift(GenTreePtr tree,
{
genFlagsEqualToVar(tree, op1->gtLclVarCommon.gtLclNum);
}
- else if (op1->gtOper == GT_REG_VAR)
+ else if (op1->gtOper == GT_REG_VAR)
{
genFlagsEqualToReg(tree, op1->gtRegNum);
}
@@ -8545,7 +8348,7 @@ void CodeGen::genCodeForAsgShift(GenTreePtr tree,
regSet.rsMarkRegUsed(op2);
tempRegs = regSet.rsMustExclude(RBM_ALLINT, genRegMask(op2->gtRegNum));
- addrReg = genMakeAddressable(op1, tempRegs, RegSet::KEEP_REG, true);
+ addrReg = genMakeAddressable(op1, tempRegs, RegSet::KEEP_REG, true);
genRecoverReg(op2, op2Regs, RegSet::KEEP_REG);
}
@@ -8557,7 +8360,7 @@ void CodeGen::genCodeForAsgShift(GenTreePtr tree,
excludeMask |= RBM_SHIFT;
tempRegs = regSet.rsMustExclude(RBM_ALLINT, excludeMask);
- addrReg = genMakeAddressable(op1, tempRegs, RegSet::KEEP_REG, true);
+ addrReg = genMakeAddressable(op1, tempRegs, RegSet::KEEP_REG, true);
/* Load the shift count into the necessary register */
genComputeReg(op2, op2Regs, RegSet::EXACT_REG, RegSet::KEEP_REG);
@@ -8583,7 +8386,7 @@ void CodeGen::genCodeForAsgShift(GenTreePtr tree,
/* If the value is in a register, it's now trash */
- if (op1->gtFlags & GTF_REG_VAL)
+ if (op1->gtFlags & GTF_REG_VAL)
regTracker.rsTrackRegTrash(op1->gtRegNum);
/* Release the op2 [RBM_SHIFT] operand */
@@ -8594,39 +8397,42 @@ void CodeGen::genCodeForAsgShift(GenTreePtr tree,
genCodeForTreeSmpOpAsg_DONE_ASSG(tree, addrReg, /* unused for ovfl=false */ REG_NA, /* ovfl */ false);
}
-
/*****************************************************************************
*
* Generate code for a shift. Handles GT_LSH, GT_RSH, GT_RSZ.
*/
-void CodeGen::genCodeForShift(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForShift(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
assert(tree->OperIsShift());
- const genTreeOps oper = tree->OperGet();
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
- const var_types treeType = tree->TypeGet();
- insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
- regMaskTP needReg = destReg;
- regNumber reg;
- instruction ins;
+ const genTreeOps oper = tree->OperGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
+ const var_types treeType = tree->TypeGet();
+ insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ regMaskTP needReg = destReg;
+ regNumber reg;
+ instruction ins;
switch (oper)
{
- case GT_LSH: ins = INS_SHIFT_LEFT_LOGICAL; break;
- case GT_RSH: ins = INS_SHIFT_RIGHT_ARITHM; break;
- case GT_RSZ: ins = INS_SHIFT_RIGHT_LOGICAL; break;
- default:
- unreached();
+ case GT_LSH:
+ ins = INS_SHIFT_LEFT_LOGICAL;
+ break;
+ case GT_RSH:
+ ins = INS_SHIFT_RIGHT_ARITHM;
+ break;
+ case GT_RSZ:
+ ins = INS_SHIFT_RIGHT_LOGICAL;
+ break;
+ default:
+ unreached();
}
/* Is the shift count constant? */
noway_assert(op2);
- if (op2->IsIntCnsFitsInI32())
+ if (op2->IsIntCnsFitsInI32())
{
// TODO: Check to see if we could generate a LEA instead!
@@ -8646,7 +8452,7 @@ void CodeGen::genCodeForShift(GenTreePtr tree,
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef _TARGET_ARM_
- if (oper == GT_LSH)
+ if (oper == GT_LSH)
{
emitAttr size = emitActualTypeSize(treeType);
if (op2->gtIntConCommon.IconValue() == 1)
@@ -8667,9 +8473,9 @@ void CodeGen::genCodeForShift(GenTreePtr tree,
#endif // _TARGET_ARM_
{
#ifndef _TARGET_ARM_
-DO_SHIFT_BY_CNS:
+ DO_SHIFT_BY_CNS:
#endif // _TARGET_ARM_
- // If we are shifting 'reg' by zero bits and do not need the flags to be set
+ // If we are shifting 'reg' by zero bits and do not need the flags to be set
// then we can just skip emitting the instruction as 'reg' is already correct.
//
if ((op2->gtIntConCommon.IconValue() != 0) || tree->gtSetFlags())
@@ -8722,9 +8528,9 @@ DO_SHIFT_BY_CNS:
noway_assert(op2->gtFlags & GTF_REG_VAL);
#ifdef _TARGET_XARCH_
noway_assert(genRegMask(op2->gtRegNum) == op2RegMask);
-#endif
+#endif
// Check for the case of op1 being spilled during the evaluation of op2
- if (op1->gtFlags & GTF_SPILLED)
+ if (op1->gtFlags & GTF_SPILLED)
{
// The register has been spilled -- reload it to any register except ECX
regSet.rsLockUsedReg(op2RegMask);
@@ -8755,29 +8561,22 @@ DO_SHIFT_BY_CNS:
genCodeForTree_DONE(tree, reg);
}
-
/*****************************************************************************
*
* Generate code for a top-level relational operator (not one that is part of a GT_JTRUE tree).
* Handles GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT.
*/
-void CodeGen::genCodeForRelop(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForRelop(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
- assert(tree->OperGet() == GT_EQ ||
- tree->OperGet() == GT_NE ||
- tree->OperGet() == GT_LT ||
- tree->OperGet() == GT_LE ||
- tree->OperGet() == GT_GE ||
- tree->OperGet() == GT_GT);
-
- const genTreeOps oper = tree->OperGet();
- GenTreePtr op1 = tree->gtOp.gtOp1;
- const var_types treeType = tree->TypeGet();
- regMaskTP needReg = destReg;
- regNumber reg;
+ assert(tree->OperGet() == GT_EQ || tree->OperGet() == GT_NE || tree->OperGet() == GT_LT ||
+ tree->OperGet() == GT_LE || tree->OperGet() == GT_GE || tree->OperGet() == GT_GT);
+
+ const genTreeOps oper = tree->OperGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ const var_types treeType = tree->TypeGet();
+ regMaskTP needReg = destReg;
+ regNumber reg;
// Longs and float comparisons are converted to "?:"
noway_assert(!compiler->fgMorphRelopToQmark(op1));
@@ -8809,17 +8608,17 @@ void CodeGen::genCodeForRelop(GenTreePtr tree,
// mov reg, 1
// L_end:
- BasicBlock * L_true;
- BasicBlock * L_end;
+ BasicBlock* L_true;
+ BasicBlock* L_end;
L_true = genCreateTempLabel();
L_end = genCreateTempLabel();
inst_JMP(jumpKind, L_true);
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, reg, 0); // Executes when the cond is false
+ getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, reg, 0); // Executes when the cond is false
inst_JMP(EJ_jmp, L_end);
genDefineTempLabel(L_true);
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, reg, 1); // Executes when the cond is true
+ getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, reg, 1); // Executes when the cond is true
genDefineTempLabel(L_end);
regTracker.rsTrackRegTrash(reg);
@@ -8835,7 +8634,7 @@ void CodeGen::genCodeForRelop(GenTreePtr tree,
if (jumpKind == EJ_jb)
{
inst_RV_RV(INS_SUBC, reg, reg);
- inst_RV (INS_NEG, reg, TYP_INT);
+ inst_RV(INS_NEG, reg, TYP_INT);
regTracker.rsTrackRegTrash(reg);
}
else if (jumpKind == EJ_jae)
@@ -8862,7 +8661,7 @@ void CodeGen::genCodeForRelop(GenTreePtr tree,
}
#else
NYI("TARGET");
-#endif // _TARGET_XXX
+#endif // _TARGET_XXX
genCodeForTree_DONE(tree, reg);
}
@@ -8877,15 +8676,14 @@ void CodeGen::genCodeForRelop(GenTreePtr tree,
// Return Value:
// None
-void CodeGen::genCodeForCopyObj(GenTreePtr tree,
- regMaskTP destReg)
+void CodeGen::genCodeForCopyObj(GenTreePtr tree, regMaskTP destReg)
{
- GenTreePtr op1 = tree->gtGetOp1();
- GenTreePtr op2 = tree->gtGetOp2();
- regMaskTP needReg = destReg;
- regMaskTP regs = regSet.rsMaskUsed;
- GenTreePtr opsPtr[3];
- regMaskTP regsPtr[3];
+ GenTreePtr op1 = tree->gtGetOp1();
+ GenTreePtr op2 = tree->gtGetOp2();
+ regMaskTP needReg = destReg;
+ regMaskTP regs = regSet.rsMaskUsed;
+ GenTreePtr opsPtr[3];
+ regMaskTP regsPtr[3];
noway_assert(tree->OperGet() == GT_COPYOBJ);
noway_assert(op1->IsList());
@@ -8902,28 +8700,28 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
#ifdef _TARGET_ARM_
if (cpObjOp->IsVolatile())
{
- // Emit a memory barrier instruction before the CopyBlk
+ // Emit a memory barrier instruction before the CopyBlk
instGen_MemoryBarrier();
}
#endif
- GenTreePtr srcObj = cpObjOp->Source();
- GenTreePtr dstObj = cpObjOp->Dest();
+ GenTreePtr srcObj = cpObjOp->Source();
+ GenTreePtr dstObj = cpObjOp->Dest();
noway_assert(dstObj->gtType == TYP_BYREF || dstObj->gtType == TYP_I_IMPL);
#ifdef DEBUG
- CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)op2->gtIntCon.gtIconVal;
- size_t debugBlkSize = roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
+ CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)op2->gtIntCon.gtIconVal;
+ size_t debugBlkSize = roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
// Since we round up, we are not handling the case where we have a non-pointer sized struct with GC pointers.
// The EE currently does not allow this. Let's assert it just to be safe.
noway_assert(compiler->info.compCompHnd->getClassSize(clsHnd) == debugBlkSize);
#endif
- size_t blkSize = cpObjOp->gtSlots * TARGET_POINTER_SIZE;
- unsigned slots = cpObjOp->gtSlots;
- BYTE * gcPtrs = cpObjOp->gtGcPtrs;
- unsigned gcPtrCount = cpObjOp->gtGcPtrCount;
+ size_t blkSize = cpObjOp->gtSlots * TARGET_POINTER_SIZE;
+ unsigned slots = cpObjOp->gtSlots;
+ BYTE* gcPtrs = cpObjOp->gtGcPtrs;
+ unsigned gcPtrCount = cpObjOp->gtGcPtrCount;
    // Make sure gcPtr settings are consistent.
if (gcPtrCount > 0)
@@ -8931,40 +8729,40 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
assert(cpObjOp->HasGCPtr());
}
- GenTreePtr treeFirst, treeSecond;
- regNumber regFirst, regSecond;
+ GenTreePtr treeFirst, treeSecond;
+ regNumber regFirst, regSecond;
// Check what order the object-ptrs have to be evaluated in ?
if (op1->gtFlags & GTF_REVERSE_OPS)
{
- treeFirst = srcObj;
+ treeFirst = srcObj;
treeSecond = dstObj;
#if CPU_USES_BLOCK_MOVE
- regFirst = REG_ESI;
+ regFirst = REG_ESI;
regSecond = REG_EDI;
#else
- regFirst = REG_ARG_1;
+ regFirst = REG_ARG_1;
regSecond = REG_ARG_0;
#endif
}
else
{
- treeFirst = dstObj;
+ treeFirst = dstObj;
treeSecond = srcObj;
#if CPU_USES_BLOCK_MOVE
- regFirst = REG_EDI;
+ regFirst = REG_EDI;
regSecond = REG_ESI;
#else
- regFirst = REG_ARG_0;
+ regFirst = REG_ARG_0;
regSecond = REG_ARG_1;
#endif
}
- bool dstIsOnStack = (dstObj->gtOper == GT_ADDR && (dstObj->gtFlags & GTF_ADDR_ONSTACK));
- bool srcIsOnStack = (srcObj->gtOper == GT_ADDR && (srcObj->gtFlags & GTF_ADDR_ONSTACK));
- emitAttr srcType = (varTypeIsGC(srcObj) && !srcIsOnStack) ? EA_BYREF : EA_PTRSIZE;
- emitAttr dstType = (varTypeIsGC(dstObj) && !dstIsOnStack) ? EA_BYREF : EA_PTRSIZE;
+ bool dstIsOnStack = (dstObj->gtOper == GT_ADDR && (dstObj->gtFlags & GTF_ADDR_ONSTACK));
+ bool srcIsOnStack = (srcObj->gtOper == GT_ADDR && (srcObj->gtFlags & GTF_ADDR_ONSTACK));
+ emitAttr srcType = (varTypeIsGC(srcObj) && !srcIsOnStack) ? EA_BYREF : EA_PTRSIZE;
+ emitAttr dstType = (varTypeIsGC(dstObj) && !dstIsOnStack) ? EA_BYREF : EA_PTRSIZE;
#if CPU_USES_BLOCK_MOVE
// Materialize the trees in the order desired
@@ -8989,7 +8787,7 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
}
else
{
- // This helper will act like a MOVSD
+ // This helper will act like a MOVSD
// -- inputs EDI and ESI are byrefs
// -- including incrementing of ESI and EDI by 4
// -- helper will trash ECX
@@ -8997,8 +8795,8 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
regMaskTP argRegs = genRegMask(regFirst) | genRegMask(regSecond);
regSet.rsLockUsedReg(argRegs);
genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF,
- 0, // argSize
- EA_PTRSIZE); // retSize
+ 0, // argSize
+ EA_PTRSIZE); // retSize
regSet.rsUnlockUsedReg(argRegs);
}
@@ -9015,7 +8813,7 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
/* The emitter won't record CORINFO_HELP_ASSIGN_BYREF in the GC tables as
       it is an emitNoGChelper. However, we have to let the emitter know that
- the GC liveness has changed. We do this by creating a new label.
+ the GC liveness has changed. We do this by creating a new label.
*/
noway_assert(emitter::emitNoGChelper(CORINFO_HELP_ASSIGN_BYREF));
@@ -9025,15 +8823,15 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
#else // !CPU_USES_BLOCK_MOVE
#ifndef _TARGET_ARM_
- // Currently only the ARM implementation is provided
+// Currently only the ARM implementation is provided
#error "COPYBLK for non-ARM && non-CPU_USES_BLOCK_MOVE"
#endif
// Materialize the trees in the order desired
- bool helperUsed;
- regNumber regDst;
- regNumber regSrc;
- regNumber regTemp;
+ bool helperUsed;
+ regNumber regDst;
+ regNumber regSrc;
+ regNumber regTemp;
if ((gcPtrCount > 0) && !dstIsOnStack)
{
@@ -9044,7 +8842,7 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
/* The helper is a Asm-routine that will trash R2,R3 and LR */
{
/* Spill any callee-saved registers which are being used */
- regMaskTP spillRegs = RBM_CALLEE_TRASH_NOGC & regSet.rsMaskUsed;
+ regMaskTP spillRegs = RBM_CALLEE_TRASH_NOGC & regSet.rsMaskUsed;
if (spillRegs)
{
@@ -9056,7 +8854,7 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
// We will also use it as the temp register for our load/store sequences
//
assert(REG_R2 == REG_TMP_1);
- regTemp = regSet.rsGrabReg(RBM_R2);
+ regTemp = regSet.rsGrabReg(RBM_R2);
helperUsed = true;
}
else
@@ -9067,7 +8865,7 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
// Grab any temp register to use for our load/store sequences
//
- regTemp = regSet.rsGrabReg(RBM_ALLINT);
+ regTemp = regSet.rsGrabReg(RBM_ALLINT);
helperUsed = false;
}
assert(dstObj->gtFlags & GTF_REG_VAL);
@@ -9079,18 +8877,18 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
assert(regDst != regTemp);
assert(regSrc != regTemp);
- instruction loadIns = ins_Load(TYP_I_IMPL); // INS_ldr
- instruction storeIns = ins_Store(TYP_I_IMPL); // INS_str
+ instruction loadIns = ins_Load(TYP_I_IMPL); // INS_ldr
+ instruction storeIns = ins_Store(TYP_I_IMPL); // INS_str
- size_t offset = 0;
+ size_t offset = 0;
while (blkSize >= TARGET_POINTER_SIZE)
{
CorInfoGCType gcType;
CorInfoGCType gcTypeNext = TYPE_GC_NONE;
- var_types type = TYP_I_IMPL;
+ var_types type = TYP_I_IMPL;
-#if FEATURE_WRITE_BARRIER
- gcType = (CorInfoGCType)(*gcPtrs++);
+#if FEATURE_WRITE_BARRIER
+ gcType = (CorInfoGCType)(*gcPtrs++);
if (blkSize > TARGET_POINTER_SIZE)
gcTypeNext = (CorInfoGCType)(*gcPtrs);
@@ -9107,7 +8905,7 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
}
#else
gcType = TYPE_GC_NONE;
-#endif // FEATURE_WRITE_BARRIER
+#endif // FEATURE_WRITE_BARRIER
blkSize -= TARGET_POINTER_SIZE;
@@ -9119,8 +8917,7 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
getEmitter()->emitIns_R_R_I(storeIns, opSize, regTemp, regDst, offset);
offset += TARGET_POINTER_SIZE;
- if ((helperUsed && (gcTypeNext != TYPE_GC_NONE)) ||
- ((offset >= 128) && (blkSize > 0)))
+ if ((helperUsed && (gcTypeNext != TYPE_GC_NONE)) || ((offset >= 128) && (blkSize > 0)))
{
getEmitter()->emitIns_R_I(INS_add, srcType, regSrc, offset);
getEmitter()->emitIns_R_I(INS_add, dstType, regDst, offset);
@@ -9131,7 +8928,7 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
{
assert(offset == 0);
- // The helper will act like this:
+ // The helper will act like this:
// -- inputs R0 and R1 are byrefs
// -- helper will perform copy from *R1 into *R0
// -- helper will perform post increment of R0 and R1 by 4
@@ -9143,8 +8940,8 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
regMaskTP argRegs = genRegMask(regFirst) | genRegMask(regSecond);
regSet.rsLockUsedReg(argRegs);
genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF,
- 0, // argSize
- EA_PTRSIZE); // retSize
+ 0, // argSize
+ EA_PTRSIZE); // retSize
regSet.rsUnlockUsedReg(argRegs);
regTracker.rsTrackRegMaskTrash(RBM_CALLEE_TRASH_NOGC);
@@ -9159,14 +8956,14 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
/* The emitter won't record CORINFO_HELP_ASSIGN_BYREF in the GC tables as
       it is an emitNoGChelper. However, we have to let the emitter know that
- the GC liveness has changed. We do this by creating a new label.
+ the GC liveness has changed. We do this by creating a new label.
*/
noway_assert(emitter::emitNoGChelper(CORINFO_HELP_ASSIGN_BYREF));
genDefineTempLabel(&dummyBB);
-#endif // !CPU_USES_BLOCK_MOVE
+#endif // !CPU_USES_BLOCK_MOVE
assert(blkSize == 0);
@@ -9178,22 +8975,21 @@ void CodeGen::genCodeForCopyObj(GenTreePtr tree,
#ifdef _TARGET_ARM_
if (tree->AsBlkOp()->IsVolatile())
{
- // Emit a memory barrier instruction after the CopyBlk
+ // Emit a memory barrier instruction after the CopyBlk
instGen_MemoryBarrier();
}
#endif
}
-void CodeGen::genCodeForBlkOp(GenTreePtr tree,
- regMaskTP destReg)
+void CodeGen::genCodeForBlkOp(GenTreePtr tree, regMaskTP destReg)
{
- genTreeOps oper = tree->OperGet();
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
- regMaskTP needReg = destReg;
- regMaskTP regs = regSet.rsMaskUsed;
- GenTreePtr opsPtr[3];
- regMaskTP regsPtr[3];
+ genTreeOps oper = tree->OperGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
+ regMaskTP needReg = destReg;
+ regMaskTP regs = regSet.rsMaskUsed;
+ GenTreePtr opsPtr[3];
+ regMaskTP regsPtr[3];
noway_assert(oper == GT_COPYBLK || oper == GT_INITBLK);
noway_assert(op1->IsList());
@@ -9207,20 +9003,18 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
#endif
{
GenTreePtr destPtr, srcPtrOrVal;
- destPtr = op1->gtOp.gtOp1;
+ destPtr = op1->gtOp.gtOp1;
srcPtrOrVal = op1->gtOp.gtOp2;
noway_assert(destPtr->TypeGet() == TYP_BYREF || varTypeIsIntegral(destPtr->TypeGet()));
noway_assert((oper == GT_COPYBLK &&
- (srcPtrOrVal->TypeGet() == TYP_BYREF || varTypeIsIntegral(srcPtrOrVal->TypeGet())))
- ||
- (oper == GT_INITBLK &&
- varTypeIsIntegral(srcPtrOrVal->TypeGet())));
+ (srcPtrOrVal->TypeGet() == TYP_BYREF || varTypeIsIntegral(srcPtrOrVal->TypeGet()))) ||
+ (oper == GT_INITBLK && varTypeIsIntegral(srcPtrOrVal->TypeGet())));
noway_assert(op1 && op1->IsList());
noway_assert(destPtr && srcPtrOrVal);
-#if CPU_USES_BLOCK_MOVE
- regs = (oper == GT_INITBLK) ? RBM_EAX : RBM_ESI; // What is the needReg for Val/Src
+#if CPU_USES_BLOCK_MOVE
+ regs = (oper == GT_INITBLK) ? RBM_EAX : RBM_ESI; // What is the needReg for Val/Src
/* Some special code for block moves/inits for constant sizes */
@@ -9228,18 +9022,17 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
// Is this a fixed size COPYBLK?
// or a fixed size INITBLK with a constant init value?
//
- if ((op2->IsCnsIntOrI()) &&
- ((oper == GT_COPYBLK) || (srcPtrOrVal->IsCnsIntOrI())))
+ if ((op2->IsCnsIntOrI()) && ((oper == GT_COPYBLK) || (srcPtrOrVal->IsCnsIntOrI())))
{
- size_t length = (size_t)op2->gtIntCon.gtIconVal;
- size_t initVal = 0;
+ size_t length = (size_t)op2->gtIntCon.gtIconVal;
+ size_t initVal = 0;
instruction ins_P, ins_PR, ins_B;
if (oper == GT_INITBLK)
{
- ins_P = INS_stosp;
+ ins_P = INS_stosp;
ins_PR = INS_r_stosp;
- ins_B = INS_stosb;
+ ins_B = INS_stosb;
/* Properly extend the init constant from a U1 to a U4 */
initVal = 0xFF & ((unsigned)op1->gtOp.gtOp2->gtIntCon.gtIconVal);
@@ -9254,7 +9047,7 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
#ifdef _TARGET_64BIT_
if (length > 4)
{
- initVal = initVal | (initVal << 32);
+ initVal = initVal | (initVal << 32);
op1->gtOp.gtOp2->gtType = TYP_LONG;
}
else
@@ -9267,23 +9060,23 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
}
else
{
- ins_P = INS_movsp;
+ ins_P = INS_movsp;
ins_PR = INS_r_movsp;
- ins_B = INS_movsb;
+ ins_B = INS_movsb;
}
// Determine if we will be using SSE2
unsigned movqLenMin = 8;
unsigned movqLenMax = 24;
- bool bWillUseSSE2 = false;
- bool bWillUseOnlySSE2 = false;
- bool bNeedEvaluateCnst = true; // If we only use SSE2, we will just load the constant there.
+ bool bWillUseSSE2 = false;
+ bool bWillUseOnlySSE2 = false;
+ bool bNeedEvaluateCnst = true; // If we only use SSE2, we will just load the constant there.
#ifdef _TARGET_64BIT_
- // Until we get SSE2 instructions that move 16 bytes at a time instead of just 8
- // there is no point in wasting space on the bigger instructions
+// Until we get SSE2 instructions that move 16 bytes at a time instead of just 8
+// there is no point in wasting space on the bigger instructions
#else // !_TARGET_64BIT_
@@ -9303,7 +9096,7 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
// Be less aggressive when we are inside a conditional
movqLenMax = 16;
}
- else if (curBBweight >= (BB_LOOP_WEIGHT*BB_UNITY_WEIGHT) / 2)
+ else if (curBBweight >= (BB_LOOP_WEIGHT * BB_UNITY_WEIGHT) / 2)
{
// Be more aggressive when we are inside a loop
movqLenMax = 48;
@@ -9316,9 +9109,7 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
movqLenMax += 16;
}
- if (compiler->compCodeOpt() != Compiler::SMALL_CODE &&
- length >= movqLenMin &&
- length <= movqLenMax)
+ if (compiler->compCodeOpt() != Compiler::SMALL_CODE && length >= movqLenMin && length <= movqLenMax)
{
bWillUseSSE2 = true;
@@ -9366,8 +9157,8 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
if (bWillUseSSE2)
{
- int blkDisp = 0;
- regNumber xmmReg = REG_XMM0;
+ int blkDisp = 0;
+ regNumber xmmReg = REG_XMM0;
if (oper == GT_INITBLK)
{
@@ -9382,8 +9173,9 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
}
}
- JITLOG_THIS(compiler, (LL_INFO100, "Using XMM instructions for %3d byte %s while compiling %s\n",
- length, (oper == GT_INITBLK) ? "initblk" : "copyblk", compiler->info.compFullName));
+ JITLOG_THIS(compiler,
+ (LL_INFO100, "Using XMM instructions for %3d byte %s while compiling %s\n", length,
+ (oper == GT_INITBLK) ? "initblk" : "copyblk", compiler->info.compFullName));
while (length > 7)
{
@@ -9505,8 +9297,8 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
    // else No need to trash EAX as it wasn't destroyed by the "rep stos"
genReleaseReg(op1->gtOp.gtOp1);
- if (bNeedEvaluateCnst) genReleaseReg(op1->gtOp.gtOp2);
-
+ if (bNeedEvaluateCnst)
+ genReleaseReg(op1->gtOp.gtOp2);
}
else
{
@@ -9517,8 +9309,7 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
// What order should the Dest, Val/Src, and Size be calculated
- compiler->fgOrderBlockOps(tree, RBM_EDI, regs, RBM_ECX,
- opsPtr, regsPtr); // OUT arguments
+ compiler->fgOrderBlockOps(tree, RBM_EDI, regs, RBM_ECX, opsPtr, regsPtr); // OUT arguments
noway_assert(((oper == GT_INITBLK) && (regs == RBM_EAX)) || ((oper == GT_COPYBLK) && (regs == RBM_ESI)));
genComputeReg(opsPtr[0], regsPtr[0], RegSet::EXACT_REG, RegSet::KEEP_REG, (regsPtr[0] != RBM_EAX));
@@ -9528,14 +9319,14 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
genRecoverReg(opsPtr[0], regsPtr[0], RegSet::KEEP_REG);
genRecoverReg(opsPtr[1], regsPtr[1], RegSet::KEEP_REG);
- noway_assert((op1->gtOp.gtOp1->gtFlags & GTF_REG_VAL) && // Dest
- (op1->gtOp.gtOp1->gtRegNum == REG_EDI));
+ noway_assert((op1->gtOp.gtOp1->gtFlags & GTF_REG_VAL) && // Dest
+ (op1->gtOp.gtOp1->gtRegNum == REG_EDI));
- noway_assert((op1->gtOp.gtOp2->gtFlags & GTF_REG_VAL) && // Val/Src
- (genRegMask(op1->gtOp.gtOp2->gtRegNum) == regs));
+ noway_assert((op1->gtOp.gtOp2->gtFlags & GTF_REG_VAL) && // Val/Src
+ (genRegMask(op1->gtOp.gtOp2->gtRegNum) == regs));
- noway_assert((op2->gtFlags & GTF_REG_VAL) && // Size
- (op2->gtRegNum == REG_ECX));
+ noway_assert((op2->gtFlags & GTF_REG_VAL) && // Size
+ (op2->gtRegNum == REG_ECX));
if (oper == GT_INITBLK)
instGen(INS_r_stosb);
@@ -9554,7 +9345,7 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
genReleaseReg(opsPtr[2]);
}
-#else // !CPU_USES_BLOCK_MOVE
+#else // !CPU_USES_BLOCK_MOVE
#ifndef _TARGET_ARM_
// Currently only the ARM implementation is provided
@@ -9564,15 +9355,14 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
// Is this a fixed size COPYBLK?
// or a fixed size INITBLK with a constant init value?
//
- if ((op2->OperGet() == GT_CNS_INT) &&
- ((oper == GT_COPYBLK) || (srcPtrOrVal->OperGet() == GT_CNS_INT)))
+ if ((op2->OperGet() == GT_CNS_INT) && ((oper == GT_COPYBLK) || (srcPtrOrVal->OperGet() == GT_CNS_INT)))
{
- GenTreePtr dstOp = op1->gtOp.gtOp1;
- GenTreePtr srcOp = op1->gtOp.gtOp2;
- unsigned length = (unsigned)op2->gtIntCon.gtIconVal;
- unsigned fullStoreCount = length / TARGET_POINTER_SIZE;
- unsigned initVal = 0;
- bool useLoop = false;
+ GenTreePtr dstOp = op1->gtOp.gtOp1;
+ GenTreePtr srcOp = op1->gtOp.gtOp2;
+ unsigned length = (unsigned)op2->gtIntCon.gtIconVal;
+ unsigned fullStoreCount = length / TARGET_POINTER_SIZE;
+ unsigned initVal = 0;
+ bool useLoop = false;
if (oper == GT_INITBLK)
{
@@ -9585,22 +9375,21 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
if (initVal != 0)
{
- initVal = initVal | (initVal << 8) | (initVal << 16) | (initVal << 24);
+ initVal = initVal | (initVal << 8) | (initVal << 16) | (initVal << 24);
op1->gtOp.gtOp2->gtIntCon.gtIconVal = initVal;
}
}
// Will we be using a loop to implement this INITBLK/COPYBLK?
- if (((oper == GT_COPYBLK) && (fullStoreCount >= 8)) ||
- ((oper == GT_INITBLK) && (fullStoreCount >= 16)))
+ if (((oper == GT_COPYBLK) && (fullStoreCount >= 8)) || ((oper == GT_INITBLK) && (fullStoreCount >= 16)))
{
useLoop = true;
}
- regMaskTP usedRegs;
- regNumber regDst;
- regNumber regSrc;
- regNumber regTemp;
+ regMaskTP usedRegs;
+ regNumber regDst;
+ regNumber regSrc;
+ regNumber regTemp;
/* Evaluate dest and src/val */
@@ -9631,11 +9420,11 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
assert(dstOp->gtFlags & GTF_REG_VAL);
assert(srcOp->gtFlags & GTF_REG_VAL);
- regDst = dstOp->gtRegNum;
- regSrc = srcOp->gtRegNum;
- usedRegs = (genRegMask(regSrc) | genRegMask(regDst));
- bool dstIsOnStack = (dstOp->gtOper == GT_ADDR && (dstOp->gtFlags & GTF_ADDR_ONSTACK));
- emitAttr dstType = (varTypeIsGC(dstOp) && !dstIsOnStack) ? EA_BYREF : EA_PTRSIZE;
+ regDst = dstOp->gtRegNum;
+ regSrc = srcOp->gtRegNum;
+ usedRegs = (genRegMask(regSrc) | genRegMask(regDst));
+ bool dstIsOnStack = (dstOp->gtOper == GT_ADDR && (dstOp->gtFlags & GTF_ADDR_ONSTACK));
+ emitAttr dstType = (varTypeIsGC(dstOp) && !dstIsOnStack) ? EA_BYREF : EA_PTRSIZE;
emitAttr srcType;
if (oper == GT_COPYBLK)
@@ -9644,7 +9433,7 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
regTemp = regSet.rsGrabReg(regSet.rsNarrowHint(regSet.rsRegMaskCanGrab() & ~usedRegs, RBM_LOW_REGS));
usedRegs |= genRegMask(regTemp);
bool srcIsOnStack = (srcOp->gtOper == GT_ADDR && (srcOp->gtFlags & GTF_ADDR_ONSTACK));
- srcType = (varTypeIsGC(srcOp) && !srcIsOnStack) ? EA_BYREF : EA_PTRSIZE;
+ srcType = (varTypeIsGC(srcOp) && !srcIsOnStack) ? EA_BYREF : EA_PTRSIZE;
}
else
{
@@ -9652,10 +9441,10 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
srcType = EA_PTRSIZE;
}
- instruction loadIns = ins_Load(TYP_I_IMPL); // INS_ldr
- instruction storeIns = ins_Store(TYP_I_IMPL); // INS_str
+ instruction loadIns = ins_Load(TYP_I_IMPL); // INS_ldr
+ instruction storeIns = ins_Store(TYP_I_IMPL); // INS_str
- int finalOffset;
+ int finalOffset;
// Can we emit a small number of ldr/str instructions to implement this INITBLK/COPYBLK?
if (!useLoop)
@@ -9678,26 +9467,28 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
finalOffset = fullStoreCount * TARGET_POINTER_SIZE;
length -= finalOffset;
}
- else // We will use a loop to implement this INITBLK/COPYBLK
+ else // We will use a loop to implement this INITBLK/COPYBLK
{
- unsigned pairStoreLoopCount = fullStoreCount / 2;
+ unsigned pairStoreLoopCount = fullStoreCount / 2;
// We need a second temp register for CopyBlk
- regNumber regTemp2 = REG_STK;
+ regNumber regTemp2 = REG_STK;
if (oper == GT_COPYBLK)
{
// Prefer a low register, but avoid one of the ones we've already grabbed
- regTemp2 = regSet.rsGrabReg(regSet.rsNarrowHint(regSet.rsRegMaskCanGrab() & ~usedRegs, RBM_LOW_REGS));
+ regTemp2 =
+ regSet.rsGrabReg(regSet.rsNarrowHint(regSet.rsRegMaskCanGrab() & ~usedRegs, RBM_LOW_REGS));
usedRegs |= genRegMask(regTemp2);
}
// Pick and initialize the loop counter register
regNumber regLoopIndex;
- regLoopIndex = regSet.rsGrabReg(regSet.rsNarrowHint(regSet.rsRegMaskCanGrab() & ~usedRegs, RBM_LOW_REGS));
+ regLoopIndex =
+ regSet.rsGrabReg(regSet.rsNarrowHint(regSet.rsRegMaskCanGrab() & ~usedRegs, RBM_LOW_REGS));
genSetRegToIcon(regLoopIndex, pairStoreLoopCount, TYP_INT);
// Create and define the Basic Block for the loop top
- BasicBlock * loopTopBlock = genCreateTempLabel();
+ BasicBlock* loopTopBlock = genCreateTempLabel();
genDefineTempLabel(loopTopBlock);
// The loop body
@@ -9752,8 +9543,8 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
if (length & sizeof(short))
{
- loadIns = ins_Load(TYP_USHORT); // INS_ldrh
- storeIns = ins_Store(TYP_USHORT); // INS_strh
+ loadIns = ins_Load(TYP_USHORT); // INS_ldrh
+ storeIns = ins_Store(TYP_USHORT); // INS_strh
if (oper == GT_COPYBLK)
{
@@ -9772,8 +9563,8 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
if (length & sizeof(char))
{
- loadIns = ins_Load(TYP_UBYTE); // INS_ldrb
- storeIns = ins_Store(TYP_UBYTE); // INS_strb
+ loadIns = ins_Load(TYP_UBYTE); // INS_ldrb
+ storeIns = ins_Store(TYP_UBYTE); // INS_strb
if (oper == GT_COPYBLK)
{
@@ -9802,8 +9593,7 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
// What order should the Dest, Val/Src, and Size be calculated
- compiler->fgOrderBlockOps(tree, RBM_ARG_0, RBM_ARG_1, RBM_ARG_2,
- opsPtr, regsPtr); // OUT arguments
+ compiler->fgOrderBlockOps(tree, RBM_ARG_0, RBM_ARG_1, RBM_ARG_2, opsPtr, regsPtr); // OUT arguments
genComputeReg(opsPtr[0], regsPtr[0], RegSet::EXACT_REG, RegSet::KEEP_REG);
genComputeReg(opsPtr[1], regsPtr[1], RegSet::EXACT_REG, RegSet::KEEP_REG);
@@ -9813,19 +9603,20 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
genRecoverReg(opsPtr[1], regsPtr[1], RegSet::KEEP_REG);
noway_assert((op1->gtOp.gtOp1->gtFlags & GTF_REG_VAL) && // Dest
- (op1->gtOp.gtOp1->gtRegNum == REG_ARG_0));
+ (op1->gtOp.gtOp1->gtRegNum == REG_ARG_0));
noway_assert((op1->gtOp.gtOp2->gtFlags & GTF_REG_VAL) && // Val/Src
- (op1->gtOp.gtOp2->gtRegNum == REG_ARG_1));
+ (op1->gtOp.gtOp2->gtRegNum == REG_ARG_1));
- noway_assert((op2->gtFlags & GTF_REG_VAL) && // Size
- (op2->gtRegNum == REG_ARG_2));
+ noway_assert((op2->gtFlags & GTF_REG_VAL) && // Size
+ (op2->gtRegNum == REG_ARG_2));
regSet.rsLockUsedReg(RBM_ARG_0 | RBM_ARG_1 | RBM_ARG_2);
genEmitHelperCall(oper == GT_COPYBLK ? CORINFO_HELP_MEMCPY
- /* GT_INITBLK */ : CORINFO_HELP_MEMSET,
- 0, EA_UNKNOWN);
+ /* GT_INITBLK */
+ : CORINFO_HELP_MEMSET,
+ 0, EA_UNKNOWN);
regTracker.rsTrackRegMaskTrash(RBM_CALLEE_TRASH);
@@ -9837,35 +9628,33 @@ void CodeGen::genCodeForBlkOp(GenTreePtr tree,
if ((oper == GT_COPYBLK) && tree->AsBlkOp()->IsVolatile())
{
- // Emit a memory barrier instruction after the CopyBlk
+ // Emit a memory barrier instruction after the CopyBlk
instGen_MemoryBarrier();
}
-#endif // !CPU_USES_BLOCK_MOVE
+#endif // !CPU_USES_BLOCK_MOVE
}
}
BasicBlock dummyBB;
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
- const genTreeOps oper = tree->OperGet();
- const var_types treeType = tree->TypeGet();
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
- regNumber reg = DUMMY_INIT(REG_CORRUPT);
- regMaskTP regs = regSet.rsMaskUsed;
- regMaskTP needReg = destReg;
- insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
- emitAttr size;
- instruction ins;
- regMaskTP addrReg;
- GenTreePtr opsPtr[3];
- regMaskTP regsPtr[3];
+ const genTreeOps oper = tree->OperGet();
+ const var_types treeType = tree->TypeGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
+ regNumber reg = DUMMY_INIT(REG_CORRUPT);
+ regMaskTP regs = regSet.rsMaskUsed;
+ regMaskTP needReg = destReg;
+ insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ emitAttr size;
+ instruction ins;
+ regMaskTP addrReg;
+ GenTreePtr opsPtr[3];
+ regMaskTP regsPtr[3];
#ifdef DEBUG
addrReg = 0xDEADCAFE;
@@ -9886,7 +9675,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
return;
case GT_ASG_AND:
- case GT_ASG_OR :
+ case GT_ASG_OR:
case GT_ASG_XOR:
case GT_ASG_ADD:
case GT_ASG_SUB:
@@ -9899,7 +9688,8 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
// Note that the specialCase here occurs when the treeType specifies a byte sized operation
// and we decided to enregister the op1 LclVar in a non-byteable register (ESI or EDI)
//
- bool specialCase; specialCase = false;
+ bool specialCase;
+ specialCase = false;
if (op1->gtOper == GT_REG_VAR)
{
/* Get hold of the target register */
@@ -9913,9 +9703,9 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
regTracker.rsTrackRegTrash(byteReg);
inst_RV(INS_NEG, byteReg, treeType, emitTypeSize(treeType));
- var_types op1Type = op1->TypeGet();
+ var_types op1Type = op1->TypeGet();
instruction wideningIns = ins_Move_Extend(op1Type, true);
- inst_RV_RV(wideningIns, reg, byteReg, op1Type, emitTypeSize(op1Type));
+ inst_RV_RV(wideningIns, reg, byteReg, op1Type, emitTypeSize(op1Type));
regTracker.rsTrackRegTrash(reg);
specialCase = true;
}
@@ -9934,11 +9724,11 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
{
// Fix 388382 ARM JitStress WP7
var_types op1Type = op1->TypeGet();
- regNumber reg = regSet.rsPickFreeReg();
+ regNumber reg = regSet.rsPickFreeReg();
inst_RV_TT(ins_Load(op1Type), reg, op1, 0, emitTypeSize(op1Type));
regTracker.rsTrackRegTrash(reg);
inst_RV_IV(INS_NEG, reg, 0, emitTypeSize(treeType), flags);
- inst_TT_RV(ins_Store(op1Type), op1, reg, 0, emitTypeSize(op1Type));
+ inst_TT_RV(ins_Store(op1Type), op1, reg, 0, emitTypeSize(op1Type));
}
#endif
if (op1->gtFlags & GTF_REG_VAL)
@@ -9949,7 +9739,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
return;
case GT_AND:
- case GT_OR :
+ case GT_OR:
case GT_XOR:
case GT_ADD:
case GT_SUB:
@@ -9987,12 +9777,11 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
genCompIntoFreeReg(op1, needReg, RegSet::FREE_REG);
noway_assert(op1->gtFlags & GTF_REG_VAL);
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
/* Negate/reverse the value in the register */
- inst_RV((oper == GT_NEG) ? INS_NEG
- : INS_NOT, reg, treeType);
+ inst_RV((oper == GT_NEG) ? INS_NEG : INS_NOT, reg, treeType);
/* The register is now trashed */
@@ -10002,7 +9791,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
return;
case GT_IND:
- case GT_NULLCHECK: // At this point, explicit null checks are just like inds...
+ case GT_NULLCHECK: // At this point, explicit null checks are just like inds...
/* Make sure the operand is addressable */
@@ -10016,7 +9805,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
/* Pick a register for the value */
- if (needReg == RBM_ALLINT && bestReg == 0)
+ if (needReg == RBM_ALLINT && bestReg == 0)
{
/* Absent a better suggestion, pick a useless register */
@@ -10028,11 +9817,8 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
if (op1->IsCnsIntOrI() && op1->IsIconHandle(GTF_ICON_TLS_HDL))
{
noway_assert(size == EA_PTRSIZE);
- getEmitter()->emitIns_R_C (ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- reg,
- FLD_GLOBAL_FS,
- (int)op1->gtIntCon.gtIconVal);
+ getEmitter()->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, reg, FLD_GLOBAL_FS,
+ (int)op1->gtIntCon.gtIconVal);
}
else
{
@@ -10055,7 +9841,8 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
#ifdef DEBUG
/* Update the live set of register variables */
- if (compiler->opts.varNames) genUpdateLife(tree);
+ if (compiler->opts.varNames)
+ genUpdateLife(tree);
#endif
/* Now we can update the register pointer information */
@@ -10071,12 +9858,11 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
genCodeForNumericCast(tree, destReg, bestReg);
return;
-
case GT_JTRUE:
/* Is this a test of a relational operator? */
- if (op1->OperIsCompare())
+ if (op1->OperIsCompare())
{
/* Generate the conditional jump */
@@ -10086,7 +9872,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
return;
}
-#ifdef DEBUG
+#ifdef DEBUG
compiler->gtDispTree(tree);
#endif
NO_WAY("ISSUE: can we ever have a jumpCC without a compare node?");
@@ -10098,9 +9884,9 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
case GT_RETFILT:
noway_assert(tree->gtType == TYP_VOID || op1 != 0);
- if (op1 == 0) // endfinally
+ if (op1 == 0) // endfinally
{
- reg = REG_NA;
+ reg = REG_NA;
#ifdef _TARGET_XARCH_
/* Return using a pop-jmp sequence. As the "try" block calls
@@ -10111,18 +9897,18 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
inst_RV(INS_pop_hide, REG_EAX, TYP_I_IMPL);
inst_RV(INS_i_jmp, REG_EAX, TYP_I_IMPL);
#elif defined(_TARGET_ARM_)
- // Nothing needed for ARM
+// Nothing needed for ARM
#else
NYI("TARGET");
#endif
}
- else // endfilter
+ else // endfilter
{
genComputeReg(op1, RBM_INTRET, RegSet::EXACT_REG, RegSet::FREE_REG);
noway_assert(op1->gtFlags & GTF_REG_VAL);
noway_assert(op1->gtRegNum == REG_INTRET);
/* The return value has now been computed */
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
/* Return */
instGen_Return(0);
@@ -10147,11 +9933,11 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
/* Is there a return value and/or an exit statement? */
- if (op1)
+ if (op1)
{
- if (op1->gtType == TYP_VOID)
+ if (op1->gtType == TYP_VOID)
{
- //We're returning nothing, just generate the block (shared epilog calls).
+ // We're returning nothing, just generate the block (shared epilog calls).
genCodeForTree(op1, 0);
}
#ifdef _TARGET_ARM_
@@ -10177,17 +9963,17 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
// This can only occur when we are returning a non-HFA struct
// that is composed of a single float field and we performed
// struct promotion and enregistered the float field.
- //
+ //
genComputeReg(op1, 0, RegSet::ANY_REG, RegSet::FREE_REG);
getEmitter()->emitIns_R_R(INS_vmov_f2i, EA_4BYTE, REG_INTRET, op1->gtRegNum);
}
#endif // _TARGET_ARM_
else
{
- //we can now go through this code for compiler->genReturnBB. I've regularized all the code.
+ // we can now go through this code for compiler->genReturnBB. I've regularized all the code.
+
+ // noway_assert(compiler->compCurBB != compiler->genReturnBB);
- //noway_assert(compiler->compCurBB != compiler->genReturnBB);
-
noway_assert(op1->gtType != TYP_VOID);
/* Generate the return value into the return register */
@@ -10202,15 +9988,14 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
/* The return value has now been computed */
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
genCodeForTree_DONE(tree, reg);
-
}
#ifdef PROFILING_SUPPORTED
- //The profiling hook does not trash registers, so it's safe to call after we emit the code for
- //the GT_RETURN tree.
+ // The profiling hook does not trash registers, so it's safe to call after we emit the code for
+ // the GT_RETURN tree.
if (compiler->compCurBB == compiler->genReturnBB)
{
@@ -10225,8 +10010,8 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
- BasicBlock * esp_check = genCreateTempLabel();
- emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ BasicBlock* esp_check = genCreateTempLabel();
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
@@ -10238,10 +10023,10 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
if (tree->gtFlags & GTF_REVERSE_OPS)
{
- if (tree->gtType == TYP_VOID)
+ if (tree->gtType == TYP_VOID)
{
genEvalSideEffects(op2);
- genUpdateLife (op2);
+ genUpdateLife(op2);
genEvalSideEffects(op1);
genUpdateLife(tree);
return;
@@ -10278,11 +10063,11 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
/* Generate side effects of the first operand */
genEvalSideEffects(op1);
- genUpdateLife (op1);
+ genUpdateLife(op1);
/* Is the value of the second operand used? */
- if (tree->gtType == TYP_VOID)
+ if (tree->gtType == TYP_VOID)
{
/* The right operand produces no result. The morpher is
responsible for resetting the type of GT_COMMA nodes
@@ -10300,7 +10085,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
/* The result of 'op2' is also the final result */
- reg = op2->gtRegNum;
+ reg = op2->gtRegNum;
/* Remember whether we set the flags */
@@ -10316,7 +10101,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
/* The result of 'op1' is also the final result */
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
/* Remember whether we set the flags */
@@ -10333,7 +10118,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
case GT_NOP:
#if OPT_BOOL_OPS
- if (op1 == NULL)
+ if (op1 == NULL)
return;
#endif
@@ -10343,7 +10128,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
/* The result is the same as the operand */
- reg = op1->gtRegNum;
+ reg = op1->gtRegNum;
genCodeForTree_DONE(tree, reg);
return;
@@ -10352,7 +10137,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
switch (tree->gtIntrinsic.gtIntrinsicId)
{
- case CORINFO_INTRINSIC_Round:
+ case CORINFO_INTRINSIC_Round:
{
noway_assert(tree->gtType == TYP_INT);
@@ -10376,12 +10161,11 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
genCodeForTreeFloat(tree, needReg, bestReg);
return;
#endif
- }
+ }
break;
- default:
- noway_assert(!"unexpected math intrinsic");
-
+ default:
+ noway_assert(!"unexpected math intrinsic");
}
genCodeForTree_DONE(tree, reg);
@@ -10397,7 +10181,7 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
genCodeForCopyObj(tree, destReg);
genCodeForTree_DONE(tree, REG_NA);
return;
-
+
case GT_COPYBLK:
case GT_INITBLK:
@@ -10432,146 +10216,147 @@ void CodeGen::genCodeForTreeSmpOp(GenTreePtr tree,
genComputeReg(op1, RBM_NONE, RegSet::ANY_REG, RegSet::KEEP_REG);
switch (cns)
{
- case 1:
- instGen(INS_lock);
- instEmit_RM(INS_inc, op1, op1, 0); break;
- case -1:
- instGen(INS_lock);
- instEmit_RM(INS_dec, op1, op1, 0); break;
- default:
- assert((int)cns == cns); // By test above for AMD64.
- instGen(INS_lock);
- inst_AT_IV(INS_add, EA_4BYTE, op1, (int)cns, 0); break;
+ case 1:
+ instGen(INS_lock);
+ instEmit_RM(INS_inc, op1, op1, 0);
+ break;
+ case -1:
+ instGen(INS_lock);
+ instEmit_RM(INS_dec, op1, op1, 0);
+ break;
+ default:
+ assert((int)cns == cns); // By test above for AMD64.
+ instGen(INS_lock);
+ inst_AT_IV(INS_add, EA_4BYTE, op1, (int)cns, 0);
+ break;
}
genReleaseReg(op1);
}
else
{
- //non constant addend means it needs to go into a register.
+ // non constant addend means it needs to go into a register.
ins = INS_add;
goto LockBinOpCommon;
}
- genFlagsEqualToNone(); // We didn't compute a result into a register.
- genUpdateLife(tree); // We didn't compute an operand into anything.
+ genFlagsEqualToNone(); // We didn't compute a result into a register.
+ genUpdateLife(tree); // We didn't compute an operand into anything.
return;
case GT_XADD:
- ins = INS_xadd; goto LockBinOpCommon;
+ ins = INS_xadd;
+ goto LockBinOpCommon;
case GT_XCHG:
- ins = INS_xchg; goto LockBinOpCommon;
-LockBinOpCommon:
+ ins = INS_xchg;
+ goto LockBinOpCommon;
+ LockBinOpCommon:
+ {
+ // Compute the second operand into a register. xadd and xchg are r/m32, r32. So even if op2
+ // is a constant, it needs to be in a register. This should be the output register if
+ // possible.
+ //
+ // For reference, gtOp1 is the location. gtOp2 is the addend or the value.
+
+ GenTreePtr location = op1;
+ GenTreePtr value = op2;
+
+ // Again, a friendly reminder. IL calling convention is left to right.
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
- //Compute the second operand into a register. xadd and xchg are r/m32, r32. So even if op2
- //is a constant, it needs to be in a register. This should be the output register if
- //possible.
- //
- //For reference, gtOp1 is the location. gtOp2 is the addend or the value.
+ // The atomic operations destroy this argument, so force it into a scratch register
+ reg = regSet.rsPickFreeReg();
+ genComputeReg(value, genRegMask(reg), RegSet::EXACT_REG, RegSet::KEEP_REG);
- GenTreePtr location = op1;
- GenTreePtr value = op2;
+ // Must evaluate location into a register
+ genCodeForTree(location, needReg, RBM_NONE);
+ assert(location->gtFlags & GTF_REG_VAL);
+ regSet.rsMarkRegUsed(location);
+ regSet.rsLockUsedReg(genRegMask(location->gtRegNum));
+ genRecoverReg(value, RBM_NONE, RegSet::KEEP_REG);
+ regSet.rsUnlockUsedReg(genRegMask(location->gtRegNum));
- //Again, a friendly reminder. IL calling convention is left to right.
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (ins != INS_xchg)
{
- // The atomic operations destroy this argument, so force it into a scratch register
+ // xchg implies the lock prefix, but xadd and add require it.
+ instGen(INS_lock);
+ }
+ instEmit_RM_RV(ins, EA_4BYTE, location, reg, 0);
+ genReleaseReg(value);
+ regTracker.rsTrackRegTrash(reg);
+ genReleaseReg(location);
+ }
+ else
+ {
+ regMaskTP addrReg;
+ if (genMakeIndAddrMode(location, tree, false, /* not for LEA */
+ needReg, RegSet::KEEP_REG, &addrReg))
+ {
+ genUpdateLife(location);
+
reg = regSet.rsPickFreeReg();
genComputeReg(value, genRegMask(reg), RegSet::EXACT_REG, RegSet::KEEP_REG);
-
- // Must evaluate location into a register
- genCodeForTree(location, needReg, RBM_NONE);
- assert(location->gtFlags & GTF_REG_VAL);
- regSet.rsMarkRegUsed(location);
- regSet.rsLockUsedReg(genRegMask(location->gtRegNum));
- genRecoverReg(value, RBM_NONE, RegSet::KEEP_REG);
- regSet.rsUnlockUsedReg(genRegMask(location->gtRegNum));
+ addrReg = genKeepAddressable(location, addrReg, genRegMask(reg));
if (ins != INS_xchg)
{
- //xchg implies the lock prefix, but xadd and add require it.
+ // xchg implies the lock prefix, but xadd and add require it.
instGen(INS_lock);
}
- instEmit_RM_RV(ins, EA_4BYTE, location, reg, 0);
+
+ // instEmit_RM_RV(ins, EA_4BYTE, location, reg, 0);
+ // inst_TT_RV(ins, location, reg);
+ sched_AM(ins, EA_4BYTE, reg, false, location, 0);
+
genReleaseReg(value);
regTracker.rsTrackRegTrash(reg);
- genReleaseReg(location);
+ genDoneAddressable(location, addrReg, RegSet::KEEP_REG);
}
else
{
- regMaskTP addrReg;
- if (genMakeIndAddrMode(location,
- tree,
- false, /* not for LEA */
- needReg,
- RegSet::KEEP_REG,
- &addrReg))
- {
- genUpdateLife(location);
-
- reg = regSet.rsPickFreeReg();
- genComputeReg(value, genRegMask(reg), RegSet::EXACT_REG, RegSet::KEEP_REG);
- addrReg = genKeepAddressable(location, addrReg, genRegMask(reg));
-
- if (ins != INS_xchg)
- {
- //xchg implies the lock prefix, but xadd and add require it.
- instGen(INS_lock);
- }
+ // Must evaluate location into a register.
+ genCodeForTree(location, needReg, RBM_NONE);
+ assert(location->gtFlags && GTF_REG_VAL);
+ regSet.rsMarkRegUsed(location);
- // instEmit_RM_RV(ins, EA_4BYTE, location, reg, 0);
- // inst_TT_RV(ins, location, reg);
- sched_AM(ins, EA_4BYTE, reg, false, location, 0);
+ // xadd destroys this argument, so force it into a scratch register
+ reg = regSet.rsPickFreeReg();
+ genComputeReg(value, genRegMask(reg), RegSet::EXACT_REG, RegSet::KEEP_REG);
+ regSet.rsLockUsedReg(genRegMask(value->gtRegNum));
+ genRecoverReg(location, RBM_NONE, RegSet::KEEP_REG);
+ regSet.rsUnlockUsedReg(genRegMask(value->gtRegNum));
- genReleaseReg(value);
- regTracker.rsTrackRegTrash(reg);
- genDoneAddressable(location, addrReg, RegSet::KEEP_REG);
- }
- else
+ if (ins != INS_xchg)
{
- // Must evaluate location into a register.
- genCodeForTree(location, needReg, RBM_NONE);
- assert(location->gtFlags && GTF_REG_VAL);
- regSet.rsMarkRegUsed(location);
-
- // xadd destroys this argument, so force it into a scratch register
- reg = regSet.rsPickFreeReg();
- genComputeReg(value, genRegMask(reg), RegSet::EXACT_REG, RegSet::KEEP_REG);
- regSet.rsLockUsedReg(genRegMask(value->gtRegNum));
- genRecoverReg(location, RBM_NONE, RegSet::KEEP_REG);
- regSet.rsUnlockUsedReg(genRegMask(value->gtRegNum));
-
- if (ins != INS_xchg)
- {
- //xchg implies the lock prefix, but xadd and add require it.
- instGen(INS_lock);
- }
+ // xchg implies the lock prefix, but xadd and add require it.
+ instGen(INS_lock);
+ }
- instEmit_RM_RV(ins, EA_4BYTE, location, reg, 0);
+ instEmit_RM_RV(ins, EA_4BYTE, location, reg, 0);
- genReleaseReg(value);
- regTracker.rsTrackRegTrash(reg);
- genReleaseReg(location);
- }
+ genReleaseReg(value);
+ regTracker.rsTrackRegTrash(reg);
+ genReleaseReg(location);
}
+ }
- //The flags are equal to the target of the tree (i.e. the result of the add), not to the
- //result in the register. If tree is actually GT_IND->GT_ADDR->GT_LCL_VAR, we could use
- //that information to set the flags. Doesn't seem like there is a good reason for that.
- //Therefore, trash the flags.
- genFlagsEqualToNone();
+ // The flags are equal to the target of the tree (i.e. the result of the add), not to the
+ // result in the register. If tree is actually GT_IND->GT_ADDR->GT_LCL_VAR, we could use
+ // that information to set the flags. Doesn't seem like there is a good reason for that.
+ // Therefore, trash the flags.
+ genFlagsEqualToNone();
- if (ins == INS_add)
- {
- // If the operator was add, then we were called from the GT_LOCKADD
- // case. In that case we don't use the result, so we don't need to
- // update anything.
- genUpdateLife(tree);
- }
- else
- {
- genCodeForTree_DONE(tree, reg);
- }
+ if (ins == INS_add)
+ {
+ // If the operator was add, then we were called from the GT_LOCKADD
+ // case. In that case we don't use the result, so we don't need to
+ // update anything.
+ genUpdateLife(tree);
+ }
+ else
+ {
+ genCodeForTree_DONE(tree, reg);
}
+ }
return;
#else // !_TARGET_XARCH_
@@ -10586,11 +10371,10 @@ LockBinOpCommon:
case GT_ARR_LENGTH:
{
// Make the corresponding ind(a + c) node, and do codegen for that.
- GenTreePtr addr = compiler->gtNewOperNode(GT_ADD, TYP_BYREF,
- tree->gtArrLen.ArrRef(),
- compiler->gtNewIconNode(tree->AsArrLen()->ArrLenOffset()));
+ GenTreePtr addr = compiler->gtNewOperNode(GT_ADD, TYP_BYREF, tree->gtArrLen.ArrRef(),
+ compiler->gtNewIconNode(tree->AsArrLen()->ArrLenOffset()));
tree->SetOper(GT_IND);
- tree->gtFlags |= GTF_IND_ARR_LEN; // Record that this node represents an array length expression.
+ tree->gtFlags |= GTF_IND_ARR_LEN; // Record that this node represents an array length expression.
assert(tree->TypeGet() == TYP_INT);
tree->gtOp.gtOp1 = addr;
genCodeForTree(tree, destReg, bestReg);
@@ -10614,24 +10398,21 @@ LockBinOpCommon:
#pragma warning(pop) // End suppress PREFast warning about overly large function
#endif
-
-regNumber CodeGen::genIntegerCast(GenTree *tree,
- regMaskTP needReg,
- regMaskTP bestReg)
+regNumber CodeGen::genIntegerCast(GenTree* tree, regMaskTP needReg, regMaskTP bestReg)
{
instruction ins;
emitAttr size;
bool unsv;
bool andv = false;
regNumber reg;
- GenTreePtr op1 = tree->gtOp.gtOp1->gtEffectiveVal();
- var_types dstType = tree->CastToType();
+ GenTreePtr op1 = tree->gtOp.gtOp1->gtEffectiveVal();
+ var_types dstType = tree->CastToType();
var_types srcType = op1->TypeGet();
- if (genTypeSize(srcType) < genTypeSize(dstType))
+ if (genTypeSize(srcType) < genTypeSize(dstType))
{
// Widening cast
-
+
/* we need the source size */
size = EA_ATTR(genTypeSize(srcType));
@@ -10639,7 +10420,7 @@ regNumber CodeGen::genIntegerCast(GenTree *tree,
noway_assert(size < EA_PTRSIZE);
unsv = varTypeIsUnsigned(srcType);
- ins = ins_Move_Extend(srcType, op1->InReg());
+ ins = ins_Move_Extend(srcType, op1->InReg());
/*
Special case: for a cast of byte to char we first
@@ -10671,8 +10452,7 @@ regNumber CodeGen::genIntegerCast(GenTree *tree,
if (op1->InReg())
{
regMaskTP op1RegMask = genRegMask(op1->gtRegNum);
- if ( (((op1RegMask & bestReg) != 0) || (bestReg == 0)) &&
- ((op1RegMask & regSet.rsRegMaskFree()) != 0) )
+ if ((((op1RegMask & bestReg) != 0) || (bestReg == 0)) && ((op1RegMask & regSet.rsRegMaskFree()) != 0))
{
bestReg = op1RegMask;
}
@@ -10680,9 +10460,7 @@ regNumber CodeGen::genIntegerCast(GenTree *tree,
/* Is the value sitting in a non-byte-addressable register? */
- if (op1->InReg() &&
- (size == EA_1BYTE) &&
- !isByteReg(op1->gtRegNum))
+ if (op1->InReg() && (size == EA_1BYTE) && !isByteReg(op1->gtRegNum))
{
if (unsv)
{
@@ -10696,7 +10474,7 @@ regNumber CodeGen::genIntegerCast(GenTree *tree,
{
/* Move the value into a byte register */
- reg = regSet.rsGrabReg(RBM_BYTE_REGS);
+ reg = regSet.rsGrabReg(RBM_BYTE_REGS);
}
if (reg != op1->gtRegNum)
@@ -10719,7 +10497,7 @@ regNumber CodeGen::genIntegerCast(GenTree *tree,
// if we (might) need to set the flags and the value is in the same register
// and we have an unsigned value then use AND instead of MOVZX
- if (tree->gtSetFlags() && unsv && op1->InReg() && (op1->gtRegNum == reg))
+ if (tree->gtSetFlags() && unsv && op1->InReg() && (op1->gtRegNum == reg))
{
#ifdef _TARGET_X86_
noway_assert(ins == INS_movzx);
@@ -10734,10 +10512,10 @@ regNumber CodeGen::genIntegerCast(GenTree *tree,
/* Generate "and reg, MASK */
- insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
inst_RV_IV(INS_AND, reg, (size == EA_1BYTE) ? 0xFF : 0xFFFF, EA_4BYTE, flags);
- if (tree->gtSetFlags())
+ if (tree->gtSetFlags())
genFlagsEqualToReg(tree, reg);
}
else
@@ -10752,15 +10530,15 @@ regNumber CodeGen::genIntegerCast(GenTree *tree,
/* Mask off high bits for cast from byte to char */
- if (andv)
+ if (andv)
{
#ifdef _TARGET_XARCH_
noway_assert(genTypeSize(dstType) == 2 && ins == INS_movsx);
#endif
- insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ insFlags flags = tree->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
inst_RV_IV(INS_AND, reg, 0xFFFF, EA_4BYTE, flags);
- if (tree->gtSetFlags())
+ if (tree->gtSetFlags())
genFlagsEqualToReg(tree, reg);
}
}
@@ -10769,18 +10547,16 @@ regNumber CodeGen::genIntegerCast(GenTree *tree,
return reg;
}
-void CodeGen::genCodeForNumericCast(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForNumericCast(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- var_types dstType = tree->CastToType();
- var_types baseType = TYP_INT;
- regNumber reg = DUMMY_INIT(REG_CORRUPT);
- regMaskTP needReg = destReg;
- regMaskTP addrReg;
- emitAttr size;
- BOOL unsv;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ var_types dstType = tree->CastToType();
+ var_types baseType = TYP_INT;
+ regNumber reg = DUMMY_INIT(REG_CORRUPT);
+ regMaskTP needReg = destReg;
+ regMaskTP addrReg;
+ emitAttr size;
+ BOOL unsv;
/*
* Constant casts should have been folded earlier
@@ -10788,11 +10564,8 @@ void CodeGen::genCodeForNumericCast(GenTreePtr tree,
* We don't do this optimization for debug code/no optimization
*/
- noway_assert((op1->gtOper != GT_CNS_INT &&
- op1->gtOper != GT_CNS_LNG &&
- op1->gtOper != GT_CNS_DBL) ||
- tree->gtOverflow() ||
- (op1->gtOper == GT_CNS_DBL && !_finite(op1->gtDblCon.gtDconVal)) ||
+ noway_assert((op1->gtOper != GT_CNS_INT && op1->gtOper != GT_CNS_LNG && op1->gtOper != GT_CNS_DBL) ||
+ tree->gtOverflow() || (op1->gtOper == GT_CNS_DBL && !_finite(op1->gtDblCon.gtDconVal)) ||
!compiler->opts.OptEnabled(CLFLG_CONSTANTFOLD));
noway_assert(dstType != TYP_VOID);
@@ -10801,314 +10574,313 @@ void CodeGen::genCodeForNumericCast(GenTreePtr tree,
switch (op1->TypeGet())
{
- case TYP_LONG:
+ case TYP_LONG:
- /* Special case: the long is generated via the mod of long
- with an int. This is really an int and need not be
- converted to a reg pair. NOTE: the flag only indicates
- that this is a case to TYP_INT, it hasn't actually
- verified the second operand of the MOD! */
+ /* Special case: the long is generated via the mod of long
+ with an int. This is really an int and need not be
+ converted to a reg pair. NOTE: the flag only indicates
+ that this is a cast to TYP_INT, it hasn't actually
+ verified the second operand of the MOD! */
- if (((op1->gtOper == GT_MOD) || (op1->gtOper == GT_UMOD)) &&
- (op1->gtFlags & GTF_MOD_INT_RESULT))
- {
+ if (((op1->gtOper == GT_MOD) || (op1->gtOper == GT_UMOD)) && (op1->gtFlags & GTF_MOD_INT_RESULT))
+ {
- /* Verify that the op2 of the mod node is
- 1) An integer tree, or
- 2) A long constant that is small enough to fit in an integer
- */
+ /* Verify that the op2 of the mod node is
+ 1) An integer tree, or
+ 2) A long constant that is small enough to fit in an integer
+ */
- GenTreePtr modop2 = op1->gtOp.gtOp2;
- if ((genActualType(modop2->gtType) == TYP_INT) ||
- ((modop2->gtOper == GT_CNS_LNG) &&
- (modop2->gtLngCon.gtLconVal == (int)modop2->gtLngCon.gtLconVal)))
- {
- genCodeForTree(op1, destReg, bestReg);
+ GenTreePtr modop2 = op1->gtOp.gtOp2;
+ if ((genActualType(modop2->gtType) == TYP_INT) ||
+ ((modop2->gtOper == GT_CNS_LNG) && (modop2->gtLngCon.gtLconVal == (int)modop2->gtLngCon.gtLconVal)))
+ {
+ genCodeForTree(op1, destReg, bestReg);
#ifdef _TARGET_64BIT_
- reg = op1->gtRegNum;
-#else // _TARGET_64BIT_
- reg = genRegPairLo(op1->gtRegPair);
+ reg = op1->gtRegNum;
+#else // _TARGET_64BIT_
+ reg = genRegPairLo(op1->gtRegPair);
#endif //_TARGET_64BIT_
- genCodeForTree_DONE(tree, reg);
- return;
+ genCodeForTree_DONE(tree, reg);
+ return;
+ }
}
- }
-
- /* Make the operand addressable. When gtOverflow() is true,
- hold on to the addrReg as we will need it to access the higher dword */
-
- op1 = genCodeForCommaTree(op1); // Strip off any commas (necessary, since we seem to generate code for op1 twice!)
- // See, e.g., the TYP_INT case below...
- addrReg = genMakeAddressable2(op1, 0, tree->gtOverflow() ? RegSet::KEEP_REG : RegSet::FREE_REG, false);
+ /* Make the operand addressable. When gtOverflow() is true,
+ hold on to the addrReg as we will need it to access the higher dword */
- /* Load the lower half of the value into some register */
+ op1 = genCodeForCommaTree(op1); // Strip off any commas (necessary, since we seem to generate code for op1
+ // twice!)
+ // See, e.g., the TYP_INT case below...
- if (op1->gtFlags & GTF_REG_VAL)
- {
- /* Can we simply use the low part of the value? */
- reg = genRegPairLo(op1->gtRegPair);
+ addrReg = genMakeAddressable2(op1, 0, tree->gtOverflow() ? RegSet::KEEP_REG : RegSet::FREE_REG, false);
- if (tree->gtOverflow())
- goto REG_OK;
+ /* Load the lower half of the value into some register */
- regMaskTP loMask;
- loMask = genRegMask(reg);
- if (loMask & regSet.rsRegMaskFree())
- bestReg = loMask;
- }
+ if (op1->gtFlags & GTF_REG_VAL)
+ {
+ /* Can we simply use the low part of the value? */
+ reg = genRegPairLo(op1->gtRegPair);
- // for cast overflow we need to preserve addrReg for testing the hiDword
- // so we lock it to prevent regSet.rsPickReg from picking it.
- if (tree->gtOverflow())
- regSet.rsLockUsedReg(addrReg);
+ if (tree->gtOverflow())
+ goto REG_OK;
- reg = regSet.rsPickReg(needReg, bestReg);
+ regMaskTP loMask;
+ loMask = genRegMask(reg);
+ if (loMask & regSet.rsRegMaskFree())
+ bestReg = loMask;
+ }
- if (tree->gtOverflow())
- regSet.rsUnlockUsedReg(addrReg);
+ // for cast overflow we need to preserve addrReg for testing the hiDword
+ // so we lock it to prevent regSet.rsPickReg from picking it.
+ if (tree->gtOverflow())
+ regSet.rsLockUsedReg(addrReg);
- noway_assert(genStillAddressable(op1));
+ reg = regSet.rsPickReg(needReg, bestReg);
-REG_OK:
- if (((op1->gtFlags & GTF_REG_VAL) == 0) || (reg != genRegPairLo(op1->gtRegPair)))
- {
- /* Generate "mov reg, [addr-mode]" */
- inst_RV_TT(ins_Load(TYP_INT), reg, op1);
- }
+ if (tree->gtOverflow())
+ regSet.rsUnlockUsedReg(addrReg);
- /* conv.ovf.i8i4, or conv.ovf.u8u4 */
+ noway_assert(genStillAddressable(op1));
- if (tree->gtOverflow())
- {
- regNumber hiReg = (op1->gtFlags & GTF_REG_VAL) ? genRegPairHi(op1->gtRegPair)
- : REG_NA;
+ REG_OK:
+ if (((op1->gtFlags & GTF_REG_VAL) == 0) || (reg != genRegPairLo(op1->gtRegPair)))
+ {
+ /* Generate "mov reg, [addr-mode]" */
+ inst_RV_TT(ins_Load(TYP_INT), reg, op1);
+ }
- emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
- emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
+ /* conv.ovf.i8i4, or conv.ovf.u8u4 */
- switch (dstType)
+ if (tree->gtOverflow())
{
- case TYP_INT: // conv.ovf.i8.i4
- /* Generate the following sequence
-
- test loDWord, loDWord // set flags
- jl neg
- pos: test hiDWord, hiDWord // set flags
- jne ovf
- jmp done
- neg: cmp hiDWord, 0xFFFFFFFF
- jne ovf
- done:
+ regNumber hiReg = (op1->gtFlags & GTF_REG_VAL) ? genRegPairHi(op1->gtRegPair) : REG_NA;
- */
+ emitJumpKind jmpNotEqual = genJumpKindForOper(GT_NE, CK_SIGNED);
+ emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
- instGen_Compare_Reg_To_Zero(EA_4BYTE, reg);
- if (tree->gtFlags & GTF_UNSIGNED) // conv.ovf.u8.i4 (i4 > 0 and upper bits 0)
+ switch (dstType)
{
- genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
- goto UPPER_BITS_ZERO;
- }
+ case TYP_INT: // conv.ovf.i8.i4
+ /* Generate the following sequence
+
+ test loDWord, loDWord // set flags
+ jl neg
+ pos: test hiDWord, hiDWord // set flags
+ jne ovf
+ jmp done
+ neg: cmp hiDWord, 0xFFFFFFFF
+ jne ovf
+ done:
+
+ */
+
+ instGen_Compare_Reg_To_Zero(EA_4BYTE, reg);
+ if (tree->gtFlags & GTF_UNSIGNED) // conv.ovf.u8.i4 (i4 > 0 and upper bits 0)
+ {
+ genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
+ goto UPPER_BITS_ZERO;
+ }
#if CPU_LOAD_STORE_ARCH
- // This is tricky.
- // We will generate code like
- // if (...)
- // {
- // ...
- // }
- // else
- // {
- // ...
- // }
- // We load the tree op1 into regs when we generate code for if clause.
- // When we generate else clause, we see the tree is already loaded into reg, and start use it directly.
- // Well, when the code is run, we may execute else clause without going through if clause.
- //
- genCodeForTree(op1, 0);
+ // This is tricky.
+ // We will generate code like
+ // if (...)
+ // {
+ // ...
+ // }
+ // else
+ // {
+ // ...
+ // }
+ // We load the tree op1 into regs when we generate code for if clause.
+ // When we generate else clause, we see the tree is already loaded into reg, and start using it
+ // directly.
+ // Well, when the code is run, we may execute else clause without going through if clause.
+ //
+ genCodeForTree(op1, 0);
#endif
- BasicBlock * neg;
- BasicBlock * done;
+ BasicBlock* neg;
+ BasicBlock* done;
- neg = genCreateTempLabel();
- done = genCreateTempLabel();
+ neg = genCreateTempLabel();
+ done = genCreateTempLabel();
- // Is the loDWord positive or negative
- inst_JMP(jmpLTS, neg);
+ // Is the loDWord positive or negative
+ inst_JMP(jmpLTS, neg);
- // If loDWord is positive, hiDWord should be 0 (sign extended loDWord)
+ // If loDWord is positive, hiDWord should be 0 (sign extended loDWord)
- if (hiReg < REG_STK)
- {
- instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg);
- }
- else
- {
- inst_TT_IV(INS_cmp, op1, 0x00000000, 4);
- }
+ if (hiReg < REG_STK)
+ {
+ instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg);
+ }
+ else
+ {
+ inst_TT_IV(INS_cmp, op1, 0x00000000, 4);
+ }
- genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
- inst_JMP(EJ_jmp, done);
+ genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
+ inst_JMP(EJ_jmp, done);
- // If loDWord is negative, hiDWord should be -1 (sign extended loDWord)
+ // If loDWord is negative, hiDWord should be -1 (sign extended loDWord)
- genDefineTempLabel(neg);
+ genDefineTempLabel(neg);
- if (hiReg < REG_STK)
- {
- inst_RV_IV(INS_cmp, hiReg, 0xFFFFFFFFL, EA_4BYTE);
- }
- else
- {
- inst_TT_IV(INS_cmp, op1, 0xFFFFFFFFL, 4);
- }
- genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
+ if (hiReg < REG_STK)
+ {
+ inst_RV_IV(INS_cmp, hiReg, 0xFFFFFFFFL, EA_4BYTE);
+ }
+ else
+ {
+ inst_TT_IV(INS_cmp, op1, 0xFFFFFFFFL, 4);
+ }
+ genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
- // Done
+ // Done
- genDefineTempLabel(done);
+ genDefineTempLabel(done);
- break;
+ break;
- case TYP_UINT: // conv.ovf.u8u4
-UPPER_BITS_ZERO:
- // Just check that the upper DWord is 0
+ case TYP_UINT: // conv.ovf.u8u4
+ UPPER_BITS_ZERO:
+ // Just check that the upper DWord is 0
- if (hiReg < REG_STK)
- {
- instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg); // set flags
- }
- else
- {
- inst_TT_IV(INS_cmp, op1, 0, 4);
+ if (hiReg < REG_STK)
+ {
+ instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg); // set flags
+ }
+ else
+ {
+ inst_TT_IV(INS_cmp, op1, 0, 4);
+ }
+
+ genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
+ break;
+
+ default:
+ noway_assert(!"Unexpected dstType");
+ break;
}
-
- genJumpToThrowHlpBlk(jmpNotEqual, SCK_OVERFLOW);
- break;
- default:
- noway_assert(!"Unexpected dstType");
- break;
+ genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
}
- genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
- }
-
- regTracker.rsTrackRegTrash(reg);
- genDoneAddressable(op1, addrReg, RegSet::FREE_REG);
+ regTracker.rsTrackRegTrash(reg);
+ genDoneAddressable(op1, addrReg, RegSet::FREE_REG);
- genCodeForTree_DONE(tree, reg);
- return;
+ genCodeForTree_DONE(tree, reg);
+ return;
- case TYP_BOOL:
- case TYP_BYTE:
- case TYP_SHORT:
- case TYP_CHAR:
- case TYP_UBYTE:
- break;
+ case TYP_BOOL:
+ case TYP_BYTE:
+ case TYP_SHORT:
+ case TYP_CHAR:
+ case TYP_UBYTE:
+ break;
- case TYP_UINT:
- case TYP_INT:
- break;
+ case TYP_UINT:
+ case TYP_INT:
+ break;
#if FEATURE_STACK_FP_X87
- case TYP_FLOAT:
- NO_WAY("OPCAST from TYP_FLOAT should have been converted into a helper call");
- break;
-
- case TYP_DOUBLE:
- if (compiler->opts.compCanUseSSE2)
- {
- // do the SSE2 based cast inline
- // getting the fp operand
-
- regMaskTP addrRegInt = 0;
- regMaskTP addrRegFlt = 0;
+ case TYP_FLOAT:
+ NO_WAY("OPCAST from TYP_FLOAT should have been converted into a helper call");
+ break;
- // make the operand addressable
- // We don't want to collapse constant doubles into floats, as the SSE2 instruction
- // operates on doubles. Note that these (casts from constant doubles) usually get
- // folded, but we don't do it for some cases (infinitys, etc). So essentially this
- // shouldn't affect performance or size at all. We're fixing this for #336067
- op1 = genMakeAddressableStackFP(op1, &addrRegInt, &addrRegFlt, false);
- if (!addrRegFlt && !op1->IsRegVar())
+ case TYP_DOUBLE:
+ if (compiler->opts.compCanUseSSE2)
{
- // we have the address
+ // do the SSE2 based cast inline
+ // getting the fp operand
- inst_RV_TT(INS_movsdsse2, REG_XMM0, op1, 0, EA_8BYTE);
- genDoneAddressableStackFP(op1, addrRegInt, addrRegFlt, RegSet::KEEP_REG);
- genUpdateLife(op1);
-
- reg = regSet.rsPickReg(needReg);
- getEmitter()->emitIns_R_R(INS_cvttsd2si, EA_8BYTE, reg, REG_XMM0);
+ regMaskTP addrRegInt = 0;
+ regMaskTP addrRegFlt = 0;
- regTracker.rsTrackRegTrash(reg);
- genCodeForTree_DONE(tree, reg);
- }
- else
- {
- // we will need to use a temp to get it into the xmm reg
- var_types typeTemp = op1->TypeGet();
- TempDsc * temp = compiler->tmpGetTemp(typeTemp);
+ // make the operand addressable
+ // We don't want to collapse constant doubles into floats, as the SSE2 instruction
+ // operates on doubles. Note that these (casts from constant doubles) usually get
+ // folded, but we don't do it for some cases (infinities, etc). So essentially this
+ // shouldn't affect performance or size at all. We're fixing this for #336067
+ op1 = genMakeAddressableStackFP(op1, &addrRegInt, &addrRegFlt, false);
+ if (!addrRegFlt && !op1->IsRegVar())
+ {
+ // we have the address
- size = EA_ATTR(genTypeSize(typeTemp));
+ inst_RV_TT(INS_movsdsse2, REG_XMM0, op1, 0, EA_8BYTE);
+ genDoneAddressableStackFP(op1, addrRegInt, addrRegFlt, RegSet::KEEP_REG);
+ genUpdateLife(op1);
- if (addrRegFlt )
- {
- // On the fp stack; Take reg to top of stack
+ reg = regSet.rsPickReg(needReg);
+ getEmitter()->emitIns_R_R(INS_cvttsd2si, EA_8BYTE, reg, REG_XMM0);
- FlatFPX87_MoveToTOS(&compCurFPState, op1->gtRegNum);
+ regTracker.rsTrackRegTrash(reg);
+ genCodeForTree_DONE(tree, reg);
}
else
{
- // op1->IsRegVar()
- // pick a register
- reg = regSet.PickRegFloat();
- if (!op1->IsRegVarDeath())
+ // we will need to use a temp to get it into the xmm reg
+ var_types typeTemp = op1->TypeGet();
+ TempDsc* temp = compiler->tmpGetTemp(typeTemp);
+
+ size = EA_ATTR(genTypeSize(typeTemp));
+
+ if (addrRegFlt)
{
- // Load it on the fp stack
- genLoadStackFP(op1, reg);
+ // On the fp stack; Take reg to top of stack
+
+ FlatFPX87_MoveToTOS(&compCurFPState, op1->gtRegNum);
}
else
{
- // if it's dying, genLoadStackFP just renames it and then we move reg to TOS
- genLoadStackFP(op1, reg);
- FlatFPX87_MoveToTOS(&compCurFPState, reg);
+ // op1->IsRegVar()
+ // pick a register
+ reg = regSet.PickRegFloat();
+ if (!op1->IsRegVarDeath())
+ {
+ // Load it on the fp stack
+ genLoadStackFP(op1, reg);
+ }
+ else
+ {
+ // if it's dying, genLoadStackFP just renames it and then we move reg to TOS
+ genLoadStackFP(op1, reg);
+ FlatFPX87_MoveToTOS(&compCurFPState, reg);
+ }
}
- }
- // pop it off the fp stack
- compCurFPState.Pop();
+ // pop it off the fp stack
+ compCurFPState.Pop();
- getEmitter()->emitIns_S(INS_fstp, size, temp->tdTempNum(), 0);
- // pick a reg
- reg = regSet.rsPickReg(needReg);
+ getEmitter()->emitIns_S(INS_fstp, size, temp->tdTempNum(), 0);
+ // pick a reg
+ reg = regSet.rsPickReg(needReg);
- inst_RV_ST(INS_movsdsse2, REG_XMM0, temp, 0, TYP_DOUBLE, EA_8BYTE);
- getEmitter()->emitIns_R_R(INS_cvttsd2si, EA_8BYTE, reg, REG_XMM0);
+ inst_RV_ST(INS_movsdsse2, REG_XMM0, temp, 0, TYP_DOUBLE, EA_8BYTE);
+ getEmitter()->emitIns_R_R(INS_cvttsd2si, EA_8BYTE, reg, REG_XMM0);
- // done..release the temp
- compiler->tmpRlsTemp(temp);
+ // done..release the temp
+ compiler->tmpRlsTemp(temp);
- // the reg is now trashed
- regTracker.rsTrackRegTrash(reg);
- genDoneAddressableStackFP(op1, addrRegInt, addrRegFlt, RegSet::KEEP_REG);
- genUpdateLife(op1);
- genCodeForTree_DONE(tree, reg);
+ // the reg is now trashed
+ regTracker.rsTrackRegTrash(reg);
+ genDoneAddressableStackFP(op1, addrRegInt, addrRegFlt, RegSet::KEEP_REG);
+ genUpdateLife(op1);
+ genCodeForTree_DONE(tree, reg);
+ }
}
- }
#else
- case TYP_FLOAT:
- case TYP_DOUBLE:
- genCodeForTreeFloat(tree, needReg, bestReg);
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ genCodeForTreeFloat(tree, needReg, bestReg);
#endif // FEATURE_STACK_FP_X87
- return;
+ return;
- default:
- noway_assert(!"unexpected cast type");
+ default:
+ noway_assert(!"unexpected cast type");
}
if (tree->gtOverflow())
@@ -11125,46 +10897,49 @@ UPPER_BITS_ZERO:
switch (dstType)
{
- case TYP_BYTE:
- typeMask = ssize_t((int)0xFFFFFF80);
- typeMin = SCHAR_MIN; typeMax = SCHAR_MAX;
- unsv = (tree->gtFlags & GTF_UNSIGNED);
- break;
- case TYP_SHORT:
- typeMask = ssize_t((int)0xFFFF8000);
- typeMin = SHRT_MIN; typeMax = SHRT_MAX;
- unsv = (tree->gtFlags & GTF_UNSIGNED);
- break;
- case TYP_INT:
- typeMask = ssize_t((int)0x80000000L);
+ case TYP_BYTE:
+ typeMask = ssize_t((int)0xFFFFFF80);
+ typeMin = SCHAR_MIN;
+ typeMax = SCHAR_MAX;
+ unsv = (tree->gtFlags & GTF_UNSIGNED);
+ break;
+ case TYP_SHORT:
+ typeMask = ssize_t((int)0xFFFF8000);
+ typeMin = SHRT_MIN;
+ typeMax = SHRT_MAX;
+ unsv = (tree->gtFlags & GTF_UNSIGNED);
+ break;
+ case TYP_INT:
+ typeMask = ssize_t((int)0x80000000L);
#ifdef _TARGET_64BIT_
- unsv = (tree->gtFlags & GTF_UNSIGNED);
- typeMin = INT_MIN; typeMax = INT_MAX;
+ unsv = (tree->gtFlags & GTF_UNSIGNED);
+ typeMin = INT_MIN;
+ typeMax = INT_MAX;
#else // _TARGET_64BIT_
- noway_assert((tree->gtFlags & GTF_UNSIGNED) != 0);
- unsv = true;
+ noway_assert((tree->gtFlags & GTF_UNSIGNED) != 0);
+ unsv = true;
#endif // _TARGET_64BIT_
- break;
- case TYP_UBYTE:
- unsv = true;
- typeMask = ssize_t((int)0xFFFFFF00L);
- break;
- case TYP_CHAR:
- unsv = true;
- typeMask = ssize_t((int)0xFFFF0000L);
- break;
- case TYP_UINT:
- unsv = true;
+ break;
+ case TYP_UBYTE:
+ unsv = true;
+ typeMask = ssize_t((int)0xFFFFFF00L);
+ break;
+ case TYP_CHAR:
+ unsv = true;
+ typeMask = ssize_t((int)0xFFFF0000L);
+ break;
+ case TYP_UINT:
+ unsv = true;
#ifdef _TARGET_64BIT_
- typeMask = 0xFFFFFFFF00000000LL;
-#else // _TARGET_64BIT_
- typeMask = 0x80000000L;
- noway_assert((tree->gtFlags & GTF_UNSIGNED) == 0);
+ typeMask = 0xFFFFFFFF00000000LL;
+#else // _TARGET_64BIT_
+ typeMask = 0x80000000L;
+ noway_assert((tree->gtFlags & GTF_UNSIGNED) == 0);
#endif // _TARGET_64BIT_
- break;
- default:
- NO_WAY("Unknown type");
- return;
+ break;
+ default:
+ NO_WAY("Unknown type");
+ return;
}
// If we just have to check a mask.
@@ -11217,19 +10992,17 @@ UPPER_BITS_ZERO:
* Generate code for a leaf node of type GT_ADDR
*/
-void CodeGen::genCodeForTreeSmpOp_GT_ADDR(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForTreeSmpOp_GT_ADDR(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
genTreeOps oper = tree->OperGet();
const var_types treeType = tree->TypeGet();
GenTreePtr op1;
regNumber reg;
- regMaskTP needReg = destReg;
+ regMaskTP needReg = destReg;
regMaskTP addrReg;
#ifdef DEBUG
- reg = (regNumber)0xFEEFFAAF; // to detect uninitialized use
+ reg = (regNumber)0xFEEFFAAF; // to detect uninitialized use
addrReg = 0xDEADCAFE;
#endif
@@ -11245,7 +11018,8 @@ void CodeGen::genCodeForTreeSmpOp_GT_ADDR(GenTreePtr tree,
}
// (tree=op1, needReg=0, keepReg=RegSet::FREE_REG, smallOK=true)
- if (oper == GT_ARR_ELEM) {
+ if (oper == GT_ARR_ELEM)
+ {
// To get the address of the array element,
// we first call genMakeAddrArrElem to make the element addressable.
// (That is, for example, we first emit code to calculate EBX, and EAX.)
@@ -11260,7 +11034,7 @@ void CodeGen::genCodeForTreeSmpOp_GT_ADDR(GenTreePtr tree,
addrReg = genMakeAddressable(op1, 0, RegSet::FREE_REG, true);
}
- noway_assert( treeType == TYP_BYREF || treeType == TYP_I_IMPL );
+ noway_assert(treeType == TYP_BYREF || treeType == TYP_I_IMPL);
// We want to reuse one of the scratch registers that were used
// in forming the address mode as the target register for the lea.
@@ -11268,7 +11042,7 @@ void CodeGen::genCodeForTreeSmpOp_GT_ADDR(GenTreePtr tree,
// form the address (i.e. addrReg), we calculate the scratch register
// to use as the target register for the LEA
- bestReg = regSet.rsUseIfZero (bestReg, addrReg);
+ bestReg = regSet.rsUseIfZero(bestReg, addrReg);
bestReg = regSet.rsNarrowHint(bestReg, addrReg);
/* Even if addrReg is regSet.rsRegMaskCanGrab(), regSet.rsPickReg() won't spill
@@ -11295,13 +11069,12 @@ void CodeGen::genCodeForTreeSmpOp_GT_ADDR(GenTreePtr tree,
// gcInfo.gcMarkRegSetNpt(genRegMask(reg));
noway_assert((gcInfo.gcRegGCrefSetCur & genRegMask(reg)) == 0);
- regTracker.rsTrackRegTrash(reg); // reg does have foldable value in it
+ regTracker.rsTrackRegTrash(reg); // reg does have foldable value in it
gcInfo.gcMarkRegPtrVal(reg, treeType);
genCodeForTree_DONE(tree, reg);
}
-
#ifdef _TARGET_ARM_
/*****************************************************************************
@@ -11312,7 +11085,7 @@ void CodeGen::genCodeForTreeSmpOp_GT_ADDR(GenTreePtr tree,
* isLoadIntoFlt - Perform a load operation if "true" or store if "false."
*
*/
-void CodeGen::genLdStFltRetRegsPromotedVar(LclVarDsc* varDsc, bool isLoadIntoFlt)
+void CodeGen::genLdStFltRetRegsPromotedVar(LclVarDsc* varDsc, bool isLoadIntoFlt)
{
regNumber curReg = REG_FLOATRET;
@@ -11354,12 +11127,12 @@ void CodeGen::genLdStFltRetRegsPromotedVar(LclVarDsc* varDsc, boo
}
}
-void CodeGen::genLoadIntoFltRetRegs(GenTreePtr tree)
+void CodeGen::genLoadIntoFltRetRegs(GenTreePtr tree)
{
assert(tree->TypeGet() == TYP_STRUCT);
assert(tree->gtOper == GT_LCL_VAR);
LclVarDsc* varDsc = compiler->lvaTable + tree->gtLclVarCommon.gtLclNum;
- int slots = varDsc->lvSize() / REGSIZE_BYTES;
+ int slots = varDsc->lvSize() / REGSIZE_BYTES;
if (varDsc->lvPromoted)
{
genLdStFltRetRegsPromotedVar(varDsc, true);
@@ -11369,12 +11142,8 @@ void CodeGen::genLoadIntoFltRetRegs(GenTreePtr tree)
if (slots <= 2)
{
// Use the load float/double instruction.
- inst_RV_TT(
- ins_Load((slots == 1) ? TYP_FLOAT : TYP_DOUBLE),
- REG_FLOATRET,
- tree,
- 0,
- (slots == 1) ? EA_4BYTE : EA_8BYTE);
+ inst_RV_TT(ins_Load((slots == 1) ? TYP_FLOAT : TYP_DOUBLE), REG_FLOATRET, tree, 0,
+ (slots == 1) ? EA_4BYTE : EA_8BYTE);
}
else
{
@@ -11388,7 +11157,7 @@ void CodeGen::genLoadIntoFltRetRegs(GenTreePtr tree)
genMarkTreeInReg(tree, REG_FLOATRET);
}
-void CodeGen::genStoreFromFltRetRegs(GenTreePtr tree)
+void CodeGen::genStoreFromFltRetRegs(GenTreePtr tree)
{
assert(tree->TypeGet() == TYP_STRUCT);
assert(tree->OperGet() == GT_ASG);
@@ -11420,7 +11189,7 @@ void CodeGen::genStoreFromFltRetRegs(GenTreePtr tree)
regMaskTP mask = ((retMask >> REG_FLOATRET) + 1);
assert((mask & (mask - 1)) == 0);
assert(mask <= (1 << MAX_HFA_RET_SLOTS));
- assert((retMask & (((regMaskTP) RBM_FLOATRET) - 1)) == 0);
+ assert((retMask & (((regMaskTP)RBM_FLOATRET) - 1)) == 0);
#endif
int slots = genCountBits(retMask & RBM_ALLFLOAT);
@@ -11435,12 +11204,8 @@ void CodeGen::genStoreFromFltRetRegs(GenTreePtr tree)
{
if (slots <= 2)
{
- inst_TT_RV(
- ins_Store((slots == 1) ? TYP_FLOAT : TYP_DOUBLE),
- op1,
- REG_FLOATRET,
- 0,
- (slots == 1) ? EA_4BYTE : EA_8BYTE);
+ inst_TT_RV(ins_Store((slots == 1) ? TYP_FLOAT : TYP_DOUBLE), op1, REG_FLOATRET, 0,
+ (slots == 1) ? EA_4BYTE : EA_8BYTE);
}
else
{
@@ -11461,24 +11226,24 @@ void CodeGen::genStoreFromFltRetRegs(GenTreePtr tree)
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-void CodeGen::genCodeForTreeSmpOpAsg(GenTreePtr tree)
+void CodeGen::genCodeForTreeSmpOpAsg(GenTreePtr tree)
{
noway_assert(tree->gtOper == GT_ASG);
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
- regMaskTP needReg = RBM_ALLINT;
- regMaskTP bestReg = RBM_CORRUPT;
- regMaskTP addrReg = DUMMY_INIT(RBM_CORRUPT);
- bool ovfl = false; // Do we need an overflow check
- bool volat = false; // Is this a volatile store
- regMaskTP regGC;
- instruction ins;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
+ regMaskTP needReg = RBM_ALLINT;
+ regMaskTP bestReg = RBM_CORRUPT;
+ regMaskTP addrReg = DUMMY_INIT(RBM_CORRUPT);
+ bool ovfl = false; // Do we need an overflow check
+ bool volat = false; // Is this a volatile store
+ regMaskTP regGC;
+ instruction ins;
#ifdef DEBUGGING_SUPPORT
- unsigned lclVarNum = compiler->lvaCount;
- unsigned lclILoffs = DUMMY_INIT(0);
+ unsigned lclVarNum = compiler->lvaCount;
+ unsigned lclILoffs = DUMMY_INIT(0);
#endif
#ifdef _TARGET_ARM_
@@ -11492,7 +11257,7 @@ void CodeGen::genCodeForTreeSmpOpAsg(GenTreePtr tree)
}
#endif
-#ifdef DEBUG
+#ifdef DEBUG
if (varTypeIsFloating(op1) != varTypeIsFloating(op2))
{
if (varTypeIsFloating(op1))
@@ -11504,211 +11269,211 @@ void CodeGen::genCodeForTreeSmpOpAsg(GenTreePtr tree)
if ((tree->gtFlags & GTF_REVERSE_OPS) == 0)
{
- op1 = genCodeForCommaTree(op1); // Strip away any comma expressions.
+ op1 = genCodeForCommaTree(op1); // Strip away any comma expressions.
}
/* Is the target a register or local variable? */
switch (op1->gtOper)
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- case GT_LCL_VAR:
- varNum = op1->gtLclVarCommon.gtLclNum;
- noway_assert(varNum < compiler->lvaCount);
- varDsc = compiler->lvaTable + varNum;
+ case GT_LCL_VAR:
+ varNum = op1->gtLclVarCommon.gtLclNum;
+ noway_assert(varNum < compiler->lvaCount);
+ varDsc = compiler->lvaTable + varNum;
- #ifdef DEBUGGING_SUPPORT
- /* For non-debuggable code, every definition of a lcl-var has
- * to be checked to see if we need to open a new scope for it.
- * Remember the local var info to call siCheckVarScope
- * AFTER code generation of the assignment.
- */
- if (compiler->opts.compScopeInfo && !compiler->opts.compDbgCode && (compiler->info.compVarScopesCount > 0))
- {
- lclVarNum = varNum;
- lclILoffs = op1->gtLclVar.gtLclILoffs;
- }
- #endif
+#ifdef DEBUGGING_SUPPORT
+ /* For non-debuggable code, every definition of a lcl-var has
+ * to be checked to see if we need to open a new scope for it.
+ * Remember the local var info to call siCheckVarScope
+ * AFTER code generation of the assignment.
+ */
+ if (compiler->opts.compScopeInfo && !compiler->opts.compDbgCode && (compiler->info.compVarScopesCount > 0))
+ {
+ lclVarNum = varNum;
+ lclILoffs = op1->gtLclVar.gtLclILoffs;
+ }
+#endif
- /* Check against dead store ? (with min opts we may have dead stores) */
+ /* Check against dead store ? (with min opts we may have dead stores) */
- noway_assert(!varDsc->lvTracked || compiler->opts.MinOpts() || !(op1->gtFlags & GTF_VAR_DEATH));
+ noway_assert(!varDsc->lvTracked || compiler->opts.MinOpts() || !(op1->gtFlags & GTF_VAR_DEATH));
- /* Does this variable live in a register? */
+ /* Does this variable live in a register? */
- if (genMarkLclVar(op1))
- goto REG_VAR2;
+ if (genMarkLclVar(op1))
+ goto REG_VAR2;
- break;
+ break;
-REG_VAR2:
+ REG_VAR2:
- /* Get hold of the target register */
+ /* Get hold of the target register */
- regNumber op1Reg;
+ regNumber op1Reg;
- op1Reg = op1->gtRegVar.gtRegNum;
+ op1Reg = op1->gtRegVar.gtRegNum;
#ifdef DEBUG
- /* Compute the RHS (hopefully) into the variable's register.
- For debuggable code, op1Reg may already be part of regSet.rsMaskVars,
- as variables are kept alive everywhere. So we have to be
- careful if we want to compute the value directly into
- the variable's register. */
+ /* Compute the RHS (hopefully) into the variable's register.
+ For debuggable code, op1Reg may already be part of regSet.rsMaskVars,
+ as variables are kept alive everywhere. So we have to be
+ careful if we want to compute the value directly into
+ the variable's register. */
- bool needToUpdateRegSetCheckLevel;
- needToUpdateRegSetCheckLevel = false;
-#endif
+ bool needToUpdateRegSetCheckLevel;
+ needToUpdateRegSetCheckLevel = false;
+#endif
- // We should only be accessing lvVarIndex if varDsc is tracked.
- assert(varDsc->lvTracked);
+ // We should only be accessing lvVarIndex if varDsc is tracked.
+ assert(varDsc->lvTracked);
- if (VarSetOps::IsMember(compiler, genUpdateLiveSetForward(op2), varDsc->lvVarIndex))
- {
- noway_assert(compiler->opts.compDbgCode);
+ if (VarSetOps::IsMember(compiler, genUpdateLiveSetForward(op2), varDsc->lvVarIndex))
+ {
+ noway_assert(compiler->opts.compDbgCode);
- /* The predictor might expect us to generate op2 directly
- into the var's register. However, since the variable is
- already alive, first kill it and its register. */
+ /* The predictor might expect us to generate op2 directly
+ into the var's register. However, since the variable is
+ already alive, first kill it and its register. */
- if (rpCanAsgOperWithoutReg(op2, true))
+ if (rpCanAsgOperWithoutReg(op2, true))
+ {
+ genUpdateLife(VarSetOps::RemoveElem(compiler, compiler->compCurLife, varDsc->lvVarIndex));
+ needReg = regSet.rsNarrowHint(needReg, genRegMask(op1Reg));
+#ifdef DEBUG
+ needToUpdateRegSetCheckLevel = true;
+#endif
+ }
+ }
+ else
{
- genUpdateLife(VarSetOps::RemoveElem(compiler, compiler->compCurLife, varDsc->lvVarIndex));
needReg = regSet.rsNarrowHint(needReg, genRegMask(op1Reg));
-#ifdef DEBUG
- needToUpdateRegSetCheckLevel = true;
-#endif
}
- }
- else
- {
- needReg = regSet.rsNarrowHint(needReg, genRegMask(op1Reg));
- }
#ifdef DEBUG
- /* Special cases: op2 is a GT_CNS_INT */
+ /* Special cases: op2 is a GT_CNS_INT */
- if (op2->gtOper == GT_CNS_INT && !(op1->gtFlags & GTF_VAR_DEATH))
- {
- /* Save the old life status */
+ if (op2->gtOper == GT_CNS_INT && !(op1->gtFlags & GTF_VAR_DEATH))
+ {
+ /* Save the old life status */
- VarSetOps::Assign(compiler, genTempOldLife, compiler->compCurLife);
- VarSetOps::AddElemD(compiler, compiler->compCurLife, varDsc->lvVarIndex);
+ VarSetOps::Assign(compiler, genTempOldLife, compiler->compCurLife);
+ VarSetOps::AddElemD(compiler, compiler->compCurLife, varDsc->lvVarIndex);
- /* Set a flag to avoid printing the message
- and remember that life was changed. */
+ /* Set a flag to avoid printing the message
+ and remember that life was changed. */
- genTempLiveChg = false;
- }
+ genTempLiveChg = false;
+ }
#endif
-#ifdef DEBUG
- if (needToUpdateRegSetCheckLevel)
- compiler->compRegSetCheckLevel++;
-#endif
- genCodeForTree(op2, needReg, genRegMask(op1Reg));
-#ifdef DEBUG
- if (needToUpdateRegSetCheckLevel)
- compiler->compRegSetCheckLevel--;
- noway_assert(compiler->compRegSetCheckLevel>=0);
-#endif
- noway_assert(op2->gtFlags & GTF_REG_VAL);
+#ifdef DEBUG
+ if (needToUpdateRegSetCheckLevel)
+ compiler->compRegSetCheckLevel++;
+#endif
+ genCodeForTree(op2, needReg, genRegMask(op1Reg));
+#ifdef DEBUG
+ if (needToUpdateRegSetCheckLevel)
+ compiler->compRegSetCheckLevel--;
+ noway_assert(compiler->compRegSetCheckLevel >= 0);
+#endif
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
- /* Make sure the value ends up in the right place ... */
+ /* Make sure the value ends up in the right place ... */
- if (op2->gtRegNum != op1Reg)
- {
- /* Make sure the target of the store is available */
+ if (op2->gtRegNum != op1Reg)
+ {
+ /* Make sure the target of the store is available */
- if (regSet.rsMaskUsed & genRegMask(op1Reg))
- regSet.rsSpillReg(op1Reg);
+ if (regSet.rsMaskUsed & genRegMask(op1Reg))
+ regSet.rsSpillReg(op1Reg);
#ifdef _TARGET_ARM_
- if (op1->TypeGet() == TYP_FLOAT)
- {
- // This can only occur when we are returning a non-HFA struct
- // that is composed of a single float field.
- //
- inst_RV_RV(INS_vmov_i2f, op1Reg, op2->gtRegNum, op1->TypeGet());
- }
- else
+ if (op1->TypeGet() == TYP_FLOAT)
+ {
+ // This can only occur when we are returning a non-HFA struct
+ // that is composed of a single float field.
+ //
+ inst_RV_RV(INS_vmov_i2f, op1Reg, op2->gtRegNum, op1->TypeGet());
+ }
+ else
#endif // _TARGET_ARM_
- {
- inst_RV_RV(INS_mov, op1Reg, op2->gtRegNum, op1->TypeGet());
- }
-
- /* The value has been transferred to 'op1Reg' */
+ {
+ inst_RV_RV(INS_mov, op1Reg, op2->gtRegNum, op1->TypeGet());
+ }
- regTracker.rsTrackRegCopy (op1Reg, op2->gtRegNum);
+ /* The value has been transferred to 'op1Reg' */
- if ((genRegMask(op2->gtRegNum) & regSet.rsMaskUsed) == 0)
- gcInfo.gcMarkRegSetNpt(genRegMask(op2->gtRegNum));
+ regTracker.rsTrackRegCopy(op1Reg, op2->gtRegNum);
- gcInfo.gcMarkRegPtrVal(op1Reg, tree->TypeGet());
- }
- else
- {
- // First we need to remove it from the original reg set mask (or else trigger an
- // assert when we add it to the other reg set mask).
- gcInfo.gcMarkRegSetNpt(genRegMask(op1Reg));
- gcInfo.gcMarkRegPtrVal(op1Reg, tree->TypeGet());
+ if ((genRegMask(op2->gtRegNum) & regSet.rsMaskUsed) == 0)
+ gcInfo.gcMarkRegSetNpt(genRegMask(op2->gtRegNum));
- // The emitter has logic that tracks the GCness of registers and asserts if you
- // try to do bad things to a GC pointer (like lose its GCness).
-
- // An explict cast of a GC pointer to an int (which is legal if the
- // pointer is pinned) is encoded as an assignment of a GC source
- // to a integer variable. Unfortunately if the source was the last
- // use, and the source register gets reused by the destination, no
- // code gets emitted (That is where we are at right now). The emitter
- // thinks the register is a GC pointer (it did not see the cast).
- // This causes asserts, as well as bad GC info since we will continue
- // to report the register as a GC pointer even if we do arithmetic
- // with it. So force the emitter to see the change in the type
- // of variable by placing a label.
- // We only have to do this check at this point because in the
- // CAST morphing, we create a temp and assignment whenever we
- // have a cast that loses its GCness.
-
- if (varTypeGCtype(op2->TypeGet()) != varTypeGCtype(op1->TypeGet()))
+ gcInfo.gcMarkRegPtrVal(op1Reg, tree->TypeGet());
+ }
+ else
{
- void* label = getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+ // First we need to remove it from the original reg set mask (or else trigger an
+ // assert when we add it to the other reg set mask).
+ gcInfo.gcMarkRegSetNpt(genRegMask(op1Reg));
+ gcInfo.gcMarkRegPtrVal(op1Reg, tree->TypeGet());
+
+ // The emitter has logic that tracks the GCness of registers and asserts if you
+ // try to do bad things to a GC pointer (like lose its GCness).
+
+ // An explicit cast of a GC pointer to an int (which is legal if the
+ // pointer is pinned) is encoded as an assignment of a GC source
+ // to an integer variable. Unfortunately, if the source was the last
+ // use, and the source register gets reused by the destination, no
+ // code gets emitted (That is where we are at right now). The emitter
+ // thinks the register is a GC pointer (it did not see the cast).
+ // This causes asserts, as well as bad GC info since we will continue
+ // to report the register as a GC pointer even if we do arithmetic
+ // with it. So force the emitter to see the change in the type
+ // of variable by placing a label.
+ // We only have to do this check at this point because in the
+ // CAST morphing, we create a temp and assignment whenever we
+ // have a cast that loses its GCness.
+
+ if (varTypeGCtype(op2->TypeGet()) != varTypeGCtype(op1->TypeGet()))
+ {
+ void* label = getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur);
+ }
}
- }
-
- addrReg = 0;
+ addrReg = 0;
- genCodeForTreeSmpOpAsg_DONE_ASSG(tree, addrReg, op1Reg, ovfl);
- goto LExit;
+ genCodeForTreeSmpOpAsg_DONE_ASSG(tree, addrReg, op1Reg, ovfl);
+ goto LExit;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
- // We only use GT_LCL_FLD for lvDoNotEnregister vars, so we don't have
- // to worry about it being enregistered.
- noway_assert(compiler->lvaTable[op1->gtLclFld.gtLclNum].lvRegister == 0);
- break;
+ // We only use GT_LCL_FLD for lvDoNotEnregister vars, so we don't have
+ // to worry about it being enregistered.
+ noway_assert(compiler->lvaTable[op1->gtLclFld.gtLclNum].lvRegister == 0);
+ break;
- case GT_CLS_VAR:
+ case GT_CLS_VAR:
- __fallthrough;
+ __fallthrough;
- case GT_IND:
- case GT_NULLCHECK:
+ case GT_IND:
+ case GT_NULLCHECK:
- assert((op1->OperGet() == GT_CLS_VAR) || (op1->OperGet() == GT_IND));
+ assert((op1->OperGet() == GT_CLS_VAR) || (op1->OperGet() == GT_IND));
- if (op1->gtFlags & GTF_IND_VOLATILE)
- {
- volat = true;
- }
+ if (op1->gtFlags & GTF_IND_VOLATILE)
+ {
+ volat = true;
+ }
- break;
+ break;
- default:
- break;
+ default:
+ break;
}
/* Is the value being assigned a simple one? */
@@ -11716,461 +11481,466 @@ REG_VAR2:
noway_assert(op2);
switch (op2->gtOper)
{
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
- if (!genMarkLclVar(op2))
- goto SMALL_ASG;
+ if (!genMarkLclVar(op2))
+ goto SMALL_ASG;
- __fallthrough;
+ __fallthrough;
- case GT_REG_VAR:
+ case GT_REG_VAR:
- /* Is the target a byte/short/char value? */
+ /* Is the target a byte/short/char value? */
- if (varTypeIsSmall(op1->TypeGet()))
- goto SMALL_ASG;
+ if (varTypeIsSmall(op1->TypeGet()))
+ goto SMALL_ASG;
- if (tree->gtFlags & GTF_REVERSE_OPS)
- goto SMALL_ASG;
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ goto SMALL_ASG;
- /* Make the target addressable */
+ /* Make the target addressable */
- op1 = genCodeForCommaTree(op1); // Strip away comma expressions.
+ op1 = genCodeForCommaTree(op1); // Strip away comma expressions.
- addrReg = genMakeAddressable(op1, needReg, RegSet::KEEP_REG, true);
+ addrReg = genMakeAddressable(op1, needReg, RegSet::KEEP_REG, true);
- /* Does the write barrier helper do the assignment? */
+ /* Does the write barrier helper do the assignment? */
- regGC = WriteBarrier(op1, op2, addrReg);
+ regGC = WriteBarrier(op1, op2, addrReg);
- // Was assignment done by the WriteBarrier
- if (regGC == RBM_NONE)
- {
-#ifdef _TARGET_ARM_
- if (volat)
+ // Was assignment done by the WriteBarrier
+ if (regGC == RBM_NONE)
{
- // Emit a memory barrier instruction before the store
- instGen_MemoryBarrier();
- }
+#ifdef _TARGET_ARM_
+ if (volat)
+ {
+ // Emit a memory barrier instruction before the store
+ instGen_MemoryBarrier();
+ }
#endif
- /* Move the value into the target */
+ /* Move the value into the target */
- inst_TT_RV(ins_Store(op1->TypeGet()), op1, op2->gtRegVar.gtRegNum);
+ inst_TT_RV(ins_Store(op1->TypeGet()), op1, op2->gtRegVar.gtRegNum);
- // This is done in WriteBarrier when (regGC != RBM_NONE)
+ // This is done in WriteBarrier when (regGC != RBM_NONE)
- /* Free up anything that was tied up by the LHS */
- genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
- }
-
- /* Free up the RHS */
- genUpdateLife(op2);
+ /* Free up anything that was tied up by the LHS */
+ genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
+ }
- /* Remember that we've also touched the op2 register */
+ /* Free up the RHS */
+ genUpdateLife(op2);
- addrReg |= genRegMask(op2->gtRegVar.gtRegNum);
- break;
+ /* Remember that we've also touched the op2 register */
+ addrReg |= genRegMask(op2->gtRegVar.gtRegNum);
+ break;
- case GT_CNS_INT:
+ case GT_CNS_INT:
- ssize_t ival; ival = op2->gtIntCon.gtIconVal;
- emitAttr size; size = emitTypeSize(tree->TypeGet());
+ ssize_t ival;
+ ival = op2->gtIntCon.gtIconVal;
+ emitAttr size;
+ size = emitTypeSize(tree->TypeGet());
- ins = ins_Store(op1->TypeGet());
+ ins = ins_Store(op1->TypeGet());
- // If we are storing a constant into a local variable
- // we extend the size of the store here
- // this normally takes place in CodeGen::inst_TT_IV on x86.
- //
- if ((op1->gtOper == GT_LCL_VAR) && (size < EA_4BYTE))
- {
- unsigned varNum = op1->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = compiler->lvaTable + varNum;
-
- // Fix the immediate by sign extending if needed
- if (!varTypeIsUnsigned(varDsc->TypeGet()))
+ // If we are storing a constant into a local variable
+ // we extend the size of the store here
+ // this normally takes place in CodeGen::inst_TT_IV on x86.
+ //
+ if ((op1->gtOper == GT_LCL_VAR) && (size < EA_4BYTE))
{
- if (size == EA_1BYTE)
+ unsigned varNum = op1->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
+
+ // Fix the immediate by sign extending if needed
+ if (!varTypeIsUnsigned(varDsc->TypeGet()))
{
- if ((ival & 0x7f) != ival)
- ival = ival | 0xffffff00;
+ if (size == EA_1BYTE)
+ {
+ if ((ival & 0x7f) != ival)
+ ival = ival | 0xffffff00;
+ }
+ else
+ {
+ assert(size == EA_2BYTE);
+ if ((ival & 0x7fff) != ival)
+ ival = ival | 0xffff0000;
+ }
}
- else
+
+ // A local stack slot is at least 4 bytes in size, regardless of
+ // what the local var is typed as, so auto-promote it here
+ // unless it is a field of a promoted struct
+ if (!varDsc->lvIsStructField)
{
- assert(size == EA_2BYTE);
- if ((ival & 0x7fff) != ival)
- ival = ival | 0xffff0000;
+ size = EA_SET_SIZE(size, EA_4BYTE);
+ ins = ins_Store(TYP_INT);
}
}
- // A local stack slot is at least 4 bytes in size, regardless of
- // what the local var is typed as, so auto-promote it here
- // unless it is a field of a promoted struct
- if (!varDsc->lvIsStructField)
- {
- size = EA_SET_SIZE(size, EA_4BYTE);
- ins = ins_Store(TYP_INT);
- }
- }
-
- /* Make the target addressable */
+ /* Make the target addressable */
- addrReg = genMakeAddressable(op1, needReg, RegSet::KEEP_REG, true);
+ addrReg = genMakeAddressable(op1, needReg, RegSet::KEEP_REG, true);
#ifdef _TARGET_ARM_
- if (volat)
- {
- // Emit a memory barrier instruction before the store
- instGen_MemoryBarrier();
- }
+ if (volat)
+ {
+ // Emit a memory barrier instruction before the store
+ instGen_MemoryBarrier();
+ }
#endif
- /* Move the value into the target */
-
- noway_assert(op1->gtOper != GT_REG_VAR);
- if (compiler->opts.compReloc && op2->IsIconHandle())
- {
- /* The constant is actually a handle that may need relocation
- applied to it. genComputeReg will do the right thing (see
- code in genCodeForTreeConst), so we'll just call it to load
- the constant into a register. */
+ /* Move the value into the target */
- genComputeReg(op2, needReg & ~addrReg, RegSet::ANY_REG, RegSet::KEEP_REG);
- addrReg = genKeepAddressable(op1, addrReg, genRegMask(op2->gtRegNum));
- noway_assert(op2->gtFlags & GTF_REG_VAL);
- inst_TT_RV(ins, op1, op2->gtRegNum);
- genReleaseReg(op2);
- }
- else
- {
- regSet.rsLockUsedReg(addrReg);
+ noway_assert(op1->gtOper != GT_REG_VAR);
+ if (compiler->opts.compReloc && op2->IsIconHandle())
+ {
+ /* The constant is actually a handle that may need relocation
+ applied to it. genComputeReg will do the right thing (see
+ code in genCodeForTreeConst), so we'll just call it to load
+ the constant into a register. */
+ genComputeReg(op2, needReg & ~addrReg, RegSet::ANY_REG, RegSet::KEEP_REG);
+ addrReg = genKeepAddressable(op1, addrReg, genRegMask(op2->gtRegNum));
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
+ inst_TT_RV(ins, op1, op2->gtRegNum);
+ genReleaseReg(op2);
+ }
+ else
+ {
+ regSet.rsLockUsedReg(addrReg);
#if REDUNDANT_LOAD
- bool copyIconFromReg = true;
- regNumber iconReg = REG_NA;
+ bool copyIconFromReg = true;
+ regNumber iconReg = REG_NA;
#ifdef _TARGET_ARM_
- // Only if the constant can't be encoded in a small instruction,
- // look for another register to copy the value from. (Assumes
- // target is a small register.)
- if ((op1->gtFlags & GTF_REG_VAL) &&
- !isRegPairType(tree->gtType) &&
- arm_Valid_Imm_For_Small_Mov(op1->gtRegNum, ival, INS_FLAGS_DONT_CARE))
- {
- copyIconFromReg = false;
- }
+ // Only if the constant can't be encoded in a small instruction,
+ // look for another register to copy the value from. (Assumes
+ // target is a small register.)
+ if ((op1->gtFlags & GTF_REG_VAL) && !isRegPairType(tree->gtType) &&
+ arm_Valid_Imm_For_Small_Mov(op1->gtRegNum, ival, INS_FLAGS_DONT_CARE))
+ {
+ copyIconFromReg = false;
+ }
#endif // _TARGET_ARM_
- if (copyIconFromReg)
- {
- iconReg = regTracker.rsIconIsInReg(ival);
- if (iconReg == REG_NA)
- copyIconFromReg = false;
- }
+ if (copyIconFromReg)
+ {
+ iconReg = regTracker.rsIconIsInReg(ival);
+ if (iconReg == REG_NA)
+ copyIconFromReg = false;
+ }
- if (copyIconFromReg &&
- (isByteReg(iconReg) || (genTypeSize(tree->TypeGet()) == EA_PTRSIZE) || (genTypeSize(tree->TypeGet()) == EA_4BYTE)))
- {
- /* Move the value into the target */
+ if (copyIconFromReg && (isByteReg(iconReg) || (genTypeSize(tree->TypeGet()) == EA_PTRSIZE) ||
+ (genTypeSize(tree->TypeGet()) == EA_4BYTE)))
+ {
+ /* Move the value into the target */
- inst_TT_RV(ins, op1, iconReg, 0, size);
- }
- else
+ inst_TT_RV(ins, op1, iconReg, 0, size);
+ }
+ else
#endif // REDUNDANT_LOAD
- {
- inst_TT_IV(ins, op1, ival, 0, size);
- }
-
- regSet.rsUnlockUsedReg(addrReg);
- }
+ {
+ inst_TT_IV(ins, op1, ival, 0, size);
+ }
- /* Free up anything that was tied up by the LHS */
+ regSet.rsUnlockUsedReg(addrReg);
+ }
- genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
- break;
+ /* Free up anything that was tied up by the LHS */
- default:
+ genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
+ break;
-SMALL_ASG:
+ default:
- bool isWriteBarrier = false;
- regMaskTP needRegOp1 = RBM_ALLINT;
- RegSet::ExactReg mustReg = RegSet::ANY_REG; // set to RegSet::EXACT_REG for op1 and NOGC helpers
+ SMALL_ASG:
- /* Is the LHS more complex than the RHS? */
+ bool isWriteBarrier = false;
+ regMaskTP needRegOp1 = RBM_ALLINT;
+ RegSet::ExactReg mustReg = RegSet::ANY_REG; // set to RegSet::EXACT_REG for op1 and NOGC helpers
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- /* Is the target a byte/short/char value? */
+ /* Is the LHS more complex than the RHS? */
- if (varTypeIsSmall(op1->TypeGet()))
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
- noway_assert(op1->gtOper != GT_LCL_VAR ||
- (op1->gtFlags & GTF_VAR_CAST) ||
- // TODO: Why does this have to be true?
- compiler->lvaTable[op1->gtLclVarCommon.gtLclNum].lvIsStructField ||
- compiler->lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad());
+ /* Is the target a byte/short/char value? */
- if (op2->gtOper == GT_CAST && !op2->gtOverflow())
+ if (varTypeIsSmall(op1->TypeGet()))
{
- /* Special case: cast to small type */
+ noway_assert(op1->gtOper != GT_LCL_VAR || (op1->gtFlags & GTF_VAR_CAST) ||
+ // TODO: Why does this have to be true?
+ compiler->lvaTable[op1->gtLclVarCommon.gtLclNum].lvIsStructField ||
+ compiler->lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad());
- if (op2->CastToType() >= op1->gtType)
+ if (op2->gtOper == GT_CAST && !op2->gtOverflow())
{
- /* Make sure the cast operand is not > int */
+ /* Special case: cast to small type */
- if (op2->CastFromType() <= TYP_INT)
+ if (op2->CastToType() >= op1->gtType)
{
- /* Cast via a non-smaller type */
+ /* Make sure the cast operand is not > int */
- op2 = op2->gtCast.CastOp();
+ if (op2->CastFromType() <= TYP_INT)
+ {
+ /* Cast via a non-smaller type */
+
+ op2 = op2->gtCast.CastOp();
+ }
}
}
- }
- if (op2->gtOper == GT_AND &&
- op2->gtOp.gtOp2->gtOper == GT_CNS_INT)
- {
- unsigned mask;
- switch (op1->gtType)
+ if (op2->gtOper == GT_AND && op2->gtOp.gtOp2->gtOper == GT_CNS_INT)
{
- case TYP_BYTE : mask = 0x000000FF; break;
- case TYP_SHORT: mask = 0x0000FFFF; break;
- case TYP_CHAR : mask = 0x0000FFFF; break;
- default: goto SIMPLE_SMALL;
- }
+ unsigned mask;
+ switch (op1->gtType)
+ {
+ case TYP_BYTE:
+ mask = 0x000000FF;
+ break;
+ case TYP_SHORT:
+ mask = 0x0000FFFF;
+ break;
+ case TYP_CHAR:
+ mask = 0x0000FFFF;
+ break;
+ default:
+ goto SIMPLE_SMALL;
+ }
- if (unsigned(op2->gtOp.gtOp2->gtIntCon.gtIconVal) == mask)
- {
- /* Redundant AND */
+ if (unsigned(op2->gtOp.gtOp2->gtIntCon.gtIconVal) == mask)
+ {
+ /* Redundant AND */
- op2 = op2->gtOp.gtOp1;
+ op2 = op2->gtOp.gtOp1;
+ }
}
- }
/* Must get the new value into a byte register */
-SIMPLE_SMALL:
+ SIMPLE_SMALL:
if (varTypeIsByte(op1->TypeGet()))
genComputeReg(op2, RBM_BYTE_REGS, RegSet::EXACT_REG, RegSet::KEEP_REG);
else
goto NOT_SMALL;
- }
- else
- {
-NOT_SMALL:
- /* Generate the RHS into a register */
-
- isWriteBarrier = gcInfo.gcIsWriteBarrierAsgNode(tree);
- if (isWriteBarrier)
+ }
+ else
{
-#if NOGC_WRITE_BARRIERS
- // Exclude the REG_WRITE_BARRIER from op2's needReg mask
- needReg = Target::exclude_WriteBarrierReg(needReg);
- mustReg = RegSet::EXACT_REG;
-#else // !NOGC_WRITE_BARRIERS
- // This code should be generic across architectures.
+ NOT_SMALL:
+ /* Generate the RHS into a register */
- // For the standard JIT Helper calls
- // op1 goes into REG_ARG_0 and
- // op2 goes into REG_ARG_1
- //
- needRegOp1 = RBM_ARG_0;
- needReg = RBM_ARG_1;
+ isWriteBarrier = gcInfo.gcIsWriteBarrierAsgNode(tree);
+ if (isWriteBarrier)
+ {
+#if NOGC_WRITE_BARRIERS
+ // Exclude the REG_WRITE_BARRIER from op2's needReg mask
+ needReg = Target::exclude_WriteBarrierReg(needReg);
+ mustReg = RegSet::EXACT_REG;
+#else // !NOGC_WRITE_BARRIERS
+ // This code should be generic across architectures.
+
+ // For the standard JIT Helper calls
+ // op1 goes into REG_ARG_0 and
+ // op2 goes into REG_ARG_1
+ //
+ needRegOp1 = RBM_ARG_0;
+ needReg = RBM_ARG_1;
#endif // !NOGC_WRITE_BARRIERS
+ }
+ genComputeReg(op2, needReg, mustReg, RegSet::KEEP_REG);
}
- genComputeReg(op2, needReg, mustReg, RegSet::KEEP_REG);
- }
- noway_assert(op2->gtFlags & GTF_REG_VAL);
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
- /* Make the target addressable */
+ /* Make the target addressable */
- op1 = genCodeForCommaTree(op1); // Strip off any comma expressions.
- addrReg = genMakeAddressable(op1, needRegOp1, RegSet::KEEP_REG, true);
-
- /* Make sure the RHS register hasn't been spilled;
- keep the register marked as "used", otherwise
- we might get the pointer lifetimes wrong.
- */
+ op1 = genCodeForCommaTree(op1); // Strip off any comma expressions.
+ addrReg = genMakeAddressable(op1, needRegOp1, RegSet::KEEP_REG, true);
- if (varTypeIsByte(op1->TypeGet()))
- needReg = regSet.rsNarrowHint(RBM_BYTE_REGS, needReg);
+ /* Make sure the RHS register hasn't been spilled;
+ keep the register marked as "used", otherwise
+ we might get the pointer lifetimes wrong.
+ */
- genRecoverReg(op2, needReg, RegSet::KEEP_REG);
- noway_assert(op2->gtFlags & GTF_REG_VAL);
+ if (varTypeIsByte(op1->TypeGet()))
+ needReg = regSet.rsNarrowHint(RBM_BYTE_REGS, needReg);
- /* Lock the RHS temporarily (lock only already used) */
+ genRecoverReg(op2, needReg, RegSet::KEEP_REG);
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
- regSet.rsLockUsedReg(genRegMask(op2->gtRegNum));
+ /* Lock the RHS temporarily (lock only already used) */
- /* Make sure the LHS is still addressable */
+ regSet.rsLockUsedReg(genRegMask(op2->gtRegNum));
- addrReg = genKeepAddressable(op1, addrReg);
+ /* Make sure the LHS is still addressable */
- /* We can unlock (only already used ) the RHS register */
+ addrReg = genKeepAddressable(op1, addrReg);
- regSet.rsUnlockUsedReg(genRegMask(op2->gtRegNum));
+ /* We can unlock (only already used ) the RHS register */
- /* Does the write barrier helper do the assignment? */
+ regSet.rsUnlockUsedReg(genRegMask(op2->gtRegNum));
- regGC = WriteBarrier(op1, op2, addrReg);
+ /* Does the write barrier helper do the assignment? */
- if (regGC != 0)
- {
- // Yes, assignment done by the WriteBarrier
- noway_assert(isWriteBarrier);
- }
- else
- {
-#ifdef _TARGET_ARM_
- if (volat)
+ regGC = WriteBarrier(op1, op2, addrReg);
+
+ if (regGC != 0)
{
- // Emit a memory barrier instruction before the store
- instGen_MemoryBarrier();
+ // Yes, assignment done by the WriteBarrier
+ noway_assert(isWriteBarrier);
}
+ else
+ {
+#ifdef _TARGET_ARM_
+ if (volat)
+ {
+ // Emit a memory barrier instruction before the store
+ instGen_MemoryBarrier();
+ }
#endif
- /* Move the value into the target */
+ /* Move the value into the target */
- inst_TT_RV(ins_Store(op1->TypeGet()), op1, op2->gtRegNum);
- }
+ inst_TT_RV(ins_Store(op1->TypeGet()), op1, op2->gtRegNum);
+ }
#ifdef DEBUG
- /* Update the current liveness info */
- if (compiler->opts.varNames) genUpdateLife(tree);
+ /* Update the current liveness info */
+ if (compiler->opts.varNames)
+ genUpdateLife(tree);
#endif
- // If op2 register is still in use, free it. (Might not be in use, if
- // a full-call write barrier was done, and the register was a caller-saved
- // register.)
- regMaskTP op2RM = genRegMask(op2->gtRegNum);
- if (op2RM & regSet.rsMaskUsed) regSet.rsMarkRegFree(genRegMask(op2->gtRegNum));
+ // If op2 register is still in use, free it. (Might not be in use, if
+ // a full-call write barrier was done, and the register was a caller-saved
+ // register.)
+ regMaskTP op2RM = genRegMask(op2->gtRegNum);
+ if (op2RM & regSet.rsMaskUsed)
+ regSet.rsMarkRegFree(genRegMask(op2->gtRegNum));
- // This is done in WriteBarrier when (regGC != 0)
- if (regGC == 0)
- {
- /* Free up anything that was tied up by the LHS */
- genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
+ // This is done in WriteBarrier when (regGC != 0)
+ if (regGC == 0)
+ {
+ /* Free up anything that was tied up by the LHS */
+ genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
+ }
}
- }
- else
- {
- /* Make the target addressable */
+ else
+ {
+ /* Make the target addressable */
- isWriteBarrier = gcInfo.gcIsWriteBarrierAsgNode(tree);
+ isWriteBarrier = gcInfo.gcIsWriteBarrierAsgNode(tree);
- if (isWriteBarrier)
- {
+ if (isWriteBarrier)
+ {
#if NOGC_WRITE_BARRIERS
- /* Try to avoid RBM_TMP_0 */
- needRegOp1 = regSet.rsNarrowHint(needRegOp1, ~RBM_TMP_0);
- mustReg = RegSet::EXACT_REG; // For op2
-#else // !NOGC_WRITE_BARRIERS
- // This code should be generic across architectures.
-
- // For the standard JIT Helper calls
- // op1 goes into REG_ARG_0 and
- // op2 goes into REG_ARG_1
- //
- needRegOp1 = RBM_ARG_0;
- needReg = RBM_ARG_1;
- mustReg = RegSet::EXACT_REG; // For op2
-#endif // !NOGC_WRITE_BARRIERS
- }
+ /* Try to avoid RBM_TMP_0 */
+ needRegOp1 = regSet.rsNarrowHint(needRegOp1, ~RBM_TMP_0);
+ mustReg = RegSet::EXACT_REG; // For op2
+#else // !NOGC_WRITE_BARRIERS
+ // This code should be generic across architectures.
+
+ // For the standard JIT Helper calls
+ // op1 goes into REG_ARG_0 and
+ // op2 goes into REG_ARG_1
+ //
+ needRegOp1 = RBM_ARG_0;
+ needReg = RBM_ARG_1;
+ mustReg = RegSet::EXACT_REG; // For op2
+#endif // !NOGC_WRITE_BARRIERS
+ }
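For context on the helper-call path configured just above: when NOGC_WRITE_BARRIERS is not available, the target address is forced into REG_ARG_0 and the new reference into REG_ARG_1, and WriteBarrier() emits a call that performs the store itself (the regGC != 0 case handled below). The following standalone sketch is illustrative only, not CoreCLR code; the helper name, the card-table globals and the 512-byte card size are assumptions made for the example.

    // Hedged sketch of what a GC write-barrier helper conceptually does; all names are invented.
    #include <cstddef>
    #include <cstdint>

    static uint8_t g_heap[512 * 1024];                // stand-in for the GC heap
    static uint8_t g_cardTable[sizeof(g_heap) >> 9];  // one card byte per 512 heap bytes

    extern "C" void WriteBarrier_Sketch(void** dst /* REG_ARG_0 */, void* ref /* REG_ARG_1 */)
    {
        *dst = ref;                                          // the helper performs the assignment itself
        size_t offset = (size_t)((uint8_t*)dst - g_heap);    // assumes dst points into g_heap
        if (offset < sizeof(g_heap))
            g_cardTable[offset >> 9] = 0xFF;                 // dirty the card so the GC rescans this slot
    }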
+
+ needRegOp1 = regSet.rsNarrowHint(needRegOp1, ~op2->gtRsvdRegs);
- needRegOp1 = regSet.rsNarrowHint(needRegOp1, ~op2->gtRsvdRegs);
+ op1 = genCodeForCommaTree(op1); // Strip away any comma expression.
- op1 = genCodeForCommaTree(op1); // Strip away any comma expression.
+ addrReg = genMakeAddressable(op1, needRegOp1, RegSet::KEEP_REG, true);
- addrReg = genMakeAddressable(op1,
- needRegOp1,
- RegSet::KEEP_REG, true);
-
#if CPU_HAS_BYTE_REGS
- /* Is the target a byte value? */
- if (varTypeIsByte(op1->TypeGet()))
- {
- /* Must get the new value into a byte register */
- needReg = regSet.rsNarrowHint(RBM_BYTE_REGS, needReg);
- mustReg = RegSet::EXACT_REG;
+ /* Is the target a byte value? */
+ if (varTypeIsByte(op1->TypeGet()))
+ {
+ /* Must get the new value into a byte register */
+ needReg = regSet.rsNarrowHint(RBM_BYTE_REGS, needReg);
+ mustReg = RegSet::EXACT_REG;
- if (op2->gtType >= op1->gtType)
- op2->gtFlags |= GTF_SMALL_OK;
- }
+ if (op2->gtType >= op1->gtType)
+ op2->gtFlags |= GTF_SMALL_OK;
+ }
#endif
#if NOGC_WRITE_BARRIERS
- /* For WriteBarrier we can't use REG_WRITE_BARRIER */
- if (isWriteBarrier)
- needReg = Target::exclude_WriteBarrierReg(needReg);
+ /* For WriteBarrier we can't use REG_WRITE_BARRIER */
+ if (isWriteBarrier)
+ needReg = Target::exclude_WriteBarrierReg(needReg);
- /* Also avoid using the previously computed addrReg(s) */
- bestReg = regSet.rsNarrowHint(needReg, ~addrReg);
+ /* Also avoid using the previously computed addrReg(s) */
+ bestReg = regSet.rsNarrowHint(needReg, ~addrReg);
- /* If we have a reg available to grab then use bestReg */
- if (bestReg & regSet.rsRegMaskCanGrab())
- needReg = bestReg;
+ /* If we have a reg available to grab then use bestReg */
+ if (bestReg & regSet.rsRegMaskCanGrab())
+ needReg = bestReg;
- mustReg = RegSet::EXACT_REG;
+ mustReg = RegSet::EXACT_REG;
#endif
- /* Generate the RHS into a register */
- genComputeReg(op2, needReg, mustReg, RegSet::KEEP_REG);
- noway_assert(op2->gtFlags & GTF_REG_VAL);
-
- /* Make sure the target is still addressable */
- addrReg = genKeepAddressable(op1, addrReg, genRegMask(op2->gtRegNum));
- noway_assert(op2->gtFlags & GTF_REG_VAL);
+ /* Generate the RHS into a register */
+ genComputeReg(op2, needReg, mustReg, RegSet::KEEP_REG);
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
- /* Does the write barrier helper do the assignment? */
+ /* Make sure the target is still addressable */
+ addrReg = genKeepAddressable(op1, addrReg, genRegMask(op2->gtRegNum));
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
- regGC = WriteBarrier(op1, op2, addrReg);
+ /* Does the write barrier helper do the assignment? */
- if (regGC != 0)
- {
- // Yes, assignment done by the WriteBarrier
- noway_assert(isWriteBarrier);
- }
- else
- {
- assert(!isWriteBarrier);
+ regGC = WriteBarrier(op1, op2, addrReg);
-#ifdef _TARGET_ARM_
- if (volat)
+ if (regGC != 0)
{
- // Emit a memory barrier instruction before the store
- instGen_MemoryBarrier();
+ // Yes, assignment done by the WriteBarrier
+ noway_assert(isWriteBarrier);
}
+ else
+ {
+ assert(!isWriteBarrier);
+
+#ifdef _TARGET_ARM_
+ if (volat)
+ {
+ // Emit a memory barrier instruction before the store
+ instGen_MemoryBarrier();
+ }
#endif
- /* Move the value into the target */
+ /* Move the value into the target */
- inst_TT_RV(ins_Store(op1->TypeGet()), op1, op2->gtRegNum);
- }
+ inst_TT_RV(ins_Store(op1->TypeGet()), op1, op2->gtRegNum);
+ }
- /* The new value is no longer needed */
+ /* The new value is no longer needed */
- genReleaseReg(op2);
+ genReleaseReg(op2);
#ifdef DEBUG
- /* Update the current liveness info */
- if (compiler->opts.varNames) genUpdateLife(tree);
+ /* Update the current liveness info */
+ if (compiler->opts.varNames)
+ genUpdateLife(tree);
#endif
- // This is done in WriteBarrier when (regGC != 0)
- if (regGC == 0)
- {
- /* Free up anything that was tied up by the LHS */
- genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
+ // This is done in WriteBarrier when (regGC != 0)
+ if (regGC == 0)
+ {
+ /* Free up anything that was tied up by the LHS */
+ genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
+ }
}
- }
- addrReg = RBM_NONE;
- break;
+ addrReg = RBM_NONE;
+ break;
}
noway_assert(addrReg != DUMMY_INIT(RBM_CORRUPT));
@@ -12194,17 +11964,15 @@ LExit:
* Generate code to complete the assignment operation
*/
-void CodeGen::genCodeForTreeSmpOpAsg_DONE_ASSG(GenTreePtr tree,
- regMaskTP addrReg,
- regNumber reg,
- bool ovfl)
+void CodeGen::genCodeForTreeSmpOpAsg_DONE_ASSG(GenTreePtr tree, regMaskTP addrReg, regNumber reg, bool ovfl)
{
const var_types treeType = tree->TypeGet();
GenTreePtr op1 = tree->gtOp.gtOp1;
GenTreePtr op2 = tree->gtOp.gtOp2;
noway_assert(op2);
- if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_REG_VAR) genUpdateLife(op1);
+ if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_REG_VAR)
+ genUpdateLife(op1);
genUpdateLife(tree);
#if REDUNDANT_LOAD
@@ -12233,15 +12001,13 @@ void CodeGen::genCodeForTreeSmpOpAsg_DONE_ASSG(GenTreePtr tree,
we must have loaded it up from memory, done the increment,
checked for overflow, and then stored it back to memory */
- bool ovfCheckDone = (genTypeSize(op1->TypeGet()) < sizeof(int)) &&
- !(op1->gtFlags & GTF_REG_VAL);
+ bool ovfCheckDone = (genTypeSize(op1->TypeGet()) < sizeof(int)) && !(op1->gtFlags & GTF_REG_VAL);
if (!ovfCheckDone)
{
// For small sizes, reg should be set as we sign/zero extend it.
- noway_assert(genIsValidReg(reg) ||
- genTypeSize(treeType) == sizeof(int));
+ noway_assert(genIsValidReg(reg) || genTypeSize(treeType) == sizeof(int));
/* Currently we don't morph x=x+y into x+=y in try blocks
* if we need overflow check, as x+y may throw an exception.
@@ -12254,42 +12020,39 @@ void CodeGen::genCodeForTreeSmpOpAsg_DONE_ASSG(GenTreePtr tree,
}
}
-
/*****************************************************************************
*
* Generate code for a special op tree
*/
-void CodeGen::genCodeForTreeSpecialOp(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForTreeSpecialOp(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
- genTreeOps oper = tree->OperGet();
- regNumber reg = DUMMY_INIT(REG_CORRUPT);
- regMaskTP regs = regSet.rsMaskUsed;
+ genTreeOps oper = tree->OperGet();
+ regNumber reg = DUMMY_INIT(REG_CORRUPT);
+ regMaskTP regs = regSet.rsMaskUsed;
noway_assert((tree->OperKind() & (GTK_CONST | GTK_LEAF | GTK_SMPOP)) == 0);
- switch (oper)
+ switch (oper)
{
- case GT_CALL:
- regs = genCodeForCall(tree, true);
+ case GT_CALL:
+ regs = genCodeForCall(tree, true);
- /* If the result is in a register, make sure it ends up in the right place */
+ /* If the result is in a register, make sure it ends up in the right place */
- if (regs != RBM_NONE)
- {
- genMarkTreeInReg(tree, genRegNumFromMask(regs));
- }
+ if (regs != RBM_NONE)
+ {
+ genMarkTreeInReg(tree, genRegNumFromMask(regs));
+ }
- genUpdateLife(tree);
- return;
+ genUpdateLife(tree);
+ return;
- case GT_FIELD:
- NO_WAY("should not see this operator in this phase");
- break;
+ case GT_FIELD:
+ NO_WAY("should not see this operator in this phase");
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
{
#ifdef FEATURE_ENABLE_NO_RANGE_CHECKS
// MUST NEVER CHECK-IN WITH THIS ENABLED.
@@ -12298,33 +12061,29 @@ void CodeGen::genCodeForTreeSpecialOp(GenTreePtr tree,
#endif
genRangeCheck(tree);
}
- return;
+ return;
- case GT_ARR_ELEM:
- genCodeForTreeSmpOp_GT_ADDR(tree, destReg, bestReg);
- return;
+ case GT_ARR_ELEM:
+ genCodeForTreeSmpOp_GT_ADDR(tree, destReg, bestReg);
+ return;
- case GT_CMPXCHG:
+ case GT_CMPXCHG:
{
#if defined(_TARGET_XARCH_)
// cmpxchg does not have an [r/m32], imm32 encoding, so we need a register for the value operand
-
+
// Since this is a "call", evaluate the operands from right to left. Don't worry about spilling
// right now, just get the trees evaluated.
            // As a friendly reminder, IL args are evaluated left to right.
-
- GenTreePtr location = tree->gtCmpXchg.gtOpLocation; // arg1
- GenTreePtr value = tree->gtCmpXchg.gtOpValue; // arg2
- GenTreePtr comparand = tree->gtCmpXchg.gtOpComparand; // arg3
- regMaskTP addrReg;
-
- bool isAddr = genMakeIndAddrMode(location,
- tree,
- false, /* not for LEA */
- RBM_ALLINT,
- RegSet::KEEP_REG,
- &addrReg);
+
+ GenTreePtr location = tree->gtCmpXchg.gtOpLocation; // arg1
+ GenTreePtr value = tree->gtCmpXchg.gtOpValue; // arg2
+ GenTreePtr comparand = tree->gtCmpXchg.gtOpComparand; // arg3
+ regMaskTP addrReg;
+
+ bool isAddr = genMakeIndAddrMode(location, tree, false, /* not for LEA */
+ RBM_ALLINT, RegSet::KEEP_REG, &addrReg);
if (!isAddr)
{
@@ -12334,21 +12093,21 @@ void CodeGen::genCodeForTreeSpecialOp(GenTreePtr tree,
regSet.rsMarkRegUsed(location);
}
- // We must have a reg for the Value, but it doesn't really matter which register.
-
+ // We must have a reg for the Value, but it doesn't really matter which register.
+
            // Try to avoid EAX and the address register if possible.
genComputeReg(value, regSet.rsNarrowHint(RBM_ALLINT, RBM_EAX | addrReg), RegSet::ANY_REG, RegSet::KEEP_REG);
#ifdef DEBUG
// cmpxchg uses EAX as an implicit operand to hold the comparand
- // We're going to destroy EAX in this operation, so we better not be keeping
+ // We're going to destroy EAX in this operation, so we better not be keeping
// anything important in it.
if (RBM_EAX & regSet.rsMaskVars)
{
// We have a variable enregistered in EAX. Make sure it goes dead in this tree.
for (unsigned varNum = 0; varNum < compiler->lvaCount; ++varNum)
{
- const LclVarDsc & varDesc = compiler->lvaTable[varNum];
+ const LclVarDsc& varDesc = compiler->lvaTable[varNum];
if (!varDesc.lvIsRegCandidate())
continue;
if (!varDesc.lvRegister)
@@ -12369,14 +12128,14 @@ void CodeGen::genCodeForTreeSpecialOp(GenTreePtr tree,
#endif
genComputeReg(comparand, RBM_EAX, RegSet::EXACT_REG, RegSet::KEEP_REG);
- //By this point we've evaluated everything. However the odds are that we've spilled something by
- //now. Let's recover all the registers and force them to stay.
+ // By this point we've evaluated everything. However the odds are that we've spilled something by
+ // now. Let's recover all the registers and force them to stay.
- //Well, we just computed comparand, so it's still in EAX.
+ // Well, we just computed comparand, so it's still in EAX.
noway_assert(comparand->gtRegNum == REG_EAX);
regSet.rsLockUsedReg(RBM_EAX);
- //Stick it anywhere other than EAX.
+ // Stick it anywhere other than EAX.
genRecoverReg(value, ~RBM_EAX, RegSet::KEEP_REG);
reg = value->gtRegNum;
noway_assert(reg != REG_EAX);
@@ -12384,11 +12143,11 @@ void CodeGen::genCodeForTreeSpecialOp(GenTreePtr tree,
if (isAddr)
{
- addrReg = genKeepAddressable(/*location*/tree, addrReg, 0/*avoidMask*/);
+ addrReg = genKeepAddressable(/*location*/ tree, addrReg, 0 /*avoidMask*/);
}
else
{
- genRecoverReg(location, ~(RBM_EAX|genRegMask(reg)), RegSet::KEEP_REG);
+ genRecoverReg(location, ~(RBM_EAX | genRegMask(reg)), RegSet::KEEP_REG);
}
regSet.rsUnlockUsedReg(genRegMask(reg));
@@ -12409,7 +12168,7 @@ void CodeGen::genCodeForTreeSpecialOp(GenTreePtr tree,
genReleaseReg(value);
genReleaseReg(comparand);
- //EAX and the value register are both trashed at this point.
+ // EAX and the value register are both trashed at this point.
regTracker.rsTrackRegTrash(REG_EAX);
regTracker.rsTrackRegTrash(reg);
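As a reminder of what the GT_CMPXCHG sequence above is generating: x86 cmpxchg keeps the comparand in EAX and leaves the value observed at the location in EAX afterwards, which is why the comparand is computed into RBM_EAX and both EAX and the value register are tracked as trashed. A hedged, standalone C++ model of those semantics (CompareExchange_Sketch is an invented name; std::atomic is used only to stand in for the instruction):

    #include <atomic>

    // Returns the value observed at 'location' -- what EAX holds after 'lock cmpxchg'.
    long CompareExchange_Sketch(std::atomic<long>& location, long value, long comparand)
    {
        long expected = comparand;                          // comparand starts in EAX
        location.compare_exchange_strong(expected, value);  // on x86 this compiles to lock cmpxchg
        return expected;                                    // old value, whether or not the swap happened
    }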
@@ -12423,25 +12182,24 @@ void CodeGen::genCodeForTreeSpecialOp(GenTreePtr tree,
#endif
}
- default:
-#ifdef DEBUG
- compiler->gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ compiler->gtDispTree(tree);
#endif
- noway_assert(!"unexpected operator");
- NO_WAY("unexpected operator");
+ noway_assert(!"unexpected operator");
+ NO_WAY("unexpected operator");
}
noway_assert(reg != DUMMY_INIT(REG_CORRUPT));
genCodeForTree_DONE(tree, reg);
}
-
/*****************************************************************************
*
* Generate code for the given tree. tree->gtRegNum will be set to the
* register where the tree lives.
*
- * If 'destReg' is non-zero, we'll do our best to compute the value into a
+ * If 'destReg' is non-zero, we'll do our best to compute the value into a
* register that is in that register set.
* Use genComputeReg() if you need the tree in a specific register.
* Use genCompIntoFreeReg() if the register needs to be written to. Otherwise,
@@ -12452,13 +12210,11 @@ void CodeGen::genCodeForTreeSpecialOp(GenTreePtr tree,
*
* The GCness of the register will be properly set in gcInfo.gcRegGCrefSetCur/gcInfo.gcRegByrefSetCur.
*
- * The register will not be marked as used. Use regSet.rsMarkRegUsed() if the
+ * The register will not be marked as used. Use regSet.rsMarkRegUsed() if the
* register will not be consumed right away and could possibly be spilled.
*/
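A hedged usage sketch of the contract documented above; the tree and the masks are hypothetical, only the routine names are taken from the surrounding code:

    // 'destReg' is a preference, not a guarantee; the result register is tree->gtRegNum afterwards.
    // regMaskTP pref = RBM_EAX | RBM_EDX;
    // genCodeForTree(tree, pref);                // any register acceptable, these preferred
    // regNumber resultReg = tree->gtRegNum;      // where the value actually landed
    // regSet.rsMarkRegUsed(tree);                // only if the value is not consumed right away
    // genComputeReg(tree, RBM_EAX, RegSet::EXACT_REG, RegSet::KEEP_REG); // when one specific register is required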
-void CodeGen::genCodeForTree(GenTreePtr tree,
- regMaskTP destReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForTree(GenTreePtr tree, regMaskTP destReg, regMaskTP bestReg)
{
#if 0
if (compiler->verbose)
@@ -12475,25 +12231,25 @@ void CodeGen::genCodeForTree(GenTreePtr tree,
assert(tree->IsNodeProperlySized());
    // When assigning to an enregistered local variable we receive
- // a hint that we should target the register that is used to
+ // a hint that we should target the register that is used to
// hold the enregistered local variable.
// When receiving this hint both destReg and bestReg masks are set
// to the register that is used by the enregistered local variable.
- //
+ //
    // However it is possible for a different local variable
// targeting the same register to become alive (and later die)
// as we descend the expression tree.
- //
- // To handle such cases we will remove any registers that are alive from the
+ //
+ // To handle such cases we will remove any registers that are alive from the
// both the destReg and bestReg masks.
- //
+ //
regMaskTP liveMask = genLiveMask(tree);
// This removes any registers used to hold enregistered locals
// from the destReg and bestReg masks.
// After this either mask could become 0
- //
- destReg &= ~liveMask;
+ //
+ destReg &= ~liveMask;
bestReg &= ~liveMask;
/* 'destReg' of 0 really means 'any' */
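A hedged, self-contained illustration of the masking performed just above; the register bits and the scenario are invented, only the &= ~liveMask step mirrors the code:

    #include <cstdio>

    int main()
    {
        typedef unsigned regMask;                        // stand-in for regMaskTP
        const regMask RBM_ESI = 0x40, RBM_EDI = 0x80;    // illustrative bit positions only

        regMask destReg = RBM_ESI, bestReg = RBM_ESI;    // hint: the enregistered local's register
        regMask liveMask = RBM_ESI | RBM_EDI;            // a different local is currently live in ESI
        destReg &= ~liveMask;                            // both hints drop to 0 ...
        bestReg &= ~liveMask;
        printf("destReg=%#x bestReg=%#x\n", destReg, bestReg); // ... and 0 is treated as "any register"
        return 0;
    }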
@@ -12507,41 +12263,40 @@ void CodeGen::genCodeForTree(GenTreePtr tree,
switch (tree->TypeGet())
{
- case TYP_LONG:
-#if ! CPU_HAS_FP_SUPPORT
- case TYP_DOUBLE:
+ case TYP_LONG:
+#if !CPU_HAS_FP_SUPPORT
+ case TYP_DOUBLE:
#endif
- genCodeForTreeLng(tree, destReg, /*avoidReg*/RBM_NONE);
- return;
-
+ genCodeForTreeLng(tree, destReg, /*avoidReg*/ RBM_NONE);
+ return;
#if CPU_HAS_FP_SUPPORT
- case TYP_FLOAT:
- case TYP_DOUBLE:
-
- // For comma nodes, we'll get back here for the last node in the comma list.
- if (tree->gtOper != GT_COMMA)
- {
- genCodeForTreeFlt(tree, RBM_ALLFLOAT, RBM_ALLFLOAT & (destReg | bestReg));
- return;
- }
- break;
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+
+ // For comma nodes, we'll get back here for the last node in the comma list.
+ if (tree->gtOper != GT_COMMA)
+ {
+ genCodeForTreeFlt(tree, RBM_ALLFLOAT, RBM_ALLFLOAT & (destReg | bestReg));
+ return;
+ }
+ break;
#endif
#ifdef DEBUG
- case TYP_UINT:
- case TYP_ULONG:
- noway_assert(!"These types are only used as markers in GT_CAST nodes");
- break;
+ case TYP_UINT:
+ case TYP_ULONG:
+ noway_assert(!"These types are only used as markers in GT_CAST nodes");
+ break;
#endif
- default:
- break;
+ default:
+ break;
}
/* Is the value already in a register? */
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
genCodeForTree_REG_VAR1(tree);
return;
@@ -12555,7 +12310,7 @@ void CodeGen::genCodeForTree(GenTreePtr tree,
unsigned kind = tree->OperKind();
- if (kind & GTK_CONST)
+ if (kind & GTK_CONST)
{
/* Handle constant nodes */
@@ -12581,7 +12336,6 @@ void CodeGen::genCodeForTree(GenTreePtr tree,
}
}
-
/*****************************************************************************
*
* Generate code for all the basic blocks in the function.
@@ -12589,20 +12343,20 @@ void CodeGen::genCodeForTree(GenTreePtr tree,
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-void CodeGen::genCodeForBBlist()
+void CodeGen::genCodeForBBlist()
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- unsigned savedStkLvl;
+ unsigned savedStkLvl;
-#ifdef DEBUG
- genInterruptibleUsed = true;
- unsigned stmtNum = 0;
- unsigned totalCostEx = 0;
- unsigned totalCostSz = 0;
+#ifdef DEBUG
+ genInterruptibleUsed = true;
+ unsigned stmtNum = 0;
+ unsigned totalCostEx = 0;
+ unsigned totalCostSz = 0;
// You have to be careful if you create basic blocks from now on
compiler->fgSafeBasicBlockCreation = false;
@@ -12623,7 +12377,8 @@ void CodeGen::genCodeForBBlist()
// Prepare the blocks for exception handling codegen: mark the blocks that needs labels.
genPrepForEHCodegen();
- assert(!compiler->fgFirstBBScratch || compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
+ assert(!compiler->fgFirstBBScratch ||
+ compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
/* Initialize the spill tracking logic */
@@ -12631,14 +12386,13 @@ void CodeGen::genCodeForBBlist()
#ifdef DEBUGGING_SUPPORT
/* Initialize the line# tracking logic */
-
+
if (compiler->opts.compScopeInfo)
{
siInit();
}
#endif
-
#ifdef _TARGET_X86_
if (compiler->compTailCallUsed)
{
@@ -12657,7 +12411,7 @@ void CodeGen::genCodeForBBlist()
if (compiler->info.compCallUnmanaged)
{
- noway_assert(isFramePointerUsed()); // Setup of Pinvoke frame currently requires an EBP style frame
+ noway_assert(isFramePointerUsed()); // Setup of Pinvoke frame currently requires an EBP style frame
regSet.rsSetRegsModified(RBM_INT_CALLEE_SAVED & ~RBM_FPBASE);
}
@@ -12668,18 +12422,16 @@ void CodeGen::genCodeForBBlist()
/* If any arguments live in registers, mark those regs as such */
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
/* Is this variable a parameter assigned to a register? */
- if (!varDsc->lvIsParam || !varDsc->lvRegister)
+ if (!varDsc->lvIsParam || !varDsc->lvRegister)
continue;
/* Is the argument live on entry to the method? */
- if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
+ if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
continue;
#if CPU_HAS_FP_SUPPORT
@@ -12693,11 +12445,11 @@ void CodeGen::genCodeForBBlist()
/* Mark the register as holding the variable */
- if (isRegPairType(varDsc->lvType))
+ if (isRegPairType(varDsc->lvType))
{
regTracker.rsTrackRegLclVarLng(varDsc->lvRegNum, varNum, true);
- if (varDsc->lvOtherReg != REG_STK)
+ if (varDsc->lvOtherReg != REG_STK)
regTracker.rsTrackRegLclVarLng(varDsc->lvOtherReg, varNum, false);
}
else
@@ -12711,19 +12463,17 @@ void CodeGen::genCodeForBBlist()
// Make sure a set is allocated for compiler->compCurLife (in the long case), so we can set it to empty without
// allocation at the start of each basic block.
VarSetOps::AssignNoCopy(compiler, compiler->compCurLife, VarSetOps::MakeEmpty(compiler));
-
+
/*-------------------------------------------------------------------------
*
* Walk the basic blocks and generate code for each one
*
*/
- BasicBlock * block;
- BasicBlock * lblk; /* previous block */
+ BasicBlock* block;
+ BasicBlock* lblk; /* previous block */
- for (lblk = NULL, block = compiler->fgFirstBB;
- block != NULL;
- lblk = block, block = block->bbNext)
+ for (lblk = NULL, block = compiler->fgFirstBB; block != NULL; lblk = block, block = block->bbNext)
{
#ifdef DEBUG
if (compiler->verbose)
@@ -12734,14 +12484,14 @@ void CodeGen::genCodeForBBlist()
}
#endif // DEBUG
- VARSET_TP VARSET_INIT_NOCOPY(liveSet, VarSetOps::UninitVal());
+ VARSET_TP VARSET_INIT_NOCOPY(liveSet, VarSetOps::UninitVal());
- regMaskTP gcrefRegs = 0;
- regMaskTP byrefRegs = 0;
+ regMaskTP gcrefRegs = 0;
+ regMaskTP byrefRegs = 0;
/* Does any other block jump to this point ? */
- if (block->bbFlags & BBF_JMP_TARGET)
+ if (block->bbFlags & BBF_JMP_TARGET)
{
/* Someone may jump here, so trash all regs */
@@ -12758,11 +12508,11 @@ void CodeGen::genCodeForBBlist()
/* No registers are used or locked on entry to a basic block */
- regSet.rsMaskUsed = RBM_NONE;
- regSet.rsMaskMult = RBM_NONE;
- regSet.rsMaskLock = RBM_NONE;
+ regSet.rsMaskUsed = RBM_NONE;
+ regSet.rsMaskMult = RBM_NONE;
+ regSet.rsMaskLock = RBM_NONE;
- // If we need to reserve registers such that they are not used
+ // If we need to reserve registers such that they are not used
// by CodeGen in this BasicBlock we do so here.
// On the ARM when we have large frame offsets for locals we
// will have RBM_R10 in the regSet.rsMaskResvd set,
@@ -12771,30 +12521,28 @@ void CodeGen::genCodeForBBlist()
//
if (regSet.rsMaskResvd != RBM_NONE)
{
- regSet.rsLockReg(regSet.rsMaskResvd);
+ regSet.rsLockReg(regSet.rsMaskResvd);
regSet.rsSetRegsModified(regSet.rsMaskResvd);
}
/* Figure out which registers hold variables on entry to this block */
-
+
regMaskTP specialUseMask = regSet.rsMaskResvd;
- specialUseMask |= doubleAlignOrFramePointerUsed() ? RBM_SPBASE|RBM_FPBASE
- : RBM_SPBASE;
+ specialUseMask |= doubleAlignOrFramePointerUsed() ? RBM_SPBASE | RBM_FPBASE : RBM_SPBASE;
regSet.ClearMaskVars();
VarSetOps::ClearD(compiler, compiler->compCurLife);
VarSetOps::Assign(compiler, liveSet, block->bbLiveIn);
#if FEATURE_STACK_FP_X87
- VarSetOps::AssignNoCopy(compiler,
- genFPregVars,
+ VarSetOps::AssignNoCopy(compiler, genFPregVars,
VarSetOps::Intersection(compiler, liveSet, compiler->optAllFPregVars));
genFPregCnt = VarSetOps::Count(compiler, genFPregVars);
genFPdeadRegCnt = 0;
#endif
gcInfo.gcResetForBB();
-
- genUpdateLife(liveSet); // This updates regSet.rsMaskVars with bits from any enregistered LclVars
+
+ genUpdateLife(liveSet); // This updates regSet.rsMaskVars with bits from any enregistered LclVars
#if FEATURE_STACK_FP_X87
VarSetOps::IntersectionD(compiler, liveSet, compiler->optAllNonFPvars);
#endif
@@ -12810,20 +12558,20 @@ void CodeGen::genCodeForBBlist()
assert(varDsc->lvTracked);
        /* Ignore the variable if it's not in a reg */
- if (!varDsc->lvRegister)
+ if (!varDsc->lvRegister)
continue;
if (isFloatRegType(varDsc->lvType))
continue;
/* Get hold of the index and the bitmask for the variable */
- regNumber regNum = varDsc->lvRegNum;
- regMaskTP regMask = genRegMask(regNum);
+ regNumber regNum = varDsc->lvRegNum;
+ regMaskTP regMask = genRegMask(regNum);
regSet.AddMaskVars(regMask);
- if (varDsc->lvType == TYP_REF)
+ if (varDsc->lvType == TYP_REF)
gcrefRegs |= regMask;
- else if (varDsc->lvType == TYP_BYREF)
+ else if (varDsc->lvType == TYP_BYREF)
byrefRegs |= regMask;
/* Mark the register holding the variable as such */
@@ -12831,7 +12579,7 @@ void CodeGen::genCodeForBBlist()
if (varTypeIsMultiReg(varDsc))
{
regTracker.rsTrackRegLclVarLng(regNum, varNum, true);
- if (varDsc->lvOtherReg != REG_STK)
+ if (varDsc->lvOtherReg != REG_STK)
{
regTracker.rsTrackRegLclVarLng(varDsc->lvOtherReg, varNum, false);
regMask |= genRegMask(varDsc->lvOtherReg);
@@ -12843,13 +12591,11 @@ void CodeGen::genCodeForBBlist()
}
}
- gcInfo.gcPtrArgCnt = 0;
+ gcInfo.gcPtrArgCnt = 0;
#if FEATURE_STACK_FP_X87
- regSet.rsMaskUsedFloat =
- regSet.rsMaskRegVarFloat =
- regSet.rsMaskLockedFloat = RBM_NONE;
+ regSet.rsMaskUsedFloat = regSet.rsMaskRegVarFloat = regSet.rsMaskLockedFloat = RBM_NONE;
memset(regSet.genUsedRegsFloat, 0, sizeof(regSet.genUsedRegsFloat));
memset(regSet.genRegVarsFloat, 0, sizeof(regSet.genRegVarsFloat));
@@ -12872,7 +12618,7 @@ void CodeGen::genCodeForBBlist()
gcInfo.gcRegByrefSetCur = byrefRegs;
/* Blocks with handlerGetsXcptnObj()==true use GT_CATCH_ARG to
- represent the exception object (TYP_REF).
+ represent the exception object (TYP_REF).
We mark REG_EXCEPTION_OBJECT as holding a GC object on entry
to the block, it will be the first thing evaluated
(thanks to GTF_ORDER_SIDEEFF).
@@ -12880,7 +12626,7 @@ void CodeGen::genCodeForBBlist()
if (handlerGetsXcptnObj(block->bbCatchTyp))
{
- GenTreePtr firstStmt = block->FirstNonPhiDef();
+ GenTreePtr firstStmt = block->FirstNonPhiDef();
if (firstStmt != NULL)
{
GenTreePtr firstTree = firstStmt->gtStmt.gtStmtExpr;
@@ -12903,7 +12649,7 @@ void CodeGen::genCodeForBBlist()
{
assert(block->bbFlags & BBF_JMP_TARGET);
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("\nEmitting finally target NOP predecessor for BB%02u\n", block->bbNum);
@@ -12915,10 +12661,8 @@ void CodeGen::genCodeForBBlist()
// block starts an EH region. If we pointed the existing bbEmitCookie here, then the NOP
// would be executed, which we would prefer not to do.
- block->bbUnwindNopEmitCookie = getEmitter()->emitAddLabel(
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur);
+ block->bbUnwindNopEmitCookie =
+ getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
instGen(INS_nop);
}
@@ -12934,27 +12678,25 @@ void CodeGen::genCodeForBBlist()
}
#endif
-#ifdef DEBUG
- if (compiler->opts.dspCode)
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
printf("\n L_M%03u_BB%02u:\n", Compiler::s_compMethodsCount, block->bbNum);
#endif
block->bbEmitCookie = NULL;
- if (block->bbFlags & (BBF_JMP_TARGET|BBF_HAS_LABEL))
+ if (block->bbFlags & (BBF_JMP_TARGET | BBF_HAS_LABEL))
{
/* Mark a label and update the current set of live GC refs */
- block->bbEmitCookie = getEmitter()->emitAddLabel(
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
+ block->bbEmitCookie =
+ getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur,
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- /*isFinally*/block->bbFlags & BBF_FINALLY_TARGET
+ /*isFinally*/ block->bbFlags & BBF_FINALLY_TARGET
#else
- FALSE
+ FALSE
#endif
- );
+ );
}
if (block == compiler->fgFirstColdBlock)
@@ -12968,7 +12710,7 @@ void CodeGen::genCodeForBBlist()
// We should never have a block that falls through into the Cold section
noway_assert(!lblk->bbFallsThrough());
- // We require the block that starts the Cold section to have a label
+ // We require the block that starts the Cold section to have a label
noway_assert(block->bbEmitCookie);
getEmitter()->emitSetFirstColdIGCookie(block->bbEmitCookie);
}
@@ -12983,19 +12725,19 @@ void CodeGen::genCodeForBBlist()
#if !FEATURE_FIXED_OUT_ARGS
/* Check for inserted throw blocks and adjust genStackLevel */
- if (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block))
+ if (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block))
{
noway_assert(block->bbFlags & BBF_JMP_TARGET);
genStackLevel = compiler->fgThrowHlpBlkStkLevel(block) * sizeof(int);
- if (genStackLevel)
+ if (genStackLevel)
{
#ifdef _TARGET_X86_
getEmitter()->emitMarkStackLvl(genStackLevel);
inst_RV_IV(INS_add, REG_SPBASE, genStackLevel, EA_PTRSIZE);
genStackLevel = 0;
-#else // _TARGET_X86_
+#else // _TARGET_X86_
NYI("Need emitMarkStackLvl()");
#endif // _TARGET_X86_
}
@@ -13013,9 +12755,9 @@ void CodeGen::genCodeForBBlist()
// BBF_INTERNAL blocks don't correspond to any single IL instruction.
if (compiler->opts.compDbgInfo && (block->bbFlags & BBF_INTERNAL) && block != compiler->fgFirstBB)
- genIPmappingAdd((IL_OFFSETX) ICorDebugInfo::NO_MAPPING, true);
+ genIPmappingAdd((IL_OFFSETX)ICorDebugInfo::NO_MAPPING, true);
- bool firstMapping = true;
+ bool firstMapping = true;
#endif // DEBUGGING_SUPPORT
/*---------------------------------------------------------------------
@@ -13064,15 +12806,16 @@ void CodeGen::genCodeForBBlist()
#endif // DEBUG
/* Get hold of the statement tree */
- GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
+ GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
-#ifdef DEBUG
+#ifdef DEBUG
stmtNum++;
if (compiler->verbose)
{
printf("\nGenerating BB%02u, stmt %u\t\t", block->bbNum, stmtNum);
printf("Holding variables: ");
- dspRegMask(regSet.rsMaskVars); printf("\n\n");
+ dspRegMask(regSet.rsMaskVars);
+ printf("\n\n");
compiler->gtDispTree(compiler->opts.compDbgInfo ? stmt : tree);
printf("\n");
#if FEATURE_STACK_FP_X87
@@ -13080,16 +12823,14 @@ void CodeGen::genCodeForBBlist()
#endif
printf("Execution Order:\n");
- for (GenTreePtr treeNode = stmt->gtStmt.gtStmtList;
- treeNode != NULL;
- treeNode = treeNode->gtNext)
+ for (GenTreePtr treeNode = stmt->gtStmt.gtStmtList; treeNode != NULL; treeNode = treeNode->gtNext)
{
compiler->gtDispTree(treeNode, 0, NULL, true);
}
printf("\n");
}
totalCostEx += (stmt->gtCostEx * block->getBBWeight(compiler));
- totalCostSz += stmt->gtCostSz;
+ totalCostSz += stmt->gtCostSz;
#endif // DEBUG
compiler->compCurStmt = stmt;
@@ -13097,34 +12838,34 @@ void CodeGen::genCodeForBBlist()
compiler->compCurLifeTree = NULL;
switch (tree->gtOper)
{
- case GT_CALL:
- // Managed Retval under managed debugger - we need to make sure that the returned ref-type is
- // reported as alive even though not used within the caller for managed debugger sake. So
- // consider the return value of the method as used if generating debuggable code.
- genCodeForCall(tree, compiler->opts.MinOpts() || compiler->opts.compDbgCode);
- genUpdateLife (tree);
- gcInfo.gcMarkRegSetNpt(RBM_INTRET);
- break;
+ case GT_CALL:
+ // Managed Retval under managed debugger - we need to make sure that the returned ref-type is
+ // reported as alive even though not used within the caller for managed debugger sake. So
+ // consider the return value of the method as used if generating debuggable code.
+ genCodeForCall(tree, compiler->opts.MinOpts() || compiler->opts.compDbgCode);
+ genUpdateLife(tree);
+ gcInfo.gcMarkRegSetNpt(RBM_INTRET);
+ break;
- case GT_IND:
- case GT_NULLCHECK:
+ case GT_IND:
+ case GT_NULLCHECK:
- // Just do the side effects
- genEvalSideEffects(tree);
- break;
+ // Just do the side effects
+ genEvalSideEffects(tree);
+ break;
- default:
- /* Generate code for the tree */
+ default:
+ /* Generate code for the tree */
- genCodeForTree(tree, 0);
- break;
+ genCodeForTree(tree, 0);
+ break;
}
regSet.rsSpillChk();
/* The value of the tree isn't used, unless it's a return stmt */
- if (tree->gtOper != GT_RETURN)
+ if (tree->gtOper != GT_RETURN)
gcInfo.gcMarkRegPtrVal(tree);
#if FEATURE_STACK_FP_X87
@@ -13134,7 +12875,7 @@ void CodeGen::genCodeForBBlist()
#ifdef DEBUG
/* Make sure we didn't bungle pointer register tracking */
- regMaskTP ptrRegs = (gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur);
+ regMaskTP ptrRegs = (gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur);
regMaskTP nonVarPtrRegs = ptrRegs & ~regSet.rsMaskVars;
// If return is a GC-type, clear it. Note that if a common
@@ -13142,9 +12883,8 @@ void CodeGen::genCodeForBBlist()
// even though we might return a ref. We can't use the compRetType
// as the determiner because something we are tracking as a byref
        // might be used as a return value of an int function (which is legal)
- if (tree->gtOper == GT_RETURN &&
- (varTypeIsGC(compiler->info.compRetType) ||
- (tree->gtOp.gtOp1 != 0 && varTypeIsGC(tree->gtOp.gtOp1->TypeGet()))))
+ if (tree->gtOper == GT_RETURN && (varTypeIsGC(compiler->info.compRetType) ||
+ (tree->gtOp.gtOp1 != 0 && varTypeIsGC(tree->gtOp.gtOp1->TypeGet()))))
{
nonVarPtrRegs &= ~RBM_INTRET;
}
@@ -13153,14 +12893,13 @@ void CodeGen::genCodeForBBlist()
// harmless "inc" instruction (does not interfere with the exception
// object).
- if ((compiler->opts.eeFlags & CORJIT_FLG_BBINSTR) &&
- (stmt == block->bbTreeList) &&
+ if ((compiler->opts.eeFlags & CORJIT_FLG_BBINSTR) && (stmt == block->bbTreeList) &&
(block->bbCatchTyp && handlerGetsXcptnObj(block->bbCatchTyp)))
{
nonVarPtrRegs &= ~RBM_EXCEPTION_OBJECT;
}
- if (nonVarPtrRegs)
+ if (nonVarPtrRegs)
{
printf("Regset after tree=");
Compiler::printTreeID(tree);
@@ -13187,7 +12926,7 @@ void CodeGen::genCodeForBBlist()
} //-------- END-FOR each statement-tree of the current block ---------
-#ifdef DEBUGGING_SUPPORT
+#ifdef DEBUGGING_SUPPORT
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
@@ -13208,7 +12947,7 @@ void CodeGen::genCodeForBBlist()
are at the end of the method. It would be nice if we could fix
our code so that this throw block will no longer be necessary. */
- //noway_assert(block->bbCodeOffsEnd != compiler->info.compILCodeSize);
+ // noway_assert(block->bbCodeOffsEnd != compiler->info.compILCodeSize);
siCloseAllOpenScopes();
}
@@ -13218,9 +12957,9 @@ void CodeGen::genCodeForBBlist()
genStackLevel -= savedStkLvl;
- gcInfo.gcMarkRegSetNpt(gcrefRegs|byrefRegs);
+ gcInfo.gcMarkRegSetNpt(gcrefRegs | byrefRegs);
- if (!VarSetOps::Equal(compiler, compiler->compCurLife, block->bbLiveOut))
+ if (!VarSetOps::Equal(compiler, compiler->compCurLife, block->bbLiveOut))
compiler->genChangeLife(block->bbLiveOut DEBUGARG(NULL));
/* Both stacks should always be empty on exit from a basic block */
@@ -13239,113 +12978,114 @@ void CodeGen::genCodeForBBlist()
switch (block->bbJumpKind)
{
- case BBJ_ALWAYS:
- inst_JMP(EJ_jmp, block->bbJumpDest);
- break;
+ case BBJ_ALWAYS:
+ inst_JMP(EJ_jmp, block->bbJumpDest);
+ break;
- case BBJ_RETURN:
- genExitCode(block);
- break;
+ case BBJ_RETURN:
+ genExitCode(block);
+ break;
- case BBJ_THROW:
- // If we have a throw at the end of a function or funclet, we need to emit another instruction
- // afterwards to help the OS unwinder determine the correct context during unwind.
- // We insert an unexecuted breakpoint instruction in several situations
- // following a throw instruction:
- // 1. If the throw is the last instruction of the function or funclet. This helps
- // the OS unwinder determine the correct context during an unwind from the
- // thrown exception.
- // 2. If this is this is the last block of the hot section.
- // 3. If the subsequent block is a special throw block.
- if ((block->bbNext == NULL)
+ case BBJ_THROW:
+ // If we have a throw at the end of a function or funclet, we need to emit another instruction
+ // afterwards to help the OS unwinder determine the correct context during unwind.
+ // We insert an unexecuted breakpoint instruction in several situations
+ // following a throw instruction:
+ // 1. If the throw is the last instruction of the function or funclet. This helps
+ // the OS unwinder determine the correct context during an unwind from the
+ // thrown exception.
+                //  2. If this is the last block of the hot section.
+ // 3. If the subsequent block is a special throw block.
+ if ((block->bbNext == NULL)
#if FEATURE_EH_FUNCLETS
- || (block->bbNext->bbFlags & BBF_FUNCLET_BEG)
+ || (block->bbNext->bbFlags & BBF_FUNCLET_BEG)
#endif // FEATURE_EH_FUNCLETS
- || (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext))
- || block->bbNext == compiler->fgFirstColdBlock
- )
- {
- instGen(INS_BREAKPOINT); // This should never get executed
- }
+ || (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext)) ||
+ block->bbNext == compiler->fgFirstColdBlock)
+ {
+ instGen(INS_BREAKPOINT); // This should never get executed
+ }
- break;
+ break;
- case BBJ_CALLFINALLY:
+ case BBJ_CALLFINALLY:
#if defined(_TARGET_X86_)
- /* If we are about to invoke a finally locally from a try block,
- we have to set the hidden slot corresponding to the finally's
- nesting level. When invoked in response to an exception, the
- EE usually does it.
+ /* If we are about to invoke a finally locally from a try block,
+ we have to set the hidden slot corresponding to the finally's
+ nesting level. When invoked in response to an exception, the
+ EE usually does it.
- We must have : BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
+ We must have : BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
- This code depends on this order not being messed up.
- We will emit :
- mov [ebp-(n+1)],0
- mov [ebp- n ],0xFC
- push &step
- jmp finallyBlock
+ This code depends on this order not being messed up.
+ We will emit :
+ mov [ebp-(n+1)],0
+ mov [ebp- n ],0xFC
+ push &step
+ jmp finallyBlock
- step: mov [ebp- n ],0
- jmp leaveTarget
- leaveTarget:
- */
+ step: mov [ebp- n ],0
+ jmp leaveTarget
+ leaveTarget:
+ */
- noway_assert(isFramePointerUsed());
+ noway_assert(isFramePointerUsed());
- // Get the nesting level which contains the finally
- compiler->fgGetNestingLevel(block, &finallyNesting);
+ // Get the nesting level which contains the finally
+ compiler->fgGetNestingLevel(block, &finallyNesting);
- // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
- unsigned filterEndOffsetSlotOffs;
- filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*)));
-
- unsigned curNestingSlotOffs;
- curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*)));
-
- // Zero out the slot for the next nesting level
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0,
- compiler->lvaShadowSPslotsVar, curNestingSlotOffs - sizeof(void*));
+ // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
+ unsigned filterEndOffsetSlotOffs;
+ filterEndOffsetSlotOffs =
+ (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - (sizeof(void*)));
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK,
- compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
+ unsigned curNestingSlotOffs;
+ curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * sizeof(void*)));
- // Now push the address of where the finally funclet should
- // return to directly.
- if ( !(block->bbFlags & BBF_RETLESS_CALL) )
- {
- assert(block->isBBCallAlwaysPair());
- getEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
- }
- else
- {
- // EE expects a DWORD, so we give him 0
- inst_IV(INS_push_hide, 0);
- }
+ // Zero out the slot for the next nesting level
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar,
+ curNestingSlotOffs - sizeof(void*));
+
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK, compiler->lvaShadowSPslotsVar,
+ curNestingSlotOffs);
- // Jump to the finally BB
- inst_JMP(EJ_jmp, block->bbJumpDest);
+ // Now push the address of where the finally funclet should
+ // return to directly.
+ if (!(block->bbFlags & BBF_RETLESS_CALL))
+ {
+ assert(block->isBBCallAlwaysPair());
+ getEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
+ }
+ else
+ {
+ // EE expects a DWORD, so we give him 0
+ inst_IV(INS_push_hide, 0);
+ }
+
+ // Jump to the finally BB
+ inst_JMP(EJ_jmp, block->bbJumpDest);
#elif defined(_TARGET_ARM_)
- // Now set REG_LR to the address of where the finally funclet should
- // return to directly.
+ // Now set REG_LR to the address of where the finally funclet should
+ // return to directly.
- BasicBlock * bbFinallyRet; bbFinallyRet = NULL;
+ BasicBlock* bbFinallyRet;
+ bbFinallyRet = NULL;
- // We don't have retless calls, since we use the BBJ_ALWAYS to point at a NOP pad where
- // we would have otherwise created retless calls.
- assert(block->isBBCallAlwaysPair());
+ // We don't have retless calls, since we use the BBJ_ALWAYS to point at a NOP pad where
+ // we would have otherwise created retless calls.
+ assert(block->isBBCallAlwaysPair());
- assert(block->bbNext != NULL);
- assert(block->bbNext->bbJumpKind == BBJ_ALWAYS);
- assert(block->bbNext->bbJumpDest != NULL);
- assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
+ assert(block->bbNext != NULL);
+ assert(block->bbNext->bbJumpKind == BBJ_ALWAYS);
+ assert(block->bbNext->bbJumpDest != NULL);
+ assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
- bbFinallyRet = block->bbNext->bbJumpDest;
- bbFinallyRet->bbFlags |= BBF_JMP_TARGET;
+ bbFinallyRet = block->bbNext->bbJumpDest;
+ bbFinallyRet->bbFlags |= BBF_JMP_TARGET;
#if 0
// We don't know the address of finally funclet yet. But adr requires the offset
@@ -13355,81 +13095,69 @@ void CodeGen::genCodeForBBlist()
EA_4BYTE,
bbFinallyRet,
REG_LR);
-#else // 0
- // Load the address where the finally funclet should return into LR.
- // The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do
- // the return.
- getEmitter()->emitIns_R_L (INS_movw,
- EA_4BYTE_DSP_RELOC,
- bbFinallyRet,
- REG_LR);
- getEmitter()->emitIns_R_L (INS_movt,
- EA_4BYTE_DSP_RELOC,
- bbFinallyRet,
- REG_LR);
- regTracker.rsTrackRegTrash(REG_LR);
+#else // 0
+ // Load the address where the finally funclet should return into LR.
+ // The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do
+ // the return.
+ getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
+ getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
+ regTracker.rsTrackRegTrash(REG_LR);
#endif // 0
- // Jump to the finally BB
- inst_JMP(EJ_jmp, block->bbJumpDest);
+ // Jump to the finally BB
+ inst_JMP(EJ_jmp, block->bbJumpDest);
#else
- NYI("TARGET");
+ NYI("TARGET");
#endif
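A hedged worked example of the x86 shadow-SP slot arithmetic in the BBJ_CALLFINALLY path above; the 16-byte slot area and finallyNesting = 1 are made-up inputs, only the two formulas come from the code:

    #include <cstdio>

    int main()
    {
        unsigned lclSize = 16, finallyNesting = 1, ptrSize = 4;      // invented inputs
        unsigned filterEndOffsetSlotOffs = lclSize - ptrSize;        // 12: reserved for FixContext(ppEndRegion)
        unsigned curNestingSlotOffs =
            filterEndOffsetSlotOffs - (finallyNesting + 1) * ptrSize; // 4: gets LCL_FINALLY_MARK
        printf("filterEnd=%u cur=%u next=%u\n", filterEndOffsetSlotOffs, curNestingSlotOffs,
               curNestingSlotOffs - ptrSize);                        // 0: next nesting level's slot, zeroed
        return 0;
    }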
- // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
- // jump target using bbJumpDest - that is already used to point
- // to the finally block. So just skip past the BBJ_ALWAYS unless the
- // block is RETLESS.
- if ( !(block->bbFlags & BBF_RETLESS_CALL) )
- {
- assert(block->isBBCallAlwaysPair());
+ // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
+ // jump target using bbJumpDest - that is already used to point
+ // to the finally block. So just skip past the BBJ_ALWAYS unless the
+ // block is RETLESS.
+ if (!(block->bbFlags & BBF_RETLESS_CALL))
+ {
+ assert(block->isBBCallAlwaysPair());
- lblk = block;
- block = block->bbNext;
- }
- break;
+ lblk = block;
+ block = block->bbNext;
+ }
+ break;
#ifdef _TARGET_ARM_
- case BBJ_EHCATCHRET:
- // set r0 to the address the VM should return to after the catch
- getEmitter()->emitIns_R_L (INS_movw,
- EA_4BYTE_DSP_RELOC,
- block->bbJumpDest,
- REG_R0);
- getEmitter()->emitIns_R_L (INS_movt,
- EA_4BYTE_DSP_RELOC,
- block->bbJumpDest,
- REG_R0);
- regTracker.rsTrackRegTrash(REG_R0);
+ case BBJ_EHCATCHRET:
+ // set r0 to the address the VM should return to after the catch
+ getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_R0);
+ getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_R0);
+ regTracker.rsTrackRegTrash(REG_R0);
- __fallthrough;
+ __fallthrough;
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- genReserveFuncletEpilog(block);
- break;
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ genReserveFuncletEpilog(block);
+ break;
#else // _TARGET_ARM_
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- case BBJ_EHCATCHRET:
- break;
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ case BBJ_EHCATCHRET:
+ break;
#endif // _TARGET_ARM_
- case BBJ_NONE:
- case BBJ_COND:
- case BBJ_SWITCH:
- break;
+ case BBJ_NONE:
+ case BBJ_COND:
+ case BBJ_SWITCH:
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
-#ifdef DEBUG
+#ifdef DEBUG
compiler->compCurBB = 0;
#endif
@@ -13446,12 +13174,11 @@ void CodeGen::genCodeForBBlist()
compiler->tmpEnd();
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("\n# ");
- printf("totalCostEx = %6d, totalCostSz = %5d ",
- totalCostEx, totalCostSz);
+ printf("totalCostEx = %6d, totalCostSz = %5d ", totalCostEx, totalCostSz);
printf("%s\n", compiler->info.compFullName);
}
#endif
@@ -13472,19 +13199,17 @@ void CodeGen::genCodeForBBlist()
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-void CodeGen::genCodeForTreeLng(GenTreePtr tree,
- regMaskTP needReg,
- regMaskTP avoidReg)
+void CodeGen::genCodeForTreeLng(GenTreePtr tree, regMaskTP needReg, regMaskTP avoidReg)
{
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
- regPairNo regPair = DUMMY_INIT(REG_PAIR_CORRUPT);
- regMaskTP addrReg;
- regNumber regLo;
- regNumber regHi;
+ regPairNo regPair = DUMMY_INIT(REG_PAIR_CORRUPT);
+ regMaskTP addrReg;
+ regNumber regLo;
+ regNumber regHi;
noway_assert(tree);
noway_assert(tree->gtOper != GT_STMT);
@@ -13495,10 +13220,10 @@ void CodeGen::genCodeForTreeLng(GenTreePtr tree,
oper = tree->OperGet();
kind = tree->OperKind();
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
-REG_VAR_LONG:
- regPair = tree->gtRegPair;
+ REG_VAR_LONG:
+ regPair = tree->gtRegPair;
gcInfo.gcMarkRegSetNpt(genRegPairMask(regPair));
@@ -13507,24 +13232,23 @@ REG_VAR_LONG:
/* Is this a constant node? */
- if (kind & GTK_CONST)
+ if (kind & GTK_CONST)
{
- __int64 lval;
+ __int64 lval;
/* Pick a register pair for the value */
- regPair = regSet.rsPickRegPair(needReg);
+ regPair = regSet.rsPickRegPair(needReg);
/* Load the value into the registers */
CLANG_FORMAT_COMMENT_ANCHOR;
#if !CPU_HAS_FP_SUPPORT
- if (oper == GT_CNS_DBL)
+ if (oper == GT_CNS_DBL)
{
noway_assert(sizeof(__int64) == sizeof(double));
- noway_assert(sizeof(tree->gtLngCon.gtLconVal) ==
- sizeof(tree->gtDblCon.gtDconVal));
+ noway_assert(sizeof(tree->gtLngCon.gtLconVal) == sizeof(tree->gtDblCon.gtDconVal));
lval = *(__int64*)(&tree->gtDblCon.gtDconVal);
}
@@ -13536,155 +13260,154 @@ REG_VAR_LONG:
lval = tree->gtLngCon.gtLconVal;
}
- genSetRegToIcon(genRegPairLo(regPair), int(lval ));
+ genSetRegToIcon(genRegPairLo(regPair), int(lval));
genSetRegToIcon(genRegPairHi(regPair), int(lval >> 32));
goto DONE;
}
/* Is this a leaf node? */
- if (kind & GTK_LEAF)
+ if (kind & GTK_LEAF)
{
switch (oper)
{
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
#if REDUNDANT_LOAD
- /* This case has to consider the case in which an int64 LCL_VAR
- * may both be enregistered and also have a cached copy of itself
- * in a different set of registers.
- * We want to return the registers that have the most in common
- * with the needReg mask
- */
+ /* This case has to consider the case in which an int64 LCL_VAR
+ * may both be enregistered and also have a cached copy of itself
+ * in a different set of registers.
+ * We want to return the registers that have the most in common
+ * with the needReg mask
+ */
- /* Does the var have a copy of itself in the cached registers?
- * And are these cached registers both free?
- * If so use these registers if they match any needReg.
- */
+ /* Does the var have a copy of itself in the cached registers?
+ * And are these cached registers both free?
+ * If so use these registers if they match any needReg.
+ */
- regPair = regTracker.rsLclIsInRegPair(tree->gtLclVarCommon.gtLclNum);
+ regPair = regTracker.rsLclIsInRegPair(tree->gtLclVarCommon.gtLclNum);
- if ( ( regPair != REG_PAIR_NONE) &&
- ( (regSet.rsRegMaskFree() & needReg) == needReg ) &&
- ((genRegPairMask(regPair) & needReg) != RBM_NONE ))
- {
- goto DONE;
- }
+ if ((regPair != REG_PAIR_NONE) && ((regSet.rsRegMaskFree() & needReg) == needReg) &&
+ ((genRegPairMask(regPair) & needReg) != RBM_NONE))
+ {
+ goto DONE;
+ }
- /* Does the variable live in a register?
- * If so use these registers.
- */
- if (genMarkLclVar(tree))
- goto REG_VAR_LONG;
+ /* Does the variable live in a register?
+ * If so use these registers.
+ */
+ if (genMarkLclVar(tree))
+ goto REG_VAR_LONG;
- /* If tree is not an enregistered variable then
- * be sure to use any cached register that contain
- * a copy of this local variable
- */
- if (regPair != REG_PAIR_NONE)
- {
- goto DONE;
- }
+ /* If tree is not an enregistered variable then
+                 * be sure to use any cached register that contains
+ * a copy of this local variable
+ */
+ if (regPair != REG_PAIR_NONE)
+ {
+ goto DONE;
+ }
#endif
- goto MEM_LEAF;
+ goto MEM_LEAF;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
- // We only use GT_LCL_FLD for lvDoNotEnregister vars, so we don't have
- // to worry about it being enregistered.
- noway_assert(compiler->lvaTable[tree->gtLclFld.gtLclNum].lvRegister == 0);
- goto MEM_LEAF;
+ // We only use GT_LCL_FLD for lvDoNotEnregister vars, so we don't have
+ // to worry about it being enregistered.
+ noway_assert(compiler->lvaTable[tree->gtLclFld.gtLclNum].lvRegister == 0);
+ goto MEM_LEAF;
- case GT_CLS_VAR:
- MEM_LEAF:
+ case GT_CLS_VAR:
+ MEM_LEAF:
- /* Pick a register pair for the value */
+ /* Pick a register pair for the value */
- regPair = regSet.rsPickRegPair(needReg);
+ regPair = regSet.rsPickRegPair(needReg);
- /* Load the value into the registers */
+ /* Load the value into the registers */
- instruction loadIns;
+ instruction loadIns;
- loadIns = ins_Load(TYP_INT); // INS_ldr
- regLo = genRegPairLo(regPair);
- regHi = genRegPairHi(regPair);
+ loadIns = ins_Load(TYP_INT); // INS_ldr
+ regLo = genRegPairLo(regPair);
+ regHi = genRegPairHi(regPair);
#if CPU_LOAD_STORE_ARCH
- {
- regNumber regAddr = regSet.rsGrabReg(RBM_ALLINT);
- inst_RV_TT(INS_lea, regAddr, tree, 0);
- regTracker.rsTrackRegTrash(regAddr);
-
- if (regLo != regAddr)
{
- // assert(regLo != regAddr); // forced by if statement
- getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regLo, regAddr, 0);
- getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regHi, regAddr, 4);
- }
- else
- {
- // assert(regHi != regAddr); // implied by regpair property and the if statement
- getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regHi, regAddr, 4);
- getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regLo, regAddr, 0);
+ regNumber regAddr = regSet.rsGrabReg(RBM_ALLINT);
+ inst_RV_TT(INS_lea, regAddr, tree, 0);
+ regTracker.rsTrackRegTrash(regAddr);
+
+ if (regLo != regAddr)
+ {
+ // assert(regLo != regAddr); // forced by if statement
+ getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regLo, regAddr, 0);
+ getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regHi, regAddr, 4);
+ }
+ else
+ {
+ // assert(regHi != regAddr); // implied by regpair property and the if statement
+ getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regHi, regAddr, 4);
+ getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regLo, regAddr, 0);
+ }
}
- }
#else
- inst_RV_TT(loadIns, regLo, tree, 0);
- inst_RV_TT(loadIns, regHi, tree, 4);
+ inst_RV_TT(loadIns, regLo, tree, 0);
+ inst_RV_TT(loadIns, regHi, tree, 4);
#endif
#ifdef _TARGET_ARM_
- if ((oper == GT_CLS_VAR) && (tree->gtFlags & GTF_IND_VOLATILE))
- {
- // Emit a memory barrier instruction after the load
- instGen_MemoryBarrier();
- }
+ if ((oper == GT_CLS_VAR) && (tree->gtFlags & GTF_IND_VOLATILE))
+ {
+ // Emit a memory barrier instruction after the load
+ instGen_MemoryBarrier();
+ }
#endif
- regTracker.rsTrackRegTrash(regLo);
- regTracker.rsTrackRegTrash(regHi);
+ regTracker.rsTrackRegTrash(regLo);
+ regTracker.rsTrackRegTrash(regHi);
- goto DONE;
+ goto DONE;
- default:
-#ifdef DEBUG
- compiler->gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ compiler->gtDispTree(tree);
#endif
- noway_assert(!"unexpected leaf");
+ noway_assert(!"unexpected leaf");
}
}
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
- instruction insLo;
- instruction insHi;
- bool doLo;
- bool doHi;
- bool setCarry = false;
- int helper;
+ instruction insLo;
+ instruction insHi;
+ bool doLo;
+ bool doHi;
+ bool setCarry = false;
+ int helper;
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
switch (oper)
{
- case GT_ASG:
+ case GT_ASG:
{
#ifdef DEBUGGING_SUPPORT
- unsigned lclVarNum = compiler->lvaCount;
+ unsigned lclVarNum = compiler->lvaCount;
unsigned lclVarILoffs = DUMMY_INIT(0);
#endif
/* Is the target a local ? */
- if (op1->gtOper == GT_LCL_VAR)
+ if (op1->gtOper == GT_LCL_VAR)
{
- unsigned varNum = op1->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc;
+ unsigned varNum = op1->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc;
noway_assert(varNum < compiler->lvaCount);
varDsc = compiler->lvaTable + varNum;
@@ -13698,16 +13421,17 @@ REG_VAR_LONG:
* Remember the local var info to call siCheckVarScope
* AFTER codegen of the assignment.
*/
- if (compiler->opts.compScopeInfo && !compiler->opts.compDbgCode && (compiler->info.compVarScopesCount > 0))
+ if (compiler->opts.compScopeInfo && !compiler->opts.compDbgCode &&
+ (compiler->info.compVarScopesCount > 0))
{
- lclVarNum = varNum;
- lclVarILoffs = op1->gtLclVar.gtLclILoffs;
+ lclVarNum = varNum;
+ lclVarILoffs = op1->gtLclVar.gtLclILoffs;
}
#endif
/* Has the variable been assigned to a register (pair) ? */
- if (genMarkLclVar(op1))
+ if (genMarkLclVar(op1))
{
noway_assert(op1->gtFlags & GTF_REG_VAL);
regPair = op1->gtRegPair;
@@ -13717,7 +13441,7 @@ REG_VAR_LONG:
/* Is the value being assigned a constant? */
- if (op2->gtOper == GT_CNS_LNG)
+ if (op2->gtOper == GT_CNS_LNG)
{
/* Move the value into the target */
@@ -13738,7 +13462,7 @@ REG_VAR_LONG:
}
ins = INS_mov;
}
- inst_TT_IV(ins, op1, (int)(op2->gtLngCon.gtLconVal ), 0);
+ inst_TT_IV(ins, op1, (int)(op2->gtLngCon.gtLconVal), 0);
// The REG_STK case has already been handled
if (regHi != REG_STK)
@@ -13752,7 +13476,7 @@ REG_VAR_LONG:
/* Compute the RHS into desired register pair */
- if (regHi != REG_STK)
+ if (regHi != REG_STK)
{
genComputeRegPair(op2, regPair, avoidReg, RegSet::KEEP_REG);
noway_assert(op2->gtFlags & GTF_REG_VAL);
@@ -13775,7 +13499,7 @@ REG_VAR_LONG:
/* move high first, target is on stack */
inst_TT_RV(ins_Store(TYP_INT), op1, curHi, 4);
- if (regLo != curLo)
+ if (regLo != curLo)
{
if ((regSet.rsMaskUsed & genRegMask(regLo)) && (regLo != curHi))
regSet.rsSpillReg(regLo);
@@ -13789,10 +13513,9 @@ REG_VAR_LONG:
}
}
-
/* Is the value being assigned a constant? */
- if (op2->gtOper == GT_CNS_LNG)
+ if (op2->gtOper == GT_CNS_LNG)
{
/* Make the target addressable */
@@ -13800,7 +13523,7 @@ REG_VAR_LONG:
/* Move the value into the target */
- inst_TT_IV(ins_Store(TYP_INT), op1, (int)(op2->gtLngCon.gtLconVal ), 0);
+ inst_TT_IV(ins_Store(TYP_INT), op1, (int)(op2->gtLngCon.gtLconVal), 0);
inst_TT_IV(ins_Store(TYP_INT), op1, (int)(op2->gtLngCon.gtLconVal >> 32), 4);
genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
@@ -13843,18 +13566,14 @@ REG_VAR_LONG:
/* Eliminate worthless assignment "lcl = lcl" */
- if (op2->gtOper == GT_LCL_VAR &&
- op1->gtOper == GT_LCL_VAR && op2->gtLclVarCommon.gtLclNum ==
- op1->gtLclVarCommon.gtLclNum)
+ if (op2->gtOper == GT_LCL_VAR && op1->gtOper == GT_LCL_VAR &&
+ op2->gtLclVarCommon.gtLclNum == op1->gtLclVarCommon.gtLclNum)
{
genUpdateLife(op2);
goto LAsgExit;
}
-
- if (op2->gtOper == GT_CAST &&
- TYP_ULONG == op2->CastToType() &&
- op2->CastFromType() <= TYP_INT &&
+ if (op2->gtOper == GT_CAST && TYP_ULONG == op2->CastToType() && op2->CastFromType() <= TYP_INT &&
// op1,op2 need to be materialized in the correct order.
(tree->gtFlags & GTF_REVERSE_OPS))
{
@@ -13878,8 +13597,9 @@ REG_VAR_LONG:
// conv.ovf.u8 could overflow if the original number was negative
if (op2->gtOverflow())
{
- noway_assert((op2->gtFlags & GTF_UNSIGNED) == 0); // conv.ovf.u8.un should be bashed to conv.u8.un
- instGen_Compare_Reg_To_Zero(EA_4BYTE, regHi); // set flags
+ noway_assert((op2->gtFlags & GTF_UNSIGNED) ==
+ 0); // conv.ovf.u8.un should be bashed to conv.u8.un
+ instGen_Compare_Reg_To_Zero(EA_4BYTE, regHi); // set flags
emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
}
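A hedged, standalone model of the overflow rule enforced above: widening a signed 32-bit value to an unsigned 64-bit value overflows exactly when the value is negative, which is what the compare-to-zero plus signed less-than jump detects. WidenOvfU8_Sketch is an invented name:

    #include <cstdint>

    // Returns false where the generated code would jump to the SCK_OVERFLOW throw block.
    bool WidenOvfU8_Sketch(int32_t v, uint64_t* out)
    {
        if (v < 0)
            return false;              // negative source -> conv.ovf.u8 overflows
        *out = (uint64_t)(uint32_t)v;  // low word = v, high word = 0 ("Store 0 in hi-word" below)
        return true;
    }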
@@ -13887,12 +13607,12 @@ REG_VAR_LONG:
/* Move the value into the target */
inst_TT_RV(ins_Store(TYP_INT), op1, regHi, 0);
- inst_TT_IV(ins_Store(TYP_INT), op1, 0, 4); // Store 0 in hi-word
+ inst_TT_IV(ins_Store(TYP_INT), op1, 0, 4); // Store 0 in hi-word
/* Free up anything that was tied up by either side */
genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
- genReleaseReg (smallOpr);
+ genReleaseReg(smallOpr);
#if REDUNDANT_LOAD
if (op1->gtOper == GT_LCL_VAR)
@@ -13902,15 +13622,14 @@ REG_VAR_LONG:
/* mark RHS registers as containing the local var */
regTracker.rsTrackRegLclVarLng(regHi, op1->gtLclVarCommon.gtLclNum, true);
- }
+ }
#endif
goto LAsgExit;
}
-
/* Is the LHS more complex than the RHS? */
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
/* Generate the RHS into a register pair */
@@ -13918,7 +13637,7 @@ REG_VAR_LONG:
noway_assert(op2->gtFlags & GTF_REG_VAL);
/* Make the target addressable */
- op1 = genCodeForCommaTree(op1);
+ op1 = genCodeForCommaTree(op1);
addrReg = genMakeAddressable(op1, 0, RegSet::KEEP_REG);
/* Make sure the RHS register hasn't been spilled */
@@ -13929,7 +13648,7 @@ REG_VAR_LONG:
{
/* Make the target addressable */
- op1 = genCodeForCommaTree(op1);
+ op1 = genCodeForCommaTree(op1);
addrReg = genMakeAddressable(op1, RBM_ALLINT & ~op2->gtRsvdRegs, RegSet::KEEP_REG, true);
/* Generate the RHS into a register pair */
@@ -13966,7 +13685,7 @@ REG_VAR_LONG:
if ((op2->gtFlags & GTF_REG_VAL) &&
/* constant has precedence over local */
- // rsRegValues[op2->gtRegNum].rvdKind != RV_INT_CNS &&
+ // rsRegValues[op2->gtRegNum].rvdKind != RV_INT_CNS &&
tree->gtOper == GT_ASG)
{
regNumber regNo;
@@ -13974,16 +13693,15 @@ REG_VAR_LONG:
/* mark RHS registers as containing the local var */
regNo = genRegPairLo(op2->gtRegPair);
- if (regNo != REG_STK)
+ if (regNo != REG_STK)
regTracker.rsTrackRegLclVarLng(regNo, op1->gtLclVarCommon.gtLclNum, true);
regNo = genRegPairHi(op2->gtRegPair);
- if (regNo != REG_STK)
+ if (regNo != REG_STK)
{
/* For partially enregistered longs, we might have
stomped on op2's hiReg */
- if (!(op1->gtFlags & GTF_REG_VAL) ||
- regNo != genRegPairLo(op1->gtRegPair))
+ if (!(op1->gtFlags & GTF_REG_VAL) || regNo != genRegPairLo(op1->gtRegPair))
{
regTracker.rsTrackRegLclVarLng(regNo, op1->gtLclVarCommon.gtLclNum, false);
}
@@ -13992,8 +13710,7 @@ REG_VAR_LONG:
}
#endif
-
-LAsgExit:
+ LAsgExit:
genUpdateLife(op1);
genUpdateLife(tree);
@@ -14005,866 +13722,905 @@ LAsgExit:
if (lclVarNum < compiler->lvaCount)
siCheckVarScope(lclVarNum, lclVarILoffs);
#endif
- }
- return;
-
-
- case GT_SUB: insLo = INS_sub; insHi = INS_SUBC; setCarry = true; goto BINOP_OVF;
- case GT_ADD: insLo = INS_add; insHi = INS_ADDC; setCarry = true; goto BINOP_OVF;
-
- bool ovfl;
-
- BINOP_OVF:
- ovfl = tree->gtOverflow();
- goto _BINOP;
-
- case GT_AND: insLo = insHi = INS_AND; goto BINOP;
- case GT_OR : insLo = insHi = INS_OR ; goto BINOP;
- case GT_XOR: insLo = insHi = INS_XOR; goto BINOP;
-
- BINOP: ovfl = false; goto _BINOP;
-
- _BINOP:
-
- /* The following makes an assumption about gtSetEvalOrder(this) */
+ }
+ return;
- noway_assert((tree->gtFlags & GTF_REVERSE_OPS) == 0);
+ case GT_SUB:
+ insLo = INS_sub;
+ insHi = INS_SUBC;
+ setCarry = true;
+ goto BINOP_OVF;
+ case GT_ADD:
+ insLo = INS_add;
+ insHi = INS_ADDC;
+ setCarry = true;
+ goto BINOP_OVF;
+
+ bool ovfl;
+
+ BINOP_OVF:
+ ovfl = tree->gtOverflow();
+ goto _BINOP;
+
+ case GT_AND:
+ insLo = insHi = INS_AND;
+ goto BINOP;
+ case GT_OR:
+ insLo = insHi = INS_OR;
+ goto BINOP;
+ case GT_XOR:
+ insLo = insHi = INS_XOR;
+ goto BINOP;
+
+ BINOP:
+ ovfl = false;
+ goto _BINOP;
+
+ _BINOP:
+
+ /* The following makes an assumption about gtSetEvalOrder(this) */
- /* Special case: check for "(long(intval) << 32) | longval" */
+ noway_assert((tree->gtFlags & GTF_REVERSE_OPS) == 0);
- if (oper == GT_OR && op1->gtOper == GT_LSH)
- {
- GenTreePtr lshLHS = op1->gtOp.gtOp1;
- GenTreePtr lshRHS = op1->gtOp.gtOp2;
+ /* Special case: check for "(long(intval) << 32) | longval" */
- if (lshLHS->gtOper == GT_CAST &&
- lshRHS->gtOper == GT_CNS_INT &&
- lshRHS->gtIntCon.gtIconVal == 32 &&
- genTypeSize(TYP_INT) == genTypeSize(lshLHS->CastFromType()))
+ if (oper == GT_OR && op1->gtOper == GT_LSH)
{
+ GenTreePtr lshLHS = op1->gtOp.gtOp1;
+ GenTreePtr lshRHS = op1->gtOp.gtOp2;
- /* Throw away the cast of the shift operand. */
-
- op1 = lshLHS->gtCast.CastOp();
-
- /* Special case: check op2 for "ulong(intval)" */
- if ((op2->gtOper == GT_CAST) &&
- (op2->CastToType() == TYP_ULONG) &&
- genTypeSize(TYP_INT) == genTypeSize(op2->CastFromType()))
+ if (lshLHS->gtOper == GT_CAST && lshRHS->gtOper == GT_CNS_INT && lshRHS->gtIntCon.gtIconVal == 32 &&
+ genTypeSize(TYP_INT) == genTypeSize(lshLHS->CastFromType()))
{
- /* Throw away the cast of the second operand. */
- op2 = op2->gtCast.CastOp();
- goto SIMPLE_OR_LONG;
- }
- /* Special case: check op2 for "long(intval) & 0xFFFFFFFF" */
- else if (op2->gtOper == GT_AND)
- {
- GenTreePtr andLHS; andLHS = op2->gtOp.gtOp1;
- GenTreePtr andRHS; andRHS = op2->gtOp.gtOp2;
+ /* Throw away the cast of the shift operand. */
- if (andLHS->gtOper == GT_CAST &&
- andRHS->gtOper == GT_CNS_LNG &&
- andRHS->gtLngCon.gtLconVal == 0x00000000FFFFFFFF &&
- genTypeSize(TYP_INT) == genTypeSize(andLHS->CastFromType()))
+ op1 = lshLHS->gtCast.CastOp();
+
+ /* Special case: check op2 for "ulong(intval)" */
+ if ((op2->gtOper == GT_CAST) && (op2->CastToType() == TYP_ULONG) &&
+ genTypeSize(TYP_INT) == genTypeSize(op2->CastFromType()))
{
/* Throw away the cast of the second operand. */
- op2 = andLHS->gtCast.CastOp();
+ op2 = op2->gtCast.CastOp();
+ goto SIMPLE_OR_LONG;
+ }
+ /* Special case: check op2 for "long(intval) & 0xFFFFFFFF" */
+ else if (op2->gtOper == GT_AND)
+ {
+ GenTreePtr andLHS;
+ andLHS = op2->gtOp.gtOp1;
+ GenTreePtr andRHS;
+ andRHS = op2->gtOp.gtOp2;
+
+ if (andLHS->gtOper == GT_CAST && andRHS->gtOper == GT_CNS_LNG &&
+ andRHS->gtLngCon.gtLconVal == 0x00000000FFFFFFFF &&
+ genTypeSize(TYP_INT) == genTypeSize(andLHS->CastFromType()))
+ {
+ /* Throw away the cast of the second operand. */
+
+ op2 = andLHS->gtCast.CastOp();
-SIMPLE_OR_LONG:
- // Load the high DWORD, ie. op1
+ SIMPLE_OR_LONG:
+ // Load the high DWORD, ie. op1
- genCodeForTree(op1, needReg & ~op2->gtRsvdRegs);
+ genCodeForTree(op1, needReg & ~op2->gtRsvdRegs);
- noway_assert(op1->gtFlags & GTF_REG_VAL);
- regHi = op1->gtRegNum;
- regSet.rsMarkRegUsed(op1);
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
+ regHi = op1->gtRegNum;
+ regSet.rsMarkRegUsed(op1);
- // Load the low DWORD, ie. op2
+ // Load the low DWORD, ie. op2
- genCodeForTree(op2, needReg & ~genRegMask(regHi));
+ genCodeForTree(op2, needReg & ~genRegMask(regHi));
- noway_assert(op2->gtFlags & GTF_REG_VAL);
- regLo = op2->gtRegNum;
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
+ regLo = op2->gtRegNum;
- /* Make sure regHi is still around. Also, force
- regLo to be excluded in case regLo==regHi */
+ /* Make sure regHi is still around. Also, force
+ regLo to be excluded in case regLo==regHi */
- genRecoverReg(op1, ~genRegMask(regLo), RegSet::FREE_REG);
- regHi = op1->gtRegNum;
+ genRecoverReg(op1, ~genRegMask(regLo), RegSet::FREE_REG);
+ regHi = op1->gtRegNum;
- regPair = gen2regs2pair(regLo, regHi);
- goto DONE;
+ regPair = gen2regs2pair(regLo, regHi);
+ goto DONE;
+ }
}
- }
- /* Generate the following sequence:
- Prepare op1 (discarding shift)
- Compute op2 into some regpair
- OR regpairhi, op1
- */
+ /* Generate the following sequence:
+ Prepare op1 (discarding shift)
+ Compute op2 into some regpair
+ OR regpairhi, op1
+ */
- /* First, make op1 addressable */
+ /* First, make op1 addressable */
- /* tempReg must avoid both needReg, op2->RsvdRegs and regSet.rsMaskResvd.
+ /* tempReg must avoid both needReg, op2->RsvdRegs and regSet.rsMaskResvd.
- It appears incorrect to exclude needReg as we are not ensuring that the reg pair into
- which the long value is computed is from needReg. But at this point the safest fix is
- to exclude regSet.rsMaskResvd.
+ It appears incorrect to exclude needReg as we are not ensuring that the reg pair into
+ which the long value is computed is from needReg. But at this point the safest fix is
+ to exclude regSet.rsMaskResvd.
- Note that needReg could be the set of free registers (excluding reserved ones). If we don't
- exclude regSet.rsMaskResvd, the expression below will have the effect of trying to choose a reg from
- reserved set which is bound to fail. To prevent that we avoid regSet.rsMaskResvd.
- */
- regMaskTP tempReg = RBM_ALLINT & ~needReg & ~op2->gtRsvdRegs & ~avoidReg & ~regSet.rsMaskResvd;
+ Note that needReg could be the set of free registers (excluding reserved ones). If we don't
+ exclude regSet.rsMaskResvd, the expression below will have the effect of trying to choose a
+ reg from
+ reserved set which is bound to fail. To prevent that we avoid regSet.rsMaskResvd.
+ */
+ regMaskTP tempReg = RBM_ALLINT & ~needReg & ~op2->gtRsvdRegs & ~avoidReg & ~regSet.rsMaskResvd;
- addrReg = genMakeAddressable(op1, tempReg, RegSet::KEEP_REG);
+ addrReg = genMakeAddressable(op1, tempReg, RegSet::KEEP_REG);
- genCompIntoFreeRegPair(op2, avoidReg, RegSet::KEEP_REG);
+ genCompIntoFreeRegPair(op2, avoidReg, RegSet::KEEP_REG);
- noway_assert(op2->gtFlags & GTF_REG_VAL);
- regPair = op2->gtRegPair;
- regHi = genRegPairHi(regPair);
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
+ regPair = op2->gtRegPair;
+ regHi = genRegPairHi(regPair);
- /* The operand might have interfered with the address */
+ /* The operand might have interfered with the address */
- addrReg = genKeepAddressable(op1, addrReg, genRegPairMask(regPair));
+ addrReg = genKeepAddressable(op1, addrReg, genRegPairMask(regPair));
- /* Now compute the result */
+ /* Now compute the result */
- inst_RV_TT(insHi, regHi, op1, 0);
+ inst_RV_TT(insHi, regHi, op1, 0);
- regTracker.rsTrackRegTrash(regHi);
+ regTracker.rsTrackRegTrash(regHi);
- /* Free up anything that was tied up by the LHS */
+ /* Free up anything that was tied up by the LHS */
- genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
+ genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
- /* The result is where the second operand is sitting */
+ /* The result is where the second operand is sitting */
- genRecoverRegPair(op2, REG_PAIR_NONE, RegSet::FREE_REG);
+ genRecoverRegPair(op2, REG_PAIR_NONE, RegSet::FREE_REG);
- regPair = op2->gtRegPair;
- goto DONE;
+ regPair = op2->gtRegPair;
+ goto DONE;
+ }
}
- }
-
- /* Special case: check for "longval | (long(intval) << 32)" */
- if (oper == GT_OR && op2->gtOper == GT_LSH)
- {
- GenTreePtr lshLHS = op2->gtOp.gtOp1;
- GenTreePtr lshRHS = op2->gtOp.gtOp2;
-
- if (lshLHS->gtOper == GT_CAST &&
- lshRHS->gtOper == GT_CNS_INT &&
- lshRHS->gtIntCon.gtIconVal == 32 &&
- genTypeSize(TYP_INT) == genTypeSize(lshLHS->CastFromType()))
+ /* Special case: check for "longval | (long(intval) << 32)" */
+ if (oper == GT_OR && op2->gtOper == GT_LSH)
{
- /* We throw away the cast of the shift operand. */
-
- op2 = lshLHS->gtCast.CastOp();
+ GenTreePtr lshLHS = op2->gtOp.gtOp1;
+ GenTreePtr lshRHS = op2->gtOp.gtOp2;
- /* Special case: check op1 for "long(intval) & 0xFFFFFFFF" */
+ if (lshLHS->gtOper == GT_CAST && lshRHS->gtOper == GT_CNS_INT && lshRHS->gtIntCon.gtIconVal == 32 &&
+ genTypeSize(TYP_INT) == genTypeSize(lshLHS->CastFromType()))
- if (op1->gtOper == GT_AND)
{
- GenTreePtr andLHS = op1->gtOp.gtOp1;
- GenTreePtr andRHS = op1->gtOp.gtOp2;
+ /* We throw away the cast of the shift operand. */
- if (andLHS->gtOper == GT_CAST &&
- andRHS->gtOper == GT_CNS_LNG &&
- andRHS->gtLngCon.gtLconVal == 0x00000000FFFFFFFF &&
- genTypeSize(TYP_INT) == genTypeSize(andLHS->CastFromType()))
+ op2 = lshLHS->gtCast.CastOp();
+
+ /* Special case: check op1 for "long(intval) & 0xFFFFFFFF" */
+
+ if (op1->gtOper == GT_AND)
{
- /* Throw away the cast of the first operand. */
+ GenTreePtr andLHS = op1->gtOp.gtOp1;
+ GenTreePtr andRHS = op1->gtOp.gtOp2;
- op1 = andLHS->gtCast.CastOp();
+ if (andLHS->gtOper == GT_CAST && andRHS->gtOper == GT_CNS_LNG &&
+ andRHS->gtLngCon.gtLconVal == 0x00000000FFFFFFFF &&
+ genTypeSize(TYP_INT) == genTypeSize(andLHS->CastFromType()))
+ {
+ /* Throw away the cast of the first operand. */
- // Load the low DWORD, ie. op1
+ op1 = andLHS->gtCast.CastOp();
- genCodeForTree(op1, needReg & ~op2->gtRsvdRegs);
+ // Load the low DWORD, ie. op1
- noway_assert(op1->gtFlags & GTF_REG_VAL);
- regLo = op1->gtRegNum;
- regSet.rsMarkRegUsed(op1);
+ genCodeForTree(op1, needReg & ~op2->gtRsvdRegs);
- // Load the high DWORD, ie. op2
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
+ regLo = op1->gtRegNum;
+ regSet.rsMarkRegUsed(op1);
- genCodeForTree(op2, needReg & ~genRegMask(regLo));
+ // Load the high DWORD, ie. op2
- noway_assert(op2->gtFlags & GTF_REG_VAL);
- regHi = op2->gtRegNum;
+ genCodeForTree(op2, needReg & ~genRegMask(regLo));
- /* Make sure regLo is still around. Also, force
- regHi to be excluded in case regLo==regHi */
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
+ regHi = op2->gtRegNum;
- genRecoverReg(op1, ~genRegMask(regHi), RegSet::FREE_REG);
- regLo = op1->gtRegNum;
+ /* Make sure regLo is still around. Also, force
+ regHi to be excluded in case regLo==regHi */
- regPair = gen2regs2pair(regLo, regHi);
- goto DONE;
+ genRecoverReg(op1, ~genRegMask(regHi), RegSet::FREE_REG);
+ regLo = op1->gtRegNum;
+
+ regPair = gen2regs2pair(regLo, regHi);
+ goto DONE;
+ }
}
- }
- /* Generate the following sequence:
- Compute op1 into some regpair
- Make op2 (ignoring shift) addressable
- OR regPairHi, op2
- */
+ /* Generate the following sequence:
+ Compute op1 into some regpair
+ Make op2 (ignoring shift) addressable
+ OR regPairHi, op2
+ */
- // First, generate the first operand into some register
+ // First, generate the first operand into some register
- genCompIntoFreeRegPair(op1, avoidReg | op2->gtRsvdRegs, RegSet::KEEP_REG);
- noway_assert(op1->gtFlags & GTF_REG_VAL);
+ genCompIntoFreeRegPair(op1, avoidReg | op2->gtRsvdRegs, RegSet::KEEP_REG);
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
- /* Make the second operand addressable */
+ /* Make the second operand addressable */
- addrReg = genMakeAddressable(op2, needReg, RegSet::KEEP_REG);
+ addrReg = genMakeAddressable(op2, needReg, RegSet::KEEP_REG);
- /* Make sure the result is in a free register pair */
+ /* Make sure the result is in a free register pair */
- genRecoverRegPair(op1, REG_PAIR_NONE, RegSet::KEEP_REG);
- regPair = op1->gtRegPair;
- regHi = genRegPairHi(regPair);
+ genRecoverRegPair(op1, REG_PAIR_NONE, RegSet::KEEP_REG);
+ regPair = op1->gtRegPair;
+ regHi = genRegPairHi(regPair);
- /* The operand might have interfered with the address */
+ /* The operand might have interfered with the address */
- addrReg = genKeepAddressable(op2, addrReg, genRegPairMask(regPair));
+ addrReg = genKeepAddressable(op2, addrReg, genRegPairMask(regPair));
- /* Compute the new value */
+ /* Compute the new value */
- inst_RV_TT(insHi, regHi, op2, 0);
+ inst_RV_TT(insHi, regHi, op2, 0);
- /* The value in the high register has been trashed */
+ /* The value in the high register has been trashed */
- regTracker.rsTrackRegTrash(regHi);
+ regTracker.rsTrackRegTrash(regHi);
- goto DONE_OR;
+ goto DONE_OR;
+ }
}
- }
- /* Generate the first operand into registers */
-
- if ( (genCountBits(needReg) == 2) &&
- ((regSet.rsRegMaskFree() & needReg) == needReg ) &&
- ((op2->gtRsvdRegs & needReg) == RBM_NONE) &&
- (!(tree->gtFlags & GTF_ASG)) )
- {
- regPair = regSet.rsPickRegPair(needReg);
- genComputeRegPair(op1, regPair, avoidReg | op2->gtRsvdRegs, RegSet::KEEP_REG);
- }
- else
- {
- genCompIntoFreeRegPair(op1, avoidReg | op2->gtRsvdRegs, RegSet::KEEP_REG);
- }
- noway_assert(op1->gtFlags & GTF_REG_VAL);
- regMaskTP op1Mask;
- regPair = op1->gtRegPair;
- op1Mask = genRegPairMask(regPair);
+ /* Generate the first operand into registers */
- /* Make the second operand addressable */
- regMaskTP needReg2;
- needReg2 = regSet.rsNarrowHint(needReg, ~op1Mask);
- addrReg = genMakeAddressable(op2, needReg2, RegSet::KEEP_REG);
+ if ((genCountBits(needReg) == 2) && ((regSet.rsRegMaskFree() & needReg) == needReg) &&
+ ((op2->gtRsvdRegs & needReg) == RBM_NONE) && (!(tree->gtFlags & GTF_ASG)))
+ {
+ regPair = regSet.rsPickRegPair(needReg);
+ genComputeRegPair(op1, regPair, avoidReg | op2->gtRsvdRegs, RegSet::KEEP_REG);
+ }
+ else
+ {
+ genCompIntoFreeRegPair(op1, avoidReg | op2->gtRsvdRegs, RegSet::KEEP_REG);
+ }
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
+ regMaskTP op1Mask;
+ regPair = op1->gtRegPair;
+ op1Mask = genRegPairMask(regPair);
- // TODO: If 'op1' got spilled and 'op2' happens to be
- // TODO: in a register, and we have add/mul/and/or/xor,
- // TODO: reverse the operands since we can perform the
- // TODO: operation directly with the spill temp, e.g.
- // TODO: 'add regHi, [temp]'.
+ /* Make the second operand addressable */
+ regMaskTP needReg2;
+ needReg2 = regSet.rsNarrowHint(needReg, ~op1Mask);
+ addrReg = genMakeAddressable(op2, needReg2, RegSet::KEEP_REG);
- /* Make sure the result is in a free register pair */
+ // TODO: If 'op1' got spilled and 'op2' happens to be
+ // TODO: in a register, and we have add/mul/and/or/xor,
+ // TODO: reverse the operands since we can perform the
+ // TODO: operation directly with the spill temp, e.g.
+ // TODO: 'add regHi, [temp]'.
- genRecoverRegPair(op1, REG_PAIR_NONE, RegSet::KEEP_REG);
- regPair = op1->gtRegPair;
- op1Mask = genRegPairMask(regPair);
+ /* Make sure the result is in a free register pair */
- regLo = genRegPairLo(regPair);
- regHi = genRegPairHi(regPair);
-
- /* Make sure that we don't spill regLo/regHi below */
- regSet.rsLockUsedReg(op1Mask);
+ genRecoverRegPair(op1, REG_PAIR_NONE, RegSet::KEEP_REG);
+ regPair = op1->gtRegPair;
+ op1Mask = genRegPairMask(regPair);
- /* The operand might have interfered with the address */
+ regLo = genRegPairLo(regPair);
+ regHi = genRegPairHi(regPair);
- addrReg = genKeepAddressable(op2, addrReg);
+ /* Make sure that we don't spill regLo/regHi below */
+ regSet.rsLockUsedReg(op1Mask);
- /* The value in the register pair is about to be trashed */
+ /* The operand might have interfered with the address */
- regTracker.rsTrackRegTrash(regLo);
- regTracker.rsTrackRegTrash(regHi);
+ addrReg = genKeepAddressable(op2, addrReg);
- /* Compute the new value */
+ /* The value in the register pair is about to be trashed */
- doLo = true;
- doHi = true;
+ regTracker.rsTrackRegTrash(regLo);
+ regTracker.rsTrackRegTrash(regHi);
- if (op2->gtOper == GT_CNS_LNG)
- {
- __int64 icon = op2->gtLngCon.gtLconVal;
+ /* Compute the new value */
- /* Check for "(op1 AND -1)" and "(op1 [X]OR 0)" */
+ doLo = true;
+ doHi = true;
- switch (oper)
+ if (op2->gtOper == GT_CNS_LNG)
{
- case GT_AND:
- if ((int)(icon ) == -1)
- doLo = false;
- if ((int)(icon >> 32) == -1)
- doHi = false;
+ __int64 icon = op2->gtLngCon.gtLconVal;
- if (!(icon & I64(0x00000000FFFFFFFF)))
- {
- genSetRegToIcon(regLo, 0);
- doLo = false;
- }
+ /* Check for "(op1 AND -1)" and "(op1 [X]OR 0)" */
- if (!(icon & I64(0xFFFFFFFF00000000)))
+ switch (oper)
{
- /* Just to always set low first*/
+ case GT_AND:
+ if ((int)(icon) == -1)
+ doLo = false;
+ if ((int)(icon >> 32) == -1)
+ doHi = false;
- if (doLo)
- {
- inst_RV_TT(insLo, regLo, op2, 0);
- doLo = false;
- }
- genSetRegToIcon(regHi, 0);
- doHi = false;
- }
+ if (!(icon & I64(0x00000000FFFFFFFF)))
+ {
+ genSetRegToIcon(regLo, 0);
+ doLo = false;
+ }
- break;
+ if (!(icon & I64(0xFFFFFFFF00000000)))
+ {
+ /* Just to always set low first*/
- case GT_OR:
- case GT_XOR:
- if (!(icon & I64(0x00000000FFFFFFFF)))
- doLo = false;
- if (!(icon & I64(0xFFFFFFFF00000000)))
- doHi = false;
- break;
- default:
- break;
+ if (doLo)
+ {
+ inst_RV_TT(insLo, regLo, op2, 0);
+ doLo = false;
+ }
+ genSetRegToIcon(regHi, 0);
+ doHi = false;
+ }
+
+ break;
+
+ case GT_OR:
+ case GT_XOR:
+ if (!(icon & I64(0x00000000FFFFFFFF)))
+ doLo = false;
+ if (!(icon & I64(0xFFFFFFFF00000000)))
+ doHi = false;
+ break;
+ default:
+ break;
+ }
}
- }
- // Fix 383813 X86/ARM ILGEN
- // Fix 383793 ARM ILGEN
- // Fix 383911 ARM ILGEN
- regMaskTP newMask; newMask = addrReg & ~op1Mask;
- regSet.rsLockUsedReg(newMask);
+ // Fix 383813 X86/ARM ILGEN
+ // Fix 383793 ARM ILGEN
+ // Fix 383911 ARM ILGEN
+ regMaskTP newMask;
+ newMask = addrReg & ~op1Mask;
+ regSet.rsLockUsedReg(newMask);
- if (doLo)
- {
- insFlags flagsLo = setCarry ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
- inst_RV_TT(insLo, regLo, op2, 0, EA_4BYTE, flagsLo);
- }
- if (doHi)
- {
- insFlags flagsHi = ovfl ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
- inst_RV_TT(insHi, regHi, op2, 4, EA_4BYTE, flagsHi);
- }
+ if (doLo)
+ {
+ insFlags flagsLo = setCarry ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ inst_RV_TT(insLo, regLo, op2, 0, EA_4BYTE, flagsLo);
+ }
+ if (doHi)
+ {
+ insFlags flagsHi = ovfl ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
+ inst_RV_TT(insHi, regHi, op2, 4, EA_4BYTE, flagsHi);
+ }
- regSet.rsUnlockUsedReg(newMask);
- regSet.rsUnlockUsedReg(op1Mask);
+ regSet.rsUnlockUsedReg(newMask);
+ regSet.rsUnlockUsedReg(op1Mask);
- DONE_OR:
+ DONE_OR:
- /* Free up anything that was tied up by the LHS */
+ /* Free up anything that was tied up by the LHS */
- genDoneAddressable(op2, addrReg, RegSet::KEEP_REG);
+ genDoneAddressable(op2, addrReg, RegSet::KEEP_REG);
- /* The result is where the first operand is sitting */
+ /* The result is where the first operand is sitting */
- genRecoverRegPair(op1, REG_PAIR_NONE, RegSet::FREE_REG);
+ genRecoverRegPair(op1, REG_PAIR_NONE, RegSet::FREE_REG);
- regPair = op1->gtRegPair;
+ regPair = op1->gtRegPair;
- if (ovfl)
- genCheckOverflow(tree);
+ if (ovfl)
+ genCheckOverflow(tree);
- goto DONE;
+ goto DONE;
- case GT_UMOD:
+ case GT_UMOD:
- regPair = genCodeForLongModInt(tree, needReg);
- goto DONE;
+ regPair = genCodeForLongModInt(tree, needReg);
+ goto DONE;
- case GT_MUL:
+ case GT_MUL:
- /* Special case: both operands promoted from int */
+ /* Special case: both operands promoted from int */
- assert(tree->gtIsValid64RsltMul());
+ assert(tree->gtIsValid64RsltMul());
- /* Change to an integer multiply temporarily */
+ /* Change to an integer multiply temporarily */
- tree->gtType = TYP_INT;
+ tree->gtType = TYP_INT;
- noway_assert(op1->gtOper == GT_CAST && op2->gtOper == GT_CAST);
- tree->gtOp.gtOp1 = op1->gtCast.CastOp();
- tree->gtOp.gtOp2 = op2->gtCast.CastOp();
+ noway_assert(op1->gtOper == GT_CAST && op2->gtOper == GT_CAST);
+ tree->gtOp.gtOp1 = op1->gtCast.CastOp();
+ tree->gtOp.gtOp2 = op2->gtCast.CastOp();
- assert(tree->gtFlags & GTF_MUL_64RSLT);
+ assert(tree->gtFlags & GTF_MUL_64RSLT);
#if defined(_TARGET_X86_)
- // imul on x86 requires EDX:EAX
- genComputeReg(tree, (RBM_EAX|RBM_EDX), RegSet::EXACT_REG, RegSet::FREE_REG);
- noway_assert(tree->gtFlags & GTF_REG_VAL);
- noway_assert(tree->gtRegNum == REG_EAX); // Also REG_EDX is setup with hi 32-bits
+ // imul on x86 requires EDX:EAX
+ genComputeReg(tree, (RBM_EAX | RBM_EDX), RegSet::EXACT_REG, RegSet::FREE_REG);
+ noway_assert(tree->gtFlags & GTF_REG_VAL);
+ noway_assert(tree->gtRegNum == REG_EAX); // Also REG_EDX is setup with hi 32-bits
#elif defined(_TARGET_ARM_)
- genComputeReg(tree, needReg, RegSet::ANY_REG, RegSet::FREE_REG);
- noway_assert(tree->gtFlags & GTF_REG_VAL);
+ genComputeReg(tree, needReg, RegSet::ANY_REG, RegSet::FREE_REG);
+ noway_assert(tree->gtFlags & GTF_REG_VAL);
#else
- assert(!"Unsupported target for 64-bit multiply codegen");
+ assert(!"Unsupported target for 64-bit multiply codegen");
#endif
- /* Restore gtType, op1 and op2 from the change above */
+ /* Restore gtType, op1 and op2 from the change above */
- tree->gtType = TYP_LONG;
- tree->gtOp.gtOp1 = op1;
- tree->gtOp.gtOp2 = op2;
+ tree->gtType = TYP_LONG;
+ tree->gtOp.gtOp1 = op1;
+ tree->gtOp.gtOp2 = op2;
#if defined(_TARGET_X86_)
- /* The result is now in EDX:EAX */
- regPair = REG_PAIR_EAXEDX;
+ /* The result is now in EDX:EAX */
+ regPair = REG_PAIR_EAXEDX;
#elif defined(_TARGET_ARM_)
- regPair = tree->gtRegPair;
+ regPair = tree->gtRegPair;
#endif
- goto DONE;
+ goto DONE;
- case GT_LSH: helper = CORINFO_HELP_LLSH; goto SHIFT;
- case GT_RSH: helper = CORINFO_HELP_LRSH; goto SHIFT;
- case GT_RSZ: helper = CORINFO_HELP_LRSZ; goto SHIFT;
+ case GT_LSH:
+ helper = CORINFO_HELP_LLSH;
+ goto SHIFT;
+ case GT_RSH:
+ helper = CORINFO_HELP_LRSH;
+ goto SHIFT;
+ case GT_RSZ:
+ helper = CORINFO_HELP_LRSZ;
+ goto SHIFT;
- SHIFT:
+ SHIFT:
- noway_assert(op1->gtType == TYP_LONG);
- noway_assert(genActualType(op2->gtType) == TYP_INT);
+ noway_assert(op1->gtType == TYP_LONG);
+ noway_assert(genActualType(op2->gtType) == TYP_INT);
- /* Is the second operand a constant? */
+ /* Is the second operand a constant? */
- if (op2->gtOper == GT_CNS_INT)
- {
- unsigned int count = op2->gtIntCon.gtIconVal;
+ if (op2->gtOper == GT_CNS_INT)
+ {
+ unsigned int count = op2->gtIntCon.gtIconVal;
- /* Compute the left operand into a free register pair */
+ /* Compute the left operand into a free register pair */
- genCompIntoFreeRegPair(op1, avoidReg | op2->gtRsvdRegs, RegSet::FREE_REG);
- noway_assert(op1->gtFlags & GTF_REG_VAL);
+ genCompIntoFreeRegPair(op1, avoidReg | op2->gtRsvdRegs, RegSet::FREE_REG);
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
- regPair = op1->gtRegPair;
- regLo = genRegPairLo(regPair);
- regHi = genRegPairHi(regPair);
+ regPair = op1->gtRegPair;
+ regLo = genRegPairLo(regPair);
+ regHi = genRegPairHi(regPair);
- /* Assume the value in the register pair is trashed. In some cases, though,
- a register might be set to zero, and we can use that information to improve
- some code generation.
- */
+ /* Assume the value in the register pair is trashed. In some cases, though,
+ a register might be set to zero, and we can use that information to improve
+ some code generation.
+ */
- regTracker.rsTrackRegTrash(regLo);
- regTracker.rsTrackRegTrash(regHi);
+ regTracker.rsTrackRegTrash(regLo);
+ regTracker.rsTrackRegTrash(regHi);
- /* Generate the appropriate shift instructions */
+ /* Generate the appropriate shift instructions */
- switch (oper)
- {
- case GT_LSH:
- if (count == 0)
- {
- // regHi, regLo are correct
- }
- else if (count < 32)
+ switch (oper)
{
+ case GT_LSH:
+ if (count == 0)
+ {
+ // regHi, regLo are correct
+ }
+ else if (count < 32)
+ {
#if defined(_TARGET_XARCH_)
- inst_RV_RV_IV(INS_shld, EA_4BYTE, regHi, regLo, count);
+ inst_RV_RV_IV(INS_shld, EA_4BYTE, regHi, regLo, count);
#elif defined(_TARGET_ARM_)
- inst_RV_SH(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, regHi, count);
- getEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, regHi, regHi, regLo, 32 - count, INS_FLAGS_DONT_CARE, INS_OPTS_LSR);
-#else // _TARGET_*
- NYI("INS_shld");
+ inst_RV_SH(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, regHi, count);
+ getEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, regHi, regHi, regLo, 32 - count,
+ INS_FLAGS_DONT_CARE, INS_OPTS_LSR);
+#else // _TARGET_*
+ NYI("INS_shld");
#endif // _TARGET_*
- inst_RV_SH(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, regLo, count);
- }
- else // count >= 32
- {
- assert(count >= 32);
- if (count < 64)
- {
-#if defined(_TARGET_ARM_)
- if (count == 32)
- {
- // mov low dword into high dword (i.e. shift left by 32-bits)
- inst_RV_RV(INS_mov, regHi, regLo);
+ inst_RV_SH(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, regLo, count);
}
- else
+ else // count >= 32
{
- assert(count > 32 && count < 64);
- getEmitter()->emitIns_R_R_I(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, regHi, regLo, count - 32);
+ assert(count >= 32);
+ if (count < 64)
+ {
+#if defined(_TARGET_ARM_)
+ if (count == 32)
+ {
+ // mov low dword into high dword (i.e. shift left by 32-bits)
+ inst_RV_RV(INS_mov, regHi, regLo);
+ }
+ else
+ {
+ assert(count > 32 && count < 64);
+ getEmitter()->emitIns_R_R_I(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, regHi, regLo,
+ count - 32);
+ }
+#else // _TARGET_*
+ // mov low dword into high dword (i.e. shift left by 32-bits)
+ inst_RV_RV(INS_mov, regHi, regLo);
+ if (count > 32)
+ {
+ // Shift high dword left by count - 32
+ inst_RV_SH(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, regHi, count - 32);
+ }
+#endif // _TARGET_*
+ }
+ else // count >= 64
+ {
+ assert(count >= 64);
+ genSetRegToIcon(regHi, 0);
+ }
+ genSetRegToIcon(regLo, 0);
}
-#else // _TARGET_*
- // mov low dword into high dword (i.e. shift left by 32-bits)
- inst_RV_RV(INS_mov, regHi, regLo);
- if (count > 32)
+ break;
+
+ case GT_RSH:
+ if (count == 0)
{
- // Shift high dword left by count - 32
- inst_RV_SH(INS_SHIFT_LEFT_LOGICAL, EA_4BYTE, regHi, count - 32);
+ // regHi, regLo are correct
}
-#endif // _TARGET_*
- }
- else // count >= 64
- {
- assert(count >= 64);
- genSetRegToIcon(regHi, 0);
- }
- genSetRegToIcon(regLo, 0);
- }
- break;
-
- case GT_RSH:
- if (count == 0)
- {
- // regHi, regLo are correct
- }
- else if (count < 32)
- {
+ else if (count < 32)
+ {
#if defined(_TARGET_XARCH_)
- inst_RV_RV_IV(INS_shrd, EA_4BYTE, regLo, regHi, count);
+ inst_RV_RV_IV(INS_shrd, EA_4BYTE, regLo, regHi, count);
#elif defined(_TARGET_ARM_)
- inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, regLo, count);
- getEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, regLo, regLo, regHi, 32 - count, INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
-#else // _TARGET_*
- NYI("INS_shrd");
+ inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, regLo, count);
+ getEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, regLo, regLo, regHi, 32 - count,
+ INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
+#else // _TARGET_*
+ NYI("INS_shrd");
#endif // _TARGET_*
- inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regHi, count);
- }
- else // count >= 32
- {
- assert(count >= 32);
- if (count < 64)
- {
-#if defined(_TARGET_ARM_)
- if (count == 32)
- {
- // mov high dword into low dword (i.e. shift right by 32-bits)
- inst_RV_RV(INS_mov, regLo, regHi);
+ inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regHi, count);
}
- else
+ else // count >= 32
{
- assert(count > 32 && count < 64);
- getEmitter()->emitIns_R_R_I(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regLo, regHi, count - 32);
- }
-#else // _TARGET_*
- // mov high dword into low dword (i.e. shift right by 32-bits)
- inst_RV_RV(INS_mov, regLo, regHi);
- if (count > 32)
- {
- // Shift low dword right by count - 32
- inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regLo, count - 32);
- }
+ assert(count >= 32);
+ if (count < 64)
+ {
+#if defined(_TARGET_ARM_)
+ if (count == 32)
+ {
+ // mov high dword into low dword (i.e. shift right by 32-bits)
+ inst_RV_RV(INS_mov, regLo, regHi);
+ }
+ else
+ {
+ assert(count > 32 && count < 64);
+ getEmitter()->emitIns_R_R_I(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regLo, regHi,
+ count - 32);
+ }
+#else // _TARGET_*
+ // mov high dword into low dword (i.e. shift right by 32-bits)
+ inst_RV_RV(INS_mov, regLo, regHi);
+ if (count > 32)
+ {
+ // Shift low dword right by count - 32
+ inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regLo, count - 32);
+ }
#endif // _TARGET_*
- }
+ }
- // Propagate sign bit in high dword
- inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regHi, 31);
+ // Propagate sign bit in high dword
+ inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regHi, 31);
- if (count >= 64)
- {
- // Propagate the sign from the high dword
- inst_RV_RV(INS_mov, regLo, regHi, TYP_INT);
- }
- }
- break;
+ if (count >= 64)
+ {
+ // Propagate the sign from the high dword
+ inst_RV_RV(INS_mov, regLo, regHi, TYP_INT);
+ }
+ }
+ break;
- case GT_RSZ:
- if (count == 0)
- {
- // regHi, regLo are correct
- }
- else if (count < 32)
- {
-#if defined(_TARGET_XARCH_)
- inst_RV_RV_IV(INS_shrd, EA_4BYTE, regLo, regHi, count);
-#elif defined(_TARGET_ARM_)
- inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, regLo, count);
- getEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, regLo, regLo, regHi, 32 - count, INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
-#else // _TARGET_*
- NYI("INS_shrd");
-#endif // _TARGET_*
- inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, regHi, count);
- }
- else // count >= 32
- {
- assert(count >= 32);
- if (count < 64)
- {
-#if defined(_TARGET_ARM_)
- if (count == 32)
+ case GT_RSZ:
+ if (count == 0)
{
- // mov high dword into low dword (i.e. shift right by 32-bits)
- inst_RV_RV(INS_mov, regLo, regHi);
+ // regHi, regLo are correct
}
- else
+ else if (count < 32)
{
- assert(count > 32 && count < 64);
- getEmitter()->emitIns_R_R_I(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, regLo, regHi, count - 32);
+#if defined(_TARGET_XARCH_)
+ inst_RV_RV_IV(INS_shrd, EA_4BYTE, regLo, regHi, count);
+#elif defined(_TARGET_ARM_)
+ inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, regLo, count);
+ getEmitter()->emitIns_R_R_R_I(INS_OR, EA_4BYTE, regLo, regLo, regHi, 32 - count,
+ INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
+#else // _TARGET_*
+ NYI("INS_shrd");
+#endif // _TARGET_*
+ inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, regHi, count);
}
-#else // _TARGET_*
- // mov high dword into low dword (i.e. shift right by 32-bits)
- inst_RV_RV(INS_mov, regLo, regHi);
- if (count > 32)
+ else // count >= 32
{
- // Shift low dword right by count - 32
- inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, regLo, count - 32);
- }
+ assert(count >= 32);
+ if (count < 64)
+ {
+#if defined(_TARGET_ARM_)
+ if (count == 32)
+ {
+ // mov high dword into low dword (i.e. shift right by 32-bits)
+ inst_RV_RV(INS_mov, regLo, regHi);
+ }
+ else
+ {
+ assert(count > 32 && count < 64);
+ getEmitter()->emitIns_R_R_I(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, regLo, regHi,
+ count - 32);
+ }
+#else // _TARGET_*
+ // mov high dword into low dword (i.e. shift right by 32-bits)
+ inst_RV_RV(INS_mov, regLo, regHi);
+ if (count > 32)
+ {
+ // Shift low dword right by count - 32
+ inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, regLo, count - 32);
+ }
#endif // _TARGET_*
- }
- else // count >= 64
- {
- assert(count >= 64);
- genSetRegToIcon(regLo, 0);
- }
- genSetRegToIcon(regHi, 0);
+ }
+ else // count >= 64
+ {
+ assert(count >= 64);
+ genSetRegToIcon(regLo, 0);
+ }
+ genSetRegToIcon(regHi, 0);
+ }
+ break;
+
+ default:
+ noway_assert(!"Illegal oper for long shift");
+ break;
}
- break;
- default:
- noway_assert(!"Illegal oper for long shift");
- break;
+ goto DONE_SHF;
}
- goto DONE_SHF;
- }
-
- /* Which operand are we supposed to compute first? */
+ /* Which operand are we supposed to compute first? */
- assert((RBM_SHIFT_LNG & RBM_LNGARG_0) == 0);
+ assert((RBM_SHIFT_LNG & RBM_LNGARG_0) == 0);
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- /* The second operand can't be a constant */
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ /* The second operand can't be a constant */
- noway_assert(op2->gtOper != GT_CNS_INT);
+ noway_assert(op2->gtOper != GT_CNS_INT);
- /* Load the shift count, hopefully into RBM_SHIFT */
- RegSet::ExactReg exactReg;
- if ((RBM_SHIFT_LNG & op1->gtRsvdRegs) == 0)
- exactReg = RegSet::EXACT_REG;
- else
- exactReg = RegSet::ANY_REG;
- genComputeReg(op2, RBM_SHIFT_LNG, exactReg, RegSet::KEEP_REG);
+ /* Load the shift count, hopefully into RBM_SHIFT */
+ RegSet::ExactReg exactReg;
+ if ((RBM_SHIFT_LNG & op1->gtRsvdRegs) == 0)
+ exactReg = RegSet::EXACT_REG;
+ else
+ exactReg = RegSet::ANY_REG;
+ genComputeReg(op2, RBM_SHIFT_LNG, exactReg, RegSet::KEEP_REG);
- /* Compute the left operand into REG_LNGARG_0 */
+ /* Compute the left operand into REG_LNGARG_0 */
- genComputeRegPair(op1, REG_LNGARG_0, avoidReg, RegSet::KEEP_REG, false);
- noway_assert(op1->gtFlags & GTF_REG_VAL);
+ genComputeRegPair(op1, REG_LNGARG_0, avoidReg, RegSet::KEEP_REG, false);
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
- /* Lock op1 so that it doesn't get trashed */
+ /* Lock op1 so that it doesn't get trashed */
- regSet.rsLockUsedReg(RBM_LNGARG_0);
+ regSet.rsLockUsedReg(RBM_LNGARG_0);
- /* Make sure the shift count wasn't displaced */
+ /* Make sure the shift count wasn't displaced */
- genRecoverReg(op2, RBM_SHIFT_LNG, RegSet::KEEP_REG);
+ genRecoverReg(op2, RBM_SHIFT_LNG, RegSet::KEEP_REG);
- /* Lock op2 */
+ /* Lock op2 */
- regSet.rsLockUsedReg(RBM_SHIFT_LNG);
- }
- else
- {
- /* Compute the left operand into REG_LNGARG_0 */
+ regSet.rsLockUsedReg(RBM_SHIFT_LNG);
+ }
+ else
+ {
+ /* Compute the left operand into REG_LNGARG_0 */
- genComputeRegPair(op1, REG_LNGARG_0, avoidReg, RegSet::KEEP_REG, false);
- noway_assert(op1->gtFlags & GTF_REG_VAL);
+ genComputeRegPair(op1, REG_LNGARG_0, avoidReg, RegSet::KEEP_REG, false);
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
- /* Compute the shift count into RBM_SHIFT */
+ /* Compute the shift count into RBM_SHIFT */
- genComputeReg(op2, RBM_SHIFT_LNG, RegSet::EXACT_REG, RegSet::KEEP_REG);
+ genComputeReg(op2, RBM_SHIFT_LNG, RegSet::EXACT_REG, RegSet::KEEP_REG);
- /* Lock op2 */
+ /* Lock op2 */
- regSet.rsLockUsedReg(RBM_SHIFT_LNG);
+ regSet.rsLockUsedReg(RBM_SHIFT_LNG);
- /* Make sure the value hasn't been displaced */
+ /* Make sure the value hasn't been displaced */
- genRecoverRegPair(op1, REG_LNGARG_0, RegSet::KEEP_REG);
+ genRecoverRegPair(op1, REG_LNGARG_0, RegSet::KEEP_REG);
- /* Lock op1 so that it doesn't get trashed */
+ /* Lock op1 so that it doesn't get trashed */
- regSet.rsLockUsedReg(RBM_LNGARG_0);
- }
+ regSet.rsLockUsedReg(RBM_LNGARG_0);
+ }
#ifndef _TARGET_X86_
- /* The generic helper is a C-routine and so it follows the full ABI */
- {
- /* Spill any callee-saved registers which are being used */
- regMaskTP spillRegs = RBM_CALLEE_TRASH & regSet.rsMaskUsed;
+ /* The generic helper is a C-routine and so it follows the full ABI */
+ {
+ /* Spill any callee-saved registers which are being used */
+ regMaskTP spillRegs = RBM_CALLEE_TRASH & regSet.rsMaskUsed;
- /* But do not spill our argument registers. */
- spillRegs &= ~(RBM_LNGARG_0 | RBM_SHIFT_LNG);
+ /* But do not spill our argument registers. */
+ spillRegs &= ~(RBM_LNGARG_0 | RBM_SHIFT_LNG);
- if (spillRegs)
- {
- regSet.rsSpillRegs(spillRegs);
+ if (spillRegs)
+ {
+ regSet.rsSpillRegs(spillRegs);
+ }
}
- }
#endif // !_TARGET_X86_
- /* Perform the shift by calling a helper function */
+ /* Perform the shift by calling a helper function */
- noway_assert(op1->gtRegPair == REG_LNGARG_0);
- noway_assert(op2->gtRegNum == REG_SHIFT_LNG);
- noway_assert((regSet.rsMaskLock & (RBM_LNGARG_0 | RBM_SHIFT_LNG)) == (RBM_LNGARG_0 | RBM_SHIFT_LNG));
+ noway_assert(op1->gtRegPair == REG_LNGARG_0);
+ noway_assert(op2->gtRegNum == REG_SHIFT_LNG);
+ noway_assert((regSet.rsMaskLock & (RBM_LNGARG_0 | RBM_SHIFT_LNG)) == (RBM_LNGARG_0 | RBM_SHIFT_LNG));
- genEmitHelperCall(helper,
- 0, // argSize
- EA_8BYTE); // retSize
+ genEmitHelperCall(helper,
+ 0, // argSize
+ EA_8BYTE); // retSize
#ifdef _TARGET_X86_
- /* The value in the register pair is trashed */
+ /* The value in the register pair is trashed */
- regTracker.rsTrackRegTrash(genRegPairLo(REG_LNGARG_0));
- regTracker.rsTrackRegTrash(genRegPairHi(REG_LNGARG_0));
-#else // _TARGET_X86_
- /* The generic helper is a C-routine and so it follows the full ABI */
- regTracker.rsTrackRegMaskTrash(RBM_CALLEE_TRASH);
+ regTracker.rsTrackRegTrash(genRegPairLo(REG_LNGARG_0));
+ regTracker.rsTrackRegTrash(genRegPairHi(REG_LNGARG_0));
+#else // _TARGET_X86_
+ /* The generic helper is a C-routine and so it follows the full ABI */
+ regTracker.rsTrackRegMaskTrash(RBM_CALLEE_TRASH);
#endif // _TARGET_X86_
- /* Release both operands */
+ /* Release both operands */
- regSet.rsUnlockUsedReg(RBM_LNGARG_0 | RBM_SHIFT_LNG);
- genReleaseRegPair(op1);
- genReleaseReg (op2);
+ regSet.rsUnlockUsedReg(RBM_LNGARG_0 | RBM_SHIFT_LNG);
+ genReleaseRegPair(op1);
+ genReleaseReg(op2);
- DONE_SHF:
+ DONE_SHF:
- noway_assert(op1->gtFlags & GTF_REG_VAL);
- regPair = op1->gtRegPair;
- goto DONE;
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
+ regPair = op1->gtRegPair;
+ goto DONE;
- case GT_NEG:
- case GT_NOT:
+ case GT_NEG:
+ case GT_NOT:
- /* Generate the operand into some register pair */
+ /* Generate the operand into some register pair */
- genCompIntoFreeRegPair(op1, avoidReg, RegSet::FREE_REG);
- noway_assert(op1->gtFlags & GTF_REG_VAL);
+ genCompIntoFreeRegPair(op1, avoidReg, RegSet::FREE_REG);
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
- regPair = op1->gtRegPair;
+ regPair = op1->gtRegPair;
- /* Figure out which registers the value is in */
+ /* Figure out which registers the value is in */
- regLo = genRegPairLo(regPair);
- regHi = genRegPairHi(regPair);
+ regLo = genRegPairLo(regPair);
+ regHi = genRegPairHi(regPair);
- /* The value in the register pair is about to be trashed */
+ /* The value in the register pair is about to be trashed */
- regTracker.rsTrackRegTrash(regLo);
- regTracker.rsTrackRegTrash(regHi);
+ regTracker.rsTrackRegTrash(regLo);
+ regTracker.rsTrackRegTrash(regHi);
- /* Unary "neg": negate the value in the register pair */
- if (oper == GT_NEG)
- {
+ /* Unary "neg": negate the value in the register pair */
+ if (oper == GT_NEG)
+ {
#ifdef _TARGET_ARM_
- // ARM doesn't have an opcode that sets the carry bit like
- // x86, so we can't use neg/addc/neg. Instead we use subtract
- // with carry. Too bad this uses an extra register.
+ // ARM doesn't have an opcode that sets the carry bit like
+ // x86, so we can't use neg/addc/neg. Instead we use subtract
+ // with carry. Too bad this uses an extra register.
- // Lock regLo and regHi so we don't pick them, and then pick
- // a third register to be our 0.
- regMaskTP regPairMask = genRegMask(regLo) | genRegMask(regHi);
- regSet.rsLockReg(regPairMask);
- regMaskTP regBest = RBM_ALLINT & ~avoidReg;
- regNumber regZero = genGetRegSetToIcon(0, regBest);
- regSet.rsUnlockReg(regPairMask);
+ // Lock regLo and regHi so we don't pick them, and then pick
+ // a third register to be our 0.
+ regMaskTP regPairMask = genRegMask(regLo) | genRegMask(regHi);
+ regSet.rsLockReg(regPairMask);
+ regMaskTP regBest = RBM_ALLINT & ~avoidReg;
+ regNumber regZero = genGetRegSetToIcon(0, regBest);
+ regSet.rsUnlockReg(regPairMask);
- inst_RV_IV(INS_rsb, regLo, 0, EA_4BYTE, INS_FLAGS_SET);
- getEmitter()->emitIns_R_R_R_I(INS_sbc, EA_4BYTE, regHi, regZero, regHi, 0);
+ inst_RV_IV(INS_rsb, regLo, 0, EA_4BYTE, INS_FLAGS_SET);
+ getEmitter()->emitIns_R_R_R_I(INS_sbc, EA_4BYTE, regHi, regZero, regHi, 0);
#elif defined(_TARGET_XARCH_)
- inst_RV (INS_NEG, regLo, TYP_LONG);
- inst_RV_IV(INS_ADDC, regHi, 0, emitActualTypeSize(TYP_LONG));
- inst_RV (INS_NEG, regHi, TYP_LONG);
+ inst_RV(INS_NEG, regLo, TYP_LONG);
+ inst_RV_IV(INS_ADDC, regHi, 0, emitActualTypeSize(TYP_LONG));
+ inst_RV(INS_NEG, regHi, TYP_LONG);
#else
- NYI("GT_NEG on TYP_LONG");
+ NYI("GT_NEG on TYP_LONG");
#endif
- }
- else
- {
- /* Unary "not": flip all the bits in the register pair */
+ }
+ else
+ {
+ /* Unary "not": flip all the bits in the register pair */
- inst_RV (INS_NOT, regLo, TYP_LONG);
- inst_RV (INS_NOT, regHi, TYP_LONG);
- }
+ inst_RV(INS_NOT, regLo, TYP_LONG);
+ inst_RV(INS_NOT, regHi, TYP_LONG);
+ }
- goto DONE;
+ goto DONE;
#if LONG_ASG_OPS
- case GT_ASG_OR : insLo = insHi = INS_OR ; goto ASG_OPR;
- case GT_ASG_XOR: insLo = insHi = INS_XOR; goto ASG_OPR;
- case GT_ASG_AND: insLo = insHi = INS_AND; goto ASG_OPR;
- case GT_ASG_SUB: insLo = INS_sub; insHi = INS_SUBC; goto ASG_OPR;
- case GT_ASG_ADD: insLo = INS_add; insHi = INS_ADDC; goto ASG_OPR;
-
- ASG_OPR:
-
- if (op2->gtOper == GT_CNS_LNG)
- {
- __int64 lval = op2->gtLngCon.gtLconVal;
+ case GT_ASG_OR:
+ insLo = insHi = INS_OR;
+ goto ASG_OPR;
+ case GT_ASG_XOR:
+ insLo = insHi = INS_XOR;
+ goto ASG_OPR;
+ case GT_ASG_AND:
+ insLo = insHi = INS_AND;
+ goto ASG_OPR;
+ case GT_ASG_SUB:
+ insLo = INS_sub;
+ insHi = INS_SUBC;
+ goto ASG_OPR;
+ case GT_ASG_ADD:
+ insLo = INS_add;
+ insHi = INS_ADDC;
+ goto ASG_OPR;
+
+ ASG_OPR:
+
+ if (op2->gtOper == GT_CNS_LNG)
+ {
+ __int64 lval = op2->gtLngCon.gtLconVal;
- /* Make the target addressable */
+ /* Make the target addressable */
- addrReg = genMakeAddressable(op1, needReg, RegSet::FREE_REG);
+ addrReg = genMakeAddressable(op1, needReg, RegSet::FREE_REG);
- /* Optimize some special cases */
+ /* Optimize some special cases */
- doLo =
- doHi = true;
+ doLo = doHi = true;
- /* Check for "(op1 AND -1)" and "(op1 [X]OR 0)" */
+ /* Check for "(op1 AND -1)" and "(op1 [X]OR 0)" */
- switch (oper)
- {
- case GT_ASG_AND:
- if ((int)(lval ) == -1) doLo = false;
- if ((int)(lval >> 32) == -1) doHi = false;
- break;
+ switch (oper)
+ {
+ case GT_ASG_AND:
+ if ((int)(lval) == -1)
+ doLo = false;
+ if ((int)(lval >> 32) == -1)
+ doHi = false;
+ break;
- case GT_ASG_OR:
- case GT_ASG_XOR:
- if (!(lval & 0x00000000FFFFFFFF)) doLo = false;
- if (!(lval & 0xFFFFFFFF00000000)) doHi = false;
- break;
- }
+ case GT_ASG_OR:
+ case GT_ASG_XOR:
+ if (!(lval & 0x00000000FFFFFFFF))
+ doLo = false;
+ if (!(lval & 0xFFFFFFFF00000000))
+ doHi = false;
+ break;
+ }
- if (doLo) inst_TT_IV(insLo, op1, (int)(lval ), 0);
- if (doHi) inst_TT_IV(insHi, op1, (int)(lval >> 32), 4);
+ if (doLo)
+ inst_TT_IV(insLo, op1, (int)(lval), 0);
+ if (doHi)
+ inst_TT_IV(insHi, op1, (int)(lval >> 32), 4);
- bool isArith = (oper == GT_ASG_ADD || oper == GT_ASG_SUB);
- if (doLo || doHi)
- tree->gtFlags |= GTF_ZSF_SET;
+ bool isArith = (oper == GT_ASG_ADD || oper == GT_ASG_SUB);
+ if (doLo || doHi)
+ tree->gtFlags |= GTF_ZSF_SET;
- genDoneAddressable(op1, addrReg, RegSet::FREE_REG);
- goto DONE_ASSG_REGS;
- }
+ genDoneAddressable(op1, addrReg, RegSet::FREE_REG);
+ goto DONE_ASSG_REGS;
+ }
- /* TODO: allow non-const long assignment operators */
+ /* TODO: allow non-const long assignment operators */
- noway_assert(!"non-const long asgop NYI");
+ noway_assert(!"non-const long asgop NYI");
#endif // LONG_ASG_OPS
- case GT_IND:
- case GT_NULLCHECK:
+ case GT_IND:
+ case GT_NULLCHECK:
{
- regMaskTP tmpMask;
- int hiFirst;
-
- regMaskTP availMask = RBM_ALLINT & ~needReg;
+ regMaskTP tmpMask;
+ int hiFirst;
+
+ regMaskTP availMask = RBM_ALLINT & ~needReg;
/* Make sure the operand is addressable */
@@ -14881,11 +14637,11 @@ SIMPLE_OR_LONG:
hiFirst = FALSE;
- if (tmpMask & addrReg)
+ if (tmpMask & addrReg)
{
/* Does one or both of the target registers overlap? */
- if ((tmpMask & addrReg) != tmpMask)
+ if ((tmpMask & addrReg) != tmpMask)
{
/* Only one register overlaps */
@@ -14893,12 +14649,12 @@ SIMPLE_OR_LONG:
/* If the low register overlaps, load the upper half first */
- if (addrReg & genRegMask(genRegPairLo(regPair)))
+ if (addrReg & genRegMask(genRegPairLo(regPair)))
hiFirst = TRUE;
}
else
{
- regMaskTP regFree;
+ regMaskTP regFree;
/* The register completely overlaps with the address */
@@ -14907,12 +14663,12 @@ SIMPLE_OR_LONG:
/* Can we pick another pair easily? */
regFree = regSet.rsRegMaskFree() & ~addrReg;
- if (needReg)
+ if (needReg)
regFree &= needReg;
/* More than one free register available? */
- if (regFree && !genMaxOneBit(regFree))
+ if (regFree && !genMaxOneBit(regFree))
{
regPair = regSet.rsPickRegPair(regFree);
tmpMask = genRegPairMask(regPair);
@@ -14926,12 +14682,12 @@ SIMPLE_OR_LONG:
// Grab one fresh reg, and use any one of addrReg
- if (regFree) // Try to follow 'needReg'
+ if (regFree) // Try to follow 'needReg'
regLo = regSet.rsGrabReg(regFree);
- else // Pick any reg besides addrReg
+ else // Pick any reg besides addrReg
regLo = regSet.rsGrabReg(RBM_ALLINT & ~addrReg);
- unsigned regBit = 0x1;
+ unsigned regBit = 0x1;
regNumber regNo;
for (regNo = REG_INT_FIRST; regNo <= REG_INT_LAST; regNo = REG_NEXT(regNo), regBit <<= 1)
@@ -14964,7 +14720,7 @@ SIMPLE_OR_LONG:
/* Load the target registers from where the value is */
- if (hiFirst)
+ if (hiFirst)
{
inst_RV_AT(ins_Load(TYP_INT), EA_4BYTE, TYP_INT, regHi, addr, 4);
regSet.rsLockReg(genRegMask(regHi));
@@ -14982,177 +14738,175 @@ SIMPLE_OR_LONG:
#ifdef _TARGET_ARM_
if (tree->gtFlags & GTF_IND_VOLATILE)
{
- // Emit a memory barrier instruction after the load
+ // Emit a memory barrier instruction after the load
instGen_MemoryBarrier();
}
#endif
genUpdateLife(tree);
genDoneAddressable(tree, addrReg, RegSet::FREE_REG);
-
}
- goto DONE;
+ goto DONE;
- case GT_CAST:
+ case GT_CAST:
- /* What are we casting from? */
+ /* What are we casting from? */
- switch (op1->gtType)
- {
- case TYP_BOOL:
- case TYP_BYTE:
- case TYP_CHAR:
- case TYP_SHORT:
- case TYP_INT:
- case TYP_UBYTE:
- case TYP_BYREF:
+ switch (op1->gtType)
{
- regMaskTP hiRegMask;
- regMaskTP loRegMask;
-
- // For an unsigned cast we don't need to sign-extend the 32 bit value
- if (tree->gtFlags & GTF_UNSIGNED)
+ case TYP_BOOL:
+ case TYP_BYTE:
+ case TYP_CHAR:
+ case TYP_SHORT:
+ case TYP_INT:
+ case TYP_UBYTE:
+ case TYP_BYREF:
{
- // Does needReg have exactly two bits on and thus
- // specifies the exact register pair that we want to use
- if (!genMaxOneBit(needReg))
- {
- regPair = regSet.rsFindRegPairNo(needReg);
- if (needReg != genRegPairMask(regPair))
- goto ANY_FREE_REG_UNSIGNED;
- loRegMask = genRegMask(genRegPairLo(regPair));
- if ((loRegMask & regSet.rsRegMaskCanGrab()) == 0)
- goto ANY_FREE_REG_UNSIGNED;
- hiRegMask = genRegMask(genRegPairHi(regPair));
- }
- else
+ regMaskTP hiRegMask;
+ regMaskTP loRegMask;
+
+ // For an unsigned cast we don't need to sign-extend the 32 bit value
+ if (tree->gtFlags & GTF_UNSIGNED)
{
-ANY_FREE_REG_UNSIGNED:
- loRegMask = needReg;
- hiRegMask = needReg;
- }
+ // Does needReg have exactly two bits on and thus
+ // specifies the exact register pair that we want to use
+ if (!genMaxOneBit(needReg))
+ {
+ regPair = regSet.rsFindRegPairNo(needReg);
+ if (needReg != genRegPairMask(regPair))
+ goto ANY_FREE_REG_UNSIGNED;
+ loRegMask = genRegMask(genRegPairLo(regPair));
+ if ((loRegMask & regSet.rsRegMaskCanGrab()) == 0)
+ goto ANY_FREE_REG_UNSIGNED;
+ hiRegMask = genRegMask(genRegPairHi(regPair));
+ }
+ else
+ {
+ ANY_FREE_REG_UNSIGNED:
+ loRegMask = needReg;
+ hiRegMask = needReg;
+ }
- genComputeReg(op1, loRegMask, RegSet::ANY_REG, RegSet::KEEP_REG);
- noway_assert(op1->gtFlags & GTF_REG_VAL);
+ genComputeReg(op1, loRegMask, RegSet::ANY_REG, RegSet::KEEP_REG);
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
- regLo = op1->gtRegNum;
- loRegMask = genRegMask(regLo);
- regSet.rsLockUsedReg(loRegMask);
- regHi = regSet.rsPickReg(hiRegMask);
- regSet.rsUnlockUsedReg(loRegMask);
+ regLo = op1->gtRegNum;
+ loRegMask = genRegMask(regLo);
+ regSet.rsLockUsedReg(loRegMask);
+ regHi = regSet.rsPickReg(hiRegMask);
+ regSet.rsUnlockUsedReg(loRegMask);
- regPair = gen2regs2pair(regLo, regHi);
+ regPair = gen2regs2pair(regLo, regHi);
- // Move 0 to the higher word of the ULong
- genSetRegToIcon(regHi, 0, TYP_INT);
+ // Move 0 to the higher word of the ULong
+ genSetRegToIcon(regHi, 0, TYP_INT);
- /* We can now free up the operand */
- genReleaseReg(op1);
+ /* We can now free up the operand */
+ genReleaseReg(op1);
- goto DONE;
- }
+ goto DONE;
+ }
#ifdef _TARGET_XARCH_
- /* Cast of 'int' to 'long' --> Use cdq if EAX,EDX are available
- and we need the result to be in those registers.
- cdq is smaller so we use it for SMALL_CODE
- */
-
- if ((needReg & (RBM_EAX|RBM_EDX)) == (RBM_EAX|RBM_EDX) &&
- (regSet.rsRegMaskFree() & RBM_EDX) )
- {
- genCodeForTree(op1, RBM_EAX);
- regSet.rsMarkRegUsed(op1);
-
- /* If we have to spill EDX, might as well use the faster
- sar as the spill will increase code size anyway */
+ /* Cast of 'int' to 'long' --> Use cdq if EAX,EDX are available
+ and we need the result to be in those registers.
+ cdq is smaller so we use it for SMALL_CODE
+ */
- if (op1->gtRegNum != REG_EAX ||
- !(regSet.rsRegMaskFree() & RBM_EDX))
+ if ((needReg & (RBM_EAX | RBM_EDX)) == (RBM_EAX | RBM_EDX) &&
+ (regSet.rsRegMaskFree() & RBM_EDX))
{
- hiRegMask = regSet.rsRegMaskFree();
- goto USE_SAR_FOR_CAST;
- }
+ genCodeForTree(op1, RBM_EAX);
+ regSet.rsMarkRegUsed(op1);
- regSet.rsGrabReg (RBM_EDX);
- regTracker.rsTrackRegTrash(REG_EDX);
+ /* If we have to spill EDX, might as well use the faster
+ sar as the spill will increase code size anyway */
- /* Convert the int in EAX into a long in EDX:EAX */
+ if (op1->gtRegNum != REG_EAX || !(regSet.rsRegMaskFree() & RBM_EDX))
+ {
+ hiRegMask = regSet.rsRegMaskFree();
+ goto USE_SAR_FOR_CAST;
+ }
- instGen(INS_cdq);
+ regSet.rsGrabReg(RBM_EDX);
+ regTracker.rsTrackRegTrash(REG_EDX);
- /* The result is in EDX:EAX */
+ /* Convert the int in EAX into a long in EDX:EAX */
- regPair = REG_PAIR_EAXEDX;
- }
- else
-#endif
- {
- /* use the sar instruction to sign-extend a 32-bit integer */
+ instGen(INS_cdq);
- // Does needReg have exactly two bits on and thus
- // specifies the exact register pair that we want to use
- if (!genMaxOneBit(needReg))
- {
- regPair = regSet.rsFindRegPairNo(needReg);
- if ((regPair == REG_PAIR_NONE) || (needReg != genRegPairMask(regPair)))
- goto ANY_FREE_REG_SIGNED;
- loRegMask = genRegMask(genRegPairLo(regPair));
- if ((loRegMask & regSet.rsRegMaskCanGrab()) == 0)
- goto ANY_FREE_REG_SIGNED;
- hiRegMask = genRegMask(genRegPairHi(regPair));
+ /* The result is in EDX:EAX */
+
+ regPair = REG_PAIR_EAXEDX;
}
else
+#endif
{
-ANY_FREE_REG_SIGNED:
- loRegMask = needReg;
- hiRegMask = RBM_NONE;
- }
+ /* use the sar instruction to sign-extend a 32-bit integer */
- genComputeReg(op1, loRegMask, RegSet::ANY_REG, RegSet::KEEP_REG);
+ // Does needReg have exactly two bits on and thus
+ // specifies the exact register pair that we want to use
+ if (!genMaxOneBit(needReg))
+ {
+ regPair = regSet.rsFindRegPairNo(needReg);
+ if ((regPair == REG_PAIR_NONE) || (needReg != genRegPairMask(regPair)))
+ goto ANY_FREE_REG_SIGNED;
+ loRegMask = genRegMask(genRegPairLo(regPair));
+ if ((loRegMask & regSet.rsRegMaskCanGrab()) == 0)
+ goto ANY_FREE_REG_SIGNED;
+ hiRegMask = genRegMask(genRegPairHi(regPair));
+ }
+ else
+ {
+ ANY_FREE_REG_SIGNED:
+ loRegMask = needReg;
+ hiRegMask = RBM_NONE;
+ }
+
+ genComputeReg(op1, loRegMask, RegSet::ANY_REG, RegSet::KEEP_REG);
#ifdef _TARGET_XARCH_
-USE_SAR_FOR_CAST:
+ USE_SAR_FOR_CAST:
#endif
- noway_assert(op1->gtFlags & GTF_REG_VAL);
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
- regLo = op1->gtRegNum;
- loRegMask = genRegMask(regLo);
- regSet.rsLockUsedReg(loRegMask);
- regHi = regSet.rsPickReg(hiRegMask);
- regSet.rsUnlockUsedReg(loRegMask);
+ regLo = op1->gtRegNum;
+ loRegMask = genRegMask(regLo);
+ regSet.rsLockUsedReg(loRegMask);
+ regHi = regSet.rsPickReg(hiRegMask);
+ regSet.rsUnlockUsedReg(loRegMask);
- regPair = gen2regs2pair(regLo, regHi);
+ regPair = gen2regs2pair(regLo, regHi);
#ifdef _TARGET_ARM_
- /* Copy the lo32 bits from regLo to regHi and sign-extend it */
- // Use one instruction instead of two
- getEmitter()->emitIns_R_R_I(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regHi, regLo, 31);
+ /* Copy the lo32 bits from regLo to regHi and sign-extend it */
+ // Use one instruction instead of two
+ getEmitter()->emitIns_R_R_I(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regHi, regLo, 31);
#else
- /* Copy the lo32 bits from regLo to regHi and sign-extend it */
- inst_RV_RV(INS_mov, regHi, regLo, TYP_INT);
- inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regHi, 31);
+ /* Copy the lo32 bits from regLo to regHi and sign-extend it */
+ inst_RV_RV(INS_mov, regHi, regLo, TYP_INT);
+ inst_RV_SH(INS_SHIFT_RIGHT_ARITHM, EA_4BYTE, regHi, 31);
#endif
- /* The value in the upper register is trashed */
+ /* The value in the upper register is trashed */
- regTracker.rsTrackRegTrash(regHi);
- }
+ regTracker.rsTrackRegTrash(regHi);
+ }
- /* We can now free up the operand */
- genReleaseReg(op1);
+ /* We can now free up the operand */
+ genReleaseReg(op1);
- // conv.ovf.u8 could overflow if the original number was negative
- if (tree->gtOverflow() && TYP_ULONG == tree->CastToType())
- {
- regNumber hiReg = genRegPairHi(regPair);
- instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg); // set flags
- emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
+ // conv.ovf.u8 could overflow if the original number was negative
+ if (tree->gtOverflow() && TYP_ULONG == tree->CastToType())
+ {
+ regNumber hiReg = genRegPairHi(regPair);
+ instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg); // set flags
+ emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
+ }
}
- }
- goto DONE;
+ goto DONE;
- case TYP_FLOAT:
- case TYP_DOUBLE:
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
#if 0
/* Load the FP value onto the coprocessor stack */
@@ -15192,153 +14946,152 @@ USE_SAR_FOR_CAST:
compiler->tmpRlsTemp(temp);
goto DONE;
#else
- NO_WAY("Cast from TYP_FLOAT or TYP_DOUBLE supposed to be done via a helper call");
- break;
+ NO_WAY("Cast from TYP_FLOAT or TYP_DOUBLE supposed to be done via a helper call");
+ break;
#endif
- case TYP_LONG:
- case TYP_ULONG:
- {
- noway_assert(tree->gtOverflow()); // conv.ovf.u8 or conv.ovf.i8
+ case TYP_LONG:
+ case TYP_ULONG:
+ {
+ noway_assert(tree->gtOverflow()); // conv.ovf.u8 or conv.ovf.i8
- genComputeRegPair(op1, REG_PAIR_NONE, RBM_ALLINT & ~needReg, RegSet::FREE_REG);
- regPair = op1->gtRegPair;
+ genComputeRegPair(op1, REG_PAIR_NONE, RBM_ALLINT & ~needReg, RegSet::FREE_REG);
+ regPair = op1->gtRegPair;
- // Do we need to set the sign-flag, or can we checked if it is set?
- // and not do this "test" if so.
+ // Do we need to set the sign-flag, or can we checked if it is set?
+ // and not do this "test" if so.
- if (op1->gtFlags & GTF_REG_VAL)
- {
- regNumber hiReg = genRegPairHi(op1->gtRegPair);
- noway_assert(hiReg != REG_STK);
- instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg); // set flags
- }
- else
- {
- inst_TT_IV(INS_cmp, op1, 0, sizeof(int));
- }
+ if (op1->gtFlags & GTF_REG_VAL)
+ {
+ regNumber hiReg = genRegPairHi(op1->gtRegPair);
+ noway_assert(hiReg != REG_STK);
+ instGen_Compare_Reg_To_Zero(EA_4BYTE, hiReg); // set flags
+ }
+ else
+ {
+ inst_TT_IV(INS_cmp, op1, 0, sizeof(int));
+ }
- emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
- genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
- }
- goto DONE;
+ emitJumpKind jmpLTS = genJumpKindForOper(GT_LT, CK_SIGNED);
+ genJumpToThrowHlpBlk(jmpLTS, SCK_OVERFLOW);
+ }
+ goto DONE;
- default:
-#ifdef DEBUG
- compiler->gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ compiler->gtDispTree(tree);
#endif
- NO_WAY("unexpected cast to long");
- }
- break;
+ NO_WAY("unexpected cast to long");
+ }
+ break;
+ case GT_RETURN:
- case GT_RETURN:
+ /* TODO:
+ * This code is cloned from the regular processing of GT_RETURN values. We have to remember to
+ * call genPInvokeMethodEpilog anywhere that we have a GT_RETURN statement. We should really
+ * generate trees for the PInvoke prolog and epilog so we can remove these special cases.
+ */
- /* TODO:
- * This code is cloned from the regular processing of GT_RETURN values. We have to remember to
- * call genPInvokeMethodEpilog anywhere that we have a GT_RETURN statement. We should really
- * generate trees for the PInvoke prolog and epilog so we can remove these special cases.
- */
+ // TODO: this should be done AFTER we called exit mon so that
+ // we are sure that we don't have to keep 'this' alive
- // TODO: this should be done AFTER we called exit mon so that
- // we are sure that we don't have to keep 'this' alive
-
- if (compiler->info.compCallUnmanaged && (compiler->compCurBB == compiler->genReturnBB))
- {
- /* either it's an "empty" statement or the return statement
- of a synchronized method
- */
+ if (compiler->info.compCallUnmanaged && (compiler->compCurBB == compiler->genReturnBB))
+ {
+ /* either it's an "empty" statement or the return statement
+ of a synchronized method
+ */
- genPInvokeMethodEpilog();
- }
+ genPInvokeMethodEpilog();
+ }
#if CPU_LONG_USES_REGPAIR
- /* There must be a long return value */
+ /* There must be a long return value */
- noway_assert(op1);
+ noway_assert(op1);
- /* Evaluate the return value into EDX:EAX */
+ /* Evaluate the return value into EDX:EAX */
- genEvalIntoFreeRegPair(op1, REG_LNGRET, avoidReg);
+ genEvalIntoFreeRegPair(op1, REG_LNGRET, avoidReg);
- noway_assert(op1->gtFlags & GTF_REG_VAL);
- noway_assert(op1->gtRegPair == REG_LNGRET);
+ noway_assert(op1->gtFlags & GTF_REG_VAL);
+ noway_assert(op1->gtRegPair == REG_LNGRET);
#else
- NYI("64-bit return");
+ NYI("64-bit return");
#endif
#ifdef PROFILING_SUPPORTED
- //The profiling hook does not trash registers, so it's safe to call after we emit the code for
- //the GT_RETURN tree.
+ // The profiling hook does not trash registers, so it's safe to call after we emit the code for
+ // the GT_RETURN tree.
- if (compiler->compCurBB == compiler->genReturnBB)
- {
- genProfilingLeaveCallback();
- }
+ if (compiler->compCurBB == compiler->genReturnBB)
+ {
+ genProfilingLeaveCallback();
+ }
#endif
- return;
+ return;
- case GT_QMARK:
- noway_assert(!"inliner-generated ?: for longs NYI");
- NO_WAY("inliner-generated ?: for longs NYI");
- break;
+ case GT_QMARK:
+ noway_assert(!"inliner-generated ?: for longs NYI");
+ NO_WAY("inliner-generated ?: for longs NYI");
+ break;
- case GT_COMMA:
+ case GT_COMMA:
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- // Generate op2
- genCodeForTreeLng(op2, needReg, avoidReg);
- genUpdateLife (op2);
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ // Generate op2
+ genCodeForTreeLng(op2, needReg, avoidReg);
+ genUpdateLife(op2);
- noway_assert(op2->gtFlags & GTF_REG_VAL);
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
- regSet.rsMarkRegPairUsed(op2);
+ regSet.rsMarkRegPairUsed(op2);
- // Do side effects of op1
- genEvalSideEffects(op1);
+ // Do side effects of op1
+ genEvalSideEffects(op1);
- // Recover op2 if spilled
- genRecoverRegPair(op2, REG_PAIR_NONE, RegSet::KEEP_REG);
+ // Recover op2 if spilled
+ genRecoverRegPair(op2, REG_PAIR_NONE, RegSet::KEEP_REG);
- genReleaseRegPair(op2);
+ genReleaseRegPair(op2);
- genUpdateLife (tree);
+ genUpdateLife(tree);
- regPair = op2->gtRegPair;
- }
- else
- {
- noway_assert((tree->gtFlags & GTF_REVERSE_OPS) == 0);
+ regPair = op2->gtRegPair;
+ }
+ else
+ {
+ noway_assert((tree->gtFlags & GTF_REVERSE_OPS) == 0);
- /* Generate side effects of the first operand */
+ /* Generate side effects of the first operand */
- genEvalSideEffects(op1);
- genUpdateLife (op1);
+ genEvalSideEffects(op1);
+ genUpdateLife(op1);
- /* Is the value of the second operand used? */
+ /* Is the value of the second operand used? */
- if (tree->gtType == TYP_VOID)
- {
- /* The right operand produces no result */
+ if (tree->gtType == TYP_VOID)
+ {
+ /* The right operand produces no result */
- genEvalSideEffects(op2);
- genUpdateLife(tree);
- return;
- }
+ genEvalSideEffects(op2);
+ genUpdateLife(tree);
+ return;
+ }
- /* Generate the second operand, i.e. the 'real' value */
+ /* Generate the second operand, i.e. the 'real' value */
- genCodeForTreeLng(op2, needReg, avoidReg);
+ genCodeForTreeLng(op2, needReg, avoidReg);
- /* The result of 'op2' is also the final result */
+ /* The result of 'op2' is also the final result */
- regPair = op2->gtRegPair;
- }
+ regPair = op2->gtRegPair;
+ }
- goto DONE;
+ goto DONE;
- case GT_BOX:
+ case GT_BOX:
{
/* Generate the operand, i.e. the 'real' value */
@@ -15349,21 +15102,21 @@ USE_SAR_FOR_CAST:
regPair = op1->gtRegPair;
}
- goto DONE;
+ goto DONE;
- case GT_NOP:
- if (op1 == NULL)
- return;
+ case GT_NOP:
+ if (op1 == NULL)
+ return;
- genCodeForTreeLng(op1, needReg, avoidReg);
- regPair = op1->gtRegPair;
- goto DONE;
+ genCodeForTreeLng(op1, needReg, avoidReg);
+ regPair = op1->gtRegPair;
+ goto DONE;
- default:
- break;
+ default:
+ break;
}
-#ifdef DEBUG
+#ifdef DEBUG
compiler->gtDispTree(tree);
#endif
noway_assert(!"unexpected 64-bit operator");
@@ -15371,22 +15124,22 @@ USE_SAR_FOR_CAST:
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
regMaskTP retMask;
- case GT_CALL:
- retMask = genCodeForCall(tree, true);
- if (retMask == RBM_NONE)
- regPair = REG_PAIR_NONE;
- else
- regPair = regSet.rsFindRegPairNo(retMask);
- break;
+ case GT_CALL:
+ retMask = genCodeForCall(tree, true);
+ if (retMask == RBM_NONE)
+ regPair = REG_PAIR_NONE;
+ else
+ regPair = regSet.rsFindRegPairNo(retMask);
+ break;
- default:
-#ifdef DEBUG
- compiler->gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ compiler->gtDispTree(tree);
#endif
- NO_WAY("unexpected long operator");
+ NO_WAY("unexpected long operator");
}
DONE:
@@ -15403,33 +15156,30 @@ DONE:
#pragma warning(pop)
#endif
-
/*****************************************************************************
*
* Generate code for a mod of a long by an int.
*/
-regPairNo CodeGen::genCodeForLongModInt(GenTreePtr tree,
- regMaskTP needReg)
+regPairNo CodeGen::genCodeForLongModInt(GenTreePtr tree, regMaskTP needReg)
{
#ifdef _TARGET_X86_
- regPairNo regPair;
- regMaskTP addrReg;
+ regPairNo regPair;
+ regMaskTP addrReg;
- genTreeOps oper = tree->OperGet();
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
+ genTreeOps oper = tree->OperGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
/* Codegen only for Unsigned MOD */
noway_assert(oper == GT_UMOD);
/* op2 must be a long constant in the range 2 to 0x3fffffff */
- noway_assert((op2->gtOper == GT_CNS_LNG) &&
- (op2->gtLngCon.gtLconVal >= 2) &&
- (op2->gtLngCon.gtLconVal <= 0x3fffffff));
- int val = (int) op2->gtLngCon.gtLconVal;
+ noway_assert((op2->gtOper == GT_CNS_LNG) && (op2->gtLngCon.gtLconVal >= 2) &&
+ (op2->gtLngCon.gtLconVal <= 0x3fffffff));
+ int val = (int)op2->gtLngCon.gtLconVal;
op2->ChangeOperConst(GT_CNS_INT); // it's effectively an integer constant
@@ -15438,7 +15188,7 @@ regPairNo CodeGen::genCodeForLongModInt(GenTreePtr tree,
/* Which operand are we supposed to compute first? */
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
/* Compute the second operand into a scratch register, other
than EAX or EDX */
@@ -15447,9 +15197,7 @@ regPairNo CodeGen::genCodeForLongModInt(GenTreePtr tree,
/* Special case: if op2 is a local var we are done */
- if (op2->gtOper == GT_LCL_VAR ||
- op2->gtOper == GT_LCL_FLD ||
- op2->gtOper == GT_CLS_VAR)
+ if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_LCL_FLD || op2->gtOper == GT_CLS_VAR)
{
addrReg = genMakeRvalueAddressable(op2, needReg, RegSet::KEEP_REG, false);
}
@@ -15486,9 +15234,7 @@ regPairNo CodeGen::genCodeForLongModInt(GenTreePtr tree,
/* Special case: if op2 is a local var we are done */
- if (op2->gtOper == GT_LCL_VAR ||
- op2->gtOper == GT_LCL_FLD ||
- op2->gtOper == GT_CLS_VAR)
+ if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_LCL_FLD || op2->gtOper == GT_CLS_VAR)
{
addrReg = genMakeRvalueAddressable(op2, needReg, RegSet::KEEP_REG, false);
}
@@ -15530,7 +15276,7 @@ regPairNo CodeGen::genCodeForLongModInt(GenTreePtr tree,
This works because (a * 2^32 + b) % c = ((a % c) * 2^32 + b) % c
*/
- BasicBlock * lab_no_overflow = genCreateTempLabel();
+ BasicBlock* lab_no_overflow = genCreateTempLabel();
// grab a temporary register other than eax, edx, and op2->gtRegNum
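The identity quoted in the comment above is what makes the unsigned long-mod-int sequence safe: once the high word has been reduced modulo the divisor, the combined quotient fits in 32 bits and the x86 div cannot fault. A rough C++ sketch of the arithmetic (illustrative names, not the emitted instruction sequence):

#include <cstdint>

uint32_t ulongModSketch(uint64_t value, uint32_t c) // c assumed to be in [2, 0x3fffffff], as asserted above
{
    uint32_t hi = (uint32_t)(value >> 32);
    uint32_t lo = (uint32_t)value;

    if (hi >= c)
    {
        hi %= c; // the extra divide guarded by the 'jb lab_no_overflow' branch above
    }

    // With hi < c, the 64-by-32 divide of (hi:lo) by c has a quotient below 2^32
    uint64_t combined = ((uint64_t)hi << 32) | lo;
    return (uint32_t)(combined % c);
}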
@@ -15539,16 +15285,16 @@ regPairNo CodeGen::genCodeForLongModInt(GenTreePtr tree,
// EAX and tempReg will be trashed by the mov instructions. Doing
// this early won't hurt, and might prevent confusion in genSetRegToIcon.
- regTracker.rsTrackRegTrash (REG_PAIR_TMP_LO);
- regTracker.rsTrackRegTrash (tempReg);
+ regTracker.rsTrackRegTrash(REG_PAIR_TMP_LO);
+ regTracker.rsTrackRegTrash(tempReg);
inst_RV_RV(INS_cmp, REG_PAIR_TMP_HI, op2->gtRegNum);
- inst_JMP(EJ_jb ,lab_no_overflow);
+ inst_JMP(EJ_jb, lab_no_overflow);
inst_RV_RV(INS_mov, tempReg, REG_PAIR_TMP_LO, TYP_INT);
inst_RV_RV(INS_mov, REG_PAIR_TMP_LO, REG_PAIR_TMP_HI, TYP_INT);
genSetRegToIcon(REG_PAIR_TMP_HI, 0, TYP_INT);
- inst_TT(INS_UNSIGNED_DIVIDE, op2);
+ inst_TT(INS_UNSIGNED_DIVIDE, op2);
inst_RV_RV(INS_mov, REG_PAIR_TMP_LO, tempReg, TYP_INT);
// Jump point for no overflow divide
@@ -15561,11 +15307,10 @@ regPairNo CodeGen::genCodeForLongModInt(GenTreePtr tree,
/* EAX, EDX, tempReg and op2->gtRegNum are now trashed */
- regTracker.rsTrackRegTrash (REG_PAIR_TMP_LO);
- regTracker.rsTrackRegTrash (REG_PAIR_TMP_HI);
- regTracker.rsTrackRegTrash (tempReg);
- regTracker.rsTrackRegTrash (op2->gtRegNum);
-
+ regTracker.rsTrackRegTrash(REG_PAIR_TMP_LO);
+ regTracker.rsTrackRegTrash(REG_PAIR_TMP_HI);
+ regTracker.rsTrackRegTrash(tempReg);
+ regTracker.rsTrackRegTrash(op2->gtRegNum);
if (tree->gtFlags & GTF_MOD_INT_RESULT)
{
@@ -15608,12 +15353,12 @@ regPairNo CodeGen::genCodeForLongModInt(GenTreePtr tree,
unsigned CodeGen::genRegCountForLiveIntEnregVars(GenTreePtr tree)
{
unsigned regCount = 0;
-
+
VARSET_ITER_INIT(compiler, iter, compiler->compCurLife, varNum);
while (iter.NextElem(compiler, &varNum))
{
- unsigned lclNum = compiler->lvaTrackedToVarNum[varNum];
- LclVarDsc * varDsc = &compiler->lvaTable[lclNum];
+ unsigned lclNum = compiler->lvaTrackedToVarNum[varNum];
+ LclVarDsc* varDsc = &compiler->lvaTable[lclNum];
if (varDsc->lvRegister && !varTypeIsFloating(varDsc->TypeGet()))
{
@@ -15621,39 +15366,39 @@ unsigned CodeGen::genRegCountForLiveIntEnregVars(GenTreePtr tree)
if (varTypeIsLong(varDsc->TypeGet()))
{
- // For enregistered LONG/ULONG, the lower half should always be in a register.
+ // For enregistered LONG/ULONG, the lower half should always be in a register.
noway_assert(varDsc->lvRegNum != REG_STK);
- // If the LONG/ULONG is NOT partially enregistered, then the higher half should be in a register as well.
+ // If the LONG/ULONG is NOT partially enregistered, then the higher half should be in a register as
+ // well.
if (varDsc->lvOtherReg != REG_STK)
{
++regCount;
}
}
}
- }
-
+ }
+
return regCount;
-
}
/*****************************************************************************/
/*****************************************************************************/
-#if CPU_HAS_FP_SUPPORT
+#if CPU_HAS_FP_SUPPORT
/*****************************************************************************
*
* Generate code for a floating-point operation.
*/
-void CodeGen::genCodeForTreeFlt(GenTreePtr tree,
- regMaskTP needReg, /* = RBM_ALLFLOAT */
- regMaskTP bestReg) /* = RBM_NONE */
+void CodeGen::genCodeForTreeFlt(GenTreePtr tree,
+ regMaskTP needReg, /* = RBM_ALLFLOAT */
+ regMaskTP bestReg) /* = RBM_NONE */
{
genCodeForTreeFloat(tree, needReg, bestReg);
if (tree->OperGet() == GT_RETURN)
{
- //Make sure to get ALL THE EPILOG CODE
+ // Make sure to get ALL THE EPILOG CODE
// TODO: this should be done AFTER we called exit mon so that
// we are sure that we don't have to keep 'this' alive
@@ -15668,8 +15413,8 @@ void CodeGen::genCodeForTreeFlt(GenTreePtr tree,
}
#ifdef PROFILING_SUPPORTED
- //The profiling hook does not trash registers, so it's safe to call after we emit the code for
- //the GT_RETURN tree.
+ // The profiling hook does not trash registers, so it's safe to call after we emit the code for
+ // the GT_RETURN tree.
if (compiler->compCurBB == compiler->genReturnBB)
{
@@ -15680,23 +15425,21 @@ void CodeGen::genCodeForTreeFlt(GenTreePtr tree,
}
/*****************************************************************************/
-#endif//CPU_HAS_FP_SUPPORT
+#endif // CPU_HAS_FP_SUPPORT
/*****************************************************************************
*
* Generate a table switch - the switch value (0-based) is in register 'reg'.
*/
-void CodeGen::genTableSwitch(regNumber reg,
- unsigned jumpCnt,
- BasicBlock ** jumpTab)
+void CodeGen::genTableSwitch(regNumber reg, unsigned jumpCnt, BasicBlock** jumpTab)
{
- unsigned jmpTabBase;
+ unsigned jmpTabBase;
if (jumpCnt == 1)
{
- //In debug code, we don't optimize away the trivial switch statements. So we can get here with a
- //BBJ_SWITCH with only a default case. Therefore, don't generate the switch table.
+ // In debug code, we don't optimize away the trivial switch statements. So we can get here with a
+ // BBJ_SWITCH with only a default case. Therefore, don't generate the switch table.
noway_assert(compiler->opts.MinOpts() || compiler->opts.compDbgCode);
inst_JMP(EJ_jmp, jumpTab[0]);
return;
@@ -15710,7 +15453,6 @@ void CodeGen::genTableSwitch(regNumber reg,
const bool fDefaultFollows = (compiler->compCurBB->bbNext == jumpTab[jumpCnt - 1]);
const bool fHaveScratchReg = ((regSet.rsRegMaskFree() & genRegMask(reg)) != 0);
-
unsigned minSwitchTabJumpCnt = 2; // table is better than just 2 cmp/jcc
// This means really just a single cmp/jcc (aka a simple if/else)
@@ -15718,15 +15460,15 @@ void CodeGen::genTableSwitch(regNumber reg,
minSwitchTabJumpCnt++;
#ifdef _TARGET_ARM_
- // On the ARM for small switch tables we will
+ // On the ARM for small switch tables we will
// generate a sequence of compare and branch instructions
// because the code to load the base of the switch
// table is huge and hideous due to the relocation... :(
- //
+ //
minSwitchTabJumpCnt++;
if (fHaveScratchReg)
minSwitchTabJumpCnt++;
-
+
#endif // _TARGET_ARM_
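The comments above describe the trade-off: a jump table only pays for itself once it replaces enough compare-and-branch pairs, and the bar is raised on ARM because materializing the table base is expensive. A small C++ sketch of the two dispatch shapes this routine chooses between (hypothetical helper; the real code emits instructions rather than calling through a table, and the default target is the last entry of jumpTab):

typedef void (*CaseLabel)();

void switchDispatchSketch(unsigned reg, unsigned jumpCnt, CaseLabel* jumpTab, unsigned minSwitchTabJumpCnt)
{
    if (jumpCnt < minSwitchTabJumpCnt)
    {
        // Compare/branch chain: one cmp + jcc per case, default handled last
        for (unsigned i = 0; i < jumpCnt - 1; i++)
        {
            if (reg == i)
            {
                jumpTab[i]();
                return;
            }
        }
        jumpTab[jumpCnt - 1](); // default case
        return;
    }

    // Jump table: a single bounds check, then an indexed indirect jump
    if (reg >= jumpCnt - 1)
    {
        jumpTab[jumpCnt - 1](); // default case
        return;
    }
    jumpTab[reg]();
}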
if (jumpCnt < minSwitchTabJumpCnt)
@@ -15734,7 +15476,7 @@ void CodeGen::genTableSwitch(regNumber reg,
/* Does the first case label follow? */
emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
- if (fFirstCaseFollows)
+ if (fFirstCaseFollows)
{
/* Check for the default case */
inst_RV_IV(INS_cmp, reg, jumpCnt - 1, EA_4BYTE);
@@ -15757,7 +15499,7 @@ void CodeGen::genTableSwitch(regNumber reg,
inst_RV_RV(INS_mov, tmpReg, reg);
regTracker.rsTrackRegTrash(tmpReg);
reg = tmpReg;
- }
+ }
while (jumpCnt > 0)
{
@@ -15788,7 +15530,7 @@ void CodeGen::genTableSwitch(regNumber reg,
inst_RV_RV(INS_mov, tmpReg, reg);
regTracker.rsTrackRegTrash(tmpReg);
reg = tmpReg;
- }
+ }
while (jumpCnt > 0)
{
@@ -15803,7 +15545,8 @@ void CodeGen::genTableSwitch(regNumber reg,
}
}
- if ((fFirstCaseFollows || fDefaultFollows) && compiler->fgInDifferentRegions(compiler->compCurBB, compiler->compCurBB->bbNext))
+ if ((fFirstCaseFollows || fDefaultFollows) &&
+ compiler->fgInDifferentRegions(compiler->compCurBB, compiler->compCurBB->bbNext))
{
inst_JMP(EJ_jmp, compiler->compCurBB->bbNext);
}
@@ -15821,8 +15564,8 @@ void CodeGen::genTableSwitch(regNumber reg,
jmpTabBase = getEmitter()->emitBBTableDataGenBeg(jumpCnt - 1, false);
-#ifdef DEBUG
- if (compiler->opts.dspCode)
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
printf("\n J_M%03u_DS%02u LABEL DWORD\n", Compiler::s_compMethodsCount, jmpTabBase);
#endif
@@ -15832,8 +15575,8 @@ void CodeGen::genTableSwitch(regNumber reg,
noway_assert(target->bbFlags & BBF_JMP_TARGET);
-#ifdef DEBUG
- if (compiler->opts.dspCode)
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
printf(" DD L_M%03u_BB%02u\n", Compiler::s_compMethodsCount, target->bbNum);
#endif
@@ -15868,13 +15611,13 @@ void CodeGen::genTableSwitch(regNumber reg,
* Generate code for a switch statement.
*/
-void CodeGen::genCodeForSwitch(GenTreePtr tree)
+void CodeGen::genCodeForSwitch(GenTreePtr tree)
{
- unsigned jumpCnt;
- BasicBlock * * jumpTab;
+ unsigned jumpCnt;
+ BasicBlock** jumpTab;
- GenTreePtr oper;
- regNumber reg;
+ GenTreePtr oper;
+ regNumber reg;
noway_assert(tree->gtOper == GT_SWITCH);
oper = tree->gtOp.gtOp1;
@@ -15914,20 +15657,16 @@ void CodeGen::genCodeForSwitch(GenTreePtr tree)
*/
// inline
-void CodeGen::genEmitHelperCall(unsigned helper,
- int argSize,
- emitAttr retSize)
+void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize)
{
// Can we call the helper function directly
- void * addr = NULL, **pAddr = NULL;
+ void *addr = NULL, **pAddr = NULL;
#if defined(_TARGET_ARM_) && defined(DEBUG) && defined(PROFILING_SUPPORTED)
- // Don't ask VM if it hasn't requested ELT hooks
- if (!compiler->compProfilerHookNeeded &&
- compiler->opts.compJitELTHookEnabled &&
- (helper == CORINFO_HELP_PROF_FCN_ENTER ||
- helper == CORINFO_HELP_PROF_FCN_LEAVE ||
+ // Don't ask VM if it hasn't requested ELT hooks
+ if (!compiler->compProfilerHookNeeded && compiler->opts.compJitELTHookEnabled &&
+ (helper == CORINFO_HELP_PROF_FCN_ENTER || helper == CORINFO_HELP_PROF_FCN_LEAVE ||
helper == CORINFO_HELP_PROF_FCN_TAILCALL))
{
addr = compiler->compProfilerMethHnd;
@@ -15938,12 +15677,12 @@ void CodeGen::genEmitHelperCall(unsigned helper,
addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, (void**)&pAddr);
}
-
#ifdef _TARGET_ARM_
if (!addr || !arm_Valid_Imm_For_BL((ssize_t)addr))
{
// Load the address into a register and call through a register
- regNumber indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL indirection
+ regNumber indCallReg =
+ regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL indirection
if (addr)
{
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addr);
@@ -15954,66 +15693,49 @@ void CodeGen::genEmitHelperCall(unsigned helper,
regTracker.rsTrackRegTrash(indCallReg);
}
- getEmitter()->emitIns_Call(emitter::EC_INDIR_R,
- compiler->eeFindHelper(helper),
- INDEBUG_LDISASM_COMMA(nullptr)
- NULL, // addr
- argSize,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- BAD_IL_OFFSET, // ilOffset
- indCallReg, // ireg
- REG_NA, 0, 0, // xreg, xmul, disp
- false, // isJump
- emitter::emitNoGChelper(helper),
- (CorInfoHelpFunc)helper == CORINFO_HELP_PROF_FCN_LEAVE);
+ getEmitter()->emitIns_Call(emitter::EC_INDIR_R, compiler->eeFindHelper(helper),
+ INDEBUG_LDISASM_COMMA(nullptr) NULL, // addr
+ argSize, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur,
+ BAD_IL_OFFSET, // ilOffset
+ indCallReg, // ireg
+ REG_NA, 0, 0, // xreg, xmul, disp
+ false, // isJump
+ emitter::emitNoGChelper(helper),
+ (CorInfoHelpFunc)helper == CORINFO_HELP_PROF_FCN_LEAVE);
}
else
{
- getEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN,
- compiler->eeFindHelper(helper),
- INDEBUG_LDISASM_COMMA(nullptr)
- addr,
- argSize,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- BAD_IL_OFFSET, REG_NA, REG_NA, 0, 0, /* ilOffset, ireg, xreg, xmul, disp */
- false, /* isJump */
- emitter::emitNoGChelper(helper),
- (CorInfoHelpFunc)helper == CORINFO_HELP_PROF_FCN_LEAVE);
+ getEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN, compiler->eeFindHelper(helper),
+ INDEBUG_LDISASM_COMMA(nullptr) addr, argSize, retSize, gcInfo.gcVarPtrSetCur,
+ gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, BAD_IL_OFFSET, REG_NA, REG_NA, 0,
+ 0, /* ilOffset, ireg, xreg, xmul, disp */
+ false, /* isJump */
+ emitter::emitNoGChelper(helper),
+ (CorInfoHelpFunc)helper == CORINFO_HELP_PROF_FCN_LEAVE);
}
#else
{
- emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
+ emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
if (!addr)
{
callType = emitter::EC_FUNC_TOKEN_INDIR;
- addr = pAddr;
+ addr = pAddr;
}
- getEmitter()->emitIns_Call(callType,
- compiler->eeFindHelper(helper),
- INDEBUG_LDISASM_COMMA(nullptr)
- addr,
- argSize,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- BAD_IL_OFFSET, REG_NA, REG_NA, 0, 0, /* ilOffset, ireg, xreg, xmul, disp */
- false, /* isJump */
- emitter::emitNoGChelper(helper));
+ getEmitter()->emitIns_Call(callType, compiler->eeFindHelper(helper), INDEBUG_LDISASM_COMMA(nullptr) addr,
+ argSize, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, BAD_IL_OFFSET, REG_NA, REG_NA, 0,
+ 0, /* ilOffset, ireg, xreg, xmul, disp */
+ false, /* isJump */
+ emitter::emitNoGChelper(helper));
}
#endif
regTracker.rsTrashRegSet(RBM_CALLEE_TRASH);
- regTracker.rsTrashRegsForGCInterruptability();
+ regTracker.rsTrashRegsForGCInterruptability();
}
/*****************************************************************************
@@ -16022,7 +15744,7 @@ void CodeGen::genEmitHelperCall(unsigned helper,
* This function does not check if the register is marked as used, etc.
*/
-regMaskTP CodeGen::genPushRegs(regMaskTP regs, regMaskTP * byrefRegs, regMaskTP * noRefRegs)
+regMaskTP CodeGen::genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs)
{
*byrefRegs = RBM_NONE;
*noRefRegs = RBM_NONE;
@@ -16032,14 +15754,14 @@ regMaskTP CodeGen::genPushRegs(regMaskTP regs, regMaskTP * byrefRegs,
if (regs == RBM_NONE)
return RBM_NONE;
-#if FEATURE_FIXED_OUT_ARGS
+#if FEATURE_FIXED_OUT_ARGS
NYI("Don't call genPushRegs with real regs!");
return RBM_NONE;
#else // FEATURE_FIXED_OUT_ARGS
- noway_assert(genTypeStSz(TYP_REF) == genTypeStSz(TYP_I_IMPL));
+ noway_assert(genTypeStSz(TYP_REF) == genTypeStSz(TYP_I_IMPL));
noway_assert(genTypeStSz(TYP_BYREF) == genTypeStSz(TYP_I_IMPL));
regMaskTP pushedRegs = regs;
@@ -16056,14 +15778,12 @@ regMaskTP CodeGen::genPushRegs(regMaskTP regs, regMaskTP * byrefRegs,
{
type = TYP_REF;
}
- else
- if (regBit & gcInfo.gcRegByrefSetCur)
+ else if (regBit & gcInfo.gcRegByrefSetCur)
{
*byrefRegs |= regBit;
type = TYP_BYREF;
}
- else
- if (noRefRegs != NULL)
+ else if (noRefRegs != NULL)
{
*noRefRegs |= regBit;
type = TYP_I_IMPL;
@@ -16084,7 +15804,6 @@ regMaskTP CodeGen::genPushRegs(regMaskTP regs, regMaskTP * byrefRegs,
return pushedRegs;
#endif // FEATURE_FIXED_OUT_ARGS
-
}
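genPushRegs above saves each requested register with a type that tells the GC encoder whether the pushed slot holds an object reference, a byref, or plain integer bits, and genPopRegs below restores them in the reverse order. A self-contained sketch of that classification using stand-in types (uint64_t masks and an enum instead of regMaskTP and var_types; not the JIT's own code):

#include <cstdint>
#include <vector>

enum SlotKind { SLOT_REF, SLOT_BYREF, SLOT_INT };

std::vector<SlotKind> pushRegsSketch(uint64_t regs, uint64_t gcrefRegs, uint64_t byrefRegs)
{
    std::vector<SlotKind> pushed; // records push order, lowest register number first

    for (uint64_t bit = 1; regs != 0; bit <<= 1)
    {
        if ((regs & bit) == 0)
        {
            continue;
        }
        regs &= ~bit;

        if (bit & gcrefRegs)
        {
            pushed.push_back(SLOT_REF); // slot must be reported as a live GC reference
        }
        else if (bit & byrefRegs)
        {
            pushed.push_back(SLOT_BYREF); // slot holds an interior pointer
        }
        else
        {
            pushed.push_back(SLOT_INT); // plain bits, invisible to the GC
        }
    }

    return pushed; // the pop side walks this list in reverse
}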
/*****************************************************************************
@@ -16092,12 +15811,12 @@ regMaskTP CodeGen::genPushRegs(regMaskTP regs, regMaskTP * byrefRegs,
* Pop the registers pushed by genPushRegs()
*/
-void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs)
+void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs)
{
if (regs == RBM_NONE)
return;
-#if FEATURE_FIXED_OUT_ARGS
+#if FEATURE_FIXED_OUT_ARGS
NYI("Don't call genPopRegs with real regs!");
@@ -16106,9 +15825,9 @@ void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, reg
noway_assert((regs & byrefRegs) == byrefRegs);
noway_assert((regs & noRefRegs) == noRefRegs);
// noway_assert((regs & regSet.rsRegMaskFree()) == regs); // Don't care. Caller is responsible for all this
- noway_assert((regs & (gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur)) == RBM_NONE);
+ noway_assert((regs & (gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur)) == RBM_NONE);
- noway_assert(genTypeStSz(TYP_REF) == genTypeStSz(TYP_INT));
+ noway_assert(genTypeStSz(TYP_REF) == genTypeStSz(TYP_INT));
noway_assert(genTypeStSz(TYP_BYREF) == genTypeStSz(TYP_INT));
// Walk the registers in the reverse order as genPushRegs()
@@ -16124,8 +15843,7 @@ void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, reg
{
type = TYP_BYREF;
}
- else
- if (regBit & noRefRegs)
+ else if (regBit & noRefRegs)
{
type = TYP_INT;
}
@@ -16144,7 +15862,6 @@ void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, reg
}
#endif // FEATURE_FIXED_OUT_ARGS
-
}
/*****************************************************************************
@@ -16153,13 +15870,13 @@ void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, reg
* stuff pushed.
*/
-#if !FEATURE_FIXED_OUT_ARGS
+#if !FEATURE_FIXED_OUT_ARGS
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-size_t CodeGen::genPushArgList(GenTreePtr call)
-{
+size_t CodeGen::genPushArgList(GenTreePtr call)
+{
GenTreeArgList* regArgs = call->gtCall.gtCallLateArgs;
size_t size = 0;
regMaskTP addrReg;
@@ -16168,7 +15885,7 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
// Create a local, artificial GenTreeArgList that includes the gtCallObjp, if that exists, as first argument,
// so we can iterate over this argument list more uniformly.
// Need to provide a temporary non-null first argument here: if we use this, we'll replace it.
- GenTreeArgList firstForObjp(/*temp dummy arg*/call, call->gtCall.gtCallArgs);
+ GenTreeArgList firstForObjp(/*temp dummy arg*/ call, call->gtCall.gtCallArgs);
if (call->gtCall.gtCallObjp == NULL)
{
args = call->gtCall.gtCallArgs;
@@ -16176,16 +15893,16 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
else
{
firstForObjp.Current() = call->gtCall.gtCallObjp;
- args = &firstForObjp;
+ args = &firstForObjp;
}
- GenTreePtr curr;
- var_types type;
- size_t opsz;
+ GenTreePtr curr;
+ var_types type;
+ size_t opsz;
for (; args; args = args->Rest())
{
- addrReg = DUMMY_INIT(RBM_CORRUPT); // to detect uninitialized use
+ addrReg = DUMMY_INIT(RBM_CORRUPT); // to detect uninitialized use
/* Get hold of the next argument value */
curr = args->Current();
@@ -16209,690 +15926,675 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
switch (type)
{
- case TYP_BOOL:
- case TYP_BYTE:
- case TYP_SHORT:
- case TYP_CHAR:
- case TYP_UBYTE:
+ case TYP_BOOL:
+ case TYP_BYTE:
+ case TYP_SHORT:
+ case TYP_CHAR:
+ case TYP_UBYTE:
- /* Don't want to push a small value, make it a full word */
+ /* Don't want to push a small value, make it a full word */
- genCodeForTree(curr, 0);
+ genCodeForTree(curr, 0);
- __fallthrough; // now the value should be in a register ...
+ __fallthrough; // now the value should be in a register ...
- case TYP_INT:
- case TYP_REF:
- case TYP_BYREF:
-#if ! CPU_HAS_FP_SUPPORT
- case TYP_FLOAT:
+ case TYP_INT:
+ case TYP_REF:
+ case TYP_BYREF:
+#if !CPU_HAS_FP_SUPPORT
+ case TYP_FLOAT:
#endif
- if (curr->gtFlags & GTF_LATE_ARG)
- {
- assert(curr->gtOper == GT_ASG);
- /* one more argument will be passed in a register */
- noway_assert(intRegState.rsCurRegArgNum < MAX_REG_ARG);
-
- /* arg is passed in the register, nothing on the stack */
-
- opsz = 0;
-
- }
+ if (curr->gtFlags & GTF_LATE_ARG)
+ {
+ assert(curr->gtOper == GT_ASG);
+ /* one more argument will be passed in a register */
+ noway_assert(intRegState.rsCurRegArgNum < MAX_REG_ARG);
- /* Is this value a handle? */
+ /* arg is passed in the register, nothing on the stack */
- if (curr->gtOper == GT_CNS_INT && curr->IsIconHandle())
- {
- /* Emit a fixup for the push instruction */
-
- inst_IV_handle(INS_push, curr->gtIntCon.gtIconVal);
- genSinglePush();
+ opsz = 0;
+ }
- addrReg = 0;
- break;
- }
+ /* Is this value a handle? */
+ if (curr->gtOper == GT_CNS_INT && curr->IsIconHandle())
+ {
+ /* Emit a fixup for the push instruction */
- /* Is the value a constant? */
+ inst_IV_handle(INS_push, curr->gtIntCon.gtIconVal);
+ genSinglePush();
- if (curr->gtOper == GT_CNS_INT)
- {
+ addrReg = 0;
+ break;
+ }
-#if REDUNDANT_LOAD
- regNumber reg = regTracker.rsIconIsInReg(curr->gtIntCon.gtIconVal);
+ /* Is the value a constant? */
- if (reg != REG_NA)
+ if (curr->gtOper == GT_CNS_INT)
{
- inst_RV(INS_push, reg, TYP_INT);
- }
- else
-#endif
- {
- inst_IV(INS_push, curr->gtIntCon.gtIconVal);
- }
- /* If the type is TYP_REF, then this must be a "null". So we can
- treat it as a TYP_INT as we don't need to report it as a GC ptr */
+#if REDUNDANT_LOAD
+ regNumber reg = regTracker.rsIconIsInReg(curr->gtIntCon.gtIconVal);
- noway_assert(curr->TypeGet() == TYP_INT ||
- (varTypeIsGC(curr->TypeGet()) && curr->gtIntCon.gtIconVal == 0));
+ if (reg != REG_NA)
+ {
+ inst_RV(INS_push, reg, TYP_INT);
+ }
+ else
+#endif
+ {
+ inst_IV(INS_push, curr->gtIntCon.gtIconVal);
+ }
- genSinglePush();
+ /* If the type is TYP_REF, then this must be a "null". So we can
+ treat it as a TYP_INT as we don't need to report it as a GC ptr */
- addrReg = 0;
- break;
- }
+ noway_assert(curr->TypeGet() == TYP_INT ||
+ (varTypeIsGC(curr->TypeGet()) && curr->gtIntCon.gtIconVal == 0));
+ genSinglePush();
- if (curr->gtFlags & GTF_LATE_ARG)
- {
- /* This must be a register arg temp assignment */
+ addrReg = 0;
+ break;
+ }
- noway_assert(curr->gtOper == GT_ASG);
+ if (curr->gtFlags & GTF_LATE_ARG)
+ {
+ /* This must be a register arg temp assignment */
- /* Evaluate it to the temp */
+ noway_assert(curr->gtOper == GT_ASG);
- genCodeForTree(curr, 0);
+ /* Evaluate it to the temp */
- /* Increment the current argument register counter */
+ genCodeForTree(curr, 0);
- intRegState.rsCurRegArgNum++;
+ /* Increment the current argument register counter */
- addrReg = 0;
- }
- else
- {
- /* This is a 32-bit integer non-register argument */
+ intRegState.rsCurRegArgNum++;
- addrReg = genMakeRvalueAddressable(curr, 0, RegSet::KEEP_REG, false);
- inst_TT(INS_push, curr);
- genSinglePush();
- genDoneAddressable(curr, addrReg, RegSet::KEEP_REG);
+ addrReg = 0;
+ }
+ else
+ {
+ /* This is a 32-bit integer non-register argument */
- }
- break;
+ addrReg = genMakeRvalueAddressable(curr, 0, RegSet::KEEP_REG, false);
+ inst_TT(INS_push, curr);
+ genSinglePush();
+ genDoneAddressable(curr, addrReg, RegSet::KEEP_REG);
+ }
+ break;
- case TYP_LONG:
+ case TYP_LONG:
#if !CPU_HAS_FP_SUPPORT
- case TYP_DOUBLE:
+ case TYP_DOUBLE:
#endif
- /* Is the value a constant? */
+ /* Is the value a constant? */
- if (curr->gtOper == GT_CNS_LNG)
- {
- inst_IV(INS_push, (int)(curr->gtLngCon.gtLconVal >> 32));
- genSinglePush();
- inst_IV(INS_push, (int)(curr->gtLngCon.gtLconVal ));
- genSinglePush();
+ if (curr->gtOper == GT_CNS_LNG)
+ {
+ inst_IV(INS_push, (int)(curr->gtLngCon.gtLconVal >> 32));
+ genSinglePush();
+ inst_IV(INS_push, (int)(curr->gtLngCon.gtLconVal));
+ genSinglePush();
- addrReg = 0;
- }
- else
- {
- addrReg = genMakeAddressable(curr, 0, RegSet::FREE_REG);
+ addrReg = 0;
+ }
+ else
+ {
+ addrReg = genMakeAddressable(curr, 0, RegSet::FREE_REG);
- inst_TT(INS_push, curr, sizeof(int));
- genSinglePush();
- inst_TT(INS_push, curr);
- genSinglePush();
- }
- break;
+ inst_TT(INS_push, curr, sizeof(int));
+ genSinglePush();
+ inst_TT(INS_push, curr);
+ genSinglePush();
+ }
+ break;
-#if CPU_HAS_FP_SUPPORT
- case TYP_FLOAT:
- case TYP_DOUBLE:
+#if CPU_HAS_FP_SUPPORT
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
#endif
#if FEATURE_STACK_FP_X87
- addrReg = genPushArgumentStackFP(curr);
+ addrReg = genPushArgumentStackFP(curr);
#else
- NYI("FP codegen");
- addrReg = 0;
+ NYI("FP codegen");
+ addrReg = 0;
#endif
- break;
+ break;
- case TYP_VOID:
+ case TYP_VOID:
- /* Is this a nothing node, deferred register argument? */
+ /* Is this a nothing node, deferred register argument? */
- if (curr->gtFlags & GTF_LATE_ARG)
- {
- GenTree* arg = curr;
- if (arg->gtOper == GT_COMMA)
+ if (curr->gtFlags & GTF_LATE_ARG)
{
- while (arg->gtOper == GT_COMMA)
+ GenTree* arg = curr;
+ if (arg->gtOper == GT_COMMA)
{
- GenTreePtr op1 = arg->gtOp.gtOp1;
- genEvalSideEffects(op1);
- genUpdateLife(op1);
- arg = arg->gtOp.gtOp2;
- }
- if (!arg->IsNothingNode())
- {
- genEvalSideEffects(arg);
- genUpdateLife(arg);
+ while (arg->gtOper == GT_COMMA)
+ {
+ GenTreePtr op1 = arg->gtOp.gtOp1;
+ genEvalSideEffects(op1);
+ genUpdateLife(op1);
+ arg = arg->gtOp.gtOp2;
+ }
+ if (!arg->IsNothingNode())
+ {
+ genEvalSideEffects(arg);
+ genUpdateLife(arg);
+ }
}
- }
- /* increment the register count and continue with the next argument */
+ /* increment the register count and continue with the next argument */
- intRegState.rsCurRegArgNum++;
+ intRegState.rsCurRegArgNum++;
- noway_assert(opsz == 0);
+ noway_assert(opsz == 0);
- addrReg = 0;
- break;
- }
+ addrReg = 0;
+ break;
+ }
- __fallthrough;
+ __fallthrough;
- case TYP_STRUCT:
- {
- GenTree* arg = curr;
- while (arg->gtOper == GT_COMMA)
+ case TYP_STRUCT:
{
- GenTreePtr op1 = arg->gtOp.gtOp1;
- genEvalSideEffects(op1);
- genUpdateLife(op1);
- arg = arg->gtOp.gtOp2;
- }
+ GenTree* arg = curr;
+ while (arg->gtOper == GT_COMMA)
+ {
+ GenTreePtr op1 = arg->gtOp.gtOp1;
+ genEvalSideEffects(op1);
+ genUpdateLife(op1);
+ arg = arg->gtOp.gtOp2;
+ }
- noway_assert(arg->gtOper == GT_OBJ
- || arg->gtOper == GT_MKREFANY
- || arg->gtOper == GT_IND);
- noway_assert((arg->gtFlags & GTF_REVERSE_OPS) == 0);
- noway_assert(addrReg == DUMMY_INIT(RBM_CORRUPT));
+ noway_assert(arg->gtOper == GT_OBJ || arg->gtOper == GT_MKREFANY || arg->gtOper == GT_IND);
+ noway_assert((arg->gtFlags & GTF_REVERSE_OPS) == 0);
+ noway_assert(addrReg == DUMMY_INIT(RBM_CORRUPT));
- if (arg->gtOper == GT_MKREFANY)
- {
- GenTreePtr op1 = arg->gtOp.gtOp1;
- GenTreePtr op2 = arg->gtOp.gtOp2;
+ if (arg->gtOper == GT_MKREFANY)
+ {
+ GenTreePtr op1 = arg->gtOp.gtOp1;
+ GenTreePtr op2 = arg->gtOp.gtOp2;
- addrReg = genMakeAddressable(op1, RBM_NONE, RegSet::KEEP_REG);
+ addrReg = genMakeAddressable(op1, RBM_NONE, RegSet::KEEP_REG);
- /* Is this value a handle? */
- if (op2->gtOper == GT_CNS_INT && op2->IsIconHandle())
- {
- /* Emit a fixup for the push instruction */
+ /* Is this value a handle? */
+ if (op2->gtOper == GT_CNS_INT && op2->IsIconHandle())
+ {
+ /* Emit a fixup for the push instruction */
- inst_IV_handle(INS_push, op2->gtIntCon.gtIconVal);
+ inst_IV_handle(INS_push, op2->gtIntCon.gtIconVal);
+ genSinglePush();
+ }
+ else
+ {
+ regMaskTP addrReg2 = genMakeRvalueAddressable(op2, 0, RegSet::KEEP_REG, false);
+ inst_TT(INS_push, op2);
+ genSinglePush();
+ genDoneAddressable(op2, addrReg2, RegSet::KEEP_REG);
+ }
+ addrReg = genKeepAddressable(op1, addrReg);
+ inst_TT(INS_push, op1);
genSinglePush();
+ genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
+
+ opsz = 2 * TARGET_POINTER_SIZE;
}
else
{
- regMaskTP addrReg2 = genMakeRvalueAddressable(op2, 0, RegSet::KEEP_REG, false);
- inst_TT(INS_push, op2);
- genSinglePush();
- genDoneAddressable(op2, addrReg2, RegSet::KEEP_REG);
-
- }
- addrReg = genKeepAddressable(op1, addrReg);
- inst_TT(INS_push, op1);
- genSinglePush();
- genDoneAddressable(op1, addrReg, RegSet::KEEP_REG);
+ noway_assert(arg->gtOper == GT_OBJ);
- opsz = 2*TARGET_POINTER_SIZE;
- }
- else
- {
- noway_assert(arg->gtOper == GT_OBJ);
-
- if (arg->gtObj.gtOp1->gtOper == GT_ADDR &&
- arg->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
- {
- GenTreePtr structLocalTree = arg->gtObj.gtOp1->gtOp.gtOp1;
- unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &compiler->lvaTable[structLclNum];
-
- // As much as we would like this to be a noway_assert, we can't because
- // there are some weird casts out there, and backwards compatibility
- // dictates we do *NOT* start rejecting them now. lvaGetPromotion and
- // lvPromoted in general currently do not require the local to be
- // TYP_STRUCT, so this assert is really more about how we wish the world
- // was than some JIT invariant.
- assert((structLocalTree->TypeGet() == TYP_STRUCT) || compiler->compUnsafeCastUsed);
-
- Compiler::lvaPromotionType promotionType = compiler->lvaGetPromotionType(varDsc);
-
- if (varDsc->lvPromoted &&
- promotionType==Compiler::PROMOTION_TYPE_INDEPENDENT) // Otherwise it is guaranteed to live on stack.
+ if (arg->gtObj.gtOp1->gtOper == GT_ADDR && arg->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
{
- assert(!varDsc->lvAddrExposed); // Compiler::PROMOTION_TYPE_INDEPENDENT ==> not exposed.
+ GenTreePtr structLocalTree = arg->gtObj.gtOp1->gtOp.gtOp1;
+ unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &compiler->lvaTable[structLclNum];
+
+ // As much as we would like this to be a noway_assert, we can't because
+ // there are some weird casts out there, and backwards compatibility
+ // dictates we do *NOT* start rejecting them now. lvaGetPromotion and
+ // lvPromoted in general currently do not require the local to be
+ // TYP_STRUCT, so this assert is really more about how we wish the world
+ // was than some JIT invariant.
+ assert((structLocalTree->TypeGet() == TYP_STRUCT) || compiler->compUnsafeCastUsed);
+
+ Compiler::lvaPromotionType promotionType = compiler->lvaGetPromotionType(varDsc);
+
+ if (varDsc->lvPromoted &&
+ promotionType ==
+ Compiler::PROMOTION_TYPE_INDEPENDENT) // Otherwise it is guaranteed to live on stack.
+ {
+ assert(!varDsc->lvAddrExposed); // Compiler::PROMOTION_TYPE_INDEPENDENT ==> not exposed.
- addrReg = 0;
+ addrReg = 0;
- // Get the number of BYTES to copy to the stack
- opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
- size_t bytesToBeCopied = opsz;
-
- // postponedFields is true if we have any postponed fields
- // Any field that does not start on a 4-byte boundary is a postponed field
- // Such a field is required to be a short or a byte
- //
- // postponedRegKind records the kind of scratch register we will
- // need to process the postponed fields
- // RBM_NONE means that we don't need a register
- //
- // expectedAlignedOffset records the aligned offset that
- // has to exist for a push to cover the postponed fields.
- // Since all promoted structs have the tightly packed property
- // we are guaranteed that we will have such a push
- //
- bool postponedFields = false;
- regMaskTP postponedRegKind = RBM_NONE;
- size_t expectedAlignedOffset = UINT_MAX;
-
- VARSET_TP* deadVarBits = NULL;
- compiler->GetPromotedStructDeathVars()->Lookup(structLocalTree, &deadVarBits);
-
- // Reverse loop, starts pushing from the end of the struct (i.e. the highest field offset)
- //
- for (int varNum = varDsc->lvFieldLclStart + varDsc->lvFieldCnt - 1;
- varNum >= (int) varDsc->lvFieldLclStart;
- varNum--)
- {
- LclVarDsc * fieldVarDsc = compiler->lvaTable + varNum;
-#ifdef DEBUG
- if (fieldVarDsc->lvExactSize == 2*sizeof(unsigned))
- {
- noway_assert(fieldVarDsc->lvFldOffset % (2*sizeof(unsigned)) == 0);
- noway_assert(fieldVarDsc->lvFldOffset + (2*sizeof(unsigned)) == bytesToBeCopied);
- }
-#endif
- // Whenever we see a stack-aligned fieldVarDsc then we use 4-byte push instruction(s)
- // For packed structs we will go back and store the unaligned bytes and shorts
- // in the next loop
+ // Get the number of BYTES to copy to the stack
+ opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
+ size_t bytesToBeCopied = opsz;
+
+ // postponedFields is true if we have any postponed fields
+ // Any field that does not start on a 4-byte boundary is a postponed field
+ // Such a field is required to be a short or a byte
//
- if (fieldVarDsc->lvStackAligned())
- {
- if (fieldVarDsc->lvExactSize != 2*sizeof(unsigned) &&
- fieldVarDsc->lvFldOffset + sizeof(void*) != bytesToBeCopied)
- {
- // Might need 4-byte padding for fields other than LONG and DOUBLE.
- // Just push some junk (i.e. EAX) on the stack.
- inst_RV(INS_push, REG_EAX, TYP_INT);
- genSinglePush();
+ // postponedRegKind records the kind of scratch register we will
+ // need to process the postponed fields
+ // RBM_NONE means that we don't need a register
+ //
+ // expectedAlignedOffset records the aligned offset that
+ // has to exist for a push to cover the postponed fields.
+ // Since all promoted structs have the tightly packed property
+ // we are guaranteed that we will have such a push
+ //
+ bool postponedFields = false;
+ regMaskTP postponedRegKind = RBM_NONE;
+ size_t expectedAlignedOffset = UINT_MAX;
- bytesToBeCopied -= sizeof(void*);
- }
+ VARSET_TP* deadVarBits = NULL;
+ compiler->GetPromotedStructDeathVars()->Lookup(structLocalTree, &deadVarBits);
- // If we have an expectedAlignedOffset make sure that this push instruction
- // is what we expect to cover the postponedFields
- //
- if (expectedAlignedOffset != UINT_MAX)
+ // Reverse loop, starts pushing from the end of the struct (i.e. the highest field offset)
+ //
+ for (int varNum = varDsc->lvFieldLclStart + varDsc->lvFieldCnt - 1;
+ varNum >= (int)varDsc->lvFieldLclStart; varNum--)
+ {
+ LclVarDsc* fieldVarDsc = compiler->lvaTable + varNum;
+#ifdef DEBUG
+ if (fieldVarDsc->lvExactSize == 2 * sizeof(unsigned))
{
- // This push must be for a small field
- noway_assert(fieldVarDsc->lvExactSize < 4);
- // The fldOffset for this push should be equal to the expectedAlignedOffset
- noway_assert(fieldVarDsc->lvFldOffset == expectedAlignedOffset);
- expectedAlignedOffset = UINT_MAX;
+ noway_assert(fieldVarDsc->lvFldOffset % (2 * sizeof(unsigned)) == 0);
+ noway_assert(fieldVarDsc->lvFldOffset + (2 * sizeof(unsigned)) == bytesToBeCopied);
}
-
- // Push the "upper half" of LONG var first
-
- if (isRegPairType(fieldVarDsc->lvType))
+#endif
+ // Whenever we see a stack-aligned fieldVarDsc then we use 4-byte push instruction(s)
+ // For packed structs we will go back and store the unaligned bytes and shorts
+ // in the next loop
+ //
+ if (fieldVarDsc->lvStackAligned())
{
- if (fieldVarDsc->lvOtherReg != REG_STK)
- {
- inst_RV(INS_push,
- fieldVarDsc->lvOtherReg,
- TYP_INT);
+ if (fieldVarDsc->lvExactSize != 2 * sizeof(unsigned) &&
+ fieldVarDsc->lvFldOffset + sizeof(void*) != bytesToBeCopied)
+ {
+ // Might need 4-byte padding for fields other than LONG and DOUBLE.
+ // Just push some junk (i.e. EAX) on the stack.
+ inst_RV(INS_push, REG_EAX, TYP_INT);
genSinglePush();
-
- // Prepare the set of vars to be cleared from gcref/gcbyref set
- // in case they become dead after genUpdateLife.
- // genDoneAddressable() will remove dead gc vars by calling gcInfo.gcMarkRegSetNpt.
- // Although it is not addrReg, we just borrow the name here.
- addrReg |= genRegMask(fieldVarDsc->lvOtherReg);
+
+ bytesToBeCopied -= sizeof(void*);
}
- else
+
+ // If we have an expectedAlignedOffset make sure that this push instruction
+ // is what we expect to cover the postponedFields
+ //
+ if (expectedAlignedOffset != UINT_MAX)
{
- getEmitter()->emitIns_S(INS_push,
- EA_4BYTE,
- varNum,
- sizeof(void*));
- genSinglePush();
- }
+ // This push must be for a small field
+ noway_assert(fieldVarDsc->lvExactSize < 4);
+ // The fldOffset for this push should be equal to the expectedAlignedOffset
+ noway_assert(fieldVarDsc->lvFldOffset == expectedAlignedOffset);
+ expectedAlignedOffset = UINT_MAX;
+ }
- bytesToBeCopied -= sizeof(void*);
- }
+ // Push the "upper half" of LONG var first
- // Push the "upper half" of DOUBLE var if it is not enregistered.
-
- if (fieldVarDsc->lvType == TYP_DOUBLE)
- {
- if (!fieldVarDsc->lvRegister)
- {
- getEmitter()->emitIns_S(INS_push,
- EA_4BYTE,
- varNum,
- sizeof(void*));
- genSinglePush();
+ if (isRegPairType(fieldVarDsc->lvType))
+ {
+ if (fieldVarDsc->lvOtherReg != REG_STK)
+ {
+ inst_RV(INS_push, fieldVarDsc->lvOtherReg, TYP_INT);
+ genSinglePush();
+
+ // Prepare the set of vars to be cleared from gcref/gcbyref set
+ // in case they become dead after genUpdateLife.
+ // genDoneAddressable() will remove dead gc vars by calling
+ // gcInfo.gcMarkRegSetNpt.
+ // Although it is not addrReg, we just borrow the name here.
+ addrReg |= genRegMask(fieldVarDsc->lvOtherReg);
+ }
+ else
+ {
+ getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, sizeof(void*));
+ genSinglePush();
+ }
+
+ bytesToBeCopied -= sizeof(void*);
}
- bytesToBeCopied -= sizeof(void*);
- }
-
- //
- // Push the field local.
- //
-
- if (fieldVarDsc->lvRegister)
- {
- if (!varTypeIsFloating(genActualType(fieldVarDsc->TypeGet())))
+ // Push the "upper half" of DOUBLE var if it is not enregistered.
+
+ if (fieldVarDsc->lvType == TYP_DOUBLE)
{
- inst_RV(INS_push,
- fieldVarDsc->lvRegNum,
- genActualType(fieldVarDsc->TypeGet()));
- genSinglePush();
+ if (!fieldVarDsc->lvRegister)
+ {
+ getEmitter()->emitIns_S(INS_push, EA_4BYTE, varNum, sizeof(void*));
+ genSinglePush();
+ }
- // Prepare the set of vars to be cleared from gcref/gcbyref set
- // in case they become dead after genUpdateLife.
- // genDoneAddressable() will remove dead gc vars by calling gcInfo.gcMarkRegSetNpt.
- // Although it is not addrReg, we just borrow the name here.
- addrReg |= genRegMask(fieldVarDsc->lvRegNum);
+ bytesToBeCopied -= sizeof(void*);
}
- else
- {
- // Must be TYP_FLOAT or TYP_DOUBLE
- noway_assert(fieldVarDsc->lvRegNum != REG_FPNONE);
-
- noway_assert(fieldVarDsc->lvExactSize == sizeof(unsigned) ||
- fieldVarDsc->lvExactSize == 2*sizeof(unsigned));
-
- inst_RV_IV(INS_sub, REG_SPBASE, fieldVarDsc->lvExactSize, EA_PTRSIZE);
-
- genSinglePush();
- if (fieldVarDsc->lvExactSize == 2*sizeof(unsigned))
- {
+
+ //
+ // Push the field local.
+ //
+
+ if (fieldVarDsc->lvRegister)
+ {
+ if (!varTypeIsFloating(genActualType(fieldVarDsc->TypeGet())))
+ {
+ inst_RV(INS_push, fieldVarDsc->lvRegNum,
+ genActualType(fieldVarDsc->TypeGet()));
genSinglePush();
+
+ // Prepare the set of vars to be cleared from gcref/gcbyref set
+ // in case they become dead after genUpdateLife.
+ // genDoneAddressable() will remove dead gc vars by calling
+ // gcInfo.gcMarkRegSetNpt.
+ // Although it is not addrReg, we just borrow the name here.
+ addrReg |= genRegMask(fieldVarDsc->lvRegNum);
}
-
-#if FEATURE_STACK_FP_X87
- GenTree* fieldTree = new (compiler, GT_REG_VAR) GenTreeLclVar(fieldVarDsc->lvType, varNum, BAD_IL_OFFSET);
- fieldTree->gtOper = GT_REG_VAR;
- fieldTree->gtRegNum = fieldVarDsc->lvRegNum;
- fieldTree->gtRegVar.gtRegNum = fieldVarDsc->lvRegNum;
- if ((arg->gtFlags & GTF_VAR_DEATH) != 0)
+ else
{
- if (fieldVarDsc->lvTracked &&
- (deadVarBits == NULL || VarSetOps::IsMember(compiler, *deadVarBits, fieldVarDsc->lvVarIndex)))
+ // Must be TYP_FLOAT or TYP_DOUBLE
+ noway_assert(fieldVarDsc->lvRegNum != REG_FPNONE);
+
+ noway_assert(fieldVarDsc->lvExactSize == sizeof(unsigned) ||
+ fieldVarDsc->lvExactSize == 2 * sizeof(unsigned));
+
+ inst_RV_IV(INS_sub, REG_SPBASE, fieldVarDsc->lvExactSize, EA_PTRSIZE);
+
+ genSinglePush();
+ if (fieldVarDsc->lvExactSize == 2 * sizeof(unsigned))
{
- fieldTree->gtFlags |= GTF_VAR_DEATH;
+ genSinglePush();
}
- }
- genCodeForTreeStackFP_Leaf(fieldTree);
-
- // Take reg to top of stack
-
- FlatFPX87_MoveToTOS(&compCurFPState, fieldTree->gtRegNum);
-
- // Pop it off to stack
- compCurFPState.Pop();
-
- getEmitter()->emitIns_AR_R(INS_fstp, EA_ATTR(fieldVarDsc->lvExactSize), REG_NA, REG_SPBASE, 0);
+
+#if FEATURE_STACK_FP_X87
+ GenTree* fieldTree = new (compiler, GT_REG_VAR)
+ GenTreeLclVar(fieldVarDsc->lvType, varNum, BAD_IL_OFFSET);
+ fieldTree->gtOper = GT_REG_VAR;
+ fieldTree->gtRegNum = fieldVarDsc->lvRegNum;
+ fieldTree->gtRegVar.gtRegNum = fieldVarDsc->lvRegNum;
+ if ((arg->gtFlags & GTF_VAR_DEATH) != 0)
+ {
+ if (fieldVarDsc->lvTracked &&
+ (deadVarBits == NULL ||
+ VarSetOps::IsMember(compiler, *deadVarBits,
+ fieldVarDsc->lvVarIndex)))
+ {
+ fieldTree->gtFlags |= GTF_VAR_DEATH;
+ }
+ }
+ genCodeForTreeStackFP_Leaf(fieldTree);
+
+ // Take reg to top of stack
+
+ FlatFPX87_MoveToTOS(&compCurFPState, fieldTree->gtRegNum);
+
+ // Pop it off to stack
+ compCurFPState.Pop();
+
+ getEmitter()->emitIns_AR_R(INS_fstp, EA_ATTR(fieldVarDsc->lvExactSize),
+ REG_NA, REG_SPBASE, 0);
#else
- NYI_FLAT_FP_X87("FP codegen");
+ NYI_FLAT_FP_X87("FP codegen");
#endif
+ }
+ }
+ else
+ {
+ getEmitter()->emitIns_S(INS_push,
+ (fieldVarDsc->TypeGet() == TYP_REF) ? EA_GCREF
+ : EA_4BYTE,
+ varNum, 0);
+ genSinglePush();
}
- }
- else
- {
- getEmitter()->emitIns_S(INS_push,
- (fieldVarDsc->TypeGet() == TYP_REF)?EA_GCREF:EA_4BYTE,
- varNum,
- 0);
- genSinglePush();
- }
- bytesToBeCopied -= sizeof(void*);
- }
- else // not stack aligned
- {
- noway_assert(fieldVarDsc->lvExactSize < 4);
+ bytesToBeCopied -= sizeof(void*);
+ }
+ else // not stack aligned
+ {
+ noway_assert(fieldVarDsc->lvExactSize < 4);
- // We will need to use a store byte or store word
- // to set this unaligned location
- postponedFields = true;
+ // We will need to use a store byte or store word
+ // to set this unaligned location
+ postponedFields = true;
- if (expectedAlignedOffset != UINT_MAX)
- {
- // This should never change until it is set back to UINT_MAX by an aligned offset
- noway_assert(expectedAlignedOffset == roundUp(fieldVarDsc->lvFldOffset, sizeof(void*)) - sizeof(void*));
- }
+ if (expectedAlignedOffset != UINT_MAX)
+ {
+ // This should never change until it is set back to UINT_MAX by an aligned
+ // offset
+ noway_assert(expectedAlignedOffset ==
+ roundUp(fieldVarDsc->lvFldOffset, sizeof(void*)) - sizeof(void*));
+ }
- expectedAlignedOffset = roundUp(fieldVarDsc->lvFldOffset, sizeof(void*)) - sizeof(void*);
+ expectedAlignedOffset =
+ roundUp(fieldVarDsc->lvFldOffset, sizeof(void*)) - sizeof(void*);
- noway_assert(expectedAlignedOffset < bytesToBeCopied);
+ noway_assert(expectedAlignedOffset < bytesToBeCopied);
- if (fieldVarDsc->lvRegister)
- {
- // Do we need to use a byte-able register?
- if (fieldVarDsc->lvExactSize == 1)
+ if (fieldVarDsc->lvRegister)
{
- // Did we enregister fieldVarDsc2 in a non byte-able register?
- if ((genRegMask(fieldVarDsc->lvRegNum) & RBM_BYTE_REGS) == 0)
+ // Do we need to use a byte-able register?
+ if (fieldVarDsc->lvExactSize == 1)
{
- // then we will need to grab a byte-able register
- postponedRegKind = RBM_BYTE_REGS;
+ // Did we enregister fieldVarDsc2 in a non byte-able register?
+ if ((genRegMask(fieldVarDsc->lvRegNum) & RBM_BYTE_REGS) == 0)
+ {
+ // then we will need to grab a byte-able register
+ postponedRegKind = RBM_BYTE_REGS;
+ }
}
}
- }
- else // not enregistered
- {
- if (fieldVarDsc->lvExactSize == 1)
+ else // not enregistered
{
- // We will need to grab a byte-able register
- postponedRegKind = RBM_BYTE_REGS;
- }
- else
- {
- // We will need to grab any scratch register
- if (postponedRegKind != RBM_BYTE_REGS)
- postponedRegKind = RBM_ALLINT;
+ if (fieldVarDsc->lvExactSize == 1)
+ {
+ // We will need to grab a byte-able register
+ postponedRegKind = RBM_BYTE_REGS;
+ }
+ else
+ {
+ // We will need to grab any scratch register
+ if (postponedRegKind != RBM_BYTE_REGS)
+ postponedRegKind = RBM_ALLINT;
+ }
}
}
}
- }
- // Now we've pushed all of the aligned fields.
- //
- // We should have pushed bytes equal to the entire struct
- noway_assert(bytesToBeCopied == 0);
-
- // We should have seen a push that covers every postponed field
- noway_assert(expectedAlignedOffset == UINT_MAX);
-
- // Did we have any postponed fields?
- if (postponedFields)
- {
- regNumber regNum = REG_STK; // means no register
-
- // If we needed a scratch register then grab it here
-
- if (postponedRegKind != RBM_NONE)
- regNum = regSet.rsGrabReg(postponedRegKind);
-
- // Forward loop, starts from the lowest field offset
+ // Now we've pushed all of the aligned fields.
//
- for (unsigned varNum = varDsc->lvFieldLclStart;
- varNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
- varNum++)
- {
- LclVarDsc * fieldVarDsc = compiler->lvaTable + varNum;
-
- // All stack aligned fields have already been pushed
- if (fieldVarDsc->lvStackAligned())
- continue;
+ // We should have pushed bytes equal to the entire struct
+ noway_assert(bytesToBeCopied == 0);
- // We have a postponed field
+ // We should have seen a push that covers every postponed field
+ noway_assert(expectedAlignedOffset == UINT_MAX);
- // It must be a byte or a short
- noway_assert(fieldVarDsc->lvExactSize < 4);
+ // Did we have any postponed fields?
+ if (postponedFields)
+ {
+ regNumber regNum = REG_STK; // means no register
+
+ // If we needed a scratch register then grab it here
- // Is the field enregistered?
- if (fieldVarDsc->lvRegister)
+ if (postponedRegKind != RBM_NONE)
+ regNum = regSet.rsGrabReg(postponedRegKind);
+
+ // Forward loop, starts from the lowest field offset
+ //
+ for (unsigned varNum = varDsc->lvFieldLclStart;
+ varNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; varNum++)
{
- // Frequently we can just use that register
- regNumber tmpRegNum = fieldVarDsc->lvRegNum;
-
- // Do we need to use a byte-able register?
- if (fieldVarDsc->lvExactSize == 1)
+ LclVarDsc* fieldVarDsc = compiler->lvaTable + varNum;
+
+ // All stack aligned fields have already been pushed
+ if (fieldVarDsc->lvStackAligned())
+ continue;
+
+ // We have a postponed field
+
+ // It must be a byte or a short
+ noway_assert(fieldVarDsc->lvExactSize < 4);
+
+ // Is the field enregistered?
+ if (fieldVarDsc->lvRegister)
{
- // Did we enregister the field in a non byte-able register?
- if ((genRegMask(tmpRegNum) & RBM_BYTE_REGS) == 0)
+ // Frequently we can just use that register
+ regNumber tmpRegNum = fieldVarDsc->lvRegNum;
+
+ // Do we need to use a byte-able register?
+ if (fieldVarDsc->lvExactSize == 1)
{
- // then we will need to use the byte-able register 'regNum'
- noway_assert((genRegMask(regNum) & RBM_BYTE_REGS) != 0);
-
- // Copy the register that contains fieldVarDsc into 'regNum'
- getEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, regNum, fieldVarDsc->lvRegNum);
- regTracker.rsTrackRegLclVar(regNum, varNum);
-
- // tmpRegNum is the register that we will extract the byte value from
- tmpRegNum = regNum;
+ // Did we enregister the field in a non byte-able register?
+ if ((genRegMask(tmpRegNum) & RBM_BYTE_REGS) == 0)
+ {
+ // then we will need to use the byte-able register 'regNum'
+ noway_assert((genRegMask(regNum) & RBM_BYTE_REGS) != 0);
+
+ // Copy the register that contains fieldVarDsc into 'regNum'
+ getEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, regNum,
+ fieldVarDsc->lvRegNum);
+ regTracker.rsTrackRegLclVar(regNum, varNum);
+
+ // tmpRegNum is the register that we will extract the byte value from
+ tmpRegNum = regNum;
+ }
+ noway_assert((genRegMask(tmpRegNum) & RBM_BYTE_REGS) != 0);
}
- noway_assert((genRegMask(tmpRegNum) & RBM_BYTE_REGS) != 0);
- }
-
- getEmitter()->emitIns_AR_R (ins_Store(fieldVarDsc->TypeGet()),
- (emitAttr)fieldVarDsc->lvExactSize,
- tmpRegNum,
- REG_SPBASE,
- fieldVarDsc->lvFldOffset);
- }
- else // not enregistered
- {
- // We will copy the non-enregister fieldVar into our scratch register 'regNum'
-
- noway_assert(regNum != REG_STK);
- getEmitter()->emitIns_R_S (ins_Load(fieldVarDsc->TypeGet()),
- (emitAttr)fieldVarDsc->lvExactSize,
- regNum,
- varNum,
- 0);
-
- regTracker.rsTrackRegLclVar(regNum, varNum);
-
- // Store the value (byte or short) into the stack
-
- getEmitter()->emitIns_AR_R (ins_Store(fieldVarDsc->TypeGet()),
- (emitAttr)fieldVarDsc->lvExactSize,
- regNum,
- REG_SPBASE,
- fieldVarDsc->lvFldOffset);
- }
- }
- }
- genUpdateLife(structLocalTree);
- break;
- }
+ getEmitter()->emitIns_AR_R(ins_Store(fieldVarDsc->TypeGet()),
+ (emitAttr)fieldVarDsc->lvExactSize, tmpRegNum,
+ REG_SPBASE, fieldVarDsc->lvFldOffset);
+ }
+ else // not enregistered
+ {
+ // We will copy the non-enregister fieldVar into our scratch register 'regNum'
- }
+ noway_assert(regNum != REG_STK);
+ getEmitter()->emitIns_R_S(ins_Load(fieldVarDsc->TypeGet()),
+ (emitAttr)fieldVarDsc->lvExactSize, regNum, varNum,
+ 0);
- genCodeForTree(arg->gtObj.gtOp1, 0);
- noway_assert(arg->gtObj.gtOp1->gtFlags & GTF_REG_VAL);
- regNumber reg = arg->gtObj.gtOp1->gtRegNum;
- // Get the number of DWORDS to copy to the stack
- opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
- unsigned slots = (unsigned)(opsz / sizeof(void*));
+ regTracker.rsTrackRegLclVar(regNum, varNum);
- BYTE* gcLayout = new (compiler, CMK_Codegen) BYTE[slots];
+ // Store the value (byte or short) into the stack
- compiler->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
+ getEmitter()->emitIns_AR_R(ins_Store(fieldVarDsc->TypeGet()),
+ (emitAttr)fieldVarDsc->lvExactSize, regNum,
+ REG_SPBASE, fieldVarDsc->lvFldOffset);
+ }
+ }
+ }
+ genUpdateLife(structLocalTree);
- BOOL bNoneGC = TRUE;
- for (int i = slots - 1; i >= 0; --i)
- {
- if (gcLayout[i] != TYPE_GC_NONE)
- {
- bNoneGC = FALSE;
- break;
+ break;
+ }
}
- }
- /* passing large structures using movq instead of pushes does not increase codesize very much */
- unsigned movqLenMin = 8;
- unsigned movqLenMax = 64;
- unsigned curBBweight = compiler->compCurBB->getBBWeight(compiler);
+ genCodeForTree(arg->gtObj.gtOp1, 0);
+ noway_assert(arg->gtObj.gtOp1->gtFlags & GTF_REG_VAL);
+ regNumber reg = arg->gtObj.gtOp1->gtRegNum;
+ // Get the number of DWORDS to copy to the stack
+ opsz = roundUp(compiler->info.compCompHnd->getClassSize(arg->gtObj.gtClass), sizeof(void*));
+ unsigned slots = (unsigned)(opsz / sizeof(void*));
- if ((compiler->compCodeOpt() == Compiler::SMALL_CODE) || (curBBweight == BB_ZERO_WEIGHT))
- {
- // Don't bother with this optimization in
- // rarely run blocks or when optimizing for size
- movqLenMax = movqLenMin = 0;
- }
- else if (compiler->compCodeOpt() == Compiler::FAST_CODE)
- {
- // Be more aggressive when optimizing for speed
- movqLenMax *= 2;
- }
+ BYTE* gcLayout = new (compiler, CMK_Codegen) BYTE[slots];
- /* Adjust for BB weight */
- if (curBBweight >= (BB_LOOP_WEIGHT*BB_UNITY_WEIGHT)/2)
- {
- // Be more aggressive when we are inside a loop
- movqLenMax *= 2;
- }
+ compiler->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
- if (compiler->opts.compCanUseSSE2 && bNoneGC &&
- (opsz >= movqLenMin) && (opsz <= movqLenMax))
- {
- JITLOG_THIS(compiler, (LL_INFO10000, "Using XMM instructions to pass %3d byte valuetype while compiling %s\n",
- opsz, compiler->info.compFullName));
+ BOOL bNoneGC = TRUE;
+ for (int i = slots - 1; i >= 0; --i)
+ {
+ if (gcLayout[i] != TYPE_GC_NONE)
+ {
+ bNoneGC = FALSE;
+ break;
+ }
+ }
- int stkDisp = (int)(unsigned)opsz;
- int curDisp = 0;
- regNumber xmmReg = REG_XMM0;
+ /* passing large structures using movq instead of pushes does not increase codesize very much */
+ unsigned movqLenMin = 8;
+ unsigned movqLenMax = 64;
+ unsigned curBBweight = compiler->compCurBB->getBBWeight(compiler);
- if (opsz & 0x4)
+ if ((compiler->compCodeOpt() == Compiler::SMALL_CODE) || (curBBweight == BB_ZERO_WEIGHT))
{
- stkDisp -= sizeof(void*);
- getEmitter()->emitIns_AR_R(INS_push, EA_4BYTE, REG_NA, reg, stkDisp);
- genSinglePush();
+ // Don't bother with this optimization in
+ // rarely run blocks or when optimizing for size
+ movqLenMax = movqLenMin = 0;
+ }
+ else if (compiler->compCodeOpt() == Compiler::FAST_CODE)
+ {
+ // Be more aggressive when optimizing for speed
+ movqLenMax *= 2;
}
- inst_RV_IV(INS_sub, REG_SPBASE, stkDisp, EA_PTRSIZE);
- genStackLevel += stkDisp;
-
- while (curDisp < stkDisp)
+ /* Adjust for BB weight */
+ if (curBBweight >= (BB_LOOP_WEIGHT * BB_UNITY_WEIGHT) / 2)
{
- getEmitter()->emitIns_R_AR(INS_movq, EA_8BYTE, xmmReg, reg, curDisp);
- getEmitter()->emitIns_AR_R(INS_movq, EA_8BYTE, xmmReg, REG_SPBASE, curDisp);
- curDisp += 2 * sizeof(void*);
+ // Be more aggressive when we are inside a loop
+ movqLenMax *= 2;
}
- noway_assert(curDisp == stkDisp);
- }
- else
- {
- for (int i = slots-1; i >= 0; --i)
+
+ if (compiler->opts.compCanUseSSE2 && bNoneGC && (opsz >= movqLenMin) && (opsz <= movqLenMax))
{
- emitAttr fieldSize;
- if (gcLayout[i] == TYPE_GC_NONE)
- fieldSize = EA_4BYTE;
- else if (gcLayout[i] == TYPE_GC_REF)
- fieldSize = EA_GCREF;
- else
+ JITLOG_THIS(compiler, (LL_INFO10000,
+ "Using XMM instructions to pass %3d byte valuetype while compiling %s\n",
+ opsz, compiler->info.compFullName));
+
+ int stkDisp = (int)(unsigned)opsz;
+ int curDisp = 0;
+ regNumber xmmReg = REG_XMM0;
+
+ if (opsz & 0x4)
{
- noway_assert(gcLayout[i] == TYPE_GC_BYREF);
- fieldSize = EA_BYREF;
+ stkDisp -= sizeof(void*);
+ getEmitter()->emitIns_AR_R(INS_push, EA_4BYTE, REG_NA, reg, stkDisp);
+ genSinglePush();
+ }
+
+ inst_RV_IV(INS_sub, REG_SPBASE, stkDisp, EA_PTRSIZE);
+ genStackLevel += stkDisp;
+
+ while (curDisp < stkDisp)
+ {
+ getEmitter()->emitIns_R_AR(INS_movq, EA_8BYTE, xmmReg, reg, curDisp);
+ getEmitter()->emitIns_AR_R(INS_movq, EA_8BYTE, xmmReg, REG_SPBASE, curDisp);
+ curDisp += 2 * sizeof(void*);
+ }
+ noway_assert(curDisp == stkDisp);
+ }
+ else
+ {
+ for (int i = slots - 1; i >= 0; --i)
+ {
+ emitAttr fieldSize;
+ if (gcLayout[i] == TYPE_GC_NONE)
+ fieldSize = EA_4BYTE;
+ else if (gcLayout[i] == TYPE_GC_REF)
+ fieldSize = EA_GCREF;
+ else
+ {
+ noway_assert(gcLayout[i] == TYPE_GC_BYREF);
+ fieldSize = EA_BYREF;
+ }
+ getEmitter()->emitIns_AR_R(INS_push, fieldSize, REG_NA, reg, i * sizeof(void*));
+ genSinglePush();
}
- getEmitter()->emitIns_AR_R(INS_push, fieldSize, REG_NA, reg, i*sizeof(void*));
- genSinglePush();
}
+ gcInfo.gcMarkRegSetNpt(genRegMask(reg)); // Kill the pointer in op1
}
- gcInfo.gcMarkRegSetNpt(genRegMask(reg)); // Kill the pointer in op1
+
+ addrReg = 0;
+ break;
}
-
- addrReg = 0;
- break;
- }
- default:
- noway_assert(!"unhandled/unexpected arg type");
- NO_WAY("unhandled/unexpected arg type");
+ default:
+ noway_assert(!"unhandled/unexpected arg type");
+ NO_WAY("unhandled/unexpected arg type");
}
/* Update the current set of live variables */
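
The hunk above covers the x86 path that pushes a struct argument: when the struct holds no GC pointers and its size falls inside the movq window, the value is copied to the stack in 8-byte movq chunks instead of a series of 4-byte pushes. Below is a minimal standalone sketch of the same copy shape, using SSE2 intrinsics instead of the JIT emitter; the function name, the buffer arguments, and the "size is a multiple of 4" precondition are assumptions made for illustration, not the JIT's actual code.

#include <emmintrin.h> // SSE2 movq via _mm_loadl_epi64 / _mm_storel_epi64
#include <cstddef>
#include <cstring>

// Copy 'size' bytes (assumed to be a multiple of 4) from 'src' to 'dst':
// peel off a trailing 4-byte slot if the slot count is odd, then move the
// remainder in 8-byte chunks, mirroring the movq loop in the code above.
static void CopyStructNoGC(void* dst, const void* src, size_t size)
{
    size_t disp = size;
    if (size & 0x4)
    {
        disp -= 4;
        std::memcpy(static_cast<char*>(dst) + disp, static_cast<const char*>(src) + disp, 4);
    }
    for (size_t cur = 0; cur < disp; cur += 8)
    {
        __m128i chunk = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(static_cast<const char*>(src) + cur));
        _mm_storel_epi64(reinterpret_cast<__m128i*>(static_cast<char*>(dst) + cur), chunk);
    }
}
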
@@ -16901,7 +16603,7 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
/* Update the current set of register pointers */
- noway_assert(addrReg != DUMMY_INIT(RBM_CORRUPT));
+ noway_assert(addrReg != DUMMY_INIT(RBM_CORRUPT));
genDoneAddressable(curr, addrReg, RegSet::FREE_REG);
/* Remember how much stuff we've pushed on the stack */
@@ -16910,7 +16612,6 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
/* Update the current argument stack offset */
-
/* Continue with the next argument, if any more are present */
} // while args
@@ -16920,8 +16621,8 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
for (args = regArgs; args; args = args->Rest())
{
curr = args->Current();
-
- assert(!curr->IsArgPlaceHolderNode()); // No place holders nodes are in the late args
+
+        assert(!curr->IsArgPlaceHolderNode()); // No placeholder nodes are in the late args
fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, curr);
assert(curArgTabEntry);
@@ -16932,21 +16633,17 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
/* Evaluate the argument to a register [pair] */
- if (genTypeSize(genActualType(curr->TypeGet())) == sizeof(int))
+ if (genTypeSize(genActualType(curr->TypeGet())) == sizeof(int))
{
/* Check if this is the guess area for the resolve interface call
* Pass a size of EA_OFFSET*/
- if (curr->gtOper == GT_CLS_VAR && compiler->eeGetJitDataOffs(curr->gtClsVar.gtClsVarHnd) >= 0)
+ if (curr->gtOper == GT_CLS_VAR && compiler->eeGetJitDataOffs(curr->gtClsVar.gtClsVarHnd) >= 0)
{
- getEmitter()->emitIns_R_C(ins_Load(TYP_INT),
- EA_OFFSET,
- regNum,
- curr->gtClsVar.gtClsVarHnd,
- 0);
+ getEmitter()->emitIns_R_C(ins_Load(TYP_INT), EA_OFFSET, regNum, curr->gtClsVar.gtClsVarHnd, 0);
regTracker.rsTrackRegTrash(regNum);
-
+
/* The value is now in the appropriate register */
-
+
genMarkTreeInReg(curr, regNum);
}
else
@@ -16959,15 +16656,15 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
/* If the register is already marked as used, it will become
multi-used. However, since it is a callee-trashed register,
we will have to spill it before the call anyway. So do it now */
-
+
if (regSet.rsMaskUsed & genRegMask(regNum))
{
noway_assert(genRegMask(regNum) & RBM_CALLEE_TRASH);
regSet.rsSpillReg(regNum);
}
-
+
/* Mark the register as 'used' */
-
+
regSet.rsMarkRegUsed(curr);
}
else
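
The branch above handles a register argument whose target register is already marked as used: because every fixed argument register is callee-trashed, it would have to be spilled before the call in any case, so it is spilled eagerly and then re-marked as used for the current argument. A standalone sketch of that decision follows, with a hypothetical register mask type and a caller-supplied spill callback standing in for the JIT's regSet machinery.

#include <cstdint>
#include <cassert>

typedef uint64_t RegMask;

// Hypothetical callee-trashed mask; the real one comes from the target description.
const RegMask CALLEE_TRASH_REGS = 0x0F;

// Claim 'regNum' for the current argument. If it is already used it would
// become multi-used, so spill it first (it is callee-trashed and would be
// spilled before the call anyway), then mark it used.
static void TakeArgRegister(RegMask& usedMask, unsigned regNum, void (*spillReg)(unsigned))
{
    RegMask regBit = RegMask(1) << regNum;
    if (usedMask & regBit)
    {
        assert((regBit & CALLEE_TRASH_REGS) != 0);
        spillReg(regNum);
        usedMask &= ~regBit;
    }
    usedMask |= regBit;
}
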
@@ -16980,12 +16677,12 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
for (args = regArgs; args; args = args->Rest())
{
- curr = args->Current();
+ curr = args->Current();
assert(curr);
if (curr->gtFlags & GTF_SPILLED)
{
- if (isRegPairType(curr->gtType))
+ if (isRegPairType(curr->gtType))
{
regSet.rsUnspillRegPair(curr, genRegPairMask(curr->gtRegPair), RegSet::KEEP_REG);
}
@@ -17004,25 +16701,25 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
#pragma warning(pop)
#endif
-#else // FEATURE_FIXED_OUT_ARGS
+#else // FEATURE_FIXED_OUT_ARGS
//
// ARM and AMD64 use this method to pass the stack-based args
//
// returns size pushed (always zero)
-size_t CodeGen::genPushArgList(GenTreePtr call)
+size_t CodeGen::genPushArgList(GenTreePtr call)
{
-
- GenTreeArgList* lateArgs = call->gtCall.gtCallLateArgs;
- GenTreePtr curr;
- var_types type;
- int argSize;
+
+ GenTreeArgList* lateArgs = call->gtCall.gtCallLateArgs;
+ GenTreePtr curr;
+ var_types type;
+ int argSize;
GenTreeArgList* args;
// Create a local, artificial GenTreeArgList that includes the gtCallObjp, if that exists, as first argument,
// so we can iterate over this argument list more uniformly.
// Need to provide a temporary non-null first argument here: if we use this, we'll replace it.
- GenTreeArgList objpArgList(/*temp dummy arg*/call, call->gtCall.gtCallArgs);
+ GenTreeArgList objpArgList(/*temp dummy arg*/ call, call->gtCall.gtCallArgs);
if (call->gtCall.gtCallObjp == NULL)
{
args = call->gtCall.gtCallArgs;
@@ -17030,7 +16727,7 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
else
{
objpArgList.Current() = call->gtCall.gtCallObjp;
- args = &objpArgList;
+ args = &objpArgList;
}
for (; args; args = args->Rest())
@@ -17050,7 +16747,7 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
// that go dead after this use of the variable in the argument list.
regMaskTP deadFieldVarRegs = RBM_NONE;
- argSize = TARGET_POINTER_SIZE; // The default size for an arg is one pointer-sized item
+ argSize = TARGET_POINTER_SIZE; // The default size for an arg is one pointer-sized item
if (curr->IsArgPlaceHolderNode())
{
@@ -17067,375 +16764,365 @@ size_t CodeGen::genPushArgList(GenTreePtr call)
switch (type)
{
- case TYP_DOUBLE:
- case TYP_LONG:
+ case TYP_DOUBLE:
+ case TYP_LONG:
#if defined(_TARGET_ARM_)
- argSize = (TARGET_POINTER_SIZE * 2);
+ argSize = (TARGET_POINTER_SIZE * 2);
- /* Is the value a constant? */
+ /* Is the value a constant? */
- if (curr->gtOper == GT_CNS_LNG)
- {
- assert((curr->gtFlags & GTF_LATE_ARG) == 0);
-
- int hiVal = (int) (curr->gtLngCon.gtLconVal >> 32);
- int loVal = (int) (curr->gtLngCon.gtLconVal & 0xffffffff);
+ if (curr->gtOper == GT_CNS_LNG)
+ {
+ assert((curr->gtFlags & GTF_LATE_ARG) == 0);
- instGen_Store_Imm_Into_Lcl(TYP_INT, EA_4BYTE, loVal,
- compiler->lvaOutgoingArgSpaceVar, argOffset);
+ int hiVal = (int)(curr->gtLngCon.gtLconVal >> 32);
+ int loVal = (int)(curr->gtLngCon.gtLconVal & 0xffffffff);
- instGen_Store_Imm_Into_Lcl(TYP_INT, EA_4BYTE, hiVal,
- compiler->lvaOutgoingArgSpaceVar, argOffset + 4);
+ instGen_Store_Imm_Into_Lcl(TYP_INT, EA_4BYTE, loVal, compiler->lvaOutgoingArgSpaceVar, argOffset);
- break;
- }
- else
- {
- genCodeForTree(curr, 0);
+ instGen_Store_Imm_Into_Lcl(TYP_INT, EA_4BYTE, hiVal, compiler->lvaOutgoingArgSpaceVar,
+ argOffset + 4);
- if (curr->gtFlags & GTF_LATE_ARG)
- {
- // The arg was assigned into a temp and
- // will be moved to the correct register or slot later
-
- argSize = 0; // nothing is passed on the stack
+ break;
}
else
{
- // The arg is passed in the outgoing argument area of the stack frame
- //
- assert(curr->gtOper != GT_ASG); // GTF_LATE_ARG should be set if this is the case
- assert(curr->gtFlags & GTF_REG_VAL); // should be enregistered after genCodeForTree(curr, 0)
+ genCodeForTree(curr, 0);
- if (type == TYP_LONG)
+ if (curr->gtFlags & GTF_LATE_ARG)
{
- regNumber regLo = genRegPairLo(curr->gtRegPair);
- regNumber regHi = genRegPairHi(curr->gtRegPair);
+ // The arg was assigned into a temp and
+ // will be moved to the correct register or slot later
- assert(regLo != REG_STK);
- inst_SA_RV(ins_Store(TYP_INT), argOffset, regLo, TYP_INT);
- if (regHi == REG_STK)
- {
- regHi = regSet.rsPickFreeReg();
- inst_RV_TT(ins_Load(TYP_INT), regHi, curr, 4);
- regTracker.rsTrackRegTrash(regHi);
- }
- inst_SA_RV(ins_Store(TYP_INT), argOffset+4, regHi, TYP_INT);
+ argSize = 0; // nothing is passed on the stack
}
- else // (type == TYP_DOUBLE)
+ else
{
- inst_SA_RV(ins_Store(type), argOffset, curr->gtRegNum, type);
+ // The arg is passed in the outgoing argument area of the stack frame
+ //
+ assert(curr->gtOper != GT_ASG); // GTF_LATE_ARG should be set if this is the case
+ assert(curr->gtFlags & GTF_REG_VAL); // should be enregistered after genCodeForTree(curr, 0)
+
+ if (type == TYP_LONG)
+ {
+ regNumber regLo = genRegPairLo(curr->gtRegPair);
+ regNumber regHi = genRegPairHi(curr->gtRegPair);
+
+ assert(regLo != REG_STK);
+ inst_SA_RV(ins_Store(TYP_INT), argOffset, regLo, TYP_INT);
+ if (regHi == REG_STK)
+ {
+ regHi = regSet.rsPickFreeReg();
+ inst_RV_TT(ins_Load(TYP_INT), regHi, curr, 4);
+ regTracker.rsTrackRegTrash(regHi);
+ }
+ inst_SA_RV(ins_Store(TYP_INT), argOffset + 4, regHi, TYP_INT);
+ }
+ else // (type == TYP_DOUBLE)
+ {
+ inst_SA_RV(ins_Store(type), argOffset, curr->gtRegNum, type);
+ }
}
}
- }
- break;
+ break;
#elif defined(_TARGET_64BIT_)
- __fallthrough;
+ __fallthrough;
#else
-#error "Unknown target for passing TYP_LONG argument using FIXED_ARGS"
+#error "Unknown target for passing TYP_LONG argument using FIXED_ARGS"
#endif
- case TYP_REF:
- case TYP_BYREF:
+ case TYP_REF:
+ case TYP_BYREF:
- case TYP_FLOAT:
- case TYP_INT:
- /* Is the value a constant? */
+ case TYP_FLOAT:
+ case TYP_INT:
+ /* Is the value a constant? */
- if (curr->gtOper == GT_CNS_INT)
- {
- assert(!(curr->gtFlags & GTF_LATE_ARG));
+ if (curr->gtOper == GT_CNS_INT)
+ {
+ assert(!(curr->gtFlags & GTF_LATE_ARG));
-#if REDUNDANT_LOAD
- regNumber reg = regTracker.rsIconIsInReg(curr->gtIntCon.gtIconVal);
+#if REDUNDANT_LOAD
+ regNumber reg = regTracker.rsIconIsInReg(curr->gtIntCon.gtIconVal);
- if (reg != REG_NA)
- {
- inst_SA_RV(ins_Store(type), argOffset, reg, type);
- }
- else
+ if (reg != REG_NA)
+ {
+ inst_SA_RV(ins_Store(type), argOffset, reg, type);
+ }
+ else
#endif
- {
- bool needReloc = compiler->opts.compReloc && curr->IsIconHandle();
- emitAttr attr = needReloc ? EA_HANDLE_CNS_RELOC : emitTypeSize(type);
- instGen_Store_Imm_Into_Lcl(type, attr, curr->gtIntCon.gtIconVal,
- compiler->lvaOutgoingArgSpaceVar, argOffset);
+ {
+ bool needReloc = compiler->opts.compReloc && curr->IsIconHandle();
+ emitAttr attr = needReloc ? EA_HANDLE_CNS_RELOC : emitTypeSize(type);
+ instGen_Store_Imm_Into_Lcl(type, attr, curr->gtIntCon.gtIconVal,
+ compiler->lvaOutgoingArgSpaceVar, argOffset);
+ }
+ break;
}
- break;
- }
- /* This is passed as a pointer-sized integer argument */
+ /* This is passed as a pointer-sized integer argument */
- genCodeForTree(curr, 0);
-
- // The arg has been evaluated now, but will be put in a register or pushed on the stack later.
- if (curr->gtFlags & GTF_LATE_ARG)
- {
+ genCodeForTree(curr, 0);
+
+ // The arg has been evaluated now, but will be put in a register or pushed on the stack later.
+ if (curr->gtFlags & GTF_LATE_ARG)
+ {
#ifdef _TARGET_ARM_
- argSize = 0; // nothing is passed on the stack
+ argSize = 0; // nothing is passed on the stack
#endif
- }
- else
- {
- // The arg is passed in the outgoing argument area of the stack frame
-
- assert(curr->gtOper != GT_ASG); // GTF_LATE_ARG should be set if this is the case
- assert(curr->gtFlags & GTF_REG_VAL); // should be enregistered after genCodeForTree(curr, 0)
- inst_SA_RV(ins_Store(type), argOffset, curr->gtRegNum, type);
-
- if ((genRegMask(curr->gtRegNum) & regSet.rsMaskUsed) == 0)
- gcInfo.gcMarkRegSetNpt(genRegMask(curr->gtRegNum));
- }
- break;
+ }
+ else
+ {
+ // The arg is passed in the outgoing argument area of the stack frame
- case TYP_VOID:
- /* Is this a nothing node, deferred register argument? */
+ assert(curr->gtOper != GT_ASG); // GTF_LATE_ARG should be set if this is the case
+ assert(curr->gtFlags & GTF_REG_VAL); // should be enregistered after genCodeForTree(curr, 0)
+ inst_SA_RV(ins_Store(type), argOffset, curr->gtRegNum, type);
- if (curr->gtFlags & GTF_LATE_ARG)
- {
- /* Handle side-effects */
-DEFERRED:
- if (curr->OperIsCopyBlkOp() || curr->OperGet() == GT_COMMA)
+ if ((genRegMask(curr->gtRegNum) & regSet.rsMaskUsed) == 0)
+ gcInfo.gcMarkRegSetNpt(genRegMask(curr->gtRegNum));
+ }
+ break;
+
+ case TYP_VOID:
+ /* Is this a nothing node, deferred register argument? */
+
+ if (curr->gtFlags & GTF_LATE_ARG)
{
-#ifdef _TARGET_ARM_
+ /* Handle side-effects */
+ DEFERRED:
+ if (curr->OperIsCopyBlkOp() || curr->OperGet() == GT_COMMA)
{
- GenTreePtr curArgNode = curArgTabEntry->node;
- var_types curRegArgType = curArgNode->gtType;
- assert(curRegArgType != TYP_UNDEF);
-
- if (curRegArgType == TYP_STRUCT)
+#ifdef _TARGET_ARM_
{
- // If the RHS of the COPYBLK is a promoted struct local, then the use of that
- // is an implicit use of all its field vars. If these are last uses, remember that,
- // so we can later update the GC compiler->info.
- if (curr->OperIsCopyBlkOp())
- deadFieldVarRegs |= genFindDeadFieldRegs(curr);
+ GenTreePtr curArgNode = curArgTabEntry->node;
+ var_types curRegArgType = curArgNode->gtType;
+ assert(curRegArgType != TYP_UNDEF);
+
+ if (curRegArgType == TYP_STRUCT)
+ {
+ // If the RHS of the COPYBLK is a promoted struct local, then the use of that
+ // is an implicit use of all its field vars. If these are last uses, remember that,
+                            // so we can later update the GC info.
+ if (curr->OperIsCopyBlkOp())
+ deadFieldVarRegs |= genFindDeadFieldRegs(curr);
+ }
}
- }
#endif // _TARGET_ARM_
- genCodeForTree(curr, 0);
- }
- else
- {
- assert(curr->IsArgPlaceHolderNode() || curr->IsNothingNode());
- }
+ genCodeForTree(curr, 0);
+ }
+ else
+ {
+ assert(curr->IsArgPlaceHolderNode() || curr->IsNothingNode());
+ }
#if defined(_TARGET_ARM_)
- argSize = curArgTabEntry->numSlots * TARGET_POINTER_SIZE;
+ argSize = curArgTabEntry->numSlots * TARGET_POINTER_SIZE;
#endif
- }
- else
- {
- for (GenTree* arg = curr; arg->gtOper == GT_COMMA; arg = arg->gtOp.gtOp2)
+ }
+ else
{
- GenTreePtr op1 = arg->gtOp.gtOp1;
+ for (GenTree* arg = curr; arg->gtOper == GT_COMMA; arg = arg->gtOp.gtOp2)
+ {
+ GenTreePtr op1 = arg->gtOp.gtOp1;
- genEvalSideEffects(op1);
- genUpdateLife(op1);
+ genEvalSideEffects(op1);
+ genUpdateLife(op1);
+ }
}
- }
- break;
+ break;
#ifdef _TARGET_ARM_
- case TYP_STRUCT:
- {
- GenTree* arg = curr;
- while (arg->gtOper == GT_COMMA)
+ case TYP_STRUCT:
{
- GenTreePtr op1 = arg->gtOp.gtOp1;
- genEvalSideEffects(op1);
- genUpdateLife(op1);
- arg = arg->gtOp.gtOp2;
- }
- noway_assert((arg->OperGet() == GT_OBJ) || (arg->OperGet() == GT_MKREFANY));
-
- CORINFO_CLASS_HANDLE clsHnd;
- unsigned argAlign;
- unsigned slots;
- BYTE* gcLayout = NULL;
+ GenTree* arg = curr;
+ while (arg->gtOper == GT_COMMA)
+ {
+ GenTreePtr op1 = arg->gtOp.gtOp1;
+ genEvalSideEffects(op1);
+ genUpdateLife(op1);
+ arg = arg->gtOp.gtOp2;
+ }
+ noway_assert((arg->OperGet() == GT_OBJ) || (arg->OperGet() == GT_MKREFANY));
+
+ CORINFO_CLASS_HANDLE clsHnd;
+ unsigned argAlign;
+ unsigned slots;
+ BYTE* gcLayout = NULL;
+
+                // If the struct being passed is an OBJ of a local struct variable that is promoted (in the
+ // INDEPENDENT fashion, which doesn't require writes to be written through to the variable's
+ // home stack loc) "promotedStructLocalVarDesc" will be set to point to the local variable
+ // table entry for the promoted struct local. As we fill slots with the contents of a
+ // promoted struct, "bytesOfNextSlotOfCurPromotedStruct" will be the number of filled bytes
+ // that indicate another filled slot, and "nextPromotedStructFieldVar" will be the local
+ // variable number of the next field variable to be copied.
+ LclVarDsc* promotedStructLocalVarDesc = NULL;
+ GenTreePtr structLocalTree = NULL;
+ unsigned bytesOfNextSlotOfCurPromotedStruct = TARGET_POINTER_SIZE; // Size of slot.
+ unsigned nextPromotedStructFieldVar = BAD_VAR_NUM;
+ unsigned promotedStructOffsetOfFirstStackSlot = 0;
+ unsigned argOffsetOfFirstStackSlot = UINT32_MAX; // Indicates uninitialized.
+
+ if (arg->OperGet() == GT_OBJ)
+ {
+ clsHnd = arg->gtObj.gtClass;
+ unsigned originalSize = compiler->info.compCompHnd->getClassSize(clsHnd);
+ argAlign =
+ roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
+ argSize = (unsigned)(roundUp(originalSize, TARGET_POINTER_SIZE));
- // If the struct being passed is a OBJ of a local struct variable that is promoted (in the
- // INDEPENDENT fashion, which doesn't require writes to be written through to the variable's
- // home stack loc) "promotedStructLocalVarDesc" will be set to point to the local variable
- // table entry for the promoted struct local. As we fill slots with the contents of a
- // promoted struct, "bytesOfNextSlotOfCurPromotedStruct" will be the number of filled bytes
- // that indicate another filled slot, and "nextPromotedStructFieldVar" will be the local
- // variable number of the next field variable to be copied.
- LclVarDsc* promotedStructLocalVarDesc = NULL;
- GenTreePtr structLocalTree = NULL;
- unsigned bytesOfNextSlotOfCurPromotedStruct = TARGET_POINTER_SIZE; // Size of slot.
- unsigned nextPromotedStructFieldVar = BAD_VAR_NUM;
- unsigned promotedStructOffsetOfFirstStackSlot = 0;
- unsigned argOffsetOfFirstStackSlot = UINT32_MAX; // Indicates uninitialized.
-
- if (arg->OperGet() == GT_OBJ)
- {
- clsHnd = arg->gtObj.gtClass;
- unsigned originalSize = compiler->info.compCompHnd->getClassSize(clsHnd);
- argAlign = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
- argSize = (unsigned)(roundUp(originalSize, TARGET_POINTER_SIZE));
-
- slots = (unsigned)(argSize / TARGET_POINTER_SIZE);
-
- gcLayout = new (compiler, CMK_Codegen) BYTE[slots];
-
- compiler->info.compCompHnd->getClassGClayout(clsHnd, gcLayout);
+ slots = (unsigned)(argSize / TARGET_POINTER_SIZE);
- // Are we loading a promoted struct local var?
- if (arg->gtObj.gtOp1->gtOper == GT_ADDR &&
- arg->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
- {
- structLocalTree = arg->gtObj.gtOp1->gtOp.gtOp1;
- unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &compiler->lvaTable[structLclNum];
-
- // As much as we would like this to be a noway_assert, we can't because
- // there are some weird casts out there, and backwards compatiblity
- // dictates we do *NOT* start rejecting them now. lvaGetPromotion and
- // lvPromoted in general currently do not require the local to be
- // TYP_STRUCT, so this assert is really more about how we wish the world
- // was then some JIT invariant.
- assert((structLocalTree->TypeGet() == TYP_STRUCT) || compiler->compUnsafeCastUsed);
+ gcLayout = new (compiler, CMK_Codegen) BYTE[slots];
- Compiler::lvaPromotionType promotionType = compiler->lvaGetPromotionType(varDsc);
+ compiler->info.compCompHnd->getClassGClayout(clsHnd, gcLayout);
- if (varDsc->lvPromoted &&
- promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT) // Otherwise it is guaranteed to live
- // on stack.
+ // Are we loading a promoted struct local var?
+ if (arg->gtObj.gtOp1->gtOper == GT_ADDR && arg->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
{
- assert(!varDsc->lvAddrExposed); // Compiler::PROMOTION_TYPE_INDEPENDENT ==> not exposed.
- promotedStructLocalVarDesc = varDsc;
- nextPromotedStructFieldVar = promotedStructLocalVarDesc->lvFieldLclStart;
+ structLocalTree = arg->gtObj.gtOp1->gtOp.gtOp1;
+ unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &compiler->lvaTable[structLclNum];
+
+ // As much as we would like this to be a noway_assert, we can't because
+                        // there are some weird casts out there, and backwards compatibility
+ // dictates we do *NOT* start rejecting them now. lvaGetPromotion and
+ // lvPromoted in general currently do not require the local to be
+ // TYP_STRUCT, so this assert is really more about how we wish the world
+                        // was than some JIT invariant.
+ assert((structLocalTree->TypeGet() == TYP_STRUCT) || compiler->compUnsafeCastUsed);
+
+ Compiler::lvaPromotionType promotionType = compiler->lvaGetPromotionType(varDsc);
+
+ if (varDsc->lvPromoted &&
+ promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT) // Otherwise it is guaranteed to live
+ // on stack.
+ {
+ assert(!varDsc->lvAddrExposed); // Compiler::PROMOTION_TYPE_INDEPENDENT ==> not exposed.
+ promotedStructLocalVarDesc = varDsc;
+ nextPromotedStructFieldVar = promotedStructLocalVarDesc->lvFieldLclStart;
+ }
}
}
- }
- else
- {
- noway_assert(arg->OperGet() == GT_MKREFANY);
+ else
+ {
+ noway_assert(arg->OperGet() == GT_MKREFANY);
- clsHnd = NULL;
- argAlign = TARGET_POINTER_SIZE;
- argSize = 2*TARGET_POINTER_SIZE;
- slots = 2;
- }
-
- // Any TYP_STRUCT argument that is passed in registers must be moved over to the LateArg list
- noway_assert(regNum == REG_STK);
+ clsHnd = NULL;
+ argAlign = TARGET_POINTER_SIZE;
+ argSize = 2 * TARGET_POINTER_SIZE;
+ slots = 2;
+ }
- // This code passes a TYP_STRUCT by value using the outgoing arg space var
- //
- if (arg->OperGet() == GT_OBJ)
- {
- regNumber regSrc = REG_STK;
- regNumber regTmp = REG_STK; // This will get set below if the obj is not of a promoted struct local.
- int cStackSlots = 0;
+ // Any TYP_STRUCT argument that is passed in registers must be moved over to the LateArg list
+ noway_assert(regNum == REG_STK);
- if (promotedStructLocalVarDesc == NULL)
+ // This code passes a TYP_STRUCT by value using the outgoing arg space var
+ //
+ if (arg->OperGet() == GT_OBJ)
{
- genComputeReg(arg->gtObj.gtOp1, 0, RegSet::ANY_REG, RegSet::KEEP_REG);
- noway_assert(arg->gtObj.gtOp1->gtFlags & GTF_REG_VAL);
- regSrc = arg->gtObj.gtOp1->gtRegNum;
- }
-
- // The number of bytes to add "argOffset" to get the arg offset of the current slot.
- int extraArgOffset = 0;
+ regNumber regSrc = REG_STK;
+ regNumber regTmp = REG_STK; // This will get set below if the obj is not of a promoted struct local.
+ int cStackSlots = 0;
- for (unsigned i = 0; i < slots; i++)
- {
- emitAttr fieldSize;
- if (gcLayout[i] == TYPE_GC_NONE)
- fieldSize = EA_PTRSIZE;
- else if (gcLayout[i] == TYPE_GC_REF)
- fieldSize = EA_GCREF;
- else
+ if (promotedStructLocalVarDesc == NULL)
{
- noway_assert(gcLayout[i] == TYPE_GC_BYREF);
- fieldSize = EA_BYREF;
+ genComputeReg(arg->gtObj.gtOp1, 0, RegSet::ANY_REG, RegSet::KEEP_REG);
+ noway_assert(arg->gtObj.gtOp1->gtFlags & GTF_REG_VAL);
+ regSrc = arg->gtObj.gtOp1->gtRegNum;
}
-
- // Pass the argument using the lvaOutgoingArgSpaceVar
- if (promotedStructLocalVarDesc != NULL)
+ // The number of bytes to add "argOffset" to get the arg offset of the current slot.
+ int extraArgOffset = 0;
+
+ for (unsigned i = 0; i < slots; i++)
{
- if (argOffsetOfFirstStackSlot == UINT32_MAX) argOffsetOfFirstStackSlot = argOffset;
+ emitAttr fieldSize;
+ if (gcLayout[i] == TYPE_GC_NONE)
+ fieldSize = EA_PTRSIZE;
+ else if (gcLayout[i] == TYPE_GC_REF)
+ fieldSize = EA_GCREF;
+ else
+ {
+ noway_assert(gcLayout[i] == TYPE_GC_BYREF);
+ fieldSize = EA_BYREF;
+ }
- regNumber maxRegArg = regNumber(MAX_REG_ARG);
- bool filledExtraSlot =
- genFillSlotFromPromotedStruct(arg,
- curArgTabEntry,
- promotedStructLocalVarDesc,
- fieldSize,
- &nextPromotedStructFieldVar,
- &bytesOfNextSlotOfCurPromotedStruct,
- /*pCurRegNum*/ &maxRegArg,
- /*argOffset*/ argOffset + extraArgOffset,
- /*fieldOffsetOfFirstStackSlot*/ promotedStructOffsetOfFirstStackSlot,
- argOffsetOfFirstStackSlot,
- &deadFieldVarRegs,
- &regTmp);
- extraArgOffset += TARGET_POINTER_SIZE;
- // If we filled an extra slot with an 8-byte value, skip a slot.
- if (filledExtraSlot)
+ // Pass the argument using the lvaOutgoingArgSpaceVar
+
+ if (promotedStructLocalVarDesc != NULL)
{
- i++;
- cStackSlots++;
+ if (argOffsetOfFirstStackSlot == UINT32_MAX)
+ argOffsetOfFirstStackSlot = argOffset;
+
+ regNumber maxRegArg = regNumber(MAX_REG_ARG);
+ bool filledExtraSlot = genFillSlotFromPromotedStruct(
+ arg, curArgTabEntry, promotedStructLocalVarDesc, fieldSize, &nextPromotedStructFieldVar,
+ &bytesOfNextSlotOfCurPromotedStruct,
+ /*pCurRegNum*/ &maxRegArg,
+ /*argOffset*/ argOffset + extraArgOffset,
+ /*fieldOffsetOfFirstStackSlot*/ promotedStructOffsetOfFirstStackSlot,
+ argOffsetOfFirstStackSlot, &deadFieldVarRegs, &regTmp);
extraArgOffset += TARGET_POINTER_SIZE;
+ // If we filled an extra slot with an 8-byte value, skip a slot.
+ if (filledExtraSlot)
+ {
+ i++;
+ cStackSlots++;
+ extraArgOffset += TARGET_POINTER_SIZE;
+ }
}
- }
- else
- {
- if (regTmp == REG_STK)
+ else
{
- regTmp = regSet.rsPickFreeReg();
+ if (regTmp == REG_STK)
+ {
+ regTmp = regSet.rsPickFreeReg();
+ }
+
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), fieldSize, regTmp, regSrc,
+ i * TARGET_POINTER_SIZE);
+
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), fieldSize, regTmp,
+ compiler->lvaOutgoingArgSpaceVar,
+ argOffset + cStackSlots * TARGET_POINTER_SIZE);
+ regTracker.rsTrackRegTrash(regTmp);
}
+ cStackSlots++;
+ }
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL),
- fieldSize,
- regTmp,
- regSrc,
- i*TARGET_POINTER_SIZE);
-
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL),
- fieldSize,
- regTmp,
- compiler->lvaOutgoingArgSpaceVar,
- argOffset+cStackSlots*TARGET_POINTER_SIZE);
- regTracker.rsTrackRegTrash(regTmp);
- }
- cStackSlots++;
+ if (promotedStructLocalVarDesc == NULL)
+ {
+ regSet.rsMarkRegFree(genRegMask(regSrc));
+ }
+ if (structLocalTree != NULL)
+ genUpdateLife(structLocalTree);
}
-
- if (promotedStructLocalVarDesc == NULL)
+ else
{
- regSet.rsMarkRegFree(genRegMask(regSrc));
+ assert(arg->OperGet() == GT_MKREFANY);
+ PushMkRefAnyArg(arg, curArgTabEntry, RBM_ALLINT);
+ argSize = (curArgTabEntry->numSlots * TARGET_POINTER_SIZE);
}
- if (structLocalTree != NULL) genUpdateLife(structLocalTree);
}
- else
- {
- assert(arg->OperGet() == GT_MKREFANY);
- PushMkRefAnyArg(arg, curArgTabEntry, RBM_ALLINT);
- argSize = (curArgTabEntry->numSlots * TARGET_POINTER_SIZE);
- }
- }
- break;
+ break;
#endif // _TARGET_ARM_
- default:
- assert(!"unhandled/unexpected arg type");
- NO_WAY("unhandled/unexpected arg type");
+ default:
+ assert(!"unhandled/unexpected arg type");
+ NO_WAY("unhandled/unexpected arg type");
}
/* Update the current set of live variables */
genUpdateLife(curr);
- // Now, if some copied field locals were enregistered, and they're now dead, update the set of
+ // Now, if some copied field locals were enregistered, and they're now dead, update the set of
// register holding gc pointers.
if (deadFieldVarRegs != 0)
gcInfo.gcMarkRegSetNpt(deadFieldVarRegs);
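
One detail from the GT_CNS_LNG case in the hunk above: on ARM a 64-bit constant argument is written to the outgoing argument area as two 32-bit immediate stores, low word at the argument offset and high word four bytes above it. A standalone sketch of that split follows, with a plain byte buffer standing in for lvaOutgoingArgSpaceVar; the function name and buffer are illustrative assumptions.

#include <cstdint>
#include <cstddef>
#include <cstring>

// Split a 64-bit constant into its 32-bit halves and store them into an
// outgoing-argument buffer, low word first, mirroring the two
// instGen_Store_Imm_Into_Lcl calls in the code above.
static void StoreLongConstantArg(uint8_t* outgoingArgSpace, size_t argOffset, int64_t value)
{
    int32_t loVal = static_cast<int32_t>(value & 0xffffffff);
    int32_t hiVal = static_cast<int32_t>(value >> 32);
    std::memcpy(outgoingArgSpace + argOffset, &loVal, sizeof(loVal));
    std::memcpy(outgoingArgSpace + argOffset + 4, &hiVal, sizeof(hiVal));
}
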
@@ -17447,38 +17134,38 @@ DEFERRED:
/* Continue with the next argument, if any more are present */
} // while (args)
-
if (lateArgs)
{
SetupLateArgs(call);
}
-
+
/* Return the total size pushed */
-
+
return 0;
}
#ifdef _TARGET_ARM_
-bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
- fgArgTabEntryPtr curArgTabEntry,
- LclVarDsc* promotedStructLocalVarDesc,
- emitAttr fieldSize,
- unsigned* pNextPromotedStructFieldVar,
- unsigned* pBytesOfNextSlotOfCurPromotedStruct,
- regNumber* pCurRegNum,
- int argOffset,
- int fieldOffsetOfFirstStackSlot,
- int argOffsetOfFirstStackSlot,
- regMaskTP* deadFieldVarRegs,
- regNumber* pRegTmp)
+bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
+ fgArgTabEntryPtr curArgTabEntry,
+ LclVarDsc* promotedStructLocalVarDesc,
+ emitAttr fieldSize,
+ unsigned* pNextPromotedStructFieldVar,
+ unsigned* pBytesOfNextSlotOfCurPromotedStruct,
+ regNumber* pCurRegNum,
+ int argOffset,
+ int fieldOffsetOfFirstStackSlot,
+ int argOffsetOfFirstStackSlot,
+ regMaskTP* deadFieldVarRegs,
+ regNumber* pRegTmp)
{
unsigned nextPromotedStructFieldVar = *pNextPromotedStructFieldVar;
- unsigned limitPromotedStructFieldVar = promotedStructLocalVarDesc->lvFieldLclStart + promotedStructLocalVarDesc->lvFieldCnt;
+ unsigned limitPromotedStructFieldVar =
+ promotedStructLocalVarDesc->lvFieldLclStart + promotedStructLocalVarDesc->lvFieldCnt;
unsigned bytesOfNextSlotOfCurPromotedStruct = *pBytesOfNextSlotOfCurPromotedStruct;
- regNumber curRegNum = *pCurRegNum;
- regNumber regTmp = *pRegTmp;
- bool filledExtraSlot = false;
+ regNumber curRegNum = *pCurRegNum;
+ regNumber regTmp = *pRegTmp;
+ bool filledExtraSlot = false;
if (nextPromotedStructFieldVar == limitPromotedStructFieldVar)
{
@@ -17494,38 +17181,45 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
// Does this field fill an entire slot, and does it go at the start of the slot?
// If so, things are easier...
- bool oneFieldFillsSlotFromStart =
- (fieldVarDsc->lvFldOffset < bytesOfNextSlotOfCurPromotedStruct) // The field should start in the current slot...
- && ((fieldVarDsc->lvFldOffset % 4) == 0) // at the start of the slot, and...
- && (nextPromotedStructFieldVar+1 == limitPromotedStructFieldVar // next field, if there is one, goes in the next slot.
- || compiler->lvaTable[nextPromotedStructFieldVar+1].lvFldOffset >= bytesOfNextSlotOfCurPromotedStruct);
+ bool oneFieldFillsSlotFromStart =
+ (fieldVarDsc->lvFldOffset < bytesOfNextSlotOfCurPromotedStruct) // The field should start in the current slot...
+ && ((fieldVarDsc->lvFldOffset % 4) == 0) // at the start of the slot, and...
+ && (nextPromotedStructFieldVar + 1 ==
+ limitPromotedStructFieldVar // next field, if there is one, goes in the next slot.
+ || compiler->lvaTable[nextPromotedStructFieldVar + 1].lvFldOffset >= bytesOfNextSlotOfCurPromotedStruct);
// Compute the proper size.
- if (fieldSize == EA_4BYTE) // Not a GC ref or byref.
+ if (fieldSize == EA_4BYTE) // Not a GC ref or byref.
{
switch (fieldVarDsc->lvExactSize)
{
- case 1: fieldSize = EA_1BYTE; break;
- case 2: fieldSize = EA_2BYTE; break;
- case 8:
- // An 8-byte field will be at an 8-byte-aligned offset unless explicit layout has been used,
- // in which case we should not have promoted the struct variable.
- noway_assert((fieldVarDsc->lvFldOffset % 8) == 0);
-
- // If the current reg number is not aligned, align it, and return to the calling loop, which will
- // consider that a filled slot and move on to the next argument register.
- if (curRegNum != MAX_REG_ARG && ((curRegNum % 2) != 0))
- {
- // We must update the slot target, however!
- bytesOfNextSlotOfCurPromotedStruct += 4;
- *pBytesOfNextSlotOfCurPromotedStruct = bytesOfNextSlotOfCurPromotedStruct;
- return false;
- }
- // Dest is an aligned pair of arg regs, if the struct type demands it.
- noway_assert((curRegNum % 2) == 0);
- // We leave the fieldSize as EA_4BYTE; but we must do 2 reg moves.
- break;
- default: assert(fieldVarDsc->lvExactSize == 4); break;
+ case 1:
+ fieldSize = EA_1BYTE;
+ break;
+ case 2:
+ fieldSize = EA_2BYTE;
+ break;
+ case 8:
+ // An 8-byte field will be at an 8-byte-aligned offset unless explicit layout has been used,
+ // in which case we should not have promoted the struct variable.
+ noway_assert((fieldVarDsc->lvFldOffset % 8) == 0);
+
+ // If the current reg number is not aligned, align it, and return to the calling loop, which will
+ // consider that a filled slot and move on to the next argument register.
+ if (curRegNum != MAX_REG_ARG && ((curRegNum % 2) != 0))
+ {
+ // We must update the slot target, however!
+ bytesOfNextSlotOfCurPromotedStruct += 4;
+ *pBytesOfNextSlotOfCurPromotedStruct = bytesOfNextSlotOfCurPromotedStruct;
+ return false;
+ }
+ // Dest is an aligned pair of arg regs, if the struct type demands it.
+ noway_assert((curRegNum % 2) == 0);
+ // We leave the fieldSize as EA_4BYTE; but we must do 2 reg moves.
+ break;
+ default:
+ assert(fieldVarDsc->lvExactSize == 4);
+ break;
}
}
else
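
The switch just above narrows the generic 4-byte emit attribute to the promoted field's exact size, and deliberately keeps a 4-byte attribute for an 8-byte field because that field is moved as two 4-byte register transfers. A tiny standalone sketch of the mapping follows; EmitAttr here is a stand-in enum, not the JIT's emitAttr type.

#include <cassert>

enum class EmitAttr { Byte1, Byte2, Byte4 };

// Map a promoted field's exact size in bytes to the attribute used for the
// move; 8-byte fields stay at 4 bytes because they take two register moves.
static EmitAttr AttrForFieldSize(unsigned exactSize)
{
    switch (exactSize)
    {
        case 1:
            return EmitAttr::Byte1;
        case 2:
            return EmitAttr::Byte2;
        case 8:
            return EmitAttr::Byte4; // moved as two 4-byte halves
        default:
            assert(exactSize == 4);
            return EmitAttr::Byte4;
    }
}
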
@@ -17540,8 +17234,7 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
// however if it is in memory we can use an integer type TYP_I_IMPL
//
var_types fieldTypeForInstr = var_types(fieldVarDsc->lvType);
- if ((fieldVarDsc->lvType == TYP_LONG) ||
- (!fieldVarDsc->lvRegister && varTypeIsFloating(fieldTypeForInstr)))
+ if ((fieldVarDsc->lvType == TYP_LONG) || (!fieldVarDsc->lvRegister && varTypeIsFloating(fieldTypeForInstr)))
{
fieldTypeForInstr = TYP_I_IMPL;
}
@@ -17567,10 +17260,8 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
{
// Move the field var living in stack to dst.
getEmitter()->emitIns_R_S(ins_Load(fieldVarDsc->TypeGet()),
- fieldVarDsc->TypeGet() == TYP_DOUBLE ? EA_8BYTE : EA_4BYTE,
- curRegNum,
- nextPromotedStructFieldVar,
- 0);
+ fieldVarDsc->TypeGet() == TYP_DOUBLE ? EA_8BYTE : EA_4BYTE, curRegNum,
+ nextPromotedStructFieldVar, 0);
assert(genIsValidFloatReg(curRegNum)); // we don't use register tracking for FP
}
@@ -17582,7 +17273,7 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
if (fieldVarDsc->TypeGet() == TYP_DOUBLE)
{
bytesOfNextSlotOfCurPromotedStruct += 4;
- curRegNum = REG_NEXT(curRegNum);
+ curRegNum = REG_NEXT(curRegNum);
arg->gtRegNum = curRegNum;
regSet.SetUsedRegFloat(arg, true);
filledExtraSlot = true;
@@ -17614,18 +17305,18 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
{
// Source is register and Dest is register.
- instruction insCopy = INS_mov;
+ instruction insCopy = INS_mov;
if (varTypeIsFloating(fieldTypeForInstr))
{
if (fieldTypeForInstr == TYP_FLOAT)
{
- insCopy = INS_vmov_f2i;
+ insCopy = INS_vmov_f2i;
}
else
{
assert(fieldTypeForInstr == TYP_DOUBLE);
- insCopy = INS_vmov_d2i;
+ insCopy = INS_vmov_d2i;
}
}
@@ -17644,10 +17335,7 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
regTmp = regSet.rsPickFreeReg();
}
// Copy the second register to the temp reg.
- getEmitter()->emitIns_R_R(INS_mov,
- fieldSize,
- regTmp,
- otherRegNum);
+ getEmitter()->emitIns_R_R(INS_mov, fieldSize, regTmp, otherRegNum);
regTracker.rsTrackRegCopy(regTmp, otherRegNum);
otherRegNum = regTmp;
}
@@ -17656,11 +17344,8 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
if (fieldVarDsc->lvType == TYP_DOUBLE)
{
assert(curRegNum <= REG_R2);
- getEmitter()->emitIns_R_R_R(insCopy,
- fieldSize,
- curRegNum,
- genRegArgNext(curRegNum),
- fieldVarDsc->lvRegNum);
+ getEmitter()->emitIns_R_R_R(insCopy, fieldSize, curRegNum, genRegArgNext(curRegNum),
+ fieldVarDsc->lvRegNum);
regTracker.rsTrackRegTrash(curRegNum);
regTracker.rsTrackRegTrash(genRegArgNext(curRegNum));
}
@@ -17670,10 +17355,7 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
// It might be the case that it's already in the desired register; if so do nothing.
if (curRegNum != fieldVarDsc->lvRegNum)
{
- getEmitter()->emitIns_R_R(insCopy,
- fieldSize,
- curRegNum,
- fieldVarDsc->lvRegNum);
+ getEmitter()->emitIns_R_R(insCopy, fieldSize, curRegNum, fieldVarDsc->lvRegNum);
regTracker.rsTrackRegCopy(curRegNum, fieldVarDsc->lvRegNum);
}
}
@@ -17682,7 +17364,7 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
regSet.rsMarkArgRegUsedByPromotedFieldArg(arg, curRegNum, EA_IS_GCREF(fieldSize));
// Is there a second half of the value?
- if (fieldVarDsc->lvExactSize == 8)
+ if (fieldVarDsc->lvExactSize == 8)
{
curRegNum = genRegArgNext(curRegNum);
// The second dest reg must also be an argument register.
@@ -17700,22 +17382,18 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
// Apparently when we partially enregister, we allocate stack space for the full
// 8 bytes, and enregister the low half. Thus the final TARGET_POINTER_SIZE offset
// parameter, to get the high half.
- getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr),
- fieldSize,
- curRegNum,
- nextPromotedStructFieldVar, TARGET_POINTER_SIZE);
+ getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr), fieldSize, curRegNum,
+ nextPromotedStructFieldVar, TARGET_POINTER_SIZE);
regTracker.rsTrackRegTrash(curRegNum);
}
else
{
// The other half is in a register.
- // Again, it might be the case that it's already in the desired register; if so do nothing.
+ // Again, it might be the case that it's already in the desired register; if so do
+ // nothing.
if (curRegNum != otherRegNum)
{
- getEmitter()->emitIns_R_R(INS_mov,
- fieldSize,
- curRegNum,
- otherRegNum);
+ getEmitter()->emitIns_R_R(INS_mov, fieldSize, curRegNum, otherRegNum);
regTracker.rsTrackRegCopy(curRegNum, otherRegNum);
}
}
@@ -17732,13 +17410,10 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
// Source is register and Dest is memory (OutgoingArgSpace).
// Now write the srcReg into the right location in the outgoing argument list.
- getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr),
- fieldSize,
- fieldVarDsc->lvRegNum,
- compiler->lvaOutgoingArgSpaceVar,
- fieldArgOffset);
+ getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr), fieldSize, fieldVarDsc->lvRegNum,
+ compiler->lvaOutgoingArgSpaceVar, fieldArgOffset);
- if (fieldVarDsc->lvExactSize == 8)
+ if (fieldVarDsc->lvExactSize == 8)
{
// Now, if it's an 8-byte TYP_LONG, we have to do the second 4 bytes.
if (fieldVarDsc->lvType == TYP_LONG)
@@ -17753,31 +17428,25 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
// Apparently if we partially enregister, we allocate stack space for the full
// 8 bytes, and enregister the low half. Thus the final TARGET_POINTER_SIZE offset
// parameter, to get the high half.
- getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr),
- fieldSize,
- regTmp,
- nextPromotedStructFieldVar, TARGET_POINTER_SIZE);
+ getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr), fieldSize, regTmp,
+ nextPromotedStructFieldVar, TARGET_POINTER_SIZE);
regTracker.rsTrackRegTrash(regTmp);
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL),
- fieldSize,
- regTmp,
- compiler->lvaOutgoingArgSpaceVar,
- fieldArgOffset + TARGET_POINTER_SIZE);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), fieldSize, regTmp,
+ compiler->lvaOutgoingArgSpaceVar,
+ fieldArgOffset + TARGET_POINTER_SIZE);
}
else
{
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL),
- fieldSize,
- fieldVarDsc->lvOtherReg,
- compiler->lvaOutgoingArgSpaceVar,
- fieldArgOffset + TARGET_POINTER_SIZE);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), fieldSize, fieldVarDsc->lvOtherReg,
+ compiler->lvaOutgoingArgSpaceVar,
+ fieldArgOffset + TARGET_POINTER_SIZE);
}
}
// Record the fact that we filled in an extra register slot
filledExtraSlot = true;
}
}
- assert(fieldVarDsc->lvTracked); // Must be tracked, since it's enregistered...
+ assert(fieldVarDsc->lvTracked); // Must be tracked, since it's enregistered...
// If the fieldVar becomes dead, then declare the register not to contain a pointer value.
if (arg->gtFlags & GTF_VAR_DEATH)
{
@@ -17793,24 +17462,19 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
if (curRegNum != MAX_REG_ARG)
{
// Dest is reg.
- getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr),
- fieldSize,
- curRegNum,
- nextPromotedStructFieldVar, 0);
+ getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr), fieldSize, curRegNum,
+ nextPromotedStructFieldVar, 0);
regTracker.rsTrackRegTrash(curRegNum);
regSet.rsMarkArgRegUsedByPromotedFieldArg(arg, curRegNum, EA_IS_GCREF(fieldSize));
- if (fieldVarDsc->lvExactSize == 8)
+ if (fieldVarDsc->lvExactSize == 8)
{
noway_assert(fieldSize == EA_4BYTE);
curRegNum = genRegArgNext(curRegNum);
- noway_assert(curRegNum < MAX_REG_ARG); // Because of 8-byte alignment.
- getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL),
- fieldSize,
- curRegNum,
- nextPromotedStructFieldVar,
- TARGET_POINTER_SIZE);
+ noway_assert(curRegNum < MAX_REG_ARG); // Because of 8-byte alignment.
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), fieldSize, curRegNum,
+ nextPromotedStructFieldVar, TARGET_POINTER_SIZE);
regTracker.rsTrackRegTrash(curRegNum);
regSet.rsMarkArgRegUsedByPromotedFieldArg(arg, curRegNum, EA_IS_GCREF(fieldSize));
// Record the fact that we filled in an extra stack slot
@@ -17824,32 +17488,23 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
{
regTmp = regSet.rsPickFreeReg();
}
- getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr),
- fieldSize,
- regTmp,
- nextPromotedStructFieldVar, 0);
+ getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr), fieldSize, regTmp,
+ nextPromotedStructFieldVar, 0);
// Now write regTmp into the right location in the outgoing argument list.
- getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr),
- fieldSize,
- regTmp,
- compiler->lvaOutgoingArgSpaceVar,
- fieldArgOffset);
+ getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr), fieldSize, regTmp,
+ compiler->lvaOutgoingArgSpaceVar, fieldArgOffset);
// We overwrote "regTmp", so erase any previous value we recorded that it contained.
regTracker.rsTrackRegTrash(regTmp);
- if (fieldVarDsc->lvExactSize == 8)
+ if (fieldVarDsc->lvExactSize == 8)
{
- getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr),
- fieldSize,
- regTmp,
- nextPromotedStructFieldVar, TARGET_POINTER_SIZE);
-
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL),
- fieldSize,
- regTmp,
- compiler->lvaOutgoingArgSpaceVar,
- fieldArgOffset + TARGET_POINTER_SIZE);
+ getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr), fieldSize, regTmp,
+ nextPromotedStructFieldVar, TARGET_POINTER_SIZE);
+
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), fieldSize, regTmp,
+ compiler->lvaOutgoingArgSpaceVar,
+ fieldArgOffset + TARGET_POINTER_SIZE);
// Record the fact that we filled in an extra stack slot
filledExtraSlot = true;
}
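
The hunk above loads a stack-homed field into a temporary register and writes it into the outgoing argument area; for an 8-byte field the second half is read from the stack home at offset TARGET_POINTER_SIZE, because stack space for the full 8 bytes is still allocated even when only the low half is enregistered. A standalone sketch of that read follows, assuming a 4-byte pointer size and a raw pointer to the stack home.

#include <cstdint>
#include <cstring>

// Read the high 32 bits of a TYP_LONG local whose low half is enregistered
// but whose full 8-byte stack home is still allocated.
static int32_t LoadHighHalfFromStackHome(const uint8_t* stackHome)
{
    int32_t hi;
    std::memcpy(&hi, stackHome + 4, sizeof(hi));
    return hi;
}
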
@@ -17857,7 +17512,7 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
}
// Bump up the following if we filled in an extra slot
- if (filledExtraSlot)
+ if (filledExtraSlot)
bytesOfNextSlotOfCurPromotedStruct += 4;
// Go to the next field.
@@ -17868,15 +17523,16 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
}
else
{
- // The next field should have the same parent variable, and we should have put the field vars in order sorted by offset.
- assert(fieldVarDsc->lvIsStructField && compiler->lvaTable[nextPromotedStructFieldVar].lvIsStructField
- && fieldVarDsc->lvParentLcl == compiler->lvaTable[nextPromotedStructFieldVar].lvParentLcl
- && fieldVarDsc->lvFldOffset < compiler->lvaTable[nextPromotedStructFieldVar].lvFldOffset);
+ // The next field should have the same parent variable, and we should have put the field vars in order
+ // sorted by offset.
+ assert(fieldVarDsc->lvIsStructField && compiler->lvaTable[nextPromotedStructFieldVar].lvIsStructField &&
+ fieldVarDsc->lvParentLcl == compiler->lvaTable[nextPromotedStructFieldVar].lvParentLcl &&
+ fieldVarDsc->lvFldOffset < compiler->lvaTable[nextPromotedStructFieldVar].lvFldOffset);
fieldVarDsc = &compiler->lvaTable[nextPromotedStructFieldVar];
}
bytesOfNextSlotOfCurPromotedStruct += 4;
}
- else // oneFieldFillsSlotFromStart == false
+ else // oneFieldFillsSlotFromStart == false
{
// The current slot should contain more than one field.
// We'll construct a word in memory for the slot, then load it into a register.
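
When no single field fills the slot from its start, the code that follows assembles the slot in a scratch stack location (lvaPromotedStructAssemblyScratchVar) and only then loads the completed word into the destination register or outgoing slot. A standalone sketch of the per-field assembly step follows, with a plain 4-byte buffer standing in for the scratch variable; the function and its parameters are illustrative assumptions.

#include <cassert>
#include <cstdint>
#include <cstring>

// Copy one promoted field's bytes into a 4-byte scratch word at the field's
// offset within its slot; once every field of the slot has been written the
// caller loads the whole word at once.
static void WriteFieldIntoSlotScratch(uint8_t (&scratch)[4],
                                      const void* fieldValue,
                                      unsigned    fieldSize, // 1, 2, or 4 bytes
                                      unsigned    fieldOffsetInStruct)
{
    unsigned offsetInSlot = fieldOffsetInStruct % 4;
    assert(offsetInSlot + fieldSize <= 4);
    std::memcpy(scratch + offsetInSlot, fieldValue, fieldSize);
}
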
@@ -17890,28 +17546,24 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
// If the argument goes to the stack, the offset in the outgoing arg area for the argument.
int fieldArgOffset = argOffsetOfFirstStackSlot + fieldVarDsc->lvFldOffset - fieldOffsetOfFirstStackSlot;
- noway_assert(argOffset == INT32_MAX || (argOffset <= fieldArgOffset && fieldArgOffset < argOffset + TARGET_POINTER_SIZE));
-
+ noway_assert(argOffset == INT32_MAX ||
+ (argOffset <= fieldArgOffset && fieldArgOffset < argOffset + TARGET_POINTER_SIZE));
+
if (fieldVarDsc->lvRegister)
{
if (curRegNum != MAX_REG_ARG)
{
noway_assert(compiler->lvaPromotedStructAssemblyScratchVar != BAD_VAR_NUM);
- getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr),
- fieldSize,
- fieldVarDsc->lvRegNum,
- compiler->lvaPromotedStructAssemblyScratchVar,
- fieldVarDsc->lvFldOffset % 4);
+ getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr), fieldSize, fieldVarDsc->lvRegNum,
+ compiler->lvaPromotedStructAssemblyScratchVar,
+ fieldVarDsc->lvFldOffset % 4);
}
else
{
// Dest is stack; write directly.
- getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr),
- fieldSize,
- fieldVarDsc->lvRegNum,
- compiler->lvaOutgoingArgSpaceVar,
- fieldArgOffset);
+ getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr), fieldSize, fieldVarDsc->lvRegNum,
+ compiler->lvaOutgoingArgSpaceVar, fieldArgOffset);
}
}
else
@@ -17923,29 +17575,22 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
{
regTmp = regSet.rsPickFreeReg();
}
- getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr),
- fieldSize,
- regTmp,
- nextPromotedStructFieldVar, 0);
+ getEmitter()->emitIns_R_S(ins_Load(fieldTypeForInstr), fieldSize, regTmp,
+ nextPromotedStructFieldVar, 0);
regTracker.rsTrackRegTrash(regTmp);
if (curRegNum != MAX_REG_ARG)
- {
+ {
noway_assert(compiler->lvaPromotedStructAssemblyScratchVar != BAD_VAR_NUM);
- getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr),
- fieldSize,
- regTmp,
- compiler->lvaPromotedStructAssemblyScratchVar,
- fieldVarDsc->lvFldOffset % 4);
+ getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr), fieldSize, regTmp,
+ compiler->lvaPromotedStructAssemblyScratchVar,
+ fieldVarDsc->lvFldOffset % 4);
}
else
{
- getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr),
- fieldSize,
- regTmp,
- compiler->lvaOutgoingArgSpaceVar,
- fieldArgOffset);
+ getEmitter()->emitIns_S_R(ins_Store(fieldTypeForInstr), fieldSize, regTmp,
+ compiler->lvaOutgoingArgSpaceVar, fieldArgOffset);
}
}
// Go to the next field.
@@ -17956,10 +17601,13 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
}
else
{
- // The next field should have the same parent variable, and we should have put the field vars in order sorted by offset.
- noway_assert(fieldVarDsc->lvIsStructField && compiler->lvaTable[nextPromotedStructFieldVar].lvIsStructField
- && fieldVarDsc->lvParentLcl == compiler->lvaTable[nextPromotedStructFieldVar].lvParentLcl
- && fieldVarDsc->lvFldOffset < compiler->lvaTable[nextPromotedStructFieldVar].lvFldOffset);
+ // The next field should have the same parent variable, and we should have put the field vars in
+ // order sorted by offset.
+ noway_assert(fieldVarDsc->lvIsStructField &&
+ compiler->lvaTable[nextPromotedStructFieldVar].lvIsStructField &&
+ fieldVarDsc->lvParentLcl ==
+ compiler->lvaTable[nextPromotedStructFieldVar].lvParentLcl &&
+ fieldVarDsc->lvFldOffset < compiler->lvaTable[nextPromotedStructFieldVar].lvFldOffset);
fieldVarDsc = &compiler->lvaTable[nextPromotedStructFieldVar];
}
}
@@ -17969,10 +17617,8 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
{
noway_assert(compiler->lvaPromotedStructAssemblyScratchVar != BAD_VAR_NUM);
- getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL),
- EA_4BYTE,
- curRegNum,
- compiler->lvaPromotedStructAssemblyScratchVar, 0);
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_4BYTE, curRegNum,
+ compiler->lvaPromotedStructAssemblyScratchVar, 0);
regTracker.rsTrackRegTrash(curRegNum);
regSet.rsMarkArgRegUsedByPromotedFieldArg(arg, curRegNum, EA_IS_GCREF(fieldSize));
}
@@ -17982,10 +17628,10 @@ bool CodeGen::genFillSlotFromPromotedStruct(GenTreePtr arg,
}
// Write back the updates.
- *pNextPromotedStructFieldVar = nextPromotedStructFieldVar;
+ *pNextPromotedStructFieldVar = nextPromotedStructFieldVar;
*pBytesOfNextSlotOfCurPromotedStruct = bytesOfNextSlotOfCurPromotedStruct;
- *pCurRegNum = curRegNum;
- *pRegTmp = regTmp;
+ *pCurRegNum = curRegNum;
+ *pRegTmp = regTmp;
return filledExtraSlot;
}
@@ -17995,9 +17641,9 @@ regMaskTP CodeGen::genFindDeadFieldRegs(GenTreePtr cpBlk)
{
noway_assert(cpBlk->OperIsCopyBlkOp()); // Precondition.
GenTreePtr lst = cpBlk->gtOp.gtOp1;
- noway_assert(lst->OperGet() == GT_LIST); // Well-formedness.
+ noway_assert(lst->OperGet() == GT_LIST); // Well-formedness.
GenTreePtr rhs = lst->gtOp.gtOp2;
- regMaskTP res = 0;
+ regMaskTP res = 0;
if (rhs->OperGet() == GT_ADDR)
{
rhs = rhs->gtOp.gtOp1;
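
genFindDeadFieldRegs, whose opening lines are shown above, walks the promoted field locals used by the right-hand side of a struct copy and collects the registers of those whose use here is a last use, so the caller can drop them from the GC register sets. A standalone sketch of that accumulation over a hypothetical, simplified field descriptor; the struct and its members are illustrative, not the JIT's LclVarDsc.

#include <cstdint>

typedef uint64_t RegMask;

// Hypothetical, simplified view of one promoted field local.
struct FieldLocal
{
    bool     isEnregistered; // lives in a register
    unsigned regNum;         // which register, if enregistered
    bool     diesHere;       // this use is the field's last use
};

// Collect the registers of enregistered field locals that die at this use.
static RegMask FindDeadFieldRegs(const FieldLocal* fields, unsigned fieldCount)
{
    RegMask dead = 0;
    for (unsigned i = 0; i < fieldCount; i++)
    {
        if (fields[i].isEnregistered && fields[i].diesHere)
        {
            dead |= RegMask(1) << fields[i].regNum;
        }
    }
    return dead;
}
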
@@ -18024,23 +17670,22 @@ regMaskTP CodeGen::genFindDeadFieldRegs(GenTreePtr cpBlk)
return res;
}
-
void CodeGen::SetupLateArgs(GenTreePtr call)
{
GenTreeArgList* lateArgs;
- GenTreePtr curr;
+ GenTreePtr curr;
/* Generate the code to move the late arguments into registers */
for (lateArgs = call->gtCall.gtCallLateArgs; lateArgs; lateArgs = lateArgs->Rest())
{
- curr = lateArgs->Current();
+ curr = lateArgs->Current();
assert(curr);
fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, curr);
assert(curArgTabEntry);
- regNumber regNum = curArgTabEntry->regNum;
- unsigned argOffset = curArgTabEntry->slotNum * TARGET_POINTER_SIZE;
+ regNumber regNum = curArgTabEntry->regNum;
+ unsigned argOffset = curArgTabEntry->slotNum * TARGET_POINTER_SIZE;
assert(isRegParamType(curr->TypeGet()));
assert(curr->gtType != TYP_VOID);
@@ -18065,7 +17710,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
// variables register (like a GT_REG_VAR). This probably
// is caused by RegAlloc assuming the first operand would
// evaluate into another register.
- regMaskTP rsTemp = regSet.rsMaskVars & regSet.rsMaskUsed & RBM_CALLEE_TRASH;
+ regMaskTP rsTemp = regSet.rsMaskVars & regSet.rsMaskUsed & RBM_CALLEE_TRASH;
regMaskTP gcRegSavedByref = gcInfo.gcRegByrefSetCur & rsTemp;
regMaskTP gcRegSavedGCRef = gcInfo.gcRegGCrefSetCur & rsTemp;
regSet.RemoveMaskVars(rsTemp);
@@ -18094,13 +17739,9 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
/* Check if this is the guess area for the resolve interface call
* Pass a size of EA_OFFSET*/
- if (curr->gtOper == GT_CLS_VAR && compiler->eeGetJitDataOffs(curr->gtClsVar.gtClsVarHnd) >= 0)
+ if (curr->gtOper == GT_CLS_VAR && compiler->eeGetJitDataOffs(curr->gtClsVar.gtClsVarHnd) >= 0)
{
- getEmitter()->emitIns_R_C(ins_Load(TYP_INT),
- EA_OFFSET,
- regNum,
- curr->gtClsVar.gtClsVarHnd,
- 0);
+ getEmitter()->emitIns_R_C(ins_Load(TYP_INT), EA_OFFSET, regNum, curr->gtClsVar.gtClsVarHnd, 0);
regTracker.rsTrackRegTrash(regNum);
/* The value is now in the appropriate register */
@@ -18120,10 +17761,11 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
genUpdateLife(op1);
arg = arg->gtOp.gtOp2;
}
- noway_assert((arg->OperGet() == GT_OBJ) || (arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_MKREFANY));
+ noway_assert((arg->OperGet() == GT_OBJ) || (arg->OperGet() == GT_LCL_VAR) ||
+ (arg->OperGet() == GT_MKREFANY));
// This code passes a TYP_STRUCT by value using
- // the argument registers first and
+ // the argument registers first and
// then the lvaOutgoingArgSpaceVar area.
//
@@ -18132,7 +17774,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
unsigned firstStackSlot = 0;
unsigned argAlign = TARGET_POINTER_SIZE;
size_t originalSize = InferStructOpSizeAlign(arg, &argAlign);
-
+
unsigned slots = (unsigned)(roundUp(originalSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE);
assert(slots > 0);
@@ -18155,8 +17797,9 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
if (curArgTabEntry->isHfaRegArg)
{
// HFA arguments that have been decided to go into registers fit the reg space.
- assert(regNum >= FIRST_FP_ARGREG && "HFA must go in FP register");
- assert(regNum + slots - 1 <= LAST_FP_ARGREG && "HFA argument doesn't fit entirely in FP argument registers");
+ assert(regNum >= FIRST_FP_ARGREG && "HFA must go in FP register");
+ assert(regNum + slots - 1 <= LAST_FP_ARGREG &&
+ "HFA argument doesn't fit entirely in FP argument registers");
firstStackSlot = slots;
}
else if (regNum + slots > MAX_REG_ARG)
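
The assertions above check that an HFA argument assigned to registers fits entirely in the floating-point argument registers. A standalone sketch of the same fit check follows; the register range constants are illustrative placeholders, not the target's real FIRST_FP_ARGREG and LAST_FP_ARGREG values.

#include <cassert>

// Illustrative FP argument register range; real values come from the target.
const unsigned FIRST_FP_ARG = 16;
const unsigned LAST_FP_ARG  = 31;

// An HFA that was assigned to registers must fit entirely within the FP
// argument registers; otherwise it would have been passed on the stack.
static bool HfaFitsInFpArgRegs(unsigned firstReg, unsigned slots)
{
    assert(firstReg >= FIRST_FP_ARG);
    return (firstReg + slots - 1) <= LAST_FP_ARG;
}
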
@@ -18172,7 +17815,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
if (curArgTabEntry->isHfaRegArg)
{
// Mask out the registers used by an HFA arg from the ones used to compute tree into.
- for (unsigned i = regNum; i < regNum + slots; i ++)
+ for (unsigned i = regNum; i < regNum + slots; i++)
{
regNeedMask &= ~genRegMask(regNumber(i));
}
@@ -18192,32 +17835,32 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
// working on the second slot, "bytesOfNextSlotOfCurPromotedStruct" will be 8, the point at which we're
// done), and "nextPromotedStructFieldVar" will be the local variable number of the next field variable
// to be copied.
- LclVarDsc* promotedStructLocalVarDesc = NULL;
- unsigned bytesOfNextSlotOfCurPromotedStruct = 0; // Size of slot.
- unsigned nextPromotedStructFieldVar = BAD_VAR_NUM;
- GenTreePtr structLocalTree = NULL;
-
- BYTE * gcLayout = NULL;
- regNumber regSrc = REG_NA;
+ LclVarDsc* promotedStructLocalVarDesc = NULL;
+ unsigned bytesOfNextSlotOfCurPromotedStruct = 0; // Size of slot.
+ unsigned nextPromotedStructFieldVar = BAD_VAR_NUM;
+ GenTreePtr structLocalTree = NULL;
+
+ BYTE* gcLayout = NULL;
+ regNumber regSrc = REG_NA;
if (arg->gtOper == GT_OBJ)
{
// Are we loading a promoted struct local var?
- if (arg->gtObj.gtOp1->gtOper == GT_ADDR &&
- arg->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
+ if (arg->gtObj.gtOp1->gtOper == GT_ADDR && arg->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
{
- structLocalTree = arg->gtObj.gtOp1->gtOp.gtOp1;
- unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &compiler->lvaTable[structLclNum];
+ structLocalTree = arg->gtObj.gtOp1->gtOp.gtOp1;
+ unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &compiler->lvaTable[structLclNum];
Compiler::lvaPromotionType promotionType = compiler->lvaGetPromotionType(varDsc);
- if (varDsc->lvPromoted &&
- promotionType==Compiler::PROMOTION_TYPE_INDEPENDENT) // Otherwise it is guaranteed to live on stack.
+ if (varDsc->lvPromoted && promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT) // Otherwise it is
+ // guaranteed to
+ // live on stack.
{
// Fix 388395 ARM JitStress WP7
noway_assert(structLocalTree->TypeGet() == TYP_STRUCT);
- assert(!varDsc->lvAddrExposed); // Compiler::PROMOTION_TYPE_INDEPENDENT ==> not exposed.
+ assert(!varDsc->lvAddrExposed); // Compiler::PROMOTION_TYPE_INDEPENDENT ==> not exposed.
promotedStructLocalVarDesc = varDsc;
nextPromotedStructFieldVar = promotedStructLocalVarDesc->lvFieldLclStart;
}
@@ -18245,18 +17888,19 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
unsigned varNum = arg->gtLclVarCommon.gtLclNum;
// Are we loading a promoted struct local var?
- structLocalTree = arg;
- unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &compiler->lvaTable[structLclNum];
+ structLocalTree = arg;
+ unsigned structLclNum = structLocalTree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &compiler->lvaTable[structLclNum];
noway_assert(structLocalTree->TypeGet() == TYP_STRUCT);
Compiler::lvaPromotionType promotionType = compiler->lvaGetPromotionType(varDsc);
- if (varDsc->lvPromoted &&
- promotionType==Compiler::PROMOTION_TYPE_INDEPENDENT) // Otherwise it is guaranteed to live on stack.
+ if (varDsc->lvPromoted && promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT) // Otherwise it is
+ // guaranteed to live
+ // on stack.
{
- assert(!varDsc->lvAddrExposed); // Compiler::PROMOTION_TYPE_INDEPENDENT ==> not exposed.
+ assert(!varDsc->lvAddrExposed); // Compiler::PROMOTION_TYPE_INDEPENDENT ==> not exposed.
promotedStructLocalVarDesc = varDsc;
nextPromotedStructFieldVar = promotedStructLocalVarDesc->lvFieldLclStart;
}
@@ -18268,10 +17912,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
if (slots > 1)
regNeedMask &= ~genRegMask(regSrc);
- getEmitter()->emitIns_R_S(INS_lea,
- EA_PTRSIZE,
- regSrc,
- varNum, 0);
+ getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, regSrc, varNum, 0);
regTracker.rsTrackRegTrash(regSrc);
gcLayout = compiler->lvaGetGcLayout(varNum);
}
@@ -18288,9 +17929,9 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
{
argOffset += TARGET_POINTER_SIZE;
}
-
+
// Skip the copy loop below because we have already placed the argument in the right place
- slots = 0;
+ slots = 0;
gcLayout = NULL;
}
else
@@ -18304,28 +17945,30 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
                    // We must do the stack parts first, since those might need values
// from argument registers that will be overwritten in the portion of the
// loop that writes into the argument registers.
- bytesOfNextSlotOfCurPromotedStruct = (firstStackSlot+1) * TARGET_POINTER_SIZE;
+ bytesOfNextSlotOfCurPromotedStruct = (firstStackSlot + 1) * TARGET_POINTER_SIZE;
// Now find the var number of the first that starts in the first stack slot.
- unsigned fieldVarLim = promotedStructLocalVarDesc->lvFieldLclStart + promotedStructLocalVarDesc->lvFieldCnt;
- while (compiler->lvaTable[nextPromotedStructFieldVar].lvFldOffset < (firstStackSlot*TARGET_POINTER_SIZE)
- && nextPromotedStructFieldVar < fieldVarLim)
+ unsigned fieldVarLim =
+ promotedStructLocalVarDesc->lvFieldLclStart + promotedStructLocalVarDesc->lvFieldCnt;
+ while (compiler->lvaTable[nextPromotedStructFieldVar].lvFldOffset <
+ (firstStackSlot * TARGET_POINTER_SIZE) &&
+ nextPromotedStructFieldVar < fieldVarLim)
{
nextPromotedStructFieldVar++;
}
                    // We can reach the limit (meaning there is no field that goes even partly in the stack) only if the
// first stack slot is after the last slot.
- assert(nextPromotedStructFieldVar < fieldVarLim|| firstStackSlot >= slots);
+ assert(nextPromotedStructFieldVar < fieldVarLim || firstStackSlot >= slots);
}
-
- if (slots > 0) // the mkref case may have set "slots" to zero.
+
+ if (slots > 0) // the mkref case may have set "slots" to zero.
{
// First pass the stack portion of the struct (if any)
//
- int argOffsetOfFirstStackSlot = argOffset;
- for (unsigned i = firstStackSlot; i < slots; i++)
+ int argOffsetOfFirstStackSlot = argOffset;
+ for (unsigned i = firstStackSlot; i < slots; i++)
{
emitAttr fieldSize;
- if (gcLayout[i] == TYPE_GC_NONE)
+ if (gcLayout[i] == TYPE_GC_NONE)
fieldSize = EA_PTRSIZE;
else if (gcLayout[i] == TYPE_GC_REF)
fieldSize = EA_GCREF;
@@ -18341,19 +17984,14 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
regNumber regTmp = REG_STK;
bool filledExtraSlot =
- genFillSlotFromPromotedStruct(arg,
- curArgTabEntry,
- promotedStructLocalVarDesc,
- fieldSize,
- &nextPromotedStructFieldVar,
+ genFillSlotFromPromotedStruct(arg, curArgTabEntry, promotedStructLocalVarDesc, fieldSize,
+ &nextPromotedStructFieldVar,
&bytesOfNextSlotOfCurPromotedStruct,
- /*pCurRegNum*/&maxRegArg,
- argOffset,
- /*fieldOffsetOfFirstStackSlot*/ firstStackSlot * TARGET_POINTER_SIZE,
- argOffsetOfFirstStackSlot,
- &deadFieldVarRegs,
- &regTmp);
- if (filledExtraSlot)
+ /*pCurRegNum*/ &maxRegArg, argOffset,
+ /*fieldOffsetOfFirstStackSlot*/ firstStackSlot *
+ TARGET_POINTER_SIZE,
+ argOffsetOfFirstStackSlot, &deadFieldVarRegs, &regTmp);
+ if (filledExtraSlot)
{
i++;
argOffset += TARGET_POINTER_SIZE;
@@ -18365,7 +18003,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
// and although regSrc has been excluded from regNeedMask, regNeedMask is only a *hint*
// to regSet.rsPickFreeReg, so we need to be a little more forceful.
// Otherwise, just re-use the same register.
- //
+ //
regNumber regTmp = regSrc;
if (slots != 1)
{
@@ -18378,18 +18016,12 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
regSet.rsUnlockReg(genRegMask(regSrc), regSrcUsed);
}
-
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL),
- fieldSize,
- regTmp,
- regSrc,
- i * TARGET_POINTER_SIZE);
-
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL),
- fieldSize,
- regTmp,
- compiler->lvaOutgoingArgSpaceVar,
- argOffset);
+
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), fieldSize, regTmp, regSrc,
+ i * TARGET_POINTER_SIZE);
+
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), fieldSize, regTmp,
+ compiler->lvaOutgoingArgSpaceVar, argOffset);
regTracker.rsTrackRegTrash(regTmp);
}
argOffset += TARGET_POINTER_SIZE;
@@ -18403,25 +18035,26 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
nextPromotedStructFieldVar = promotedStructLocalVarDesc->lvFieldLclStart;
                        // Create a nested loop here so that the first time through the loop
-                       // we set up all of the regArg registers except for possibly
+                       // we set up all of the regArg registers except for possibly
                        // the one that would overwrite regSrc. Then in the final loop
                        // (if necessary) we just set up regArg/regSrc with the overwrite
//
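The two-pass loop described above can be reduced to the following standalone sketch; plain unsigned values stand in for registers, and the real loop additionally handles HFAs, promoted fields, and the GC layout, so this is a simplification under those assumptions.

#include <cstdio>

int main()
{
    // Assumed values: four register arguments r0..r3, with the source pointer in r2.
    const unsigned regNum = 0, firstStackSlot = 4, regSrc = 2;

    bool overwriteRegSrc     = false;
    bool needOverwriteRegSrc = false;
    do
    {
        if (needOverwriteRegSrc)
            overwriteRegSrc = true;
        for (unsigned i = 0; i < firstStackSlot; i++)
        {
            unsigned regArg = regNum + i;
            if (!overwriteRegSrc)
            {
                if (regArg == regSrc)
                {
                    needOverwriteRegSrc = true; // defer: loading it now would clobber the source
                    continue;
                }
            }
            else if (regArg != regSrc)
            {
                continue; // second pass: only the deferred register is left to load
            }
            printf("load r%u from [r%u + %u]\n", regArg, regSrc, i * 4);
        }
    } while (needOverwriteRegSrc != overwriteRegSrc);
    return 0;
}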
- bool overwriteRegSrc=false;
- bool needOverwriteRegSrc=false;
- do {
+ bool overwriteRegSrc = false;
+ bool needOverwriteRegSrc = false;
+ do
+ {
if (needOverwriteRegSrc)
overwriteRegSrc = true;
for (unsigned i = 0; i < firstStackSlot; i++)
{
- regNumber regArg = (regNumber) (regNum+i);
+ regNumber regArg = (regNumber)(regNum + i);
if (overwriteRegSrc == false)
{
if (regArg == regSrc)
{
- needOverwriteRegSrc=true;
+ needOverwriteRegSrc = true;
continue;
}
}
@@ -18432,7 +18065,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
}
emitAttr fieldSize;
- if (gcLayout[i] == TYPE_GC_NONE)
+ if (gcLayout[i] == TYPE_GC_NONE)
fieldSize = EA_PTRSIZE;
else if (gcLayout[i] == TYPE_GC_REF)
fieldSize = EA_GCREF;
@@ -18446,31 +18079,24 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
if (promotedStructLocalVarDesc != NULL)
{
bool filledExtraSlot =
- genFillSlotFromPromotedStruct(arg,
- curArgTabEntry,
- promotedStructLocalVarDesc,
- fieldSize,
- &nextPromotedStructFieldVar,
- &bytesOfNextSlotOfCurPromotedStruct,
- /*pCurRegNum*/&regArg,
- /*argOffset*/ INT32_MAX,
- /*fieldOffsetOfFirstStackSlot*/ INT32_MAX,
+ genFillSlotFromPromotedStruct(arg, curArgTabEntry, promotedStructLocalVarDesc,
+ fieldSize, &nextPromotedStructFieldVar,
+ &bytesOfNextSlotOfCurPromotedStruct,
+ /*pCurRegNum*/ &regArg,
+ /*argOffset*/ INT32_MAX,
+ /*fieldOffsetOfFirstStackSlot*/ INT32_MAX,
/*argOffsetOfFirstStackSlot*/ INT32_MAX,
- &deadFieldVarRegs,
- &regTmp);
- if (filledExtraSlot)
+ &deadFieldVarRegs, &regTmp);
+ if (filledExtraSlot)
i++;
}
else
{
getEmitter()->emitIns_R_AR(ins_Load(curArgTabEntry->isHfaRegArg ? TYP_FLOAT : TYP_I_IMPL),
- fieldSize,
- regArg,
- regSrc,
- i*TARGET_POINTER_SIZE);
+ fieldSize, regArg, regSrc, i * TARGET_POINTER_SIZE);
}
regTracker.rsTrackRegTrash(regArg);
- }
+ }
} while (needOverwriteRegSrc != overwriteRegSrc);
}
@@ -18478,27 +18104,25 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
{
regSet.rsMarkRegFree(genRegMask(regSrc));
}
-
- if (regNum != REG_STK && promotedStructLocalVarDesc == NULL) // If promoted, we already declared the regs
- // used.
+
+ if (regNum != REG_STK && promotedStructLocalVarDesc == NULL) // If promoted, we already declared the regs
+ // used.
{
arg->gtFlags |= GTF_REG_VAL;
for (unsigned i = 1; i < firstStackSlot; i++)
{
arg->gtRegNum = (regNumber)(regNum + i);
- curArgTabEntry->isHfaRegArg ? regSet.SetUsedRegFloat(arg, true)
- : regSet.rsMarkRegUsed(arg);
+ curArgTabEntry->isHfaRegArg ? regSet.SetUsedRegFloat(arg, true) : regSet.rsMarkRegUsed(arg);
}
arg->gtRegNum = regNum;
- curArgTabEntry->isHfaRegArg ? regSet.SetUsedRegFloat(arg, true)
- : regSet.rsMarkRegUsed(arg);
+ curArgTabEntry->isHfaRegArg ? regSet.SetUsedRegFloat(arg, true) : regSet.rsMarkRegUsed(arg);
}
// If we're doing struct promotion, the liveness of the promoted field vars may change after this use,
// so update liveness.
genUpdateLife(arg);
- // Now, if some copied field locals were enregistered, and they're now dead, update the set of
+ // Now, if some copied field locals were enregistered, and they're now dead, update the set of
            // registers holding gc pointers.
if (deadFieldVarRegs != RBM_NONE)
gcInfo.gcMarkRegSetNpt(deadFieldVarRegs);
@@ -18509,10 +18133,10 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
{
// The arg is passed in the outgoing argument area of the stack frame
genCompIntoFreeRegPair(curr, RBM_NONE, RegSet::FREE_REG);
- assert(curr->gtFlags & GTF_REG_VAL); // should be enregistered after genCompIntoFreeRegPair(curr, 0)
+ assert(curr->gtFlags & GTF_REG_VAL); // should be enregistered after genCompIntoFreeRegPair(curr, 0)
- inst_SA_RV(ins_Store(TYP_INT), argOffset+0, genRegPairLo(curr->gtRegPair), TYP_INT);
- inst_SA_RV(ins_Store(TYP_INT), argOffset+4, genRegPairHi(curr->gtRegPair), TYP_INT);
+ inst_SA_RV(ins_Store(TYP_INT), argOffset + 0, genRegPairLo(curr->gtRegPair), TYP_INT);
+ inst_SA_RV(ins_Store(TYP_INT), argOffset + 4, genRegPairHi(curr->gtRegPair), TYP_INT);
}
else
{
@@ -18529,7 +18153,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
// The arg is passed in the outgoing argument area of the stack frame
//
genCodeForTree(curr, 0);
- assert(curr->gtFlags & GTF_REG_VAL); // should be enregistered after genCodeForTree(curr, 0)
+ assert(curr->gtFlags & GTF_REG_VAL); // should be enregistered after genCodeForTree(curr, 0)
inst_SA_RV(ins_Store(curr->gtType), argOffset, curr->gtRegNum, curr->gtType);
@@ -18537,18 +18161,19 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
gcInfo.gcMarkRegSetNpt(genRegMask(curr->gtRegNum));
}
else
- {
+ {
if (!varTypeIsFloating(curr->gtType))
{
genComputeReg(curr, genRegMask(regNum), RegSet::EXACT_REG, RegSet::FREE_REG, false);
assert(curr->gtRegNum == regNum);
regSet.rsMarkRegUsed(curr);
}
- else // varTypeIsFloating(curr->gtType)
+ else // varTypeIsFloating(curr->gtType)
{
if (genIsValidFloatReg(regNum))
{
- genComputeReg(curr, genRegMaskFloat(regNum, curr->gtType), RegSet::EXACT_REG, RegSet::FREE_REG, false);
+ genComputeReg(curr, genRegMaskFloat(regNum, curr->gtType), RegSet::EXACT_REG, RegSet::FREE_REG,
+ false);
assert(curr->gtRegNum == regNum);
regSet.rsMarkRegUsed(curr);
}
@@ -18557,7 +18182,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
genCodeForTree(curr, 0);
// If we are loading a floating point type into integer registers
// then it must be for varargs.
- // genCodeForTree will load it into a floating point register,
+ // genCodeForTree will load it into a floating point register,
// now copy it into the correct integer register(s)
if (curr->TypeGet() == TYP_FLOAT)
{
@@ -18570,7 +18195,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
#endif
regTracker.rsTrackRegTrash(regNum);
- curr->gtType = TYP_INT; // Change this to TYP_INT in case we need to spill this register
+ curr->gtType = TYP_INT; // Change this to TYP_INT in case we need to spill this register
curr->gtRegNum = regNum;
regSet.rsMarkRegUsed(curr);
}
@@ -18578,7 +18203,7 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
{
assert(curr->TypeGet() == TYP_DOUBLE);
regNumber intRegNumLo = regNum;
- curr->gtType = TYP_LONG; // Change this to TYP_LONG in case we spill this
+ curr->gtType = TYP_LONG; // Change this to TYP_LONG in case we spill this
#ifdef _TARGET_ARM_
regNumber intRegNumHi = regNumber(intRegNumLo + 1);
assert(genRegMask(intRegNumHi) & RBM_CALLEE_TRASH);
@@ -18604,12 +18229,12 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
for (lateArgs = call->gtCall.gtCallLateArgs; lateArgs; lateArgs = lateArgs->Rest())
{
- curr = lateArgs->Current();
+ curr = lateArgs->Current();
assert(curr);
if (curr->gtFlags & GTF_SPILLED)
{
- if (isRegPairType(curr->gtType))
+ if (isRegPairType(curr->gtType))
{
regSet.rsUnspillRegPair(curr, genRegPairMask(curr->gtRegPair), RegSet::KEEP_REG);
}
@@ -18621,7 +18246,6 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
}
}
-
#ifdef _TARGET_ARM_
// 'Push' a single GT_MKREFANY argument onto a call's argument list
@@ -18630,23 +18254,21 @@ void CodeGen::SetupLateArgs(GenTreePtr call)
// regNum value will be equal to the registers used to pass the
// first part of the struct.
// If any part is to go onto the stack, we first generate the
-// value into a register specified by 'regNeedMask' and
+// value into a register specified by 'regNeedMask' and
// then store it to the outgoing argument area.
// When this method returns, both parts of the TypedReference have
// been pushed onto the stack, but *no* registers have been marked
// as 'in-use'; that is the responsibility of the caller.
//
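As a rough picture of the two parts being written, here is a sketch of the layout involved; the struct below is an assumption that mirrors the runtime's TypedReference (a data pointer followed by a type handle), not a definition from this file.

#include <cstddef>

// The argument occupies two pointer-sized slots: PushMkRefAnyArg writes the
// pointer part first (marking its register in use), then the type part.
struct TypedReferenceSketch
{
    void* dataPtr;    // first slot: pointer to the value
    void* typeHandle; // second slot: the type
};

static_assert(sizeof(TypedReferenceSketch) == 2 * sizeof(void*),
              "a TypedReference is passed as two pointer-sized slots");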
-void CodeGen::PushMkRefAnyArg ( GenTreePtr mkRefAnyTree,
- fgArgTabEntryPtr curArgTabEntry,
- regMaskTP regNeedMask)
+void CodeGen::PushMkRefAnyArg(GenTreePtr mkRefAnyTree, fgArgTabEntryPtr curArgTabEntry, regMaskTP regNeedMask)
{
- regNumber regNum = curArgTabEntry->regNum;
- regNumber regNum2;
+ regNumber regNum = curArgTabEntry->regNum;
+ regNumber regNum2;
assert(mkRefAnyTree->gtOper == GT_MKREFANY);
regMaskTP arg1RegMask = 0;
- int argOffset = curArgTabEntry->slotNum * TARGET_POINTER_SIZE;
+ int argOffset = curArgTabEntry->slotNum * TARGET_POINTER_SIZE;
- // Construct the TypedReference directly into the argument list of the call by
+ // Construct the TypedReference directly into the argument list of the call by
// 'pushing' the first field of the typed reference: the pointer.
// Do this by directly generating it into the argument register or outgoing arg area of the stack.
// Mark it as used so we don't trash it while generating the second field.
@@ -18693,9 +18315,9 @@ void CodeGen::PushMkRefAnyArg ( GenTreePtr mkRefAnyTree,
if (arg1RegMask != 0)
{
GenTreePtr op1 = mkRefAnyTree->gtOp.gtOp1;
- if (op1->gtFlags & GTF_SPILLED)
+ if (op1->gtFlags & GTF_SPILLED)
{
- /* The register that we loaded arg1 into has been spilled -- reload it back into the correct arg register */
+ /* The register that we loaded arg1 into has been spilled -- reload it back into the correct arg register */
regSet.rsUnspillReg(op1, arg1RegMask, RegSet::FREE_REG);
}
@@ -18707,10 +18329,9 @@ void CodeGen::PushMkRefAnyArg ( GenTreePtr mkRefAnyTree,
}
#endif // _TARGET_ARM_
-#endif // FEATURE_FIXED_OUT_ARGS
-
+#endif // FEATURE_FIXED_OUT_ARGS
-regMaskTP CodeGen::genLoadIndirectCallTarget(GenTreePtr call)
+regMaskTP CodeGen::genLoadIndirectCallTarget(GenTreePtr call)
{
assert((gtCallTypes)call->gtCall.gtCallType == CT_INDIRECT);
@@ -18723,50 +18344,47 @@ regMaskTP CodeGen::genLoadIndirectCallTarget(GenTreePtr call)
*/
struct
{
- GenTreePtr node;
- union
- {
- regNumber regNum;
- regPairNo regPair;
+ GenTreePtr node;
+ union {
+ regNumber regNum;
+ regPairNo regPair;
};
- }
- regArgTab[MAX_REG_ARG];
+ } regArgTab[MAX_REG_ARG];
/* Record the previously loaded arguments, if any */
- unsigned regIndex;
+ unsigned regIndex;
regMaskTP prefRegs = regSet.rsRegMaskFree();
- regMaskTP argRegs = RBM_NONE;
+ regMaskTP argRegs = RBM_NONE;
for (regIndex = 0; regIndex < MAX_REG_ARG; regIndex++)
{
- regMaskTP mask;
- regNumber regNum = genMapRegArgNumToRegNum(regIndex, TYP_INT);
- GenTreePtr argTree = regSet.rsUsedTree[regNum];
+ regMaskTP mask;
+ regNumber regNum = genMapRegArgNumToRegNum(regIndex, TYP_INT);
+ GenTreePtr argTree = regSet.rsUsedTree[regNum];
regArgTab[regIndex].node = argTree;
- if ((argTree != NULL) && (argTree->gtType != TYP_STRUCT)) // We won't spill the struct
+ if ((argTree != NULL) && (argTree->gtType != TYP_STRUCT)) // We won't spill the struct
{
assert(argTree->gtFlags & GTF_REG_VAL);
- if (isRegPairType(argTree->gtType))
+ if (isRegPairType(argTree->gtType))
{
regPairNo regPair = argTree->gtRegPair;
- assert(regNum == genRegPairHi(regPair) ||
- regNum == genRegPairLo(regPair));
+ assert(regNum == genRegPairHi(regPair) || regNum == genRegPairLo(regPair));
regArgTab[regIndex].regPair = regPair;
- mask = genRegPairMask(regPair);
+ mask = genRegPairMask(regPair);
}
else
{
assert(regNum == argTree->gtRegNum);
regArgTab[regIndex].regNum = regNum;
- mask = genRegMask(regNum);
+ mask = genRegMask(regNum);
}
assert(!(prefRegs & mask));
argRegs |= mask;
}
}
-
+
/* Record the register(s) used for the indirect call func ptr */
- fptrRegs = genMakeRvalueAddressable(call->gtCall.gtCallAddr, prefRegs, RegSet::KEEP_REG, false);
+ fptrRegs = genMakeRvalueAddressable(call->gtCall.gtCallAddr, prefRegs, RegSet::KEEP_REG, false);
/* If any of the previously loaded arguments were spilled, reload them */
@@ -18776,7 +18394,7 @@ regMaskTP CodeGen::genLoadIndirectCallTarget(GenTreePtr call)
if ((argTree != NULL) && (argTree->gtFlags & GTF_SPILLED))
{
assert(argTree->gtType != TYP_STRUCT); // We currently don't support spilling structs in argument registers
- if (isRegPairType(argTree->gtType))
+ if (isRegPairType(argTree->gtType))
{
regSet.rsUnspillRegPair(argTree, genRegPairMask(regArgTab[regIndex].regPair), RegSet::KEEP_REG);
}
@@ -18803,32 +18421,31 @@ regMaskTP CodeGen::genLoadIndirectCallTarget(GenTreePtr call)
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
- bool valUsed)
+regMaskTP CodeGen::genCodeForCall(GenTreePtr call, bool valUsed)
{
- emitAttr retSize;
- size_t argSize;
- size_t args;
- regMaskTP retVal;
+ emitAttr retSize;
+ size_t argSize;
+ size_t args;
+ regMaskTP retVal;
emitter::EmitCallType emitCallType;
- unsigned saveStackLvl;
+ unsigned saveStackLvl;
- BasicBlock * returnLabel = DUMMY_INIT(NULL);
- LclVarDsc * frameListRoot = NULL;
+ BasicBlock* returnLabel = DUMMY_INIT(NULL);
+ LclVarDsc* frameListRoot = NULL;
- unsigned savCurIntArgReg;
- unsigned savCurFloatArgReg;
+ unsigned savCurIntArgReg;
+ unsigned savCurFloatArgReg;
- unsigned areg;
+ unsigned areg;
- regMaskTP fptrRegs = RBM_NONE;
- regMaskTP vptrMask = RBM_NONE;
+ regMaskTP fptrRegs = RBM_NONE;
+ regMaskTP vptrMask = RBM_NONE;
-#ifdef DEBUG
- unsigned stackLvl = getEmitter()->emitCurStackLvl;
+#ifdef DEBUG
+ unsigned stackLvl = getEmitter()->emitCurStackLvl;
if (compiler->verbose)
{
@@ -18838,8 +18455,8 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
}
#endif
- gtCallTypes callType = (gtCallTypes)call->gtCall.gtCallType;
- IL_OFFSETX ilOffset = BAD_IL_OFFSET;
+ gtCallTypes callType = (gtCallTypes)call->gtCall.gtCallType;
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET;
CORINFO_SIG_INFO* sigInfo = nullptr;
@@ -18886,7 +18503,6 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
saveStackLvl = genStackLevel;
-
/*-------------------------------------------------------------------------
* Set up the registers and arguments
*/
@@ -18898,10 +18514,9 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
/* We need to get a label for the return address with the proper stack depth. */
/* For the callee pops case (the default) that is before the args are pushed. */
- if ((call->gtFlags & GTF_CALL_UNMANAGED) &&
- !(call->gtFlags & GTF_CALL_POP_ARGS))
+ if ((call->gtFlags & GTF_CALL_UNMANAGED) && !(call->gtFlags & GTF_CALL_POP_ARGS))
{
- returnLabel = genCreateTempLabel();
+ returnLabel = genCreateTempLabel();
}
/*
@@ -18910,9 +18525,9 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
*/
noway_assert(intRegState.rsCurRegArgNum <= MAX_REG_ARG);
- savCurIntArgReg = intRegState.rsCurRegArgNum;
- savCurFloatArgReg = floatRegState.rsCurRegArgNum;
- intRegState.rsCurRegArgNum = 0;
+ savCurIntArgReg = intRegState.rsCurRegArgNum;
+ savCurFloatArgReg = floatRegState.rsCurRegArgNum;
+ intRegState.rsCurRegArgNum = 0;
floatRegState.rsCurRegArgNum = 0;
/* Pass the arguments */
@@ -18941,41 +18556,41 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
/* Make sure any callee-trashed registers are saved */
- regMaskTP calleeTrashedRegs = RBM_NONE;
+ regMaskTP calleeTrashedRegs = RBM_NONE;
#if GTF_CALL_REG_SAVE
- if (call->gtFlags & GTF_CALL_REG_SAVE)
+ if (call->gtFlags & GTF_CALL_REG_SAVE)
{
/* The return value reg(s) will definitely be trashed */
switch (call->gtType)
{
- case TYP_INT:
- case TYP_REF:
- case TYP_BYREF:
-#if!CPU_HAS_FP_SUPPORT
- case TYP_FLOAT:
+ case TYP_INT:
+ case TYP_REF:
+ case TYP_BYREF:
+#if !CPU_HAS_FP_SUPPORT
+ case TYP_FLOAT:
#endif
- calleeTrashedRegs = RBM_INTRET;
- break;
+ calleeTrashedRegs = RBM_INTRET;
+ break;
- case TYP_LONG:
-#if!CPU_HAS_FP_SUPPORT
- case TYP_DOUBLE:
+ case TYP_LONG:
+#if !CPU_HAS_FP_SUPPORT
+ case TYP_DOUBLE:
#endif
- calleeTrashedRegs = RBM_LNGRET;
- break;
+ calleeTrashedRegs = RBM_LNGRET;
+ break;
- case TYP_VOID:
+ case TYP_VOID:
#if CPU_HAS_FP_SUPPORT
- case TYP_FLOAT:
- case TYP_DOUBLE:
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
#endif
- calleeTrashedRegs = 0;
- break;
+ calleeTrashedRegs = 0;
+ break;
- default:
- noway_assert(!"unhandled/unexpected type");
+ default:
+ noway_assert(!"unhandled/unexpected type");
}
}
else
@@ -18986,13 +18601,13 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
/* Spill any callee-saved registers which are being used */
- regMaskTP spillRegs = calleeTrashedRegs & regSet.rsMaskUsed;
+ regMaskTP spillRegs = calleeTrashedRegs & regSet.rsMaskUsed;
    /* Rather than saving all GC registers to the InlinedCallFrame,
       just spill them to temps. */
if (call->gtFlags & GTF_CALL_UNMANAGED)
- spillRegs |= (gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur) & regSet.rsMaskUsed;
+ spillRegs |= (gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & regSet.rsMaskUsed;
// Ignore fptrRegs as it is needed only to perform the indirect call
@@ -19035,11 +18650,9 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
retSize = EA_PTRSIZE;
-
- if (valUsed)
+ if (valUsed)
{
- if (call->gtType == TYP_REF ||
- call->gtType == TYP_ARRAY)
+ if (call->gtType == TYP_REF || call->gtType == TYP_ARRAY)
{
retSize = EA_GCREF;
}
@@ -19049,7 +18662,6 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
}
}
-
/*-------------------------------------------------------------------------
* For caller-pop calls, the GC info will report the arguments as pending
arguments as the caller explicitly pops them. Also should be
@@ -19057,8 +18669,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
call site (callee owns them)
*/
- args = (call->gtFlags & GTF_CALL_POP_ARGS) ? -int(argSize)
- : argSize;
+ args = (call->gtFlags & GTF_CALL_POP_ARGS) ? -int(argSize) : argSize;
#ifdef PROFILING_SUPPORTED
@@ -19070,11 +18681,9 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
/* fire the event at the call site */
/* alas, right now I can only handle calls via a method handle */
- if (compiler->compIsProfilerHookNeeded() &&
- (callType == CT_USER_FUNC) &&
- call->gtCall.IsTailCall())
+ if (compiler->compIsProfilerHookNeeded() && (callType == CT_USER_FUNC) && call->gtCall.IsTailCall())
{
- unsigned saveStackLvl2 = genStackLevel;
+ unsigned saveStackLvl2 = genStackLevel;
//
// Push the profilerHandle
@@ -19088,8 +18697,8 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
if (compiler->compProfilerMethHndIndirected)
{
- getEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA,
- (ssize_t)compiler->compProfilerMethHnd);
+ getEmitter()->emitIns_AR_R(INS_push, EA_PTR_DSP_RELOC, REG_NA, REG_NA,
+ (ssize_t)compiler->compProfilerMethHnd);
}
else
{
@@ -19098,8 +18707,8 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
genSinglePush();
genEmitHelperCall(CORINFO_HELP_PROF_FCN_TAILCALL,
- sizeof(int) * 1, // argSize
- EA_UNKNOWN); // retSize
+ sizeof(int) * 1, // argSize
+ EA_UNKNOWN); // retSize
//
// Adjust the number of stack slots used by this managed method if necessary.
@@ -19119,7 +18728,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
// to disturb them and hence argument registers are locked here.
regMaskTP usedMask = RBM_NONE;
regSet.rsLockReg(RBM_ARG_REGS, &usedMask);
-
+
regNumber scratchReg = regSet.rsGrabReg(RBM_CALLEE_SAVED);
regSet.rsLockReg(genRegMask(scratchReg));
@@ -19128,7 +18737,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
{
attr = EA_GCREF;
gcInfo.gcMarkRegSetGCref(scratchReg);
- }
+ }
else if (RBM_R0 & gcInfo.gcRegByrefSetCur)
{
attr = EA_BYREF;
@@ -19148,13 +18757,13 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
regTracker.rsTrackRegTrash(REG_R0);
}
else
- {
+ {
instGen_Set_Reg_To_Imm(EA_4BYTE, REG_R0, (ssize_t)compiler->compProfilerMethHnd);
}
genEmitHelperCall(CORINFO_HELP_PROF_FCN_TAILCALL,
- 0, // argSize
- EA_UNKNOWN); // retSize
+ 0, // argSize
+ EA_UNKNOWN); // retSize
// Restore back to the state that existed before profiler callback
gcInfo.gcMarkRegSetNpt(scratchReg);
@@ -19162,10 +18771,9 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
regTracker.rsTrackRegTrash(REG_R0);
regSet.rsUnlockReg(genRegMask(scratchReg));
regSet.rsUnlockReg(RBM_ARG_REGS, usedMask);
-#else
+#else
NYI("Pushing the profilerHandle & caller's sp for the profiler callout and locking any registers");
-#endif //_TARGET_X86_
-
+#endif //_TARGET_X86_
/* Restore the stack level */
genStackLevel = saveStackLvl2;
@@ -19173,8 +18781,6 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
#endif // PROFILING_SUPPORTED
-
-
#ifdef DEBUG
/*-------------------------------------------------------------------------
* Generate an ESP check for the call
@@ -19182,14 +18788,16 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
if (compiler->opts.compStackCheckOnCall
#if defined(USE_TRANSITION_THUNKS) || defined(USE_DYNAMIC_STACK_ALIGN)
- //check the stacks as frequently as possible
+ // check the stacks as frequently as possible
&& !call->IsHelperCall()
#else
&& call->gtCall.gtCallType == CT_USER_FUNC
#endif
- )
+ )
{
- noway_assert(compiler->lvaCallEspCheck != 0xCCCCCCCC && compiler->lvaTable[compiler->lvaCallEspCheck].lvDoNotEnregister && compiler->lvaTable[compiler->lvaCallEspCheck].lvOnFrame);
+ noway_assert(compiler->lvaCallEspCheck != 0xCCCCCCCC &&
+ compiler->lvaTable[compiler->lvaCallEspCheck].lvDoNotEnregister &&
+ compiler->lvaTable[compiler->lvaCallEspCheck].lvOnFrame);
getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaCallEspCheck, 0);
}
#endif
@@ -19198,8 +18806,8 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
* Generate the call
*/
- bool fPossibleSyncHelperCall = false;
- CorInfoHelpFunc helperNum = CORINFO_HELP_UNDEF; /* only initialized to avoid compiler C4701 warning */
+ bool fPossibleSyncHelperCall = false;
+ CorInfoHelpFunc helperNum = CORINFO_HELP_UNDEF; /* only initialized to avoid compiler C4701 warning */
bool fTailCallTargetIsVSD = false;
@@ -19210,27 +18818,29 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
an indirect call.
*/
- if ((call->gtCall.gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) && !fTailCall)
+ if ((call->gtCall.gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) && !fTailCall)
{
noway_assert(call->gtCall.gtCallType == CT_USER_FUNC);
- assert((compiler->info.compCompHnd->getMethodAttribs(call->gtCall.gtCallMethHnd) & (CORINFO_FLG_DELEGATE_INVOKE|CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE|CORINFO_FLG_FINAL));
+ assert((compiler->info.compCompHnd->getMethodAttribs(call->gtCall.gtCallMethHnd) &
+ (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)) ==
+ (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL));
/* Find the offsets of the 'this' pointer and new target */
- CORINFO_EE_INFO * pInfo;
- unsigned instOffs; // offset of new 'this' pointer
- unsigned firstTgtOffs; // offset of first target to invoke
- const regNumber regThis = genGetThisArgReg(call);
+ CORINFO_EE_INFO* pInfo;
+ unsigned instOffs; // offset of new 'this' pointer
+ unsigned firstTgtOffs; // offset of first target to invoke
+ const regNumber regThis = genGetThisArgReg(call);
- pInfo = compiler->eeGetEEInfo();
- instOffs = pInfo->offsetOfDelegateInstance;
+ pInfo = compiler->eeGetEEInfo();
+ instOffs = pInfo->offsetOfDelegateInstance;
firstTgtOffs = pInfo->offsetOfDelegateFirstTarget;
// Grab an available register to use for the CALL indirection
- regNumber indCallReg = regSet.rsGrabReg(RBM_ALLINT);
+ regNumber indCallReg = regSet.rsGrabReg(RBM_ALLINT);
- // Save the invoke-target-function in indCallReg
+ // Save the invoke-target-function in indCallReg
// 'mov indCallReg, dword ptr [regThis + firstTgtOffs]'
getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, indCallReg, regThis, firstTgtOffs);
regTracker.rsTrackRegTrash(indCallReg);
@@ -19244,884 +18854,814 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
/* Call through indCallReg */
getEmitter()->emitIns_Call(emitter::EC_INDIR_R,
- NULL, // methHnd
- INDEBUG_LDISASM_COMMA(sigInfo)
- NULL, // addr
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- indCallReg);
+ NULL, // methHnd
+ INDEBUG_LDISASM_COMMA(sigInfo) NULL, // addr
+ args, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset, indCallReg);
}
else
- /*-------------------------------------------------------------------------
- * Virtual and interface calls
- */
+ /*-------------------------------------------------------------------------
+ * Virtual and interface calls
+ */
- switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK)
- {
- case GTF_CALL_VIRT_STUB:
+ switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK)
{
- regSet.rsSetRegsModified(RBM_VIRTUAL_STUB_PARAM);
+ case GTF_CALL_VIRT_STUB:
+ {
+ regSet.rsSetRegsModified(RBM_VIRTUAL_STUB_PARAM);
- // An x86 JIT which uses full stub dispatch must generate only
- // the following stub dispatch calls:
- //
- // (1) isCallRelativeIndirect:
- // call dword ptr [rel32] ; FF 15 ---rel32----
- // (2) isCallRelative:
- // call abc ; E8 ---rel32----
- // (3) isCallRegisterIndirect:
- // 3-byte nop ;
- // call dword ptr [eax] ; FF 10
- //
- // THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN
- // vm\i386\cGenCpu.h, esp. isCallRegisterIndirect.
+ // An x86 JIT which uses full stub dispatch must generate only
+ // the following stub dispatch calls:
+ //
+ // (1) isCallRelativeIndirect:
+ // call dword ptr [rel32] ; FF 15 ---rel32----
+ // (2) isCallRelative:
+ // call abc ; E8 ---rel32----
+ // (3) isCallRegisterIndirect:
+ // 3-byte nop ;
+ // call dword ptr [eax] ; FF 10
+ //
+ // THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN
+ // vm\i386\cGenCpu.h, esp. isCallRegisterIndirect.
- //
- // Please do not insert any Random NOPs while constructing this VSD call
- //
- getEmitter()->emitDisableRandomNops();
+ //
+ // Please do not insert any Random NOPs while constructing this VSD call
+ //
+ getEmitter()->emitDisableRandomNops();
- if (!fTailCall)
- {
- // This is code to set up an indirect call to a stub address computed
- // via dictionary lookup. However the dispatch stub receivers aren't set up
- // to accept such calls at the moment.
- if (callType == CT_INDIRECT)
+ if (!fTailCall)
{
- regNumber indReg;
+ // This is code to set up an indirect call to a stub address computed
+ // via dictionary lookup. However the dispatch stub receivers aren't set up
+ // to accept such calls at the moment.
+ if (callType == CT_INDIRECT)
+ {
+ regNumber indReg;
- // -------------------------------------------------------------------------
- // The importer decided we needed a stub call via a computed
- // stub dispatch address, i.e. an address which came from a dictionary lookup.
- // - The dictionary lookup produces an indirected address, suitable for call
- // via "call [REG_VIRTUAL_STUB_PARAM]"
- //
- // This combination will only be generated for shared generic code and when
- // stub dispatch is active.
+ // -------------------------------------------------------------------------
+ // The importer decided we needed a stub call via a computed
+ // stub dispatch address, i.e. an address which came from a dictionary lookup.
+ // - The dictionary lookup produces an indirected address, suitable for call
+ // via "call [REG_VIRTUAL_STUB_PARAM]"
+ //
+ // This combination will only be generated for shared generic code and when
+ // stub dispatch is active.
- // No need to null check the this pointer - the dispatch code will deal with this.
+ // No need to null check the this pointer - the dispatch code will deal with this.
- noway_assert(genStillAddressable(call->gtCall.gtCallAddr));
+ noway_assert(genStillAddressable(call->gtCall.gtCallAddr));
- // Now put the address in REG_VIRTUAL_STUB_PARAM.
- // This is typically a nop when the register used for
- // the gtCallAddr is REG_VIRTUAL_STUB_PARAM
- //
- inst_RV_TT(INS_mov, REG_VIRTUAL_STUB_PARAM, call->gtCall.gtCallAddr);
- regTracker.rsTrackRegTrash(REG_VIRTUAL_STUB_PARAM);
+ // Now put the address in REG_VIRTUAL_STUB_PARAM.
+ // This is typically a nop when the register used for
+ // the gtCallAddr is REG_VIRTUAL_STUB_PARAM
+ //
+ inst_RV_TT(INS_mov, REG_VIRTUAL_STUB_PARAM, call->gtCall.gtCallAddr);
+ regTracker.rsTrackRegTrash(REG_VIRTUAL_STUB_PARAM);
#if defined(_TARGET_X86_)
- // Emit enough bytes of nops so that this sequence can be distinguished
- // from other virtual stub dispatch calls.
- //
- // NOTE: THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN
- // vm\i386\cGenCpu.h, esp. isCallRegisterIndirect.
- //
- getEmitter()->emitIns_Nop(3);
+ // Emit enough bytes of nops so that this sequence can be distinguished
+ // from other virtual stub dispatch calls.
+ //
+ // NOTE: THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN
+ // vm\i386\cGenCpu.h, esp. isCallRegisterIndirect.
+ //
+ getEmitter()->emitIns_Nop(3);
- // Make the virtual stub call:
- // call [REG_VIRTUAL_STUB_PARAM]
- //
- emitCallType = emitter::EC_INDIR_ARD;
+ // Make the virtual stub call:
+ // call [REG_VIRTUAL_STUB_PARAM]
+ //
+ emitCallType = emitter::EC_INDIR_ARD;
- indReg = REG_VIRTUAL_STUB_PARAM;
- genDoneAddressable(call->gtCall.gtCallAddr, fptrRegs, RegSet::KEEP_REG);
+ indReg = REG_VIRTUAL_STUB_PARAM;
+ genDoneAddressable(call->gtCall.gtCallAddr, fptrRegs, RegSet::KEEP_REG);
#elif CPU_LOAD_STORE_ARCH // ARM doesn't allow us to use an indirection for the call
- genDoneAddressable(call->gtCall.gtCallAddr, fptrRegs, RegSet::KEEP_REG);
+ genDoneAddressable(call->gtCall.gtCallAddr, fptrRegs, RegSet::KEEP_REG);
- // Make the virtual stub call:
- // ldr indReg, [REG_VIRTUAL_STUB_PARAM]
- // call indReg
- //
- emitCallType = emitter::EC_INDIR_R;
+ // Make the virtual stub call:
+ // ldr indReg, [REG_VIRTUAL_STUB_PARAM]
+ // call indReg
+ //
+ emitCallType = emitter::EC_INDIR_R;
- // Now dereference [REG_VIRTUAL_STUB_PARAM] and put it in a new temp register 'indReg'
- //
- indReg = regSet.rsGrabReg(RBM_ALLINT & ~RBM_VIRTUAL_STUB_PARAM);
- assert(call->gtCall.gtCallAddr->gtFlags & GTF_REG_VAL);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indReg, REG_VIRTUAL_STUB_PARAM, 0);
- regTracker.rsTrackRegTrash(indReg);
+ // Now dereference [REG_VIRTUAL_STUB_PARAM] and put it in a new temp register 'indReg'
+ //
+ indReg = regSet.rsGrabReg(RBM_ALLINT & ~RBM_VIRTUAL_STUB_PARAM);
+ assert(call->gtCall.gtCallAddr->gtFlags & GTF_REG_VAL);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indReg, REG_VIRTUAL_STUB_PARAM, 0);
+ regTracker.rsTrackRegTrash(indReg);
#else
-#error "Unknown target for VSD call"
-#endif
-
- getEmitter()->emitIns_Call(emitCallType,
- NULL, // methHnd
- INDEBUG_LDISASM_COMMA(sigInfo)
- NULL, // addr
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- indReg);
- }
- else
- {
- // -------------------------------------------------------------------------
- // Check for a direct stub call.
- //
+#error "Unknown target for VSD call"
+#endif
- // Get stub addr. This will return NULL if virtual call stubs are not active
- void *stubAddr = NULL;
+ getEmitter()->emitIns_Call(emitCallType,
+ NULL, // methHnd
+ INDEBUG_LDISASM_COMMA(sigInfo) NULL, // addr
+ args, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset, indReg);
+ }
+ else
+ {
+ // -------------------------------------------------------------------------
+ // Check for a direct stub call.
+ //
- stubAddr = (void *) call->gtCall.gtStubCallStubAddr;
+ // Get stub addr. This will return NULL if virtual call stubs are not active
+ void* stubAddr = NULL;
- noway_assert(stubAddr != NULL);
+ stubAddr = (void*)call->gtCall.gtStubCallStubAddr;
- // -------------------------------------------------------------------------
- // Direct stub calls, though the stubAddr itself may still need to be
-                        // accessed via an indirection.
- //
+ noway_assert(stubAddr != NULL);
- // No need to null check - the dispatch code will deal with null this.
+ // -------------------------------------------------------------------------
+ // Direct stub calls, though the stubAddr itself may still need to be
+                            // accessed via an indirection.
+ //
- emitter::EmitCallType callTypeStubAddr = emitter::EC_FUNC_ADDR;
- void* addr = stubAddr;
- int disp = 0;
- regNumber callReg = REG_NA;
+ // No need to null check - the dispatch code will deal with null this.
- if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
- {
-#if CPU_LOAD_STORE_ARCH
- callReg = regSet.rsGrabReg(RBM_VIRTUAL_STUB_PARAM);
- noway_assert(callReg == REG_VIRTUAL_STUB_PARAM);
+ emitter::EmitCallType callTypeStubAddr = emitter::EC_FUNC_ADDR;
+ void* addr = stubAddr;
+ int disp = 0;
+ regNumber callReg = REG_NA;
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC,REG_VIRTUAL_STUB_PARAM,(ssize_t)stubAddr);
- // The stub will write-back to this register, so don't track it
- regTracker.rsTrackRegTrash(REG_VIRTUAL_STUB_PARAM);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE,REG_JUMP_THUNK_PARAM,REG_VIRTUAL_STUB_PARAM, 0);
- regTracker.rsTrackRegTrash(REG_JUMP_THUNK_PARAM);
- callTypeStubAddr = emitter::EC_INDIR_R;
- getEmitter()->emitIns_Call(emitter::EC_INDIR_R,
- NULL, // methHnd
- INDEBUG_LDISASM_COMMA(sigInfo)
- NULL, // addr
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- REG_JUMP_THUNK_PARAM);
+ if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
+ {
+#if CPU_LOAD_STORE_ARCH
+ callReg = regSet.rsGrabReg(RBM_VIRTUAL_STUB_PARAM);
+ noway_assert(callReg == REG_VIRTUAL_STUB_PARAM);
+
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, REG_VIRTUAL_STUB_PARAM, (ssize_t)stubAddr);
+ // The stub will write-back to this register, so don't track it
+ regTracker.rsTrackRegTrash(REG_VIRTUAL_STUB_PARAM);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, REG_JUMP_THUNK_PARAM,
+ REG_VIRTUAL_STUB_PARAM, 0);
+ regTracker.rsTrackRegTrash(REG_JUMP_THUNK_PARAM);
+ callTypeStubAddr = emitter::EC_INDIR_R;
+ getEmitter()->emitIns_Call(emitter::EC_INDIR_R,
+ NULL, // methHnd
+ INDEBUG_LDISASM_COMMA(sigInfo) NULL, // addr
+ args, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset, REG_JUMP_THUNK_PARAM);
#else
- // emit an indirect call
- callTypeStubAddr = emitter::EC_INDIR_C;
- addr = 0;
- disp = (ssize_t) stubAddr;
-#endif
-
- }
+ // emit an indirect call
+ callTypeStubAddr = emitter::EC_INDIR_C;
+ addr = 0;
+ disp = (ssize_t)stubAddr;
+#endif
+ }
#if CPU_LOAD_STORE_ARCH
- if (callTypeStubAddr != emitter::EC_INDIR_R)
+ if (callTypeStubAddr != emitter::EC_INDIR_R)
#endif
- {
- getEmitter()->emitIns_Call(callTypeStubAddr,
- call->gtCall.gtCallMethHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- addr,
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- callReg,
- REG_NA,
- 0,
- disp);
+ {
+ getEmitter()->emitIns_Call(callTypeStubAddr, call->gtCall.gtCallMethHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) addr, args, retSize,
+ gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset, callReg, REG_NA, 0, disp);
+ }
}
}
- }
- else // tailCall is true
- {
+ else // tailCall is true
+ {
// Non-X86 tail calls materialize the null-check in fgMorphTailCall, when it
// moves the this pointer out of its usual place and into the argument list.
#ifdef _TARGET_X86_
- // Generate "cmp ECX, [ECX]" to trap null pointers
- const regNumber regThis = genGetThisArgReg(call);
- getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
+ // Generate "cmp ECX, [ECX]" to trap null pointers
+ const regNumber regThis = genGetThisArgReg(call);
+ getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
#endif // _TARGET_X86_
- if (callType == CT_INDIRECT)
- {
- noway_assert(genStillAddressable(call->gtCall.gtCallAddr));
+ if (callType == CT_INDIRECT)
+ {
+ noway_assert(genStillAddressable(call->gtCall.gtCallAddr));
- // Now put the address in EAX.
- inst_RV_TT(INS_mov, REG_TAILCALL_ADDR, call->gtCall.gtCallAddr);
- regTracker.rsTrackRegTrash(REG_TAILCALL_ADDR);
+ // Now put the address in EAX.
+ inst_RV_TT(INS_mov, REG_TAILCALL_ADDR, call->gtCall.gtCallAddr);
+ regTracker.rsTrackRegTrash(REG_TAILCALL_ADDR);
- genDoneAddressable(call->gtCall.gtCallAddr, fptrRegs, RegSet::KEEP_REG);
- }
- else
- {
- // importer/EE should guarantee the indirection
- noway_assert(call->gtCall.gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT);
+ genDoneAddressable(call->gtCall.gtCallAddr, fptrRegs, RegSet::KEEP_REG);
+ }
+ else
+ {
+ // importer/EE should guarantee the indirection
+ noway_assert(call->gtCall.gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT);
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, REG_TAILCALL_ADDR, ssize_t(call->gtCall.gtStubCallStubAddr));
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, REG_TAILCALL_ADDR,
+ ssize_t(call->gtCall.gtStubCallStubAddr));
+ }
+
+ fTailCallTargetIsVSD = true;
}
- fTailCallTargetIsVSD = true;
+ //
+ // OK to start inserting random NOPs again
+ //
+ getEmitter()->emitEnableRandomNops();
}
+ break;
- //
- // OK to start inserting random NOPs again
- //
- getEmitter()->emitEnableRandomNops();
- }
- break;
-
- case GTF_CALL_VIRT_VTABLE:
- // stub dispatching is off or this is not a virtual call (could be a tailcall)
- {
- regNumber vptrReg;
- unsigned vtabOffsOfIndirection;
- unsigned vtabOffsAfterIndirection;
+ case GTF_CALL_VIRT_VTABLE:
+ // stub dispatching is off or this is not a virtual call (could be a tailcall)
+ {
+ regNumber vptrReg;
+ unsigned vtabOffsOfIndirection;
+ unsigned vtabOffsAfterIndirection;
- noway_assert(callType == CT_USER_FUNC);
+ noway_assert(callType == CT_USER_FUNC);
- vptrReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL indirection
- vptrMask = genRegMask(vptrReg);
+ vptrReg =
+ regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL indirection
+ vptrMask = genRegMask(vptrReg);
- /* The register no longer holds a live pointer value */
- gcInfo.gcMarkRegSetNpt(vptrMask);
+ /* The register no longer holds a live pointer value */
+ gcInfo.gcMarkRegSetNpt(vptrMask);
- // MOV vptrReg, [REG_CALL_THIS + offs]
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE,
- vptrReg, genGetThisArgReg(call), VPTR_OFFS);
- regTracker.rsTrackRegTrash(vptrReg);
+ // MOV vptrReg, [REG_CALL_THIS + offs]
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, vptrReg, genGetThisArgReg(call),
+ VPTR_OFFS);
+ regTracker.rsTrackRegTrash(vptrReg);
- noway_assert(vptrMask & ~call->gtCall.gtCallRegUsedMask);
+ noway_assert(vptrMask & ~call->gtCall.gtCallRegUsedMask);
- /* Get hold of the vtable offset (note: this might be expensive) */
+ /* Get hold of the vtable offset (note: this might be expensive) */
- compiler->info.compCompHnd->getMethodVTableOffset(call->gtCall.gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection);
+ compiler->info.compCompHnd->getMethodVTableOffset(call->gtCall.gtCallMethHnd,
+ &vtabOffsOfIndirection,
+ &vtabOffsAfterIndirection);
- /* Get the appropriate vtable chunk */
+ /* Get the appropriate vtable chunk */
- /* The register no longer holds a live pointer value */
- gcInfo.gcMarkRegSetNpt(vptrMask);
+ /* The register no longer holds a live pointer value */
+ gcInfo.gcMarkRegSetNpt(vptrMask);
- // MOV vptrReg, [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE,
- vptrReg, vptrReg, vtabOffsOfIndirection);
+ // MOV vptrReg, [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, vptrReg, vptrReg,
+ vtabOffsOfIndirection);
- /* Call through the appropriate vtable slot */
+ /* Call through the appropriate vtable slot */
- if (fTailCall)
- {
- /* Load the function address: "[vptrReg+vtabOffs] -> reg_intret" */
+ if (fTailCall)
+ {
+ /* Load the function address: "[vptrReg+vtabOffs] -> reg_intret" */
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_TAILCALL_ADDR,
- vptrReg, vtabOffsAfterIndirection);
- }
- else
- {
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_TAILCALL_ADDR, vptrReg,
+ vtabOffsAfterIndirection);
+ }
+ else
+ {
#if CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, vptrReg, vptrReg, vtabOffsAfterIndirection);
-
- getEmitter()->emitIns_Call(emitter::EC_INDIR_R,
- call->gtCall.gtCallMethHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- NULL, // addr
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- vptrReg); // ireg
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, vptrReg, vptrReg,
+ vtabOffsAfterIndirection);
+
+ getEmitter()->emitIns_Call(emitter::EC_INDIR_R, call->gtCall.gtCallMethHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) NULL, // addr
+ args, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset,
+ vptrReg); // ireg
#else
- getEmitter()->emitIns_Call(emitter::EC_FUNC_VIRTUAL,
- call->gtCall.gtCallMethHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- NULL, // addr
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- vptrReg, // ireg
- REG_NA, // xreg
- 0, // xmul
- vtabOffsAfterIndirection); // disp
+ getEmitter()->emitIns_Call(emitter::EC_FUNC_VIRTUAL, call->gtCall.gtCallMethHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) NULL, // addr
+ args, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset,
+ vptrReg, // ireg
+ REG_NA, // xreg
+ 0, // xmul
+ vtabOffsAfterIndirection); // disp
#endif // CPU_LOAD_STORE_ARCH
- }
- }
- break;
-
- case GTF_CALL_NONVIRT:
- {
- //------------------------ Non-virtual/Indirect calls -------------------------
- // Lots of cases follow
- // - Direct P/Invoke calls
- // - Indirect calls to P/Invoke functions via the P/Invoke stub
- // - Direct Helper calls
- // - Indirect Helper calls
- // - Direct calls to known addresses
- // - Direct calls where address is accessed by one or two indirections
- // - Indirect calls to computed addresses
- // - Tailcall versions of all of the above
+ }
+ }
+ break;
- CORINFO_METHOD_HANDLE methHnd = call->gtCall.gtCallMethHnd;
+ case GTF_CALL_NONVIRT:
+ {
+ //------------------------ Non-virtual/Indirect calls -------------------------
+ // Lots of cases follow
+ // - Direct P/Invoke calls
+ // - Indirect calls to P/Invoke functions via the P/Invoke stub
+ // - Direct Helper calls
+ // - Indirect Helper calls
+ // - Direct calls to known addresses
+ // - Direct calls where address is accessed by one or two indirections
+ // - Indirect calls to computed addresses
+ // - Tailcall versions of all of the above
+ CORINFO_METHOD_HANDLE methHnd = call->gtCall.gtCallMethHnd;
- //------------------------------------------------------
- // Non-virtual/Indirect calls: Insert a null check on the "this" pointer if needed
- //
- // For (final and private) functions which were called with
- // invokevirtual, but which we call directly, we need to
- // dereference the object pointer to make sure it's not NULL.
- //
+ //------------------------------------------------------
+ // Non-virtual/Indirect calls: Insert a null check on the "this" pointer if needed
+ //
+ // For (final and private) functions which were called with
+ // invokevirtual, but which we call directly, we need to
+ // dereference the object pointer to make sure it's not NULL.
+ //
- if (call->gtFlags & GTF_CALL_NULLCHECK)
- {
- /* Generate "cmp ECX, [ECX]" to trap null pointers */
- const regNumber regThis = genGetThisArgReg(call);
+ if (call->gtFlags & GTF_CALL_NULLCHECK)
+ {
+ /* Generate "cmp ECX, [ECX]" to trap null pointers */
+ const regNumber regThis = genGetThisArgReg(call);
#if CPU_LOAD_STORE_ARCH
- regNumber indReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the indirection
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, indReg, regThis, 0);
- regTracker.rsTrackRegTrash(indReg);
+ regNumber indReg =
+ regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the indirection
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, indReg, regThis, 0);
+ regTracker.rsTrackRegTrash(indReg);
#else
- getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
+ getEmitter()->emitIns_AR_R(INS_cmp, EA_4BYTE, regThis, regThis, 0);
#endif
- }
-
- if (call->gtFlags & GTF_CALL_UNMANAGED)
- {
- //------------------------------------------------------
- // Non-virtual/Indirect calls: PInvoke calls.
+ }
- noway_assert(compiler->info.compCallUnmanaged != 0);
+ if (call->gtFlags & GTF_CALL_UNMANAGED)
+ {
+ //------------------------------------------------------
+ // Non-virtual/Indirect calls: PInvoke calls.
- /* args shouldn't be greater than 64K */
+ noway_assert(compiler->info.compCallUnmanaged != 0);
- noway_assert((argSize&0xffff0000) == 0);
+ /* args shouldn't be greater than 64K */
- /* Remember the varDsc for the callsite-epilog */
+ noway_assert((argSize & 0xffff0000) == 0);
- frameListRoot = &compiler->lvaTable[compiler->info.compLvFrameListRoot];
+ /* Remember the varDsc for the callsite-epilog */
- // exact codegen is required
- getEmitter()->emitDisableRandomNops();
+ frameListRoot = &compiler->lvaTable[compiler->info.compLvFrameListRoot];
- int nArgSize = 0;
+ // exact codegen is required
+ getEmitter()->emitDisableRandomNops();
- regNumber indCallReg = REG_NA;
+ int nArgSize = 0;
- if (callType == CT_INDIRECT)
- {
- noway_assert(genStillAddressable(call->gtCall.gtCallAddr));
+ regNumber indCallReg = REG_NA;
- if (call->gtCall.gtCallAddr->gtFlags & GTF_REG_VAL)
- indCallReg = call->gtCall.gtCallAddr->gtRegNum;
+ if (callType == CT_INDIRECT)
+ {
+ noway_assert(genStillAddressable(call->gtCall.gtCallAddr));
- nArgSize = (call->gtFlags & GTF_CALL_POP_ARGS) ? 0 : (int)argSize;
- methHnd = 0;
- }
- else
- {
- noway_assert(callType == CT_USER_FUNC);
- }
+ if (call->gtCall.gtCallAddr->gtFlags & GTF_REG_VAL)
+ indCallReg = call->gtCall.gtCallAddr->gtRegNum;
- regNumber tcbReg;
- tcbReg = genPInvokeCallProlog(frameListRoot, nArgSize, methHnd, returnLabel);
+ nArgSize = (call->gtFlags & GTF_CALL_POP_ARGS) ? 0 : (int)argSize;
+ methHnd = 0;
+ }
+ else
+ {
+ noway_assert(callType == CT_USER_FUNC);
+ }
- void* addr = NULL;
+ regNumber tcbReg;
+ tcbReg = genPInvokeCallProlog(frameListRoot, nArgSize, methHnd, returnLabel);
- if (callType == CT_INDIRECT)
- {
- /* Double check that the callee didn't use/trash the
- registers holding the call target.
- */
- noway_assert(tcbReg != indCallReg);
+ void* addr = NULL;
- if (indCallReg == REG_NA)
+ if (callType == CT_INDIRECT)
{
- indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL indirection
-
- /* Please note that this even works with tcbReg == REG_EAX.
- tcbReg contains an interesting value only if frameListRoot is
- an enregistered local that stays alive across the call
- (certainly not EAX). If frameListRoot has been moved into
- EAX, we can trash it since it won't survive across the call
- anyways.
+ /* Double check that the callee didn't use/trash the
+ registers holding the call target.
*/
+ noway_assert(tcbReg != indCallReg);
- inst_RV_TT(INS_mov, indCallReg, call->gtCall.gtCallAddr);
- regTracker.rsTrackRegTrash(indCallReg);
- }
-
- emitCallType = emitter::EC_INDIR_R;
- }
- else
- {
- noway_assert(callType == CT_USER_FUNC);
-
- void* pAddr;
- addr = compiler->info.compCompHnd->getAddressOfPInvokeFixup(methHnd, (void**)&pAddr);
- if (addr != NULL)
- {
-#if CPU_LOAD_STORE_ARCH
- // Load the address into a register, indirect it and call through a register
- indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL indirection
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addr);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
- regTracker.rsTrackRegTrash(indCallReg);
- // Now make the call "call indCallReg"
-
- getEmitter()->emitIns_Call(emitter::EC_INDIR_R,
- methHnd, // methHnd
- INDEBUG_LDISASM_COMMA(sigInfo) // sigInfo
- NULL, // addr
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- indCallReg);
+ if (indCallReg == REG_NA)
+ {
+ indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL
+ // indirection
+
+ /* Please note that this even works with tcbReg == REG_EAX.
+ tcbReg contains an interesting value only if frameListRoot is
+ an enregistered local that stays alive across the call
+ (certainly not EAX). If frameListRoot has been moved into
+ EAX, we can trash it since it won't survive across the call
+ anyways.
+ */
+
+ inst_RV_TT(INS_mov, indCallReg, call->gtCall.gtCallAddr);
+ regTracker.rsTrackRegTrash(indCallReg);
+ }
emitCallType = emitter::EC_INDIR_R;
- break;
-#else
- emitCallType = emitter::EC_FUNC_TOKEN_INDIR;
- indCallReg = REG_NA;
-#endif
}
else
{
- // Double-indirection. Load the address into a register
- // and call indirectly through a register
- indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL indirection
+ noway_assert(callType == CT_USER_FUNC);
+
+ void* pAddr;
+ addr = compiler->info.compCompHnd->getAddressOfPInvokeFixup(methHnd, (void**)&pAddr);
+ if (addr != NULL)
+ {
+#if CPU_LOAD_STORE_ARCH
+ // Load the address into a register, indirect it and call through a register
+ indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL
+ // indirection
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addr);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
+ regTracker.rsTrackRegTrash(indCallReg);
+ // Now make the call "call indCallReg"
+
+ getEmitter()->emitIns_Call(emitter::EC_INDIR_R,
+ methHnd, // methHnd
+ INDEBUG_LDISASM_COMMA(sigInfo) // sigInfo
+ NULL, // addr
+ args,
+ retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset, indCallReg);
+
+ emitCallType = emitter::EC_INDIR_R;
+ break;
+#else
+ emitCallType = emitter::EC_FUNC_TOKEN_INDIR;
+ indCallReg = REG_NA;
+#endif
+ }
+ else
+ {
+ // Double-indirection. Load the address into a register
+ // and call indirectly through a register
+ indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL
+ // indirection
#if CPU_LOAD_STORE_ARCH
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)pAddr);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
- regTracker.rsTrackRegTrash(indCallReg);
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)pAddr);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
+ regTracker.rsTrackRegTrash(indCallReg);
- emitCallType = emitter::EC_INDIR_R;
+ emitCallType = emitter::EC_INDIR_R;
#else
- getEmitter()->emitIns_R_AI(INS_mov,
- EA_PTR_DSP_RELOC,
- indCallReg,
- (ssize_t)pAddr);
- regTracker.rsTrackRegTrash(indCallReg);
- emitCallType = emitter::EC_INDIR_ARD;
+ getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, indCallReg, (ssize_t)pAddr);
+ regTracker.rsTrackRegTrash(indCallReg);
+ emitCallType = emitter::EC_INDIR_ARD;
#endif // CPU_LOAD_STORE_ARCH
+ }
}
- }
- getEmitter()->emitIns_Call(emitCallType,
- compiler->eeMarkNativeTarget(methHnd),
- INDEBUG_LDISASM_COMMA(sigInfo)
- addr,
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- indCallReg);
+ getEmitter()->emitIns_Call(emitCallType, compiler->eeMarkNativeTarget(methHnd),
+ INDEBUG_LDISASM_COMMA(sigInfo) addr, args, retSize,
+ gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur,
+ ilOffset, indCallReg);
- if (callType == CT_INDIRECT)
- genDoneAddressable(call->gtCall.gtCallAddr, fptrRegs, RegSet::KEEP_REG);
+ if (callType == CT_INDIRECT)
+ genDoneAddressable(call->gtCall.gtCallAddr, fptrRegs, RegSet::KEEP_REG);
- getEmitter()->emitEnableRandomNops();
-
- // Done with PInvoke calls
- break;
- }
+ getEmitter()->emitEnableRandomNops();
- if (callType == CT_INDIRECT)
- {
- noway_assert(genStillAddressable(call->gtCall.gtCallAddr));
+ // Done with PInvoke calls
+ break;
+ }
- if (call->gtCall.gtCallCookie)
+ if (callType == CT_INDIRECT)
{
- //------------------------------------------------------
- // Non-virtual indirect calls via the P/Invoke stub
+ noway_assert(genStillAddressable(call->gtCall.gtCallAddr));
- GenTreePtr cookie = call->gtCall.gtCallCookie;
- GenTreePtr target = call->gtCall.gtCallAddr;
+ if (call->gtCall.gtCallCookie)
+ {
+ //------------------------------------------------------
+ // Non-virtual indirect calls via the P/Invoke stub
+
+ GenTreePtr cookie = call->gtCall.gtCallCookie;
+ GenTreePtr target = call->gtCall.gtCallAddr;
- noway_assert((call->gtFlags & GTF_CALL_POP_ARGS) == 0);
+ noway_assert((call->gtFlags & GTF_CALL_POP_ARGS) == 0);
- noway_assert(cookie->gtOper == GT_CNS_INT ||
- cookie->gtOper == GT_IND && cookie->gtOp.gtOp1->gtOper == GT_CNS_INT);
+ noway_assert(cookie->gtOper == GT_CNS_INT ||
+ cookie->gtOper == GT_IND && cookie->gtOp.gtOp1->gtOper == GT_CNS_INT);
- noway_assert(args == argSize);
+ noway_assert(args == argSize);
#if defined(_TARGET_X86_)
- /* load eax with the real target */
+ /* load eax with the real target */
- inst_RV_TT(INS_mov, REG_EAX, target);
- regTracker.rsTrackRegTrash(REG_EAX);
+ inst_RV_TT(INS_mov, REG_EAX, target);
+ regTracker.rsTrackRegTrash(REG_EAX);
- if (cookie->gtOper == GT_CNS_INT)
- inst_IV_handle(INS_push, cookie->gtIntCon.gtIconVal);
- else
- inst_TT(INS_push, cookie);
+ if (cookie->gtOper == GT_CNS_INT)
+ inst_IV_handle(INS_push, cookie->gtIntCon.gtIconVal);
+ else
+ inst_TT(INS_push, cookie);
- /* Keep track of ESP for EBP-less frames */
- genSinglePush();
+ /* Keep track of ESP for EBP-less frames */
+ genSinglePush();
- argSize += sizeof(void *);
+ argSize += sizeof(void*);
#elif defined(_TARGET_ARM_)
- // Ensure that we spill these registers (if caller saved) in the prolog
- regSet.rsSetRegsModified(RBM_PINVOKE_COOKIE_PARAM | RBM_PINVOKE_TARGET_PARAM);
+ // Ensure that we spill these registers (if caller saved) in the prolog
+ regSet.rsSetRegsModified(RBM_PINVOKE_COOKIE_PARAM | RBM_PINVOKE_TARGET_PARAM);
- // ARM: load r12 with the real target
- // X64: load r10 with the real target
- inst_RV_TT(INS_mov, REG_PINVOKE_TARGET_PARAM, target);
- regTracker.rsTrackRegTrash(REG_PINVOKE_TARGET_PARAM);
+ // ARM: load r12 with the real target
+ // X64: load r10 with the real target
+ inst_RV_TT(INS_mov, REG_PINVOKE_TARGET_PARAM, target);
+ regTracker.rsTrackRegTrash(REG_PINVOKE_TARGET_PARAM);
- // ARM: load r4 with the pinvoke VASigCookie
- // X64: load r11 with the pinvoke VASigCookie
- if (cookie->gtOper == GT_CNS_INT)
- inst_RV_IV(INS_mov, REG_PINVOKE_COOKIE_PARAM, cookie->gtIntCon.gtIconVal, EA_HANDLE_CNS_RELOC);
- else
- inst_RV_TT(INS_mov, REG_PINVOKE_COOKIE_PARAM, cookie);
- regTracker.rsTrackRegTrash(REG_PINVOKE_COOKIE_PARAM);
+ // ARM: load r4 with the pinvoke VASigCookie
+ // X64: load r11 with the pinvoke VASigCookie
+ if (cookie->gtOper == GT_CNS_INT)
+ inst_RV_IV(INS_mov, REG_PINVOKE_COOKIE_PARAM, cookie->gtIntCon.gtIconVal,
+ EA_HANDLE_CNS_RELOC);
+ else
+ inst_RV_TT(INS_mov, REG_PINVOKE_COOKIE_PARAM, cookie);
+ regTracker.rsTrackRegTrash(REG_PINVOKE_COOKIE_PARAM);
- noway_assert(args == argSize);
+ noway_assert(args == argSize);
- // Ensure that we don't trash any of these registers if we have to load
- // the helper call target into a register to invoke it.
- regMaskTP regsUsed;
- regSet.rsLockReg(call->gtCall.gtCallRegUsedMask|RBM_PINVOKE_TARGET_PARAM|RBM_PINVOKE_COOKIE_PARAM, &regsUsed);
+ // Ensure that we don't trash any of these registers if we have to load
+ // the helper call target into a register to invoke it.
+ regMaskTP regsUsed;
+ regSet.rsLockReg(call->gtCall.gtCallRegUsedMask | RBM_PINVOKE_TARGET_PARAM |
+ RBM_PINVOKE_COOKIE_PARAM,
+ &regsUsed);
#else
- NYI("Non-virtual indirect calls via the P/Invoke stub");
+ NYI("Non-virtual indirect calls via the P/Invoke stub");
#endif
- args = argSize;
- noway_assert((size_t)(int)args == args);
+ args = argSize;
+ noway_assert((size_t)(int)args == args);
- genEmitHelperCall(CORINFO_HELP_PINVOKE_CALLI, (int)args, retSize);
+ genEmitHelperCall(CORINFO_HELP_PINVOKE_CALLI, (int)args, retSize);
#if defined(_TARGET_ARM_)
- regSet.rsUnlockReg(call->gtCall.gtCallRegUsedMask|RBM_PINVOKE_TARGET_PARAM|RBM_PINVOKE_COOKIE_PARAM, regsUsed);
+ regSet.rsUnlockReg(call->gtCall.gtCallRegUsedMask | RBM_PINVOKE_TARGET_PARAM |
+ RBM_PINVOKE_COOKIE_PARAM,
+ regsUsed);
#endif
#ifdef _TARGET_ARM_
- // genEmitHelperCall doesn't record all registers a helper call would trash.
- regTracker.rsTrackRegTrash(REG_PINVOKE_COOKIE_PARAM);
+ // genEmitHelperCall doesn't record all registers a helper call would trash.
+ regTracker.rsTrackRegTrash(REG_PINVOKE_COOKIE_PARAM);
#endif
-
- }
- else
- {
- //------------------------------------------------------
- // Non-virtual indirect calls
-
- if (fTailCall)
- {
- inst_RV_TT(INS_mov, REG_TAILCALL_ADDR, call->gtCall.gtCallAddr);
- regTracker.rsTrackRegTrash(REG_TAILCALL_ADDR);
}
else
- instEmit_indCall(call, args, retSize);
- }
+ {
+ //------------------------------------------------------
+ // Non-virtual indirect calls
- genDoneAddressable(call->gtCall.gtCallAddr, fptrRegs, RegSet::KEEP_REG);
+ if (fTailCall)
+ {
+ inst_RV_TT(INS_mov, REG_TAILCALL_ADDR, call->gtCall.gtCallAddr);
+ regTracker.rsTrackRegTrash(REG_TAILCALL_ADDR);
+ }
+ else
+ instEmit_indCall(call, args, retSize);
+ }
- // Done with indirect calls
- break;
- }
+ genDoneAddressable(call->gtCall.gtCallAddr, fptrRegs, RegSet::KEEP_REG);
- //------------------------------------------------------
- // Non-virtual direct/indirect calls: Work out if the address of the
- // call is known at JIT time (if not it is either an indirect call
-        // or the address must be accessed via a single/double indirection)
+ // Done with indirect calls
+ break;
+ }
- noway_assert(callType == CT_USER_FUNC || callType == CT_HELPER);
+ //------------------------------------------------------
+ // Non-virtual direct/indirect calls: Work out if the address of the
+ // call is known at JIT time (if not it is either an indirect call
+            // or the address must be accessed via a single/double indirection)
- void * addr;
- InfoAccessType accessType;
+ noway_assert(callType == CT_USER_FUNC || callType == CT_HELPER);
- helperNum = compiler->eeGetHelperNum(methHnd);
+ void* addr;
+ InfoAccessType accessType;
- if (callType == CT_HELPER)
- {
- noway_assert(helperNum != CORINFO_HELP_UNDEF);
+ helperNum = compiler->eeGetHelperNum(methHnd);
- void * pAddr;
- addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
+ if (callType == CT_HELPER)
+ {
+ noway_assert(helperNum != CORINFO_HELP_UNDEF);
- accessType = IAT_VALUE;
+ void* pAddr;
+ addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
- if (!addr)
- {
- accessType = IAT_PVALUE;
- addr = pAddr;
- }
- }
- else
- {
- noway_assert(helperNum == CORINFO_HELP_UNDEF);
+ accessType = IAT_VALUE;
- CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
+ if (!addr)
+ {
+ accessType = IAT_PVALUE;
+ addr = pAddr;
+ }
+ }
+ else
+ {
+ noway_assert(helperNum == CORINFO_HELP_UNDEF);
- if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS)
- aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
+ CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
- if ((call->gtFlags & GTF_CALL_NULLCHECK) == 0)
- aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
+ if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS)
+ aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
- CORINFO_CONST_LOOKUP addrInfo;
- compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo, aflags);
+ if ((call->gtFlags & GTF_CALL_NULLCHECK) == 0)
+ aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
- accessType = addrInfo.accessType;
- addr = addrInfo.addr;
- }
+ CORINFO_CONST_LOOKUP addrInfo;
+ compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo, aflags);
- if (fTailCall)
- {
- noway_assert(callType == CT_USER_FUNC);
+ accessType = addrInfo.accessType;
+ addr = addrInfo.addr;
+ }
- switch (accessType)
+ if (fTailCall)
{
- case IAT_VALUE:
- //------------------------------------------------------
-                // Non-virtual direct calls to known addresses
- //
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, REG_TAILCALL_ADDR, (ssize_t)addr);
- break;
+ noway_assert(callType == CT_USER_FUNC);
- case IAT_PVALUE:
- //------------------------------------------------------
- // Non-virtual direct calls to addresses accessed by
- // a single indirection.
- //
- // For tailcalls we place the target address in REG_TAILCALL_ADDR
- CLANG_FORMAT_COMMENT_ANCHOR;
+ switch (accessType)
+ {
+ case IAT_VALUE:
+ //------------------------------------------------------
+                        // Non-virtual direct calls to known addresses
+ //
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, REG_TAILCALL_ADDR, (ssize_t)addr);
+ break;
+
+ case IAT_PVALUE:
+ //------------------------------------------------------
+ // Non-virtual direct calls to addresses accessed by
+ // a single indirection.
+ //
+ // For tailcalls we place the target address in REG_TAILCALL_ADDR
+ CLANG_FORMAT_COMMENT_ANCHOR;
#if CPU_LOAD_STORE_ARCH
- {
- regNumber indReg = REG_TAILCALL_ADDR;
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indReg, (ssize_t)addr);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, indReg, indReg, 0);
- regTracker.rsTrackRegTrash(indReg);
- }
+ {
+ regNumber indReg = REG_TAILCALL_ADDR;
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indReg, (ssize_t)addr);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, indReg, indReg, 0);
+ regTracker.rsTrackRegTrash(indReg);
+ }
#else
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_TAILCALL_ADDR,
- (ssize_t)addr);
- regTracker.rsTrackRegTrash(REG_TAILCALL_ADDR);
+ getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_TAILCALL_ADDR, (ssize_t)addr);
+ regTracker.rsTrackRegTrash(REG_TAILCALL_ADDR);
#endif
- break;
+ break;
- case IAT_PPVALUE:
- //------------------------------------------------------
- // Non-virtual direct calls to addresses accessed by
- // a double indirection.
- //
- // For tailcalls we place the target address in REG_TAILCALL_ADDR
- CLANG_FORMAT_COMMENT_ANCHOR;
+ case IAT_PPVALUE:
+ //------------------------------------------------------
+ // Non-virtual direct calls to addresses accessed by
+ // a double indirection.
+ //
+ // For tailcalls we place the target address in REG_TAILCALL_ADDR
+ CLANG_FORMAT_COMMENT_ANCHOR;
#if CPU_LOAD_STORE_ARCH
- {
- regNumber indReg = REG_TAILCALL_ADDR;
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indReg, (ssize_t)addr);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, indReg, indReg, 0);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, indReg, indReg, 0);
- regTracker.rsTrackRegTrash(indReg);
- }
+ {
+ regNumber indReg = REG_TAILCALL_ADDR;
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indReg, (ssize_t)addr);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, indReg, indReg, 0);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, indReg, indReg, 0);
+ regTracker.rsTrackRegTrash(indReg);
+ }
#else
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_TAILCALL_ADDR,
- (ssize_t)addr);
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_TAILCALL_ADDR,
- REG_TAILCALL_ADDR, 0);
- regTracker.rsTrackRegTrash(REG_TAILCALL_ADDR);
+ getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, REG_TAILCALL_ADDR, (ssize_t)addr);
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_TAILCALL_ADDR,
+ REG_TAILCALL_ADDR, 0);
+ regTracker.rsTrackRegTrash(REG_TAILCALL_ADDR);
#endif
- break;
+ break;
- default:
- noway_assert(!"Bad accessType");
- break;
+ default:
+ noway_assert(!"Bad accessType");
+ break;
+ }
}
- }
- else
- {
- switch (accessType)
+ else
{
- regNumber indCallReg;
+ switch (accessType)
+ {
+ regNumber indCallReg;
- case IAT_VALUE:
- //------------------------------------------------------
-                // Non-virtual direct calls to known addresses
- //
- // The vast majority of calls end up here.... Wouldn't
- // it be nice if they all did!
- CLANG_FORMAT_COMMENT_ANCHOR;
+ case IAT_VALUE:
+ //------------------------------------------------------
+                        // Non-virtual direct calls to known addresses
+ //
+ // The vast majority of calls end up here.... Wouldn't
+ // it be nice if they all did!
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef _TARGET_ARM_
- if (!arm_Valid_Imm_For_BL((ssize_t)addr))
- {
- // Load the address into a register and call through a register
- indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL indirection
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addr);
-
- getEmitter()->emitIns_Call(emitter::EC_INDIR_R,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- NULL, // addr
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- indCallReg, // ireg
- REG_NA, 0, 0, // xreg, xmul, disp
- false, // isJump
- emitter::emitNoGChelper(helperNum));
- }
- else
+ if (!arm_Valid_Imm_For_BL((ssize_t)addr))
+ {
+ // Load the address into a register and call through a register
+ indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the
+ // CALL indirection
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addr);
+
+ getEmitter()->emitIns_Call(emitter::EC_INDIR_R, methHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) NULL, // addr
+ args, retSize, gcInfo.gcVarPtrSetCur,
+ gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, ilOffset,
+ indCallReg, // ireg
+ REG_NA, 0, 0, // xreg, xmul, disp
+ false, // isJump
+ emitter::emitNoGChelper(helperNum));
+ }
+ else
#endif
- {
- getEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- addr,
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- REG_NA, REG_NA, 0, 0, /* ireg, xreg, xmul, disp */
- false, /* isJump */
- emitter::emitNoGChelper(helperNum));
- }
- break;
+ {
+ getEmitter()->emitIns_Call(emitter::EC_FUNC_TOKEN, methHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) addr, args, retSize,
+ gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset, REG_NA, REG_NA, 0,
+ 0, /* ireg, xreg, xmul, disp */
+ false, /* isJump */
+ emitter::emitNoGChelper(helperNum));
+ }
+ break;
- case IAT_PVALUE:
- //------------------------------------------------------
- // Non-virtual direct calls to addresses accessed by
- // a single indirection.
- //
+ case IAT_PVALUE:
+ //------------------------------------------------------
+ // Non-virtual direct calls to addresses accessed by
+ // a single indirection.
+ //
- // Load the address into a register, load indirect and call through a register
- CLANG_FORMAT_COMMENT_ANCHOR;
+ // Load the address into a register, load indirect and call through a register
+ CLANG_FORMAT_COMMENT_ANCHOR;
#if CPU_LOAD_STORE_ARCH
- indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL indirection
+ indCallReg = regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL
+ // indirection
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addr);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
- regTracker.rsTrackRegTrash(indCallReg);
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addr);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
+ regTracker.rsTrackRegTrash(indCallReg);
- emitCallType = emitter::EC_INDIR_R;
- addr = NULL;
+ emitCallType = emitter::EC_INDIR_R;
+ addr = NULL;
#else
- emitCallType = emitter::EC_FUNC_TOKEN_INDIR;
- indCallReg = REG_NA;
+ emitCallType = emitter::EC_FUNC_TOKEN_INDIR;
+ indCallReg = REG_NA;
#endif // CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_Call( emitCallType,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- addr,
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- indCallReg, // ireg
- REG_NA, 0, 0, // xreg, xmul, disp
- false, /* isJump */
- emitter::emitNoGChelper(helperNum));
- break;
+ getEmitter()->emitIns_Call(emitCallType, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) addr, args,
+ retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset,
+ indCallReg, // ireg
+ REG_NA, 0, 0, // xreg, xmul, disp
+ false, /* isJump */
+ emitter::emitNoGChelper(helperNum));
+ break;
- case IAT_PPVALUE:
- {
- //------------------------------------------------------
- // Non-virtual direct calls to addresses accessed by
- // a double indirection.
- //
- // Double-indirection. Load the address into a register
- // and call indirectly through the register
+ case IAT_PPVALUE:
+ {
+ //------------------------------------------------------
+ // Non-virtual direct calls to addresses accessed by
+ // a double indirection.
+ //
+ // Double-indirection. Load the address into a register
+ // and call indirectly through the register
- noway_assert(helperNum == CORINFO_HELP_UNDEF);
+ noway_assert(helperNum == CORINFO_HELP_UNDEF);
- // Grab an available register to use for the CALL indirection
- indCallReg = regSet.rsGrabReg(RBM_ALLINT);
+ // Grab an available register to use for the CALL indirection
+ indCallReg = regSet.rsGrabReg(RBM_ALLINT);
#if CPU_LOAD_STORE_ARCH
- instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addr);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
- getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
- regTracker.rsTrackRegTrash(indCallReg);
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addr);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
+ getEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
+ regTracker.rsTrackRegTrash(indCallReg);
- emitCallType = emitter::EC_INDIR_R;
+ emitCallType = emitter::EC_INDIR_R;
#else
- getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC,
- indCallReg,
- (ssize_t)addr);
- regTracker.rsTrackRegTrash(indCallReg);
+ getEmitter()->emitIns_R_AI(INS_mov, EA_PTR_DSP_RELOC, indCallReg, (ssize_t)addr);
+ regTracker.rsTrackRegTrash(indCallReg);
- emitCallType = emitter::EC_INDIR_ARD;
+ emitCallType = emitter::EC_INDIR_ARD;
#endif // CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_Call(emitCallType,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- NULL, // addr
- args,
- retSize,
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- indCallReg, // ireg
- REG_NA, 0, 0, // xreg, xmul, disp
- false, // isJump
- emitter::emitNoGChelper(helperNum));
- }
- break;
+ getEmitter()->emitIns_Call(emitCallType, methHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) NULL, // addr
+ args, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, ilOffset,
+ indCallReg, // ireg
+ REG_NA, 0, 0, // xreg, xmul, disp
+ false, // isJump
+ emitter::emitNoGChelper(helperNum));
+ }
+ break;
- default:
- noway_assert(!"Bad accessType");
- break;
- }
+ default:
+ noway_assert(!"Bad accessType");
+ break;
+ }
- // tracking of region protected by the monitor in synchronized methods
- if ((helperNum != CORINFO_HELP_UNDEF) && (compiler->info.compFlags & CORINFO_FLG_SYNCH))
- {
- fPossibleSyncHelperCall = true;
+ // tracking of region protected by the monitor in synchronized methods
+ if ((helperNum != CORINFO_HELP_UNDEF) && (compiler->info.compFlags & CORINFO_FLG_SYNCH))
+ {
+ fPossibleSyncHelperCall = true;
+ }
}
}
- }
- break;
-
- default:
- noway_assert(!"strange call type");
- break;
+ break;
- }
+ default:
+ noway_assert(!"strange call type");
+ break;
+ }
/*-------------------------------------------------------------------------
* For tailcalls, REG_INTRET contains the address of the target function,
@@ -20138,26 +19678,25 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
#ifdef _TARGET_X86_
noway_assert(0 <= (ssize_t)args); // caller-pop args not supported for tailcall
-
// Push the count of the incoming stack arguments
- unsigned nOldStkArgs = (unsigned)((compiler->compArgSize - (intRegState.rsCalleeRegArgCount * sizeof(void *)))/sizeof(void*));
+ unsigned nOldStkArgs =
+ (unsigned)((compiler->compArgSize - (intRegState.rsCalleeRegArgCount * sizeof(void*))) / sizeof(void*));
getEmitter()->emitIns_I(INS_push, EA_4BYTE, nOldStkArgs);
genSinglePush(); // Keep track of ESP for EBP-less frames
args += sizeof(void*);
// Push the count of the outgoing stack arguments
- getEmitter()->emitIns_I(INS_push, EA_4BYTE, argSize/sizeof(void*));
+ getEmitter()->emitIns_I(INS_push, EA_4BYTE, argSize / sizeof(void*));
genSinglePush(); // Keep track of ESP for EBP-less frames
args += sizeof(void*);
// Push info about the callee-saved registers to be restored
// For now, we always spill all registers if compiler->compTailCallUsed
- DWORD calleeSavedRegInfo =
- 1 | // always restore EDI,ESI,EBX
- (fTailCallTargetIsVSD ? 0x2 : 0x0); // Stub dispatch flag
+ DWORD calleeSavedRegInfo = 1 | // always restore EDI,ESI,EBX
+ (fTailCallTargetIsVSD ? 0x2 : 0x0); // Stub dispatch flag
getEmitter()->emitIns_I(INS_push, EA_4BYTE, calleeSavedRegInfo);
genSinglePush(); // Keep track of ESP for EBP-less frames
args += sizeof(void*);
@@ -20170,9 +19709,9 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
#else // _TARGET_X86_
- args = 0;
+ args = 0;
retSize = EA_UNKNOWN;
-
+
#endif // _TARGET_X86_
if (compiler->getNeedsGSSecurityCookie())
@@ -20187,7 +19726,6 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
// Now call the helper
genEmitHelperCall(CORINFO_HELP_TAILCALL, (int)args, retSize);
-
}
/*-------------------------------------------------------------------------
@@ -20210,12 +19748,12 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
#ifdef _TARGET_ARM_
if (regSet.rsUsedTree[areg] == NULL)
{
- noway_assert(areg % 2 == 1 && (((areg+1) >= MAX_REG_ARG) ||
- (regSet.rsUsedTree[areg+1]->TypeGet() == TYP_STRUCT) ||
- (genTypeStSz(regSet.rsUsedTree[areg+1]->TypeGet()) == 2)));
+ noway_assert(areg % 2 == 1 &&
+ (((areg + 1) >= MAX_REG_ARG) || (regSet.rsUsedTree[areg + 1]->TypeGet() == TYP_STRUCT) ||
+ (genTypeStSz(regSet.rsUsedTree[areg + 1]->TypeGet()) == 2)));
continue;
}
-#endif
+#endif
regSet.rsMarkRegFree(curArgMask);
@@ -20247,21 +19785,21 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
/* restore the old argument register status */
- intRegState.rsCurRegArgNum = savCurIntArgReg;
+ intRegState.rsCurRegArgNum = savCurIntArgReg;
floatRegState.rsCurRegArgNum = savCurFloatArgReg;
noway_assert(intRegState.rsCurRegArgNum <= MAX_REG_ARG);
/* Mark all trashed registers as such */
- if (calleeTrashedRegs)
+ if (calleeTrashedRegs)
regTracker.rsTrashRegSet(calleeTrashedRegs);
regTracker.rsTrashRegsForGCInterruptability();
-#ifdef DEBUG
+#ifdef DEBUG
- if (!(call->gtFlags & GTF_CALL_POP_ARGS))
+ if (!(call->gtFlags & GTF_CALL_POP_ARGS))
{
if (compiler->verbose)
{
@@ -20313,39 +19851,38 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
/* No trashed registers may possibly hold a pointer at this point */
CLANG_FORMAT_COMMENT_ANCHOR;
-#ifdef DEBUG
+#ifdef DEBUG
- regMaskTP ptrRegs = (gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur) & (calleeTrashedRegs & RBM_ALLINT) & ~regSet.rsMaskVars & ~vptrMask;
- if (ptrRegs)
+ regMaskTP ptrRegs = (gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & (calleeTrashedRegs & RBM_ALLINT) &
+ ~regSet.rsMaskVars & ~vptrMask;
+ if (ptrRegs)
{
// A reg may be dead already. The assertion is too strong.
- LclVarDsc *varDsc;
- unsigned varNum;
-
+ LclVarDsc* varDsc;
+ unsigned varNum;
+
// use compiler->compCurLife
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount && ptrRegs != 0;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount && ptrRegs != 0; varNum++, varDsc++)
{
/* Ignore the variable if it's not tracked, not in a register, or a floating-point type */
- if (!varDsc->lvTracked)
+ if (!varDsc->lvTracked)
continue;
- if (!varDsc->lvRegister)
+ if (!varDsc->lvRegister)
continue;
- if (varDsc->IsFloatRegType())
+ if (varDsc->IsFloatRegType())
continue;
/* Get hold of the index and the bitmask for the variable */
- unsigned varIndex = varDsc->lvVarIndex;
+ unsigned varIndex = varDsc->lvVarIndex;
/* Is this variable live currently? */
- if (!VarSetOps::IsMember(compiler, compiler->compCurLife, varIndex))
+ if (!VarSetOps::IsMember(compiler, compiler->compCurLife, varIndex))
{
- regNumber regNum = varDsc->lvRegNum;
- regMaskTP regMask = genRegMask(regNum);
+ regNumber regNum = varDsc->lvRegNum;
+ regMaskTP regMask = genRegMask(regNum);
if (varDsc->lvType == TYP_REF || varDsc->lvType == TYP_BYREF)
ptrRegs &= ~regMask;
@@ -20369,27 +19906,24 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
if (fPossibleSyncHelperCall)
{
- switch (helperNum) {
- case CORINFO_HELP_MON_ENTER:
- case CORINFO_HELP_MON_ENTER_STATIC:
- noway_assert(compiler->syncStartEmitCookie == NULL);
- compiler->syncStartEmitCookie = getEmitter()->emitAddLabel(
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur);
- noway_assert(compiler->syncStartEmitCookie != NULL);
- break;
- case CORINFO_HELP_MON_EXIT:
- case CORINFO_HELP_MON_EXIT_STATIC:
- noway_assert(compiler->syncEndEmitCookie == NULL);
- compiler->syncEndEmitCookie = getEmitter()->emitAddLabel(
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur);
- noway_assert(compiler->syncEndEmitCookie != NULL);
- break;
- default:
- break;
+ switch (helperNum)
+ {
+ case CORINFO_HELP_MON_ENTER:
+ case CORINFO_HELP_MON_ENTER_STATIC:
+ noway_assert(compiler->syncStartEmitCookie == NULL);
+ compiler->syncStartEmitCookie =
+ getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+ noway_assert(compiler->syncStartEmitCookie != NULL);
+ break;
+ case CORINFO_HELP_MON_EXIT:
+ case CORINFO_HELP_MON_EXIT_STATIC:
+ noway_assert(compiler->syncEndEmitCookie == NULL);
+ compiler->syncEndEmitCookie =
+ getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+ noway_assert(compiler->syncEndEmitCookie != NULL);
+ break;
+ default:
+ break;
}
}
#endif // _TARGET_X86_
@@ -20402,15 +19936,12 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
if (getInlinePInvokeCheckEnabled())
{
noway_assert(compiler->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
- BasicBlock * esp_check;
+ BasicBlock* esp_check;
- CORINFO_EE_INFO * pInfo = compiler->eeGetEEInfo();
+ CORINFO_EE_INFO* pInfo = compiler->eeGetEEInfo();
/* mov ecx, dword ptr [frame.callSiteTracker] */
- getEmitter()->emitIns_R_S (INS_mov,
- EA_4BYTE,
- REG_ARG_0,
- compiler->lvaInlinedPInvokeFrameVar,
+ getEmitter()->emitIns_R_S(INS_mov, EA_4BYTE, REG_ARG_0, compiler->lvaInlinedPInvokeFrameVar,
pInfo->inlinedCallFrameInfo.offsetOfCallSiteSP);
regTracker.rsTrackRegTrash(REG_ARG_0);
@@ -20420,10 +19951,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
{
if (argSize)
{
- getEmitter()->emitIns_R_I (INS_add,
- EA_PTRSIZE,
- REG_ARG_0,
- argSize);
+ getEmitter()->emitIns_R_I(INS_add, EA_PTRSIZE, REG_ARG_0, argSize);
}
}
/* cmp ecx, esp */
@@ -20457,41 +19985,33 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
// take care of the cdecl argument popping here as well but the stack depth tracking logic
// makes this very hard, i.e. it needs to "see" the actual pop.
- CORINFO_EE_INFO *pInfo = compiler->eeGetEEInfo();
+ CORINFO_EE_INFO* pInfo = compiler->eeGetEEInfo();
if (argSize == 0 || (call->gtFlags & GTF_CALL_POP_ARGS))
{
/* mov esp, dword ptr [frame.callSiteTracker] */
- getEmitter()->emitIns_R_S (ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- REG_SPBASE,
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE,
compiler->lvaInlinedPInvokeFrameVar,
pInfo->inlinedCallFrameInfo.offsetOfCallSiteSP);
}
else
{
/* mov ecx, dword ptr [frame.callSiteTracker] */
- getEmitter()->emitIns_R_S (ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- REG_ARG_0,
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0,
compiler->lvaInlinedPInvokeFrameVar,
pInfo->inlinedCallFrameInfo.offsetOfCallSiteSP);
regTracker.rsTrackRegTrash(REG_ARG_0);
/* lea esp, [ecx + argSize] */
- getEmitter()->emitIns_R_AR (INS_lea,
- EA_PTRSIZE,
- REG_SPBASE,
- REG_ARG_0,
- (int)argSize);
+ getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, REG_SPBASE, REG_ARG_0, (int)argSize);
}
}
}
#endif // _TARGET_X86_
- if (call->gtFlags & GTF_CALL_POP_ARGS)
+ if (call->gtFlags & GTF_CALL_POP_ARGS)
{
- noway_assert(args == (size_t)-(int)argSize);
+ noway_assert(args == (size_t) - (int)argSize);
if (argSize)
{
@@ -20499,7 +20019,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
}
}
- if (pseudoStackLvl)
+ if (pseudoStackLvl)
{
noway_assert(call->gtType == TYP_VOID);
@@ -20508,30 +20028,28 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
instGen(INS_nop);
}
-
-
/* What does the function return? */
retVal = RBM_NONE;
switch (call->gtType)
{
- case TYP_REF:
- case TYP_ARRAY:
- case TYP_BYREF:
- gcInfo.gcMarkRegPtrVal(REG_INTRET, call->TypeGet());
+ case TYP_REF:
+ case TYP_ARRAY:
+ case TYP_BYREF:
+ gcInfo.gcMarkRegPtrVal(REG_INTRET, call->TypeGet());
- __fallthrough;
+ __fallthrough;
- case TYP_INT:
-#if!CPU_HAS_FP_SUPPORT
- case TYP_FLOAT:
+ case TYP_INT:
+#if !CPU_HAS_FP_SUPPORT
+ case TYP_FLOAT:
#endif
- retVal = RBM_INTRET;
- break;
+ retVal = RBM_INTRET;
+ break;
#ifdef _TARGET_ARM_
- case TYP_STRUCT:
+ case TYP_STRUCT:
{
assert(call->gtCall.gtRetClsHnd != NULL);
assert(compiler->IsHfa(call->gtCall.gtRetClsHnd));
@@ -20543,25 +20061,25 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
break;
#endif
- case TYP_LONG:
-#if!CPU_HAS_FP_SUPPORT
- case TYP_DOUBLE:
+ case TYP_LONG:
+#if !CPU_HAS_FP_SUPPORT
+ case TYP_DOUBLE:
#endif
- retVal = RBM_LNGRET;
- break;
+ retVal = RBM_LNGRET;
+ break;
#if CPU_HAS_FP_SUPPORT
- case TYP_FLOAT:
- case TYP_DOUBLE:
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
- break;
+ break;
#endif
- case TYP_VOID:
- break;
+ case TYP_VOID:
+ break;
- default:
- noway_assert(!"unexpected/unhandled fn return type");
+ default:
+ noway_assert(!"unexpected/unhandled fn return type");
}
// We now have to generate the "call epilog" (if it was a call to unmanaged code).
@@ -20576,7 +20094,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
{
if (frameListRoot->lvRegister)
{
- bool isBorn = false;
+ bool isBorn = false;
bool isDying = true;
genUpdateRegLife(frameListRoot, isBorn, isDying DEBUGARG(call));
}
@@ -20585,14 +20103,16 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
#ifdef DEBUG
if (compiler->opts.compStackCheckOnCall
#if defined(USE_TRANSITION_THUNKS) || defined(USE_DYNAMIC_STACK_ALIGN)
- //check the stack as frequently as possible
+ // check the stack as frequently as possible
&& !call->IsHelperCall()
#else
&& call->gtCall.gtCallType == CT_USER_FUNC
#endif
- )
+ )
{
- noway_assert(compiler->lvaCallEspCheck != 0xCCCCCCCC && compiler->lvaTable[compiler->lvaCallEspCheck].lvDoNotEnregister && compiler->lvaTable[compiler->lvaCallEspCheck].lvOnFrame);
+ noway_assert(compiler->lvaCallEspCheck != 0xCCCCCCCC &&
+ compiler->lvaTable[compiler->lvaCallEspCheck].lvDoNotEnregister &&
+ compiler->lvaTable[compiler->lvaCallEspCheck].lvOnFrame);
if (argSize > 0)
{
getEmitter()->emitIns_R_R(INS_mov, EA_4BYTE, REG_ARG_0, REG_SPBASE);
@@ -20603,8 +20123,8 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
else
getEmitter()->emitIns_S_R(INS_cmp, EA_4BYTE, REG_SPBASE, compiler->lvaCallEspCheck, 0);
- BasicBlock * esp_check = genCreateTempLabel();
- emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ BasicBlock* esp_check = genCreateTempLabel();
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
@@ -20618,7 +20138,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
if (call->gtType == TYP_FLOAT || call->gtType == TYP_DOUBLE)
{
// Restore return node if necessary
- if (call->gtFlags & GTF_SPILLED)
+ if (call->gtFlags & GTF_SPILLED)
{
UnspillFloat(call);
}
@@ -20644,7 +20164,6 @@ regMaskTP CodeGen::genCodeForCall(GenTreePtr call,
#pragma warning(pop)
#endif
-
/*****************************************************************************
*
* Create and record GC Info for the function.
@@ -20664,25 +20183,22 @@ CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigne
}
#ifdef JIT32_GCENCODER
-void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
+void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize,
+ unsigned prologSize,
+ unsigned epilogSize DEBUGARG(void* codePtr))
{
- BYTE headerBuf[64];
- InfoHdr header;
+ BYTE headerBuf[64];
+ InfoHdr header;
int s_cached;
-#ifdef DEBUG
- size_t headerSize =
+#ifdef DEBUG
+ size_t headerSize =
#endif
- compiler->compInfoBlkSize = gcInfo.gcInfoBlockHdrSave(headerBuf,
- 0,
- codeSize,
- prologSize,
- epilogSize,
- &header,
- &s_cached);
+ compiler->compInfoBlkSize =
+ gcInfo.gcInfoBlockHdrSave(headerBuf, 0, codeSize, prologSize, epilogSize, &header, &s_cached);
size_t argTabOffset = 0;
- size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
+ size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
#if DISPLAY_SIZES
@@ -20703,7 +20219,7 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
/* Allocate the info block for the method */
- compiler->compInfoBlkAddr = (BYTE *) compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
+ compiler->compInfoBlkAddr = (BYTE*)compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
#if 0 // VERBOSE_SIZES
// TODO-Review: 'dataSize', below, is not defined
@@ -20729,24 +20245,20 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
/* Create the method info block: header followed by GC tracking tables */
- compiler->compInfoBlkAddr += gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1,
- codeSize,
- prologSize,
- epilogSize,
- &header,
- &s_cached);
+ compiler->compInfoBlkAddr +=
+ gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1, codeSize, prologSize, epilogSize, &header, &s_cached);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize);
compiler->compInfoBlkAddr = gcInfo.gcPtrTableSave(compiler->compInfoBlkAddr, header, codeSize, &argTabOffset);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize + ptrMapSize);
-#ifdef DEBUG
+#ifdef DEBUG
- if (0)
+ if (0)
{
- BYTE * temp = (BYTE *)infoPtr;
- unsigned size = compiler->compInfoBlkAddr - temp;
- BYTE * ptab = temp + headerSize;
+ BYTE* temp = (BYTE*)infoPtr;
+ unsigned size = compiler->compInfoBlkAddr - temp;
+ BYTE* ptab = temp + headerSize;
noway_assert(size == headerSize + ptrMapSize);
@@ -20754,14 +20266,14 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
for (unsigned i = 0; i < size; i++)
{
- if (temp == ptab)
+ if (temp == ptab)
{
printf("\nMethod info block - ptrtab [%u bytes]:", ptrMapSize);
- printf("\n %04X: %*c", i & ~0xF, 3*(i&0xF), ' ');
+ printf("\n %04X: %*c", i & ~0xF, 3 * (i & 0xF), ' ');
}
else
{
- if (!(i % 16))
+ if (!(i % 16))
printf("\n %04X: ", i);
}
@@ -20775,9 +20287,9 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
#if DUMP_GC_TABLES
- if (compiler->opts.dspGCtbls)
+ if (compiler->opts.dspGCtbls)
{
- const BYTE *base = (BYTE *)infoPtr;
+ const BYTE* base = (BYTE*)infoPtr;
unsigned size;
unsigned methodSize;
InfoHdr dumpHeader;
@@ -20789,19 +20301,18 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
// printf("size of header encoding is %3u\n", size);
printf("\n");
- if (compiler->opts.dspGCtbls)
+ if (compiler->opts.dspGCtbls)
{
- base += size;
- size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
+ base += size;
+ size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
// printf("size of pointer table is %3u\n", size);
printf("\n");
- noway_assert(compiler->compInfoBlkAddr == (base+size));
+ noway_assert(compiler->compInfoBlkAddr == (base + size));
}
-
}
#ifdef DEBUG
- if (jitOpts.testMask & 128)
+ if (jitOpts.testMask & 128)
{
for (unsigned offs = 0; offs < codeSize; offs++)
{
@@ -20813,17 +20324,18 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
/* Make sure we ended up generating the expected number of bytes */
- noway_assert(compiler->compInfoBlkAddr == (BYTE *)infoPtr + compiler->compInfoBlkSize);
+ noway_assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + compiler->compInfoBlkSize);
return infoPtr;
}
#else // JIT32_GCENCODER
-void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
+void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
{
- IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
- GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC) GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
+ IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
+ GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
+ GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
@@ -20838,39 +20350,40 @@ void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsig
gcInfoEncoder->Build();
- //GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
- //let's save the values anyway for debugging purposes
+ // GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
+ // let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
- compiler->compInfoBlkSize = 0; //not exposed by the GCEncoder interface
+ compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
#endif
-
/*****************************************************************************
* For CEE_LOCALLOC
*/
-regNumber CodeGen::genLclHeap(GenTreePtr size)
+regNumber CodeGen::genLclHeap(GenTreePtr size)
{
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
// regCnt is a register used to hold both
// the amount to stack alloc (either in bytes or pointer sized words)
// and the final stack alloc address to return as the result
- //
- regNumber regCnt = DUMMY_INIT(REG_CORRUPT);
- var_types type = genActualType(size->gtType);
- emitAttr easz = emitTypeSize(type);
+ //
+ regNumber regCnt = DUMMY_INIT(REG_CORRUPT);
+ var_types type = genActualType(size->gtType);
+ emitAttr easz = emitTypeSize(type);
#ifdef DEBUG
// Verify ESP
if (compiler->opts.compStackCheckOnRet)
{
- noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC && compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister && compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
+ noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
- BasicBlock * esp_check = genCreateTempLabel();
- emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ BasicBlock* esp_check = genCreateTempLabel();
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
@@ -20880,45 +20393,46 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
noway_assert(isFramePointerUsed());
noway_assert(genStackLevel == 0); // Can't have anything on the stack
- BasicBlock* endLabel = NULL;
+ BasicBlock* endLabel = NULL;
#if FEATURE_FIXED_OUT_ARGS
- bool stackAdjusted = false;
+ bool stackAdjusted = false;
#endif
if (size->IsCnsIntOrI())
{
#if FEATURE_FIXED_OUT_ARGS
// If we have an outgoing arg area then we must adjust the SP
- // essentially popping off the outgoing arg area,
+ // essentially popping off the outgoing arg area,
// We will restore it right before we return from this method
//
- if (compiler->lvaOutgoingArgSpaceSize > 0)
+ if (compiler->lvaOutgoingArgSpaceSize > 0)
{
- assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
+ assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) ==
+ 0); // This must be true for the stack to remain aligned
inst_RV_IV(INS_add, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
stackAdjusted = true;
}
#endif
size_t amount = size->gtIntCon.gtIconVal;
-
+
// Convert amount to be properly STACK_ALIGN and count of DWORD_PTRs
- amount += (STACK_ALIGN - 1);
+ amount += (STACK_ALIGN - 1);
amount &= ~(STACK_ALIGN - 1);
- amount >>= STACK_ALIGN_SHIFT; // amount is number of pointer-sized words to locAlloc
- size->gtIntCon.gtIconVal = amount; // update the GT_CNS value in the node
-
+ amount >>= STACK_ALIGN_SHIFT; // amount is number of pointer-sized words to locAlloc
+ size->gtIntCon.gtIconVal = amount; // update the GT_CNS value in the node
+
/* If amount is zero then return null in RegCnt */
if (amount == 0)
{
- regCnt = regSet.rsGrabReg(RBM_ALLINT);
+ regCnt = regSet.rsGrabReg(RBM_ALLINT);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
goto DONE;
}
-
+
/* For small allocations we will generate up to six push 0 inline */
if (amount <= 6)
{
- regCnt = regSet.rsGrabReg(RBM_ALLINT);
+ regCnt = regSet.rsGrabReg(RBM_ALLINT);
#if CPU_LOAD_STORE_ARCH
regNumber regZero = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(regCnt));
// Set 'regZero' to zero
@@ -20928,32 +20442,32 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
while (amount != 0)
{
#if CPU_LOAD_STORE_ARCH
- inst_IV(INS_push, (unsigned) genRegMask(regZero));
+ inst_IV(INS_push, (unsigned)genRegMask(regZero));
#else
- inst_IV(INS_push_hide, 0); // push_hide means don't track the stack
+ inst_IV(INS_push_hide, 0); // push_hide means don't track the stack
#endif
amount--;
}
-
+
regTracker.rsTrackRegTrash(regCnt);
// --- move regCnt, ESP
inst_RV_RV(INS_mov, regCnt, REG_SPBASE, TYP_I_IMPL);
goto DONE;
}
- else
+ else
{
if (!compiler->info.compInitMem)
{
- // Re-bias amount to be number of bytes to adjust the SP
+ // Re-bias amount to be number of bytes to adjust the SP
amount <<= STACK_ALIGN_SHIFT;
- size->gtIntCon.gtIconVal = amount; // update the GT_CNS value in the node
- if (amount < compiler->eeGetPageSize()) // must be < not <=
+ size->gtIntCon.gtIconVal = amount; // update the GT_CNS value in the node
+ if (amount < compiler->eeGetPageSize()) // must be < not <=
{
- // Since the size is a page or less, simply adjust ESP
-
+ // Since the size is a page or less, simply adjust ESP
+
// ESP might already be in the guard page, must touch it BEFORE
// the alloc, not after.
- regCnt = regSet.rsGrabReg(RBM_ALLINT);
+ regCnt = regSet.rsGrabReg(RBM_ALLINT);
inst_RV_RV(INS_mov, regCnt, REG_SPBASE, TYP_I_IMPL);
#if CPU_LOAD_STORE_ARCH
regNumber regTmp = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(regCnt));
@@ -20968,7 +20482,7 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
goto DONE;
}
}
- }
+ }
}
// Compute the size of the block to allocate
@@ -20978,12 +20492,13 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
#if FEATURE_FIXED_OUT_ARGS
// If we have an outgoing arg area then we must adjust the SP
- // essentially popping off the outgoing arg area,
+ // essentially popping off the outgoing arg area,
// We will restore it right before we return from this method
//
if ((compiler->lvaOutgoingArgSpaceSize > 0) && !stackAdjusted)
{
- assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
+ assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) ==
+ 0); // This must be true for the stack to remain aligned
inst_RV_IV(INS_add, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
stackAdjusted = true;
}
@@ -21001,7 +20516,7 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
inst_JMP(jmpEqual, endLabel);
// Align to STACK_ALIGN
- inst_RV_IV(INS_add, regCnt, (STACK_ALIGN - 1), emitActualTypeSize(type));
+ inst_RV_IV(INS_add, regCnt, (STACK_ALIGN - 1), emitActualTypeSize(type));
if (compiler->info.compInitMem)
{
@@ -21021,7 +20536,8 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
}
}
- BasicBlock* loop; loop = genCreateTempLabel();
+ BasicBlock* loop;
+ loop = genCreateTempLabel();
if (compiler->info.compInitMem)
{
@@ -21033,8 +20549,8 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(_TARGET_ARM_)
- regNumber regZero1 = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(regCnt));
- regNumber regZero2 = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(regCnt) & ~genRegMask(regZero1));
+ regNumber regZero1 = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(regCnt));
+ regNumber regZero2 = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(regCnt) & ~genRegMask(regZero1));
// Set 'regZero1' and 'regZero2' to zero
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regZero1);
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regZero2);
@@ -21045,13 +20561,13 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
#if defined(_TARGET_X86_)
- inst_IV(INS_push_hide, 0); // --- push 0
+ inst_IV(INS_push_hide, 0); // --- push 0
// Are we done?
inst_RV(INS_dec, regCnt, type);
#elif defined(_TARGET_ARM_)
- inst_IV(INS_push, (unsigned) (genRegMask(regZero1) | genRegMask(regZero2)));
+ inst_IV(INS_push, (unsigned)(genRegMask(regZero1) | genRegMask(regZero2)));
// Are we done?
inst_RV_IV(INS_sub, regCnt, 2, emitActualTypeSize(type), INS_FLAGS_SET);
@@ -21120,7 +20636,7 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
// decrement of the ESP - we do the subtraction in another reg
// instead of adjusting ESP directly.
- regNumber regTemp = regSet.rsPickReg();
+ regNumber regTemp = regSet.rsPickReg();
// Tickle the decremented value, and move back to ESP,
// note that it has to be done BEFORE the update of ESP since
@@ -21140,7 +20656,8 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
inst_RV_IV(INS_sub, regTemp, compiler->eeGetPageSize(), EA_PTRSIZE);
inst_RV_RV(INS_mov, REG_SPBASE, regTemp, TYP_I_IMPL);
- genRecoverReg(size, RBM_ALLINT, RegSet::KEEP_REG); // not purely the 'size' tree anymore; though it is derived from 'size'
+ genRecoverReg(size, RBM_ALLINT,
+ RegSet::KEEP_REG); // not purely the 'size' tree anymore; though it is derived from 'size'
noway_assert(size->gtFlags & GTF_REG_VAL);
regCnt = size->gtRegNum;
inst_RV_RV(INS_cmp, REG_SPBASE, regCnt, TYP_I_IMPL);
@@ -21153,7 +20670,7 @@ regNumber CodeGen::genLclHeap(GenTreePtr size)
regSet.rsMarkRegFree(genRegMask(regCnt));
DONE:
-
+
noway_assert(regCnt != DUMMY_INIT(REG_CORRUPT));
if (endLabel != NULL)
@@ -21162,18 +20679,18 @@ DONE:
#if FEATURE_FIXED_OUT_ARGS
// If we have an outgoing arg area then we must readjust the SP
//
- if (stackAdjusted)
+ if (stackAdjusted)
{
assert(compiler->lvaOutgoingArgSpaceSize > 0);
- assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
+ assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) ==
+ 0); // This must be true for the stack to remain aligned
inst_RV_IV(INS_sub, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize, EA_PTRSIZE);
}
#endif
/* Write the lvaShadowSPfirst stack frame slot */
noway_assert(compiler->lvaLocAllocSPvar != BAD_VAR_NUM);
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE,
- compiler->lvaLocAllocSPvar, 0);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaLocAllocSPvar, 0);
#if STACK_PROBES
// Don't think it is worth it the codegen complexity to embed this
@@ -21188,7 +20705,9 @@ DONE:
// Update new ESP
if (compiler->opts.compStackCheckOnRet)
{
- noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC && compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister && compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
+ noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
}
#endif
@@ -21196,7 +20715,6 @@ DONE:
return regCnt;
}
-
/*****************************************************************************/
#ifdef DEBUGGING_SUPPORT
/*****************************************************************************
@@ -21205,13 +20723,13 @@ DONE:
* Called for every scope info piece to record by the main genSetScopeInfo()
*/
-void CodeGen::genSetScopeInfo (unsigned which,
- UNATIVE_OFFSET startOffs,
- UNATIVE_OFFSET length,
- unsigned varNum,
- unsigned LVnum,
- bool avail,
- Compiler::siVarLoc& varLoc)
+void CodeGen::genSetScopeInfo(unsigned which,
+ UNATIVE_OFFSET startOffs,
+ UNATIVE_OFFSET length,
+ unsigned varNum,
+ unsigned LVnum,
+ bool avail,
+ Compiler::siVarLoc& varLoc)
{
/* We need to do some mapping while reporting back these variables */
@@ -21224,10 +20742,8 @@ void CodeGen::genSetScopeInfo (unsigned which,
// Is this a varargs function?
- if (compiler->info.compIsVarArgs &&
- varNum != compiler->lvaVarargsHandleArg &&
- varNum < compiler->info.compArgsCount &&
- !compiler->lvaTable[varNum].lvIsRegArg)
+ if (compiler->info.compIsVarArgs && varNum != compiler->lvaVarargsHandleArg &&
+ varNum < compiler->info.compArgsCount && !compiler->lvaTable[varNum].lvIsRegArg)
{
noway_assert(varLoc.vlType == Compiler::VLT_STK || varLoc.vlType == Compiler::VLT_STK2);
@@ -21249,12 +20765,12 @@ void CodeGen::genSetScopeInfo (unsigned which,
unsigned varOffset = compiler->lvaTable[varNum].lvStkOffs;
noway_assert(cookieOffset < varOffset);
- unsigned offset = varOffset - cookieOffset;
- unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * sizeof(void *);
+ unsigned offset = varOffset - cookieOffset;
+ unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * sizeof(void*);
noway_assert(offset < stkArgSize);
offset = stkArgSize - offset;
- varLoc.vlType = Compiler::VLT_FIXED_VA;
+ varLoc.vlType = Compiler::VLT_FIXED_VA;
varLoc.vlFixedVarArg.vlfvOffset = offset;
}
@@ -21274,15 +20790,15 @@ void CodeGen::genSetScopeInfo (unsigned which,
// Hang on to this compiler->info.
- TrnslLocalVarInfo &tlvi = genTrnslLocalVarInfo[which];
+ TrnslLocalVarInfo& tlvi = genTrnslLocalVarInfo[which];
- tlvi.tlviVarNum = ilVarNum;
- tlvi.tlviLVnum = LVnum;
- tlvi.tlviName = name;
- tlvi.tlviStartPC = startOffs;
- tlvi.tlviLength = length;
- tlvi.tlviAvailable = avail;
- tlvi.tlviVarLoc = varLoc;
+ tlvi.tlviVarNum = ilVarNum;
+ tlvi.tlviLVnum = LVnum;
+ tlvi.tlviName = name;
+ tlvi.tlviStartPC = startOffs;
+ tlvi.tlviLength = length;
+ tlvi.tlviAvailable = avail;
+ tlvi.tlviVarLoc = varLoc;
#endif // DEBUG
@@ -21290,7 +20806,7 @@ void CodeGen::genSetScopeInfo (unsigned which,
}
/*****************************************************************************/
-#endif // DEBUGGING_SUPPORT
+#endif // DEBUGGING_SUPPORT
/*****************************************************************************/
/*****************************************************************************
@@ -21302,13 +20818,13 @@ void CodeGen::genSetScopeInfo (unsigned which,
* constant operand, and one that's in a register. Thus, the only thing we
* need to determine is whether the register holding op1 is dead.
*/
-bool CodeGen::genRegTrashable(regNumber reg, GenTreePtr tree)
+bool CodeGen::genRegTrashable(regNumber reg, GenTreePtr tree)
{
- regMaskTP vars;
- regMaskTP mask = genRegMask(reg);
+ regMaskTP vars;
+ regMaskTP mask = genRegMask(reg);
- if (regSet.rsMaskUsed & mask)
- return false;
+ if (regSet.rsMaskUsed & mask)
+ return false;
assert(tree->gtOper == GT_ADD);
GenTreePtr regValTree = tree->gtOp.gtOp1;
@@ -21326,260 +20842,255 @@ bool CodeGen::genRegTrashable(regNumber reg, GenTreePtr tree)
if (regValTree->IsRegVar() && !regValTree->IsRegVarDeath())
return false;
else
- return true;
+ return true;
}
- /*****************************************************************************/
- //
- // This method calculates the USE and DEF values for a statement.
- // It also calls fgSetRngChkTarget for the statement.
- //
- // We refactor out this code from fgPerBlockLocalVarLiveness
- // and add QMARK logic to it.
- //
- // NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
- //
- // The usage of this method is very limited.
- // We should only call it for the first node in the statement or
- // for the node after the GTF_RELOP_QMARK node.
- //
- // NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
-
-
- /*
- Since a GT_QMARK tree can take two paths (i.e. the thenTree Path or the elseTree path),
- when we calculate its fgCurDefSet and fgCurUseSet, we need to combine the results
- from both trees.
-
- Note that the GT_QMARK trees are threaded as shown below with nodes 1 to 11
- linked by gtNext.
-
- The algorithm we use is:
-     (1) We walk these nodes according to the evaluation order (i.e. from node 1 to node 11).
- (2) When we see the GTF_RELOP_QMARK node, we know we are about to split the path.
- We cache copies of current fgCurDefSet and fgCurUseSet.
- (The fact that it is recursively calling itself is for nested QMARK case,
- where we need to remember multiple copies of fgCurDefSet and fgCurUseSet.)
- (3) We walk the thenTree.
- (4) When we see GT_COLON node, we know that we just finished the thenTree.
- We then make a copy of the current fgCurDefSet and fgCurUseSet,
- restore them to the ones before the thenTree, and then continue walking
- the elseTree.
- (5) When we see the GT_QMARK node, we know we just finished the elseTree.
- So we combine the results from the thenTree and elseTree and then return.
-
-
- +--------------------+
- | GT_QMARK 11|
- +----------+---------+
- |
- *
- / \
- / \
- / \
- +---------------------+ +--------------------+
- | GT_<cond> 3 | | GT_COLON 7 |
- | w/ GTF_RELOP_QMARK | | w/ GTF_COLON_COND |
- +----------+----------+ +---------+----------+
- | |
- * *
- / \ / \
- / \ / \
- / \ / \
- 2 1 thenTree 6 elseTree 10
- x | |
- / * *
- +----------------+ / / \ / \
- |prevExpr->gtNext+------/ / \ / \
- +----------------+ / \ / \
- 5 4 9 8
+/*****************************************************************************/
+//
+// This method calculates the USE and DEF values for a statement.
+// It also calls fgSetRngChkTarget for the statement.
+//
+// We refactor out this code from fgPerBlockLocalVarLiveness
+// and add QMARK logic to it.
+//
+// NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
+//
+// The usage of this method is very limited.
+// We should only call it for the first node in the statement or
+// for the node after the GTF_RELOP_QMARK node.
+//
+// NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
+
+/*
+ Since a GT_QMARK tree can take two paths (i.e. the thenTree Path or the elseTree path),
+ when we calculate its fgCurDefSet and fgCurUseSet, we need to combine the results
+ from both trees.
+
+ Note that the GT_QMARK trees are threaded as shown below with nodes 1 to 11
+ linked by gtNext.
+
+ The algorithm we use is:
+    (1) We walk these nodes according to the evaluation order (i.e. from node 1 to node 11).
+ (2) When we see the GTF_RELOP_QMARK node, we know we are about to split the path.
+ We cache copies of current fgCurDefSet and fgCurUseSet.
+ (The fact that it is recursively calling itself is for nested QMARK case,
+ where we need to remember multiple copies of fgCurDefSet and fgCurUseSet.)
+ (3) We walk the thenTree.
+ (4) When we see GT_COLON node, we know that we just finished the thenTree.
+ We then make a copy of the current fgCurDefSet and fgCurUseSet,
+ restore them to the ones before the thenTree, and then continue walking
+ the elseTree.
+ (5) When we see the GT_QMARK node, we know we just finished the elseTree.
+ So we combine the results from the thenTree and elseTree and then return.
+
+
+ +--------------------+
+ | GT_QMARK 11|
+ +----------+---------+
+ |
+ *
+ / \
+ / \
+ / \
+ +---------------------+ +--------------------+
+ | GT_<cond> 3 | | GT_COLON 7 |
+ | w/ GTF_RELOP_QMARK | | w/ GTF_COLON_COND |
+ +----------+----------+ +---------+----------+
+ | |
+ * *
+ / \ / \
+ / \ / \
+ / \ / \
+ 2 1 thenTree 6 elseTree 10
+ x | |
+ / * *
+ +----------------+ / / \ / \
+ |prevExpr->gtNext+------/ / \ / \
+ +----------------+ / \ / \
+ 5 4 9 8
- */
+*/
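
The merge rule spelled out in steps (2)-(5) above can be easy to lose in the set bookkeeping. A minimal sketch of just that rule, assuming std::set stands in for VARSET_TP (illustration only, not code from this change; the names VarSet and MergeQmarkArms are invented for the example):

    #include <algorithm>
    #include <iterator>
    #include <set>

    using VarSet = std::set<unsigned>; // stand-in for VARSET_TP; elements are tracked-variable indices

    // Combine the two arms of a QMARK as in step (5): a variable is only definitely
    // assigned if both paths assign it (intersection of DEF sets), while a use on
    // either path counts (union of USE sets).
    void MergeQmarkArms(VarSet& curDef, VarSet& curUse, // sets after walking the elseTree
                        const VarSet& defAfterThen,     // copies saved at the GT_COLON,
                        const VarSet& useAfterThen)     // i.e. after walking the thenTree
    {
        VarSet mergedDef;
        std::set_intersection(curDef.begin(), curDef.end(), defAfterThen.begin(), defAfterThen.end(),
                              std::inserter(mergedDef, mergedDef.begin()));
        curDef.swap(mergedDef);                                  // fgCurDefSet &= defSet_AfterThenTree
        curUse.insert(useAfterThen.begin(), useAfterThen.end()); // fgCurUseSet |= useSet_AfterThenTree
    }

The heap flags follow the same pattern below: fgCurHeapDef and fgCurHeapHavoc are AND-ed across the two arms, while fgCurHeapUse is OR-ed.
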
-GenTreePtr Compiler::fgLegacyPerStatementLocalVarLiveness(GenTreePtr startNode, // The node to start walking with.
- GenTreePtr relopNode, // The node before the startNode.
- // (It should either be NULL or
- // a GTF_RELOP_QMARK node.)
- GenTreePtr asgdLclVar
- )
+GenTreePtr Compiler::fgLegacyPerStatementLocalVarLiveness(GenTreePtr startNode, // The node to start walking with.
+ GenTreePtr relopNode, // The node before the startNode.
+ // (It should either be NULL or
+ // a GTF_RELOP_QMARK node.)
+ GenTreePtr asgdLclVar)
{
GenTreePtr tree;
- VARSET_TP VARSET_INIT(this, defSet_BeforeSplit, fgCurDefSet); // Store the current fgCurDefSet and fgCurUseSet so
-    VARSET_TP VARSET_INIT(this, useSet_BeforeSplit, fgCurUseSet); // we can restore them before entering the elseTree.
+ VARSET_TP VARSET_INIT(this, defSet_BeforeSplit, fgCurDefSet); // Store the current fgCurDefSet and fgCurUseSet so
+    VARSET_TP VARSET_INIT(this, useSet_BeforeSplit, fgCurUseSet); // we can restore them before entering the elseTree.
bool heapUse_BeforeSplit = fgCurHeapUse;
bool heapDef_BeforeSplit = fgCurHeapDef;
bool heapHavoc_BeforeSplit = fgCurHeapHavoc;
- VARSET_TP VARSET_INIT_NOCOPY(defSet_AfterThenTree, VarSetOps::MakeEmpty(this)); // These two variables will store the USE and DEF sets after
- VARSET_TP VARSET_INIT_NOCOPY(useSet_AfterThenTree, VarSetOps::MakeEmpty(this)); // evaluating the thenTree.
+ VARSET_TP VARSET_INIT_NOCOPY(defSet_AfterThenTree, VarSetOps::MakeEmpty(this)); // These two variables will store
+ // the USE and DEF sets after
+ VARSET_TP VARSET_INIT_NOCOPY(useSet_AfterThenTree, VarSetOps::MakeEmpty(this)); // evaluating the thenTree.
bool heapUse_AfterThenTree = fgCurHeapUse;
bool heapDef_AfterThenTree = fgCurHeapDef;
bool heapHavoc_AfterThenTree = fgCurHeapHavoc;
// relopNode is either NULL or a GTF_RELOP_QMARK node.
- assert(!relopNode ||
- (relopNode->OperKind() & GTK_RELOP) && (relopNode->gtFlags & GTF_RELOP_QMARK)
- );
+ assert(!relopNode || (relopNode->OperKind() & GTK_RELOP) && (relopNode->gtFlags & GTF_RELOP_QMARK));
// If relopNode is NULL, then the startNode must be the 1st node of the statement.
// If relopNode is non-NULL, then the startNode must be the node right after the GTF_RELOP_QMARK node.
- assert( (!relopNode && startNode == compCurStmt->gtStmt.gtStmtList) ||
- (relopNode && startNode == relopNode->gtNext)
- );
+ assert((!relopNode && startNode == compCurStmt->gtStmt.gtStmtList) ||
+ (relopNode && startNode == relopNode->gtNext));
for (tree = startNode; tree; tree = tree->gtNext)
{
switch (tree->gtOper)
{
- case GT_QMARK:
-
- // This must be a GT_QMARK node whose GTF_RELOP_QMARK node is recursively calling us.
- noway_assert(relopNode && tree->gtOp.gtOp1 == relopNode);
+ case GT_QMARK:
- // By the time we see a GT_QMARK, we must have finished processing the elseTree.
-            // So it's time to combine the results
-            // from the thenTree and the elseTree, and then return.
+ // This must be a GT_QMARK node whose GTF_RELOP_QMARK node is recursively calling us.
+ noway_assert(relopNode && tree->gtOp.gtOp1 == relopNode);
- VarSetOps::IntersectionD(this, fgCurDefSet, defSet_AfterThenTree);
- VarSetOps::UnionD(this, fgCurUseSet, useSet_AfterThenTree);
+ // By the time we see a GT_QMARK, we must have finished processing the elseTree.
+                // So it's time to combine the results
+                // from the thenTree and the elseTree, and then return.
- fgCurHeapDef = fgCurHeapDef && heapDef_AfterThenTree;
- fgCurHeapHavoc = fgCurHeapHavoc && heapHavoc_AfterThenTree;
- fgCurHeapUse = fgCurHeapUse || heapUse_AfterThenTree;
+ VarSetOps::IntersectionD(this, fgCurDefSet, defSet_AfterThenTree);
+ VarSetOps::UnionD(this, fgCurUseSet, useSet_AfterThenTree);
- // Return the GT_QMARK node itself so the caller can continue from there.
- // NOTE: the caller will get to the next node by doing the "tree = tree->gtNext"
- // in the "for" statement.
- goto _return;
+ fgCurHeapDef = fgCurHeapDef && heapDef_AfterThenTree;
+ fgCurHeapHavoc = fgCurHeapHavoc && heapHavoc_AfterThenTree;
+ fgCurHeapUse = fgCurHeapUse || heapUse_AfterThenTree;
- case GT_COLON:
- // By the time we see GT_COLON, we must have just walked the thenTree.
- // So we need to do two things here.
- // (1) Save the current fgCurDefSet and fgCurUseSet so that later we can combine them
- // with the result from the elseTree.
- // (2) Restore fgCurDefSet and fgCurUseSet to the points before the thenTree is walked.
- // and then continue walking the elseTree.
- VarSetOps::Assign(this, defSet_AfterThenTree, fgCurDefSet);
- VarSetOps::Assign(this, useSet_AfterThenTree, fgCurUseSet);
+ // Return the GT_QMARK node itself so the caller can continue from there.
+ // NOTE: the caller will get to the next node by doing the "tree = tree->gtNext"
+ // in the "for" statement.
+ goto _return;
- heapDef_AfterThenTree = fgCurHeapDef;
- heapHavoc_AfterThenTree = fgCurHeapHavoc;
- heapUse_AfterThenTree = fgCurHeapUse;
+ case GT_COLON:
+ // By the time we see GT_COLON, we must have just walked the thenTree.
+ // So we need to do two things here.
+ // (1) Save the current fgCurDefSet and fgCurUseSet so that later we can combine them
+ // with the result from the elseTree.
+ // (2) Restore fgCurDefSet and fgCurUseSet to the points before the thenTree is walked.
+ // and then continue walking the elseTree.
+ VarSetOps::Assign(this, defSet_AfterThenTree, fgCurDefSet);
+ VarSetOps::Assign(this, useSet_AfterThenTree, fgCurUseSet);
- VarSetOps::Assign(this, fgCurDefSet, defSet_BeforeSplit);
- VarSetOps::Assign(this, fgCurUseSet, useSet_BeforeSplit);
+ heapDef_AfterThenTree = fgCurHeapDef;
+ heapHavoc_AfterThenTree = fgCurHeapHavoc;
+ heapUse_AfterThenTree = fgCurHeapUse;
- fgCurHeapDef = heapDef_BeforeSplit;
- fgCurHeapHavoc = heapHavoc_BeforeSplit;
- fgCurHeapUse = heapUse_BeforeSplit;
+ VarSetOps::Assign(this, fgCurDefSet, defSet_BeforeSplit);
+ VarSetOps::Assign(this, fgCurUseSet, useSet_BeforeSplit);
- break;
+ fgCurHeapDef = heapDef_BeforeSplit;
+ fgCurHeapHavoc = heapHavoc_BeforeSplit;
+ fgCurHeapUse = heapUse_BeforeSplit;
- case GT_LCL_VAR:
- case GT_LCL_FLD:
- case GT_LCL_VAR_ADDR:
- case GT_LCL_FLD_ADDR:
- case GT_STORE_LCL_VAR:
- case GT_STORE_LCL_FLD:
- fgMarkUseDef(tree->AsLclVarCommon(), asgdLclVar);
- break;
+ break;
- case GT_CLS_VAR:
- // For Volatile indirection, first mutate the global heap
- // see comments in ValueNum.cpp (under case GT_CLS_VAR)
-            // This models Volatile reads as def-then-use of the heap,
- // and allows for a CSE of a subsequent non-volatile read
- if ((tree->gtFlags & GTF_FLD_VOLATILE) != 0)
- {
- // For any Volatile indirection, we must handle it as a
- // definition of the global heap
- fgCurHeapDef = true;
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
+ case GT_LCL_VAR_ADDR:
+ case GT_LCL_FLD_ADDR:
+ case GT_STORE_LCL_VAR:
+ case GT_STORE_LCL_FLD:
+ fgMarkUseDef(tree->AsLclVarCommon(), asgdLclVar);
+ break;
- }
- // If the GT_CLS_VAR is the lhs of an assignment, we'll handle it as a heap def, when we get to assignment.
- // Otherwise, we treat it as a use here.
- if (!fgCurHeapDef && (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) == 0)
- {
- fgCurHeapUse = true;
- }
- break;
+ case GT_CLS_VAR:
+ // For Volatile indirection, first mutate the global heap
+ // see comments in ValueNum.cpp (under case GT_CLS_VAR)
+                // This models Volatile reads as def-then-use of the heap,
+ // and allows for a CSE of a subsequent non-volatile read
+ if ((tree->gtFlags & GTF_FLD_VOLATILE) != 0)
+ {
+ // For any Volatile indirection, we must handle it as a
+ // definition of the global heap
+ fgCurHeapDef = true;
+ }
+ // If the GT_CLS_VAR is the lhs of an assignment, we'll handle it as a heap def, when we get to
+ // assignment.
+ // Otherwise, we treat it as a use here.
+ if (!fgCurHeapDef && (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) == 0)
+ {
+ fgCurHeapUse = true;
+ }
+ break;
- case GT_IND:
- // For Volatile indirection, first mutate the global heap
- // see comments in ValueNum.cpp (under case GT_CLS_VAR)
-            // This models Volatile reads as def-then-use of the heap,
- // and allows for a CSE of a subsequent non-volatile read
- if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
- {
- // For any Volatile indirection, we must handle it as a
- // definition of the global heap
- fgCurHeapDef = true;
- }
+ case GT_IND:
+ // For Volatile indirection, first mutate the global heap
+ // see comments in ValueNum.cpp (under case GT_CLS_VAR)
+                // This models Volatile reads as def-then-use of the heap,
+ // and allows for a CSE of a subsequent non-volatile read
+ if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
+ {
+ // For any Volatile indirection, we must handle it as a
+ // definition of the global heap
+ fgCurHeapDef = true;
+ }
- // If the GT_IND is the lhs of an assignment, we'll handle it
- // as a heap def, when we get to assignment.
- // Otherwise, we treat it as a use here.
- if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0)
- {
- GenTreeLclVarCommon* dummyLclVarTree = NULL;
- bool dummyIsEntire = false;
- GenTreePtr addrArg = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/true);
- if (!addrArg->DefinesLocalAddr(this, /*width doesn't matter*/0, &dummyLclVarTree, &dummyIsEntire))
+ // If the GT_IND is the lhs of an assignment, we'll handle it
+ // as a heap def, when we get to assignment.
+ // Otherwise, we treat it as a use here.
+ if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0)
{
- if (!fgCurHeapDef)
+ GenTreeLclVarCommon* dummyLclVarTree = NULL;
+ bool dummyIsEntire = false;
+ GenTreePtr addrArg = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/ true);
+ if (!addrArg->DefinesLocalAddr(this, /*width doesn't matter*/ 0, &dummyLclVarTree, &dummyIsEntire))
{
- fgCurHeapUse = true;
+ if (!fgCurHeapDef)
+ {
+ fgCurHeapUse = true;
+ }
+ }
+ else
+ {
+ // Defines a local addr
+ assert(dummyLclVarTree != nullptr);
+ fgMarkUseDef(dummyLclVarTree->AsLclVarCommon(), asgdLclVar);
}
}
- else
- {
- // Defines a local addr
- assert(dummyLclVarTree != nullptr);
- fgMarkUseDef(dummyLclVarTree->AsLclVarCommon(), asgdLclVar);
- }
- }
- break;
+ break;
// These should have been morphed away to become GT_INDs:
- case GT_FIELD:
- case GT_INDEX:
- unreached();
- break;
+ case GT_FIELD:
+ case GT_INDEX:
+ unreached();
+ break;
// We'll assume these are use-then-defs of the heap.
- case GT_LOCKADD:
- case GT_XADD:
- case GT_XCHG:
- case GT_CMPXCHG:
- if (!fgCurHeapDef)
- {
- fgCurHeapUse = true;
- }
- fgCurHeapDef = true;
- fgCurHeapHavoc = true;
- break;
+ case GT_LOCKADD:
+ case GT_XADD:
+ case GT_XCHG:
+ case GT_CMPXCHG:
+ if (!fgCurHeapDef)
+ {
+ fgCurHeapUse = true;
+ }
+ fgCurHeapDef = true;
+ fgCurHeapHavoc = true;
+ break;
- case GT_MEMORYBARRIER:
-            // Similar to any Volatile indirection, we must handle this as a definition of the global heap
- fgCurHeapDef = true;
- break;
+ case GT_MEMORYBARRIER:
+                // Similar to any Volatile indirection, we must handle this as a definition of the global heap
+ fgCurHeapDef = true;
+ break;
// For now, all calls read/write the heap, the latter in its entirety. Might tighten this case later.
- case GT_CALL:
+ case GT_CALL:
{
- GenTreeCall* call = tree->AsCall();
- bool modHeap = true;
+ GenTreeCall* call = tree->AsCall();
+ bool modHeap = true;
if (call->gtCallType == CT_HELPER)
{
CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd);
- if ( !s_helperCallProperties.MutatesHeap(helpFunc)
- && !s_helperCallProperties.MayRunCctor(helpFunc))
+ if (!s_helperCallProperties.MutatesHeap(helpFunc) && !s_helperCallProperties.MayRunCctor(helpFunc))
{
modHeap = false;
}
@@ -21590,66 +21101,67 @@ GenTreePtr Compiler::fgLegacyPerStatementLocalVarLiveness(GenTreePtr startNode,
{
fgCurHeapUse = true;
}
- fgCurHeapDef = true;
+ fgCurHeapDef = true;
fgCurHeapHavoc = true;
}
}
- // If this is a p/invoke unmanaged call or if this is a tail-call
- // and we have an unmanaged p/invoke call in the method,
- // then we're going to run the p/invoke epilog.
- // So we mark the FrameRoot as used by this instruction.
- // This ensures that the block->bbVarUse will contain
-            // the FrameRoot local var if it is a tracked variable.
+ // If this is a p/invoke unmanaged call or if this is a tail-call
+ // and we have an unmanaged p/invoke call in the method,
+ // then we're going to run the p/invoke epilog.
+ // So we mark the FrameRoot as used by this instruction.
+ // This ensures that the block->bbVarUse will contain
+                // the FrameRoot local var if it is a tracked variable.
- if (tree->gtCall.IsUnmanaged() || (tree->gtCall.IsTailCall() && info.compCallUnmanaged))
- {
- /* Get the TCB local and mark it as used */
+ if (tree->gtCall.IsUnmanaged() || (tree->gtCall.IsTailCall() && info.compCallUnmanaged))
+ {
+ /* Get the TCB local and mark it as used */
- noway_assert(info.compLvFrameListRoot < lvaCount);
+ noway_assert(info.compLvFrameListRoot < lvaCount);
- LclVarDsc* varDsc = &lvaTable[info.compLvFrameListRoot];
+ LclVarDsc* varDsc = &lvaTable[info.compLvFrameListRoot];
- if (varDsc->lvTracked)
- {
- if (!VarSetOps::IsMember(this, fgCurDefSet, varDsc->lvVarIndex))
+ if (varDsc->lvTracked)
{
- VarSetOps::AddElemD(this, fgCurUseSet, varDsc->lvVarIndex);
+ if (!VarSetOps::IsMember(this, fgCurDefSet, varDsc->lvVarIndex))
+ {
+ VarSetOps::AddElemD(this, fgCurUseSet, varDsc->lvVarIndex);
+ }
}
}
- }
- break;
+ break;
- default:
+ default:
- // Determine whether it defines a heap location.
- if (tree->OperIsAssignment() || tree->OperIsBlkOp())
- {
- GenTreeLclVarCommon* dummyLclVarTree = NULL;
- if (!tree->DefinesLocal(this, &dummyLclVarTree))
+ // Determine whether it defines a heap location.
+ if (tree->OperIsAssignment() || tree->OperIsBlkOp())
{
- // If it doesn't define a local, then it might update the heap.
- fgCurHeapDef = true;
+ GenTreeLclVarCommon* dummyLclVarTree = NULL;
+ if (!tree->DefinesLocal(this, &dummyLclVarTree))
+ {
+ // If it doesn't define a local, then it might update the heap.
+ fgCurHeapDef = true;
+ }
}
- }
-
- // Are we seeing a GT_<cond> for a GT_QMARK node?
- if ( (tree->OperKind() & GTK_RELOP) &&
- (tree->gtFlags & GTF_RELOP_QMARK)
- ) {
- // We are about to enter the parallel paths (i.e. the thenTree and the elseTree).
- // Recursively call fgLegacyPerStatementLocalVarLiveness.
- // At the very beginning of fgLegacyPerStatementLocalVarLiveness, we will cache the values of the current
- // fgCurDefSet and fgCurUseSet into local variables defSet_BeforeSplit and useSet_BeforeSplit.
- // The cached values will be used to restore fgCurDefSet and fgCurUseSet once we see the GT_COLON node.
- tree = fgLegacyPerStatementLocalVarLiveness(tree->gtNext, tree, asgdLclVar);
- // We must have been returned here after seeing a GT_QMARK node.
- noway_assert(tree->gtOper == GT_QMARK);
- }
+ // Are we seeing a GT_<cond> for a GT_QMARK node?
+ if ((tree->OperKind() & GTK_RELOP) && (tree->gtFlags & GTF_RELOP_QMARK))
+ {
+ // We are about to enter the parallel paths (i.e. the thenTree and the elseTree).
+ // Recursively call fgLegacyPerStatementLocalVarLiveness.
+ // At the very beginning of fgLegacyPerStatementLocalVarLiveness, we will cache the values of the
+ // current
+ // fgCurDefSet and fgCurUseSet into local variables defSet_BeforeSplit and useSet_BeforeSplit.
+ // The cached values will be used to restore fgCurDefSet and fgCurUseSet once we see the GT_COLON
+ // node.
+ tree = fgLegacyPerStatementLocalVarLiveness(tree->gtNext, tree, asgdLclVar);
+
+ // We must have been returned here after seeing a GT_QMARK node.
+ noway_assert(tree->gtOper == GT_QMARK);
+ }
- break;
+ break;
}
}
@@ -21682,25 +21194,25 @@ _return:
* +20h Saved value of EBP method prolog
*/
-regMaskTP CodeGen::genPInvokeMethodProlog(regMaskTP initRegs)
+regMaskTP CodeGen::genPInvokeMethodProlog(regMaskTP initRegs)
{
assert(compiler->compGeneratingProlog);
noway_assert(!compiler->opts.ShouldUsePInvokeHelpers());
noway_assert(compiler->info.compCallUnmanaged);
- CORINFO_EE_INFO * pInfo = compiler->eeGetEEInfo();
+ CORINFO_EE_INFO* pInfo = compiler->eeGetEEInfo();
noway_assert(compiler->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
/* let's find out if compLvFrameListRoot is enregistered */
- LclVarDsc * varDsc = &compiler->lvaTable[compiler->info.compLvFrameListRoot];
+ LclVarDsc* varDsc = &compiler->lvaTable[compiler->info.compLvFrameListRoot];
noway_assert(!varDsc->lvIsParam);
noway_assert(varDsc->lvType == TYP_I_IMPL);
DWORD threadTlsIndex, *pThreadTlsIndex;
- threadTlsIndex = compiler->info.compCompHnd->getThreadTLSIndex((void**) &pThreadTlsIndex);
+ threadTlsIndex = compiler->info.compCompHnd->getThreadTLSIndex((void**)&pThreadTlsIndex);
#if defined(_TARGET_X86_)
if (threadTlsIndex == (DWORD)-1 || pInfo->osType != CORINFO_WINNT)
#else
@@ -21711,11 +21223,8 @@ regMaskTP CodeGen::genPInvokeMethodProlog(regMaskTP initRegs)
// InlinedCallFrame vptr through indirections, we'll call only one helper.
// The helper takes frame address in REG_PINVOKE_FRAME, returns TCB in REG_PINVOKE_TCB
// and uses REG_PINVOKE_SCRATCH as scratch register.
- getEmitter()->emitIns_R_S (INS_lea,
- EA_PTRSIZE,
- REG_PINVOKE_FRAME,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfFrameVptr);
+ getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_PINVOKE_FRAME, compiler->lvaInlinedPInvokeFrameVar,
+ pInfo->inlinedCallFrameInfo.offsetOfFrameVptr);
regTracker.rsTrackRegTrash(REG_PINVOKE_FRAME);
    // We're about to trash REG_PINVOKE_TCB, it better not be in use!
@@ -21723,11 +21232,11 @@ regMaskTP CodeGen::genPInvokeMethodProlog(regMaskTP initRegs)
// Don't use the argument registers (including the special argument in
// REG_PINVOKE_FRAME) for computing the target address.
- regSet.rsLockReg(RBM_ARG_REGS|RBM_PINVOKE_FRAME);
+ regSet.rsLockReg(RBM_ARG_REGS | RBM_PINVOKE_FRAME);
genEmitHelperCall(CORINFO_HELP_INIT_PINVOKE_FRAME, 0, EA_UNKNOWN);
- regSet.rsUnlockReg(RBM_ARG_REGS|RBM_PINVOKE_FRAME);
+ regSet.rsUnlockReg(RBM_ARG_REGS | RBM_PINVOKE_FRAME);
if (varDsc->lvRegister)
{
@@ -21747,18 +21256,15 @@ regMaskTP CodeGen::genPInvokeMethodProlog(regMaskTP initRegs)
else
{
// move TCB to its stack location
- getEmitter()->emitIns_S_R (ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- REG_PINVOKE_TCB,
- compiler->info.compLvFrameListRoot,
- 0);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_TCB,
+ compiler->info.compLvFrameListRoot, 0);
}
// We are done, the rest of this function deals with the inlined case.
return initRegs;
}
- regNumber regTCB;
+ regNumber regTCB;
if (varDsc->lvRegister)
{
@@ -21784,33 +21290,22 @@ regMaskTP CodeGen::genPInvokeMethodProlog(regMaskTP initRegs)
if (threadTlsIndex < 64)
{
// mov reg, FS:[0xE10+threadTlsIndex*4]
- getEmitter()->emitIns_R_C (ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- regTCB,
- FLD_GLOBAL_FS,
- WIN_NT_TLS_OFFSET + threadTlsIndex * sizeof(int));
+ getEmitter()->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regTCB, FLD_GLOBAL_FS,
+ WIN_NT_TLS_OFFSET + threadTlsIndex * sizeof(int));
regTracker.rsTrackRegTrash(regTCB);
}
else
{
noway_assert(pInfo->osMajor >= 5);
- DWORD basePtr = WIN_NT5_TLS_HIGHOFFSET;
+ DWORD basePtr = WIN_NT5_TLS_HIGHOFFSET;
threadTlsIndex -= 64;
// mov reg, FS:[0x2c] or mov reg, fs:[0xf94]
// mov reg, [reg+threadTlsIndex*4]
- getEmitter()->emitIns_R_C (ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- regTCB,
- FLD_GLOBAL_FS,
- basePtr);
- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- regTCB,
- regTCB,
- threadTlsIndex*sizeof(int));
+ getEmitter()->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regTCB, FLD_GLOBAL_FS, basePtr);
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, regTCB, regTCB, threadTlsIndex * sizeof(int));
regTracker.rsTrackRegTrash(regTCB);
}
#endif
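
For readers unfamiliar with the x86 TLS layout that the two mov sequences above encode, a rough C++ rendering of the same lookup follows (an illustration under assumed TEB offsets, not code from this change; LoadTlsSlot and the k* constants are invented names). Slots 0-63 live in an inline array in the TEB; higher indices go through the expansion-slot pointer, which is why the prolog splits on threadTlsIndex < 64.

    #include <cstdint>

    // Assumed x86 TEB offsets, corresponding to WIN_NT_TLS_OFFSET and WIN_NT5_TLS_HIGHOFFSET above.
    const uint32_t kTlsSlotsOffset          = 0xE10; // inline TlsSlots array (64 entries)
    const uint32_t kTlsExpansionSlotsOffset = 0xF94; // pointer to the expansion slot array

    // Returns the value stored in TLS slot 'tlsIndex', given the thread's TEB base address.
    // x86 only: slot entries are pointer-sized (4 bytes), matching the sizeof(int) scaling in the prolog.
    void* LoadTlsSlot(const uint8_t* teb, uint32_t tlsIndex)
    {
        if (tlsIndex < 64)
        {
            // mov reg, FS:[0xE10 + tlsIndex*4]
            return *reinterpret_cast<void* const*>(teb + kTlsSlotsOffset + tlsIndex * sizeof(void*));
        }
        // mov reg, FS:[0xF94]; mov reg, [reg + (tlsIndex - 64)*4]
        const uint8_t* expansion = *reinterpret_cast<const uint8_t* const*>(teb + kTlsExpansionSlotsOffset);
        return *reinterpret_cast<void* const*>(expansion + (tlsIndex - 64) * sizeof(void*));
    }
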
@@ -21819,92 +21314,67 @@ regMaskTP CodeGen::genPInvokeMethodProlog(regMaskTP initRegs)
if (!varDsc->lvRegister)
{
- getEmitter()->emitIns_S_R (ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- regTCB,
- compiler->info.compLvFrameListRoot,
- 0);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, regTCB, compiler->info.compLvFrameListRoot, 0);
}
/* set frame's vptr */
- const void * inlinedCallFrameVptr, **pInlinedCallFrameVptr;
- inlinedCallFrameVptr = compiler->info.compCompHnd->getInlinedCallFrameVptr((void**) &pInlinedCallFrameVptr);
+ const void *inlinedCallFrameVptr, **pInlinedCallFrameVptr;
+ inlinedCallFrameVptr = compiler->info.compCompHnd->getInlinedCallFrameVptr((void**)&pInlinedCallFrameVptr);
noway_assert(inlinedCallFrameVptr != NULL); // if we have the TLS index, vptr must also be known
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_HANDLE_CNS_RELOC, (ssize_t) inlinedCallFrameVptr,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfFrameVptr,
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_HANDLE_CNS_RELOC, (ssize_t)inlinedCallFrameVptr,
+ compiler->lvaInlinedPInvokeFrameVar, pInfo->inlinedCallFrameInfo.offsetOfFrameVptr,
REG_PINVOKE_SCRATCH);
// Set the GSCookie
- GSCookie gsCookie, * pGSCookie;
+ GSCookie gsCookie, *pGSCookie;
compiler->info.compCompHnd->getGSCookie(&gsCookie, &pGSCookie);
noway_assert(gsCookie != 0); // if we have the TLS index, GS cookie must also be known
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, (ssize_t) gsCookie,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfGSCookie,
- REG_PINVOKE_SCRATCH);
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, (ssize_t)gsCookie, compiler->lvaInlinedPInvokeFrameVar,
+ pInfo->inlinedCallFrameInfo.offsetOfGSCookie, REG_PINVOKE_SCRATCH);
/* Get current frame root (mov reg2, [reg+offsetOfThreadFrame]) and
set next field in frame */
- getEmitter()->emitIns_R_AR (ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- REG_PINVOKE_SCRATCH,
- regTCB,
- pInfo->offsetOfThreadFrame);
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_SCRATCH, regTCB,
+ pInfo->offsetOfThreadFrame);
regTracker.rsTrackRegTrash(REG_PINVOKE_SCRATCH);
- getEmitter()->emitIns_S_R (ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- REG_PINVOKE_SCRATCH,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfFrameLink);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_SCRATCH,
+ compiler->lvaInlinedPInvokeFrameVar, pInfo->inlinedCallFrameInfo.offsetOfFrameLink);
- noway_assert(isFramePointerUsed()); // Setup of Pinvoke frame currently requires an EBP style frame
+ noway_assert(isFramePointerUsed()); // Setup of Pinvoke frame currently requires an EBP style frame
/* set EBP value in frame */
- getEmitter()->emitIns_S_R (ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- genFramePointerReg(),
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfCalleeSavedFP);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, genFramePointerReg(),
+ compiler->lvaInlinedPInvokeFrameVar, pInfo->inlinedCallFrameInfo.offsetOfCalleeSavedFP);
/* reset track field in frame */
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfReturnAddress,
- REG_PINVOKE_SCRATCH);
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaInlinedPInvokeFrameVar,
+ pInfo->inlinedCallFrameInfo.offsetOfReturnAddress, REG_PINVOKE_SCRATCH);
/* get address of our frame */
- getEmitter()->emitIns_R_S (INS_lea,
- EA_PTRSIZE,
- REG_PINVOKE_SCRATCH,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfFrameVptr);
+ getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, REG_PINVOKE_SCRATCH, compiler->lvaInlinedPInvokeFrameVar,
+ pInfo->inlinedCallFrameInfo.offsetOfFrameVptr);
regTracker.rsTrackRegTrash(REG_PINVOKE_SCRATCH);
/* now "push" our N/direct frame */
- getEmitter()->emitIns_AR_R (ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- REG_PINVOKE_SCRATCH,
- regTCB,
- pInfo->offsetOfThreadFrame);
+ getEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_SCRATCH, regTCB,
+ pInfo->offsetOfThreadFrame);
return initRegs;
}
-
/*****************************************************************************
* Unchain the InlinedCallFrame.
* Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node
* or tail call.
*/
-void CodeGen::genPInvokeMethodEpilog()
+void CodeGen::genPInvokeMethodEpilog()
{
noway_assert(compiler->info.compCallUnmanaged);
noway_assert(!compiler->opts.ShouldUsePInvokeHelpers());
@@ -21912,22 +21382,21 @@ void CodeGen::genPInvokeMethodEpilog()
(compiler->compTailCallUsed && (compiler->compCurBB->bbJumpKind == BBJ_THROW)) ||
(compiler->compJmpOpUsed && (compiler->compCurBB->bbFlags & BBF_HAS_JMP)));
- CORINFO_EE_INFO * pInfo = compiler->eeGetEEInfo();
+ CORINFO_EE_INFO* pInfo = compiler->eeGetEEInfo();
noway_assert(compiler->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
getEmitter()->emitDisableRandomNops();
- //debug check to make sure that we're not using ESI and/or EDI across this call, except for
- //compLvFrameListRoot.
+ // debug check to make sure that we're not using ESI and/or EDI across this call, except for
+ // compLvFrameListRoot.
unsigned regTrashCheck = 0;
/* XXX Tue 5/29/2007
* We explicitly add interference for these in CodeGen::rgPredictRegUse. If you change the code
* sequence or registers used, make sure to update the interference for compiler->genReturnLocal.
*/
- LclVarDsc * varDsc = &compiler->lvaTable[compiler->info.compLvFrameListRoot];
- regNumber reg;
- regNumber reg2 = REG_PINVOKE_FRAME;
-
+ LclVarDsc* varDsc = &compiler->lvaTable[compiler->info.compLvFrameListRoot];
+ regNumber reg;
+ regNumber reg2 = REG_PINVOKE_FRAME;
//
// Two cases for epilog invocation:
@@ -21948,7 +21417,8 @@ void CodeGen::genPInvokeMethodEpilog()
{
#if FEATURE_FIXED_OUT_ARGS
// Save the register in the reserved local var slot.
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_TCB, compiler->lvaPInvokeFrameRegSaveVar, 0);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_TCB,
+ compiler->lvaPInvokeFrameRegSaveVar, 0);
#else
inst_RV(INS_push, REG_PINVOKE_TCB, TYP_I_IMPL);
#endif
@@ -21957,7 +21427,8 @@ void CodeGen::genPInvokeMethodEpilog()
{
#if FEATURE_FIXED_OUT_ARGS
// Save the register in the reserved local var slot.
- getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_FRAME, compiler->lvaPInvokeFrameRegSaveVar, REGSIZE_BYTES);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_FRAME,
+ compiler->lvaPInvokeFrameRegSaveVar, REGSIZE_BYTES);
#else
inst_RV(INS_push, REG_PINVOKE_FRAME, TYP_I_IMPL);
#endif
@@ -21976,11 +21447,8 @@ void CodeGen::genPInvokeMethodEpilog()
{
/* mov esi, [tcb address] */
- getEmitter()->emitIns_R_S (ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- REG_PINVOKE_TCB,
- compiler->info.compLvFrameListRoot,
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_TCB, compiler->info.compLvFrameListRoot,
+ 0);
regTracker.rsTrackRegTrash(REG_PINVOKE_TCB);
reg = REG_PINVOKE_TCB;
@@ -21989,31 +21457,23 @@ void CodeGen::genPInvokeMethodEpilog()
/* mov edi, [ebp-frame.next] */
- getEmitter()->emitIns_R_S (ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- reg2,
- compiler->lvaInlinedPInvokeFrameVar,
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, reg2, compiler->lvaInlinedPInvokeFrameVar,
pInfo->inlinedCallFrameInfo.offsetOfFrameLink);
regTracker.rsTrackRegTrash(reg2);
/* mov [esi+offsetOfThreadFrame], edi */
- getEmitter()->emitIns_AR_R (ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- reg2,
- reg,
- pInfo->offsetOfThreadFrame);
+ getEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg2, reg, pInfo->offsetOfThreadFrame);
noway_assert(!(regSet.rsMaskUsed & regTrashCheck));
- if (compiler->genReturnLocal != BAD_VAR_NUM &&
- compiler->lvaTable[compiler->genReturnLocal].lvTracked &&
+ if (compiler->genReturnLocal != BAD_VAR_NUM && compiler->lvaTable[compiler->genReturnLocal].lvTracked &&
compiler->lvaTable[compiler->genReturnLocal].lvRegister)
{
- //really make sure we're not clobbering compiler->genReturnLocal.
- noway_assert(!(genRegMask(compiler->lvaTable[compiler->genReturnLocal].lvRegNum)
- & ( (varDsc->lvRegister ? genRegMask(varDsc->lvRegNum) : 0)
- | RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME)));
+ // really make sure we're not clobbering compiler->genReturnLocal.
+ noway_assert(
+ !(genRegMask(compiler->lvaTable[compiler->genReturnLocal].lvRegNum) &
+ ((varDsc->lvRegister ? genRegMask(varDsc->lvRegNum) : 0) | RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME)));
}
(void)regTrashCheck;
@@ -22025,7 +21485,8 @@ void CodeGen::genPInvokeMethodEpilog()
{
#if FEATURE_FIXED_OUT_ARGS
// Restore the register from the reserved local var slot.
- getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_FRAME, compiler->lvaPInvokeFrameRegSaveVar, REGSIZE_BYTES);
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_FRAME,
+ compiler->lvaPInvokeFrameRegSaveVar, REGSIZE_BYTES);
#else
inst_RV(INS_pop, REG_PINVOKE_FRAME, TYP_I_IMPL);
#endif
@@ -22035,7 +21496,8 @@ void CodeGen::genPInvokeMethodEpilog()
{
#if FEATURE_FIXED_OUT_ARGS
// Restore the register from the reserved local var slot.
- getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_TCB, compiler->lvaPInvokeFrameRegSaveVar, 0);
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_PINVOKE_TCB,
+ compiler->lvaPInvokeFrameRegSaveVar, 0);
#else
inst_RV(INS_pop, REG_PINVOKE_TCB, TYP_I_IMPL);
#endif
@@ -22045,7 +21507,6 @@ void CodeGen::genPInvokeMethodEpilog()
getEmitter()->emitEnableRandomNops();
}
-
/*****************************************************************************
This function emits the call-site prolog for direct calls to unmanaged code.
It does all the necessary setup of the InlinedCallFrame.
@@ -22056,10 +21517,10 @@ void CodeGen::genPInvokeMethodEpilog()
(it could be either enregistered or loaded into one of the scratch registers)
*/
-regNumber CodeGen::genPInvokeCallProlog(LclVarDsc* frameListRoot,
- int argSize,
- CORINFO_METHOD_HANDLE methodToken,
- BasicBlock* returnLabel)
+regNumber CodeGen::genPInvokeCallProlog(LclVarDsc* frameListRoot,
+ int argSize,
+ CORINFO_METHOD_HANDLE methodToken,
+ BasicBlock* returnLabel)
{
// Some stack locals might be 'cached' in registers, we need to trash them
// from the regTracker *and* also ensure the gc tracker does not consider
@@ -22076,9 +21537,8 @@ regNumber CodeGen::genPInvokeCallProlog(LclVarDsc* frameList
deadRegs &= regSet.rsMaskVars;
if (deadRegs)
{
- for (LclVarDsc * varDsc = compiler->lvaTable;
- ((varDsc < (compiler->lvaTable + compiler->lvaCount)) && deadRegs);
- varDsc++ )
+ for (LclVarDsc* varDsc = compiler->lvaTable;
+ ((varDsc < (compiler->lvaTable + compiler->lvaCount)) && deadRegs); varDsc++)
{
if (!varDsc->lvTracked || !varDsc->lvRegister)
continue;
@@ -22105,7 +21565,7 @@ regNumber CodeGen::genPInvokeCallProlog(LclVarDsc* frameList
/* Since we are using the InlinedCallFrame, we should have spilled all
GC pointers to it - even from callee-saved registers */
- noway_assert(((gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur) & ~RBM_ARG_REGS) == 0);
+ noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~RBM_ARG_REGS) == 0);
/* must specify only one of these parameters */
noway_assert((argSize == 0) || (methodToken == NULL));
@@ -22120,26 +21580,23 @@ regNumber CodeGen::genPInvokeCallProlog(LclVarDsc* frameList
*/
- CORINFO_EE_INFO * pInfo = compiler->eeGetEEInfo();
+ CORINFO_EE_INFO* pInfo = compiler->eeGetEEInfo();
noway_assert(compiler->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
-
+
/* mov dword ptr [frame.callSiteTarget], value */
if (methodToken == NULL)
{
/* mov dword ptr [frame.callSiteTarget], argSize */
- instGen_Store_Imm_Into_Lcl(TYP_INT, EA_4BYTE, argSize,
- compiler->lvaInlinedPInvokeFrameVar,
+ instGen_Store_Imm_Into_Lcl(TYP_INT, EA_4BYTE, argSize, compiler->lvaInlinedPInvokeFrameVar,
pInfo->inlinedCallFrameInfo.offsetOfCallTarget);
}
else
{
- void * embedMethHnd, * pEmbedMethHnd;
+ void *embedMethHnd, *pEmbedMethHnd;
- embedMethHnd = (void*)compiler->info.compCompHnd->embedMethodHandle(
- methodToken,
- &pEmbedMethHnd);
+ embedMethHnd = (void*)compiler->info.compCompHnd->embedMethodHandle(methodToken, &pEmbedMethHnd);
noway_assert((!embedMethHnd) != (!pEmbedMethHnd));
@@ -22147,9 +21604,9 @@ regNumber CodeGen::genPInvokeCallProlog(LclVarDsc* frameList
{
/* mov dword ptr [frame.callSiteTarget], "MethodDesc" */
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_HANDLE_CNS_RELOC, (ssize_t) embedMethHnd,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfCallTarget);
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_HANDLE_CNS_RELOC, (ssize_t)embedMethHnd,
+ compiler->lvaInlinedPInvokeFrameVar,
+ pInfo->inlinedCallFrameInfo.offsetOfCallTarget);
}
else
{
@@ -22159,20 +21616,14 @@ regNumber CodeGen::genPInvokeCallProlog(LclVarDsc* frameList
regNumber reg = regSet.rsPickFreeReg();
#if CPU_LOAD_STORE_ARCH
- instGen_Set_Reg_To_Imm (EA_HANDLE_CNS_RELOC,
- reg,
- (ssize_t) pEmbedMethHnd);
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, (ssize_t)pEmbedMethHnd);
getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, reg, reg, 0);
-#else // !CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_R_AI(ins_Load(TYP_I_IMPL), EA_PTR_DSP_RELOC,
- reg, (ssize_t) pEmbedMethHnd);
+#else // !CPU_LOAD_STORE_ARCH
+ getEmitter()->emitIns_R_AI(ins_Load(TYP_I_IMPL), EA_PTR_DSP_RELOC, reg, (ssize_t)pEmbedMethHnd);
#endif // !CPU_LOAD_STORE_ARCH
regTracker.rsTrackRegTrash(reg);
- getEmitter()->emitIns_S_R (ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- reg,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfCallTarget);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, compiler->lvaInlinedPInvokeFrameVar,
+ pInfo->inlinedCallFrameInfo.offsetOfCallTarget);
}
}
@@ -22188,44 +21639,29 @@ regNumber CodeGen::genPInvokeCallProlog(LclVarDsc* frameList
/* mov reg, dword ptr [tcb address] */
- getEmitter()->emitIns_R_S (ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- tcbReg,
- (unsigned)(frameListRoot - compiler->lvaTable),
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, tcbReg,
+ (unsigned)(frameListRoot - compiler->lvaTable), 0);
regTracker.rsTrackRegTrash(tcbReg);
}
#ifdef _TARGET_X86_
/* mov dword ptr [frame.callSiteTracker], esp */
- getEmitter()->emitIns_S_R (ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- REG_SPBASE,
- compiler->lvaInlinedPInvokeFrameVar,
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaInlinedPInvokeFrameVar,
pInfo->inlinedCallFrameInfo.offsetOfCallSiteSP);
#endif // _TARGET_X86_
#if CPU_LOAD_STORE_ARCH
regNumber tmpReg = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(tcbReg));
- getEmitter()->emitIns_J_R (INS_adr,
- EA_PTRSIZE,
- returnLabel,
- tmpReg);
+ getEmitter()->emitIns_J_R(INS_adr, EA_PTRSIZE, returnLabel, tmpReg);
regTracker.rsTrackRegTrash(tmpReg);
- getEmitter()->emitIns_S_R (ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- tmpReg,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfReturnAddress);
-#else // !CPU_LOAD_STORE_ARCH
+ getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, tmpReg, compiler->lvaInlinedPInvokeFrameVar,
+ pInfo->inlinedCallFrameInfo.offsetOfReturnAddress);
+#else // !CPU_LOAD_STORE_ARCH
/* mov dword ptr [frame.callSiteReturnAddress], label */
- getEmitter()->emitIns_J_S (ins_Store(TYP_I_IMPL),
- EA_PTRSIZE,
- returnLabel,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfReturnAddress);
+ getEmitter()->emitIns_J_S(ins_Store(TYP_I_IMPL), EA_PTRSIZE, returnLabel, compiler->lvaInlinedPInvokeFrameVar,
+ pInfo->inlinedCallFrameInfo.offsetOfReturnAddress);
#endif // !CPU_LOAD_STORE_ARCH
#if CPU_LOAD_STORE_ARCH
@@ -22233,19 +21669,11 @@ regNumber CodeGen::genPInvokeCallProlog(LclVarDsc* frameList
noway_assert(tmpReg != tcbReg);
- getEmitter()->emitIns_AR_R(ins_Store(TYP_BYTE),
- EA_1BYTE,
- tmpReg,
- tcbReg,
- pInfo->offsetOfGCState);
-#else // !CPU_LOAD_STORE_ARCH
+ getEmitter()->emitIns_AR_R(ins_Store(TYP_BYTE), EA_1BYTE, tmpReg, tcbReg, pInfo->offsetOfGCState);
+#else // !CPU_LOAD_STORE_ARCH
/* mov byte ptr [tcbReg+offsetOfGcState], 0 */
- getEmitter()->emitIns_I_AR (ins_Store(TYP_BYTE),
- EA_1BYTE,
- 0,
- tcbReg,
- pInfo->offsetOfGCState);
+ getEmitter()->emitIns_I_AR(ins_Store(TYP_BYTE), EA_1BYTE, 0, tcbReg, pInfo->offsetOfGCState);
#endif // !CPU_LOAD_STORE_ARCH
return tcbReg;
@@ -22275,18 +21703,17 @@ regNumber CodeGen::genPInvokeCallProlog(LclVarDsc* frameList
@f:
*/
-void CodeGen::genPInvokeCallEpilog(LclVarDsc * frameListRoot,
- regMaskTP retVal)
+void CodeGen::genPInvokeCallEpilog(LclVarDsc* frameListRoot, regMaskTP retVal)
{
- BasicBlock * clab_nostop;
- CORINFO_EE_INFO * pInfo = compiler->eeGetEEInfo();
- regNumber reg2;
- regNumber reg3;
+ BasicBlock* clab_nostop;
+ CORINFO_EE_INFO* pInfo = compiler->eeGetEEInfo();
+ regNumber reg2;
+ regNumber reg3;
#ifdef _TARGET_ARM_
reg3 = REG_R3;
#else
- reg3 = REG_EDX;
+ reg3 = REG_EDX;
#endif
getEmitter()->emitDisableRandomNops();
@@ -22309,38 +21736,27 @@ void CodeGen::genPInvokeCallEpilog(LclVarDsc * frameListRoot,
reg2 = REG_ECX;
#endif
- getEmitter()->emitIns_R_S (ins_Load(TYP_I_IMPL),
- EA_PTRSIZE,
- reg2,
- (unsigned)(frameListRoot - compiler->lvaTable),
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, reg2,
+ (unsigned)(frameListRoot - compiler->lvaTable), 0);
regTracker.rsTrackRegTrash(reg2);
}
-
#ifdef _TARGET_ARM_
/* mov r3, 1 */
/* strb [r2+offsetOfGcState], r3 */
instGen_Set_Reg_To_Imm(EA_PTRSIZE, reg3, 1);
- getEmitter()->emitIns_AR_R (ins_Store(TYP_BYTE),
- EA_1BYTE,
- reg3,
- reg2,
- pInfo->offsetOfGCState);
+ getEmitter()->emitIns_AR_R(ins_Store(TYP_BYTE), EA_1BYTE, reg3, reg2, pInfo->offsetOfGCState);
#else
/* mov byte ptr [tcb+offsetOfGcState], 1 */
- getEmitter()->emitIns_I_AR (ins_Store(TYP_BYTE),
- EA_1BYTE,
- 1,
- reg2,
- pInfo->offsetOfGCState);
+ getEmitter()->emitIns_I_AR(ins_Store(TYP_BYTE), EA_1BYTE, 1, reg2, pInfo->offsetOfGCState);
#endif
/* test global flag (we return to managed code) */
- LONG * addrOfCaptureThreadGlobal, **pAddrOfCaptureThreadGlobal;
+ LONG *addrOfCaptureThreadGlobal, **pAddrOfCaptureThreadGlobal;
- addrOfCaptureThreadGlobal = compiler->info.compCompHnd->getAddrOfCaptureThreadGlobal((void**) &pAddrOfCaptureThreadGlobal);
+ addrOfCaptureThreadGlobal =
+ compiler->info.compCompHnd->getAddrOfCaptureThreadGlobal((void**)&pAddrOfCaptureThreadGlobal);
noway_assert((!addrOfCaptureThreadGlobal) != (!pAddrOfCaptureThreadGlobal));
// Can we directly use addrOfCaptureThreadGlobal?
@@ -22348,52 +21764,26 @@ void CodeGen::genPInvokeCallEpilog(LclVarDsc * frameListRoot,
if (addrOfCaptureThreadGlobal)
{
#ifdef _TARGET_ARM_
- instGen_Set_Reg_To_Imm (EA_HANDLE_CNS_RELOC,
- reg3,
- (ssize_t)addrOfCaptureThreadGlobal);
- getEmitter()->emitIns_R_R_I (ins_Load(TYP_INT),
- EA_4BYTE,
- reg3,
- reg3,
- 0);
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg3, (ssize_t)addrOfCaptureThreadGlobal);
+ getEmitter()->emitIns_R_R_I(ins_Load(TYP_INT), EA_4BYTE, reg3, reg3, 0);
regTracker.rsTrackRegTrash(reg3);
- getEmitter()->emitIns_R_I (INS_cmp,
- EA_4BYTE,
- reg3,
- 0);
+ getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, reg3, 0);
#else
- getEmitter()->emitIns_C_I (INS_cmp,
- EA_PTR_DSP_RELOC,
- FLD_GLOBAL_DS,
- (ssize_t) addrOfCaptureThreadGlobal,
- 0);
+ getEmitter()->emitIns_C_I(INS_cmp, EA_PTR_DSP_RELOC, FLD_GLOBAL_DS, (ssize_t)addrOfCaptureThreadGlobal, 0);
#endif
}
else
{
#ifdef _TARGET_ARM_
- instGen_Set_Reg_To_Imm (EA_HANDLE_CNS_RELOC,
- reg3,
- (ssize_t)pAddrOfCaptureThreadGlobal);
- getEmitter()->emitIns_R_R_I (ins_Load(TYP_INT),
- EA_4BYTE,
- reg3,
- reg3,
- 0);
+ instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg3, (ssize_t)pAddrOfCaptureThreadGlobal);
+ getEmitter()->emitIns_R_R_I(ins_Load(TYP_INT), EA_4BYTE, reg3, reg3, 0);
regTracker.rsTrackRegTrash(reg3);
- getEmitter()->emitIns_R_R_I (ins_Load(TYP_INT),
- EA_4BYTE,
- reg3,
- reg3,
- 0);
- getEmitter()->emitIns_R_I (INS_cmp,
- EA_4BYTE,
- reg3,
- 0);
+ getEmitter()->emitIns_R_R_I(ins_Load(TYP_INT), EA_4BYTE, reg3, reg3, 0);
+ getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, reg3, 0);
#else // !_TARGET_ARM_
getEmitter()->emitIns_R_AI(ins_Load(TYP_I_IMPL), EA_PTR_DSP_RELOC, REG_ECX,
- (ssize_t)pAddrOfCaptureThreadGlobal);
+ (ssize_t)pAddrOfCaptureThreadGlobal);
regTracker.rsTrackRegTrash(REG_ECX);
getEmitter()->emitIns_I_AR(INS_cmp, EA_4BYTE, 0, REG_ECX, 0);
@@ -22409,10 +21799,10 @@ void CodeGen::genPInvokeCallEpilog(LclVarDsc * frameListRoot,
inst_JMP(jmpEqual, clab_nostop);
#ifdef _TARGET_ARM_
- // The helper preserves the return value on ARM
+// The helper preserves the return value on ARM
#else
/* save return value (if necessary) */
- if (retVal != RBM_NONE)
+ if (retVal != RBM_NONE)
{
if (retVal == RBM_INTRET || retVal == RBM_LNGRET)
{
@@ -22432,16 +21822,15 @@ void CodeGen::genPInvokeCallEpilog(LclVarDsc * frameListRoot,
/* emit the call to the EE-helper that stops for GC (or other reasons) */
- genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC,
- 0, /* argSize */
- EA_UNKNOWN); /* retSize */
+ genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, /* argSize */
+ EA_UNKNOWN); /* retSize */
#ifdef _TARGET_ARM_
- // The helper preserves the return value on ARM
+// The helper preserves the return value on ARM
#else
/* restore return value (if necessary) */
- if (retVal != RBM_NONE)
+ if (retVal != RBM_NONE)
{
if (retVal == RBM_INTRET || retVal == RBM_LNGRET)
{
@@ -22473,9 +21862,8 @@ void CodeGen::genPInvokeCallEpilog(LclVarDsc * frameListRoot,
/* mov dword ptr [frame.callSiteTracker], 0 */
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0,
- compiler->lvaInlinedPInvokeFrameVar,
- pInfo->inlinedCallFrameInfo.offsetOfReturnAddress);
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaInlinedPInvokeFrameVar,
+ pInfo->inlinedCallFrameInfo.offsetOfReturnAddress);
getEmitter()->emitEnableRandomNops();
}
@@ -22486,7 +21874,7 @@ void CodeGen::genPInvokeCallEpilog(LclVarDsc * frameListRoot,
* TRACKING OF FLAGS
*****************************************************************************/
-void CodeGen::genFlagsEqualToNone()
+void CodeGen::genFlagsEqualToNone()
{
genFlagsEqReg = REG_NA;
genFlagsEqVar = (unsigned)-1;
@@ -22499,8 +21887,7 @@ void CodeGen::genFlagsEqualToNone()
* contents of the given register.
*/
-void CodeGen::genFlagsEqualToReg(GenTreePtr tree,
- regNumber reg)
+void CodeGen::genFlagsEqualToReg(GenTreePtr tree, regNumber reg)
{
genFlagsEqLoc.CaptureLocation(getEmitter());
genFlagsEqReg = reg;
@@ -22524,8 +21911,7 @@ void CodeGen::genFlagsEqualToReg(GenTreePtr tree,
* contents of the given local variable.
*/
-void CodeGen::genFlagsEqualToVar(GenTreePtr tree,
- unsigned var)
+void CodeGen::genFlagsEqualToVar(GenTreePtr tree, unsigned var)
{
genFlagsEqLoc.CaptureLocation(getEmitter());
genFlagsEqVar = var;
@@ -22552,9 +21938,9 @@ void CodeGen::genFlagsEqualToVar(GenTreePtr tree,
* true .. the zero flag (ZF) and sign flag (SF) is set
*/
-bool CodeGen::genFlagsAreReg(regNumber reg)
+bool CodeGen::genFlagsAreReg(regNumber reg)
{
- if ((genFlagsEqReg == reg) && genFlagsEqLoc.IsCurrentLocation(getEmitter()))
+ if ((genFlagsEqReg == reg) && genFlagsEqLoc.IsCurrentLocation(getEmitter()))
{
return true;
}
@@ -22562,9 +21948,9 @@ bool CodeGen::genFlagsAreReg(regNumber reg)
return false;
}
-bool CodeGen::genFlagsAreVar(unsigned var)
+bool CodeGen::genFlagsAreVar(unsigned var)
{
- if ((genFlagsEqVar == var) && genFlagsEqLoc.IsCurrentLocation(getEmitter()))
+ if ((genFlagsEqVar == var) && genFlagsEqLoc.IsCurrentLocation(getEmitter()))
{
return true;
}
@@ -22576,8 +21962,7 @@ bool CodeGen::genFlagsAreVar(unsigned var)
* This utility function returns true iff the execution path from "from"
* (inclusive) to "to" (exclusive) contains a death of the given var
*/
-bool
-CodeGen::genContainsVarDeath(GenTreePtr from, GenTreePtr to, unsigned varNum)
+bool CodeGen::genContainsVarDeath(GenTreePtr from, GenTreePtr to, unsigned varNum)
{
GenTreePtr tree;
for (tree = from; tree != NULL && tree != to; tree = tree->gtNext)
@@ -22585,8 +21970,9 @@ CodeGen::genContainsVarDeath(GenTreePtr from, GenTreePtr to, unsigned varNum)
if (tree->IsLocal() && (tree->gtFlags & GTF_VAR_DEATH))
{
unsigned dyingVarNum = tree->gtLclVarCommon.gtLclNum;
- if (dyingVarNum == varNum) return true;
- LclVarDsc * varDsc = &(compiler->lvaTable[varNum]);
+ if (dyingVarNum == varNum)
+ return true;
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
if (varDsc->lvPromoted)
{
assert(varDsc->lvType == TYP_STRUCT);
diff --git a/src/jit/codegenlinear.h b/src/jit/codegenlinear.h
index 2fbb8d004c..eb4b7cc0db 100644
--- a/src/jit/codegenlinear.h
+++ b/src/jit/codegenlinear.h
@@ -10,205 +10,210 @@
#ifndef LEGACY_BACKEND // Not necessary (it's this way in the #include location), but helpful to IntelliSense
- void genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree);
+void genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree);
- void genCodeForTreeNode(GenTreePtr treeNode);
+void genCodeForTreeNode(GenTreePtr treeNode);
- void genCodeForBinary(GenTreePtr treeNode);
+void genCodeForBinary(GenTreePtr treeNode);
- void genCodeForDivMod(GenTreeOp* treeNode);
+void genCodeForDivMod(GenTreeOp* treeNode);
- void genCodeForMulHi(GenTreeOp* treeNode);
+void genCodeForMulHi(GenTreeOp* treeNode);
- void genLeaInstruction(GenTreeAddrMode *lea);
+void genLeaInstruction(GenTreeAddrMode* lea);
- void genSetRegToCond(regNumber dstReg, GenTreePtr tree);
+void genSetRegToCond(regNumber dstReg, GenTreePtr tree);
- void genIntToIntCast(GenTreePtr treeNode);
+void genIntToIntCast(GenTreePtr treeNode);
- void genFloatToFloatCast(GenTreePtr treeNode);
+void genFloatToFloatCast(GenTreePtr treeNode);
- void genFloatToIntCast(GenTreePtr treeNode);
+void genFloatToIntCast(GenTreePtr treeNode);
- void genIntToFloatCast(GenTreePtr treeNode);
+void genIntToFloatCast(GenTreePtr treeNode);
- void genCkfinite(GenTreePtr treeNode);
+void genCkfinite(GenTreePtr treeNode);
- void genIntrinsic(GenTreePtr treeNode);
+void genIntrinsic(GenTreePtr treeNode);
- void genPutArgStk(GenTreePtr treeNode);
- unsigned getBaseVarForPutArgStk(GenTreePtr treeNode);
+void genPutArgStk(GenTreePtr treeNode);
+unsigned getBaseVarForPutArgStk(GenTreePtr treeNode);
#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
- unsigned getFirstArgWithStackSlot();
+unsigned getFirstArgWithStackSlot();
#endif // _TARGET_XARCH_ || _TARGET_ARM64_
- void genCompareFloat(GenTreePtr treeNode);
+void genCompareFloat(GenTreePtr treeNode);
- void genCompareInt(GenTreePtr treeNode);
+void genCompareInt(GenTreePtr treeNode);
#if !defined(_TARGET_64BIT_)
- void genCompareLong(GenTreePtr treeNode);
- void genJTrueLong(GenTreePtr treeNode);
+void genCompareLong(GenTreePtr treeNode);
+void genJTrueLong(GenTreePtr treeNode);
#endif
#ifdef FEATURE_SIMD
- enum SIMDScalarMoveType
- {
-        SMT_ZeroInitUpper, // zero initialize target upper bits
- SMT_ZeroInitUpper_SrcHasUpperZeros, // zero initialize target upper bits; source upper bits are known to be zero
- SMT_PreserveUpper // preserve target upper bits
- };
-
- instruction getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned *ival = nullptr);
- void genSIMDScalarMove(var_types type, regNumber target, regNumber src, SIMDScalarMoveType moveType);
- void genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg);
- void genSIMDIntrinsicInit(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicInitN(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicInitArray(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode);
- void genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode);
-
- void genSIMDIntrinsic(GenTreeSIMD* simdNode);
- void genSIMDCheck(GenTree* treeNode);
-
- // TYP_SIMD12 (i.e Vector3 of size 12 bytes) is not a hardware supported size and requires
- // two reads/writes on 64-bit targets. These routines abstract reading/writing of Vector3
- // values through an indirection. Note that Vector3 locals allocated on stack would have
- // their size rounded to TARGET_POINTER_SIZE (which is 8 bytes on 64-bit targets) and hence
- // Vector3 locals could be treated as TYP_SIMD16 while reading/writing.
- void genStoreIndTypeSIMD12(GenTree* treeNode);
- void genStoreLclFldTypeSIMD12(GenTree* treeNode);
- void genLoadIndTypeSIMD12(GenTree* treeNode);
- void genLoadLclFldTypeSIMD12(GenTree* treeNode);
+enum SIMDScalarMoveType
+{
+    SMT_ZeroInitUpper, // zero initialize target upper bits
+ SMT_ZeroInitUpper_SrcHasUpperZeros, // zero initialize target upper bits; source upper bits are known to be zero
+ SMT_PreserveUpper // preserve target upper bits
+};
+
+instruction getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival = nullptr);
+void genSIMDScalarMove(var_types type, regNumber target, regNumber src, SIMDScalarMoveType moveType);
+void genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg);
+void genSIMDIntrinsicInit(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicInitN(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicInitArray(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode);
+void genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode);
+
+void genSIMDIntrinsic(GenTreeSIMD* simdNode);
+void genSIMDCheck(GenTree* treeNode);
+
+// TYP_SIMD12 (i.e Vector3 of size 12 bytes) is not a hardware supported size and requires
+// two reads/writes on 64-bit targets. These routines abstract reading/writing of Vector3
+// values through an indirection. Note that Vector3 locals allocated on stack would have
+// their size rounded to TARGET_POINTER_SIZE (which is 8 bytes on 64-bit targets) and hence
+// Vector3 locals could be treated as TYP_SIMD16 while reading/writing.
+void genStoreIndTypeSIMD12(GenTree* treeNode);
+void genStoreLclFldTypeSIMD12(GenTree* treeNode);
+void genLoadIndTypeSIMD12(GenTree* treeNode);
+void genLoadLclFldTypeSIMD12(GenTree* treeNode);
#endif // FEATURE_SIMD
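
To make the TYP_SIMD12 note above concrete, here is a sketch of the two-part move it describes (illustration only; Vector3 and StoreVector3 are invented names, not the helpers declared here): a 12-byte value is written as one 8-byte chunk plus one 4-byte chunk, which is why 64-bit targets need dedicated load/store routines for it.

    #include <cstdint>
    #include <cstring>

    struct Vector3 { float x, y, z; }; // 12 bytes - not a natural power-of-two SIMD size

    // Store a Vector3 through a pointer as two writes, as described above.
    void StoreVector3(uint8_t* dst, const Vector3& v)
    {
        std::memcpy(dst,     &v.x, 8); // x and y in a single 8-byte move
        std::memcpy(dst + 8, &v.z, 4); // z in a trailing 4-byte move
    }
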
#if !defined(_TARGET_64BIT_)
- // CodeGen for Long Ints
+// CodeGen for Long Ints
- void genStoreLongLclVar(GenTree* treeNode);
+void genStoreLongLclVar(GenTree* treeNode);
#endif // !defined(_TARGET_64BIT_)
- void genProduceReg(GenTree *tree);
+void genProduceReg(GenTree* tree);
+
+void genUnspillRegIfNeeded(GenTree* tree);
- void genUnspillRegIfNeeded(GenTree* tree);
-
- regNumber genConsumeReg(GenTree *tree);
+regNumber genConsumeReg(GenTree* tree);
- void genConsumeRegAndCopy(GenTree *tree, regNumber needReg);
+void genConsumeRegAndCopy(GenTree* tree, regNumber needReg);
- void genConsumeIfReg(GenTreePtr tree)
+void genConsumeIfReg(GenTreePtr tree)
+{
+ if (!tree->isContained())
{
- if (!tree->isContained())
- (void) genConsumeReg(tree);
+ (void)genConsumeReg(tree);
}
+}
- void genRegCopy(GenTreePtr tree);
+void genRegCopy(GenTreePtr tree);
- void genTransferRegGCState(regNumber dst, regNumber src);
+void genTransferRegGCState(regNumber dst, regNumber src);
- void genConsumeAddress(GenTree* addr);
+void genConsumeAddress(GenTree* addr);
- void genConsumeAddrMode(GenTreeAddrMode *mode);
+void genConsumeAddrMode(GenTreeAddrMode* mode);
- void genConsumeBlockOp(GenTreeBlkOp* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg);
+void genConsumeBlockOp(GenTreeBlkOp* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg);
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- void genConsumePutStructArgStk(GenTreePutArgStk* putArgStkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg, unsigned baseVarNum);
+void genConsumePutStructArgStk(
+ GenTreePutArgStk* putArgStkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg, unsigned baseVarNum);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- void genConsumeRegs(GenTree* tree);
+void genConsumeRegs(GenTree* tree);
- void genConsumeOperands(GenTreeOp* tree);
+void genConsumeOperands(GenTreeOp* tree);
- void genEmitGSCookieCheck(bool pushReg);
+void genEmitGSCookieCheck(bool pushReg);
- void genSetRegToIcon (regNumber reg,
- ssize_t val,
- var_types type = TYP_INT,
- insFlags flags = INS_FLAGS_DONT_CARE);
+void genSetRegToIcon(regNumber reg, ssize_t val, var_types type = TYP_INT, insFlags flags = INS_FLAGS_DONT_CARE);
- void genCodeForShift (GenTreePtr tree);
+void genCodeForShift(GenTreePtr tree);
#ifdef _TARGET_XARCH_
- void genCodeForShiftRMW (GenTreeStoreInd* storeInd);
+void genCodeForShiftRMW(GenTreeStoreInd* storeInd);
#endif // _TARGET_XARCH_
- void genCodeForCpObj (GenTreeCpObj* cpObjNode);
+void genCodeForCpObj(GenTreeCpObj* cpObjNode);
- void genCodeForCpBlk (GenTreeCpBlk* cpBlkNode);
+void genCodeForCpBlk(GenTreeCpBlk* cpBlkNode);
- void genCodeForCpBlkRepMovs (GenTreeCpBlk* cpBlkNode);
+void genCodeForCpBlkRepMovs(GenTreeCpBlk* cpBlkNode);
- void genCodeForCpBlkUnroll (GenTreeCpBlk* cpBlkNode);
+void genCodeForCpBlkUnroll(GenTreeCpBlk* cpBlkNode);
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- void genPutStructArgStk(GenTreePtr treeNode, unsigned baseVarNum);
+void genPutStructArgStk(GenTreePtr treeNode, unsigned baseVarNum);
- void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode, unsigned baseVarNum);
- void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode, unsigned baseVarNum);
+void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode, unsigned baseVarNum);
+void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode, unsigned baseVarNum);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset);
+void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset);
+
+void genCodeForStoreOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset);
- void genCodeForStoreOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset);
+void genCodeForInitBlk(GenTreeInitBlk* initBlkNode);
- void genCodeForInitBlk (GenTreeInitBlk* initBlkNode);
+void genCodeForInitBlkRepStos(GenTreeInitBlk* initBlkNode);
- void genCodeForInitBlkRepStos (GenTreeInitBlk* initBlkNode);
+void genCodeForInitBlkUnroll(GenTreeInitBlk* initBlkNode);
- void genCodeForInitBlkUnroll (GenTreeInitBlk* initBlkNode);
+void genJumpTable(GenTree* tree);
- void genJumpTable(GenTree* tree);
+void genTableBasedSwitch(GenTree* tree);
- void genTableBasedSwitch(GenTree* tree);
+void genCodeForArrIndex(GenTreeArrIndex* treeNode);
- void genCodeForArrIndex (GenTreeArrIndex* treeNode);
+void genCodeForArrOffset(GenTreeArrOffs* treeNode);
- void genCodeForArrOffset (GenTreeArrOffs* treeNode);
+instruction genGetInsForOper(genTreeOps oper, var_types type);
- instruction genGetInsForOper (genTreeOps oper, var_types type);
+void genStoreInd(GenTreePtr node);
- void genStoreInd(GenTreePtr node);
+bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data);
- bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data);
+void genCallInstruction(GenTreePtr call);
- void genCallInstruction(GenTreePtr call);
-
- void genJmpMethod(GenTreePtr jmp);
+void genJmpMethod(GenTreePtr jmp);
- void genMultiRegCallStoreToLocal(GenTreePtr treeNode);
+void genMultiRegCallStoreToLocal(GenTreePtr treeNode);
-    // Deals with codegen for multi-register struct returns.
- bool isStructReturn(GenTreePtr treeNode);
- void genStructReturn(GenTreePtr treeNode);
+// Deals with codegen for multi-register struct returns.
+bool isStructReturn(GenTreePtr treeNode);
+void genStructReturn(GenTreePtr treeNode);
- // Codegen for GT_RETURN.
- void genReturn(GenTreePtr treeNode);
+// Codegen for GT_RETURN.
+void genReturn(GenTreePtr treeNode);
- void genLclHeap(GenTreePtr tree);
+void genLclHeap(GenTreePtr tree);
- bool genIsRegCandidateLocal (GenTreePtr tree)
+bool genIsRegCandidateLocal(GenTreePtr tree)
+{
+ if (!tree->IsLocal())
{
- if (!tree->IsLocal()) return false;
- const LclVarDsc * varDsc = &compiler->lvaTable[tree->gtLclVarCommon.gtLclNum];
- return(varDsc->lvIsRegCandidate());
+ return false;
}
+ const LclVarDsc* varDsc = &compiler->lvaTable[tree->gtLclVarCommon.gtLclNum];
+ return (varDsc->lvIsRegCandidate());
+}
#ifdef DEBUG
- GenTree* lastConsumedNode;
- void genCheckConsumeNode(GenTree* treeNode);
-#else // !DEBUG
- inline void genCheckConsumeNode(GenTree* treeNode) {}
+GenTree* lastConsumedNode;
+void genCheckConsumeNode(GenTree* treeNode);
+#else // !DEBUG
+inline void genCheckConsumeNode(GenTree* treeNode)
+{
+}
#endif // DEBUG
#endif // !LEGACY_BACKEND
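For readers skimming the header diff above: genCheckConsumeNode shows a common DEBUG-only hook pattern, where the checking routine is compiled only into DEBUG builds and release builds get an empty inline stub so call sites stay free of #ifdefs. A minimal standalone sketch of that idiom (the names below are placeholders, not JIT identifiers):

// Minimal sketch of the DEBUG-only hook idiom seen above; 'Node' and
// 'CheckConsumed' are illustrative names, not part of the JIT.
struct Node;

#ifdef DEBUG
void CheckConsumed(Node* node); // real verification, DEBUG builds only
#else
inline void CheckConsumed(Node*) // release builds: empty stub the compiler drops
{
}
#endif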
diff --git a/src/jit/codegenxarch.cpp b/src/jit/codegenxarch.cpp
index 47060174c1..1be6bb7ea4 100755
--- a/src/jit/codegenxarch.cpp
+++ b/src/jit/codegenxarch.cpp
@@ -43,10 +43,10 @@ regNumber CodeGenInterface::genGetAssignedReg(GenTreePtr tree)
// Assumptions:
// The lclVar must be a register candidate (lvRegCandidate)
-void CodeGen::genSpillVar(GenTreePtr tree)
+void CodeGen::genSpillVar(GenTreePtr tree)
{
- unsigned varNum = tree->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &(compiler->lvaTable[varNum]);
+ unsigned varNum = tree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
assert(varDsc->lvIsRegCandidate());
@@ -56,24 +56,26 @@ void CodeGen::genSpillVar(GenTreePtr tree)
{
var_types lclTyp = varDsc->TypeGet();
if (varDsc->lvNormalizeOnStore())
+ {
lclTyp = genActualType(lclTyp);
+ }
emitAttr size = emitTypeSize(lclTyp);
bool restoreRegVar = false;
- if (tree->gtOper == GT_REG_VAR)
+ if (tree->gtOper == GT_REG_VAR)
{
tree->SetOper(GT_LCL_VAR);
restoreRegVar = true;
}
// mask off the flag to generate the right spill code, then bring it back
- tree->gtFlags &= ~GTF_REG_VAL;
+ tree->gtFlags &= ~GTF_REG_VAL;
instruction storeIns = ins_Store(tree->TypeGet(), compiler->isSIMDTypeLocalAligned(varNum));
#if CPU_LONG_USES_REGPAIR
if (varTypeIsMultiReg(tree))
{
- assert(varDsc->lvRegNum == genRegPairLo(tree->gtRegPair));
+ assert(varDsc->lvRegNum == genRegPairLo(tree->gtRegPair));
assert(varDsc->lvOtherReg == genRegPairHi(tree->gtRegPair));
regNumber regLo = genRegPairLo(tree->gtRegPair);
regNumber regHi = genRegPairHi(tree->gtRegPair);
@@ -86,7 +88,7 @@ void CodeGen::genSpillVar(GenTreePtr tree)
assert(varDsc->lvRegNum == tree->gtRegNum);
inst_TT_RV(storeIns, tree, tree->gtRegNum, 0, size);
}
- tree->gtFlags |= GTF_REG_VAL;
+ tree->gtFlags |= GTF_REG_VAL;
if (restoreRegVar)
{
@@ -110,10 +112,9 @@ void CodeGen::genSpillVar(GenTreePtr tree)
#endif
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
-
}
- tree->gtFlags &= ~GTF_SPILL;
+ tree->gtFlags &= ~GTF_SPILL;
varDsc->lvRegNum = REG_STK;
if (varTypeIsMultiReg(tree))
{
@@ -122,13 +123,12 @@ void CodeGen::genSpillVar(GenTreePtr tree)
}
// inline
-void CodeGenInterface::genUpdateVarReg(LclVarDsc * varDsc, GenTreePtr tree)
+void CodeGenInterface::genUpdateVarReg(LclVarDsc* varDsc, GenTreePtr tree)
{
assert(tree->OperIsScalarLocal() || (tree->gtOper == GT_COPY));
varDsc->lvRegNum = tree->gtRegNum;
}
-
/*****************************************************************************/
/*****************************************************************************/
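One detail worth calling out from the genSpillVar hunks above: for locals marked normalize-on-store, the spill store is widened to the actual type, so small integer locals land on the stack as a full 32-bit value. A hedged standalone sketch of that widening rule (toy enum and helper, not the JIT's var_types):

// Toy illustration of the "widen small ints before the spill store" rule above;
// the enum and helper are stand-ins, not JIT types.
enum SketchType { SK_BYTE, SK_SHORT, SK_INT, SK_LONG };

inline SketchType ActualTypeSketch(SketchType t)
{
    // small integer types are stored as a full 32-bit int so later loads
    // do not have to re-normalize the value
    return (t == SK_BYTE || t == SK_SHORT) ? SK_INT : t;
}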
@@ -137,10 +137,7 @@ void CodeGenInterface::genUpdateVarReg(LclVarDsc * varDsc, GenTre
* Generate code that will set the given register to the integer constant.
*/
-void CodeGen::genSetRegToIcon(regNumber reg,
- ssize_t val,
- var_types type,
- insFlags flags)
+void CodeGen::genSetRegToIcon(regNumber reg, ssize_t val, var_types type, insFlags flags)
{
// Reg cannot be a FP reg
assert(!genIsValidFloatReg(reg));
@@ -161,16 +158,15 @@ void CodeGen::genSetRegToIcon(regNumber reg,
}
}
-
/*****************************************************************************
*
* Generate code to check that the GS cookie wasn't thrashed by a buffer
* overrun. If pushReg is true, preserve all registers around code sequence.
- * Otherwise ECX could be modified.
+ * Otherwise ECX could be modified.
*
* Implementation Note: pushReg = true, in case of tail calls.
*/
-void CodeGen::genEmitGSCookieCheck(bool pushReg)
+void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
@@ -190,11 +186,11 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
retTypeDesc.InitializeLongReturnType(compiler);
}
- else // we must have a struct return type
+ else // we must have a struct return type
{
retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass);
}
-
+
unsigned regCount = retTypeDesc.GetReturnRegCount();
// Only x86 and x64 Unix ABI allows multi-reg return and
@@ -220,14 +216,14 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
else
{
#ifdef _TARGET_AMD64_
- // For x64, structs that are not returned in registers are always
+ // For x64, structs that are not returned in registers are always
// returned in implicit RetBuf. If we reached here, we should not have
// a RetBuf and the return type should not be a struct.
assert(compiler->info.compRetBuffArg == BAD_VAR_NUM);
assert(!varTypeIsStruct(compiler->info.compRetNativeType));
#endif // _TARGET_AMD64_
- // For x86 Windows we can't make such assertions since we generate code for returning of
+ // For x86 Windows we can't make such assertions since we generate code for returning of
// the RetBuf in REG_INTRET only when the ProfilerHook is enabled. Otherwise
// compRetNativeType could be TYP_STRUCT.
gcInfo.gcMarkRegPtrVal(REG_INTRET, compiler->info.compRetNativeType);
@@ -238,9 +234,9 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
if (!pushReg)
{
// Non-tail call: we can use any callee trash register that is not
- // a return register or contain 'this' pointer (keep alive this), since
+ // a return register or contain 'this' pointer (keep alive this), since
// we are generating GS cookie check after a GT_RETURN block.
- // Note: On Amd64 System V RDX is an arg register - REG_ARG_2 - as well
+ // Note: On Amd64 System V RDX is an arg register - REG_ARG_2 - as well
// as return register for two-register-returned structs.
if (compiler->lvaKeepAliveAndReportThis() && compiler->lvaTable[compiler->info.compThisArg].lvRegister &&
(compiler->lvaTable[compiler->info.compThisArg].lvRegNum == REG_ARG_0))
@@ -257,7 +253,7 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
#ifdef _TARGET_X86_
NYI_X86("Tail calls from methods that need GS check");
regGSCheck = REG_NA;
-#else // !_TARGET_X86_
+#else // !_TARGET_X86_
// Tail calls from methods that need GS check: We need to preserve registers while
// emitting GS cookie check for a tail prefixed call or a jmp. To emit GS cookie
// check, we might need a register. This won't be an issue for jmp calls for the
@@ -277,7 +273,7 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
// There are two calls that use R11: VSD and calli pinvokes with cookie param. Tail
// prefix on pinvokes is ignored. That is, options 2 and 3 will allow tail prefixed
// VSD calls from methods that need GS check.
- //
+ //
// Tail prefixed calls: Right now for Jit64 compat, method requiring GS cookie check
// ignores tail prefix. In future, if we intend to support tail calls from such a method,
// consider one of the options mentioned above. For now adding an assert that we don't
@@ -285,8 +281,8 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
noway_assert(!compiler->compTailCallUsed);
// Jmp calls: specify method handle using which JIT queries VM for its entry point
- // address and hence it can neither be a VSD call nor PInvoke calli with cookie
- // parameter. Therefore, in case of jmp calls it is safe to use R11.
+ // address and hence it can neither be a VSD call nor PInvoke calli with cookie
+ // parameter. Therefore, in case of jmp calls it is safe to use R11.
regGSCheck = REG_R11;
#endif // !_TARGET_X86_
}
@@ -296,13 +292,13 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
// If GS cookie value fits within 32-bits we can use 'cmp mem64, imm32'.
// Otherwise, load the value into a reg and use 'cmp mem64, reg64'.
if ((int)compiler->gsGlobalSecurityCookieVal != (ssize_t)compiler->gsGlobalSecurityCookieVal)
- {
+ {
genSetRegToIcon(regGSCheck, compiler->gsGlobalSecurityCookieVal, TYP_I_IMPL);
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
else
{
- getEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
+ getEmitter()->emitIns_S_I(INS_cmp, EA_PTRSIZE, compiler->lvaGSSecurityCookie, 0,
(int)compiler->gsGlobalSecurityCookieVal);
}
}
@@ -314,8 +310,8 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, regGSCheck, compiler->lvaGSSecurityCookie, 0);
}
- BasicBlock *gsCheckBlk = genCreateTempLabel();
- emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ BasicBlock* gsCheckBlk = genCreateTempLabel();
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
inst_JMP(jmpEqual, gsCheckBlk);
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN);
genDefineTempLabel(gsCheckBlk);
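Putting the pieces of genEmitGSCookieCheck together: the emitted pattern compares the on-stack cookie slot against the expected value, using the imm32 form when the value survives truncation to a signed 32-bit immediate and a scratch register otherwise, then jumps over a fail-fast helper call when they match. A hedged, standalone sketch of the immediate-fit test and the resulting shape:

#include <cstdint>

// Sketch of the "does the cookie fit an imm32?" test used above; plain C++,
// not emitter code. Register and label names in the comments are illustrative.
inline bool FitsInSignedImm32(int64_t cookieVal)
{
    return static_cast<int64_t>(static_cast<int32_t>(cookieVal)) == cookieVal;
}

// Resulting shape (pseudo-assembly):
//   fits:      cmp qword ptr [cookieSlot], imm32
//   otherwise: mov rScratch, cookieVal
//              cmp qword ptr [cookieSlot], rScratch
//   je  ok
//   call CORINFO_HELP_FAIL_FAST
// ok: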
@@ -326,18 +322,18 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
* Generate code for all the basic blocks in the function.
*/
-void CodeGen::genCodeForBBlist()
+void CodeGen::genCodeForBBlist()
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- unsigned savedStkLvl;
+ unsigned savedStkLvl;
-#ifdef DEBUG
- genInterruptibleUsed = true;
- unsigned stmtNum = 0;
- UINT64 totalCostEx = 0;
- UINT64 totalCostSz = 0;
+#ifdef DEBUG
+ genInterruptibleUsed = true;
+ unsigned stmtNum = 0;
+ UINT64 totalCostEx = 0;
+ UINT64 totalCostSz = 0;
// You have to be careful if you create basic blocks from now on
compiler->fgSafeBasicBlockCreation = false;
@@ -358,7 +354,8 @@ void CodeGen::genCodeForBBlist()
// Prepare the blocks for exception handling codegen: mark the blocks that needs labels.
genPrepForEHCodegen();
- assert(!compiler->fgFirstBBScratch || compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
+ assert(!compiler->fgFirstBBScratch ||
+ compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
/* Initialize the spill tracking logic */
@@ -390,24 +387,28 @@ void CodeGen::genCodeForBBlist()
/* If any arguments live in registers, mark those regs as such */
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
/* Is this variable a parameter assigned to a register? */
- if (!varDsc->lvIsParam || !varDsc->lvRegister)
+ if (!varDsc->lvIsParam || !varDsc->lvRegister)
+ {
continue;
+ }
/* Is the argument live on entry to the method? */
- if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
+ if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
+ {
continue;
+ }
/* Is this a floating-point argument? */
if (varDsc->IsFloatRegType())
+ {
continue;
+ }
noway_assert(!varTypeIsFloating(varDsc->TypeGet()));
@@ -428,12 +429,10 @@ void CodeGen::genCodeForBBlist()
*
*/
- BasicBlock * block;
- BasicBlock * lblk; /* previous block */
+ BasicBlock* block;
+ BasicBlock* lblk; /* previous block */
- for (lblk = NULL, block = compiler->fgFirstBB;
- block != NULL;
- lblk = block, block = block->bbNext)
+ for (lblk = nullptr, block = compiler->fgFirstBB; block != nullptr; lblk = block, block = block->bbNext)
{
#ifdef DEBUG
if (compiler->verbose)
@@ -459,7 +458,7 @@ void CodeGen::genCodeForBBlist()
// change? We cleared them out above. Maybe we should just not clear them out, but update the ones that change
// here. That would require handling the changes in recordVarLocationsAtStartOfBB().
- regMaskTP newLiveRegSet = RBM_NONE;
+ regMaskTP newLiveRegSet = RBM_NONE;
regMaskTP newRegGCrefSet = RBM_NONE;
regMaskTP newRegByrefSet = RBM_NONE;
#ifdef DEBUG
@@ -469,8 +468,8 @@ void CodeGen::genCodeForBBlist()
VARSET_ITER_INIT(compiler, iter, block->bbLiveIn, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
if (varDsc->lvIsInReg())
{
@@ -536,7 +535,7 @@ void CodeGen::genCodeForBBlist()
if (handlerGetsXcptnObj(block->bbCatchTyp))
{
GenTreePtr firstStmt = block->FirstNonPhiDef();
- if (firstStmt != NULL)
+ if (firstStmt != nullptr)
{
GenTreePtr firstTree = firstStmt->gtStmt.gtStmtExpr;
if (compiler->gtHasCatchArg(firstTree))
@@ -555,21 +554,21 @@ void CodeGen::genCodeForBBlist()
getEmitter()->emitLoopAlign();
}
-#ifdef DEBUG
- if (compiler->opts.dspCode)
+#ifdef DEBUG
+ if (compiler->opts.dspCode)
+ {
printf("\n L_M%03u_BB%02u:\n", Compiler::s_compMethodsCount, block->bbNum);
+ }
#endif
- block->bbEmitCookie = NULL;
+ block->bbEmitCookie = nullptr;
- if (block->bbFlags & (BBF_JMP_TARGET|BBF_HAS_LABEL))
+ if (block->bbFlags & (BBF_JMP_TARGET | BBF_HAS_LABEL))
{
/* Mark a label and update the current set of live GC refs */
- block->bbEmitCookie = getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- FALSE);
+ block->bbEmitCookie = getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+ gcInfo.gcRegByrefSetCur, FALSE);
}
if (block == compiler->fgFirstColdBlock)
@@ -602,14 +601,14 @@ void CodeGen::genCodeForBBlist()
siBeginBlock(block);
// BBF_INTERNAL blocks don't correspond to any single IL instruction.
- if (compiler->opts.compDbgInfo &&
- (block->bbFlags & BBF_INTERNAL) &&
- !compiler->fgBBisScratch(block)) // If the block is the distinguished first scratch block, then no need to emit a NO_MAPPING entry, immediately after the prolog.
+ if (compiler->opts.compDbgInfo && (block->bbFlags & BBF_INTERNAL) &&
+ !compiler->fgBBisScratch(block)) // If the block is the distinguished first scratch block, then no need to
+ // emit a NO_MAPPING entry, immediately after the prolog.
{
- genIPmappingAdd((IL_OFFSETX) ICorDebugInfo::NO_MAPPING, true);
+ genIPmappingAdd((IL_OFFSETX)ICorDebugInfo::NO_MAPPING, true);
}
- bool firstMapping = true;
+ bool firstMapping = true;
#endif // DEBUGGING_SUPPORT
/*---------------------------------------------------------------------
@@ -631,10 +630,12 @@ void CodeGen::genCodeForBBlist()
noway_assert(stmt->gtOper == GT_STMT);
if (stmt->AsStmt()->gtStmtIsEmbedded())
+ {
continue;
+ }
/* Get hold of the statement tree */
- GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
+ GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
#if defined(DEBUGGING_SUPPORT)
@@ -653,13 +654,11 @@ void CodeGen::genCodeForBBlist()
noway_assert(stmt->gtStmt.gtStmtLastILoffs <= compiler->info.compILCodeSize ||
stmt->gtStmt.gtStmtLastILoffs == BAD_IL_OFFSET);
- if (compiler->opts.dspCode && compiler->opts.dspInstrs &&
- stmt->gtStmt.gtStmtLastILoffs != BAD_IL_OFFSET)
+ if (compiler->opts.dspCode && compiler->opts.dspInstrs && stmt->gtStmt.gtStmtLastILoffs != BAD_IL_OFFSET)
{
while (genCurDispOffset <= stmt->gtStmt.gtStmtLastILoffs)
{
- genCurDispOffset +=
- dumpSingleInstr(compiler->info.compCode, genCurDispOffset, "> ");
+ genCurDispOffset += dumpSingleInstr(compiler->info.compCode, genCurDispOffset, "> ");
}
}
@@ -668,7 +667,8 @@ void CodeGen::genCodeForBBlist()
{
printf("\nGenerating BB%02u, stmt %u\t\t", block->bbNum, stmtNum);
printf("Holding variables: ");
- dspRegMask(regSet.rsMaskVars); printf("\n\n");
+ dspRegMask(regSet.rsMaskVars);
+ printf("\n\n");
if (compiler->verboseTrees)
{
compiler->gtDispTree(compiler->opts.compDbgInfo ? stmt : tree);
@@ -676,17 +676,15 @@ void CodeGen::genCodeForBBlist()
}
}
totalCostEx += ((UINT64)stmt->gtCostEx * block->getBBWeight(compiler));
- totalCostSz += (UINT64) stmt->gtCostSz;
+ totalCostSz += (UINT64)stmt->gtCostSz;
#endif // DEBUG
// Traverse the tree in linear order, generating code for each node in the
// tree as we encounter it
- compiler->compCurLifeTree = NULL;
- compiler->compCurStmt = stmt;
- for (GenTreePtr treeNode = stmt->gtStmt.gtStmtList;
- treeNode != NULL;
- treeNode = treeNode->gtNext)
+ compiler->compCurLifeTree = nullptr;
+ compiler->compCurStmt = stmt;
+ for (GenTreePtr treeNode = stmt->gtStmt.gtStmtList; treeNode != nullptr; treeNode = treeNode->gtNext)
{
genCodeForTreeNode(treeNode);
if (treeNode->gtHasReg() && treeNode->gtLsraInfo.isLocalDefUse)
@@ -698,8 +696,7 @@ void CodeGen::genCodeForBBlist()
// If the next statement expr is a SIMDIntrinsicUpperRestore, don't call rsSpillChk because we
// haven't yet restored spills from the most recent call.
GenTree* nextTopLevelStmt = stmt->AsStmt()->gtStmtNextTopLevelStmt();
- if ((nextTopLevelStmt == nullptr) ||
- (nextTopLevelStmt->AsStmt()->gtStmtExpr->OperGet() != GT_SIMD) ||
+ if ((nextTopLevelStmt == nullptr) || (nextTopLevelStmt->AsStmt()->gtStmtExpr->OperGet() != GT_SIMD) ||
(nextTopLevelStmt->AsStmt()->gtStmtExpr->gtSIMD.gtSIMDIntrinsicID != SIMDIntrinsicUpperRestore))
#endif // FEATURE_SIMD
{
@@ -709,7 +706,7 @@ void CodeGen::genCodeForBBlist()
#ifdef DEBUG
/* Make sure we didn't bungle pointer register tracking */
- regMaskTP ptrRegs = (gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur);
+ regMaskTP ptrRegs = (gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur);
regMaskTP nonVarPtrRegs = ptrRegs & ~regSet.rsMaskVars;
// If return is a GC-type, clear it. Note that if a common
@@ -717,9 +714,9 @@ void CodeGen::genCodeForBBlist()
// even though we might return a ref. We can't use the compRetType
// as the determiner because something we are tracking as a byref
// might be used as a return value of a int function (which is legal)
- if (tree->gtOper == GT_RETURN &&
+ if (tree->gtOper == GT_RETURN &&
(varTypeIsGC(compiler->info.compRetType) ||
- (tree->gtOp.gtOp1 != 0 && varTypeIsGC(tree->gtOp.gtOp1->TypeGet()))))
+ (tree->gtOp.gtOp1 != nullptr && varTypeIsGC(tree->gtOp.gtOp1->TypeGet()))))
{
nonVarPtrRegs &= ~RBM_INTRET;
}
@@ -728,14 +725,13 @@ void CodeGen::genCodeForBBlist()
// harmless "inc" instruction (does not interfere with the exception
// object).
- if ((compiler->opts.eeFlags & CORJIT_FLG_BBINSTR) &&
- (stmt == block->bbTreeList) &&
+ if ((compiler->opts.eeFlags & CORJIT_FLG_BBINSTR) && (stmt == block->bbTreeList) &&
handlerGetsXcptnObj(block->bbCatchTyp))
{
nonVarPtrRegs &= ~RBM_EXCEPTION_OBJECT;
}
- if (nonVarPtrRegs)
+ if (nonVarPtrRegs)
{
printf("Regset after tree=");
compiler->printTreeID(tree);
@@ -753,7 +749,7 @@ void CodeGen::genCodeForBBlist()
noway_assert(nonVarPtrRegs == 0);
- for (GenTree * node = stmt->gtStmt.gtStmtList; node; node=node->gtNext)
+ for (GenTree* node = stmt->gtStmt.gtStmtList; node; node = node->gtNext)
{
assert(!(node->gtFlags & GTF_SPILL));
}
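In words, the DEBUG check above asserts that every register the GC info still reports as holding a GC ref or byref is backed by a live register variable, after excusing the return register for GC-typed returns and the exception object register for instrumented catch handlers. A small mask-algebra sketch, with plain integers standing in for regMaskTP:

#include <cstdint>

// Illustrative restatement of the pointer-register bookkeeping check above;
// the masks and the 'allowed' parameter stand in for the real exclusions
// (return register, exception object register), not JIT definitions.
inline bool GcRegTrackingConsistent(uint64_t gcRefRegs, uint64_t byrefRegs, uint64_t regVarRegs, uint64_t allowed)
{
    uint64_t ptrRegs       = gcRefRegs | byrefRegs;            // regs GC info thinks hold pointers
    uint64_t nonVarPtrRegs = ptrRegs & ~regVarRegs & ~allowed; // minus reg vars and excused regs
    return nonVarPtrRegs == 0;                                 // anything left is a tracking bug
}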
@@ -779,7 +775,7 @@ void CodeGen::genCodeForBBlist()
}
#endif // defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_ARM64_)
-#ifdef DEBUGGING_SUPPORT
+#ifdef DEBUGGING_SUPPORT
if (compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0))
{
@@ -787,10 +783,10 @@ void CodeGen::genCodeForBBlist()
/* Is this the last block, and are there any open scopes left ? */
- bool isLastBlockProcessed = (block->bbNext == NULL);
+ bool isLastBlockProcessed = (block->bbNext == nullptr);
if (block->isBBCallAlwaysPair())
{
- isLastBlockProcessed = (block->bbNext->bbNext == NULL);
+ isLastBlockProcessed = (block->bbNext->bbNext == nullptr);
}
if (isLastBlockProcessed && siOpenScopeList.scNext)
@@ -800,7 +796,7 @@ void CodeGen::genCodeForBBlist()
are at the end of the method. It would be nice if we could fix
our code so that this throw block will no longer be necessary. */
- //noway_assert(block->bbCodeOffsEnd != compiler->info.compILCodeSize);
+ // noway_assert(block->bbCodeOffsEnd != compiler->info.compILCodeSize);
siCloseAllOpenScopes();
}
@@ -820,8 +816,8 @@ void CodeGen::genCodeForBBlist()
VARSET_ITER_INIT(compiler, extraLiveVarIter, extraLiveVars, extraLiveVarIndex);
while (extraLiveVarIter.NextElem(compiler, &extraLiveVarIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[extraLiveVarIndex];
- LclVarDsc * varDsc = compiler->lvaTable + varNum;
+ unsigned varNum = compiler->lvaTrackedToVarNum[extraLiveVarIndex];
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
assert(!varDsc->lvIsRegCandidate());
}
#endif
@@ -836,7 +832,8 @@ void CodeGen::genCodeForBBlist()
// The document "X64 and ARM ABIs.docx" has more details. The situations:
// 1. If the call instruction is in a different EH region as the instruction that follows it.
// 2. If the call immediately precedes an OS epilog. (Note that what the JIT or VM consider an epilog might
- // be slightly different from what the OS considers an epilog, and it is the OS-reported epilog that matters here.)
+ // be slightly different from what the OS considers an epilog, and it is the OS-reported epilog that matters
+ // here.)
// We handle case #1 here, and case #2 in the emitter.
if (getEmitter()->emitIsLastInsCall())
{
@@ -844,46 +841,45 @@ void CodeGen::genCodeForBBlist()
// Note: we may be generating a few too many NOPs for the case of call preceding an epilog. Technically,
// if the next block is a BBJ_RETURN, an epilog will be generated, but there may be some instructions
// generated before the OS epilog starts, such as a GS cookie check.
- if ((block->bbNext == nullptr) ||
- !BasicBlock::sameEHRegion(block, block->bbNext))
+ if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
{
// We only need the NOP if we're not going to generate any more code as part of the block end.
switch (block->bbJumpKind)
{
- case BBJ_ALWAYS:
- case BBJ_THROW:
- case BBJ_CALLFINALLY:
- case BBJ_EHCATCHRET:
+ case BBJ_ALWAYS:
+ case BBJ_THROW:
+ case BBJ_CALLFINALLY:
+ case BBJ_EHCATCHRET:
// We're going to generate more code below anyway, so no need for the NOP.
- case BBJ_RETURN:
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- // These are the "epilog follows" case, handled in the emitter.
+ case BBJ_RETURN:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ // These are the "epilog follows" case, handled in the emitter.
- break;
+ break;
- case BBJ_NONE:
- if (block->bbNext == nullptr)
- {
- // Call immediately before the end of the code; we should never get here .
- instGen(INS_BREAKPOINT); // This should never get executed
- }
- else
- {
- // We need the NOP
- instGen(INS_nop);
- }
- break;
+ case BBJ_NONE:
+ if (block->bbNext == nullptr)
+ {
+ // Call immediately before the end of the code; we should never get here .
+ instGen(INS_BREAKPOINT); // This should never get executed
+ }
+ else
+ {
+ // We need the NOP
+ instGen(INS_nop);
+ }
+ break;
- case BBJ_COND:
- case BBJ_SWITCH:
+ case BBJ_COND:
+ case BBJ_SWITCH:
// These can't have a call as the last instruction!
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
}
}
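Condensed, the rule in the switch above is: when the last instruction emitted for the block was a call and the following block is in a different EH region (or does not exist), only a plain fall-through block needs an explicit NOP; jump, throw and funclet-call kinds emit more instructions anyway, and return kinds are resolved when the epilog is emitted. A hedged sketch of that decision, with toy names:

// Toy restatement of the "pad a trailing call with a NOP" decision above.
enum class BlockEndSketch { FallThrough, Jump, Return, Throw };

inline bool NeedsNopAfterCall(bool lastInsIsCall, bool nextBlockExists, bool sameEHRegionAsNext, BlockEndSketch end)
{
    if (!lastInsIsCall || (nextBlockExists && sameEHRegionAsNext))
    {
        return false; // nothing separates the call from the next region
    }
    // Only a pure fall-through needs the explicit NOP; other kinds either emit
    // further instructions here or are handled when the epilog is generated.
    return (end == BlockEndSketch::FallThrough) && nextBlockExists;
}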
@@ -893,187 +889,186 @@ void CodeGen::genCodeForBBlist()
switch (block->bbJumpKind)
{
- case BBJ_ALWAYS:
- inst_JMP(EJ_jmp, block->bbJumpDest);
- break;
+ case BBJ_ALWAYS:
+ inst_JMP(EJ_jmp, block->bbJumpDest);
+ break;
- case BBJ_RETURN:
- genExitCode(block);
- break;
+ case BBJ_RETURN:
+ genExitCode(block);
+ break;
- case BBJ_THROW:
- // If we have a throw at the end of a function or funclet, we need to emit another instruction
- // afterwards to help the OS unwinder determine the correct context during unwind.
- // We insert an unexecuted breakpoint instruction in several situations
- // following a throw instruction:
- // 1. If the throw is the last instruction of the function or funclet. This helps
- // the OS unwinder determine the correct context during an unwind from the
- // thrown exception.
- // 2. If this is this is the last block of the hot section.
- // 3. If the subsequent block is a special throw block.
- // 4. On AMD64, if the next block is in a different EH region.
- if ((block->bbNext == NULL)
- || (block->bbNext->bbFlags & BBF_FUNCLET_BEG)
- || !BasicBlock::sameEHRegion(block, block->bbNext)
- || (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext))
- || block->bbNext == compiler->fgFirstColdBlock
- )
- {
- instGen(INS_BREAKPOINT); // This should never get executed
- }
+ case BBJ_THROW:
+ // If we have a throw at the end of a function or funclet, we need to emit another instruction
+ // afterwards to help the OS unwinder determine the correct context during unwind.
+ // We insert an unexecuted breakpoint instruction in several situations
+ // following a throw instruction:
+ // 1. If the throw is the last instruction of the function or funclet. This helps
+ // the OS unwinder determine the correct context during an unwind from the
+ // thrown exception.
+                // 2. If this is the last block of the hot section.
+ // 3. If the subsequent block is a special throw block.
+ // 4. On AMD64, if the next block is in a different EH region.
+ if ((block->bbNext == nullptr) || (block->bbNext->bbFlags & BBF_FUNCLET_BEG) ||
+ !BasicBlock::sameEHRegion(block, block->bbNext) ||
+ (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block->bbNext)) ||
+ block->bbNext == compiler->fgFirstColdBlock)
+ {
+ instGen(INS_BREAKPOINT); // This should never get executed
+ }
- break;
+ break;
- case BBJ_CALLFINALLY:
+ case BBJ_CALLFINALLY:
#if FEATURE_EH_FUNCLETS
- // Generate a call to the finally, like this:
- // mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym
- // call finally-funclet
- // jmp finally-return // Only for non-retless finally calls
- // The jmp can be a NOP if we're going to the next block.
- // If we're generating code for the main function (not a funclet), and there is no localloc,
- // then RSP at this point is the same value as that stored in the PSPsym. So just copy RSP
- // instead of loading the PSPSym in this case.
-
- if (!compiler->compLocallocUsed &&
- (compiler->funCurrentFunc()->funKind == FUNC_ROOT))
- {
- inst_RV_RV(INS_mov, REG_ARG_0, REG_SPBASE, TYP_I_IMPL);
- }
- else
- {
- getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
- }
- getEmitter()->emitIns_J(INS_call, block->bbJumpDest);
-
- if (block->bbFlags & BBF_RETLESS_CALL)
- {
- // We have a retless call, and the last instruction generated was a call.
- // If the next block is in a different EH region (or is the end of the code
- // block), then we need to generate a breakpoint here (since it will never
- // get executed) to get proper unwind behavior.
+ // Generate a call to the finally, like this:
+ // mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym
+ // call finally-funclet
+ // jmp finally-return // Only for non-retless finally calls
+ // The jmp can be a NOP if we're going to the next block.
+ // If we're generating code for the main function (not a funclet), and there is no localloc,
+ // then RSP at this point is the same value as that stored in the PSPsym. So just copy RSP
+ // instead of loading the PSPSym in this case.
- if ((block->bbNext == nullptr) ||
- !BasicBlock::sameEHRegion(block, block->bbNext))
+ if (!compiler->compLocallocUsed && (compiler->funCurrentFunc()->funKind == FUNC_ROOT))
{
- instGen(INS_BREAKPOINT); // This should never get executed
+ inst_RV_RV(INS_mov, REG_ARG_0, REG_SPBASE, TYP_I_IMPL);
}
- }
- else
- {
- // Because of the way the flowgraph is connected, the liveness info for this one instruction
- // after the call is not (can not be) correct in cases where a variable has a last use in the
- // handler. So turn off GC reporting for this single instruction.
- getEmitter()->emitDisableGC();
+ else
+ {
+ getEmitter()->emitIns_R_S(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_ARG_0, compiler->lvaPSPSym, 0);
+ }
+ getEmitter()->emitIns_J(INS_call, block->bbJumpDest);
- // Now go to where the finally funclet needs to return to.
- if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
+ if (block->bbFlags & BBF_RETLESS_CALL)
{
- // Fall-through.
- // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly
- // to the next instruction? This would depend on stack walking from within the finally
- // handler working without this instruction being in this special EH region.
- instGen(INS_nop);
+ // We have a retless call, and the last instruction generated was a call.
+ // If the next block is in a different EH region (or is the end of the code
+ // block), then we need to generate a breakpoint here (since it will never
+ // get executed) to get proper unwind behavior.
+
+ if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
+ {
+ instGen(INS_BREAKPOINT); // This should never get executed
+ }
}
else
{
- inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
- }
+ // Because of the way the flowgraph is connected, the liveness info for this one instruction
+ // after the call is not (can not be) correct in cases where a variable has a last use in the
+ // handler. So turn off GC reporting for this single instruction.
+ getEmitter()->emitDisableGC();
- getEmitter()->emitEnableGC();
- }
-
-#else // !FEATURE_EH_FUNCLETS
+ // Now go to where the finally funclet needs to return to.
+ if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
+ {
+ // Fall-through.
+ // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly
+ // to the next instruction? This would depend on stack walking from within the finally
+ // handler working without this instruction being in this special EH region.
+ instGen(INS_nop);
+ }
+ else
+ {
+ inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
+ }
- // If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot
- // corresponding to the finally's nesting level. When invoked in response to an exception, the
- // EE does this.
- //
- // We have a BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
- //
- // We will emit :
- // mov [ebp - (n + 1)], 0
- // mov [ebp - n ], 0xFC
- // push &step
- // jmp finallyBlock
- // ...
- // step:
- // mov [ebp - n ], 0
- // jmp leaveTarget
- // ...
- // leaveTarget:
-
- noway_assert(isFramePointerUsed());
-
- // Get the nesting level which contains the finally
- compiler->fgGetNestingLevel(block, &finallyNesting);
+ getEmitter()->emitEnableGC();
+ }
- // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
- unsigned filterEndOffsetSlotOffs;
- filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
-
- unsigned curNestingSlotOffs;
- curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));
-
- // Zero out the slot for the next nesting level
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar, curNestingSlotOffs - TARGET_POINTER_SIZE);
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK, compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
+#else // !FEATURE_EH_FUNCLETS
- // Now push the address where the finally funclet should return to directly.
- if ( !(block->bbFlags & BBF_RETLESS_CALL) )
- {
- assert(block->isBBCallAlwaysPair());
- getEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
- }
- else
- {
- // EE expects a DWORD, so we give him 0
- inst_IV(INS_push_hide, 0);
- }
+ // If we are about to invoke a finally locally from a try block, we have to set the ShadowSP slot
+ // corresponding to the finally's nesting level. When invoked in response to an exception, the
+ // EE does this.
+ //
+ // We have a BBJ_CALLFINALLY followed by a BBJ_ALWAYS.
+ //
+ // We will emit :
+ // mov [ebp - (n + 1)], 0
+ // mov [ebp - n ], 0xFC
+ // push &step
+ // jmp finallyBlock
+ // ...
+ // step:
+ // mov [ebp - n ], 0
+ // jmp leaveTarget
+ // ...
+ // leaveTarget:
+
+ noway_assert(isFramePointerUsed());
+
+ // Get the nesting level which contains the finally
+ compiler->fgGetNestingLevel(block, &finallyNesting);
+
+ // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
+ unsigned filterEndOffsetSlotOffs;
+ filterEndOffsetSlotOffs =
+ (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
+
+ unsigned curNestingSlotOffs;
+ curNestingSlotOffs = (unsigned)(filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE));
+
+ // Zero out the slot for the next nesting level
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar,
+ curNestingSlotOffs - TARGET_POINTER_SIZE);
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, LCL_FINALLY_MARK, compiler->lvaShadowSPslotsVar,
+ curNestingSlotOffs);
+
+ // Now push the address where the finally funclet should return to directly.
+ if (!(block->bbFlags & BBF_RETLESS_CALL))
+ {
+ assert(block->isBBCallAlwaysPair());
+ getEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest);
+ }
+ else
+ {
+ // EE expects a DWORD, so we give him 0
+ inst_IV(INS_push_hide, 0);
+ }
- // Jump to the finally BB
- inst_JMP(EJ_jmp, block->bbJumpDest);
+ // Jump to the finally BB
+ inst_JMP(EJ_jmp, block->bbJumpDest);
#endif // !FEATURE_EH_FUNCLETS
- // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
- // jump target using bbJumpDest - that is already used to point
- // to the finally block. So just skip past the BBJ_ALWAYS unless the
- // block is RETLESS.
- if ( !(block->bbFlags & BBF_RETLESS_CALL) )
- {
- assert(block->isBBCallAlwaysPair());
+ // The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
+ // jump target using bbJumpDest - that is already used to point
+ // to the finally block. So just skip past the BBJ_ALWAYS unless the
+ // block is RETLESS.
+ if (!(block->bbFlags & BBF_RETLESS_CALL))
+ {
+ assert(block->isBBCallAlwaysPair());
- lblk = block;
- block = block->bbNext;
- }
+ lblk = block;
+ block = block->bbNext;
+ }
- break;
+ break;
#if FEATURE_EH_FUNCLETS
- case BBJ_EHCATCHRET:
- // Set RAX to the address the VM should return to after the catch.
- // Generate a RIP-relative
- // lea reg, [rip + disp32] ; the RIP is implicit
- // which will be position-indepenent.
- getEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
- __fallthrough;
-
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- genReserveFuncletEpilog(block);
- break;
+ case BBJ_EHCATCHRET:
+ // Set RAX to the address the VM should return to after the catch.
+ // Generate a RIP-relative
+ // lea reg, [rip + disp32] ; the RIP is implicit
+                // which will be position-independent.
+ getEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, block->bbJumpDest, REG_INTRET);
+ __fallthrough;
+
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ genReserveFuncletEpilog(block);
+ break;
#else // !FEATURE_EH_FUNCLETS
- case BBJ_EHCATCHRET:
- noway_assert(!"Unexpected BBJ_EHCATCHRET"); // not used on x86
+ case BBJ_EHCATCHRET:
+ noway_assert(!"Unexpected BBJ_EHCATCHRET"); // not used on x86
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
{
// The last statement of the block must be a GT_RETFILT, which has already been generated.
GenTree* tmpNode = nullptr;
@@ -1105,18 +1100,18 @@ void CodeGen::genCodeForBBlist()
#endif // !FEATURE_EH_FUNCLETS
- case BBJ_NONE:
- case BBJ_COND:
- case BBJ_SWITCH:
- break;
+ case BBJ_NONE:
+ case BBJ_COND:
+ case BBJ_SWITCH:
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
-#ifdef DEBUG
- compiler->compCurBB = 0;
+#ifdef DEBUG
+ compiler->compCurBB = nullptr;
#endif
} //------------------ END-FOR each block of the method -------------------
@@ -1132,12 +1127,11 @@ void CodeGen::genCodeForBBlist()
compiler->tmpEnd();
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("\n# ");
- printf("totalCostEx = %6d, totalCostSz = %5d ",
- totalCostEx, totalCostSz);
+ printf("totalCostEx = %6d, totalCostSz = %5d ", totalCostEx, totalCostSz);
printf("%s\n", compiler->info.compFullName);
}
#endif
@@ -1145,13 +1139,12 @@ void CodeGen::genCodeForBBlist()
// return the child that has the same reg as the dst (if any)
// other child returned (out param) in 'other'
-GenTree *
-sameRegAsDst(GenTree *tree, GenTree *&other /*out*/)
+GenTree* sameRegAsDst(GenTree* tree, GenTree*& other /*out*/)
{
if (tree->gtRegNum == REG_NA)
{
other = nullptr;
- return NULL;
+ return nullptr;
}
GenTreePtr op1 = tree->gtOp.gtOp1;
@@ -1169,23 +1162,20 @@ sameRegAsDst(GenTree *tree, GenTree *&other /*out*/)
else
{
other = nullptr;
- return NULL;
+ return nullptr;
}
}
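sameRegAsDst exists because most x86 integer instructions are two-operand and destructive: the operand that already occupies the destination register should become 'dst', and the remaining operand (returned through 'other') becomes the source. A toy sketch of a typical call site, under the assumption that a copy is emitted first when neither operand is in place:

// Toy illustration of how a sameRegAsDst-style helper is typically consumed;
// the types and the emit steps in comments are placeholders, not JIT APIs.
struct ToyNode { int reg; ToyNode* op1; ToyNode* op2; };

ToyNode* SameRegAsDstSketch(ToyNode* tree, ToyNode*& other)
{
    if (tree->op1->reg == tree->reg) { other = tree->op2; return tree->op1; }
    if (tree->op2->reg == tree->reg) { other = tree->op1; return tree->op2; }
    other = nullptr;
    return nullptr;
}

void EmitBinaryOpSketch(ToyNode* tree)
{
    ToyNode* src;
    ToyNode* dst = SameRegAsDstSketch(tree, src);
    if (dst == nullptr)
    {
        // neither operand already lives in the target register:
        // emit "mov targetReg, op1Reg" first, then treat op2 as the source
        dst = tree;
        src = tree->op2;
    }
    // finally emit the destructive form: "op targetReg, srcReg/mem/imm"
}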
// Move an immediate value into an integer register
-void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
- regNumber reg,
- ssize_t imm,
- insFlags flags)
+void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm, insFlags flags)
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
if (!compiler->opts.compReloc)
{
- size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
+ size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
}
if ((imm == 0) && !EA_IS_RELOC(size))
@@ -1212,17 +1202,17 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
* specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
* genProduceReg() on the target register.
*/
-void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree)
+void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree)
{
switch (tree->gtOper)
{
- case GT_CNS_INT:
+ case GT_CNS_INT:
{
// relocatable values tend to come down as a CNS_INT of native int type
// so the line between these two opcodes is kind of blurry
- GenTreeIntConCommon* con = tree->AsIntConCommon();
- ssize_t cnsVal = con->IconValue();
+ GenTreeIntConCommon* con = tree->AsIntConCommon();
+ ssize_t cnsVal = con->IconValue();
if (con->ImmedValNeedsReloc(compiler))
{
@@ -1236,7 +1226,7 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types tar
}
break;
- case GT_CNS_DBL:
+ case GT_CNS_DBL:
{
double constValue = tree->gtDblCon.gtDconVal;
@@ -1253,7 +1243,7 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types tar
if (targetType == TYP_FLOAT)
{
float f = forceCastToFloat(constValue);
- cns = genMakeConst(&f, targetType, tree, false);
+ cns = genMakeConst(&f, targetType, tree, false);
}
else
{
@@ -1265,12 +1255,11 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types tar
}
break;
- default:
- unreached();
+ default:
+ unreached();
}
}
-
// Generate code to get the high N bits of a N*N=2N bit multiplication result
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
@@ -1279,10 +1268,10 @@ void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
- emitter *emit = getEmitter();
- emitAttr size = emitTypeSize(treeNode);
- GenTree *op1 = treeNode->gtOp.gtOp1;
- GenTree *op2 = treeNode->gtOp.gtOp2;
+ emitter* emit = getEmitter();
+ emitAttr size = emitTypeSize(treeNode);
+ GenTree* op1 = treeNode->gtOp.gtOp1;
+ GenTree* op2 = treeNode->gtOp.gtOp2;
// to get the high bits of the multiply, we are constrained to using the
// 1-op form: RDX:RAX = RAX * rm
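The constraint in the comment above comes from the x86 one-operand multiply: mul/imul with a single operand always produces a double-width product in RDX:RAX, so one source is staged in RAX and the high half is read back from RDX. Purely for intuition, a portable sketch of the value RDX ends up holding in the unsigned 64x64 case (this is not how the JIT computes it):

#include <cstdint>

// Portable computation of the upper 64 bits of an unsigned 64x64 multiply,
// i.e. the value left in "RDX" by mul; illustrative only.
inline uint64_t MulHighU64(uint64_t a, uint64_t b)
{
    const uint64_t mask = 0xFFFFFFFFu;
    uint64_t aLo = a & mask, aHi = a >> 32;
    uint64_t bLo = b & mask, bHi = b >> 32;

    uint64_t lolo = aLo * bLo;
    uint64_t lohi = aLo * bHi;
    uint64_t hilo = aHi * bLo;

    uint64_t mid = (lolo >> 32) + (lohi & mask) + (hilo & mask);  // middle 32-bit column
    return aHi * bHi + (lohi >> 32) + (hilo >> 32) + (mid >> 32); // carries into the high half
}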
@@ -1291,8 +1280,8 @@ void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
genConsumeOperands(treeNode->AsOp());
GenTree* regOp = op1;
- GenTree* rmOp = op2;
-
+ GenTree* rmOp = op2;
+
// Set rmOp to the contained memory operand (if any)
//
if (op1->isContained() || (!op2->isContained() && (op2->gtRegNum == targetReg)))
@@ -1301,15 +1290,15 @@ void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
rmOp = op1;
}
assert(!regOp->isContained());
-
+
// Setup targetReg when neither of the source operands was a matching register
if (regOp->gtRegNum != targetReg)
{
inst_RV_RV(ins_Copy(targetType), targetReg, regOp->gtRegNum, targetType);
}
-
+
emit->emitInsBinary(INS_imulEAX, size, treeNode, rmOp);
-
+
// Move the result to the desired register, if necessary
if (targetReg != REG_RDX)
{
@@ -1321,26 +1310,24 @@ void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
//
void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
{
- GenTree *dividend = treeNode->gtOp1;
- GenTree *divisor = treeNode->gtOp2;
- genTreeOps oper = treeNode->OperGet();
- emitAttr size = emitTypeSize(treeNode);
- regNumber targetReg = treeNode->gtRegNum;
- var_types targetType = treeNode->TypeGet();
- emitter *emit = getEmitter();
+ GenTree* dividend = treeNode->gtOp1;
+ GenTree* divisor = treeNode->gtOp2;
+ genTreeOps oper = treeNode->OperGet();
+ emitAttr size = emitTypeSize(treeNode);
+ regNumber targetReg = treeNode->gtRegNum;
+ var_types targetType = treeNode->TypeGet();
+ emitter* emit = getEmitter();
// dividend is not contained.
assert(!dividend->isContained());
-
+
genConsumeOperands(treeNode->AsOp());
if (varTypeIsFloating(targetType))
{
// divisor is not contained or if contained is a memory op.
// Note that a reg optional operand is a treated as a memory op
// if no register is allocated to it.
- assert(!divisor->isContained() ||
- divisor->isMemoryOp() ||
- divisor->IsCnsFltOrDbl() ||
+ assert(!divisor->isContained() || divisor->isMemoryOp() || divisor->IsCnsFltOrDbl() ||
divisor->IsRegOptional());
// Floating point div/rem operation
@@ -1356,7 +1343,8 @@ void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
// because divss/divsd reg1, reg2 will over-write reg1. Therefore, in case of AMD64
// LSRA has to make sure that such a register assignment is not generated for floating
// point div/rem operations.
- noway_assert(!"GT_DIV/GT_MOD (float): case of reg2 = reg1 / reg2, LSRA should never generate such a reg assignment");
+ noway_assert(
+ !"GT_DIV/GT_MOD (float): case of reg2 = reg1 / reg2, LSRA should never generate such a reg assignment");
}
else
{
@@ -1368,7 +1356,9 @@ void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
{
// dividend must be in RAX
if (dividend->gtRegNum != REG_RAX)
+ {
inst_RV_RV(INS_mov, REG_RAX, dividend->gtRegNum, targetType);
+ }
// zero or sign extend rax to rdx
if (oper == GT_UMOD || oper == GT_UDIV)
@@ -1378,19 +1368,23 @@ void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
else
{
emit->emitIns(INS_cdq, size);
- // the cdq instruction writes RDX, So clear the gcInfo for RDX
+            // the cdq instruction writes RDX, so clear the gcInfo for RDX
gcInfo.gcMarkRegSetNpt(RBM_RDX);
}
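The block just above is the dividend setup required by the x86 division contract: RDX:RAX holds the zero- or sign-extended dividend, so RDX is zeroed for unsigned div and filled via cdq/cqo for signed idiv; the instruction then leaves the quotient in RAX and the remainder in RDX. A hedged stand-in for that contract in plain C++ (not emitter code):

#include <cstdint>
#include <utility>

// Plain C++ stand-in for the div/idiv result contract relied on below:
// the quotient ends up where "RAX" would be, the remainder where "RDX" would be.
inline std::pair<uint64_t, uint64_t> UnsignedDivRemSketch(uint64_t dividend, uint64_t divisor)
{
    return { dividend / divisor, dividend % divisor }; // {quotient "RAX", remainder "RDX"}
}

// Emitted shape (pseudo-assembly):
//   unsigned: xor edx, edx   ; zero-extend dividend into RDX:RAX, then: div  rm
//   signed:   cdq / cqo      ; sign-extend RAX into RDX,          then: idiv rm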
// Perform the 'targetType' (64-bit or 32-bit) divide instruction
instruction ins;
if (oper == GT_UMOD || oper == GT_UDIV)
+ {
ins = INS_div;
+ }
else
+ {
ins = INS_idiv;
-
+ }
+
emit->emitInsBinary(ins, size, treeNode, divisor);
-
+
// DIV/IDIV instructions always store the quotient in RAX and the remainder in RDX.
// Move the result to the desired register, if necessary
if (oper == GT_DIV || oper == GT_UDIV)
@@ -1428,43 +1422,27 @@ void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
void CodeGen::genCodeForBinary(GenTree* treeNode)
{
- const genTreeOps oper = treeNode->OperGet();
- regNumber targetReg = treeNode->gtRegNum;
- var_types targetType = treeNode->TypeGet();
- emitter *emit = getEmitter();
+ const genTreeOps oper = treeNode->OperGet();
+ regNumber targetReg = treeNode->gtRegNum;
+ var_types targetType = treeNode->TypeGet();
+ emitter* emit = getEmitter();
#if defined(_TARGET_64BIT_)
- assert (oper == GT_OR ||
- oper == GT_XOR ||
- oper == GT_AND ||
- oper == GT_ADD ||
- oper == GT_SUB);
-#else // !defined(_TARGET_64BIT_)
- assert (oper == GT_OR ||
- oper == GT_XOR ||
- oper == GT_AND ||
- oper == GT_ADD_LO ||
- oper == GT_ADD_HI ||
- oper == GT_SUB_LO ||
- oper == GT_SUB_HI ||
- oper == GT_MUL_HI ||
- oper == GT_DIV_HI ||
- oper == GT_MOD_HI ||
- oper == GT_ADD ||
- oper == GT_SUB);
+ assert(oper == GT_OR || oper == GT_XOR || oper == GT_AND || oper == GT_ADD || oper == GT_SUB);
+#else // !defined(_TARGET_64BIT_)
+ assert(oper == GT_OR || oper == GT_XOR || oper == GT_AND || oper == GT_ADD_LO || oper == GT_ADD_HI ||
+ oper == GT_SUB_LO || oper == GT_SUB_HI || oper == GT_MUL_HI || oper == GT_DIV_HI || oper == GT_MOD_HI ||
+ oper == GT_ADD || oper == GT_SUB);
#endif // !defined(_TARGET_64BIT_)
-
+
GenTreePtr op1 = treeNode->gtGetOp1();
GenTreePtr op2 = treeNode->gtGetOp2();
- // Commutative operations can mark op1 as contained to generate "op reg, memop/immed"
+ // Commutative operations can mark op1 as contained to generate "op reg, memop/immed"
if (op1->isContained())
{
assert(treeNode->OperIsCommutative());
- assert(op1->isMemoryOp() ||
- op1->IsCnsNonZeroFltOrDbl() ||
- op1->IsIntCnsFitsInI32() ||
- op1->IsRegOptional());
+ assert(op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() || op1->IsRegOptional());
op1 = treeNode->gtGetOp2();
op2 = treeNode->gtGetOp1();
@@ -1475,8 +1453,8 @@ void CodeGen::genCodeForBinary(GenTree* treeNode)
// The arithmetic node must be sitting in a register (since it's not contained)
noway_assert(targetReg != REG_NA);
- regNumber op1reg = op1->isContained() ? REG_NA: op1->gtRegNum;
- regNumber op2reg = op2->isContained() ? REG_NA: op2->gtRegNum;
+ regNumber op1reg = op1->isContained() ? REG_NA : op1->gtRegNum;
+ regNumber op2reg = op2->isContained() ? REG_NA : op2->gtRegNum;
GenTreePtr dst;
GenTreePtr src;
@@ -1500,15 +1478,13 @@ void CodeGen::genCodeForBinary(GenTree* treeNode)
src = op1;
}
// now we know there are 3 different operands so attempt to use LEA
- else if (oper == GT_ADD
- && !varTypeIsFloating(treeNode)
- && !treeNode->gtOverflowEx() // LEA does not set flags
- && (op2->isContainedIntOrIImmed() || !op2->isContained())
- )
+ else if (oper == GT_ADD && !varTypeIsFloating(treeNode) && !treeNode->gtOverflowEx() // LEA does not set flags
+ && (op2->isContainedIntOrIImmed() || !op2->isContained()))
{
if (op2->isContainedIntOrIImmed())
{
- emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg, (int) op2->AsIntConCommon()->IconValue());
+ emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg,
+ (int)op2->AsIntConCommon()->IconValue());
}
else
{
@@ -1533,10 +1509,7 @@ void CodeGen::genCodeForBinary(GenTree* treeNode)
}
// try to use an inc or dec
- if (oper == GT_ADD
- && !varTypeIsFloating(treeNode)
- && src->isContainedIntOrIImmed()
- && !treeNode->gtOverflowEx())
+ if (oper == GT_ADD && !varTypeIsFloating(treeNode) && src->isContainedIntOrIImmed() && !treeNode->gtOverflowEx())
{
if (src->IsIntegralConst(1))
{
@@ -1557,8 +1530,7 @@ void CodeGen::genCodeForBinary(GenTree* treeNode)
if (treeNode->gtOverflowEx())
{
#if !defined(_TARGET_64BIT_)
- assert(oper == GT_ADD || oper == GT_SUB ||
- oper == GT_ADD_HI || oper == GT_SUB_HI);
+ assert(oper == GT_ADD || oper == GT_SUB || oper == GT_ADD_HI || oper == GT_SUB_HI);
#else
assert(oper == GT_ADD || oper == GT_SUB);
#endif
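Taken together, the hunks above show genCodeForBinary choosing between three encodings: the destructive two-operand form when one operand already sits in the target register (emitting a mov first otherwise), a flag-free three-operand LEA for plain integer adds whose operands both live elsewhere, and inc/dec when the folded source is the constant +1/-1 and no overflow check was requested. A condensed, hedged sketch of that preference order (toy code, not the JIT's actual control flow):

// Condensed sketch of the encoding preference in genCodeForBinary above.
// 'plainIntAdd' stands for: GT_ADD, integer typed, and no overflow check requested.
inline const char* PickBinaryEncodingSketch(bool operandInTargetReg, bool plainIntAdd, bool srcIsPlusOrMinusOne)
{
    if (!operandInTargetReg && plainIntAdd)
    {
        return "lea target, [op1 + op2/imm]"; // three operands, leaves flags untouched
    }
    if (plainIntAdd && srcIsPlusOrMinusOne)
    {
        return "inc/dec target"; // the +/-1 source constant folds into inc or dec
    }
    return "mov target, op1 (if needed); op target, op2/mem/imm"; // classic destructive form
}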
@@ -1578,8 +1550,7 @@ void CodeGen::genCodeForBinary(GenTree* treeNode)
// Otherwise returns false.
// For other platforms always returns false.
//
-bool
-CodeGen::isStructReturn(GenTreePtr treeNode)
+bool CodeGen::isStructReturn(GenTreePtr treeNode)
{
// This method could be called for 'treeNode' of GT_RET_FILT or GT_RETURN.
// For the GT_RET_FILT, the return is always
@@ -1592,7 +1563,7 @@ CodeGen::isStructReturn(GenTreePtr treeNode)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
return varTypeIsStruct(treeNode);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
assert(!varTypeIsStruct(treeNode));
return false;
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -1609,8 +1580,7 @@ CodeGen::isStructReturn(GenTreePtr treeNode)
//
// Assumption:
// op1 of GT_RETURN node is either GT_LCL_VAR or multi-reg GT_CALL
-void
-CodeGen::genStructReturn(GenTreePtr treeNode)
+void CodeGen::genStructReturn(GenTreePtr treeNode)
{
assert(treeNode->OperGet() == GT_RETURN);
GenTreePtr op1 = treeNode->gtGetOp1();
@@ -1619,7 +1589,7 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
if (op1->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* lclVar = op1->AsLclVarCommon();
- LclVarDsc* varDsc = &(compiler->lvaTable[lclVar->gtLclNum]);
+ LclVarDsc* varDsc = &(compiler->lvaTable[lclVar->gtLclNum]);
assert(varDsc->lvIsMultiRegRet);
ReturnTypeDesc retTypeDesc;
@@ -1636,8 +1606,8 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
// This is a case of operand is in a single reg and needs to be
// returned in multiple ABI return registers.
regNumber opReg = genConsumeReg(op1);
- regNumber reg0 = retTypeDesc.GetABIReturnReg(0);
- regNumber reg1 = retTypeDesc.GetABIReturnReg(1);
+ regNumber reg0 = retTypeDesc.GetABIReturnReg(0);
+ regNumber reg1 = retTypeDesc.GetABIReturnReg(1);
if (opReg != reg0 && opReg != reg1)
{
@@ -1663,7 +1633,7 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
// reg0 = opReg.
// swap upper and lower 8-bytes of reg1 so that desired 8-byte is in bit position [63:0].
- inst_RV_RV(ins_Copy(TYP_DOUBLE), reg0, opReg, TYP_DOUBLE);
+ inst_RV_RV(ins_Copy(TYP_DOUBLE), reg0, opReg, TYP_DOUBLE);
}
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, reg1, reg1, 0x01);
}
@@ -1671,12 +1641,12 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
{
assert(op1->isContained());
- // Copy var on stack into ABI return registers
+ // Copy var on stack into ABI return registers
int offset = 0;
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc.GetReturnRegType(i);
- regNumber reg = retTypeDesc.GetABIReturnReg(i);
+ regNumber reg = retTypeDesc.GetABIReturnReg(i);
getEmitter()->emitIns_R_S(ins_Load(type), emitTypeSize(type), reg, lclVar->gtLclNum, offset);
offset += genTypeSize(type);
}
@@ -1688,10 +1658,10 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
genConsumeRegs(op1);
- GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
- GenTreeCall* call = actualOp1->AsCall();
+ GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
+ GenTreeCall* call = actualOp1->AsCall();
ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = retTypeDesc->GetReturnRegCount();
+ unsigned regCount = retTypeDesc->GetReturnRegCount();
assert(regCount == MAX_RET_REG_COUNT);
// Handle circular dependency between call allocated regs and ABI return regs.
@@ -1702,14 +1672,14 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
// kind of circular dependency could arise between xmm0 and xmm1 return regs.
// Codegen is expected to handle such circular dependency.
//
- var_types regType0 = retTypeDesc->GetReturnRegType(0);
- regNumber returnReg0 = retTypeDesc->GetABIReturnReg(0);
+ var_types regType0 = retTypeDesc->GetReturnRegType(0);
+ regNumber returnReg0 = retTypeDesc->GetABIReturnReg(0);
regNumber allocatedReg0 = call->GetRegNumByIdx(0);
- var_types regType1 = retTypeDesc->GetReturnRegType(1);
- regNumber returnReg1 = retTypeDesc->GetABIReturnReg(1);
+ var_types regType1 = retTypeDesc->GetReturnRegType(1);
+ regNumber returnReg1 = retTypeDesc->GetABIReturnReg(1);
regNumber allocatedReg1 = call->GetRegNumByIdx(1);
-
+
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
@@ -1727,8 +1697,7 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
}
}
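The check that follows handles the cycle the earlier comment warns about: the call left value 0 in what the ABI wants as return register 1 and value 1 in return register 0, so moving either one first would clobber the other and the pair must be swapped instead (an xchg for integer registers, a shuffle for the floating-point case). A toy sketch of detecting and resolving that two-register cycle:

#include <utility>

// Toy restatement of the two-register circular dependency resolved below;
// plain integers stand in for register contents, and the names are illustrative.
inline void MoveToReturnRegsSketch(int& reg0Value, int& reg1Value,
                                   int allocated0, int allocated1, int return0, int return1)
{
    if (allocated0 == return1 && allocated1 == return0)
    {
        std::swap(reg0Value, reg1Value); // stands in for xchg / xmm shuffle
        return;
    }
    // otherwise at most one plain move per value suffices (not shown)
}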
- if (allocatedReg0 == returnReg1 &&
- allocatedReg1 == returnReg0)
+ if (allocatedReg0 == returnReg1 && allocatedReg1 == returnReg0)
{
// Circular dependency - swap allocatedReg0 and allocatedReg1
if (varTypeIsFloating(regType0))
@@ -1776,7 +1745,7 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
}
#else
unreached();
-#endif
+#endif
}
//------------------------------------------------------------------------
@@ -1789,12 +1758,11 @@ CodeGen::genStructReturn(GenTreePtr treeNode)
// Return Value:
// None
//
-void
-CodeGen::genReturn(GenTreePtr treeNode)
+void CodeGen::genReturn(GenTreePtr treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
- GenTreePtr op1 = treeNode->gtGetOp1();
- var_types targetType = treeNode->TypeGet();
+ GenTreePtr op1 = treeNode->gtGetOp1();
+ var_types targetType = treeNode->TypeGet();
#ifdef DEBUG
if (targetType == TYP_VOID)
@@ -1856,7 +1824,9 @@ CodeGen::genReturn(GenTreePtr treeNode)
if ((op1->gtFlags & GTF_REG_VAL) != 0)
{
op1->gtFlags &= ~GTF_REG_VAL;
- inst_TT_RV(ins_Store(op1->gtType, compiler->isSIMDTypeLocalAligned(op1->gtLclVarCommon.gtLclNum)), op1, op1->gtRegNum);
+ inst_TT_RV(ins_Store(op1->gtType,
+ compiler->isSIMDTypeLocalAligned(op1->gtLclVarCommon.gtLclNum)),
+ op1, op1->gtRegNum);
}
// Now, load it to the fp stack.
getEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->gtLclNum, 0);
@@ -1929,8 +1899,7 @@ CodeGen::genReturn(GenTreePtr treeNode)
* Preconditions: All operands have been evaluated
*
*/
-void
-CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
+void CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
{
regNumber targetReg;
#if !defined(_TARGET_64BIT_)
@@ -1943,19 +1912,19 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
else
#endif // !defined(_TARGET_64BIT_)
{
- targetReg = treeNode->gtRegNum;
+ targetReg = treeNode->gtRegNum;
}
var_types targetType = treeNode->TypeGet();
- emitter *emit = getEmitter();
+ emitter* emit = getEmitter();
-#ifdef DEBUG
+#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
// This is important because LSRA ensures that any necessary copies will be
// handled correctly.
lastConsumedNode = nullptr;
if (compiler->verbose)
{
- unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
+ unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
printf("Generating: ");
compiler->gtDispTree(treeNode, nullptr, nullptr, true);
}
@@ -1980,127 +1949,127 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
switch (treeNode->gtOper)
{
- case GT_START_NONGC:
- getEmitter()->emitDisableGC();
- break;
+ case GT_START_NONGC:
+ getEmitter()->emitDisableGC();
+ break;
- case GT_PROF_HOOK:
+ case GT_PROF_HOOK:
#ifdef PROFILING_SUPPORTED
- // We should be seeing this only if profiler hook is needed
- noway_assert(compiler->compIsProfilerHookNeeded());
-
- // Right now this node is used only for tail calls. In future if
- // we intend to use it for Enter or Leave hooks, add a data member
- // to this node indicating the kind of profiler hook. For example,
- // helper number can be used.
- genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
+ // We should be seeing this only if profiler hook is needed
+ noway_assert(compiler->compIsProfilerHookNeeded());
+
+ // Right now this node is used only for tail calls. In future if
+ // we intend to use it for Enter or Leave hooks, add a data member
+ // to this node indicating the kind of profiler hook. For example,
+ // helper number can be used.
+ genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
- break;
+ break;
- case GT_LCLHEAP:
- genLclHeap(treeNode);
- break;
+ case GT_LCLHEAP:
+ genLclHeap(treeNode);
+ break;
- case GT_CNS_INT:
+ case GT_CNS_INT:
#ifdef _TARGET_X86_
- NYI_IF(treeNode->IsIconHandle(GTF_ICON_TLS_HDL), "TLS constants");
+ NYI_IF(treeNode->IsIconHandle(GTF_ICON_TLS_HDL), "TLS constants");
#endif // _TARGET_X86_
- __fallthrough;
-
- case GT_CNS_DBL:
- genSetRegToConst(targetReg, targetType, treeNode);
- genProduceReg(treeNode);
- break;
+ __fallthrough;
- case GT_NEG:
- case GT_NOT:
- if (varTypeIsFloating(targetType))
- {
- assert(treeNode->gtOper == GT_NEG);
- genSSE2BitwiseOp(treeNode);
- }
- else
- {
- GenTreePtr operand = treeNode->gtGetOp1();
- assert(!operand->isContained());
- regNumber operandReg = genConsumeReg(operand);
+ case GT_CNS_DBL:
+ genSetRegToConst(targetReg, targetType, treeNode);
+ genProduceReg(treeNode);
+ break;
- if (operandReg != targetReg)
+ case GT_NEG:
+ case GT_NOT:
+ if (varTypeIsFloating(targetType))
{
- inst_RV_RV(INS_mov, targetReg, operandReg, targetType);
+ assert(treeNode->gtOper == GT_NEG);
+ genSSE2BitwiseOp(treeNode);
}
+ else
+ {
+ GenTreePtr operand = treeNode->gtGetOp1();
+ assert(!operand->isContained());
+ regNumber operandReg = genConsumeReg(operand);
- instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
- inst_RV(ins, targetReg, targetType);
- }
- genProduceReg(treeNode);
- break;
+ if (operandReg != targetReg)
+ {
+ inst_RV_RV(INS_mov, targetReg, operandReg, targetType);
+ }
- case GT_OR:
- case GT_XOR:
- case GT_AND:
- assert(varTypeIsIntegralOrI(treeNode));
- __fallthrough;
+ instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
+ inst_RV(ins, targetReg, targetType);
+ }
+ genProduceReg(treeNode);
+ break;
+
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
+ assert(varTypeIsIntegralOrI(treeNode));
+ __fallthrough;
#if !defined(_TARGET_64BIT_)
- case GT_ADD_LO:
- case GT_ADD_HI:
- case GT_SUB_LO:
- case GT_SUB_HI:
+ case GT_ADD_LO:
+ case GT_ADD_HI:
+ case GT_SUB_LO:
+ case GT_SUB_HI:
#endif // !defined(_TARGET_64BIT_)
- case GT_ADD:
- case GT_SUB:
- genConsumeOperands(treeNode->AsOp());
- genCodeForBinary(treeNode);
- break;
+ case GT_ADD:
+ case GT_SUB:
+ genConsumeOperands(treeNode->AsOp());
+ genCodeForBinary(treeNode);
+ break;
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROL:
- case GT_ROR:
- genCodeForShift(treeNode);
- // genCodeForShift() calls genProduceReg()
- break;
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROL:
+ case GT_ROR:
+ genCodeForShift(treeNode);
+ // genCodeForShift() calls genProduceReg()
+ break;
- case GT_CAST:
+ case GT_CAST:
#if !defined(_TARGET_64BIT_)
- // We will NYI in DecomposeNode() if we are cast TO a long type, but we do not
- // yet support casting FROM a long type either, and that's simpler to catch
- // here.
- NYI_IF(varTypeIsLong(treeNode->gtOp.gtOp1), "Casts from TYP_LONG");
+ // We will NYI in DecomposeNode() if we are cast TO a long type, but we do not
+ // yet support casting FROM a long type either, and that's simpler to catch
+ // here.
+ NYI_IF(varTypeIsLong(treeNode->gtOp.gtOp1), "Casts from TYP_LONG");
#endif // !defined(_TARGET_64BIT_)
- if (varTypeIsFloating(targetType) && varTypeIsFloating(treeNode->gtOp.gtOp1))
- {
- // Casts float/double <--> double/float
- genFloatToFloatCast(treeNode);
- }
- else if (varTypeIsFloating(treeNode->gtOp.gtOp1))
- {
- // Casts float/double --> int32/int64
- genFloatToIntCast(treeNode);
- }
- else if (varTypeIsFloating(targetType))
- {
- // Casts int32/uint32/int64/uint64 --> float/double
- genIntToFloatCast(treeNode);
- }
- else
- {
- // Casts int <--> int
- genIntToIntCast(treeNode);
- }
- // The per-case functions call genProduceReg()
- break;
+ if (varTypeIsFloating(targetType) && varTypeIsFloating(treeNode->gtOp.gtOp1))
+ {
+ // Casts float/double <--> double/float
+ genFloatToFloatCast(treeNode);
+ }
+ else if (varTypeIsFloating(treeNode->gtOp.gtOp1))
+ {
+ // Casts float/double --> int32/int64
+ genFloatToIntCast(treeNode);
+ }
+ else if (varTypeIsFloating(targetType))
+ {
+ // Casts int32/uint32/int64/uint64 --> float/double
+ genIntToFloatCast(treeNode);
+ }
+ else
+ {
+ // Casts int <--> int
+ genIntToIntCast(treeNode);
+ }
+ // The per-case functions call genProduceReg()
+ break;
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
{
// lcl_vars are not defs
assert((treeNode->gtFlags & GTF_VAR_DEF) == 0);
- GenTreeLclVarCommon *lcl = treeNode->AsLclVarCommon();
- bool isRegCandidate = compiler->lvaTable[lcl->gtLclNum].lvIsRegCandidate();
+ GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon();
+ bool isRegCandidate = compiler->lvaTable[lcl->gtLclNum].lvIsRegCandidate();
if (isRegCandidate && !(treeNode->gtFlags & GTF_VAR_DEATH))
{
@@ -2114,15 +2083,15 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
{
assert(!isRegCandidate);
- emit->emitIns_R_S(ins_Load(treeNode->TypeGet(), compiler->isSIMDTypeLocalAligned(lcl->gtLclNum)),
+ emit->emitIns_R_S(ins_Load(treeNode->TypeGet(), compiler->isSIMDTypeLocalAligned(lcl->gtLclNum)),
emitTypeSize(treeNode), treeNode->gtRegNum, lcl->gtLclNum, 0);
genProduceReg(treeNode);
}
}
break;
- case GT_LCL_FLD_ADDR:
- case GT_LCL_VAR_ADDR:
+ case GT_LCL_FLD_ADDR:
+ case GT_LCL_VAR_ADDR:
{
// Address of a local var. This by itself should never be allocated a register.
// If it is worth storing the address in a register then it should be cse'ed into
@@ -2132,12 +2101,12 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
inst_RV_TT(INS_lea, targetReg, treeNode, 0, EA_BYREF);
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
{
- noway_assert(targetType != TYP_STRUCT);
+ noway_assert(targetType != TYP_STRUCT);
noway_assert(treeNode->gtRegNum != REG_NA);
#ifdef FEATURE_SIMD
@@ -2149,18 +2118,18 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
#endif
- emitAttr size = emitTypeSize(targetType);
- unsigned offs = treeNode->gtLclFld.gtLclOffs;
+ emitAttr size = emitTypeSize(targetType);
+ unsigned offs = treeNode->gtLclFld.gtLclOffs;
unsigned varNum = treeNode->gtLclVarCommon.gtLclNum;
assert(varNum < compiler->lvaCount);
emit->emitIns_R_S(ins_Move_Extend(targetType, treeNode->InReg()), size, targetReg, varNum, offs);
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_STORE_LCL_FLD:
- {
+ case GT_STORE_LCL_FLD:
+ {
noway_assert(targetType != TYP_STRUCT);
noway_assert(!treeNode->InReg());
assert(!varTypeIsFloating(targetType) || (targetType == treeNode->gtGetOp1()->TypeGet()));
@@ -2172,15 +2141,15 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
genStoreLclFldTypeSIMD12(treeNode);
break;
}
-#endif
+#endif
GenTreePtr op1 = treeNode->gtGetOp1();
genConsumeRegs(op1);
emit->emitInsBinary(ins_Store(targetType), emitTypeSize(treeNode), treeNode, op1);
}
break;
- case GT_STORE_LCL_VAR:
- {
+ case GT_STORE_LCL_VAR:
+ {
GenTreePtr op1 = treeNode->gtGetOp1();
// var = call, where call returns a multi-reg return value
@@ -2194,7 +2163,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
noway_assert(targetType != TYP_STRUCT);
assert(!varTypeIsFloating(targetType) || (targetType == treeNode->gtGetOp1()->TypeGet()));
- unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
+ unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
// Ensure that lclVar nodes are typed correctly.
@@ -2207,13 +2176,14 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
break;
}
#endif // !defined(_TARGET_64BIT_)
-
+
genConsumeRegs(op1);
if (treeNode->gtRegNum == REG_NA)
{
// stack store
- emit->emitInsMov(ins_Store(targetType, compiler->isSIMDTypeLocalAligned(lclNum)), emitTypeSize(treeNode), treeNode);
+ emit->emitInsMov(ins_Store(targetType, compiler->isSIMDTypeLocalAligned(lclNum)),
+ emitTypeSize(treeNode), treeNode);
varDsc->lvRegNum = REG_STK;
}
else
@@ -2224,8 +2194,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// zero in the target register, because an xor is smaller than a copy. Note that we could
// potentially handle this in the register allocator, but we can't always catch it there
// because the target may not have a register allocated for it yet.
- if (!containedOp1 &&
- (op1->gtRegNum != treeNode->gtRegNum) &&
+ if (!containedOp1 && (op1->gtRegNum != treeNode->gtRegNum) &&
(op1->IsIntegralConst(0) || op1->IsFPZero()))
{
op1->gtRegNum = REG_NA;
@@ -2259,59 +2228,59 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_RETFILT:
- // A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in
- // the return register, if it's not already there. The processing is the same as GT_RETURN.
- if (targetType != TYP_VOID)
- {
- // For filters, the IL spec says the result is type int32. Further, the only specified legal values
- // are 0 or 1, with the use of other values "undefined".
- assert(targetType == TYP_INT);
- }
+ case GT_RETFILT:
+ // A void GT_RETFILT is the end of a finally. For non-void filter returns we need to load the result in
+ // the return register, if it's not already there. The processing is the same as GT_RETURN.
+ if (targetType != TYP_VOID)
+ {
+ // For filters, the IL spec says the result is type int32. Further, the only specified legal values
+ // are 0 or 1, with the use of other values "undefined".
+ assert(targetType == TYP_INT);
+ }
- __fallthrough;
+ __fallthrough;
- case GT_RETURN:
- genReturn(treeNode);
- break;
+ case GT_RETURN:
+ genReturn(treeNode);
+ break;
- case GT_LEA:
+ case GT_LEA:
{
// if we are here, it is the case where there is an LEA that cannot
// be folded into a parent instruction
- GenTreeAddrMode *lea = treeNode->AsAddrMode();
+ GenTreeAddrMode* lea = treeNode->AsAddrMode();
genLeaInstruction(lea);
}
// genLeaInstruction calls genProduceReg()
break;
- case GT_IND:
+ case GT_IND:
#ifdef FEATURE_SIMD
- // Handling of Vector3 type values loaded through indirection.
- if (treeNode->TypeGet() == TYP_SIMD12)
- {
- genLoadIndTypeSIMD12(treeNode);
- break;
- }
+ // Handling of Vector3 type values loaded through indirection.
+ if (treeNode->TypeGet() == TYP_SIMD12)
+ {
+ genLoadIndTypeSIMD12(treeNode);
+ break;
+ }
#endif // FEATURE_SIMD
- genConsumeAddress(treeNode->AsIndir()->Addr());
- emit->emitInsMov(ins_Load(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode);
- genProduceReg(treeNode);
- break;
+ genConsumeAddress(treeNode->AsIndir()->Addr());
+ emit->emitInsMov(ins_Load(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode);
+ genProduceReg(treeNode);
+ break;
- case GT_MULHI:
- genCodeForMulHi(treeNode->AsOp());
- genProduceReg(treeNode);
- break;
+ case GT_MULHI:
+ genCodeForMulHi(treeNode->AsOp());
+ genProduceReg(treeNode);
+ break;
- case GT_MUL:
+ case GT_MUL:
{
instruction ins;
- emitAttr size = emitTypeSize(treeNode);
- bool isUnsignedMultiply = ((treeNode->gtFlags & GTF_UNSIGNED) != 0);
- bool requiresOverflowCheck = treeNode->gtOverflowEx();
-
+ emitAttr size = emitTypeSize(treeNode);
+ bool isUnsignedMultiply = ((treeNode->gtFlags & GTF_UNSIGNED) != 0);
+ bool requiresOverflowCheck = treeNode->gtOverflowEx();
+
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
@@ -2329,9 +2298,9 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// regOp :: A register op (especially the operand that matches 'targetReg')
// (can be nullptr when we have both a memory op and an immediate op)
- GenTree * immOp = nullptr;
- GenTree * rmOp = op1;
- GenTree * regOp;
+ GenTree* immOp = nullptr;
+ GenTree* rmOp = op1;
+ GenTree* regOp;
if (op2->isContainedIntOrIImmed())
{
@@ -2354,7 +2323,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
if (!requiresOverflowCheck && !rmOp->isContained() && ((imm == 3) || (imm == 5) || (imm == 9)))
{
// We will use the LEA instruction to perform this multiply
- // Note that an LEA with base=x, index=x and scale=(imm-1) computes x*imm when imm=3,5 or 9.
+ // Note that an LEA with base=x, index=x and scale=(imm-1) computes x*imm when imm=3,5 or 9.
unsigned int scale = (unsigned int)(imm - 1);
getEmitter()->emitIns_R_ARX(INS_lea, size, targetReg, rmOp->gtRegNum, rmOp->gtRegNum, scale, 0);
}
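// Reviewer sketch (not part of this change): the LEA multiply above relies on the identity
// x * imm == x + x * (imm - 1), which maps onto "lea r, [base + index*scale]" whenever
// imm - 1 is a hardware scale factor (2, 4, or 8), i.e. imm is 3, 5, or 9. MulViaLeaScale is a
// hypothetical helper (not a JIT function) showing the same arithmetic:
static int MulViaLeaScale(int x, int imm) // assumes imm is 3, 5, or 9
{
    unsigned int scale = (unsigned int)(imm - 1); // the scale the emitted LEA uses
    return x + x * (int)scale;                    // what "lea targetReg, [x + x*scale]" computes
}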
@@ -2365,15 +2334,15 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
emit->emitInsBinary(ins, size, rmOp, immOp);
}
}
- else // we have no contained immediate operand
+ else // we have no contained immediate operand
{
regOp = op1;
- rmOp = op2;
+ rmOp = op2;
regNumber mulTargetReg = targetReg;
if (isUnsignedMultiply && requiresOverflowCheck)
{
- ins = INS_mulEAX;
+ ins = INS_mulEAX;
mulTargetReg = REG_RAX;
}
else
@@ -2414,49 +2383,49 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
genCheckOverflow(treeNode);
}
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
- case GT_MOD:
- case GT_UDIV:
- case GT_UMOD:
- // We shouldn't be seeing GT_MOD on float/double args as it should get morphed into a
- // helper call by front-end. Similarly we shouldn't be seeing GT_UDIV and GT_UMOD
- // on float/double args.
- noway_assert(!varTypeIsFloating(treeNode));
- __fallthrough;
-
- case GT_DIV:
- genCodeForDivMod(treeNode->AsOp());
- break;
+ case GT_MOD:
+ case GT_UDIV:
+ case GT_UMOD:
+ // We shouldn't be seeing GT_MOD on float/double args as it should get morphed into a
+ // helper call by front-end. Similarly we shouldn't be seeing GT_UDIV and GT_UMOD
+ // on float/double args.
+ noway_assert(!varTypeIsFloating(treeNode));
+ __fallthrough;
- case GT_INTRINSIC:
- genIntrinsic(treeNode);
- break;
+ case GT_DIV:
+ genCodeForDivMod(treeNode->AsOp());
+ break;
+
+ case GT_INTRINSIC:
+ genIntrinsic(treeNode);
+ break;
#ifdef FEATURE_SIMD
- case GT_SIMD:
- genSIMDIntrinsic(treeNode->AsSIMD());
- break;
+ case GT_SIMD:
+ genSIMDIntrinsic(treeNode->AsSIMD());
+ break;
#endif // FEATURE_SIMD
- case GT_CKFINITE:
- genCkfinite(treeNode);
- break;
+ case GT_CKFINITE:
+ genCkfinite(treeNode);
+ break;
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
{
// TODO-XArch-CQ: Check if we can use the currently set flags.
// TODO-XArch-CQ: Check for the case where we can simply transfer the carry bit to a register
// (signed < or >= where targetReg != REG_NA)
- GenTreePtr op1 = treeNode->gtGetOp1();
- var_types op1Type = op1->TypeGet();
+ GenTreePtr op1 = treeNode->gtGetOp1();
+ var_types op1Type = op1->TypeGet();
if (varTypeIsFloating(op1Type))
{
@@ -2487,9 +2456,9 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_JTRUE:
+ case GT_JTRUE:
{
- GenTree *cmp = treeNode->gtOp.gtOp1;
+ GenTree* cmp = treeNode->gtOp.gtOp1;
assert(cmp->OperIsCompare());
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
@@ -2507,13 +2476,13 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// is governed by a flag NOT by the inherent type of the node
// TODO-XArch-CQ: Check if we can use the currently set flags.
emitJumpKind jumpKind[2];
- bool branchToTrueLabel[2];
+ bool branchToTrueLabel[2];
genJumpKindsForTree(cmp, jumpKind, branchToTrueLabel);
BasicBlock* skipLabel = nullptr;
if (jumpKind[0] != EJ_NONE)
{
- BasicBlock *jmpTarget;
+ BasicBlock* jmpTarget;
if (branchToTrueLabel[0])
{
jmpTarget = compiler->compCurBB->bbJumpDest;
@@ -2537,17 +2506,19 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
if (skipLabel != nullptr)
+ {
genDefineTempLabel(skipLabel);
+ }
}
}
break;
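// Reviewer sketch (not part of this change): jumpKind[2] / branchToTrueLabel[2] above exist because a
// floating-point relation can require two branches - one for the unordered (NaN) outcome and one for
// the ordered relation. FloatEqViaTwoBranches is a hypothetical, non-emitted outline of why an
// equality test lowers to a pair like "jp skipLabel; je trueLabel; skipLabel:":
static bool FloatEqViaTwoBranches(double a, double b)
{
    if (a != a || b != b) // unordered (parity) case: take the path to skipLabel / the false target
    {
        return false;
    }
    return a == b;        // ordered case: the branch to the true target
}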
- case GT_RETURNTRAP:
+ case GT_RETURNTRAP:
{
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
- GenTree *data = treeNode->gtOp.gtOp1;
+ GenTree* data = treeNode->gtOp.gtOp1;
genConsumeRegs(data);
GenTreeIntCon cns = intForm(TYP_INT, 0);
emit->emitInsBinary(INS_cmp, emitTypeSize(TYP_INT), data, &cns);
@@ -2568,27 +2539,27 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_STOREIND:
- genStoreInd(treeNode);
- break;
+ case GT_STOREIND:
+ genStoreInd(treeNode);
+ break;
- case GT_COPY:
- // This is handled at the time we call genConsumeReg() on the GT_COPY
- break;
+ case GT_COPY:
+ // This is handled at the time we call genConsumeReg() on the GT_COPY
+ break;
- case GT_SWAP:
+ case GT_SWAP:
{
// Swap is only supported for lclVar operands that are enregistered
// We do not consume or produce any registers. Both operands remain enregistered.
// However, the gc-ness may change.
assert(genIsRegCandidateLocal(treeNode->gtOp.gtOp1) && genIsRegCandidateLocal(treeNode->gtOp.gtOp2));
- GenTreeLclVarCommon* lcl1 = treeNode->gtOp.gtOp1->AsLclVarCommon();
- LclVarDsc* varDsc1 = &(compiler->lvaTable[lcl1->gtLclNum]);
- var_types type1 = varDsc1->TypeGet();
- GenTreeLclVarCommon* lcl2 = treeNode->gtOp.gtOp2->AsLclVarCommon();
- LclVarDsc* varDsc2 = &(compiler->lvaTable[lcl2->gtLclNum]);
- var_types type2 = varDsc2->TypeGet();
+ GenTreeLclVarCommon* lcl1 = treeNode->gtOp.gtOp1->AsLclVarCommon();
+ LclVarDsc* varDsc1 = &(compiler->lvaTable[lcl1->gtLclNum]);
+ var_types type1 = varDsc1->TypeGet();
+ GenTreeLclVarCommon* lcl2 = treeNode->gtOp.gtOp2->AsLclVarCommon();
+ LclVarDsc* varDsc2 = &(compiler->lvaTable[lcl2->gtLclNum]);
+ var_types type2 = varDsc2->TypeGet();
// We must have both int or both fp regs
assert(!varTypeIsFloating(type1) || varTypeIsFloating(type2));
@@ -2596,9 +2567,9 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// FP swap is not yet implemented (and should have NYI'd in LSRA)
assert(!varTypeIsFloating(type1));
- regNumber oldOp1Reg = lcl1->gtRegNum;
+ regNumber oldOp1Reg = lcl1->gtRegNum;
regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
- regNumber oldOp2Reg = lcl2->gtRegNum;
+ regNumber oldOp2Reg = lcl2->gtRegNum;
regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
// We don't call genUpdateVarReg because we don't have a tree node with the new register.
@@ -2617,8 +2588,8 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// Update the gcInfo.
// Manually remove these regs for the gc sets (mostly to avoid confusing duplicative dump output)
- gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask|oldOp2RegMask);
- gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask|oldOp2RegMask);
+ gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
+ gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
// gcMarkRegPtrVal will do the appropriate thing for non-gc types.
// It will also dump the updates.
@@ -2627,22 +2598,22 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_LIST:
- case GT_ARGPLACE:
- // Nothing to do
- break;
+ case GT_LIST:
+ case GT_ARGPLACE:
+ // Nothing to do
+ break;
- case GT_PUTARG_STK:
- genPutArgStk(treeNode);
- break;
+ case GT_PUTARG_STK:
+ genPutArgStk(treeNode);
+ break;
- case GT_PUTARG_REG:
+ case GT_PUTARG_REG:
{
#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
noway_assert(targetType != TYP_STRUCT);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
// commas show up here commonly, as part of a nullchk operation
- GenTree *op1 = treeNode->gtOp.gtOp1;
+ GenTree* op1 = treeNode->gtOp.gtOp1;
// If child node is not already in the register we need, move it
genConsumeReg(op1);
if (treeNode->gtRegNum != op1->gtRegNum)
@@ -2653,32 +2624,32 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_CALL:
- genCallInstruction(treeNode);
- break;
+ case GT_CALL:
+ genCallInstruction(treeNode);
+ break;
- case GT_JMP:
- genJmpMethod(treeNode);
- break;
+ case GT_JMP:
+ genJmpMethod(treeNode);
+ break;
- case GT_LOCKADD:
- case GT_XCHG:
- case GT_XADD:
- genLockedInstructions(treeNode);
- break;
+ case GT_LOCKADD:
+ case GT_XCHG:
+ case GT_XADD:
+ genLockedInstructions(treeNode);
+ break;
- case GT_MEMORYBARRIER:
- instGen_MemoryBarrier();
- break;
+ case GT_MEMORYBARRIER:
+ instGen_MemoryBarrier();
+ break;
- case GT_CMPXCHG:
+ case GT_CMPXCHG:
{
- GenTreePtr location = treeNode->gtCmpXchg.gtOpLocation; // arg1
- GenTreePtr value = treeNode->gtCmpXchg.gtOpValue; // arg2
- GenTreePtr comparand = treeNode->gtCmpXchg.gtOpComparand; // arg3
+ GenTreePtr location = treeNode->gtCmpXchg.gtOpLocation; // arg1
+ GenTreePtr value = treeNode->gtCmpXchg.gtOpValue; // arg2
+ GenTreePtr comparand = treeNode->gtCmpXchg.gtOpComparand; // arg3
assert(location->gtRegNum != REG_NA && location->gtRegNum != REG_RAX);
- assert(value->gtRegNum != REG_NA && value->gtRegNum != REG_RAX);
+ assert(value->gtRegNum != REG_NA && value->gtRegNum != REG_RAX);
genConsumeReg(location);
genConsumeReg(value);
@@ -2691,7 +2662,6 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
inst_RV_RV(ins_Copy(comparand->TypeGet()), REG_RAX, comparand->gtRegNum, comparand->TypeGet());
}
-
// location is Rm
instGen(INS_lock);
@@ -2703,50 +2673,50 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
inst_RV_RV(ins_Copy(targetType), targetReg, REG_RAX, targetType);
}
}
- genProduceReg(treeNode);
- break;
+ genProduceReg(treeNode);
+ break;
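// Reviewer sketch (not part of this change): the sequence above follows the x86 compare-exchange
// contract - the comparand is moved into RAX if needed, "lock cmpxchg [location], value" is emitted,
// and the value previously in memory is left in RAX (copied to targetReg when they differ).
// CmpXchgMeaning is a hypothetical, deliberately non-atomic outline of that contract, assuming
// 64-bit operands:
static long long CmpXchgMeaning(long long* location, long long value, long long comparand)
{
    long long observed = *location; // what RAX holds after the instruction
    if (observed == comparand)
    {
        *location = value;          // the store happens only on a match
    }
    return observed;                // the node's result
}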
- case GT_RELOAD:
- // do nothing - reload is just a marker.
- // The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
- // into the register specified in this node.
- break;
+ case GT_RELOAD:
+ // do nothing - reload is just a marker.
+ // The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
+ // into the register specified in this node.
+ break;
- case GT_NOP:
- break;
+ case GT_NOP:
+ break;
- case GT_NO_OP:
- if (treeNode->gtFlags & GTF_NO_OP_NO)
- {
- noway_assert(!"GTF_NO_OP_NO should not be set");
- }
- else
- {
- getEmitter()->emitIns_Nop(1);
- }
- break;
+ case GT_NO_OP:
+ if (treeNode->gtFlags & GTF_NO_OP_NO)
+ {
+ noway_assert(!"GTF_NO_OP_NO should not be set");
+ }
+ else
+ {
+ getEmitter()->emitIns_Nop(1);
+ }
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- genRangeCheck(treeNode);
- break;
+ genRangeCheck(treeNode);
+ break;
- case GT_PHYSREG:
- if (treeNode->gtRegNum != treeNode->AsPhysReg()->gtSrcReg)
- {
- inst_RV_RV(INS_mov, treeNode->gtRegNum, treeNode->AsPhysReg()->gtSrcReg, targetType);
+ case GT_PHYSREG:
+ if (treeNode->gtRegNum != treeNode->AsPhysReg()->gtSrcReg)
+ {
+ inst_RV_RV(INS_mov, treeNode->gtRegNum, treeNode->AsPhysReg()->gtSrcReg, targetType);
- genTransferRegGCState(treeNode->gtRegNum, treeNode->AsPhysReg()->gtSrcReg);
- }
- genProduceReg(treeNode);
- break;
+ genTransferRegGCState(treeNode->gtRegNum, treeNode->AsPhysReg()->gtSrcReg);
+ }
+ genProduceReg(treeNode);
+ break;
- case GT_PHYSREGDST:
- break;
+ case GT_PHYSREGDST:
+ break;
- case GT_NULLCHECK:
+ case GT_NULLCHECK:
{
assert(!treeNode->gtOp.gtOp1->isContained());
regNumber reg = genConsumeReg(treeNode->gtOp.gtOp1);
@@ -2754,57 +2724,59 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_CATCH_ARG:
+ case GT_CATCH_ARG:
- noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
+ noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
- /* Catch arguments get passed in a register. genCodeForBBlist()
- would have marked it as holding a GC object, but not used. */
+ /* Catch arguments get passed in a register. genCodeForBBlist()
+ would have marked it as holding a GC object, but not used. */
- noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
- genConsumeReg(treeNode);
- break;
+ noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
+ genConsumeReg(treeNode);
+ break;
#if !FEATURE_EH_FUNCLETS
- case GT_END_LFIN:
-
- // Have to clear the ShadowSP of the nesting level which encloses the finally. Generates:
- // mov dword ptr [ebp-0xC], 0 // for some slot of the ShadowSP local var
-
- unsigned finallyNesting;
- finallyNesting = treeNode->gtVal.gtVal1;
- noway_assert(treeNode->gtVal.gtVal1 < compiler->compHndBBtabCount);
- noway_assert(finallyNesting < compiler->compHndBBtabCount);
-
- // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
- unsigned filterEndOffsetSlotOffs;
- PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) > TARGET_POINTER_SIZE); //below doesn't underflow.
- filterEndOffsetSlotOffs = (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
-
- unsigned curNestingSlotOffs;
- curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
- instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
- break;
+ case GT_END_LFIN:
+
+ // Have to clear the ShadowSP of the nesting level which encloses the finally. Generates:
+ // mov dword ptr [ebp-0xC], 0 // for some slot of the ShadowSP local var
+
+ unsigned finallyNesting;
+ finallyNesting = treeNode->gtVal.gtVal1;
+ noway_assert(treeNode->gtVal.gtVal1 < compiler->compHndBBtabCount);
+ noway_assert(finallyNesting < compiler->compHndBBtabCount);
+
+ // The last slot is reserved for ICodeManager::FixContext(ppEndRegion)
+ unsigned filterEndOffsetSlotOffs;
+ PREFIX_ASSUME(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) >
+ TARGET_POINTER_SIZE); // below doesn't underflow.
+ filterEndOffsetSlotOffs =
+ (unsigned)(compiler->lvaLclSize(compiler->lvaShadowSPslotsVar) - TARGET_POINTER_SIZE);
+
+ unsigned curNestingSlotOffs;
+ curNestingSlotOffs = filterEndOffsetSlotOffs - ((finallyNesting + 1) * TARGET_POINTER_SIZE);
+ instGen_Store_Imm_Into_Lcl(TYP_I_IMPL, EA_PTRSIZE, 0, compiler->lvaShadowSPslotsVar, curNestingSlotOffs);
+ break;
#endif // !FEATURE_EH_FUNCLETS
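// Reviewer sketch (not part of this change): the slot cleared above lives at
// filterEndOffsetSlotOffs - (finallyNesting + 1) * TARGET_POINTER_SIZE, where the last pointer-sized
// slot of lvaShadowSPslotsVar is reserved for ICodeManager::FixContext. ShadowSPSlotOffs is a
// hypothetical standalone version of that arithmetic:
static unsigned ShadowSPSlotOffs(unsigned shadowSPsize, unsigned finallyNesting, unsigned pointerSize)
{
    unsigned filterEndOffsetSlotOffs = shadowSPsize - pointerSize;          // reserved last slot
    return filterEndOffsetSlotOffs - ((finallyNesting + 1) * pointerSize);  // slot for this nesting level
}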
- case GT_PINVOKE_PROLOG:
- noway_assert(((gcInfo.gcRegGCrefSetCur|gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
+ case GT_PINVOKE_PROLOG:
+ noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
- // the runtime side requires the codegen here to be consistent
- emit->emitDisableRandomNops();
- break;
+ // the runtime side requires the codegen here to be consistent
+ emit->emitDisableRandomNops();
+ break;
- case GT_LABEL:
- genPendingCallLabel = genCreateTempLabel();
- treeNode->gtLabel.gtLabBB = genPendingCallLabel;
- emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->gtRegNum);
- break;
+ case GT_LABEL:
+ genPendingCallLabel = genCreateTempLabel();
+ treeNode->gtLabel.gtLabBB = genPendingCallLabel;
+ emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->gtRegNum);
+ break;
- case GT_COPYOBJ:
- genCodeForCpObj(treeNode->AsCpObj());
- break;
+ case GT_COPYOBJ:
+ genCodeForCpObj(treeNode->AsCpObj());
+ break;
- case GT_COPYBLK:
+ case GT_COPYBLK:
{
GenTreeCpBlk* cpBlkOp = treeNode->AsCpBlk();
if (cpBlkOp->gtBlkOpGcUnsafe)
@@ -2815,18 +2787,18 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
switch (cpBlkOp->gtBlkOpKind)
{
#ifdef _TARGET_AMD64_
- case GenTreeBlkOp::BlkOpKindHelper:
- genCodeForCpBlk(cpBlkOp);
- break;
+ case GenTreeBlkOp::BlkOpKindHelper:
+ genCodeForCpBlk(cpBlkOp);
+ break;
#endif // _TARGET_AMD64_
- case GenTreeBlkOp::BlkOpKindRepInstr:
- genCodeForCpBlkRepMovs(cpBlkOp);
- break;
- case GenTreeBlkOp::BlkOpKindUnroll:
- genCodeForCpBlkUnroll(cpBlkOp);
- break;
- default:
- unreached();
+ case GenTreeBlkOp::BlkOpKindRepInstr:
+ genCodeForCpBlkRepMovs(cpBlkOp);
+ break;
+ case GenTreeBlkOp::BlkOpKindUnroll:
+ genCodeForCpBlkUnroll(cpBlkOp);
+ break;
+ default:
+ unreached();
}
if (cpBlkOp->gtBlkOpGcUnsafe)
{
@@ -2835,57 +2807,57 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
}
break;
- case GT_INITBLK:
+ case GT_INITBLK:
{
GenTreeInitBlk* initBlkOp = treeNode->AsInitBlk();
switch (initBlkOp->gtBlkOpKind)
{
- case GenTreeBlkOp::BlkOpKindHelper:
- genCodeForInitBlk(initBlkOp);
- break;
- case GenTreeBlkOp::BlkOpKindRepInstr:
- genCodeForInitBlkRepStos(initBlkOp);
- break;
- case GenTreeBlkOp::BlkOpKindUnroll:
- genCodeForInitBlkUnroll(initBlkOp);
- break;
- default:
- unreached();
+ case GenTreeBlkOp::BlkOpKindHelper:
+ genCodeForInitBlk(initBlkOp);
+ break;
+ case GenTreeBlkOp::BlkOpKindRepInstr:
+ genCodeForInitBlkRepStos(initBlkOp);
+ break;
+ case GenTreeBlkOp::BlkOpKindUnroll:
+ genCodeForInitBlkUnroll(initBlkOp);
+ break;
+ default:
+ unreached();
}
}
break;
- case GT_JMPTABLE:
- genJumpTable(treeNode);
- break;
+ case GT_JMPTABLE:
+ genJumpTable(treeNode);
+ break;
- case GT_SWITCH_TABLE:
- genTableBasedSwitch(treeNode);
- break;
-
- case GT_ARR_INDEX:
- genCodeForArrIndex(treeNode->AsArrIndex());
- break;
+ case GT_SWITCH_TABLE:
+ genTableBasedSwitch(treeNode);
+ break;
- case GT_ARR_OFFSET:
- genCodeForArrOffset(treeNode->AsArrOffs());
- break;
+ case GT_ARR_INDEX:
+ genCodeForArrIndex(treeNode->AsArrIndex());
+ break;
- case GT_CLS_VAR_ADDR:
- getEmitter()->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->gtClsVar.gtClsVarHnd, 0);
- genProduceReg(treeNode);
- break;
+ case GT_ARR_OFFSET:
+ genCodeForArrOffset(treeNode->AsArrOffs());
+ break;
+
+ case GT_CLS_VAR_ADDR:
+ getEmitter()->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->gtClsVar.gtClsVarHnd, 0);
+ genProduceReg(treeNode);
+ break;
#if !defined(_TARGET_64BIT_)
- case GT_LONG:
- assert(!treeNode->isContained());
- genConsumeRegs(treeNode);
- break;
+ case GT_LONG:
+ assert(!treeNode->isContained());
+ genConsumeRegs(treeNode);
+ break;
#endif
- default:
+ default:
{
-#ifdef DEBUG
+#ifdef DEBUG
char message[256];
sprintf(message, "Unimplemented node type %s\n", GenTree::NodeName(treeNode->OperGet()));
#endif
@@ -2908,8 +2880,7 @@ CodeGen::genCodeForTreeNode(GenTreePtr treeNode)
// The child of store is a multi-reg call node.
// genProduceReg() on treeNode is made by caller of this routine.
//
-void
-CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
+void CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
{
assert(treeNode->OperGet() == GT_STORE_LCL_VAR);
@@ -2919,14 +2890,14 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
// Assumption: current x64 Unix implementation requires that a multi-reg struct
// var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
- // being struct promoted.
- unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
+ // being struct promoted.
+ unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
noway_assert(varDsc->lvIsMultiRegRet);
- GenTree* op1 = treeNode->gtGetOp1();
- GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
- GenTreeCall* call = actualOp1->AsCall();
+ GenTree* op1 = treeNode->gtGetOp1();
+ GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
+ GenTreeCall* call = actualOp1->AsCall();
assert(call->HasMultiRegRetVal());
genConsumeRegs(op1);
@@ -2936,7 +2907,7 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
unsigned regCount = retTypeDesc->GetReturnRegCount();
if (treeNode->gtRegNum != REG_NA)
- {
+ {
// Right now the only enregistrable structs supported are SIMD types.
assert(varTypeIsSIMD(treeNode));
assert(varTypeIsFloating(retTypeDesc->GetReturnRegType(0)));
@@ -2946,8 +2917,8 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
// two different xmm registers and needs to assembled into a single
// xmm register.
regNumber targetReg = treeNode->gtRegNum;
- regNumber reg0 = call->GetRegNumByIdx(0);
- regNumber reg1 = call->GetRegNumByIdx(1);
+ regNumber reg0 = call->GetRegNumByIdx(0);
+ regNumber reg1 = call->GetRegNumByIdx(1);
if (op1->IsCopyOrReload())
{
@@ -2980,7 +2951,7 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
// targetReg[127:64] = reg1[127:64]
inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, reg1, 0x00);
}
- else
+ else
{
assert(targetReg == reg1);
@@ -3007,7 +2978,7 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc->GetReturnRegType(i);
- regNumber reg = call->GetRegNumByIdx(i);
+ regNumber reg = call->GetRegNumByIdx(i);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
@@ -3033,19 +3004,19 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
// Assumption: current x86 implementation requires that a multi-reg long
// var in 'var = call' is flagged as lvIsMultiRegRet to prevent it from
// being promoted.
- unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
+ unsigned lclNum = treeNode->AsLclVarCommon()->gtLclNum;
LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
noway_assert(varDsc->lvIsMultiRegRet);
- GenTree* op1 = treeNode->gtGetOp1();
- GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
- GenTreeCall* call = actualOp1->AsCall();
+ GenTree* op1 = treeNode->gtGetOp1();
+ GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
+ GenTreeCall* call = actualOp1->AsCall();
assert(call->HasMultiRegRetVal());
genConsumeRegs(op1);
ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = retTypeDesc->GetReturnRegCount();
+ unsigned regCount = retTypeDesc->GetReturnRegCount();
assert(regCount == MAX_RET_REG_COUNT);
// Stack store
@@ -3053,7 +3024,7 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc->GetReturnRegType(i);
- regNumber reg = call->GetRegNumByIdx(i);
+ regNumber reg = call->GetRegNumByIdx(i);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
@@ -3070,13 +3041,12 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
offset += genTypeSize(type);
}
- varDsc->lvRegNum = REG_STK;
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
+ varDsc->lvRegNum = REG_STK;
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
assert(!"Unreached");
#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING && !_TARGET_X86_
}
-
//------------------------------------------------------------------------
// genLclHeap: Generate code for localloc.
//
@@ -3093,49 +3063,50 @@ CodeGen::genMultiRegCallStoreToLocal(GenTreePtr treeNode)
// is defined by convention relative to other items), and is used by the GC to find the
// "base" stack pointer in functions with localloc.
//
-void
-CodeGen::genLclHeap(GenTreePtr tree)
+void CodeGen::genLclHeap(GenTreePtr tree)
{
assert(tree->OperGet() == GT_LCLHEAP);
assert(compiler->compLocallocUsed);
-
+
GenTreePtr size = tree->gtOp.gtOp1;
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
- regNumber targetReg = tree->gtRegNum;
- regMaskTP tmpRegsMask = tree->gtRsvdRegs;
- regNumber regCnt = REG_NA;
- var_types type = genActualType(size->gtType);
- emitAttr easz = emitTypeSize(type);
- BasicBlock* endLabel = nullptr;
-
+ regNumber targetReg = tree->gtRegNum;
+ regMaskTP tmpRegsMask = tree->gtRsvdRegs;
+ regNumber regCnt = REG_NA;
+ var_types type = genActualType(size->gtType);
+ emitAttr easz = emitTypeSize(type);
+ BasicBlock* endLabel = nullptr;
+
#ifdef DEBUG
// Verify ESP
if (compiler->opts.compStackCheckOnRet)
{
- noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC && compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister && compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
+ noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
getEmitter()->emitIns_S_R(INS_cmp, EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
- BasicBlock * esp_check = genCreateTempLabel();
- emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
+ BasicBlock* esp_check = genCreateTempLabel();
+ emitJumpKind jmpEqual = genJumpKindForOper(GT_EQ, CK_SIGNED);
inst_JMP(jmpEqual, esp_check);
getEmitter()->emitIns(INS_BREAKPOINT);
genDefineTempLabel(esp_check);
}
#endif
- noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
- noway_assert(genStackLevel == 0); // Can't have anything on the stack
+ noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
+ noway_assert(genStackLevel == 0); // Can't have anything on the stack
- unsigned stackAdjustment = 0;
- BasicBlock* loop = NULL;
+ unsigned stackAdjustment = 0;
+ BasicBlock* loop = nullptr;
// compute the amount of memory to allocate to properly STACK_ALIGN.
size_t amount = 0;
if (size->IsCnsIntOrI())
{
// If size is a constant, then it must be contained.
- assert(size->isContained());
+ assert(size->isContained());
// If amount is zero then return null in targetReg
amount = size->gtIntCon.gtIconVal;
@@ -3146,7 +3117,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
}
// 'amount' is the total number of bytes to localloc to properly STACK_ALIGN
- amount = AlignUp(amount, STACK_ALIGN);
+ amount = AlignUp(amount, STACK_ALIGN);
}
else
{
@@ -3162,7 +3133,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
// If compInitMem=true, we can reuse targetReg as regcnt,
// since we don't need any internal registers.
if (compiler->info.compInitMem)
- {
+ {
assert(genCountBits(tmpRegsMask) == 0);
regCnt = targetReg;
}
@@ -3189,7 +3160,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
// add reg, 15
// shr reg, 4
- inst_RV_IV(INS_add, regCnt, STACK_ALIGN - 1, emitActualTypeSize(type));
+ inst_RV_IV(INS_add, regCnt, STACK_ALIGN - 1, emitActualTypeSize(type));
if (compiler->info.compInitMem)
{
@@ -3209,7 +3180,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
}
}
-#if FEATURE_FIXED_OUT_ARGS
+#if FEATURE_FIXED_OUT_ARGS
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
//
@@ -3220,7 +3191,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
// Therefore, we will pop off the out-going arg area from RSP before allocating the localloc space.
// ii) Method has no out-going arg area.
// Nothing to pop off from the stack.
- if (compiler->lvaOutgoingArgSpaceSize > 0)
+ if (compiler->lvaOutgoingArgSpaceSize > 0)
{
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
// aligned
@@ -3230,7 +3201,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
#endif
if (size->IsCnsIntOrI())
- {
+ {
// We should reach here only for non-zero, constant size allocations.
assert(amount > 0);
assert((amount % STACK_ALIGN) == 0);
@@ -3247,11 +3218,12 @@ CodeGen::genLclHeap(GenTreePtr tree)
goto ALLOC_DONE;
}
- bool doNoInitLessThanOnePageAlloc = !compiler->info.compInitMem && (amount < compiler->eeGetPageSize()); // must be < not <=
+ bool doNoInitLessThanOnePageAlloc =
+ !compiler->info.compInitMem && (amount < compiler->eeGetPageSize()); // must be < not <=
#ifdef _TARGET_X86_
bool needRegCntRegister = true;
-#else // !_TARGET_X86_
+#else // !_TARGET_X86_
bool needRegCntRegister = !doNoInitLessThanOnePageAlloc;
#endif // !_TARGET_X86_
@@ -3261,7 +3233,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
// Since size is a constant, regCnt is not yet initialized.
assert(regCnt == REG_NA);
if (compiler->info.compInitMem)
- {
+ {
assert(genCountBits(tmpRegsMask) == 0);
regCnt = targetReg;
}
@@ -3285,12 +3257,13 @@ CodeGen::genLclHeap(GenTreePtr tree)
// For x86, we don't want to use "sub ESP" because we don't want the emitter to track the adjustment
// to ESP. So do the work in the count register.
// TODO-CQ: manipulate ESP directly, to share code, reduce #ifdefs, and improve CQ. This would require
- // creating a way to temporarily turn off the emitter's tracking of ESP, maybe marking instrDescs as "don't track".
+ // creating a way to temporarily turn off the emitter's tracking of ESP, maybe marking instrDescs as "don't
+ // track".
inst_RV_RV(INS_mov, regCnt, REG_SPBASE, TYP_I_IMPL);
getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
inst_RV_IV(INS_sub, regCnt, amount, EA_PTRSIZE);
inst_RV_RV(INS_mov, REG_SPBASE, regCnt, TYP_I_IMPL);
-#else // !_TARGET_X86_
+#else // !_TARGET_X86_
getEmitter()->emitIns_AR_R(INS_TEST, EA_4BYTE, REG_SPBASE, REG_SPBASE, 0);
inst_RV_IV(INS_sub, REG_SPBASE, amount, EA_PTRSIZE);
#endif // !_TARGET_X86_
@@ -3307,7 +3280,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
amount /= STACK_ALIGN;
}
- genSetRegToIcon(regCnt, amount, ((int)amount == amount)? TYP_INT : TYP_LONG);
+ genSetRegToIcon(regCnt, amount, ((int)amount == amount) ? TYP_INT : TYP_LONG);
}
loop = genCreateTempLabel();
@@ -3326,12 +3299,12 @@ CodeGen::genLclHeap(GenTreePtr tree)
#if defined(_TARGET_AMD64_)
// Push two 8-byte zeros. This matches the 16-byte STACK_ALIGN value.
static_assert_no_msg(STACK_ALIGN == (REGSIZE_BYTES * 2));
- inst_IV(INS_push_hide, 0); // --- push 8-byte 0
- inst_IV(INS_push_hide, 0); // --- push 8-byte 0
+ inst_IV(INS_push_hide, 0); // --- push 8-byte 0
+ inst_IV(INS_push_hide, 0); // --- push 8-byte 0
#elif defined(_TARGET_X86_)
// Push a single 4-byte zero. This matches the 4-byte STACK_ALIGN value.
static_assert_no_msg(STACK_ALIGN == REGSIZE_BYTES);
- inst_IV(INS_push_hide, 0); // --- push 4-byte 0
+ inst_IV(INS_push_hide, 0); // --- push 4-byte 0
#endif // _TARGET_X86_
// Decrement the loop counter and loop if not done.
@@ -3350,7 +3323,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
//
// Another subtlety is that you don't want ESP to be exactly on the
// boundary of the guard page because PUSH is predecrement, thus
- // call setup would not touch the guard page but just beyond it
+ // call setup would not touch the guard page but just beyond it
//
// Note that we go through a few hoops so that ESP never points to
// illegal pages at any time during the tickling process
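// Reviewer sketch (not part of this change): the comments above describe stack "tickling" - a large
// localloc must touch each newly allocated page in order so the guard page is hit before the stack
// pointer moves past it, and ESP must never sit exactly on the guard boundary because push
// pre-decrements. ProbePagesOutline is a hypothetical, non-emitted outline of the page-touching
// intent only (the emitted code manipulates REG_SPBASE directly, not C pointers):
static void ProbePagesOutline(volatile char* sp, size_t totalAlloc, size_t pageSize)
{
    for (size_t probed = 0; probed < totalAlloc; probed += pageSize)
    {
        sp -= pageSize; // move down at most one page at a time
        *sp = 0;        // touch the new page before going further
    }
}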
@@ -3403,7 +3376,7 @@ CodeGen::genLclHeap(GenTreePtr tree)
ALLOC_DONE:
// Re-adjust SP to allocate out-going arg area
- if (stackAdjustment > 0)
+ if (stackAdjustment > 0)
{
assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
inst_RV_IV(INS_sub, REG_SPBASE, stackAdjustment, EA_PTRSIZE);
@@ -3414,7 +3387,9 @@ ALLOC_DONE:
getEmitter()->emitIns_R_AR(INS_lea, EA_PTRSIZE, targetReg, REG_SPBASE, stackAdjustment);
if (endLabel != nullptr)
+ {
genDefineTempLabel(endLabel);
+ }
BAILOUT:
@@ -3433,7 +3408,9 @@ BAILOUT:
// Update new ESP
if (compiler->opts.compStackCheckOnRet)
{
- noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC && compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister && compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
+ noway_assert(compiler->lvaReturnEspCheck != 0xCCCCCCCC &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvDoNotEnregister &&
+ compiler->lvaTable[compiler->lvaReturnEspCheck].lvOnFrame);
getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, REG_SPBASE, compiler->lvaReturnEspCheck, 0);
}
#endif
@@ -3442,16 +3419,16 @@ BAILOUT:
}
// Generate code for InitBlk using rep stos.
-// Preconditions:
+// Preconditions:
// The size of the buffers must be a constant and also less than INITBLK_STOS_LIMIT bytes.
-// Any value larger than that, we'll use the helper even if both the
+// Any value larger than that, we'll use the helper even if both the
// fill byte and the size are integer constants.
void CodeGen::genCodeForInitBlkRepStos(GenTreeInitBlk* initBlkNode)
{
// Make sure we got the arguments of the initblk/initobj operation in the right registers
GenTreePtr blockSize = initBlkNode->Size();
- GenTreePtr dstAddr = initBlkNode->Dest();
- GenTreePtr initVal = initBlkNode->InitVal();
+ GenTreePtr dstAddr = initBlkNode->Dest();
+ GenTreePtr initVal = initBlkNode->InitVal();
#ifdef DEBUG
assert(!dstAddr->isContained());
@@ -3484,7 +3461,7 @@ void CodeGen::genCodeForInitBlkRepStos(GenTreeInitBlk* initBlkNode)
}
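// Reviewer sketch (not part of this change): "rep stos" is the architectural fill primitive used by
// the function above - it stores the value in AL/EAX/RAX to [RDI], RCX times, advancing RDI by the
// element size each iteration. RepStosBytesOutline is a hypothetical, non-emitted C equivalent of
// the byte-sized form:
static void RepStosBytesOutline(unsigned char* dst, unsigned char fillByte, size_t count)
{
    for (size_t i = 0; i < count; i++) // RCX counts down to zero
    {
        dst[i] = fillByte;             // each iteration stores AL and bumps RDI
    }
}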
// Generate code for InitBlk by performing a loop unroll
-// Preconditions:
+// Preconditions:
// a) Both the size and fill byte value are integer constants.
// b) The size of the struct to initialize is smaller than INITBLK_UNROLL_LIMIT bytes.
//
@@ -3492,8 +3469,8 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeInitBlk* initBlkNode)
{
// Make sure we got the arguments of the initblk/initobj operation in the right registers
GenTreePtr blockSize = initBlkNode->Size();
- GenTreePtr dstAddr = initBlkNode->Dest();
- GenTreePtr initVal = initBlkNode->InitVal();
+ GenTreePtr dstAddr = initBlkNode->Dest();
+ GenTreePtr initVal = initBlkNode->InitVal();
#ifdef DEBUG
assert(!dstAddr->isContained());
@@ -3508,7 +3485,7 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeInitBlk* initBlkNode)
assert(size <= INITBLK_UNROLL_LIMIT);
assert(initVal->gtSkipReloadOrCopy()->IsCnsIntOrI());
- emitter *emit = getEmitter();
+ emitter* emit = getEmitter();
genConsumeOperands(initBlkNode->gtGetOp1()->AsOp());
@@ -3516,7 +3493,7 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeInitBlk* initBlkNode)
// get the original initVal from below the GT_RELOAD, but only after capturing the valReg,
// which needs to be the new register.
regNumber valReg = initVal->gtRegNum;
- initVal = initVal->gtSkipReloadOrCopy();
+ initVal = initVal->gtSkipReloadOrCopy();
unsigned offset = 0;
@@ -3560,10 +3537,10 @@ void CodeGen::genCodeForInitBlkUnroll(GenTreeInitBlk* initBlkNode)
offset += 4;
emit->emitIns_AR_R(INS_mov, EA_4BYTE, valReg, dstAddr->gtRegNum, offset);
offset += 4;
-#else // !_TARGET_X86_
+#else // !_TARGET_X86_
emit->emitIns_AR_R(INS_mov, EA_8BYTE, valReg, dstAddr->gtRegNum, offset);
offset += 8;
-#endif // !_TARGET_X86_
+#endif // !_TARGET_X86_
}
if ((size & 4) != 0)
{
@@ -3590,8 +3567,8 @@ void CodeGen::genCodeForInitBlk(GenTreeInitBlk* initBlkNode)
#ifdef _TARGET_AMD64_
// Make sure we got the arguments of the initblk operation in the right registers
GenTreePtr blockSize = initBlkNode->Size();
- GenTreePtr dstAddr = initBlkNode->Dest();
- GenTreePtr initVal = initBlkNode->InitVal();
+ GenTreePtr dstAddr = initBlkNode->Dest();
+ GenTreePtr initVal = initBlkNode->InitVal();
#ifdef DEBUG
assert(!dstAddr->isContained());
@@ -3607,18 +3584,17 @@ void CodeGen::genCodeForInitBlk(GenTreeInitBlk* initBlkNode)
genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
NYI_X86("Helper call for InitBlk");
#endif // !_TARGET_AMD64_
}
-
// Generate code for a load from some address + offset
// baseNode: tree node which can be either a local address or arbitrary node
// offset: distance from the baseNode from which to load
void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* baseNode, unsigned offset)
{
- emitter *emit = getEmitter();
+ emitter* emit = getEmitter();
if (baseNode->OperIsLocalAddr())
{
@@ -3646,7 +3622,7 @@ void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst
//
void CodeGen::genCodeForStoreOffset(instruction ins, emitAttr size, regNumber src, GenTree* baseNode, unsigned offset)
{
- emitter *emit = getEmitter();
+ emitter* emit = getEmitter();
if (baseNode->OperIsLocalAddr())
{
@@ -3663,7 +3639,6 @@ void CodeGen::genCodeForStoreOffset(instruction ins, emitAttr size, regNumber sr
}
}
-
// Generates CpBlk code by performing a loop unroll
// Preconditions:
// The size argument of the CpBlk node is a constant and <= 64 bytes.
@@ -3673,25 +3648,29 @@ void CodeGen::genCodeForCpBlkUnroll(GenTreeCpBlk* cpBlkNode)
{
// Make sure we got the arguments of the cpblk operation in the right registers
GenTreePtr blockSize = cpBlkNode->Size();
- GenTreePtr dstAddr = cpBlkNode->Dest();
- GenTreePtr srcAddr = cpBlkNode->Source();
+ GenTreePtr dstAddr = cpBlkNode->Dest();
+ GenTreePtr srcAddr = cpBlkNode->Source();
assert(blockSize->IsCnsIntOrI());
size_t size = blockSize->gtIntCon.gtIconVal;
assert(size <= CPBLK_UNROLL_LIMIT);
- emitter *emit = getEmitter();
+ emitter* emit = getEmitter();
if (!srcAddr->isContained())
+ {
genConsumeReg(srcAddr);
+ }
if (!dstAddr->isContained())
+ {
genConsumeReg(dstAddr);
+ }
unsigned offset = 0;
// If the size of this struct is larger than 16 bytes
- // let's use SSE2 to be able to do 16 byte at a time
+ // let's use SSE2 to be able to do 16 byte at a time
// loads and stores.
if (size >= XMM_REGSIZE_BYTES)
@@ -3701,7 +3680,7 @@ void CodeGen::genCodeForCpBlkUnroll(GenTreeCpBlk* cpBlkNode)
assert(genIsValidFloatReg(xmmReg));
size_t slots = size / XMM_REGSIZE_BYTES;
- // TODO: In the below code the load and store instructions are for 16 bytes, but the
+ // TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
// this probably needs to be changed.
while (slots-- > 0)
@@ -3729,7 +3708,7 @@ void CodeGen::genCodeForCpBlkUnroll(GenTreeCpBlk* cpBlkNode)
genCodeForLoadOffset(INS_mov, EA_4BYTE, tmpReg, srcAddr, offset);
genCodeForStoreOffset(INS_mov, EA_4BYTE, tmpReg, dstAddr, offset);
}
-#else // !_TARGET_X86_
+#else // !_TARGET_X86_
genCodeForLoadOffset(INS_mov, EA_8BYTE, tmpReg, srcAddr, offset);
genCodeForStoreOffset(INS_mov, EA_8BYTE, tmpReg, dstAddr, offset);
offset += 8;
@@ -3757,21 +3736,20 @@ void CodeGen::genCodeForCpBlkUnroll(GenTreeCpBlk* cpBlkNode)
// Generate code for CpBlk by using rep movs
// Preconditions:
-// The size argument of the CpBlk is a constant and is between
+// The size argument of the CpBlk is a constant and is between
// CPBLK_UNROLL_LIMIT and CPBLK_MOVS_LIMIT bytes.
void CodeGen::genCodeForCpBlkRepMovs(GenTreeCpBlk* cpBlkNode)
{
// Make sure we got the arguments of the cpblk operation in the right registers
GenTreePtr blockSize = cpBlkNode->Size();
- GenTreePtr dstAddr = cpBlkNode->Dest();
- GenTreePtr srcAddr = cpBlkNode->Source();
+ GenTreePtr dstAddr = cpBlkNode->Dest();
+ GenTreePtr srcAddr = cpBlkNode->Source();
#ifdef DEBUG
assert(!dstAddr->isContained());
assert(!srcAddr->isContained());
assert(!blockSize->isContained());
-
#ifdef _TARGET_AMD64_
assert(blockSize->IsCnsIntOrI());
#endif
@@ -3803,7 +3781,7 @@ void CodeGen::genCodeForCpBlkRepMovs(GenTreeCpBlk* cpBlkNode)
// putArgNode - the PutArgStk tree.
// baseVarNum - the base var number, relative to which the by-val struct will be copied on the stack.
//
-// TODO-Amd64-Unix: Try to share code with copyblk.
+// TODO-Amd64-Unix: Try to share code with copyblk.
// Need refactoring of copyblk before it could be used for putarg_stk.
// The difference for now is that a putarg_stk contains its children, while cpyblk does not.
// This creates differences in code. After some significant refactoring it could be reused.
@@ -3815,13 +3793,13 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
noway_assert(putArgNode->TypeGet() == TYP_STRUCT);
// Make sure we got the arguments of the cpblk operation in the right registers
- GenTreePtr dstAddr = putArgNode;
- GenTreePtr src = putArgNode->gtOp.gtOp1;
+ GenTreePtr dstAddr = putArgNode;
+ GenTreePtr src = putArgNode->gtOp.gtOp1;
size_t size = putArgNode->getArgSize();
assert(size <= CPBLK_UNROLL_LIMIT);
- emitter *emit = getEmitter();
+ emitter* emit = getEmitter();
unsigned putArgOffset = putArgNode->getArgOffset();
assert(src->isContained());
@@ -3836,7 +3814,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
unsigned offset = 0;
// If the size of this struct is larger than 16 bytes
- // let's use SSE2 to be able to do 16 byte at a time
+ // let's use SSE2 to be able to do 16 byte at a time
// loads and stores.
if (size >= XMM_REGSIZE_BYTES)
{
@@ -3848,21 +3826,18 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
assert(putArgNode->gtGetOp1()->isContained());
assert(putArgNode->gtGetOp1()->gtOp.gtOper == GT_OBJ);
- // TODO: In the below code the load and store instructions are for 16 bytes, but the
+ // TODO: In the below code the load and store instructions are for 16 bytes, but the
// type is EA_8BYTE. The movdqa/u are 16 byte instructions, so it works, but
// this probably needs to be changed.
while (slots-- > 0)
{
// Load
- genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmReg, src->gtGetOp1(), offset); // Load the address of the child of the Obj node.
+ genCodeForLoadOffset(INS_movdqu, EA_8BYTE, xmmReg, src->gtGetOp1(),
+ offset); // Load the address of the child of the Obj node.
// Store
- emit->emitIns_S_R(INS_movdqu,
- EA_8BYTE,
- xmmReg,
- baseVarNum,
- putArgOffset + offset);
-
+ emit->emitIns_S_R(INS_movdqu, EA_8BYTE, xmmReg, baseVarNum, putArgOffset + offset);
+
offset += XMM_REGSIZE_BYTES;
}
}
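// Reviewer sketch (not part of this change): the unroll above copies 16-byte (XMM) chunks first, and
// the (size & 8) / (size & 4) / (size & 2) / (size & 1) cases that follow mop up the tail with
// integer moves. UnrolledCopyOutline is a hypothetical, non-emitted outline of that strategy:
static void UnrolledCopyOutline(unsigned char* dst, const unsigned char* src, size_t size)
{
    size_t offset = 0;
    for (size_t slots = size / 16; slots > 0; slots--) // 16-byte chunks, as the movdqu loop does
    {
        for (size_t i = 0; i < 16; i++)
        {
            dst[offset + i] = src[offset + i];
        }
        offset += 16;
    }
    for (size_t width = 8; width > 0; width /= 2)      // 8-, 4-, 2-, 1-byte remainders
    {
        if (size & width)
        {
            for (size_t i = 0; i < width; i++)
            {
                dst[offset + i] = src[offset + i];
            }
            offset += width;
        }
    }
}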
@@ -3873,16 +3848,12 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
// Grab the integer temp register to emit the remaining loads and stores.
regNumber tmpReg = genRegNumFromMask(putArgNode->gtRsvdRegs & RBM_ALLINT);
assert(genIsValidIntReg(tmpReg));
-
+
if ((size & 8) != 0)
{
genCodeForLoadOffset(INS_mov, EA_8BYTE, tmpReg, src->gtOp.gtOp1, offset);
- emit->emitIns_S_R(INS_mov,
- EA_8BYTE,
- tmpReg,
- baseVarNum,
- putArgOffset + offset);
+ emit->emitIns_S_R(INS_mov, EA_8BYTE, tmpReg, baseVarNum, putArgOffset + offset);
offset += 8;
}
@@ -3891,11 +3862,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
{
genCodeForLoadOffset(INS_mov, EA_4BYTE, tmpReg, src->gtOp.gtOp1, offset);
- emit->emitIns_S_R(INS_mov,
- EA_4BYTE,
- tmpReg,
- baseVarNum,
- putArgOffset + offset);
+ emit->emitIns_S_R(INS_mov, EA_4BYTE, tmpReg, baseVarNum, putArgOffset + offset);
offset += 4;
}
@@ -3904,11 +3871,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
{
genCodeForLoadOffset(INS_mov, EA_2BYTE, tmpReg, src->gtOp.gtOp1, offset);
- emit->emitIns_S_R(INS_mov,
- EA_2BYTE,
- tmpReg,
- baseVarNum,
- putArgOffset + offset);
+ emit->emitIns_S_R(INS_mov, EA_2BYTE, tmpReg, baseVarNum, putArgOffset + offset);
offset += 2;
}
@@ -3916,11 +3879,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
if ((size & 1) != 0)
{
genCodeForLoadOffset(INS_mov, EA_1BYTE, tmpReg, src->gtOp.gtOp1, offset);
- emit->emitIns_S_R(INS_mov,
- EA_1BYTE,
- tmpReg,
- baseVarNum,
- putArgOffset + offset);
+ emit->emitIns_S_R(INS_mov, EA_1BYTE, tmpReg, baseVarNum, putArgOffset + offset);
}
}
}
@@ -3933,7 +3892,7 @@ void CodeGen::genStructPutArgUnroll(GenTreePutArgStk* putArgNode, unsigned baseV
// baseVarNum - the base var number, relative to which the by-val struct bits will go.
//
// Preconditions:
-// The size argument of the PutArgStk (for structs) is a constant and is between
+// The size argument of the PutArgStk (for structs) is a constant and is between
// CPBLK_UNROLL_LIMIT and CPBLK_MOVS_LIMIT bytes.
//
void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode, unsigned baseVarNum)
@@ -3943,8 +3902,8 @@ void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode, unsigned base
assert(baseVarNum != BAD_VAR_NUM);
// Make sure we got the arguments of the cpblk operation in the right registers
- GenTreePtr dstAddr = putArgNode;
- GenTreePtr srcAddr = putArgNode->gtGetOp1();
+ GenTreePtr dstAddr = putArgNode;
+ GenTreePtr srcAddr = putArgNode->gtGetOp1();
// Validate state.
assert(putArgNode->gtRsvdRegs == (RBM_RDI | RBM_RCX | RBM_RSI));
@@ -3960,7 +3919,7 @@ void CodeGen::genStructPutArgRepMovs(GenTreePutArgStk* putArgNode, unsigned base
// and there is no way to know if the caller is native or not. So, the upper
// 32 bits of Vector argument on stack are always cleared to zero.
#ifdef FEATURE_SIMD
-void CodeGen::genClearStackVec3ArgUpperBits()
+void CodeGen::genClearStackVec3ArgUpperBits()
{
#ifdef DEBUG
if (verbose)
@@ -3985,17 +3944,11 @@ void CodeGen::genClearStackVec3ArgUpperBits()
if (!varDsc->lvIsRegArg)
{
// Clear the upper 32 bits by mov dword ptr [V_ARG_BASE+0xC], 0
- getEmitter()->emitIns_S_I(
- ins_Store(TYP_INT),
- EA_4BYTE,
- varNum,
- genTypeSize(TYP_FLOAT) * 3,
- 0);
-
+ getEmitter()->emitIns_S_I(ins_Store(TYP_INT), EA_4BYTE, varNum, genTypeSize(TYP_FLOAT) * 3, 0);
}
- else
+ else
{
- // Assume that for x64 linux, an argument is fully in registers
+ // Assume that for x64 linux, an argument is fully in registers
// or fully on stack.
regNumber argReg = varDsc->GetOtherArgReg();
@@ -4017,7 +3970,7 @@ void CodeGen::genClearStackVec3ArgUpperBits()
void CodeGen::genCodeForCpObj(GenTreeCpObj* cpObjNode)
{
// Make sure we got the arguments of the cpobj operation in the right registers
- GenTreePtr clsTok = cpObjNode->ClsTok();
+ GenTreePtr clsTok = cpObjNode->ClsTok();
GenTreePtr dstAddr = cpObjNode->Dest();
GenTreePtr srcAddr = cpObjNode->Source();
@@ -4034,30 +3987,28 @@ void CodeGen::genCodeForCpObj(GenTreeCpObj* cpObjNode)
assert(cpObjNode->gtGcPtrCount > 0);
// MovSq instruction is used for copying non-gcref fields and it needs
- // src = RSI and dst = RDI.
+ // src = RSI and dst = RDI.
// Either these registers must not contain lclVars, or they must be dying or marked for spill.
// This is because these registers are incremented as we go through the struct.
- GenTree* actualSrcAddr = srcAddr->gtSkipReloadOrCopy();
- GenTree* actualDstAddr = dstAddr->gtSkipReloadOrCopy();
- unsigned srcLclVarNum = BAD_VAR_NUM;
- unsigned dstLclVarNum = BAD_VAR_NUM;
- bool isSrcAddrLiveOut = false;
- bool isDstAddrLiveOut = false;
+ GenTree* actualSrcAddr = srcAddr->gtSkipReloadOrCopy();
+ GenTree* actualDstAddr = dstAddr->gtSkipReloadOrCopy();
+ unsigned srcLclVarNum = BAD_VAR_NUM;
+ unsigned dstLclVarNum = BAD_VAR_NUM;
+ bool isSrcAddrLiveOut = false;
+ bool isDstAddrLiveOut = false;
if (genIsRegCandidateLocal(actualSrcAddr))
{
- srcLclVarNum = actualSrcAddr->AsLclVarCommon()->gtLclNum;
+ srcLclVarNum = actualSrcAddr->AsLclVarCommon()->gtLclNum;
isSrcAddrLiveOut = ((actualSrcAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
}
if (genIsRegCandidateLocal(actualDstAddr))
{
- dstLclVarNum = actualDstAddr->AsLclVarCommon()->gtLclNum;
+ dstLclVarNum = actualDstAddr->AsLclVarCommon()->gtLclNum;
isDstAddrLiveOut = ((actualDstAddr->gtFlags & (GTF_VAR_DEATH | GTF_SPILL)) == 0);
}
- assert((actualSrcAddr->gtRegNum != REG_RSI) ||
- !isSrcAddrLiveOut ||
+ assert((actualSrcAddr->gtRegNum != REG_RSI) || !isSrcAddrLiveOut ||
((srcLclVarNum == dstLclVarNum) && !isDstAddrLiveOut));
- assert((actualDstAddr->gtRegNum != REG_RDI) ||
- !isDstAddrLiveOut ||
+ assert((actualDstAddr->gtRegNum != REG_RDI) || !isDstAddrLiveOut ||
((srcLclVarNum == dstLclVarNum) && !isSrcAddrLiveOut));
#endif // DEBUG
@@ -4098,7 +4049,7 @@ void CodeGen::genCodeForCpObj(GenTreeCpObj* cpObjNode)
}
else
{
- BYTE* gcPtrs = cpObjNode->gtGcPtrs;
+ BYTE* gcPtrs = cpObjNode->gtGcPtrs;
unsigned gcPtrCount = cpObjNode->gtGcPtrCount;
unsigned i = 0;
@@ -4106,47 +4057,47 @@ void CodeGen::genCodeForCpObj(GenTreeCpObj* cpObjNode)
{
switch (gcPtrs[i])
{
- case TYPE_GC_NONE:
- // Let's see if we can use rep movsq instead of a sequence of movsq instructions
- // to save cycles and code size.
- {
- unsigned nonGcSlotCount = 0;
-
- do
+ case TYPE_GC_NONE:
+ // Let's see if we can use rep movsq instead of a sequence of movsq instructions
+ // to save cycles and code size.
{
- nonGcSlotCount++;
- i++;
- } while (i < slots && gcPtrs[i] == TYPE_GC_NONE);
+ unsigned nonGcSlotCount = 0;
- // If we have a very small contiguous non-gc region, it's better just to
- // emit a sequence of movsq instructions
- if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
- {
- while (nonGcSlotCount > 0)
+ do
{
- instGen(INS_movsq);
- nonGcSlotCount--;
+ nonGcSlotCount++;
+ i++;
+ } while (i < slots && gcPtrs[i] == TYPE_GC_NONE);
+
+ // If we have a very small contiguous non-gc region, it's better just to
+ // emit a sequence of movsq instructions
+ if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
+ {
+ while (nonGcSlotCount > 0)
+ {
+ instGen(INS_movsq);
+ nonGcSlotCount--;
+ }
}
- }
- else
- {
+ else
+ {
#ifdef DEBUG
- // Otherwise, we can save code-size and improve CQ by emitting
- // rep movsq
- regNumber tmpReg = genRegNumFromMask(cpObjNode->gtRsvdRegs & RBM_ALLINT);
- assert(tmpReg == REG_RCX);
- isRepMovsqUsed = true;
+ // Otherwise, we can save code-size and improve CQ by emitting
+ // rep movsq
+ regNumber tmpReg = genRegNumFromMask(cpObjNode->gtRsvdRegs & RBM_ALLINT);
+ assert(tmpReg == REG_RCX);
+ isRepMovsqUsed = true;
#endif // DEBUG
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
- instGen(INS_r_movsq);
+ getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
+ instGen(INS_r_movsq);
+ }
}
- }
- break;
- default:
- // We have a GC pointer, call the memory barrier.
- genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
- gcPtrCount--;
- i++;
+ break;
+ default:
+ // We have a GC pointer, call the memory barrier.
+ genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
+ gcPtrCount--;
+ i++;
}
}
#ifdef DEBUG
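The copy loop reformatted above walks the GC layout of the struct and, for each contiguous run of non-GC slots, either unrolls a few movsq instructions or loads the slot count into RCX and issues a single rep movsq once the run reaches CPOBJ_NONGC_SLOTS_LIMIT. A minimal sketch of that selection logic, assuming an illustrative limit and callbacks standing in for the emitter:

    #include <functional>

    constexpr unsigned kNonGcSlotLimit = 4; // stand-in for CPOBJ_NONGC_SLOTS_LIMIT

    // emitMovsq() models instGen(INS_movsq); emitRepMovsq(n) models
    // "mov ecx, n" followed by instGen(INS_r_movsq).
    void copyNonGcRun(unsigned slotCount,
                      const std::function<void()>& emitMovsq,
                      const std::function<void(unsigned)>& emitRepMovsq)
    {
        if (slotCount < kNonGcSlotLimit)
        {
            // Short runs: a handful of movsq is smaller than the rep setup.
            for (unsigned i = 0; i < slotCount; i++)
            {
                emitMovsq();
            }
        }
        else
        {
            // Long runs: one rep movsq saves both cycles and code size.
            emitRepMovsq(slotCount);
        }
    }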
@@ -4174,8 +4125,8 @@ void CodeGen::genCodeForCpBlk(GenTreeCpBlk* cpBlkNode)
#ifdef _TARGET_AMD64_
// Make sure we got the arguments of the cpblk operation in the right registers
GenTreePtr blockSize = cpBlkNode->Size();
- GenTreePtr dstAddr = cpBlkNode->Dest();
- GenTreePtr srcAddr = cpBlkNode->Source();
+ GenTreePtr dstAddr = cpBlkNode->Dest();
+ GenTreePtr srcAddr = cpBlkNode->Source();
assert(!dstAddr->isContained());
assert(!srcAddr->isContained());
@@ -4191,21 +4142,20 @@ void CodeGen::genCodeForCpBlk(GenTreeCpBlk* cpBlkNode)
genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
noway_assert(false && "Helper call for CpBlk is not needed.");
#endif // !_TARGET_AMD64_
}
// generate code to do a switch statement based on a table of ip-relative offsets
-void
-CodeGen::genTableBasedSwitch(GenTree* treeNode)
+void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
genConsumeOperands(treeNode->AsOp());
- regNumber idxReg = treeNode->gtOp.gtOp1->gtRegNum;
+ regNumber idxReg = treeNode->gtOp.gtOp1->gtRegNum;
regNumber baseReg = treeNode->gtOp.gtOp2->gtRegNum;
regNumber tmpReg = genRegNumFromMask(treeNode->gtRsvdRegs);
-
+
// load the ip-relative offset (which is relative to start of fgFirstBB)
getEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
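As the comments in this hunk note, the switch dispatch loads a 32-bit offset from the jump table (relative to the start of fgFirstBB) and then jumps through it. A rough standalone sketch of the addressing, with illustrative names:

    #include <cstdint>

    using CodeAddr = const std::uint8_t*;

    // mov  baseReg, [baseReg + idxReg*4]   ; load the ip-relative offset
    // add  baseReg, <code base>            ; then jmp baseReg
    inline CodeAddr switchTarget(CodeAddr codeBase, const std::int32_t* table, unsigned index)
    {
        return codeBase + table[index];
    }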
@@ -4217,14 +4167,12 @@ CodeGen::genTableBasedSwitch(GenTree* treeNode)
getEmitter()->emitIns_R(INS_i_jmp, emitTypeSize(TYP_I_IMPL), baseReg);
}
-
// emits the table and an instruction to get the address of the first element
-void
-CodeGen::genJumpTable(GenTree* treeNode)
+void CodeGen::genJumpTable(GenTree* treeNode)
{
noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
assert(treeNode->OperGet() == GT_JMPTABLE);
-
+
unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
unsigned jmpTabOffs;
@@ -4236,7 +4184,7 @@ CodeGen::genJumpTable(GenTree* treeNode)
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", Compiler::s_compMethodsCount, jmpTabBase);
- for (unsigned i=0; i<jumpCount; i++)
+ for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
noway_assert(target->bbFlags & BBF_JMP_TARGET);
@@ -4251,25 +4199,20 @@ CodeGen::genJumpTable(GenTree* treeNode)
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
- getEmitter()->emitIns_R_C(INS_lea,
- emitTypeSize(TYP_I_IMPL),
- treeNode->gtRegNum,
- compiler->eeFindJitDataOffs(jmpTabBase),
- 0);
+ getEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->gtRegNum,
+ compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
-
// generate code for the locked operations:
// GT_LOCKADD, GT_XCHG, GT_XADD
-void
-CodeGen::genLockedInstructions(GenTree* treeNode)
+void CodeGen::genLockedInstructions(GenTree* treeNode)
{
- GenTree* data = treeNode->gtOp.gtOp2;
- GenTree* addr = treeNode->gtOp.gtOp1;
- regNumber targetReg = treeNode->gtRegNum;
- regNumber dataReg = data->gtRegNum;
- regNumber addrReg = addr->gtRegNum;
+ GenTree* data = treeNode->gtOp.gtOp2;
+ GenTree* addr = treeNode->gtOp.gtOp1;
+ regNumber targetReg = treeNode->gtRegNum;
+ regNumber dataReg = data->gtRegNum;
+ regNumber addrReg = addr->gtRegNum;
instruction ins;
// all of these nodes implicitly do an indirection on op1
@@ -4283,7 +4226,8 @@ CodeGen::genLockedInstructions(GenTree* treeNode)
// If data is a lclVar that's not a last use, we'd better have allocated a register
// for the result (except in the case of GT_LOCKADD which does not produce a register result).
- assert(targetReg != REG_NA || treeNode->OperGet() == GT_LOCKADD || !genIsRegCandidateLocal(data) || (data->gtFlags & GTF_VAR_DEATH) != 0);
+ assert(targetReg != REG_NA || treeNode->OperGet() == GT_LOCKADD || !genIsRegCandidateLocal(data) ||
+ (data->gtFlags & GTF_VAR_DEATH) != 0);
genConsumeIfReg(data);
if (targetReg != REG_NA && dataReg != REG_NA && dataReg != targetReg)
@@ -4296,20 +4240,20 @@ CodeGen::genLockedInstructions(GenTree* treeNode)
}
switch (treeNode->OperGet())
{
- case GT_LOCKADD:
- instGen(INS_lock);
- ins = INS_add;
- break;
- case GT_XCHG:
- // lock is implied by xchg
- ins = INS_xchg;
- break;
- case GT_XADD:
- instGen(INS_lock);
- ins = INS_xadd;
- break;
- default:
- unreached();
+ case GT_LOCKADD:
+ instGen(INS_lock);
+ ins = INS_add;
+ break;
+ case GT_XCHG:
+ // lock is implied by xchg
+ ins = INS_xchg;
+ break;
+ case GT_XADD:
+ instGen(INS_lock);
+ ins = INS_xadd;
+ break;
+ default:
+ unreached();
}
getEmitter()->emitInsBinary(ins, emitTypeSize(data), &i, data);
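The switch above maps the three interlocked nodes onto x86 instructions: GT_LOCKADD becomes lock add, GT_XADD becomes lock xadd, and GT_XCHG becomes a plain xchg, whose lock prefix is implicit for a memory operand. Rough C++ analogues of the three operations (illustrative only; the JIT emits the raw instructions shown):

    #include <atomic>

    // GT_LOCKADD: add to memory atomically, result discarded.
    inline void lockAdd(std::atomic<long>& target, long value) { target.fetch_add(value); }

    // GT_XADD: add to memory atomically and return the previous value.
    inline long xAdd(std::atomic<long>& target, long value) { return target.fetch_add(value); }

    // GT_XCHG: swap a new value into memory and return the previous value.
    inline long xChg(std::atomic<long>& target, long value) { return target.exchange(value); }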
@@ -4319,25 +4263,23 @@ CodeGen::genLockedInstructions(GenTree* treeNode)
}
}
-
// generate code for BoundsCheck nodes
-void
-CodeGen::genRangeCheck(GenTreePtr oper)
+void CodeGen::genRangeCheck(GenTreePtr oper)
{
#ifdef FEATURE_SIMD
noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK || oper->OperGet() == GT_SIMD_CHK);
-#else // !FEATURE_SIMD
+#else // !FEATURE_SIMD
noway_assert(oper->OperGet() == GT_ARR_BOUNDS_CHECK);
#endif // !FEATURE_SIMD
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
- GenTreePtr arrLen = bndsChk->gtArrLen;
- GenTreePtr arrIndex = bndsChk->gtIndex;
- GenTreePtr arrRef = NULL;
- int lenOffset = 0;
+ GenTreePtr arrLen = bndsChk->gtArrLen;
+ GenTreePtr arrIndex = bndsChk->gtIndex;
+ GenTreePtr arrRef = nullptr;
+ int lenOffset = 0;
- GenTree *src1, *src2;
+ GenTree * src1, *src2;
emitJumpKind jmpKind;
genConsumeRegs(arrLen);
@@ -4353,14 +4295,14 @@ CodeGen::genRangeCheck(GenTreePtr oper)
// That is arrLen cannot be a contained immed.
assert(!arrLen->isContainedIntOrIImmed());
- src1 = arrLen;
- src2 = arrIndex;
+ src1 = arrLen;
+ src2 = arrIndex;
jmpKind = EJ_jbe;
}
else
{
// arrIndex could either be a contained memory op or a reg
- // In this case we will generate one of the following
+ // In this case we will generate one of the following
// cmp [mem], immed (if arrLen is a constant)
// cmp [mem], reg (if arrLen is in a reg)
// cmp reg, immed (if arrIndex is in a reg)
@@ -4370,8 +4312,8 @@ CodeGen::genRangeCheck(GenTreePtr oper)
// That is only one of arrIndex or arrLen can be a memory op.
assert(!arrIndex->isContainedMemoryOp() || !arrLen->isContainedMemoryOp());
- src1 = arrIndex;
- src2 = arrLen;
+ src1 = arrIndex;
+ src2 = arrLen;
jmpKind = EJ_jae;
}
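Both branches above emit the same unsigned range check, just with the cmp operands in either order: cmp index, length followed by jae to the throw block, or cmp length, index followed by jbe. Because the comparison is unsigned, a negative index reinterpreted as a large unsigned value also fails. A one-line sketch of the condition:

    #include <cstdint>

    // Equivalent of "cmp index, length; jae throw".
    inline bool indexOutOfRange(std::int32_t index, std::uint32_t length)
    {
        return static_cast<std::uint32_t>(index) >= length;
    }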
@@ -4382,11 +4324,10 @@ CodeGen::genRangeCheck(GenTreePtr oper)
    // The type of the bounds check should always be wide enough to compare against the index.
assert(emitTypeSize(bndsChkType) >= emitTypeSize(src1->TypeGet()));
-#endif //DEBUG
+#endif // DEBUG
getEmitter()->emitInsBinary(INS_cmp, emitTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
-
}
//------------------------------------------------------------------------
@@ -4401,8 +4342,7 @@ CodeGen::genRangeCheck(GenTreePtr oper)
// Return Value:
// The offset.
-unsigned
-CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension)
+unsigned CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigned dimension)
{
// Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * (dimension + rank);
@@ -4420,8 +4360,7 @@ CodeGen::genOffsetOfMDArrayLowerBound(var_types elemType, unsigned rank, unsigne
// Return Value:
// The offset.
-unsigned
-CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension)
+unsigned CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsigned dimension)
{
// Note that the lower bound and length fields of the Array object are always TYP_INT, even on 64-bit targets.
return compiler->eeGetArrayDataOffset(elemType) + genTypeSize(TYP_INT) * dimension;
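Taken together, the two helpers above describe the multi-dimensional array header layout: after the fixed offset returned by eeGetArrayDataOffset come rank 32-bit dimension sizes followed by rank 32-bit lower bounds, so the size of dimension d sits at dataOffset + 4*d and its lower bound at dataOffset + 4*(d + rank). A small sketch of the same arithmetic (illustrative names, 4 standing in for genTypeSize(TYP_INT)):

    inline unsigned mdArrDimensionSizeOffset(unsigned dataOffset, unsigned rank, unsigned dimension)
    {
        (void)rank; // the size block comes first, so rank does not shift it
        return dataOffset + 4 * dimension;
    }

    inline unsigned mdArrLowerBoundOffset(unsigned dataOffset, unsigned rank, unsigned dimension)
    {
        // lower bounds follow the 'rank' dimension sizes
        return dataOffset + 4 * (dimension + rank);
    }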
@@ -4438,19 +4377,18 @@ CodeGen::genOffsetOfMDArrayDimensionSize(var_types elemType, unsigned rank, unsi
// None.
//
-void
-CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
+void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
- GenTreePtr arrObj = arrIndex->ArrObj();
- GenTreePtr indexNode = arrIndex->IndexExpr();
+ GenTreePtr arrObj = arrIndex->ArrObj();
+ GenTreePtr indexNode = arrIndex->IndexExpr();
- regNumber arrReg = genConsumeReg(arrObj);
- regNumber indexReg = genConsumeReg(indexNode);
- regNumber tgtReg = arrIndex->gtRegNum;
+ regNumber arrReg = genConsumeReg(arrObj);
+ regNumber indexReg = genConsumeReg(indexNode);
+ regNumber tgtReg = arrIndex->gtRegNum;
- unsigned dim = arrIndex->gtCurrDim;
- unsigned rank = arrIndex->gtArrRank;
- var_types elemType = arrIndex->gtArrElemType;
+ unsigned dim = arrIndex->gtCurrDim;
+ unsigned rank = arrIndex->gtArrRank;
+ var_types elemType = arrIndex->gtArrElemType;
noway_assert(tgtReg != REG_NA);
@@ -4460,23 +4398,17 @@ CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
inst_RV_RV(INS_mov, tgtReg, indexReg, indexNode->TypeGet());
}
- getEmitter()->emitIns_R_AR(INS_sub,
- emitActualTypeSize(TYP_INT),
- tgtReg,
- arrReg,
- genOffsetOfMDArrayLowerBound(elemType, rank, dim));
- getEmitter()->emitIns_R_AR(INS_cmp,
- emitActualTypeSize(TYP_INT),
- tgtReg,
- arrReg,
- genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
+ getEmitter()->emitIns_R_AR(INS_sub, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
+ genOffsetOfMDArrayLowerBound(elemType, rank, dim));
+ getEmitter()->emitIns_R_AR(INS_cmp, emitActualTypeSize(TYP_INT), tgtReg, arrReg,
+ genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
genJumpToThrowHlpBlk(EJ_jae, SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
//------------------------------------------------------------------------
-// genCodeForArrOffset: Generates code to compute the flattened array offset for
+// genCodeForArrOffset: Generates code to compute the flattened array offset for
// one dimension of an array reference:
// result = (prevDimOffset * dimSize) + effectiveIndex
// where dimSize is obtained from the arrObj operand
@@ -4491,24 +4423,23 @@ CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
// dimSize and effectiveIndex are always non-negative, the former by design,
// and the latter because it has been normalized to be zero-based.
-void
-CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
+void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
GenTreePtr offsetNode = arrOffset->gtOffset;
GenTreePtr indexNode = arrOffset->gtIndex;
GenTreePtr arrObj = arrOffset->gtArrObj;
- regNumber tgtReg = arrOffset->gtRegNum;
+ regNumber tgtReg = arrOffset->gtRegNum;
noway_assert(tgtReg != REG_NA);
- unsigned dim = arrOffset->gtCurrDim;
- unsigned rank = arrOffset->gtArrRank;
- var_types elemType = arrOffset->gtArrElemType;
+ unsigned dim = arrOffset->gtCurrDim;
+ unsigned rank = arrOffset->gtArrRank;
+ var_types elemType = arrOffset->gtArrElemType;
// We will use a temp register for the offset*scale+effectiveIndex computation.
regMaskTP tmpRegMask = arrOffset->gtRsvdRegs;
- regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask);
// First, consume the operands in the correct order.
regNumber offsetReg = REG_NA;
@@ -4539,10 +4470,7 @@ CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
// tmpReg is used to load dim_size and the result of the multiplication.
// Note that dim_size will never be negative.
- getEmitter()->emitIns_R_AR(INS_mov,
- emitActualTypeSize(TYP_INT),
- tmpReg,
- arrReg,
+ getEmitter()->emitIns_R_AR(INS_mov, emitActualTypeSize(TYP_INT), tmpReg, arrReg,
genOffsetOfMDArrayDimensionSize(elemType, rank, dim));
inst_RV_RV(INS_imul, tmpReg, offsetReg);
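As the genCodeForArrOffset header comment states, each dimension folds into the running offset as result = (prevDimOffset * dimSize) + effectiveIndex, with dimSize loaded from the array header into the temp register (the mov/imul just above) and effectiveIndex already normalized to zero-based. A minimal sketch of the recurrence:

    // One flattening step per dimension.
    inline unsigned flattenStep(unsigned prevDimOffset, unsigned dimSize, unsigned effectiveIndex)
    {
        return prevDimOffset * dimSize + effectiveIndex;
    }

    // For a 3-D access [i][j][k] over dimension sizes (d0, d1, d2) the steps
    // compose to ((i * d1) + j) * d2 + k, the usual row-major offset.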
@@ -4572,13 +4500,13 @@ CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
// make a temporary indir we can feed to pattern matching routines
// in cases where we don't want to instantiate all the indirs that happen
//
-GenTreeIndir CodeGen::indirForm(var_types type, GenTree *base)
+GenTreeIndir CodeGen::indirForm(var_types type, GenTree* base)
{
GenTreeIndir i(GT_IND, type, base, nullptr);
i.gtRegNum = REG_NA;
// has to be nonnull (because contained nodes can't be the last in block)
// but don't want it to be a valid pointer
- i.gtNext = (GenTree *)(-1);
+ i.gtNext = (GenTree*)(-1);
return i;
}
@@ -4591,11 +4519,10 @@ GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
i.gtRegNum = REG_NA;
// has to be nonnull (because contained nodes can't be the last in block)
// but don't want it to be a valid pointer
- i.gtNext = (GenTree *)(-1);
+ i.gtNext = (GenTree*)(-1);
return i;
}
-
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins;
@@ -4609,32 +4536,66 @@ instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
switch (oper)
{
- case GT_ADD: ins = INS_add; break;
- case GT_AND: ins = INS_and; break;
- case GT_LSH: ins = INS_shl; break;
- case GT_MUL: ins = INS_imul; break;
- case GT_NEG: ins = INS_neg; break;
- case GT_NOT: ins = INS_not; break;
- case GT_OR: ins = INS_or; break;
- case GT_ROL: ins = INS_rol; break;
- case GT_ROR: ins = INS_ror; break;
- case GT_RSH: ins = INS_sar; break;
- case GT_RSZ: ins = INS_shr; break;
- case GT_SUB: ins = INS_sub; break;
- case GT_XOR: ins = INS_xor; break;
+ case GT_ADD:
+ ins = INS_add;
+ break;
+ case GT_AND:
+ ins = INS_and;
+ break;
+ case GT_LSH:
+ ins = INS_shl;
+ break;
+ case GT_MUL:
+ ins = INS_imul;
+ break;
+ case GT_NEG:
+ ins = INS_neg;
+ break;
+ case GT_NOT:
+ ins = INS_not;
+ break;
+ case GT_OR:
+ ins = INS_or;
+ break;
+ case GT_ROL:
+ ins = INS_rol;
+ break;
+ case GT_ROR:
+ ins = INS_ror;
+ break;
+ case GT_RSH:
+ ins = INS_sar;
+ break;
+ case GT_RSZ:
+ ins = INS_shr;
+ break;
+ case GT_SUB:
+ ins = INS_sub;
+ break;
+ case GT_XOR:
+ ins = INS_xor;
+ break;
#if !defined(_TARGET_64BIT_)
- case GT_ADD_LO: ins = INS_add; break;
- case GT_ADD_HI: ins = INS_adc; break;
- case GT_SUB_LO: ins = INS_sub; break;
- case GT_SUB_HI: ins = INS_sbb; break;
+ case GT_ADD_LO:
+ ins = INS_add;
+ break;
+ case GT_ADD_HI:
+ ins = INS_adc;
+ break;
+ case GT_SUB_LO:
+ ins = INS_sub;
+ break;
+ case GT_SUB_HI:
+ ins = INS_sbb;
+ break;
#endif // !defined(_TARGET_64BIT_)
- default: unreached();
- break;
+ default:
+ unreached();
+ break;
}
return ins;
}
-
//------------------------------------------------------------------------
// genCodeForShift: Generates the code sequence for a GenTree node that
// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
@@ -4657,11 +4618,11 @@ void CodeGen::genCodeForShift(GenTreePtr tree)
genConsumeOperands(tree->AsOp());
- var_types targetType = tree->TypeGet();
- instruction ins = genGetInsForOper(tree->OperGet(), targetType);
+ var_types targetType = tree->TypeGet();
+ instruction ins = genGetInsForOper(tree->OperGet(), targetType);
- GenTreePtr operand = tree->gtGetOp1();
- regNumber operandReg = operand->gtRegNum;
+ GenTreePtr operand = tree->gtGetOp1();
+ regNumber operandReg = operand->gtRegNum;
GenTreePtr shiftBy = tree->gtGetOp2();
if (shiftBy->isContainedIntOrIImmed())
@@ -4702,7 +4663,6 @@ void CodeGen::genCodeForShift(GenTreePtr tree)
genProduceReg(tree);
}
-
//------------------------------------------------------------------------
// genCodeForShiftRMW: Generates the code sequence for a GT_STOREIND GenTree node that
// represents a RMW bit shift or rotate operation (<<, >>, >>>, rol, ror), for example:
@@ -4724,16 +4684,16 @@ void CodeGen::genCodeForShiftRMW(GenTreeStoreInd* storeInd)
assert(Lowering::IndirsAreEquivalent(data->gtOp.gtOp1, storeInd));
assert(data->gtRegNum == REG_NA);
- var_types targetType = data->TypeGet();
- genTreeOps oper = data->OperGet();
- instruction ins = genGetInsForOper(oper, targetType);
- emitAttr attr = EA_ATTR(genTypeSize(targetType));
+ var_types targetType = data->TypeGet();
+ genTreeOps oper = data->OperGet();
+ instruction ins = genGetInsForOper(oper, targetType);
+ emitAttr attr = EA_ATTR(genTypeSize(targetType));
GenTree* shiftBy = data->gtOp.gtOp2;
if (shiftBy->isContainedIntOrIImmed())
{
int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
- ins = genMapShiftInsToShiftByConstantIns(ins, shiftByValue);
+ ins = genMapShiftInsToShiftByConstantIns(ins, shiftByValue);
if (shiftByValue == 1)
{
// There is no source in this case, as the shift by count is embedded in the instruction opcode itself.
@@ -4761,10 +4721,10 @@ void CodeGen::genCodeForShiftRMW(GenTreeStoreInd* storeInd)
}
}
-void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
+void CodeGen::genUnspillRegIfNeeded(GenTree* tree)
{
- regNumber dstReg = tree->gtRegNum;
- GenTree* unspillTree = tree;
+ regNumber dstReg = tree->gtRegNum;
+ GenTree* unspillTree = tree;
if (tree->gtOper == GT_RELOAD)
{
@@ -4778,8 +4738,8 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
// Reset spilled flag, since we are going to load a local variable from its home location.
unspillTree->gtFlags &= ~GTF_SPILLED;
- GenTreeLclVarCommon* lcl = unspillTree->AsLclVarCommon();
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
+ GenTreeLclVarCommon* lcl = unspillTree->AsLclVarCommon();
+ LclVarDsc* varDsc = &compiler->lvaTable[lcl->gtLclNum];
// Load local variable from its home location.
// In most cases the tree type will indicate the correct type to use for the load.
@@ -4794,9 +4754,7 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
// extending load.
var_types treeType = unspillTree->TypeGet();
- if (treeType != genActualType(varDsc->lvType) &&
- !varTypeIsGC(treeType) &&
- !varDsc->lvNormalizeOnLoad())
+ if (treeType != genActualType(varDsc->lvType) && !varTypeIsGC(treeType) && !varDsc->lvNormalizeOnLoad())
{
assert(!varTypeIsGC(varDsc));
var_types spillType = genActualType(varDsc->lvType);
@@ -4836,7 +4794,7 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
#endif // DEBUG
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("\t\t\t\t\t\t\tV%02u in reg ", lcl->gtLclNum);
@@ -4854,28 +4812,28 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
}
else if (unspillTree->IsMultiRegCall())
{
- GenTreeCall* call = unspillTree->AsCall();
- ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = retTypeDesc->GetReturnRegCount();
- GenTreeCopyOrReload* reloadTree = nullptr;
+ GenTreeCall* call = unspillTree->AsCall();
+ ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
+ unsigned regCount = retTypeDesc->GetReturnRegCount();
+ GenTreeCopyOrReload* reloadTree = nullptr;
if (tree->OperGet() == GT_RELOAD)
{
reloadTree = tree->AsCopyOrReload();
}
// In case of multi-reg call node, GTF_SPILLED flag on it indicates that
- // one or more of its result regs are spilled. Call node needs to be
+ // one or more of its result regs are spilled. Call node needs to be
// queried to know which specific result regs to be unspilled.
for (unsigned i = 0; i < regCount; ++i)
{
unsigned flags = call->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILLED) != 0)
{
- var_types dstType = retTypeDesc->GetReturnRegType(i);
+ var_types dstType = retTypeDesc->GetReturnRegType(i);
regNumber unspillTreeReg = call->GetRegNumByIdx(i);
if (reloadTree != nullptr)
- {
+ {
dstReg = reloadTree->GetRegNumByIdx(i);
if (dstReg == REG_NA)
{
@@ -4888,13 +4846,10 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
}
TempDsc* t = regSet.rsUnspillInPlace(call, unspillTreeReg, i);
- getEmitter()->emitIns_R_S(ins_Load(dstType),
- emitActualTypeSize(dstType),
- dstReg,
- t->tdTempNum(),
+ getEmitter()->emitIns_R_S(ins_Load(dstType), emitActualTypeSize(dstType), dstReg, t->tdTempNum(),
0);
compiler->tmpRlsTemp(t);
- gcInfo.gcMarkRegPtrVal(dstReg, dstType);
+ gcInfo.gcMarkRegPtrVal(dstReg, dstType);
}
}
@@ -4904,24 +4859,21 @@ void CodeGen::genUnspillRegIfNeeded(GenTree *tree)
else
{
TempDsc* t = regSet.rsUnspillInPlace(unspillTree, unspillTree->gtRegNum);
- getEmitter()->emitIns_R_S(ins_Load(unspillTree->gtType),
- emitActualTypeSize(unspillTree->TypeGet()),
- dstReg,
- t->tdTempNum(),
- 0);
+ getEmitter()->emitIns_R_S(ins_Load(unspillTree->gtType), emitActualTypeSize(unspillTree->TypeGet()), dstReg,
+ t->tdTempNum(), 0);
compiler->tmpRlsTemp(t);
unspillTree->gtFlags &= ~GTF_SPILLED;
unspillTree->SetInReg();
gcInfo.gcMarkRegPtrVal(dstReg, unspillTree->TypeGet());
- }
+ }
}
}
// Do Liveness update for a subnodes that is being consumed by codegen
// including the logic for reload in case is needed and also takes care
// of locating the value on the desired register.
-void CodeGen::genConsumeRegAndCopy(GenTree *tree, regNumber needReg)
+void CodeGen::genConsumeRegAndCopy(GenTree* tree, regNumber needReg)
{
if (needReg == REG_NA)
{
@@ -4937,23 +4889,23 @@ void CodeGen::genConsumeRegAndCopy(GenTree *tree, regNumber needReg)
void CodeGen::genRegCopy(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_COPY);
- GenTree* op1 = treeNode->gtOp.gtOp1;
+ GenTree* op1 = treeNode->gtOp.gtOp1;
if (op1->IsMultiRegCall())
{
genConsumeReg(op1);
- GenTreeCopyOrReload* copyTree = treeNode->AsCopyOrReload();
- GenTreeCall* call = op1->AsCall();
- ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = retTypeDesc->GetReturnRegCount();
+ GenTreeCopyOrReload* copyTree = treeNode->AsCopyOrReload();
+ GenTreeCall* call = op1->AsCall();
+ ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
+ unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
- var_types type = retTypeDesc->GetReturnRegType(i);
+ var_types type = retTypeDesc->GetReturnRegType(i);
regNumber fromReg = call->GetRegNumByIdx(i);
- regNumber toReg = copyTree->GetRegNumByIdx(i);
-
+ regNumber toReg = copyTree->GetRegNumByIdx(i);
+
// A Multi-reg GT_COPY node will have valid reg only for those
// positions that corresponding result reg of call node needs
// to be copied.
@@ -4967,7 +4919,7 @@ void CodeGen::genRegCopy(GenTree* treeNode)
else
{
var_types targetType = treeNode->TypeGet();
- regNumber targetReg = treeNode->gtRegNum;
+ regNumber targetReg = treeNode->gtRegNum;
assert(targetReg != REG_NA);
// Check whether this node and the node from which we're copying the value have
@@ -4981,19 +4933,19 @@ void CodeGen::genRegCopy(GenTree* treeNode)
if (srcFltReg != tgtFltReg)
{
instruction ins;
- regNumber fpReg;
- regNumber intReg;
+ regNumber fpReg;
+ regNumber intReg;
if (tgtFltReg)
{
- ins = ins_CopyIntToFloat(op1->TypeGet(), treeNode->TypeGet());
- fpReg = targetReg;
+ ins = ins_CopyIntToFloat(op1->TypeGet(), treeNode->TypeGet());
+ fpReg = targetReg;
intReg = op1->gtRegNum;
}
else
{
- ins = ins_CopyFloatToInt(op1->TypeGet(), treeNode->TypeGet());
+ ins = ins_CopyFloatToInt(op1->TypeGet(), treeNode->TypeGet());
intReg = targetReg;
- fpReg = op1->gtRegNum;
+ fpReg = op1->gtRegNum;
}
inst_RV_RV(ins, fpReg, intReg, targetType);
}
@@ -5093,7 +5045,7 @@ regNumber CodeGen::genConsumeReg(GenTree* tree)
}
// Handle the case where we have a lclVar that needs to be copied before use (i.e. because it
- // interferes with one of the other sources (or the target, if it's a "delayed use" register)).
+ // interferes with one of the other sources (or the target, if it's a "delayed use" register)).
// TODO-Cleanup: This is a special copyReg case in LSRA - consider eliminating these and
// always using GT_COPY to make the lclVar location explicit.
// Note that we have to do this before calling genUpdateLife because otherwise if we spill it
@@ -5104,8 +5056,8 @@ regNumber CodeGen::genConsumeReg(GenTree* tree)
// because if it's on the stack it will always get reloaded into tree->gtRegNum).
if (genIsRegCandidateLocal(tree))
{
- GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()];
+ GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
+ LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()];
if (varDsc->lvRegNum != REG_STK && varDsc->lvRegNum != tree->gtRegNum)
{
inst_RV_RV(INS_mov, tree->gtRegNum, varDsc->lvRegNum);
@@ -5126,8 +5078,8 @@ regNumber CodeGen::genConsumeReg(GenTree* tree)
if (genIsRegCandidateLocal(tree))
{
- GenTreeLclVarCommon *lcl = tree->AsLclVarCommon();
- LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()];
+ GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
+ LclVarDsc* varDsc = &compiler->lvaTable[lcl->GetLclNum()];
assert(varDsc->lvLRACandidate);
if ((tree->gtFlags & GTF_VAR_DEATH) != 0)
@@ -5163,7 +5115,7 @@ void CodeGen::genConsumeAddress(GenTree* addr)
}
// do liveness update for a subnode that is being consumed by codegen
-void CodeGen::genConsumeAddrMode(GenTreeAddrMode *addr)
+void CodeGen::genConsumeAddrMode(GenTreeAddrMode* addr)
{
genConsumeOperands(addr);
}
@@ -5198,7 +5150,7 @@ void CodeGen::genConsumeRegs(GenTree* tree)
else if (tree->OperGet() == GT_LCL_VAR)
{
// A contained lcl var must be living on stack and marked as reg optional.
- unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
+ unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = compiler->lvaTable + varNum;
noway_assert(varDsc->lvRegNum == REG_STK);
@@ -5235,12 +5187,12 @@ void CodeGen::genConsumeRegs(GenTree* tree)
void CodeGen::genConsumeOperands(GenTreeOp* tree)
{
- GenTree* firstOp = tree->gtOp1;
+ GenTree* firstOp = tree->gtOp1;
GenTree* secondOp = tree->gtOp2;
if ((tree->gtFlags & GTF_REVERSE_OPS) != 0)
{
assert(secondOp != nullptr);
- firstOp = secondOp;
+ firstOp = secondOp;
secondOp = tree->gtOp1;
}
if (firstOp != nullptr)
@@ -5273,9 +5225,10 @@ void CodeGen::genConsumeOperands(GenTreeOp* tree)
// for copying on the stack a struct with references.
// The source address/offset is determined from the address on the GT_OBJ node, while
// the destination address is the address contained in 'baseVarNum' plus the offset
-// provided in the 'putArgNode'.
+// provided in the 'putArgNode'.
-void CodeGen::genConsumePutStructArgStk(GenTreePutArgStk* putArgNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg, unsigned baseVarNum)
+void CodeGen::genConsumePutStructArgStk(
+ GenTreePutArgStk* putArgNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg, unsigned baseVarNum)
{
assert(varTypeIsStruct(putArgNode));
assert(baseVarNum != BAD_VAR_NUM);
@@ -5289,7 +5242,7 @@ void CodeGen::genConsumePutStructArgStk(GenTreePutArgStk* putArgNode, regNumber
GenTree* src = putArgNode->gtGetOp1();
assert((src->gtOper == GT_OBJ) || ((src->gtOper == GT_IND && varTypeIsSIMD(src))));
GenTree* srcAddr = src->gtGetOp1();
-
+
size_t size = putArgNode->getArgSize();
assert(dstReg != REG_NA);
@@ -5310,7 +5263,7 @@ void CodeGen::genConsumePutStructArgStk(GenTreePutArgStk* putArgNode, regNumber
// Destination is always local (on the stack) - use EA_PTRSIZE.
getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, dstReg, baseVarNum, putArgNode->getArgOffset());
}
-
+
if (srcAddr->gtRegNum != srcReg)
{
if (srcAddr->OperIsLocalAddr())
@@ -5354,50 +5307,50 @@ void CodeGen::genConsumeBlockOp(GenTreeBlkOp* blkNode, regNumber dstReg, regNumb
// to the REQUIRED register (if a fixed register requirement) in execution order. This requires,
// then, that we first consume all the operands, then do any necessary moves.
- GenTree* dst = blkNode->Dest();
- GenTree* src = blkNode->gtOp.gtOp1->gtOp.gtOp2;
- GenTree* size = blkNode->gtOp.gtOp2;
- GenTree* op1;
- GenTree* op2;
- GenTree* op3;
+ GenTree* dst = blkNode->Dest();
+ GenTree* src = blkNode->gtOp.gtOp1->gtOp.gtOp2;
+ GenTree* size = blkNode->gtOp.gtOp2;
+ GenTree* op1;
+ GenTree* op2;
+ GenTree* op3;
regNumber reg1, reg2, reg3;
if (!blkNode->IsReverseOp() && !blkNode->gtOp1->IsReverseOp())
{
- op1 = dst;
+ op1 = dst;
reg1 = dstReg;
- op2 = src;
+ op2 = src;
reg2 = srcReg;
- op3 = size;
+ op3 = size;
reg3 = sizeReg;
}
else if (!blkNode->IsReverseOp())
{
// We know that the operands for the GT_LIST node 'blkNode->gtOp.gtOp1' are reversed.
- op1 = src;
+ op1 = src;
reg1 = srcReg;
- op2 = dst;
+ op2 = dst;
reg2 = dstReg;
- op3 = size;
+ op3 = size;
reg3 = sizeReg;
}
else if (!blkNode->gtOp1->IsReverseOp())
{
// We know from above that the operands to 'blkNode' are reversed.
- op1 = size;
+ op1 = size;
reg1 = sizeReg;
- op2 = dst;
+ op2 = dst;
reg2 = dstReg;
- op3 = src;
+ op3 = src;
reg3 = srcReg;
}
else
{
// They are BOTH reversed.
- op1 = size;
+ op1 = size;
reg1 = sizeReg;
- op2 = src;
- reg2= srcReg;
- op3 = dst;
+ op2 = src;
+ reg2 = srcReg;
+ op3 = dst;
reg3 = dstReg;
}
if (reg1 != REG_NA)
@@ -5453,20 +5406,21 @@ void CodeGen::genProduceReg(GenTree* tree)
tree->gtFlags &= ~GTF_REG_VAL;
// Ensure that lclVar stores are typed correctly.
unsigned varNum = tree->gtLclVarCommon.gtLclNum;
- assert(!compiler->lvaTable[varNum].lvNormalizeOnStore() || (tree->TypeGet() == genActualType(compiler->lvaTable[varNum].TypeGet())));
+ assert(!compiler->lvaTable[varNum].lvNormalizeOnStore() ||
+ (tree->TypeGet() == genActualType(compiler->lvaTable[varNum].TypeGet())));
inst_TT_RV(ins_Store(tree->gtType, compiler->isSIMDTypeLocalAligned(varNum)), tree, tree->gtRegNum);
}
else
{
// In case of multi-reg call node, spill flag on call node
// indicates that one or more of its allocated regs need to
- // be spilled. Call node needs to be further queried to
+ // be spilled. Call node needs to be further queried to
// know which of its result regs needs to be spilled.
if (tree->IsMultiRegCall())
{
- GenTreeCall* call = tree->AsCall();
+ GenTreeCall* call = tree->AsCall();
ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = retTypeDesc->GetReturnRegCount();
+ unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
@@ -5476,7 +5430,7 @@ void CodeGen::genProduceReg(GenTree* tree)
regNumber reg = call->GetRegNumByIdx(i);
call->SetInReg();
regSet.rsSpillTree(reg, call, i);
- gcInfo.gcMarkRegSetNpt(genRegMask(reg));
+ gcInfo.gcMarkRegSetNpt(genRegMask(reg));
}
}
}
@@ -5506,20 +5460,19 @@ void CodeGen::genProduceReg(GenTree* tree)
// the register wouldn't be relevant.
// 2. The register candidate local is going dead. There's no point to mark
// the register as live, with a GC pointer, if the variable is dead.
- if (!genIsRegCandidateLocal(tree) ||
- ((tree->gtFlags & GTF_VAR_DEATH) == 0))
- {
+ if (!genIsRegCandidateLocal(tree) || ((tree->gtFlags & GTF_VAR_DEATH) == 0))
+ {
// Multi-reg call node will produce more than one register result.
// Mark all the regs produced by call node.
if (tree->IsMultiRegCall())
{
- GenTreeCall* call = tree->AsCall();
+ GenTreeCall* call = tree->AsCall();
ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = retTypeDesc->GetReturnRegCount();
+ unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
- regNumber reg = call->GetRegNumByIdx(i);
+ regNumber reg = call->GetRegNumByIdx(i);
var_types type = retTypeDesc->GetReturnRegType(i);
gcInfo.gcMarkRegPtrVal(reg, type);
}
@@ -5532,16 +5485,16 @@ void CodeGen::genProduceReg(GenTree* tree)
// A multi-reg GT_COPY node produces those regs to which
// copy has taken place.
- GenTreeCopyOrReload* copy = tree->AsCopyOrReload();
- GenTreeCall* call = copy->gtGetOp1()->AsCall();
- ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- unsigned regCount = retTypeDesc->GetReturnRegCount();
+ GenTreeCopyOrReload* copy = tree->AsCopyOrReload();
+ GenTreeCall* call = copy->gtGetOp1()->AsCall();
+ ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
+ unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
- var_types type = retTypeDesc->GetReturnRegType(i);
+ var_types type = retTypeDesc->GetReturnRegType(i);
regNumber fromReg = call->GetRegNumByIdx(i);
- regNumber toReg = copy->GetRegNumByIdx(i);
+ regNumber toReg = copy->GetRegNumByIdx(i);
if (toReg != REG_NA)
{
@@ -5561,93 +5514,66 @@ void CodeGen::genProduceReg(GenTree* tree)
// transfer gc/byref status of src reg to dst reg
void CodeGen::genTransferRegGCState(regNumber dst, regNumber src)
{
- regMaskTP srcMask = genRegMask(src);
- regMaskTP dstMask = genRegMask(dst);
-
- if (gcInfo.gcRegGCrefSetCur & srcMask)
- {
- gcInfo.gcMarkRegSetGCref(dstMask);
- }
- else if (gcInfo.gcRegByrefSetCur & srcMask)
- {
- gcInfo.gcMarkRegSetByref(dstMask);
- }
- else
- {
- gcInfo.gcMarkRegSetNpt(dstMask);
- }
+ regMaskTP srcMask = genRegMask(src);
+ regMaskTP dstMask = genRegMask(dst);
+
+ if (gcInfo.gcRegGCrefSetCur & srcMask)
+ {
+ gcInfo.gcMarkRegSetGCref(dstMask);
+ }
+ else if (gcInfo.gcRegByrefSetCur & srcMask)
+ {
+ gcInfo.gcMarkRegSetByref(dstMask);
+ }
+ else
+ {
+ gcInfo.gcMarkRegSetNpt(dstMask);
+ }
}
// generates an ip-relative call or indirect call via reg ('call reg')
// pass in 'addr' for a relative call or 'base' for a indirect register call
-// methHnd - optional, only used for pretty printing
+// methHnd - optional, only used for pretty printing
// retSize - emitter type of return for GC purposes, should be EA_BYREF, EA_GCREF, or EA_PTRSIZE(not GC)
-void CodeGen::genEmitCall(int callType,
- CORINFO_METHOD_HANDLE methHnd,
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
- void* addr
- X86_ARG(ssize_t argSize),
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
- IL_OFFSETX ilOffset,
- regNumber base,
- bool isJump,
- bool isNoGC)
+void CodeGen::genEmitCall(int callType,
+ CORINFO_METHOD_HANDLE methHnd,
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) void* addr X86_ARG(ssize_t argSize),
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
+ IL_OFFSETX ilOffset,
+ regNumber base,
+ bool isJump,
+ bool isNoGC)
{
#if !defined(_TARGET_X86_)
- ssize_t argSize = 0;
+ ssize_t argSize = 0;
#endif // !defined(_TARGET_X86_)
- getEmitter()->emitIns_Call(emitter::EmitCallType(callType),
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- addr,
- argSize,
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- base, REG_NA, 0, 0,
- isJump,
+ getEmitter()->emitIns_Call(emitter::EmitCallType(callType), methHnd, INDEBUG_LDISASM_COMMA(sigInfo) addr, argSize,
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize), gcInfo.gcVarPtrSetCur,
+ gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, ilOffset, base, REG_NA, 0, 0, isJump,
emitter::emitNoGChelper(compiler->eeGetHelperNum(methHnd)));
}
// generates an indirect call via addressing mode (call []) given an indir node
// methHnd - optional, only used for pretty printing
// retSize - emitter type of return for GC purposes, should be EA_BYREF, EA_GCREF, or EA_PTRSIZE(not GC)
-void CodeGen::genEmitCall(int callType,
- CORINFO_METHOD_HANDLE methHnd,
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
- GenTreeIndir* indir
- X86_ARG(ssize_t argSize),
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
- IL_OFFSETX ilOffset)
+void CodeGen::genEmitCall(int callType,
+ CORINFO_METHOD_HANDLE methHnd,
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) GenTreeIndir* indir X86_ARG(ssize_t argSize),
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
+ IL_OFFSETX ilOffset)
{
#if !defined(_TARGET_X86_)
- ssize_t argSize = 0;
+ ssize_t argSize = 0;
#endif // !defined(_TARGET_X86_)
genConsumeAddress(indir->Addr());
- getEmitter()->emitIns_Call(emitter::EmitCallType(callType),
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- nullptr,
- argSize,
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- ilOffset,
- indir->Base() ? indir->Base()->gtRegNum : REG_NA,
- indir->Index() ? indir->Index()->gtRegNum : REG_NA,
- indir->Scale(),
- indir->Offset());
+ getEmitter()->emitIns_Call(emitter::EmitCallType(callType), methHnd, INDEBUG_LDISASM_COMMA(sigInfo) nullptr,
+ argSize, retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
+ gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur, ilOffset,
+ indir->Base() ? indir->Base()->gtRegNum : REG_NA,
+ indir->Index() ? indir->Index()->gtRegNum : REG_NA, indir->Scale(), indir->Offset());
}
-
//------------------------------------------------------------------------
// genStoreInd: Generate code for a GT_STOREIND node.
//
@@ -5668,12 +5594,12 @@ void CodeGen::genStoreInd(GenTreePtr node)
genStoreIndTypeSIMD12(node);
return;
}
-#endif //FEATURE_SIMD
+#endif // FEATURE_SIMD
- GenTreeStoreInd* storeInd = node->AsStoreInd();
- GenTree* data = storeInd->Data();
- GenTree* addr = storeInd->Addr();
- var_types targetType = storeInd->TypeGet();
+ GenTreeStoreInd* storeInd = node->AsStoreInd();
+ GenTree* data = storeInd->Data();
+ GenTree* addr = storeInd->Addr();
+ var_types targetType = storeInd->TypeGet();
assert(!varTypeIsFloating(targetType) || (targetType == data->TypeGet()));
@@ -5685,7 +5611,9 @@ void CodeGen::genStoreInd(GenTreePtr node)
genConsumeOperands(storeInd->AsOp());
if (genEmitOptimizedGCWriteBarrier(writeBarrierForm, addr, data))
+ {
return;
+ }
// At this point, we should not have any interference.
// That is, 'data' must not be in REG_ARG_0, as that is where 'addr' must go.
@@ -5707,11 +5635,11 @@ void CodeGen::genStoreInd(GenTreePtr node)
}
else
{
- bool reverseOps = ((storeInd->gtFlags & GTF_REVERSE_OPS) != 0);
- bool dataIsUnary = false;
- bool isRMWMemoryOp = storeInd->IsRMWMemoryOp();
- GenTree* rmwSrc = nullptr;
-
+ bool reverseOps = ((storeInd->gtFlags & GTF_REVERSE_OPS) != 0);
+ bool dataIsUnary = false;
+ bool isRMWMemoryOp = storeInd->IsRMWMemoryOp();
+ GenTree* rmwSrc = nullptr;
+
// We must consume the operands in the proper execution order, so that liveness is
// updated appropriately.
if (!reverseOps)
@@ -5720,7 +5648,7 @@ void CodeGen::genStoreInd(GenTreePtr node)
}
// If storeInd represents a RMW memory op then its data is a non-leaf node marked as contained
- // and non-indir operand of data is the source of RMW memory op.
+ // and non-indir operand of data is the source of RMW memory op.
if (isRMWMemoryOp)
{
assert(data->isContained() && !data->OperIsLeaf());
@@ -5758,7 +5686,7 @@ void CodeGen::genStoreInd(GenTreePtr node)
assert(rmwSrc != nullptr);
assert(rmwDst != nullptr);
- assert(Lowering::IndirsAreEquivalent(rmwDst, storeInd));
+ assert(Lowering::IndirsAreEquivalent(rmwDst, storeInd));
}
else
{
@@ -5775,14 +5703,16 @@ void CodeGen::genStoreInd(GenTreePtr node)
if (dataIsUnary)
{
// generate code for unary RMW memory ops like neg/not
- getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(storeInd), storeInd);
+ getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(storeInd),
+ storeInd);
}
else
{
if (data->OperIsShiftOrRotate())
{
// Generate code for shift RMW memory ops.
- // The data address needs to be op1 (it must be [addr] = [addr] <shift> <amount>, not [addr] = <amount> <shift> [addr]).
+ // The data address needs to be op1 (it must be [addr] = [addr] <shift> <amount>, not [addr] =
+ // <amount> <shift> [addr]).
assert(storeInd->IsRMWDstOp1());
assert(rmwSrc == data->gtGetOp2());
genCodeForShiftRMW(storeInd);
@@ -5790,7 +5720,8 @@ void CodeGen::genStoreInd(GenTreePtr node)
else
{
// generate code for remaining binary RMW memory ops like add/sub/and/or/xor
- getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(storeInd), storeInd, rmwSrc);
+ getEmitter()->emitInsRMW(genGetInsForOper(data->OperGet(), data->TypeGet()), emitTypeSize(storeInd),
+ storeInd, rmwSrc);
}
}
}
@@ -5801,7 +5732,6 @@ void CodeGen::genStoreInd(GenTreePtr node)
}
}
-
//------------------------------------------------------------------------
// genEmitOptimizedGCWriteBarrier: Generate write barrier store using the optimized
// helper functions.
@@ -5823,7 +5753,8 @@ bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarri
bool useOptimizedWriteBarriers = true;
#ifdef DEBUG
- useOptimizedWriteBarriers = (writeBarrierForm != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method.
+ useOptimizedWriteBarriers =
+ (writeBarrierForm != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug); // This one is always a call to a C++ method.
#endif
if (!useOptimizedWriteBarriers)
@@ -5831,30 +5762,18 @@ bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarri
return false;
}
- const static int regToHelper[2][8] =
- {
+ const static int regToHelper[2][8] = {
// If the target is known to be in managed memory
{
- CORINFO_HELP_ASSIGN_REF_EAX,
- CORINFO_HELP_ASSIGN_REF_ECX,
- -1,
- CORINFO_HELP_ASSIGN_REF_EBX,
- -1,
- CORINFO_HELP_ASSIGN_REF_EBP,
- CORINFO_HELP_ASSIGN_REF_ESI,
- CORINFO_HELP_ASSIGN_REF_EDI,
+ CORINFO_HELP_ASSIGN_REF_EAX, CORINFO_HELP_ASSIGN_REF_ECX, -1, CORINFO_HELP_ASSIGN_REF_EBX, -1,
+ CORINFO_HELP_ASSIGN_REF_EBP, CORINFO_HELP_ASSIGN_REF_ESI, CORINFO_HELP_ASSIGN_REF_EDI,
},
// Don't know if the target is in managed memory
{
- CORINFO_HELP_CHECKED_ASSIGN_REF_EAX,
- CORINFO_HELP_CHECKED_ASSIGN_REF_ECX,
- -1,
- CORINFO_HELP_CHECKED_ASSIGN_REF_EBX,
- -1,
- CORINFO_HELP_CHECKED_ASSIGN_REF_EBP,
- CORINFO_HELP_CHECKED_ASSIGN_REF_ESI,
- CORINFO_HELP_CHECKED_ASSIGN_REF_EDI,
+ CORINFO_HELP_CHECKED_ASSIGN_REF_EAX, CORINFO_HELP_CHECKED_ASSIGN_REF_ECX, -1,
+ CORINFO_HELP_CHECKED_ASSIGN_REF_EBX, -1, CORINFO_HELP_CHECKED_ASSIGN_REF_EBP,
+ CORINFO_HELP_CHECKED_ASSIGN_REF_ESI, CORINFO_HELP_CHECKED_ASSIGN_REF_EDI,
},
};
@@ -5898,11 +5817,11 @@ bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarri
// with rationalized RyuJIT IR. So, for now, just emit the helper call directly here.
genEmitHelperCall(regToHelper[tgtAnywhere][reg],
- 0, // argSize
- EA_PTRSIZE); // retSize
+ 0, // argSize
+ EA_PTRSIZE); // retSize
return true;
-#else // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS
+#else // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS
return false;
#endif // !defined(_TARGET_X86_) || !NOGC_WRITE_BARRIERS
}
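The regToHelper table reformatted above picks among the register-specialized x86 write-barrier helpers: the first row is used when the destination is known to be in the GC heap, the second (checked) row when it might not be, and the column is indexed by the register number involved in the store, with -1 marking registers that have no dedicated helper. A minimal sketch of the lookup (helper ids and numbering are illustrative, not real CorInfoHelpFunc values):

    // Returns the helper id for the given row/column, or -1 if unsupported.
    inline int selectWriteBarrierHelper(const int helperTable[2][8], bool targetMayNotBeInHeap, unsigned regEncoding)
    {
        return helperTable[targetMayNotBeInHeap ? 1 : 0][regEncoding];
    }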
@@ -5910,15 +5829,15 @@ bool CodeGen::genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarri
// Produce code for a GT_CALL node
void CodeGen::genCallInstruction(GenTreePtr node)
{
- GenTreeCall *call = node->AsCall();
+ GenTreeCall* call = node->AsCall();
assert(call->gtOper == GT_CALL);
- gtCallTypes callType = (gtCallTypes)call->gtCallType;
+ gtCallTypes callType = (gtCallTypes)call->gtCallType;
- IL_OFFSETX ilOffset = BAD_IL_OFFSET;
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET;
// all virtuals should have been expanded into a control expression
- assert (!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
+ assert(!call->IsVirtual() || call->gtControlExpr || call->gtCallAddr);
// Consume all the arg regs
for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
@@ -5931,14 +5850,16 @@ void CodeGen::genCallInstruction(GenTreePtr node)
assert(curArgTabEntry);
if (curArgTabEntry->regNum == REG_STK)
+ {
continue;
+ }
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_LIST)
{
- GenTreeArgList* argListPtr = argNode->AsArgList();
- unsigned iterationNum = 0;
+ GenTreeArgList* argListPtr = argNode->AsArgList();
+ unsigned iterationNum = 0;
for (; argListPtr != nullptr; argListPtr = argListPtr->Rest(), iterationNum++)
{
GenTreePtr putArgRegNode = argListPtr->gtOp.gtOp1;
@@ -5957,12 +5878,16 @@ void CodeGen::genCallInstruction(GenTreePtr node)
genConsumeReg(putArgRegNode);
- // Validate the putArgRegNode has the right type.
- assert(putArgRegNode->TypeGet() == compiler->GetTypeFromClassificationAndSizes(curArgTabEntry->structDesc.eightByteClassifications[iterationNum],
- curArgTabEntry->structDesc.eightByteSizes[iterationNum]));
+ // Validate the putArgRegNode has the right type.
+ assert(putArgRegNode->TypeGet() ==
+ compiler->GetTypeFromClassificationAndSizes(curArgTabEntry->structDesc
+ .eightByteClassifications[iterationNum],
+ curArgTabEntry->structDesc
+ .eightByteSizes[iterationNum]));
if (putArgRegNode->gtRegNum != argReg)
{
- inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg, putArgRegNode->gtRegNum);
+ inst_RV_RV(ins_Move_Extend(putArgRegNode->TypeGet(), putArgRegNode->InReg()), argReg,
+ putArgRegNode->gtRegNum);
}
}
}
@@ -5978,14 +5903,14 @@ void CodeGen::genCallInstruction(GenTreePtr node)
}
#if FEATURE_VARARG
- // In the case of a varargs call,
+ // In the case of a varargs call,
// the ABI dictates that if we have floating point args,
- // we must pass the enregistered arguments in both the
+ // we must pass the enregistered arguments in both the
// integer and floating point registers so, let's do that.
if (call->IsVarargs() && varTypeIsFloating(argNode))
{
- regNumber targetReg = compiler->getCallArgIntRegister(argNode->gtRegNum);
- instruction ins = ins_CopyFloatToInt(argNode->TypeGet(), TYP_LONG);
+ regNumber targetReg = compiler->getCallArgIntRegister(argNode->gtRegNum);
+ instruction ins = ins_CopyFloatToInt(argNode->TypeGet(), TYP_LONG);
inst_RV_RV(ins, argNode->gtRegNum, targetReg);
}
#endif // FEATURE_VARARG
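As the comment above says, a varargs call must pass an enregistered floating-point argument in both its XMM register and the paired integer register; the ins_CopyFloatToInt move is a raw bit copy (movd/movq), not a numeric conversion. A tiny sketch of that bit copy, assuming a double in the second argument position (XMM1 paired with RDX on Windows x64):

    #include <cstdint>
    #include <cstring>

    // Same bit pattern, different register file; comparable to movq rdx, xmm1.
    inline std::uint64_t doubleBitsForIntReg(double value)
    {
        std::uint64_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        return bits;
    }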
@@ -5994,8 +5919,8 @@ void CodeGen::genCallInstruction(GenTreePtr node)
#if defined(_TARGET_X86_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// The call will pop its arguments.
// for each putarg_stk:
- ssize_t stackArgBytes = 0;
- GenTreePtr args = call->gtCallArgs;
+ ssize_t stackArgBytes = 0;
+ GenTreePtr args = call->gtCallArgs;
while (args)
{
GenTreePtr arg = args->gtOp.gtOp1;
@@ -6015,12 +5940,12 @@ void CodeGen::genCallInstruction(GenTreePtr node)
assert(arg->OperGet() == GT_PUTARG_STK);
GenTreeObj* obj = arg->gtGetOp1()->AsObj();
- stackArgBytes = compiler->info.compCompHnd->getClassSize(obj->gtClass);
+ stackArgBytes = compiler->info.compCompHnd->getClassSize(obj->gtClass);
}
else
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- stackArgBytes += genTypeSize(genActualType(arg->TypeGet()));
+ stackArgBytes += genTypeSize(genActualType(arg->TypeGet()));
}
args = args->gtOp.gtOp2;
}
@@ -6035,11 +5960,11 @@ void CodeGen::genCallInstruction(GenTreePtr node)
// Either gtControlExpr != null or gtCallAddr != null or it is a direct non-virtual call to a user or helper method.
CORINFO_METHOD_HANDLE methHnd;
- GenTree* target = call->gtControlExpr;
+ GenTree* target = call->gtControlExpr;
if (callType == CT_INDIRECT)
{
assert(target == nullptr);
- target = call->gtCall.gtCallAddr;
+ target = call->gtCall.gtCallAddr;
methHnd = nullptr;
}
else
@@ -6073,10 +5998,10 @@ void CodeGen::genCallInstruction(GenTreePtr node)
{
inst_RV_RV(INS_mov, REG_RAX, target->gtRegNum);
}
- return ;
- }
+ return;
+ }
-    // For a pinvoke to unmanaged code we emit a label to clear
+    // For a pinvoke to unmanaged code we emit a label to clear
// the GC pointer state before the callsite.
// We can't utilize the typical lazy killing of GC pointers
// at (or inside) the callsite.
@@ -6086,21 +6011,20 @@ void CodeGen::genCallInstruction(GenTreePtr node)
}
// Determine return value size(s).
- ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- emitAttr retSize = EA_PTRSIZE;
- emitAttr secondRetSize = EA_UNKNOWN;
+ ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
+ emitAttr retSize = EA_PTRSIZE;
+ emitAttr secondRetSize = EA_UNKNOWN;
if (call->HasMultiRegRetVal())
{
- retSize = emitTypeSize(retTypeDesc->GetReturnRegType(0));
+ retSize = emitTypeSize(retTypeDesc->GetReturnRegType(0));
secondRetSize = emitTypeSize(retTypeDesc->GetReturnRegType(1));
}
else
{
assert(!varTypeIsStruct(call));
- if (call->gtType == TYP_REF ||
- call->gtType == TYP_ARRAY)
+ if (call->gtType == TYP_REF || call->gtType == TYP_ARRAY)
{
retSize = EA_GCREF;
}
@@ -6110,8 +6034,8 @@ void CodeGen::genCallInstruction(GenTreePtr node)
}
}
- bool fPossibleSyncHelperCall = false;
- CorInfoHelpFunc helperNum = CORINFO_HELP_UNDEF;
+ bool fPossibleSyncHelperCall = false;
+ CorInfoHelpFunc helperNum = CORINFO_HELP_UNDEF;
#ifdef DEBUGGING_SUPPORT
// We need to propagate the IL offset information to the call instruction, so we can emit
@@ -6123,7 +6047,7 @@ void CodeGen::genCallInstruction(GenTreePtr node)
(void)compiler->genCallSite2ILOffsetMap->Lookup(call, &ilOffset);
}
#endif // DEBUGGING_SUPPORT
-
+
#if defined(_TARGET_X86_)
// If the callee pops the arguments, we pass a positive value as the argSize, and the emitter will
// adjust its stack level accordingly.
@@ -6143,29 +6067,22 @@ void CodeGen::genCallInstruction(GenTreePtr node)
{
if (target->AsIndir()->HasBase() && target->AsIndir()->Base()->isContainedIntOrIImmed())
{
- // Note that if gtControlExpr is an indir of an absolute address, we mark it as
+ // Note that if gtControlExpr is an indir of an absolute address, we mark it as
// contained only if it can be encoded as PC-relative offset.
assert(target->AsIndir()->Base()->AsIntConCommon()->FitsInAddrBase(compiler));
- genEmitCall(emitter::EC_FUNC_TOKEN_INDIR,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- (void*) target->AsIndir()->Base()->AsIntConCommon()->IconValue()
- X86_ARG(argSizeForEmitter),
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
- ilOffset);
+ genEmitCall(emitter::EC_FUNC_TOKEN_INDIR, methHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo)(void*) target->AsIndir()
+ ->Base()
+ ->AsIntConCommon()
+ ->IconValue() X86_ARG(argSizeForEmitter),
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize), ilOffset);
}
else
{
- genEmitCall(emitter::EC_INDIR_ARD,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- target->AsIndir()
- X86_ARG(argSizeForEmitter),
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
- ilOffset);
+ genEmitCall(emitter::EC_INDIR_ARD, methHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) target->AsIndir() X86_ARG(argSizeForEmitter),
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize), ilOffset);
}
}
else
@@ -6173,44 +6090,35 @@ void CodeGen::genCallInstruction(GenTreePtr node)
// We have already generated code for gtControlExpr evaluating it into a register.
// We just need to emit "call reg" in this case.
assert(genIsValidIntReg(target->gtRegNum));
- genEmitCall(emitter::EC_INDIR_R,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- nullptr //addr
+ genEmitCall(emitter::EC_INDIR_R, methHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) nullptr // addr
X86_ARG(argSizeForEmitter),
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
- ilOffset,
- genConsumeReg(target));
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize), ilOffset, genConsumeReg(target));
}
}
#ifdef FEATURE_READYTORUN_COMPILER
else if (call->gtEntryPoint.addr != nullptr)
{
- genEmitCall((call->gtEntryPoint.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN : emitter::EC_FUNC_TOKEN_INDIR,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- (void*) call->gtEntryPoint.addr
- X86_ARG(argSizeForEmitter),
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
- ilOffset);
+ genEmitCall((call->gtEntryPoint.accessType == IAT_VALUE) ? emitter::EC_FUNC_TOKEN
+ : emitter::EC_FUNC_TOKEN_INDIR,
+ methHnd, INDEBUG_LDISASM_COMMA(sigInfo)(void*) call->gtEntryPoint.addr X86_ARG(argSizeForEmitter),
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize), ilOffset);
}
#endif
else
{
// Generate a direct call to a non-virtual user defined or helper method
assert(callType == CT_HELPER || callType == CT_USER_FUNC);
-
- void *addr = nullptr;
+
+ void* addr = nullptr;
if (callType == CT_HELPER)
- {
+ {
// Direct call to a helper method.
helperNum = compiler->eeGetHelperNum(methHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
- void *pAddr = nullptr;
- addr = compiler->compGetHelperFtn(helperNum, (void **)&pAddr);
+ void* pAddr = nullptr;
+ addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
if (addr == nullptr)
{
@@ -6230,14 +6138,8 @@ void CodeGen::genCallInstruction(GenTreePtr node)
}
// Non-virtual direct calls to known addresses
- genEmitCall(emitter::EC_FUNC_TOKEN,
- methHnd,
- INDEBUG_LDISASM_COMMA(sigInfo)
- addr
- X86_ARG(argSizeForEmitter),
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
- ilOffset);
+ genEmitCall(emitter::EC_FUNC_TOKEN, methHnd, INDEBUG_LDISASM_COMMA(sigInfo) addr X86_ARG(argSizeForEmitter),
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize), ilOffset);
}
// if it was a pinvoke we may have needed to get the address of a label
@@ -6291,8 +6193,8 @@ void CodeGen::genCallInstruction(GenTreePtr node)
// to regs allocated to call node.
for (unsigned i = 0; i < regCount; ++i)
{
- var_types regType = retTypeDesc->GetReturnRegType(i);
- returnReg = retTypeDesc->GetABIReturnReg(i);
+ var_types regType = retTypeDesc->GetReturnRegType(i);
+ returnReg = retTypeDesc->GetABIReturnReg(i);
regNumber allocatedReg = call->GetRegNumByIdx(i);
if (returnReg != allocatedReg)
{
@@ -6300,11 +6202,11 @@ void CodeGen::genCallInstruction(GenTreePtr node)
}
}
-#ifdef FEATURE_SIMD
- // A Vector3 return value is stored in xmm0 and xmm1.
+#ifdef FEATURE_SIMD
+ // A Vector3 return value is stored in xmm0 and xmm1.
// RyuJIT assumes that the upper unused bits of xmm1 are cleared but
// the native compiler doesn't guarantee it.
- if (returnType == TYP_SIMD12)
+ if (returnType == TYP_SIMD12)
{
returnReg = retTypeDesc->GetABIReturnReg(1);
// Clear the upper 32 bits by two shift instructions.
@@ -6316,7 +6218,7 @@ void CodeGen::genCallInstruction(GenTreePtr node)
#endif // FEATURE_SIMD
}
else
- {
+ {
#ifdef _TARGET_X86_
if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
@@ -6327,7 +6229,7 @@ void CodeGen::genCallInstruction(GenTreePtr node)
}
else
#endif // _TARGET_X86_
- if (varTypeIsFloating(returnType))
+ if (varTypeIsFloating(returnType))
{
returnReg = REG_FLOATRET;
}
@@ -6339,7 +6241,7 @@ void CodeGen::genCallInstruction(GenTreePtr node)
if (call->gtRegNum != returnReg)
{
inst_RV_RV(ins_Copy(returnType), call->gtRegNum, returnReg, returnType);
- }
+ }
}
genProduceReg(call);
@@ -6361,27 +6263,24 @@ void CodeGen::genCallInstruction(GenTreePtr node)
if (fPossibleSyncHelperCall)
{
- switch (helperNum) {
- case CORINFO_HELP_MON_ENTER:
- case CORINFO_HELP_MON_ENTER_STATIC:
- noway_assert(compiler->syncStartEmitCookie == NULL);
- compiler->syncStartEmitCookie = getEmitter()->emitAddLabel(
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur);
- noway_assert(compiler->syncStartEmitCookie != NULL);
- break;
- case CORINFO_HELP_MON_EXIT:
- case CORINFO_HELP_MON_EXIT_STATIC:
- noway_assert(compiler->syncEndEmitCookie == NULL);
- compiler->syncEndEmitCookie = getEmitter()->emitAddLabel(
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur);
- noway_assert(compiler->syncEndEmitCookie != NULL);
- break;
- default:
- break;
+ switch (helperNum)
+ {
+ case CORINFO_HELP_MON_ENTER:
+ case CORINFO_HELP_MON_ENTER_STATIC:
+ noway_assert(compiler->syncStartEmitCookie == NULL);
+ compiler->syncStartEmitCookie =
+ getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+ noway_assert(compiler->syncStartEmitCookie != NULL);
+ break;
+ case CORINFO_HELP_MON_EXIT:
+ case CORINFO_HELP_MON_EXIT_STATIC:
+ noway_assert(compiler->syncEndEmitCookie == NULL);
+ compiler->syncEndEmitCookie =
+ getEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
+ noway_assert(compiler->syncEndEmitCookie != NULL);
+ break;
+ default:
+ break;
}
}
@@ -6403,16 +6302,16 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
assert(compiler->compJmpOpUsed);
// If no arguments, nothing to do
- if (compiler->info.compArgsCount == 0)
+ if (compiler->info.compArgsCount == 0)
{
return;
}
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
- unsigned varNum;
- LclVarDsc* varDsc;
-
+ unsigned varNum;
+ LclVarDsc* varDsc;
+
// First move any en-registered stack arguments back to the stack.
// At the same time any reg arg not in correct reg is moved back to its stack location.
//
@@ -6426,10 +6325,10 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
if (varDsc->lvPromoted)
{
- noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
- varDsc = compiler->lvaTable + fieldVarNum;
+ varDsc = compiler->lvaTable + fieldVarNum;
}
noway_assert(varDsc->lvIsParam);
@@ -6438,14 +6337,16 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
// Skip reg args which are already in its right register for jmp call.
// If not, we will spill such args to their stack locations.
//
- // If we need to generate a tail call profiler hook, then spill all
+ // If we need to generate a tail call profiler hook, then spill all
// arg regs to free them up for the callback.
if (!compiler->compIsProfilerHookNeeded() && (varDsc->lvRegNum == varDsc->lvArgReg))
+ {
continue;
+ }
}
else if (varDsc->lvRegNum == REG_STK)
{
- // Skip args which are currently living in stack.
+ // Skip args which are currently living in stack.
continue;
}
@@ -6454,7 +6355,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
// assert should hold.
assert(varDsc->lvRegNum != REG_STK);
- var_types loadType = varDsc->lvaArgType();
+ var_types loadType = varDsc->lvaArgType();
getEmitter()->emitIns_S_R(ins_Store(loadType), emitTypeSize(loadType), varDsc->lvRegNum, varNum, 0);
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
@@ -6479,7 +6380,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
-
+
#ifdef PROFILING_SUPPORTED
// At this point all arg regs are free.
// Emit tail call profiler callback.
@@ -6487,23 +6388,25 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
#endif
// Next move any un-enregistered register arguments back to their register.
- regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
- unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
+ regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
+ unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
for (varNum = 0; (varNum < compiler->info.compArgsCount); varNum++)
{
varDsc = compiler->lvaTable + varNum;
if (varDsc->lvPromoted)
{
- noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
- varDsc = compiler->lvaTable + fieldVarNum;
+ varDsc = compiler->lvaTable + fieldVarNum;
}
noway_assert(varDsc->lvIsParam);
// Skip if arg not passed in a register.
- if (!varDsc->lvIsRegArg)
+ if (!varDsc->lvIsRegArg)
+ {
continue;
+ }
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
if (varTypeIsStruct(varDsc))
@@ -6517,14 +6420,14 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
unsigned __int8 offset0 = 0;
unsigned __int8 offset1 = 0;
- var_types type0 = TYP_UNKNOWN;
- var_types type1 = TYP_UNKNOWN;
+ var_types type0 = TYP_UNKNOWN;
+ var_types type1 = TYP_UNKNOWN;
// Get the eightbyte data
compiler->GetStructTypeOffset(structDesc, &type0, &type1, &offset0, &offset1);
// Move the values into the right registers.
- //
+ //
// Update varDsc->lvArgReg and lvOtherArgReg life and GC Info to indicate varDsc stack slot is dead and
// argReg is going live. Note that we cannot modify varDsc->lvRegNum and lvOtherArgReg here because another
@@ -6537,7 +6440,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
regSet.rsMaskVars |= genRegMask(varDsc->lvArgReg);
gcInfo.gcMarkRegPtrVal(varDsc->lvArgReg, type0);
}
-
+
if (type1 != TYP_UNKNOWN)
{
getEmitter()->emitIns_R_S(ins_Load(type1), emitTypeSize(type1), varDsc->lvOtherArgReg, varNum, offset1);
@@ -6558,8 +6461,8 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
// Is register argument already in the right register?
// If not load it from its stack location.
- var_types loadType = varDsc->lvaArgType();
- regNumber argReg = varDsc->lvArgReg; // incoming arg register
+ var_types loadType = varDsc->lvaArgType();
+ regNumber argReg = varDsc->lvArgReg; // incoming arg register
if (varDsc->lvRegNum != argReg)
{
@@ -6585,7 +6488,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
}
#endif // DEBUG
- VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
+ VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
}
@@ -6598,12 +6501,12 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
if (compiler->info.compIsVarArgs)
{
regNumber intArgReg;
- var_types loadType = varDsc->lvaArgType();
- regNumber argReg = varDsc->lvArgReg; // incoming arg register
+ var_types loadType = varDsc->lvaArgType();
+ regNumber argReg = varDsc->lvArgReg; // incoming arg register
if (varTypeIsFloating(loadType))
{
- intArgReg = compiler->getCallArgIntRegister(argReg);
+ intArgReg = compiler->getCallArgIntRegister(argReg);
instruction ins = ins_CopyFloatToInt(loadType, TYP_LONG);
inst_RV_RV(ins, argReg, intArgReg, loadType);
}
@@ -6620,7 +6523,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
firstArgVarNum = varNum;
}
}
-#endif // FEATURE_VARARG
+#endif // FEATURE_VARARG
}
#if FEATURE_VARARG && defined(_TARGET_AMD64_)
@@ -6641,14 +6544,14 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
assert(compiler->info.compIsVarArgs);
assert(firstArgVarNum != BAD_VAR_NUM);
- regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
+ regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
instruction insCopyIntToFloat = ins_CopyIntToFloat(TYP_LONG, TYP_DOUBLE);
getEmitter()->emitDisableGC();
- for (int argNum = 0, argOffset=0; argNum < MAX_REG_ARG; ++argNum)
+ for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
- regNumber argReg = intArgRegs[argNum];
+ regNumber argReg = intArgRegs[argNum];
regMaskTP argRegMask = genRegMask(argReg);
if ((remainingIntArgMask & argRegMask) != 0)
@@ -6662,7 +6565,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
}
argOffset += REGSIZE_BYTES;
- }
+ }
getEmitter()->emitEnableGC();
}
}
@@ -6670,7 +6573,7 @@ void CodeGen::genJmpMethod(GenTreePtr jmp)
}
// produce code for a GT_LEA subnode
-void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
+void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
emitAttr size = emitTypeSize(lea);
genConsumeOperands(lea);
@@ -6679,15 +6582,16 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
{
regNumber baseReg = lea->Base()->gtRegNum;
regNumber indexReg = lea->Index()->gtRegNum;
- getEmitter()->emitIns_R_ARX (INS_lea, size, lea->gtRegNum, baseReg, indexReg, lea->gtScale, lea->gtOffset);
+ getEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, baseReg, indexReg, lea->gtScale, lea->gtOffset);
}
else if (lea->Base())
{
- getEmitter()->emitIns_R_AR (INS_lea, size, lea->gtRegNum, lea->Base()->gtRegNum, lea->gtOffset);
+ getEmitter()->emitIns_R_AR(INS_lea, size, lea->gtRegNum, lea->Base()->gtRegNum, lea->gtOffset);
}
else if (lea->Index())
{
- getEmitter()->emitIns_R_ARX (INS_lea, size, lea->gtRegNum, REG_NA, lea->Index()->gtRegNum, lea->gtScale, lea->gtOffset);
+ getEmitter()->emitIns_R_ARX(INS_lea, size, lea->gtRegNum, REG_NA, lea->Index()->gtRegNum, lea->gtScale,
+ lea->gtOffset);
}
genProduceReg(lea);
@@ -6706,22 +6610,20 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode *lea)
// Only GT_EQ for a floating point compares can have a false value.
//
// Return Value:
-// Sets the proper values into the array elements of jmpKind[] and jmpToTrueLabel[]
+// Sets the proper values into the array elements of jmpKind[] and jmpToTrueLabel[]
//
// Assumptions:
// At least one conditional branch instruction will be returned.
-// Typically only one conditional branch is needed
+// Typically only one conditional branch is needed
// and the second jmpKind[] value is set to EJ_NONE
//
// Notes:
-// jmpToTrueLabel[i]= true implies branch when the compare operation is true.
+// jmpToTrueLabel[i]= true implies branch when the compare operation is true.
// jmpToTrueLabel[i]= false implies branch when the compare operation is false.
//-------------------------------------------------------------------------------------------
// static
-void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree,
- emitJumpKind jmpKind[2],
- bool jmpToTrueLabel[2])
+void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree, emitJumpKind jmpKind[2], bool jmpToTrueLabel[2])
{
// Except for BEQ (= ordered GT_EQ) both jumps are to the true label.
jmpToTrueLabel[0] = true;
@@ -6731,8 +6633,8 @@ void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree,
if (!varTypeIsFloating(cmpTree->gtOp.gtOp1->gtEffectiveVal()))
{
CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
- jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, compareKind);
- jmpKind[1] = EJ_NONE;
+ jmpKind[0] = genJumpKindForOper(cmpTree->gtOper, compareKind);
+ jmpKind[1] = EJ_NONE;
}
else
{
@@ -6745,62 +6647,62 @@ void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree,
// Must branch if we have an NaN, unordered
switch (cmpTree->gtOper)
{
- case GT_LT:
- case GT_GT:
- jmpKind[0] = EJ_jb;
- jmpKind[1] = EJ_NONE;
- break;
-
- case GT_LE:
- case GT_GE:
- jmpKind[0] = EJ_jbe;
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_LT:
+ case GT_GT:
+ jmpKind[0] = EJ_jb;
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_NE:
- jmpKind[0] = EJ_jpe;
- jmpKind[1] = EJ_jne;
- break;
+ case GT_LE:
+ case GT_GE:
+ jmpKind[0] = EJ_jbe;
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_EQ:
- jmpKind[0] = EJ_je;
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_NE:
+ jmpKind[0] = EJ_jpe;
+ jmpKind[1] = EJ_jne;
+ break;
- default:
- unreached();
+ case GT_EQ:
+ jmpKind[0] = EJ_je;
+ jmpKind[1] = EJ_NONE;
+ break;
+
+ default:
+ unreached();
}
}
- else // ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) == 0)
+ else // ((cmpTree->gtFlags & GTF_RELOP_NAN_UN) == 0)
{
// Do not branch if we have an NaN, unordered
switch (cmpTree->gtOper)
{
- case GT_LT:
- case GT_GT:
- jmpKind[0] = EJ_ja;
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_LT:
+ case GT_GT:
+ jmpKind[0] = EJ_ja;
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_LE:
- case GT_GE:
- jmpKind[0] = EJ_jae;
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_LE:
+ case GT_GE:
+ jmpKind[0] = EJ_jae;
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_NE:
- jmpKind[0] = EJ_jne;
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_NE:
+ jmpKind[0] = EJ_jne;
+ jmpKind[1] = EJ_NONE;
+ break;
- case GT_EQ:
- jmpKind[0] = EJ_jpe;
- jmpKind[1] = EJ_je;
- jmpToTrueLabel[0] = false;
- break;
+ case GT_EQ:
+ jmpKind[0] = EJ_jpe;
+ jmpKind[1] = EJ_je;
+ jmpToTrueLabel[0] = false;
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
}
}
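
For reference, the two-branch cases in the tables above are exactly the equality forms, because ucomiss/ucomisd report an unordered (NaN) compare through the parity flag. A minimal C++ sketch of the semantics being encoded (illustrative only, not JIT code):

#include <cmath>

// GT_EQ (ordered): true only when neither operand is NaN and the values match,
// hence "jpe -> false label" followed by "je -> true label".
inline bool FloatEqualOrdered(double a, double b)
{
    return !std::isunordered(a, b) && (a == b);
}

// GT_NE (unordered): true when either operand is NaN or the values differ,
// hence "jpe -> true label" followed by "jne -> true label".
inline bool FloatNotEqualUnordered(double a, double b)
{
    return std::isunordered(a, b) || (a != b);
}
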
@@ -6821,56 +6723,55 @@ void CodeGen::genJumpKindsForTree(GenTreePtr cmpTree,
// Return Value:
// None.
//
-void CodeGen::genJumpKindsForTreeLongHi(GenTreePtr cmpTree,
- emitJumpKind jmpKind[2])
+void CodeGen::genJumpKindsForTreeLongHi(GenTreePtr cmpTree, emitJumpKind jmpKind[2])
{
assert(cmpTree->OperIsCompare());
CompareKind compareKind = ((cmpTree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
switch (cmpTree->gtOper)
{
- case GT_LT:
- case GT_LE:
- if (compareKind == CK_SIGNED)
- {
- jmpKind[0] = EJ_jl;
- jmpKind[1] = EJ_jg;
- }
- else
- {
- jmpKind[0] = EJ_jb;
- jmpKind[1] = EJ_ja;
- }
- break;
+ case GT_LT:
+ case GT_LE:
+ if (compareKind == CK_SIGNED)
+ {
+ jmpKind[0] = EJ_jl;
+ jmpKind[1] = EJ_jg;
+ }
+ else
+ {
+ jmpKind[0] = EJ_jb;
+ jmpKind[1] = EJ_ja;
+ }
+ break;
- case GT_GT:
- case GT_GE:
- if (compareKind == CK_SIGNED)
- {
- jmpKind[0] = EJ_jg;
- jmpKind[1] = EJ_jl;
- }
- else
- {
- jmpKind[0] = EJ_ja;
- jmpKind[1] = EJ_jb;
- }
- break;
+ case GT_GT:
+ case GT_GE:
+ if (compareKind == CK_SIGNED)
+ {
+ jmpKind[0] = EJ_jg;
+ jmpKind[1] = EJ_jl;
+ }
+ else
+ {
+ jmpKind[0] = EJ_ja;
+ jmpKind[1] = EJ_jb;
+ }
+ break;
- case GT_EQ:
- // GT_EQ will not jump to the true label if the hi parts are equal
- jmpKind[0] = EJ_NONE;
- jmpKind[1] = EJ_jne;
- break;
+ case GT_EQ:
+ // GT_EQ will not jump to the true label if the hi parts are equal
+ jmpKind[0] = EJ_NONE;
+ jmpKind[1] = EJ_jne;
+ break;
- case GT_NE:
- // GT_NE will always jump to the true label if the high parts are not equal
- jmpKind[0] = EJ_jne;
- jmpKind[1] = EJ_NONE;
- break;
+ case GT_NE:
+ // GT_NE will always jump to the true label if the high parts are not equal
+ jmpKind[0] = EJ_jne;
+ jmpKind[1] = EJ_NONE;
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
}
@@ -6957,18 +6858,18 @@ void CodeGen::genJumpKindsForTreeLongHi(GenTreePtr cmpTree,
// labelFinal:
//
// TODO-X86-CQ: Check if hi or lo parts of op2 are 0 and change the compare to a test.
-void CodeGen::genCompareLong(GenTreePtr treeNode)
+void CodeGen::genCompareLong(GenTreePtr treeNode)
{
assert(treeNode->OperIsCompare());
GenTreeOp* tree = treeNode->AsOp();
- GenTreePtr op1 = tree->gtOp1;
- GenTreePtr op2 = tree->gtOp2;
+ GenTreePtr op1 = tree->gtOp1;
+ GenTreePtr op2 = tree->gtOp2;
assert(varTypeIsLong(op1->TypeGet()));
assert(varTypeIsLong(op2->TypeGet()));
-
- regNumber targetReg = treeNode->gtRegNum;
+
+ regNumber targetReg = treeNode->gtRegNum;
genConsumeOperands(tree);
@@ -6980,17 +6881,17 @@ void CodeGen::genCompareLong(GenTreePtr treeNode)
GenTreePtr hiOp2 = op2->gtGetOp2();
// Create compare for the high parts
- instruction ins = INS_cmp;
- var_types cmpType = TYP_INT;
- emitAttr cmpAttr = emitTypeSize(cmpType);
+ instruction ins = INS_cmp;
+ var_types cmpType = TYP_INT;
+ emitAttr cmpAttr = emitTypeSize(cmpType);
// Emit the compare instruction
getEmitter()->emitInsBinary(ins, cmpAttr, hiOp1, hiOp2);
// Generate the first jump for the high compare
CompareKind compareKind = ((tree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
-
- BasicBlock* labelHi = genCreateTempLabel();
+
+ BasicBlock* labelHi = genCreateTempLabel();
BasicBlock* labelFinal = genCreateTempLabel();
if (compareKind == CK_SIGNED && (tree->gtOper != GT_NE && tree->gtOper != GT_EQ))
@@ -7056,7 +6957,6 @@ void CodeGen::genCompareLong(GenTreePtr treeNode)
inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), targetReg, targetReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE));
genProduceReg(tree);
}
-
}
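
For readers following the long-compare lowering described in the surrounding comments: on 32-bit targets the high words decide the result unless they are equal, in which case the low words are compared unsigned. A plain C++ rendering of that decomposition (an illustrative sketch with a made-up helper name, not JIT code):

#include <cstdint>

bool SignedInt64LessThanViaHalves(int64_t x, int64_t y)
{
    // High halves carry the sign, so they are compared signed (jl/jg).
    int32_t hiX = static_cast<int32_t>(x >> 32);
    int32_t hiY = static_cast<int32_t>(y >> 32);
    if (hiX != hiY)
    {
        return hiX < hiY;
    }
    // Equal high halves: the low halves are compared unsigned (jb/ja).
    uint32_t loX = static_cast<uint32_t>(x);
    uint32_t loY = static_cast<uint32_t>(y);
    return loX < loY;
}
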
//------------------------------------------------------------------------
@@ -7073,7 +6973,7 @@ void CodeGen::genCompareLong(GenTreePtr treeNode)
// We only have to do the low compare if the high parts of the operands are equal.
//
// In the case where the result of a rel-op is not realized in a register, we generate:
-//
+//
// Opcode x86 equivalent Comment
// ------ -------------- -------
//
@@ -7090,7 +6990,7 @@ void CodeGen::genCompareLong(GenTreePtr treeNode)
// cmp loOp1,loOp2
// jbe trueLabel
// falseLabel:
-//
+//
// GT_GT; unsigned cmp hiOp1,hiOp2
// ja trueLabel
// jb falseLabel
@@ -7118,7 +7018,7 @@ void CodeGen::genCompareLong(GenTreePtr treeNode)
// cmp loOp1,loOp2
// jbe trueLabel
// falseLabel:
-//
+//
// GT_GT; signed cmp hiOp1,hiOp2
// jg trueLabel
// jl falseLabel
@@ -7146,18 +7046,18 @@ void CodeGen::genCompareLong(GenTreePtr treeNode)
// falseLabel:
//
// TODO-X86-CQ: Check if hi or lo parts of op2 are 0 and change the compare to a test.
-void CodeGen::genJTrueLong(GenTreePtr treeNode)
+void CodeGen::genJTrueLong(GenTreePtr treeNode)
{
assert(treeNode->OperIsCompare());
GenTreeOp* tree = treeNode->AsOp();
- GenTreePtr op1 = tree->gtOp1;
- GenTreePtr op2 = tree->gtOp2;
+ GenTreePtr op1 = tree->gtOp1;
+ GenTreePtr op2 = tree->gtOp2;
assert(varTypeIsLong(op1->TypeGet()));
assert(varTypeIsLong(op2->TypeGet()));
- regNumber targetReg = treeNode->gtRegNum;
+ regNumber targetReg = treeNode->gtRegNum;
assert(targetReg == REG_NA);
@@ -7171,7 +7071,7 @@ void CodeGen::genJTrueLong(GenTreePtr treeNode)
// Generate the first jump for the high compare
CompareKind compareKind = ((tree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
-
+
// TODO-X86-CQ: If the next block is a BBJ_ALWAYS, we can set falseLabel = compiler->compCurBB->bbNext->bbJumpDest.
BasicBlock* falseLabel = genCreateTempLabel();
@@ -7186,7 +7086,7 @@ void CodeGen::genJTrueLong(GenTreePtr treeNode)
{
inst_JMP(jumpKindHi[0], trueLabel);
}
-
+
if (jumpKindHi[1] != EJ_NONE)
{
inst_JMP(jumpKindHi[1], falseLabel);
@@ -7204,7 +7104,7 @@ void CodeGen::genJTrueLong(GenTreePtr treeNode)
// or fall through if the low compare is false.
genDefineTempLabel(falseLabel);
}
-#endif //!defined(_TARGET_64BIT_)
+#endif //! defined(_TARGET_64BIT_)
//------------------------------------------------------------------------
// genCompareFloat: Generate code for comparing two floating point values
@@ -7214,7 +7114,7 @@ void CodeGen::genJTrueLong(GenTreePtr treeNode)
//
// Return Value:
// None.
-// Comments:
+// Comments:
// SSE2 instruction ucomis[s|d] performs an unordered comparison and
// updates rFLAGS register as follows.
// Result of compare ZF PF CF
@@ -7277,20 +7177,20 @@ void CodeGen::genCompareFloat(GenTreePtr treeNode)
{
assert(treeNode->OperIsCompare());
- GenTreeOp *tree = treeNode->AsOp();
- GenTreePtr op1 = tree->gtOp1;
- GenTreePtr op2 = tree->gtOp2;
- var_types op1Type = op1->TypeGet();
- var_types op2Type = op2->TypeGet();
+ GenTreeOp* tree = treeNode->AsOp();
+ GenTreePtr op1 = tree->gtOp1;
+ GenTreePtr op2 = tree->gtOp2;
+ var_types op1Type = op1->TypeGet();
+ var_types op2Type = op2->TypeGet();
genConsumeOperands(tree);
assert(varTypeIsFloating(op1Type));
assert(op1Type == op2Type);
- regNumber targetReg = treeNode->gtRegNum;
+ regNumber targetReg = treeNode->gtRegNum;
instruction ins;
- emitAttr cmpAttr;
+ emitAttr cmpAttr;
bool reverseOps;
if ((tree->gtFlags & GTF_RELOP_NAN_UN) != 0)
@@ -7306,11 +7206,11 @@ void CodeGen::genCompareFloat(GenTreePtr treeNode)
if (reverseOps)
{
GenTreePtr tmp = op1;
- op1 = op2;
- op2 = tmp;
+ op1 = op2;
+ op2 = tmp;
}
- ins = ins_FloatCompare(op1Type);
+ ins = ins_FloatCompare(op1Type);
cmpAttr = emitTypeSize(op1Type);
getEmitter()->emitInsBinary(ins, cmpAttr, op1, op2);
@@ -7335,19 +7235,19 @@ void CodeGen::genCompareInt(GenTreePtr treeNode)
{
assert(treeNode->OperIsCompare());
- GenTreeOp *tree = treeNode->AsOp();
- GenTreePtr op1 = tree->gtOp1;
- GenTreePtr op2 = tree->gtOp2;
- var_types op1Type = op1->TypeGet();
- var_types op2Type = op2->TypeGet();
+ GenTreeOp* tree = treeNode->AsOp();
+ GenTreePtr op1 = tree->gtOp1;
+ GenTreePtr op2 = tree->gtOp2;
+ var_types op1Type = op1->TypeGet();
+ var_types op2Type = op2->TypeGet();
genConsumeOperands(tree);
instruction ins;
- emitAttr cmpAttr;
+ emitAttr cmpAttr;
- regNumber targetReg = treeNode->gtRegNum;
- assert(!op1->isContainedIntOrIImmed()); // We no longer support swapping op1 and op2 to generate cmp reg, imm
+ regNumber targetReg = treeNode->gtRegNum;
+ assert(!op1->isContainedIntOrIImmed()); // We no longer support swapping op1 and op2 to generate cmp reg, imm
assert(!varTypeIsFloating(op2Type));
#ifdef _TARGET_X86_
@@ -7356,10 +7256,10 @@ void CodeGen::genCompareInt(GenTreePtr treeNode)
// By default we use an int32 sized cmp instruction
//
- ins = INS_cmp;
+ ins = INS_cmp;
var_types cmpType = TYP_INT;
- // In the if/then/else statement below we may change the
+ // In the if/then/else statement below we may change the
// 'cmpType' and/or 'ins' to generate a smaller instruction
// Are we comparing two values that are the same size?
@@ -7376,17 +7276,17 @@ void CodeGen::genCompareInt(GenTreePtr treeNode)
// If we have two different int64 types we need to use a long compare
cmpType = TYP_LONG;
}
-
+
cmpAttr = emitTypeSize(cmpType);
}
- else // Here we know that (op1Type != op2Type)
+ else // Here we know that (op1Type != op2Type)
{
// Do we have a short compare against a constant in op2?
//
// We checked for this case in LowerCmp() and if we can perform a small
// compare immediate we labeled this compare with a GTF_RELOP_SMALL
// and for unsigned small non-equality compares the GTF_UNSIGNED flag.
- //
+ //
if (op2->isContainedIntOrIImmed() && ((tree->gtFlags & GTF_RELOP_SMALL) != 0))
{
assert(varTypeIsSmall(op1Type));
@@ -7430,36 +7330,36 @@ void CodeGen::genCompareInt(GenTreePtr treeNode)
}
// See if we can generate a "test" instruction instead of a "cmp".
- // For this to generate the correct conditional branch we must have
+ // For this to generate the correct conditional branch we must have
// a compare against zero.
- //
+ //
if (op2->IsIntegralConst(0))
{
if (op1->isContained())
{
- // op1 can be a contained memory op
+ // op1 can be a contained memory op
// or the special contained GT_AND that we created in Lowering::LowerCmp()
- //
+ //
if ((op1->OperGet() == GT_AND))
{
noway_assert(op1->gtOp.gtOp2->isContainedIntOrIImmed());
- ins = INS_test; // we will generate "test andOp1, andOp2CnsVal"
- op2 = op1->gtOp.gtOp2; // must assign op2 before we overwrite op1
- op1 = op1->gtOp.gtOp1; // overwrite op1
+ ins = INS_test; // we will generate "test andOp1, andOp2CnsVal"
+ op2 = op1->gtOp.gtOp2; // must assign op2 before we overwrite op1
+ op1 = op1->gtOp.gtOp1; // overwrite op1
if (op1->isContainedMemoryOp())
{
// use the size andOp1 if it is a contained memoryop.
- cmpAttr = emitTypeSize(op1->TypeGet());
+ cmpAttr = emitTypeSize(op1->TypeGet());
}
// fallthrough to emit->emitInsBinary(ins, cmpAttr, op1, op2);
}
}
else // op1 is not contained thus it must be in a register
{
- ins = INS_test;
- op2 = op1; // we will generate "test reg1,reg1"
+ ins = INS_test;
+ op2 = op1; // we will generate "test reg1,reg1"
// fallthrough to emit->emitInsBinary(ins, cmpAttr, op1, op2);
}
}
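
A brief aside on the peephole above (illustrative, not part of the diff): comparing a register against zero and testing the register against itself set the flags the subsequent jcc/setcc needs in the same way, and a contained AND folds into a single test instruction instead of materializing the AND result. The logical equivalents in C++:

// cmp reg, 0  and  test reg, reg  answer the same question:
inline bool IsZero(int x)
{
    return x == 0;
}

// (x & mask) == 0 becomes  test reg, mask  without a separate AND:
inline bool MaskedBitsClear(int x, int mask)
{
    return (x & mask) == 0;
}
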
@@ -7493,7 +7393,7 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
noway_assert((genRegMask(dstReg) & RBM_BYTE_REGS) != 0);
emitJumpKind jumpKind[2];
- bool branchToTrueLabel[2];
+ bool branchToTrueLabel[2];
genJumpKindsForTree(tree, jumpKind, branchToTrueLabel);
if (jumpKind[1] == EJ_NONE)
@@ -7502,7 +7402,7 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
inst_SET(jumpKind[0], dstReg);
}
else
- {
+ {
#ifdef DEBUG
// jmpKind[1] != EJ_NONE implies BEQ and BEN.UN of floating point values.
// These are represented by two conditions.
@@ -7511,7 +7411,7 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
// This must be an ordered comparison.
assert((tree->gtFlags & GTF_RELOP_NAN_UN) == 0);
}
- else
+ else
{
// This must be BNE.UN
assert((tree->gtOper == GT_NE) && ((tree->gtFlags & GTF_RELOP_NAN_UN) != 0));
@@ -7523,8 +7423,8 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
// That is, to materialize comparison reg needs to be set if PF=0 and ZF=1
// setnp reg // if (PF==0) reg = 1 else reg = 0
// jpe L1 // Jmp if PF==1
- // sete reg
- // L1:
+ // sete reg
+ // L1:
//
// BNE.UN == cmp, jpe <true label>, jne <true label>
// That is, to materialize the comparison reg needs to be set if either PF=1 or ZF=0;
@@ -7532,10 +7432,10 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
// jpe L1
// setne reg
// L1:
-
+
// reverse the jmpkind condition before setting dstReg if it is to false label.
inst_SET(branchToTrueLabel[0] ? jumpKind[0] : emitter::emitReverseJumpKind(jumpKind[0]), dstReg);
-
+
BasicBlock* label = genCreateTempLabel();
inst_JMP(jumpKind[0], label);
@@ -7580,23 +7480,23 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
{
assert(treeNode->OperGet() == GT_CAST);
- GenTreePtr castOp = treeNode->gtCast.CastOp();
- regNumber targetReg = treeNode->gtRegNum;
- regNumber sourceReg = castOp->gtRegNum;
- var_types dstType = treeNode->CastToType();
- bool isUnsignedDst = varTypeIsUnsigned(dstType);
- var_types srcType = genActualType(castOp->TypeGet());
- bool isUnsignedSrc = varTypeIsUnsigned(srcType);
+ GenTreePtr castOp = treeNode->gtCast.CastOp();
+ regNumber targetReg = treeNode->gtRegNum;
+ regNumber sourceReg = castOp->gtRegNum;
+ var_types dstType = treeNode->CastToType();
+ bool isUnsignedDst = varTypeIsUnsigned(dstType);
+ var_types srcType = genActualType(castOp->TypeGet());
+ bool isUnsignedSrc = varTypeIsUnsigned(srcType);
// if necessary, force the srcType to unsigned when the GT_UNSIGNED flag is set
if (!isUnsignedSrc && (treeNode->gtFlags & GTF_UNSIGNED) != 0)
{
- srcType = genUnsignedType(srcType);
+ srcType = genUnsignedType(srcType);
isUnsignedSrc = true;
}
- bool requiresOverflowCheck = false;
- bool needAndAfter = false;
+ bool requiresOverflowCheck = false;
+ bool needAndAfter = false;
assert(genIsValidIntReg(targetReg));
assert(genIsValidIntReg(sourceReg));
@@ -7604,7 +7504,7 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
instruction ins = INS_invalid;
emitAttr size = EA_UNKNOWN;
- if (genTypeSize(srcType) < genTypeSize(dstType))
+ if (genTypeSize(srcType) < genTypeSize(dstType))
{
// Widening cast
@@ -7615,8 +7515,8 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
if (treeNode->gtOverflow() && (srcType == TYP_INT) && (dstType == TYP_ULONG))
{
requiresOverflowCheck = true;
- size = EA_ATTR(genTypeSize(srcType));
- ins = INS_mov;
+ size = EA_ATTR(genTypeSize(srcType));
+ ins = INS_mov;
}
else
{
@@ -7636,7 +7536,7 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
{
#ifdef _TARGET_X86_
NYI_X86("Cast to 64 bit for x86/RyuJIT");
-#else // !_TARGET_X86_
+#else // !_TARGET_X86_
ins = INS_movsxd;
#endif // !_TARGET_X86_
}
@@ -7663,8 +7563,8 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
if (treeNode->gtOverflow())
{
requiresOverflowCheck = true;
- size = EA_ATTR(genTypeSize(srcType));
- ins = INS_mov;
+ size = EA_ATTR(genTypeSize(srcType));
+ ins = INS_mov;
}
else
{
@@ -7678,74 +7578,74 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
genConsumeReg(castOp);
if (requiresOverflowCheck)
- {
- ssize_t typeMin = 0;
- ssize_t typeMax = 0;
- ssize_t typeMask = 0;
- bool needScratchReg = false;
- bool signCheckOnly = false;
+ {
+ ssize_t typeMin = 0;
+ ssize_t typeMax = 0;
+ ssize_t typeMask = 0;
+ bool needScratchReg = false;
+ bool signCheckOnly = false;
/* Do we need to compare the value, or just check masks */
switch (dstType)
{
- case TYP_BYTE:
- typeMask = ssize_t((int)0xFFFFFF80);
- typeMin = SCHAR_MIN;
- typeMax = SCHAR_MAX;
- break;
+ case TYP_BYTE:
+ typeMask = ssize_t((int)0xFFFFFF80);
+ typeMin = SCHAR_MIN;
+ typeMax = SCHAR_MAX;
+ break;
- case TYP_UBYTE:
- typeMask = ssize_t((int)0xFFFFFF00L);
- break;
+ case TYP_UBYTE:
+ typeMask = ssize_t((int)0xFFFFFF00L);
+ break;
- case TYP_SHORT:
- typeMask = ssize_t((int)0xFFFF8000);
- typeMin = SHRT_MIN;
- typeMax = SHRT_MAX;
- break;
+ case TYP_SHORT:
+ typeMask = ssize_t((int)0xFFFF8000);
+ typeMin = SHRT_MIN;
+ typeMax = SHRT_MAX;
+ break;
- case TYP_CHAR:
- typeMask = ssize_t((int)0xFFFF0000L);
- break;
+ case TYP_CHAR:
+ typeMask = ssize_t((int)0xFFFF0000L);
+ break;
- case TYP_INT:
- if (srcType == TYP_UINT)
- {
- signCheckOnly = true;
- }
- else
- {
- typeMask = 0xFFFFFFFF80000000LL;
- typeMin = INT_MIN;
- typeMax = INT_MAX;
- }
- break;
+ case TYP_INT:
+ if (srcType == TYP_UINT)
+ {
+ signCheckOnly = true;
+ }
+ else
+ {
+ typeMask = 0xFFFFFFFF80000000LL;
+ typeMin = INT_MIN;
+ typeMax = INT_MAX;
+ }
+ break;
- case TYP_UINT:
- if (srcType == TYP_INT)
- {
- signCheckOnly = true;
- }
- else
- {
- needScratchReg = true;
- }
- break;
+ case TYP_UINT:
+ if (srcType == TYP_INT)
+ {
+ signCheckOnly = true;
+ }
+ else
+ {
+ needScratchReg = true;
+ }
+ break;
- case TYP_LONG:
- noway_assert(srcType == TYP_ULONG);
- signCheckOnly = true;
- break;
+ case TYP_LONG:
+ noway_assert(srcType == TYP_ULONG);
+ signCheckOnly = true;
+ break;
- case TYP_ULONG:
- noway_assert((srcType == TYP_LONG) || (srcType == TYP_INT));
- signCheckOnly = true;
- break;
+ case TYP_ULONG:
+ noway_assert((srcType == TYP_LONG) || (srcType == TYP_INT));
+ signCheckOnly = true;
+ break;
- default:
- NO_WAY("Unknown type");
- return;
+ default:
+ NO_WAY("Unknown type");
+ return;
}
if (signCheckOnly)
@@ -7760,22 +7660,22 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
if (needScratchReg)
{
- // We need an additional temp register
+ // We need an additional temp register
// Make sure we have exactly one allocated.
assert(treeNode->gtRsvdRegs != RBM_NONE);
assert(genCountBits(treeNode->gtRsvdRegs) == 1);
tmpReg = genRegNumFromMask(treeNode->gtRsvdRegs);
}
- // When we are converting from unsigned or to unsigned, we
+ // When we are converting from unsigned or to unsigned, we
// will only have to check for any bits set using 'typeMask'
if (isUnsignedSrc || isUnsignedDst)
{
if (needScratchReg)
{
- inst_RV_RV(INS_mov, tmpReg, sourceReg, TYP_LONG); // Move the 64-bit value to a writeable temp reg
- inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, size, tmpReg, 32); // Shift right by 32 bits
-                genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);           // Throw if result shift is non-zero
+ inst_RV_RV(INS_mov, tmpReg, sourceReg, TYP_LONG); // Move the 64-bit value to a writeable temp reg
+ inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, size, tmpReg, 32); // Shift right by 32 bits
+                genJumpToThrowHlpBlk(EJ_jne, SCK_OVERFLOW);            // Throw if result shift is non-zero
}
else
{
@@ -7809,11 +7709,10 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
// On amd64, we can hit this path for a same-register
// 4-byte to 8-byte widening conversion, and need to
// emit the instruction to set the high bits correctly.
- || (EA_ATTR(genTypeSize(dstType)) == EA_8BYTE
- && EA_ATTR(genTypeSize(srcType)) == EA_4BYTE)
+ || (EA_ATTR(genTypeSize(dstType)) == EA_8BYTE && EA_ATTR(genTypeSize(srcType)) == EA_4BYTE)
#endif // _TARGET_AMD64_
- )
- inst_RV_RV(ins, targetReg, sourceReg, srcType, size);
+ )
+ inst_RV_RV(ins, targetReg, sourceReg, srcType, size);
}
else // non-overflow checking cast
{
@@ -7827,7 +7726,7 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
ins = INS_mov;
}
/* Is the value sitting in a non-byte-addressable register? */
- else if (castOp->InReg() && (size == EA_1BYTE) && !isByteReg(sourceReg))
+ else if (castOp->InReg() && (size == EA_1BYTE) && !isByteReg(sourceReg))
{
if (isUnsignedDst)
{
@@ -7854,11 +7753,17 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
/* Generate "and reg, MASK */
unsigned fillPattern;
if (size == EA_1BYTE)
+ {
fillPattern = 0xff;
+ }
else if (size == EA_2BYTE)
+ {
fillPattern = 0xffff;
+ }
else
+ {
fillPattern = 0xffffffff;
+ }
inst_RV_IV(INS_AND, targetReg, fillPattern, EA_4BYTE);
}
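
The fill patterns chosen above implement zero-extension by masking when the source register is not byte-addressable and a plain narrow move cannot be used. A scalar illustration of the same selection (hypothetical helper name, not JIT code):

#include <cstdint>

uint32_t ZeroExtendByMask(uint32_t value, unsigned sourceBytes)
{
    // Mirrors the fillPattern selection above: 0xff, 0xffff, or 0xffffffff.
    uint32_t mask = (sourceBytes == 1) ? 0xffu : (sourceBytes == 2) ? 0xffffu : 0xffffffffu;
    return value & mask;
}
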
@@ -7876,10 +7781,9 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
#ifdef _TARGET_AMD64_
// On amd64, 'mov' is the opcode used to zero-extend from
// 4 bytes to 8 bytes.
- || (EA_ATTR(genTypeSize(dstType)) == EA_8BYTE
- && EA_ATTR(genTypeSize(srcType)) == EA_4BYTE)
+ || (EA_ATTR(genTypeSize(dstType)) == EA_8BYTE && EA_ATTR(genTypeSize(srcType)) == EA_4BYTE)
#endif // _TARGET_AMD64_
- )
+ )
{
inst_RV_RV(ins, targetReg, sourceReg, srcType, size);
}
@@ -7892,7 +7796,7 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
inst_RV_RV(ins, targetReg, sourceReg, srcType, size);
/* Mask off high bits for cast from byte to char */
- if (needAndAfter)
+ if (needAndAfter)
{
noway_assert(genTypeSize(dstType) == 2 && ins == INS_movsx);
inst_RV_IV(INS_AND, targetReg, 0xFFFF, EA_4BYTE);
@@ -7917,8 +7821,7 @@ void CodeGen::genIntToIntCast(GenTreePtr treeNode)
// The treeNode must have an assigned register.
// The cast is between float and double or vice versa.
//
-void
-CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
+void CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
{
// float <--> double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
@@ -7927,7 +7830,7 @@ CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
regNumber targetReg = treeNode->gtRegNum;
assert(genIsValidFloatReg(targetReg));
- GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
#ifdef DEBUG
// If not contained, must be a valid float reg.
if (!op1->isContained())
@@ -7936,11 +7839,10 @@ CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
}
#endif
- var_types dstType = treeNode->CastToType();
- var_types srcType = op1->TypeGet();
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
-
genConsumeOperands(treeNode->AsOp());
if (srcType == dstType && targetReg == op1->gtRegNum)
{
@@ -7953,7 +7855,7 @@ CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
instruction ins = ins_FloatConv(dstType, srcType);
getEmitter()->emitInsBinary(ins, emitTypeSize(dstType), treeNode, op1);
}
-
+
genProduceReg(treeNode);
}
@@ -7971,8 +7873,7 @@ CodeGen::genFloatToFloatCast(GenTreePtr treeNode)
// The treeNode must have an assigned register.
// SrcType= int32/uint32/int64/uint64 and DstType=float/double.
//
-void
-CodeGen::genIntToFloatCast(GenTreePtr treeNode)
+void CodeGen::genIntToFloatCast(GenTreePtr treeNode)
{
// int type --> float/double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
@@ -7989,8 +7890,8 @@ CodeGen::genIntToFloatCast(GenTreePtr treeNode)
}
#endif
- var_types dstType = treeNode->CastToType();
- var_types srcType = op1->TypeGet();
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = op1->TypeGet();
assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
#if !defined(_TARGET_64BIT_)
@@ -8002,7 +7903,7 @@ CodeGen::genIntToFloatCast(GenTreePtr treeNode)
// for GT_LCL_VAR_ADDR and GT_LCL_FLD_ADDR that represent stack addresses and can be considered
// as TYP_I_IMPL. In all other cases where src operand is a gc-type and not known to be on stack,
// Front-end (see fgMorphCast()) ensures this by assigning gc-type local to a non gc-type
- // temp and using temp as operand of cast operation.
+ // temp and using temp as operand of cast operation.
if (srcType == TYP_BYREF)
{
noway_assert(op1->OperGet() == GT_LCL_VAR_ADDR || op1->OperGet() == GT_LCL_FLD_ADDR);
@@ -8022,19 +7923,18 @@ CodeGen::genIntToFloatCast(GenTreePtr treeNode)
// either the front-end or lowering phase to have generated two levels of cast.
// The first one is for widening smaller int type to int32 and the second one is
// to the float/double.
- emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
- noway_assert((srcSize == EA_ATTR(genTypeSize(TYP_INT))) ||
- (srcSize == EA_ATTR(genTypeSize(TYP_LONG))));
+ emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
+ noway_assert((srcSize == EA_ATTR(genTypeSize(TYP_INT))) || (srcSize == EA_ATTR(genTypeSize(TYP_LONG))));
// Also we don't expect to see uint32 -> float/double and uint64 -> float conversions
    // here since they should have been lowered appropriately.
noway_assert(srcType != TYP_UINT);
- noway_assert((srcType != TYP_ULONG) || (dstType != TYP_FLOAT));
+ noway_assert((srcType != TYP_ULONG) || (dstType != TYP_FLOAT));
// To convert int to a float/double, cvtsi2ss/sd SSE2 instruction is used
// which does a partial write to lower 4/8 bytes of xmm register keeping the other
- // upper bytes unmodified. If "cvtsi2ss/sd xmmReg, r32/r64" occurs inside a loop,
- // the partial write could introduce a false dependency and could cause a stall
+ // upper bytes unmodified. If "cvtsi2ss/sd xmmReg, r32/r64" occurs inside a loop,
+ // the partial write could introduce a false dependency and could cause a stall
    // if there are further uses of xmmReg. We have such a case occurring with a
// customer reported version of SpectralNorm benchmark, resulting in 2x perf
// regression. To avoid false dependency, we emit "xorps xmmReg, xmmReg" before
@@ -8049,7 +7949,7 @@ CodeGen::genIntToFloatCast(GenTreePtr treeNode)
getEmitter()->emitInsBinary(ins, emitTypeSize(srcType), treeNode, op1);
// Handle the case of srcType = TYP_ULONG. SSE2 conversion instruction
- // will interpret ULONG value as LONG. Hence we need to adjust the
+ // will interpret ULONG value as LONG. Hence we need to adjust the
// result if sign-bit of srcType is set.
if (srcType == TYP_ULONG)
{
@@ -8072,12 +7972,12 @@ CodeGen::genIntToFloatCast(GenTreePtr treeNode)
// Adjust the result
// result = result + 0x43f00000 00000000
// addsd resultReg, 0x43f00000 00000000
- GenTreePtr *cns = &u8ToDblBitmask;
+ GenTreePtr* cns = &u8ToDblBitmask;
if (*cns == nullptr)
{
- double d;
+ double d;
static_assert_no_msg(sizeof(double) == sizeof(__int64));
- *((__int64 *)&d) = 0x43f0000000000000LL;
+ *((__int64*)&d) = 0x43f0000000000000LL;
*cns = genMakeConst(&d, dstType, treeNode, true);
}
@@ -8105,8 +8005,7 @@ CodeGen::genIntToFloatCast(GenTreePtr treeNode)
//
// TODO-XArch-CQ: (Low-pri) - generate in-line code when DstType = uint64
//
-void
-CodeGen::genFloatToIntCast(GenTreePtr treeNode)
+void CodeGen::genFloatToIntCast(GenTreePtr treeNode)
{
// we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by front-end.
@@ -8124,8 +8023,8 @@ CodeGen::genFloatToIntCast(GenTreePtr treeNode)
}
#endif
- var_types dstType = treeNode->CastToType();
- var_types srcType = op1->TypeGet();
+ var_types dstType = treeNode->CastToType();
+ var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
// We should never be seeing dstType whose size is neither sizeof(TYP_INT) nor sizeof(TYP_LONG).
@@ -8133,15 +8032,14 @@ CodeGen::genFloatToIntCast(GenTreePtr treeNode)
// front-end or lowering phase to have generated two levels of cast. The first one is
// for float or double to int32/uint32 and the second one for narrowing int32/uint32 to
// the required smaller int type.
- emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
- noway_assert((dstSize == EA_ATTR(genTypeSize(TYP_INT))) ||
- (dstSize == EA_ATTR(genTypeSize(TYP_LONG))));
+ emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
+ noway_assert((dstSize == EA_ATTR(genTypeSize(TYP_INT))) || (dstSize == EA_ATTR(genTypeSize(TYP_LONG))));
// We shouldn't be seeing uint64 here as it should have been converted
// into a helper call by either front-end or lowering phase.
noway_assert(!varTypeIsUnsigned(dstType) || (dstSize != EA_ATTR(genTypeSize(TYP_LONG))));
- // If the dstType is TYP_UINT, we have 32-bits to encode the
+ // If the dstType is TYP_UINT, we have 32-bits to encode the
// float number. Any of 33rd or above bits can be the sign bit.
    // To achieve it we pretend as if we are converting it to a long.
if (varTypeIsUnsigned(dstType) && (dstSize == EA_ATTR(genTypeSize(TYP_INT))))
@@ -8149,7 +8047,7 @@ CodeGen::genFloatToIntCast(GenTreePtr treeNode)
dstType = TYP_LONG;
}
- // Note that we need to specify dstType here so that it will determine
+ // Note that we need to specify dstType here so that it will determine
// the size of destination integer register and also the rex.w prefix.
genConsumeOperands(treeNode->AsOp());
instruction ins = ins_FloatConv(TYP_INT, srcType);
@@ -8168,24 +8066,23 @@ CodeGen::genFloatToIntCast(GenTreePtr treeNode)
//
// Assumptions:
// GT_CKFINITE node has reserved an internal register.
-//
+//
// TODO-XArch-CQ - mark the operand as contained if known to be in
// memory (e.g. field or an array element).
//
-void
-CodeGen::genCkfinite(GenTreePtr treeNode)
+void CodeGen::genCkfinite(GenTreePtr treeNode)
{
assert(treeNode->OperGet() == GT_CKFINITE);
- GenTreePtr op1 = treeNode->gtOp.gtOp1;
- var_types targetType = treeNode->TypeGet();
- int expMask = (targetType == TYP_FLOAT) ? 0x7F800000 : 0x7FF00000; // Bit mask to extract exponent.
- regNumber targetReg = treeNode->gtRegNum;
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ var_types targetType = treeNode->TypeGet();
+ int expMask = (targetType == TYP_FLOAT) ? 0x7F800000 : 0x7FF00000; // Bit mask to extract exponent.
+ regNumber targetReg = treeNode->gtRegNum;
// Extract exponent into a register.
assert(treeNode->gtRsvdRegs != RBM_NONE);
assert(genCountBits(treeNode->gtRsvdRegs) == 1);
- regNumber tmpReg = genRegNumFromMask(treeNode->gtRsvdRegs);
+ regNumber tmpReg = genRegNumFromMask(treeNode->gtRsvdRegs);
genConsumeReg(op1);
@@ -8335,7 +8232,6 @@ int CodeGenInterface::genSPtoFPdelta()
return delta;
}
-
//---------------------------------------------------------------------
// genTotalFrameSize - return the total size of the stack frame, including local size,
// callee-saved register size, etc. For AMD64, this does not include the caller-pushed
@@ -8349,8 +8245,7 @@ int CodeGenInterface::genTotalFrameSize()
{
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
- int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES +
- compiler->compLclFrameSize;
+ int totalFrameSize = compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
@@ -8378,7 +8273,6 @@ int CodeGenInterface::genCallerSPtoFPdelta()
return callerSPtoFPdelta;
}
-
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
@@ -8389,7 +8283,7 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta()
int callerSPtoSPdelta = 0;
callerSPtoSPdelta -= genTotalFrameSize();
- callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
+ callerSPtoSPdelta -= REGSIZE_BYTES; // caller-pushed return address
// compCalleeRegsPushed does not account for the frame pointer
// TODO-Cleanup: shouldn't this be part of genTotalFrameSize?
@@ -8407,30 +8301,29 @@ int CodeGenInterface::genCallerSPtoInitialSPdelta()
// genSSE2BitwiseOp - generate SSE2 code for the given oper as "Operand BitWiseOp BitMask"
//
// Arguments:
-// treeNode - tree node
+// treeNode - tree node
//
// Return value:
// None
//
-// Assumptions:
+// Assumptions:
// i) tree oper is one of GT_NEG or GT_INTRINSIC Abs()
// ii) tree type is floating point type.
// iii) caller of this routine needs to call genProduceReg()
-void
-CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
-{
- regNumber targetReg = treeNode->gtRegNum;
+void CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
+{
+ regNumber targetReg = treeNode->gtRegNum;
var_types targetType = treeNode->TypeGet();
assert(varTypeIsFloating(targetType));
- float f;
- double d;
- GenTreePtr *bitMask = nullptr;
- instruction ins = INS_invalid;
- void *cnsAddr = nullptr;
- bool dblAlign = false;
+ float f;
+ double d;
+ GenTreePtr* bitMask = nullptr;
+ instruction ins = INS_invalid;
+ void* cnsAddr = nullptr;
+ bool dblAlign = false;
- switch(treeNode->OperGet())
+ switch (treeNode->OperGet())
{
case GT_NEG:
// Neg(x) = flip the sign bit.
@@ -8442,8 +8335,8 @@ CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
bitMask = &negBitmaskFlt;
static_assert_no_msg(sizeof(float) == sizeof(int));
- *((int *)&f) = 0x80000000;
- cnsAddr = &f;
+ *((int*)&f) = 0x80000000;
+ cnsAddr = &f;
}
else
{
@@ -8451,8 +8344,8 @@ CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
static_assert_no_msg(sizeof(double) == sizeof(__int64));
*((__int64*)&d) = 0x8000000000000000LL;
- cnsAddr = &d;
- dblAlign = true;
+ cnsAddr = &d;
+ dblAlign = true;
}
break;
@@ -8468,8 +8361,8 @@ CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
bitMask = &absBitmaskFlt;
static_assert_no_msg(sizeof(float) == sizeof(int));
- *((int *)&f) = 0x7fffffff;
- cnsAddr = &f;
+ *((int*)&f) = 0x7fffffff;
+ cnsAddr = &f;
}
else
{
@@ -8477,8 +8370,8 @@ CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
static_assert_no_msg(sizeof(double) == sizeof(__int64));
*((__int64*)&d) = 0x7fffffffffffffffLL;
- cnsAddr = &d;
- dblAlign = true;
+ cnsAddr = &d;
+ dblAlign = true;
}
break;
@@ -8514,7 +8407,7 @@ CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
operandReg = tmpReg;
}
-
+
inst_RV_TT(ins_Load(targetType, false), tmpReg, *bitMask);
assert(ins != INS_invalid);
inst_RV_RV(ins, targetReg, operandReg, targetType);
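
The bit masks assembled in this routine do the whole job: negation XORs the IEEE sign bit and Abs ANDs it away. The same trick in scalar C++, for reference (illustrative, not JIT code):

#include <cstdint>
#include <cstring>

float NegateBySignBit(float x)
{
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits ^= 0x80000000u; // xorps with the sign-bit mask
    std::memcpy(&x, &bits, sizeof(bits));
    return x;
}

float AbsBySignBit(float x)
{
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits &= 0x7fffffffu; // andps with the inverted sign-bit mask
    std::memcpy(&x, &bits, sizeof(bits));
    return x;
}
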
@@ -8529,27 +8422,27 @@ CodeGen::genSSE2BitwiseOp(GenTreePtr treeNode)
// Return value:
// None
//
-void
-CodeGen::genIntrinsic(GenTreePtr treeNode)
-{
+void CodeGen::genIntrinsic(GenTreePtr treeNode)
+{
// Right now only Sqrt/Abs are treated as math intrinsics.
- switch(treeNode->gtIntrinsic.gtIntrinsicId)
+ switch (treeNode->gtIntrinsic.gtIntrinsicId)
{
- case CORINFO_INTRINSIC_Sqrt:
- noway_assert(treeNode->TypeGet() == TYP_DOUBLE);
- genConsumeOperands(treeNode->AsOp());
- getEmitter()->emitInsBinary(ins_FloatSqrt(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode, treeNode->gtOp.gtOp1);
- break;
+ case CORINFO_INTRINSIC_Sqrt:
+ noway_assert(treeNode->TypeGet() == TYP_DOUBLE);
+ genConsumeOperands(treeNode->AsOp());
+ getEmitter()->emitInsBinary(ins_FloatSqrt(treeNode->TypeGet()), emitTypeSize(treeNode), treeNode,
+ treeNode->gtOp.gtOp1);
+ break;
- case CORINFO_INTRINSIC_Abs:
- genSSE2BitwiseOp(treeNode);
- break;
+ case CORINFO_INTRINSIC_Abs:
+ genSSE2BitwiseOp(treeNode);
+ break;
- default:
- assert(!"genIntrinsic: Unsupported intrinsic");
- unreached();
+ default:
+ assert(!"genIntrinsic: Unsupported intrinsic");
+ unreached();
}
-
+
genProduceReg(treeNode);
}
@@ -8565,15 +8458,14 @@ CodeGen::genIntrinsic(GenTreePtr treeNode)
// Note:
// If tail call the outgoing args are placed in the caller's incoming arg stack space.
// Otherwise, they go in the outgoing arg area on the current frame.
-//
-// On Windows the caller always creates slots (homing space) in its frame for the
+//
+// On Windows the caller always creates slots (homing space) in its frame for the
// first 4 arguments of a callee (register passed args). So, the baseVarNum is always 0.
// For System V systems there is no such calling convention requirement, and the code needs to find
-// the first stack passed argument from the caller. This is done by iterating over
+// the first stack passed argument from the caller. This is done by iterating over
// all the lvParam variables and finding the first with lvArgReg equals to REG_STK.
-//
-unsigned
-CodeGen::getBaseVarForPutArgStk(GenTreePtr treeNode)
+//
+unsigned CodeGen::getBaseVarForPutArgStk(GenTreePtr treeNode)
{
assert(treeNode->OperGet() == GT_PUTARG_STK);
@@ -8606,7 +8498,7 @@ CodeGen::getBaseVarForPutArgStk(GenTreePtr treeNode)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
assert(!varDsc->lvIsRegArg && varDsc->lvArgReg == REG_STK);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
// On Windows this assert is always true. The first argument will always be in REG_ARG_0 or REG_FLTARG_0.
assert(varDsc->lvIsRegArg && (varDsc->lvArgReg == REG_ARG_0 || varDsc->lvArgReg == REG_FLTARG_0));
#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -8616,7 +8508,7 @@ CodeGen::getBaseVarForPutArgStk(GenTreePtr treeNode)
{
#if FEATURE_FIXED_OUT_ARGS
baseVarNum = compiler->lvaOutgoingArgSpaceVar;
-#else // !FEATURE_FIXED_OUT_ARGS
+#else // !FEATURE_FIXED_OUT_ARGS
NYI_X86("Stack args for x86/RyuJIT");
baseVarNum = BAD_VAR_NUM;
#endif // !FEATURE_FIXED_OUT_ARGS
@@ -8635,8 +8527,7 @@ CodeGen::getBaseVarForPutArgStk(GenTreePtr treeNode)
// Return value:
// None
//
-void
-CodeGen::genPutArgStk(GenTreePtr treeNode)
+void CodeGen::genPutArgStk(GenTreePtr treeNode)
{
var_types targetType = treeNode->TypeGet();
#ifdef _TARGET_X86_
@@ -8657,7 +8548,7 @@ CodeGen::genPutArgStk(GenTreePtr treeNode)
// TODO-Cleanup: Handle this in emitInsMov() in emitXArch.cpp?
if (data->isContainedIntOrIImmed())
{
- if (data->IsIconHandle())
+ if (data->IsIconHandle())
{
inst_IV_handle(INS_push, data->gtIntCon.gtIconVal);
}
@@ -8687,7 +8578,7 @@ CodeGen::genPutArgStk(GenTreePtr treeNode)
#else // !_TARGET_X86_
{
unsigned baseVarNum = getBaseVarForPutArgStk(treeNode);
-
+
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
if (varTypeIsStruct(targetType))
@@ -8703,7 +8594,7 @@ CodeGen::genPutArgStk(GenTreePtr treeNode)
// Get argument offset on stack.
// Here we cross check that argument offset hasn't changed from lowering to codegen since
// we are storing arg slot number in GT_PUTARG_STK node in lowering phase.
- int argOffset = treeNode->AsPutArgStk()->getArgOffset();
+ int argOffset = treeNode->AsPutArgStk()->getArgOffset();
#ifdef DEBUG
fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(treeNode->AsPutArgStk()->gtCall, treeNode);
@@ -8715,16 +8606,14 @@ CodeGen::genPutArgStk(GenTreePtr treeNode)
if (data->isContained())
{
- getEmitter()->emitIns_S_I(ins_Store(targetType),
- emitTypeSize(targetType),
- baseVarNum,
- argOffset,
+ getEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
(int)data->AsIntConCommon()->IconValue());
}
else
{
genConsumeReg(data);
- getEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->gtRegNum, baseVarNum, argOffset);
+ getEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), data->gtRegNum, baseVarNum,
+ argOffset);
}
}
#endif // !_TARGET_X86_
@@ -8746,41 +8635,37 @@ CodeGen::genPutArgStk(GenTreePtr treeNode)
// Return value:
// None
//
-void
-CodeGen::genPutStructArgStk(GenTreePtr treeNode, unsigned baseVarNum)
+void CodeGen::genPutStructArgStk(GenTreePtr treeNode, unsigned baseVarNum)
{
assert(treeNode->OperGet() == GT_PUTARG_STK);
assert(baseVarNum != BAD_VAR_NUM);
-
+
var_types targetType = treeNode->TypeGet();
if (varTypeIsSIMD(targetType))
{
regNumber srcReg = genConsumeReg(treeNode->gtGetOp1());
assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
- getEmitter()->emitIns_S_R(ins_Store(targetType),
- emitTypeSize(targetType),
- srcReg,
- baseVarNum,
+ getEmitter()->emitIns_S_R(ins_Store(targetType), emitTypeSize(targetType), srcReg, baseVarNum,
treeNode->AsPutArgStk()->getArgOffset());
return;
}
assert(targetType == TYP_STRUCT);
-
+
GenTreePutArgStk* putArgStk = treeNode->AsPutArgStk();
if (putArgStk->gtNumberReferenceSlots == 0)
{
switch (putArgStk->gtPutArgStkKind)
{
- case GenTreePutArgStk::PutArgStkKindRepInstr:
- genStructPutArgRepMovs(putArgStk, baseVarNum);
- break;
- case GenTreePutArgStk::PutArgStkKindUnroll:
- genStructPutArgUnroll(putArgStk, baseVarNum);
- break;
- default:
- unreached();
+ case GenTreePutArgStk::PutArgStkKindRepInstr:
+ genStructPutArgRepMovs(putArgStk, baseVarNum);
+ break;
+ case GenTreePutArgStk::PutArgStkKindUnroll:
+ genStructPutArgUnroll(putArgStk, baseVarNum);
+ break;
+ default:
+ unreached();
}
}
else
@@ -8790,57 +8675,57 @@ CodeGen::genPutStructArgStk(GenTreePtr treeNode, unsigned baseVarNum)
// Consume these registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumePutStructArgStk(putArgStk, REG_RDI, REG_RSI, REG_NA, baseVarNum);
- GenTreePtr dstAddr = putArgStk;
- GenTreePtr src = putArgStk->gtOp.gtOp1;
+ GenTreePtr dstAddr = putArgStk;
+ GenTreePtr src = putArgStk->gtOp.gtOp1;
assert(src->OperGet() == GT_OBJ);
- GenTreePtr srcAddr = src->gtGetOp1();
+ GenTreePtr srcAddr = src->gtGetOp1();
unsigned slots = putArgStk->gtNumSlots;
- // We are always on the stack we don't need to use the write barrier.
+ // We are always on the stack we don't need to use the write barrier.
BYTE* gcPtrs = putArgStk->gtGcPtrs;
unsigned gcPtrCount = putArgStk->gtNumberReferenceSlots;
- unsigned i = 0;
+ unsigned i = 0;
unsigned copiedSlots = 0;
while (i < slots)
{
switch (gcPtrs[i])
{
- case TYPE_GC_NONE:
- // Let's see if we can use rep movsq instead of a sequence of movsq instructions
- // to save cycles and code size.
- {
- unsigned nonGcSlotCount = 0;
-
- do
+ case TYPE_GC_NONE:
+ // Let's see if we can use rep movsq instead of a sequence of movsq instructions
+ // to save cycles and code size.
{
- nonGcSlotCount++;
- i++;
- } while (i < slots && gcPtrs[i] == TYPE_GC_NONE);
+ unsigned nonGcSlotCount = 0;
- // If we have a very small contiguous non-gc region, it's better just to
- // emit a sequence of movsq instructions
- if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
- {
- copiedSlots += nonGcSlotCount;
- while (nonGcSlotCount > 0)
+ do
{
- instGen(INS_movsq);
- nonGcSlotCount--;
+ nonGcSlotCount++;
+ i++;
+ } while (i < slots && gcPtrs[i] == TYPE_GC_NONE);
+
+ // If we have a very small contiguous non-gc region, it's better just to
+ // emit a sequence of movsq instructions
+ if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
+ {
+ copiedSlots += nonGcSlotCount;
+ while (nonGcSlotCount > 0)
+ {
+ instGen(INS_movsq);
+ nonGcSlotCount--;
+ }
+ }
+ else
+ {
+ getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
+ copiedSlots += nonGcSlotCount;
+ instGen(INS_r_movsq);
}
}
- else
- {
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
- copiedSlots += nonGcSlotCount;
- instGen(INS_r_movsq);
- }
- }
- break;
+ break;
- case TYPE_GC_REF: // Is an object ref
- case TYPE_GC_BYREF: // Is an interior pointer - promote it but don't scan it
+ case TYPE_GC_REF: // Is an object ref
+ case TYPE_GC_BYREF: // Is an interior pointer - promote it but don't scan it
{
// We have a GC (byref or ref) pointer
// TODO-Amd64-Unix: Here a better solution (for code size and CQ) would be to use movsq instruction,
@@ -8860,18 +8745,13 @@ CodeGen::genPutStructArgStk(GenTreePtr treeNode, unsigned baseVarNum)
}
getEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
- getEmitter()->emitIns_S_R(ins_Store(memType),
- emitTypeSize(memType),
- REG_RCX,
- baseVarNum,
+ getEmitter()->emitIns_S_R(ins_Store(memType), emitTypeSize(memType), REG_RCX, baseVarNum,
((copiedSlots + putArgStk->gtSlotNum) * TARGET_POINTER_SIZE));
// Source for the copy operation.
// If a LocalAddr, use EA_PTRSIZE - copy from stack.
// If not a LocalAddr, use EA_BYREF - the source location is not on the stack.
- getEmitter()->emitIns_R_I(INS_add,
- ((src->OperIsLocalAddr()) ? EA_PTRSIZE : EA_BYREF),
- REG_RSI,
+ getEmitter()->emitIns_R_I(INS_add, ((src->OperIsLocalAddr()) ? EA_PTRSIZE : EA_BYREF), REG_RSI,
TARGET_POINTER_SIZE);
// Always copying to the stack - outgoing arg area
@@ -8883,16 +8763,16 @@ CodeGen::genPutStructArgStk(GenTreePtr treeNode, unsigned baseVarNum)
}
break;
- default:
- unreached();
- break;
+ default:
+ unreached();
+ break;
}
}
assert(gcPtrCount == 0);
}
}
-#endif //defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
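
One note on the struct-copy loop above: runs of non-GC slots are batched into (rep) movsq, while GC slots are moved one pointer at a time so the emitter can track them. A small sketch of the run-detection step, under stated assumptions (SlotKind and the helper are hypothetical stand-ins, not JIT types):

enum SlotKind { SlotNonGc, SlotGcRef, SlotGcByref };

// Count the contiguous non-GC slots starting at index i; short runs are
// emitted as unrolled movsq, longer runs as "mov rcx, n; rep movsq".
unsigned CountNonGcRun(const SlotKind* layout, unsigned i, unsigned slotCount)
{
    unsigned run = 0;
    while ((i + run) < slotCount && layout[i + run] == SlotNonGc)
    {
        run++;
    }
    return run;
}
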
/*****************************************************************************
*
@@ -8900,38 +8780,35 @@ CodeGen::genPutStructArgStk(GenTreePtr treeNode, unsigned baseVarNum)
*/
#ifdef _TARGET_AMD64_
void
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
void*
#endif // !_TARGET_AMD64_
CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
{
#ifdef JIT32_GCENCODER
return genCreateAndStoreGCInfoJIT32(codeSize, prologSize, epilogSize DEBUGARG(codePtr));
-#else // !JIT32_GCENCODER
+#else // !JIT32_GCENCODER
genCreateAndStoreGCInfoX64(codeSize, prologSize DEBUGARG(codePtr));
#endif // !JIT32_GCENCODER
}
#ifdef JIT32_GCENCODER
-void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr))
+void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize,
+ unsigned prologSize,
+ unsigned epilogSize DEBUGARG(void* codePtr))
{
- BYTE headerBuf[64];
- InfoHdr header;
+ BYTE headerBuf[64];
+ InfoHdr header;
int s_cached;
-#ifdef DEBUG
- size_t headerSize =
+#ifdef DEBUG
+ size_t headerSize =
#endif
- compiler->compInfoBlkSize = gcInfo.gcInfoBlockHdrSave(headerBuf,
- 0,
- codeSize,
- prologSize,
- epilogSize,
- &header,
- &s_cached);
+ compiler->compInfoBlkSize =
+ gcInfo.gcInfoBlockHdrSave(headerBuf, 0, codeSize, prologSize, epilogSize, &header, &s_cached);
size_t argTabOffset = 0;
- size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
+ size_t ptrMapSize = gcInfo.gcPtrTableSize(header, codeSize, &argTabOffset);
#if DISPLAY_SIZES
@@ -8952,7 +8829,7 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
/* Allocate the info block for the method */
- compiler->compInfoBlkAddr = (BYTE *) compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
+ compiler->compInfoBlkAddr = (BYTE*)compiler->info.compCompHnd->allocGCInfo(compiler->compInfoBlkSize);
#if 0 // VERBOSE_SIZES
// TODO-X86-Cleanup: 'dataSize', below, is not defined
@@ -8978,24 +8855,20 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
/* Create the method info block: header followed by GC tracking tables */
- compiler->compInfoBlkAddr += gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1,
- codeSize,
- prologSize,
- epilogSize,
- &header,
- &s_cached);
+ compiler->compInfoBlkAddr +=
+ gcInfo.gcInfoBlockHdrSave(compiler->compInfoBlkAddr, -1, codeSize, prologSize, epilogSize, &header, &s_cached);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize);
compiler->compInfoBlkAddr = gcInfo.gcPtrTableSave(compiler->compInfoBlkAddr, header, codeSize, &argTabOffset);
assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + headerSize + ptrMapSize);
-#ifdef DEBUG
+#ifdef DEBUG
- if (0)
+ if (0)
{
- BYTE * temp = (BYTE *)infoPtr;
- unsigned size = compiler->compInfoBlkAddr - temp;
- BYTE * ptab = temp + headerSize;
+ BYTE* temp = (BYTE*)infoPtr;
+ unsigned size = compiler->compInfoBlkAddr - temp;
+ BYTE* ptab = temp + headerSize;
noway_assert(size == headerSize + ptrMapSize);
@@ -9003,14 +8876,14 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
for (unsigned i = 0; i < size; i++)
{
- if (temp == ptab)
+ if (temp == ptab)
{
printf("\nMethod info block - ptrtab [%u bytes]:", ptrMapSize);
- printf("\n %04X: %*c", i & ~0xF, 3*(i&0xF), ' ');
+ printf("\n %04X: %*c", i & ~0xF, 3 * (i & 0xF), ' ');
}
else
{
- if (!(i % 16))
+ if (!(i % 16))
printf("\n %04X: ", i);
}
@@ -9024,9 +8897,9 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
#if DUMP_GC_TABLES
- if (compiler->opts.dspGCtbls)
+ if (compiler->opts.dspGCtbls)
{
- const BYTE *base = (BYTE *)infoPtr;
+ const BYTE* base = (BYTE*)infoPtr;
unsigned size;
unsigned methodSize;
InfoHdr dumpHeader;
@@ -9038,19 +8911,18 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
// printf("size of header encoding is %3u\n", size);
printf("\n");
- if (compiler->opts.dspGCtbls)
+ if (compiler->opts.dspGCtbls)
{
- base += size;
- size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
+ base += size;
+ size = gcInfo.gcDumpPtrTable(base, dumpHeader, methodSize);
// printf("size of pointer table is %3u\n", size);
printf("\n");
- noway_assert(compiler->compInfoBlkAddr == (base+size));
+ noway_assert(compiler->compInfoBlkAddr == (base + size));
}
-
}
#ifdef DEBUG
- if (jitOpts.testMask & 128)
+ if (jitOpts.testMask & 128)
{
for (unsigned offs = 0; offs < codeSize; offs++)
{
@@ -9062,17 +8934,17 @@ void* CodeGen::genCreateAndStoreGCInfoJIT32(unsigned codeSize, unsigned prologS
/* Make sure we ended up generating the expected number of bytes */
- noway_assert(compiler->compInfoBlkAddr == (BYTE *)infoPtr + compiler->compInfoBlkSize);
+ noway_assert(compiler->compInfoBlkAddr == (BYTE*)infoPtr + compiler->compInfoBlkSize);
return infoPtr;
}
#else // !JIT32_GCENCODER
-void
-CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
+void CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr))
{
- IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
- GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC) GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
+ IAllocator* allowZeroAlloc = new (compiler, CMK_GC) AllowZeroAllocator(compiler->getAllocatorGC());
+ GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
+ GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
@@ -9100,23 +8972,26 @@ CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUG
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
- preservedAreaSize += REGSIZE_BYTES;
+ {
+ preservedAreaSize += REGSIZE_BYTES;
+ }
// bool in synchronized methods that tracks whether the lock has been taken (takes 4 bytes on stack)
- preservedAreaSize += 4;
+ preservedAreaSize += 4;
}
- // Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the frame
+ // Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
+ // frame
gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
- }
+ }
#endif
-
+
gcInfoEncoder->Build();
- //GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
- //let's save the values anyway for debugging purposes
+ // GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
+ // let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
- compiler->compInfoBlkSize = 0; //not exposed by the GCEncoder interface
+ compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
#endif // !JIT32_GCENCODER
@@ -9125,21 +9000,18 @@ CodeGen::genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUG
*
*/
-void CodeGen::genEmitHelperCall(unsigned helper,
- int argSize,
- emitAttr retSize,
- regNumber callTargetReg)
+void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg)
{
- void* addr = nullptr;
+ void* addr = nullptr;
void* pAddr = nullptr;
- emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
- addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
- regNumber callTarget = REG_NA;
- regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
+ emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
+ addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
+ regNumber callTarget = REG_NA;
+ regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
if (!addr)
- {
+ {
assert(pAddr != nullptr);
// Absolute indirect call addr
@@ -9150,12 +9022,12 @@ void CodeGen::genEmitHelperCall(unsigned helper,
{
// generate call whose target is specified by 32-bit offset relative to PC or zero.
callType = emitter::EC_FUNC_TOKEN_INDIR;
- addr = pAddr;
+ addr = pAddr;
}
else
{
#ifdef _TARGET_AMD64_
- // If this indirect address cannot be encoded as 32-bit offset relative to PC or Zero,
+ // If this indirect address cannot be encoded as 32-bit offset relative to PC or Zero,
// load it into REG_HELPER_CALL_TARGET and use register indirect addressing mode to
// make the call.
// mov reg, addr
@@ -9165,7 +9037,7 @@ void CodeGen::genEmitHelperCall(unsigned helper,
{
// If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
// this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
- callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
+ callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
regMaskTP callTargetMask = genRegMask(callTargetReg);
noway_assert((callTargetMask & killMask) == callTargetMask);
}
@@ -9179,28 +9051,20 @@ void CodeGen::genEmitHelperCall(unsigned helper,
#endif
callTarget = callTargetReg;
- CodeGen::genSetRegToIcon(callTarget, (ssize_t) pAddr, TYP_I_IMPL);
+ CodeGen::genSetRegToIcon(callTarget, (ssize_t)pAddr, TYP_I_IMPL);
callType = emitter::EC_INDIR_ARD;
}
}
- getEmitter()->emitIns_Call(callType,
- compiler->eeFindHelper(helper),
- INDEBUG_LDISASM_COMMA(nullptr)
- addr,
- argSize,
- retSize
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(EA_UNKNOWN),
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- BAD_IL_OFFSET, // IL offset
- callTarget, // ireg
- REG_NA, 0, 0, // xreg, xmul, disp
- false, // isJump
+ getEmitter()->emitIns_Call(callType, compiler->eeFindHelper(helper), INDEBUG_LDISASM_COMMA(nullptr) addr, argSize,
+ retSize FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(EA_UNKNOWN), gcInfo.gcVarPtrSetCur,
+ gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur,
+ BAD_IL_OFFSET, // IL offset
+ callTarget, // ireg
+ REG_NA, 0, 0, // xreg, xmul, disp
+ false, // isJump
emitter::emitNoGChelper(helper));
-
regTracker.rsTrashRegSet(killMask);
regTracker.rsTrashRegsForGCInterruptability();
}
@@ -9225,13 +9089,13 @@ void CodeGen::genEmitHelperCall(unsigned helper,
// 'treeNode' must be a TYP_LONG lclVar node for a lclVar that has NOT been promoted.
// Its operand must be a GT_LONG node.
//
-void CodeGen::genStoreLongLclVar(GenTree* treeNode)
+void CodeGen::genStoreLongLclVar(GenTree* treeNode)
{
emitter* emit = getEmitter();
GenTreeLclVarCommon* lclNode = treeNode->AsLclVarCommon();
- unsigned lclNum = lclNode->gtLclNum;
- LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
+ unsigned lclNum = lclNode->gtLclNum;
+ LclVarDsc* varDsc = &(compiler->lvaTable[lclNum]);
assert(varDsc->TypeGet() == TYP_LONG);
assert(!varDsc->lvPromoted);
GenTreePtr op1 = treeNode->gtOp.gtOp1;
@@ -9261,7 +9125,7 @@ void CodeGen::genStoreLongLclVar(GenTree* treeNode)
//#define ALL_XARCH_EMITTER_UNIT_TESTS
#if defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_)
-void CodeGen::genAmd64EmitterUnitTests()
+void CodeGen::genAmd64EmitterUnitTests()
{
if (!verbose)
{
@@ -9352,7 +9216,7 @@ void CodeGen::genAmd64EmitterUnitTests()
getEmitter()->emitIns_R_R_R(INS_divss, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
getEmitter()->emitIns_R_R_R(INS_divsd, EA_8BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
-
+
// vdivss xmm0,xmm1,xmm2
getEmitter()->emitIns_R_R_R(INS_cvtss2sd, EA_4BYTE, REG_XMM0, REG_XMM1, REG_XMM2);
// vdivsd xmm0,xmm1,xmm2
@@ -9364,7 +9228,6 @@ void CodeGen::genAmd64EmitterUnitTests()
#endif // defined(DEBUG) && defined(LATE_DISASM) && defined(_TARGET_AMD64_)
-
/*****************************************************************************/
#ifdef DEBUGGING_SUPPORT
/*****************************************************************************
@@ -9373,13 +9236,13 @@ void CodeGen::genAmd64EmitterUnitTests()
* Called for every scope info piece to record by the main genSetScopeInfo()
*/
-void CodeGen::genSetScopeInfo (unsigned which,
- UNATIVE_OFFSET startOffs,
- UNATIVE_OFFSET length,
- unsigned varNum,
- unsigned LVnum,
- bool avail,
- Compiler::siVarLoc& varLoc)
+void CodeGen::genSetScopeInfo(unsigned which,
+ UNATIVE_OFFSET startOffs,
+ UNATIVE_OFFSET length,
+ unsigned varNum,
+ unsigned LVnum,
+ bool avail,
+ Compiler::siVarLoc& varLoc)
{
/* We need to do some mapping while reporting back these variables */
@@ -9400,15 +9263,15 @@ void CodeGen::genSetScopeInfo (unsigned which,
// Hang on to this compiler->info.
- TrnslLocalVarInfo &tlvi = genTrnslLocalVarInfo[which];
+ TrnslLocalVarInfo& tlvi = genTrnslLocalVarInfo[which];
- tlvi.tlviVarNum = ilVarNum;
- tlvi.tlviLVnum = LVnum;
- tlvi.tlviName = name;
- tlvi.tlviStartPC = startOffs;
- tlvi.tlviLength = length;
- tlvi.tlviAvailable = avail;
- tlvi.tlviVarLoc = varLoc;
+ tlvi.tlviVarNum = ilVarNum;
+ tlvi.tlviLVnum = LVnum;
+ tlvi.tlviName = name;
+ tlvi.tlviStartPC = startOffs;
+ tlvi.tlviLength = length;
+ tlvi.tlviAvailable = avail;
+ tlvi.tlviVarLoc = varLoc;
#endif // DEBUG
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index a79ddb3480..318e314a61 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -28,46 +28,43 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#if defined(DEBUG)
// Column settings for COMPlus_JitDumpIR. We could(should) make these programmable.
-#define COLUMN_OPCODE 30
+#define COLUMN_OPCODE 30
#define COLUMN_OPERANDS (COLUMN_OPCODE + 25)
-#define COLUMN_KINDS 110
-#define COLUMN_FLAGS (COLUMN_KINDS + 32)
+#define COLUMN_KINDS 110
+#define COLUMN_FLAGS (COLUMN_KINDS + 32)
#endif
#if defined(DEBUG)
unsigned Compiler::jitTotalMethodCompiled = 0;
-#endif // defined(DEBUG)
+#endif // defined(DEBUG)
#if defined(DEBUG)
-LONG Compiler::jitNestingLevel = 0;
-#endif // defined(DEBUG)
+LONG Compiler::jitNestingLevel = 0;
+#endif // defined(DEBUG)
#ifdef ALT_JIT
// static
-bool Compiler::s_pAltJitExcludeAssembliesListInitialized = false;
-AssemblyNamesList2* Compiler::s_pAltJitExcludeAssembliesList = nullptr;
+bool Compiler::s_pAltJitExcludeAssembliesListInitialized = false;
+AssemblyNamesList2* Compiler::s_pAltJitExcludeAssembliesList = nullptr;
#endif // ALT_JIT
/*****************************************************************************/
-inline
-unsigned getCurTime()
+inline unsigned getCurTime()
{
- SYSTEMTIME tim;
+ SYSTEMTIME tim;
GetSystemTime(&tim);
- return (((tim.wHour*60) + tim.wMinute)*60 + tim.wSecond)*1000 + tim.wMilliseconds;
+ return (((tim.wHour * 60) + tim.wMinute) * 60 + tim.wSecond) * 1000 + tim.wMilliseconds;
}
/*****************************************************************************/
#ifdef DEBUG
/*****************************************************************************/
-static
-FILE * jitSrcFilePtr;
+static FILE* jitSrcFilePtr;
-static
-unsigned jitCurSrcLine;
+static unsigned jitCurSrcLine;
void Compiler::JitLogEE(unsigned level, const char* fmt, ...)
{
@@ -85,18 +82,24 @@ void Compiler::JitLogEE(unsigned level, const char* fmt, ...)
va_end(args);
}
-void Compiler::compDspSrcLinesByLineNum(unsigned line, bool seek)
+void Compiler::compDspSrcLinesByLineNum(unsigned line, bool seek)
{
- if (!jitSrcFilePtr)
+ if (!jitSrcFilePtr)
+ {
return;
+ }
- if (jitCurSrcLine == line)
+ if (jitCurSrcLine == line)
+ {
return;
+ }
- if (jitCurSrcLine > line)
+ if (jitCurSrcLine > line)
{
- if (!seek)
+ if (!seek)
+ {
return;
+ }
if (fseek(jitSrcFilePtr, 0, SEEK_SET) != 0)
{
@@ -105,62 +108,72 @@ void Compiler::compDspSrcLinesByLineNum(unsigned line, bool seek)
jitCurSrcLine = 0;
}
- if (!seek)
+ if (!seek)
+ {
printf(";\n");
+ }
do
{
- char temp[128];
- size_t llen;
+ char temp[128];
+ size_t llen;
- if (!fgets(temp, sizeof(temp), jitSrcFilePtr))
+ if (!fgets(temp, sizeof(temp), jitSrcFilePtr))
+ {
return;
+ }
- if (seek)
+ if (seek)
+ {
continue;
+ }
llen = strlen(temp);
- if (llen && temp[llen-1] == '\n')
- temp[llen-1] = 0;
+ if (llen && temp[llen - 1] == '\n')
+ {
+ temp[llen - 1] = 0;
+ }
printf("; %s\n", temp);
- }
- while (++jitCurSrcLine < line);
+ } while (++jitCurSrcLine < line);
- if (!seek)
+ if (!seek)
+ {
printf(";\n");
+ }
}
-
/*****************************************************************************/
-void Compiler::compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP)
+void Compiler::compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP)
{
#ifdef DEBUGGING_SUPPORT
- static IPmappingDsc * nextMappingDsc;
- static unsigned lastLine;
+ static IPmappingDsc* nextMappingDsc;
+ static unsigned lastLine;
if (!opts.dspLines)
+ {
return;
+ }
- if (curIP==0)
+ if (curIP == 0)
{
if (genIPmappingList)
{
- nextMappingDsc = genIPmappingList;
- lastLine = jitGetILoffs(nextMappingDsc->ipmdILoffsx);
+ nextMappingDsc = genIPmappingList;
+ lastLine = jitGetILoffs(nextMappingDsc->ipmdILoffsx);
- unsigned firstLine = jitGetILoffs(nextMappingDsc->ipmdILoffsx);
+ unsigned firstLine = jitGetILoffs(nextMappingDsc->ipmdILoffsx);
- unsigned earlierLine = (firstLine < 5) ? 0 : firstLine - 5;
+ unsigned earlierLine = (firstLine < 5) ? 0 : firstLine - 5;
- compDspSrcLinesByLineNum(earlierLine, true); // display previous 5 lines
- compDspSrcLinesByLineNum( firstLine, false);
+ compDspSrcLinesByLineNum(earlierLine, true); // display previous 5 lines
+ compDspSrcLinesByLineNum(firstLine, false);
}
else
{
- nextMappingDsc = NULL;
+ nextMappingDsc = nullptr;
}
return;
@@ -186,25 +199,23 @@ void Compiler::compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP)
compDspSrcLinesByLineNum(nextOffs);
}
- lastLine = nextOffs;
- nextMappingDsc = nextMappingDsc->ipmdNext;
+ lastLine = nextOffs;
+ nextMappingDsc = nextMappingDsc->ipmdNext;
}
}
#endif
}
-
/*****************************************************************************/
-#endif//DEBUG
-
+#endif // DEBUG
/*****************************************************************************/
#if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS
-static unsigned genMethodCnt; // total number of methods JIT'ted
- unsigned genMethodICnt; // number of interruptible methods
- unsigned genMethodNCnt; // number of non-interruptible methods
+static unsigned genMethodCnt; // total number of methods JIT'ted
+unsigned genMethodICnt; // number of interruptible methods
+unsigned genMethodNCnt; // number of non-interruptible methods
static unsigned genSmallMethodsNeedingExtraMemoryCnt = 0;
#endif
@@ -214,11 +225,11 @@ static unsigned genSmallMethodsNeedingExtraMemoryCnt = 0;
NodeSizeStats genNodeSizeStats;
NodeSizeStats genNodeSizeStatsPerFunc;
-unsigned genTreeNcntHistBuckets[] = { 10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000, 5000, 10000, 0 };
-Histogram genTreeNcntHist(HostAllocator::getHostAllocator(), genTreeNcntHistBuckets);
+unsigned genTreeNcntHistBuckets[] = {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000, 5000, 10000, 0};
+Histogram genTreeNcntHist(HostAllocator::getHostAllocator(), genTreeNcntHistBuckets);
-unsigned genTreeNsizHistBuckets[] = { 1000, 5000, 10000, 50000, 100000, 500000, 1000000, 0 };
-Histogram genTreeNsizHist(HostAllocator::getHostAllocator(), genTreeNsizHistBuckets);
+unsigned genTreeNsizHistBuckets[] = {1000, 5000, 10000, 50000, 100000, 500000, 1000000, 0};
+Histogram genTreeNsizHist(HostAllocator::getHostAllocator(), genTreeNsizHistBuckets);
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
@@ -228,13 +239,13 @@ Histogram genTreeNsizHist(HostAllocator::getHostAllocator(), genTreeNsizHistBu
#if DISPLAY_SIZES
-size_t grossVMsize; // Total IL code size
-size_t grossNCsize; // Native code + data size
-size_t totalNCsize; // Native code + data + GC info size (TODO-Cleanup: GC info size only accurate for JIT32_GCENCODER)
-size_t gcHeaderISize; // GC header size: interruptible methods
-size_t gcPtrMapISize; // GC pointer map size: interruptible methods
-size_t gcHeaderNSize; // GC header size: non-interruptible methods
-size_t gcPtrMapNSize; // GC pointer map size: non-interruptible methods
+size_t grossVMsize; // Total IL code size
+size_t grossNCsize; // Native code + data size
+size_t totalNCsize; // Native code + data + GC info size (TODO-Cleanup: GC info size only accurate for JIT32_GCENCODER)
+size_t gcHeaderISize; // GC header size: interruptible methods
+size_t gcPtrMapISize; // GC pointer map size: interruptible methods
+size_t gcHeaderNSize; // GC header size: non-interruptible methods
+size_t gcPtrMapNSize; // GC pointer map size: non-interruptible methods
#endif // DISPLAY_SIZES
@@ -245,40 +256,40 @@ size_t gcPtrMapNSize; // GC pointer map size: non-interruptible methods
#if CALL_ARG_STATS
-unsigned argTotalCalls;
-unsigned argHelperCalls;
-unsigned argStaticCalls;
-unsigned argNonVirtualCalls;
-unsigned argVirtualCalls;
+unsigned argTotalCalls;
+unsigned argHelperCalls;
+unsigned argStaticCalls;
+unsigned argNonVirtualCalls;
+unsigned argVirtualCalls;
-unsigned argTotalArgs; // total number of args for all calls (including objectPtr)
-unsigned argTotalDWordArgs;
-unsigned argTotalLongArgs;
-unsigned argTotalFloatArgs;
-unsigned argTotalDoubleArgs;
+unsigned argTotalArgs; // total number of args for all calls (including objectPtr)
+unsigned argTotalDWordArgs;
+unsigned argTotalLongArgs;
+unsigned argTotalFloatArgs;
+unsigned argTotalDoubleArgs;
-unsigned argTotalRegArgs;
-unsigned argTotalTemps;
-unsigned argTotalLclVar;
-unsigned argTotalDeferred;
-unsigned argTotalConst;
+unsigned argTotalRegArgs;
+unsigned argTotalTemps;
+unsigned argTotalLclVar;
+unsigned argTotalDeferred;
+unsigned argTotalConst;
-unsigned argTotalObjPtr;
-unsigned argTotalGTF_ASGinArgs;
+unsigned argTotalObjPtr;
+unsigned argTotalGTF_ASGinArgs;
-unsigned argMaxTempsPerMethod;
+unsigned argMaxTempsPerMethod;
-unsigned argCntBuckets[] = { 0, 1, 2, 3, 4, 5, 6, 10, 0 };
-Histogram argCntTable(HostAllocator::getHostAllocator(), argCntBuckets);
+unsigned argCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0};
+Histogram argCntTable(HostAllocator::getHostAllocator(), argCntBuckets);
-unsigned argDWordCntBuckets[] = { 0, 1, 2, 3, 4, 5, 6, 10, 0 };
-Histogram argDWordCntTable(HostAllocator::getHostAllocator(), argDWordCntBuckets);
+unsigned argDWordCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0};
+Histogram argDWordCntTable(HostAllocator::getHostAllocator(), argDWordCntBuckets);
-unsigned argDWordLngCntBuckets[] = { 0, 1, 2, 3, 4, 5, 6, 10, 0 };
-Histogram argDWordLngCntTable(HostAllocator::getHostAllocator(), argDWordLngCntBuckets);
+unsigned argDWordLngCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0};
+Histogram argDWordLngCntTable(HostAllocator::getHostAllocator(), argDWordLngCntBuckets);
-unsigned argTempsCntBuckets[] = { 0, 1, 2, 3, 4, 5, 6, 10, 0 };
-Histogram argTempsCntTable(HostAllocator::getHostAllocator(), argTempsCntBuckets);
+unsigned argTempsCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0};
+Histogram argTempsCntTable(HostAllocator::getHostAllocator(), argTempsCntBuckets);
#endif // CALL_ARG_STATS
@@ -304,17 +315,16 @@ Histogram argTempsCntTable(HostAllocator::getHostAllocator(), argTempsCntBucke
// 1001 .. 10000 ===> 0 count (100% of total)
// --------------------------------------------------
-unsigned bbCntBuckets[] = { 1, 2, 3, 5, 10, 20, 50, 100, 1000, 10000, 0 };
-Histogram bbCntTable(HostAllocator::getHostAllocator(), bbCntBuckets);
+unsigned bbCntBuckets[] = {1, 2, 3, 5, 10, 20, 50, 100, 1000, 10000, 0};
+Histogram bbCntTable(HostAllocator::getHostAllocator(), bbCntBuckets);
/* Histogram for the IL opcode size of methods with a single basic block */
-unsigned bbSizeBuckets[] = { 1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 0 };
-Histogram bbOneBBSizeTable(HostAllocator::getHostAllocator(), bbSizeBuckets);
+unsigned bbSizeBuckets[] = {1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 0};
+Histogram bbOneBBSizeTable(HostAllocator::getHostAllocator(), bbSizeBuckets);
#endif // COUNT_BASIC_BLOCKS
-
/*****************************************************************************
*
* Used by optFindNaturalLoops to gather statistical information such as
@@ -326,28 +336,28 @@ Histogram bbOneBBSizeTable(HostAllocator::getHostAllocator(), bbSizeBuckets);
#if COUNT_LOOPS
-unsigned totalLoopMethods; // counts the total number of methods that have natural loops
-unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has
-unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent
-unsigned totalLoopCount; // counts the total number of natural loops
-unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops
-unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent
-unsigned iterLoopCount; // counts the # of loops with an iterator (for like)
-unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < const)
-unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like)
-bool hasMethodLoops; // flag to keep track if we already counted a method as having loops
-unsigned loopsThisMethod; // counts the number of loops in the current method
-bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method.
+unsigned totalLoopMethods; // counts the total number of methods that have natural loops
+unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has
+unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent
+unsigned totalLoopCount; // counts the total number of natural loops
+unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops
+unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent
+unsigned iterLoopCount; // counts the # of loops with an iterator (for like)
+unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < const)
+unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like)
+bool hasMethodLoops; // flag to keep track if we already counted a method as having loops
+unsigned loopsThisMethod; // counts the number of loops in the current method
+bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method.
/* Histogram for number of loops in a method */
-unsigned loopCountBuckets[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0 };
-Histogram loopCountTable(HostAllocator::getHostAllocator(), loopCountBuckets);
+unsigned loopCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0};
+Histogram loopCountTable(HostAllocator::getHostAllocator(), loopCountBuckets);
/* Histogram for number of loop exits */
-unsigned loopExitCountBuckets[] = { 0, 1, 2, 3, 4, 5, 6, 0 };
-Histogram loopExitCountTable(HostAllocator::getHostAllocator(), loopExitCountBuckets);
+unsigned loopExitCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 0};
+Histogram loopExitCountTable(HostAllocator::getHostAllocator(), loopExitCountBuckets);
#endif // COUNT_LOOPS
@@ -355,7 +365,7 @@ Histogram loopExitCountTable(HostAllocator::getHostAllocator(), loopExitCountB
// getJitGCType: Given the VM's CorInfoGCType convert it to the JIT's var_types
//
// Arguments:
-// gcType - an enum value that originally came from an element
+// gcType - an enum value that originally came from an element
// of the BYTE[] returned from getClassGClayout()
//
// Return Value:
@@ -368,9 +378,9 @@ Histogram loopExitCountTable(HostAllocator::getHostAllocator(), loopExitCountB
// the JIT will often pass the address of a single BYTE, instead of a BYTE[]
//
-var_types Compiler::getJitGCType(BYTE gcType)
+var_types Compiler::getJitGCType(BYTE gcType)
{
- var_types result = TYP_UNKNOWN;
+ var_types result = TYP_UNKNOWN;
CorInfoGCType corInfoType = (CorInfoGCType)gcType;
if (corInfoType == TYPE_GC_NONE)
@@ -395,19 +405,19 @@ var_types Compiler::getJitGCType(BYTE gcType)
#if FEATURE_MULTIREG_ARGS
//---------------------------------------------------------------------------
// getStructGcPtrsFromOp: Given a GenTree node of TYP_STRUCT that represents
-// a pass by value argument, return the gcPtr layout
-// for the pointers sized fields
+// a pass by value argument, return the gcPtr layout
+// for the pointers sized fields
// Arguments:
// op - the operand of TYP_STRUCT that is passed by value
// gcPtrsOut - an array of BYTES that are written by this method
-// they will contain the VM's CorInfoGCType values
+// they will contain the VM's CorInfoGCType values
// for each pointer sized field
// Return Value:
// Two [or more] values are written into the gcPtrs array
//
// Note that for ARM64 there will alwys be exactly two pointer sized fields
-void Compiler::getStructGcPtrsFromOp(GenTreePtr op, BYTE *gcPtrsOut)
+void Compiler::getStructGcPtrsFromOp(GenTreePtr op, BYTE* gcPtrsOut)
{
assert(op->TypeGet() == TYP_STRUCT);
@@ -417,7 +427,7 @@ void Compiler::getStructGcPtrsFromOp(GenTreePtr op, BYTE *gcPtrsOut)
CORINFO_CLASS_HANDLE objClass = op->gtObj.gtClass;
int structSize = info.compCompHnd->getClassSize(objClass);
- assert(structSize <= 2*TARGET_POINTER_SIZE);
+ assert(structSize <= 2 * TARGET_POINTER_SIZE);
BYTE gcPtrsTmp[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
@@ -429,7 +439,7 @@ void Compiler::getStructGcPtrsFromOp(GenTreePtr op, BYTE *gcPtrsOut)
else if (op->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* varNode = op->AsLclVarCommon();
- unsigned varNum = varNode->gtLclNum;
+ unsigned varNum = varNode->gtLclNum;
assert(varNum < lvaCount);
LclVarDsc* varDsc = &lvaTable[varNum];
@@ -465,44 +475,43 @@ bool Compiler::isSingleFloat32Struct(CORINFO_CLASS_HANDLE clsHnd)
for (;;)
{
// all of class chain must be of value type and must have only one field
- if (!info.compCompHnd->isValueClass(clsHnd) &&
- info.compCompHnd->getClassNumInstanceFields(clsHnd) != 1)
+ if (!info.compCompHnd->isValueClass(clsHnd) && info.compCompHnd->getClassNumInstanceFields(clsHnd) != 1)
{
return false;
}
- CORINFO_CLASS_HANDLE* pClsHnd = &clsHnd;
- CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
- CorInfoType fieldType = info.compCompHnd->getFieldType(fldHnd, pClsHnd);
+ CORINFO_CLASS_HANDLE* pClsHnd = &clsHnd;
+ CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
+ CorInfoType fieldType = info.compCompHnd->getFieldType(fldHnd, pClsHnd);
switch (fieldType)
{
- case CORINFO_TYPE_VALUECLASS:
- clsHnd = *pClsHnd;
- break;
+ case CORINFO_TYPE_VALUECLASS:
+ clsHnd = *pClsHnd;
+ break;
- case CORINFO_TYPE_FLOAT:
- return true;
+ case CORINFO_TYPE_FLOAT:
+ return true;
- default:
- return false;
+ default:
+ return false;
}
}
}
#endif // ARM_SOFTFP
//-----------------------------------------------------------------------------
-// getPrimitiveTypeForStruct:
-// Get the "primitive" type that is is used for a struct
-// of size 'structSize'.
-// We examine 'clsHnd' to check the GC layout of the struct and
+// getPrimitiveTypeForStruct:
+// Get the "primitive" type that is is used for a struct
+// of size 'structSize'.
+// We examine 'clsHnd' to check the GC layout of the struct and
// return TYP_REF for structs that simply wrap an object.
-// If the struct is a one element HFA, we will return the
+// If the struct is a one element HFA, we will return the
// proper floating point type.
//
// Arguments:
// structSize - the size of the struct type, cannot be zero
-// clsHnd - the handle for the struct type, used when may have
+// clsHnd - the handle for the struct type, used when may have
// an HFA or if we need the GC layout for an object ref.
//
// Return Value:
@@ -510,15 +519,14 @@ bool Compiler::isSingleFloat32Struct(CORINFO_CLASS_HANDLE clsHnd)
// used to pass or return structs of this size.
// If we shouldn't use a "primitive" type then TYP_UNKNOWN is returned.
// Notes:
-// For 32-bit targets (X86/ARM32) the 64-bit TYP_LONG type is not
+// For 32-bit targets (X86/ARM32) the 64-bit TYP_LONG type is not
// considered a primitive type by this method.
// So a struct that wraps a 'long' is passed and returned in the
// same way as any other 8-byte struct
// For ARM32 if we have an HFA struct that wraps a 64-bit double
// we will return TYP_DOUBLE.
//
-var_types Compiler::getPrimitiveTypeForStruct( unsigned structSize,
- CORINFO_CLASS_HANDLE clsHnd)
+var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd)
{
assert(structSize != 0);
@@ -526,140 +534,139 @@ var_types Compiler::getPrimitiveTypeForStruct( unsigned structSize,
switch (structSize)
{
- case 1:
- useType = TYP_BYTE;
- break;
+ case 1:
+ useType = TYP_BYTE;
+ break;
- case 2:
- useType = TYP_SHORT;
- break;
+ case 2:
+ useType = TYP_SHORT;
+ break;
-#ifndef _TARGET_XARCH_
- case 3:
- useType = TYP_INT;
- break;
+#ifndef _TARGET_XARCH_
+ case 3:
+ useType = TYP_INT;
+ break;
#endif // _TARGET_XARCH_
#ifdef _TARGET_64BIT_
- case 4:
- if (IsHfa(clsHnd))
- {
- // A structSize of 4 with IsHfa, it must be an HFA of one float
- useType = TYP_FLOAT;
- }
- else
- {
- useType = TYP_INT;
- }
- break;
+ case 4:
+ if (IsHfa(clsHnd))
+ {
+ // A structSize of 4 with IsHfa, it must be an HFA of one float
+ useType = TYP_FLOAT;
+ }
+ else
+ {
+ useType = TYP_INT;
+ }
+ break;
#ifndef _TARGET_XARCH_
- case 5:
- case 6:
- case 7:
- useType = TYP_I_IMPL;
- break;
+ case 5:
+ case 6:
+ case 7:
+ useType = TYP_I_IMPL;
+ break;
#endif // _TARGET_XARCH_
#endif // _TARGET_64BIT_
-
- case TARGET_POINTER_SIZE:
+ case TARGET_POINTER_SIZE:
#ifdef ARM_SOFTFP
- // For ARM_SOFTFP, HFA is unsupported so we need to check in another way
- // This matters only for size-4 struct cause bigger structs would be processed with RetBuf
- if (isSingleFloat32Struct(clsHnd))
-#else // !ARM_SOFTFP
- if (IsHfa(clsHnd))
+ // For ARM_SOFTFP, HFA is unsupported so we need to check in another way
+ // This matters only for size-4 struct cause bigger structs would be processed with RetBuf
+ if (isSingleFloat32Struct(clsHnd))
+#else // !ARM_SOFTFP
+ if (IsHfa(clsHnd))
#endif // ARM_SOFTFP
- {
+ {
#ifdef _TARGET_64BIT_
- var_types hfaType = GetHfaType(clsHnd);
+ var_types hfaType = GetHfaType(clsHnd);
- // A structSize of 8 with IsHfa, we have two possiblities:
- // An HFA of one double or an HFA of two floats
- //
- // Check and exclude the case of an HFA of two floats
- if (hfaType == TYP_DOUBLE)
- {
- // We have an HFA of one double
- useType = TYP_DOUBLE;
- }
- else
- {
- assert(hfaType == TYP_FLOAT);
+ // A structSize of 8 with IsHfa, we have two possiblities:
+ // An HFA of one double or an HFA of two floats
+ //
+ // Check and exclude the case of an HFA of two floats
+ if (hfaType == TYP_DOUBLE)
+ {
+ // We have an HFA of one double
+ useType = TYP_DOUBLE;
+ }
+ else
+ {
+ assert(hfaType == TYP_FLOAT);
- // We have an HFA of two floats
- // This should be passed or returned in two FP registers
- useType = TYP_UNKNOWN;
- }
+ // We have an HFA of two floats
+ // This should be passed or returned in two FP registers
+ useType = TYP_UNKNOWN;
+ }
#else // a 32BIT target
- // A structSize of 4 with IsHfa, it must be an HFA of one float
- useType = TYP_FLOAT;
+ // A structSize of 4 with IsHfa, it must be an HFA of one float
+ useType = TYP_FLOAT;
#endif // _TARGET_64BIT_
- }
- else
- {
- BYTE gcPtr = 0;
- // Check if this pointer-sized struct is wrapping a GC object
- info.compCompHnd->getClassGClayout(clsHnd, &gcPtr);
- useType = getJitGCType(gcPtr);
- }
- break;
+ }
+ else
+ {
+ BYTE gcPtr = 0;
+ // Check if this pointer-sized struct is wrapping a GC object
+ info.compCompHnd->getClassGClayout(clsHnd, &gcPtr);
+ useType = getJitGCType(gcPtr);
+ }
+ break;
#ifdef _TARGET_ARM_
- case 8:
- if (IsHfa(clsHnd))
- {
- var_types hfaType = GetHfaType(clsHnd);
-
- // A structSize of 8 with IsHfa, we have two possiblities:
- // An HFA of one double or an HFA of two floats
- //
- // Check and exclude the case of an HFA of two floats
- if (hfaType == TYP_DOUBLE)
- {
- // We have an HFA of one double
- useType = TYP_DOUBLE;
- }
- else
+ case 8:
+ if (IsHfa(clsHnd))
{
- assert(hfaType == TYP_FLOAT);
+ var_types hfaType = GetHfaType(clsHnd);
+
+ // A structSize of 8 with IsHfa, we have two possiblities:
+ // An HFA of one double or an HFA of two floats
+ //
+ // Check and exclude the case of an HFA of two floats
+ if (hfaType == TYP_DOUBLE)
+ {
+ // We have an HFA of one double
+ useType = TYP_DOUBLE;
+ }
+ else
+ {
+ assert(hfaType == TYP_FLOAT);
- // We have an HFA of two floats
- // This should be passed or returned in two FP registers
+ // We have an HFA of two floats
+ // This should be passed or returned in two FP registers
+ useType = TYP_UNKNOWN;
+ }
+ }
+ else
+ {
+ // We don't have an HFA
useType = TYP_UNKNOWN;
}
- }
- else
- {
- // We don't have an HFA
- useType = TYP_UNKNOWN;
- }
- break;
+ break;
#endif // _TARGET_ARM_
- default:
- useType = TYP_UNKNOWN;
- break;
+ default:
+ useType = TYP_UNKNOWN;
+ break;
}
return useType;
}
//-----------------------------------------------------------------------------
-// getArgTypeForStruct:
+// getArgTypeForStruct:
// Get the type that is used to pass values of the given struct type.
-// If you have already retrieved the struct size then it should be
+// If you have already retrieved the struct size then it should be
// passed as the optional third argument, as this allows us to avoid
// an extra call to getClassSize(clsHnd)
//
// Arguments:
// clsHnd - the handle for the struct type
-// wbPassStruct - An "out" argument with information about how
+// wbPassStruct - An "out" argument with information about how
// the struct is to be passed
-// structSize - the size of the struct type,
+// structSize - the size of the struct type,
// or zero if we should call getClassSize(clsHnd)
//
// Return Value:
@@ -670,7 +677,7 @@ var_types Compiler::getPrimitiveTypeForStruct( unsigned structSize,
// When *wbPassStruct is SPK_ByReference this method's return value
// is always TYP_UNKNOWN and the struct type is passed by reference to a copy
// When *wbPassStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value
-// is always TYP_STRUCT and the struct type is passed by value either
+// is always TYP_STRUCT and the struct type is passed by value either
// using multiple registers or on the stack.
//
// Assumptions:
@@ -681,15 +688,15 @@ var_types Compiler::getPrimitiveTypeForStruct( unsigned structSize,
// About HFA types:
// When the clsHnd is a one element HFA type we return the appropriate
// floating point primitive type and *wbPassStruct is SPK_PrimitiveType
-// If there are two or more elements in the HFA type then the this method's
+// If there are two or more elements in the HFA type then the this method's
// return value is TYP_STRUCT and *wbPassStruct is SPK_ByValueAsHfa
//
-var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
- structPassingKind* wbPassStruct,
- unsigned structSize /* = 0 */)
+var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
+ structPassingKind* wbPassStruct,
+ unsigned structSize /* = 0 */)
{
- var_types useType = TYP_UNKNOWN;
- structPassingKind howToPassStruct = SPK_Unknown; // We must change this before we return
+ var_types useType = TYP_UNKNOWN;
+ structPassingKind howToPassStruct = SPK_Unknown; // We must change this before we return
if (structSize == 0)
{
@@ -717,10 +724,10 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// On x86 we never pass structs as primitive types (unless the VM unwraps them for us)
useType = TYP_UNKNOWN;
-#else // all other targets
+#else // all other targets
// The largest primitive type is 8 bytes (TYP_DOUBLE)
- // so we can skip calling getPrimitiveTypeForStruct when we
+ // so we can skip calling getPrimitiveTypeForStruct when we
// have a struct that is larger than that.
//
if (structSize <= sizeof(double))
@@ -739,7 +746,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// Yes, we should use the "primitive" type in 'useType'
howToPassStruct = SPK_PrimitiveType;
}
- else // We can't replace the struct with a "primitive" type
+ else // We can't replace the struct with a "primitive" type
{
// See if we can pass this struct by value, possibly in multiple registers
// or if we should pass it by reference to a copy
@@ -756,9 +763,9 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// using multiple registers
// (when all of the parameters registers are used, then the stack will be used)
howToPassStruct = SPK_ByValueAsHfa;
- useType = TYP_STRUCT;
+ useType = TYP_STRUCT;
}
- else // Not an HFA struct type
+ else // Not an HFA struct type
{
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -769,7 +776,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// setup wbPassType and useType indicate that this is passed by value in multiple registers
// (when all of the parameters registers are used, then the stack will be used)
howToPassStruct = SPK_ByValue;
- useType = TYP_STRUCT;
+ useType = TYP_STRUCT;
}
else
{
@@ -778,10 +785,10 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// setup wbPassType and useType indicate that this is passed using one register
// (by reference to a copy)
howToPassStruct = SPK_ByReference;
- useType = TYP_UNKNOWN;
+ useType = TYP_UNKNOWN;
}
-#elif defined(_TARGET_ARM64_)
+#elif defined(_TARGET_ARM64_)
// Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct
assert(structSize > TARGET_POINTER_SIZE);
@@ -793,7 +800,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// setup wbPassType and useType indicate that this is passed by value in multiple registers
// (when all of the parameters registers are used, then the stack will be used)
howToPassStruct = SPK_ByValue;
- useType = TYP_STRUCT;
+ useType = TYP_STRUCT;
}
else // a structSize that is 17-32 bytes in size
{
@@ -801,25 +808,24 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// setup wbPassType and useType indicate that this is passed using one register
// (by reference to a copy)
howToPassStruct = SPK_ByReference;
- useType = TYP_UNKNOWN;
+ useType = TYP_UNKNOWN;
}
-#elif defined(_TARGET_X86_) || defined(_TARGET_ARM_)
+#elif defined(_TARGET_X86_) || defined(_TARGET_ARM_)
- // Otherwise we pass this struct by value on the stack
+ // Otherwise we pass this struct by value on the stack
// setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI
howToPassStruct = SPK_ByValue;
- useType = TYP_STRUCT;
+ useType = TYP_STRUCT;
-#else // _TARGET_XXX_
+#else // _TARGET_XXX_
noway_assert(!"Unhandled TARGET in getArgTypeForStruct (with FEATURE_MULTIREG_ARGS=1)");
-#endif // _TARGET_XXX_
-
+#endif // _TARGET_XXX_
}
}
- else // (structSize > MAX_PASS_MULTIREG_BYTES)
+ else // (structSize > MAX_PASS_MULTIREG_BYTES)
{
// We have a (large) struct that can't be replaced with a "primitive" type
// and can't be passed in multiple registers
@@ -827,24 +833,23 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
#if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
- // Otherwise we pass this struct by value on the stack
+ // Otherwise we pass this struct by value on the stack
// setup wbPassType and useType indicate that this is passed by value according to the X86/ARM32 ABI
howToPassStruct = SPK_ByValue;
- useType = TYP_STRUCT;
+ useType = TYP_STRUCT;
#elif defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
// Otherwise we pass this struct by reference to a copy
// setup wbPassType and useType indicate that this is passed using one register (by reference to a copy)
howToPassStruct = SPK_ByReference;
- useType = TYP_UNKNOWN;
+ useType = TYP_UNKNOWN;
-#else // _TARGET_XXX_
+#else // _TARGET_XXX_
noway_assert(!"Unhandled TARGET in getArgTypeForStruct");
-#endif // _TARGET_XXX_
-
+#endif // _TARGET_XXX_
}
}
@@ -858,9 +863,9 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
}
//-----------------------------------------------------------------------------
-// getReturnTypeForStruct:
+// getReturnTypeForStruct:
// Get the type that is used to return values of the given struct type.
-// If you have already retrieved the struct size then it should be
+// If you have already retrieved the struct size then it should be
// passed as the optional third argument, as this allows us to avoid
// an extra call to getClassSize(clsHnd)
//
@@ -868,7 +873,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// clsHnd - the handle for the struct type
// wbReturnStruct - An "out" argument with information about how
// the struct is to be returned
-// structSize - the size of the struct type,
+// structSize - the size of the struct type,
// or zero if we should call getClassSize(clsHnd)
//
// Return Value:
@@ -888,7 +893,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// Notes:
// About HFA types:
// When the clsHnd is a one element HFA type then this method's return
-// value is the appropriate floating point primitive type and
+// value is the appropriate floating point primitive type and
// *wbReturnStruct is SPK_PrimitiveType.
// If there are two or more elements in the HFA type and the target supports
// multireg return types then the return value is TYP_STRUCT and
@@ -897,15 +902,15 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// the target doesn't support multreg return types then it is treated
// as if it wasn't an HFA type.
// About returning TYP_STRUCT:
-// Whenever this method's return value is TYP_STRUCT it always means
+// Whenever this method's return value is TYP_STRUCT it always means
// that multiple registers are used to return this struct.
//
-var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
- structPassingKind* wbReturnStruct,
- unsigned structSize /* = 0 */)
+var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
+ structPassingKind* wbReturnStruct,
+ unsigned structSize /* = 0 */)
{
- var_types useType = TYP_UNKNOWN;
- structPassingKind howToReturnStruct = SPK_Unknown; // We must change this before we return
+ var_types useType = TYP_UNKNOWN;
+ structPassingKind howToReturnStruct = SPK_Unknown; // We must change this before we return
assert(clsHnd != NO_CLASS_HANDLE);
@@ -934,7 +939,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
#else // not UNIX_AMD64
// The largest primitive type is 8 bytes (TYP_DOUBLE)
- // so we can skip calling getPrimitiveTypeForStruct when we
+ // so we can skip calling getPrimitiveTypeForStruct when we
// have a struct that is larger than that.
//
if (structSize <= sizeof(double))
@@ -945,7 +950,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
}
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
-
+
#ifdef _TARGET_64BIT_
// Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled
//
@@ -953,13 +958,10 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// because when HFA are enabled, normally we would use two FP registers to pass or return it
//
// But if we don't have support for multiple register return types, we have to change this.
- // Since we what we have an 8-byte struct (float + float) we change useType to TYP_I_IMPL
+ // Since we what we have an 8-byte struct (float + float) we change useType to TYP_I_IMPL
// so that the struct is returned instead using an 8-byte integer register.
//
- if ((FEATURE_MULTIREG_RET == 0) &&
- (useType == TYP_UNKNOWN) &&
- (structSize == (2 * sizeof(float))) &&
- IsHfa(clsHnd) )
+ if ((FEATURE_MULTIREG_RET == 0) && (useType == TYP_UNKNOWN) && (structSize == (2 * sizeof(float))) && IsHfa(clsHnd))
{
useType = TYP_I_IMPL;
}
@@ -972,7 +974,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// Yes, we should use the "primitive" type in 'useType'
howToReturnStruct = SPK_PrimitiveType;
}
- else // We can't replace the struct with a "primitive" type
+ else // We can't replace the struct with a "primitive" type
{
// See if we can return this struct by value, possibly in multiple registers
// or if we should return it using a return buffer register
@@ -988,9 +990,9 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// setup wbPassType and useType indicate that this is returned by value as an HFA
// using multiple registers
howToReturnStruct = SPK_ByValueAsHfa;
- useType = TYP_STRUCT;
+ useType = TYP_STRUCT;
}
- else // Not an HFA struct type
+ else // Not an HFA struct type
{
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -1000,7 +1002,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
{
// setup wbPassType and useType indicate that this is returned by value in multiple registers
howToReturnStruct = SPK_ByValue;
- useType = TYP_STRUCT;
+ useType = TYP_STRUCT;
assert(structDesc.passedInRegisters == true);
}
else
@@ -1010,11 +1012,11 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// setup wbPassType and useType indicate that this is return using a return buffer register
// (reference to a return buffer)
howToReturnStruct = SPK_ByReference;
- useType = TYP_UNKNOWN;
+ useType = TYP_UNKNOWN;
assert(structDesc.passedInRegisters == false);
}
-#elif defined(_TARGET_ARM64_)
+#elif defined(_TARGET_ARM64_)
// Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct
assert(structSize > TARGET_POINTER_SIZE);
@@ -1025,7 +1027,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
{
// setup wbPassType and useType indicate that this is return by value in multiple registers
howToReturnStruct = SPK_ByValue;
- useType = TYP_STRUCT;
+ useType = TYP_STRUCT;
}
else // a structSize that is 17-32 bytes in size
{
@@ -1033,26 +1035,25 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// setup wbPassType and useType indicate that this is returned using a return buffer register
// (reference to a return buffer)
howToReturnStruct = SPK_ByReference;
- useType = TYP_UNKNOWN;
+ useType = TYP_UNKNOWN;
}
-#elif defined(_TARGET_ARM_) || defined(_TARGET_X86_)
+#elif defined(_TARGET_ARM_) || defined(_TARGET_X86_)
// Otherwise we return this struct using a return buffer
// setup wbPassType and useType indicate that this is returned using a return buffer register
// (reference to a return buffer)
howToReturnStruct = SPK_ByReference;
- useType = TYP_UNKNOWN;
+ useType = TYP_UNKNOWN;
-#else // _TARGET_XXX_
+#else // _TARGET_XXX_
noway_assert(!"Unhandled TARGET in getReturnTypeForStruct (with FEATURE_MULTIREG_ARGS=1)");
-#endif // _TARGET_XXX_
-
+#endif // _TARGET_XXX_
}
}
- else // (structSize > MAX_RET_MULTIREG_BYTES) || (FEATURE_MULTIREG_RET == 0)
+ else // (structSize > MAX_RET_MULTIREG_BYTES) || (FEATURE_MULTIREG_RET == 0)
{
// We have a (large) struct that can't be replaced with a "primitive" type
// and can't be returned in multiple registers
@@ -1061,7 +1062,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// setup wbPassType and useType indicate that this is returned using a return buffer register
// (reference to a return buffer)
howToReturnStruct = SPK_ByReference;
- useType = TYP_UNKNOWN;
+ useType = TYP_UNKNOWN;
}
}
@@ -1080,41 +1081,38 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
#if DATAFLOW_ITER
-unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow
-unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow
+unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow
+unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow
#endif // DATAFLOW_ITER
-
-#if MEASURE_BLOCK_SIZE
-size_t genFlowNodeSize;
-size_t genFlowNodeCnt;
+#if MEASURE_BLOCK_SIZE
+size_t genFlowNodeSize;
+size_t genFlowNodeCnt;
#endif // MEASURE_BLOCK_SIZE
-
/*****************************************************************************/
// We keep track of methods we've already compiled.
-
/*****************************************************************************
* Declare the statics
*/
#ifdef DEBUG
/* static */
-unsigned Compiler::s_compMethodsCount = 0; // to produce unique label names
+unsigned Compiler::s_compMethodsCount = 0; // to produce unique label names
/* static */
-bool Compiler::s_dspMemStats = false;
+bool Compiler::s_dspMemStats = false;
#endif
#ifndef DEBUGGING_SUPPORT
/* static */
-const bool Compiler::Options::compDbgCode = false;
+const bool Compiler::Options::compDbgCode = false;
#endif
#ifndef PROFILING_SUPPORTED
-const bool Compiler::Options::compNoPInvokeInlineCB = false;
+const bool Compiler::Options::compNoPInvokeInlineCB = false;
#endif
/*****************************************************************************
@@ -1123,12 +1121,10 @@ const bool Compiler::Options::compNoPInvokeInlineCB = false;
*/
/* static */
-void Compiler::compStartup()
+void Compiler::compStartup()
{
#if DISPLAY_SIZES
- grossVMsize =
- grossNCsize =
- totalNCsize = 0;
+ grossVMsize = grossNCsize = totalNCsize = 0;
#endif // DISPLAY_SIZES
// Initialize the JIT's allocator.
@@ -1160,12 +1156,12 @@ void Compiler::compStartup()
*/
/* static */
-void Compiler::compShutdown()
+void Compiler::compShutdown()
{
#ifdef ALT_JIT
if (s_pAltJitExcludeAssembliesList != nullptr)
{
- s_pAltJitExcludeAssembliesList->~AssemblyNamesList2(); // call the destructor
+ s_pAltJitExcludeAssembliesList->~AssemblyNamesList2(); // call the destructor
s_pAltJitExcludeAssembliesList = nullptr;
}
#endif // ALT_JIT
@@ -1182,7 +1178,7 @@ void Compiler::compShutdown()
#endif // defined(DEBUG) || defined(INLINE_DATA)
#if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS
- if (genMethodCnt == 0)
+ if (genMethodCnt == 0)
{
return;
}
@@ -1204,77 +1200,51 @@ void Compiler::compShutdown()
#endif // FEATURE_JIT_METHOD_PERF
#if FUNC_INFO_LOGGING
- if (compJitFuncInfoFile != NULL)
+ if (compJitFuncInfoFile != nullptr)
{
fclose(compJitFuncInfoFile);
- compJitFuncInfoFile = NULL;
+ compJitFuncInfoFile = nullptr;
}
#endif // FUNC_INFO_LOGGING
#if COUNT_RANGECHECKS
- if (optRangeChkAll > 0)
+ if (optRangeChkAll > 0)
{
- fprintf(fout,
- "Removed %u of %u range checks\n",
- optRangeChkRmv,
- optRangeChkAll);
+ fprintf(fout, "Removed %u of %u range checks\n", optRangeChkRmv, optRangeChkAll);
}
#endif // COUNT_RANGECHECKS
-#if DISPLAY_SIZES
+#if DISPLAY_SIZES
- if (grossVMsize && grossNCsize)
+ if (grossVMsize && grossNCsize)
{
fprintf(fout, "\n");
fprintf(fout, "--------------------------------------\n");
fprintf(fout, "Function and GC info size stats\n");
fprintf(fout, "--------------------------------------\n");
- fprintf(fout,
- "[%7u VM, %8u %6s %4u%%] %s\n",
- grossVMsize,
- grossNCsize,
- Target::g_tgtCPUName,
- 100 * grossNCsize / grossVMsize,
- "Total (excluding GC info)");
-
+ fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, grossNCsize, Target::g_tgtCPUName,
+ 100 * grossNCsize / grossVMsize, "Total (excluding GC info)");
- fprintf(fout,
- "[%7u VM, %8u %6s %4u%%] %s\n",
- grossVMsize,
- totalNCsize,
- Target::g_tgtCPUName,
- 100 * totalNCsize / grossVMsize,
- "Total (including GC info)");
+ fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n", grossVMsize, totalNCsize, Target::g_tgtCPUName,
+ 100 * totalNCsize / grossVMsize, "Total (including GC info)");
- if (gcHeaderISize || gcHeaderNSize)
+ if (gcHeaderISize || gcHeaderNSize)
{
fprintf(fout, "\n");
- fprintf(fout,
- "GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n",
- gcHeaderISize + gcPtrMapISize,
- gcHeaderNSize + gcPtrMapNSize,
- totalNCsize - grossNCsize,
- 100 * (totalNCsize - grossNCsize) / grossVMsize,
- 100 * (totalNCsize - grossNCsize) / grossNCsize,
+ fprintf(fout, "GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n",
+ gcHeaderISize + gcPtrMapISize, gcHeaderNSize + gcPtrMapNSize, totalNCsize - grossNCsize,
+ 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize,
Target::g_tgtCPUName);
- fprintf(fout,
- "GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n",
- gcHeaderISize,
- gcHeaderNSize,
- gcHeaderISize + gcHeaderNSize,
- (float)gcHeaderISize / (genMethodICnt + 0.001),
+ fprintf(fout, "GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcHeaderISize,
+ gcHeaderNSize, gcHeaderISize + gcHeaderNSize, (float)gcHeaderISize / (genMethodICnt + 0.001),
(float)gcHeaderNSize / (genMethodNCnt + 0.001),
(float)(gcHeaderISize + gcHeaderNSize) / genMethodCnt);
- fprintf(fout,
- "GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n",
- gcPtrMapISize,
- gcPtrMapNSize,
- gcPtrMapISize + gcPtrMapNSize,
- (float)gcPtrMapISize / (genMethodICnt + 0.001),
+ fprintf(fout, "GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n", gcPtrMapISize,
+ gcPtrMapNSize, gcPtrMapISize + gcPtrMapNSize, (float)gcPtrMapISize / (genMethodICnt + 0.001),
(float)gcPtrMapNSize / (genMethodNCnt + 0.001),
(float)(gcPtrMapISize + gcPtrMapNSize) / genMethodCnt);
}
@@ -1282,27 +1252,20 @@ void Compiler::compShutdown()
{
fprintf(fout, "\n");
- fprintf(fout,
- "GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n",
- totalNCsize - grossNCsize,
- 100 * (totalNCsize - grossNCsize) / grossVMsize,
- 100 * (totalNCsize - grossNCsize) / grossNCsize,
- Target::g_tgtCPUName);
+ fprintf(fout, "GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n",
+ totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize,
+ 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName);
}
-
-#ifdef DEBUG
-#if DOUBLE_ALIGN
- fprintf(fout,
- "%u out of %u methods generated with double-aligned stack\n",
- Compiler::s_lvaDoubleAlignedProcsCount,
- genMethodCnt);
+#ifdef DEBUG
+#if DOUBLE_ALIGN
+ fprintf(fout, "%u out of %u methods generated with double-aligned stack\n",
+ Compiler::s_lvaDoubleAlignedProcsCount, genMethodCnt);
#endif
#endif
-
}
-#endif // DISPLAY_SIZES
+#endif // DISPLAY_SIZES
#if CALL_ARG_STATS
compDispCallArgStats(fout);
@@ -1324,18 +1287,17 @@ void Compiler::compShutdown()
fprintf(fout, "--------------------------------------------------\n");
#endif // COUNT_BASIC_BLOCKS
-
#if COUNT_LOOPS
fprintf(fout, "\n");
fprintf(fout, "---------------------------------------------------\n");
fprintf(fout, "Loop stats\n");
fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout, "Total number of methods with loops is %5u\n", totalLoopMethods);
- fprintf(fout, "Total number of loops is %5u\n", totalLoopCount);
- fprintf(fout, "Maximum number of loops per method is %5u\n", maxLoopsPerMethod);
- fprintf(fout, "# of methods overflowing nat loop table is %5u\n", totalLoopOverflows);
- fprintf(fout, "Total number of 'unnatural' loops is %5u\n", totalUnnatLoopCount);
+ fprintf(fout, "Total number of methods with loops is %5u\n", totalLoopMethods);
+ fprintf(fout, "Total number of loops is %5u\n", totalLoopCount);
+ fprintf(fout, "Maximum number of loops per method is %5u\n", maxLoopsPerMethod);
+ fprintf(fout, "# of methods overflowing nat loop table is %5u\n", totalLoopOverflows);
+ fprintf(fout, "Total number of 'unnatural' loops is %5u\n", totalUnnatLoopCount);
fprintf(fout, "# of methods overflowing unnat loop limit is %5u\n", totalUnnatLoopOverflows);
fprintf(fout, "Total number of loops with an iterator is %5u\n", iterLoopCount);
fprintf(fout, "Total number of loops with a simple iterator is %5u\n", simpleTestLoopCount);
@@ -1361,23 +1323,21 @@ void Compiler::compShutdown()
#endif // DATAFLOW_ITER
-#if MEASURE_NODE_SIZE
+#if MEASURE_NODE_SIZE
fprintf(fout, "\n");
fprintf(fout, "---------------------------------------------------\n");
fprintf(fout, "GenTree node allocation stats\n");
fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout,
- "Allocated %6u tree nodes (%7u bytes total, avg %4u bytes per method)\n",
- genNodeSizeStats.genTreeNodeCnt,
- genNodeSizeStats.genTreeNodeSize,
+ fprintf(fout, "Allocated %6u tree nodes (%7u bytes total, avg %4u bytes per method)\n",
+ genNodeSizeStats.genTreeNodeCnt, genNodeSizeStats.genTreeNodeSize,
genNodeSizeStats.genTreeNodeSize / genMethodCnt);
- fprintf(fout,
- "Allocated %7u bytes of unused tree node space (%3.2f%%)\n",
+ fprintf(fout, "Allocated %7u bytes of unused tree node space (%3.2f%%)\n",
genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize,
- (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) / genNodeSizeStats.genTreeNodeSize);
+ (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) /
+ genNodeSizeStats.genTreeNodeSize);
fprintf(fout, "\n");
fprintf(fout, "---------------------------------------------------\n");
@@ -1391,23 +1351,17 @@ void Compiler::compShutdown()
#endif // MEASURE_NODE_SIZE
-#if MEASURE_BLOCK_SIZE
+#if MEASURE_BLOCK_SIZE
fprintf(fout, "\n");
fprintf(fout, "---------------------------------------------------\n");
fprintf(fout, "BasicBlock and flowList/BasicBlockList allocation stats\n");
fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout,
- "Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n",
- BasicBlock::s_Count,
- BasicBlock::s_Size,
- BasicBlock::s_Size / genMethodCnt);
- fprintf(fout,
- "Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n",
- genFlowNodeCnt,
- genFlowNodeSize,
- genFlowNodeSize / genMethodCnt);
+ fprintf(fout, "Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n", BasicBlock::s_Count,
+ BasicBlock::s_Size, BasicBlock::s_Size / genMethodCnt);
+ fprintf(fout, "Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n", genFlowNodeCnt,
+ genFlowNodeSize, genFlowNodeSize / genMethodCnt);
#endif // MEASURE_BLOCK_SIZE
@@ -1444,26 +1398,22 @@ void Compiler::compShutdown()
fprintf(fout, "GC pointer table stats\n");
fprintf(fout, "---------------------------------------------------\n");
- fprintf(fout,
- "Reg pointer descriptor size (internal): %8u (avg %4u per method)\n",
- GCInfo::s_gcRegPtrDscSize,
+ fprintf(fout, "Reg pointer descriptor size (internal): %8u (avg %4u per method)\n", GCInfo::s_gcRegPtrDscSize,
GCInfo::s_gcRegPtrDscSize / genMethodCnt);
- fprintf(fout,
- "Total pointer table size: %8u (avg %4u per method)\n",
- GCInfo::s_gcTotalPtrTabSize,
+ fprintf(fout, "Total pointer table size: %8u (avg %4u per method)\n", GCInfo::s_gcTotalPtrTabSize,
GCInfo::s_gcTotalPtrTabSize / genMethodCnt);
#endif // MEASURE_PTRTAB_SIZE
#if MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES
- if (genMethodCnt != 0)
+ if (genMethodCnt != 0)
{
fprintf(fout, "\n");
fprintf(fout, "A total of %6u methods compiled", genMethodCnt);
#if DISPLAY_SIZES
- if (genMethodICnt || genMethodNCnt)
+ if (genMethodICnt || genMethodNCnt)
{
fprintf(fout, " (%u interruptible, %u non-interruptible)", genMethodICnt, genMethodNCnt);
}
@@ -1498,10 +1448,10 @@ void Compiler::compShutdown()
*/
/* static */
-void Compiler::compDisplayStaticSizes(FILE* fout)
+void Compiler::compDisplayStaticSizes(FILE* fout)
{
-#if MEASURE_NODE_SIZE
+#if MEASURE_NODE_SIZE
/*
IMPORTANT: Use the following code to check the alignment of
GenTree members (in a retail build, of course).
@@ -1510,36 +1460,47 @@ void Compiler::compDisplayStaticSizes(FILE* fout)
GenTree* gtDummy = nullptr;
fprintf(fout, "\n");
- fprintf(fout, "Offset / size of gtOper = %2u / %2u\n", offsetof(GenTree, gtOper ), sizeof(gtDummy->gtOper ));
- fprintf(fout, "Offset / size of gtType = %2u / %2u\n", offsetof(GenTree, gtType ), sizeof(gtDummy->gtType ));
+ fprintf(fout, "Offset / size of gtOper = %2u / %2u\n", offsetof(GenTree, gtOper), sizeof(gtDummy->gtOper));
+ fprintf(fout, "Offset / size of gtType = %2u / %2u\n", offsetof(GenTree, gtType), sizeof(gtDummy->gtType));
#if FEATURE_ANYCSE
- fprintf(fout, "Offset / size of gtCSEnum = %2u / %2u\n", offsetof(GenTree, gtCSEnum ), sizeof(gtDummy->gtCSEnum ));
+ fprintf(fout, "Offset / size of gtCSEnum = %2u / %2u\n", offsetof(GenTree, gtCSEnum),
+ sizeof(gtDummy->gtCSEnum));
#endif // FEATURE_ANYCSE
#if ASSERTION_PROP
- fprintf(fout, "Offset / size of gtAssertionNum = %2u / %2u\n", offsetof(GenTree, gtAssertionNum), sizeof(gtDummy->gtAssertionNum));
+ fprintf(fout, "Offset / size of gtAssertionNum = %2u / %2u\n", offsetof(GenTree, gtAssertionNum),
+ sizeof(gtDummy->gtAssertionNum));
#endif // ASSERTION_PROP
#if FEATURE_STACK_FP_X87
- fprintf(fout, "Offset / size of gtFPlvl = %2u / %2u\n", offsetof(GenTree, gtFPlvl ), sizeof(gtDummy->gtFPlvl ));
+ fprintf(fout, "Offset / size of gtFPlvl = %2u / %2u\n", offsetof(GenTree, gtFPlvl),
+ sizeof(gtDummy->gtFPlvl));
#endif // FEATURE_STACK_FP_X87
- // TODO: The section that report GenTree sizes should be made into a public static member function of the GenTree class (see https://github.com/dotnet/coreclr/pull/493)
- // fprintf(fout, "Offset / size of gtCostEx = %2u / %2u\n", offsetof(GenTree, _gtCostEx ), sizeof(gtDummy->_gtCostEx ));
- // fprintf(fout, "Offset / size of gtCostSz = %2u / %2u\n", offsetof(GenTree, _gtCostSz ), sizeof(gtDummy->_gtCostSz ));
- fprintf(fout, "Offset / size of gtFlags = %2u / %2u\n", offsetof(GenTree, gtFlags ), sizeof(gtDummy->gtFlags ));
- fprintf(fout, "Offset / size of gtVNPair = %2u / %2u\n", offsetof(GenTree, gtVNPair ), sizeof(gtDummy->gtVNPair ));
- fprintf(fout, "Offset / size of gtRsvdRegs = %2u / %2u\n", offsetof(GenTree, gtRsvdRegs ), sizeof(gtDummy->gtRsvdRegs ));
+ // TODO: The section that report GenTree sizes should be made into a public static member function of the GenTree
+ // class (see https://github.com/dotnet/coreclr/pull/493)
+ // fprintf(fout, "Offset / size of gtCostEx = %2u / %2u\n", offsetof(GenTree, _gtCostEx ),
+ // sizeof(gtDummy->_gtCostEx ));
+ // fprintf(fout, "Offset / size of gtCostSz = %2u / %2u\n", offsetof(GenTree, _gtCostSz ),
+ // sizeof(gtDummy->_gtCostSz ));
+ fprintf(fout, "Offset / size of gtFlags = %2u / %2u\n", offsetof(GenTree, gtFlags),
+ sizeof(gtDummy->gtFlags));
+ fprintf(fout, "Offset / size of gtVNPair = %2u / %2u\n", offsetof(GenTree, gtVNPair),
+ sizeof(gtDummy->gtVNPair));
+ fprintf(fout, "Offset / size of gtRsvdRegs = %2u / %2u\n", offsetof(GenTree, gtRsvdRegs),
+ sizeof(gtDummy->gtRsvdRegs));
#ifdef LEGACY_BACKEND
- fprintf(fout, "Offset / size of gtUsedRegs = %2u / %2u\n", offsetof(GenTree, gtUsedRegs ), sizeof(gtDummy->gtUsedRegs ));
+ fprintf(fout, "Offset / size of gtUsedRegs = %2u / %2u\n", offsetof(GenTree, gtUsedRegs),
+ sizeof(gtDummy->gtUsedRegs));
#endif // LEGACY_BACKEND
#ifndef LEGACY_BACKEND
- fprintf(fout, "Offset / size of gtLsraInfo = %2u / %2u\n", offsetof(GenTree, gtLsraInfo ), sizeof(gtDummy->gtLsraInfo ));
+ fprintf(fout, "Offset / size of gtLsraInfo = %2u / %2u\n", offsetof(GenTree, gtLsraInfo),
+ sizeof(gtDummy->gtLsraInfo));
#endif // !LEGACY_BACKEND
- fprintf(fout, "Offset / size of gtNext = %2u / %2u\n", offsetof(GenTree, gtNext ), sizeof(gtDummy->gtNext ));
- fprintf(fout, "Offset / size of gtPrev = %2u / %2u\n", offsetof(GenTree, gtPrev ), sizeof(gtDummy->gtPrev ));
+ fprintf(fout, "Offset / size of gtNext = %2u / %2u\n", offsetof(GenTree, gtNext), sizeof(gtDummy->gtNext));
+ fprintf(fout, "Offset / size of gtPrev = %2u / %2u\n", offsetof(GenTree, gtPrev), sizeof(gtDummy->gtPrev));
fprintf(fout, "\n");
-#if SMALL_TREE_NODES
+#if SMALL_TREE_NODES
fprintf(fout, "Small tree node size = %3u\n", TREE_NODE_SZ_SMALL);
-#endif // SMALL_TREE_NODES
+#endif // SMALL_TREE_NODES
fprintf(fout, "Large tree node size = %3u\n", TREE_NODE_SZ_LARGE);
fprintf(fout, "Size of GenTree = %3u\n", sizeof(GenTree));
fprintf(fout, "Size of GenTreeUnOp = %3u\n", sizeof(GenTreeUnOp));
@@ -1586,72 +1547,117 @@ void Compiler::compDisplayStaticSizes(FILE* fout)
fprintf(fout, "\n");
#endif // MEASURE_NODE_SIZE
-#if MEASURE_BLOCK_SIZE
+#if MEASURE_BLOCK_SIZE
BasicBlock* bbDummy = nullptr;
fprintf(fout, "\n");
- fprintf(fout, "Offset / size of bbNext = %3u / %3u\n", offsetof(BasicBlock, bbNext ), sizeof(bbDummy->bbNext ));
- fprintf(fout, "Offset / size of bbNum = %3u / %3u\n", offsetof(BasicBlock, bbNum ), sizeof(bbDummy->bbNum ));
- fprintf(fout, "Offset / size of bbPostOrderNum = %3u / %3u\n", offsetof(BasicBlock, bbPostOrderNum ), sizeof(bbDummy->bbPostOrderNum ));
- fprintf(fout, "Offset / size of bbRefs = %3u / %3u\n", offsetof(BasicBlock, bbRefs ), sizeof(bbDummy->bbRefs ));
- fprintf(fout, "Offset / size of bbFlags = %3u / %3u\n", offsetof(BasicBlock, bbFlags ), sizeof(bbDummy->bbFlags ));
- fprintf(fout, "Offset / size of bbWeight = %3u / %3u\n", offsetof(BasicBlock, bbWeight ), sizeof(bbDummy->bbWeight ));
- fprintf(fout, "Offset / size of bbJumpKind = %3u / %3u\n", offsetof(BasicBlock, bbJumpKind ), sizeof(bbDummy->bbJumpKind ));
- fprintf(fout, "Offset / size of bbJumpOffs = %3u / %3u\n", offsetof(BasicBlock, bbJumpOffs ), sizeof(bbDummy->bbJumpOffs ));
- fprintf(fout, "Offset / size of bbJumpDest = %3u / %3u\n", offsetof(BasicBlock, bbJumpDest ), sizeof(bbDummy->bbJumpDest ));
- fprintf(fout, "Offset / size of bbJumpSwt = %3u / %3u\n", offsetof(BasicBlock, bbJumpSwt ), sizeof(bbDummy->bbJumpSwt ));
- fprintf(fout, "Offset / size of bbTreeList = %3u / %3u\n", offsetof(BasicBlock, bbTreeList ), sizeof(bbDummy->bbTreeList ));
- fprintf(fout, "Offset / size of bbEntryState = %3u / %3u\n", offsetof(BasicBlock, bbEntryState ), sizeof(bbDummy->bbEntryState ));
- fprintf(fout, "Offset / size of bbStkTempsIn = %3u / %3u\n", offsetof(BasicBlock, bbStkTempsIn ), sizeof(bbDummy->bbStkTempsIn ));
- fprintf(fout, "Offset / size of bbStkTempsOut = %3u / %3u\n", offsetof(BasicBlock, bbStkTempsOut ), sizeof(bbDummy->bbStkTempsOut ));
- fprintf(fout, "Offset / size of bbTryIndex = %3u / %3u\n", offsetof(BasicBlock, bbTryIndex ), sizeof(bbDummy->bbTryIndex ));
- fprintf(fout, "Offset / size of bbHndIndex = %3u / %3u\n", offsetof(BasicBlock, bbHndIndex ), sizeof(bbDummy->bbHndIndex ));
- fprintf(fout, "Offset / size of bbCatchTyp = %3u / %3u\n", offsetof(BasicBlock, bbCatchTyp ), sizeof(bbDummy->bbCatchTyp ));
- fprintf(fout, "Offset / size of bbStkDepth = %3u / %3u\n", offsetof(BasicBlock, bbStkDepth ), sizeof(bbDummy->bbStkDepth ));
- fprintf(fout, "Offset / size of bbFPinVars = %3u / %3u\n", offsetof(BasicBlock, bbFPinVars ), sizeof(bbDummy->bbFPinVars ));
- fprintf(fout, "Offset / size of bbPreds = %3u / %3u\n", offsetof(BasicBlock, bbPreds ), sizeof(bbDummy->bbPreds ));
- fprintf(fout, "Offset / size of bbReach = %3u / %3u\n", offsetof(BasicBlock, bbReach ), sizeof(bbDummy->bbReach ));
- fprintf(fout, "Offset / size of bbIDom = %3u / %3u\n", offsetof(BasicBlock, bbIDom ), sizeof(bbDummy->bbIDom ));
- fprintf(fout, "Offset / size of bbDfsNum = %3u / %3u\n", offsetof(BasicBlock, bbDfsNum ), sizeof(bbDummy->bbDfsNum ));
- fprintf(fout, "Offset / size of bbCodeOffs = %3u / %3u\n", offsetof(BasicBlock, bbCodeOffs ), sizeof(bbDummy->bbCodeOffs ));
- fprintf(fout, "Offset / size of bbCodeOffsEnd = %3u / %3u\n", offsetof(BasicBlock, bbCodeOffsEnd ), sizeof(bbDummy->bbCodeOffsEnd ));
- fprintf(fout, "Offset / size of bbVarUse = %3u / %3u\n", offsetof(BasicBlock, bbVarUse ), sizeof(bbDummy->bbVarUse ));
- fprintf(fout, "Offset / size of bbVarDef = %3u / %3u\n", offsetof(BasicBlock, bbVarDef ), sizeof(bbDummy->bbVarDef ));
- fprintf(fout, "Offset / size of bbVarTmp = %3u / %3u\n", offsetof(BasicBlock, bbVarTmp ), sizeof(bbDummy->bbVarTmp ));
- fprintf(fout, "Offset / size of bbLiveIn = %3u / %3u\n", offsetof(BasicBlock, bbLiveIn ), sizeof(bbDummy->bbLiveIn ));
- fprintf(fout, "Offset / size of bbLiveOut = %3u / %3u\n", offsetof(BasicBlock, bbLiveOut ), sizeof(bbDummy->bbLiveOut ));
- fprintf(fout, "Offset / size of bbHeapSsaPhiFunc = %3u / %3u\n", offsetof(BasicBlock, bbHeapSsaPhiFunc), sizeof(bbDummy->bbHeapSsaPhiFunc));
- fprintf(fout, "Offset / size of bbHeapSsaNumIn = %3u / %3u\n", offsetof(BasicBlock, bbHeapSsaNumIn ), sizeof(bbDummy->bbHeapSsaNumIn ));
- fprintf(fout, "Offset / size of bbHeapSsaNumOut = %3u / %3u\n", offsetof(BasicBlock, bbHeapSsaNumOut ), sizeof(bbDummy->bbHeapSsaNumOut ));
+ fprintf(fout, "Offset / size of bbNext = %3u / %3u\n", offsetof(BasicBlock, bbNext),
+ sizeof(bbDummy->bbNext));
+ fprintf(fout, "Offset / size of bbNum = %3u / %3u\n", offsetof(BasicBlock, bbNum),
+ sizeof(bbDummy->bbNum));
+ fprintf(fout, "Offset / size of bbPostOrderNum = %3u / %3u\n", offsetof(BasicBlock, bbPostOrderNum),
+ sizeof(bbDummy->bbPostOrderNum));
+ fprintf(fout, "Offset / size of bbRefs = %3u / %3u\n", offsetof(BasicBlock, bbRefs),
+ sizeof(bbDummy->bbRefs));
+ fprintf(fout, "Offset / size of bbFlags = %3u / %3u\n", offsetof(BasicBlock, bbFlags),
+ sizeof(bbDummy->bbFlags));
+ fprintf(fout, "Offset / size of bbWeight = %3u / %3u\n", offsetof(BasicBlock, bbWeight),
+ sizeof(bbDummy->bbWeight));
+ fprintf(fout, "Offset / size of bbJumpKind = %3u / %3u\n", offsetof(BasicBlock, bbJumpKind),
+ sizeof(bbDummy->bbJumpKind));
+ fprintf(fout, "Offset / size of bbJumpOffs = %3u / %3u\n", offsetof(BasicBlock, bbJumpOffs),
+ sizeof(bbDummy->bbJumpOffs));
+ fprintf(fout, "Offset / size of bbJumpDest = %3u / %3u\n", offsetof(BasicBlock, bbJumpDest),
+ sizeof(bbDummy->bbJumpDest));
+ fprintf(fout, "Offset / size of bbJumpSwt = %3u / %3u\n", offsetof(BasicBlock, bbJumpSwt),
+ sizeof(bbDummy->bbJumpSwt));
+ fprintf(fout, "Offset / size of bbTreeList = %3u / %3u\n", offsetof(BasicBlock, bbTreeList),
+ sizeof(bbDummy->bbTreeList));
+ fprintf(fout, "Offset / size of bbEntryState = %3u / %3u\n", offsetof(BasicBlock, bbEntryState),
+ sizeof(bbDummy->bbEntryState));
+ fprintf(fout, "Offset / size of bbStkTempsIn = %3u / %3u\n", offsetof(BasicBlock, bbStkTempsIn),
+ sizeof(bbDummy->bbStkTempsIn));
+ fprintf(fout, "Offset / size of bbStkTempsOut = %3u / %3u\n", offsetof(BasicBlock, bbStkTempsOut),
+ sizeof(bbDummy->bbStkTempsOut));
+ fprintf(fout, "Offset / size of bbTryIndex = %3u / %3u\n", offsetof(BasicBlock, bbTryIndex),
+ sizeof(bbDummy->bbTryIndex));
+ fprintf(fout, "Offset / size of bbHndIndex = %3u / %3u\n", offsetof(BasicBlock, bbHndIndex),
+ sizeof(bbDummy->bbHndIndex));
+ fprintf(fout, "Offset / size of bbCatchTyp = %3u / %3u\n", offsetof(BasicBlock, bbCatchTyp),
+ sizeof(bbDummy->bbCatchTyp));
+ fprintf(fout, "Offset / size of bbStkDepth = %3u / %3u\n", offsetof(BasicBlock, bbStkDepth),
+ sizeof(bbDummy->bbStkDepth));
+ fprintf(fout, "Offset / size of bbFPinVars = %3u / %3u\n", offsetof(BasicBlock, bbFPinVars),
+ sizeof(bbDummy->bbFPinVars));
+ fprintf(fout, "Offset / size of bbPreds = %3u / %3u\n", offsetof(BasicBlock, bbPreds),
+ sizeof(bbDummy->bbPreds));
+ fprintf(fout, "Offset / size of bbReach = %3u / %3u\n", offsetof(BasicBlock, bbReach),
+ sizeof(bbDummy->bbReach));
+ fprintf(fout, "Offset / size of bbIDom = %3u / %3u\n", offsetof(BasicBlock, bbIDom),
+ sizeof(bbDummy->bbIDom));
+ fprintf(fout, "Offset / size of bbDfsNum = %3u / %3u\n", offsetof(BasicBlock, bbDfsNum),
+ sizeof(bbDummy->bbDfsNum));
+ fprintf(fout, "Offset / size of bbCodeOffs = %3u / %3u\n", offsetof(BasicBlock, bbCodeOffs),
+ sizeof(bbDummy->bbCodeOffs));
+ fprintf(fout, "Offset / size of bbCodeOffsEnd = %3u / %3u\n", offsetof(BasicBlock, bbCodeOffsEnd),
+ sizeof(bbDummy->bbCodeOffsEnd));
+ fprintf(fout, "Offset / size of bbVarUse = %3u / %3u\n", offsetof(BasicBlock, bbVarUse),
+ sizeof(bbDummy->bbVarUse));
+ fprintf(fout, "Offset / size of bbVarDef = %3u / %3u\n", offsetof(BasicBlock, bbVarDef),
+ sizeof(bbDummy->bbVarDef));
+ fprintf(fout, "Offset / size of bbVarTmp = %3u / %3u\n", offsetof(BasicBlock, bbVarTmp),
+ sizeof(bbDummy->bbVarTmp));
+ fprintf(fout, "Offset / size of bbLiveIn = %3u / %3u\n", offsetof(BasicBlock, bbLiveIn),
+ sizeof(bbDummy->bbLiveIn));
+ fprintf(fout, "Offset / size of bbLiveOut = %3u / %3u\n", offsetof(BasicBlock, bbLiveOut),
+ sizeof(bbDummy->bbLiveOut));
+ fprintf(fout, "Offset / size of bbHeapSsaPhiFunc = %3u / %3u\n", offsetof(BasicBlock, bbHeapSsaPhiFunc),
+ sizeof(bbDummy->bbHeapSsaPhiFunc));
+ fprintf(fout, "Offset / size of bbHeapSsaNumIn = %3u / %3u\n", offsetof(BasicBlock, bbHeapSsaNumIn),
+ sizeof(bbDummy->bbHeapSsaNumIn));
+ fprintf(fout, "Offset / size of bbHeapSsaNumOut = %3u / %3u\n", offsetof(BasicBlock, bbHeapSsaNumOut),
+ sizeof(bbDummy->bbHeapSsaNumOut));
#ifdef DEBUGGING_SUPPORT
- fprintf(fout, "Offset / size of bbScope = %3u / %3u\n", offsetof(BasicBlock, bbScope ), sizeof(bbDummy->bbScope ));
+ fprintf(fout, "Offset / size of bbScope = %3u / %3u\n", offsetof(BasicBlock, bbScope),
+ sizeof(bbDummy->bbScope));
#endif // DEBUGGING_SUPPORT
- fprintf(fout, "Offset / size of bbCseGen = %3u / %3u\n", offsetof(BasicBlock, bbCseGen ), sizeof(bbDummy->bbCseGen ));
- fprintf(fout, "Offset / size of bbCseIn = %3u / %3u\n", offsetof(BasicBlock, bbCseIn ), sizeof(bbDummy->bbCseIn ));
- fprintf(fout, "Offset / size of bbCseOut = %3u / %3u\n", offsetof(BasicBlock, bbCseOut ), sizeof(bbDummy->bbCseOut ));
+ fprintf(fout, "Offset / size of bbCseGen = %3u / %3u\n", offsetof(BasicBlock, bbCseGen),
+ sizeof(bbDummy->bbCseGen));
+ fprintf(fout, "Offset / size of bbCseIn = %3u / %3u\n", offsetof(BasicBlock, bbCseIn),
+ sizeof(bbDummy->bbCseIn));
+ fprintf(fout, "Offset / size of bbCseOut = %3u / %3u\n", offsetof(BasicBlock, bbCseOut),
+ sizeof(bbDummy->bbCseOut));
- fprintf(fout, "Offset / size of bbEmitCookie = %3u / %3u\n", offsetof(BasicBlock, bbEmitCookie ), sizeof(bbDummy->bbEmitCookie ));
+ fprintf(fout, "Offset / size of bbEmitCookie = %3u / %3u\n", offsetof(BasicBlock, bbEmitCookie),
+ sizeof(bbDummy->bbEmitCookie));
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- fprintf(fout, "Offset / size of bbUnwindNopEmitCookie = %3u / %3u\n", offsetof(BasicBlock, bbUnwindNopEmitCookie), sizeof(bbDummy->bbUnwindNopEmitCookie));
+ fprintf(fout, "Offset / size of bbUnwindNopEmitCookie = %3u / %3u\n", offsetof(BasicBlock, bbUnwindNopEmitCookie),
+ sizeof(bbDummy->bbUnwindNopEmitCookie));
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
#ifdef VERIFIER
- fprintf(fout, "Offset / size of bbStackIn = %3u / %3u\n", offsetof(BasicBlock, bbStackIn ), sizeof(bbDummy->bbStackIn ));
- fprintf(fout, "Offset / size of bbStackOut = %3u / %3u\n", offsetof(BasicBlock, bbStackOut ), sizeof(bbDummy->bbStackOut ));
- fprintf(fout, "Offset / size of bbTypesIn = %3u / %3u\n", offsetof(BasicBlock, bbTypesIn ), sizeof(bbDummy->bbTypesIn ));
- fprintf(fout, "Offset / size of bbTypesOut = %3u / %3u\n", offsetof(BasicBlock, bbTypesOut ), sizeof(bbDummy->bbTypesOut ));
+ fprintf(fout, "Offset / size of bbStackIn = %3u / %3u\n", offsetof(BasicBlock, bbStackIn),
+ sizeof(bbDummy->bbStackIn));
+ fprintf(fout, "Offset / size of bbStackOut = %3u / %3u\n", offsetof(BasicBlock, bbStackOut),
+ sizeof(bbDummy->bbStackOut));
+ fprintf(fout, "Offset / size of bbTypesIn = %3u / %3u\n", offsetof(BasicBlock, bbTypesIn),
+ sizeof(bbDummy->bbTypesIn));
+ fprintf(fout, "Offset / size of bbTypesOut = %3u / %3u\n", offsetof(BasicBlock, bbTypesOut),
+ sizeof(bbDummy->bbTypesOut));
#endif // VERIFIER
#if FEATURE_STACK_FP_X87
- fprintf(fout, "Offset / size of bbFPStateX87 = %3u / %3u\n", offsetof(BasicBlock, bbFPStateX87 ), sizeof(bbDummy->bbFPStateX87 ));
+ fprintf(fout, "Offset / size of bbFPStateX87 = %3u / %3u\n", offsetof(BasicBlock, bbFPStateX87),
+ sizeof(bbDummy->bbFPStateX87));
#endif // FEATURE_STACK_FP_X87
#ifdef DEBUG
- fprintf(fout, "Offset / size of bbLoopNum = %3u / %3u\n", offsetof(BasicBlock, bbLoopNum ), sizeof(bbDummy->bbLoopNum ));
+ fprintf(fout, "Offset / size of bbLoopNum = %3u / %3u\n", offsetof(BasicBlock, bbLoopNum),
+ sizeof(bbDummy->bbLoopNum));
#endif // DEBUG
fprintf(fout, "\n");
@@ -1662,7 +1668,6 @@ void Compiler::compDisplayStaticSizes(FILE* fout)
#if EMITTER_STATS
emitterStaticStats(fout);
#endif
-
}
/*****************************************************************************
@@ -1670,7 +1675,7 @@ void Compiler::compDisplayStaticSizes(FILE* fout)
* Constructor
*/
-void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inlineInfo)
+void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
{
assert(pAlloc);
compAllocator = pAlloc;
@@ -1679,7 +1684,7 @@ void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inl
InlineeCompiler = nullptr;
// Set the inline info.
- impInlineInfo = inlineInfo;
+ impInlineInfo = inlineInfo;
eeInfoInitialized = false;
@@ -1687,31 +1692,31 @@ void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inl
if (compIsForInlining())
{
- m_inlineStrategy = nullptr;
- compInlineResult = inlineInfo->inlineResult;
- compAsIAllocator = nullptr; // We shouldn't be using the compAsIAllocator for other than the root compiler.
+ m_inlineStrategy = nullptr;
+ compInlineResult = inlineInfo->inlineResult;
+ compAsIAllocator = nullptr; // We shouldn't be using the compAsIAllocator for other than the root compiler.
#if MEASURE_MEM_ALLOC
- compAsIAllocatorBitset = nullptr;
- compAsIAllocatorGC = nullptr;
- compAsIAllocatorLoopHoist = nullptr;
+ compAsIAllocatorBitset = nullptr;
+ compAsIAllocatorGC = nullptr;
+ compAsIAllocatorLoopHoist = nullptr;
#ifdef DEBUG
- compAsIAllocatorDebugOnly = nullptr;
+ compAsIAllocatorDebugOnly = nullptr;
#endif // DEBUG
#endif // MEASURE_MEM_ALLOC
- compQMarks = nullptr;
+ compQMarks = nullptr;
}
else
{
- m_inlineStrategy = new (this, CMK_Inlining) InlineStrategy(this);
- compInlineResult = nullptr;
- compAsIAllocator = new (this, CMK_Unknown) CompAllocator(this, CMK_AsIAllocator);
+ m_inlineStrategy = new (this, CMK_Inlining) InlineStrategy(this);
+ compInlineResult = nullptr;
+ compAsIAllocator = new (this, CMK_Unknown) CompAllocator(this, CMK_AsIAllocator);
#if MEASURE_MEM_ALLOC
- compAsIAllocatorBitset = new (this, CMK_Unknown) CompAllocator(this, CMK_bitset);
- compAsIAllocatorGC = new (this, CMK_Unknown) CompAllocator(this, CMK_GC);
- compAsIAllocatorLoopHoist = new (this, CMK_Unknown) CompAllocator(this, CMK_LoopHoist);
+ compAsIAllocatorBitset = new (this, CMK_Unknown) CompAllocator(this, CMK_bitset);
+ compAsIAllocatorGC = new (this, CMK_Unknown) CompAllocator(this, CMK_GC);
+ compAsIAllocatorLoopHoist = new (this, CMK_Unknown) CompAllocator(this, CMK_LoopHoist);
#ifdef DEBUG
- compAsIAllocatorDebugOnly = new (this, CMK_Unknown) CompAllocator(this, CMK_DebugOnly);
+ compAsIAllocatorDebugOnly = new (this, CMK_Unknown) CompAllocator(this, CMK_DebugOnly);
#endif // DEBUG
#endif // MEASURE_MEM_ALLOC
@@ -1725,10 +1730,10 @@ void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inl
// 2. NowayAsserts are tracked through telemetry.
// Note: JIT telemetry could gather data when compiler is not fully initialized.
// So you have to initialize the compiler variables you use for telemetry.
- assert((unsigned) PHASE_PRE_IMPORT == 0);
- previousCompletedPhase = PHASE_PRE_IMPORT;
- info.compILCodeSize = 0;
- info.compMethodHnd = nullptr;
+ assert((unsigned)PHASE_PRE_IMPORT == 0);
+ previousCompletedPhase = PHASE_PRE_IMPORT;
+ info.compILCodeSize = 0;
+ info.compMethodHnd = nullptr;
compJitTelemetry.Initialize(this);
#endif
@@ -1736,11 +1741,11 @@ void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inl
bRangeAllowStress = false;
#endif
- fgInit();
+ fgInit();
lvaInit();
if (!compIsForInlining())
- {
+ {
codeGen = getCodeGenerator(this);
#ifdef LEGACY_BACKEND
raInit();
@@ -1772,10 +1777,10 @@ void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inl
genMemStats.Init();
#endif // MEASURE_MEM_ALLOC
#if LOOP_HOIST_STATS
- m_loopsConsidered = 0;
+ m_loopsConsidered = 0;
m_curLoopHasHoistedExpression = false;
m_loopsWithHoistedExpressions = 0;
- m_totalHoistedExpressions = 0;
+ m_totalHoistedExpressions = 0;
#endif // LOOP_HOIST_STATS
#if MEASURE_NODE_SIZE
genNodeSizeStatsPerFunc.Init();
@@ -1795,34 +1800,34 @@ void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inl
compFloatingPointUsed = false;
compUnsafeCastUsed = false;
#if CPU_USES_BLOCK_MOVE
- compBlkOpUsed = false;
+ compBlkOpUsed = false;
#endif
#if FEATURE_STACK_FP_X87
compMayHaveTransitionBlocks = false;
#endif
compNeedsGSSecurityCookie = false;
- compGSReorderStackLayout = false;
+ compGSReorderStackLayout = false;
#if STACK_PROBES
- compStackProbePrologDone = false;
+ compStackProbePrologDone = false;
#endif
- compGeneratingProlog = false;
- compGeneratingEpilog = false;
+ compGeneratingProlog = false;
+ compGeneratingEpilog = false;
#ifndef LEGACY_BACKEND
- compLSRADone = false;
+ compLSRADone = false;
#endif // !LEGACY_BACKEND
- compRationalIRForm = false;
+ compRationalIRForm = false;
#ifdef DEBUG
- compCodeGenDone = false;
- compRegSetCheckLevel = 0;
- opts.compMinOptsIsUsed = false;
+ compCodeGenDone = false;
+ compRegSetCheckLevel = 0;
+ opts.compMinOptsIsUsed = false;
#endif
- opts.compMinOptsIsSet = false;
+ opts.compMinOptsIsSet = false;
- //Used by fgFindJumpTargets for inlining heuristics.
- opts.instrCount = 0;
+ // Used by fgFindJumpTargets for inlining heuristics.
+ opts.instrCount = 0;
// Used to track when we should consider running EarlyProp
optMethodFlags = 0;
@@ -1833,16 +1838,16 @@ void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inl
}
#ifdef DEBUG
- m_nodeTestData = nullptr;
- m_loopHoistCSEClass = FIRST_LOOP_HOIST_CSE_CLASS;
+ m_nodeTestData = nullptr;
+ m_loopHoistCSEClass = FIRST_LOOP_HOIST_CSE_CLASS;
#endif
- m_switchDescMap = nullptr;
- m_blockToEHPreds = nullptr;
- m_fieldSeqStore = nullptr;
- m_zeroOffsetFieldMap = nullptr;
- m_arrayInfoMap = nullptr;
- m_heapSsaMap = nullptr;
- m_refAnyClass = nullptr;
+ m_switchDescMap = nullptr;
+ m_blockToEHPreds = nullptr;
+ m_fieldSeqStore = nullptr;
+ m_zeroOffsetFieldMap = nullptr;
+ m_arrayInfoMap = nullptr;
+ m_heapSsaMap = nullptr;
+ m_refAnyClass = nullptr;
#ifdef DEBUG
if (!compIsForInlining())
@@ -1860,27 +1865,27 @@ void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inl
// check that HelperCallProperties are initialized
assert(s_helperCallProperties.IsPure(CORINFO_HELP_GETSHARED_GCSTATIC_BASE));
- assert(!s_helperCallProperties.IsPure(CORINFO_HELP_GETFIELDOBJ)); // quick sanity check
+ assert(!s_helperCallProperties.IsPure(CORINFO_HELP_GETFIELDOBJ)); // quick sanity check
// We start with the flow graph in tree-order
fgOrder = FGOrderTree;
#ifdef FEATURE_SIMD
// SIMD Types
- SIMDFloatHandle = nullptr;
- SIMDDoubleHandle = nullptr;
- SIMDIntHandle = nullptr;
- SIMDUShortHandle = nullptr;
- SIMDUByteHandle = nullptr;
- SIMDShortHandle = nullptr;
- SIMDByteHandle = nullptr;
- SIMDLongHandle = nullptr;
- SIMDUIntHandle = nullptr;
- SIMDULongHandle = nullptr;
- SIMDVector2Handle = nullptr;
- SIMDVector3Handle = nullptr;
- SIMDVector4Handle = nullptr;
- SIMDVectorHandle = nullptr;
+ SIMDFloatHandle = nullptr;
+ SIMDDoubleHandle = nullptr;
+ SIMDIntHandle = nullptr;
+ SIMDUShortHandle = nullptr;
+ SIMDUByteHandle = nullptr;
+ SIMDShortHandle = nullptr;
+ SIMDByteHandle = nullptr;
+ SIMDLongHandle = nullptr;
+ SIMDUIntHandle = nullptr;
+ SIMDULongHandle = nullptr;
+ SIMDVector2Handle = nullptr;
+ SIMDVector3Handle = nullptr;
+ SIMDVector4Handle = nullptr;
+ SIMDVectorHandle = nullptr;
#endif
#ifdef DEBUG
@@ -1895,12 +1900,12 @@ void Compiler::compInit(ArenaAllocator * pAlloc, InlineInfo * inl
* Destructor
*/
-void Compiler::compDone()
+void Compiler::compDone()
{
}
-void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
- void ** ppIndirection) /* OUT */
+void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
+ void** ppIndirection) /* OUT */
{
void* addr;
@@ -1917,15 +1922,19 @@ void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum,
return addr;
}
-unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd)
+unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd)
{
var_types sigType = genActualType(JITtype2varType(cit));
- unsigned sigSize;
+ unsigned sigSize;
sigSize = genTypeSize(sigType);
if (cit == CORINFO_TYPE_VALUECLASS)
+ {
sigSize = info.compCompHnd->getClassSize(clsHnd);
+ }
else if (cit == CORINFO_TYPE_REFANY)
+ {
sigSize = 2 * sizeof(void*);
+ }
return sigSize;
}
@@ -1935,9 +1944,11 @@ static bool DidComponentUnitTests = false;
void Compiler::compDoComponentUnitTestsOnce()
{
if (!JitConfig.RunComponentUnitTests())
+ {
return;
+ }
- if (!DidComponentUnitTests)
+ if (!DidComponentUnitTests)
{
DidComponentUnitTests = true;
ValueNumStore::RunTests(this);
@@ -1952,11 +1963,11 @@ void Compiler::compDoComponentUnitTestsOnce()
*/
/* static */
-void * Compiler::compGetMemCallback(void *p, size_t size, CompMemKind cmk)
+void* Compiler::compGetMemCallback(void* p, size_t size, CompMemKind cmk)
{
assert(p);
- return ((Compiler *)p)->compGetMem(size, cmk);
+ return ((Compiler*)p)->compGetMem(size, cmk);
}
/*****************************************************************************
@@ -1968,7 +1979,7 @@ void * Compiler::compGetMemCallback(void *p, size_t size, CompM
#ifdef DEBUG
-void * Compiler::compGetMem(size_t sz, CompMemKind cmk)
+void* Compiler::compGetMem(size_t sz, CompMemKind cmk)
{
#if 0
#if SMALL_TREE_NODES
@@ -1989,11 +2000,11 @@ void * Compiler::compGetMem(size_t sz, CompMemKind cmk)
genMemStats.AddAlloc(sz, cmk);
#endif
- void * ptr = compAllocator->allocateMemory(sz);
+ void* ptr = compAllocator->allocateMemory(sz);
// Verify that the current block is aligned. Only then will the next
// block allocated be on an aligned boundary.
- assert ((size_t(ptr) & (sizeof(size_t)- 1)) == 0);
+ assert((size_t(ptr) & (sizeof(size_t) - 1)) == 0);
return ptr;
}
@@ -2004,7 +2015,7 @@ void * Compiler::compGetMem(size_t sz, CompMemKind cmk)
#ifdef DEBUG
/*****************************************************************************/
-VarName Compiler::compVarName(regNumber reg, bool isFloatReg)
+VarName Compiler::compVarName(regNumber reg, bool isFloatReg)
{
if (isFloatReg)
{
@@ -2019,30 +2030,29 @@ VarName Compiler::compVarName(regNumber reg, bool isFloatReg)
assert(genIsValidReg(reg));
}
- if ((info.compVarScopesCount > 0) && compCurBB && opts.varNames)
+ if ((info.compVarScopesCount > 0) && compCurBB && opts.varNames)
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
/* Look for the matching register */
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
/* If the variable is not in a register, or not in the register we're looking for, quit. */
/* Also, if it is a compiler generated variable (i.e. slot# > info.compVarScopesCount), don't bother. */
- if ((varDsc->lvRegister != 0) &&
- (varDsc->lvRegNum == reg) &&
- (varDsc->IsFloatRegType() || !isFloatReg) &&
- (varDsc->lvSlotNum < info.compVarScopesCount))
+ if ((varDsc->lvRegister != 0) && (varDsc->lvRegNum == reg) && (varDsc->IsFloatRegType() || !isFloatReg) &&
+ (varDsc->lvSlotNum < info.compVarScopesCount))
{
/* check if variable in that register is live */
if (VarSetOps::IsMember(this, compCurLife, varDsc->lvVarIndex))
{
/* variable is live - find the corresponding slot */
- VarScopeDsc* varScope = compFindLocalVar(varDsc->lvSlotNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd);
+ VarScopeDsc* varScope =
+ compFindLocalVar(varDsc->lvSlotNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd);
if (varScope)
+ {
return varScope->vsdName;
+ }
}
}
}
@@ -2051,7 +2061,7 @@ VarName Compiler::compVarName(regNumber reg, bool isFloatReg)
// maybe var is marked dead, but still used (last use)
if (!isFloatReg && codeGen->regSet.rsUsedTree[reg] != NULL)
{
- GenTreePtr nodePtr;
+ GenTreePtr nodePtr;
if (GenTree::OperIsUnary(codeGen->regSet.rsUsedTree[reg]->OperGet()))
{
@@ -2063,21 +2073,21 @@ VarName Compiler::compVarName(regNumber reg, bool isFloatReg)
nodePtr = codeGen->regSet.rsUsedTree[reg];
}
- if ((nodePtr->gtOper == GT_REG_VAR) &&
- (nodePtr->gtRegVar.gtRegNum == reg) &&
+ if ((nodePtr->gtOper == GT_REG_VAR) && (nodePtr->gtRegVar.gtRegNum == reg) &&
(nodePtr->gtRegVar.gtLclNum < info.compVarScopesCount))
{
- VarScopeDsc* varScope = compFindLocalVar(nodePtr->gtRegVar.gtLclNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd);
+ VarScopeDsc* varScope =
+ compFindLocalVar(nodePtr->gtRegVar.gtLclNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd);
if (varScope)
return varScope->vsdName;
}
}
#endif // LEGACY_BACKEND
}
- return NULL;
+ return nullptr;
}
-const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloatReg)
+const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloatReg)
{
#ifdef _TARGET_ARM_
@@ -2090,13 +2100,14 @@ const char* Compiler::compRegVarName(regNumber reg, bool displayVar, boo
if (varName)
{
- const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1;
- static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when have 2 consecutive calls before printing
- static int index = 0; // for circular index into the name array
+ const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1;
+ static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when have 2
+ // consecutive calls before printing
+ static int index = 0; // for circular index into the name array
- index = (index+1)%2; // circular reuse of index
- sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "%s'%s'",
- getRegName(reg, isFloatReg), VarNameToStr(varName));
+ index = (index + 1) % 2; // circular reuse of index
+ sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "%s'%s'", getRegName(reg, isFloatReg),
+ VarNameToStr(varName));
return nameVarReg[index];
}
@@ -2108,10 +2119,9 @@ const char* Compiler::compRegVarName(regNumber reg, bool displayVar, boo
return getRegName(reg, isFloatReg);
}
-
#define MAX_REG_PAIR_NAME_LENGTH 10
-const char * Compiler::compRegPairName(regPairNo regPair)
+const char* Compiler::compRegPairName(regPairNo regPair)
{
static char regNameLong[MAX_REG_PAIR_NAME_LENGTH];
@@ -2120,8 +2130,7 @@ const char * Compiler::compRegPairName(regPairNo regPair)
return "NA|NA";
}
- assert(regPair >= REG_PAIR_FIRST &&
- regPair <= REG_PAIR_LAST);
+ assert(regPair >= REG_PAIR_FIRST && regPair <= REG_PAIR_LAST);
strcpy_s(regNameLong, sizeof(regNameLong), compRegVarName(genRegPairLo(regPair)));
strcat_s(regNameLong, sizeof(regNameLong), "|");
@@ -2129,11 +2138,12 @@ const char * Compiler::compRegPairName(regPairNo regPair)
return regNameLong;
}
-
-const char * Compiler::compRegNameForSize(regNumber reg, size_t size)
+const char* Compiler::compRegNameForSize(regNumber reg, size_t size)
{
if (size == 0 || size >= 4)
+ {
return compRegVarName(reg, true);
+ }
// clang-format off
static
@@ -2160,20 +2170,21 @@ const char * Compiler::compRegNameForSize(regNumber reg, size_t size)
};
// clang-format on
- assert(isByteReg (reg));
+ assert(isByteReg(reg));
assert(genRegMask(reg) & RBM_BYTE_REGS);
assert(size == 1 || size == 2);
- return sizeNames[reg][size-1];
+ return sizeNames[reg][size - 1];
}
-const char * Compiler::compFPregVarName(unsigned fpReg, bool displayVar)
-{
- const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1;
- static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when have 2 consecutive calls before printing
- static int index = 0; // for circular index into the name array
+const char* Compiler::compFPregVarName(unsigned fpReg, bool displayVar)
+{
+ const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1;
+ static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when have 2 consecutive calls
+ // before printing
+ static int index = 0; // for circular index into the name array
- index = (index+1)%2; // circular reuse of index
+ index = (index + 1) % 2; // circular reuse of index
#if FEATURE_STACK_FP_X87
/* 'fpReg' is the distance from the bottom of the stack, ie.
@@ -2183,9 +2194,9 @@ const char * Compiler::compFPregVarName(unsigned fpReg, bool displayVar)
if (displayVar && codeGen->genFPregCnt)
{
assert(fpReg < FP_STK_SIZE);
- assert(compCodeGenDone || (fpReg <= codeGen->compCurFPState.m_uStackSize));
+ assert(compCodeGenDone || (fpReg <= codeGen->compCurFPState.m_uStackSize));
- int pos = codeGen->genFPregCnt - (fpReg+1 - codeGen->genGetFPstkLevel());
+ int pos = codeGen->genFPregCnt - (fpReg + 1 - codeGen->genGetFPstkLevel());
if (pos >= 0)
{
VarName varName = compVarName((regNumber)pos, true);
@@ -2206,30 +2217,29 @@ const char * Compiler::compFPregVarName(unsigned fpReg, bool displayVar)
return nameVarReg[index];
}
-const char * Compiler::compLocalVarName(unsigned varNum, unsigned offs)
+const char* Compiler::compLocalVarName(unsigned varNum, unsigned offs)
{
- unsigned i;
- VarScopeDsc* t;
+ unsigned i;
+ VarScopeDsc* t;
- for (i = 0, t = info.compVarScopes;
- i < info.compVarScopesCount;
- i++ , t++)
+ for (i = 0, t = info.compVarScopes; i < info.compVarScopesCount; i++, t++)
{
- if (t->vsdVarNum != varNum)
+ if (t->vsdVarNum != varNum)
+ {
continue;
+ }
- if (offs >= t->vsdLifeBeg &&
- offs < t->vsdLifeEnd)
+ if (offs >= t->vsdLifeBeg && offs < t->vsdLifeEnd)
{
return VarNameToStr(t->vsdName);
}
}
- return NULL;
+ return nullptr;
}
/*****************************************************************************/
-#endif //DEBUG
+#endif // DEBUG
/*****************************************************************************/
void Compiler::compSetProcessor()
@@ -2261,8 +2271,7 @@ void Compiler::compSetProcessor()
// COMPlus_EnableAVX can be used to disable using AVX if available on a target machine.
// Note that FEATURE_AVX_SUPPORT is not enabled for ctpjit
opts.compCanUseAVX = false;
- if (((compileFlags & CORJIT_FLG_PREJIT) == 0) &&
- ((compileFlags & CORJIT_FLG_USE_AVX2) != 0))
+ if (((compileFlags & CORJIT_FLG_PREJIT) == 0) && ((compileFlags & CORJIT_FLG_USE_AVX2) != 0))
{
if (JitConfig.EnableAVX() != 0)
{
@@ -2278,8 +2287,8 @@ void Compiler::compSetProcessor()
#ifdef _TARGET_X86_
opts.compUseFCOMI = ((opts.eeFlags & CORJIT_FLG_USE_FCOMI) != 0);
- opts.compUseCMOV = ((opts.eeFlags & CORJIT_FLG_USE_CMOV) != 0);
- opts.compCanUseSSE2 = ((opts.eeFlags & CORJIT_FLG_USE_SSE2) != 0);
+ opts.compUseCMOV = ((opts.eeFlags & CORJIT_FLG_USE_CMOV) != 0);
+ opts.compCanUseSSE2 = ((opts.eeFlags & CORJIT_FLG_USE_SSE2) != 0);
#ifdef DEBUG
if (opts.compUseFCOMI)
@@ -2290,9 +2299,9 @@ void Compiler::compSetProcessor()
// Should we override the SSE2 setting
enum
{
- SSE2_FORCE_DISABLE = 0,
- SSE2_FORCE_USE = 1,
- SSE2_FORCE_INVALID = -1
+ SSE2_FORCE_DISABLE = 0,
+ SSE2_FORCE_USE = 1,
+ SSE2_FORCE_INVALID = -1
};
if (JitConfig.JitCanUseSSE2() == SSE2_FORCE_DISABLE)
@@ -2301,9 +2310,8 @@ void Compiler::compSetProcessor()
opts.compCanUseSSE2 = true;
else if (opts.compCanUseSSE2)
opts.compCanUseSSE2 = !compStressCompile(STRESS_GENERIC_VARN, 50);
-#endif // DEBUG
-#endif // _TARGET_X86_
-
+#endif // DEBUG
+#endif // _TARGET_X86_
}
#ifdef PROFILING_SUPPORTED
@@ -2314,7 +2322,7 @@ void DummyProfilerELTStub(UINT_PTR ProfilerHandle, UINT_PTR callerSP)
{
return;
}
-#else //! _TARGET_AMD64_
+#else //! _TARGET_AMD64_
void DummyProfilerELTStub(UINT_PTR ProfilerHandle)
{
return;
@@ -2323,18 +2331,16 @@ void DummyProfilerELTStub(UINT_PTR ProfilerHandle)
#endif // PROFILING_SUPPORTED
-bool Compiler::compIsFullTrust()
+bool Compiler::compIsFullTrust()
{
- return (info.compCompHnd->canSkipMethodVerification(info.compMethodHnd)
- == CORINFO_VERIFICATION_CAN_SKIP);
+ return (info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) == CORINFO_VERIFICATION_CAN_SKIP);
}
-
-bool Compiler::compShouldThrowOnNoway(
+bool Compiler::compShouldThrowOnNoway(
#ifdef FEATURE_TRACELOGGING
- const char* filename, unsigned line
+ const char* filename, unsigned line
#endif
-)
+ )
{
#ifdef FEATURE_TRACELOGGING
compJitTelemetry.NotifyNowayAssert(filename, line);
@@ -2351,13 +2357,15 @@ bool Compiler::compShouldThrowOnNoway(
// value as the user intended.
unsigned ReinterpretHexAsDecimal(unsigned in)
{
- //ex: in: 0x100 returns: 100
+ // ex: in: 0x100 returns: 100
unsigned result = 0;
- unsigned index = 1;
+ unsigned index = 1;
// default value
if (in == INT_MAX)
+ {
return in;
+ }
while (in)
{
@@ -2370,11 +2378,11 @@ unsigned ReinterpretHexAsDecimal(unsigned in)
return result;
}
-void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
+void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
{
#ifdef UNIX_AMD64_ABI
opts.compNeedToAlignFrame = false;
-#endif // UNIX_AMD64_ABI
+#endif // UNIX_AMD64_ABI
memset(&opts, 0, sizeof(opts));
unsigned compileFlags = jitFlags->corJitFlags;
@@ -2387,15 +2395,14 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
opts.jitFlags = jitFlags;
opts.eeFlags = compileFlags;
- opts.compFlags = CLFLG_MAXOPT; // Default value is for full optimization
+ opts.compFlags = CLFLG_MAXOPT; // Default value is for full optimization
- if (opts.eeFlags & (CORJIT_FLG_DEBUG_CODE | CORJIT_FLG_MIN_OPT))
+ if (opts.eeFlags & (CORJIT_FLG_DEBUG_CODE | CORJIT_FLG_MIN_OPT))
{
opts.compFlags = CLFLG_MINOPT;
}
// Don't optimize .cctors (except prejit) or if we're an inlinee
- else if (!(opts.eeFlags & CORJIT_FLG_PREJIT) && ((info.compFlags & FLG_CCTOR) == FLG_CCTOR) &&
- !compIsForInlining())
+ else if (!(opts.eeFlags & CORJIT_FLG_PREJIT) && ((info.compFlags & FLG_CCTOR) == FLG_CCTOR) && !compIsForInlining())
{
opts.compFlags = CLFLG_MINOPT;
}
@@ -2420,12 +2427,12 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
assert((opts.eeFlags & CORJIT_FLG_SIZE_OPT) == 0);
}
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
#ifdef DEBUGGING_SUPPORT
opts.compDbgCode = (opts.eeFlags & CORJIT_FLG_DEBUG_CODE) != 0;
opts.compDbgInfo = (opts.eeFlags & CORJIT_FLG_DEBUG_INFO) != 0;
- opts.compDbgEnC = (opts.eeFlags & CORJIT_FLG_DEBUG_EnC) != 0;
+ opts.compDbgEnC = (opts.eeFlags & CORJIT_FLG_DEBUG_EnC) != 0;
#if REGEN_SHORTCUTS || REGEN_CALLPAT
// We never want to have debugging enabled when regenerating GC encoding patterns
opts.compDbgCode = false;
@@ -2437,14 +2444,14 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
compSetProcessor();
#ifdef DEBUG
- opts.dspOrder = false;
+ opts.dspOrder = false;
if (compIsForInlining())
{
- verbose = impInlineInfo->InlinerCompiler->verbose;
+ verbose = impInlineInfo->InlinerCompiler->verbose;
}
else
{
- verbose = false;
+ verbose = false;
codeGen->setVerbose(false);
}
verboseTrees = verbose && shouldUseVerboseTrees();
@@ -2454,7 +2461,7 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
#endif
opts.compNeedSecurityCheck = false;
- opts.altJit = false;
+ opts.altJit = false;
#if defined(LATE_DISASM) && !defined(DEBUG)
// For non-debug builds with the late disassembler built in, we currently always do late disassembly
@@ -2523,9 +2530,11 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
const wchar_t* wszAltJitExcludeAssemblyList = JitConfig.AltJitExcludeAssemblies();
if (wszAltJitExcludeAssemblyList != nullptr)
{
- // NOTE: The Assembly name list is allocated in the process heap, not in the no-release heap, which is reclaimed
+ // NOTE: The Assembly name list is allocated in the process heap, not in the no-release heap, which is
+ // reclaimed
// for every compilation. This is ok because we only allocate once, due to the static.
- s_pAltJitExcludeAssembliesList = new (HostAllocator::getHostAllocator()) AssemblyNamesList2(wszAltJitExcludeAssemblyList, HostAllocator::getHostAllocator());
+ s_pAltJitExcludeAssembliesList = new (HostAllocator::getHostAllocator())
+ AssemblyNamesList2(wszAltJitExcludeAssemblyList, HostAllocator::getHostAllocator());
}
s_pAltJitExcludeAssembliesListInitialized = true;
}
@@ -2535,7 +2544,8 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
// We have an exclusion list. See if this method is in an assembly that is on the list.
// Note that we check this for every method, since we might inline across modules, and
// if the inlinee module is on the list, we don't want to use the altjit for it.
- const char* methodAssemblyName = info.compCompHnd->getAssemblyName(info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd)));
+ const char* methodAssemblyName = info.compCompHnd->getAssemblyName(
+ info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd)));
if (s_pAltJitExcludeAssembliesList->IsInList(methodAssemblyName))
{
opts.altJit = false;
@@ -2546,40 +2556,42 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
#ifdef DEBUG
- bool altJitConfig = !pfAltJit->isEmpty();
+ bool altJitConfig = !pfAltJit->isEmpty();
- // If we have a non-empty AltJit config then we change all of these other
+ // If we have a non-empty AltJit config then we change all of these other
// config values to refer only to the AltJit. Otherwise, a lot of COMPlus_* variables
// would apply to both the altjit and the normal JIT, but we only care about
// debugging the altjit if the COMPlus_AltJit configuration is set.
- //
+ //
if (compIsForImportOnly() && (!altJitConfig || opts.altJit))
{
if (JitConfig.JitImportBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
assert(!"JitImportBreak reached");
+ }
}
- bool verboseDump = false;
- bool dumpIR = false;
- bool dumpIRTypes = false;
- bool dumpIRLocals = false;
- bool dumpIRRegs = false;
- bool dumpIRSsa = false;
- bool dumpIRValnums = false;
- bool dumpIRCosts = false;
- bool dumpIRFlags = false;
- bool dumpIRKinds = false;
- bool dumpIRNodes = false;
- bool dumpIRNoLists = false;
- bool dumpIRNoLeafs = false;
- bool dumpIRNoStmts = false;
- bool dumpIRTrees = false;
- bool dumpIRLinear = false;
- bool dumpIRDataflow = false;
- bool dumpIRBlockHeaders = false;
- bool dumpIRExit = false;
- LPCWSTR dumpIRPhase = nullptr;
- LPCWSTR dumpIRFormat = nullptr;
+ bool verboseDump = false;
+ bool dumpIR = false;
+ bool dumpIRTypes = false;
+ bool dumpIRLocals = false;
+ bool dumpIRRegs = false;
+ bool dumpIRSsa = false;
+ bool dumpIRValnums = false;
+ bool dumpIRCosts = false;
+ bool dumpIRFlags = false;
+ bool dumpIRKinds = false;
+ bool dumpIRNodes = false;
+ bool dumpIRNoLists = false;
+ bool dumpIRNoLeafs = false;
+ bool dumpIRNoStmts = false;
+ bool dumpIRTrees = false;
+ bool dumpIRLinear = false;
+ bool dumpIRDataflow = false;
+ bool dumpIRBlockHeaders = false;
+ bool dumpIRExit = false;
+ LPCWSTR dumpIRPhase = nullptr;
+ LPCWSTR dumpIRFormat = nullptr;
if (!altJitConfig || opts.altJit)
{
@@ -2595,23 +2607,24 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
if (JitConfig.NgenDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
{
verboseDump = true;
- }
- unsigned ngenHashDumpVal = (unsigned) JitConfig.NgenHashDump();
+ }
+ unsigned ngenHashDumpVal = (unsigned)JitConfig.NgenHashDump();
if ((ngenHashDumpVal != (DWORD)-1) && (ngenHashDumpVal == info.compMethodHash()))
{
verboseDump = true;
}
- if (JitConfig.NgenDumpIR().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ if (JitConfig.NgenDumpIR().contains(info.compMethodName, info.compClassName,
+ &info.compMethodInfo->args))
{
dumpIR = true;
}
- unsigned ngenHashDumpIRVal = (unsigned) JitConfig.NgenHashDumpIR();
+ unsigned ngenHashDumpIRVal = (unsigned)JitConfig.NgenHashDumpIR();
if ((ngenHashDumpIRVal != (DWORD)-1) && (ngenHashDumpIRVal == info.compMethodHash()))
{
dumpIR = true;
- }
+ }
dumpIRFormat = JitConfig.NgenDumpIRFormat();
- dumpIRPhase = JitConfig.NgenDumpIRPhase();
+ dumpIRPhase = JitConfig.NgenDumpIRPhase();
}
else
{
@@ -2619,22 +2632,22 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
{
verboseDump = true;
}
- unsigned jitHashDumpVal = (unsigned) JitConfig.JitHashDump();
+ unsigned jitHashDumpVal = (unsigned)JitConfig.JitHashDump();
if ((jitHashDumpVal != (DWORD)-1) && (jitHashDumpVal == info.compMethodHash()))
{
verboseDump = true;
- }
+ }
if (JitConfig.JitDumpIR().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
{
dumpIR = true;
- }
- unsigned jitHashDumpIRVal = (unsigned) JitConfig.JitHashDumpIR();
+ }
+ unsigned jitHashDumpIRVal = (unsigned)JitConfig.JitHashDumpIR();
if ((jitHashDumpIRVal != (DWORD)-1) && (jitHashDumpIRVal == info.compMethodHash()))
{
dumpIR = true;
- }
+ }
dumpIRFormat = JitConfig.JitDumpIRFormat();
- dumpIRPhase = JitConfig.JitDumpIRPhase();
+ dumpIRPhase = JitConfig.JitDumpIRPhase();
}
}
@@ -2650,16 +2663,18 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
this->dumpIRFormat = dumpIRFormat;
}
- dumpIRTrees = false;
+ dumpIRTrees = false;
dumpIRLinear = true;
if (dumpIRFormat != nullptr)
{
- for (LPCWSTR p = dumpIRFormat; (*p != 0); )
+ for (LPCWSTR p = dumpIRFormat; (*p != 0);)
{
for (; (*p != 0); p++)
{
if (*p != L' ')
+ {
break;
+ }
}
if (*p == 0)
@@ -2709,125 +2724,124 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
if (wcsncmp(p, W("types"), 5) == 0)
{
- dumpIRTypes = true;
+ dumpIRTypes = true;
}
if (wcsncmp(p, W("locals"), 6) == 0)
{
- dumpIRLocals = true;
+ dumpIRLocals = true;
}
if (wcsncmp(p, W("regs"), 4) == 0)
{
- dumpIRRegs = true;
+ dumpIRRegs = true;
}
-
+
if (wcsncmp(p, W("ssa"), 3) == 0)
{
- dumpIRSsa = true;
+ dumpIRSsa = true;
}
if (wcsncmp(p, W("valnums"), 7) == 0)
{
- dumpIRValnums = true;
+ dumpIRValnums = true;
}
if (wcsncmp(p, W("costs"), 5) == 0)
{
- dumpIRCosts = true;
+ dumpIRCosts = true;
}
if (wcsncmp(p, W("flags"), 5) == 0)
{
- dumpIRFlags = true;
+ dumpIRFlags = true;
}
if (wcsncmp(p, W("kinds"), 5) == 0)
{
- dumpIRKinds = true;
+ dumpIRKinds = true;
}
if (wcsncmp(p, W("nodes"), 5) == 0)
{
- dumpIRNodes = true;
+ dumpIRNodes = true;
}
if (wcsncmp(p, W("exit"), 4) == 0)
{
- dumpIRExit = true;
+ dumpIRExit = true;
}
if (wcsncmp(p, W("nolists"), 7) == 0)
{
- dumpIRNoLists = true;
+ dumpIRNoLists = true;
}
if (wcsncmp(p, W("noleafs"), 7) == 0)
{
- dumpIRNoLeafs = true;
+ dumpIRNoLeafs = true;
}
if (wcsncmp(p, W("nostmts"), 7) == 0)
{
- dumpIRNoStmts = true;
+ dumpIRNoStmts = true;
}
if (wcsncmp(p, W("trees"), 5) == 0)
{
- dumpIRTrees = true;
- dumpIRLinear = false;
+ dumpIRTrees = true;
+ dumpIRLinear = false;
}
if (wcsncmp(p, W("structural"), 10) == 0)
{
- dumpIRLinear = true;
- dumpIRNoStmts = false;
- dumpIRNoLeafs = false;
- dumpIRNoLists = false;
+ dumpIRLinear = true;
+ dumpIRNoStmts = false;
+ dumpIRNoLeafs = false;
+ dumpIRNoLists = false;
}
if (wcsncmp(p, W("all"), 3) == 0)
{
- dumpIRLinear = true;
- dumpIRKinds = true;
- dumpIRFlags = true;
- dumpIRTypes = true;
- dumpIRLocals = true;
- dumpIRRegs = true;
- dumpIRSsa = true;
- dumpIRValnums = true;
- dumpIRCosts = true;
- dumpIRNoStmts = false;
- dumpIRNoLeafs = false;
- dumpIRNoLists = false;
+ dumpIRLinear = true;
+ dumpIRKinds = true;
+ dumpIRFlags = true;
+ dumpIRTypes = true;
+ dumpIRLocals = true;
+ dumpIRRegs = true;
+ dumpIRSsa = true;
+ dumpIRValnums = true;
+ dumpIRCosts = true;
+ dumpIRNoStmts = false;
+ dumpIRNoLeafs = false;
+ dumpIRNoLists = false;
}
if (wcsncmp(p, W("linear"), 6) == 0)
{
- dumpIRTrees = false;
- dumpIRLinear = true;
+ dumpIRTrees = false;
+ dumpIRLinear = true;
}
if (wcsncmp(p, W("mixed"), 5) == 0)
{
- dumpIRTrees = true;
- dumpIRLinear = true;
+ dumpIRTrees = true;
+ dumpIRLinear = true;
}
if (wcsncmp(p, W("dataflow"), 8) == 0)
{
- dumpIRDataflow = true;
- dumpIRNoLeafs = true;
- dumpIRNoLists = true;
- dumpIRNoStmts = true;
+ dumpIRDataflow = true;
+ dumpIRNoLeafs = true;
+ dumpIRNoLists = true;
+ dumpIRNoStmts = true;
}
if (wcsncmp(p, W("blkhdrs"), 7) == 0)
{
- dumpIRBlockHeaders = true;
+ dumpIRBlockHeaders = true;
}
-
-
+
for (; (*p != 0); p++)
{
if (*p == L',')
@@ -2945,90 +2959,117 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
#endif // FEATURE_SIMD
if (compIsForInlining() || compIsForImportOnly())
+ {
return;
+ }
// The rest of the opts fields that we initialize here
// should only be used when we generate code for the method
// They should not be used when importing or inlining
- opts.genFPorder = true;
- opts.genFPopt = true;
+ opts.genFPorder = true;
+ opts.genFPopt = true;
- opts.instrCount = 0;
- opts.lvRefCount = 0;
+ opts.instrCount = 0;
+ opts.lvRefCount = 0;
#if FEATURE_TAILCALL_OPT
// By default opportunistic tail call optimization is enabled
- opts.compTailCallOpt = true;
+ opts.compTailCallOpt = true;
opts.compTailCallLoopOpt = true;
#endif
#ifdef DEBUG
- opts.dspInstrs = false;
- opts.dspEmit = false;
- opts.dspLines = false;
- opts.varNames = false;
- opts.dmpHex = false;
- opts.disAsm = false;
- opts.disAsmSpilled = false;
- opts.disDiffable = false;
- opts.dspCode = false;
- opts.dspEHTable = false;
- opts.dspGCtbls = false;
- opts.disAsm2 = false;
- opts.dspUnwind = false;
- s_dspMemStats = false;
- opts.compLongAddress = false;
+ opts.dspInstrs = false;
+ opts.dspEmit = false;
+ opts.dspLines = false;
+ opts.varNames = false;
+ opts.dmpHex = false;
+ opts.disAsm = false;
+ opts.disAsmSpilled = false;
+ opts.disDiffable = false;
+ opts.dspCode = false;
+ opts.dspEHTable = false;
+ opts.dspGCtbls = false;
+ opts.disAsm2 = false;
+ opts.dspUnwind = false;
+ s_dspMemStats = false;
+ opts.compLongAddress = false;
opts.compJitELTHookEnabled = false;
#ifdef LATE_DISASM
- opts.doLateDisasm = false;
+ opts.doLateDisasm = false;
#endif // LATE_DISASM
- compDebugBreak = false;
+ compDebugBreak = false;
- // If we have a non-empty AltJit config then we change all of these other
+ // If we have a non-empty AltJit config then we change all of these other
// config values to refer only to the AltJit.
- //
+ //
if (!altJitConfig || opts.altJit)
{
if (opts.eeFlags & CORJIT_FLG_PREJIT)
{
if ((JitConfig.NgenOrder() & 1) == 1)
+ {
opts.dspOrder = true;
+ }
if (JitConfig.NgenGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
opts.dspGCtbls = true;
+ }
if (JitConfig.NgenDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
opts.disAsm = true;
- if (JitConfig.NgenDisasm().contains("SPILLED", NULL, NULL))
+ }
+ if (JitConfig.NgenDisasm().contains("SPILLED", nullptr, nullptr))
+ {
opts.disAsmSpilled = true;
+ }
- if (JitConfig.NgenUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ if (JitConfig.NgenUnwindDump().contains(info.compMethodName, info.compClassName,
+ &info.compMethodInfo->args))
+ {
opts.dspUnwind = true;
+ }
if (JitConfig.NgenEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
opts.dspEHTable = true;
+ }
}
- else
+ else
{
if ((JitConfig.JitOrder() & 1) == 1)
+ {
opts.dspOrder = true;
+ }
if (JitConfig.JitGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
opts.dspGCtbls = true;
+ }
if (JitConfig.JitDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
opts.disAsm = true;
+ }
- if (JitConfig.JitDisasm().contains("SPILLED", NULL, NULL))
+ if (JitConfig.JitDisasm().contains("SPILLED", nullptr, nullptr))
+ {
opts.disAsmSpilled = true;
+ }
if (JitConfig.JitUnwindDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
opts.dspUnwind = true;
+ }
if (JitConfig.JitEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
opts.dspEHTable = true;
+ }
}
#ifdef LATE_DISASM
@@ -3044,27 +3085,31 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
}
if (JitConfig.DisplayMemStats() != 0)
+ {
s_dspMemStats = true;
+ }
if (JitConfig.JitLongAddress() != 0)
+ {
opts.compLongAddress = true;
+ }
}
if (verboseDump)
{
- opts.dspCode = true;
+ opts.dspCode = true;
opts.dspEHTable = true;
- opts.dspGCtbls = true;
- opts.disAsm2 = true;
- opts.dspUnwind = true;
- verbose = true;
- verboseTrees = shouldUseVerboseTrees();
- verboseSsa = shouldUseVerboseSsa();
+ opts.dspGCtbls = true;
+ opts.disAsm2 = true;
+ opts.dspUnwind = true;
+ verbose = true;
+ verboseTrees = shouldUseVerboseTrees();
+ verboseSsa = shouldUseVerboseSsa();
codeGen->setVerbose(true);
}
treesBeforeAfterMorph = (JitConfig.TreesBeforeAfterMorph() == 1);
- morphNum = 0; // Initialize the morphed-trees counting.
+ morphNum = 0; // Initialize the morphed-trees counting.
expensiveDebugCheckLevel = JitConfig.JitExpensiveDebugCheckLevel();
if (expensiveDebugCheckLevel == 0)
@@ -3078,18 +3123,21 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
if (verbose)
{
- printf("****** START compiling %s (MethodHash=%08x)\n",
- info.compFullName, info.compMethodHash());
+ printf("****** START compiling %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash());
printf("Generating code for %s %s\n", Target::g_tgtPlatformName, Target::g_tgtCPUName);
- printf(""); // in our logic this causes a flush
+ printf(""); // in our logic this causes a flush
}
if (JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
assert(!"JitBreak reached");
+ }
unsigned jitHashBreakVal = (unsigned)JitConfig.JitHashBreak();
if ((jitHashBreakVal != (DWORD)-1) && (jitHashBreakVal == info.compMethodHash()))
+ {
assert(!"JitHashBreak reached");
+ }
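// Editorial aside (not part of the patch): JitHashBreak pairs with the
// "****** START compiling %s (MethodHash=%08x)" banner printed above. To break on one
// specific method, feed that hash back in through the config (typically surfaced as
// COMPlus_JitHashBreak), e.g. with a made-up hash value:
//   set COMPlus_JitHashBreak=b1d074fa
// The default of -1 keeps the check disabled, which is what the (DWORD)-1 test guards.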
if (verbose ||
JitConfig.JitDebugBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args) ||
@@ -3102,30 +3150,31 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
#endif // DEBUG
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
#ifdef DEBUGGING_SUPPORT
#ifdef DEBUG
assert(!codeGen->isGCTypeFixed());
- opts.compGcChecks = (JitConfig.JitGCChecks() != 0) ||
- compStressCompile(STRESS_GENERIC_VARN, 5);
+ opts.compGcChecks = (JitConfig.JitGCChecks() != 0) || compStressCompile(STRESS_GENERIC_VARN, 5);
enum
{
- STACK_CHECK_ON_RETURN = 0x1,
- STACK_CHECK_ON_CALL = 0x2,
- STACK_CHECK_ALL = 0x3,
+ STACK_CHECK_ON_RETURN = 0x1,
+ STACK_CHECK_ON_CALL = 0x2,
+ STACK_CHECK_ALL = 0x3,
};
DWORD dwJitStackChecks = JitConfig.JitStackChecks();
- if (compStressCompile(STRESS_GENERIC_VARN, 5)) dwJitStackChecks = STACK_CHECK_ALL;
- opts.compStackCheckOnRet = (dwJitStackChecks & DWORD(STACK_CHECK_ON_RETURN)) != 0;
+ if (compStressCompile(STRESS_GENERIC_VARN, 5))
+ {
+ dwJitStackChecks = STACK_CHECK_ALL;
+ }
+ opts.compStackCheckOnRet = (dwJitStackChecks & DWORD(STACK_CHECK_ON_RETURN)) != 0;
opts.compStackCheckOnCall = (dwJitStackChecks & DWORD(STACK_CHECK_ON_CALL)) != 0;
#endif
#ifdef PROFILING_SUPPORTED
- opts.compNoPInvokeInlineCB =
- (opts.eeFlags & CORJIT_FLG_PROF_NO_PINVOKE_INLINE) ? true : false;
+ opts.compNoPInvokeInlineCB = (opts.eeFlags & CORJIT_FLG_PROF_NO_PINVOKE_INLINE) ? true : false;
// Cache the profiler handle
if (opts.eeFlags & CORJIT_FLG_PROF_ENTERLEAVE)
@@ -3133,23 +3182,23 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
BOOL hookNeeded;
BOOL indirected;
info.compCompHnd->GetProfilingHandle(&hookNeeded, &compProfilerMethHnd, &indirected);
- compProfilerHookNeeded = !!hookNeeded;
+ compProfilerHookNeeded = !!hookNeeded;
compProfilerMethHndIndirected = !!indirected;
}
else
{
- compProfilerHookNeeded = false;
- compProfilerMethHnd = nullptr;
+ compProfilerHookNeeded = false;
+ compProfilerMethHnd = nullptr;
compProfilerMethHndIndirected = false;
}
#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
// Right now this ELT hook option is enabled only for arm and amd64
- // Honour complus_JitELTHookEnabled only if VM has not asked us to generate profiler
- // hooks in the first place. That is, Override VM only if it hasn't asked for a
+ // Honour complus_JitELTHookEnabled only if VM has not asked us to generate profiler
+ // hooks in the first place. That is, Override VM only if it hasn't asked for a
// profiler callback for this method.
- if (!compProfilerHookNeeded && (JitConfig.JitELTHookEnabled() != 0))
+ if (!compProfilerHookNeeded && (JitConfig.JitELTHookEnabled() != 0))
{
opts.compJitELTHookEnabled = true;
}
@@ -3157,7 +3206,7 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
// TBD: Exclude PInvoke stubs
if (opts.compJitELTHookEnabled)
{
- compProfilerMethHnd = (void *) DummyProfilerELTStub;
+ compProfilerMethHnd = (void*)DummyProfilerELTStub;
compProfilerMethHndIndirected = false;
}
#endif // _TARGET_ARM_ || _TARGET_AMD64_
@@ -3177,18 +3226,19 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
}
#endif
- opts.compMustInlinePInvokeCalli = (opts.eeFlags & CORJIT_FLG_IL_STUB) ? true : false;
+ opts.compMustInlinePInvokeCalli = (opts.eeFlags & CORJIT_FLG_IL_STUB) ? true : false;
- opts.compScopeInfo = opts.compDbgInfo;
+ opts.compScopeInfo = opts.compDbgInfo;
#endif // DEBUGGING_SUPPORT
-#ifdef LATE_DISASM
- codeGen->getDisAssembler().disOpenForLateDisAsm(info.compMethodName, info.compClassName, info.compMethodInfo->args.pSig);
+#ifdef LATE_DISASM
+ codeGen->getDisAssembler().disOpenForLateDisAsm(info.compMethodName, info.compClassName,
+ info.compMethodInfo->args.pSig);
#endif
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
-#if RELOC_SUPPORT
+#if RELOC_SUPPORT
opts.compReloc = (opts.eeFlags & CORJIT_FLG_RELOC) ? true : false;
#endif
@@ -3197,7 +3247,7 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
// Whether encoding of absolute addr as PC-rel offset is enabled in RyuJIT
opts.compEnablePCRelAddr = (JitConfig.EnablePCRelAddr() != 0);
#endif
-#endif //DEBUG
+#endif // DEBUG
opts.compProcedureSplitting = (opts.eeFlags & CORJIT_FLG_PROCSPLIT) ? true : false;
@@ -3215,87 +3265,99 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
// Note that opts.compDbgCode is true under ngen for checked assemblies!
opts.compProcedureSplitting = !opts.compDbgCode;
-#ifdef DEBUG
+#ifdef DEBUG
// JitForceProcedureSplitting is used to force procedure splitting on checked assemblies.
// This is useful for debugging on a checked build. Note that we still only do procedure
// splitting in the zapper.
- if (JitConfig.JitForceProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ if (JitConfig.JitForceProcedureSplitting().contains(info.compMethodName, info.compClassName,
+ &info.compMethodInfo->args))
+ {
opts.compProcedureSplitting = true;
+ }
// JitNoProcedureSplitting will always disable procedure splitting.
- if (JitConfig.JitNoProcedureSplitting().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ if (JitConfig.JitNoProcedureSplitting().contains(info.compMethodName, info.compClassName,
+ &info.compMethodInfo->args))
+ {
opts.compProcedureSplitting = false;
+ }
//
// JitNoProcedureSplittingEH will disable procedure splitting in functions with EH.
- if (JitConfig.JitNoProcedureSplittingEH().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ if (JitConfig.JitNoProcedureSplittingEH().contains(info.compMethodName, info.compClassName,
+ &info.compMethodInfo->args))
+ {
opts.compProcedureSplittingEH = false;
+ }
#endif
}
- fgProfileBuffer = NULL;
+ fgProfileBuffer = nullptr;
fgProfileData_ILSizeMismatch = false;
- fgNumProfileRuns = 0;
+ fgNumProfileRuns = 0;
if (opts.eeFlags & CORJIT_FLG_BBOPT)
{
assert(!compIsForInlining());
HRESULT hr;
- hr = info.compCompHnd->getBBProfileData(
- info.compMethodHnd,
- &fgProfileBufferCount,
- &fgProfileBuffer,
- &fgNumProfileRuns);
+ hr = info.compCompHnd->getBBProfileData(info.compMethodHnd, &fgProfileBufferCount, &fgProfileBuffer,
+ &fgNumProfileRuns);
// a failed result that also has a non-NULL fgProfileBuffer
// indicates that the ILSize for the method no longer matches
// the ILSize for the method when profile data was collected.
- //
+ //
// We will discard the IBC data in this case
- //
- if (FAILED(hr) && (fgProfileBuffer != NULL))
+ //
+ if (FAILED(hr) && (fgProfileBuffer != nullptr))
{
fgProfileData_ILSizeMismatch = true;
- fgProfileBuffer = NULL;
+ fgProfileBuffer = nullptr;
}
#ifdef DEBUG
// A successful result implies a non-NULL fgProfileBuffer
//
if (SUCCEEDED(hr))
- assert(fgProfileBuffer != NULL);
+ {
+ assert(fgProfileBuffer != nullptr);
+ }
// A failed result implies a NULL fgProfileBuffer
// see implementation of Compiler::fgHaveProfileData()
- //
+ //
if (FAILED(hr))
- assert(fgProfileBuffer == NULL);
+ {
+ assert(fgProfileBuffer == nullptr);
+ }
#endif
}
opts.compNeedStackProbes = false;
#ifdef DEBUG
- if (JitConfig.StackProbesOverride() != 0 ||
- compStressCompile(STRESS_GENERIC_VARN, 5))
+ if (JitConfig.StackProbesOverride() != 0 || compStressCompile(STRESS_GENERIC_VARN, 5))
{
opts.compNeedStackProbes = true;
}
#endif
-#ifdef DEBUG
+#ifdef DEBUG
// Now, set compMaxUncheckedOffsetForNullObject for STRESS_NULL_OBJECT_CHECK
if (compStressCompile(STRESS_NULL_OBJECT_CHECK, 30))
{
compMaxUncheckedOffsetForNullObject = (size_t)JitConfig.JitMaxUncheckedOffset();
- if (verbose) {
- printf("STRESS_NULL_OBJECT_CHECK: compMaxUncheckedOffsetForNullObject=0x%X\n", compMaxUncheckedOffsetForNullObject);
+ if (verbose)
+ {
+ printf("STRESS_NULL_OBJECT_CHECK: compMaxUncheckedOffsetForNullObject=0x%X\n",
+ compMaxUncheckedOffsetForNullObject);
}
}
if (verbose)
{
- printf("OPTIONS: compCodeOpt = %s\n",
- (opts.compCodeOpt == BLENDED_CODE) ? "BLENDED_CODE" :
- (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" :
- (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE");
+ printf("OPTIONS: compCodeOpt = %s\n",
+ (opts.compCodeOpt == BLENDED_CODE)
+ ? "BLENDED_CODE"
+ : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE"
+ : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE");
printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode));
printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo));
@@ -3307,7 +3369,7 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
{
printf("OPTIONS: using real profile data\n");
}
-
+
if (fgProfileData_ILSizeMismatch)
{
printf("OPTIONS: discarded IBC profile data due to mismatch in ILSize\n");
@@ -3323,21 +3385,22 @@ void Compiler::compInitOptions(CORJIT_FLAGS* jitFlags)
opts.compGCPollType = GCPOLL_NONE;
if (opts.eeFlags & CORJIT_FLG_GCPOLL_CALLS)
+ {
opts.compGCPollType = GCPOLL_CALL;
+ }
else if (opts.eeFlags & CORJIT_FLG_GCPOLL_INLINE)
{
- //make sure that the EE didn't set both flags.
+ // make sure that the EE didn't set both flags.
assert(opts.compGCPollType == GCPOLL_NONE);
opts.compGCPollType = GCPOLL_INLINE;
}
-
}
#ifdef DEBUG
void JitDump(const char* pcFormat, ...)
{
- va_list lst;
+ va_list lst;
va_start(lst, pcFormat);
vflogf(jitstdout, pcFormat, lst);
va_end(lst);
@@ -3372,41 +3435,40 @@ bool Compiler::compJitHaltMethod()
* It should reflect the usefulness:overhead ratio.
*/
-const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = {
+const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = {
#define STRESS_MODE(mode) W("STRESS_") W(#mode),
STRESS_MODES
#undef STRESS_MODE
};
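// Editorial aside (not part of the patch): this is the usual X-macro pattern. Assuming
// STRESS_MODES is a list of STRESS_MODE(name) entries defined in a header, for example
// (hypothetical excerpt)
//     STRESS_MODE(MIN_OPTS) STRESS_MODE(NULL_OBJECT_CHECK) ...
// the local #define turns each entry into adjacent wide literals, W("STRESS_") W("MIN_OPTS"),
// which concatenate to L"STRESS_MIN_OPTS". The same list presumably also generates the
// compStressArea enumerators, so the name table and the enum stay in lock-step by construction.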
-bool Compiler::compStressCompile(compStressArea stressArea,
- unsigned weight)
+bool Compiler::compStressCompile(compStressArea stressArea, unsigned weight)
{
unsigned hash;
- DWORD stressLevel;
+ DWORD stressLevel;
if (!bRangeAllowStress)
{
return false;
}
- if (!JitConfig.JitStressOnly().isEmpty() &&
+ if (!JitConfig.JitStressOnly().isEmpty() &&
!JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
{
return false;
- }
+ }
- bool doStress = false;
+ bool doStress = false;
const wchar_t* strStressModeNames;
// Does user explicitly prevent using this STRESS_MODE through the command line?
const wchar_t* strStressModeNamesNot = JitConfig.JitStressModeNamesNot();
- if ((strStressModeNamesNot != NULL) &&
- (wcsstr(strStressModeNamesNot, s_compStressModeNames[stressArea]) != NULL))
+ if ((strStressModeNamesNot != nullptr) &&
+ (wcsstr(strStressModeNamesNot, s_compStressModeNames[stressArea]) != nullptr))
{
if (verbose)
{
- printf("JitStressModeNamesNot contains %ws\n", s_compStressModeNames[stressArea]);
+ printf("JitStressModeNamesNot contains %ws\n", s_compStressModeNames[stressArea]);
}
doStress = false;
goto _done;
@@ -3414,9 +3476,9 @@ bool Compiler::compStressCompile(compStressArea stressArea,
// Does user explicitly set this STRESS_MODE through the command line?
strStressModeNames = JitConfig.JitStressModeNames();
- if (strStressModeNames != NULL)
+ if (strStressModeNames != nullptr)
{
- if (wcsstr(strStressModeNames, s_compStressModeNames[stressArea]) != NULL)
+ if (wcsstr(strStressModeNames, s_compStressModeNames[stressArea]) != nullptr)
{
if (verbose)
{
@@ -3448,11 +3510,15 @@ bool Compiler::compStressCompile(compStressArea stressArea,
/* Check for boundary conditions */
if (stressLevel == 0 || weight == 0)
+ {
return false;
+ }
// Should we allow unlimited stress ?
if (stressArea > STRESS_COUNT_VARN && stressLevel == 2)
+ {
return true;
+ }
if (weight == MAX_STRESS_WEIGHT)
{
@@ -3470,10 +3536,12 @@ bool Compiler::compStressCompile(compStressArea stressArea,
_done:
-
if (doStress && !compActiveStressModes[stressArea])
{
- if (verbose) printf("\n\n*** JitStress: %ws ***\n\n", s_compStressModeNames[stressArea]);
+ if (verbose)
+ {
+ printf("\n\n*** JitStress: %ws ***\n\n", s_compStressModeNames[stressArea]);
+ }
compActiveStressModes[stressArea] = 1;
}
@@ -3482,14 +3550,15 @@ _done:
#endif // DEBUG
-
-void Compiler::compInitDebuggingInfo()
+void Compiler::compInitDebuggingInfo()
{
- assert (!compIsForInlining());
-
+ assert(!compIsForInlining());
+
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In compInitDebuggingInfo() for %s\n", info.compFullName);
+ }
#endif
/*-------------------------------------------------------------------------
@@ -3499,7 +3568,7 @@ void Compiler::compInitDebuggingInfo()
info.compVarScopesCount = 0;
-#ifdef DEBUGGING_SUPPORT
+#ifdef DEBUGGING_SUPPORT
if (opts.compScopeInfo)
#endif
{
@@ -3526,7 +3595,8 @@ void Compiler::compInitDebuggingInfo()
fgInsertStmtAtEnd(fgFirstBB, gtNewNothingNode());
- JITDUMP("Debuggable code - Add new BB%02u to perform initialization of variables [%08X]\n", fgFirstBB->bbNum, dspPtr(fgFirstBB));
+ JITDUMP("Debuggable code - Add new BB%02u to perform initialization of variables [%08X]\n", fgFirstBB->bbNum,
+ dspPtr(fgFirstBB));
}
#endif // DEBUGGING_SUPPORT
@@ -3547,7 +3617,7 @@ void Compiler::compInitDebuggingInfo()
info.compStmtOffsetsCount = 0;
-#ifdef DEBUGGING_SUPPORT
+#ifdef DEBUGGING_SUPPORT
if (opts.compDbgInfo)
#endif
{
@@ -3560,33 +3630,44 @@ void Compiler::compInitDebuggingInfo()
{
printf("info.compStmtOffsetsCount = %d\n", info.compStmtOffsetsCount);
printf("info.compStmtOffsetsImplicit = %04Xh", info.compStmtOffsetsImplicit);
-
+
if (info.compStmtOffsetsImplicit)
{
printf(" ( ");
- if (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) printf("STACK_EMPTY ");
- if (info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) printf("NOP ");
- if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) printf("CALL_SITE ");
+ if (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES)
+ {
+ printf("STACK_EMPTY ");
+ }
+ if (info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES)
+ {
+ printf("NOP ");
+ }
+ if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
+ {
+ printf("CALL_SITE ");
+ }
printf(")");
}
printf("\n");
- IL_OFFSET * pOffs = info.compStmtOffsets;
+ IL_OFFSET* pOffs = info.compStmtOffsets;
for (unsigned i = 0; i < info.compStmtOffsetsCount; i++, pOffs++)
+ {
printf("%02d) IL_%04Xh\n", i, *pOffs);
+ }
}
#endif
}
}
-void Compiler::compSetOptimizationLevel()
-{
+void Compiler::compSetOptimizationLevel()
+{
unsigned compileFlags;
bool theMinOptsValue;
unsigned jitMinOpts;
- compileFlags = opts.eeFlags;
+ compileFlags = opts.eeFlags;
- if (compIsForInlining())
+ if (compIsForInlining())
{
theMinOptsValue = impInlineInfo->InlinerCompiler->opts.MinOpts();
goto _SetMinOpts;
@@ -3600,73 +3681,80 @@ void Compiler::compSetOptimizationLevel()
theMinOptsValue = true;
}
-#ifdef DEBUG
+#ifdef DEBUG
jitMinOpts = JitConfig.JitMinOpts();
if (!theMinOptsValue && (jitMinOpts > 0))
{
- unsigned methodCount = Compiler::jitTotalMethodCompiled;
+ unsigned methodCount = Compiler::jitTotalMethodCompiled;
unsigned methodCountMask = methodCount & 0xFFF;
- unsigned kind = (jitMinOpts & 0xF000000) >> 24;
+ unsigned kind = (jitMinOpts & 0xF000000) >> 24;
switch (kind)
{
- default:
- if (jitMinOpts <= methodCount)
- {
- if (verbose)
- printf(" Optimizations disabled by JitMinOpts and methodCount\n");
- theMinOptsValue = true;
- }
- break;
- case 0xD:
+ default:
+ if (jitMinOpts <= methodCount)
+ {
+ if (verbose)
+ {
+ printf(" Optimizations disabled by JitMinOpts and methodCount\n");
+ }
+ theMinOptsValue = true;
+ }
+ break;
+ case 0xD:
{
unsigned firstMinopts = (jitMinOpts >> 12) & 0xFFF;
- unsigned secondMinopts = (jitMinOpts >> 0) & 0xFFF;
+ unsigned secondMinopts = (jitMinOpts >> 0) & 0xFFF;
- if ((firstMinopts == methodCountMask) ||
- (secondMinopts == methodCountMask))
+ if ((firstMinopts == methodCountMask) || (secondMinopts == methodCountMask))
{
- if (verbose)
+ if (verbose)
+ {
printf("0xD: Optimizations disabled by JitMinOpts and methodCountMask\n");
+ }
theMinOptsValue = true;
}
}
break;
- case 0xE:
+ case 0xE:
{
unsigned startMinopts = (jitMinOpts >> 12) & 0xFFF;
- unsigned endMinopts = (jitMinOpts >> 0) & 0xFFF;
+ unsigned endMinopts = (jitMinOpts >> 0) & 0xFFF;
- if ((startMinopts <= methodCountMask) &&
- (endMinopts >= methodCountMask))
+ if ((startMinopts <= methodCountMask) && (endMinopts >= methodCountMask))
{
- if (verbose)
+ if (verbose)
+ {
printf("0xE: Optimizations disabled by JitMinOpts and methodCountMask\n");
+ }
theMinOptsValue = true;
}
}
break;
- case 0xF:
+ case 0xF:
{
unsigned bitsZero = (jitMinOpts >> 12) & 0xFFF;
- unsigned bitsOne = (jitMinOpts >> 0) & 0xFFF;
-
- if ((( methodCountMask & bitsOne) == bitsOne) &&
- ((~methodCountMask & bitsZero) == bitsZero) )
+ unsigned bitsOne = (jitMinOpts >> 0) & 0xFFF;
+
+ if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero))
{
- if (verbose)
+ if (verbose)
+ {
printf("0xF: Optimizations disabled by JitMinOpts and methodCountMask\n");
+ }
theMinOptsValue = true;
}
}
break;
}
}
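// Editorial aside (not part of the patch): a worked reading of the decode above, using a
// made-up config value of 0xE002005:
//   kind         = (0xE002005 & 0xF000000) >> 24 = 0xE   (range mode)
//   startMinopts = (0xE002005 >> 12) & 0xFFF     = 0x002
//   endMinopts   = (0xE002005 >>  0) & 0xFFF     = 0x005
// so any method whose (methodCount & 0xFFF) falls in [0x002, 0x005] is compiled with MinOpts.
// A value without a 0xD/0xE/0xF kind nibble takes the default arm and is simply compared
// against the running method count.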
-
+
if (!theMinOptsValue)
{
if (JitConfig.JitMinOptsName().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
theMinOptsValue = true;
+ }
}
if (compStressCompile(STRESS_MIN_OPTS, 5))
@@ -3704,7 +3792,8 @@ void Compiler::compSetOptimizationLevel()
}
if (theMinOptsValue == true)
{
- JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count %3d,%3d for method %s\n",
+ JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count "
+ "%3d,%3d for method %s\n",
info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName));
if (JitConfig.JitBreakOnMinOpts() != 0)
{
@@ -3716,18 +3805,17 @@ void Compiler::compSetOptimizationLevel()
// Retail check if we should force Minopts due to the complexity of the method
// For PREJIT we never drop down to MinOpts
// unless CLFLG_MINOPT is set
- if (!theMinOptsValue && !(compileFlags & CORJIT_FLG_PREJIT) &&
- ((DEFAULT_MIN_OPTS_CODE_SIZE < info.compILCodeSize) ||
- (DEFAULT_MIN_OPTS_INSTR_COUNT < opts.instrCount) ||
- (DEFAULT_MIN_OPTS_BB_COUNT < fgBBcount) ||
- (DEFAULT_MIN_OPTS_LV_NUM_COUNT < lvaCount) ||
- (DEFAULT_MIN_OPTS_LV_REF_COUNT < opts.lvRefCount)))
+ if (!theMinOptsValue && !(compileFlags & CORJIT_FLG_PREJIT) &&
+ ((DEFAULT_MIN_OPTS_CODE_SIZE < info.compILCodeSize) || (DEFAULT_MIN_OPTS_INSTR_COUNT < opts.instrCount) ||
+ (DEFAULT_MIN_OPTS_BB_COUNT < fgBBcount) || (DEFAULT_MIN_OPTS_LV_NUM_COUNT < lvaCount) ||
+ (DEFAULT_MIN_OPTS_LV_REF_COUNT < opts.lvRefCount)))
{
theMinOptsValue = true;
}
-#endif // DEBUG
+#endif // DEBUG
- JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count %3d,%3d for method %s\n",
+ JITLOG((LL_INFO10000,
+ "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count %3d,%3d for method %s\n",
info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName));
#if 0
@@ -3772,8 +3860,7 @@ _SetMinOpts:
#ifdef DEBUG
if (verbose && !compIsForInlining())
{
- printf("OPTIONS: opts.MinOpts() == %s\n",
- opts.MinOpts() ? "true" : "false");
+ printf("OPTIONS: opts.MinOpts() == %s\n", opts.MinOpts() ? "true" : "false");
}
#endif
@@ -3782,7 +3869,7 @@ _SetMinOpts:
if (opts.MinOpts() || opts.compDbgCode)
{
opts.compFlags &= ~CLFLG_MAXOPT;
- opts.compFlags |= CLFLG_MINOPT;
+ opts.compFlags |= CLFLG_MINOPT;
}
if (!compIsForInlining())
@@ -3791,7 +3878,9 @@ _SetMinOpts:
codeGen->setFrameRequired(false);
if (opts.MinOpts() || opts.compDbgCode)
+ {
codeGen->setFrameRequired(true);
+ }
#if !defined(_TARGET_AMD64_)
// The VM sets CORJIT_FLG_FRAMED for two reasons: (1) the COMPlus_JitFramed variable is set, or
@@ -3804,7 +3893,7 @@ _SetMinOpts:
if (compileFlags & CORJIT_FLG_RELOC)
{
- codeGen->genAlignLoops = false; // loop alignment not supported for prejitted code
+ codeGen->genAlignLoops = false; // loop alignment not supported for prejitted code
// The zapper doesn't set CORJIT_FLG_ALIGN_LOOPS, and there is
// no reason for it to set it as the JIT doesn't currently support loop alignment
@@ -3825,7 +3914,7 @@ _SetMinOpts:
#ifdef _TARGET_ARMARCH_
// Function compRsvdRegCheck:
-// given a curState to use for calculating the total frame size
+// given a curState to use for calculating the total frame size
// it will return true if the REG_OPT_RSVD should be reserved so
// that it can be used to form large offsets when accessing stack
// based LclVars, including both incoming and outgoing argument areas.
@@ -3833,7 +3922,7 @@ _SetMinOpts:
// The method advances the frame layout state to curState by calling
// lvaFrameSize(curState).
//
-bool Compiler::compRsvdRegCheck(FrameLayoutState curState)
+bool Compiler::compRsvdRegCheck(FrameLayoutState curState)
{
// Always do the layout even if returning early. Callers might
// depend on us to do the layout.
@@ -3860,7 +3949,7 @@ bool Compiler::compRsvdRegCheck(FrameLayoutState curState)
// TODO-ARM64-CQ: update this!
return true; // just always assume we'll need it, for now
-#else // _TARGET_ARM_
+#else // _TARGET_ARM_
// frame layout:
//
@@ -3868,9 +3957,9 @@ bool Compiler::compRsvdRegCheck(FrameLayoutState curState)
// inArgs compArgSize
// origSP --->
// LR --->
- // R11 --->
- // + callee saved regs CALLEE_SAVED_REG_MAXSZ (32 bytes)
- // optional saved fp regs 16 * sizeof(float) (64 bytes)
+ // R11 --->
+ // + callee saved regs CALLEE_SAVED_REG_MAXSZ (32 bytes)
+ // optional saved fp regs 16 * sizeof(float) (64 bytes)
// - lclSize
// incl. TEMPS MAX_SPILL_TEMP_SIZE
// + incl. outArgs
@@ -3925,11 +4014,13 @@ bool Compiler::compRsvdRegCheck(FrameLayoutState curState)
}
#endif // _TARGET_ARMARCH_
-void Compiler::compFunctionTraceStart()
+void Compiler::compFunctionTraceStart()
{
#ifdef DEBUG
if (compIsForInlining())
+ {
return;
+ }
if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable)
{
@@ -3940,13 +4031,15 @@ void Compiler::compFunctionTraceStart()
}
for (LONG i = 0; i < newJitNestingLevel - 1; i++)
+ {
printf(" ");
+ }
printf("{ Start Jitting %s\n", info.compFullName); /* } editor brace matching workaround for this printf */
}
#endif // DEBUG
}
-void Compiler::compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI)
+void Compiler::compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI)
{
#ifdef DEBUG
assert(!compIsForInlining());
@@ -3960,31 +4053,28 @@ void Compiler::compFunctionTraceEnd(void* methodCodePtr, ULONG me
}
for (LONG i = 0; i < newJitNestingLevel; i++)
+ {
printf(" ");
+ }
/* { editor brace-matching workaround for following printf */
- printf("} Jitted Entry %03x at" FMT_ADDR "method %s size %08x%s\n",
- Compiler::jitTotalMethodCompiled,
- DBG_ADDR(methodCodePtr),
- info.compFullName,
- methodCodeSize,
- isNYI ? " NYI" : (compIsForImportOnly() ? " import only" : ""));
+ printf("} Jitted Entry %03x at" FMT_ADDR "method %s size %08x%s\n", Compiler::jitTotalMethodCompiled,
+ DBG_ADDR(methodCodePtr), info.compFullName, methodCodeSize,
+ isNYI ? " NYI" : (compIsForImportOnly() ? " import only" : ""));
}
-#endif // DEBUG
+#endif // DEBUG
}
//*********************************************************************************************
// #Phases
-//
+//
// This is the most interesting 'toplevel' function in the JIT. It goes through the operations of
// importing, morphing, optimizations and code generation. This is called from the EE through the
-// code:CILJit::compileMethod function.
-//
+// code:CILJit::compileMethod function.
+//
// For an overview of the structure of the JIT, see:
// https://github.com/dotnet/coreclr/blob/master/Documentation/botr/ryujit-overview.md
-//
-void Compiler::compCompile(void * * methodCodePtr,
- ULONG * methodCodeSize,
- CORJIT_FLAGS * compileFlags)
+//
+void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, CORJIT_FLAGS* compileFlags)
{
if (compIsForInlining())
{
@@ -4001,7 +4091,7 @@ void Compiler::compCompile(void * * methodCodePtr,
if (info.compPublishStubParam)
{
assert(lvaStubArgumentVar == BAD_VAR_NUM);
- lvaStubArgumentVar = lvaGrabTempWithImplicitUse(false DEBUGARG("stub argument"));
+ lvaStubArgumentVar = lvaGrabTempWithImplicitUse(false DEBUGARG("stub argument"));
lvaTable[lvaStubArgumentVar].lvType = TYP_I_IMPL;
}
@@ -4024,15 +4114,17 @@ void Compiler::compCompile(void * * methodCodePtr,
if (compIsForInlining())
{
/* Quit inlining if fgImport() failed for any reason. */
-
+
if (compDonotInline())
+ {
return;
+ }
/* Filter out unimported BBs */
fgRemoveEmptyBlocks();
-
- return;
+
+ return;
}
assert(!compDonotInline());
@@ -4069,7 +4161,7 @@ void Compiler::compCompile(void * * methodCodePtr,
// Since we need slots for security near ebp, it's not possible
// to do this after an Edit without shifting all the locals.
// So we just always reserve space for these slots in case an Edit adds them
- opts.compNeedSecurityCheck = true;
+ opts.compNeedSecurityCheck = true;
// We don't care about localloc right now. If we do support it,
// EECodeManager::FixContextForEnC() needs to handle it smartly
@@ -4092,8 +4184,9 @@ void Compiler::compCompile(void * * methodCodePtr,
/* GS security checks for unsafe buffers */
if (getNeedsGSSecurityCookie())
{
-#ifdef DEBUG
- if (verbose) {
+#ifdef DEBUG
+ if (verbose)
+ {
printf("\n*************** -GS checks for unsafe buffers \n");
}
#endif
@@ -4105,8 +4198,9 @@ void Compiler::compCompile(void * * methodCodePtr,
gsCopyShadowParams();
}
-#ifdef DEBUG
- if (verbose) {
+#ifdef DEBUG
+ if (verbose)
+ {
fgDispBasicBlocks(true);
printf("\n");
}
@@ -4144,7 +4238,7 @@ void Compiler::compCompile(void * * methodCodePtr,
#endif // FEATURE_EH_FUNCLETS
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (!opts.MinOpts() && !opts.compDbgCode)
{
optOptimizeLayout();
EndPhase(PHASE_OPTIMIZE_LAYOUT);
@@ -4190,12 +4284,12 @@ void Compiler::compCompile(void * * methodCodePtr,
// IMPORTANT, after this point, every place where trees are modified or cloned
// the local variable reference counts must be updated
- // You can test the value of the following variable to see if
+ // You can test the value of the following variable to see if
// the local variable ref counts must be updated
//
assert(lvaLocalVarRefCounted == true);
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (!opts.MinOpts() && !opts.compDbgCode)
{
/* Optimize boolean conditions */
@@ -4224,15 +4318,15 @@ void Compiler::compCompile(void * * methodCodePtr,
// Now we have determined the order of evaluation and the gtCosts for every node.
// If verbose, dump the full set of trees here before the optimization phases mutate them
//
- if (verbose)
+ if (verbose)
{
- fgDispBasicBlocks(true); // 'true' will call fgDumpTrees() after dumping the BasicBlocks
+ fgDispBasicBlocks(true); // 'true' will call fgDumpTrees() after dumping the BasicBlocks
printf("\n");
}
#endif
// At this point we know if we are fully interruptible or not
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (!opts.MinOpts() && !opts.compDbgCode)
{
bool doSsa = true;
bool doEarlyProp = true;
@@ -4243,12 +4337,12 @@ void Compiler::compCompile(void * * methodCodePtr,
bool doRangeAnalysis = true;
#ifdef DEBUG
- doSsa = (JitConfig.JitDoSsa() != 0);
- doEarlyProp = doSsa && (JitConfig.JitDoEarlyProp() != 0);
- doValueNum = doSsa && (JitConfig.JitDoValueNumber() != 0);
- doLoopHoisting = doValueNum && (JitConfig.JitDoLoopHoisting() != 0);
- doCopyProp = doValueNum && (JitConfig.JitDoCopyProp() != 0);
- doAssertionProp = doValueNum && (JitConfig.JitDoAssertionProp() != 0);
+ doSsa = (JitConfig.JitDoSsa() != 0);
+ doEarlyProp = doSsa && (JitConfig.JitDoEarlyProp() != 0);
+ doValueNum = doSsa && (JitConfig.JitDoValueNumber() != 0);
+ doLoopHoisting = doValueNum && (JitConfig.JitDoLoopHoisting() != 0);
+ doCopyProp = doValueNum && (JitConfig.JitDoCopyProp() != 0);
+ doAssertionProp = doValueNum && (JitConfig.JitDoAssertionProp() != 0);
doRangeAnalysis = doAssertionProp && (JitConfig.JitDoRangeAnalysis() != 0);
#endif
@@ -4308,7 +4402,7 @@ void Compiler::compCompile(void * * methodCodePtr,
#endif // ASSERTION_PROP
/* update the flowgraph if we modified it during the optimization phase*/
- if (fgModified)
+ if (fgModified)
{
fgUpdateFlowGraph();
EndPhase(PHASE_UPDATE_FLOW_GRAPH);
@@ -4362,7 +4456,6 @@ void Compiler::compCompile(void * * methodCodePtr,
fgDebugCheckLinks();
#endif
-
/* Enable this to gather statistical data such as
* call and register argument info, flowgraph and loop info, etc. */
@@ -4386,8 +4479,8 @@ void Compiler::compCompile(void * * methodCodePtr,
#ifdef DEBUG
//
// Display the pre-regalloc frame offsets that we have tentatively decided upon
- //
- if (verbose)
+ //
+ if (verbose)
lvaTableDump();
#endif
#endif // _TARGET_ARMARCH_
@@ -4409,8 +4502,8 @@ void Compiler::compCompile(void * * methodCodePtr,
Lowering lower(this, m_pLinearScan); // PHASE_LOWERING
lower.Run();
- assert(lvaSortAgain == false); // We should have re-run fgLocalVarLiveness() in lower.Run()
- lvaTrackedFixed = true; // We can not add any new tracked variables after this point.
+ assert(lvaSortAgain == false); // We should have re-run fgLocalVarLiveness() in lower.Run()
+ lvaTrackedFixed = true; // We can not add any new tracked variables after this point.
/* Now that lowering is completed we can proceed to perform register allocation */
m_pLinearScan->doLinearScan();
@@ -4418,9 +4511,9 @@ void Compiler::compCompile(void * * methodCodePtr,
// Copied from rpPredictRegUse()
genFullPtrRegMap = (codeGen->genInterruptible || !codeGen->isFramePointerUsed());
-#else // LEGACY_BACKEND
-
- lvaTrackedFixed = true; // We cannot add any new tracked variables after this point.
+#else // LEGACY_BACKEND
+
+ lvaTrackedFixed = true; // We cannot add any new tracked variables after this point.
// For the classic JIT32 at this point lvaSortAgain can be set and raAssignVars() will call lvaSortOnly()
// Now do "classic" register allocation.
@@ -4437,7 +4530,7 @@ void Compiler::compCompile(void * * methodCodePtr,
codeGen->genGenerateCode(methodCodePtr, methodCodeSize);
#ifdef FEATURE_JIT_METHOD_PERF
- if (pCompJitTimer)
+ if (pCompJitTimer)
pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary);
#endif
@@ -4454,7 +4547,7 @@ void Compiler::compCompile(void * * methodCodePtr,
compFunctionTraceEnd(*methodCodePtr, *methodCodeSize, false);
#if FUNC_INFO_LOGGING
- if (compJitFuncInfoFile != NULL)
+ if (compJitFuncInfoFile != nullptr)
{
assert(!compIsForInlining());
#ifdef DEBUG // We only have access to info.compFullName in DEBUG builds.
@@ -4462,7 +4555,7 @@ void Compiler::compCompile(void * * methodCodePtr,
#elif FEATURE_SIMD
fprintf(compJitFuncInfoFile, " %s\n", eeGetMethodFullName(info.compMethodHnd));
#endif
- fprintf(compJitFuncInfoFile, ""); // in our logic this causes a flush
+ fprintf(compJitFuncInfoFile, ""); // in our logic this causes a flush
}
#endif // FUNC_INFO_LOGGING
}
@@ -4476,35 +4569,37 @@ void Compiler::ProcessShutdownWork(ICorStaticInfo* statInfo)
// Check if we need to add the Quirk for the PPP backward compat issue.
// This Quirk addresses a compatibility issue between the new RyuJit and the previous JIT64.
// A backward compatibility issue called 'PPP' exists where a PInvoke call passes a 32-byte struct
-// into a native API which basically writes 48 bytes of data into the struct.
+// into a native API which basically writes 48 bytes of data into the struct.
// With the stack frame layout used by the RyuJIT the extra 16 bytes written corrupts a
// caller saved register and this leads to an A/V in the calling method.
// The older JIT64 jit compiler just happened to have a different stack layout and/or
-// caller saved register set so that it didn't hit the A/V in the caller.
+// caller saved register set so that it didn't hit the A/V in the caller.
// By increasing the amount of stack allocated for the struct by 32 bytes we can fix this.
//
// Return true if we actually perform the Quirk, otherwise return false
//
-bool Compiler::compQuirkForPPP()
+bool Compiler::compQuirkForPPP()
{
- if (lvaCount != 2) // We require that there are exactly two locals
+ if (lvaCount != 2)
+ { // We require that there are exactly two locals
return false;
+ }
- if (compTailCallUsed) // Don't try this quirk if a tail call was used
+ if (compTailCallUsed)
+ { // Don't try this quirk if a tail call was used
return false;
+ }
- bool hasOutArgs = false;
- LclVarDsc * varDscExposedStruct = nullptr;
+ bool hasOutArgs = false;
+ LclVarDsc* varDscExposedStruct = nullptr;
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
/* Look for struct locals that are address taken */
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++, varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
- if (varDsc->lvIsParam) // It can't be a parameter
+ if (varDsc->lvIsParam) // It can't be a parameter
{
continue;
}
@@ -4512,14 +4607,12 @@ bool Compiler::compQuirkForPPP()
// We require that the OutgoingArg space lclVar exists
if (lclNum == lvaOutgoingArgSpaceVar)
{
- hasOutArgs = true; // Record that we saw it
+ hasOutArgs = true; // Record that we saw it
continue;
}
-
+
// Look for a 32-byte address exposed Struct and record its varDsc
- if ((varDsc->TypeGet() == TYP_STRUCT) &&
- varDsc->lvAddrExposed &&
- (varDsc->lvExactSize == 32) )
+ if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvAddrExposed && (varDsc->lvExactSize == 32))
{
varDscExposedStruct = varDsc;
}
@@ -4551,7 +4644,7 @@ bool Compiler::compQuirkForPPP()
/*****************************************************************************/
#ifdef DEBUG
-void* forceFrameJIT; // used to force to frame & useful for fastchecked debugging
+void* forceFrameJIT; // used to force to frame & useful for fastchecked debugging
bool Compiler::skipMethod()
{
@@ -4566,13 +4659,20 @@ bool Compiler::skipMethod()
// passed to ConfigMethodRange represents the set of all methods.
if (!fJitRange.Contains(info.compCompHnd, info.compMethodHnd))
+ {
return true;
+ }
if (JitConfig.JitExclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
return true;
+ }
- if (!JitConfig.JitInclude().isEmpty() && !JitConfig.JitInclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ if (!JitConfig.JitInclude().isEmpty() &&
+ !JitConfig.JitInclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args))
+ {
return true;
+ }
return false;
}
@@ -4581,24 +4681,23 @@ bool Compiler::skipMethod()
/*****************************************************************************/
-int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
- CORINFO_MODULE_HANDLE classPtr,
- COMP_HANDLE compHnd,
- CORINFO_METHOD_INFO * methodInfo,
- void * * methodCodePtr,
- ULONG * methodCodeSize,
- CORJIT_FLAGS * compileFlags)
+int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
+ CORINFO_MODULE_HANDLE classPtr,
+ COMP_HANDLE compHnd,
+ CORINFO_METHOD_INFO* methodInfo,
+ void** methodCodePtr,
+ ULONG* methodCodeSize,
+ CORJIT_FLAGS* compileFlags)
{
#ifdef FEATURE_JIT_METHOD_PERF
static bool checkedForJitTimeLog = false;
if (!checkedForJitTimeLog)
{
- // Call into VM to get the config strings. FEATURE_JIT_METHOD_PERF is enabled for
- // retail builds. Do not call the regular Config helper here as it would pull
+ // Call into VM to get the config strings. FEATURE_JIT_METHOD_PERF is enabled for
+ // retail builds. Do not call the regular Config helper here as it would pull
// in a copy of the config parser into the clrjit.dll.
- InterlockedCompareExchangeT(&Compiler::compJitTimeLogFilename,
- compHnd->getJitTimeLogFilename(), NULL);
+ InterlockedCompareExchangeT(&Compiler::compJitTimeLogFilename, compHnd->getJitTimeLogFilename(), NULL);
// At a process or module boundary clear the file and start afresh.
JitTimer::PrintCsvHeader();
@@ -4616,14 +4715,14 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
#endif // FEATURE_JIT_METHOD_PERF
#ifdef DEBUG
- Compiler* me = this;
- forceFrameJIT = (void*) &me; // let us see the this pointer in fastchecked build
+ Compiler* me = this;
+ forceFrameJIT = (void*)&me; // let us see the this pointer in fastchecked build
// set this early so we can use it without relying on random memory values
- verbose = compIsForInlining()?impInlineInfo->InlinerCompiler->verbose:false;
+ verbose = compIsForInlining() ? impInlineInfo->InlinerCompiler->verbose : false;
this->dumpIR = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIR : false;
- this->dumpIRPhase = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRPhase : NULL;
- this->dumpIRFormat = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRFormat : NULL;
+ this->dumpIRPhase = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRPhase : nullptr;
+ this->dumpIRFormat = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRFormat : nullptr;
this->dumpIRTypes = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRTypes : false;
this->dumpIRLocals = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRLocals : false;
this->dumpIRRegs = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRRegs : false;
@@ -4632,7 +4731,7 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
this->dumpIRCosts = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRCosts : false;
this->dumpIRFlags = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRFlags : false;
this->dumpIRKinds = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRKinds : false;
- this->dumpIRNodes = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRNodes: false;
+ this->dumpIRNodes = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRNodes : false;
this->dumpIRNoLists = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRNoLists : false;
this->dumpIRNoLeafs = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRNoLeafs : false;
this->dumpIRNoStmts = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRNoStmts : false;
@@ -4651,14 +4750,13 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
#if FUNC_INFO_LOGGING
LPCWSTR tmpJitFuncInfoFilename = JitConfig.JitFuncInfoFile();
- if (tmpJitFuncInfoFilename != NULL)
+ if (tmpJitFuncInfoFilename != nullptr)
{
- LPCWSTR oldFuncInfoFileName = InterlockedCompareExchangeT(&compJitFuncInfoFilename,
- tmpJitFuncInfoFilename,
- NULL);
- if (oldFuncInfoFileName == NULL)
+ LPCWSTR oldFuncInfoFileName =
+ InterlockedCompareExchangeT(&compJitFuncInfoFilename, tmpJitFuncInfoFilename, NULL);
+ if (oldFuncInfoFileName == nullptr)
{
- assert(compJitFuncInfoFile == NULL);
+ assert(compJitFuncInfoFile == nullptr);
compJitFuncInfoFile = _wfopen(compJitFuncInfoFilename, W("a"));
}
}
@@ -4666,9 +4764,9 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
// if (s_compMethodsCount==0) setvbuf(jitstdout, NULL, _IONBF, 0);
- info.compCompHnd = compHnd;
- info.compMethodHnd = methodHnd;
- info.compMethodInfo = methodInfo;
+ info.compCompHnd = compHnd;
+ info.compMethodHnd = methodHnd;
+ info.compMethodInfo = methodInfo;
// Do we have a matched VM? Or are we "abusing" the VM to help us do JIT work (such as using an x86 native VM
// with an ARM-targeting "altjit").
@@ -4682,45 +4780,45 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
#if COR_JIT_EE_VERSION > 460
compMaxUncheckedOffsetForNullObject = eeGetEEInfo()->maxUncheckedOffsetForNullObject;
-#else // COR_JIT_EE_VERSION <= 460
+#else // COR_JIT_EE_VERSION <= 460
compMaxUncheckedOffsetForNullObject = MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT;
#endif // COR_JIT_EE_VERSION > 460
// Set the context for token lookup.
if (compIsForInlining())
- {
+ {
impTokenLookupContextHandle = impInlineInfo->tokenLookupContextHandle;
assert(impInlineInfo->inlineCandidateInfo->clsHandle == compHnd->getMethodClass(methodHnd));
info.compClassHnd = impInlineInfo->inlineCandidateInfo->clsHandle;
- assert(impInlineInfo->inlineCandidateInfo->clsAttr ==
- info.compCompHnd->getClassAttribs(info.compClassHnd));
- // printf("%x != %x\n", impInlineInfo->inlineCandidateInfo->clsAttr, info.compCompHnd->getClassAttribs(info.compClassHnd));
+ assert(impInlineInfo->inlineCandidateInfo->clsAttr == info.compCompHnd->getClassAttribs(info.compClassHnd));
+ // printf("%x != %x\n", impInlineInfo->inlineCandidateInfo->clsAttr,
+ // info.compCompHnd->getClassAttribs(info.compClassHnd));
info.compClassAttr = impInlineInfo->inlineCandidateInfo->clsAttr;
}
else
- {
+ {
impTokenLookupContextHandle = MAKE_METHODCONTEXT(info.compMethodHnd);
- info.compClassHnd = compHnd->getMethodClass(methodHnd);
- info.compClassAttr = info.compCompHnd->getClassAttribs(info.compClassHnd);
+ info.compClassHnd = compHnd->getMethodClass(methodHnd);
+ info.compClassAttr = info.compCompHnd->getClassAttribs(info.compClassHnd);
}
info.compProfilerCallback = false; // Assume false until we are told to hook this method.
#if defined(DEBUG) || defined(LATE_DISASM)
- const char * classNamePtr;
+ const char* classNamePtr;
- info.compMethodName = eeGetMethodName(methodHnd, &classNamePtr);
- unsigned len = (unsigned)roundUp(strlen(classNamePtr)+1);
- info.compClassName = (char *)compGetMem(len, CMK_DebugOnly);
- strcpy_s((char *)info.compClassName, len, classNamePtr);
+ info.compMethodName = eeGetMethodName(methodHnd, &classNamePtr);
+ unsigned len = (unsigned)roundUp(strlen(classNamePtr) + 1);
+ info.compClassName = (char*)compGetMem(len, CMK_DebugOnly);
+ strcpy_s((char*)info.compClassName, len, classNamePtr);
- info.compFullName = eeGetMethodFullName(methodHnd);
+ info.compFullName = eeGetMethodFullName(methodHnd);
#endif // defined(DEBUG) || defined(LATE_DISASM)
-#ifdef DEBUG
+#ifdef DEBUG
if (!compIsForInlining())
{
JitTls::GetLogEnv()->setCompiler(this);
@@ -4771,106 +4869,103 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
switch (canSkipVerificationResult)
{
- case CORINFO_VERIFICATION_CANNOT_SKIP:
- // We cannot verify concrete instantiation.
- // We can only verify the typical/open instantiation
- // The VM should throw a VerificationException instead of allowing this.
- NO_WAY("Verification of closed instantiations is not supported");
- break;
+ case CORINFO_VERIFICATION_CANNOT_SKIP:
+ // We cannot verify concrete instantiation.
+ // We can only verify the typical/open instantiation
+ // The VM should throw a VerificationException instead of allowing this.
+ NO_WAY("Verification of closed instantiations is not supported");
+ break;
- case CORINFO_VERIFICATION_CAN_SKIP:
- // The VM should first verify the open instantiation. If unverifiable code
- // is detected, it should pass in CORJIT_FLG_SKIP_VERIFICATION.
- assert(!"The VM should have used CORJIT_FLG_SKIP_VERIFICATION");
- tiVerificationNeeded = false;
- break;
+ case CORINFO_VERIFICATION_CAN_SKIP:
+ // The VM should first verify the open instantiation. If unverifiable code
+ // is detected, it should pass in CORJIT_FLG_SKIP_VERIFICATION.
+ assert(!"The VM should have used CORJIT_FLG_SKIP_VERIFICATION");
+ tiVerificationNeeded = false;
+ break;
- case CORINFO_VERIFICATION_RUNTIME_CHECK:
- // This is a concrete generic instantiation with unverifiable code, that also
- // needs a runtime callout.
- tiVerificationNeeded = false;
- tiRuntimeCalloutNeeded = true;
- break;
+ case CORINFO_VERIFICATION_RUNTIME_CHECK:
+ // This is a concrete generic instantiation with unverifiable code, that also
+ // needs a runtime callout.
+ tiVerificationNeeded = false;
+ tiRuntimeCalloutNeeded = true;
+ break;
- case CORINFO_VERIFICATION_DONT_JIT:
- // We cannot verify concrete instantiation.
- // We can only verify the typical/open instantiation
- // The VM should throw a VerificationException instead of allowing this.
- BADCODE("NGEN of unverifiable transparent code is not supported");
- break;
+ case CORINFO_VERIFICATION_DONT_JIT:
+ // We cannot verify concrete instantiation.
+ // We can only verify the typical/open instantiation
+ // The VM should throw a VerificationException instead of allowing this.
+ BADCODE("NGEN of unverifiable transparent code is not supported");
+ break;
}
}
// load any constraints for verification, noting any cycles to be rejected by the verifying importer
if (tiVerificationNeeded)
{
- compHnd->initConstraintsForVerification(methodHnd,
- &info.hasCircularClassConstraints,
+ compHnd->initConstraintsForVerification(methodHnd, &info.hasCircularClassConstraints,
&info.hasCircularMethodConstraints);
}
}
-
+
/* Setup an error trap */
struct Param
{
- Compiler *pThis;
+ Compiler* pThis;
CORINFO_MODULE_HANDLE classPtr;
COMP_HANDLE compHnd;
- CORINFO_METHOD_INFO * methodInfo;
- void * * methodCodePtr;
- ULONG * methodCodeSize;
- CORJIT_FLAGS * compileFlags;
+ CORINFO_METHOD_INFO* methodInfo;
+ void** methodCodePtr;
+ ULONG* methodCodeSize;
+ CORJIT_FLAGS* compileFlags;
CorInfoInstantiationVerification instVerInfo;
- int result;
+ int result;
} param;
- param.pThis = this;
- param.classPtr = classPtr;
- param.compHnd = compHnd;
- param.methodInfo = methodInfo;
- param.methodCodePtr = methodCodePtr;
+ param.pThis = this;
+ param.classPtr = classPtr;
+ param.compHnd = compHnd;
+ param.methodInfo = methodInfo;
+ param.methodCodePtr = methodCodePtr;
param.methodCodeSize = methodCodeSize;
- param.compileFlags = compileFlags;
- param.instVerInfo = instVerInfo;
- param.result = CORJIT_INTERNALERROR;
+ param.compileFlags = compileFlags;
+ param.instVerInfo = instVerInfo;
+ param.result = CORJIT_INTERNALERROR;
- setErrorTrap(compHnd, Param *, pParam, &param) // ERROR TRAP: Start normal block
+ setErrorTrap(compHnd, Param*, pParam, &param) // ERROR TRAP: Start normal block
{
- pParam->result = pParam->pThis->compCompileHelper(pParam->classPtr,
- pParam->compHnd,
- pParam->methodInfo,
- pParam->methodCodePtr,
- pParam->methodCodeSize,
- pParam->compileFlags,
- pParam->instVerInfo);
+ pParam->result = pParam->pThis->compCompileHelper(pParam->classPtr, pParam->compHnd, pParam->methodInfo,
+ pParam->methodCodePtr, pParam->methodCodeSize,
+ pParam->compileFlags, pParam->instVerInfo);
}
- finallyErrorTrap() // ERROR TRAP: The following block handles errors
+ finallyErrorTrap() // ERROR TRAP: The following block handles errors
{
/* Cleanup */
if (compIsForInlining())
+ {
goto DoneCleanUp;
+ }
/* Tell the emitter that we're done with this function */
genEmitter->emitEndCG();
-DoneCleanUp:
+ DoneCleanUp:
compDone();
}
- endErrorTrap() // ERROR TRAP: End
+ endErrorTrap() // ERROR TRAP: End
- return param.result;
+ return param.result;
}
#if defined(DEBUG) || defined(INLINE_DATA)
-unsigned Compiler::Info::compMethodHash() const
+unsigned Compiler::Info::compMethodHash() const
{
if (compMethodHashPrivate == 0)
{
- compMethodHashPrivate = compCompHnd->getMethodHash(compMethodHnd);
+ compMethodHashPrivate = compCompHnd->getMethodHash(compMethodHnd);
}
return compMethodHashPrivate;
}
@@ -4900,8 +4995,7 @@ void Compiler::compCompileFinish()
#ifdef DEBUG
if (s_dspMemStats || verbose)
{
- printf("\nAllocations for %s (MethodHash=%08x)\n",
- info.compFullName, info.compMethodHash());
+ printf("\nAllocations for %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash());
genMemStats.Print(jitstdout);
}
#endif // DEBUG
@@ -4920,18 +5014,19 @@ void Compiler::compCompileFinish()
// Small methods should fit in ArenaAllocator::getDefaultPageSize(), or else
// we should bump up ArenaAllocator::getDefaultPageSize()
- if ((info.compILCodeSize <= 32) && // Is it a reasonably small method?
- (info.compNativeCodeSize < 512) && // Some trivial methods generate huge native code. eg. pushing a single huge struct
- (impInlinedCodeSize <= 128) && // Is the inlining reasonably bounded?
- // Small methods cannot meaningfully have a big number of locals
- // or arguments. We always track arguments at the start of
- // the prolog which requires memory
- (info.compLocalsCount <= 32) &&
- (!opts.MinOpts()) && // We may have too many local variables, etc
- (getJitStressLevel() == 0) && // We need extra memory for stress
- !compAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for DirectAlloc
+ if ((info.compILCodeSize <= 32) && // Is it a reasonably small method?
+ (info.compNativeCodeSize < 512) && // Some trivial methods generate huge native code. eg. pushing a single huge
+ // struct
+ (impInlinedCodeSize <= 128) && // Is the inlining reasonably bounded?
+ // Small methods cannot meaningfully have a big number of locals
+ // or arguments. We always track arguments at the start of
+ // the prolog which requires memory
+ (info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc
+ (getJitStressLevel() == 0) && // We need extra memory for stress
+ !compAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for
+ // DirectAlloc
(compAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) &&
- // Factor of 2x is because data-structures are bigger under DEBUG
+// Factor of 2x is because data-structures are bigger under DEBUG
#ifndef LEGACY_BACKEND
// RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete.
(compAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) &&
@@ -4943,8 +5038,7 @@ void Compiler::compCompileFinish()
// Less than 1% of all methods should run into this.
// We cannot be more strict as there are always degenerate cases where we
// would need extra memory (like huge structs as locals - see lvaSetStruct()).
- assert((genMethodCnt < 500) ||
- (genSmallMethodsNeedingExtraMemoryCnt < (genMethodCnt/100)));
+ assert((genMethodCnt < 500) || (genSmallMethodsNeedingExtraMemoryCnt < (genMethodCnt / 100)));
}
#endif // DEBUG
@@ -4962,8 +5056,7 @@ void Compiler::compCompileFinish()
mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd);
unsigned profCallCount = 0;
- if (((opts.eeFlags & CORJIT_FLG_BBOPT) != 0) &&
- fgHaveProfileData())
+ if (((opts.eeFlags & CORJIT_FLG_BBOPT) != 0) && fgHaveProfileData())
{
assert(fgProfileBuffer[0].ILOffset == 0);
profCallCount = fgProfileBuffer[0].ExecutionCount;
@@ -4985,50 +5078,81 @@ void Compiler::compCompileFinish()
CorInfoRegionKind regionKind = info.compMethodInfo->regionKind;
-
if (opts.altJit)
+ {
printf("ALT | ");
+ }
else if (fgHaveProfileData())
+ {
printf("PRF | ");
+ }
else
+ {
printf(" | ");
+ }
if (regionKind == CORINFO_REGION_NONE)
+ {
printf(" | ");
+ }
else if (regionKind == CORINFO_REGION_HOT)
+ {
printf(" HOT | ");
+ }
else if (regionKind == CORINFO_REGION_COLD)
+ {
printf("COLD | ");
+ }
else if (regionKind == CORINFO_REGION_JIT)
+ {
printf(" JIT | ");
+ }
else
+ {
printf("UNKN | ");
+ }
+
+ printf("%8d | ", profCallCount);
- printf("%8d | ", profCallCount);
-
if (compHndBBtabCount > 0)
+ {
printf("EH | ");
+ }
else
+ {
printf(" | ");
+ }
if (rpFrameType == FT_EBP_FRAME)
+ {
printf("%3s | ", STR_FPBASE);
+ }
else if (rpFrameType == FT_ESP_FRAME)
- printf("%3s | ", STR_SPBASE );
+ {
+ printf("%3s | ", STR_SPBASE);
+ }
#if DOUBLE_ALIGN
else if (rpFrameType == FT_DOUBLE_ALIGN_FRAME)
+ {
printf("dbl | ");
+ }
#endif
- else // (rpFrameType == FT_NOT_SET)
+ else
+ { // (rpFrameType == FT_NOT_SET)
printf("??? | ");
-
+ }
+
if (fgHasLoops)
+ {
printf("LOOP |");
+ }
else
+ {
printf(" |");
-
- printf(" %3d |", optCallCount);
- printf(" %3d |", optIndirectCallCount);
+ }
+
+ printf(" %3d |", optCallCount);
+ printf(" %3d |", optIndirectCallCount);
printf(" %3d |", fgBBcountAtCodegen);
printf(" %3d |", lvaCount);
@@ -5049,24 +5173,24 @@ void Compiler::compCompileFinish()
#ifndef LEGACY_BACKEND
printf(" LSRA |"); // TODO-Cleanup: dump some interesting LSRA stat into the order file?
#else // LEGACY_BACKEND
- printf("%s%4d p%1d |", (tmpCount>0)? "T" : " ", rpStkPredict/BB_UNITY_WEIGHT, rpPasses);
+ printf("%s%4d p%1d |", (tmpCount > 0) ? "T" : " ", rpStkPredict / BB_UNITY_WEIGHT, rpPasses);
#endif // LEGACY_BACKEND
printf(" %4d |", info.compMethodInfo->ILCodeSize);
printf(" %5d |", info.compTotalHotCodeSize);
printf(" %5d |", info.compTotalColdCodeSize);
-
+
printf(" %s\n", eeGetMethodFullName(info.compMethodHnd));
- printf(""); // in our logic this causes a flush
+ printf(""); // in our logic this causes a flush
}
if (verbose)
{
printf("****** DONE compiling %s\n", info.compFullName);
- printf(""); // in our logic this causes a flush
+ printf(""); // in our logic this causes a flush
}
// Only call _DbgBreakCheck when we are jitting, not when we are ngen-ing
- // For ngen the int3 or breakpoint instruction will be right at the
+ // For ngen the int3 or breakpoint instruction will be right at the
// start of the ngen method and we will stop when we execute it.
//
if ((opts.eeFlags & CORJIT_FLG_PREJIT) == 0)
@@ -5089,45 +5213,61 @@ void Compiler::compCompileFinish()
#ifdef PSEUDORANDOM_NOP_INSERTION
// this is zlib adler32 checksum. source came from windows base
-#define BASE 65521L // largest prime smaller than 65536
-#define NMAX 5552
-// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
-
-#define DO1(buf,i) {s1 += buf[i]; s2 += s1;}
-#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
-#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
-#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
-#define DO16(buf) DO8(buf,0); DO8(buf,8);
-
-unsigned adler32(unsigned adler, char *buf, unsigned int len)
-{
- unsigned int s1 = adler & 0xffff;
- unsigned int s2 = (adler >> 16) & 0xffff;
- int k;
-
- if (buf == NULL) return 1L;
-
- while (len > 0) {
- k = len < NMAX ? len : NMAX;
- len -= k;
- while (k >= 16) {
- DO16(buf);
- buf += 16;
- k -= 16;
- }
- if (k != 0) do {
- s1 += *buf++;
- s2 += s1;
- } while (--k);
- s1 %= BASE;
- s2 %= BASE;
- }
- return (s2 << 16) | s1;
-}
-#endif
+#define BASE 65521L // largest prime smaller than 65536
+#define NMAX 5552
+// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
+
+#define DO1(buf, i) \
+ { \
+ s1 += buf[i]; \
+ s2 += s1; \
+ }
+#define DO2(buf, i) \
+ DO1(buf, i); \
+ DO1(buf, i + 1);
+#define DO4(buf, i) \
+ DO2(buf, i); \
+ DO2(buf, i + 2);
+#define DO8(buf, i) \
+ DO4(buf, i); \
+ DO4(buf, i + 4);
+#define DO16(buf) \
+ DO8(buf, 0); \
+ DO8(buf, 8);
+
+unsigned adler32(unsigned adler, char* buf, unsigned int len)
+{
+ unsigned int s1 = adler & 0xffff;
+ unsigned int s2 = (adler >> 16) & 0xffff;
+ int k;
+ if (buf == NULL)
+ return 1L;
+
+ while (len > 0)
+ {
+ k = len < NMAX ? len : NMAX;
+ len -= k;
+ while (k >= 16)
+ {
+ DO16(buf);
+ buf += 16;
+ k -= 16;
+ }
+ if (k != 0)
+ do
+ {
+ s1 += *buf++;
+ s2 += s1;
+ } while (--k);
+ s1 %= BASE;
+ s2 %= BASE;
+ }
+ return (s2 << 16) | s1;
+}
+#endif
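// Editorial aside (not part of the patch): the unrolled DO1..DO16 form above is the standard
// zlib Adler-32; NMAX = 5552 only bounds how many bytes may be accumulated before the two
// modulo reductions are applied without risking 32-bit overflow. A minimal, unoptimized
// sketch of the same computation for comparison (note it treats the input as unsigned bytes,
// whereas the copy above walks a plain char buffer):
//
//   unsigned adler32Reference(unsigned adler, const unsigned char* buf, unsigned len)
//   {
//       unsigned s1 = adler & 0xffff;         // running byte sum
//       unsigned s2 = (adler >> 16) & 0xffff; // running sum of the byte sums
//       for (unsigned i = 0; i < len; i++)
//       {
//           s1 = (s1 + buf[i]) % 65521; // BASE, the largest prime below 2^16
//           s2 = (s2 + s1) % 65521;
//       }
//       return (s2 << 16) | s1;
//   }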
-unsigned getMethodBodyChecksum(__in_z char *code, int size)
+unsigned getMethodBodyChecksum(__in_z char* code, int size)
{
#ifdef PSEUDORANDOM_NOP_INSERTION
return adler32(0, code, size);
@@ -5136,363 +5276,359 @@ unsigned getMethodBodyChecksum(__in_z char *code, int size)
#endif
}
+int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
+ COMP_HANDLE compHnd,
+ CORINFO_METHOD_INFO* methodInfo,
+ void** methodCodePtr,
+ ULONG* methodCodeSize,
+ CORJIT_FLAGS* compileFlags,
+ CorInfoInstantiationVerification instVerInfo)
+{
+ CORINFO_METHOD_HANDLE methodHnd = info.compMethodHnd;
-int Compiler::compCompileHelper (CORINFO_MODULE_HANDLE classPtr,
- COMP_HANDLE compHnd,
- CORINFO_METHOD_INFO * methodInfo,
- void * * methodCodePtr,
- ULONG * methodCodeSize,
- CORJIT_FLAGS * compileFlags,
- CorInfoInstantiationVerification instVerInfo)
- {
- CORINFO_METHOD_HANDLE methodHnd = info.compMethodHnd;
-
- info.compCode = methodInfo->ILCode;
- info.compILCodeSize = methodInfo->ILCodeSize;
+ info.compCode = methodInfo->ILCode;
+ info.compILCodeSize = methodInfo->ILCodeSize;
- if (info.compILCodeSize == 0)
- BADCODE("code size is zero");
+ if (info.compILCodeSize == 0)
+ {
+ BADCODE("code size is zero");
+ }
- if (compIsForInlining())
- {
+ if (compIsForInlining())
+ {
#ifdef DEBUG
- unsigned methAttr_Old = impInlineInfo->inlineCandidateInfo->methAttr;
- unsigned methAttr_New = info.compCompHnd->getMethodAttribs(info.compMethodHnd);
- unsigned flagsToIgnore = CORINFO_FLG_DONT_INLINE | CORINFO_FLG_FORCEINLINE;
- assert((methAttr_Old & (~flagsToIgnore)) == (methAttr_New & (~flagsToIgnore)));
+ unsigned methAttr_Old = impInlineInfo->inlineCandidateInfo->methAttr;
+ unsigned methAttr_New = info.compCompHnd->getMethodAttribs(info.compMethodHnd);
+ unsigned flagsToIgnore = CORINFO_FLG_DONT_INLINE | CORINFO_FLG_FORCEINLINE;
+ assert((methAttr_Old & (~flagsToIgnore)) == (methAttr_New & (~flagsToIgnore)));
#endif
-
- info.compFlags = impInlineInfo->inlineCandidateInfo->methAttr;
- }
- else
- {
- info.compFlags = info.compCompHnd->getMethodAttribs(info.compMethodHnd);
+
+ info.compFlags = impInlineInfo->inlineCandidateInfo->methAttr;
+ }
+ else
+ {
+ info.compFlags = info.compCompHnd->getMethodAttribs(info.compMethodHnd);
#ifdef PSEUDORANDOM_NOP_INSERTION
- info.compChecksum = getMethodBodyChecksum((char*)methodInfo->ILCode, methodInfo->ILCodeSize);
+ info.compChecksum = getMethodBodyChecksum((char*)methodInfo->ILCode, methodInfo->ILCodeSize);
#endif
- }
+ }
+
+ // compInitOptions will set the correct verbose flag.
- // compInitOptions will set the correct verbose flag.
-
- compInitOptions(compileFlags);
+ compInitOptions(compileFlags);
#ifdef ALT_JIT
- if (!compIsForInlining() && !opts.altJit)
- {
- // We're an altjit, but the COMPlus_AltJit configuration did not say to compile this method,
- // so skip it.
- return CORJIT_SKIPPED;
- }
+ if (!compIsForInlining() && !opts.altJit)
+ {
+ // We're an altjit, but the COMPlus_AltJit configuration did not say to compile this method,
+ // so skip it.
+ return CORJIT_SKIPPED;
+ }
#endif // ALT_JIT
#ifdef DEBUG
- if (verbose)
- {
- printf("IL to import:\n");
- dumpILRange(info.compCode, info.compILCodeSize);
- }
+ if (verbose)
+ {
+ printf("IL to import:\n");
+ dumpILRange(info.compCode, info.compILCodeSize);
+ }
#endif
- // Check for COMPlus_AgressiveInlining
- if (JitConfig.JitAggressiveInlining())
- {
- compDoAggressiveInlining = true;
- }
-
- if (compDoAggressiveInlining)
- {
- info.compFlags |= CORINFO_FLG_FORCEINLINE;
- }
+    // Check for COMPlus_AggressiveInlining
+ if (JitConfig.JitAggressiveInlining())
+ {
+ compDoAggressiveInlining = true;
+ }
+
+ if (compDoAggressiveInlining)
+ {
+ info.compFlags |= CORINFO_FLG_FORCEINLINE;
+ }
#ifdef DEBUG
- // Check for ForceInline stress.
- if (compStressCompile(STRESS_FORCE_INLINE, 0))
- {
- info.compFlags |= CORINFO_FLG_FORCEINLINE;
- }
+ // Check for ForceInline stress.
+ if (compStressCompile(STRESS_FORCE_INLINE, 0))
+ {
+ info.compFlags |= CORINFO_FLG_FORCEINLINE;
+ }
- if (compIsForInlining())
- {
- JITLOG((LL_INFO100000, "\nINLINER impTokenLookupContextHandle for %s is 0x%p.\n",
- eeGetMethodFullName(info.compMethodHnd),
- dspPtr(impTokenLookupContextHandle)));
- }
+ if (compIsForInlining())
+ {
+ JITLOG((LL_INFO100000, "\nINLINER impTokenLookupContextHandle for %s is 0x%p.\n",
+ eeGetMethodFullName(info.compMethodHnd), dspPtr(impTokenLookupContextHandle)));
+ }
- // Force verification if asked to do so
- if (JitConfig.JitForceVer())
- tiVerificationNeeded = (instVerInfo == INSTVER_NOT_INSTANTIATION);
+ // Force verification if asked to do so
+ if (JitConfig.JitForceVer())
+ {
+ tiVerificationNeeded = (instVerInfo == INSTVER_NOT_INSTANTIATION);
+ }
- if (tiVerificationNeeded)
- {
- JITLOG((LL_INFO10000, "tiVerificationNeeded initially set to true for %s\n", info.compFullName));
- }
+ if (tiVerificationNeeded)
+ {
+ JITLOG((LL_INFO10000, "tiVerificationNeeded initially set to true for %s\n", info.compFullName));
+ }
#endif // DEBUG
- /* Since tiVerificationNeeded can be turned off in the middle of
- compiling a method, and it might have caused blocks to be queued up
- for reimporting, impCanReimport can be used to check for reimporting. */
+ /* Since tiVerificationNeeded can be turned off in the middle of
+ compiling a method, and it might have caused blocks to be queued up
+ for reimporting, impCanReimport can be used to check for reimporting. */
- impCanReimport = (tiVerificationNeeded || compStressCompile(STRESS_CHK_REIMPORT, 15));
-
- // Need security prolog/epilog callouts when there is a declarative security in the method.
- tiSecurityCalloutNeeded = ((info.compFlags & CORINFO_FLG_NOSECURITYWRAP) == 0);
+ impCanReimport = (tiVerificationNeeded || compStressCompile(STRESS_CHK_REIMPORT, 15));
- if (tiSecurityCalloutNeeded || (info.compFlags & CORINFO_FLG_SECURITYCHECK))
- {
- // We need to allocate the security object on the stack
- // when the method being compiled has a declarative security
- // (i.e. when CORINFO_FLG_NOSECURITYWRAP is reset for the current method).
- // This is also the case when we inject a prolog and epilog in the method.
- opts.compNeedSecurityCheck = true;
- }
+    // Need security prolog/epilog callouts when there is declarative security in the method.
+ tiSecurityCalloutNeeded = ((info.compFlags & CORINFO_FLG_NOSECURITYWRAP) == 0);
+
+ if (tiSecurityCalloutNeeded || (info.compFlags & CORINFO_FLG_SECURITYCHECK))
+ {
+ // We need to allocate the security object on the stack
+        // when the method being compiled has declarative security
+ // (i.e. when CORINFO_FLG_NOSECURITYWRAP is reset for the current method).
+ // This is also the case when we inject a prolog and epilog in the method.
+ opts.compNeedSecurityCheck = true;
+ }
- /* Initialize set a bunch of global values */
+    /* Initialize/set a bunch of global values */
- info.compScopeHnd = classPtr;
- info.compXcptnsCount = methodInfo->EHcount;
- info.compMaxStack = methodInfo->maxStack;
- compHndBBtab = NULL;
- compHndBBtabCount = 0;
- compHndBBtabAllocCount = 0;
+ info.compScopeHnd = classPtr;
+ info.compXcptnsCount = methodInfo->EHcount;
+ info.compMaxStack = methodInfo->maxStack;
+ compHndBBtab = nullptr;
+ compHndBBtabCount = 0;
+ compHndBBtabAllocCount = 0;
- info.compNativeCodeSize = 0;
- info.compTotalHotCodeSize = 0;
- info.compTotalColdCodeSize = 0;
+ info.compNativeCodeSize = 0;
+ info.compTotalHotCodeSize = 0;
+ info.compTotalColdCodeSize = 0;
-#ifdef DEBUG
- compCurBB = 0;
- lvaTable = 0;
+#ifdef DEBUG
+ compCurBB = nullptr;
+ lvaTable = nullptr;
- // Reset node ID counter
- compGenTreeID = 0;
+ // Reset node ID counter
+ compGenTreeID = 0;
#endif
- /* Initialize emitter */
+ /* Initialize emitter */
- if (!compIsForInlining())
- {
- codeGen->getEmitter()->emitBegCG(this, compHnd);
- }
-
- info.compIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
+ if (!compIsForInlining())
+ {
+ codeGen->getEmitter()->emitBegCG(this, compHnd);
+ }
- info.compIsContextful = (info.compClassAttr & CORINFO_FLG_CONTEXTFUL) != 0;
+ info.compIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
- info.compPublishStubParam = (opts.eeFlags & CORJIT_FLG_PUBLISH_SECRET_PARAM) != 0;
+ info.compIsContextful = (info.compClassAttr & CORINFO_FLG_CONTEXTFUL) != 0;
- switch (methodInfo->args.getCallConv())
- {
+ info.compPublishStubParam = (opts.eeFlags & CORJIT_FLG_PUBLISH_SECRET_PARAM) != 0;
+
+ switch (methodInfo->args.getCallConv())
+ {
case CORINFO_CALLCONV_VARARG:
case CORINFO_CALLCONV_NATIVEVARARG:
- info.compIsVarArgs = true;
+ info.compIsVarArgs = true;
break;
case CORINFO_CALLCONV_DEFAULT:
- info.compIsVarArgs = false;
+ info.compIsVarArgs = false;
break;
default:
BADCODE("bad calling convention");
- }
- info.compRetNativeType = info.compRetType = JITtype2varType(methodInfo->args.retType);
+ }
+ info.compRetNativeType = info.compRetType = JITtype2varType(methodInfo->args.retType);
- info.compCallUnmanaged = 0;
- info.compLvFrameListRoot = BAD_VAR_NUM;
+ info.compCallUnmanaged = 0;
+ info.compLvFrameListRoot = BAD_VAR_NUM;
#if FEATURE_FIXED_OUT_ARGS
- lvaOutgoingArgSpaceSize = 0;
+ lvaOutgoingArgSpaceSize = 0;
#endif
- lvaGenericsContextUsed = false;
+ lvaGenericsContextUsed = false;
- info.compInitMem = ((methodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0);
-
- /* Allocate the local variable table */
+ info.compInitMem = ((methodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0);
- lvaInitTypeRef();
+ /* Allocate the local variable table */
- if (!compIsForInlining())
- {
- compInitDebuggingInfo();
- }
+ lvaInitTypeRef();
- const bool forceInline = !!(info.compFlags & CORINFO_FLG_FORCEINLINE);
+ if (!compIsForInlining())
+ {
+ compInitDebuggingInfo();
+ }
- if (!compIsForInlining() && (opts.eeFlags & CORJIT_FLG_PREJIT))
- {
- // We're prejitting the root method. We also will analyze it as
- // a potential inline candidate.
- InlineResult prejitResult(this, methodHnd, "prejit");
+ const bool forceInline = !!(info.compFlags & CORINFO_FLG_FORCEINLINE);
- // Do the initial inline screen.
- impCanInlineIL(methodHnd, methodInfo, forceInline, &prejitResult);
+ if (!compIsForInlining() && (opts.eeFlags & CORJIT_FLG_PREJIT))
+ {
+ // We're prejitting the root method. We also will analyze it as
+ // a potential inline candidate.
+ InlineResult prejitResult(this, methodHnd, "prejit");
- // Temporarily install the prejitResult as the
- // compInlineResult so it's available to fgFindJumpTargets
- // and can accumulate more observations as the IL is
- // scanned.
- //
- // We don't pass prejitResult in as a parameter to avoid
- // potential aliasing confusion -- the other call to
- // fgFindBasicBlocks may have set up compInlineResult and
- // the code in fgFindJumpTargets references that data
- // member extensively.
- assert(compInlineResult == nullptr);
- assert(impInlineInfo == nullptr);
- compInlineResult = &prejitResult;
-
- // Find the basic blocks. We must do this regardless of
- // inlineability, since we are prejitting this method.
- //
- // This will also update the status of this method as
- // an inline candidate.
- fgFindBasicBlocks();
+ // Do the initial inline screen.
+ impCanInlineIL(methodHnd, methodInfo, forceInline, &prejitResult);
- // Undo the temporary setup.
- assert(compInlineResult == &prejitResult);
- compInlineResult = nullptr;
+ // Temporarily install the prejitResult as the
+ // compInlineResult so it's available to fgFindJumpTargets
+ // and can accumulate more observations as the IL is
+ // scanned.
+ //
+ // We don't pass prejitResult in as a parameter to avoid
+ // potential aliasing confusion -- the other call to
+ // fgFindBasicBlocks may have set up compInlineResult and
+ // the code in fgFindJumpTargets references that data
+ // member extensively.
+ assert(compInlineResult == nullptr);
+ assert(impInlineInfo == nullptr);
+ compInlineResult = &prejitResult;
+
+ // Find the basic blocks. We must do this regardless of
+ // inlineability, since we are prejitting this method.
+ //
+ // This will also update the status of this method as
+ // an inline candidate.
+ fgFindBasicBlocks();
- // If still a viable, discretionary inline, assess
- // profitability.
- if (prejitResult.IsDiscretionaryCandidate())
- {
- prejitResult.DetermineProfitability(methodInfo);
- }
+ // Undo the temporary setup.
+ assert(compInlineResult == &prejitResult);
+ compInlineResult = nullptr;
- // Handle the results of the inline analysis.
- if (prejitResult.IsFailure())
- {
- // This method is a bad inlinee according to our
- // analysis. We will let the InlineResult destructor
- // mark it as noinline in the prejit image to save the
- // jit some work.
- //
- // This decision better not be context-dependent.
- assert(prejitResult.IsNever());
- }
- else
- {
- // This looks like a viable inline candidate. Since
- // we're not actually inlining, don't report anything.
- prejitResult.SetReported();
- }
- }
- else
+ // If still a viable, discretionary inline, assess
+ // profitability.
+ if (prejitResult.IsDiscretionaryCandidate())
{
- // We are jitting the root method, or inlining.
- fgFindBasicBlocks();
+ prejitResult.DetermineProfitability(methodInfo);
}
- // If we're inlining and the candidate is bad, bail out.
- if (compDonotInline())
+ // Handle the results of the inline analysis.
+ if (prejitResult.IsFailure())
{
- goto _Next;
- }
+ // This method is a bad inlinee according to our
+ // analysis. We will let the InlineResult destructor
+ // mark it as noinline in the prejit image to save the
+ // jit some work.
+ //
+ // This decision better not be context-dependent.
+ assert(prejitResult.IsNever());
+ }
+ else
+ {
+ // This looks like a viable inline candidate. Since
+ // we're not actually inlining, don't report anything.
+ prejitResult.SetReported();
+ }
+ }
+ else
+ {
+ // We are jitting the root method, or inlining.
+ fgFindBasicBlocks();
+ }
- compSetOptimizationLevel();
+ // If we're inlining and the candidate is bad, bail out.
+ if (compDonotInline())
+ {
+ goto _Next;
+ }
+
+ compSetOptimizationLevel();
#if COUNT_BASIC_BLOCKS
- bbCntTable.record(fgBBcount);
+ bbCntTable.record(fgBBcount);
- if (fgBBcount == 1)
- {
- bbOneBBSizeTable.record(methodInfo->ILCodeSize);
- }
+ if (fgBBcount == 1)
+ {
+ bbOneBBSizeTable.record(methodInfo->ILCodeSize);
+ }
#endif // COUNT_BASIC_BLOCKS
-#ifdef DEBUG
- if (verbose)
- {
- printf("Basic block list for '%s'\n", info.compFullName);
- fgDispBasicBlocks();
- }
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("Basic block list for '%s'\n", info.compFullName);
+ fgDispBasicBlocks();
+ }
#endif
-#ifdef DEBUG
- /* Give the function a unique number */
+#ifdef DEBUG
+ /* Give the function a unique number */
- if (opts.disAsm || opts.dspEmit || verbose)
- {
- s_compMethodsCount = ~info.compMethodHash() & 0xffff;
- }
- else
- {
- s_compMethodsCount++;
- }
+ if (opts.disAsm || opts.dspEmit || verbose)
+ {
+ s_compMethodsCount = ~info.compMethodHash() & 0xffff;
+ }
+ else
+ {
+ s_compMethodsCount++;
+ }
#endif
- if (compIsForInlining())
- {
- compInlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS, fgBBcount);
-
- if (compInlineResult->IsFailure())
- {
- goto _Next;
- }
- }
+ if (compIsForInlining())
+ {
+ compInlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS, fgBBcount);
-#ifdef DEBUG
- if (JitConfig.DumpJittedMethods() == 1 && !compIsForInlining())
- {
- printf("Compiling %4d %s::%s, IL size = %u, hsh=0x%x\n",
- Compiler::jitTotalMethodCompiled,
- info.compClassName, info.compMethodName, info.compILCodeSize,
- info.compMethodHash()
- );
- }
- if (compIsForInlining())
+ if (compInlineResult->IsFailure())
{
- compGenTreeID = impInlineInfo->InlinerCompiler->compGenTreeID;
+ goto _Next;
}
+ }
+
+#ifdef DEBUG
+ if (JitConfig.DumpJittedMethods() == 1 && !compIsForInlining())
+ {
+ printf("Compiling %4d %s::%s, IL size = %u, hsh=0x%x\n", Compiler::jitTotalMethodCompiled, info.compClassName,
+ info.compMethodName, info.compILCodeSize, info.compMethodHash());
+ }
+ if (compIsForInlining())
+ {
+ compGenTreeID = impInlineInfo->InlinerCompiler->compGenTreeID;
+ }
#endif
- compCompile(methodCodePtr,
- methodCodeSize,
- compileFlags);
+ compCompile(methodCodePtr, methodCodeSize, compileFlags);
-#ifdef DEBUG
- if (compIsForInlining())
- {
- impInlineInfo->InlinerCompiler->compGenTreeID = compGenTreeID;
- }
+#ifdef DEBUG
+ if (compIsForInlining())
+ {
+ impInlineInfo->InlinerCompiler->compGenTreeID = compGenTreeID;
+ }
#endif
_Next:
-
- if (compDonotInline())
- {
- // Verify we have only one inline result in play.
- assert(impInlineInfo->inlineResult == compInlineResult);
- }
- if (!compIsForInlining())
- {
- compCompileFinish();
+ if (compDonotInline())
+ {
+ // Verify we have only one inline result in play.
+ assert(impInlineInfo->inlineResult == compInlineResult);
+ }
- // Did we just compile for a target architecture that the VM isn't expecting? If so, the VM
- // can't used the generated code (and we better be an AltJit!).
+ if (!compIsForInlining())
+ {
+ compCompileFinish();
- if (!info.compMatchedVM)
- {
- return CORJIT_SKIPPED;
- }
+ // Did we just compile for a target architecture that the VM isn't expecting? If so, the VM
+        // can't use the generated code (and we better be an AltJit!).
+
+ if (!info.compMatchedVM)
+ {
+ return CORJIT_SKIPPED;
+ }
#ifdef ALT_JIT
#ifdef DEBUG
- if (JitConfig.RunAltJitCode() == 0)
- {
- return CORJIT_SKIPPED;
- }
+ if (JitConfig.RunAltJitCode() == 0)
+ {
+ return CORJIT_SKIPPED;
+ }
#endif // DEBUG
#endif // ALT_JIT
- }
+ }
- /* Success! */
- return CORJIT_OK;
+ /* Success! */
+ return CORJIT_OK;
}
-
/*****************************************************************************/
#ifdef DEBUGGING_SUPPORT
/*****************************************************************************/
@@ -5516,16 +5652,14 @@ _Next:
// Note:
//    Usually called for scope count = 4. Could be called for values up to 8.
//
-VarScopeDsc* Compiler::compFindLocalVarLinear(unsigned varNum, unsigned offs)
+VarScopeDsc* Compiler::compFindLocalVarLinear(unsigned varNum, unsigned offs)
{
for (unsigned i = 0; i < info.compVarScopesCount; i++)
{
VarScopeDsc* dsc = &info.compVarScopes[i];
- if ((dsc->vsdVarNum == varNum) &&
- (dsc->vsdLifeBeg <= offs) &&
- (dsc->vsdLifeEnd > offs))
+ if ((dsc->vsdVarNum == varNum) && (dsc->vsdLifeBeg <= offs) && (dsc->vsdLifeEnd > offs))
{
- return dsc;
+ return dsc;
}
}
return nullptr;
@@ -5548,7 +5682,7 @@ VarScopeDsc* Compiler::compFindLocalVarLinear(unsigned varNum, unsigned o
// the offset only when the scope count is < MAX_LINEAR_FIND_LCL_SCOPELIST,
// else use the hashtable lookup.
//
-VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned offs)
+VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned offs)
{
if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST)
{
@@ -5580,7 +5714,7 @@ VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned offs)
// 2. Iterate through the linked list at index varNum to find a matching
// var scope.
//
-VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd)
+VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd)
{
assert(compVarScopeMap != nullptr);
@@ -5590,8 +5724,7 @@ VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg
VarScopeListNode* list = info->head;
while (list != nullptr)
{
- if ((list->data->vsdLifeBeg <= lifeBeg) &&
- (list->data->vsdLifeEnd > lifeEnd))
+ if ((list->data->vsdLifeBeg <= lifeBeg) && (list->data->vsdLifeEnd > lifeEnd))
{
return list->data;
}
@@ -5616,7 +5749,7 @@ VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg
// MAX_LINEAR_FIND_LCL_SCOPELIST is large.
// 2. Linked list preserves original array order.
//
-void Compiler::compInitVarScopeMap()
+void Compiler::compInitVarScopeMap()
{
if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST)
{
@@ -5641,7 +5774,7 @@ void Compiler::compInitVarScopeMap()
if (compVarScopeMap->Lookup(varNum, &info))
{
info->tail->next = node;
- info->tail = node;
+ info->tail = node;
}
// Create a new list.
else
@@ -5652,28 +5785,21 @@ void Compiler::compInitVarScopeMap()
}
}
-
-static
-int __cdecl genCmpLocalVarLifeBeg(const void * elem1, const void * elem2)
+static int __cdecl genCmpLocalVarLifeBeg(const void* elem1, const void* elem2)
{
- return (*((VarScopeDsc**) elem1))->vsdLifeBeg -
- (*((VarScopeDsc**) elem2))->vsdLifeBeg;
+ return (*((VarScopeDsc**)elem1))->vsdLifeBeg - (*((VarScopeDsc**)elem2))->vsdLifeBeg;
}
-static
-int __cdecl genCmpLocalVarLifeEnd(const void * elem1, const void * elem2)
+static int __cdecl genCmpLocalVarLifeEnd(const void* elem1, const void* elem2)
{
- return (*((VarScopeDsc**) elem1))->vsdLifeEnd -
- (*((VarScopeDsc**) elem2))->vsdLifeEnd;
+ return (*((VarScopeDsc**)elem1))->vsdLifeEnd - (*((VarScopeDsc**)elem2))->vsdLifeEnd;
}
-inline
-void Compiler::compInitScopeLists()
+inline void Compiler::compInitScopeLists()
{
if (info.compVarScopesCount == 0)
{
- compEnterScopeList =
- compExitScopeList = NULL;
+ compEnterScopeList = compExitScopeList = nullptr;
return;
}
@@ -5688,31 +5814,31 @@ void Compiler::compInitScopeLists()
}
qsort(compEnterScopeList, info.compVarScopesCount, sizeof(*compEnterScopeList), genCmpLocalVarLifeBeg);
- qsort(compExitScopeList, info.compVarScopesCount, sizeof(*compExitScopeList), genCmpLocalVarLifeEnd);
+ qsort(compExitScopeList, info.compVarScopesCount, sizeof(*compExitScopeList), genCmpLocalVarLifeEnd);
}
-void Compiler::compResetScopeLists()
+void Compiler::compResetScopeLists()
{
if (info.compVarScopesCount == 0)
+ {
return;
+ }
- assert (compEnterScopeList && compExitScopeList);
+ assert(compEnterScopeList && compExitScopeList);
compNextEnterScope = compNextExitScope = 0;
}
-
-VarScopeDsc* Compiler::compGetNextEnterScope(unsigned offs,
- bool scan)
+VarScopeDsc* Compiler::compGetNextEnterScope(unsigned offs, bool scan)
{
- assert (info.compVarScopesCount);
- assert (compEnterScopeList && compExitScopeList);
+ assert(info.compVarScopesCount);
+ assert(compEnterScopeList && compExitScopeList);
if (compNextEnterScope < info.compVarScopesCount)
{
- assert (compEnterScopeList[compNextEnterScope]);
+ assert(compEnterScopeList[compNextEnterScope]);
unsigned nextEnterOff = compEnterScopeList[compNextEnterScope]->vsdLifeBeg;
- assert (scan || (offs <= nextEnterOff));
+ assert(scan || (offs <= nextEnterOff));
if (!scan)
{
@@ -5730,21 +5856,19 @@ VarScopeDsc* Compiler::compGetNextEnterScope(unsigned offs,
}
}
- return NULL;
+ return nullptr;
}
-
-VarScopeDsc* Compiler::compGetNextExitScope(unsigned offs,
- bool scan)
+VarScopeDsc* Compiler::compGetNextExitScope(unsigned offs, bool scan)
{
- assert (info.compVarScopesCount);
- assert (compEnterScopeList && compExitScopeList);
+ assert(info.compVarScopesCount);
+ assert(compEnterScopeList && compExitScopeList);
if (compNextExitScope < info.compVarScopesCount)
{
- assert (compExitScopeList[compNextExitScope]);
+ assert(compExitScopeList[compNextExitScope]);
unsigned nextExitOffs = compExitScopeList[compNextExitScope]->vsdLifeEnd;
- assert (scan || (offs <= nextExitOffs));
+ assert(scan || (offs <= nextExitOffs));
if (!scan)
{
@@ -5762,26 +5886,26 @@ VarScopeDsc* Compiler::compGetNextExitScope(unsigned offs,
}
}
- return NULL;
+ return nullptr;
}
// The function will call the callback functions for scopes whose boundaries
// fall between the current position in the scope lists and 'offset',
// in instruction order.
-void Compiler::compProcessScopesUntil (unsigned offset,
- VARSET_TP* inScope,
- void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*),
- void (Compiler::*exitScopeFn) (VARSET_TP* inScope, VarScopeDsc*))
+void Compiler::compProcessScopesUntil(unsigned offset,
+ VARSET_TP* inScope,
+ void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*),
+ void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*))
{
assert(offset != BAD_IL_OFFSET);
assert(inScope != nullptr);
- bool foundExit = false, foundEnter = true;
- VarScopeDsc* scope;
- VarScopeDsc* nextExitScope = nullptr;
- VarScopeDsc* nextEnterScope = nullptr;
- unsigned offs = offset, curEnterOffs = 0;
+ bool foundExit = false, foundEnter = true;
+ VarScopeDsc* scope;
+ VarScopeDsc* nextExitScope = nullptr;
+ VarScopeDsc* nextEnterScope = nullptr;
+ unsigned offs = offset, curEnterOffs = 0;
goto START_FINDING_SCOPES;
@@ -5796,13 +5920,13 @@ void Compiler::compProcessScopesUntil (unsigned offset,
if (nextExitScope)
{
(this->*exitScopeFn)(inScope, nextExitScope);
- nextExitScope = NULL;
- foundExit = true;
+ nextExitScope = nullptr;
+ foundExit = true;
}
offs = nextEnterScope ? nextEnterScope->vsdLifeBeg : offset;
- while ((scope = compGetNextExitScope(offs, true)) != NULL)
+ while ((scope = compGetNextExitScope(offs, true)) != nullptr)
{
foundExit = true;
@@ -5821,21 +5945,20 @@ void Compiler::compProcessScopesUntil (unsigned offset,
if (nextEnterScope)
{
(this->*enterScopeFn)(inScope, nextEnterScope);
- curEnterOffs = nextEnterScope->vsdLifeBeg;
- nextEnterScope = NULL;
- foundEnter = true;
+ curEnterOffs = nextEnterScope->vsdLifeBeg;
+ nextEnterScope = nullptr;
+ foundEnter = true;
}
offs = nextExitScope ? nextExitScope->vsdLifeEnd : offset;
-START_FINDING_SCOPES :
+ START_FINDING_SCOPES:
- while ((scope = compGetNextEnterScope(offs, true)) != NULL)
+ while ((scope = compGetNextEnterScope(offs, true)) != nullptr)
{
foundEnter = true;
- if ( (nextExitScope && scope->vsdLifeBeg >= nextExitScope->vsdLifeEnd)
- || (scope->vsdLifeBeg > curEnterOffs) )
+ if ((nextExitScope && scope->vsdLifeBeg >= nextExitScope->vsdLifeEnd) || (scope->vsdLifeBeg > curEnterOffs))
{
// We overshot the last found exit scope. Save the scope for later
// and find an exiting scope
@@ -5851,41 +5974,39 @@ START_FINDING_SCOPES :
curEnterOffs = scope->vsdLifeBeg;
}
}
- }
- while (foundExit || foundEnter);
+ } while (foundExit || foundEnter);
}
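The goto-driven loop above is, at heart, a two-way merge: enter events come from a list sorted by vsdLifeBeg, exit events from a list sorted by vsdLifeEnd, and the callbacks fire in instruction order until 'offset' is reached. The standalone sketch below is a simplified illustration only (hypothetical names, no persistent compNextEnterScope/compNextExitScope cursors, and exits are flushed before a coinciding enter); it is not the JIT's actual helper.

#include <cstddef>
#include <cstdio>
#include <vector>

struct ScopeEvent
{
    unsigned varNum;
    unsigned lifeBeg;
    unsigned lifeEnd;
};

// Merge two pre-sorted boundary lists up to 'offset', firing the matching callback
// for each boundary in instruction order (illustrative only).
static void processScopesUntil(unsigned offset,
                               const std::vector<ScopeEvent>& enterList, // sorted by lifeBeg
                               const std::vector<ScopeEvent>& exitList,  // sorted by lifeEnd
                               void (*enterFn)(const ScopeEvent&),
                               void (*exitFn)(const ScopeEvent&))
{
    size_t e = 0;
    size_t x = 0;
    for (;;)
    {
        bool enterReady = (e < enterList.size()) && (enterList[e].lifeBeg <= offset);
        bool exitReady  = (x < exitList.size()) && (exitList[x].lifeEnd <= offset);
        if (!enterReady && !exitReady)
        {
            break;
        }
        if (exitReady && (!enterReady || exitList[x].lifeEnd <= enterList[e].lifeBeg))
        {
            exitFn(exitList[x++]);
        }
        else
        {
            enterFn(enterList[e++]);
        }
    }
}

int main()
{
    std::vector<ScopeEvent> enters = {{0, 0x00, 0x40}, {1, 0x10, 0x20}};
    std::vector<ScopeEvent> exits  = {{1, 0x10, 0x20}, {0, 0x00, 0x40}};
    processScopesUntil(0x30, enters, exits,
                       [](const ScopeEvent& s) { printf("enter V%02u at %03Xh\n", s.varNum, s.lifeBeg); },
                       [](const ScopeEvent& s) { printf("exit  V%02u at %03Xh\n", s.varNum, s.lifeEnd); });
    return 0;
}

For the two sample scopes this prints the enter events at 000h and 010h and the exit at 020h, leaving V00's exit at 040h for a later call with a larger offset.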
-
/*****************************************************************************/
#endif // DEBUGGING_SUPPORT
/*****************************************************************************/
#if defined(DEBUGGING_SUPPORT) && defined(DEBUG)
-void Compiler::compDispScopeLists()
+void Compiler::compDispScopeLists()
{
unsigned i;
printf("Local variable scopes = %d\n", info.compVarScopesCount);
if (info.compVarScopesCount)
+ {
printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n");
+ }
printf("Sorted by enter scope:\n");
for (i = 0; i < info.compVarScopesCount; i++)
{
VarScopeDsc* varScope = compEnterScopeList[i];
assert(varScope);
- printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh",
- i,
- varScope->vsdVarNum,
- varScope->vsdLVnum,
- VarNameToStr(varScope->vsdName) == NULL ? "UNKNOWN" : VarNameToStr(varScope->vsdName),
- varScope->vsdLifeBeg,
- varScope->vsdLifeEnd);
+ printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum,
+ VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName),
+ varScope->vsdLifeBeg, varScope->vsdLifeEnd);
if (compNextEnterScope == i)
+ {
printf(" <-- next enter scope");
+ }
printf("\n");
}
@@ -5895,16 +6016,14 @@ void Compiler::compDispScopeLists()
{
VarScopeDsc* varScope = compExitScopeList[i];
assert(varScope);
- printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh",
- i,
- varScope->vsdVarNum,
- varScope->vsdLVnum,
- VarNameToStr(varScope->vsdName) == NULL ? "UNKNOWN" : VarNameToStr(varScope->vsdName),
- varScope->vsdLifeBeg,
- varScope->vsdLifeEnd);
+ printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh", i, varScope->vsdVarNum, varScope->vsdLVnum,
+ VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName),
+ varScope->vsdLifeBeg, varScope->vsdLifeEnd);
if (compNextExitScope == i)
+ {
printf(" <-- next exit scope");
+ }
printf("\n");
}
@@ -5914,23 +6033,21 @@ void Compiler::compDispScopeLists()
#if defined(DEBUG)
-void Compiler::compDispLocalVars()
+void Compiler::compDispLocalVars()
{
printf("info.compVarScopesCount = %d\n", info.compVarScopesCount);
if (info.compVarScopesCount > 0)
+ {
printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n");
+ }
for (unsigned i = 0; i < info.compVarScopesCount; i++)
{
VarScopeDsc* varScope = &info.compVarScopes[i];
- printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh\n",
- i,
- varScope->vsdVarNum,
- varScope->vsdLVnum,
- VarNameToStr(varScope->vsdName) == NULL ? "UNKNOWN" : VarNameToStr(varScope->vsdName),
- varScope->vsdLifeBeg,
- varScope->vsdLifeEnd);
+ printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh\n", i, varScope->vsdVarNum, varScope->vsdLVnum,
+ VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName),
+ varScope->vsdLifeBeg, varScope->vsdLifeEnd);
}
}
@@ -5940,30 +6057,29 @@ void Compiler::compDispLocalVars()
// Compile a single method
-int jitNativeCode ( CORINFO_METHOD_HANDLE methodHnd,
- CORINFO_MODULE_HANDLE classPtr,
- COMP_HANDLE compHnd,
- CORINFO_METHOD_INFO* methodInfo,
- void * * methodCodePtr,
- ULONG * methodCodeSize,
- CORJIT_FLAGS * compileFlags,
- void * inlineInfoPtr
- )
+int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd,
+ CORINFO_MODULE_HANDLE classPtr,
+ COMP_HANDLE compHnd,
+ CORINFO_METHOD_INFO* methodInfo,
+ void** methodCodePtr,
+ ULONG* methodCodeSize,
+ CORJIT_FLAGS* compileFlags,
+ void* inlineInfoPtr)
{
//
// A non-NULL inlineInfo means we are compiling the inlinee method.
//
- InlineInfo * inlineInfo = (InlineInfo *)inlineInfoPtr;
+ InlineInfo* inlineInfo = (InlineInfo*)inlineInfoPtr;
bool jitFallbackCompile = false;
START:
- int result = CORJIT_INTERNALERROR;
+ int result = CORJIT_INTERNALERROR;
- ArenaAllocator * pAlloc = NULL;
- ArenaAllocator alloc;
+ ArenaAllocator* pAlloc = nullptr;
+ ArenaAllocator alloc;
if (inlineInfo)
- {
+ {
// Use inliner's memory allocator when compiling the inlinee.
pAlloc = inlineInfo->InlinerCompiler->compGetAllocator();
}
@@ -5976,68 +6092,69 @@ START:
if (pAlloc == nullptr)
{
- alloc = ArenaAllocator(pMemoryManager);
+ alloc = ArenaAllocator(pMemoryManager);
pAlloc = &alloc;
}
}
+ Compiler* pComp;
+ pComp = nullptr;
- Compiler * pComp;
- pComp = NULL;
-
- struct Param {
- Compiler *pComp;
- ArenaAllocator * pAlloc;
- ArenaAllocator * alloc;
- bool jitFallbackCompile;
+ struct Param
+ {
+ Compiler* pComp;
+ ArenaAllocator* pAlloc;
+ ArenaAllocator* alloc;
+ bool jitFallbackCompile;
- CORINFO_METHOD_HANDLE methodHnd;
- CORINFO_MODULE_HANDLE classPtr;
- COMP_HANDLE compHnd;
+ CORINFO_METHOD_HANDLE methodHnd;
+ CORINFO_MODULE_HANDLE classPtr;
+ COMP_HANDLE compHnd;
CORINFO_METHOD_INFO* methodInfo;
- void * * methodCodePtr;
- ULONG * methodCodeSize;
- CORJIT_FLAGS * compileFlags;
- InlineInfo * inlineInfo;
+ void** methodCodePtr;
+ ULONG* methodCodeSize;
+ CORJIT_FLAGS* compileFlags;
+ InlineInfo* inlineInfo;
int result;
} param;
- param.pComp = NULL;
- param.pAlloc = pAlloc;
- param.alloc = &alloc;
+ param.pComp = nullptr;
+ param.pAlloc = pAlloc;
+ param.alloc = &alloc;
param.jitFallbackCompile = jitFallbackCompile;
- param.methodHnd = methodHnd;
- param.classPtr = classPtr;
- param.compHnd = compHnd;
- param.methodInfo = methodInfo;
- param.methodCodePtr = methodCodePtr;
- param.methodCodeSize = methodCodeSize;
- param.compileFlags = compileFlags;
- param.inlineInfo = inlineInfo;
- param.result = result;
+ param.methodHnd = methodHnd;
+ param.classPtr = classPtr;
+ param.compHnd = compHnd;
+ param.methodInfo = methodInfo;
+ param.methodCodePtr = methodCodePtr;
+ param.methodCodeSize = methodCodeSize;
+ param.compileFlags = compileFlags;
+ param.inlineInfo = inlineInfo;
+ param.result = result;
- setErrorTrap(compHnd, Param *, pParamOuter, &param)
+ setErrorTrap(compHnd, Param*, pParamOuter, &param)
{
- setErrorTrap(NULL, Param *, pParam, pParamOuter )
- {
+ setErrorTrap(nullptr, Param*, pParam, pParamOuter)
+ {
if (pParam->inlineInfo)
- {
+ {
// Lazily create the inlinee compiler object
- if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == NULL)
+ if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr)
{
- pParam->inlineInfo->InlinerCompiler->InlineeCompiler = (Compiler *)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp)));
+ pParam->inlineInfo->InlinerCompiler->InlineeCompiler =
+ (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp)));
}
// Use the inlinee compiler object
- pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler;
+ pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler;
#ifdef DEBUG
- // memset(pParam->pComp, 0xEE, sizeof(Compiler));
+// memset(pParam->pComp, 0xEE, sizeof(Compiler));
#endif
}
else
- {
+ {
                // Allocate and create the inliner compiler object
- pParam->pComp = (Compiler *)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp)));
+ pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp)));
}
// push this compiler on the stack (TLS)
@@ -6045,43 +6162,39 @@ START:
JitTls::SetCompiler(pParam->pComp);
// PREFIX_ASSUME gets turned into ASSERT_CHECK and we cannot have it here
-#if defined(_PREFAST_) || defined(_PREFIX_)
+#if defined(_PREFAST_) || defined(_PREFIX_)
PREFIX_ASSUME(pParam->pComp != NULL);
#else
- assert(pParam->pComp != NULL);
+ assert(pParam->pComp != nullptr);
#endif
-
+
pParam->pComp->compInit(pParam->pAlloc, pParam->inlineInfo);
#ifdef DEBUG
pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile;
#endif
-
+
// Now generate the code
- pParam->result = pParam->pComp->compCompile(pParam->methodHnd,
- pParam->classPtr,
- pParam->compHnd,
- pParam->methodInfo,
- pParam->methodCodePtr,
- pParam->methodCodeSize,
- pParam->compileFlags);
+ pParam->result =
+ pParam->pComp->compCompile(pParam->methodHnd, pParam->classPtr, pParam->compHnd, pParam->methodInfo,
+ pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags);
}
finallyErrorTrap()
{
- // Add a dummy touch to pComp so that it is kept alive, and is easy to get to
+ // Add a dummy touch to pComp so that it is kept alive, and is easy to get to
// during debugging since all other data can be obtained through it.
//
- if (pParamOuter->pComp) // If OOM is thrown when allocating memory for pComp, we will end up here.
- // In that case, pComp is still NULL.
+ if (pParamOuter->pComp) // If OOM is thrown when allocating memory for pComp, we will end up here.
+ // In that case, pComp is still NULL.
{
- pParamOuter->pComp->info.compCode = NULL;
+ pParamOuter->pComp->info.compCode = nullptr;
// pop the compiler off the TLS stack only if it was linked above
assert(JitTls::GetCompiler() == pParamOuter->pComp);
JitTls::SetCompiler(JitTls::GetCompiler()->prevCompiler);
}
- if (pParamOuter->inlineInfo == NULL)
+ if (pParamOuter->inlineInfo == nullptr)
{
// Free up the allocator we were using
pParamOuter->pAlloc->destroy();
@@ -6098,16 +6211,13 @@ START:
// there's no point trying to inline it again anywhere else.
inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR);
}
- param.result = __errc;
+ param.result = __errc;
}
endErrorTrap()
- result = param.result;
+ result = param.result;
- if (!inlineInfo &&
- (result == CORJIT_INTERNALERROR
- || result == CORJIT_RECOVERABLEERROR) &&
- !jitFallbackCompile)
+ if (!inlineInfo && (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR) && !jitFallbackCompile)
{
// If we failed the JIT, reattempt with debuggable code.
jitFallbackCompile = true;
@@ -6118,7 +6228,7 @@ START:
goto START;
}
-
+
return result;
}
@@ -6131,58 +6241,58 @@ START:
// classType: classification type
// size: size of the eightbyte.
//
-// static
+// static
var_types Compiler::GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size)
{
var_types type = TYP_UNKNOWN;
switch (classType)
{
- case SystemVClassificationTypeInteger:
- if (size == 1)
- {
- type = TYP_BYTE;
- }
- else if (size <= 2)
- {
- type = TYP_SHORT;
- }
- else if (size <= 4)
- {
- type = TYP_INT;
- }
- else if (size <= 8)
- {
- type = TYP_LONG;
- }
- else
- {
- assert(false && "GetTypeFromClassificationAndSizes Invalid Integer classification type.");
- }
- break;
- case SystemVClassificationTypeIntegerReference:
- type = TYP_REF;
- break;
- case SystemVClassificationTypeIntegerByRef:
- type = TYP_BYREF;
- break;
- case SystemVClassificationTypeSSE:
- if (size <= 4)
- {
- type = TYP_FLOAT;
- }
- else if (size <= 8)
- {
- type = TYP_DOUBLE;
- }
- else
- {
- assert(false && "GetTypeFromClassificationAndSizes Invalid SSE classification type.");
- }
- break;
+ case SystemVClassificationTypeInteger:
+ if (size == 1)
+ {
+ type = TYP_BYTE;
+ }
+ else if (size <= 2)
+ {
+ type = TYP_SHORT;
+ }
+ else if (size <= 4)
+ {
+ type = TYP_INT;
+ }
+ else if (size <= 8)
+ {
+ type = TYP_LONG;
+ }
+ else
+ {
+ assert(false && "GetTypeFromClassificationAndSizes Invalid Integer classification type.");
+ }
+ break;
+ case SystemVClassificationTypeIntegerReference:
+ type = TYP_REF;
+ break;
+ case SystemVClassificationTypeIntegerByRef:
+ type = TYP_BYREF;
+ break;
+ case SystemVClassificationTypeSSE:
+ if (size <= 4)
+ {
+ type = TYP_FLOAT;
+ }
+ else if (size <= 8)
+ {
+ type = TYP_DOUBLE;
+ }
+ else
+ {
+ assert(false && "GetTypeFromClassificationAndSizes Invalid SSE classification type.");
+ }
+ break;
- default:
- assert(false && "GetTypeFromClassificationAndSizes Invalid classification type.");
- break;
+ default:
+ assert(false && "GetTypeFromClassificationAndSizes Invalid classification type.");
+ break;
}
return type;
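As a worked example of this mapping (the struct below is hypothetical, not taken from the commit): under the SysV AMD64 classification a 16-byte struct { double d; int i; } is typically described as two eightbytes, the first classified SSE with size 8 and the second Integer with size 4, so the helper above yields TYP_DOUBLE and TYP_INT for them. The sketch mirrors just that part of the switch with simplified stand-in enums.

#include <cstdio>

// Simplified stand-in for the JIT's classification enum; illustration only.
enum SysVClass
{
    ClassInteger,
    ClassSSE
};

// Mirrors the Integer/SSE arms above for 4- and 8-byte slots; the real helper
// additionally distinguishes 1- and 2-byte integer slots (TYP_BYTE/TYP_SHORT).
static const char* eightByteTypeName(SysVClass cls, int size)
{
    if (cls == ClassInteger)
    {
        return (size <= 4) ? "TYP_INT" : "TYP_LONG";
    }
    // ClassSSE
    return (size <= 4) ? "TYP_FLOAT" : "TYP_DOUBLE";
}

int main()
{
    // Hypothetical classification of struct { double d; int i; }:
    // eightbyte 0 -> SSE, 8 bytes; eightbyte 1 -> Integer, 4 bytes.
    printf("eightbyte 0: %s\n", eightByteTypeName(ClassSSE, 8));     // TYP_DOUBLE
    printf("eightbyte 1: %s\n", eightByteTypeName(ClassInteger, 4)); // TYP_INT
    return 0;
}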
@@ -6197,56 +6307,57 @@ var_types Compiler::GetTypeFromClassificationAndSizes(SystemVClassificationType
//
// Return Value:
// type of the eightbyte slot of the struct
-//
-//static
-var_types Compiler::GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum)
+//
+// static
+var_types Compiler::GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
+ unsigned slotNum)
{
var_types eightByteType = TYP_UNDEF;
- unsigned len = structDesc.eightByteSizes[slotNum];
+ unsigned len = structDesc.eightByteSizes[slotNum];
switch (structDesc.eightByteClassifications[slotNum])
{
- case SystemVClassificationTypeInteger:
- // See typelist.h for jit type definition.
- // All the types of size < 4 bytes are of jit type TYP_INT.
- if (structDesc.eightByteSizes[slotNum] <= 4)
- {
- eightByteType = TYP_INT;
- }
- else if (structDesc.eightByteSizes[slotNum] <= 8)
- {
- eightByteType = TYP_LONG;
- }
- else
- {
- assert(false && "GetEightByteType Invalid Integer classification type.");
- }
- break;
- case SystemVClassificationTypeIntegerReference:
- assert(len == REGSIZE_BYTES);
- eightByteType = TYP_REF;
- break;
- case SystemVClassificationTypeIntegerByRef:
- assert(len == REGSIZE_BYTES);
- eightByteType = TYP_BYREF;
- break;
- case SystemVClassificationTypeSSE:
- if (structDesc.eightByteSizes[slotNum] <= 4)
- {
- eightByteType = TYP_FLOAT;
- }
- else if (structDesc.eightByteSizes[slotNum] <= 8)
- {
- eightByteType = TYP_DOUBLE;
- }
- else
- {
- assert(false && "GetEightByteType Invalid SSE classification type.");
- }
- break;
- default:
- assert(false && "GetEightByteType Invalid classification type.");
- break;
+ case SystemVClassificationTypeInteger:
+ // See typelist.h for jit type definition.
+ // All the types of size < 4 bytes are of jit type TYP_INT.
+ if (structDesc.eightByteSizes[slotNum] <= 4)
+ {
+ eightByteType = TYP_INT;
+ }
+ else if (structDesc.eightByteSizes[slotNum] <= 8)
+ {
+ eightByteType = TYP_LONG;
+ }
+ else
+ {
+ assert(false && "GetEightByteType Invalid Integer classification type.");
+ }
+ break;
+ case SystemVClassificationTypeIntegerReference:
+ assert(len == REGSIZE_BYTES);
+ eightByteType = TYP_REF;
+ break;
+ case SystemVClassificationTypeIntegerByRef:
+ assert(len == REGSIZE_BYTES);
+ eightByteType = TYP_BYREF;
+ break;
+ case SystemVClassificationTypeSSE:
+ if (structDesc.eightByteSizes[slotNum] <= 4)
+ {
+ eightByteType = TYP_FLOAT;
+ }
+ else if (structDesc.eightByteSizes[slotNum] <= 8)
+ {
+ eightByteType = TYP_DOUBLE;
+ }
+ else
+ {
+ assert(false && "GetEightByteType Invalid SSE classification type.");
+ }
+ break;
+ default:
+ assert(false && "GetEightByteType Invalid classification type.");
+ break;
}
return eightByteType;
@@ -6262,12 +6373,12 @@ var_types Compiler::GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASS
// 'offset0' - out param; returns the offset of the first eightbyte.
// 'offset1' - out param; returns the offset of the second eightbyte.
//
-//static
+// static
void Compiler::GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
- var_types* type0,
- var_types* type1,
- unsigned __int8* offset0,
- unsigned __int8* offset1)
+ var_types* type0,
+ var_types* type1,
+ unsigned __int8* offset0,
+ unsigned __int8* offset1)
{
*offset0 = structDesc.eightByteOffsets[0];
*offset1 = structDesc.eightByteOffsets[1];
@@ -6297,17 +6408,16 @@ Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData()
{
NodeToIntMap* reachable = new (getAllocatorDebugOnly()) NodeToIntMap(getAllocatorDebugOnly());
- if (m_nodeTestData == NULL) return reachable;
+ if (m_nodeTestData == nullptr)
+ {
+ return reachable;
+ }
// Otherwise, iterate.
- for (BasicBlock * block = fgFirstBB;
- block != NULL;
- block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
{
- for (GenTreePtr stmt = block->FirstNonPhiDef();
- stmt != NULL;
- stmt = stmt->gtNext)
+ for (GenTreePtr stmt = block->FirstNonPhiDef(); stmt != nullptr; stmt = stmt->gtNext)
{
for (GenTreePtr tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
{
@@ -6316,16 +6426,16 @@ Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData()
// For call nodes, translate late args to what they stand for.
if (tree->OperGet() == GT_CALL)
{
- GenTreeCall* call = tree->AsCall();
+ GenTreeCall* call = tree->AsCall();
GenTreeArgList* args = call->gtCallArgs;
- unsigned i = 0;
- while (args != NULL)
+ unsigned i = 0;
+ while (args != nullptr)
{
GenTreePtr arg = args->Current();
if (arg->gtFlags & GTF_LATE_ARG)
{
// Find the corresponding late arg.
- GenTreePtr lateArg = NULL;
+ GenTreePtr lateArg = nullptr;
for (unsigned j = 0; j < call->fgArgInfo->ArgCount(); j++)
{
if (call->fgArgInfo->ArgTable()[j]->argNum == i)
@@ -6334,7 +6444,7 @@ Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData()
break;
}
}
- assert(lateArg != NULL);
+ assert(lateArg != nullptr);
if (GetNodeTestData()->Lookup(lateArg, &tlAndN))
{
reachable->Set(lateArg, 0);
@@ -6362,7 +6472,7 @@ void Compiler::TransferTestDataToNode(GenTreePtr from, GenTreePtr to)
// If we need to, we can fix this...
// If the table is null, don't create it just to do the lookup, which would fail...
- if (m_nodeTestData != NULL && GetNodeTestData()->Lookup(from, &tlAndN))
+ if (m_nodeTestData != nullptr && GetNodeTestData()->Lookup(from, &tlAndN))
{
assert(!GetNodeTestData()->Lookup(to, &tlAndN));
// We can't currently associate multiple annotations with a single node.
@@ -6377,10 +6487,13 @@ void Compiler::TransferTestDataToNode(GenTreePtr from, GenTreePtr to)
void Compiler::CopyTestDataToCloneTree(GenTreePtr from, GenTreePtr to)
{
- if (m_nodeTestData == NULL) return;
- if (from == NULL)
+ if (m_nodeTestData == nullptr)
+ {
+ return;
+ }
+ if (from == nullptr)
{
- assert(to == NULL);
+ assert(to == nullptr);
return;
}
// Otherwise...
@@ -6395,38 +6508,38 @@ void Compiler::CopyTestDataToCloneTree(GenTreePtr from, GenTreePtr to)
}
// Now recurse, in parallel on both trees.
- genTreeOps oper = from->OperGet();
- unsigned kind = from->OperKind();
+ genTreeOps oper = from->OperGet();
+ unsigned kind = from->OperKind();
assert(oper == to->OperGet());
    // Constant or leaf nodes have no children.
- if (kind & (GTK_CONST|GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
{
return;
}
// Otherwise, is it a 'simple' unary/binary operator?
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
- if (from->gtOp.gtOp1 != NULL)
+ if (from->gtOp.gtOp1 != nullptr)
{
- assert(to->gtOp.gtOp1 != NULL);
+ assert(to->gtOp.gtOp1 != nullptr);
CopyTestDataToCloneTree(from->gtOp.gtOp1, to->gtOp.gtOp1);
}
else
{
- assert(to->gtOp.gtOp1 == NULL);
+ assert(to->gtOp.gtOp1 == nullptr);
}
- if (from->gtGetOp2() != NULL)
+ if (from->gtGetOp2() != nullptr)
{
- assert(to->gtGetOp2() != NULL);
+ assert(to->gtGetOp2() != nullptr);
CopyTestDataToCloneTree(from->gtGetOp2(), to->gtGetOp2());
}
else
{
- assert(to->gtGetOp2() == NULL);
+ assert(to->gtGetOp2() == nullptr);
}
return;
@@ -6434,55 +6547,55 @@ void Compiler::CopyTestDataToCloneTree(GenTreePtr from, GenTreePtr to)
// Otherwise, see what kind of a special operator we have here.
- switch (oper)
+ switch (oper)
{
- case GT_STMT:
- CopyTestDataToCloneTree(from->gtStmt.gtStmtExpr, to->gtStmt.gtStmtExpr);
- return;
+ case GT_STMT:
+ CopyTestDataToCloneTree(from->gtStmt.gtStmtExpr, to->gtStmt.gtStmtExpr);
+ return;
- case GT_CALL:
- CopyTestDataToCloneTree(from->gtCall.gtCallObjp, to->gtCall.gtCallObjp);
- CopyTestDataToCloneTree(from->gtCall.gtCallArgs, to->gtCall.gtCallArgs);
- CopyTestDataToCloneTree(from->gtCall.gtCallLateArgs, to->gtCall.gtCallLateArgs);
+ case GT_CALL:
+ CopyTestDataToCloneTree(from->gtCall.gtCallObjp, to->gtCall.gtCallObjp);
+ CopyTestDataToCloneTree(from->gtCall.gtCallArgs, to->gtCall.gtCallArgs);
+ CopyTestDataToCloneTree(from->gtCall.gtCallLateArgs, to->gtCall.gtCallLateArgs);
- if (from->gtCall.gtCallType == CT_INDIRECT)
- {
- CopyTestDataToCloneTree(from->gtCall.gtCallCookie, to->gtCall.gtCallCookie);
- CopyTestDataToCloneTree(from->gtCall.gtCallAddr, to->gtCall.gtCallAddr);
- }
- // The other call types do not have additional GenTree arguments.
-
- return;
+ if (from->gtCall.gtCallType == CT_INDIRECT)
+ {
+ CopyTestDataToCloneTree(from->gtCall.gtCallCookie, to->gtCall.gtCallCookie);
+ CopyTestDataToCloneTree(from->gtCall.gtCallAddr, to->gtCall.gtCallAddr);
+ }
+ // The other call types do not have additional GenTree arguments.
- case GT_FIELD:
- CopyTestDataToCloneTree(from->gtField.gtFldObj, to->gtField.gtFldObj);
- return;
+ return;
- case GT_ARR_ELEM:
- assert(from->gtArrElem.gtArrRank == to->gtArrElem.gtArrRank);
- for (unsigned dim = 0; dim < from->gtArrElem.gtArrRank; dim++)
- {
- CopyTestDataToCloneTree(from->gtArrElem.gtArrInds[dim], to->gtArrElem.gtArrInds[dim]);
- }
- CopyTestDataToCloneTree(from->gtArrElem.gtArrObj, to->gtArrElem.gtArrObj);
- return;
+ case GT_FIELD:
+ CopyTestDataToCloneTree(from->gtField.gtFldObj, to->gtField.gtFldObj);
+ return;
- case GT_CMPXCHG:
- CopyTestDataToCloneTree(from->gtCmpXchg.gtOpLocation, to->gtCmpXchg.gtOpLocation);
- CopyTestDataToCloneTree(from->gtCmpXchg.gtOpValue, to->gtCmpXchg.gtOpValue);
- CopyTestDataToCloneTree(from->gtCmpXchg.gtOpComparand, to->gtCmpXchg.gtOpComparand);
- return;
+ case GT_ARR_ELEM:
+ assert(from->gtArrElem.gtArrRank == to->gtArrElem.gtArrRank);
+ for (unsigned dim = 0; dim < from->gtArrElem.gtArrRank; dim++)
+ {
+ CopyTestDataToCloneTree(from->gtArrElem.gtArrInds[dim], to->gtArrElem.gtArrInds[dim]);
+ }
+ CopyTestDataToCloneTree(from->gtArrElem.gtArrObj, to->gtArrElem.gtArrObj);
+ return;
+
+ case GT_CMPXCHG:
+ CopyTestDataToCloneTree(from->gtCmpXchg.gtOpLocation, to->gtCmpXchg.gtOpLocation);
+ CopyTestDataToCloneTree(from->gtCmpXchg.gtOpValue, to->gtCmpXchg.gtOpValue);
+ CopyTestDataToCloneTree(from->gtCmpXchg.gtOpComparand, to->gtCmpXchg.gtOpComparand);
+ return;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- CopyTestDataToCloneTree(from->gtBoundsChk.gtArrLen, to->gtBoundsChk.gtArrLen);
- CopyTestDataToCloneTree(from->gtBoundsChk.gtIndex, to->gtBoundsChk.gtIndex);
- return;
+ CopyTestDataToCloneTree(from->gtBoundsChk.gtArrLen, to->gtBoundsChk.gtArrLen);
+ CopyTestDataToCloneTree(from->gtBoundsChk.gtIndex, to->gtBoundsChk.gtIndex);
+ return;
- default:
- unreached();
+ default:
+ unreached();
}
}
@@ -6500,9 +6613,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-
/*****************************************************************************/
-void codeGeneratorCodeSizeBeg(){}
+void codeGeneratorCodeSizeBeg()
+{
+}
/*****************************************************************************/
/*****************************************************************************
@@ -6511,29 +6625,26 @@ void codeGeneratorCodeSizeBeg(){}
* freeing them.
*/
-const
-size_t genMinSize2free = 64;
+const size_t genMinSize2free = 64;
/*****************************************************************************/
-
-
-
/*****************************************************************************
*
* Used for counting pointer assignments.
*/
-
/*****************************************************************************/
-void codeGeneratorCodeSizeEnd(){}
+void codeGeneratorCodeSizeEnd()
+{
+}
/*****************************************************************************
*
* Gather statistics - mainly used for the standalone
* Enable various #ifdef's to get the information you need
*/
-void Compiler::compJitStats()
+void Compiler::compJitStats()
{
#if CALL_ARG_STATS
@@ -6549,30 +6660,30 @@ void Compiler::compJitStats()
* Gather statistics about method calls and arguments
*/
-void Compiler::compCallArgStats()
+void Compiler::compCallArgStats()
{
- GenTreePtr args;
- GenTreePtr argx;
+ GenTreePtr args;
+ GenTreePtr argx;
- BasicBlock * block;
- GenTreePtr stmt;
- GenTreePtr call;
+ BasicBlock* block;
+ GenTreePtr stmt;
+ GenTreePtr call;
- unsigned argNum;
+ unsigned argNum;
- unsigned argDWordNum;
- unsigned argLngNum;
- unsigned argFltNum;
- unsigned argDblNum;
+ unsigned argDWordNum;
+ unsigned argLngNum;
+ unsigned argFltNum;
+ unsigned argDblNum;
- unsigned regArgNum;
- unsigned regArgDeferred;
- unsigned regArgTemp;
+ unsigned regArgNum;
+ unsigned regArgDeferred;
+ unsigned regArgTemp;
- unsigned regArgLclVar;
- unsigned regArgConst;
+ unsigned regArgLclVar;
+ unsigned regArgConst;
- unsigned argTempsThisMethod = 0;
+ unsigned argTempsThisMethod = 0;
assert(fgStmtListThreaded);
@@ -6584,28 +6695,22 @@ void Compiler::compCallArgStats()
for (call = stmt->gtStmt.gtStmtList; call; call = call->gtNext)
{
- if (call->gtOper != GT_CALL)
+ if (call->gtOper != GT_CALL)
continue;
- argNum =
+ argNum =
- regArgNum =
- regArgDeferred =
- regArgTemp =
+ regArgNum = regArgDeferred = regArgTemp =
- regArgConst =
- regArgLclVar=
+ regArgConst = regArgLclVar =
- argDWordNum =
- argLngNum =
- argFltNum =
- argDblNum = 0;
+ argDWordNum = argLngNum = argFltNum = argDblNum = 0;
argTotalCalls++;
if (!call->gtCall.gtCallObjp)
{
- if (call->gtCall.gtCallType == CT_HELPER)
+ if (call->gtCall.gtCallType == CT_HELPER)
{
argHelperCalls++;
}
@@ -6624,7 +6729,7 @@ void Compiler::compCallArgStats()
regArgDeferred++;
argTotalObjPtr++;
- if (call->gtFlags & (GTF_CALL_VIRT_VTABLE|GTF_CALL_VIRT_STUB))
+ if (call->gtFlags & (GTF_CALL_VIRT_VTABLE | GTF_CALL_VIRT_STUB))
{
/* virtual function */
argVirtualCalls++;
@@ -6636,7 +6741,8 @@ void Compiler::compCallArgStats()
}
#ifdef LEGACY_BACKEND
- // TODO-Cleaenup: We need to add support below for additional node types that RyuJIT backend has in the IR.
+    // TODO-Cleanup: We need to add support below for additional node types that the RyuJIT backend has in the
+    // IR.
// Gather arguments information.
for (args = call->gtCall.gtCallArgs; args; args = args->gtOp.gtOp2)
@@ -6647,41 +6753,41 @@ void Compiler::compCallArgStats()
switch (genActualType(argx->TypeGet()))
{
- case TYP_INT:
- case TYP_REF:
- case TYP_BYREF:
- argDWordNum++;
- break;
+ case TYP_INT:
+ case TYP_REF:
+ case TYP_BYREF:
+ argDWordNum++;
+ break;
- case TYP_LONG:
- argLngNum++;
- break;
+ case TYP_LONG:
+ argLngNum++;
+ break;
- case TYP_FLOAT:
- argFltNum++;
- break;
+ case TYP_FLOAT:
+ argFltNum++;
+ break;
- case TYP_DOUBLE:
- argDblNum++;
- break;
+ case TYP_DOUBLE:
+ argDblNum++;
+ break;
- case TYP_VOID:
- /* This is a deferred register argument */
- assert(argx->gtOper == GT_NOP);
- assert(argx->gtFlags & GTF_LATE_ARG);
- argDWordNum++;
- break;
+ case TYP_VOID:
+ /* This is a deferred register argument */
+ assert(argx->gtOper == GT_NOP);
+ assert(argx->gtFlags & GTF_LATE_ARG);
+ argDWordNum++;
+ break;
}
/* Is this argument a register argument? */
- if (argx->gtFlags & GTF_LATE_ARG)
+ if (argx->gtFlags & GTF_LATE_ARG)
{
regArgNum++;
/* We either have a deferred argument or a temp */
- if (argx->gtOper == GT_NOP)
+ if (argx->gtOper == GT_NOP)
{
regArgDeferred++;
}
@@ -6701,33 +6807,33 @@ void Compiler::compCallArgStats()
switch (argx->gtOper)
{
- case GT_CNS_INT:
- regArgConst++;
- break;
+ case GT_CNS_INT:
+ regArgConst++;
+ break;
- case GT_LCL_VAR:
- regArgLclVar++;
- break;
+ case GT_LCL_VAR:
+ regArgLclVar++;
+ break;
}
}
assert(argNum == argDWordNum + argLngNum + argFltNum + argDblNum);
assert(regArgNum == regArgDeferred + regArgTemp);
- argTotalArgs += argNum;
- argTotalRegArgs += regArgNum;
+ argTotalArgs += argNum;
+ argTotalRegArgs += regArgNum;
argTotalDWordArgs += argDWordNum;
- argTotalLongArgs += argLngNum;
+ argTotalLongArgs += argLngNum;
argTotalFloatArgs += argFltNum;
- argTotalDoubleArgs+= argDblNum;
+ argTotalDoubleArgs += argDblNum;
- argTotalDeferred += regArgDeferred;
- argTotalTemps += regArgTemp;
- argTotalConst += regArgConst;
- argTotalLclVar += regArgLclVar;
+ argTotalDeferred += regArgDeferred;
+ argTotalTemps += regArgTemp;
+ argTotalConst += regArgConst;
+ argTotalLclVar += regArgLclVar;
- argTempsThisMethod+= regArgTemp;
+ argTempsThisMethod += regArgTemp;
argCntTable.record(argNum);
argDWordCntTable.record(argDWordNum);
@@ -6743,55 +6849,57 @@ void Compiler::compCallArgStats()
{
argMaxTempsPerMethod = argTempsThisMethod;
}
-
}
-
/* static */
-void Compiler::compDispCallArgStats(FILE* fout)
+void Compiler::compDispCallArgStats(FILE* fout)
{
- if (argTotalCalls == 0) return;
+ if (argTotalCalls == 0)
+ return;
fprintf(fout, "\n");
fprintf(fout, "--------------------------------------------------\n");
fprintf(fout, "Call stats\n");
fprintf(fout, "--------------------------------------------------\n");
- fprintf(fout, "Total # of calls = %d, calls / method = %.3f\n\n", argTotalCalls, (float) argTotalCalls / genMethodCnt);
+ fprintf(fout, "Total # of calls = %d, calls / method = %.3f\n\n", argTotalCalls,
+ (float)argTotalCalls / genMethodCnt);
- fprintf(fout, "Percentage of helper calls = %4.2f %%\n", (float)(100 * argHelperCalls ) / argTotalCalls);
- fprintf(fout, "Percentage of static calls = %4.2f %%\n", (float)(100 * argStaticCalls ) / argTotalCalls);
- fprintf(fout, "Percentage of virtual calls = %4.2f %%\n", (float)(100 * argVirtualCalls ) / argTotalCalls);
- fprintf(fout, "Percentage of non-virtual calls = %4.2f %%\n\n", (float)(100 * argNonVirtualCalls) / argTotalCalls);
+ fprintf(fout, "Percentage of helper calls = %4.2f %%\n", (float)(100 * argHelperCalls) / argTotalCalls);
+ fprintf(fout, "Percentage of static calls = %4.2f %%\n", (float)(100 * argStaticCalls) / argTotalCalls);
+ fprintf(fout, "Percentage of virtual calls = %4.2f %%\n", (float)(100 * argVirtualCalls) / argTotalCalls);
+ fprintf(fout, "Percentage of non-virtual calls = %4.2f %%\n\n", (float)(100 * argNonVirtualCalls) / argTotalCalls);
- fprintf(fout, "Average # of arguments per call = %.2f%%\n\n", (float) argTotalArgs / argTotalCalls);
+ fprintf(fout, "Average # of arguments per call = %.2f%%\n\n", (float)argTotalArgs / argTotalCalls);
- fprintf(fout, "Percentage of DWORD arguments = %.2f %%\n", (float)(100 * argTotalDWordArgs ) / argTotalArgs);
- fprintf(fout, "Percentage of LONG arguments = %.2f %%\n", (float)(100 * argTotalLongArgs ) / argTotalArgs);
- fprintf(fout, "Percentage of FLOAT arguments = %.2f %%\n", (float)(100 * argTotalFloatArgs ) / argTotalArgs);
- fprintf(fout, "Percentage of DOUBLE arguments = %.2f %%\n\n", (float)(100 * argTotalDoubleArgs) / argTotalArgs);
+ fprintf(fout, "Percentage of DWORD arguments = %.2f %%\n", (float)(100 * argTotalDWordArgs) / argTotalArgs);
+ fprintf(fout, "Percentage of LONG arguments = %.2f %%\n", (float)(100 * argTotalLongArgs) / argTotalArgs);
+ fprintf(fout, "Percentage of FLOAT arguments = %.2f %%\n", (float)(100 * argTotalFloatArgs) / argTotalArgs);
+ fprintf(fout, "Percentage of DOUBLE arguments = %.2f %%\n\n", (float)(100 * argTotalDoubleArgs) / argTotalArgs);
- if (argTotalRegArgs == 0) return;
+ if (argTotalRegArgs == 0)
+ return;
-/*
- fprintf(fout, "Total deferred arguments = %d \n", argTotalDeferred);
+ /*
+ fprintf(fout, "Total deferred arguments = %d \n", argTotalDeferred);
- fprintf(fout, "Total temp arguments = %d \n\n", argTotalTemps);
+ fprintf(fout, "Total temp arguments = %d \n\n", argTotalTemps);
- fprintf(fout, "Total 'this' arguments = %d \n", argTotalObjPtr);
- fprintf(fout, "Total local var arguments = %d \n", argTotalLclVar);
- fprintf(fout, "Total constant arguments = %d \n\n", argTotalConst);
-*/
+ fprintf(fout, "Total 'this' arguments = %d \n", argTotalObjPtr);
+ fprintf(fout, "Total local var arguments = %d \n", argTotalLclVar);
+ fprintf(fout, "Total constant arguments = %d \n\n", argTotalConst);
+ */
fprintf(fout, "\nRegister Arguments:\n\n");
- fprintf(fout, "Percentage of deferred arguments = %.2f %%\n", (float)(100 * argTotalDeferred) / argTotalRegArgs);
- fprintf(fout, "Percentage of temp arguments = %.2f %%\n\n", (float)(100 * argTotalTemps) / argTotalRegArgs);
+ fprintf(fout, "Percentage of deferred arguments = %.2f %%\n", (float)(100 * argTotalDeferred) / argTotalRegArgs);
+ fprintf(fout, "Percentage of temp arguments = %.2f %%\n\n", (float)(100 * argTotalTemps) / argTotalRegArgs);
fprintf(fout, "Maximum # of temps per method = %d\n\n", argMaxTempsPerMethod);
- fprintf(fout, "Percentage of ObjPtr arguments = %.2f %%\n", (float)(100 * argTotalObjPtr) / argTotalRegArgs);
- //fprintf(fout, "Percentage of global arguments = %.2f %%\n", (float)(100 * argTotalDWordGlobEf) / argTotalRegArgs);
- fprintf(fout, "Percentage of constant arguments = %.2f %%\n", (float)(100 * argTotalConst) / argTotalRegArgs);
+ fprintf(fout, "Percentage of ObjPtr arguments = %.2f %%\n", (float)(100 * argTotalObjPtr) / argTotalRegArgs);
+ // fprintf(fout, "Percentage of global arguments = %.2f %%\n", (float)(100 * argTotalDWordGlobEf) /
+ // argTotalRegArgs);
+ fprintf(fout, "Percentage of constant arguments = %.2f %%\n", (float)(100 * argTotalConst) / argTotalRegArgs);
fprintf(fout, "Percentage of lcl var arguments = %.2f %%\n\n", (float)(100 * argTotalLclVar) / argTotalRegArgs);
fprintf(fout, "--------------------------------------------------\n");
@@ -6812,13 +6920,13 @@ void Compiler::compDispCallArgStats(FILE* fout)
argTempsCntTable.dump(fout);
fprintf(fout, "--------------------------------------------------\n");
-/*
- fprintf(fout, "--------------------------------------------------\n");
- fprintf(fout, "DWORD argument count frequency table (w/ LONG):\n");
- fprintf(fout, "--------------------------------------------------\n");
- argDWordLngCntTable.dump(fout);
- fprintf(fout, "--------------------------------------------------\n");
-*/
+ /*
+ fprintf(fout, "--------------------------------------------------\n");
+ fprintf(fout, "DWORD argument count frequency table (w/ LONG):\n");
+ fprintf(fout, "--------------------------------------------------\n");
+ argDWordLngCntTable.dump(fout);
+ fprintf(fout, "--------------------------------------------------\n");
+ */
}
#endif // CALL_ARG_STATS
@@ -6827,53 +6935,45 @@ void Compiler::compDispCallArgStats(FILE* fout)
#ifdef FEATURE_JIT_METHOD_PERF
// Static variables
-CritSecObject CompTimeSummaryInfo::s_compTimeSummaryLock;
+CritSecObject CompTimeSummaryInfo::s_compTimeSummaryLock;
CompTimeSummaryInfo CompTimeSummaryInfo::s_compTimeSummary;
#endif // FEATURE_JIT_METHOD_PERF
#if defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS
-const char* PhaseNames[] =
-{
+const char* PhaseNames[] = {
#define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent) string_nm,
#include "compphases.h"
};
-const char* PhaseEnums[] =
-{
+const char* PhaseEnums[] = {
#define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent) #enum_nm,
#include "compphases.h"
};
-const LPCWSTR PhaseShortNames[] =
-{
+const LPCWSTR PhaseShortNames[] = {
#define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent) W(short_nm),
#include "compphases.h"
};
#endif // defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS
#ifdef FEATURE_JIT_METHOD_PERF
-bool PhaseHasChildren[] =
-{
+bool PhaseHasChildren[] = {
#define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent) hasChildren,
#include "compphases.h"
};
-int PhaseParent[] =
-{
+int PhaseParent[] = {
#define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent) parent,
#include "compphases.h"
};
-CompTimeInfo::CompTimeInfo(unsigned byteCodeBytes) :
- m_byteCodeBytes(byteCodeBytes),
- m_totalCycles(0),
- m_parentPhaseEndSlop(0),
- m_timerFailure(false)
+CompTimeInfo::CompTimeInfo(unsigned byteCodeBytes)
+ : m_byteCodeBytes(byteCodeBytes), m_totalCycles(0), m_parentPhaseEndSlop(0), m_timerFailure(false)
{
for (int i = 0; i < PHASE_NUMBER_OF; i++)
{
m_invokesByPhase[i] = 0;
- m_cyclesByPhase[i] = 0;
+ m_cyclesByPhase[i] = 0;
}
}
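The parallel PhaseNames/PhaseEnums/PhaseShortNames/PhaseHasChildren/PhaseParent tables above are generated by re-expanding CompPhaseNameMacro over compphases.h. A single-file sketch of the same X-macro idea, with made-up phase entries and a list macro standing in for the #include:

    // Illustrative list; the real entries live in compphases.h.
    #define PHASE_LIST(M)                                  \
        M(PHASE_PRE_IMPORT, "Pre-import", false, -1)       \
        M(PHASE_IMPORTATION, "Importation", false, -1)

    // Each table re-defines the per-entry macro to pick out one column, then expands the list.
    #define PHASE_NAME(enum_nm, string_nm, hasChildren, parent) string_nm,
    static const char* names[] = { PHASE_LIST(PHASE_NAME) };
    #undef PHASE_NAME

    #define PHASE_ENUM(enum_nm, string_nm, hasChildren, parent) enum_nm,
    enum Phases { PHASE_LIST(PHASE_ENUM) PHASE_NUMBER_OF };
    #undef PHASE_ENUM

    // Every table expands the same list, so the element count matches the enum by construction,
    // which is what the later assert(sizeof(PhaseNames) / sizeof(const char*) == PHASE_NUMBER_OF) checks.
    static_assert(sizeof(names) / sizeof(const char*) == PHASE_NUMBER_OF, "tables out of sync");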
@@ -6884,7 +6984,8 @@ bool CompTimeSummaryInfo::IncludedInFilteredData(CompTimeInfo& info)
void CompTimeSummaryInfo::AddInfo(CompTimeInfo& info)
{
- if (info.m_timerFailure) return; // Don't update if there was a failure.
+ if (info.m_timerFailure)
+ return; // Don't update if there was a failure.
CritSecHolder timeLock(s_compTimeSummaryLock);
m_numMethods++;
@@ -6925,7 +7026,8 @@ LPCWSTR Compiler::compJitTimeLogFilename = NULL;
void CompTimeSummaryInfo::Print(FILE* f)
{
- if (f == NULL) return;
+ if (f == NULL)
+ return;
// Otherwise...
double countsPerSec = CycleTimer::CyclesPerSecond();
if (countsPerSec == 0.0)
@@ -6938,27 +7040,25 @@ void CompTimeSummaryInfo::Print(FILE* f)
fprintf(f, " Compiled %d methods.\n", m_numMethods);
if (m_numMethods != 0)
{
- fprintf(f, " Compiled %d bytecodes total (%d max, %8.2f avg).\n",
- m_total.m_byteCodeBytes, m_maximum.m_byteCodeBytes,
- (double)m_total.m_byteCodeBytes/(double)m_numMethods);
- double totTime_ms = ((double)m_total.m_totalCycles/countsPerSec)*1000.0;
- fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n",
- ((double)m_total.m_totalCycles/1000000.0), totTime_ms);
- fprintf(f, " max: %10.3f Mcycles/%10.3f ms\n",
- ((double)m_maximum.m_totalCycles)/1000000.0, ((double)m_maximum.m_totalCycles/countsPerSec)*1000.0);
+ fprintf(f, " Compiled %d bytecodes total (%d max, %8.2f avg).\n", m_total.m_byteCodeBytes,
+ m_maximum.m_byteCodeBytes, (double)m_total.m_byteCodeBytes / (double)m_numMethods);
+ double totTime_ms = ((double)m_total.m_totalCycles / countsPerSec) * 1000.0;
+ fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_total.m_totalCycles / 1000000.0),
+ totTime_ms);
+ fprintf(f, " max: %10.3f Mcycles/%10.3f ms\n", ((double)m_maximum.m_totalCycles) / 1000000.0,
+ ((double)m_maximum.m_totalCycles / countsPerSec) * 1000.0);
fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n",
- ((double)m_total.m_totalCycles)/1000000.0/(double)m_numMethods,
- totTime_ms/(double)m_numMethods);
+ ((double)m_total.m_totalCycles) / 1000000.0 / (double)m_numMethods, totTime_ms / (double)m_numMethods);
fprintf(f, " Total time by phases:\n");
fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total max (ms)\n");
fprintf(f, " --------------------------------------------------------------------------------------\n");
// Ensure that at least the names array and the Phases enum have the same number of entries:
- assert(sizeof(PhaseNames)/sizeof(const char*) == PHASE_NUMBER_OF);
+ assert(sizeof(PhaseNames) / sizeof(const char*) == PHASE_NUMBER_OF);
for (int i = 0; i < PHASE_NUMBER_OF; i++)
{
- double phase_tot_ms = (((double)m_total.m_cyclesByPhase[i])/countsPerSec)*1000.0;
- double phase_max_ms = (((double)m_maximum.m_cyclesByPhase[i])/countsPerSec)*1000.0;
+ double phase_tot_ms = (((double)m_total.m_cyclesByPhase[i]) / countsPerSec) * 1000.0;
+ double phase_max_ms = (((double)m_maximum.m_cyclesByPhase[i]) / countsPerSec) * 1000.0;
// Indent nested phases, according to depth.
int ancPhase = PhaseParent[i];
while (ancPhase != -1)
@@ -6966,35 +7066,34 @@ void CompTimeSummaryInfo::Print(FILE* f)
fprintf(f, " ");
ancPhase = PhaseParent[ancPhase];
}
- fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%% %8.3f\n",
- PhaseNames[i],
- ((double)m_total.m_invokesByPhase[i])/((double)m_numMethods),
- ((double)m_total.m_cyclesByPhase[i])/1000000.0,
- phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms), phase_max_ms);
+ fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%% %8.3f\n", PhaseNames[i],
+ ((double)m_total.m_invokesByPhase[i]) / ((double)m_numMethods),
+ ((double)m_total.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms),
+ phase_max_ms);
}
fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles.\n",
- m_total.m_parentPhaseEndSlop);
+ m_total.m_parentPhaseEndSlop);
}
if (m_numFilteredMethods > 0)
{
fprintf(f, " Compiled %d methods that meet the filter requirement.\n", m_numFilteredMethods);
- fprintf(f, " Compiled %d bytecodes total (%8.2f avg).\n",
- m_filtered.m_byteCodeBytes, (double)m_filtered.m_byteCodeBytes/(double)m_numFilteredMethods);
- double totTime_ms = ((double)m_filtered.m_totalCycles/countsPerSec)*1000.0;
- fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n",
- ((double)m_filtered.m_totalCycles/1000000.0), totTime_ms);
+ fprintf(f, " Compiled %d bytecodes total (%8.2f avg).\n", m_filtered.m_byteCodeBytes,
+ (double)m_filtered.m_byteCodeBytes / (double)m_numFilteredMethods);
+ double totTime_ms = ((double)m_filtered.m_totalCycles / countsPerSec) * 1000.0;
+ fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n", ((double)m_filtered.m_totalCycles / 1000000.0),
+ totTime_ms);
fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n",
- ((double)m_filtered.m_totalCycles)/1000000.0/(double)m_numFilteredMethods,
- totTime_ms/(double)m_numFilteredMethods);
+ ((double)m_filtered.m_totalCycles) / 1000000.0 / (double)m_numFilteredMethods,
+ totTime_ms / (double)m_numFilteredMethods);
fprintf(f, " Total time by phases:\n");
fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total\n");
fprintf(f, " --------------------------------------------------------------------------------------\n");
// Ensure that at least the names array and the Phases enum have the same number of entries:
- assert(sizeof(PhaseNames)/sizeof(const char*) == PHASE_NUMBER_OF);
+ assert(sizeof(PhaseNames) / sizeof(const char*) == PHASE_NUMBER_OF);
for (int i = 0; i < PHASE_NUMBER_OF; i++)
{
- double phase_tot_ms = (((double)m_filtered.m_cyclesByPhase[i])/countsPerSec)*1000.0;
+ double phase_tot_ms = (((double)m_filtered.m_cyclesByPhase[i]) / countsPerSec) * 1000.0;
// Indent nested phases, according to depth.
int ancPhase = PhaseParent[i];
while (ancPhase != -1)
@@ -7002,19 +7101,17 @@ void CompTimeSummaryInfo::Print(FILE* f)
fprintf(f, " ");
ancPhase = PhaseParent[ancPhase];
}
- fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%%\n",
- PhaseNames[i],
- ((double)m_filtered.m_invokesByPhase[i])/((double)m_numFilteredMethods),
- ((double)m_filtered.m_cyclesByPhase[i])/1000000.0,
- phase_tot_ms, (phase_tot_ms * 100.0 / totTime_ms));
+ fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%%\n", PhaseNames[i],
+ ((double)m_filtered.m_invokesByPhase[i]) / ((double)m_numFilteredMethods),
+ ((double)m_filtered.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms,
+ (phase_tot_ms * 100.0 / totTime_ms));
}
fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles.\n",
- m_filtered.m_parentPhaseEndSlop);
+ m_filtered.m_parentPhaseEndSlop);
}
}
-JitTimer::JitTimer(unsigned byteCodeSize)
- : m_info(byteCodeSize)
+JitTimer::JitTimer(unsigned byteCodeSize) : m_info(byteCodeSize)
{
#ifdef DEBUG
m_lastPhase = (Phases)-1;
@@ -7023,7 +7120,7 @@ JitTimer::JitTimer(unsigned byteCodeSize)
unsigned __int64 threadCurCycles;
if (GetThreadCycles(&threadCurCycles))
{
- m_start = threadCurCycles;
+ m_start = threadCurCycles;
m_curPhaseStart = threadCurCycles;
}
}
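All of the timing dumps above convert raw thread-cycle counts the same way; a small standalone worked example of that conversion, with an illustrative cycle rate in place of CycleTimer::CyclesPerSecond():

    #include <cstdio>

    int main()
    {
        // Illustrative numbers only; the jit queries the real rate at runtime.
        double             countsPerSec = 3.0e9;          // e.g. a 3 GHz thread-cycle rate
        unsigned long long totalCycles  = 4500000000ULL;

        double mcycles    = (double)totalCycles / 1000000.0;                  // 4500.000 Mcycles
        double totTime_ms = ((double)totalCycles / countsPerSec) * 1000.0;    // 1500.000 ms

        printf("  Time: total: %10.3f Mcycles/%10.3f ms\n", mcycles, totTime_ms);
        return 0;
    }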
@@ -7126,7 +7223,7 @@ void JitTimer::PrintCsvMethodStats(Compiler* comp)
{
return;
}
-
+
// eeGetMethodFullName uses locks, so don't enter crit sec before this call.
const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd);
@@ -7186,9 +7283,9 @@ void JitTimer::Terminate(Compiler* comp, CompTimeSummaryInfo& sum)
#if MEASURE_MEM_ALLOC
// static vars.
-CritSecObject Compiler::s_memStatsLock; // Default constructor.
-Compiler::AggregateMemStats Compiler::s_aggMemStats; // Default constructor.
-Compiler::MemStats Compiler::s_maxCompMemStats; // Default constructor.
+CritSecObject Compiler::s_memStatsLock; // Default constructor.
+Compiler::AggregateMemStats Compiler::s_aggMemStats; // Default constructor.
+Compiler::MemStats Compiler::s_maxCompMemStats; // Default constructor.
const char* Compiler::MemStats::s_CompMemKindNames[] = {
#define CompMemKindMacro(kind) #kind,
@@ -7197,10 +7294,8 @@ const char* Compiler::MemStats::s_CompMemKindNames[] = {
void Compiler::MemStats::Print(FILE* f)
{
- fprintf(f, "count: %10u, size: %10llu, max = %10llu\n",
- allocCnt, allocSz, allocSzMax);
- fprintf(f, "allocateMemory: %10llu, nraUsed: %10llu\n",
- nraTotalSizeAlloc, nraTotalSizeUsed);
+ fprintf(f, "count: %10u, size: %10llu, max = %10llu\n", allocCnt, allocSz, allocSzMax);
+ fprintf(f, "allocateMemory: %10llu, nraUsed: %10llu\n", nraTotalSizeAlloc, nraTotalSizeUsed);
PrintByKind(f);
}
@@ -7211,38 +7306,33 @@ void Compiler::MemStats::PrintByKind(FILE* f)
float allocSzF = static_cast<float>(allocSz);
for (int cmk = 0; cmk < CMK_Count; cmk++)
{
- float pct = 100.0f * static_cast<float>(allocSzByKind[cmk])/allocSzF;
+ float pct = 100.0f * static_cast<float>(allocSzByKind[cmk]) / allocSzF;
fprintf(f, " %20s | %10llu | %6.2f%%\n", s_CompMemKindNames[cmk], allocSzByKind[cmk], pct);
}
fprintf(f, "\n");
}
-
void Compiler::AggregateMemStats::Print(FILE* f)
{
fprintf(f, "For %9u methods:\n", nMethods);
- fprintf(f, " count: %12u (avg %7u per method)\n",
- allocCnt, allocCnt / nMethods);
- fprintf(f, " alloc size : %12llu (avg %7llu per method)\n",
- allocSz, allocSz / nMethods);
+ fprintf(f, " count: %12u (avg %7u per method)\n", allocCnt, allocCnt / nMethods);
+ fprintf(f, " alloc size : %12llu (avg %7llu per method)\n", allocSz, allocSz / nMethods);
fprintf(f, " max alloc : %12llu\n", allocSzMax);
fprintf(f, "\n");
- fprintf(f, " allocateMemory : %12llu (avg %7llu per method)\n",
- nraTotalSizeAlloc, nraTotalSizeAlloc / nMethods);
- fprintf(f, " nraUsed : %12llu (avg %7llu per method)\n",
- nraTotalSizeUsed, nraTotalSizeUsed / nMethods);
+ fprintf(f, " allocateMemory : %12llu (avg %7llu per method)\n", nraTotalSizeAlloc, nraTotalSizeAlloc / nMethods);
+ fprintf(f, " nraUsed : %12llu (avg %7llu per method)\n", nraTotalSizeUsed, nraTotalSizeUsed / nMethods);
PrintByKind(f);
}
#endif // MEASURE_MEM_ALLOC
#if LOOP_HOIST_STATS
// Static fields.
-CritSecObject Compiler::s_loopHoistStatsLock; // Default constructor.
-unsigned Compiler::s_loopsConsidered = 0;
-unsigned Compiler::s_loopsWithHoistedExpressions = 0;
-unsigned Compiler::s_totalHoistedExpressions = 0;
+CritSecObject Compiler::s_loopHoistStatsLock; // Default constructor.
+unsigned Compiler::s_loopsConsidered = 0;
+unsigned Compiler::s_loopsWithHoistedExpressions = 0;
+unsigned Compiler::s_totalHoistedExpressions = 0;
-// static
+// static
void Compiler::PrintAggregateLoopHoistStats(FILE* f)
{
fprintf(f, "\n");
@@ -7260,8 +7350,8 @@ void Compiler::PrintAggregateLoopHoistStats(FILE* f)
{
exprsPerLoopWithExpr = double(s_totalHoistedExpressions) / double(s_loopsWithHoistedExpressions);
}
- fprintf(f, "Considered %d loops. Of these, we hoisted expressions out of %d (%6.2f%%).\n",
- s_loopsConsidered, s_loopsWithHoistedExpressions, pctWithHoisted);
+ fprintf(f, "Considered %d loops. Of these, we hoisted expressions out of %d (%6.2f%%).\n", s_loopsConsidered,
+ s_loopsWithHoistedExpressions, pctWithHoisted);
fprintf(f, " A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n",
s_totalHoistedExpressions, exprsPerLoopWithExpr);
}
@@ -7270,9 +7360,9 @@ void Compiler::AddLoopHoistStats()
{
CritSecHolder statsLock(s_loopHoistStatsLock);
- s_loopsConsidered += m_loopsConsidered;
+ s_loopsConsidered += m_loopsConsidered;
s_loopsWithHoistedExpressions += m_loopsWithHoistedExpressions;
- s_totalHoistedExpressions += m_totalHoistedExpressions;
+ s_totalHoistedExpressions += m_totalHoistedExpressions;
}
void Compiler::PrintPerMethodLoopHoistStats()
@@ -7287,8 +7377,8 @@ void Compiler::PrintPerMethodLoopHoistStats()
{
exprsPerLoopWithExpr = double(m_totalHoistedExpressions) / double(m_loopsWithHoistedExpressions);
}
- printf("Considered %d loops. Of these, we hoisted expressions out of %d (%5.2f%%).\n",
- m_loopsConsidered, m_loopsWithHoistedExpressions, pctWithHoisted);
+ printf("Considered %d loops. Of these, we hoisted expressions out of %d (%5.2f%%).\n", m_loopsConsidered,
+ m_loopsWithHoistedExpressions, pctWithHoisted);
printf(" A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n",
m_totalHoistedExpressions, exprsPerLoopWithExpr);
}
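The Add*Stats routines above protect their static aggregate counters with a scoped lock holder. A minimal sketch of that RAII pattern, using std::mutex/std::lock_guard as stand-ins for the jit's CritSecObject/CritSecHolder:

    #include <mutex>

    static std::mutex s_statsLock;            // stand-in for CritSecObject
    static unsigned   s_loopsConsidered = 0;  // aggregated across all compilations

    void AddLoopStats(unsigned loopsConsideredThisMethod)
    {
        // The lock is held for the scope of the function and released automatically,
        // mirroring "CritSecHolder statsLock(s_loopHoistStatsLock);".
        std::lock_guard<std::mutex> statsLock(s_statsLock);
        s_loopsConsidered += loopsConsideredThisMethod;
    }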
@@ -7308,10 +7398,13 @@ void Compiler::RecordStateAtEndOfInlining()
{
#if defined(DEBUG) || defined(INLINE_DATA) || defined(FEATURE_CLRSQM)
- m_compCyclesAtEndOfInlining = 0;
+ m_compCyclesAtEndOfInlining = 0;
m_compTickCountAtEndOfInlining = 0;
- bool b = CycleTimer::GetThreadCyclesS(&m_compCyclesAtEndOfInlining);
- if (!b) return; // We don't have a thread cycle counter.
+ bool b = CycleTimer::GetThreadCyclesS(&m_compCyclesAtEndOfInlining);
+ if (!b)
+ {
+ return; // We don't have a thread cycle counter.
+ }
m_compTickCountAtEndOfInlining = GetTickCount();
#endif // defined(DEBUG) || defined(INLINE_DATA) || defined(FEATURE_CLRSQM)
@@ -7328,8 +7421,11 @@ void Compiler::RecordStateAtEndOfCompilation()
// Common portion
m_compCycles = 0;
unsigned __int64 compCyclesAtEnd;
- bool b = CycleTimer::GetThreadCyclesS(&compCyclesAtEnd);
- if (!b) return; // We don't have a thread cycle counter.
+ bool b = CycleTimer::GetThreadCyclesS(&compCyclesAtEnd);
+ if (!b)
+ {
+ return; // We don't have a thread cycle counter.
+ }
assert(compCyclesAtEnd >= m_compCyclesAtEndOfInlining);
m_compCycles = compCyclesAtEnd - m_compCyclesAtEndOfInlining;
@@ -7340,7 +7436,7 @@ void Compiler::RecordStateAtEndOfCompilation()
// SQM only portion
unsigned __int64 mcycles64 = m_compCycles / ((unsigned __int64)1000000);
- unsigned mcycles;
+ unsigned mcycles;
if (mcycles64 > UINT32_MAX)
{
mcycles = UINT32_MAX;
@@ -7356,12 +7452,8 @@ void Compiler::RecordStateAtEndOfCompilation()
if (mcycles >= 1000)
{
- info.compCompHnd->logSQMLongJitEvent(mcycles,
- compTicks,
- info.compILCodeSize,
- fgBBcount,
- opts.MinOpts(),
- info.compMethodHnd);
+ info.compCompHnd->logSQMLongJitEvent(mcycles, compTicks, info.compILCodeSize, fgBBcount, opts.MinOpts(),
+ info.compMethodHnd);
}
#endif // FEATURE_CLRSQM
@@ -7369,10 +7461,10 @@ void Compiler::RecordStateAtEndOfCompilation()
#if FUNC_INFO_LOGGING
// static
-LPCWSTR Compiler::compJitFuncInfoFilename = NULL;
+LPCWSTR Compiler::compJitFuncInfoFilename = nullptr;
// static
-FILE* Compiler::compJitFuncInfoFile = NULL;
+FILE* Compiler::compJitFuncInfoFile = nullptr;
#endif // FUNC_INFO_LOGGING
#ifdef DEBUG
@@ -7384,11 +7476,11 @@ FILE* Compiler::compJitFuncInfoFile = NULL;
// not be big enough to handle all possible variable numbers.
void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars)
{
- BYTE* pVarNumSet; // trivial set: one byte per varNum, 0 means not in set, 1 means in set.
+ BYTE* pVarNumSet; // trivial set: one byte per varNum, 0 means not in set, 1 means in set.
size_t varNumSetBytes = comp->lvaCount * sizeof(BYTE);
- pVarNumSet = (BYTE*)_alloca(varNumSetBytes);
- memset(pVarNumSet, 0, varNumSetBytes); // empty the set
+ pVarNumSet = (BYTE*)_alloca(varNumSetBytes);
+ memset(pVarNumSet, 0, varNumSetBytes); // empty the set
VARSET_ITER_INIT(comp, iter, vars, varIndex);
while (iter.NextElem(comp, &varIndex))
@@ -7405,7 +7497,9 @@ void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars)
if (pVarNumSet[varNum] == 1)
{
if (!first)
+ {
printf(" ");
+ }
printf("V%02u", varNum);
first = false;
}
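dumpConvertedVarSet above expands the tracked-variable bitset into a "trivial" one-byte-per-local set before printing. A small sketch of that expansion under simplified assumptions (plain arrays instead of VARSET_TP and _alloca; function and parameter names are made up):

    #include <cstdio>
    #include <cstring>

    // Hypothetical inputs: the number of locals and the tracked-index -> varNum mapping.
    void DumpVarNums(const unsigned* trackedVarNums, unsigned trackedCount, unsigned lvaCount)
    {
        unsigned char varNumSet[64];        // sketch assumes lvaCount <= 64; the real code _alloca's lvaCount bytes
        memset(varNumSet, 0, lvaCount);     // empty the set

        for (unsigned i = 0; i < trackedCount; i++)   // mark each member, like the VARSET iterator loop
        {
            varNumSet[trackedVarNums[i]] = 1;
        }

        bool first = true;
        printf("{");
        for (unsigned varNum = 0; varNum < lvaCount; varNum++)
        {
            if (varNumSet[varNum] == 1)
            {
                if (!first)
                {
                    printf(" ");
                }
                printf("V%02u", varNum);
                first = false;
            }
        }
        printf("}");
    }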
@@ -7413,7 +7507,6 @@ void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars)
printf("}");
}
-
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
@@ -7470,63 +7563,63 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* cDependsIR : Display dependencies of a tree DEP(t# ...) node
* based on child comma tree nodes
* dFormatIR : Display dump format specified on command line
- *
+ *
*
* The following don't require a Compiler* to work:
* dVarSet : Display a VARSET_TP (call dumpVarSet()).
* dRegMask : Display a regMaskTP (call dspRegMask(mask)).
*/
-void cBlock(Compiler* comp, BasicBlock* block)
+void cBlock(Compiler* comp, BasicBlock* block)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *Block %u\n", sequenceNumber++);
comp->fgTableDispBasicBlock(block);
}
-void cBlocks(Compiler* comp)
+void cBlocks(Compiler* comp)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *Blocks %u\n", sequenceNumber++);
comp->fgDispBasicBlocks();
}
-void cBlocksV(Compiler* comp)
+void cBlocksV(Compiler* comp)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *BlocksV %u\n", sequenceNumber++);
comp->fgDispBasicBlocks(true);
}
-void cTree(Compiler* comp, GenTree* tree)
+void cTree(Compiler* comp, GenTree* tree)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *Tree %u\n", sequenceNumber++);
- comp->gtDispTree(tree, 0, ">>>");
+ comp->gtDispTree(tree, nullptr, ">>>");
}
-void cTrees(Compiler* comp)
+void cTrees(Compiler* comp)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *Trees %u\n", sequenceNumber++);
- comp->fgDumpTrees(comp->fgFirstBB, NULL);
+ comp->fgDumpTrees(comp->fgFirstBB, nullptr);
}
-void cEH(Compiler* comp)
+void cEH(Compiler* comp)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *EH %u\n", sequenceNumber++);
comp->fgDispHandlerTab();
}
-void cVar(Compiler* comp, unsigned lclNum)
+void cVar(Compiler* comp, unsigned lclNum)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *Var %u\n", sequenceNumber++);
comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT);
}
-void cVarDsc(Compiler* comp, LclVarDsc* varDsc)
+void cVarDsc(Compiler* comp, LclVarDsc* varDsc)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *VarDsc %u\n", sequenceNumber++);
@@ -7534,63 +7627,64 @@ void cVarDsc(Compiler* comp, LclVarDsc* varDsc)
comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT);
}
-void cVars(Compiler* comp)
+void cVars(Compiler* comp)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *Vars %u\n", sequenceNumber++);
comp->lvaTableDump();
}
-void cVarsFinal(Compiler* comp)
+void cVarsFinal(Compiler* comp)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *Vars %u\n", sequenceNumber++);
comp->lvaTableDump(Compiler::FINAL_FRAME_LAYOUT);
}
-void cBlockCheapPreds(Compiler* comp, BasicBlock* block)
+void cBlockCheapPreds(Compiler* comp, BasicBlock* block)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
- printf("===================================================================== *BlockCheapPreds %u\n", sequenceNumber++);
+ printf("===================================================================== *BlockCheapPreds %u\n",
+ sequenceNumber++);
block->dspCheapPreds();
}
-void cBlockPreds(Compiler* comp, BasicBlock* block)
+void cBlockPreds(Compiler* comp, BasicBlock* block)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *BlockPreds %u\n", sequenceNumber++);
block->dspPreds();
}
-void cBlockSuccs(Compiler* comp, BasicBlock* block)
+void cBlockSuccs(Compiler* comp, BasicBlock* block)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *BlockSuccs %u\n", sequenceNumber++);
block->dspSuccs(comp);
}
-void cReach(Compiler* comp)
+void cReach(Compiler* comp)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *Reach %u\n", sequenceNumber++);
comp->fgDispReach();
}
-void cDoms(Compiler* comp)
+void cDoms(Compiler* comp)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *Doms %u\n", sequenceNumber++);
comp->fgDispDoms();
}
-void cLiveness(Compiler* comp)
+void cLiveness(Compiler* comp)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== *Liveness %u\n", sequenceNumber++);
comp->fgDispBBLiveness();
}
-void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars)
+void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== dCVarSet %u\n", sequenceNumber++);
@@ -7598,94 +7692,92 @@ void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars)
printf("\n"); // dumpConvertedVarSet() doesn't emit a trailing newline
}
-
-void dBlock(BasicBlock* block)
+void dBlock(BasicBlock* block)
{
cBlock(JitTls::GetCompiler(), block);
}
-void dBlocks()
+void dBlocks()
{
cBlocks(JitTls::GetCompiler());
}
-void dBlocksV()
+void dBlocksV()
{
cBlocksV(JitTls::GetCompiler());
}
-void dTree(GenTree* tree)
+void dTree(GenTree* tree)
{
cTree(JitTls::GetCompiler(), tree);
}
-void dTrees()
+void dTrees()
{
cTrees(JitTls::GetCompiler());
}
-void dEH()
+void dEH()
{
cEH(JitTls::GetCompiler());
}
-void dVar(unsigned lclNum)
+void dVar(unsigned lclNum)
{
cVar(JitTls::GetCompiler(), lclNum);
}
-void dVarDsc(LclVarDsc* varDsc)
+void dVarDsc(LclVarDsc* varDsc)
{
cVarDsc(JitTls::GetCompiler(), varDsc);
}
-void dVars()
+void dVars()
{
cVars(JitTls::GetCompiler());
}
-void dVarsFinal()
+void dVarsFinal()
{
cVarsFinal(JitTls::GetCompiler());
}
-void dBlockPreds(BasicBlock* block)
+void dBlockPreds(BasicBlock* block)
{
cBlockPreds(JitTls::GetCompiler(), block);
}
-void dBlockCheapPreds(BasicBlock* block)
+void dBlockCheapPreds(BasicBlock* block)
{
cBlockCheapPreds(JitTls::GetCompiler(), block);
}
-void dBlockSuccs(BasicBlock* block)
+void dBlockSuccs(BasicBlock* block)
{
cBlockSuccs(JitTls::GetCompiler(), block);
}
-void dReach()
+void dReach()
{
cReach(JitTls::GetCompiler());
}
-void dDoms()
+void dDoms()
{
cDoms(JitTls::GetCompiler());
}
-void dLiveness()
+void dLiveness()
{
cLiveness(JitTls::GetCompiler());
}
-void dCVarSet(VARSET_VALARG_TP vars)
+void dCVarSet(VARSET_VALARG_TP vars)
{
cCVarSet(JitTls::GetCompiler(), vars);
}
-
-void dRegMask(regMaskTP mask)
+void dRegMask(regMaskTP mask)
{
static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called
printf("===================================================================== dRegMask %u\n", sequenceNumber++);
@@ -7693,8 +7785,7 @@ void dRegMask(regMaskTP mask)
printf("\n"); // dspRegMask() doesn't emit a trailing newline
}
-void
-dBlockList(BasicBlockList* list)
+void dBlockList(BasicBlockList* list)
{
printf("WorkList: ");
while (list != nullptr)
@@ -7717,7 +7808,7 @@ BasicBlock* dbBlock;
// Debug APIs for finding Trees, Stmts, and/or Blocks.
// As a side effect, they set the debug variables above.
-GenTree* dFindTree(GenTree* tree, unsigned id)
+GenTree* dFindTree(GenTree* tree, unsigned id)
{
GenTree* child;
@@ -7732,7 +7823,7 @@ GenTree* dFindTree(GenTree* tree, unsigned id)
return tree;
}
- unsigned childCount = tree->NumChildren();
+ unsigned childCount = tree->NumChildren();
for (unsigned childIndex = 0; childIndex < childCount; childIndex++)
{
child = tree->GetChild(childIndex);
@@ -7740,20 +7831,20 @@ GenTree* dFindTree(GenTree* tree, unsigned id)
if (child != nullptr)
{
return child;
- }
+ }
}
return nullptr;
}
-GenTree* dFindTree(unsigned id)
+GenTree* dFindTree(unsigned id)
{
- Compiler* comp = JitTls::GetCompiler();
+ Compiler* comp = JitTls::GetCompiler();
BasicBlock* block;
- GenTree* tree;
+ GenTree* tree;
dbTreeBlock = nullptr;
- dbTree = nullptr;
+ dbTree = nullptr;
for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext)
{
@@ -7771,9 +7862,9 @@ GenTree* dFindTree(unsigned id)
return nullptr;
}
-GenTreeStmt* dFindStmt(unsigned id)
+GenTreeStmt* dFindStmt(unsigned id)
{
- Compiler* comp = JitTls::GetCompiler();
+ Compiler* comp = JitTls::GetCompiler();
BasicBlock* block;
dbStmt = nullptr;
@@ -7795,9 +7886,9 @@ GenTreeStmt* dFindStmt(unsigned id)
return nullptr;
}
-BasicBlock* dFindBlock(unsigned bbNum)
+BasicBlock* dFindBlock(unsigned bbNum)
{
- Compiler* comp = JitTls::GetCompiler();
+ Compiler* comp = JitTls::GetCompiler();
BasicBlock* block = nullptr;
dbBlock = nullptr;
@@ -7818,12 +7909,12 @@ BasicBlock* dFindBlock(unsigned bbNum)
* COMPlus_JitDumpIR support - dump out function in linear IR form
*/
-void cFuncIR(Compiler* comp)
+void cFuncIR(Compiler* comp)
{
BasicBlock* block;
printf("Method %s::%s, hsh=0x%x\n", comp->info.compClassName, comp->info.compMethodName,
- comp->info.compMethodHash());
+ comp->info.compMethodHash());
printf("\n");
@@ -7838,13 +7929,13 @@ void cFuncIR(Compiler* comp)
* COMPlus_JitDumpIR support - dump out the format specifiers from COMPlus_JitDumpIRFormat
*/
-void dFormatIR()
+void dFormatIR()
{
Compiler* comp = JitTls::GetCompiler();
- if (comp->dumpIRFormat != NULL)
+ if (comp->dumpIRFormat != nullptr)
{
- printf("COMPlus_JitDumpIRFormat=%ls", comp->dumpIRFormat);
+ printf("COMPlus_JitDumpIRFormat=%ls", comp->dumpIRFormat);
}
}
@@ -7853,7 +7944,7 @@ void dFormatIR()
* COMPlus_JitDumpIR support - dump out function in linear IR form
*/
-void dFuncIR()
+void dFuncIR()
{
cFuncIR(JitTls::GetCompiler());
}
@@ -7863,15 +7954,15 @@ void dFuncIR()
* COMPlus_JitDumpIR support - dump out loop in linear IR form
*/
-void cLoopIR(Compiler* comp, Compiler::LoopDsc* loop)
+void cLoopIR(Compiler* comp, Compiler::LoopDsc* loop)
{
- BasicBlock* blockHead = loop->lpHead;
- BasicBlock* blockFirst = loop->lpFirst;
- BasicBlock* blockTop = loop->lpTop;
- BasicBlock* blockEntry = loop->lpEntry;
+ BasicBlock* blockHead = loop->lpHead;
+ BasicBlock* blockFirst = loop->lpFirst;
+ BasicBlock* blockTop = loop->lpTop;
+ BasicBlock* blockEntry = loop->lpEntry;
BasicBlock* blockBottom = loop->lpBottom;
- BasicBlock* blockExit = loop->lpExit;
- BasicBlock* blockLast = blockBottom->bbNext;
+ BasicBlock* blockExit = loop->lpExit;
+ BasicBlock* blockLast = blockBottom->bbNext;
BasicBlock* block;
printf("LOOP\n");
@@ -7903,7 +7994,7 @@ void cLoopIR(Compiler* comp, Compiler::LoopDsc* loop)
* COMPlus_JitDumpIR support - dump out loop in linear IR form
*/
-void dLoopIR(Compiler::LoopDsc* loop)
+void dLoopIR(Compiler::LoopDsc* loop)
{
cLoopIR(JitTls::GetCompiler(), loop);
}
@@ -7913,7 +8004,7 @@ void dLoopIR(Compiler::LoopDsc* loop)
* COMPlus_JitDumpIR support - dump out loop (given loop number) in linear IR form
*/
-void dLoopNumIR(unsigned loopNum)
+void dLoopNumIR(unsigned loopNum)
{
Compiler* comp = JitTls::GetCompiler();
@@ -7932,30 +8023,34 @@ void dLoopNumIR(unsigned loopNum)
* COMPlus_JitDumpIR support - dump spaces to specified tab stop
*/
-int dTabStopIR(int curr, int tabstop)
+int dTabStopIR(int curr, int tabstop)
{
int chars = 0;
if (tabstop <= curr)
+ {
chars += printf(" ");
+ }
for (int i = curr; i < tabstop; i++)
+ {
chars += printf(" ");
+ }
return chars;
}
-void cNodeIR(Compiler* comp, GenTree* tree);
+void cNodeIR(Compiler* comp, GenTree* tree);
/*****************************************************************************
*
* COMPlus_JitDumpIR support - dump out block in linear IR form
*/
-void cBlockIR(Compiler* comp, BasicBlock* block)
+void cBlockIR(Compiler* comp, BasicBlock* block)
{
bool noStmts = comp->dumpIRNoStmts;
- bool trees = comp->dumpIRTrees;
+ bool trees = comp->dumpIRTrees;
if (comp->dumpIRBlockHeaders)
{
@@ -7969,7 +8064,7 @@ void cBlockIR(Compiler* comp, BasicBlock* block)
printf("\n");
for (GenTreeStmt* stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt)
{
- // Skip embedded stmts. They should have already been dumped prior to the stmt
+ // Skip embedded stmts. They should have already been dumped prior to the stmt
// that they are embedded into. Even though they appear on the stmt list
// after the stmt they are embedded into. Don't understand the rationale for that
// but make the dataflow view look consistent.
@@ -8004,97 +8099,94 @@ void cBlockIR(Compiler* comp, BasicBlock* block)
if (!noStmts && !trees)
{
- printf("\n");
+ printf("\n");
}
}
int chars = 0;
chars += dTabStopIR(chars, COLUMN_OPCODE);
-
+
chars += printf(" ");
switch (block->bbJumpKind)
{
- case BBJ_EHFINALLYRET:
- chars += printf("BRANCH(EHFINALLYRET)");
- break;
+ case BBJ_EHFINALLYRET:
+ chars += printf("BRANCH(EHFINALLYRET)");
+ break;
- case BBJ_EHFILTERRET:
- chars += printf("BRANCH(EHFILTERRET)");
- break;
+ case BBJ_EHFILTERRET:
+ chars += printf("BRANCH(EHFILTERRET)");
+ break;
- case BBJ_EHCATCHRET:
- chars += printf("BRANCH(EHCATCHRETURN)");
- chars += dTabStopIR(chars, COLUMN_OPERANDS);
- chars += printf(" BB%02u",block->bbJumpDest->bbNum);
- break;
+ case BBJ_EHCATCHRET:
+ chars += printf("BRANCH(EHCATCHRETURN)");
+ chars += dTabStopIR(chars, COLUMN_OPERANDS);
+ chars += printf(" BB%02u", block->bbJumpDest->bbNum);
+ break;
- case BBJ_THROW:
- chars += printf("BRANCH(THROW)");
- break;
+ case BBJ_THROW:
+ chars += printf("BRANCH(THROW)");
+ break;
- case BBJ_RETURN:
- chars += printf("BRANCH(RETURN)");
- break;
+ case BBJ_RETURN:
+ chars += printf("BRANCH(RETURN)");
+ break;
- case BBJ_NONE:
- // For fall-through blocks
- chars += printf("BRANCH(NONE)");
- break;
+ case BBJ_NONE:
+ // For fall-through blocks
+ chars += printf("BRANCH(NONE)");
+ break;
- case BBJ_ALWAYS:
- chars += printf("BRANCH(ALWAYS)");
- chars += dTabStopIR(chars, COLUMN_OPERANDS);
- chars += printf(" BB%02u",block->bbJumpDest->bbNum);
- if (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)
- {
- chars += dTabStopIR(chars, COLUMN_KINDS);
- chars += printf("; [KEEP_BBJ_ALWAYS]");
- }
- break;
+ case BBJ_ALWAYS:
+ chars += printf("BRANCH(ALWAYS)");
+ chars += dTabStopIR(chars, COLUMN_OPERANDS);
+ chars += printf(" BB%02u", block->bbJumpDest->bbNum);
+ if (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)
+ {
+ chars += dTabStopIR(chars, COLUMN_KINDS);
+ chars += printf("; [KEEP_BBJ_ALWAYS]");
+ }
+ break;
- case BBJ_LEAVE:
- chars += printf("BRANCH(LEAVE)");
- chars += dTabStopIR(chars, COLUMN_OPERANDS);
- chars += printf(" BB%02u", block->bbJumpDest->bbNum);
- break;
+ case BBJ_LEAVE:
+ chars += printf("BRANCH(LEAVE)");
+ chars += dTabStopIR(chars, COLUMN_OPERANDS);
+ chars += printf(" BB%02u", block->bbJumpDest->bbNum);
+ break;
- case BBJ_CALLFINALLY:
- chars += printf("BRANCH(CALLFINALLY)");
- chars += dTabStopIR(chars, COLUMN_OPERANDS);
- chars += printf(" BB%02u", block->bbJumpDest->bbNum);
- break;
+ case BBJ_CALLFINALLY:
+ chars += printf("BRANCH(CALLFINALLY)");
+ chars += dTabStopIR(chars, COLUMN_OPERANDS);
+ chars += printf(" BB%02u", block->bbJumpDest->bbNum);
+ break;
- case BBJ_COND:
- chars += printf("BRANCH(COND)");
- chars += dTabStopIR(chars, COLUMN_OPERANDS);
- chars += printf(" BB%02u", block->bbJumpDest->bbNum);
- break;
+ case BBJ_COND:
+ chars += printf("BRANCH(COND)");
+ chars += dTabStopIR(chars, COLUMN_OPERANDS);
+ chars += printf(" BB%02u", block->bbJumpDest->bbNum);
+ break;
- case BBJ_SWITCH:
- chars += printf("BRANCH(SWITCH)");
- chars += dTabStopIR(chars, COLUMN_OPERANDS);
+ case BBJ_SWITCH:
+ chars += printf("BRANCH(SWITCH)");
+ chars += dTabStopIR(chars, COLUMN_OPERANDS);
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab;
- jumpTab = block->bbJumpSwt->bbsDstTab;
- do
- {
- chars += printf("%c BB%02u",
- (jumpTab == block->bbJumpSwt->bbsDstTab) ? ' ' : ',',
- (*jumpTab)->bbNum);
- }
- while (++jumpTab, --jumpCnt);
- break;
+ unsigned jumpCnt;
+ jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab;
+ jumpTab = block->bbJumpSwt->bbsDstTab;
+ do
+ {
+ chars += printf("%c BB%02u", (jumpTab == block->bbJumpSwt->bbsDstTab) ? ' ' : ',', (*jumpTab)->bbNum);
+ } while (++jumpTab, --jumpCnt);
+ break;
- default:
- unreached();
- break;
+ default:
+ unreached();
+ break;
}
printf("\n");
- if (block->bbNext != NULL)
+ if (block->bbNext != nullptr)
{
printf("\n");
}
@@ -8105,7 +8197,7 @@ void cBlockIR(Compiler* comp, BasicBlock* block)
* COMPlus_JitDumpIR support - dump out block in linear IR form
*/
-void dBlockIR(BasicBlock* block)
+void dBlockIR(BasicBlock* block)
{
cBlockIR(JitTls::GetCompiler(), block);
}
@@ -8115,13 +8207,13 @@ void dBlockIR(BasicBlock* block)
* COMPlus_JitDumpIR support - dump out tree node type for linear IR form
*/
-int cTreeTypeIR(Compiler *comp, GenTree *tree)
+int cTreeTypeIR(Compiler* comp, GenTree* tree)
{
int chars = 0;
var_types type = tree->TypeGet();
- const char * typeName = varTypeName(type);
+ const char* typeName = varTypeName(type);
chars += printf(".%s", typeName);
return chars;
@@ -8132,7 +8224,7 @@ int cTreeTypeIR(Compiler *comp, GenTree *tree)
* COMPlus_JitDumpIR support - dump out tree node type for linear IR form
*/
-int dTreeTypeIR(GenTree *tree)
+int dTreeTypeIR(GenTree* tree)
{
int chars = cTreeTypeIR(JitTls::GetCompiler(), tree);
@@ -8144,7 +8236,7 @@ int dTreeTypeIR(GenTree *tree)
* COMPlus_JitDumpIR support - dump out tree node kind for linear IR form
*/
-int cTreeKindsIR(Compiler *comp, GenTree *tree)
+int cTreeKindsIR(Compiler* comp, GenTree* tree)
{
int chars = 0;
@@ -8152,27 +8244,49 @@ int cTreeKindsIR(Compiler *comp, GenTree *tree)
chars += printf("kinds=");
if (kind == GTK_SPECIAL)
+ {
chars += printf("[SPECIAL]");
+ }
if (kind & GTK_CONST)
+ {
chars += printf("[CONST]");
+ }
if (kind & GTK_LEAF)
+ {
chars += printf("[LEAF]");
+ }
if (kind & GTK_UNOP)
+ {
chars += printf("[UNOP]");
+ }
if (kind & GTK_BINOP)
+ {
chars += printf("[BINOP]");
+ }
if (kind & GTK_LOGOP)
+ {
chars += printf("[LOGOP]");
+ }
if (kind & GTK_ASGOP)
+ {
chars += printf("[ASGOP]");
+ }
if (kind & GTK_COMMUTE)
+ {
chars += printf("[COMMUTE]");
+ }
if (kind & GTK_EXOP)
+ {
chars += printf("[EXOP]");
+ }
if (kind & GTK_LOCAL)
+ {
chars += printf("[LOCAL]");
+ }
if (kind & GTK_SMPOP)
+ {
chars += printf("[SMPOP]");
+ }
return chars;
}
@@ -8182,7 +8296,7 @@ int cTreeKindsIR(Compiler *comp, GenTree *tree)
* COMPlus_JitDumpIR support - dump out tree node kind for linear IR form
*/
-int dTreeKindsIR(GenTree *tree)
+int dTreeKindsIR(GenTree* tree)
{
int chars = cTreeKindsIR(JitTls::GetCompiler(), tree);
@@ -8194,7 +8308,7 @@ int dTreeKindsIR(GenTree *tree)
* COMPlus_JitDumpIR support - dump out tree node flags for linear IR form
*/
-int cTreeFlagsIR(Compiler *comp, GenTree *tree)
+int cTreeFlagsIR(Compiler* comp, GenTree* tree)
{
int chars = 0;
@@ -8229,496 +8343,498 @@ int cTreeFlagsIR(Compiler *comp, GenTree *tree)
{
chars += printf("[COLON_COND]");
}
-
+
// Operator flags
genTreeOps op = tree->OperGet();
switch (op)
{
- case GT_LCL_VAR:
- case GT_LCL_VAR_ADDR:
- case GT_LCL_FLD:
- case GT_LCL_FLD_ADDR:
- case GT_STORE_LCL_FLD:
- case GT_STORE_LCL_VAR:
- case GT_REG_VAR:
+ case GT_LCL_VAR:
+ case GT_LCL_VAR_ADDR:
+ case GT_LCL_FLD:
+ case GT_LCL_FLD_ADDR:
+ case GT_STORE_LCL_FLD:
+ case GT_STORE_LCL_VAR:
+ case GT_REG_VAR:
- if (tree->gtFlags & GTF_VAR_DEF)
- {
- chars += printf("[VAR_DEF]");
- }
- if (tree->gtFlags & GTF_VAR_USEASG)
- {
- chars += printf("[VAR_USEASG]");
- }
- if (tree->gtFlags & GTF_VAR_USEDEF)
- {
- chars += printf("[VAR_USEDEF]");
- }
- if (tree->gtFlags & GTF_VAR_CAST)
- {
- chars += printf("[VAR_CAST]");
- }
- if (tree->gtFlags & GTF_VAR_ITERATOR)
- {
- chars += printf("[VAR_ITERATOR]");
- }
- if (tree->gtFlags & GTF_VAR_CLONED)
- {
- chars += printf("[VAR_CLONED]");
- }
- if (tree->gtFlags & GTF_VAR_DEATH)
- {
- chars += printf("[VAR_DEATH]");
- }
- if (tree->gtFlags & GTF_VAR_ARR_INDEX)
- {
- chars += printf("[VAR_ARR_INDEX]");
- }
+ if (tree->gtFlags & GTF_VAR_DEF)
+ {
+ chars += printf("[VAR_DEF]");
+ }
+ if (tree->gtFlags & GTF_VAR_USEASG)
+ {
+ chars += printf("[VAR_USEASG]");
+ }
+ if (tree->gtFlags & GTF_VAR_USEDEF)
+ {
+ chars += printf("[VAR_USEDEF]");
+ }
+ if (tree->gtFlags & GTF_VAR_CAST)
+ {
+ chars += printf("[VAR_CAST]");
+ }
+ if (tree->gtFlags & GTF_VAR_ITERATOR)
+ {
+ chars += printf("[VAR_ITERATOR]");
+ }
+ if (tree->gtFlags & GTF_VAR_CLONED)
+ {
+ chars += printf("[VAR_CLONED]");
+ }
+ if (tree->gtFlags & GTF_VAR_DEATH)
+ {
+ chars += printf("[VAR_DEATH]");
+ }
+ if (tree->gtFlags & GTF_VAR_ARR_INDEX)
+ {
+ chars += printf("[VAR_ARR_INDEX]");
+ }
#if defined(DEBUG)
- if (tree->gtDebugFlags & GTF_DEBUG_VAR_CSE_REF)
- {
- chars += printf("[VAR_CSE_REF]");
- }
+ if (tree->gtDebugFlags & GTF_DEBUG_VAR_CSE_REF)
+ {
+ chars += printf("[VAR_CSE_REF]");
+ }
#endif
- if (op == GT_REG_VAR)
- {
- if (tree->gtFlags & GTF_REG_BIRTH)
+ if (op == GT_REG_VAR)
{
- chars += printf("[REG_BIRTH]");
+ if (tree->gtFlags & GTF_REG_BIRTH)
+ {
+ chars += printf("[REG_BIRTH]");
+ }
}
- }
- break;
+ break;
- case GT_NOP:
+ case GT_NOP:
- if (tree->gtFlags & GTF_NOP_DEATH)
- {
- chars += printf("[NOP_DEATH]");
- }
- break;
+ if (tree->gtFlags & GTF_NOP_DEATH)
+ {
+ chars += printf("[NOP_DEATH]");
+ }
+ break;
- case GT_NO_OP:
+ case GT_NO_OP:
- if (tree->gtFlags & GTF_NO_OP_NO)
- {
- chars += printf("[NO_OP_NO]");
- }
- break;
-
- case GT_FIELD:
+ if (tree->gtFlags & GTF_NO_OP_NO)
+ {
+ chars += printf("[NO_OP_NO]");
+ }
+ break;
- if (tree->gtFlags & GTF_FLD_NULLCHECK)
- {
- chars += printf("[FLD_NULLCHECK]");
- }
- if (tree->gtFlags & GTF_FLD_VOLATILE)
- {
- chars += printf("[FLD_VOLATILE]");
- }
- break;
+ case GT_FIELD:
- case GT_INDEX:
+ if (tree->gtFlags & GTF_FLD_NULLCHECK)
+ {
+ chars += printf("[FLD_NULLCHECK]");
+ }
+ if (tree->gtFlags & GTF_FLD_VOLATILE)
+ {
+ chars += printf("[FLD_VOLATILE]");
+ }
+ break;
- if (tree->gtFlags & GTF_INX_RNGCHK)
- {
- chars += printf("[INX_RNGCHK]");
- }
- if (tree->gtFlags & GTF_INX_REFARR_LAYOUT)
- {
- chars += printf("[INX_REFARR_LAYOUT]");
- }
- if (tree->gtFlags & GTF_INX_STRING_LAYOUT)
- {
- chars += printf("[INX_STRING_LAYOUT]");
- }
- break;
+ case GT_INDEX:
- case GT_IND:
- case GT_STOREIND:
+ if (tree->gtFlags & GTF_INX_RNGCHK)
+ {
+ chars += printf("[INX_RNGCHK]");
+ }
+ if (tree->gtFlags & GTF_INX_REFARR_LAYOUT)
+ {
+ chars += printf("[INX_REFARR_LAYOUT]");
+ }
+ if (tree->gtFlags & GTF_INX_STRING_LAYOUT)
+ {
+ chars += printf("[INX_STRING_LAYOUT]");
+ }
+ break;
- if (tree->gtFlags & GTF_IND_VOLATILE)
- {
- chars += printf("[IND_VOLATILE]");
- }
- if (tree->gtFlags & GTF_IND_REFARR_LAYOUT)
- {
- chars += printf("[IND_REFARR_LAYOUT]");
- }
- if (tree->gtFlags & GTF_IND_TGTANYWHERE)
- {
- chars += printf("[IND_TGTANYWHERE]");
- }
- if (tree->gtFlags & GTF_IND_TLS_REF)
- {
- chars += printf("[IND_TLS_REF]");
- }
- if (tree->gtFlags & GTF_IND_ASG_LHS)
- {
- chars += printf("[IND_ASG_LHS]");
- }
- if (tree->gtFlags & GTF_IND_UNALIGNED)
- {
- chars += printf("[IND_UNALIGNED]");
- }
- if (tree->gtFlags & GTF_IND_INVARIANT)
- {
- chars += printf("[IND_INVARIANT]");
- }
- if (tree->gtFlags & GTF_IND_ARR_LEN)
- {
- chars += printf("[IND_ARR_INDEX]");
- }
- break;
+ case GT_IND:
+ case GT_STOREIND:
- case GT_CLS_VAR:
+ if (tree->gtFlags & GTF_IND_VOLATILE)
+ {
+ chars += printf("[IND_VOLATILE]");
+ }
+ if (tree->gtFlags & GTF_IND_REFARR_LAYOUT)
+ {
+ chars += printf("[IND_REFARR_LAYOUT]");
+ }
+ if (tree->gtFlags & GTF_IND_TGTANYWHERE)
+ {
+ chars += printf("[IND_TGTANYWHERE]");
+ }
+ if (tree->gtFlags & GTF_IND_TLS_REF)
+ {
+ chars += printf("[IND_TLS_REF]");
+ }
+ if (tree->gtFlags & GTF_IND_ASG_LHS)
+ {
+ chars += printf("[IND_ASG_LHS]");
+ }
+ if (tree->gtFlags & GTF_IND_UNALIGNED)
+ {
+ chars += printf("[IND_UNALIGNED]");
+ }
+ if (tree->gtFlags & GTF_IND_INVARIANT)
+ {
+ chars += printf("[IND_INVARIANT]");
+ }
+ if (tree->gtFlags & GTF_IND_ARR_LEN)
+ {
+ chars += printf("[IND_ARR_INDEX]");
+ }
+ break;
- if (tree->gtFlags & GTF_CLS_VAR_ASG_LHS)
- {
- chars += printf("[CLS_VAR_ASG_LHS]");
- }
- break;
+ case GT_CLS_VAR:
- case GT_ADDR:
+ if (tree->gtFlags & GTF_CLS_VAR_ASG_LHS)
+ {
+ chars += printf("[CLS_VAR_ASG_LHS]");
+ }
+ break;
- if (tree->gtFlags & GTF_ADDR_ONSTACK)
- {
- chars += printf("[ADDR_ONSTACK]");
- }
- break;
+ case GT_ADDR:
- case GT_MUL:
+ if (tree->gtFlags & GTF_ADDR_ONSTACK)
+ {
+ chars += printf("[ADDR_ONSTACK]");
+ }
+ break;
- if (tree->gtFlags & GTF_MUL_64RSLT)
- {
- chars += printf("[64RSLT]");
- }
- if (tree->gtFlags & GTF_ADDRMODE_NO_CSE)
- {
- chars += printf("[ADDRMODE_NO_CSE]");
- }
- break;
+ case GT_MUL:
- case GT_ADD:
+ if (tree->gtFlags & GTF_MUL_64RSLT)
+ {
+ chars += printf("[64RSLT]");
+ }
+ if (tree->gtFlags & GTF_ADDRMODE_NO_CSE)
+ {
+ chars += printf("[ADDRMODE_NO_CSE]");
+ }
+ break;
- if (tree->gtFlags & GTF_ADDRMODE_NO_CSE)
- {
- chars += printf("[ADDRMODE_NO_CSE]");
- }
- break;
+ case GT_ADD:
- case GT_LSH:
+ if (tree->gtFlags & GTF_ADDRMODE_NO_CSE)
+ {
+ chars += printf("[ADDRMODE_NO_CSE]");
+ }
+ break;
- if (tree->gtFlags & GTF_ADDRMODE_NO_CSE)
- {
- chars += printf("[ADDRMODE_NO_CSE]");
- }
- break;
+ case GT_LSH:
- case GT_MOD:
- case GT_UMOD:
+ if (tree->gtFlags & GTF_ADDRMODE_NO_CSE)
+ {
+ chars += printf("[ADDRMODE_NO_CSE]");
+ }
+ break;
- if (tree->gtFlags & GTF_MOD_INT_RESULT)
- {
- chars += printf("[MOD_INT_RESULT]");
- }
- break;
+ case GT_MOD:
+ case GT_UMOD:
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GT:
- case GT_GE:
+ if (tree->gtFlags & GTF_MOD_INT_RESULT)
+ {
+ chars += printf("[MOD_INT_RESULT]");
+ }
+ break;
- if (tree->gtFlags & GTF_RELOP_NAN_UN)
- {
- chars += printf("[RELOP_NAN_UN]");
- }
- if (tree->gtFlags & GTF_RELOP_JMP_USED)
- {
- chars += printf("[RELOP_JMP_USED]");
- }
- if (tree->gtFlags & GTF_RELOP_QMARK)
- {
- chars += printf("[RELOP_QMARK]");
- }
- if (tree->gtFlags & GTF_RELOP_SMALL)
- {
- chars += printf("[RELOP_SMALL]");
- }
- break;
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GT:
+ case GT_GE:
- case GT_QMARK:
+ if (tree->gtFlags & GTF_RELOP_NAN_UN)
+ {
+ chars += printf("[RELOP_NAN_UN]");
+ }
+ if (tree->gtFlags & GTF_RELOP_JMP_USED)
+ {
+ chars += printf("[RELOP_JMP_USED]");
+ }
+ if (tree->gtFlags & GTF_RELOP_QMARK)
+ {
+ chars += printf("[RELOP_QMARK]");
+ }
+ if (tree->gtFlags & GTF_RELOP_SMALL)
+ {
+ chars += printf("[RELOP_SMALL]");
+ }
+ break;
- if (tree->gtFlags & GTF_QMARK_CAST_INSTOF)
- {
- chars += printf("[QMARK_CAST_INSTOF]");
- }
- break;
+ case GT_QMARK:
- case GT_BOX:
+ if (tree->gtFlags & GTF_QMARK_CAST_INSTOF)
+ {
+ chars += printf("[QMARK_CAST_INSTOF]");
+ }
+ break;
- if (tree->gtFlags & GTF_BOX_VALUE)
- {
- chars += printf("[BOX_VALUE]");
- }
- break;
+ case GT_BOX:
- case GT_CNS_INT:
+ if (tree->gtFlags & GTF_BOX_VALUE)
+ {
+ chars += printf("[BOX_VALUE]");
+ }
+ break;
- {
- unsigned handleKind = (tree->gtFlags & GTF_ICON_HDL_MASK);
+ case GT_CNS_INT:
- switch (handleKind)
{
+ unsigned handleKind = (tree->gtFlags & GTF_ICON_HDL_MASK);
- case GTF_ICON_SCOPE_HDL:
+ switch (handleKind)
+ {
- chars += printf("[ICON_SCOPE_HDL]");
- break;
+ case GTF_ICON_SCOPE_HDL:
- case GTF_ICON_CLASS_HDL:
-
- chars += printf("[ICON_CLASS_HDL]");
- break;
+ chars += printf("[ICON_SCOPE_HDL]");
+ break;
- case GTF_ICON_METHOD_HDL:
+ case GTF_ICON_CLASS_HDL:
- chars += printf("[ICON_METHOD_HDL]");
- break;
+ chars += printf("[ICON_CLASS_HDL]");
+ break;
- case GTF_ICON_FIELD_HDL:
+ case GTF_ICON_METHOD_HDL:
- chars += printf("[ICON_FIELD_HDL]");
- break;
+ chars += printf("[ICON_METHOD_HDL]");
+ break;
- case GTF_ICON_STATIC_HDL:
+ case GTF_ICON_FIELD_HDL:
- chars += printf("[ICON_STATIC_HDL]");
- break;
+ chars += printf("[ICON_FIELD_HDL]");
+ break;
- case GTF_ICON_STR_HDL:
+ case GTF_ICON_STATIC_HDL:
- chars += printf("[ICON_STR_HDL]");
- break;
+ chars += printf("[ICON_STATIC_HDL]");
+ break;
- case GTF_ICON_PSTR_HDL:
+ case GTF_ICON_STR_HDL:
- chars += printf("[ICON_PSTR_HDL]");
- break;
+ chars += printf("[ICON_STR_HDL]");
+ break;
- case GTF_ICON_PTR_HDL:
+ case GTF_ICON_PSTR_HDL:
- chars += printf("[ICON_PTR_HDL]");
- break;
+ chars += printf("[ICON_PSTR_HDL]");
+ break;
- case GTF_ICON_VARG_HDL:
+ case GTF_ICON_PTR_HDL:
- chars += printf("[ICON_VARG_HDL]");
- break;
+ chars += printf("[ICON_PTR_HDL]");
+ break;
- case GTF_ICON_PINVKI_HDL:
+ case GTF_ICON_VARG_HDL:
- chars += printf("[ICON_PINVKI_HDL]");
- break;
+ chars += printf("[ICON_VARG_HDL]");
+ break;
- case GTF_ICON_TOKEN_HDL:
+ case GTF_ICON_PINVKI_HDL:
- chars += printf("[ICON_TOKEN_HDL]");
- break;
+ chars += printf("[ICON_PINVKI_HDL]");
+ break;
- case GTF_ICON_TLS_HDL:
+ case GTF_ICON_TOKEN_HDL:
- chars += printf("[ICON_TLD_HDL]");
- break;
+ chars += printf("[ICON_TOKEN_HDL]");
+ break;
- case GTF_ICON_FTN_ADDR:
+ case GTF_ICON_TLS_HDL:
- chars += printf("[ICON_FTN_ADDR]");
- break;
+ chars += printf("[ICON_TLD_HDL]");
+ break;
- case GTF_ICON_CIDMID_HDL:
+ case GTF_ICON_FTN_ADDR:
- chars += printf("[ICON_CIDMID_HDL]");
- break;
+ chars += printf("[ICON_FTN_ADDR]");
+ break;
- case GTF_ICON_BBC_PTR:
+ case GTF_ICON_CIDMID_HDL:
- chars += printf("[ICON_BBC_PTR]");
- break;
+ chars += printf("[ICON_CIDMID_HDL]");
+ break;
- case GTF_ICON_FIELD_OFF:
+ case GTF_ICON_BBC_PTR:
- chars += printf("[ICON_FIELD_OFF]");
- break;
- }
- }
- break;
+ chars += printf("[ICON_BBC_PTR]");
+ break;
- case GT_COPYBLK:
- case GT_INITBLK:
- case GT_COPYOBJ:
+ case GTF_ICON_FIELD_OFF:
- if (tree->AsBlkOp()->HasGCPtr())
- {
- chars += printf("[BLK_HASGCPTR]");
- }
- if (tree->AsBlkOp()->IsVolatile())
- {
- chars += printf("[BLK_VOLATILE]");
- }
- if (tree->AsBlkOp()->IsUnaligned())
- {
- chars += printf("[BLK_UNALIGNED]");
+ chars += printf("[ICON_FIELD_OFF]");
+ break;
+ }
}
break;
-
- case GT_CALL:
-
- if (tree->gtFlags & GTF_CALL_UNMANAGED)
- {
- chars += printf("[CALL_UNMANAGED]");
- }
- if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE)
- {
- chars += printf("[CALL_INLINE_CANDIDATE]");
- }
- if (tree->gtFlags & GTF_CALL_NONVIRT)
- {
- chars += printf("[CALL_NONVIRT]");
- }
- if (tree->gtFlags & GTF_CALL_VIRT_VTABLE)
- {
- chars += printf("[CALL_VIRT_VTABLE]");
- }
- if (tree->gtFlags & GTF_CALL_VIRT_STUB)
- {
- chars += printf("[CALL_VIRT_STUB]");
- }
- if (tree->gtFlags & GTF_CALL_NULLCHECK)
- {
- chars += printf("[CALL_NULLCHECK]");
- }
- if (tree->gtFlags & GTF_CALL_POP_ARGS)
- {
- chars += printf("[CALL_POP_ARGS]");
- }
- if (tree->gtFlags & GTF_CALL_HOISTABLE)
- {
- chars += printf("[CALL_HOISTABLE]");
- }
- if (tree->gtFlags & GTF_CALL_REG_SAVE)
- {
- chars += printf("[CALL_REG_SAVE]");
- }
- // More flags associated with calls.
+ case GT_COPYBLK:
+ case GT_INITBLK:
+ case GT_COPYOBJ:
- {
- GenTreeCall* call = tree->AsCall();
+ if (tree->AsBlkOp()->HasGCPtr())
+ {
+ chars += printf("[BLK_HASGCPTR]");
+ }
+ if (tree->AsBlkOp()->IsVolatile())
+ {
+ chars += printf("[BLK_VOLATILE]");
+ }
+ if (tree->AsBlkOp()->IsUnaligned())
+ {
+ chars += printf("[BLK_UNALIGNED]");
+ }
+ break;
- if (call->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL)
- {
- chars += printf("[CALL_M_EXPLICIT_TAILCALL]");
- }
- if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL)
- {
- chars += printf("[CALL_M_TAILCALL]");
- }
- if (call->gtCallMoreFlags & GTF_CALL_M_VARARGS)
- {
- chars += printf("[CALL_M_VARARGS]");
- }
- if (call->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG)
- {
- chars += printf("[CALL_M_RETBUFFARG]");
- }
- if (call->gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV)
- {
- chars += printf("[CALL_M_DELEGATE_INV]");
- }
- if (call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK)
- {
- chars += printf("[CALL_M_NOGCCHECK]");
- }
- if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
- {
- chars += printf("[CALL_M_SPECIAL_INTRINSIC]");
- }
+ case GT_CALL:
- if (call->IsUnmanaged())
- {
- if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
+ if (tree->gtFlags & GTF_CALL_UNMANAGED)
{
- chars += printf("[CALL_M_UNMGD_THISCALL]");
+ chars += printf("[CALL_UNMANAGED]");
}
- }
- else if (call->IsVirtualStub())
- {
- if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
+ if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE)
{
- chars += printf("[CALL_M_VIRTSTUB_REL_INDIRECT]");
+ chars += printf("[CALL_INLINE_CANDIDATE]");
}
- }
- else if (!call->IsVirtual())
- {
- if (call->gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS)
+ if (tree->gtFlags & GTF_CALL_NONVIRT)
{
- chars += printf("[CALL_M_NONVIRT_SAME_THIS]");
+ chars += printf("[CALL_NONVIRT]");
+ }
+ if (tree->gtFlags & GTF_CALL_VIRT_VTABLE)
+ {
+ chars += printf("[CALL_VIRT_VTABLE]");
+ }
+ if (tree->gtFlags & GTF_CALL_VIRT_STUB)
+ {
+ chars += printf("[CALL_VIRT_STUB]");
+ }
+ if (tree->gtFlags & GTF_CALL_NULLCHECK)
+ {
+ chars += printf("[CALL_NULLCHECK]");
+ }
+ if (tree->gtFlags & GTF_CALL_POP_ARGS)
+ {
+ chars += printf("[CALL_POP_ARGS]");
+ }
+ if (tree->gtFlags & GTF_CALL_HOISTABLE)
+ {
+ chars += printf("[CALL_HOISTABLE]");
+ }
+ if (tree->gtFlags & GTF_CALL_REG_SAVE)
+ {
+ chars += printf("[CALL_REG_SAVE]");
}
- }
- if (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH)
- {
- chars += printf("[CALL_M_FRAME_VAR_DEATH]");
- }
+ // More flags associated with calls.
+
+ {
+ GenTreeCall* call = tree->AsCall();
+
+ if (call->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL)
+ {
+ chars += printf("[CALL_M_EXPLICIT_TAILCALL]");
+ }
+ if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL)
+ {
+ chars += printf("[CALL_M_TAILCALL]");
+ }
+ if (call->gtCallMoreFlags & GTF_CALL_M_VARARGS)
+ {
+ chars += printf("[CALL_M_VARARGS]");
+ }
+ if (call->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG)
+ {
+ chars += printf("[CALL_M_RETBUFFARG]");
+ }
+ if (call->gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV)
+ {
+ chars += printf("[CALL_M_DELEGATE_INV]");
+ }
+ if (call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK)
+ {
+ chars += printf("[CALL_M_NOGCCHECK]");
+ }
+ if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
+ {
+ chars += printf("[CALL_M_SPECIAL_INTRINSIC]");
+ }
+
+ if (call->IsUnmanaged())
+ {
+ if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
+ {
+ chars += printf("[CALL_M_UNMGD_THISCALL]");
+ }
+ }
+ else if (call->IsVirtualStub())
+ {
+ if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
+ {
+ chars += printf("[CALL_M_VIRTSTUB_REL_INDIRECT]");
+ }
+ }
+ else if (!call->IsVirtual())
+ {
+ if (call->gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS)
+ {
+ chars += printf("[CALL_M_NONVIRT_SAME_THIS]");
+ }
+ }
+
+ if (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH)
+ {
+ chars += printf("[CALL_M_FRAME_VAR_DEATH]");
+ }
#ifndef LEGACY_BACKEND
- if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_HELPER)
- {
- chars += printf("[CALL_M_TAILCALL_VIA_HELPER]");
- }
+ if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_HELPER)
+ {
+ chars += printf("[CALL_M_TAILCALL_VIA_HELPER]");
+ }
#endif
#if FEATURE_TAILCALL_OPT
- if (call->gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL)
- {
- chars += printf("[CALL_M_IMPLICIT_TAILCALL]");
- }
+ if (call->gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL)
+ {
+ chars += printf("[CALL_M_IMPLICIT_TAILCALL]");
+ }
#endif
- if (call->gtCallMoreFlags & GTF_CALL_M_PINVOKE)
- {
- chars += printf("[CALL_M_PINVOKE]");
- }
- }
- break;
+ if (call->gtCallMoreFlags & GTF_CALL_M_PINVOKE)
+ {
+ chars += printf("[CALL_M_PINVOKE]");
+ }
+ }
+ break;
- case GT_STMT:
+ case GT_STMT:
- if (tree->gtFlags & GTF_STMT_CMPADD)
- {
- chars += printf("[STMT_CMPADD]");
- }
- if (tree->gtFlags & GTF_STMT_HAS_CSE)
- {
- chars += printf("[STMT_HAS_CSE]");
- }
- if (tree->gtFlags & GTF_STMT_TOP_LEVEL)
- {
- chars += printf("[STMT_TOP_LEVEL]");
- }
- if (tree->gtFlags & GTF_STMT_SKIP_LOWER)
- {
- chars += printf("[STMT_SKIP_LOWER]");
- }
- break;
+ if (tree->gtFlags & GTF_STMT_CMPADD)
+ {
+ chars += printf("[STMT_CMPADD]");
+ }
+ if (tree->gtFlags & GTF_STMT_HAS_CSE)
+ {
+ chars += printf("[STMT_HAS_CSE]");
+ }
+ if (tree->gtFlags & GTF_STMT_TOP_LEVEL)
+ {
+ chars += printf("[STMT_TOP_LEVEL]");
+ }
+ if (tree->gtFlags & GTF_STMT_SKIP_LOWER)
+ {
+ chars += printf("[STMT_SKIP_LOWER]");
+ }
+ break;
- default:
+ default:
{
- unsigned flags = (tree->gtFlags & (~(unsigned)(GTF_COMMON_MASK|GTF_OVERFLOW)));
- if (flags != 0)
- chars += printf("[%08X]", flags);
+ unsigned flags = (tree->gtFlags & (~(unsigned)(GTF_COMMON_MASK | GTF_OVERFLOW)));
+ if (flags != 0)
+ {
+ chars += printf("[%08X]", flags);
+ }
}
break;
}
@@ -8735,19 +8851,19 @@ int cTreeFlagsIR(Compiler *comp, GenTree *tree)
}
switch (op)
{
- case GT_MUL:
- case GT_CAST:
- case GT_ADD:
- case GT_SUB:
- case GT_ASG_ADD:
- case GT_ASG_SUB:
- if (tree->gtFlags & GTF_OVERFLOW)
- {
- chars += printf("[OVERFLOW]");
- }
- break;
- default:
- break;
+ case GT_MUL:
+ case GT_CAST:
+ case GT_ADD:
+ case GT_SUB:
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ if (tree->gtFlags & GTF_OVERFLOW)
+ {
+ chars += printf("[OVERFLOW]");
+ }
+ break;
+ default:
+ break;
}
if (tree->gtFlags & GTF_EXCEPT)
{
@@ -8851,7 +8967,7 @@ int cTreeFlagsIR(Compiler *comp, GenTree *tree)
* COMPlus_JitDumpIR support - dump out tree node flags for linear IR form
*/
-int dTreeFlagsIR(GenTree *tree)
+int dTreeFlagsIR(GenTree* tree)
{
int chars = cTreeFlagsIR(JitTls::GetCompiler(), tree);
@@ -8863,7 +8979,7 @@ int dTreeFlagsIR(GenTree *tree)
* COMPlus_JitDumpIR support - dump out SSA number on tree node for linear IR form
*/
-int cSsaNumIR(Compiler *comp, GenTree *tree)
+int cSsaNumIR(Compiler* comp, GenTree* tree)
{
int chars = 0;
@@ -8872,13 +8988,11 @@ int cSsaNumIR(Compiler *comp, GenTree *tree)
if (tree->gtFlags & GTF_VAR_USEASG)
{
assert(tree->gtFlags & GTF_VAR_DEF);
- chars += printf("<u:%d><d:%d>", tree->gtLclVarCommon.gtSsaNum,
- comp->GetSsaNumForLocalVarDef(tree));
+ chars += printf("<u:%d><d:%d>", tree->gtLclVarCommon.gtSsaNum, comp->GetSsaNumForLocalVarDef(tree));
}
else
{
- chars += printf("<%s:%d>", (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u",
- tree->gtLclVarCommon.gtSsaNum);
+ chars += printf("<%s:%d>", (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u", tree->gtLclVarCommon.gtSsaNum);
}
}
@@ -8890,7 +9004,7 @@ int cSsaNumIR(Compiler *comp, GenTree *tree)
* COMPlus_JitDumpIR support - dump out SSA number on tree node for linear IR form
*/
-int dSsaNumIR(GenTree *tree)
+int dSsaNumIR(GenTree* tree)
{
int chars = cSsaNumIR(JitTls::GetCompiler(), tree);
@@ -8902,7 +9016,7 @@ int dSsaNumIR(GenTree *tree)
* COMPlus_JitDumpIR support - dump out Value Number on tree node for linear IR form
*/
-int cValNumIR(Compiler *comp, GenTree *tree)
+int cValNumIR(Compiler* comp, GenTree* tree)
{
int chars = 0;
@@ -8910,7 +9024,7 @@ int cValNumIR(Compiler *comp, GenTree *tree)
{
assert(tree->gtVNPair.GetConservative() != ValueNumStore::NoVN);
ValueNumPair vnp = tree->gtVNPair;
- ValueNum vn;
+ ValueNum vn;
if (vnp.BothEqual())
{
chars += printf("<v:");
@@ -8950,7 +9064,7 @@ int cValNumIR(Compiler *comp, GenTree *tree)
* COMPlus_JitDumpIR support - dump out Value Number on tree node for linear IR form
*/
-int dValNumIR(GenTree *tree)
+int dValNumIR(GenTree* tree)
{
int chars = cValNumIR(JitTls::GetCompiler(), tree);
@@ -8962,229 +9076,229 @@ int dValNumIR(GenTree *tree)
* COMPlus_JitDumpIR support - dump out tree leaf node for linear IR form
*/
-int cLeafIR(Compiler *comp, GenTree* tree)
+int cLeafIR(Compiler* comp, GenTree* tree)
{
- int chars = 0;
- genTreeOps op = tree->OperGet();
+ int chars = 0;
+ genTreeOps op = tree->OperGet();
const char* ilKind = nullptr;
const char* ilName = nullptr;
- unsigned ilNum = 0;
+ unsigned ilNum = 0;
unsigned lclNum = 0;
bool hasSsa = false;
-
+
switch (op)
{
- case GT_PHI_ARG:
- case GT_LCL_VAR:
- case GT_LCL_VAR_ADDR:
- case GT_STORE_LCL_VAR:
- case GT_REG_VAR:
+ case GT_PHI_ARG:
+ case GT_LCL_VAR:
+ case GT_LCL_VAR_ADDR:
+ case GT_STORE_LCL_VAR:
+ case GT_REG_VAR:
- lclNum = tree->gtLclVarCommon.gtLclNum;
- comp->gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
- if (ilName != nullptr)
- {
- chars += printf("%s", ilName);
- }
- else
- {
- LclVarDsc * varDsc = comp->lvaTable + lclNum;
- chars += printf("%s%d", ilKind, ilNum);
- if (comp->dumpIRLocals)
+ lclNum = tree->gtLclVarCommon.gtLclNum;
+ comp->gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
+ if (ilName != nullptr)
+ {
+ chars += printf("%s", ilName);
+ }
+ else
{
- chars += printf("(V%02u", lclNum);
- if (varDsc->lvTracked)
+ LclVarDsc* varDsc = comp->lvaTable + lclNum;
+ chars += printf("%s%d", ilKind, ilNum);
+ if (comp->dumpIRLocals)
{
- chars += printf(":T%02u", varDsc->lvVarIndex);
+ chars += printf("(V%02u", lclNum);
+ if (varDsc->lvTracked)
+ {
+ chars += printf(":T%02u", varDsc->lvVarIndex);
+ }
+ if (comp->dumpIRRegs)
+ {
+ if (varDsc->lvRegister)
+ {
+ if (isRegPairType(varDsc->TypeGet()))
+ {
+ chars += printf(":%s:%s",
+ getRegName(varDsc->lvOtherReg), // hi32
+ getRegName(varDsc->lvRegNum)); // lo32
+ }
+ else
+ {
+ chars += printf(":%s", getRegName(varDsc->lvRegNum));
+ }
+ }
+ else
+ {
+ switch (tree->GetRegTag())
+ {
+ case GenTree::GT_REGTAG_REG:
+ chars += printf(":%s", comp->compRegVarName(tree->gtRegNum));
+ break;
+#if CPU_LONG_USES_REGPAIR
+ case GenTree::GT_REGTAG_REGPAIR:
+ chars += printf(":%s", comp->compRegPairName(tree->gtRegPair));
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ }
+ chars += printf(")");
}
- if (comp->dumpIRRegs)
+ else if (comp->dumpIRRegs)
{
if (varDsc->lvRegister)
{
+ chars += printf("(");
if (isRegPairType(varDsc->TypeGet()))
{
- chars += printf(":%s:%s",
- getRegName(varDsc->lvOtherReg), // hi32
- getRegName(varDsc->lvRegNum)); // lo32
+ chars += printf("%s:%s",
+ getRegName(varDsc->lvOtherReg), // hi32
+ getRegName(varDsc->lvRegNum)); // lo32
}
else
{
- chars += printf(":%s", getRegName(varDsc->lvRegNum));
+ chars += printf("%s", getRegName(varDsc->lvRegNum));
}
+ chars += printf(")");
}
- else
+ else
{
switch (tree->GetRegTag())
{
- case GenTree::GT_REGTAG_REG:
- chars += printf(":%s", comp->compRegVarName(tree->gtRegNum));
- break;
+ case GenTree::GT_REGTAG_REG:
+ chars += printf("(%s)", comp->compRegVarName(tree->gtRegNum));
+ break;
#if CPU_LONG_USES_REGPAIR
- case GenTree::GT_REGTAG_REGPAIR:
- chars += printf(":%s", comp->compRegPairName(tree->gtRegPair));
- break;
+ case GenTree::GT_REGTAG_REGPAIR:
+ chars += printf("(%s)", comp->compRegPairName(tree->gtRegPair));
+ break;
#endif
- default:
- break;
+ default:
+ break;
}
}
}
- chars += printf(")");
}
- else if (comp->dumpIRRegs)
+
+ if (op == GT_REG_VAR)
{
- if (varDsc->lvRegister)
+ if (isFloatRegType(tree->gtType))
{
- chars += printf("(");
- if (isRegPairType(varDsc->TypeGet()))
- {
- chars += printf("%s:%s",
- getRegName(varDsc->lvOtherReg), // hi32
- getRegName(varDsc->lvRegNum)); // lo32
- }
- else
- {
- chars += printf("%s", getRegName(varDsc->lvRegNum));
- }
- chars += printf(")");
+ assert(tree->gtRegVar.gtRegNum == tree->gtRegNum);
+ chars += printf("(FPV%u)", tree->gtRegNum);
}
else
{
- switch (tree->GetRegTag())
- {
- case GenTree::GT_REGTAG_REG:
- chars += printf("(%s)", comp->compRegVarName(tree->gtRegNum));
- break;
-#if CPU_LONG_USES_REGPAIR
- case GenTree::GT_REGTAG_REGPAIR:
- chars += printf("(%s)", comp->compRegPairName(tree->gtRegPair));
- break;
-#endif
- default:
- break;
- }
+ chars += printf("(%s)", comp->compRegVarName(tree->gtRegVar.gtRegNum));
}
}
- }
- if (op == GT_REG_VAR)
- {
- if (isFloatRegType(tree->gtType))
+ hasSsa = true;
+ break;
+
+ case GT_LCL_FLD:
+ case GT_LCL_FLD_ADDR:
+ case GT_STORE_LCL_FLD:
+
+ lclNum = tree->gtLclVarCommon.gtLclNum;
+ comp->gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
+ if (ilName != nullptr)
{
- assert(tree->gtRegVar.gtRegNum == tree->gtRegNum);
- chars += printf("(FPV%u)", tree->gtRegNum);
+ chars += printf("%s+%u", ilName, tree->gtLclFld.gtLclOffs);
}
else
{
- chars += printf("(%s)", comp->compRegVarName(tree->gtRegVar.gtRegNum));
- }
- }
-
- hasSsa = true;
- break;
-
- case GT_LCL_FLD:
- case GT_LCL_FLD_ADDR:
- case GT_STORE_LCL_FLD:
-
- lclNum = tree->gtLclVarCommon.gtLclNum;
- comp->gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
- if (ilName != nullptr)
- {
- chars += printf("%s+%u", ilName, tree->gtLclFld.gtLclOffs);
- }
- else
- {
- chars += printf("%s%d+%u", ilKind, ilNum, tree->gtLclFld.gtLclOffs);
- LclVarDsc * varDsc = comp->lvaTable + lclNum;
- if (comp->dumpIRLocals)
- {
- chars += printf("(V%02u", lclNum);
- if (varDsc->lvTracked)
+ chars += printf("%s%d+%u", ilKind, ilNum, tree->gtLclFld.gtLclOffs);
+ LclVarDsc* varDsc = comp->lvaTable + lclNum;
+ if (comp->dumpIRLocals)
{
- chars += printf(":T%02u", varDsc->lvVarIndex);
+ chars += printf("(V%02u", lclNum);
+ if (varDsc->lvTracked)
+ {
+ chars += printf(":T%02u", varDsc->lvVarIndex);
+ }
+ if (comp->dumpIRRegs)
+ {
+ if (varDsc->lvRegister)
+ {
+ if (isRegPairType(varDsc->TypeGet()))
+ {
+ chars += printf(":%s:%s",
+ getRegName(varDsc->lvOtherReg), // hi32
+ getRegName(varDsc->lvRegNum)); // lo32
+ }
+ else
+ {
+ chars += printf(":%s", getRegName(varDsc->lvRegNum));
+ }
+ }
+ else
+ {
+ switch (tree->GetRegTag())
+ {
+ case GenTree::GT_REGTAG_REG:
+ chars += printf(":%s", comp->compRegVarName(tree->gtRegNum));
+ break;
+#if CPU_LONG_USES_REGPAIR
+ case GenTree::GT_REGTAG_REGPAIR:
+ chars += printf(":%s", comp->compRegPairName(tree->gtRegPair));
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ }
+ chars += printf(")");
}
- if (comp->dumpIRRegs)
+ else if (comp->dumpIRRegs)
{
if (varDsc->lvRegister)
{
+ chars += printf("(");
if (isRegPairType(varDsc->TypeGet()))
{
- chars += printf(":%s:%s",
- getRegName(varDsc->lvOtherReg), // hi32
- getRegName(varDsc->lvRegNum)); // lo32
+ chars += printf("%s:%s",
+ getRegName(varDsc->lvOtherReg), // hi32
+ getRegName(varDsc->lvRegNum)); // lo32
}
else
{
- chars += printf(":%s", getRegName(varDsc->lvRegNum));
+ chars += printf("%s", getRegName(varDsc->lvRegNum));
}
+ chars += printf(")");
}
else
{
switch (tree->GetRegTag())
{
- case GenTree::GT_REGTAG_REG:
- chars += printf(":%s", comp->compRegVarName(tree->gtRegNum));
- break;
+ case GenTree::GT_REGTAG_REG:
+ chars += printf("(%s)", comp->compRegVarName(tree->gtRegNum));
+ break;
#if CPU_LONG_USES_REGPAIR
- case GenTree::GT_REGTAG_REGPAIR:
- chars += printf(":%s", comp->compRegPairName(tree->gtRegPair));
- break;
+ case GenTree::GT_REGTAG_REGPAIR:
+ chars += printf("(%s)", comp->compRegPairName(tree->gtRegPair));
+ break;
#endif
- default:
- break;
+ default:
+ break;
}
}
}
- chars += printf(")");
- }
- else if (comp->dumpIRRegs)
- {
- if (varDsc->lvRegister)
- {
- chars += printf("(");
- if (isRegPairType(varDsc->TypeGet()))
- {
- chars += printf("%s:%s",
- getRegName(varDsc->lvOtherReg), // hi32
- getRegName(varDsc->lvRegNum)); // lo32
- }
- else
- {
- chars += printf("%s", getRegName(varDsc->lvRegNum));
- }
- chars += printf(")");
- }
- else
- {
- switch (tree->GetRegTag())
- {
- case GenTree::GT_REGTAG_REG:
- chars += printf("(%s)", comp->compRegVarName(tree->gtRegNum));
- break;
-#if CPU_LONG_USES_REGPAIR
- case GenTree::GT_REGTAG_REGPAIR:
- chars += printf("(%s)", comp->compRegPairName(tree->gtRegPair));
- break;
-#endif
- default:
- break;
- }
- }
}
- }
- // TODO: We probably want to expand field sequence.
- // gtDispFieldSeq(tree->gtLclFld.gtFieldSeq);
+ // TODO: We probably want to expand field sequence.
+ // gtDispFieldSeq(tree->gtLclFld.gtFieldSeq);
- hasSsa = true;
- break;
+ hasSsa = true;
+ break;
- case GT_CNS_INT:
+ case GT_CNS_INT:
- if (tree->IsIconHandle())
- {
+ if (tree->IsIconHandle())
+ {
#if 0
// TODO: Commented out because sometimes the CLR throws
// an exception when asking the names of some handles.
@@ -9288,105 +9402,109 @@ int cLeafIR(Compiler *comp, GenTree* tree)
}
#else
#ifdef _TARGET_64BIT_
- if ((tree->gtIntCon.gtIconVal & 0xFFFFFFFF00000000LL) != 0)
- {
- chars += printf("HANDLE(0x%llx)", dspPtr(tree->gtIntCon.gtIconVal));
- }
- else
+ if ((tree->gtIntCon.gtIconVal & 0xFFFFFFFF00000000LL) != 0)
+ {
+ chars += printf("HANDLE(0x%llx)", dspPtr(tree->gtIntCon.gtIconVal));
+ }
+ else
#endif
- {
- chars += printf("HANDLE(0x%0x)", dspPtr(tree->gtIntCon.gtIconVal));
- }
+ {
+ chars += printf("HANDLE(0x%0x)", dspPtr(tree->gtIntCon.gtIconVal));
+ }
#endif
- }
- else
- {
- if (tree->TypeGet() == TYP_REF)
- {
- assert(tree->gtIntCon.gtIconVal == 0);
- chars += printf("null");
- }
-#ifdef _TARGET_64BIT_
- else if ((tree->gtIntCon.gtIconVal & 0xFFFFFFFF00000000LL) != 0)
- {
- chars += printf("0x%llx", tree->gtIntCon.gtIconVal);
}
else
-#endif
{
- chars += printf("%ld(0x%x)", tree->gtIntCon.gtIconVal, tree->gtIntCon.gtIconVal);
+ if (tree->TypeGet() == TYP_REF)
+ {
+ assert(tree->gtIntCon.gtIconVal == 0);
+ chars += printf("null");
+ }
+#ifdef _TARGET_64BIT_
+ else if ((tree->gtIntCon.gtIconVal & 0xFFFFFFFF00000000LL) != 0)
+ {
+ chars += printf("0x%llx", tree->gtIntCon.gtIconVal);
+ }
+ else
+#endif
+ {
+ chars += printf("%ld(0x%x)", tree->gtIntCon.gtIconVal, tree->gtIntCon.gtIconVal);
+ }
}
- }
- break;
+ break;
- case GT_CNS_LNG:
+ case GT_CNS_LNG:
- chars += printf("CONST(LONG)");
- break;
+ chars += printf("CONST(LONG)");
+ break;
- case GT_CNS_DBL:
+ case GT_CNS_DBL:
- chars += printf("CONST(DOUBLE)");
- break;
+ chars += printf("CONST(DOUBLE)");
+ break;
- case GT_CNS_STR:
+ case GT_CNS_STR:
- chars += printf("CONST(STR)");
- break;
+ chars += printf("CONST(STR)");
+ break;
- case GT_JMP:
+ case GT_JMP:
{
- const char * methodName;
- const char * className;
+ const char* methodName;
+ const char* className;
methodName = comp->eeGetMethodName((CORINFO_METHOD_HANDLE)tree->gtVal.gtVal1, &className);
chars += printf(" %s.%s", className, methodName);
}
break;
- case GT_NO_OP:
- case GT_START_NONGC:
- case GT_PROF_HOOK:
- case GT_CATCH_ARG:
- case GT_MEMORYBARRIER:
- case GT_ARGPLACE:
- case GT_PINVOKE_PROLOG:
+ case GT_NO_OP:
+ case GT_START_NONGC:
+ case GT_PROF_HOOK:
+ case GT_CATCH_ARG:
+ case GT_MEMORYBARRIER:
+ case GT_ARGPLACE:
+ case GT_PINVOKE_PROLOG:
#ifndef LEGACY_BACKEND
- case GT_JMPTABLE:
+ case GT_JMPTABLE:
#endif
- // Do nothing.
- break;
+ // Do nothing.
+ break;
- case GT_RET_EXPR:
+ case GT_RET_EXPR:
- chars += printf("t%d", tree->gtRetExpr.gtInlineCandidate->gtTreeID);
- break;
+ chars += printf("t%d", tree->gtRetExpr.gtInlineCandidate->gtTreeID);
+ break;
- case GT_PHYSREG:
+ case GT_PHYSREG:
- chars += printf("%s", getRegName(tree->gtPhysReg.gtSrcReg, varTypeIsFloating(tree)));
- break;
+ chars += printf("%s", getRegName(tree->gtPhysReg.gtSrcReg, varTypeIsFloating(tree)));
+ break;
- case GT_LABEL:
+ case GT_LABEL:
- if (tree->gtLabel.gtLabBB)
- chars += printf("BB%02u", tree->gtLabel.gtLabBB->bbNum);
- else
- chars += printf("BB?");
- break;
+ if (tree->gtLabel.gtLabBB)
+ {
+ chars += printf("BB%02u", tree->gtLabel.gtLabBB->bbNum);
+ }
+ else
+ {
+ chars += printf("BB?");
+ }
+ break;
- case GT_CLS_VAR:
- case GT_CLS_VAR_ADDR:
- default:
+ case GT_CLS_VAR:
+ case GT_CLS_VAR_ADDR:
+ default:
- if (tree->OperIsLeaf())
- {
- chars += printf("<leaf nyi: %s>", tree->OpName(tree->OperGet()));
- }
+ if (tree->OperIsLeaf())
+ {
+ chars += printf("<leaf nyi: %s>", tree->OpName(tree->OperGet()));
+ }
- chars += printf("t%d", tree->gtTreeID);
- break;
+ chars += printf("t%d", tree->gtTreeID);
+ break;
}
if (comp->dumpIRTypes)
@@ -9410,7 +9528,7 @@ int cLeafIR(Compiler *comp, GenTree* tree)
* COMPlus_JitDumpIR support - dump out tree leaf node for linear IR form
*/
-int dLeafIR(GenTree* tree)
+int dLeafIR(GenTree* tree)
{
int chars = cLeafIR(JitTls::GetCompiler(), tree);
@@ -9422,11 +9540,11 @@ int dLeafIR(GenTree* tree)
* COMPlus_JitDumpIR support - dump out tree indir node for linear IR form
*/
-int cIndirIR(Compiler *comp, GenTree* tree)
+int cIndirIR(Compiler* comp, GenTree* tree)
{
assert(tree->gtOper == GT_IND);
- int chars = 0;
+ int chars = 0;
GenTree* child;
chars += printf("[");
@@ -9442,7 +9560,7 @@ int cIndirIR(Compiler *comp, GenTree* tree)
* COMPlus_JitDumpIR support - dump out tree indir node for linear IR form
*/
-int dIndirIR(GenTree* tree)
+int dIndirIR(GenTree* tree)
{
int chars = cIndirIR(JitTls::GetCompiler(), tree);
@@ -9454,27 +9572,27 @@ int dIndirIR(GenTree* tree)
* COMPlus_JitDumpIR support - dump out tree operand node for linear IR form
*/
-int cOperandIR(Compiler* comp, GenTree* operand)
+int cOperandIR(Compiler* comp, GenTree* operand)
{
int chars = 0;
- if (operand == NULL)
+ if (operand == nullptr)
{
chars += printf("t?");
return chars;
}
- bool dumpTypes = comp->dumpIRTypes;
- bool dumpValnums = comp->dumpIRValnums;
- bool foldIndirs = comp->dumpIRDataflow;
- bool foldLeafs = comp->dumpIRNoLeafs;
- bool foldCommas = comp->dumpIRDataflow;
+ bool dumpTypes = comp->dumpIRTypes;
+ bool dumpValnums = comp->dumpIRValnums;
+ bool foldIndirs = comp->dumpIRDataflow;
+ bool foldLeafs = comp->dumpIRNoLeafs;
+ bool foldCommas = comp->dumpIRDataflow;
bool dumpDataflow = comp->dumpIRDataflow;
- bool foldLists = comp->dumpIRNoLists;
- bool dumpRegs = comp->dumpIRRegs;
+ bool foldLists = comp->dumpIRNoLists;
+ bool dumpRegs = comp->dumpIRRegs;
genTreeOps op = operand->OperGet();
-
+
if (foldLeafs && operand->OperIsLeaf())
{
if ((op == GT_ARGPLACE) && foldLists)
@@ -9483,10 +9601,9 @@ int cOperandIR(Compiler* comp, GenTree* operand)
}
chars += cLeafIR(comp, operand);
}
- else if (dumpDataflow &&
- (operand->OperIsAssignment() || (op == GT_STORE_LCL_VAR) || (op == GT_STORE_LCL_FLD)))
+ else if (dumpDataflow && (operand->OperIsAssignment() || (op == GT_STORE_LCL_VAR) || (op == GT_STORE_LCL_FLD)))
{
- operand = operand->GetChild(0);
+ operand = operand->GetChild(0);
chars += cOperandIR(comp, operand);
}
else if ((op == GT_INDEX) && foldIndirs)
@@ -9520,16 +9637,18 @@ int cOperandIR(Compiler* comp, GenTree* operand)
}
else if ((op == GT_LIST) && foldLists)
{
- GenTree *list = operand;
+ GenTree* list = operand;
unsigned childCount = list->NumChildren();
-
- operand = list->GetChild(0);
+
+ operand = list->GetChild(0);
int operandChars = cOperandIR(comp, operand);
chars += operandChars;
if (childCount > 1)
{
if (operandChars > 0)
+ {
chars += printf(", ");
+ }
operand = list->GetChild(1);
if (operand->gtOper == GT_LIST)
{
@@ -9546,11 +9665,11 @@ int cOperandIR(Compiler* comp, GenTree* operand)
chars += printf("t%d", operand->gtTreeID);
if (dumpRegs)
{
- regNumber regNum = operand->GetReg();
- if (regNum != REG_NA)
- {
- chars += printf("(%s)", getRegName(regNum));
- }
+ regNumber regNum = operand->GetReg();
+ if (regNum != REG_NA)
+ {
+ chars += printf("(%s)", getRegName(regNum));
+ }
}
if (dumpTypes)
{
@@ -9570,7 +9689,7 @@ int cOperandIR(Compiler* comp, GenTree* operand)
* COMPlus_JitDumpIR support - dump out tree operand node for linear IR form
*/
-int dOperandIR(GenTree* operand)
+int dOperandIR(GenTree* operand)
{
int chars = cOperandIR(JitTls::GetCompiler(), operand);
@@ -9582,7 +9701,7 @@ int dOperandIR(GenTree* operand)
* COMPlus_JitDumpIR support - dump out tree list of nodes for linear IR form
*/
-int cListIR(Compiler* comp, GenTree* list)
+int cListIR(Compiler* comp, GenTree* list)
{
int chars = 0;
int operandChars;
@@ -9599,9 +9718,11 @@ int cListIR(Compiler* comp, GenTree* list)
for (unsigned childIndex = 0; childIndex < childCount; childIndex++)
{
if ((childIndex > 0) && (operandChars > 0))
+ {
chars += printf(", ");
-
- child = list->GetChild(childIndex);
+ }
+
+ child = list->GetChild(childIndex);
operandChars = cOperandIR(comp, child);
chars += operandChars;
}
@@ -9614,7 +9735,7 @@ int cListIR(Compiler* comp, GenTree* list)
* COMPlus_JitDumpIR support - dump out tree list of nodes for linear IR form
*/
-int dListIR(GenTree* list)
+int dListIR(GenTree* list)
{
int chars = cListIR(JitTls::GetCompiler(), list);
@@ -9626,7 +9747,7 @@ int dListIR(GenTree* list)
* COMPlus_JitDumpIR support - dump out tree dependencies based on comma nodes for linear IR form
*/
-int cDependsIR(Compiler* comp, GenTree* comma, bool *first)
+int cDependsIR(Compiler* comp, GenTree* comma, bool* first)
{
int chars = 0;
@@ -9642,7 +9763,9 @@ int cDependsIR(Compiler* comp, GenTree* comma, bool *first)
else
{
if (!(*first))
+ {
chars += printf(", ");
+ }
chars += printf("t%d", child->gtTreeID);
*first = false;
}
@@ -9661,11 +9784,11 @@ int cDependsIR(Compiler* comp, GenTree* comma, bool *first)
* COMPlus_JitDumpIR support - dump out tree dependencies based on comma nodes for linear IR form
*/
-int dDependsIR(GenTree* comma)
+int dDependsIR(GenTree* comma)
{
- int chars = 0;
+ int chars = 0;
bool first = TRUE;
-
+
chars = cDependsIR(JitTls::GetCompiler(), comma, &first);
return chars;
@@ -9676,17 +9799,17 @@ int dDependsIR(GenTree* comma)
* COMPlus_JitDumpIR support - dump out tree node in linear IR form
*/
-void cNodeIR(Compiler* comp, GenTree* tree)
+void cNodeIR(Compiler* comp, GenTree* tree)
{
- bool foldLeafs = comp->dumpIRNoLeafs;
- bool foldIndirs = comp->dumpIRDataflow;
- bool foldLists = comp->dumpIRNoLists;
+ bool foldLeafs = comp->dumpIRNoLeafs;
+ bool foldIndirs = comp->dumpIRDataflow;
+ bool foldLists = comp->dumpIRNoLists;
bool dataflowView = comp->dumpIRDataflow;
- bool dumpTypes = comp->dumpIRTypes;
- bool dumpValnums = comp->dumpIRValnums;
- bool noStmts =comp->dumpIRNoStmts;
- genTreeOps op = tree->OperGet();
- unsigned childCount = tree->NumChildren();
+ bool dumpTypes = comp->dumpIRTypes;
+ bool dumpValnums = comp->dumpIRValnums;
+ bool noStmts = comp->dumpIRNoStmts;
+ genTreeOps op = tree->OperGet();
+ unsigned childCount = tree->NumChildren();
GenTree* child;
// What are we skipping?
@@ -9744,7 +9867,7 @@ void cNodeIR(Compiler* comp, GenTree* tree)
// if (comp->compRationalIRForm)
// {
- // chars += printf("R");
+ // chars += printf("R");
// }
chars += printf(" ");
@@ -9765,7 +9888,7 @@ void cNodeIR(Compiler* comp, GenTree* tree)
chars += printf("]");
if (dumpTypes)
{
- chars += cTreeTypeIR(comp, tree);
+ chars += cTreeTypeIR(comp, tree);
}
if (dumpValnums)
{
@@ -9777,15 +9900,15 @@ void cNodeIR(Compiler* comp, GenTree* tree)
chars += printf("t%d", tree->gtTreeID);
if (comp->dumpIRRegs)
{
- regNumber regNum = tree->GetReg();
- if (regNum != REG_NA)
- {
- chars += printf("(%s)", getRegName(regNum));
- }
+ regNumber regNum = tree->GetReg();
+ if (regNum != REG_NA)
+ {
+ chars += printf("(%s)", getRegName(regNum));
+ }
}
if (dumpTypes)
{
- chars += cTreeTypeIR(comp, tree);
+ chars += cTreeTypeIR(comp, tree);
}
if (dumpValnums)
{
@@ -9796,13 +9919,12 @@ void cNodeIR(Compiler* comp, GenTree* tree)
// Dump opcode and tree ID if need in dataflow view.
chars += dTabStopIR(chars, COLUMN_OPCODE);
- const char * opName = tree->OpName(op);
+ const char* opName = tree->OpName(op);
chars += printf(" = %s", opName);
if (dataflowView)
{
- if (tree->OperIsAssignment()
- || (op == GT_STORE_LCL_VAR) || (op == GT_STORE_LCL_FLD) || (op == GT_STOREIND))
+ if (tree->OperIsAssignment() || (op == GT_STORE_LCL_VAR) || (op == GT_STORE_LCL_FLD) || (op == GT_STOREIND))
{
chars += printf("(t%d)", tree->gtTreeID);
}
@@ -9812,7 +9934,7 @@ void cNodeIR(Compiler* comp, GenTree* tree)
if (op == GT_CALL)
{
- GenTreeCall * call = tree->AsCall();
+ GenTreeCall* call = tree->AsCall();
if (call->gtCallType == CT_USER_FUNC)
{
@@ -9820,11 +9942,11 @@ void cNodeIR(Compiler* comp, GenTree* tree)
{
chars += printf(":VS");
}
- else if (call->IsVirtualVtable())
+ else if (call->IsVirtualVtable())
{
chars += printf(":VT");
}
- else if (call->IsVirtual())
+ else if (call->IsVirtual())
{
chars += printf(":V");
}
@@ -9872,62 +9994,62 @@ void cNodeIR(Compiler* comp, GenTree* tree)
}
else if (op == GT_INTRINSIC)
{
- CorInfoIntrinsics intrin = tree->gtIntrinsic.gtIntrinsicId;
+ CorInfoIntrinsics intrin = tree->gtIntrinsic.gtIntrinsicId;
chars += printf(":");
switch (intrin)
{
- case CORINFO_INTRINSIC_Sin:
- chars += printf("Sin");
- break;
- case CORINFO_INTRINSIC_Cos:
- chars += printf("Cos");
- break;
- case CORINFO_INTRINSIC_Sqrt:
- chars += printf("Sqrt");
- break;
- case CORINFO_INTRINSIC_Cosh:
- chars += printf("Cosh");
- break;
- case CORINFO_INTRINSIC_Sinh:
- chars += printf("Sinh");
- break;
- case CORINFO_INTRINSIC_Tan:
- chars += printf("Tan");
- break;
- case CORINFO_INTRINSIC_Tanh:
- chars += printf("Tanh");
- break;
- case CORINFO_INTRINSIC_Asin:
- chars += printf("Asin");
- break;
- case CORINFO_INTRINSIC_Acos:
- chars += printf("Acos");
- break;
- case CORINFO_INTRINSIC_Atan:
- chars += printf("Atan");
- break;
- case CORINFO_INTRINSIC_Atan2:
- chars += printf("Atan2");
- break;
- case CORINFO_INTRINSIC_Log10:
- chars += printf("Log10");
- break;
- case CORINFO_INTRINSIC_Pow:
- chars += printf("Pow");
- break;
- case CORINFO_INTRINSIC_Exp:
- chars += printf("Exp");
- break;
- case CORINFO_INTRINSIC_Ceiling:
- chars += printf("Ceiling");
- break;
- case CORINFO_INTRINSIC_Floor:
- chars += printf("Floor");
- break;
- default:
- chars += printf("unknown(%d)", intrin);
- break;
+ case CORINFO_INTRINSIC_Sin:
+ chars += printf("Sin");
+ break;
+ case CORINFO_INTRINSIC_Cos:
+ chars += printf("Cos");
+ break;
+ case CORINFO_INTRINSIC_Sqrt:
+ chars += printf("Sqrt");
+ break;
+ case CORINFO_INTRINSIC_Cosh:
+ chars += printf("Cosh");
+ break;
+ case CORINFO_INTRINSIC_Sinh:
+ chars += printf("Sinh");
+ break;
+ case CORINFO_INTRINSIC_Tan:
+ chars += printf("Tan");
+ break;
+ case CORINFO_INTRINSIC_Tanh:
+ chars += printf("Tanh");
+ break;
+ case CORINFO_INTRINSIC_Asin:
+ chars += printf("Asin");
+ break;
+ case CORINFO_INTRINSIC_Acos:
+ chars += printf("Acos");
+ break;
+ case CORINFO_INTRINSIC_Atan:
+ chars += printf("Atan");
+ break;
+ case CORINFO_INTRINSIC_Atan2:
+ chars += printf("Atan2");
+ break;
+ case CORINFO_INTRINSIC_Log10:
+ chars += printf("Log10");
+ break;
+ case CORINFO_INTRINSIC_Pow:
+ chars += printf("Pow");
+ break;
+ case CORINFO_INTRINSIC_Exp:
+ chars += printf("Exp");
+ break;
+ case CORINFO_INTRINSIC_Ceiling:
+ chars += printf("Ceiling");
+ break;
+ case CORINFO_INTRINSIC_Floor:
+ chars += printf("Floor");
+ break;
+ default:
+ chars += printf("unknown(%d)", intrin);
+ break;
}
}
@@ -9939,81 +10061,81 @@ void cNodeIR(Compiler* comp, GenTree* tree)
switch (op)
{
- default:
- break;
- case GT_FIELD:
+ default:
+ break;
+ case GT_FIELD:
{
- const char * className = NULL;
- const char * fieldName = comp->eeGetFieldName(tree->gtField.gtFldHnd, &className);
+ const char* className = nullptr;
+ const char* fieldName = comp->eeGetFieldName(tree->gtField.gtFldHnd, &className);
- chars += printf(" %s.%s", className, fieldName);
+ chars += printf(" %s.%s", className, fieldName);
}
break;
- case GT_CALL:
+ case GT_CALL:
- if (tree->gtCall.gtCallType != CT_INDIRECT)
- {
- const char * methodName;
- const char * className;
+ if (tree->gtCall.gtCallType != CT_INDIRECT)
+ {
+ const char* methodName;
+ const char* className;
- methodName = comp->eeGetMethodName(tree->gtCall.gtCallMethHnd, &className);
+ methodName = comp->eeGetMethodName(tree->gtCall.gtCallMethHnd, &className);
- chars += printf(" %s.%s", className, methodName);
- }
- break;
+ chars += printf(" %s.%s", className, methodName);
+ }
+ break;
- case GT_STORE_LCL_VAR:
- case GT_STORE_LCL_FLD:
+ case GT_STORE_LCL_VAR:
+ case GT_STORE_LCL_FLD:
- if (!dataflowView)
- {
- chars += printf(" ");
- chars += cLeafIR(comp, tree);
- }
- break;
+ if (!dataflowView)
+ {
+ chars += printf(" ");
+ chars += cLeafIR(comp, tree);
+ }
+ break;
- case GT_STORE_CLS_VAR:
+ case GT_STORE_CLS_VAR:
- chars += printf(" ???");
- break;
+ chars += printf(" ???");
+ break;
- case GT_LEA:
+ case GT_LEA:
- GenTreeAddrMode * lea = tree->AsAddrMode();
- GenTree *base = lea->Base();
- GenTree *index = lea->Index();
- unsigned scale = lea->gtScale;
- unsigned offset = lea->gtOffset;
+ GenTreeAddrMode* lea = tree->AsAddrMode();
+ GenTree* base = lea->Base();
+ GenTree* index = lea->Index();
+ unsigned scale = lea->gtScale;
+ unsigned offset = lea->gtOffset;
- chars += printf(" [");
- if (base != NULL)
- {
- chars += cOperandIR(comp, base);
- }
- if (index != NULL)
- {
- if (base != NULL)
+ chars += printf(" [");
+ if (base != nullptr)
{
- chars += printf("+");
+ chars += cOperandIR(comp, base);
}
- chars += cOperandIR(comp, index);
- if (scale > 1)
+ if (index != nullptr)
{
- chars += printf("*%u", scale);
+ if (base != nullptr)
+ {
+ chars += printf("+");
+ }
+ chars += cOperandIR(comp, index);
+ if (scale > 1)
+ {
+ chars += printf("*%u", scale);
+ }
}
- }
- if ((offset != 0) || ((base == NULL) && (index == NULL)))
- {
- if ((base != NULL) || (index != NULL))
+ if ((offset != 0) || ((base == nullptr) && (index == nullptr)))
{
- chars += printf("+");
+ if ((base != nullptr) || (index != nullptr))
+ {
+ chars += printf("+");
+ }
+ chars += printf("%u", offset);
}
- chars += printf("%u", offset);
- }
- chars += printf("]");
- break;
+ chars += printf("]");
+ break;
}
// Dump operands.
@@ -10029,10 +10151,10 @@ void cNodeIR(Compiler* comp, GenTree* tree)
}
else if (op == GT_PHI)
{
- if (tree->gtOp.gtOp1 != NULL)
+ if (tree->gtOp.gtOp1 != nullptr)
{
bool first = true;
- for (GenTreeArgList* args = tree->gtOp.gtOp1->AsArgList(); args != NULL; args = args->Rest())
+ for (GenTreeArgList* args = tree->gtOp.gtOp1->AsArgList(); args != nullptr; args = args->Rest())
{
child = args->Current();
if (!first)
@@ -10047,13 +10169,13 @@ void cNodeIR(Compiler* comp, GenTree* tree)
}
else
{
- bool hasComma = false;
- bool first = true;
- int operandChars = 0;
+ bool hasComma = false;
+ bool first = true;
+ int operandChars = 0;
for (unsigned childIndex = 0; childIndex < childCount; childIndex++)
{
child = tree->GetChild(childIndex);
- if (child == NULL)
+ if (child == nullptr)
{
continue;
}
@@ -10087,7 +10209,9 @@ void cNodeIR(Compiler* comp, GenTree* tree)
operandChars = cOperandIR(comp, child);
chars += operandChars;
if (operandChars > 0)
+ {
first = false;
+ }
}
else
{
@@ -10096,9 +10220,10 @@ void cNodeIR(Compiler* comp, GenTree* tree)
operandChars = cOperandIR(comp, child);
chars += operandChars;
if (operandChars > 0)
+ {
first = false;
+ }
}
-
}
if (dataflowView && hasComma)
@@ -10154,17 +10279,17 @@ void cNodeIR(Compiler* comp, GenTree* tree)
* COMPlus_JitDumpIR support - dump out tree in linear IR form
*/
-void cTreeIR(Compiler* comp, GenTree* tree)
+void cTreeIR(Compiler* comp, GenTree* tree)
{
- bool foldLeafs = comp->dumpIRNoLeafs;
- bool foldIndirs = comp->dumpIRDataflow;
- bool foldLists = comp->dumpIRNoLists;
+ bool foldLeafs = comp->dumpIRNoLeafs;
+ bool foldIndirs = comp->dumpIRDataflow;
+ bool foldLists = comp->dumpIRNoLists;
bool dataflowView = comp->dumpIRDataflow;
- bool dumpTypes = comp->dumpIRTypes;
- bool dumpValnums = comp->dumpIRValnums;
- bool noStmts =comp->dumpIRNoStmts;
- genTreeOps op = tree->OperGet();
- unsigned childCount = tree->NumChildren();
+ bool dumpTypes = comp->dumpIRTypes;
+ bool dumpValnums = comp->dumpIRValnums;
+ bool noStmts = comp->dumpIRNoStmts;
+ genTreeOps op = tree->OperGet();
+ unsigned childCount = tree->NumChildren();
GenTree* child;
// Recurse and dump trees that this node depends on.
@@ -10189,7 +10314,7 @@ void cTreeIR(Compiler* comp, GenTree* tree)
for (unsigned childIndex = 0; childIndex < childCount; childIndex++)
{
child = tree->GetChild(childIndex);
- if (child != NULL)
+ if (child != nullptr)
{
cTreeIR(comp, child);
}
@@ -10204,7 +10329,7 @@ void cTreeIR(Compiler* comp, GenTree* tree)
* COMPlus_JitDumpIR support - dump out tree in linear IR form
*/
-void dTreeIR(GenTree* tree)
+void dTreeIR(GenTree* tree)
{
cTreeIR(JitTls::GetCompiler(), tree);
}
@@ -10223,6 +10348,5 @@ BitSetSupport::BitSetOpCounter Compiler::m_allvarsetOpCounter("AllVarSetOpCounts
// static
HelperCallProperties Compiler::s_helperCallProperties;
-
/*****************************************************************************/
/*****************************************************************************/
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 161ef8abd4..fd3c800474 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -43,7 +43,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "valuenum.h"
#include "reglist.h"
#include "jittelemetry.h"
-#ifdef LATE_DISASM
+#ifdef LATE_DISASM
#include "disasm.h"
#endif
@@ -59,33 +59,33 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "simd.h"
-// This is only used locally in the JIT to indicate that
-// a verification block should be inserted
-#define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER
+// This is only used locally in the JIT to indicate that
+// a verification block should be inserted
+#define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER
/*****************************************************************************
* Forward declarations
*/
-struct InfoHdr; // defined in GCInfo.h
-struct escapeMapping_t; // defined in flowgraph.cpp
-class emitter; // defined in emit.h
-struct ShadowParamVarInfo; // defined in GSChecks.cpp
-struct InitVarDscInfo; // defined in register_arg_convention.h
-class FgStack; // defined in flowgraph.cpp
+struct InfoHdr; // defined in GCInfo.h
+struct escapeMapping_t; // defined in flowgraph.cpp
+class emitter; // defined in emit.h
+struct ShadowParamVarInfo; // defined in GSChecks.cpp
+struct InitVarDscInfo; // defined in register_arg_convention.h
+class FgStack; // defined in flowgraph.cpp
#if FEATURE_STACK_FP_X87
-struct FlatFPStateX87; // defined in fp.h
+struct FlatFPStateX87; // defined in fp.h
#endif
#if FEATURE_ANYCSE
-class CSE_DataFlow; // defined in OptCSE.cpp
+class CSE_DataFlow; // defined in OptCSE.cpp
#endif
#ifdef DEBUG
-struct IndentStack;
+struct IndentStack;
#endif
// The following are defined in this file, Compiler.h
-class Compiler;
+class Compiler;
/*****************************************************************************
* Unwind info
@@ -100,15 +100,14 @@ class Compiler;
//
// Or the more-general IAllocator interface.
-void * __cdecl operator new(size_t n, IAllocator* alloc);
-void * __cdecl operator new[](size_t n, IAllocator* alloc);
+void* __cdecl operator new(size_t n, IAllocator* alloc);
+void* __cdecl operator new[](size_t n, IAllocator* alloc);
// I wanted to make the second argument optional, with default = CMK_Unknown, but that
// caused these to be ambiguous with the global placement new operators.
-void * __cdecl operator new(size_t n, Compiler *context, CompMemKind cmk);
-void * __cdecl operator new[](size_t n, Compiler *context, CompMemKind cmk);
-void * __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference);
-
+void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk);
+void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk);
+void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference);
// Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions.
#include "loopcloning.h"
@@ -122,50 +121,50 @@ void * __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax
/*****************************************************************************/
-unsigned genLog2(unsigned value);
-unsigned genLog2(unsigned __int64 value);
+unsigned genLog2(unsigned value);
+unsigned genLog2(unsigned __int64 value);
-var_types genActualType (var_types type);
-var_types genUnsignedType(var_types type);
-var_types genSignedType (var_types type);
+var_types genActualType(var_types type);
+var_types genUnsignedType(var_types type);
+var_types genSignedType(var_types type);
-unsigned ReinterpretHexAsDecimal(unsigned);
+unsigned ReinterpretHexAsDecimal(unsigned);
/*****************************************************************************/
#ifdef FEATURE_SIMD
#ifdef FEATURE_AVX_SUPPORT
-const unsigned TEMP_MAX_SIZE = YMM_REGSIZE_BYTES;
-#else // !FEATURE_AVX_SUPPORT
-const unsigned TEMP_MAX_SIZE = XMM_REGSIZE_BYTES;
+const unsigned TEMP_MAX_SIZE = YMM_REGSIZE_BYTES;
+#else // !FEATURE_AVX_SUPPORT
+const unsigned TEMP_MAX_SIZE = XMM_REGSIZE_BYTES;
#endif // !FEATURE_AVX_SUPPORT
-#else // !FEATURE_SIMD
-const unsigned TEMP_MAX_SIZE = sizeof(double);
+#else // !FEATURE_SIMD
+const unsigned TEMP_MAX_SIZE = sizeof(double);
#endif // !FEATURE_SIMD
-const unsigned TEMP_SLOT_COUNT = (TEMP_MAX_SIZE / sizeof(int));
+const unsigned TEMP_SLOT_COUNT = (TEMP_MAX_SIZE / sizeof(int));
-const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR|CORINFO_FLG_STATIC);
+const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC);
#ifdef DEBUG
-const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs
+const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs
#endif
// The following holds the Local var info (scope information)
-typedef const char* VarName; // Actual ASCII string
-struct VarScopeDsc
+typedef const char* VarName; // Actual ASCII string
+struct VarScopeDsc
{
- IL_OFFSET vsdLifeBeg; // instr offset of beg of life
- IL_OFFSET vsdLifeEnd; // instr offset of end of life
- unsigned vsdVarNum; // (remapped) LclVarDsc number
+ IL_OFFSET vsdLifeBeg; // instr offset of beg of life
+ IL_OFFSET vsdLifeEnd; // instr offset of end of life
+ unsigned vsdVarNum; // (remapped) LclVarDsc number
#ifdef DEBUG
- VarName vsdName; // name of the var
+ VarName vsdName; // name of the var
#endif
- unsigned vsdLVnum; // 'which' in eeGetLVinfo().
- // Also, it is the index of this entry in the info.compVarScopes array,
- // which is useful since the array is also accessed via the
- // compEnterScopeList and compExitScopeList sorted arrays.
+ unsigned vsdLVnum; // 'which' in eeGetLVinfo().
+ // Also, it is the index of this entry in the info.compVarScopes array,
+ // which is useful since the array is also accessed via the
+ // compEnterScopeList and compExitScopeList sorted arrays.
};
/*****************************************************************************
@@ -174,154 +173,171 @@ struct VarScopeDsc
*/
// This is the location of a definition.
-struct DefLoc {
+struct DefLoc
+{
BasicBlock* m_blk;
GenTreePtr m_tree;
- DefLoc() :
- m_blk(nullptr),
- m_tree(nullptr)
+ DefLoc() : m_blk(nullptr), m_tree(nullptr)
{
}
};
// This class encapsulates all info about a local variable that may vary for different SSA names
// in the family.
-class LclSsaVarDsc
+class LclSsaVarDsc
{
public:
ValueNumPair m_vnPair;
DefLoc m_defLoc;
- LclSsaVarDsc() {}
+ LclSsaVarDsc()
+ {
+ }
};
typedef ExpandArray<LclSsaVarDsc> PerSsaArray;
-class LclVarDsc
+class LclVarDsc
{
public:
// The constructor. Most things can just be zero'ed.
LclVarDsc(Compiler* comp);
// note this only packs because var_types is a typedef of unsigned char
- var_types lvType :5; // TYP_INT/LONG/FLOAT/DOUBLE/REF
-
- unsigned char lvIsParam :1; // is this a parameter?
- unsigned char lvIsRegArg :1; // is this a register argument?
- unsigned char lvFramePointerBased :1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP)
-
- unsigned char lvStructGcCount :3; // if struct, how many GC pointer (stop counting at 7). The only use of values >1 is to help determine whether to use block init in the prolog.
- unsigned char lvOnFrame :1; // (part of) the variable lives on the frame
- unsigned char lvDependReg :1; // did the predictor depend upon this being enregistered
- unsigned char lvRegister :1; // assigned to live in a register? For RyuJIT backend, this is only set if the variable is in the same register for the entire function.
- unsigned char lvTracked :1; // is this a tracked variable?
- bool lvTrackedNonStruct() { return lvTracked && lvType != TYP_STRUCT; }
- unsigned char lvPinned :1; // is this a pinned variable?
-
- unsigned char lvMustInit :1; // must be initialized
- unsigned char lvAddrExposed :1; // The address of this variable is "exposed" -- passed as an argument, stored in a global location, etc.
- // We cannot reason reliably about the value of the variable.
- unsigned char lvDoNotEnregister :1; // Do not enregister this variable.
- unsigned char lvFieldAccessed :1; // The var is a struct local, and a field of the variable is accessed. Affects struct promotion.
+ var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF
+
+ unsigned char lvIsParam : 1; // is this a parameter?
+ unsigned char lvIsRegArg : 1; // is this a register argument?
+ unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP)
+
+ unsigned char lvStructGcCount : 3; // if struct, how many GC pointer (stop counting at 7). The only use of values >1
+ // is to help determine whether to use block init in the prolog.
+ unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame
+ unsigned char lvDependReg : 1; // did the predictor depend upon this being enregistered
+ unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the
+ // variable is in the same register for the entire function.
+ unsigned char lvTracked : 1; // is this a tracked variable?
+ bool lvTrackedNonStruct()
+ {
+ return lvTracked && lvType != TYP_STRUCT;
+ }
+ unsigned char lvPinned : 1; // is this a pinned variable?
+
+ unsigned char lvMustInit : 1; // must be initialized
+ unsigned char lvAddrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a
+ // global location, etc.
+ // We cannot reason reliably about the value of the variable.
+ unsigned char lvDoNotEnregister : 1; // Do not enregister this variable.
+ unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects
+ // struct promotion.
#ifdef DEBUG
- // These further document the reasons for setting "lvDoNotEnregister". (Note that "lvAddrExposed" is one of the reasons;
- // also, lvType == TYP_STRUCT prevents enregistration. At least one of the reasons should be true.
- unsigned char lvVMNeedsStackAddr :1; // The VM may have access to a stack-relative address of the variable, and read/write its value.
- unsigned char lvLiveInOutOfHndlr :1; // The variable was live in or out of an exception handler, and this required the variable to be
- // in the stack (at least at those boundaries.)
- unsigned char lvLclFieldExpr :1; // The variable is not a struct, but was accessed like one (e.g., reading a particular byte from an int).
- unsigned char lvLclBlockOpAddr :1; // The variable was written to via a block operation that took its address.
- unsigned char lvLiveAcrossUCall :1; // The variable is live across an unmanaged call.
-#endif
- unsigned char lvIsCSE :1; // Indicates if this LclVar is a CSE variable.
- unsigned char lvRefAssign :1; // involved in pointer assignment
- unsigned char lvHasLdAddrOp:1; // has ldloca or ldarga opcode on this local.
- unsigned char lvStackByref :1; // This is a compiler temporary of TYP_BYREF that is known to point into our local stack frame.
-
- unsigned char lvArgWrite :1; // variable is a parameter and STARG was used on it
- unsigned char lvIsTemp :1; // Short-lifetime compiler temp
+ // These further document the reasons for setting "lvDoNotEnregister". (Note that "lvAddrExposed" is one of the
+ // reasons;
+ // also, lvType == TYP_STRUCT prevents enregistration. At least one of the reasons should be true.
+ unsigned char lvVMNeedsStackAddr : 1; // The VM may have access to a stack-relative address of the variable, and
+ // read/write its value.
+ unsigned char lvLiveInOutOfHndlr : 1; // The variable was live in or out of an exception handler, and this required
+ // the variable to be
+ // in the stack (at least at those boundaries.)
+ unsigned char lvLclFieldExpr : 1; // The variable is not a struct, but was accessed like one (e.g., reading a
+ // particular byte from an int).
+ unsigned char lvLclBlockOpAddr : 1; // The variable was written to via a block operation that took its address.
+ unsigned char lvLiveAcrossUCall : 1; // The variable is live across an unmanaged call.
+#endif
+ unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable.
+ unsigned char lvRefAssign : 1; // involved in pointer assignment
+ unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local.
+ unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local
+ // stack frame.
+
+ unsigned char lvArgWrite : 1; // variable is a parameter and STARG was used on it
+ unsigned char lvIsTemp : 1; // Short-lifetime compiler temp
#if OPT_BOOL_OPS
- unsigned char lvIsBoolean :1; // set if variable is boolean
-#endif
- unsigned char lvRngOptDone:1; // considered for range check opt?
- unsigned char lvLoopInc :1; // incremented in the loop?
- unsigned char lvLoopAsg :1; // reassigned in the loop (other than a monotonic inc/dec for the index var)?
- unsigned char lvArrIndx :1; // used as an array index?
- unsigned char lvArrIndxOff:1; // used as an array index with an offset?
- unsigned char lvArrIndxDom:1; // index dominates loop exit
+ unsigned char lvIsBoolean : 1; // set if variable is boolean
+#endif
+ unsigned char lvRngOptDone : 1; // considered for range check opt?
+ unsigned char lvLoopInc : 1; // incremented in the loop?
+ unsigned char lvLoopAsg : 1; // reassigned in the loop (other than a monotonic inc/dec for the index var)?
+ unsigned char lvArrIndx : 1; // used as an array index?
+ unsigned char lvArrIndxOff : 1; // used as an array index with an offset?
+ unsigned char lvArrIndxDom : 1; // index dominates loop exit
#if ASSERTION_PROP
- unsigned char lvSingleDef:1; // variable has a single def
- unsigned char lvDisqualify:1; // variable is no longer OK for add copy optimization
- unsigned char lvVolatileHint:1; // hint for AssertionProp
+ unsigned char lvSingleDef : 1; // variable has a single def
+ unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization
+ unsigned char lvVolatileHint : 1; // hint for AssertionProp
#endif
#if FANCY_ARRAY_OPT
- unsigned char lvAssignOne :1; // assigned at least once?
- unsigned char lvAssignTwo :1; // assigned at least twice?
+ unsigned char lvAssignOne : 1; // assigned at least once?
+ unsigned char lvAssignTwo : 1; // assigned at least twice?
#endif
- unsigned char lvSpilled :1; // enregistered variable was spilled
+ unsigned char lvSpilled : 1; // enregistered variable was spilled
#ifndef _TARGET_64BIT_
- unsigned char lvStructDoubleAlign :1; // Must we double align this struct?
-#endif // !_TARGET_64BIT_
+ unsigned char lvStructDoubleAlign : 1; // Must we double align this struct?
+#endif // !_TARGET_64BIT_
#ifdef _TARGET_64BIT_
- unsigned char lvQuirkToLong :1; // Quirk to allocate this LclVar as a 64-bit long
+ unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long
#endif
#ifdef DEBUG
- unsigned char lvKeepType :1; // Don't change the type of this variable
- unsigned char lvNoLclFldStress :1;// Can't apply local field stress on this one
-#endif
- unsigned char lvIsPtr :1; // Might this be used in an address computation? (used by buffer overflow security checks)
- unsigned char lvIsUnsafeBuffer :1; // Does this contain an unsafe buffer requiring buffer overflow security checks?
- unsigned char lvPromoted :1; // True when this local is a promoted struct, a normed struct, or a "split" long on a 32-bit target.
- unsigned char lvIsStructField :1; // Is this local var a field of a promoted struct local?
- unsigned char lvContainsFloatingFields :1; // Does this struct contains floating point fields?
- unsigned char lvOverlappingFields :1; // True when we have a struct with possibly overlapping fields
- unsigned char lvContainsHoles :1; // True when we have a promoted struct that contains holes
- unsigned char lvCustomLayout :1; // True when this struct has "CustomLayout"
-
- unsigned char lvIsMultiRegArg :1; // true if this is a multireg LclVar struct used in an argument context
- unsigned char lvIsMultiRegRet :1; // true if this is a multireg LclVar struct assigned from a multireg call
+ unsigned char lvKeepType : 1; // Don't change the type of this variable
+ unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one
+#endif
+ unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security
+ // checks)
+ unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks?
+ unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a
+ // 32-bit target.
+ unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local?
+ unsigned char lvContainsFloatingFields : 1; // Does this struct contains floating point fields?
+ unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields
+ unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes
+ unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout"
+
+ unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context
+ unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call
#ifdef FEATURE_HFA
- unsigned char _lvIsHfa :1; // Is this a struct variable who's class handle is an HFA type
- unsigned char _lvIsHfaRegArg :1; // Is this a HFA argument variable? // TODO-CLEANUP: Remove this and replace with (lvIsRegArg && lvIsHfa())
- unsigned char _lvHfaTypeIsFloat :1; // Is the HFA type float or double?
-#endif // FEATURE_HFA
+ unsigned char _lvIsHfa : 1; // Is this a struct variable who's class handle is an HFA type
+ unsigned char _lvIsHfaRegArg : 1; // Is this a HFA argument variable? // TODO-CLEANUP: Remove this and replace
+ // with (lvIsRegArg && lvIsHfa())
+ unsigned char _lvHfaTypeIsFloat : 1; // Is the HFA type float or double?
+#endif // FEATURE_HFA
#ifdef DEBUG
// TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct
// types, and is needed because of cases where TYP_STRUCT is bashed to an integral type.
// Consider cleaning this up so this workaround is not required.
- unsigned char lvUnusedStruct :1; // All references to this promoted struct are through its field locals.
- // I.e. there is no longer any reference to the struct directly.
- // In this case we can simply remove this struct local.
+ unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals.
+ // I.e. there is no longer any reference to the struct directly.
+ // In this case we can simply remove this struct local.
#endif
#ifndef LEGACY_BACKEND
- unsigned char lvLRACandidate :1; // Tracked for linear scan register allocation purposes
-#endif // !LEGACY_BACKEND
+ unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes
+#endif // !LEGACY_BACKEND
#ifdef FEATURE_SIMD
// Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the
// type of an arg node is TYP_BYREF and a local node is TYP_SIMD*.
- unsigned char lvSIMDType :1; // This is a SIMD struct
- unsigned char lvUsedInSIMDIntrinsic :1; // This tells lclvar is used for simd intrinsic
-#endif // FEATURE_SIMD
- unsigned char lvRegStruct :1; // This is a reg-sized non-field-addressed struct.
-
- union
- {
- unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct local.
- unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local). Valid on promoted struct local fields.
+ unsigned char lvSIMDType : 1; // This is a SIMD struct
+ unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic
+#endif // FEATURE_SIMD
+ unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct.
+
+ union {
+ unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct
+ // local.
+ unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local).
+ // Valid on promoted struct local fields.
#ifdef FEATURE_SIMD
- var_types lvBaseType; // The base type of a SIMD local var. Valid on TYP_SIMD locals.
-#endif // FEATURE_SIMD
+ var_types lvBaseType; // The base type of a SIMD local var. Valid on TYP_SIMD locals.
+#endif // FEATURE_SIMD
};
- unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc.
- unsigned char lvFldOffset;
- unsigned char lvFldOrdinal;
+ unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc.
+ unsigned char lvFldOffset;
+ unsigned char lvFldOrdinal;
#if FEATURE_MULTIREG_ARGS
regNumber lvRegNumForSlot(unsigned slotNum)
@@ -397,10 +413,10 @@ public:
unsigned lvHfaSlots() const
{
assert(lvIsHfa());
- assert(lvType==TYP_STRUCT);
+ assert(lvType == TYP_STRUCT);
#ifdef _TARGET_ARM_
return lvExactSize / sizeof(float);
-#else // _TARGET_ARM64_
+#else // _TARGET_ARM64_
if (lvHfaTypeIsFloat())
{
return lvExactSize / sizeof(float);
@@ -414,111 +430,105 @@ public:
// lvIsMultiRegArgOrRet()
// returns true if this is a multireg LclVar struct used in an argument context
- // or if this is a multireg LclVar struct assigned from a multireg call
+ // or if this is a multireg LclVar struct assigned from a multireg call
bool lvIsMultiRegArgOrRet()
{
return lvIsMultiRegArg || lvIsMultiRegRet;
}
private:
-
- regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a
- // register pair). For LEGACY_BACKEND, this is only set if lvRegister is
- // non-zero. For non-LEGACY_BACKEND, it is set during codegen any time the
- // variable is enregistered (in non-LEGACY_BACKEND, lvRegister is only set
- // to non-zero if the variable gets the same register assignment for its entire
- // lifetime).
+ regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a
+ // register pair). For LEGACY_BACKEND, this is only set if lvRegister is
+ // non-zero. For non-LEGACY_BACKEND, it is set during codegen any time the
+ // variable is enregistered (in non-LEGACY_BACKEND, lvRegister is only set
+ // to non-zero if the variable gets the same register assignment for its entire
+ // lifetime).
#if !defined(_TARGET_64BIT_)
- regNumberSmall _lvOtherReg; // Used for "upper half" of long var.
-#endif // !defined(_TARGET_64BIT_)
+ regNumberSmall _lvOtherReg; // Used for "upper half" of long var.
+#endif // !defined(_TARGET_64BIT_)
- regNumberSmall _lvArgReg; // The register in which this argument is passed.
+ regNumberSmall _lvArgReg; // The register in which this argument is passed.
#if FEATURE_MULTIREG_ARGS
- regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register.
- // Note this is defined but not used by ARM32
-#endif // FEATURE_MULTIREG_ARGS
+ regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register.
+ // Note this is defined but not used by ARM32
+#endif // FEATURE_MULTIREG_ARGS
#ifndef LEGACY_BACKEND
- union
- {
- regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry
- regPairNoSmall _lvArgInitRegPair; // the register pair into which the argument is moved at entry
+ union {
+ regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry
+ regPairNoSmall _lvArgInitRegPair; // the register pair into which the argument is moved at entry
};
#endif // !LEGACY_BACKEND
public:
-
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
/////////////////////
- __declspec(property(get=GetRegNum,put=SetRegNum))
- regNumber lvRegNum;
+ __declspec(property(get = GetRegNum, put = SetRegNum)) regNumber lvRegNum;
regNumber GetRegNum() const
{
- return (regNumber) _lvRegNum;
+ return (regNumber)_lvRegNum;
}
void SetRegNum(regNumber reg)
{
- _lvRegNum = (regNumberSmall) reg;
+ _lvRegNum = (regNumberSmall)reg;
assert(_lvRegNum == reg);
}
- /////////////////////
+/////////////////////
#if defined(_TARGET_64BIT_)
- __declspec(property(get=GetOtherReg,put=SetOtherReg))
- regNumber lvOtherReg;
+ __declspec(property(get = GetOtherReg, put = SetOtherReg)) regNumber lvOtherReg;
regNumber GetOtherReg() const
{
- assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 "unreachable code" warnings
+ assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
+ // "unreachable code" warnings
return REG_NA;
}
void SetOtherReg(regNumber reg)
{
- assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 "unreachable code" warnings
+ assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
+ // "unreachable code" warnings
}
-#else // !_TARGET_64BIT_
- __declspec(property(get=GetOtherReg,put=SetOtherReg))
- regNumber lvOtherReg;
+#else // !_TARGET_64BIT_
+ __declspec(property(get = GetOtherReg, put = SetOtherReg)) regNumber lvOtherReg;
regNumber GetOtherReg() const
{
- return (regNumber) _lvOtherReg;
+ return (regNumber)_lvOtherReg;
}
void SetOtherReg(regNumber reg)
{
- _lvOtherReg = (regNumberSmall) reg;
+ _lvOtherReg = (regNumberSmall)reg;
assert(_lvOtherReg == reg);
}
#endif // !_TARGET_64BIT_
/////////////////////
- __declspec(property(get=GetArgReg,put=SetArgReg))
- regNumber lvArgReg;
+ __declspec(property(get = GetArgReg, put = SetArgReg)) regNumber lvArgReg;
regNumber GetArgReg() const
{
- return (regNumber) _lvArgReg;
+ return (regNumber)_lvArgReg;
}
void SetArgReg(regNumber reg)
{
- _lvArgReg = (regNumberSmall) reg;
+ _lvArgReg = (regNumberSmall)reg;
assert(_lvArgReg == reg);
}
#if FEATURE_MULTIREG_ARGS
- __declspec(property(get = GetOtherArgReg, put = SetOtherArgReg))
- regNumber lvOtherArgReg;
+ __declspec(property(get = GetOtherArgReg, put = SetOtherArgReg)) regNumber lvOtherArgReg;
regNumber GetOtherArgReg() const
{
@@ -556,41 +566,37 @@ public:
}
#endif
- /////////////////////
+/////////////////////
#ifndef LEGACY_BACKEND
- __declspec(property(get=GetArgInitReg,put=SetArgInitReg))
- regNumber lvArgInitReg;
+ __declspec(property(get = GetArgInitReg, put = SetArgInitReg)) regNumber lvArgInitReg;
regNumber GetArgInitReg() const
{
- return (regNumber) _lvArgInitReg;
+ return (regNumber)_lvArgInitReg;
}
void SetArgInitReg(regNumber reg)
{
- _lvArgInitReg = (regNumberSmall) reg;
+ _lvArgInitReg = (regNumberSmall)reg;
assert(_lvArgInitReg == reg);
}
/////////////////////
- __declspec(property(get=GetArgInitRegPair,put=SetArgInitRegPair))
- regPairNo lvArgInitRegPair;
+ __declspec(property(get = GetArgInitRegPair, put = SetArgInitRegPair)) regPairNo lvArgInitRegPair;
regPairNo GetArgInitRegPair() const
-{
- regPairNo regPair = (regPairNo) _lvArgInitRegPair;
- assert(regPair >= REG_PAIR_FIRST &&
- regPair <= REG_PAIR_LAST);
+ {
+ regPairNo regPair = (regPairNo)_lvArgInitRegPair;
+ assert(regPair >= REG_PAIR_FIRST && regPair <= REG_PAIR_LAST);
return regPair;
}
void SetArgInitRegPair(regPairNo regPair)
{
- assert(regPair >= REG_PAIR_FIRST &&
- regPair <= REG_PAIR_LAST);
- _lvArgInitRegPair = (regPairNoSmall) regPair;
+ assert(regPair >= REG_PAIR_FIRST && regPair <= REG_PAIR_LAST);
+ _lvArgInitRegPair = (regPairNoSmall)regPair;
assert(_lvArgInitRegPair == regPair);
}
@@ -626,27 +632,33 @@ public:
if (varTypeIsFloating(TypeGet()))
{
if (lvRegNum != REG_STK)
+ {
regMask = genRegMaskFloat(lvRegNum, TypeGet());
+ }
}
else
{
if (lvRegNum != REG_STK)
+ {
regMask = genRegMask(lvRegNum);
-
+ }
+
// For longs we may have two regs
- if (isRegPairType(lvType) && lvOtherReg != REG_STK)
+ if (isRegPairType(lvType) && lvOtherReg != REG_STK)
+ {
regMask |= genRegMask(lvOtherReg);
+ }
}
return regMask;
}
- regMaskSmall lvPrefReg; // set of regs it prefers to live in
+ regMaskSmall lvPrefReg; // set of regs it prefers to live in
- unsigned short lvVarIndex; // variable tracking index
- unsigned short lvRefCnt; // unweighted (real) reference count
- unsigned lvRefCntWtd; // weighted reference count
- int lvStkOffs; // stack offset of home
- unsigned lvExactSize; // (exact) size of the type in bytes
+ unsigned short lvVarIndex; // variable tracking index
+ unsigned short lvRefCnt; // unweighted (real) reference count
+ unsigned lvRefCntWtd; // weighted reference count
+ int lvStkOffs; // stack offset of home
+ unsigned lvExactSize; // (exact) size of the type in bytes
// Is this a promoted struct?
// This method returns true only for structs (including SIMD structs), not for
@@ -655,16 +667,16 @@ public:
// 1) if only structs are wanted, and
// 2) if Lowering has already been done.
// Otherwise lvPromoted is valid.
- bool lvPromotedStruct()
+ bool lvPromotedStruct()
{
#if !defined(_TARGET_64BIT_)
return (lvPromoted && !varTypeIsLong(lvType));
-#else // defined(_TARGET_64BIT_)
+#else // defined(_TARGET_64BIT_)
return lvPromoted;
#endif // defined(_TARGET_64BIT_)
}
- unsigned lvSize() // Size needed for storage representation. Only used for structs or TYP_BLK.
+ unsigned lvSize() // Size needed for storage representation. Only used for structs or TYP_BLK.
{
// TODO-Review: Sometimes we get called on ARM with HFA struct variables that have been promoted,
// where the struct itself is no longer used because all access is via its member fields.
@@ -673,88 +685,89 @@ public:
// See Compiler::raAssignVars() for details. For example:
// N002 ( 4, 3) [00EA067C] ------------- return struct $346
// N001 ( 3, 2) [00EA0628] ------------- lclVar struct(U) V03 loc2
- // float V03.f1 (offs=0x00) -> V12 tmp7 f8 (last use) (last use) $345
+ // float V03.f1 (offs=0x00) -> V12 tmp7
+ // f8 (last use) (last use) $345
// Here, the "struct(U)" shows that the "V03 loc2" variable is unused. Not shown is that V03
// is now TYP_INT in the local variable table. It's not really unused, because it's in the tree.
- assert(varTypeIsStruct(lvType) ||
- (lvType == TYP_BLK) ||
- (lvPromoted && lvUnusedStruct));
+ assert(varTypeIsStruct(lvType) || (lvType == TYP_BLK) || (lvPromoted && lvUnusedStruct));
return (unsigned)(roundUp(lvExactSize, TARGET_POINTER_SIZE));
}
#if defined(DEBUGGING_SUPPORT) || defined(DEBUG)
- unsigned lvSlotNum; // original slot # (if remapped)
+ unsigned lvSlotNum; // original slot # (if remapped)
#endif
- typeInfo lvVerTypeInfo; // type info needed for verification
-
- BYTE * lvGcLayout; // GC layout info for structs
+ typeInfo lvVerTypeInfo; // type info needed for verification
+ BYTE* lvGcLayout; // GC layout info for structs
#if FANCY_ARRAY_OPT
- GenTreePtr lvKnownDim; // array size if known
+ GenTreePtr lvKnownDim; // array size if known
#endif
#if ASSERTION_PROP
- BlockSet lvRefBlks; // Set of blocks that contain refs
- GenTreePtr lvDefStmt; // Pointer to the statement with the single definition
- void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies
-#endif
- var_types TypeGet() const { return (var_types) lvType; }
- bool lvStackAligned() const
- {
- assert(lvIsStructField);
- return ((lvFldOffset % sizeof(void*)) == 0);
- }
- bool lvNormalizeOnLoad() const
- {
- return varTypeIsSmall(TypeGet()) &&
- // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
- (lvIsParam || lvAddrExposed || lvIsStructField);
- }
-
- bool lvNormalizeOnStore()
- {
- return varTypeIsSmall(TypeGet()) &&
- // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
- !(lvIsParam || lvAddrExposed || lvIsStructField);
- }
-
- void lvaResetSortAgainFlag(Compiler * pComp);
- void decRefCnts(BasicBlock::weight_t weight, Compiler * pComp, bool propagate = true);
- void incRefCnts(BasicBlock::weight_t weight, Compiler * pComp, bool propagate = true);
- void setPrefReg(regNumber regNum, Compiler * pComp);
- void addPrefReg(regMaskTP regMask, Compiler * pComp);
- bool IsFloatRegType() const
- {
- return isFloatRegType(lvType) || lvIsHfaRegArg();
- }
- var_types GetHfaType() const
- {
- return lvIsHfa() ? (lvHfaTypeIsFloat() ? TYP_FLOAT : TYP_DOUBLE) : TYP_UNDEF;
- }
- void SetHfaType(var_types type)
- {
- assert(varTypeIsFloating(type));
- lvSetHfaTypeIsFloat(type == TYP_FLOAT);
- }
+ BlockSet lvRefBlks; // Set of blocks that contain refs
+ GenTreePtr lvDefStmt; // Pointer to the statement with the single definition
+ void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies
+#endif
+ var_types TypeGet() const
+ {
+ return (var_types)lvType;
+ }
+ bool lvStackAligned() const
+ {
+ assert(lvIsStructField);
+ return ((lvFldOffset % sizeof(void*)) == 0);
+ }
+ bool lvNormalizeOnLoad() const
+ {
+ return varTypeIsSmall(TypeGet()) &&
+ // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
+ (lvIsParam || lvAddrExposed || lvIsStructField);
+ }
+
+ bool lvNormalizeOnStore()
+ {
+ return varTypeIsSmall(TypeGet()) &&
+ // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
+ !(lvIsParam || lvAddrExposed || lvIsStructField);
+ }
+
+ void lvaResetSortAgainFlag(Compiler* pComp);
+ void decRefCnts(BasicBlock::weight_t weight, Compiler* pComp, bool propagate = true);
+ void incRefCnts(BasicBlock::weight_t weight, Compiler* pComp, bool propagate = true);
+ void setPrefReg(regNumber regNum, Compiler* pComp);
+ void addPrefReg(regMaskTP regMask, Compiler* pComp);
+ bool IsFloatRegType() const
+ {
+ return isFloatRegType(lvType) || lvIsHfaRegArg();
+ }
+ var_types GetHfaType() const
+ {
+ return lvIsHfa() ? (lvHfaTypeIsFloat() ? TYP_FLOAT : TYP_DOUBLE) : TYP_UNDEF;
+ }
+ void SetHfaType(var_types type)
+ {
+ assert(varTypeIsFloating(type));
+ lvSetHfaTypeIsFloat(type == TYP_FLOAT);
+ }
#ifndef LEGACY_BACKEND
- var_types lvaArgType();
+ var_types lvaArgType();
#endif
- PerSsaArray lvPerSsaData;
+ PerSsaArray lvPerSsaData;
#ifdef DEBUG
// Keep track of the # of SsaNames, for a bounds check.
- unsigned lvNumSsaNames;
+ unsigned lvNumSsaNames;
#endif
// Returns the address of the per-Ssa data for the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
- LclSsaVarDsc* GetPerSsaData(unsigned ssaNum)
+ LclSsaVarDsc* GetPerSsaData(unsigned ssaNum)
{
assert(ssaNum != SsaConfig::RESERVED_SSA_NUM);
assert(SsaConfig::RESERVED_SSA_NUM == 0);
@@ -765,20 +778,22 @@ public:
#ifdef DEBUG
public:
-
void PrintVarReg() const
{
if (isRegPairType(TypeGet()))
- printf("%s:%s", getRegName(lvOtherReg), // hi32
- getRegName(lvRegNum)); // lo32
+ {
+ printf("%s:%s", getRegName(lvOtherReg), // hi32
+ getRegName(lvRegNum)); // lo32
+ }
else
+ {
printf("%s", getRegName(lvRegNum));
+ }
}
#endif // DEBUG
}; // class LclVarDsc
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -791,7 +806,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-
/*****************************************************************************
*
* The following keeps track of temporaries allocated in the stack frame
@@ -801,31 +815,27 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* These are different from the more common temps allocated by lvaGrabTemp().
*/
-class TempDsc
+class TempDsc
{
public:
-
- TempDsc * tdNext;
+ TempDsc* tdNext;
private:
-
- int tdOffs;
+ int tdOffs;
#ifdef DEBUG
static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG
-#endif // DEBUG
+#endif // DEBUG
- int tdNum;
- BYTE tdSize;
- var_types tdType;
+ int tdNum;
+ BYTE tdSize;
+ var_types tdType;
public:
- TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType)
- : tdNum(_tdNum)
- , tdSize((BYTE) _tdSize)
- , tdType(_tdType)
+ TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType)
{
#ifdef DEBUG
- assert(tdNum < 0); // temps must have a negative number (so they have a different number from all local variables)
+ assert(tdNum <
+ 0); // temps must have a negative number (so they have a different number from all local variables)
tdOffs = BAD_TEMP_OFFSET;
#endif // DEBUG
if (tdNum != _tdNum)
@@ -835,27 +845,52 @@ public:
}
#ifdef DEBUG
- bool tdLegalOffset() const { return tdOffs != BAD_TEMP_OFFSET; }
+ bool tdLegalOffset() const
+ {
+ return tdOffs != BAD_TEMP_OFFSET;
+ }
#endif // DEBUG
- int tdTempOffs() const { assert(tdLegalOffset()); return tdOffs; }
- void tdSetTempOffs(int offs) { tdOffs = offs; assert(tdLegalOffset()); }
- void tdAdjustTempOffs(int offs) { tdOffs += offs; assert(tdLegalOffset()); }
+ int tdTempOffs() const
+ {
+ assert(tdLegalOffset());
+ return tdOffs;
+ }
+ void tdSetTempOffs(int offs)
+ {
+ tdOffs = offs;
+ assert(tdLegalOffset());
+ }
+ void tdAdjustTempOffs(int offs)
+ {
+ tdOffs += offs;
+ assert(tdLegalOffset());
+ }
- int tdTempNum () const { assert(tdNum < 0); return tdNum; }
- unsigned tdTempSize() const { return tdSize; }
- var_types tdTempType() const { return tdType; }
+ int tdTempNum() const
+ {
+ assert(tdNum < 0);
+ return tdNum;
+ }
+ unsigned tdTempSize() const
+ {
+ return tdSize;
+ }
+ var_types tdTempType() const
+ {
+ return tdType;
+ }
};
// interface to hide linearscan implementation from rest of compiler
class LinearScanInterface
{
-public:
- virtual void doLinearScan() = 0;
- virtual void recordVarLocationsAtStartOfBB(BasicBlock *bb) = 0;
+public:
+ virtual void doLinearScan() = 0;
+ virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0;
};
-LinearScanInterface *getLinearScanAllocator(Compiler *comp);
+LinearScanInterface* getLinearScanAllocator(Compiler* comp);
// Information about arrays: their element type and size, and the offset of the first element.
// We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes,
@@ -868,19 +903,14 @@ struct ArrayInfo
unsigned m_elemSize;
unsigned m_elemOffset;
- ArrayInfo()
- : m_elemType(TYP_UNDEF)
- , m_elemStructType(nullptr)
- , m_elemSize(0)
- , m_elemOffset(0)
- {}
+ ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0)
+ {
+ }
ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType)
- : m_elemType(elemType)
- , m_elemStructType(elemStructType)
- , m_elemSize(elemSize)
- , m_elemOffset(elemOffset)
- {}
+ : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset)
+ {
+ }
};
// This enumeration names the phases into which we divide compilation. The phases should completely
@@ -892,13 +922,13 @@ enum Phases
PHASE_NUMBER_OF
};
-extern const char* PhaseNames[];
-extern const char* PhaseEnums[];
+extern const char* PhaseNames[];
+extern const char* PhaseEnums[];
extern const LPCWSTR PhaseShortNames[];
//---------------------------------------------------------------
// Compilation time.
-//
+//
// A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods.
// We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles
@@ -913,9 +943,9 @@ struct CompTimeInfo
static const char* PhaseNames[];
static bool PhaseHasChildren[];
- static int PhaseParent[];
+ static int PhaseParent[];
- unsigned m_byteCodeBytes;
+ unsigned m_byteCodeBytes;
unsigned __int64 m_totalCycles;
unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF];
unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF];
@@ -928,7 +958,7 @@ struct CompTimeInfo
// isn't, this means that we're doing something significant between the end of the last
// declared subphase and the end of its parent.
unsigned __int64 m_parentPhaseEndSlop;
- bool m_timerFailure;
+ bool m_timerFailure;
CompTimeInfo(unsigned byteCodeBytes);
#endif
@@ -945,12 +975,12 @@ class CompTimeSummaryInfo
{
// This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one).
static CritSecObject s_compTimeSummaryLock;
-
- int m_numMethods;
+
+ int m_numMethods;
CompTimeInfo m_total;
CompTimeInfo m_maximum;
- int m_numFilteredMethods;
+ int m_numFilteredMethods;
CompTimeInfo m_filtered;
// This method computes the number of cycles/sec for the current machine. The cycles are those counted
@@ -966,7 +996,9 @@ public:
// This is the unique CompTimeSummaryInfo object for this instance of the runtime.
static CompTimeSummaryInfo s_compTimeSummary;
- CompTimeSummaryInfo(): m_total(0), m_maximum(0), m_numMethods(0), m_filtered(0), m_numFilteredMethods(0) {}
+ CompTimeSummaryInfo() : m_total(0), m_maximum(0), m_numMethods(0), m_filtered(0), m_numFilteredMethods(0)
+ {
+ }
// Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary.
// This is thread safe.
@@ -980,19 +1012,18 @@ public:
// A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation,
// and when the current phase started. This is intended to be part of a Compilation object. This is
// disabled (FEATURE_JIT_METHOD_PERF not defined) when FEATURE_CORECLR is set, or on non-windows platforms.
-//
+//
class JitTimer
{
- unsigned __int64 m_start; // Start of the compilation.
- unsigned __int64 m_curPhaseStart; // Start of the current phase.
+ unsigned __int64 m_start; // Start of the compilation.
+ unsigned __int64 m_curPhaseStart; // Start of the current phase.
#ifdef DEBUG
- Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start).
+ Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start).
#endif
- CompTimeInfo m_info; // The CompTimeInfo for this compilation.
+ CompTimeInfo m_info; // The CompTimeInfo for this compilation.
-
- static CritSecObject s_csvLock; // Lock to protect the time log file.
- void PrintCsvMethodStats(Compiler* comp);
+ static CritSecObject s_csvLock; // Lock to protect the time log file.
+ void PrintCsvMethodStats(Compiler* comp);
private:
void* operator new(size_t);
@@ -1024,45 +1055,47 @@ public:
bool GetThreadCycles(unsigned __int64* cycles)
{
bool res = CycleTimer::GetThreadCyclesS(cycles);
- if (!res) { m_info.m_timerFailure = true; }
+ if (!res)
+ {
+ m_info.m_timerFailure = true;
+ }
return res;
}
};
#endif // FEATURE_JIT_METHOD_PERF
-
//------------------- Function/Funclet info -------------------------------
-DECLARE_TYPED_ENUM(FuncKind,BYTE)
+DECLARE_TYPED_ENUM(FuncKind, BYTE)
{
- FUNC_ROOT, // The main/root function (always id==0)
- FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler)
- FUNC_FILTER, // a funclet associated with an EH filter
- FUNC_COUNT
+ FUNC_ROOT, // The main/root function (always id==0)
+ FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler)
+ FUNC_FILTER, // a funclet associated with an EH filter
+ FUNC_COUNT
}
-END_DECLARE_TYPED_ENUM(FuncKind,BYTE)
+END_DECLARE_TYPED_ENUM(FuncKind, BYTE)
class emitLocation;
struct FuncInfoDsc
{
- FuncKind funKind;
- BYTE funFlags; // Currently unused, just here for padding
- unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this
- // funclet. It is only valid if funKind field indicates this is an
- // EH-related funclet: FUNC_HANDLER or FUNC_FILTER
+ FuncKind funKind;
+ BYTE funFlags; // Currently unused, just here for padding
+ unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this
+ // funclet. It is only valid if funKind field indicates this is an
+ // EH-related funclet: FUNC_HANDLER or FUNC_FILTER
#if defined(_TARGET_AMD64_)
// TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array.
- emitLocation* startLoc;
- emitLocation* endLoc;
- emitLocation* coldStartLoc; // locations for the cold section, if there is one.
- emitLocation* coldEndLoc;
- UNWIND_INFO unwindHeader;
+ emitLocation* startLoc;
+ emitLocation* endLoc;
+ emitLocation* coldStartLoc; // locations for the cold section, if there is one.
+ emitLocation* coldEndLoc;
+ UNWIND_INFO unwindHeader;
// Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd
// number of codes, the VM or Zapper will 4-byte align the whole thing.
- BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF*sizeof(UNWIND_CODE))];
- unsigned unwindCodeSlot;
+ BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))];
+ unsigned unwindCodeSlot;
#ifdef UNIX_AMD64_ABI
jitstd::vector<CFI_CODE>* cfiCodes;
@@ -1070,13 +1103,13 @@ struct FuncInfoDsc
#elif defined(_TARGET_ARMARCH_)
- UnwindInfo uwi; // Unwind information for this function/funclet's hot section
- UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section
- // Note: we only have a pointer here instead of the actual object,
- // to save memory in the JIT case (compared to the NGEN case),
- // where we don't have any cold section.
- // Note 2: we currently don't support hot/cold splitting in functions
- // with EH, so uwiCold will be NULL for all funclets.
+ UnwindInfo uwi; // Unwind information for this function/funclet's hot section
+ UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section
+ // Note: we only have a pointer here instead of the actual object,
+ // to save memory in the JIT case (compared to the NGEN case),
+ // where we don't have any cold section.
+ // Note 2: we currently don't support hot/cold splitting in functions
+ // with EH, so uwiCold will be NULL for all funclets.
#endif // _TARGET_ARMARCH_
@@ -1084,49 +1117,51 @@ struct FuncInfoDsc
// that isn't shared between the main function body and funclets.
};
-
-
struct fgArgTabEntry
{
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
fgArgTabEntry()
{
- otherRegNum = REG_NA;
- isStruct = false; // is this a struct arg
+ otherRegNum = REG_NA;
+ isStruct = false; // is this a struct arg
}
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- GenTreePtr node; // Initially points at the Op1 field of 'parent', but if the argument is replaced with a GT_ASG or placeholder
- // it will point at the actual argument in the gtCallLateArgs list.
- GenTreePtr parent; // Points at the GT_LIST node in the gtCallArgs for this argument
+ GenTreePtr node; // Initially points at the Op1 field of 'parent', but if the argument is replaced with a GT_ASG or
+ // placeholder
+ // it will point at the actual argument in the gtCallLateArgs list.
+ GenTreePtr parent; // Points at the GT_LIST node in the gtCallArgs for this argument
- unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL
+ unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL
- regNumber regNum; // The (first) register to use when passing this argument, set to REG_STK for arguments passed on the stack
- unsigned numRegs; // Count of number of registers that this argument uses
+ regNumber regNum; // The (first) register to use when passing this argument, set to REG_STK for arguments passed on
+ // the stack
+ unsigned numRegs; // Count of number of registers that this argument uses
- // A slot is a pointer sized region in the OutArg area.
- unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area
- unsigned numSlots; // Count of number of slots that this argument uses
+ // A slot is a pointer sized region in the OutArg area.
+ unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area
+ unsigned numSlots; // Count of number of slots that this argument uses
- unsigned alignment; // 1 or 2 (slots/registers)
- unsigned lateArgInx; // index into gtCallLateArgs list
- unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg
+ unsigned alignment; // 1 or 2 (slots/registers)
+ unsigned lateArgInx; // index into gtCallLateArgs list
+ unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg
- bool isSplit :1; // True when this argument is split between the registers and OutArg area
- bool needTmp :1; // True when we force this argument's evaluation into a temp LclVar
- bool needPlace :1; // True when we must replace this argument with a placeholder node
- bool isTmp :1; // True when we set up a temp LclVar for this argument due to size issues with the struct
- bool processed :1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs
- bool isHfaRegArg :1; // True when the argument is passed as a HFA in FP registers.
- bool isBackFilled :1; // True when the argument fills a register slot skipped due to alignment requirements of previous arguments.
- bool isNonStandard:1; // True if it is an arg that is passed in a reg other than a standard arg reg, or is forced to be on the stack despite its arg list position.
+ bool isSplit : 1; // True when this argument is split between the registers and OutArg area
+ bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar
+ bool needPlace : 1; // True when we must replace this argument with a placeholder node
+ bool isTmp : 1; // True when we set up a temp LclVar for this argument due to size issues with the struct
+ bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs
+ bool isHfaRegArg : 1; // True when the argument is passed as a HFA in FP registers.
+ bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of
+ // previous arguments.
+ bool isNonStandard : 1; // True if it is an arg that is passed in a reg other than a standard arg reg, or is forced
+ // to be on the stack despite its arg list position.
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- bool isStruct :1; // True if this is a struct arg
+ bool isStruct : 1; // True if this is a struct arg
- regNumber otherRegNum; // The (second) register to use when passing this argument.
+ regNumber otherRegNum; // The (second) register to use when passing this argument.
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
@@ -1146,8 +1181,9 @@ struct fgArgTabEntry
{
return isBackFilled;
}
-#else // !_TARGET_ARM_
- // To make things easier for callers, we allow these calls (and the isHfaRegArg and isBackFilled data members) for all platforms.
+#else // !_TARGET_ARM_
+ // To make things easier for callers, we allow these calls (and the isHfaRegArg and isBackFilled data members) for all
+ // platforms.
void SetIsHfaRegArg(bool hfaRegArg)
{
}
@@ -1166,110 +1202,106 @@ struct fgArgTabEntry
void Dump();
#endif
};
-typedef struct fgArgTabEntry * fgArgTabEntryPtr;
+typedef struct fgArgTabEntry* fgArgTabEntryPtr;
//-------------------------------------------------------------------------
//
-// The class fgArgInfo is used to handle the arguments
+// The class fgArgInfo is used to handle the arguments
// when morphing a GT_CALL node.
//
-class fgArgInfo
+class fgArgInfo
{
- Compiler * compiler; // Back pointer to the compiler instance so that we can allocate memory
- GenTreePtr callTree; // Back pointer to the GT_CALL node for this fgArgInfo
- unsigned argCount; // Updatable arg count value
- unsigned nextSlotNum; // Updatable slot count value
- unsigned stkLevel; // Stack depth when we make this call (for x86)
-
- unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
- bool hasRegArgs; // true if we have one or more register arguments
- bool hasStackArgs; // true if we have one or more stack arguments
- bool argsComplete; // marker for state
- bool argsSorted; // marker for state
- fgArgTabEntryPtr * argTable; // variable-sized array of per-argument descriptions (i.e. argTable[argTableSize])
+ Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory
+ GenTreePtr callTree; // Back pointer to the GT_CALL node for this fgArgInfo
+ unsigned argCount; // Updatable arg count value
+ unsigned nextSlotNum; // Updatable slot count value
+ unsigned stkLevel; // Stack depth when we make this call (for x86)
+
+ unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
+ bool hasRegArgs; // true if we have one or more register arguments
+ bool hasStackArgs; // true if we have one or more stack arguments
+ bool argsComplete; // marker for state
+ bool argsSorted; // marker for state
+ fgArgTabEntryPtr* argTable; // variable-sized array of per-argument descriptions (i.e. argTable[argTableSize])
private:
-
- void AddArg (fgArgTabEntryPtr curArgTabEntry);
+ void AddArg(fgArgTabEntryPtr curArgTabEntry);
public:
+ fgArgInfo(Compiler* comp, GenTreePtr call, unsigned argCount);
+ fgArgInfo(GenTreePtr newCall, GenTreePtr oldCall);
- fgArgInfo(Compiler * comp, GenTreePtr call, unsigned argCount);
- fgArgInfo(GenTreePtr newCall, GenTreePtr oldCall);
-
- fgArgTabEntryPtr AddRegArg (unsigned argNum,
- GenTreePtr node,
- GenTreePtr parent,
- regNumber regNum,
- unsigned numRegs,
- unsigned alignment);
+ fgArgTabEntryPtr AddRegArg(
+ unsigned argNum, GenTreePtr node, GenTreePtr parent, regNumber regNum, unsigned numRegs, unsigned alignment);
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- fgArgTabEntryPtr AddRegArg (unsigned argNum,
- GenTreePtr node,
- GenTreePtr parent,
- regNumber regNum,
- unsigned numRegs,
- unsigned alignment,
- const bool isStruct,
- const regNumber otherRegNum = REG_NA,
- const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
+ fgArgTabEntryPtr AddRegArg(
+ unsigned argNum,
+ GenTreePtr node,
+ GenTreePtr parent,
+ regNumber regNum,
+ unsigned numRegs,
+ unsigned alignment,
+ const bool isStruct,
+ const regNumber otherRegNum = REG_NA,
+ const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- fgArgTabEntryPtr AddStkArg (unsigned argNum,
- GenTreePtr node,
- GenTreePtr parent,
- unsigned numSlots,
- unsigned alignment
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool isStruct));
-
- void RemorphReset ();
- fgArgTabEntryPtr RemorphRegArg (unsigned argNum,
- GenTreePtr node,
- GenTreePtr parent,
- regNumber regNum,
- unsigned numRegs,
- unsigned alignment);
-
- void RemorphStkArg (unsigned argNum,
- GenTreePtr node,
- GenTreePtr parent,
- unsigned numSlots,
- unsigned alignment);
-
- void SplitArg (unsigned argNum,
- unsigned numRegs,
- unsigned numSlots);
-
- void EvalToTmp (unsigned argNum,
- unsigned tmpNum,
- GenTreePtr newNode);
-
- void ArgsComplete();
-
- void SortArgs();
-
- void EvalArgsToTemps();
-
- void RecordStkLevel (unsigned stkLvl);
- unsigned RetrieveStkLevel ();
-
- unsigned ArgCount () { return argCount; }
- fgArgTabEntryPtr * ArgTable () { return argTable; }
- unsigned GetNextSlotNum() { return nextSlotNum; }
- bool HasRegArgs() { return hasRegArgs; }
- bool HasStackArgs() { return hasStackArgs; }
-};
+ fgArgTabEntryPtr AddStkArg(unsigned argNum,
+ GenTreePtr node,
+ GenTreePtr parent,
+ unsigned numSlots,
+ unsigned alignment FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool isStruct));
+
+ void RemorphReset();
+ fgArgTabEntryPtr RemorphRegArg(
+ unsigned argNum, GenTreePtr node, GenTreePtr parent, regNumber regNum, unsigned numRegs, unsigned alignment);
+ void RemorphStkArg(unsigned argNum, GenTreePtr node, GenTreePtr parent, unsigned numSlots, unsigned alignment);
+
+ void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);
+
+ void EvalToTmp(unsigned argNum, unsigned tmpNum, GenTreePtr newNode);
+
+ void ArgsComplete();
+
+ void SortArgs();
+
+ void EvalArgsToTemps();
+
+ void RecordStkLevel(unsigned stkLvl);
+ unsigned RetrieveStkLevel();
+
+ unsigned ArgCount()
+ {
+ return argCount;
+ }
+ fgArgTabEntryPtr* ArgTable()
+ {
+ return argTable;
+ }
+ unsigned GetNextSlotNum()
+ {
+ return nextSlotNum;
+ }
+ bool HasRegArgs()
+ {
+ return hasRegArgs;
+ }
+ bool HasStackArgs()
+ {
+ return hasStackArgs;
+ }
+};
#ifdef DEBUG
-//XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// We have the ability to mark source expressions with "Test Labels."
// These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions
// that should be CSE defs, and other expressions that should uses of those defs, with a shared label.
-enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel.
+enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel.
{
TL_SsaName,
TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown).
@@ -1282,40 +1314,44 @@ enum TestLabel // This must be kept identical to System.Runtime.CompilerService
struct TestLabelAndNum
{
TestLabel m_tl;
- ssize_t m_num;
+ ssize_t m_num;
- TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0) {}
+ TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0)
+ {
+ }
};
typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, TestLabelAndNum, JitSimplerHashBehavior> NodeToTestDataMap;
-
-//XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif // DEBUG
// This class implements the "IAllocator" interface, so that we can use
// utilcode collection classes in the JIT, and have them use the JIT's allocator.
-class CompAllocator: public IAllocator
+class CompAllocator : public IAllocator
{
- Compiler * m_comp;
+ Compiler* m_comp;
#if MEASURE_MEM_ALLOC
CompMemKind m_cmk;
#endif
public:
- CompAllocator(Compiler * comp, CompMemKind cmk)
+ CompAllocator(Compiler* comp, CompMemKind cmk)
: m_comp(comp)
#if MEASURE_MEM_ALLOC
, m_cmk(cmk)
#endif
- {}
+ {
+ }
- inline void * Alloc(size_t sz);
+ inline void* Alloc(size_t sz);
- inline void * ArrayAlloc(size_t elems, size_t elemSize);
+ inline void* ArrayAlloc(size_t elems, size_t elemSize);
// For the compiler's no-release allocator, free operations are no-ops.
- void Free(void * p) {}
+ void Free(void* p)
+ {
+ }
};
/*
@@ -1346,7 +1382,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-class Compiler
+class Compiler
{
friend class emitter;
friend class UnwindInfo;
@@ -1370,62 +1406,64 @@ class Compiler
friend class DecomposeLongs;
#endif // !_TARGET_64BIT_
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX Misc structs definitions XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX Misc structs definitions XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
public:
-
- hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package.
+ hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package.
#ifdef DEBUG
- bool verbose;
- bool dumpIR;
- bool dumpIRNodes;
- bool dumpIRTypes;
- bool dumpIRKinds;
- bool dumpIRLocals;
- bool dumpIRRegs;
- bool dumpIRSsa;
- bool dumpIRValnums;
- bool dumpIRCosts;
- bool dumpIRFlags;
- bool dumpIRNoLists;
- bool dumpIRNoLeafs;
- bool dumpIRNoStmts;
- bool dumpIRTrees;
- bool dumpIRLinear;
- bool dumpIRDataflow;
- bool dumpIRBlockHeaders;
- bool dumpIRExit;
+ bool verbose;
+ bool dumpIR;
+ bool dumpIRNodes;
+ bool dumpIRTypes;
+ bool dumpIRKinds;
+ bool dumpIRLocals;
+ bool dumpIRRegs;
+ bool dumpIRSsa;
+ bool dumpIRValnums;
+ bool dumpIRCosts;
+ bool dumpIRFlags;
+ bool dumpIRNoLists;
+ bool dumpIRNoLeafs;
+ bool dumpIRNoStmts;
+ bool dumpIRTrees;
+ bool dumpIRLinear;
+ bool dumpIRDataflow;
+ bool dumpIRBlockHeaders;
+ bool dumpIRExit;
LPCWSTR dumpIRPhase;
LPCWSTR dumpIRFormat;
- bool verboseTrees;
- bool shouldUseVerboseTrees();
- bool asciiTrees; // If true, dump trees using only ASCII characters
- bool shouldDumpASCIITrees();
- bool verboseSsa; // If true, produce especially verbose dump output in SSA construction.
- bool shouldUseVerboseSsa();
- bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id:
- int morphNum; // This counts the trees that have been morphed, allowing us to label each uniquely.
+ bool verboseTrees;
+ bool shouldUseVerboseTrees();
+ bool asciiTrees; // If true, dump trees using only ASCII characters
+ bool shouldDumpASCIITrees();
+ bool verboseSsa; // If true, produce especially verbose dump output in SSA construction.
+ bool shouldUseVerboseSsa();
+ bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id:
+ int morphNum; // This counts the trees that have been morphed, allowing us to label each uniquely.
- const char* VarNameToStr(VarName name) { return name; }
+ const char* VarNameToStr(VarName name)
+ {
+ return name;
+ }
DWORD expensiveDebugCheckLevel;
#endif
#if FEATURE_MULTIREG_RET
- GenTreePtr impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass);
+ GenTreePtr impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass);
#endif // FEATURE_MULTIREG_RET
#ifdef ARM_SOFTFP
- bool isSingleFloat32Struct(CORINFO_CLASS_HANDLE hClass);
+ bool isSingleFloat32Struct(CORINFO_CLASS_HANDLE hClass);
#endif // ARM_SOFTFP
//-------------------------------------------------------------------------
@@ -1436,31 +1474,32 @@ public:
// floating-point registers instead of the general purpose registers.
//
- bool IsHfa(CORINFO_CLASS_HANDLE hClass);
- bool IsHfa(GenTreePtr tree);
+ bool IsHfa(CORINFO_CLASS_HANDLE hClass);
+ bool IsHfa(GenTreePtr tree);
- var_types GetHfaType(GenTreePtr tree);
- unsigned GetHfaCount(GenTreePtr tree);
+ var_types GetHfaType(GenTreePtr tree);
+ unsigned GetHfaCount(GenTreePtr tree);
- var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
- unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);
+ var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
+ unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);
- bool IsMultiRegPassedType (CORINFO_CLASS_HANDLE hClass);
- bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass);
+ bool IsMultiRegPassedType(CORINFO_CLASS_HANDLE hClass);
+ bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass);
//-------------------------------------------------------------------------
// The following is used for validating format of EH table
//
- struct EHNodeDsc;
+ struct EHNodeDsc;
typedef struct EHNodeDsc* pEHNodeDsc;
- EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
- EHNodeDsc* ehnNext; // root of the tree comprising the EHnodes.
+ EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
+ EHNodeDsc* ehnNext; // root of the tree comprising the EHnodes.
- struct EHNodeDsc
+ struct EHNodeDsc
{
- enum EHBlockType {
+ enum EHBlockType
+ {
TryNode,
FilterNode,
HandlerNode,
@@ -1468,30 +1507,61 @@ public:
FaultNode
};
- EHBlockType ehnBlockType; // kind of EH block
- IL_OFFSET ehnStartOffset; // IL offset of start of the EH block
- IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to the last IL offset, not "one past the last one", i.e., the range Start to End is inclusive).
- pEHNodeDsc ehnNext; // next (non-nested) block in sequential order
- pEHNodeDsc ehnChild; // leftmost nested block
+ EHBlockType ehnBlockType; // kind of EH block
+ IL_OFFSET ehnStartOffset; // IL offset of start of the EH block
+ IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
+ // the last IL offset, not "one past the last one", i.e., the range Start to End is
+ // inclusive).
+ pEHNodeDsc ehnNext; // next (non-nested) block in sequential order
+ pEHNodeDsc ehnChild; // leftmost nested block
union {
- pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node
- pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node
+ pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node
+ pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node
};
- pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0
- pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same,
+ pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0
+ pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same,
+ inline void ehnSetTryNodeType()
+ {
+ ehnBlockType = TryNode;
+ }
+ inline void ehnSetFilterNodeType()
+ {
+ ehnBlockType = FilterNode;
+ }
+ inline void ehnSetHandlerNodeType()
+ {
+ ehnBlockType = HandlerNode;
+ }
+ inline void ehnSetFinallyNodeType()
+ {
+ ehnBlockType = FinallyNode;
+ }
+ inline void ehnSetFaultNodeType()
+ {
+ ehnBlockType = FaultNode;
+ }
- inline void ehnSetTryNodeType() {ehnBlockType = TryNode;}
- inline void ehnSetFilterNodeType() {ehnBlockType = FilterNode;}
- inline void ehnSetHandlerNodeType() {ehnBlockType = HandlerNode;}
- inline void ehnSetFinallyNodeType() {ehnBlockType = FinallyNode;}
- inline void ehnSetFaultNodeType() {ehnBlockType = FaultNode;}
-
- inline BOOL ehnIsTryBlock() {return ehnBlockType == TryNode;}
- inline BOOL ehnIsFilterBlock() {return ehnBlockType == FilterNode;}
- inline BOOL ehnIsHandlerBlock() {return ehnBlockType == HandlerNode;}
- inline BOOL ehnIsFinallyBlock() {return ehnBlockType == FinallyNode;}
- inline BOOL ehnIsFaultBlock() {return ehnBlockType == FaultNode;}
+ inline BOOL ehnIsTryBlock()
+ {
+ return ehnBlockType == TryNode;
+ }
+ inline BOOL ehnIsFilterBlock()
+ {
+ return ehnBlockType == FilterNode;
+ }
+ inline BOOL ehnIsHandlerBlock()
+ {
+ return ehnBlockType == HandlerNode;
+ }
+ inline BOOL ehnIsFinallyBlock()
+ {
+ return ehnBlockType == FinallyNode;
+ }
+ inline BOOL ehnIsFaultBlock()
+ {
+ return ehnBlockType == FaultNode;
+ }
// returns true if there is any overlap between the two nodes
static inline BOOL ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2)
@@ -1509,104 +1579,107 @@ public:
// fails with BADCODE if inner is not completely nested inside outer
static inline BOOL ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer)
{
- return ((inner->ehnStartOffset >= outer->ehnStartOffset) &&
- (inner->ehnEndOffset <= outer->ehnEndOffset));
+ return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset));
}
-
};
-
- //-------------------------------------------------------------------------
- // Exception handling functions
- //
+//-------------------------------------------------------------------------
+// Exception handling functions
+//
#if !FEATURE_EH_FUNCLETS
- bool ehNeedsShadowSPslots() { return (info.compXcptnsCount || opts.compDbgEnC); }
+ bool ehNeedsShadowSPslots()
+ {
+ return (info.compXcptnsCount || opts.compDbgEnC);
+ }
// 0 for methods with no EH
// 1 for methods with non-nested EH, or where only the try blocks are nested
// 2 for a method with a catch within a catch
// etc.
- unsigned ehMaxHndNestingCount;
+ unsigned ehMaxHndNestingCount;
#endif // !FEATURE_EH_FUNCLETS
- static bool jitIsBetween(unsigned value, unsigned start, unsigned end);
- static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end);
+ static bool jitIsBetween(unsigned value, unsigned start, unsigned end);
+ static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end);
+
+ bool bbInCatchHandlerILRange(BasicBlock* blk);
+ bool bbInFilterILRange(BasicBlock* blk);
+ bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk);
+ bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk);
+ bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk);
+ bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk);
+ unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo);
- bool bbInCatchHandlerILRange (BasicBlock * blk);
- bool bbInFilterILRange (BasicBlock * blk);
- bool bbInTryRegions (unsigned regionIndex, BasicBlock * blk);
- bool bbInExnFlowRegions (unsigned regionIndex, BasicBlock * blk);
- bool bbInHandlerRegions (unsigned regionIndex, BasicBlock * blk);
- bool bbInCatchHandlerRegions (BasicBlock * tryBlk, BasicBlock * hndBlk);
- unsigned short bbFindInnermostCommonTryRegion (BasicBlock * bbOne, BasicBlock * bbTwo);
-
- unsigned short bbFindInnermostTryRegionContainingHandlerRegion (unsigned handlerIndex);
- unsigned short bbFindInnermostHandlerRegionContainingTryRegion (unsigned tryIndex);
+ unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex);
+ unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex);
// Returns true if "block" is the start of a try region.
- bool bbIsTryBeg(BasicBlock* block);
+ bool bbIsTryBeg(BasicBlock* block);
// Returns true if "block" is the start of a handler or filter region.
- bool bbIsHandlerBeg(BasicBlock* block);
+ bool bbIsHandlerBeg(BasicBlock* block);
// Returns true iff "block" is where control flows if an exception is raised in the
// try region, and sets "*regionIndex" to the index of the try for the handler.
// Differs from "IsHandlerBeg" in the case of filters, where this is true for the first
// block of the filter, but not for the filter's handler.
- bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex);
+ bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex);
- bool ehHasCallableHandlers();
+ bool ehHasCallableHandlers();
// Return the EH descriptor for the given region index.
- EHblkDsc* ehGetDsc(unsigned regionIndex);
+ EHblkDsc* ehGetDsc(unsigned regionIndex);
// Return the EH index given a region descriptor.
- unsigned ehGetIndex(EHblkDsc* ehDsc);
+ unsigned ehGetIndex(EHblkDsc* ehDsc);
// Return the EH descriptor index of the enclosing try, for the given region index.
- unsigned ehGetEnclosingTryIndex(unsigned regionIndex);
+ unsigned ehGetEnclosingTryIndex(unsigned regionIndex);
// Return the EH descriptor index of the enclosing handler, for the given region index.
- unsigned ehGetEnclosingHndIndex(unsigned regionIndex);
+ unsigned ehGetEnclosingHndIndex(unsigned regionIndex);
- // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this block is not in a 'try' region).
- EHblkDsc* ehGetBlockTryDsc(BasicBlock* block);
+ // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this
+ // block is not in a 'try' region).
+ EHblkDsc* ehGetBlockTryDsc(BasicBlock* block);
- // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr if this block is not in a filter or handler region).
- EHblkDsc* ehGetBlockHndDsc(BasicBlock* block);
+ // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr
+ // if this block is not in a filter or handler region).
+ EHblkDsc* ehGetBlockHndDsc(BasicBlock* block);
- // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or nullptr if this block's exceptions propagate to caller).
- EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block);
+ // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or
+ // nullptr if this block's exceptions propagate to caller).
+ EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block);
- EHblkDsc* ehIsBlockTryLast(BasicBlock* block);
- EHblkDsc* ehIsBlockHndLast(BasicBlock* block);
- bool ehIsBlockEHLast(BasicBlock* block);
+ EHblkDsc* ehIsBlockTryLast(BasicBlock* block);
+ EHblkDsc* ehIsBlockHndLast(BasicBlock* block);
+ bool ehIsBlockEHLast(BasicBlock* block);
- bool ehBlockHasExnFlowDsc(BasicBlock* block);
+ bool ehBlockHasExnFlowDsc(BasicBlock* block);
// Return the region index of the most nested EH region this block is in.
- unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion);
+ unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion);
// Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check.
- unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex);
+ unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex);
// Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX
// if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion'
// is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler.
// (It can never be a filter.)
- unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion);
+ unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion);
// A block has been deleted. Update the EH table appropriately.
- void ehUpdateForDeletedBlock(BasicBlock* block);
+ void ehUpdateForDeletedBlock(BasicBlock* block);
// Determine whether a block can be deleted while preserving the EH normalization rules.
- bool ehCanDeleteEmptyBlock(BasicBlock* block);
+ bool ehCanDeleteEmptyBlock(BasicBlock* block);
// Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region.
- void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast);
+ void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast);
// For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler,
// or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index
@@ -1615,18 +1688,18 @@ public:
// body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the
// BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if it lives in the handler region. (It never
// lives in a filter.)
- unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion);
+ unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion);
// Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's
// handler. Set begBlk to the first block, and endBlk to the block after the last block of the range
// (nullptr if the last block is the last block in the program).
// Precondition: 'finallyIndex' is the EH region of a try/finally clause.
- void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);
+ void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);
#ifdef DEBUG
// Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return
// 'true' if the BBJ_CALLFINALLY is in the correct EH region.
- bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
+ bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
#endif // DEBUG
#if FEATURE_EH_FUNCLETS
@@ -1636,408 +1709,347 @@ public:
// genFuncletProlog() for more details. However, the VM seems to use it for more
// purposes, maybe including debugging. Until we are sure otherwise, always create
// a PSPSym for functions with any EH.
- bool ehNeedsPSPSym() const { return compHndBBtabCount > 0; }
+ bool ehNeedsPSPSym() const
+ {
+ return compHndBBtabCount > 0;
+ }
- bool ehAnyFunclets(); // Are there any funclets in this function?
- unsigned ehFuncletCount(); // Return the count of funclets in the function
+ bool ehAnyFunclets(); // Are there any funclets in this function?
+ unsigned ehFuncletCount(); // Return the count of funclets in the function
- unsigned bbThrowIndex(BasicBlock * blk); // Get the index to use as the cache key for sharing throw blocks
-#else // !FEATURE_EH_FUNCLETS
- bool ehAnyFunclets() { return false; }
- unsigned ehFuncletCount() { return 0; }
+ unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks
+#else // !FEATURE_EH_FUNCLETS
+ bool ehAnyFunclets()
+ {
+ return false;
+ }
+ unsigned ehFuncletCount()
+ {
+ return 0;
+ }
- unsigned bbThrowIndex(BasicBlock * blk) { return blk->bbTryIndex; } // Get the index to use as the cache key for sharing throw blocks
-#endif // !FEATURE_EH_FUNCLETS
+ unsigned bbThrowIndex(BasicBlock* blk)
+ {
+ return blk->bbTryIndex;
+ } // Get the index to use as the cache key for sharing throw blocks
+#endif // !FEATURE_EH_FUNCLETS
// Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of
// "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the first
// first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
// for example, we want to consider that the immediate dominator of the catch clause start block, so it's
// convenient to also consider it a predecessor.)
- flowList* BlockPredsWithEH(BasicBlock* blk);
+ flowList* BlockPredsWithEH(BasicBlock* blk);
// This table is useful for memoization of the method above.
- typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, flowList*, JitSimplerHashBehavior> BlockToFlowListMap;
+ typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, flowList*, JitSimplerHashBehavior>
+ BlockToFlowListMap;
BlockToFlowListMap* m_blockToEHPreds;
BlockToFlowListMap* GetBlockToEHPreds()
{
- if (m_blockToEHPreds == NULL)
+ if (m_blockToEHPreds == nullptr)
{
m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator());
}
return m_blockToEHPreds;
}
- void* ehEmitCookie(BasicBlock* block);
- UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);
+ void* ehEmitCookie(BasicBlock* block);
+ UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);
- EHblkDsc* ehInitHndRange(BasicBlock* src,
- IL_OFFSET* hndBeg,
- IL_OFFSET* hndEnd,
- bool* inFilter);
+ EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter);
- EHblkDsc* ehInitTryRange(BasicBlock* src,
- IL_OFFSET* tryBeg,
- IL_OFFSET* tryEnd);
+ EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd);
- EHblkDsc* ehInitHndBlockRange(BasicBlock* blk,
- BasicBlock** hndBeg,
- BasicBlock** hndLast,
- bool* inFilter);
+ EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter);
- EHblkDsc* ehInitTryBlockRange(BasicBlock* blk,
- BasicBlock** tryBeg,
- BasicBlock** tryLast);
+ EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);
- void fgSetTryEnd (EHblkDsc* handlerTab,
- BasicBlock* newTryLast);
+ void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);
- void fgSetHndEnd (EHblkDsc* handlerTab,
- BasicBlock* newHndLast);
+ void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);
- void fgSkipRmvdBlocks(EHblkDsc* handlerTab);
+ void fgSkipRmvdBlocks(EHblkDsc* handlerTab);
- void fgAllocEHTable();
+ void fgAllocEHTable();
- void fgRemoveEHTableEntry(unsigned XTnum);
+ void fgRemoveEHTableEntry(unsigned XTnum);
#if FEATURE_EH_FUNCLETS
- EHblkDsc * fgAddEHTableEntry (unsigned XTnum);
+ EHblkDsc* fgAddEHTableEntry(unsigned XTnum);
#endif // FEATURE_EH_FUNCLETS
#if !FEATURE_EH
- void fgRemoveEH();
+ void fgRemoveEH();
#endif // !FEATURE_EH
- void fgSortEHTable();
+ void fgSortEHTable();
// Causes the EH table to obey some well-formedness conditions, by inserting
// empty BB's when necessary:
// * No block is both the first block of a handler and the first block of a try.
// * No block is the first block of multiple 'try' regions.
// * No block is the last block of multiple EH regions.
- void fgNormalizeEH();
- bool fgNormalizeEHCase1();
- bool fgNormalizeEHCase2();
- bool fgNormalizeEHCase3();
+ void fgNormalizeEH();
+ bool fgNormalizeEHCase1();
+ bool fgNormalizeEHCase2();
+ bool fgNormalizeEHCase3();
#ifdef DEBUG
- void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
- void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
- void fgVerifyHandlerTab();
- void fgDispHandlerTab ();
+ void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
+ void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
+ void fgVerifyHandlerTab();
+ void fgDispHandlerTab();
#endif // DEBUG
- bool fgNeedToSortEHTable;
+ bool fgNeedToSortEHTable;
- void verInitEHTree (unsigned numEHClauses);
- void verInsertEhNode (CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab);
- void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node);
- void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node);
- void verCheckNestingLevel(EHNodeDsc* initRoot);
-
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX GenTree and BasicBlock XX
-XX XX
-XX Functions to allocate and display the GenTrees and BasicBlocks XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
+ void verInitEHTree(unsigned numEHClauses);
+ void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab);
+ void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node);
+ void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node);
+ void verCheckNestingLevel(EHNodeDsc* initRoot);
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX GenTree and BasicBlock XX
+ XX XX
+ XX Functions to allocate and display the GenTrees and BasicBlocks XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
// Functions to create nodes
- GenTreeStmt* gtNewStmt (GenTreePtr expr = NULL,
- IL_OFFSETX offset = BAD_IL_OFFSET);
+ GenTreeStmt* gtNewStmt(GenTreePtr expr = nullptr, IL_OFFSETX offset = BAD_IL_OFFSET);
// For unary opers.
- GenTreePtr gtNewOperNode (genTreeOps oper,
- var_types type,
- GenTreePtr op1,
- bool doSimplifications = TRUE);
+ GenTreePtr gtNewOperNode(genTreeOps oper, var_types type, GenTreePtr op1, bool doSimplifications = TRUE);
// For binary opers.
- GenTreePtr gtNewOperNode (genTreeOps oper,
- var_types type,
- GenTreePtr op1,
- GenTreePtr op2);
+ GenTreePtr gtNewOperNode(genTreeOps oper, var_types type, GenTreePtr op1, GenTreePtr op2);
+
+ GenTreePtr gtNewQmarkNode(var_types type, GenTreePtr cond, GenTreePtr colon);
- GenTreePtr gtNewQmarkNode (var_types type,
- GenTreePtr cond,
- GenTreePtr colon);
+ GenTreePtr gtNewLargeOperNode(genTreeOps oper,
+ var_types type = TYP_I_IMPL,
+ GenTreePtr op1 = nullptr,
+ GenTreePtr op2 = nullptr);
- GenTreePtr gtNewLargeOperNode(genTreeOps oper,
- var_types type = TYP_I_IMPL,
- GenTreePtr op1 = NULL,
- GenTreePtr op2 = NULL);
+ GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT);
- GenTreeIntCon* gtNewIconNode (ssize_t value,
- var_types type = TYP_INT);
+ GenTree* gtNewPhysRegNode(regNumber reg, var_types type);
- GenTree* gtNewPhysRegNode(regNumber reg, var_types type);
+ GenTree* gtNewPhysRegNode(regNumber reg, GenTree* src);
- GenTree* gtNewPhysRegNode(regNumber reg, GenTree* src);
+ GenTreePtr gtNewJmpTableNode();
+ GenTreePtr gtNewIconHandleNode(
+ size_t value, unsigned flags, FieldSeqNode* fields = nullptr, unsigned handle1 = 0, void* handle2 = nullptr);
- GenTreePtr gtNewJmpTableNode();
- GenTreePtr gtNewIconHandleNode(size_t value,
- unsigned flags,
- FieldSeqNode* fields = NULL,
- unsigned handle1 = 0,
- void * handle2 = 0);
+ unsigned gtTokenToIconFlags(unsigned token);
- unsigned gtTokenToIconFlags(unsigned token);
+ GenTreePtr gtNewIconEmbHndNode(void* value,
+ void* pValue,
+ unsigned flags,
+ unsigned handle1 = 0,
+ void* handle2 = nullptr,
+ void* compileTimeHandle = nullptr);
- GenTreePtr gtNewIconEmbHndNode(void * value,
- void * pValue,
- unsigned flags,
- unsigned handle1 = 0,
- void * handle2 = 0,
- void * compileTimeHandle = 0);
+ GenTreePtr gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd, unsigned hnd1 = 0, void* hnd2 = nullptr);
+ GenTreePtr gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd, unsigned hnd1 = 0, void* hnd2 = nullptr);
+ GenTreePtr gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd, unsigned hnd1 = 0, void* hnd2 = nullptr);
+ GenTreePtr gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd, unsigned hnd1 = 0, void* hnd2 = nullptr);
- GenTreePtr gtNewIconEmbScpHndNode (CORINFO_MODULE_HANDLE scpHnd, unsigned hnd1 = 0, void * hnd2 = 0);
- GenTreePtr gtNewIconEmbClsHndNode (CORINFO_CLASS_HANDLE clsHnd, unsigned hnd1 = 0, void * hnd2 = 0);
- GenTreePtr gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd, unsigned hnd1 = 0, void * hnd2 = 0);
- GenTreePtr gtNewIconEmbFldHndNode (CORINFO_FIELD_HANDLE fldHnd, unsigned hnd1 = 0, void * hnd2 = 0);
+ GenTreePtr gtNewStringLiteralNode(InfoAccessType iat, void* pValue);
- GenTreePtr gtNewStringLiteralNode(InfoAccessType iat, void * pValue);
+ GenTreePtr gtNewLconNode(__int64 value);
- GenTreePtr gtNewLconNode (__int64 value);
+ GenTreePtr gtNewDconNode(double value);
- GenTreePtr gtNewDconNode (double value);
+ GenTreePtr gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle);
- GenTreePtr gtNewSconNode (int CPX,
- CORINFO_MODULE_HANDLE scpHandle);
+ GenTreePtr gtNewZeroConNode(var_types type);
- GenTreePtr gtNewZeroConNode(var_types type);
+ GenTreePtr gtNewOneConNode(var_types type);
- GenTreePtr gtNewOneConNode(var_types type);
+ GenTreeBlkOp* gtNewBlkOpNode(
+ genTreeOps oper, GenTreePtr dst, GenTreePtr srcOrFillVal, GenTreePtr sizeOrClsTok, bool volatil);
- GenTreeBlkOp* gtNewBlkOpNode (genTreeOps oper, GenTreePtr dst,
- GenTreePtr srcOrFillVal, GenTreePtr sizeOrClsTok,
- bool volatil);
protected:
- void gtBlockOpInit (GenTreePtr node, genTreeOps oper,
- GenTreePtr dst,
- GenTreePtr src, GenTreePtr size,
- bool volatil);
+ void gtBlockOpInit(GenTreePtr node, genTreeOps oper, GenTreePtr dst, GenTreePtr src, GenTreePtr size, bool volatil);
+
public:
- GenTreeObj* gtNewObjNode (CORINFO_CLASS_HANDLE structHnd, GenTreePtr addr);
+ GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTreePtr addr);
- GenTreeBlkOp* gtNewCpObjNode (GenTreePtr dst, GenTreePtr src,
- CORINFO_CLASS_HANDLE structHnd, bool volatil);
+ GenTreeBlkOp* gtNewCpObjNode(GenTreePtr dst, GenTreePtr src, CORINFO_CLASS_HANDLE structHnd, bool volatil);
- GenTreeBlkOp* gtCloneCpObjNode(GenTreeCpObj* source);
+ GenTreeBlkOp* gtCloneCpObjNode(GenTreeCpObj* source);
- GenTreeArgList* gtNewListNode(GenTreePtr op1,
- GenTreeArgList* op2);
+ GenTreeArgList* gtNewListNode(GenTreePtr op1, GenTreeArgList* op2);
- GenTreeCall* gtNewCallNode (gtCallTypes callType,
- CORINFO_METHOD_HANDLE handle,
- var_types type,
- GenTreeArgList* args,
- IL_OFFSETX ilOffset = BAD_IL_OFFSET);
+ GenTreeCall* gtNewCallNode(gtCallTypes callType,
+ CORINFO_METHOD_HANDLE handle,
+ var_types type,
+ GenTreeArgList* args,
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET);
- GenTreeCall* gtNewIndCallNode (GenTreePtr addr,
- var_types type,
- GenTreeArgList* args,
- IL_OFFSETX ilOffset = BAD_IL_OFFSET);
+ GenTreeCall* gtNewIndCallNode(GenTreePtr addr,
+ var_types type,
+ GenTreeArgList* args,
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET);
- GenTreeCall* gtNewHelperCallNode(unsigned helper,
- var_types type,
- unsigned flags = 0,
- GenTreeArgList* args = NULL);
+ GenTreeCall* gtNewHelperCallNode(unsigned helper,
+ var_types type,
+ unsigned flags = 0,
+ GenTreeArgList* args = nullptr);
- GenTreePtr gtNewLclvNode (unsigned lnum,
- var_types type,
- IL_OFFSETX ILoffs = BAD_IL_OFFSET);
+ GenTreePtr gtNewLclvNode(unsigned lnum, var_types type, IL_OFFSETX ILoffs = BAD_IL_OFFSET);
#ifdef FEATURE_SIMD
- GenTreeSIMD* gtNewSIMDNode (var_types type,
- GenTreePtr op1,
- SIMDIntrinsicID simdIntrinsicID,
- var_types baseType,
- unsigned size);
- GenTreeSIMD* gtNewSIMDNode (var_types type,
- GenTreePtr op1,
- GenTreePtr op2,
- SIMDIntrinsicID simdIntrinsicID,
- var_types baseType,
- unsigned size);
+ GenTreeSIMD* gtNewSIMDNode(
+ var_types type, GenTreePtr op1, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size);
+ GenTreeSIMD* gtNewSIMDNode(var_types type,
+ GenTreePtr op1,
+ GenTreePtr op2,
+ SIMDIntrinsicID simdIntrinsicID,
+ var_types baseType,
+ unsigned size);
#endif
- GenTreePtr gtNewLclLNode (unsigned lnum,
- var_types type,
- IL_OFFSETX ILoffs = BAD_IL_OFFSET);
- GenTreeLclFld* gtNewLclFldNode (unsigned lnum,
- var_types type,
- unsigned offset);
- GenTreePtr gtNewInlineCandidateReturnExpr(GenTreePtr inlineCandidate,
- var_types type);
-
- GenTreePtr gtNewCodeRef (BasicBlock * block);
+ GenTreePtr gtNewLclLNode(unsigned lnum, var_types type, IL_OFFSETX ILoffs = BAD_IL_OFFSET);
+ GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset);
+ GenTreePtr gtNewInlineCandidateReturnExpr(GenTreePtr inlineCandidate, var_types type);
- GenTreePtr gtNewFieldRef (var_types typ,
- CORINFO_FIELD_HANDLE fldHnd,
- GenTreePtr obj = NULL,
- DWORD offset = 0,
- bool nullcheck = false);
+ GenTreePtr gtNewCodeRef(BasicBlock* block);
- GenTreePtr gtNewIndexRef (var_types typ,
- GenTreePtr arrayOp,
- GenTreePtr indexOp);
+ GenTreePtr gtNewFieldRef(
+ var_types typ, CORINFO_FIELD_HANDLE fldHnd, GenTreePtr obj = nullptr, DWORD offset = 0, bool nullcheck = false);
- GenTreeArgList* gtNewArgList (GenTreePtr op);
+ GenTreePtr gtNewIndexRef(var_types typ, GenTreePtr arrayOp, GenTreePtr indexOp);
- GenTreeArgList* gtNewArgList (GenTreePtr op1,
- GenTreePtr op2);
+ GenTreeArgList* gtNewArgList(GenTreePtr op);
- static fgArgTabEntryPtr gtArgEntryByArgNum(GenTreePtr call, unsigned argNum);
- static fgArgTabEntryPtr gtArgEntryByNode (GenTreePtr call, GenTreePtr node);
- fgArgTabEntryPtr gtArgEntryByLateArgIndex(GenTreePtr call, unsigned lateArgInx);
- bool gtArgIsThisPtr (fgArgTabEntryPtr argEntry);
+ GenTreeArgList* gtNewArgList(GenTreePtr op1, GenTreePtr op2);
- GenTreePtr gtNewAssignNode (GenTreePtr dst,
- GenTreePtr src);
+ static fgArgTabEntryPtr gtArgEntryByArgNum(GenTreePtr call, unsigned argNum);
+ static fgArgTabEntryPtr gtArgEntryByNode(GenTreePtr call, GenTreePtr node);
+ fgArgTabEntryPtr gtArgEntryByLateArgIndex(GenTreePtr call, unsigned lateArgInx);
+ bool gtArgIsThisPtr(fgArgTabEntryPtr argEntry);
- GenTreePtr gtNewTempAssign (unsigned tmp,
- GenTreePtr val);
+ GenTreePtr gtNewAssignNode(GenTreePtr dst, GenTreePtr src);
- GenTreePtr gtNewRefCOMfield(GenTreePtr objPtr,
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_ACCESS_FLAGS access,
- CORINFO_FIELD_INFO * pFieldInfo,
- var_types lclTyp,
- CORINFO_CLASS_HANDLE structType,
- GenTreePtr assg);
+ GenTreePtr gtNewTempAssign(unsigned tmp, GenTreePtr val);
- GenTreePtr gtNewNothingNode();
+ GenTreePtr gtNewRefCOMfield(GenTreePtr objPtr,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_ACCESS_FLAGS access,
+ CORINFO_FIELD_INFO* pFieldInfo,
+ var_types lclTyp,
+ CORINFO_CLASS_HANDLE structType,
+ GenTreePtr assg);
- GenTreePtr gtNewArgPlaceHolderNode(var_types type,
- CORINFO_CLASS_HANDLE clsHnd);
+ GenTreePtr gtNewNothingNode();
+ GenTreePtr gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd);
- GenTreePtr gtUnusedValNode (GenTreePtr expr);
+ GenTreePtr gtUnusedValNode(GenTreePtr expr);
- GenTreePtr gtNewCastNode (var_types typ,
- GenTreePtr op1,
- var_types castType);
+ GenTreePtr gtNewCastNode(var_types typ, GenTreePtr op1, var_types castType);
- GenTreePtr gtNewCastNodeL (var_types typ,
- GenTreePtr op1,
- var_types castType);
+ GenTreePtr gtNewCastNodeL(var_types typ, GenTreePtr op1, var_types castType);
- GenTreePtr gtNewAllocObjNode(unsigned int helper,
- CORINFO_CLASS_HANDLE clsHnd,
- var_types type,
- GenTreePtr op1);
+ GenTreePtr gtNewAllocObjNode(unsigned int helper, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTreePtr op1);
- //------------------------------------------------------------------------
- // Other GenTree functions
+ //------------------------------------------------------------------------
+ // Other GenTree functions
- GenTreePtr gtClone (GenTree * tree,
- bool complexOK = false);
+ GenTreePtr gtClone(GenTree* tree, bool complexOK = false);
- GenTreePtr gtCloneExpr (GenTree * tree,
- unsigned addFlags = 0,
- unsigned varNum = (unsigned)-1,
- int varVal = 0);
+ GenTreePtr gtCloneExpr(GenTree* tree, unsigned addFlags = 0, unsigned varNum = (unsigned)-1, int varVal = 0);
- GenTreePtr gtReplaceTree (GenTreePtr stmt,
- GenTreePtr tree,
- GenTreePtr replacementTree);
+ GenTreePtr gtReplaceTree(GenTreePtr stmt, GenTreePtr tree, GenTreePtr replacementTree);
- void gtUpdateSideEffects(GenTreePtr tree,
- unsigned oldGtFlags,
- unsigned newGtFlags);
+ void gtUpdateSideEffects(GenTreePtr tree, unsigned oldGtFlags, unsigned newGtFlags);
// Returns "true" iff the complexity (not formally defined, but first interpretation
// is # of nodes in subtree) of "tree" is greater than "limit".
// (This is somewhat redundant with the "gtCostEx/gtCostSz" fields, but can be used
// before they have been set.)
- bool gtComplexityExceeds(GenTreePtr* tree, unsigned limit);
+ bool gtComplexityExceeds(GenTreePtr* tree, unsigned limit);
- bool gtCompareTree (GenTree * op1,
- GenTree * op2);
+ bool gtCompareTree(GenTree* op1, GenTree* op2);
- GenTreePtr gtReverseCond (GenTree * tree);
+ GenTreePtr gtReverseCond(GenTree* tree);
- bool gtHasRef (GenTree * tree,
- ssize_t lclNum,
- bool defOnly);
+ bool gtHasRef(GenTree* tree, ssize_t lclNum, bool defOnly);
- bool gtHasLocalsWithAddrOp (GenTreePtr tree);
+ bool gtHasLocalsWithAddrOp(GenTreePtr tree);
- unsigned gtHashValue (GenTree * tree);
+ unsigned gtHashValue(GenTree* tree);
- unsigned gtSetListOrder (GenTree * list,
- bool regs);
+ unsigned gtSetListOrder(GenTree* list, bool regs);
- void gtWalkOp (GenTree * * op1,
- GenTree * * op2,
- GenTree * adr,
- bool constOnly);
+ void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* adr, bool constOnly);
#ifdef DEBUG
- GenTreePtr gtWalkOpEffectiveVal(GenTreePtr op);
+ GenTreePtr gtWalkOpEffectiveVal(GenTreePtr op);
#endif
- void gtPrepareCost (GenTree* tree);
- bool gtIsLikelyRegVar(GenTree* tree);
+ void gtPrepareCost(GenTree* tree);
+ bool gtIsLikelyRegVar(GenTree* tree);
- unsigned gtSetEvalOrderAndRestoreFPstkLevel(GenTree * tree);
+ unsigned gtSetEvalOrderAndRestoreFPstkLevel(GenTree* tree);
// Returns true iff the secondNode can be swapped with firstNode.
- bool gtCanSwapOrder (GenTree* firstNode,
- GenTree* secondNode);
+ bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode);
- unsigned gtSetEvalOrder (GenTree* tree);
+ unsigned gtSetEvalOrder(GenTree* tree);
#if FEATURE_STACK_FP_X87
- bool gtFPstLvlRedo;
- void gtComputeFPlvls (GenTreePtr tree);
+ bool gtFPstLvlRedo;
+ void gtComputeFPlvls(GenTreePtr tree);
#endif // FEATURE_STACK_FP_X87
- void gtSetStmtInfo (GenTree * stmt);
+ void gtSetStmtInfo(GenTree* stmt);
// Returns "true" iff "node" has any of the side effects in "flags".
- bool gtNodeHasSideEffects(GenTreePtr node,
- unsigned flags);
+ bool gtNodeHasSideEffects(GenTreePtr node, unsigned flags);
// Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags".
- bool gtTreeHasSideEffects(GenTreePtr tree,
- unsigned flags);
+ bool gtTreeHasSideEffects(GenTreePtr tree, unsigned flags);
- // Appends 'expr' in front of 'list'
+ // Appends 'expr' in front of 'list'
// 'list' will typically start off as 'nullptr'
// when 'list' is non-null a GT_COMMA node is used to insert 'expr'
- GenTreePtr gtBuildCommaList( GenTreePtr list,
- GenTreePtr expr);
+ GenTreePtr gtBuildCommaList(GenTreePtr list, GenTreePtr expr);
- void gtExtractSideEffList(GenTreePtr expr,
- GenTreePtr * pList,
- unsigned flags = GTF_SIDE_EFFECT,
- bool ignoreRoot = false);
+ void gtExtractSideEffList(GenTreePtr expr,
+ GenTreePtr* pList,
+ unsigned flags = GTF_SIDE_EFFECT,
+ bool ignoreRoot = false);
- GenTreePtr gtGetThisArg(GenTreePtr call);
+ GenTreePtr gtGetThisArg(GenTreePtr call);
- // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the
+ // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the
// static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but
// complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing
// the given "fldHnd", is such an object pointer.
- bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd);
+ bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd);
// Return true if call is a recursive call; return false otherwise.
- bool gtIsRecursiveCall(GenTreeCall * call) { return (call->gtCallMethHnd == info.compMethodHnd); }
+ bool gtIsRecursiveCall(GenTreeCall* call)
+ {
+ return (call->gtCallMethHnd == info.compMethodHnd);
+ }
//-------------------------------------------------------------------------
- GenTreePtr gtFoldExpr (GenTreePtr tree);
- GenTreePtr
+ GenTreePtr gtFoldExpr(GenTreePtr tree);
+ GenTreePtr
#ifdef __clang__
// TODO-Amd64-Unix: Remove this when the clang optimizer is fixed and/or the method implementation is
// refactored in a simpler code. This is a workaround for a bug in the clang-3.5 optimizer. The issue is that in
@@ -2046,104 +2058,96 @@ public:
// the implementation of the method in gentree.cpp. For the case of lval1 and lval2 equal to MIN_LONG
// (0x8000000000000000) this results in raising a SIGFPE. The method implementation is rather complex. Disable
// optimizations for now.
- __attribute__((optnone))
+ __attribute__((optnone))
#endif // __clang__
- gtFoldExprConst(GenTreePtr tree);
- GenTreePtr gtFoldExprSpecial(GenTreePtr tree);
- GenTreePtr gtFoldExprCompare(GenTreePtr tree);
+ gtFoldExprConst(GenTreePtr tree);
+ GenTreePtr gtFoldExprSpecial(GenTreePtr tree);
+ GenTreePtr gtFoldExprCompare(GenTreePtr tree);
//-------------------------------------------------------------------------
// Get the handle, if any.
- CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent (GenTreePtr tree);
+ CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTreePtr tree);
// Get the handle, and assert if not found.
- CORINFO_CLASS_HANDLE gtGetStructHandle (GenTreePtr tree);
+ CORINFO_CLASS_HANDLE gtGetStructHandle(GenTreePtr tree);
- //-------------------------------------------------------------------------
- // Functions to display the trees
+//-------------------------------------------------------------------------
+// Functions to display the trees
#ifdef DEBUG
- void gtDispNode (GenTreePtr tree,
- IndentStack* indentStack,
- __in_z const char* msg);
-
- void gtDispVN (GenTreePtr tree);
- void gtDispConst (GenTreePtr tree);
- void gtDispLeaf (GenTreePtr tree,
- IndentStack* indentStack);
- void gtDispNodeName (GenTreePtr tree);
- void gtDispRegVal (GenTreePtr tree);
-
- enum IndentInfo { IINone, IIArc, IIArcTop, IIArcBottom, IIEmbedded, IIError, IndentInfoCount };
- void gtDispChild (GenTreePtr child,
- IndentStack* indentStack,
- IndentInfo arcType,
- __in_opt const char * msg = nullptr,
- bool topOnly = false);
- void gtDispTree (GenTreePtr tree,
- IndentStack* indentStack = nullptr,
- __in_opt const char* msg = nullptr,
- bool topOnly = false);
- void gtGetLclVarNameInfo(unsigned lclNum,
- const char** ilKindOut,
- const char** ilNameOut,
- unsigned* ilNumOut);
- int gtGetLclVarName (unsigned lclNum,
- char* buf,
- unsigned buf_remaining);
- char* gtGetLclVarName (unsigned lclNum);
- void gtDispLclVar (unsigned varNum,
- bool padForBiggestDisp = true);
- void gtDispTreeList (GenTreePtr tree,
- IndentStack* indentStack = nullptr);
- void gtGetArgMsg (GenTreePtr call,
- GenTreePtr arg,
- unsigned argNum,
- int listCount,
- char* bufp,
- unsigned bufLength);
- void gtGetLateArgMsg (GenTreePtr call,
- GenTreePtr arg,
- int argNum,
- int listCount,
- char* bufp,
- unsigned bufLength);
- void gtDispArgList (GenTreePtr tree,
- IndentStack* indentStack);
- void gtDispFieldSeq (FieldSeqNode* pfsn);
-
- GenTreePtr gtDispLinearTree(GenTreeStmt* curStmt,
- GenTreePtr nextLinearNode,
- GenTreePtr tree,
- IndentStack* indentStack,
- __in_opt const char* msg = nullptr);
- GenTreePtr gtDispLinearStmt(GenTreeStmt* stmt,
- IndentStack* indentStack = nullptr);
+ void gtDispNode(GenTreePtr tree, IndentStack* indentStack, __in_z const char* msg);
+
+ void gtDispVN(GenTreePtr tree);
+ void gtDispConst(GenTreePtr tree);
+ void gtDispLeaf(GenTreePtr tree, IndentStack* indentStack);
+ void gtDispNodeName(GenTreePtr tree);
+ void gtDispRegVal(GenTreePtr tree);
+
+ enum IndentInfo
+ {
+ IINone,
+ IIArc,
+ IIArcTop,
+ IIArcBottom,
+ IIEmbedded,
+ IIError,
+ IndentInfoCount
+ };
+ void gtDispChild(GenTreePtr child,
+ IndentStack* indentStack,
+ IndentInfo arcType,
+ __in_opt const char* msg = nullptr,
+ bool topOnly = false);
+ void gtDispTree(GenTreePtr tree,
+ IndentStack* indentStack = nullptr,
+ __in_opt const char* msg = nullptr,
+ bool topOnly = false);
+ void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut);
+ int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining);
+ char* gtGetLclVarName(unsigned lclNum);
+ void gtDispLclVar(unsigned varNum, bool padForBiggestDisp = true);
+ void gtDispTreeList(GenTreePtr tree, IndentStack* indentStack = nullptr);
+ void gtGetArgMsg(GenTreePtr call, GenTreePtr arg, unsigned argNum, int listCount, char* bufp, unsigned bufLength);
+ void gtGetLateArgMsg(GenTreePtr call, GenTreePtr arg, int argNum, int listCount, char* bufp, unsigned bufLength);
+ void gtDispArgList(GenTreePtr tree, IndentStack* indentStack);
+ void gtDispFieldSeq(FieldSeqNode* pfsn);
+
+ GenTreePtr gtDispLinearTree(GenTreeStmt* curStmt,
+ GenTreePtr nextLinearNode,
+ GenTreePtr tree,
+ IndentStack* indentStack,
+ __in_opt const char* msg = nullptr);
+ GenTreePtr gtDispLinearStmt(GenTreeStmt* stmt, IndentStack* indentStack = nullptr);
#endif
// For tree walks
- enum fgWalkResult { WALK_CONTINUE, WALK_SKIP_SUBTREES, WALK_ABORT };
- struct fgWalkData;
- typedef fgWalkResult (fgWalkPreFn )(GenTreePtr * pTree, fgWalkData *data);
- typedef fgWalkResult (fgWalkPostFn)(GenTreePtr * pTree, fgWalkData *data);
+ enum fgWalkResult
+ {
+ WALK_CONTINUE,
+ WALK_SKIP_SUBTREES,
+ WALK_ABORT
+ };
+ struct fgWalkData;
+ typedef fgWalkResult(fgWalkPreFn)(GenTreePtr* pTree, fgWalkData* data);
+ typedef fgWalkResult(fgWalkPostFn)(GenTreePtr* pTree, fgWalkData* data);
#ifdef DEBUG
- static fgWalkPreFn gtAssertColonCond;
+ static fgWalkPreFn gtAssertColonCond;
#endif
- static fgWalkPreFn gtMarkColonCond;
- static fgWalkPreFn gtClearColonCond;
+ static fgWalkPreFn gtMarkColonCond;
+ static fgWalkPreFn gtClearColonCond;
- GenTreePtr * gtFindLink(GenTreePtr stmt, GenTreePtr node);
- bool gtHasCatchArg(GenTreePtr tree);
- bool gtHasUnmanagedCall(GenTreePtr tree);
+ GenTreePtr* gtFindLink(GenTreePtr stmt, GenTreePtr node);
+ bool gtHasCatchArg(GenTreePtr tree);
+ bool gtHasUnmanagedCall(GenTreePtr tree);
- typedef ArrayStack<GenTree*> GenTreeStack;
+ typedef ArrayStack<GenTree*> GenTreeStack;
- static bool gtHasCallOnStack(GenTreeStack *parentStack);
- void gtCheckQuirkAddrExposedLclVar(GenTreePtr argTree, GenTreeStack* parentStack);
+ static bool gtHasCallOnStack(GenTreeStack* parentStack);
+ void gtCheckQuirkAddrExposedLclVar(GenTreePtr argTree, GenTreeStack* parentStack);
- //=========================================================================
- // BasicBlock functions
+//=========================================================================
+// BasicBlock functions
#ifdef DEBUG
// This is a debug flag we will use to assert when creating a block during codegen
// as this interferes with procedure splitting. If you know what you're doing, set
@@ -2151,41 +2155,41 @@ public:
bool fgSafeBasicBlockCreation;
#endif
- BasicBlock * bbNewBasicBlock (BBjumpKinds jumpKind);
+ BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind);
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX LclVarsInfo XX
-XX XX
-XX The variables to be used by the code generator. XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX LclVarsInfo XX
+ XX XX
+ XX The variables to be used by the code generator. XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
//
// For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will
// be placed in the stack frame and its fields must be laid out sequentially.
- //
+ //
// For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by
// a local variable that can be enregistered or placed in the stack frame.
// The fields do not need to be laid out sequentially
- //
- enum lvaPromotionType { PROMOTION_TYPE_NONE, // The struct local is not promoted
- PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted,
- // and its field locals are independent of its parent struct local.
- PROMOTION_TYPE_DEPENDENT // The struct local is promoted,
- // but its field locals depend on its parent struct local.
- };
-
- static int __cdecl RefCntCmp(const void *op1, const void *op2);
- static int __cdecl WtdRefCntCmp(const void *op1, const void *op2);
-
+ //
+ enum lvaPromotionType
+ {
+ PROMOTION_TYPE_NONE, // The struct local is not promoted
+ PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted,
+ // and its field locals are independent of its parent struct local.
+ PROMOTION_TYPE_DEPENDENT // The struct local is promoted,
+ // but its field locals depend on its parent struct local.
+ };
+ static int __cdecl RefCntCmp(const void* op1, const void* op2);
+ static int __cdecl WtdRefCntCmp(const void* op1, const void* op2);
-/*****************************************************************************/
+ /*****************************************************************************/
enum FrameLayoutState
{
@@ -2197,67 +2201,66 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
FINAL_FRAME_LAYOUT
};
-public :
-
- bool lvaRefCountingStarted; // Set to true when we have started counting the local vars
- bool lvaLocalVarRefCounted; // Set to true after we have called lvaMarkLocalVars()
- bool lvaSortAgain; // true: We need to sort the lvaTable
- bool lvaTrackedFixed; // true: We cannot add new 'tracked' variable
- unsigned lvaCount; // total number of locals
+public:
+ bool lvaRefCountingStarted; // Set to true when we have started counting the local vars
+ bool lvaLocalVarRefCounted; // Set to true after we have called lvaMarkLocalVars()
+ bool lvaSortAgain; // true: We need to sort the lvaTable
+ bool lvaTrackedFixed; // true: We cannot add new 'tracked' variable
+ unsigned lvaCount; // total number of locals
- unsigned lvaRefCount; // total number of references to locals
- LclVarDsc * lvaTable; // variable descriptor table
- unsigned lvaTableCnt; // lvaTable size (>= lvaCount)
+ unsigned lvaRefCount; // total number of references to locals
+ LclVarDsc* lvaTable; // variable descriptor table
+ unsigned lvaTableCnt; // lvaTable size (>= lvaCount)
- LclVarDsc * * lvaRefSorted; // table sorted by refcount
+ LclVarDsc** lvaRefSorted; // table sorted by refcount
- unsigned short lvaTrackedCount; // actual # of locals being tracked
- unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked
+ unsigned short lvaTrackedCount; // actual # of locals being tracked
+ unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
// Only for AMD64 System V: cache the first caller stack homed argument.
- unsigned lvaFirstStackIncomingArgNum; // First argument with stack slot in the caller.
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+ unsigned lvaFirstStackIncomingArgNum; // First argument with stack slot in the caller.
+#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
#ifdef DEBUG
- VARSET_TP lvaTrackedVars; // set of tracked variables
+ VARSET_TP lvaTrackedVars; // set of tracked variables
#endif
#ifndef _TARGET_64BIT_
- VARSET_TP lvaLongVars; // set of long (64-bit) variables
+ VARSET_TP lvaLongVars; // set of long (64-bit) variables
#endif
- VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables
+ VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables
- unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices.
- // If that changes, this changes. VarSets from different epochs
- // cannot be meaningfully combined.
+ unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices.
+ // If that changes, this changes. VarSets from different epochs
+ // cannot be meaningfully combined.
- unsigned GetCurLVEpoch()
+ unsigned GetCurLVEpoch()
{
return lvaCurEpoch;
}
- // reverse map of tracked number to var number
- unsigned lvaTrackedToVarNum[lclMAX_TRACKED];
+ // reverse map of tracked number to var number
+ unsigned lvaTrackedToVarNum[lclMAX_TRACKED];
#ifdef LEGACY_BACKEND
- // variable interference graph
- VARSET_TP lvaVarIntf[lclMAX_TRACKED];
+ // variable interference graph
+ VARSET_TP lvaVarIntf[lclMAX_TRACKED];
#endif
- // variable preference graph
- VARSET_TP lvaVarPref[lclMAX_TRACKED];
+ // variable preference graph
+ VARSET_TP lvaVarPref[lclMAX_TRACKED];
#if DOUBLE_ALIGN
#ifdef DEBUG
- // # of procs compiled with a double-aligned stack
- static unsigned s_lvaDoubleAlignedProcsCount;
+ // # of procs compiled with a double-aligned stack
+ static unsigned s_lvaDoubleAlignedProcsCount;
#endif
#endif
// Getters and setters for address-exposed and do-not-enregister local var properties.
- bool lvaVarAddrExposed (unsigned varNum);
- void lvaSetVarAddrExposed (unsigned varNum);
- bool lvaVarDoNotEnregister (unsigned varNum);
+ bool lvaVarAddrExposed(unsigned varNum);
+ void lvaSetVarAddrExposed(unsigned varNum);
+ bool lvaVarDoNotEnregister(unsigned varNum);
#ifdef DEBUG
// Reasons why we can't enregister. Some of these correspond to debug properties of local vars.
enum DoNotEnregisterReason
@@ -2268,38 +2271,39 @@ public :
DNER_VMNeedsStackAddr,
DNER_LiveInOutOfHandler,
DNER_LiveAcrossUnmanagedCall,
- DNER_BlockOp, // Is read or written via a block operation that explicitly takes the address.
+ DNER_BlockOp, // Is read or written via a block operation that explicitly takes the address.
#ifdef JIT32_GCENCODER
DNER_PinningRef,
#endif
};
#endif
- void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));
+ void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));
- unsigned lvaVarargsHandleArg;
+ unsigned lvaVarargsHandleArg;
#ifdef _TARGET_X86_
- unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack arguments
-#endif // _TARGET_X86_
+ unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack
+ // arguments
+#endif // _TARGET_X86_
- unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
- unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
+ unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
+ unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
#if FEATURE_FIXED_OUT_ARGS
- unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
+ unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
#endif
- unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods
- // that tracks whether the lock has been taken
+ unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods
+ // that tracks whether the lock has been taken
- unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
- // However, if there is a "ldarga 0" or "starg 0" in the IL,
- // we will redirect all "ldarg(a) 0" and "starg 0" to this temp.
+ unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
+ // However, if there is a "ldarga 0" or "starg 0" in the IL,
+ // we will redirect all "ldarg(a) 0" and "starg 0" to this temp.
- unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
- // in case there are multiple BBJ_RETURN blocks in the inlinee.
+ unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
+ // in case there are multiple BBJ_RETURN blocks in the inlinee.
#if FEATURE_FIXED_OUT_ARGS
- unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space
- unsigned lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
-#endif // FEATURE_FIXED_OUT_ARGS
+ unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space
+ unsigned lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
+#endif // FEATURE_FIXED_OUT_ARGS
#ifdef _TARGET_ARM_
// On architectures whose ABIs allow structs to be passed in registers, struct promotion will sometimes
@@ -2307,37 +2311,38 @@ public :
// field variables into an argument register is a hard problem. It's easier to reserve a word of memory into which
// such a field can be copied, after which the assembled memory word can be read into the register. We will allocate
// this variable to be this scratch word whenever struct promotion occurs.
- unsigned lvaPromotedStructAssemblyScratchVar;
+ unsigned lvaPromotedStructAssemblyScratchVar;
#endif // _TARGET_ARM_
-
#ifdef DEBUG
- unsigned lvaReturnEspCheck; // confirms ESP not corrupted on return
- unsigned lvaCallEspCheck; // confirms ESP not corrupted after a call
+ unsigned lvaReturnEspCheck; // confirms ESP not corrupted on return
+ unsigned lvaCallEspCheck; // confirms ESP not corrupted after a call
#endif
- bool lvaGenericsContextUsed;
+ bool lvaGenericsContextUsed;
- bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or CORINFO_GENERICS_CTXT_FROM_THIS?
- bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?
+ bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or
+ // CORINFO_GENERICS_CTXT_FROM_THIS?
+ bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?
- //-------------------------------------------------------------------------
- // All these frame offsets are inter-related and must be kept in sync
+//-------------------------------------------------------------------------
+// All these frame offsets are inter-related and must be kept in sync
#if !FEATURE_EH_FUNCLETS
// This is used for the callable handlers
- unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
-#endif // FEATURE_EH_FUNCLETS
+ unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
+#endif // FEATURE_EH_FUNCLETS
+
+ unsigned lvaCachedGenericContextArgOffs;
+ unsigned lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as
+ // THIS pointer
- unsigned lvaCachedGenericContextArgOffs;
- unsigned lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as THIS pointer
-
- unsigned lvaLocAllocSPvar; // variable which has the result of the last alloca/localloc
+ unsigned lvaLocAllocSPvar; // variable which has the result of the last alloca/localloc
- unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper
+ unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper
// TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
- // after the reg predict we will use a computed maxTmpSize
+ // after the reg predict we will use a computed maxTmpSize
// which is based upon the number of spill temps predicted by reg predict
// All this is necessary because if we under-estimate the size of the spill
// temps we could fail when encoding instructions that reference stack offsets for ARM.
@@ -2347,174 +2352,165 @@ public :
//-------------------------------------------------------------------------
- unsigned lvaGetMaxSpillTempSize();
+ unsigned lvaGetMaxSpillTempSize();
#ifdef _TARGET_ARM_
- bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask);
+ bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask);
#endif // _TARGET_ARM_
- void lvaAssignFrameOffsets(FrameLayoutState curState);
- void lvaFixVirtualFrameOffsets();
+ void lvaAssignFrameOffsets(FrameLayoutState curState);
+ void lvaFixVirtualFrameOffsets();
#ifndef LEGACY_BACKEND
- void lvaUpdateArgsWithInitialReg();
+ void lvaUpdateArgsWithInitialReg();
#endif // !LEGACY_BACKEND
- void lvaAssignVirtualFrameOffsetsToArgs();
+ void lvaAssignVirtualFrameOffsetsToArgs();
#ifdef UNIX_AMD64_ABI
- int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int * callerArgOffset);
-#else // !UNIX_AMD64_ABI
- int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs);
+ int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset);
+#else // !UNIX_AMD64_ABI
+ int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs);
#endif // !UNIX_AMD64_ABI
- void lvaAssignVirtualFrameOffsetsToLocals();
- int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs);
+ void lvaAssignVirtualFrameOffsetsToLocals();
+ int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs);
#ifdef _TARGET_AMD64_
// Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even.
- bool lvaIsCalleeSavedIntRegCountEven();
+ bool lvaIsCalleeSavedIntRegCountEven();
#endif
- void lvaAlignFrame();
- void lvaAssignFrameOffsetsToPromotedStructs();
- int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign);
+ void lvaAlignFrame();
+ void lvaAssignFrameOffsetsToPromotedStructs();
+ int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign);
-#ifdef DEBUG
- void lvaDumpRegLocation(unsigned lclNum);
- void lvaDumpFrameLocation(unsigned lclNum);
- void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6);
- void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame layout state defined by lvaDoneFrameLayout
+#ifdef DEBUG
+ void lvaDumpRegLocation(unsigned lclNum);
+ void lvaDumpFrameLocation(unsigned lclNum);
+ void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6);
+ void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame
+ // layout state defined by lvaDoneFrameLayout
#endif
// Limit frame size to 1GB. The maximum is 2GB in theory - make it intentionally smaller
// to avoid bugs from borderline cases.
-#define MAX_FrameSize 0x3FFFFFFF
- void lvaIncrementFrameSize(unsigned size);
+#define MAX_FrameSize 0x3FFFFFFF
+ void lvaIncrementFrameSize(unsigned size);
- unsigned lvaFrameSize(FrameLayoutState curState);
+ unsigned lvaFrameSize(FrameLayoutState curState);
// Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based.
- int lvaToCallerSPRelativeOffset(int offs, bool isFpBased);
+ int lvaToCallerSPRelativeOffset(int offs, bool isFpBased);
// Returns the caller-SP-relative offset for the local variable "varNum."
- int lvaGetCallerSPRelativeOffset(unsigned varNum);
+ int lvaGetCallerSPRelativeOffset(unsigned varNum);
// Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc.
- int lvaGetSPRelativeOffset(unsigned varNum);
+ int lvaGetSPRelativeOffset(unsigned varNum);
- int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased);
- int lvaGetInitialSPRelativeOffset(unsigned varNum);
+ int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased);
+ int lvaGetInitialSPRelativeOffset(unsigned varNum);
//------------------------ For splitting types ----------------------------
- void lvaInitTypeRef ();
+ void lvaInitTypeRef();
+ void lvaInitArgs(InitVarDscInfo* varDscInfo);
+ void lvaInitThisPtr(InitVarDscInfo* varDscInfo);
+ void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo);
+ void lvaInitUserArgs(InitVarDscInfo* varDscInfo);
+ void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo);
+ void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo);
- void lvaInitArgs (InitVarDscInfo * varDscInfo);
- void lvaInitThisPtr (InitVarDscInfo * varDscInfo);
- void lvaInitRetBuffArg (InitVarDscInfo * varDscInfo);
- void lvaInitUserArgs (InitVarDscInfo * varDscInfo);
- void lvaInitGenericsCtxt (InitVarDscInfo * varDscInfo);
- void lvaInitVarArgsHandle(InitVarDscInfo * varDscInfo);
+ void lvaInitVarDsc(LclVarDsc* varDsc,
+ unsigned varNum,
+ CorInfoType corInfoType,
+ CORINFO_CLASS_HANDLE typeHnd,
+ CORINFO_ARG_LIST_HANDLE varList,
+ CORINFO_SIG_INFO* varSig);
- void lvaInitVarDsc (LclVarDsc * varDsc,
- unsigned varNum,
- CorInfoType corInfoType,
- CORINFO_CLASS_HANDLE typeHnd,
- CORINFO_ARG_LIST_HANDLE varList,
- CORINFO_SIG_INFO * varSig);
-
- static unsigned lvaTypeRefMask (var_types type);
+ static unsigned lvaTypeRefMask(var_types type);
- var_types lvaGetActualType (unsigned lclNum);
- var_types lvaGetRealType (unsigned lclNum);
+ var_types lvaGetActualType(unsigned lclNum);
+ var_types lvaGetRealType(unsigned lclNum);
//-------------------------------------------------------------------------
- void lvaInit ();
+ void lvaInit();
- unsigned lvaArgSize (const void * argTok);
- unsigned lvaLclSize (unsigned varNum);
- unsigned lvaLclExactSize (unsigned varNum);
+ unsigned lvaArgSize(const void* argTok);
+ unsigned lvaLclSize(unsigned varNum);
+ unsigned lvaLclExactSize(unsigned varNum);
- bool lvaLclVarRefs (GenTreePtr tree,
- GenTreePtr * findPtr,
- varRefKinds * refsPtr,
- void* result);
+ bool lvaLclVarRefs(GenTreePtr tree, GenTreePtr* findPtr, varRefKinds* refsPtr, void* result);
// Call lvaLclVarRefs on "true"; accumulate "*result" into whichever of
// "allVars" and "trkdVars" is indiated by the nullness of "findPtr"; return
// the return result.
- bool lvaLclVarRefsAccum (GenTreePtr tree,
- GenTreePtr * findPtr,
- varRefKinds * refsPtr,
- ALLVARSET_TP* allVars,
- VARSET_TP* trkdVars);
+ bool lvaLclVarRefsAccum(
+ GenTreePtr tree, GenTreePtr* findPtr, varRefKinds* refsPtr, ALLVARSET_TP* allVars, VARSET_TP* trkdVars);
// If "findPtr" is non-NULL, assumes "result" is an "ALLVARSET_TP*", and
// (destructively) unions "allVars" into "*result". Otherwise, assumes "result" is a "VARSET_TP*",
// and (destructively) unions "trkdVars" into "*result".
- void lvaLclVarRefsAccumIntoRes(GenTreePtr * findPtr,
- void* result,
- ALLVARSET_VALARG_TP allVars,
- VARSET_VALARG_TP trkdVars);
+ void lvaLclVarRefsAccumIntoRes(GenTreePtr* findPtr,
+ void* result,
+ ALLVARSET_VALARG_TP allVars,
+ VARSET_VALARG_TP trkdVars);
- bool lvaHaveManyLocals () const;
+ bool lvaHaveManyLocals() const;
- unsigned lvaGrabTemp (bool shortLifetime
- DEBUGARG(const char * reason) );
- unsigned lvaGrabTemps (unsigned cnt
- DEBUGARG(const char * reason) );
- unsigned lvaGrabTempWithImplicitUse(bool shortLifetime
- DEBUGARG(const char * reason));
+ unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason));
+ unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason));
+ unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason));
- void lvaSortOnly ();
- void lvaSortByRefCount ();
- void lvaDumpRefCounts ();
+ void lvaSortOnly();
+ void lvaSortByRefCount();
+ void lvaDumpRefCounts();
- void lvaMarkLocalVars (BasicBlock* block);
+ void lvaMarkLocalVars(BasicBlock* block);
- void lvaMarkLocalVars (); // Local variable ref-counting
+ void lvaMarkLocalVars(); // Local variable ref-counting
- void lvaAllocOutgoingArgSpace(); // 'Commit' lvaOutgoingArgSpaceSize and lvaOutgoingArgSpaceVar
+ void lvaAllocOutgoingArgSpace(); // 'Commit' lvaOutgoingArgSpaceSize and lvaOutgoingArgSpaceVar
- VARSET_VALRET_TP lvaStmtLclMask (GenTreePtr stmt);
+ VARSET_VALRET_TP lvaStmtLclMask(GenTreePtr stmt);
- static fgWalkPreFn lvaIncRefCntsCB;
- void lvaIncRefCnts (GenTreePtr tree);
+ static fgWalkPreFn lvaIncRefCntsCB;
+ void lvaIncRefCnts(GenTreePtr tree);
- static fgWalkPreFn lvaDecRefCntsCB;
- void lvaDecRefCnts (GenTreePtr tree);
- void lvaRecursiveDecRefCounts(GenTreePtr tree);
- void lvaRecursiveIncRefCounts(GenTreePtr tree);
+ static fgWalkPreFn lvaDecRefCntsCB;
+ void lvaDecRefCnts(GenTreePtr tree);
+ void lvaRecursiveDecRefCounts(GenTreePtr tree);
+ void lvaRecursiveIncRefCounts(GenTreePtr tree);
-#ifdef DEBUG
+#ifdef DEBUG
struct lvaStressLclFldArgs
{
Compiler* m_pCompiler;
bool m_bFirstPass;
};
- static fgWalkPreFn lvaStressLclFldCB;
- void lvaStressLclFld ();
+ static fgWalkPreFn lvaStressLclFldCB;
+ void lvaStressLclFld();
- void lvaDispVarSet (VARSET_VALARG_TP set, VARSET_VALARG_TP allVars);
- void lvaDispVarSet (VARSET_VALARG_TP set);
+ void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars);
+ void lvaDispVarSet(VARSET_VALARG_TP set);
#endif
#ifdef _TARGET_ARM_
- int lvaFrameAddress (int varNum, bool mustBeFPBased, regNumber * pBaseReg, int addrModeOffset);
+ int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset);
#else
- int lvaFrameAddress (int varNum, bool* pFPbased);
+ int lvaFrameAddress(int varNum, bool* pFPbased);
#endif
- bool lvaIsParameter (unsigned varNum);
- bool lvaIsRegArgument (unsigned varNum);
- BOOL lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument?
- BOOL lvaIsOriginalThisReadOnly (); // return TRUE if there is no place in the code
- // that writes to arg0
+ bool lvaIsParameter(unsigned varNum);
+ bool lvaIsRegArgument(unsigned varNum);
+ BOOL lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument?
+ BOOL lvaIsOriginalThisReadOnly(); // return TRUE if there is no place in the code
+ // that writes to arg0
// Struct parameters that are passed by reference are marked as both lvIsParam and lvIsTemp
// (this is an overload of lvIsTemp because there are no temp parameters).
// For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference.
// For ARM64, this is structs larger than 16 bytes that are passed by reference.
- bool lvaIsImplicitByRefLocal(unsigned varNum)
+ bool lvaIsImplicitByRefLocal(unsigned varNum)
{
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
LclVarDsc* varDsc = &(lvaTable[varNum]);
@@ -2528,116 +2524,113 @@ public :
}
// Returns true if this local var is a multireg struct
- bool lvaIsMultiregStruct(LclVarDsc* varDsc);
+ bool lvaIsMultiregStruct(LclVarDsc* varDsc);
// If the class is a TYP_STRUCT, get/set a class handle describing it
- CORINFO_CLASS_HANDLE lvaGetStruct (unsigned varNum);
- void lvaSetStruct (unsigned varNum,
- CORINFO_CLASS_HANDLE typeHnd,
- bool unsafeValueClsCheck,
- bool setTypeInfo = true);
+ CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum);
+ void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true);
#define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct
// Info about struct fields
struct lvaStructFieldInfo
{
- CORINFO_FIELD_HANDLE fldHnd;
- unsigned char fldOffset;
- unsigned char fldOrdinal;
- var_types fldType;
- unsigned fldSize;
- CORINFO_CLASS_HANDLE fldTypeHnd;
+ CORINFO_FIELD_HANDLE fldHnd;
+ unsigned char fldOffset;
+ unsigned char fldOrdinal;
+ var_types fldType;
+ unsigned fldSize;
+ CORINFO_CLASS_HANDLE fldTypeHnd;
};
// Info about struct to be promoted.
struct lvaStructPromotionInfo
- {
- CORINFO_CLASS_HANDLE typeHnd;
+ {
+ CORINFO_CLASS_HANDLE typeHnd;
bool canPromote;
bool requiresScratchVar;
bool containsHoles;
bool customLayout;
- unsigned char fieldCnt;
+ unsigned char fieldCnt;
lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct];
lvaStructPromotionInfo()
- : typeHnd (0)
- , canPromote (false)
- , requiresScratchVar(false)
- , containsHoles (false)
- , customLayout (false)
- {}
- };
-
- static int __cdecl lvaFieldOffsetCmp(const void * field1, const void * field2);
- void lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd, lvaStructPromotionInfo * StructPromotionInfo, bool sortFields);
- void lvaCanPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo * StructPromotionInfo);
- void lvaPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo * StructPromotionInfo);
+ : typeHnd(nullptr), canPromote(false), requiresScratchVar(false), containsHoles(false), customLayout(false)
+ {
+ }
+ };
+
+ static int __cdecl lvaFieldOffsetCmp(const void* field1, const void* field2);
+ void lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
+ lvaStructPromotionInfo* StructPromotionInfo,
+ bool sortFields);
+ void lvaCanPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo* StructPromotionInfo);
+ void lvaPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo* StructPromotionInfo);
#if !defined(_TARGET_64BIT_)
- void lvaPromoteLongVars();
+ void lvaPromoteLongVars();
#endif // !defined(_TARGET_64BIT_)
- unsigned lvaGetFieldLocal(LclVarDsc * varDsc, unsigned int fldOffset);
- lvaPromotionType lvaGetPromotionType (const LclVarDsc * varDsc);
- lvaPromotionType lvaGetPromotionType (unsigned varNum);
- lvaPromotionType lvaGetParentPromotionType (const LclVarDsc * varDsc);
- lvaPromotionType lvaGetParentPromotionType (unsigned varNum);
- bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc * varDsc);
- bool lvaIsGCTracked(const LclVarDsc* varDsc);
-
- BYTE * lvaGetGcLayout (unsigned varNum);
- bool lvaTypeIsGC (unsigned varNum);
- unsigned lvaGSSecurityCookie; // LclVar number
- bool lvaTempsHaveLargerOffsetThanVars();
-
- unsigned lvaSecurityObject; // variable representing the security object on the stack
- unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX
+ unsigned lvaGetFieldLocal(LclVarDsc* varDsc, unsigned int fldOffset);
+ lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc);
+ lvaPromotionType lvaGetPromotionType(unsigned varNum);
+ lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc);
+ lvaPromotionType lvaGetParentPromotionType(unsigned varNum);
+ bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc);
+ bool lvaIsGCTracked(const LclVarDsc* varDsc);
+
+ BYTE* lvaGetGcLayout(unsigned varNum);
+ bool lvaTypeIsGC(unsigned varNum);
+ unsigned lvaGSSecurityCookie; // LclVar number
+ bool lvaTempsHaveLargerOffsetThanVars();
+
+ unsigned lvaSecurityObject; // variable representing the security object on the stack
+ unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX
#if FEATURE_EH_FUNCLETS
- unsigned lvaPSPSym; // variable representing the PSPSym
+ unsigned lvaPSPSym; // variable representing the PSPSym
#endif
- InlineInfo* impInlineInfo;
- InlineStrategy* m_inlineStrategy;
+ InlineInfo* impInlineInfo;
+ InlineStrategy* m_inlineStrategy;
// The Compiler* that is the root of the inlining tree of which "this" is a member.
- Compiler* impInlineRoot();
+ Compiler* impInlineRoot();
#if defined(DEBUG) || defined(INLINE_DATA)
- unsigned __int64 getInlineCycleCount() { return m_compCycles; }
+ unsigned __int64 getInlineCycleCount()
+ {
+ return m_compCycles;
+ }
#endif // defined(DEBUG) || defined(INLINE_DATA)
- bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method.
- bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters of this method.
+ bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method.
+ bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters of this method.
//=========================================================================
// PROTECTED
//=========================================================================
-protected :
-
- //---------------- Local variable ref-counting ----------------------------
+protected:
+//---------------- Local variable ref-counting ----------------------------
#if ASSERTION_PROP
- BasicBlock * lvaMarkRefsCurBlock;
- GenTreePtr lvaMarkRefsCurStmt;
+ BasicBlock* lvaMarkRefsCurBlock;
+ GenTreePtr lvaMarkRefsCurStmt;
#endif
BasicBlock::weight_t lvaMarkRefsWeight;
- static fgWalkPreFn lvaMarkLclRefsCallback;
- void lvaMarkLclRefs (GenTreePtr tree);
-
+ static fgWalkPreFn lvaMarkLclRefsCallback;
+ void lvaMarkLclRefs(GenTreePtr tree);
// Keeps the mapping from SSA #'s to VN's for the implicit "Heap" variable.
- PerSsaArray lvHeapPerSsaData;
- unsigned lvHeapNumSsaNames;
+ PerSsaArray lvHeapPerSsaData;
+ unsigned lvHeapNumSsaNames;
- public:
+public:
// Returns the address of the per-Ssa data for "Heap" at the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
- LclSsaVarDsc* GetHeapPerSsaData(unsigned ssaNum)
+ LclSsaVarDsc* GetHeapPerSsaData(unsigned ssaNum)
{
assert(ssaNum != SsaConfig::RESERVED_SSA_NUM);
assert(SsaConfig::RESERVED_SSA_NUM == 0);
@@ -2646,296 +2639,268 @@ protected :
return &lvHeapPerSsaData.GetRef(ssaNum);
}
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX Importer XX
-XX XX
-XX Imports the given method and converts it to semantic trees XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
-
-public :
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX Importer XX
+ XX XX
+ XX Imports the given method and converts it to semantic trees XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
- void impInit ();
+public:
+ void impInit();
- void impImport (BasicBlock * method);
+ void impImport(BasicBlock* method);
- CORINFO_CLASS_HANDLE impGetRefAnyClass ();
- CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle();
- CORINFO_CLASS_HANDLE impGetTypeHandleClass ();
- CORINFO_CLASS_HANDLE impGetStringClass ();
- CORINFO_CLASS_HANDLE impGetObjectClass ();
+ CORINFO_CLASS_HANDLE impGetRefAnyClass();
+ CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle();
+ CORINFO_CLASS_HANDLE impGetTypeHandleClass();
+ CORINFO_CLASS_HANDLE impGetStringClass();
+ CORINFO_CLASS_HANDLE impGetObjectClass();
//=========================================================================
// PROTECTED
//=========================================================================
-protected :
-
+protected:
//-------------------- Stack manipulation ---------------------------------
- unsigned impStkSize; // Size of the full stack
+ unsigned impStkSize; // Size of the full stack
-#define SMALL_STACK_SIZE 16 // number of elements in impSmallStack
+#define SMALL_STACK_SIZE 16 // number of elements in impSmallStack
- StackEntry impSmallStack[SMALL_STACK_SIZE]; // Use this array if possible
+ StackEntry impSmallStack[SMALL_STACK_SIZE]; // Use this array if possible
- struct SavedStack // used to save/restore stack contents.
+ struct SavedStack // used to save/restore stack contents.
{
- unsigned ssDepth; // number of values on stack
- StackEntry * ssTrees; // saved tree values
+ unsigned ssDepth; // number of values on stack
+ StackEntry* ssTrees; // saved tree values
};
- bool impIsPrimitive(CorInfoType type);
- bool impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle);
+ bool impIsPrimitive(CorInfoType type);
+ bool impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle);
- void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN * pResolvedToken, CorInfoTokenKind kind);
- void impPushOnStackNoType(GenTreePtr tree);
+ void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind);
+ void impPushOnStackNoType(GenTreePtr tree);
- void impPushOnStack (GenTreePtr tree,
- typeInfo ti);
- void impPushNullObjRefOnStack();
- StackEntry impPopStack ();
- StackEntry impPopStack (CORINFO_CLASS_HANDLE& structTypeRet);
- GenTreePtr impPopStack (typeInfo& ti);
- StackEntry& impStackTop (unsigned n = 0);
+ void impPushOnStack(GenTreePtr tree, typeInfo ti);
+ void impPushNullObjRefOnStack();
+ StackEntry impPopStack();
+ StackEntry impPopStack(CORINFO_CLASS_HANDLE& structTypeRet);
+ GenTreePtr impPopStack(typeInfo& ti);
+ StackEntry& impStackTop(unsigned n = 0);
- void impSaveStackState (SavedStack * savePtr,
- bool copy);
- void impRestoreStackState(SavedStack * savePtr);
+ void impSaveStackState(SavedStack* savePtr, bool copy);
+ void impRestoreStackState(SavedStack* savePtr);
- GenTreePtr impImportLdvirtftn (GenTreePtr thisPtr,
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_CALL_INFO* pCallInfo);
+ GenTreePtr impImportLdvirtftn(GenTreePtr thisPtr,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_CALL_INFO* pCallInfo);
- void impImportAndPushBox (CORINFO_RESOLVED_TOKEN * pResolvedToken);
+ void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken);
- void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken,
- CORINFO_CALL_INFO* pCallInfo);
+ void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
- bool impCanPInvokeInline(var_types callRetTyp);
- bool impCanPInvokeInlineCallSite(var_types callRetTyp);
- void impCheckForPInvokeCall( GenTreePtr call,
- CORINFO_METHOD_HANDLE methHnd,
- CORINFO_SIG_INFO * sig,
- unsigned mflags);
- GenTreePtr impImportIndirectCall(CORINFO_SIG_INFO * sig,
- IL_OFFSETX ilOffset = BAD_IL_OFFSET);
- void impPopArgsForUnmanagedCall(GenTreePtr call,
- CORINFO_SIG_INFO * sig);
+ bool impCanPInvokeInline(var_types callRetTyp);
+ bool impCanPInvokeInlineCallSite(var_types callRetTyp);
+ void impCheckForPInvokeCall(GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags);
+ GenTreePtr impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset = BAD_IL_OFFSET);
+ void impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig);
- void impInsertHelperCall(CORINFO_HELPER_DESC * helperCall);
- void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result,
- CORINFO_HELPER_DESC * helperCall);
- void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result,
- CORINFO_HELPER_DESC * helperCall);
+ void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall);
+ void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
+ void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
- void impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
- CORINFO_METHOD_HANDLE calleeMethodHnd,
- CORINFO_CLASS_HANDLE delegateTypeHnd);
+ void impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
+ CORINFO_METHOD_HANDLE calleeMethodHnd,
+ CORINFO_CLASS_HANDLE delegateTypeHnd);
- var_types impImportCall (OPCODE opcode,
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
- GenTreePtr newobjThis,
- int prefixFlags,
- CORINFO_CALL_INFO* callInfo,
- IL_OFFSET rawILOffset);
+ var_types impImportCall(OPCODE opcode,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a
+ // type parameter?
+ GenTreePtr newobjThis,
+ int prefixFlags,
+ CORINFO_CALL_INFO* callInfo,
+ IL_OFFSET rawILOffset);
- bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO * methInfo);
+ bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo);
- GenTreePtr impFixupCallStructReturn(GenTreePtr call,
- CORINFO_CLASS_HANDLE retClsHnd);
+ GenTreePtr impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd);
- GenTreePtr impInitCallLongReturn (GenTreePtr call);
+ GenTreePtr impInitCallLongReturn(GenTreePtr call);
- GenTreePtr impFixupStructReturnType(GenTreePtr op,
- CORINFO_CLASS_HANDLE retClsHnd);
+ GenTreePtr impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd);
#ifdef DEBUG
- var_types impImportJitTestLabelMark(int numArgs);
+ var_types impImportJitTestLabelMark(int numArgs);
#endif // DEBUG
- GenTreePtr impInitClass(CORINFO_RESOLVED_TOKEN * pResolvedToken);
-
- GenTreePtr impImportStaticReadOnlyField(void * fldAddr, var_types lclTyp);
-
- GenTreePtr impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_ACCESS_FLAGS access,
- CORINFO_FIELD_INFO * pFieldInfo,
- var_types lclTyp);
-
- static void impBashVarAddrsToI (GenTreePtr tree1,
- GenTreePtr tree2 = NULL);
-
- GenTreePtr impImplicitIorI4Cast(GenTreePtr tree,
- var_types dstTyp);
-
- GenTreePtr impImplicitR4orR8Cast(GenTreePtr tree,
- var_types dstTyp);
-
- void impImportLeave (BasicBlock * block);
- void impResetLeaveBlock (BasicBlock * block,
- unsigned jmpAddr);
- BOOL impLocAllocOnStack ();
- GenTreePtr impIntrinsic (CORINFO_CLASS_HANDLE clsHnd,
- CORINFO_METHOD_HANDLE method,
- CORINFO_SIG_INFO * sig,
- int memberRef,
- bool readonlyCall,
- bool tailCall,
- CorInfoIntrinsics * pIntrinsicID);
- GenTreePtr impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
- CORINFO_SIG_INFO * sig,
- int memberRef,
- bool readonlyCall,
- CorInfoIntrinsics intrinsicID);
- GenTreePtr impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig);
-
- GenTreePtr impMethodPointer(CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_CALL_INFO * pCallInfo);
-
- GenTreePtr impTransformThis (GenTreePtr thisPtr,
- CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken,
- CORINFO_THIS_TRANSFORM transform);
+ GenTreePtr impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken);
+
+ GenTreePtr impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp);
+
+ GenTreePtr impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_ACCESS_FLAGS access,
+ CORINFO_FIELD_INFO* pFieldInfo,
+ var_types lclTyp);
+
+ static void impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2 = nullptr);
+
+ GenTreePtr impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp);
+
+ GenTreePtr impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp);
+
+ void impImportLeave(BasicBlock* block);
+ void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr);
+ BOOL impLocAllocOnStack();
+ GenTreePtr impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_METHOD_HANDLE method,
+ CORINFO_SIG_INFO* sig,
+ int memberRef,
+ bool readonlyCall,
+ bool tailCall,
+ CorInfoIntrinsics* pIntrinsicID);
+ GenTreePtr impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_SIG_INFO* sig,
+ int memberRef,
+ bool readonlyCall,
+ CorInfoIntrinsics intrinsicID);
+ GenTreePtr impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig);
+
+ GenTreePtr impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
+
+ GenTreePtr impTransformThis(GenTreePtr thisPtr,
+ CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
+ CORINFO_THIS_TRANSFORM transform);
//----------------- Manipulating the trees and stmts ----------------------
- GenTreePtr impTreeList; // Trees for the BB being imported
- GenTreePtr impTreeLast; // The last tree for the current BB
+ GenTreePtr impTreeList; // Trees for the BB being imported
+ GenTreePtr impTreeLast; // The last tree for the current BB
- enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 };
+ enum
+ {
+ CHECK_SPILL_ALL = -1,
+ CHECK_SPILL_NONE = -2
+ };
public:
- void impBeginTreeList ();
- void impEndTreeList (BasicBlock * block,
- GenTreePtr firstStmt,
- GenTreePtr lastStmt);
- void impEndTreeList (BasicBlock * block);
- void impAppendStmtCheck (GenTreePtr stmt,
- unsigned chkLevel);
- void impAppendStmt (GenTreePtr stmt,
- unsigned chkLevel);
- void impInsertStmtBefore (GenTreePtr stmt,
- GenTreePtr stmtBefore);
- GenTreePtr impAppendTree (GenTreePtr tree,
- unsigned chkLevel,
- IL_OFFSETX offset);
- void impInsertTreeBefore (GenTreePtr tree,
- IL_OFFSETX offset,
- GenTreePtr stmtBefore);
- void impAssignTempGen (unsigned tmp,
- GenTreePtr val,
- unsigned curLevel,
- GenTreePtr * pAfterStmt = NULL,
- IL_OFFSETX ilOffset = BAD_IL_OFFSET,
- BasicBlock * block = NULL);
- void impAssignTempGen (unsigned tmpNum,
- GenTreePtr val,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- GenTreePtr * pAfterStmt = NULL,
- IL_OFFSETX ilOffset = BAD_IL_OFFSET,
- BasicBlock * block = NULL);
- GenTreePtr impCloneExpr (GenTreePtr tree,
- GenTreePtr * clone,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- GenTreePtr * pAfterStmt
- DEBUGARG(const char * reason) );
- GenTreePtr impAssignStruct (GenTreePtr dest,
- GenTreePtr src,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- GenTreePtr * pAfterStmt = NULL,
- BasicBlock * block = NULL);
- GenTreePtr impAssignStructPtr (GenTreePtr dest,
- GenTreePtr src,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- GenTreePtr * pAfterStmt = NULL,
- BasicBlock * block = NULL);
-
- GenTreePtr impGetStructAddr (GenTreePtr structVal,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- bool willDeref);
-
- var_types impNormStructType (CORINFO_CLASS_HANDLE structHnd,
- BYTE* gcLayout = nullptr,
- unsigned* numGCVars = nullptr,
- var_types* simdBaseType = nullptr);
-
- GenTreePtr impNormStructVal (GenTreePtr structVal,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- bool forceNormalization = false);
-
- GenTreePtr impTokenToHandle (CORINFO_RESOLVED_TOKEN * pResolvedToken,
- BOOL *pRuntimeLookup = NULL,
- BOOL mustRestoreHandle = FALSE,
- BOOL importParent = FALSE);
-
- GenTreePtr impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN * pResolvedToken,
- BOOL *pRuntimeLookup = NULL,
- BOOL mustRestoreHandle = FALSE)
+ void impBeginTreeList();
+ void impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt);
+ void impEndTreeList(BasicBlock* block);
+ void impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel);
+ void impAppendStmt(GenTreePtr stmt, unsigned chkLevel);
+ void impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore);
+ GenTreePtr impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset);
+ void impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore);
+ void impAssignTempGen(unsigned tmp,
+ GenTreePtr val,
+ unsigned curLevel,
+ GenTreePtr* pAfterStmt = nullptr,
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET,
+ BasicBlock* block = nullptr);
+ void impAssignTempGen(unsigned tmpNum,
+ GenTreePtr val,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ GenTreePtr* pAfterStmt = nullptr,
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET,
+ BasicBlock* block = nullptr);
+ GenTreePtr impCloneExpr(GenTreePtr tree,
+ GenTreePtr* clone,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ GenTreePtr* pAfterStmt DEBUGARG(const char* reason));
+ GenTreePtr impAssignStruct(GenTreePtr dest,
+ GenTreePtr src,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ GenTreePtr* pAfterStmt = nullptr,
+ BasicBlock* block = nullptr);
+ GenTreePtr impAssignStructPtr(GenTreePtr dest,
+ GenTreePtr src,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ GenTreePtr* pAfterStmt = nullptr,
+ BasicBlock* block = nullptr);
+
+ GenTreePtr impGetStructAddr(GenTreePtr structVal,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ bool willDeref);
+
+ var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd,
+ BYTE* gcLayout = nullptr,
+ unsigned* numGCVars = nullptr,
+ var_types* simdBaseType = nullptr);
+
+ GenTreePtr impNormStructVal(GenTreePtr structVal,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ bool forceNormalization = false);
+
+ GenTreePtr impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ BOOL* pRuntimeLookup = nullptr,
+ BOOL mustRestoreHandle = FALSE,
+ BOOL importParent = FALSE);
+
+ GenTreePtr impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ BOOL* pRuntimeLookup = nullptr,
+ BOOL mustRestoreHandle = FALSE)
{
return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, TRUE);
}
- GenTreePtr impLookupToTree(CORINFO_RESOLVED_TOKEN *pResolvedToken,
- CORINFO_LOOKUP *pLookup,
- unsigned flags,
- void *compileTimeHandle);
+ GenTreePtr impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_LOOKUP* pLookup,
+ unsigned flags,
+ void* compileTimeHandle);
- GenTreePtr impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN *pResolvedToken,
- CORINFO_LOOKUP *pLookup,
- void * compileTimeHandle);
+ GenTreePtr impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_LOOKUP* pLookup,
+ void* compileTimeHandle);
- GenTreePtr impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP *pLookup,
- unsigned flags,
- void *compileTimeHandle);
+ GenTreePtr impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, unsigned flags, void* compileTimeHandle);
- GenTreePtr impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CorInfoHelpFunc helper,
- var_types type,
- GenTreeArgList* arg = NULL,
- CORINFO_LOOKUP_KIND * pGenericLookupKind = NULL);
+ GenTreePtr impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CorInfoHelpFunc helper,
+ var_types type,
+ GenTreeArgList* arg = nullptr,
+ CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr);
- GenTreePtr impCastClassOrIsInstToTree(GenTreePtr op1,
- GenTreePtr op2,
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- bool isCastClass );
+ GenTreePtr impCastClassOrIsInstToTree(GenTreePtr op1,
+ GenTreePtr op2,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ bool isCastClass);
- bool VarTypeIsMultiByteAndCanEnreg(var_types type,
- CORINFO_CLASS_HANDLE typeClass,
- unsigned *typeSize,
- bool forReturn);
+ bool VarTypeIsMultiByteAndCanEnreg(var_types type,
+ CORINFO_CLASS_HANDLE typeClass,
+ unsigned* typeSize,
+ bool forReturn);
- static bool IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId);
- static bool IsTargetIntrinsic(CorInfoIntrinsics intrinsicId);
- static bool IsMathIntrinsic(CorInfoIntrinsics intrinsicId);
- static bool IsMathIntrinsic(GenTreePtr tree);
+ static bool IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId);
+ static bool IsTargetIntrinsic(CorInfoIntrinsics intrinsicId);
+ static bool IsMathIntrinsic(CorInfoIntrinsics intrinsicId);
+ static bool IsMathIntrinsic(GenTreePtr tree);
private:
-
//----------------- Importing the method ----------------------------------
CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens.
#ifdef DEBUG
- unsigned impCurOpcOffs;
- const char * impCurOpcName;
- bool impNestedStackSpill;
-
+ unsigned impCurOpcOffs;
+ const char* impCurOpcName;
+ bool impNestedStackSpill;
// For displaying instrs with generated native code (-n:B)
- GenTreePtr impLastILoffsStmt; // oldest stmt added for which we did not gtStmtLastILoffs
- void impNoteLastILoffs ();
+ GenTreePtr impLastILoffsStmt; // oldest stmt added for which we did not gtStmtLastILoffs
+ void impNoteLastILoffs();
#endif
/* IL offset of the stmt currently being imported. It gets set to
@@ -2945,115 +2910,107 @@ private:
to get the actual IL offset value.
*/
- IL_OFFSETX impCurStmtOffs;
- void impCurStmtOffsSet (IL_OFFSET offs);
+ IL_OFFSETX impCurStmtOffs;
+ void impCurStmtOffsSet(IL_OFFSET offs);
- void impNoteBranchOffs ();
+ void impNoteBranchOffs();
- unsigned impInitBlockLineInfo ();
+ unsigned impInitBlockLineInfo();
- GenTreePtr impCheckForNullPointer (GenTreePtr obj);
- bool impIsThis (GenTreePtr obj);
- bool impIsLDFTN_TOKEN (const BYTE * delegateCreateStart, const BYTE * newobjCodeAddr);
- bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE * delegateCreateStart, const BYTE * newobjCodeAddr);
- bool impIsAnySTLOC (OPCODE opcode)
+ GenTreePtr impCheckForNullPointer(GenTreePtr obj);
+ bool impIsThis(GenTreePtr obj);
+ bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
+ bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
+ bool impIsAnySTLOC(OPCODE opcode)
{
- return ((opcode == CEE_STLOC) ||
- (opcode == CEE_STLOC_S) ||
- ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3)));
+ return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) ||
+ ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3)));
}
- GenTreeArgList* impPopList (unsigned count,
- unsigned * flagsPtr,
- CORINFO_SIG_INFO* sig,
- GenTreeArgList* prefixTree = NULL);
+ GenTreeArgList* impPopList(unsigned count,
+ unsigned* flagsPtr,
+ CORINFO_SIG_INFO* sig,
+ GenTreeArgList* prefixTree = nullptr);
- GenTreeArgList* impPopRevList (unsigned count,
- unsigned * flagsPtr,
- CORINFO_SIG_INFO* sig,
- unsigned skipReverseCount = 0);
+ GenTreeArgList* impPopRevList(unsigned count,
+ unsigned* flagsPtr,
+ CORINFO_SIG_INFO* sig,
+ unsigned skipReverseCount = 0);
/*
     * Get current IL offset with stack-empty info incorporated
*/
- IL_OFFSETX impCurILOffset (IL_OFFSET offs, bool callInstruction = false);
+ IL_OFFSETX impCurILOffset(IL_OFFSET offs, bool callInstruction = false);
//---------------- Spilling the importer stack ----------------------------
struct PendingDsc
{
- PendingDsc * pdNext;
- BasicBlock * pdBB;
- SavedStack pdSavedStack;
- ThisInitState pdThisPtrInit;
+ PendingDsc* pdNext;
+ BasicBlock* pdBB;
+ SavedStack pdSavedStack;
+ ThisInitState pdThisPtrInit;
};
- PendingDsc * impPendingList; // list of BBs currently waiting to be imported.
- PendingDsc * impPendingFree; // Freed up dscs that can be reused
+ PendingDsc* impPendingList; // list of BBs currently waiting to be imported.
+ PendingDsc* impPendingFree; // Freed up dscs that can be reused
// We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation.
ExpandArray<BYTE> impPendingBlockMembers;
// Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.)
// Operates on the map in the top-level ancestor.
- BYTE impGetPendingBlockMember(BasicBlock* blk)
+ BYTE impGetPendingBlockMember(BasicBlock* blk)
{
return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd());
}
// Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary.)
// Operates on the map in the top-level ancestor.
- void impSetPendingBlockMember(BasicBlock* blk, BYTE val)
+ void impSetPendingBlockMember(BasicBlock* blk, BYTE val)
{
impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val);
}
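The accessors above always route through impInlineRoot(), so an inlinee and its root compiler share a single byte-per-block pending map. A minimal standalone sketch of that sharing pattern, using hypothetical PendingMap/Block/Importer stand-ins rather than the real ExpandArray<BYTE>, BasicBlock, and Compiler:

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical stand-ins; the real code uses ExpandArray<BYTE> keyed by bbInd().
struct Block { unsigned index; };

class PendingMap {
    std::vector<uint8_t> bytes; // one byte per block, grown on demand
public:
    uint8_t Get(unsigned i) const { return i < bytes.size() ? bytes[i] : 0; }
    void Set(unsigned i, uint8_t v) {
        if (i >= bytes.size()) {
            bytes.resize(i + 1, 0);
        }
        bytes[i] = v;
    }
};

struct Importer {
    Importer*  root = nullptr; // null means "this is the top-level compiler"
    PendingMap pendingBlockMembers;

    Importer* InlineRoot() { return (root != nullptr) ? root : this; }

    // Reads and writes always operate on the top-level ancestor's map.
    uint8_t GetPendingBlockMember(const Block& b) {
        return InlineRoot()->pendingBlockMembers.Get(b.index);
    }
    void SetPendingBlockMember(const Block& b, uint8_t v) {
        InlineRoot()->pendingBlockMembers.Set(b.index, v);
    }
};

int main() {
    Importer top;
    Importer inlinee;
    inlinee.root = &top;
    Block b{5};
    inlinee.SetPendingBlockMember(b, 1);
    assert(top.GetPendingBlockMember(b) == 1); // visible through the shared root map
    return 0;
}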
- bool impCanReimport;
+ bool impCanReimport;
- bool impSpillStackEntry (unsigned level,
- unsigned varNum
+ bool impSpillStackEntry(unsigned level,
+ unsigned varNum
#ifdef DEBUG
- , bool bAssertOnRecursion
- , const char * reason
+ ,
+ bool bAssertOnRecursion,
+ const char* reason
#endif
- );
+ );
+ void impSpillStackEnsure(bool spillLeaves = false);
+ void impEvalSideEffects();
+ void impSpillSpecialSideEff();
+ void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason));
+ void impSpillValueClasses();
+ void impSpillEvalStack();
+ static fgWalkPreFn impFindValueClasses;
+ void impSpillLclRefs(ssize_t lclNum);
- void impSpillStackEnsure (bool spillLeaves = false);
- void impEvalSideEffects ();
- void impSpillSpecialSideEff ();
- void impSpillSideEffects (bool spillGlobEffects,
- unsigned chkLevel
- DEBUGARG(const char * reason) );
- void impSpillValueClasses ();
- void impSpillEvalStack();
- static fgWalkPreFn impFindValueClasses;
- void impSpillLclRefs (ssize_t lclNum);
+ BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd);
- BasicBlock * impPushCatchArgOnStack (BasicBlock * hndBlk,
- CORINFO_CLASS_HANDLE clsHnd);
+ void impImportBlockCode(BasicBlock* block);
- void impImportBlockCode (BasicBlock * block);
+ void impReimportMarkBlock(BasicBlock* block);
+ void impReimportMarkSuccessors(BasicBlock* block);
- void impReimportMarkBlock (BasicBlock * block);
- void impReimportMarkSuccessors(BasicBlock * block);
+ void impVerifyEHBlock(BasicBlock* block, bool isTryStart);
- void impVerifyEHBlock (BasicBlock * block,
- bool isTryStart);
-
- void impImportBlockPending (BasicBlock * block);
+ void impImportBlockPending(BasicBlock* block);
// Similar to impImportBlockPending, but assumes that block has already been imported once and is being
// reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState
// for the block, but instead, just re-uses the block's existing EntryState.
- void impReimportBlockPending (BasicBlock * block);
+ void impReimportBlockPending(BasicBlock* block);
- var_types impGetByRefResultType (genTreeOps oper,
- bool fUnsigned,
- GenTreePtr * pOp1,
- GenTreePtr * pOp2);
+ var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2);
- void impImportBlock (BasicBlock * block);
+ void impImportBlock(BasicBlock* block);
// Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values
// on the stack to local variables (the "spill temp" variables). The successor blocks will assume that
@@ -3068,7 +3025,7 @@ private:
// which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps
// chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending
// on which kind of member of the clique the block is).
- unsigned impGetSpillTmpBase (BasicBlock * block);
+ unsigned impGetSpillTmpBase(BasicBlock* block);
// Assumes that "block" is a basic block that completes with a non-empty stack. We have previously
// assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks
@@ -3082,7 +3039,7 @@ private:
// This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark
// blocks for re-importation as appropriate (both successors, so they get the right incoming type, and
// predecessors, so they insert an upcast if needed).
- void impReimportSpillClique (BasicBlock * block);
+ void impReimportSpillClique(BasicBlock* block);
// When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic
// block, and represent the predecessor and successor members of the clique currently being computed.
@@ -3092,73 +3049,88 @@ private:
enum SpillCliqueDir
{
- SpillCliquePred, SpillCliqueSucc
+ SpillCliquePred,
+ SpillCliqueSucc
};
// Abstract class for receiving a callback while walking a spill clique
- class SpillCliqueWalker {
+ class SpillCliqueWalker
+ {
public:
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0;
};
// This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique
- class SetSpillTempsBase : public SpillCliqueWalker {
+ class SetSpillTempsBase : public SpillCliqueWalker
+ {
unsigned m_baseTmp;
+
public:
- SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp) { }
+ SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp)
+ {
+ }
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};
// This class is used for implementing impReimportSpillClique part on each block within the spill clique
- class ReimportSpillClique : public SpillCliqueWalker {
- Compiler * m_pComp;
+ class ReimportSpillClique : public SpillCliqueWalker
+ {
+ Compiler* m_pComp;
+
public:
- ReimportSpillClique(Compiler * pComp) : m_pComp(pComp) { }
+ ReimportSpillClique(Compiler* pComp) : m_pComp(pComp)
+ {
+ }
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};
// This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each
// predecessor or successor within the spill clique
- void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker * callback);
+ void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback);
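The walker above drives the two SpillCliqueWalker callbacks defined earlier (SetSpillTempsBase and ReimportSpillClique). A minimal sketch of that visitor pattern with simplified stand-in types; the real walker discovers clique members through the pred/succ byte maps, which is elided here:

#include <cstdio>
#include <vector>

struct Block { unsigned num; };
enum CliqueDir { CliquePred, CliqueSucc };

// Abstract callback, analogous to SpillCliqueWalker::Visit.
struct CliqueWalker {
    virtual void Visit(CliqueDir dir, Block* blk) = 0;
    virtual ~CliqueWalker() = default;
};

// Concrete callback: record one spill-temp base for every clique member,
// playing the role of SetSpillTempsBase.
struct SetBaseTemp : CliqueWalker {
    unsigned baseTmp;
    explicit SetBaseTemp(unsigned t) : baseTmp(t) {}
    void Visit(CliqueDir dir, Block* blk) override {
        std::printf("BB%02u (%s member) -> spill temp base %u\n", blk->num,
                    (dir == CliquePred) ? "pred" : "succ", baseTmp);
    }
};

// Stand-in for impWalkSpillCliqueFromPred: visit each member exactly once.
void WalkSpillClique(const std::vector<Block*>& preds, const std::vector<Block*>& succs,
                     CliqueWalker* callback) {
    for (Block* b : preds) { callback->Visit(CliquePred, b); }
    for (Block* b : succs) { callback->Visit(CliqueSucc, b); }
}

int main() {
    Block b1{1}, b2{2}, b3{3};
    std::vector<Block*> preds{&b1};
    std::vector<Block*> succs{&b2, &b3};
    SetBaseTemp setBase(100);
    WalkSpillClique(preds, succs, &setBase);
    return 0;
}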
// For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the
    // incoming locals. This walks that list and resets the types of the GenTrees to match the types of
// the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique).
- void impRetypeEntryStateTemps(BasicBlock * blk);
+ void impRetypeEntryStateTemps(BasicBlock* blk);
- BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk);
- void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val);
+ BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk);
+ void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val);
- void impPushVar(GenTree* op, typeInfo tiRetVal);
- void impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal);
- void impLoadVar(unsigned lclNum, IL_OFFSET offset) { impLoadVar(lclNum, offset, lvaTable[lclNum].lvVerTypeInfo); }
- void impLoadArg(unsigned ilArgNum, IL_OFFSET offset);
- void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset);
- bool impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &opcode);
+ void impPushVar(GenTree* op, typeInfo tiRetVal);
+ void impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal);
+ void impLoadVar(unsigned lclNum, IL_OFFSET offset)
+ {
+ impLoadVar(lclNum, offset, lvaTable[lclNum].lvVerTypeInfo);
+ }
+ void impLoadArg(unsigned ilArgNum, IL_OFFSET offset);
+ void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset);
+ bool impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode);
#ifdef _TARGET_ARM_
- void impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr op, CORINFO_CLASS_HANDLE hClass);
+ void impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr op, CORINFO_CLASS_HANDLE hClass);
#endif
// A free list of linked list nodes used to represent to-do stacks of basic blocks.
- struct BlockListNode
- {
- BasicBlock* m_blk;
- BlockListNode* m_next;
- BlockListNode(BasicBlock* blk, BlockListNode* next = NULL) : m_blk(blk), m_next(next) {}
- void* operator new (size_t sz, Compiler* comp);
+ struct BlockListNode
+ {
+ BasicBlock* m_blk;
+ BlockListNode* m_next;
+ BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next)
+ {
+ }
+ void* operator new(size_t sz, Compiler* comp);
};
- BlockListNode* impBlockListNodeFreeList;
+ BlockListNode* impBlockListNodeFreeList;
- BlockListNode* AllocBlockListNode();
- void FreeBlockListNode(BlockListNode* node);
+ BlockListNode* AllocBlockListNode();
+ void FreeBlockListNode(BlockListNode* node);
- bool impIsValueType (typeInfo* pTypeInfo);
- var_types mangleVarArgsType (var_types type);
+ bool impIsValueType(typeInfo* pTypeInfo);
+ var_types mangleVarArgsType(var_types type);
#if FEATURE_VARARG
- regNumber getCallArgIntRegister (regNumber floatReg);
- regNumber getCallArgFloatRegister (regNumber intReg);
+ regNumber getCallArgIntRegister(regNumber floatReg);
+ regNumber getCallArgFloatRegister(regNumber intReg);
#endif // FEATURE_VARARG
#if defined(DEBUG)
@@ -3166,103 +3138,93 @@ private:
#endif
#ifdef DEBUG
- static LONG jitNestingLevel;
+ static LONG jitNestingLevel;
#endif // DEBUG
- bool seenConditionalJump;
-
- static BOOL impIsAddressInLocal(GenTreePtr tree, GenTreePtr * lclVarTreeOut);
-
- void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo,
- InlineResult* inlineResult);
-
- // STATIC inlining decision based on the IL code.
- void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
- CORINFO_METHOD_INFO* methInfo,
- bool forceInline,
- InlineResult* inlineResult);
-
- void impCheckCanInline(GenTreePtr call,
- CORINFO_METHOD_HANDLE fncHandle,
- unsigned methAttr,
- CORINFO_CONTEXT_HANDLE exactContextHnd,
- InlineCandidateInfo** ppInlineCandidateInfo,
- InlineResult* inlineResult);
-
- void impInlineRecordArgInfo(InlineInfo* pInlineInfo,
- GenTreePtr curArgVal,
- unsigned argNum,
- InlineResult* inlineResult);
-
- void impInlineInitVars(InlineInfo* pInlineInfo);
-
- unsigned impInlineFetchLocal(unsigned lclNum
- DEBUGARG(const char * reason) );
-
- GenTreePtr impInlineFetchArg(unsigned lclNum,
- InlArgInfo * inlArgInfo,
- InlLclVarInfo * lclTypeInfo);
-
- BOOL impInlineIsThis(GenTreePtr tree, InlArgInfo * inlArgInfo);
-
- BOOL impInlineIsGuaranteedThisDerefBeforeAnySideEffects(
- GenTreePtr additionalTreesToBeEvaluatedBefore,
- GenTreePtr variableBeingDereferenced,
- InlArgInfo * inlArgInfo);
-
- void impMarkInlineCandidate(GenTreePtr call,
- CORINFO_CONTEXT_HANDLE exactContextHnd,
- CORINFO_CALL_INFO* callInfo);
-
- bool impTailCallRetTypeCompatible(var_types callerRetType,
- CORINFO_CLASS_HANDLE callerRetTypeClass,
- var_types calleeRetType,
- CORINFO_CLASS_HANDLE calleeRetTypeClass);
-
- bool impIsTailCallILPattern(bool tailPrefixed,
- OPCODE curOpcode,
- const BYTE *codeAddrOfNextOpcode,
- const BYTE *codeEnd,
- bool isRecursive,
- bool *IsCallPopRet = nullptr);
-
- bool impIsImplicitTailCallCandidate(OPCODE curOpcode,
- const BYTE *codeAddrOfNextOpcode,
- const BYTE *codeEnd,
- int prefixFlags,
- bool isRecursive);
+ bool seenConditionalJump;
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX FlowGraph XX
-XX XX
-XX Info about the basic-blocks, their contents and the flow analysis XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
+ static BOOL impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut);
+
+ void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult);
+
+ // STATIC inlining decision based on the IL code.
+ void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
+ CORINFO_METHOD_INFO* methInfo,
+ bool forceInline,
+ InlineResult* inlineResult);
+
+ void impCheckCanInline(GenTreePtr call,
+ CORINFO_METHOD_HANDLE fncHandle,
+ unsigned methAttr,
+ CORINFO_CONTEXT_HANDLE exactContextHnd,
+ InlineCandidateInfo** ppInlineCandidateInfo,
+ InlineResult* inlineResult);
+
+ void impInlineRecordArgInfo(InlineInfo* pInlineInfo,
+ GenTreePtr curArgVal,
+ unsigned argNum,
+ InlineResult* inlineResult);
+
+ void impInlineInitVars(InlineInfo* pInlineInfo);
+
+ unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason));
+
+ GenTreePtr impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo);
+
+ BOOL impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo);
+
+ BOOL impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr additionalTreesToBeEvaluatedBefore,
+ GenTreePtr variableBeingDereferenced,
+ InlArgInfo* inlArgInfo);
+ void impMarkInlineCandidate(GenTreePtr call, CORINFO_CONTEXT_HANDLE exactContextHnd, CORINFO_CALL_INFO* callInfo);
-public :
+ bool impTailCallRetTypeCompatible(var_types callerRetType,
+ CORINFO_CLASS_HANDLE callerRetTypeClass,
+ var_types calleeRetType,
+ CORINFO_CLASS_HANDLE calleeRetTypeClass);
- BasicBlock * fgFirstBB; // Beginning of the basic block list
- BasicBlock * fgLastBB; // End of the basic block list
- BasicBlock * fgFirstColdBlock; // First block to be placed in the cold section
+ bool impIsTailCallILPattern(bool tailPrefixed,
+ OPCODE curOpcode,
+ const BYTE* codeAddrOfNextOpcode,
+ const BYTE* codeEnd,
+ bool isRecursive,
+ bool* IsCallPopRet = nullptr);
+
+ bool impIsImplicitTailCallCandidate(
+ OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive);
+
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX FlowGraph XX
+ XX XX
+ XX Info about the basic-blocks, their contents and the flow analysis XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
+
+public:
+ BasicBlock* fgFirstBB; // Beginning of the basic block list
+ BasicBlock* fgLastBB; // End of the basic block list
+ BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section
#if FEATURE_EH_FUNCLETS
- BasicBlock * fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets)
+ BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets)
#endif
- BasicBlock * fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been created.
- BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks
- unsigned fgEdgeCount; // # of control flow edges between the BBs
- unsigned fgBBcount; // # of BBs in the method
+ BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been
+ // created.
+ BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks
+ unsigned fgEdgeCount; // # of control flow edges between the BBs
+ unsigned fgBBcount; // # of BBs in the method
#ifdef DEBUG
- unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen
+ unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen
#endif
- unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks
- unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information
- BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute dominance. Indexed by block number. Size: fgBBNumMax + 1.
+ unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks
+ unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information
+ BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute
+ // dominance. Indexed by block number. Size: fgBBNumMax + 1.
// After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute
// dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and
@@ -3270,18 +3232,17 @@ public :
// starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely
// to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array
// index). The arrays are of size fgBBNumMax + 1.
- unsigned * fgDomTreePreOrder;
- unsigned * fgDomTreePostOrder;
-
- bool fgBBVarSetsInited;
+ unsigned* fgDomTreePreOrder;
+ unsigned* fgDomTreePostOrder;
+ bool fgBBVarSetsInited;
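Those preorder/postorder arrays support constant-time dominance tests: in the dominator tree, A dominates B exactly when B's DFS interval is nested inside A's. A sketch of that check, with a hypothetical DomInfo helper rather than the compiler's actual query method:

#include <cassert>

// Hypothetical stand-in: preOrder/postOrder are the DFS numbers over the
// dominator tree, 1-based and indexed by block number (element 0 is unused).
struct DomInfo {
    const unsigned* preOrder;
    const unsigned* postOrder;

    // A dominates B iff A is an ancestor of B in the dominator tree, i.e.
    // A's [preorder, postorder] interval encloses B's.
    bool Dominates(unsigned aNum, unsigned bNum) const {
        return (preOrder[aNum] <= preOrder[bNum]) && (postOrder[aNum] >= postOrder[bNum]);
    }
};

int main() {
    // Tiny dominator tree that is just the chain BB01 -> BB02 -> BB03.
    unsigned pre[]  = {0, 1, 2, 3};
    unsigned post[] = {0, 3, 2, 1};
    DomInfo dom{pre, post};
    assert(dom.Dominates(1, 3));  // the root dominates every block
    assert(!dom.Dominates(3, 2)); // a leaf dominates only itself
    assert(dom.Dominates(2, 2));  // dominance is reflexive
    return 0;
}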
// Allocate array like T* a = new T[fgBBNumMax + 1];
// Using helper so we don't keep forgetting +1.
template <typename T>
- T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown)
+ T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown)
{
- return (T*) compGetMem((fgBBNumMax + 1) * sizeof(T), cmk);
+ return (T*)compGetMem((fgBBNumMax + 1) * sizeof(T), cmk);
}
// BlockSets are relative to a specific set of BasicBlock numbers. If that changes
@@ -3291,9 +3252,9 @@ public :
// participate in a block set until the blocks are all renumbered, causing the epoch
// to change. This is useful if continuing to use previous block sets is valuable.
// If the epoch is zero, then it is uninitialized, and block sets can't be used.
- unsigned fgCurBBEpoch;
+ unsigned fgCurBBEpoch;
- unsigned GetCurBasicBlockEpoch()
+ unsigned GetCurBasicBlockEpoch()
{
return fgCurBBEpoch;
}
@@ -3301,48 +3262,45 @@ public :
// The number of basic blocks in the current epoch. When the blocks are renumbered,
// this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains
// the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered.
- unsigned fgCurBBEpochSize;
+ unsigned fgCurBBEpochSize;
// The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize
// bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called.
- unsigned fgBBSetCountInSizeTUnits;
+ unsigned fgBBSetCountInSizeTUnits;
- void NewBasicBlockEpoch()
+ void NewBasicBlockEpoch()
{
INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits);
// We have a new epoch. Compute and cache the size needed for new BlockSets.
fgCurBBEpoch++;
fgCurBBEpochSize = fgBBNumMax + 1;
- fgBBSetCountInSizeTUnits = unsigned(roundUp(fgCurBBEpochSize, sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);
+ fgBBSetCountInSizeTUnits =
+ unsigned(roundUp(fgCurBBEpochSize, sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);
#ifdef DEBUG
// All BlockSet objects are now invalid!
- fgReachabilitySetsValid = false; // the bbReach sets are now invalid!
- fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid!
+ fgReachabilitySetsValid = false; // the bbReach sets are now invalid!
+ fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid!
if (verbose)
{
unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t));
printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)",
- fgCurBBEpoch,
- fgCurBBEpochSize,
- epochArrSize,
- (epochArrSize <= 1) ? "short" : "long");
+ fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long");
if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1)))
{
// If we're not just establishing the first epoch, and the epoch array size has changed such that we're
// going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an
// array of size_t bitsets), then print that out.
- printf("; NOTE: BlockSet size was previously %s!",
- (oldEpochArrSize <= 1) ? "short" : "long");
+ printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long");
}
printf("\n");
}
#endif // DEBUG
}
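The size computation inside NewBasicBlockEpoch rounds the epoch size up to whole size_t words and then decides between the short (single word) and long (word array) BlockSet form. A standalone sketch of just that arithmetic, assuming only that roundUp means round-up-to-a-multiple:

#include <cstddef>
#include <cstdio>
#include <initializer_list>

// Round 'value' up to a multiple of 'align' (align must be nonzero).
static size_t RoundUp(size_t value, size_t align) {
    return ((value + align - 1) / align) * align;
}

int main() {
    const size_t bitsPerWord = sizeof(size_t) * 8;
    for (unsigned epochSize : {1u, 64u, 65u, 200u}) {
        // Number of size_t words needed for one bit per block in this epoch.
        unsigned words = (unsigned)(RoundUp(epochSize, bitsPerWord) / bitsPerWord);
        // One word fits the "short" representation; more requires the "long" (array) form.
        std::printf("epoch size %3u -> %u word(s) (%s)\n", epochSize, words,
                    (words <= 1) ? "short" : "long");
    }
    return 0;
}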
- void EnsureBasicBlockEpoch()
+ void EnsureBasicBlockEpoch()
{
if (fgCurBBEpochSize != fgBBNumMax + 1)
{
@@ -3350,71 +3308,65 @@ public :
}
}
+ BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind);
+ void fgEnsureFirstBBisScratch();
+ bool fgFirstBBisScratch();
+ bool fgBBisScratch(BasicBlock* block);
- BasicBlock * fgNewBasicBlock (BBjumpKinds jumpKind);
- void fgEnsureFirstBBisScratch();
- bool fgFirstBBisScratch();
- bool fgBBisScratch(BasicBlock* block);
+ void fgExtendEHRegionBefore(BasicBlock* block);
+ void fgExtendEHRegionAfter(BasicBlock* block);
- void fgExtendEHRegionBefore (BasicBlock* block);
- void fgExtendEHRegionAfter (BasicBlock* block);
+ BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion);
- BasicBlock* fgNewBBbefore (BBjumpKinds jumpKind,
- BasicBlock * block,
- bool extendRegion);
+ BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion);
- BasicBlock* fgNewBBafter (BBjumpKinds jumpKind,
- BasicBlock * block,
- bool extendRegion);
+ BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind,
+ unsigned tryIndex,
+ unsigned hndIndex,
+ BasicBlock* nearBlk,
+ bool putInFilter = false,
+ bool runRarely = false,
+ bool insertAtEnd = false);
- BasicBlock * fgNewBBinRegion (BBjumpKinds jumpKind,
- unsigned tryIndex,
- unsigned hndIndex,
- BasicBlock * nearBlk,
- bool putInFilter = false,
- bool runRarely = false,
- bool insertAtEnd = false);
+ BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind,
+ BasicBlock* srcBlk,
+ bool runRarely = false,
+ bool insertAtEnd = false);
- BasicBlock * fgNewBBinRegion (BBjumpKinds jumpKind,
- BasicBlock * srcBlk,
- bool runRarely = false,
- bool insertAtEnd = false);
+ BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind);
- BasicBlock* fgNewBBinRegion (BBjumpKinds jumpKind);
+ BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind,
+ BasicBlock* afterBlk,
+ unsigned xcptnIndex,
+ bool putInTryRegion);
- BasicBlock * fgNewBBinRegionWorker(BBjumpKinds jumpKind,
- BasicBlock * afterBlk,
- unsigned xcptnIndex,
- bool putInTryRegion);
+ void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk);
+ void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk);
+ void fgUnlinkBlock(BasicBlock* block);
- void fgInsertBBbefore (BasicBlock * insertBeforeBlk,
- BasicBlock * newBlk);
- void fgInsertBBafter (BasicBlock * insertAfterBlk,
- BasicBlock * newBlk);
- void fgUnlinkBlock (BasicBlock * block);
-
-#if OPT_BOOL_OPS // Used to detect multiple logical "not" assignments.
- bool fgMultipleNots;
+#if OPT_BOOL_OPS // Used to detect multiple logical "not" assignments.
+ bool fgMultipleNots;
#endif
- bool fgModified; // True if the flow graph has been modified recently
- bool fgComputePredsDone; // Have we computed the bbPreds list
- bool fgCheapPredsValid; // Is the bbCheapPreds list valid?
- bool fgDomsComputed; // Have we computed the dominator sets?
+ bool fgModified; // True if the flow graph has been modified recently
+ bool fgComputePredsDone; // Have we computed the bbPreds list
+ bool fgCheapPredsValid; // Is the bbCheapPreds list valid?
+ bool fgDomsComputed; // Have we computed the dominator sets?
- bool fgHasSwitch; // any BBJ_SWITCH jumps?
- bool fgHasPostfix; // any postfix ++/-- found?
- unsigned fgIncrCount; // number of increment nodes found
+ bool fgHasSwitch; // any BBJ_SWITCH jumps?
+ bool fgHasPostfix; // any postfix ++/-- found?
+ unsigned fgIncrCount; // number of increment nodes found
- BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler begin blocks.
+ BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler
+ // begin blocks.
#ifdef DEBUG
- bool fgReachabilitySetsValid; // Are the bbReach sets valid?
- bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid?
-#endif // DEBUG
+ bool fgReachabilitySetsValid; // Are the bbReach sets valid?
+ bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid?
+#endif // DEBUG
- bool fgRemoveRestOfBlock; // true if we know that we will throw
- bool fgStmtRemoved; // true if we remove statements -> need new DFA
+ bool fgRemoveRestOfBlock; // true if we know that we will throw
+ bool fgStmtRemoved; // true if we remove statements -> need new DFA
// There are two modes for ordering of the trees.
// - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in
@@ -3427,206 +3379,200 @@ public :
// statement DO NOT belong to one of the embedded trees). It is possible that we will want
// to relax this requirement, but it makes it easier to validate the order.
- enum FlowGraphOrder { FGOrderTree, FGOrderLinear };
- FlowGraphOrder fgOrder;
+ enum FlowGraphOrder
+ {
+ FGOrderTree,
+ FGOrderLinear
+ };
+ FlowGraphOrder fgOrder;
// The following are boolean flags that keep track of the state of internal data structures
- bool fgStmtListThreaded;
- bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions
- bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights
- bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights
-    bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights
- bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form
- bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph
- BasicBlock::weight_t fgCalledWeight; // count of the number of times this method was called
- // This is derived from the profile data
- // or is BB_UNITY_WEIGHT when we don't have profile data
+ bool fgStmtListThreaded;
+ bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions
+ bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights
+ bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights
+    bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights
+ bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form
+ bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph
+ BasicBlock::weight_t fgCalledWeight; // count of the number of times this method was called
+ // This is derived from the profile data
+ // or is BB_UNITY_WEIGHT when we don't have profile data
#if FEATURE_EH_FUNCLETS
- bool fgFuncletsCreated; // true if the funclet creation phase has been run
-#endif // FEATURE_EH_FUNCLETS
+ bool fgFuncletsCreated; // true if the funclet creation phase has been run
+#endif // FEATURE_EH_FUNCLETS
- bool fgGlobalMorph; // indicates if we are during the global morphing phase
- // since fgMorphTree can be called from several places
- bool fgExpandInline; // indicates that we are creating tree for the inliner
+ bool fgGlobalMorph; // indicates if we are during the global morphing phase
+ // since fgMorphTree can be called from several places
+ bool fgExpandInline; // indicates that we are creating tree for the inliner
- bool impBoxTempInUse; // the temp below is valid and available
- unsigned impBoxTemp; // a temporary that is used for boxing
+ bool impBoxTempInUse; // the temp below is valid and available
+ unsigned impBoxTemp; // a temporary that is used for boxing
#ifdef DEBUG
- bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert,
- // and we are trying to compile again in a "safer", minopts mode?
+ bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert,
+ // and we are trying to compile again in a "safer", minopts mode?
#endif
#if defined(DEBUG)
- unsigned impInlinedCodeSize;
+ unsigned impInlinedCodeSize;
#endif
//-------------------------------------------------------------------------
- void fgInit ();
+ void fgInit();
+
+ void fgImport();
- void fgImport ();
+ void fgInline();
- void fgInline ();
-
- GenTreePtr fgGetCritSectOfStaticMethod();
+ GenTreePtr fgGetCritSectOfStaticMethod();
#if !defined(_TARGET_X86_)
- void fgAddSyncMethodEnterExit();
+ void fgAddSyncMethodEnterExit();
- GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter);
+ GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter);
- void fgConvertSyncReturnToLeave(BasicBlock* block);
+ void fgConvertSyncReturnToLeave(BasicBlock* block);
#endif // !_TARGET_X86_
- void fgAddReversePInvokeEnterExit();
+ void fgAddReversePInvokeEnterExit();
- bool fgMoreThanOneReturnBlock();
+ bool fgMoreThanOneReturnBlock();
// The number of separate return points in the method.
- unsigned fgReturnCount;
+ unsigned fgReturnCount;
- void fgAddInternal ();
+ void fgAddInternal();
- bool fgFoldConditional (BasicBlock * block);
+ bool fgFoldConditional(BasicBlock* block);
- void fgMorphStmts (BasicBlock * block,
- bool * mult, bool * lnot, bool * loadw);
- void fgMorphBlocks ();
+ void fgMorphStmts(BasicBlock* block, bool* mult, bool* lnot, bool* loadw);
+ void fgMorphBlocks();
- bool fgMorphBlockStmt (BasicBlock * block,
- GenTreePtr stmt
- DEBUGARG(const char * msg) );
+ bool fgMorphBlockStmt(BasicBlock* block, GenTreePtr stmt DEBUGARG(const char* msg));
- void fgSetOptions ();
+ void fgSetOptions();
#ifdef DEBUG
- static fgWalkPreFn fgAssertNoQmark;
- void fgPreExpandQmarkChecks (GenTreePtr expr);
- void fgPostExpandQmarkChecks ();
- static void fgCheckQmarkAllowedForm (GenTreePtr tree);
+ static fgWalkPreFn fgAssertNoQmark;
+ void fgPreExpandQmarkChecks(GenTreePtr expr);
+ void fgPostExpandQmarkChecks();
+ static void fgCheckQmarkAllowedForm(GenTreePtr tree);
#endif
- IL_OFFSET fgFindBlockILOffset (BasicBlock* block);
+ IL_OFFSET fgFindBlockILOffset(BasicBlock* block);
- BasicBlock* fgSplitBlockAtBeginning (BasicBlock* curr);
- BasicBlock* fgSplitBlockAtEnd (BasicBlock* curr);
- BasicBlock* fgSplitBlockAfterStatement (BasicBlock* curr, GenTree *stmt);
- BasicBlock* fgSplitEdge (BasicBlock* curr, BasicBlock* succ);
+ BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr);
+ BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr);
+ BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, GenTree* stmt);
+ BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ);
- GenTreeStmt* fgNewStmtFromTree (GenTreePtr tree, BasicBlock* block,
- IL_OFFSETX offs);
- GenTreeStmt* fgNewStmtFromTree (GenTreePtr tree);
- GenTreeStmt* fgNewStmtFromTree (GenTreePtr tree, BasicBlock* block);
- GenTreeStmt* fgNewStmtFromTree (GenTreePtr tree, IL_OFFSETX offs);
+ GenTreeStmt* fgNewStmtFromTree(GenTreePtr tree, BasicBlock* block, IL_OFFSETX offs);
+ GenTreeStmt* fgNewStmtFromTree(GenTreePtr tree);
+ GenTreeStmt* fgNewStmtFromTree(GenTreePtr tree, BasicBlock* block);
+ GenTreeStmt* fgNewStmtFromTree(GenTreePtr tree, IL_OFFSETX offs);
- GenTreePtr fgGetLastTopLevelStmt(BasicBlock *block);
+ GenTreePtr fgGetLastTopLevelStmt(BasicBlock* block);
- GenTreePtr fgGetTopLevelQmark (GenTreePtr expr, GenTreePtr* ppDst = NULL);
- void fgExpandQmarkForCastInstOf(BasicBlock* block, GenTreePtr stmt);
- void fgExpandQmarkStmt (BasicBlock* block, GenTreePtr expr);
- void fgExpandQmarkNodes ();
+ GenTreePtr fgGetTopLevelQmark(GenTreePtr expr, GenTreePtr* ppDst = nullptr);
+ void fgExpandQmarkForCastInstOf(BasicBlock* block, GenTreePtr stmt);
+ void fgExpandQmarkStmt(BasicBlock* block, GenTreePtr expr);
+ void fgExpandQmarkNodes();
- void fgMorph ();
+ void fgMorph();
- // Do "simple lowering." This functionality is (conceptually) part of "general"
+ // Do "simple lowering." This functionality is (conceptually) part of "general"
// lowering that is distributed between fgMorph and the lowering phase of LSRA.
- void fgSimpleLowering();
+ void fgSimpleLowering();
- bool fgShouldCreateAssignOp(GenTreePtr tree, bool *bReverse);
+ bool fgShouldCreateAssignOp(GenTreePtr tree, bool* bReverse);
- GenTreePtr fgInitThisClass ();
+ GenTreePtr fgInitThisClass();
- GenTreePtr fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls,
- CorInfoHelpFunc helper);
+ GenTreePtr fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper);
- GenTreePtr fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls);
+ GenTreePtr fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls);
- void fgLocalVarLiveness();
+ void fgLocalVarLiveness();
- void fgLocalVarLivenessInit();
+ void fgLocalVarLivenessInit();
#ifdef LEGACY_BACKEND
- GenTreePtr fgLegacyPerStatementLocalVarLiveness(GenTreePtr startNode,
- GenTreePtr relopNode,
- GenTreePtr asgdLclVar);
+ GenTreePtr fgLegacyPerStatementLocalVarLiveness(GenTreePtr startNode, GenTreePtr relopNode, GenTreePtr asgdLclVar);
#else
- void fgPerStatementLocalVarLiveness(GenTreePtr startNode, GenTreePtr asgdLclVar);
+ void fgPerStatementLocalVarLiveness(GenTreePtr startNode, GenTreePtr asgdLclVar);
#endif
- void fgPerBlockLocalVarLiveness();
+ void fgPerBlockLocalVarLiveness();
- VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock *block);
+ VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block);
- void fgLiveVarAnalysis (bool updateInternalOnly = false);
+ void fgLiveVarAnalysis(bool updateInternalOnly = false);
// This is used in the liveness computation, as a temporary. When we use the
// arbitrary-length VarSet representation, it is better not to allocate a new one
// at each call.
VARSET_TP fgMarkIntfUnionVS;
- bool fgMarkIntf (VARSET_VALARG_TP varSet);
+ bool fgMarkIntf(VARSET_VALARG_TP varSet);
- bool fgMarkIntf (VARSET_VALARG_TP varSet1,
- VARSET_VALARG_TP varSet2);
+ bool fgMarkIntf(VARSET_VALARG_TP varSet1, VARSET_VALARG_TP varSet2);
- void fgUpdateRefCntForClone(BasicBlock* addedToBlock,
- GenTreePtr clonedTree);
+ void fgUpdateRefCntForClone(BasicBlock* addedToBlock, GenTreePtr clonedTree);
- void fgUpdateRefCntForExtract(GenTreePtr wholeTree,
- GenTreePtr keptTree);
+ void fgUpdateRefCntForExtract(GenTreePtr wholeTree, GenTreePtr keptTree);
- void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call);
+ void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call);
- bool fgComputeLifeLocal(VARSET_TP& life, VARSET_TP& keepAliveVars, GenTree* lclVarNode, GenTree* node);
+ bool fgComputeLifeLocal(VARSET_TP& life, VARSET_TP& keepAliveVars, GenTree* lclVarNode, GenTree* node);
- VARSET_VALRET_TP fgComputeLife (VARSET_VALARG_TP life,
- GenTreePtr startNode,
- GenTreePtr endNode,
- VARSET_VALARG_TP volatileVars,
- bool* pStmtInfoDirty
- DEBUGARG( bool * treeModf));
+ VARSET_VALRET_TP fgComputeLife(VARSET_VALARG_TP life,
+ GenTreePtr startNode,
+ GenTreePtr endNode,
+ VARSET_VALARG_TP volatileVars,
+ bool* pStmtInfoDirty DEBUGARG(bool* treeModf));
- bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP life, bool* doAgain, bool* pStmtInfoDirty DEBUGARG(bool* treeModf));
+ bool fgRemoveDeadStore(GenTree** pTree,
+ LclVarDsc* varDsc,
+ VARSET_TP life,
+ bool* doAgain,
+ bool* pStmtInfoDirty DEBUGARG(bool* treeModf));
// For updating liveset during traversal AFTER fgComputeLife has completed
- VARSET_VALRET_TP fgGetVarBits (GenTreePtr tree);
- VARSET_VALRET_TP fgUpdateLiveSet (VARSET_VALARG_TP liveSet,
- GenTreePtr tree);
+ VARSET_VALRET_TP fgGetVarBits(GenTreePtr tree);
+ VARSET_VALRET_TP fgUpdateLiveSet(VARSET_VALARG_TP liveSet, GenTreePtr tree);
// Returns the set of live variables after endTree,
// assuming that liveSet is the set of live variables BEFORE tree.
// Requires that fgComputeLife has completed, and that tree is in the same
// statement as endTree, and that it comes before endTree in execution order
- VARSET_VALRET_TP fgUpdateLiveSet(VARSET_VALARG_TP liveSet,
- GenTreePtr tree,
- GenTreePtr endTree)
+ VARSET_VALRET_TP fgUpdateLiveSet(VARSET_VALARG_TP liveSet, GenTreePtr tree, GenTreePtr endTree)
{
VARSET_TP VARSET_INIT(this, newLiveSet, liveSet);
- while (tree != NULL && tree != endTree->gtNext)
+ while (tree != nullptr && tree != endTree->gtNext)
{
VarSetOps::AssignNoCopy(this, newLiveSet, fgUpdateLiveSet(newLiveSet, tree));
tree = tree->gtNext;
}
- assert (tree == endTree->gtNext);
+ assert(tree == endTree->gtNext);
return newLiveSet;
}
- void fgInterBlockLocalVarLiveness();
+ void fgInterBlockLocalVarLiveness();
// The presence of "x op= y" operations presents some difficulties for SSA: this is both a use of some SSA name of
// "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose
// whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us
// to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree.
typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, unsigned, JitSimplerHashBehavior> NodeToUnsignedMap;
- NodeToUnsignedMap* m_opAsgnVarDefSsaNums;
- NodeToUnsignedMap* GetOpAsgnVarDefSsaNums()
+ NodeToUnsignedMap* m_opAsgnVarDefSsaNums;
+ NodeToUnsignedMap* GetOpAsgnVarDefSsaNums()
{
- if (m_opAsgnVarDefSsaNums == NULL)
+ if (m_opAsgnVarDefSsaNums == nullptr)
{
m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator());
}
@@ -3639,7 +3585,7 @@ public :
// VN.
inline ValueNum GetUseAsgDefVNOrTreeVN(GenTreePtr tree);
- // Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl".
+ // Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl".
// Except: assumes that lcl is a def, and if it is
// a def appearing in "lcl op= rhs" (GTF_VAR_USEASG), looks up and returns the SSA number for the "def",
// rather than the "use" SSA number recorded in the tree "lcl".
@@ -3659,78 +3605,81 @@ public :
// It would be a shame, given the simple form at the source level, to be unable to track the values in the
// fields of "s1" after this. But "s1" does not appear in the assignments that modify it. How, then, to
// give it SSA names and value numbers?
- //
+ //
// The solution is to use the side table described below to annotate each of the field-wise assignments at the
// end with an instance of the structure below, whose fields are described in the declaration.
struct IndirectAssignmentAnnotation
{
- unsigned m_lclNum; // The local num that is being indirectly assigned.
- FieldSeqNode* m_fieldSeq; // If the LHS of the struct assignment is itself a struct field dereference,
- // as in "s0.g = s2", then "m_lclNum" would be "s0", and "m_fieldSeq" would
- // be the singleton field sequence "g". The individual assignments would
- // further append the fields of "s.g" to that.
- bool m_isEntire; // True iff this assignment writes all of m_lclNum. (This can occur if the
- // structure has a single field).
- unsigned m_defSsaNum; // The new SSA number of "m_lclNum" after the assignment.
- unsigned m_useSsaNum; // Only valid if "m_isEntire" is false; if so, the SSA number of "m_lclNum" before the
- // assignment.
-
- IndirectAssignmentAnnotation(unsigned lclNum,
+ unsigned m_lclNum; // The local num that is being indirectly assigned.
+ FieldSeqNode* m_fieldSeq; // If the LHS of the struct assignment is itself a struct field dereference,
+ // as in "s0.g = s2", then "m_lclNum" would be "s0", and "m_fieldSeq" would
+ // be the singleton field sequence "g". The individual assignments would
+ // further append the fields of "s.g" to that.
+ bool m_isEntire; // True iff this assignment writes all of m_lclNum. (This can occur if the
+ // structure has a single field).
+ unsigned m_defSsaNum; // The new SSA number of "m_lclNum" after the assignment.
+ unsigned m_useSsaNum; // Only valid if "m_isEntire" is false; if so, the SSA number of "m_lclNum" before the
+ // assignment.
+
+ IndirectAssignmentAnnotation(unsigned lclNum,
FieldSeqNode* fldSeq,
- bool isEntire,
- unsigned defSsaNum = SsaConfig::RESERVED_SSA_NUM,
- unsigned useSsaNum = SsaConfig::RESERVED_SSA_NUM)
+ bool isEntire,
+ unsigned defSsaNum = SsaConfig::RESERVED_SSA_NUM,
+ unsigned useSsaNum = SsaConfig::RESERVED_SSA_NUM)
: m_lclNum(lclNum), m_fieldSeq(fldSeq), m_isEntire(isEntire), m_defSsaNum(defSsaNum), m_useSsaNum(useSsaNum)
- {}
-
+ {
+ }
};
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, IndirectAssignmentAnnotation*, JitSimplerHashBehavior> NodeToIndirAssignMap;
+ typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, IndirectAssignmentAnnotation*, JitSimplerHashBehavior>
+ NodeToIndirAssignMap;
NodeToIndirAssignMap* m_indirAssignMap;
NodeToIndirAssignMap* GetIndirAssignMap()
{
- if (m_indirAssignMap == NULL)
+ if (m_indirAssignMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_IndirAssignMap, and use that for allocation.
IAllocator* ialloc = new (this, CMK_IndirAssignMap) CompAllocator(this, CMK_IndirAssignMap);
- m_indirAssignMap = new (ialloc) NodeToIndirAssignMap(ialloc);
+ m_indirAssignMap = new (ialloc) NodeToIndirAssignMap(ialloc);
}
return m_indirAssignMap;
}
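GetIndirAssignMap shows the lazily created side table pattern: the map does not exist until the first indirect assignment needs an annotation. A compact sketch of the same pattern using std::unordered_map and hypothetical Node/IndirAssignNote stand-ins in place of SimplerHashTable, GenTree, and IndirectAssignmentAnnotation:

#include <cassert>
#include <memory>
#include <unordered_map>

struct Node { int id; };        // stand-in for a GenTree node
struct IndirAssignNote {        // stand-in for IndirectAssignmentAnnotation
    unsigned lclNum;            // local being indirectly assigned
    bool     isEntire;          // does the assignment write the whole local?
    unsigned defSsaNum;         // SSA number of the local after the assignment
};

class CompilerSketch {
    using IndirMap = std::unordered_map<const Node*, IndirAssignNote>;
    std::unique_ptr<IndirMap> m_indirAssignMap; // stays null until first use

public:
    // Create the side table on first request, mirroring the null check above.
    IndirMap* GetIndirAssignMap() {
        if (m_indirAssignMap == nullptr) {
            m_indirAssignMap = std::make_unique<IndirMap>();
        }
        return m_indirAssignMap.get();
    }
};

int main() {
    CompilerSketch comp;
    Node asg{42};
    // Annotate an assignment node with the local it defines and its new SSA number.
    (*comp.GetIndirAssignMap())[&asg] = IndirAssignNote{3, true, 7};
    assert(comp.GetIndirAssignMap()->at(&asg).defSsaNum == 7);
    return 0;
}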
-
// Performs SSA conversion.
- void fgSsaBuild();
+ void fgSsaBuild();
// Reset any data structures to the state expected by "fgSsaBuild", so it can be run again.
- void fgResetForSsa();
+ void fgResetForSsa();
- unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run.
+ unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run.
// Returns "true" iff lcl "lclNum" should be excluded from SSA.
- inline bool fgExcludeFromSsa(unsigned lclNum);
+ inline bool fgExcludeFromSsa(unsigned lclNum);
// The value numbers for this compilation.
- ValueNumStore* vnStore;
- public:
+ ValueNumStore* vnStore;
- ValueNumStore* GetValueNumStore() { return vnStore; }
+public:
+ ValueNumStore* GetValueNumStore()
+ {
+ return vnStore;
+ }
// Do value numbering (assign a value number to each
// tree node).
- void fgValueNumber();
+ void fgValueNumber();
// Updates "fgCurHeap" via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN.
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
- // The 'indType' is the indirection type of the lhs of the assignment and will typically
- // match the element type of the array or fldSeq. When this type doesn't match
+ // The 'indType' is the indirection type of the lhs of the assignment and will typically
+ // match the element type of the array or fldSeq. When this type doesn't match
// or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN]
//
- void fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
- ValueNum arrVN,
- ValueNum inxVN,
- FieldSeqNode* fldSeq,
- ValueNum rhsVN,
- var_types indType);
+ void fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
+ ValueNum arrVN,
+ ValueNum inxVN,
+ FieldSeqNode* fldSeq,
+ ValueNum rhsVN,
+ var_types indType);
// Requires that "tree" is a GT_IND marked as an array index, and that its address argument
// has been parsed to yield the other input arguments. If evaluation of the address
@@ -3738,25 +3687,23 @@ public :
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique
// VN for the conservative VN.) Also marks the tree's argument as the address of an array element.
- // The type tree->TypeGet() will typically match the element type of the array or fldSeq.
+ // The type tree->TypeGet() will typically match the element type of the array or fldSeq.
// When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN
//
- ValueNum fgValueNumberArrIndexVal(GenTreePtr tree,
- CORINFO_CLASS_HANDLE elemTypeEq,
- ValueNum arrVN,
- ValueNum inxVN,
- ValueNum excVN,
- FieldSeqNode* fldSeq);
+ ValueNum fgValueNumberArrIndexVal(GenTreePtr tree,
+ CORINFO_CLASS_HANDLE elemTypeEq,
+ ValueNum arrVN,
+ ValueNum inxVN,
+ ValueNum excVN,
+ FieldSeqNode* fldSeq);
// Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvn" to represent the exception set thrown
// by evaluating the array index expression "tree". Returns the value number resulting from
// dereferencing the array in the current heap state. If "tree" is non-null, it must be the
// "GT_IND" that does the dereference, and it is given the returned value number.
- ValueNum fgValueNumberArrIndexVal(GenTreePtr tree,
- struct VNFuncApp* funcApp,
- ValueNum addrXvn);
+ ValueNum fgValueNumberArrIndexVal(GenTreePtr tree, struct VNFuncApp* funcApp, ValueNum addrXvn);
- unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run.
+ unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run.
// Utility functions for fgValueNumber.
@@ -3766,65 +3713,62 @@ public :
// If "false", then we may assume that all inputs to phi RHS's of such definitions
// have already been assigned value numbers; if they are all assigned the *same* value
// number, then the LHS SSA name gets the same VN.
- void fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis);
+ void fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis);
// Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the
// innermost loop of which "entryBlock" is the entry. Returns the value number that should be
    // assumed for the heap at the start of "entryBlock".
- ValueNum fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned loopNum);
+ ValueNum fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned loopNum);
// Called when an operation (performed by "tree", described by "msg") may cause the global Heap to be mutated.
- void fgMutateHeap (GenTreePtr tree
- DEBUGARG(const char * msg) );
+ void fgMutateHeap(GenTreePtr tree DEBUGARG(const char* msg));
// Tree caused an update in the current heap VN. If "tree" has an associated heap SSA #, record that
// value in that SSA #.
- void fgValueNumberRecordHeapSsa(GenTreePtr tree);
+ void fgValueNumberRecordHeapSsa(GenTreePtr tree);
// The input 'tree' is a leaf node that is a constant
// Assign the proper value number to the tree
- void fgValueNumberTreeConst(GenTreePtr tree);
+ void fgValueNumberTreeConst(GenTreePtr tree);
// Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree.
// (With some exceptions: the VN of the lhs of an assignment is assigned as part of the
// assignment.)
// If "evalAsgLhsInd" is true, evaluate a GT_IND node, even if it's labeled as the LHS of
// an assignment.
- void fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd = false);
+ void fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd = false);
// Does value-numbering for a block assignment.
- void fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd);
+ void fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd);
- // Does value-numbering for a cast tree.
- void fgValueNumberCastTree(GenTreePtr tree);
+ // Does value-numbering for a cast tree.
+ void fgValueNumberCastTree(GenTreePtr tree);
- // Does value-numbering for an intrinsic tree.
- void fgValueNumberIntrinsic(GenTreePtr tree);
+ // Does value-numbering for an intrinsic tree.
+ void fgValueNumberIntrinsic(GenTreePtr tree);
// Does value-numbering for a call. We interpret some helper calls.
- void fgValueNumberCall(GenTreeCall* call);
+ void fgValueNumberCall(GenTreeCall* call);
// The VN of some nodes in "args" may have changed -- reassign VNs to the arg list nodes.
- void fgUpdateArgListVNs(GenTreeArgList* args);
+ void fgUpdateArgListVNs(GenTreeArgList* args);
// Does value-numbering for a helper "call" that has a VN function symbol "vnf".
- void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc);
+ void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc);
// Requires "helpCall" to be a helper call. Assigns it a value number;
// we understand the semantics of some of the calls. Returns "true" if
// the call may modify the heap (we assume arbitrary memory side effects if so).
- bool fgValueNumberHelperCall(GenTreeCall* helpCall);
-
+ bool fgValueNumberHelperCall(GenTreeCall* helpCall);
// Requires "helpFunc" to be pure. Returns the corresponding VNFunc.
- VNFunc fgValueNumberHelperMethVNFunc(CorInfoHelpFunc helpFunc);
+ VNFunc fgValueNumberHelperMethVNFunc(CorInfoHelpFunc helpFunc);
// This is the current value number for the "Heap" implicit variable while
// doing value numbering. This is the value number under the "liberal" interpretation
// of heap values; the "conservative" interpretation needs no VN, since every access of
// the heap yields an unknown value.
- ValueNum fgCurHeapVN;
-
+ ValueNum fgCurHeapVN;
// Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT,
// requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit
@@ -3833,8 +3777,9 @@ public :
{
if (elemStructType != nullptr)
{
- assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF || varTypeIsIntegral(elemTyp));
- assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid.
+ assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF ||
+ varTypeIsIntegral(elemTyp));
+ assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid.
return elemStructType;
}
else
@@ -3846,7 +3791,7 @@ public :
// If "clsHnd" is the result of an "EncodePrim" call, returns true and sets "*pPrimType" to the
// var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is
// the struct type of the element).
- static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd)
+ static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd)
{
size_t clsHndVal = size_t(clsHnd);
if (clsHndVal & 0x1)
@@ -3860,128 +3805,122 @@ public :
}
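    // Sketch (illustrative only, not part of this change): the low-bit tagging that the assert
    // and DecodeElemType above rely on. A class handle is pointer-aligned, so its low bit can be
    // borrowed to mark an encoded primitive var_types; the helper name below is hypothetical.
    static CORINFO_CLASS_HANDLE sketchEncodePrim(var_types elemTyp)
    {
        // Shift the primitive type left and set the tag bit; DecodeElemType reverses this.
        return (CORINFO_CLASS_HANDLE)((size_t(elemTyp) << 1) | 0x1);
    }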
// Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types
- var_types getJitGCType(BYTE gcType);
-
- enum structPassingKind { SPK_Unknown, // Invalid value, never returned
- SPK_PrimitiveType, // The struct is passed/returned using a primitive type.
- SPK_ByValue, // The struct is passed/returned by value (using the ABI rules)
- // for ARM64 and UNIX_X64 in multiple registers. (when all of the
- // parameters registers are used, then the stack will be used)
- // for X86 passed on the stack, for ARM32 passed in registers
- // or the stack or split between registers and the stack.
- SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers.
- SPK_ByReference }; // The struct is passed/returned by reference to a copy/buffer.
+ var_types getJitGCType(BYTE gcType);
+
+ enum structPassingKind
+ {
+ SPK_Unknown, // Invalid value, never returned
+ SPK_PrimitiveType, // The struct is passed/returned using a primitive type.
+ SPK_ByValue, // The struct is passed/returned by value (using the ABI rules)
+ // for ARM64 and UNIX_X64 in multiple registers. (when all of the
+                          // parameter registers are used, then the stack will be used)
+ // for X86 passed on the stack, for ARM32 passed in registers
+ // or the stack or split between registers and the stack.
+ SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers.
+        SPK_ByReference    // The struct is passed/returned by reference to a copy/buffer.
+    };
// Get the "primitive" type that is is used when we are given a struct of size 'structSize'.
// For pointer sized structs the 'clsHnd' is used to determine if the struct contains GC ref.
// A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double
// If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned.
//
- var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd);
+ var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd);
// Get the type that is used to pass values of the given struct type.
// If you have already retrieved the struct size then pass it as the optional third argument
//
- var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
- structPassingKind* wbPassStruct,
- unsigned structSize = 0);
+ var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
+ structPassingKind* wbPassStruct,
+ unsigned structSize = 0);
// Get the type that is used to return values of the given struct type.
// If you have already retrieved the struct size then pass it as the optional third argument
//
- var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
- structPassingKind* wbPassStruct,
- unsigned structSize = 0);
-
+ var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
+ structPassingKind* wbPassStruct,
+ unsigned structSize = 0);
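    // Usage sketch for the struct-passing queries above (illustrative only, not part of this
    // change; the wrapper function and its 'clsHnd' argument are hypothetical):
    var_types sketchGetStructArgType(CORINFO_CLASS_HANDLE clsHnd)
    {
        structPassingKind howPassed;
        var_types         argType = getArgTypeForStruct(clsHnd, &howPassed);
        if (howPassed == SPK_ByReference)
        {
            // The caller makes a copy of the struct and passes a pointer to that copy.
        }
        return argType; // For SPK_PrimitiveType this is the single register type, e.g. TYP_INT.
    }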
#ifdef DEBUG
// Print a representation of "vnp" or "vn" on standard output.
// If "level" is non-zero, we also print out a partial expansion of the value.
void vnpPrint(ValueNumPair vnp, unsigned level);
- void vnPrint (ValueNum vn, unsigned level);
+ void vnPrint(ValueNum vn, unsigned level);
#endif
// Dominator computation member functions
// Not exposed outside Compiler
protected:
+ bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2
+
+ bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2
- bool fgDominate (BasicBlock *b1, BasicBlock *b2); // Return true if b1 dominates b2
+ void fgComputeDoms(); // Computes the immediate dominators for each basic block in the
+ // flow graph. We first assume the fields bbIDom on each
+ // basic block are invalid. This computation is needed later
+ // by fgBuildDomTree to build the dominance tree structure.
+ // Based on: A Simple, Fast Dominance Algorithm
+ // by Keith D. Cooper, Timothy J. Harvey, and Ken Kennedy
- bool fgReachable (BasicBlock *b1, BasicBlock *b2); // Returns true if block b1 can reach block b2
+ BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block.
+ // Note: this is relatively slow compared to calling fgDominate(),
+ // especially if dealing with a single block versus block check.
- void fgComputeDoms (); // Computes the immediate dominators for each basic block in the
- // flow graph. We first assume the fields bbIDom on each
- // basic block are invalid. This computation is needed later
- // by fgBuildDomTree to build the dominance tree structure.
- // Based on: A Simple, Fast Dominance Algorithm
- // by Keith D. Cooper, Timothy J. Harvey, and Ken Kennedy
+ void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.)
- BlockSet_ValRet_T fgGetDominatorSet (BasicBlock* block); // Returns a set of blocks that dominate the given block.
- // Note: this is relatively slow compared to calling fgDominate(),
- // especially if dealing with a single block versus block check.
+ void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'.
- void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.)
+ bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets.
- void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'.
+ void fgComputeReachability(); // Perform flow graph node reachability analysis.
- bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets.
+ BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets.
- void fgComputeReachability (); // Perform flow graph node reachability analysis.
+ void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be
+ // processed in topological sort, this function takes care of that.
- BasicBlock * fgIntersectDom (BasicBlock *a, BasicBlock *b); // Intersect two immediate dominator sets.
+ void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count);
- void fgDfsInvPostOrder (); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be
- // processed in topological sort, this function takes care of that.
+ BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph.
+ // Returns this as a set.
- void fgDfsInvPostOrderHelper (BasicBlock* block,
- BlockSet& visited,
- unsigned* count);
+ BlockSet_ValRet_T fgDomTreeEntryNodes(BasicBlockList** domTree); // Computes which nodes in the dominance forest are
+ // root nodes. Returns this as a set.
- BlockSet_ValRet_T fgDomFindStartNodes (); // Computes which basic blocks don't have incoming edges in the flow graph. Returns this as a set.
-
- BlockSet_ValRet_T fgDomTreeEntryNodes (BasicBlockList** domTree); // Computes which nodes in the dominance forest are root nodes. Returns this as a set.
-
#ifdef DEBUG
- void fgDispDomTree (BasicBlockList** domTree); // Helper that prints out the Dominator Tree in debug builds.
-#endif // DEBUG
+ void fgDispDomTree(BasicBlockList** domTree); // Helper that prints out the Dominator Tree in debug builds.
+#endif // DEBUG
- void fgBuildDomTree (); // Once we compute all the immediate dominator sets for each node in the flow graph
- // (performed by fgComputeDoms), this procedure builds the dominance tree represented
- // adjacency lists.
+ void fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph
+ // (performed by fgComputeDoms), this procedure builds the dominance tree represented
+ // adjacency lists.
    // In order to speed up queries of the form 'Does A dominate B', we can perform a DFS preorder and postorder
    // traversal of the dominance tree, and the dominance query becomes: A dominates B iff preOrder(A) <= preOrder(B)
// && postOrder(A) >= postOrder(B) making the computation O(1).
- void fgTraverseDomTree (unsigned bbNum,
- BasicBlockList** domTree,
- unsigned* preNum,
- unsigned* postNum);
-
- // When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets, and dominators.
- void fgUpdateChangedFlowGraph();
+ void fgTraverseDomTree(unsigned bbNum, BasicBlockList** domTree, unsigned* preNum, unsigned* postNum);
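    // Sketch of the O(1) dominance query described above (illustrative only, not part of this
    // change; preOrder/postOrder are hypothetical stand-ins for the numbers recorded per block
    // by fgTraverseDomTree):
    static bool sketchDominates(unsigned a, unsigned b, const unsigned* preOrder, const unsigned* postOrder)
    {
        // A dominates B iff A's DFS interval in the dominator tree encloses B's interval.
        return (preOrder[a] <= preOrder[b]) && (postOrder[a] >= postOrder[b]);
    }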
-public:
+ // When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets, and
+ // dominators.
+ void fgUpdateChangedFlowGraph();
+public:
// Compute the predecessors of the blocks in the control flow graph.
- void fgComputePreds ();
+ void fgComputePreds();
// Remove all predecessor information.
- void fgRemovePreds ();
+ void fgRemovePreds();
// Compute the cheap flow graph predecessors lists. This is used in some early phases
// before the full predecessors lists are computed.
- void fgComputeCheapPreds();
+ void fgComputeCheapPreds();
private:
+ void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred);
- void fgAddCheapPred (BasicBlock* block,
- BasicBlock* blockPred);
-
- void fgRemoveCheapPred (BasicBlock* block,
- BasicBlock* blockPred);
+ void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred);
public:
-
enum GCPollType
{
GCPOLL_NONE,
@@ -3989,32 +3928,31 @@ public:
GCPOLL_INLINE
};
-
// Initialize the per-block variable sets (used for liveness analysis).
- void fgInitBlockVarSets();
+ void fgInitBlockVarSets();
- //true if we've gone through and created GC Poll calls.
- bool fgGCPollsCreated;
- void fgMarkGCPollBlocks();
- void fgCreateGCPolls();
- bool fgCreateGCPoll(GCPollType pollType, BasicBlock * block);
+ // true if we've gone through and created GC Poll calls.
+ bool fgGCPollsCreated;
+ void fgMarkGCPollBlocks();
+ void fgCreateGCPolls();
+ bool fgCreateGCPoll(GCPollType pollType, BasicBlock* block);
// Requires that "block" is a block that returns from
    // a finally. Returns the number of successors (jump targets
// of blocks in the covered "try" that did a "LEAVE".)
- unsigned fgNSuccsOfFinallyRet(BasicBlock * block);
+ unsigned fgNSuccsOfFinallyRet(BasicBlock* block);
// Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from
    // a finally. Returns its "i"th successor (jump targets
// of blocks in the covered "try" that did a "LEAVE".)
// Requires that "i" < fgNSuccsOfFinallyRet(block).
- BasicBlock * fgSuccOfFinallyRet(BasicBlock * block, unsigned i);
+ BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i);
- private:
+private:
// Factor out common portions of the impls of the methods above.
- void fgSuccOfFinallyRetWork(BasicBlock * block, unsigned i, BasicBlock ** bres, unsigned * nres);
- public:
+ void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres);
+public:
// For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement,
// skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.)
// SwitchUniqueSuccSet contains the non-duplicated switch targets.
@@ -4024,8 +3962,9 @@ public:
// we leave the entry associated with the block, but it will no longer be accessed.)
struct SwitchUniqueSuccSet
{
- unsigned numDistinctSuccs; // Number of distinct targets of the switch.
- BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target successors.
+ unsigned numDistinctSuccs; // Number of distinct targets of the switch.
+ BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target
+ // successors.
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
@@ -4033,16 +3972,18 @@ public:
void UpdateTarget(IAllocator* alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
};
- typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet, JitSimplerHashBehavior> BlockToSwitchDescMap;
+ typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet, JitSimplerHashBehavior>
+ BlockToSwitchDescMap;
- private:
+private:
// Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow
// iteration over only the distinct successors.
BlockToSwitchDescMap* m_switchDescMap;
- public:
+
+public:
BlockToSwitchDescMap* GetSwitchDescMap()
{
- if (m_switchDescMap == NULL)
+ if (m_switchDescMap == nullptr)
{
m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator());
}
@@ -4069,370 +4010,329 @@ public:
// Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap.
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk);
- BasicBlock * fgFirstBlockOfHandler(BasicBlock* block);
+ BasicBlock* fgFirstBlockOfHandler(BasicBlock* block);
+
+ flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred);
- flowList * fgGetPredForBlock (BasicBlock* block,
- BasicBlock* blockPred);
+ flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred);
- flowList* fgGetPredForBlock (BasicBlock* block,
- BasicBlock* blockPred,
- flowList*** ptrToPred);
+ flowList* fgSpliceOutPred(BasicBlock* block, BasicBlock* blockPred);
- flowList * fgSpliceOutPred (BasicBlock* block,
- BasicBlock* blockPred);
+ flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred);
- flowList * fgRemoveRefPred (BasicBlock* block,
- BasicBlock* blockPred);
+ flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred);
- flowList* fgRemoveAllRefPreds(BasicBlock* block,
- BasicBlock* blockPred);
+ flowList* fgRemoveAllRefPreds(BasicBlock* block, flowList** ptrToPred);
- flowList* fgRemoveAllRefPreds(BasicBlock* block,
- flowList** ptrToPred);
+ void fgRemoveBlockAsPred(BasicBlock* block);
- void fgRemoveBlockAsPred(BasicBlock* block);
+ void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock);
- void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock,
- BasicBlock* newSwitchBlock);
+ void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget);
- void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch,
- BasicBlock* newTarget,
- BasicBlock* oldTarget);
+ void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget);
- void fgReplaceJumpTarget(BasicBlock* block,
- BasicBlock* newTarget,
- BasicBlock* oldTarget);
+ void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred);
- void fgReplacePred (BasicBlock* block,
- BasicBlock* oldPred,
- BasicBlock* newPred);
+ flowList* fgAddRefPred(BasicBlock* block,
+ BasicBlock* blockPred,
+ flowList* oldEdge = nullptr,
+ bool initializingPreds = false); // Only set to 'true' when we are computing preds in
+ // fgComputePreds()
- flowList * fgAddRefPred (BasicBlock* block,
- BasicBlock* blockPred,
- flowList* oldEdge = NULL,
- bool initializingPreds = false); // Only set to 'true' when we are computing preds in fgComputePreds()
+ void fgFindBasicBlocks();
- void fgFindBasicBlocks ();
+ bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt);
- bool fgIsBetterFallThrough(BasicBlock * bCur, BasicBlock * bAlt);
+ bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion);
- bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion);
+ BasicBlock* fgFindInsertPoint(unsigned regionIndex,
+ bool putInTryRegion,
+ BasicBlock* startBlk,
+ BasicBlock* endBlk,
+ BasicBlock* nearBlk,
+ BasicBlock* jumpBlk,
+ bool runRarely);
- BasicBlock * fgFindInsertPoint (unsigned regionIndex,
- bool putInTryRegion,
- BasicBlock * startBlk,
- BasicBlock * endBlk,
- BasicBlock * nearBlk,
- BasicBlock * jumpBlk,
- bool runRarely);
+ unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr);
- unsigned fgGetNestingLevel (BasicBlock * block,
- unsigned * pFinallyNesting = NULL);
+ void fgRemoveEmptyBlocks();
- void fgRemoveEmptyBlocks();
+ void fgRemoveLinearOrderDependencies(GenTreePtr stmt);
- void fgRemoveLinearOrderDependencies(GenTreePtr stmt);
+ void fgRemoveStmt(BasicBlock* block, GenTreePtr stmt, bool updateRefCnt = true);
- void fgRemoveStmt (BasicBlock * block,
- GenTreePtr stmt,
- bool updateRefCnt = true);
+ bool fgCheckRemoveStmt(BasicBlock* block, GenTreePtr stmt);
- bool fgCheckRemoveStmt (BasicBlock * block,
- GenTreePtr stmt);
+ void fgCreateLoopPreHeader(unsigned lnum);
- void fgCreateLoopPreHeader(unsigned lnum);
+ void fgUnreachableBlock(BasicBlock* block);
- void fgUnreachableBlock(BasicBlock * block);
+ void fgRemoveJTrue(BasicBlock* block);
- void fgRemoveJTrue (BasicBlock * block);
+ BasicBlock* fgLastBBInMainFunction();
- BasicBlock * fgLastBBInMainFunction();
+ BasicBlock* fgEndBBAfterMainFunction();
- BasicBlock * fgEndBBAfterMainFunction();
+ void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd);
- void fgUnlinkRange (BasicBlock * bBeg,
- BasicBlock * bEnd);
+ void fgRemoveBlock(BasicBlock* block, bool unreachable);
- void fgRemoveBlock (BasicBlock * block,
- bool unreachable);
+ bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext);
- bool fgCanCompactBlocks(BasicBlock * block,
- BasicBlock * bNext);
+ void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext);
- void fgCompactBlocks (BasicBlock * block,
- BasicBlock * bNext);
+ void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext);
- void fgUpdateLoopsAfterCompacting(BasicBlock * block, BasicBlock* bNext);
+ BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst);
- BasicBlock * fgConnectFallThrough(BasicBlock * bSrc,
- BasicBlock * bDst);
+ bool fgRenumberBlocks();
- bool fgRenumberBlocks();
+ bool fgExpandRarelyRunBlocks();
- bool fgExpandRarelyRunBlocks();
-
- bool fgEhAllowsMoveBlock(BasicBlock * bBefore, BasicBlock * bAfter);
+ bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter);
- void fgMoveBlocksAfter(BasicBlock * bStart, BasicBlock * bEnd, BasicBlock * insertAfterBlk);
+ void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk);
enum FG_RELOCATE_TYPE
{
- FG_RELOCATE_TRY, // relocate the 'try' region
- FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary)
+ FG_RELOCATE_TRY, // relocate the 'try' region
+ FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary)
};
- BasicBlock * fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType);
+ BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType);
#if FEATURE_EH_FUNCLETS
#if defined(_TARGET_ARM_)
- void fgClearFinallyTargetBit(BasicBlock * block);
+ void fgClearFinallyTargetBit(BasicBlock* block);
#endif // defined(_TARGET_ARM_)
- bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block);
- bool fgAnyIntraHandlerPreds(BasicBlock* block);
- void fgInsertFuncletPrologBlock(BasicBlock* block);
- void fgCreateFuncletPrologBlocks();
- void fgCreateFunclets();
-#else // !FEATURE_EH_FUNCLETS
- bool fgRelocateEHRegions();
+ bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block);
+ bool fgAnyIntraHandlerPreds(BasicBlock* block);
+ void fgInsertFuncletPrologBlock(BasicBlock* block);
+ void fgCreateFuncletPrologBlocks();
+ void fgCreateFunclets();
+#else // !FEATURE_EH_FUNCLETS
+ bool fgRelocateEHRegions();
#endif // !FEATURE_EH_FUNCLETS
- bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target);
+ bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target);
- bool fgBlockEndFavorsTailDuplication(BasicBlock *block);
+ bool fgBlockEndFavorsTailDuplication(BasicBlock* block);
- bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock *block);
+ bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block);
- bool fgOptimizeFallthroughTailDup(BasicBlock* block, BasicBlock* target);
+ bool fgOptimizeFallthroughTailDup(BasicBlock* block, BasicBlock* target);
- bool fgOptimizeEmptyBlock(BasicBlock* block);
+ bool fgOptimizeEmptyBlock(BasicBlock* block);
- bool fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest);
+ bool fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest);
- bool fgOptimizeBranch(BasicBlock * bJump);
+ bool fgOptimizeBranch(BasicBlock* bJump);
- bool fgOptimizeSwitchBranches(BasicBlock * block);
+ bool fgOptimizeSwitchBranches(BasicBlock* block);
- bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev);
+ bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev);
- bool fgOptimizeSwitchJumps();
+ bool fgOptimizeSwitchJumps();
#ifdef DEBUG
- void fgPrintEdgeWeights();
+ void fgPrintEdgeWeights();
#endif
- void fgComputeEdgeWeights();
+ void fgComputeEdgeWeights();
- void fgReorderBlocks ();
+ void fgReorderBlocks();
- void fgDetermineFirstColdBlock();
+ void fgDetermineFirstColdBlock();
- bool fgIsForwardBranch (BasicBlock * bJump, BasicBlock * bSrc = NULL);
+ bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr);
- bool fgUpdateFlowGraph (bool doTailDup = false);
+ bool fgUpdateFlowGraph(bool doTailDup = false);
- void fgFindOperOrder ();
+ void fgFindOperOrder();
    // method that returns whether you should split here
- typedef bool (fgSplitPredicate)(GenTree * tree, GenTree *parent, fgWalkData *data);
+ typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data);
- void fgSetBlockOrder ();
+ void fgSetBlockOrder();
- void fgRemoveReturnBlock(BasicBlock * block);
+ void fgRemoveReturnBlock(BasicBlock* block);
/* Helper code that has been factored out */
- inline void fgConvertBBToThrowBB(BasicBlock * block);
+ inline void fgConvertBBToThrowBB(BasicBlock* block);
- bool fgCastNeeded(GenTreePtr tree, var_types toType);
- GenTreePtr fgDoNormalizeOnStore(GenTreePtr tree);
- GenTreePtr fgMakeTmpArgNode(unsigned tmpVarNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool passedInRegisters));
+ bool fgCastNeeded(GenTreePtr tree, var_types toType);
+ GenTreePtr fgDoNormalizeOnStore(GenTreePtr tree);
+ GenTreePtr fgMakeTmpArgNode(
+ unsigned tmpVarNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool passedInRegisters));
// The following check for loops that don't execute calls
- bool fgLoopCallMarked;
+ bool fgLoopCallMarked;
- void fgLoopCallTest (BasicBlock *srcBB,
- BasicBlock *dstBB);
- void fgLoopCallMark ();
+ void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB);
+ void fgLoopCallMark();
- void fgMarkLoopHead (BasicBlock * block);
+ void fgMarkLoopHead(BasicBlock* block);
- unsigned fgGetCodeEstimate(BasicBlock * block);
+ unsigned fgGetCodeEstimate(BasicBlock* block);
#if DUMP_FLOWGRAPHS
- const char * fgProcessEscapes(const char * nameIn, escapeMapping_t *map);
- FILE * fgOpenFlowGraphFile(bool * wbDontClose, Phases phase, LPCWSTR type);
- bool fgDumpFlowGraph(Phases phase);
+ const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map);
+ FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, LPCWSTR type);
+ bool fgDumpFlowGraph(Phases phase);
#endif // DUMP_FLOWGRAPHS
#ifdef DEBUG
- void fgDispDoms ();
- void fgDispReach ();
- void fgDispBBLiveness (BasicBlock * block);
- void fgDispBBLiveness ();
- void fgTableDispBasicBlock (BasicBlock * block,
- int ibcColWidth = 0);
- void fgDispBasicBlocks (BasicBlock * firstBlock,
- BasicBlock * lastBlock,
- bool dumpTrees);
- void fgDispBasicBlocks (bool dumpTrees = false);
- void fgDumpStmtTree (GenTreePtr stmt, unsigned blkNum);
- void fgDumpTrees (BasicBlock* firstBlock,
- BasicBlock* lastBlock);
-
- static fgWalkPreFn fgStress64RsltMulCB;
- void fgStress64RsltMul ();
- void fgDebugCheckUpdate ();
- void fgDebugCheckBBlist (bool checkBBNum = false, bool checkBBRefs = true);
- void fgDebugCheckBlockLinks ();
- void fgDebugCheckLinks (bool morphTrees = false);
- void fgDebugCheckNodeLinks (BasicBlock* block, GenTreePtr stmt);
- unsigned fgDebugCheckLinearTree (BasicBlock* block,
- GenTreePtr stmt,
- GenTreePtr tree,
- bool printNodes = false);
- void fgDebugCheckLinearNodeLinks (BasicBlock* block, GenTreePtr topLevelStmt, bool printNodes = false);
- void fgDebugCheckFlags (GenTreePtr tree);
+ void fgDispDoms();
+ void fgDispReach();
+ void fgDispBBLiveness(BasicBlock* block);
+ void fgDispBBLiveness();
+ void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0);
+ void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees);
+ void fgDispBasicBlocks(bool dumpTrees = false);
+ void fgDumpStmtTree(GenTreePtr stmt, unsigned blkNum);
+ void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock);
+
+ static fgWalkPreFn fgStress64RsltMulCB;
+ void fgStress64RsltMul();
+ void fgDebugCheckUpdate();
+ void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true);
+ void fgDebugCheckBlockLinks();
+ void fgDebugCheckLinks(bool morphTrees = false);
+ void fgDebugCheckNodeLinks(BasicBlock* block, GenTreePtr stmt);
+ unsigned fgDebugCheckLinearTree(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree, bool printNodes = false);
+ void fgDebugCheckLinearNodeLinks(BasicBlock* block, GenTreePtr topLevelStmt, bool printNodes = false);
+ void fgDebugCheckFlags(GenTreePtr tree);
#endif
#ifdef LEGACY_BACKEND
- static void fgOrderBlockOps (GenTreePtr tree,
- regMaskTP reg0,
- regMaskTP reg1,
- regMaskTP reg2,
- GenTreePtr * opsPtr, // OUT
- regMaskTP * regsPtr); // OUT
-#endif // LEGACY_BACKEND
+ static void fgOrderBlockOps(GenTreePtr tree,
+ regMaskTP reg0,
+ regMaskTP reg1,
+ regMaskTP reg2,
+ GenTreePtr* opsPtr, // OUT
+ regMaskTP* regsPtr); // OUT
+#endif // LEGACY_BACKEND
static GenTreeStmt* fgFindTopLevelStmtBackwards(GenTreeStmt* stmt);
- static GenTreePtr fgGetFirstNode (GenTreePtr tree);
- static void fgSnipNode (GenTreeStmt* stmt, GenTreePtr node);
- static void fgSnipInnerNode (GenTreePtr node);
- static void fgDeleteTreeFromList(GenTreeStmt* stmt, GenTreePtr tree);
- static bool fgTreeIsInStmt(GenTree* tree, GenTreeStmt* stmt);
- static void fgInsertTreeInListBefore(GenTree* tree, GenTree* insertionPoint, GenTreeStmt* stmt);
- static void fgInsertTreeInListAfter(GenTree* tree, GenTree* insertionPoint, GenTreeStmt* stmt);
- GenTreeStmt* fgInsertTreeBeforeAsEmbedded(GenTree* tree, GenTree* before, GenTreeStmt* stmt, BasicBlock* block);
- GenTreeStmt* fgInsertTreeAfterAsEmbedded(GenTree* tree, GenTree* before, GenTreeStmt* stmt, BasicBlock* block);
- bool fgNodeContainsEmbeddedStatement(GenTree* tree, GenTreeStmt* topLevel);
- void fgRemoveContainedEmbeddedStatements(GenTreePtr tree, GenTreeStmt* topLevel, BasicBlock* block);
- bool fgStmtContainsNode(GenTreeStmt* stmt, GenTree* tree);
-
- inline bool fgIsInlining() { return fgExpandInline; }
+ static GenTreePtr fgGetFirstNode(GenTreePtr tree);
+ static void fgSnipNode(GenTreeStmt* stmt, GenTreePtr node);
+ static void fgSnipInnerNode(GenTreePtr node);
+ static void fgDeleteTreeFromList(GenTreeStmt* stmt, GenTreePtr tree);
+ static bool fgTreeIsInStmt(GenTree* tree, GenTreeStmt* stmt);
+ static void fgInsertTreeInListBefore(GenTree* tree, GenTree* insertionPoint, GenTreeStmt* stmt);
+ static void fgInsertTreeInListAfter(GenTree* tree, GenTree* insertionPoint, GenTreeStmt* stmt);
+ GenTreeStmt* fgInsertTreeBeforeAsEmbedded(GenTree* tree, GenTree* before, GenTreeStmt* stmt, BasicBlock* block);
+ GenTreeStmt* fgInsertTreeAfterAsEmbedded(GenTree* tree, GenTree* before, GenTreeStmt* stmt, BasicBlock* block);
+ bool fgNodeContainsEmbeddedStatement(GenTree* tree, GenTreeStmt* topLevel);
+ void fgRemoveContainedEmbeddedStatements(GenTreePtr tree, GenTreeStmt* topLevel, BasicBlock* block);
+ bool fgStmtContainsNode(GenTreeStmt* stmt, GenTree* tree);
+
+ inline bool fgIsInlining()
+ {
+ return fgExpandInline;
+ }
void fgTraverseRPO();
//--------------------- Walking the trees in the IR -----------------------
- struct fgWalkData
+ struct fgWalkData
{
- Compiler * compiler;
- fgWalkPreFn * wtprVisitorFn;
- fgWalkPostFn * wtpoVisitorFn;
- void * pCallbackData; // user-provided data
- bool wtprLclsOnly; // whether to only visit lclvar nodes
- GenTreePtr parent; // parent of current node, provided to callback
- GenTreeStack *parentStack; // stack of parent nodes, if asked for
+ Compiler* compiler;
+ fgWalkPreFn* wtprVisitorFn;
+ fgWalkPostFn* wtpoVisitorFn;
+ void* pCallbackData; // user-provided data
+ bool wtprLclsOnly; // whether to only visit lclvar nodes
+ GenTreePtr parent; // parent of current node, provided to callback
+ GenTreeStack* parentStack; // stack of parent nodes, if asked for
#ifdef DEBUG
- bool printModified; // callback can use this
+ bool printModified; // callback can use this
#endif
};
- template<bool computeStack>
- static fgWalkResult fgWalkTreePreRec (GenTreePtr *pTree, fgWalkData *fgWalkPre);
+ template <bool computeStack>
+ static fgWalkResult fgWalkTreePreRec(GenTreePtr* pTree, fgWalkData* fgWalkPre);
// general purpose tree-walker that is capable of doing pre- and post- order
// callbacks at the same time
- template<bool doPreOrder, bool doPostOrder>
- static fgWalkResult fgWalkTreeRec (GenTreePtr *pTree, fgWalkData *fgWalkPre);
+ template <bool doPreOrder, bool doPostOrder>
+ static fgWalkResult fgWalkTreeRec(GenTreePtr* pTree, fgWalkData* fgWalkPre);
- fgWalkResult fgWalkTreePre (GenTreePtr *pTree,
- fgWalkPreFn *visitor,
- void *pCallBackData = NULL,
- bool lclVarsOnly = false,
- bool computeStack = false);
+ fgWalkResult fgWalkTreePre(GenTreePtr* pTree,
+ fgWalkPreFn* visitor,
+ void* pCallBackData = nullptr,
+ bool lclVarsOnly = false,
+ bool computeStack = false);
- fgWalkResult fgWalkTree (GenTreePtr *pTree,
- fgWalkPreFn *preVisitor,
- fgWalkPostFn *postVisitor,
- void *pCallBackData = NULL);
+ fgWalkResult fgWalkTree(GenTreePtr* pTree,
+ fgWalkPreFn* preVisitor,
+ fgWalkPostFn* postVisitor,
+ void* pCallBackData = nullptr);
- void fgWalkAllTreesPre (fgWalkPreFn *visitor,
- void *pCallBackData);
+ void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData);
//----- Postorder
- template<bool computeStack>
- static fgWalkResult fgWalkTreePostRec (GenTreePtr *pTree, fgWalkData *fgWalkPre);
+ template <bool computeStack>
+ static fgWalkResult fgWalkTreePostRec(GenTreePtr* pTree, fgWalkData* fgWalkPre);
+
+ fgWalkResult fgWalkTreePost(GenTreePtr* pTree,
+ fgWalkPostFn* visitor,
+ void* pCallBackData = nullptr,
+ bool computeStack = false);
- fgWalkResult fgWalkTreePost (GenTreePtr *pTree,
- fgWalkPostFn *visitor,
- void *pCallBackData = NULL,
- bool computeStack = false);
-
- // An fgWalkPreFn that looks for expressions that have inline throws in
+ // An fgWalkPreFn that looks for expressions that have inline throws in
    // minopts mode. Basically it looks for trees with gtOverflowEx() or
// GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It
// returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags
// properly propagated to parent trees). It returns WALK_CONTINUE
// otherwise.
- static fgWalkResult fgChkThrowCB (GenTreePtr * pTree,
- Compiler::fgWalkData * data);
- static fgWalkResult fgChkLocAllocCB(GenTreePtr * pTree,
- Compiler::fgWalkData * data);
- static fgWalkResult fgChkQmarkCB(GenTreePtr * pTree,
- Compiler::fgWalkData * data);
+ static fgWalkResult fgChkThrowCB(GenTreePtr* pTree, Compiler::fgWalkData* data);
+ static fgWalkResult fgChkLocAllocCB(GenTreePtr* pTree, Compiler::fgWalkData* data);
+ static fgWalkResult fgChkQmarkCB(GenTreePtr* pTree, Compiler::fgWalkData* data);
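    // Shape of a pre-order walker callback like the ones declared above (illustrative only, not
    // part of this change; the visitor name is hypothetical):
    static fgWalkResult sketchChkOverflowCB(GenTreePtr* pTree, Compiler::fgWalkData* data)
    {
        GenTreePtr tree = *pTree;
        if (tree->gtOverflowEx())
        {
            return WALK_ABORT; // found an overflow-checked node; stop the walk
        }
        return WALK_CONTINUE; // keep visiting the rest of the tree
    }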
/**************************************************************************
* PROTECTED
*************************************************************************/
-protected :
-
+protected:
friend class SsaBuilder;
friend struct ValueNumberState;
//--------------------- Detect the basic blocks ---------------------------
- BasicBlock * * fgBBs; // Table of pointers to the BBs
+ BasicBlock** fgBBs; // Table of pointers to the BBs
- void fgInitBBLookup ();
- BasicBlock * fgLookupBB (unsigned addr);
+ void fgInitBBLookup();
+ BasicBlock* fgLookupBB(unsigned addr);
- void fgMarkJumpTarget (BYTE * jumpTarget,
- IL_OFFSET offs);
+ void fgMarkJumpTarget(BYTE* jumpTarget, IL_OFFSET offs);
- void fgFindJumpTargets (const BYTE * codeAddr,
- IL_OFFSET codeSize,
- BYTE * jumpTarget);
+ void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, BYTE* jumpTarget);
- void fgMarkBackwardJump (BasicBlock * startBlock,
- BasicBlock * endBlock);
+ void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock);
- void fgLinkBasicBlocks();
+ void fgLinkBasicBlocks();
- void fgMakeBasicBlocks (const BYTE * codeAddr,
- IL_OFFSET codeSize,
- BYTE * jumpTarget);
+ void fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, BYTE* jumpTarget);
- void fgCheckBasicBlockControlFlow();
+ void fgCheckBasicBlockControlFlow();
- void fgControlFlowPermitted(BasicBlock* blkSrc,
- BasicBlock* blkDest,
- BOOL IsLeave = false /* is the src a leave block */);
+ void fgControlFlowPermitted(BasicBlock* blkSrc,
+ BasicBlock* blkDest,
+ BOOL IsLeave = false /* is the src a leave block */);
- bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc,
- BasicBlock* blkDest,
- bool sibling);
+ bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling);
- void fgObserveInlineConstants(OPCODE opcode,
- const FgStack& stack,
- bool isInlining);
+ void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining);
- void fgAdjustForAddressExposedOrWrittenThis();
+ void fgAdjustForAddressExposedOrWrittenThis();
bool fgProfileData_ILSizeMismatch;
- ICorJitInfo::ProfileBuffer *fgProfileBuffer;
+ ICorJitInfo::ProfileBuffer* fgProfileBuffer;
ULONG fgProfileBufferCount;
ULONG fgNumProfileRuns;
- unsigned fgStressBBProf()
+ unsigned fgStressBBProf()
{
#ifdef DEBUG
unsigned result = JitConfig.JitStressBBProf();
@@ -4449,127 +4349,106 @@ protected :
#endif
}
- bool fgHaveProfileData();
- bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, unsigned *weight);
-
- bool fgIsUsingProfileWeights() { return (fgHaveProfileData() || fgStressBBProf()); }
- void fgInstrumentMethod();
+ bool fgHaveProfileData();
+ bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, unsigned* weight);
+
+ bool fgIsUsingProfileWeights()
+ {
+ return (fgHaveProfileData() || fgStressBBProf());
+ }
+ void fgInstrumentMethod();
- //-------- Insert a statement at the start or end of a basic block --------
+//-------- Insert a statement at the start or end of a basic block --------
#ifdef DEBUG
public:
- static bool fgBlockContainsStatementBounded(BasicBlock *block, GenTree* stmt, bool answerOnBoundExceeded = true);
+ static bool fgBlockContainsStatementBounded(BasicBlock* block, GenTree* stmt, bool answerOnBoundExceeded = true);
#endif
public:
- GenTreeStmt* fgInsertStmtAtEnd (BasicBlock * block,
- GenTreePtr node);
-public: // Used by linear scan register allocation
- GenTreeStmt* fgInsertStmtNearEnd(BasicBlock * block,
- GenTreePtr node);
+ GenTreeStmt* fgInsertStmtAtEnd(BasicBlock* block, GenTreePtr node);
+
+public: // Used by linear scan register allocation
+ GenTreeStmt* fgInsertStmtNearEnd(BasicBlock* block, GenTreePtr node);
+
private:
- GenTreePtr fgInsertStmtAtBeg (BasicBlock * block,
- GenTreePtr stmt);
- GenTreePtr fgInsertStmtAfter (BasicBlock * block,
- GenTreePtr insertionPoint,
- GenTreePtr stmt
- );
-public: // Used by linear scan register allocation
- GenTreePtr fgInsertStmtBefore(BasicBlock * block,
- GenTreePtr insertionPoint,
- GenTreePtr stmt
- );
- void fgReplaceStmt (BasicBlock * block,
- GenTreeStmt* stmt,
- GenTreePtr newTree);
+ GenTreePtr fgInsertStmtAtBeg(BasicBlock* block, GenTreePtr stmt);
+ GenTreePtr fgInsertStmtAfter(BasicBlock* block, GenTreePtr insertionPoint, GenTreePtr stmt);
+
+public: // Used by linear scan register allocation
+ GenTreePtr fgInsertStmtBefore(BasicBlock* block, GenTreePtr insertionPoint, GenTreePtr stmt);
+ void fgReplaceStmt(BasicBlock* block, GenTreeStmt* stmt, GenTreePtr newTree);
private:
- GenTreePtr fgInsertStmtListAfter(BasicBlock * block,
- GenTreePtr stmtAfter,
- GenTreePtr stmtList
- );
+ GenTreePtr fgInsertStmtListAfter(BasicBlock* block, GenTreePtr stmtAfter, GenTreePtr stmtList);
- GenTreePtr fgMorphSplitTree(GenTree **splitPoint,
- GenTree *stmt,
- BasicBlock *blk);
+ GenTreePtr fgMorphSplitTree(GenTree** splitPoint, GenTree* stmt, BasicBlock* blk);
// insert the given subtree as an embedded statement of parentStmt
- GenTreeStmt* fgMakeEmbeddedStmt(BasicBlock *block, GenTreePtr tree, GenTreePtr parentStmt);
+ GenTreeStmt* fgMakeEmbeddedStmt(BasicBlock* block, GenTreePtr tree, GenTreePtr parentStmt);
// Insert the given single node before 'before'.
// Either the callee must ensure that 'before' is part of compCurStmt,
// or before->gtPrev must be non-null
- void fgInsertLinearNodeBefore(GenTreePtr newNode, GenTreePtr before);
+ void fgInsertLinearNodeBefore(GenTreePtr newNode, GenTreePtr before);
// Create a new temporary variable to hold the result of *ppTree,
// and transform the graph accordingly.
- GenTreeStmt* fgInsertEmbeddedFormTemp(GenTree** ppTree, unsigned lvaNum=BAD_VAR_NUM);
- GenTree* fgInsertCommaFormTemp (GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr);
- GenTree* fgMakeMultiUse (GenTree** ppTree);
+ GenTreeStmt* fgInsertEmbeddedFormTemp(GenTree** ppTree, unsigned lvaNum = BAD_VAR_NUM);
+ GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr);
+ GenTree* fgMakeMultiUse(GenTree** ppTree);
// After replacing oldChild with newChild, fixup the fgArgTabEntryPtr
// if it happens to be an argument to a call.
- void fgFixupIfCallArg(ArrayStack<GenTree *> *parentStack,
- GenTree *oldChild,
- GenTree *newChild);
-
- void fgFixupArgTabEntryPtr(GenTreePtr parentCall,
- GenTreePtr oldArg,
- GenTreePtr newArg);
+ void fgFixupIfCallArg(ArrayStack<GenTree*>* parentStack, GenTree* oldChild, GenTree* newChild);
+
+ void fgFixupArgTabEntryPtr(GenTreePtr parentCall, GenTreePtr oldArg, GenTreePtr newArg);
// Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node.
- GenTreePtr fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree);
- bool fgOperIsBitwiseRotationRoot(genTreeOps oper);
-
- //-------- Determine the order in which the trees will be evaluated -------
+ GenTreePtr fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree);
+ bool fgOperIsBitwiseRotationRoot(genTreeOps oper);
- unsigned fgTreeSeqNum;
- GenTree * fgTreeSeqLst;
- GenTree * fgTreeSeqBeg;
+ //-------- Determine the order in which the trees will be evaluated -------
- GenTree* fgSetTreeSeq (GenTree* tree, GenTree* prev = nullptr);
- void fgSetTreeSeqHelper(GenTree * tree);
- void fgSetTreeSeqFinish(GenTreePtr tree);
- void fgSetStmtSeq (GenTree * tree);
- void fgSetBlockOrder (BasicBlock * block);
+ unsigned fgTreeSeqNum;
+ GenTree* fgTreeSeqLst;
+ GenTree* fgTreeSeqBeg;
+ GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr);
+ void fgSetTreeSeqHelper(GenTree* tree);
+ void fgSetTreeSeqFinish(GenTreePtr tree);
+ void fgSetStmtSeq(GenTree* tree);
+ void fgSetBlockOrder(BasicBlock* block);
//------------------------- Morphing --------------------------------------
- unsigned fgPtrArgCntCur;
- unsigned fgPtrArgCntMax;
- hashBv* fgOutgoingArgTemps;
- hashBv* fgCurrentlyInUseArgTemps;
+ unsigned fgPtrArgCntCur;
+ unsigned fgPtrArgCntMax;
+ hashBv* fgOutgoingArgTemps;
+ hashBv* fgCurrentlyInUseArgTemps;
- bool compCanEncodePtrArgCntMax();
+ bool compCanEncodePtrArgCntMax();
- void fgSetRngChkTarget (GenTreePtr tree,
- bool delay = true);
+ void fgSetRngChkTarget(GenTreePtr tree, bool delay = true);
#if REARRANGE_ADDS
- void fgMoveOpsLeft (GenTreePtr tree);
+ void fgMoveOpsLeft(GenTreePtr tree);
#endif
- bool fgIsCommaThrow (GenTreePtr tree,
- bool forFolding = false);
+ bool fgIsCommaThrow(GenTreePtr tree, bool forFolding = false);
- bool fgIsThrow (GenTreePtr tree);
+ bool fgIsThrow(GenTreePtr tree);
- bool fgInDifferentRegions(BasicBlock * blk1, BasicBlock * blk2);
- bool fgIsBlockCold (BasicBlock * block);
+ bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2);
+ bool fgIsBlockCold(BasicBlock* block);
- GenTreePtr fgMorphCastIntoHelper(GenTreePtr tree,
- int helper,
- GenTreePtr oper);
+ GenTreePtr fgMorphCastIntoHelper(GenTreePtr tree, int helper, GenTreePtr oper);
- GenTreePtr fgMorphIntoHelperCall(GenTreePtr tree,
- int helper,
- GenTreeArgList* args);
+ GenTreePtr fgMorphIntoHelperCall(GenTreePtr tree, int helper, GenTreeArgList* args);
- GenTreePtr fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs);
+ GenTreePtr fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs);
- bool fgMorphRelopToQmark (GenTreePtr tree);
+ bool fgMorphRelopToQmark(GenTreePtr tree);
// A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address,
// it is useful to know whether the address will be immediately dereferenced, or whether the address value will
@@ -4581,123 +4460,134 @@ private:
// argument is a GT_LIST, requires us to "tell" that List node that its parent is a GT_COPYBLK, so it "knows" that
// each of its arguments should be evaluated in MACK_Ind contexts. (This would not be true for GT_LIST nodes
// representing method call argument lists.)
- enum MorphAddrContextKind {
+ enum MorphAddrContextKind
+ {
MACK_Ind,
MACK_Addr,
- MACK_CopyBlock, // This is necessary so we know we have to start a new "Ind" context for each of the
- // addresses in the arg list.
+ MACK_CopyBlock, // This is necessary so we know we have to start a new "Ind" context for each of the
+ // addresses in the arg list.
};
- struct MorphAddrContext {
+ struct MorphAddrContext
+ {
MorphAddrContextKind m_kind;
- bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between
- // top-level indirection and here have been constants.
- size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true.
- // In that case, is the sum of those constant offsets.
+ bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between
+ // top-level indirection and here have been constants.
+ size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true.
+ // In that case, is the sum of those constant offsets.
- MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0) {}
+ MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0)
+ {
+ }
};
// A MACK_CopyBlock context is immutable, so we can just make one of these and share it.
static MorphAddrContext s_CopyBlockMAC;
#ifdef FEATURE_SIMD
- GenTreePtr fgCopySIMDNode(GenTreeSIMD* simdNode);
- GenTreePtr getSIMDStructFromField(GenTreePtr tree, var_types* baseTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic=false);
- GenTreePtr fgMorphFieldAssignToSIMDIntrinsicSet(GenTreePtr tree);
- GenTreePtr fgMorphFieldToSIMDIntrinsicGet(GenTreePtr tree);
- bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTreePtr stmt);
- void impMarkContiguousSIMDFieldAssignments(GenTreePtr stmt);
-
- // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment
- // in function: Complier::impMarkContiguousSIMDFieldAssignments.
- GenTreePtr fgPreviousCandidateSIMDFieldAsgStmt;
-
+ GenTreePtr fgCopySIMDNode(GenTreeSIMD* simdNode);
+ GenTreePtr getSIMDStructFromField(GenTreePtr tree,
+ var_types* baseTypeOut,
+ unsigned* indexOut,
+ unsigned* simdSizeOut,
+ bool ignoreUsedInSIMDIntrinsic = false);
+ GenTreePtr fgMorphFieldAssignToSIMDIntrinsicSet(GenTreePtr tree);
+ GenTreePtr fgMorphFieldToSIMDIntrinsicGet(GenTreePtr tree);
+ bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTreePtr stmt);
+ void impMarkContiguousSIMDFieldAssignments(GenTreePtr stmt);
+
+ // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment
+    // in function: Compiler::impMarkContiguousSIMDFieldAssignments.
+ GenTreePtr fgPreviousCandidateSIMDFieldAsgStmt;
+
#endif // FEATURE_SIMD
- GenTreePtr fgMorphArrayIndex (GenTreePtr tree);
- GenTreePtr fgMorphCast (GenTreePtr tree);
- GenTreePtr fgUnwrapProxy (GenTreePtr objRef);
- GenTreeCall* fgMorphArgs (GenTreeCall* call);
-
- void fgMakeOutgoingStructArgCopy(
- GenTreeCall* call,
- GenTree* args,
- unsigned argIndex,
- CORINFO_CLASS_HANDLE copyBlkClass
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structDescPtr));
-
- void fgFixupStructReturn (GenTreePtr call);
- GenTreePtr fgMorphLocalVar (GenTreePtr tree);
- bool fgAddrCouldBeNull (GenTreePtr addr);
- GenTreePtr fgMorphField (GenTreePtr tree, MorphAddrContext* mac);
- bool fgCanFastTailCall (GenTreeCall* call);
- void fgMorphTailCall (GenTreeCall* call);
- void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall);
- GenTreePtr fgAssignRecursiveCallArgToCallerParam(GenTreePtr arg, fgArgTabEntryPtr argTabEntry, BasicBlock* block, IL_OFFSETX callILOffset,
- GenTreePtr tmpAssignmentInsertionPoint, GenTreePtr paramAssignmentInsertionPoint);
- static int fgEstimateCallStackSize(GenTreeCall* call);
- GenTreePtr fgMorphCall (GenTreeCall* call);
- void fgMorphCallInline (GenTreeCall* call, InlineResult* result);
- void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result);
+ GenTreePtr fgMorphArrayIndex(GenTreePtr tree);
+ GenTreePtr fgMorphCast(GenTreePtr tree);
+ GenTreePtr fgUnwrapProxy(GenTreePtr objRef);
+ GenTreeCall* fgMorphArgs(GenTreeCall* call);
+
+ void fgMakeOutgoingStructArgCopy(GenTreeCall* call,
+ GenTree* args,
+ unsigned argIndex,
+ CORINFO_CLASS_HANDLE copyBlkClass FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(
+ const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structDescPtr));
+
+ void fgFixupStructReturn(GenTreePtr call);
+ GenTreePtr fgMorphLocalVar(GenTreePtr tree);
+ bool fgAddrCouldBeNull(GenTreePtr addr);
+ GenTreePtr fgMorphField(GenTreePtr tree, MorphAddrContext* mac);
+ bool fgCanFastTailCall(GenTreeCall* call);
+ void fgMorphTailCall(GenTreeCall* call);
+ void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall);
+ GenTreePtr fgAssignRecursiveCallArgToCallerParam(GenTreePtr arg,
+ fgArgTabEntryPtr argTabEntry,
+ BasicBlock* block,
+ IL_OFFSETX callILOffset,
+ GenTreePtr tmpAssignmentInsertionPoint,
+ GenTreePtr paramAssignmentInsertionPoint);
+ static int fgEstimateCallStackSize(GenTreeCall* call);
+ GenTreePtr fgMorphCall(GenTreeCall* call);
+ void fgMorphCallInline(GenTreeCall* call, InlineResult* result);
+ void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result);
#if DEBUG
- void fgNoteNonInlineCandidate(GenTreePtr tree, GenTreeCall* call);
- static fgWalkPreFn fgFindNonInlineCandidate;
-#endif
- GenTreePtr fgOptimizeDelegateConstructor(GenTreePtr call, CORINFO_CONTEXT_HANDLE * ExactContextHnd);
- GenTreePtr fgMorphLeaf (GenTreePtr tree);
- void fgAssignSetVarDef (GenTreePtr tree);
- GenTreePtr fgMorphOneAsgBlockOp(GenTreePtr tree);
- GenTreePtr fgMorphInitBlock (GenTreePtr tree);
- GenTreePtr fgMorphCopyBlock (GenTreePtr tree);
- GenTreePtr fgMorphForRegisterFP(GenTreePtr tree);
- GenTreePtr fgMorphSmpOp (GenTreePtr tree, MorphAddrContext* mac = NULL);
- GenTreePtr fgMorphSmpOpPre (GenTreePtr tree);
- GenTreePtr fgMorphDivByConst (GenTreeOp* tree);
- GenTreePtr fgMorphModByConst (GenTreeOp* tree);
- GenTreePtr fgMorphModToSubMulDiv(GenTreeOp* tree);
- GenTreePtr fgMorphSmpOpOptional(GenTreeOp* tree);
- GenTreePtr fgMorphRecognizeBoxNullable(GenTree* compare);
- bool fgShouldUseMagicNumberDivide(GenTreeOp* tree);
-
- GenTreePtr fgMorphToEmulatedFP (GenTreePtr tree);
- GenTreePtr fgMorphConst (GenTreePtr tree);
+ void fgNoteNonInlineCandidate(GenTreePtr tree, GenTreeCall* call);
+ static fgWalkPreFn fgFindNonInlineCandidate;
+#endif
+ GenTreePtr fgOptimizeDelegateConstructor(GenTreePtr call, CORINFO_CONTEXT_HANDLE* ExactContextHnd);
+ GenTreePtr fgMorphLeaf(GenTreePtr tree);
+ void fgAssignSetVarDef(GenTreePtr tree);
+ GenTreePtr fgMorphOneAsgBlockOp(GenTreePtr tree);
+ GenTreePtr fgMorphInitBlock(GenTreePtr tree);
+ GenTreePtr fgMorphCopyBlock(GenTreePtr tree);
+ GenTreePtr fgMorphForRegisterFP(GenTreePtr tree);
+ GenTreePtr fgMorphSmpOp(GenTreePtr tree, MorphAddrContext* mac = nullptr);
+ GenTreePtr fgMorphSmpOpPre(GenTreePtr tree);
+ GenTreePtr fgMorphDivByConst(GenTreeOp* tree);
+ GenTreePtr fgMorphModByConst(GenTreeOp* tree);
+ GenTreePtr fgMorphModToSubMulDiv(GenTreeOp* tree);
+ GenTreePtr fgMorphSmpOpOptional(GenTreeOp* tree);
+ GenTreePtr fgMorphRecognizeBoxNullable(GenTree* compare);
+ bool fgShouldUseMagicNumberDivide(GenTreeOp* tree);
+
+ GenTreePtr fgMorphToEmulatedFP(GenTreePtr tree);
+ GenTreePtr fgMorphConst(GenTreePtr tree);
+
public:
- GenTreePtr fgMorphTree (GenTreePtr tree, MorphAddrContext* mac = NULL);
+ GenTreePtr fgMorphTree(GenTreePtr tree, MorphAddrContext* mac = nullptr);
+
private:
#if LOCAL_ASSERTION_PROP
- void fgKillDependentAssertions (unsigned lclNum DEBUGARG(GenTreePtr tree));
+ void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTreePtr tree));
#endif
- void fgMorphTreeDone (GenTreePtr tree,
- GenTreePtr oldTree = NULL
- DEBUGARG(int morphNum = 0));
+ void fgMorphTreeDone(GenTreePtr tree, GenTreePtr oldTree = nullptr DEBUGARG(int morphNum = 0));
+
+ GenTreePtr fgMorphStmt;
- GenTreePtr fgMorphStmt;
-
- unsigned fgGetBigOffsetMorphingTemp (var_types type); // We cache one temp per type to be
- // used when morphing big offset.
+ unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be
+ // used when morphing big offset.
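A minimal sketch of the caching pattern described in the comment above, written as if it were a Compiler member. It is illustrative only (not part of this change) and assumes the cache entries start out as BAD_VAR_NUM; the actual fgGetBigOffsetMorphingTemp may differ in details:

    // Illustrative sketch only: lazily allocate and cache one temp per var_types value.
    // Assumes fgBigOffsetMorphingTemps[] entries are initialized to BAD_VAR_NUM.
    unsigned sketchGetBigOffsetMorphingTemp(var_types type)
    {
        unsigned lclNum = fgBigOffsetMorphingTemps[type];
        if (lclNum == BAD_VAR_NUM)
        {
            // First big-offset morph for this type: grab a temp and remember it.
            lclNum                         = lvaGrabTemp(false DEBUGARG("big-offset morphing temp"));
            fgBigOffsetMorphingTemps[type] = lclNum;
        }
        return lclNum;
    }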
//----------------------- Liveness analysis -------------------------------
- VARSET_TP fgCurUseSet; // vars used by block (before an assignment)
- VARSET_TP fgCurDefSet; // vars assigned by block (before a use)
+ VARSET_TP fgCurUseSet; // vars used by block (before an assignment)
+ VARSET_TP fgCurDefSet; // vars assigned by block (before a use)
- bool fgCurHeapUse; // True iff the current basic block uses the heap before defining it.
- bool fgCurHeapDef; // True iff the current basic block defines the heap.
- bool fgCurHeapHavoc; // True if the current basic block is known to set the heap to a "havoc" value.
+ bool fgCurHeapUse; // True iff the current basic block uses the heap before defining it.
+ bool fgCurHeapDef; // True iff the current basic block defines the heap.
+ bool fgCurHeapHavoc; // True if the current basic block is known to set the heap to a "havoc" value.
- void fgMarkUseDef(GenTreeLclVarCommon *tree, GenTree *asgdLclVar = NULL);
+ void fgMarkUseDef(GenTreeLclVarCommon* tree, GenTree* asgdLclVar = nullptr);
#ifdef DEBUGGING_SUPPORT
- void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
- void fgEndScopeLife (VARSET_TP* inScope, VarScopeDsc* var);
+ void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
+ void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
- void fgMarkInScope(BasicBlock * block, VARSET_VALARG_TP inScope);
- void fgUnmarkInScope(BasicBlock * block, VARSET_VALARG_TP unmarkScope);
+ void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope);
+ void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope);
- void fgExtendDbgScopes();
- void fgExtendDbgLifetimes();
+ void fgExtendDbgScopes();
+ void fgExtendDbgLifetimes();
#ifdef DEBUG
- void fgDispDebugScopes();
+ void fgDispDebugScopes();
#endif // DEBUG
#endif // DEBUGGING_SUPPORT
@@ -4708,88 +4598,83 @@ private:
// range checking or explicit calls to enable GC, and so on.
//
public:
-
-
- struct AddCodeDsc
+ struct AddCodeDsc
{
- AddCodeDsc * acdNext;
- BasicBlock * acdDstBlk; // block to which we jump
- unsigned acdData;
- SpecialCodeKind acdKind; // what kind of a special block is this?
- unsigned short acdStkLvl;
+ AddCodeDsc* acdNext;
+ BasicBlock* acdDstBlk; // block to which we jump
+ unsigned acdData;
+ SpecialCodeKind acdKind; // what kind of a special block is this?
+ unsigned short acdStkLvl;
};
+
private:
- static unsigned acdHelper (SpecialCodeKind codeKind);
+ static unsigned acdHelper(SpecialCodeKind codeKind);
- AddCodeDsc * fgAddCodeList;
- bool fgAddCodeModf;
- bool fgRngChkThrowAdded;
- AddCodeDsc * fgExcptnTargetCache[SCK_COUNT];
+ AddCodeDsc* fgAddCodeList;
+ bool fgAddCodeModf;
+ bool fgRngChkThrowAdded;
+ AddCodeDsc* fgExcptnTargetCache[SCK_COUNT];
- BasicBlock * fgRngChkTarget (BasicBlock * block,
- unsigned stkDepth,
- SpecialCodeKind kind);
+ BasicBlock* fgRngChkTarget(BasicBlock* block, unsigned stkDepth, SpecialCodeKind kind);
+
+ BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind, unsigned stkDepth = 0);
- BasicBlock * fgAddCodeRef (BasicBlock * srcBlk,
- unsigned refData,
- SpecialCodeKind kind,
- unsigned stkDepth = 0);
public:
- AddCodeDsc * fgFindExcptnTarget(SpecialCodeKind kind,
- unsigned refData);
+ AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData);
+
private:
- bool fgIsCodeAdded ();
+ bool fgIsCodeAdded();
- bool fgIsThrowHlpBlk (BasicBlock * block);
- unsigned fgThrowHlpBlkStkLevel(BasicBlock *block);
+ bool fgIsThrowHlpBlk(BasicBlock* block);
+ unsigned fgThrowHlpBlkStkLevel(BasicBlock* block);
- unsigned fgBigOffsetMorphingTemps[TYP_COUNT];
+ unsigned fgBigOffsetMorphingTemps[TYP_COUNT];
- unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo);
- void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result);
- void fgInsertInlineeBlocks (InlineInfo* pInlineInfo);
- GenTreePtr fgInlinePrependStatements(InlineInfo* inlineInfo);
+ unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo);
+ void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result);
+ void fgInsertInlineeBlocks(InlineInfo* pInlineInfo);
+ GenTreePtr fgInlinePrependStatements(InlineInfo* inlineInfo);
#if FEATURE_MULTIREG_RET
- GenTreePtr fgGetStructAsStructPtr(GenTreePtr tree);
- GenTreePtr fgAssignStructInlineeToVar(GenTreePtr child, CORINFO_CLASS_HANDLE retClsHnd);
- void fgAttachStructInlineeToAsg(GenTreePtr tree, GenTreePtr child, CORINFO_CLASS_HANDLE retClsHnd);
+ GenTreePtr fgGetStructAsStructPtr(GenTreePtr tree);
+ GenTreePtr fgAssignStructInlineeToVar(GenTreePtr child, CORINFO_CLASS_HANDLE retClsHnd);
+ void fgAttachStructInlineeToAsg(GenTreePtr tree, GenTreePtr child, CORINFO_CLASS_HANDLE retClsHnd);
#endif // FEATURE_MULTIREG_RET
- static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder;
+ static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder;
#ifdef DEBUG
- static fgWalkPreFn fgDebugCheckInlineCandidates;
+ static fgWalkPreFn fgDebugCheckInlineCandidates;
#endif
- void fgPromoteStructs();
- fgWalkResult fgMorphStructField(GenTreePtr tree, fgWalkData *fgWalkPre);
- fgWalkResult fgMorphLocalField(GenTreePtr tree, fgWalkData *fgWalkPre);
- void fgMarkImplicitByRefArgs();
- bool fgMorphImplicitByRefArgs(GenTree** pTree, fgWalkData *fgWalkPre);
+ void fgPromoteStructs();
+ fgWalkResult fgMorphStructField(GenTreePtr tree, fgWalkData* fgWalkPre);
+ fgWalkResult fgMorphLocalField(GenTreePtr tree, fgWalkData* fgWalkPre);
+ void fgMarkImplicitByRefArgs();
+ bool fgMorphImplicitByRefArgs(GenTree** pTree, fgWalkData* fgWalkPre);
static fgWalkPreFn fgMarkAddrTakenLocalsPreCB;
static fgWalkPostFn fgMarkAddrTakenLocalsPostCB;
void fgMarkAddressExposedLocals();
- bool fgNodesMayInterfere(GenTree* store, GenTree* load);
+ bool fgNodesMayInterfere(GenTree* store, GenTree* load);
// Returns true if the type of tree is of size at least "width", or if "tree" is not a
// local variable.
- bool fgFitsInOrNotLoc(GenTreePtr tree, unsigned width);
+ bool fgFitsInOrNotLoc(GenTreePtr tree, unsigned width);
// The given local variable, required to be a struct variable, is being assigned via
// a "lclField", to make it masquerade as an integral type in the ABI. Make sure that
// the variable is not enregistered, and is therefore not promoted independently.
- void fgLclFldAssign(unsigned lclNum);
+ void fgLclFldAssign(unsigned lclNum);
- static fgWalkPreFn gtHasLocalsWithAddrOpCB;
- bool gtCanOptimizeTypeEquality(GenTreePtr tree);
- bool gtIsTypeHandleToRuntimeTypeHelper(GenTreePtr tree);
- bool gtIsActiveCSE_Candidate(GenTreePtr tree);
+ static fgWalkPreFn gtHasLocalsWithAddrOpCB;
+ bool gtCanOptimizeTypeEquality(GenTreePtr tree);
+ bool gtIsTypeHandleToRuntimeTypeHelper(GenTreePtr tree);
+ bool gtIsActiveCSE_Candidate(GenTreePtr tree);
#ifdef DEBUG
- bool fgPrintInlinedMethods;
+ bool fgPrintInlinedMethods;
#endif
-
+
bool fgIsBigOffset(size_t offset);
// The following are used when morphing special cases of integer div/mod operations and also by codegen
@@ -4798,43 +4683,41 @@ private:
bool fgIsSignedModOptimizable(GenTreePtr divisor);
bool fgIsUnsignedModOptimizable(GenTreePtr divisor);
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX Optimizer XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX Optimizer XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
-
-
-public :
-
- void optInit ();
-
-protected :
+public:
+ void optInit();
- LclVarDsc * optIsTrackedLocal (GenTreePtr tree);
+protected:
+ LclVarDsc* optIsTrackedLocal(GenTreePtr tree);
public:
- void optRemoveRangeCheck(GenTreePtr tree, GenTreePtr stmt, bool updateCSEcounts, unsigned sideEffFlags = 0, bool forceRemove = false);
- bool optIsRangeCheckRemovable(GenTreePtr tree);
+ void optRemoveRangeCheck(
+ GenTreePtr tree, GenTreePtr stmt, bool updateCSEcounts, unsigned sideEffFlags = 0, bool forceRemove = false);
+ bool optIsRangeCheckRemovable(GenTreePtr tree);
+
protected:
- static fgWalkPreFn optValidRangeCheckIndex;
- static fgWalkPreFn optRemoveTreeVisitor; // Helper passed to Compiler::fgWalkAllTreesPre() to decrement the LclVar usage counts
+ static fgWalkPreFn optValidRangeCheckIndex;
+ static fgWalkPreFn optRemoveTreeVisitor; // Helper passed to Compiler::fgWalkAllTreesPre() to decrement the LclVar
+ // usage counts
- void optRemoveTree(GenTreePtr deadTree, GenTreePtr keepList);
+ void optRemoveTree(GenTreePtr deadTree, GenTreePtr keepList);
/**************************************************************************
*
*************************************************************************/
protected:
-
// Do hoisting for all loops.
- void optHoistLoopCode();
+ void optHoistLoopCode();
// To represent sets of VN's that have already been hoisted in outer loops.
typedef SimplerHashTable<ValueNum, SmallPrimitiveKeyFuncs<ValueNum>, bool, JitSimplerHashBehavior> VNToBoolMap;
@@ -4844,16 +4727,16 @@ protected:
{
private:
// The set of variables hoisted in the current loop (or nullptr if there are none).
- VNSet* m_pHoistedInCurLoop;
+ VNSet* m_pHoistedInCurLoop;
public:
// Value numbers of expressions that have been hoisted in parent loops in the loop nest.
- VNSet m_hoistedInParentLoops;
+ VNSet m_hoistedInParentLoops;
// Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest.
// Previous decisions on loop-invariance of value numbers in the current loop.
VNToBoolMap m_curLoopVnInvariantCache;
- VNSet* GetHoistedInCurLoop(Compiler* comp)
+ VNSet* GetHoistedInCurLoop(Compiler* comp)
{
if (m_pHoistedInCurLoop == nullptr)
{
@@ -4862,130 +4745,129 @@ protected:
return m_pHoistedInCurLoop;
}
- VNSet* ExtractHoistedInCurLoop()
+ VNSet* ExtractHoistedInCurLoop()
{
- VNSet* res = m_pHoistedInCurLoop;
+ VNSet* res = m_pHoistedInCurLoop;
m_pHoistedInCurLoop = nullptr;
return res;
}
- LoopHoistContext(Compiler* comp) :
- m_pHoistedInCurLoop(nullptr),
- m_hoistedInParentLoops(comp->getAllocatorLoopHoist()),
- m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist())
- {}
+ LoopHoistContext(Compiler* comp)
+ : m_pHoistedInCurLoop(nullptr)
+ , m_hoistedInParentLoops(comp->getAllocatorLoopHoist())
+ , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist())
+ {
+ }
};
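A hypothetical usage sketch, based only on the accessors declared above and written as if it were a Compiler member (it is not part of this change): the per-loop set is created lazily on first access, and extracting it resets the context before the walk moves on to the next loop.

    // Illustrative only: life cycle of the per-loop hoisted-VN set.
    void sketchUseHoistContext(LoopHoistContext& ctxt)
    {
        VNSet* cur = ctxt.GetHoistedInCurLoop(this); // allocated lazily on first call
        // ... record value numbers of expressions hoisted out of the current loop in *cur ...
        VNSet* done = ctxt.ExtractHoistedInCurLoop(); // same set; the context now holds nullptr again
        (void)done; // a caller such as the loop-nest walk could fold this into m_hoistedInParentLoops
    }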
// Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it.
    // Tracks the expressions that have been hoisted by containing loops by temporarily recording their
// value numbers in "m_hoistedInParentLoops". This set is not modified by the call.
- void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt);
+ void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt);
// Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.)
- // Assumes that expressions have been hoisted in containing loops if their value numbers are in "m_hoistedInParentLoops".
- //
- void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt);
+ // Assumes that expressions have been hoisted in containing loops if their value numbers are in
+ // "m_hoistedInParentLoops".
+ //
+ void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt);
// Hoist all expressions in "blk" that are invariant in loop "lnum" (an index into the optLoopTable)
// outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted
// expressions to "hoistInLoop".
- void optHoistLoopExprsForBlock(BasicBlock* blk, unsigned lnum, LoopHoistContext* hoistCtxt);
+ void optHoistLoopExprsForBlock(BasicBlock* blk, unsigned lnum, LoopHoistContext* hoistCtxt);
// Return true if the tree looks profitable to hoist out of loop 'lnum'.
- bool optIsProfitableToHoistableTree(GenTreePtr tree,unsigned lnum);
+ bool optIsProfitableToHoistableTree(GenTreePtr tree, unsigned lnum);
- // Hoist all proper sub-expressions of "tree" (which occurs in "stmt", which occurs in "blk")
+ // Hoist all proper sub-expressions of "tree" (which occurs in "stmt", which occurs in "blk")
// that are invariant in loop "lnum" (an index into the optLoopTable)
// outside of that loop. Exempt expressions whose value number is in "hoistedInParents"; add VN's of hoisted
- // expressions to "hoistInLoop".
+ // expressions to "hoistInLoop".
// Returns "true" iff "tree" is loop-invariant (wrt "lnum").
// Assumes that the value of "*firstBlockAndBeforeSideEffect" indicates that we're in the first block, and before
    // any possible globally visible side effects. Assumes it is called in evaluation order, and updates this.
- bool optHoistLoopExprsForTree(GenTreePtr tree,
- unsigned lnum,
- LoopHoistContext* hoistCtxt,
- bool* firstBlockAndBeforeSideEffect,
- bool* pHoistable);
+ bool optHoistLoopExprsForTree(GenTreePtr tree,
+ unsigned lnum,
+ LoopHoistContext* hoistCtxt,
+ bool* firstBlockAndBeforeSideEffect,
+ bool* pHoistable);
// Performs the hoisting 'tree' into the PreHeader for loop 'lnum'
- void optHoistCandidate(GenTreePtr tree, unsigned lnum, LoopHoistContext* hoistCtxt);
+ void optHoistCandidate(GenTreePtr tree, unsigned lnum, LoopHoistContext* hoistCtxt);
// Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum".
// Constants and init values are always loop invariant.
// VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop.
- bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNToBoolMap* recordedVNs);
+ bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNToBoolMap* recordedVNs);
// Returns "true" iff "tree" is valid at the head of loop "lnum", in the context of the hoist substitution
// "subst". If "tree" is a local SSA var, it is valid if its SSA definition occurs outside of the loop, or
    // if it is in the domain of "subst" (meaning that its definition has been previously hoisted, with a "standin"
// local.) If tree is a constant, it is valid. Otherwise, if it is an operator, it is valid iff its children are.
- bool optTreeIsValidAtLoopHead(GenTreePtr tree, unsigned lnum);
+ bool optTreeIsValidAtLoopHead(GenTreePtr tree, unsigned lnum);
// If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop
// in the loop table.
- bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum);
+ bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum);
// Records the set of "side effects" of all loops: fields (object instance and static)
// written to, and SZ-array element type equivalence classes updated.
- void optComputeLoopSideEffects();
+ void optComputeLoopSideEffects();
private:
// Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop,
// including all nested loops, and records the set of "side effects" of the loop: fields (object instance and
// static) written to, and SZ-array element type equivalence classes updated.
- void optComputeLoopNestSideEffects(unsigned lnum);
+ void optComputeLoopNestSideEffects(unsigned lnum);
// Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part.
- void optComputeLoopSideEffectsOfBlock(BasicBlock* blk);
+ void optComputeLoopSideEffectsOfBlock(BasicBlock* blk);
// Hoist the expression "expr" out of loop "lnum".
- void optPerformHoistExpr(GenTreePtr expr,
- unsigned lnum);
+ void optPerformHoistExpr(GenTreePtr expr, unsigned lnum);
+
public:
- void optOptimizeBools();
+ void optOptimizeBools();
+
private:
- GenTree * optIsBoolCond (GenTree * condBranch,
- GenTree * * compPtr,
- bool * boolPtr);
+ GenTree* optIsBoolCond(GenTree* condBranch, GenTree** compPtr, bool* boolPtr);
#ifdef DEBUG
- void optOptimizeBoolsGcStress(BasicBlock * condBlock);
+ void optOptimizeBoolsGcStress(BasicBlock* condBlock);
#endif
-public :
-
- void optOptimizeLayout(); // Optimize the BasicBlock layout of the method
+public:
+ void optOptimizeLayout(); // Optimize the BasicBlock layout of the method
- void optOptimizeLoops(); // for "while-do" loops duplicates simple loop conditions and transforms
- // the loop into a "do-while" loop
- // Also finds all natural loops and records them in the loop table
+ void optOptimizeLoops(); // for "while-do" loops duplicates simple loop conditions and transforms
+ // the loop into a "do-while" loop
+ // Also finds all natural loops and records them in the loop table
// Optionally clone loops in the loop table.
- void optCloneLoops();
+ void optCloneLoops();
- // Clone loop "loopInd" in the loop table.
- void optCloneLoop(unsigned loopInd, LoopCloneContext* context);
+ // Clone loop "loopInd" in the loop table.
+ void optCloneLoop(unsigned loopInd, LoopCloneContext* context);
// Ensure that loop "loopInd" has a unique head block. (If the existing entry has
// non-loop predecessors other than the head entry, create a new, empty block that goes (only) to the entry,
- // and redirects the preds of the entry to this new block.) Sets the weight of the newly created block to "ambientWeight".
- void optEnsureUniqueHead(unsigned loopInd, unsigned ambientWeight);
-
- void optUnrollLoops (); // Unrolls loops (needs to have cost info)
+ // and redirects the preds of the entry to this new block.) Sets the weight of the newly created block to
+ // "ambientWeight".
+ void optEnsureUniqueHead(unsigned loopInd, unsigned ambientWeight);
-protected :
+ void optUnrollLoops(); // Unrolls loops (needs to have cost info)
+protected:
// This enumeration describes what is killed by a call.
- enum callInterf
+ enum callInterf
{
- CALLINT_NONE, // no interference (most helpers)
- CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ)
- CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ)
- CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT)
- CALLINT_ALL, // kills everything (normal method call)
+ CALLINT_NONE, // no interference (most helpers)
+ CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ)
+ CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ)
+ CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT)
+ CALLINT_ALL, // kills everything (normal method call)
};
public:
-
// A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in
// bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered"
// in bbNext order; we use comparisons on the bbNum to decide order.)
@@ -4994,335 +4876,340 @@ public:
// The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a
// single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at
// Compiler::optFindNaturalLoops().
- struct LoopDsc
- {
- BasicBlock * lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor.
- BasicBlock * lpFirst; // FIRST block (in bbNext order) reachable within this loop. (May be part of a nested loop, but not the outer loop.)
- BasicBlock * lpTop; // loop TOP (the back edge from lpBottom reaches here) (in most cases FIRST and TOP are the same)
- BasicBlock * lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM)
- BasicBlock * lpBottom; // loop BOTTOM (from here we have a back edge to the TOP)
- BasicBlock * lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM)
-
- callInterf lpAsgCall; // "callInterf" for calls in the loop
- ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked)
- varRefKinds lpAsgInds:8;// set of inds modified within the loop
-
- unsigned short lpFlags; // Mask of the LPFLG_* constants
-
- unsigned char lpExitCnt; // number of exits from the loop
-
- unsigned char lpParent; // The index of the most-nested loop that completely contains this one,
- // or else BasicBlock::NOT_IN_LOOP if no such loop exists.
- unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists.
- // (Actually, an "immediately" nested loop --
- // no other child of this loop is a parent of lpChild.)
- unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent,
- // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop
- // by following "lpChild" then "lpSibling" links.
-
-#define LPFLG_DO_WHILE 0x0001 // it's a do-while loop (i.e ENTRY is at the TOP)
-#define LPFLG_ONE_EXIT 0x0002 // the loop has only one exit
-
-#define LPFLG_ITER 0x0004 // for (i = icon or lclVar; test_condition(); i++)
-#define LPFLG_HOISTABLE 0x0008 // the loop is in a form that is suitable for hoisting expressions
-#define LPFLG_CONST 0x0010 // for (i=icon;i<icon;i++){ ... } - constant loop
-
-#define LPFLG_VAR_INIT 0x0020 // iterator is initialized with a local var (var # found in lpVarInit)
-#define LPFLG_CONST_INIT 0x0040 // iterator is initialized with a constant (found in lpConstInit)
-
-#define LPFLG_VAR_LIMIT 0x0100 // iterator is compared with a local var (var # found in lpVarLimit)
-#define LPFLG_CONST_LIMIT 0x0200 // iterator is compared with a constant (found in lpConstLimit)
-#define LPFLG_ARRLEN_LIMIT 0x0400 // iterator is compared with a.len or a[i].len (found in lpArrLenLimit)
-
-#define LPFLG_HAS_PREHEAD 0x0800 // lpHead is known to be a preHead for this loop
-#define LPFLG_REMOVED 0x1000 // has been removed from the loop table (unrolled or optimized away)
-#define LPFLG_DONT_UNROLL 0x2000 // do not unroll this loop
-
-#define LPFLG_ASGVARS_YES 0x4000 // "lpAsgVars" has been computed
-#define LPFLG_ASGVARS_INC 0x8000 // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet
- // type are assigned to.
-
-
- bool lpLoopHasHeapHavoc; // The loop contains an operation that we assume has arbitrary heap side effects.
- // If this is set, the fields below may not be accurate (since they become irrelevant.)
- bool lpContainsCall; // True if executing the loop body *may* execute a call
-
- VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop
- VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop
-
- int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been hoisted
- int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop
-        int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop
-
- int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been hoisted
- int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop
-        int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop
-
- typedef SimplerHashTable<CORINFO_FIELD_HANDLE, PtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, bool, JitSimplerHashBehavior> FieldHandleSet;
- FieldHandleSet* lpFieldsModified; // This has entries (mappings to "true") for all static field and object instance fields modified
- // in the loop.
-
- typedef SimplerHashTable<CORINFO_CLASS_HANDLE, PtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool, JitSimplerHashBehavior> ClassHandleSet;
- ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that arrays of that type are modified
- // in the loop.
+ struct LoopDsc
+ {
+ BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor.
+ BasicBlock* lpFirst; // FIRST block (in bbNext order) reachable within this loop. (May be part of a nested
+ // loop, but not the outer loop.)
+ BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here) (in most cases FIRST and TOP are the
+ // same)
+ BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM)
+ BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP)
+ BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM)
+
+ callInterf lpAsgCall; // "callInterf" for calls in the loop
+ ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked)
+ varRefKinds lpAsgInds : 8; // set of inds modified within the loop
+
+ unsigned short lpFlags; // Mask of the LPFLG_* constants
+
+ unsigned char lpExitCnt; // number of exits from the loop
+
+ unsigned char lpParent; // The index of the most-nested loop that completely contains this one,
+ // or else BasicBlock::NOT_IN_LOOP if no such loop exists.
+ unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists.
+ // (Actually, an "immediately" nested loop --
+ // no other child of this loop is a parent of lpChild.)
+ unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent,
+ // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop
+ // by following "lpChild" then "lpSibling" links.
+
+#define LPFLG_DO_WHILE 0x0001 // it's a do-while loop (i.e ENTRY is at the TOP)
+#define LPFLG_ONE_EXIT 0x0002 // the loop has only one exit
+
+#define LPFLG_ITER 0x0004 // for (i = icon or lclVar; test_condition(); i++)
+#define LPFLG_HOISTABLE 0x0008 // the loop is in a form that is suitable for hoisting expressions
+#define LPFLG_CONST 0x0010 // for (i=icon;i<icon;i++){ ... } - constant loop
+
+#define LPFLG_VAR_INIT 0x0020 // iterator is initialized with a local var (var # found in lpVarInit)
+#define LPFLG_CONST_INIT 0x0040 // iterator is initialized with a constant (found in lpConstInit)
+
+#define LPFLG_VAR_LIMIT 0x0100 // iterator is compared with a local var (var # found in lpVarLimit)
+#define LPFLG_CONST_LIMIT 0x0200 // iterator is compared with a constant (found in lpConstLimit)
+#define LPFLG_ARRLEN_LIMIT 0x0400 // iterator is compared with a.len or a[i].len (found in lpArrLenLimit)
+
+#define LPFLG_HAS_PREHEAD 0x0800 // lpHead is known to be a preHead for this loop
+#define LPFLG_REMOVED 0x1000 // has been removed from the loop table (unrolled or optimized away)
+#define LPFLG_DONT_UNROLL 0x2000 // do not unroll this loop
+
+#define LPFLG_ASGVARS_YES 0x4000 // "lpAsgVars" has been computed
+#define LPFLG_ASGVARS_INC 0x8000 // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet
+ // type are assigned to.
+
+ bool lpLoopHasHeapHavoc; // The loop contains an operation that we assume has arbitrary heap side effects.
+ // If this is set, the fields below may not be accurate (since they become irrelevant.)
+ bool lpContainsCall; // True if executing the loop body *may* execute a call
+
+ VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop
+ VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop
+
+ int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been
+ // hoisted
+ int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop
+        int lpVarInOutCount;    // The register count for the non-FP LclVars that are alive inside or across this loop
+
+ int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been
+ // hoisted
+ int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop
+        int lpVarInOutFPCount;    // The register count for the FP LclVars that are alive inside or across this loop
+
+ typedef SimplerHashTable<CORINFO_FIELD_HANDLE,
+ PtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>,
+ bool,
+ JitSimplerHashBehavior>
+ FieldHandleSet;
+ FieldHandleSet* lpFieldsModified; // This has entries (mappings to "true") for all static field and object
+ // instance fields modified
+ // in the loop.
+
+ typedef SimplerHashTable<CORINFO_CLASS_HANDLE,
+ PtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>,
+ bool,
+ JitSimplerHashBehavior>
+ ClassHandleSet;
+ ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that
+ // arrays of that type are modified
+ // in the loop.
// Adds the variable liveness information for 'blk' to 'this' LoopDsc
- void AddVariableLiveness(Compiler* comp, BasicBlock* blk);
+ void AddVariableLiveness(Compiler* comp, BasicBlock* blk);
- inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd);
- // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles (shifted left, with a low-order bit set to distinguish.)
+ inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd);
+ // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles
+ // (shifted left, with a low-order bit set to distinguish.)
// Use the {Encode/Decode}ElemType methods to construct/destruct these.
- inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd);
-
+ inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd);
/* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */
- GenTreePtr lpIterTree; // The "i <op>= const" tree
- unsigned lpIterVar (); // iterator variable #
- int lpIterConst(); // the constant with which the iterator is incremented
- genTreeOps lpIterOper (); // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.)
- void VERIFY_lpIterTree();
+ GenTreePtr lpIterTree; // The "i <op>= const" tree
+ unsigned lpIterVar(); // iterator variable #
+ int lpIterConst(); // the constant with which the iterator is incremented
+ genTreeOps lpIterOper(); // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.)
+ void VERIFY_lpIterTree();
- var_types lpIterOperType();// For overflow instructions
+ var_types lpIterOperType(); // For overflow instructions
- union
- {
- int lpConstInit; // initial constant value of iterator : Valid if LPFLG_CONST_INIT
- unsigned lpVarInit; // initial local var number to which we initialize the iterator : Valid if LPFLG_VAR_INIT
+ union {
+ int lpConstInit; // initial constant value of iterator : Valid if LPFLG_CONST_INIT
+ unsigned lpVarInit; // initial local var number to which we initialize the iterator : Valid if
+ // LPFLG_VAR_INIT
};
/* The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var" */
- GenTreePtr lpTestTree; // pointer to the node containing the loop test
- genTreeOps lpTestOper(); // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, etc.)
- void VERIFY_lpTestTree();
+ GenTreePtr lpTestTree; // pointer to the node containing the loop test
+ genTreeOps lpTestOper(); // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, etc.)
+ void VERIFY_lpTestTree();
- bool lpIsReversed(); // true if the iterator node is the second operand in the loop condition
- GenTreePtr lpIterator(); // the iterator node in the loop test
- GenTreePtr lpLimit(); // the limit node in the loop test
+ bool lpIsReversed(); // true if the iterator node is the second operand in the loop condition
+ GenTreePtr lpIterator(); // the iterator node in the loop test
+ GenTreePtr lpLimit(); // the limit node in the loop test
- int lpConstLimit(); // limit constant value of iterator - loop condition is "i RELOP const" : Valid if LPFLG_CONST_LIMIT
- unsigned lpVarLimit(); // the lclVar # in the loop condition ( "i RELOP lclVar" ) : Valid if LPFLG_VAR_LIMIT
- bool lpArrLenLimit(Compiler* comp, ArrIndex* index); // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) : Valid if LPFLG_ARRLEN_LIMIT
+ int lpConstLimit(); // limit constant value of iterator - loop condition is "i RELOP const" : Valid if
+ // LPFLG_CONST_LIMIT
+ unsigned lpVarLimit(); // the lclVar # in the loop condition ( "i RELOP lclVar" ) : Valid if
+ // LPFLG_VAR_LIMIT
+ bool lpArrLenLimit(Compiler* comp, ArrIndex* index); // The array length in the loop condition ( "i RELOP
+ // arr.len" or "i RELOP arr[i][j].len" ) : Valid if
+ // LPFLG_ARRLEN_LIMIT
// Returns "true" iff "*this" contains the blk.
- bool lpContains(BasicBlock* blk)
+ bool lpContains(BasicBlock* blk)
{
return lpFirst->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum;
}
- // Returns "true" iff "*this" (properly) contains the range [first, bottom] (allowing firsts
+ // Returns "true" iff "*this" (properly) contains the range [first, bottom] (allowing firsts
// to be equal, but requiring bottoms to be different.)
- bool lpContains(BasicBlock* first, BasicBlock* bottom)
+ bool lpContains(BasicBlock* first, BasicBlock* bottom)
{
return lpFirst->bbNum <= first->bbNum && bottom->bbNum < lpBottom->bbNum;
}
// Returns "true" iff "*this" (properly) contains "lp2" (allowing firsts to be equal, but requiring
// bottoms to be different.)
- bool lpContains(const LoopDsc& lp2)
+ bool lpContains(const LoopDsc& lp2)
{
return lpContains(lp2.lpFirst, lp2.lpBottom);
}
// Returns "true" iff "*this" is (properly) contained by the range [first, bottom]
// (allowing firsts to be equal, but requiring bottoms to be different.)
- bool lpContainedBy(BasicBlock* first, BasicBlock* bottom)
+ bool lpContainedBy(BasicBlock* first, BasicBlock* bottom)
{
- return first->bbNum <= lpFirst->bbNum && lpBottom->bbNum < bottom->bbNum;
+ return first->bbNum <= lpFirst->bbNum && lpBottom->bbNum < bottom->bbNum;
}
// Returns "true" iff "*this" is (properly) contained by "lp2"
// (allowing firsts to be equal, but requiring bottoms to be different.)
- bool lpContainedBy(const LoopDsc& lp2)
+ bool lpContainedBy(const LoopDsc& lp2)
{
return lpContains(lp2.lpFirst, lp2.lpBottom);
}
// Returns "true" iff "*this" is disjoint from the range [top, bottom].
- bool lpDisjoint(BasicBlock* first, BasicBlock* bottom)
+ bool lpDisjoint(BasicBlock* first, BasicBlock* bottom)
{
return bottom->bbNum < lpFirst->bbNum || lpBottom->bbNum < first->bbNum;
}
// Returns "true" iff "*this" is disjoint from "lp2".
- bool lpDisjoint(const LoopDsc& lp2)
+ bool lpDisjoint(const LoopDsc& lp2)
{
return lpDisjoint(lp2.lpFirst, lp2.lpBottom);
}
// Returns "true" iff the loop is well-formed (see code for defn).
- bool lpWellFormed()
+ bool lpWellFormed()
{
- return lpFirst->bbNum <= lpTop->bbNum
- && lpTop->bbNum <= lpEntry->bbNum
- && lpEntry->bbNum <= lpBottom->bbNum
- && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum);
+ return lpFirst->bbNum <= lpTop->bbNum && lpTop->bbNum <= lpEntry->bbNum &&
+ lpEntry->bbNum <= lpBottom->bbNum &&
+ (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum);
}
-
};
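As the comments inside LoopDsc note, several fields are only valid when the matching LPFLG_* bit is set. A hedged sketch of a reader that honors that contract, written as if it were a Compiler member and purely for illustration (not part of this commit):

    // Illustrative only: consult the LPFLG_* bits before trusting the flag-gated fields.
    void sketchInspectLoop(unsigned lnum)
    {
        LoopDsc& loop = optLoopTable[lnum];
        if ((loop.lpFlags & LPFLG_REMOVED) != 0)
        {
            return; // entry no longer describes a live loop (unrolled or optimized away)
        }
        if ((loop.lpFlags & LPFLG_ITER) != 0)
        {
            unsigned iterVar = loop.lpIterVar(); // lpIterTree/lpIterVar() are valid only for LPFLG_ITER loops
            (void)iterVar;
        }
        if ((loop.lpFlags & LPFLG_CONST_LIMIT) != 0)
        {
            int limit = loop.lpConstLimit(); // valid only when the loop test compares against a constant
            (void)limit;
        }
    }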
-protected :
-
- bool fgMightHaveLoop(); // returns true if there are any backedges
- bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability
-
-public :
-
- LoopDsc optLoopTable[MAX_LOOP_NUM]; // loop descriptor table
- unsigned char optLoopCount; // number of tracked loops
+protected:
+ bool fgMightHaveLoop(); // returns true if there are any backedges
+ bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability
-protected :
+public:
+ LoopDsc optLoopTable[MAX_LOOP_NUM]; // loop descriptor table
+ unsigned char optLoopCount; // number of tracked loops
- unsigned optCallCount; // number of calls made in the method
- unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method
- unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method
- unsigned optLoopsCloned; // number of loops cloned in the current method.
+protected:
+ unsigned optCallCount; // number of calls made in the method
+ unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method
+ unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method
+ unsigned optLoopsCloned; // number of loops cloned in the current method.
#ifdef DEBUG
- unsigned optFindLoopNumberFromBeginBlock(BasicBlock *begBlk);
- void optPrintLoopInfo(unsigned loopNum,
- BasicBlock * lpHead,
- BasicBlock * lpFirst,
- BasicBlock * lpTop,
- BasicBlock * lpEntry,
- BasicBlock * lpBottom,
- unsigned char lpExitCnt,
- BasicBlock * lpExit,
- unsigned parentLoop = BasicBlock::NOT_IN_LOOP
- );
- void optPrintLoopInfo(unsigned lnum);
- void optPrintLoopRecording(unsigned lnum);
-
- void optCheckPreds ();
-#endif
-
- void optSetBlockWeights ();
-
- void optMarkLoopBlocks (BasicBlock *begBlk,
- BasicBlock *endBlk,
- bool excludeEndBlk);
-
- void optUnmarkLoopBlocks(BasicBlock *begBlk,
- BasicBlock *endBlk);
-
- void optUpdateLoopsBeforeRemoveBlock(BasicBlock * block,
- bool skipUnmarkLoop = false);
-
- bool optIsLoopTestEvalIntoTemp(GenTreePtr test, GenTreePtr* newTest);
- unsigned optIsLoopIncrTree(GenTreePtr incr);
- bool optCheckIterInLoopTest(unsigned loopInd, GenTreePtr test, BasicBlock* from, BasicBlock* to, unsigned iterVar);
- bool optComputeIterInfo(GenTreePtr incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar);
- bool optPopulateInitInfo(unsigned loopInd, GenTreePtr init, unsigned iterVar);
- bool optExtractInitTestIncr(BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTreePtr* ppInit, GenTreePtr* ppTest, GenTreePtr* ppIncr);
-
-
- void optRecordLoop (BasicBlock * head,
- BasicBlock * first,
- BasicBlock * top,
- BasicBlock * entry,
- BasicBlock * bottom,
- BasicBlock * exit,
- unsigned char exitCnt);
-
- void optFindNaturalLoops();
+ unsigned optFindLoopNumberFromBeginBlock(BasicBlock* begBlk);
+ void optPrintLoopInfo(unsigned loopNum,
+ BasicBlock* lpHead,
+ BasicBlock* lpFirst,
+ BasicBlock* lpTop,
+ BasicBlock* lpEntry,
+ BasicBlock* lpBottom,
+ unsigned char lpExitCnt,
+ BasicBlock* lpExit,
+ unsigned parentLoop = BasicBlock::NOT_IN_LOOP);
+ void optPrintLoopInfo(unsigned lnum);
+ void optPrintLoopRecording(unsigned lnum);
+
+ void optCheckPreds();
+#endif
+
+ void optSetBlockWeights();
+
+ void optMarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk, bool excludeEndBlk);
+
+ void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk);
+
+ void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false);
+
+ bool optIsLoopTestEvalIntoTemp(GenTreePtr test, GenTreePtr* newTest);
+ unsigned optIsLoopIncrTree(GenTreePtr incr);
+ bool optCheckIterInLoopTest(unsigned loopInd, GenTreePtr test, BasicBlock* from, BasicBlock* to, unsigned iterVar);
+ bool optComputeIterInfo(GenTreePtr incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar);
+ bool optPopulateInitInfo(unsigned loopInd, GenTreePtr init, unsigned iterVar);
+ bool optExtractInitTestIncr(BasicBlock* head,
+ BasicBlock* bottom,
+ BasicBlock* exit,
+ GenTreePtr* ppInit,
+ GenTreePtr* ppTest,
+ GenTreePtr* ppIncr);
+
+ void optRecordLoop(BasicBlock* head,
+ BasicBlock* first,
+ BasicBlock* top,
+ BasicBlock* entry,
+ BasicBlock* bottom,
+ BasicBlock* exit,
+ unsigned char exitCnt);
+
+ void optFindNaturalLoops();
// Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' --
// each loop has a unique "top." Returns "true" iff the flowgraph has been modified.
- bool optCanonicalizeLoopNest(unsigned char loopInd);
+ bool optCanonicalizeLoopNest(unsigned char loopInd);
// Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top,"
// unshared with any other loop. Returns "true" iff the flowgraph has been modified
- bool optCanonicalizeLoop(unsigned char loopInd);
+ bool optCanonicalizeLoop(unsigned char loopInd);
// Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". Requires "l2" to be
// a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". Returns true
// iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2".
- bool optLoopContains(unsigned l1, unsigned l2);
+ bool optLoopContains(unsigned l1, unsigned l2);
// Requires "loopInd" to be a valid index into the loop table.
// Updates the loop table by changing loop "loopInd", whose head is required
// to be "from", to be "to". Also performs this transformation for any
// loop nested in "loopInd" that shares the same head as "loopInd".
- void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to);
+ void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to);
// Updates the successors of "blk": if "blk2" is a successor of "blk", and there is a mapping for "blk2->blk3" in
// "redirectMap", change "blk" so that "blk3" is this successor. Note that the predecessor lists are not updated.
- void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap);
+ void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap);
// Marks the containsCall information to "lnum" and any parent loops.
- void AddContainsCallAllContainingLoops(unsigned lnum);
+ void AddContainsCallAllContainingLoops(unsigned lnum);
// Adds the variable liveness information from 'blk' to "lnum" and any parent loops.
- void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock * blk);
+ void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk);
// Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops.
- void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd);
+ void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd);
// Adds "elemType" to the set of modified array element types of "lnum" and any parent loops.
- void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType);
+ void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType);
// Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone
// of "from".) Copies the jump destination from "from" to "to".
- void optCopyBlkDest(BasicBlock* from, BasicBlock* to);
+ void optCopyBlkDest(BasicBlock* from, BasicBlock* to);
-
// The depth of the loop described by "lnum" (an index into the loop table.) (0 == top level)
- unsigned optLoopDepth(unsigned lnum)
+ unsigned optLoopDepth(unsigned lnum)
{
unsigned par = optLoopTable[lnum].lpParent;
- if (par == BasicBlock::NOT_IN_LOOP) return 0;
- else return 1 + optLoopDepth(par);
+ if (par == BasicBlock::NOT_IN_LOOP)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1 + optLoopDepth(par);
+ }
}
- void fgOptWhileLoop (BasicBlock * block);
+ void fgOptWhileLoop(BasicBlock* block);
- bool optComputeLoopRep (int constInit,
- int constLimit,
- int iterInc,
- genTreeOps iterOper,
- var_types iterType,
- genTreeOps testOper,
- bool unsignedTest,
- bool dupCond,
- unsigned * iterCount);
+ bool optComputeLoopRep(int constInit,
+ int constLimit,
+ int iterInc,
+ genTreeOps iterOper,
+ var_types iterType,
+ genTreeOps testOper,
+ bool unsignedTest,
+ bool dupCond,
+ unsigned* iterCount);
#if FEATURE_STACK_FP_X87
public:
- VARSET_TP optAllFloatVars; // mask of all tracked FP variables
- VARSET_TP optAllFPregVars; // mask of all enregistered FP variables
- VARSET_TP optAllNonFPvars; // mask of all tracked non-FP variables
-#endif // FEATURE_STACK_FP_X87
+ VARSET_TP optAllFloatVars; // mask of all tracked FP variables
+ VARSET_TP optAllFPregVars; // mask of all enregistered FP variables
+ VARSET_TP optAllNonFPvars; // mask of all tracked non-FP variables
+#endif // FEATURE_STACK_FP_X87
private:
- static fgWalkPreFn optIsVarAssgCB;
-protected:
+ static fgWalkPreFn optIsVarAssgCB;
- bool optIsVarAssigned(BasicBlock * beg,
- BasicBlock * end,
- GenTreePtr skip,
- unsigned var);
+protected:
+ bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTreePtr skip, unsigned var);
- bool optIsVarAssgLoop(unsigned lnum,
- unsigned var);
+ bool optIsVarAssgLoop(unsigned lnum, unsigned var);
- int optIsSetAssgLoop(unsigned lnum,
- ALLVARSET_VALARG_TP vars,
- varRefKinds inds = VR_NONE);
+ int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE);
- bool optNarrowTree (GenTreePtr tree,
- var_types srct,
- var_types dstt,
- ValueNumPair vnpNarrow,
- bool doit);
+ bool optNarrowTree(GenTreePtr tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit);
/**************************************************************************
* Optimization conditions
*************************************************************************/
- bool optFastCodeOrBlendedLoop(BasicBlock::weight_t bbWeight);
- bool optPentium4(void);
- bool optAvoidIncDec(BasicBlock::weight_t bbWeight);
- bool optAvoidIntMult(void);
+ bool optFastCodeOrBlendedLoop(BasicBlock::weight_t bbWeight);
+ bool optPentium4(void);
+ bool optAvoidIncDec(BasicBlock::weight_t bbWeight);
+ bool optAvoidIntMult(void);
#if FEATURE_ANYCSE
-protected :
-
+protected:
// The following is the upper limit on how many expressions we'll keep track
// of for the CSE analysis.
//
@@ -5330,175 +5217,169 @@ protected :
static const int MIN_CSE_COST = 2;
-
/* Generic list of nodes - used by the CSE logic */
- struct treeLst
+ struct treeLst
{
- treeLst * tlNext;
- GenTreePtr tlTree;
+ treeLst* tlNext;
+ GenTreePtr tlTree;
};
- typedef struct treeLst * treeLstPtr;
+ typedef struct treeLst* treeLstPtr;
- struct treeStmtLst
+ struct treeStmtLst
{
- treeStmtLst * tslNext;
- GenTreePtr tslTree; // tree node
- GenTreePtr tslStmt; // statement containing the tree
- BasicBlock * tslBlock; // block containing the statement
+ treeStmtLst* tslNext;
+ GenTreePtr tslTree; // tree node
+ GenTreePtr tslStmt; // statement containing the tree
+ BasicBlock* tslBlock; // block containing the statement
};
- typedef struct treeStmtLst * treeStmtLstPtr;
-
+ typedef struct treeStmtLst* treeStmtLstPtr;
// The following logic keeps track of expressions via a simple hash table.
- struct CSEdsc
+ struct CSEdsc
{
- CSEdsc * csdNextInBucket; // used by the hash table
+ CSEdsc* csdNextInBucket; // used by the hash table
-        unsigned csdHashValue; // the original hash key
+        unsigned csdHashValue; // the original hash key
- unsigned csdIndex; // 1..optCSECandidateCount
- char csdLiveAcrossCall; // 0 or 1
+ unsigned csdIndex; // 1..optCSECandidateCount
+ char csdLiveAcrossCall; // 0 or 1
- unsigned short csdDefCount; // definition count
- unsigned short csdUseCount; // use count (excluding the implicit uses at defs)
+ unsigned short csdDefCount; // definition count
+ unsigned short csdUseCount; // use count (excluding the implicit uses at defs)
- unsigned csdDefWtCnt; // weighted def count
- unsigned csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)
+ unsigned csdDefWtCnt; // weighted def count
+ unsigned csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)
-        GenTreePtr csdTree; // treenode containing the 1st occurrence
-        GenTreePtr csdStmt; // stmt containing the 1st occurrence
-        BasicBlock * csdBlock; // block containing the 1st occurrence
+        GenTreePtr csdTree;   // treenode containing the 1st occurrence
+        GenTreePtr csdStmt;   // stmt containing the 1st occurrence
+        BasicBlock* csdBlock; // block containing the 1st occurrence
- treeStmtLstPtr csdTreeList; // list of matching tree nodes: head
- treeStmtLstPtr csdTreeLast; // list of matching tree nodes: tail
+ treeStmtLstPtr csdTreeList; // list of matching tree nodes: head
+ treeStmtLstPtr csdTreeLast; // list of matching tree nodes: tail
};
static const size_t s_optCSEhashSize;
- CSEdsc * * optCSEhash;
- CSEdsc * * optCSEtab;
+ CSEdsc** optCSEhash;
+ CSEdsc** optCSEtab;
- void optCSEstop ();
+ void optCSEstop();
- CSEdsc * optCSEfindDsc (unsigned index);
- void optUnmarkCSE (GenTreePtr tree);
+ CSEdsc* optCSEfindDsc(unsigned index);
+ void optUnmarkCSE(GenTreePtr tree);
// user defined callback data for the tree walk function optCSE_MaskHelper()
struct optCSE_MaskData
{
- EXPSET_TP CSE_defMask;
- EXPSET_TP CSE_useMask;
+ EXPSET_TP CSE_defMask;
+ EXPSET_TP CSE_useMask;
};
// Treewalk helper for optCSE_DefMask and optCSE_UseMask
- static fgWalkPreFn optCSE_MaskHelper;
+ static fgWalkPreFn optCSE_MaskHelper;
// This function walks all the node for an given tree
// and return the mask of CSE definitions and uses for the tree
//
- void optCSE_GetMaskData (GenTreePtr tree, optCSE_MaskData* pMaskData);
+ void optCSE_GetMaskData(GenTreePtr tree, optCSE_MaskData* pMaskData);
// Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2.
- bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);
- bool optCSE_canSwap(GenTree* tree);
+ bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);
+ bool optCSE_canSwap(GenTree* tree);
static fgWalkPostFn optPropagateNonCSE;
static fgWalkPreFn optHasNonCSEChild;
- static fgWalkPreFn optUnmarkCSEs;
+ static fgWalkPreFn optUnmarkCSEs;
- static int __cdecl optCSEcostCmpEx(const void *op1, const void *op2);
- static int __cdecl optCSEcostCmpSz(const void *op1, const void *op2);
-
- void optCleanupCSEs();
+ static int __cdecl optCSEcostCmpEx(const void* op1, const void* op2);
+ static int __cdecl optCSEcostCmpSz(const void* op1, const void* op2);
+ void optCleanupCSEs();
#ifdef DEBUG
- void optEnsureClearCSEInfo();
+ void optEnsureClearCSEInfo();
#endif // DEBUG
-#endif // FEATURE_ANYCSE
+#endif // FEATURE_ANYCSE
#if FEATURE_VALNUM_CSE
/**************************************************************************
* Value Number based CSEs
*************************************************************************/
-public :
-
- void optOptimizeValnumCSEs();
-
-protected :
-
- void optValnumCSE_Init ();
- unsigned optValnumCSE_Index(GenTreePtr tree, GenTreePtr stmt);
- unsigned optValnumCSE_Locate();
- void optValnumCSE_InitDataFlow();
- void optValnumCSE_DataFlow();
- void optValnumCSE_Availablity();
- void optValnumCSE_Heuristic();
- void optValnumCSE_UnmarkCSEs(GenTreePtr deadTree, GenTreePtr keepList);
+public:
+ void optOptimizeValnumCSEs();
+protected:
+ void optValnumCSE_Init();
+ unsigned optValnumCSE_Index(GenTreePtr tree, GenTreePtr stmt);
+ unsigned optValnumCSE_Locate();
+ void optValnumCSE_InitDataFlow();
+ void optValnumCSE_DataFlow();
+ void optValnumCSE_Availablity();
+ void optValnumCSE_Heuristic();
+ void optValnumCSE_UnmarkCSEs(GenTreePtr deadTree, GenTreePtr keepList);
#endif // FEATURE_VALNUM_CSE
#if FEATURE_ANYCSE
- bool optDoCSE; // True when we have found a duplicate CSE tree
- bool optValnumCSE_phase; // True when we are executing the optValnumCSE_phase
- unsigned optCSECandidateTotal; // Grand total of CSE candidates for both Lexical and ValNum
- unsigned optCSECandidateCount; // Count of CSE's candidates, reset for Lexical and ValNum CSE's
- unsigned optCSEstart; // The first local variable number that is a CSE
- unsigned optCSEcount; // The total count of CSE's introduced.
- unsigned optCSEweight; // The weight of the current block when we are
- // scanning for CSE expressions
-
- bool optIsCSEcandidate (GenTreePtr tree);
-
- // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
+ bool optDoCSE; // True when we have found a duplicate CSE tree
+ bool optValnumCSE_phase; // True when we are executing the optValnumCSE_phase
+ unsigned optCSECandidateTotal; // Grand total of CSE candidates for both Lexical and ValNum
+ unsigned optCSECandidateCount; // Count of CSE's candidates, reset for Lexical and ValNum CSE's
+ unsigned optCSEstart; // The first local variable number that is a CSE
+ unsigned optCSEcount; // The total count of CSE's introduced.
+ unsigned optCSEweight; // The weight of the current block when we are
+ // scanning for CSE expressions
+
+ bool optIsCSEcandidate(GenTreePtr tree);
+
+ // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
//
- bool lclNumIsTrueCSE(unsigned lclNum) const
- {
- return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart+optCSEcount));
- }
+ bool lclNumIsTrueCSE(unsigned lclNum) const
+ {
+ return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount));
+ }
// lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop.
- //
- bool lclNumIsCSE(unsigned lclNum) const
- {
- return lvaTable[lclNum].lvIsCSE;
- }
+ //
+ bool lclNumIsCSE(unsigned lclNum) const
+ {
+ return lvaTable[lclNum].lvIsCSE;
+ }
#ifdef DEBUG
- bool optConfigDisableCSE();
- bool optConfigDisableCSE2();
+ bool optConfigDisableCSE();
+ bool optConfigDisableCSE2();
#endif
- void optOptimizeCSEs();
-
+ void optOptimizeCSEs();
-#endif // FEATURE_ANYCSE
+#endif // FEATURE_ANYCSE
- struct isVarAssgDsc
+ struct isVarAssgDsc
{
- GenTreePtr ivaSkip;
+ GenTreePtr ivaSkip;
#ifdef DEBUG
- void * ivaSelf;
+ void* ivaSelf;
#endif
- unsigned ivaVar; // Variable we are interested in, or -1
- ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars.
- bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
- varRefKinds ivaMaskInd; // What kind of indirect assignments are there?
- callInterf ivaMaskCall; // What kind of calls are there?
+ unsigned ivaVar; // Variable we are interested in, or -1
+ ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars.
+ bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
+ varRefKinds ivaMaskInd; // What kind of indirect assignments are there?
+ callInterf ivaMaskCall; // What kind of calls are there?
};
- static callInterf optCallInterf (GenTreePtr call);
+ static callInterf optCallInterf(GenTreePtr call);
public:
// VN based copy propagation.
typedef ArrayStack<GenTreePtr> GenTreePtrStack;
- typedef SimplerHashTable<unsigned, SmallPrimitiveKeyFuncs<unsigned>, GenTreePtrStack*, JitSimplerHashBehavior> LclNumToGenTreePtrStack;
+ typedef SimplerHashTable<unsigned, SmallPrimitiveKeyFuncs<unsigned>, GenTreePtrStack*, JitSimplerHashBehavior>
+ LclNumToGenTreePtrStack;
// Kill set to track variables with intervening definitions.
VARSET_TP optCopyPropKillSet;
@@ -5519,9 +5400,7 @@ public:
unsigned m_lvNum;
unsigned m_ssaNum;
- SSAName(unsigned lvNum, unsigned ssaNum) :
- m_lvNum(lvNum),
- m_ssaNum(ssaNum)
+ SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum)
{
}
@@ -5536,17 +5415,17 @@ public:
}
};
-#define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array
-#define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type.
-#define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores.
-#define OMF_HAS_VTABLEREF 0x00000008 // Method contains method table reference.
-#define OMF_HAS_NULLCHECK 0x00000010 // Method contains null check.
+#define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array
+#define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type.
+#define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores.
+#define OMF_HAS_VTABLEREF 0x00000008 // Method contains method table reference.
+#define OMF_HAS_NULLCHECK 0x00000010 // Method contains null check.
- unsigned optMethodFlags;
+ unsigned optMethodFlags;
    // Recursion bound controls how far we can go backwards when tracking an SSA value.
- // No throughput diff was found with backward walk bound between 3-8.
- static const int optEarlyPropRecurBound = 5;
+ // No throughput diff was found with backward walk bound between 3-8.
+ static const int optEarlyPropRecurBound = 5;
enum class optPropKind
{
@@ -5556,100 +5435,106 @@ public:
OPK_NULLCHECK
};
- bool gtIsVtableRef(GenTreePtr tree);
+ bool gtIsVtableRef(GenTreePtr tree);
GenTreePtr getArrayLengthFromAllocation(GenTreePtr tree);
GenTreePtr getObjectHandleNodeFromAllocation(GenTreePtr tree);
GenTreePtr optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth);
GenTreePtr optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind);
- bool optEarlyPropRewriteTree(GenTreePtr tree);
- bool optDoEarlyPropForBlock(BasicBlock* block);
- bool optDoEarlyPropForFunc();
- void optEarlyProp();
- void optFoldNullCheck(GenTreePtr tree);
- bool optCanMoveNullCheckPastTree(GenTreePtr tree, bool isInsideTry);
+ bool optEarlyPropRewriteTree(GenTreePtr tree);
+ bool optDoEarlyPropForBlock(BasicBlock* block);
+ bool optDoEarlyPropForFunc();
+ void optEarlyProp();
+ void optFoldNullCheck(GenTreePtr tree);
+ bool optCanMoveNullCheckPastTree(GenTreePtr tree, bool isInsideTry);
#if ASSERTION_PROP
/**************************************************************************
* Value/Assertion propagation
*************************************************************************/
public:
-
// Data structures for assertion prop
BitVecTraits* apTraits;
- ASSERT_TP apFull;
- ASSERT_TP apEmpty;
-
- enum optAssertionKind { OAK_INVALID,
- OAK_EQUAL,
- OAK_NOT_EQUAL,
- OAK_SUBRANGE,
- OAK_NO_THROW,
- OAK_COUNT };
-
- enum optOp1Kind { O1K_INVALID,
- O1K_LCLVAR,
- O1K_ARR_BND,
- O1K_ARRLEN_OPER_BND,
- O1K_ARRLEN_LOOP_BND,
- O1K_CONSTANT_LOOP_BND,
- O1K_EXACT_TYPE,
- O1K_SUBTYPE,
- O1K_VALUE_NUMBER,
- O1K_COUNT };
-
- enum optOp2Kind { O2K_INVALID,
- O2K_LCLVAR_COPY,
- O2K_IND_CNS_INT,
- O2K_CONST_INT,
- O2K_CONST_LONG,
- O2K_CONST_DOUBLE,
- O2K_ARR_LEN,
- O2K_SUBRANGE,
- O2K_COUNT };
+ ASSERT_TP apFull;
+ ASSERT_TP apEmpty;
+
+ enum optAssertionKind
+ {
+ OAK_INVALID,
+ OAK_EQUAL,
+ OAK_NOT_EQUAL,
+ OAK_SUBRANGE,
+ OAK_NO_THROW,
+ OAK_COUNT
+ };
+
+ enum optOp1Kind
+ {
+ O1K_INVALID,
+ O1K_LCLVAR,
+ O1K_ARR_BND,
+ O1K_ARRLEN_OPER_BND,
+ O1K_ARRLEN_LOOP_BND,
+ O1K_CONSTANT_LOOP_BND,
+ O1K_EXACT_TYPE,
+ O1K_SUBTYPE,
+ O1K_VALUE_NUMBER,
+ O1K_COUNT
+ };
+
+ enum optOp2Kind
+ {
+ O2K_INVALID,
+ O2K_LCLVAR_COPY,
+ O2K_IND_CNS_INT,
+ O2K_CONST_INT,
+ O2K_CONST_LONG,
+ O2K_CONST_DOUBLE,
+ O2K_ARR_LEN,
+ O2K_SUBRANGE,
+ O2K_COUNT
+ };
struct AssertionDsc
{
- optAssertionKind assertionKind;
+ optAssertionKind assertionKind;
struct SsaVar
{
- unsigned lclNum; // assigned to or property of this local var number
- unsigned ssaNum;
+ unsigned lclNum; // assigned to or property of this local var number
+ unsigned ssaNum;
};
struct ArrBnd
{
- ValueNum vnIdx;
- ValueNum vnLen;
+ ValueNum vnIdx;
+ ValueNum vnLen;
};
struct AssertionDscOp1
{
- optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype
- ValueNum vn;
- union
- {
+ optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype
+ ValueNum vn;
+ union {
SsaVar lcl;
ArrBnd bnd;
};
} op1;
struct AssertionDscOp2
{
- optOp2Kind kind; // a const or copy assignment
- ValueNum vn;
+ optOp2Kind kind; // a const or copy assignment
+ ValueNum vn;
struct IntVal
{
- ssize_t iconVal; // integer
- unsigned iconFlags; // gtFlags
+ ssize_t iconVal; // integer
+ unsigned iconFlags; // gtFlags
};
- struct Range // integer subrange
+ struct Range // integer subrange
{
- ssize_t loBound;
- ssize_t hiBound;
+ ssize_t loBound;
+ ssize_t hiBound;
};
- union
- {
- SsaVar lcl;
- IntVal u1;
+ union {
+ SsaVar lcl;
+ IntVal u1;
__int64 lconVal;
- double dconVal;
- Range u2;
+ double dconVal;
+ Range u2;
};
} op2;
@@ -5663,25 +5548,22 @@ public:
}
bool IsConstantBound()
{
- return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_CONSTANT_LOOP_BND);
+ return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) &&
+ op1.kind == O1K_CONSTANT_LOOP_BND);
}
bool IsBoundsCheckNoThrow()
{
- return ((assertionKind == OAK_NO_THROW) &&
- (op1.kind == O1K_ARR_BND));
+ return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND));
}
bool IsCopyAssertion()
{
- return ((assertionKind == OAK_EQUAL) &&
- (op1.kind == O1K_LCLVAR) &&
- (op2.kind == O2K_LCLVAR_COPY));
+ return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY));
}
static bool SameKind(AssertionDsc* a1, AssertionDsc* a2)
{
- return a1->assertionKind == a2->assertionKind &&
- a1->op1.kind == a2->op1.kind &&
+ return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind &&
a1->op2.kind == a2->op2.kind;
}
@@ -5702,51 +5584,50 @@ public:
{
switch (type)
{
- case TYP_BYTE:
- return SCHAR_MIN;
- case TYP_SHORT:
- return SHRT_MIN;
- case TYP_INT:
- return INT_MIN;
- case TYP_BOOL:
- case TYP_UBYTE:
- case TYP_CHAR:
- case TYP_USHORT:
- case TYP_UINT:
- return 0;
- default:
- unreached();
+ case TYP_BYTE:
+ return SCHAR_MIN;
+ case TYP_SHORT:
+ return SHRT_MIN;
+ case TYP_INT:
+ return INT_MIN;
+ case TYP_BOOL:
+ case TYP_UBYTE:
+ case TYP_CHAR:
+ case TYP_USHORT:
+ case TYP_UINT:
+ return 0;
+ default:
+ unreached();
}
}
static ssize_t GetUpperBoundForIntegralType(var_types type)
{
switch (type)
{
- case TYP_BOOL:
- return 1;
- case TYP_BYTE:
- return SCHAR_MAX;
- case TYP_SHORT:
- return SHRT_MAX;
- case TYP_INT:
- return INT_MAX;
- case TYP_UBYTE:
- return UCHAR_MAX;
- case TYP_CHAR:
- case TYP_USHORT:
- return USHRT_MAX;
- case TYP_UINT:
- return UINT_MAX;
- default:
- unreached();
+ case TYP_BOOL:
+ return 1;
+ case TYP_BYTE:
+ return SCHAR_MAX;
+ case TYP_SHORT:
+ return SHRT_MAX;
+ case TYP_INT:
+ return INT_MAX;
+ case TYP_UBYTE:
+ return UCHAR_MAX;
+ case TYP_CHAR:
+ case TYP_USHORT:
+ return USHRT_MAX;
+ case TYP_UINT:
+ return UINT_MAX;
+ default:
+ unreached();
}
}
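The two switches above simply recover the representable range of each small integral type from the <climits> limits. A standalone sketch of the subrange question they feed (the helper and harness here are assumed; only the bounds idea comes from the code above): a narrowing cast is redundant when the value's known range already fits inside the target type's [lower, upper] bounds.

#include <limits.h>
#include <stdio.h>

// Returns true when [knownLo, knownHi] already fits in [typeLo, typeHi].
static bool CastIsRedundant(long long knownLo, long long knownHi, long long typeLo, long long typeHi)
{
    return (knownLo >= typeLo) && (knownHi <= typeHi);
}

int main()
{
    printf("%d\n", CastIsRedundant(0, 100, 0, UCHAR_MAX)); // 1: [0,100] fits in [0,255]
    printf("%d\n", CastIsRedundant(0, 300, 0, UCHAR_MAX)); // 0: 300 does not fit an unsigned byte
    return 0;
}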
bool HasSameOp1(AssertionDsc* that, bool vnBased)
{
return (op1.kind == that->op1.kind) &&
- ((vnBased && (op1.vn == that->op1.vn)) ||
- (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum)));
+ ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum)));
}
bool HasSameOp2(AssertionDsc* that, bool vnBased)
@@ -5757,49 +5638,45 @@ public:
}
switch (op2.kind)
{
- case O2K_IND_CNS_INT:
- case O2K_CONST_INT:
- return ((op2.u1.iconVal == that->op2.u1.iconVal) &&
- (op2.u1.iconFlags == that->op2.u1.iconFlags));
+ case O2K_IND_CNS_INT:
+ case O2K_CONST_INT:
+ return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags));
- case O2K_CONST_LONG:
- return (op2.lconVal == that->op2.lconVal);
+ case O2K_CONST_LONG:
+ return (op2.lconVal == that->op2.lconVal);
- case O2K_CONST_DOUBLE:
- // exact match because of positive and negative zero.
- return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0);
+ case O2K_CONST_DOUBLE:
+ // exact match because of positive and negative zero.
+ return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0);
- case O2K_LCLVAR_COPY:
- case O2K_ARR_LEN:
- return (op2.lcl.lclNum == that->op2.lcl.lclNum) &&
- (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum);
+ case O2K_LCLVAR_COPY:
+ case O2K_ARR_LEN:
+ return (op2.lcl.lclNum == that->op2.lcl.lclNum) &&
+ (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum);
- case O2K_SUBRANGE:
- return ((op2.u2.loBound == that->op2.u2.loBound) &&
- (op2.u2.hiBound == that->op2.u2.hiBound));
+ case O2K_SUBRANGE:
+ return ((op2.u2.loBound == that->op2.u2.loBound) && (op2.u2.hiBound == that->op2.u2.hiBound));
- case O2K_INVALID:
- // we will return false
- break;
+ case O2K_INVALID:
+ // we will return false
+ break;
- default:
- assert(!"Unexpected value for op2.kind in AssertionDsc.");
- break;
+ default:
+ assert(!"Unexpected value for op2.kind in AssertionDsc.");
+ break;
}
return false;
}
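A standalone illustration of why the O2K_CONST_DOUBLE case above compares bits with memcmp instead of ==: positive and negative zero compare equal as doubles, yet they are distinct constants that the assertion table must not merge.

#include <stdio.h>
#include <string.h>

int main()
{
    double a = 0.0;
    double b = -0.0;
    printf("operator==: %d\n", a == b);                              // 1: floating-point equality
    printf("memcmp    : %d\n", memcmp(&a, &b, sizeof(double)) == 0); // 0: sign bits differ
    return 0;
}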
-
bool Complementary(AssertionDsc* that, bool vnBased)
{
- return ComplementaryKind(assertionKind, that->assertionKind) &&
- HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased);
+ return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) &&
+ HasSameOp2(that, vnBased);
}
bool Equals(AssertionDsc* that, bool vnBased)
{
- return (assertionKind == that->assertionKind) &&
- HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased);
+ return (assertionKind == that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased);
}
};
@@ -5808,8 +5685,8 @@ public:
protected:
static fgWalkPreFn optAddCopiesCallback;
static fgWalkPreFn optVNAssertionPropCurStmtVisitor;
- unsigned optAddCopyLclNum;
- GenTreePtr optAddCopyAsgnNode;
+ unsigned optAddCopyLclNum;
+ GenTreePtr optAddCopyAsgnNode;
bool optLocalAssertionProp; // indicates that we are performing local assertion prop
bool optAssertionPropagated; // set to true if we modified the trees
@@ -5817,15 +5694,14 @@ protected:
#ifdef DEBUG
GenTreePtr optAssertionPropCurrentTree;
#endif
- AssertionIndex* optComplementaryAssertionMap;
- ExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions
+ AssertionIndex* optComplementaryAssertionMap;
+ ExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions
// using the value of a local var) for each local var
- AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments
- AssertionIndex optAssertionCount; // total number of assertions in the assertion table
+ AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments
+ AssertionIndex optAssertionCount; // total number of assertions in the assertion table
AssertionIndex optMaxAssertionCount;
-public :
-
+public:
void optVnNonNullPropCurStmt(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree);
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree);
GenTreePtr optVNConstantPropOnRelOp(GenTreePtr tree);
@@ -5838,7 +5714,8 @@ public :
return optAssertionCount;
}
ASSERT_TP* bbJtrueAssertionOut;
- typedef SimplerHashTable<ValueNum, SmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP, JitSimplerHashBehavior> ValueNumToAssertsMap;
+ typedef SimplerHashTable<ValueNum, SmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP, JitSimplerHashBehavior>
+ ValueNumToAssertsMap;
ValueNumToAssertsMap* optValueNumToAsserts;
static const AssertionIndex NO_ASSERTION_INDEX = 0;
@@ -5854,28 +5731,31 @@ public :
#endif
// Assertion prop data flow functions.
- void optAssertionPropMain();
+ void optAssertionPropMain();
GenTreePtr optVNAssertionPropCurStmt(BasicBlock* block, GenTreePtr stmt);
bool optIsTreeKnownIntValue(bool vnBased, GenTreePtr tree, ssize_t* pConstant, unsigned* pIconFlags);
ASSERT_TP* optInitAssertionDataflowFlags();
ASSERT_TP* optComputeAssertionGen();
// Assertion Gen functions.
- void optAssertionGen (GenTreePtr tree);
+ void optAssertionGen(GenTreePtr tree);
AssertionIndex optAssertionGenPhiDefn(GenTreePtr tree);
AssertionIndex optCreateJTrueBoundsAssertion(GenTreePtr tree);
- AssertionIndex optAssertionGenJtrue (GenTreePtr tree);
+ AssertionIndex optAssertionGenJtrue(GenTreePtr tree);
AssertionIndex optCreateJtrueAssertions(GenTreePtr op1, GenTreePtr op2, Compiler::optAssertionKind assertionKind);
- AssertionIndex optFindComplementary (AssertionIndex assertionIndex);
- void optMapComplementary (AssertionIndex assertionIndex, AssertionIndex index);
+ AssertionIndex optFindComplementary(AssertionIndex assertionIndex);
+ void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index);
// Assertion creation functions.
AssertionIndex optCreateAssertion(GenTreePtr op1, GenTreePtr op2, optAssertionKind assertionKind);
- AssertionIndex optCreateAssertion(GenTreePtr op1, GenTreePtr op2, optAssertionKind assertionKind, AssertionDsc* assertion);
+ AssertionIndex optCreateAssertion(GenTreePtr op1,
+ GenTreePtr op2,
+ optAssertionKind assertionKind,
+ AssertionDsc* assertion);
void optCreateComplementaryAssertion(AssertionIndex assertionIndex, GenTreePtr op1, GenTreePtr op2);
bool optAssertionVnInvolvesNan(AssertionDsc* assertion);
- AssertionIndex optAddAssertion (AssertionDsc* assertion);
+ AssertionIndex optAddAssertion(AssertionDsc* assertion);
void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index);
#ifdef DEBUG
void optPrintVnAssertionMapping();
@@ -5886,16 +5766,22 @@ public :
AssertionIndex optAssertionIsSubrange(GenTreePtr tree, var_types toType, ASSERT_VALARG_TP assertions);
AssertionIndex optAssertionIsSubtype(GenTreePtr tree, GenTreePtr methodTableArg, ASSERT_VALARG_TP assertions);
AssertionIndex optAssertionIsNonNullInternal(GenTreePtr op, ASSERT_VALARG_TP assertions);
- bool optAssertionIsNonNull(GenTreePtr op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex));
+ bool optAssertionIsNonNull(GenTreePtr op,
+ ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex));
// Used for Relop propagation.
AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTreePtr op1, GenTreePtr op2);
- AssertionIndex optLocalAssertionIsEqualOrNotEqual(optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions);
+ AssertionIndex optLocalAssertionIsEqualOrNotEqual(
+ optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions);
// Assertion prop for lcl var functions.
bool optAssertionProp_LclVarTypeCheck(GenTreePtr tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc);
- GenTreePtr optCopyAssertionProp(AssertionDsc* curAssertion, GenTreePtr tree, GenTreePtr stmt DEBUGARG(AssertionIndex index));
- GenTreePtr optConstantAssertionProp(AssertionDsc* curAssertion, const GenTreePtr tree, const GenTreePtr stmt DEBUGARG(AssertionIndex index));
+ GenTreePtr optCopyAssertionProp(AssertionDsc* curAssertion,
+ GenTreePtr tree,
+ GenTreePtr stmt DEBUGARG(AssertionIndex index));
+ GenTreePtr optConstantAssertionProp(AssertionDsc* curAssertion,
+ const GenTreePtr tree,
+ const GenTreePtr stmt DEBUGARG(AssertionIndex index));
GenTreePtr optVnConstantAssertionProp(const GenTreePtr tree, const GenTreePtr stmt);
// Assertion propagation functions.
@@ -5922,7 +5808,7 @@ public :
ASSERT_VALRET_TP optNewEmptyAssertSet();
#ifdef DEBUG
- void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex=0);
+ void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0);
void optDebugCheckAssertion(AssertionDsc* assertion);
void optDebugCheckAssertions(AssertionIndex AssertionIndex);
#endif
@@ -5933,128 +5819,128 @@ public :
* Range checks
*************************************************************************/
-public :
+public:
struct LoopCloneVisitorInfo
{
LoopCloneContext* context;
- unsigned loopNum;
- GenTreePtr stmt;
+ unsigned loopNum;
+ GenTreePtr stmt;
LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, GenTreePtr stmt)
- : context(context)
- , loopNum(loopNum)
- , stmt(nullptr) {}
+ : context(context), loopNum(loopNum), stmt(nullptr)
+ {
+ }
};
- bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum);
- bool optExtractArrIndex(GenTreePtr tree, ArrIndex* result, unsigned lhsNum);
- bool optReconstructArrIndex(GenTreePtr tree, ArrIndex* result, unsigned lhsNum);
- bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context);
- static fgWalkPreFn optCanOptimizeByLoopCloningVisitor;
- fgWalkResult optCanOptimizeByLoopCloning(GenTreePtr tree, LoopCloneVisitorInfo* info);
- void optObtainLoopCloningOpts(LoopCloneContext* context);
- bool optIsLoopClonable(unsigned loopInd);
+ bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum);
+ bool optExtractArrIndex(GenTreePtr tree, ArrIndex* result, unsigned lhsNum);
+ bool optReconstructArrIndex(GenTreePtr tree, ArrIndex* result, unsigned lhsNum);
+ bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context);
+ static fgWalkPreFn optCanOptimizeByLoopCloningVisitor;
+ fgWalkResult optCanOptimizeByLoopCloning(GenTreePtr tree, LoopCloneVisitorInfo* info);
+ void optObtainLoopCloningOpts(LoopCloneContext* context);
+ bool optIsLoopClonable(unsigned loopInd);
- bool optCanCloneLoops();
+ bool optCanCloneLoops();
#ifdef DEBUG
- void optDebugLogLoopCloning(BasicBlock* block, GenTreePtr insertBefore);
+ void optDebugLogLoopCloning(BasicBlock* block, GenTreePtr insertBefore);
#endif
- void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath));
- bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context);
- bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context);
- BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* head, BasicBlock* slow);
- void optInsertLoopCloningStress(BasicBlock* head);
+ void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath));
+ bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context);
+ bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context);
+ BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context,
+ unsigned loopNum,
+ BasicBlock* head,
+ BasicBlock* slow);
+ void optInsertLoopCloningStress(BasicBlock* head);
#if COUNT_RANGECHECKS
- static unsigned optRangeChkRmv;
- static unsigned optRangeChkAll;
+ static unsigned optRangeChkRmv;
+ static unsigned optRangeChkAll;
#endif
-protected :
+protected:
struct arraySizes
{
- unsigned arrayVar;
- int arrayDim;
+ unsigned arrayVar;
+ int arrayDim;
- #define MAX_ARRAYS 4 // a magic max number of arrays tracked for bounds check elimination
+#define MAX_ARRAYS 4 // a magic max number of arrays tracked for bounds check elimination
};
-
- struct RngChkDsc
+
+ struct RngChkDsc
{
- RngChkDsc * rcdNextInBucket; // used by the hash table
+ RngChkDsc* rcdNextInBucket; // used by the hash table
- unsigned short rcdHashValue; // to make matching faster
- unsigned short rcdIndex; // 0..optRngChkCount-1
+ unsigned short rcdHashValue; // to make matching faster
+ unsigned short rcdIndex; // 0..optRngChkCount-1
- GenTreePtr rcdTree; // the array index tree
+ GenTreePtr rcdTree; // the array index tree
};
unsigned optRngChkCount;
static const size_t optRngChkHashSize;
- ssize_t optGetArrayRefScaleAndIndex(GenTreePtr mul,
- GenTreePtr *pIndex
- DEBUGARG(bool bRngChk));
- GenTreePtr optFindLocalInit (BasicBlock *block,
- GenTreePtr local,
- VARSET_TP* pKilledInOut,
- bool* isKilledAfterInit);
+ ssize_t optGetArrayRefScaleAndIndex(GenTreePtr mul, GenTreePtr* pIndex DEBUGARG(bool bRngChk));
+ GenTreePtr optFindLocalInit(BasicBlock* block, GenTreePtr local, VARSET_TP* pKilledInOut, bool* isKilledAfterInit);
#if FANCY_ARRAY_OPT
- bool optIsNoMore (GenTreePtr op1, GenTreePtr op2,
- int add1 = 0, int add2 = 0);
+ bool optIsNoMore(GenTreePtr op1, GenTreePtr op2, int add1 = 0, int add2 = 0);
#endif
- bool optReachWithoutCall(BasicBlock * srcBB,
- BasicBlock * dstBB);
-
-protected :
-
- bool optLoopsMarked;
-
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX RegAlloc XX
-XX XX
-XX Does the register allocation and puts the remaining lclVars on the stack XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
+ bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB);
+protected:
+ bool optLoopsMarked;
-public :
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX RegAlloc XX
+ XX XX
+ XX Does the register allocation and puts the remaining lclVars on the stack XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
+public:
#ifndef LEGACY_BACKEND
- bool doLSRA() const { return true; }
-#else // LEGACY_BACKEND
- bool doLSRA() const { return false; }
+ bool doLSRA() const
+ {
+ return true;
+ }
+#else // LEGACY_BACKEND
+ bool doLSRA() const
+ {
+ return false;
+ }
#endif // LEGACY_BACKEND
#ifdef LEGACY_BACKEND
- void raInit ();
- void raAssignVars(); // register allocation
-#endif // LEGACY_BACKEND
+ void raInit();
+ void raAssignVars(); // register allocation
+#endif // LEGACY_BACKEND
- VARSET_TP raRegVarsMask; // Set of all enregistered variables (not including FEATURE_STACK_FP_X87 enregistered variables)
- regNumber raUpdateRegStateForArg(RegState *regState, LclVarDsc *argDsc);
+ VARSET_TP raRegVarsMask; // Set of all enregistered variables (not including FEATURE_STACK_FP_X87 enregistered
+ // variables)
+ regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc);
- void raMarkStkVars ();
+ void raMarkStkVars();
protected:
-
// Some things are used by both LSRA and regpredict allocators.
- FrameType rpFrameType;
- bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once
+ FrameType rpFrameType;
+ bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once
#ifdef LEGACY_BACKEND
- regMaskTP rpMaskPInvokeEpilogIntf; // pinvoke epilog trashes esi/edi holding stack args needed to setup tail call's args
-#endif // LEGACY_BACKEND
+ regMaskTP rpMaskPInvokeEpilogIntf; // pinvoke epilog trashes esi/edi holding stack args needed to setup tail call's
+ // args
+#endif // LEGACY_BACKEND
- bool rpMustCreateEBPFrame(INDEBUG(const char ** wbReason));
+ bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason));
#if FEATURE_FP_REGALLOC
enum enumConfigRegisterFP
@@ -6064,102 +5950,83 @@ protected:
CONFIG_REGISTER_FP_CALLEE_SAVED = 0x2,
CONFIG_REGISTER_FP_FULL = 0x3,
};
- enumConfigRegisterFP raConfigRegisterFP();
+ enumConfigRegisterFP raConfigRegisterFP();
#endif // FEATURE_FP_REGALLOC
public:
- regMaskTP raConfigRestrictMaskFP();
+ regMaskTP raConfigRestrictMaskFP();
private:
#ifndef LEGACY_BACKEND
- LinearScanInterface* m_pLinearScan; // Linear Scan allocator
-#else // LEGACY_BACKEND
- unsigned raAvoidArgRegMask; // Mask of incoming argument registers that we may need to avoid
- VARSET_TP raLclRegIntf[REG_COUNT]; // variable to register interference graph
-    bool                raNewBlocks;        // True if we added killing blocks for FPU registers
- unsigned rpPasses; // Number of passes made by the register predicter
- unsigned rpPassesMax; // Maximum number of passes made by the register predicter
-    unsigned            rpPassesPessimize;  // Number of non-pessimizing passes made by the register predicter
-    unsigned            rpStkPredict;       // Weighted count of variables that were predicted STK (lower means register allocation is better)
- unsigned rpPredictSpillCnt; // Predicted number of integer spill tmps for the current tree
- regMaskTP rpPredictAssignMask; // Mask of registers to consider in rpPredictAssignRegVars()
- VARSET_TP rpLastUseVars; // Set of last use variables in rpPredictTreeRegUse
- VARSET_TP rpUseInPlace; // Set of variables that we used in place
- int rpAsgVarNum; // VarNum for the target of GT_ASG node
- bool rpPredictAssignAgain; // Must rerun the rpPredictAssignRegVars()
- bool rpAddedVarIntf; // Set to true if we need to add a new var intf
-    bool                rpLostEnreg;        // Set to true if we lost an enregistered var that had lvDependReg set
- bool rpReverseEBPenreg; // Decided to reverse the enregistration of EBP
+ LinearScanInterface* m_pLinearScan; // Linear Scan allocator
+#else // LEGACY_BACKEND
+ unsigned raAvoidArgRegMask; // Mask of incoming argument registers that we may need to avoid
+ VARSET_TP raLclRegIntf[REG_COUNT]; // variable to register interference graph
+    bool      raNewBlocks;       // True if we added killing blocks for FPU registers
+ unsigned rpPasses; // Number of passes made by the register predicter
+ unsigned rpPassesMax; // Maximum number of passes made by the register predicter
+    unsigned  rpPassesPessimize; // Number of non-pessimizing passes made by the register predicter
+    unsigned rpStkPredict; // Weighted count of variables that were predicted STK (lower means register allocation is better)
+ unsigned rpPredictSpillCnt; // Predicted number of integer spill tmps for the current tree
+ regMaskTP rpPredictAssignMask; // Mask of registers to consider in rpPredictAssignRegVars()
+ VARSET_TP rpLastUseVars; // Set of last use variables in rpPredictTreeRegUse
+ VARSET_TP rpUseInPlace; // Set of variables that we used in place
+ int rpAsgVarNum; // VarNum for the target of GT_ASG node
+ bool rpPredictAssignAgain; // Must rerun the rpPredictAssignRegVars()
+ bool rpAddedVarIntf; // Set to true if we need to add a new var intf
+    bool rpLostEnreg;        // Set to true if we lost an enregistered var that had lvDependReg set
+ bool rpReverseEBPenreg; // Decided to reverse the enregistration of EBP
public:
- bool rpRegAllocDone; // Set to true after we have completed register allocation
+ bool rpRegAllocDone; // Set to true after we have completed register allocation
private:
- regMaskTP rpPredictMap[PREDICT_COUNT]; // Holds the regMaskTP for each of the enum values
+ regMaskTP rpPredictMap[PREDICT_COUNT]; // Holds the regMaskTP for each of the enum values
- void raSetupArgMasks(RegState *r);
+ void raSetupArgMasks(RegState* r);
- const regNumber* raGetRegVarOrder (var_types regType,
- unsigned * wbVarOrderSize);
+ const regNumber* raGetRegVarOrder(var_types regType, unsigned* wbVarOrderSize);
#ifdef DEBUG
- void raDumpVarIntf (); // Dump the variable to variable interference graph
- void raDumpRegIntf (); // Dump the variable to register interference graph
+ void raDumpVarIntf(); // Dump the variable to variable interference graph
+ void raDumpRegIntf(); // Dump the variable to register interference graph
#endif
- void raAdjustVarIntf ();
+ void raAdjustVarIntf();
- regMaskTP rpPredictRegMask (rpPredictReg predictReg,
- var_types type);
+ regMaskTP rpPredictRegMask(rpPredictReg predictReg, var_types type);
- bool rpRecordRegIntf (regMaskTP regMask,
- VARSET_VALARG_TP life
- DEBUGARG( const char * msg));
+ bool rpRecordRegIntf(regMaskTP regMask, VARSET_VALARG_TP life DEBUGARG(const char* msg));
- bool rpRecordVarIntf (unsigned varNum,
- VARSET_VALARG_TP intfVar
- DEBUGARG( const char * msg));
- regMaskTP rpPredictRegPick (var_types type,
- rpPredictReg predictReg,
- regMaskTP lockedRegs);
+ bool rpRecordVarIntf(unsigned varNum, VARSET_VALARG_TP intfVar DEBUGARG(const char* msg));
+ regMaskTP rpPredictRegPick(var_types type, rpPredictReg predictReg, regMaskTP lockedRegs);
- regMaskTP rpPredictGrabReg (var_types type,
- rpPredictReg predictReg,
- regMaskTP lockedRegs);
+ regMaskTP rpPredictGrabReg(var_types type, rpPredictReg predictReg, regMaskTP lockedRegs);
- static fgWalkPreFn rpMarkRegIntf;
+ static fgWalkPreFn rpMarkRegIntf;
- regMaskTP rpPredictAddressMode(GenTreePtr tree,
- var_types type,
- regMaskTP lockedRegs,
- regMaskTP rsvdRegs,
- GenTreePtr lenCSE);
+ regMaskTP rpPredictAddressMode(
+ GenTreePtr tree, var_types type, regMaskTP lockedRegs, regMaskTP rsvdRegs, GenTreePtr lenCSE);
- void rpPredictRefAssign (unsigned lclNum);
+ void rpPredictRefAssign(unsigned lclNum);
- regMaskTP rpPredictBlkAsgRegUse(GenTreePtr tree,
- rpPredictReg predictReg,
- regMaskTP lockedRegs,
- regMaskTP rsvdRegs);
+ regMaskTP rpPredictBlkAsgRegUse(GenTreePtr tree, rpPredictReg predictReg, regMaskTP lockedRegs, regMaskTP rsvdRegs);
- regMaskTP rpPredictTreeRegUse (GenTreePtr tree,
- rpPredictReg predictReg,
- regMaskTP lockedRegs,
- regMaskTP rsvdRegs);
+ regMaskTP rpPredictTreeRegUse(GenTreePtr tree, rpPredictReg predictReg, regMaskTP lockedRegs, regMaskTP rsvdRegs);
- regMaskTP rpPredictAssignRegVars(regMaskTP regAvail);
+ regMaskTP rpPredictAssignRegVars(regMaskTP regAvail);
- void rpPredictRegUse (); // Entry point
+ void rpPredictRegUse(); // Entry point
- unsigned raPredictTreeRegUse (GenTreePtr tree);
- unsigned raPredictListRegUse (GenTreePtr list);
+ unsigned raPredictTreeRegUse(GenTreePtr tree);
+ unsigned raPredictListRegUse(GenTreePtr list);
- void raSetRegVarOrder (var_types regType,
- regNumber * customVarOrder,
- unsigned * customVarOrderSize,
- regMaskTP prefReg,
- regMaskTP avoidReg);
+ void raSetRegVarOrder(var_types regType,
+ regNumber* customVarOrder,
+ unsigned* customVarOrderSize,
+ regMaskTP prefReg,
+ regMaskTP avoidReg);
// We use (unsigned)-1 as an uninitialized sentinel for rpStkPredict and
// also as the maximum value of lvRefCntWtd. Don't allow overflow, and
// saturate at UINT_MAX - 1, to avoid using the sentinel.
- void raAddToStkPredict(unsigned val)
+ void raAddToStkPredict(unsigned val)
{
unsigned newStkPredict = rpStkPredict + val;
if ((newStkPredict < rpStkPredict) || (newStkPredict == UINT_MAX))
@@ -6168,13 +6035,13 @@ private:
rpStkPredict = newStkPredict;
}
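A standalone sketch of the saturating-add pattern that raAddToStkPredict implements above (function and harness invented for illustration): unsigned overflow is detected by the wrapped sum comparing below the old value, and the counter is pinned at UINT_MAX - 1 so it can never collide with the (unsigned)-1 sentinel mentioned in the comment.

#include <limits.h>
#include <stdio.h>

static unsigned SaturatingAdd(unsigned cur, unsigned val)
{
    unsigned next = cur + val;
    if ((next < cur) || (next == UINT_MAX)) // wrapped around, or landed exactly on the sentinel
    {
        next = UINT_MAX - 1;
    }
    return next;
}

int main()
{
    printf("%u\n", SaturatingAdd(10u, 20u));           // 30
    printf("%u\n", SaturatingAdd(UINT_MAX - 5u, 50u)); // UINT_MAX - 1
    return 0;
}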
-#ifdef DEBUG
+#ifdef DEBUG
#if !FEATURE_FP_REGALLOC
- void raDispFPlifeInfo ();
+ void raDispFPlifeInfo();
#endif
#endif
- regMaskTP genReturnRegForTree (GenTreePtr tree);
+ regMaskTP genReturnRegForTree(GenTreePtr tree);
#endif // LEGACY_BACKEND
    /* raIsVarargsStackArg is called by raMarkStkVars and by
@@ -6186,17 +6053,15 @@ private:
at compile time).
*/
- bool raIsVarargsStackArg(unsigned lclNum)
+ bool raIsVarargsStackArg(unsigned lclNum)
{
#ifdef _TARGET_X86_
- LclVarDsc *varDsc = &lvaTable[lclNum];
+ LclVarDsc* varDsc = &lvaTable[lclNum];
assert(varDsc->lvIsParam);
- return (info.compIsVarArgs &&
- !varDsc->lvIsRegArg &&
- (lclNum != lvaVarargsHandleArg));
+ return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg));
#else // _TARGET_X86_
@@ -6222,21 +6087,20 @@ private:
VarRegPrediction* rpBestRecordedPrediction;
#endif // LEGACY_BACKEND
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX EEInterface XX
-XX XX
-XX Get to the class and method info from the Execution Engine given XX
-XX tokens for the class and method XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
-
-public :
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX EEInterface XX
+ XX XX
+ XX Get to the class and method info from the Execution Engine given XX
+ XX tokens for the class and method XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
+public:
/* These are the different addressing modes used to access a local var.
* The JIT has to report the location of the locals back to the EE
* for debugging purposes.
@@ -6245,10 +6109,10 @@ public :
enum siVarLocType
{
VLT_REG,
- VLT_REG_BYREF, // this type is currently only used for value types on X64
+ VLT_REG_BYREF, // this type is currently only used for value types on X64
VLT_REG_FP,
VLT_STK,
- VLT_STK_BYREF, // this type is currently only used for value types on X64
+ VLT_STK_BYREF, // this type is currently only used for value types on X64
VLT_REG_REG,
VLT_REG_STK,
VLT_STK_REG,
@@ -6262,10 +6126,9 @@ public :
struct siVarLoc
{
- siVarLocType vlType;
+ siVarLocType vlType;
- union
- {
+ union {
// VLT_REG/VLT_REG_FP -- Any pointer-sized enregistered value (TYP_INT, TYP_REF, etc)
// eg. EAX
// VLT_REG_BYREF -- the specified register contains the address of the variable
@@ -6273,9 +6136,8 @@ public :
struct
{
- regNumber vlrReg;
- }
- vlReg;
+ regNumber vlrReg;
+ } vlReg;
// VLT_STK -- Any 32 bit value which is on the stack
// eg. [ESP+0x20], or [EBP-0x28]
@@ -6284,36 +6146,32 @@ public :
struct
{
- regNumber vlsBaseReg;
- NATIVE_OFFSET vlsOffset;
- }
- vlStk;
+ regNumber vlsBaseReg;
+ NATIVE_OFFSET vlsOffset;
+ } vlStk;
// VLT_REG_REG -- TYP_LONG/TYP_DOUBLE with both DWords enregistered
// eg. RBM_EAXEDX
struct
{
- regNumber vlrrReg1;
- regNumber vlrrReg2;
- }
- vlRegReg;
+ regNumber vlrrReg1;
+ regNumber vlrrReg2;
+ } vlRegReg;
// VLT_REG_STK -- Partly enregistered TYP_LONG/TYP_DOUBLE
// eg { LowerDWord=EAX UpperDWord=[ESP+0x8] }
struct
{
- regNumber vlrsReg;
+ regNumber vlrsReg;
struct
{
- regNumber vlrssBaseReg;
- NATIVE_OFFSET vlrssOffset;
- }
- vlrsStk;
- }
- vlRegStk;
+ regNumber vlrssBaseReg;
+ NATIVE_OFFSET vlrssOffset;
+ } vlrsStk;
+ } vlRegStk;
// VLT_STK_REG -- Partly enregistered TYP_LONG/TYP_DOUBLE
// eg { LowerDWord=[ESP+0x8] UpperDWord=EAX }
@@ -6322,33 +6180,29 @@ public :
{
struct
{
- regNumber vlsrsBaseReg;
- NATIVE_OFFSET vlsrsOffset;
- }
- vlsrStk;
+ regNumber vlsrsBaseReg;
+ NATIVE_OFFSET vlsrsOffset;
+ } vlsrStk;
- regNumber vlsrReg;
- }
- vlStkReg;
+ regNumber vlsrReg;
+ } vlStkReg;
        // VLT_STK2 -- Any 64 bit value which is on the stack, in 2 successive DWords
// eg 2 DWords at [ESP+0x10]
struct
{
- regNumber vls2BaseReg;
- NATIVE_OFFSET vls2Offset;
- }
- vlStk2;
+ regNumber vls2BaseReg;
+ NATIVE_OFFSET vls2Offset;
+ } vlStk2;
        // VLT_FPSTK -- enregistered TYP_DOUBLE (on the FP stack)
// eg. ST(3). Actually it is ST("FPstkHeight - vpFpStk")
struct
{
- unsigned vlfReg;
- }
- vlFPstk;
+ unsigned vlfReg;
+ } vlFPstk;
// VLT_FIXED_VA -- fixed argument of a varargs function.
// The argument location depends on the size of the variable
@@ -6358,45 +6212,42 @@ public :
struct
{
- unsigned vlfvOffset;
- }
- vlFixedVarArg;
+ unsigned vlfvOffset;
+ } vlFixedVarArg;
// VLT_MEMORY
struct
{
- void *rpValue; // pointer to the in-process
- // location of the value.
+ void* rpValue; // pointer to the in-process
+ // location of the value.
} vlMemory;
-
};
// Helper functions
- bool vlIsInReg(regNumber reg);
- bool vlIsOnStk(regNumber reg, signed offset);
+ bool vlIsInReg(regNumber reg);
+ bool vlIsOnStk(regNumber reg, signed offset);
};
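A self-contained sketch of how the VLT_REG_STK shape above would describe a TYP_LONG split between a register and a stack slot, e.g. { LowerDWord=EAX UpperDWord=[EBP+0x8] }. Here regNumber, NATIVE_OFFSET, and the register ids are stand-ins; only the field layout mirrors the struct shown above.

#include <stdio.h>

enum regNumber // stand-in for the JIT's register enumeration
{
    REG_EAX,
    REG_EBP
};
typedef int NATIVE_OFFSET; // stand-in

struct VarLocSketch
{
    struct
    {
        regNumber     lowerReg;     // models vlRegStk.vlrsReg
        regNumber     upperBaseReg; // models vlRegStk.vlrsStk.vlrssBaseReg
        NATIVE_OFFSET upperOffs;    // models vlRegStk.vlrsStk.vlrssOffset
    } regStk;
};

int main()
{
    VarLocSketch loc;
    loc.regStk.lowerReg     = REG_EAX;
    loc.regStk.upperBaseReg = REG_EBP;
    loc.regStk.upperOffs    = 0x8;
    printf("lower DWord in reg %d, upper DWord at [reg %d + %d]\n",
           (int)loc.regStk.lowerReg, (int)loc.regStk.upperBaseReg, loc.regStk.upperOffs);
    return 0;
}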
/*************************************************************************/
-public :
-
+public:
// Get handles
- void eeGetCallInfo (CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_RESOLVED_TOKEN * pConstrainedToken,
- CORINFO_CALLINFO_FLAGS flags,
- CORINFO_CALL_INFO* pResult);
+ void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_RESOLVED_TOKEN* pConstrainedToken,
+ CORINFO_CALLINFO_FLAGS flags,
+ CORINFO_CALL_INFO* pResult);
inline CORINFO_CALLINFO_FLAGS addVerifyFlag(CORINFO_CALLINFO_FLAGS flags);
- void eeGetFieldInfo ( CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_ACCESS_FLAGS flags,
- CORINFO_FIELD_INFO *pResult);
+ void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_ACCESS_FLAGS flags,
+ CORINFO_FIELD_INFO* pResult);
// Get the flags
- BOOL eeIsValueClass (CORINFO_CLASS_HANDLE clsHnd);
+ BOOL eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd);
#if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD)
@@ -6415,78 +6266,75 @@ public :
switch (code)
{
- case EXCEPTIONCODE_DebugBreakorAV:
- case EXCEPTIONCODE_MC:
- case EXCEPTIONCODE_LWM:
- case EXCEPTIONCODE_SASM:
- case EXCEPTIONCODE_SSYM:
- case EXCEPTIONCODE_CALLUTILS:
- case EXCEPTIONCODE_TYPEUTILS:
- case EXCEPTIONCODE_ASSERT:
- return true;
- default:
- return false;
+ case EXCEPTIONCODE_DebugBreakorAV:
+ case EXCEPTIONCODE_MC:
+ case EXCEPTIONCODE_LWM:
+ case EXCEPTIONCODE_SASM:
+ case EXCEPTIONCODE_SSYM:
+ case EXCEPTIONCODE_CALLUTILS:
+ case EXCEPTIONCODE_TYPEUTILS:
+ case EXCEPTIONCODE_ASSERT:
+ return true;
+ default:
+ return false;
}
}
- const char* eeGetMethodName (CORINFO_METHOD_HANDLE hnd, const char** className);
- const char* eeGetMethodFullName (CORINFO_METHOD_HANDLE hnd);
+ const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className);
+ const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd);
- bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method);
- CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method);
+ bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method);
+ CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method);
#endif
- var_types eeGetArgType (CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
- var_types eeGetArgType (CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned);
- unsigned eeGetArgSize (CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
+ var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
+ var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned);
+ unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
// VOM info, method sigs
- void eeGetSig (unsigned sigTok,
- CORINFO_MODULE_HANDLE scope,
- CORINFO_CONTEXT_HANDLE context,
- CORINFO_SIG_INFO* retSig);
+ void eeGetSig(unsigned sigTok,
+ CORINFO_MODULE_HANDLE scope,
+ CORINFO_CONTEXT_HANDLE context,
+ CORINFO_SIG_INFO* retSig);
- void eeGetCallSiteSig (unsigned sigTok,
- CORINFO_MODULE_HANDLE scope,
- CORINFO_CONTEXT_HANDLE context,
- CORINFO_SIG_INFO* retSig);
+ void eeGetCallSiteSig(unsigned sigTok,
+ CORINFO_MODULE_HANDLE scope,
+ CORINFO_CONTEXT_HANDLE context,
+ CORINFO_SIG_INFO* retSig);
- void eeGetMethodSig (CORINFO_METHOD_HANDLE methHnd,
- CORINFO_SIG_INFO* retSig,
- CORINFO_CLASS_HANDLE owner = NULL);
+ void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr);
// Method entry-points, instrs
- void * eeGetFieldAddress (CORINFO_FIELD_HANDLE handle,
- void ** *ppIndir);
+ void* eeGetFieldAddress(CORINFO_FIELD_HANDLE handle, void*** ppIndir);
- CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method);
+ CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method);
- CORINFO_EE_INFO eeInfo;
- bool eeInfoInitialized;
+ CORINFO_EE_INFO eeInfo;
+ bool eeInfoInitialized;
- CORINFO_EE_INFO * eeGetEEInfo();
+ CORINFO_EE_INFO* eeGetEEInfo();
// Gets the offset of a SDArray's first element
- unsigned eeGetArrayDataOffset(var_types type);
+ unsigned eeGetArrayDataOffset(var_types type);
// Gets the offset of a MDArray's first element
- unsigned eeGetMDArrayDataOffset(var_types type, unsigned rank);
+ unsigned eeGetMDArrayDataOffset(var_types type, unsigned rank);
- GenTreePtr eeGetPInvokeCookie(CORINFO_SIG_INFO *szMetaSig);
+ GenTreePtr eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig);
// Returns the page size for the target machine as reported by the EE.
- inline size_t eeGetPageSize()
+ inline size_t eeGetPageSize()
{
#if COR_JIT_EE_VERSION > 460
return eeGetEEInfo()->osPageSize;
-#else // COR_JIT_EE_VERSION <= 460
+#else // COR_JIT_EE_VERSION <= 460
return CORINFO_PAGE_SIZE;
#endif // COR_JIT_EE_VERSION > 460
}
// Returns the frame size at which we will generate a loop to probe the stack.
- inline size_t getVeryLargeFrameSize()
+ inline size_t getVeryLargeFrameSize()
{
#ifdef _TARGET_ARM_
// The looping probe code is 40 bytes, whereas the straight-line probing for
@@ -6498,7 +6346,7 @@ public :
#endif
}
- inline bool generateCFIUnwindCodes()
+ inline bool generateCFIUnwindCodes()
{
#if COR_JIT_EE_VERSION > 460 && defined(UNIX_AMD64_ABI)
return eeGetEEInfo()->targetAbi == CORINFO_CORERT_ABI;
@@ -6509,88 +6357,76 @@ public :
// Exceptions
- unsigned eeGetEHcount (CORINFO_METHOD_HANDLE handle);
+ unsigned eeGetEHcount(CORINFO_METHOD_HANDLE handle);
// Debugging support - Line number info
- void eeGetStmtOffsets();
+ void eeGetStmtOffsets();
- unsigned eeBoundariesCount;
+ unsigned eeBoundariesCount;
- struct boundariesDsc
+ struct boundariesDsc
{
- UNATIVE_OFFSET nativeIP;
- IL_OFFSET ilOffset;
- unsigned sourceReason;
- }
- * eeBoundaries; // Boundaries to report to EE
- void eeSetLIcount (unsigned count);
- void eeSetLIinfo (unsigned which,
- UNATIVE_OFFSET offs,
- unsigned srcIP,
- bool stkEmpty,
- bool callInstruction);
- void eeSetLIdone ();
+ UNATIVE_OFFSET nativeIP;
+ IL_OFFSET ilOffset;
+ unsigned sourceReason;
+ } * eeBoundaries; // Boundaries to report to EE
+ void eeSetLIcount(unsigned count);
+ void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, unsigned srcIP, bool stkEmpty, bool callInstruction);
+ void eeSetLIdone();
#ifdef DEBUG
- static void eeDispILOffs(IL_OFFSET offs);
- static void eeDispLineInfo(const boundariesDsc* line);
- void eeDispLineInfos();
+ static void eeDispILOffs(IL_OFFSET offs);
+ static void eeDispLineInfo(const boundariesDsc* line);
+ void eeDispLineInfos();
#endif // DEBUG
// Debugging support - Local var info
- void eeGetVars ();
+ void eeGetVars();
- unsigned eeVarsCount;
+ unsigned eeVarsCount;
struct VarResultInfo
{
- UNATIVE_OFFSET startOffset;
- UNATIVE_OFFSET endOffset;
- DWORD varNumber;
- siVarLoc loc;
- }
- * eeVars;
- void eeSetLVcount (unsigned count);
- void eeSetLVinfo (unsigned which,
- UNATIVE_OFFSET startOffs,
- UNATIVE_OFFSET length,
- unsigned varNum,
- unsigned LVnum,
- VarName namex,
- bool avail,
- const siVarLoc &loc);
- void eeSetLVdone ();
+ UNATIVE_OFFSET startOffset;
+ UNATIVE_OFFSET endOffset;
+ DWORD varNumber;
+ siVarLoc loc;
+ } * eeVars;
+ void eeSetLVcount(unsigned count);
+ void eeSetLVinfo(unsigned which,
+ UNATIVE_OFFSET startOffs,
+ UNATIVE_OFFSET length,
+ unsigned varNum,
+ unsigned LVnum,
+ VarName namex,
+ bool avail,
+ const siVarLoc& loc);
+ void eeSetLVdone();
#ifdef DEBUG
- void eeDispVar (ICorDebugInfo::NativeVarInfo* var);
- void eeDispVars (CORINFO_METHOD_HANDLE ftn,
- ULONG32 cVars,
- ICorDebugInfo::NativeVarInfo* vars);
+ void eeDispVar(ICorDebugInfo::NativeVarInfo* var);
+ void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars);
#endif // DEBUG
// ICorJitInfo wrappers
- void eeReserveUnwindInfo(BOOL isFunclet,
- BOOL isColdCode,
- ULONG unwindSize);
+ void eeReserveUnwindInfo(BOOL isFunclet, BOOL isColdCode, ULONG unwindSize);
- void eeAllocUnwindInfo(BYTE* pHotCode,
- BYTE* pColdCode,
- ULONG startOffset,
- ULONG endOffset,
- ULONG unwindSize,
- BYTE* pUnwindBlock,
- CorJitFuncKind funcKind);
+ void eeAllocUnwindInfo(BYTE* pHotCode,
+ BYTE* pColdCode,
+ ULONG startOffset,
+ ULONG endOffset,
+ ULONG unwindSize,
+ BYTE* pUnwindBlock,
+ CorJitFuncKind funcKind);
- void eeSetEHcount(unsigned cEH);
+ void eeSetEHcount(unsigned cEH);
- void eeSetEHinfo(unsigned EHnumber,
- const CORINFO_EH_CLAUSE* clause);
+ void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause);
-
- WORD eeGetRelocTypeHint(void * target);
+ WORD eeGetRelocTypeHint(void* target);
// ICorStaticInfo wrapper functions
@@ -6598,15 +6434,15 @@ public :
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
#ifdef DEBUG
- static void dumpSystemVClassificationType(SystemVClassificationType ct);
+ static void dumpSystemVClassificationType(SystemVClassificationType ct);
#endif // DEBUG
- void eeGetSystemVAmd64PassStructInRegisterDescriptor(/*IN*/ CORINFO_CLASS_HANDLE structHnd,
- /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr);
+ void eeGetSystemVAmd64PassStructInRegisterDescriptor(
+ /*IN*/ CORINFO_CLASS_HANDLE structHnd,
+ /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
-
- template<typename ParamType>
+ template <typename ParamType>
bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param)
{
return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
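A standalone mirror of the trampoline pattern eeRunWithErrorTrap uses above, with the EE error trap itself left out (the callback, struct, and wrapper names are invented): the typed callback is funneled through a void* entry point via the same reinterpret_casts, relying, as the JIT does, on all object pointers sharing one representation on the target platform.

#include <stdio.h>

struct FilterArgs
{
    int  token;
    bool ok;
};

// Plays the role of eeRunWithErrorTrapImp: a real implementation would run 'function'
// inside the EE's exception filter and report whether it ran to completion.
static bool runWithTrampoline(void (*function)(void*), void* param)
{
    function(param);
    return true;
}

template <typename ParamType>
static bool runTyped(void (*function)(ParamType*), ParamType* param)
{
    return runWithTrampoline(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
}

static void doWork(FilterArgs* args)
{
    args->ok = (args->token == 42);
}

int main()
{
    FilterArgs args = {42, false};
    bool       ran  = runTyped(doWork, &args);
    printf("ran=%d ok=%d\n", (int)ran, (int)args.ok);
    return 0;
}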
@@ -6616,88 +6452,87 @@ public :
// Utility functions
- const char * eeGetFieldName (CORINFO_FIELD_HANDLE fieldHnd,
- const char ** classNamePtr = NULL);
+ const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr);
#if defined(DEBUG)
- const wchar_t * eeGetCPString (size_t stringHandle);
+ const wchar_t* eeGetCPString(size_t stringHandle);
#endif
- const char* eeGetClassName (CORINFO_CLASS_HANDLE clsHnd);
+ const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd);
- static CORINFO_METHOD_HANDLE eeFindHelper (unsigned helper);
- static CorInfoHelpFunc eeGetHelperNum (CORINFO_METHOD_HANDLE method);
+ static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper);
+ static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method);
- static fgWalkPreFn CountSharedStaticHelper;
- static bool IsSharedStaticHelper (GenTreePtr tree);
- static bool IsTreeAlwaysHoistable (GenTreePtr tree);
+ static fgWalkPreFn CountSharedStaticHelper;
+ static bool IsSharedStaticHelper(GenTreePtr tree);
+ static bool IsTreeAlwaysHoistable(GenTreePtr tree);
- static CORINFO_FIELD_HANDLE eeFindJitDataOffs (unsigned jitDataOffs);
- // returns true/false if 'field' is a Jit Data offset
- static bool eeIsJitDataOffs (CORINFO_FIELD_HANDLE field);
- // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB)
- static int eeGetJitDataOffs (CORINFO_FIELD_HANDLE field);
+ static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs);
+ // returns true/false if 'field' is a Jit Data offset
+ static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field);
+ // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB)
+ static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field);
-/*****************************************************************************/
-
-public :
+ /*****************************************************************************/
- void tmpInit ();
+public:
+ void tmpInit();
- enum TEMP_USAGE_TYPE { TEMP_USAGE_FREE, TEMP_USAGE_USED };
+ enum TEMP_USAGE_TYPE
+ {
+ TEMP_USAGE_FREE,
+ TEMP_USAGE_USED
+ };
- static var_types tmpNormalizeType (var_types type);
- TempDsc* tmpGetTemp (var_types type); // get temp for the given type
- void tmpRlsTemp (TempDsc* temp);
- TempDsc* tmpFindNum (int temp, TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const;
+ static var_types tmpNormalizeType(var_types type);
+ TempDsc* tmpGetTemp(var_types type); // get temp for the given type
+ void tmpRlsTemp(TempDsc* temp);
+ TempDsc* tmpFindNum(int temp, TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const;
- void tmpEnd ();
- TempDsc* tmpListBeg (TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const;
- TempDsc* tmpListNxt (TempDsc* curTemp, TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const;
- void tmpDone ();
+ void tmpEnd();
+ TempDsc* tmpListBeg(TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const;
+ TempDsc* tmpListNxt(TempDsc* curTemp, TEMP_USAGE_TYPE usageType = TEMP_USAGE_FREE) const;
+ void tmpDone();
#ifdef DEBUG
- bool tmpAllFree () const;
+ bool tmpAllFree() const;
#endif // DEBUG
#ifndef LEGACY_BACKEND
- void tmpPreAllocateTemps(var_types type, unsigned count);
+ void tmpPreAllocateTemps(var_types type, unsigned count);
#endif // !LEGACY_BACKEND
-protected :
-
+protected:
#ifdef LEGACY_BACKEND
- unsigned tmpIntSpillMax; // number of int-sized spill temps
- unsigned tmpDoubleSpillMax; // number of double-sized spill temps
-#endif // LEGACY_BACKEND
+ unsigned tmpIntSpillMax; // number of int-sized spill temps
+ unsigned tmpDoubleSpillMax; // number of double-sized spill temps
+#endif // LEGACY_BACKEND
- unsigned tmpCount; // Number of temps
- unsigned tmpSize; // Size of all the temps
+ unsigned tmpCount; // Number of temps
+ unsigned tmpSize; // Size of all the temps
#ifdef DEBUG
public:
// Used by RegSet::rsSpillChk()
- unsigned tmpGetCount; // Temps which haven't been released yet
+ unsigned tmpGetCount; // Temps which haven't been released yet
#endif
private:
+ static unsigned tmpSlot(unsigned size); // which slot in tmpFree[] or tmpUsed[] to use
- static unsigned tmpSlot (unsigned size); // which slot in tmpFree[] or tmpUsed[] to use
-
- TempDsc* tmpFree[TEMP_MAX_SIZE / sizeof(int)];
- TempDsc* tmpUsed[TEMP_MAX_SIZE / sizeof(int)];
+ TempDsc* tmpFree[TEMP_MAX_SIZE / sizeof(int)];
+ TempDsc* tmpUsed[TEMP_MAX_SIZE / sizeof(int)];
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX CodeGenerator XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
-
-public :
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX CodeGenerator XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
- CodeGenInterface* codeGen;
+public:
+ CodeGenInterface* codeGen;
#ifdef DEBUGGING_SUPPORT
@@ -6705,135 +6540,159 @@ public :
struct IPmappingDsc
{
- IPmappingDsc * ipmdNext; // next line# record
- IL_OFFSETX ipmdILoffsx; // the instr offset
- emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset
- bool ipmdIsLabel; // Can this code be a branch label?
+ IPmappingDsc* ipmdNext; // next line# record
+ IL_OFFSETX ipmdILoffsx; // the instr offset
+ emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset
+ bool ipmdIsLabel; // Can this code be a branch label?
};
// Record the instr offset mapping to the generated code
- IPmappingDsc * genIPmappingList;
- IPmappingDsc * genIPmappingLast;
+ IPmappingDsc* genIPmappingList;
+ IPmappingDsc* genIPmappingLast;
// Managed RetVal - A side hash table meant to record the mapping from a
// GT_CALL node to its IL offset. This info is used to emit sequence points
// that can be used by debugger to determine the native offset at which the
// managed RetVal will be available.
//
- // In fact we can store IL offset in a GT_CALL node. This was ruled out in
+ // In fact we can store IL offset in a GT_CALL node. This was ruled out in
// favor of a side table for two reasons: 1) We need IL offset for only those
// GT_CALL nodes (created during importation) that correspond to an IL call and
// whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used
// structure and IL offset is needed only when generating debuggable code. Therefore
// it is desirable to avoid memory size penalty in retail scenarios.
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, IL_OFFSETX, JitSimplerHashBehavior> CallSiteILOffsetTable;
- CallSiteILOffsetTable * genCallSite2ILOffsetMap;
+ typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, IL_OFFSETX, JitSimplerHashBehavior>
+ CallSiteILOffsetTable;
+ CallSiteILOffsetTable* genCallSite2ILOffsetMap;
#endif // DEBUGGING_SUPPORT
- unsigned genReturnLocal; // Local number for the return value when applicable.
- BasicBlock * genReturnBB; // jumped to when not optimizing for speed.
+ unsigned genReturnLocal; // Local number for the return value when applicable.
+ BasicBlock* genReturnBB; // jumped to when not optimizing for speed.
// The following properties are part of CodeGenContext. Getters are provided here for
// convenience and backward compatibility, but the properties can only be set by invoking
// the setter on CodeGenContext directly.
- __declspec(property(get = getEmitter)) emitter* genEmitter;
- emitter* getEmitter() { return codeGen->getEmitter(); }
+ __declspec(property(get = getEmitter)) emitter* genEmitter;
+ emitter* getEmitter()
+ {
+ return codeGen->getEmitter();
+ }
- const bool isFramePointerUsed() { return codeGen->isFramePointerUsed(); }
+ const bool isFramePointerUsed()
+ {
+ return codeGen->isFramePointerUsed();
+ }
- __declspec(property(get = getInterruptible, put=setInterruptible)) bool genInterruptible;
- bool getInterruptible() { return codeGen->genInterruptible; }
- void setInterruptible(bool value) { codeGen->setInterruptible(value); }
+ __declspec(property(get = getInterruptible, put = setInterruptible)) bool genInterruptible;
+ bool getInterruptible()
+ {
+ return codeGen->genInterruptible;
+ }
+ void setInterruptible(bool value)
+ {
+ codeGen->setInterruptible(value);
+ }
#if DOUBLE_ALIGN
- const bool genDoubleAlign() { return codeGen->doDoubleAlign(); }
- DWORD getCanDoubleAlign(); // Defined & used only by RegAlloc
-#endif // DOUBLE_ALIGN
- __declspec(property(get = getFullPtrRegMap, put=setFullPtrRegMap)) bool genFullPtrRegMap;
- bool getFullPtrRegMap() { return codeGen->genFullPtrRegMap; }
- void setFullPtrRegMap(bool value) { codeGen->setFullPtrRegMap(value); }
+ const bool genDoubleAlign()
+ {
+ return codeGen->doDoubleAlign();
+ }
+ DWORD getCanDoubleAlign(); // Defined & used only by RegAlloc
+#endif // DOUBLE_ALIGN
+ __declspec(property(get = getFullPtrRegMap, put = setFullPtrRegMap)) bool genFullPtrRegMap;
+ bool getFullPtrRegMap()
+ {
+ return codeGen->genFullPtrRegMap;
+ }
+ void setFullPtrRegMap(bool value)
+ {
+ codeGen->setFullPtrRegMap(value);
+ }
-
- // Things that MAY belong either in CodeGen or CodeGenContext
+// Things that MAY belong either in CodeGen or CodeGenContext
#if FEATURE_EH_FUNCLETS
- FuncInfoDsc * compFuncInfos;
- unsigned short compCurrFuncIdx;
- unsigned short compFuncInfoCount;
+ FuncInfoDsc* compFuncInfos;
+ unsigned short compCurrFuncIdx;
+ unsigned short compFuncInfoCount;
- unsigned short compFuncCount() { assert(fgFuncletsCreated); return compFuncInfoCount; }
+ unsigned short compFuncCount()
+ {
+ assert(fgFuncletsCreated);
+ return compFuncInfoCount;
+ }
#else // !FEATURE_EH_FUNCLETS
// This is a no-op when there are no funclets!
- void genUpdateCurrentFunclet(BasicBlock * block) { return; }
+ void genUpdateCurrentFunclet(BasicBlock* block)
+ {
+ return;
+ }
- FuncInfoDsc compFuncInfoRoot;
+ FuncInfoDsc compFuncInfoRoot;
- static
- const unsigned compCurrFuncIdx = 0;
+ static const unsigned compCurrFuncIdx = 0;
- unsigned short compFuncCount() { return 1; }
+ unsigned short compFuncCount()
+ {
+ return 1;
+ }
#endif // !FEATURE_EH_FUNCLETS
- FuncInfoDsc * funCurrentFunc ();
- void funSetCurrentFunc (unsigned funcIdx);
- FuncInfoDsc * funGetFunc (unsigned funcIdx);
- unsigned int funGetFuncIdx (BasicBlock *block);
-
+ FuncInfoDsc* funCurrentFunc();
+ void funSetCurrentFunc(unsigned funcIdx);
+ FuncInfoDsc* funGetFunc(unsigned funcIdx);
+ unsigned int funGetFuncIdx(BasicBlock* block);
// LIVENESS
+ VARSET_TP compCurLife; // current live variables
+ GenTreePtr compCurLifeTree; // node after which compCurLife has been computed
- VARSET_TP compCurLife; // current live variables
- GenTreePtr compCurLifeTree; // node after which compCurLife has been computed
-
- template<bool ForCodeGen>
- void compChangeLife (VARSET_VALARG_TP newLife
- DEBUGARG( GenTreePtr tree));
+ template <bool ForCodeGen>
+ void compChangeLife(VARSET_VALARG_TP newLife DEBUGARG(GenTreePtr tree));
- void genChangeLife (VARSET_VALARG_TP newLife
- DEBUGARG( GenTreePtr tree))
+ void genChangeLife(VARSET_VALARG_TP newLife DEBUGARG(GenTreePtr tree))
{
- compChangeLife</*ForCodeGen*/true>(newLife DEBUGARG(tree));
+ compChangeLife</*ForCodeGen*/ true>(newLife DEBUGARG(tree));
}
- template<bool ForCodeGen>
- void compUpdateLife (GenTreePtr tree);
+ template <bool ForCodeGen>
+ void compUpdateLife(GenTreePtr tree);
    // Updates "compCurLife" to its state after evaluation of "tree". If "pLastUseVars" is
// non-null, sets "*pLastUseVars" to the set of tracked variables for which "tree" was a last
// use. (Can be more than one var in the case of dependently promoted struct vars.)
- template<bool ForCodeGen>
- void compUpdateLifeVar (GenTreePtr tree, VARSET_TP* pLastUseVars = NULL);
+ template <bool ForCodeGen>
+ void compUpdateLifeVar(GenTreePtr tree, VARSET_TP* pLastUseVars = nullptr);
- template<bool ForCodeGen>
- inline
- void compUpdateLife (VARSET_VALARG_TP newLife);
+ template <bool ForCodeGen>
+ inline void compUpdateLife(VARSET_VALARG_TP newLife);
-    // Gets a register mask that represents the kill set for a helper call since
+    // Gets a register mask that represents the kill set for a helper call since
// not all JIT Helper calls follow the standard ABI on the target architecture.
- regMaskTP compHelperCallKillSet (CorInfoHelpFunc helper);
+ regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper);
    // Gets a register mask that represents the kill set for a NoGC helper call.
- regMaskTP compNoGCHelperCallKillSet (CorInfoHelpFunc helper);
+ regMaskTP compNoGCHelperCallKillSet(CorInfoHelpFunc helper);
#ifdef _TARGET_ARM_
// Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at
// "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the
- // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" --
+ // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" --
// i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and
    // a double, and we start at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask.
- void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc,
- unsigned firstArgRegNum,
- regMaskTP* pArgSkippedRegMask);
+ void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask);
#endif // _TARGET_ARM_
-    // If "tree" is an indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR node, else NULL.
- static GenTreePtr fgIsIndirOfAddrOfLocal(GenTreePtr tree);
+    // If "tree" is an indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR
+ // node, else NULL.
+ static GenTreePtr fgIsIndirOfAddrOfLocal(GenTreePtr tree);
// This is indexed by GT_OBJ nodes that are address of promoted struct variables, which
// have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this
@@ -6845,14 +6704,13 @@ public :
NodeToVarsetPtrMap* GetPromotedStructDeathVars()
{
- if (m_promotedStructDeathVars == NULL)
+ if (m_promotedStructDeathVars == nullptr)
{
m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator());
}
return m_promotedStructDeathVars;
}
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -6868,88 +6726,92 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif
public:
-
//
// Infrastructure functions: start/stop/reserve/emit.
//
- void unwindBegProlog();
- void unwindEndProlog();
- void unwindBegEpilog();
- void unwindEndEpilog();
- void unwindReserve();
- void unwindEmit(void* pHotCode, void* pColdCode);
+ void unwindBegProlog();
+ void unwindEndProlog();
+ void unwindBegEpilog();
+ void unwindEndEpilog();
+ void unwindReserve();
+ void unwindEmit(void* pHotCode, void* pColdCode);
//
// Specific unwind information functions: called by code generation to indicate a particular
// prolog or epilog unwindable instruction has been generated.
//
- void unwindPush(regNumber reg);
- void unwindAllocStack(unsigned size);
- void unwindSetFrameReg(regNumber reg, unsigned offset);
- void unwindSaveReg(regNumber reg, unsigned offset);
+ void unwindPush(regNumber reg);
+ void unwindAllocStack(unsigned size);
+ void unwindSetFrameReg(regNumber reg, unsigned offset);
+ void unwindSaveReg(regNumber reg, unsigned offset);
#if defined(_TARGET_ARM_)
- void unwindPushMaskInt(regMaskTP mask);
- void unwindPushMaskFloat(regMaskTP mask);
- void unwindPopMaskInt(regMaskTP mask);
- void unwindPopMaskFloat(regMaskTP mask);
- void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr")
- void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only called via unwindPadding().
- void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last instruction and the current location.
-#endif // _TARGET_ARM_
+ void unwindPushMaskInt(regMaskTP mask);
+ void unwindPushMaskFloat(regMaskTP mask);
+ void unwindPopMaskInt(regMaskTP mask);
+ void unwindPopMaskFloat(regMaskTP mask);
+ void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr")
+ void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only
+ // called via unwindPadding().
+ void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
+ // instruction and the current location.
+#endif // _TARGET_ARM_
#if defined(_TARGET_ARM64_)
- void unwindNop();
- void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last instruction and the current location.
- void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset]
- void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
- void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]
- void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]!
- void unwindSaveNext(); // unwind code: save_next
- void unwindReturn(regNumber reg); // ret lr
-#endif // defined(_TARGET_ARM64_)
+ void unwindNop();
+ void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
+ // instruction and the current location.
+ void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset]
+ void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
+ void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]
+ void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]!
+ void unwindSaveNext(); // unwind code: save_next
+ void unwindReturn(regNumber reg); // ret lr
+#endif // defined(_TARGET_ARM64_)
//
// Private "helper" functions for the unwind implementation.
//
private:
-
#if FEATURE_EH_FUNCLETS
- void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc);
+ void unwindGetFuncLocations(FuncInfoDsc* func,
+ bool getHotSectionData,
+ /* OUT */ emitLocation** ppStartLoc,
+ /* OUT */ emitLocation** ppEndLoc);
#endif // FEATURE_EH_FUNCLETS
- void unwindReserveFunc(FuncInfoDsc* func);
- void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
+ void unwindReserveFunc(FuncInfoDsc* func);
+ void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
#if defined(_TARGET_AMD64_)
- void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode);
- void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode);
- UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func);
+ void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode);
+ void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode);
+ UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func);
- void unwindBegPrologWindows();
- void unwindPushWindows(regNumber reg);
- void unwindAllocStackWindows(unsigned size);
- void unwindSetFrameRegWindows(regNumber reg, unsigned offset);
- void unwindSaveRegWindows(regNumber reg, unsigned offset);
+ void unwindBegPrologWindows();
+ void unwindPushWindows(regNumber reg);
+ void unwindAllocStackWindows(unsigned size);
+ void unwindSetFrameRegWindows(regNumber reg, unsigned offset);
+ void unwindSaveRegWindows(regNumber reg, unsigned offset);
#ifdef UNIX_AMD64_ABI
- void unwindBegPrologCFI();
- void unwindPushCFI(regNumber reg);
- void unwindAllocStackCFI(unsigned size);
- void unwindSetFrameRegCFI(regNumber reg, unsigned offset);
- void unwindSaveRegCFI(regNumber reg, unsigned offset);
- int mapRegNumToDwarfReg(regNumber reg);
- void createCfiCode(FuncInfoDsc* func, UCHAR codeOffset, UCHAR opcode, USHORT dwarfReg, INT offset = 0);
+ void unwindBegPrologCFI();
+ void unwindPushCFI(regNumber reg);
+ void unwindAllocStackCFI(unsigned size);
+ void unwindSetFrameRegCFI(regNumber reg, unsigned offset);
+ void unwindSaveRegCFI(regNumber reg, unsigned offset);
+ int mapRegNumToDwarfReg(regNumber reg);
+ void createCfiCode(FuncInfoDsc* func, UCHAR codeOffset, UCHAR opcode, USHORT dwarfReg, INT offset = 0);
#endif // UNIX_AMD64_ABI
#elif defined(_TARGET_ARM_)
- void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16);
- void unwindPushPopMaskFloat(regMaskTP mask);
- void unwindSplit(FuncInfoDsc* func);
+ void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16);
+ void unwindPushPopMaskFloat(regMaskTP mask);
+ void unwindSplit(FuncInfoDsc* func);
#endif // _TARGET_ARM_
@@ -6957,28 +6819,28 @@ private:
#pragma endregion // Note: region is NOT under !defined(__GNUC__)
#endif
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX SIMD XX
-XX XX
-XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX
-XX that contains the distinguished, well-known SIMD type definitions). XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX SIMD XX
+ XX XX
+ XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX
+ XX that contains the distinguished, well-known SIMD type definitions). XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
// Get highest available instruction set for floating point codegen
- InstructionSet getFloatingPointInstructionSet()
+ InstructionSet getFloatingPointInstructionSet()
{
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
if (canUseAVX())
{
return InstructionSet_AVX;
}
-
+
// min bar is SSE2
assert(canUseSSE2());
return InstructionSet_SSE2;
@@ -6990,7 +6852,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
}
// Get highest available instruction set for SIMD codegen
- InstructionSet getSIMDInstructionSet()
+ InstructionSet getSIMDInstructionSet()
{
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
return getFloatingPointInstructionSet();
@@ -7004,92 +6866,103 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#ifdef FEATURE_SIMD
// Should we support SIMD intrinsics?
- bool featureSIMD;
+ bool featureSIMD;
// This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics
// that require indexed access to the individual fields of the vector, which is not well supported
// by the hardware. It is allocated when/if such situations are encountered during Lowering.
- unsigned lvaSIMDInitTempVarNum;
+ unsigned lvaSIMDInitTempVarNum;
// SIMD Types
- CORINFO_CLASS_HANDLE SIMDFloatHandle;
- CORINFO_CLASS_HANDLE SIMDDoubleHandle;
- CORINFO_CLASS_HANDLE SIMDIntHandle;
- CORINFO_CLASS_HANDLE SIMDUShortHandle;
- CORINFO_CLASS_HANDLE SIMDUByteHandle;
- CORINFO_CLASS_HANDLE SIMDShortHandle;
- CORINFO_CLASS_HANDLE SIMDByteHandle;
- CORINFO_CLASS_HANDLE SIMDLongHandle;
- CORINFO_CLASS_HANDLE SIMDUIntHandle;
- CORINFO_CLASS_HANDLE SIMDULongHandle;
- CORINFO_CLASS_HANDLE SIMDVector2Handle;
- CORINFO_CLASS_HANDLE SIMDVector3Handle;
- CORINFO_CLASS_HANDLE SIMDVector4Handle;
- CORINFO_CLASS_HANDLE SIMDVectorHandle;
+ CORINFO_CLASS_HANDLE SIMDFloatHandle;
+ CORINFO_CLASS_HANDLE SIMDDoubleHandle;
+ CORINFO_CLASS_HANDLE SIMDIntHandle;
+ CORINFO_CLASS_HANDLE SIMDUShortHandle;
+ CORINFO_CLASS_HANDLE SIMDUByteHandle;
+ CORINFO_CLASS_HANDLE SIMDShortHandle;
+ CORINFO_CLASS_HANDLE SIMDByteHandle;
+ CORINFO_CLASS_HANDLE SIMDLongHandle;
+ CORINFO_CLASS_HANDLE SIMDUIntHandle;
+ CORINFO_CLASS_HANDLE SIMDULongHandle;
+ CORINFO_CLASS_HANDLE SIMDVector2Handle;
+ CORINFO_CLASS_HANDLE SIMDVector3Handle;
+ CORINFO_CLASS_HANDLE SIMDVector4Handle;
+ CORINFO_CLASS_HANDLE SIMDVectorHandle;
// Get the handle for a SIMD type.
- CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, var_types simdBaseType)
+ CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, var_types simdBaseType)
{
if (simdBaseType == TYP_FLOAT)
{
- switch(simdType)
+ switch (simdType)
{
- case TYP_SIMD8:
- return SIMDVector2Handle;
- case TYP_SIMD12:
- return SIMDVector3Handle;
- case TYP_SIMD16:
- if ((getSIMDVectorType() == TYP_SIMD32) ||
- (SIMDVector4Handle != NO_CLASS_HANDLE))
- {
- return SIMDVector4Handle;
- }
- break;
- case TYP_SIMD32:
- break;
- default:
- unreached();
+ case TYP_SIMD8:
+ return SIMDVector2Handle;
+ case TYP_SIMD12:
+ return SIMDVector3Handle;
+ case TYP_SIMD16:
+ if ((getSIMDVectorType() == TYP_SIMD32) || (SIMDVector4Handle != NO_CLASS_HANDLE))
+ {
+ return SIMDVector4Handle;
+ }
+ break;
+ case TYP_SIMD32:
+ break;
+ default:
+ unreached();
}
}
assert(simdType == getSIMDVectorType());
- switch(simdBaseType)
+ switch (simdBaseType)
{
- case TYP_FLOAT: return SIMDFloatHandle;
- case TYP_DOUBLE: return SIMDDoubleHandle;
- case TYP_INT: return SIMDIntHandle;
- case TYP_CHAR: return SIMDUShortHandle;
- case TYP_USHORT: return SIMDUShortHandle;
- case TYP_UBYTE: return SIMDUByteHandle;
- case TYP_SHORT: return SIMDShortHandle;
- case TYP_BYTE: return SIMDByteHandle;
- case TYP_LONG: return SIMDLongHandle;
- case TYP_UINT: return SIMDUIntHandle;
- case TYP_ULONG: return SIMDULongHandle;
- default: assert(!"Didn't find a class handle for simdType");
+ case TYP_FLOAT:
+ return SIMDFloatHandle;
+ case TYP_DOUBLE:
+ return SIMDDoubleHandle;
+ case TYP_INT:
+ return SIMDIntHandle;
+ case TYP_CHAR:
+ return SIMDUShortHandle;
+ case TYP_USHORT:
+ return SIMDUShortHandle;
+ case TYP_UBYTE:
+ return SIMDUByteHandle;
+ case TYP_SHORT:
+ return SIMDShortHandle;
+ case TYP_BYTE:
+ return SIMDByteHandle;
+ case TYP_LONG:
+ return SIMDLongHandle;
+ case TYP_UINT:
+ return SIMDUIntHandle;
+ case TYP_ULONG:
+ return SIMDULongHandle;
+ default:
+ assert(!"Didn't find a class handle for simdType");
}
return NO_CLASS_HANDLE;
}
// SIMD Methods
- CORINFO_METHOD_HANDLE SIMDVectorFloat_set_Item;
- CORINFO_METHOD_HANDLE SIMDVectorFloat_get_Length;
- CORINFO_METHOD_HANDLE SIMDVectorFloat_op_Addition;
+ CORINFO_METHOD_HANDLE SIMDVectorFloat_set_Item;
+ CORINFO_METHOD_HANDLE SIMDVectorFloat_get_Length;
+ CORINFO_METHOD_HANDLE SIMDVectorFloat_op_Addition;
// Returns true if the tree corresponds to a TYP_SIMD lcl var.
    // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but
// type of an arg node is TYP_BYREF and a local node is TYP_SIMD or TYP_STRUCT.
- bool isSIMDTypeLocal(GenTree* tree)
+ bool isSIMDTypeLocal(GenTree* tree)
{
return tree->OperIsLocal() && lvaTable[tree->AsLclVarCommon()->gtLclNum].lvSIMDType;
}
// Returns true if the type of the tree is a byref of TYP_SIMD
- bool isAddrOfSIMDType(GenTree* tree)
+ bool isAddrOfSIMDType(GenTree* tree)
{
- if (tree->TypeGet() == TYP_BYREF || tree->TypeGet() == TYP_I_IMPL)
- {
- switch(tree->OperGet())
- {
+ if (tree->TypeGet() == TYP_BYREF || tree->TypeGet() == TYP_I_IMPL)
+ {
+ switch (tree->OperGet())
+ {
case GT_ADDR:
return varTypeIsSIMD(tree->gtGetOp1());
@@ -7098,25 +6971,22 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
default:
return isSIMDTypeLocal(tree);
- }
- }
+ }
+ }
- return false;
+ return false;
}
- static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
+ static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
{
- return (intrinsicId == SIMDIntrinsicEqual ||
- intrinsicId == SIMDIntrinsicLessThan ||
- intrinsicId == SIMDIntrinsicLessThanOrEqual ||
- intrinsicId == SIMDIntrinsicGreaterThan ||
- intrinsicId == SIMDIntrinsicGreaterThanOrEqual
- );
+ return (intrinsicId == SIMDIntrinsicEqual || intrinsicId == SIMDIntrinsicLessThan ||
+ intrinsicId == SIMDIntrinsicLessThanOrEqual || intrinsicId == SIMDIntrinsicGreaterThan ||
+ intrinsicId == SIMDIntrinsicGreaterThanOrEqual);
}
// Returns base type of a TYP_SIMD local.
// Returns TYP_UNKNOWN if the local is not TYP_SIMD.
- var_types getBaseTypeOfSIMDLocal(GenTree* tree)
+ var_types getBaseTypeOfSIMDLocal(GenTree* tree)
{
if (isSIMDTypeLocal(tree))
{
@@ -7136,13 +7006,12 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass());
}
- // Get the base (element) type and size in bytes for a SIMD type. Returns TYP_UNKNOWN
+ // Get the base (element) type and size in bytes for a SIMD type. Returns TYP_UNKNOWN
// if it is not a SIMD type or is an unsupported base type.
- var_types getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
- unsigned *sizeBytes = nullptr);
+ var_types getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr);
- var_types getBaseTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd)
- {
+ var_types getBaseTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd)
+ {
return getBaseTypeAndSizeOfSIMDType(typeHnd, nullptr);
}
@@ -7150,115 +7019,107 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// Also sets typeHnd, argCount, baseType and sizeBytes out params.
const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd,
CORINFO_METHOD_HANDLE methodHnd,
- CORINFO_SIG_INFO * sig,
+ CORINFO_SIG_INFO* sig,
bool isNewObj,
unsigned* argCount,
var_types* baseType,
- unsigned* sizeBytes);
+ unsigned* sizeBytes);
- // Pops and returns GenTree node from importers type stack.
+    // Pops and returns a GenTree node from the importer's type stack.
// Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes.
- GenTreePtr impSIMDPopStack(var_types type, bool expectAddr = false);
+ GenTreePtr impSIMDPopStack(var_types type, bool expectAddr = false);
// Create a GT_SIMD tree for a Get property of SIMD vector with a fixed index.
- GenTreeSIMD* impSIMDGetFixed(var_types simdType,
- var_types baseType,
- unsigned simdSize,
- int index);
+ GenTreeSIMD* impSIMDGetFixed(var_types simdType, var_types baseType, unsigned simdSize, int index);
// Creates a GT_SIMD tree for Select operation
- GenTreePtr impSIMDSelect(CORINFO_CLASS_HANDLE typeHnd,
- var_types baseType,
- unsigned simdVectorSize,
- GenTree* op1,
- GenTree* op2,
- GenTree* op3);
+ GenTreePtr impSIMDSelect(CORINFO_CLASS_HANDLE typeHnd,
+ var_types baseType,
+ unsigned simdVectorSize,
+ GenTree* op1,
+ GenTree* op2,
+ GenTree* op3);
// Creates a GT_SIMD tree for Min/Max operation
- GenTreePtr impSIMDMinMax(SIMDIntrinsicID intrinsicId,
- CORINFO_CLASS_HANDLE typeHnd,
- var_types baseType,
- unsigned simdVectorSize,
- GenTree* op1,
- GenTree* op2);
-
- // Transforms operands and returns the SIMD intrinsic to be applied on
+ GenTreePtr impSIMDMinMax(SIMDIntrinsicID intrinsicId,
+ CORINFO_CLASS_HANDLE typeHnd,
+ var_types baseType,
+ unsigned simdVectorSize,
+ GenTree* op1,
+ GenTree* op2);
+
+ // Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain given relop result.
- SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
- CORINFO_CLASS_HANDLE typeHnd,
- unsigned simdVectorSize,
- var_types* baseType,
- GenTree** op1,
- GenTree** op2);
+ SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
+ CORINFO_CLASS_HANDLE typeHnd,
+ unsigned simdVectorSize,
+ var_types* baseType,
+ GenTree** op1,
+ GenTree** op2);
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
- // Transforms operands and returns the SIMD intrinsic to be applied on
+ // Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain == comparison result.
- SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
- unsigned simdVectorSize,
- GenTree** op1,
- GenTree** op2);
+ SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
+ unsigned simdVectorSize,
+ GenTree** op1,
+ GenTree** op2);
- // Transforms operands and returns the SIMD intrinsic to be applied on
+ // Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain > comparison result.
- SIMDIntrinsicID impSIMDLongRelOpGreaterThan(CORINFO_CLASS_HANDLE typeHnd,
- unsigned simdVectorSize,
- GenTree** op1,
- GenTree** op2);
+ SIMDIntrinsicID impSIMDLongRelOpGreaterThan(CORINFO_CLASS_HANDLE typeHnd,
+ unsigned simdVectorSize,
+ GenTree** op1,
+ GenTree** op2);
- // Transforms operands and returns the SIMD intrinsic to be applied on
+ // Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain >= comparison result.
- SIMDIntrinsicID impSIMDLongRelOpGreaterThanOrEqual(CORINFO_CLASS_HANDLE typeHnd,
- unsigned simdVectorSize,
- GenTree** op1,
- GenTree** op2);
-
- // Transforms operands and returns the SIMD intrinsic to be applied on
- // transformed operands to obtain >= comparison result in case of int32
+ SIMDIntrinsicID impSIMDLongRelOpGreaterThanOrEqual(CORINFO_CLASS_HANDLE typeHnd,
+ unsigned simdVectorSize,
+ GenTree** op1,
+ GenTree** op2);
+
+ // Transforms operands and returns the SIMD intrinsic to be applied on
+ // transformed operands to obtain >= comparison result in case of int32
// and small int base type vectors.
- SIMDIntrinsicID impSIMDIntegralRelOpGreaterThanOrEqual(CORINFO_CLASS_HANDLE typeHnd,
- unsigned simdVectorSize,
- var_types baseType,
- GenTree** op1,
- GenTree** op2);
-#endif //defined(_TARGET_AMD64_) && !defined(LEGACY_BACKEND)
-
- void setLclRelatedToSIMDIntrinsic(GenTreePtr tree);
- bool areFieldsContiguous(GenTreePtr op1, GenTreePtr op2);
- bool areArrayElementsContiguous(GenTreePtr op1, GenTreePtr op2);
- bool areArgumentsContiguous(GenTreePtr op1, GenTreePtr op2);
- GenTreePtr createAddressNodeForSIMDInit(GenTreePtr tree, unsigned simdSize);
+ SIMDIntrinsicID impSIMDIntegralRelOpGreaterThanOrEqual(
+ CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, var_types baseType, GenTree** op1, GenTree** op2);
+#endif // defined(_TARGET_AMD64_) && !defined(LEGACY_BACKEND)
+
+ void setLclRelatedToSIMDIntrinsic(GenTreePtr tree);
+ bool areFieldsContiguous(GenTreePtr op1, GenTreePtr op2);
+ bool areArrayElementsContiguous(GenTreePtr op1, GenTreePtr op2);
+ bool areArgumentsContiguous(GenTreePtr op1, GenTreePtr op2);
+ GenTreePtr createAddressNodeForSIMDInit(GenTreePtr tree, unsigned simdSize);
// check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT.
- GenTreePtr impSIMDIntrinsic(OPCODE opcode,
- GenTreePtr newobjThis,
- CORINFO_CLASS_HANDLE clsHnd,
- CORINFO_METHOD_HANDLE method,
- CORINFO_SIG_INFO * sig,
- int memberRef);
+ GenTreePtr impSIMDIntrinsic(OPCODE opcode,
+ GenTreePtr newobjThis,
+ CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_METHOD_HANDLE method,
+ CORINFO_SIG_INFO* sig,
+ int memberRef);
- GenTreePtr getOp1ForConstructor(OPCODE opcode,
- GenTreePtr newobjThis,
- CORINFO_CLASS_HANDLE clsHnd);
+ GenTreePtr getOp1ForConstructor(OPCODE opcode, GenTreePtr newobjThis, CORINFO_CLASS_HANDLE clsHnd);
// Whether SIMD vector occupies part of SIMD register.
// SSE2: vector2f/3f are considered sub register SIMD types.
// AVX: vector2f, 3f and 4f are all considered sub register SIMD types.
- bool isSubRegisterSIMDType(CORINFO_CLASS_HANDLE typeHnd)
+ bool isSubRegisterSIMDType(CORINFO_CLASS_HANDLE typeHnd)
{
- unsigned sizeBytes = 0;
- var_types baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
+ unsigned sizeBytes = 0;
+ var_types baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
return (baseType == TYP_FLOAT) && (sizeBytes < getSIMDVectorRegisterByteLength());
}
- bool isSubRegisterSIMDType(GenTreeSIMD* simdNode)
+ bool isSubRegisterSIMDType(GenTreeSIMD* simdNode)
{
return (simdNode->gtSIMDSize < getSIMDVectorRegisterByteLength());
}
// Get the type for the hardware SIMD vector.
// This is the maximum SIMD type supported for this target.
- var_types getSIMDVectorType()
+ var_types getSIMDVectorType()
{
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
if (canUseAVX())
@@ -7277,7 +7138,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
}
// Get the size of the SIMD type in bytes
- int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
+ int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
{
unsigned sizeBytes = 0;
(void)getBaseTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
@@ -7285,16 +7146,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
}
    // Get the number of elements of basetype of SIMD vector given by its size and baseType
- static int getSIMDVectorLength(unsigned simdSize, var_types baseType);
+ static int getSIMDVectorLength(unsigned simdSize, var_types baseType);
    // Get the number of elements of basetype of SIMD vector given by its type handle
- int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);
+ int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);
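
The two declarations above compute a SIMD lane count; the arithmetic is simply the vector size divided by the element size. A minimal sketch under that assumption (not the real helpers, which work from var_types and class handles):

#include <cassert>

static int simdVectorLength(unsigned simdSizeBytes, unsigned baseTypeSizeBytes)
{
    return static_cast<int>(simdSizeBytes / baseTypeSizeBytes);
}

int main()
{
    assert(simdVectorLength(16, 4) == 4); // 16-byte (SSE2) vector of float
    assert(simdVectorLength(32, 8) == 4); // 32-byte (AVX) vector of double
    return 0;
}
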
// Get preferred alignment of SIMD type.
- int getSIMDTypeAlignment(var_types simdType);
+ int getSIMDTypeAlignment(var_types simdType);
// Get the number of bytes in a SIMD Vector for the current compilation.
- unsigned getSIMDVectorRegisterByteLength()
+ unsigned getSIMDVectorRegisterByteLength()
{
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
if (canUseAVX())
@@ -7313,26 +7174,26 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
}
// The minimum and maximum possible number of bytes in a SIMD vector.
- unsigned int maxSIMDStructBytes()
+ unsigned int maxSIMDStructBytes()
{
return getSIMDVectorRegisterByteLength();
}
- unsigned int minSIMDStructBytes()
+ unsigned int minSIMDStructBytes()
{
return emitTypeSize(TYP_SIMD8);
}
#ifdef FEATURE_AVX_SUPPORT
- // (maxPossibleSIMDStructBytes is for use in a context that requires a compile-time constant.)
- static const unsigned maxPossibleSIMDStructBytes = 32;
-#else // !FEATURE_AVX_SUPPORT
- static const unsigned maxPossibleSIMDStructBytes = 16;
+ // (maxPossibleSIMDStructBytes is for use in a context that requires a compile-time constant.)
+ static const unsigned maxPossibleSIMDStructBytes = 32;
+#else // !FEATURE_AVX_SUPPORT
+ static const unsigned maxPossibleSIMDStructBytes = 16;
#endif // !FEATURE_AVX_SUPPORT
// Returns the codegen type for a given SIMD size.
- var_types getSIMDTypeForSize(unsigned size)
+ var_types getSIMDTypeForSize(unsigned size)
{
- var_types simdType = TYP_UNDEF;
+ var_types simdType = TYP_UNDEF;
if (size == 8)
{
simdType = TYP_SIMD8;
@@ -7362,7 +7223,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
{
if (lvaSIMDInitTempVarNum == BAD_VAR_NUM)
{
- lvaSIMDInitTempVarNum = lvaGrabTempWithImplicitUse(false DEBUGARG("SIMDInitTempVar"));
+ lvaSIMDInitTempVarNum = lvaGrabTempWithImplicitUse(false DEBUGARG("SIMDInitTempVar"));
lvaTable[lvaSIMDInitTempVarNum].lvType = getSIMDVectorType();
}
return lvaSIMDInitTempVarNum;
@@ -7391,38 +7252,38 @@ public:
return TARGET_POINTER_SIZE;
}
}
-private:
+private:
// These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType()
    // is defined for both FEATURE_SIMD and !FEATURE_SIMD appropriately. The use
    // of these routines also avoids the need for #ifdef FEATURE_SIMD specific code.
    // Is this var of type simd struct?
- bool lclVarIsSIMDType(unsigned varNum)
+ bool lclVarIsSIMDType(unsigned varNum)
{
LclVarDsc* varDsc = lvaTable + varNum;
return varDsc->lvIsSIMDType();
}
// Is this Local node a SIMD local?
- bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
+ bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
{
return lclVarIsSIMDType(lclVarTree->gtLclNum);
}
// Returns true if the TYP_SIMD locals on stack are aligned at their
// preferred byte boundary specified by getSIMDTypeAlignment().
- bool isSIMDTypeLocalAligned(unsigned varNum)
+ bool isSIMDTypeLocalAligned(unsigned varNum)
{
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF)
{
bool ebpBased;
- int off = lvaFrameAddress(varNum, &ebpBased);
+ int off = lvaFrameAddress(varNum, &ebpBased);
// TODO-Cleanup: Can't this use the lvExactSize on the varDsc?
- int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
+ int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
bool isAligned = ((off % alignment) == 0);
- noway_assert (isAligned || lvaTable[varNum].lvIsParam);
+ noway_assert(isAligned || lvaTable[varNum].lvIsParam);
return isAligned;
}
#endif // FEATURE_SIMD
@@ -7431,7 +7292,7 @@ private:
}
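
The check above boils down to a modulo test on the frame offset; a minimal sketch of just that test, with hypothetical offsets:

#include <cassert>

static bool isOffsetAligned(int frameOffset, int alignment)
{
    return (frameOffset % alignment) == 0;
}

int main()
{
    assert(isOffsetAligned(-32, 16));  // a TYP_SIMD16 local at offset -32 is 16-byte aligned
    assert(!isOffsetAligned(-40, 16)); // one at offset -40 is not
    return 0;
}
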
// Whether SSE2 is available
- bool canUseSSE2() const
+ bool canUseSSE2() const
{
#ifdef _TARGET_XARCH_
return opts.compCanUseSSE2;
@@ -7440,7 +7301,7 @@ private:
#endif
}
- bool canUseAVX() const
+ bool canUseAVX() const
{
#ifdef FEATURE_AVX_SUPPORT
return opts.compCanUseAVX;
@@ -7449,85 +7310,89 @@ private:
#endif
}
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX Compiler XX
-XX XX
-XX Generic info about the compilation and the method being compiled. XX
-XX It is responsible for driving the other phases. XX
-XX It is also responsible for all the memory management. XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
-
-public :
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX Compiler XX
+ XX XX
+ XX Generic info about the compilation and the method being compiled. XX
+ XX It is responsible for driving the other phases. XX
+ XX It is also responsible for all the memory management. XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
- Compiler * InlineeCompiler; // The Compiler instance for the inlinee
+public:
+ Compiler* InlineeCompiler; // The Compiler instance for the inlinee
- InlineResult* compInlineResult; // The result of importing the inlinee method.
-
- bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
- bool compJmpOpUsed; // Does the method do a JMP
- bool compLongUsed; // Does the method use TYP_LONG
- bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE
- bool compTailCallUsed; // Does the method do a tailcall
- bool compLocallocUsed; // Does the method use localloc.
- bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON
- bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node.
- bool compUnsafeCastUsed; // Does the method use LDIND/STIND to cast between scalar/refernce types
+ InlineResult* compInlineResult; // The result of importing the inlinee method.
+ bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
+ bool compJmpOpUsed; // Does the method do a JMP
+ bool compLongUsed; // Does the method use TYP_LONG
+ bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE
+ bool compTailCallUsed; // Does the method do a tailcall
+ bool compLocallocUsed; // Does the method use localloc.
+ bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON
+ bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node.
+    bool compUnsafeCastUsed; // Does the method use LDIND/STIND to cast between scalar/reference types
- // NOTE: These values are only reliable after
- // the importing is completely finished.
+ // NOTE: These values are only reliable after
+ // the importing is completely finished.
- ExpandArrayStack<GenTreePtr>* compQMarks; // The set of QMark nodes created in the current compilation, so
- // we can iterate over these efficiently.
+ ExpandArrayStack<GenTreePtr>* compQMarks; // The set of QMark nodes created in the current compilation, so
+ // we can iterate over these efficiently.
#if CPU_USES_BLOCK_MOVE
- bool compBlkOpUsed; // Does the method do a COPYBLK or INITBLK
+ bool compBlkOpUsed; // Does the method do a COPYBLK or INITBLK
#endif
#ifdef DEBUG
// State information - which phases have completed?
// These are kept together for easy discoverability
- bool bRangeAllowStress;
- bool compCodeGenDone;
- int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks
- bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done?
-#endif // DEBUG
+ bool bRangeAllowStress;
+ bool compCodeGenDone;
+ int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks
+ bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done?
+#endif // DEBUG
- bool fgLocalVarLivenessDone; // Note that this one is used outside of debug.
- bool fgLocalVarLivenessChanged;
+ bool fgLocalVarLivenessDone; // Note that this one is used outside of debug.
+ bool fgLocalVarLivenessChanged;
#if STACK_PROBES
- bool compStackProbePrologDone;
+ bool compStackProbePrologDone;
#endif
#ifndef LEGACY_BACKEND
- bool compLSRADone;
+ bool compLSRADone;
#endif // !LEGACY_BACKEND
- bool compRationalIRForm;
-
- bool compUsesThrowHelper; // There is a call to a THOROW_HELPER for the compiled method.
-
- bool compGeneratingProlog;
- bool compGeneratingEpilog;
- bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack.
- // Insert cookie on frame and code to check the cookie, like VC++ -GS.
- bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local
- // copies of susceptible parameters to avoid buffer overrun attacks through locals/params
- bool getNeedsGSSecurityCookie() const { return compNeedsGSSecurityCookie; }
- void setNeedsGSSecurityCookie() { compNeedsGSSecurityCookie = true; }
-
- FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During
- // frame layout calculations, this is the level we are currently
- // computing.
+ bool compRationalIRForm;
+
+    bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method.
+
+ bool compGeneratingProlog;
+ bool compGeneratingEpilog;
+ bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack.
+ // Insert cookie on frame and code to check the cookie, like VC++ -GS.
+ bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local
+ // copies of susceptible parameters to avoid buffer overrun attacks through locals/params
+ bool getNeedsGSSecurityCookie() const
+ {
+ return compNeedsGSSecurityCookie;
+ }
+ void setNeedsGSSecurityCookie()
+ {
+ compNeedsGSSecurityCookie = true;
+ }
+
+ FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During
+ // frame layout calculations, this is the level we are currently
+ // computing.
//---------------------------- JITing options -----------------------------
- enum codeOptimize
+ enum codeOptimize
{
BLENDED_CODE,
SMALL_CODE,
@@ -7538,51 +7403,61 @@ public :
struct Options
{
- CORJIT_FLAGS* jitFlags; // all flags passed from the EE
- unsigned eeFlags; // CorJitFlag flags passed from the EE
- unsigned compFlags; // method attributes
+ CORJIT_FLAGS* jitFlags; // all flags passed from the EE
+ unsigned eeFlags; // CorJitFlag flags passed from the EE
+ unsigned compFlags; // method attributes
- codeOptimize compCodeOpt; // what type of code optimizations
+ codeOptimize compCodeOpt; // what type of code optimizations
- bool compUseFCOMI;
- bool compUseCMOV;
+ bool compUseFCOMI;
+ bool compUseCMOV;
#ifdef _TARGET_XARCH_
- bool compCanUseSSE2; // Allow CodeGen to use "movq XMM" instructions
+ bool compCanUseSSE2; // Allow CodeGen to use "movq XMM" instructions
#ifdef FEATURE_AVX_SUPPORT
- bool compCanUseAVX; // Allow CodeGen to use AVX 256-bit vectors for SIMD operations
+ bool compCanUseAVX; // Allow CodeGen to use AVX 256-bit vectors for SIMD operations
#endif
#endif
- // optimize maximally and/or favor speed over size?
+// optimize maximally and/or favor speed over size?
-#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
-#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
-#define DEFAULT_MIN_OPTS_BB_COUNT 2000
-#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
-#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000
+#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
+#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
+#define DEFAULT_MIN_OPTS_BB_COUNT 2000
+#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
+#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000
// Maximum number of locals before turning off inlining
-#define MAX_LV_NUM_COUNT_FOR_INLINING 512
-
- bool compMinOpts;
- unsigned instrCount;
- unsigned lvRefCount;
- bool compMinOptsIsSet;
-# ifdef DEBUG
- bool compMinOptsIsUsed;
-
- inline bool MinOpts()
- { assert(compMinOptsIsSet);
- compMinOptsIsUsed = true;
- return compMinOpts;
+#define MAX_LV_NUM_COUNT_FOR_INLINING 512
+
+ bool compMinOpts;
+ unsigned instrCount;
+ unsigned lvRefCount;
+ bool compMinOptsIsSet;
+#ifdef DEBUG
+ bool compMinOptsIsUsed;
+
+ inline bool MinOpts()
+ {
+ assert(compMinOptsIsSet);
+ compMinOptsIsUsed = true;
+ return compMinOpts;
+ }
+ inline bool IsMinOptsSet()
+ {
+ return compMinOptsIsSet;
}
- inline bool IsMinOptsSet() { return compMinOptsIsSet; }
-# else // !DEBUG
- inline bool MinOpts() { return compMinOpts; }
- inline bool IsMinOptsSet() { return compMinOptsIsSet; }
-# endif // !DEBUG
- inline void SetMinOpts(bool val)
+#else // !DEBUG
+ inline bool MinOpts()
+ {
+ return compMinOpts;
+ }
+ inline bool IsMinOptsSet()
+ {
+ return compMinOptsIsSet;
+ }
+#endif // !DEBUG
+ inline void SetMinOpts(bool val)
{
assert(!compMinOptsIsUsed);
assert(!compMinOptsIsSet || (compMinOpts == val));
@@ -7590,18 +7465,27 @@ public :
compMinOptsIsSet = true;
}
- //true if the CLFLG_* for an optimization is set.
- inline bool OptEnabled(unsigned optFlag) { return !!(compFlags & optFlag); }
+ // true if the CLFLG_* for an optimization is set.
+ inline bool OptEnabled(unsigned optFlag)
+ {
+ return !!(compFlags & optFlag);
+ }
#ifdef FEATURE_READYTORUN_COMPILER
- inline bool IsReadyToRun() { return (eeFlags & CORJIT_FLG_READYTORUN) != 0; }
+ inline bool IsReadyToRun()
+ {
+ return (eeFlags & CORJIT_FLG_READYTORUN) != 0;
+ }
#else
- inline bool IsReadyToRun() { return false; }
+ inline bool IsReadyToRun()
+ {
+ return false;
+ }
#endif
// true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
// PInvoke transitions inline (e.g. when targeting CoreRT).
- inline bool ShouldUsePInvokeHelpers()
+ inline bool ShouldUsePInvokeHelpers()
{
#if COR_JIT_EE_VERSION > 460
return (jitFlags->corJitFlags2 & CORJIT_FLG2_USE_PINVOKE_HELPERS) != 0;
@@ -7612,7 +7496,7 @@ public :
    // true if we should insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method
// prolog/epilog
- inline bool IsReversePInvoke()
+ inline bool IsReversePInvoke()
{
#if COR_JIT_EE_VERSION > 460
return (jitFlags->corJitFlags2 & CORJIT_FLG2_REVERSE_PINVOKE) != 0;
@@ -7622,7 +7506,7 @@ public :
}
// true if we must generate code compatible with JIT32 quirks
- inline bool IsJit32Compat()
+ inline bool IsJit32Compat()
{
#if defined(_TARGET_X86_) && COR_JIT_EE_VERSION > 460
return (jitFlags->corJitFlags2 & CORJIT_FLG2_DESKTOP_QUIRKS) != 0;
@@ -7632,7 +7516,7 @@ public :
}
// true if we must generate code compatible with Jit64 quirks
- inline bool IsJit64Compat()
+ inline bool IsJit64Compat()
{
#if defined(_TARGET_AMD64_) && COR_JIT_EE_VERSION > 460
return (jitFlags->corJitFlags2 & CORJIT_FLG2_DESKTOP_QUIRKS) != 0;
@@ -7644,55 +7528,54 @@ public :
}
#ifdef DEBUGGING_SUPPORT
- bool compScopeInfo; // Generate the LocalVar info ?
- bool compDbgCode; // Generate debugger-friendly code?
- bool compDbgInfo; // Gather debugging info?
- bool compDbgEnC;
+ bool compScopeInfo; // Generate the LocalVar info ?
+ bool compDbgCode; // Generate debugger-friendly code?
+ bool compDbgInfo; // Gather debugging info?
+ bool compDbgEnC;
#else
- static const bool compDbgCode;
+ static const bool compDbgCode;
#endif
#ifdef PROFILING_SUPPORTED
- bool compNoPInvokeInlineCB;
+ bool compNoPInvokeInlineCB;
#else
- static const bool compNoPInvokeInlineCB;
+ static const bool compNoPInvokeInlineCB;
#endif
- bool compMustInlinePInvokeCalli; // Unmanaged CALLI in IL stubs must be inlined
+ bool compMustInlinePInvokeCalli; // Unmanaged CALLI in IL stubs must be inlined
#ifdef DEBUG
- bool compGcChecks; // Check arguments and return values to ensure they are sane
- bool compStackCheckOnRet; // Check ESP on return to ensure it is correct
- bool compStackCheckOnCall; // Check ESP after every call to ensure it is correct
-
-#endif
-
- bool compNeedSecurityCheck; // This flag really means where or not a security object needs
- // to be allocated on the stack.
- // It will be set to true in the following cases:
- // 1. When the method being compiled has a declarative security
- // (i.e. when CORINFO_FLG_NOSECURITYWRAP is reset for the current method).
- // This is also the case when we inject a prolog and epilog in the method.
- // (or)
- // 2. When the method being compiled has imperative security (i.e. the method
- // calls into another method that has CORINFO_FLG_SECURITYCHECK flag set).
- // (or)
- // 3. When opts.compDbgEnC is true. (See also Compiler::compCompile).
- //
- // When this flag is set, jit will allocate a gc-reference local variable (lvaSecurityObject),
- // which gets reported as a GC root to stackwalker.
- // (See also ICodeManager::GetAddrOfSecurityObject.)
-
-
-#if RELOC_SUPPORT
- bool compReloc;
+ bool compGcChecks; // Check arguments and return values to ensure they are sane
+ bool compStackCheckOnRet; // Check ESP on return to ensure it is correct
+ bool compStackCheckOnCall; // Check ESP after every call to ensure it is correct
+
+#endif
+
+    bool compNeedSecurityCheck; // This flag really means whether or not a security object needs
+ // to be allocated on the stack.
+ // It will be set to true in the following cases:
+ // 1. When the method being compiled has a declarative security
+ // (i.e. when CORINFO_FLG_NOSECURITYWRAP is reset for the current method).
+ // This is also the case when we inject a prolog and epilog in the method.
+ // (or)
+ // 2. When the method being compiled has imperative security (i.e. the method
+ // calls into another method that has CORINFO_FLG_SECURITYCHECK flag set).
+ // (or)
+ // 3. When opts.compDbgEnC is true. (See also Compiler::compCompile).
+ //
+// When this flag is set, jit will allocate a gc-reference local variable (lvaSecurityObject),
+// which gets reported as a GC root to stackwalker.
+// (See also ICodeManager::GetAddrOfSecurityObject.)
+
+#if RELOC_SUPPORT
+ bool compReloc;
#endif
#ifdef DEBUG
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
- bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible
+ bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible
#endif
-#endif //DEBUG
+#endif // DEBUG
#ifdef UNIX_AMD64_ABI
// This flag is indicating if there is a need to align the frame.
@@ -7701,43 +7584,44 @@ public :
// On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size of
    // 0. The frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by remembering that
// there are calls and making sure the frame alignment logic is executed.
- bool compNeedToAlignFrame;
+ bool compNeedToAlignFrame;
#endif // UNIX_AMD64_ABI
- bool compProcedureSplitting; // Separate cold code from hot code
+ bool compProcedureSplitting; // Separate cold code from hot code
- bool genFPorder; // Preserve FP order (operations are non-commutative)
- bool genFPopt; // Can we do frame-pointer-omission optimization?
- bool altJit; // True if we are an altjit and are compiling this method
+ bool genFPorder; // Preserve FP order (operations are non-commutative)
+ bool genFPopt; // Can we do frame-pointer-omission optimization?
+ bool altJit; // True if we are an altjit and are compiling this method
#ifdef DEBUG
- bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
- bool dspCode; // Display native code generated
- bool dspEHTable; // Display the EH table reported to the VM
- bool dspInstrs; // Display the IL instructions intermixed with the native code output
- bool dspEmit; // Display emitter output
- bool dspLines; // Display source-code lines intermixed with native code output
- bool dmpHex; // Display raw bytes in hex of native code output
- bool varNames; // Display variables names in native code output
- bool disAsm; // Display native code as it is generated
- bool disAsmSpilled; // Display native code when any register spilling occurs
- bool disDiffable; // Makes the Disassembly code 'diff-able'
- bool disAsm2; // Display native code after it is generated using external disassembler
- bool dspOrder; // Display names of each of the methods that we ngen/jit
- bool dspUnwind; // Display the unwind info output
- bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable)
- bool compLongAddress;// Force using large pseudo instructions for long address (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
- bool dspGCtbls; // Display the GC tables
+ bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
+ bool dspCode; // Display native code generated
+ bool dspEHTable; // Display the EH table reported to the VM
+ bool dspInstrs; // Display the IL instructions intermixed with the native code output
+ bool dspEmit; // Display emitter output
+ bool dspLines; // Display source-code lines intermixed with native code output
+ bool dmpHex; // Display raw bytes in hex of native code output
+        bool varNames; // Display variable names in native code output
+ bool disAsm; // Display native code as it is generated
+ bool disAsmSpilled; // Display native code when any register spilling occurs
+ bool disDiffable; // Makes the Disassembly code 'diff-able'
+ bool disAsm2; // Display native code after it is generated using external disassembler
+ bool dspOrder; // Display names of each of the methods that we ngen/jit
+ bool dspUnwind; // Display the unwind info output
+ bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable)
+ bool compLongAddress; // Force using large pseudo instructions for long address
+ // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
+ bool dspGCtbls; // Display the GC tables
#endif
#ifdef LATE_DISASM
- bool doLateDisasm; // Run the late disassembler
-#endif // LATE_DISASM
+ bool doLateDisasm; // Run the late disassembler
+#endif // LATE_DISASM
-#if DUMP_GC_TABLES && !defined(DEBUG) && defined(JIT32_GCENCODER)
- // Only the JIT32_GCENCODER implements GC dumping in non-DEBUG code.
- #pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
- static const bool dspGCtbls = true;
+#if DUMP_GC_TABLES && !defined(DEBUG) && defined(JIT32_GCENCODER)
+// Only the JIT32_GCENCODER implements GC dumping in non-DEBUG code.
+#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
+ static const bool dspGCtbls = true;
#endif
// We need stack probes to guarantee that we won't trigger a stack overflow
@@ -7747,17 +7631,17 @@ public :
// We will only be doing this currently for hosted environments. Unfortunately
// we need to take care of stubs, so potentially, we will have to do the probes
// for any call. We have a plan for not needing for stubs though
- bool compNeedStackProbes;
+ bool compNeedStackProbes;
// Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub())
- // This options helps one to make JIT behave as if it is under profiler.
- bool compJitELTHookEnabled;
+ // This options helps one to make JIT behave as if it is under profiler.
+ bool compJitELTHookEnabled;
#if FEATURE_TAILCALL_OPT
// Whether opportunistic or implicit tail call optimization is enabled.
- bool compTailCallOpt;
+ bool compTailCallOpt;
// Whether optimization of transforming a recursive tail call into a loop is enabled.
- bool compTailCallLoopOpt;
+ bool compTailCallLoopOpt;
#endif
#ifdef ARM_SOFTFP
@@ -7767,25 +7651,24 @@ public :
#endif
GCPollType compGCPollType;
- }
- opts;
+ } opts;
#ifdef ALT_JIT
- static bool s_pAltJitExcludeAssembliesListInitialized;
+ static bool s_pAltJitExcludeAssembliesListInitialized;
static AssemblyNamesList2* s_pAltJitExcludeAssembliesList;
#endif // ALT_JIT
#ifdef DEBUG
- static bool s_dspMemStats; // Display per-phase memory statistics for every function
+ static bool s_dspMemStats; // Display per-phase memory statistics for every function
- template<typename T>
+ template <typename T>
T dspPtr(T p)
{
return (p == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : p);
}
- template<typename T>
+ template <typename T>
T dspOffset(T o)
{
return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o);
@@ -7809,7 +7692,6 @@ public :
#endif // DEBUG
-
// clang-format off
#define STRESS_MODES \
\
@@ -7853,42 +7735,38 @@ public :
// clang-format on
#ifdef DEBUG
- static
- const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1];
- BYTE compActiveStressModes[STRESS_COUNT];
+ static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1];
+ BYTE compActiveStressModes[STRESS_COUNT];
#endif // DEBUG
- #define MAX_STRESS_WEIGHT 100
+#define MAX_STRESS_WEIGHT 100
- bool compStressCompile(compStressArea stressArea,
- unsigned weightPercentage);
+ bool compStressCompile(compStressArea stressArea, unsigned weightPercentage);
#ifdef DEBUG
- bool compInlineStress()
+ bool compInlineStress()
{
return compStressCompile(STRESS_LEGACY_INLINE, 50);
}
- bool compRandomInlineStress()
+ bool compRandomInlineStress()
{
return compStressCompile(STRESS_RANDOM_INLINE, 50);
}
#endif // DEBUG
- bool compTailCallStress()
+ bool compTailCallStress()
{
#ifdef DEBUG
- return (JitConfig.TailcallStress() !=0
- || compStressCompile(STRESS_TAILCALL, 5)
- );
+ return (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5));
#else
return false;
#endif
}
- codeOptimize compCodeOpt()
+ codeOptimize compCodeOpt()
{
#if 0
// Switching between size & speed has measurable throughput impact
@@ -7905,26 +7783,26 @@ public :
}
#ifdef DEBUG
- CLRRandom* inlRNG;
+ CLRRandom* inlRNG;
#endif
//--------------------- Info about the procedure --------------------------
struct Info
{
- COMP_HANDLE compCompHnd;
- CORINFO_MODULE_HANDLE compScopeHnd;
- CORINFO_CLASS_HANDLE compClassHnd;
- CORINFO_METHOD_HANDLE compMethodHnd;
- CORINFO_METHOD_INFO* compMethodInfo;
+ COMP_HANDLE compCompHnd;
+ CORINFO_MODULE_HANDLE compScopeHnd;
+ CORINFO_CLASS_HANDLE compClassHnd;
+ CORINFO_METHOD_HANDLE compMethodHnd;
+ CORINFO_METHOD_INFO* compMethodInfo;
- BOOL hasCircularClassConstraints;
- BOOL hasCircularMethodConstraints;
+ BOOL hasCircularClassConstraints;
+ BOOL hasCircularMethodConstraints;
#if defined(DEBUG) || defined(LATE_DISASM)
- const char* compMethodName;
- const char* compClassName;
- const char* compFullName;
+ const char* compMethodName;
+ const char* compClassName;
+ const char* compFullName;
#endif // defined(DEBUG) || defined(LATE_DISASM)
#if defined(DEBUG) || defined(INLINE_DATA)
@@ -7936,113 +7814,112 @@ public :
#ifdef PSEUDORANDOM_NOP_INSERTION
// things for pseudorandom nop insertion
- unsigned compChecksum;
- CLRRandom compRNG;
+ unsigned compChecksum;
+ CLRRandom compRNG;
#endif
// The following holds the FLG_xxxx flags for the method we're compiling.
- unsigned compFlags;
+ unsigned compFlags;
// The following holds the class attributes for the method we're compiling.
- unsigned compClassAttr;
-
- const BYTE * compCode;
- IL_OFFSET compILCodeSize; // The IL code size
- UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This
- // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if:
- // (1) the code is not hot/cold split, and we issued less code than we expected, or
- // (2) the code is hot/cold split, and we issued less code than we expected
- // in the cold section (the hot section will always be padded out to compTotalHotCodeSize).
-
- bool compIsStatic : 1; // Is the method static (no 'this' pointer)?
- bool compIsVarArgs : 1; // Does the method have varargs parameters?
- bool compIsContextful : 1; // contextful method
- bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options?
- bool compUnwrapContextful: 1; // JIT should unwrap proxies when possible
- bool compProfilerCallback: 1; // JIT inserted a profiler Enter callback
- bool compPublishStubParam: 1; // EAX captured in prolog will be available through an instrinsic
- bool compRetBuffDefStack: 1; // The ret buff argument definitely points into the stack.
-
- var_types compRetType; // Return type of the method as declared in IL
- var_types compRetNativeType; // Normalized return type as per target arch ABI
- unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden)
- unsigned compArgsCount; // Number of arguments (incl. implicit and hidden)
- unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present);
- int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE)
- unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var)
- unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden)
- unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden)
- unsigned compMaxStack;
- UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method
- UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method
-
- unsigned compCallUnmanaged; // count of unmanaged calls
- unsigned compLvFrameListRoot; // lclNum for the Frame root
- unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL.
- // You should generally use compHndBBtabCount instead: it is the
- // current number of EH clauses (after additions like synchronized
- // methods and funclets, and removals like unreachable code deletion).
-
- bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler
- // and the VM expects that, or the JIT is a "self-host" compiler
- // (e.g., x86 hosted targeting x86) and the VM expects that.
+ unsigned compClassAttr;
+
+ const BYTE* compCode;
+ IL_OFFSET compILCodeSize; // The IL code size
+ UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This
+ // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if:
+ // (1) the code is not hot/cold split, and we issued less code than we expected, or
+ // (2) the code is hot/cold split, and we issued less code than we expected
+ // in the cold section (the hot section will always be padded out to compTotalHotCodeSize).
+
+ bool compIsStatic : 1; // Is the method static (no 'this' pointer)?
+ bool compIsVarArgs : 1; // Does the method have varargs parameters?
+ bool compIsContextful : 1; // contextful method
+ bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options?
+ bool compUnwrapContextful : 1; // JIT should unwrap proxies when possible
+ bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback
+        bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic
+ bool compRetBuffDefStack : 1; // The ret buff argument definitely points into the stack.
+
+ var_types compRetType; // Return type of the method as declared in IL
+ var_types compRetNativeType; // Normalized return type as per target arch ABI
+ unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden)
+ unsigned compArgsCount; // Number of arguments (incl. implicit and hidden)
+ unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present);
+ int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE)
+ unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var)
+ unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden)
+ unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden)
+ unsigned compMaxStack;
+ UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method
+ UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method
+
+ unsigned compCallUnmanaged; // count of unmanaged calls
+ unsigned compLvFrameListRoot; // lclNum for the Frame root
+ unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL.
+ // You should generally use compHndBBtabCount instead: it is the
+ // current number of EH clauses (after additions like synchronized
+ // methods and funclets, and removals like unreachable code deletion).
+
+ bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler
+ // and the VM expects that, or the JIT is a "self-host" compiler
+ // (e.g., x86 hosted targeting x86) and the VM expects that.
#if defined(DEBUGGING_SUPPORT) || defined(DEBUG)
/* The following holds IL scope information about local variables.
*/
- unsigned compVarScopesCount;
- VarScopeDsc* compVarScopes;
+ unsigned compVarScopesCount;
+ VarScopeDsc* compVarScopes;
/* The following holds information about instr offsets for
* which we need to report IP-mappings
*/
- IL_OFFSET * compStmtOffsets; // sorted
- unsigned compStmtOffsetsCount;
- ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit;
+ IL_OFFSET* compStmtOffsets; // sorted
+ unsigned compStmtOffsetsCount;
+ ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit;
#endif // DEBUGGING_SUPPORT || DEBUG
- #define CPU_X86 0x0100 // The generic X86 CPU
- #define CPU_X86_PENTIUM_4 0x0110
+#define CPU_X86 0x0100 // The generic X86 CPU
+#define CPU_X86_PENTIUM_4 0x0110
- #define CPU_X64 0x0200 // The generic x64 CPU
- #define CPU_AMD_X64 0x0210 // AMD x64 CPU
- #define CPU_INTEL_X64 0x0240 // Intel x64 CPU
+#define CPU_X64 0x0200 // The generic x64 CPU
+#define CPU_AMD_X64 0x0210 // AMD x64 CPU
+#define CPU_INTEL_X64 0x0240 // Intel x64 CPU
- #define CPU_ARM 0x0300 // The generic ARM CPU
+#define CPU_ARM 0x0300 // The generic ARM CPU
- unsigned genCPU; // What CPU are we running on
- }
- info;
+ unsigned genCPU; // What CPU are we running on
+ } info;
// Returns true if the method being compiled returns a non-void and non-struct value.
- // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a
+ // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a
// single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2,
// 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs).
// Methods returning such structs are considered to return non-struct return value and
// this method returns true in that case.
- bool compMethodReturnsNativeScalarType()
+ bool compMethodReturnsNativeScalarType()
{
return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType);
}
// Returns true if the method being compiled returns RetBuf addr as its return value
- bool compMethodReturnsRetBufAddr()
- {
- // There are cases where implicit RetBuf argument should be explicitly returned in a register.
- // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated.
- // These cases are:
- // 1. Profiler Leave calllback expects the address of retbuf as return value for
- // methods with hidden RetBuf argument. impReturnInstruction() when profiler
- // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for
- // methods with hidden RetBufArg.
- //
- // 2. As per the System V ABI, the address of RetBuf needs to be returned by
- // methods with hidden RetBufArg in RAX. In such case GT_RETURN is of TYP_BYREF,
- // returning the address of RetBuf.
+ bool compMethodReturnsRetBufAddr()
+ {
+ // There are cases where implicit RetBuf argument should be explicitly returned in a register.
+ // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated.
+ // These cases are:
+    // 1. Profiler Leave callback expects the address of retbuf as return value for
+ // methods with hidden RetBuf argument. impReturnInstruction() when profiler
+ // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for
+ // methods with hidden RetBufArg.
+ //
+ // 2. As per the System V ABI, the address of RetBuf needs to be returned by
+ // methods with hidden RetBufArg in RAX. In such case GT_RETURN is of TYP_BYREF,
+ // returning the address of RetBuf.
//
// 3. Windows 64-bit native calling convention also requires the address of RetBuff
// to be returned in RAX.
@@ -8050,7 +7927,7 @@ public :
#ifdef _TARGET_AMD64_
return (info.compRetBuffArg != BAD_VAR_NUM);
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
return (compIsProfilerHookNeeded()) && (info.compRetBuffArg != BAD_VAR_NUM);
#endif // !_TARGET_AMD64_
}
@@ -8058,13 +7935,13 @@ public :
// Returns true if the method returns a value in more than one return register
// TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs?
// TODO-ARM64: Does this apply for ARM64 too?
- bool compMethodReturnsMultiRegRetType()
- {
-#if FEATURE_MULTIREG_RET
-#if defined(_TARGET_X86_)
+ bool compMethodReturnsMultiRegRetType()
+ {
+#if FEATURE_MULTIREG_RET
+#if defined(_TARGET_X86_)
// On x86 only 64-bit longs are returned in multiple registers
return varTypeIsLong(info.compRetNativeType);
-#else // targets: X64-UNIX, ARM64 or ARM32
+#else // targets: X64-UNIX, ARM64 or ARM32
// On all other targets that support multireg return values:
// Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg
@@ -8073,60 +7950,59 @@ public :
#else // not FEATURE_MULTIREG_RET
// For this architecture there are no multireg returns
return false;
-#endif // FEATURE_MULTIREG_RET
+#endif // FEATURE_MULTIREG_RET
}
#if FEATURE_MULTIREG_ARGS
// Given a GenTree node of TYP_STRUCT that represents a pass by value argument
- // return the gcPtr layout for the pointers sized fields
- void getStructGcPtrsFromOp(GenTreePtr op, BYTE *gcPtrsOut);
+ // return the gcPtr layout for the pointers sized fields
+ void getStructGcPtrsFromOp(GenTreePtr op, BYTE* gcPtrsOut);
#endif // FEATURE_MULTIREG_ARGS
// Returns true if the method being compiled returns a value
- bool compMethodHasRetVal()
+ bool compMethodHasRetVal()
{
- return compMethodReturnsNativeScalarType() ||
- compMethodReturnsRetBufAddr() ||
+ return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() ||
compMethodReturnsMultiRegRetType();
}
#if defined(DEBUG)
- void compDispLocalVars();
+ void compDispLocalVars();
#endif // DEBUG
- //-------------------------- Global Compiler Data ------------------------------------
+//-------------------------- Global Compiler Data ------------------------------------
-#ifdef DEBUG
- static unsigned s_compMethodsCount; // to produce unique label names
- unsigned compGenTreeID;
+#ifdef DEBUG
+ static unsigned s_compMethodsCount; // to produce unique label names
+ unsigned compGenTreeID;
#endif
- BasicBlock * compCurBB; // the current basic block in process
- GenTreePtr compCurStmt; // the current statement in process
-#ifdef DEBUG
- unsigned compCurStmtNum; // to give all statements an increasing StmtNum when printing dumps
+ BasicBlock* compCurBB; // the current basic block in process
+ GenTreePtr compCurStmt; // the current statement in process
+#ifdef DEBUG
+ unsigned compCurStmtNum; // to give all statements an increasing StmtNum when printing dumps
#endif
// The following is used to create the 'method JIT info' block.
- size_t compInfoBlkSize;
- BYTE * compInfoBlkAddr;
+ size_t compInfoBlkSize;
+ BYTE* compInfoBlkAddr;
- EHblkDsc * compHndBBtab; // array of EH data
- unsigned compHndBBtabCount; // element count of used elements in EH data array
- unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array
+ EHblkDsc* compHndBBtab; // array of EH data
+ unsigned compHndBBtabCount; // element count of used elements in EH data array
+ unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array
#if defined(_TARGET_X86_)
//-------------------------------------------------------------------------
// Tracking of region covered by the monitor in synchronized methods
- void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER
- void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT
+ void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER
+ void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT
#endif // _TARGET_X86_
- Phases previousCompletedPhase; // the most recently completed phase
+ Phases previousCompletedPhase; // the most recently completed phase
//-------------------------------------------------------------------------
// The following keeps track of how many bytes of local frame space we've
@@ -8134,98 +8010,107 @@ public :
// need to pop when we return.
//
- unsigned compLclFrameSize; // secObject+lclBlk+locals+temps
-
+ unsigned compLclFrameSize; // secObject+lclBlk+locals+temps
+
// Count of callee-saved regs we pushed in the prolog.
// Does not include EBP for isFramePointerUsed() and double-aligned frames.
// In case of Amd64 this doesn't include float regs saved on stack.
- unsigned compCalleeRegsPushed;
+ unsigned compCalleeRegsPushed;
#if defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
// Mask of callee saved float regs on stack.
- regMaskTP compCalleeFPRegsSavedMask;
+ regMaskTP compCalleeFPRegsSavedMask;
#endif
#ifdef _TARGET_AMD64_
- // Quirk for VS debug-launch scenario to work:
- // Bytes of padding between save-reg area and locals.
- #define VSQUIRK_STACK_PAD (2*REGSIZE_BYTES)
- unsigned compVSQuirkStackPaddingNeeded;
- bool compQuirkForPPPflag;
+// Quirk for VS debug-launch scenario to work:
+// Bytes of padding between save-reg area and locals.
+#define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES)
+ unsigned compVSQuirkStackPaddingNeeded;
+ bool compQuirkForPPPflag;
#endif
-
- unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg))
- unsigned compMapILargNum (unsigned ILargNum); // map accounting for hidden args
- unsigned compMapILvarNum (unsigned ILvarNum); // map accounting for hidden args
- unsigned compMap2ILvarNum(unsigned varNum); // map accounting for hidden args
+ unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg))
+
+ unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args
+ unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args
+ unsigned compMap2ILvarNum(unsigned varNum); // map accounting for hidden args
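These mappings exist because IL argument numbers do not count hidden arguments such as the return buffer (info.compRetBuffArg) or the generic type-context parameter (info.compTypeCtxtArg). A rough standalone sketch of the idea, with hypothetical names and sentinel (the real compMapILargNum handles further cases), is:

const unsigned BAD_POS = ~0u; // stand-in for "hidden argument not present"

// Map an IL argument number to a local-variable index by skipping any hidden
// arguments inserted before it (illustration only, not JIT code).
unsigned MapILArgNum(unsigned ilArgNum, unsigned retBuffPos, unsigned typeCtxtPos)
{
    unsigned lclNum = ilArgNum;
    if ((retBuffPos != BAD_POS) && (retBuffPos <= lclNum))
    {
        lclNum++; // the hidden return-buffer argument occupies an earlier slot
    }
    if ((typeCtxtPos != BAD_POS) && (typeCtxtPos <= lclNum))
    {
        lclNum++; // the hidden type-context argument occupies an earlier slot
    }
    return lclNum;
}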
//-------------------------------------------------------------------------
- static void compStartup (); // One-time initialization
- static void compShutdown (); // One-time finalization
+ static void compStartup(); // One-time initialization
+ static void compShutdown(); // One-time finalization
- void compInit (ArenaAllocator * pAlloc, InlineInfo * inlineInfo);
- void compDone ();
+ void compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo);
+ void compDone();
- static void compDisplayStaticSizes(FILE* fout);
+ static void compDisplayStaticSizes(FILE* fout);
//------------ Some utility functions --------------
- void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
- void ** ppIndirection); /* OUT */
-
+ void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
+ void** ppIndirection); /* OUT */
+
// Several JIT/EE interface functions return a CorInfoType, and also return a
// class handle as an out parameter if the type is a value class. Returns the
// size of the type these describe.
- unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd);
+ unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd);
#ifdef DEBUG
// Components used by the compiler may write unit test suites, and
// have them run within this method. They will be run only once per process, and only
// in debug. (Perhaps should be under the control of a COMPlus_ flag.)
// These should fail by asserting.
- void compDoComponentUnitTestsOnce();
+ void compDoComponentUnitTestsOnce();
#endif // DEBUG
- int compCompile (CORINFO_METHOD_HANDLE methodHnd,
- CORINFO_MODULE_HANDLE classPtr,
- COMP_HANDLE compHnd,
- CORINFO_METHOD_INFO * methodInfo,
- void * * methodCodePtr,
- ULONG * methodCodeSize,
- CORJIT_FLAGS * compileFlags);
- void compCompileFinish();
- int compCompileHelper (CORINFO_MODULE_HANDLE classPtr,
- COMP_HANDLE compHnd,
- CORINFO_METHOD_INFO * methodInfo,
- void * * methodCodePtr,
- ULONG * methodCodeSize,
- CORJIT_FLAGS * compileFlags,
- CorInfoInstantiationVerification instVerInfo);
-
- ArenaAllocator * compGetAllocator();
+ int compCompile(CORINFO_METHOD_HANDLE methodHnd,
+ CORINFO_MODULE_HANDLE classPtr,
+ COMP_HANDLE compHnd,
+ CORINFO_METHOD_INFO* methodInfo,
+ void** methodCodePtr,
+ ULONG* methodCodeSize,
+ CORJIT_FLAGS* compileFlags);
+ void compCompileFinish();
+ int compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
+ COMP_HANDLE compHnd,
+ CORINFO_METHOD_INFO* methodInfo,
+ void** methodCodePtr,
+ ULONG* methodCodeSize,
+ CORJIT_FLAGS* compileFlags,
+ CorInfoInstantiationVerification instVerInfo);
+
+ ArenaAllocator* compGetAllocator();
#if MEASURE_MEM_ALLOC
struct MemStats
{
- unsigned allocCnt; // # of allocs
- UINT64 allocSz; // total size of those alloc.
- UINT64 allocSzMax; // Maximum single allocation.
- UINT64 allocSzByKind[CMK_Count]; // Classified by "kind".
- UINT64 nraTotalSizeAlloc;
- UINT64 nraTotalSizeUsed;
+ unsigned allocCnt; // # of allocs
+ UINT64 allocSz; // total size of those alloc.
+ UINT64 allocSzMax; // Maximum single allocation.
+ UINT64 allocSzByKind[CMK_Count]; // Classified by "kind".
+ UINT64 nraTotalSizeAlloc;
+ UINT64 nraTotalSizeUsed;
- static const char* s_CompMemKindNames[]; // Names of the kinds.
+ static const char* s_CompMemKindNames[]; // Names of the kinds.
- MemStats()
- : allocCnt(0), allocSz(0), allocSzMax(0), nraTotalSizeAlloc(0), nraTotalSizeUsed(0)
+ MemStats() : allocCnt(0), allocSz(0), allocSzMax(0), nraTotalSizeAlloc(0), nraTotalSizeUsed(0)
{
- for (int i = 0; i < CMK_Count; i++) allocSzByKind[i] = 0;
+ for (int i = 0; i < CMK_Count; i++)
+ {
+ allocSzByKind[i] = 0;
+ }
}
MemStats(const MemStats& ms)
- : allocCnt(ms.allocCnt), allocSz(ms.allocSz), allocSzMax(ms.allocSzMax), nraTotalSizeAlloc(ms.nraTotalSizeAlloc), nraTotalSizeUsed(ms.nraTotalSizeUsed)
+ : allocCnt(ms.allocCnt)
+ , allocSz(ms.allocSz)
+ , allocSzMax(ms.allocSzMax)
+ , nraTotalSizeAlloc(ms.nraTotalSizeAlloc)
+ , nraTotalSizeUsed(ms.nraTotalSizeUsed)
{
- for (int i = 0; i < CMK_Count; i++) allocSzByKind[i] = ms.allocSzByKind[i];
+ for (int i = 0; i < CMK_Count; i++)
+ {
+ allocSzByKind[i] = ms.allocSzByKind[i];
+ }
}
// Until we have ubiquitous constructors.
@@ -8245,16 +8130,18 @@ public :
allocSzByKind[cmk] += sz;
}
- void Print(FILE* f); // Print these stats to f.
+ void Print(FILE* f); // Print these stats to f.
void PrintByKind(FILE* f); // Do just the by-kind histogram part.
};
MemStats genMemStats;
- struct AggregateMemStats: public MemStats
+ struct AggregateMemStats : public MemStats
{
unsigned nMethods;
- AggregateMemStats(): MemStats(), nMethods(0) {}
+ AggregateMemStats() : MemStats(), nMethods(0)
+ {
+ }
void Add(const MemStats& ms)
{
@@ -8262,17 +8149,20 @@ public :
allocCnt += ms.allocCnt;
allocSz += ms.allocSz;
allocSzMax = max(allocSzMax, ms.allocSzMax);
- for (int i = 0; i < CMK_Count; i++) allocSzByKind[i] += ms.allocSzByKind[i];
+ for (int i = 0; i < CMK_Count; i++)
+ {
+ allocSzByKind[i] += ms.allocSzByKind[i];
+ }
nraTotalSizeAlloc += ms.nraTotalSizeAlloc;
- nraTotalSizeUsed += ms.nraTotalSizeUsed;
+ nraTotalSizeUsed += ms.nraTotalSizeUsed;
}
void Print(FILE* f); // Print these stats to jitstdout.
};
- static CritSecObject s_memStatsLock; // This lock protects the data structures below.
- static MemStats s_maxCompMemStats; // Stats for the compilation with the largest amount allocated.
- static AggregateMemStats s_aggMemStats; // Aggregates statistics for all compilations.
+ static CritSecObject s_memStatsLock; // This lock protects the data structures below.
+ static MemStats s_maxCompMemStats; // Stats for the compilation with the largest amount allocated.
+ static AggregateMemStats s_aggMemStats; // Aggregates statistics for all compilations.
#endif // MEASURE_MEM_ALLOC
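As a minimal standalone sketch of this by-kind accounting pattern (simplified stand-in types, not the JIT's MemStats/CompMemKind), every allocation bumps a total plus a per-kind bucket, and one method's stats can be folded into a process-wide aggregate the same way AggregateMemStats::Add does above:

#include <cstddef>
#include <cstdint>

enum MemKind { MK_Unknown, MK_Bitset, MK_GC, MK_Count }; // stand-in for CompMemKind

struct AllocStats
{
    unsigned allocCnt                = 0;  // number of allocations
    uint64_t allocSz                 = 0;  // total bytes allocated
    uint64_t allocSzMax              = 0;  // largest single allocation
    uint64_t allocSzByKind[MK_Count] = {}; // bytes classified by kind

    void AddAlloc(size_t sz, MemKind kind)
    {
        allocCnt++;
        allocSz += sz;
        if (sz > allocSzMax)
        {
            allocSzMax = sz;
        }
        allocSzByKind[kind] += sz;
    }

    void Add(const AllocStats& ms) // fold per-method stats into an aggregate
    {
        allocCnt += ms.allocCnt;
        allocSz += ms.allocSz;
        if (ms.allocSzMax > allocSzMax)
        {
            allocSzMax = ms.allocSzMax;
        }
        for (int i = 0; i < MK_Count; i++)
        {
            allocSzByKind[i] += ms.allocSzByKind[i];
        }
    }
};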
@@ -8285,7 +8175,7 @@ public :
void AddLoopHoistStats();
void PrintPerMethodLoopHoistStats();
- static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below.
+ static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below.
static unsigned s_loopsConsidered;
static unsigned s_loopsWithHoistedExpressions;
static unsigned s_totalHoistedExpressions;
@@ -8293,49 +8183,42 @@ public :
static void PrintAggregateLoopHoistStats(FILE* f);
#endif // LOOP_HOIST_STATS
- void * compGetMemArray (size_t numElem, size_t elemSize, CompMemKind cmk = CMK_Unknown);
- void * compGetMemArrayA (size_t numElem, size_t elemSize, CompMemKind cmk = CMK_Unknown);
- void * compGetMem (size_t sz, CompMemKind cmk = CMK_Unknown);
- void * compGetMemA (size_t sz, CompMemKind cmk = CMK_Unknown);
- static
- void * compGetMemCallback (void *, size_t, CompMemKind cmk = CMK_Unknown);
- void compFreeMem (void *);
+ void* compGetMemArray(size_t numElem, size_t elemSize, CompMemKind cmk = CMK_Unknown);
+ void* compGetMemArrayA(size_t numElem, size_t elemSize, CompMemKind cmk = CMK_Unknown);
+ void* compGetMem(size_t sz, CompMemKind cmk = CMK_Unknown);
+ void* compGetMemA(size_t sz, CompMemKind cmk = CMK_Unknown);
+ static void* compGetMemCallback(void*, size_t, CompMemKind cmk = CMK_Unknown);
+ void compFreeMem(void*);
- bool compIsForImportOnly();
- bool compIsForInlining();
- bool compDonotInline();
+ bool compIsForImportOnly();
+ bool compIsForInlining();
+ bool compDonotInline();
#ifdef DEBUG
- const char * compLocalVarName (unsigned varNum, unsigned offs);
- VarName compVarName (regNumber reg,
- bool isFloatReg = false);
- const char * compRegVarName (regNumber reg,
- bool displayVar = false,
- bool isFloatReg = false);
- const char * compRegPairName (regPairNo regPair);
- const char * compRegNameForSize (regNumber reg,
- size_t size);
- const char * compFPregVarName (unsigned fpReg,
- bool displayVar = false);
- void compDspSrcLinesByNativeIP (UNATIVE_OFFSET curIP);
- void compDspSrcLinesByLineNum (unsigned line,
- bool seek = false);
+ const char* compLocalVarName(unsigned varNum, unsigned offs);
+ VarName compVarName(regNumber reg, bool isFloatReg = false);
+ const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false);
+ const char* compRegPairName(regPairNo regPair);
+ const char* compRegNameForSize(regNumber reg, size_t size);
+ const char* compFPregVarName(unsigned fpReg, bool displayVar = false);
+ void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP);
+ void compDspSrcLinesByLineNum(unsigned line, bool seek = false);
#endif // DEBUG
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
#ifdef DEBUGGING_SUPPORT
typedef ListNode<VarScopeDsc*> VarScopeListNode;
struct VarScopeMapInfo
{
- VarScopeListNode* head;
- VarScopeListNode* tail;
+ VarScopeListNode* head;
+ VarScopeListNode* tail;
static VarScopeMapInfo* Create(VarScopeListNode* node, IAllocator* alloc)
{
VarScopeMapInfo* info = new (alloc) VarScopeMapInfo;
- info->head = node;
- info->tail = node;
+ info->head = node;
+ info->tail = node;
return info;
}
};
@@ -8343,176 +8226,190 @@ public :
// Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup.
static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32;
- typedef SimplerHashTable<unsigned, SmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*, JitSimplerHashBehavior> VarNumToScopeDscMap;
+ typedef SimplerHashTable<unsigned, SmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*, JitSimplerHashBehavior>
+ VarNumToScopeDscMap;
    // Map to keep variables' scope indexed by varNum containing its scope dscs at the index.
- VarNumToScopeDscMap* compVarScopeMap;
-
- VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd);
+ VarNumToScopeDscMap* compVarScopeMap;
- VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs);
+ VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd);
- VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs);
+ VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs);
- void compInitVarScopeMap();
+ VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs);
- VarScopeDsc** compEnterScopeList; // List has the offsets where variables
- // enter scope, sorted by instr offset
- unsigned compNextEnterScope;
+ void compInitVarScopeMap();
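The MAX_LINEAR_FIND_LCL_SCOPELIST constant above implies a size-based choice between a linear scan and a hash lookup; a standalone sketch of that pattern (std::unordered_map standing in for SimplerHashTable; not the actual compFindLocalVar implementation) is:

#include <unordered_map>
#include <vector>

struct Scope // stand-in for VarScopeDsc
{
    unsigned varNum;
    unsigned lifeBeg;
    unsigned lifeEnd;
};

const unsigned kLinearFindLimit = 32; // analogue of MAX_LINEAR_FIND_LCL_SCOPELIST

// For small scope tables a linear scan beats building and probing a hash map;
// past the threshold, look up by variable number instead.
const Scope* FindScope(const std::vector<Scope>&                          scopes,
                       const std::unordered_map<unsigned, const Scope*>& byVarNum,
                       unsigned varNum, unsigned offs)
{
    if (scopes.size() <= kLinearFindLimit)
    {
        for (const Scope& s : scopes)
        {
            if ((s.varNum == varNum) && (s.lifeBeg <= offs) && (offs < s.lifeEnd))
            {
                return &s;
            }
        }
        return nullptr;
    }

    auto it = byVarNum.find(varNum);
    if ((it != byVarNum.end()) && (it->second->lifeBeg <= offs) && (offs < it->second->lifeEnd))
    {
        return it->second;
    }
    return nullptr;
}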
- VarScopeDsc** compExitScopeList; // List has the offsets where variables
- // go out of scope, sorted by instr offset
- unsigned compNextExitScope;
+ VarScopeDsc** compEnterScopeList; // List has the offsets where variables
+ // enter scope, sorted by instr offset
+ unsigned compNextEnterScope;
+ VarScopeDsc** compExitScopeList; // List has the offsets where variables
+ // go out of scope, sorted by instr offset
+ unsigned compNextExitScope;
- void compInitScopeLists ();
+ void compInitScopeLists();
- void compResetScopeLists ();
+ void compResetScopeLists();
- VarScopeDsc* compGetNextEnterScope (unsigned offs, bool scan=false);
+ VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false);
- VarScopeDsc* compGetNextExitScope (unsigned offs, bool scan=false);
+ VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false);
- void compProcessScopesUntil (unsigned offset,
- VARSET_TP* inScope,
- void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*),
- void (Compiler::*exitScopeFn) (VARSET_TP* inScope, VarScopeDsc*));
+ void compProcessScopesUntil(unsigned offset,
+ VARSET_TP* inScope,
+ void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*),
+ void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*));
#ifdef DEBUG
- void compDispScopeLists ();
+ void compDispScopeLists();
#endif // DEBUG
#endif // DEBUGGING_SUPPORT
-
- bool compIsProfilerHookNeeded();
+ bool compIsProfilerHookNeeded();
//-------------------------------------------------------------------------
/* Statistical Data Gathering */
- void compJitStats(); // call this function and enable
- // various ifdef's below for statistical data
+ void compJitStats(); // call this function and enable
+ // various ifdef's below for statistical data
#if CALL_ARG_STATS
- void compCallArgStats();
- static void compDispCallArgStats(FILE* fout);
+ void compCallArgStats();
+ static void compDispCallArgStats(FILE* fout);
#endif
-
//-------------------------------------------------------------------------
-protected :
-
+protected:
#ifdef DEBUG
bool skipMethod();
#endif
- ArenaAllocator * compAllocator;
+ ArenaAllocator* compAllocator;
public:
// This one presents an implementation of the "IAllocator" abstract class that uses "compAllocator",
// suitable for use by utilcode collection types.
- IAllocator* compAsIAllocator;
+ IAllocator* compAsIAllocator;
#if MEASURE_MEM_ALLOC
- IAllocator* compAsIAllocatorBitset; // An allocator that uses the CMK_bitset tracker.
- IAllocator* compAsIAllocatorGC; // An allocator that uses the CMK_GC tracker.
- IAllocator* compAsIAllocatorLoopHoist; // An allocator that uses the CMK_LoopHoist tracker.
+ IAllocator* compAsIAllocatorBitset; // An allocator that uses the CMK_bitset tracker.
+ IAllocator* compAsIAllocatorGC; // An allocator that uses the CMK_GC tracker.
+ IAllocator* compAsIAllocatorLoopHoist; // An allocator that uses the CMK_LoopHoist tracker.
#ifdef DEBUG
- IAllocator* compAsIAllocatorDebugOnly; // An allocator that uses the CMK_DebugOnly tracker.
-#endif // DEBUG
-#endif // MEASURE_MEM_ALLOC
+ IAllocator* compAsIAllocatorDebugOnly; // An allocator that uses the CMK_DebugOnly tracker.
+#endif // DEBUG
+#endif // MEASURE_MEM_ALLOC
- void compFunctionTraceStart();
- void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI);
+ void compFunctionTraceStart();
+ void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI);
protected:
+ size_t compMaxUncheckedOffsetForNullObject;
- size_t compMaxUncheckedOffsetForNullObject;
-
- void compInitOptions (CORJIT_FLAGS* compileFlags);
+ void compInitOptions(CORJIT_FLAGS* compileFlags);
- void compSetProcessor();
- void compInitDebuggingInfo();
- void compSetOptimizationLevel();
+ void compSetProcessor();
+ void compInitDebuggingInfo();
+ void compSetOptimizationLevel();
#ifdef _TARGET_ARMARCH_
- bool compRsvdRegCheck(FrameLayoutState curState);
+ bool compRsvdRegCheck(FrameLayoutState curState);
#endif
- void compCompile (void * * methodCodePtr,
- ULONG * methodCodeSize,
- CORJIT_FLAGS * compileFlags);
+ void compCompile(void** methodCodePtr, ULONG* methodCodeSize, CORJIT_FLAGS* compileFlags);
// Data required for generating profiler Enter/Leave/TailCall hooks
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef PROFILING_SUPPORTED
- bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method
- void *compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks
- bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle
+ bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method
+ void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks
+ bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle
#endif
#ifdef _TARGET_AMD64_
- bool compQuirkForPPP(); // Check if this method should be Quirked for the PPP issue
+ bool compQuirkForPPP(); // Check if this method should be Quirked for the PPP issue
#endif
public:
// Assumes called as part of process shutdown; does any compiler-specific work associated with that.
- static void ProcessShutdownWork(ICorStaticInfo* statInfo);
+ static void ProcessShutdownWork(ICorStaticInfo* statInfo);
- IAllocator* getAllocator() { return compAsIAllocator; }
+ IAllocator* getAllocator()
+ {
+ return compAsIAllocator;
+ }
#if MEASURE_MEM_ALLOC
- IAllocator* getAllocatorBitset() { return compAsIAllocatorBitset; }
- IAllocator* getAllocatorGC() { return compAsIAllocatorGC; }
- IAllocator* getAllocatorLoopHoist() { return compAsIAllocatorLoopHoist; }
-#else // !MEASURE_MEM_ALLOC
- IAllocator* getAllocatorBitset() { return compAsIAllocator; }
- IAllocator* getAllocatorGC() { return compAsIAllocator; }
- IAllocator* getAllocatorLoopHoist() { return compAsIAllocator; }
+ IAllocator* getAllocatorBitset()
+ {
+ return compAsIAllocatorBitset;
+ }
+ IAllocator* getAllocatorGC()
+ {
+ return compAsIAllocatorGC;
+ }
+ IAllocator* getAllocatorLoopHoist()
+ {
+ return compAsIAllocatorLoopHoist;
+ }
+#else // !MEASURE_MEM_ALLOC
+ IAllocator* getAllocatorBitset()
+ {
+ return compAsIAllocator;
+ }
+ IAllocator* getAllocatorGC()
+ {
+ return compAsIAllocator;
+ }
+ IAllocator* getAllocatorLoopHoist()
+ {
+ return compAsIAllocator;
+ }
#endif // !MEASURE_MEM_ALLOC
#ifdef DEBUG
- IAllocator* getAllocatorDebugOnly()
+ IAllocator* getAllocatorDebugOnly()
{
#if MEASURE_MEM_ALLOC
return compAsIAllocatorDebugOnly;
-#else // !MEASURE_MEM_ALLOC
+#else // !MEASURE_MEM_ALLOC
return compAsIAllocator;
#endif // !MEASURE_MEM_ALLOC
}
#endif // DEBUG
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX typeInfo XX
-XX XX
-XX Checks for type compatibility and merges types XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
-
-public :
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX typeInfo XX
+ XX XX
+ XX Checks for type compatibility and merges types XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
+public:
// Set to TRUE if verification cannot be skipped for this method
// If we detect unverifiable code, we will lazily check
// canSkipMethodVerification() to see if verification is REALLY needed.
- BOOL tiVerificationNeeded;
+ BOOL tiVerificationNeeded;
    // It is initially TRUE, and it gets set to FALSE if we run into unverifiable code
// Note that this is valid only if tiVerificationNeeded was ever TRUE.
- BOOL tiIsVerifiableCode;
+ BOOL tiIsVerifiableCode;
// Set to TRUE if runtime callout is needed for this method
- BOOL tiRuntimeCalloutNeeded;
+ BOOL tiRuntimeCalloutNeeded;
// Set to TRUE if security prolog/epilog callout is needed for this method
// Note: This flag is different than compNeedSecurityCheck.
- // compNeedSecurityCheck means whether or not a security object needs
+ // compNeedSecurityCheck means whether or not a security object needs
// to be allocated on the stack, which is currently true for EnC as well.
// tiSecurityCalloutNeeded means whether or not security callouts need
// to be inserted in the jitted code.
- BOOL tiSecurityCalloutNeeded;
+ BOOL tiSecurityCalloutNeeded;
// Returns TRUE if child is equal to or a subtype of parent for merge purposes
    // This support is necessary to support attributes that are not described in
@@ -8521,147 +8418,137 @@ public :
// it is safe to have mismatches here (that tiCompatibleWith will not flag),
// but when deciding if we need to reimport a block, we need to take these
// in account
- BOOL tiMergeCompatibleWith (const typeInfo& pChild,
- const typeInfo& pParent,
- bool normalisedForStack) const;
+ BOOL tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;
// Returns TRUE if child is equal to or a subtype of parent.
// normalisedForStack indicates that both types are normalised for the stack
- BOOL tiCompatibleWith (const typeInfo& pChild,
- const typeInfo& pParent,
- bool normalisedForStack) const;
+ BOOL tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;
// Merges pDest and pSrc. Returns FALSE if merge is undefined.
// *pDest is modified to represent the merged type. Sets "*changed" to true
// if this changes "*pDest".
- BOOL tiMergeToCommonParent (typeInfo *pDest,
- const typeInfo *pSrc,
- bool* changed) const;
+ BOOL tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const;
// Set pDest from the primitive value type.
// Eg. System.Int32 -> ELEMENT_TYPE_I4
- BOOL tiFromPrimitiveValueClass (typeInfo *pDest,
- const typeInfo *pVC) const;
+ BOOL tiFromPrimitiveValueClass(typeInfo* pDest, const typeInfo* pVC) const;
#ifdef DEBUG
- // <BUGNUM> VSW 471305
- // IJW allows assigning REF to BYREF. The following allows us to temporarily
+ // <BUGNUM> VSW 471305
+ // IJW allows assigning REF to BYREF. The following allows us to temporarily
// bypass the assert check in gcMarkRegSetGCref and gcMarkRegSetByref
// We use a "short" as we need to push/pop this scope.
// </BUGNUM>
- short compRegSetCheckLevel;
+ short compRegSetCheckLevel;
#endif
-
-/*
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XX XX
-XX IL verification stuff XX
-XX XX
-XX XX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-*/
+
+ /*
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XX XX
+ XX IL verification stuff XX
+ XX XX
+ XX XX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ */
public:
// The following is used to track liveness of local variables, initialization
// of valueclass constructors, and type safe use of IL instructions.
// dynamic state info needed for verification
- EntryState verCurrentState;
+ EntryState verCurrentState;
    // this ptr of object type .ctors are considered initialized only after
// the base class ctor is called, or an alternate ctor is called.
// An uninited this ptr can be used to access fields, but cannot
// be used to call a member function.
- BOOL verTrackObjCtorInitState;
+ BOOL verTrackObjCtorInitState;
- void verInitBBEntryState(BasicBlock* block,
- EntryState* currentState);
+ void verInitBBEntryState(BasicBlock* block, EntryState* currentState);
// Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state.
- void verSetThisInit(BasicBlock* block, ThisInitState tis);
- void verInitCurrentState();
- void verResetCurrentState(BasicBlock* block,
- EntryState* currentState);
+ void verSetThisInit(BasicBlock* block, ThisInitState tis);
+ void verInitCurrentState();
+ void verResetCurrentState(BasicBlock* block, EntryState* currentState);
// Merges the current verification state into the entry state of "block", return FALSE if that merge fails,
// TRUE if it succeeds. Further sets "*changed" to true if this changes the entry state of "block".
- BOOL verMergeEntryStates(BasicBlock* block, bool* changed);
-
- void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg));
- void verHandleVerificationFailure(BasicBlock* block
- DEBUGARG(bool logMsg));
- typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef = false); // converts from jit type representation to typeInfo
- typeInfo verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo
- BOOL verIsSDArray(typeInfo ti);
- typeInfo verGetArrayElemType(typeInfo ti);
-
- typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig,
- CORINFO_ARG_LIST_HANDLE args);
- BOOL verNeedsVerification();
- BOOL verIsByRefLike(const typeInfo& ti);
- BOOL verIsSafeToReturnByRef(const typeInfo& ti);
+ BOOL verMergeEntryStates(BasicBlock* block, bool* changed);
+
+ void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg));
+ void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg));
+ typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd,
+ bool bashStructToRef = false); // converts from jit type representation to typeInfo
+ typeInfo verMakeTypeInfo(CorInfoType ciType,
+ CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo
+ BOOL verIsSDArray(typeInfo ti);
+ typeInfo verGetArrayElemType(typeInfo ti);
+
+ typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args);
+ BOOL verNeedsVerification();
+ BOOL verIsByRefLike(const typeInfo& ti);
+ BOOL verIsSafeToReturnByRef(const typeInfo& ti);
// generic type variables range over types that satisfy IsBoxable
- BOOL verIsBoxable(const typeInfo& ti);
-
- void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line));
- void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line));
- bool verCheckTailCallConstraint (OPCODE opcode,
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
- bool speculative // If true, won't throw if verificatoin fails. Instead it will
- // return false to the caller.
- // If false, it will throw.
- );
- bool verIsBoxedValueType (typeInfo ti);
-
- void verVerifyCall (OPCODE opcode,
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken,
- bool tailCall,
- bool readonlyCall, // is this a "readonly." call?
- const BYTE* delegateCreateStart,
- const BYTE* codeAddr,
- CORINFO_CALL_INFO* callInfo
- DEBUGARG(const char * methodName));
-
- BOOL verCheckDelegateCreation(const BYTE* delegateCreateStart,
- const BYTE* codeAddr,
- mdMemberRef & targetMemberRef);
-
- typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType);
- typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType);
- void verVerifyField(CORINFO_RESOLVED_TOKEN * pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, BOOL mutator, BOOL allowPlainStructAsThis = FALSE);
- void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode);
- void verVerifyThisPtrInitialised();
- BOOL verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context,
- CORINFO_CLASS_HANDLE target);
-
-
+ BOOL verIsBoxable(const typeInfo& ti);
+
+ void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file)
+ DEBUGARG(unsigned line));
+ void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file)
+ DEBUGARG(unsigned line));
+ bool verCheckTailCallConstraint(OPCODE opcode,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call
+ // on a type parameter?
+                                    bool speculative // If true, won't throw if verification fails. Instead it will
+ // return false to the caller.
+ // If false, it will throw.
+ );
+ bool verIsBoxedValueType(typeInfo ti);
+
+ void verVerifyCall(OPCODE opcode,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
+ bool tailCall,
+ bool readonlyCall, // is this a "readonly." call?
+ const BYTE* delegateCreateStart,
+ const BYTE* codeAddr,
+ CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName));
+
+ BOOL verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef);
+
+ typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType);
+ typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType);
+ void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ const CORINFO_FIELD_INFO& fieldInfo,
+ const typeInfo* tiThis,
+ BOOL mutator,
+ BOOL allowPlainStructAsThis = FALSE);
+ void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode);
+ void verVerifyThisPtrInitialised();
+ BOOL verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target);
// Register allocator
- void raInitStackFP ();
- void raEnregisterVarsPrePassStackFP ();
- void raSetRegLclBirthDeath (GenTreePtr tree, VARSET_VALARG_TP lastlife, bool fromLDOBJ);
- void raEnregisterVarsPostPassStackFP ();
- void raGenerateFPRefCounts ();
- void raEnregisterVarsStackFP ();
- void raUpdateHeightsForVarsStackFP (VARSET_VALARG_TP mask);
+ void raInitStackFP();
+ void raEnregisterVarsPrePassStackFP();
+ void raSetRegLclBirthDeath(GenTreePtr tree, VARSET_VALARG_TP lastlife, bool fromLDOBJ);
+ void raEnregisterVarsPostPassStackFP();
+ void raGenerateFPRefCounts();
+ void raEnregisterVarsStackFP();
+ void raUpdateHeightsForVarsStackFP(VARSET_VALARG_TP mask);
- regNumber raRegForVarStackFP (unsigned varTrackedIndex);
- void raAddPayloadStackFP (VARSET_VALARG_TP mask, unsigned weight);
+ regNumber raRegForVarStackFP(unsigned varTrackedIndex);
+ void raAddPayloadStackFP(VARSET_VALARG_TP mask, unsigned weight);
// returns true if enregistering v1 would save more mem accesses than v2
- bool raVarIsGreaterValueStackFP (LclVarDsc *lv1, LclVarDsc *lv2);
-
-
+ bool raVarIsGreaterValueStackFP(LclVarDsc* lv1, LclVarDsc* lv2);
#ifdef DEBUG
- void raDumpHeightsStackFP();
- void raDumpVariableRegIntfFloat();
+ void raDumpHeightsStackFP();
+ void raDumpVariableRegIntfFloat();
#endif
#if FEATURE_STACK_FP_X87
@@ -8699,36 +8586,36 @@ public:
//
// If at any point we find we need to optimize this, we should throw work at unblocking the restrictions our
// current procedure splitting and exception code have.
- bool compMayHaveTransitionBlocks;
+ bool compMayHaveTransitionBlocks;
- VARSET_TP raMaskDontEnregFloat; // mask for additional restrictions
+ VARSET_TP raMaskDontEnregFloat; // mask for additional restrictions
- VARSET_TP raLclRegIntfFloat[REG_FPCOUNT];
+ VARSET_TP raLclRegIntfFloat[REG_FPCOUNT];
- unsigned raCntStkStackFP;
- unsigned raCntWtdStkDblStackFP;
- unsigned raCntStkParamDblStackFP;
+ unsigned raCntStkStackFP;
+ unsigned raCntWtdStkDblStackFP;
+ unsigned raCntStkParamDblStackFP;
    // Payload in mem accesses for enregistering a variable (we don't want to mix with refcounts)
// TODO: Do we want to put this in LclVarDsc?
- unsigned raPayloadStackFP[lclMAX_TRACKED];
- unsigned raHeightsStackFP[lclMAX_TRACKED][FP_VIRTUALREGISTERS+1];
+ unsigned raPayloadStackFP[lclMAX_TRACKED];
+ unsigned raHeightsStackFP[lclMAX_TRACKED][FP_VIRTUALREGISTERS + 1];
#ifdef DEBUG
// Useful for debugging
- unsigned raHeightsNonWeightedStackFP[lclMAX_TRACKED][FP_VIRTUALREGISTERS+1];
+ unsigned raHeightsNonWeightedStackFP[lclMAX_TRACKED][FP_VIRTUALREGISTERS + 1];
#endif
-#endif //FEATURE_STACK_FP_X87
+#endif // FEATURE_STACK_FP_X87
#ifdef DEBUG
// One line log function. Default level is 0. Increasing it gives you
- // more log information
+ // more log information
- // levels are currently unused: #define JITDUMP(level,...) ();
- void JitLogEE (unsigned level, const char* fmt, ...);
+ // levels are currently unused: #define JITDUMP(level,...) ();
+ void JitLogEE(unsigned level, const char* fmt, ...);
- bool compDebugBreak;
+ bool compDebugBreak;
- bool compJitHaltMethod();
+ bool compJitHaltMethod();
#endif
@@ -8742,12 +8629,11 @@ public:
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
-
struct ShadowParamVarInfo
{
- FixedBitVect *assignGroup; // the closure set of variables whose values depend on each other
- unsigned shadowCopy; // Lcl var num, valid only if not set to NO_SHADOW_COPY
-
+ FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other
+ unsigned shadowCopy; // Lcl var num, valid only if not set to NO_SHADOW_COPY
+
static bool mayNeedShadowCopy(LclVarDsc* varDsc)
{
#if defined(_TARGET_AMD64_) && !defined(LEGACY_BACKEND)
@@ -8755,7 +8641,7 @@ public:
// slots and update all trees to refer to shadow slots is done immediately after
// fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines
// not to shadow a parameter. Also, LSRA could potentially spill a param which is passed
- // in register. Therefore, conservatively all params may need a shadow copy. Note that
+ // in register. Therefore, conservatively all params may need a shadow copy. Note that
// GS cookie logic further checks whether the param is a ptr or an unsafe buffer before
// creating a shadow slot even though this routine returns true.
//
@@ -8768,19 +8654,19 @@ public:
// Possible solution to address case (a)
// - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked
// in this routine. Note that live out of exception handler is something we may not be
- // able to do it here since GS cookie logic is invoked ahead of liveness computation.
+ // able to do it here since GS cookie logic is invoked ahead of liveness computation.
// Therefore, for methods with exception handling and need GS cookie check we might have
// to take conservative approach.
//
// Possible solution to address case (b)
- // - Whenver a parameter passed in an argument register needs to be spilled by LSRA, we
+ // - Whenver a parameter passed in an argument register needs to be spilled by LSRA, we
// create a new spill temp if the method needs GS cookie check.
return varDsc->lvIsParam;
#else // !(defined(_TARGET_AMD64_) && defined(LEGACY_BACKEND))
return varDsc->lvIsParam && !varDsc->lvIsRegArg;
#endif
}
-
+
#ifdef DEBUG
void Print()
{
@@ -8789,57 +8675,60 @@ public:
#endif
};
- GSCookie *gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks
- GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL
- ShadowParamVarInfo *gsShadowVarInfo; // Table used by shadow param analysis code
+ GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks
+ GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL
+ ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code
+
+ void gsGSChecksInitCookie(); // Grabs cookie variable
+    void gsCopyShadowParams(); // Identify vulnerable params and create shadow copies
+ bool gsFindVulnerableParams(); // Shadow param analysis code
+    void gsParamsToShadows(); // Insert copy code and replace param uses by shadow
- void gsGSChecksInitCookie(); // Grabs cookie variable
- void gsCopyShadowParams(); // Identify vulnerable params and create dhadow copies
- bool gsFindVulnerableParams (); // Shadow param analysis code
- void gsParamsToShadows(); // Insert copy code and replave param uses by shadow
+ static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk
+ static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk
- static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk
- static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk
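Conceptually, the shadow-parameter machinery above copies each vulnerable pointer/buffer parameter into a fresh local ("shadow") slot on entry and rewrites later uses to that shadow, so an overrun of a local buffer cannot redirect those uses by corrupting the parameter's original home location. A hand-written source-level analogue (illustration only, not JIT output) is:

void ParseBefore(char* userData)
{
    char buf[16];
    // ... writes into buf[] could overflow and, on some frame layouts, overwrite
    // userData's home slot before it is used again ...
    userData[0] = '\0'; // uses the possibly-corrupted parameter
}

void ParseAfter(char* userData)
{
    char* shadowUserData = userData; // copy made before any local buffer is written
    char buf[16];
    // ... same writes into buf[] ...
    shadowUserData[0] = '\0'; // every later use goes through the shadow copy
}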
+#define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined.
+ // This can be overwritten by setting complus_JITInlineSize env variable.
-#define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined.
- // This can be overwritten by setting complus_JITInlineSize env variable.
+#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined
-#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined
-
private:
#ifdef FEATURE_JIT_METHOD_PERF
- JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation.
- static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run.
+ JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation.
+ static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run.
- static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD.
- static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to.
+ static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD.
+ static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to.
#endif
- inline void EndPhase(Phases phase); // Indicate the end of the given phase.
+ inline void EndPhase(Phases phase); // Indicate the end of the given phase.
#if defined(DEBUG) || defined(INLINE_DATA) || defined(FEATURE_CLRSQM)
// These variables are associated with maintaining SQM data about compile time.
- unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase in the current compilation.
- unsigned __int64 m_compCycles; // Net cycle count for current compilation
- DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of the inlining phase in the current compilation.
-#endif // defined(DEBUG) || defined(INLINE_DATA) || defined(FEATURE_CLRSQM)
+ unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase
+ // in the current compilation.
+ unsigned __int64 m_compCycles; // Net cycle count for current compilation
+ DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of
+ // the inlining phase in the current compilation.
+#endif // defined(DEBUG) || defined(INLINE_DATA) || defined(FEATURE_CLRSQM)
// Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete.
// (We do this after inlining because this marks the last point at which the JIT is likely to cause
// type-loading and class initialization).
- void RecordStateAtEndOfInlining();
+ void RecordStateAtEndOfInlining();
// Assumes being called at the end of compilation. Update the SQM state.
- void RecordStateAtEndOfCompilation();
+ void RecordStateAtEndOfCompilation();
#ifdef FEATURE_CLRSQM
// Does anything SQM related necessary at process shutdown time.
- static void ProcessShutdownSQMWork(ICorStaticInfo* statInfo);
+ static void ProcessShutdownSQMWork(ICorStaticInfo* statInfo);
#endif // FEATURE_CLRSQM
public:
#if FUNC_INFO_LOGGING
- static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the filename to write it to.
- static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to.
-#endif // FUNC_INFO_LOGGING
+ static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the
+ // filename to write it to.
+ static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to.
+#endif // FUNC_INFO_LOGGING
Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers.
@@ -8849,7 +8738,7 @@ public:
#ifndef FEATURE_TRACELOGGING
// Should we actually fire the noway assert body and the exception handler?
bool compShouldThrowOnNoway();
-#else // FEATURE_TRACELOGGING
+#else // FEATURE_TRACELOGGING
// Should we actually fire the noway assert body and the exception handler?
bool compShouldThrowOnNoway(const char* filename, unsigned line);
@@ -8857,22 +8746,25 @@ public:
JitTelemetry compJitTelemetry;
// Get common parameters that have to be logged with most telemetry data.
- void compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash);
+ void compGetTelemetryDefaults(const char** assemblyName,
+ const char** scopeName,
+ const char** methodName,
+ unsigned* methodHash);
#endif // !FEATURE_TRACELOGGING
#ifdef DEBUG
- private:
+private:
NodeToTestDataMap* m_nodeTestData;
static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000;
- unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we
- // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS.
- // Current kept in this.
- public:
+ unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we
+ // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS.
+                                   // Currently kept in this.
+public:
NodeToTestDataMap* GetNodeTestData()
{
Compiler* compRoot = impInlineRoot();
- if (compRoot->m_nodeTestData == NULL)
+ if (compRoot->m_nodeTestData == nullptr)
{
compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly());
}
@@ -8880,7 +8772,7 @@ public:
}
typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, int, JitSimplerHashBehavior> NodeToIntMap;
-
+
// Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and
// currently occur in the AST graph.
NodeToIntMap* FindReachableNodesInNodeTestData();
@@ -8897,7 +8789,7 @@ public:
// test attributes are satisfied.
void JitTestCheckSSA(); // SSA builder tests.
void JitTestCheckVN(); // Value numbering tests.
-#endif // DEBUG
+#endif // DEBUG
// The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for
// operations.
@@ -8906,10 +8798,10 @@ public:
FieldSeqStore* GetFieldSeqStore()
{
Compiler* compRoot = impInlineRoot();
- if (compRoot->m_fieldSeqStore == NULL)
+ if (compRoot->m_fieldSeqStore == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation.
- IAllocator* ialloc = new (this, CMK_FieldSeqStore) CompAllocator(this, CMK_FieldSeqStore);
+ IAllocator* ialloc = new (this, CMK_FieldSeqStore) CompAllocator(this, CMK_FieldSeqStore);
compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc);
}
return compRoot->m_fieldSeqStore;
@@ -8926,10 +8818,11 @@ public:
NodeToFieldSeqMap* GetZeroOffsetFieldMap()
{
// Don't need to worry about inlining here
- if (m_zeroOffsetFieldMap == NULL)
+ if (m_zeroOffsetFieldMap == nullptr)
{
- // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for allocation.
- IAllocator* ialloc = new (this, CMK_ZeroOffsetFieldMap) CompAllocator(this, CMK_ZeroOffsetFieldMap);
+ // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for
+ // allocation.
+ IAllocator* ialloc = new (this, CMK_ZeroOffsetFieldMap) CompAllocator(this, CMK_ZeroOffsetFieldMap);
m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
}
return m_zeroOffsetFieldMap;
@@ -8947,17 +8840,17 @@ public:
// CoreRT. Such case is handled same as the default case.
void fgAddFieldSeqForZeroOffset(GenTreePtr op1, FieldSeqNode* fieldSeq);
-
- typedef SimplerHashTable<const GenTree*, PtrKeyFuncs<GenTree>, ArrayInfo, JitSimplerHashBehavior> NodeToArrayInfoMap;
+ typedef SimplerHashTable<const GenTree*, PtrKeyFuncs<GenTree>, ArrayInfo, JitSimplerHashBehavior>
+ NodeToArrayInfoMap;
NodeToArrayInfoMap* m_arrayInfoMap;
NodeToArrayInfoMap* GetArrayInfoMap()
{
Compiler* compRoot = impInlineRoot();
- if (compRoot->m_arrayInfoMap == NULL)
+ if (compRoot->m_arrayInfoMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation.
- IAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
+ IAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc);
}
return compRoot->m_arrayInfoMap;
@@ -8974,7 +8867,7 @@ public:
if (compRoot->m_heapSsaMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation.
- IAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
+ IAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
compRoot->m_heapSsaMap = new (ialloc) NodeToUnsignedMap(ialloc);
}
return compRoot->m_heapSsaMap;
@@ -8984,7 +8877,7 @@ public:
CORINFO_CLASS_HANDLE m_refAnyClass;
CORINFO_FIELD_HANDLE GetRefanyDataField()
{
- if (m_refAnyClass == NULL)
+ if (m_refAnyClass == nullptr)
{
m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
}
@@ -8992,7 +8885,7 @@ public:
}
CORINFO_FIELD_HANDLE GetRefanyTypeField()
{
- if (m_refAnyClass == NULL)
+ if (m_refAnyClass == nullptr)
{
m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
}
@@ -9010,22 +8903,23 @@ public:
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size);
- static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum);
- static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
- var_types* type0,
- var_types* type1,
- unsigned __int8* offset0,
- unsigned __int8* offset1);
+ static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
+ unsigned slotNum);
+ static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
+ var_types* type0,
+ var_types* type1,
+ unsigned __int8* offset0,
+ unsigned __int8* offset1);
void fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgument);
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- void fgMorphMultiregStructArgs(GenTreeCall* call);
- GenTreePtr fgMorphMultiregStructArg (GenTreePtr arg, fgArgTabEntryPtr fgEntryPtr);
+ void fgMorphMultiregStructArgs(GenTreeCall* call);
+ GenTreePtr fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPtr fgEntryPtr);
}; // end of class Compiler
// Inline methods of CompAllocator.
-void * CompAllocator::Alloc(size_t sz)
+void* CompAllocator::Alloc(size_t sz)
{
#if MEASURE_MEM_ALLOC
return m_comp->compGetMem(sz, m_cmk);
@@ -9034,7 +8928,7 @@ void * CompAllocator::Alloc(size_t sz)
#endif
}
-void * CompAllocator::ArrayAlloc(size_t elems, size_t elemSize)
+void* CompAllocator::ArrayAlloc(size_t elems, size_t elemSize)
{
#if MEASURE_MEM_ALLOC
return m_comp->compGetMemArray(elems, elemSize, m_cmk);
@@ -9043,20 +8937,20 @@ void * CompAllocator::ArrayAlloc(size_t elems, size_t elemSize)
#endif
}
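The "new (ialloc) T(...)" expressions seen earlier are ordinary placement-new over an arena-style allocator; a minimal standalone analogue (hypothetical Arena type, not IAllocator/CompAllocator) is:

#include <cstddef>
#include <new>
#include <vector>

// Tiny arena standing in for the JIT's allocator interfaces: memory is reclaimed
// wholesale when the arena dies, and objects are never individually destructed.
struct Arena
{
    std::vector<void*> blocks;

    void* Alloc(size_t sz)
    {
        void* p = ::operator new(sz); // suitably aligned raw memory
        blocks.push_back(p);
        return p;
    }

    ~Arena()
    {
        for (void* p : blocks)
        {
            ::operator delete(p);
        }
    }
};

// Route "new (arena) T(...)" through the arena, mirroring how CompAllocator::Alloc
// forwards to compGetMem (with or without a memory-kind tag).
inline void* operator new(size_t sz, Arena& arena)
{
    return arena.Alloc(sz);
}

inline void operator delete(void*, Arena&) noexcept
{
    // arena memory is released only when the arena itself is destroyed
}

struct Node
{
    int value;
    explicit Node(int v) : value(v)
    {
    }
};

// Usage sketch:
//   Arena arena;
//   Node* n = new (arena) Node(42); // memory released when 'arena' is destroyed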
-
// LclVarDsc constructor. Uses Compiler, so must come after Compiler definition.
-inline
-LclVarDsc::LclVarDsc(Compiler* comp)
- :
- // Initialize the ArgRegs to REG_STK.
- // The morph will do the right thing to change
+inline LclVarDsc::LclVarDsc(Compiler* comp)
+ : // Initialize the ArgRegs to REG_STK.
+ // The morph will do the right thing to change
// to the right register if passed in register.
- _lvArgReg(REG_STK),
+ _lvArgReg(REG_STK)
+ ,
#if FEATURE_MULTIREG_ARGS
- _lvOtherArgReg(REG_STK),
+ _lvOtherArgReg(REG_STK)
+ ,
#endif // FEATURE_MULTIREG_ARGS
#if ASSERTION_PROP
- lvRefBlks(BlockSetOps::UninitVal()),
+ lvRefBlks(BlockSetOps::UninitVal())
+ ,
#endif // ASSERTION_PROP
lvPerSsaData(comp->getAllocator())
{
@@ -9074,16 +8968,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// Values used to mark the types a stack slot is used for
-const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int
-const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long
-const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float
-const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float
-const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer
-const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer
-const unsigned TYPE_REF_STC = 0x40; // slot used as a struct
-const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type
+const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int
+const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long
+const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float
+const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float
+const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer
+const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer
+const unsigned TYPE_REF_STC = 0x40; // slot used as a struct
+const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type
-//const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken
+// const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken
/*****************************************************************************
*
@@ -9092,16 +8986,16 @@ const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type
#if DISPLAY_SIZES
-extern size_t grossVMsize;
-extern size_t grossNCsize;
-extern size_t totalNCsize;
+extern size_t grossVMsize;
+extern size_t grossNCsize;
+extern size_t totalNCsize;
-extern unsigned genMethodICnt;
-extern unsigned genMethodNCnt;
-extern size_t gcHeaderISize;
-extern size_t gcPtrMapISize;
-extern size_t gcHeaderNSize;
-extern size_t gcPtrMapNSize;
+extern unsigned genMethodICnt;
+extern unsigned genMethodNCnt;
+extern size_t gcHeaderISize;
+extern size_t gcPtrMapISize;
+extern size_t gcHeaderNSize;
+extern size_t gcPtrMapNSize;
#endif // DISPLAY_SIZES
@@ -9111,11 +9005,10 @@ extern size_t gcPtrMapNSize;
*/
#if COUNT_BASIC_BLOCKS
-extern Histogram bbCntTable;
-extern Histogram bbOneBBSizeTable;
+extern Histogram bbCntTable;
+extern Histogram bbOneBBSizeTable;
#endif
-
/*****************************************************************************
*
* Used by optFindNaturalLoops to gather statistical information such as
@@ -9127,20 +9020,21 @@ extern Histogram bbOneBBSizeTable;
#if COUNT_LOOPS
-extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops
-extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has
-extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent
-extern unsigned totalLoopCount; // counts the total number of natural loops
-extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops
-extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent
-extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like)
-extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < const)
-extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like)
-extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops
-extern unsigned loopsThisMethod; // counts the number of loops in the current method
-extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method.
-extern Histogram loopCountTable; // Histogram of loop counts
-extern Histogram loopExitCountTable; // Histogram of loop exit counts
+extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops
+extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has
+extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent
+extern unsigned totalLoopCount; // counts the total number of natural loops
+extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops
+extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent
+extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like)
+extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter <
+ // const)
+extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like)
+extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops
+extern unsigned loopsThisMethod; // counts the number of loops in the current method
+extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method.
+extern Histogram loopCountTable; // Histogram of loop counts
+extern Histogram loopExitCountTable; // Histogram of loop exit counts
#endif // COUNT_LOOPS
@@ -9150,17 +9044,17 @@ extern Histogram loopExitCountTable; // Histogram of loop exit counts
#if DATAFLOW_ITER
-extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow
-extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow
+extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow
+extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow
#endif // DATAFLOW_ITER
-#if MEASURE_BLOCK_SIZE
-extern size_t genFlowNodeSize;
-extern size_t genFlowNodeCnt;
+#if MEASURE_BLOCK_SIZE
+extern size_t genFlowNodeSize;
+extern size_t genFlowNodeCnt;
#endif // MEASURE_BLOCK_SIZE
-#if MEASURE_NODE_SIZE
+#if MEASURE_NODE_SIZE
struct NodeSizeStats
{
void Init()
@@ -9171,16 +9065,16 @@ struct NodeSizeStats
}
size_t genTreeNodeCnt;
- size_t genTreeNodeSize; // The size we allocate
- size_t genTreeNodeActualSize; // The actual size of the node. Note that the actual size will likely be smaller
- // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change
- // a smaller node to a larger one. TODO-Cleanup: add stats on
- // SetOper()/ChangeOper() usage to quanitfy this.
+ size_t genTreeNodeSize; // The size we allocate
+ size_t genTreeNodeActualSize; // The actual size of the node. Note that the actual size will likely be smaller
+ // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change
+ // a smaller node to a larger one. TODO-Cleanup: add stats on
+ // SetOper()/ChangeOper() usage to quanitfy this.
};
-extern NodeSizeStats genNodeSizeStats; // Total node size stats
-extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats
-extern Histogram genTreeNcntHist;
-extern Histogram genTreeNsizHist;
+extern NodeSizeStats genNodeSizeStats; // Total node size stats
+extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats
+extern Histogram genTreeNcntHist;
+extern Histogram genTreeNsizHist;
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
@@ -9204,73 +9098,73 @@ extern unsigned fatal_NYI;
#ifdef _TARGET_XARCH_
-const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl;
-const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr;
-const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar;
+const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl;
+const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr;
+const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar;
-const instruction INS_AND = INS_and;
-const instruction INS_OR = INS_or;
-const instruction INS_XOR = INS_xor;
-const instruction INS_NEG = INS_neg;
-const instruction INS_TEST = INS_test;
-const instruction INS_MUL = INS_imul;
-const instruction INS_SIGNED_DIVIDE = INS_idiv;
-const instruction INS_UNSIGNED_DIVIDE = INS_div;
-const instruction INS_BREAKPOINT = INS_int3;
-const instruction INS_ADDC = INS_adc;
-const instruction INS_SUBC = INS_sbb;
-const instruction INS_NOT = INS_not;
+const instruction INS_AND = INS_and;
+const instruction INS_OR = INS_or;
+const instruction INS_XOR = INS_xor;
+const instruction INS_NEG = INS_neg;
+const instruction INS_TEST = INS_test;
+const instruction INS_MUL = INS_imul;
+const instruction INS_SIGNED_DIVIDE = INS_idiv;
+const instruction INS_UNSIGNED_DIVIDE = INS_div;
+const instruction INS_BREAKPOINT = INS_int3;
+const instruction INS_ADDC = INS_adc;
+const instruction INS_SUBC = INS_sbb;
+const instruction INS_NOT = INS_not;
#endif
#ifdef _TARGET_ARM_
-const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl;
-const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr;
-const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr;
+const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl;
+const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr;
+const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr;
-const instruction INS_AND = INS_and;
-const instruction INS_OR = INS_orr;
-const instruction INS_XOR = INS_eor;
-const instruction INS_NEG = INS_rsb;
-const instruction INS_TEST = INS_tst;
-const instruction INS_MUL = INS_mul;
-const instruction INS_SIGNED_DIVIDE = INS_sdiv;
-const instruction INS_UNSIGNED_DIVIDE = INS_udiv;
-const instruction INS_BREAKPOINT = INS_bkpt;
-const instruction INS_ADDC = INS_adc;
-const instruction INS_SUBC = INS_sbc;
-const instruction INS_NOT = INS_mvn;
+const instruction INS_AND = INS_and;
+const instruction INS_OR = INS_orr;
+const instruction INS_XOR = INS_eor;
+const instruction INS_NEG = INS_rsb;
+const instruction INS_TEST = INS_tst;
+const instruction INS_MUL = INS_mul;
+const instruction INS_SIGNED_DIVIDE = INS_sdiv;
+const instruction INS_UNSIGNED_DIVIDE = INS_udiv;
+const instruction INS_BREAKPOINT = INS_bkpt;
+const instruction INS_ADDC = INS_adc;
+const instruction INS_SUBC = INS_sbc;
+const instruction INS_NOT = INS_mvn;
#endif
#ifdef _TARGET_ARM64_
-const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl;
-const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr;
-const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr;
+const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl;
+const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr;
+const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr;
-const instruction INS_AND = INS_and;
-const instruction INS_OR = INS_orr;
-const instruction INS_XOR = INS_eor;
-const instruction INS_NEG = INS_neg;
-const instruction INS_TEST = INS_tst;
-const instruction INS_MUL = INS_mul;
-const instruction INS_SIGNED_DIVIDE = INS_sdiv;
-const instruction INS_UNSIGNED_DIVIDE = INS_udiv;
-const instruction INS_BREAKPOINT = INS_bkpt;
-const instruction INS_ADDC = INS_adc;
-const instruction INS_SUBC = INS_sbc;
-const instruction INS_NOT = INS_mvn;
+const instruction INS_AND = INS_and;
+const instruction INS_OR = INS_orr;
+const instruction INS_XOR = INS_eor;
+const instruction INS_NEG = INS_neg;
+const instruction INS_TEST = INS_tst;
+const instruction INS_MUL = INS_mul;
+const instruction INS_SIGNED_DIVIDE = INS_sdiv;
+const instruction INS_UNSIGNED_DIVIDE = INS_udiv;
+const instruction INS_BREAKPOINT = INS_bkpt;
+const instruction INS_ADDC = INS_adc;
+const instruction INS_SUBC = INS_sbc;
+const instruction INS_NOT = INS_mvn;
#endif
/*****************************************************************************/
-extern const BYTE genTypeSizes[];
-extern const BYTE genTypeAlignments[];
-extern const BYTE genTypeStSzs[];
-extern const BYTE genActualTypes[];
+extern const BYTE genTypeSizes[];
+extern const BYTE genTypeAlignments[];
+extern const BYTE genTypeStSzs[];
+extern const BYTE genActualTypes[];
/*****************************************************************************/
@@ -9278,16 +9172,16 @@ extern const BYTE genActualTypes[];
// the probing loop generated for very large stack frames (see `getVeryLargeFrameSize`).
#ifdef _TARGET_ARM_
-#define VERY_LARGE_FRAME_SIZE_REG_MASK (RBM_R4 | RBM_R5 | RBM_R6)
+#define VERY_LARGE_FRAME_SIZE_REG_MASK (RBM_R4 | RBM_R5 | RBM_R6)
#elif defined(_TARGET_ARM64_)
-#define VERY_LARGE_FRAME_SIZE_REG_MASK (RBM_R9 | RBM_R10 | RBM_R11)
+#define VERY_LARGE_FRAME_SIZE_REG_MASK (RBM_R9 | RBM_R10 | RBM_R11)
#endif
/*****************************************************************************/
-#define REG_CORRUPT regNumber(REG_NA+1)
-#define RBM_CORRUPT (RBM_ILLEGAL|regMaskTP(1))
-#define REG_PAIR_CORRUPT regPairNo(REG_PAIR_NONE+1)
+#define REG_CORRUPT regNumber(REG_NA + 1)
+#define RBM_CORRUPT (RBM_ILLEGAL | regMaskTP(1))
+#define REG_PAIR_CORRUPT regPairNo(REG_PAIR_NONE + 1)
/*****************************************************************************/
@@ -9301,15 +9195,15 @@ extern BasicBlock dummyBB;
// __stmt: a GT_STMT type GenTree*
// __node: a GenTree*, already declared, that gets updated with each node in the statement, in execution order
-#define foreach_treenode_execution_order(__node, __stmt) \
+#define foreach_treenode_execution_order(__node, __stmt) \
for ((__node) = (__stmt)->gtStmt.gtStmtList; (__node); (__node) = (__node)->gtNext)
// foreach_block: An iterator over all blocks in the function.
// __compiler: the Compiler* object
// __block : a BasicBlock*, already declared, that gets updated each iteration.
-#define foreach_block(__compiler, __block) \
- for ((__block) = (__compiler)->fgFirstBB; (__block); (__block) = (__block)->bbNext)
+#define foreach_block(__compiler, __block) \
+ for ((__block) = (__compiler)->fgFirstBB; (__block); (__block) = (__block)->bbNext)
/*****************************************************************************/
/*****************************************************************************/
@@ -9335,82 +9229,82 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* See the function definition comment for more details.
*/
-void cBlock(Compiler* comp, BasicBlock* block);
-void cBlocks(Compiler* comp);
-void cBlocksV(Compiler* comp);
-void cTree(Compiler* comp, GenTree* tree);
-void cTrees(Compiler* comp);
-void cEH(Compiler* comp);
-void cVar(Compiler* comp, unsigned lclNum);
-void cVarDsc(Compiler* comp, LclVarDsc* varDsc);
-void cVars(Compiler* comp);
-void cVarsFinal(Compiler* comp);
-void cBlockPreds(Compiler* comp, BasicBlock* block);
-void cReach(Compiler* comp);
-void cDoms(Compiler* comp);
-void cLiveness(Compiler* comp);
-void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars);
-
-void cFuncIR(Compiler* comp);
-void cBlockIR(Compiler* comp, BasicBlock* block);
-void cLoopIR(Compiler* comp, Compiler::LoopDsc* loop);
-void cTreeIR(Compiler* comp, GenTree* tree);
-int cTreeTypeIR(Compiler *comp, GenTree *tree);
-int cTreeKindsIR(Compiler *comp, GenTree *tree);
-int cTreeFlagsIR(Compiler *comp, GenTree *tree);
-int cOperandIR(Compiler* comp, GenTree* operand);
-int cLeafIR(Compiler *comp, GenTree* tree);
-int cIndirIR(Compiler *comp, GenTree* tree);
-int cListIR(Compiler* comp, GenTree* list);
-int cSsaNumIR(Compiler *comp, GenTree *tree);
-int cValNumIR(Compiler *comp, GenTree *tree);
-int cDependsIR(Compiler* comp, GenTree* comma, bool *first);
-
-void dBlock(BasicBlock* block);
-void dBlocks();
-void dBlocksV();
-void dTree(GenTree* tree);
-void dTrees();
-void dEH();
-void dVar(unsigned lclNum);
-void dVarDsc(LclVarDsc* varDsc);
-void dVars();
-void dVarsFinal();
-void dBlockPreds(BasicBlock* block);
-void dReach();
-void dDoms();
-void dLiveness();
-void dCVarSet(VARSET_VALARG_TP vars);
-
-void dVarSet(VARSET_VALARG_TP vars);
-void dRegMask(regMaskTP mask);
-
-void dFuncIR();
-void dBlockIR(BasicBlock* block);
-void dTreeIR(GenTree* tree);
-void dLoopIR(Compiler::LoopDsc* loop);
-void dLoopNumIR(unsigned loopNum);
-int dTabStopIR(int curr, int tabstop);
-int dTreeTypeIR(GenTree *tree);
-int dTreeKindsIR(GenTree *tree);
-int dTreeFlagsIR(GenTree *tree);
-int dOperandIR(GenTree* operand);
-int dLeafIR(GenTree* tree);
-int dIndirIR(GenTree* tree);
-int dListIR(GenTree* list);
-int dSsaNumIR(GenTree *tree);
-int dValNumIR(GenTree *tree);
-int dDependsIR(GenTree* comma);
-void dFormatIR();
-
-GenTree* dFindTree(GenTree* tree, unsigned id);
-GenTree* dFindTree(unsigned id);
+void cBlock(Compiler* comp, BasicBlock* block);
+void cBlocks(Compiler* comp);
+void cBlocksV(Compiler* comp);
+void cTree(Compiler* comp, GenTree* tree);
+void cTrees(Compiler* comp);
+void cEH(Compiler* comp);
+void cVar(Compiler* comp, unsigned lclNum);
+void cVarDsc(Compiler* comp, LclVarDsc* varDsc);
+void cVars(Compiler* comp);
+void cVarsFinal(Compiler* comp);
+void cBlockPreds(Compiler* comp, BasicBlock* block);
+void cReach(Compiler* comp);
+void cDoms(Compiler* comp);
+void cLiveness(Compiler* comp);
+void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars);
+
+void cFuncIR(Compiler* comp);
+void cBlockIR(Compiler* comp, BasicBlock* block);
+void cLoopIR(Compiler* comp, Compiler::LoopDsc* loop);
+void cTreeIR(Compiler* comp, GenTree* tree);
+int cTreeTypeIR(Compiler* comp, GenTree* tree);
+int cTreeKindsIR(Compiler* comp, GenTree* tree);
+int cTreeFlagsIR(Compiler* comp, GenTree* tree);
+int cOperandIR(Compiler* comp, GenTree* operand);
+int cLeafIR(Compiler* comp, GenTree* tree);
+int cIndirIR(Compiler* comp, GenTree* tree);
+int cListIR(Compiler* comp, GenTree* list);
+int cSsaNumIR(Compiler* comp, GenTree* tree);
+int cValNumIR(Compiler* comp, GenTree* tree);
+int cDependsIR(Compiler* comp, GenTree* comma, bool* first);
+
+void dBlock(BasicBlock* block);
+void dBlocks();
+void dBlocksV();
+void dTree(GenTree* tree);
+void dTrees();
+void dEH();
+void dVar(unsigned lclNum);
+void dVarDsc(LclVarDsc* varDsc);
+void dVars();
+void dVarsFinal();
+void dBlockPreds(BasicBlock* block);
+void dReach();
+void dDoms();
+void dLiveness();
+void dCVarSet(VARSET_VALARG_TP vars);
+
+void dVarSet(VARSET_VALARG_TP vars);
+void dRegMask(regMaskTP mask);
+
+void dFuncIR();
+void dBlockIR(BasicBlock* block);
+void dTreeIR(GenTree* tree);
+void dLoopIR(Compiler::LoopDsc* loop);
+void dLoopNumIR(unsigned loopNum);
+int dTabStopIR(int curr, int tabstop);
+int dTreeTypeIR(GenTree* tree);
+int dTreeKindsIR(GenTree* tree);
+int dTreeFlagsIR(GenTree* tree);
+int dOperandIR(GenTree* operand);
+int dLeafIR(GenTree* tree);
+int dIndirIR(GenTree* tree);
+int dListIR(GenTree* list);
+int dSsaNumIR(GenTree* tree);
+int dValNumIR(GenTree* tree);
+int dDependsIR(GenTree* comma);
+void dFormatIR();
+
+GenTree* dFindTree(GenTree* tree, unsigned id);
+GenTree* dFindTree(unsigned id);
GenTreeStmt* dFindStmt(unsigned id);
-BasicBlock* dFindBlock(unsigned bbNum);
+BasicBlock* dFindBlock(unsigned bbNum);
#endif // DEBUG
-#include "compiler.hpp" // All the shared inline functions
+#include "compiler.hpp" // All the shared inline functions
/*****************************************************************************/
#endif //_COMPILER_H_
diff --git a/src/jit/compiler.hpp b/src/jit/compiler.hpp
index acf8896c72..a2af592733 100644
--- a/src/jit/compiler.hpp
+++ b/src/jit/compiler.hpp
@@ -34,8 +34,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
/*****************************************************************************/
-inline
-bool getInlinePInvokeEnabled()
+inline bool getInlinePInvokeEnabled()
{
#ifdef DEBUG
return JitConfig.JitPInvokeEnabled() && !JitConfig.StressCOMCall();
@@ -44,8 +43,7 @@ bool getInlinePInvokeEnabled()
#endif
}
-inline
-bool getInlinePInvokeCheckEnabled()
+inline bool getInlinePInvokeCheckEnabled()
{
#ifdef DEBUG
return JitConfig.JitPInvokeCheckEnabled() != 0;
@@ -55,37 +53,34 @@ bool getInlinePInvokeCheckEnabled()
}
// Enforce float narrowing for buggy compilers (notably preWhidbey VC)
-inline
-float forceCastToFloat(double d)
-{
+inline float forceCastToFloat(double d)
+{
Volatile<float> f = (float)d;
return f;
}
// Enforce UInt32 narrowing for buggy compilers (notably Whidbey Beta 2 LKG)
-inline
-UINT32 forceCastToUInt32(double d)
-{
+inline UINT32 forceCastToUInt32(double d)
+{
Volatile<UINT32> u = (UINT32)d;
return u;
}
enum RoundLevel
{
- ROUND_NEVER = 0, // Never round
- ROUND_CMP_CONST = 1, // Round values compared against constants
- ROUND_CMP = 2, // Round comparands and return values
- ROUND_ALWAYS = 3, // Round always
+ ROUND_NEVER = 0, // Never round
+ ROUND_CMP_CONST = 1, // Round values compared against constants
+ ROUND_CMP = 2, // Round comparands and return values
+ ROUND_ALWAYS = 3, // Round always
COUNT_ROUND_LEVEL,
DEFAULT_ROUND_LEVEL = ROUND_NEVER
};
-inline
-RoundLevel getRoundFloatLevel()
+inline RoundLevel getRoundFloatLevel()
{
#ifdef DEBUG
- return (RoundLevel) JitConfig.JitRoundFloat();
+ return (RoundLevel)JitConfig.JitRoundFloat();
#else
return DEFAULT_ROUND_LEVEL;
#endif
@@ -97,9 +92,8 @@ RoundLevel getRoundFloatLevel()
* Return the lowest bit that is set
*/
-template<typename T>
-inline
-T genFindLowestBit(T value)
+template <typename T>
+inline T genFindLowestBit(T value)
{
return (value & (0 - value));
}
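
As an aside on the reformatted helper above: genFindLowestBit relies on the two's-complement identity that value and (0 - value) agree only on the lowest set bit, so ANDing them isolates that bit; spelling it 0 - value rather than -value presumably just sidesteps warnings about negating an unsigned operand. A minimal standalone sketch of the identity (plain C++, not JIT code; the name lowestBit is illustrative):

    #include <cassert>
    #include <cstdint>

    // In two's complement, v and (0 - v) agree only on the lowest set bit,
    // so the AND keeps exactly that bit (and yields 0 for v == 0).
    static uint64_t lowestBit(uint64_t v)
    {
        return v & (0 - v);
    }

    int main()
    {
        assert(lowestBit(0x2C) == 0x04); // 0b101100: lowest set bit is bit 2
        assert(lowestBit(0x80) == 0x80); // a single set bit maps to itself
        assert(lowestBit(0) == 0);       // no bits set, nothing to isolate
        return 0;
    }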
@@ -112,8 +106,7 @@ T genFindLowestBit(T value)
* compiler intrinsics, but our CRT header file intrin.h doesn't define these for ARM64 yet.
*/
-inline
-unsigned int genFindHighestBit(unsigned int mask)
+inline unsigned int genFindHighestBit(unsigned int mask)
{
assert(mask != 0);
unsigned int bit = 1U << ((sizeof(unsigned int) * 8) - 1); // start looking at the top
@@ -124,8 +117,7 @@ unsigned int genFindHighestBit(unsigned int mask)
return bit;
}
-inline
-unsigned __int64 genFindHighestBit(unsigned __int64 mask)
+inline unsigned __int64 genFindHighestBit(unsigned __int64 mask)
{
assert(mask != 0);
unsigned __int64 bit = 1ULL << ((sizeof(unsigned __int64) * 8) - 1); // start looking at the top
@@ -157,17 +149,15 @@ unsigned __int64 genFindHighestBit(unsigned __int64 mask)
}
#endif // 0
-
/*****************************************************************************
*
* Return true if the given 64-bit value has exactly zero or one bits set.
*/
template <typename T>
-inline
-BOOL genMaxOneBit(T value)
+inline BOOL genMaxOneBit(T value)
{
- return (value & (value-1))==0;
+ return (value & (value - 1)) == 0;
}
/*****************************************************************************
@@ -175,10 +165,9 @@ BOOL genMaxOneBit(T value)
* Return true if the given 32-bit value has exactly zero or one bits set.
*/
-inline
-BOOL genMaxOneBit(unsigned value)
+inline BOOL genMaxOneBit(unsigned value)
{
- return (value & (value-1))==0;
+ return (value & (value - 1)) == 0;
}
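
genMaxOneBit uses the complementary trick: value & (value - 1) clears the lowest set bit, so the result is zero exactly when the input started with zero or one bits set. A hedged standalone check (illustrative names, not JIT identifiers):

    #include <cassert>
    #include <cstdint>

    // Clearing the lowest set bit leaves zero iff at most one bit was set.
    static bool maxOneBit(uint64_t v)
    {
        return (v & (v - 1)) == 0;
    }

    int main()
    {
        assert(maxOneBit(0));     // no bits set
        assert(maxOneBit(0x40));  // exactly one bit set
        assert(!maxOneBit(0x41)); // two bits set
        return 0;
    }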
/*****************************************************************************
@@ -187,8 +176,7 @@ BOOL genMaxOneBit(unsigned value)
* bit, in other words return the logarithm in base 2 of the given value.
*/
-inline
-unsigned genLog2(unsigned value)
+inline unsigned genLog2(unsigned value)
{
return BitPosition(value);
}
@@ -199,20 +187,19 @@ unsigned genLog2(unsigned value)
* bit, in other words return the logarithm in base 2 of the given value.
*/
-inline
-unsigned genLog2(unsigned __int64 value)
+inline unsigned genLog2(unsigned __int64 value)
{
- unsigned lo32 = (unsigned) value;
- unsigned hi32 = (unsigned) (value >> 32);
+ unsigned lo32 = (unsigned)value;
+ unsigned hi32 = (unsigned)(value >> 32);
- if (lo32 != 0)
+ if (lo32 != 0)
{
assert(hi32 == 0);
- return genLog2(lo32);
+ return genLog2(lo32);
}
else
{
- return genLog2(hi32) + 32;
+ return genLog2(hi32) + 32;
}
}
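
The 64-bit genLog2 splits its argument into 32-bit halves and defers to the 32-bit overload; the assert(hi32 == 0) suggests it expects a value with a single set bit, matching what BitPosition requires. A rough standalone sketch of the same split, with a portable loop standing in for BitPosition (an assumption, not the JIT's implementation):

    #include <cassert>
    #include <cstdint>

    // Stand-in for BitPosition: index of the one set bit in a power of two.
    static unsigned bitPosition32(uint32_t v)
    {
        assert(v != 0 && (v & (v - 1)) == 0); // exactly one bit set
        unsigned pos = 0;
        while ((v & 1) == 0)
        {
            v >>= 1;
            ++pos;
        }
        return pos;
    }

    // Same shape as the helper: use the low half if populated, else high + 32.
    static unsigned log2OfSingleBit(uint64_t value)
    {
        uint32_t lo32 = (uint32_t)value;
        uint32_t hi32 = (uint32_t)(value >> 32);
        return (lo32 != 0) ? bitPosition32(lo32) : bitPosition32(hi32) + 32;
    }

    int main()
    {
        assert(log2OfSingleBit(0x1) == 0);
        assert(log2OfSingleBit(0x100000000ULL) == 32);
        assert(log2OfSingleBit(0x8000000000000000ULL) == 63);
        return 0;
    }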
@@ -221,10 +208,9 @@ unsigned genLog2(unsigned __int64 value)
* Return the lowest bit that is set in the given register mask.
*/
-inline
-regMaskTP genFindLowestReg(regMaskTP value)
+inline regMaskTP genFindLowestReg(regMaskTP value)
{
- return (regMaskTP)genFindLowestBit(value);
+ return (regMaskTP)genFindLowestBit(value);
}
/*****************************************************************************
@@ -232,10 +218,10 @@ regMaskTP genFindLowestReg(regMaskTP value)
* A rather simple routine that counts the number of bits in a given number.
*/
-template<typename T>
+template <typename T>
inline unsigned genCountBits(T bits)
{
- unsigned cnt = 0;
+ unsigned cnt = 0;
while (bits)
{
@@ -243,53 +229,48 @@ inline unsigned genCountBits(T bits)
bits -= genFindLowestBit(bits);
}
- return cnt;
+ return cnt;
}
/*****************************************************************************
*
* Given 3 masks value, end, start, returns the bits of value between start
* and end (exclusive).
- *
+ *
* value[bitNum(end) - 1, bitNum(start) + 1]
*/
-inline
-unsigned __int64 BitsBetween(unsigned __int64 value, unsigned __int64 end, unsigned __int64 start)
+inline unsigned __int64 BitsBetween(unsigned __int64 value, unsigned __int64 end, unsigned __int64 start)
{
assert(start != 0);
assert(start < end);
assert((start & (start - 1)) == 0);
assert((end & (end - 1)) == 0);
- return value &
- ~((start - 1) | start) & // Ones to the left of set bit in the start mask.
- (end - 1); // Ones to the right of set bit in the end mask.
-
+ return value & ~((start - 1) | start) & // Ones to the left of set bit in the start mask.
+ (end - 1); // Ones to the right of set bit in the end mask.
}
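
The BitsBetween arithmetic is easier to see with concrete masks: ~((start - 1) | start) clears the start bit and everything below it, while (end - 1) clears the end bit and everything above it, so only the bits strictly between the two single-bit markers survive. A worked standalone example (illustrative only):

    #include <cassert>
    #include <cstdint>

    // Keep the bits of 'value' strictly between the single set bits of
    // 'start' and 'end' (both exclusive), mirroring the helper above.
    static uint64_t bitsBetween(uint64_t value, uint64_t end, uint64_t start)
    {
        assert(start != 0);
        assert(start < end);
        assert((start & (start - 1)) == 0); // single bit
        assert((end & (end - 1)) == 0);     // single bit
        return value & ~((start - 1) | start) // clear start and below
                     & (end - 1);             // clear end and above
    }

    int main()
    {
        // start = bit 1, end = bit 5: only bits 2..4 of 0xFF survive (0x1C).
        assert(bitsBetween(0xFF, 1u << 5, 1u << 1) == 0x1C);
        return 0;
    }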
/*****************************************************************************/
-inline
-bool jitIsScaleIndexMul(size_t val)
+inline bool jitIsScaleIndexMul(size_t val)
{
switch (val)
{
- case 1:
- case 2:
- case 4:
- case 8:
- return true;
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ return true;
- default:
- return false;
+ default:
+ return false;
}
}
// Returns "tree" iff "val" is a valid addressing mode scale shift amount on
// the target architecture.
-inline
-bool jitIsScaleIndexShift(ssize_t val)
+inline bool jitIsScaleIndexShift(ssize_t val)
{
// It happens that this is the right test for all our current targets: x86, x64 and ARM.
// This test would become target-dependent if we added a new target with a different constraint.
@@ -302,8 +283,7 @@ bool jitIsScaleIndexShift(ssize_t val)
*/
/* static */
-inline
-bool Compiler::jitIsBetween(unsigned value, unsigned start, unsigned end)
+inline bool Compiler::jitIsBetween(unsigned value, unsigned start, unsigned end)
{
return start <= value && value < end;
}
@@ -314,8 +294,7 @@ bool Compiler::jitIsBetween(unsigned value, unsigned start, unsig
*/
/* static */
-inline
-bool Compiler::jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end)
+inline bool Compiler::jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end)
{
return start <= value && value <= end;
}
@@ -323,63 +302,61 @@ bool Compiler::jitIsBetweenInclusive(unsigned value, unsigned sta
/******************************************************************************************
* Return the EH descriptor for the given region index.
*/
-inline EHblkDsc* Compiler::ehGetDsc(unsigned regionIndex)
+inline EHblkDsc* Compiler::ehGetDsc(unsigned regionIndex)
{
assert(regionIndex < compHndBBtabCount);
return &compHndBBtab[regionIndex];
}
-
/******************************************************************************************
* Return the EH descriptor index of the enclosing try, for the given region index.
*/
-inline unsigned Compiler::ehGetEnclosingTryIndex(unsigned regionIndex)
+inline unsigned Compiler::ehGetEnclosingTryIndex(unsigned regionIndex)
{
return ehGetDsc(regionIndex)->ebdEnclosingTryIndex;
}
-
/******************************************************************************************
* Return the EH descriptor index of the enclosing handler, for the given region index.
*/
-inline unsigned Compiler::ehGetEnclosingHndIndex(unsigned regionIndex)
+inline unsigned Compiler::ehGetEnclosingHndIndex(unsigned regionIndex)
{
return ehGetDsc(regionIndex)->ebdEnclosingHndIndex;
}
-
/******************************************************************************************
* Return the EH index given a region descriptor.
*/
-inline unsigned Compiler::ehGetIndex(EHblkDsc* ehDsc)
+inline unsigned Compiler::ehGetIndex(EHblkDsc* ehDsc)
{
- assert(compHndBBtab <= ehDsc &&
- ehDsc < compHndBBtab + compHndBBtabCount);
+ assert(compHndBBtab <= ehDsc && ehDsc < compHndBBtab + compHndBBtabCount);
return (unsigned)(ehDsc - compHndBBtab);
}
-
/******************************************************************************************
* Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of
* (or nullptr if this block is not in a 'try' region).
*/
-inline EHblkDsc* Compiler::ehGetBlockTryDsc(BasicBlock* block)
+inline EHblkDsc* Compiler::ehGetBlockTryDsc(BasicBlock* block)
{
if (!block->hasTryIndex())
+ {
return nullptr;
+ }
return ehGetDsc(block->getTryIndex());
}
-
/******************************************************************************************
* Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of
* (or nullptr if this block is not in a filter or handler region).
*/
-inline EHblkDsc* Compiler::ehGetBlockHndDsc(BasicBlock* block)
+inline EHblkDsc* Compiler::ehGetBlockHndDsc(BasicBlock* block)
{
if (!block->hasHndIndex())
+ {
return nullptr;
+ }
return ehGetDsc(block->getHndIndex());
}
@@ -391,7 +368,7 @@ inline EHblkDsc* Compiler::ehGetBlockHndDsc(BasicBlock* block)
* This is only valid during codegen.
*
*/
-inline FuncInfoDsc * Compiler::funCurrentFunc()
+inline FuncInfoDsc* Compiler::funCurrentFunc()
{
return funGetFunc(compCurrFuncIdx);
}
@@ -401,7 +378,7 @@ inline FuncInfoDsc * Compiler::funCurrentFunc()
* This is only valid after funclets are created.
*
*/
-inline void Compiler::funSetCurrentFunc(unsigned funcIdx)
+inline void Compiler::funSetCurrentFunc(unsigned funcIdx)
{
assert(fgFuncletsCreated);
assert(FitsIn<unsigned short>(funcIdx));
@@ -414,9 +391,9 @@ inline void Compiler::funSetCurrentFunc(unsigned funcIdx)
* This is only valid after funclets are created.
*
*/
-inline FuncInfoDsc * Compiler::funGetFunc(unsigned funcIdx)
-{
- assert(fgFuncletsCreated);
+inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx)
+{
+ assert(fgFuncletsCreated);
assert(funcIdx < compFuncInfoCount);
return &compFuncInfos[funcIdx];
}
@@ -429,12 +406,12 @@ inline FuncInfoDsc * Compiler::funGetFunc(unsigned funcIdx)
* if this should return the filter funclet or the filter handler funclet.
*
*/
-inline unsigned Compiler::funGetFuncIdx(BasicBlock * block)
+inline unsigned Compiler::funGetFuncIdx(BasicBlock* block)
{
- assert(fgFuncletsCreated);
+ assert(fgFuncletsCreated);
assert(block->bbFlags & BBF_FUNCLET_BEG);
- EHblkDsc * eh = ehGetDsc(block->getHndIndex());
+ EHblkDsc* eh = ehGetDsc(block->getHndIndex());
unsigned int funcIdx = eh->ebdFuncIndex;
if (eh->ebdHndBeg != block)
{
@@ -459,7 +436,7 @@ inline unsigned Compiler::funGetFuncIdx(BasicBlock * block)
* always the root function.
*
*/
-inline FuncInfoDsc * Compiler::funCurrentFunc()
+inline FuncInfoDsc* Compiler::funCurrentFunc()
{
return &compFuncInfoRoot;
}
@@ -469,7 +446,7 @@ inline FuncInfoDsc * Compiler::funCurrentFunc()
* This is only valid after funclets are created.
*
*/
-inline void Compiler::funSetCurrentFunc(unsigned funcIdx)
+inline void Compiler::funSetCurrentFunc(unsigned funcIdx)
{
assert(funcIdx == 0);
}
@@ -479,8 +456,8 @@ inline void Compiler::funSetCurrentFunc(unsigned funcIdx)
* This is only valid after funclets are created.
*
*/
-inline FuncInfoDsc * Compiler::funGetFunc(unsigned funcIdx)
-{
+inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx)
+{
assert(funcIdx == 0);
return &compFuncInfoRoot;
}
@@ -489,7 +466,7 @@ inline FuncInfoDsc * Compiler::funGetFunc(unsigned funcIdx)
* No funclets, so always 0.
*
*/
-inline unsigned Compiler::funGetFuncIdx(BasicBlock * block)
+inline unsigned Compiler::funGetFuncIdx(BasicBlock* block)
{
return 0;
}
@@ -501,10 +478,9 @@ inline unsigned Compiler::funGetFuncIdx(BasicBlock * block)
* Map a register mask to a register number
*/
-inline
-regNumber genRegNumFromMask(regMaskTP mask)
+inline regNumber genRegNumFromMask(regMaskTP mask)
{
- assert(mask != 0); // Must have one bit set, so can't have a mask of zero
+ assert(mask != 0); // Must have one bit set, so can't have a mask of zero
/* Convert the mask to a register number */
@@ -514,7 +490,7 @@ regNumber genRegNumFromMask(regMaskTP mask)
assert(genRegMask(regNum) == mask);
- return regNum;
+ return regNum;
}
/*****************************************************************************
@@ -522,14 +498,12 @@ regNumber genRegNumFromMask(regMaskTP mask)
* Return the size in bytes of the given type.
*/
-extern const
-BYTE genTypeSizes[TYP_COUNT];
+extern const BYTE genTypeSizes[TYP_COUNT];
-template<class T>
-inline unsigned
-genTypeSize(T type)
+template <class T>
+inline unsigned genTypeSize(T type)
{
- assert((unsigned)TypeGet(type) < sizeof(genTypeSizes)/sizeof(genTypeSizes[0]));
+ assert((unsigned)TypeGet(type) < sizeof(genTypeSizes) / sizeof(genTypeSizes[0]));
return genTypeSizes[TypeGet(type)];
}
@@ -540,13 +514,11 @@ genTypeSize(T type)
* returns 1 for 32-bit types and 2 for 64-bit types.
*/
-extern const
-BYTE genTypeStSzs[TYP_COUNT];
+extern const BYTE genTypeStSzs[TYP_COUNT];
-inline
-unsigned genTypeStSz(var_types type)
+inline unsigned genTypeStSz(var_types type)
{
- assert((unsigned)type < sizeof(genTypeStSzs)/sizeof(genTypeStSzs[0]));
+ assert((unsigned)type < sizeof(genTypeStSzs) / sizeof(genTypeStSzs[0]));
return genTypeStSzs[type];
}
@@ -556,24 +528,21 @@ unsigned genTypeStSz(var_types type)
* Return the number of registers required to hold a value of the given type.
*/
-
/*****************************************************************************
*
* The following function maps a 'precise' type to an actual type as seen
* by the VM (for example, 'byte' maps to 'int').
*/
-extern const
-BYTE genActualTypes[TYP_COUNT];
+extern const BYTE genActualTypes[TYP_COUNT];
-inline
-var_types genActualType(var_types type)
+inline var_types genActualType(var_types type)
{
/* Spot check to make certain the table is in synch with the enum */
assert(genActualTypes[TYP_DOUBLE] == TYP_DOUBLE);
- assert(genActualTypes[TYP_FNC ] == TYP_FNC);
- assert(genActualTypes[TYP_REF ] == TYP_REF);
+ assert(genActualTypes[TYP_FNC] == TYP_FNC);
+ assert(genActualTypes[TYP_REF] == TYP_REF);
assert((unsigned)type < sizeof(genActualTypes));
return (var_types)genActualTypes[type];
@@ -581,18 +550,26 @@ var_types genActualType(var_types type)
/*****************************************************************************/
-inline
-var_types genUnsignedType(var_types type)
+inline var_types genUnsignedType(var_types type)
{
/* Force signed types into corresponding unsigned type */
switch (type)
{
- case TYP_BYTE: type = TYP_UBYTE; break;
- case TYP_SHORT: type = TYP_CHAR; break;
- case TYP_INT: type = TYP_UINT; break;
- case TYP_LONG: type = TYP_ULONG; break;
- default: break;
+ case TYP_BYTE:
+ type = TYP_UBYTE;
+ break;
+ case TYP_SHORT:
+ type = TYP_CHAR;
+ break;
+ case TYP_INT:
+ type = TYP_UINT;
+ break;
+ case TYP_LONG:
+ type = TYP_ULONG;
+ break;
+ default:
+ break;
}
return type;
@@ -600,17 +577,21 @@ var_types genUnsignedType(var_types type)
/*****************************************************************************/
-inline
-var_types genSignedType(var_types type)
+inline var_types genSignedType(var_types type)
{
/* Force non-small unsigned type into corresponding signed type */
/* Note that we leave the small types alone */
switch (type)
{
- case TYP_UINT: type = TYP_INT; break;
- case TYP_ULONG: type = TYP_LONG; break;
- default: break;
+ case TYP_UINT:
+ type = TYP_INT;
+ break;
+ case TYP_ULONG:
+ type = TYP_LONG;
+ break;
+ default:
+ break;
}
return type;
@@ -620,39 +601,35 @@ var_types genSignedType(var_types type)
* Can this type be passed as a parameter in a register?
*/
-inline
-bool isRegParamType(var_types type)
+inline bool isRegParamType(var_types type)
{
#if defined(_TARGET_X86_)
- return (type <= TYP_INT ||
- type == TYP_REF ||
- type == TYP_BYREF);
-#else // !_TARGET_X86_
+ return (type <= TYP_INT || type == TYP_REF || type == TYP_BYREF);
+#else // !_TARGET_X86_
return true;
#endif // !_TARGET_X86_
}
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
/*****************************************************************************/
- // Returns true if 'type' is a struct that can be enregistered for call args
- // or can be returned by value in multiple registers.
- // if 'type' is not a struct the return value will be false.
- //
- // Arguments:
- // type - the basic jit var_type for the item being queried
- // typeClass - the handle for the struct when 'type' is TYP_STRUCT
- // typeSize - Out param (if non-null) is updated with the size of 'type'.
- // forReturn - this is true when we asking about a GT_RETURN context;
- // this is false when we are asking about an argument context
- //
-inline
-bool Compiler::VarTypeIsMultiByteAndCanEnreg(var_types type,
- CORINFO_CLASS_HANDLE typeClass,
- unsigned *typeSize,
- bool forReturn)
+// Returns true if 'type' is a struct that can be enregistered for call args
+// or can be returned by value in multiple registers.
+// if 'type' is not a struct the return value will be false.
+//
+// Arguments:
+// type - the basic jit var_type for the item being queried
+// typeClass - the handle for the struct when 'type' is TYP_STRUCT
+// typeSize - Out param (if non-null) is updated with the size of 'type'.
+// forReturn - this is true when we asking about a GT_RETURN context;
+// this is false when we are asking about an argument context
+//
+inline bool Compiler::VarTypeIsMultiByteAndCanEnreg(var_types type,
+ CORINFO_CLASS_HANDLE typeClass,
+ unsigned* typeSize,
+ bool forReturn)
{
- bool result = false;
- unsigned size = 0;
+ bool result = false;
+ unsigned size = 0;
if (varTypeIsStruct(type))
{
@@ -686,19 +663,20 @@ bool Compiler::VarTypeIsMultiByteAndCanEnreg(var_types type,
}
#endif //_TARGET_AMD64_ || _TARGET_ARM64_
-
/*****************************************************************************/
#ifdef DEBUG
-inline
-const char * varTypeGCstring(var_types type)
+inline const char* varTypeGCstring(var_types type)
{
switch (type)
{
- case TYP_REF: return "gcr";
- case TYP_BYREF: return "byr";
- default: return "non";
+ case TYP_REF:
+ return "gcr";
+ case TYP_BYREF:
+ return "byr";
+ default:
+ return "non";
}
}
@@ -706,84 +684,88 @@ const char * varTypeGCstring(var_types type)
/*****************************************************************************/
-const char * varTypeName(var_types);
+const char* varTypeName(var_types);
/*****************************************************************************
*
* Helpers to pull big-endian values out of a byte stream.
*/
-inline unsigned genGetU1(const BYTE *addr)
+inline unsigned genGetU1(const BYTE* addr)
{
- return addr[0];
+ return addr[0];
}
-inline signed genGetI1(const BYTE *addr)
+inline signed genGetI1(const BYTE* addr)
{
- return (signed char)addr[0];
+ return (signed char)addr[0];
}
-inline unsigned genGetU2(const BYTE *addr)
+inline unsigned genGetU2(const BYTE* addr)
{
- return (addr[0] << 8) | addr[1];
+ return (addr[0] << 8) | addr[1];
}
-inline signed genGetI2(const BYTE *addr)
+inline signed genGetI2(const BYTE* addr)
{
- return (signed short)((addr[0] << 8) | addr[1]);
+ return (signed short)((addr[0] << 8) | addr[1]);
}
-inline unsigned genGetU4(const BYTE *addr)
+inline unsigned genGetU4(const BYTE* addr)
{
- return (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+ return (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
}
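
The genGetU1/genGetI1/genGetU2/genGetU4 helpers read big-endian values out of a byte stream by shifting the earliest byte into the most significant position. A quick standalone illustration of the same composition (names are illustrative, not JIT identifiers):

    #include <cassert>

    // Big-endian: the first byte in the stream is the most significant.
    static unsigned getU2BigEndian(const unsigned char* addr)
    {
        return (addr[0] << 8) | addr[1];
    }

    static unsigned getU4BigEndian(const unsigned char* addr)
    {
        return ((unsigned)addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
    }

    int main()
    {
        const unsigned char bytes[] = { 0x12, 0x34, 0x56, 0x78 };
        assert(getU2BigEndian(bytes) == 0x1234);
        assert(getU4BigEndian(bytes) == 0x12345678);
        return 0;
    }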
/*****************************************************************************/
// Helpers to pull little-endian values out of a byte stream.
-inline
-unsigned __int8 getU1LittleEndian(const BYTE * ptr)
-{ return *(UNALIGNED unsigned __int8 *)ptr; }
+inline unsigned __int8 getU1LittleEndian(const BYTE* ptr)
+{
+ return *(UNALIGNED unsigned __int8*)ptr;
+}
-inline
-unsigned __int16 getU2LittleEndian(const BYTE * ptr)
-{ return GET_UNALIGNED_VAL16(ptr); }
+inline unsigned __int16 getU2LittleEndian(const BYTE* ptr)
+{
+ return GET_UNALIGNED_VAL16(ptr);
+}
-inline
-unsigned __int32 getU4LittleEndian(const BYTE * ptr)
-{ return GET_UNALIGNED_VAL32(ptr); }
+inline unsigned __int32 getU4LittleEndian(const BYTE* ptr)
+{
+ return GET_UNALIGNED_VAL32(ptr);
+}
-inline
- signed __int8 getI1LittleEndian(const BYTE * ptr)
-{ return *(UNALIGNED signed __int8 *)ptr; }
+inline signed __int8 getI1LittleEndian(const BYTE* ptr)
+{
+ return *(UNALIGNED signed __int8*)ptr;
+}
-inline
- signed __int16 getI2LittleEndian(const BYTE * ptr)
-{ return GET_UNALIGNED_VAL16(ptr); }
+inline signed __int16 getI2LittleEndian(const BYTE* ptr)
+{
+ return GET_UNALIGNED_VAL16(ptr);
+}
-inline
- signed __int32 getI4LittleEndian(const BYTE * ptr)
-{ return GET_UNALIGNED_VAL32(ptr); }
+inline signed __int32 getI4LittleEndian(const BYTE* ptr)
+{
+ return GET_UNALIGNED_VAL32(ptr);
+}
-inline
- signed __int64 getI8LittleEndian(const BYTE * ptr)
-{ return GET_UNALIGNED_VAL64(ptr); }
+inline signed __int64 getI8LittleEndian(const BYTE* ptr)
+{
+ return GET_UNALIGNED_VAL64(ptr);
+}
-inline
-float getR4LittleEndian(const BYTE * ptr)
+inline float getR4LittleEndian(const BYTE* ptr)
{
__int32 val = getI4LittleEndian(ptr);
- return *(float *)&val;
+ return *(float*)&val;
}
-inline
-double getR8LittleEndian(const BYTE * ptr)
+inline double getR8LittleEndian(const BYTE* ptr)
{
__int64 val = getI8LittleEndian(ptr);
- return *(double *)&val;
+ return *(double*)&val;
}
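
getR4LittleEndian and getR8LittleEndian fetch the little-endian integer bit pattern and then reinterpret it as a float or double through a pointer cast. A standalone sketch of the same round trip, spelled with memcpy to keep the type pun portable (an alternative spelling for illustration, not the JIT's code):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Assemble a 64-bit little-endian pattern, then reinterpret it as a double.
    static double r8FromLittleEndianBits(const unsigned char* ptr)
    {
        uint64_t bits = 0;
        for (int i = 7; i >= 0; --i)
        {
            bits = (bits << 8) | ptr[i]; // byte 0 is least significant
        }
        double d;
        std::memcpy(&d, &bits, sizeof(d)); // bit-for-bit reinterpretation
        return d;
    }

    int main()
    {
        // 1.0 is encoded as 0x3FF0000000000000 in IEEE-754 double format.
        const unsigned char one[] = { 0, 0, 0, 0, 0, 0, 0xF0, 0x3F };
        assert(r8FromLittleEndianBits(one) == 1.0);
        return 0;
    }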
-
/*****************************************************************************
*
* Return the bitmask to use in the EXPSET_TP for the CSE with the given CSE index.
@@ -795,20 +777,18 @@ double getR8LittleEndian(const BYTE * ptr)
* This precondition is checked by the assert on the first line of this method.
*/
-inline
-EXPSET_TP genCSEnum2bit(unsigned index)
+inline EXPSET_TP genCSEnum2bit(unsigned index)
{
assert((index > 0) && (index <= EXPSET_SZ));
- return ((EXPSET_TP)1 << (index-1));
+ return ((EXPSET_TP)1 << (index - 1));
}
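
genCSEnum2bit maps a 1-based CSE index to a single-bit mask, so CSE #1 lands in bit 0 of the EXPSET_TP word and the highest valid index lands in the top bit. A standalone sketch of the mapping, assuming a 64-bit set purely for illustration:

    #include <cassert>
    #include <cstdint>

    // 1-based CSE index -> single-bit mask (index 1 maps to bit 0).
    static uint64_t cseNumToBit(unsigned index, unsigned setSize = 64)
    {
        assert((index > 0) && (index <= setSize));
        return (uint64_t)1 << (index - 1);
    }

    int main()
    {
        assert(cseNumToBit(1) == 0x1); // first CSE owns the lowest bit
        assert(cseNumToBit(4) == 0x8); // fourth CSE owns bit 3
        assert(cseNumToBit(64) == 0x8000000000000000ULL);
        return 0;
    }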
-#ifdef DEBUG
-const char * genES2str(EXPSET_TP set);
-const char * refCntWtd2str(unsigned refCntWtd);
+#ifdef DEBUG
+const char* genES2str(EXPSET_TP set);
+const char* refCntWtd2str(unsigned refCntWtd);
#endif
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -821,19 +801,19 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
void* GenTree::operator new(size_t sz, Compiler* comp, genTreeOps oper)
{
-#if SMALL_TREE_NODES
- size_t size = GenTree::s_gtNodeSizes[oper];
+#if SMALL_TREE_NODES
+ size_t size = GenTree::s_gtNodeSizes[oper];
#else
- size_t size = TREE_NODE_SZ_LARGE;
+ size_t size = TREE_NODE_SZ_LARGE;
#endif
-#if MEASURE_NODE_SIZE
- genNodeSizeStats.genTreeNodeCnt += 1;
- genNodeSizeStats.genTreeNodeSize += size;
+#if MEASURE_NODE_SIZE
+ genNodeSizeStats.genTreeNodeCnt += 1;
+ genNodeSizeStats.genTreeNodeSize += size;
genNodeSizeStats.genTreeNodeActualSize += sz;
- genNodeSizeStatsPerFunc.genTreeNodeCnt += 1;
- genNodeSizeStatsPerFunc.genTreeNodeSize += size;
+ genNodeSizeStatsPerFunc.genTreeNodeCnt += 1;
+ genNodeSizeStatsPerFunc.genTreeNodeSize += size;
genNodeSizeStatsPerFunc.genTreeNodeActualSize += sz;
#endif // MEASURE_NODE_SIZE
@@ -842,12 +822,11 @@ void* GenTree::operator new(size_t sz, Compiler* comp, genTreeOps oper)
}
// GenTree constructor
-inline
-GenTree::GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode))
+inline GenTree::GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode))
{
- gtOper = oper;
- gtType = type;
- gtFlags = 0;
+ gtOper = oper;
+ gtType = type;
+ gtFlags = 0;
#ifdef DEBUG
gtDebugFlags = 0;
#endif // DEBUG
@@ -855,27 +834,27 @@ GenTree::GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode))
gtUsedRegs = 0;
#endif // LEGACY_BACKEND
#if FEATURE_ANYCSE
- gtCSEnum = NO_CSE;
+ gtCSEnum = NO_CSE;
#endif // FEATURE_ANYCSE
#if ASSERTION_PROP
ClearAssertion();
#endif
#if FEATURE_STACK_FP_X87
- gtFPlvl = 0;
+ gtFPlvl = 0;
#endif
- gtNext = NULL;
- gtPrev = NULL;
- gtRegNum = REG_NA;
+ gtNext = nullptr;
+ gtPrev = nullptr;
+ gtRegNum = REG_NA;
INDEBUG(gtRegTag = GT_REGTAG_NONE;)
INDEBUG(gtCostsInitialized = false;)
#ifdef DEBUG
-#if SMALL_TREE_NODES
+#if SMALL_TREE_NODES
size_t size = GenTree::s_gtNodeSizes[oper];
- if (size == TREE_NODE_SZ_SMALL && !largeNode)
+ if (size == TREE_NODE_SZ_SMALL && !largeNode)
{
gtDebugFlags |= GTF_DEBUG_NODE_SMALL;
}
@@ -891,8 +870,8 @@ GenTree::GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode))
#endif
#ifdef DEBUG
- gtSeqNum = 0;
- gtTreeID = JitTls::GetCompiler()->compGenTreeID++;
+ gtSeqNum = 0;
+ gtTreeID = JitTls::GetCompiler()->compGenTreeID++;
gtVNPair.SetBoth(ValueNumStore::NoVN);
gtRegTag = GT_REGTAG_NONE;
gtOperSave = GT_NONE;
@@ -901,25 +880,24 @@ GenTree::GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode))
/*****************************************************************************/
-inline
-GenTreeStmt* Compiler::gtNewStmt(GenTreePtr expr, IL_OFFSETX offset)
+inline GenTreeStmt* Compiler::gtNewStmt(GenTreePtr expr, IL_OFFSETX offset)
{
/* NOTE - GT_STMT is now a small node in retail */
- GenTreeStmt* stmt = new(this, GT_STMT) GenTreeStmt(expr, offset);
+ GenTreeStmt* stmt = new (this, GT_STMT) GenTreeStmt(expr, offset);
return stmt;
}
/*****************************************************************************/
-inline
-GenTreePtr Compiler::gtNewOperNode(genTreeOps oper,
- var_types type, GenTreePtr op1, bool doSimplifications)
+inline GenTreePtr Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTreePtr op1, bool doSimplifications)
{
assert((GenTree::OperKind(oper) & (GTK_UNOP | GTK_BINOP)) != 0);
- assert((GenTree::OperKind(oper) & GTK_EXOP) == 0); // Can't use this to construct any types that extend unary/binary operator.
- assert(op1 != NULL || oper == GT_PHI || oper == GT_RETFILT || oper == GT_NOP || (oper == GT_RETURN && type == TYP_VOID));
+ assert((GenTree::OperKind(oper) & GTK_EXOP) ==
+ 0); // Can't use this to construct any types that extend unary/binary operator.
+ assert(op1 != nullptr || oper == GT_PHI || oper == GT_RETFILT || oper == GT_NOP ||
+ (oper == GT_RETURN && type == TYP_VOID));
if (doSimplifications)
{
@@ -949,7 +927,7 @@ GenTreePtr Compiler::gtNewOperNode(genTreeOps oper,
}
}
- GenTreePtr node = new(this, oper) GenTreeOp(oper, type, op1, NULL);
+ GenTreePtr node = new (this, oper) GenTreeOp(oper, type, op1, nullptr);
//
// the GT_ADDR of a Local Variable implies GTF_ADDR_ONSTACK
@@ -962,11 +940,10 @@ GenTreePtr Compiler::gtNewOperNode(genTreeOps oper,
return node;
}
-
// Returns an opcode that is of the largest node size in use.
inline genTreeOps LargeOpOpcode()
{
-#if SMALL_TREE_NODES
+#if SMALL_TREE_NODES
// Allocate a large node
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
#endif
@@ -978,22 +955,19 @@ inline genTreeOps LargeOpOpcode()
* Use to create nodes which may later be morphed to another (big) operator
*/
-inline
-GenTreePtr Compiler::gtNewLargeOperNode(genTreeOps oper,
- var_types type,
- GenTreePtr op1,
- GenTreePtr op2)
+inline GenTreePtr Compiler::gtNewLargeOperNode(genTreeOps oper, var_types type, GenTreePtr op1, GenTreePtr op2)
{
assert((GenTree::OperKind(oper) & (GTK_UNOP | GTK_BINOP)) != 0);
- assert((GenTree::OperKind(oper) & GTK_EXOP) == 0); // Can't use this to construct any types that extend unary/binary operator.
-#if SMALL_TREE_NODES
+ assert((GenTree::OperKind(oper) & GTK_EXOP) ==
+ 0); // Can't use this to construct any types that extend unary/binary operator.
+#if SMALL_TREE_NODES
// Allocate a large node
- assert(GenTree::s_gtNodeSizes[oper ] == TREE_NODE_SZ_SMALL);
+ assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL);
- GenTreePtr node = new(this, LargeOpOpcode()) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/true));
+ GenTreePtr node = new (this, LargeOpOpcode()) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
#else
- GenTreePtr node = new(this, oper) GenTreeOp(oper, type, op1, op2);
+ GenTreePtr node = new (this, oper) GenTreeOp(oper, type, op1, op2);
#endif
return node;
@@ -1005,31 +979,29 @@ GenTreePtr Compiler::gtNewLargeOperNode(genTreeOps oper,
* that may need to be fixed up).
*/
-inline
-GenTreePtr Compiler::gtNewIconHandleNode(size_t value,
- unsigned flags,
- FieldSeqNode* fields,
- unsigned handle1,
- void * handle2)
+inline GenTreePtr Compiler::gtNewIconHandleNode(
+ size_t value, unsigned flags, FieldSeqNode* fields, unsigned handle1, void* handle2)
{
- GenTreePtr node;
+ GenTreePtr node;
assert((flags & (GTF_ICON_HDL_MASK | GTF_ICON_FIELD_OFF)) != 0);
// Interpret "fields == NULL" as "not a field."
- if (fields == NULL)
+ if (fields == nullptr)
+ {
fields = FieldSeqStore::NotAField();
+ }
#if defined(LATE_DISASM)
- node = new (this, LargeOpOpcode()) GenTreeIntCon(TYP_I_IMPL, value, fields DEBUGARG(/*largeNode*/true));
+ node = new (this, LargeOpOpcode()) GenTreeIntCon(TYP_I_IMPL, value, fields DEBUGARG(/*largeNode*/ true));
node->gtIntCon.gtIconHdl.gtIconHdl1 = handle1;
node->gtIntCon.gtIconHdl.gtIconHdl2 = handle2;
#else
- node = new(this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, value, fields);
+ node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, value, fields);
#endif
node->gtFlags |= flags;
return node;
-}
+}
/*****************************************************************************
*
@@ -1038,10 +1010,9 @@ GenTreePtr Compiler::gtNewIconHandleNode(size_t value,
* These are versions for each specific type of HANDLE
*/
-inline
-GenTreePtr Compiler::gtNewIconEmbScpHndNode (CORINFO_MODULE_HANDLE scpHnd, unsigned hnd1, void * hnd2)
+inline GenTreePtr Compiler::gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd, unsigned hnd1, void* hnd2)
{
- void * embedScpHnd, * pEmbedScpHnd;
+ void *embedScpHnd, *pEmbedScpHnd;
embedScpHnd = (void*)info.compCompHnd->embedModuleHandle(scpHnd, &pEmbedScpHnd);
@@ -1052,10 +1023,9 @@ GenTreePtr Compiler::gtNewIconEmbScpHndNode (CORINFO_MODULE_HANDLE scpHnd, u
//-----------------------------------------------------------------------------
-inline
-GenTreePtr Compiler::gtNewIconEmbClsHndNode (CORINFO_CLASS_HANDLE clsHnd, unsigned hnd1, void * hnd2)
+inline GenTreePtr Compiler::gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd, unsigned hnd1, void* hnd2)
{
- void * embedClsHnd, * pEmbedClsHnd;
+ void *embedClsHnd, *pEmbedClsHnd;
embedClsHnd = (void*)info.compCompHnd->embedClassHandle(clsHnd, &pEmbedClsHnd);
@@ -1066,10 +1036,9 @@ GenTreePtr Compiler::gtNewIconEmbClsHndNode (CORINFO_CLASS_HANDLE clsHnd, un
//-----------------------------------------------------------------------------
-inline
-GenTreePtr Compiler::gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd, unsigned hnd1, void * hnd2)
+inline GenTreePtr Compiler::gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd, unsigned hnd1, void* hnd2)
{
- void * embedMethHnd, * pEmbedMethHnd;
+ void *embedMethHnd, *pEmbedMethHnd;
embedMethHnd = (void*)info.compCompHnd->embedMethodHandle(methHnd, &pEmbedMethHnd);
@@ -1080,10 +1049,9 @@ GenTreePtr Compiler::gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd, un
//-----------------------------------------------------------------------------
-inline
-GenTreePtr Compiler::gtNewIconEmbFldHndNode (CORINFO_FIELD_HANDLE fldHnd, unsigned hnd1, void * hnd2)
+inline GenTreePtr Compiler::gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd, unsigned hnd1, void* hnd2)
{
- void * embedFldHnd, * pEmbedFldHnd;
+ void *embedFldHnd, *pEmbedFldHnd;
embedFldHnd = (void*)info.compCompHnd->embedFieldHandle(fldHnd, &pEmbedFldHnd);
@@ -1092,19 +1060,11 @@ GenTreePtr Compiler::gtNewIconEmbFldHndNode (CORINFO_FIELD_HANDLE fldHnd, un
return gtNewIconEmbHndNode(embedFldHnd, pEmbedFldHnd, GTF_ICON_FIELD_HDL, hnd1, hnd2, fldHnd);
}
-
/*****************************************************************************/
-inline
-GenTreeCall* Compiler::gtNewHelperCallNode(unsigned helper,
- var_types type,
- unsigned flags,
- GenTreeArgList* args)
-{
- GenTreeCall* result = gtNewCallNode(CT_HELPER,
- eeFindHelper(helper),
- type,
- args);
+inline GenTreeCall* Compiler::gtNewHelperCallNode(unsigned helper, var_types type, unsigned flags, GenTreeArgList* args)
+{
+ GenTreeCall* result = gtNewCallNode(CT_HELPER, eeFindHelper(helper), type, args);
result->gtFlags |= flags;
#if DEBUG
@@ -1128,22 +1088,20 @@ GenTreeCall* Compiler::gtNewHelperCallNode(unsigned helper,
// Return Value:
// Returns GT_ALLOCOBJ node that will be later morphed into an
// allocation helper call or local variable allocation on the stack.
-inline
-GenTreePtr Compiler::gtNewAllocObjNode(unsigned int helper,
- CORINFO_CLASS_HANDLE clsHnd,
- var_types type,
- GenTreePtr op1)
+inline GenTreePtr Compiler::gtNewAllocObjNode(unsigned int helper,
+ CORINFO_CLASS_HANDLE clsHnd,
+ var_types type,
+ GenTreePtr op1)
{
- GenTreePtr node = new(this, GT_ALLOCOBJ) GenTreeAllocObj(type, helper, clsHnd, op1);
+ GenTreePtr node = new (this, GT_ALLOCOBJ) GenTreeAllocObj(type, helper, clsHnd, op1);
return node;
}
/*****************************************************************************/
-inline
-GenTreePtr Compiler::gtNewCodeRef(BasicBlock *block)
+inline GenTreePtr Compiler::gtNewCodeRef(BasicBlock* block)
{
- GenTreePtr node = new(this, GT_LABEL) GenTreeLabel(block);
+ GenTreePtr node = new (this, GT_LABEL) GenTreeLabel(block);
return node;
}
@@ -1152,44 +1110,40 @@ GenTreePtr Compiler::gtNewCodeRef(BasicBlock *block)
* A little helper to create a data member reference node.
*/
-inline
-GenTreePtr Compiler::gtNewFieldRef(var_types typ,
- CORINFO_FIELD_HANDLE fldHnd,
- GenTreePtr obj,
- DWORD offset,
- bool nullcheck)
+inline GenTreePtr Compiler::gtNewFieldRef(
+ var_types typ, CORINFO_FIELD_HANDLE fldHnd, GenTreePtr obj, DWORD offset, bool nullcheck)
{
#if SMALL_TREE_NODES
/* 'GT_FIELD' nodes may later get transformed into 'GT_IND' */
assert(GenTree::s_gtNodeSizes[GT_IND] <= GenTree::s_gtNodeSizes[GT_FIELD]);
- GenTreePtr tree = new(this, GT_FIELD) GenTreeField(typ);
+ GenTreePtr tree = new (this, GT_FIELD) GenTreeField(typ);
#else
- GenTreePtr tree = new(this, GT_FIELD) GenTreeField(typ);
+ GenTreePtr tree = new (this, GT_FIELD) GenTreeField(typ);
#endif
- tree->gtField.gtFldObj = obj;
- tree->gtField.gtFldHnd = fldHnd;
+ tree->gtField.gtFldObj = obj;
+ tree->gtField.gtFldHnd = fldHnd;
tree->gtField.gtFldOffset = offset;
- tree->gtFlags |= GTF_GLOB_REF;
+ tree->gtFlags |= GTF_GLOB_REF;
#ifdef FEATURE_READYTORUN_COMPILER
tree->gtField.gtFieldLookup.addr = nullptr;
#endif
if (nullcheck)
+ {
tree->gtFlags |= GTF_FLD_NULLCHECK;
+ }
// If "obj" is the address of a local, note that a field of that struct local has been accessed.
- if (obj != NULL &&
- obj->OperGet() == GT_ADDR &&
- varTypeIsStruct(obj->gtOp.gtOp1) &&
+ if (obj != nullptr && obj->OperGet() == GT_ADDR && varTypeIsStruct(obj->gtOp.gtOp1) &&
obj->gtOp.gtOp1->OperGet() == GT_LCL_VAR)
{
- unsigned lclNum = obj->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
+ unsigned lclNum = obj->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
lvaTable[lclNum].lvFieldAccessed = 1;
}
- return tree;
+ return tree;
}
/*****************************************************************************
@@ -1197,71 +1151,59 @@ GenTreePtr Compiler::gtNewFieldRef(var_types typ,
* A little helper to create an array index node.
*/
-inline
-GenTreePtr Compiler::gtNewIndexRef(var_types typ,
- GenTreePtr arrayOp,
- GenTreePtr indexOp)
+inline GenTreePtr Compiler::gtNewIndexRef(var_types typ, GenTreePtr arrayOp, GenTreePtr indexOp)
{
- GenTreeIndex* gtIndx = new(this, GT_INDEX) GenTreeIndex(typ, arrayOp, indexOp, genTypeSize(typ));
+ GenTreeIndex* gtIndx = new (this, GT_INDEX) GenTreeIndex(typ, arrayOp, indexOp, genTypeSize(typ));
- return gtIndx;
+ return gtIndx;
}
-
-
-
/*****************************************************************************
*
* Create (and check for) a "nothing" node, i.e. a node that doesn't produce
* any code. We currently use a "nop" node of type void for this purpose.
*/
-inline
-GenTreePtr Compiler::gtNewNothingNode()
+inline GenTreePtr Compiler::gtNewNothingNode()
{
- return new (this, GT_NOP) GenTreeOp(GT_NOP, TYP_VOID);
+ return new (this, GT_NOP) GenTreeOp(GT_NOP, TYP_VOID);
}
/*****************************************************************************/
-inline
-bool GenTree::IsNothingNode() const
+inline bool GenTree::IsNothingNode() const
{
return (gtOper == GT_NOP && gtType == TYP_VOID);
}
-
/*****************************************************************************
*
* Change the given node to a NOP - May be later changed to a GT_COMMA
*
*****************************************************************************/
-inline
-void GenTree::gtBashToNOP()
+inline void GenTree::gtBashToNOP()
{
ChangeOper(GT_NOP);
gtType = TYP_VOID;
- gtOp.gtOp1 = gtOp.gtOp2 = 0;
+ gtOp.gtOp1 = gtOp.gtOp2 = nullptr;
gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS);
}
-
// return new arg placeholder node. Does not do anything but has a type associated
// with it so we can keep track of register arguments in lists associated w/ call nodes
inline GenTreePtr Compiler::gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd)
{
GenTreePtr node = new (this, GT_ARGPLACE) GenTreeArgPlace(type, clsHnd);
- return node;
+ return node;
}
/*****************************************************************************/
-inline
-GenTreePtr Compiler::gtUnusedValNode(GenTreePtr expr)
-{
+inline GenTreePtr Compiler::gtUnusedValNode(GenTreePtr expr)
+{
return gtNewOperNode(GT_COMMA, TYP_VOID, expr, gtNewNothingNode());
}
@@ -1272,11 +1214,10 @@ GenTreePtr Compiler::gtUnusedValNode(GenTreePtr expr)
* operands
*/
-inline
-void Compiler::gtSetStmtInfo(GenTree * stmt)
+inline void Compiler::gtSetStmtInfo(GenTree* stmt)
{
assert(stmt->gtOper == GT_STMT);
- GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
+ GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
#if FEATURE_STACK_FP_X87
/* We will try to compute the FP stack level at each node */
@@ -1288,7 +1229,9 @@ void Compiler::gtSetStmtInfo(GenTree * stmt)
#ifdef DEBUG
if (verbose && 0)
+ {
gtDispTree(stmt);
+ }
#endif
/* Recursively process the expression */
@@ -1304,7 +1247,7 @@ void Compiler::gtSetStmtInfo(GenTree * stmt)
/* Do we need to recompute FP stack levels? */
- if (gtFPstLvlRedo)
+ if (gtFPstLvlRedo)
{
codeGen->genResetFPstkLevel();
gtComputeFPlvls(expr);
@@ -1313,18 +1256,17 @@ void Compiler::gtSetStmtInfo(GenTree * stmt)
#endif // FEATURE_STACK_FP_X87
}
-
#if FEATURE_STACK_FP_X87
-inline unsigned Compiler::gtSetEvalOrderAndRestoreFPstkLevel(GenTree * tree)
+inline unsigned Compiler::gtSetEvalOrderAndRestoreFPstkLevel(GenTree* tree)
{
- unsigned FPlvlSave = codeGen->genFPstkLevel;
- unsigned result = gtSetEvalOrder(tree);
+ unsigned FPlvlSave = codeGen->genFPstkLevel;
+ unsigned result = gtSetEvalOrder(tree);
codeGen->genFPstkLevel = FPlvlSave;
return result;
}
-#else // !FEATURE_STACK_FP_X87
-inline unsigned Compiler::gtSetEvalOrderAndRestoreFPstkLevel(GenTree * tree)
+#else // !FEATURE_STACK_FP_X87
+inline unsigned Compiler::gtSetEvalOrderAndRestoreFPstkLevel(GenTree* tree)
{
return gtSetEvalOrder(tree);
}
@@ -1334,20 +1276,17 @@ inline unsigned Compiler::gtSetEvalOrderAndRestoreFPstkLevel(GenTree * t
#if SMALL_TREE_NODES
/*****************************************************************************/
-inline
-void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate)
+inline void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate)
{
- assert(((gtDebugFlags & GTF_DEBUG_NODE_SMALL) != 0) !=
- ((gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0));
+ assert(((gtDebugFlags & GTF_DEBUG_NODE_SMALL) != 0) != ((gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0));
/* Make sure the node isn't too small for the new operator */
assert(GenTree::s_gtNodeSizes[gtOper] == TREE_NODE_SZ_SMALL ||
GenTree::s_gtNodeSizes[gtOper] == TREE_NODE_SZ_LARGE);
- assert(GenTree::s_gtNodeSizes[ oper] == TREE_NODE_SZ_SMALL ||
- GenTree::s_gtNodeSizes[ oper] == TREE_NODE_SZ_LARGE);
+ assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_LARGE);
- assert(GenTree::s_gtNodeSizes[ oper] == TREE_NODE_SZ_SMALL || (gtDebugFlags & GTF_DEBUG_NODE_LARGE));
+ assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || (gtDebugFlags & GTF_DEBUG_NODE_LARGE));
gtOper = oper;
@@ -1358,7 +1297,7 @@ void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate
// a gtUnOp...)
if (OperKind(oper) == GTK_UNOP)
{
- gtOp.gtOp2 = NULL;
+ gtOp.gtOp2 = nullptr;
}
#endif // DEBUG
@@ -1370,7 +1309,7 @@ void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate
if (oper == GT_CNS_INT)
{
- gtIntCon.gtFieldSeq = NULL;
+ gtIntCon.gtFieldSeq = nullptr;
}
if (vnUpdate == CLEAR_VN)
@@ -1380,8 +1319,7 @@ void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate
}
}
-inline
-void GenTree::CopyFrom(const GenTree* src, Compiler* comp)
+inline void GenTree::CopyFrom(const GenTree* src, Compiler* comp)
{
/* The source may be big only if the target is also a big node */
@@ -1399,46 +1337,40 @@ void GenTree::CopyFrom(const GenTree* src, Compiler* comp)
if (src->OperGet() == GT_IND && src->gtFlags & GTF_IND_ARR_INDEX)
{
ArrayInfo arrInfo;
- bool b = comp->GetArrayInfoMap()->Lookup(src, &arrInfo);
+ bool b = comp->GetArrayInfoMap()->Lookup(src, &arrInfo);
assert(b);
comp->GetArrayInfoMap()->Set(this, arrInfo);
}
}
-inline
-GenTreePtr Compiler::gtNewCastNode(var_types typ, GenTreePtr op1,
- var_types castType)
+inline GenTreePtr Compiler::gtNewCastNode(var_types typ, GenTreePtr op1, var_types castType)
{
GenTreePtr res = new (this, GT_CAST) GenTreeCast(typ, op1, castType);
return res;
}
-inline
-GenTreePtr Compiler::gtNewCastNodeL(var_types typ, GenTreePtr op1,
- var_types castType)
+inline GenTreePtr Compiler::gtNewCastNodeL(var_types typ, GenTreePtr op1, var_types castType)
{
/* Some casts get transformed into 'GT_CALL' or 'GT_IND' nodes */
- assert(GenTree::s_gtNodeSizes[GT_CALL] >= GenTree::s_gtNodeSizes[GT_CAST]);
- assert(GenTree::s_gtNodeSizes[GT_CALL] >= GenTree::s_gtNodeSizes[GT_IND ]);
+ assert(GenTree::s_gtNodeSizes[GT_CALL] >= GenTree::s_gtNodeSizes[GT_CAST]);
+ assert(GenTree::s_gtNodeSizes[GT_CALL] >= GenTree::s_gtNodeSizes[GT_IND]);
/* Make a big node first and then change it to be GT_CAST */
- GenTreePtr res = new (this, LargeOpOpcode()) GenTreeCast(typ, op1, castType DEBUGARG(/*largeNode*/true));
+ GenTreePtr res = new (this, LargeOpOpcode()) GenTreeCast(typ, op1, castType DEBUGARG(/*largeNode*/ true));
return res;
-
}
/*****************************************************************************/
#else // SMALL_TREE_NODES
/*****************************************************************************/
+inline void GenTree::InitNodeSize()
+{
+}
-inline
-void GenTree::InitNodeSize(){}
-
-inline
-void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate)
+inline void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate)
{
gtOper = oper;
@@ -1449,26 +1381,21 @@ void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate
}
}
-inline
-void GenTree::CopyFrom(GenTreePtr src)
+inline void GenTree::CopyFrom(GenTreePtr src)
{
- *this = *src;
+ *this = *src;
#ifdef DEBUG
gtSeqNum = 0;
#endif
}
-inline
-GenTreePtr Compiler::gtNewCastNode(var_types typ, GenTreePtr op1,
- var_types castType)
+inline GenTreePtr Compiler::gtNewCastNode(var_types typ, GenTreePtr op1, var_types castType)
{
- GenTreePtr tree = gtNewOperNode(GT_CAST, typ, op1);
+ GenTreePtr tree = gtNewOperNode(GT_CAST, typ, op1);
tree->gtCast.gtCastType = castType;
}
-inline
-GenTreePtr Compiler::gtNewCastNodeL(var_types typ, GenTreePtr op1,
- var_types castType)
+inline GenTreePtr Compiler::gtNewCastNodeL(var_types typ, GenTreePtr op1, var_types castType)
{
return gtNewCastNode(typ, op1, castType);
}
@@ -1477,18 +1404,16 @@ GenTreePtr Compiler::gtNewCastNodeL(var_types typ, GenTreePtr op1,
#endif // SMALL_TREE_NODES
/*****************************************************************************/
-inline
-void GenTree::SetOperResetFlags(genTreeOps oper)
+inline void GenTree::SetOperResetFlags(genTreeOps oper)
{
SetOper(oper);
gtFlags &= GTF_NODE_MASK;
}
-inline
-void GenTree::ChangeOperConst(genTreeOps oper)
+inline void GenTree::ChangeOperConst(genTreeOps oper)
{
#ifdef _TARGET_64BIT_
- assert(oper != GT_CNS_LNG); // We should never see a GT_CNS_LNG for a 64-bit target!
+ assert(oper != GT_CNS_LNG); // We should never see a GT_CNS_LNG for a 64-bit target!
#endif
assert(OperIsConst(oper)); // use ChangeOper() instead
SetOperResetFlags(oper);
@@ -1499,8 +1424,7 @@ void GenTree::ChangeOperConst(genTreeOps oper)
}
}
-inline
-void GenTree::ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate)
+inline void GenTree::ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate)
{
assert(!OperIsConst(oper)); // use ChangeOperLeaf() instead
@@ -1510,29 +1434,26 @@ void GenTree::ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpd
// Do "oper"-specific initializations...
switch (oper)
{
- case GT_LCL_FLD:
- gtLclFld.gtLclOffs = 0;
- gtLclFld.gtFieldSeq = FieldSeqStore::NotAField();
- break;
- default:
- break;
+ case GT_LCL_FLD:
+ gtLclFld.gtLclOffs = 0;
+ gtLclFld.gtFieldSeq = FieldSeqStore::NotAField();
+ break;
+ default:
+ break;
}
}
-inline
-void GenTree::ChangeOperUnchecked(genTreeOps oper)
+inline void GenTree::ChangeOperUnchecked(genTreeOps oper)
{
gtOper = oper; // Trust the caller and don't use SetOper()
gtFlags &= GTF_COMMON_MASK;
}
-
/*****************************************************************************
* Returns true if the node is &var (created by ldarga and ldloca)
*/
-inline
-bool GenTree::IsVarAddr() const
+inline bool GenTree::IsVarAddr() const
{
if (gtOper == GT_ADDR)
{
@@ -1554,19 +1475,15 @@ bool GenTree::IsVarAddr() const
* an operator for which GTF_OVERFLOW is invalid.
*/
-inline
-bool GenTree::gtOverflow() const
+inline bool GenTree::gtOverflow() const
{
#if !defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)
- assert(gtOper == GT_MUL || gtOper == GT_CAST ||
- gtOper == GT_ADD || gtOper == GT_SUB ||
- gtOper == GT_ASG_ADD || gtOper == GT_ASG_SUB ||
- gtOper == GT_ADD_LO || gtOper == GT_SUB_LO ||
- gtOper == GT_ADD_HI || gtOper == GT_SUB_HI);
+ assert(gtOper == GT_MUL || gtOper == GT_CAST || gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_ASG_ADD ||
+ gtOper == GT_ASG_SUB || gtOper == GT_ADD_LO || gtOper == GT_SUB_LO || gtOper == GT_ADD_HI ||
+ gtOper == GT_SUB_HI);
#else
- assert(gtOper == GT_MUL || gtOper == GT_CAST ||
- gtOper == GT_ADD || gtOper == GT_SUB ||
- gtOper == GT_ASG_ADD || gtOper == GT_ASG_SUB);
+ assert(gtOper == GT_MUL || gtOper == GT_CAST || gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_ASG_ADD ||
+ gtOper == GT_ASG_SUB);
#endif
if (gtFlags & GTF_OVERFLOW)
@@ -1581,15 +1498,13 @@ bool GenTree::gtOverflow() const
}
}
-inline
-bool GenTree::gtOverflowEx() const
+inline bool GenTree::gtOverflowEx() const
{
- if ( gtOper == GT_MUL || gtOper == GT_CAST ||
- gtOper == GT_ADD || gtOper == GT_SUB ||
+ if (gtOper == GT_MUL || gtOper == GT_CAST || gtOper == GT_ADD || gtOper == GT_SUB ||
#if !defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)
- gtOper == GT_ADD_HI || gtOper == GT_SUB_HI ||
+ gtOper == GT_ADD_HI || gtOper == GT_SUB_HI ||
#endif
- gtOper == GT_ASG_ADD || gtOper == GT_ASG_SUB)
+ gtOper == GT_ASG_ADD || gtOper == GT_ASG_SUB)
{
return gtOverflow();
}
@@ -1606,35 +1521,33 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-inline bool Compiler::lvaHaveManyLocals() const
+inline bool Compiler::lvaHaveManyLocals() const
{
return (lvaCount >= lclMAX_TRACKED);
}
-
/*****************************************************************************
*
* Allocate a temporary variable or a set of temp variables.
*/
-inline unsigned Compiler::lvaGrabTemp(bool shortLifetime
- DEBUGARG(const char * reason) )
+inline unsigned Compiler::lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason))
{
if (compIsForInlining())
{
// Grab the temp using Inliner's Compiler instance.
- Compiler * pComp = impInlineInfo->InlinerCompiler; // The Compiler instance for the caller (i.e. the inliner)
+ Compiler* pComp = impInlineInfo->InlinerCompiler; // The Compiler instance for the caller (i.e. the inliner)
if (pComp->lvaHaveManyLocals())
{
- // Don't create more LclVar with inlining
+ // Don't create more LclVars with inlining
compInlineResult->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS);
}
unsigned tmpNum = pComp->lvaGrabTemp(shortLifetime DEBUGARG(reason));
- lvaTable = pComp->lvaTable;
- lvaCount = pComp->lvaCount;
- lvaTableCnt = pComp->lvaTableCnt;
+ lvaTable = pComp->lvaTable;
+ lvaCount = pComp->lvaCount;
+ lvaTableCnt = pComp->lvaTableCnt;
return tmpNum;
}
@@ -1643,18 +1556,20 @@ inline unsigned Compiler::lvaGrabTemp(bool shortLifetime
/* Check if the lvaTable has to be grown */
if (lvaCount + 1 > lvaTableCnt)
- {
+ {
unsigned newLvaTableCnt = lvaCount + (lvaCount / 2) + 1;
// Check for overflow
if (newLvaTableCnt <= lvaCount)
+ {
IMPL_LIMITATION("too many locals");
-
- // Note: compGetMemArray might throw.
- LclVarDsc * newLvaTable = (LclVarDsc*)compGetMemArray(newLvaTableCnt, sizeof(*lvaTable), CMK_LvaTable);
+ }
+
+ // Note: compGetMemArray might throw.
+ LclVarDsc* newLvaTable = (LclVarDsc*)compGetMemArray(newLvaTableCnt, sizeof(*lvaTable), CMK_LvaTable);
memcpy(newLvaTable, lvaTable, lvaCount * sizeof(*lvaTable));
- memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable));
+ memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable));
for (unsigned i = lvaCount; i < newLvaTableCnt; i++)
{
@@ -1668,14 +1583,14 @@ inline unsigned Compiler::lvaGrabTemp(bool shortLifetime
memset(lvaTable, fDefaultFill2.val_DontUse_(CLRConfig::INTERNAL_JitDefaultFill, 0xFF), lvaCount * sizeof(*lvaTable));
#endif
#endif
-
+
lvaTableCnt = newLvaTableCnt;
- lvaTable = newLvaTable;
+ lvaTable = newLvaTable;
}
- lvaTable[lvaCount].lvType = TYP_UNDEF; // Initialize lvType, lvIsTemp and lvOnFrame
+ lvaTable[lvaCount].lvType = TYP_UNDEF; // Initialize lvType, lvIsTemp and lvOnFrame
lvaTable[lvaCount].lvIsTemp = shortLifetime;
- lvaTable[lvaCount].lvOnFrame = true;
+ lvaTable[lvaCount].lvOnFrame = true;
unsigned tempNum = lvaCount;
@@ -1686,33 +1601,31 @@ inline unsigned Compiler::lvaGrabTemp(bool shortLifetime
{
printf("\nlvaGrabTemp returning %d (", tempNum);
gtDispLclVar(tempNum, false);
- printf(")%s called for %s.\n",
- shortLifetime ? "" : " (a long lifetime temp)",
- reason);
+ printf(")%s called for %s.\n", shortLifetime ? "" : " (a long lifetime temp)", reason);
}
#endif // DEBUG
return tempNum;
}
-inline unsigned Compiler::lvaGrabTemps(unsigned cnt
- DEBUGARG(const char * reason) )
+inline unsigned Compiler::lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason))
{
if (compIsForInlining())
{
- // Grab the temps using Inliner's Compiler instance.
+ // Grab the temps using Inliner's Compiler instance.
unsigned tmpNum = impInlineInfo->InlinerCompiler->lvaGrabTemps(cnt DEBUGARG(reason));
- lvaTable = impInlineInfo->InlinerCompiler->lvaTable;
- lvaCount = impInlineInfo->InlinerCompiler->lvaCount;
+ lvaTable = impInlineInfo->InlinerCompiler->lvaTable;
+ lvaCount = impInlineInfo->InlinerCompiler->lvaCount;
lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt;
return tmpNum;
}
-
+
#ifdef DEBUG
if (verbose)
{
- printf("\nlvaGrabTemps(%d) returning %d..%d (long lifetime temps) called for %s", cnt, lvaCount, lvaCount+cnt-1, reason);
+ printf("\nlvaGrabTemps(%d) returning %d..%d (long lifetime temps) called for %s", cnt, lvaCount,
+ lvaCount + cnt - 1, reason);
}
#endif
@@ -1726,13 +1639,15 @@ inline unsigned Compiler::lvaGrabTemps(unsigned cnt
// Check for overflow
if (newLvaTableCnt <= lvaCount)
+ {
IMPL_LIMITATION("too many locals");
+ }
- // Note: compGetMemArray might throw.
- LclVarDsc * newLvaTable = (LclVarDsc*)compGetMemArray(newLvaTableCnt, sizeof(*lvaTable), CMK_LvaTable);
+ // Note: compGetMemArray might throw.
+ LclVarDsc* newLvaTable = (LclVarDsc*)compGetMemArray(newLvaTableCnt, sizeof(*lvaTable), CMK_LvaTable);
memcpy(newLvaTable, lvaTable, lvaCount * sizeof(*lvaTable));
- memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable));
+ memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable));
for (unsigned i = lvaCount; i < newLvaTableCnt; i++)
{
new (&newLvaTable[i], jitstd::placement_t()) LclVarDsc(this); // call the constructor.
@@ -1747,16 +1662,16 @@ inline unsigned Compiler::lvaGrabTemps(unsigned cnt
#endif
lvaTableCnt = newLvaTableCnt;
- lvaTable = newLvaTable;
+ lvaTable = newLvaTable;
}
- unsigned tempNum = lvaCount;
+ unsigned tempNum = lvaCount;
while (cnt--)
{
- lvaTable[lvaCount].lvType = TYP_UNDEF; // Initialize lvType, lvIsTemp and lvOnFrame
+ lvaTable[lvaCount].lvType = TYP_UNDEF; // Initialize lvType, lvIsTemp and lvOnFrame
lvaTable[lvaCount].lvIsTemp = false;
- lvaTable[lvaCount].lvOnFrame = true;
+ lvaTable[lvaCount].lvOnFrame = true;
lvaCount++;
}
@@ -1770,23 +1685,22 @@ inline unsigned Compiler::lvaGrabTemps(unsigned cnt
* be forced to be kept alive, and not be optimized away.
*/
-inline unsigned Compiler::lvaGrabTempWithImplicitUse(bool shortLifetime
- DEBUGARG(const char * reason))
+inline unsigned Compiler::lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason))
{
if (compIsForInlining())
{
- // Grab the temp using Inliner's Compiler instance.
+ // Grab the temp using Inliner's Compiler instance.
unsigned tmpNum = impInlineInfo->InlinerCompiler->lvaGrabTempWithImplicitUse(shortLifetime DEBUGARG(reason));
- lvaTable = impInlineInfo->InlinerCompiler->lvaTable;
- lvaCount = impInlineInfo->InlinerCompiler->lvaCount;
+ lvaTable = impInlineInfo->InlinerCompiler->lvaTable;
+ lvaCount = impInlineInfo->InlinerCompiler->lvaCount;
lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt;
return tmpNum;
}
unsigned lclNum = lvaGrabTemp(shortLifetime DEBUGARG(reason));
- LclVarDsc * varDsc = &lvaTable[lclNum];
+ LclVarDsc* varDsc = &lvaTable[lclNum];
// This will prevent it from being optimized away
// TODO-CQ: We shouldn't have to go as far as to declare these
@@ -1807,8 +1721,7 @@ inline unsigned Compiler::lvaGrabTempWithImplicitUse(bool shortLifetime
* and zero lvRefCntWtd when lvRefCnt is zero
*/
-inline
-void LclVarDsc::lvaResetSortAgainFlag(Compiler * comp)
+inline void LclVarDsc::lvaResetSortAgainFlag(Compiler* comp)
{
if (!comp->lvaTrackedFixed)
{
@@ -1817,7 +1730,9 @@ void LclVarDsc::lvaResetSortAgainFlag(Compiler * comp)
}
/* Set weighted ref count to zero if ref count is zero */
if (lvRefCnt == 0)
- lvRefCntWtd = 0;
+ {
+ lvRefCntWtd = 0;
+ }
}
/*****************************************************************************
@@ -1825,28 +1740,26 @@ void LclVarDsc::lvaResetSortAgainFlag(Compiler * comp)
* Decrement the ref counts for a local variable
*/
-inline
-void LclVarDsc::decRefCnts(BasicBlock::weight_t weight, Compiler * comp, bool propagate)
-{
+inline void LclVarDsc::decRefCnts(BasicBlock::weight_t weight, Compiler* comp, bool propagate)
+{
/* Decrement lvRefCnt and lvRefCntWtd */
Compiler::lvaPromotionType promotionType = DUMMY_INIT(Compiler::PROMOTION_TYPE_NONE);
if (varTypeIsStruct(lvType))
- {
+ {
promotionType = comp->lvaGetPromotionType(this);
}
//
// Decrement counts on the local itself.
- //
- if (lvType != TYP_STRUCT ||
- promotionType != Compiler::PROMOTION_TYPE_INDEPENDENT)
+ //
+ if (lvType != TYP_STRUCT || promotionType != Compiler::PROMOTION_TYPE_INDEPENDENT)
{
- assert(lvRefCnt); // Can't decrement below zero
+ assert(lvRefCnt); // Can't decrement below zero
// TODO: Well, the assert above could be bogus.
- // If lvRefCnt has overflowed before, then might drop to 0.
+ // If lvRefCnt has overflowed before, then it might drop to 0.
// Therefore we do need the following check to keep lvRefCnt from underflow:
- if (lvRefCnt > 0)
+ if (lvRefCnt > 0)
{
//
// Decrement lvRefCnt
@@ -1858,44 +1771,47 @@ void LclVarDsc::decRefCnts(BasicBlock::weight_t weight, Compiler * comp
//
if (weight != 0)
{
- if (lvIsTemp && (weight*2 > weight)) {
+ if (lvIsTemp && (weight * 2 > weight))
+ {
weight *= 2;
}
- if (lvRefCntWtd <= weight) // Can't go below zero
- lvRefCntWtd = 0;
+ if (lvRefCntWtd <= weight)
+ { // Can't go below zero
+ lvRefCntWtd = 0;
+ }
else
+ {
lvRefCntWtd -= weight;
+ }
}
- }
+ }
}
if (varTypeIsStruct(lvType) && propagate)
- {
+ {
// For promoted struct locals, decrement lvRefCnt on its field locals as well.
if (promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT ||
- promotionType == Compiler::PROMOTION_TYPE_DEPENDENT )
+ promotionType == Compiler::PROMOTION_TYPE_DEPENDENT)
{
- for (unsigned i = lvFieldLclStart;
- i < lvFieldLclStart + lvFieldCnt;
- ++i)
- {
+ for (unsigned i = lvFieldLclStart; i < lvFieldLclStart + lvFieldCnt; ++i)
+ {
comp->lvaTable[i].decRefCnts(comp->lvaMarkRefsWeight, comp, false); // Don't propagate
}
- }
- }
-
+ }
+ }
+
if (lvIsStructField && propagate)
- {
+ {
// Depending on the promotion type, decrement the ref count for the parent struct as well.
- promotionType = comp->lvaGetParentPromotionType(this);
- LclVarDsc * parentvarDsc = &comp->lvaTable[lvParentLcl];
+ promotionType = comp->lvaGetParentPromotionType(this);
+ LclVarDsc* parentvarDsc = &comp->lvaTable[lvParentLcl];
assert(!parentvarDsc->lvRegStruct);
- if (promotionType == Compiler::PROMOTION_TYPE_DEPENDENT)
+ if (promotionType == Compiler::PROMOTION_TYPE_DEPENDENT)
{
- parentvarDsc->decRefCnts(comp->lvaMarkRefsWeight, comp, false); // Don't propagate
- }
- }
+ parentvarDsc->decRefCnts(comp->lvaMarkRefsWeight, comp, false); // Don't propagate
+ }
+ }
lvaResetSortAgainFlag(comp);
@@ -1904,8 +1820,7 @@ void LclVarDsc::decRefCnts(BasicBlock::weight_t weight, Compiler * comp
{
unsigned varNum = (unsigned)(this - comp->lvaTable);
assert(&comp->lvaTable[varNum] == this);
- printf("New refCnts for V%02u: refCnt = %2u, refCntWtd = %s\n",
- varNum, lvRefCnt, refCntWtd2str(lvRefCntWtd));
+ printf("New refCnts for V%02u: refCnt = %2u, refCntWtd = %s\n", varNum, lvRefCnt, refCntWtd2str(lvRefCntWtd));
}
#endif
}
@@ -1915,27 +1830,25 @@ void LclVarDsc::decRefCnts(BasicBlock::weight_t weight, Compiler * comp
* Increment the ref counts for a local variable
*/
-inline
-void LclVarDsc::incRefCnts(BasicBlock::weight_t weight, Compiler *comp, bool propagate)
-{
+inline void LclVarDsc::incRefCnts(BasicBlock::weight_t weight, Compiler* comp, bool propagate)
+{
Compiler::lvaPromotionType promotionType = DUMMY_INIT(Compiler::PROMOTION_TYPE_NONE);
if (varTypeIsStruct(lvType))
- {
+ {
promotionType = comp->lvaGetPromotionType(this);
}
//
// Increment counts on the local itself.
- //
- if (lvType != TYP_STRUCT ||
- promotionType != Compiler::PROMOTION_TYPE_INDEPENDENT)
+ //
+ if (lvType != TYP_STRUCT || promotionType != Compiler::PROMOTION_TYPE_INDEPENDENT)
{
//
// Increment lvRefCnt
//
- int newRefCnt = lvRefCnt+1;
- if (newRefCnt == (unsigned short)newRefCnt) // lvRefCnt is an "unsigned short". Don't overflow it.
- {
+ int newRefCnt = lvRefCnt + 1;
+ if (newRefCnt == (unsigned short)newRefCnt) // lvRefCnt is an "unsigned short". Don't overflow it.
+ {
lvRefCnt = (unsigned short)newRefCnt;
}
@@ -1948,47 +1861,48 @@ void LclVarDsc::incRefCnts(BasicBlock::weight_t weight, Compiler *com
{
// We double the weight of internal temps
//
- if (lvIsTemp && (weight*2 > weight)) {
+ if (lvIsTemp && (weight * 2 > weight))
+ {
weight *= 2;
}
- unsigned newWeight = lvRefCntWtd + weight;
- if (newWeight >= lvRefCntWtd) { // lvRefCntWtd is an "unsigned". Don't overflow it
+ unsigned newWeight = lvRefCntWtd + weight;
+ if (newWeight >= lvRefCntWtd)
+ { // lvRefCntWtd is an "unsigned". Don't overflow it
lvRefCntWtd = newWeight;
}
- else { // On overflow we assign ULONG_MAX
+ else
+ { // On overflow we assign ULONG_MAX
lvRefCntWtd = ULONG_MAX;
}
- }
- }
+ }
+ }
if (varTypeIsStruct(lvType) && propagate)
- {
+ {
// For promoted struct locals, increment lvRefCnt on its field locals as well.
if (promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT ||
- promotionType == Compiler::PROMOTION_TYPE_DEPENDENT )
+ promotionType == Compiler::PROMOTION_TYPE_DEPENDENT)
{
- for (unsigned i = lvFieldLclStart;
- i < lvFieldLclStart + lvFieldCnt;
- ++i)
- {
+ for (unsigned i = lvFieldLclStart; i < lvFieldLclStart + lvFieldCnt; ++i)
+ {
comp->lvaTable[i].incRefCnts(comp->lvaMarkRefsWeight, comp, false); // Don't propagate
}
- }
- }
+ }
+ }
if (lvIsStructField && propagate)
- {
+ {
// Depending on the promotion type, increment the ref count for the parent struct as well.
- promotionType = comp->lvaGetParentPromotionType(this);
- LclVarDsc * parentvarDsc = &comp->lvaTable[lvParentLcl];
+ promotionType = comp->lvaGetParentPromotionType(this);
+ LclVarDsc* parentvarDsc = &comp->lvaTable[lvParentLcl];
assert(!parentvarDsc->lvRegStruct);
if (promotionType == Compiler::PROMOTION_TYPE_DEPENDENT)
{
- parentvarDsc->incRefCnts(comp->lvaMarkRefsWeight, comp, false); // Don't propagate
- }
- }
-
+ parentvarDsc->incRefCnts(comp->lvaMarkRefsWeight, comp, false); // Don't propagate
+ }
+ }
+
lvaResetSortAgainFlag(comp);
#ifdef DEBUG
@@ -1996,8 +1910,7 @@ void LclVarDsc::incRefCnts(BasicBlock::weight_t weight, Compiler *com
{
unsigned varNum = (unsigned)(this - comp->lvaTable);
assert(&comp->lvaTable[varNum] == this);
- printf("New refCnts for V%02u: refCnt = %2u, refCntWtd = %s\n",
- varNum, lvRefCnt, refCntWtd2str(lvRefCntWtd));
+ printf("New refCnts for V%02u: refCnt = %2u, refCntWtd = %s\n", varNum, lvRefCnt, refCntWtd2str(lvRefCntWtd));
}
#endif
}
@@ -2007,16 +1920,17 @@ void LclVarDsc::incRefCnts(BasicBlock::weight_t weight, Compiler *com
* Set the lvPrefReg field to reg
*/
-inline
-void LclVarDsc::setPrefReg(regNumber regNum, Compiler * comp)
+inline void LclVarDsc::setPrefReg(regNumber regNum, Compiler* comp)
{
regMaskTP regMask;
if (isFloatRegType(TypeGet()))
{
// Check for FP struct-promoted field being passed in integer register
- //
+ //
if (!genIsValidFloatReg(regNum))
+ {
return;
+ }
regMask = genRegMaskFloat(regNum, TypeGet());
}
else
@@ -2032,21 +1946,21 @@ void LclVarDsc::setPrefReg(regNumber regNum, Compiler * comp)
/* Only interested if we have a new register bit set */
if (lvPrefReg & regMask)
+ {
return;
+ }
#ifdef DEBUG
if (comp->verbose)
{
if (lvPrefReg)
{
- printf("Change preferred register for V%02u from ",
- this - comp->lvaTable);
+ printf("Change preferred register for V%02u from ", this - comp->lvaTable);
dspRegMask(lvPrefReg);
}
else
{
- printf("Set preferred register for V%02u",
- this - comp->lvaTable);
+ printf("Set preferred register for V%02u", this - comp->lvaTable);
}
printf(" to ");
dspRegMask(regMask);
@@ -2075,8 +1989,7 @@ void LclVarDsc::setPrefReg(regNumber regNum, Compiler * comp)
* Add regMask to the lvPrefReg field
*/
-inline
-void LclVarDsc::addPrefReg(regMaskTP regMask, Compiler * comp)
+inline void LclVarDsc::addPrefReg(regMaskTP regMask, Compiler* comp)
{
assert(regMask != RBM_NONE);
@@ -2088,21 +2001,21 @@ void LclVarDsc::addPrefReg(regMaskTP regMask, Compiler * comp)
/* Only interested if we have a new register bit set */
if (lvPrefReg & regMask)
+ {
return;
+ }
#ifdef DEBUG
if (comp->verbose)
{
if (lvPrefReg)
{
- printf("Additional preferred register for V%02u from ",
- this-comp->lvaTable);
+ printf("Additional preferred register for V%02u from ", this - comp->lvaTable);
dspRegMask(lvPrefReg);
}
else
{
- printf("Set preferred register for V%02u",
- this-comp->lvaTable);
+ printf("Set preferred register for V%02u", this - comp->lvaTable);
}
printf(" to ");
dspRegMask(lvPrefReg | regMask);
@@ -2132,28 +2045,31 @@ void LclVarDsc::addPrefReg(regMaskTP regMask, Compiler * comp)
* referenced in a statement.
*/
-inline
-VARSET_VALRET_TP Compiler::lvaStmtLclMask(GenTreePtr stmt)
+inline VARSET_VALRET_TP Compiler::lvaStmtLclMask(GenTreePtr stmt)
{
- GenTreePtr tree;
- unsigned varNum;
- LclVarDsc * varDsc;
- VARSET_TP VARSET_INIT_NOCOPY(lclMask, VarSetOps::MakeEmpty(this));
+ GenTreePtr tree;
+ unsigned varNum;
+ LclVarDsc* varDsc;
+ VARSET_TP VARSET_INIT_NOCOPY(lclMask, VarSetOps::MakeEmpty(this));
assert(stmt->gtOper == GT_STMT);
assert(fgStmtListThreaded);
for (tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
{
- if (tree->gtOper != GT_LCL_VAR)
+ if (tree->gtOper != GT_LCL_VAR)
+ {
continue;
+ }
varNum = tree->gtLclVarCommon.gtLclNum;
assert(varNum < lvaCount);
varDsc = lvaTable + varNum;
- if (!varDsc->lvTracked)
+ if (!varDsc->lvTracked)
+ {
continue;
+ }
VarSetOps::UnionD(this, lclMask, VarSetOps::MakeSingleton(this, varDsc->lvVarIndex));
}
@@ -2167,15 +2083,14 @@ VARSET_VALRET_TP Compiler::lvaStmtLclMask(GenTreePtr stmt)
* of the struct and returns true iff it contains a GC ref.
*/
-inline
-bool Compiler::lvaTypeIsGC(unsigned varNum)
+inline bool Compiler::lvaTypeIsGC(unsigned varNum)
{
if (lvaTable[varNum].TypeGet() == TYP_STRUCT)
{
- assert(lvaTable[varNum].lvGcLayout != 0); // bits are intialized
+ assert(lvaTable[varNum].lvGcLayout != nullptr); // bits are initialized
return (lvaTable[varNum].lvStructGcCount != 0);
}
- return(varTypeIsGC(lvaTable[varNum].TypeGet()));
+ return (varTypeIsGC(lvaTable[varNum].TypeGet()));
}
/*****************************************************************************
@@ -2193,11 +2108,12 @@ bool Compiler::lvaTypeIsGC(unsigned varNum)
should catch the exception or not.
*/
-inline
-bool Compiler::lvaKeepAliveAndReportThis()
+inline bool Compiler::lvaKeepAliveAndReportThis()
{
if (info.compIsStatic || lvaTable[0].TypeGet() != TYP_REF)
+ {
return false;
+ }
#ifdef JIT32_GCENCODER
if (info.compFlags & CORINFO_FLG_SYNCH)
@@ -2235,8 +2151,7 @@ bool Compiler::lvaKeepAliveAndReportThis()
Similar to lvaKeepAliveAndReportThis
*/
-inline
-bool Compiler::lvaReportParamTypeArg()
+inline bool Compiler::lvaReportParamTypeArg()
{
if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE))
{
@@ -2245,12 +2160,16 @@ bool Compiler::lvaReportParamTypeArg()
// If the VM requires us to keep the generics context alive and report it (for example, if any catch
// clause catches a type that uses a generic parameter of this method) this flag will be set.
if (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE)
+ {
return true;
+ }
// Otherwise, if an exact type parameter is needed in the body, report the generics context.
// We do this because collectible types needs the generics context when gc-ing.
if (lvaGenericsContextUsed)
+ {
return true;
+ }
}
// Otherwise, we don't need to report it -- the generics context parameter is unused.
@@ -2259,15 +2178,13 @@ bool Compiler::lvaReportParamTypeArg()
//*****************************************************************************
-inline
-unsigned Compiler::lvaCachedGenericContextArgOffset()
+inline unsigned Compiler::lvaCachedGenericContextArgOffset()
{
assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
-
+
return lvaCachedGenericContextArgOffs;
}
-
/*****************************************************************************
*
* Return the stack framed offset of the given variable; set *FPbased to
@@ -2277,7 +2194,7 @@ unsigned Compiler::lvaCachedGenericContextArgOffset()
* mustBeFPBased - strong about whether the base reg is FP. But it is also
* strong about not being FPBased after FINAL_FRAME_LAYOUT. i.e.,
* it enforces SP based.
- *
+ *
* addrModeOffset - is the addressing mode offset, for example: v02 + 0x10
* So, V02 itself is at offset sp + 0x10 and then addrModeOffset is what gets
* added beyond that.
@@ -2285,27 +2202,30 @@ unsigned Compiler::lvaCachedGenericContextArgOffset()
inline
#ifdef _TARGET_ARM_
-int Compiler::lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber * pBaseReg, int addrModeOffset)
+ int
+ Compiler::lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset)
#else
-int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
+ int
+ Compiler::lvaFrameAddress(int varNum, bool* pFPbased)
#endif
{
assert(lvaDoneFrameLayout != NO_FRAME_LAYOUT);
- int offset;
- bool FPbased;
- bool fConservative = false;
- var_types type = TYP_UNDEF;
- if (varNum >= 0)
+ int offset;
+ bool FPbased;
+ bool fConservative = false;
+ var_types type = TYP_UNDEF;
+ if (varNum >= 0)
{
- LclVarDsc * varDsc;
+ LclVarDsc* varDsc;
assert((unsigned)varNum < lvaCount);
- varDsc = lvaTable + varNum;
- type = varDsc->TypeGet();
+ varDsc = lvaTable + varNum;
+ type = varDsc->TypeGet();
bool isPrespilledArg = false;
#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED)
- isPrespilledArg = varDsc->lvIsParam && compIsProfilerHookNeeded() && lvaIsPreSpilled(varNum, codeGen->regSet.rsMaskPreSpillRegs(false));
+ isPrespilledArg = varDsc->lvIsParam && compIsProfilerHookNeeded() &&
+ lvaIsPreSpilled(varNum, codeGen->regSet.rsMaskPreSpillRegs(false));
#endif
// If we have finished with register allocation, and this isn't a stack-based local,
@@ -2318,14 +2238,13 @@ int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
assert(varDsc->lvIsParam);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
#elif defined(_TARGET_X86_) && !defined(LEGACY_BACKEND)
- // For !LEGACY_BACKEND on x86, a stack parameter that is enregistered will have a stack location.
+ // For !LEGACY_BACKEND on x86, a stack parameter that is enregistered will have a stack location.
assert(varDsc->lvIsParam && !varDsc->lvIsRegArg);
-#else // !(_TARGET_AMD64 || !(defined(_TARGET_X86_) && !defined(LEGACY_BACKEND)))
+#else // !(_TARGET_AMD64 || !(defined(_TARGET_X86_) && !defined(LEGACY_BACKEND)))
// Otherwise, we only have a valid stack location for:
// A parameter that was passed on the stack, being homed into its register home,
// or a prespilled argument on arm under profiler.
- assert((varDsc->lvIsParam && !varDsc->lvIsRegArg && varDsc->lvRegister) ||
- isPrespilledArg);
+ assert((varDsc->lvIsParam && !varDsc->lvIsRegArg && varDsc->lvRegister) || isPrespilledArg);
#endif // !(_TARGET_AMD64 || !(defined(_TARGET_X86_) && !defined(LEGACY_BACKEND)))
}
@@ -2334,25 +2253,25 @@ int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
#ifdef DEBUG
#if FEATURE_FIXED_OUT_ARGS
if ((unsigned)varNum == lvaOutgoingArgSpaceVar)
+ {
assert(FPbased == false);
+ }
else
#endif
{
-#if DOUBLE_ALIGN
- assert(FPbased == (isFramePointerUsed() || (genDoubleAlign() &&
- varDsc->lvIsParam &&
- !varDsc->lvIsRegArg)));
+#if DOUBLE_ALIGN
+ assert(FPbased == (isFramePointerUsed() || (genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)));
#else
#ifdef _TARGET_X86_
- assert(FPbased == isFramePointerUsed());
+ assert(FPbased == isFramePointerUsed());
#endif
#endif
}
-#endif // DEBUG
+#endif // DEBUG
offset = varDsc->lvStkOffs;
}
- else // Its a spill-temp
+ else // It's a spill-temp
{
FPbased = isFramePointerUsed();
if (lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
@@ -2365,9 +2284,9 @@ int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
tmpDsc = tmpFindNum(varNum, Compiler::TEMP_USAGE_USED);
}
#endif // !LEGACY_BACKEND
- assert(tmpDsc != NULL);
+ assert(tmpDsc != nullptr);
offset = tmpDsc->tdTempOffs();
- type = tmpDsc->tdTempType();
+ type = tmpDsc->tdTempType();
}
else
{
@@ -2377,11 +2296,11 @@ int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
// : :
// +-------------------------+ base --+
// | LR, ++N for ARM | | frameBaseOffset (= N)
- // +-------------------------+ |
+ // +-------------------------+ |
// | R11, ++N for ARM | <---FP |
// +-------------------------+ --+
// | compCalleeRegsPushed - N| | lclFrameOffset
- // +-------------------------+ --+
+ // +-------------------------+ --+
// | lclVars | |
// +-------------------------+ |
// | tmp[MAX_SPILL_TEMP] | |
@@ -2394,7 +2313,7 @@ int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
// : :
// ---------------------------------------------------
- type = compFloatingPointUsed ? TYP_FLOAT : TYP_INT;
+ type = compFloatingPointUsed ? TYP_FLOAT : TYP_INT;
fConservative = true;
if (!FPbased)
{
@@ -2405,7 +2324,7 @@ int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
#else
int outGoingArgSpaceSize = 0;
#endif
- offset = outGoingArgSpaceSize + max(-varNum * TARGET_POINTER_SIZE, (int) lvaGetMaxSpillTempSize());
+ offset = outGoingArgSpaceSize + max(-varNum * TARGET_POINTER_SIZE, (int)lvaGetMaxSpillTempSize());
}
else
{
@@ -2415,7 +2334,7 @@ int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
#ifdef _TARGET_ARM_
offset = codeGen->genCallerSPtoInitialSPdelta() - codeGen->genCallerSPtoFPdelta();
#else
- offset = -(codeGen->genTotalFrameSize());
+ offset = -(codeGen->genTotalFrameSize());
#endif
}
}
@@ -2437,13 +2356,13 @@ int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
// we have already selected the instruction. Min-opts will have R10 enabled, so just
// use that.
- int spOffset = fConservative ? compLclFrameSize : offset + codeGen->genSPtoFPdelta();
- int actualOffset = (spOffset + addrModeOffset);
+ int spOffset = fConservative ? compLclFrameSize : offset + codeGen->genSPtoFPdelta();
+ int actualOffset = (spOffset + addrModeOffset);
int ldrEncodeLimit = (varTypeIsFloating(type) ? 0x3FC : 0xFFC);
// Use ldr sp imm encoding.
if (lvaDoneFrameLayout == FINAL_FRAME_LAYOUT || opts.MinOpts() || (actualOffset <= ldrEncodeLimit))
{
- offset = spOffset;
+ offset = spOffset;
*pBaseReg = compLocallocUsed ? REG_SAVED_LOCALLOC_SP : REG_SPBASE;
}
// Use ldr +/-imm8 encoding.
@@ -2452,9 +2371,9 @@ int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
*pBaseReg = REG_FPBASE;
}
// Use a single movw. prefer locals.
- else if (actualOffset <= 0xFFFC) // Fix 383910 ARM ILGEN
+ else if (actualOffset <= 0xFFFC) // Fix 383910 ARM ILGEN
{
- offset = spOffset;
+ offset = spOffset;
*pBaseReg = compLocallocUsed ? REG_SAVED_LOCALLOC_SP : REG_SPBASE;
}
// Use movw, movt.
@@ -2469,47 +2388,42 @@ int Compiler::lvaFrameAddress(int varNum, bool * pFPbased)
*pBaseReg = REG_SPBASE;
}
#else
- *pFPbased = FPbased;
+ *pFPbased = FPbased;
#endif
return offset;
}
-inline
-bool Compiler::lvaIsParameter(unsigned varNum)
+inline bool Compiler::lvaIsParameter(unsigned varNum)
{
- LclVarDsc * varDsc;
+ LclVarDsc* varDsc;
assert(varNum < lvaCount);
varDsc = lvaTable + varNum;
- return varDsc->lvIsParam;
+ return varDsc->lvIsParam;
}
-inline
-bool Compiler::lvaIsRegArgument(unsigned varNum)
+inline bool Compiler::lvaIsRegArgument(unsigned varNum)
{
- LclVarDsc * varDsc;
+ LclVarDsc* varDsc;
assert(varNum < lvaCount);
varDsc = lvaTable + varNum;
- return varDsc->lvIsRegArg;
+ return varDsc->lvIsRegArg;
}
-inline
-BOOL Compiler::lvaIsOriginalThisArg(unsigned varNum)
+inline BOOL Compiler::lvaIsOriginalThisArg(unsigned varNum)
{
assert(varNum < lvaCount);
- BOOL isOriginalThisArg =
- (varNum == info.compThisArg) &&
- (info.compIsStatic == false);
+ BOOL isOriginalThisArg = (varNum == info.compThisArg) && (info.compIsStatic == false);
#ifdef DEBUG
if (isOriginalThisArg)
- {
- LclVarDsc * varDsc = lvaTable + varNum;
+ {
+ LclVarDsc* varDsc = lvaTable + varNum;
// Should never write to or take the address of the original 'this' arg
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -2518,23 +2432,22 @@ BOOL Compiler::lvaIsOriginalThisArg(unsigned varNum)
// copy to a new local, and mark the original as DoNotEnregister, to
// ensure that it is stack-allocated. It should not be the case that the original one can be modified -- it
// should not be written to, or address-exposed.
- assert(!varDsc->lvArgWrite && (!varDsc->lvAddrExposed || ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0)));
+ assert(!varDsc->lvArgWrite &&
+ (!varDsc->lvAddrExposed || ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0)));
#else
assert(!varDsc->lvArgWrite && !varDsc->lvAddrExposed);
#endif
}
-#endif
+#endif
return isOriginalThisArg;
}
-inline
-BOOL Compiler::lvaIsOriginalThisReadOnly()
+inline BOOL Compiler::lvaIsOriginalThisReadOnly()
{
- return lvaArg0Var == info.compThisArg;
+ return lvaArg0Var == info.compThisArg;
}
-
/*****************************************************************************
*
* The following is used to detect the cases where the same local variable#
@@ -2542,15 +2455,12 @@ BOOL Compiler::lvaIsOriginalThisReadOnly()
* integer/address and a float value.
*/
-/* static */ inline
-unsigned Compiler::lvaTypeRefMask(var_types type)
+/* static */ inline unsigned Compiler::lvaTypeRefMask(var_types type)
{
- const static
- BYTE lvaTypeRefMasks[] =
- {
- #define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) howUsed,
- #include "typelist.h"
- #undef DEF_TP
+ const static BYTE lvaTypeRefMasks[] = {
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) howUsed,
+#include "typelist.h"
+#undef DEF_TP
};
assert((unsigned)type < sizeof(lvaTypeRefMasks));
@@ -2566,14 +2476,12 @@ unsigned Compiler::lvaTypeRefMask(var_types type)
* integer/address and a float value.
*/
-inline
-var_types Compiler::lvaGetActualType(unsigned lclNum)
+inline var_types Compiler::lvaGetActualType(unsigned lclNum)
{
return genActualType(lvaGetRealType(lclNum));
}
-inline
-var_types Compiler::lvaGetRealType(unsigned lclNum)
+inline var_types Compiler::lvaGetRealType(unsigned lclNum)
{
return lvaTable[lclNum].TypeGet();
}
@@ -2588,8 +2496,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-inline
-unsigned Compiler::compMapILargNum(unsigned ILargNum)
+inline unsigned Compiler::compMapILargNum(unsigned ILargNum)
{
assert(ILargNum < info.compILargsCount || tiVerificationNeeded);
@@ -2598,39 +2505,39 @@ unsigned Compiler::compMapILargNum(unsigned ILargNum)
if (ILargNum >= info.compRetBuffArg)
{
ILargNum++;
- assert(ILargNum < info.compLocalsCount || tiVerificationNeeded); // compLocals count already adjusted.
+ assert(ILargNum < info.compLocalsCount || tiVerificationNeeded); // compLocals count already adjusted.
}
- if (ILargNum >= (unsigned) info.compTypeCtxtArg)
+ if (ILargNum >= (unsigned)info.compTypeCtxtArg)
{
ILargNum++;
- assert(ILargNum < info.compLocalsCount || tiVerificationNeeded); // compLocals count already adjusted.
+ assert(ILargNum < info.compLocalsCount || tiVerificationNeeded); // compLocals count already adjusted.
}
- if (ILargNum >= (unsigned) lvaVarargsHandleArg)
+ if (ILargNum >= (unsigned)lvaVarargsHandleArg)
{
ILargNum++;
- assert(ILargNum < info.compLocalsCount || tiVerificationNeeded); // compLocals count already adjusted.
+ assert(ILargNum < info.compLocalsCount || tiVerificationNeeded); // compLocals count already adjusted.
}
assert(ILargNum < info.compArgsCount || tiVerificationNeeded);
- return(ILargNum);
+ return (ILargNum);
}
// For ARM varargs, all arguments go in integer registers, so swizzle the type
-inline
-var_types Compiler::mangleVarArgsType(var_types type)
+inline var_types Compiler::mangleVarArgsType(var_types type)
{
#ifdef _TARGET_ARMARCH_
if (info.compIsVarArgs || opts.compUseSoftFP)
{
- switch (type) {
- case TYP_FLOAT:
- return TYP_INT;
- case TYP_DOUBLE:
- return TYP_LONG;
- default:
- break;
+ switch (type)
+ {
+ case TYP_FLOAT:
+ return TYP_INT;
+ case TYP_DOUBLE:
+ return TYP_LONG;
+ default:
+ break;
}
}
#endif // _TARGET_ARMARCH_
@@ -2644,16 +2551,16 @@ inline regNumber Compiler::getCallArgIntRegister(regNumber floatReg)
#ifdef _TARGET_AMD64_
switch (floatReg)
{
- case REG_XMM0:
- return REG_RCX;
- case REG_XMM1:
- return REG_RDX;
- case REG_XMM2:
- return REG_R8;
- case REG_XMM3:
- return REG_R9;
- default:
- unreached();
+ case REG_XMM0:
+ return REG_RCX;
+ case REG_XMM1:
+ return REG_RDX;
+ case REG_XMM2:
+ return REG_R8;
+ case REG_XMM3:
+ return REG_R9;
+ default:
+ unreached();
}
#else // !_TARGET_AMD64_
// How will float args be passed for RyuJIT/x86?
@@ -2667,16 +2574,16 @@ inline regNumber Compiler::getCallArgFloatRegister(regNumber intReg)
#ifdef _TARGET_AMD64_
switch (intReg)
{
- case REG_RCX:
- return REG_XMM0;
- case REG_RDX:
- return REG_XMM1;
- case REG_R8:
- return REG_XMM2;
- case REG_R9:
- return REG_XMM3;
- default:
- unreached();
+ case REG_RCX:
+ return REG_XMM0;
+ case REG_RDX:
+ return REG_XMM1;
+ case REG_R8:
+ return REG_XMM2;
+ case REG_R9:
+ return REG_XMM3;
+ default:
+ unreached();
}
#else // !_TARGET_AMD64_
// How will float args be passed for RyuJIT/x86?
@@ -2698,23 +2605,24 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
-inline
-bool rpCanAsgOperWithoutReg(GenTreePtr op, bool lclvar)
+inline bool rpCanAsgOperWithoutReg(GenTreePtr op, bool lclvar)
{
var_types type;
switch (op->OperGet())
{
- case GT_CNS_LNG:
- case GT_CNS_INT:
- return true;
- case GT_LCL_VAR:
- type = genActualType(op->TypeGet());
- if (lclvar && ((type == TYP_INT) || (type == TYP_REF) || (type == TYP_BYREF)))
+ case GT_CNS_LNG:
+ case GT_CNS_INT:
return true;
- break;
- default:
- break;
+ case GT_LCL_VAR:
+ type = genActualType(op->TypeGet());
+ if (lclvar && ((type == TYP_INT) || (type == TYP_REF) || (type == TYP_BYREF)))
+ {
+ return true;
+ }
+ break;
+ default:
+ break;
}
return false;
@@ -2731,22 +2639,20 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-inline
-bool Compiler::compCanEncodePtrArgCntMax()
+inline bool Compiler::compCanEncodePtrArgCntMax()
{
#ifdef JIT32_GCENCODER
// DDB 204533:
// The GC encoding for fully interruptible methods does not
// support more than 1023 pushed arguments, so we have to
// use a partially interruptible GC info/encoding.
- //
+ //
return (fgPtrArgCntMax < MAX_PTRARG_OFS);
#else // JIT32_GCENCODER
return true;
#endif
}
-
/*****************************************************************************
*
* Call the given function pointer for all nodes in the tree. The 'visitor'
@@ -2759,23 +2665,19 @@ bool Compiler::compCanEncodePtrArgCntMax()
* computeStack - true if we want to make stack visible to callback function
*/
-inline
-Compiler::fgWalkResult Compiler::fgWalkTreePre(GenTreePtr * pTree,
- fgWalkPreFn * visitor,
- void * callBackData,
- bool lclVarsOnly,
- bool computeStack)
+inline Compiler::fgWalkResult Compiler::fgWalkTreePre(
+ GenTreePtr* pTree, fgWalkPreFn* visitor, void* callBackData, bool lclVarsOnly, bool computeStack)
{
fgWalkData walkData;
- walkData.compiler = this;
- walkData.wtprVisitorFn = visitor;
- walkData.pCallbackData = callBackData;
- walkData.parent = NULL;
- walkData.wtprLclsOnly = lclVarsOnly;
+ walkData.compiler = this;
+ walkData.wtprVisitorFn = visitor;
+ walkData.pCallbackData = callBackData;
+ walkData.parent = nullptr;
+ walkData.wtprLclsOnly = lclVarsOnly;
#ifdef DEBUG
- walkData.printModified = false;
+ walkData.printModified = false;
#endif
fgWalkResult result;
@@ -2783,12 +2685,12 @@ Compiler::fgWalkResult Compiler::fgWalkTreePre(GenTreePtr * pTree,
{
GenTreeStack parentStack(this);
walkData.parentStack = &parentStack;
- result = fgWalkTreePreRec<true>(pTree, &walkData);
+ result = fgWalkTreePreRec<true>(pTree, &walkData);
}
else
{
- walkData.parentStack = NULL;
- result = fgWalkTreePreRec<false>(pTree, &walkData);
+ walkData.parentStack = nullptr;
+ result = fgWalkTreePreRec<false>(pTree, &walkData);
}
#ifdef DEBUG
@@ -2808,34 +2710,33 @@ Compiler::fgWalkResult Compiler::fgWalkTreePre(GenTreePtr * pTree,
*
* WALK_ABORT stop walking and return immediately
* WALK_CONTINUE continue walking
- *
+ *
* computeStack - true if we want to make stack visible to callback function
*/
-inline
-Compiler::fgWalkResult Compiler::fgWalkTreePost(GenTreePtr *pTree,
- fgWalkPostFn *visitor,
- void * callBackData,
- bool computeStack)
+inline Compiler::fgWalkResult Compiler::fgWalkTreePost(GenTreePtr* pTree,
+ fgWalkPostFn* visitor,
+ void* callBackData,
+ bool computeStack)
{
fgWalkData walkData;
walkData.compiler = this;
walkData.wtpoVisitorFn = visitor;
walkData.pCallbackData = callBackData;
- walkData.parent = NULL;
+ walkData.parent = nullptr;
fgWalkResult result;
if (computeStack)
{
GenTreeStack parentStack(this);
walkData.parentStack = &parentStack;
- result = fgWalkTreePostRec<true>(pTree, &walkData);
+ result = fgWalkTreePostRec<true>(pTree, &walkData);
}
else
{
- walkData.parentStack = NULL;
- result = fgWalkTreePostRec<false>(pTree, &walkData);
+ walkData.parentStack = nullptr;
+ result = fgWalkTreePostRec<false>(pTree, &walkData);
}
assert(result == WALK_CONTINUE || result == WALK_ABORT);
@@ -2855,43 +2756,47 @@ Compiler::fgWalkResult Compiler::fgWalkTreePost(GenTreePtr *pTree,
* overflow exception
*/
-inline
-bool Compiler::fgIsThrowHlpBlk(BasicBlock * block)
+inline bool Compiler::fgIsThrowHlpBlk(BasicBlock* block)
{
if (!fgIsCodeAdded())
+ {
return false;
+ }
if (!(block->bbFlags & BBF_INTERNAL) || block->bbJumpKind != BBJ_THROW)
+ {
return false;
+ }
- GenTreePtr call = block->bbTreeList->gtStmt.gtStmtExpr;
+ GenTreePtr call = block->bbTreeList->gtStmt.gtStmtExpr;
if (!call || (call->gtOper != GT_CALL))
+ {
return false;
+ }
- if (!((call->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_RNGCHKFAIL)) ||
+ if (!((call->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_RNGCHKFAIL)) ||
(call->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROWDIVZERO)) ||
#if COR_JIT_EE_VERSION > 460
(call->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROWNULLREF)) ||
#endif // COR_JIT_EE_VERSION
(call->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_OVERFLOW))))
+ {
return false;
+ }
// We can get to this point for blocks that we didn't create as throw helper blocks
// under stress, with crazy flow graph optimizations. So, walk the fgAddCodeList
// for the final determination.
- for (AddCodeDsc* add = fgAddCodeList; add; add = add->acdNext)
+ for (AddCodeDsc* add = fgAddCodeList; add; add = add->acdNext)
{
- if (block == add->acdDstBlk)
+ if (block == add->acdDstBlk)
{
- return add->acdKind == SCK_RNGCHK_FAIL ||
- add->acdKind == SCK_DIV_BY_ZERO ||
- add->acdKind == SCK_OVERFLOW
+ return add->acdKind == SCK_RNGCHK_FAIL || add->acdKind == SCK_DIV_BY_ZERO || add->acdKind == SCK_OVERFLOW
#if COR_JIT_EE_VERSION > 460
- || add->acdKind == SCK_ARG_EXCPN
- || add->acdKind == SCK_ARG_RNG_EXCPN
-#endif //COR_JIT_EE_VERSION
+ || add->acdKind == SCK_ARG_EXCPN || add->acdKind == SCK_ARG_RNG_EXCPN
+#endif // COR_JIT_EE_VERSION
;
}
}
@@ -2906,24 +2811,21 @@ bool Compiler::fgIsThrowHlpBlk(BasicBlock * block)
* (by calling the EE helper).
*/
-inline
-unsigned Compiler::fgThrowHlpBlkStkLevel(BasicBlock *block)
+inline unsigned Compiler::fgThrowHlpBlkStkLevel(BasicBlock* block)
{
- for (AddCodeDsc * add = fgAddCodeList; add; add = add->acdNext)
+ for (AddCodeDsc* add = fgAddCodeList; add; add = add->acdNext)
{
- if (block == add->acdDstBlk)
+ if (block == add->acdDstBlk)
{
// Compute assert cond separately as assert macro cannot have conditional compilation directives.
- bool cond = (add->acdKind == SCK_RNGCHK_FAIL ||
- add->acdKind == SCK_DIV_BY_ZERO ||
- add->acdKind == SCK_OVERFLOW
+ bool cond =
+ (add->acdKind == SCK_RNGCHK_FAIL || add->acdKind == SCK_DIV_BY_ZERO || add->acdKind == SCK_OVERFLOW
#if COR_JIT_EE_VERSION > 460
- || add->acdKind == SCK_ARG_EXCPN
- || add->acdKind == SCK_ARG_RNG_EXCPN
-#endif //COR_JIT_EE_VERSION
- );
+ || add->acdKind == SCK_ARG_EXCPN || add->acdKind == SCK_ARG_RNG_EXCPN
+#endif // COR_JIT_EE_VERSION
+ );
assert(cond);
-
+
// TODO: bbTgtStkDepth is DEBUG-only.
// Should we use it regularly and avoid this search.
assert(block->bbTgtStkDepth == add->acdStkLvl);
@@ -2931,7 +2833,8 @@ unsigned Compiler::fgThrowHlpBlkStkLevel(BasicBlock *block)
}
}
- noway_assert(!"fgThrowHlpBlkStkLevel should only be called if fgIsThrowHlpBlk() is true, but we can't find the block in the fgAddCodeList list");
+ noway_assert(!"fgThrowHlpBlkStkLevel should only be called if fgIsThrowHlpBlk() is true, but we can't find the "
+ "block in the fgAddCodeList list");
/* We couldn't find the basic block: it must not have been a throw helper block */
@@ -2942,10 +2845,10 @@ unsigned Compiler::fgThrowHlpBlkStkLevel(BasicBlock *block)
Small inline function to change a given block to a throw block.
*/
-inline void Compiler::fgConvertBBToThrowBB(BasicBlock * block)
+inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block)
{
block->bbJumpKind = BBJ_THROW;
- block->bbSetRunRarely(); // any block with a throw is rare
+ block->bbSetRunRarely(); // any block with a throw is rare
}
/*****************************************************************************
@@ -2953,28 +2856,26 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock * block)
* Return true if we've added any new basic blocks.
*/
-inline
-bool Compiler::fgIsCodeAdded()
+inline bool Compiler::fgIsCodeAdded()
{
- return fgAddCodeModf;
+ return fgAddCodeModf;
}
/*****************************************************************************
- Is the offset too big?
+ Is the offset too big?
*/
-inline
-bool Compiler::fgIsBigOffset(size_t offset) {
+inline bool Compiler::fgIsBigOffset(size_t offset)
+{
return (offset > compMaxUncheckedOffsetForNullObject);
}
/***********************************************************************************
*
-* Returns true if back-end will do other than integer division which currently occurs only
+* Returns true if back-end will do something other than integer division which currently occurs only
* if "divisor" is a positive integer constant and a power of 2 other than 1 and INT_MIN
*/
-inline
-bool Compiler::fgIsSignedDivOptimizable(GenTreePtr divisor)
+inline bool Compiler::fgIsSignedDivOptimizable(GenTreePtr divisor)
{
if (!opts.MinOpts() && divisor->IsCnsIntOrI())
{
@@ -2985,12 +2886,11 @@ bool Compiler::fgIsSignedDivOptimizable(GenTreePtr divisor)
and during codegen we need to encode ival-1 within 32 bits. If ival were INT_MIN
then ival-1 would cause underflow.
- Note that we could put #ifdef around the third check so that it is applied only on
+ Note that we could put #ifdef around the third check so that it is applied only on
64-bit platforms but the below is a more generic way to express it as it is a no-op
on 32-bit platforms.
*/
return (ival > 0 && genMaxOneBit(ival) && ((ssize_t)(int)ival == ival));
-
}
return false;
@@ -2998,12 +2898,11 @@ bool Compiler::fgIsSignedDivOptimizable(GenTreePtr divisor)
/************************************************************************************
*
-* Returns true if back-end will do other than integer division which currently occurs
+* Returns true if back-end will do something other than integer division which currently occurs
* if "divisor" is an unsigned integer constant and a power of 2 other than 1 and zero.
*/
-inline
-bool Compiler::fgIsUnsignedDivOptimizable(GenTreePtr divisor)
+inline bool Compiler::fgIsUnsignedDivOptimizable(GenTreePtr divisor)
{
if (!opts.MinOpts() && divisor->IsCnsIntOrI())
{
@@ -3016,15 +2915,13 @@ bool Compiler::fgIsUnsignedDivOptimizable(GenTreePtr divisor)
return false;
}
-
/*****************************************************************************
*
-* Returns true if back-end will do other than integer division which currently occurs
+* Returns true if back-end will do something other than integer division which currently occurs
* if "divisor" is a positive integer constant and a power of 2 other than zero
*/
-inline
-bool Compiler::fgIsSignedModOptimizable(GenTreePtr divisor)
+inline bool Compiler::fgIsSignedModOptimizable(GenTreePtr divisor)
{
if (!opts.MinOpts() && divisor->IsCnsIntOrI())
{
@@ -3037,15 +2934,13 @@ bool Compiler::fgIsSignedModOptimizable(GenTreePtr divisor)
return false;
}
-
/*****************************************************************************
*
-* Returns true if back-end will do other than integer division which currently occurs
+* Returns true if back-end will do something other than integer division which currently occurs
* if "divisor" is a positive integer constant and a power of 2 other than zero
*/
-inline
-bool Compiler::fgIsUnsignedModOptimizable(GenTreePtr divisor)
+inline bool Compiler::fgIsUnsignedModOptimizable(GenTreePtr divisor)
{
if (!opts.MinOpts() && divisor->IsCnsIntOrI())
{
@@ -3058,7 +2953,6 @@ bool Compiler::fgIsUnsignedModOptimizable(GenTreePtr divisor)
return false;
}
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -3069,11 +2963,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-
/*****************************************************************************/
-/* static */ inline
-unsigned Compiler::tmpSlot(unsigned size)
+/* static */ inline unsigned Compiler::tmpSlot(unsigned size)
{
noway_assert(size >= sizeof(int));
noway_assert(size <= TEMP_MAX_SIZE);
@@ -3089,8 +2981,7 @@ unsigned Compiler::tmpSlot(unsigned size)
* over a function body.
*/
-inline
-void Compiler::tmpEnd()
+inline void Compiler::tmpEnd()
{
#ifdef DEBUG
if (verbose && (tmpCount > 0))
@@ -3106,17 +2997,14 @@ void Compiler::tmpEnd()
* compiled.
*/
-inline
-void Compiler::tmpDone()
+inline void Compiler::tmpDone()
{
#ifdef DEBUG
unsigned count;
TempDsc* temp;
assert(tmpAllFree());
- for (temp = tmpListBeg( ), count = temp ? 1 : 0;
- temp;
- temp = tmpListNxt(temp), count += temp ? 1 : 0)
+ for (temp = tmpListBeg(), count = temp ? 1 : 0; temp; temp = tmpListNxt(temp), count += temp ? 1 : 0)
{
assert(temp->tdLegalOffset());
}
@@ -3128,14 +3016,12 @@ void Compiler::tmpDone()
}
#ifdef DEBUG
-inline
-bool Compiler::shouldUseVerboseTrees()
+inline bool Compiler::shouldUseVerboseTrees()
{
return (JitConfig.JitDumpVerboseTrees() == 1);
}
-inline
-bool Compiler::shouldUseVerboseSsa()
+inline bool Compiler::shouldUseVerboseSsa()
{
return (JitConfig.JitDumpVerboseSsa() == 1);
}
@@ -3146,8 +3032,7 @@ bool Compiler::shouldUseVerboseSsa()
// Notes:
// This is set to default to 1 in clrConfigValues.h
-inline
-bool Compiler::shouldDumpASCIITrees()
+inline bool Compiler::shouldDumpASCIITrees()
{
return (JitConfig.JitDumpASCII() == 1);
}
@@ -3159,8 +3044,7 @@ bool Compiler::shouldDumpASCIITrees()
* 2: Check-all stress. Performance will be REALLY horrible
*/
-inline
-DWORD getJitStressLevel()
+inline DWORD getJitStressLevel()
{
return JitConfig.JitStress();
}
@@ -3169,15 +3053,13 @@ DWORD getJitStressLevel()
* Should we do the strict check for non-virtual call to the virtual method?
*/
-inline
-DWORD StrictCheckForNonVirtualCallToVirtualMethod()
+inline DWORD StrictCheckForNonVirtualCallToVirtualMethod()
{
return JitConfig.JitStrictCheckForNonVirtualCallToVirtualMethod() == 1;
}
#endif // DEBUG
-
/*****************************************************************************/
/* Map a register argument number ("RegArgNum") to a register number ("RegNum").
* A RegArgNum is in this range:
@@ -3189,25 +3071,22 @@ DWORD StrictCheckForNonVirtualCallToVirtualMethod()
* we return the fixed return buffer register
*/
-inline
-regNumber genMapIntRegArgNumToRegNum(unsigned argNum)
+inline regNumber genMapIntRegArgNumToRegNum(unsigned argNum)
{
if (hasFixedRetBuffReg() && (argNum == theFixedRetBuffArgNum()))
{
return theFixedRetBuffReg();
}
- assert (argNum < ArrLen(intArgRegs));
-
+ assert(argNum < ArrLen(intArgRegs));
+
return intArgRegs[argNum];
}
-
-inline
-regNumber genMapFloatRegArgNumToRegNum(unsigned argNum)
+inline regNumber genMapFloatRegArgNumToRegNum(unsigned argNum)
{
#ifndef _TARGET_X86_
- assert (argNum < ArrLen(fltArgRegs));
+ assert(argNum < ArrLen(fltArgRegs));
return fltArgRegs[argNum];
#else
@@ -3219,9 +3098,13 @@ regNumber genMapFloatRegArgNumToRegNum(unsigned argNum)
__forceinline regNumber genMapRegArgNumToRegNum(unsigned argNum, var_types type)
{
if (varTypeIsFloating(type))
+ {
return genMapFloatRegArgNumToRegNum(argNum);
+ }
else
+ {
return genMapIntRegArgNumToRegNum(argNum);
+ }
}
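
A hedged usage sketch for the mapping above (the wrapper function below is hypothetical; only the genMapRegArgNumToRegNum calls come from this file): the caller supplies the argument slot number and its type, and the float/integer split is resolved internally.

    // Hypothetical call site, assuming the JIT headers that declare these
    // functions and the var_types enum are in scope.
    static void argRegLookupSketch()
    {
        regNumber intReg = genMapRegArgNumToRegNum(0, TYP_INT);    // intArgRegs[0]
        regNumber fltReg = genMapRegArgNumToRegNum(0, TYP_DOUBLE); // fltArgRegs[0] on non-x86 targets
        (void)intReg;
        (void)fltReg;
    }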
/*****************************************************************************/
@@ -3230,20 +3113,17 @@ __forceinline regNumber genMapRegArgNumToRegNum(unsigned argNum, var_types type)
* (for a double on ARM) is returned.
*/
-inline
-regMaskTP genMapIntRegArgNumToRegMask(unsigned argNum)
+inline regMaskTP genMapIntRegArgNumToRegMask(unsigned argNum)
{
- assert (argNum < ArrLen(intArgMasks));
-
+ assert(argNum < ArrLen(intArgMasks));
+
return intArgMasks[argNum];
}
-
-inline
-regMaskTP genMapFloatRegArgNumToRegMask(unsigned argNum)
+inline regMaskTP genMapFloatRegArgNumToRegMask(unsigned argNum)
{
#ifndef _TARGET_X86_
- assert (argNum < ArrLen(fltArgMasks));
+ assert(argNum < ArrLen(fltArgMasks));
return fltArgMasks[argNum];
#else
@@ -3254,7 +3134,7 @@ regMaskTP genMapFloatRegArgNumToRegMask(unsigned argNum)
__forceinline regMaskTP genMapArgNumToRegMask(unsigned argNum, var_types type)
{
- regMaskTP result;
+ regMaskTP result;
if (varTypeIsFloating(type))
{
result = genMapFloatRegArgNumToRegMask(argNum);
@@ -3278,28 +3158,35 @@ __forceinline regMaskTP genMapArgNumToRegMask(unsigned argNum, var_types type)
* If we have a fixed return buffer register we return theFixedRetBuffArgNum
*/
-inline
-unsigned genMapIntRegNumToRegArgNum(regNumber regNum)
+inline unsigned genMapIntRegNumToRegArgNum(regNumber regNum)
{
assert(genRegMask(regNum) & fullIntArgRegMask());
switch (regNum)
{
- case REG_ARG_0: return 0;
+ case REG_ARG_0:
+ return 0;
#if MAX_REG_ARG >= 2
- case REG_ARG_1: return 1;
+ case REG_ARG_1:
+ return 1;
#if MAX_REG_ARG >= 3
- case REG_ARG_2: return 2;
+ case REG_ARG_2:
+ return 2;
#if MAX_REG_ARG >= 4
- case REG_ARG_3: return 3;
+ case REG_ARG_3:
+ return 3;
#if MAX_REG_ARG >= 5
- case REG_ARG_4: return 4;
+ case REG_ARG_4:
+ return 4;
#if MAX_REG_ARG >= 6
- case REG_ARG_5: return 5;
+ case REG_ARG_5:
+ return 5;
#if MAX_REG_ARG >= 7
- case REG_ARG_6: return 6;
+ case REG_ARG_6:
+ return 6;
#if MAX_REG_ARG >= 8
- case REG_ARG_7: return 7;
+ case REG_ARG_7:
+ return 7;
#endif
#endif
#endif
@@ -3307,24 +3194,23 @@ unsigned genMapIntRegNumToRegArgNum(regNumber regNum)
#endif
#endif
#endif
- default:
- // Check for the Arm64 fixed return buffer argument register
- if (hasFixedRetBuffReg() && (regNum == theFixedRetBuffReg()))
- {
- return theFixedRetBuffArgNum();
- }
- else
- {
- assert(!"invalid register arg register");
- return BAD_VAR_NUM;
- }
+ default:
+ // Check for the Arm64 fixed return buffer argument register
+ if (hasFixedRetBuffReg() && (regNum == theFixedRetBuffReg()))
+ {
+ return theFixedRetBuffArgNum();
+ }
+ else
+ {
+ assert(!"invalid register arg register");
+ return BAD_VAR_NUM;
+ }
}
}
-inline
-unsigned genMapFloatRegNumToRegArgNum(regNumber regNum)
+inline unsigned genMapFloatRegNumToRegArgNum(regNumber regNum)
{
- assert (genRegMask(regNum) & RBM_FLTARG_REGS);
+ assert(genRegMask(regNum) & RBM_FLTARG_REGS);
#ifdef _TARGET_ARM_
return regNum - REG_F0;
@@ -3337,67 +3223,71 @@ unsigned genMapFloatRegNumToRegArgNum(regNumber regNum)
#if MAX_FLOAT_REG_ARG >= 1
switch (regNum)
{
- case REG_FLTARG_0: return 0;
+ case REG_FLTARG_0:
+ return 0;
#if MAX_REG_ARG >= 2
- case REG_FLTARG_1: return 1;
+ case REG_FLTARG_1:
+ return 1;
#if MAX_REG_ARG >= 3
- case REG_FLTARG_2: return 2;
+ case REG_FLTARG_2:
+ return 2;
#if MAX_REG_ARG >= 4
- case REG_FLTARG_3: return 3;
+ case REG_FLTARG_3:
+ return 3;
#if MAX_REG_ARG >= 5
- case REG_FLTARG_4: return 4;
+ case REG_FLTARG_4:
+ return 4;
#endif
#endif
#endif
#endif
- default:
- assert(!"invalid register arg register");
- return BAD_VAR_NUM;
+ default:
+ assert(!"invalid register arg register");
+ return BAD_VAR_NUM;
}
#else
- assert(!"flt reg args not allowed");
+ assert(!"flt reg args not allowed");
return BAD_VAR_NUM;
-#endif
+#endif
#endif // !arm
}
-inline
-unsigned genMapRegNumToRegArgNum(regNumber regNum, var_types type)
+inline unsigned genMapRegNumToRegArgNum(regNumber regNum, var_types type)
{
if (varTypeIsFloating(type))
+ {
return genMapFloatRegNumToRegArgNum(regNum);
+ }
else
+ {
return genMapIntRegNumToRegArgNum(regNum);
+ }
}
-
/*****************************************************************************/
/* Return a register mask with the first 'numRegs' argument registers set.
*/
-inline
-regMaskTP genIntAllRegArgMask(unsigned numRegs)
+inline regMaskTP genIntAllRegArgMask(unsigned numRegs)
{
- assert (numRegs <= MAX_REG_ARG);
+ assert(numRegs <= MAX_REG_ARG);
regMaskTP result = RBM_NONE;
- for (unsigned i=0; i<numRegs; i++)
+ for (unsigned i = 0; i < numRegs; i++)
{
result |= intArgMasks[i];
}
return result;
}
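
The accumulation in genIntAllRegArgMask is easy to model standalone; the single-bit masks below are invented stand-ins for the target-specific intArgMasks table.

    // Sketch: OR together the masks of the first numRegs argument registers.
    // numRegs is assumed to be at most 4 here.
    static const unsigned argMasksSketch[] = {0x1, 0x2, 0x4, 0x8};

    static unsigned allArgRegMaskSketch(unsigned numRegs)
    {
        unsigned result = 0;
        for (unsigned i = 0; i < numRegs; i++)
        {
            result |= argMasksSketch[i];
        }
        return result; // allArgRegMaskSketch(2) == 0x3, i.e. the first two registers
    }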
-
#if !FEATURE_STACK_FP_X87
-inline
-regMaskTP genFltAllRegArgMask(unsigned numRegs)
+inline regMaskTP genFltAllRegArgMask(unsigned numRegs)
{
- assert (numRegs <= MAX_FLOAT_REG_ARG);
+ assert(numRegs <= MAX_FLOAT_REG_ARG);
regMaskTP result = RBM_NONE;
- for (unsigned i=0; i<numRegs; i++)
+ for (unsigned i = 0; i < numRegs; i++)
{
result |= fltArgMasks[i];
}
@@ -3406,8 +3296,6 @@ regMaskTP genFltAllRegArgMask(unsigned numRegs)
#endif // !FEATURE_STACK_FP_X87
-
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -3424,15 +3312,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* in the given expression tree node.
*/
-template<bool ForCodeGen>
-inline
-void Compiler::compUpdateLife(GenTreePtr tree)
+template <bool ForCodeGen>
+inline void Compiler::compUpdateLife(GenTreePtr tree)
{
// TODO-Cleanup: We shouldn't really be calling this more than once
- if (tree == compCurLifeTree) return;
+ if (tree == compCurLifeTree)
+ {
+ return;
+ }
- if (!tree->OperIsNonPhiLocal() &&
- fgIsIndirOfAddrOfLocal(tree) == NULL)
+ if (!tree->OperIsNonPhiLocal() && fgIsIndirOfAddrOfLocal(tree) == nullptr)
{
return;
}
@@ -3440,13 +3329,12 @@ void Compiler::compUpdateLife(GenTreePtr tree)
compUpdateLifeVar<ForCodeGen>(tree);
}
-template<bool ForCodeGen>
-inline
-void Compiler::compUpdateLife(VARSET_VALARG_TP newLife)
+template <bool ForCodeGen>
+inline void Compiler::compUpdateLife(VARSET_VALARG_TP newLife)
{
if (!VarSetOps::Equal(this, compCurLife, newLife))
{
- compChangeLife<ForCodeGen>(newLife DEBUGARG(NULL));
+ compChangeLife<ForCodeGen>(newLife DEBUGARG(nullptr));
}
#ifdef DEBUG
else
@@ -3461,15 +3349,13 @@ void Compiler::compUpdateLife(VARSET_VALARG_TP newLife)
#endif // DEBUG
}
-
/*****************************************************************************
*
* We stash cookies in basic blocks for the code emitter; this call retrieves
* the cookie associated with the given basic block.
*/
-inline
-void * emitCodeGetCookie(BasicBlock *block)
+inline void* emitCodeGetCookie(BasicBlock* block)
{
assert(block);
return block->bbEmitCookie;
@@ -3485,7 +3371,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-
#if LOCAL_ASSERTION_PROP
/*****************************************************************************
@@ -3494,15 +3379,14 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* used only during local assertion prop
*/
-inline
-void Compiler::optAssertionReset(AssertionIndex limit)
+inline void Compiler::optAssertionReset(AssertionIndex limit)
{
PREFAST_ASSUME(optAssertionCount <= optMaxAssertionCount);
while (optAssertionCount > limit)
{
- AssertionIndex index = optAssertionCount;
- AssertionDsc* curAssertion = optGetAssertion(index);
+ AssertionIndex index = optAssertionCount;
+ AssertionDsc* curAssertion = optGetAssertion(index);
optAssertionCount--;
unsigned lclNum = curAssertion->op1.lcl.lclNum;
assert(lclNum < lvaTableCnt);
@@ -3510,35 +3394,33 @@ void Compiler::optAssertionReset(AssertionIndex limit)
//
// Find the Copy assertions
- //
- if ((curAssertion->assertionKind == OAK_EQUAL) &&
- (curAssertion->op1.kind == O1K_LCLVAR) &&
- (curAssertion->op2.kind == O2K_LCLVAR_COPY))
+ //
+ if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) &&
+ (curAssertion->op2.kind == O2K_LCLVAR_COPY))
{
//
// op2.lcl.lclNum no longer depends upon this assertion
- //
+ //
lclNum = curAssertion->op2.lcl.lclNum;
BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1);
}
}
while (optAssertionCount < limit)
{
- AssertionIndex index = ++optAssertionCount;
- AssertionDsc* curAssertion = optGetAssertion(index);
- unsigned lclNum = curAssertion->op1.lcl.lclNum;
+ AssertionIndex index = ++optAssertionCount;
+ AssertionDsc* curAssertion = optGetAssertion(index);
+ unsigned lclNum = curAssertion->op1.lcl.lclNum;
BitVecOps::AddElemD(apTraits, GetAssertionDep(lclNum), index - 1);
//
// Check for Copy assertions
- //
- if ((curAssertion->assertionKind == OAK_EQUAL) &&
- (curAssertion->op1.kind == O1K_LCLVAR) &&
- (curAssertion->op2.kind == O2K_LCLVAR_COPY))
+ //
+ if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) &&
+ (curAssertion->op2.kind == O2K_LCLVAR_COPY))
{
//
// op2.lcl.lclNum now depends upon this assertion
- //
+ //
lclNum = curAssertion->op2.lcl.lclNum;
BitVecOps::AddElemD(apTraits, GetAssertionDep(lclNum), index - 1);
}
@@ -3551,8 +3433,7 @@ void Compiler::optAssertionReset(AssertionIndex limit)
* used only during local assertion prop
*/
-inline
-void Compiler::optAssertionRemove(AssertionIndex index)
+inline void Compiler::optAssertionRemove(AssertionIndex index)
{
assert(index > 0);
assert(index <= optAssertionCount);
@@ -3567,7 +3448,7 @@ void Compiler::optAssertionRemove(AssertionIndex index)
// index-th entry in the table with the data found at the end of the table
// Since we are reordering the table the optAssertionDep bits need to be recreated
// using optAssertionReset(0) and optAssertionReset(newAssertionCount) will
- // correctly update the optAssertionDep bits
+ // correctly update the optAssertionDep bits
//
if (index == optAssertionCount)
{
@@ -3576,14 +3457,13 @@ void Compiler::optAssertionRemove(AssertionIndex index)
//
// Check for Copy assertions
- //
- if ((curAssertion->assertionKind == OAK_EQUAL) &&
- (curAssertion->op1.kind == O1K_LCLVAR) &&
- (curAssertion->op2.kind == O2K_LCLVAR_COPY))
+ //
+ if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) &&
+ (curAssertion->op2.kind == O2K_LCLVAR_COPY))
{
//
// op2.lcl.lclNum no longer depends upon this assertion
- //
+ //
lclNum = curAssertion->op2.lcl.lclNum;
BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1);
}
@@ -3592,13 +3472,13 @@ void Compiler::optAssertionRemove(AssertionIndex index)
}
else
{
- AssertionDsc* lastAssertion = optGetAssertion(optAssertionCount);
- AssertionIndex newAssertionCount = optAssertionCount-1;
+ AssertionDsc* lastAssertion = optGetAssertion(optAssertionCount);
+ AssertionIndex newAssertionCount = optAssertionCount - 1;
- optAssertionReset(0); // This makes optAssertionCount equal 0
+ optAssertionReset(0); // This makes optAssertionCount equal 0
- memcpy(curAssertion, // the entry to be removed
- lastAssertion, // last entry in the table
+ memcpy(curAssertion, // the entry to be removed
+ lastAssertion, // last entry in the table
sizeof(AssertionDsc));
optAssertionReset(newAssertionCount);
@@ -3606,33 +3486,32 @@ void Compiler::optAssertionRemove(AssertionIndex index)
}
#endif // LOCAL_ASSERTION_PROP
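
To keep the bookkeeping in optAssertionReset and optAssertionRemove readable: every local has a dependence bit vector, and the 1-based assertion index occupies bit index - 1, which is why both loops pass index - 1 to AddElemD/RemoveElemD. A reduced sketch with a plain 64-bit mask per local (sizes and names are invented):

    #include <cstdint>

    static uint64_t assertionDepSketch[64]; // one mask per local, all bits clear initially

    // Assertion 'index' (1-based) starts or stops mentioning local 'lclNum'.
    static void addAssertionDepSketch(unsigned lclNum, unsigned index)
    {
        assertionDepSketch[lclNum] |= uint64_t(1) << (index - 1);
    }

    static void removeAssertionDepSketch(unsigned lclNum, unsigned index)
    {
        assertionDepSketch[lclNum] &= ~(uint64_t(1) << (index - 1));
    }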
-inline
-void Compiler::LoopDsc::AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd)
+inline void Compiler::LoopDsc::AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd)
{
if (lpFieldsModified == nullptr)
{
- lpFieldsModified = new (comp->getAllocatorLoopHoist()) Compiler::LoopDsc::FieldHandleSet(comp->getAllocatorLoopHoist());
+ lpFieldsModified =
+ new (comp->getAllocatorLoopHoist()) Compiler::LoopDsc::FieldHandleSet(comp->getAllocatorLoopHoist());
}
lpFieldsModified->Set(fldHnd, true);
}
-inline
-void Compiler::LoopDsc::AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd)
+inline void Compiler::LoopDsc::AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd)
{
if (lpArrayElemTypesModified == nullptr)
{
- lpArrayElemTypesModified = new (comp->getAllocatorLoopHoist()) Compiler::LoopDsc::ClassHandleSet(comp->getAllocatorLoopHoist());
+ lpArrayElemTypesModified =
+ new (comp->getAllocatorLoopHoist()) Compiler::LoopDsc::ClassHandleSet(comp->getAllocatorLoopHoist());
}
lpArrayElemTypesModified->Set(structHnd, true);
}
-inline
-void Compiler::LoopDsc::VERIFY_lpIterTree()
+inline void Compiler::LoopDsc::VERIFY_lpIterTree()
{
#ifdef DEBUG
assert(lpFlags & LPFLG_ITER);
- //iterTree should be "lcl <op>= const"
+ // iterTree should be "lcl <op>= const"
assert(lpIterTree);
@@ -3646,14 +3525,14 @@ void Compiler::LoopDsc::VERIFY_lpIterTree()
switch (rhs->gtOper)
{
- case GT_ADD:
- case GT_SUB:
- case GT_MUL:
- case GT_RSH:
- case GT_LSH:
- break;
- default:
- assert(!"Unknown operator for loop increment");
+ case GT_ADD:
+ case GT_SUB:
+ case GT_MUL:
+ case GT_RSH:
+ case GT_LSH:
+ break;
+ default:
+ assert(!"Unknown operator for loop increment");
}
assert(rhs->gtOp.gtOp1->OperGet() == GT_LCL_VAR);
assert(rhs->gtOp.gtOp1->AsLclVarCommon()->GetLclNum() == lhs->AsLclVarCommon()->GetLclNum());
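
For orientation, the shape VERIFY_lpIterTree accepts corresponds to a loop increment such as i += 2; the annotations below are illustrative and map onto the accessors defined just after this hunk.

    // Source-level example this verifier accepts (illustrative only):
    //   for (int i = 0; i < limit; i += 2) { ... }
    // lpIterTree   : the "i += 2" assignment, i.e. "lcl <op>= const"
    // lpIterVar()  : the local number of i
    // lpIterOper() : GT_ADD (one of ADD/SUB/MUL/RSH/LSH)
    // lpIterConst(): 2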
@@ -3669,8 +3548,7 @@ void Compiler::LoopDsc::VERIFY_lpIterTree()
//-----------------------------------------------------------------------------
-inline
-unsigned Compiler::LoopDsc::lpIterVar()
+inline unsigned Compiler::LoopDsc::lpIterVar()
{
VERIFY_lpIterTree();
return lpIterTree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
@@ -3678,25 +3556,23 @@ unsigned Compiler::LoopDsc::lpIterVar()
//-----------------------------------------------------------------------------
-inline
-int Compiler::LoopDsc::lpIterConst()
+inline int Compiler::LoopDsc::lpIterConst()
{
VERIFY_lpIterTree();
if (lpIterTree->OperGet() == GT_ASG)
{
GenTreePtr rhs = lpIterTree->gtOp.gtOp2;
- return (int) rhs->gtOp.gtOp2->gtIntCon.gtIconVal;
+ return (int)rhs->gtOp.gtOp2->gtIntCon.gtIconVal;
}
else
{
- return (int) lpIterTree->gtOp.gtOp2->gtIntCon.gtIconVal;
+ return (int)lpIterTree->gtOp.gtOp2->gtIntCon.gtIconVal;
}
}
//-----------------------------------------------------------------------------
-inline
-genTreeOps Compiler::LoopDsc::lpIterOper()
+inline genTreeOps Compiler::LoopDsc::lpIterOper()
{
VERIFY_lpIterTree();
if (lpIterTree->OperGet() == GT_ASG)
@@ -3710,9 +3586,7 @@ genTreeOps Compiler::LoopDsc::lpIterOper()
}
}
-
-inline
-var_types Compiler::LoopDsc::lpIterOperType()
+inline var_types Compiler::LoopDsc::lpIterOperType()
{
VERIFY_lpIterTree();
@@ -3720,36 +3594,34 @@ var_types Compiler::LoopDsc::lpIterOperType()
assert(genActualType(type) == TYP_INT);
if ((lpIterTree->gtFlags & GTF_UNSIGNED) && type == TYP_INT)
+ {
type = TYP_UINT;
+ }
return type;
}
-
-inline
-void Compiler::LoopDsc::VERIFY_lpTestTree()
+inline void Compiler::LoopDsc::VERIFY_lpTestTree()
{
#ifdef DEBUG
assert(lpFlags & LPFLG_ITER);
assert(lpTestTree);
- genTreeOps oper = lpTestTree->OperGet();
+ genTreeOps oper = lpTestTree->OperGet();
assert(GenTree::OperIsCompare(oper));
- GenTreePtr iterator = NULL;
- GenTreePtr limit = NULL;
- if ((lpTestTree->gtOp.gtOp2->gtOper == GT_LCL_VAR)
- && (lpTestTree->gtOp.gtOp2->gtFlags & GTF_VAR_ITERATOR) != 0)
+ GenTreePtr iterator = nullptr;
+ GenTreePtr limit = nullptr;
+ if ((lpTestTree->gtOp.gtOp2->gtOper == GT_LCL_VAR) && (lpTestTree->gtOp.gtOp2->gtFlags & GTF_VAR_ITERATOR) != 0)
{
- iterator = lpTestTree->gtOp.gtOp2;
- limit = lpTestTree->gtOp.gtOp1;
+ iterator = lpTestTree->gtOp.gtOp2;
+ limit = lpTestTree->gtOp.gtOp1;
}
- else
- if ((lpTestTree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
- && (lpTestTree->gtOp.gtOp1->gtFlags & GTF_VAR_ITERATOR) != 0)
+ else if ((lpTestTree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
+ (lpTestTree->gtOp.gtOp1->gtFlags & GTF_VAR_ITERATOR) != 0)
{
- iterator = lpTestTree->gtOp.gtOp1;
- limit = lpTestTree->gtOp.gtOp2;
+ iterator = lpTestTree->gtOp.gtOp1;
+ limit = lpTestTree->gtOp.gtOp2;
}
else
{
@@ -3758,61 +3630,59 @@ void Compiler::LoopDsc::VERIFY_lpTestTree()
}
if (lpFlags & LPFLG_CONST_LIMIT)
+ {
assert(limit->OperIsConst());
+ }
if (lpFlags & LPFLG_VAR_LIMIT)
+ {
assert(limit->OperGet() == GT_LCL_VAR);
+ }
if (lpFlags & LPFLG_ARRLEN_LIMIT)
+ {
assert(limit->OperGet() == GT_ARR_LENGTH);
+ }
#endif
}
//-----------------------------------------------------------------------------
-inline
-bool Compiler::LoopDsc::lpIsReversed()
+inline bool Compiler::LoopDsc::lpIsReversed()
{
VERIFY_lpTestTree();
- return ((lpTestTree->gtOp.gtOp2->gtOper == GT_LCL_VAR)
- && (lpTestTree->gtOp.gtOp2->gtFlags & GTF_VAR_ITERATOR) != 0);
+ return ((lpTestTree->gtOp.gtOp2->gtOper == GT_LCL_VAR) &&
+ (lpTestTree->gtOp.gtOp2->gtFlags & GTF_VAR_ITERATOR) != 0);
}
//-----------------------------------------------------------------------------
-inline
-genTreeOps Compiler::LoopDsc::lpTestOper()
+inline genTreeOps Compiler::LoopDsc::lpTestOper()
{
VERIFY_lpTestTree();
genTreeOps op = lpTestTree->OperGet();
- return lpIsReversed() ?
- GenTree::SwapRelop(op) : op;
+ return lpIsReversed() ? GenTree::SwapRelop(op) : op;
}
//-----------------------------------------------------------------------------
-inline
-GenTreePtr Compiler::LoopDsc::lpIterator()
+inline GenTreePtr Compiler::LoopDsc::lpIterator()
{
VERIFY_lpTestTree();
- return lpIsReversed() ?
- lpTestTree->gtOp.gtOp2 : lpTestTree->gtOp.gtOp1;
+ return lpIsReversed() ? lpTestTree->gtOp.gtOp2 : lpTestTree->gtOp.gtOp1;
}
//-----------------------------------------------------------------------------
-inline
-GenTreePtr Compiler::LoopDsc::lpLimit()
+inline GenTreePtr Compiler::LoopDsc::lpLimit()
{
VERIFY_lpTestTree();
- return lpIsReversed() ?
- lpTestTree->gtOp.gtOp1 : lpTestTree->gtOp.gtOp2;
+ return lpIsReversed() ? lpTestTree->gtOp.gtOp1 : lpTestTree->gtOp.gtOp2;
}
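
Similarly for the test-tree accessors just reformatted: lpIsReversed only records which operand carries the iterator flag, and lpTestOper/lpIterator/lpLimit normalize both spellings of the same condition. A worked example (illustrative):

    //   while (i < n)  -> test tree GT_LT(i, n): iterator on op1, not reversed
    //   while (n > i)  -> test tree GT_GT(n, i): iterator on op2, reversed
    // In both cases lpTestOper() reports GT_LT (SwapRelop flips GT_GT),
    // lpIterator() returns the node for i, and lpLimit() the node for n.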
//-----------------------------------------------------------------------------
-inline
-int Compiler::LoopDsc::lpConstLimit()
+inline int Compiler::LoopDsc::lpConstLimit()
{
VERIFY_lpTestTree();
assert(lpFlags & LPFLG_CONST_LIMIT);
@@ -3824,8 +3694,7 @@ int Compiler::LoopDsc::lpConstLimit()
//-----------------------------------------------------------------------------
-inline
-unsigned Compiler::LoopDsc::lpVarLimit()
+inline unsigned Compiler::LoopDsc::lpVarLimit()
{
VERIFY_lpTestTree();
assert(lpFlags & LPFLG_VAR_LIMIT);
@@ -3837,8 +3706,7 @@ unsigned Compiler::LoopDsc::lpVarLimit()
//-----------------------------------------------------------------------------
-inline
-bool Compiler::LoopDsc::lpArrLenLimit(Compiler* comp, ArrIndex* index)
+inline bool Compiler::LoopDsc::lpArrLenLimit(Compiler* comp, ArrIndex* index)
{
VERIFY_lpTestTree();
assert(lpFlags & LPFLG_ARRLEN_LIMIT);
@@ -3850,7 +3718,7 @@ bool Compiler::LoopDsc::lpArrLenLimit(Compiler* comp, ArrIndex* index
if (limit->gtArrLen.ArrRef()->gtOper == GT_LCL_VAR)
{
index->arrLcl = limit->gtArrLen.ArrRef()->gtLclVarCommon.gtLclNum;
- index->rank = 0;
+ index->rank = 0;
return true;
}
// We have a[i].length, extract a[i] pattern.
@@ -3865,34 +3733,33 @@ bool Compiler::LoopDsc::lpArrLenLimit(Compiler* comp, ArrIndex* index
* Is "var" assigned in the loop "lnum" ?
*/
-inline
-bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var)
+inline bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var)
{
assert(lnum < optLoopCount);
- if (var < lclMAX_ALLSET_TRACKED)
+ if (var < lclMAX_ALLSET_TRACKED)
{
ALLVARSET_TP ALLVARSET_INIT_NOCOPY(vs, AllVarSetOps::MakeSingleton(this, var));
- return optIsSetAssgLoop(lnum, vs) != 0;
+ return optIsSetAssgLoop(lnum, vs) != 0;
}
else
- return optIsVarAssigned(optLoopTable[lnum].lpHead->bbNext,
- optLoopTable[lnum].lpBottom,
- 0,
- var);
+ {
+ return optIsVarAssigned(optLoopTable[lnum].lpHead->bbNext, optLoopTable[lnum].lpBottom, nullptr, var);
+ }
}
/*****************************************************************************
* If the tree is a tracked local variable, return its LclVarDsc ptr.
*/
-inline
-LclVarDsc * Compiler::optIsTrackedLocal(GenTreePtr tree)
+inline LclVarDsc* Compiler::optIsTrackedLocal(GenTreePtr tree)
{
- LclVarDsc * varDsc;
- unsigned lclNum;
+ LclVarDsc* varDsc;
+ unsigned lclNum;
if (tree->gtOper != GT_LCL_VAR)
- return NULL;
+ {
+ return nullptr;
+ }
lclNum = tree->gtLclVarCommon.gtLclNum;
@@ -3900,8 +3767,10 @@ LclVarDsc * Compiler::optIsTrackedLocal(GenTreePtr tree)
varDsc = lvaTable + lclNum;
/* if variable not tracked, return NULL */
- if (!varDsc->lvTracked)
- return NULL;
+ if (!varDsc->lvTracked)
+ {
+ return nullptr;
+ }
return varDsc;
}
@@ -3919,35 +3788,30 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// are we compiling for fast code, or are we compiling for blended code and
// inside a loop?
// We return true for BLENDED_CODE if the Block executes more than BB_LOOP_WEIGHT/2
-inline
-bool Compiler::optFastCodeOrBlendedLoop(BasicBlock::weight_t bbWeight)
+inline bool Compiler::optFastCodeOrBlendedLoop(BasicBlock::weight_t bbWeight)
{
return (compCodeOpt() == FAST_CODE) ||
- ((compCodeOpt() == BLENDED_CODE) && (bbWeight > (BB_LOOP_WEIGHT/2*BB_UNITY_WEIGHT)));
+ ((compCodeOpt() == BLENDED_CODE) && (bbWeight > (BB_LOOP_WEIGHT / 2 * BB_UNITY_WEIGHT)));
}
// are we running on a Intel Pentium 4?
-inline
-bool Compiler::optPentium4(void)
+inline bool Compiler::optPentium4(void)
{
return (info.genCPU == CPU_X86_PENTIUM_4);
}
// should we use add/sub instead of inc/dec? (faster on P4, but increases size)
-inline
-bool Compiler::optAvoidIncDec(BasicBlock::weight_t bbWeight)
+inline bool Compiler::optAvoidIncDec(BasicBlock::weight_t bbWeight)
{
return optPentium4() && optFastCodeOrBlendedLoop(bbWeight);
}
// should we try to replace integer multiplication with lea/add/shift sequences?
-inline
-bool Compiler::optAvoidIntMult(void)
+inline bool Compiler::optAvoidIntMult(void)
{
return (compCodeOpt() != SMALL_CODE);
}
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -3958,37 +3822,35 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-extern var_types JITtype2varType(CorInfoType type);
+extern var_types JITtype2varType(CorInfoType type);
#include "ee_il_dll.hpp"
-inline
-CORINFO_METHOD_HANDLE Compiler::eeFindHelper (unsigned helper)
+inline CORINFO_METHOD_HANDLE Compiler::eeFindHelper(unsigned helper)
{
assert(helper < CORINFO_HELP_COUNT);
/* Helpers are marked by the fact that they are odd numbers
* force this to be an odd number (will shift it back to extract) */
- return((CORINFO_METHOD_HANDLE)(size_t) ((helper << 2) + 1));
+ return ((CORINFO_METHOD_HANDLE)(size_t)((helper << 2) + 1));
}
-inline
-CorInfoHelpFunc Compiler::eeGetHelperNum (CORINFO_METHOD_HANDLE method)
+inline CorInfoHelpFunc Compiler::eeGetHelperNum(CORINFO_METHOD_HANDLE method)
{
// Helpers are marked by the fact that they are odd numbers
- if (!(((size_t) method) & 1))
- return(CORINFO_HELP_UNDEF);
- return((CorInfoHelpFunc) (((size_t) method) >> 2));
+ if (!(((size_t)method) & 1))
+ {
+ return (CORINFO_HELP_UNDEF);
+ }
+ return ((CorInfoHelpFunc)(((size_t)method) >> 2));
}
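
The encoding behind eeFindHelper and eeGetHelperNum is worth spelling out: the helper number is shifted left by two and tagged with the low bit, so the result is odd and can never be mistaken for a genuine, pointer-aligned method handle. A minimal sketch of the round trip using plain integers instead of CORINFO handles:

    #include <cassert>
    #include <cstddef>

    static size_t encodeHelperSketch(unsigned helper)
    {
        return ((size_t)helper << 2) + 1; // always odd
    }

    static unsigned decodeHelperSketch(size_t handle)
    {
        assert((handle & 1) != 0); // only helper "handles" are odd
        return (unsigned)(handle >> 2);
    }
    // decodeHelperSketch(encodeHelperSketch(42)) == 42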
-inline
-Compiler::fgWalkResult Compiler::CountSharedStaticHelper(GenTreePtr *pTree,
- fgWalkData *data)
+inline Compiler::fgWalkResult Compiler::CountSharedStaticHelper(GenTreePtr* pTree, fgWalkData* data)
{
if (Compiler::IsSharedStaticHelper(*pTree))
{
- int* pCount = (int*) data->pCallbackData;
+ int* pCount = (int*)data->pCallbackData;
(*pCount)++;
}
@@ -3998,45 +3860,41 @@ Compiler::fgWalkResult Compiler::CountSharedStaticHelper(GenTreePtr *pTree,
// TODO-Cleanup: Replace calls to IsSharedStaticHelper with new HelperCallProperties
//
-inline bool Compiler::IsSharedStaticHelper (GenTreePtr tree)
+inline bool Compiler::IsSharedStaticHelper(GenTreePtr tree)
{
- if (tree->gtOper != GT_CALL ||
- tree->gtCall.gtCallType != CT_HELPER)
+ if (tree->gtOper != GT_CALL || tree->gtCall.gtCallType != CT_HELPER)
{
return false;
}
CorInfoHelpFunc helper = eeGetHelperNum(tree->gtCall.gtCallMethHnd);
- bool result1 =
- // More helpers being added to IsSharedStaticHelper (that have similar behaviors but are not true SharedStaticHelpers)
- helper == CORINFO_HELP_STRCNS ||
- helper == CORINFO_HELP_BOX ||
-
- // helpers being added to IsSharedStaticHelper
- helper == CORINFO_HELP_GETSTATICFIELDADDR_CONTEXT ||
- helper == CORINFO_HELP_GETSTATICFIELDADDR_TLS ||
- helper == CORINFO_HELP_GETGENERICS_GCSTATIC_BASE ||
- helper == CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE ||
- helper == CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE ||
- helper == CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE ||
-
- helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE ||
- helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE ||
- helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR ||
- helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR ||
- helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS ||
- helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS ||
- helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE ||
- helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE ||
- helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR ||
- helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR ||
- helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS ||
- helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS ||
+ bool result1 =
+ // More helpers being added to IsSharedStaticHelper (that have similar behaviors but are not true
+ // ShareStaticHelperts)
+        // SharedStaticHelpers)
+ helper == CORINFO_HELP_STRCNS || helper == CORINFO_HELP_BOX ||
+
+ // helpers being added to IsSharedStaticHelper
+ helper == CORINFO_HELP_GETSTATICFIELDADDR_CONTEXT || helper == CORINFO_HELP_GETSTATICFIELDADDR_TLS ||
+ helper == CORINFO_HELP_GETGENERICS_GCSTATIC_BASE || helper == CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE ||
+ helper == CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE ||
+ helper == CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE ||
+
+ helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE || helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE ||
+ helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR ||
+ helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR ||
+ helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS ||
+ helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS ||
+ helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE ||
+ helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE ||
+ helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR ||
+ helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR ||
+ helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS ||
+ helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS ||
#ifdef FEATURE_READYTORUN_COMPILER
- helper == CORINFO_HELP_READYTORUN_STATIC_BASE ||
+ helper == CORINFO_HELP_READYTORUN_STATIC_BASE ||
#endif
- helper == CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS;
+ helper == CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS;
#if 0
// See above TODO-Cleanup
bool result2 = s_helperCallProperties.IsPure(helper) && s_helperCallProperties.NonNullReturn(helper);
@@ -4045,12 +3903,11 @@ inline bool Compiler::IsSharedStaticHelper (GenTreePtr tree)
return result1;
}
-inline bool Compiler::IsTreeAlwaysHoistable (GenTreePtr tree)
+inline bool Compiler::IsTreeAlwaysHoistable(GenTreePtr tree)
{
if (IsSharedStaticHelper(tree))
{
- return (GTF_CALL_HOISTABLE & tree->gtFlags)?true:false;
-
+ return (GTF_CALL_HOISTABLE & tree->gtFlags) ? true : false;
}
else
{
@@ -4065,39 +3922,38 @@ inline bool Compiler::IsTreeAlwaysHoistable (GenTreePtr tree)
// The special values that we use are FLD_GLOBAL_DS and FLD_GLOBAL_FS
//
-inline
-bool jitStaticFldIsGlobAddr(CORINFO_FIELD_HANDLE fldHnd)
+inline bool jitStaticFldIsGlobAddr(CORINFO_FIELD_HANDLE fldHnd)
{
return (fldHnd == FLD_GLOBAL_DS || fldHnd == FLD_GLOBAL_FS);
}
#if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD)
-inline
-bool Compiler::eeIsNativeMethod (CORINFO_METHOD_HANDLE method)
+inline bool Compiler::eeIsNativeMethod(CORINFO_METHOD_HANDLE method)
{
return ((((size_t)method) & 0x2) == 0x2);
}
-inline
-CORINFO_METHOD_HANDLE Compiler::eeGetMethodHandleForNative (CORINFO_METHOD_HANDLE method)
+inline CORINFO_METHOD_HANDLE Compiler::eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method)
{
- assert ((((size_t)method)& 0x3) == 0x2);
- return (CORINFO_METHOD_HANDLE)(((size_t)method)& ~0x3);
+ assert((((size_t)method) & 0x3) == 0x2);
+ return (CORINFO_METHOD_HANDLE)(((size_t)method) & ~0x3);
}
#endif
-inline
-CORINFO_METHOD_HANDLE Compiler::eeMarkNativeTarget (CORINFO_METHOD_HANDLE method)
+inline CORINFO_METHOD_HANDLE Compiler::eeMarkNativeTarget(CORINFO_METHOD_HANDLE method)
{
- assert ((((size_t)method)& 0x3) == 0);
- if (method == NULL)
+ assert((((size_t)method) & 0x3) == 0);
+ if (method == nullptr)
+ {
return method;
+ }
else
- return (CORINFO_METHOD_HANDLE)(((size_t)method)| 0x2);
+ {
+ return (CORINFO_METHOD_HANDLE)(((size_t)method) | 0x2);
+ }
}
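
eeMarkNativeTarget and eeGetMethodHandleForNative use the same low-bit trick with 0x2: genuine handles are at least 4-byte aligned, so setting bit 1 marks a native target and masking the low bits off recovers the original handle. A minimal sketch over raw size_t values (not real handles):

    #include <cassert>
    #include <cstddef>

    static size_t markNativeSketch(size_t handle)
    {
        assert((handle & 0x3) == 0); // aligned handle, low bits free for tagging
        return handle | 0x2;
    }

    static size_t unmarkNativeSketch(size_t handle)
    {
        assert((handle & 0x3) == 0x2); // only marked handles come through here
        return handle & ~(size_t)0x3;
    }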
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -4108,19 +3964,14 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-
#ifndef DEBUG
-inline
-bool Compiler::compStressCompile(compStressArea stressArea,
- unsigned weightPercentage)
+inline bool Compiler::compStressCompile(compStressArea stressArea, unsigned weightPercentage)
{
return false;
}
#endif
-
-inline
-ArenaAllocator * Compiler::compGetAllocator()
+inline ArenaAllocator* Compiler::compGetAllocator()
{
return compAllocator;
}
@@ -4133,8 +3984,7 @@ ArenaAllocator * Compiler::compGetAllocator()
#ifndef DEBUG
-inline
-void * Compiler::compGetMem(size_t sz, CompMemKind cmk)
+inline void* Compiler::compGetMem(size_t sz, CompMemKind cmk)
{
assert(sz);
@@ -4142,7 +3992,7 @@ void * Compiler::compGetMem(size_t sz, CompMemKind cmk)
genMemStats.AddAlloc(sz, cmk);
#endif
- return compAllocator->allocateMemory(sz);
+ return compAllocator->allocateMemory(sz);
}
#endif
@@ -4162,10 +4012,9 @@ void * Compiler::compGetMem(size_t sz, CompMemKind cmk)
* is why we __forceinline).
*/
-#define MAX_MEMORY_PER_ALLOCATION (512*1024*1024)
+#define MAX_MEMORY_PER_ALLOCATION (512 * 1024 * 1024)
-__forceinline
-void * Compiler::compGetMemArray(size_t numElem, size_t elemSize, CompMemKind cmk)
+__forceinline void* Compiler::compGetMemArray(size_t numElem, size_t elemSize, CompMemKind cmk)
{
if (numElem > (MAX_MEMORY_PER_ALLOCATION / elemSize))
{
@@ -4175,8 +4024,7 @@ void * Compiler::compGetMemArray(size_t numElem, size_t elemSiz
return compGetMem(numElem * elemSize, cmk);
}
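
The guard in compGetMemArray, and in the compGetMemArrayA variant that follows, is the standard multiplication-overflow defence: comparing numElem against MAX_MEMORY_PER_ALLOCATION / elemSize rejects oversized requests before numElem * elemSize can wrap. A self-contained sketch (what the elided body does on failure is not shown in this hunk; the nullptr return below is only a stand-in):

    #include <cstddef>
    #include <cstdlib>

    static const size_t kMaxPerAllocationSketch = 512 * 1024 * 1024;

    static void* allocArraySketch(size_t numElem, size_t elemSize)
    {
        // Reject before multiplying so the size computation cannot overflow.
        if (elemSize == 0 || numElem > (kMaxPerAllocationSketch / elemSize))
        {
            return nullptr; // stand-in for the JIT's out-of-memory bail-out
        }
        return malloc(numElem * elemSize);
    }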
-__forceinline
-void * Compiler::compGetMemArrayA(size_t numElem, size_t elemSize, CompMemKind cmk)
+__forceinline void* Compiler::compGetMemArrayA(size_t numElem, size_t elemSize, CompMemKind cmk)
{
if (numElem > (MAX_MEMORY_PER_ALLOCATION / elemSize))
{
@@ -4193,8 +4041,7 @@ void * Compiler::compGetMemArrayA(size_t numElem, size_t elemSi
* The JIT will always try to keep all the blocks aligned.
*/
-inline
-void * Compiler::compGetMemA(size_t sz, CompMemKind cmk)
+inline void* Compiler::compGetMemA(size_t sz, CompMemKind cmk)
{
assert(sz);
@@ -4204,7 +4051,7 @@ void * Compiler::compGetMemA(size_t sz, CompMemKind cmk)
genMemStats.AddAlloc(allocSz, cmk);
#endif
- void * ptr = compAllocator->allocateMemory(allocSz);
+ void* ptr = compAllocator->allocateMemory(allocSz);
// Verify that the current block is aligned. Only then will the next
// block allocated be on an aligned boundary.
@@ -4213,31 +4060,28 @@ void * Compiler::compGetMemA(size_t sz, CompMemKind cmk)
return ptr;
}
-inline
-void Compiler::compFreeMem(void * ptr)
-{}
-
-#define compFreeMem(ptr) compFreeMem((void *)ptr)
+inline void Compiler::compFreeMem(void* ptr)
+{
+}
+#define compFreeMem(ptr) compFreeMem((void*)ptr)
-inline
-bool Compiler::compIsProfilerHookNeeded()
+inline bool Compiler::compIsProfilerHookNeeded()
{
#ifdef PROFILING_SUPPORTED
- return compProfilerHookNeeded
+ return compProfilerHookNeeded
#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
- // IL stubs are excluded by VM and we need to do the same even running
- // under a complus env hook to generate profiler hooks
- || (opts.compJitELTHookEnabled && !(opts.eeFlags & CORJIT_FLG_IL_STUB))
+ // IL stubs are excluded by VM and we need to do the same even running
+ // under a complus env hook to generate profiler hooks
+ || (opts.compJitELTHookEnabled && !(opts.eeFlags & CORJIT_FLG_IL_STUB))
#endif
;
-#else //PROFILING_SUPPORTED
+#else // PROFILING_SUPPORTED
return false;
-#endif
+#endif
}
-
/*****************************************************************************
*
* Check for the special case where the object is the constant 0.
@@ -4246,28 +4090,29 @@ bool Compiler::compIsProfilerHookNeeded()
* We simply grab a temp and assign 0 to it and use it in place of the NULL.
*/
-inline
-GenTreePtr Compiler::impCheckForNullPointer(GenTreePtr obj)
+inline GenTreePtr Compiler::impCheckForNullPointer(GenTreePtr obj)
{
/* If it is not a GC type, we will be able to fold it.
So don't need to do anything */
if (!varTypeIsGC(obj->TypeGet()))
+ {
return obj;
+ }
if (obj->gtOper == GT_CNS_INT)
{
assert(obj->gtType == TYP_REF || obj->gtType == TYP_BYREF);
- assert (obj->gtIntCon.gtIconVal == 0);
+ assert(obj->gtIntCon.gtIconVal == 0);
unsigned tmp = lvaGrabTemp(true DEBUGARG("CheckForNullPointer"));
// We don't need to spill while appending as we are only assigning
// NULL to a freshly-grabbed temp.
- impAssignTempGen (tmp, obj, (unsigned)CHECK_SPILL_NONE);
+ impAssignTempGen(tmp, obj, (unsigned)CHECK_SPILL_NONE);
- obj = gtNewLclvNode (tmp, obj->gtType);
+ obj = gtNewLclvNode(tmp, obj->gtType);
}
return obj;
@@ -4280,18 +4125,15 @@ GenTreePtr Compiler::impCheckForNullPointer(GenTreePtr obj)
* even if we might have created the copy of 'this' pointer in lvaArg0Var.
*/
-inline
-bool Compiler::impIsThis(GenTreePtr obj)
+inline bool Compiler::impIsThis(GenTreePtr obj)
{
if (compIsForInlining())
{
return impInlineInfo->InlinerCompiler->impIsThis(obj);
}
else
- {
- return ((obj != NULL) &&
- (obj->gtOper == GT_LCL_VAR) &&
- lvaIsOriginalThisArg(obj->gtLclVarCommon.gtLclNum));
+ {
+ return ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR) && lvaIsOriginalThisArg(obj->gtLclVarCommon.gtLclNum));
}
}
@@ -4300,13 +4142,11 @@ bool Compiler::impIsThis(GenTreePtr obj)
* Check to see if the delegate is created using "LDFTN <TOK>" or not.
*/
-inline
-bool Compiler::impIsLDFTN_TOKEN(const BYTE * delegateCreateStart, const BYTE * newobjCodeAddr)
+inline bool Compiler::impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr)
{
assert(newobjCodeAddr[0] == CEE_NEWOBJ);
- return (newobjCodeAddr - delegateCreateStart == 6 && // LDFTN <TOK> takes 6 bytes
- delegateCreateStart[0] == CEE_PREFIX1 &&
- delegateCreateStart[1] == (CEE_LDFTN & 0xFF));
+ return (newobjCodeAddr - delegateCreateStart == 6 && // LDFTN <TOK> takes 6 bytes
+ delegateCreateStart[0] == CEE_PREFIX1 && delegateCreateStart[1] == (CEE_LDFTN & 0xFF));
}
/*****************************************************************************
@@ -4314,13 +4154,11 @@ bool Compiler::impIsLDFTN_TOKEN(const BYTE * delegateCreateStart,
* Check to see if the delegate is created using "DUP LDVIRTFTN <TOK>" or not.
*/
-inline
-bool Compiler::impIsDUP_LDVIRTFTN_TOKEN(const BYTE * delegateCreateStart, const BYTE * newobjCodeAddr)
+inline bool Compiler::impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr)
{
assert(newobjCodeAddr[0] == CEE_NEWOBJ);
- return (newobjCodeAddr - delegateCreateStart == 7 && // DUP LDVIRTFTN <TOK> takes 6 bytes
- delegateCreateStart[0] == CEE_DUP &&
- delegateCreateStart[1] == CEE_PREFIX1 &&
+ return (newobjCodeAddr - delegateCreateStart == 7 && // DUP LDVIRTFTN <TOK> takes 6 bytes
+ delegateCreateStart[0] == CEE_DUP && delegateCreateStart[1] == CEE_PREFIX1 &&
delegateCreateStart[2] == (CEE_LDVIRTFTN & 0xFF));
}
/*****************************************************************************
@@ -4328,11 +4166,9 @@ bool Compiler::impIsDUP_LDVIRTFTN_TOKEN(const BYTE * delegateCrea
* Returns true if the compiler instance is created for import only (verification).
*/
-inline
-bool Compiler::compIsForImportOnly()
+inline bool Compiler::compIsForImportOnly()
{
return ((opts.eeFlags & CORJIT_FLG_IMPORT_ONLY) != 0);
-
}
/*****************************************************************************
@@ -4340,10 +4176,9 @@ bool Compiler::compIsForImportOnly()
* Returns true if the compiler instance is created for inlining.
*/
-inline
-bool Compiler::compIsForInlining()
+inline bool Compiler::compIsForInlining()
{
- return (impInlineInfo != NULL);
+ return (impInlineInfo != nullptr);
}
/*****************************************************************************
@@ -4351,13 +4186,12 @@ bool Compiler::compIsForInlining()
* Check the inline result field in the compiler to see if inlining failed or not.
*/
-inline
-bool Compiler::compDonotInline()
+inline bool Compiler::compDonotInline()
{
if (compIsForInlining())
{
- assert(compInlineResult != nullptr);
- return compInlineResult->IsFailure();
+ assert(compInlineResult != nullptr);
+ return compInlineResult->IsFailure();
}
else
{
@@ -4365,23 +4199,20 @@ bool Compiler::compDonotInline()
}
}
-inline
-bool Compiler::impIsPrimitive(CorInfoType jitType)
+inline bool Compiler::impIsPrimitive(CorInfoType jitType)
{
- return ((CORINFO_TYPE_BOOL <= jitType && jitType <= CORINFO_TYPE_DOUBLE) ||
- jitType == CORINFO_TYPE_PTR);
+ return ((CORINFO_TYPE_BOOL <= jitType && jitType <= CORINFO_TYPE_DOUBLE) || jitType == CORINFO_TYPE_PTR);
}
/*****************************************************************************
*
- * Get the promotion type of a struct local.
+ * Get the promotion type of a struct local.
*/
-inline
-Compiler::lvaPromotionType Compiler::lvaGetPromotionType (const LclVarDsc * varDsc)
-{
+inline Compiler::lvaPromotionType Compiler::lvaGetPromotionType(const LclVarDsc* varDsc)
+{
assert(!varDsc->lvPromoted || varTypeIsPromotable(varDsc) || varDsc->lvUnusedStruct);
-
+
if (!varDsc->lvPromoted)
{
// no struct promotion for this LclVar
@@ -4398,7 +4229,7 @@ Compiler::lvaPromotionType Compiler::lvaGetPromotionType (const LclVarDsc *
return PROMOTION_TYPE_INDEPENDENT;
}
- // Has struct promotion for arguments been disabled using COMPlus_JitNoStructPromotion=2
+ // Has struct promotion for arguments been disabled using COMPlus_JitNoStructPromotion=2
if (fgNoStructParamPromotion)
{
// The struct parameter is not enregistered
@@ -4416,19 +4247,17 @@ Compiler::lvaPromotionType Compiler::lvaGetPromotionType (const LclVarDsc *
// The struct parameter is not enregistered
return PROMOTION_TYPE_DEPENDENT;
#endif
-
}
/*****************************************************************************
*
- * Get the promotion type of a struct local.
+ * Get the promotion type of a struct local.
*/
-inline
-Compiler::lvaPromotionType Compiler::lvaGetPromotionType (unsigned varNum)
-{
+inline Compiler::lvaPromotionType Compiler::lvaGetPromotionType(unsigned varNum)
+{
assert(varNum < lvaCount);
- return lvaGetPromotionType(&lvaTable[varNum]);
+ return lvaGetPromotionType(&lvaTable[varNum]);
}
/*****************************************************************************
@@ -4436,9 +4265,8 @@ Compiler::lvaPromotionType Compiler::lvaGetPromotionType (unsigned varNum)
* Given a field local, get the promotion type of its parent struct local.
*/
-inline
-Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType (const LclVarDsc * varDsc)
-{
+inline Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType(const LclVarDsc* varDsc)
+{
assert(varDsc->lvIsStructField);
assert(varDsc->lvParentLcl < lvaCount);
@@ -4452,11 +4280,10 @@ Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType (const LclVarDs
* Given a field local, get the promotion type of its parent struct local.
*/
-inline
-Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType (unsigned varNum)
-{
+inline Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType(unsigned varNum)
+{
assert(varNum < lvaCount);
- return lvaGetParentPromotionType(&lvaTable[varNum]);
+ return lvaGetParentPromotionType(&lvaTable[varNum]);
}
/*****************************************************************************
@@ -4465,21 +4292,23 @@ Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType (unsigned varNu
* Return false otherwise.
*/
-inline
-bool Compiler::lvaIsFieldOfDependentlyPromotedStruct (const LclVarDsc * varDsc)
-{
+inline bool Compiler::lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc)
+{
if (!varDsc->lvIsStructField)
+ {
return false;
+ }
lvaPromotionType promotionType = lvaGetParentPromotionType(varDsc);
if (promotionType == PROMOTION_TYPE_DEPENDENT)
+ {
return true;
+ }
assert(promotionType == PROMOTION_TYPE_INDEPENDENT);
return false;
}
-
//------------------------------------------------------------------------
// lvaIsGCTracked: Determine whether this var should be reported
// as tracked for GC purposes.
@@ -4502,14 +4331,13 @@ bool Compiler::lvaIsFieldOfDependentlyPromotedStruct (const LclVarDsc * varD
// but there was too much logic that depends on these being untracked, so changing
// this would require non-trivial effort.
-inline
-bool Compiler::lvaIsGCTracked(const LclVarDsc* varDsc)
+inline bool Compiler::lvaIsGCTracked(const LclVarDsc* varDsc)
{
- if(varDsc->lvTracked && (varDsc->lvType == TYP_REF || varDsc->lvType == TYP_BYREF))
+ if (varDsc->lvTracked && (varDsc->lvType == TYP_REF || varDsc->lvType == TYP_BYREF))
{
#ifdef _TARGET_AMD64_
return !lvaIsFieldOfDependentlyPromotedStruct(varDsc);
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
return true;
#endif // !_TARGET_AMD64_
}
@@ -4517,13 +4345,13 @@ bool Compiler::lvaIsGCTracked(const LclVarDsc* varDsc)
{
return false;
}
-
}
inline void Compiler::EndPhase(Phases phase)
{
#if defined(FEATURE_JIT_METHOD_PERF)
- if (pCompJitTimer != NULL) pCompJitTimer->EndPhase(phase);
+ if (pCompJitTimer != NULL)
+ pCompJitTimer->EndPhase(phase);
#endif
#if DUMP_FLOWGRAPHS
fgDumpFlowGraph(phase);
@@ -4532,30 +4360,29 @@ inline void Compiler::EndPhase(Phases phase)
#ifdef DEBUG
if (dumpIR)
{
- if ((*dumpIRPhase == L'*')
- || (wcscmp(dumpIRPhase, PhaseShortNames[phase]) == 0))
- {
- printf("\n");
- printf("IR after %s (switch: %ls)\n", PhaseEnums[phase], PhaseShortNames[phase]);
- printf("\n");
-
- if (dumpIRLinear)
- {
- dFuncIR();
- }
- else if (dumpIRTrees)
- {
- dTrees();
- }
-
- // If we are just dumping a single method and we have a request to exit
- // after dumping, do so now.
-
- if (dumpIRExit && ((*dumpIRPhase != L'*') || (phase == PHASE_EMIT_GCEH)))
- {
- exit(0);
- }
- }
+ if ((*dumpIRPhase == L'*') || (wcscmp(dumpIRPhase, PhaseShortNames[phase]) == 0))
+ {
+ printf("\n");
+ printf("IR after %s (switch: %ls)\n", PhaseEnums[phase], PhaseShortNames[phase]);
+ printf("\n");
+
+ if (dumpIRLinear)
+ {
+ dFuncIR();
+ }
+ else if (dumpIRTrees)
+ {
+ dTrees();
+ }
+
+ // If we are just dumping a single method and we have a request to exit
+ // after dumping, do so now.
+
+ if (dumpIRExit && ((*dumpIRPhase != L'*') || (phase == PHASE_EMIT_GCEH)))
+ {
+ exit(0);
+ }
+ }
}
#endif
}
@@ -4565,29 +4392,28 @@ bool Compiler::fgExcludeFromSsa(unsigned lclNum)
{
if (opts.MinOpts())
{
- return true; // If we're doing MinOpts, no SSA vars.
+ return true; // If we're doing MinOpts, no SSA vars.
}
- LclVarDsc * varDsc = &lvaTable[lclNum];
+ LclVarDsc* varDsc = &lvaTable[lclNum];
if (varDsc->lvAddrExposed)
{
- return true; // We exclude address-exposed variables.
+ return true; // We exclude address-exposed variables.
}
if (!varDsc->lvTracked)
{
- return true; // SSA is only done for tracked variables
+ return true; // SSA is only done for tracked variables
}
// lvPromoted structs are never tracked...
assert(!varDsc->lvPromoted);
if (varDsc->lvOverlappingFields)
{
- return true; // Don't use SSA on structs that have overlapping fields
+ return true; // Don't use SSA on structs that have overlapping fields
}
- if (varDsc->lvIsStructField &&
- (lvaGetParentPromotionType(lclNum) != PROMOTION_TYPE_INDEPENDENT))
+ if (varDsc->lvIsStructField && (lvaGetParentPromotionType(lclNum) != PROMOTION_TYPE_INDEPENDENT))
{
// SSA must exclude struct fields that are not independent
// - because we don't model the struct assignment properly when multiple fields can be assigned by one struct
@@ -4597,7 +4423,7 @@ bool Compiler::fgExcludeFromSsa(unsigned lclNum)
//
// Example mscorlib method: CompatibilitySwitches:IsCompatibilitySwitchSet
//
- return true;
+ return true;
}
// otherwise this variable is *not* excluded for SSA
return false;
@@ -4622,9 +4448,12 @@ ValueNum Compiler::GetUseAsgDefVNOrTreeVN(GenTreePtr op)
unsigned Compiler::GetSsaNumForLocalVarDef(GenTreePtr lcl)
{
// Address-taken variables don't have SSA numbers.
- if (fgExcludeFromSsa(lcl->AsLclVarCommon()->gtLclNum)) return SsaConfig::RESERVED_SSA_NUM;
+ if (fgExcludeFromSsa(lcl->AsLclVarCommon()->gtLclNum))
+ {
+ return SsaConfig::RESERVED_SSA_NUM;
+ }
- assert(lcl->gtFlags & (GTF_VAR_DEF|GTF_VAR_USEDEF));
+ assert(lcl->gtFlags & (GTF_VAR_DEF | GTF_VAR_USEDEF));
if (lcl->gtFlags & GTF_VAR_USEASG)
{
assert((lcl->gtFlags & GTF_VAR_USEDEF) == 0);
@@ -4652,34 +4481,34 @@ unsigned Compiler::GetSsaNumForLocalVarDef(GenTreePtr lcl)
*
* Note that compGetMem is an arena allocator that returns memory that is
* not zero-initialized and can contain data from a prior allocation lifetime.
- * it also requires that 'sz' be aligned to a multiple of sizeof(int)
+ * it also requires that 'sz' be aligned to a multiple of sizeof(int)
*/
-inline void * __cdecl operator new(size_t sz, Compiler *context, CompMemKind cmk)
+inline void* __cdecl operator new(size_t sz, Compiler* context, CompMemKind cmk)
{
sz = AlignUp(sz, sizeof(int));
assert(sz != 0 && (sz & (sizeof(int) - 1)) == 0);
- return context->compGetMem( sz, cmk);
+ return context->compGetMem(sz, cmk);
}
-inline void * __cdecl operator new[](size_t sz, Compiler *context, CompMemKind cmk)
+inline void* __cdecl operator new[](size_t sz, Compiler* context, CompMemKind cmk)
{
sz = AlignUp(sz, sizeof(int));
assert(sz != 0 && (sz & (sizeof(int) - 1)) == 0);
- return context->compGetMem( sz, cmk);
+ return context->compGetMem(sz, cmk);
}
-inline void * __cdecl operator new(size_t sz, void* p, const jitstd::placement_t& /* syntax_difference */)
+inline void* __cdecl operator new(size_t sz, void* p, const jitstd::placement_t& /* syntax_difference */)
{
return p;
}
-inline void * __cdecl operator new(size_t sz, IAllocator* alloc)
+inline void* __cdecl operator new(size_t sz, IAllocator* alloc)
{
return alloc->Alloc(sz);
}
-inline void * __cdecl operator new[](size_t sz, IAllocator* alloc)
+inline void* __cdecl operator new[](size_t sz, IAllocator* alloc)
{
return alloc->Alloc(sz);
}
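
These placement operator new overloads are what let call sites write new (compiler, memkind) T(...) and draw storage from the JIT arena; regMaskToString a few hunks below does exactly that for its character buffer. A hedged usage sketch (Foo is an invented type, comp an assumed Compiler*):

    // Hypothetical call sites, assuming 'comp' is a valid Compiler*:
    //   char* buf = new (comp, CMK_Unknown) char[24]; // array form, arena-backed
    //   Foo*  foo = new (comp, CMK_Unknown) Foo();    // single object, same arena
    // Nothing allocated this way is individually freed; compFreeMem above is a
    // no-op, and the whole arena is released when compilation finishes.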
@@ -4693,10 +4522,10 @@ inline void printRegMask(regMaskTP mask)
printf(REG_MASK_ALL_FMT, mask);
}
-inline char *regMaskToString(regMaskTP mask, Compiler *context)
+inline char* regMaskToString(regMaskTP mask, Compiler* context)
{
const size_t cchRegMask = 24;
- char *regmask = new(context, CMK_Unknown) char[cchRegMask];
+ char* regmask = new (context, CMK_Unknown) char[cchRegMask];
sprintf_s(regmask, cchRegMask, REG_MASK_ALL_FMT, mask);
@@ -4708,10 +4537,10 @@ inline void printRegMaskInt(regMaskTP mask)
printf(REG_MASK_INT_FMT, (mask & RBM_ALLINT));
}
-inline char *regMaskIntToString(regMaskTP mask, Compiler *context)
+inline char* regMaskIntToString(regMaskTP mask, Compiler* context)
{
const size_t cchRegMask = 24;
- char *regmask = new(context, CMK_Unknown) char[cchRegMask];
+ char* regmask = new (context, CMK_Unknown) char[cchRegMask];
sprintf_s(regmask, cchRegMask, REG_MASK_INT_FMT, (mask & RBM_ALLINT));
@@ -4720,23 +4549,23 @@ inline char *regMaskIntToString(regMaskTP mask, Compiler *context)
#endif // DEBUG
-inline void BasicBlock::InitVarSets(Compiler* comp)
+inline void BasicBlock::InitVarSets(Compiler* comp)
{
- VarSetOps::AssignNoCopy(comp, bbVarUse, VarSetOps::MakeEmpty(comp));
- VarSetOps::AssignNoCopy(comp, bbVarDef, VarSetOps::MakeEmpty(comp));
- VarSetOps::AssignNoCopy(comp, bbVarTmp, VarSetOps::MakeEmpty(comp));
- VarSetOps::AssignNoCopy(comp, bbLiveIn, VarSetOps::MakeEmpty(comp));
+ VarSetOps::AssignNoCopy(comp, bbVarUse, VarSetOps::MakeEmpty(comp));
+ VarSetOps::AssignNoCopy(comp, bbVarDef, VarSetOps::MakeEmpty(comp));
+ VarSetOps::AssignNoCopy(comp, bbVarTmp, VarSetOps::MakeEmpty(comp));
+ VarSetOps::AssignNoCopy(comp, bbLiveIn, VarSetOps::MakeEmpty(comp));
VarSetOps::AssignNoCopy(comp, bbLiveOut, VarSetOps::MakeEmpty(comp));
- VarSetOps::AssignNoCopy(comp, bbScope, VarSetOps::MakeEmpty(comp));
+ VarSetOps::AssignNoCopy(comp, bbScope, VarSetOps::MakeEmpty(comp));
- bbHeapUse = false;
- bbHeapDef = false;
- bbHeapLiveIn = false;
+ bbHeapUse = false;
+ bbHeapDef = false;
+ bbHeapLiveIn = false;
bbHeapLiveOut = false;
}
// Returns true if the basic block ends with GT_JMP
-inline bool BasicBlock::endsWithJmpMethod(Compiler *comp)
+inline bool BasicBlock::endsWithJmpMethod(Compiler* comp)
{
if (comp->compJmpOpUsed && (bbJumpKind == BBJ_RETURN) && (bbFlags & BBF_HAS_JMP))
{
@@ -4755,12 +4584,12 @@ inline bool BasicBlock::endsWithJmpMethod(Compiler *comp)
// Params:
// comp - Compiler instance
// fastTailCallsOnly - Only consider fast tail calls excluding tail calls via helper.
-inline bool BasicBlock::endsWithTailCallOrJmp(Compiler* comp,
- bool fastTailCallsOnly /*=false*/)
+inline bool BasicBlock::endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly /*=false*/)
{
- GenTreePtr tailCall = nullptr;
- bool tailCallsConvertibleToLoopOnly = false;
- return endsWithJmpMethod(comp) || endsWithTailCall(comp, fastTailCallsOnly, tailCallsConvertibleToLoopOnly, &tailCall);
+ GenTreePtr tailCall = nullptr;
+ bool tailCallsConvertibleToLoopOnly = false;
+ return endsWithJmpMethod(comp) ||
+ endsWithTailCall(comp, fastTailCallsOnly, tailCallsConvertibleToLoopOnly, &tailCall);
}
//------------------------------------------------------------------------------
@@ -4779,10 +4608,13 @@ inline bool BasicBlock::endsWithTailCallOrJmp(Compiler* comp,
// Notes:
// At most one of fastTailCallsOnly and tailCallsConvertibleToLoopOnly flags can be true.
-inline bool BasicBlock::endsWithTailCall(Compiler* comp, bool fastTailCallsOnly, bool tailCallsConvertibleToLoopOnly, GenTree** tailCall)
+inline bool BasicBlock::endsWithTailCall(Compiler* comp,
+ bool fastTailCallsOnly,
+ bool tailCallsConvertibleToLoopOnly,
+ GenTree** tailCall)
{
assert(!fastTailCallsOnly || !tailCallsConvertibleToLoopOnly);
- *tailCall = nullptr;
+ *tailCall = nullptr;
bool result = false;
// Is this a tail call?
@@ -4850,14 +4682,14 @@ inline bool BasicBlock::endsWithTailCall(Compiler* comp, bool fastTailCallsOnly,
inline bool BasicBlock::endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall)
{
- bool fastTailCallsOnly = false;
+ bool fastTailCallsOnly = false;
bool tailCallsConvertibleToLoopOnly = true;
return endsWithTailCall(comp, fastTailCallsOnly, tailCallsConvertibleToLoopOnly, tailCall);
}
// Returns the last top level stmt of a given basic block.
// Returns nullptr if the block is empty.
-inline GenTreePtr Compiler::fgGetLastTopLevelStmt(BasicBlock *block)
+inline GenTreePtr Compiler::fgGetLastTopLevelStmt(BasicBlock* block)
{
// Return if the block is empty
if (block->bbTreeList == nullptr)
@@ -4870,11 +4702,8 @@ inline GenTreePtr Compiler::fgGetLastTopLevelStmt(BasicBlock *block)
inline GenTreeBlkOp* Compiler::gtCloneCpObjNode(GenTreeCpObj* source)
{
- GenTreeCpObj* result = new (this, GT_COPYOBJ) GenTreeCpObj(source->gtGcPtrCount,
- source->gtSlots,
- source->gtGcPtrs);
- gtBlockOpInit(result, GT_COPYOBJ, source->Dest(), source->Source(),
- source->ClsTok(), source->IsVolatile());
+ GenTreeCpObj* result = new (this, GT_COPYOBJ) GenTreeCpObj(source->gtGcPtrCount, source->gtSlots, source->gtGcPtrs);
+ gtBlockOpInit(result, GT_COPYOBJ, source->Dest(), source->Source(), source->ClsTok(), source->IsVolatile());
return result;
}
diff --git a/src/jit/compilerbitsettraits.h b/src/jit/compilerbitsettraits.h
index 29881d4c9b..4365c518d7 100644
--- a/src/jit/compilerbitsettraits.h
+++ b/src/jit/compilerbitsettraits.h
@@ -11,12 +11,12 @@
#include "bitsetasshortlong.h"
///////////////////////////////////////////////////////////////////////////////
-//
+//
// CompAllocBitSetTraits: a base class for other BitSet traits classes.
-//
+//
// The classes in this file define "BitSetTraits" arguments to the "BitSetOps" type, ones that assume that
// Compiler* is the "Env" type.
-//
+//
// This class just captures the compiler's allocator as an IAllocator.
//
class CompAllocBitSetTraits
@@ -30,14 +30,14 @@ public:
};
///////////////////////////////////////////////////////////////////////////////
-//
+//
// TrackedVarBitSetTraits
-//
+//
// This class is customizes the bit set to represent sets of tracked local vars.
// The size of the bitset is determined by the # of tracked locals (up to some internal
// maximum), and the Compiler* tracks the tracked local epochs.
//
-class TrackedVarBitSetTraits: public CompAllocBitSetTraits
+class TrackedVarBitSetTraits : public CompAllocBitSetTraits
{
public:
static inline unsigned GetSize(Compiler* comp);
@@ -50,16 +50,16 @@ public:
};
///////////////////////////////////////////////////////////////////////////////
-//
+//
// AllVarBitSetTraits
-//
+//
// This class is customizes the bit set to represent sets of all local vars (tracked or not) --
// at least up to some maximum index. (This index is private to the Compiler, and it is
// the responsibility of the compiler not to use indices >= this maximum.)
// We rely on the fact that variables are never deleted, and therefore use the
// total # of locals as the epoch number (up to the maximum).
//
-class AllVarBitSetTraits: public CompAllocBitSetTraits
+class AllVarBitSetTraits : public CompAllocBitSetTraits
{
public:
static inline unsigned GetSize(Compiler* comp);
@@ -72,9 +72,9 @@ public:
};
///////////////////////////////////////////////////////////////////////////////
-//
+//
// BasicBlockBitSetTraits
-//
+//
// This class is customizes the bit set to represent sets of BasicBlocks.
// The size of the bitset is determined by maximum assigned BasicBlock number
// (Compiler::fgBBNumMax) (Note that fgBBcount is not equal to this during inlining,
@@ -83,7 +83,7 @@ public:
// Thus, if you only care about the inlinee, during inlining, this bit set will waste
// the lower numbered block bits.) The Compiler* tracks the BasicBlock epochs.
//
-class BasicBlockBitSetTraits: public CompAllocBitSetTraits
+class BasicBlockBitSetTraits : public CompAllocBitSetTraits
{
public:
static inline unsigned GetSize(Compiler* comp);
@@ -96,23 +96,21 @@ public:
};
///////////////////////////////////////////////////////////////////////////////
-//
+//
// BitVecTraits
-//
+//
// This class simplifies creation and usage of "ShortLong" bitsets.
//
struct BitVecTraits
{
private:
- unsigned size;
+ unsigned size;
Compiler* comp;
public:
-
- BitVecTraits(unsigned size, Compiler* comp)
- : size(size)
- , comp(comp)
- { }
+ BitVecTraits(unsigned size, Compiler* comp) : size(size), comp(comp)
+ {
+ }
static inline IAllocator* GetAllocator(BitVecTraits* b);
diff --git a/src/jit/compilerbitsettraits.hpp b/src/jit/compilerbitsettraits.hpp
index 1b543951d8..e2ba2f8a7a 100644
--- a/src/jit/compilerbitsettraits.hpp
+++ b/src/jit/compilerbitsettraits.hpp
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#ifndef CompilerBitSetTraits_HPP_DEFINED
#define CompilerBitSetTraits_HPP_DEFINED 1
@@ -10,19 +9,19 @@
#include "compiler.h"
///////////////////////////////////////////////////////////////////////////////
-//
+//
// CompAllocBitSetTraits
-//
+//
///////////////////////////////////////////////////////////////////////////////
-// static
+// static
IAllocator* CompAllocBitSetTraits::GetAllocator(Compiler* comp)
{
return comp->getAllocatorBitset();
}
#ifdef DEBUG
-// static
+// static
IAllocator* CompAllocBitSetTraits::GetDebugOnlyAllocator(Compiler* comp)
{
return comp->getAllocatorDebugOnly();
@@ -30,9 +29,9 @@ IAllocator* CompAllocBitSetTraits::GetDebugOnlyAllocator(Compiler* comp)
#endif // DEBUG
///////////////////////////////////////////////////////////////////////////////
-//
+//
// TrackedVarBitSetTraits
-//
+//
///////////////////////////////////////////////////////////////////////////////
// static
@@ -48,7 +47,7 @@ unsigned TrackedVarBitSetTraits::GetArrSize(Compiler* comp, unsigned elemSize)
return comp->lvaTrackedCountInSizeTUnits;
}
-// static
+// static
unsigned TrackedVarBitSetTraits::GetEpoch(Compiler* comp)
{
return comp->GetCurLVEpoch();
@@ -60,14 +59,14 @@ BitSetSupport::BitSetOpCounter* TrackedVarBitSetTraits::GetOpCounter(Compiler* c
#if VARSET_COUNTOPS
return &Compiler::m_varsetOpCounter;
#else
- return NULL;
+ return nullptr;
#endif
}
///////////////////////////////////////////////////////////////////////////////
-//
+//
// AllVarBitSetTraits
-//
+//
///////////////////////////////////////////////////////////////////////////////
// static
@@ -82,7 +81,7 @@ unsigned AllVarBitSetTraits::GetArrSize(Compiler* comp, unsigned elemSize)
return unsigned(roundUp(GetSize(comp), elemSize));
}
-// static
+// static
unsigned AllVarBitSetTraits::GetEpoch(Compiler* comp)
{
return GetSize(comp);
@@ -94,14 +93,14 @@ BitSetSupport::BitSetOpCounter* AllVarBitSetTraits::GetOpCounter(Compiler* comp)
#if ALLVARSET_COUNTOPS
return &Compiler::m_allvarsetOpCounter;
#else
- return NULL;
+ return nullptr;
#endif
}
///////////////////////////////////////////////////////////////////////////////
-//
+//
// BasicBlockBitSetTraits
-//
+//
///////////////////////////////////////////////////////////////////////////////
// static
@@ -118,10 +117,10 @@ unsigned BasicBlockBitSetTraits::GetArrSize(Compiler* comp, unsigned elemSize)
assert(GetEpoch(comp) != 0);
assert(elemSize == sizeof(size_t));
- return comp->fgBBSetCountInSizeTUnits; // This is precomputed to avoid doing math every time this function is called
+ return comp->fgBBSetCountInSizeTUnits; // This is precomputed to avoid doing math every time this function is called
}
-// static
+// static
unsigned BasicBlockBitSetTraits::GetEpoch(Compiler* comp)
{
return comp->GetCurBasicBlockEpoch();
@@ -130,13 +129,13 @@ unsigned BasicBlockBitSetTraits::GetEpoch(Compiler* comp)
// static
BitSetSupport::BitSetOpCounter* BasicBlockBitSetTraits::GetOpCounter(Compiler* comp)
{
- return NULL;
+ return nullptr;
}
///////////////////////////////////////////////////////////////////////////////
-//
+//
// BitVecTraits
-//
+//
///////////////////////////////////////////////////////////////////////////////
// static
@@ -164,7 +163,7 @@ unsigned BitVecTraits::GetArrSize(BitVecTraits* b, unsigned elemSize)
{
assert(elemSize == sizeof(size_t));
unsigned elemBits = 8 * elemSize;
- return (unsigned) roundUp(b->size, elemBits)/elemBits;
+ return (unsigned)roundUp(b->size, elemBits) / elemBits;
}
// static
@@ -176,7 +175,7 @@ unsigned BitVecTraits::GetEpoch(BitVecTraits* b)
// static
BitSetSupport::BitSetOpCounter* BitVecTraits::GetOpCounter(BitVecTraits* b)
{
- return NULL;
+ return nullptr;
}
#endif // CompilerBitSetTraits_HPP_DEFINED
diff --git a/src/jit/compmemkind.h b/src/jit/compmemkind.h
index 1e18d516f8..e27d2071f7 100644
--- a/src/jit/compmemkind.h
+++ b/src/jit/compmemkind.h
@@ -4,7 +4,7 @@
/*****************************************************************************/
#ifndef CompMemKindMacro
-#error Define CompMemKindMacro before including this file.
+#error Define CompMemKindMacro before including this file.
#endif
// This list of macro invocations should be used to define the CompMemKind enumeration,
diff --git a/src/jit/copyprop.cpp b/src/jit/copyprop.cpp
index f8b75c5a64..bf714f0963 100644
--- a/src/jit/copyprop.cpp
+++ b/src/jit/copyprop.cpp
@@ -21,8 +21,7 @@
#include "ssabuilder.h"
template <typename T>
-inline
-static T* allocate_any(jitstd::allocator<void>& alloc, size_t count = 1)
+inline static T* allocate_any(jitstd::allocator<void>& alloc, size_t count = 1)
{
return jitstd::allocator<T>(alloc).allocate(count);
}
@@ -62,7 +61,6 @@ void Compiler::optBlockCopyPropPopStacks(BasicBlock* block, LclNumToGenTreePtrSt
}
}
-
/*******************************************************************************************************
*
* Given the "lclVar" and "copyVar" compute if the copy prop will be beneficial.
@@ -108,11 +106,10 @@ int Compiler::optCopyProp_LclVarScore(LclVarDsc* lclVarDsc, LclVarDsc* copyVarDs
}
#endif
- // Otherwise we prefer to use the op2LclNum
+ // Otherwise we prefer to use the op2LclNum
return score + ((preferOp2) ? 1 : -1);
}
-
/**************************************************************************************
*
* Perform copy propagation on a given tree as we walk the graph and if it is a local
@@ -164,7 +161,7 @@ void Compiler::optCopyProp(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree,
{
continue;
}
-
+
// Skip variables with assignments embedded in the statement (i.e., with a comma). Because we
// are not currently updating their SSA names as live in the copy-prop pass of the stmt.
if (VarSetOps::IsMember(this, optCopyPropKillSet, lvaTable[newLclNum].lvVarIndex))
@@ -176,7 +173,8 @@ void Compiler::optCopyProp(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree,
{
continue;
}
- if (gsShadowVarInfo != NULL && lvaTable[newLclNum].lvIsParam && gsShadowVarInfo[newLclNum].shadowCopy == lclNum)
+ if (gsShadowVarInfo != nullptr && lvaTable[newLclNum].lvIsParam &&
+ gsShadowVarInfo[newLclNum].shadowCopy == lclNum)
{
continue;
}
@@ -205,7 +203,7 @@ void Compiler::optCopyProp(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree,
// else
// x1 = 2
// print(c) <-- x is not live here. Let's say 'c' shares the value number with "x0."
- //
+ //
// If we simply substituted 'c' with "x0", we would be wrong. Ideally, there would be a phi
// node x2 = phi(x0, x1) which can then be used to substitute 'c' with. But because of pruning
// there would be no such phi node. To solve this we'll check if 'x' is live, before replacing
@@ -231,7 +229,7 @@ void Compiler::optCopyProp(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree,
}
}
unsigned newSsaNum = SsaConfig::RESERVED_SSA_NUM;
- if (op->gtFlags & (GTF_VAR_DEF|GTF_VAR_USEDEF))
+ if (op->gtFlags & (GTF_VAR_DEF | GTF_VAR_USEDEF))
{
newSsaNum = GetSsaNumForLocalVarDef(op);
}
@@ -253,7 +251,7 @@ void Compiler::optCopyProp(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree,
printf(" V%02d @%08X by ", lclNum, tree->GetVN(VNK_Conservative));
printTreeID(op);
printf(" V%02d @%08X.\n", newLclNum, op->GetVN(VNK_Conservative));
- gtDispTree(tree, 0, NULL, true);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
@@ -265,7 +263,7 @@ void Compiler::optCopyProp(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree,
if (verbose)
{
printf("copy propagated to:\n");
- gtDispTree(tree, 0, NULL, true);
+ gtDispTree(tree, nullptr, nullptr, true);
}
#endif
break;
@@ -294,7 +292,7 @@ void Compiler::optBlockCopyProp(BasicBlock* block, LclNumToGenTreePtrStack* curS
JITDUMP("Copy Assertion for BB%02u\n", block->bbNum);
// There are no definitions at the start of the block. So clear it.
- compCurLifeTree = NULL;
+ compCurLifeTree = nullptr;
VarSetOps::Assign(this, compCurLife, block->bbLiveIn);
for (GenTreePtr stmt = block->bbTreeList; stmt; stmt = stmt->gtNext)
{
@@ -303,7 +301,7 @@ void Compiler::optBlockCopyProp(BasicBlock* block, LclNumToGenTreePtrStack* curS
// Walk the tree to find if any local variable can be replaced with current live definitions.
for (GenTreePtr tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
{
- compUpdateLife</*ForCodeGen*/false>(tree);
+ compUpdateLife</*ForCodeGen*/ false>(tree);
optCopyProp(block, stmt, tree, curSsaName);
// TODO-Review: Merge this loop with the following loop to correctly update the
@@ -313,7 +311,7 @@ void Compiler::optBlockCopyProp(BasicBlock* block, LclNumToGenTreePtrStack* curS
// 2. The subsequent loop maintains a stack for each lclNum with
// currently active SSA numbers when definitions are encountered.
//
- // If there is an embedded definition using a "comma" in a stmt, then the currently
+ // If there is an embedded definition using a "comma" in a stmt, then the currently
// live SSA number will get updated only in the next loop (2). However, this new
// definition is now supposed to be live (on tos). If we did not update the stacks
// using (2), copy prop (1) will use a SSA num defined outside the stmt ignoring the
@@ -350,7 +348,7 @@ void Compiler::optBlockCopyProp(BasicBlock* block, LclNumToGenTreePtrStack* curS
// If we encounter first use of a param or this pointer add it as a live definition.
// Since they are always live, do it only once.
else if ((tree->gtOper == GT_LCL_VAR) && !(tree->gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF)) &&
- (lvaTable[lclNum].lvIsParam || lvaTable[lclNum].lvVerTypeInfo.IsThisPtr()))
+ (lvaTable[lclNum].lvIsParam || lvaTable[lclNum].lvVerTypeInfo.IsThisPtr()))
{
GenTreePtrStack* stack;
if (!curSsaName->Lookup(lclNum, &stack))
@@ -412,12 +410,11 @@ void Compiler::optVnCopyProp()
struct BlockWork
{
BasicBlock* m_blk;
- bool m_processed;
+ bool m_processed;
- BlockWork(BasicBlock* blk, bool processed = false)
- : m_blk(blk)
- , m_processed(processed)
- {}
+ BlockWork(BasicBlock* blk, bool processed = false) : m_blk(blk), m_processed(processed)
+ {
+ }
};
typedef jitstd::vector<BlockWork> BlockWorkStack;
@@ -427,7 +424,8 @@ void Compiler::optVnCopyProp()
// The map from lclNum to its recently live definitions as a stack.
LclNumToGenTreePtrStack curSsaName(getAllocator());
- BlockWorkStack* worklist = new (allocate_any<BlockWorkStack>(allocator), jitstd::placement_t()) BlockWorkStack(allocator);
+ BlockWorkStack* worklist =
+ new (allocate_any<BlockWorkStack>(allocator), jitstd::placement_t()) BlockWorkStack(allocator);
worklist->push_back(BlockWork(fgFirstBB));
while (!worklist->empty())
@@ -463,4 +461,3 @@ void Compiler::optVnCopyProp()
// Destroy (release) the varset.
VarSetOps::AssignNoCopy(this, compCurLife, VarSetOps::UninitVal());
}
-
diff --git a/src/jit/dataflow.h b/src/jit/dataflow.h
index d95e72baae..c9803a0cc1 100644
--- a/src/jit/dataflow.h
+++ b/src/jit/dataflow.h
@@ -18,7 +18,6 @@
#include "compiler.h"
#include "jitstd.h"
-
class DataFlow
{
private:
@@ -72,7 +71,7 @@ void DataFlow::ForwardAnalysis(TCallback& callback)
if (callback.EndMerge(block))
{
AllSuccessorIter succsBegin = block->GetAllSuccs(m_pCompiler).begin();
- AllSuccessorIter succsEnd = block->GetAllSuccs(m_pCompiler).end();
+ AllSuccessorIter succsEnd = block->GetAllSuccs(m_pCompiler).end();
for (AllSuccessorIter succ = succsBegin; succ != succsEnd; ++succ)
{
worklist.insert(worklist.end(), *succ);
@@ -80,4 +79,3 @@ void DataFlow::ForwardAnalysis(TCallback& callback)
}
}
}
-
diff --git a/src/jit/decomposelongs.cpp b/src/jit/decomposelongs.cpp
index 4af97efe2e..5e18c49212 100644
--- a/src/jit/decomposelongs.cpp
+++ b/src/jit/decomposelongs.cpp
@@ -48,7 +48,6 @@ void DecomposeLongs::PrepareForDecomposition()
m_compiler->lvaPromoteLongVars();
}
-
//------------------------------------------------------------------------
// DecomposeBlock: Do LONG decomposition to all the statements in the given block.
// This must be done before lowering the block, as decomposition can insert
@@ -81,7 +80,6 @@ void DecomposeLongs::DecomposeBlock(BasicBlock* block)
}
}
-
//------------------------------------------------------------------------
// DecomposeStmt: Do LONG decomposition to a statement tree.
//
@@ -100,7 +98,6 @@ void DecomposeLongs::DecomposeStmt(GenTreeStmt* stmt)
m_compiler->compCurStmt = savedStmt;
}
-
//------------------------------------------------------------------------
// DecompNodeHelper: fgWalkTreePost callback helper for LONG decomposition
//
@@ -119,7 +116,6 @@ Compiler::fgWalkResult DecomposeLongs::DecompNodeHelper(GenTree** ppTree, Compil
return Compiler::WALK_CONTINUE;
}
-
//------------------------------------------------------------------------
// DecomposeNode: Decompose long-type trees into lower and upper halves.
//
@@ -143,7 +139,8 @@ void DecomposeLongs::DecomposeNode(GenTree** ppTree, Compiler::fgWalkData* data)
#ifdef DEBUG
if (m_compiler->verbose)
{
- printf("Changing implicit reference to lo half of long lclVar to an explicit reference of its promoted half:\n");
+ printf("Changing implicit reference to lo half of long lclVar to an explicit reference of its promoted "
+ "half:\n");
m_compiler->gtDispTree(tree);
}
#endif // DEBUG
@@ -170,112 +167,112 @@ void DecomposeLongs::DecomposeNode(GenTree** ppTree, Compiler::fgWalkData* data)
switch (tree->OperGet())
{
- case GT_PHI:
- case GT_PHI_ARG:
- break;
-
- case GT_LCL_VAR:
- DecomposeLclVar(ppTree, data);
- break;
-
- case GT_LCL_FLD:
- DecomposeLclFld(ppTree, data);
- break;
-
- case GT_STORE_LCL_VAR:
- DecomposeStoreLclVar(ppTree, data);
- break;
-
- case GT_CAST:
- DecomposeCast(ppTree, data);
- break;
-
- case GT_CNS_LNG:
- DecomposeCnsLng(ppTree, data);
- break;
-
- case GT_CALL:
- DecomposeCall(ppTree, data);
- break;
-
- case GT_RETURN:
- assert(tree->gtOp.gtOp1->OperGet() == GT_LONG);
- break;
-
- case GT_STOREIND:
- DecomposeStoreInd(ppTree, data);
- break;
-
- case GT_STORE_LCL_FLD:
- assert(tree->gtOp.gtOp1->OperGet() == GT_LONG);
- NYI("st.lclFld of of TYP_LONG");
- break;
-
- case GT_IND:
- DecomposeInd(ppTree, data);
- break;
-
- case GT_NOT:
- DecomposeNot(ppTree, data);
- break;
-
- case GT_NEG:
- DecomposeNeg(ppTree, data);
- break;
-
- // Binary operators. Those that require different computation for upper and lower half are
- // handled by the use of GetHiOper().
- case GT_ADD:
- case GT_SUB:
- case GT_OR:
- case GT_XOR:
- case GT_AND:
- DecomposeArith(ppTree, data);
- break;
-
- case GT_MUL:
- NYI("Arithmetic binary operators on TYP_LONG - GT_MUL");
- break;
-
- case GT_DIV:
- NYI("Arithmetic binary operators on TYP_LONG - GT_DIV");
- break;
-
- case GT_MOD:
- NYI("Arithmetic binary operators on TYP_LONG - GT_MOD");
- break;
-
- case GT_UDIV:
- NYI("Arithmetic binary operators on TYP_LONG - GT_UDIV");
- break;
-
- case GT_UMOD:
- NYI("Arithmetic binary operators on TYP_LONG - GT_UMOD");
- break;
-
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- NYI("Arithmetic binary operators on TYP_LONG - SHIFT");
- break;
-
- case GT_ROL:
- case GT_ROR:
- NYI("Arithmetic binary operators on TYP_LONG - ROTATE");
- break;
-
- case GT_MULHI:
- NYI("Arithmetic binary operators on TYP_LONG - MULHI");
- break;
-
- case GT_LOCKADD:
- case GT_XADD:
- case GT_XCHG:
- case GT_CMPXCHG:
- NYI("Interlocked operations on TYP_LONG");
- break;
-
- default:
+ case GT_PHI:
+ case GT_PHI_ARG:
+ break;
+
+ case GT_LCL_VAR:
+ DecomposeLclVar(ppTree, data);
+ break;
+
+ case GT_LCL_FLD:
+ DecomposeLclFld(ppTree, data);
+ break;
+
+ case GT_STORE_LCL_VAR:
+ DecomposeStoreLclVar(ppTree, data);
+ break;
+
+ case GT_CAST:
+ DecomposeCast(ppTree, data);
+ break;
+
+ case GT_CNS_LNG:
+ DecomposeCnsLng(ppTree, data);
+ break;
+
+ case GT_CALL:
+ DecomposeCall(ppTree, data);
+ break;
+
+ case GT_RETURN:
+ assert(tree->gtOp.gtOp1->OperGet() == GT_LONG);
+ break;
+
+ case GT_STOREIND:
+ DecomposeStoreInd(ppTree, data);
+ break;
+
+ case GT_STORE_LCL_FLD:
+ assert(tree->gtOp.gtOp1->OperGet() == GT_LONG);
+ NYI("st.lclFld of of TYP_LONG");
+ break;
+
+ case GT_IND:
+ DecomposeInd(ppTree, data);
+ break;
+
+ case GT_NOT:
+ DecomposeNot(ppTree, data);
+ break;
+
+ case GT_NEG:
+ DecomposeNeg(ppTree, data);
+ break;
+
+ // Binary operators. Those that require different computation for upper and lower half are
+ // handled by the use of GetHiOper().
+ case GT_ADD:
+ case GT_SUB:
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
+ DecomposeArith(ppTree, data);
+ break;
+
+ case GT_MUL:
+ NYI("Arithmetic binary operators on TYP_LONG - GT_MUL");
+ break;
+
+ case GT_DIV:
+ NYI("Arithmetic binary operators on TYP_LONG - GT_DIV");
+ break;
+
+ case GT_MOD:
+ NYI("Arithmetic binary operators on TYP_LONG - GT_MOD");
+ break;
+
+ case GT_UDIV:
+ NYI("Arithmetic binary operators on TYP_LONG - GT_UDIV");
+ break;
+
+ case GT_UMOD:
+ NYI("Arithmetic binary operators on TYP_LONG - GT_UMOD");
+ break;
+
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ NYI("Arithmetic binary operators on TYP_LONG - SHIFT");
+ break;
+
+ case GT_ROL:
+ case GT_ROR:
+ NYI("Arithmetic binary operators on TYP_LONG - ROTATE");
+ break;
+
+ case GT_MULHI:
+ NYI("Arithmetic binary operators on TYP_LONG - MULHI");
+ break;
+
+ case GT_LOCKADD:
+ case GT_XADD:
+ case GT_XCHG:
+ case GT_CMPXCHG:
+ NYI("Interlocked operations on TYP_LONG");
+ break;
+
+ default:
{
JITDUMP("Illegal TYP_LONG node %s in Decomposition.", GenTree::NodeName(tree->OperGet()));
noway_assert(!"Illegal TYP_LONG node in Decomposition.");
@@ -291,7 +288,6 @@ void DecomposeLongs::DecomposeNode(GenTree** ppTree, Compiler::fgWalkData* data)
}
#endif
}
-
//------------------------------------------------------------------------
// FinalizeDecomposition: A helper function to finalize LONG decomposition by
@@ -307,7 +303,10 @@ void DecomposeLongs::DecomposeNode(GenTree** ppTree, Compiler::fgWalkData* data)
// Return Value:
// None.
//
-void DecomposeLongs::FinalizeDecomposition(GenTree** ppTree, Compiler::fgWalkData* data, GenTree* loResult, GenTree* hiResult)
+void DecomposeLongs::FinalizeDecomposition(GenTree** ppTree,
+ Compiler::fgWalkData* data,
+ GenTree* loResult,
+ GenTree* hiResult)
{
assert(ppTree != nullptr);
assert(*ppTree != nullptr);
@@ -328,7 +327,6 @@ void DecomposeLongs::FinalizeDecomposition(GenTree** ppTree, Compiler::fgWalkDat
*ppTree = newTree;
}
-
//------------------------------------------------------------------------
// DecomposeLclVar: Decompose GT_LCL_VAR.
//
@@ -346,13 +344,13 @@ void DecomposeLongs::DecomposeLclVar(GenTree** ppTree, Compiler::fgWalkData* dat
assert(data != nullptr);
assert((*ppTree)->OperGet() == GT_LCL_VAR);
- GenTree* tree = *ppTree;
- unsigned varNum = tree->AsLclVarCommon()->gtLclNum;
+ GenTree* tree = *ppTree;
+ unsigned varNum = tree->AsLclVarCommon()->gtLclNum;
LclVarDsc* varDsc = m_compiler->lvaTable + varNum;
m_compiler->lvaDecRefCnts(tree);
GenTree* loResult = tree;
- loResult->gtType = TYP_INT;
+ loResult->gtType = TYP_INT;
GenTree* hiResult = m_compiler->gtNewLclLNode(varNum, TYP_INT);
if (varDsc->lvPromoted)
@@ -368,11 +366,11 @@ void DecomposeLongs::DecomposeLclVar(GenTree** ppTree, Compiler::fgWalkData* dat
noway_assert(varDsc->lvLRACandidate == false);
loResult->SetOper(GT_LCL_FLD);
- loResult->AsLclFld()->gtLclOffs = 0;
+ loResult->AsLclFld()->gtLclOffs = 0;
loResult->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();
hiResult->SetOper(GT_LCL_FLD);
- hiResult->AsLclFld()->gtLclOffs = 4;
+ hiResult->AsLclFld()->gtLclOffs = 4;
hiResult->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();
}
@@ -382,7 +380,6 @@ void DecomposeLongs::DecomposeLclVar(GenTree** ppTree, Compiler::fgWalkData* dat
FinalizeDecomposition(ppTree, data, loResult, hiResult);
}
-
//------------------------------------------------------------------------
// DecomposeLclFld: Decompose GT_LCL_FLD.
//
@@ -400,18 +397,15 @@ void DecomposeLongs::DecomposeLclFld(GenTree** ppTree, Compiler::fgWalkData* dat
assert(data != nullptr);
assert((*ppTree)->OperGet() == GT_LCL_FLD);
- GenTree* tree = *ppTree;
+ GenTree* tree = *ppTree;
GenTreeLclFld* loResult = tree->AsLclFld();
- loResult->gtType = TYP_INT;
+ loResult->gtType = TYP_INT;
- GenTree* hiResult = m_compiler->gtNewLclFldNode(loResult->gtLclNum,
- TYP_INT,
- loResult->gtLclOffs + 4);
+ GenTree* hiResult = m_compiler->gtNewLclFldNode(loResult->gtLclNum, TYP_INT, loResult->gtLclOffs + 4);
FinalizeDecomposition(ppTree, data, loResult, hiResult);
}
-
//------------------------------------------------------------------------
// DecomposeStoreLclVar: Decompose GT_STORE_LCL_VAR.
//
@@ -432,9 +426,9 @@ void DecomposeLongs::DecomposeStoreLclVar(GenTree** ppTree, Compiler::fgWalkData
GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt();
- GenTree* tree = *ppTree;
+ GenTree* tree = *ppTree;
GenTree* nextTree = tree->gtNext;
- GenTree* rhs = tree->gtGetOp1();
+ GenTree* rhs = tree->gtGetOp1();
if ((rhs->OperGet() == GT_PHI) || (rhs->OperGet() == GT_CALL))
{
// GT_CALLs are not decomposed, so will not be converted to GT_LONG
@@ -443,12 +437,12 @@ void DecomposeLongs::DecomposeStoreLclVar(GenTree** ppTree, Compiler::fgWalkData
}
noway_assert(rhs->OperGet() == GT_LONG);
- unsigned varNum = tree->AsLclVarCommon()->gtLclNum;
+ unsigned varNum = tree->AsLclVarCommon()->gtLclNum;
LclVarDsc* varDsc = m_compiler->lvaTable + varNum;
m_compiler->lvaDecRefCnts(tree);
- GenTree* loRhs = rhs->gtGetOp1();
- GenTree* hiRhs = rhs->gtGetOp2();
+ GenTree* loRhs = rhs->gtGetOp1();
+ GenTree* hiRhs = rhs->gtGetOp2();
GenTree* hiStore = m_compiler->gtNewLclLNode(varNum, TYP_INT);
if (varDsc->lvPromoted)
@@ -466,19 +460,19 @@ void DecomposeLongs::DecomposeStoreLclVar(GenTree** ppTree, Compiler::fgWalkData
noway_assert(varDsc->lvLRACandidate == false);
tree->SetOper(GT_STORE_LCL_FLD);
- tree->AsLclFld()->gtLclOffs = 0;
+ tree->AsLclFld()->gtLclOffs = 0;
tree->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();
hiStore->SetOper(GT_STORE_LCL_FLD);
- hiStore->AsLclFld()->gtLclOffs = 4;
+ hiStore->AsLclFld()->gtLclOffs = 4;
hiStore->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();
}
tree->gtOp.gtOp1 = loRhs;
- tree->gtType = TYP_INT;
+ tree->gtType = TYP_INT;
loRhs->gtNext = tree;
- tree->gtPrev = loRhs;
+ tree->gtPrev = loRhs;
hiStore->gtOp.gtOp1 = hiRhs;
hiStore->CopyCosts(tree);
@@ -487,9 +481,9 @@ void DecomposeLongs::DecomposeStoreLclVar(GenTree** ppTree, Compiler::fgWalkData
m_compiler->lvaIncRefCnts(tree);
m_compiler->lvaIncRefCnts(hiStore);
- tree->gtNext = hiRhs;
- hiRhs->gtPrev = tree;
- hiRhs->gtNext = hiStore;
+ tree->gtNext = hiRhs;
+ hiRhs->gtPrev = tree;
+ hiRhs->gtNext = hiStore;
hiStore->gtPrev = hiRhs;
hiStore->gtNext = nextTree;
if (nextTree != nullptr)
@@ -501,14 +495,13 @@ void DecomposeLongs::DecomposeStoreLclVar(GenTree** ppTree, Compiler::fgWalkData
bool isEmbeddedStmt = !curStmt->gtStmtIsTopLevel();
if (!isEmbeddedStmt)
{
- tree->gtNext = nullptr;
+ tree->gtNext = nullptr;
hiRhs->gtPrev = nullptr;
}
InsertNodeAsStmt(hiStore);
}
-
//------------------------------------------------------------------------
// DecomposeCast: Decompose GT_CAST.
//
@@ -527,37 +520,36 @@ void DecomposeLongs::DecomposeCast(GenTree** ppTree, Compiler::fgWalkData* data)
assert((*ppTree)->OperGet() == GT_CAST);
assert(m_compiler->compCurStmt != nullptr);
- GenTree* tree = *ppTree;
- GenTree* loResult = nullptr;
- GenTree* hiResult = nullptr;
- GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt();
+ GenTree* tree = *ppTree;
+ GenTree* loResult = nullptr;
+ GenTree* hiResult = nullptr;
+ GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt();
assert(tree->gtPrev == tree->gtGetOp1());
NYI_IF(tree->gtOverflow(), "TYP_LONG cast with overflow");
switch (tree->AsCast()->CastFromType())
{
- case TYP_INT:
- if (tree->gtFlags & GTF_UNSIGNED)
- {
- loResult = tree->gtGetOp1();
- hiResult = new (m_compiler, GT_CNS_INT) GenTreeIntCon(TYP_INT, 0);
- m_compiler->fgSnipNode(curStmt, tree);
- }
- else
- {
- NYI("Lowering of signed cast TYP_INT->TYP_LONG");
- }
- break;
+ case TYP_INT:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ loResult = tree->gtGetOp1();
+ hiResult = new (m_compiler, GT_CNS_INT) GenTreeIntCon(TYP_INT, 0);
+ m_compiler->fgSnipNode(curStmt, tree);
+ }
+ else
+ {
+ NYI("Lowering of signed cast TYP_INT->TYP_LONG");
+ }
+ break;
- default:
- NYI("Unimplemented type for Lowering of cast to TYP_LONG");
- break;
+ default:
+ NYI("Unimplemented type for Lowering of cast to TYP_LONG");
+ break;
}
FinalizeDecomposition(ppTree, data, loResult, hiResult);
}
-
//------------------------------------------------------------------------
// DecomposeCnsLng: Decompose GT_CNS_LNG.
//
@@ -575,8 +567,8 @@ void DecomposeLongs::DecomposeCnsLng(GenTree** ppTree, Compiler::fgWalkData* dat
assert(data != nullptr);
assert((*ppTree)->OperGet() == GT_CNS_LNG);
- GenTree* tree = *ppTree;
- INT32 hiVal = tree->AsLngCon()->HiVal();
+ GenTree* tree = *ppTree;
+ INT32 hiVal = tree->AsLngCon()->HiVal();
GenTree* loResult = tree;
loResult->ChangeOperConst(GT_CNS_INT);
@@ -587,7 +579,6 @@ void DecomposeLongs::DecomposeCnsLng(GenTree** ppTree, Compiler::fgWalkData* dat
FinalizeDecomposition(ppTree, data, loResult, hiResult);
}
-
//------------------------------------------------------------------------
// DecomposeCall: Decompose GT_CALL.
//
@@ -629,17 +620,17 @@ void DecomposeLongs::DecomposeCall(GenTree** ppTree, Compiler::fgWalkData* data)
}
// Otherwise, we need to force var = call()
- GenTree* tree = *ppTree;
+ GenTree* tree = *ppTree;
GenTree** treePtr = nullptr;
- parent = tree->gtGetParent(&treePtr);
+ parent = tree->gtGetParent(&treePtr);
assert(treePtr != nullptr);
- GenTreeStmt* asgStmt = m_compiler->fgInsertEmbeddedFormTemp(treePtr);
- GenTree* stLclVar = asgStmt->gtStmtExpr;
+ GenTreeStmt* asgStmt = m_compiler->fgInsertEmbeddedFormTemp(treePtr);
+ GenTree* stLclVar = asgStmt->gtStmtExpr;
assert(stLclVar->OperIsLocalStore());
- unsigned varNum = stLclVar->AsLclVarCommon()->gtLclNum;
+ unsigned varNum = stLclVar->AsLclVarCommon()->gtLclNum;
m_compiler->lvaTable[varNum].lvIsMultiRegRet = true;
m_compiler->fgFixupIfCallArg(data->parentStack, tree, *treePtr);
@@ -647,7 +638,6 @@ void DecomposeLongs::DecomposeCall(GenTree** ppTree, Compiler::fgWalkData* data)
DecomposeNode(treePtr, data);
}
-
//------------------------------------------------------------------------
// DecomposeStoreInd: Decompose GT_STOREIND.
//
@@ -669,8 +659,8 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
assert(tree->gtOp.gtOp2->OperGet() == GT_LONG);
- GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt();
- bool isEmbeddedStmt = !curStmt->gtStmtIsTopLevel();
+ GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt();
+ bool isEmbeddedStmt = !curStmt->gtStmtIsTopLevel();
// Example input trees (a nested embedded statement case)
//
@@ -685,8 +675,8 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
// | | { | | { * stmtExpr void (embedded) (IL ???... ???)
// | | { | | { | /--* lclFld long V01 arg1 u:2[+8] Fseq[i] $380
// | | { | | { \--* st.lclVar long (P) V21 cse8
- // | | { | | { \--* int V21.hi (offs=0x00) -> V22 rat0
- // | | { | | { \--* int V21.hi (offs=0x04) -> V23 rat1
+ // | | { | | { \--* int V21.hi (offs=0x00) -> V22 rat0
+ // | | { | | { \--* int V21.hi (offs=0x04) -> V23 rat1
// | | { | | /--* lclVar int V22 rat0 $380
// | | { | | +--* lclVar int V23 rat1
// | | { | +--* gt_long long
@@ -700,8 +690,8 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
// (editor brace matching compensation: }}}}}}}}}}}}}}}}}})
GenTree* linkBegin = m_compiler->fgGetFirstNode(tree)->gtPrev;
- GenTree* linkEnd = tree->gtNext;
- GenTree* gtLong = tree->gtOp.gtOp2;
+ GenTree* linkEnd = tree->gtNext;
+ GenTree* gtLong = tree->gtOp.gtOp2;
// Save address to a temp. It is used in storeIndLow and storeIndHigh trees.
GenTreeStmt* addrStmt = CreateTemporary(&tree->gtOp.gtOp1);
@@ -723,7 +713,7 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
}
// Example trees after embedded statements for address and data are added.
- // This example saves all address and data trees into temp variables
+ // This example saves all address and data trees into temp variables
// to show how those embedded statements are created.
//
// * stmtExpr void (top level) (IL ???... ???)
@@ -758,13 +748,13 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
//
// (editor brace matching compensation: }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}})
- GenTree* addrBase = tree->gtOp.gtOp1;
- GenTree* dataHigh = gtLong->gtOp.gtOp2;
- GenTree* dataLow = gtLong->gtOp.gtOp1;
+ GenTree* addrBase = tree->gtOp.gtOp1;
+ GenTree* dataHigh = gtLong->gtOp.gtOp2;
+ GenTree* dataLow = gtLong->gtOp.gtOp1;
GenTree* storeIndLow = tree;
// Rewrite storeIndLow tree to save only lower 32-bit data.
- //
+ //
// | | { | /--* lclVar byref V24 rat2 (address)
// ...
// | | { | +--* lclVar int V25 rat3 (lower 32-bit data)
@@ -778,7 +768,7 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
m_compiler->fgSnipNode(curStmt, gtLong);
m_compiler->fgSnipNode(curStmt, dataHigh);
storeIndLow->gtOp.gtOp2 = dataLow;
- storeIndLow->gtType = TYP_INT;
+ storeIndLow->gtType = TYP_INT;
// Construct storeIndHigh tree
//
@@ -790,10 +780,11 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
//
// (editor brace matching compensation: }}}}})
- GenTree* addrBaseHigh = new(m_compiler, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR,
- addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
- GenTree* addrHigh = new(m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT));
- GenTree* storeIndHigh = new(m_compiler, GT_STOREIND) GenTreeStoreInd(TYP_INT, addrHigh, dataHigh);
+ GenTree* addrBaseHigh = new (m_compiler, GT_LCL_VAR)
+ GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
+ GenTree* addrHigh =
+ new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT));
+ GenTree* storeIndHigh = new (m_compiler, GT_STOREIND) GenTreeStoreInd(TYP_INT, addrHigh, dataHigh);
storeIndHigh->gtFlags = (storeIndLow->gtFlags & (GTF_ALL_EFFECT | GTF_LIVENESS_MASK));
storeIndHigh->gtFlags |= GTF_REVERSE_OPS;
storeIndHigh->CopyCosts(storeIndLow);
@@ -804,7 +795,7 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
SimpleLinkNodeAfter(dataHigh, addrBaseHigh);
SimpleLinkNodeAfter(addrBaseHigh, addrHigh);
SimpleLinkNodeAfter(addrHigh, storeIndHigh);
-
+
// External links of storeIndHigh tree
// dataHigh->gtPrev = nullptr;
if (isEmbeddedStmt)
@@ -812,7 +803,7 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
// If storeIndTree is an embedded statement, connect storeIndLow
// and dataHigh
storeIndLow->gtNext = dataHigh;
- dataHigh->gtPrev = storeIndLow;
+ dataHigh->gtPrev = storeIndLow;
}
storeIndHigh->gtNext = linkEnd;
if (linkEnd != nullptr)
@@ -822,7 +813,7 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
InsertNodeAsStmt(storeIndHigh);
- // Example final output
+ // Example final output
//
// * stmtExpr void (top level) (IL ???... ???)
// | /--* argPlace ref $280
@@ -839,8 +830,8 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
// | | { | | { | +--* lclFld int V01 arg1 [+12]
// | | { | | { | /--* gt_long long
// | | { | | { \--* st.lclVar long (P) V21 cse8
- // | | { | | { \--* int V21.hi (offs=0x00) -> V22 rat0
- // | | { | | { \--* int V21.hi (offs=0x04) -> V23 rat1
+ // | | { | | { \--* int V21.hi (offs=0x00) -> V22 rat0
+ // | | { | | { \--* int V21.hi (offs=0x04) -> V23 rat1
// | | { | | { * stmtExpr void (embedded) (IL ???... ???)
// | | { | | { | /--* lclVar int V22 rat0 $380
// | | { | | { \--* st.lclVar int V25 rat3
@@ -865,7 +856,6 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
// (editor brace matching compensation: }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}})
}
-
//------------------------------------------------------------------------
// DecomposeInd: Decompose GT_IND.
//
@@ -877,7 +867,7 @@ void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* d
//
void DecomposeLongs::DecomposeInd(GenTree** ppTree, Compiler::fgWalkData* data)
{
- GenTreePtr indLow = *ppTree;
+ GenTreePtr indLow = *ppTree;
GenTreeStmt* addrStmt = CreateTemporary(&indLow->gtOp.gtOp1);
JITDUMP("[DecomposeInd]: Saving addr tree to a temp var:\n");
DISPTREE(addrStmt);
@@ -886,12 +876,13 @@ void DecomposeLongs::DecomposeInd(GenTree** ppTree, Compiler::fgWalkData* data)
indLow->gtType = TYP_INT;
// Create tree of ind(addr+4)
- GenTreePtr addrBase = indLow->gtGetOp1();
- GenTreePtr addrBaseHigh = new(m_compiler, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR,
- addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
- GenTreePtr addrHigh = new(m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT));
+ GenTreePtr addrBase = indLow->gtGetOp1();
+ GenTreePtr addrBaseHigh = new (m_compiler, GT_LCL_VAR)
+ GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
+ GenTreePtr addrHigh =
+ new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT));
GenTreePtr indHigh = new (m_compiler, GT_IND) GenTreeIndir(GT_IND, TYP_INT, addrHigh, nullptr);
-
+
// Connect linear links
SimpleLinkNodeAfter(addrBaseHigh, addrHigh);
SimpleLinkNodeAfter(addrHigh, indHigh);
@@ -920,21 +911,21 @@ void DecomposeLongs::DecomposeNot(GenTree** ppTree, Compiler::fgWalkData* data)
GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt();
GenTree* tree = *ppTree;
- GenTree* op1 = tree->gtGetOp1();
+ GenTree* op1 = tree->gtGetOp1();
noway_assert(op1->OperGet() == GT_LONG);
GenTree* loOp1 = op1->gtGetOp1();
GenTree* hiOp1 = op1->gtGetOp2();
m_compiler->fgSnipNode(curStmt, op1);
- GenTree* loResult = tree;
- loResult->gtType = TYP_INT;
+ GenTree* loResult = tree;
+ loResult->gtType = TYP_INT;
loResult->gtOp.gtOp1 = loOp1;
- loOp1->gtNext = loResult;
- loResult->gtPrev = loOp1;
+ loOp1->gtNext = loResult;
+ loResult->gtPrev = loOp1;
GenTree* hiResult = new (m_compiler, GT_NOT) GenTreeOp(GT_NOT, TYP_INT, hiOp1, nullptr);
- hiOp1->gtNext = hiResult;
- hiResult->gtPrev = hiOp1;
+ hiOp1->gtNext = hiResult;
+ hiResult->gtPrev = hiOp1;
FinalizeDecomposition(ppTree, data, loResult, hiResult);
}
@@ -958,8 +949,8 @@ void DecomposeLongs::DecomposeNeg(GenTree** ppTree, Compiler::fgWalkData* data)
assert(m_compiler->compCurStmt != nullptr);
GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt();
- GenTree* tree = *ppTree;
- GenTree* op1 = tree->gtGetOp1();
+ GenTree* tree = *ppTree;
+ GenTree* op1 = tree->gtGetOp1();
noway_assert(op1->OperGet() == GT_LONG);
CreateTemporary(&(op1->gtOp.gtOp1));
@@ -970,11 +961,11 @@ void DecomposeLongs::DecomposeNeg(GenTree** ppTree, Compiler::fgWalkData* data)
GenTree* hiOp1 = op1->gtGetOp2();
Compiler::fgSnipNode(curStmt, op1);
- GenTree* loResult = tree;
- loResult->gtType = TYP_INT;
+ GenTree* loResult = tree;
+ loResult->gtType = TYP_INT;
loResult->gtOp.gtOp1 = loOp1;
- GenTree* zero = m_compiler->gtNewZeroConNode(TYP_INT);
+ GenTree* zero = m_compiler->gtNewZeroConNode(TYP_INT);
GenTree* hiAdjust = m_compiler->gtNewOperNode(GT_ADD_HI, TYP_INT, hiOp1, zero);
GenTree* hiResult = m_compiler->gtNewOperNode(GT_NEG, TYP_INT, hiAdjust);
hiResult->gtFlags = tree->gtFlags;
@@ -1008,14 +999,10 @@ void DecomposeLongs::DecomposeArith(GenTree** ppTree, Compiler::fgWalkData* data
assert(m_compiler->compCurStmt != nullptr);
GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt();
- GenTree* tree = *ppTree;
- genTreeOps oper = tree->OperGet();
+ GenTree* tree = *ppTree;
+ genTreeOps oper = tree->OperGet();
- assert((oper == GT_ADD) ||
- (oper == GT_SUB) ||
- (oper == GT_OR) ||
- (oper == GT_XOR) ||
- (oper == GT_AND));
+ assert((oper == GT_ADD) || (oper == GT_SUB) || (oper == GT_OR) || (oper == GT_XOR) || (oper == GT_AND));
NYI_IF((tree->gtFlags & GTF_REVERSE_OPS) != 0, "Binary operator with GTF_REVERSE_OPS");
@@ -1042,15 +1029,11 @@ void DecomposeLongs::DecomposeArith(GenTree** ppTree, Compiler::fgWalkData* data
// requires a unique high operator, and the child nodes are not simple locals (e.g.,
// they are decomposed nodes), then we also can't decompose the node, as we aren't
// guaranteed the high and low parts will be executed immediately after each other.
-
- NYI_IF(hiOp1->OperIsHigh() ||
- hiOp2->OperIsHigh() ||
- (GenTree::OperIsHigh(GetHiOper(oper)) &&
- (!loOp1->OperIsLeaf() ||
- !hiOp1->OperIsLeaf() ||
- !loOp1->OperIsLeaf() ||
- !hiOp2->OperIsLeaf())),
- "Can't decompose expression tree TYP_LONG node");
+
+ NYI_IF(hiOp1->OperIsHigh() || hiOp2->OperIsHigh() ||
+ (GenTree::OperIsHigh(GetHiOper(oper)) &&
+ (!loOp1->OperIsLeaf() || !hiOp1->OperIsLeaf() || !loOp1->OperIsLeaf() || !hiOp2->OperIsLeaf())),
+ "Can't decompose expression tree TYP_LONG node");
// Now, remove op1 and op2 from the node list.
m_compiler->fgSnipNode(curStmt, op1);
@@ -1060,7 +1043,7 @@ void DecomposeLongs::DecomposeArith(GenTree** ppTree, Compiler::fgWalkData* data
// will be the lo halves of op1 from above.
GenTree* loResult = tree;
loResult->SetOper(GetLoOper(loResult->OperGet()));
- loResult->gtType = TYP_INT;
+ loResult->gtType = TYP_INT;
loResult->gtOp.gtOp1 = loOp1;
loResult->gtOp.gtOp2 = loOp2;
@@ -1088,7 +1071,7 @@ void DecomposeLongs::DecomposeArith(GenTree** ppTree, Compiler::fgWalkData* data
loResult->gtPrev = loOp2;
// Next, reorder the hiOps and the hiResult.
- GenTree* hiResult = new (m_compiler, oper) GenTreeOp(GetHiOper(oper), TYP_INT, hiOp1, hiOp2);
+ GenTree* hiResult = new (m_compiler, oper) GenTreeOp(GetHiOper(oper), TYP_INT, hiOp1, hiOp2);
hiOp1->gtNext = hiOp2First;
hiOp2First->gtPrev = hiOp1;
hiOp2->gtNext = hiResult;
@@ -1110,7 +1093,6 @@ void DecomposeLongs::DecomposeArith(GenTree** ppTree, Compiler::fgWalkData* data
FinalizeDecomposition(ppTree, data, loResult, hiResult);
}
-
//------------------------------------------------------------------------
// CreateTemporary: call fgInsertEmbeddedFormTemp to replace *ppTree with
// a new temp that is assigned to the value previously at *ppTree by inserting
@@ -1129,9 +1111,8 @@ GenTreeStmt* DecomposeLongs::CreateTemporary(GenTree** ppTree)
GenTreeStmt* newStmt = m_compiler->fgInsertEmbeddedFormTemp(ppTree);
if (newStmt->gtStmtIsTopLevel())
{
- for (GenTreeStmt* nextEmbeddedStmt = newStmt->gtStmtNextIfEmbedded();
- nextEmbeddedStmt != nullptr;
- nextEmbeddedStmt = nextEmbeddedStmt->gtStmt.gtStmtNextIfEmbedded())
+ for (GenTreeStmt* nextEmbeddedStmt = newStmt->gtStmtNextIfEmbedded(); nextEmbeddedStmt != nullptr;
+ nextEmbeddedStmt = nextEmbeddedStmt->gtStmt.gtStmtNextIfEmbedded())
{
DecomposeStmt(nextEmbeddedStmt);
}
@@ -1140,7 +1121,6 @@ GenTreeStmt* DecomposeLongs::CreateTemporary(GenTree** ppTree)
return newStmt;
}
-
//------------------------------------------------------------------------
// InsertNodeAsStmt: Insert a node as the root node of a new statement.
// If the current statement is embedded, the new statement will also be
@@ -1198,7 +1178,6 @@ void DecomposeLongs::InsertNodeAsStmt(GenTree* node)
#endif // DEBUG
}
-
//------------------------------------------------------------------------
// GetHiOper: Convert arithmetic operator to "high half" operator of decomposed node.
//
@@ -1213,21 +1192,36 @@ genTreeOps DecomposeLongs::GetHiOper(genTreeOps oper)
{
switch (oper)
{
- case GT_ADD: return GT_ADD_HI; break;
- case GT_SUB: return GT_SUB_HI; break;
- case GT_MUL: return GT_MUL_HI; break;
- case GT_DIV: return GT_DIV_HI; break;
- case GT_MOD: return GT_MOD_HI; break;
- case GT_OR: return GT_OR; break;
- case GT_AND: return GT_AND; break;
- case GT_XOR: return GT_XOR; break;
- default:
- assert(!"GetHiOper called for invalid oper");
- return GT_NONE;
+ case GT_ADD:
+ return GT_ADD_HI;
+ break;
+ case GT_SUB:
+ return GT_SUB_HI;
+ break;
+ case GT_MUL:
+ return GT_MUL_HI;
+ break;
+ case GT_DIV:
+ return GT_DIV_HI;
+ break;
+ case GT_MOD:
+ return GT_MOD_HI;
+ break;
+ case GT_OR:
+ return GT_OR;
+ break;
+ case GT_AND:
+ return GT_AND;
+ break;
+ case GT_XOR:
+ return GT_XOR;
+ break;
+ default:
+ assert(!"GetHiOper called for invalid oper");
+ return GT_NONE;
}
}
-
//------------------------------------------------------------------------
// GetLoOper: Convert arithmetic operator to "low half" operator of decomposed node.
//
@@ -1242,18 +1236,27 @@ genTreeOps DecomposeLongs::GetLoOper(genTreeOps oper)
{
switch (oper)
{
- case GT_ADD: return GT_ADD_LO; break;
- case GT_SUB: return GT_SUB_LO; break;
- case GT_OR: return GT_OR; break;
- case GT_AND: return GT_AND; break;
- case GT_XOR: return GT_XOR; break;
- default:
- assert(!"GetLoOper called for invalid oper");
- return GT_NONE;
+ case GT_ADD:
+ return GT_ADD_LO;
+ break;
+ case GT_SUB:
+ return GT_SUB_LO;
+ break;
+ case GT_OR:
+ return GT_OR;
+ break;
+ case GT_AND:
+ return GT_AND;
+ break;
+ case GT_XOR:
+ return GT_XOR;
+ break;
+ default:
+ assert(!"GetLoOper called for invalid oper");
+ return GT_NONE;
}
}
-
//------------------------------------------------------------------------
// SimpleLinkNodeAfter: insert a node after a given node in the execution order.
// NOTE: Does not support inserting after the last node of a statement, which
@@ -1275,9 +1278,9 @@ void DecomposeLongs::SimpleLinkNodeAfter(GenTree* insertionPoint, GenTree* node)
assert(insertionPoint != nullptr);
assert(node != nullptr);
- GenTree* nextTree = insertionPoint->gtNext;
- node->gtPrev = insertionPoint;
- node->gtNext = nextTree;
+ GenTree* nextTree = insertionPoint->gtNext;
+ node->gtPrev = insertionPoint;
+ node->gtNext = nextTree;
insertionPoint->gtNext = node;
if (nextTree != nullptr)
{
@@ -1285,6 +1288,5 @@ void DecomposeLongs::SimpleLinkNodeAfter(GenTree* insertionPoint, GenTree* node)
}
}
-
#endif // !_TARGET_64BIT_
#endif // !LEGACY_BACKEND
diff --git a/src/jit/decomposelongs.h b/src/jit/decomposelongs.h
index 778e6de244..fc02950f49 100644
--- a/src/jit/decomposelongs.h
+++ b/src/jit/decomposelongs.h
@@ -19,17 +19,14 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class DecomposeLongs
{
public:
-
- DecomposeLongs(Compiler* compiler)
- : m_compiler(compiler)
+ DecomposeLongs(Compiler* compiler) : m_compiler(compiler)
{
}
void PrepareForDecomposition();
void DecomposeBlock(BasicBlock* block);
-
-private:
+private:
// Driver functions
static Compiler::fgWalkResult DecompNodeHelper(GenTree** ppTree, Compiler::fgWalkData* data);
void DecomposeStmt(GenTreeStmt* stmt);
diff --git a/src/jit/delayload.cpp b/src/jit/delayload.cpp
index cb39112159..895a13a6bf 100644
--- a/src/jit/delayload.cpp
+++ b/src/jit/delayload.cpp
@@ -8,4 +8,3 @@
#include "shimload.h"
ExternC PfnDliHook __pfnDliNotifyHook = ShimDelayLoadHook;
-
diff --git a/src/jit/disasm.cpp b/src/jit/disasm.cpp
index e7030b309e..925f2c3343 100644
--- a/src/jit/disasm.cpp
+++ b/src/jit/disasm.cpp
@@ -6,7 +6,7 @@
* File: dis.cpp
*
-*
+*
* File Comments:
*
* This file handles disassembly. It is adapted from the MS linker.
@@ -19,46 +19,65 @@
#endif
/*****************************************************************************/
-#ifdef LATE_DISASM
+#ifdef LATE_DISASM
/*****************************************************************************/
// Define DISASM_DEBUG to get verbose output of late disassembler inner workings.
//#define DISASM_DEBUG
#ifdef DISASM_DEBUG
#ifdef DEBUG
-#define DISASM_DUMP(...) if (VERBOSE) printf(__VA_ARGS__)
+#define DISASM_DUMP(...) \
+ if (VERBOSE) \
+ printf(__VA_ARGS__)
#else // !DEBUG
#define DISASM_DUMP(...) printf(__VA_ARGS__)
#endif // !DEBUG
-#else // !DISASM_DEBUG
+#else // !DISASM_DEBUG
#define DISASM_DUMP(...)
#endif // !DISASM_DEBUG
/*****************************************************************************/
-#define MAX_CLASSNAME_LENGTH 1024
+#define MAX_CLASSNAME_LENGTH 1024
#if defined(_AMD64_)
-#pragma comment(linker, "/ALTERNATENAME:__imp_?CchFormatAddr@DIS@@QEBA_K_KPEAG0@Z=__imp_?CchFormatAddr@DIS@@QEBA_K_KPEA_W0@Z")
-#pragma comment(linker, "/ALTERNATENAME:__imp_?CchFormatInstr@DIS@@QEBA_KPEAG_K@Z=__imp_?CchFormatInstr@DIS@@QEBA_KPEA_W_K@Z")
-#pragma comment(linker, "/ALTERNATENAME:__imp_?PfncchaddrSet@DIS@@QEAAP6A_KPEBV1@_KPEAG1PEA_K@ZP6A_K01213@Z@Z=__imp_?PfncchaddrSet@DIS@@QEAAP6A_KPEBV1@_KPEA_W1PEA_K@ZP6A_K01213@Z@Z")
-#pragma comment(linker, "/ALTERNATENAME:__imp_?PfncchregSet@DIS@@QEAAP6A_KPEBV1@W4REGA@1@PEAG_K@ZP6A_K0123@Z@Z=__imp_?PfncchregSet@DIS@@QEAAP6A_KPEBV1@W4REGA@1@PEA_W_K@ZP6A_K0123@Z@Z")
-#pragma comment(linker, "/ALTERNATENAME:__imp_?PfncchregrelSet@DIS@@QEAAP6A_KPEBV1@W4REGA@1@KPEAG_KPEAK@ZP6A_K01K234@Z@Z=__imp_?PfncchregrelSet@DIS@@QEAAP6A_KPEBV1@W4REGA@1@KPEA_W_KPEAK@ZP6A_K01K234@Z@Z")
-#pragma comment(linker, "/ALTERNATENAME:__imp_?PfncchfixupSet@DIS@@QEAAP6A_KPEBV1@_K1PEAG1PEA_K@ZP6A_K011213@Z@Z=__imp_?PfncchfixupSet@DIS@@QEAAP6A_KPEBV1@_K1PEA_W1PEA_K@ZP6A_K011213@Z@Z")
+#pragma comment(linker, \
+ "/ALTERNATENAME:__imp_?CchFormatAddr@DIS@@QEBA_K_KPEAG0@Z=__imp_?CchFormatAddr@DIS@@QEBA_K_KPEA_W0@Z")
+#pragma comment(linker, \
+ "/ALTERNATENAME:__imp_?CchFormatInstr@DIS@@QEBA_KPEAG_K@Z=__imp_?CchFormatInstr@DIS@@QEBA_KPEA_W_K@Z")
+#pragma comment( \
+ linker, \
+ "/ALTERNATENAME:__imp_?PfncchaddrSet@DIS@@QEAAP6A_KPEBV1@_KPEAG1PEA_K@ZP6A_K01213@Z@Z=__imp_?PfncchaddrSet@DIS@@QEAAP6A_KPEBV1@_KPEA_W1PEA_K@ZP6A_K01213@Z@Z")
+#pragma comment( \
+ linker, \
+ "/ALTERNATENAME:__imp_?PfncchregSet@DIS@@QEAAP6A_KPEBV1@W4REGA@1@PEAG_K@ZP6A_K0123@Z@Z=__imp_?PfncchregSet@DIS@@QEAAP6A_KPEBV1@W4REGA@1@PEA_W_K@ZP6A_K0123@Z@Z")
+#pragma comment( \
+ linker, \
+ "/ALTERNATENAME:__imp_?PfncchregrelSet@DIS@@QEAAP6A_KPEBV1@W4REGA@1@KPEAG_KPEAK@ZP6A_K01K234@Z@Z=__imp_?PfncchregrelSet@DIS@@QEAAP6A_KPEBV1@W4REGA@1@KPEA_W_KPEAK@ZP6A_K01K234@Z@Z")
+#pragma comment( \
+ linker, \
+ "/ALTERNATENAME:__imp_?PfncchfixupSet@DIS@@QEAAP6A_KPEBV1@_K1PEAG1PEA_K@ZP6A_K011213@Z@Z=__imp_?PfncchfixupSet@DIS@@QEAAP6A_KPEBV1@_K1PEA_W1PEA_K@ZP6A_K011213@Z@Z")
#elif defined(_X86_)
#pragma comment(linker, "/ALTERNATENAME:__imp_?CchFormatAddr@DIS@@QBEI_KPAGI@Z=__imp_?CchFormatAddr@DIS@@QBEI_KPA_WI@Z")
#pragma comment(linker, "/ALTERNATENAME:__imp_?CchFormatInstr@DIS@@QBEIPAGI@Z=__imp_?CchFormatInstr@DIS@@QBEIPA_WI@Z")
-#pragma comment(linker, "/ALTERNATENAME:__imp_?PfncchaddrSet@DIS@@QAEP6GIPBV1@_KPAGIPA_K@ZP6GI012I3@Z@Z=__imp_?PfncchaddrSet@DIS@@QAEP6GIPBV1@_KPA_WIPA_K@ZP6GI012I3@Z@Z")
-#pragma comment(linker, "/ALTERNATENAME:__imp_?PfncchregSet@DIS@@QAEP6GIPBV1@W4REGA@1@PAGI@ZP6GI012I@Z@Z=__imp_?PfncchregSet@DIS@@QAEP6GIPBV1@W4REGA@1@PA_WI@ZP6GI012I@Z@Z")
-#pragma comment(linker, "/ALTERNATENAME:__imp_?PfncchregrelSet@DIS@@QAEP6GIPBV1@W4REGA@1@KPAGIPAK@ZP6GI01K2I3@Z@Z=__imp_?PfncchregrelSet@DIS@@QAEP6GIPBV1@W4REGA@1@KPA_WIPAK@ZP6GI01K2I3@Z@Z")
-#pragma comment(linker, "/ALTERNATENAME:__imp_?PfncchfixupSet@DIS@@QAEP6GIPBV1@_KIPAGIPA_K@ZP6GI01I2I3@Z@Z=__imp_?PfncchfixupSet@DIS@@QAEP6GIPBV1@_KIPA_WIPA_K@ZP6GI01I2I3@Z@Z")
+#pragma comment( \
+ linker, \
+ "/ALTERNATENAME:__imp_?PfncchaddrSet@DIS@@QAEP6GIPBV1@_KPAGIPA_K@ZP6GI012I3@Z@Z=__imp_?PfncchaddrSet@DIS@@QAEP6GIPBV1@_KPA_WIPA_K@ZP6GI012I3@Z@Z")
+#pragma comment( \
+ linker, \
+ "/ALTERNATENAME:__imp_?PfncchregSet@DIS@@QAEP6GIPBV1@W4REGA@1@PAGI@ZP6GI012I@Z@Z=__imp_?PfncchregSet@DIS@@QAEP6GIPBV1@W4REGA@1@PA_WI@ZP6GI012I@Z@Z")
+#pragma comment( \
+ linker, \
+ "/ALTERNATENAME:__imp_?PfncchregrelSet@DIS@@QAEP6GIPBV1@W4REGA@1@KPAGIPAK@ZP6GI01K2I3@Z@Z=__imp_?PfncchregrelSet@DIS@@QAEP6GIPBV1@W4REGA@1@KPA_WIPAK@ZP6GI01K2I3@Z@Z")
+#pragma comment( \
+ linker, \
+ "/ALTERNATENAME:__imp_?PfncchfixupSet@DIS@@QAEP6GIPBV1@_KIPAGIPA_K@ZP6GI01I2I3@Z@Z=__imp_?PfncchfixupSet@DIS@@QAEP6GIPBV1@_KIPA_WIPA_K@ZP6GI01I2I3@Z@Z")
#endif
-
/*****************************************************************************
* Given an absolute address from the beginning of the code
* find the corresponding emitter block and the relative offset
@@ -70,14 +89,15 @@
// These structs were defined in emit.h. Fake them here so DisAsm.cpp can compile
typedef struct codeFix
-{ codeFix * cfNext;
+{
+ codeFix* cfNext;
unsigned cfFixup;
-}
- * codeFixPtr;
+} * codeFixPtr;
typedef struct codeBlk
-{ codeFix * cbFixupLst; }
- * codeBlkPtr;
+{
+ codeFix* cbFixupLst;
+} * codeBlkPtr;
/*****************************************************************************
* The following is the callback for jump label and direct function calls fixups.
@@ -88,26 +108,16 @@ typedef struct codeBlk
*/
/* static */
-size_t __stdcall DisAssembler::disCchAddr (const DIS* pdis,
- DIS::ADDR addr,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORDLONG* pdwDisp)
+size_t __stdcall DisAssembler::disCchAddr(
+ const DIS* pdis, DIS::ADDR addr, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORDLONG* pdwDisp)
{
- DisAssembler * pDisAsm = (DisAssembler *) pdis->PvClient();
+ DisAssembler* pDisAsm = (DisAssembler*)pdis->PvClient();
assert(pDisAsm);
- return pDisAsm->disCchAddrMember(pdis,
- addr,
- wz,
- cchMax,
- pdwDisp);
+ return pDisAsm->disCchAddrMember(pdis, addr, wz, cchMax, pdwDisp);
}
-size_t DisAssembler::disCchAddrMember (const DIS* pdis,
- DIS::ADDR addr,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORDLONG* pdwDisp)
+size_t DisAssembler::disCchAddrMember(
+ const DIS* pdis, DIS::ADDR addr, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORDLONG* pdwDisp)
{
/* First check the termination type of the instruction
* because this might be a helper or static function call
@@ -125,85 +135,83 @@ size_t DisAssembler::disCchAddrMember (const DIS* pdis,
{
// int disCallSize;
- case DISX86::trmtaJmpShort:
- case DISX86::trmtaJmpCcShort:
+ case DISX86::trmtaJmpShort:
+ case DISX86::trmtaJmpCcShort:
- /* We have a short jump in the current code block - generate the label to which we jump */
+ /* We have a short jump in the current code block - generate the label to which we jump */
- assert(0 <= disTarget &&
- disTarget < disTotalCodeSize);
- swprintf_s(wz, cchMax, W("short L_%02u"), disLabels[disTarget]);
- retval = 1;
- break;
+ assert(0 <= disTarget && disTarget < disTotalCodeSize);
+ swprintf_s(wz, cchMax, W("short L_%02u"), disLabels[disTarget]);
+ retval = 1;
+ break;
- case DISX86::trmtaJmpNear:
- case DISX86::trmtaJmpCcNear:
+ case DISX86::trmtaJmpNear:
+ case DISX86::trmtaJmpCcNear:
- /* We have a near jump. Check if is in the current code block.
- * Otherwise we have no target for it. */
+ /* We have a near jump. Check if is in the current code block.
+ * Otherwise we have no target for it. */
- if (0 <= disTarget &&
- disTarget < disTotalCodeSize)
- {
- swprintf_s(wz, cchMax, W("L_%02u"), disLabels[disTarget]);
- retval = 1;
- }
- break;
+ if (0 <= disTarget && disTarget < disTotalCodeSize)
+ {
+ swprintf_s(wz, cchMax, W("L_%02u"), disLabels[disTarget]);
+ retval = 1;
+ }
+ break;
- case DISX86::trmtaCallNear16:
- case DISX86::trmtaCallNear32:
+ case DISX86::trmtaCallNear16:
+ case DISX86::trmtaCallNear32:
- /* check for local calls (i.e. CALL label) */
+ /* check for local calls (i.e. CALL label) */
- if (0 <= disTarget &&
- disTarget < disTotalCodeSize)
- {
- /* not a "call ds:[0000]" - go ahead */
- /* disTarget within block boundary -> local call */
+ if (0 <= disTarget && disTarget < disTotalCodeSize)
+ {
+ /* not a "call ds:[0000]" - go ahead */
+ /* disTarget within block boundary -> local call */
- swprintf_s(wz, cchMax, W("short L_%02u"), disLabels[disTarget]);
- retval = 1;
- break;
- }
+ swprintf_s(wz, cchMax, W("short L_%02u"), disLabels[disTarget]);
+ retval = 1;
+ break;
+ }
- /* this is a near call - in our case usually VM helper functions */
+ /* this is a near call - in our case usually VM helper functions */
- /* find the emitter block and the offset of the call fixup */
- /* for the fixup offset we have to add the opcode size for the call - in the case of a near call is 1 */
+ /* find the emitter block and the offset of the call fixup */
+ /* for the fixup offset we have to add the opcode size for the call - in the case of a near call is 1 */
- // disCallSize = 1;
+ // disCallSize = 1;
- {
- size_t absoluteTarget = (size_t)disGetLinearAddr(disTarget);
- const char* name = disGetMethodFullName(absoluteTarget);
- if (name != nullptr)
{
- swprintf_s(wz, cchMax, W("%p %S"), dspAddr(absoluteTarget), name);
- retval = 1;
- break;
+ size_t absoluteTarget = (size_t)disGetLinearAddr(disTarget);
+ const char* name = disGetMethodFullName(absoluteTarget);
+ if (name != nullptr)
+ {
+ swprintf_s(wz, cchMax, W("%p %S"), dspAddr(absoluteTarget), name);
+ retval = 1;
+ break;
+ }
}
- }
- break;
+ break;
#ifdef _TARGET_AMD64_
- case DISX86::trmtaFallThrough:
+ case DISX86::trmtaFallThrough:
- /* memory indirect case. Could be for an LEA for the base address of a switch table, which is an arbitrary address, currently of the first block after the prolog. */
+ /* memory indirect case. Could be for an LEA for the base address of a switch table, which is an arbitrary
+ * address, currently of the first block after the prolog. */
- /* find the emitter block and the offset for the fixup
- * "addr" is the address of the immediate */
+ /* find the emitter block and the offset for the fixup
+ * "addr" is the address of the immediate */
- break;
+ break;
#endif // _TARGET_AMD64_
- default:
+ default:
- printf("Termination type is %d\n", (int) terminationType);
- assert(!"treat this case\n");
- break;
+ printf("Termination type is %d\n", (int)terminationType);
+ assert(!"treat this case\n");
+ break;
}
#elif defined(_TARGET_ARM64_)
@@ -216,102 +224,99 @@ size_t DisAssembler::disCchAddrMember (const DIS* pdis,
{
// int disCallSize;
- case DISARM64::TRMTA::trmtaBra:
- case DISARM64::TRMTA::trmtaBraCase:
- case DISARM64::TRMTA::trmtaBraCc:
- case DISARM64::TRMTA::trmtaBraCcCase:
- case DISARM64::TRMTA::trmtaBraCcInd:
- case DISARM64::TRMTA::trmtaBraInd:
-
- /* We have a jump. Check if is in the current code block.
- * Otherwise we have no target for it. */
-
- if (0 <= disTarget &&
- disTarget < disTotalCodeSize)
- {
- swprintf_s(wz, cchMax, W("L_%02u"), disLabels[disTarget]);
- retval = 1;
- }
- break;
-
- case DISARM64::trmtaCall:
- case DISARM64::trmtaCallCc:
- case DISARM64::trmtaCallCcInd:
- case DISARM64::trmtaCallInd:
-
- /* check for local calls (i.e. CALL label) */
+ case DISARM64::TRMTA::trmtaBra:
+ case DISARM64::TRMTA::trmtaBraCase:
+ case DISARM64::TRMTA::trmtaBraCc:
+ case DISARM64::TRMTA::trmtaBraCcCase:
+ case DISARM64::TRMTA::trmtaBraCcInd:
+ case DISARM64::TRMTA::trmtaBraInd:
- if (0 <= disTarget &&
- disTarget < disTotalCodeSize)
- {
- /* not a "call [0000]" - go ahead */
- /* disTarget within block boundary -> local call */
+ /* We have a jump. Check if is in the current code block.
+ * Otherwise we have no target for it. */
- swprintf_s(wz, cchMax, W("L_%02u"), disLabels[disTarget]);
- retval = 1;
+ if (0 <= disTarget && disTarget < disTotalCodeSize)
+ {
+ swprintf_s(wz, cchMax, W("L_%02u"), disLabels[disTarget]);
+ retval = 1;
+ }
break;
- }
-
- /* this is a near call - in our case usually VM helper functions */
- /* find the emitter block and the offset of the call fixup */
- /* for the fixup offset we have to add the opcode size for the call - in the case of a near call is 1 */
+ case DISARM64::trmtaCall:
+ case DISARM64::trmtaCallCc:
+ case DISARM64::trmtaCallCcInd:
+ case DISARM64::trmtaCallInd:
- // disCallSize = 1;
+ /* check for local calls (i.e. CALL label) */
- {
- size_t absoluteTarget = (size_t)disGetLinearAddr(disTarget);
- const char* name = disGetMethodFullName(absoluteTarget);
- if (name != nullptr)
+ if (0 <= disTarget && disTarget < disTotalCodeSize)
{
- swprintf_s(wz, cchMax, W("%p %S"), dspAddr(absoluteTarget), name);
+ /* not a "call [0000]" - go ahead */
+ /* disTarget within block boundary -> local call */
+
+ swprintf_s(wz, cchMax, W("L_%02u"), disLabels[disTarget]);
retval = 1;
break;
}
- }
- break;
+ /* this is a near call - in our case usually VM helper functions */
- case DISARM64::trmtaFallThrough:
-
- /* memory indirect case. Could be for an LEA for the base address of a switch table, which is an arbitrary address, currently of the first block after the prolog. */
+ /* find the emitter block and the offset of the call fixup */
+ /* for the fixup offset we have to add the opcode size for the call - in the case of a near call is 1 */
- /* find the emitter block and the offset for the fixup
- * "addr" is the address of the immediate */
+ // disCallSize = 1;
- {
- DIS::INSTRUCTION instr;
- DIS::OPERAND ops[DISARM64::coperandMax];
- bool ok = pdis->FDecode(&instr, ops, ArrLen(ops));
- if (ok)
{
- bool isAddress = false;
- switch ((DISARM64::OPA)instr.opa)
+ size_t absoluteTarget = (size_t)disGetLinearAddr(disTarget);
+ const char* name = disGetMethodFullName(absoluteTarget);
+ if (name != nullptr)
{
- case DISARM64::opaAdr:
- case DISARM64::opaAdrp:
- isAddress = true;
- break;
- default:
+ swprintf_s(wz, cchMax, W("%p %S"), dspAddr(absoluteTarget), name);
+ retval = 1;
break;
}
+ }
+
+ break;
- if (isAddress &&
- 0 <= addr &&
- addr < disTotalCodeSize)
+ case DISARM64::trmtaFallThrough:
+
+ /* memory indirect case. Could be for an LEA for the base address of a switch table, which is an arbitrary
+ * address, currently of the first block after the prolog. */
+
+ /* find the emitter block and the offset for the fixup
+ * "addr" is the address of the immediate */
+
+ {
+ DIS::INSTRUCTION instr;
+ DIS::OPERAND ops[DISARM64::coperandMax];
+ bool ok = pdis->FDecode(&instr, ops, ArrLen(ops));
+ if (ok)
{
- swprintf_s(wz, cchMax, W("L_%02u"), disLabels[addr]);
- retval = 1;
+ bool isAddress = false;
+ switch ((DISARM64::OPA)instr.opa)
+ {
+ case DISARM64::opaAdr:
+ case DISARM64::opaAdrp:
+ isAddress = true;
+ break;
+ default:
+ break;
+ }
+
+ if (isAddress && 0 <= addr && addr < disTotalCodeSize)
+ {
+ swprintf_s(wz, cchMax, W("L_%02u"), disLabels[addr]);
+ retval = 1;
+ }
}
}
- }
- break;
+ break;
- default:
+ default:
- printf("Termination type is %d\n", (int) terminationType);
- assert(!"treat this case\n");
- break;
+ printf("Termination type is %d\n", (int)terminationType);
+ assert(!"treat this case\n");
+ break;
}
#else // _TARGET_*
@@ -335,8 +340,6 @@ size_t DisAssembler::disCchAddrMember (const DIS* pdis,
return retval;
}
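
The label resolution above reduces to a small contract: a branch target gets a symbolic "L_xx" name only when it falls inside the method's code range and was marked during the label-finding pass; otherwise the callback returns 0 so msdis prints the raw address. A minimal stand-alone sketch of that contract, using simplified stand-in types (LabelMap, formatBranchTarget) rather than the JIT's own:

    #include <cstdio>
    #include <cwchar>
    #include <vector>

    // Illustrative stand-in for disLabels: one entry per byte of generated code,
    // zero when that offset is not a jump target, otherwise a small label number.
    struct LabelMap
    {
        std::vector<unsigned char> labels;
        size_t                     codeSize;
    };

    // Mirrors the CchAddr contract: a non-zero return means "wz holds a symbolic name".
    static size_t formatBranchTarget(const LabelMap& map, size_t target, wchar_t* wz, size_t cchMax)
    {
        if ((target < map.codeSize) && (map.labels[target] != 0))
        {
            swprintf(wz, cchMax, L"L_%02u", (unsigned)map.labels[target]);
            return 1;
        }
        return 0; // outside the method (e.g. a helper call) - no local label
    }

    int main()
    {
        LabelMap map{std::vector<unsigned char>(64, 0), 64};
        map.labels[16] = 3; // pretend the first pass marked offset 16 as label 3

        wchar_t wz[32];
        if (formatBranchTarget(map, 16, wz, 32) != 0)
        {
            wprintf(L"%ls\n", wz); // prints "L_03"
        }
        return 0;
    }
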
-
-
/*****************************************************************************
* We annotate some instructions to get info needed to display the symbols
* for that instruction.
@@ -345,235 +348,224 @@ size_t DisAssembler::disCchAddrMember (const DIS* pdis,
*/
/* static */
-size_t __stdcall DisAssembler::disCchFixup (const DIS* pdis,
- DIS::ADDR addr,
- size_t size,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORDLONG* pdwDisp)
+size_t __stdcall DisAssembler::disCchFixup(
+ const DIS* pdis, DIS::ADDR addr, size_t size, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORDLONG* pdwDisp)
{
- DisAssembler * pDisAsm = (DisAssembler *) pdis->PvClient();
+ DisAssembler* pDisAsm = (DisAssembler*)pdis->PvClient();
assert(pDisAsm);
- return pDisAsm->disCchFixupMember(pdis,
- addr,
- size,
- wz,
- cchMax,
- pdwDisp);
+ return pDisAsm->disCchFixupMember(pdis, addr, size, wz, cchMax, pdwDisp);
}
-size_t DisAssembler::disCchFixupMember (const DIS* pdis,
- DIS::ADDR addr,
- size_t size,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORDLONG* pdwDisp)
+size_t DisAssembler::disCchFixupMember(
+ const DIS* pdis, DIS::ADDR addr, size_t size, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORDLONG* pdwDisp)
{
#if defined(_TARGET_XARCH_)
DISX86::TRMTA terminationType = DISX86::TRMTA(pdis->Trmta());
- //DIS::ADDR disIndAddr;
+ // DIS::ADDR disIndAddr;
+
+ DISASM_DUMP("FixupMember %016I64X (%08IX), size %d, termType %u\n", addr, disGetLinearAddr((size_t)addr), size,
+ terminationType);
- DISASM_DUMP("FixupMember %016I64X (%08IX), size %d, termType %u\n", addr, disGetLinearAddr((size_t)addr), size, terminationType);
-
// Is there a relocation registered for the address?
size_t absoluteAddr = (size_t)disGetLinearAddr((size_t)addr);
size_t targetAddr;
- bool anyReloc = GetRelocationMap()->Lookup(absoluteAddr, &targetAddr);
+ bool anyReloc = GetRelocationMap()->Lookup(absoluteAddr, &targetAddr);
switch (terminationType)
{
DIS::ADDR disCallSize;
- case DISX86::trmtaFallThrough:
+ case DISX86::trmtaFallThrough:
- /* memory indirect case */
+ /* memory indirect case */
- assert(addr > pdis->Addr());
+ assert(addr > pdis->Addr());
- /* find the emitter block and the offset for the fixup
- * "addr" is the address of the immediate */
+ /* find the emitter block and the offset for the fixup
+ * "addr" is the address of the immediate */
- if (anyReloc)
- {
- // Make instructions like "mov rcx, 7FE8247A638h" diffable.
- swprintf_s(wz, cchMax, W("%IXh"), dspAddr(targetAddr));
- break;
- }
+ if (anyReloc)
+ {
+ // Make instructions like "mov rcx, 7FE8247A638h" diffable.
+ swprintf_s(wz, cchMax, W("%IXh"), dspAddr(targetAddr));
+ break;
+ }
- return 0;
+ return 0;
- case DISX86::trmtaJmpInd:
+ case DISX86::trmtaJmpInd:
- /* pretty rare case - something like "jmp [eax*4]"
- * not a function call or anything worth annotating */
+ /* pretty rare case - something like "jmp [eax*4]"
+ * not a function call or anything worth annotating */
- return 0;
+ return 0;
- case DISX86::trmtaTrap:
- case DISX86::trmtaTrapCc:
+ case DISX86::trmtaTrap:
+ case DISX86::trmtaTrapCc:
- /* some instructions like division have a TRAP termination type - ignore it */
+ /* some instructions like division have a TRAP termination type - ignore it */
- return 0;
+ return 0;
- case DISX86::trmtaJmpShort:
- case DISX86::trmtaJmpCcShort:
+ case DISX86::trmtaJmpShort:
+ case DISX86::trmtaJmpCcShort:
- case DISX86::trmtaJmpNear:
- case DISX86::trmtaJmpCcNear:
-
- /* these are treated by the CchAddr callback - skip them */
+ case DISX86::trmtaJmpNear:
+ case DISX86::trmtaJmpCcNear:
- return 0;
+ /* these are treated by the CchAddr callback - skip them */
- case DISX86::trmtaCallNear16:
- case DISX86::trmtaCallNear32:
-
- if (anyReloc)
- {
- const char* name = disGetMethodFullName(targetAddr);
- if (name != nullptr)
+ return 0;
+
+ case DISX86::trmtaCallNear16:
+ case DISX86::trmtaCallNear32:
+
+ if (anyReloc)
{
- swprintf_s(wz, cchMax, W("%p %S"), dspAddr(targetAddr), name);
- break;
+ const char* name = disGetMethodFullName(targetAddr);
+ if (name != nullptr)
+ {
+ swprintf_s(wz, cchMax, W("%p %S"), dspAddr(targetAddr), name);
+ break;
+ }
}
- }
- /* these are treated by the CchAddr callback - skip them */
+ /* these are treated by the CchAddr callback - skip them */
- return 0;
+ return 0;
- case DISX86::trmtaCallInd:
+ case DISX86::trmtaCallInd:
- /* here we have an indirect call - find the indirect address */
+ /* here we have an indirect call - find the indirect address */
- //BYTE * code = disGetLinearAddr((size_t)addr);
- //disIndAddr = (DIS::ADDR) (code+0);
+ // BYTE * code = disGetLinearAddr((size_t)addr);
+ // disIndAddr = (DIS::ADDR) (code+0);
- /* find the size of the call opcode - less the immediate */
- /* for the fixup offset we have to add the opcode size for the call */
- /* addr is the address of the immediate, pdis->Addr() returns the address of the disassembled instruction */
+ /* find the size of the call opcode - less the immediate */
+ /* for the fixup offset we have to add the opcode size for the call */
+ /* addr is the address of the immediate, pdis->Addr() returns the address of the disassembled instruction */
- assert(addr > pdis->Addr());
- disCallSize = addr - pdis->Addr();
+ assert(addr > pdis->Addr());
+ disCallSize = addr - pdis->Addr();
- /* find the emitter block and the offset of the call fixup */
+ /* find the emitter block and the offset of the call fixup */
- return 0;
+ return 0;
- default:
+ default:
- printf("Termination type is %d\n", (int) terminationType);
- assert(!"treat this case\n");
- break;
+ printf("Termination type is %d\n", (int)terminationType);
+ assert(!"treat this case\n");
+ break;
}
#elif defined(_TARGET_ARM64_)
DISARM64::TRMTA terminationType = DISARM64::TRMTA(pdis->Trmta());
- //DIS::ADDR disIndAddr;
+ // DIS::ADDR disIndAddr;
+
+ DISASM_DUMP("FixupMember %016I64X (%08IX), size %d, termType %u\n", addr, disGetLinearAddr((size_t)addr), size,
+ terminationType);
- DISASM_DUMP("FixupMember %016I64X (%08IX), size %d, termType %u\n", addr, disGetLinearAddr((size_t)addr), size, terminationType);
-
// Is there a relocation registered for the address?
size_t absoluteAddr = (size_t)disGetLinearAddr((size_t)addr);
size_t targetAddr;
- bool anyReloc = GetRelocationMap()->Lookup(absoluteAddr, &targetAddr);
+ bool anyReloc = GetRelocationMap()->Lookup(absoluteAddr, &targetAddr);
switch (terminationType)
{
DIS::ADDR disCallSize;
- case DISARM64::TRMTA::trmtaUnknown:
- return 0;
+ case DISARM64::TRMTA::trmtaUnknown:
+ return 0;
- case DISARM64::TRMTA::trmtaFallThrough:
+ case DISARM64::TRMTA::trmtaFallThrough:
- if (anyReloc)
- {
- /* memory indirect case */
+ if (anyReloc)
+ {
+ /* memory indirect case */
- assert(addr > pdis->Addr());
+ assert(addr > pdis->Addr());
- /* find the emitter block and the offset for the fixup
- * "addr" is the address of the immediate */
+ /* find the emitter block and the offset for the fixup
+ * "addr" is the address of the immediate */
- // Make instructions like "mov rcx, 7FE8247A638h" diffable.
- swprintf_s(wz, cchMax, W("%IXh"), dspAddr(targetAddr));
- break;
- }
+ // Make instructions like "mov rcx, 7FE8247A638h" diffable.
+ swprintf_s(wz, cchMax, W("%IXh"), dspAddr(targetAddr));
+ break;
+ }
- return 0;
+ return 0;
- case DISARM64::TRMTA::trmtaBraInd:
- case DISARM64::TRMTA::trmtaBraCcInd:
+ case DISARM64::TRMTA::trmtaBraInd:
+ case DISARM64::TRMTA::trmtaBraCcInd:
- /* pretty rare case - something like "jmp [eax*4]"
- * not a function call or anything worth annotating */
+ /* pretty rare case - something like "jmp [eax*4]"
+ * not a function call or anything worth annotating */
- return 0;
+ return 0;
- case DISARM64::TRMTA::trmtaTrap:
- case DISARM64::TRMTA::trmtaTrapCc:
+ case DISARM64::TRMTA::trmtaTrap:
+ case DISARM64::TRMTA::trmtaTrapCc:
- /* some instructions like division have a TRAP termination type - ignore it */
+ /* some instructions like division have a TRAP termination type - ignore it */
- return 0;
+ return 0;
+
+ case DISARM64::TRMTA::trmtaBra:
+ case DISARM64::TRMTA::trmtaBraCase:
+ case DISARM64::TRMTA::trmtaBraCc:
+ case DISARM64::TRMTA::trmtaBraCcCase:
- case DISARM64::TRMTA::trmtaBra:
- case DISARM64::TRMTA::trmtaBraCase:
- case DISARM64::TRMTA::trmtaBraCc:
- case DISARM64::TRMTA::trmtaBraCcCase:
-
- /* these are treated by the CchAddr callback - skip them */
+ /* these are treated by the CchAddr callback - skip them */
- return 0;
+ return 0;
- case DISARM64::TRMTA::trmtaCall:
- case DISARM64::TRMTA::trmtaCallCc:
-
- if (anyReloc)
- {
- const char* name = disGetMethodFullName(targetAddr);
- if (name != nullptr)
+ case DISARM64::TRMTA::trmtaCall:
+ case DISARM64::TRMTA::trmtaCallCc:
+
+ if (anyReloc)
{
- swprintf_s(wz, cchMax, W("%p %S"), dspAddr(targetAddr), name);
- break;
+ const char* name = disGetMethodFullName(targetAddr);
+ if (name != nullptr)
+ {
+ swprintf_s(wz, cchMax, W("%p %S"), dspAddr(targetAddr), name);
+ break;
+ }
}
- }
- /* these are treated by the CchAddr callback - skip them */
+ /* these are treated by the CchAddr callback - skip them */
- return 0;
+ return 0;
- case DISARM64::TRMTA::trmtaCallInd:
- case DISARM64::TRMTA::trmtaCallCcInd:
+ case DISARM64::TRMTA::trmtaCallInd:
+ case DISARM64::TRMTA::trmtaCallCcInd:
- /* here we have an indirect call - find the indirect address */
+ /* here we have an indirect call - find the indirect address */
- //BYTE * code = disGetLinearAddr((size_t)addr);
- //disIndAddr = (DIS::ADDR) (code+0);
+ // BYTE * code = disGetLinearAddr((size_t)addr);
+ // disIndAddr = (DIS::ADDR) (code+0);
- /* find the size of the call opcode - less the immediate */
- /* for the fixup offset we have to add the opcode size for the call */
- /* addr is the address of the immediate, pdis->Addr() returns the address of the disassembled instruction */
+ /* find the size of the call opcode - less the immediate */
+ /* for the fixup offset we have to add the opcode size for the call */
+ /* addr is the address of the immediate, pdis->Addr() returns the address of the disassembled instruction */
- assert(addr > pdis->Addr());
- disCallSize = addr - pdis->Addr();
+ assert(addr > pdis->Addr());
+ disCallSize = addr - pdis->Addr();
- /* find the emitter block and the offset of the call fixup */
+ /* find the emitter block and the offset of the call fixup */
- return 0;
+ return 0;
- default:
+ default:
- printf("Termination type is %d\n", (int) terminationType);
- assert(!"treat this case\n");
- break;
+ printf("Termination type is %d\n", (int)terminationType);
+ assert(!"treat this case\n");
+ break;
}
#else // _TARGET_*
@@ -587,8 +579,6 @@ size_t DisAssembler::disCchFixupMember (const DIS* pdis,
return 1;
}
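
The fixup path above is driven by two lookups: a relocation map from the address of an immediate operand to its target, and a name map for known call targets. A simplified sketch of the same flow, with std::unordered_map standing in for the JIT's SimplerHashTable and all names here (relocationMap, methodNameMap, formatFixup) being illustrative assumptions rather than real interfaces:

    #include <cstdio>
    #include <cwchar>
    #include <string>
    #include <unordered_map>

    // Hypothetical stand-ins for the JIT-side maps.
    static std::unordered_map<size_t, size_t>       relocationMap; // immediate address -> target address
    static std::unordered_map<size_t, std::wstring> methodNameMap; // target address -> full method name

    // Returns 1 when the operand at 'immAddr' has a registered relocation and was
    // rewritten into either "XXXXh 'Namespace.Class:Method'" or a bare "XXXXh".
    static size_t formatFixup(size_t immAddr, wchar_t* wz, size_t cchMax)
    {
        auto reloc = relocationMap.find(immAddr);
        if (reloc == relocationMap.end())
        {
            return 0; // no relocation here; let the disassembler print the raw bytes
        }

        size_t target = reloc->second;
        auto   name   = methodNameMap.find(target);
        if (name != methodNameMap.end())
        {
            swprintf(wz, cchMax, L"%zXh '%ls'", target, name->second.c_str());
        }
        else
        {
            swprintf(wz, cchMax, L"%zXh", target); // keep the output diffable: print the target, not the slot
        }
        return 1;
    }

    int main()
    {
        relocationMap[0x40]    = 0x2000;
        methodNameMap[0x2000]  = L"System.Console:WriteLine";

        wchar_t wz[128];
        if (formatFixup(0x40, wz, 128) != 0)
        {
            wprintf(L"%ls\n", wz); // prints "2000h 'System.Console:WriteLine'"
        }
        return 0;
    }
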
-
-
/*****************************************************************************
* This the callback for register-relative operands in an instruction.
* If the register is ESP or EBP, the operand may be a local variable
@@ -598,129 +588,112 @@ size_t DisAssembler::disCchFixupMember (const DIS* pdis,
*/
/* static */
-size_t __stdcall DisAssembler::disCchRegRel (const DIS* pdis,
- DIS::REGA reg,
- DWORD disp,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORD* pdwDisp)
+size_t __stdcall DisAssembler::disCchRegRel(
+ const DIS* pdis, DIS::REGA reg, DWORD disp, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORD* pdwDisp)
{
- DisAssembler * pDisAsm = (DisAssembler *) pdis->PvClient();
+ DisAssembler* pDisAsm = (DisAssembler*)pdis->PvClient();
assert(pDisAsm);
- return pDisAsm->disCchRegRelMember(pdis,
- reg,
- disp,
- wz,
- cchMax,
- pdwDisp);
+ return pDisAsm->disCchRegRelMember(pdis, reg, disp, wz, cchMax, pdwDisp);
}
-size_t DisAssembler::disCchRegRelMember(const DIS* pdis,
- DIS::REGA reg,
- DWORD disp,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORD* pdwDisp)
+size_t DisAssembler::disCchRegRelMember(
+ const DIS* pdis, DIS::REGA reg, DWORD disp, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORD* pdwDisp)
{
#if defined(_TARGET_XARCH_)
DISX86::TRMTA terminationType = DISX86::TRMTA(pdis->Trmta());
- //DIS::ADDR disIndAddr;
+ // DIS::ADDR disIndAddr;
DISASM_DUMP("RegRelMember reg %u, disp %u, termType %u\n", reg, disp, terminationType);
switch (terminationType)
{
- int disOpcodeSize;
- const char * var;
+ int disOpcodeSize;
+ const char* var;
- case DISX86::trmtaFallThrough:
+ case DISX86::trmtaFallThrough:
/* some instructions like division have a TRAP termination type - ignore it */
- case DISX86::trmtaTrap:
- case DISX86::trmtaTrapCc:
+ case DISX86::trmtaTrap:
+ case DISX86::trmtaTrapCc:
- var = disComp->codeGen->siStackVarName(
- (size_t)(pdis->Addr() - disStartAddr),
- pdis->Cb(),
- reg,
- disp );
- if (var)
- {
- swprintf_s(wz, cchMax, W("%hs+%Xh '%hs'"), getRegName(reg), disp, var);
- *pdwDisp = 0;
-
- return 1;
- }
+ var = disComp->codeGen->siStackVarName((size_t)(pdis->Addr() - disStartAddr), pdis->Cb(), reg, disp);
+ if (var)
+ {
+ swprintf_s(wz, cchMax, W("%hs+%Xh '%hs'"), getRegName(reg), disp, var);
+ *pdwDisp = 0;
- /* This case consists of non-static members */
+ return 1;
+ }
- /* find the emitter block and the offset for the fixup
- * fixup is emited after the coding of the instruction - size = word (2 bytes)
- * GRRRR!!! - for the 16 bit case we have to check for the address size prefix = 0x66
- */
+ /* This case consists of non-static members */
- if (*disGetLinearAddr(disCurOffset) == 0x66)
- {
- disOpcodeSize = 3;
- }
- else
- {
- disOpcodeSize = 2;
- }
-
- return 0;
+ /* find the emitter block and the offset for the fixup
+             * fixup is emitted after the coding of the instruction - size = word (2 bytes)
+ * GRRRR!!! - for the 16 bit case we have to check for the address size prefix = 0x66
+ */
- case DISX86::trmtaCallNear16:
- case DISX86::trmtaCallNear32:
- case DISX86::trmtaJmpInd:
+ if (*disGetLinearAddr(disCurOffset) == 0x66)
+ {
+ disOpcodeSize = 3;
+ }
+ else
+ {
+ disOpcodeSize = 2;
+ }
- break;
+ return 0;
- case DISX86::trmtaCallInd:
+ case DISX86::trmtaCallNear16:
+ case DISX86::trmtaCallNear32:
+ case DISX86::trmtaJmpInd:
- /* check if this is a one byte displacement */
+ break;
- if ((signed char)disp == (int)disp)
- {
- /* we have a one byte displacement -> there were no previous callbacks */
+ case DISX86::trmtaCallInd:
- /* find the size of the call opcode - less the immediate */
- /* this is a call R/M indirect -> opcode size is 2 */
+ /* check if this is a one byte displacement */
- disOpcodeSize = 2;
+ if ((signed char)disp == (int)disp)
+ {
+ /* we have a one byte displacement -> there were no previous callbacks */
- /* find the emitter block and the offset of the call fixup */
+ /* find the size of the call opcode - less the immediate */
+ /* this is a call R/M indirect -> opcode size is 2 */
- return 0;
- }
- else
- {
- /* check if we already have a symbol name as replacement */
+ disOpcodeSize = 2;
- if (disHasName)
- {
- /* CchFixup has been called before - we have a symbol name saved in global var disFuncTempBuf */
+ /* find the emitter block and the offset of the call fixup */
- swprintf_s(wz, cchMax, W("%hs+%u '%hs'"), getRegName(reg), disp, disFuncTempBuf);
- *pdwDisp = 0;
- disHasName = false;
- return 1;
+ return 0;
}
- else
+ else
{
- return 0;
+ /* check if we already have a symbol name as replacement */
+
+ if (disHasName)
+ {
+ /* CchFixup has been called before - we have a symbol name saved in global var disFuncTempBuf */
+
+ swprintf_s(wz, cchMax, W("%hs+%u '%hs'"), getRegName(reg), disp, disFuncTempBuf);
+ *pdwDisp = 0;
+ disHasName = false;
+ return 1;
+ }
+ else
+ {
+ return 0;
+ }
}
- }
- default:
+ default:
- printf("Termination type is %d\n", (int) terminationType);
- assert(!"treat this case\n");
+ printf("Termination type is %d\n", (int)terminationType);
+ assert(!"treat this case\n");
- break;
+ break;
}
#elif defined(_TARGET_ARM64_)
@@ -731,88 +704,83 @@ size_t DisAssembler::disCchRegRelMember(const DIS* pdis,
switch (terminationType)
{
- int disOpcodeSize;
- const char * var;
+ int disOpcodeSize;
+ const char* var;
- case DISARM64::TRMTA::trmtaFallThrough:
+ case DISARM64::TRMTA::trmtaFallThrough:
/* some instructions like division have a TRAP termination type - ignore it */
- case DISARM64::TRMTA::trmtaTrap:
- case DISARM64::TRMTA::trmtaTrapCc:
-
- var = disComp->codeGen->siStackVarName(
- (size_t)(pdis->Addr() - disStartAddr),
- pdis->Cb(),
- reg,
- disp );
- if (var)
- {
- swprintf_s(wz, cchMax, W("%hs+%Xh '%hs'"), getRegName(reg), disp, var);
- *pdwDisp = 0;
-
- return 1;
- }
+ case DISARM64::TRMTA::trmtaTrap:
+ case DISARM64::TRMTA::trmtaTrapCc:
- /* This case consists of non-static members */
+ var = disComp->codeGen->siStackVarName((size_t)(pdis->Addr() - disStartAddr), pdis->Cb(), reg, disp);
+ if (var)
+ {
+ swprintf_s(wz, cchMax, W("%hs+%Xh '%hs'"), getRegName(reg), disp, var);
+ *pdwDisp = 0;
- // TODO-ARM64-Bug?: Is this correct?
- disOpcodeSize = 2;
- return 0;
+ return 1;
+ }
- case DISARM64::TRMTA::trmtaCall:
- case DISARM64::TRMTA::trmtaCallCc:
- case DISARM64::TRMTA::trmtaBraInd:
- case DISARM64::TRMTA::trmtaBraCcInd:
- break;
+ /* This case consists of non-static members */
- case DISARM64::TRMTA::trmtaCallInd:
- case DISARM64::TRMTA::trmtaCallCcInd:
+ // TODO-ARM64-Bug?: Is this correct?
+ disOpcodeSize = 2;
+ return 0;
- /* check if this is a one byte displacement */
+ case DISARM64::TRMTA::trmtaCall:
+ case DISARM64::TRMTA::trmtaCallCc:
+ case DISARM64::TRMTA::trmtaBraInd:
+ case DISARM64::TRMTA::trmtaBraCcInd:
+ break;
- if ((signed char)disp == (int)disp)
- {
- /* we have a one byte displacement -> there were no previous callbacks */
+ case DISARM64::TRMTA::trmtaCallInd:
+ case DISARM64::TRMTA::trmtaCallCcInd:
- /* find the size of the call opcode - less the immediate */
- /* this is a call R/M indirect -> opcode size is 2 */
+ /* check if this is a one byte displacement */
- // TODO-ARM64-Bug?: Is this correct?
- disOpcodeSize = 2;
+ if ((signed char)disp == (int)disp)
+ {
+ /* we have a one byte displacement -> there were no previous callbacks */
- /* find the emitter block and the offset of the call fixup */
+ /* find the size of the call opcode - less the immediate */
+ /* this is a call R/M indirect -> opcode size is 2 */
- return 0;
- }
- else
- {
- /* check if we already have a symbol name as replacement */
+ // TODO-ARM64-Bug?: Is this correct?
+ disOpcodeSize = 2;
- if (disHasName)
- {
- /* CchFixup has been called before - we have a symbol name saved in global var disFuncTempBuf */
+ /* find the emitter block and the offset of the call fixup */
- swprintf_s(wz, cchMax, W("%hs+%u '%hs'"), getRegName(reg), disp, disFuncTempBuf);
- *pdwDisp = 0;
- disHasName = false;
- return 1;
+ return 0;
}
- else
+ else
{
- return 0;
+ /* check if we already have a symbol name as replacement */
+
+ if (disHasName)
+ {
+ /* CchFixup has been called before - we have a symbol name saved in global var disFuncTempBuf */
+
+ swprintf_s(wz, cchMax, W("%hs+%u '%hs'"), getRegName(reg), disp, disFuncTempBuf);
+ *pdwDisp = 0;
+ disHasName = false;
+ return 1;
+ }
+ else
+ {
+ return 0;
+ }
}
- }
- default:
+ default:
- printf("Termination type is %d\n", (int) terminationType);
- assert(!"treat this case\n");
+ printf("Termination type is %d\n", (int)terminationType);
+ assert(!"treat this case\n");
- break;
+ break;
}
-
#else // _TARGET_*
#error Unsupported or unset target architecture
#endif // _TARGET_*
@@ -824,7 +792,6 @@ size_t DisAssembler::disCchRegRelMember(const DIS* pdis,
return 1;
}
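
The register-relative callback boils down to a query keyed by the instruction's frame access plus a formatted "reg+XXh 'name'" replacement. A reduced sketch of that behavior follows; frameVarNames and formatRegRel are stand-ins, and the real siStackVarName query also takes the code offset, instruction size and register:

    #include <cstdio>
    #include <cwchar>
    #include <map>
    #include <string>

    // Toy stand-in for siStackVarName: here a displacement is enough to name a slot.
    static std::map<unsigned, std::wstring> frameVarNames;

    // Mirrors the CchRegRel contract: return 1 and zero out the displacement when
    // the operand has been replaced by "reg+XXh 'name'", otherwise return 0.
    static size_t formatRegRel(const wchar_t* regName, unsigned disp, wchar_t* wz, size_t cchMax, unsigned* pdwDisp)
    {
        auto it = frameVarNames.find(disp);
        if (it == frameVarNames.end())
        {
            return 0; // unknown slot - let the disassembler print the raw [reg+disp]
        }
        swprintf(wz, cchMax, L"%ls+%Xh '%ls'", regName, disp, it->second.c_str());
        *pdwDisp = 0; // the displacement is folded into the printed text
        return 1;
    }

    int main()
    {
        frameVarNames[0x18] = L"loc1";

        wchar_t  wz[64];
        unsigned disp = 0x18;
        if (formatRegRel(L"rbp", disp, wz, 64, &disp) != 0)
        {
            wprintf(L"%ls\n", wz); // prints "rbp+18h 'loc1'"
        }
        return 0;
    }
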
-
/*****************************************************************************
*
* Callback for register operands. Most probably, this is a local variable or
@@ -834,24 +801,15 @@ size_t DisAssembler::disCchRegRelMember(const DIS* pdis,
*/
/* static */
-size_t __stdcall DisAssembler::disCchReg (const DIS* pdis,
- DIS::REGA reg,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax)
+size_t __stdcall DisAssembler::disCchReg(const DIS* pdis, DIS::REGA reg, __in_ecount(cchMax) wchar_t* wz, size_t cchMax)
{
- DisAssembler * pDisAsm = (DisAssembler *) pdis->PvClient();
+ DisAssembler* pDisAsm = (DisAssembler*)pdis->PvClient();
assert(pDisAsm);
- return pDisAsm->disCchRegMember(pdis,
- reg,
- wz,
- cchMax);
+ return pDisAsm->disCchRegMember(pdis, reg, wz, cchMax);
}
-size_t DisAssembler::disCchRegMember (const DIS* pdis,
- DIS::REGA reg,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax)
+size_t DisAssembler::disCchRegMember(const DIS* pdis, DIS::REGA reg, __in_ecount(cchMax) wchar_t* wz, size_t cchMax)
{
// TODO-Review: DIS::REGA does not directly map to our regNumber! E.g., look at DISARM64::REGA --
// the Wt registers come first (and do map to our regNumber), but the Xt registers follow.
@@ -899,11 +857,10 @@ size_t DisAssembler::disCchRegMember (const DIS* pdis,
#endif // 0
}
-
/*****************************************************************************
* Helper function to lazily create a map from code address to CORINFO_METHOD_HANDLE.
*/
-AddrToMethodHandleMap* DisAssembler::GetAddrToMethodHandleMap()
+AddrToMethodHandleMap* DisAssembler::GetAddrToMethodHandleMap()
{
if (disAddrToMethodHandleMap == nullptr)
{
@@ -913,11 +870,10 @@ AddrToMethodHandleMap* DisAssembler::GetAddrToMethodHandleMap()
return disAddrToMethodHandleMap;
}
-
/*****************************************************************************
* Helper function to lazily create a map from code address to CORINFO_METHOD_HANDLE.
*/
-AddrToMethodHandleMap* DisAssembler::GetHelperAddrToMethodHandleMap()
+AddrToMethodHandleMap* DisAssembler::GetHelperAddrToMethodHandleMap()
{
if (disHelperAddrToMethodHandleMap == nullptr)
{
@@ -927,11 +883,10 @@ AddrToMethodHandleMap* DisAssembler::GetHelperAddrToMethodHandleMap()
return disHelperAddrToMethodHandleMap;
}
-
/*****************************************************************************
* Helper function to lazily create a map from relocation address to relocation target address.
*/
-AddrToAddrMap* DisAssembler::GetRelocationMap()
+AddrToAddrMap* DisAssembler::GetRelocationMap()
{
if (disRelocationMap == nullptr)
{
@@ -941,21 +896,20 @@ AddrToAddrMap* DisAssembler::GetRelocationMap()
return disRelocationMap;
}
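
All three accessors above follow the same lazily-created-map pattern, so a method that is never disassembled allocates nothing. The pattern itself, detached from the JIT allocator (RelocTracker is an illustrative name and a plain std::unordered_map replaces SimplerHashTable):

    #include <cstdio>
    #include <unordered_map>

    class RelocTracker
    {
    public:
        // Lazily create the relocation map the first time anyone asks for it.
        std::unordered_map<size_t, size_t>* GetRelocationMap()
        {
            if (relocationMap == nullptr)
            {
                relocationMap = new std::unordered_map<size_t, size_t>();
            }
            return relocationMap;
        }

        ~RelocTracker()
        {
            delete relocationMap;
        }

    private:
        std::unordered_map<size_t, size_t>* relocationMap = nullptr;
    };

    int main()
    {
        RelocTracker tracker;
        (*tracker.GetRelocationMap())[0x40] = 0x2000; // record: reloc at 0x40 points to 0x2000
        printf("%zu entries\n", tracker.GetRelocationMap()->size());
        return 0;
    }
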
-
/*****************************************************************************
* Return the count of bytes disassembled.
*/
-size_t DisAssembler::CbDisassemble(DIS* pdis,
- size_t offs,
- DIS::ADDR addr,
- const BYTE* pb,
- size_t cbMax,
- FILE* pfile,
- bool findLabels,
- bool printit /* = false */,
- bool dispOffs /* = false */,
- bool dispCodeBytes /* = false */)
+size_t DisAssembler::CbDisassemble(DIS* pdis,
+ size_t offs,
+ DIS::ADDR addr,
+ const BYTE* pb,
+ size_t cbMax,
+ FILE* pfile,
+ bool findLabels,
+ bool printit /* = false */,
+ bool dispOffs /* = false */,
+ bool dispCodeBytes /* = false */)
{
assert(pdis);
@@ -964,7 +918,7 @@ size_t DisAssembler::CbDisassemble(DIS* pdis,
if (cb == 0)
{
DISASM_DUMP("CbDisassemble offs %Iu addr %I64u\n", offs, addr);
- //assert(!"can't disassemble instruction!!!");
+ // assert(!"can't disassemble instruction!!!");
fprintf(pfile, "MSVCDIS can't disassemble instruction @ offset %Iu (0x%02x)!!!\n", offs, offs);
#if defined(_TARGET_ARM64_)
fprintf(pfile, "%08Xh\n", *(unsigned int*)pb);
@@ -977,12 +931,12 @@ size_t DisAssembler::CbDisassemble(DIS* pdis,
#if defined(_TARGET_ARM64_)
assert(cb == 4); // all instructions are 4 bytes!
-#endif // _TARGET_ARM64_
+#endif // _TARGET_ARM64_
/* remember current offset and instruction size */
disCurOffset = (size_t)addr;
- disInstSize = cb;
+ disInstSize = cb;
/* Set the disTarget address */
@@ -997,52 +951,56 @@ size_t DisAssembler::CbDisassemble(DIS* pdis,
switch (terminationType)
{
- case DISX86::trmtaCallNear16:
- case DISX86::trmtaCallNear32:
- case DISX86::trmtaCallFar:
+ case DISX86::trmtaCallNear16:
+ case DISX86::trmtaCallNear32:
+ case DISX86::trmtaCallFar:
{
// Don't count addresses in the relocation table
size_t targetAddr;
- size_t absoluteAddr = (size_t)disGetLinearAddr((size_t)pdis->AddrAddress(1)); // Get the address in the instruction of the call target address (the address the reloc is applied to).
+ size_t absoluteAddr =
+ (size_t)disGetLinearAddr((size_t)pdis->AddrAddress(1)); // Get the address in the instruction of the
+ // call target address (the address the
+ // reloc is applied to).
if (GetRelocationMap()->Lookup(absoluteAddr, &targetAddr))
{
break;
}
}
- __fallthrough;
+ __fallthrough;
- case DISX86::trmtaJmpShort:
- case DISX86::trmtaJmpNear:
- case DISX86::trmtaJmpFar:
- case DISX86::trmtaJmpCcShort:
- case DISX86::trmtaJmpCcNear:
+ case DISX86::trmtaJmpShort:
+ case DISX86::trmtaJmpNear:
+ case DISX86::trmtaJmpFar:
+ case DISX86::trmtaJmpCcShort:
+ case DISX86::trmtaJmpCcNear:
- /* a CALL is local iff the disTarget is within the block boundary */
+ /* a CALL is local iff the disTarget is within the block boundary */
- /* mark the jump label in the disTarget vector and return */
+ /* mark the jump label in the disTarget vector and return */
- if (disTarget != DIS::addrNil) // There seems to be an assumption that you can't branch to the first address of the function (prolog).
- {
- if (0 <= disTarget &&
- disTarget < disTotalCodeSize)
+ if (disTarget != DIS::addrNil) // There seems to be an assumption that you can't branch to the first
+ // address of the function (prolog).
{
- /* we're OK, disTarget within block boundary */
+ if (0 <= disTarget && disTarget < disTotalCodeSize)
+ {
+ /* we're OK, disTarget within block boundary */
- disLabels[disTarget] = 1;
+ disLabels[disTarget] = 1;
+ }
}
- }
- break;
+ break;
- case DISX86::trmtaFallThrough:
- // We'd like to be able to get a label for code like "lea rcx, [4]" that we use for jump tables, but I can't figure out how.
- break;
+ case DISX86::trmtaFallThrough:
+ // We'd like to be able to get a label for code like "lea rcx, [4]" that we use for jump tables, but I
+ // can't figure out how.
+ break;
- default:
+ default:
- /* jump is not in the current code block */
- break;
+ /* jump is not in the current code block */
+ break;
} // end switch
#elif defined(_TARGET_ARM64_)
@@ -1052,65 +1010,67 @@ size_t DisAssembler::CbDisassemble(DIS* pdis,
switch (terminationType)
{
- case DISARM64::TRMTA::trmtaCall:
- case DISARM64::TRMTA::trmtaCallCc:
+ case DISARM64::TRMTA::trmtaCall:
+ case DISARM64::TRMTA::trmtaCallCc:
{
// Don't count addresses in the relocation table
size_t targetAddr;
- size_t absoluteAddr = (size_t)disGetLinearAddr((size_t)pdis->AddrAddress(1)); // Get the address in the instruction of the call target address (the address the reloc is applied to).
+ size_t absoluteAddr =
+ (size_t)disGetLinearAddr((size_t)pdis->AddrAddress(1)); // Get the address in the instruction of the
+ // call target address (the address the
+ // reloc is applied to).
if (GetRelocationMap()->Lookup(absoluteAddr, &targetAddr))
{
break;
}
}
- __fallthrough;
+ __fallthrough;
- case DISARM64::TRMTA::trmtaBra:
- case DISARM64::TRMTA::trmtaBraCase:
- case DISARM64::TRMTA::trmtaBraCc:
- case DISARM64::TRMTA::trmtaBraCcCase:
+ case DISARM64::TRMTA::trmtaBra:
+ case DISARM64::TRMTA::trmtaBraCase:
+ case DISARM64::TRMTA::trmtaBraCc:
+ case DISARM64::TRMTA::trmtaBraCcCase:
- /* a CALL is local iff the disTarget is within the block boundary */
+ /* a CALL is local iff the disTarget is within the block boundary */
- /* mark the jump label in the disTarget vector and return */
+ /* mark the jump label in the disTarget vector and return */
- if (disTarget != DIS::addrNil) // There seems to be an assumption that you can't branch to the first address of the function (prolog).
- {
- if (0 <= disTarget &&
- disTarget < disTotalCodeSize)
+ if (disTarget != DIS::addrNil) // There seems to be an assumption that you can't branch to the first
+ // address of the function (prolog).
{
- /* we're OK, disTarget within block boundary */
+ if (0 <= disTarget && disTarget < disTotalCodeSize)
+ {
+ /* we're OK, disTarget within block boundary */
- disLabels[disTarget] = 1;
+ disLabels[disTarget] = 1;
+ }
}
- }
- break;
+ break;
- case DISARM64::TRMTA::trmtaFallThrough:
+ case DISARM64::TRMTA::trmtaFallThrough:
{
DIS::INSTRUCTION instr;
- DIS::OPERAND ops[DISARM64::coperandMax];
- bool ok = pdis->FDecode(&instr, ops, ArrLen(ops));
+ DIS::OPERAND ops[DISARM64::coperandMax];
+ bool ok = pdis->FDecode(&instr, ops, ArrLen(ops));
if (ok)
{
switch ((DISARM64::OPA)instr.opa)
{
- case DISARM64::opaAdr:
- case DISARM64::opaAdrp:
- // operand 1 is an address
- assert(instr.coperand >= 2);
- assert(ops[1].opcls == DIS::opclsImmediate);
- assert(ops[1].imcls == DIS::imclsAddress);
- disTarget = ops[1].dwl;
- break;
- default:
- break;
+ case DISARM64::opaAdr:
+ case DISARM64::opaAdrp:
+ // operand 1 is an address
+ assert(instr.coperand >= 2);
+ assert(ops[1].opcls == DIS::opclsImmediate);
+ assert(ops[1].imcls == DIS::imclsAddress);
+ disTarget = ops[1].dwl;
+ break;
+ default:
+ break;
}
- if (0 <= disTarget &&
- disTarget < disTotalCodeSize)
+ if (0 <= disTarget && disTarget < disTotalCodeSize)
{
/* we're OK, disTarget within block boundary */
@@ -1120,10 +1080,10 @@ size_t DisAssembler::CbDisassemble(DIS* pdis,
}
break;
- default:
+ default:
- /* jump is not in the current code block */
- break;
+ /* jump is not in the current code block */
+ break;
} // end switch
#else // _TARGET_*
@@ -1146,7 +1106,7 @@ size_t DisAssembler::CbDisassemble(DIS* pdis,
}
wchar_t wz[MAX_CLASSNAME_LENGTH];
- pdis->CchFormatInstr(wz, sizeof(wz)/sizeof(wz[0]));
+ pdis->CchFormatInstr(wz, sizeof(wz) / sizeof(wz[0]));
if (printit)
{
@@ -1156,11 +1116,11 @@ size_t DisAssembler::CbDisassemble(DIS* pdis,
}
#ifdef _TARGET_ARM64_
- #define CCH_INDENT 8 // fixed sized instructions, always 8 characters
+#define CCH_INDENT 8 // fixed sized instructions, always 8 characters
#elif defined(_TARGET_AMD64_)
- #define CCH_INDENT 30 // large constants sometimes
+#define CCH_INDENT 30 // large constants sometimes
#else
- #define CCH_INDENT 24
+#define CCH_INDENT 24
#endif
size_t cchIndent = CCH_INDENT;
@@ -1177,14 +1137,14 @@ size_t DisAssembler::CbDisassemble(DIS* pdis,
wchar_t wzBytes[MAX_CLASSNAME_LENGTH];
assert(cchBytesMax < MAX_CLASSNAME_LENGTH);
- size_t cchBytes = pdis->CchFormatBytes(wzBytes, sizeof(wzBytes)/sizeof(wzBytes[0]));
+ size_t cchBytes = pdis->CchFormatBytes(wzBytes, sizeof(wzBytes) / sizeof(wzBytes[0]));
if (cchBytes > CCH_INDENT)
{
// Truncate the bytes if they are too long
- static const wchar_t * elipses = W("...\0");
- const size_t cchElipses = 4;
+ static const wchar_t* elipses = W("...\0");
+ const size_t cchElipses = 4;
memcpy(&wzBytes[CCH_INDENT - cchElipses], elipses, cchElipses * sizeof(wchar_t));
@@ -1203,32 +1163,25 @@ size_t DisAssembler::CbDisassemble(DIS* pdis,
return cb;
}
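
One detail of the printing path above worth isolating is the byte-column truncation: when the formatted code bytes exceed the fixed indent width, an ellipsis is copied over the tail, terminating NUL included, so the mnemonic column stays aligned. A stand-alone version of that truncation (the column width chosen here is arbitrary):

    #include <cstdio>
    #include <cstring>
    #include <cwchar>

    // Truncate a formatted byte string to 'column' characters, overwriting the tail
    // with "..." plus the NUL terminator so the instruction text stays aligned.
    static void truncateBytes(wchar_t* wzBytes, size_t cchBytes, size_t column)
    {
        static const wchar_t ellipsis[] = L"...";
        const size_t cchEllipsis = 4; // three dots plus the terminating NUL

        if (cchBytes > column && column >= cchEllipsis)
        {
            memcpy(&wzBytes[column - cchEllipsis], ellipsis, cchEllipsis * sizeof(wchar_t));
        }
    }

    int main()
    {
        wchar_t bytes[] = L"48B9386A247AE87F0000"; // 20 characters of encoded bytes
        truncateBytes(bytes, wcslen(bytes), 8);
        wprintf(L"%ls|\n", bytes); // prints "48B9...|"
        return 0;
    }
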
-
-
// TODO-Cleanup: this is currently unused, unreferenced.
-size_t CbDisassembleWithBytes(
- DIS * pdis,
- DIS::ADDR addr,
- const BYTE * pb,
- size_t cbMax,
- FILE * pfile)
+size_t CbDisassembleWithBytes(DIS* pdis, DIS::ADDR addr, const BYTE* pb, size_t cbMax, FILE* pfile)
{
assert(pdis);
- DisAssembler * pDisAsm = (DisAssembler *)pdis->PvClient();
- assert (pDisAsm);
+ DisAssembler* pDisAsm = (DisAssembler*)pdis->PvClient();
+ assert(pDisAsm);
wchar_t wz[MAX_CLASSNAME_LENGTH];
- pdis->CchFormatAddr(addr, wz, sizeof(wz)/sizeof(wz[0]));
+ pdis->CchFormatAddr(addr, wz, sizeof(wz) / sizeof(wz[0]));
- size_t cchIndent = (size_t) fprintf(pfile, " %ls: ", wz);
+ size_t cchIndent = (size_t)fprintf(pfile, " %ls: ", wz);
size_t cb = pdis->CbDisassemble(addr, pb, cbMax);
if (cb == 0)
{
fprintf(pfile, "%02Xh\n", *pb);
- return(1);
+ return (1);
}
size_t cchBytesMax = pdis->CchFormatBytesMax();
@@ -1241,10 +1194,10 @@ size_t CbDisassembleWithBytes(
}
wchar_t wzBytes[64];
- size_t cchBytes = pdis->CchFormatBytes(wzBytes, sizeof(wzBytes)/sizeof(wzBytes[0]));
+ size_t cchBytes = pdis->CchFormatBytes(wzBytes, sizeof(wzBytes) / sizeof(wzBytes[0]));
- wchar_t *pwzBytes;
- wchar_t *pwzNext;
+ wchar_t* pwzBytes;
+ wchar_t* pwzNext;
for (pwzBytes = wzBytes; pwzBytes != NULL; pwzBytes = pwzNext)
{
@@ -1259,7 +1212,7 @@ size_t CbDisassembleWithBytes(
else
{
- wchar_t ch = pwzBytes[cchBytesMax];
+ wchar_t ch = pwzBytes[cchBytesMax];
pwzBytes[cchBytesMax] = '\0';
if (ch == W(' '))
@@ -1273,13 +1226,13 @@ size_t CbDisassembleWithBytes(
assert(pwzNext);
pwzBytes[cchBytesMax] = ch;
- *pwzNext++ = '\0';
+ *pwzNext++ = '\0';
}
}
if (fFirst)
{
- pdis->CchFormatInstr(wz, sizeof(wz)/sizeof(wz[0]));
+ pdis->CchFormatInstr(wz, sizeof(wz) / sizeof(wz[0]));
fprintf(pfile, "%-*ls %ls\n", cchBytesMax, pwzBytes, wz);
}
@@ -1289,14 +1242,12 @@ size_t CbDisassembleWithBytes(
}
}
- return(cb);
+ return (cb);
}
-
-void DisAssembler::DisasmBuffer(FILE* pfile,
- bool printit)
+void DisAssembler::DisasmBuffer(FILE* pfile, bool printit)
{
- DIS *pdis = NULL;
+ DIS* pdis = NULL;
#ifdef _TARGET_X86_
pdis = DIS::PdisNew(DIS::distX86);
@@ -1325,24 +1276,19 @@ void DisAssembler::DisasmBuffer(FILE* pfile,
/* Calculate addresses */
- size_t ibCur = 0;
- DIS::ADDR addr = 0; // Always emit code with respect to a "0" base address.
+ size_t ibCur = 0;
+ DIS::ADDR addr = 0; // Always emit code with respect to a "0" base address.
/* First walk the code to find all jump targets */
while (ibCur < disTotalCodeSize)
{
- size_t cb;
+ size_t cb;
- cb = CbDisassemble(pdis,
- ibCur,
- addr + ibCur,
- disGetLinearAddr(ibCur),
- disGetBufferSize(ibCur),
- pfile,
- true); // find labels
+ cb = CbDisassemble(pdis, ibCur, addr + ibCur, disGetLinearAddr(ibCur), disGetBufferSize(ibCur), pfile,
+ true); // find labels
- //CbDisassemble returning > MAX_INT... give me a break.
+ // CbDisassemble returning > MAX_INT... give me a break.
ibCur += cb;
}
@@ -1378,21 +1324,16 @@ void DisAssembler::DisasmBuffer(FILE* pfile,
{
size_t cb;
- cb = CbDisassemble (pdis,
- ibCur,
- addr + ibCur,
- disGetLinearAddr(ibCur),
- disGetBufferSize(ibCur),
- pfile,
- false, // find labels
- printit,
- !disDiffable, // display relative offset
+ cb = CbDisassemble(pdis, ibCur, addr + ibCur, disGetLinearAddr(ibCur), disGetBufferSize(ibCur), pfile,
+ false, // find labels
+ printit,
+ !disDiffable, // display relative offset
#ifdef DEBUG
- !disDiffable // Display code bytes?
+ !disDiffable // Display code bytes?
#else
- false // Display code bytes?
+ false // Display code bytes?
#endif
- );
+ );
ibCur += (unsigned)cb;
}
@@ -1400,7 +1341,6 @@ void DisAssembler::DisasmBuffer(FILE* pfile,
delete pdis;
}
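
DisasmBuffer is a two-pass driver: the first pass decodes every instruction only to mark branch targets, the second decodes again and prints, emitting an "L_xx:" line at every marked offset. A compact sketch of that driver shape; Insn and decodeOne are toy stand-ins for msdis and CbDisassemble, and the numbering step is an assumption about what happens between the two passes:

    #include <cstdio>
    #include <vector>

    // Toy instruction record: size in bytes and an optional branch target offset.
    struct Insn
    {
        size_t size;
        bool   hasTarget;
        size_t target;
    };

    // Stand-in for CbDisassemble: the "code" is already a vector of decoded records.
    static const Insn& decodeOne(const std::vector<Insn>& code, size_t index)
    {
        return code[index];
    }

    static void disasmBuffer(const std::vector<Insn>& code, size_t totalSize)
    {
        std::vector<unsigned> labels(totalSize, 0);

        // Pass 1: find labels - mark every branch target that lands inside the method.
        for (size_t i = 0; i < code.size(); i++)
        {
            const Insn& insn = decodeOne(code, i);
            if (insn.hasTarget && insn.target < totalSize)
            {
                labels[insn.target] = 1;
            }
        }

        // Number the marked offsets in address order (assumed from how disLabels is
        // consumed by the address callback; not taken verbatim from the source).
        unsigned labelNum = 0;
        for (size_t b = 0; b < totalSize; b++)
        {
            if (labels[b] != 0)
            {
                labels[b] = ++labelNum;
            }
        }

        // Pass 2: print, emitting a label line before any marked offset.
        size_t offs = 0;
        for (size_t i = 0; i < code.size(); i++)
        {
            if (labels[offs] != 0)
            {
                printf("L_%02u:\n", labels[offs]);
            }
            printf("  <instruction at offset %zu>\n", offs);
            offs += decodeOne(code, i).size;
        }
    }

    int main()
    {
        // Three fake 4-byte instructions; the first branches to the third (offset 8).
        std::vector<Insn> code = {{4, true, 8}, {4, false, 0}, {4, false, 0}};
        disasmBuffer(code, 12);
        return 0;
    }
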
-
/*****************************************************************************
* Given a linear offset into the code, find a pointer to the actual code (either in the hot or cold section)
*
@@ -1408,7 +1348,7 @@ void DisAssembler::DisasmBuffer(FILE* pfile,
* offset - The linear offset into the code. It must point within the code.
*/
-const BYTE* DisAssembler::disGetLinearAddr(size_t offset)
+const BYTE* DisAssembler::disGetLinearAddr(size_t offset)
{
if (offset < disHotCodeSize)
{
@@ -1420,7 +1360,6 @@ const BYTE* DisAssembler::disGetLinearAddr(size_t offset)
}
}
-
/*****************************************************************************
* Given a linear offset into the code, determine how many bytes are remaining in the buffer.
* This will only return the number of bytes left in either the hot or cold buffer. This is used
@@ -1430,7 +1369,7 @@ const BYTE* DisAssembler::disGetLinearAddr(size_t offset)
* offset - The linear offset into the code. It must point within the code.
*/
-size_t DisAssembler::disGetBufferSize(size_t offset)
+size_t DisAssembler::disGetBufferSize(size_t offset)
{
if (offset < disHotCodeSize)
{
@@ -1442,12 +1381,11 @@ size_t DisAssembler::disGetBufferSize(size_t offset)
}
}
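
disGetLinearAddr and disGetBufferSize implement a simple model: the hot and cold sections are presented to the disassembler as one linear range, with offsets below the hot size mapping into the hot buffer and everything else mapping into the cold buffer shifted by the hot size. The same model in a self-contained sketch (SplitCode is an illustrative name):

    #include <cassert>
    #include <cstdio>

    struct SplitCode
    {
        const unsigned char* hot;
        size_t               hotSize;
        const unsigned char* cold;
        size_t               coldSize;

        // Map a linear offset in [0, hotSize + coldSize) to a real pointer.
        const unsigned char* linearAddr(size_t offset) const
        {
            assert(offset < hotSize + coldSize);
            return (offset < hotSize) ? (hot + offset) : (cold + (offset - hotSize));
        }

        // How many bytes remain in whichever buffer the offset falls into.
        size_t bufferSize(size_t offset) const
        {
            assert(offset < hotSize + coldSize);
            return (offset < hotSize) ? (hotSize - offset) : (hotSize + coldSize - offset);
        }
    };

    int main()
    {
        unsigned char hot[16] = {0}, cold[8] = {0};
        SplitCode code{hot, sizeof(hot), cold, sizeof(cold)};

        printf("offset 4  -> hot buffer,  %zu bytes left\n", code.bufferSize(4));  // 12
        printf("offset 18 -> cold buffer, %zu bytes left\n", code.bufferSize(18)); // 6
        return 0;
    }
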
-
/*****************************************************************************
* Get the function name for a given absolute address.
*/
-const char* DisAssembler::disGetMethodFullName(size_t addr)
+const char* DisAssembler::disGetMethodFullName(size_t addr)
{
CORINFO_METHOD_HANDLE res;
@@ -1474,14 +1412,14 @@ const char* DisAssembler::disGetMethodFullName(size_t addr)
* methHnd - The method handle associated with 'addr'.
*/
-void DisAssembler::disSetMethod(size_t addr, CORINFO_METHOD_HANDLE methHnd)
+void DisAssembler::disSetMethod(size_t addr, CORINFO_METHOD_HANDLE methHnd)
{
if (!disComp->opts.doLateDisasm)
{
return;
}
- if (disComp->eeGetHelperNum(methHnd))
+ if (disComp->eeGetHelperNum(methHnd))
{
DISASM_DUMP("Helper function: %p => %p\n", addr, methHnd);
GetHelperAddrToMethodHandleMap()->Set(addr, methHnd);
@@ -1501,7 +1439,7 @@ void DisAssembler::disSetMethod(size_t addr, CORINFO_METHOD_HANDLE me
* targetAddr - The absolute address the relocation points to.
*/
-void DisAssembler::disRecordRelocation(size_t relocAddr, size_t targetAddr)
+void DisAssembler::disRecordRelocation(size_t relocAddr, size_t targetAddr)
{
if (!disComp->opts.doLateDisasm)
{
@@ -1517,7 +1455,7 @@ void DisAssembler::disRecordRelocation(size_t relocAddr, size_t targe
* Disassemble the code which has been generated
*/
-void DisAssembler::disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* coldCodePtr, size_t coldCodeSize)
+void DisAssembler::disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* coldCodePtr, size_t coldCodeSize)
{
if (!disComp->opts.doLateDisasm)
{
@@ -1527,7 +1465,7 @@ void DisAssembler::disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* col
#ifdef DEBUG
// Should we make it diffable?
disDiffable = disComp->opts.dspDiffable;
-#else // !DEBUG
+#else // !DEBUG
// NOTE: non-debug builds are always diffable!
disDiffable = true;
#endif // !DEBUG
@@ -1542,7 +1480,7 @@ void DisAssembler::disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* col
disAsmFile = nullptr;
}
}
-#else // !DEBUG
+#else // !DEBUG
// NOTE: non-DEBUG builds always use jitstdout currently!
disAsmFile = jitstdout;
#endif // !DEBUG
@@ -1557,11 +1495,8 @@ void DisAssembler::disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* col
assert(hotCodeSize > 0);
if (coldCodeSize == 0)
{
- fprintf(disAsmFile,
- "************************** %hs:%hs size 0x%04IX **************************\n\n",
- disCurClassName,
- disCurMethodName,
- hotCodeSize);
+ fprintf(disAsmFile, "************************** %hs:%hs size 0x%04IX **************************\n\n",
+ disCurClassName, disCurMethodName, hotCodeSize);
fprintf(disAsmFile, "Base address : %ph\n", dspAddr(hotCodePtr));
}
@@ -1569,20 +1504,17 @@ void DisAssembler::disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* col
{
fprintf(disAsmFile,
"************************** %hs:%hs hot size 0x%04IX cold size 0x%04IX **************************\n\n",
- disCurClassName,
- disCurMethodName,
- hotCodeSize,
- coldCodeSize);
+ disCurClassName, disCurMethodName, hotCodeSize, coldCodeSize);
fprintf(disAsmFile, "Hot address : %ph\n", dspAddr(hotCodePtr));
fprintf(disAsmFile, "Cold address : %ph\n", dspAddr(coldCodePtr));
}
- disStartAddr = 0;
- disHotCodeBlock = (size_t) hotCodePtr;
- disHotCodeSize = hotCodeSize;
- disColdCodeBlock = (size_t) coldCodePtr;
- disColdCodeSize = coldCodeSize;
+ disStartAddr = 0;
+ disHotCodeBlock = (size_t)hotCodePtr;
+ disHotCodeSize = hotCodeSize;
+ disColdCodeBlock = (size_t)coldCodePtr;
+ disColdCodeSize = coldCodeSize;
disTotalCodeSize = disHotCodeSize + disColdCodeSize;
@@ -1601,14 +1533,11 @@ void DisAssembler::disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* col
}
}
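
The diffable-output mode used throughout this file works by replacing non-zero absolute addresses with a fixed, recognizable sentinel so two JIT runs can be text-diffed even though code lands at different addresses; dspAddr in disasm.h does exactly this with the constant 0xD1FFAB1E. A minimal reproduction of that helper:

    #include <cstdio>

    static bool disDiffable = true; // would come from the JIT's dspDiffable option

    // Hide absolute addresses behind a sentinel when diffable output is requested;
    // zero stays zero so "no address" remains obvious in the listing.
    template <typename T>
    T dspAddr(T addr)
    {
        return (addr == 0) ? 0 : (disDiffable ? T(0xD1FFAB1E) : addr);
    }

    int main()
    {
        size_t codePtr = 0x12340000; // pretend this is where the hot code landed
        printf("Base address : %zXh\n", (size_t)dspAddr(codePtr)); // prints D1FFAB1Eh when diffable
        return 0;
    }
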
-
/*****************************************************************************/
// This function is called for every method. Checks if we are supposed to disassemble
// the method, and where to send the disassembly output.
-void DisAssembler::disOpenForLateDisAsm(const char* curMethodName,
- const char* curClassName,
- PCCOR_SIGNATURE sig)
+void DisAssembler::disOpenForLateDisAsm(const char* curMethodName, const char* curClassName, PCCOR_SIGNATURE sig)
{
if (!disComp->opts.doLateDisasm)
{
@@ -1621,21 +1550,19 @@ void DisAssembler::disOpenForLateDisAsm(const char* curMethodName
/*****************************************************************************/
-void DisAssembler::disInit(Compiler* pComp)
+void DisAssembler::disInit(Compiler* pComp)
{
assert(pComp);
- disComp = pComp;
- disHasName = false;
- disLabels = nullptr;
- disAddrToMethodHandleMap = nullptr;
- disHelperAddrToMethodHandleMap = nullptr;
- disRelocationMap = nullptr;
- disDiffable = false;
- disAsmFile = nullptr;
+ disComp = pComp;
+ disHasName = false;
+ disLabels = nullptr;
+ disAddrToMethodHandleMap = nullptr;
+ disHelperAddrToMethodHandleMap = nullptr;
+ disRelocationMap = nullptr;
+ disDiffable = false;
+ disAsmFile = nullptr;
}
-
-
/*****************************************************************************/
-#endif //LATE_DISASM
+#endif // LATE_DISASM
/*****************************************************************************/
diff --git a/src/jit/disasm.h b/src/jit/disasm.h
index 27480615fa..972243e4dc 100644
--- a/src/jit/disasm.h
+++ b/src/jit/disasm.h
@@ -5,7 +5,7 @@
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
-XX DisAsm XX
+XX DisAsm XX
XX XX
XX The dis-assembler to display the native code generated XX
XX XX
@@ -41,7 +41,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "msvcdis.h"
#pragma warning(default : 4640)
-
#ifdef _TARGET_XARCH_
#include "disx86.h"
#elif defined(_TARGET_ARM64_)
@@ -52,24 +51,25 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#if CHECK_STRUCT_PADDING
#pragma warning(push)
-#pragma warning(default:4820) // 'bytes' bytes padding added after construct 'member_name'
-#endif // CHECK_STRUCT_PADDING
+#pragma warning(default : 4820) // 'bytes' bytes padding added after construct 'member_name'
+#endif // CHECK_STRUCT_PADDING
/*****************************************************************************/
#ifdef _HOST_64BIT_
-template<typename T>
-struct SizeTKeyFuncs: LargePrimitiveKeyFuncs<T>
+template <typename T>
+struct SizeTKeyFuncs : LargePrimitiveKeyFuncs<T>
{
};
-#else // !_HOST_64BIT_
-template<typename T>
-struct SizeTKeyFuncs: SmallPrimitiveKeyFuncs<T>
+#else // !_HOST_64BIT_
+template <typename T>
+struct SizeTKeyFuncs : SmallPrimitiveKeyFuncs<T>
{
};
#endif // _HOST_64BIT_
-typedef SimplerHashTable<size_t, SizeTKeyFuncs<size_t>, CORINFO_METHOD_HANDLE, JitSimplerHashBehavior> AddrToMethodHandleMap;
+typedef SimplerHashTable<size_t, SizeTKeyFuncs<size_t>, CORINFO_METHOD_HANDLE, JitSimplerHashBehavior>
+ AddrToMethodHandleMap;
typedef SimplerHashTable<size_t, SizeTKeyFuncs<size_t>, size_t, JitSimplerHashBehavior> AddrToAddrMap;
class Compiler;
@@ -77,102 +77,97 @@ class Compiler;
class DisAssembler
{
public:
-
// Constructor
- void disInit(Compiler* pComp);
+ void disInit(Compiler* pComp);
// Initialize the class for the current method being generated.
- void disOpenForLateDisAsm(const char* curMethodName,
- const char* curClassName,
- PCCOR_SIGNATURE sig);
+ void disOpenForLateDisAsm(const char* curMethodName, const char* curClassName, PCCOR_SIGNATURE sig);
// Disassemble a buffer: called after code for a method is generated.
- void disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* coldCodePtr, size_t coldCodeSize);
+ void disAsmCode(BYTE* hotCodePtr, size_t hotCodeSize, BYTE* coldCodePtr, size_t coldCodeSize);
// Register an address to be associated with a method handle.
- void disSetMethod(size_t addr, CORINFO_METHOD_HANDLE methHnd);
+ void disSetMethod(size_t addr, CORINFO_METHOD_HANDLE methHnd);
// Register a relocation address.
- void disRecordRelocation(size_t relocAddr, size_t targetAddr);
+ void disRecordRelocation(size_t relocAddr, size_t targetAddr);
private:
-
    /* Address of the hot and cold code blocks to disassemble */
- size_t disHotCodeBlock;
- size_t disColdCodeBlock;
+ size_t disHotCodeBlock;
+ size_t disColdCodeBlock;
    /* Size of the hot and cold code blocks to disassemble */
- size_t disHotCodeSize;
- size_t disColdCodeSize;
+ size_t disHotCodeSize;
+ size_t disColdCodeSize;
/* Total code size (simply cached version of disHotCodeSize + disColdCodeSize) */
- size_t disTotalCodeSize;
+ size_t disTotalCodeSize;
/* Address where the code block is to be loaded */
- size_t disStartAddr;
+ size_t disStartAddr;
/* Current offset in the code block */
- size_t disCurOffset;
+ size_t disCurOffset;
    /* Size (in bytes) of the current disassembled instruction */
- size_t disInstSize;
+ size_t disInstSize;
/* Target address of a jump */
- size_t disTarget;
+ size_t disTarget;
/* temporary buffer for function names */
// TODO-Review: there is some issue here where this is never set!
- char disFuncTempBuf[1024];
+ char disFuncTempBuf[1024];
/* Method and class name to output */
- const char* disCurMethodName;
- const char* disCurClassName;
+ const char* disCurMethodName;
+ const char* disCurClassName;
/* flag that signals when replacing a symbol name has been deferred for following callbacks */
// TODO-Review: there is some issue here where this is never set to 'true'!
- bool disHasName;
+ bool disHasName;
/* An array of labels, for jumps, LEAs, etc. There is one element in the array for each byte in the generated code.
* That byte is zero if the corresponding byte of generated code is not a label. Otherwise, the value
* is a label number.
*/
- BYTE* disLabels;
+ BYTE* disLabels;
- void DisasmBuffer (FILE * pfile,
- bool printit);
+ void DisasmBuffer(FILE* pfile, bool printit);
/* For the purposes of disassembly, we pretend that the hot and cold sections are linear, and not split.
* These functions create this model for the rest of the disassembly code.
*/
/* Given a linear offset into the code, find a pointer to the actual code (either in the hot or cold section) */
- const BYTE* disGetLinearAddr(size_t offset);
+ const BYTE* disGetLinearAddr(size_t offset);
/* Given a linear offset into the code, determine how many bytes are left in the hot or cold buffer the offset
* points to */
- size_t disGetBufferSize(size_t offset);
+ size_t disGetBufferSize(size_t offset);
// Map of instruction addresses to call target method handles for normal calls.
- AddrToMethodHandleMap* disAddrToMethodHandleMap;
- AddrToMethodHandleMap* GetAddrToMethodHandleMap();
+ AddrToMethodHandleMap* disAddrToMethodHandleMap;
+ AddrToMethodHandleMap* GetAddrToMethodHandleMap();
// Map of instruction addresses to call target method handles for JIT helper calls.
- AddrToMethodHandleMap* disHelperAddrToMethodHandleMap;
- AddrToMethodHandleMap* GetHelperAddrToMethodHandleMap();
+ AddrToMethodHandleMap* disHelperAddrToMethodHandleMap;
+ AddrToMethodHandleMap* GetHelperAddrToMethodHandleMap();
// Map of relocation addresses to relocation target.
- AddrToAddrMap* disRelocationMap;
- AddrToAddrMap* GetRelocationMap();
+ AddrToAddrMap* disRelocationMap;
+ AddrToAddrMap* GetRelocationMap();
- const char* disGetMethodFullName(size_t addr);
+ const char* disGetMethodFullName(size_t addr);
- FILE* disAsmFile;
+ FILE* disAsmFile;
- Compiler* disComp;
+ Compiler* disComp;
- bool disDiffable; // 'true' if the output should be diffable (hide or obscure absolute addresses)
+ bool disDiffable; // 'true' if the output should be diffable (hide or obscure absolute addresses)
- template<typename T>
+ template <typename T>
T dspAddr(T addr)
{
return (addr == 0) ? 0 : (disDiffable ? T(0xD1FFAB1E) : addr);
@@ -180,80 +175,52 @@ private:
/* Callbacks from msdis */
- static
- size_t __stdcall disCchAddr (const DIS* pdis,
- DIS::ADDR addr,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORDLONG* pdwDisp);
-
- size_t disCchAddrMember (const DIS* pdis,
- DIS::ADDR addr,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORDLONG* pdwDisp);
-
- static
- size_t __stdcall disCchFixup (const DIS* pdis,
- DIS::ADDR addr,
- size_t size,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORDLONG* pdwDisp);
-
- size_t disCchFixupMember (const DIS* pdis,
- DIS::ADDR addr,
- size_t size,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORDLONG* pdwDisp);
-
- static
- size_t __stdcall disCchRegRel (const DIS* pdis,
- DIS::REGA reg,
- DWORD disp,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORD* pdwDisp);
-
- size_t disCchRegRelMember (const DIS* pdis,
- DIS::REGA reg,
- DWORD disp,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax,
- DWORD* pdwDisp);
-
- static
- size_t __stdcall disCchReg (const DIS* pdis,
- DIS::REGA reg,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax);
-
- size_t disCchRegMember (const DIS* pdis,
- DIS::REGA reg,
- __in_ecount(cchMax) wchar_t* wz,
- size_t cchMax);
+ static size_t __stdcall disCchAddr(
+ const DIS* pdis, DIS::ADDR addr, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORDLONG* pdwDisp);
- /* Disassemble helper */
+ size_t disCchAddrMember(
+ const DIS* pdis, DIS::ADDR addr, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORDLONG* pdwDisp);
- size_t CbDisassemble(DIS* pdis,
- size_t offs,
- DIS::ADDR addr,
- const BYTE* pb,
- size_t cbMax,
- FILE* pfile,
- bool findLabels,
- bool printit = false,
- bool dispOffs = false,
- bool dispCodeBytes = false);
-};
+ static size_t __stdcall disCchFixup(const DIS* pdis,
+ DIS::ADDR addr,
+ size_t size,
+ __in_ecount(cchMax) wchar_t* wz,
+ size_t cchMax,
+ DWORDLONG* pdwDisp);
+
+ size_t disCchFixupMember(const DIS* pdis,
+ DIS::ADDR addr,
+ size_t size,
+ __in_ecount(cchMax) wchar_t* wz,
+ size_t cchMax,
+ DWORDLONG* pdwDisp);
+ static size_t __stdcall disCchRegRel(
+ const DIS* pdis, DIS::REGA reg, DWORD disp, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORD* pdwDisp);
+ size_t disCchRegRelMember(
+ const DIS* pdis, DIS::REGA reg, DWORD disp, __in_ecount(cchMax) wchar_t* wz, size_t cchMax, DWORD* pdwDisp);
+ static size_t __stdcall disCchReg(const DIS* pdis, DIS::REGA reg, __in_ecount(cchMax) wchar_t* wz, size_t cchMax);
+
+ size_t disCchRegMember(const DIS* pdis, DIS::REGA reg, __in_ecount(cchMax) wchar_t* wz, size_t cchMax);
+
+ /* Disassemble helper */
+
+ size_t CbDisassemble(DIS* pdis,
+ size_t offs,
+ DIS::ADDR addr,
+ const BYTE* pb,
+ size_t cbMax,
+ FILE* pfile,
+ bool findLabels,
+ bool printit = false,
+ bool dispOffs = false,
+ bool dispCodeBytes = false);
+};
/*****************************************************************************/
-#endif // LATE_DISASM
+#endif // LATE_DISASM
/*****************************************************************************/
-#endif // _DIS_H_
+#endif // _DIS_H_
/*****************************************************************************/
-
diff --git a/src/jit/earlyprop.cpp b/src/jit/earlyprop.cpp
index 65073336c8..ce6d506738 100644
--- a/src/jit/earlyprop.cpp
+++ b/src/jit/earlyprop.cpp
@@ -4,7 +4,7 @@
//
// Early Value Propagation
//
-// This phase performs an SSA-based value propagation optimization that currently only applies to array
+// This phase performs an SSA-based value propagation optimization that currently only applies to array
// lengths, runtime type handles, and explicit null checks. An SSA-based backwards tracking of local variables
// is performed at each point of interest, e.g., an array length reference site, a method table reference site, or
// an indirection.
@@ -16,18 +16,17 @@
#include "jitpch.h"
#include "ssabuilder.h"
-
bool Compiler::optDoEarlyPropForFunc()
{
- bool propArrayLen = (optMethodFlags & OMF_HAS_NEWARRAY) && (optMethodFlags & OMF_HAS_ARRAYREF);
- bool propGetType = (optMethodFlags & OMF_HAS_NEWOBJ) && (optMethodFlags & OMF_HAS_VTABLEREF);
+ bool propArrayLen = (optMethodFlags & OMF_HAS_NEWARRAY) && (optMethodFlags & OMF_HAS_ARRAYREF);
+ bool propGetType = (optMethodFlags & OMF_HAS_NEWOBJ) && (optMethodFlags & OMF_HAS_VTABLEREF);
bool propNullCheck = (optMethodFlags & OMF_HAS_NULLCHECK) != 0;
return propArrayLen || propGetType || propNullCheck;
}
bool Compiler::optDoEarlyPropForBlock(BasicBlock* block)
{
- bool bbHasArrayRef = (block->bbFlags & BBF_HAS_IDX_LEN) != 0;
+ bool bbHasArrayRef = (block->bbFlags & BBF_HAS_IDX_LEN) != 0;
bool bbHasVtableRef = (block->bbFlags & BBF_HAS_VTABREF) != 0;
bool bbHasNullCheck = (block->bbFlags & BBF_HAS_NULLCHECK) != 0;
return bbHasArrayRef || bbHasVtableRef || bbHasNullCheck;
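
Both gates are cheap bit tests: the method-level gate fires only when the producer and consumer patterns were both seen during import, and the per-block gate lets blocks with nothing of interest be skipped outright. The shape of the check, with hypothetical flag names (MF_*/BF_*) standing in for the real OMF_*/BBF_* bits:

    #include <cstdio>

    // Hypothetical flag sets mirroring the per-method and per-block bits.
    enum MethodFlags : unsigned
    {
        MF_HAS_NEWARRAY = 0x1,
        MF_HAS_ARRAYREF = 0x2,
    };

    enum BlockFlags : unsigned
    {
        BF_HAS_IDX_LEN = 0x1,
    };

    static bool doEarlyPropForFunc(unsigned methodFlags)
    {
        // Array-length propagation only pays off if the method both allocates arrays
        // and reads an array length somewhere.
        return (methodFlags & MF_HAS_NEWARRAY) && (methodFlags & MF_HAS_ARRAYREF);
    }

    static bool doEarlyPropForBlock(unsigned blockFlags)
    {
        return (blockFlags & BF_HAS_IDX_LEN) != 0;
    }

    int main()
    {
        unsigned methodFlags  = MF_HAS_NEWARRAY | MF_HAS_ARRAYREF;
        unsigned blockFlags[] = {0, BF_HAS_IDX_LEN};

        if (doEarlyPropForFunc(methodFlags))
        {
            for (unsigned flags : blockFlags)
            {
                printf("block %s\n", doEarlyPropForBlock(flags) ? "processed" : "skipped");
            }
        }
        return 0;
    }
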
@@ -40,7 +39,7 @@ bool Compiler::optDoEarlyPropForBlock(BasicBlock* block)
// tree - The input tree.
//
// Return Value:
-// Return true if the tree is a method table reference.
+// Return true if the tree is a method table reference.
bool Compiler::gtIsVtableRef(GenTreePtr tree)
{
@@ -52,8 +51,7 @@ bool Compiler::gtIsVtableRef(GenTreePtr tree)
{
GenTreeAddrMode* addrMode = addr->AsAddrMode();
- return (!addrMode->HasIndex() &&
- (addrMode->Base()->TypeGet() == TYP_REF));
+ return (!addrMode->HasIndex() && (addrMode->Base()->TypeGet() == TYP_REF));
}
}
@@ -61,14 +59,14 @@ bool Compiler::gtIsVtableRef(GenTreePtr tree)
}
//------------------------------------------------------------------------------
-// getArrayLengthFromAllocation: Return the array length for an array allocation
+// getArrayLengthFromAllocation: Return the array length for an array allocation
// helper call.
//
// Arguments:
// tree - The array allocation helper call.
//
// Return Value:
-// Return the array length node.
+// Return the array length node.
GenTreePtr Compiler::getArrayLengthFromAllocation(GenTreePtr tree)
{
@@ -95,14 +93,14 @@ GenTreePtr Compiler::getArrayLengthFromAllocation(GenTreePtr tree)
}
//-----------------------------------------------------------------------------
-// getObjectHandleNodeFromAllocation: Return the type handle for an object allocation
+// getObjectHandleNodeFromAllocation: Return the type handle for an object allocation
// helper call.
//
// Arguments:
// tree - The object allocation helper call.
//
// Return Value:
-// Return the object type handle node.
+// Return the object type handle node.
GenTreePtr Compiler::getObjectHandleNodeFromAllocation(GenTreePtr tree)
{
@@ -141,17 +139,17 @@ GenTreePtr Compiler::getObjectHandleNodeFromAllocation(GenTreePtr tree)
// 2. Runtime type handle propagation.
// 3. Null check folding.
//
-// For array length propagation, a demand-driven SSA-based backwards tracking of constant
-// array lengths is performed at each array length reference site which is in form of a
-// GT_ARR_LENGTH node. When a GT_ARR_LENGTH node is seen, the array ref pointer which is
-// the only child node of the GT_ARR_LENGTH is tracked. This is only done for array ref
-// pointers that have valid SSA forms.The tracking is along SSA use-def chain and stops
-// at the original array allocation site where we can grab the array length. The
-// GT_ARR_LENGTH node will then be rewritten to a GT_CNS_INT node if the array length is
+// For array length propagation, a demand-driven SSA-based backwards tracking of constant
+// array lengths is performed at each array length reference site, which is in the form of a
+// GT_ARR_LENGTH node. When a GT_ARR_LENGTH node is seen, the array ref pointer which is
+// the only child node of the GT_ARR_LENGTH is tracked. This is only done for array ref
+// pointers that have valid SSA forms. The tracking is along the SSA use-def chain and stops
+// at the original array allocation site where we can grab the array length. The
+// GT_ARR_LENGTH node will then be rewritten to a GT_CNS_INT node if the array length is
// constant.
//
-// Similarly, the same algorithm also applies to rewriting a method table (also known as
-// vtable) reference site which is in form of GT_INDIR node. The base pointer, which is
+// Similarly, the same algorithm also applies to rewriting a method table (also known as
+// vtable) reference site, which is in the form of a GT_INDIR node. The base pointer, which is
// an object reference pointer, is treated in the same way as an array reference pointer.
//
// Null check folding tries to find GT_INDIR(obj + const) that GT_NULLCHECK(obj) can be folded into
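The demand-driven walk described above can be shown in isolation. Below is a self-contained toy sketch, not the JIT's GenTree/SSA data structures; the Def struct, GetConstLength and the recursion bound are invented for illustration. Starting at a use, it follows the SSA def chain through simple copies until it reaches an allocation with a known constant length, and gives up once a depth bound is exceeded, mirroring optEarlyPropRecurBound.

    // Toy model of the backward SSA walk; none of these types exist in the JIT sources.
    #include <cstdio>

    struct Def
    {
        bool isCopy;   // definition is "dst = src"
        int  copySrc;  // SSA id of src when isCopy
        bool isAlloc;  // definition is "dst = new T[constLen]"
        int  constLen; // constant length when isAlloc
    };

    const int recursionBound = 5;

    // Returns the provable constant array length for ssaId, or -1 if unknown.
    int GetConstLength(const Def* defs, int ssaId, int depth)
    {
        if (depth > recursionBound)
        {
            return -1; // bound the walk, as optEarlyPropRecurBound does
        }
        const Def& d = defs[ssaId];
        if (d.isAlloc)
        {
            return d.constLen; // reached the allocation site
        }
        if (d.isCopy)
        {
            return GetConstLength(defs, d.copySrc, depth + 1); // follow the use-def chain
        }
        return -1; // any other definition (phi, call, ...) stops the propagation
    }

    int main()
    {
        // v0 = new int[10]; v1 = v0; v2 = v1;  =>  the length of v2 is provably 10.
        Def defs[3] = {{false, 0, true, 10}, {true, 0, false, 0}, {true, 1, false, 0}};
        printf("%d\n", GetConstLength(defs, 2, 0)); // prints 10
    }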
@@ -176,20 +174,22 @@ void Compiler::optEarlyProp()
for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
{
if (!optDoEarlyPropForBlock(block))
+ {
continue;
+ }
compCurBB = block;
- for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr; )
+ for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr;)
{
// Preserve the next link before the propagation and morph.
GenTreeStmt* next = stmt->gtNextStmt;
compCurStmt = stmt;
- // Walk the stmt tree in linear order to rewrite any array length reference with a
+ // Walk the stmt tree in linear order to rewrite any array length reference with a
// constant array length.
- bool isRewritten = false;
+ bool isRewritten = false;
bool bbHasNullCheck = (block->bbFlags & BBF_HAS_NULLCHECK) != 0;
for (GenTreePtr tree = stmt->gtStmt.gtStmtList; tree != nullptr; tree = tree->gtNext)
{
@@ -199,7 +199,7 @@ void Compiler::optEarlyProp()
}
}
- // Morph the stmt and update the evaluation order if the stmt has been rewritten.
+ // Morph the stmt and update the evaluation order if the stmt has been rewritten.
if (isRewritten)
{
gtSetStmtInfo(stmt);
@@ -208,13 +208,13 @@ void Compiler::optEarlyProp()
stmt = next;
}
- }
+ }
#ifdef DEBUG
if (verbose)
{
JITDUMP("\nAfter optEarlyProp:\n");
- fgDispBasicBlocks(/*dumpTrees*/true);
+ fgDispBasicBlocks(/*dumpTrees*/ true);
}
#endif
}
@@ -226,17 +226,17 @@ void Compiler::optEarlyProp()
// tree - The input tree node to be rewritten.
//
// Return Value:
-// Return true iff "tree" is successfully rewritten.
+// Return true iff "tree" is successfully rewritten.
bool Compiler::optEarlyPropRewriteTree(GenTreePtr tree)
{
GenTreePtr objectRefPtr = nullptr;
- optPropKind propKind = optPropKind::OPK_INVALID;
+ optPropKind propKind = optPropKind::OPK_INVALID;
if (tree->OperGet() == GT_ARR_LENGTH)
{
objectRefPtr = tree->gtOp.gtOp1;
- propKind = optPropKind::OPK_ARRAYLEN;
+ propKind = optPropKind::OPK_ARRAYLEN;
}
else if (tree->OperGet() == GT_IND)
{
@@ -256,7 +256,7 @@ bool Compiler::optEarlyPropRewriteTree(GenTreePtr tree)
}
objectRefPtr = tree->gtOp.gtOp1;
- propKind = optPropKind::OPK_OBJ_GETTYPE;
+ propKind = optPropKind::OPK_OBJ_GETTYPE;
}
else
{
@@ -268,17 +268,16 @@ bool Compiler::optEarlyPropRewriteTree(GenTreePtr tree)
return false;
}
- if (!objectRefPtr->OperIsScalarLocal() ||
- fgExcludeFromSsa(objectRefPtr->AsLclVarCommon()->GetLclNum()))
+ if (!objectRefPtr->OperIsScalarLocal() || fgExcludeFromSsa(objectRefPtr->AsLclVarCommon()->GetLclNum()))
{
return false;
}
bool isRewritten = false;
- GenTreePtr root = compCurStmt;
- unsigned lclNum = objectRefPtr->AsLclVarCommon()->GetLclNum();
- unsigned ssaNum = objectRefPtr->AsLclVarCommon()->GetSsaNum();
+ GenTreePtr root = compCurStmt;
+ unsigned lclNum = objectRefPtr->AsLclVarCommon()->GetLclNum();
+ unsigned ssaNum = objectRefPtr->AsLclVarCommon()->GetSsaNum();
GenTreePtr actualVal = optPropGetValue(lclNum, ssaNum, propKind);
@@ -291,9 +290,9 @@ bool Compiler::optEarlyPropRewriteTree(GenTreePtr tree)
if (actualVal->gtIntCon.gtIconVal > INT32_MAX)
{
// Don't propagate array lengths that are beyond the maximum value of a GT_ARR_LENGTH.
- // node. CORINFO_HELP_NEWARR_1_OBJ helper call allows to take a long integer as the
+// node. The CORINFO_HELP_NEWARR_1_OBJ helper call can take a long integer as the
// array length argument, but the type of GT_ARR_LENGTH is always INT32.
- return false;
+ return false;
}
}
else if (propKind == optPropKind::OPK_OBJ_GETTYPE)
@@ -316,8 +315,7 @@ bool Compiler::optEarlyPropRewriteTree(GenTreePtr tree)
// LabelIndex to update the FieldSeq annotations. EarlyProp may replace
// array length expressions with constants, so check if this is an array
// length operator that is part of an array index expression.
- bool isIndexExpr = (tree->OperGet() == GT_ARR_LENGTH &&
- ((tree->gtFlags & GTF_ARRLEN_ARR_IDX) != 0));
+ bool isIndexExpr = (tree->OperGet() == GT_ARR_LENGTH && ((tree->gtFlags & GTF_ARRLEN_ARR_IDX) != 0));
if (actualVal->GetNodeSize() <= tree->GetNodeSize())
{
@@ -332,7 +330,8 @@ bool Compiler::optEarlyPropRewriteTree(GenTreePtr tree)
actualValCopy->CopyFrom(actualVal, this);
actualValCopy->gtType = origType;
- if (isIndexExpr) {
+ if (isIndexExpr)
+ {
actualValCopy->LabelIndex(this);
}
@@ -355,11 +354,11 @@ bool Compiler::optEarlyPropRewriteTree(GenTreePtr tree)
#endif
}
- return isRewritten;
+ return isRewritten;
}
//-------------------------------------------------------------------------------------------
-// optPropGetValue: Given an SSA object ref pointer, get the value needed based on valueKind.
+// optPropGetValue: Given an SSA object ref pointer, get the value needed based on valueKind.
//
// Arguments:
// lclNum - The local var number of the ref pointer.
@@ -376,7 +375,7 @@ GenTreePtr Compiler::optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKi
//-----------------------------------------------------------------------------------
// optPropGetValueRec: Given an SSA object ref pointer, get the value needed based on valueKind
-// within a recursion bound.
+// within a recursion bound.
//
// Arguments:
// lclNum - The local var number of the array pointer.
@@ -397,14 +396,14 @@ GenTreePtr Compiler::optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPro
SSAName ssaName(lclNum, ssaNum);
GenTreePtr value = nullptr;
- // Bound the recursion with a hard limit.
+ // Bound the recursion with a hard limit.
if (walkDepth > optEarlyPropRecurBound)
{
return nullptr;
}
// Track along the use-def chain to get the array length
- GenTreePtr treelhs = lvaTable[lclNum].GetPerSsaData(ssaNum)->m_defLoc.m_tree;
+ GenTreePtr treelhs = lvaTable[lclNum].GetPerSsaData(ssaNum)->m_defLoc.m_tree;
if (treelhs == nullptr)
{
@@ -414,7 +413,7 @@ GenTreePtr Compiler::optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPro
}
else
{
- GenTreePtr *lhsPtr;
+ GenTreePtr* lhsPtr;
GenTreePtr treeDefParent = treelhs->gtGetParent(&lhsPtr);
if (treeDefParent->OperGet() == GT_ASG)
@@ -444,7 +443,7 @@ GenTreePtr Compiler::optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPro
}
}
}
- else if(valueKind == optPropKind::OPK_OBJ_GETTYPE)
+ else if (valueKind == optPropKind::OPK_OBJ_GETTYPE)
{
value = getObjectHandleNodeFromAllocation(treeRhs);
if (value != nullptr)
@@ -484,8 +483,8 @@ void Compiler::optFoldNullCheck(GenTreePtr tree)
// y y const
//
//
- // some trees in the same
- // basic block with
+ // some trees in the same
+ // basic block with
// no unsafe side effects
//
// indir
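A concrete, hypothetical instance of the shape sketched above: 'y' is an object reference, the IL produced an explicit null check of 'y', and the same block later computes 'x = y + 8' and loads through 'x'. When nothing in between has unsafe side effects, the explicit GT_NULLCHECK is redundant, since the later indirection through 'x' faults on a null 'y' anyway (this is only safe while the constant offset stays small). Schematically:

    // before:  x = COMMA(NULLCHECK(y), ADD(y, 8));  ...  IND(x)
    // after:   x = ADD(y, 8);                       ...  IND(x)   // IND now carries the exception side effect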
@@ -515,17 +514,17 @@ void Compiler::optFoldNullCheck(GenTreePtr tree)
// Find the definition of the indirected local (x in the picture)
GenTreePtr indLocalTree = tree->gtGetOp1();
- unsigned lclNum = indLocalTree->AsLclVarCommon()->GetLclNum();
- unsigned ssaNum = indLocalTree->AsLclVarCommon()->GetSsaNum();
+ unsigned lclNum = indLocalTree->AsLclVarCommon()->GetLclNum();
+ unsigned ssaNum = indLocalTree->AsLclVarCommon()->GetSsaNum();
if (ssaNum != SsaConfig::RESERVED_SSA_NUM)
{
- DefLoc defLoc = lvaTable[lclNum].GetPerSsaData(ssaNum)->m_defLoc;
+ DefLoc defLoc = lvaTable[lclNum].GetPerSsaData(ssaNum)->m_defLoc;
BasicBlock* defBlock = defLoc.m_blk;
if (compCurBB == defBlock)
{
- GenTreePtr defTree = defLoc.m_tree;
+ GenTreePtr defTree = defLoc.m_tree;
GenTreePtr defParent = defTree->gtGetParent(nullptr);
if ((defParent->OperGet() == GT_ASG) && (defParent->gtNext == nullptr))
@@ -554,14 +553,15 @@ void Compiler::optFoldNullCheck(GenTreePtr tree)
{
// Walk from the use to the def in reverse execution order to see
// if any nodes have unsafe side effects.
- GenTreePtr currentTree = indLocalTree->gtPrev;
- bool isInsideTry = compCurBB->hasTryIndex();
- bool canRemoveNullCheck = true;
- const unsigned maxNodesWalked = 25;
- unsigned nodesWalked = 0;
+ GenTreePtr currentTree = indLocalTree->gtPrev;
+ bool isInsideTry = compCurBB->hasTryIndex();
+ bool canRemoveNullCheck = true;
+ const unsigned maxNodesWalked = 25;
+ unsigned nodesWalked = 0;
// First walk the nodes in the statement containing the indirection
- // in reverse execution order starting with the indirection's predecessor.
+ // in reverse execution order starting with the indirection's
+ // predecessor.
while (canRemoveNullCheck && (currentTree != nullptr))
{
if ((nodesWalked++ > maxNodesWalked) ||
@@ -575,11 +575,11 @@ void Compiler::optFoldNullCheck(GenTreePtr tree)
}
}
- // Then walk the statement list in reverse execution order
+ // Then walk the statement list in reverse execution order
// until we get to the statement containing the null check.
// We only need to check the side effects at the root of each statement.
GenTreePtr curStmt = compCurStmt->gtPrev;
- currentTree = curStmt->gtStmt.gtStmtExpr;
+ currentTree = curStmt->gtStmt.gtStmtExpr;
while (canRemoveNullCheck && (currentTree != defParent))
{
if ((nodesWalked++ > maxNodesWalked) ||
@@ -604,7 +604,8 @@ void Compiler::optFoldNullCheck(GenTreePtr tree)
nullCheckTree->gtFlags |= GTF_ORDER_SIDEEFF;
defRHS->gtFlags &= ~(GTF_EXCEPT | GTF_DONT_CSE);
- defRHS->gtFlags |= additionNode->gtFlags & (GTF_EXCEPT | GTF_DONT_CSE);
+ defRHS->gtFlags |=
+ additionNode->gtFlags & (GTF_EXCEPT | GTF_DONT_CSE);
// Re-morph the statement.
fgMorphBlockStmt(compCurBB, curStmt DEBUGARG("optFoldNullCheck"));
@@ -635,7 +636,7 @@ void Compiler::optFoldNullCheck(GenTreePtr tree)
// Arguments:
// tree - The input GT_INDIR tree.
// isInsideTry - True if tree is inside try, false otherwise
-//
+//
// Return Value:
// True if GT_NULLCHECK can be folded into a node that is after tree in execution order,
// false otherwise.
@@ -663,4 +664,4 @@ bool Compiler::optCanMoveNullCheckPastTree(GenTreePtr tree, bool isInsideTry)
}
}
return result;
-}
\ No newline at end of file
+}
diff --git a/src/jit/ee_il_dll.cpp b/src/jit/ee_il_dll.cpp
index 896c4013b5..527244221e 100755
--- a/src/jit/ee_il_dll.cpp
+++ b/src/jit/ee_il_dll.cpp
@@ -30,33 +30,31 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
FILE* jitstdout = nullptr;
-ICorJitHost* g_jitHost = nullptr;
-static CILJit* ILJitter = 0; // The one and only JITTER I return
-bool g_jitInitialized = false;
+ICorJitHost* g_jitHost = nullptr;
+static CILJit* ILJitter = nullptr; // The one and only JITTER I return
+bool g_jitInitialized = false;
#ifndef FEATURE_MERGE_JIT_AND_ENGINE
-HINSTANCE g_hInst = NULL;
+HINSTANCE g_hInst = nullptr;
#endif // FEATURE_MERGE_JIT_AND_ENGINE
/*****************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
-JitOptions jitOpts =
-{
- NULL, // methodName
- NULL, // className
- 0.1, // CGknob
- 0, // testMask
+JitOptions jitOpts = {
+ nullptr, // methodName
+ nullptr, // className
+ 0.1, // CGknob
+ 0, // testMask
- (JitOptions *)NULL // lastDummyField.
+ (JitOptions*)nullptr // lastDummyField.
};
#endif // DEBUG
/*****************************************************************************/
-extern "C"
-void __stdcall jitStartup(ICorJitHost* jitHost)
+extern "C" void __stdcall jitStartup(ICorJitHost* jitHost)
{
if (g_jitInitialized)
{
@@ -132,8 +130,7 @@ void jitShutdown()
#ifndef FEATURE_MERGE_JIT_AND_ENGINE
-extern "C"
-BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID pvReserved)
+extern "C" BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID pvReserved)
{
if (dwReason == DLL_PROCESS_ATTACH)
{
@@ -156,8 +153,7 @@ HINSTANCE GetModuleInst()
return (g_hInst);
}
-extern "C"
-void __stdcall sxsJitStartup(CoreClrCallbacks const & cccallbacks)
+extern "C" void __stdcall sxsJitStartup(CoreClrCallbacks const& cccallbacks)
{
#ifndef SELF_NO_HOST
InitUtilcode(cccallbacks);
@@ -172,10 +168,13 @@ void __stdcall sxsJitStartup(CoreClrCallbacks const & cccallbacks)
/*****************************************************************************/
-struct CILJitSingletonAllocator { int x; };
-const CILJitSingletonAllocator CILJitSingleton = { 0 };
+struct CILJitSingletonAllocator
+{
+ int x;
+};
+const CILJitSingletonAllocator CILJitSingleton = {0};
-void *__cdecl operator new(size_t, const CILJitSingletonAllocator&)
+void* __cdecl operator new(size_t, const CILJitSingletonAllocator&)
{
static char CILJitBuff[sizeof(CILJit)];
return CILJitBuff;
@@ -185,11 +184,11 @@ ICorJitCompiler* g_realJitCompiler = nullptr;
ICorJitCompiler* __stdcall getJit()
{
- if (ILJitter == 0)
+ if (ILJitter == nullptr)
{
ILJitter = new (CILJitSingleton) CILJit();
}
- return(ILJitter);
+ return (ILJitter);
}
/*****************************************************************************/
@@ -215,8 +214,7 @@ void SetJitTls(void* value)
#else // !defined(FEATURE_MERGE_JIT_AND_ENGINE) || !defined(FEATURE_IMPLICIT_TLS)
-extern "C"
-{
+extern "C" {
void* GetJitTls();
void SetJitTls(void* value);
}
@@ -225,9 +223,7 @@ void SetJitTls(void* value);
#if defined(DEBUG)
-JitTls::JitTls(ICorJitInfo* jitInfo)
- : m_compiler(nullptr)
- , m_logEnv(jitInfo)
+JitTls::JitTls(ICorJitInfo* jitInfo) : m_compiler(nullptr), m_logEnv(jitInfo)
{
m_next = reinterpret_cast<JitTls*>(GetJitTls());
SetJitTls(this);
@@ -278,21 +274,17 @@ void JitTls::SetCompiler(Compiler* compiler)
//****************************************************************************
// The main JIT function for the 32 bit JIT. See code:ICorJitCompiler#EEToJitInterface for more on the EE-JIT
// interface. Things really don't get going inside the JIT until the code:Compiler::compCompile#Phases
-// method. Usually that is where you want to go.
+// method. Usually that is where you want to go.
-CorJitResult CILJit::compileMethod (
- ICorJitInfo* compHnd,
- CORINFO_METHOD_INFO* methodInfo,
- unsigned flags,
- BYTE ** entryAddress,
- ULONG * nativeSizeOfCode)
+CorJitResult CILJit::compileMethod(
+ ICorJitInfo* compHnd, CORINFO_METHOD_INFO* methodInfo, unsigned flags, BYTE** entryAddress, ULONG* nativeSizeOfCode)
{
if (g_realJitCompiler != nullptr)
{
return g_realJitCompiler->compileMethod(compHnd, methodInfo, flags, entryAddress, nativeSizeOfCode);
}
- CORJIT_FLAGS jitFlags = { 0 };
+ CORJIT_FLAGS jitFlags = {0};
DWORD jitFlagsSize = 0;
#if COR_JIT_EE_VERSION > 460
@@ -308,25 +300,21 @@ CorJitResult CILJit::compileMethod (
jitFlags.corJitFlags = flags;
}
- int result;
- void * methodCodePtr = NULL;
- CORINFO_METHOD_HANDLE methodHandle = methodInfo->ftn;
+ int result;
+ void* methodCodePtr = nullptr;
+ CORINFO_METHOD_HANDLE methodHandle = methodInfo->ftn;
JitTls jitTls(compHnd); // Initialize any necessary thread-local state
assert(methodInfo->ILCode);
- result = jitNativeCode(methodHandle,
- methodInfo->scope,
- compHnd,
- methodInfo,
- &methodCodePtr,
- nativeSizeOfCode,
- &jitFlags,
- NULL);
+ result = jitNativeCode(methodHandle, methodInfo->scope, compHnd, methodInfo, &methodCodePtr, nativeSizeOfCode,
+ &jitFlags, nullptr);
if (result == CORJIT_OK)
+ {
*entryAddress = (BYTE*)methodCodePtr;
+ }
return CorJitResult(result);
}
@@ -334,7 +322,7 @@ CorJitResult CILJit::compileMethod (
/*****************************************************************************
* Notification from VM to clear any caches
*/
-void CILJit::clearCache ( void )
+void CILJit::clearCache(void)
{
if (g_realJitCompiler != nullptr)
{
@@ -348,14 +336,16 @@ void CILJit::clearCache ( void )
/*****************************************************************************
* Notify vm that we have something to clean up
*/
-BOOL CILJit::isCacheCleanupRequired ( void )
+BOOL CILJit::isCacheCleanupRequired(void)
{
BOOL doCleanup;
if (g_realJitCompiler != nullptr)
{
if (g_realJitCompiler->isCacheCleanupRequired())
+ {
return TRUE;
+ }
// Continue...
}
@@ -404,8 +394,7 @@ unsigned CILJit::getMaxIntrinsicSIMDVectorLength(DWORD cpuCompileFlags)
#ifdef _TARGET_AMD64_
#ifdef FEATURE_AVX_SUPPORT
- if (((cpuCompileFlags & CORJIT_FLG_PREJIT) == 0) &&
- ((cpuCompileFlags & CORJIT_FLG_FEATURE_SIMD) != 0) &&
+ if (((cpuCompileFlags & CORJIT_FLG_PREJIT) == 0) && ((cpuCompileFlags & CORJIT_FLG_FEATURE_SIMD) != 0) &&
((cpuCompileFlags & CORJIT_FLG_USE_AVX2) != 0))
{
if (JitConfig.EnableAVX() != 0)
@@ -415,7 +404,7 @@ unsigned CILJit::getMaxIntrinsicSIMDVectorLength(DWORD cpuCompileFlags)
}
#endif // FEATURE_AVX_SUPPORT
return 16;
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
return 0;
#endif // !_TARGET_AMD64_
}
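Roughly, in terms of the managed Vector&lt;T&gt; sizes this feeds: a return value of 32 gives Vector&lt;float&gt; 8 elements and Vector&lt;int&gt; 8, the 16-byte default gives 4 of each, and the 0 returned for other targets means no acceleration.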
@@ -425,45 +414,44 @@ void CILJit::setRealJit(ICorJitCompiler* realJitCompiler)
g_realJitCompiler = realJitCompiler;
}
-
/*****************************************************************************
* Returns the number of bytes required for the given type argument
*/
-unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig)
+unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig)
{
-#if defined(_TARGET_AMD64_)
+#if defined(_TARGET_AMD64_)
// Everything fits into a single 'slot' size
// to accommodate irregular sized structs, they are passed byref
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- CORINFO_CLASS_HANDLE argClass;
- CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass));
- var_types argType = JITtype2varType(argTypeJit);
+ CORINFO_CLASS_HANDLE argClass;
+ CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass));
+ var_types argType = JITtype2varType(argTypeJit);
if (varTypeIsStruct(argType))
{
unsigned structSize = info.compCompHnd->getClassSize(argClass);
- return structSize; // TODO: roundUp() needed here?
+ return structSize; // TODO: roundUp() needed here?
}
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
return sizeof(size_t);
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
- CORINFO_CLASS_HANDLE argClass;
- CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass));
- var_types argType = JITtype2varType(argTypeJit);
+ CORINFO_CLASS_HANDLE argClass;
+ CorInfoType argTypeJit = strip(info.compCompHnd->getArgType(sig, list, &argClass));
+ var_types argType = JITtype2varType(argTypeJit);
if (varTypeIsStruct(argType))
{
unsigned structSize = info.compCompHnd->getClassSize(argClass);
// make certain the EE passes us back the right thing for refanys
- assert(argTypeJit != CORINFO_TYPE_REFANY || structSize == 2*sizeof(void*));
+ assert(argTypeJit != CORINFO_TYPE_REFANY || structSize == 2 * sizeof(void*));
- // For each target that supports passing struct args in multiple registers
+ // For each target that supports passing struct args in multiple registers
// apply the target specific rules for them here:
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -480,8 +468,8 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_
// Is the struct larger than 16 bytes
if (structSize > (2 * TARGET_POINTER_SIZE))
{
- var_types hfaType = GetHfaType(argClass); // set to float or double if it is an HFA, otherwise TYP_UNDEF
- bool isHfa = (hfaType != TYP_UNDEF);
+ var_types hfaType = GetHfaType(argClass); // set to float or double if it is an HFA, otherwise TYP_UNDEF
+ bool isHfa = (hfaType != TYP_UNDEF);
if (!isHfa)
{
// This struct is passed by reference using a single 'slot'
@@ -491,7 +479,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_
// otherwise will we pass this struct by value in multiple registers
}
#elif defined(_TARGET_ARM_)
- // otherwise will we pass this struct by value in multiple registers
+// otherwise will we pass this struct by value in multiple registers
#else
NYI("unknown target");
#endif // defined(_TARGET_XXX_)
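A worked example of the ARM64 rules above (types picked for illustration): a struct of three doubles is 24 bytes but is an HFA, so it is passed by value in floating-point registers; a 24-byte struct containing an int field is not an HFA, so it is passed by reference through a single pointer-sized slot; a 16-byte struct never reaches this check and is passed by value in two registers.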
@@ -502,7 +490,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_
}
else
{
- unsigned argSize = sizeof(int) * genTypeStSz(argType);
+ unsigned argSize = sizeof(int) * genTypeStSz(argType);
assert(0 < argSize && argSize <= sizeof(__int64));
return (unsigned)roundUp(argSize, TARGET_POINTER_SIZE);
}
@@ -511,11 +499,11 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_
/*****************************************************************************/
-GenTreePtr Compiler::eeGetPInvokeCookie(CORINFO_SIG_INFO *szMetaSig)
+GenTreePtr Compiler::eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig)
{
- void * cookie, * pCookie;
+ void *cookie, *pCookie;
cookie = info.compCompHnd->GetCookieForPInvokeCalliSig(szMetaSig, &pCookie);
- assert((cookie == NULL) != (pCookie == NULL));
+ assert((cookie == nullptr) != (pCookie == nullptr));
return gtNewIconEmbHndNode(cookie, pCookie, GTF_ICON_PINVKI_HDL);
}
@@ -527,12 +515,11 @@ GenTreePtr Compiler::eeGetPInvokeCookie(CORINFO_SIG_INFO *szMetaSig)
// type - The array element type
//
// Return Value:
-// The offset to the first array element.
+// The offset to the first array element.
-unsigned Compiler::eeGetArrayDataOffset(var_types type)
+unsigned Compiler::eeGetArrayDataOffset(var_types type)
{
- return varTypeIsGC(type) ? eeGetEEInfo()->offsetOfObjArrayData
- : offsetof(CORINFO_Array, u1Elems);
+ return varTypeIsGC(type) ? eeGetEEInfo()->offsetOfObjArrayData : offsetof(CORINFO_Array, u1Elems);
}
//------------------------------------------------------------------------
@@ -548,26 +535,23 @@ unsigned Compiler::eeGetArrayDataOffset(var_types type)
// Assumptions:
// The rank should be greater than 0.
-unsigned Compiler::eeGetMDArrayDataOffset(var_types type, unsigned rank)
+unsigned Compiler::eeGetMDArrayDataOffset(var_types type, unsigned rank)
{
assert(rank > 0);
- // Note that below we're specifically using genTypeSize(TYP_INT) because array
+ // Note that below we're specifically using genTypeSize(TYP_INT) because array
// indices are not native int.
return eeGetArrayDataOffset(type) + 2 * genTypeSize(TYP_INT) * rank;
}
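To make the arithmetic concrete: each dimension of a multi-dimensional array stores a 32-bit length and a 32-bit lower bound ahead of the data, which is where the 2 * genTypeSize(TYP_INT) * rank term comes from. For a rank-2 array of a non-GC element type, and assuming for illustration that eeGetArrayDataOffset returns 0x10, the first element would start at 0x10 + 2 * 4 * 2 = 0x20.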
/*****************************************************************************/
-void Compiler::eeGetStmtOffsets()
+void Compiler::eeGetStmtOffsets()
{
ULONG32 offsetsCount;
- DWORD * offsets;
+ DWORD* offsets;
ICorDebugInfo::BoundaryTypes offsetsImplicit;
- info.compCompHnd->getBoundaries(info.compMethodHnd,
- &offsetsCount,
- &offsets,
- &offsetsImplicit);
+ info.compCompHnd->getBoundaries(info.compMethodHnd, &offsetsCount, &offsets, &offsetsImplicit);
/* Set the implicit boundaries */
@@ -578,14 +562,18 @@ void Compiler::eeGetStmtOffsets()
info.compStmtOffsetsCount = 0;
if (offsetsCount == 0)
+ {
return;
+ }
info.compStmtOffsets = new (this, CMK_DebugInfo) IL_OFFSET[offsetsCount];
for (unsigned i = 0; i < offsetsCount; i++)
{
if (offsets[i] > info.compILCodeSize)
+ {
continue;
+ }
info.compStmtOffsets[info.compStmtOffsetsCount] = offsets[i];
info.compStmtOffsetsCount++;
@@ -599,28 +587,31 @@ void Compiler::eeGetStmtOffsets()
* Debugging support - Local var info
*/
-void Compiler::eeSetLVcount (unsigned count)
+void Compiler::eeSetLVcount(unsigned count)
{
assert(opts.compScopeInfo);
-
+
JITDUMP("VarLocInfo count is %d\n", count);
eeVarsCount = count;
if (eeVarsCount)
- eeVars = (VarResultInfo *)info.compCompHnd->allocateArray(eeVarsCount * sizeof(eeVars[0]));
+ {
+ eeVars = (VarResultInfo*)info.compCompHnd->allocateArray(eeVarsCount * sizeof(eeVars[0]));
+ }
else
- eeVars = NULL;
+ {
+ eeVars = nullptr;
+ }
}
-void Compiler::eeSetLVinfo
- (unsigned which,
- UNATIVE_OFFSET startOffs,
- UNATIVE_OFFSET length,
- unsigned varNum,
- unsigned LVnum,
- VarName name,
- bool avail,
- const Compiler::siVarLoc & varLoc)
+void Compiler::eeSetLVinfo(unsigned which,
+ UNATIVE_OFFSET startOffs,
+ UNATIVE_OFFSET length,
+ unsigned varNum,
+ unsigned LVnum,
+ VarName name,
+ bool avail,
+ const Compiler::siVarLoc& varLoc)
{
// ICorDebugInfo::VarLoc and Compiler::siVarLoc have to overlap
// This is checked in siInit()
@@ -629,16 +620,16 @@ void Compiler::eeSetLVinfo
assert(eeVarsCount > 0);
assert(which < eeVarsCount);
- if (eeVars != NULL)
+ if (eeVars != nullptr)
{
- eeVars[which].startOffset = startOffs;
- eeVars[which].endOffset = startOffs + length;
- eeVars[which].varNumber = varNum;
- eeVars[which].loc = varLoc;
+ eeVars[which].startOffset = startOffs;
+ eeVars[which].endOffset = startOffs + length;
+ eeVars[which].varNumber = varNum;
+ eeVars[which].loc = varLoc;
}
}
-void Compiler::eeSetLVdone()
+void Compiler::eeSetLVdone()
{
// necessary but not sufficient condition that the 2 struct definitions overlap
assert(sizeof(eeVars[0]) == sizeof(ICorDebugInfo::NativeVarInfo));
@@ -647,62 +638,64 @@ void Compiler::eeSetLVdone()
#ifdef DEBUG
if (verbose)
{
- eeDispVars(info.compMethodHnd,
- eeVarsCount,
- (ICorDebugInfo::NativeVarInfo *) eeVars);
+ eeDispVars(info.compMethodHnd, eeVarsCount, (ICorDebugInfo::NativeVarInfo*)eeVars);
}
#endif // DEBUG
- info.compCompHnd->setVars(info.compMethodHnd,
- eeVarsCount,
- (ICorDebugInfo::NativeVarInfo *) eeVars);
+ info.compCompHnd->setVars(info.compMethodHnd, eeVarsCount, (ICorDebugInfo::NativeVarInfo*)eeVars);
- eeVars = NULL; // We give up ownership after setVars()
+ eeVars = nullptr; // We give up ownership after setVars()
}
-void Compiler::eeGetVars()
+void Compiler::eeGetVars()
{
- ICorDebugInfo::ILVarInfo * varInfoTable;
- ULONG32 varInfoCount;
- bool extendOthers;
+ ICorDebugInfo::ILVarInfo* varInfoTable;
+ ULONG32 varInfoCount;
+ bool extendOthers;
- info.compCompHnd->getVars(info.compMethodHnd,
- &varInfoCount, &varInfoTable, &extendOthers);
+ info.compCompHnd->getVars(info.compMethodHnd, &varInfoCount, &varInfoTable, &extendOthers);
#ifdef DEBUG
if (verbose)
+ {
printf("getVars() returned cVars = %d, extendOthers = %s\n", varInfoCount, extendOthers ? "true" : "false");
+ }
#endif
// Over allocate in case extendOthers is set.
- SIZE_T varInfoCountExtra = varInfoCount;
+ SIZE_T varInfoCountExtra = varInfoCount;
if (extendOthers)
+ {
varInfoCountExtra += info.compLocalsCount;
+ }
if (varInfoCountExtra == 0)
+ {
return;
+ }
info.compVarScopes = new (this, CMK_DebugInfo) VarScopeDsc[varInfoCountExtra];
- VarScopeDsc* localVarPtr = info.compVarScopes;
- ICorDebugInfo::ILVarInfo *v = varInfoTable;
+ VarScopeDsc* localVarPtr = info.compVarScopes;
+ ICorDebugInfo::ILVarInfo* v = varInfoTable;
for (unsigned i = 0; i < varInfoCount; i++, v++)
{
#ifdef DEBUG
if (verbose)
- printf("var:%d start:%d end:%d\n",
- v->varNumber,
- v->startOffset,
- v->endOffset);
+ {
+ printf("var:%d start:%d end:%d\n", v->varNumber, v->startOffset, v->endOffset);
+ }
#endif
if (v->startOffset >= v->endOffset)
+ {
continue;
+ }
assert(v->startOffset <= info.compILCodeSize);
- assert(v->endOffset <= info.compILCodeSize);
+ assert(v->endOffset <= info.compILCodeSize);
localVarPtr->vsdLifeBeg = v->startOffset;
localVarPtr->vsdLifeEnd = v->endOffset;
@@ -710,7 +703,7 @@ void Compiler::eeGetVars()
localVarPtr->vsdVarNum = compMapILvarNum(v->varNumber);
#ifdef DEBUG
- localVarPtr->vsdName = gtGetLclVarName(localVarPtr->vsdVarNum);
+ localVarPtr->vsdName = gtGetLclVarName(localVarPtr->vsdVarNum);
#endif
localVarPtr++;
@@ -722,27 +715,32 @@ void Compiler::eeGetVars()
to zero-initialize all of them. This will be expensive if it's used
for too many variables.
*/
- if (extendOthers)
+ if (extendOthers)
{
// Allocate a bit-array for all the variables and initialize to false
- bool * varInfoProvided = (bool *)compGetMemA(info.compLocalsCount *
- sizeof(varInfoProvided[0]));
+ bool* varInfoProvided = (bool*)compGetMemA(info.compLocalsCount * sizeof(varInfoProvided[0]));
unsigned i;
for (i = 0; i < info.compLocalsCount; i++)
+ {
varInfoProvided[i] = false;
+ }
// Find which vars have absolutely no varInfo provided
for (i = 0; i < info.compVarScopesCount; i++)
+ {
varInfoProvided[info.compVarScopes[i].vsdVarNum] = true;
+ }
// Create entries for the variables with no varInfo
for (unsigned varNum = 0; varNum < info.compLocalsCount; varNum++)
{
if (varInfoProvided[varNum])
+ {
continue;
+ }
// Create a varInfo with scope over the entire method
@@ -752,7 +750,7 @@ void Compiler::eeGetVars()
localVarPtr->vsdLVnum = info.compVarScopesCount;
#ifdef DEBUG
- localVarPtr->vsdName = gtGetLclVarName(localVarPtr->vsdVarNum);
+ localVarPtr->vsdName = gtGetLclVarName(localVarPtr->vsdVarNum);
#endif
localVarPtr++;
@@ -763,116 +761,116 @@ void Compiler::eeGetVars()
assert(localVarPtr <= info.compVarScopes + varInfoCountExtra);
if (varInfoCount != 0)
+ {
info.compCompHnd->freeArray(varInfoTable);
+ }
#ifdef DEBUG
if (verbose)
+ {
compDispLocalVars();
+ }
#endif // DEBUG
}
#ifdef DEBUG
-void Compiler::eeDispVar(ICorDebugInfo::NativeVarInfo* var)
+void Compiler::eeDispVar(ICorDebugInfo::NativeVarInfo* var)
{
- const char* name = NULL;
+ const char* name = nullptr;
if (var->varNumber == (DWORD)ICorDebugInfo::VARARGS_HND_ILNUM)
+ {
name = "varargsHandle";
+ }
else if (var->varNumber == (DWORD)ICorDebugInfo::RETBUF_ILNUM)
+ {
name = "retBuff";
+ }
else if (var->varNumber == (DWORD)ICorDebugInfo::TYPECTXT_ILNUM)
+ {
name = "typeCtx";
-
- printf("%3d(%10s) : From %08Xh to %08Xh, in ",
- var->varNumber,
- (VarNameToStr(name) == NULL) ? "UNKNOWN" : VarNameToStr(name),
- var->startOffset,
- var->endOffset);
+ }
+ printf("%3d(%10s) : From %08Xh to %08Xh, in ", var->varNumber,
+ (VarNameToStr(name) == nullptr) ? "UNKNOWN" : VarNameToStr(name), var->startOffset, var->endOffset);
switch (var->loc.vlType)
{
- case VLT_REG:
- case VLT_REG_BYREF:
- case VLT_REG_FP:
- printf("%s", getRegName(var->loc.vlReg.vlrReg));
- if (var->loc.vlType == (ICorDebugInfo::VarLocType)VLT_REG_BYREF)
- {
- printf(" byref");
- }
- break;
+ case VLT_REG:
+ case VLT_REG_BYREF:
+ case VLT_REG_FP:
+ printf("%s", getRegName(var->loc.vlReg.vlrReg));
+ if (var->loc.vlType == (ICorDebugInfo::VarLocType)VLT_REG_BYREF)
+ {
+ printf(" byref");
+ }
+ break;
- case VLT_STK:
- case VLT_STK_BYREF:
- if ((int) var->loc.vlStk.vlsBaseReg != (int) ICorDebugInfo::REGNUM_AMBIENT_SP)
- {
- printf("%s[%d] (1 slot)", getRegName(var->loc.vlStk.vlsBaseReg),
- var->loc.vlStk.vlsOffset);
- }
- else
- {
- printf(STR_SPBASE "'[%d] (1 slot)", var->loc.vlStk.vlsOffset);
- }
- if (var->loc.vlType == (ICorDebugInfo::VarLocType)VLT_REG_BYREF)
- {
- printf(" byref");
- }
- break;
+ case VLT_STK:
+ case VLT_STK_BYREF:
+ if ((int)var->loc.vlStk.vlsBaseReg != (int)ICorDebugInfo::REGNUM_AMBIENT_SP)
+ {
+ printf("%s[%d] (1 slot)", getRegName(var->loc.vlStk.vlsBaseReg), var->loc.vlStk.vlsOffset);
+ }
+ else
+ {
+ printf(STR_SPBASE "'[%d] (1 slot)", var->loc.vlStk.vlsOffset);
+ }
+ if (var->loc.vlType == (ICorDebugInfo::VarLocType)VLT_REG_BYREF)
+ {
+ printf(" byref");
+ }
+ break;
#ifndef _TARGET_AMD64_
- case VLT_REG_REG:
- printf("%s-%s", getRegName(var->loc.vlRegReg.vlrrReg1),
- getRegName(var->loc.vlRegReg.vlrrReg2));
- break;
+ case VLT_REG_REG:
+ printf("%s-%s", getRegName(var->loc.vlRegReg.vlrrReg1), getRegName(var->loc.vlRegReg.vlrrReg2));
+ break;
- case VLT_REG_STK:
- if ((int) var->loc.vlRegStk.vlrsStk.vlrssBaseReg != (int) ICorDebugInfo::REGNUM_AMBIENT_SP)
- {
- printf("%s-%s[%d]", getRegName(var->loc.vlRegStk.vlrsReg),
- getRegName(var->loc.vlRegStk.vlrsStk.vlrssBaseReg),
- var->loc.vlRegStk.vlrsStk.vlrssOffset);
- }
- else
- {
- printf("%s-" STR_SPBASE "'[%d]", getRegName(var->loc.vlRegStk.vlrsReg),
- var->loc.vlRegStk.vlrsStk.vlrssOffset);
- }
- break;
+ case VLT_REG_STK:
+ if ((int)var->loc.vlRegStk.vlrsStk.vlrssBaseReg != (int)ICorDebugInfo::REGNUM_AMBIENT_SP)
+ {
+ printf("%s-%s[%d]", getRegName(var->loc.vlRegStk.vlrsReg),
+ getRegName(var->loc.vlRegStk.vlrsStk.vlrssBaseReg), var->loc.vlRegStk.vlrsStk.vlrssOffset);
+ }
+ else
+ {
+ printf("%s-" STR_SPBASE "'[%d]", getRegName(var->loc.vlRegStk.vlrsReg),
+ var->loc.vlRegStk.vlrsStk.vlrssOffset);
+ }
+ break;
- case VLT_STK_REG:
- unreached(); // unexpected
+ case VLT_STK_REG:
+ unreached(); // unexpected
- case VLT_STK2:
- if ((int) var->loc.vlStk2.vls2BaseReg != (int) ICorDebugInfo::REGNUM_AMBIENT_SP)
- {
- printf("%s[%d] (2 slots)", getRegName(var->loc.vlStk2.vls2BaseReg),
- var->loc.vlStk2.vls2Offset);
- }
- else
- {
- printf(STR_SPBASE "'[%d] (2 slots)", var->loc.vlStk2.vls2Offset);
- }
- break;
+ case VLT_STK2:
+ if ((int)var->loc.vlStk2.vls2BaseReg != (int)ICorDebugInfo::REGNUM_AMBIENT_SP)
+ {
+ printf("%s[%d] (2 slots)", getRegName(var->loc.vlStk2.vls2BaseReg), var->loc.vlStk2.vls2Offset);
+ }
+ else
+ {
+ printf(STR_SPBASE "'[%d] (2 slots)", var->loc.vlStk2.vls2Offset);
+ }
+ break;
- case VLT_FPSTK:
- printf("ST(L-%d)", var->loc.vlFPstk.vlfReg);
- break;
+ case VLT_FPSTK:
+ printf("ST(L-%d)", var->loc.vlFPstk.vlfReg);
+ break;
- case VLT_FIXED_VA:
- printf("fxd_va[%d]", var->loc.vlFixedVarArg.vlfvOffset);
- break;
+ case VLT_FIXED_VA:
+ printf("fxd_va[%d]", var->loc.vlFixedVarArg.vlfvOffset);
+ break;
#endif // !_TARGET_AMD64_
- default:
- unreached(); // unexpected
+ default:
+ unreached(); // unexpected
}
printf("\n");
}
// Same parameters as ICorStaticInfo::setVars().
-void Compiler::eeDispVars(CORINFO_METHOD_HANDLE ftn,
- ULONG32 cVars,
- ICorDebugInfo::NativeVarInfo* vars)
+void Compiler::eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars)
{
printf("*************** Variable debug info\n");
printf("%d vars\n", cVars);
@@ -888,37 +886,38 @@ void Compiler::eeDispVars(CORINFO_METHOD_HANDLE ftn,
* Debugging support - Line number info
*/
-void Compiler::eeSetLIcount (unsigned count)
+void Compiler::eeSetLIcount(unsigned count)
{
assert(opts.compDbgInfo);
-
+
eeBoundariesCount = count;
if (eeBoundariesCount)
- eeBoundaries = (boundariesDsc *) info.compCompHnd->allocateArray(eeBoundariesCount * sizeof(eeBoundaries[0]));
+ {
+ eeBoundaries = (boundariesDsc*)info.compCompHnd->allocateArray(eeBoundariesCount * sizeof(eeBoundaries[0]));
+ }
else
- eeBoundaries = NULL;
+ {
+ eeBoundaries = nullptr;
+ }
}
-void Compiler::eeSetLIinfo (unsigned which,
- UNATIVE_OFFSET nativeOffset,
- IL_OFFSET ilOffset,
- bool stkEmpty,
- bool callInstruction)
+void Compiler::eeSetLIinfo(
+ unsigned which, UNATIVE_OFFSET nativeOffset, IL_OFFSET ilOffset, bool stkEmpty, bool callInstruction)
{
assert(opts.compDbgInfo);
assert(eeBoundariesCount > 0);
assert(which < eeBoundariesCount);
- if (eeBoundaries != NULL)
+ if (eeBoundaries != nullptr)
{
- eeBoundaries[which].nativeIP = nativeOffset;
- eeBoundaries[which].ilOffset = ilOffset;
+ eeBoundaries[which].nativeIP = nativeOffset;
+ eeBoundaries[which].ilOffset = ilOffset;
eeBoundaries[which].sourceReason = stkEmpty ? ICorDebugInfo::STACK_EMPTY : 0;
eeBoundaries[which].sourceReason |= callInstruction ? ICorDebugInfo::CALL_INSTRUCTION : 0;
}
}
-void Compiler::eeSetLIdone()
+void Compiler::eeSetLIdone()
{
assert(opts.compDbgInfo);
@@ -932,38 +931,36 @@ void Compiler::eeSetLIdone()
// necessary but not sufficient condition that the 2 struct definitions overlap
assert(sizeof(eeBoundaries[0]) == sizeof(ICorDebugInfo::OffsetMapping));
- info.compCompHnd->setBoundaries(info.compMethodHnd,
- eeBoundariesCount,
- (ICorDebugInfo::OffsetMapping *) eeBoundaries);
+ info.compCompHnd->setBoundaries(info.compMethodHnd, eeBoundariesCount, (ICorDebugInfo::OffsetMapping*)eeBoundaries);
- eeBoundaries = NULL; // we give up ownership after setBoundaries();
+ eeBoundaries = nullptr; // we give up ownership after setBoundaries();
}
#if defined(DEBUG)
/* static */
-void Compiler::eeDispILOffs(IL_OFFSET offs)
+void Compiler::eeDispILOffs(IL_OFFSET offs)
{
- const char * specialOffs[] = { "EPILOG", "PROLOG", "NO_MAP" };
+ const char* specialOffs[] = {"EPILOG", "PROLOG", "NO_MAP"};
switch ((int)offs) // Need the cast since offs is unsigned and the case statements are comparing to signed.
{
- case ICorDebugInfo::EPILOG:
- case ICorDebugInfo::PROLOG:
- case ICorDebugInfo::NO_MAPPING:
- assert(DWORD(ICorDebugInfo::EPILOG) + 1 == (unsigned)ICorDebugInfo::PROLOG);
- assert(DWORD(ICorDebugInfo::EPILOG) + 2 == (unsigned)ICorDebugInfo::NO_MAPPING);
- int specialOffsNum;
- specialOffsNum = offs - DWORD(ICorDebugInfo::EPILOG);
- printf("%s", specialOffs[specialOffsNum]);
- break;
- default:
- printf("0x%04X", offs);
+ case ICorDebugInfo::EPILOG:
+ case ICorDebugInfo::PROLOG:
+ case ICorDebugInfo::NO_MAPPING:
+ assert(DWORD(ICorDebugInfo::EPILOG) + 1 == (unsigned)ICorDebugInfo::PROLOG);
+ assert(DWORD(ICorDebugInfo::EPILOG) + 2 == (unsigned)ICorDebugInfo::NO_MAPPING);
+ int specialOffsNum;
+ specialOffsNum = offs - DWORD(ICorDebugInfo::EPILOG);
+ printf("%s", specialOffs[specialOffsNum]);
+ break;
+ default:
+ printf("0x%04X", offs);
}
}
/* static */
-void Compiler::eeDispLineInfo(const boundariesDsc* line)
+void Compiler::eeDispLineInfo(const boundariesDsc* line)
{
printf("IL offs ");
@@ -996,7 +993,7 @@ void Compiler::eeDispLineInfo(const boundariesDsc* line)
assert((line->sourceReason & ~(ICorDebugInfo::STACK_EMPTY | ICorDebugInfo::CALL_INSTRUCTION)) == 0);
}
-void Compiler::eeDispLineInfos()
+void Compiler::eeDispLineInfos()
{
printf("IP mapping count : %d\n", eeBoundariesCount); // this might be zero
for (unsigned i = 0; i < eeBoundariesCount; i++)
@@ -1016,17 +1013,13 @@ void Compiler::eeDispLineInfos()
* (e.g., host AMD64, target ARM64), then VM will get confused anyway.
*/
-void Compiler::eeReserveUnwindInfo(BOOL isFunclet,
- BOOL isColdCode,
- ULONG unwindSize)
+void Compiler::eeReserveUnwindInfo(BOOL isFunclet, BOOL isColdCode, ULONG unwindSize)
{
#ifdef DEBUG
if (verbose)
{
- printf("reserveUnwindInfo(isFunclet=%s, isColdCode=%s, unwindSize=0x%x)\n",
- isFunclet ? "TRUE" : "FALSE",
- isColdCode ? "TRUE" : "FALSE",
- unwindSize);
+ printf("reserveUnwindInfo(isFunclet=%s, isColdCode=%s, unwindSize=0x%x)\n", isFunclet ? "TRUE" : "FALSE",
+ isColdCode ? "TRUE" : "FALSE", unwindSize);
}
#endif // DEBUG
@@ -1036,31 +1029,34 @@ void Compiler::eeReserveUnwindInfo(BOOL isFunclet,
}
}
-void Compiler::eeAllocUnwindInfo(BYTE* pHotCode,
- BYTE* pColdCode,
- ULONG startOffset,
- ULONG endOffset,
- ULONG unwindSize,
- BYTE* pUnwindBlock,
- CorJitFuncKind funcKind)
+void Compiler::eeAllocUnwindInfo(BYTE* pHotCode,
+ BYTE* pColdCode,
+ ULONG startOffset,
+ ULONG endOffset,
+ ULONG unwindSize,
+ BYTE* pUnwindBlock,
+ CorJitFuncKind funcKind)
{
#ifdef DEBUG
if (verbose)
{
- printf("allocUnwindInfo(pHotCode=0x%p, pColdCode=0x%p, startOffset=0x%x, endOffset=0x%x, unwindSize=0x%x, pUnwindBlock=0x%p, funKind=%d",
- dspPtr(pHotCode),
- dspPtr(pColdCode),
- startOffset,
- endOffset,
- unwindSize,
- dspPtr(pUnwindBlock),
- funcKind);
+ printf("allocUnwindInfo(pHotCode=0x%p, pColdCode=0x%p, startOffset=0x%x, endOffset=0x%x, unwindSize=0x%x, "
+ "pUnwindBlock=0x%p, funKind=%d",
+ dspPtr(pHotCode), dspPtr(pColdCode), startOffset, endOffset, unwindSize, dspPtr(pUnwindBlock), funcKind);
switch (funcKind)
{
- case CORJIT_FUNC_ROOT: printf(" (main function)"); break;
- case CORJIT_FUNC_HANDLER: printf(" (handler)"); break;
- case CORJIT_FUNC_FILTER: printf(" (filter)"); break;
- default: printf(" (ILLEGAL)"); break;
+ case CORJIT_FUNC_ROOT:
+ printf(" (main function)");
+ break;
+ case CORJIT_FUNC_HANDLER:
+ printf(" (handler)");
+ break;
+ case CORJIT_FUNC_FILTER:
+ printf(" (filter)");
+ break;
+ default:
+ printf(" (ILLEGAL)");
+ break;
}
printf(")\n");
}
@@ -1068,18 +1064,12 @@ void Compiler::eeAllocUnwindInfo(BYTE* pHotCode,
if (info.compMatchedVM)
{
- info.compCompHnd->allocUnwindInfo(
- pHotCode,
- pColdCode,
- startOffset,
- endOffset,
- unwindSize,
- pUnwindBlock,
- funcKind);
+ info.compCompHnd->allocUnwindInfo(pHotCode, pColdCode, startOffset, endOffset, unwindSize, pUnwindBlock,
+ funcKind);
}
}
-void Compiler::eeSetEHcount(unsigned cEH)
+void Compiler::eeSetEHcount(unsigned cEH)
{
#ifdef DEBUG
if (verbose)
@@ -1094,8 +1084,7 @@ void Compiler::eeSetEHcount(unsigned cEH)
}
}
-void Compiler::eeSetEHinfo(unsigned EHnumber,
- const CORINFO_EH_CLAUSE *clause)
+void Compiler::eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause)
{
#ifdef DEBUG
if (opts.dspEHTable)
@@ -1110,7 +1099,7 @@ void Compiler::eeSetEHinfo(unsigned EHnumber,
}
}
-WORD Compiler::eeGetRelocTypeHint(void * target)
+WORD Compiler::eeGetRelocTypeHint(void* target)
{
if (info.compMatchedVM)
{
@@ -1123,7 +1112,6 @@ WORD Compiler::eeGetRelocTypeHint(void * target)
}
}
-
CORINFO_FIELD_HANDLE Compiler::eeFindJitDataOffs(unsigned dataOffs)
{
// Data offsets are marked by the fact that the low two bits are 0b01 0x1
@@ -1137,14 +1125,14 @@ bool Compiler::eeIsJitDataOffs(CORINFO_FIELD_HANDLE field)
unsigned value = static_cast<unsigned>(reinterpret_cast<uintptr_t>(field));
if (((CORINFO_FIELD_HANDLE)(size_t)value) != field)
{
- return false; // upper bits were set, not a jit data offset
+ return false; // upper bits were set, not a jit data offset
}
// Data offsets are marked by the fact that the low two bits are 0b01 0x1
return (value & iaut_MASK) == iaut_DATA_OFFSET;
}
-int Compiler::eeGetJitDataOffs(CORINFO_FIELD_HANDLE field)
+int Compiler::eeGetJitDataOffs(CORINFO_FIELD_HANDLE field)
{
// Data offsets are marked by the fact that the low two bits are 0b01 0x1
if (eeIsJitDataOffs(field))
@@ -1160,7 +1148,6 @@ int Compiler::eeGetJitDataOffs(CORINFO_FIELD_HANDLE field)
}
}
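The field-handle tagging used by eeFindJitDataOffs, eeIsJitDataOffs and eeGetJitDataOffs above is a standard pointer-tagging trick. A self-contained sketch follows; the shift amount, constants and helper names are invented for illustration and are not the JIT's iaut_* values.

    #include <cassert>
    #include <cstdint>

    // Hypothetical scheme: shift the offset left by two and set the low bits to
    // 0b01, so a tagged data offset can never collide with an aligned handle
    // (whose low two bits are 0b00).
    const uintptr_t kTagMask       = 0x3;
    const uintptr_t kDataOffsetTag = 0x1;

    uintptr_t TagDataOffset(uintptr_t dataOffs)
    {
        return (dataOffs << 2) | kDataOffsetTag;
    }

    bool IsTaggedDataOffset(uintptr_t value)
    {
        return (value & kTagMask) == kDataOffsetTag;
    }

    uintptr_t UntagDataOffset(uintptr_t value)
    {
        assert(IsTaggedDataOffset(value));
        return value >> 2;
    }

    int main()
    {
        uintptr_t handle = TagDataOffset(40);
        assert(IsTaggedDataOffset(handle) && UntagDataOffset(handle) == 40);
        return 0;
    }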
-
/*****************************************************************************
*
* ICorStaticInfo wrapper functions
@@ -1173,21 +1160,40 @@ void Compiler::dumpSystemVClassificationType(SystemVClassificationType ct)
{
switch (ct)
{
- case SystemVClassificationTypeUnknown: printf("UNKNOWN"); break;
- case SystemVClassificationTypeStruct: printf("Struct"); break;
- case SystemVClassificationTypeNoClass: printf("NoClass"); break;
- case SystemVClassificationTypeMemory: printf("Memory"); break;
- case SystemVClassificationTypeInteger: printf("Integer"); break;
- case SystemVClassificationTypeIntegerReference: printf("IntegerReference"); break;
- case SystemVClassificationTypeIntegerByRef: printf("IntegerByReference"); break;
- case SystemVClassificationTypeSSE: printf("SSE"); break;
- default: printf("ILLEGAL"); break;
+ case SystemVClassificationTypeUnknown:
+ printf("UNKNOWN");
+ break;
+ case SystemVClassificationTypeStruct:
+ printf("Struct");
+ break;
+ case SystemVClassificationTypeNoClass:
+ printf("NoClass");
+ break;
+ case SystemVClassificationTypeMemory:
+ printf("Memory");
+ break;
+ case SystemVClassificationTypeInteger:
+ printf("Integer");
+ break;
+ case SystemVClassificationTypeIntegerReference:
+ printf("IntegerReference");
+ break;
+ case SystemVClassificationTypeIntegerByRef:
+ printf("IntegerByReference");
+ break;
+ case SystemVClassificationTypeSSE:
+ printf("SSE");
+ break;
+ default:
+ printf("ILLEGAL");
+ break;
}
}
#endif // DEBUG
-void Compiler::eeGetSystemVAmd64PassStructInRegisterDescriptor(/*IN*/ CORINFO_CLASS_HANDLE structHnd,
- /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr)
+void Compiler::eeGetSystemVAmd64PassStructInRegisterDescriptor(
+ /*IN*/ CORINFO_CLASS_HANDLE structHnd,
+ /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr)
{
bool ok = info.compCompHnd->getSystemVAmd64PassStructInRegisterDescriptor(structHnd, structPassInRegDescPtr);
noway_assert(ok);
@@ -1195,7 +1201,8 @@ void Compiler::eeGetSystemVAmd64PassStructInRegisterDescriptor(/*IN*/ CORINFO_C
#ifdef DEBUG
if (verbose)
{
- printf("**** getSystemVAmd64PassStructInRegisterDescriptor(0x%x (%s), ...) =>\n", dspPtr(structHnd), eeGetClassName(structHnd));
+ printf("**** getSystemVAmd64PassStructInRegisterDescriptor(0x%x (%s), ...) =>\n", dspPtr(structHnd),
+ eeGetClassName(structHnd));
printf(" passedInRegisters = %s\n", dspBool(structPassInRegDescPtr->passedInRegisters));
if (structPassInRegDescPtr->passedInRegisters)
{
@@ -1204,9 +1211,8 @@ void Compiler::eeGetSystemVAmd64PassStructInRegisterDescriptor(/*IN*/ CORINFO_C
{
printf(" eightByte #%d -- classification: ", i);
dumpSystemVClassificationType(structPassInRegDescPtr->eightByteClassifications[i]);
- printf(", byteSize: %d, byteOffset: %d\n",
- structPassInRegDescPtr->eightByteSizes[i],
- structPassInRegDescPtr->eightByteOffsets[i]);
+ printf(", byteSize: %d, byteOffset: %d\n", structPassInRegDescPtr->eightByteSizes[i],
+ structPassInRegDescPtr->eightByteOffsets[i]);
}
}
}
@@ -1227,32 +1233,32 @@ static bool isValidTokenForTryResolveToken(ICorJitInfo* corInfo, CORINFO_RESOLVE
CorInfoTokenKind tokenType = resolvedToken->tokenType;
switch (TypeFromToken(resolvedToken->token))
{
- case mdtModuleRef:
- case mdtTypeDef:
- case mdtTypeRef:
- case mdtTypeSpec:
- if ((tokenType & CORINFO_TOKENKIND_Class) == 0)
- return false;
- break;
-
- case mdtMethodDef:
- case mdtMethodSpec:
- if ((tokenType & CORINFO_TOKENKIND_Method) == 0)
- return false;
- break;
-
- case mdtFieldDef:
- if ((tokenType & CORINFO_TOKENKIND_Field) == 0)
+ case mdtModuleRef:
+ case mdtTypeDef:
+ case mdtTypeRef:
+ case mdtTypeSpec:
+ if ((tokenType & CORINFO_TOKENKIND_Class) == 0)
+ return false;
+ break;
+
+ case mdtMethodDef:
+ case mdtMethodSpec:
+ if ((tokenType & CORINFO_TOKENKIND_Method) == 0)
+ return false;
+ break;
+
+ case mdtFieldDef:
+ if ((tokenType & CORINFO_TOKENKIND_Field) == 0)
+ return false;
+ break;
+
+ case mdtMemberRef:
+ if ((tokenType & (CORINFO_TOKENKIND_Method | CORINFO_TOKENKIND_Field)) == 0)
+ return false;
+ break;
+
+ default:
return false;
- break;
-
- case mdtMemberRef:
- if ((tokenType & (CORINFO_TOKENKIND_Method | CORINFO_TOKENKIND_Field)) == 0)
- return false;
- break;
-
- default:
- return false;
}
return true;
@@ -1262,10 +1268,10 @@ static bool isValidTokenForTryResolveToken(ICorJitInfo* corInfo, CORINFO_RESOLVE
// `eeTryResolveToken` below.
struct TryResolveTokenFilterParam
{
- ICorJitInfo* m_corInfo;
+ ICorJitInfo* m_corInfo;
CORINFO_RESOLVED_TOKEN* m_resolvedToken;
- EXCEPTION_POINTERS m_exceptionPointers;
- bool m_success;
+ EXCEPTION_POINTERS m_exceptionPointers;
+ bool m_success;
};
LONG TryResolveTokenFilter(struct _EXCEPTION_POINTERS* exceptionPointers, void* theParam)
@@ -1292,9 +1298,9 @@ LONG TryResolveTokenFilter(struct _EXCEPTION_POINTERS* exceptionPointers, void*
bool Compiler::eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken)
{
TryResolveTokenFilterParam param;
- param.m_corInfo = info.compCompHnd;
+ param.m_corInfo = info.compCompHnd;
param.m_resolvedToken = resolvedToken;
- param.m_success = true;
+ param.m_success = true;
PAL_TRY(TryResolveTokenFilterParam*, pParam, &param)
{
@@ -1316,17 +1322,17 @@ bool Compiler::eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken)
struct TrapParam
{
- ICorJitInfo* m_corInfo;
+ ICorJitInfo* m_corInfo;
EXCEPTION_POINTERS m_exceptionPointers;
void (*m_function)(void*);
void* m_param;
- bool m_success;
+ bool m_success;
};
static LONG __EEFilter(PEXCEPTION_POINTERS exceptionPointers, void* param)
{
- auto* trapParam = reinterpret_cast<TrapParam*>(param);
+ auto* trapParam = reinterpret_cast<TrapParam*>(param);
trapParam->m_exceptionPointers = *exceptionPointers;
return trapParam->m_corInfo->FilterException(exceptionPointers);
}
@@ -1334,10 +1340,10 @@ static LONG __EEFilter(PEXCEPTION_POINTERS exceptionPointers, void* param)
bool Compiler::eeRunWithErrorTrapImp(void (*function)(void*), void* param)
{
TrapParam trapParam;
- trapParam.m_corInfo = info.compCompHnd;
+ trapParam.m_corInfo = info.compCompHnd;
trapParam.m_function = function;
- trapParam.m_param = param;
- trapParam.m_success = true;
+ trapParam.m_param = param;
+ trapParam.m_success = true;
PAL_TRY(TrapParam*, __trapParam, &trapParam)
{
@@ -1354,7 +1360,7 @@ bool Compiler::eeRunWithErrorTrapImp(void (*function)(void*), void* param)
}
#else // CORJIT_EE_VER <= 460
-
+
bool Compiler::eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken)
{
return info.compCompHnd->tryResolveToken(resolvedToken);
@@ -1377,10 +1383,9 @@ bool Compiler::eeRunWithErrorTrapImp(void (*function)(void*), void* param)
/*****************************************************************************/
// static helper names - constant array
-const char* jitHlpFuncTable[CORINFO_HELP_COUNT] =
-{
-#define JITHELPER(code, pfnHelper, sig) #code,
-#define DYNAMICJITHELPER(code, pfnHelper,sig) #code,
+const char* jitHlpFuncTable[CORINFO_HELP_COUNT] = {
+#define JITHELPER(code, pfnHelper, sig) #code,
+#define DYNAMICJITHELPER(code, pfnHelper, sig) #code,
#include "jithelpers.h"
};
@@ -1392,42 +1397,42 @@ const char* jitHlpFuncTable[CORINFO_HELP_COUNT] =
struct FilterSuperPMIExceptionsParam_ee_il
{
- Compiler* pThis;
- Compiler::Info* pJitInfo;
- CORINFO_FIELD_HANDLE field;
- CORINFO_METHOD_HANDLE method;
- CORINFO_CLASS_HANDLE clazz;
- const char** classNamePtr;
- const char* fieldOrMethodOrClassNamePtr;
- EXCEPTION_POINTERS exceptionPointers;
+ Compiler* pThis;
+ Compiler::Info* pJitInfo;
+ CORINFO_FIELD_HANDLE field;
+ CORINFO_METHOD_HANDLE method;
+ CORINFO_CLASS_HANDLE clazz;
+ const char** classNamePtr;
+ const char* fieldOrMethodOrClassNamePtr;
+ EXCEPTION_POINTERS exceptionPointers;
};
static LONG FilterSuperPMIExceptions_ee_il(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
- FilterSuperPMIExceptionsParam_ee_il *pSPMIEParam =
- (FilterSuperPMIExceptionsParam_ee_il *)lpvParam;
- pSPMIEParam->exceptionPointers = *pExceptionPointers;
+ FilterSuperPMIExceptionsParam_ee_il* pSPMIEParam = (FilterSuperPMIExceptionsParam_ee_il*)lpvParam;
+ pSPMIEParam->exceptionPointers = *pExceptionPointers;
if (pSPMIEParam->pThis->IsSuperPMIException(pExceptionPointers->ExceptionRecord->ExceptionCode))
+ {
return EXCEPTION_EXECUTE_HANDLER;
-
+ }
+
return EXCEPTION_CONTINUE_SEARCH;
}
-const char* Compiler::eeGetMethodName(CORINFO_METHOD_HANDLE method,
- const char** classNamePtr)
+const char* Compiler::eeGetMethodName(CORINFO_METHOD_HANDLE method, const char** classNamePtr)
{
- if (eeGetHelperNum(method))
+ if (eeGetHelperNum(method))
{
- if (classNamePtr != 0)
+ if (classNamePtr != nullptr)
+ {
*classNamePtr = "HELPER";
-
+ }
CorInfoHelpFunc ftnNum = eeGetHelperNum(method);
- const char* name = info.compCompHnd->getHelperName(ftnNum);
+ const char* name = info.compCompHnd->getHelperName(ftnNum);
// If it's something unknown from a RET VM, or from SuperPMI, then use our own helper name table.
- if ((strcmp(name, "AnyJITHelper") == 0) ||
- (strcmp(name, "Yickish helper name") == 0))
+ if ((strcmp(name, "AnyJITHelper") == 0) || (strcmp(name, "Yickish helper name") == 0))
{
if (ftnNum < CORINFO_HELP_COUNT)
{
@@ -1439,21 +1444,24 @@ const char* Compiler::eeGetMethodName(CORINFO_METHOD_HANDLE method
if (eeIsNativeMethod(method))
{
- if (classNamePtr != 0)
- *classNamePtr = "NATIVE";
+ if (classNamePtr != nullptr)
+ {
+ *classNamePtr = "NATIVE";
+ }
method = eeGetMethodHandleForNative(method);
}
FilterSuperPMIExceptionsParam_ee_il param;
- param.pThis = this;
- param.pJitInfo = &info;
- param.method = method;
+ param.pThis = this;
+ param.pJitInfo = &info;
+ param.method = method;
param.classNamePtr = classNamePtr;
- PAL_TRY(FilterSuperPMIExceptionsParam_ee_il *, pParam, &param)
+ PAL_TRY(FilterSuperPMIExceptionsParam_ee_il*, pParam, &param)
{
- pParam->fieldOrMethodOrClassNamePtr = pParam->pJitInfo->compCompHnd->getMethodName(pParam->method, pParam->classNamePtr);
+ pParam->fieldOrMethodOrClassNamePtr =
+ pParam->pJitInfo->compCompHnd->getMethodName(pParam->method, pParam->classNamePtr);
}
PAL_EXCEPT_FILTER(FilterSuperPMIExceptions_ee_il)
{
@@ -1469,19 +1477,19 @@ const char* Compiler::eeGetMethodName(CORINFO_METHOD_HANDLE method
return param.fieldOrMethodOrClassNamePtr;
}
-const char * Compiler::eeGetFieldName (CORINFO_FIELD_HANDLE field,
- const char * * classNamePtr)
+const char* Compiler::eeGetFieldName(CORINFO_FIELD_HANDLE field, const char** classNamePtr)
{
FilterSuperPMIExceptionsParam_ee_il param;
- param.pThis = this;
- param.pJitInfo = &info;
- param.field = field;
+ param.pThis = this;
+ param.pJitInfo = &info;
+ param.field = field;
param.classNamePtr = classNamePtr;
- PAL_TRY(FilterSuperPMIExceptionsParam_ee_il *, pParam, &param)
+ PAL_TRY(FilterSuperPMIExceptionsParam_ee_il*, pParam, &param)
{
- pParam->fieldOrMethodOrClassNamePtr = pParam->pJitInfo->compCompHnd->getFieldName(pParam->field, pParam->classNamePtr);
+ pParam->fieldOrMethodOrClassNamePtr =
+ pParam->pJitInfo->compCompHnd->getFieldName(pParam->field, pParam->classNamePtr);
}
PAL_EXCEPT_FILTER(FilterSuperPMIExceptions_ee_il)
{
@@ -1492,15 +1500,15 @@ const char * Compiler::eeGetFieldName (CORINFO_FIELD_HANDLE field,
return param.fieldOrMethodOrClassNamePtr;
}
-const char* Compiler::eeGetClassName(CORINFO_CLASS_HANDLE clsHnd)
+const char* Compiler::eeGetClassName(CORINFO_CLASS_HANDLE clsHnd)
{
FilterSuperPMIExceptionsParam_ee_il param;
- param.pThis = this;
+ param.pThis = this;
param.pJitInfo = &info;
- param.clazz = clsHnd;
+ param.clazz = clsHnd;
- PAL_TRY(FilterSuperPMIExceptionsParam_ee_il *, pParam, &param)
+ PAL_TRY(FilterSuperPMIExceptionsParam_ee_il*, pParam, &param)
{
pParam->fieldOrMethodOrClassNamePtr = pParam->pJitInfo->compCompHnd->getClassName(pParam->clazz);
}
@@ -1514,29 +1522,31 @@ const char* Compiler::eeGetClassName(CORINFO_CLASS_HANDLE clsHnd)
#endif // DEBUG || FEATURE_JIT_METHOD_PERF
-
#ifdef DEBUG
-const wchar_t * Compiler::eeGetCPString (size_t strHandle)
+const wchar_t* Compiler::eeGetCPString(size_t strHandle)
{
char buff[512 + sizeof(CORINFO_String)];
- // make this bulletproof, so it works even if we are wrong.
- if (ReadProcessMemory(GetCurrentProcess(), (void*) strHandle, buff, 4, 0) == 0)
- return(0);
+ // make this bulletproof, so it works even if we are wrong.
+ if (ReadProcessMemory(GetCurrentProcess(), (void*)strHandle, buff, 4, nullptr) == 0)
+ {
+ return (nullptr);
+ }
- CORINFO_String* asString = *((CORINFO_String**) strHandle);
+ CORINFO_String* asString = *((CORINFO_String**)strHandle);
- if (ReadProcessMemory(GetCurrentProcess(), asString, buff, sizeof(buff), 0) == 0)
- return(0);
+ if (ReadProcessMemory(GetCurrentProcess(), asString, buff, sizeof(buff), nullptr) == 0)
+ {
+ return (nullptr);
+ }
- if (asString->stringLen >= 255 ||
- asString->chars[asString->stringLen] != 0 )
+ if (asString->stringLen >= 255 || asString->chars[asString->stringLen] != 0)
{
- return 0;
+ return nullptr;
}
- return(asString->chars);
+ return (asString->chars);
}
#endif // DEBUG
diff --git a/src/jit/ee_il_dll.hpp b/src/jit/ee_il_dll.hpp
index b1e0327d6b..d9bf95fde8 100644
--- a/src/jit/ee_il_dll.hpp
+++ b/src/jit/ee_il_dll.hpp
@@ -4,24 +4,22 @@
extern ICorJitHost* g_jitHost;
-class CILJit: public ICorJitCompiler
+class CILJit : public ICorJitCompiler
{
- CorJitResult __stdcall compileMethod (
- ICorJitInfo* comp, /* IN */
- CORINFO_METHOD_INFO*methodInfo, /* IN */
- unsigned flags, /* IN */
- BYTE ** nativeEntry, /* OUT */
- ULONG * nativeSizeOfCode /* OUT */
- );
+ CorJitResult __stdcall compileMethod(ICorJitInfo* comp, /* IN */
+ CORINFO_METHOD_INFO* methodInfo, /* IN */
+ unsigned flags, /* IN */
+ BYTE** nativeEntry, /* OUT */
+ ULONG* nativeSizeOfCode /* OUT */
+ );
- void clearCache( void );
- BOOL isCacheCleanupRequired( void );
+ void clearCache(void);
+ BOOL isCacheCleanupRequired(void);
void ProcessShutdownWork(ICorStaticInfo* statInfo);
- void getVersionIdentifier(
- GUID* versionIdentifier /* OUT */
- );
+ void getVersionIdentifier(GUID* versionIdentifier /* OUT */
+ );
unsigned getMaxIntrinsicSIMDVectorLength(DWORD cpuCompileFlags);
@@ -34,21 +32,20 @@ class CILJit: public ICorJitCompiler
*/
FORCEINLINE
-void Compiler::eeGetCallInfo (CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_RESOLVED_TOKEN * pConstrainedToken,
- CORINFO_CALLINFO_FLAGS flags,
- CORINFO_CALL_INFO* pResult)
+void Compiler::eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_RESOLVED_TOKEN* pConstrainedToken,
+ CORINFO_CALLINFO_FLAGS flags,
+ CORINFO_CALL_INFO* pResult)
{
info.compCompHnd->getCallInfo(pResolvedToken, pConstrainedToken, info.compMethodHnd, flags, pResult);
}
FORCEINLINE
-void Compiler::eeGetFieldInfo(CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_ACCESS_FLAGS accessFlags,
- CORINFO_FIELD_INFO *pResult)
+void Compiler::eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_ACCESS_FLAGS accessFlags,
+ CORINFO_FIELD_INFO* pResult)
{
- info.compCompHnd->getFieldInfo(pResolvedToken,
- info.compMethodHnd, accessFlags, pResult);
+ info.compCompHnd->getFieldInfo(pResolvedToken, info.compMethodHnd, accessFlags, pResult);
}
/*****************************************************************************
@@ -57,30 +54,28 @@ void Compiler::eeGetFieldInfo(CORINFO_RESOLVED_TOKEN * pR
*/
FORCEINLINE
-BOOL Compiler::eeIsValueClass (CORINFO_CLASS_HANDLE clsHnd)
+BOOL Compiler::eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd)
{
return info.compCompHnd->isValueClass(clsHnd);
}
FORCEINLINE
-void Compiler::eeGetSig (unsigned sigTok,
- CORINFO_MODULE_HANDLE scope,
- CORINFO_CONTEXT_HANDLE context,
- CORINFO_SIG_INFO* retSig)
+void Compiler::eeGetSig(unsigned sigTok,
+ CORINFO_MODULE_HANDLE scope,
+ CORINFO_CONTEXT_HANDLE context,
+ CORINFO_SIG_INFO* retSig)
{
info.compCompHnd->findSig(scope, sigTok, context, retSig);
- assert(!varTypeIsComposite(JITtype2varType(retSig->retType)) || retSig->retTypeClass != NULL);
+ assert(!varTypeIsComposite(JITtype2varType(retSig->retType)) || retSig->retTypeClass != nullptr);
}
FORCEINLINE
-void Compiler::eeGetMethodSig (CORINFO_METHOD_HANDLE methHnd,
- CORINFO_SIG_INFO* sigRet,
- CORINFO_CLASS_HANDLE owner)
+void Compiler::eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sigRet, CORINFO_CLASS_HANDLE owner)
{
- info.compCompHnd->getMethodSig(methHnd, sigRet,owner);
+ info.compCompHnd->getMethodSig(methHnd, sigRet, owner);
- assert(!varTypeIsComposite(JITtype2varType(sigRet->retType)) || sigRet->retTypeClass != NULL);
+ assert(!varTypeIsComposite(JITtype2varType(sigRet->retType)) || sigRet->retTypeClass != nullptr);
}
/**********************************************************************
@@ -88,32 +83,29 @@ void Compiler::eeGetMethodSig (CORINFO_METHOD_HANDLE methHnd
*/
FORCEINLINE
-void Compiler::eeGetCallSiteSig (unsigned sigTok,
- CORINFO_MODULE_HANDLE scope,
- CORINFO_CONTEXT_HANDLE context,
- CORINFO_SIG_INFO* sigRet)
+void Compiler::eeGetCallSiteSig(unsigned sigTok,
+ CORINFO_MODULE_HANDLE scope,
+ CORINFO_CONTEXT_HANDLE context,
+ CORINFO_SIG_INFO* sigRet)
{
info.compCompHnd->findCallSiteSig(scope, sigTok, context, sigRet);
- assert(!varTypeIsComposite(JITtype2varType(sigRet->retType)) || sigRet->retTypeClass != NULL);
+ assert(!varTypeIsComposite(JITtype2varType(sigRet->retType)) || sigRet->retTypeClass != nullptr);
}
/*****************************************************************************/
-inline
-var_types Compiler::eeGetArgType (CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig)
+inline var_types Compiler::eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig)
{
- CORINFO_CLASS_HANDLE argClass;
- return(JITtype2varType(strip(info.compCompHnd->getArgType(sig, list, &argClass))));
-
+ CORINFO_CLASS_HANDLE argClass;
+ return (JITtype2varType(strip(info.compCompHnd->getArgType(sig, list, &argClass))));
}
/*****************************************************************************/
-inline
-var_types Compiler::eeGetArgType (CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned)
+inline var_types Compiler::eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned)
{
- CORINFO_CLASS_HANDLE argClass;
- CorInfoTypeWithMod type = info.compCompHnd->getArgType(sig, list, &argClass);
- *isPinned = ((type & ~CORINFO_TYPE_MASK) != 0);
+ CORINFO_CLASS_HANDLE argClass;
+ CorInfoTypeWithMod type = info.compCompHnd->getArgType(sig, list, &argClass);
+ *isPinned = ((type & ~CORINFO_TYPE_MASK) != 0);
return JITtype2varType(strip(type));
}
@@ -122,8 +114,7 @@ var_types Compiler::eeGetArgType (CORINFO_ARG_LIST_HANDLE list,
* Native Direct Optimizations
*/
-inline
-CORINFO_EE_INFO *Compiler::eeGetEEInfo()
+inline CORINFO_EE_INFO* Compiler::eeGetEEInfo()
{
if (!eeInfoInitialized)
{
@@ -134,76 +125,74 @@ CORINFO_EE_INFO *Compiler::eeGetEEInfo()
return &eeInfo;
}
-
/*****************************************************************************
*
* Convert the type returned from the VM to a var_type.
*/
-inline
-var_types JITtype2varType(CorInfoType type)
+inline var_types JITtype2varType(CorInfoType type)
{
- static const unsigned char varTypeMap[CORINFO_TYPE_COUNT] =
- { // see the definition of enum CorInfoType in file inc/corinfo.h
- TYP_UNDEF, // CORINFO_TYPE_UNDEF = 0x0,
- TYP_VOID, // CORINFO_TYPE_VOID = 0x1,
- TYP_BOOL, // CORINFO_TYPE_BOOL = 0x2,
- TYP_CHAR, // CORINFO_TYPE_CHAR = 0x3,
- TYP_BYTE, // CORINFO_TYPE_BYTE = 0x4,
- TYP_UBYTE, // CORINFO_TYPE_UBYTE = 0x5,
- TYP_SHORT, // CORINFO_TYPE_SHORT = 0x6,
- TYP_CHAR, // CORINFO_TYPE_USHORT = 0x7,
- TYP_INT, // CORINFO_TYPE_INT = 0x8,
- TYP_INT, // CORINFO_TYPE_UINT = 0x9,
- TYP_LONG, // CORINFO_TYPE_LONG = 0xa,
- TYP_LONG, // CORINFO_TYPE_ULONG = 0xb,
- TYP_I_IMPL, // CORINFO_TYPE_NATIVEINT = 0xc,
- TYP_I_IMPL, // CORINFO_TYPE_NATIVEUINT = 0xd,
- TYP_FLOAT, // CORINFO_TYPE_FLOAT = 0xe,
- TYP_DOUBLE, // CORINFO_TYPE_DOUBLE = 0xf,
- TYP_REF, // CORINFO_TYPE_STRING = 0x10, // Not used, should remove
- TYP_I_IMPL, // CORINFO_TYPE_PTR = 0x11,
- TYP_BYREF, // CORINFO_TYPE_BYREF = 0x12,
- TYP_STRUCT, // CORINFO_TYPE_VALUECLASS = 0x13,
- TYP_REF, // CORINFO_TYPE_CLASS = 0x14,
- TYP_STRUCT, // CORINFO_TYPE_REFANY = 0x15,
-
- // Generic type variables only appear when we're doing
- // verification of generic code, in which case we're running
- // in "import only" mode. Annoyingly the "import only"
- // mode of the JIT actually does a fair bit of compilation,
- // so we have to trick the compiler into thinking it's compiling
- // a real instantiation. We do that by just pretending we're
- // compiling the "object" instantiation of the code, i.e. by
- // turing all generic type variables refs, except for a few
- // choice places to do with verification, where we use
- // verification types and CLASS_HANDLEs to track the difference.
-
- TYP_REF, // CORINFO_TYPE_VAR = 0x16,
+ static const unsigned char varTypeMap[CORINFO_TYPE_COUNT] = {
+ // see the definition of enum CorInfoType in file inc/corinfo.h
+ TYP_UNDEF, // CORINFO_TYPE_UNDEF = 0x0,
+ TYP_VOID, // CORINFO_TYPE_VOID = 0x1,
+ TYP_BOOL, // CORINFO_TYPE_BOOL = 0x2,
+ TYP_CHAR, // CORINFO_TYPE_CHAR = 0x3,
+ TYP_BYTE, // CORINFO_TYPE_BYTE = 0x4,
+ TYP_UBYTE, // CORINFO_TYPE_UBYTE = 0x5,
+ TYP_SHORT, // CORINFO_TYPE_SHORT = 0x6,
+ TYP_CHAR, // CORINFO_TYPE_USHORT = 0x7,
+ TYP_INT, // CORINFO_TYPE_INT = 0x8,
+ TYP_INT, // CORINFO_TYPE_UINT = 0x9,
+ TYP_LONG, // CORINFO_TYPE_LONG = 0xa,
+ TYP_LONG, // CORINFO_TYPE_ULONG = 0xb,
+ TYP_I_IMPL, // CORINFO_TYPE_NATIVEINT = 0xc,
+ TYP_I_IMPL, // CORINFO_TYPE_NATIVEUINT = 0xd,
+ TYP_FLOAT, // CORINFO_TYPE_FLOAT = 0xe,
+ TYP_DOUBLE, // CORINFO_TYPE_DOUBLE = 0xf,
+ TYP_REF, // CORINFO_TYPE_STRING = 0x10, // Not used, should remove
+ TYP_I_IMPL, // CORINFO_TYPE_PTR = 0x11,
+ TYP_BYREF, // CORINFO_TYPE_BYREF = 0x12,
+ TYP_STRUCT, // CORINFO_TYPE_VALUECLASS = 0x13,
+ TYP_REF, // CORINFO_TYPE_CLASS = 0x14,
+ TYP_STRUCT, // CORINFO_TYPE_REFANY = 0x15,
+
+ // Generic type variables only appear when we're doing
+ // verification of generic code, in which case we're running
+ // in "import only" mode. Annoyingly the "import only"
+ // mode of the JIT actually does a fair bit of compilation,
+ // so we have to trick the compiler into thinking it's compiling
+ // a real instantiation. We do that by just pretending we're
+ // compiling the "object" instantiation of the code, i.e. by
+ // turing all generic type variables refs, except for a few
+ // choice places to do with verification, where we use
+ // verification types and CLASS_HANDLEs to track the difference.
+
+ TYP_REF, // CORINFO_TYPE_VAR = 0x16,
};
// spot check to make certain enumerations have not changed
- assert(varTypeMap[CORINFO_TYPE_CLASS] == TYP_REF );
- assert(varTypeMap[CORINFO_TYPE_BYREF] == TYP_BYREF );
- assert(varTypeMap[CORINFO_TYPE_PTR] == TYP_I_IMPL);
- assert(varTypeMap[CORINFO_TYPE_INT] == TYP_INT );
- assert(varTypeMap[CORINFO_TYPE_UINT] == TYP_INT );
- assert(varTypeMap[CORINFO_TYPE_DOUBLE] == TYP_DOUBLE);
- assert(varTypeMap[CORINFO_TYPE_VOID] == TYP_VOID );
+ assert(varTypeMap[CORINFO_TYPE_CLASS] == TYP_REF);
+ assert(varTypeMap[CORINFO_TYPE_BYREF] == TYP_BYREF);
+ assert(varTypeMap[CORINFO_TYPE_PTR] == TYP_I_IMPL);
+ assert(varTypeMap[CORINFO_TYPE_INT] == TYP_INT);
+ assert(varTypeMap[CORINFO_TYPE_UINT] == TYP_INT);
+ assert(varTypeMap[CORINFO_TYPE_DOUBLE] == TYP_DOUBLE);
+ assert(varTypeMap[CORINFO_TYPE_VOID] == TYP_VOID);
assert(varTypeMap[CORINFO_TYPE_VALUECLASS] == TYP_STRUCT);
- assert(varTypeMap[CORINFO_TYPE_REFANY] == TYP_STRUCT);
+ assert(varTypeMap[CORINFO_TYPE_REFANY] == TYP_STRUCT);
assert(type < CORINFO_TYPE_COUNT);
assert(varTypeMap[type] != TYP_UNDEF);
- return((var_types) varTypeMap[type]);
+ return ((var_types)varTypeMap[type]);
};
inline CORINFO_CALLINFO_FLAGS combine(CORINFO_CALLINFO_FLAGS flag1, CORINFO_CALLINFO_FLAGS flag2)
{
- return (CORINFO_CALLINFO_FLAGS) (flag1 | flag2);
+ return (CORINFO_CALLINFO_FLAGS)(flag1 | flag2);
}
inline CORINFO_CALLINFO_FLAGS Compiler::addVerifyFlag(CORINFO_CALLINFO_FLAGS flags)
{
diff --git a/src/jit/eeinterface.cpp b/src/jit/eeinterface.cpp
index e383862057..d8db947f02 100644
--- a/src/jit/eeinterface.cpp
+++ b/src/jit/eeinterface.cpp
@@ -23,7 +23,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD)
#pragma warning(push)
-#pragma warning(disable:4701) // difficult to get rid of C4701 with 'sig' below
+#pragma warning(disable : 4701) // difficult to get rid of C4701 with 'sig' below
/*****************************************************************************/
@@ -35,30 +35,31 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
struct FilterSuperPMIExceptionsParam_eeinterface
{
- Compiler* pThis;
- Compiler::Info* pJitInfo;
- bool hasThis;
- size_t siglength;
- CORINFO_SIG_INFO sig;
- CORINFO_ARG_LIST_HANDLE argLst;
- CORINFO_METHOD_HANDLE hnd;
- const char* returnType;
- EXCEPTION_POINTERS exceptionPointers;
+ Compiler* pThis;
+ Compiler::Info* pJitInfo;
+ bool hasThis;
+ size_t siglength;
+ CORINFO_SIG_INFO sig;
+ CORINFO_ARG_LIST_HANDLE argLst;
+ CORINFO_METHOD_HANDLE hnd;
+ const char* returnType;
+ EXCEPTION_POINTERS exceptionPointers;
};
static LONG FilterSuperPMIExceptions_eeinterface(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
- FilterSuperPMIExceptionsParam_eeinterface *pSPMIEParam =
- (FilterSuperPMIExceptionsParam_eeinterface *)lpvParam;
- pSPMIEParam->exceptionPointers = *pExceptionPointers;
+ FilterSuperPMIExceptionsParam_eeinterface* pSPMIEParam = (FilterSuperPMIExceptionsParam_eeinterface*)lpvParam;
+ pSPMIEParam->exceptionPointers = *pExceptionPointers;
if (pSPMIEParam->pThis->IsSuperPMIException(pExceptionPointers->ExceptionRecord->ExceptionCode))
+ {
return EXCEPTION_EXECUTE_HANDLER;
+ }
return EXCEPTION_CONTINUE_SEARCH;
}
-const char* Compiler::eeGetMethodFullName (CORINFO_METHOD_HANDLE hnd)
+const char* Compiler::eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd)
{
const char* className;
const char* methodName = eeGetMethodName(hnd, &className);
@@ -69,13 +70,13 @@ const char* Compiler::eeGetMethodFullName (CORINFO_METHOD_HANDLE hnd)
FilterSuperPMIExceptionsParam_eeinterface param;
param.returnType = nullptr;
- param.pThis = this;
- param.hasThis = false;
- param.siglength = 0;
- param.hnd = hnd;
- param.pJitInfo = &info;
+ param.pThis = this;
+ param.hasThis = false;
+ param.siglength = 0;
+ param.hnd = hnd;
+ param.pJitInfo = &info;
- size_t length = 0;
+ size_t length = 0;
unsigned i;
/* Generating the full signature is a two-pass process. First we have to walk
@@ -88,7 +89,9 @@ const char* Compiler::eeGetMethodFullName (CORINFO_METHOD_HANDLE hnd)
/* initialize length with length of className and '.' */
if (className)
- length = strlen(className)+1;
+ {
+ length = strlen(className) + 1;
+ }
else
{
assert(strlen("<NULL>.") == 7);
@@ -100,9 +103,9 @@ const char* Compiler::eeGetMethodFullName (CORINFO_METHOD_HANDLE hnd)
/* figure out the signature */
- EXCEPTION_POINTERS exceptionPointers;
+ EXCEPTION_POINTERS exceptionPointers;
- PAL_TRY(FilterSuperPMIExceptionsParam_eeinterface *, pParam, &param)
+ PAL_TRY(FilterSuperPMIExceptionsParam_eeinterface*, pParam, &param)
{
unsigned i;
pParam->pThis->eeGetMethodSig(pParam->hnd, &pParam->sig);
@@ -119,7 +122,9 @@ const char* Compiler::eeGetMethodFullName (CORINFO_METHOD_HANDLE hnd)
/* add ',' if there is more than one argument */
if (pParam->sig.numArgs > 1)
+ {
pParam->siglength += (pParam->sig.numArgs - 1);
+ }
if (JITtype2varType(pParam->sig.retType) != TYP_VOID)
{
@@ -127,7 +132,8 @@ const char* Compiler::eeGetMethodFullName (CORINFO_METHOD_HANDLE hnd)
pParam->siglength += strlen(pParam->returnType) + 1; // don't forget the delimiter ':'
}
- // Does it have a 'this' pointer? Don't count explicit this, which has the this pointer type as the first element of the arg type list
+ // Does it have a 'this' pointer? Don't count explicit this, which has the this pointer type as the first
+ // element of the arg type list
if (pParam->sig.hasThis() && !pParam->sig.hasExplicitThis())
{
assert(strlen(":this") == 5);
@@ -145,7 +151,7 @@ const char* Compiler::eeGetMethodFullName (CORINFO_METHOD_HANDLE hnd)
length += param.siglength + 2;
- char *retName = (char*)compGetMemA(length, CMK_DebugOnly);
+ char* retName = (char*)compGetMemA(length, CMK_DebugOnly);
/* Now generate the full signature string in the allocated buffer */
@@ -175,7 +181,9 @@ const char* Compiler::eeGetMethodFullName (CORINFO_METHOD_HANDLE hnd)
param.argLst = info.compCompHnd->getArgNext(param.argLst);
if (i + 1 < param.sig.numArgs)
+ {
strcat_s(retName, length, ",");
+ }
}
}
@@ -192,9 +200,9 @@ const char* Compiler::eeGetMethodFullName (CORINFO_METHOD_HANDLE hnd)
strcat_s(retName, length, ":this");
}
- assert(strlen(retName) == (length-1));
+ assert(strlen(retName) == (length - 1));
- return(retName);
+ return (retName);
}
#pragma warning(pop)
diff --git a/src/jit/emit.cpp b/src/jit/emit.cpp
index 6111a2b290..f4128b64b6 100644
--- a/src/jit/emit.cpp
+++ b/src/jit/emit.cpp
@@ -26,7 +26,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* Represent an emitter location.
*/
-void emitLocation::CaptureLocation(emitter* emit)
+void emitLocation::CaptureLocation(emitter* emit)
{
ig = emit->emitCurIG;
codePos = emit->emitCurOffset();
@@ -34,20 +34,19 @@ void emitLocation::CaptureLocation(emitter* emit)
assert(Valid());
}
-bool emitLocation::IsCurrentLocation(emitter* emit) const
+bool emitLocation::IsCurrentLocation(emitter* emit) const
{
assert(Valid());
- return (ig == emit->emitCurIG) &&
- (codePos == emit->emitCurOffset());
+ return (ig == emit->emitCurIG) && (codePos == emit->emitCurOffset());
}
-UNATIVE_OFFSET emitLocation::CodeOffset(emitter* emit) const
+UNATIVE_OFFSET emitLocation::CodeOffset(emitter* emit) const
{
assert(Valid());
return emit->emitCodeOffset(ig, codePos);
}
-int emitLocation::GetInsNum() const
+int emitLocation::GetInsNum() const
{
return emitGetInsNumFromCodePos(codePos);
}
@@ -58,7 +57,7 @@ int emitLocation::GetInsNum() const
// TODO-AMD64-Bug?: We only support a single main function prolog group, but allow for multiple funclet prolog
// groups (not that we actually use that flexibility, since the funclet prolog will be small). How to
// handle that?
-UNATIVE_OFFSET emitLocation::GetFuncletPrologOffset(emitter* emit) const
+UNATIVE_OFFSET emitLocation::GetFuncletPrologOffset(emitter* emit) const
{
assert(ig->igFuncIdx != 0);
assert((ig->igFlags & IGF_FUNCLET_PROLOG) != 0);
@@ -69,7 +68,7 @@ UNATIVE_OFFSET emitLocation::GetFuncletPrologOffset(emitter* emit) const
#endif // _TARGET_AMD64_
#ifdef DEBUG
-void emitLocation::Print() const
+void emitLocation::Print() const
{
unsigned insNum = emitGetInsNumFromCodePos(codePos);
unsigned insOfs = emitGetInsOfsFromCodePos(codePos);
@@ -77,7 +76,6 @@ void emitLocation::Print() const
}
#endif // DEBUG
-
/*****************************************************************************
*
* Return the name of an instruction format.
@@ -85,60 +83,65 @@ void emitLocation::Print() const
#if defined(DEBUG) || EMITTER_STATS
-const char * emitter::emitIfName(unsigned f)
+const char* emitter::emitIfName(unsigned f)
{
- static
- const char * const ifNames[] =
- {
- #define IF_DEF(en, op1, op2) "IF_" #en,
- #include "emitfmts.h"
+ static const char* const ifNames[] = {
+#define IF_DEF(en, op1, op2) "IF_" #en,
+#include "emitfmts.h"
};
- static
- char errBuff[32];
+ static char errBuff[32];
- if (f < sizeof(ifNames)/sizeof(*ifNames))
- return ifNames[f];
+ if (f < sizeof(ifNames) / sizeof(*ifNames))
+ {
+ return ifNames[f];
+ }
sprintf_s(errBuff, sizeof(errBuff), "??%u??", f);
- return errBuff;
+ return errBuff;
}
#endif
-
-#ifdef TRANSLATE_PDB
+#ifdef TRANSLATE_PDB
/* these are protected */
-AddrMap * emitter::emitPDBOffsetTable = 0;
-LocalMap * emitter::emitPDBLocalTable = 0;
-bool emitter::emitIsPDBEnabled = true;
-BYTE * emitter::emitILBaseOfCode = 0;
-BYTE * emitter::emitILMethodBase = 0;
-BYTE * emitter::emitILMethodStart = 0;
-BYTE * emitter::emitImgBaseOfCode = 0;
+AddrMap* emitter::emitPDBOffsetTable = 0;
+LocalMap* emitter::emitPDBLocalTable = 0;
+bool emitter::emitIsPDBEnabled = true;
+BYTE* emitter::emitILBaseOfCode = 0;
+BYTE* emitter::emitILMethodBase = 0;
+BYTE* emitter::emitILMethodStart = 0;
+BYTE* emitter::emitImgBaseOfCode = 0;
-void emitter::MapCode( int ilOffset, BYTE *imgDest )
+void emitter::MapCode(int ilOffset, BYTE* imgDest)
{
- if ( emitIsPDBEnabled )
+ if (emitIsPDBEnabled)
{
- emitPDBOffsetTable->MapSrcToDest( ilOffset, (int)( imgDest - emitImgBaseOfCode ));
+ emitPDBOffsetTable->MapSrcToDest(ilOffset, (int)(imgDest - emitImgBaseOfCode));
}
}
-
-void emitter::MapFunc( int imgOff, int procLen, int dbgStart, int dbgEnd, short frameReg,
- int stkAdjust, int lvaCount, OptJit::LclVarDsc *lvaTable, bool framePtr )
+
+void emitter::MapFunc(int imgOff,
+ int procLen,
+ int dbgStart,
+ int dbgEnd,
+ short frameReg,
+ int stkAdjust,
+ int lvaCount,
+ OptJit::LclVarDsc* lvaTable,
+ bool framePtr)
{
- if ( emitIsPDBEnabled )
+ if (emitIsPDBEnabled)
{
// this code stores information about local symbols for the PDB translation
- assert( lvaCount >=0 ); // don't allow a negative count
+ assert(lvaCount >= 0); // don't allow a negative count
- LvaDesc *rgLvaDesc = 0;
+ LvaDesc* rgLvaDesc = 0;
- if ( lvaCount > 0 )
+ if (lvaCount > 0)
{
rgLvaDesc = new LvaDesc[lvaCount];
@@ -146,62 +149,54 @@ void emitter::MapFunc( int imgOff, int procLen, int dbgStart, int dbgEnd, sh
{
NOMEM();
}
-
- LvaDesc *pDst = rgLvaDesc;
- OptJit::LclVarDsc *pSrc = lvaTable;
- for ( int i = 0; i < lvaCount; ++i, ++pDst, ++pSrc )
+ LvaDesc* pDst = rgLvaDesc;
+ OptJit::LclVarDsc* pSrc = lvaTable;
+ for (int i = 0; i < lvaCount; ++i, ++pDst, ++pSrc)
{
pDst->slotNum = pSrc->lvSlotNum;
pDst->isReg = pSrc->lvRegister;
- pDst->reg = (pSrc->lvRegister ? pSrc->lvRegNum : frameReg );
- pDst->off = pSrc->lvStkOffs + stkAdjust;
+ pDst->reg = (pSrc->lvRegister ? pSrc->lvRegNum : frameReg);
+ pDst->off = pSrc->lvStkOffs + stkAdjust;
}
}
- emitPDBLocalTable->AddFunc( (int)(emitILMethodBase - emitILBaseOfCode),
- imgOff - (int)emitImgBaseOfCode,
- procLen,
- dbgStart - imgOff,
- dbgEnd - imgOff,
- lvaCount,
- rgLvaDesc,
- framePtr );
+ emitPDBLocalTable->AddFunc((int)(emitILMethodBase - emitILBaseOfCode), imgOff - (int)emitImgBaseOfCode, procLen,
+ dbgStart - imgOff, dbgEnd - imgOff, lvaCount, rgLvaDesc, framePtr);
// do not delete rgLvaDesc here -- responsibility is now on emitPDBLocalTable destructor
}
}
-
/* these are public */
-void emitter::SetILBaseOfCode ( BYTE *pTextBase )
+void emitter::SetILBaseOfCode(BYTE* pTextBase)
{
emitILBaseOfCode = pTextBase;
}
-void emitter::SetILMethodBase ( BYTE *pMethodEntry )
+void emitter::SetILMethodBase(BYTE* pMethodEntry)
{
emitILMethodBase = pMethodEntry;
}
-void emitter::SetILMethodStart( BYTE *pMethodCode )
+void emitter::SetILMethodStart(BYTE* pMethodCode)
{
emitILMethodStart = pMethodCode;
}
-void emitter::SetImgBaseOfCode( BYTE *pTextBase )
+void emitter::SetImgBaseOfCode(BYTE* pTextBase)
{
emitImgBaseOfCode = pTextBase;
}
void emitter::SetIDBaseToProlog()
{
- emitInstrDescILBase = (int)( emitILMethodBase - emitILBaseOfCode );
+ emitInstrDescILBase = (int)(emitILMethodBase - emitILBaseOfCode);
}
-void emitter::SetIDBaseToOffset( int methodOffset )
+void emitter::SetIDBaseToOffset(int methodOffset)
{
- emitInstrDescILBase = methodOffset + (int)( emitILMethodStart - emitILBaseOfCode );
+ emitInstrDescILBase = methodOffset + (int)(emitILMethodStart - emitILBaseOfCode);
}
void emitter::DisablePDBTranslation()
@@ -215,41 +210,38 @@ bool emitter::IsPDBEnabled()
return emitIsPDBEnabled;
}
-void emitter::InitTranslationMaps( int ilCodeSize )
+void emitter::InitTranslationMaps(int ilCodeSize)
{
- if ( emitIsPDBEnabled )
+ if (emitIsPDBEnabled)
{
- emitPDBOffsetTable = AddrMap::Create( ilCodeSize );
- emitPDBLocalTable = LocalMap::Create();
+ emitPDBOffsetTable = AddrMap::Create(ilCodeSize);
+ emitPDBLocalTable = LocalMap::Create();
}
}
void emitter::DeleteTranslationMaps()
{
- if ( emitPDBOffsetTable )
+ if (emitPDBOffsetTable)
{
delete emitPDBOffsetTable;
emitPDBOffsetTable = 0;
}
- if ( emitPDBLocalTable )
+ if (emitPDBLocalTable)
{
delete emitPDBLocalTable;
emitPDBLocalTable = 0;
}
}
-void emitter::InitTranslator( PDBRewriter * pPDB,
- int * rgSecMap,
- IMAGE_SECTION_HEADER ** rgpHeader,
- int numSections )
+void emitter::InitTranslator(PDBRewriter* pPDB, int* rgSecMap, IMAGE_SECTION_HEADER** rgpHeader, int numSections)
{
- if ( emitIsPDBEnabled )
+ if (emitIsPDBEnabled)
{
- pPDB->InitMaps( rgSecMap, // new PE section header order
- rgpHeader, // array of section headers
- numSections, // number of sections
- emitPDBOffsetTable, // code offset translation table
- emitPDBLocalTable ); // slot variable translation table
+ pPDB->InitMaps(rgSecMap, // new PE section header order
+ rgpHeader, // array of section headers
+ numSections, // number of sections
+ emitPDBOffsetTable, // code offset translation table
+ emitPDBLocalTable); // slot variable translation table
}
}
@@ -259,107 +251,107 @@ void emitter::InitTranslator( PDBRewriter * pPDB,
#if EMITTER_STATS
-static unsigned totAllocdSize;
-static unsigned totActualSize;
+static unsigned totAllocdSize;
+static unsigned totActualSize;
- unsigned emitter::emitIFcounts[emitter::IF_COUNT];
+unsigned emitter::emitIFcounts[emitter::IF_COUNT];
-static unsigned emitSizeBuckets[] = { 100, 1024*1, 1024*2, 1024*3, 1024*4, 1024*5, 1024*10, 0 };
-static Histogram emitSizeTable(HostAllocator::getHostAllocator(), emitSizeBuckets);
+static unsigned emitSizeBuckets[] = {100, 1024 * 1, 1024 * 2, 1024 * 3, 1024 * 4, 1024 * 5, 1024 * 10, 0};
+static Histogram emitSizeTable(HostAllocator::getHostAllocator(), emitSizeBuckets);
-static unsigned GCrefsBuckets[] = { 0, 1, 2, 5, 10, 20, 50, 128, 256, 512, 1024, 0 };
-static Histogram GCrefsTable(HostAllocator::getHostAllocator(), GCrefsBuckets);
+static unsigned GCrefsBuckets[] = {0, 1, 2, 5, 10, 20, 50, 128, 256, 512, 1024, 0};
+static Histogram GCrefsTable(HostAllocator::getHostAllocator(), GCrefsBuckets);
-static unsigned stkDepthBuckets[] = { 0, 1, 2, 5, 10, 16, 32, 128, 1024, 0 };
-static Histogram stkDepthTable(HostAllocator::getHostAllocator(), stkDepthBuckets);
+static unsigned stkDepthBuckets[] = {0, 1, 2, 5, 10, 16, 32, 128, 1024, 0};
+static Histogram stkDepthTable(HostAllocator::getHostAllocator(), stkDepthBuckets);
-size_t emitter::emitSizeMethod;
+size_t emitter::emitSizeMethod;
-size_t emitter::emitTotMemAlloc;
-unsigned emitter::emitTotalInsCnt;
-unsigned emitter::emitTotalIGcnt;
-unsigned emitter::emitTotalPhIGcnt;
-unsigned emitter::emitTotalIGjmps;
-unsigned emitter::emitTotalIGptrs;
-unsigned emitter::emitTotalIGicnt;
-size_t emitter::emitTotalIGsize;
-unsigned emitter::emitTotalIGmcnt;
+size_t emitter::emitTotMemAlloc;
+unsigned emitter::emitTotalInsCnt;
+unsigned emitter::emitTotalIGcnt;
+unsigned emitter::emitTotalPhIGcnt;
+unsigned emitter::emitTotalIGjmps;
+unsigned emitter::emitTotalIGptrs;
+unsigned emitter::emitTotalIGicnt;
+size_t emitter::emitTotalIGsize;
+unsigned emitter::emitTotalIGmcnt;
-unsigned emitter::emitSmallDspCnt;
-unsigned emitter::emitLargeDspCnt;
+unsigned emitter::emitSmallDspCnt;
+unsigned emitter::emitLargeDspCnt;
-unsigned emitter::emitSmallCnsCnt;
-unsigned emitter::emitLargeCnsCnt;
-unsigned emitter::emitSmallCns[SMALL_CNS_TSZ];
+unsigned emitter::emitSmallCnsCnt;
+unsigned emitter::emitLargeCnsCnt;
+unsigned emitter::emitSmallCns[SMALL_CNS_TSZ];
-void emitterStaticStats(FILE* fout)
+void emitterStaticStats(FILE* fout)
{
// insGroup members
fprintf(fout, "\n");
fprintf(fout, "insGroup:\n");
- fprintf(fout, "Offset of igNext = %2u\n", offsetof(insGroup, igNext ));
+ fprintf(fout, "Offset of igNext = %2u\n", offsetof(insGroup, igNext));
#ifdef DEBUG
- fprintf(fout, "Offset of igSelf = %2u\n", offsetof(insGroup, igSelf ));
-#endif
- fprintf(fout, "Offset of igNum = %2u\n", offsetof(insGroup, igNum ));
- fprintf(fout, "Offset of igOffs = %2u\n", offsetof(insGroup, igOffs ));
- fprintf(fout, "Offset of igFuncIdx = %2u\n", offsetof(insGroup, igFuncIdx ));
- fprintf(fout, "Offset of igFlags = %2u\n", offsetof(insGroup, igFlags ));
- fprintf(fout, "Offset of igSize = %2u\n", offsetof(insGroup, igSize ));
- fprintf(fout, "Offset of igData = %2u\n", offsetof(insGroup, igData ));
+ fprintf(fout, "Offset of igSelf = %2u\n", offsetof(insGroup, igSelf));
+#endif
+ fprintf(fout, "Offset of igNum = %2u\n", offsetof(insGroup, igNum));
+ fprintf(fout, "Offset of igOffs = %2u\n", offsetof(insGroup, igOffs));
+ fprintf(fout, "Offset of igFuncIdx = %2u\n", offsetof(insGroup, igFuncIdx));
+ fprintf(fout, "Offset of igFlags = %2u\n", offsetof(insGroup, igFlags));
+ fprintf(fout, "Offset of igSize = %2u\n", offsetof(insGroup, igSize));
+ fprintf(fout, "Offset of igData = %2u\n", offsetof(insGroup, igData));
#if EMIT_TRACK_STACK_DEPTH
- fprintf(fout, "Offset of igStkLvl = %2u\n", offsetof(insGroup, igStkLvl ));
+ fprintf(fout, "Offset of igStkLvl = %2u\n", offsetof(insGroup, igStkLvl));
#endif
- fprintf(fout, "Offset of igGCregs = %2u\n", offsetof(insGroup, igGCregs ));
- fprintf(fout, "Offset of igInsCnt = %2u\n", offsetof(insGroup, igInsCnt ));
- fprintf(fout, "Size of insGroup = %u\n", sizeof( insGroup ));
+ fprintf(fout, "Offset of igGCregs = %2u\n", offsetof(insGroup, igGCregs));
+ fprintf(fout, "Offset of igInsCnt = %2u\n", offsetof(insGroup, igInsCnt));
+ fprintf(fout, "Size of insGroup = %u\n", sizeof(insGroup));
// insPlaceholderGroupData members
fprintf(fout, "\n");
fprintf(fout, "insPlaceholderGroupData:\n");
- fprintf(fout, "Offset of igPhNext = %2u\n", offsetof(insPlaceholderGroupData, igPhNext ));
- fprintf(fout, "Offset of igPhBB = %2u\n", offsetof(insPlaceholderGroupData, igPhBB ));
- fprintf(fout, "Offset of igPhInitGCrefVars = %2u\n", offsetof(insPlaceholderGroupData, igPhInitGCrefVars ));
- fprintf(fout, "Offset of igPhInitGCrefRegs = %2u\n", offsetof(insPlaceholderGroupData, igPhInitGCrefRegs ));
- fprintf(fout, "Offset of igPhInitByrefRegs = %2u\n", offsetof(insPlaceholderGroupData, igPhInitByrefRegs ));
- fprintf(fout, "Offset of igPhPrevGCrefVars = %2u\n", offsetof(insPlaceholderGroupData, igPhPrevGCrefVars ));
- fprintf(fout, "Offset of igPhPrevGCrefRegs = %2u\n", offsetof(insPlaceholderGroupData, igPhPrevGCrefRegs ));
- fprintf(fout, "Offset of igPhPrevByrefRegs = %2u\n", offsetof(insPlaceholderGroupData, igPhPrevByrefRegs ));
- fprintf(fout, "Offset of igPhType = %2u\n", offsetof(insPlaceholderGroupData, igPhType ));
- fprintf(fout, "Size of insPlaceholderGroupData = %u\n", sizeof( insPlaceholderGroupData ));
+ fprintf(fout, "Offset of igPhNext = %2u\n", offsetof(insPlaceholderGroupData, igPhNext));
+ fprintf(fout, "Offset of igPhBB = %2u\n", offsetof(insPlaceholderGroupData, igPhBB));
+ fprintf(fout, "Offset of igPhInitGCrefVars = %2u\n", offsetof(insPlaceholderGroupData, igPhInitGCrefVars));
+ fprintf(fout, "Offset of igPhInitGCrefRegs = %2u\n", offsetof(insPlaceholderGroupData, igPhInitGCrefRegs));
+ fprintf(fout, "Offset of igPhInitByrefRegs = %2u\n", offsetof(insPlaceholderGroupData, igPhInitByrefRegs));
+ fprintf(fout, "Offset of igPhPrevGCrefVars = %2u\n", offsetof(insPlaceholderGroupData, igPhPrevGCrefVars));
+ fprintf(fout, "Offset of igPhPrevGCrefRegs = %2u\n", offsetof(insPlaceholderGroupData, igPhPrevGCrefRegs));
+ fprintf(fout, "Offset of igPhPrevByrefRegs = %2u\n", offsetof(insPlaceholderGroupData, igPhPrevByrefRegs));
+ fprintf(fout, "Offset of igPhType = %2u\n", offsetof(insPlaceholderGroupData, igPhType));
+ fprintf(fout, "Size of insPlaceholderGroupData = %u\n", sizeof(insPlaceholderGroupData));
fprintf(fout, "\n");
fprintf(fout, "Size of tinyID = %2u\n", TINY_IDSC_SIZE);
- fprintf(fout, "Size of instrDesc = %2u\n", sizeof( emitter::instrDesc ));
- //fprintf(fout, "Offset of _idIns = %2u\n", offsetof(emitter::instrDesc, _idIns ));
- //fprintf(fout, "Offset of _idInsFmt = %2u\n", offsetof(emitter::instrDesc, _idInsFmt ));
- //fprintf(fout, "Offset of _idOpSize = %2u\n", offsetof(emitter::instrDesc, _idOpSize ));
- //fprintf(fout, "Offset of idSmallCns = %2u\n", offsetof(emitter::instrDesc, idSmallCns ));
- //fprintf(fout, "Offset of _idAddrUnion= %2u\n", offsetof(emitter::instrDesc, _idAddrUnion));
- //fprintf(fout, "\n");
- //fprintf(fout, "Size of _idAddrUnion= %2u\n", sizeof(((emitter::instrDesc*)0)->_idAddrUnion));
+ fprintf(fout, "Size of instrDesc = %2u\n", sizeof(emitter::instrDesc));
+ // fprintf(fout, "Offset of _idIns = %2u\n", offsetof(emitter::instrDesc, _idIns ));
+ // fprintf(fout, "Offset of _idInsFmt = %2u\n", offsetof(emitter::instrDesc, _idInsFmt ));
+ // fprintf(fout, "Offset of _idOpSize = %2u\n", offsetof(emitter::instrDesc, _idOpSize ));
+ // fprintf(fout, "Offset of idSmallCns = %2u\n", offsetof(emitter::instrDesc, idSmallCns ));
+ // fprintf(fout, "Offset of _idAddrUnion= %2u\n", offsetof(emitter::instrDesc, _idAddrUnion));
+ // fprintf(fout, "\n");
+ // fprintf(fout, "Size of _idAddrUnion= %2u\n", sizeof(((emitter::instrDesc*)0)->_idAddrUnion));
fprintf(fout, "\n");
fprintf(fout, "GCInfo::regPtrDsc:\n");
- fprintf(fout, "Offset of rpdNext = %2u\n", offsetof(GCInfo::regPtrDsc, rpdNext ));
- fprintf(fout, "Offset of rpdOffs = %2u\n", offsetof(GCInfo::regPtrDsc, rpdOffs ));
+ fprintf(fout, "Offset of rpdNext = %2u\n", offsetof(GCInfo::regPtrDsc, rpdNext));
+ fprintf(fout, "Offset of rpdOffs = %2u\n", offsetof(GCInfo::regPtrDsc, rpdOffs));
fprintf(fout, "Offset of <union> = %2u\n", offsetof(GCInfo::regPtrDsc, rpdPtrArg));
- fprintf(fout, "Size of GCInfo::regPtrDsc = %2u\n", sizeof( GCInfo::regPtrDsc ));
+ fprintf(fout, "Size of GCInfo::regPtrDsc = %2u\n", sizeof(GCInfo::regPtrDsc));
fprintf(fout, "\n");
}
-void emitterStats(FILE* fout)
+void emitterStats(FILE* fout)
{
- if (totAllocdSize > 0)
+ if (totAllocdSize > 0)
{
assert(totActualSize <= totAllocdSize);
fprintf(fout, "\nTotal allocated code size = %u\n", totAllocdSize);
- if (totActualSize < totAllocdSize)
+ if (totActualSize < totAllocdSize)
{
fprintf(fout, "Total generated code size = %u ", totActualSize);
@@ -369,12 +361,13 @@ void emitterStats(FILE* fout)
assert(emitter::emitTotalInsCnt);
- fprintf(fout, "Average of %4.2f bytes of code generated per instruction\n", (double)totActualSize / emitter::emitTotalInsCnt);
+ fprintf(fout, "Average of %4.2f bytes of code generated per instruction\n",
+ (double)totActualSize / emitter::emitTotalInsCnt);
}
fprintf(fout, "\nInstruction format frequency table:\n\n");
- unsigned f, ic = 0, dc = 0;
+ unsigned f, ic = 0, dc = 0;
for (f = 0; f < emitter::IF_COUNT; f++)
{
@@ -383,9 +376,9 @@ void emitterStats(FILE* fout)
for (f = 0; f < emitter::IF_COUNT; f++)
{
- unsigned c = emitter::emitIFcounts[f];
+ unsigned c = emitter::emitIFcounts[f];
- if ((c > 0) && (1000 * c >= ic))
+ if ((c > 0) && (1000 * c >= ic))
{
dc += c;
fprintf(fout, " %-13s %8u (%5.2f%%)\n", emitter::emitIfName(f), c, 100.0 * c / ic);
@@ -395,29 +388,39 @@ void emitterStats(FILE* fout)
fprintf(fout, " --------------------------------\n");
fprintf(fout, " %-13s %8u (%5.2f%%)\n", "Total shown", dc, 100.0 * dc / ic);
- if (emitter::emitTotalIGmcnt)
+ if (emitter::emitTotalIGmcnt)
{
- fprintf(fout, "Total of %8u methods\n", emitter::emitTotalIGmcnt);
- fprintf(fout, "Total of %8u insGroup\n", emitter::emitTotalIGcnt);
+ fprintf(fout, "Total of %8u methods\n", emitter::emitTotalIGmcnt);
+ fprintf(fout, "Total of %8u insGroup\n", emitter::emitTotalIGcnt);
fprintf(fout, "Total of %8u insPlaceholderGroupData\n", emitter::emitTotalPhIGcnt);
- fprintf(fout, "Total of %8u instructions\n", emitter::emitTotalIGicnt);
- fprintf(fout, "Total of %8u jumps\n", emitter::emitTotalIGjmps);
- fprintf(fout, "Total of %8u GC livesets\n", emitter::emitTotalIGptrs);
+ fprintf(fout, "Total of %8u instructions\n", emitter::emitTotalIGicnt);
+ fprintf(fout, "Total of %8u jumps\n", emitter::emitTotalIGjmps);
+ fprintf(fout, "Total of %8u GC livesets\n", emitter::emitTotalIGptrs);
fprintf(fout, "\n");
- fprintf(fout, "Average of %8.1lf insGroup per method\n", (double)emitter::emitTotalIGcnt / emitter::emitTotalIGmcnt);
- fprintf(fout, "Average of %8.1lf insPhGroup per method\n", (double)emitter::emitTotalPhIGcnt/ emitter::emitTotalIGmcnt);
- fprintf(fout, "Average of %8.1lf instructions per method\n", (double)emitter::emitTotalIGicnt / emitter::emitTotalIGmcnt);
- fprintf(fout, "Average of %8.1lf desc. bytes per method\n", (double)emitter::emitTotalIGsize / emitter::emitTotalIGmcnt);
- fprintf(fout, "Average of %8.1lf jumps per method\n", (double)emitter::emitTotalIGjmps / emitter::emitTotalIGmcnt);
- fprintf(fout, "Average of %8.1lf GC livesets per method\n", (double)emitter::emitTotalIGptrs / emitter::emitTotalIGmcnt);
+ fprintf(fout, "Average of %8.1lf insGroup per method\n",
+ (double)emitter::emitTotalIGcnt / emitter::emitTotalIGmcnt);
+ fprintf(fout, "Average of %8.1lf insPhGroup per method\n",
+ (double)emitter::emitTotalPhIGcnt / emitter::emitTotalIGmcnt);
+ fprintf(fout, "Average of %8.1lf instructions per method\n",
+ (double)emitter::emitTotalIGicnt / emitter::emitTotalIGmcnt);
+ fprintf(fout, "Average of %8.1lf desc. bytes per method\n",
+ (double)emitter::emitTotalIGsize / emitter::emitTotalIGmcnt);
+ fprintf(fout, "Average of %8.1lf jumps per method\n",
+ (double)emitter::emitTotalIGjmps / emitter::emitTotalIGmcnt);
+ fprintf(fout, "Average of %8.1lf GC livesets per method\n",
+ (double)emitter::emitTotalIGptrs / emitter::emitTotalIGmcnt);
fprintf(fout, "\n");
- fprintf(fout, "Average of %8.1lf instructions per group \n", (double)emitter::emitTotalIGicnt / emitter::emitTotalIGcnt);
- fprintf(fout, "Average of %8.1lf desc. bytes per group \n", (double)emitter::emitTotalIGsize / emitter::emitTotalIGcnt);
- fprintf(fout, "Average of %8.1lf jumps per group \n", (double)emitter::emitTotalIGjmps / emitter::emitTotalIGcnt);
+ fprintf(fout, "Average of %8.1lf instructions per group \n",
+ (double)emitter::emitTotalIGicnt / emitter::emitTotalIGcnt);
+ fprintf(fout, "Average of %8.1lf desc. bytes per group \n",
+ (double)emitter::emitTotalIGsize / emitter::emitTotalIGcnt);
+ fprintf(fout, "Average of %8.1lf jumps per group \n",
+ (double)emitter::emitTotalIGjmps / emitter::emitTotalIGcnt);
fprintf(fout, "\n");
- fprintf(fout, "Average of %8.1lf bytes per instrDesc\n", (double)emitter::emitTotalIGsize / emitter::emitTotalIGicnt);
+ fprintf(fout, "Average of %8.1lf bytes per instrDesc\n",
+ (double)emitter::emitTotalIGsize / emitter::emitTotalIGicnt);
fprintf(fout, "\n");
- fprintf(fout, "A total of %8u desc. bytes\n" , emitter::emitTotalIGsize);
+ fprintf(fout, "A total of %8u desc. bytes\n", emitter::emitTotalIGsize);
fprintf(fout, "\n");
}
@@ -433,14 +436,15 @@ void emitterStats(FILE* fout)
stkDepthTable.dump(fout);
fprintf(fout, "\n");
- int i;
- unsigned c;
- unsigned m;
+ int i;
+ unsigned c;
+ unsigned m;
- if (emitter::emitSmallCnsCnt || emitter::emitLargeCnsCnt)
+ if (emitter::emitSmallCnsCnt || emitter::emitLargeCnsCnt)
{
- fprintf(fout, "SmallCnsCnt = %6u\n" , emitter::emitSmallCnsCnt);
- fprintf(fout, "LargeCnsCnt = %6u (%3u %% of total)\n", emitter::emitLargeCnsCnt, 100*emitter::emitLargeCnsCnt/(emitter::emitLargeCnsCnt+emitter::emitSmallCnsCnt));
+ fprintf(fout, "SmallCnsCnt = %6u\n", emitter::emitSmallCnsCnt);
+ fprintf(fout, "LargeCnsCnt = %6u (%3u %% of total)\n", emitter::emitLargeCnsCnt,
+ 100 * emitter::emitLargeCnsCnt / (emitter::emitLargeCnsCnt + emitter::emitSmallCnsCnt));
}
#if 0
@@ -463,22 +467,20 @@ void emitterStats(FILE* fout)
fprintf(fout, "%8u bytes allocated in the emitter\n", emitter::emitTotMemAlloc);
}
-#endif // EMITTER_STATS
+#endif // EMITTER_STATS
/*****************************************************************************/
-const unsigned short emitTypeSizes[] =
-{
- #define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) sze,
- #include "typelist.h"
- #undef DEF_TP
+const unsigned short emitTypeSizes[] = {
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) sze,
+#include "typelist.h"
+#undef DEF_TP
};
-const unsigned short emitTypeActSz[] =
-{
- #define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) asze,
- #include "typelist.h"
- #undef DEF_TP
+const unsigned short emitTypeActSz[] = {
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) asze,
+#include "typelist.h"
+#undef DEF_TP
};
/*****************************************************************************/
@@ -487,7 +489,7 @@ const unsigned short emitTypeActSz[] =
* Initialize the emitter - called once, at DLL load time.
*/
-void emitter::emitInit()
+void emitter::emitInit()
{
}
@@ -496,17 +498,16 @@ void emitter::emitInit()
* Shut down the emitter - called once, at DLL exit time.
*/
-void emitter::emitDone()
+void emitter::emitDone()
{
}
-
/*****************************************************************************
*
* Allocate memory.
*/
-void* emitter::emitGetMem(size_t sz)
+void* emitter::emitGetMem(size_t sz)
{
assert(sz % sizeof(int) == 0);
@@ -514,10 +515,9 @@ void* emitter::emitGetMem(size_t sz)
emitTotMemAlloc += sz;
#endif
- return emitComp->compGetMem(sz, CMK_InstDesc);
+ return emitComp->compGetMem(sz, CMK_InstDesc);
}
-
/*****************************************************************************
*
* emitLclVarAddr support methods
@@ -531,43 +531,54 @@ void emitLclVarAddr::initLclVarAddr(int varNum, unsigned offset)
if (offset < 32768)
{
_lvaTag = LVA_STANDARD_ENCODING;
- _lvaExtra = offset; // offset known to be in [0..32767]
- _lvaVarNum = (unsigned) varNum; // varNum known to be in [0..32767]
+ _lvaExtra = offset; // offset known to be in [0..32767]
+ _lvaVarNum = (unsigned)varNum; // varNum known to be in [0..32767]
}
else // offset >= 32768
{
// We could support larger local offsets here at the cost of less varNums
if (offset >= 65536)
+ {
IMPL_LIMITATION("JIT doesn't support offsets larger than 65535 into valuetypes\n");
+ }
_lvaTag = LVA_LARGE_OFFSET;
- _lvaExtra = (offset-32768); // (offset-32768) is known to be in [0..32767]
- _lvaVarNum = (unsigned) varNum; // varNum known to be in [0..32767]
+ _lvaExtra = (offset - 32768); // (offset-32768) is known to be in [0..32767]
+ _lvaVarNum = (unsigned)varNum; // varNum known to be in [0..32767]
}
}
else // varNum < 0, These are used for Compiler spill temps
{
if (varNum < -32767)
+ {
IMPL_LIMITATION("JIT doesn't support more than 32767 Compiler Spill temps\n");
+ }
if (offset > 32767)
- IMPL_LIMITATION("JIT doesn't support offsets larger than 32767 into valuetypes for Compiler Spill temps\n");
+ {
+ IMPL_LIMITATION(
+ "JIT doesn't support offsets larger than 32767 into valuetypes for Compiler Spill temps\n");
+ }
- _lvaTag = LVA_COMPILER_TEMP;
- _lvaExtra = offset; // offset known to be in [0..32767]
- _lvaVarNum = (unsigned) (-varNum); // -varNum known to be in [1..32767]
+ _lvaTag = LVA_COMPILER_TEMP;
+ _lvaExtra = offset; // offset known to be in [0..32767]
+ _lvaVarNum = (unsigned)(-varNum); // -varNum known to be in [1..32767]
}
}
else // varNum >= 32768
{
if (offset >= 256)
+ {
IMPL_LIMITATION("JIT doesn't support offsets larger than 255 into valuetypes for local vars > 32767\n");
- if (varNum >= 0x00400000) // 0x00400000 == 2^22
+ }
+ if (varNum >= 0x00400000)
+ { // 0x00400000 == 2^22
IMPL_LIMITATION("JIT doesn't support more than 2^22 variables\n");
+ }
_lvaTag = LVA_LARGE_VARNUM;
- _lvaVarNum = varNum & 0x00007FFF; // varNum bits 14 to 0
- _lvaExtra = (varNum & 0x003F8000) >> 15; // varNum bits 21 to 15 in _lvaExtra bits 6 to 0, 7 bits total
- _lvaExtra |= (offset << 7); // offset bits 7 to 0 in _lvaExtra bits 14 to 7, 8 bits total
+ _lvaVarNum = varNum & 0x00007FFF; // varNum bits 14 to 0
+ _lvaExtra = (varNum & 0x003F8000) >> 15; // varNum bits 21 to 15 in _lvaExtra bits 6 to 0, 7 bits total
+ _lvaExtra |= (offset << 7); // offset bits 7 to 0 in _lvaExtra bits 14 to 7, 8 bits total
}
}
@@ -576,27 +587,27 @@ int emitLclVarAddr::lvaVarNum()
{
switch (_lvaTag)
{
- case LVA_COMPILER_TEMP:
- return -((int) _lvaVarNum);
- case LVA_LARGE_VARNUM:
- return (int) (((_lvaExtra & 0x007F) << 15) + _lvaVarNum);
- default: // LVA_STANDARD_ENCODING or LVA_LARGE_OFFSET
- assert((_lvaTag == LVA_STANDARD_ENCODING) || (_lvaTag == LVA_LARGE_OFFSET));
- return (int) _lvaVarNum;
+ case LVA_COMPILER_TEMP:
+ return -((int)_lvaVarNum);
+ case LVA_LARGE_VARNUM:
+ return (int)(((_lvaExtra & 0x007F) << 15) + _lvaVarNum);
+ default: // LVA_STANDARD_ENCODING or LVA_LARGE_OFFSET
+ assert((_lvaTag == LVA_STANDARD_ENCODING) || (_lvaTag == LVA_LARGE_OFFSET));
+ return (int)_lvaVarNum;
}
}
-unsigned emitLclVarAddr::lvaOffset() // returns the offset into the variable to access
+unsigned emitLclVarAddr::lvaOffset() // returns the offset into the variable to access
{
switch (_lvaTag)
{
- case LVA_LARGE_OFFSET:
- return (32768 + _lvaExtra);
- case LVA_LARGE_VARNUM:
- return (_lvaExtra & 0x7F80) >> 7;
- default: // LVA_STANDARD_ENCODING or LVA_COMPILER_TEMP
- assert((_lvaTag == LVA_STANDARD_ENCODING) || (_lvaTag == LVA_COMPILER_TEMP));
- return _lvaExtra;
+ case LVA_LARGE_OFFSET:
+ return (32768 + _lvaExtra);
+ case LVA_LARGE_VARNUM:
+ return (_lvaExtra & 0x7F80) >> 7;
+ default: // LVA_STANDARD_ENCODING or LVA_COMPILER_TEMP
+ assert((_lvaTag == LVA_STANDARD_ENCODING) || (_lvaTag == LVA_COMPILER_TEMP));
+ return _lvaExtra;
}
}
@@ -605,14 +616,13 @@ unsigned emitLclVarAddr::lvaOffset() // returns the offset into the variable
* Record some info about the method about to be emitted.
*/
-void emitter::emitBegCG(Compiler *comp,
- COMP_HANDLE cmpHandle)
+void emitter::emitBegCG(Compiler* comp, COMP_HANDLE cmpHandle)
{
emitComp = comp;
emitCmpHandle = cmpHandle;
}
-void emitter::emitEndCG()
+void emitter::emitEndCG()
{
}
@@ -621,22 +631,24 @@ void emitter::emitEndCG()
* Prepare the given IG for emission of code.
*/
-void emitter::emitGenIG(insGroup *ig)
+void emitter::emitGenIG(insGroup* ig)
{
/* Set the "current IG" value */
- emitCurIG = ig;
+ emitCurIG = ig;
#if EMIT_TRACK_STACK_DEPTH
/* Record the stack level on entry to this group */
- ig->igStkLvl = emitCurStackLvl;
+ ig->igStkLvl = emitCurStackLvl;
// If we don't have enough bits in igStkLvl, refuse to compile
if (ig->igStkLvl != emitCurStackLvl)
+ {
IMPL_LIMITATION("Too many arguments pushed on stack");
+ }
// printf("Start IG #%02u [stk=%02u]\n", ig->igNum, emitCurStackLvl);
@@ -649,16 +661,16 @@ void emitter::emitGenIG(insGroup *ig)
/* Prepare to issue instructions */
- emitCurIGinsCnt = 0;
- emitCurIGsize = 0;
+ emitCurIGinsCnt = 0;
+ emitCurIGsize = 0;
- assert(emitCurIGjmpList == NULL);
+ assert(emitCurIGjmpList == nullptr);
/* Allocate the temp instruction buffer if we haven't done so */
- if (emitCurIGfreeBase == NULL)
+ if (emitCurIGfreeBase == nullptr)
{
- emitIGbuffSize = SC_IG_BUFFER_SIZE;
+ emitIGbuffSize = SC_IG_BUFFER_SIZE;
emitCurIGfreeBase = (BYTE*)emitGetMem(emitIGbuffSize);
}
@@ -673,17 +685,18 @@ void emitter::emitGenIG(insGroup *ig)
insGroup* emitter::emitSavIG(bool emitAdd)
{
- insGroup * ig;
- BYTE * id;
+ insGroup* ig;
+ BYTE* id;
- size_t sz;
- size_t gs;
+ size_t sz;
+ size_t gs;
assert(emitCurIGfreeNext <= emitCurIGfreeEndp);
/* Get hold of the IG descriptor */
- ig = emitCurIG; assert(ig);
+ ig = emitCurIG;
+ assert(ig);
/* Compute how much code we've generated */
@@ -695,7 +708,7 @@ insGroup* emitter::emitSavIG(bool emitAdd)
/* Do we need space for GC? */
- if (!(ig->igFlags & IGF_EMIT_ADD))
+ if (!(ig->igFlags & IGF_EMIT_ADD))
{
/* Is the initial set of live GC vars different from the previous one? */
@@ -735,16 +748,16 @@ insGroup* emitter::emitSavIG(bool emitAdd)
{
/* Record the byref regs in front the of the instructions */
- *castto(id, unsigned *)++ = (unsigned) emitInitByrefRegs;
+ *castto(id, unsigned*)++ = (unsigned)emitInitByrefRegs;
}
/* Do we need to store the liveset? */
- if (ig->igFlags & IGF_GC_VARS)
+ if (ig->igFlags & IGF_GC_VARS)
{
/* Record the liveset in front the of the instructions */
- VarSetOps::AssignNoCopy(emitComp, (*castto(id, VARSET_TP *)), VarSetOps::MakeEmpty(emitComp));
- VarSetOps::Assign(emitComp, (*castto(id, VARSET_TP *)++), emitInitGCrefVars);
+ VarSetOps::AssignNoCopy(emitComp, (*castto(id, VARSET_TP*)), VarSetOps::MakeEmpty(emitComp));
+ VarSetOps::Assign(emitComp, (*castto(id, VARSET_TP*)++), emitInitGCrefVars);
}
/* Record the collected instructions */
@@ -760,8 +773,8 @@ insGroup* emitter::emitSavIG(bool emitAdd)
// If there's an error during emission, we may want to connect the post-copy address
// of an instrDesc with the pre-copy address (the one that was originally created). This
// printing enables that.
- printf("copying instruction group from [0x%x..0x%x) to [0x%x..0x%x).\n",
- dspPtr(emitCurIGfreeBase), dspPtr(emitCurIGfreeBase+sz), dspPtr(id), dspPtr(id+sz));
+ printf("copying instruction group from [0x%x..0x%x) to [0x%x..0x%x).\n", dspPtr(emitCurIGfreeBase),
+ dspPtr(emitCurIGfreeBase + sz), dspPtr(id), dspPtr(id + sz));
}
#endif
@@ -770,24 +783,24 @@ insGroup* emitter::emitSavIG(bool emitAdd)
noway_assert((BYTE)emitCurIGinsCnt == emitCurIGinsCnt);
noway_assert((unsigned short)emitCurIGsize == emitCurIGsize);
- ig->igInsCnt = (BYTE)emitCurIGinsCnt;
- ig->igSize = (unsigned short)emitCurIGsize;
+ ig->igInsCnt = (BYTE)emitCurIGinsCnt;
+ ig->igSize = (unsigned short)emitCurIGsize;
emitCurCodeOffset += emitCurIGsize;
assert(IsCodeAligned(emitCurCodeOffset));
#if EMITTER_STATS
- emitTotalIGicnt += emitCurIGinsCnt;
- emitTotalIGsize += sz;
- emitSizeMethod += sz;
+ emitTotalIGicnt += emitCurIGinsCnt;
+ emitTotalIGsize += sz;
+ emitSizeMethod += sz;
#endif
// printf("Group [%08X]%3u has %2u instructions (%4u bytes at %08X)\n", ig, ig->igNum, emitCurIGinsCnt, sz, id);
/* Record the live GC register set - if and only if it is not an emitter added block */
- if (!(ig->igFlags & IGF_EMIT_ADD))
+ if (!(ig->igFlags & IGF_EMIT_ADD))
{
- ig->igGCregs = (regMaskSmall)emitInitGCrefRegs;
+ ig->igGCregs = (regMaskSmall)emitInitGCrefRegs;
}
if (!emitAdd)
@@ -807,17 +820,17 @@ insGroup* emitter::emitSavIG(bool emitAdd)
emitForceStoreGCState = false;
}
-#ifdef DEBUG
- if (emitComp->opts.dspCode)
+#ifdef DEBUG
+ if (emitComp->opts.dspCode)
{
printf("\n G_M%03u_IG%02u:", Compiler::s_compMethodsCount, ig->igNum);
if (emitComp->verbose)
{
- printf(" ; offs=%06XH, funclet=%02u", ig->igOffs, ig->igFuncIdx);
+ printf(" ; offs=%06XH, funclet=%02u", ig->igOffs, ig->igFuncIdx);
}
else
{
- printf(" ; funclet=%02u", ig->igFuncIdx);
+ printf(" ; funclet=%02u", ig->igFuncIdx);
}
printf("\n");
}
@@ -825,10 +838,10 @@ insGroup* emitter::emitSavIG(bool emitAdd)
/* Did we have any jumps in this group? */
- if (emitCurIGjmpList)
+ if (emitCurIGjmpList)
{
- instrDescJmp * list = NULL;
- instrDescJmp * last = NULL;
+ instrDescJmp* list = nullptr;
+ instrDescJmp* last = nullptr;
/* Move jumps to the global list, update their 'next' links */
@@ -841,19 +854,19 @@ insGroup* emitter::emitSavIG(bool emitAdd)
/* Figure out the address of where the jump got copied */
- size_t of = (BYTE*)oj - emitCurIGfreeBase;
+ size_t of = (BYTE*)oj - emitCurIGfreeBase;
instrDescJmp* nj = (instrDescJmp*)(ig->igData + of);
// printf("Jump moved from %08X to %08X\n", oj, nj);
// printf("jmp [%08X] at %08X + %03u\n", nj, ig, nj->idjOffs);
- assert(nj->idjIG == ig);
+ assert(nj->idjIG == ig);
assert(nj->idIns() == oj->idIns());
assert(nj->idjNext == oj->idjNext);
/* Make sure the jumps are correctly ordered */
- assert(last == NULL || last->idjOffs > nj->idjOffs);
+ assert(last == nullptr || last->idjOffs > nj->idjOffs);
if (ig->igFlags & IGF_FUNCLET_PROLOG)
{
@@ -870,33 +883,32 @@ insGroup* emitter::emitSavIG(bool emitAdd)
/* Append the new jump to the list */
nj->idjNext = list;
- list = nj;
+ list = nj;
- if (last == NULL)
+ if (last == nullptr)
{
last = nj;
}
- }
- while (emitCurIGjmpList);
+ } while (emitCurIGjmpList);
- if (last != NULL)
+ if (last != nullptr)
{
/* Append the jump(s) from this IG to the global list */
bool prologJump = (ig == emitPrologIG);
- if ((emitJumpList == NULL) || prologJump)
+ if ((emitJumpList == nullptr) || prologJump)
{
last->idjNext = emitJumpList;
emitJumpList = list;
}
else
{
- last->idjNext = NULL;
+ last->idjNext = nullptr;
emitJumpLast->idjNext = list;
}
- if (!prologJump || (emitJumpLast == NULL))
+ if (!prologJump || (emitJumpLast == nullptr))
{
- emitJumpLast = last;
+ emitJumpLast = last;
}
}
}
@@ -907,7 +919,7 @@ insGroup* emitter::emitSavIG(bool emitAdd)
{
assert(emitLastIns != nullptr);
assert(emitCurIGfreeBase <= (BYTE*)emitLastIns);
- assert( (BYTE*)emitLastIns < emitCurIGfreeBase + sz);
+ assert((BYTE*)emitLastIns < emitCurIGfreeBase + sz);
emitLastIns = (instrDesc*)((BYTE*)id + ((BYTE*)emitLastIns - (BYTE*)emitCurIGfreeBase));
}
@@ -915,39 +927,38 @@ insGroup* emitter::emitSavIG(bool emitAdd)
emitCurIGfreeNext = emitCurIGfreeBase;
- return ig;
+ return ig;
}
#ifdef LEGACY_BACKEND
-void emitter::emitTmpSizeChanged(unsigned tmpSize)
+void emitter::emitTmpSizeChanged(unsigned tmpSize)
{
assert(emitGrowableMaxByteOffs <= SCHAR_MAX);
#ifdef DEBUG
// Workaround for FP code
- bool bAssert = JitConfig.JitMaxTempAssert()?true:false;
+ bool bAssert = JitConfig.JitMaxTempAssert() ? true : false;
if (tmpSize > emitMaxTmpSize && bAssert)
{
// TODO-Review: We have a known issue involving floating point code and this assert.
- // The generated code will be ok, This is only a warning.
+ // The generated code will be ok, This is only a warning.
// To not receive this assert again you can set the registry key: JITMaxTempAssert=0.
//
assert(!"Incorrect max tmp size set.");
- }
+ }
#endif
-
+
if (tmpSize <= emitMaxTmpSize)
return;
- unsigned change = tmpSize - emitMaxTmpSize;
+ unsigned change = tmpSize - emitMaxTmpSize;
/* If we have used a small offset to access a variable, growing the
temp size is a problem if we should have used a large offset instead.
Detect if such a situation happens and bail */
- if ( emitGrowableMaxByteOffs <= SCHAR_MAX &&
- (emitGrowableMaxByteOffs + change) > SCHAR_MAX)
+ if (emitGrowableMaxByteOffs <= SCHAR_MAX && (emitGrowableMaxByteOffs + change) > SCHAR_MAX)
{
#ifdef DEBUG
if (emitComp->verbose)
@@ -966,126 +977,122 @@ void emitter::emitTmpSizeChanged(unsigned tmpSize)
* Start generating code to be scheduled; called once per method.
*/
-void emitter::emitBegFN(bool hasFramePtr
+void emitter::emitBegFN(bool hasFramePtr
#if defined(DEBUG)
- , bool chkAlign
+ ,
+ bool chkAlign
#endif
#ifdef LEGACY_BACKEND
- , unsigned lclSize
+ ,
+ unsigned lclSize
#endif // LEGACY_BACKEND
- , unsigned maxTmpSize
- )
+ ,
+ unsigned maxTmpSize)
{
- insGroup * ig;
+ insGroup* ig;
/* Assume we won't need the temp instruction buffer */
- emitCurIGfreeBase = NULL;
- emitIGbuffSize = 0;
+ emitCurIGfreeBase = nullptr;
+ emitIGbuffSize = 0;
/* Record stack frame info (the temp size is just an estimate) */
- emitHasFramePtr = hasFramePtr;
+ emitHasFramePtr = hasFramePtr;
- emitMaxTmpSize = maxTmpSize;
+ emitMaxTmpSize = maxTmpSize;
#ifdef LEGACY_BACKEND
- emitLclSize = lclSize;
+ emitLclSize = lclSize;
emitGrowableMaxByteOffs = 0;
#ifdef DEBUG
- emitMaxByteOffsIdNum= (unsigned)-1;
+ emitMaxByteOffsIdNum = (unsigned)-1;
#endif // DEBUG
#endif // LEGACY_BACKEND
#ifdef DEBUG
- emitChkAlign = chkAlign;
+ emitChkAlign = chkAlign;
#endif
/* We have no epilogs yet */
- emitEpilogSize = 0;
- emitEpilogCnt = 0;
+ emitEpilogSize = 0;
+ emitEpilogCnt = 0;
#ifdef _TARGET_XARCH_
emitExitSeqBegLoc.Init();
- emitExitSeqSize = INT_MAX;
+ emitExitSeqSize = INT_MAX;
#endif // _TARGET_XARCH_
- emitPlaceholderList =
- emitPlaceholderLast = NULL;
+ emitPlaceholderList = emitPlaceholderLast = nullptr;
#ifdef JIT32_GCENCODER
- emitEpilogList =
- emitEpilogLast = NULL;
+ emitEpilogList = emitEpilogLast = NULL;
#endif // JIT32_GCENCODER
/* We don't have any jumps */
- emitJumpList =
- emitJumpLast = NULL;
- emitCurIGjmpList = NULL;
+ emitJumpList = emitJumpLast = nullptr;
+ emitCurIGjmpList = nullptr;
- emitFwdJumps = false;
- emitNoGCIG = false;
- emitForceNewIG = false;
+ emitFwdJumps = false;
+ emitNoGCIG = false;
+ emitForceNewIG = false;
/* We have not recorded any live sets */
assert(VarSetOps::IsEmpty(emitComp, emitThisGCrefVars));
assert(VarSetOps::IsEmpty(emitComp, emitInitGCrefVars));
assert(VarSetOps::IsEmpty(emitComp, emitPrevGCrefVars));
- emitThisGCrefRegs = RBM_NONE;
- emitInitGCrefRegs = RBM_NONE;
- emitPrevGCrefRegs = RBM_NONE;
- emitThisByrefRegs = RBM_NONE;
- emitInitByrefRegs = RBM_NONE;
- emitPrevByrefRegs = RBM_NONE;
+ emitThisGCrefRegs = RBM_NONE;
+ emitInitGCrefRegs = RBM_NONE;
+ emitPrevGCrefRegs = RBM_NONE;
+ emitThisByrefRegs = RBM_NONE;
+ emitInitByrefRegs = RBM_NONE;
+ emitPrevByrefRegs = RBM_NONE;
emitForceStoreGCState = false;
#ifdef DEBUG
- emitIssuing = false;
+ emitIssuing = false;
#endif
/* Assume there will be no GC ref variables */
- emitGCrFrameOffsMin =
- emitGCrFrameOffsMax =
- emitGCrFrameOffsCnt = 0;
-#ifdef DEBUG
- emitGCrFrameLiveTab = NULL;
+ emitGCrFrameOffsMin = emitGCrFrameOffsMax = emitGCrFrameOffsCnt = 0;
+#ifdef DEBUG
+ emitGCrFrameLiveTab = nullptr;
#endif
/* We have no groups / code at this point */
- emitIGlist =
- emitIGlast = NULL;
+ emitIGlist = emitIGlast = nullptr;
- emitCurCodeOffset = 0;
- emitFirstColdIG = NULL;
- emitTotalCodeSize = 0;
+ emitCurCodeOffset = 0;
+ emitFirstColdIG = nullptr;
+ emitTotalCodeSize = 0;
-#if EMITTER_STATS
+#if EMITTER_STATS
emitTotalIGmcnt++;
- emitSizeMethod = 0;
+ emitSizeMethod = 0;
#endif
- emitInsCount = 0;
+ emitInsCount = 0;
/* The stack is empty now */
- emitCurStackLvl = 0;
+ emitCurStackLvl = 0;
#if EMIT_TRACK_STACK_DEPTH
- emitMaxStackDepth = 0;
- emitCntStackDepth = sizeof(int);
+ emitMaxStackDepth = 0;
+ emitCntStackDepth = sizeof(int);
#endif
/* No data sections have been created */
- emitDataSecCur = 0;
+ emitDataSecCur = nullptr;
memset(&emitConsDsc, 0, sizeof(emitConsDsc));
@@ -1094,31 +1101,27 @@ void emitter::emitBegFN(bool hasFramePtr
emitEnableRandomNops();
emitComp->info.compRNG.Init(emitComp->info.compChecksum);
- emitNextNop = emitNextRandomNop();
+ emitNextNop = emitNextRandomNop();
emitInInstrumentation = false;
#endif // PSEUDORANDOM_NOP_INSERTION
/* Create the first IG, it will be used for the prolog */
- emitNxtIGnum = 1;
+ emitNxtIGnum = 1;
- emitPrologIG =
- emitIGlist =
- emitIGlast =
- emitCurIG =
- ig = emitAllocIG();
+ emitPrologIG = emitIGlist = emitIGlast = emitCurIG = ig = emitAllocIG();
#ifdef ARM_HAZARD_AVOIDANCE
// This first IG is actually preceeded by the method prolog which may be composed of many T1 instructions
emitCurInstrCntT1 = MAX_INSTR_COUNT_T1;
#endif
- emitLastIns = NULL;
+ emitLastIns = nullptr;
- ig->igNext = NULL;
+ ig->igNext = nullptr;
#ifdef DEBUG
- emitScratchSigInfo = nullptr;
+ emitScratchSigInfo = nullptr;
#endif // DEBUG
/* Append another group, to start generating the method body */
@@ -1129,7 +1132,7 @@ void emitter::emitBegFN(bool hasFramePtr
#ifdef PSEUDORANDOM_NOP_INSERTION
int emitter::emitNextRandomNop()
{
- return emitComp->info.compRNG.Next(1,9);
+ return emitComp->info.compRNG.Next(1, 9);
}
#endif
@@ -1138,7 +1141,7 @@ int emitter::emitNextRandomNop()
* Done generating code to be scheduled; called once per method.
*/
-void emitter::emitEndFN()
+void emitter::emitEndFN()
{
}
@@ -1155,39 +1158,39 @@ int emitter::instrDesc::idAddrUnion::iiaGetJitDataOffset() const
return Compiler::eeGetJitDataOffs(iiaFieldHnd);
}
-void emitter::dispIns(instrDesc* id)
+void emitter::dispIns(instrDesc* id)
{
-#ifdef DEBUG
+#ifdef DEBUG
emitInsSanityCheck(id);
if (emitComp->opts.dspCode)
+ {
emitDispIns(id, true, false, false);
+ }
-#if EMIT_TRACK_STACK_DEPTH
+#if EMIT_TRACK_STACK_DEPTH
assert((int)emitCurStackLvl >= 0);
#endif
- size_t sz = emitSizeOfInsDsc(id);
- assert(id->idDebugOnlyInfo()->idSize == sz);
-#endif // DEBUG
+ size_t sz = emitSizeOfInsDsc(id);
+ assert(id->idDebugOnlyInfo()->idSize == sz);
+#endif // DEBUG
-#if EMITTER_STATS
+#if EMITTER_STATS
emitIFcounts[id->idInsFmt()]++;
#endif
}
-void emitter::appendToCurIG(instrDesc* id)
+void emitter::appendToCurIG(instrDesc* id)
{
emitCurIGsize += id->idCodeSize();
#ifdef ARM_HAZARD_AVOIDANCE
//
- // Do we have a T1 instruction or an unbound jump instruction?
+ // Do we have a T1 instruction or an unbound jump instruction?
// (it could be bound to a T1 instruction)
if (id->idInstrIsT1() ||
- ( ((id->idInsFmt() == IF_T2_J2) ||
- (id->idInsFmt() == IF_T2_J1) ||
- (id->idInsFmt() == IF_LARGEJMP) )
- && (id->idIsBound() == false) ))
+ (((id->idInsFmt() == IF_T2_J2) || (id->idInsFmt() == IF_T2_J1) || (id->idInsFmt() == IF_LARGEJMP)) &&
+ (id->idIsBound() == false)))
{
if (emitCurInstrCntT1 < MAX_INSTR_COUNT_T1)
{
@@ -1207,18 +1210,21 @@ void emitter::appendToCurIG(instrDesc* id)
* Display (optionally) an instruction offset.
*/
-#ifdef DEBUG
+#ifdef DEBUG
-void emitter::emitDispInsOffs(unsigned offs, bool doffs)
+void emitter::emitDispInsOffs(unsigned offs, bool doffs)
{
- if (doffs)
+ if (doffs)
+ {
printf("%06X", offs);
+ }
else
+ {
printf(" ");
+ }
}
-#endif // DEBUG
-
+#endif // DEBUG
#ifdef JIT32_GCENCODER
@@ -1229,22 +1235,22 @@ void emitter::emitDispInsOffs(unsigned offs, bool doffs)
* values returned by the callback.
*/
-size_t emitter::emitGenEpilogLst(size_t (*fp)(void *, unsigned),
- void *cp)
+size_t emitter::emitGenEpilogLst(size_t (*fp)(void*, unsigned), void* cp)
{
- EpilogList* el;
- size_t sz;
+ EpilogList* el;
+ size_t sz;
for (el = emitEpilogList, sz = 0; el; el = el->elNext)
{
assert(el->elIG->igFlags & IGF_EPILOG);
- UNATIVE_OFFSET ofs = el->elIG->igOffs; // The epilog starts at the beginning of the IG, so the IG offset is correct
+ UNATIVE_OFFSET ofs =
+ el->elIG->igOffs; // The epilog starts at the beginning of the IG, so the IG offset is correct
sz += fp(cp, ofs);
}
- return sz;
+ return sz;
}
#endif // JIT32_GCENCODER
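As an aside, emitGenEpilogLst() above is a plain fold over the epilog list: each epilog's IG offset is handed to 'fp', and the function returns the sum of the callback's return values. A hypothetical caller (not part of this change) might look like:

// Hypothetical JIT32_GCENCODER-style callback; 'cp' is whatever state the caller threads through.
static size_t recordEpilogOffset(void* cp, unsigned offs)
{
    unsigned* epilogCount = static_cast<unsigned*>(cp);
    (*epilogCount)++; // an epilog starts at code offset 'offs'
    (void)offs;
    return 4; // per-epilog size the encoder wants accumulated into the total
}

// unsigned count = 0;
// size_t total = emitGenEpilogLst(recordEpilogOffset, &count); // total == 4 * number of epilogs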
@@ -1254,9 +1260,9 @@ size_t emitter::emitGenEpilogLst(size_t (*fp)(void *, unsigned),
* The following series of methods allocates instruction descriptors.
*/
-void * emitter::emitAllocInstr(size_t sz, emitAttr opsz)
+void* emitter::emitAllocInstr(size_t sz, emitAttr opsz)
{
- instrDesc * id;
+ instrDesc* id;
#ifdef DEBUG
// Under STRESS_EMITTER, put every instruction in its own instruction group.
@@ -1267,15 +1273,12 @@ void * emitter::emitAllocInstr(size_t sz, emitAttr opsz)
// these groups cannot be more than a single instruction group. Note that
// the prolog/epilog placeholder groups ARE generated in order, and are
// re-used. But generating additional groups would not work.
- if (emitComp->compStressCompile(Compiler::STRESS_EMITTER, 1)
- && emitCurIGinsCnt
- && !emitIGisInProlog(emitCurIG)
- && !emitIGisInEpilog(emitCurIG)
+ if (emitComp->compStressCompile(Compiler::STRESS_EMITTER, 1) && emitCurIGinsCnt && !emitIGisInProlog(emitCurIG) &&
+ !emitIGisInEpilog(emitCurIG)
#if FEATURE_EH_FUNCLETS
- && !emitIGisInFuncletProlog(emitCurIG)
- && !emitIGisInFuncletEpilog(emitCurIG)
+ && !emitIGisInFuncletProlog(emitCurIG) && !emitIGisInFuncletEpilog(emitCurIG)
#endif // FEATURE_EH_FUNCLETS
- )
+ )
{
emitNxtIG(true);
}
@@ -1286,18 +1289,17 @@ void * emitter::emitAllocInstr(size_t sz, emitAttr opsz)
// ARM - This is currently broken on _TARGET_ARM_
// When nopSize is odd we misalign emitCurIGsize
//
- if (!(emitComp->opts.eeFlags & CORJIT_FLG_PREJIT)
- && !emitInInstrumentation
- && !emitIGisInProlog(emitCurIG) // don't do this in prolog or epilog
- && !emitIGisInEpilog(emitCurIG)
- && emitRandomNops // sometimes we turn off where exact codegen is needed (pinvoke inline)
+ if (!(emitComp->opts.eeFlags & CORJIT_FLG_PREJIT) && !emitInInstrumentation &&
+ !emitIGisInProlog(emitCurIG) // don't do this in prolog or epilog
+ && !emitIGisInEpilog(emitCurIG) &&
+ emitRandomNops // sometimes we turn off where exact codegen is needed (pinvoke inline)
)
{
if (emitNextNop == 0)
{
- int nopSize = 4;
+ int nopSize = 4;
emitInInstrumentation = true;
- instrDesc *idnop = emitNewInstr();
+ instrDesc* idnop = emitNewInstr();
emitInInstrumentation = false;
idnop->idInsFmt(IF_NONE);
idnop->idIns(INS_nop);
@@ -1319,8 +1321,7 @@ void * emitter::emitAllocInstr(size_t sz, emitAttr opsz)
/* Make sure we have enough space for the new instruction */
- if ((emitCurIGfreeNext + sz >= emitCurIGfreeEndp) ||
- emitForceNewIG)
+ if ((emitCurIGfreeNext + sz >= emitCurIGfreeEndp) || emitForceNewIG)
{
emitNxtIG(true);
}
@@ -1342,41 +1343,39 @@ void * emitter::emitAllocInstr(size_t sz, emitAttr opsz)
#if HAS_TINY_DESC
/* Is the second area to be cleared actually present? */
- if (sz >= SMALL_IDSC_SIZE)
+ if (sz >= SMALL_IDSC_SIZE)
{
/* Clear the second 4 bytes, or the 'SMALL' part */
*(int*)((BYTE*)id + (SMALL_IDSC_SIZE - sizeof(int))) = 0;
// These fields should have been zero-ed by the above
- assert(id->idIsLargeCns() == false);
- assert(id->idIsLargeDsp() == false);
+ assert(id->idIsLargeCns() == false);
+ assert(id->idIsLargeDsp() == false);
assert(id->idIsLargeCall() == false);
}
#endif
-
// Make sure that idAddrUnion is just a union of various pointer sized things
- C_ASSERT(sizeof(CORINFO_FIELD_HANDLE) <= sizeof(void*));
- C_ASSERT(sizeof(CORINFO_METHOD_HANDLE) <= sizeof(void*));
- C_ASSERT(sizeof(emitter::emitAddrMode) <= sizeof(void*));
- C_ASSERT(sizeof(emitLclVarAddr) <= sizeof(void*));
- C_ASSERT(sizeof(emitter::instrDesc) == (SMALL_IDSC_SIZE + sizeof(void*)));
-
+ C_ASSERT(sizeof(CORINFO_FIELD_HANDLE) <= sizeof(void*));
+ C_ASSERT(sizeof(CORINFO_METHOD_HANDLE) <= sizeof(void*));
+ C_ASSERT(sizeof(emitter::emitAddrMode) <= sizeof(void*));
+ C_ASSERT(sizeof(emitLclVarAddr) <= sizeof(void*));
+ C_ASSERT(sizeof(emitter::instrDesc) == (SMALL_IDSC_SIZE + sizeof(void*)));
emitInsCount++;
#if defined(DEBUG) || defined(LATE_DISASM)
/* In debug mode we clear/set some additional fields */
- instrDescDebugInfo * info = (instrDescDebugInfo *) emitGetMem(sizeof(*info));
+ instrDescDebugInfo* info = (instrDescDebugInfo*)emitGetMem(sizeof(*info));
info->idNum = emitInsCount;
info->idSize = sz;
info->idVarRefOffs = 0;
info->idMemCookie = 0;
- info->idClsCookie = 0;
-#ifdef TRANSLATE_PDB
- info->idilStart = emitInstrDescILBase;
+ info->idClsCookie = nullptr;
+#ifdef TRANSLATE_PDB
+ info->idilStart = emitInstrDescILBase;
#endif
info->idFinallyCall = false;
info->idCatchRet = false;
@@ -1389,14 +1388,14 @@ void * emitter::emitAllocInstr(size_t sz, emitAttr opsz)
/* Store the size and handle the two special values
that indicate GCref and ByRef */
- if (EA_IS_GCREF(opsz))
+ if (EA_IS_GCREF(opsz))
{
/* A special value indicates a GCref pointer value */
id->idGCref(GCT_GCREF);
id->idOpSize(EA_PTRSIZE);
}
- else if (EA_IS_BYREF(opsz))
+ else if (EA_IS_BYREF(opsz))
{
/* A special value indicates a Byref pointer value */
@@ -1409,14 +1408,13 @@ void * emitter::emitAllocInstr(size_t sz, emitAttr opsz)
id->idOpSize(EA_SIZE(opsz));
}
-
#if RELOC_SUPPORT
// Amd64: ip-relative addressing is supported even when not generating relocatable ngen code
- if (EA_IS_DSP_RELOC(opsz)
+ if (EA_IS_DSP_RELOC(opsz)
#ifndef _TARGET_AMD64_
- && emitComp->opts.compReloc
+ && emitComp->opts.compReloc
#endif //_TARGET_AMD64_
- )
+ )
{
/* Mark idInfo()->idDspReloc to remember that the */
/* address mode has a displacement that is relocatable */
@@ -1431,7 +1429,7 @@ void * emitter::emitAllocInstr(size_t sz, emitAttr opsz)
}
#endif
-#if EMITTER_STATS
+#if EMITTER_STATS
emitTotalInsCnt++;
#endif
@@ -1439,40 +1437,34 @@ void * emitter::emitAllocInstr(size_t sz, emitAttr opsz)
emitCurIGinsCnt++;
- return id;
+ return id;
}
-
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* Make sure the code offsets of all instruction groups look reasonable.
*/
-void emitter::emitCheckIGoffsets()
+void emitter::emitCheckIGoffsets()
{
- insGroup * tempIG;
- size_t offsIG;
+ insGroup* tempIG;
+ size_t offsIG;
- for (tempIG = emitIGlist, offsIG = 0;
- tempIG;
- tempIG = tempIG->igNext)
+ for (tempIG = emitIGlist, offsIG = 0; tempIG; tempIG = tempIG->igNext)
{
- if (tempIG->igOffs != offsIG)
+ if (tempIG->igOffs != offsIG)
{
- printf("Block #%u has offset %08X, expected %08X\n", tempIG->igNum,
- tempIG->igOffs,
- offsIG);
+ printf("Block #%u has offset %08X, expected %08X\n", tempIG->igNum, tempIG->igOffs, offsIG);
assert(!"bad block offset");
}
offsIG += tempIG->igSize;
}
- if (emitTotalCodeSize && emitTotalCodeSize != offsIG)
+ if (emitTotalCodeSize && emitTotalCodeSize != offsIG)
{
- printf("Total code size is %08X, expected %08X\n", emitTotalCodeSize,
- offsIG);
+ printf("Total code size is %08X, expected %08X\n", emitTotalCodeSize, offsIG);
assert(!"bad total code size");
}
@@ -1485,7 +1477,7 @@ void emitter::emitCheckIGoffsets()
* Begin generating a method prolog.
*/
-void emitter::emitBegProlog()
+void emitter::emitBegProlog()
{
assert(emitComp->compGeneratingProlog);
@@ -1499,7 +1491,7 @@ void emitter::emitBegProlog()
#endif
- emitNoGCIG = true;
+ emitNoGCIG = true;
emitForceNewIG = false;
/* Switch to the pre-allocated prolog IG */
@@ -1511,10 +1503,10 @@ void emitter::emitBegProlog()
// These were initialized to Empty at the start of compilation.
VarSetOps::ClearD(emitComp, emitInitGCrefVars);
VarSetOps::ClearD(emitComp, emitPrevGCrefVars);
- emitInitGCrefRegs = RBM_NONE;
- emitPrevGCrefRegs = RBM_NONE;
- emitInitByrefRegs = RBM_NONE;
- emitPrevByrefRegs = RBM_NONE;
+ emitInitGCrefRegs = RBM_NONE;
+ emitPrevGCrefRegs = RBM_NONE;
+ emitInitByrefRegs = RBM_NONE;
+ emitPrevByrefRegs = RBM_NONE;
}
/*****************************************************************************
@@ -1522,14 +1514,14 @@ void emitter::emitBegProlog()
* Return the code offset of the current location in the prolog.
*/
-unsigned emitter::emitGetPrologOffsetEstimate()
+unsigned emitter::emitGetPrologOffsetEstimate()
{
/* For now only allow a single prolog ins group */
assert(emitPrologIG);
assert(emitPrologIG == emitCurIG);
- return emitCurIGsize;
+ return emitCurIGsize;
}
/*****************************************************************************
@@ -1538,7 +1530,7 @@ unsigned emitter::emitGetPrologOffsetEstimate()
* so it can be used later to compute the actual size of the prolog.
*/
-void emitter::emitMarkPrologEnd()
+void emitter::emitMarkPrologEnd()
{
assert(emitComp->compGeneratingProlog);
@@ -1555,20 +1547,22 @@ void emitter::emitMarkPrologEnd()
* Finish generating a method prolog.
*/
-void emitter::emitEndProlog()
+void emitter::emitEndProlog()
{
assert(emitComp->compGeneratingProlog);
- size_t prolSz;
+ size_t prolSz;
- insGroup * tempIG;
+ insGroup* tempIG;
emitNoGCIG = false;
/* Save the prolog IG if non-empty or if only one block */
- if (emitCurIGnonEmpty() || emitCurIG == emitPrologIG)
+ if (emitCurIGnonEmpty() || emitCurIG == emitPrologIG)
+ {
emitSavIG();
+ }
#if EMIT_TRACK_STACK_DEPTH
/* Reset the stack depth values */
@@ -1578,21 +1572,20 @@ void emitter::emitEndProlog()
#endif
}
-
/*****************************************************************************
*
* Create a placeholder instruction group to be used by a prolog or epilog,
* either for the main function, or a funclet.
*/
-void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType,
- BasicBlock* igBB,
- VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- bool last)
+void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igType,
+ BasicBlock* igBB,
+ VARSET_VALARG_TP GCvars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ bool last)
{
- assert(igBB != NULL);
+ assert(igBB != nullptr);
bool emitAdd = false;
@@ -1605,12 +1598,14 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igT
#ifdef _TARGET_AMD64_
emitOutputPreEpilogNOP();
#endif // _TARGET_AMD64_
-
+
emitAdd = true;
}
if (emitCurIGnonEmpty())
+ {
emitNxtIG(emitAdd);
+ }
/* Update GC tracking for the beginning of the placeholder IG */
@@ -1624,7 +1619,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igT
/* Convert the group to a placeholder group */
- insGroup * igPh = emitCurIG;
+ insGroup* igPh = emitCurIG;
igPh->igFlags |= IGF_PLACEHOLDER;
@@ -1642,9 +1637,9 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igT
igPh->igPhData = new (emitComp, CMK_InstDesc) insPlaceholderGroupData;
- igPh->igPhData->igPhNext = NULL;
- igPh->igPhData->igPhType = igType;
- igPh->igPhData->igPhBB = igBB;
+ igPh->igPhData->igPhNext = nullptr;
+ igPh->igPhData->igPhType = igType;
+ igPh->igPhData->igPhBB = igBB;
VarSetOps::AssignNoCopy(emitComp, igPh->igPhData->igPhPrevGCrefVars, VarSetOps::UninitVal());
VarSetOps::Assign(emitComp, igPh->igPhData->igPhPrevGCrefVars, emitPrevGCrefVars);
@@ -1684,16 +1679,20 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igT
/* Link it into the placeholder list */
if (emitPlaceholderList)
+ {
emitPlaceholderLast->igPhData->igPhNext = igPh;
+ }
else
- emitPlaceholderList = igPh;
+ {
+ emitPlaceholderList = igPh;
+ }
emitPlaceholderLast = igPh;
// Give an estimated size of this placeholder IG and
// increment emitCurCodeOffset since we are not calling emitNewIG()
//
- emitCurIGsize += MAX_PLACEHOLDER_IG_SIZE;
+ emitCurIGsize += MAX_PLACEHOLDER_IG_SIZE;
emitCurCodeOffset += emitCurIGsize;
#ifdef DEBUGGING_SUPPORT
@@ -1707,11 +1706,11 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igT
{
if (igType == IGPT_FUNCLET_PROLOG)
{
- codeGen->genIPmappingAdd((IL_OFFSETX) ICorDebugInfo::PROLOG, true);
+ codeGen->genIPmappingAdd((IL_OFFSETX)ICorDebugInfo::PROLOG, true);
}
else if (igType == IGPT_FUNCLET_EPILOG)
{
- codeGen->genIPmappingAdd((IL_OFFSETX) ICorDebugInfo::EPILOG, true);
+ codeGen->genIPmappingAdd((IL_OFFSETX)ICorDebugInfo::EPILOG, true);
}
}
#endif // FEATURE_EH_FUNCLETS
@@ -1722,7 +1721,7 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igT
if (last)
{
- emitCurIG = NULL;
+ emitCurIG = nullptr;
}
else
{
@@ -1773,26 +1772,24 @@ void emitter::emitCreatePlaceholderIG(insGroupPlaceholderType igT
* Generate all prologs and epilogs
*/
-void emitter::emitGeneratePrologEpilog()
+void emitter::emitGeneratePrologEpilog()
{
-#ifdef DEBUG
- unsigned prologCnt = 0;
- unsigned epilogCnt = 0;
+#ifdef DEBUG
+ unsigned prologCnt = 0;
+ unsigned epilogCnt = 0;
#if FEATURE_EH_FUNCLETS
- unsigned funcletPrologCnt = 0;
- unsigned funcletEpilogCnt = 0;
+ unsigned funcletPrologCnt = 0;
+ unsigned funcletEpilogCnt = 0;
#endif // FEATURE_EH_FUNCLETS
#endif // DEBUG
- insGroup* igPh;
- insGroup* igPhNext;
+ insGroup* igPh;
+ insGroup* igPhNext;
// Generating the prolog/epilog is going to destroy the placeholder group,
// so save the "next" pointer before that happens.
- for (igPh = emitPlaceholderList;
- igPh != NULL;
- igPh = igPhNext)
+ for (igPh = emitPlaceholderList; igPh != nullptr; igPh = igPhNext)
{
assert(igPh->igFlags & IGF_PLACEHOLDER);
@@ -1802,40 +1799,41 @@ void emitter::emitGeneratePrologEpilog()
switch (igPh->igPhData->igPhType)
{
- case IGPT_PROLOG: // currently unused
- INDEBUG(++prologCnt);
- break;
+ case IGPT_PROLOG: // currently unused
+ INDEBUG(++prologCnt);
+ break;
- case IGPT_EPILOG:
- INDEBUG(++epilogCnt);
- emitBegFnEpilog(igPh);
- codeGen->genFnEpilog(igPhBB);
- emitEndFnEpilog();
- break;
+ case IGPT_EPILOG:
+ INDEBUG(++epilogCnt);
+ emitBegFnEpilog(igPh);
+ codeGen->genFnEpilog(igPhBB);
+ emitEndFnEpilog();
+ break;
#if FEATURE_EH_FUNCLETS
- case IGPT_FUNCLET_PROLOG:
- INDEBUG(++funcletPrologCnt);
- emitBegFuncletProlog(igPh);
- codeGen->genFuncletProlog(igPhBB);
- emitEndFuncletProlog();
- break;
+ case IGPT_FUNCLET_PROLOG:
+ INDEBUG(++funcletPrologCnt);
+ emitBegFuncletProlog(igPh);
+ codeGen->genFuncletProlog(igPhBB);
+ emitEndFuncletProlog();
+ break;
- case IGPT_FUNCLET_EPILOG:
- INDEBUG(++funcletEpilogCnt);
- emitBegFuncletEpilog(igPh);
- codeGen->genFuncletEpilog();
- emitEndFuncletEpilog();
- break;
+ case IGPT_FUNCLET_EPILOG:
+ INDEBUG(++funcletEpilogCnt);
+ emitBegFuncletEpilog(igPh);
+ codeGen->genFuncletEpilog();
+ emitEndFuncletEpilog();
+ break;
#endif // FEATURE_EH_FUNCLETS
- default: unreached();
+ default:
+ unreached();
}
}
-#ifdef DEBUG
+#ifdef DEBUG
if (emitComp->verbose)
{
printf("%d prologs, %d epilogs", prologCnt, epilogCnt);
@@ -1844,9 +1842,9 @@ void emitter::emitGeneratePrologEpilog()
#endif // FEATURE_EH_FUNCLETS
printf("\n");
- // prolog/epilog code doesn't use this yet
- // noway_assert(prologCnt == 1);
- // noway_assert(epilogCnt == emitEpilogCnt); // Is this correct?
+// prolog/epilog code doesn't use this yet
+// noway_assert(prologCnt == 1);
+// noway_assert(epilogCnt == emitEpilogCnt); // Is this correct?
#if FEATURE_EH_FUNCLETS
assert(funcletPrologCnt == emitComp->ehFuncletCount());
#endif // FEATURE_EH_FUNCLETS
@@ -1854,29 +1852,31 @@ void emitter::emitGeneratePrologEpilog()
#endif // DEBUG
}
-
/*****************************************************************************
*
* Begin all prolog and epilog generation
*/
-void emitter::emitStartPrologEpilogGeneration()
+void emitter::emitStartPrologEpilogGeneration()
{
/* Save the current IG if it's non-empty */
- if (emitCurIGnonEmpty())
+ if (emitCurIGnonEmpty())
+ {
emitSavIG();
+ }
else
- assert(emitCurIG == NULL);
+ {
+ assert(emitCurIG == nullptr);
+ }
}
-
/*****************************************************************************
*
* Finish all prolog and epilog generation
*/
-void emitter::emitFinishPrologEpilogGeneration()
+void emitter::emitFinishPrologEpilogGeneration()
{
/* Update the offsets of all the blocks */
@@ -1884,24 +1884,25 @@ void emitter::emitFinishPrologEpilogGeneration()
/* We should not generate any more code after this */
- emitCurIG = NULL;
+ emitCurIG = nullptr;
}
-
/*****************************************************************************
*
* Common code for prolog / epilog beginning. Convert the placeholder group to actual code IG,
* and set it as the current group.
*/
-void emitter::emitBegPrologEpilog(insGroup* igPh)
+void emitter::emitBegPrologEpilog(insGroup* igPh)
{
assert(igPh->igFlags & IGF_PLACEHOLDER);
/* Save the current IG if it's non-empty */
- if (emitCurIGnonEmpty())
+ if (emitCurIGnonEmpty())
+ {
emitSavIG();
+ }
/* Convert the placeholder group to a normal group.
* We need to be very careful to re-initialize the IG properly.
@@ -1911,7 +1912,7 @@ void emitter::emitBegPrologEpilog(insGroup* igPh)
*/
igPh->igFlags &= ~IGF_PLACEHOLDER;
- emitNoGCIG = true;
+ emitNoGCIG = true;
emitForceNewIG = false;
/* Set up the GC info that we stored in the placeholder */
@@ -1925,7 +1926,7 @@ void emitter::emitBegPrologEpilog(insGroup* igPh)
emitThisGCrefRegs = emitInitGCrefRegs = igPh->igPhData->igPhInitGCrefRegs;
emitThisByrefRegs = emitInitByrefRegs = igPh->igPhData->igPhInitByrefRegs;
- igPh->igPhData = NULL;
+ igPh->igPhData = nullptr;
/* Create a non-placeholder group pointer that we'll now use */
@@ -1955,14 +1956,16 @@ void emitter::emitBegPrologEpilog(insGroup* igPh)
* Common code for end of prolog / epilog
*/
-void emitter::emitEndPrologEpilog()
+void emitter::emitEndPrologEpilog()
{
emitNoGCIG = false;
/* Save the IG if non-empty */
- if (emitCurIGnonEmpty())
+ if (emitCurIGnonEmpty())
+ {
emitSavIG();
+ }
assert(emitCurIGsize <= MAX_PLACEHOLDER_IG_SIZE);
@@ -1974,13 +1977,12 @@ void emitter::emitEndPrologEpilog()
#endif
}
-
/*****************************************************************************
*
* Begin generating a main function epilog.
*/
-void emitter::emitBegFnEpilog(insGroup* igPh)
+void emitter::emitBegFnEpilog(insGroup* igPh)
{
emitEpilogCnt++;
@@ -1989,8 +1991,8 @@ void emitter::emitBegFnEpilog(insGroup* igPh)
#ifdef JIT32_GCENCODER
EpilogList* el = new (emitComp, CMK_GC) EpilogList;
- el->elNext = NULL;
- el->elIG = emitCurIG;
+ el->elNext = NULL;
+ el->elIG = emitCurIG;
if (emitEpilogLast)
emitEpilogLast->elNext = el;
@@ -2011,16 +2013,16 @@ void emitter::emitBegFnEpilog(insGroup* igPh)
* Finish generating a funclet epilog.
*/
-void emitter::emitEndFnEpilog()
+void emitter::emitEndFnEpilog()
{
emitEndPrologEpilog();
UNATIVE_OFFSET newSize;
- UNATIVE_OFFSET epilogBegCodeOffset = emitEpilogBegLoc.CodeOffset(this);
+ UNATIVE_OFFSET epilogBegCodeOffset = emitEpilogBegLoc.CodeOffset(this);
#ifdef _TARGET_XARCH_
UNATIVE_OFFSET epilogExitSeqStartCodeOffset = emitExitSeqBegLoc.CodeOffset(this);
#else
- UNATIVE_OFFSET epilogExitSeqStartCodeOffset = emitCodeOffset(emitCurIG, emitCurOffset());
+ UNATIVE_OFFSET epilogExitSeqStartCodeOffset = emitCodeOffset(emitCurIG, emitCurOffset());
#endif
newSize = epilogExitSeqStartCodeOffset - epilogBegCodeOffset;
@@ -2030,32 +2032,30 @@ void emitter::emitEndFnEpilog()
/* Compute total epilog size */
assert(emitEpilogSize == 0 || emitEpilogSize == newSize); // All epilogs must be identical
- emitEpilogSize = newSize;
+ emitEpilogSize = newSize;
UNATIVE_OFFSET epilogEndCodeOffset = emitCodeOffset(emitCurIG, emitCurOffset());
assert(epilogExitSeqStartCodeOffset != epilogEndCodeOffset);
newSize = epilogEndCodeOffset - epilogExitSeqStartCodeOffset;
- if (newSize < emitExitSeqSize)
+ if (newSize < emitExitSeqSize)
{
// We expect either the epilog to be the same every time, or that
// one will be a ret or a ret <n> and others will be a jmp addr or jmp [addr];
// we make the epilogs the minimum of these. Note that this ONLY works
- // because the only instruction is the last one and thus a slight
- // underestimation of the epilog size is harmless (since the EIP
+ // because the only instruction is the last one and thus a slight
+ // underestimation of the epilog size is harmless (since the EIP
// can not be between instructions).
- assert(emitEpilogCnt == 1 ||
- (emitExitSeqSize - newSize) <= 5 // delta between size of various forms of jmp (size is either 6 or 5)
+ assert(emitEpilogCnt == 1 ||
+ (emitExitSeqSize - newSize) <= 5 // delta between size of various forms of jmp (size is either 6 or 5)
// and various forms of ret (size is either 1 or 3). The combination can
// be anything between 1 and 5.
- );
+ );
emitExitSeqSize = newSize;
}
#endif // _TARGET_X86_
-
}
-
#if FEATURE_EH_FUNCLETS
/*****************************************************************************
@@ -2063,7 +2063,7 @@ void emitter::emitEndFnEpilog()
* Begin generating a funclet prolog.
*/
-void emitter::emitBegFuncletProlog(insGroup* igPh)
+void emitter::emitBegFuncletProlog(insGroup* igPh)
{
emitBegPrologEpilog(igPh);
}
@@ -2073,7 +2073,7 @@ void emitter::emitBegFuncletProlog(insGroup* igPh)
* Finish generating a funclet prolog.
*/
-void emitter::emitEndFuncletProlog()
+void emitter::emitEndFuncletProlog()
{
emitEndPrologEpilog();
}
@@ -2083,7 +2083,7 @@ void emitter::emitEndFuncletProlog()
* Begin generating a funclet epilog.
*/
-void emitter::emitBegFuncletEpilog(insGroup* igPh)
+void emitter::emitBegFuncletEpilog(insGroup* igPh)
{
emitBegPrologEpilog(igPh);
}
@@ -2093,7 +2093,7 @@ void emitter::emitBegFuncletEpilog(insGroup* igPh)
* Finish generating a funclet epilog.
*/
-void emitter::emitEndFuncletEpilog()
+void emitter::emitEndFuncletEpilog()
{
emitEndPrologEpilog();
}
@@ -2108,12 +2108,12 @@ void emitter::emitEndFuncletEpilog()
* at the very end of the method body.
*/
-bool emitter::emitHasEpilogEnd()
+bool emitter::emitHasEpilogEnd()
{
- if (emitEpilogCnt == 1 && (emitIGlast->igFlags & IGF_EPILOG)) // This wouldn't work for funclets
- return true;
+ if (emitEpilogCnt == 1 && (emitIGlast->igFlags & IGF_EPILOG)) // This wouldn't work for funclets
+ return true;
else
- return false;
+ return false;
}
#endif // JIT32_GCENCODER
@@ -2125,7 +2125,7 @@ bool emitter::emitHasEpilogEnd()
* Mark the beginning of the epilog exit sequence by remembering our position.
*/
-void emitter::emitStartExitSeq()
+void emitter::emitStartExitSeq()
{
assert(emitComp->compGeneratingEpilog);
@@ -2144,12 +2144,12 @@ void emitter::emitStartExitSeq()
* offsHi - The FP offset at which the GC pointer region ends (exclusive).
*/
-void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi)
+void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi)
{
assert(emitComp->compGeneratingProlog);
assert(offsHi > offsLo);
-#ifdef DEBUG
+#ifdef DEBUG
// A total of 47254 methods compiled.
//
@@ -2167,19 +2167,19 @@ void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi)
// 257 .. 512 ===> 4 count (100% of total)
// 513 .. 1024 ===> 0 count (100% of total)
- if (emitComp->verbose)
+ if (emitComp->verbose)
{
unsigned count = (offsHi - offsLo) / sizeof(void*);
printf("%u tracked GC refs are at stack offsets ", count);
- if (offsLo >= 0)
+ if (offsLo >= 0)
{
- printf(" %04X ... %04X\n", offsLo, offsHi);
+ printf(" %04X ... %04X\n", offsLo, offsHi);
assert(offsHi >= 0);
}
else
#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED)
- if (!emitComp->compIsProfilerHookNeeded())
+ if (!emitComp->compIsProfilerHookNeeded())
#endif
{
#ifdef _TARGET_AMD64_
@@ -2189,7 +2189,6 @@ void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi)
printf("-%04X ... -%04X\n", -offsLo, -offsHi);
assert(offsHi <= 0);
#endif
-
}
#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED)
else
@@ -2201,7 +2200,6 @@ void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi)
printf("-%04X ... %04X\n", -offsLo, offsHi);
}
#endif
-
}
#endif // DEBUG
@@ -2221,7 +2219,7 @@ void emitter::emitSetFrameRangeGCRs(int offsLo, int offsHi)
* method.
*/
-void emitter::emitSetFrameRangeLcls(int offsLo, int offsHi)
+void emitter::emitSetFrameRangeLcls(int offsLo, int offsHi)
{
}
@@ -2231,7 +2229,7 @@ void emitter::emitSetFrameRangeLcls(int offsLo, int offsHi)
* method.
*/
-void emitter::emitSetFrameRangeArgs(int offsLo, int offsHi)
+void emitter::emitSetFrameRangeArgs(int offsLo, int offsHi)
{
}
@@ -2241,51 +2239,16 @@ void emitter::emitSetFrameRangeArgs(int offsLo, int offsHi)
* small encoding (0 through 3), and vice versa.
*/
-const emitter::opSize emitter::emitSizeEncode[] =
-{
- emitter::OPSZ1,
- emitter::OPSZ2,
- OPSIZE_INVALID,
- emitter::OPSZ4,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- emitter::OPSZ8,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- emitter::OPSZ16,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- OPSIZE_INVALID,
- emitter::OPSZ32,
+const emitter::opSize emitter::emitSizeEncode[] = {
+ emitter::OPSZ1, emitter::OPSZ2, OPSIZE_INVALID, emitter::OPSZ4, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID,
+ emitter::OPSZ8, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID,
+ OPSIZE_INVALID, emitter::OPSZ16, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID,
+ OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID,
+ OPSIZE_INVALID, OPSIZE_INVALID, OPSIZE_INVALID, emitter::OPSZ32,
};
-const emitAttr emitter::emitSizeDecode[emitter::OPSZ_COUNT] =
-{
- EA_1BYTE,
- EA_2BYTE,
- EA_4BYTE,
- EA_8BYTE,
- EA_16BYTE,
- EA_32BYTE
-};
+const emitAttr emitter::emitSizeDecode[emitter::OPSZ_COUNT] = {EA_1BYTE, EA_2BYTE, EA_4BYTE,
+ EA_8BYTE, EA_16BYTE, EA_32BYTE};
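A worked example of the two tables above (illustrative; it assumes the opSize enumerators are numbered in emitSizeDecode order, i.e. OPSZ1 == 0 through OPSZ32 == 5):

// emitSizeEncode is indexed by (size in bytes - 1); emitSizeDecode inverts it:
//
//   emitSizeEncode[1 - 1]  == emitter::OPSZ1      emitSizeDecode[emitter::OPSZ1]  == EA_1BYTE
//   emitSizeEncode[4 - 1]  == emitter::OPSZ4      emitSizeDecode[emitter::OPSZ4]  == EA_4BYTE
//   emitSizeEncode[32 - 1] == emitter::OPSZ32     emitSizeDecode[emitter::OPSZ32] == EA_32BYTE
//
// Every index that is not one less than a power of two maps to OPSIZE_INVALID, so only
// operand sizes of 1, 2, 4, 8, 16 and 32 bytes round-trip through the small encoding.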
/*****************************************************************************
*
@@ -2293,13 +2256,13 @@ const emitAttr emitter::emitSizeDecode[emitter::OPSZ_COUNT] =
* a displacement and a constant.
*/
-emitter::instrDesc * emitter::emitNewInstrCnsDsp(emitAttr size, ssize_t cns, int dsp)
+emitter::instrDesc* emitter::emitNewInstrCnsDsp(emitAttr size, ssize_t cns, int dsp)
{
- if (dsp == 0)
+ if (dsp == 0)
{
if (instrDesc::fitsInSmallCns(cns))
{
- instrDesc *id = emitAllocInstr (size);
+ instrDesc* id = emitAllocInstr(size);
id->idSmallCns(cns);
@@ -2309,31 +2272,31 @@ emitter::instrDesc * emitter::emitNewInstrCnsDsp(emitAttr size, ssize_t cns, in
emitSmallDspCnt++;
#endif
- return id;
+ return id;
}
else
{
- instrDescCns *id = emitAllocInstrCns (size);
+ instrDescCns* id = emitAllocInstrCns(size);
id->idSetIsLargeCns();
- id->idcCnsVal = cns;
+ id->idcCnsVal = cns;
#if EMITTER_STATS
emitLargeCnsCnt++;
emitSmallDspCnt++;
#endif
- return id;
+ return id;
}
}
else
{
if (instrDesc::fitsInSmallCns(cns))
{
- instrDescDsp *id = emitAllocInstrDsp (size);
+ instrDescDsp* id = emitAllocInstrDsp(size);
id->idSetIsLargeDsp();
- id->iddDspVal = dsp;
+ id->iddDspVal = dsp;
id->idSmallCns(cns);
@@ -2343,11 +2306,11 @@ emitter::instrDesc * emitter::emitNewInstrCnsDsp(emitAttr size, ssize_t cns, in
emitSmallCns[cns - ID_MIN_SMALL_CNS]++;
#endif
- return id;
+ return id;
}
else
{
- instrDescCnsDsp*id = emitAllocInstrCnsDsp(size);
+ instrDescCnsDsp* id = emitAllocInstrCnsDsp(size);
id->idSetIsLargeCns();
id->iddcCnsVal = cns;
@@ -2360,7 +2323,7 @@ emitter::instrDesc * emitter::emitNewInstrCnsDsp(emitAttr size, ssize_t cns, in
emitLargeCnsCnt++;
#endif
- return id;
+ return id;
}
}
}
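For readability, the descriptor selection in emitNewInstrCnsDsp() can be restated as a table (this adds no behavior; it only summarizes the branches above):

//   dsp == 0, cns fits small   ->  instrDesc        (constant stored via idSmallCns)
//   dsp == 0, cns is large     ->  instrDescCns     (idSetIsLargeCns, idcCnsVal = cns)
//   dsp != 0, cns fits small   ->  instrDescDsp     (idSetIsLargeDsp, iddDspVal = dsp, small cns)
//   dsp != 0, cns is large     ->  instrDescCnsDsp  (both large; iddcCnsVal = cns, dsp stored likewise)
//
// In short: the smallest descriptor that can still hold both the constant and the displacement.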
@@ -2371,23 +2334,23 @@ emitter::instrDesc * emitter::emitNewInstrCnsDsp(emitAttr size, ssize_t cns, in
* Don't need to record live pointers for such call sites.
*/
-bool emitter::emitNoGChelper(unsigned IHX)
+bool emitter::emitNoGChelper(unsigned IHX)
{
// TODO-Throughput: Make this faster (maybe via a simple table of bools?)
switch (IHX)
{
- case CORINFO_HELP_UNDEF:
- return false;
+ case CORINFO_HELP_UNDEF:
+ return false;
- case CORINFO_HELP_PROF_FCN_LEAVE:
- case CORINFO_HELP_PROF_FCN_ENTER:
+ case CORINFO_HELP_PROF_FCN_LEAVE:
+ case CORINFO_HELP_PROF_FCN_ENTER:
#ifdef _TARGET_AMD64_
- case CORINFO_HELP_PROF_FCN_TAILCALL:
+ case CORINFO_HELP_PROF_FCN_TAILCALL:
#endif
- case CORINFO_HELP_LLSH:
- case CORINFO_HELP_LRSH:
- case CORINFO_HELP_LRSZ:
+ case CORINFO_HELP_LLSH:
+ case CORINFO_HELP_LRSH:
+ case CORINFO_HELP_LRSZ:
// case CORINFO_HELP_LMUL:
// case CORINFO_HELP_LDIV:
@@ -2396,37 +2359,37 @@ bool emitter::emitNoGChelper(unsigned IHX)
// case CORINFO_HELP_ULMOD:
#ifdef _TARGET_X86_
- case CORINFO_HELP_ASSIGN_REF_EAX:
- case CORINFO_HELP_ASSIGN_REF_ECX:
- case CORINFO_HELP_ASSIGN_REF_EBX:
- case CORINFO_HELP_ASSIGN_REF_EBP:
- case CORINFO_HELP_ASSIGN_REF_ESI:
- case CORINFO_HELP_ASSIGN_REF_EDI:
-
- case CORINFO_HELP_CHECKED_ASSIGN_REF_EAX:
- case CORINFO_HELP_CHECKED_ASSIGN_REF_ECX:
- case CORINFO_HELP_CHECKED_ASSIGN_REF_EBX:
- case CORINFO_HELP_CHECKED_ASSIGN_REF_EBP:
- case CORINFO_HELP_CHECKED_ASSIGN_REF_ESI:
- case CORINFO_HELP_CHECKED_ASSIGN_REF_EDI:
+ case CORINFO_HELP_ASSIGN_REF_EAX:
+ case CORINFO_HELP_ASSIGN_REF_ECX:
+ case CORINFO_HELP_ASSIGN_REF_EBX:
+ case CORINFO_HELP_ASSIGN_REF_EBP:
+ case CORINFO_HELP_ASSIGN_REF_ESI:
+ case CORINFO_HELP_ASSIGN_REF_EDI:
+
+ case CORINFO_HELP_CHECKED_ASSIGN_REF_EAX:
+ case CORINFO_HELP_CHECKED_ASSIGN_REF_ECX:
+ case CORINFO_HELP_CHECKED_ASSIGN_REF_EBX:
+ case CORINFO_HELP_CHECKED_ASSIGN_REF_EBP:
+ case CORINFO_HELP_CHECKED_ASSIGN_REF_ESI:
+ case CORINFO_HELP_CHECKED_ASSIGN_REF_EDI:
#endif
- case CORINFO_HELP_ASSIGN_REF:
+ case CORINFO_HELP_ASSIGN_REF:
+
+ case CORINFO_HELP_CHECKED_ASSIGN_REF:
- case CORINFO_HELP_CHECKED_ASSIGN_REF:
+ case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
- case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
-
- case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR:
+ case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR:
- case CORINFO_HELP_ASSIGN_BYREF:
+ case CORINFO_HELP_ASSIGN_BYREF:
- case CORINFO_HELP_INIT_PINVOKE_FRAME:
+ case CORINFO_HELP_INIT_PINVOKE_FRAME:
- return true;
+ return true;
}
- return false;
+ return false;
}
/*****************************************************************************
@@ -2434,15 +2397,14 @@ bool emitter::emitNoGChelper(unsigned IHX)
* Mark the current spot as having a label.
*/
-void* emitter::emitAddLabel(VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- BOOL isFinallyTarget)
+void* emitter::emitAddLabel(VARSET_VALARG_TP GCvars, regMaskTP gcrefRegs, regMaskTP byrefRegs, BOOL isFinallyTarget)
{
/* Create a new IG if the current one is non-empty */
if (emitCurIGnonEmpty())
+ {
emitNxtIG();
+ }
VarSetOps::Assign(emitComp, emitThisGCrefVars, GCvars);
VarSetOps::Assign(emitComp, emitInitGCrefVars, GCvars);
@@ -2456,31 +2418,30 @@ void* emitter::emitAddLabel(VARSET_VALARG_TP GCvars,
}
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
-#ifdef DEBUG
+#ifdef DEBUG
if (EMIT_GC_VERBOSE)
{
printf("Label: IG%02u, GCvars=%s ", emitCurIG->igNum, VarSetOps::ToString(emitComp, GCvars));
dumpConvertedVarSet(emitComp, GCvars);
printf(", gcrefRegs=");
printRegMaskInt(gcrefRegs);
- emitDispRegSet (gcrefRegs);
+ emitDispRegSet(gcrefRegs);
printf(", byrefRegs=");
printRegMaskInt(byrefRegs);
- emitDispRegSet (byrefRegs);
+ emitDispRegSet(byrefRegs);
printf("\n");
}
#endif
return emitCurIG;
}
-
#ifdef _TARGET_ARMARCH_
// Does the argument location point to an IG at the end of a function or funclet?
// We can ignore the codePos part of the location, since it doesn't affect the
// determination. If 'emitLocNextFragment' is non-NULL, it indicates the first
// IG of the next fragment, so it represents a function end.
-bool emitter::emitIsFuncEnd(emitLocation* emitLoc, emitLocation* emitLocNextFragment /* = NULL */)
+bool emitter::emitIsFuncEnd(emitLocation* emitLoc, emitLocation* emitLocNextFragment /* = NULL */)
{
assert(emitLoc);
@@ -2502,8 +2463,7 @@ bool emitter::emitIsFuncEnd(emitLocation* emitLoc, emitLocation*
#if FEATURE_EH_FUNCLETS
// Is the next IG a placeholder group for a funclet prolog?
- if ((ig->igNext->igFlags & IGF_PLACEHOLDER) &&
- (ig->igNext->igPhData->igPhType == IGPT_FUNCLET_PROLOG))
+ if ((ig->igNext->igFlags & IGF_PLACEHOLDER) && (ig->igNext->igPhData->igPhType == IGPT_FUNCLET_PROLOG))
{
return true;
}
@@ -2513,7 +2473,6 @@ bool emitter::emitIsFuncEnd(emitLocation* emitLoc, emitLocation*
return false;
}
-
/*****************************************************************************
*
* Split the region from 'startLoc' to 'endLoc' into fragments by calling
@@ -2535,20 +2494,23 @@ bool emitter::emitIsFuncEnd(emitLocation* emitLoc, emitLocation*
* If 'endLoc' is NULL, it means the end of the code.
*/
-void emitter::emitSplit(emitLocation* startLoc, emitLocation* endLoc, UNATIVE_OFFSET maxSplitSize, void* context, emitSplitCallbackType callbackFunc)
-{
- insGroup* igStart = (startLoc == NULL) ? emitIGlist : startLoc->GetIG();
- insGroup* igEnd = (endLoc == NULL) ? NULL : endLoc->GetIG();
- insGroup* igPrev;
- insGroup* ig;
- insGroup* igLastReported;
- insGroup* igLastCandidate;
- UNATIVE_OFFSET curSize;
- UNATIVE_OFFSET candidateSize;
+void emitter::emitSplit(emitLocation* startLoc,
+ emitLocation* endLoc,
+ UNATIVE_OFFSET maxSplitSize,
+ void* context,
+ emitSplitCallbackType callbackFunc)
+{
+ insGroup* igStart = (startLoc == NULL) ? emitIGlist : startLoc->GetIG();
+ insGroup* igEnd = (endLoc == NULL) ? NULL : endLoc->GetIG();
+ insGroup* igPrev;
+ insGroup* ig;
+ insGroup* igLastReported;
+ insGroup* igLastCandidate;
+ UNATIVE_OFFSET curSize;
+ UNATIVE_OFFSET candidateSize;
for (igPrev = NULL, ig = igLastReported = igStart, igLastCandidate = NULL, candidateSize = 0, curSize = 0;
- ig != igEnd && ig != NULL;
- igPrev = ig , ig = ig->igNext)
+ ig != igEnd && ig != NULL; igPrev = ig, ig = ig->igNext)
{
// Keep looking until we've gone past the maximum split size
if (curSize >= maxSplitSize)
@@ -2565,7 +2527,8 @@ void emitter::emitSplit(emitLocation* startLoc, emitLocation* endL
reportCandidate = false;
}
- // Don't report the same thing twice (this also happens for the first block, since igLastReported is initialized to igStart).
+ // Don't report the same thing twice (this also happens for the first block, since igLastReported is
+ // initialized to igStart).
if (igLastCandidate == igLastReported)
{
#ifdef DEBUG
@@ -2580,13 +2543,14 @@ void emitter::emitSplit(emitLocation* startLoc, emitLocation* endL
{
#ifdef DEBUG
if (EMITVERBOSE && (candidateSize >= maxSplitSize))
- printf("emitSplit: split at IG%02u is size %d, larger than requested maximum size of %d\n", igLastCandidate->igNum, candidateSize, maxSplitSize);
+ printf("emitSplit: split at IG%02u is size %d, larger than requested maximum size of %d\n",
+ igLastCandidate->igNum, candidateSize, maxSplitSize);
#endif
// hand memory ownership to the callback function
emitLocation* pEmitLoc = new (emitComp, CMK_Unknown) emitLocation(igLastCandidate);
callbackFunc(context, pEmitLoc);
- igLastReported = igLastCandidate;
+ igLastReported = igLastCandidate;
igLastCandidate = NULL;
curSize -= candidateSize;
}
@@ -2597,19 +2561,15 @@ void emitter::emitSplit(emitLocation* startLoc, emitLocation* endL
// IGs are marked as prolog or epilog. We don't actually know if two adjacent
// IGs are part of the *same* prolog or epilog, so we have to assume they are.
- if (igPrev &&
- (
- ((igPrev->igFlags & IGF_FUNCLET_PROLOG) && (ig->igFlags & IGF_FUNCLET_PROLOG)) ||
- ((igPrev->igFlags & IGF_EPILOG) && (ig->igFlags & IGF_EPILOG))
- )
- )
+ if (igPrev && (((igPrev->igFlags & IGF_FUNCLET_PROLOG) && (ig->igFlags & IGF_FUNCLET_PROLOG)) ||
+ ((igPrev->igFlags & IGF_EPILOG) && (ig->igFlags & IGF_EPILOG))))
{
// We can't update the candidate
}
else
{
igLastCandidate = ig;
- candidateSize = curSize;
+ candidateSize = curSize;
}
curSize += ig->igSize;
@@ -2617,7 +2577,6 @@ void emitter::emitSplit(emitLocation* startLoc, emitLocation* endL
} // end for loop
}
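A hypothetical emitSplit() caller (not part of this change), showing the callback contract visible above: each reported fragment start arrives as a heap-allocated emitLocation whose ownership passes to the callback. 'maxFragmentSize' below is a placeholder value.

struct SplitState
{
    unsigned fragmentCount;
};

// Matches emitSplitCallbackType as used above: (context, heap-allocated emitLocation*).
static void onSplitPoint(void* context, emitLocation* emitLoc)
{
    SplitState* state = static_cast<SplitState*>(context);
    state->fragmentCount++;
    // ... record emitLoc (e.g. for per-fragment unwind info); the callback now owns it.
}

// SplitState state = {0};
// emitSplit(nullptr /* from method start */, nullptr /* to end of code */,
//           maxFragmentSize, &state, onSplitPoint);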
-
/*****************************************************************************
*
* Given an instruction group, find the array of instructions (instrDesc) and
@@ -2628,24 +2587,23 @@ void emitter::emitSplit(emitLocation* startLoc, emitLocation* endL
* This function can't be called for placeholder groups, which have no instrDescs.
*/
-void emitter::emitGetInstrDescs(insGroup* ig, instrDesc** id, int* insCnt)
+void emitter::emitGetInstrDescs(insGroup* ig, instrDesc** id, int* insCnt)
{
assert(!(ig->igFlags & IGF_PLACEHOLDER));
if (ig == emitCurIG)
{
- *id = (instrDesc*)emitCurIGfreeBase;
+ *id = (instrDesc*)emitCurIGfreeBase;
*insCnt = emitCurIGinsCnt;
}
else
{
- *id = (instrDesc*)ig->igData;
+ *id = (instrDesc*)ig->igData;
*insCnt = ig->igInsCnt;
}
assert(*id);
}
-
/*****************************************************************************
*
* Given a location (an 'emitLocation'), find the instruction group (IG) and
@@ -2662,7 +2620,10 @@ void emitter::emitGetInstrDescs(insGroup* ig, instrDesc** id, int* in
* adding code, namely, the end of currently generated code.
*/
-bool emitter::emitGetLocationInfo(emitLocation* emitLoc, insGroup** pig, instrDesc** pid, int* pinsRemaining /* = NULL */)
+bool emitter::emitGetLocationInfo(emitLocation* emitLoc,
+ insGroup** pig,
+ instrDesc** pid,
+ int* pinsRemaining /* = NULL */)
{
assert(emitLoc != nullptr);
assert(emitLoc->Valid());
@@ -2670,10 +2631,10 @@ bool emitter::emitGetLocationInfo(emitLocation* emitLoc, insGroup** p
assert(pig != nullptr);
assert(pid != nullptr);
- insGroup* ig = emitLoc->GetIG();
- instrDesc* id;
- int insNum = emitLoc->GetInsNum();
- int insCnt;
+ insGroup* ig = emitLoc->GetIG();
+ instrDesc* id;
+ int insNum = emitLoc->GetInsNum();
+ int insCnt;
emitGetInstrDescs(ig, &id, &insCnt);
assert(insNum <= insCnt);
@@ -2725,7 +2686,7 @@ bool emitter::emitGetLocationInfo(emitLocation* emitLoc, insGroup** p
int i;
for (i = 0; i != insNum; ++i)
{
- castto(id, BYTE *) += emitSizeOfInsDsc(id);
+ castto(id, BYTE*) += emitSizeOfInsDsc(id);
}
// Return the info we found
@@ -2741,7 +2702,6 @@ bool emitter::emitGetLocationInfo(emitLocation* emitLoc, insGroup** p
return true;
}
-
/*****************************************************************************
*
* Compute the next instrDesc, either in this IG, or in a subsequent IG. 'id'
@@ -2750,11 +2710,11 @@ bool emitter::emitGetLocationInfo(emitLocation* emitLoc, insGroup** p
* the instructions up to the current instruction (based on 'emitCurIG').
*/
-bool emitter::emitNextID(insGroup*& ig, instrDesc*& id, int& insRemaining)
+bool emitter::emitNextID(insGroup*& ig, instrDesc*& id, int& insRemaining)
{
if (insRemaining > 0)
{
- castto(id, BYTE *) += emitSizeOfInsDsc(id);
+ castto(id, BYTE*) += emitSizeOfInsDsc(id);
--insRemaining;
return true;
}
@@ -2786,7 +2746,6 @@ bool emitter::emitNextID(insGroup*& ig, instrDesc*& id, int& insRemai
return false;
}
-
/*****************************************************************************
*
* Walk instrDesc's from the location given by 'locFrom', up to the current location.
@@ -2794,11 +2753,11 @@ bool emitter::emitNextID(insGroup*& ig, instrDesc*& id, int& insRemai
* passed through to the callback function.
*/
-void emitter::emitWalkIDs(emitLocation* locFrom, emitProcessInstrFunc_t processFunc, void* context)
+void emitter::emitWalkIDs(emitLocation* locFrom, emitProcessInstrFunc_t processFunc, void* context)
{
- insGroup * ig;
- instrDesc * id;
- int insRemaining;
+ insGroup* ig;
+ instrDesc* id;
+ int insRemaining;
if (!emitGetLocationInfo(locFrom, &ig, &id, &insRemaining))
return; // no instructions at the 'from' location
@@ -2811,13 +2770,12 @@ void emitter::emitWalkIDs(emitLocation* locFrom, emitProcessInstrFunc
} while (emitNextID(ig, id, insRemaining));
}
-
/*****************************************************************************
*
* A callback function for emitWalkIDs() that calls Compiler::unwindNop().
*/
-void emitter::emitGenerateUnwindNop(instrDesc* id, void* context)
+void emitter::emitGenerateUnwindNop(instrDesc* id, void* context)
{
Compiler* comp = (Compiler*)context;
#if defined(_TARGET_ARM_)
@@ -2827,14 +2785,13 @@ void emitter::emitGenerateUnwindNop(instrDesc* id, void* context)
#endif // defined(_TARGET_ARM64_)
}
-
/*****************************************************************************
*
* emitUnwindNopPadding: call unwindNop() for every instruction from a given
* location 'emitLoc' up to the current location.
*/
-void emitter::emitUnwindNopPadding(emitLocation* locFrom, Compiler* comp)
+void emitter::emitUnwindNopPadding(emitLocation* locFrom, Compiler* comp)
{
emitWalkIDs(locFrom, emitGenerateUnwindNop, comp);
}
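emitWalkIDs() above, like emitGenerateUnwindNop(), takes an (instrDesc*, void*) callback and invokes it for every instruction between 'locFrom' and the current location. A hypothetical client that just counts instructions (illustrative only, compiled against the emitter's own headers):

static void countInstr(emitter::instrDesc* id, void* context)
{
    (void)id; // every descriptor between the start location and 'here' passes through once
    ++(*static_cast<unsigned*>(context));
}

// unsigned cnt = 0;
// emitWalkIDs(&startLoc, countInstr, &cnt);   // cnt == instructions emitted since startLoc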
@@ -2853,32 +2810,36 @@ void emitter::emitUnwindNopPadding(emitLocation* locFrom, Compiler* c
* An instruction must exist at the specified location.
*/
-unsigned emitter::emitGetInstructionSize(emitLocation* emitLoc)
+unsigned emitter::emitGetInstructionSize(emitLocation* emitLoc)
{
- insGroup* ig;
- instrDesc* id;
+ insGroup* ig;
+ instrDesc* id;
bool anyInstrs = emitGetLocationInfo(emitLoc, &ig, &id);
- assert(anyInstrs); // There better be an instruction at this location (otherwise, we're at the end of the instruction list)
+ assert(anyInstrs); // There better be an instruction at this location (otherwise, we're at the end of the
+ // instruction list)
return id->idCodeSize();
}
#endif // defined(_TARGET_ARM_)
-
/*****************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* Returns the name for the register to use to access frame based variables
*/
-const char * emitter::emitGetFrameReg()
+const char* emitter::emitGetFrameReg()
{
if (emitHasFramePtr)
+ {
return STR_FPBASE;
+ }
else
+ {
return STR_SPBASE;
+ }
}
/*****************************************************************************
@@ -2886,22 +2847,28 @@ const char * emitter::emitGetFrameReg()
* Display a register set in a readable form.
*/
-void emitter::emitDispRegSet(regMaskTP regs)
+void emitter::emitDispRegSet(regMaskTP regs)
{
- regNumber reg;
- bool sp = false;
+ regNumber reg;
+ bool sp = false;
printf(" {");
for (reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
- if ((regs & genRegMask(reg)) == 0)
+ if ((regs & genRegMask(reg)) == 0)
+ {
continue;
+ }
- if (sp)
+ if (sp)
+ {
printf(" ");
+ }
else
+ {
sp = true;
+ }
printf("%s", emitRegName(reg));
}
@@ -2914,29 +2881,35 @@ void emitter::emitDispRegSet(regMaskTP regs)
* Display the current GC ref variable set in a readable form.
*/
-void emitter::emitDispVarSet()
+void emitter::emitDispVarSet()
{
- unsigned vn;
- int of;
- bool sp = false;
+ unsigned vn;
+ int of;
+ bool sp = false;
- for (vn = 0, of = emitGCrFrameOffsMin;
- vn < emitGCrFrameOffsCnt;
- vn += 1, of += sizeof(void *))
+ for (vn = 0, of = emitGCrFrameOffsMin; vn < emitGCrFrameOffsCnt; vn += 1, of += sizeof(void*))
{
- if (emitGCrFrameLiveTab[vn])
+ if (emitGCrFrameLiveTab[vn])
{
- if (sp)
+ if (sp)
+ {
printf(" ");
+ }
else
+ {
sp = true;
+ }
printf("[%s", emitGetFrameReg());
- if (of < 0)
+ if (of < 0)
+ {
printf("-%02XH", -of);
+ }
else if (of > 0)
+ {
printf("+%02XH", +of);
+ }
printf("]");
}
@@ -2949,21 +2922,21 @@ void emitter::emitDispVarSet()
}
/*****************************************************************************/
-#endif//DEBUG
+#endif // DEBUG
#if MULTIREG_HAS_SECOND_GC_RET
-//------------------------------------------------------------------------
-// emitSetSecondRetRegGCType: Sets the GC type of the second return register for instrDescCGCA struct.
-//
-// Arguments:
-// id - The large call instr descriptor to set the second GC return register type on.
-// secondRetSize - The EA_SIZE for second return register type.
-//
-// Return Value:
-// None
-//
-
-void emitter::emitSetSecondRetRegGCType(instrDescCGCA* id, emitAttr secondRetSize)
+//------------------------------------------------------------------------
+// emitSetSecondRetRegGCType: Sets the GC type of the second return register for instrDescCGCA struct.
+//
+// Arguments:
+// id - The large call instr descriptor to set the second GC return register type on.
+// secondRetSize - The EA_SIZE for second return register type.
+//
+// Return Value:
+// None
+//
+
+void emitter::emitSetSecondRetRegGCType(instrDescCGCA* id, emitAttr secondRetSize)
{
if (EA_IS_GCREF(secondRetSize))
{
@@ -2991,36 +2964,35 @@ void emitter::emitSetSecondRetRegGCType(instrDescCGCA* id, emitAttr s
* address mode displacement.
*/
-emitter::instrDesc * emitter::emitNewInstrCallInd(int argCnt,
- ssize_t disp,
- VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- emitAttr retSizeIn
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize))
+emitter::instrDesc* emitter::emitNewInstrCallInd(int argCnt,
+ ssize_t disp,
+ VARSET_VALARG_TP GCvars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ emitAttr retSizeIn
+ MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize))
{
- emitAttr retSize = (retSizeIn != EA_UNKNOWN) ? retSizeIn : EA_PTRSIZE;
+ emitAttr retSize = (retSizeIn != EA_UNKNOWN) ? retSizeIn : EA_PTRSIZE;
bool gcRefRegsInScratch = ((gcrefRegs & RBM_CALLEE_TRASH) != 0);
-
+
// Allocate a larger descriptor if any GC values need to be saved
// or if we have an absurd number of arguments or a large address
// mode displacement, or we have some byref registers
- //
- // On Amd64 System V OSs a larger descriptor is also needed if the
- // call returns a two-register-returned struct and the second
+ //
+ // On Amd64 System V OSs a larger descriptor is also needed if the
+ // call returns a two-register-returned struct and the second
// register (RDX) is a GCRef or ByRef pointer.
-
- if (!VarSetOps::IsEmpty(emitComp, GCvars) || // any frame GCvars live
- (gcRefRegsInScratch) || // any register gc refs live in scratch regs
- (byrefRegs != 0) || // any register byrefs live
- (disp < AM_DISP_MIN) || // displacement too negative
- (disp > AM_DISP_MAX) || // displacement too positive
- (argCnt > ID_MAX_SMALL_CNS) || // too many args
- (argCnt < 0) // caller pops arguments
- // There is a second ref/byref return register.
- MULTIREG_HAS_SECOND_GC_RET_ONLY( || EA_IS_GCREF_OR_BYREF(secondRetSize)))
+ if (!VarSetOps::IsEmpty(emitComp, GCvars) || // any frame GCvars live
+ (gcRefRegsInScratch) || // any register gc refs live in scratch regs
+ (byrefRegs != 0) || // any register byrefs live
+ (disp < AM_DISP_MIN) || // displacement too negative
+ (disp > AM_DISP_MAX) || // displacement too positive
+ (argCnt > ID_MAX_SMALL_CNS) || // too many args
+ (argCnt < 0) // caller pops arguments
+ // There is a second ref/byref return register.
+ MULTIREG_HAS_SECOND_GC_RET_ONLY(|| EA_IS_GCREF_OR_BYREF(secondRetSize)))
{
instrDescCGCA* id;
@@ -3029,20 +3001,20 @@ emitter::instrDesc * emitter::emitNewInstrCallInd(int
id->idSetIsLargeCall();
VarSetOps::Assign(emitComp, id->idcGCvars, GCvars);
- id->idcGcrefRegs = gcrefRegs;
- id->idcByrefRegs = byrefRegs;
- id->idcArgCnt = argCnt;
- id->idcDisp = disp;
+ id->idcGcrefRegs = gcrefRegs;
+ id->idcByrefRegs = byrefRegs;
+ id->idcArgCnt = argCnt;
+ id->idcDisp = disp;
-#if MULTIREG_HAS_SECOND_GC_RET
+#if MULTIREG_HAS_SECOND_GC_RET
emitSetSecondRetRegGCType(id, secondRetSize);
-#endif // MULTIREG_HAS_SECOND_GC_RET
+#endif // MULTIREG_HAS_SECOND_GC_RET
- return id;
+ return id;
}
else
{
- instrDesc * id;
+ instrDesc* id;
id = emitNewInstrCns(retSize, argCnt);
@@ -3050,13 +3022,13 @@ emitter::instrDesc * emitter::emitNewInstrCallInd(int
assert(!id->idIsLargeCns());
/* Store the displacement and make sure the value fit */
- id->idAddr()->iiaAddrMode.amDisp = disp;
+ id->idAddr()->iiaAddrMode.amDisp = disp;
assert(id->idAddr()->iiaAddrMode.amDisp == disp);
/* Save the live GC registers in the unused register fields */
emitEncodeCallGCregs(gcrefRegs, id);
- return id;
+ return id;
}
}
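Restating the policy in emitNewInstrCallInd() (and its direct-call twin below) as a checklist, with no change in behavior: the large instrDescCGCA form is used whenever any of the following hold; otherwise the small instrDesc plus emitEncodeCallGCregs() path is taken.

//   - any tracked frame GC vars are live across the call
//   - GC refs or byrefs are live in scratch (callee-trash) registers
//   - the address-mode displacement lies outside [AM_DISP_MIN, AM_DISP_MAX]  (indirect calls only)
//   - the argument count does not fit a small constant, or is negative (caller pops the arguments)
//   - (MULTIREG_HAS_SECOND_GC_RET) the second return register holds a GC ref or byref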
@@ -3071,32 +3043,32 @@ emitter::instrDesc * emitter::emitNewInstrCallInd(int
* and an arbitrarily large argument count.
*/
-emitter::instrDesc *emitter::emitNewInstrCallDir(int argCnt,
- VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- emitAttr retSizeIn
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize))
+emitter::instrDesc* emitter::emitNewInstrCallDir(int argCnt,
+ VARSET_VALARG_TP GCvars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ emitAttr retSizeIn
+ MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize))
{
- emitAttr retSize = (retSizeIn != EA_UNKNOWN) ? retSizeIn : EA_PTRSIZE;
+ emitAttr retSize = (retSizeIn != EA_UNKNOWN) ? retSizeIn : EA_PTRSIZE;
// Allocate a larger descriptor if new GC values need to be saved
// or if we have an absurd number of arguments or if we need to
// save the scope.
- //
- // On Amd64 System V OSs a larger descriptor is also needed if the
- // call returns a two-register-returned struct and the second
+ //
+ // On Amd64 System V OSs a larger descriptor is also needed if the
+ // call returns a two-register-returned struct and the second
// register (RDX) is a GCRef or ByRef pointer.
bool gcRefRegsInScratch = ((gcrefRegs & RBM_CALLEE_TRASH) != 0);
- if (!VarSetOps::IsEmpty(emitComp, GCvars) || // any frame GCvars live
- gcRefRegsInScratch || // any register gc refs live in scratch regs
- (byrefRegs != 0) || // any register byrefs live
- (argCnt > ID_MAX_SMALL_CNS) || // too many args
- (argCnt < 0) // caller pops arguments
- // There is a second ref/byref return register.
- MULTIREG_HAS_SECOND_GC_RET_ONLY( || EA_IS_GCREF_OR_BYREF(secondRetSize)))
+ if (!VarSetOps::IsEmpty(emitComp, GCvars) || // any frame GCvars live
+ gcRefRegsInScratch || // any register gc refs live in scratch regs
+ (byrefRegs != 0) || // any register byrefs live
+ (argCnt > ID_MAX_SMALL_CNS) || // too many args
+ (argCnt < 0) // caller pops arguments
+ // There is a second ref/byref return register.
+ MULTIREG_HAS_SECOND_GC_RET_ONLY(|| EA_IS_GCREF_OR_BYREF(secondRetSize)))
{
instrDescCGCA* id = emitAllocInstrCGCA(retSize);
@@ -3105,20 +3077,20 @@ emitter::instrDesc *emitter::emitNewInstrCallDir(int
id->idSetIsLargeCall();
VarSetOps::Assign(emitComp, id->idcGCvars, GCvars);
- id->idcGcrefRegs = gcrefRegs;
- id->idcByrefRegs = byrefRegs;
- id->idcDisp = 0;
- id->idcArgCnt = argCnt;
+ id->idcGcrefRegs = gcrefRegs;
+ id->idcByrefRegs = byrefRegs;
+ id->idcDisp = 0;
+ id->idcArgCnt = argCnt;
#if MULTIREG_HAS_SECOND_GC_RET
emitSetSecondRetRegGCType(id, secondRetSize);
#endif // MULTIREG_HAS_SECOND_GC_RET
- return id;
+ return id;
}
else
{
- instrDesc * id = emitNewInstrCns(retSize, argCnt);
+ instrDesc* id = emitNewInstrCns(retSize, argCnt);
// printf("Direct call w/o GC vars / big arg cnt / explicit scope\n");
@@ -3128,35 +3100,37 @@ emitter::instrDesc *emitter::emitNewInstrCallDir(int
/* Save the live GC registers in the unused register fields */
emitEncodeCallGCregs(gcrefRegs, id);
- return id;
+ return id;
}
}
/*****************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* Return a string with the name of the given class field (blank string (not
* NULL) is returned when the name isn't available).
*/
-const char * emitter::emitFldName(CORINFO_FIELD_HANDLE fieldVal)
+const char* emitter::emitFldName(CORINFO_FIELD_HANDLE fieldVal)
{
- if (emitComp->opts.varNames)
+ if (emitComp->opts.varNames)
{
- const char * memberName;
- const char * className;
+ const char* memberName;
+ const char* className;
- const int TEMP_BUFFER_LEN = 1024;
- static char buff[TEMP_BUFFER_LEN];
+ const int TEMP_BUFFER_LEN = 1024;
+ static char buff[TEMP_BUFFER_LEN];
memberName = emitComp->eeGetFieldName(fieldVal, &className);
sprintf_s(buff, TEMP_BUFFER_LEN, "'<%s>.%s'", className, memberName);
- return buff;
+ return buff;
}
else
- return "";
+ {
+ return "";
+ }
}
/*****************************************************************************
@@ -3165,27 +3139,26 @@ const char * emitter::emitFldName(CORINFO_FIELD_HANDLE fieldVal)
* NULL) is returned when the name isn't available).
*/
-const char * emitter::emitFncName(CORINFO_METHOD_HANDLE methHnd)
+const char* emitter::emitFncName(CORINFO_METHOD_HANDLE methHnd)
{
return emitComp->eeGetMethodFullName(methHnd);
}
/*****************************************************************************/
-#endif//DEBUG
+#endif // DEBUG
/*****************************************************************************
*
* Be very careful, some instruction descriptors are allocated as "tiny" and
* don't have some of the tail fields of instrDesc (in particular, "idInfo").
*/
-const BYTE emitter::emitFmtToOps[] =
-{
- #define IF_DEF(en, op1, op2) ID_OP_##op2,
- #include "emitfmts.h"
+const BYTE emitter::emitFmtToOps[] = {
+#define IF_DEF(en, op1, op2) ID_OP_##op2,
+#include "emitfmts.h"
};
-#ifdef DEBUG
-const unsigned emitter::emitFmtCount = sizeof(emitFmtToOps)/sizeof(emitFmtToOps[0]);
+#ifdef DEBUG
+const unsigned emitter::emitFmtCount = sizeof(emitFmtToOps) / sizeof(emitFmtToOps[0]);
#endif
/*****************************************************************************
@@ -3193,9 +3166,9 @@ const unsigned emitter::emitFmtCount = sizeof(emitFmtToOps)/sizeof(emitFmtT
* Display the current instruction group list.
*/
-#ifdef DEBUG
+#ifdef DEBUG
-void emitter::emitDispIGflags(unsigned flags)
+void emitter::emitDispIGflags(unsigned flags)
{
if (flags & IGF_GC_VARS)
{
@@ -3237,40 +3210,54 @@ void emitter::emitDispIGflags(unsigned flags)
}
}
-
-void emitter::emitDispIG(insGroup* ig, insGroup* igPrev, bool verbose)
+void emitter::emitDispIG(insGroup* ig, insGroup* igPrev, bool verbose)
{
const int TEMP_BUFFER_LEN = 40;
- char buff[TEMP_BUFFER_LEN];
+ char buff[TEMP_BUFFER_LEN];
sprintf_s(buff, TEMP_BUFFER_LEN, "G_M%03u_IG%02u: ", Compiler::s_compMethodsCount, ig->igNum);
printf("%s; ", buff);
- if ((igPrev == NULL) || (igPrev->igFuncIdx != ig->igFuncIdx))
+ if ((igPrev == nullptr) || (igPrev->igFuncIdx != ig->igFuncIdx))
{
printf("func=%02u, ", ig->igFuncIdx);
}
if (ig->igFlags & IGF_PLACEHOLDER)
{
- insGroup * igPh = ig;
+ insGroup* igPh = ig;
const char* pszType;
switch (igPh->igPhData->igPhType)
{
- case IGPT_PROLOG: pszType = "prolog"; break;
- case IGPT_EPILOG: pszType = "epilog"; break;
+ case IGPT_PROLOG:
+ pszType = "prolog";
+ break;
+ case IGPT_EPILOG:
+ pszType = "epilog";
+ break;
#if FEATURE_EH_FUNCLETS
- case IGPT_FUNCLET_PROLOG: pszType = "funclet prolog"; break;
- case IGPT_FUNCLET_EPILOG: pszType = "funclet epilog"; break;
+ case IGPT_FUNCLET_PROLOG:
+ pszType = "funclet prolog";
+ break;
+ case IGPT_FUNCLET_EPILOG:
+ pszType = "funclet epilog";
+ break;
#endif // FEATURE_EH_FUNCLETS
- default: pszType = "UNKNOWN"; break;
+ default:
+ pszType = "UNKNOWN";
+ break;
}
printf("%s placeholder, next placeholder=", pszType);
if (igPh->igPhData->igPhNext)
+ {
printf("IG%02u ", igPh->igPhData->igPhNext->igNum);
+ }
else
+ {
printf("<END>");
- printf(", BB=%08XH (BB%02u)", dspPtr(igPh->igPhData->igPhBB), (igPh->igPhData->igPhBB != nullptr) ? igPh->igPhData->igPhBB->bbNum : 0 );
+ }
+ printf(", BB=%08XH (BB%02u)", dspPtr(igPh->igPhData->igPhBB),
+ (igPh->igPhData->igPhBB != nullptr) ? igPh->igPhData->igPhBB->bbNum : 0);
emitDispIGflags(igPh->igFlags);
@@ -3288,28 +3275,26 @@ void emitter::emitDispIG(insGroup* ig, insGroup* igPrev, bool ver
}
printf("\n");
- printf("%*s; PrevGCVars=%s ",
- strlen(buff), "",
- VarSetOps::ToString(emitComp, igPh->igPhData->igPhPrevGCrefVars));
+ printf("%*s; PrevGCVars=%s ", strlen(buff), "",
+ VarSetOps::ToString(emitComp, igPh->igPhData->igPhPrevGCrefVars));
dumpConvertedVarSet(emitComp, igPh->igPhData->igPhPrevGCrefVars);
printf(", PrevGCrefRegs=");
printRegMaskInt(igPh->igPhData->igPhPrevGCrefRegs);
- emitDispRegSet (igPh->igPhData->igPhPrevGCrefRegs);
+ emitDispRegSet(igPh->igPhData->igPhPrevGCrefRegs);
printf(", PrevByrefRegs=");
printRegMaskInt(igPh->igPhData->igPhPrevByrefRegs);
- emitDispRegSet (igPh->igPhData->igPhPrevByrefRegs);
+ emitDispRegSet(igPh->igPhData->igPhPrevByrefRegs);
printf("\n");
- printf("%*s; InitGCVars=%s ",
- strlen(buff), "",
- VarSetOps::ToString(emitComp, igPh->igPhData->igPhInitGCrefVars));
+ printf("%*s; InitGCVars=%s ", strlen(buff), "",
+ VarSetOps::ToString(emitComp, igPh->igPhData->igPhInitGCrefVars));
dumpConvertedVarSet(emitComp, igPh->igPhData->igPhInitGCrefVars);
printf(", InitGCrefRegs=");
printRegMaskInt(igPh->igPhData->igPhInitGCrefRegs);
- emitDispRegSet (igPh->igPhData->igPhInitGCrefRegs);
+ emitDispRegSet(igPh->igPhData->igPhInitGCrefRegs);
printf(", InitByrefRegs=");
printRegMaskInt(igPh->igPhData->igPhInitByrefRegs);
- emitDispRegSet (igPh->igPhData->igPhInitByrefRegs);
+ emitDispRegSet(igPh->igPhData->igPhInitByrefRegs);
printf("\n");
assert(!(ig->igFlags & IGF_GC_VARS));
@@ -3325,18 +3310,18 @@ void emitter::emitDispIG(insGroup* ig, insGroup* igPrev, bool ver
dumpConvertedVarSet(emitComp, ig->igGCvars());
}
- if (!(ig->igFlags & IGF_EMIT_ADD))
+ if (!(ig->igFlags & IGF_EMIT_ADD))
{
printf(", gcrefRegs=");
printRegMaskInt(ig->igGCregs);
- emitDispRegSet (ig->igGCregs);
+ emitDispRegSet(ig->igGCregs);
}
if (ig->igFlags & IGF_BYREF_REGS)
{
printf(", byrefRegs=");
printRegMaskInt(ig->igByrefRegs());
- emitDispRegSet (ig->igByrefRegs());
+ emitDispRegSet(ig->igByrefRegs());
}
emitDispIGflags(ig->igFlags);
@@ -3351,26 +3336,25 @@ void emitter::emitDispIG(insGroup* ig, insGroup* igPrev, bool ver
}
printf("\n");
- if (verbose)
+ if (verbose)
{
- BYTE * ins = ig->igData;
+ BYTE* ins = ig->igData;
UNATIVE_OFFSET ofs = ig->igOffs;
unsigned cnt = ig->igInsCnt;
- if (cnt)
+ if (cnt)
{
printf("\n");
do
{
- instrDesc * id = (instrDesc *)ins;
+ instrDesc* id = (instrDesc*)ins;
- emitDispIns(id, false, true, false, ofs, NULL, 0, ig);
+ emitDispIns(id, false, true, false, ofs, nullptr, 0, ig);
ins += emitSizeOfInsDsc(id);
- ofs += emitInstCodeSz (id);
- }
- while (--cnt);
+ ofs += emitInstCodeSz(id);
+ } while (--cnt);
printf("\n");
}
@@ -3378,46 +3362,44 @@ void emitter::emitDispIG(insGroup* ig, insGroup* igPrev, bool ver
}
}
-void emitter::emitDispIGlist(bool verbose)
+void emitter::emitDispIGlist(bool verbose)
{
- insGroup * ig;
- insGroup * igPrev;
+ insGroup* ig;
+ insGroup* igPrev;
- for (igPrev = NULL, ig = emitIGlist;
- ig;
- igPrev = ig , ig = ig->igNext)
+ for (igPrev = nullptr, ig = emitIGlist; ig; igPrev = ig, ig = ig->igNext)
{
emitDispIG(ig, igPrev, verbose);
}
}
-void emitter::emitDispGCinfo()
+void emitter::emitDispGCinfo()
{
printf("Emitter GC tracking info:");
printf("\n emitPrevGCrefVars(0x%p)=%016llX ", dspPtr(&emitPrevGCrefVars), emitPrevGCrefVars);
- dumpConvertedVarSet(emitComp, emitPrevGCrefVars);
+ dumpConvertedVarSet(emitComp, emitPrevGCrefVars);
printf("\n emitPrevGCrefRegs(0x%p)=", dspPtr(&emitPrevGCrefRegs));
- printRegMaskInt(emitPrevGCrefRegs);
- emitDispRegSet (emitPrevGCrefRegs);
+ printRegMaskInt(emitPrevGCrefRegs);
+ emitDispRegSet(emitPrevGCrefRegs);
printf("\n emitPrevByrefRegs(0x%p)=", dspPtr(&emitPrevByrefRegs));
- printRegMaskInt(emitPrevByrefRegs);
- emitDispRegSet (emitPrevByrefRegs);
+ printRegMaskInt(emitPrevByrefRegs);
+ emitDispRegSet(emitPrevByrefRegs);
printf("\n emitInitGCrefVars(0x%p)=%016llX ", dspPtr(&emitInitGCrefVars), emitInitGCrefVars);
- dumpConvertedVarSet(emitComp, emitInitGCrefVars);
+ dumpConvertedVarSet(emitComp, emitInitGCrefVars);
printf("\n emitInitGCrefRegs(0x%p)=", dspPtr(&emitInitGCrefRegs));
- printRegMaskInt(emitInitGCrefRegs);
- emitDispRegSet (emitInitGCrefRegs);
+ printRegMaskInt(emitInitGCrefRegs);
+ emitDispRegSet(emitInitGCrefRegs);
printf("\n emitInitByrefRegs(0x%p)=", dspPtr(&emitInitByrefRegs));
- printRegMaskInt(emitInitByrefRegs);
- emitDispRegSet (emitInitByrefRegs);
+ printRegMaskInt(emitInitByrefRegs);
+ emitDispRegSet(emitInitByrefRegs);
printf("\n emitThisGCrefVars(0x%p)=%016llX ", dspPtr(&emitThisGCrefVars), emitThisGCrefVars);
- dumpConvertedVarSet(emitComp, emitThisGCrefVars);
+ dumpConvertedVarSet(emitComp, emitThisGCrefVars);
printf("\n emitThisGCrefRegs(0x%p)=", dspPtr(&emitThisGCrefRegs));
- printRegMaskInt(emitThisGCrefRegs);
- emitDispRegSet (emitThisGCrefRegs);
+ printRegMaskInt(emitThisGCrefRegs);
+ emitDispRegSet(emitThisGCrefRegs);
printf("\n emitThisByrefRegs(0x%p)=", dspPtr(&emitThisByrefRegs));
- printRegMaskInt(emitThisByrefRegs);
- emitDispRegSet (emitThisByrefRegs);
+ printRegMaskInt(emitThisByrefRegs);
+ emitDispRegSet(emitThisByrefRegs);
printf("\n\n");
}
@@ -3429,14 +3411,13 @@ void emitter::emitDispGCinfo()
* emitOutputInstr() that does a few debug checks.
*/
-size_t emitter::emitIssue1Instr(insGroup *ig,
- instrDesc *id, BYTE **dp)
+size_t emitter::emitIssue1Instr(insGroup* ig, instrDesc* id, BYTE** dp)
{
- size_t is;
+ size_t is;
/* Record the beginning offset of the instruction */
- BYTE * curInsAdr = *dp;
+ BYTE* curInsAdr = *dp;
/* Issue the next instruction */
@@ -3444,7 +3425,7 @@ size_t emitter::emitIssue1Instr(insGroup *ig,
is = emitOutputInstr(ig, id, dp);
- // printf("[S=%02u]\n", emitCurStackLvl);
+// printf("[S=%02u]\n", emitCurStackLvl);
#if EMIT_TRACK_STACK_DEPTH
@@ -3454,9 +3435,7 @@ size_t emitter::emitIssue1Instr(insGroup *ig,
push entries.
*/
- assert(emitFullGCinfo == false ||
- emitCurStackLvl != 0 ||
- u2.emitGcArgTrackCnt == 0);
+ assert(emitFullGCinfo == false || emitCurStackLvl != 0 || u2.emitGcArgTrackCnt == 0);
#endif
@@ -3464,9 +3443,9 @@ size_t emitter::emitIssue1Instr(insGroup *ig,
/* Did the size of the instruction match our expectations? */
- UNATIVE_OFFSET csz = (UNATIVE_OFFSET)(*dp - curInsAdr);
+ UNATIVE_OFFSET csz = (UNATIVE_OFFSET)(*dp - curInsAdr);
- if (csz != id->idCodeSize())
+ if (csz != id->idCodeSize())
{
/* It is fatal to under-estimate the instruction size */
noway_assert(emitInstCodeSz(id) >= csz);
@@ -3480,33 +3459,32 @@ size_t emitter::emitIssue1Instr(insGroup *ig,
/* The instruction size estimate wasn't accurate; remember this */
- ig->igFlags |= IGF_UPD_ISZ;
+ ig->igFlags |= IGF_UPD_ISZ;
#if defined(_TARGET_XARCH_)
id->idCodeSize(csz);
#elif defined(_TARGET_ARM_)
- // This is done as part of emitSetShortJump();
- // insSize isz = emitInsSize(id->idInsFmt());
- // id->idInsSize(isz);
+// This is done as part of emitSetShortJump();
+// insSize isz = emitInsSize(id->idInsFmt());
+// id->idInsSize(isz);
#else
/* It is fatal to over-estimate the instruction size */
IMPL_LIMITATION("Over-estimated instruction size");
#endif
}
-
#endif
-#ifdef DEBUG
+#ifdef DEBUG
/* Make sure the instruction descriptor size also matches our expectations */
- if (is != emitSizeOfInsDsc(id))
+ if (is != emitSizeOfInsDsc(id))
{
- printf("%s at %u: Expected size = %u , actual size = %u\n",
- emitIfName(id->idInsFmt()), id->idDebugOnlyInfo()->idNum, is, emitSizeOfInsDsc(id));
+ printf("%s at %u: Expected size = %u , actual size = %u\n", emitIfName(id->idInsFmt()),
+ id->idDebugOnlyInfo()->idNum, is, emitSizeOfInsDsc(id));
assert(is == emitSizeOfInsDsc(id));
}
#endif
- return is;
+ return is;
}
/*****************************************************************************
@@ -3516,14 +3494,12 @@ size_t emitter::emitIssue1Instr(insGroup *ig,
* groups and thus it isn't cheap).
*/
-void emitter::emitRecomputeIGoffsets()
+void emitter::emitRecomputeIGoffsets()
{
- UNATIVE_OFFSET offs;
- insGroup * ig;
+ UNATIVE_OFFSET offs;
+ insGroup* ig;
- for (ig = emitIGlist, offs = 0;
- ig;
- ig = ig->igNext)
+ for (ig = emitIGlist, offs = 0; ig; ig = ig->igNext)
{
ig->igOffs = offs;
assert(IsCodeAligned(ig->igOffs));
@@ -3534,7 +3510,7 @@ void emitter::emitRecomputeIGoffsets()
emitTotalCodeSize = offs;
-#ifdef DEBUG
+#ifdef DEBUG
emitCheckIGoffsets();
#endif
}
@@ -3549,95 +3525,94 @@ void emitter::emitRecomputeIGoffsets()
* NYI).
*/
-void emitter::emitJumpDistBind()
+void emitter::emitJumpDistBind()
{
#ifdef DEBUG
- if (emitComp->verbose)
+ if (emitComp->verbose)
{
printf("*************** In emitJumpDistBind()\n");
}
- if (EMIT_INSTLIST_VERBOSE)
+ if (EMIT_INSTLIST_VERBOSE)
{
printf("\nInstruction list before jump distance binding:\n\n");
emitDispIGlist(true);
}
#endif
- instrDescJmp * jmp;
+ instrDescJmp* jmp;
- UNATIVE_OFFSET minShortExtra; // The smallest offset greater than that required for a jump to be converted
- // to a small jump. If it is small enough, we will iterate in hopes of
- // converting those jumps we missed converting the first (or second...) time.
+ UNATIVE_OFFSET minShortExtra; // The smallest offset greater than that required for a jump to be converted
+ // to a small jump. If it is small enough, we will iterate in hopes of
+ // converting those jumps we missed converting the first (or second...) time.
#if defined(_TARGET_ARM_)
- UNATIVE_OFFSET minMediumExtra; // Same as 'minShortExtra', but for medium-sized jumps.
-#endif // _TARGET_ARM_
+ UNATIVE_OFFSET minMediumExtra; // Same as 'minShortExtra', but for medium-sized jumps.
+#endif // _TARGET_ARM_
- UNATIVE_OFFSET adjIG;
- UNATIVE_OFFSET adjLJ;
- insGroup * lstIG;
+ UNATIVE_OFFSET adjIG;
+ UNATIVE_OFFSET adjLJ;
+ insGroup* lstIG;
#ifdef DEBUG
- insGroup * prologIG = emitPrologIG;
+ insGroup* prologIG = emitPrologIG;
#endif // DEBUG
- int jmp_iteration = 1;
+ int jmp_iteration = 1;
- /*****************************************************************************/
- /* If we iterate to look for more jumps to shorten, we start again here. */
- /*****************************************************************************/
+/*****************************************************************************/
+/* If we iterate to look for more jumps to shorten, we start again here. */
+/*****************************************************************************/
AGAIN:
-#ifdef DEBUG
+#ifdef DEBUG
emitCheckIGoffsets();
#endif
- /*
- In the following loop we convert all jump targets from "BasicBlock *"
- to "insGroup *" values. We also estimate which jumps will be short.
- */
+/*
+ In the following loop we convert all jump targets from "BasicBlock *"
+ to "insGroup *" values. We also estimate which jumps will be short.
+ */
-#ifdef DEBUG
- insGroup * lastIG = NULL;
- instrDescJmp * lastLJ = NULL;
+#ifdef DEBUG
+ insGroup* lastIG = nullptr;
+ instrDescJmp* lastLJ = nullptr;
#endif
- lstIG = NULL;
- adjLJ = 0;
- adjIG = 0;
- minShortExtra = (UNATIVE_OFFSET)-1;
+ lstIG = nullptr;
+ adjLJ = 0;
+ adjIG = 0;
+ minShortExtra = (UNATIVE_OFFSET)-1;
#if defined(_TARGET_ARM_)
minMediumExtra = (UNATIVE_OFFSET)-1;
#endif // _TARGET_ARM_
- for (jmp = emitJumpList;
- jmp;
- jmp = jmp->idjNext)
+ for (jmp = emitJumpList; jmp; jmp = jmp->idjNext)
{
- insGroup * jmpIG;
- insGroup * tgtIG;
+ insGroup* jmpIG;
+ insGroup* tgtIG;
- UNATIVE_OFFSET jsz; // size of the jump instruction in bytes
+ UNATIVE_OFFSET jsz; // size of the jump instruction in bytes
- UNATIVE_OFFSET ssz = 0; // small jump size
- NATIVE_OFFSET nsd = 0; // small jump max. neg distance
- NATIVE_OFFSET psd = 0; // small jump max. pos distance
+ UNATIVE_OFFSET ssz = 0; // small jump size
+ NATIVE_OFFSET nsd = 0; // small jump max. neg distance
+ NATIVE_OFFSET psd = 0; // small jump max. pos distance
#if defined(_TARGET_ARM_)
- UNATIVE_OFFSET msz = 0; // medium jump size
- NATIVE_OFFSET nmd = 0; // medium jump max. neg distance
- NATIVE_OFFSET pmd = 0; // medium jump max. pos distance
- NATIVE_OFFSET mextra; // How far beyond the medium jump range is this jump offset?
-#endif // _TARGET_ARM_
-
- NATIVE_OFFSET extra; // How far beyond the short jump range is this jump offset?
- UNATIVE_OFFSET srcInstrOffs; // offset of the source instruction of the jump
- UNATIVE_OFFSET srcEncodingOffs; // offset of the source used by the instruction set to calculate the relative offset of the jump
- UNATIVE_OFFSET dstOffs;
- NATIVE_OFFSET jmpDist; // the relative jump distance, as it will be encoded
- UNATIVE_OFFSET oldSize;
- UNATIVE_OFFSET sizeDif;
+ UNATIVE_OFFSET msz = 0; // medium jump size
+ NATIVE_OFFSET nmd = 0; // medium jump max. neg distance
+ NATIVE_OFFSET pmd = 0; // medium jump max. pos distance
+ NATIVE_OFFSET mextra; // How far beyond the medium jump range is this jump offset?
+#endif // _TARGET_ARM_
+
+ NATIVE_OFFSET extra; // How far beyond the short jump range is this jump offset?
+ UNATIVE_OFFSET srcInstrOffs; // offset of the source instruction of the jump
+ UNATIVE_OFFSET srcEncodingOffs; // offset of the source used by the instruction set to calculate the relative
+ // offset of the jump
+ UNATIVE_OFFSET dstOffs;
+ NATIVE_OFFSET jmpDist; // the relative jump distance, as it will be encoded
+ UNATIVE_OFFSET oldSize;
+ UNATIVE_OFFSET sizeDif;
#ifdef _TARGET_XARCH_
assert(jmp->idInsFmt() == IF_LABEL || jmp->idInsFmt() == IF_RWR_LABEL || jmp->idInsFmt() == IF_SWR_LABEL);
@@ -3646,7 +3621,7 @@ AGAIN:
if (jmp->idInsFmt() == IF_LABEL)
{
- if (emitIsCondJump(jmp))
+ if (emitIsCondJump(jmp))
{
ssz = JCC_SIZE_SMALL;
nsd = JCC_DIST_SMALL_MAX_NEG;
@@ -3662,19 +3637,13 @@ AGAIN:
#endif // _TARGET_XARCH_
#ifdef _TARGET_ARM_
- assert((jmp->idInsFmt() == IF_T2_J1) ||
- (jmp->idInsFmt() == IF_T2_J2) ||
- (jmp->idInsFmt() == IF_T1_I) ||
- (jmp->idInsFmt() == IF_T1_K) ||
- (jmp->idInsFmt() == IF_T1_M) ||
- (jmp->idInsFmt() == IF_T2_M1) ||
- (jmp->idInsFmt() == IF_T2_N1) ||
- (jmp->idInsFmt() == IF_T1_J3) ||
- (jmp->idInsFmt() == IF_LARGEJMP) );
+ assert((jmp->idInsFmt() == IF_T2_J1) || (jmp->idInsFmt() == IF_T2_J2) || (jmp->idInsFmt() == IF_T1_I) ||
+ (jmp->idInsFmt() == IF_T1_K) || (jmp->idInsFmt() == IF_T1_M) || (jmp->idInsFmt() == IF_T2_M1) ||
+ (jmp->idInsFmt() == IF_T2_N1) || (jmp->idInsFmt() == IF_T1_J3) || (jmp->idInsFmt() == IF_LARGEJMP));
/* Figure out the smallest size we can end up with */
- if (emitIsCondJump(jmp))
+ if (emitIsCondJump(jmp))
{
ssz = JCC_SIZE_SMALL;
nsd = JCC_DIST_SMALL_MAX_NEG;
@@ -3684,19 +3653,19 @@ AGAIN:
nmd = JCC_DIST_MEDIUM_MAX_NEG;
pmd = JCC_DIST_MEDIUM_MAX_POS;
}
- else if (emitIsCmpJump(jmp))
+ else if (emitIsCmpJump(jmp))
{
ssz = JMP_SIZE_SMALL;
nsd = 0;
psd = 126;
}
- else if (emitIsUncondJump(jmp))
+ else if (emitIsUncondJump(jmp))
{
ssz = JMP_SIZE_SMALL;
nsd = JMP_DIST_SMALL_MAX_NEG;
psd = JMP_DIST_SMALL_MAX_POS;
}
- else if (emitIsLoadLabel(jmp))
+ else if (emitIsLoadLabel(jmp))
{
ssz = LBL_SIZE_SMALL;
nsd = LBL_DIST_SMALL_MAX_NEG;
@@ -3711,7 +3680,7 @@ AGAIN:
#ifdef _TARGET_ARM64_
/* Figure out the smallest size we can end up with */
- if (emitIsCondJump(jmp))
+ if (emitIsCondJump(jmp))
{
ssz = JCC_SIZE_SMALL;
nsd = JCC_DIST_SMALL_MAX_NEG;
@@ -3745,17 +3714,13 @@ AGAIN:
}
#endif // _TARGET_ARM64_
- /* Make sure the jumps are properly ordered */
+/* Make sure the jumps are properly ordered */
-#ifdef DEBUG
- assert(lastLJ == NULL ||
- lastIG != jmp->idjIG ||
- lastLJ->idjOffs < jmp->idjOffs);
- lastLJ = (lastIG == jmp->idjIG) ? jmp : NULL;
+#ifdef DEBUG
+ assert(lastLJ == nullptr || lastIG != jmp->idjIG || lastLJ->idjOffs < jmp->idjOffs);
+ lastLJ = (lastIG == jmp->idjIG) ? jmp : nullptr;
- assert(lastIG == NULL ||
- lastIG->igNum <= jmp->idjIG->igNum ||
- jmp->idjIG == prologIG ||
+ assert(lastIG == nullptr || lastIG->igNum <= jmp->idjIG->igNum || jmp->idjIG == prologIG ||
emitNxtIGnum > unsigned(0xFFFF)); // igNum might overflow
lastIG = jmp->idjIG;
#endif // DEBUG
@@ -3770,23 +3735,23 @@ AGAIN:
/* Are we in a group different from the previous jump? */
- if (lstIG != jmpIG)
+ if (lstIG != jmpIG)
{
/* Were there any jumps before this one? */
- if (lstIG)
+ if (lstIG)
{
/* Adjust the offsets of the intervening blocks */
do
{
- lstIG = lstIG->igNext; assert(lstIG);
+ lstIG = lstIG->igNext;
+ assert(lstIG);
// printf("Adjusted offset of block %02u from %04X to %04X\n", lstIG->igNum, lstIG->igOffs,
// lstIG->igOffs - adjIG);
lstIG->igOffs -= adjIG;
assert(IsCodeAligned(lstIG->igOffs));
- }
- while (lstIG != jmpIG);
+ } while (lstIG != jmpIG);
}
/* We've got the first jump in a new group */
@@ -3814,7 +3779,7 @@ AGAIN:
int doff = jmp->idAddr()->iiaGetJitDataOffset();
assert(doff >= 0);
ssize_t imm = emitGetInsSC(jmp);
- assert((imm >= 0) && (imm < 0x1000)); // 0x1000 is arbitrary, currently 'imm' is always 0
+ assert((imm >= 0) && (imm < 0x1000)); // 0x1000 is arbitrary, currently 'imm' is always 0
unsigned dataOffs = (unsigned)(doff + imm);
assert(dataOffs < emitDataSize());
@@ -3826,7 +3791,7 @@ AGAIN:
// Check if the distance is within the encoding length.
jmpDist = maxDstOffs - srcOffs;
- extra = jmpDist - psd;
+ extra = jmpDist - psd;
if (extra <= 0)
{
goto SHORT_JMP;
@@ -3839,11 +3804,11 @@ AGAIN:
/* Have we bound this jump's target already? */
- if (jmp->idIsBound())
+ if (jmp->idIsBound())
{
/* Does the jump already have the smallest size? */
- if (jmp->idjShort)
+ if (jmp->idjShort)
{
assert(emitSizeOfJump(jmp) == ssz);
@@ -3860,8 +3825,8 @@ AGAIN:
/* First time we've seen this label, convert its target */
CLANG_FORMAT_COMMENT_ANCHOR;
-#ifdef DEBUG
- if (EMITVERBOSE)
+#ifdef DEBUG
+ if (EMITVERBOSE)
{
printf("Binding: ");
emitDispIns(jmp, false, false, false);
@@ -3871,8 +3836,8 @@ AGAIN:
tgtIG = (insGroup*)emitCodeGetCookie(jmp->idAddr()->iiaBBlabel);
-#ifdef DEBUG
- if (EMITVERBOSE)
+#ifdef DEBUG
+ if (EMITVERBOSE)
{
if (tgtIG)
{
@@ -3880,8 +3845,9 @@ AGAIN:
}
else
{
- printf("-- ERROR, no emitter cookie for BB%02u; it is probably missing BBF_JMP_TARGET or BBF_HAS_LABEL.\n",
- jmp->idAddr()->iiaBBlabel->bbNum);
+ printf("-- ERROR, no emitter cookie for BB%02u; it is probably missing BBF_JMP_TARGET or "
+ "BBF_HAS_LABEL.\n",
+ jmp->idAddr()->iiaBBlabel->bbNum);
}
}
assert(tgtIG);
@@ -3899,25 +3865,22 @@ AGAIN:
#ifdef _TARGET_XARCH_
/* Done if this is not a variable-sized jump */
- if ( (jmp->idIns() == INS_push) ||
- (jmp->idIns() == INS_mov) ||
- (jmp->idIns() == INS_call) ||
- (jmp->idIns() == INS_push_hide) )
+ if ((jmp->idIns() == INS_push) || (jmp->idIns() == INS_mov) || (jmp->idIns() == INS_call) ||
+ (jmp->idIns() == INS_push_hide))
{
continue;
}
#endif
#ifdef _TARGET_ARM_
- if ( (jmp->idIns() == INS_push) ||
- (jmp->idIns() == INS_mov) ||
- (jmp->idIns() == INS_movt) ||
- (jmp->idIns() == INS_movw) )
+ if ((jmp->idIns() == INS_push) || (jmp->idIns() == INS_mov) || (jmp->idIns() == INS_movt) ||
+ (jmp->idIns() == INS_movw))
{
continue;
}
#endif
#ifdef _TARGET_ARM64_
- // There is only one size of unconditional branch; we don't support functions larger than 2^28 bytes (our branch range).
+ // There is only one size of unconditional branch; we don't support functions larger than 2^28 bytes (our branch
+ // range).
if (emitIsUncondJump(jmp))
{
continue;
@@ -3937,17 +3900,21 @@ AGAIN:
*/
srcInstrOffs = jmpIG->igOffs + jmp->idjOffs;
- dstOffs = tgtIG->igOffs; /* Note that the destination is always the beginning of an IG, so no need for an offset inside it */
+
+ /* Note that the destination is always the beginning of an IG, so no need for an offset inside it */
+ dstOffs = tgtIG->igOffs;
#if defined(_TARGET_ARM_)
- srcEncodingOffs = srcInstrOffs + 4; // For relative branches, ARM PC is always considered to be the instruction address + 4
+ srcEncodingOffs =
+ srcInstrOffs + 4; // For relative branches, ARM PC is always considered to be the instruction address + 4
#elif defined(_TARGET_ARM64_)
- srcEncodingOffs = srcInstrOffs; // For relative branches, ARM64 PC is always considered to be the instruction address
+ srcEncodingOffs =
+ srcInstrOffs; // For relative branches, ARM64 PC is always considered to be the instruction address
#else
- srcEncodingOffs = srcInstrOffs + ssz; // Encoding offset of relative offset for small branch
+ srcEncodingOffs = srcInstrOffs + ssz; // Encoding offset of relative offset for small branch
#endif
- if (jmpIG->igNum < tgtIG->igNum)
+ if (jmpIG->igNum < tgtIG->igNum)
{
/* Forward jump */
@@ -3963,27 +3930,34 @@ AGAIN:
/* How much beyond the max. short distance does the jump go? */
- extra = jmpDist - psd;
+ extra = jmpDist - psd;
#if DEBUG_EMIT
- assert(jmp->idDebugOnlyInfo() != NULL);
- if (jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+ assert(jmp->idDebugOnlyInfo() != nullptr);
+ if (jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- if (INTERESTING_JUMP_NUM == 0)
- printf("[1] Jump %u:\n", jmp->idDebugOnlyInfo()->idNum);
+ if (INTERESTING_JUMP_NUM == 0)
+ {
+ printf("[1] Jump %u:\n", jmp->idDebugOnlyInfo()->idNum);
+ }
printf("[1] Jump block is at %08X\n", jmpIG->igOffs);
printf("[1] Jump reloffset is %04X\n", jmp->idjOffs);
printf("[1] Jump source is at %08X\n", srcEncodingOffs);
printf("[1] Label block is at %08X\n", dstOffs);
printf("[1] Jump dist. is %04X\n", jmpDist);
- if (extra > 0)
- printf("[1] Dist excess [S] = %d \n", extra);
+ if (extra > 0)
+ {
+ printf("[1] Dist excess [S] = %d \n", extra);
+ }
+ }
+ if (EMITVERBOSE)
+ {
+ printf("Estimate of fwd jump [%08X/%03u]: %04X -> %04X = %04X\n", dspPtr(jmp),
+ jmp->idDebugOnlyInfo()->idNum, srcInstrOffs, dstOffs, jmpDist);
}
- if (EMITVERBOSE)
- printf("Estimate of fwd jump [%08X/%03u]: %04X -> %04X = %04X\n", dspPtr(jmp), jmp->idDebugOnlyInfo()->idNum, srcInstrOffs, dstOffs, jmpDist);
#endif // DEBUG_EMIT
- if (extra <= 0)
+ if (extra <= 0)
{
/* This jump will be a short one */
goto SHORT_JMP;
@@ -3999,27 +3973,34 @@ AGAIN:
/* How much beyond the max. short distance does the jump go? */
- extra = jmpDist + nsd;
+ extra = jmpDist + nsd;
#if DEBUG_EMIT
- assert(jmp->idDebugOnlyInfo() != NULL);
- if (jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+ assert(jmp->idDebugOnlyInfo() != nullptr);
+ if (jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- if (INTERESTING_JUMP_NUM == 0)
- printf("[2] Jump %u:\n", jmp->idDebugOnlyInfo()->idNum);
+ if (INTERESTING_JUMP_NUM == 0)
+ {
+ printf("[2] Jump %u:\n", jmp->idDebugOnlyInfo()->idNum);
+ }
printf("[2] Jump block is at %08X\n", jmpIG->igOffs);
printf("[2] Jump reloffset is %04X\n", jmp->idjOffs);
printf("[2] Jump source is at %08X\n", srcEncodingOffs);
printf("[2] Label block is at %08X\n", dstOffs);
printf("[2] Jump dist. is %04X\n", jmpDist);
- if (extra > 0)
- printf("[2] Dist excess [S] = %d \n", extra);
+ if (extra > 0)
+ {
+ printf("[2] Dist excess [S] = %d \n", extra);
+ }
+ }
+ if (EMITVERBOSE)
+ {
+ printf("Estimate of bwd jump [%08X/%03u]: %04X -> %04X = %04X\n", dspPtr(jmp),
+ jmp->idDebugOnlyInfo()->idNum, srcInstrOffs, dstOffs, jmpDist);
}
- if (EMITVERBOSE)
- printf("Estimate of bwd jump [%08X/%03u]: %04X -> %04X = %04X\n", dspPtr(jmp), jmp->idDebugOnlyInfo()->idNum, srcInstrOffs, dstOffs, jmpDist);
#endif // DEBUG_EMIT
- if (extra <= 0)
+ if (extra <= 0)
{
/* This jump will be a short one */
goto SHORT_JMP;
@@ -4037,8 +4018,10 @@ AGAIN:
/* Keep track of the closest distance we got */
- if (minShortExtra > (unsigned)extra)
- minShortExtra = (unsigned)extra;
+ if (minShortExtra > (unsigned)extra)
+ {
+ minShortExtra = (unsigned)extra;
+ }
#if defined(_TARGET_ARM_)
@@ -4047,30 +4030,30 @@ AGAIN:
// 'srcInstrOffs', 'srcEncodingOffs', 'dstOffs', 'jmpDist' have already been computed
// and don't need to be recomputed.
- if (emitIsCondJump(jmp))
+ if (emitIsCondJump(jmp))
{
- if (jmpIG->igNum < tgtIG->igNum)
+ if (jmpIG->igNum < tgtIG->igNum)
{
/* Forward jump */
/* How much beyond the max. medium distance does the jump go? */
- mextra = jmpDist - pmd;
+ mextra = jmpDist - pmd;
#if DEBUG_EMIT
assert(jmp->idDebugOnlyInfo() != NULL);
- if (jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+ if (jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- if (mextra > 0)
+ if (mextra > 0)
{
- if (INTERESTING_JUMP_NUM == 0)
- printf("[6] Jump %u:\n", jmp->idDebugOnlyInfo()->idNum);
+ if (INTERESTING_JUMP_NUM == 0)
+ printf("[6] Jump %u:\n", jmp->idDebugOnlyInfo()->idNum);
printf("[6] Dist excess [S] = %d \n", mextra);
}
}
#endif // DEBUG_EMIT
- if (mextra <= 0)
+ if (mextra <= 0)
{
/* This jump will be a medium one */
goto MEDIUM_JMP;
@@ -4082,22 +4065,22 @@ AGAIN:
/* How much beyond the max. medium distance does the jump go? */
- mextra = jmpDist + nmd;
+ mextra = jmpDist + nmd;
#if DEBUG_EMIT
assert(jmp->idDebugOnlyInfo() != NULL);
- if (jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+ if (jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- if (mextra > 0)
+ if (mextra > 0)
{
- if (INTERESTING_JUMP_NUM == 0)
- printf("[7] Jump %u:\n", jmp->idDebugOnlyInfo()->idNum);
+ if (INTERESTING_JUMP_NUM == 0)
+ printf("[7] Jump %u:\n", jmp->idDebugOnlyInfo()->idNum);
printf("[7] Dist excess [S] = %d \n", mextra);
}
}
#endif // DEBUG_EMIT
- if (mextra <= 0)
+ if (mextra <= 0)
{
/* This jump will be a medium one */
goto MEDIUM_JMP;
@@ -4108,8 +4091,8 @@ AGAIN:
/* Keep track of the closest distance we got */
- if (minMediumExtra > (unsigned)mextra)
- minMediumExtra = (unsigned)mextra;
+ if (minMediumExtra > (unsigned)mextra)
+ minMediumExtra = (unsigned)mextra;
}
#endif // _TARGET_ARM_
@@ -4121,67 +4104,68 @@ AGAIN:
continue;
- /*****************************************************************************/
- /* Handle conversion to short jump */
- /*****************************************************************************/
+ /*****************************************************************************/
+ /* Handle conversion to short jump */
+ /*****************************************************************************/
SHORT_JMP:
-
+
/* Try to make this jump a short one */
emitSetShortJump(jmp);
if (!jmp->idjShort)
{
- continue; // This jump must be kept long
+ continue; // This jump must be kept long
}
/* This jump is becoming either short or medium */
oldSize = jsz;
- jsz = ssz;
+ jsz = ssz;
assert(oldSize >= jsz);
sizeDif = oldSize - jsz;
#if defined(_TARGET_XARCH_)
jmp->idCodeSize(jsz);
#elif defined(_TARGET_ARM_)
+#if 0
// This is done as part of emitSetShortJump():
- // insSize isz = emitInsSize(jmp->idInsFmt());
- // jmp->idInsSize(isz);
+ insSize isz = emitInsSize(jmp->idInsFmt());
+ jmp->idInsSize(isz);
+#endif
#elif defined(_TARGET_ARM64_)
// The size of IF_LARGEJMP/IF_LARGEADR/IF_LARGELDC are 8 or 12.
// All other code size is 4.
assert((sizeDif == 4) || (sizeDif == 8));
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif
goto NEXT_JMP;
-
#if defined(_TARGET_ARM_)
- /*****************************************************************************/
- /* Handle conversion to medium jump */
- /*****************************************************************************/
+ /*****************************************************************************/
+ /* Handle conversion to medium jump */
+ /*****************************************************************************/
MEDIUM_JMP:
-
+
/* Try to make this jump a medium one */
emitSetMediumJump(jmp);
if (jmp->idCodeSize() > msz)
{
- continue; // This jump wasn't shortened
+ continue; // This jump wasn't shortened
}
assert(jmp->idCodeSize() == msz);
/* This jump is becoming medium */
oldSize = jsz;
- jsz = msz;
+ jsz = msz;
assert(oldSize >= jsz);
sizeDif = oldSize - jsz;
@@ -4189,7 +4173,7 @@ AGAIN:
#endif // _TARGET_ARM_
- /*****************************************************************************/
+ /*****************************************************************************/
NEXT_JMP:
@@ -4197,26 +4181,28 @@ AGAIN:
assert((0 == (jsz | jmpDist)) || (jsz == emitSizeOfJump(jmp)));
-#ifdef DEBUG
- if (EMITVERBOSE)
+#ifdef DEBUG
+ if (EMITVERBOSE)
+ {
printf("Shrinking jump [%08X/%03u]\n", dspPtr(jmp), jmp->idDebugOnlyInfo()->idNum);
+ }
#endif
noway_assert((unsigned short)sizeDif == sizeDif);
- adjIG += sizeDif;
- adjLJ += sizeDif;
- jmpIG->igSize -= (unsigned short)sizeDif;
+ adjIG += sizeDif;
+ adjLJ += sizeDif;
+ jmpIG->igSize -= (unsigned short)sizeDif;
emitTotalCodeSize -= sizeDif;
/* The jump size estimate wasn't accurate; flag its group */
- jmpIG->igFlags |= IGF_UPD_ISZ;
+ jmpIG->igFlags |= IGF_UPD_ISZ;
} // end for each jump
/* Did we shorten any jumps? */
- if (adjIG)
+ if (adjIG)
{
/* Adjust offsets of any remaining blocks */
@@ -4225,38 +4211,48 @@ AGAIN:
for (;;)
{
lstIG = lstIG->igNext;
- if (!lstIG)
+ if (!lstIG)
+ {
break;
+ }
// printf("Adjusted offset of block %02u from %04X to %04X\n", lstIG->igNum, lstIG->igOffs,
// lstIG->igOffs - adjIG);
lstIG->igOffs -= adjIG;
assert(IsCodeAligned(lstIG->igOffs));
}
-#ifdef DEBUG
+#ifdef DEBUG
emitCheckIGoffsets();
#endif
/* Is there a chance of other jumps becoming short? */
CLANG_FORMAT_COMMENT_ANCHOR;
-#ifdef DEBUG
+#ifdef DEBUG
#if defined(_TARGET_ARM_)
- if (EMITVERBOSE) printf("Total shrinkage = %3u, min extra short jump size = %3u, min extra medium jump size = %u\n", adjIG, minShortExtra, minMediumExtra);
+ if (EMITVERBOSE)
+ printf("Total shrinkage = %3u, min extra short jump size = %3u, min extra medium jump size = %u\n", adjIG,
+ minShortExtra, minMediumExtra);
#else
- if (EMITVERBOSE) printf("Total shrinkage = %3u, min extra jump size = %3u\n", adjIG, minShortExtra);
+ if (EMITVERBOSE)
+ {
+ printf("Total shrinkage = %3u, min extra jump size = %3u\n", adjIG, minShortExtra);
+ }
#endif
#endif
- if ( (minShortExtra <= adjIG)
+ if ((minShortExtra <= adjIG)
#if defined(_TARGET_ARM_)
- || (minMediumExtra <= adjIG)
+ || (minMediumExtra <= adjIG)
#endif // _TARGET_ARM_
- )
+ )
{
jmp_iteration++;
-#ifdef DEBUG
- if (EMITVERBOSE) printf("Iterating branch shortening. Iteration = %d\n", jmp_iteration);
+#ifdef DEBUG
+ if (EMITVERBOSE)
+ {
+ printf("Iterating branch shortening. Iteration = %d\n", jmp_iteration);
+ }
#endif
goto AGAIN;
@@ -4264,9 +4260,9 @@ AGAIN:
}
}
-void emitter::emitCheckFuncletBranch(instrDesc * jmp, insGroup * jmpIG)
+void emitter::emitCheckFuncletBranch(instrDesc* jmp, insGroup* jmpIG)
{
-#ifdef DEBUG
+#ifdef DEBUG
// We should not be jumping/branching across funclets/functions
// Except possibly a 'call' to a finally funclet for a local unwind
// or a 'return' from a catch handler (that can go just about anywhere)
@@ -4275,11 +4271,13 @@ void emitter::emitCheckFuncletBranch(instrDesc * jmp, insGroup *
assert(jmp->idIsBound());
#ifdef _TARGET_AMD64_
- // An lea of a code address (for constant data stored with the code)
- // is treated like a jump for emission purposes but is not really a jump so
+ // An lea of a code address (for constant data stored with the code)
+ // is treated like a jump for emission purposes but is not really a jump so
// we don't have to check anything here.
if (jmp->idIns() == INS_lea)
+ {
return;
+ }
#endif
#ifdef _TARGET_ARMARCH_
@@ -4299,7 +4297,7 @@ void emitter::emitCheckFuncletBranch(instrDesc * jmp, insGroup *
}
#endif // _TARGET_ARM64_
- insGroup * tgtIG = jmp->idAddr()->iiaIGlabel;
+ insGroup* tgtIG = jmp->idAddr()->iiaIGlabel;
assert(tgtIG);
if (tgtIG->igFuncIdx != jmpIG->igFuncIdx)
{
@@ -4310,15 +4308,15 @@ void emitter::emitCheckFuncletBranch(instrDesc * jmp, insGroup *
// No branches back to the root method
assert(tgtIG->igFuncIdx > 0);
- FuncInfoDsc * tgtFunc = emitComp->funGetFunc(tgtIG->igFuncIdx);
+ FuncInfoDsc* tgtFunc = emitComp->funGetFunc(tgtIG->igFuncIdx);
assert(tgtFunc->funKind == FUNC_HANDLER);
- EHblkDsc * tgtEH = emitComp->ehGetDsc(tgtFunc->funEHIndex);
+ EHblkDsc* tgtEH = emitComp->ehGetDsc(tgtFunc->funEHIndex);
// Only branches to finallys (not faults, catches, filters, etc.)
assert(tgtEH->HasFinallyHandler());
// Only to the first block of the finally (which is properly marked)
- BasicBlock * tgtBlk = tgtEH->ebdHndBeg;
+ BasicBlock* tgtBlk = tgtEH->ebdHndBeg;
assert(tgtBlk->bbFlags & BBF_FUNCLET_BEG);
// And now we made it back to where we started
@@ -4330,14 +4328,14 @@ void emitter::emitCheckFuncletBranch(instrDesc * jmp, insGroup *
// Again there isn't enough information to prove this correct
// so just allow a 'branch' to any other 'parent' funclet
- FuncInfoDsc * jmpFunc = emitComp->funGetFunc(jmpIG->igFuncIdx);
+ FuncInfoDsc* jmpFunc = emitComp->funGetFunc(jmpIG->igFuncIdx);
assert(jmpFunc->funKind == FUNC_HANDLER);
- EHblkDsc * jmpEH = emitComp->ehGetDsc(jmpFunc->funEHIndex);
+ EHblkDsc* jmpEH = emitComp->ehGetDsc(jmpFunc->funEHIndex);
// Only branches out of catches
assert(jmpEH->HasCatchHandler());
- FuncInfoDsc * tgtFunc = emitComp->funGetFunc(tgtIG->igFuncIdx);
+ FuncInfoDsc* tgtFunc = emitComp->funGetFunc(tgtIG->igFuncIdx);
assert(tgtFunc);
if (tgtFunc->funKind == FUNC_HANDLER)
{
@@ -4363,7 +4361,6 @@ void emitter::emitCheckFuncletBranch(instrDesc * jmp, insGroup *
#endif // DEBUG
}
-
/*****************************************************************************
*
* Compute the code sizes that we're going to use to allocate the code buffers.
@@ -4376,11 +4373,10 @@ void emitter::emitCheckFuncletBranch(instrDesc * jmp, insGroup *
* Compiler::info.compTotalColdCodeSize
*/
-void emitter::emitComputeCodeSizes()
+void emitter::emitComputeCodeSizes()
{
- assert((emitComp->fgFirstColdBlock == NULL) ==
- (emitFirstColdIG == NULL));
-
+ assert((emitComp->fgFirstColdBlock == nullptr) == (emitFirstColdIG == nullptr));
+
if (emitFirstColdIG)
{
emitTotalHotCodeSize = emitFirstColdIG->igOffs;
@@ -4396,48 +4392,50 @@ void emitter::emitComputeCodeSizes()
emitComp->info.compTotalColdCodeSize = emitTotalColdCodeSize;
#ifdef DEBUG
- if (emitComp->verbose)
+ if (emitComp->verbose)
{
- printf("\nHot code size = 0x%X bytes\n", emitTotalHotCodeSize);
- printf( "Cold code size = 0x%X bytes\n", emitTotalColdCodeSize);
+ printf("\nHot code size = 0x%X bytes\n", emitTotalHotCodeSize);
+ printf("Cold code size = 0x%X bytes\n", emitTotalColdCodeSize);
}
#endif
}
-
/*****************************************************************************
*
* Called at the end of code generation, this method creates the code, data
* and GC info blocks for the method. Returns the size of the method (which must fit in an unsigned).
*/
-unsigned emitter::emitEndCodeGen(Compiler *comp,
- bool contTrkPtrLcls,
- bool fullyInt,
- bool fullPtrMap,
- bool returnsGCr,
- unsigned xcptnsCount,
- unsigned *prologSize,
- unsigned *epilogSize, void **codeAddr,
- void **coldCodeAddr,
- void **consAddr)
+unsigned emitter::emitEndCodeGen(Compiler* comp,
+ bool contTrkPtrLcls,
+ bool fullyInt,
+ bool fullPtrMap,
+ bool returnsGCr,
+ unsigned xcptnsCount,
+ unsigned* prologSize,
+ unsigned* epilogSize,
+ void** codeAddr,
+ void** coldCodeAddr,
+ void** consAddr)
{
#ifdef DEBUG
- if (emitComp->verbose)
+ if (emitComp->verbose)
+ {
printf("*************** In emitEndCodeGen()\n");
+ }
#endif
- insGroup * ig;
+ insGroup* ig;
- BYTE * consBlock;
- BYTE * codeBlock;
- BYTE * coldCodeBlock;
- BYTE * cp;
+ BYTE* consBlock;
+ BYTE* codeBlock;
+ BYTE* coldCodeBlock;
+ BYTE* cp;
- assert(emitCurIG == NULL);
+ assert(emitCurIG == nullptr);
- emitCodeBlock = NULL;
- emitConsBlock = NULL;
+ emitCodeBlock = nullptr;
+ emitConsBlock = nullptr;
/* Tell everyone whether we have fully interruptible code or not */
@@ -4452,8 +4450,8 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
// Default values, correct even if EMIT_TRACK_STACK_DEPTH is 0.
emitSimpleStkUsed = true;
- u1.emitSimpleStkMask = 0;
- u1.emitSimpleByrefStkMask = 0;
+ u1.emitSimpleStkMask = 0;
+ u1.emitSimpleByrefStkMask = 0;
#if EMIT_TRACK_STACK_DEPTH
/* Convert max. stack depth from # of bytes to # of entries */
@@ -4462,7 +4460,7 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
/* Should we use the simple stack */
- if (emitMaxStackDepth > MAX_SIMPLE_STK_DEPTH || emitFullGCinfo)
+ if (emitMaxStackDepth > MAX_SIMPLE_STK_DEPTH || emitFullGCinfo)
{
/* We won't use the "simple" argument table */
@@ -4470,17 +4468,21 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
/* Allocate the argument tracking table */
- if (emitMaxStackDepth <= sizeof(u2.emitArgTrackLcl))
+ if (emitMaxStackDepth <= sizeof(u2.emitArgTrackLcl))
+ {
u2.emitArgTrackTab = (BYTE*)u2.emitArgTrackLcl;
+ }
else
+ {
u2.emitArgTrackTab = (BYTE*)emitGetMem(roundUp(emitMaxStackDepth));
+ }
- u2.emitArgTrackTop = u2.emitArgTrackTab;
- u2.emitGcArgTrackCnt = 0;
+ u2.emitArgTrackTop = u2.emitArgTrackTab;
+ u2.emitGcArgTrackCnt = 0;
}
#endif
- if (emitEpilogCnt == 0)
+ if (emitEpilogCnt == 0)
{
/* No epilogs, make sure the epilog size is set to 0 */
@@ -4493,14 +4495,14 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
/* Return the size of the epilog to the caller */
- *epilogSize = emitEpilogSize;
+ *epilogSize = emitEpilogSize;
#ifdef _TARGET_XARCH_
- *epilogSize += emitExitSeqSize;
+ *epilogSize += emitExitSeqSize;
#endif // _TARGET_XARCH_
-#ifdef DEBUG
- if (EMIT_INSTLIST_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_INSTLIST_VERBOSE)
{
printf("\nInstruction list before instruction issue:\n\n");
emitDispIGlist(true);
@@ -4514,21 +4516,21 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
// If we're doing procedure splitting and we found cold blocks, then
// allocate hot and cold buffers. Otherwise only allocate a hot
// buffer.
-
- coldCodeBlock = NULL;
+
+ coldCodeBlock = nullptr;
CorJitAllocMemFlag allocMemFlag = CORJIT_ALLOCMEM_DEFAULT_CODE_ALIGN;
#ifdef _TARGET_X86_
//
// These are the heuristics we use to decide whether or not to force the
- // code to be 16-byte aligned.
+ // code to be 16-byte aligned.
//
// 1. For ngen code with IBC data, use 16-byte alignment if the method
// has been called more than BB_VERY_HOT_WEIGHT times.
// 2. For JITed code and ngen code without IBC data, use 16-byte alignment
- // when the code is 16 bytes or smaller. We align small getters/setters
- // because of they are penalized heavily on certain hardware when not 16-byte
+ // when the code is 16 bytes or smaller. We align small getters/setters
+ // because of they are penalized heavily on certain hardware when not 16-byte
// aligned (VSWhidbey #373938). To minimize size impact of this optimization,
// we do not align large methods because of the penalty is amortized for them.
//
@@ -4564,46 +4566,37 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
roDataAlignmentDelta = (UNATIVE_OFFSET)ALIGN_UP(emitTotalHotCodeSize, roDataAlignment) - emitTotalHotCodeSize;
assert((roDataAlignmentDelta == 0) || (roDataAlignmentDelta == 4));
}
- emitCmpHandle->allocMem(emitTotalHotCodeSize + roDataAlignmentDelta + emitConsDsc.dsdOffs, emitTotalColdCodeSize,
- 0,
- xcptnsCount,
- allocMemFlag,
- (void**)&codeBlock, (void**)&coldCodeBlock,
- (void**)&consBlock);
+ emitCmpHandle->allocMem(emitTotalHotCodeSize + roDataAlignmentDelta + emitConsDsc.dsdOffs, emitTotalColdCodeSize, 0,
+ xcptnsCount, allocMemFlag, (void**)&codeBlock, (void**)&coldCodeBlock, (void**)&consBlock);
consBlock = codeBlock + emitTotalHotCodeSize + roDataAlignmentDelta;
#else
- emitCmpHandle->allocMem( emitTotalHotCodeSize, emitTotalColdCodeSize,
- emitConsDsc.dsdOffs,
- xcptnsCount,
- allocMemFlag,
- (void**)&codeBlock, (void**)&coldCodeBlock,
- (void**)&consBlock);
+ emitCmpHandle->allocMem(emitTotalHotCodeSize, emitTotalColdCodeSize, emitConsDsc.dsdOffs, xcptnsCount, allocMemFlag,
+ (void**)&codeBlock, (void**)&coldCodeBlock, (void**)&consBlock);
#endif
-
// if (emitConsDsc.dsdOffs)
// printf("Cons=%08X\n", consBlock);
/* Give the block addresses to the caller and other functions here */
- *codeAddr = emitCodeBlock = codeBlock;
+ *codeAddr = emitCodeBlock = codeBlock;
*coldCodeAddr = emitColdCodeBlock = coldCodeBlock;
- *consAddr = emitConsBlock = consBlock;
+ *consAddr = emitConsBlock = consBlock;
/* Nothing has been pushed on the stack */
CLANG_FORMAT_COMMENT_ANCHOR;
#if EMIT_TRACK_STACK_DEPTH
- emitCurStackLvl = 0;
+ emitCurStackLvl = 0;
#endif
/* Assume no live GC ref variables on entry */
- VarSetOps::ClearD(emitComp, emitThisGCrefVars); // This is initialized to Empty at the start of codegen.
+ VarSetOps::ClearD(emitComp, emitThisGCrefVars); // This is initialized to Empty at the start of codegen.
emitThisGCrefRegs = emitThisByrefRegs = RBM_NONE;
- emitThisGCrefVset = true;
+ emitThisGCrefVset = true;
#ifdef DEBUG
@@ -4612,12 +4605,10 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
// We don't use these after this point
VarSetOps::AssignNoCopy(emitComp, emitPrevGCrefVars, VarSetOps::UninitVal());
- emitPrevGCrefRegs =
- emitPrevByrefRegs = 0xBAADFEED;
+ emitPrevGCrefRegs = emitPrevByrefRegs = 0xBAADFEED;
VarSetOps::AssignNoCopy(emitComp, emitInitGCrefVars, VarSetOps::UninitVal());
- emitInitGCrefRegs =
- emitInitByrefRegs = 0xBAADFEED;
+ emitInitGCrefRegs = emitInitByrefRegs = 0xBAADFEED;
#endif
@@ -4625,15 +4616,14 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
codeGen->gcInfo.gcVarPtrSetInit();
- emitSyncThisObjOffs = -1; /* -1 means no offset set */
- emitSyncThisObjReg = REG_NA; /* REG_NA means not set */
-
+ emitSyncThisObjOffs = -1; /* -1 means no offset set */
+ emitSyncThisObjReg = REG_NA; /* REG_NA means not set */
#ifdef JIT32_GCENCODER
- if (emitComp->lvaKeepAliveAndReportThis())
+ if (emitComp->lvaKeepAliveAndReportThis())
{
assert(emitComp->lvaIsOriginalThisArg(0));
- LclVarDsc * thisDsc = &emitComp->lvaTable[0];
+ LclVarDsc* thisDsc = &emitComp->lvaTable[0];
/* If "this" (which is passed in as a register argument in REG_ARG_0)
is enregistered, we normally spot the "mov REG_ARG_0 -> thisReg"
@@ -4647,17 +4637,16 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
we try to save space by avoiding that.
*/
- if (thisDsc->lvRegister)
+ if (thisDsc->lvRegister)
{
emitSyncThisObjReg = thisDsc->lvRegNum;
- if (emitSyncThisObjReg == (int)REG_ARG_0 &&
+ if (emitSyncThisObjReg == (int)REG_ARG_0 &&
(codeGen->intRegState.rsCalleeRegArgMaskLiveIn & genRegMask(REG_ARG_0)))
{
- if (emitFullGCinfo)
+ if (emitFullGCinfo)
{
- emitGCregLiveSet(GCT_GCREF,
- genRegMask(REG_ARG_0),
+ emitGCregLiveSet(GCT_GCREF, genRegMask(REG_ARG_0),
emitCodeBlock, // from offset 0
true);
}
@@ -4677,13 +4666,13 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
/* Are there any GC ref variables on the stack? */
- if (emitGCrFrameOffsCnt)
+ if (emitGCrFrameOffsCnt)
{
- size_t siz;
- unsigned cnt;
- unsigned num;
- LclVarDsc* dsc;
- int* tab;
+ size_t siz;
+ unsigned cnt;
+ unsigned num;
+ LclVarDsc* dsc;
+ int* tab;
/* Allocate and clear emitGCrFrameLiveTab[]. This is the table
mapping "stkOffs -> varPtrDsc". It holds a pointer to
@@ -4697,7 +4686,7 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
present, instead of lvaTrackedCount.
*/
- siz = emitGCrFrameOffsCnt * sizeof(*emitGCrFrameLiveTab);
+ siz = emitGCrFrameOffsCnt * sizeof(*emitGCrFrameLiveTab);
emitGCrFrameLiveTab = (varPtrDsc**)emitGetMem(roundUp(siz));
memset(emitGCrFrameLiveTab, 0, siz);
@@ -4707,30 +4696,33 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
Entries of Tracked stack byrefs have the lower bit set to 1.
*/
- emitTrkVarCnt = cnt = emitComp->lvaTrackedCount; assert(cnt);
+ emitTrkVarCnt = cnt = emitComp->lvaTrackedCount;
+ assert(cnt);
emitGCrFrameOffsTab = tab = (int*)emitGetMem(cnt * sizeof(int));
memset(emitGCrFrameOffsTab, -1, cnt * sizeof(int));
/* Now fill in all the actual used entries */
- for (num = 0, dsc = emitComp->lvaTable, cnt = emitComp->lvaCount;
- num < cnt;
- num++ , dsc++)
+ for (num = 0, dsc = emitComp->lvaTable, cnt = emitComp->lvaCount; num < cnt; num++, dsc++)
{
- if (!dsc->lvOnFrame || (dsc->lvIsParam && !dsc->lvIsRegArg))
+ if (!dsc->lvOnFrame || (dsc->lvIsParam && !dsc->lvIsRegArg))
+ {
continue;
+ }
#if FEATURE_FIXED_OUT_ARGS
- if (num == emitComp->lvaOutgoingArgSpaceVar)
+ if (num == emitComp->lvaOutgoingArgSpaceVar)
+ {
continue;
+ }
#endif // FEATURE_FIXED_OUT_ARGS
- int offs = dsc->lvStkOffs;
+ int offs = dsc->lvStkOffs;
/* Is it within the interesting range of offsets */
- if (offs >= emitGCrFrameOffsMin && offs < emitGCrFrameOffsMax)
+ if (offs >= emitGCrFrameOffsMin && offs < emitGCrFrameOffsMax)
{
/* Are tracked stack ptr locals laid out contiguously?
If not, skip non-ptrs. The emitter is optimized to work
@@ -4746,28 +4738,27 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
}
}
- unsigned indx = dsc->lvVarIndex;
+ unsigned indx = dsc->lvVarIndex;
assert(!dsc->lvRegister);
- assert( dsc->lvTracked);
- assert( dsc->lvRefCnt != 0);
+ assert(dsc->lvTracked);
+ assert(dsc->lvRefCnt != 0);
- assert( dsc->TypeGet() == TYP_REF ||
- dsc->TypeGet() == TYP_BYREF);
+ assert(dsc->TypeGet() == TYP_REF || dsc->TypeGet() == TYP_BYREF);
assert(indx < emitComp->lvaTrackedCount);
- // printf("Variable #%2u/%2u is at stack offset %d\n", num, indx, offs);
-
+// printf("Variable #%2u/%2u is at stack offset %d\n", num, indx, offs);
+
#ifdef JIT32_GCENCODER
/* Remember the frame offset of the "this" argument for synchronized methods */
- if (emitComp->lvaIsOriginalThisArg(num) && emitComp->lvaKeepAliveAndReportThis())
+ if (emitComp->lvaIsOriginalThisArg(num) && emitComp->lvaKeepAliveAndReportThis())
{
emitSyncThisObjOffs = offs;
offs |= this_OFFSET_FLAG;
}
#endif // JIT32_GCENCODER
-
+
if (dsc->TypeGet() == TYP_BYREF)
{
offs |= byref_OFFSET_FLAG;
@@ -4778,14 +4769,14 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
}
else
{
-#ifdef DEBUG
+#ifdef DEBUG
emitTrkVarCnt = 0;
- emitGCrFrameOffsTab = NULL;
+ emitGCrFrameOffsTab = nullptr;
#endif
}
-#ifdef DEBUG
- if (emitComp->verbose)
+#ifdef DEBUG
+ if (emitComp->verbose)
{
printf("\n***************************************************************************\n");
printf("Instructions as they come out of the scheduler\n\n");
@@ -4797,9 +4788,7 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
#define DEFAULT_CODE_BUFFER_INIT 0xcc
- for (ig = emitIGlist;
- ig;
- ig = ig->igNext)
+ for (ig = emitIGlist; ig; ig = ig->igNext)
{
assert(!(ig->igFlags & IGF_PLACEHOLDER)); // There better not be any placeholder groups left
@@ -4817,31 +4806,33 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
assert(coldCodeBlock);
cp = coldCodeBlock;
#ifdef DEBUG
- if (emitComp->opts.disAsm || emitComp->opts.dspEmit || emitComp->verbose)
+ if (emitComp->opts.disAsm || emitComp->opts.dspEmit || emitComp->verbose)
{
printf("\n************** Beginning of cold code **************\n");
}
#endif
}
-
+
/* Are we overflowing? */
if (ig->igNext && ig->igNum + 1 != ig->igNext->igNum)
+ {
NO_WAY("Too many instruction groups");
+ }
// If this instruction group is returned to from a funclet implementing a finally,
// on architectures where it is necessary generate GC info for the current instruction as
// if it were the instruction following a call.
emitGenGCInfoIfFuncletRetTarget(ig, cp);
-
- instrDesc * id = (instrDesc *)ig->igData;
+
+ instrDesc* id = (instrDesc*)ig->igData;
#ifdef DEBUG
/* Print the IG label, but only if it is a branch label */
- if (emitComp->opts.disAsm || emitComp->opts.dspEmit || emitComp->verbose)
+ if (emitComp->opts.disAsm || emitComp->opts.dspEmit || emitComp->verbose)
{
- if (emitComp->verbose)
+ if (emitComp->verbose)
{
printf("\n");
emitDispIG(ig); // Display the flags, IG data, etc.
@@ -4854,16 +4845,18 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
#endif // DEBUG
- BYTE * bp = cp;
+ BYTE* bp = cp;
/* Record the actual offset of the block, noting the difference */
- emitOffsAdj = ig->igOffs - emitCurCodeOffs(cp); assert(emitOffsAdj >= 0);
+ emitOffsAdj = ig->igOffs - emitCurCodeOffs(cp);
+ assert(emitOffsAdj >= 0);
#if DEBUG_EMIT
if ((emitOffsAdj != 0) && emitComp->verbose)
{
- printf("Block predicted offs = %08X, actual = %08X -> size adj = %d\n", ig->igOffs, emitCurCodeOffs(cp), emitOffsAdj);
+ printf("Block predicted offs = %08X, actual = %08X -> size adj = %d\n", ig->igOffs, emitCurCodeOffs(cp),
+ emitOffsAdj);
}
#endif // DEBUG_EMIT
@@ -4874,12 +4867,12 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
/* Set the proper stack level if appropriate */
- if (ig->igStkLvl != emitCurStackLvl)
+ if (ig->igStkLvl != emitCurStackLvl)
{
/* We are pushing stuff implicitly at this label */
assert((unsigned)ig->igStkLvl > (unsigned)emitCurStackLvl);
- emitStackPushN(cp, (ig->igStkLvl - (unsigned)emitCurStackLvl)/sizeof(int));
+ emitStackPushN(cp, (ig->igStkLvl - (unsigned)emitCurStackLvl) / sizeof(int));
}
#endif
@@ -4890,17 +4883,21 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
{
/* Is there a new set of live GC ref variables? */
- if (ig->igFlags & IGF_GC_VARS)
+ if (ig->igFlags & IGF_GC_VARS)
+ {
emitUpdateLiveGCvars(ig->igGCvars(), cp);
+ }
else if (!emitThisGCrefVset)
+ {
emitUpdateLiveGCvars(emitThisGCrefVars, cp);
+ }
/* Update the set of live GC ref registers */
{
- regMaskTP GCregs = ig->igGCregs;
+ regMaskTP GCregs = ig->igGCregs;
- if (GCregs != emitThisGCrefRegs)
+ if (GCregs != emitThisGCrefRegs)
{
emitUpdateLiveGCregs(GCT_GCREF, GCregs, cp);
}
@@ -4908,11 +4905,11 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
/* Is there a new set of live byref registers? */
- if (ig->igFlags & IGF_BYREF_REGS)
+ if (ig->igFlags & IGF_BYREF_REGS)
{
- unsigned byrefRegs = ig->igByrefRegs();
+ unsigned byrefRegs = ig->igByrefRegs();
- if (byrefRegs != emitThisByrefRegs)
+ if (byrefRegs != emitThisByrefRegs)
{
emitUpdateLiveGCregs(GCT_BYREF, byrefRegs, cp);
}
@@ -4931,13 +4928,13 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
for (unsigned cnt = ig->igInsCnt; cnt; cnt--)
{
- castto(id, BYTE *) += emitIssue1Instr(ig, id, &cp);
+ castto(id, BYTE*) += emitIssue1Instr(ig, id, &cp);
}
- emitCurIG = NULL;
+ emitCurIG = nullptr;
assert(ig->igSize >= cp - bp);
- ig->igSize = (unsigned short)(cp - bp);
+ ig->igSize = (unsigned short)(cp - bp);
}
#if EMIT_TRACK_STACK_DEPTH
@@ -4946,99 +4943,109 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
/* Output any initialized data we may have */
- if (emitConsDsc.dsdOffs)
+ if (emitConsDsc.dsdOffs)
{
emitOutputDataSec(&emitConsDsc, consBlock);
}
/* Make sure all GC ref variables are marked as dead */
- if (emitGCrFrameOffsCnt)
+ if (emitGCrFrameOffsCnt)
{
- unsigned vn;
- int of;
- varPtrDsc * * dp;
+ unsigned vn;
+ int of;
+ varPtrDsc** dp;
- for (vn = 0, of = emitGCrFrameOffsMin, dp = emitGCrFrameLiveTab;
- vn < emitGCrFrameOffsCnt;
- vn++ , of += sizeof(void*) , dp++)
+ for (vn = 0, of = emitGCrFrameOffsMin, dp = emitGCrFrameLiveTab; vn < emitGCrFrameOffsCnt;
+ vn++, of += sizeof(void*), dp++)
{
- if (*dp)
+ if (*dp)
+ {
emitGCvarDeadSet(of, cp, vn);
+ }
}
}
/* No GC registers are live any more */
- if (emitThisByrefRegs)
+ if (emitThisByrefRegs)
+ {
emitUpdateLiveGCregs(GCT_BYREF, RBM_NONE, cp);
- if (emitThisGCrefRegs)
+ }
+ if (emitThisGCrefRegs)
+ {
emitUpdateLiveGCregs(GCT_GCREF, RBM_NONE, cp);
+ }
/* Patch any forward jumps */
- if (emitFwdJumps)
+ if (emitFwdJumps)
{
- instrDescJmp * jmp;
+ instrDescJmp* jmp;
for (jmp = emitJumpList; jmp; jmp = jmp->idjNext)
{
- insGroup * tgt;
+ insGroup* tgt;
#ifdef _TARGET_XARCH_
assert(jmp->idInsFmt() == IF_LABEL || jmp->idInsFmt() == IF_RWR_LABEL || jmp->idInsFmt() == IF_SWR_LABEL);
#endif
tgt = jmp->idAddr()->iiaIGlabel;
- if (jmp->idjTemp.idjAddr == NULL)
+ if (jmp->idjTemp.idjAddr == nullptr)
+ {
continue;
+ }
- if (jmp->idjOffs != tgt->igOffs)
+ if (jmp->idjOffs != tgt->igOffs)
{
- BYTE * adr = jmp->idjTemp.idjAddr;
- int adj = jmp->idjOffs - tgt->igOffs;
+ BYTE* adr = jmp->idjTemp.idjAddr;
+ int adj = jmp->idjOffs - tgt->igOffs;
#ifdef _TARGET_ARM_
// On Arm, the offset is encoded in unit of 2 bytes.
adj >>= 1;
#endif
-#if DEBUG_EMIT
- if (jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+#if DEBUG_EMIT
+ if (jmp->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
#ifdef _TARGET_ARM_
- printf("[5] This output is broken for ARM, since it doesn't properly decode the jump offsets of the instruction at adr\n");
+ printf("[5] This output is broken for ARM, since it doesn't properly decode the jump offsets of "
+ "the instruction at adr\n");
#endif
- if (INTERESTING_JUMP_NUM == 0)
+ if (INTERESTING_JUMP_NUM == 0)
+ {
printf("[5] Jump %u:\n", jmp->idDebugOnlyInfo()->idNum);
+ }
- if (jmp->idjShort)
+ if (jmp->idjShort)
{
- printf("[5] Jump is at %08X\n" , (adr + 1 - emitCodeBlock));
- printf("[5] Jump distance is %02X - %02X = %02X\n", *(BYTE *)adr, adj, *(BYTE *)adr - adj);
+ printf("[5] Jump is at %08X\n", (adr + 1 - emitCodeBlock));
+ printf("[5] Jump distance is %02X - %02X = %02X\n", *(BYTE*)adr, adj, *(BYTE*)adr - adj);
}
else
{
- printf("[5] Jump is at %08X\n" , (adr + 4 - emitCodeBlock));
- printf("[5] Jump distance is %08X - %02X = %08X\n", *(int *)adr, adj, *(int *)adr - adj);
+ printf("[5] Jump is at %08X\n", (adr + 4 - emitCodeBlock));
+ printf("[5] Jump distance is %08X - %02X = %08X\n", *(int*)adr, adj, *(int*)adr - adj);
}
}
#endif // DEBUG_EMIT
- if (jmp->idjShort)
+ if (jmp->idjShort)
{
// Patch Forward Short Jump
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(_TARGET_XARCH_)
- *(BYTE *)adr -= (BYTE)adj;
+ *(BYTE*)adr -= (BYTE)adj;
#elif defined(_TARGET_ARM_)
// The following works because the jump offset is in the low order bits of the instruction.
// Presumably we could also just call "emitOutputLJ(NULL, adr, jmp)", like for long jumps?
- *(short int *)adr -= (short)adj;
+ *(short int*)adr -= (short)adj;
#elif defined(_TARGET_ARM64_)
assert(!jmp->idAddr()->iiaHasInstrCount());
emitOutputLJ(NULL, adr, jmp);
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif
}
else
@@ -5046,27 +5053,30 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
// Patch Forward non-Short Jump
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(_TARGET_XARCH_)
- *(int *)adr -= adj;
+ *(int*)adr -= adj;
#elif defined(_TARGET_ARMARCH_)
assert(!jmp->idAddr()->iiaHasInstrCount());
emitOutputLJ(NULL, adr, jmp);
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif
}
}
}
}
-#ifdef DEBUG
+#ifdef DEBUG
if (emitComp->opts.disAsm)
+ {
printf("\n");
+ }
- if (emitComp->verbose)
+ if (emitComp->verbose)
+ {
printf("Allocated method code size = %4u , actual size = %4u\n", emitTotalCodeSize, cp - codeBlock);
+ }
#endif
-
unsigned actualCodeSize = emitCurCodeOffs(cp);
/* Fill in eventual unused space */
@@ -5099,7 +5109,7 @@ unsigned emitter::emitEndCodeGen(Compiler *comp,
/* Return the amount of code we've generated */
- return actualCodeSize;
+ return actualCodeSize;
}
// See specification comment at the declaration.
@@ -5114,12 +5124,12 @@ void emitter::emitGenGCInfoIfFuncletRetTarget(insGroup* ig, BYTE* cp)
{
// We don't actually have a call instruction in this case, so we don't have
// a real size for that instruction. We'll use 1.
- emitStackPop(cp, /*isCall*/true, /*callInstrSize*/1, /*args*/0);
-
+ emitStackPop(cp, /*isCall*/ true, /*callInstrSize*/ 1, /*args*/ 0);
+
/* Do we need to record a call location for GC purposes? */
- if (!emitFullGCinfo)
+ if (!emitFullGCinfo)
{
- emitRecordGCcall(cp, /*callInstrSize*/1);
+ emitRecordGCcall(cp, /*callInstrSize*/ 1);
}
}
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
@@ -5131,29 +5141,33 @@ void emitter::emitGenGCInfoIfFuncletRetTarget(insGroup* ig, BYTE* cp)
* instruction number for this instruction
*/
-unsigned emitter::emitFindInsNum(insGroup *ig, instrDesc *idMatch)
+unsigned emitter::emitFindInsNum(insGroup* ig, instrDesc* idMatch)
{
- instrDesc * id = (instrDesc *)ig->igData;
+ instrDesc* id = (instrDesc*)ig->igData;
- // Check if we are the first instruction in the group
- if (id == idMatch)
+ // Check if we are the first instruction in the group
+ if (id == idMatch)
+ {
return 0;
+ }
/* Walk the list of instructions until we find a match */
- unsigned insNum = 0;
- unsigned insRemaining = ig->igInsCnt;
+ unsigned insNum = 0;
+ unsigned insRemaining = ig->igInsCnt;
while (insRemaining > 0)
{
- castto(id, BYTE *) += emitSizeOfInsDsc(id);
+ castto(id, BYTE*) += emitSizeOfInsDsc(id);
insNum++;
insRemaining--;
if (id == idMatch)
+ {
return insNum;
+ }
}
assert(!"emitFindInsNum failed");
- return -1;
+ return -1;
}
/*****************************************************************************
@@ -5163,10 +5177,10 @@ unsigned emitter::emitFindInsNum(insGroup *ig, instrDesc *idMatch)
* to find the true offset by looking for the instruction within the group.
*/
-UNATIVE_OFFSET emitter::emitFindOffset(insGroup *ig, unsigned insNum)
+UNATIVE_OFFSET emitter::emitFindOffset(insGroup* ig, unsigned insNum)
{
- instrDesc * id = (instrDesc *)ig->igData;
- UNATIVE_OFFSET of = 0;
+ instrDesc* id = (instrDesc*)ig->igData;
+ UNATIVE_OFFSET of = 0;
#ifdef DEBUG
/* Make sure we were passed reasonable arguments */
@@ -5180,12 +5194,12 @@ UNATIVE_OFFSET emitter::emitFindOffset(insGroup *ig, unsigned insNum)
{
of += emitInstCodeSz(id);
- castto(id, BYTE *) += emitSizeOfInsDsc(id);
+ castto(id, BYTE*) += emitSizeOfInsDsc(id);
insNum--;
}
- return of;
+ return of;
}
/*****************************************************************************
@@ -5195,14 +5209,12 @@ UNATIVE_OFFSET emitter::emitFindOffset(insGroup *ig, unsigned insNum)
* block.
*/
-UNATIVE_OFFSET emitter::emitDataGenBeg(UNATIVE_OFFSET size,
- bool dblAlign,
- bool codeLtab)
+UNATIVE_OFFSET emitter::emitDataGenBeg(UNATIVE_OFFSET size, bool dblAlign, bool codeLtab)
{
- unsigned secOffs;
- dataSection * secDesc;
+ unsigned secOffs;
+ dataSection* secDesc;
- assert(emitDataSecCur == 0);
+ assert(emitDataSecCur == nullptr);
/* The size better not be some kind of an odd thing */
@@ -5234,37 +5246,40 @@ UNATIVE_OFFSET emitter::emitDataGenBeg(UNATIVE_OFFSET size,
/* Allocate a data section descriptor and add it to the list */
- secDesc = emitDataSecCur = (dataSection *)emitGetMem(roundUp(sizeof(*secDesc) + size));
+ secDesc = emitDataSecCur = (dataSection*)emitGetMem(roundUp(sizeof(*secDesc) + size));
secDesc->dsSize = size;
secDesc->dsType = dataSection::data;
-
- secDesc->dsNext = 0;
- if (emitConsDsc.dsdLast)
+ secDesc->dsNext = nullptr;
+
+ if (emitConsDsc.dsdLast)
+ {
emitConsDsc.dsdLast->dsNext = secDesc;
+ }
else
- emitConsDsc.dsdList = secDesc;
+ {
+ emitConsDsc.dsdList = secDesc;
+ }
emitConsDsc.dsdLast = secDesc;
- return secOffs;
+ return secOffs;
}
// Start generating a constant data section for the current function
-// populated with BasicBlock references.
-// You can choose the references to be either absolute pointers, or
+// populated with BasicBlock references.
+// You can choose the references to be either absolute pointers, or
// 4-byte relative addresses.
-// Currently the relative references are relative to the start of the
+// Currently the relative references are relative to the start of the
// first block (this is somewhat arbitrary)
-UNATIVE_OFFSET emitter::emitBBTableDataGenBeg(unsigned numEntries,
- bool relativeAddr)
+UNATIVE_OFFSET emitter::emitBBTableDataGenBeg(unsigned numEntries, bool relativeAddr)
{
- unsigned secOffs;
- dataSection * secDesc;
+ unsigned secOffs;
+ dataSection* secDesc;
- assert(emitDataSecCur == 0);
+ assert(emitDataSecCur == nullptr);
UNATIVE_OFFSET emittedSize;
@@ -5276,7 +5291,7 @@ UNATIVE_OFFSET emitter::emitBBTableDataGenBeg(unsigned numEntries,
{
emittedSize = numEntries * TARGET_POINTER_SIZE;
}
-
+
/* Get hold of the current offset */
secOffs = emitConsDsc.dsdOffs;
@@ -5287,26 +5302,26 @@ UNATIVE_OFFSET emitter::emitBBTableDataGenBeg(unsigned numEntries,
/* Allocate a data section descriptor and add it to the list */
- secDesc = emitDataSecCur = (dataSection *)emitGetMem(roundUp(sizeof(*secDesc) + numEntries * sizeof(BasicBlock*)));
-
+ secDesc = emitDataSecCur = (dataSection*)emitGetMem(roundUp(sizeof(*secDesc) + numEntries * sizeof(BasicBlock*)));
+
secDesc->dsSize = emittedSize;
-
+
secDesc->dsType = relativeAddr ? dataSection::blockRelative32 : dataSection::blockAbsoluteAddr;
-
- secDesc->dsNext = 0;
- if (emitConsDsc.dsdLast)
+ secDesc->dsNext = nullptr;
+
+ if (emitConsDsc.dsdLast)
{
emitConsDsc.dsdLast->dsNext = secDesc;
}
else
{
- emitConsDsc.dsdList = secDesc;
+ emitConsDsc.dsdList = secDesc;
}
emitConsDsc.dsdLast = secDesc;
- return secOffs;
+ return secOffs;
}
/*****************************************************************************
@@ -5314,9 +5329,7 @@ UNATIVE_OFFSET emitter::emitBBTableDataGenBeg(unsigned numEntries,
* Emit the given block of bits into the current data section.
*/
-void emitter::emitDataGenData(unsigned offs,
- const void *data,
- size_t size)
+void emitter::emitDataGenData(unsigned offs, const void* data, size_t size)
{
assert(emitDataSecCur && (emitDataSecCur->dsSize >= offs + size));
@@ -5330,8 +5343,7 @@ void emitter::emitDataGenData(unsigned offs,
* Emit the address of the given basic block into the current data section.
*/
-void emitter::emitDataGenData(unsigned index,
- BasicBlock *label)
+void emitter::emitDataGenData(unsigned index, BasicBlock* label)
{
assert(emitDataSecCur != nullptr);
assert(emitDataSecCur->dsType == dataSection::blockAbsoluteAddr ||
@@ -5349,13 +5361,13 @@ void emitter::emitDataGenData(unsigned index,
* We're done generating a data section.
*/
-void emitter::emitDataGenEnd()
+void emitter::emitDataGenEnd()
{
#ifdef DEBUG
- assert(emitDataSecCur); emitDataSecCur = 0;
+ assert(emitDataSecCur);
+ emitDataSecCur = nullptr;
#endif
-
}
/********************************************************************************
@@ -5388,9 +5400,9 @@ UNATIVE_OFFSET emitter::emitDataConst(const void* cnsAddr, unsigned cnsSize, boo
* Output the given data section at the specified address.
*/
-void emitter::emitOutputDataSec(dataSecDsc *sec, BYTE *dst)
+void emitter::emitOutputDataSec(dataSecDsc* sec, BYTE* dst)
{
-#ifdef DEBUG
+#ifdef DEBUG
if (EMITVERBOSE)
{
printf("\nEmitting data sections: %u total bytes\n", sec->dsdOffs);
@@ -5405,7 +5417,7 @@ void emitter::emitOutputDataSec(dataSecDsc *sec, BYTE *dst)
/* Walk and emit the contents of all the data blocks */
- dataSection * dsc;
+ dataSection* dsc;
for (dsc = sec->dsdList; dsc; dsc = dsc->dsNext)
{
@@ -5418,16 +5430,16 @@ void emitter::emitOutputDataSec(dataSecDsc *sec, BYTE *dst)
assert(dscSize && dscSize % sizeof(BasicBlock*) == 0);
size_t numElems = dscSize / TARGET_POINTER_SIZE;
- BYTE** bDst = (BYTE**) dst;
- for (unsigned i=0; i<numElems; i++)
+ BYTE** bDst = (BYTE**)dst;
+ for (unsigned i = 0; i < numElems; i++)
{
BasicBlock* block = ((BasicBlock**)dsc->dsCont)[i];
- // Convert the BasicBlock* value to an IG address
- insGroup* lab = (insGroup*)emitCodeGetCookie(block);
+ // Convert the BasicBlock* value to an IG address
+ insGroup* lab = (insGroup*)emitCodeGetCookie(block);
- // Append the appropriate address to the destination
- BYTE* target = emitOffsetToPtr(lab->igOffs);
+ // Append the appropriate address to the destination
+ BYTE* target = emitOffsetToPtr(lab->igOffs);
#ifdef _TARGET_ARM_
target = (BYTE*)((size_t)target | 1); // Or in thumb bit
@@ -5440,37 +5452,35 @@ void emitter::emitOutputDataSec(dataSecDsc *sec, BYTE *dst)
JITDUMP(" BB%02u: 0x%p\n", block->bbNum, bDst[i]);
}
-
}
// relative label table
else if (dsc->dsType == dataSection::blockRelative32)
{
JITDUMP(" section %u, size %u, block relative addr\n", secNum++, dscSize);
- unsigned elemSize = 4;
- size_t numElems = dscSize / 4;
- unsigned* uDst = (unsigned *) dst;
+ unsigned elemSize = 4;
+ size_t numElems = dscSize / 4;
+ unsigned* uDst = (unsigned*)dst;
insGroup* labFirst = (insGroup*)emitCodeGetCookie(emitComp->fgFirstBB);
- for (unsigned i=0; i<numElems; i++)
+ for (unsigned i = 0; i < numElems; i++)
{
BasicBlock* block = ((BasicBlock**)dsc->dsCont)[i];
- // Convert the BasicBlock* value to an IG address
- insGroup* lab = (insGroup*)emitCodeGetCookie(block);
+ // Convert the BasicBlock* value to an IG address
+ insGroup* lab = (insGroup*)emitCodeGetCookie(block);
assert(FitsIn<uint32_t>(lab->igOffs - labFirst->igOffs));
- uDst[i] = lab->igOffs - labFirst->igOffs;
+ uDst[i] = lab->igOffs - labFirst->igOffs;
JITDUMP(" BB%02u: 0x%x\n", block->bbNum, uDst[i]);
}
-
}
else
{
JITDUMP(" section %u, size %u, raw data\n", secNum++, dscSize);
- // Simple binary data: copy the bytes to the target
+ // Simple binary data: copy the bytes to the target
assert(dsc->dsType == dataSection::data);
memcpy(dst, dsc->dsCont, dscSize);
@@ -5501,22 +5511,21 @@ void emitter::emitOutputDataSec(dataSecDsc *sec, BYTE *dst)
* Record the fact that the given variable now contains a live GC ref.
*/
-void emitter::emitGCvarLiveSet(int offs,
- GCtype gcType,
- BYTE * addr,
- ssize_t disp)
+void emitter::emitGCvarLiveSet(int offs, GCtype gcType, BYTE* addr, ssize_t disp)
{
assert(emitIssuing);
- varPtrDsc * desc;
+ varPtrDsc* desc;
assert((abs(offs) % sizeof(ssize_t)) == 0);
assert(needsGC(gcType));
/* Compute the index into the GC frame table if the caller didn't do it */
- if (disp == -1)
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void *);
+ if (disp == -1)
+ {
+ disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+ }
assert((size_t)disp < emitGCrFrameOffsCnt);
@@ -5531,47 +5540,51 @@ void emitter::emitGCvarLiveSet(int offs,
desc->vpdVarNum = offs;
- desc->vpdNext = NULL;
+ desc->vpdNext = nullptr;
/* the lower 2 bits encode props about the stk ptr */
- if (offs == emitSyncThisObjOffs)
+ if (offs == emitSyncThisObjOffs)
{
desc->vpdVarNum |= this_OFFSET_FLAG;
}
- if (gcType == GCT_BYREF)
+ if (gcType == GCT_BYREF)
{
desc->vpdVarNum |= byref_OFFSET_FLAG;
}
/* Append the new entry to the end of the list */
- if (codeGen->gcInfo.gcVarPtrLast == NULL)
+ if (codeGen->gcInfo.gcVarPtrLast == nullptr)
{
- assert(codeGen->gcInfo.gcVarPtrList == NULL);
+ assert(codeGen->gcInfo.gcVarPtrList == nullptr);
codeGen->gcInfo.gcVarPtrList = codeGen->gcInfo.gcVarPtrLast = desc;
}
else
{
- assert(codeGen->gcInfo.gcVarPtrList != NULL);
+ assert(codeGen->gcInfo.gcVarPtrList != nullptr);
codeGen->gcInfo.gcVarPtrLast->vpdNext = desc;
codeGen->gcInfo.gcVarPtrLast = desc;
}
/* Record the variable descriptor in the table */
- assert(emitGCrFrameLiveTab[disp] == NULL);
- emitGCrFrameLiveTab[disp] = desc;
+ assert(emitGCrFrameLiveTab[disp] == nullptr);
+ emitGCrFrameLiveTab[disp] = desc;
-#ifdef DEBUG
- if (EMITVERBOSE)
+#ifdef DEBUG
+ if (EMITVERBOSE)
{
printf("[%08X] %s var born at [%s", dspPtr(desc), GCtypeStr(gcType), emitGetFrameReg());
- if (offs < 0)
+ if (offs < 0)
+ {
printf("-%02XH", -offs);
+ }
else if (offs > 0)
+ {
printf("+%02XH", +offs);
+ }
printf("]\n");
}
@@ -5587,50 +5600,53 @@ void emitter::emitGCvarLiveSet(int offs,
* Record the fact that the given variable no longer contains a live GC ref.
*/
-void emitter::emitGCvarDeadSet(int offs, BYTE *addr, ssize_t disp)
+void emitter::emitGCvarDeadSet(int offs, BYTE* addr, ssize_t disp)
{
assert(emitIssuing);
- varPtrDsc * desc;
+ varPtrDsc* desc;
assert(abs(offs) % sizeof(int) == 0);
/* Compute the index into the GC frame table if the caller didn't do it */
- if (disp == -1)
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void *);
+ if (disp == -1)
+ {
+ disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
+ }
assert((unsigned)disp < emitGCrFrameOffsCnt);
/* Get hold of the lifetime descriptor and clear the entry */
- desc = emitGCrFrameLiveTab[disp];
- emitGCrFrameLiveTab[disp] = NULL;
+ desc = emitGCrFrameLiveTab[disp];
+ emitGCrFrameLiveTab[disp] = nullptr;
- assert( desc);
+ assert(desc);
assert((desc->vpdVarNum & ~OFFSET_MASK) == (unsigned)offs);
/* Record the death code offset */
assert(desc->vpdEndOfs == 0xFACEDEAD);
- desc->vpdEndOfs = emitCurCodeOffs(addr);
+ desc->vpdEndOfs = emitCurCodeOffs(addr);
-#ifdef DEBUG
- if (EMITVERBOSE)
+#ifdef DEBUG
+ if (EMITVERBOSE)
{
- GCtype gcType = (desc->vpdVarNum & byref_OFFSET_FLAG) ? GCT_BYREF : GCT_GCREF;
- bool isThis = (desc->vpdVarNum & this_OFFSET_FLAG) != 0;
+ GCtype gcType = (desc->vpdVarNum & byref_OFFSET_FLAG) ? GCT_BYREF : GCT_GCREF;
+ bool isThis = (desc->vpdVarNum & this_OFFSET_FLAG) != 0;
- printf("[%08X] %s%s var died at [%s",
- dspPtr(desc),
- GCtypeStr(gcType),
- isThis ? "this-ptr" : "",
+ printf("[%08X] %s%s var died at [%s", dspPtr(desc), GCtypeStr(gcType), isThis ? "this-ptr" : "",
emitGetFrameReg());
- if (offs < 0)
+ if (offs < 0)
+ {
printf("-%02XH", -offs);
+ }
else if (offs > 0)
+ {
printf("+%02XH", +offs);
+ }
printf("]\n");
}
@@ -5646,21 +5662,25 @@ void emitter::emitGCvarDeadSet(int offs, BYTE *addr, ssize_t disp
* Record a new set of live GC ref variables.
*/
-void emitter::emitUpdateLiveGCvars(VARSET_VALARG_TP vars, BYTE *addr)
+void emitter::emitUpdateLiveGCvars(VARSET_VALARG_TP vars, BYTE* addr)
{
assert(emitIssuing);
// Don't track GC changes in epilogs
if (emitIGisInEpilog(emitCurIG))
+ {
return;
+ }
/* Is the current set accurate and unchanged? */
- if (emitThisGCrefVset && VarSetOps::Equal(emitComp, emitThisGCrefVars, vars))
+ if (emitThisGCrefVset && VarSetOps::Equal(emitComp, emitThisGCrefVars, vars))
+ {
return;
+ }
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
{
printf("New GC ref live vars=%s ", VarSetOps::ToString(emitComp, vars));
dumpConvertedVarSet(emitComp, vars);
@@ -5672,38 +5692,35 @@ void emitter::emitUpdateLiveGCvars(VARSET_VALARG_TP vars, BYTE *a
/* Are there any GC ref variables on the stack? */
- if (emitGCrFrameOffsCnt)
+ if (emitGCrFrameOffsCnt)
{
- int * tab;
- unsigned cnt = emitTrkVarCnt;
- unsigned num;
+ int* tab;
+ unsigned cnt = emitTrkVarCnt;
+ unsigned num;
/* Test all the tracked variable bits in the mask */
- for (num = 0, tab = emitGCrFrameOffsTab;
- num < cnt;
- num++ , tab++)
+ for (num = 0, tab = emitGCrFrameOffsTab; num < cnt; num++, tab++)
{
- int val = *tab;
+ int val = *tab;
- if (val != -1)
+ if (val != -1)
{
// byref_OFFSET_FLAG and this_OFFSET_FLAG are set
// in the table-offsets for byrefs and this-ptr
- int offs = val & ~OFFSET_MASK;
+ int offs = val & ~OFFSET_MASK;
// printf("var #%2u at %3d is now %s\n", num, offs, (vars & 1) ? "live" : "dead");
- if (VarSetOps::IsMember(emitComp, vars, num))
+ if (VarSetOps::IsMember(emitComp, vars, num))
{
- GCtype gcType = (val & byref_OFFSET_FLAG) ? GCT_BYREF
- : GCT_GCREF;
+ GCtype gcType = (val & byref_OFFSET_FLAG) ? GCT_BYREF : GCT_GCREF;
emitGCvarLiveUpd(offs, INT_MAX, gcType, addr);
}
else
{
- emitGCvarDeadUpd(offs, addr);
+ emitGCvarDeadUpd(offs, addr);
}
}
}
@@ -5718,32 +5735,31 @@ void emitter::emitUpdateLiveGCvars(VARSET_VALARG_TP vars, BYTE *a
* will not be fully interruptible).
*/
-void emitter::emitRecordGCcall(BYTE * codePos,
- unsigned char callInstrSize)
+void emitter::emitRecordGCcall(BYTE* codePos, unsigned char callInstrSize)
{
assert(emitIssuing);
assert(!emitFullGCinfo);
- unsigned offs = emitCurCodeOffs(codePos);
- unsigned regs = (emitThisGCrefRegs|emitThisByrefRegs) & ~RBM_INTRET;
- callDsc * call;
+ unsigned offs = emitCurCodeOffs(codePos);
+ unsigned regs = (emitThisGCrefRegs | emitThisByrefRegs) & ~RBM_INTRET;
+ callDsc* call;
#ifdef JIT32_GCENCODER
- // The JIT32 GCInfo encoder allows us to (as the comment previously here said):
+ // The JIT32 GCInfo encoder allows us to (as the comment previously here said):
// "Bail if this is a totally boring call", but the GCInfoEncoder/Decoder interface
// requires a definition for every call site, so we skip these "early outs" when we're
// using the general encoder.
- if (regs == 0)
+ if (regs == 0)
{
#if EMIT_TRACK_STACK_DEPTH
- if (emitCurStackLvl == 0)
+ if (emitCurStackLvl == 0)
return;
#endif
/* Nope, only interesting calls get recorded */
- if (emitSimpleStkUsed)
+ if (emitSimpleStkUsed)
{
- if (!u1.emitSimpleStkMask)
+ if (!u1.emitSimpleStkMask)
return;
}
else
@@ -5754,22 +5770,22 @@ void emitter::emitRecordGCcall(BYTE * codePos,
}
#endif // JIT32_GCENCODER
-#ifdef DEBUG
+#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+ if (EMIT_GC_VERBOSE)
{
printf("; Call at %04X [stk=%u], GCvars=", offs - callInstrSize, emitCurStackLvl);
emitDispVarSet();
printf(", gcrefRegs=");
printRegMaskInt(emitThisGCrefRegs);
- emitDispRegSet (emitThisGCrefRegs);
- //printRegMaskInt(emitThisGCrefRegs & ~RBM_INTRET & RBM_CALLEE_SAVED); // only display callee-saved
- //emitDispRegSet (emitThisGCrefRegs & ~RBM_INTRET & RBM_CALLEE_SAVED); // only display callee-saved
+ emitDispRegSet(emitThisGCrefRegs);
+ // printRegMaskInt(emitThisGCrefRegs & ~RBM_INTRET & RBM_CALLEE_SAVED); // only display callee-saved
+ // emitDispRegSet (emitThisGCrefRegs & ~RBM_INTRET & RBM_CALLEE_SAVED); // only display callee-saved
printf(", byrefRegs=");
printRegMaskInt(emitThisByrefRegs);
- emitDispRegSet (emitThisByrefRegs);
- //printRegMaskInt(emitThisByrefRegs & ~RBM_INTRET & RBM_CALLEE_SAVED); // only display callee-saved
- //emitDispRegSet (emitThisByrefRegs & ~RBM_INTRET & RBM_CALLEE_SAVED); // only display callee-saved
+ emitDispRegSet(emitThisByrefRegs);
+ // printRegMaskInt(emitThisByrefRegs & ~RBM_INTRET & RBM_CALLEE_SAVED); // only display callee-saved
+ // emitDispRegSet (emitThisByrefRegs & ~RBM_INTRET & RBM_CALLEE_SAVED); // only display callee-saved
printf("\n");
}
@@ -5779,15 +5795,15 @@ void emitter::emitRecordGCcall(BYTE * codePos,
call = new (emitComp, CMK_GC) callDsc;
- call->cdBlock = NULL;
- call->cdOffs = offs;
+ call->cdBlock = nullptr;
+ call->cdOffs = offs;
#ifndef JIT32_GCENCODER
call->cdCallInstrSize = callInstrSize;
#endif
- call->cdNext = NULL;
+ call->cdNext = nullptr;
- call->cdGCrefRegs = (regMaskSmall)emitThisGCrefRegs;
- call->cdByrefRegs = (regMaskSmall)emitThisByrefRegs;
+ call->cdGCrefRegs = (regMaskSmall)emitThisGCrefRegs;
+ call->cdByrefRegs = (regMaskSmall)emitThisByrefRegs;
#if EMIT_TRACK_STACK_DEPTH
#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -5796,21 +5812,21 @@ void emitter::emitRecordGCcall(BYTE * codePos,
#endif
// Append the call descriptor to the list */
- if (codeGen->gcInfo.gcCallDescLast == NULL)
+ if (codeGen->gcInfo.gcCallDescLast == nullptr)
{
- assert(codeGen->gcInfo.gcCallDescList == NULL);
+ assert(codeGen->gcInfo.gcCallDescList == nullptr);
codeGen->gcInfo.gcCallDescList = codeGen->gcInfo.gcCallDescLast = call;
}
else
{
- assert(codeGen->gcInfo.gcCallDescList != NULL);
+ assert(codeGen->gcInfo.gcCallDescList != nullptr);
codeGen->gcInfo.gcCallDescLast->cdNext = call;
codeGen->gcInfo.gcCallDescLast = call;
}
/* Record the current "pending" argument list */
- if (emitSimpleStkUsed)
+ if (emitSimpleStkUsed)
{
/* The biggest call is less than MAX_SIMPLE_STK_DEPTH. So use
small format */
@@ -5824,22 +5840,21 @@ void emitter::emitRecordGCcall(BYTE * codePos,
/* The current call has too many arguments, so we need to report the
offsets of each individual GC arg. */
- call->cdArgCnt = u2.emitGcArgTrackCnt;
+ call->cdArgCnt = u2.emitGcArgTrackCnt;
if (call->cdArgCnt == 0)
{
- call->u1.cdArgMask =
- call->u1.cdByrefArgMask = 0;
+ call->u1.cdArgMask = call->u1.cdByrefArgMask = 0;
return;
}
- call->cdArgTable = new (emitComp, CMK_GC) unsigned[u2.emitGcArgTrackCnt];
+ call->cdArgTable = new (emitComp, CMK_GC) unsigned[u2.emitGcArgTrackCnt];
unsigned gcArgs = 0;
- unsigned stkLvl = emitCurStackLvl/sizeof(int);
+ unsigned stkLvl = emitCurStackLvl / sizeof(int);
for (unsigned i = 0; i < stkLvl; i++)
{
- GCtype gcType = (GCtype)u2.emitArgTrackTab[stkLvl-i-1];
+ GCtype gcType = (GCtype)u2.emitArgTrackTab[stkLvl - i - 1];
if (needsGC(gcType))
{
@@ -5863,44 +5878,42 @@ void emitter::emitRecordGCcall(BYTE * codePos,
* Record a new set of live GC ref registers.
*/
-void emitter::emitUpdateLiveGCregs(GCtype gcType,
- regMaskTP regs,
- BYTE * addr)
+void emitter::emitUpdateLiveGCregs(GCtype gcType, regMaskTP regs, BYTE* addr)
{
assert(emitIssuing);
// Don't track GC changes in epilogs
if (emitIGisInEpilog(emitCurIG))
+ {
return;
+ }
- regMaskTP life;
- regMaskTP dead;
- regMaskTP chg;
+ regMaskTP life;
+ regMaskTP dead;
+ regMaskTP chg;
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
{
printf("New %sReg live regs=", GCtypeStr(gcType));
printRegMaskInt(regs);
- emitDispRegSet (regs);
+ emitDispRegSet(regs);
printf("\n");
}
#endif
assert(needsGC(gcType));
- regMaskTP & emitThisXXrefRegs = (gcType == GCT_GCREF) ? emitThisGCrefRegs
- : emitThisByrefRegs;
- regMaskTP & emitThisYYrefRegs = (gcType == GCT_GCREF) ? emitThisByrefRegs
- : emitThisGCrefRegs;
+ regMaskTP& emitThisXXrefRegs = (gcType == GCT_GCREF) ? emitThisGCrefRegs : emitThisByrefRegs;
+ regMaskTP& emitThisYYrefRegs = (gcType == GCT_GCREF) ? emitThisByrefRegs : emitThisGCrefRegs;
assert(emitThisXXrefRegs != regs);
- if (emitFullGCinfo)
+ if (emitFullGCinfo)
{
/* Figure out which GC registers are becoming live/dead at this point */
- dead = ( emitThisXXrefRegs & ~regs);
- life = (~emitThisXXrefRegs & regs);
+ dead = (emitThisXXrefRegs & ~regs);
+ life = (~emitThisXXrefRegs & regs);
/* Can't simultaneously become live and dead at the same time */
@@ -5913,24 +5926,27 @@ void emitter::emitUpdateLiveGCregs(GCtype gcType,
do
{
- regMaskTP bit = genFindLowestBit(chg);
- regNumber reg = genRegNumFromMask(bit);
+ regMaskTP bit = genFindLowestBit(chg);
+ regNumber reg = genRegNumFromMask(bit);
- if (life & bit)
+ if (life & bit)
+ {
emitGCregLiveUpd(gcType, reg, addr);
+ }
else
+ {
emitGCregDeadUpd(reg, addr);
+ }
chg -= bit;
- }
- while (chg);
+ } while (chg);
assert(emitThisXXrefRegs == regs);
}
else
{
emitThisYYrefRegs &= ~regs; // Kill the regs from the other GC type (if live)
- emitThisXXrefRegs = regs; // Mark them as live in the requested GC type
+ emitThisXXrefRegs = regs; // Mark them as live in the requested GC type
}
// The 2 GC reg masks can't be overlapping
@@ -5943,26 +5959,23 @@ void emitter::emitUpdateLiveGCregs(GCtype gcType,
* Record the fact that the given register now contains a live GC ref.
*/
-void emitter::emitGCregLiveSet(GCtype gcType,
- regMaskTP regMask,
- BYTE * addr,
- bool isThis)
+void emitter::emitGCregLiveSet(GCtype gcType, regMaskTP regMask, BYTE* addr, bool isThis)
{
assert(emitIssuing);
assert(needsGC(gcType));
- regPtrDsc * regPtrNext;
+ regPtrDsc* regPtrNext;
assert(!isThis || emitComp->lvaKeepAliveAndReportThis());
// assert(emitFullyInt || isThis);
assert(emitFullGCinfo);
- assert(((emitThisGCrefRegs|emitThisByrefRegs) & regMask) == 0);
+ assert(((emitThisGCrefRegs | emitThisByrefRegs) & regMask) == 0);
/* Allocate a new regptr entry and fill it in */
- regPtrNext = codeGen->gcInfo.gcRegPtrAllocDsc();
- regPtrNext->rpdGCtype = gcType;
+ regPtrNext = codeGen->gcInfo.gcRegPtrAllocDsc();
+ regPtrNext->rpdGCtype = gcType;
regPtrNext->rpdOffs = emitCurCodeOffs(addr);
regPtrNext->rpdArg = FALSE;
@@ -5977,24 +5990,22 @@ void emitter::emitGCregLiveSet(GCtype gcType,
* Record the fact that the given register no longer contains a live GC ref.
*/
-void emitter::emitGCregDeadSet(GCtype gcType,
- regMaskTP regMask,
- BYTE * addr)
+void emitter::emitGCregDeadSet(GCtype gcType, regMaskTP regMask, BYTE* addr)
{
assert(emitIssuing);
assert(needsGC(gcType));
- regPtrDsc * regPtrNext;
+ regPtrDsc* regPtrNext;
// assert(emitFullyInt);
assert(emitFullGCinfo);
- assert(((emitThisGCrefRegs|emitThisByrefRegs) & regMask) != 0);
+ assert(((emitThisGCrefRegs | emitThisByrefRegs) & regMask) != 0);
/* Allocate a new regptr entry and fill it in */
- regPtrNext = codeGen->gcInfo.gcRegPtrAllocDsc();
- regPtrNext->rpdGCtype = gcType;
+ regPtrNext = codeGen->gcInfo.gcRegPtrAllocDsc();
+ regPtrNext->rpdGCtype = gcType;
regPtrNext->rpdOffs = emitCurCodeOffs(addr);
regPtrNext->rpdCall = FALSE;
@@ -6009,19 +6020,22 @@ void emitter::emitGCregDeadSet(GCtype gcType,
* Emit an 8-bit integer as code.
*/
-unsigned char emitter::emitOutputByte(BYTE *dst, ssize_t val)
+unsigned char emitter::emitOutputByte(BYTE* dst, ssize_t val)
{
- *castto(dst, unsigned char *) = (unsigned char)val;
+ *castto(dst, unsigned char*) = (unsigned char)val;
-#ifdef DEBUG
- if (emitComp->opts.dspEmit) printf("; emit_byte 0%02XH\n", val & 0xFF);
+#ifdef DEBUG
+ if (emitComp->opts.dspEmit)
+ {
+ printf("; emit_byte 0%02XH\n", val & 0xFF);
+ }
#ifdef _TARGET_AMD64_
// if we're emitting code bytes, ensure that we've already emitted the rex prefix!
assert(((val & 0xFF00000000LL) == 0) || ((val & 0xFFFFFFFF00000000LL) == 0xFFFFFFFF00000000LL));
#endif // _TARGET_AMD64_
#endif
- return sizeof(unsigned char);
+ return sizeof(unsigned char);
}
/*****************************************************************************
@@ -6029,19 +6043,22 @@ unsigned char emitter::emitOutputByte(BYTE *dst, ssize_t val)
* Emit a 16-bit integer as code.
*/
-unsigned char emitter::emitOutputWord(BYTE *dst, ssize_t val)
+unsigned char emitter::emitOutputWord(BYTE* dst, ssize_t val)
{
MISALIGNED_WR_I2(dst, (short)val);
-#ifdef DEBUG
- if (emitComp->opts.dspEmit) printf("; emit_word 0%02XH,0%02XH\n", (val & 0xFF), (val >> 8) & 0xFF);
+#ifdef DEBUG
+ if (emitComp->opts.dspEmit)
+ {
+ printf("; emit_word 0%02XH,0%02XH\n", (val & 0xFF), (val >> 8) & 0xFF);
+ }
#ifdef _TARGET_AMD64_
// if we're emitting code bytes, ensure that we've already emitted the rex prefix!
assert(((val & 0xFF00000000LL) == 0) || ((val & 0xFFFFFFFF00000000LL) == 0xFFFFFFFF00000000LL));
#endif // _TARGET_AMD64_
#endif
- return sizeof(short);
+ return sizeof(short);
}
/*****************************************************************************
@@ -6049,19 +6066,22 @@ unsigned char emitter::emitOutputWord(BYTE *dst, ssize_t val)
* Emit a 32-bit integer as code.
*/
-unsigned char emitter::emitOutputLong(BYTE *dst, ssize_t val)
+unsigned char emitter::emitOutputLong(BYTE* dst, ssize_t val)
{
MISALIGNED_WR_I4(dst, (int)val);
-#ifdef DEBUG
- if (emitComp->opts.dspEmit) printf("; emit_long 0%08XH\n", val);
+#ifdef DEBUG
+ if (emitComp->opts.dspEmit)
+ {
+ printf("; emit_long 0%08XH\n", val);
+ }
#ifdef _TARGET_AMD64_
// if we're emitting code bytes, ensure that we've already emitted the rex prefix!
assert(((val & 0xFF00000000LL) == 0) || ((val & 0xFFFFFFFF00000000LL) == 0xFFFFFFFF00000000LL));
#endif // _TARGET_AMD64_
#endif
- return sizeof(int );
+ return sizeof(int);
}
/*****************************************************************************
@@ -6069,22 +6089,22 @@ unsigned char emitter::emitOutputLong(BYTE *dst, ssize_t val)
* Emit a pointer-sized integer as code.
*/
-unsigned char emitter::emitOutputSizeT(BYTE *dst, ssize_t val)
+unsigned char emitter::emitOutputSizeT(BYTE* dst, ssize_t val)
{
MISALIGNED_WR_ST(dst, val);
-#ifdef DEBUG
+#ifdef DEBUG
if (emitComp->opts.dspEmit)
{
#ifdef _TARGET_AMD64_
printf("; emit_size_t 0%016llXH\n", (size_t)val);
-#else // _TARGET_AMD64_
+#else // _TARGET_AMD64_
printf("; emit_size_t 0%08XH\n", (size_t)val);
#endif // _TARGET_AMD64_
}
#endif // DEBUG
- return sizeof(size_t );
+ return sizeof(size_t);
}
/*****************************************************************************
@@ -6093,12 +6113,12 @@ unsigned char emitter::emitOutputSizeT(BYTE *dst, ssize_t val)
* this can only be called at the end of code generation.
*/
-UNATIVE_OFFSET emitter::emitCodeOffset(void *blockPtr, unsigned codePos)
+UNATIVE_OFFSET emitter::emitCodeOffset(void* blockPtr, unsigned codePos)
{
- insGroup * ig;
+ insGroup* ig;
- UNATIVE_OFFSET of;
- unsigned no = emitGetInsNumFromCodePos(codePos);
+ UNATIVE_OFFSET of;
+ unsigned no = emitGetInsNumFromCodePos(codePos);
/* Make sure we weren't passed some kind of a garbage thing */
@@ -6109,7 +6129,7 @@ UNATIVE_OFFSET emitter::emitCodeOffset(void *blockPtr, unsigned codePos)
/* The first and last offsets are always easy */
- if (no == 0)
+ if (no == 0)
{
of = 0;
}
@@ -6139,7 +6159,7 @@ UNATIVE_OFFSET emitter::emitCodeOffset(void *blockPtr, unsigned codePos)
assert(of == emitFindOffset(ig, emitGetInsNumFromCodePos(codePos)));
}
- return ig->igOffs + of;
+ return ig->igOffs + of;
}
/*****************************************************************************
@@ -6147,46 +6167,52 @@ UNATIVE_OFFSET emitter::emitCodeOffset(void *blockPtr, unsigned codePos)
* Record the fact that the given register now contains a live GC ref.
*/
-void emitter::emitGCregLiveUpd(GCtype gcType, regNumber reg, BYTE *addr)
+void emitter::emitGCregLiveUpd(GCtype gcType, regNumber reg, BYTE* addr)
{
assert(emitIssuing);
// Don't track GC changes in epilogs
if (emitIGisInEpilog(emitCurIG))
+ {
return;
+ }
assert(needsGC(gcType));
regMaskTP regMask = genRegMask(reg);
- regMaskTP & emitThisXXrefRegs = (gcType == GCT_GCREF) ? emitThisGCrefRegs
- : emitThisByrefRegs;
- regMaskTP & emitThisYYrefRegs = (gcType == GCT_GCREF) ? emitThisByrefRegs
- : emitThisGCrefRegs;
+ regMaskTP& emitThisXXrefRegs = (gcType == GCT_GCREF) ? emitThisGCrefRegs : emitThisByrefRegs;
+ regMaskTP& emitThisYYrefRegs = (gcType == GCT_GCREF) ? emitThisByrefRegs : emitThisGCrefRegs;
- if ((emitThisXXrefRegs & regMask) == 0)
+ if ((emitThisXXrefRegs & regMask) == 0)
{
// If the register was holding the other GC type, that type should
// go dead now
if (emitThisYYrefRegs & regMask)
+ {
emitGCregDeadUpd(reg, addr);
+ }
// For synchronized methods, "this" is always alive and in the same register.
// However, if we generate any code after the epilog block (where "this"
// goes dead), "this" will come alive again. We need to notice that.
// Note that we only expect isThis to be true at an insGroup boundary.
-
+
bool isThis = (reg == emitSyncThisObjReg) ? true : false;
-
- if (emitFullGCinfo)
+
+ if (emitFullGCinfo)
+ {
emitGCregLiveSet(gcType, regMask, addr, isThis);
+ }
- emitThisXXrefRegs |= regMask;
+ emitThisXXrefRegs |= regMask;
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
+ {
printf("%sReg +[%s]\n", GCtypeStr(gcType), emitRegName(reg));
+ }
#endif
}
@@ -6200,57 +6226,70 @@ void emitter::emitGCregLiveUpd(GCtype gcType, regNumber reg, BYTE
* Record the fact that the given set of registers no longer contain live GC refs.
*/
-void emitter::emitGCregDeadUpdMask(regMaskTP regs, BYTE *addr)
+void emitter::emitGCregDeadUpdMask(regMaskTP regs, BYTE* addr)
{
assert(emitIssuing);
// Don't track GC changes in epilogs
if (emitIGisInEpilog(emitCurIG))
+ {
return;
+ }
// First, handle the gcref regs going dead
- regMaskTP gcrefRegs = emitThisGCrefRegs & regs;
+ regMaskTP gcrefRegs = emitThisGCrefRegs & regs;
// "this" can never go dead in synchronized methods, except in the epilog
// after the call to CORINFO_HELP_MON_EXIT.
- assert(emitSyncThisObjReg == REG_NA ||
- (genRegMask(emitSyncThisObjReg) & regs) == 0);
+ assert(emitSyncThisObjReg == REG_NA || (genRegMask(emitSyncThisObjReg) & regs) == 0);
- if (gcrefRegs)
+ if (gcrefRegs)
{
assert((emitThisByrefRegs & gcrefRegs) == 0);
- if (emitFullGCinfo)
+ if (emitFullGCinfo)
+ {
emitGCregDeadSet(GCT_GCREF, gcrefRegs, addr);
+ }
emitThisGCrefRegs &= ~gcrefRegs;
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
{
- printf("gcrReg "); printRegMaskInt(gcrefRegs); printf(" -"); emitDispRegSet(gcrefRegs); printf("\n");
+ printf("gcrReg ");
+ printRegMaskInt(gcrefRegs);
+ printf(" -");
+ emitDispRegSet(gcrefRegs);
+ printf("\n");
}
#endif
}
// Second, handle the byref regs going dead
- regMaskTP byrefRegs = emitThisByrefRegs & regs;
+ regMaskTP byrefRegs = emitThisByrefRegs & regs;
if (byrefRegs)
{
assert((emitThisGCrefRegs & byrefRegs) == 0);
- if (emitFullGCinfo)
+ if (emitFullGCinfo)
+ {
emitGCregDeadSet(GCT_BYREF, byrefRegs, addr);
+ }
emitThisByrefRegs &= ~byrefRegs;
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
{
- printf("byrReg "); printRegMaskInt(byrefRegs); printf(" -"); emitDispRegSet(byrefRegs); printf("\n");
+ printf("byrReg ");
+ printRegMaskInt(byrefRegs);
+ printf(" -");
+ emitDispRegSet(byrefRegs);
+ printf("\n");
}
#endif
}
@@ -6261,40 +6300,50 @@ void emitter::emitGCregDeadUpdMask(regMaskTP regs, BYTE *addr)
* Record the fact that the given register no longer contains a live GC ref.
*/
-void emitter::emitGCregDeadUpd(regNumber reg, BYTE *addr)
+void emitter::emitGCregDeadUpd(regNumber reg, BYTE* addr)
{
assert(emitIssuing);
// Don't track GC changes in epilogs
if (emitIGisInEpilog(emitCurIG))
+ {
return;
+ }
- regMaskTP regMask = genRegMask(reg);
+ regMaskTP regMask = genRegMask(reg);
- if ((emitThisGCrefRegs & regMask) != 0)
+ if ((emitThisGCrefRegs & regMask) != 0)
{
assert((emitThisByrefRegs & regMask) == 0);
- if (emitFullGCinfo)
+ if (emitFullGCinfo)
+ {
emitGCregDeadSet(GCT_GCREF, regMask, addr);
+ }
emitThisGCrefRegs &= ~regMask;
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
+ {
printf("%s -[%s]\n", "gcrReg", emitRegName(reg));
+ }
#endif
}
else if ((emitThisByrefRegs & regMask) != 0)
{
- if (emitFullGCinfo)
+ if (emitFullGCinfo)
+ {
emitGCregDeadSet(GCT_BYREF, regMask, addr);
+ }
emitThisByrefRegs &= ~regMask;
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
+ {
printf("%s -[%s]\n", "byrReg", emitRegName(reg));
+ }
#endif
}
}
@@ -6307,10 +6356,7 @@ void emitter::emitGCregDeadUpd(regNumber reg, BYTE *addr)
* need a valid value to check if the variable is tracked or not.
*/
-void emitter::emitGCvarLiveUpd(int offs,
- int varNum,
- GCtype gcType,
- BYTE* addr)
+void emitter::emitGCvarLiveUpd(int offs, int varNum, GCtype gcType, BYTE* addr)
{
assert(abs(offs) % sizeof(int) == 0);
assert(needsGC(gcType));
@@ -6324,19 +6370,21 @@ void emitter::emitGCvarLiveUpd(int offs,
outgoing argument space.
Allocate a new ptr arg entry and fill it in */
- regPtrDsc * regPtrNext = gcInfo->gcRegPtrAllocDsc();
- regPtrNext->rpdGCtype = gcType;
- regPtrNext->rpdOffs = emitCurCodeOffs(addr);
- regPtrNext->rpdArg = TRUE;
- regPtrNext->rpdCall = FALSE;
+ regPtrDsc* regPtrNext = gcInfo->gcRegPtrAllocDsc();
+ regPtrNext->rpdGCtype = gcType;
+ regPtrNext->rpdOffs = emitCurCodeOffs(addr);
+ regPtrNext->rpdArg = TRUE;
+ regPtrNext->rpdCall = FALSE;
noway_assert(FitsIn<unsigned short>(offs));
- regPtrNext->rpdPtrArg = (unsigned short)offs;
- regPtrNext->rpdArgType = (unsigned short)GCInfo::rpdARG_PUSH;
- regPtrNext->rpdIsThis = FALSE;
+ regPtrNext->rpdPtrArg = (unsigned short)offs;
+ regPtrNext->rpdArgType = (unsigned short)GCInfo::rpdARG_PUSH;
+ regPtrNext->rpdIsThis = FALSE;
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
+ {
printf("[%04X] %s arg write\n", offs, GCtypeStr(gcType));
+ }
#endif
}
}
@@ -6345,8 +6393,7 @@ void emitter::emitGCvarLiveUpd(int offs,
{
/* Is the frame offset within the "interesting" range? */
- if (offs >= emitGCrFrameOffsMin &&
- offs < emitGCrFrameOffsMax)
+ if (offs >= emitGCrFrameOffsMin && offs < emitGCrFrameOffsMax)
{
/* Normally all variables in this range must be tracked stack
pointers. However, for EnC, we relax this condition. So we
@@ -6361,7 +6408,7 @@ void emitter::emitGCvarLiveUpd(int offs,
{
// This is NOT a spill temp
LclVarDsc* varDsc = &emitComp->lvaTable[varNum];
- isTracked = emitComp->lvaIsGCTracked(varDsc);
+ isTracked = emitComp->lvaIsGCTracked(varDsc);
}
else
{
@@ -6374,7 +6421,8 @@ void emitter::emitGCvarLiveUpd(int offs,
assert(!emitContTrkPtrLcls ||
// EBP based variables in the double-aligned frames are indeed input arguments.
// and we don't require them to fall into the "interesting" range.
- ((emitComp->rpFrameType == FT_DOUBLE_ALIGN_FRAME) && (varNum >= 0) && (emitComp->lvaTable[varNum].lvFramePointerBased == 1)));
+ ((emitComp->rpFrameType == FT_DOUBLE_ALIGN_FRAME) && (varNum >= 0) &&
+ (emitComp->lvaTable[varNum].lvFramePointerBased == 1)));
#else
assert(!emitContTrkPtrLcls);
#endif
@@ -6382,17 +6430,19 @@ void emitter::emitGCvarLiveUpd(int offs,
}
}
- size_t disp;
+ size_t disp;
/* Compute the index into the GC frame table */
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void *);
+ disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
assert(disp < emitGCrFrameOffsCnt);
/* If the variable is currently dead, mark it as live */
- if (emitGCrFrameLiveTab[disp] == NULL)
+ if (emitGCrFrameLiveTab[disp] == nullptr)
+ {
emitGCvarLiveSet(offs, gcType, addr, disp);
+ }
}
}
}
@@ -6402,27 +6452,28 @@ void emitter::emitGCvarLiveUpd(int offs,
* Record the fact that the given variable no longer contains a live GC ref.
*/
-void emitter::emitGCvarDeadUpd(int offs, BYTE *addr)
+void emitter::emitGCvarDeadUpd(int offs, BYTE* addr)
{
assert(emitIssuing);
assert(abs(offs) % sizeof(int) == 0);
/* Is the frame offset within the "interesting" range? */
- if (offs >= emitGCrFrameOffsMin &&
- offs < emitGCrFrameOffsMax)
+ if (offs >= emitGCrFrameOffsMin && offs < emitGCrFrameOffsMax)
{
- size_t disp;
+ size_t disp;
/* Compute the index into the GC frame table */
- disp = (offs - emitGCrFrameOffsMin) / sizeof(void *);
+ disp = (offs - emitGCrFrameOffsMin) / sizeof(void*);
assert(disp < emitGCrFrameOffsCnt);
/* If the variable is currently live, mark it as dead */
- if (emitGCrFrameLiveTab[disp] != NULL)
+ if (emitGCrFrameLiveTab[disp] != nullptr)
+ {
emitGCvarDeadSet(offs, addr, disp);
+ }
}
}
@@ -6433,7 +6484,7 @@ void emitter::emitGCvarDeadUpd(int offs, BYTE *addr)
insGroup* emitter::emitAllocAndLinkIG()
{
- insGroup* ig = emitAllocIG();
+ insGroup* ig = emitAllocIG();
assert(emitCurIG);
@@ -6445,7 +6496,7 @@ insGroup* emitter::emitAllocAndLinkIG()
/* Set the new IG as the current IG */
- emitCurIG = ig;
+ emitCurIG = ig;
return ig;
}
@@ -6457,21 +6508,21 @@ insGroup* emitter::emitAllocAndLinkIG()
insGroup* emitter::emitAllocIG()
{
- insGroup * ig;
+ insGroup* ig;
/* Allocate a group descriptor */
size_t sz = sizeof(insGroup);
- ig = (insGroup*)emitGetMem(sz);
+ ig = (insGroup*)emitGetMem(sz);
-#ifdef DEBUG
- ig->igSelf = ig;
+#ifdef DEBUG
+ ig->igSelf = ig;
#endif
#if EMITTER_STATS
emitTotalIGcnt += 1;
emitTotalIGsize += sz;
- emitSizeMethod += sz;
+ emitSizeMethod += sz;
#endif
/* Do basic initialization */
@@ -6486,31 +6537,31 @@ insGroup* emitter::emitAllocIG()
* Initialize an instruction group
*/
-void emitter::emitInitIG(insGroup* ig)
+void emitter::emitInitIG(insGroup* ig)
{
/* Assign the next available index to the instruction group */
- ig->igNum = emitNxtIGnum;
+ ig->igNum = emitNxtIGnum;
emitNxtIGnum++;
/* Record the (estimated) code offset of the group */
- ig->igOffs = emitCurCodeOffset;
+ ig->igOffs = emitCurCodeOffset;
assert(IsCodeAligned(ig->igOffs));
/* Set the current function index */
- ig->igFuncIdx = emitComp->compCurrFuncIdx;
+ ig->igFuncIdx = emitComp->compCurrFuncIdx;
- ig->igFlags = 0;
+ ig->igFlags = 0;
/* Zero out some fields to avoid printing garbage in JitDumps. These
really only need to be set in DEBUG, but do it in all cases to make
sure we act the same in non-DEBUG builds.
*/
- ig->igSize = 0;
+ ig->igSize = 0;
ig->igGCregs = RBM_NONE;
ig->igInsCnt = 0;
}
@@ -6520,13 +6571,13 @@ void emitter::emitInitIG(insGroup* ig)
* Insert instruction group 'ig' after 'igInsertAfterIG'
*/
-void emitter::emitInsertIGAfter(insGroup* insertAfterIG, insGroup* ig)
+void emitter::emitInsertIGAfter(insGroup* insertAfterIG, insGroup* ig)
{
assert(emitIGlist);
assert(emitIGlast);
- ig->igNext = insertAfterIG->igNext;
- insertAfterIG->igNext = ig;
+ ig->igNext = insertAfterIG->igNext;
+ insertAfterIG->igNext = ig;
if (emitIGlast == insertAfterIG)
{
@@ -6540,7 +6591,7 @@ void emitter::emitInsertIGAfter(insGroup* insertAfterIG, insGroup
* Save the current IG and start a new one.
*/
-void emitter::emitNxtIG(bool emitAdd)
+void emitter::emitNxtIG(bool emitAdd)
{
/* Right now we don't allow multi-IG prologs */
@@ -6553,7 +6604,7 @@ void emitter::emitNxtIG(bool emitAdd)
/* Update the GC live sets for the group's start
* Do it only if not an emitter added block */
- if (!emitAdd)
+ if (!emitAdd)
{
VarSetOps::Assign(emitComp, emitInitGCrefVars, emitThisGCrefVars);
emitInitGCrefRegs = emitThisGCrefRegs;
@@ -6567,27 +6618,29 @@ void emitter::emitNxtIG(bool emitAdd)
/* If this is an emitter added block, flag it */
if (emitAdd)
+ {
emitCurIG->igFlags |= IGF_EMIT_ADD;
+ }
// We've created a new IG; no need to force another one.
emitForceNewIG = false;
-}
-
+}
/*****************************************************************************
*
* emitGetInsSC: Get the instruction's constant value.
*/
-ssize_t emitter::emitGetInsSC(instrDesc *id)
+ssize_t emitter::emitGetInsSC(instrDesc* id)
{
-#ifdef _TARGET_ARM_ // should it be _TARGET_ARMARCH_? Why do we need this? Note that on ARM64 we store scaled immediates for some formats
+#ifdef _TARGET_ARM_ // should it be _TARGET_ARMARCH_? Why do we need this? Note that on ARM64 we store scaled immediates
+ // for some formats
if (id->idIsLclVar())
{
int varNum = id->idAddr()->iiaLclVar.lvaVarNum();
regNumber baseReg;
- int offs = id->idAddr()->iiaLclVar.lvaOffset();
+ int offs = id->idAddr()->iiaLclVar.lvaOffset();
#if defined(_TARGET_ARM_)
int adr = emitComp->lvaFrameAddress(varNum, id->idIsLclFPBase(), &baseReg, offs);
int dsp = adr + offs;
@@ -6596,8 +6649,8 @@ ssize_t emitter::emitGetInsSC(instrDesc *id)
#elif defined(_TARGET_ARM64_)
// TODO-ARM64-Cleanup: this is currently unreachable. Do we need it?
bool FPbased;
- int adr = emitComp->lvaFrameAddress(varNum, &FPbased);
- int dsp = adr + offs;
+ int adr = emitComp->lvaFrameAddress(varNum, &FPbased);
+ int dsp = adr + offs;
if (id->idIns() == INS_sub)
dsp = -dsp;
#endif
@@ -6605,10 +6658,14 @@ ssize_t emitter::emitGetInsSC(instrDesc *id)
}
else
#endif // _TARGET_ARM_
- if (id->idIsLargeCns())
- return ((instrDescCns*)id)->idcCnsVal;
+ if (id->idIsLargeCns())
+ {
+ return ((instrDescCns*)id)->idcCnsVal;
+ }
else
- return id->idSmallCns();
+ {
+ return id->idSmallCns();
+ }
}
/*****************************************************************************/
@@ -6618,19 +6675,19 @@ ssize_t emitter::emitGetInsSC(instrDesc *id)
* Record a push of a single dword on the stack.
*/
-void emitter::emitStackPush(BYTE *addr, GCtype gcType)
+void emitter::emitStackPush(BYTE* addr, GCtype gcType)
{
#ifdef DEBUG
assert(IsValidGCtype(gcType));
#endif
- if (emitSimpleStkUsed)
+ if (emitSimpleStkUsed)
{
assert(!emitFullGCinfo); // Simple stk not used for emitFullGCinfo
- assert(emitCurStackLvl/sizeof(int) < MAX_SIMPLE_STK_DEPTH);
+ assert(emitCurStackLvl / sizeof(int) < MAX_SIMPLE_STK_DEPTH);
- u1.emitSimpleStkMask <<= 1;
- u1.emitSimpleStkMask |= (unsigned)needsGC(gcType);
+ u1.emitSimpleStkMask <<= 1;
+ u1.emitSimpleStkMask |= (unsigned)needsGC(gcType);
u1.emitSimpleByrefStkMask <<= 1;
u1.emitSimpleByrefStkMask |= (gcType == GCT_BYREF);
@@ -6650,16 +6707,16 @@ void emitter::emitStackPush(BYTE *addr, GCtype gcType)
* Record a push of a bunch of non-GC dwords on the stack.
*/
-void emitter::emitStackPushN(BYTE *addr, unsigned count)
+void emitter::emitStackPushN(BYTE* addr, unsigned count)
{
assert(count);
- if (emitSimpleStkUsed)
+ if (emitSimpleStkUsed)
{
assert(!emitFullGCinfo); // Simple stk not used for emitFullGCinfo
- u1.emitSimpleStkMask <<= count;
- u1.emitSimpleByrefStkMask <<= count;
+ u1.emitSimpleStkMask <<= count;
+ u1.emitSimpleByrefStkMask <<= count;
}
else
{
@@ -6674,25 +6731,24 @@ void emitter::emitStackPushN(BYTE *addr, unsigned count)
* Record a pop of the given number of dwords from the stack.
*/
-void emitter::emitStackPop(BYTE *addr, bool isCall, unsigned char callInstrSize, unsigned count)
+void emitter::emitStackPop(BYTE* addr, bool isCall, unsigned char callInstrSize, unsigned count)
{
- assert(emitCurStackLvl/sizeof(int) >= count);
+ assert(emitCurStackLvl / sizeof(int) >= count);
assert(!isCall || callInstrSize > 0);
- if (count)
+ if (count)
{
- if (emitSimpleStkUsed)
+ if (emitSimpleStkUsed)
{
assert(!emitFullGCinfo); // Simple stk not used for emitFullGCinfo
- unsigned cnt = count;
+ unsigned cnt = count;
do
{
- u1.emitSimpleStkMask >>= 1;
+ u1.emitSimpleStkMask >>= 1;
u1.emitSimpleByrefStkMask >>= 1;
- }
- while (--cnt);
+ } while (--cnt);
}
else
{
@@ -6707,11 +6763,11 @@ void emitter::emitStackPop(BYTE *addr, bool isCall, unsigned char
// For the general encoder we do the call below always when it's a call, to ensure that the call is
// recorded (when we're doing the ptr reg map for a non-fully-interruptible method).
- if (emitFullGCinfo
+ if (emitFullGCinfo
#ifndef JIT32_GCENCODER
- || (emitComp->genFullPtrRegMap && (!emitComp->genInterruptible) && isCall)
+ || (emitComp->genFullPtrRegMap && (!emitComp->genInterruptible) && isCall)
#endif // JIT32_GCENCODER
- )
+ )
{
emitStackPopLargeStk(addr, isCall, callInstrSize, 0);
}
@@ -6723,11 +6779,9 @@ void emitter::emitStackPop(BYTE *addr, bool isCall, unsigned char
* Record a push of a single word on the stack for a full pointer map.
*/
-void emitter::emitStackPushLargeStk (BYTE * addr,
- GCtype gcType,
- unsigned count)
+void emitter::emitStackPushLargeStk(BYTE* addr, GCtype gcType, unsigned count)
{
- S_UINT32 level(emitCurStackLvl / sizeof(int));
+ S_UINT32 level(emitCurStackLvl / sizeof(int));
assert(IsValidGCtype(gcType));
assert(count);
@@ -6740,31 +6794,35 @@ void emitter::emitStackPushLargeStk (BYTE * addr,
// printf("Pushed [%d] at lvl %2u [max=%u]\n", isGCref, emitArgTrackTop - emitArgTrackTab, emitMaxStackDepth);
assert(level.IsOverflow() || u2.emitArgTrackTop == u2.emitArgTrackTab + level.Value());
- *u2.emitArgTrackTop++ = (BYTE)gcType;
+ *u2.emitArgTrackTop++ = (BYTE)gcType;
assert(u2.emitArgTrackTop <= u2.emitArgTrackTab + emitMaxStackDepth);
if (!emitHasFramePtr || needsGC(gcType))
{
- if (emitFullGCinfo)
+ if (emitFullGCinfo)
{
/* Append an "arg push" entry if this is a GC ref or
FPO method. Allocate a new ptr arg entry and fill it in */
- regPtrDsc * regPtrNext = codeGen->gcInfo.gcRegPtrAllocDsc();
- regPtrNext->rpdGCtype = gcType;
+ regPtrDsc* regPtrNext = codeGen->gcInfo.gcRegPtrAllocDsc();
+ regPtrNext->rpdGCtype = gcType;
- regPtrNext->rpdOffs = emitCurCodeOffs(addr);
- regPtrNext->rpdArg = TRUE;
- regPtrNext->rpdCall = FALSE;
+ regPtrNext->rpdOffs = emitCurCodeOffs(addr);
+ regPtrNext->rpdArg = TRUE;
+ regPtrNext->rpdCall = FALSE;
if (level.IsOverflow() || !FitsIn<unsigned short>(level.Value()))
+ {
IMPL_LIMITATION("Too many/too big arguments to encode GC information");
- regPtrNext->rpdPtrArg = (unsigned short)level.Value();
- regPtrNext->rpdArgType = (unsigned short)GCInfo::rpdARG_PUSH;
- regPtrNext->rpdIsThis = FALSE;
+ }
+ regPtrNext->rpdPtrArg = (unsigned short)level.Value();
+ regPtrNext->rpdArgType = (unsigned short)GCInfo::rpdARG_PUSH;
+ regPtrNext->rpdIsThis = FALSE;
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
+ {
printf("[%08X] %s arg push %u\n", dspPtr(regPtrNext), GCtypeStr(gcType), level.Value());
+ }
#endif
}
@@ -6774,27 +6832,22 @@ void emitter::emitStackPushLargeStk (BYTE * addr,
}
level += 1;
assert(!level.IsOverflow());
- }
- while (--count);
+ } while (--count);
}
-
/*****************************************************************************
*
* Record a pop of the given number of words from the stack for a full ptr
* map.
*/
-void emitter::emitStackPopLargeStk(BYTE * addr,
- bool isCall,
- unsigned char callInstrSize,
- unsigned count)
+void emitter::emitStackPopLargeStk(BYTE* addr, bool isCall, unsigned char callInstrSize, unsigned count)
{
assert(emitIssuing);
- unsigned argStkCnt;
- S_UINT16 argRecCnt(0); // arg count for ESP, ptr-arg count for EBP
- unsigned gcrefRegs, byrefRegs;
+ unsigned argStkCnt;
+ S_UINT16 argRecCnt(0); // arg count for ESP, ptr-arg count for EBP
+ unsigned gcrefRegs, byrefRegs;
#ifdef JIT32_GCENCODER
// For the general encoder, we always need to record calls, so we make this call
@@ -6804,13 +6857,11 @@ void emitter::emitStackPopLargeStk(BYTE * addr,
/* Count how many pointer records correspond to this "pop" */
- for (argStkCnt = count;
- argStkCnt;
- argStkCnt--)
+ for (argStkCnt = count; argStkCnt; argStkCnt--)
{
assert(u2.emitArgTrackTop > u2.emitArgTrackTab);
- GCtype gcType = (GCtype)(*--u2.emitArgTrackTop);
+ GCtype gcType = (GCtype)(*--u2.emitArgTrackTop);
assert(IsValidGCtype(gcType));
@@ -6818,8 +6869,10 @@ void emitter::emitStackPopLargeStk(BYTE * addr,
// This is an "interesting" argument
- if (!emitHasFramePtr || needsGC(gcType))
+ if (!emitHasFramePtr || needsGC(gcType))
+ {
argRecCnt += 1;
+ }
}
assert(u2.emitArgTrackTop >= u2.emitArgTrackTab);
@@ -6838,22 +6891,28 @@ void emitter::emitStackPopLargeStk(BYTE * addr,
// Do we have any interesting (i.e., callee-saved) registers live here?
- gcrefRegs =
- byrefRegs = 0;
+ gcrefRegs = byrefRegs = 0;
// We make a bitmask whose bits correspond to callee-saved register indices (in the sequence
// of callee-saved registers only).
for (unsigned calleeSavedRegIdx = 0; calleeSavedRegIdx < CNT_CALLEE_SAVED; calleeSavedRegIdx++)
{
regMaskTP calleeSavedRbm = raRbmCalleeSaveOrder[calleeSavedRegIdx];
- if (emitThisGCrefRegs & calleeSavedRbm) gcrefRegs |= (1 << calleeSavedRegIdx);
- if (emitThisByrefRegs & calleeSavedRbm) byrefRegs |= (1 << calleeSavedRegIdx);
+ if (emitThisGCrefRegs & calleeSavedRbm)
+ {
+ gcrefRegs |= (1 << calleeSavedRegIdx);
+ }
+ if (emitThisByrefRegs & calleeSavedRbm)
+ {
+ byrefRegs |= (1 << calleeSavedRegIdx);
+ }
}
#ifdef JIT32_GCENCODER
- // For the general encoder, we always have to record calls, so we don't take this early return. /* Are there any args to pop at this call site?
+ // For the general encoder, we always have to record calls, so we don't take this early return. /* Are there any
+ // args to pop at this call site?
- if (argRecCnt.Value() == 0)
+ if (argRecCnt.Value() == 0)
{
/*
Or do we have a partially interruptible EBP-less frame, and any
@@ -6862,8 +6921,7 @@ void emitter::emitStackPopLargeStk(BYTE * addr,
CLANG_FORMAT_COMMENT_ANCHOR;
#if !FPO_INTERRUPTIBLE
- if (emitFullyInt ||
- (gcrefRegs == 0 && byrefRegs == 0 && u2.emitGcArgTrackCnt == 0))
+ if (emitFullyInt || (gcrefRegs == 0 && byrefRegs == 0 && u2.emitGcArgTrackCnt == 0))
#endif
return;
}
@@ -6881,11 +6939,11 @@ void emitter::emitStackPopLargeStk(BYTE * addr,
/* Allocate a new ptr arg entry and fill it in */
- regPtrDsc * regPtrNext = codeGen->gcInfo.gcRegPtrAllocDsc();
- regPtrNext->rpdGCtype = GCT_GCREF; // Pops need a non-0 value (??)
+ regPtrDsc* regPtrNext = codeGen->gcInfo.gcRegPtrAllocDsc();
+ regPtrNext->rpdGCtype = GCT_GCREF; // Pops need a non-0 value (??)
- regPtrNext->rpdOffs = emitCurCodeOffs(addr);
- regPtrNext->rpdCall = (isCall || isCallRelatedPop);
+ regPtrNext->rpdOffs = emitCurCodeOffs(addr);
+ regPtrNext->rpdCall = (isCall || isCallRelatedPop);
#ifndef JIT32_GCENCODER
if (regPtrNext->rpdCall)
{
@@ -6893,25 +6951,27 @@ void emitter::emitStackPopLargeStk(BYTE * addr,
regPtrNext->rpdCallInstrSize = callInstrSize;
}
#endif
- regPtrNext->rpdCallGCrefRegs= gcrefRegs;
- regPtrNext->rpdCallByrefRegs= byrefRegs;
- regPtrNext->rpdArg = TRUE;
- regPtrNext->rpdArgType = (unsigned short)GCInfo::rpdARG_POP;
- regPtrNext->rpdPtrArg = argRecCnt.Value();
+ regPtrNext->rpdCallGCrefRegs = gcrefRegs;
+ regPtrNext->rpdCallByrefRegs = byrefRegs;
+ regPtrNext->rpdArg = TRUE;
+ regPtrNext->rpdArgType = (unsigned short)GCInfo::rpdARG_POP;
+ regPtrNext->rpdPtrArg = argRecCnt.Value();
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE) printf("[%08X] ptr arg pop %u\n", dspPtr(regPtrNext), count);
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
+ {
+ printf("[%08X] ptr arg pop %u\n", dspPtr(regPtrNext), count);
+ }
#endif
}
-
/*****************************************************************************
* For caller-pop arguments, we report the arguments as pending arguments.
* However, any GC arguments are now dead, so we need to report them
* as non-GC.
*/
-void emitter::emitStackKillArgs(BYTE *addr, unsigned count, unsigned char callInstrSize)
+void emitter::emitStackKillArgs(BYTE* addr, unsigned count, unsigned char callInstrSize)
{
assert(count > 0);
@@ -6922,18 +6982,18 @@ void emitter::emitStackKillArgs(BYTE *addr, unsigned count, unsigned
/* We don't need to report this to the GC info, but we do need
to kill mark the ptrs on the stack as non-GC */
- assert(emitCurStackLvl/sizeof(int) >= count);
+ assert(emitCurStackLvl / sizeof(int) >= count);
for (unsigned lvl = 0; lvl < count; lvl++)
{
- u1.emitSimpleStkMask &= ~(1 << lvl);
+ u1.emitSimpleStkMask &= ~(1 << lvl);
u1.emitSimpleByrefStkMask &= ~(1 << lvl);
}
}
else
{
- BYTE * argTrackTop = u2.emitArgTrackTop;
- S_UINT16 gcCnt(0);
+ BYTE* argTrackTop = u2.emitArgTrackTop;
+ S_UINT16 gcCnt(0);
for (unsigned i = 0; i < count; i++)
{
@@ -6941,7 +7001,7 @@ void emitter::emitStackKillArgs(BYTE *addr, unsigned count, unsigned
--argTrackTop;
- GCtype gcType = (GCtype)(*argTrackTop);
+ GCtype gcType = (GCtype)(*argTrackTop);
assert(IsValidGCtype(gcType));
if (needsGC(gcType))
@@ -6958,10 +7018,14 @@ void emitter::emitStackKillArgs(BYTE *addr, unsigned count, unsigned
/* We're about to kill the corresponding (pointer) arg records */
if (emitHasFramePtr)
+ {
u2.emitGcArgTrackCnt -= gcCnt.Value();
+ }
if (!emitFullGCinfo)
+ {
return;
+ }
/* Right after the call, the arguments are still sitting on the
stack, but they are effectively dead. For fully-interruptible
@@ -6971,17 +7035,20 @@ void emitter::emitStackKillArgs(BYTE *addr, unsigned count, unsigned
{
/* Allocate a new ptr arg entry and fill it in */
- regPtrDsc * regPtrNext = codeGen->gcInfo.gcRegPtrAllocDsc();
- regPtrNext->rpdGCtype = GCT_GCREF; // Kills need a non-0 value (??)
+ regPtrDsc* regPtrNext = codeGen->gcInfo.gcRegPtrAllocDsc();
+ regPtrNext->rpdGCtype = GCT_GCREF; // Kills need a non-0 value (??)
- regPtrNext->rpdOffs = emitCurCodeOffs(addr);
+ regPtrNext->rpdOffs = emitCurCodeOffs(addr);
- regPtrNext->rpdArg = TRUE;
- regPtrNext->rpdArgType = (unsigned short)GCInfo::rpdARG_KILL;
- regPtrNext->rpdPtrArg = gcCnt.Value();
+ regPtrNext->rpdArg = TRUE;
+ regPtrNext->rpdArgType = (unsigned short)GCInfo::rpdARG_KILL;
+ regPtrNext->rpdPtrArg = gcCnt.Value();
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE) printf("[%08X] ptr arg kill %u\n", dspPtr(regPtrNext), count);
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
+ {
+ printf("[%08X] ptr arg kill %u\n", dspPtr(regPtrNext), count);
+ }
#endif
}
@@ -6995,11 +7062,11 @@ void emitter::emitStackKillArgs(BYTE *addr, unsigned count, unsigned
/*****************************************************************************
* A helper for recording a relocation with the EE.
*/
-void emitter::emitRecordRelocation(void* location, /* IN */
- void* target, /* IN */
- WORD fRelocType, /* IN */
- WORD slotNum /* = 0 */, /* IN */
- INT32 addlDelta /* = 0 */) /* IN */
+void emitter::emitRecordRelocation(void* location, /* IN */
+ void* target, /* IN */
+ WORD fRelocType, /* IN */
+ WORD slotNum /* = 0 */, /* IN */
+ INT32 addlDelta /* = 0 */) /* IN */
{
// If we're an unmatched altjit, don't tell the VM anything. We still record the relocation for
// late disassembly; maybe we'll need it?
@@ -7015,9 +7082,9 @@ void emitter::emitRecordRelocation(void* location, /* IN */
/*****************************************************************************
* A helper for recording a call site with the EE.
*/
-void emitter::emitRecordCallSite(ULONG instrOffset, /* IN */
- CORINFO_SIG_INFO* callSig, /* IN */
- CORINFO_METHOD_HANDLE methodHandle) /* IN */
+void emitter::emitRecordCallSite(ULONG instrOffset, /* IN */
+ CORINFO_SIG_INFO* callSig, /* IN */
+ CORINFO_METHOD_HANDLE methodHandle) /* IN */
{
#if defined(DEBUG)
// Since CORINFO_SIG_INFO is a heavyweight structure, in most cases we can
@@ -7045,11 +7112,10 @@ void emitter::emitRecordCallSite(ULONG instrOffset, /* I
}
/*****************************************************************************/
-#endif//EMIT_TRACK_STACK_DEPTH
+#endif // EMIT_TRACK_STACK_DEPTH
/*****************************************************************************/
/*****************************************************************************/
-
#ifdef DEBUG
/*****************************************************************************
@@ -7062,20 +7128,18 @@ void emitter::emitRecordCallSite(ULONG instrOffset, /* I
* printf()).
*/
-const char* emitter::emitOffsetToLabel(unsigned offs)
+const char* emitter::emitOffsetToLabel(unsigned offs)
{
- const size_t TEMP_BUFFER_LEN = 40;
- static unsigned curBuf = 0;
- static char buf[4][TEMP_BUFFER_LEN];
- char* retbuf;
+ const size_t TEMP_BUFFER_LEN = 40;
+ static unsigned curBuf = 0;
+ static char buf[4][TEMP_BUFFER_LEN];
+ char* retbuf;
- insGroup* ig;
+ insGroup* ig;
UNATIVE_OFFSET of;
UNATIVE_OFFSET nextof = 0;
- for (ig = emitIGlist;
- ig != nullptr;
- ig = ig->igNext)
+ for (ig = emitIGlist; ig != nullptr; ig = ig->igNext)
{
assert(nextof == ig->igOffs);
diff --git a/src/jit/emit.h b/src/jit/emit.h
index aaf042e4bb..db65e262f1 100644
--- a/src/jit/emit.h
+++ b/src/jit/emit.h
@@ -15,7 +15,7 @@
#include "jitgcinfo.h"
/*****************************************************************************/
-#ifdef TRANSLATE_PDB
+#ifdef TRANSLATE_PDB
#ifndef _ADDRMAP_INCLUDED_
#include "addrmap.h"
#endif
@@ -29,25 +29,25 @@
/*****************************************************************************/
#ifdef _MSC_VER
-#pragma warning(disable:4200) // allow arrays of 0 size inside structs
+#pragma warning(disable : 4200) // allow arrays of 0 size inside structs
#endif
#define TRACK_GC_TEMP_LIFETIMES 0
/*****************************************************************************/
-#if 0
+#if 0
#define EMITVERBOSE 1
#else
#define EMITVERBOSE (emitComp->verbose)
#endif
-#if 0
+#if 0
#define EMIT_GC_VERBOSE 0
#else
#define EMIT_GC_VERBOSE (emitComp->verbose)
#endif
-#if 1
+#if 1
#define EMIT_INSTLIST_VERBOSE 0
#else
#define EMIT_INSTLIST_VERBOSE (emitComp->verbose)
@@ -55,20 +55,19 @@
/*****************************************************************************/
-#ifdef DEBUG
-#define DEBUG_EMIT 1
+#ifdef DEBUG
+#define DEBUG_EMIT 1
#else
-#define DEBUG_EMIT 0
+#define DEBUG_EMIT 0
#endif
-#if EMITTER_STATS
-void emitterStats(FILE* fout);
-void emitterStaticStats(FILE* fout); // Static stats about the emitter (data structure offsets, sizes, etc.)
+#if EMITTER_STATS
+void emitterStats(FILE* fout);
+void emitterStaticStats(FILE* fout); // Static stats about the emitter (data structure offsets, sizes, etc.)
#endif
void printRegMaskInt(regMaskTP mask);
-
/*****************************************************************************/
/* Forward declarations */
@@ -82,8 +81,7 @@ typedef void (*emitSplitCallbackType)(void* context, emitLocation* emitLoc);
//-----------------------------------------------------------------------------
-inline
-bool needsGC(GCtype gcType)
+inline bool needsGC(GCtype gcType)
{
if (gcType == GCT_NONE)
{
@@ -100,25 +98,26 @@ bool needsGC(GCtype gcType)
#ifdef DEBUG
-inline
-bool IsValidGCtype(GCtype gcType)
+inline bool IsValidGCtype(GCtype gcType)
{
- return (gcType == GCT_NONE ||
- gcType == GCT_GCREF ||
- gcType == GCT_BYREF);
+ return (gcType == GCT_NONE || gcType == GCT_GCREF || gcType == GCT_BYREF);
}
// Get a string name to represent the GC type
-inline
-const char * GCtypeStr(GCtype gcType)
+inline const char* GCtypeStr(GCtype gcType)
{
switch (gcType)
{
- case GCT_NONE: return "npt";
- case GCT_GCREF: return "gcr";
- case GCT_BYREF: return "byr";
- default: assert(!"Invalid GCtype"); return "err";
+ case GCT_NONE:
+ return "npt";
+ case GCT_GCREF:
+ return "gcr";
+ case GCT_BYREF:
+ return "byr";
+ default:
+ assert(!"Invalid GCtype");
+ return "err";
}
}
@@ -126,8 +125,8 @@ const char * GCtypeStr(GCtype gcType)
/*****************************************************************************/
-#if DEBUG_EMIT
-#define INTERESTING_JUMP_NUM -1 // set to 0 to see all jump info
+#if DEBUG_EMIT
+#define INTERESTING_JUMP_NUM -1 // set to 0 to see all jump info
//#define INTERESTING_JUMP_NUM 0
#endif
@@ -139,22 +138,15 @@ const char * GCtypeStr(GCtype gcType)
class emitLocation
{
public:
-
- emitLocation()
- : ig(NULL)
- , codePos(0)
+ emitLocation() : ig(nullptr), codePos(0)
{
}
- emitLocation(insGroup* _ig)
- : ig(_ig)
- , codePos(0)
+ emitLocation(insGroup* _ig) : ig(_ig), codePos(0)
{
}
- emitLocation(void* emitCookie)
- : ig((insGroup*)emitCookie)
- , codePos(0)
+ emitLocation(void* emitCookie) : ig((insGroup*)emitCookie), codePos(0)
{
}
@@ -220,26 +212,23 @@ public:
#endif // DEBUG
private:
-
- insGroup* ig; // the instruction group
- unsigned codePos; // the code position within the IG (see emitCurOffset())
+ insGroup* ig; // the instruction group
+ unsigned codePos; // the code position within the IG (see emitCurOffset())
};
-
/************************************************************************/
/* The following describes an instruction group */
/************************************************************************/
-DECLARE_TYPED_ENUM(insGroupPlaceholderType,unsigned char)
+DECLARE_TYPED_ENUM(insGroupPlaceholderType, unsigned char)
{
- IGPT_PROLOG, // currently unused
- IGPT_EPILOG,
+ IGPT_PROLOG, // currently unused
+ IGPT_EPILOG,
#if FEATURE_EH_FUNCLETS
- IGPT_FUNCLET_PROLOG,
- IGPT_FUNCLET_EPILOG,
+ IGPT_FUNCLET_PROLOG, IGPT_FUNCLET_EPILOG,
#endif // FEATURE_EH_FUNCLETS
}
-END_DECLARE_TYPED_ENUM(insGroupPlaceholderType,unsigned char)
+END_DECLARE_TYPED_ENUM(insGroupPlaceholderType, unsigned char)
#if defined(_MSC_VER) && defined(_TARGET_ARM_)
// ARM aligns structures that contain 64-bit ints or doubles on 64-bit boundaries. This causes unwanted
@@ -248,10 +237,10 @@ END_DECLARE_TYPED_ENUM(insGroupPlaceholderType,unsigned char)
#pragma pack(4)
#endif // defined(_MSC_VER) && defined(_TARGET_ARM_)
-struct insPlaceholderGroupData
+struct insPlaceholderGroupData
{
- insGroup * igPhNext;
- BasicBlock * igPhBB;
+ insGroup* igPhNext;
+ BasicBlock* igPhBB;
VARSET_TP igPhInitGCrefVars;
regMaskTP igPhInitGCrefRegs;
regMaskTP igPhInitByrefRegs;
@@ -261,81 +250,79 @@ struct insPlaceholderGroupData
insGroupPlaceholderType igPhType;
}; // end of struct insPlaceholderGroupData
-struct insGroup
+struct insGroup
{
- insGroup * igNext;
+ insGroup* igNext;
-#ifdef DEBUG
- insGroup * igSelf; // for consistency checking
+#ifdef DEBUG
+ insGroup* igSelf; // for consistency checking
#endif
- UNATIVE_OFFSET igNum; // for ordering (and display) purposes
- UNATIVE_OFFSET igOffs; // offset of this group within method
- unsigned int igFuncIdx; // Which function/funclet does this belong to? (Index into Compiler::compFuncInfos array.)
- unsigned short igFlags; // see IGF_xxx below
- unsigned short igSize; // # of bytes of code in this group
+ UNATIVE_OFFSET igNum; // for ordering (and display) purposes
+ UNATIVE_OFFSET igOffs; // offset of this group within method
+ unsigned int igFuncIdx; // Which function/funclet does this belong to? (Index into Compiler::compFuncInfos array.)
+ unsigned short igFlags; // see IGF_xxx below
+ unsigned short igSize; // # of bytes of code in this group
- #define IGF_GC_VARS 0x0001 // new set of live GC ref variables
- #define IGF_BYREF_REGS 0x0002 // new set of live by-ref registers
+#define IGF_GC_VARS 0x0001 // new set of live GC ref variables
+#define IGF_BYREF_REGS 0x0002 // new set of live by-ref registers
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- #define IGF_FINALLY_TARGET 0x0004 // this group is the start of a basic block that is returned to after a finally.
-#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- #define IGF_FUNCLET_PROLOG 0x0008 // this group belongs to a funclet prolog
+#define IGF_FINALLY_TARGET 0x0004 // this group is the start of a basic block that is returned to after a finally.
+#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
+#define IGF_FUNCLET_PROLOG 0x0008 // this group belongs to a funclet prolog
#ifdef DEBUG
- #define IGF_FUNCLET_EPILOG 0x0010 // this group belongs to a funclet epilog. Currently, this is only needed for DEBUG.
+#define IGF_FUNCLET_EPILOG 0x0010 // this group belongs to a funclet epilog. Currently, this is only needed for DEBUG.
#endif
- #define IGF_EPILOG 0x0020 // this group belongs to a main function epilog
- #define IGF_NOGCINTERRUPT 0x0040 // this IG is a no-interrupt region (prolog, epilog, etc.)
- #define IGF_UPD_ISZ 0x0080 // some instruction sizes updated
- #define IGF_PLACEHOLDER 0x0100 // this is a placeholder group, to be filled in later
- #define IGF_EMIT_ADD 0x0200 // this is a block added by the emitter
- // because the codegen block was too big. Also used for
- // placeholder IGs that aren't also labels.
-
- // Mask of IGF_* flags that should be propagated to new blocks when they are created.
- // This allows prologs and epilogs to be any number of IGs, but still be
- // automatically marked properly.
+#define IGF_EPILOG 0x0020 // this group belongs to a main function epilog
+#define IGF_NOGCINTERRUPT 0x0040 // this IG is a no-interrupt region (prolog, epilog, etc.)
+#define IGF_UPD_ISZ 0x0080 // some instruction sizes updated
+#define IGF_PLACEHOLDER 0x0100 // this is a placeholder group, to be filled in later
+#define IGF_EMIT_ADD 0x0200 // this is a block added by the emitter
+ // because the codegen block was too big. Also used for
+ // placeholder IGs that aren't also labels.
+
+// Mask of IGF_* flags that should be propagated to new blocks when they are created.
+// This allows prologs and epilogs to be any number of IGs, but still be
+// automatically marked properly.
#if FEATURE_EH_FUNCLETS
#ifdef DEBUG
- #define IGF_PROPAGATE_MASK (IGF_EPILOG | IGF_FUNCLET_PROLOG | IGF_FUNCLET_EPILOG)
+#define IGF_PROPAGATE_MASK (IGF_EPILOG | IGF_FUNCLET_PROLOG | IGF_FUNCLET_EPILOG)
#else // DEBUG
- #define IGF_PROPAGATE_MASK (IGF_EPILOG | IGF_FUNCLET_PROLOG)
+#define IGF_PROPAGATE_MASK (IGF_EPILOG | IGF_FUNCLET_PROLOG)
#endif // DEBUG
-#else // FEATURE_EH_FUNCLETS
- #define IGF_PROPAGATE_MASK (IGF_EPILOG)
+#else // FEATURE_EH_FUNCLETS
+#define IGF_PROPAGATE_MASK (IGF_EPILOG)
#endif // FEATURE_EH_FUNCLETS
// Try to do better packing based on how large regMaskSmall is (8, 16, or 64 bits).
CLANG_FORMAT_COMMENT_ANCHOR;
#if REGMASK_BITS <= 32
- union
- {
- BYTE * igData; // addr of instruction descriptors
- insPlaceholderGroupData * igPhData; // when igFlags & IGF_PLACEHOLDER
+ union {
+ BYTE* igData; // addr of instruction descriptors
+ insPlaceholderGroupData* igPhData; // when igFlags & IGF_PLACEHOLDER
};
-#if EMIT_TRACK_STACK_DEPTH
- unsigned igStkLvl; // stack level on entry
+#if EMIT_TRACK_STACK_DEPTH
+ unsigned igStkLvl; // stack level on entry
#endif
- regMaskSmall igGCregs; // set of registers with live GC refs
- unsigned char igInsCnt; // # of instructions in this group
+ regMaskSmall igGCregs; // set of registers with live GC refs
+ unsigned char igInsCnt; // # of instructions in this group
#else // REGMASK_BITS
- regMaskSmall igGCregs; // set of registers with live GC refs
+ regMaskSmall igGCregs; // set of registers with live GC refs
- union
- {
- BYTE * igData; // addr of instruction descriptors
- insPlaceholderGroupData * igPhData; // when igFlags & IGF_PLACEHOLDER
+ union {
+ BYTE* igData; // addr of instruction descriptors
+ insPlaceholderGroupData* igPhData; // when igFlags & IGF_PLACEHOLDER
};
-#if EMIT_TRACK_STACK_DEPTH
- unsigned igStkLvl; // stack level on entry
+#if EMIT_TRACK_STACK_DEPTH
+ unsigned igStkLvl; // stack level on entry
#endif
- unsigned char igInsCnt; // # of instructions in this group
+ unsigned char igInsCnt; // # of instructions in this group
#endif // REGMASK_BITS
@@ -343,30 +330,32 @@ struct insGroup
{
assert(igFlags & IGF_GC_VARS);
- BYTE * ptr = (BYTE *)igData;
+ BYTE* ptr = (BYTE*)igData;
ptr -= sizeof(VARSET_TP);
return *(VARSET_TP*)ptr;
}
- unsigned igByrefRegs() const
+ unsigned igByrefRegs() const
{
assert(igFlags & IGF_BYREF_REGS);
- BYTE * ptr = (BYTE *)igData;
+ BYTE* ptr = (BYTE*)igData;
if (igFlags & IGF_GC_VARS)
+ {
ptr -= sizeof(VARSET_TP);
+ }
ptr -= sizeof(unsigned);
- return *(unsigned *)ptr;
+ return *(unsigned*)ptr;
}
}; // end of struct insGroup
// For AMD64 the maximum prolog/epilog size supported on the OS is 256 bytes
-// Since it is incorrect for us to be jumping across funclet prolog/epilogs
+// Since it is incorrect for us to be jumping across funclet prolog/epilogs
// we will use the following estimate as the maximum placeholder size.
//
#define MAX_PLACEHOLDER_IG_SIZE 256
@@ -375,58 +364,57 @@ struct insGroup
#pragma pack(pop)
#endif // defined(_MSC_VER) && defined(_TARGET_ARM_)
-
/*****************************************************************************/
#define DEFINE_ID_OPS
#include "emitfmts.h"
-#undef DEFINE_ID_OPS
+#undef DEFINE_ID_OPS
-enum LclVarAddrTag {
+enum LclVarAddrTag
+{
LVA_STANDARD_ENCODING = 0,
LVA_LARGE_OFFSET = 1,
LVA_COMPILER_TEMP = 2,
LVA_LARGE_VARNUM = 3
};
-struct emitLclVarAddr
+struct emitLclVarAddr
{
// Constructor
void initLclVarAddr(int varNum, unsigned offset);
- int lvaVarNum(); // Returns the variable to access. Note that it returns a negative number for compiler spill temps.
- unsigned lvaOffset(); // returns the offset into the variable to access
+ int lvaVarNum(); // Returns the variable to access. Note that it returns a negative number for compiler spill temps.
+ unsigned lvaOffset(); // returns the offset into the variable to access
-// This struct should be 32 bits in size for the release build.
-// We have this constraint because this type is used in a union
-// with several other pointer sized types in the instrDesc struct.
-//
+ // This struct should be 32 bits in size for the release build.
+ // We have this constraint because this type is used in a union
+ // with several other pointer sized types in the instrDesc struct.
+ //
protected:
- unsigned _lvaVarNum :15; // Usually the lvaVarNum
- unsigned _lvaExtra :15; // Usually the lvaOffset
- unsigned _lvaTag :2; // tag field to support larger varnums
+ unsigned _lvaVarNum : 15; // Usually the lvaVarNum
+ unsigned _lvaExtra : 15; // Usually the lvaOffset
+ unsigned _lvaTag : 2; // tag field to support larger varnums
};
-enum idAddrUnionTag
+enum idAddrUnionTag
{
iaut_ALIGNED_POINTER = 0x0,
iaut_DATA_OFFSET = 0x1,
iaut_INST_COUNT = 0x2,
iaut_UNUSED_TAG = 0x3,
-
- iaut_MASK = 0x3,
- iaut_SHIFT = 2
+
+ iaut_MASK = 0x3,
+ iaut_SHIFT = 2
};
-class emitter
+class emitter
{
- friend class emitLocation;
- friend class Compiler;
- friend class CodeGen;
- friend class CodeGenInterface;
+ friend class emitLocation;
+ friend class Compiler;
+ friend class CodeGen;
+ friend class CodeGenInterface;
public:
-
/*************************************************************************
*
* Define the public entry points.
@@ -444,71 +432,64 @@ public:
#endif // FEATURE_AVX_SUPPORT
}
- #include "emitpub.h"
+#include "emitpub.h"
protected:
-
/************************************************************************/
/* Miscellaneous stuff */
/************************************************************************/
- Compiler* emitComp;
- GCInfo* gcInfo;
- CodeGen* codeGen;
+ Compiler* emitComp;
+ GCInfo* gcInfo;
+ CodeGen* codeGen;
typedef GCInfo::varPtrDsc varPtrDsc;
typedef GCInfo::regPtrDsc regPtrDsc;
typedef GCInfo::CallDsc callDsc;
- void* emitGetMem(size_t sz);
+ void* emitGetMem(size_t sz);
- DECLARE_TYPED_ENUM(opSize,unsigned)
+ DECLARE_TYPED_ENUM(opSize, unsigned)
{
- OPSZ1 = 0,
- OPSZ2 = 1,
- OPSZ4 = 2,
- OPSZ8 = 3,
- OPSZ16 = 4,
- OPSZ32 = 5,
- OPSZ_COUNT = 6,
+ OPSZ1 = 0, OPSZ2 = 1, OPSZ4 = 2, OPSZ8 = 3, OPSZ16 = 4, OPSZ32 = 5, OPSZ_COUNT = 6,
#ifdef _TARGET_AMD64_
OPSZP = OPSZ8,
#else
OPSZP = OPSZ4,
#endif
}
- END_DECLARE_TYPED_ENUM(opSize,unsigned)
+ END_DECLARE_TYPED_ENUM(opSize, unsigned)
-#define OPSIZE_INVALID ((opSize) 0xffff)
+#define OPSIZE_INVALID ((opSize)0xffff)
static const emitter::opSize emitSizeEncode[];
static const emitAttr emitSizeDecode[];
- static emitter::opSize emitEncodeSize(emitAttr size);
- static emitAttr emitDecodeSize(emitter::opSize ensz);
+ static emitter::opSize emitEncodeSize(emitAttr size);
+ static emitAttr emitDecodeSize(emitter::opSize ensz);
// Currently, we only allow one IG for the prolog
- bool emitIGisInProlog(const insGroup * ig)
+ bool emitIGisInProlog(const insGroup* ig)
{
return ig == emitPrologIG;
}
- bool emitIGisInEpilog(const insGroup * ig)
+ bool emitIGisInEpilog(const insGroup* ig)
{
- return (ig != NULL) && ((ig->igFlags & IGF_EPILOG) != 0);
+ return (ig != nullptr) && ((ig->igFlags & IGF_EPILOG) != 0);
}
#if FEATURE_EH_FUNCLETS
- bool emitIGisInFuncletProlog(const insGroup * ig)
+ bool emitIGisInFuncletProlog(const insGroup* ig)
{
- return (ig != NULL) && ((ig->igFlags & IGF_FUNCLET_PROLOG) != 0);
+ return (ig != nullptr) && ((ig->igFlags & IGF_FUNCLET_PROLOG) != 0);
}
#ifdef DEBUG
- bool emitIGisInFuncletEpilog(const insGroup * ig)
+ bool emitIGisInFuncletEpilog(const insGroup* ig)
{
- return (ig != NULL) && ((ig->igFlags & IGF_FUNCLET_EPILOG) != 0);
+ return (ig != nullptr) && ((ig->igFlags & IGF_FUNCLET_EPILOG) != 0);
}
#endif // DEBUG
#endif // FEATURE_EH_FUNCLETS
@@ -516,107 +497,117 @@ protected:
// If "ig" corresponds to the start of a basic block that is the
// target of a funclet return, generate GC information for its start
// address "cp", as if it were the return address of a call.
- void emitGenGCInfoIfFuncletRetTarget(insGroup* ig, BYTE* cp);
+ void emitGenGCInfoIfFuncletRetTarget(insGroup* ig, BYTE* cp);
- void emitRecomputeIGoffsets();
+ void emitRecomputeIGoffsets();
/************************************************************************/
/* The following describes a single instruction */
/************************************************************************/
- DECLARE_TYPED_ENUM(insFormat,unsigned)
+ DECLARE_TYPED_ENUM(insFormat, unsigned)
{
- #define IF_DEF(en, op1, op2) IF_##en,
- #include "emitfmts.h"
+#define IF_DEF(en, op1, op2) IF_##en,
+#include "emitfmts.h"
IF_COUNT
}
- END_DECLARE_TYPED_ENUM(insFormat,unsigned)
+ END_DECLARE_TYPED_ENUM(insFormat, unsigned)
-#define AM_DISP_BITS ((sizeof(unsigned)*8) - 2*(REGNUM_BITS+1) - 2)
-#define AM_DISP_BIG_VAL (- (1<<(AM_DISP_BITS-1) ))
-#define AM_DISP_MIN (-((1<<(AM_DISP_BITS-1))-1))
-#define AM_DISP_MAX (+((1<<(AM_DISP_BITS-1))-1))
+#define AM_DISP_BITS ((sizeof(unsigned) * 8) - 2 * (REGNUM_BITS + 1) - 2)
+#define AM_DISP_BIG_VAL (-(1 << (AM_DISP_BITS - 1)))
+#define AM_DISP_MIN (-((1 << (AM_DISP_BITS - 1)) - 1))
+#define AM_DISP_MAX (+((1 << (AM_DISP_BITS - 1)) - 1))
- struct emitAddrMode
+ struct emitAddrMode
{
- regNumber amBaseReg :REGNUM_BITS+1;
- regNumber amIndxReg :REGNUM_BITS+1;
- emitter::opSize amScale :2;
- int amDisp :AM_DISP_BITS;
+ regNumber amBaseReg : REGNUM_BITS + 1;
+ regNumber amIndxReg : REGNUM_BITS + 1;
+ emitter::opSize amScale : 2;
+ int amDisp : AM_DISP_BITS;
};
#if defined(DEBUG) || defined(LATE_DISASM) // LATE_DISASM needs the idMemCookie on calls to display the call target name
- struct instrDesc;
+ struct instrDesc;
- struct instrDescDebugInfo
+ struct instrDescDebugInfo
{
- unsigned idNum;
- size_t idSize; // size of the instruction descriptor
- unsigned idVarRefOffs; // IL offset for LclVar reference
- size_t idMemCookie; // for display of member names in addr modes
- void * idClsCookie; // for display of member names in addr modes
-#ifdef TRANSLATE_PDB
- unsigned int idilStart; // instruction descriptor source information for PDB translation
+ unsigned idNum;
+ size_t idSize; // size of the instruction descriptor
+ unsigned idVarRefOffs; // IL offset for LclVar reference
+ size_t idMemCookie; // for display of member names in addr modes
+ void* idClsCookie; // for display of member names in addr modes
+#ifdef TRANSLATE_PDB
+ unsigned int idilStart; // instruction descriptor source information for PDB translation
#endif
- bool idFinallyCall; // Branch instruction is a call to finally
- bool idCatchRet; // Instruction is for a catch 'return'
- CORINFO_SIG_INFO* idCallSig; // Used to report native call site signatures to the EE
+ bool idFinallyCall; // Branch instruction is a call to finally
+ bool idCatchRet; // Instruction is for a catch 'return'
+ CORINFO_SIG_INFO* idCallSig; // Used to report native call site signatures to the EE
};
#endif // defined(DEBUG) || defined(LATE_DISASM)
#ifdef _TARGET_ARM_
- unsigned insEncodeSetFlags(insFlags sf);
+ unsigned insEncodeSetFlags(insFlags sf);
- DECLARE_TYPED_ENUM(insSize,unsigned)
+ DECLARE_TYPED_ENUM(insSize, unsigned)
{
- ISZ_16BIT,
- ISZ_32BIT,
- ISZ_48BIT // pseudo-instruction for conditional branch with imm24 range,
- // encoded as IT of condition followed by an unconditional branch
+ ISZ_16BIT, ISZ_32BIT, ISZ_48BIT // pseudo-instruction for conditional branch with imm24 range,
+ // encoded as IT of condition followed by an unconditional branch
}
- END_DECLARE_TYPED_ENUM(insSize,unsigned)
+ END_DECLARE_TYPED_ENUM(insSize, unsigned)
- unsigned insEncodeShiftOpts(insOpts opt);
- unsigned insEncodePUW_G0(insOpts opt, int imm);
- unsigned insEncodePUW_H0(insOpts opt, int imm);
+ unsigned insEncodeShiftOpts(insOpts opt);
+ unsigned insEncodePUW_G0(insOpts opt, int imm);
+ unsigned insEncodePUW_H0(insOpts opt, int imm);
#endif // _TARGET_ARM_
#if defined(_TARGET_X86_) && defined(LEGACY_BACKEND)
-#define HAS_TINY_DESC 1
+#define HAS_TINY_DESC 1
#else
-#define HAS_TINY_DESC 0
+#define HAS_TINY_DESC 0
#endif
- struct instrDescCns;
+ struct instrDescCns;
- struct instrDesc
+ struct instrDesc
{
private:
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
- instruction _idIns :9; // The assembly instruction
-#else // !defined(_TARGET_XARCH_) || defined(LEGACY_BACKEND)
- instruction _idIns :8; // The assembly instruction
+ // The assembly instruction
+ instruction _idIns : 9;
+#else // !defined(_TARGET_XARCH_) || defined(LEGACY_BACKEND)
+ // The assembly instruction
+ instruction _idIns : 8;
#endif // !defined(_TARGET_XARCH_) || defined(LEGACY_BACKEND)
- insFormat _idInsFmt :8; // The format for the instruction
+ // The format for the instruction
+ insFormat _idInsFmt : 8;
public:
- instruction idIns() const { return _idIns; }
- void idIns(instruction ins)
- { _idIns = ins; assert(_idIns==ins); }
+ instruction idIns() const
+ {
+ return _idIns;
+ }
+ void idIns(instruction ins)
+ {
+ _idIns = ins;
+ assert(_idIns == ins);
+ }
- insFormat idInsFmt() const { return _idInsFmt; }
- void idInsFmt(insFormat insFmt)
- {
+ insFormat idInsFmt() const
+ {
+ return _idInsFmt;
+ }
+ void idInsFmt(insFormat insFmt)
+ {
#if defined(_TARGET_ARM64_)
- noway_assert(insFmt != IF_NONE); // Only the x86 emitter uses IF_NONE, it is invalid for ARM64 (and ARM32)
+ noway_assert(insFmt != IF_NONE); // Only the x86 emitter uses IF_NONE, it is invalid for ARM64 (and ARM32)
#endif
- _idInsFmt = insFmt;
- assert(_idInsFmt==insFmt);
- }
+ _idInsFmt = insFmt;
+ assert(_idInsFmt == insFmt);
+ }
/*
The idReg1 and idReg2 fields hold the first and second register
@@ -625,10 +616,10 @@ protected:
to make sure all of these fields stay reasonably packed.
*/
- void idSetRelocFlags(emitAttr attr)
+ void idSetRelocFlags(emitAttr attr)
{
- _idCnsReloc = (EA_IS_CNS_RELOC(attr)?1:0);
- _idDspReloc = (EA_IS_DSP_RELOC(attr)?1:0);
+ _idCnsReloc = (EA_IS_CNS_RELOC(attr) ? 1 : 0);
+ _idDspReloc = (EA_IS_DSP_RELOC(attr) ? 1 : 0);
}
////////////////////////////////////////////////////////////////////////
@@ -639,39 +630,36 @@ protected:
// arm64: 16 bits
private:
-
#ifdef _TARGET_XARCH_
- unsigned _idCodeSize :4; // size of instruction in bytes
+ unsigned _idCodeSize : 4; // size of instruction in bytes
#endif
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
- opSize _idOpSize :3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16, 5=32
- // At this point we have fully consumed first DWORD so that next field
- // doesn't cross a byte boundary.
+ opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16, 5=32
+ // At this point we have fully consumed first DWORD so that next field
+ // doesn't cross a byte boundary.
#elif defined(_TARGET_ARM64_)
- // Moved the definition of '_idOpSize' later
- // so that we don't cross a 32-bit boundary when laying out bitfields
-#else // ARM or x86-LEGACY_BACKEND
- opSize _idOpSize :2; // operand size: 0=1 , 1=2 , 2=4 , 3=8
+// Moved the definition of '_idOpSize' later so that we don't cross a 32-bit boundary when laying out bitfields
+#else // ARM or x86-LEGACY_BACKEND
+ opSize _idOpSize : 2; // operand size: 0=1 , 1=2 , 2=4 , 3=8
#endif // ARM or x86-LEGACY_BACKEND
// On Amd64, this is where the second DWORD begins
- // On System V a call could return a struct in 2 registers. The instrDescCGCA struct below has a member that
+ // On System V a call could return a struct in 2 registers. The instrDescCGCA struct below has a member that
// stores the GC-ness of the second register.
// It is added to the instrDescCGCA and not here (the base struct) since it is not needed by all the
// instructions. This struct (instrDesc) is very carefully kept to be no more than 128 bytes. There is no more
// space to add members for keeping GC-ness of the second return registers. It will also bloat the base struct
// unnecessarily since the GC-ness of the second register is only needed for call instructions.
// The instrDescCGCA struct's member keeping the GC-ness of the first return register is _idcSecondRetRegGCType.
- GCtype _idGCref :2; // GCref operand? (value is a "GCtype")
+ GCtype _idGCref : 2; // GCref operand? (value is a "GCtype")
// Note that we use the _idReg1 and _idReg2 fields to hold
// the live gcrefReg mask for the call instructions on x86/x64
//
- regNumber _idReg1 :REGNUM_BITS; // register num
-
+ regNumber _idReg1 : REGNUM_BITS; // register num
- regNumber _idReg2 :REGNUM_BITS;
+ regNumber _idReg2 : REGNUM_BITS;
////////////////////////////////////////////////////////////////////////
// Space taken up to here:
@@ -685,8 +673,8 @@ protected:
//
// For x86 use last two bits to differentiate if we are tiny or small
//
- unsigned _idTinyDsc :1; // is this a "tiny" descriptor?
- unsigned _idSmallDsc :1; // is this a "small" descriptor?
+ unsigned _idTinyDsc : 1; // is this a "tiny" descriptor?
+ unsigned _idSmallDsc : 1; // is this a "small" descriptor?
#else // !HAS_TINY_DESC
@@ -701,46 +689,46 @@ protected:
// or not small (which is bigger, just like x86)
//
- unsigned _idSmallDsc :1; // is this a "small" descriptor?
- unsigned _idLargeCns :1; // does a large constant follow?
- unsigned _idLargeDsp :1; // does a large displacement follow?
- unsigned _idLargeCall :1; // large call descriptor used
+ unsigned _idSmallDsc : 1; // is this a "small" descriptor?
+ unsigned _idLargeCns : 1; // does a large constant follow?
+ unsigned _idLargeDsp : 1; // does a large displacement follow?
+ unsigned _idLargeCall : 1; // large call descriptor used
- unsigned _idBound :1; // jump target / frame offset bound
- unsigned _idCallRegPtr:1; // IL indirect calls: addr in reg
- unsigned _idCallAddr :1; // IL indirect calls: can make a direct call to iiaAddr
- unsigned _idNoGC :1; // Some helpers don't get recorded in GC tables
+ unsigned _idBound : 1; // jump target / frame offset bound
+ unsigned _idCallRegPtr : 1; // IL indirect calls: addr in reg
+ unsigned _idCallAddr : 1; // IL indirect calls: can make a direct call to iiaAddr
+ unsigned _idNoGC : 1; // Some helpers don't get recorded in GC tables
#ifdef _TARGET_ARM64_
- opSize _idOpSize :3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16
- insOpts _idInsOpt :6; // options for instructions
- unsigned _idLclVar :1; // access a local on stack
+ opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16
+ insOpts _idInsOpt : 6; // options for instructions
+ unsigned _idLclVar : 1; // access a local on stack
#endif
#ifdef _TARGET_ARM_
- insSize _idInsSize :2; // size of instruction: 16, 32 or 48 bits
- insFlags _idInsFlags :1; // will this instruction set the flags
- unsigned _idLclVar :1; // access a local on stack
- unsigned _idLclFPBase :1; // access a local on stack - SP based offset
- insOpts _idInsOpt :3; // options for Load/Store instructions
-# ifdef ARM_HAZARD_AVOIDANCE
-# define _idKraitNop _idLclFPBase // Repurpose the _idLclFPBase for Krait Hazard
-# endif
-
- // For arm we have used 16 bits
- #define ID_EXTRA_BITFIELD_BITS (16)
+ insSize _idInsSize : 2; // size of instruction: 16, 32 or 48 bits
+ insFlags _idInsFlags : 1; // will this instruction set the flags
+ unsigned _idLclVar : 1; // access a local on stack
+ unsigned _idLclFPBase : 1; // access a local on stack - SP based offset
+ insOpts _idInsOpt : 3; // options for Load/Store instructions
+#ifdef ARM_HAZARD_AVOIDANCE
+#define _idKraitNop _idLclFPBase // Repurpose the _idLclFPBase for Krait Hazard
+#endif
+
+// For arm we have used 16 bits
+#define ID_EXTRA_BITFIELD_BITS (16)
#elif defined(_TARGET_ARM64_)
- // For Arm64, we have used 15 bits from the second DWORD.
- #define ID_EXTRA_BITFIELD_BITS (16)
+// For Arm64, we have used 15 bits from the second DWORD.
+#define ID_EXTRA_BITFIELD_BITS (16)
#elif defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
- // For xarch !LEGACY_BACKEND, we have used 14 bits from the second DWORD.
- #define ID_EXTRA_BITFIELD_BITS (14)
+// For xarch !LEGACY_BACKEND, we have used 14 bits from the second DWORD.
+#define ID_EXTRA_BITFIELD_BITS (14)
#elif defined(_TARGET_X86_)
- // For x86, we have used 6 bits from the second DWORD.
- #define ID_EXTRA_BITFIELD_BITS (6)
+// For x86, we have used 6 bits from the second DWORD.
+#define ID_EXTRA_BITFIELD_BITS (6)
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif
////////////////////////////////////////////////////////////////////////
@@ -753,14 +741,14 @@ protected:
#ifdef RELOC_SUPPORT
- unsigned _idCnsReloc :1; // LargeCns is an RVA and needs reloc tag
- unsigned _idDspReloc :1; // LargeDsp is an RVA and needs reloc tag
+ unsigned _idCnsReloc : 1; // LargeCns is an RVA and needs reloc tag
+ unsigned _idDspReloc : 1; // LargeDsp is an RVA and needs reloc tag
- #define ID_EXTRA_RELOC_BITS (2)
+#define ID_EXTRA_RELOC_BITS (2)
#else // RELOC_SUPPORT
- #define ID_EXTRA_RELOC_BITS (0)
+#define ID_EXTRA_RELOC_BITS (0)
#endif // RELOC_SUPPORT
@@ -772,13 +760,13 @@ protected:
// arm64: 50 bits
CLANG_FORMAT_COMMENT_ANCHOR;
- #define ID_EXTRA_BITS (ID_EXTRA_RELOC_BITS + ID_EXTRA_BITFIELD_BITS)
+#define ID_EXTRA_BITS (ID_EXTRA_RELOC_BITS + ID_EXTRA_BITFIELD_BITS)
- /* Use whatever bits are left over for small constants */
+/* Use whatever bits are left over for small constants */
- #define ID_BIT_SMALL_CNS (32-ID_EXTRA_BITS)
- #define ID_MIN_SMALL_CNS 0
- #define ID_MAX_SMALL_CNS (int)((1<<ID_BIT_SMALL_CNS)-1U)
+#define ID_BIT_SMALL_CNS (32 - ID_EXTRA_BITS)
+#define ID_MIN_SMALL_CNS 0
+#define ID_MAX_SMALL_CNS (int)((1 << ID_BIT_SMALL_CNS) - 1U)
////////////////////////////////////////////////////////////////////////
// Small constant size (assuming RELOC_SUPPORT):
@@ -787,7 +775,7 @@ protected:
// arm: 14 bits
// arm64: 14 bits
- unsigned _idSmallCns :ID_BIT_SMALL_CNS;
+ unsigned _idSmallCns : ID_BIT_SMALL_CNS;
////////////////////////////////////////////////////////////////////////
// Space taken up to here (with RELOC_SUPPORT): 64 bits, all architectures, by design.
@@ -798,15 +786,19 @@ protected:
#if defined(DEBUG) || defined(LATE_DISASM)
- instrDescDebugInfo * _idDebugOnlyInfo;
+ instrDescDebugInfo* _idDebugOnlyInfo;
public:
- instrDescDebugInfo * idDebugOnlyInfo() const
- { return _idDebugOnlyInfo; }
- void idDebugOnlyInfo(instrDescDebugInfo * info)
- { _idDebugOnlyInfo = info; }
- private:
+ instrDescDebugInfo* idDebugOnlyInfo() const
+ {
+ return _idDebugOnlyInfo;
+ }
+ void idDebugOnlyInfo(instrDescDebugInfo* info)
+ {
+ _idDebugOnlyInfo = info;
+ }
+ private:
#endif // defined(DEBUG) || defined(LATE_DISASM)
//
@@ -827,71 +819,70 @@ protected:
#if HAS_TINY_DESC
- unsigned _idLargeCns :1; // does a large constant follow?
- unsigned _idLargeDsp :1; // does a large displacement follow?
- unsigned _idLargeCall :1; // large call descriptor used
- unsigned _idBound :1; // jump target / frame offset bound
-
- unsigned _idCallRegPtr:1; // IL indirect calls: addr in reg
- unsigned _idCallAddr :1; // IL indirect calls: can make a direct call to iiaAddr
- unsigned _idNoGC :1; // Some helpers don't get recorded in GC tables
+ unsigned _idLargeCns : 1; // does a large constant follow?
+ unsigned _idLargeDsp : 1; // does a large displacement follow?
+ unsigned _idLargeCall : 1; // large call descriptor used
+ unsigned _idBound : 1; // jump target / frame offset bound
+ unsigned _idCallRegPtr : 1; // IL indirect calls: addr in reg
+ unsigned _idCallAddr : 1; // IL indirect calls: can make a direct call to iiaAddr
+ unsigned _idNoGC : 1; // Some helpers don't get recorded in GC tables
- #define ID_EXTRA_BITFIELD_BITS (7)
+#define ID_EXTRA_BITFIELD_BITS (7)
- //
- // For x86, we are using 7 bits from the second DWORD for bitfields.
- //
+//
+// For x86, we are using 7 bits from the second DWORD for bitfields.
+//
#ifdef RELOC_SUPPORT
- unsigned _idCnsReloc :1; // LargeCns is an RVA and needs reloc tag
- unsigned _idDspReloc :1; // LargeDsp is an RVA and needs reloc tag
+ unsigned _idCnsReloc : 1; // LargeCns is an RVA and needs reloc tag
+ unsigned _idDspReloc : 1; // LargeDsp is an RVA and needs reloc tag
- #define ID_EXTRA_RELOC_BITS (2)
+#define ID_EXTRA_RELOC_BITS (2)
#else // RELOC_SUPPORT
- #define ID_EXTRA_RELOC_BITS (0)
+#define ID_EXTRA_RELOC_BITS (0)
#endif // RELOC_SUPPORT
- #define ID_EXTRA_REG_BITS (0)
+#define ID_EXTRA_REG_BITS (0)
- #define ID_EXTRA_BITS (ID_EXTRA_BITFIELD_BITS + ID_EXTRA_RELOC_BITS + ID_EXTRA_REG_BITS)
+#define ID_EXTRA_BITS (ID_EXTRA_BITFIELD_BITS + ID_EXTRA_RELOC_BITS + ID_EXTRA_REG_BITS)
- /* Use whatever bits are left over for small constants */
+/* Use whatever bits are left over for small constants */
- #define ID_BIT_SMALL_CNS (32-ID_EXTRA_BITS)
- #define ID_MIN_SMALL_CNS 0
- #define ID_MAX_SMALL_CNS (int)((1<<ID_BIT_SMALL_CNS)-1U)
+#define ID_BIT_SMALL_CNS (32 - ID_EXTRA_BITS)
+#define ID_MIN_SMALL_CNS 0
+#define ID_MAX_SMALL_CNS (int)((1 << ID_BIT_SMALL_CNS) - 1U)
// For x86 (assuming RELOC_SUPPORT) we have 23 bits remaining for the
// small constant in this extra DWORD.
- unsigned _idSmallCns :ID_BIT_SMALL_CNS;
+ unsigned _idSmallCns : ID_BIT_SMALL_CNS;
#endif // HAS_TINY_DESC
- //
- // This is the end of the 'small' instrDesc which is the same on all
- // platforms (except 64-bit DEBUG which is a little bigger).
- // Non-DEBUG sizes:
- // x86/amd64/arm/arm64: 64 bits
- // DEBUG sizes (includes one pointer):
- // x86: 2 DWORDs, 64 bits
- // amd64: 4 DWORDs, 128 bits
- // arm: 3 DWORDs, 96 bits
- // arm64: 4 DWORDs, 128 bits
- // There should be no padding or alignment issues on any platform or
- // configuration (including DEBUG which has 1 extra pointer).
- //
+//
+// This is the end of the 'small' instrDesc which is the same on all
+// platforms (except 64-bit DEBUG which is a little bigger).
+// Non-DEBUG sizes:
+// x86/amd64/arm/arm64: 64 bits
+// DEBUG sizes (includes one pointer):
+// x86: 2 DWORDs, 64 bits
+// amd64: 4 DWORDs, 128 bits
+// arm: 3 DWORDs, 96 bits
+// arm64: 4 DWORDs, 128 bits
+// There should be no padding or alignment issues on any platform or
+// configuration (including DEBUG which has 1 extra pointer).
+//
- /*
- If you add lots more fields that need to be cleared (such
- as various flags), you might need to update the body of
- emitter::emitAllocInstr() to clear them.
- */
+/*
+ If you add lots more fields that need to be cleared (such
+ as various flags), you might need to update the body of
+ emitter::emitAllocInstr() to clear them.
+ */
#if defined(DEBUG) || defined(LATE_DISASM)
#define TINY_IDSC_DEBUG_EXTRA (sizeof(void*))
@@ -900,48 +891,47 @@ protected:
#endif
#if HAS_TINY_DESC
- #define TINY_IDSC_SIZE (4 + TINY_IDSC_DEBUG_EXTRA)
- #define SMALL_IDSC_SIZE (8 + TINY_IDSC_DEBUG_EXTRA)
+#define TINY_IDSC_SIZE (4 + TINY_IDSC_DEBUG_EXTRA)
+#define SMALL_IDSC_SIZE (8 + TINY_IDSC_DEBUG_EXTRA)
#else
- #define TINY_IDSC_SIZE (8 + TINY_IDSC_DEBUG_EXTRA)
- #define SMALL_IDSC_SIZE TINY_IDSC_SIZE
+#define TINY_IDSC_SIZE (8 + TINY_IDSC_DEBUG_EXTRA)
+#define SMALL_IDSC_SIZE TINY_IDSC_SIZE
#endif
void checkSizes();
- union idAddrUnion
- {
+ union idAddrUnion {
// TODO-Cleanup: We should really add a DEBUG-only tag to this union so we can add asserts
// about reading what we think is here, to avoid unexpected corruption issues.
- emitLclVarAddr iiaLclVar;
- BasicBlock* iiaBBlabel;
- insGroup* iiaIGlabel;
- BYTE* iiaAddr;
- emitAddrMode iiaAddrMode;
+ emitLclVarAddr iiaLclVar;
+ BasicBlock* iiaBBlabel;
+ insGroup* iiaIGlabel;
+ BYTE* iiaAddr;
+ emitAddrMode iiaAddrMode;
- CORINFO_FIELD_HANDLE iiaFieldHnd; // iiaFieldHandle is also used to encode
- // an offset into the JIT data constant area
- bool iiaIsJitDataOffset() const;
- int iiaGetJitDataOffset() const;
+ CORINFO_FIELD_HANDLE iiaFieldHnd; // iiaFieldHandle is also used to encode
+ // an offset into the JIT data constant area
+ bool iiaIsJitDataOffset() const;
+ int iiaGetJitDataOffset() const;
#ifdef _TARGET_ARMARCH_
// iiaEncodedInstrCount and its accessor functions are used to specify an instruction
// count for jumps, instead of using a label and multiple blocks. This is used in the
// prolog as well as for IF_LARGEJMP pseudo-branch instructions.
- int iiaEncodedInstrCount;
+ int iiaEncodedInstrCount;
- bool iiaHasInstrCount() const
+ bool iiaHasInstrCount() const
{
return (iiaEncodedInstrCount & iaut_MASK) == iaut_INST_COUNT;
}
- int iiaGetInstrCount() const
+ int iiaGetInstrCount() const
{
assert(iiaHasInstrCount());
return (iiaEncodedInstrCount >> iaut_SHIFT);
}
- void iiaSetInstrCount(int count)
+ void iiaSetInstrCount(int count)
{
assert(abs(count) < 10);
iiaEncodedInstrCount = (count << iaut_SHIFT) | iaut_INST_COUNT;
@@ -949,307 +939,533 @@ protected:
struct
{
- regNumber _idReg3 :REGNUM_BITS;
- regNumber _idReg4 :REGNUM_BITS;
+ regNumber _idReg3 : REGNUM_BITS;
+ regNumber _idReg4 : REGNUM_BITS;
#ifdef _TARGET_ARM64_
- unsigned _idReg3Scaled :1; // Reg3 is scaled by idOpSize bits
+ unsigned _idReg3Scaled : 1; // Reg3 is scaled by idOpSize bits
#endif
};
#elif defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
struct
{
- regNumber _idReg3 : REGNUM_BITS;
+ regNumber _idReg3 : REGNUM_BITS;
};
#endif // defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
- } _idAddrUnion;
+ } _idAddrUnion;
/* Trivial wrappers to return properly typed enums */
public:
-
#if HAS_TINY_DESC
- bool idIsTiny() const { return (_idTinyDsc != 0); }
- void idSetIsTiny() { _idTinyDsc = 1; }
+ bool idIsTiny() const
+ {
+ return (_idTinyDsc != 0);
+ }
+ void idSetIsTiny()
+ {
+ _idTinyDsc = 1;
+ }
#else
- bool idIsTiny() const { return false; }
- void idSetIsTiny() { _idSmallDsc = 1; }
+ bool idIsTiny() const
+ {
+ return false;
+ }
+ void idSetIsTiny()
+ {
+ _idSmallDsc = 1;
+ }
-#endif // HAS_TINY_DESC
+#endif // HAS_TINY_DESC
- bool idIsSmallDsc() const { return (_idSmallDsc != 0); }
- void idSetIsSmallDsc() { _idSmallDsc = 1; }
+ bool idIsSmallDsc() const
+ {
+ return (_idSmallDsc != 0);
+ }
+ void idSetIsSmallDsc()
+ {
+ _idSmallDsc = 1;
+ }
-#if defined(_TARGET_XARCH_)
+#if defined(_TARGET_XARCH_)
- unsigned idCodeSize() const { return _idCodeSize; }
- void idCodeSize(unsigned sz) { _idCodeSize = sz; assert(sz == _idCodeSize); }
+ unsigned idCodeSize() const
+ {
+ return _idCodeSize;
+ }
+ void idCodeSize(unsigned sz)
+ {
+ _idCodeSize = sz;
+ assert(sz == _idCodeSize);
+ }
#elif defined(_TARGET_ARM64_)
- unsigned idCodeSize() const {
- int size = 4;
- switch (idInsFmt())
- {
- case IF_LARGEADR:
- // adrp + add
- case IF_LARGEJMP:
- // b<cond> + b<uncond>
- size = 8;
- break;
- case IF_LARGELDC:
- if (isVectorRegister(idReg1()))
- {
- // adrp + ldr + fmov
- size = 12;
- }
- else
- {
- // adrp + ldr
- size = 8;
- }
- break;
- default:
- break;
- }
-
- return size;
- }
+ unsigned idCodeSize() const
+ {
+ int size = 4;
+ switch (idInsFmt())
+ {
+ case IF_LARGEADR:
+ // adrp + add
+ case IF_LARGEJMP:
+ // b<cond> + b<uncond>
+ size = 8;
+ break;
+ case IF_LARGELDC:
+ if (isVectorRegister(idReg1()))
+ {
+ // adrp + ldr + fmov
+ size = 12;
+ }
+ else
+ {
+ // adrp + ldr
+ size = 8;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return size;
+ }
#elif defined(_TARGET_ARM_)
- bool idInstrIsT1() const { return (_idInsSize == ISZ_16BIT); }
- unsigned idCodeSize() const { unsigned result = (_idInsSize == ISZ_16BIT) ? 2 : (_idInsSize == ISZ_32BIT) ? 4 : 6;
+ bool idInstrIsT1() const
+ {
+ return (_idInsSize == ISZ_16BIT);
+ }
+ unsigned idCodeSize() const
+ {
+ unsigned result = (_idInsSize == ISZ_16BIT) ? 2 : (_idInsSize == ISZ_32BIT) ? 4 : 6;
#ifdef ARM_HAZARD_AVOIDANCE
- if (idKraitNop())
- result += 4;
+ if (idKraitNop())
+ result += 4;
#endif
- return result;
- }
- insSize idInsSize() const { return _idInsSize; }
- void idInsSize(insSize isz) { _idInsSize = isz; assert(isz == _idInsSize);
+ return result;
+ }
+ insSize idInsSize() const
+ {
+ return _idInsSize;
+ }
+ void idInsSize(insSize isz)
+ {
+ _idInsSize = isz;
+ assert(isz == _idInsSize);
#ifdef ARM_HAZARD_AVOIDANCE
- if (idIsKraitBranch() && idInstrIsT1())
- idKraitNop(false);
+ if (idIsKraitBranch() && idInstrIsT1())
+ idKraitNop(false);
#endif
- }
+ }
#ifdef ARM_HAZARD_AVOIDANCE
// This function returns true if the current instruction represents a non T1
// unconditional branch instruction that is subject to the Krait errata
// Note: The T2 pop encoding is handled separately as it only occurs in epilogs
//
- bool idIsKraitBranch() const { if (idInstrIsT1())
- return false;
- if ((idIns() == INS_b) ||
- (idIns() == INS_bl) ||
- ((idIns() == INS_ldr) && (idReg1() == REG_PC)) )
- {
- return true;
- }
- return false;
- }
- bool idKraitNop() const { if (!idIsKraitBranch())
- return false;
- else
- return (_idKraitNop != 0);
- }
- void idKraitNop(bool val) { if (idIsKraitBranch())
- _idKraitNop = val;
- assert(val == idKraitNop());
- }
+ bool idIsKraitBranch() const
+ {
+ if (idInstrIsT1())
+ return false;
+ if ((idIns() == INS_b) || (idIns() == INS_bl) || ((idIns() == INS_ldr) && (idReg1() == REG_PC)))
+ {
+ return true;
+ }
+ return false;
+ }
+ bool idKraitNop() const
+ {
+ if (!idIsKraitBranch())
+ return false;
+ else
+ return (_idKraitNop != 0);
+ }
+ void idKraitNop(bool val)
+ {
+ if (idIsKraitBranch())
+ _idKraitNop = val;
+ assert(val == idKraitNop());
+ }
#endif
- insFlags idInsFlags() const { return _idInsFlags; }
- void idInsFlags(insFlags sf) { _idInsFlags = sf; assert(sf == _idInsFlags); }
+ insFlags idInsFlags() const
+ {
+ return _idInsFlags;
+ }
+ void idInsFlags(insFlags sf)
+ {
+ _idInsFlags = sf;
+ assert(sf == _idInsFlags);
+ }
#endif // _TARGET_ARM_
- emitAttr idOpSize() { return emitDecodeSize(_idOpSize); }
- void idOpSize(emitAttr opsz){ _idOpSize = emitEncodeSize(opsz); }
+ emitAttr idOpSize()
+ {
+ return emitDecodeSize(_idOpSize);
+ }
+ void idOpSize(emitAttr opsz)
+ {
+ _idOpSize = emitEncodeSize(opsz);
+ }
- GCtype idGCref() const { return (GCtype) _idGCref; }
- void idGCref(GCtype gctype) { _idGCref = gctype; }
+ GCtype idGCref() const
+ {
+ return (GCtype)_idGCref;
+ }
+ void idGCref(GCtype gctype)
+ {
+ _idGCref = gctype;
+ }
- regNumber idReg1() const { return _idReg1; }
- void idReg1(regNumber reg) { _idReg1 = reg; assert(reg == _idReg1); }
+ regNumber idReg1() const
+ {
+ return _idReg1;
+ }
+ void idReg1(regNumber reg)
+ {
+ _idReg1 = reg;
+ assert(reg == _idReg1);
+ }
- regNumber idReg2() const { return _idReg2; }
- void idReg2(regNumber reg) { _idReg2 = reg; assert(reg == _idReg2); }
+ regNumber idReg2() const
+ {
+ return _idReg2;
+ }
+ void idReg2(regNumber reg)
+ {
+ _idReg2 = reg;
+ assert(reg == _idReg2);
+ }
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
- regNumber idReg3() const
+ regNumber idReg3() const
{
- assert(!idIsTiny()); assert(!idIsSmallDsc()); return idAddr()->_idReg3;
+ assert(!idIsTiny());
+ assert(!idIsSmallDsc());
+ return idAddr()->_idReg3;
}
- void idReg3(regNumber reg)
- {
+ void idReg3(regNumber reg)
+ {
assert(!idIsTiny());
assert(!idIsSmallDsc());
idAddr()->_idReg3 = reg;
assert(reg == idAddr()->_idReg3);
- }
+ }
#endif // defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
#ifdef _TARGET_ARMARCH_
- insOpts idInsOpt() const { return (insOpts) _idInsOpt; }
- void idInsOpt(insOpts opt) { _idInsOpt = opt; assert(opt == _idInsOpt); }
-
- regNumber idReg3() const { assert(!idIsTiny()); assert(!idIsSmallDsc());
- return idAddr()->_idReg3; }
- void idReg3(regNumber reg) { assert(!idIsTiny()); assert(!idIsSmallDsc());
- idAddr()->_idReg3 = reg;
- assert(reg == idAddr()->_idReg3); }
- regNumber idReg4() const { assert(!idIsTiny()); assert(!idIsSmallDsc());
- return idAddr()->_idReg4; }
- void idReg4(regNumber reg) { assert(!idIsTiny()); assert(!idIsSmallDsc());
- idAddr()->_idReg4 = reg;
- assert(reg == idAddr()->_idReg4); }
+ insOpts idInsOpt() const
+ {
+ return (insOpts)_idInsOpt;
+ }
+ void idInsOpt(insOpts opt)
+ {
+ _idInsOpt = opt;
+ assert(opt == _idInsOpt);
+ }
+
+ regNumber idReg3() const
+ {
+ assert(!idIsTiny());
+ assert(!idIsSmallDsc());
+ return idAddr()->_idReg3;
+ }
+ void idReg3(regNumber reg)
+ {
+ assert(!idIsTiny());
+ assert(!idIsSmallDsc());
+ idAddr()->_idReg3 = reg;
+ assert(reg == idAddr()->_idReg3);
+ }
+ regNumber idReg4() const
+ {
+ assert(!idIsTiny());
+ assert(!idIsSmallDsc());
+ return idAddr()->_idReg4;
+ }
+ void idReg4(regNumber reg)
+ {
+ assert(!idIsTiny());
+ assert(!idIsSmallDsc());
+ idAddr()->_idReg4 = reg;
+ assert(reg == idAddr()->_idReg4);
+ }
#ifdef _TARGET_ARM64_
- bool idReg3Scaled() const { assert(!idIsTiny()); assert(!idIsSmallDsc());
- return (idAddr()->_idReg3Scaled == 1); }
- void idReg3Scaled(bool val) { assert(!idIsTiny()); assert(!idIsSmallDsc());
- idAddr()->_idReg3Scaled = val ? 1 : 0; }
+ bool idReg3Scaled() const
+ {
+ assert(!idIsTiny());
+ assert(!idIsSmallDsc());
+ return (idAddr()->_idReg3Scaled == 1);
+ }
+ void idReg3Scaled(bool val)
+ {
+ assert(!idIsTiny());
+ assert(!idIsSmallDsc());
+ idAddr()->_idReg3Scaled = val ? 1 : 0;
+ }
#endif // _TARGET_ARM64_
#endif // _TARGET_ARMARCH_
inline static bool fitsInSmallCns(ssize_t val)
- { return ((val >= ID_MIN_SMALL_CNS) && (val <= ID_MAX_SMALL_CNS)); }
+ {
+ return ((val >= ID_MIN_SMALL_CNS) && (val <= ID_MAX_SMALL_CNS));
+ }
- bool idIsLargeCns() const { assert(!idIsTiny()); return _idLargeCns != 0; }
- void idSetIsLargeCns() { assert(!idIsTiny()); _idLargeCns = 1; }
+ bool idIsLargeCns() const
+ {
+ assert(!idIsTiny());
+ return _idLargeCns != 0;
+ }
+ void idSetIsLargeCns()
+ {
+ assert(!idIsTiny());
+ _idLargeCns = 1;
+ }
- bool idIsLargeDsp() const { assert(!idIsTiny()); return _idLargeDsp != 0; }
- void idSetIsLargeDsp() { assert(!idIsTiny()); _idLargeDsp = 1; }
- void idSetIsSmallDsp() { assert(!idIsTiny()); _idLargeDsp = 0; }
+ bool idIsLargeDsp() const
+ {
+ assert(!idIsTiny());
+ return _idLargeDsp != 0;
+ }
+ void idSetIsLargeDsp()
+ {
+ assert(!idIsTiny());
+ _idLargeDsp = 1;
+ }
+ void idSetIsSmallDsp()
+ {
+ assert(!idIsTiny());
+ _idLargeDsp = 0;
+ }
- bool idIsLargeCall() const { assert(!idIsTiny()); return _idLargeCall != 0; }
- void idSetIsLargeCall() { assert(!idIsTiny()); _idLargeCall = 1; }
+ bool idIsLargeCall() const
+ {
+ assert(!idIsTiny());
+ return _idLargeCall != 0;
+ }
+ void idSetIsLargeCall()
+ {
+ assert(!idIsTiny());
+ _idLargeCall = 1;
+ }
- bool idIsBound() const { assert(!idIsTiny()); return _idBound != 0; }
- void idSetIsBound() { assert(!idIsTiny()); _idBound = 1; }
+ bool idIsBound() const
+ {
+ assert(!idIsTiny());
+ return _idBound != 0;
+ }
+ void idSetIsBound()
+ {
+ assert(!idIsTiny());
+ _idBound = 1;
+ }
- bool idIsCallRegPtr() const { assert(!idIsTiny()); return _idCallRegPtr != 0; }
- void idSetIsCallRegPtr() { assert(!idIsTiny()); _idCallRegPtr = 1; }
+ bool idIsCallRegPtr() const
+ {
+ assert(!idIsTiny());
+ return _idCallRegPtr != 0;
+ }
+ void idSetIsCallRegPtr()
+ {
+ assert(!idIsTiny());
+ _idCallRegPtr = 1;
+ }
- bool idIsCallAddr() const { assert(!idIsTiny()); return _idCallAddr != 0; }
- void idSetIsCallAddr() { assert(!idIsTiny()); _idCallAddr = 1; }
+ bool idIsCallAddr() const
+ {
+ assert(!idIsTiny());
+ return _idCallAddr != 0;
+ }
+ void idSetIsCallAddr()
+ {
+ assert(!idIsTiny());
+ _idCallAddr = 1;
+ }
// Only call instructions that call helper functions may be marked as "IsNoGC", indicating
// that a thread executing such a call cannot be stopped for GC. Thus, in partially-interruptible
// code, it is not necessary to generate GC info for a call so labeled.
- bool idIsNoGC() const { assert(!idIsTiny()); return _idNoGC != 0; }
- void idSetIsNoGC(bool val) { assert(!idIsTiny()); _idNoGC = val; }
+ bool idIsNoGC() const
+ {
+ assert(!idIsTiny());
+ return _idNoGC != 0;
+ }
+ void idSetIsNoGC(bool val)
+ {
+ assert(!idIsTiny());
+ _idNoGC = val;
+ }
#ifdef _TARGET_ARMARCH_
- bool idIsLclVar() const { return !idIsTiny() && _idLclVar != 0; }
- void idSetIsLclVar() { assert(!idIsTiny()); _idLclVar = 1; }
+ bool idIsLclVar() const
+ {
+ return !idIsTiny() && _idLclVar != 0;
+ }
+ void idSetIsLclVar()
+ {
+ assert(!idIsTiny());
+ _idLclVar = 1;
+ }
#endif // _TARGET_ARMARCH_
#if defined(_TARGET_ARM_)
-# ifdef ARM_HAZARD_AVOIDANCE
- bool idIsLclFPBase() const { assert(!idIsKraitBranch()); return !idIsTiny() && _idLclFPBase != 0; }
- void idSetIsLclFPBase() { assert(!idIsKraitBranch()); assert(!idIsTiny()); _idLclFPBase = 1; }
-# else
- bool idIsLclFPBase() const { return !idIsTiny() && _idLclFPBase != 0; }
- void idSetIsLclFPBase() { assert(!idIsTiny()); _idLclFPBase = 1; }
-# endif
+#ifdef ARM_HAZARD_AVOIDANCE
+ bool idIsLclFPBase() const
+ {
+ assert(!idIsKraitBranch());
+ return !idIsTiny() && _idLclFPBase != 0;
+ }
+ void idSetIsLclFPBase()
+ {
+ assert(!idIsKraitBranch());
+ assert(!idIsTiny());
+ _idLclFPBase = 1;
+ }
+#else
+ bool idIsLclFPBase() const
+ {
+ return !idIsTiny() && _idLclFPBase != 0;
+ }
+ void idSetIsLclFPBase()
+ {
+ assert(!idIsTiny());
+ _idLclFPBase = 1;
+ }
+#endif
#endif // defined(_TARGET_ARM_)
#ifdef RELOC_SUPPORT
- bool idIsCnsReloc() const { assert(!idIsTiny()); return _idCnsReloc != 0; }
- void idSetIsCnsReloc() { assert(!idIsTiny()); _idCnsReloc = 1; }
+ bool idIsCnsReloc() const
+ {
+ assert(!idIsTiny());
+ return _idCnsReloc != 0;
+ }
+ void idSetIsCnsReloc()
+ {
+ assert(!idIsTiny());
+ _idCnsReloc = 1;
+ }
+
+ bool idIsDspReloc() const
+ {
+ assert(!idIsTiny());
+ return _idDspReloc != 0;
+ }
+ void idSetIsDspReloc(bool val = true)
+ {
+ assert(!idIsTiny());
+ _idDspReloc = val;
+ }
+ bool idIsReloc()
+ {
+ return idIsDspReloc() || idIsCnsReloc();
+ }
- bool idIsDspReloc() const { assert(!idIsTiny()); return _idDspReloc != 0; }
- void idSetIsDspReloc(bool val = true)
- { assert(!idIsTiny()); _idDspReloc = val; }
- bool idIsReloc() { return idIsDspReloc() || idIsCnsReloc(); }
-
#endif
- unsigned idSmallCns() const { assert(!idIsTiny()); return _idSmallCns; }
- void idSmallCns(size_t value)
- { assert(!idIsTiny()); assert(fitsInSmallCns(value));
- _idSmallCns = value; }
+ unsigned idSmallCns() const
+ {
+ assert(!idIsTiny());
+ return _idSmallCns;
+ }
+ void idSmallCns(size_t value)
+ {
+ assert(!idIsTiny());
+ assert(fitsInSmallCns(value));
+ _idSmallCns = value;
+ }
- inline const idAddrUnion* idAddr() const { assert(!idIsSmallDsc() && !idIsTiny());
- return &this->_idAddrUnion; }
+ inline const idAddrUnion* idAddr() const
+ {
+ assert(!idIsSmallDsc() && !idIsTiny());
+ return &this->_idAddrUnion;
+ }
- inline idAddrUnion* idAddr() { assert(!idIsSmallDsc() && !idIsTiny());
- return &this->_idAddrUnion; }
+ inline idAddrUnion* idAddr()
+ {
+ assert(!idIsSmallDsc() && !idIsTiny());
+ return &this->_idAddrUnion;
+ }
}; // End of struct instrDesc
- void dispIns(instrDesc* id);
+ void dispIns(instrDesc* id);
- void appendToCurIG(instrDesc* id);
+ void appendToCurIG(instrDesc* id);
/********************************************************************************************/
- struct instrDescJmp : instrDesc
+ struct instrDescJmp : instrDesc
{
- instrDescJmp * idjNext; // next jump in the group/method
- insGroup * idjIG; // containing group
+ instrDescJmp* idjNext; // next jump in the group/method
+ insGroup* idjIG; // containing group
- union
- {
- BYTE * idjAddr; // address of jump ins (for patching)
- }
- idjTemp;
+ union {
+ BYTE* idjAddr; // address of jump ins (for patching)
+ } idjTemp;
- unsigned idjOffs :30;// Before jump emission, this is the byte offset within IG of the jump instruction.
- // After emission, for forward jumps, this is the target offset -- in bytes from the
- // beginning of the function -- of the target instruction of the jump, used to
- // determine if this jump needs to be patched.
- unsigned idjShort : 1;// is the jump known to be a short one?
- unsigned idjKeepLong : 1;// should the jump be kept long? (used for
- // hot to cold and cold to hot jumps)
+ unsigned idjOffs : 30; // Before jump emission, this is the byte offset within IG of the jump instruction.
+ // After emission, for forward jumps, this is the target offset -- in bytes from the
+ // beginning of the function -- of the target instruction of the jump, used to
+ // determine if this jump needs to be patched.
+ unsigned idjShort : 1; // is the jump known to be a short one?
+ unsigned idjKeepLong : 1; // should the jump be kept long? (used for
+ // hot to cold and cold to hot jumps)
};
#if !defined(_TARGET_ARM64_) // This shouldn't be needed for ARM32, either, but I don't want to touch the ARM32 JIT.
- struct instrDescLbl : instrDescJmp
+ struct instrDescLbl : instrDescJmp
{
- emitLclVarAddr dstLclVar;
+ emitLclVarAddr dstLclVar;
};
#endif // !_TARGET_ARM64_
- struct instrDescCns : instrDesc // large const
+ struct instrDescCns : instrDesc // large const
{
- ssize_t idcCnsVal;
+ ssize_t idcCnsVal;
};
- struct instrDescDsp : instrDesc // large displacement
+ struct instrDescDsp : instrDesc // large displacement
{
- ssize_t iddDspVal;
+ ssize_t iddDspVal;
};
- struct instrDescCnsDsp : instrDesc // large cons + disp
+ struct instrDescCnsDsp : instrDesc // large cons + disp
{
- ssize_t iddcCnsVal;
- int iddcDspVal;
+ ssize_t iddcCnsVal;
+ int iddcDspVal;
};
- struct instrDescAmd : instrDesc // large addrmode disp
+ struct instrDescAmd : instrDesc // large addrmode disp
{
- ssize_t idaAmdVal;
+ ssize_t idaAmdVal;
};
- struct instrDescCnsAmd : instrDesc // large cons + addrmode disp
+ struct instrDescCnsAmd : instrDesc // large cons + addrmode disp
{
- ssize_t idacCnsVal;
- ssize_t idacAmdVal;
+ ssize_t idacCnsVal;
+ ssize_t idacAmdVal;
};
- struct instrDescCGCA : instrDesc // call with ...
+ struct instrDescCGCA : instrDesc // call with ...
{
- VARSET_TP idcGCvars; // ... updated GC vars or
- ssize_t idcDisp; // ... big addrmode disp
- regMaskTP idcGcrefRegs; // ... gcref registers
- regMaskTP idcByrefRegs; // ... byref registers
- unsigned idcArgCnt; // ... lots of args or (<0 ==> caller pops args)
+ VARSET_TP idcGCvars; // ... updated GC vars or
+ ssize_t idcDisp; // ... big addrmode disp
+ regMaskTP idcGcrefRegs; // ... gcref registers
+ regMaskTP idcByrefRegs; // ... byref registers
+ unsigned idcArgCnt; // ... lots of args or (<0 ==> caller pops args)
#if MULTIREG_HAS_SECOND_GC_RET
// This method handle the GC-ness of the second register in a 2 register returned struct on System V.
- GCtype idSecondGCref() const { return (GCtype)_idcSecondRetRegGCType; }
- void idSecondGCref(GCtype gctype) { _idcSecondRetRegGCType = gctype; }
+ GCtype idSecondGCref() const
+ {
+ return (GCtype)_idcSecondRetRegGCType;
+ }
+ void idSecondGCref(GCtype gctype)
+ {
+ _idcSecondRetRegGCType = gctype;
+ }
private:
// This member stores the GC-ness of the second register in a 2 register returned struct on System V.
@@ -1259,73 +1475,69 @@ protected:
// for keeping GC-ness of the second return registers. It will also bloat the base struct unnecessarily
// since the GC-ness of the second register is only needed for call instructions.
// The base struct's member keeping the GC-ness of the first return register is _idGCref.
- GCtype _idcSecondRetRegGCType : 2; // ... GC type for the second return register.
-#endif // MULTIREG_HAS_SECOND_GC_RET
+ GCtype _idcSecondRetRegGCType : 2; // ... GC type for the second return register.
+#endif // MULTIREG_HAS_SECOND_GC_RET
};
- struct instrDescArmFP : instrDesc
+ struct instrDescArmFP : instrDesc
{
regNumber r1;
regNumber r2;
regNumber r3;
};
- insUpdateModes emitInsUpdateMode(instruction ins);
- insFormat emitInsModeFormat(instruction ins, insFormat base);
+ insUpdateModes emitInsUpdateMode(instruction ins);
+ insFormat emitInsModeFormat(instruction ins, insFormat base);
- static const BYTE emitInsModeFmtTab[];
-#ifdef DEBUG
- static const unsigned emitInsModeFmtCnt;
+ static const BYTE emitInsModeFmtTab[];
+#ifdef DEBUG
+ static const unsigned emitInsModeFmtCnt;
#endif
- size_t emitGetInstrDescSize (const instrDesc * id);
- size_t emitGetInstrDescSizeSC(const instrDesc * id);
+ size_t emitGetInstrDescSize(const instrDesc* id);
+ size_t emitGetInstrDescSizeSC(const instrDesc* id);
- ssize_t emitGetInsCns (instrDesc *id);
- ssize_t emitGetInsDsp (instrDesc *id);
- ssize_t emitGetInsAmd (instrDesc *id);
- ssize_t emitGetInsCnsDsp(instrDesc *id, ssize_t *dspPtr);
- ssize_t emitGetInsSC (instrDesc *id);
- ssize_t emitGetInsCIdisp(instrDesc *id);
- unsigned emitGetInsCIargs(instrDesc *id);
+ ssize_t emitGetInsCns(instrDesc* id);
+ ssize_t emitGetInsDsp(instrDesc* id);
+ ssize_t emitGetInsAmd(instrDesc* id);
+ ssize_t emitGetInsCnsDsp(instrDesc* id, ssize_t* dspPtr);
+ ssize_t emitGetInsSC(instrDesc* id);
+ ssize_t emitGetInsCIdisp(instrDesc* id);
+ unsigned emitGetInsCIargs(instrDesc* id);
// Return the argument count for a direct call "id".
- int emitGetInsCDinfo(instrDesc *id);
+ int emitGetInsCDinfo(instrDesc* id);
- unsigned emitInsCount;
+ unsigned emitInsCount;
- /************************************************************************/
- /* A few routines used for debug display purposes */
- /************************************************************************/
+/************************************************************************/
+/* A few routines used for debug display purposes */
+/************************************************************************/
#if defined(DEBUG) || EMITTER_STATS
- static const char * emitIfName (unsigned f);
+ static const char* emitIfName(unsigned f);
#endif // defined(DEBUG) || EMITTER_STATS
-#ifdef DEBUG
+#ifdef DEBUG
- unsigned emitVarRefOffs;
+ unsigned emitVarRefOffs;
- const char * emitRegName (regNumber reg,
- emitAttr size = EA_PTRSIZE,
- bool varName = true);
- const char * emitFloatRegName(regNumber reg,
- emitAttr size = EA_PTRSIZE,
- bool varName = true);
+ const char* emitRegName(regNumber reg, emitAttr size = EA_PTRSIZE, bool varName = true);
+ const char* emitFloatRegName(regNumber reg, emitAttr size = EA_PTRSIZE, bool varName = true);
- const char * emitFldName (CORINFO_FIELD_HANDLE fieldVal);
- const char * emitFncName (CORINFO_METHOD_HANDLE callVal);
+ const char* emitFldName(CORINFO_FIELD_HANDLE fieldVal);
+ const char* emitFncName(CORINFO_METHOD_HANDLE callVal);
- void emitDispIGflags (unsigned flags);
- void emitDispIG (insGroup* ig, insGroup* igPrev = NULL, bool verbose = false);
- void emitDispIGlist (bool verbose = false);
- void emitDispGCinfo ();
- void emitDispClsVar (CORINFO_FIELD_HANDLE fldHnd, ssize_t offs, bool reloc = false);
- void emitDispFrameRef(int varx, int disp, int offs, bool asmfm);
- void emitDispInsOffs (unsigned offs, bool doffs);
- void emitDispInsHex (BYTE * code, size_t sz);
+ void emitDispIGflags(unsigned flags);
+ void emitDispIG(insGroup* ig, insGroup* igPrev = nullptr, bool verbose = false);
+ void emitDispIGlist(bool verbose = false);
+ void emitDispGCinfo();
+ void emitDispClsVar(CORINFO_FIELD_HANDLE fldHnd, ssize_t offs, bool reloc = false);
+ void emitDispFrameRef(int varx, int disp, int offs, bool asmfm);
+ void emitDispInsOffs(unsigned offs, bool doffs);
+ void emitDispInsHex(BYTE* code, size_t sz);
#else // !DEBUG
#define emitVarRefOffs 0
@@ -1335,21 +1547,21 @@ protected:
/* Method prolog and epilog */
/************************************************************************/
- unsigned emitPrologEndPos;
+ unsigned emitPrologEndPos;
- unsigned emitEpilogCnt;
- UNATIVE_OFFSET emitEpilogSize;
+ unsigned emitEpilogCnt;
+ UNATIVE_OFFSET emitEpilogSize;
#ifdef _TARGET_XARCH_
- void emitStartExitSeq(); // Mark the start of the "return" sequence
- emitLocation emitExitSeqBegLoc;
- UNATIVE_OFFSET emitExitSeqSize; // minimum size of any return sequence - the 'ret' after the epilog
+ void emitStartExitSeq(); // Mark the start of the "return" sequence
+ emitLocation emitExitSeqBegLoc;
+ UNATIVE_OFFSET emitExitSeqSize; // minimum size of any return sequence - the 'ret' after the epilog
#endif // _TARGET_XARCH_
- insGroup * emitPlaceholderList; // per method placeholder list - head
- insGroup * emitPlaceholderLast; // per method placeholder list - tail
+ insGroup* emitPlaceholderList; // per method placeholder list - head
+ insGroup* emitPlaceholderLast; // per method placeholder list - tail
#ifdef JIT32_GCENCODER
@@ -1362,63 +1574,62 @@ protected:
insGroup* elIG;
};
- EpilogList* emitEpilogList; // per method epilog list - head
- EpilogList* emitEpilogLast; // per method epilog list - tail
+ EpilogList* emitEpilogList; // per method epilog list - head
+ EpilogList* emitEpilogLast; // per method epilog list - tail
public:
- bool emitHasEpilogEnd();
+ bool emitHasEpilogEnd();
- size_t emitGenEpilogLst(size_t (*fp)(void *, unsigned),
- void *cp);
+ size_t emitGenEpilogLst(size_t (*fp)(void*, unsigned), void* cp);
#endif // JIT32_GCENCODER
- void emitBegPrologEpilog(insGroup* igPh);
- void emitEndPrologEpilog();
+ void emitBegPrologEpilog(insGroup* igPh);
+ void emitEndPrologEpilog();
- emitLocation emitEpilogBegLoc;
+ emitLocation emitEpilogBegLoc;
- void emitBegFnEpilog(insGroup* igPh);
- void emitEndFnEpilog();
+ void emitBegFnEpilog(insGroup* igPh);
+ void emitEndFnEpilog();
#if FEATURE_EH_FUNCLETS
- void emitBegFuncletProlog(insGroup* igPh);
- void emitEndFuncletProlog();
+ void emitBegFuncletProlog(insGroup* igPh);
+ void emitEndFuncletProlog();
- void emitBegFuncletEpilog(insGroup* igPh);
- void emitEndFuncletEpilog();
+ void emitBegFuncletEpilog(insGroup* igPh);
+ void emitEndFuncletEpilog();
#endif // FEATURE_EH_FUNCLETS
- /************************************************************************/
- /* Members and methods used in PDB translation */
- /************************************************************************/
+/************************************************************************/
+/* Members and methods used in PDB translation */
+/************************************************************************/
#ifdef TRANSLATE_PDB
- inline void SetIDSource( instrDesc *pID );
- void MapCode ( int ilOffset, BYTE *imgDest );
- void MapFunc ( int imgOff,
- int procLen,
- int dbgStart,
- int dbgEnd,
- short frameReg,
- int stkAdjust,
- int lvaCount,
- OptJit::LclVarDsc *lvaTable,
- bool framePtr );
+ inline void SetIDSource(instrDesc* pID);
+ void MapCode(int ilOffset, BYTE* imgDest);
+ void MapFunc(int imgOff,
+ int procLen,
+ int dbgStart,
+ int dbgEnd,
+ short frameReg,
+ int stkAdjust,
+ int lvaCount,
+ OptJit::LclVarDsc* lvaTable,
+ bool framePtr);
private:
- int emitInstrDescILBase; // code offset of IL that produced this instruction descriptor
- int emitInstrDescILBase; // code offset of IL that produced this instruction descriptor
- static AddrMap * emitPDBOffsetTable; // translation table for mapping IL addresses to native addresses
- static LocalMap * emitPDBLocalTable; // local symbol translation table
- static bool emitIsPDBEnabled; // flag to disable PDB translation code when a PDB is not found
- static BYTE * emitILBaseOfCode; // start of IL .text section
- static BYTE * emitILMethodBase; // beginning of IL method (start of header)
- static BYTE * emitILMethodStart; // beginning of IL method code (right after the header)
- static BYTE * emitImgBaseOfCode; // start of the image .text section
+ int emitInstrDescILBase; // code offset of IL that produced this instruction descriptor
+ int emitInstrDescILBase; // code offset of IL that produced this instruction descriptor
+ static AddrMap* emitPDBOffsetTable; // translation table for mapping IL addresses to native addresses
+ static LocalMap* emitPDBLocalTable; // local symbol translation table
+ static bool emitIsPDBEnabled; // flag to disable PDB translation code when a PDB is not found
+ static BYTE* emitILBaseOfCode; // start of IL .text section
+ static BYTE* emitILMethodBase; // beginning of IL method (start of header)
+ static BYTE* emitILMethodStart; // beginning of IL method code (right after the header)
+ static BYTE* emitImgBaseOfCode; // start of the image .text section
#endif
@@ -1426,40 +1637,37 @@ private:
/* Methods to record a code position and later convert to offset */
/************************************************************************/
- unsigned emitFindInsNum(insGroup *ig, instrDesc *id);
- UNATIVE_OFFSET emitFindOffset(insGroup *ig, unsigned insNum);
-
+ unsigned emitFindInsNum(insGroup* ig, instrDesc* id);
+ UNATIVE_OFFSET emitFindOffset(insGroup* ig, unsigned insNum);
- /************************************************************************/
- /* Members and methods used to issue (encode) instructions. */
- /************************************************************************/
+/************************************************************************/
+/* Members and methods used to issue (encode) instructions. */
+/************************************************************************/
#ifdef DEBUG
// If we have started issuing instructions from the list of instrDesc, this is set
- bool emitIssuing;
+ bool emitIssuing;
#endif
- BYTE * emitCodeBlock; // Hot code block
- BYTE * emitColdCodeBlock; // Cold code block
- BYTE * emitConsBlock; // Read-only (constant) data block
+ BYTE* emitCodeBlock; // Hot code block
+ BYTE* emitColdCodeBlock; // Cold code block
+ BYTE* emitConsBlock; // Read-only (constant) data block
- UNATIVE_OFFSET emitTotalHotCodeSize;
- UNATIVE_OFFSET emitTotalColdCodeSize;
+ UNATIVE_OFFSET emitTotalHotCodeSize;
+ UNATIVE_OFFSET emitTotalColdCodeSize;
- UNATIVE_OFFSET emitCurCodeOffs(BYTE *dst)
+ UNATIVE_OFFSET emitCurCodeOffs(BYTE* dst)
{
size_t distance;
- if ((dst >= emitCodeBlock) &&
- (dst <= (emitCodeBlock + emitTotalHotCodeSize)))
+ if ((dst >= emitCodeBlock) && (dst <= (emitCodeBlock + emitTotalHotCodeSize)))
{
distance = (dst - emitCodeBlock);
}
else
{
- assert (emitFirstColdIG);
- assert (emitColdCodeBlock);
- assert ((dst >= emitColdCodeBlock) &&
- (dst <= (emitColdCodeBlock + emitTotalColdCodeSize)));
+ assert(emitFirstColdIG);
+ assert(emitColdCodeBlock);
+ assert((dst >= emitColdCodeBlock) && (dst <= (emitColdCodeBlock + emitTotalColdCodeSize)));
distance = (dst - emitColdCodeBlock + emitTotalHotCodeSize);
}
@@ -1467,144 +1675,146 @@ private:
return (UNATIVE_OFFSET)distance;
}
- BYTE * emitOffsetToPtr(UNATIVE_OFFSET offset)
+ BYTE* emitOffsetToPtr(UNATIVE_OFFSET offset)
{
- if (offset < emitTotalHotCodeSize)
+ if (offset < emitTotalHotCodeSize)
{
return emitCodeBlock + offset;
}
else
{
assert(offset < (emitTotalHotCodeSize + emitTotalColdCodeSize));
-
+
return emitColdCodeBlock + (offset - emitTotalHotCodeSize);
}
}
- BYTE * emitDataOffsetToPtr(UNATIVE_OFFSET offset)
+ BYTE* emitDataOffsetToPtr(UNATIVE_OFFSET offset)
{
assert(offset < emitDataSize());
return emitConsBlock + offset;
}
- bool emitJumpCrossHotColdBoundary(size_t srcOffset, size_t dstOffset)
+ bool emitJumpCrossHotColdBoundary(size_t srcOffset, size_t dstOffset)
{
if (emitTotalColdCodeSize == 0)
+ {
return false;
+ }
assert(srcOffset < (emitTotalHotCodeSize + emitTotalColdCodeSize));
assert(dstOffset < (emitTotalHotCodeSize + emitTotalColdCodeSize));
- return ((srcOffset < emitTotalHotCodeSize) !=
- (dstOffset < emitTotalHotCodeSize));
+ return ((srcOffset < emitTotalHotCodeSize) != (dstOffset < emitTotalHotCodeSize));
}
- unsigned char emitOutputByte(BYTE *dst, ssize_t val);
- unsigned char emitOutputWord(BYTE *dst, ssize_t val);
- unsigned char emitOutputLong(BYTE *dst, ssize_t val);
- unsigned char emitOutputSizeT(BYTE *dst, ssize_t val);
+ unsigned char emitOutputByte(BYTE* dst, ssize_t val);
+ unsigned char emitOutputWord(BYTE* dst, ssize_t val);
+ unsigned char emitOutputLong(BYTE* dst, ssize_t val);
+ unsigned char emitOutputSizeT(BYTE* dst, ssize_t val);
- size_t emitIssue1Instr(insGroup *ig, instrDesc *id, BYTE **dp);
- size_t emitOutputInstr(insGroup *ig, instrDesc *id, BYTE **dp);
+ size_t emitIssue1Instr(insGroup* ig, instrDesc* id, BYTE** dp);
+ size_t emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp);
- bool emitHasFramePtr;
+ bool emitHasFramePtr;
#ifdef PSEUDORANDOM_NOP_INSERTION
- bool emitInInstrumentation;
+ bool emitInInstrumentation;
#endif // PSEUDORANDOM_NOP_INSERTION
- unsigned emitMaxTmpSize;
+ unsigned emitMaxTmpSize;
#ifdef LEGACY_BACKEND
- unsigned emitLclSize;
- unsigned emitGrowableMaxByteOffs;
- void emitTmpSizeChanged(unsigned tmpSize);
+ unsigned emitLclSize;
+ unsigned emitGrowableMaxByteOffs;
+ void emitTmpSizeChanged(unsigned tmpSize);
#ifdef DEBUG
- unsigned emitMaxByteOffsIdNum;
+ unsigned emitMaxByteOffsIdNum;
#endif // DEBUG
#endif // LEGACY_BACKEND
#ifdef DEBUG
- bool emitChkAlign; // perform some alignment checks
+ bool emitChkAlign; // perform some alignment checks
#endif
- insGroup * emitCurIG;
+ insGroup* emitCurIG;
- void emitSetShortJump(instrDescJmp * id);
- void emitSetMediumJump(instrDescJmp * id);
- UNATIVE_OFFSET emitSizeOfJump(instrDescJmp *jmp);
- UNATIVE_OFFSET emitInstCodeSz(instrDesc *id);
+ void emitSetShortJump(instrDescJmp* id);
+ void emitSetMediumJump(instrDescJmp* id);
+ UNATIVE_OFFSET emitSizeOfJump(instrDescJmp* jmp);
+ UNATIVE_OFFSET emitInstCodeSz(instrDesc* id);
#ifndef LEGACY_BACKEND
CORINFO_FIELD_HANDLE emitLiteralConst(ssize_t cnsValIn, emitAttr attr = EA_8BYTE);
- CORINFO_FIELD_HANDLE emitFltOrDblConst(GenTreeDblCon *tree, emitAttr attr = EA_UNKNOWN);
- regNumber emitInsBinary (instruction ins, emitAttr attr, GenTree* dst, GenTree* src);
- regNumber emitInsTernary (instruction ins, emitAttr attr, GenTree* dst, GenTree* src1, GenTree* src2);
- void emitInsMov(instruction ins, emitAttr attr, GenTree *node);
- insFormat emitMapFmtForIns(insFormat fmt, instruction ins);
- insFormat emitMapFmtAtoM(insFormat fmt);
- void emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt, instruction ins);
- void spillIntArgRegsToShadowSlots();
+ CORINFO_FIELD_HANDLE emitFltOrDblConst(GenTreeDblCon* tree, emitAttr attr = EA_UNKNOWN);
+ regNumber emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src);
+ regNumber emitInsTernary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src1, GenTree* src2);
+ void emitInsMov(instruction ins, emitAttr attr, GenTree* node);
+ insFormat emitMapFmtForIns(insFormat fmt, instruction ins);
+ insFormat emitMapFmtAtoM(insFormat fmt);
+ void emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt, instruction ins);
+ void spillIntArgRegsToShadowSlots();
#endif // !LEGACY_BACKEND
- /************************************************************************/
- /* The logic that creates and keeps track of instruction groups */
- /************************************************************************/
+/************************************************************************/
+/* The logic that creates and keeps track of instruction groups */
+/************************************************************************/
#ifdef _TARGET_ARM_
- // The only place where this limited instruction group size is a problem is
- // in the prolog, where we only support a single instruction group. We should really fix that.
- // ARM can require a bigger prolog instruction group. One scenario is where a
- // function uses all the incoming integer and single-precision floating-point arguments,
- // and must store them all to the frame on entry. If the frame is very large, we generate
- // ugly code like "movw r10, 0x488; add r10, sp; vstr s0, [r10]" for each store, which
- // eats up our insGroup buffer.
- #define SC_IG_BUFFER_SIZE (100*sizeof(instrDesc)+14*TINY_IDSC_SIZE)
+// The only place where this limited instruction group size is a problem is
+// in the prolog, where we only support a single instruction group. We should really fix that.
+// ARM can require a bigger prolog instruction group. One scenario is where a
+// function uses all the incoming integer and single-precision floating-point arguments,
+// and must store them all to the frame on entry. If the frame is very large, we generate
+// ugly code like "movw r10, 0x488; add r10, sp; vstr s0, [r10]" for each store, which
+// eats up our insGroup buffer.
+#define SC_IG_BUFFER_SIZE (100 * sizeof(instrDesc) + 14 * TINY_IDSC_SIZE)
#else // !_TARGET_ARM_
- #define SC_IG_BUFFER_SIZE (50*sizeof(instrDesc)+14*TINY_IDSC_SIZE)
+#define SC_IG_BUFFER_SIZE (50 * sizeof(instrDesc) + 14 * TINY_IDSC_SIZE)
#endif // !_TARGET_ARM_
- size_t emitIGbuffSize;
+ size_t emitIGbuffSize;
- insGroup * emitIGlist; // first instruction group
- insGroup * emitIGlast; // last instruction group
- insGroup * emitIGthis; // issued instruction group
+ insGroup* emitIGlist; // first instruction group
+ insGroup* emitIGlast; // last instruction group
+ insGroup* emitIGthis; // issued instruction group
- insGroup * emitPrologIG; // prolog instruction group
+ insGroup* emitPrologIG; // prolog instruction group
- instrDescJmp* emitJumpList; // list of local jumps in method
- instrDescJmp* emitJumpLast; // last of local jumps in method
- void emitJumpDistBind(); // Bind all the local jumps in method
+ instrDescJmp* emitJumpList; // list of local jumps in method
+ instrDescJmp* emitJumpLast; // last of local jumps in method
+ void emitJumpDistBind(); // Bind all the local jumps in method
- void emitCheckFuncletBranch(instrDesc * jmp, insGroup * jmpIG); // Check for illegal branches between funclets
+ void emitCheckFuncletBranch(instrDesc* jmp, insGroup* jmpIG); // Check for illegal branches between funclets
- bool emitFwdJumps; // forward jumps present?
- bool emitNoGCIG; // Are we generating IGF_NOGCINTERRUPT insGroups (for prologs, epilogs, etc.)
- bool emitForceNewIG; // If we generate an instruction, and not another instruction group, force create a new emitAdd instruction group.
+ bool emitFwdJumps; // forward jumps present?
+ bool emitNoGCIG; // Are we generating IGF_NOGCINTERRUPT insGroups (for prologs, epilogs, etc.)
+ bool emitForceNewIG; // If we generate an instruction, and not another instruction group, force create a new emitAdd
+ // instruction group.
- BYTE * emitCurIGfreeNext; // next available byte in buffer
- BYTE * emitCurIGfreeEndp; // one byte past the last available byte in buffer
- BYTE * emitCurIGfreeBase; // first byte address
+ BYTE* emitCurIGfreeNext; // next available byte in buffer
+ BYTE* emitCurIGfreeEndp; // one byte past the last available byte in buffer
+ BYTE* emitCurIGfreeBase; // first byte address
- unsigned emitCurIGinsCnt; // # of collected instr's in buffer
- unsigned emitCurIGsize; // estimated code size of current group in bytes
+ unsigned emitCurIGinsCnt; // # of collected instr's in buffer
+ unsigned emitCurIGsize; // estimated code size of current group in bytes
#ifdef ARM_HAZARD_AVOIDANCE
-#define MAX_INSTR_COUNT_T1 3
- unsigned emitCurInstrCntT1; // The count of consecutive T1 instructions issued by the JIT
+#define MAX_INSTR_COUNT_T1 3
+ unsigned emitCurInstrCntT1; // The count of consecutive T1 instructions issued by the JIT
#endif
- UNATIVE_OFFSET emitCurCodeOffset; // current code offset within group
- UNATIVE_OFFSET emitTotalCodeSize; // bytes of code in entire method
+ UNATIVE_OFFSET emitCurCodeOffset; // current code offset within group
+ UNATIVE_OFFSET emitTotalCodeSize; // bytes of code in entire method
- insGroup * emitFirstColdIG; // first cold instruction group
+ insGroup* emitFirstColdIG; // first cold instruction group
- void emitSetFirstColdIGCookie(void *bbEmitCookie)
+ void emitSetFirstColdIGCookie(void* bbEmitCookie)
{
- emitFirstColdIG = (insGroup *) bbEmitCookie;
+ emitFirstColdIG = (insGroup*)bbEmitCookie;
}
- int emitOffsAdj; // current code offset adjustment
+ int emitOffsAdj; // current code offset adjustment
- instrDescJmp * emitCurIGjmpList; // list of jumps in current IG
+ instrDescJmp* emitCurIGjmpList; // list of jumps in current IG
// emitPrev* and emitInit* are only used during code generation, not during
// emission (issuing), to determine what GC values to store into an IG.
@@ -1612,18 +1822,18 @@ private:
// in that tracking. See emitSavIG(): the important use of ByrefRegs is commented
// out, and GCrefRegs is always saved.
- VARSET_TP emitPrevGCrefVars;
- regMaskTP emitPrevGCrefRegs;
- regMaskTP emitPrevByrefRegs;
+ VARSET_TP emitPrevGCrefVars;
+ regMaskTP emitPrevGCrefRegs;
+ regMaskTP emitPrevByrefRegs;
+
+ VARSET_TP emitInitGCrefVars;
+ regMaskTP emitInitGCrefRegs;
+ regMaskTP emitInitByrefRegs;
- VARSET_TP emitInitGCrefVars;
- regMaskTP emitInitGCrefRegs;
- regMaskTP emitInitByrefRegs;
-
// If this is set, we ignore comparing emitPrev* and emitInit* to determine
// whether to save GC state (to save space in the IG), and always save it.
- bool emitForceStoreGCState;
+ bool emitForceStoreGCState;
// emitThis* variables are used during emission, to track GC updates
// on a per-instruction basis. During code generation, per-instruction
@@ -1635,206 +1845,208 @@ private:
// really the only one used; the others seem to be calculated, but not
// used due to bugs.
- VARSET_TP emitThisGCrefVars;
- regMaskTP emitThisGCrefRegs; // Current set of registers holding GC references
- regMaskTP emitThisByrefRegs; // Current set of registers holding BYREF references
+ VARSET_TP emitThisGCrefVars;
+ regMaskTP emitThisGCrefRegs; // Current set of registers holding GC references
+ regMaskTP emitThisByrefRegs; // Current set of registers holding BYREF references
- bool emitThisGCrefVset; // Is "emitThisGCrefVars" up to date?
+ bool emitThisGCrefVset; // Is "emitThisGCrefVars" up to date?
- regNumber emitSyncThisObjReg; // where is "this" enregistered for synchronized methods?
+ regNumber emitSyncThisObjReg; // where is "this" enregistered for synchronized methods?
#if MULTIREG_HAS_SECOND_GC_RET
- void emitSetSecondRetRegGCType(instrDescCGCA* id, emitAttr secondRetSize);
+ void emitSetSecondRetRegGCType(instrDescCGCA* id, emitAttr secondRetSize);
#endif // MULTIREG_HAS_SECOND_GC_RET
- static void emitEncodeCallGCregs(regMaskTP regs, instrDesc *id);
- static unsigned emitDecodeCallGCregs(instrDesc *id);
+ static void emitEncodeCallGCregs(regMaskTP regs, instrDesc* id);
+ static unsigned emitDecodeCallGCregs(instrDesc* id);
- unsigned emitNxtIGnum;
+ unsigned emitNxtIGnum;
// random nop insertion to break up nop sleds
- unsigned emitNextNop;
- bool emitRandomNops;
- void emitEnableRandomNops() { emitRandomNops = true; }
- void emitDisableRandomNops() { emitRandomNops = false; }
-
- insGroup * emitAllocAndLinkIG();
- insGroup * emitAllocIG();
- void emitInitIG(insGroup* ig);
- void emitInsertIGAfter(insGroup* insertAfterIG, insGroup* ig);
-
- void emitNewIG();
- void emitDisableGC();
- void emitEnableGC();
- void emitGenIG(insGroup *ig);
- insGroup * emitSavIG(bool emitAdd = false);
- void emitNxtIG(bool emitAdd = false);
-
- bool emitCurIGnonEmpty()
- {
- return (emitCurIG && emitCurIGfreeNext > emitCurIGfreeBase);
+ unsigned emitNextNop;
+ bool emitRandomNops;
+ void emitEnableRandomNops()
+ {
+ emitRandomNops = true;
}
+ void emitDisableRandomNops()
+ {
+ emitRandomNops = false;
+ }
+
+ insGroup* emitAllocAndLinkIG();
+ insGroup* emitAllocIG();
+ void emitInitIG(insGroup* ig);
+ void emitInsertIGAfter(insGroup* insertAfterIG, insGroup* ig);
+
+ void emitNewIG();
+ void emitDisableGC();
+ void emitEnableGC();
+ void emitGenIG(insGroup* ig);
+ insGroup* emitSavIG(bool emitAdd = false);
+ void emitNxtIG(bool emitAdd = false);
- instrDesc * emitLastIns;
+ bool emitCurIGnonEmpty()
+ {
+ return (emitCurIG && emitCurIGfreeNext > emitCurIGfreeBase);
+ }
-#ifdef DEBUG
- void emitCheckIGoffsets();
+ instrDesc* emitLastIns;
+
+#ifdef DEBUG
+ void emitCheckIGoffsets();
#endif
// Terminates any in-progress instruction group, making the current IG a new empty one.
- // Mark this instruction group as having a label; return the new instruction group.
+ // Mark this instruction group as having a label; return the new instruction group.
// Sets the emitter's record of the currently live GC variables
// and registers. The "isFinallyTarget" parameter indicates that the current location is
// the start of a basic block that is returned to after a finally clause in non-exceptional execution.
- void* emitAddLabel(VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- BOOL isFinallyTarget = FALSE);
+ void* emitAddLabel(VARSET_VALARG_TP GCvars, regMaskTP gcrefRegs, regMaskTP byrefRegs, BOOL isFinallyTarget = FALSE);
#ifdef _TARGET_ARMARCH_
- void emitGetInstrDescs(insGroup* ig, instrDesc** id, int* insCnt);
+ void emitGetInstrDescs(insGroup* ig, instrDesc** id, int* insCnt);
- bool emitGetLocationInfo(emitLocation* emitLoc, insGroup** pig, instrDesc** pid, int* pinsRemaining = NULL);
+ bool emitGetLocationInfo(emitLocation* emitLoc, insGroup** pig, instrDesc** pid, int* pinsRemaining = NULL);
- bool emitNextID(insGroup*& ig, instrDesc*& id, int& insRemaining);
+ bool emitNextID(insGroup*& ig, instrDesc*& id, int& insRemaining);
typedef void (*emitProcessInstrFunc_t)(instrDesc* id, void* context);
- void emitWalkIDs(emitLocation* locFrom, emitProcessInstrFunc_t processFunc, void* context);
+ void emitWalkIDs(emitLocation* locFrom, emitProcessInstrFunc_t processFunc, void* context);
- static void emitGenerateUnwindNop(instrDesc* id, void* context);
+ static void emitGenerateUnwindNop(instrDesc* id, void* context);
#endif // _TARGET_ARMARCH_
#if defined(_TARGET_ARM_)
- emitter::insFormat emitInsFormat(instruction ins);
- size_t emitInsCode(instruction ins, insFormat fmt);
+ emitter::insFormat emitInsFormat(instruction ins);
+ size_t emitInsCode(instruction ins, insFormat fmt);
#endif
#ifdef _TARGET_X86_
- void emitMarkStackLvl(unsigned stackLevel);
+ void emitMarkStackLvl(unsigned stackLevel);
#endif
- int emitNextRandomNop();
-
- void * emitAllocInstr(size_t sz, emitAttr attr);
+ int emitNextRandomNop();
+
+ void* emitAllocInstr(size_t sz, emitAttr attr);
- instrDesc *emitAllocInstr (emitAttr attr)
+ instrDesc* emitAllocInstr(emitAttr attr)
{
- return (instrDesc *)emitAllocInstr(sizeof(instrDesc ), attr);
+ return (instrDesc*)emitAllocInstr(sizeof(instrDesc), attr);
}
- instrDescJmp *emitAllocInstrJmp ()
+ instrDescJmp* emitAllocInstrJmp()
{
- return (instrDescJmp *)emitAllocInstr(sizeof(instrDescJmp ), EA_1BYTE);
+ return (instrDescJmp*)emitAllocInstr(sizeof(instrDescJmp), EA_1BYTE);
}
#if !defined(_TARGET_ARM64_)
- instrDescLbl *emitAllocInstrLbl ()
+ instrDescLbl* emitAllocInstrLbl()
{
- return (instrDescLbl *)emitAllocInstr(sizeof(instrDescLbl ), EA_4BYTE);
+ return (instrDescLbl*)emitAllocInstr(sizeof(instrDescLbl), EA_4BYTE);
}
#endif // !_TARGET_ARM64_
- instrDescCns *emitAllocInstrCns (emitAttr attr)
+ instrDescCns* emitAllocInstrCns(emitAttr attr)
{
- return (instrDescCns *)emitAllocInstr(sizeof(instrDescCns ), attr);
+ return (instrDescCns*)emitAllocInstr(sizeof(instrDescCns), attr);
}
- instrDescCns *emitAllocInstrCns (emitAttr attr, int cns)
+ instrDescCns* emitAllocInstrCns(emitAttr attr, int cns)
{
- instrDescCns *result = (instrDescCns *)emitAllocInstr(sizeof(instrDescCns ), attr);
+ instrDescCns* result = (instrDescCns*)emitAllocInstr(sizeof(instrDescCns), attr);
result->idSetIsLargeCns();
result->idcCnsVal = cns;
return result;
}
- instrDescDsp *emitAllocInstrDsp (emitAttr attr)
+ instrDescDsp* emitAllocInstrDsp(emitAttr attr)
{
- return (instrDescDsp *)emitAllocInstr(sizeof(instrDescDsp ), attr);
+ return (instrDescDsp*)emitAllocInstr(sizeof(instrDescDsp), attr);
}
- instrDescCnsDsp*emitAllocInstrCnsDsp(emitAttr attr)
+ instrDescCnsDsp* emitAllocInstrCnsDsp(emitAttr attr)
{
- return (instrDescCnsDsp*)emitAllocInstr(sizeof(instrDescCnsDsp), attr);
+ return (instrDescCnsDsp*)emitAllocInstr(sizeof(instrDescCnsDsp), attr);
}
- instrDescAmd *emitAllocInstrAmd (emitAttr attr)
+ instrDescAmd* emitAllocInstrAmd(emitAttr attr)
{
- return (instrDescAmd *)emitAllocInstr(sizeof(instrDescAmd ), attr);
+ return (instrDescAmd*)emitAllocInstr(sizeof(instrDescAmd), attr);
}
- instrDescCnsAmd*emitAllocInstrCnsAmd (emitAttr attr)
+ instrDescCnsAmd* emitAllocInstrCnsAmd(emitAttr attr)
{
- return (instrDescCnsAmd*)emitAllocInstr(sizeof(instrDescCnsAmd), attr);
+ return (instrDescCnsAmd*)emitAllocInstr(sizeof(instrDescCnsAmd), attr);
}
- instrDescCGCA *emitAllocInstrCGCA (emitAttr attr)
+ instrDescCGCA* emitAllocInstrCGCA(emitAttr attr)
{
- return (instrDescCGCA *)emitAllocInstr(sizeof(instrDescCGCA ), attr);
+ return (instrDescCGCA*)emitAllocInstr(sizeof(instrDescCGCA), attr);
}
- instrDesc *emitNewInstrTiny (emitAttr attr);
- instrDesc *emitNewInstrSmall (emitAttr attr);
- instrDesc *emitNewInstr (emitAttr attr = EA_4BYTE);
- instrDesc *emitNewInstrSC (emitAttr attr, ssize_t cns);
- instrDesc *emitNewInstrCns (emitAttr attr, ssize_t cns);
- instrDesc *emitNewInstrDsp (emitAttr attr, ssize_t dsp);
- instrDesc *emitNewInstrCnsDsp (emitAttr attr, ssize_t cns, int dsp);
- instrDescJmp *emitNewInstrJmp ();
+ instrDesc* emitNewInstrTiny(emitAttr attr);
+ instrDesc* emitNewInstrSmall(emitAttr attr);
+ instrDesc* emitNewInstr(emitAttr attr = EA_4BYTE);
+ instrDesc* emitNewInstrSC(emitAttr attr, ssize_t cns);
+ instrDesc* emitNewInstrCns(emitAttr attr, ssize_t cns);
+ instrDesc* emitNewInstrDsp(emitAttr attr, ssize_t dsp);
+ instrDesc* emitNewInstrCnsDsp(emitAttr attr, ssize_t cns, int dsp);
+ instrDescJmp* emitNewInstrJmp();
#if !defined(_TARGET_ARM64_)
- instrDescLbl *emitNewInstrLbl ();
+ instrDescLbl* emitNewInstrLbl();
#endif // !_TARGET_ARM64_
- static const BYTE emitFmtToOps[];
+ static const BYTE emitFmtToOps[];
-#ifdef DEBUG
+#ifdef DEBUG
static const unsigned emitFmtCount;
#endif
- bool emitIsTinyInsDsc (instrDesc *id);
- bool emitIsScnsInsDsc (instrDesc *id);
+ bool emitIsTinyInsDsc(instrDesc* id);
+ bool emitIsScnsInsDsc(instrDesc* id);
- size_t emitSizeOfInsDsc (instrDesc *id);
+ size_t emitSizeOfInsDsc(instrDesc* id);
/************************************************************************/
/* The following keeps track of stack-based GC values */
/************************************************************************/
- unsigned emitTrkVarCnt;
- int * emitGCrFrameOffsTab; // Offsets of tracked stack ptr vars (varTrkIndex -> stkOffs)
+ unsigned emitTrkVarCnt;
+ int* emitGCrFrameOffsTab; // Offsets of tracked stack ptr vars (varTrkIndex -> stkOffs)
- unsigned emitGCrFrameOffsCnt; // Number of tracked stack ptr vars
- int emitGCrFrameOffsMin; // Min offset of a tracked stack ptr var
- int emitGCrFrameOffsMax; // Max offset of a tracked stack ptr var
- bool emitContTrkPtrLcls; // All lcl between emitGCrFrameOffsMin/Max are only tracked stack ptr vars
- varPtrDsc * * emitGCrFrameLiveTab; // Cache of currently live varPtrs (stkOffs -> varPtrDsc)
+ unsigned emitGCrFrameOffsCnt; // Number of tracked stack ptr vars
+ int emitGCrFrameOffsMin; // Min offset of a tracked stack ptr var
+ int emitGCrFrameOffsMax; // Max offset of a tracked stack ptr var
+ bool emitContTrkPtrLcls; // All lcl between emitGCrFrameOffsMin/Max are only tracked stack ptr vars
+ varPtrDsc** emitGCrFrameLiveTab; // Cache of currently live varPtrs (stkOffs -> varPtrDsc)
- int emitArgFrameOffsMin;
- int emitArgFrameOffsMax;
+ int emitArgFrameOffsMin;
+ int emitArgFrameOffsMax;
- int emitLclFrameOffsMin;
- int emitLclFrameOffsMax;
+ int emitLclFrameOffsMin;
+ int emitLclFrameOffsMax;
- int emitSyncThisObjOffs; // what is the offset of "this" for synchronized methods?
+ int emitSyncThisObjOffs; // what is the offset of "this" for synchronized methods?
public:
+ void emitSetFrameRangeGCRs(int offsLo, int offsHi);
+ void emitSetFrameRangeLcls(int offsLo, int offsHi);
+ void emitSetFrameRangeArgs(int offsLo, int offsHi);
- void emitSetFrameRangeGCRs(int offsLo, int offsHi);
- void emitSetFrameRangeLcls(int offsLo, int offsHi);
- void emitSetFrameRangeArgs(int offsLo, int offsHi);
-
- static instruction emitJumpKindToIns(emitJumpKind jumpKind);
- static emitJumpKind emitInsToJumpKind(instruction ins);
- static emitJumpKind emitReverseJumpKind(emitJumpKind jumpKind);
+ static instruction emitJumpKindToIns(emitJumpKind jumpKind);
+ static emitJumpKind emitInsToJumpKind(instruction ins);
+ static emitJumpKind emitReverseJumpKind(emitJumpKind jumpKind);
#ifdef _TARGET_ARM_
- static unsigned emitJumpKindCondCode(emitJumpKind jumpKind);
+ static unsigned emitJumpKindCondCode(emitJumpKind jumpKind);
#endif
-#ifdef DEBUG
- void emitInsSanityCheck(instrDesc *id);
+#ifdef DEBUG
+ void emitInsSanityCheck(instrDesc* id);
#endif
#ifdef _TARGET_ARMARCH_
@@ -1843,119 +2055,104 @@ public:
// instructions that pre- or post-increment their memory address registers are *not* considered to write
// to GC registers, even if that memory address is a by-ref: such an instruction cannot change the GC
// status of that register, since it must be a byref before and remains one after.
- //
+ //
// This may return false positives.
- bool emitInsMayWriteToGCReg(instrDesc *id);
+ bool emitInsMayWriteToGCReg(instrDesc* id);
// Returns "true" if instruction "id->idIns()" writes to a LclVar stack location.
- bool emitInsWritesToLclVarStackLoc(instrDesc *id);
+ bool emitInsWritesToLclVarStackLoc(instrDesc* id);
// Returns true if the instruction may write to more than one register.
- bool emitInsMayWriteMultipleRegs(instrDesc *id);
+ bool emitInsMayWriteMultipleRegs(instrDesc* id);
#endif // _TARGET_ARMARCH_
/************************************************************************/
/* The following is used to distinguish helper vs non-helper calls */
/************************************************************************/
- static bool emitNoGChelper(unsigned IHX);
+ static bool emitNoGChelper(unsigned IHX);
/************************************************************************/
/* The following logic keeps track of live GC ref values */
/************************************************************************/
- bool emitFullGCinfo; // full GC pointer maps?
- bool emitFullyInt; // fully interruptible code?
+ bool emitFullGCinfo; // full GC pointer maps?
+ bool emitFullyInt; // fully interruptible code?
#if EMIT_TRACK_STACK_DEPTH
- unsigned emitCntStackDepth; // 0 in prolog/epilog, One DWORD elsewhere
- unsigned emitMaxStackDepth; // actual computed max. stack depth
+ unsigned emitCntStackDepth; // 0 in prolog/epilog, One DWORD elsewhere
+ unsigned emitMaxStackDepth; // actual computed max. stack depth
#endif
/* Stack modelling wrt GC */
- bool emitSimpleStkUsed; // using the "simple" stack table?
+ bool emitSimpleStkUsed; // using the "simple" stack table?
- union
- {
- struct // if emitSimpleStkUsed==true
+ union {
+ struct // if emitSimpleStkUsed==true
{
- #define BITS_IN_BYTE (8)
- #define MAX_SIMPLE_STK_DEPTH (BITS_IN_BYTE*sizeof(unsigned))
+#define BITS_IN_BYTE (8)
+#define MAX_SIMPLE_STK_DEPTH (BITS_IN_BYTE * sizeof(unsigned))
- unsigned emitSimpleStkMask; // bit per pushed dword (if it fits. Lowest bit <==> last pushed arg)
- unsigned emitSimpleByrefStkMask; // byref qualifier for emitSimpleStkMask
+ unsigned emitSimpleStkMask; // bit per pushed dword (if it fits. Lowest bit <==> last pushed arg)
+ unsigned emitSimpleByrefStkMask; // byref qualifier for emitSimpleStkMask
} u1;
- struct // if emitSimpleStkUsed==false
+ struct // if emitSimpleStkUsed==false
{
- BYTE emitArgTrackLcl[16]; // small local table to avoid malloc
- BYTE * emitArgTrackTab; // base of the argument tracking stack
- BYTE * emitArgTrackTop; // top of the argument tracking stack
- USHORT emitGcArgTrackCnt; // count of pending arg records (stk-depth for frameless methods, gc ptrs on stk for framed methods)
+ BYTE emitArgTrackLcl[16]; // small local table to avoid malloc
+ BYTE* emitArgTrackTab; // base of the argument tracking stack
+ BYTE* emitArgTrackTop; // top of the argument tracking stack
+ USHORT emitGcArgTrackCnt; // count of pending arg records (stk-depth for frameless methods, gc ptrs on stk
+ // for framed methods)
} u2;
};
- unsigned emitCurStackLvl; // amount of bytes pushed on stack
-
+ unsigned emitCurStackLvl; // amount of bytes pushed on stack
#if EMIT_TRACK_STACK_DEPTH
/* Functions for stack tracking */
- void emitStackPush (BYTE * addr,
- GCtype gcType);
+ void emitStackPush(BYTE* addr, GCtype gcType);
- void emitStackPushN (BYTE * addr,
- unsigned count);
+ void emitStackPushN(BYTE* addr, unsigned count);
- void emitStackPop (BYTE * addr,
- bool isCall,
- unsigned char callInstrSize,
- unsigned count = 1);
+ void emitStackPop(BYTE* addr, bool isCall, unsigned char callInstrSize, unsigned count = 1);
- void emitStackKillArgs (BYTE * addr,
- unsigned count,
- unsigned char callInstrSize);
+ void emitStackKillArgs(BYTE* addr, unsigned count, unsigned char callInstrSize);
- void emitRecordGCcall (BYTE * codePos,
- unsigned char callInstrSize);
+ void emitRecordGCcall(BYTE* codePos, unsigned char callInstrSize);
// Helpers for the above
- void emitStackPushLargeStk(BYTE* addr,
- GCtype gcType,
- unsigned count = 1);
- void emitStackPopLargeStk(BYTE * addr,
- bool isCall,
- unsigned char callInstrSize,
- unsigned count = 1);
+ void emitStackPushLargeStk(BYTE* addr, GCtype gcType, unsigned count = 1);
+ void emitStackPopLargeStk(BYTE* addr, bool isCall, unsigned char callInstrSize, unsigned count = 1);
#endif // EMIT_TRACK_STACK_DEPTH
/* Liveness of stack variables, and registers */
- void emitUpdateLiveGCvars(int offs, BYTE *addr, bool birth);
- void emitUpdateLiveGCvars(VARSET_VALARG_TP vars, BYTE *addr);
- void emitUpdateLiveGCregs(GCtype gcType,
- regMaskTP regs, BYTE *addr);
+ void emitUpdateLiveGCvars(int offs, BYTE* addr, bool birth);
+ void emitUpdateLiveGCvars(VARSET_VALARG_TP vars, BYTE* addr);
+ void emitUpdateLiveGCregs(GCtype gcType, regMaskTP regs, BYTE* addr);
-#ifdef DEBUG
- const char * emitGetFrameReg ();
- void emitDispRegSet (regMaskTP regs);
- void emitDispVarSet ();
+#ifdef DEBUG
+ const char* emitGetFrameReg();
+ void emitDispRegSet(regMaskTP regs);
+ void emitDispVarSet();
#endif
- void emitGCregLiveUpd(GCtype gcType, regNumber reg, BYTE *addr);
- void emitGCregLiveSet(GCtype gcType, regMaskTP mask, BYTE *addr, bool isThis);
- void emitGCregDeadUpdMask(regMaskTP, BYTE *addr);
- void emitGCregDeadUpd(regNumber reg, BYTE *addr);
- void emitGCregDeadSet(GCtype gcType, regMaskTP mask, BYTE *addr);
+ void emitGCregLiveUpd(GCtype gcType, regNumber reg, BYTE* addr);
+ void emitGCregLiveSet(GCtype gcType, regMaskTP mask, BYTE* addr, bool isThis);
+ void emitGCregDeadUpdMask(regMaskTP, BYTE* addr);
+ void emitGCregDeadUpd(regNumber reg, BYTE* addr);
+ void emitGCregDeadSet(GCtype gcType, regMaskTP mask, BYTE* addr);
- void emitGCvarLiveUpd(int offs, int varNum, GCtype gcType, BYTE *addr);
- void emitGCvarLiveSet(int offs, GCtype gcType, BYTE *addr, ssize_t disp = -1);
- void emitGCvarDeadUpd(int offs, BYTE *addr);
- void emitGCvarDeadSet(int offs, BYTE *addr, ssize_t disp = -1);
+ void emitGCvarLiveUpd(int offs, int varNum, GCtype gcType, BYTE* addr);
+ void emitGCvarLiveSet(int offs, GCtype gcType, BYTE* addr, ssize_t disp = -1);
+ void emitGCvarDeadUpd(int offs, BYTE* addr);
+ void emitGCvarDeadSet(int offs, BYTE* addr, ssize_t disp = -1);
- GCtype emitRegGCtype (regNumber reg);
+ GCtype emitRegGCtype(regNumber reg);
// We have a mixture of code emission methods, some of which return the size of the emitted instruction,
// requiring the caller to add this to the current code pointer (dst += <call to emit code>), others of which
@@ -1964,8 +2161,7 @@ public:
// "emitCodeWithInstructionSize(dst, <call to emitCode>, &instrSize)" will do the call, and set
// "*instrSize" to the after-before code pointer difference. Returns the result of the call. (And
// asserts that the instruction size fits in an unsigned char.)
- static BYTE * emitCodeWithInstructionSize(BYTE * codePtrBefore, BYTE * newCodePointer, unsigned char* instrSize);
-
+ static BYTE* emitCodeWithInstructionSize(BYTE* codePtrBefore, BYTE* newCodePointer, unsigned char* instrSize);
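A minimal standalone sketch of the contract described in the comment above, assuming nothing beyond standard C++ (the helper name below is illustrative and is not part of the JIT sources or of this commit):

    // Sketch: return the updated code pointer and report the byte delta,
    // asserting it fits in an unsigned char -- the same contract the comment
    // above describes for emitCodeWithInstructionSize.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static uint8_t* codeWithInstructionSize(uint8_t* before, uint8_t* after, unsigned char* instrSize)
    {
        ptrdiff_t delta = after - before;
        assert((delta >= 0) && (delta <= 0xFF)); // instruction size must fit in an unsigned char
        *instrSize = (unsigned char)delta;
        return after;
    }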
/************************************************************************/
/* The following logic keeps track of initialized data sections */
@@ -1973,56 +2169,57 @@ public:
/* One of these is allocated for every blob of initialized data */
- struct dataSection
+ struct dataSection
{
enum sectionType
{
- data, blockAbsoluteAddr, blockRelative32
+ data,
+ blockAbsoluteAddr,
+ blockRelative32
};
- dataSection * dsNext;
- UNATIVE_OFFSET dsSize;
- sectionType dsType;
+ dataSection* dsNext;
+ UNATIVE_OFFSET dsSize;
+ sectionType dsType;
// variable-sized array used to store the constant data
// or BasicBlock* array in the block cases.
- BYTE dsCont[0];
+ BYTE dsCont[0];
};
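The dsCont[0] member above is the usual trailing variable-sized-array idiom; a rough sketch of how such a record is typically allocated, assuming plain malloc and illustrative names only (not code from this commit):

    // Sketch: allocate a record with trailing variable-sized storage,
    // in the spirit of dataSection::dsCont. Names are illustrative.
    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    struct Blob
    {
        Blob*    next;
        unsigned size;
        char     contents[1]; // extra bytes are allocated past the struct
    };

    static Blob* allocBlob(const void* data, unsigned size)
    {
        Blob* b = (Blob*)malloc(offsetof(Blob, contents) + size);
        if (b != nullptr)
        {
            b->next = nullptr;
            b->size = size;
            memcpy(b->contents, data, size);
        }
        return b;
    }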
/* These describe the entire initialized/uninitialized data sections */
- struct dataSecDsc
+ struct dataSecDsc
{
- dataSection * dsdList;
- dataSection * dsdLast;
- UNATIVE_OFFSET dsdOffs;
+ dataSection* dsdList;
+ dataSection* dsdLast;
+ UNATIVE_OFFSET dsdOffs;
};
- dataSecDsc emitConsDsc;
+ dataSecDsc emitConsDsc;
- dataSection * emitDataSecCur;
+ dataSection* emitDataSecCur;
- void emitOutputDataSec(dataSecDsc *sec,
- BYTE *dst);
+ void emitOutputDataSec(dataSecDsc* sec, BYTE* dst);
/************************************************************************/
/* Handles to the current class and method. */
/************************************************************************/
- COMP_HANDLE emitCmpHandle;
+ COMP_HANDLE emitCmpHandle;
/************************************************************************/
/* Helpers for interface to EE */
/************************************************************************/
- void emitRecordRelocation(void* location, /* IN */
- void* target, /* IN */
- WORD fRelocType, /* IN */
- WORD slotNum = 0, /* IN */
- INT32 addlDelta = 0); /* IN */
+ void emitRecordRelocation(void* location, /* IN */
+ void* target, /* IN */
+ WORD fRelocType, /* IN */
+ WORD slotNum = 0, /* IN */
+ INT32 addlDelta = 0); /* IN */
- void emitRecordCallSite(ULONG instrOffset, /* IN */
- CORINFO_SIG_INFO* callSig, /* IN */
- CORINFO_METHOD_HANDLE methodHandle); /* IN */
+ void emitRecordCallSite(ULONG instrOffset, /* IN */
+ CORINFO_SIG_INFO* callSig, /* IN */
+ CORINFO_METHOD_HANDLE methodHandle); /* IN */
#ifdef DEBUG
// This is a scratch buffer used to minimize the number of sig info structs
@@ -2030,24 +2227,24 @@ public:
CORINFO_SIG_INFO* emitScratchSigInfo;
#endif // DEBUG
- /************************************************************************/
- /* Logic to collect and display statistics */
- /************************************************************************/
+/************************************************************************/
+/* Logic to collect and display statistics */
+/************************************************************************/
#if EMITTER_STATS
- friend void emitterStats(FILE* fout);
- friend void emitterStaticStats(FILE* fout);
+ friend void emitterStats(FILE* fout);
+ friend void emitterStaticStats(FILE* fout);
- static size_t emitSizeMethod;
+ static size_t emitSizeMethod;
static unsigned emitTotalInsCnt;
- static unsigned emitTotalIGcnt; // total number of insGroup allocated
- static unsigned emitTotalPhIGcnt; // total number of insPlaceholderGroupData allocated
+ static unsigned emitTotalIGcnt; // total number of insGroup allocated
+ static unsigned emitTotalPhIGcnt; // total number of insPlaceholderGroupData allocated
static unsigned emitTotalIGicnt;
static size_t emitTotalIGsize;
- static unsigned emitTotalIGmcnt; // total method count
+ static unsigned emitTotalIGmcnt; // total method count
static unsigned emitTotalIGjmps;
static unsigned emitTotalIGptrs;
@@ -2057,7 +2254,7 @@ public:
static unsigned emitLargeDspCnt;
static unsigned emitSmallCnsCnt;
- #define SMALL_CNS_TSZ 256
+#define SMALL_CNS_TSZ 256
static unsigned emitSmallCns[SMALL_CNS_TSZ];
static unsigned emitLargeCnsCnt;
@@ -2065,12 +2262,12 @@ public:
#endif // EMITTER_STATS
- /*************************************************************************
- *
- * Define any target-dependent emitter members.
- */
+/*************************************************************************
+ *
+ * Define any target-dependent emitter members.
+ */
- #include "emitdef.h"
+#include "emitdef.h"
// It would be better if this were a constructor, but that would entail revamping the allocation
// infrastructure of the entire JIT...
@@ -2089,15 +2286,14 @@ public:
#include "emitinl.h"
-inline
-void emitter::instrDesc::checkSizes()
+inline void emitter::instrDesc::checkSizes()
{
#ifdef DEBUG
#if HAS_TINY_DESC
C_ASSERT(TINY_IDSC_SIZE == (offsetof(instrDesc, _idDebugOnlyInfo) + sizeof(instrDescDebugInfo*)));
#else // !tiny
C_ASSERT(SMALL_IDSC_SIZE == (offsetof(instrDesc, _idDebugOnlyInfo) + sizeof(instrDescDebugInfo*)));
-#endif
+#endif
#endif
C_ASSERT(SMALL_IDSC_SIZE == offsetof(instrDesc, _idAddrUnion));
}
@@ -2109,16 +2305,14 @@ void emitter::instrDesc::checkSizes()
* fields allocated).
*/
-inline
-bool emitter::emitIsTinyInsDsc(instrDesc *id)
+inline bool emitter::emitIsTinyInsDsc(instrDesc* id)
{
- return id->idIsTiny();
+ return id->idIsTiny();
}
-inline
-bool emitter::emitIsScnsInsDsc(instrDesc *id)
+inline bool emitter::emitIsScnsInsDsc(instrDesc* id)
{
- return id->idIsSmallDsc();
+ return id->idIsSmallDsc();
}
/*****************************************************************************
@@ -2126,9 +2320,7 @@ bool emitter::emitIsScnsInsDsc(instrDesc *id)
* Given an instruction, return its "update mode" (RD/WR/RW).
*/
-
-inline
-insUpdateModes emitter::emitInsUpdateMode(instruction ins)
+inline insUpdateModes emitter::emitInsUpdateMode(instruction ins)
{
#ifdef DEBUG
assert((unsigned)ins < emitInsModeFmtCnt);
@@ -2141,8 +2333,7 @@ insUpdateModes emitter::emitInsUpdateMode(instruction ins)
* Return the number of epilog blocks generated so far.
*/
-inline
-unsigned emitter::emitGetEpilogCnt()
+inline unsigned emitter::emitGetEpilogCnt()
{
return emitEpilogCnt;
}
@@ -2152,10 +2343,9 @@ unsigned emitter::emitGetEpilogCnt()
* Return the current size of the specified data section.
*/
-inline
-UNATIVE_OFFSET emitter::emitDataSize()
+inline UNATIVE_OFFSET emitter::emitDataSize()
{
- return emitConsDsc.dsdOffs;
+ return emitConsDsc.dsdOffs;
}
/*****************************************************************************
@@ -2164,8 +2354,7 @@ UNATIVE_OFFSET emitter::emitDataSize()
* be later converted to an actual code offset in bytes.
*/
-inline
-void * emitter::emitCurBlock()
+inline void* emitter::emitCurBlock()
{
return emitCurIG;
}
@@ -2179,22 +2368,19 @@ void * emitter::emitCurBlock()
* and its estimated offset to the caller.
*/
-inline
-unsigned emitGetInsNumFromCodePos(unsigned codePos)
+inline unsigned emitGetInsNumFromCodePos(unsigned codePos)
{
return (codePos & 0xFFFF);
}
-inline
-unsigned emitGetInsOfsFromCodePos(unsigned codePos)
+inline unsigned emitGetInsOfsFromCodePos(unsigned codePos)
{
return (codePos >> 16);
}
-inline
-unsigned emitter::emitCurOffset()
+inline unsigned emitter::emitCurOffset()
{
- unsigned codePos = emitCurIGinsCnt + (emitCurIGsize << 16);
+ unsigned codePos = emitCurIGinsCnt + (emitCurIGsize << 16);
assert(emitGetInsOfsFromCodePos(codePos) == emitCurIGsize);
assert(emitGetInsNumFromCodePos(codePos) == emitCurIGinsCnt);
@@ -2204,27 +2390,23 @@ unsigned emitter::emitCurOffset()
return codePos;
}
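The functions above pack a code position as "instruction number in the low 16 bits, estimated group offset in the high 16 bits"; a tiny self-contained illustration of that round trip (not part of the commit):

    // Sketch of the code-position packing used by emitCurOffset and the
    // emitGetIns*FromCodePos helpers above.
    #include <cassert>

    static unsigned packCodePos(unsigned insNum, unsigned insOffs)
    {
        return insNum + (insOffs << 16);
    }

    int main()
    {
        unsigned codePos = packCodePos(3, 0x40);
        assert((codePos & 0xFFFF) == 3);  // what emitGetInsNumFromCodePos extracts
        assert((codePos >> 16) == 0x40);  // what emitGetInsOfsFromCodePos extracts
        return 0;
    }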
-extern
-const unsigned short emitTypeSizes[TYP_COUNT];
+extern const unsigned short emitTypeSizes[TYP_COUNT];
template <class T>
-inline
-emitAttr emitTypeSize(T type)
+inline emitAttr emitTypeSize(T type)
{
assert(TypeGet(type) < TYP_COUNT);
assert(emitTypeSizes[TypeGet(type)] > 0);
- return (emitAttr) emitTypeSizes[TypeGet(type)];
+ return (emitAttr)emitTypeSizes[TypeGet(type)];
}
-extern
-const unsigned short emitTypeActSz[TYP_COUNT];
+extern const unsigned short emitTypeActSz[TYP_COUNT];
-inline
-emitAttr emitActualTypeSize(var_types type)
+inline emitAttr emitActualTypeSize(var_types type)
{
assert(type < TYP_COUNT);
assert(emitTypeActSz[type] > 0);
- return (emitAttr) emitTypeActSz[type];
+ return (emitAttr)emitTypeActSz[type];
}
/*****************************************************************************
@@ -2233,22 +2415,17 @@ emitAttr emitActualTypeSize(var_types type)
* storage in instruction descriptors.
*/
-/* static */ inline emitter::opSize emitter::emitEncodeSize(emitAttr size)
+/* static */ inline emitter::opSize emitter::emitEncodeSize(emitAttr size)
{
- assert(size == EA_1BYTE ||
- size == EA_2BYTE ||
- size == EA_4BYTE ||
- size == EA_8BYTE ||
- size == EA_16BYTE ||
- size == EA_32BYTE
- );
-
- return emitSizeEncode[((int) size)-1];
+ assert(size == EA_1BYTE || size == EA_2BYTE || size == EA_4BYTE || size == EA_8BYTE || size == EA_16BYTE ||
+ size == EA_32BYTE);
+
+ return emitSizeEncode[((int)size) - 1];
}
-/* static */ inline emitAttr emitter::emitDecodeSize(emitter::opSize ensz)
+/* static */ inline emitAttr emitter::emitDecodeSize(emitter::opSize ensz)
{
- assert( ((unsigned) ensz) < OPSZ_COUNT);
+ assert(((unsigned)ensz) < OPSZ_COUNT);
return emitSizeDecode[ensz];
}
@@ -2258,75 +2435,69 @@ emitAttr emitActualTypeSize(var_types type)
* Little helpers to allocate various flavors of instructions.
*/
-inline
-emitter::instrDesc *emitter::emitNewInstrTiny (emitAttr attr)
+inline emitter::instrDesc* emitter::emitNewInstrTiny(emitAttr attr)
{
- instrDesc *id;
+ instrDesc* id;
- id = (instrDesc*)emitAllocInstr(TINY_IDSC_SIZE, attr);
+ id = (instrDesc*)emitAllocInstr(TINY_IDSC_SIZE, attr);
id->idSetIsTiny();
- return id;
+ return id;
}
-inline
-emitter::instrDesc *emitter::emitNewInstrSmall (emitAttr attr)
+inline emitter::instrDesc* emitter::emitNewInstrSmall(emitAttr attr)
{
- instrDesc *id;
+ instrDesc* id;
// This is larger than the Tiny Descr
- id = (instrDesc*)emitAllocInstr(SMALL_IDSC_SIZE, attr);
+ id = (instrDesc*)emitAllocInstr(SMALL_IDSC_SIZE, attr);
id->idSetIsSmallDsc();
- return id;
+ return id;
}
-inline
-emitter::instrDesc *emitter::emitNewInstr (emitAttr attr)
+inline emitter::instrDesc* emitter::emitNewInstr(emitAttr attr)
{
// This is larger than the Small Descr
- return emitAllocInstr(attr);
+ return emitAllocInstr(attr);
}
-inline
-emitter::instrDescJmp*emitter::emitNewInstrJmp()
+inline emitter::instrDescJmp* emitter::emitNewInstrJmp()
{
- return emitAllocInstrJmp();
+ return emitAllocInstrJmp();
}
#if !defined(_TARGET_ARM64_)
-inline
-emitter::instrDescLbl*emitter::emitNewInstrLbl()
+inline emitter::instrDescLbl* emitter::emitNewInstrLbl()
{
- return emitAllocInstrLbl();
+ return emitAllocInstrLbl();
}
#endif // !_TARGET_ARM64_
-inline
-emitter::instrDesc * emitter::emitNewInstrDsp (emitAttr attr, ssize_t dsp)
+inline emitter::instrDesc* emitter::emitNewInstrDsp(emitAttr attr, ssize_t dsp)
{
- if (dsp == 0)
+ if (dsp == 0)
{
- instrDesc *id = emitAllocInstr (attr);
+ instrDesc* id = emitAllocInstr(attr);
#if EMITTER_STATS
emitSmallDspCnt++;
#endif
- return id;
+ return id;
}
else
{
- instrDescDsp *id = emitAllocInstrDsp (attr);
+ instrDescDsp* id = emitAllocInstrDsp(attr);
id->idSetIsLargeDsp();
- id->iddDspVal = dsp;
+ id->iddDspVal = dsp;
#if EMITTER_STATS
emitLargeDspCnt++;
#endif
- return id;
+ return id;
}
}
@@ -2338,36 +2509,36 @@ emitter::instrDesc * emitter::emitNewInstrDsp (emitAttr attr, ssize_t dsp
 * Note that this is very similar to emitter::emitNewInstrSC(), except it never
* allocates a small descriptor.
*/
-inline emitter::instrDesc * emitter::emitNewInstrCns (emitAttr attr, ssize_t cns)
+inline emitter::instrDesc* emitter::emitNewInstrCns(emitAttr attr, ssize_t cns)
{
if (instrDesc::fitsInSmallCns(cns))
{
- instrDesc *id = emitAllocInstr(attr);
+ instrDesc* id = emitAllocInstr(attr);
id->idSmallCns(cns);
-
+
#if EMITTER_STATS
emitSmallCnsCnt++;
- if (cns - ID_MIN_SMALL_CNS >= SMALL_CNS_TSZ)
- emitSmallCns[ SMALL_CNS_TSZ - 1 ]++;
+ if (cns - ID_MIN_SMALL_CNS >= SMALL_CNS_TSZ)
+ emitSmallCns[SMALL_CNS_TSZ - 1]++;
else
emitSmallCns[cns - ID_MIN_SMALL_CNS]++;
#endif
- return id;
+ return id;
}
else
{
- instrDescCns *id = emitAllocInstrCns (attr);
+ instrDescCns* id = emitAllocInstrCns(attr);
id->idSetIsLargeCns();
- id->idcCnsVal = cns;
+ id->idcCnsVal = cns;
#if EMITTER_STATS
emitLargeCnsCnt++;
#endif
- return id;
+ return id;
}
}
@@ -2377,16 +2548,22 @@ inline emitter::instrDesc * emitter::emitNewInstrCns (emitAttr attr, ssize_t
*
*/
-inline size_t emitter::emitGetInstrDescSize(const instrDesc * id)
+inline size_t emitter::emitGetInstrDescSize(const instrDesc* id)
{
if (id->idIsTiny())
+ {
return TINY_IDSC_SIZE;
+ }
if (id->idIsSmallDsc())
+ {
return SMALL_IDSC_SIZE;
+ }
if (id->idIsLargeCns())
+ {
return sizeof(instrDescCns);
+ }
return sizeof(instrDesc);
}
@@ -2400,10 +2577,9 @@ inline size_t emitter::emitGetInstrDescSize(const instrDesc * id)
* emitNewInstrCns() always allocates at least sizeof(instrDesc).
*/
-inline
-emitter::instrDesc *emitter::emitNewInstrSC(emitAttr attr, ssize_t cns)
+inline emitter::instrDesc* emitter::emitNewInstrSC(emitAttr attr, ssize_t cns)
{
- instrDesc *id;
+ instrDesc* id;
if (instrDesc::fitsInSmallCns(cns))
{
@@ -2417,10 +2593,10 @@ emitter::instrDesc *emitter::emitNewInstrSC(emitAttr attr, ssize_t cns)
id = (instrDesc*)emitAllocInstr(sizeof(instrDescCns), attr);
id->idSetIsLargeCns();
- ((instrDescCns*)id)->idcCnsVal = cns;
+ ((instrDescCns*)id)->idcCnsVal = cns;
}
- return id;
+ return id;
}
/*****************************************************************************
@@ -2428,14 +2604,20 @@ emitter::instrDesc *emitter::emitNewInstrSC(emitAttr attr, ssize_t cns)
* Get the instrDesc size for something that contains a constant
*/
-inline size_t emitter::emitGetInstrDescSizeSC(const instrDesc * id)
+inline size_t emitter::emitGetInstrDescSizeSC(const instrDesc* id)
{
if (id->idIsSmallDsc())
+ {
return SMALL_IDSC_SIZE;
+ }
else if (id->idIsLargeCns())
+ {
return sizeof(instrDescCns);
+ }
else
+ {
return sizeof(instrDesc);
+ }
}
/*****************************************************************************
@@ -2444,52 +2626,50 @@ inline size_t emitter::emitGetInstrDescSizeSC(const instrDesc * id)
* get stored in different places within the instruction descriptor.
*/
-inline
-ssize_t emitter::emitGetInsCns (instrDesc *id)
+inline ssize_t emitter::emitGetInsCns(instrDesc* id)
{
- return id->idIsLargeCns() ? ((instrDescCns*)id)->idcCnsVal
- : id ->idSmallCns();
+ return id->idIsLargeCns() ? ((instrDescCns*)id)->idcCnsVal : id->idSmallCns();
}
-inline
-ssize_t emitter::emitGetInsDsp (instrDesc *id)
+inline ssize_t emitter::emitGetInsDsp(instrDesc* id)
{
if (id->idIsLargeDsp())
{
if (id->idIsLargeCns())
+ {
return ((instrDescCnsDsp*)id)->iddcDspVal;
+ }
return ((instrDescDsp*)id)->iddDspVal;
}
return 0;
}
-inline
-ssize_t emitter::emitGetInsCnsDsp(instrDesc *id, ssize_t *dspPtr)
+inline ssize_t emitter::emitGetInsCnsDsp(instrDesc* id, ssize_t* dspPtr)
{
- if (id->idIsLargeCns())
+ if (id->idIsLargeCns())
{
- if (id->idIsLargeDsp())
+ if (id->idIsLargeDsp())
{
*dspPtr = ((instrDescCnsDsp*)id)->iddcDspVal;
- return ((instrDescCnsDsp*)id)->iddcCnsVal;
+ return ((instrDescCnsDsp*)id)->iddcCnsVal;
}
else
{
*dspPtr = 0;
- return ((instrDescCns *)id)->idcCnsVal;
+ return ((instrDescCns*)id)->idcCnsVal;
}
}
else
{
- if (id->idIsLargeDsp())
+ if (id->idIsLargeDsp())
{
- *dspPtr = ((instrDescDsp *)id)->iddDspVal;
- return id ->idSmallCns();
+ *dspPtr = ((instrDescDsp*)id)->iddDspVal;
+ return id->idSmallCns();
}
else
{
*dspPtr = 0;
- return id ->idSmallCns();
+ return id->idSmallCns();
}
}
}
@@ -2499,12 +2679,11 @@ ssize_t emitter::emitGetInsCnsDsp(instrDesc *id, ssize_t *dspPtr)
* Get hold of the argument count for an indirect call.
*/
-inline
-unsigned emitter::emitGetInsCIargs(instrDesc *id)
+inline unsigned emitter::emitGetInsCIargs(instrDesc* id)
{
- if (id->idIsLargeCall())
+ if (id->idIsLargeCall())
{
- return ((instrDescCGCA*)id)->idcArgCnt;
+ return ((instrDescCGCA*)id)->idcArgCnt;
}
else
{
@@ -2522,23 +2701,27 @@ unsigned emitter::emitGetInsCIargs(instrDesc *id)
* Returns true if the given register contains a live GC ref.
*/
-inline
-GCtype emitter::emitRegGCtype (regNumber reg)
+inline GCtype emitter::emitRegGCtype(regNumber reg)
{
assert(emitIssuing);
- if ((emitThisGCrefRegs & genRegMask(reg)) != 0)
+ if ((emitThisGCrefRegs & genRegMask(reg)) != 0)
+ {
return GCT_GCREF;
- else if ((emitThisByrefRegs & genRegMask(reg)) != 0)
+ }
+ else if ((emitThisByrefRegs & genRegMask(reg)) != 0)
+ {
return GCT_BYREF;
+ }
else
+ {
return GCT_NONE;
+ }
}
+#ifdef DEBUG
-#ifdef DEBUG
-
-#if EMIT_TRACK_STACK_DEPTH
+#if EMIT_TRACK_STACK_DEPTH
#define CHECK_STACK_DEPTH() assert((int)emitCurStackLvl >= 0)
#else
#define CHECK_STACK_DEPTH()
@@ -2551,10 +2734,13 @@ GCtype emitter::emitRegGCtype (regNumber reg)
* Return true when a given code offset is properly aligned for the target
*/
-inline bool IsCodeAligned(UNATIVE_OFFSET offset) { return ((offset & (CODE_ALIGN-1)) == 0); }
+inline bool IsCodeAligned(UNATIVE_OFFSET offset)
+{
+ return ((offset & (CODE_ALIGN - 1)) == 0);
+}
// Static:
-inline BYTE* emitter::emitCodeWithInstructionSize(BYTE * codePtrBefore, BYTE * newCodePointer, unsigned char* instrSize)
+inline BYTE* emitter::emitCodeWithInstructionSize(BYTE* codePtrBefore, BYTE* newCodePointer, unsigned char* instrSize)
{
// DLD: Perhaps this method should return the instruction size, and we should do dst += <that size>
// as is done in other cases?
@@ -2570,10 +2756,9 @@ inline BYTE* emitter::emitCodeWithInstructionSize(BYTE * codePtrBefore, BYTE * n
* Add a new IG to the current list, and get it ready to receive code.
*/
-inline
-void emitter::emitNewIG()
+inline void emitter::emitNewIG()
{
- insGroup* ig = emitAllocAndLinkIG();
+ insGroup* ig = emitAllocAndLinkIG();
/* It's linked in. Now, set it up to accept code */
@@ -2610,7 +2795,6 @@ inline void emitter::emitEnableGC()
emitForceNewIG = true;
}
-
- /*****************************************************************************/
+/*****************************************************************************/
#endif // _EMIT_H_
/*****************************************************************************/
diff --git a/src/jit/emitarm.cpp b/src/jit/emitarm.cpp
index 952ef75d46..893d380b98 100644
--- a/src/jit/emitarm.cpp
+++ b/src/jit/emitarm.cpp
@@ -26,27 +26,25 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
-const instruction emitJumpKindInstructions[] =
-{
+const instruction emitJumpKindInstructions[] = {
INS_nop,
- #define JMP_SMALL(en, rev, ins) INS_##ins,
- #include "emitjmps.h"
+#define JMP_SMALL(en, rev, ins) INS_##ins,
+#include "emitjmps.h"
};
-const emitJumpKind emitReverseJumpKinds[] =
-{
+const emitJumpKind emitReverseJumpKinds[] = {
EJ_NONE,
- #define JMP_SMALL(en, rev, ins) EJ_##rev,
- #include "emitjmps.h"
+#define JMP_SMALL(en, rev, ins) EJ_##rev,
+#include "emitjmps.h"
};
/*****************************************************************************
* Look up the instruction for a jump kind
*/
-/*static*/ instruction emitter::emitJumpKindToIns(emitJumpKind jumpKind)
+/*static*/ instruction emitter::emitJumpKindToIns(emitJumpKind jumpKind)
{
assert((unsigned)jumpKind < ArrLen(emitJumpKindInstructions));
return emitJumpKindInstructions[jumpKind];
@@ -57,7 +55,7 @@ const emitJumpKind emitReverseJumpKinds[] =
* branch instruction with a jump kind!
*/
-/*static*/ emitJumpKind emitter::emitInsToJumpKind(instruction ins)
+/*static*/ emitJumpKind emitter::emitInsToJumpKind(instruction ins)
{
for (unsigned i = 0; i < ArrLen(emitJumpKindInstructions); i++)
{
@@ -75,7 +73,7 @@ const emitJumpKind emitReverseJumpKinds[] =
* Reverse the conditional jump
*/
-/*static*/ emitJumpKind emitter::emitReverseJumpKind(emitJumpKind jumpKind)
+/*static*/ emitJumpKind emitter::emitReverseJumpKind(emitJumpKind jumpKind)
{
assert(jumpKind < EJ_COUNT);
return emitReverseJumpKinds[jumpKind];
@@ -86,16 +84,16 @@ const emitJumpKind emitReverseJumpKinds[] =
* Return the allocated size (in bytes) of the given instruction descriptor.
*/
-size_t emitter::emitSizeOfInsDsc(instrDesc *id)
+size_t emitter::emitSizeOfInsDsc(instrDesc* id)
{
- assert (!emitIsTinyInsDsc(id));
+ assert(!emitIsTinyInsDsc(id));
- if (emitIsScnsInsDsc(id))
+ if (emitIsScnsInsDsc(id))
return SMALL_IDSC_SIZE;
assert((unsigned)id->idInsFmt() < emitFmtCount);
- ID_OPS idOp = (ID_OPS) emitFmtToOps[id->idInsFmt()];
+ ID_OPS idOp = (ID_OPS)emitFmtToOps[id->idInsFmt()];
bool isCallIns = (id->idIns() == INS_bl) || (id->idIns() == INS_blx);
bool maybeCallIns = (id->idIns() == INS_b) || (id->idIns() == INS_bx);
@@ -104,41 +102,41 @@ size_t emitter::emitSizeOfInsDsc(instrDesc *id)
// Only ID_OP_CALL and ID_OP_SPEC check for this, so we enforce that the
// INS_call instruction always uses one of these idOps.
- assert(!isCallIns || // either not a call or
- idOp == ID_OP_CALL || // is a direct call
- idOp == ID_OP_SPEC || // is an indirect call
- idOp == ID_OP_JMP ); // is a local call to finally clause
+ assert(!isCallIns || // either not a call or
+ idOp == ID_OP_CALL || // is a direct call
+ idOp == ID_OP_SPEC || // is an indirect call
+ idOp == ID_OP_JMP); // is a local call to finally clause
switch (idOp)
{
- case ID_OP_NONE:
- break;
+ case ID_OP_NONE:
+ break;
- case ID_OP_JMP:
- return sizeof(instrDescJmp);
+ case ID_OP_JMP:
+ return sizeof(instrDescJmp);
- case ID_OP_LBL:
- return sizeof(instrDescLbl);
+ case ID_OP_LBL:
+ return sizeof(instrDescLbl);
- case ID_OP_CALL:
- case ID_OP_SPEC:
- assert(isCallIns || maybeCallIns);
- if (id->idIsLargeCall())
- {
- /* Must be a "fat" indirect call descriptor */
- return sizeof(instrDescCGCA);
- }
- else
- {
- assert(!id->idIsLargeDsp());
- assert(!id->idIsLargeCns());
- return sizeof(instrDesc);
- }
- break;
+ case ID_OP_CALL:
+ case ID_OP_SPEC:
+ assert(isCallIns || maybeCallIns);
+ if (id->idIsLargeCall())
+ {
+ /* Must be a "fat" indirect call descriptor */
+ return sizeof(instrDescCGCA);
+ }
+ else
+ {
+ assert(!id->idIsLargeDsp());
+ assert(!id->idIsLargeCns());
+ return sizeof(instrDesc);
+ }
+ break;
- default:
- NO_WAY("unexpected instruction descriptor format");
- break;
+ default:
+ NO_WAY("unexpected instruction descriptor format");
+ break;
}
if (id->idIsLargeCns())
@@ -157,477 +155,517 @@ size_t emitter::emitSizeOfInsDsc(instrDesc *id)
}
}
-
bool offsetFitsInVectorMem(int disp)
{
unsigned imm = unsigned_abs(disp);
return ((imm & 0x03fc) == imm);
}
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* The following called for each recorded instruction -- use for debugging.
*/
-void emitter::emitInsSanityCheck(instrDesc *id)
+void emitter::emitInsSanityCheck(instrDesc* id)
{
/* What instruction format have we got? */
switch (id->idInsFmt())
{
- case IF_T1_A: // T1_A ................
- case IF_T2_A: // T2_A ................ ................
- break;
+ case IF_T1_A: // T1_A ................
+ case IF_T2_A: // T2_A ................ ................
+ break;
- case IF_T1_B: // T1_B ........cccc.... cond
- case IF_T2_B: // T2_B ................ ............iiii imm4
- assert(emitGetInsSC(id) < 0x10);
- break;
+ case IF_T1_B: // T1_B ........cccc.... cond
+ case IF_T2_B: // T2_B ................ ............iiii imm4
+ assert(emitGetInsSC(id) < 0x10);
+ break;
- case IF_T1_C: // T1_C .....iiiiinnnddd R1 R2 imm5
- assert(isLowRegister(id->idReg1()));
- assert(isLowRegister(id->idReg2()));
- if (emitInsIsLoadOrStore(id->idIns()))
- {
- emitAttr size = id->idOpSize();
- int imm = emitGetInsSC(id);
-
- imm = insUnscaleImm(imm, size);
- assert(imm < 0x20);
- }
- else
- {
- assert(id->idSmallCns() < 0x20);
- }
- break;
+ case IF_T1_C: // T1_C .....iiiiinnnddd R1 R2 imm5
+ assert(isLowRegister(id->idReg1()));
+ assert(isLowRegister(id->idReg2()));
+ if (emitInsIsLoadOrStore(id->idIns()))
+ {
+ emitAttr size = id->idOpSize();
+ int imm = emitGetInsSC(id);
- case IF_T1_D0: // T1_D0 ........Dmmmmddd R1* R2*
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- break;
+ imm = insUnscaleImm(imm, size);
+ assert(imm < 0x20);
+ }
+ else
+ {
+ assert(id->idSmallCns() < 0x20);
+ }
+ break;
- case IF_T1_D1: // T1_D1 .........mmmm... R1*
- assert(isGeneralRegister(id->idReg1()));
- break;
+ case IF_T1_D0: // T1_D0 ........Dmmmmddd R1* R2*
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ break;
- case IF_T1_D2: // T1_D2 .........mmmm... R3*
- assert(isGeneralRegister(id->idReg3()));
- break;
+ case IF_T1_D1: // T1_D1 .........mmmm... R1*
+ assert(isGeneralRegister(id->idReg1()));
+ break;
- case IF_T1_E: // T1_E ..........nnnddd R1 R2
- assert(isLowRegister(id->idReg1()));
- assert(isLowRegister(id->idReg2()));
- assert(id->idSmallCns() < 0x20);
- break;
+ case IF_T1_D2: // T1_D2 .........mmmm... R3*
+ assert(isGeneralRegister(id->idReg3()));
+ break;
- case IF_T1_F: // T1_F .........iiiiiii SP imm7
- assert(id->idReg1() == REG_SP);
- assert(id->idOpSize() == EA_4BYTE);
- assert((emitGetInsSC(id) & ~0x1FC) == 0);
- break;
+ case IF_T1_E: // T1_E ..........nnnddd R1 R2
+ assert(isLowRegister(id->idReg1()));
+ assert(isLowRegister(id->idReg2()));
+ assert(id->idSmallCns() < 0x20);
+ break;
- case IF_T1_G: // T1_G .......iiinnnddd R1 R2 imm3
- assert(isLowRegister(id->idReg1()));
- assert(isLowRegister(id->idReg2()));
- assert(id->idSmallCns() < 0x8);
- break;
+ case IF_T1_F: // T1_F .........iiiiiii SP imm7
+ assert(id->idReg1() == REG_SP);
+ assert(id->idOpSize() == EA_4BYTE);
+ assert((emitGetInsSC(id) & ~0x1FC) == 0);
+ break;
- case IF_T1_H: // T1_H .......mmmnnnddd R1 R2 R3
- assert(isLowRegister(id->idReg1()));
- assert(isLowRegister(id->idReg2()));
- assert(isLowRegister(id->idReg3()));
- break;
+ case IF_T1_G: // T1_G .......iiinnnddd R1 R2 imm3
+ assert(isLowRegister(id->idReg1()));
+ assert(isLowRegister(id->idReg2()));
+ assert(id->idSmallCns() < 0x8);
+ break;
- case IF_T1_I: // T1_I ......i.iiiiiddd R1 imm6
- assert(isLowRegister(id->idReg1()));
- break;
+ case IF_T1_H: // T1_H .......mmmnnnddd R1 R2 R3
+ assert(isLowRegister(id->idReg1()));
+ assert(isLowRegister(id->idReg2()));
+ assert(isLowRegister(id->idReg3()));
+ break;
- case IF_T1_J0: // T1_J0 .....dddiiiiiiii R1 imm8
- assert(isLowRegister(id->idReg1()));
- assert(emitGetInsSC(id) < 0x100);
- break;
+ case IF_T1_I: // T1_I ......i.iiiiiddd R1 imm6
+ assert(isLowRegister(id->idReg1()));
+ break;
- case IF_T1_J1: // T1_J1 .....dddiiiiiiii R1 <regmask8>
- assert(isLowRegister(id->idReg1()));
- assert(emitGetInsSC(id) < 0x100);
- break;
+ case IF_T1_J0: // T1_J0 .....dddiiiiiiii R1 imm8
+ assert(isLowRegister(id->idReg1()));
+ assert(emitGetInsSC(id) < 0x100);
+ break;
- case IF_T1_J2: // T1_J2 .....dddiiiiiiii R1 SP imm8
- assert(isLowRegister(id->idReg1()));
- assert(id->idReg2() == REG_SP);
- assert(id->idOpSize() == EA_4BYTE);
- assert((emitGetInsSC(id)& ~0x3FC) == 0);
- break;
+ case IF_T1_J1: // T1_J1 .....dddiiiiiiii R1 <regmask8>
+ assert(isLowRegister(id->idReg1()));
+ assert(emitGetInsSC(id) < 0x100);
+ break;
- case IF_T1_L0: // T1_L0 ........iiiiiiii imm8
- assert(emitGetInsSC(id) < 0x100);
- break;
+ case IF_T1_J2: // T1_J2 .....dddiiiiiiii R1 SP imm8
+ assert(isLowRegister(id->idReg1()));
+ assert(id->idReg2() == REG_SP);
+ assert(id->idOpSize() == EA_4BYTE);
+ assert((emitGetInsSC(id) & ~0x3FC) == 0);
+ break;
- case IF_T1_L1: // T1_L1 .......Rrrrrrrrr <regmask8+2>
- assert(emitGetInsSC(id) < 0x400);
- break;
+ case IF_T1_L0: // T1_L0 ........iiiiiiii imm8
+ assert(emitGetInsSC(id) < 0x100);
+ break;
- case IF_T2_C0: // T2_C0 ...........Snnnn .iiiddddiishmmmm R1 R2 R3 S, imm5, sh
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isGeneralRegister(id->idReg3()));
- assert(emitGetInsSC(id) < 0x20);
- break;
+ case IF_T1_L1: // T1_L1 .......Rrrrrrrrr <regmask8+2>
+ assert(emitGetInsSC(id) < 0x400);
+ break;
- case IF_T2_C4: // T2_C4 ...........Snnnn ....dddd....mmmm R1 R2 R3 S
- case IF_T2_C5: // T2_C5 ............nnnn ....dddd....mmmm R1 R2 R3
- case IF_T2_G1: // T2_G1 ............nnnn ttttTTTT........ R1 R2 R3
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isGeneralRegister(id->idReg3()));
- break;
+ case IF_T2_C0: // T2_C0 ...........Snnnn .iiiddddiishmmmm R1 R2 R3 S, imm5, sh
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isGeneralRegister(id->idReg3()));
+ assert(emitGetInsSC(id) < 0x20);
+ break;
- case IF_T2_C1: // T2_C1 ...........S.... .iiiddddiishmmmm R1 R2 S, imm5, sh
- case IF_T2_C2: // T2_C2 ...........S.... .iiiddddii..mmmm R1 R2 S, imm5
- case IF_T2_C8: // T2_C8 ............nnnn .iii....iishmmmm R1 R2 imm5, sh
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(emitGetInsSC(id) < 0x20);
- break;
+ case IF_T2_C4: // T2_C4 ...........Snnnn ....dddd....mmmm R1 R2 R3 S
+ case IF_T2_C5: // T2_C5 ............nnnn ....dddd....mmmm R1 R2 R3
+ case IF_T2_G1: // T2_G1 ............nnnn ttttTTTT........ R1 R2 R3
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isGeneralRegister(id->idReg3()));
+ break;
- case IF_T2_C6: // T2_C6 ................ ....dddd..iimmmm R1 R2 imm2
- case IF_T2_C7: // T2_C7 ............nnnn ..........shmmmm R1 R2 imm2
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(emitGetInsSC(id) < 0x4);
- break;
+ case IF_T2_C1: // T2_C1 ...........S.... .iiiddddiishmmmm R1 R2 S, imm5, sh
+ case IF_T2_C2: // T2_C2 ...........S.... .iiiddddii..mmmm R1 R2 S, imm5
+ case IF_T2_C8: // T2_C8 ............nnnn .iii....iishmmmm R1 R2 imm5, sh
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(emitGetInsSC(id) < 0x20);
+ break;
- case IF_T2_C3: // T2_C3 ...........S.... ....dddd....mmmm R1 R2 S
- case IF_T2_C9: // T2_C9 ............nnnn ............mmmm R1 R2
- case IF_T2_C10: // T2_C10 ............mmmm ....dddd....mmmm R1 R2
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- break;
+ case IF_T2_C6: // T2_C6 ................ ....dddd..iimmmm R1 R2 imm2
+ case IF_T2_C7: // T2_C7 ............nnnn ..........shmmmm R1 R2 imm2
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(emitGetInsSC(id) < 0x4);
+ break;
- case IF_T2_D0: // T2_D0 ............nnnn .iiiddddii.wwwww R1 R2 imm5, imm5
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(emitGetInsSC(id) < 0x400);
- break;
+ case IF_T2_C3: // T2_C3 ...........S.... ....dddd....mmmm R1 R2 S
+ case IF_T2_C9: // T2_C9 ............nnnn ............mmmm R1 R2
+ case IF_T2_C10: // T2_C10 ............mmmm ....dddd....mmmm R1 R2
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ break;
- case IF_T2_D1: // T2_D1 ................ .iiiddddii.wwwww R1 imm5, imm5
- assert(isGeneralRegister(id->idReg1()));
- assert(emitGetInsSC(id) < 0x400);
- break;
+ case IF_T2_D0: // T2_D0 ............nnnn .iiiddddii.wwwww R1 R2 imm5, imm5
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(emitGetInsSC(id) < 0x400);
+ break;
- case IF_T2_E0: // T2_E0 ............nnnn tttt......shmmmm R1 R2 R3 imm2
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- if (id->idIsLclVar())
- {
- assert(isGeneralRegister(codeGen->rsGetRsvdReg()));
- }
- else
- {
- assert(isGeneralRegister(id->idReg3()));
- assert(emitGetInsSC(id) < 0x4);
- }
- break;
+ case IF_T2_D1: // T2_D1 ................ .iiiddddii.wwwww R1 imm5, imm5
+ assert(isGeneralRegister(id->idReg1()));
+ assert(emitGetInsSC(id) < 0x400);
+ break;
- case IF_T2_E1: // T2_E1 ............nnnn tttt............ R1 R2
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- break;
+ case IF_T2_E0: // T2_E0 ............nnnn tttt......shmmmm R1 R2 R3 imm2
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ if (id->idIsLclVar())
+ {
+ assert(isGeneralRegister(codeGen->rsGetRsvdReg()));
+ }
+ else
+ {
+ assert(isGeneralRegister(id->idReg3()));
+ assert(emitGetInsSC(id) < 0x4);
+ }
+ break;
- case IF_T2_E2: // T2_E2 ................ tttt............ R1
- assert(isGeneralRegister(id->idReg1()));
- break;
+ case IF_T2_E1: // T2_E1 ............nnnn tttt............ R1 R2
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ break;
- case IF_T2_F1: // T2_F1 ............nnnn ttttdddd....mmmm R1 R2 R3 R4
- case IF_T2_F2: // T2_F2 ............nnnn aaaadddd....mmmm R1 R2 R3 R4
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isGeneralRegister(id->idReg3()));
- assert(isGeneralRegister(id->idReg4()));
- break;
+ case IF_T2_E2: // T2_E2 ................ tttt............ R1
+ assert(isGeneralRegister(id->idReg1()));
+ break;
- case IF_T2_G0: // T2_G0 .......PU.W.nnnn ttttTTTTiiiiiiii R1 R2 R3 imm8, PUW
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isGeneralRegister(id->idReg3()));
- assert(unsigned_abs(emitGetInsSC(id)) < 0x100);
- break;
+ case IF_T2_F1: // T2_F1 ............nnnn ttttdddd....mmmm R1 R2 R3 R4
+ case IF_T2_F2: // T2_F2 ............nnnn aaaadddd....mmmm R1 R2 R3 R4
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isGeneralRegister(id->idReg3()));
+ assert(isGeneralRegister(id->idReg4()));
+ break;
- case IF_T2_H0: // T2_H0 ............nnnn tttt.PUWiiiiiiii R1 R2 imm8, PUW
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(unsigned_abs(emitGetInsSC(id)) < 0x100);
- break;
+ case IF_T2_G0: // T2_G0 .......PU.W.nnnn ttttTTTTiiiiiiii R1 R2 R3 imm8, PUW
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isGeneralRegister(id->idReg3()));
+ assert(unsigned_abs(emitGetInsSC(id)) < 0x100);
+ break;
- case IF_T2_H1: // T2_H1 ............nnnn tttt....iiiiiiii R1 R2 imm8
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(emitGetInsSC(id) < 0x100);
- break;
+ case IF_T2_H0: // T2_H0 ............nnnn tttt.PUWiiiiiiii R1 R2 imm8, PUW
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(unsigned_abs(emitGetInsSC(id)) < 0x100);
+ break;
- case IF_T2_H2: // T2_H2 ............nnnn ........iiiiiiii R1 imm8
- assert(isGeneralRegister(id->idReg1()));
- assert(emitGetInsSC(id) < 0x100);
- break;
+ case IF_T2_H1: // T2_H1 ............nnnn tttt....iiiiiiii R1 R2 imm8
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(emitGetInsSC(id) < 0x100);
+ break;
- case IF_T2_I0: // T2_I0 ..........W.nnnn rrrrrrrrrrrrrrrr R1 W, imm16
- assert(isGeneralRegister(id->idReg1()));
- assert(emitGetInsSC(id) < 0x10000);
- break;
+ case IF_T2_H2: // T2_H2 ............nnnn ........iiiiiiii R1 imm8
+ assert(isGeneralRegister(id->idReg1()));
+ assert(emitGetInsSC(id) < 0x100);
+ break;
- case IF_T2_N: // T2_N .....i......iiii .iiiddddiiiiiiii R1 imm16
- assert(isGeneralRegister(id->idReg1()));
- break;
+ case IF_T2_I0: // T2_I0 ..........W.nnnn rrrrrrrrrrrrrrrr R1 W, imm16
+ assert(isGeneralRegister(id->idReg1()));
+ assert(emitGetInsSC(id) < 0x10000);
+ break;
- case IF_T2_N2: // T2_N2 .....i......iiii .iiiddddiiiiiiii R1 imm16
- assert(isGeneralRegister(id->idReg1()));
- assert((size_t)emitGetInsSC(id) < emitDataSize());
- break;
+ case IF_T2_N: // T2_N .....i......iiii .iiiddddiiiiiiii R1 imm16
+ assert(isGeneralRegister(id->idReg1()));
+ break;
- case IF_T2_I1: // T2_I1 ................ rrrrrrrrrrrrrrrr imm16
- assert(emitGetInsSC(id) < 0x10000);
- break;
+ case IF_T2_N2: // T2_N2 .....i......iiii .iiiddddiiiiiiii R1 imm16
+ assert(isGeneralRegister(id->idReg1()));
+ assert((size_t)emitGetInsSC(id) < emitDataSize());
+ break;
- case IF_T2_K1: // T2_K1 ............nnnn ttttiiiiiiiiiiii R1 R2 imm12
- case IF_T2_M0: // T2_M0 .....i......nnnn .iiiddddiiiiiiii R1 R2 imm12
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(emitGetInsSC(id) < 0x1000);
- break;
+ case IF_T2_I1: // T2_I1 ................ rrrrrrrrrrrrrrrr imm16
+ assert(emitGetInsSC(id) < 0x10000);
+ break;
- case IF_T2_L0: // T2_L0 .....i.....Snnnn .iiiddddiiiiiiii R1 R2 S, imm8<<imm4
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isModImmConst(emitGetInsSC(id)));
- break;
+ case IF_T2_K1: // T2_K1 ............nnnn ttttiiiiiiiiiiii R1 R2 imm12
+ case IF_T2_M0: // T2_M0 .....i......nnnn .iiiddddiiiiiiii R1 R2 imm12
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(emitGetInsSC(id) < 0x1000);
+ break;
- case IF_T2_K4: // T2_K4 ........U....... ttttiiiiiiiiiiii R1 PC U, imm12
- case IF_T2_M1: // T2_M1 .....i.......... .iiiddddiiiiiiii R1 PC imm12
- assert(isGeneralRegister(id->idReg1()));
- assert(id->idReg2() == REG_PC);
- assert(emitGetInsSC(id) < 0x1000);
- break;
+ case IF_T2_L0: // T2_L0 .....i.....Snnnn .iiiddddiiiiiiii R1 R2 S, imm8<<imm4
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isModImmConst(emitGetInsSC(id)));
+ break;
- case IF_T2_K3: // T2_K3 ........U....... ....iiiiiiiiiiii PC U, imm12
- assert(id->idReg1() == REG_PC);
- assert(emitGetInsSC(id) < 0x1000);
- break;
+ case IF_T2_K4: // T2_K4 ........U....... ttttiiiiiiiiiiii R1 PC U, imm12
+ case IF_T2_M1: // T2_M1 .....i.......... .iiiddddiiiiiiii R1 PC imm12
+ assert(isGeneralRegister(id->idReg1()));
+ assert(id->idReg2() == REG_PC);
+ assert(emitGetInsSC(id) < 0x1000);
+ break;
- case IF_T2_K2: // T2_K2 ............nnnn ....iiiiiiiiiiii R1 imm12
- assert(isGeneralRegister(id->idReg1()));
- assert(emitGetInsSC(id) < 0x1000);
- break;
+ case IF_T2_K3: // T2_K3 ........U....... ....iiiiiiiiiiii PC U, imm12
+ assert(id->idReg1() == REG_PC);
+ assert(emitGetInsSC(id) < 0x1000);
+ break;
- case IF_T2_L1: // T2_L1 .....i.....S.... .iiiddddiiiiiiii R1 S, imm8<<imm4
- case IF_T2_L2: // T2_L2 .....i......nnnn .iii....iiiiiiii R1 imm8<<imm4
- assert(isGeneralRegister(id->idReg1()));
- assert(isModImmConst(emitGetInsSC(id)));
- break;
+ case IF_T2_K2: // T2_K2 ............nnnn ....iiiiiiiiiiii R1 imm12
+ assert(isGeneralRegister(id->idReg1()));
+ assert(emitGetInsSC(id) < 0x1000);
+ break;
- case IF_T1_J3: // T1_J3 .....dddiiiiiiii R1 PC imm8
- assert(isGeneralRegister(id->idReg1()));
- assert(id->idReg2() == REG_PC);
- assert(emitGetInsSC(id) < 0x100);
- break;
+ case IF_T2_L1: // T2_L1 .....i.....S.... .iiiddddiiiiiiii R1 S, imm8<<imm4
+ case IF_T2_L2: // T2_L2 .....i......nnnn .iii....iiiiiiii R1 imm8<<imm4
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isModImmConst(emitGetInsSC(id)));
+ break;
- case IF_T1_K: // T1_K ....cccciiiiiiii Branch imm8, cond4
- case IF_T1_M: // T1_M .....iiiiiiiiiii Branch imm11
- case IF_T2_J1: // T2_J1 .....Scccciiiiii ..j.jiiiiiiiiiii Branch imm20, cond4
- case IF_T2_J2: // T2_J2 .....Siiiiiiiiii ..j.jiiiiiiiiii. Branch imm24
- case IF_T2_N1: // T2_N .....i......iiii .iiiddddiiiiiiii R1 imm16
- case IF_T2_J3: // T2_J3 .....Siiiiiiiiii ..j.jiiiiiiiiii. Call imm24
- case IF_LARGEJMP:
- break;
+ case IF_T1_J3: // T1_J3 .....dddiiiiiiii R1 PC imm8
+ assert(isGeneralRegister(id->idReg1()));
+ assert(id->idReg2() == REG_PC);
+ assert(emitGetInsSC(id) < 0x100);
+ break;
- case IF_T2_VFP3:
- if (id->idOpSize() == EA_8BYTE)
- {
- assert(isDoubleReg(id->idReg1()));
- assert(isDoubleReg(id->idReg2()));
- assert(isDoubleReg(id->idReg3()));
- }
- else
- {
- assert(id->idOpSize() == EA_4BYTE);
- assert(isFloatReg(id->idReg1()));
- assert(isFloatReg(id->idReg2()));
- assert(isFloatReg(id->idReg3()));
- }
- break;
+ case IF_T1_K: // T1_K ....cccciiiiiiii Branch imm8, cond4
+ case IF_T1_M: // T1_M .....iiiiiiiiiii Branch imm11
+ case IF_T2_J1: // T2_J1 .....Scccciiiiii ..j.jiiiiiiiiiii Branch imm20, cond4
+ case IF_T2_J2: // T2_J2 .....Siiiiiiiiii ..j.jiiiiiiiiii. Branch imm24
+ case IF_T2_N1: // T2_N .....i......iiii .iiiddddiiiiiiii R1 imm16
+ case IF_T2_J3: // T2_J3 .....Siiiiiiiiii ..j.jiiiiiiiiii. Call imm24
+ case IF_LARGEJMP:
+ break;
- case IF_T2_VFP2:
- assert(isFloatReg(id->idReg1()));
- assert(isFloatReg(id->idReg2()));
- break;
-
- case IF_T2_VLDST:
- if (id->idOpSize() == EA_8BYTE)
- assert(isDoubleReg(id->idReg1()));
- else
+ case IF_T2_VFP3:
+ if (id->idOpSize() == EA_8BYTE)
+ {
+ assert(isDoubleReg(id->idReg1()));
+ assert(isDoubleReg(id->idReg2()));
+ assert(isDoubleReg(id->idReg3()));
+ }
+ else
+ {
+ assert(id->idOpSize() == EA_4BYTE);
+ assert(isFloatReg(id->idReg1()));
+ assert(isFloatReg(id->idReg2()));
+ assert(isFloatReg(id->idReg3()));
+ }
+ break;
+
+ case IF_T2_VFP2:
assert(isFloatReg(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(offsetFitsInVectorMem(emitGetInsSC(id)));
- break;
+ assert(isFloatReg(id->idReg2()));
+ break;
- case IF_T2_VMOVD:
- assert(id->idOpSize() == EA_8BYTE);
- if (id->idIns() == INS_vmov_d2i)
- {
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isDoubleReg(id->idReg3()));
- }
- else
- {
- assert(id->idIns() == INS_vmov_i2d);
- assert(isDoubleReg(id->idReg1()));
+ case IF_T2_VLDST:
+ if (id->idOpSize() == EA_8BYTE)
+ assert(isDoubleReg(id->idReg1()));
+ else
+ assert(isFloatReg(id->idReg1()));
assert(isGeneralRegister(id->idReg2()));
- assert(isGeneralRegister(id->idReg3()));
- }
- break;
+ assert(offsetFitsInVectorMem(emitGetInsSC(id)));
+ break;
- case IF_T2_VMOVS:
- assert(id->idOpSize() == EA_4BYTE);
- if (id->idIns() == INS_vmov_i2f)
- {
- assert(isFloatReg(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- }
- else
- {
- assert(id->idIns() == INS_vmov_f2i);
- assert(isGeneralRegister(id->idReg1()));
- assert(isFloatReg(id->idReg2()));
- }
- break;
+ case IF_T2_VMOVD:
+ assert(id->idOpSize() == EA_8BYTE);
+ if (id->idIns() == INS_vmov_d2i)
+ {
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isDoubleReg(id->idReg3()));
+ }
+ else
+ {
+ assert(id->idIns() == INS_vmov_i2d);
+ assert(isDoubleReg(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isGeneralRegister(id->idReg3()));
+ }
+ break;
- default:
- printf("unexpected format %s\n", emitIfName(id->idInsFmt()));
- assert(!"Unexpected format");
- break;
+ case IF_T2_VMOVS:
+ assert(id->idOpSize() == EA_4BYTE);
+ if (id->idIns() == INS_vmov_i2f)
+ {
+ assert(isFloatReg(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ }
+ else
+ {
+ assert(id->idIns() == INS_vmov_f2i);
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isFloatReg(id->idReg2()));
+ }
+ break;
+
+ default:
+ printf("unexpected format %s\n", emitIfName(id->idInsFmt()));
+ assert(!"Unexpected format");
+ break;
}
}
-#endif // DEBUG
+#endif // DEBUG
-bool emitter::emitInsMayWriteToGCReg(instrDesc *id)
+bool emitter::emitInsMayWriteToGCReg(instrDesc* id)
{
- instruction ins = id->idIns();
- insFormat fmt = id->idInsFmt();
+ instruction ins = id->idIns();
+ insFormat fmt = id->idInsFmt();
switch (fmt)
{
- // These are the formats with "destination" or "target" registers:
- case IF_T1_C: case IF_T1_D0: case IF_T1_E: case IF_T1_G: case IF_T1_H:
- case IF_T1_J0: case IF_T1_J1: case IF_T1_J2: case IF_T1_J3:
- case IF_T2_C0: case IF_T2_C1: case IF_T2_C2: case IF_T2_C3: case IF_T2_C4: case IF_T2_C5: case IF_T2_C6: case IF_T2_C10:
- case IF_T2_D0: case IF_T2_D1: case IF_T2_F1: case IF_T2_F2:
- case IF_T2_L0: case IF_T2_L1: case IF_T2_M0: case IF_T2_M1:
- case IF_T2_N: case IF_T2_N1: case IF_T2_N2: case IF_T2_VFP3: case IF_T2_VFP2: case IF_T2_VLDST:
- case IF_T2_E0: case IF_T2_E1: case IF_T2_E2:
- case IF_T2_G0: case IF_T2_G1: case IF_T2_H0: case IF_T2_H1:
- case IF_T2_K1: case IF_T2_K4:
- // Some formats with "destination" or "target" registers are actually used for store instructions, for the
- // "source" value written to memory.
- // Similarly, PUSH has a target register, indicating the start of the set of registers to push. POP
- // *does* write to at least one register, so we do not make that a special case.
- // Various compare/test instructions do not write (except to the flags). Technically "teq" does not need to be
- // be in this list because it has no forms matched above, but I'm putting it here for completeness.
- switch (ins)
- {
- case INS_str: case INS_strb: case INS_strh: case INS_strd:
- case INS_strex: case INS_strexb: case INS_strexd: case INS_strexh:
- case INS_push:
- case INS_cmp: case INS_cmn: case INS_tst: case INS_teq:
- return false;
+ // These are the formats with "destination" or "target" registers:
+ case IF_T1_C:
+ case IF_T1_D0:
+ case IF_T1_E:
+ case IF_T1_G:
+ case IF_T1_H:
+ case IF_T1_J0:
+ case IF_T1_J1:
+ case IF_T1_J2:
+ case IF_T1_J3:
+ case IF_T2_C0:
+ case IF_T2_C1:
+ case IF_T2_C2:
+ case IF_T2_C3:
+ case IF_T2_C4:
+ case IF_T2_C5:
+ case IF_T2_C6:
+ case IF_T2_C10:
+ case IF_T2_D0:
+ case IF_T2_D1:
+ case IF_T2_F1:
+ case IF_T2_F2:
+ case IF_T2_L0:
+ case IF_T2_L1:
+ case IF_T2_M0:
+ case IF_T2_M1:
+ case IF_T2_N:
+ case IF_T2_N1:
+ case IF_T2_N2:
+ case IF_T2_VFP3:
+ case IF_T2_VFP2:
+ case IF_T2_VLDST:
+ case IF_T2_E0:
+ case IF_T2_E1:
+ case IF_T2_E2:
+ case IF_T2_G0:
+ case IF_T2_G1:
+ case IF_T2_H0:
+ case IF_T2_H1:
+ case IF_T2_K1:
+ case IF_T2_K4:
+ // Some formats with "destination" or "target" registers are actually used for store instructions, for the
+ // "source" value written to memory.
+ // Similarly, PUSH has a target register, indicating the start of the set of registers to push. POP
+ // *does* write to at least one register, so we do not make that a special case.
+ // Various compare/test instructions do not write (except to the flags). Technically "teq" does not need to
+ // be
+ // be in this list because it has no forms matched above, but I'm putting it here for completeness.
+ switch (ins)
+ {
+ case INS_str:
+ case INS_strb:
+ case INS_strh:
+ case INS_strd:
+ case INS_strex:
+ case INS_strexb:
+ case INS_strexd:
+ case INS_strexh:
+ case INS_push:
+ case INS_cmp:
+ case INS_cmn:
+ case INS_tst:
+ case INS_teq:
+ return false;
+ default:
+ return true;
+ }
+ case IF_T2_VMOVS:
+ // VMOV.i2f reads from the integer register. Conversely VMOV.f2i writes to GC pointer-sized
+ // integer register that might have previously held GC pointers, so they need to be included.
+ assert(id->idGCref() == GCT_NONE);
+ return (ins == INS_vmov_f2i);
+
+ case IF_T2_VMOVD:
+ // VMOV.i2d reads from the integer registers. Conversely VMOV.d2i writes to GC pointer-sized
+ // integer registers that might have previously held GC pointers, so they need to be included.
+ assert(id->idGCref() == GCT_NONE);
+ return (ins == INS_vmov_d2i);
+
default:
- return true;
- }
- case IF_T2_VMOVS:
- // VMOV.i2f reads from the integer register. Conversely VMOV.f2i writes to GC pointer-sized
- // integer register that might have previously held GC pointers, so they need to be included.
- assert(id->idGCref() == GCT_NONE);
- return (ins == INS_vmov_f2i);
-
- case IF_T2_VMOVD:
- // VMOV.i2d reads from the integer registers. Conversely VMOV.d2i writes to GC pointer-sized
- // integer registers that might have previously held GC pointers, so they need to be included.
- assert(id->idGCref() == GCT_NONE);
- return (ins == INS_vmov_d2i);
-
- default:
- return false;
+ return false;
}
}
-bool emitter::emitInsWritesToLclVarStackLoc(instrDesc *id)
+bool emitter::emitInsWritesToLclVarStackLoc(instrDesc* id)
{
if (!id->idIsLclVar())
return false;
- instruction ins = id->idIns();
+ instruction ins = id->idIns();
// This list is related to the list of instructions used to store local vars in emitIns_S_R().
// We don't accept writing to float local vars.
switch (ins)
{
- case INS_strb:
- case INS_strh:
- case INS_str:
- return true;
- default:
- return false;
+ case INS_strb:
+ case INS_strh:
+ case INS_str:
+ return true;
+ default:
+ return false;
}
}
-bool emitter::emitInsMayWriteMultipleRegs(instrDesc *id)
+bool emitter::emitInsMayWriteMultipleRegs(instrDesc* id)
{
- instruction ins = id->idIns();
+ instruction ins = id->idIns();
switch (ins)
{
- case INS_ldm: case INS_ldmdb:
- case INS_pop:
- case INS_smlal:
- case INS_smull:
- case INS_umlal:
- case INS_umull:
- case INS_vmov_d2i:
- return true;
- default:
- return false;
+ case INS_ldm:
+ case INS_ldmdb:
+ case INS_pop:
+ case INS_smlal:
+ case INS_smull:
+ case INS_umlal:
+ case INS_umull:
+ case INS_vmov_d2i:
+ return true;
+ default:
+ return false;
}
}
/*****************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* Return a string that represents the given register.
*/
-const char * emitter::emitRegName(regNumber reg, emitAttr attr, bool varName)
+const char* emitter::emitRegName(regNumber reg, emitAttr attr, bool varName)
{
assert(reg < REG_COUNT);
- const char * rn = emitComp->compRegVarName(reg, varName, false);
+ const char* rn = emitComp->compRegVarName(reg, varName, false);
assert(strlen(rn) >= 1);
- return rn;
+ return rn;
}
-
-const char *emitter::emitFloatRegName(regNumber reg, emitAttr attr, bool varName)
+const char* emitter::emitFloatRegName(regNumber reg, emitAttr attr, bool varName)
{
assert(reg < REG_COUNT);
- const char * rn = emitComp->compRegVarName(reg, varName, true);
+ const char* rn = emitComp->compRegVarName(reg, varName, true);
assert(strlen(rn) >= 1);
- return rn;
+ return rn;
}
#endif // DEBUG
@@ -636,7 +674,7 @@ const char *emitter::emitFloatRegName(regNumber reg, emitAttr attr, bool varName
* Returns the base encoding of the given CPU instruction.
*/
-emitter::insFormat emitter::emitInsFormat(instruction ins)
+emitter::insFormat emitter::emitInsFormat(instruction ins)
{
// clang-format off
const static insFormat insFormats[] =
@@ -656,13 +694,13 @@ emitter::insFormat emitter::emitInsFormat(instruction ins)
assert(ins < ArrLen(insFormats));
assert((insFormats[ins] != IF_NONE));
- return insFormats[ins];
+ return insFormats[ins];
}
// INST_FP is 1
-#define LD 2
-#define ST 4
-#define CMP 8
+#define LD 2
+#define ST 4
+#define CMP 8
// clang-format off
/*static*/ const BYTE CodeGenInterface::instInfo[] =
@@ -684,7 +722,7 @@ emitter::insFormat emitter::emitInsFormat(instruction ins)
* Returns true if the instruction is some kind of load instruction
*/
-bool emitter::emitInsIsLoad(instruction ins)
+bool emitter::emitInsIsLoad(instruction ins)
{
// We have pseudo ins like lea which are not included in emitInsLdStTab.
if (ins < ArrLen(CodeGenInterface::instInfo))
@@ -698,7 +736,7 @@ bool emitter::emitInsIsLoad(instruction ins)
* Returns true if the instruction is some kind of compare or test instruction
*/
-bool emitter::emitInsIsCompare(instruction ins)
+bool emitter::emitInsIsCompare(instruction ins)
{
// We have pseudo ins like lea which are not included in emitInsLdStTab.
if (ins < ArrLen(CodeGenInterface::instInfo))
@@ -712,7 +750,7 @@ bool emitter::emitInsIsCompare(instruction ins)
* Returns true if the instruction is some kind of store instruction
*/
-bool emitter::emitInsIsStore(instruction ins)
+bool emitter::emitInsIsStore(instruction ins)
{
// We have pseudo ins like lea which are not included in emitInsLdStTab.
if (ins < ArrLen(CodeGenInterface::instInfo))
@@ -726,19 +764,18 @@ bool emitter::emitInsIsStore(instruction ins)
* Returns true if the instruction is some kind of load/store instruction
*/
-bool emitter::emitInsIsLoadOrStore(instruction ins)
+bool emitter::emitInsIsLoadOrStore(instruction ins)
{
// We have pseudo ins like lea which are not included in emitInsLdStTab.
if (ins < ArrLen(CodeGenInterface::instInfo))
- return (CodeGenInterface::instInfo[ins] & (LD|ST)) ? true : false;
+ return (CodeGenInterface::instInfo[ins] & (LD | ST)) ? true : false;
else
return false;
-
}
-#undef LD
-#undef ST
-#undef CMP
+#undef LD
+#undef ST
+#undef CMP
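
The LD/ST/CMP bits above come from a per-instruction attribute table (CodeGenInterface::instInfo[]) that the emitInsIsLoad/Store/Compare helpers simply mask against. A toy version of the same table-plus-mask pattern (hypothetical subset with made-up entries; the real table is generated from the instruction description headers and covers every instruction):

#include <cassert>

// Per-instruction attribute bits, mirroring the LD/ST/CMP defines above.
const unsigned char LD_BIT  = 2;
const unsigned char ST_BIT  = 4;
const unsigned char CMP_BIT = 8;

enum instruction { INS_lea, INS_ldr, INS_str, INS_cmp, INS_COUNT };

// Tiny stand-in for CodeGenInterface::instInfo[].
static const unsigned char instInfo[INS_COUNT] = {
    /* lea */ 0, // pseudo-instruction: neither load nor store
    /* ldr */ LD_BIT,
    /* str */ ST_BIT,
    /* cmp */ CMP_BIT,
};

static bool insIsLoadOrStore(instruction ins)
{
    return (instInfo[ins] & (LD_BIT | ST_BIT)) != 0;
}

int main()
{
    assert(insIsLoadOrStore(INS_ldr));
    assert(insIsLoadOrStore(INS_str));
    assert(!insIsLoadOrStore(INS_cmp));
    assert(!insIsLoadOrStore(INS_lea));
    return 0;
}
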
/*****************************************************************************
*
@@ -886,279 +923,279 @@ size_t emitter::emitInsCode(instruction ins, insFormat fmt)
switch (insFmt)
{
- case IF_EN9:
- for (index=0; index<9; index++)
- {
- if (fmt == formatEncode9[index])
+ case IF_EN9:
+ for (index = 0; index < 9; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode9[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN8:
- for (index=0; index<8; index++)
- {
- if (fmt == formatEncode8[index])
+ case IF_EN8:
+ for (index = 0; index < 8; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode8[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN6A:
- for (index=0; index<6; index++)
- {
- if (fmt == formatEncode6A[index])
+ case IF_EN6A:
+ for (index = 0; index < 6; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode6A[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN6B:
- for (index=0; index<6; index++)
- {
- if (fmt == formatEncode6B[index])
+ case IF_EN6B:
+ for (index = 0; index < 6; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode6B[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN5A:
- for (index=0; index<5; index++)
- {
- if (fmt == formatEncode5A[index])
+ case IF_EN5A:
+ for (index = 0; index < 5; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode5A[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN5B:
- for (index=0; index<5; index++)
- {
- if (fmt == formatEncode5B[index])
+ case IF_EN5B:
+ for (index = 0; index < 5; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode5B[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN4A:
- for (index=0; index<4; index++)
- {
- if (fmt == formatEncode4A[index])
+ case IF_EN4A:
+ for (index = 0; index < 4; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode4A[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN4B:
- for (index=0; index<4; index++)
- {
- if (fmt == formatEncode4B[index])
+ case IF_EN4B:
+ for (index = 0; index < 4; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode4B[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3A:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3A[index])
+ case IF_EN3A:
+ for (index = 0; index < 3; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode3A[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3B:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3B[index])
+ case IF_EN3B:
+ for (index = 0; index < 3; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode3B[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
- case IF_EN3C:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3C[index])
+ break;
+ case IF_EN3C:
+ for (index = 0; index < 3; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode3C[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
- case IF_EN3D:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3D[index])
+ break;
+ case IF_EN3D:
+ for (index = 0; index < 3; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode3D[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
- case IF_EN3E:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3E[index])
+ break;
+ case IF_EN3E:
+ for (index = 0; index < 3; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode3E[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
- case IF_EN3F:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3F[index])
+ break;
+ case IF_EN3F:
+ for (index = 0; index < 3; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode3F[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2A:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2A[index])
+ case IF_EN2A:
+ for (index = 0; index < 2; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode2A[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
- case IF_EN2B:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2B[index])
+ break;
+ case IF_EN2B:
+ for (index = 0; index < 2; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode2B[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
- case IF_EN2C:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2C[index])
+ break;
+ case IF_EN2C:
+ for (index = 0; index < 2; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode2C[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
- case IF_EN2D:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2D[index])
+ break;
+ case IF_EN2D:
+ for (index = 0; index < 2; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode2D[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
- case IF_EN2E:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2E[index])
+ break;
+ case IF_EN2E:
+ for (index = 0; index < 2; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode2E[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
- case IF_EN2F:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2F[index])
+ break;
+ case IF_EN2F:
+ for (index = 0; index < 2; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode2F[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2G:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2G[index])
+ case IF_EN2G:
+ for (index = 0; index < 2; index++)
{
- found = true;
- break;
+ if (fmt == formatEncode2G[index])
+ {
+ found = true;
+ break;
+ }
}
- }
- break;
+ break;
- default:
- index = 0;
- found = true;
- break;
+ default:
+ index = 0;
+ found = true;
+ break;
}
assert(found);
switch (index)
{
- case 0:
- assert(ins < ArrLen(insCodes1));
- code = insCodes1[ins];
- break;
- case 1:
- assert(ins < ArrLen(insCodes2));
- code = insCodes2[ins];
- break;
- case 2:
- assert(ins < ArrLen(insCodes3));
- code = insCodes3[ins];
- break;
- case 3:
- assert(ins < ArrLen(insCodes4));
- code = insCodes4[ins];
- break;
- case 4:
- assert(ins < ArrLen(insCodes5));
- code = insCodes5[ins];
- break;
- case 5:
- assert(ins < ArrLen(insCodes6));
- code = insCodes6[ins];
- break;
- case 6:
- assert(ins < ArrLen(insCodes7));
- code = insCodes7[ins];
- break;
- case 7:
- assert(ins < ArrLen(insCodes8));
- code = insCodes8[ins];
- break;
- case 8:
- assert(ins < ArrLen(insCodes9));
- code = insCodes9[ins];
- break;
+ case 0:
+ assert(ins < ArrLen(insCodes1));
+ code = insCodes1[ins];
+ break;
+ case 1:
+ assert(ins < ArrLen(insCodes2));
+ code = insCodes2[ins];
+ break;
+ case 2:
+ assert(ins < ArrLen(insCodes3));
+ code = insCodes3[ins];
+ break;
+ case 3:
+ assert(ins < ArrLen(insCodes4));
+ code = insCodes4[ins];
+ break;
+ case 4:
+ assert(ins < ArrLen(insCodes5));
+ code = insCodes5[ins];
+ break;
+ case 5:
+ assert(ins < ArrLen(insCodes6));
+ code = insCodes6[ins];
+ break;
+ case 6:
+ assert(ins < ArrLen(insCodes7));
+ code = insCodes7[ins];
+ break;
+ case 7:
+ assert(ins < ArrLen(insCodes8));
+ code = insCodes8[ins];
+ break;
+ case 8:
+ assert(ins < ArrLen(insCodes9));
+ code = insCodes9[ins];
+ break;
}
-
+
assert((code != BAD_CODE));
- return code;
+ return code;
}
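
emitInsCode() above works in two steps: the instruction's format group (IF_EN9 ... IF_EN2G) is searched to find which slot the requested format occupies, and that index then selects one of the per-slot code tables (insCodes1 ... insCodes9). A shrunken sketch of that two-step lookup (hypothetical formats and illustrative code values, shown only for the shape of the scheme):

#include <cassert>
#include <cstddef>

enum insFormat { IF_T1_D0, IF_T2_C3, IF_FMT_COUNT };

// One machine-code template per format the instruction supports,
// in the same priority order a format-group table would list them.
struct EncodingSet
{
    insFormat formats[2];
    size_t    codes[2];
    int       count;
};

static size_t pickCode(const EncodingSet& e, insFormat fmt)
{
    // Step 1: find which slot (index) matches the requested format.
    int index = -1;
    for (int i = 0; i < e.count; i++)
    {
        if (e.formats[i] == fmt)
        {
            index = i;
            break;
        }
    }
    assert(index != -1); // mirrors the assert(found) above

    // Step 2: return the code template stored for that slot.
    return e.codes[index];
}

int main()
{
    // Illustrative templates only, not asserted to be the real encodings.
    EncodingSet mov = {{IF_T1_D0, IF_T2_C3}, {0x4600, 0xEA4F0000}, 2};
    assert(pickCode(mov, IF_T1_D0) == 0x4600);
    assert(pickCode(mov, IF_T2_C3) == 0xEA4F0000);
    return 0;
}
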
/*****************************************************************************
@@ -1166,8 +1203,8 @@ size_t emitter::emitInsCode(instruction ins, insFormat fmt)
* Return the code size of the given instruction format. The 'insSize' return type enum
* indicates a 16 bit, 32 bit, or 48 bit instruction.
*/
-
-emitter::insSize emitter::emitInsSize(insFormat insFmt)
+
+emitter::insSize emitter::emitInsSize(insFormat insFmt)
{
if ((insFmt >= IF_T1_A) && (insFmt < IF_T2_A))
return ISZ_16BIT;
@@ -1188,9 +1225,9 @@ emitter::insSize emitter::emitInsSize(insFormat insFmt)
* using the special modified immediate constant available in Thumb
*/
-/*static*/ bool emitter::isModImmConst(int val32)
+/*static*/ bool emitter::isModImmConst(int val32)
{
- unsigned uval32 = (unsigned) val32;
+ unsigned uval32 = (unsigned)val32;
unsigned imm8 = uval32 & 0xff;
/* encode = 0000x */
@@ -1214,10 +1251,11 @@ emitter::insSize emitter::emitInsSize(insFormat insFmt)
unsigned mask32 = 0x00000ff;
- unsigned encode = 31; /* 11111 */
+ unsigned encode = 31; /* 11111 */
unsigned temp;
- do {
+ do
+ {
mask32 <<= 1;
temp = uval32 & ~mask32;
if (temp == 0)
@@ -1228,7 +1266,6 @@ emitter::insSize emitter::emitInsSize(insFormat insFmt)
return false;
}
-
/*****************************************************************************
*
* encodeModImmConst() returns the special ARM 12-bit immediate encoding.
@@ -1238,7 +1275,7 @@ emitter::insSize emitter::emitInsSize(insFormat insFmt)
/*static*/ int emitter::encodeModImmConst(int val32)
{
- unsigned uval32 = (unsigned) val32;
+ unsigned uval32 = (unsigned)val32;
unsigned imm8 = uval32 & 0xff;
unsigned encode = imm8 >> 7;
unsigned imm32a;
@@ -1273,14 +1310,15 @@ emitter::insSize emitter::emitInsSize(insFormat insFmt)
/* encode = 0011x */
if (imm32c == uval32)
{
- encode +=6;
+ encode += 6;
goto DONE;
}
mask32 = 0x00000ff;
- encode = 31; /* 11111 */
- do {
+ encode = 31; /* 11111 */
+ do
+ {
mask32 <<= 1;
temp = uval32 & ~mask32;
if (temp == 0)
@@ -1299,12 +1337,12 @@ DONE:
unsigned result = (encode << 7) | (imm8 & 0x7f);
assert(result <= 0x0fff);
assert(result >= 0);
- return (int) result;
+ return (int)result;
}
/*****************************************************************************
*
- * emitIns_valid_imm_for_alu() returns true when the immediate 'imm'
+ * emitIns_valid_imm_for_alu() returns true when the immediate 'imm'
* can be encoded using the 12-bit funky Arm immediate encoding
*/
/*static*/ bool emitter::emitIns_valid_imm_for_alu(int imm)
@@ -1316,23 +1354,23 @@ DONE:
/*****************************************************************************
*
- * emitIns_valid_imm_for_mov() returns true when the immediate 'imm'
+ * emitIns_valid_imm_for_mov() returns true when the immediate 'imm'
* can be encoded using a single mov or mvn instruction.
*/
/*static*/ bool emitter::emitIns_valid_imm_for_mov(int imm)
{
- if ((imm & 0x0000ffff) == imm) // 16-bit immediate
+ if ((imm & 0x0000ffff) == imm) // 16-bit immediate
return true;
- if (isModImmConst(imm)) // funky arm immediate
+ if (isModImmConst(imm)) // funky arm immediate
return true;
- if (isModImmConst(~imm)) // funky arm immediate via mvn
+ if (isModImmConst(~imm)) // funky arm immediate via mvn
return true;
return false;
}
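
The isModImmConst(imm) / isModImmConst(~imm) checks above (and the matching encodeModImmConst() earlier) test the Thumb-2 "modified immediate" pattern: a repeated-byte value, or an 8-bit constant with its top bit set rotated right by 8..31 bits. A standalone sketch of that test (a hypothetical helper following the architectural rules, not the JIT's exact code path):

#include <cassert>
#include <cstdint>

// Sketch of the Thumb-2 "modified immediate" test relied on above.
static bool looksLikeModImmConst(uint32_t v)
{
    uint32_t b = v & 0xff;
    if (v == b)                                      return true; // 000000XY
    if (v == ((b << 16) | b))                        return true; // 00XY00XY
    if (v == ((b << 24) | (b << 8)))                 return true; // XY00XY00
    if (v == ((b << 24) | (b << 16) | (b << 8) | b)) return true; // XYXYXYXY

    // An 8-bit value with its top bit set, rotated right by 8..31 bits.
    for (unsigned rot = 8; rot < 32; rot++)
    {
        uint32_t unrotated = (v << rot) | (v >> (32 - rot)); // rotate left to undo the ROR
        if ((unrotated <= 0xff) && ((unrotated & 0x80) != 0))
            return true;
    }
    return false;
}

int main()
{
    assert(looksLikeModImmConst(0x000000ab));  // plain byte
    assert(looksLikeModImmConst(0x00ff00ff));  // repeated pattern
    assert(looksLikeModImmConst(0x0003fc00));  // 0xff rotated into place
    assert(!looksLikeModImmConst(0x00012345)); // spans more than 8 significant bits
    return 0;
}
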
/*****************************************************************************
*
- * emitIns_valid_imm_for_small_mov() returns true when the immediate 'imm'
+ * emitIns_valid_imm_for_small_mov() returns true when the immediate 'imm'
* can be encoded using a single 2-byte mov instruction.
*/
/*static*/ bool emitter::emitIns_valid_imm_for_small_mov(regNumber reg, int imm, insFlags flags)
@@ -1342,23 +1380,23 @@ DONE:
/*****************************************************************************
*
- * emitIns_valid_imm_for_add() returns true when the immediate 'imm'
+ * emitIns_valid_imm_for_add() returns true when the immediate 'imm'
* can be encoded using a single add or sub instruction.
*/
/*static*/ bool emitter::emitIns_valid_imm_for_add(int imm, insFlags flags)
{
- if ((unsigned_abs(imm) <= 0x00000fff) && (flags != INS_FLAGS_SET)) // 12-bit immediate via add/sub
+ if ((unsigned_abs(imm) <= 0x00000fff) && (flags != INS_FLAGS_SET)) // 12-bit immediate via add/sub
return true;
- if (isModImmConst(imm)) // funky arm immediate
+ if (isModImmConst(imm)) // funky arm immediate
return true;
- if (isModImmConst(-imm)) // funky arm immediate via sub
+ if (isModImmConst(-imm)) // funky arm immediate via sub
return true;
return false;
}
/*****************************************************************************
*
- * emitIns_valid_imm_for_add_sp() returns true when the immediate 'imm'
+ * emitIns_valid_imm_for_add_sp() returns true when the immediate 'imm'
* can be encoded in "add Rd,SP,i10".
*/
/*static*/ bool emitter::emitIns_valid_imm_for_add_sp(int imm)
@@ -1370,24 +1408,24 @@ DONE:
#ifdef ARM_HAZARD_AVOIDANCE
// This function is called whenever we about to emit an unconditional branch instruction
-// that could be encoded using a T2 instruction
+// that could be encoded using a T2 instruction
// It returns true if we need to mark the instruction via idKraitNop(true)
//
-bool emitter::emitKraitHazardActive(instrDesc * id)
+bool emitter::emitKraitHazardActive(instrDesc* id)
{
- // Does the current instruction represent an
+ // Does the current instruction represent an
// unconditional branch instruction that is subject to the Krait errata
//
if (id->idIsKraitBranch())
{
// Only need to handle the Krait Hazard when we are Jitting
//
- if ((emitComp->opts.eeFlags & CORJIT_FLG_PREJIT) == 0)
+ if ((emitComp->opts.eeFlags & CORJIT_FLG_PREJIT) == 0)
{
// Have we seen the necessary number of T1 instructions?
if (emitCurInstrCntT1 >= MAX_INSTR_COUNT_T1)
{
- return true; /* Assume that we need to add a nopw as well */
+ return true; /* Assume that we need to add a nopw as well */
}
}
}
@@ -1401,11 +1439,11 @@ bool emitter::emitKraitHazardActive(instrDesc * id)
* Add an instruction with no operands.
*/
-void emitter::emitIns(instruction ins)
+void emitter::emitIns(instruction ins)
{
- instrDesc * id = emitNewInstrSmall(EA_4BYTE);
- insFormat fmt = emitInsFormat(ins);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstrSmall(EA_4BYTE);
+ insFormat fmt = emitInsFormat(ins);
+ insSize isz = emitInsSize(fmt);
assert((fmt == IF_T1_A) || (fmt == IF_T2_A));
@@ -1417,108 +1455,105 @@ void emitter::emitIns(instruction ins)
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction with a single immediate value.
*/
-void emitter::emitIns_I(instruction ins,
- emitAttr attr,
- ssize_t imm)
+void emitter::emitIns_I(instruction ins, emitAttr attr, ssize_t imm)
{
- insFormat fmt = IF_NONE;
- bool hasLR = false;
- bool hasPC = false;
- bool useT2 = false;
- bool onlyT1 = false;
+ insFormat fmt = IF_NONE;
+ bool hasLR = false;
+ bool hasPC = false;
+ bool useT2 = false;
+ bool onlyT1 = false;
/* Figure out the encoding format of the instruction */
switch (ins)
{
#ifdef FEATURE_ITINSTRUCTION
- case INS_it:
- case INS_itt:
- case INS_ite:
- case INS_ittt:
- case INS_itte:
- case INS_itet:
- case INS_itee:
- case INS_itttt:
- case INS_ittte:
- case INS_ittet:
- case INS_ittee:
- case INS_itett:
- case INS_itete:
- case INS_iteet:
- case INS_iteee:
- assert((imm & 0x0F) == imm);
- fmt = IF_T1_B;
- attr = EA_4BYTE;
- break;
+ case INS_it:
+ case INS_itt:
+ case INS_ite:
+ case INS_ittt:
+ case INS_itte:
+ case INS_itet:
+ case INS_itee:
+ case INS_itttt:
+ case INS_ittte:
+ case INS_ittet:
+ case INS_ittee:
+ case INS_itett:
+ case INS_itete:
+ case INS_iteet:
+ case INS_iteee:
+ assert((imm & 0x0F) == imm);
+ fmt = IF_T1_B;
+ attr = EA_4BYTE;
+ break;
#endif // FEATURE_ITINSTRUCTION
- case INS_push:
- assert((imm & 0xA000) == 0); // Cannot push PC or SP
+ case INS_push:
+ assert((imm & 0xA000) == 0); // Cannot push PC or SP
- if (imm & 0x4000) // Is the LR being pushed?
- hasLR = true;
+ if (imm & 0x4000) // Is the LR being pushed?
+ hasLR = true;
- goto COMMON_PUSH_POP;
+ goto COMMON_PUSH_POP;
- case INS_pop:
- assert((imm & 0x2000) == 0); // Cannot pop SP
- assert((imm & 0xC000) != 0xC000); // Cannot pop both PC and LR
+ case INS_pop:
+ assert((imm & 0x2000) == 0); // Cannot pop SP
+ assert((imm & 0xC000) != 0xC000); // Cannot pop both PC and LR
- if (imm & 0x8000) // Is the PC being popped?
- hasPC = true;
- if (imm & 0x4000) // Is the LR being popped?
- {
- hasLR = true;
- useT2 = true;
- }
+ if (imm & 0x8000) // Is the PC being popped?
+ hasPC = true;
+ if (imm & 0x4000) // Is the LR being popped?
+ {
+ hasLR = true;
+ useT2 = true;
+ }
-COMMON_PUSH_POP:
+ COMMON_PUSH_POP:
- if (((imm-1) & imm) == 0) // Is only one or zero bits set in imm?
- {
- if (((imm == 0) && !hasLR) || // imm has no bits set, but hasLR is set
- (!hasPC && !hasLR) ) // imm has one bit set, and neither of hasPC/hasLR are set
+ if (((imm - 1) & imm) == 0) // Is only one or zero bits set in imm?
{
- onlyT1 = true; // if only one bit is set we must use the T1 encoding
+ if (((imm == 0) && !hasLR) || // imm has no bits set, but hasLR is set
+ (!hasPC && !hasLR)) // imm has one bit set, and neither of hasPC/hasLR are set
+ {
+ onlyT1 = true; // if only one bit is set we must use the T1 encoding
+ }
}
- }
- imm &= ~0xE000; // ensure that PC, LR and SP bits are removed from imm
+ imm &= ~0xE000; // ensure that PC, LR and SP bits are removed from imm
- if (((imm & 0x00ff) == imm) && !useT2)
- {
- fmt = IF_T1_L1;
- }
- else if (!onlyT1)
- {
- fmt = IF_T2_I1;
- }
- else
- {
- // We have to use the Thumb-2 push single register encoding
- regNumber reg = genRegNumFromMask(imm);
- emitIns_R(ins, attr, reg);
- return;
- }
+ if (((imm & 0x00ff) == imm) && !useT2)
+ {
+ fmt = IF_T1_L1;
+ }
+ else if (!onlyT1)
+ {
+ fmt = IF_T2_I1;
+ }
+ else
+ {
+ // We have to use the Thumb-2 push single register encoding
+ regNumber reg = genRegNumFromMask(imm);
+ emitIns_R(ins, attr, reg);
+ return;
+ }
- //
- // Encode the PC and LR bits as the lowest two bits
- //
- imm <<= 2;
- if (hasPC)
- imm |= 2;
- if (hasLR)
- imm |= 1;
+ //
+ // Encode the PC and LR bits as the lowest two bits
+ //
+ imm <<= 2;
+ if (hasPC)
+ imm |= 2;
+ if (hasLR)
+ imm |= 1;
- assert(imm != 0);
+ assert(imm != 0);
- break;
+ break;
#if 0
// TODO-ARM-Cleanup: Enable or delete.
@@ -1534,30 +1569,26 @@ COMMON_PUSH_POP:
break;
#endif
- case INS_dmb:
- case INS_ism:
- if ((imm & 0x000f) == imm)
- {
- fmt = IF_T2_B;
- attr = EA_4BYTE;
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
-
- default:
- unreached();
+ case INS_dmb:
+ case INS_ism:
+ if ((imm & 0x000f) == imm)
+ {
+ fmt = IF_T2_B;
+ attr = EA_4BYTE;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
+ break;
+
+ default:
+ unreached();
}
- assert((fmt == IF_T1_B) ||
- (fmt == IF_T1_L0) ||
- (fmt == IF_T1_L1) ||
- (fmt == IF_T2_I1) ||
- (fmt == IF_T2_B));
+ assert((fmt == IF_T1_B) || (fmt == IF_T1_L0) || (fmt == IF_T1_L1) || (fmt == IF_T2_I1) || (fmt == IF_T2_B));
- instrDesc * id = emitNewInstrSC(attr, imm);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstrSC(attr, imm);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
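
The push/pop handling in emitIns_I above leans on two mask tricks: ((imm - 1) & imm) == 0 tests "at most one bit set" (which forces the single-register fallback), and after stripping the SP/LR/PC bits the remaining register mask is shifted up two places so the PC and LR flags can ride in the low bits of the stored constant. A small sketch of those two tricks in isolation (hypothetical helper names, same bit layout as the code above):

#include <cassert>
#include <cstdint>

// "At most one bit set" test used above to decide whether the T1 encoding is forced.
static bool atMostOneBitSet(uint32_t mask)
{
    return ((mask - 1) & mask) == 0; // also true for mask == 0
}

// Pack a register mask plus PC/LR flags the way emitIns_I() stores them:
// SP/LR/PC bits removed, register bits shifted up by two, PC in bit 1, LR in bit 0.
static uint32_t packPushPopImm(uint32_t regMask, bool hasPC, bool hasLR)
{
    uint32_t imm = (regMask & ~0xE000u) << 2;
    if (hasPC)
        imm |= 2;
    if (hasLR)
        imm |= 1;
    return imm;
}

int main()
{
    assert(atMostOneBitSet(0x0));
    assert(atMostOneBitSet(0x8));
    assert(!atMostOneBitSet(0xC));

    // pop {r0, r4, pc}: r0|r4 = 0x11, with the PC flag carried separately.
    assert(packPushPopImm(0x11, /*hasPC*/ true, /*hasLR*/ false) == ((0x11u << 2) | 2));
    return 0;
}
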
@@ -1567,56 +1598,52 @@ COMMON_PUSH_POP:
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction referencing a single register.
*/
-void emitter::emitIns_R(instruction ins,
- emitAttr attr,
- regNumber reg)
+void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_pop:
- case INS_push:
- if (isLowRegister(reg))
- {
- int regmask = 1 << ((int) reg);
- emitIns_I(ins, attr, regmask);
- return;
- }
- assert(size == EA_PTRSIZE);
- fmt = IF_T2_E2;
- break;
+ case INS_pop:
+ case INS_push:
+ if (isLowRegister(reg))
+ {
+ int regmask = 1 << ((int)reg);
+ emitIns_I(ins, attr, regmask);
+ return;
+ }
+ assert(size == EA_PTRSIZE);
+ fmt = IF_T2_E2;
+ break;
- case INS_vmrs:
- assert(size == EA_PTRSIZE);
- fmt = IF_T2_E2;
- break;
+ case INS_vmrs:
+ assert(size == EA_PTRSIZE);
+ fmt = IF_T2_E2;
+ break;
- case INS_bx:
- assert(size == EA_PTRSIZE);
- fmt = IF_T1_D1;
- break;
- case INS_rsb:
- case INS_mvn:
- emitIns_R_R_I(ins, attr, reg, reg, 0);
- return;
-
- default:
- unreached();
+ case INS_bx:
+ assert(size == EA_PTRSIZE);
+ fmt = IF_T1_D1;
+ break;
+ case INS_rsb:
+ case INS_mvn:
+ emitIns_R_R_I(ins, attr, reg, reg, 0);
+ return;
+
+ default:
+ unreached();
}
- assert((fmt == IF_T1_D1) ||
- (fmt == IF_T2_E2));
+ assert((fmt == IF_T1_D1) || (fmt == IF_T2_E2));
- instrDesc * id = emitNewInstrSmall(attr);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstrSmall(attr);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -1627,100 +1654,96 @@ void emitter::emitIns_R(instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction referencing a register and a constant.
*/
-void emitter::emitIns_R_I(instruction ins,
- emitAttr attr,
- regNumber reg,
- int imm,
- insFlags flags /* = INS_FLAGS_DONT_CARE */)
+void emitter::emitIns_R_I(
+ instruction ins, emitAttr attr, regNumber reg, int imm, insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
- insFormat fmt = IF_NONE;
- insFlags sf = INS_FLAGS_DONT_CARE;
+ insFormat fmt = IF_NONE;
+ insFlags sf = INS_FLAGS_DONT_CARE;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_add:
- case INS_sub:
- if ((reg == REG_SP) && insDoesNotSetFlags(flags) && ((imm & 0x01fc) == imm))
- {
- fmt = IF_T1_F;
- sf = INS_FLAGS_NOT_SET;
- }
- else if (isLowRegister(reg) && insSetsFlags(flags) && (unsigned_abs(imm) <= 0x00ff))
- {
- if (imm < 0)
+ case INS_add:
+ case INS_sub:
+ if ((reg == REG_SP) && insDoesNotSetFlags(flags) && ((imm & 0x01fc) == imm))
{
- assert((ins == INS_add) || (ins == INS_sub));
- if (ins == INS_add)
- ins = INS_sub;
- else // ins == INS_sub
- ins = INS_add;
- imm = -imm;
+ fmt = IF_T1_F;
+ sf = INS_FLAGS_NOT_SET;
}
- fmt = IF_T1_J0;
- sf = INS_FLAGS_SET;
- }
- else
- {
- // otherwise we have to use a Thumb-2 encoding
+ else if (isLowRegister(reg) && insSetsFlags(flags) && (unsigned_abs(imm) <= 0x00ff))
+ {
+ if (imm < 0)
+ {
+ assert((ins == INS_add) || (ins == INS_sub));
+ if (ins == INS_add)
+ ins = INS_sub;
+ else // ins == INS_sub
+ ins = INS_add;
+ imm = -imm;
+ }
+ fmt = IF_T1_J0;
+ sf = INS_FLAGS_SET;
+ }
+ else
+ {
+ // otherwise we have to use a Thumb-2 encoding
+ emitIns_R_R_I(ins, attr, reg, reg, imm, flags);
+ return;
+ }
+ break;
+
+ case INS_adc:
emitIns_R_R_I(ins, attr, reg, reg, imm, flags);
return;
- }
- break;
- case INS_adc:
- emitIns_R_R_I(ins, attr, reg, reg, imm, flags);
- return;
+ case INS_vpush:
+ case INS_vpop:
+ assert(imm > 0);
+ if (attr == EA_8BYTE)
+ {
+ assert(isDoubleReg(reg));
+ assert(imm <= 16);
+ imm *= 2;
+ }
+ else
+ {
+ assert(attr == EA_4BYTE);
+ assert(isFloatReg(reg));
+ assert(imm <= 16);
+ }
+ assert(((reg - REG_F0) + imm) <= 32);
+ imm *= 4;
- case INS_vpush:
- case INS_vpop:
- assert(imm > 0);
- if (attr == EA_8BYTE)
- {
- assert(isDoubleReg(reg));
- assert(imm <= 16);
- imm *= 2;
- }
- else
- {
- assert(attr == EA_4BYTE);
- assert(isFloatReg(reg));
- assert(imm <= 16);
- }
- assert(((reg-REG_F0)+imm) <= 32);
- imm *= 4;
+ if (ins == INS_vpush)
+ imm = -imm;
- if (ins == INS_vpush)
- imm = -imm;
+ sf = INS_FLAGS_NOT_SET;
+ fmt = IF_T2_VLDST;
+ break;
- sf = INS_FLAGS_NOT_SET;
- fmt = IF_T2_VLDST;
- break;
-
- case INS_stm:
+ case INS_stm:
{
- sf = INS_FLAGS_NOT_SET;
-
+ sf = INS_FLAGS_NOT_SET;
+
bool hasLR = false;
bool hasPC = false;
bool useT2 = false;
bool onlyT1 = false;
- assert((imm & 0x2000) == 0); // Cannot pop SP
- assert((imm & 0xC000) != 0xC000); // Cannot pop both PC and LR
- assert((imm & 0xFFFF0000) == 0); // Can only contain lower 16 bits
-
- if (imm & 0x8000) // Is the PC being popped?
+ assert((imm & 0x2000) == 0); // Cannot pop SP
+ assert((imm & 0xC000) != 0xC000); // Cannot pop both PC and LR
+ assert((imm & 0xFFFF0000) == 0); // Can only contain lower 16 bits
+
+ if (imm & 0x8000) // Is the PC being popped?
hasPC = true;
- if (imm & 0x4000) // Is the LR being pushed?
+ if (imm & 0x4000) // Is the LR being pushed?
{
hasLR = true;
useT2 = true;
@@ -1729,16 +1752,16 @@ void emitter::emitIns_R_I(instruction ins,
if (!isLowRegister(reg))
useT2 = true;
- if (((imm-1) & imm) == 0) // Is only one or zero bits set in imm?
+ if (((imm - 1) & imm) == 0) // Is only one or zero bits set in imm?
{
- if (((imm == 0) && !hasLR) || // imm has no bits set, but hasLR is set
- (!hasPC && !hasLR) ) // imm has one bit set, and neither of hasPC/hasLR are set
+ if (((imm == 0) && !hasLR) || // imm has no bits set, but hasLR is set
+ (!hasPC && !hasLR)) // imm has one bit set, and neither of hasPC/hasLR are set
{
- onlyT1 = true; // if only one bit is set we must use the T1 encoding
+ onlyT1 = true; // if only one bit is set we must use the T1 encoding
}
}
- imm &= ~0xE000; // ensure that PC, LR and SP bits are removed from imm
+ imm &= ~0xE000; // ensure that PC, LR and SP bits are removed from imm
if (((imm & 0x00ff) == imm) && !useT2)
{
@@ -1757,9 +1780,9 @@ void emitter::emitIns_R_I(instruction ins,
return;
}
- //
- // Encode the PC and LR bits as the lowest two bits
- //
+ //
+ // Encode the PC and LR bits as the lowest two bits
+ //
if (fmt == IF_T2_I0)
{
imm <<= 2;
@@ -1772,178 +1795,169 @@ void emitter::emitIns_R_I(instruction ins,
}
break;
- case INS_and:
- case INS_bic:
- case INS_eor:
- case INS_orr:
- case INS_orn:
- case INS_rsb:
- case INS_sbc:
-
- case INS_ror:
- case INS_asr:
- case INS_lsl:
- case INS_lsr:
- // use the Reg, Reg, Imm encoding
- emitIns_R_R_I(ins, attr, reg, reg, imm, flags);
- return;
+ case INS_and:
+ case INS_bic:
+ case INS_eor:
+ case INS_orr:
+ case INS_orn:
+ case INS_rsb:
+ case INS_sbc:
- case INS_mov:
- assert(!EA_IS_CNS_RELOC(attr));
+ case INS_ror:
+ case INS_asr:
+ case INS_lsl:
+ case INS_lsr:
+ // use the Reg, Reg, Imm encoding
+ emitIns_R_R_I(ins, attr, reg, reg, imm, flags);
+ return;
- if (isLowRegister(reg) && insSetsFlags(flags) && ((imm & 0x00ff) == imm))
- {
- fmt = IF_T1_J0;
- sf = INS_FLAGS_SET;
- }
- else if (isModImmConst(imm))
- {
- fmt = IF_T2_L1;
- sf = insMustSetFlags(flags);
- }
- else if (isModImmConst(~imm)) // See if we can use move negated instruction instead
- {
- ins = INS_mvn;
- imm = ~imm;
- fmt = IF_T2_L1;
- sf = insMustSetFlags(flags);
- }
- else if (insDoesNotSetFlags(flags) && ((imm & 0x0000ffff) == imm))
- {
- // mov => movw instruction
- ins = INS_movw;
- fmt = IF_T2_N;
- sf = INS_FLAGS_NOT_SET;
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
+ case INS_mov:
+ assert(!EA_IS_CNS_RELOC(attr));
- case INS_movw:
- case INS_movt:
- assert(insDoesNotSetFlags(flags));
- sf = INS_FLAGS_NOT_SET;
- if ((imm & 0x0000ffff) == imm || EA_IS_RELOC(attr))
- {
- fmt = IF_T2_N;
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
+ if (isLowRegister(reg) && insSetsFlags(flags) && ((imm & 0x00ff) == imm))
+ {
+ fmt = IF_T1_J0;
+ sf = INS_FLAGS_SET;
+ }
+ else if (isModImmConst(imm))
+ {
+ fmt = IF_T2_L1;
+ sf = insMustSetFlags(flags);
+ }
+ else if (isModImmConst(~imm)) // See if we can use move negated instruction instead
+ {
+ ins = INS_mvn;
+ imm = ~imm;
+ fmt = IF_T2_L1;
+ sf = insMustSetFlags(flags);
+ }
+ else if (insDoesNotSetFlags(flags) && ((imm & 0x0000ffff) == imm))
+ {
+ // mov => movw instruction
+ ins = INS_movw;
+ fmt = IF_T2_N;
+ sf = INS_FLAGS_NOT_SET;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
+ break;
- case INS_mvn:
- if (isModImmConst(imm))
- {
- fmt = IF_T2_L1;
- sf = insMustSetFlags(flags);
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
+ case INS_movw:
+ case INS_movt:
+ assert(insDoesNotSetFlags(flags));
+ sf = INS_FLAGS_NOT_SET;
+ if ((imm & 0x0000ffff) == imm || EA_IS_RELOC(attr))
+ {
+ fmt = IF_T2_N;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
+ break;
- case INS_cmp:
- assert(!EA_IS_CNS_RELOC(attr));
- assert(insSetsFlags(flags));
- sf = INS_FLAGS_SET;
- if (isLowRegister(reg) && ((imm & 0x0ff) == imm))
- {
- fmt = IF_T1_J0;
- }
- else if (isModImmConst(imm))
- {
- fmt = IF_T2_L2;
- }
- else if (isModImmConst(-imm))
- {
- ins = INS_cmn;
- fmt = IF_T2_L2;
- imm = -imm;
- }
- else
- {
+ case INS_mvn:
+ if (isModImmConst(imm))
+ {
+ fmt = IF_T2_L1;
+ sf = insMustSetFlags(flags);
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
+ break;
+
+ case INS_cmp:
+ assert(!EA_IS_CNS_RELOC(attr));
+ assert(insSetsFlags(flags));
+ sf = INS_FLAGS_SET;
+ if (isLowRegister(reg) && ((imm & 0x0ff) == imm))
+ {
+ fmt = IF_T1_J0;
+ }
+ else if (isModImmConst(imm))
+ {
+ fmt = IF_T2_L2;
+ }
+ else if (isModImmConst(-imm))
+ {
+ ins = INS_cmn;
+ fmt = IF_T2_L2;
+ imm = -imm;
+ }
+ else
+ {
#ifndef LEGACY_BACKEND
- assert(!"emitIns_R_I: immediate doesn't fit into the instruction");
-#else // LEGACY_BACKEND
- // Load val into a register
- regNumber valReg = codeGen->regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(reg));
- codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, valReg, (ssize_t)imm);
- emitIns_R_R(ins, attr, reg, valReg, flags);
+ assert(!"emitIns_R_I: immediate doesn't fit into the instruction");
+#else // LEGACY_BACKEND
+ // Load val into a register
+ regNumber valReg = codeGen->regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(reg));
+ codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, valReg, (ssize_t)imm);
+ emitIns_R_R(ins, attr, reg, valReg, flags);
#endif // LEGACY_BACKEND
- return;
- }
- break;
+ return;
+ }
+ break;
- case INS_cmn:
- case INS_tst:
- case INS_teq:
- assert(insSetsFlags(flags));
- sf = INS_FLAGS_SET;
- if (isModImmConst(imm))
- {
- fmt = IF_T2_L2;
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
+ case INS_cmn:
+ case INS_tst:
+ case INS_teq:
+ assert(insSetsFlags(flags));
+ sf = INS_FLAGS_SET;
+ if (isModImmConst(imm))
+ {
+ fmt = IF_T2_L2;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
+ break;
#ifdef FEATURE_PLI_INSTRUCTION
- case INS_pli:
- assert(insDoesNotSetFlags(flags));
- if ((reg == REG_SP) && (unsigned_abs(imm) <= 0x0fff))
- {
- fmt = IF_T2_K3;
- sf = INS_FLAGS_NOT_SET;
- }
- __fallthrough;
+ case INS_pli:
+ assert(insDoesNotSetFlags(flags));
+ if ((reg == REG_SP) && (unsigned_abs(imm) <= 0x0fff))
+ {
+ fmt = IF_T2_K3;
+ sf = INS_FLAGS_NOT_SET;
+ }
+ __fallthrough;
#endif // FEATURE_PLI_INSTRUCTION
- case INS_pld:
- case INS_pldw:
- assert(insDoesNotSetFlags(flags));
- sf = INS_FLAGS_NOT_SET;
- if ((imm >= 0) && (imm <= 0x0fff))
- {
- fmt = IF_T2_K2;
- }
- else if ((imm < 0) && (-imm <= 0x00ff))
- {
- imm = -imm;
- fmt = IF_T2_H2;
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
-
- default:
- unreached();
- }
- assert((fmt == IF_T1_F ) ||
- (fmt == IF_T1_J0) ||
- (fmt == IF_T1_J1) ||
- (fmt == IF_T2_H2) ||
- (fmt == IF_T2_I0) ||
- (fmt == IF_T2_K2) ||
- (fmt == IF_T2_K3) ||
- (fmt == IF_T2_L1) ||
- (fmt == IF_T2_L2) ||
- (fmt == IF_T2_M1) ||
- (fmt == IF_T2_N ) ||
- (fmt == IF_T2_VLDST));
+ case INS_pld:
+ case INS_pldw:
+ assert(insDoesNotSetFlags(flags));
+ sf = INS_FLAGS_NOT_SET;
+ if ((imm >= 0) && (imm <= 0x0fff))
+ {
+ fmt = IF_T2_K2;
+ }
+ else if ((imm < 0) && (-imm <= 0x00ff))
+ {
+ imm = -imm;
+ fmt = IF_T2_H2;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
+ break;
+
+ default:
+ unreached();
+ }
+ assert((fmt == IF_T1_F) || (fmt == IF_T1_J0) || (fmt == IF_T1_J1) || (fmt == IF_T2_H2) || (fmt == IF_T2_I0) ||
+ (fmt == IF_T2_K2) || (fmt == IF_T2_K3) || (fmt == IF_T2_L1) || (fmt == IF_T2_L2) || (fmt == IF_T2_M1) ||
+ (fmt == IF_T2_N) || (fmt == IF_T2_VLDST));
assert(sf != INS_FLAGS_DONT_CARE);
- instrDesc * id = emitNewInstrSC(attr, imm);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstrSC(attr, imm);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -1955,313 +1969,302 @@ void emitter::emitIns_R_I(instruction ins,
appendToCurIG(id);
}
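A note for readers following the encoding selection above: the isModImmConst checks decide whether a constant fits a Thumb-2 "modified immediate", i.e. one of the byte-replication forms 0x000000XY, 0x00XY00XY, 0xXY00XY00, 0xXYXYXYXY, or an 8-bit value with its top bit set rotated right by 8..31. A rough standalone sketch of that test follows; it is not the JIT's isModImmConst, and the function name is invented for illustration.

    #include <cstdint>

    static bool fitsThumb2ModifiedImmediate(uint32_t v)
    {
        uint32_t b0 = v & 0xffu;        // low byte
        uint32_t b1 = (v >> 8) & 0xffu; // second byte

        // Byte-replication forms from the ThumbExpandImm pseudocode
        // (ignoring the UNPREDICTABLE imm8 == 0 corner cases).
        if (v == b0)                                          return true; // 0x000000XY
        if (v == (b0 | (b0 << 16)))                           return true; // 0x00XY00XY
        if (v == ((b1 << 8) | (b1 << 24)))                    return true; // 0xXY00XY00
        if (v == (b0 | (b0 << 8) | (b0 << 16) | (b0 << 24)))  return true; // 0xXYXYXYXY

        // Rotated form: an 8-bit value with bit 7 set, rotated right by 8..31.
        for (unsigned rot = 8; rot <= 31; rot++)
        {
            uint32_t unrotated = (v << rot) | (v >> (32u - rot)); // rotate left undoes the ROR
            if ((unrotated <= 0xffu) && ((unrotated & 0x80u) != 0))
                return true;
        }
        return false;
    }

For example, 0x00ff00ff and 0x000ff000 are encodable while 0x00001234 is not, which is why the mov and cmp cases above fall back to mvn/cmn with the inverted or negated value, or to movw for plain 16-bit constants.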
-
/*****************************************************************************
*
* Add an instruction referencing two registers
*/
-void emitter::emitIns_R_R(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- insFlags flags /* = INS_FLAGS_DONT_CARE */)
+void emitter::emitIns_R_R(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
- insFlags sf = INS_FLAGS_DONT_CARE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
+ insFlags sf = INS_FLAGS_DONT_CARE;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_add:
- if (insDoesNotSetFlags(flags))
- {
- fmt = IF_T1_D0;
- sf = INS_FLAGS_NOT_SET;
- break;
- }
- __fallthrough;
+ case INS_add:
+ if (insDoesNotSetFlags(flags))
+ {
+ fmt = IF_T1_D0;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+ }
+ __fallthrough;
- case INS_sub:
- // Use the Thumb-1 reg,reg,reg encoding
- emitIns_R_R_R(ins, attr, reg1, reg1, reg2, flags);
- return;
+ case INS_sub:
+ // Use the Thumb-1 reg,reg,reg encoding
+ emitIns_R_R_R(ins, attr, reg1, reg1, reg2, flags);
+ return;
- case INS_mov:
- if (insDoesNotSetFlags(flags))
- {
- assert(reg1 != reg2);
- fmt = IF_T1_D0;
- sf = INS_FLAGS_NOT_SET;
- }
- else // insSetsFlags(flags)
- {
- sf = INS_FLAGS_SET;
+ case INS_mov:
+ if (insDoesNotSetFlags(flags))
+ {
+ assert(reg1 != reg2);
+ fmt = IF_T1_D0;
+ sf = INS_FLAGS_NOT_SET;
+ }
+ else // insSetsFlags(flags)
+ {
+ sf = INS_FLAGS_SET;
+ if (isLowRegister(reg1) && isLowRegister(reg2))
+ {
+ fmt = IF_T1_E;
+ }
+ else
+ {
+ fmt = IF_T2_C3;
+ }
+ }
+ break;
+
+ case INS_cmp:
+ assert(insSetsFlags(flags));
+ sf = INS_FLAGS_SET;
if (isLowRegister(reg1) && isLowRegister(reg2))
{
- fmt = IF_T1_E;
+ fmt = IF_T1_E; // both are low registers
}
else
{
- fmt = IF_T2_C3;
+ fmt = IF_T1_D0; // one or both are high registers
}
- }
- break;
+ break;
- case INS_cmp:
- assert(insSetsFlags(flags));
- sf = INS_FLAGS_SET;
- if (isLowRegister(reg1) && isLowRegister(reg2))
- {
- fmt = IF_T1_E; // both are low registers
- }
- else
- {
- fmt = IF_T1_D0; // one or both are high registers
- }
- break;
+ case INS_vmov_f2i:
+ assert(isGeneralRegister(reg1));
+ assert(isFloatReg(reg2));
+ fmt = IF_T2_VMOVS;
+ sf = INS_FLAGS_NOT_SET;
+ break;
- case INS_vmov_f2i:
- assert(isGeneralRegister(reg1));
- assert(isFloatReg(reg2));
- fmt = IF_T2_VMOVS;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_vmov_i2f:
+ assert(isFloatReg(reg1));
+ assert(isGeneralRegister(reg2));
+ fmt = IF_T2_VMOVS;
+ sf = INS_FLAGS_NOT_SET;
+ break;
- case INS_vmov_i2f:
- assert(isFloatReg(reg1));
- assert(isGeneralRegister(reg2));
- fmt = IF_T2_VMOVS;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_vcvt_d2i:
+ case INS_vcvt_d2u:
+ case INS_vcvt_d2f:
+ assert(isFloatReg(reg1));
+ assert(isDoubleReg(reg2));
+ goto VCVT_COMMON;
- case INS_vcvt_d2i:
- case INS_vcvt_d2u:
- case INS_vcvt_d2f:
- assert(isFloatReg(reg1));
- assert(isDoubleReg(reg2));
- goto VCVT_COMMON;
-
- case INS_vcvt_f2d:
- case INS_vcvt_u2d:
- case INS_vcvt_i2d:
- assert(isDoubleReg(reg1));
- assert(isFloatReg(reg2));
- goto VCVT_COMMON;
-
- case INS_vcvt_u2f:
- case INS_vcvt_i2f:
- case INS_vcvt_f2i:
- case INS_vcvt_f2u:
- assert(size == EA_4BYTE);
- assert(isFloatReg(reg1));
- assert(isFloatReg(reg2));
- goto VCVT_COMMON;
-
- case INS_vmov:
- assert(reg1 != reg2);
- __fallthrough;
-
- case INS_vabs:
- case INS_vsqrt:
- case INS_vcmp:
- case INS_vneg:
- if (size == EA_8BYTE)
- {
+ case INS_vcvt_f2d:
+ case INS_vcvt_u2d:
+ case INS_vcvt_i2d:
assert(isDoubleReg(reg1));
- assert(isDoubleReg(reg2));
- }
- else
- {
- assert(isFloatReg(reg1));
assert(isFloatReg(reg2));
- }
- __fallthrough;
+ goto VCVT_COMMON;
-VCVT_COMMON:
- fmt = IF_T2_VFP2;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_vcvt_u2f:
+ case INS_vcvt_i2f:
+ case INS_vcvt_f2i:
+ case INS_vcvt_f2u:
+ assert(size == EA_4BYTE);
+ assert(isFloatReg(reg1));
+ assert(isFloatReg(reg2));
+ goto VCVT_COMMON;
- case INS_vadd:
- case INS_vmul:
- case INS_vsub:
- case INS_vdiv:
- emitIns_R_R_R(ins, attr, reg1, reg1, reg2);
- return;
+ case INS_vmov:
+ assert(reg1 != reg2);
+ __fallthrough;
- case INS_vldr:
- case INS_vstr:
- case INS_ldr:
- case INS_ldrb:
- case INS_ldrsb:
- case INS_ldrh:
- case INS_ldrsh:
-
- case INS_str:
- case INS_strb:
- case INS_strh:
- emitIns_R_R_I(ins, attr, reg1, reg2, 0);
- return;
+ case INS_vabs:
+ case INS_vsqrt:
+ case INS_vcmp:
+ case INS_vneg:
+ if (size == EA_8BYTE)
+ {
+ assert(isDoubleReg(reg1));
+ assert(isDoubleReg(reg2));
+ }
+ else
+ {
+ assert(isFloatReg(reg1));
+ assert(isFloatReg(reg2));
+ }
+ __fallthrough;
- case INS_adc:
- case INS_and:
- case INS_bic:
- case INS_eor:
- case INS_orr:
- case INS_sbc:
- if (insSetsFlags(flags) && isLowRegister(reg1) && isLowRegister(reg2))
- {
- fmt = IF_T1_E;
- sf = INS_FLAGS_SET;
+ VCVT_COMMON:
+ fmt = IF_T2_VFP2;
+ sf = INS_FLAGS_NOT_SET;
break;
- }
- __fallthrough;
-
- case INS_orn:
- // assert below fired for bug 281892 where the two operands of an OR were
- // the same static field load which got cse'd.
- // there's no reason why this assert would be true in general
- // assert(reg1 != reg2);
- // Use the Thumb-2 three register encoding
- emitIns_R_R_R_I(ins, attr, reg1, reg1, reg2, 0, flags);
- return;
- case INS_asr:
- case INS_lsl:
- case INS_lsr:
- case INS_ror:
- // assert below fired for bug 296394 where the two operands of an
- // arithmetic right shift were the same local variable
- // there's no reason why this assert would be true in general
- // assert(reg1 != reg2);
- if (insSetsFlags(flags) && isLowRegister(reg1) && isLowRegister(reg2))
- {
- fmt = IF_T1_E;
- sf = INS_FLAGS_SET;
- }
- else
- {
- // Use the Thumb-2 three register encoding
- emitIns_R_R_R(ins, attr, reg1, reg1, reg2, flags);
+ case INS_vadd:
+ case INS_vmul:
+ case INS_vsub:
+ case INS_vdiv:
+ emitIns_R_R_R(ins, attr, reg1, reg1, reg2);
return;
- }
- break;
- case INS_mul:
- // We will prefer the T2 encoding, unless (flags == INS_FLAGS_SET)
- // The thumb-1 instruction executes much slower as it must always set the flags
- //
- if (insMustSetFlags(flags) && isLowRegister(reg1) && isLowRegister(reg2))
- {
- fmt = IF_T1_E;
- sf = INS_FLAGS_SET;
- }
- else
- {
- // Use the Thumb-2 three register encoding
- emitIns_R_R_R(ins, attr, reg1, reg2, reg1, flags);
+ case INS_vldr:
+ case INS_vstr:
+ case INS_ldr:
+ case INS_ldrb:
+ case INS_ldrsb:
+ case INS_ldrh:
+ case INS_ldrsh:
+
+ case INS_str:
+ case INS_strb:
+ case INS_strh:
+ emitIns_R_R_I(ins, attr, reg1, reg2, 0);
return;
- }
- break;
- case INS_mvn:
- case INS_cmn:
- case INS_tst:
- if (insSetsFlags(flags) && isLowRegister(reg1) && isLowRegister(reg2))
- {
- fmt = IF_T1_E;
- sf = INS_FLAGS_SET;
- }
- else
- {
- // Use the Thumb-2 register with shift encoding
- emitIns_R_R_I(ins, attr, reg1, reg2, 0, flags);
- return;
- }
- break;
+ case INS_adc:
+ case INS_and:
+ case INS_bic:
+ case INS_eor:
+ case INS_orr:
+ case INS_sbc:
+ if (insSetsFlags(flags) && isLowRegister(reg1) && isLowRegister(reg2))
+ {
+ fmt = IF_T1_E;
+ sf = INS_FLAGS_SET;
+ break;
+ }
+ __fallthrough;
- case INS_sxtb:
- case INS_uxtb:
- assert(size == EA_1BYTE);
- goto EXTEND_COMMON;
-
- case INS_sxth:
- case INS_uxth:
- assert(size == EA_2BYTE);
-EXTEND_COMMON:
- assert(insDoesNotSetFlags(flags));
- if (isLowRegister(reg1) && isLowRegister(reg2))
- {
- fmt = IF_T1_E;
- sf = INS_FLAGS_NOT_SET;
- }
- else
- {
- // Use the Thumb-2 reg,reg with rotation encoding
- emitIns_R_R_I(ins, attr, reg1, reg2, 0, INS_FLAGS_NOT_SET);
+ case INS_orn:
+ // assert below fired for bug 281892 where the two operands of an OR were
+ // the same static field load which got cse'd.
+ // there's no reason why this assert would be true in general
+ // assert(reg1 != reg2);
+ // Use the Thumb-2 three register encoding
+ emitIns_R_R_R_I(ins, attr, reg1, reg1, reg2, 0, flags);
return;
- }
- break;
- case INS_tbb:
- assert(size == EA_1BYTE);
- assert(insDoesNotSetFlags(flags));
- fmt = IF_T2_C9;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_asr:
+ case INS_lsl:
+ case INS_lsr:
+ case INS_ror:
+ // assert below fired for bug 296394 where the two operands of an
+ // arithmetic right shift were the same local variable
+ // there's no reason why this assert would be true in general
+ // assert(reg1 != reg2);
+ if (insSetsFlags(flags) && isLowRegister(reg1) && isLowRegister(reg2))
+ {
+ fmt = IF_T1_E;
+ sf = INS_FLAGS_SET;
+ }
+ else
+ {
+ // Use the Thumb-2 three register encoding
+ emitIns_R_R_R(ins, attr, reg1, reg1, reg2, flags);
+ return;
+ }
+ break;
- case INS_tbh:
- assert(size == EA_2BYTE);
- assert(insDoesNotSetFlags(flags));
- fmt = IF_T2_C9;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_mul:
+ // We will prefer the T2 encoding, unless (flags == INS_FLAGS_SET)
+ // The thumb-1 instruction executes much slower as it must always set the flags
+ //
+ if (insMustSetFlags(flags) && isLowRegister(reg1) && isLowRegister(reg2))
+ {
+ fmt = IF_T1_E;
+ sf = INS_FLAGS_SET;
+ }
+ else
+ {
+ // Use the Thumb-2 three register encoding
+ emitIns_R_R_R(ins, attr, reg1, reg2, reg1, flags);
+ return;
+ }
+ break;
- case INS_clz:
- assert(insDoesNotSetFlags(flags));
- fmt = IF_T2_C10;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_mvn:
+ case INS_cmn:
+ case INS_tst:
+ if (insSetsFlags(flags) && isLowRegister(reg1) && isLowRegister(reg2))
+ {
+ fmt = IF_T1_E;
+ sf = INS_FLAGS_SET;
+ }
+ else
+ {
+ // Use the Thumb-2 register with shift encoding
+ emitIns_R_R_I(ins, attr, reg1, reg2, 0, flags);
+ return;
+ }
+ break;
- case INS_ldrexb:
- case INS_strexb:
- assert(size == EA_1BYTE);
- assert(insDoesNotSetFlags(flags));
- fmt = IF_T2_E1;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_sxtb:
+ case INS_uxtb:
+ assert(size == EA_1BYTE);
+ goto EXTEND_COMMON;
- case INS_ldrexh:
- case INS_strexh:
- assert(size == EA_2BYTE);
- assert(insDoesNotSetFlags(flags));
- fmt = IF_T2_E1;
- sf = INS_FLAGS_NOT_SET;
- break;
- default:
+ case INS_sxth:
+ case INS_uxth:
+ assert(size == EA_2BYTE);
+ EXTEND_COMMON:
+ assert(insDoesNotSetFlags(flags));
+ if (isLowRegister(reg1) && isLowRegister(reg2))
+ {
+ fmt = IF_T1_E;
+ sf = INS_FLAGS_NOT_SET;
+ }
+ else
+ {
+ // Use the Thumb-2 reg,reg with rotation encoding
+ emitIns_R_R_I(ins, attr, reg1, reg2, 0, INS_FLAGS_NOT_SET);
+ return;
+ }
+ break;
+
+ case INS_tbb:
+ assert(size == EA_1BYTE);
+ assert(insDoesNotSetFlags(flags));
+ fmt = IF_T2_C9;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+
+ case INS_tbh:
+ assert(size == EA_2BYTE);
+ assert(insDoesNotSetFlags(flags));
+ fmt = IF_T2_C9;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+
+ case INS_clz:
+ assert(insDoesNotSetFlags(flags));
+ fmt = IF_T2_C10;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+
+ case INS_ldrexb:
+ case INS_strexb:
+ assert(size == EA_1BYTE);
+ assert(insDoesNotSetFlags(flags));
+ fmt = IF_T2_E1;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+
+ case INS_ldrexh:
+ case INS_strexh:
+ assert(size == EA_2BYTE);
+ assert(insDoesNotSetFlags(flags));
+ fmt = IF_T2_E1;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+ default:
#ifdef DEBUG
- printf("did not expect instruction %s\n", codeGen->genInsName(ins));
+ printf("did not expect instruction %s\n", codeGen->genInsName(ins));
#endif
- unreached();
+ unreached();
}
- assert((fmt == IF_T1_D0 ) ||
- (fmt == IF_T1_E ) ||
- (fmt == IF_T2_C3 ) ||
- (fmt == IF_T2_C9 ) ||
- (fmt == IF_T2_C10) ||
- (fmt == IF_T2_VFP2)||
- (fmt == IF_T2_VMOVD)||
- (fmt == IF_T2_VMOVS)||
- (fmt == IF_T2_E1 ) );
+ assert((fmt == IF_T1_D0) || (fmt == IF_T1_E) || (fmt == IF_T2_C3) || (fmt == IF_T2_C9) || (fmt == IF_T2_C10) ||
+ (fmt == IF_T2_VFP2) || (fmt == IF_T2_VMOVD) || (fmt == IF_T2_VMOVS) || (fmt == IF_T2_E1));
assert(sf != INS_FLAGS_DONT_CARE);
- instrDesc * id = emitNewInstrSmall(attr);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstrSmall(attr);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -2279,29 +2282,25 @@ EXTEND_COMMON:
* Add an instruction referencing a register and two constants.
*/
-void emitter::emitIns_R_I_I(instruction ins,
- emitAttr attr,
- regNumber reg,
- int imm1,
- int imm2,
- insFlags flags /* = INS_FLAGS_DONT_CARE */)
+void emitter::emitIns_R_I_I(
+ instruction ins, emitAttr attr, regNumber reg, int imm1, int imm2, insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
- insFormat fmt = IF_NONE;
- insFlags sf = INS_FLAGS_DONT_CARE;
- int imm = 0; // combined immediates
+ insFormat fmt = IF_NONE;
+ insFlags sf = INS_FLAGS_DONT_CARE;
+ int imm = 0; // combined immediates
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_bfc:
+ case INS_bfc:
{
int lsb = imm1;
int msb = lsb + imm2 - 1;
- assert((lsb >= 0) && (lsb <= 31)); // required for encoding of INS_bfc
- assert((msb >= 0) && (msb <= 31)); // required for encoding of INS_bfc
- assert(msb >= lsb); // required for encoding of INS_bfc
+ assert((lsb >= 0) && (lsb <= 31)); // required for encoding of INS_bfc
+ assert((msb >= 0) && (msb <= 31)); // required for encoding of INS_bfc
+ assert(msb >= lsb); // required for encoding of INS_bfc
imm = (lsb << 5) | msb;
@@ -2310,15 +2309,15 @@ void emitter::emitIns_R_I_I(instruction ins,
sf = INS_FLAGS_NOT_SET;
}
break;
-
- default:
- unreached();
+
+ default:
+ unreached();
}
assert(fmt == IF_T2_D1);
assert(sf != INS_FLAGS_DONT_CARE);
- instrDesc * id = emitNewInstrSC(attr, imm);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstrSC(attr, imm);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -2330,534 +2329,510 @@ void emitter::emitIns_R_I_I(instruction ins,
appendToCurIG(id);
}
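As a small aside on the bfc case above: the combined immediate stored in the instrDesc packs the bit-field position as (lsb << 5) | msb. A self-contained sketch of that packing and unpacking, with helper names invented for illustration:

    #include <cassert>

    struct BitField { int lsb; int msb; };

    // Pack a bit-field described by (lsb, width) the same way the bfc case does.
    static int packBitFieldImm(int lsb, int width)
    {
        int msb = lsb + width - 1;
        assert((lsb >= 0) && (lsb <= 31));
        assert((msb >= lsb) && (msb <= 31));
        return (lsb << 5) | msb;
    }

    // Recover the two 5-bit fields from the combined immediate.
    static BitField unpackBitFieldImm(int imm)
    {
        return { (imm >> 5) & 0x1f, imm & 0x1f };
    }

For example, packBitFieldImm(8, 4) yields (8 << 5) | 11 = 0x10B, and unpackBitFieldImm(0x10B) gives back {8, 11}.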
-
/*****************************************************************************
*
* Add an instruction referencing two registers and a constant.
*/
-void emitter::emitIns_R_R_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- int imm,
- insFlags flags /* = INS_FLAGS_DONT_CARE */,
- insOpts opt /* = INS_OPTS_NONE */)
+void emitter::emitIns_R_R_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ int imm,
+ insFlags flags /* = INS_FLAGS_DONT_CARE */,
+ insOpts opt /* = INS_OPTS_NONE */)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
- insFlags sf = INS_FLAGS_DONT_CARE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
+ insFlags sf = INS_FLAGS_DONT_CARE;
if (ins == INS_lea)
{
ins = INS_add;
}
-
+
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_add:
- assert(insOptsNone(opt));
+ case INS_add:
+ assert(insOptsNone(opt));
- // Can we possibly encode the immediate 'imm' using a Thumb-1 encoding?
- if ((reg2 == REG_SP) && insDoesNotSetFlags(flags) && ((imm & 0x03fc) == imm))
- {
- if ((reg1 == REG_SP) && ((imm & 0x01fc) == imm))
- {
- // Use Thumb-1 encoding
- emitIns_R_I(ins, attr, reg1, imm, flags);
- return;
- }
- else if (isLowRegister(reg1))
+ // Can we possibly encode the immediate 'imm' using a Thumb-1 encoding?
+ if ((reg2 == REG_SP) && insDoesNotSetFlags(flags) && ((imm & 0x03fc) == imm))
{
- fmt = IF_T1_J2;
- sf = INS_FLAGS_NOT_SET;
- break;
+ if ((reg1 == REG_SP) && ((imm & 0x01fc) == imm))
+ {
+ // Use Thumb-1 encoding
+ emitIns_R_I(ins, attr, reg1, imm, flags);
+ return;
+ }
+ else if (isLowRegister(reg1))
+ {
+ fmt = IF_T1_J2;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+ }
}
- }
- __fallthrough;
+ __fallthrough;
- case INS_sub:
- assert(insOptsNone(opt));
+ case INS_sub:
+ assert(insOptsNone(opt));
- // Is it just a mov?
- if (imm == 0)
- {
- // Is the mov even necessary?
- // Fix 383915 ARM ILGEN
- if (reg1 != reg2)
+ // Is it just a mov?
+ if (imm == 0)
{
- emitIns_R_R(INS_mov, attr, reg1, reg2, flags);
+ // Is the mov even necessary?
+ // Fix 383915 ARM ILGEN
+ if (reg1 != reg2)
+ {
+ emitIns_R_R(INS_mov, attr, reg1, reg2, flags);
+ }
+ return;
}
- return;
- }
- // Can we encode the immediate 'imm' using a Thumb-1 encoding?
- else if (isLowRegister(reg1) && isLowRegister(reg2) &&
- insSetsFlags(flags) && (unsigned_abs(imm) <= 0x0007))
- {
- if (imm < 0)
+ // Can we encode the immediate 'imm' using a Thumb-1 encoding?
+ else if (isLowRegister(reg1) && isLowRegister(reg2) && insSetsFlags(flags) && (unsigned_abs(imm) <= 0x0007))
{
- assert((ins == INS_add) || (ins == INS_sub));
- if (ins == INS_add)
- ins = INS_sub;
- else
- ins = INS_add;
- imm = -imm;
+ if (imm < 0)
+ {
+ assert((ins == INS_add) || (ins == INS_sub));
+ if (ins == INS_add)
+ ins = INS_sub;
+ else
+ ins = INS_add;
+ imm = -imm;
+ }
+ fmt = IF_T1_G;
+ sf = INS_FLAGS_SET;
}
- fmt = IF_T1_G;
- sf = INS_FLAGS_SET;
- }
- else if ((reg1 == reg2) && isLowRegister(reg1) &&
- insSetsFlags(flags) && (unsigned_abs(imm) <= 0x00ff))
- {
- if (imm < 0)
+ else if ((reg1 == reg2) && isLowRegister(reg1) && insSetsFlags(flags) && (unsigned_abs(imm) <= 0x00ff))
{
- assert((ins == INS_add) || (ins == INS_sub));
- if (ins == INS_add)
- ins = INS_sub;
- else
- ins = INS_add;
- imm = -imm;
+ if (imm < 0)
+ {
+ assert((ins == INS_add) || (ins == INS_sub));
+ if (ins == INS_add)
+ ins = INS_sub;
+ else
+ ins = INS_add;
+ imm = -imm;
+ }
+ // Use Thumb-1 encoding
+ emitIns_R_I(ins, attr, reg1, imm, flags);
+ return;
}
- // Use Thumb-1 encoding
- emitIns_R_I(ins, attr, reg1, imm, flags);
- return;
- }
- else if (isModImmConst(imm))
- {
- fmt = IF_T2_L0;
- sf = insMustSetFlags(flags);
- }
- else if (isModImmConst(-imm))
- {
- assert((ins == INS_add) || (ins == INS_sub));
- ins = (ins == INS_add) ? INS_sub : INS_add;
- imm = -imm;
- fmt = IF_T2_L0;
- sf = insMustSetFlags(flags);
- }
- else if (insDoesNotSetFlags(flags) && (unsigned_abs(imm) <= 0x0fff))
- {
- if (imm < 0)
+ else if (isModImmConst(imm))
+ {
+ fmt = IF_T2_L0;
+ sf = insMustSetFlags(flags);
+ }
+ else if (isModImmConst(-imm))
{
assert((ins == INS_add) || (ins == INS_sub));
ins = (ins == INS_add) ? INS_sub : INS_add;
imm = -imm;
+ fmt = IF_T2_L0;
+ sf = insMustSetFlags(flags);
+ }
+ else if (insDoesNotSetFlags(flags) && (unsigned_abs(imm) <= 0x0fff))
+ {
+ if (imm < 0)
+ {
+ assert((ins == INS_add) || (ins == INS_sub));
+ ins = (ins == INS_add) ? INS_sub : INS_add;
+ imm = -imm;
+ }
+ // add/sub => addw/subw instruction
+                // Note that even when using the "w" (wide) form the immediate is still only 12 bits
+ ins = (ins == INS_add) ? INS_addw : INS_subw;
+ fmt = IF_T2_M0;
+ sf = INS_FLAGS_NOT_SET;
}
- // add/sub => addw/subw instruction
- // Note that even when using the w prefix the immediate is still only 12 bits?
- ins = (ins == INS_add) ? INS_addw : INS_subw;
- fmt = IF_T2_M0;
- sf = INS_FLAGS_NOT_SET;
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
-
- case INS_and:
- case INS_bic:
- case INS_orr:
- case INS_orn:
- assert(insOptsNone(opt));
- if (isModImmConst(imm))
- {
- fmt = IF_T2_L0;
- sf = insMustSetFlags(flags);
- }
- else if (isModImmConst(~imm))
- {
- fmt = IF_T2_L0;
- sf = insMustSetFlags(flags);
- imm = ~imm;
-
- if (ins == INS_and)
- ins = INS_bic;
- else if (ins == INS_bic)
- ins = INS_and;
- else if (ins == INS_orr)
- ins = INS_orn;
- else if (ins == INS_orn)
- ins = INS_orr;
else
+ {
assert(!"Instruction cannot be encoded");
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
-
- case INS_rsb:
- assert(insOptsNone(opt));
- if (imm == 0 && isLowRegister(reg1) && isLowRegister(reg2) && insSetsFlags(flags))
- {
- fmt = IF_T1_E;
- sf = INS_FLAGS_SET;
+ }
break;
- }
- __fallthrough;
-
- case INS_adc:
- case INS_eor:
- case INS_sbc:
- assert(insOptsNone(opt));
- if (isModImmConst(imm))
- {
- fmt = IF_T2_L0;
- sf = insMustSetFlags(flags);
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
- case INS_adr:
- assert(insOptsNone(opt));
- assert(insDoesNotSetFlags(flags));
- assert(reg2 == REG_PC);
- sf = INS_FLAGS_NOT_SET;
+ case INS_and:
+ case INS_bic:
+ case INS_orr:
+ case INS_orn:
+ assert(insOptsNone(opt));
+ if (isModImmConst(imm))
+ {
+ fmt = IF_T2_L0;
+ sf = insMustSetFlags(flags);
+ }
+ else if (isModImmConst(~imm))
+ {
+ fmt = IF_T2_L0;
+ sf = insMustSetFlags(flags);
+ imm = ~imm;
+
+ if (ins == INS_and)
+ ins = INS_bic;
+ else if (ins == INS_bic)
+ ins = INS_and;
+ else if (ins == INS_orr)
+ ins = INS_orn;
+ else if (ins == INS_orn)
+ ins = INS_orr;
+ else
+ assert(!"Instruction cannot be encoded");
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
+ break;
- if (isLowRegister(reg1) && ((imm & 0x00ff) == imm))
- {
- fmt = IF_T1_J3;
- }
- else if ((imm & 0x0fff) == imm)
- {
- fmt = IF_T2_M1;
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
+ case INS_rsb:
+ assert(insOptsNone(opt));
+ if (imm == 0 && isLowRegister(reg1) && isLowRegister(reg2) && insSetsFlags(flags))
+ {
+ fmt = IF_T1_E;
+ sf = INS_FLAGS_SET;
+ break;
+ }
+ __fallthrough;
- case INS_mvn:
- assert((imm >= 0) && (imm <= 31)); // required for encoding
- assert(!insOptAnyInc(opt));
- if (imm==0)
- {
+ case INS_adc:
+ case INS_eor:
+ case INS_sbc:
assert(insOptsNone(opt));
- if (isLowRegister(reg1) && isLowRegister(reg2) && insSetsFlags(flags))
+ if (isModImmConst(imm))
{
- // Use the Thumb-1 reg,reg encoding
- emitIns_R_R(ins, attr, reg1, reg2, flags);
- return;
+ fmt = IF_T2_L0;
+ sf = insMustSetFlags(flags);
}
- }
- else // imm > 0 && imm <= 31
- {
- assert(insOptAnyShift(opt));
- }
- fmt = IF_T2_C1;
- sf = insMustSetFlags(flags);
- break;
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
+ break;
- case INS_cmp:
- case INS_cmn:
- case INS_teq:
- case INS_tst:
- assert(insSetsFlags(flags));
- assert((imm >= 0) && (imm <= 31)); // required for encoding
- assert(!insOptAnyInc(opt));
- if (imm==0)
- {
+ case INS_adr:
assert(insOptsNone(opt));
- if (ins == INS_cmp)
+ assert(insDoesNotSetFlags(flags));
+ assert(reg2 == REG_PC);
+ sf = INS_FLAGS_NOT_SET;
+
+ if (isLowRegister(reg1) && ((imm & 0x00ff) == imm))
{
- // Use the Thumb-1 reg,reg encoding
- emitIns_R_R(ins, attr, reg1, reg2, flags);
- return;
+ fmt = IF_T1_J3;
}
- if (((ins == INS_cmn) || (ins == INS_tst)) &&
- isLowRegister(reg1) && isLowRegister(reg2))
+ else if ((imm & 0x0fff) == imm)
{
- // Use the Thumb-1 reg,reg encoding
- emitIns_R_R(ins, attr, reg1, reg2, flags);
- return;
+ fmt = IF_T2_M1;
}
- }
- else // imm > 0 && imm <= 31)
- {
- assert(insOptAnyShift(opt));
- if (insOptsRRX(opt))
- assert(imm==1);
- }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
+ break;
- fmt = IF_T2_C8;
- sf = INS_FLAGS_SET;
- break;
+ case INS_mvn:
+ assert((imm >= 0) && (imm <= 31)); // required for encoding
+ assert(!insOptAnyInc(opt));
+ if (imm == 0)
+ {
+ assert(insOptsNone(opt));
+ if (isLowRegister(reg1) && isLowRegister(reg2) && insSetsFlags(flags))
+ {
+ // Use the Thumb-1 reg,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg2, flags);
+ return;
+ }
+ }
+ else // imm > 0 && imm <= 31
+ {
+ assert(insOptAnyShift(opt));
+ }
+ fmt = IF_T2_C1;
+ sf = insMustSetFlags(flags);
+ break;
- case INS_ror:
- case INS_asr:
- case INS_lsl:
- case INS_lsr:
- assert(insOptsNone(opt));
-
- // On ARM, the immediate shift count of LSL and ROR must be between 1 and 31. For LSR and ASR, it is between
- // 1 and 32, though we don't ever use 32. Although x86 allows an immediate shift count of 8-bits in
- // instruction encoding, the CPU looks at only the lower 5 bits. As per ECMA, specifying a shift count to
- // the IL SHR, SHL, or SHL.UN instruction that is greater than or equal to the width of the type will yield
- // an undefined value. We choose that undefined value in this case to match x86 behavior, by only using the
- // lower 5 bits of the constant shift count.
- imm &= 0x1f;
-
- if (imm == 0)
- {
- // Additional Fix 383915 ARM ILGEN
- if ((reg1 != reg2) || insMustSetFlags(flags))
+ case INS_cmp:
+ case INS_cmn:
+ case INS_teq:
+ case INS_tst:
+ assert(insSetsFlags(flags));
+ assert((imm >= 0) && (imm <= 31)); // required for encoding
+ assert(!insOptAnyInc(opt));
+ if (imm == 0)
{
- // Use MOV/MOVS instriction
- emitIns_R_R(INS_mov, attr, reg1, reg2, flags);
+ assert(insOptsNone(opt));
+ if (ins == INS_cmp)
+ {
+ // Use the Thumb-1 reg,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg2, flags);
+ return;
+ }
+ if (((ins == INS_cmn) || (ins == INS_tst)) && isLowRegister(reg1) && isLowRegister(reg2))
+ {
+ // Use the Thumb-1 reg,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg2, flags);
+ return;
+ }
}
- return;
- }
-
- if (insSetsFlags(flags) && (ins != INS_ror) &&
- isLowRegister(reg1) && isLowRegister(reg2))
- {
- fmt = IF_T1_C;
+            else // imm > 0 && imm <= 31
+ {
+ assert(insOptAnyShift(opt));
+ if (insOptsRRX(opt))
+ assert(imm == 1);
+ }
+
+ fmt = IF_T2_C8;
sf = INS_FLAGS_SET;
- }
- else
- {
- fmt = IF_T2_C2;
- sf = insMustSetFlags(flags);
- }
- break;
+ break;
- case INS_sxtb:
- case INS_uxtb:
- assert(size == EA_1BYTE);
- goto EXTEND_COMMON;
+ case INS_ror:
+ case INS_asr:
+ case INS_lsl:
+ case INS_lsr:
+ assert(insOptsNone(opt));
- case INS_sxth:
- case INS_uxth:
- assert(size == EA_2BYTE);
-EXTEND_COMMON:
- assert(insOptsNone(opt));
- assert(insDoesNotSetFlags(flags));
- assert((imm & 0x018) == imm); // required for encoding
+ // On ARM, the immediate shift count of LSL and ROR must be between 1 and 31. For LSR and ASR, it is between
+ // 1 and 32, though we don't ever use 32. Although x86 allows an immediate shift count of 8-bits in
+ // instruction encoding, the CPU looks at only the lower 5 bits. As per ECMA, specifying a shift count to
+ // the IL SHR, SHL, or SHL.UN instruction that is greater than or equal to the width of the type will yield
+ // an undefined value. We choose that undefined value in this case to match x86 behavior, by only using the
+ // lower 5 bits of the constant shift count.
+ imm &= 0x1f;
- if ((imm == 0) && isLowRegister(reg1) && isLowRegister(reg2))
- {
- // Use Thumb-1 encoding
- emitIns_R_R(ins, attr, reg1, reg2, INS_FLAGS_NOT_SET);
- return;
- }
+ if (imm == 0)
+ {
+ // Additional Fix 383915 ARM ILGEN
+ if ((reg1 != reg2) || insMustSetFlags(flags))
+ {
+                    // Use MOV/MOVS instruction
+ emitIns_R_R(INS_mov, attr, reg1, reg2, flags);
+ }
+ return;
+ }
- fmt = IF_T2_C6;
- sf = INS_FLAGS_NOT_SET;
- break;
+ if (insSetsFlags(flags) && (ins != INS_ror) && isLowRegister(reg1) && isLowRegister(reg2))
+ {
+ fmt = IF_T1_C;
+ sf = INS_FLAGS_SET;
+ }
+ else
+ {
+ fmt = IF_T2_C2;
+ sf = insMustSetFlags(flags);
+ }
+ break;
- case INS_pld:
- case INS_pldw:
-#ifdef FEATURE_PLI_INSTRUCTION
- case INS_pli:
-#endif // FEATURE_PLI_INSTRUCTION
- assert(insOptsNone(opt));
- assert(insDoesNotSetFlags(flags));
- assert((imm & 0x003) == imm); // required for encoding
+ case INS_sxtb:
+ case INS_uxtb:
+ assert(size == EA_1BYTE);
+ goto EXTEND_COMMON;
- fmt = IF_T2_C7;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_sxth:
+ case INS_uxth:
+ assert(size == EA_2BYTE);
+ EXTEND_COMMON:
+ assert(insOptsNone(opt));
+ assert(insDoesNotSetFlags(flags));
+ assert((imm & 0x018) == imm); // required for encoding
- case INS_ldrb:
- case INS_strb:
- assert(size == EA_1BYTE);
- assert(insDoesNotSetFlags(flags));
+ if ((imm == 0) && isLowRegister(reg1) && isLowRegister(reg2))
+ {
+ // Use Thumb-1 encoding
+ emitIns_R_R(ins, attr, reg1, reg2, INS_FLAGS_NOT_SET);
+ return;
+ }
- if (isLowRegister(reg1) &&
- isLowRegister(reg2) &&
- insOptsNone(opt) &&
- ((imm & 0x001f) == imm))
- {
- fmt = IF_T1_C;
+ fmt = IF_T2_C6;
sf = INS_FLAGS_NOT_SET;
break;
- }
- goto COMMON_THUMB2_LDST;
-
- case INS_ldrsb:
- assert(size == EA_1BYTE);
- goto COMMON_THUMB2_LDST;
- case INS_ldrh:
- case INS_strh:
- assert(size == EA_2BYTE);
- assert(insDoesNotSetFlags(flags));
+ case INS_pld:
+ case INS_pldw:
+#ifdef FEATURE_PLI_INSTRUCTION
+ case INS_pli:
+#endif // FEATURE_PLI_INSTRUCTION
+ assert(insOptsNone(opt));
+ assert(insDoesNotSetFlags(flags));
+ assert((imm & 0x003) == imm); // required for encoding
- if (isLowRegister(reg1) &&
- isLowRegister(reg2) &&
- insOptsNone(opt) &&
- ((imm & 0x003e) == imm))
- {
- fmt = IF_T1_C;
+ fmt = IF_T2_C7;
sf = INS_FLAGS_NOT_SET;
break;
- }
- goto COMMON_THUMB2_LDST;
-
- case INS_ldrsh:
- assert(size == EA_2BYTE);
- goto COMMON_THUMB2_LDST;
-
- case INS_vldr:
- case INS_vstr:
- case INS_vldm:
- case INS_vstm:
- assert(fmt == IF_NONE);
- assert(insDoesNotSetFlags(flags));
- assert(offsetFitsInVectorMem(imm)); // required for encoding
- if (insOptAnyInc(opt))
- {
- if (insOptsPostInc(opt))
- {
- assert(imm > 0);
- }
- else // insOptsPreDec(opt)
+
+ case INS_ldrb:
+ case INS_strb:
+ assert(size == EA_1BYTE);
+ assert(insDoesNotSetFlags(flags));
+
+ if (isLowRegister(reg1) && isLowRegister(reg2) && insOptsNone(opt) && ((imm & 0x001f) == imm))
{
- assert(imm < 0);
+ fmt = IF_T1_C;
+ sf = INS_FLAGS_NOT_SET;
+ break;
}
- }
- else
- {
- assert(insOptsNone(opt));
- }
+ goto COMMON_THUMB2_LDST;
- sf = INS_FLAGS_NOT_SET;
- fmt = IF_T2_VLDST;
- break;
+ case INS_ldrsb:
+ assert(size == EA_1BYTE);
+ goto COMMON_THUMB2_LDST;
- case INS_ldr:
- case INS_str:
- assert(size == EA_4BYTE);
- assert(insDoesNotSetFlags(flags));
+ case INS_ldrh:
+ case INS_strh:
+ assert(size == EA_2BYTE);
+ assert(insDoesNotSetFlags(flags));
- // Can we possibly encode the immediate 'imm' using a Thumb-1 encoding?
- if (isLowRegister(reg1) && insOptsNone(opt) && ((imm & 0x03fc) == imm))
- {
- if (reg2 == REG_SP)
+ if (isLowRegister(reg1) && isLowRegister(reg2) && insOptsNone(opt) && ((imm & 0x003e) == imm))
{
- fmt = IF_T1_J2;
+ fmt = IF_T1_C;
sf = INS_FLAGS_NOT_SET;
break;
}
- else if (reg2 == REG_PC)
+ goto COMMON_THUMB2_LDST;
+
+ case INS_ldrsh:
+ assert(size == EA_2BYTE);
+ goto COMMON_THUMB2_LDST;
+
+ case INS_vldr:
+ case INS_vstr:
+ case INS_vldm:
+ case INS_vstm:
+ assert(fmt == IF_NONE);
+ assert(insDoesNotSetFlags(flags));
+ assert(offsetFitsInVectorMem(imm)); // required for encoding
+ if (insOptAnyInc(opt))
{
- if (ins == INS_ldr)
+ if (insOptsPostInc(opt))
{
- fmt = IF_T1_J3;
- sf = INS_FLAGS_NOT_SET;
- break;
+ assert(imm > 0);
+ }
+ else // insOptsPreDec(opt)
+ {
+ assert(imm < 0);
}
}
- else if (isLowRegister(reg2))
+ else
+ {
+ assert(insOptsNone(opt));
+ }
+
+ sf = INS_FLAGS_NOT_SET;
+ fmt = IF_T2_VLDST;
+ break;
+
+ case INS_ldr:
+ case INS_str:
+ assert(size == EA_4BYTE);
+ assert(insDoesNotSetFlags(flags));
+
+ // Can we possibly encode the immediate 'imm' using a Thumb-1 encoding?
+ if (isLowRegister(reg1) && insOptsNone(opt) && ((imm & 0x03fc) == imm))
{
- // Only the smaller range 'imm' can be encoded
- if ((imm & 0x07c) == imm)
+ if (reg2 == REG_SP)
{
- fmt = IF_T1_C;
+ fmt = IF_T1_J2;
sf = INS_FLAGS_NOT_SET;
break;
}
+ else if (reg2 == REG_PC)
+ {
+ if (ins == INS_ldr)
+ {
+ fmt = IF_T1_J3;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+ }
+ }
+ else if (isLowRegister(reg2))
+ {
+ // Only the smaller range 'imm' can be encoded
+ if ((imm & 0x07c) == imm)
+ {
+ fmt = IF_T1_C;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+ }
+ }
}
- }
- //
- // If we did not find a thumb-1 encoding above
- //
- __fallthrough;
-
-COMMON_THUMB2_LDST:
- assert(fmt == IF_NONE);
- assert(insDoesNotSetFlags(flags));
- sf = INS_FLAGS_NOT_SET;
+ //
+ // If we did not find a thumb-1 encoding above
+ //
+ __fallthrough;
- if (insOptAnyInc(opt))
- {
- if (insOptsPostInc(opt))
- assert(imm > 0);
- else // insOptsPreDec(opt)
- assert(imm < 0);
+ COMMON_THUMB2_LDST:
+ assert(fmt == IF_NONE);
+ assert(insDoesNotSetFlags(flags));
+ sf = INS_FLAGS_NOT_SET;
- if (unsigned_abs(imm) <= 0x00ff)
+ if (insOptAnyInc(opt))
{
- fmt = IF_T2_H0;
+ if (insOptsPostInc(opt))
+ assert(imm > 0);
+ else // insOptsPreDec(opt)
+ assert(imm < 0);
+
+ if (unsigned_abs(imm) <= 0x00ff)
+ {
+ fmt = IF_T2_H0;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
}
- else
+ else
{
- assert(!"Instruction cannot be encoded");
+ assert(insOptsNone(opt));
+ if ((reg2 == REG_PC) && (unsigned_abs(imm) <= 0x0fff))
+ {
+ fmt = IF_T2_K4;
+ }
+ else if ((imm & 0x0fff) == imm)
+ {
+ fmt = IF_T2_K1;
+ }
+ else if (unsigned_abs(imm) <= 0x0ff)
+ {
+ fmt = IF_T2_H0;
+ }
+ else
+ {
+ // Load imm into a register
+ regNumber rsvdReg = codeGen->rsGetRsvdReg();
+ codeGen->instGen_Set_Reg_To_Imm(EA_4BYTE, rsvdReg, (ssize_t)imm);
+ emitIns_R_R_R(ins, attr, reg1, reg2, rsvdReg);
+ return;
+ }
}
- }
- else {
+ break;
+
+ case INS_ldrex:
+ case INS_strex:
assert(insOptsNone(opt));
- if ((reg2 == REG_PC) && (unsigned_abs(imm) <= 0x0fff))
- {
- fmt = IF_T2_K4;
- }
- else if ((imm & 0x0fff) == imm)
- {
- fmt = IF_T2_K1;
- }
- else if (unsigned_abs(imm) <= 0x0ff)
+ assert(insDoesNotSetFlags(flags));
+ sf = INS_FLAGS_NOT_SET;
+
+ if ((imm & 0x03fc) == imm)
{
fmt = IF_T2_H0;
}
- else
+ else
{
- // Load imm into a register
- regNumber rsvdReg = codeGen->rsGetRsvdReg();
- codeGen->instGen_Set_Reg_To_Imm(EA_4BYTE, rsvdReg, (ssize_t)imm);
- emitIns_R_R_R(ins, attr, reg1, reg2, rsvdReg);
- return;
+ assert(!"Instruction cannot be encoded");
}
- }
- break;
-
- case INS_ldrex:
- case INS_strex:
- assert(insOptsNone(opt));
- assert(insDoesNotSetFlags(flags));
- sf = INS_FLAGS_NOT_SET;
-
- if ((imm & 0x03fc) == imm)
- {
- fmt = IF_T2_H0;
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
+ break;
- default:
- assert(!"Unexpected instruction");
- }
- assert((fmt == IF_T1_C ) ||
- (fmt == IF_T1_E ) ||
- (fmt == IF_T1_G ) ||
- (fmt == IF_T1_J2) ||
- (fmt == IF_T1_J3) ||
- (fmt == IF_T2_C1) ||
- (fmt == IF_T2_C2) ||
- (fmt == IF_T2_C6) ||
- (fmt == IF_T2_C7) ||
- (fmt == IF_T2_C8) ||
- (fmt == IF_T2_H0) ||
- (fmt == IF_T2_H1) ||
- (fmt == IF_T2_K1) ||
- (fmt == IF_T2_K4) ||
- (fmt == IF_T2_L0) ||
- (fmt == IF_T2_M0) ||
- (fmt == IF_T2_VLDST) ||
- (fmt == IF_T2_M1) );
+ default:
+ assert(!"Unexpected instruction");
+ }
+ assert((fmt == IF_T1_C) || (fmt == IF_T1_E) || (fmt == IF_T1_G) || (fmt == IF_T1_J2) || (fmt == IF_T1_J3) ||
+ (fmt == IF_T2_C1) || (fmt == IF_T2_C2) || (fmt == IF_T2_C6) || (fmt == IF_T2_C7) || (fmt == IF_T2_C8) ||
+ (fmt == IF_T2_H0) || (fmt == IF_T2_H1) || (fmt == IF_T2_K1) || (fmt == IF_T2_K4) || (fmt == IF_T2_L0) ||
+ (fmt == IF_T2_M0) || (fmt == IF_T2_VLDST) || (fmt == IF_T2_M1));
assert(sf != INS_FLAGS_DONT_CARE);
- instrDesc * id = emitNewInstrSC(attr, imm);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstrSC(attr, imm);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -2871,222 +2846,213 @@ COMMON_THUMB2_LDST:
appendToCurIG(id);
}
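One pattern worth calling out in the add/sub handling above: when an immediate does not fit an encoding but its negation does, the emitter flips the opcode and negates the operand. A minimal sketch of that idea, with fitsEncoding standing in for checks such as isModImmConst (both names here are illustrative, not emitter code):

    enum class AddSub { Add, Sub };

    // Returns true if (op, imm) are already encodable or were adjusted into an encodable form.
    static bool tryFlipAddSubImmediate(AddSub& op, int& imm, bool (*fitsEncoding)(int))
    {
        if (fitsEncoding(imm))
        {
            return true; // already encodable as-is
        }
        if (fitsEncoding(-imm))
        {
            op  = (op == AddSub::Add) ? AddSub::Sub : AddSub::Add; // add r, #-n  ==>  sub r, #n
            imm = -imm;
            return true;
        }
        return false; // caller must materialize the constant in a register instead
    }

The same flip appears twice above, once for the modified-immediate form (IF_T2_L0) and once for the 12-bit addw/subw form (IF_T2_M0).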
-
/*****************************************************************************
*
* Add an instruction referencing three registers.
*/
-void emitter::emitIns_R_R_R(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- insFlags flags /* = INS_FLAGS_DONT_CARE */)
+void emitter::emitIns_R_R_R(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
- insFlags sf = INS_FLAGS_DONT_CARE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
+ insFlags sf = INS_FLAGS_DONT_CARE;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_add:
- // Encodings do not support SP in the reg3 slot
- if (reg3 == REG_SP)
- {
- // Swap reg2 and reg3
- reg3 = reg2;
- reg2 = REG_SP;
- }
- __fallthrough;
-
- case INS_sub:
- assert (reg3 != REG_SP);
+ case INS_add:
+ // Encodings do not support SP in the reg3 slot
+ if (reg3 == REG_SP)
+ {
+ // Swap reg2 and reg3
+ reg3 = reg2;
+ reg2 = REG_SP;
+ }
+ __fallthrough;
- if (isLowRegister(reg1) &&
- isLowRegister(reg2) &&
- isLowRegister(reg3) &&
- insSetsFlags(flags) )
- {
- fmt = IF_T1_H;
- sf = INS_FLAGS_SET;
- break;
- }
+ case INS_sub:
+ assert(reg3 != REG_SP);
- if ((ins == INS_add) && insDoesNotSetFlags(flags))
- {
- if (reg1 == reg2)
+ if (isLowRegister(reg1) && isLowRegister(reg2) && isLowRegister(reg3) && insSetsFlags(flags))
{
- // Use the Thumb-1 regdest,reg encoding
- emitIns_R_R(ins, attr, reg1, reg3, flags);
- return;
+ fmt = IF_T1_H;
+ sf = INS_FLAGS_SET;
+ break;
}
- if (reg1 == reg3)
+
+ if ((ins == INS_add) && insDoesNotSetFlags(flags))
{
- // Use the Thumb-1 regdest,reg encoding
- emitIns_R_R(ins, attr, reg1, reg2, flags);
- return;
+ if (reg1 == reg2)
+ {
+ // Use the Thumb-1 regdest,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg3, flags);
+ return;
+ }
+ if (reg1 == reg3)
+ {
+ // Use the Thumb-1 regdest,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg2, flags);
+ return;
+ }
}
- }
-
- // Use the Thumb-2 reg,reg,reg with shift encoding
- emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0, flags);
- return;
- case INS_adc:
- case INS_and:
- case INS_bic:
- case INS_eor:
- case INS_orr:
- case INS_sbc:
- if (reg1 == reg2)
- {
- // Try to encode as a Thumb-1 instruction
- emitIns_R_R(ins, attr, reg1, reg3, flags);
+ // Use the Thumb-2 reg,reg,reg with shift encoding
+ emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0, flags);
return;
- }
- __fallthrough;
- case INS_orn:
- // Use the Thumb-2 three register encoding, with imm=0
- emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0, flags);
- return;
+ case INS_adc:
+ case INS_and:
+ case INS_bic:
+ case INS_eor:
+ case INS_orr:
+ case INS_sbc:
+ if (reg1 == reg2)
+ {
+ // Try to encode as a Thumb-1 instruction
+ emitIns_R_R(ins, attr, reg1, reg3, flags);
+ return;
+ }
+ __fallthrough;
- case INS_asr:
- case INS_lsl:
- case INS_lsr:
- if (reg1 == reg2 && insSetsFlags(flags) && isLowRegister(reg1) && isLowRegister(reg3))
- {
- // Use the Thumb-1 regdest,reg encoding
- emitIns_R_R(ins, attr, reg1, reg3, flags);
+ case INS_orn:
+ // Use the Thumb-2 three register encoding, with imm=0
+ emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0, flags);
return;
- }
- __fallthrough;
-
- case INS_ror:
- fmt = IF_T2_C4;
- sf = insMustSetFlags(flags);
- break;
- case INS_mul:
- if (insMustSetFlags(flags))
- {
- if ((reg1 == reg2) && isLowRegister(reg1))
+ case INS_asr:
+ case INS_lsl:
+ case INS_lsr:
+ if (reg1 == reg2 && insSetsFlags(flags) && isLowRegister(reg1) && isLowRegister(reg3))
{
// Use the Thumb-1 regdest,reg encoding
emitIns_R_R(ins, attr, reg1, reg3, flags);
return;
}
- if ((reg1 == reg3) && isLowRegister(reg1))
+ __fallthrough;
+
+ case INS_ror:
+ fmt = IF_T2_C4;
+ sf = insMustSetFlags(flags);
+ break;
+
+ case INS_mul:
+ if (insMustSetFlags(flags))
{
- // Use the Thumb-1 regdest,reg encoding
- emitIns_R_R(ins, attr, reg1, reg2, flags);
- return;
+ if ((reg1 == reg2) && isLowRegister(reg1))
+ {
+ // Use the Thumb-1 regdest,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg3, flags);
+ return;
+ }
+ if ((reg1 == reg3) && isLowRegister(reg1))
+ {
+ // Use the Thumb-1 regdest,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg2, flags);
+ return;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
}
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- }
- __fallthrough;
+ __fallthrough;
- case INS_sdiv:
- case INS_udiv:
- assert(insDoesNotSetFlags(flags));
- fmt = IF_T2_C5;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_sdiv:
+ case INS_udiv:
+ assert(insDoesNotSetFlags(flags));
+ fmt = IF_T2_C5;
+ sf = INS_FLAGS_NOT_SET;
+ break;
- case INS_ldrb:
- case INS_strb:
- case INS_ldrsb:
- assert(size == EA_1BYTE);
- goto COMMON_THUMB1_LDST;
+ case INS_ldrb:
+ case INS_strb:
+ case INS_ldrsb:
+ assert(size == EA_1BYTE);
+ goto COMMON_THUMB1_LDST;
- case INS_ldrsh:
- case INS_ldrh:
- case INS_strh:
- assert(size == EA_2BYTE);
- goto COMMON_THUMB1_LDST;
+ case INS_ldrsh:
+ case INS_ldrh:
+ case INS_strh:
+ assert(size == EA_2BYTE);
+ goto COMMON_THUMB1_LDST;
- case INS_ldr:
- case INS_str:
- assert(size == EA_4BYTE);
+ case INS_ldr:
+ case INS_str:
+ assert(size == EA_4BYTE);
-COMMON_THUMB1_LDST:
- assert(insDoesNotSetFlags(flags));
+ COMMON_THUMB1_LDST:
+ assert(insDoesNotSetFlags(flags));
- if (isLowRegister(reg1) && isLowRegister(reg2) && isLowRegister(reg3))
- {
- fmt = IF_T1_H;
+ if (isLowRegister(reg1) && isLowRegister(reg2) && isLowRegister(reg3))
+ {
+ fmt = IF_T1_H;
+ sf = INS_FLAGS_NOT_SET;
+ }
+ else
+ {
+ // Use the Thumb-2 reg,reg,reg with shift encoding
+ emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0, flags);
+ return;
+ }
+ break;
+
+ case INS_vadd:
+ case INS_vmul:
+ case INS_vsub:
+ case INS_vdiv:
+ if (size == EA_8BYTE)
+ {
+ assert(isDoubleReg(reg1));
+ assert(isDoubleReg(reg2));
+ assert(isDoubleReg(reg3));
+ }
+ else
+ {
+ assert(isFloatReg(reg1));
+ assert(isFloatReg(reg2));
+ assert(isFloatReg(reg3));
+ }
+ fmt = IF_T2_VFP3;
sf = INS_FLAGS_NOT_SET;
- }
- else
- {
- // Use the Thumb-2 reg,reg,reg with shift encoding
- emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0, flags);
- return;
- }
- break;
+ break;
- case INS_vadd:
- case INS_vmul:
- case INS_vsub:
- case INS_vdiv:
- if (size == EA_8BYTE)
- {
+ case INS_vmov_i2d:
assert(isDoubleReg(reg1));
- assert(isDoubleReg(reg2));
- assert(isDoubleReg(reg3));
- }
- else
- {
- assert(isFloatReg(reg1));
- assert(isFloatReg(reg2));
- assert(isFloatReg(reg3));
- }
- fmt = IF_T2_VFP3;
- sf = INS_FLAGS_NOT_SET;
- break;
-
- case INS_vmov_i2d:
- assert(isDoubleReg(reg1));
- assert(isGeneralRegister(reg2));
- assert(isGeneralRegister(reg3));
- fmt = IF_T2_VMOVD;
- sf = INS_FLAGS_NOT_SET;
- break;
-
- case INS_vmov_d2i:
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- assert(isDoubleReg(reg3));
- fmt = IF_T2_VMOVD;
- sf = INS_FLAGS_NOT_SET;
- break;
+ assert(isGeneralRegister(reg2));
+ assert(isGeneralRegister(reg3));
+ fmt = IF_T2_VMOVD;
+ sf = INS_FLAGS_NOT_SET;
+ break;
- case INS_ldrexd:
- case INS_strexd:
- assert(insDoesNotSetFlags(flags));
- fmt = IF_T2_G1;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_vmov_d2i:
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ assert(isDoubleReg(reg3));
+ fmt = IF_T2_VMOVD;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+ case INS_ldrexd:
+ case INS_strexd:
+ assert(insDoesNotSetFlags(flags));
+ fmt = IF_T2_G1;
+ sf = INS_FLAGS_NOT_SET;
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
- assert((fmt == IF_T1_H ) ||
- (fmt == IF_T2_C4) ||
- (fmt == IF_T2_C5) ||
- (fmt == IF_T2_VFP3) ||
- (fmt == IF_T2_VMOVD) ||
+ assert((fmt == IF_T1_H) || (fmt == IF_T2_C4) || (fmt == IF_T2_C5) || (fmt == IF_T2_VFP3) || (fmt == IF_T2_VMOVD) ||
(fmt == IF_T2_G1));
assert(sf != INS_FLAGS_DONT_CARE);
- instrDesc * id = emitNewInstr(attr);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstr(attr);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3100,61 +3066,60 @@ COMMON_THUMB1_LDST:
appendToCurIG(id);
}
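The three-register cases above keep repeating one shape: take the 16-bit Thumb-1 encoding only when every operand is a low register (r0-r7) and the flag-setting behaviour matches, otherwise fall back to a 32-bit Thumb-2 form or re-dispatch to a two-register overload. A compressed sketch of that decision (names are made up and this is not the emitter's control flow):

    // Low registers are r0-r7; only they are reachable from most 16-bit encodings.
    static bool isLowReg(unsigned regNum)
    {
        return regNum <= 7;
    }

    // True when a 16-bit three-register ALU encoding (which sets the flags)
    // can be used instead of the 32-bit Thumb-2 form.
    static bool canUseThumb1ThreeRegAlu(unsigned rd, unsigned rn, unsigned rm, bool mustSetFlags)
    {
        return mustSetFlags && isLowReg(rd) && isLowReg(rn) && isLowReg(rm);
    }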
-
/*****************************************************************************
*
* Add an instruction referencing two registers and two constants.
*/
-void emitter::emitIns_R_R_I_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- int imm1,
- int imm2,
- insFlags flags /* = INS_FLAGS_DONT_CARE */)
+void emitter::emitIns_R_R_I_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ int imm1,
+ int imm2,
+ insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
- insFormat fmt = IF_NONE;
- insFlags sf = INS_FLAGS_DONT_CARE;
+ insFormat fmt = IF_NONE;
+ insFlags sf = INS_FLAGS_DONT_CARE;
- int lsb = imm1;
- int width = imm2;
- int msb = lsb + width - 1;
- int imm = 0; /* combined immediate */
+ int lsb = imm1;
+ int width = imm2;
+ int msb = lsb + width - 1;
+ int imm = 0; /* combined immediate */
- assert((lsb >= 0) && (lsb <= 31)); // required for encodings
- assert((width > 0) && (width <= 32)); // required for encodings
- assert((msb >= 0) && (msb <= 31)); // required for encodings
- assert(msb >= lsb); // required for encodings
+ assert((lsb >= 0) && (lsb <= 31)); // required for encodings
+ assert((width > 0) && (width <= 32)); // required for encodings
+ assert((msb >= 0) && (msb <= 31)); // required for encodings
+ assert(msb >= lsb); // required for encodings
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_bfi:
- assert(insDoesNotSetFlags(flags));
- imm = (lsb << 5) | msb;
+ case INS_bfi:
+ assert(insDoesNotSetFlags(flags));
+ imm = (lsb << 5) | msb;
- fmt = IF_T2_D0;
- sf = INS_FLAGS_NOT_SET;
- break;
+ fmt = IF_T2_D0;
+ sf = INS_FLAGS_NOT_SET;
+ break;
- case INS_sbfx:
- case INS_ubfx:
- assert(insDoesNotSetFlags(flags));
- imm = (lsb << 5) | (width-1);
+ case INS_sbfx:
+ case INS_ubfx:
+ assert(insDoesNotSetFlags(flags));
+ imm = (lsb << 5) | (width - 1);
- fmt = IF_T2_D0;
- sf = INS_FLAGS_NOT_SET;
- break;
-
- default:
- unreached();
+ fmt = IF_T2_D0;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+
+ default:
+ unreached();
}
assert((fmt == IF_T2_D0));
assert(sf != INS_FLAGS_DONT_CARE);
- instrDesc * id = emitNewInstrSC(attr, imm);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstrSC(attr, imm);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3172,177 +3137,162 @@ void emitter::emitIns_R_R_I_I(instruction ins,
* Add an instruction referencing three registers and a constant.
*/
-void emitter::emitIns_R_R_R_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- int imm,
- insFlags flags /* = INS_FLAGS_DONT_CARE */,
- insOpts opt /* = INS_OPTS_NONE */)
+void emitter::emitIns_R_R_R_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ int imm,
+ insFlags flags /* = INS_FLAGS_DONT_CARE */,
+ insOpts opt /* = INS_OPTS_NONE */)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
- insFlags sf = INS_FLAGS_DONT_CARE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
+ insFlags sf = INS_FLAGS_DONT_CARE;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_add:
- case INS_sub:
- if (imm == 0)
- {
- if (isLowRegister(reg1) &&
- isLowRegister(reg2) &&
- isLowRegister(reg3) &&
- insSetsFlags(flags) )
+ case INS_add:
+ case INS_sub:
+ if (imm == 0)
{
- // Use the Thumb-1 reg,reg,reg encoding
- emitIns_R_R_R(ins, attr, reg1, reg2, reg3, flags);
- return;
- }
- if ((ins == INS_add) && insDoesNotSetFlags(flags))
- {
- if (reg1 == reg2)
+ if (isLowRegister(reg1) && isLowRegister(reg2) && isLowRegister(reg3) && insSetsFlags(flags))
{
- // Use the Thumb-1 regdest,reg encoding
- emitIns_R_R(ins, attr, reg1, reg3, flags);
+ // Use the Thumb-1 reg,reg,reg encoding
+ emitIns_R_R_R(ins, attr, reg1, reg2, reg3, flags);
return;
}
- if (reg1 == reg3)
+ if ((ins == INS_add) && insDoesNotSetFlags(flags))
{
- // Use the Thumb-1 regdest,reg encoding
- emitIns_R_R(ins, attr, reg1, reg2, flags);
- return;
+ if (reg1 == reg2)
+ {
+ // Use the Thumb-1 regdest,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg3, flags);
+ return;
+ }
+ if (reg1 == reg3)
+ {
+ // Use the Thumb-1 regdest,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg2, flags);
+ return;
+ }
}
}
- }
- __fallthrough;
-
- case INS_adc:
- case INS_and:
- case INS_bic:
- case INS_eor:
- case INS_orn:
- case INS_orr:
- case INS_sbc:
- assert((imm >= 0) && (imm <= 31)); // required for encoding
- assert(!insOptAnyInc(opt));
- if (imm==0)
- {
- if (opt == INS_OPTS_LSL) // left shift of zero
- opt = INS_OPTS_NONE; // is a nop
+ __fallthrough;
- assert(insOptsNone(opt));
- if (isLowRegister(reg1) &&
- isLowRegister(reg2) &&
- isLowRegister(reg3) &&
- insSetsFlags(flags) )
+ case INS_adc:
+ case INS_and:
+ case INS_bic:
+ case INS_eor:
+ case INS_orn:
+ case INS_orr:
+ case INS_sbc:
+ assert((imm >= 0) && (imm <= 31)); // required for encoding
+ assert(!insOptAnyInc(opt));
+ if (imm == 0)
{
- if (reg1 == reg2)
- {
- // Use the Thumb-1 regdest,reg encoding
- emitIns_R_R(ins, attr, reg1, reg3, flags);
- return;
- }
- if ((reg1 == reg3) &&
- (ins != INS_bic) &&
- (ins != INS_orn) &&
- (ins != INS_sbc) )
+ if (opt == INS_OPTS_LSL) // left shift of zero
+ opt = INS_OPTS_NONE; // is a nop
+
+ assert(insOptsNone(opt));
+ if (isLowRegister(reg1) && isLowRegister(reg2) && isLowRegister(reg3) && insSetsFlags(flags))
{
- // Use the Thumb-1 regdest,reg encoding
- emitIns_R_R(ins, attr, reg1, reg2, flags);
- return;
+ if (reg1 == reg2)
+ {
+ // Use the Thumb-1 regdest,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg3, flags);
+ return;
+ }
+ if ((reg1 == reg3) && (ins != INS_bic) && (ins != INS_orn) && (ins != INS_sbc))
+ {
+ // Use the Thumb-1 regdest,reg encoding
+ emitIns_R_R(ins, attr, reg1, reg2, flags);
+ return;
+ }
}
}
- }
- else // imm > 0 && imm <= 31)
- {
- assert(insOptAnyShift(opt));
- if (insOptsRRX(opt))
- assert(imm==1);
- }
- fmt = IF_T2_C0;
- sf = insMustSetFlags(flags);
- break;
+            else // imm > 0 && imm <= 31
+ {
+ assert(insOptAnyShift(opt));
+ if (insOptsRRX(opt))
+ assert(imm == 1);
+ }
+ fmt = IF_T2_C0;
+ sf = insMustSetFlags(flags);
+ break;
- case INS_ldrb:
- case INS_ldrsb:
- case INS_strb:
- assert(size == EA_1BYTE);
- goto COMMON_THUMB2_LDST;
-
- case INS_ldrh:
- case INS_ldrsh:
- case INS_strh:
- assert(size == EA_2BYTE);
- goto COMMON_THUMB2_LDST;
-
- case INS_ldr:
- case INS_str:
- assert(size == EA_4BYTE);
-
-COMMON_THUMB2_LDST:
- assert(insDoesNotSetFlags(flags));
- assert((imm & 0x0003) == imm); // required for encoding
-
- if ((imm == 0) &&
- insOptsNone(opt) &&
- isLowRegister(reg1) &&
- isLowRegister(reg2) &&
- isLowRegister(reg3))
- {
- // Use the Thumb-1 reg,reg,reg encoding
- emitIns_R_R_R(ins, attr, reg1, reg2, reg3, flags);
- return;
- }
- assert(insOptsNone(opt) || insOptsLSL(opt));
- fmt = IF_T2_E0;
- sf = INS_FLAGS_NOT_SET;
- break;
+ case INS_ldrb:
+ case INS_ldrsb:
+ case INS_strb:
+ assert(size == EA_1BYTE);
+ goto COMMON_THUMB2_LDST;
- case INS_ldrd:
- case INS_strd:
- assert(insDoesNotSetFlags(flags));
- assert((imm & 0x03) == 0);
- sf = INS_FLAGS_NOT_SET;
+ case INS_ldrh:
+ case INS_ldrsh:
+ case INS_strh:
+ assert(size == EA_2BYTE);
+ goto COMMON_THUMB2_LDST;
- if (insOptAnyInc(opt))
- {
- if (insOptsPostInc(opt))
- assert(imm > 0);
- else // insOptsPreDec(opt)
- assert(imm < 0);
- }
- else
- {
- assert(insOptsNone(opt));
- }
+ case INS_ldr:
+ case INS_str:
+ assert(size == EA_4BYTE);
- if (unsigned_abs(imm) <= 0x03fc)
- {
- imm >>= 2;
- fmt = IF_T2_G0;
- }
- else
- {
- assert(!"Instruction cannot be encoded");
- }
- break;
-
- default:
- unreached();
+ COMMON_THUMB2_LDST:
+ assert(insDoesNotSetFlags(flags));
+ assert((imm & 0x0003) == imm); // required for encoding
+
+ if ((imm == 0) && insOptsNone(opt) && isLowRegister(reg1) && isLowRegister(reg2) && isLowRegister(reg3))
+ {
+ // Use the Thumb-1 reg,reg,reg encoding
+ emitIns_R_R_R(ins, attr, reg1, reg2, reg3, flags);
+ return;
+ }
+ assert(insOptsNone(opt) || insOptsLSL(opt));
+ fmt = IF_T2_E0;
+ sf = INS_FLAGS_NOT_SET;
+ break;
+
+ case INS_ldrd:
+ case INS_strd:
+ assert(insDoesNotSetFlags(flags));
+ assert((imm & 0x03) == 0);
+ sf = INS_FLAGS_NOT_SET;
+
+ if (insOptAnyInc(opt))
+ {
+ if (insOptsPostInc(opt))
+ assert(imm > 0);
+ else // insOptsPreDec(opt)
+ assert(imm < 0);
+ }
+ else
+ {
+ assert(insOptsNone(opt));
+ }
+
+ if (unsigned_abs(imm) <= 0x03fc)
+ {
+ imm >>= 2;
+ fmt = IF_T2_G0;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+ }
+ break;
+
+ default:
+ unreached();
}
- assert((fmt == IF_T2_C0) ||
- (fmt == IF_T2_E0) ||
- (fmt == IF_T2_G0) );
+ assert((fmt == IF_T2_C0) || (fmt == IF_T2_E0) || (fmt == IF_T2_G0));
assert(sf != INS_FLAGS_DONT_CARE);
// 3-reg ops can't use the small instrdesc
- instrDescCns *id = emitAllocInstrCns(attr);
+ instrDescCns* id = emitAllocInstrCns(attr);
id->idSetIsLargeCns();
- id->idcCnsVal = imm;
+ id->idcCnsVal = imm;
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3367,38 +3317,34 @@ COMMON_THUMB2_LDST:
* Add an instruction referencing four registers.
*/
-void emitter::emitIns_R_R_R_R(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- regNumber reg4)
+void emitter::emitIns_R_R_R_R(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, regNumber reg4)
{
- insFormat fmt = IF_NONE;
- insFlags sf = INS_FLAGS_NOT_SET;
+ insFormat fmt = IF_NONE;
+ insFlags sf = INS_FLAGS_NOT_SET;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_smull:
- case INS_umull:
- case INS_smlal:
- case INS_umlal:
- assert(reg1 != reg2); // Illegal encoding
- fmt = IF_T2_F1;
- break;
- case INS_mla:
- case INS_mls:
- fmt = IF_T2_F2;
- break;
- default:
- unreached();
+ case INS_smull:
+ case INS_umull:
+ case INS_smlal:
+ case INS_umlal:
+ assert(reg1 != reg2); // Illegal encoding
+ fmt = IF_T2_F1;
+ break;
+ case INS_mla:
+ case INS_mls:
+ fmt = IF_T2_F2;
+ break;
+ default:
+ unreached();
}
assert((fmt == IF_T2_F1) || (fmt == IF_T2_F2));
- instrDesc * id = emitNewInstr(attr);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstr(attr);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3413,7 +3359,6 @@ void emitter::emitIns_R_R_R_R(instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction with a static data member operand. If 'size' is 0, the
@@ -3421,38 +3366,26 @@ void emitter::emitIns_R_R_R_R(instruction ins,
* value (e.g. "push offset clsvar", rather than "push dword ptr [clsvar]").
*/
-void emitter::emitIns_C (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs)
+void emitter::emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
NYI("emitIns_C");
}
-
/*****************************************************************************
*
* Add an instruction referencing stack-based local variable.
*/
-void emitter::emitIns_S (instruction ins,
- emitAttr attr,
- int varx,
- int offs)
+void emitter::emitIns_S(instruction ins, emitAttr attr, int varx, int offs)
{
NYI("emitIns_S");
}
-
/*****************************************************************************
*
* Add an instruction referencing a register and a stack-based local variable.
*/
-void emitter::emitIns_R_S (instruction ins,
- emitAttr attr,
- regNumber reg1,
- int varx,
- int offs)
+void emitter::emitIns_R_S(instruction ins, emitAttr attr, regNumber reg1, int varx, int offs)
{
if (ins == INS_mov)
{
@@ -3461,37 +3394,37 @@ void emitter::emitIns_R_S (instruction ins,
switch (ins)
{
- case INS_add:
- case INS_ldr:
- case INS_ldrh:
- case INS_ldrb:
- case INS_ldrsh:
- case INS_ldrsb:
- case INS_vldr:
- case INS_vmov:
- case INS_movw:
- case INS_movt:
- break;
+ case INS_add:
+ case INS_ldr:
+ case INS_ldrh:
+ case INS_ldrb:
+ case INS_ldrsh:
+ case INS_ldrsb:
+ case INS_vldr:
+ case INS_vmov:
+ case INS_movw:
+ case INS_movt:
+ break;
- case INS_lea:
- ins = INS_add;
- break;
+ case INS_lea:
+ ins = INS_add;
+ break;
- default:
- NYI("emitIns_R_S");
- return;
+ default:
+ NYI("emitIns_R_S");
+ return;
}
-
- insFormat fmt = IF_NONE;
- insFlags sf = INS_FLAGS_NOT_SET;
- regNumber reg2;
+
+ insFormat fmt = IF_NONE;
+ insFlags sf = INS_FLAGS_NOT_SET;
+ regNumber reg2;
/* Figure out the variable's frame position */
- int base;
- int disp;
+ int base;
+ int disp;
unsigned undisp;
- base = emitComp->lvaFrameAddress(varx, emitComp->funCurrentFunc()->funKind != FUNC_ROOT, &reg2, offs);
+ base = emitComp->lvaFrameAddress(varx, emitComp->funCurrentFunc()->funKind != FUNC_ROOT, &reg2, offs);
disp = base + offs;
undisp = unsigned_abs(disp);
@@ -3520,12 +3453,9 @@ void emitter::emitIns_R_S (instruction ins,
}
else if (emitInsIsLoadOrStore(ins))
{
- if (isLowRegister(reg1) &&
- (reg2 == REG_SP) &&
- (ins == INS_ldr) &&
- ((disp & 0x03fc) == disp && disp <= 0x03f8))
+ if (isLowRegister(reg1) && (reg2 == REG_SP) && (ins == INS_ldr) && ((disp & 0x03fc) == disp && disp <= 0x03f8))
{
- fmt = IF_T1_J2;
+ fmt = IF_T1_J2;
}
else if (disp >= 0 && disp <= 0x0ffb)
{
@@ -3537,17 +3467,15 @@ void emitter::emitIns_R_S (instruction ins,
}
else
{
- // Load disp into a register
- regNumber rsvdReg = codeGen->rsGetRsvdReg();
+ // Load disp into a register
+ regNumber rsvdReg = codeGen->rsGetRsvdReg();
emitIns_genStackOffset(rsvdReg, varx, offs);
fmt = IF_T2_E0;
}
}
else if (ins == INS_add)
{
- if (isLowRegister(reg1) &&
- (reg2 == REG_SP) &&
- ((disp & 0x03fc) == disp && disp <= 0x03f8))
+ if (isLowRegister(reg1) && (reg2 == REG_SP) && ((disp & 0x03fc) == disp && disp <= 0x03f8))
{
fmt = IF_T1_J2;
}
@@ -3555,10 +3483,10 @@ void emitter::emitIns_R_S (instruction ins,
{
if (disp < 0)
{
- ins = INS_sub;
+ ins = INS_sub;
disp = -disp;
}
- // add/sub => addw/subw instruction
+ // add/sub => addw/subw instruction
// Note that even when using the w prefix the immediate is still only 12 bits?
ins = (ins == INS_add) ? INS_addw : INS_subw;
fmt = IF_T2_M0;
@@ -3577,18 +3505,12 @@ void emitter::emitIns_R_S (instruction ins,
fmt = IF_T2_N;
}
- assert((fmt == IF_T1_J2) ||
- (fmt == IF_T2_E0) ||
- (fmt == IF_T2_H0) ||
- (fmt == IF_T2_K1) ||
- (fmt == IF_T2_L0) ||
- (fmt == IF_T2_N) ||
- (fmt == IF_T2_VLDST) ||
- (fmt == IF_T2_M0));
+ assert((fmt == IF_T1_J2) || (fmt == IF_T2_E0) || (fmt == IF_T2_H0) || (fmt == IF_T2_K1) || (fmt == IF_T2_L0) ||
+ (fmt == IF_T2_N) || (fmt == IF_T2_VLDST) || (fmt == IF_T2_M0));
assert(sf != INS_FLAGS_DONT_CARE);
- instrDesc * id = emitNewInstrCns(attr, disp);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstrCns(attr, disp);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3614,8 +3536,8 @@ void emitter::emitIns_R_S (instruction ins,
void emitter::emitIns_genStackOffset(regNumber r, int varx, int offs)
{
regNumber regBase;
- int base;
- int disp;
+ int base;
+ int disp;
base = emitComp->lvaFrameAddress(varx, emitComp->funCurrentFunc()->funKind != FUNC_ROOT, &regBase, offs);
disp = base + offs;
@@ -3632,11 +3554,7 @@ void emitter::emitIns_genStackOffset(regNumber r, int varx, int offs)
*
* Add an instruction referencing a stack-based local variable and a register
*/
-void emitter::emitIns_S_R (instruction ins,
- emitAttr attr,
- regNumber reg1,
- int varx,
- int offs)
+void emitter::emitIns_S_R(instruction ins, emitAttr attr, regNumber reg1, int varx, int offs)
{
if (ins == INS_mov)
{
@@ -3645,27 +3563,27 @@ void emitter::emitIns_S_R (instruction ins,
switch (ins)
{
- case INS_str:
- case INS_strh:
- case INS_strb:
- case INS_vstr:
- break;
+ case INS_str:
+ case INS_strh:
+ case INS_strb:
+ case INS_vstr:
+ break;
- default:
- NYI("emitIns_R_S");
- return;
+ default:
+ NYI("emitIns_R_S");
+ return;
}
- insFormat fmt = IF_NONE;
- insFlags sf = INS_FLAGS_NOT_SET;
- regNumber reg2;
+ insFormat fmt = IF_NONE;
+ insFlags sf = INS_FLAGS_NOT_SET;
+ regNumber reg2;
/* Figure out the variable's frame position */
- int base;
- int disp;
+ int base;
+ int disp;
unsigned undisp;
- base = emitComp->lvaFrameAddress(varx, emitComp->funCurrentFunc()->funKind != FUNC_ROOT, &reg2, offs);
+ base = emitComp->lvaFrameAddress(varx, emitComp->funCurrentFunc()->funKind != FUNC_ROOT, &reg2, offs);
disp = base + offs;
undisp = unsigned_abs(disp);
@@ -3692,12 +3610,9 @@ void emitter::emitIns_S_R (instruction ins,
return;
}
}
- else if (isLowRegister(reg1) &&
- (reg2 == REG_SP) &&
- (ins == INS_str) &&
- ((disp & 0x03fc) == disp && disp <= 0x03f8))
+ else if (isLowRegister(reg1) && (reg2 == REG_SP) && (ins == INS_str) && ((disp & 0x03fc) == disp && disp <= 0x03f8))
{
- fmt = IF_T1_J2;
+ fmt = IF_T1_J2;
}
else if (disp >= 0 && disp <= 0x0ffb)
{
@@ -3714,15 +3629,11 @@ void emitter::emitIns_S_R (instruction ins,
emitIns_genStackOffset(rsvdReg, varx, offs);
fmt = IF_T2_E0;
}
- assert((fmt == IF_T1_J2) ||
- (fmt == IF_T2_E0) ||
- (fmt == IF_T2_H0) ||
- (fmt == IF_T2_VLDST) ||
- (fmt == IF_T2_K1) );
+ assert((fmt == IF_T1_J2) || (fmt == IF_T2_E0) || (fmt == IF_T2_H0) || (fmt == IF_T2_VLDST) || (fmt == IF_T2_K1));
assert(sf != INS_FLAGS_DONT_CARE);
- instrDesc * id = emitNewInstrCns(attr, disp);
- insSize isz = emitInsSize(fmt);
+ instrDesc* id = emitNewInstrCns(attr, disp);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3743,30 +3654,20 @@ void emitter::emitIns_S_R (instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction referencing stack-based local variable and an immediate
*/
-void emitter::emitIns_S_I (instruction ins,
- emitAttr attr,
- int varx,
- int offs,
- int val)
+void emitter::emitIns_S_I(instruction ins, emitAttr attr, int varx, int offs, int val)
{
NYI("emitIns_S_I");
}
-
/*****************************************************************************
*
* Add an instruction with a register + static member operands.
*/
-void emitter::emitIns_R_C (instruction ins,
- emitAttr attr,
- regNumber reg,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs)
+void emitter::emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
if (ins == INS_mov)
{
@@ -3778,8 +3679,8 @@ void emitter::emitIns_R_C (instruction ins,
ins = INS_add;
}
- int doff = Compiler::eeGetJitDataOffs(fldHnd);
- ssize_t addr = NULL;
+ int doff = Compiler::eeGetJitDataOffs(fldHnd);
+ ssize_t addr = NULL;
if (doff >= 0)
{
@@ -3791,7 +3692,7 @@ void emitter::emitIns_R_C (instruction ins,
}
else if (fldHnd == FLD_GLOBAL_DS)
{
- addr = (ssize_t) offs;
+ addr = (ssize_t)offs;
offs = 0;
}
else
@@ -3802,15 +3703,15 @@ void emitter::emitIns_R_C (instruction ins,
NO_WAY("could not obtain address of static field");
}
- // We can use reg to load the constant address,
- // as long as it is not a floating point register
+ // We can use reg to load the constant address,
+ // as long as it is not a floating point register
regNumber regTmp = reg;
-
+
if (isFloatReg(regTmp))
{
#ifndef LEGACY_BACKEND
assert(!"emitIns_R_C() cannot be called with floating point target");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
regTmp = codeGen->regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg));
#endif // LEGACY_BACKEND
}
@@ -3824,29 +3725,24 @@ void emitter::emitIns_R_C (instruction ins,
}
}
-
/*****************************************************************************
*
* Add an instruction with a static member + register operands.
*/
-void emitter::emitIns_C_R (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- regNumber reg,
- int offs)
+void emitter::emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, regNumber reg, int offs)
{
#ifndef LEGACY_BACKEND
assert(!"emitIns_C_R not supported for RyuJIT backend");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
if (ins == INS_mov)
{
assert(!"Please use ins_Store() to select the correct instruction");
}
assert(emitInsIsStore(ins));
- int doff = Compiler::eeGetJitDataOffs(fldHnd);
- ssize_t addr = NULL;
+ int doff = Compiler::eeGetJitDataOffs(fldHnd);
+ ssize_t addr = NULL;
if (doff >= 0)
{
@@ -3858,7 +3754,7 @@ void emitter::emitIns_C_R (instruction ins,
}
else if (fldHnd == FLD_GLOBAL_DS)
{
- addr = (ssize_t) offs;
+ addr = (ssize_t)offs;
offs = 0;
}
else
@@ -3878,17 +3774,12 @@ void emitter::emitIns_C_R (instruction ins,
#endif // LEGACY_BACKEND
}
-
/*****************************************************************************
*
* Add an instruction with a static member + constant.
*/
-void emitter::emitIns_C_I (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs,
- ssize_t val)
+void emitter::emitIns_C_I(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, int offs, ssize_t val)
{
NYI("emitIns_C_I");
}
@@ -3898,24 +3789,19 @@ void emitter::emitIns_C_I (instruction ins,
* The following adds instructions referencing address modes.
*/
-void emitter::emitIns_I_AR (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- int offs,
- int memCookie,
- void * clsCookie)
+void emitter::emitIns_I_AR(
+ instruction ins, emitAttr attr, int val, regNumber reg, int offs, int memCookie, void* clsCookie)
{
NYI("emitIns_I_AR");
}
-void emitter::emitIns_R_AR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- int offs,
- int memCookie /* = 0 */,
- void * clsCookie /* = NULL */)
+void emitter::emitIns_R_AR(instruction ins,
+ emitAttr attr,
+ regNumber ireg,
+ regNumber reg,
+ int offs,
+ int memCookie /* = 0 */,
+ void* clsCookie /* = NULL */)
{
if (ins == INS_mov)
{
@@ -3932,9 +3818,9 @@ void emitter::emitIns_R_AR (instruction ins,
{
#ifndef LEGACY_BACKEND
assert(!"emitIns_R_AR: immediate doesn't fit in the instruction");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
// Load val into a register
- regNumber immReg = codeGen->regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(ireg) & ~genRegMask(reg) );
+ regNumber immReg = codeGen->regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(ireg) & ~genRegMask(reg));
codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, immReg, (ssize_t)offs);
emitIns_R_R_R(INS_add, attr, ireg, reg, immReg);
#endif // LEGACY_BACKEND
@@ -3961,22 +3847,19 @@ void emitter::emitIns_R_AR (instruction ins,
NYI("emitIns_R_AR");
}
-void emitter::emitIns_R_AI (instruction ins,
- emitAttr attr,
- regNumber ireg,
- ssize_t disp)
+void emitter::emitIns_R_AI(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp)
{
if (emitInsIsLoad(ins))
{
- // We can use ireg to load the constant address,
- // as long as it is not a floating point register
+ // We can use ireg to load the constant address,
+ // as long as it is not a floating point register
regNumber regTmp = ireg;
-
+
if (isFloatReg(regTmp))
{
#ifndef LEGACY_BACKEND
assert(!"emitIns_R_AI with floating point reg");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
regTmp = codeGen->regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(ireg));
#endif // LEGACY_BACKEND
}
@@ -3988,13 +3871,13 @@ void emitter::emitIns_R_AI (instruction ins,
NYI("emitIns_R_AI");
}
-void emitter::emitIns_AR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- int offs,
- int memCookie /* = 0 */,
- void * clsCookie /* = NULL */)
+void emitter::emitIns_AR_R(instruction ins,
+ emitAttr attr,
+ regNumber ireg,
+ regNumber reg,
+ int offs,
+ int memCookie /* = 0 */,
+ void* clsCookie /* = NULL */)
{
if (ins == INS_mov)
{
@@ -4003,19 +3886,14 @@ void emitter::emitIns_AR_R (instruction ins,
emitIns_R_R_I(ins, attr, ireg, reg, offs);
}
-void emitter::emitIns_R_ARR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- int disp)
+void emitter::emitIns_R_ARR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, int disp)
{
if (ins == INS_mov)
{
assert(!"Please use ins_Load() to select the correct instruction");
}
- if (ins == INS_lea)
+ if (ins == INS_lea)
{
emitIns_R_R_R(INS_add, attr, ireg, reg, rg2);
if (disp != 0)
@@ -4030,17 +3908,12 @@ void emitter::emitIns_R_ARR (instruction ins,
{
emitIns_R_R_R_I(ins, attr, ireg, reg, rg2, 0, INS_FLAGS_DONT_CARE, INS_OPTS_NONE);
return;
- }
+ }
}
assert(!"emitIns_R_ARR: Unexpected instruction");
}
-void emitter::emitIns_ARR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- int disp)
+void emitter::emitIns_ARR_R(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, int disp)
{
if (ins == INS_mov)
{
@@ -4062,24 +3935,19 @@ void emitter::emitIns_ARR_R (instruction ins,
assert(!"emitIns_ARR_R: Unexpected instruction");
}
-void emitter::emitIns_R_ARX (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- unsigned mul,
- int disp)
+void emitter::emitIns_R_ARX(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, unsigned mul, int disp)
{
if (ins == INS_mov)
{
assert(!"Please use ins_Load() to select the correct instruction");
}
- unsigned shift = genLog2((unsigned) mul);
+ unsigned shift = genLog2((unsigned)mul);
if ((ins == INS_lea) || emitInsIsLoad(ins))
{
- if (ins == INS_lea)
+ if (ins == INS_lea)
{
ins = INS_add;
}
@@ -4096,7 +3964,7 @@ void emitter::emitIns_R_ARX (instruction ins,
{
// If all of the below things are true we can generate a Thumb-1 add instruction
// followed by a Thumb-2 add instruction
- // We also useForm1 when reg is a low register since the second instruction
+ // We also useForm1 when reg is a low register since the second instruction
// can then always be generated using a Thumb-1 add
//
if ((reg >= REG_R8) && (ireg < REG_R8) && (rg2 < REG_R8) && ((disp >> shift) <= 7))
@@ -4111,10 +3979,8 @@ void emitter::emitIns_R_ARX (instruction ins,
// Thumb-1 instruction add Rd, Rx, disp>>shift
// Thumb-2 instructions ldr Rd, Rb, Rd LSL shift
//
- emitIns_R_R_I(INS_add, EA_4BYTE, ireg, rg2, disp >> shift);
- emitIns_R_R_R_I(ins, attr, ireg, reg,
- ireg, shift,
- INS_FLAGS_NOT_SET, INS_OPTS_LSL);
+ emitIns_R_R_I(INS_add, EA_4BYTE, ireg, rg2, disp >> shift);
+ emitIns_R_R_R_I(ins, attr, ireg, reg, ireg, shift, INS_FLAGS_NOT_SET, INS_OPTS_LSL);
}
else
{
@@ -4122,10 +3988,8 @@ void emitter::emitIns_R_ARX (instruction ins,
// Thumb-2 instruction add Rd, Rb, Rx LSL shift
// Thumb-1/2 instructions ldr Rd, Rd, disp
//
- emitIns_R_R_R_I(INS_add, attr, ireg, reg,
- rg2, shift,
- INS_FLAGS_NOT_SET, INS_OPTS_LSL);
- emitIns_R_R_I(ins, attr, ireg, ireg, disp);
+ emitIns_R_R_R_I(INS_add, attr, ireg, reg, rg2, shift, INS_FLAGS_NOT_SET, INS_OPTS_LSL);
+ emitIns_R_R_I(ins, attr, ireg, ireg, disp);
}
return;
}
@@ -4139,12 +4003,12 @@ void emitter::emitIns_R_ARX (instruction ins,
* Record that a jump instruction uses the short encoding
*
*/
-void emitter::emitSetShortJump(instrDescJmp * id)
+void emitter::emitSetShortJump(instrDescJmp* id)
{
if (id->idjKeepLong)
return;
- if (emitIsCondJump(id))
+ if (emitIsCondJump(id))
{
id->idInsFmt(IF_T1_K);
}
@@ -4170,7 +4034,7 @@ void emitter::emitSetShortJump(instrDescJmp * id)
id->idjShort = true;
#if DEBUG_EMIT
- if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+ if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
printf("[8] Converting jump %u to short\n", id->idDebugOnlyInfo()->idNum);
}
@@ -4185,13 +4049,13 @@ void emitter::emitSetShortJump(instrDescJmp * id)
* Record that a jump instruction uses the medium encoding
*
*/
-void emitter::emitSetMediumJump(instrDescJmp * id)
+void emitter::emitSetMediumJump(instrDescJmp* id)
{
if (id->idjKeepLong)
return;
#if DEBUG_EMIT
- if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+ if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
printf("[9] Converting jump %u to medium\n", id->idDebugOnlyInfo()->idNum);
}
@@ -4217,11 +4081,9 @@ void emitter::emitSetMediumJump(instrDescJmp * id)
* branch. Thus, we can handle branch offsets of imm24 instead of just imm20.
*/
-void emitter::emitIns_J(instruction ins,
- BasicBlock * dst,
- int instrCount /* = 0 */)
+void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0 */)
{
- insFormat fmt = IF_NONE;
+ insFormat fmt = IF_NONE;
if (dst != NULL)
{
@@ -4235,35 +4097,34 @@ void emitter::emitIns_J(instruction ins,
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_b:
- fmt = IF_T2_J2; /* Assume the jump will be long */
- break;
+ case INS_b:
+ fmt = IF_T2_J2; /* Assume the jump will be long */
+ break;
- case INS_beq:
- case INS_bne:
- case INS_bhs:
- case INS_blo:
- case INS_bmi:
- case INS_bpl:
- case INS_bvs:
- case INS_bvc:
- case INS_bhi:
- case INS_bls:
- case INS_bge:
- case INS_blt:
- case INS_bgt:
- case INS_ble:
- fmt = IF_LARGEJMP; /* Assume the jump will be long */
- break;
-
- default:
- unreached();
+ case INS_beq:
+ case INS_bne:
+ case INS_bhs:
+ case INS_blo:
+ case INS_bmi:
+ case INS_bpl:
+ case INS_bvs:
+ case INS_bvc:
+ case INS_bhi:
+ case INS_bls:
+ case INS_bge:
+ case INS_blt:
+ case INS_bgt:
+ case INS_ble:
+ fmt = IF_LARGEJMP; /* Assume the jump will be long */
+ break;
+
+ default:
+ unreached();
}
- assert((fmt == IF_LARGEJMP) ||
- (fmt == IF_T2_J2));
+ assert((fmt == IF_LARGEJMP) || (fmt == IF_T2_J2));
- instrDescJmp * id = emitNewInstrJmp();
- insSize isz = emitInsSize(fmt);
+ instrDescJmp* id = emitNewInstrJmp();
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4283,10 +4144,10 @@ void emitter::emitIns_J(instruction ins,
if (dst != NULL)
{
id->idAddr()->iiaBBlabel = dst;
- id->idjKeepLong = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
+ id->idjKeepLong = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
#ifdef DEBUG
- if (emitComp->opts.compLongAddress) // Force long branches
+ if (emitComp->opts.compLongAddress) // Force long branches
id->idjKeepLong = 1;
#endif // DEBUG
}
@@ -4301,13 +4162,13 @@ void emitter::emitIns_J(instruction ins,
/* Record the jump's IG and offset within it */
- id->idjIG = emitCurIG;
- id->idjOffs = emitCurIGsize;
+ id->idjIG = emitCurIG;
+ id->idjOffs = emitCurIGsize;
/* Append this jump to this IG's jump list */
- id->idjNext = emitCurIGjmpList;
- emitCurIGjmpList = id;
+ id->idjNext = emitCurIGjmpList;
+ emitCurIGjmpList = id;
#if EMITTER_STATS
emitTotalIGjmps++;
@@ -4315,9 +4176,9 @@ void emitter::emitIns_J(instruction ins,
/* Figure out the max. size of the jump/call instruction */
- if (!id->idjKeepLong)
+ if (!id->idjKeepLong)
{
- insGroup * tgt = NULL;
+ insGroup* tgt = NULL;
/* Can we guess at the jump distance? */
@@ -4326,10 +4187,10 @@ void emitter::emitIns_J(instruction ins,
tgt = (insGroup*)emitCodeGetCookie(dst);
}
- if (tgt)
+ if (tgt)
{
- UNATIVE_OFFSET srcOffs;
- int jmpDist;
+ UNATIVE_OFFSET srcOffs;
+ int jmpDist;
assert(JMP_SIZE_SMALL == JCC_SIZE_SMALL);
@@ -4339,35 +4200,36 @@ void emitter::emitIns_J(instruction ins,
/* Compute the distance estimate */
- jmpDist = srcOffs - tgt->igOffs; assert(jmpDist >= 0);
- jmpDist += 4; // Adjustment for ARM PC
+ jmpDist = srcOffs - tgt->igOffs;
+ assert(jmpDist >= 0);
+ jmpDist += 4; // Adjustment for ARM PC
switch (fmt)
{
- case IF_T2_J2:
- if (JMP_DIST_SMALL_MAX_NEG <= -jmpDist)
- {
- /* This jump surely will be short */
- emitSetShortJump(id);
- }
- break;
+ case IF_T2_J2:
+ if (JMP_DIST_SMALL_MAX_NEG <= -jmpDist)
+ {
+ /* This jump surely will be short */
+ emitSetShortJump(id);
+ }
+ break;
- case IF_LARGEJMP:
- if (JCC_DIST_SMALL_MAX_NEG <= -jmpDist)
- {
- /* This jump surely will be short */
- emitSetShortJump(id);
- }
- else if (JCC_DIST_MEDIUM_MAX_NEG <= -jmpDist)
- {
- /* This jump surely will be medium */
- emitSetMediumJump(id);
- }
- break;
+ case IF_LARGEJMP:
+ if (JCC_DIST_SMALL_MAX_NEG <= -jmpDist)
+ {
+ /* This jump surely will be short */
+ emitSetShortJump(id);
+ }
+ else if (JCC_DIST_MEDIUM_MAX_NEG <= -jmpDist)
+ {
+ /* This jump surely will be medium */
+ emitSetMediumJump(id);
+ }
+ break;
- default:
- unreached();
- break;
+ default:
+ unreached();
+ break;
}
}
}
@@ -4380,35 +4242,31 @@ void emitter::emitIns_J(instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add a label instruction.
*/
-void emitter::emitIns_R_L (instruction ins,
- emitAttr attr,
- BasicBlock * dst,
- regNumber reg)
+void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
- insFormat fmt = IF_NONE;
+ insFormat fmt = IF_NONE;
assert(dst->bbFlags & BBF_JMP_TARGET);
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_movt:
- case INS_movw:
- fmt = IF_T2_N1;
- break;
- default:
- unreached();
+ case INS_movt:
+ case INS_movw:
+ fmt = IF_T2_N1;
+ break;
+ default:
+ unreached();
}
assert(fmt == IF_T2_N1);
- instrDescJmp * id = emitNewInstrJmp();
- insSize isz = emitInsSize(fmt);
+ instrDescJmp* id = emitNewInstrJmp();
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idReg1(reg);
@@ -4424,19 +4282,19 @@ void emitter::emitIns_R_L (instruction ins,
#endif // DEBUG
id->idAddr()->iiaBBlabel = dst;
- id->idjShort = false;
- id->idjKeepLong = true;
+ id->idjShort = false;
+ id->idjKeepLong = true;
/* Record the jump's IG and offset within it */
- id->idjIG = emitCurIG;
- id->idjOffs = emitCurIGsize;
+ id->idjIG = emitCurIG;
+ id->idjOffs = emitCurIGsize;
/* Append this jump to this IG's jump list */
- id->idjNext = emitCurIGjmpList;
- emitCurIGjmpList = id;
-
+ id->idjNext = emitCurIGjmpList;
+ emitCurIGjmpList = id;
+
// Set the relocation flags - these give hint to zap to perform
// relocation of the specified 32bit address.
id->idSetRelocFlags(attr);
@@ -4449,22 +4307,18 @@ void emitter::emitIns_R_L (instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add a data label instruction.
*/
-void emitter::emitIns_R_D (instruction ins,
- emitAttr attr,
- unsigned offs,
- regNumber reg)
+void emitter::emitIns_R_D(instruction ins, emitAttr attr, unsigned offs, regNumber reg)
{
noway_assert((ins == INS_movw) || (ins == INS_movt));
- insFormat fmt = IF_T2_N2;
- instrDesc * id = emitNewInstrSC(attr, offs);
- insSize isz = emitInsSize(fmt);
+ insFormat fmt = IF_T2_N2;
+ instrDesc* id = emitNewInstrSC(attr, offs);
+ insSize isz = emitInsSize(fmt);
id->idIns(ins);
id->idReg1(reg);
@@ -4484,33 +4338,32 @@ void emitter::emitIns_R_D (instruction ins,
appendToCurIG(id);
}
-void emitter::emitIns_J_R (instruction ins,
- emitAttr attr,
- BasicBlock * dst,
- regNumber reg)
+void emitter::emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
assert(dst->bbFlags & BBF_JMP_TARGET);
- instrDescJmp *id;
- if (ins == INS_adr) {
+ instrDescJmp* id;
+ if (ins == INS_adr)
+ {
id = emitNewInstrLbl();
id->idIns(INS_adr);
id->idInsFmt(IF_T2_M1);
id->idInsSize(emitInsSize(IF_T2_M1));
- id->idAddr()->iiaBBlabel = dst;
+ id->idAddr()->iiaBBlabel = dst;
id->idReg1(reg);
id->idReg2(REG_PC);
/* Assume the label reference will be long */
- id->idjShort = 0;
- id->idjKeepLong = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
+ id->idjShort = 0;
+ id->idjKeepLong = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
}
- else {
+ else
+ {
assert(ins == INS_cbz || INS_cbnz);
assert(isLowRegister(reg));
- id = emitNewInstrJmp();
+ id = emitNewInstrJmp();
id->idIns(ins);
id->idInsFmt(IF_T1_I);
@@ -4522,16 +4375,16 @@ void emitter::emitIns_J_R (instruction ins,
id->idAddr()->iiaBBlabel = dst;
id->idjKeepLong = false;
}
-
+
/* Record the jump's IG and offset within it */
- id->idjIG = emitCurIG;
- id->idjOffs = emitCurIGsize;
+ id->idjIG = emitCurIG;
+ id->idjOffs = emitCurIGsize;
/* Append this jump to this IG's jump list */
- id->idjNext = emitCurIGjmpList;
- emitCurIGjmpList = id;
+ id->idjNext = emitCurIGjmpList;
+ emitCurIGjmpList = id;
#if EMITTER_STATS
emitTotalIGjmps++;
@@ -4560,23 +4413,23 @@ void emitter::emitIns_J_R (instruction ins,
* Please consult the "debugger team notification" comment in genFnProlog().
*/
-void emitter::emitIns_Call(EmitCallType callType,
- CORINFO_METHOD_HANDLE methHnd, // used for pretty printing
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
- void* addr,
- ssize_t argSize,
- emitAttr retSize,
- VARSET_VALARG_TP ptrVars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- IL_OFFSETX ilOffset /* = BAD_IL_OFFSET */,
- regNumber ireg /* = REG_NA */,
- regNumber xreg /* = REG_NA */,
- unsigned xmul /* = 0 */,
- int disp /* = 0 */,
- bool isJump /* = false */,
- bool isNoGC /* = false */,
- bool isProfLeaveCB /* = false */)
+void emitter::emitIns_Call(EmitCallType callType,
+ CORINFO_METHOD_HANDLE methHnd, // used for pretty printing
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
+ void* addr,
+ ssize_t argSize,
+ emitAttr retSize,
+ VARSET_VALARG_TP ptrVars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ IL_OFFSETX ilOffset /* = BAD_IL_OFFSET */,
+ regNumber ireg /* = REG_NA */,
+ regNumber xreg /* = REG_NA */,
+ unsigned xmul /* = 0 */,
+ int disp /* = 0 */,
+ bool isJump /* = false */,
+ bool isNoGC /* = false */,
+ bool isProfLeaveCB /* = false */)
{
/* Sanity check the arguments depending on callType */
@@ -4584,18 +4437,17 @@ void emitter::emitIns_Call(EmitCallType callType,
assert((callType != EC_FUNC_TOKEN && callType != EC_FUNC_ADDR) ||
(ireg == REG_NA && xreg == REG_NA && xmul == 0 && disp == 0));
assert(callType < EC_INDIR_R || addr == NULL);
- assert(callType != EC_INDIR_R ||
- (ireg < REG_COUNT && xreg == REG_NA && xmul == 0 && disp == 0));
+ assert(callType != EC_INDIR_R || (ireg < REG_COUNT && xreg == REG_NA && xmul == 0 && disp == 0));
// ARM never uses these
assert(xreg == REG_NA && xmul == 0 && disp == 0);
// Our stack level should be always greater than the bytes of arguments we push. Just
// a sanity test.
- assert((unsigned) abs(argSize) <= codeGen->genStackLevel);
+ assert((unsigned)abs(argSize) <= codeGen->genStackLevel);
- int argCnt;
- instrDesc *id;
+ int argCnt;
+ instrDesc* id;
/* This is the saved set of registers after a normal call */
regMaskTP savedSet = RBM_CALLEE_SAVED;
@@ -4629,24 +4481,24 @@ void emitter::emitIns_Call(EmitCallType callType,
gcrefRegs &= savedSet;
byrefRegs &= savedSet;
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
{
printf("Call: GCvars=%s ", VarSetOps::ToString(emitComp, ptrVars));
dumpConvertedVarSet(emitComp, ptrVars);
printf(", gcrefRegs=");
printRegMaskInt(gcrefRegs);
- emitDispRegSet (gcrefRegs);
+ emitDispRegSet(gcrefRegs);
printf(", byrefRegs=");
printRegMaskInt(byrefRegs);
- emitDispRegSet (byrefRegs);
+ emitDispRegSet(byrefRegs);
printf("\n");
}
#endif
- assert( argSize % (int)sizeof(void*) == 0);
+ assert(argSize % (int)sizeof(void*) == 0);
argCnt = argSize / (int)sizeof(void*);
-
+
#ifdef DEBUGGING_SUPPORT
/* Managed RetVal: emit sequence point for the call */
if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
@@ -4669,23 +4521,22 @@ void emitter::emitIns_Call(EmitCallType callType,
Indir. call with GC vars 5,768
*/
- if (callType >= EC_INDIR_R)
+ if (callType >= EC_INDIR_R)
{
/* Indirect call, virtual calls */
assert(callType == EC_INDIR_R);
- id = emitNewInstrCallInd(argCnt, disp, ptrVars, gcrefRegs, byrefRegs, retSize);
+ id = emitNewInstrCallInd(argCnt, disp, ptrVars, gcrefRegs, byrefRegs, retSize);
}
else
{
/* Helper/static/nonvirtual/function calls (direct or through handle),
and calls to an absolute addr. */
- assert(callType == EC_FUNC_TOKEN ||
- callType == EC_FUNC_ADDR);
+ assert(callType == EC_FUNC_TOKEN || callType == EC_FUNC_ADDR);
- id = emitNewInstrCallDir(argCnt, ptrVars, gcrefRegs, byrefRegs, retSize);
+ id = emitNewInstrCallDir(argCnt, ptrVars, gcrefRegs, byrefRegs, retSize);
}
/* Update the emitter's live GC ref sets */
@@ -4702,38 +4553,37 @@ void emitter::emitIns_Call(EmitCallType callType,
/* Record the address: method, indirection, or funcptr */
- if (callType > EC_FUNC_ADDR)
+ if (callType > EC_FUNC_ADDR)
{
/* This is an indirect call (either a virtual call or func ptr call) */
switch (callType)
{
- case EC_INDIR_R: // the address is in a register
+ case EC_INDIR_R: // the address is in a register
- id->idSetIsCallRegPtr();
+ id->idSetIsCallRegPtr();
- if (isJump)
- {
- ins = INS_bx; // INS_bx Reg
- }
- else
- {
- ins = INS_blx; // INS_blx Reg
- }
- fmt = IF_T1_D2;
+ if (isJump)
+ {
+ ins = INS_bx; // INS_bx Reg
+ }
+ else
+ {
+ ins = INS_blx; // INS_blx Reg
+ }
+ fmt = IF_T1_D2;
- id->idIns(ins);
- id->idInsFmt(fmt);
- id->idInsSize(emitInsSize(fmt));
- id->idReg3(ireg);
- assert(xreg == REG_NA);
- break;
+ id->idIns(ins);
+ id->idInsFmt(fmt);
+ id->idInsSize(emitInsSize(fmt));
+ id->idReg3(ireg);
+ assert(xreg == REG_NA);
+ break;
- default:
- NO_WAY("unexpected instruction");
- break;
+ default:
+ NO_WAY("unexpected instruction");
+ break;
}
-
}
else
{
@@ -4746,17 +4596,17 @@ void emitter::emitIns_Call(EmitCallType callType,
if (isJump)
{
- ins = INS_b; // INS_b imm24
+ ins = INS_b; // INS_b imm24
}
else
{
- ins = INS_bl; // INS_bl imm24
+ ins = INS_bl; // INS_bl imm24
}
fmt = IF_T2_J3;
id->idIns(ins);
- id->idInsFmt(fmt);
+ id->idInsFmt(fmt);
id->idInsSize(emitInsSize(fmt));
id->idAddr()->iiaAddr = (BYTE*)addr;
@@ -4783,18 +4633,19 @@ void emitter::emitIns_Call(EmitCallType callType,
#endif
}
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
{
- if (id->idIsLargeCall())
+ if (id->idIsLargeCall())
{
- printf("[%02u] Rec call GC vars = %s\n", id->idDebugOnlyInfo()->idNum, VarSetOps::ToString(emitComp, ((instrDescCGCA*)id)->idcGCvars));
+ printf("[%02u] Rec call GC vars = %s\n", id->idDebugOnlyInfo()->idNum,
+ VarSetOps::ToString(emitComp, ((instrDescCGCA*)id)->idcGCvars));
}
}
#endif
#if defined(DEBUG) || defined(LATE_DISASM)
- id->idDebugOnlyInfo()->idMemCookie = (size_t) methHnd; // method token
+ id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token
id->idDebugOnlyInfo()->idClsCookie = 0;
id->idDebugOnlyInfo()->idCallSig = sigInfo;
#endif
@@ -4812,11 +4663,11 @@ void emitter::emitIns_Call(EmitCallType callType,
/*****************************************************************************
*
- * Returns an encoding for the specified register (any-reg) to be used in
- * a Thumb-1 encoding in the M4 position
+ * Returns an encoding for the specified register (any-reg) to be used in
+ * a Thumb-1 encoding in the M4 position
*/
-inline unsigned insEncodeRegT1_M4(regNumber reg)
+inline unsigned insEncodeRegT1_M4(regNumber reg)
{
assert(reg < REG_STK);
@@ -4825,11 +4676,11 @@ inline unsigned insEncodeRegT1_M4(regNumber reg)
/*****************************************************************************
*
- * Returns an encoding for the specified register (any-reg) to be used in
- * a Thumb-1 encoding in the D4 position
+ * Returns an encoding for the specified register (any-reg) to be used in
+ * a Thumb-1 encoding in the D4 position
*/
-inline unsigned insEncodeRegT1_D4(regNumber reg)
+inline unsigned insEncodeRegT1_D4(regNumber reg)
{
assert(reg < REG_STK);
@@ -4838,11 +4689,11 @@ inline unsigned insEncodeRegT1_D4(regNumber reg)
/*****************************************************************************
*
- * Returns an encoding for the specified register (low-only) to be used in
- * a Thumb-1 encoding in the M3 position
+ * Returns an encoding for the specified register (low-only) to be used in
+ * a Thumb-1 encoding in the M3 position
*/
-inline unsigned insEncodeRegT1_M3(regNumber reg)
+inline unsigned insEncodeRegT1_M3(regNumber reg)
{
assert(reg < REG_R8);
@@ -4851,11 +4702,11 @@ inline unsigned insEncodeRegT1_M3(regNumber reg)
/*****************************************************************************
*
- * Returns an encoding for the specified register (low-only) to be used in
- * a Thumb-1 encoding in the N3 position
+ * Returns an encoding for the specified register (low-only) to be used in
+ * a Thumb-1 encoding in the N3 position
*/
-inline unsigned insEncodeRegT1_N3(regNumber reg)
+inline unsigned insEncodeRegT1_N3(regNumber reg)
{
assert(reg < REG_R8);
@@ -4864,11 +4715,11 @@ inline unsigned insEncodeRegT1_N3(regNumber reg)
/*****************************************************************************
*
- * Returns an encoding for the specified register (low-only) to be used in
- * a Thumb-1 encoding in the D3 position
+ * Returns an encoding for the specified register (low-only) to be used in
+ * a Thumb-1 encoding in the D3 position
*/
-inline unsigned insEncodeRegT1_D3(regNumber reg)
+inline unsigned insEncodeRegT1_D3(regNumber reg)
{
assert(reg < REG_R8);
@@ -4876,11 +4727,11 @@ inline unsigned insEncodeRegT1_D3(regNumber reg)
}
/*****************************************************************************
*
- * Returns an encoding for the specified register (low-only) to be used in
- * a Thumb-1 encoding in the DI position
+ * Returns an encoding for the specified register (low-only) to be used in
+ * a Thumb-1 encoding in the DI position
*/
-inline unsigned insEncodeRegT1_DI(regNumber reg)
+inline unsigned insEncodeRegT1_DI(regNumber reg)
{
assert(reg < REG_R8);
@@ -4889,11 +4740,11 @@ inline unsigned insEncodeRegT1_DI(regNumber reg)
/*****************************************************************************
*
- * Returns an encoding for the specified register to be used in
- * a Thumb-2 encoding in the N position
+ * Returns an encoding for the specified register to be used in
+ * a Thumb-2 encoding in the N position
*/
-inline unsigned insEncodeRegT2_N(regNumber reg)
+inline unsigned insEncodeRegT2_N(regNumber reg)
{
assert(reg < REG_STK);
@@ -4911,7 +4762,7 @@ inline unsigned floatRegIndex(regNumber reg, int size)
assert(emitter::isFloatReg(reg));
unsigned result = reg - REG_F0;
-
+
// the assumption here is that the register F8 also refers to D4
if (size == EA_8BYTE)
{
@@ -4921,19 +4772,19 @@ inline unsigned floatRegIndex(regNumber reg, int size)
return result;
}
-// variant: SOME arm VFP instructions use the convention that
+// variant: SOME arm VFP instructions use the convention that
// for doubles, the split bit holds the msb of the register index
// for singles it holds the lsb
-// excerpt : d = if dp_operation then UInt(D:Vd)
+// excerpt : d = if dp_operation then UInt(D:Vd)
// if single UInt(Vd:D);
-inline unsigned floatRegEncoding(unsigned index, int size, bool variant=false)
+inline unsigned floatRegEncoding(unsigned index, int size, bool variant = false)
{
if (!variant || size == EA_8BYTE)
return index;
- else
+ else
{
- return ((index&1 ) << 4) | (index >> 1);
+ return ((index & 1) << 4) | (index >> 1);
}
}
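
A quick worked example of the split-bit rule above (the register indices are chosen only for illustration; EA_4BYTE/EA_8BYTE are the emitAttr sizes used throughout this file):

// For singles the low bit of the index becomes the high (split) bit:
//   S9: ((9 & 1) << 4) | (9 >> 1)  ==  0x10 | 0x04  ==  0x14   (Vd = 4, D = 1)
// For doubles the index passes through unchanged:
//   D5: 5
unsigned encS9 = floatRegEncoding(9, EA_4BYTE, /* variant */ true); // 0x14
unsigned encD5 = floatRegEncoding(5, EA_8BYTE, /* variant */ true); // 5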
@@ -4941,7 +4792,7 @@ inline unsigned floatRegEncoding(unsigned index, int size, bool variant=false)
inline unsigned insEncodeRegT2_VectorM(regNumber reg, int size, bool variant)
{
unsigned enc = floatRegIndex(reg, size);
- enc = floatRegEncoding(enc, size, variant);
+ enc = floatRegEncoding(enc, size, variant);
return ((enc & 0xf) << 0) | ((enc & 0x10) << 1);
}
@@ -4949,7 +4800,7 @@ inline unsigned insEncodeRegT2_VectorM(regNumber reg, int size, bool variant)
inline unsigned insEncodeRegT2_VectorN(regNumber reg, int size, bool variant)
{
unsigned enc = floatRegIndex(reg, size);
- enc = floatRegEncoding(enc, size, variant);
+ enc = floatRegEncoding(enc, size, variant);
return ((enc & 0xf) << 16) | ((enc & 0x10) << 3);
}
@@ -4957,18 +4808,17 @@ inline unsigned insEncodeRegT2_VectorN(regNumber reg, int size, bool variant)
inline unsigned insEncodeRegT2_VectorD(regNumber reg, int size, bool variant)
{
unsigned enc = floatRegIndex(reg, size);
- enc = floatRegEncoding(enc, size, variant);
+ enc = floatRegEncoding(enc, size, variant);
return ((enc & 0xf) << 12) | ((enc & 0x10) << 18);
}
-
/*****************************************************************************
*
- * Returns an encoding for the specified register to be used in
- * a Thumb-2 encoding in the T position
+ * Returns an encoding for the specified register to be used in
+ * a Thumb-2 encoding in the T position
*/
-inline unsigned insEncodeRegT2_T(regNumber reg)
+inline unsigned insEncodeRegT2_T(regNumber reg)
{
assert(reg < REG_STK);
@@ -4977,24 +4827,24 @@ inline unsigned insEncodeRegT2_T(regNumber reg)
/*****************************************************************************
*
- * Returns an encoding for the specified register to be used in
- * a Thumb-2 encoding in the D position
+ * Returns an encoding for the specified register to be used in
+ * a Thumb-2 encoding in the D position
*/
-inline unsigned insEncodeRegT2_D(regNumber reg)
+inline unsigned insEncodeRegT2_D(regNumber reg)
{
assert(reg < REG_STK);
- return reg << 8;
+ return reg << 8;
}
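
For orientation, each of these helpers shifts a register number into one fixed field of the 32-bit Thumb-2 code word; a caller typically ORs several of them onto the base opcode before the word is written out. A rough sketch of that composition (the variable names are placeholders, and which fields apply depends on the instruction format):

size_t code = emitInsCode(ins, fmt);      // base opcode bits for this format
code |= insEncodeRegT2_D(dstReg);         // destination field (bit 8, per above)
code |= insEncodeRegT2_N(srcReg1);        // first source register field
code |= insEncodeRegT2_M(srcReg2);        // second source register field
dst += emitOutput_Thumb2Instr(dst, code); // emitted as two 16-bit halfwords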
/*****************************************************************************
*
- * Returns an encoding for the specified register to be used in
- * a Thumb-2 encoding in the M position
+ * Returns an encoding for the specified register to be used in
+ * a Thumb-2 encoding in the M position
*/
-inline unsigned insEncodeRegT2_M(regNumber reg)
+inline unsigned insEncodeRegT2_M(regNumber reg)
{
assert(reg < REG_STK);
@@ -5006,7 +4856,7 @@ inline unsigned insEncodeRegT2_M(regNumber reg)
* Returns the encoding for the Set Flags bit to be used in a Thumb-2 encoding
*/
-unsigned emitter::insEncodeSetFlags(insFlags sf)
+unsigned emitter::insEncodeSetFlags(insFlags sf)
{
if (sf == INS_FLAGS_SET)
return (1 << 20);
@@ -5019,7 +4869,7 @@ unsigned emitter::insEncodeSetFlags(insFlags sf)
* Returns the encoding for the Shift Type bits to be used in a Thumb-2 encoding
*/
-unsigned emitter::insEncodeShiftOpts(insOpts opt)
+unsigned emitter::insEncodeShiftOpts(insOpts opt)
{
if (opt == INS_OPTS_NONE)
return 0;
@@ -5036,60 +4886,60 @@ unsigned emitter::insEncodeShiftOpts(insOpts opt)
assert(!"Invalid insOpts");
return 0;
-}
+}
/*****************************************************************************
*
* Returns the encoding for the PUW bits to be used in a T2_G0 Thumb-2 encoding
*/
-unsigned emitter::insEncodePUW_G0(insOpts opt, int imm)
+unsigned emitter::insEncodePUW_G0(insOpts opt, int imm)
{
unsigned result = 0;
if (opt != INS_OPTS_LDST_POST_INC)
- result |= (1 << 24); // The P bit
+ result |= (1 << 24); // The P bit
if (imm >= 0)
- result |= (1 << 23); // The U bit
-
+ result |= (1 << 23); // The U bit
+
if (opt != INS_OPTS_NONE)
- result |= (1 << 21); // The W bits
+ result |= (1 << 21); // The W bits
return result;
-}
+}
/*****************************************************************************
*
* Returns the encoding for the PUW bits to be used in a T2_H0 Thumb-2 encoding
*/
-unsigned emitter::insEncodePUW_H0(insOpts opt, int imm)
+unsigned emitter::insEncodePUW_H0(insOpts opt, int imm)
{
unsigned result = 0;
if (opt != INS_OPTS_LDST_POST_INC)
- result |= (1 << 10); // The P bit
+ result |= (1 << 10); // The P bit
if (imm >= 0)
- result |= (1 << 9); // The U bit
-
+ result |= (1 << 9); // The U bit
+
if (opt != INS_OPTS_NONE)
- result |= (1 << 8); // The W bits
+ result |= (1 << 8); // The W bits
return result;
-}
+}
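
A small worked example of the PUW bits computed above, using insEncodePUW_G0 as if called from inside the emitter (the offsets are arbitrary, and INS_OPTS_LDST_PRE_DEC is assumed to be the pre-decrement option alongside the INS_OPTS_LDST_POST_INC used above):

// P = bit 24 (set unless post-indexed), U = bit 23 (imm >= 0), W = bit 21 (write-back)
unsigned a = insEncodePUW_G0(INS_OPTS_NONE, 8);          // P|U : (1 << 24) | (1 << 23)
unsigned b = insEncodePUW_G0(INS_OPTS_LDST_PRE_DEC, -8); // P|W : (1 << 24) | (1 << 21)
unsigned c = insEncodePUW_G0(INS_OPTS_LDST_POST_INC, 8); // U|W : (1 << 23) | (1 << 21)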
/*****************************************************************************
*
* Returns the encoding for the Shift Count bits to be used in a Thumb-2 encoding
*/
-inline unsigned insEncodeShiftCount(int imm)
+inline unsigned insEncodeShiftCount(int imm)
{
unsigned result;
assert((imm & 0x001F) == imm);
- result = (imm & 0x03) << 6;
+ result = (imm & 0x03) << 6;
result |= (imm & 0x1C) << 10;
return result;
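
To make the bit-splitting above concrete (an arbitrary in-range shift count): for imm = 21 (0b10101), imm2 = 0b01 lands in bits 7:6 and imm3 = 0b101 in bits 14:12.

// (21 & 0x03) << 6   ==  0x0040   (imm2 field)
// (21 & 0x1C) << 10  ==  0x5000   (imm3 field)
unsigned enc = insEncodeShiftCount(21); // 0x5040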
@@ -5100,12 +4950,12 @@ inline unsigned insEncodeShiftCount(int imm)
* Returns the encoding for the immediate use by BFI/BFC Thumb-2 encodings
*/
-inline unsigned insEncodeBitFieldImm(int imm)
+inline unsigned insEncodeBitFieldImm(int imm)
{
unsigned result;
assert((imm & 0x03FF) == imm);
- result = (imm & 0x001f);
+ result = (imm & 0x001f);
result |= (imm & 0x0060) << 1;
result |= (imm & 0x0380) << 5;
@@ -5116,28 +4966,28 @@ inline unsigned insEncodeBitFieldImm(int imm)
*
* Unscales the immediate based on the operand size in 'size'
*/
-/*static*/ int emitter::insUnscaleImm(int imm, emitAttr size)
-{
+/*static*/ int emitter::insUnscaleImm(int imm, emitAttr size)
+{
switch (size)
{
- case EA_8BYTE:
- case EA_4BYTE:
- assert((imm & 0x0003) == 0);
- imm >>= 2;
- break;
+ case EA_8BYTE:
+ case EA_4BYTE:
+ assert((imm & 0x0003) == 0);
+ imm >>= 2;
+ break;
- case EA_2BYTE:
- assert((imm & 0x0001) == 0);
- imm >>= 1;
- break;
+ case EA_2BYTE:
+ assert((imm & 0x0001) == 0);
+ imm >>= 1;
+ break;
- case EA_1BYTE:
- // Do nothing
- break;
+ case EA_1BYTE:
+ // Do nothing
+ break;
- default:
- assert(!"Invalid value in size");
- break;
+ default:
+ assert(!"Invalid value in size");
+ break;
}
return imm;
}
@@ -5147,46 +4997,43 @@ inline unsigned insEncodeBitFieldImm(int imm)
* Emit a Thumb-1 instruction (a 16-bit integer as code)
*/
-/*static*/ unsigned emitter::emitOutput_Thumb1Instr(BYTE *dst, ssize_t code)
+/*static*/ unsigned emitter::emitOutput_Thumb1Instr(BYTE* dst, ssize_t code)
{
unsigned short word1 = code & 0xffff;
assert(word1 == code);
#ifdef DEBUG
unsigned short top5bits = (word1 & 0xf800) >> 11;
- assert(top5bits < 29);
+ assert(top5bits < 29);
#endif
MISALIGNED_WR_I2(dst, word1);
- return sizeof(short);
+ return sizeof(short);
}
/*****************************************************************************
*
* Emit a Thumb-2 instruction (two 16-bit integers as code)
*/
-/*static*/ unsigned emitter::emitOutput_Thumb2Instr(BYTE *dst, ssize_t code)
+/*static*/ unsigned emitter::emitOutput_Thumb2Instr(BYTE* dst, ssize_t code)
{
unsigned short word1 = (code >> 16) & 0xffff;
- unsigned short word2 = (code ) & 0xffff;
+ unsigned short word2 = (code)&0xffff;
assert(((word1 << 16) | word2) == code);
#ifdef DEBUG
unsigned short top5bits = (word1 & 0xf800) >> 11;
- assert(top5bits >= 29);
+ assert(top5bits >= 29);
#endif
MISALIGNED_WR_I2(dst, word1);
dst += 2;
MISALIGNED_WR_I2(dst, word2);
- return sizeof(short) * 2;
+ return sizeof(short) * 2;
}
-
-
-
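
The two routines above split on the top five bits of the leading halfword: values below 29 are complete 16-bit Thumb-1 instructions, while 29 through 31 (0b11101-0b11111) introduce a 32-bit Thumb-2 pair. A minimal sketch of that check (the helper name is invented here; it simply mirrors the DEBUG asserts above):

inline bool isThumb2LeadingHalfword(unsigned short word1)
{
    unsigned short top5bits = (word1 & 0xf800) >> 11;
    return top5bits >= 29; // >= 0b11101 means a 32-bit encoding follows
}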
/*****************************************************************************
*
* Output a local jump instruction.
@@ -5194,47 +5041,47 @@ inline unsigned insEncodeBitFieldImm(int imm)
* to handle forward branch patching.
*/
-BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i)
+BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i)
{
- unsigned srcOffs;
- unsigned dstOffs;
- ssize_t distVal;
+ unsigned srcOffs;
+ unsigned dstOffs;
+ ssize_t distVal;
- instrDescJmp * id = (instrDescJmp*)i;
- instruction ins = id->idIns();
- ssize_t code;
+ instrDescJmp* id = (instrDescJmp*)i;
+ instruction ins = id->idIns();
+ ssize_t code;
- bool loadLabel = false;
- bool isJump = false;
- bool relAddr = true; // does the instruction use relative-addressing?
+ bool loadLabel = false;
+ bool isJump = false;
+ bool relAddr = true; // does the instruction use relative-addressing?
- size_t sdistneg;
+ size_t sdistneg;
switch (ins)
{
- default:
- sdistneg = JCC_DIST_SMALL_MAX_NEG;
- isJump = true;
- break;
+ default:
+ sdistneg = JCC_DIST_SMALL_MAX_NEG;
+ isJump = true;
+ break;
- case INS_cbz:
- case INS_cbnz:
- // One size fits all!
- sdistneg = 0;
- isJump = true;
- break;
+ case INS_cbz:
+ case INS_cbnz:
+ // One size fits all!
+ sdistneg = 0;
+ isJump = true;
+ break;
- case INS_adr:
- sdistneg = LBL_DIST_SMALL_MAX_NEG;
- loadLabel = true;
- break;
+ case INS_adr:
+ sdistneg = LBL_DIST_SMALL_MAX_NEG;
+ loadLabel = true;
+ break;
- case INS_movw:
- case INS_movt:
- sdistneg = LBL_DIST_SMALL_MAX_NEG;
- relAddr = false;
- loadLabel = true;
- break;
+ case INS_movw:
+ case INS_movt:
+ sdistneg = LBL_DIST_SMALL_MAX_NEG;
+ relAddr = false;
+ loadLabel = true;
+ break;
}
/* Figure out the distance to the target */
@@ -5262,30 +5109,30 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
if (ins == INS_adr)
{
// for adr, the distance is calculated from 4-byte aligned srcOffs.
- distVal = (ssize_t) ((emitOffsetToPtr(dstOffs) - (BYTE*)(((size_t)emitOffsetToPtr(srcOffs))&~3)) + 1);
+ distVal = (ssize_t)((emitOffsetToPtr(dstOffs) - (BYTE*)(((size_t)emitOffsetToPtr(srcOffs)) & ~3)) + 1);
}
else
{
- distVal = (ssize_t) (emitOffsetToPtr(dstOffs) - emitOffsetToPtr(srcOffs));
+ distVal = (ssize_t)(emitOffsetToPtr(dstOffs) - emitOffsetToPtr(srcOffs));
}
}
else
- {
- assert (ins == INS_movw || ins == INS_movt);
- distVal = (ssize_t) emitOffsetToPtr(dstOffs) + 1; // Or in thumb bit
+ {
+ assert(ins == INS_movw || ins == INS_movt);
+ distVal = (ssize_t)emitOffsetToPtr(dstOffs) + 1; // Or in thumb bit
}
- if (dstOffs <= srcOffs)
+ if (dstOffs <= srcOffs)
{
- /* This is a backward jump - distance is known at this point */
+/* This is a backward jump - distance is known at this point */
-#if DEBUG_EMIT
- if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+#if DEBUG_EMIT
+ if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- size_t blkOffs = id->idjIG->igOffs;
+ size_t blkOffs = id->idjIG->igOffs;
- if (INTERESTING_JUMP_NUM == 0)
- printf("[3] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
+ if (INTERESTING_JUMP_NUM == 0)
+ printf("[3] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
printf("[3] Jump block is at %08X - %02X = %08X\n", blkOffs, emitOffsAdj, blkOffs - emitOffsAdj);
printf("[3] Jump is at %08X - %02X = %08X\n", srcOffs, emitOffsAdj, srcOffs - emitOffsAdj);
printf("[3] Label block is at %08X - %02X = %08X\n", dstOffs, emitOffsAdj, dstOffs - emitOffsAdj);
@@ -5293,11 +5140,11 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
#endif
// This format only supports forward branches
- noway_assert(id->idInsFmt() != IF_T1_I);
+ noway_assert(id->idInsFmt() != IF_T1_I);
/* Can we use a short jump? */
- if (isJump && ((unsigned)(distVal - 4) >= (unsigned)sdistneg))
+ if (isJump && ((unsigned)(distVal - 4) >= (unsigned)sdistneg))
{
emitSetShortJump(id);
}
@@ -5306,7 +5153,7 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
{
/* This is a forward jump - distance will be an upper limit */
- emitFwdJumps = true;
+ emitFwdJumps = true;
/* The target offset will be closer by at least 'emitOffsAdj', but only if this
jump doesn't cross the hot-cold boundary. */
@@ -5323,21 +5170,20 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
/* Are we overflowing the id->idjOffs bitfield? */
if (id->idjOffs != dstOffs)
- IMPL_LIMITATION("Method is too large");
+ IMPL_LIMITATION("Method is too large");
-#if DEBUG_EMIT
- if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+#if DEBUG_EMIT
+ if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- size_t blkOffs = id->idjIG->igOffs;
+ size_t blkOffs = id->idjIG->igOffs;
- if (INTERESTING_JUMP_NUM == 0)
- printf("[4] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
- printf("[4] Jump block is at %08X\n" , blkOffs);
- printf("[4] Jump is at %08X\n" , srcOffs);
+ if (INTERESTING_JUMP_NUM == 0)
+ printf("[4] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
+ printf("[4] Jump block is at %08X\n", blkOffs);
+ printf("[4] Jump is at %08X\n", srcOffs);
printf("[4] Label block is at %08X - %02X = %08X\n", dstOffs + emitOffsAdj, emitOffsAdj, dstOffs);
}
#endif
-
}
/* Adjust the offset to emit relative to the end of the instruction */
@@ -5345,16 +5191,13 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
if (relAddr)
distVal -= 4;
-#ifdef DEBUG
+#ifdef DEBUG
if (0 && emitComp->verbose)
{
- size_t sz = 4; // Thumb-2 pretends all instructions are 4-bytes long for computing jump offsets?
- int distValSize = id->idjShort ? 4 : 8;
- printf("; %s jump [%08X/%03u] from %0*X to %0*X: dist = %08XH\n",
- (dstOffs <= srcOffs)?"Fwd":"Bwd", dspPtr(id), id->idDebugOnlyInfo()->idNum,
- distValSize, srcOffs + sz,
- distValSize, dstOffs,
- distVal);
+ size_t sz = 4; // Thumb-2 pretends all instructions are 4-bytes long for computing jump offsets?
+ int distValSize = id->idjShort ? 4 : 8;
+ printf("; %s jump [%08X/%03u] from %0*X to %0*X: dist = %08XH\n", (dstOffs <= srcOffs) ? "Fwd" : "Bwd",
+ dspPtr(id), id->idDebugOnlyInfo()->idNum, distValSize, srcOffs + sz, distValSize, dstOffs, distVal);
}
#endif
@@ -5364,7 +5207,7 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
{
/* What size jump should we use? */
- if (id->idjShort)
+ if (id->idjShort)
{
/* Short jump */
@@ -5429,9 +5272,11 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
// but the "it" instruction was deprecated, so we can't use it.
dst = emitOutputShortBranch(dst,
- emitJumpKindToIns(emitReverseJumpKind(emitInsToJumpKind(ins))), // reverse the conditional instruction
+ emitJumpKindToIns(emitReverseJumpKind(
+ emitInsToJumpKind(ins))), // reverse the conditional instruction
IF_T1_K,
- 6 - 4, /* 6 bytes from start of this large conditional pseudo-instruction to L_not. Jumps are encoded as offset from instr address + 4. */
+ 6 - 4, /* 6 bytes from start of this large conditional pseudo-instruction to
+ L_not. Jumps are encoded as offset from instr address + 4. */
NULL /* only used for cbz/cbnz */);
// Now, pretend we've got a normal unconditional branch, and fall through to the code to emit that.
@@ -5454,11 +5299,11 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
assert((distVal & 1) == 0);
assert(distVal >= -1048576);
- assert(distVal <= 1048574);
+ assert(distVal <= 1048574);
if (distVal < 0)
- code |= 1<<26;
- code |= ((distVal >> 1) & 0x0007ff);
+ code |= 1 << 26;
+ code |= ((distVal >> 1) & 0x0007ff);
code |= (((distVal >> 1) & 0x01f800) << 5);
code |= (((distVal >> 1) & 0x020000) >> 4);
code |= (((distVal >> 1) & 0x040000) >> 7);
@@ -5478,21 +5323,21 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
#endif
{
assert(distVal >= -16777216);
- assert(distVal <= 16777214);
+ assert(distVal <= 16777214);
if (distVal < 0)
- code |= 1<<26;
- code |= ((distVal >> 1) & 0x0007ff);
+ code |= 1 << 26;
+ code |= ((distVal >> 1) & 0x0007ff);
code |= (((distVal >> 1) & 0x1ff800) << 5);
- bool S = (distVal < 0);
- bool I1 = ((distVal & 0x00800000) == 0);
- bool I2 = ((distVal & 0x00400000) == 0);
+ bool S = (distVal < 0);
+ bool I1 = ((distVal & 0x00800000) == 0);
+ bool I2 = ((distVal & 0x00400000) == 0);
if (S ^ I1)
- code |= (1 << 13); // J1 bit
+ code |= (1 << 13); // J1 bit
if (S ^ I2)
- code |= (1 << 11); // J2 bit
+ code |= (1 << 11); // J2 bit
}
}
else
@@ -5505,7 +5350,7 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
#ifdef RELOC_SUPPORT
if (emitComp->opts.compReloc)
{
- if (emitJumpCrossHotColdBoundary(srcOffs, dstOffs))
+ if (emitJumpCrossHotColdBoundary(srcOffs, dstOffs))
{
assert(id->idjKeepLong);
if (emitComp->info.compMatchedVM)
@@ -5529,12 +5374,12 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
if (fmt == IF_T1_J3)
{
- assert((dstOffs & 3) == 0); // The target label must be 4-byte aligned
+ assert((dstOffs & 3) == 0); // The target label must be 4-byte aligned
assert(distVal >= 0);
assert(distVal <= 1022);
code |= ((distVal >> 2) & 0xff);
-
- dst += emitOutput_Thumb1Instr(dst, code);
+
+ dst += emitOutput_Thumb1Instr(dst, code);
}
else if (fmt == IF_T2_M1)
{
@@ -5542,17 +5387,17 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
assert(distVal <= +4095);
if (distVal < 0)
{
- code |= 0x00A0<<16;
+ code |= 0x00A0 << 16;
distVal = -distVal;
}
assert((distVal & 0x0fff) == distVal);
- code |= (distVal & 0x00ff);
+ code |= (distVal & 0x00ff);
code |= ((distVal & 0x0700) << 4);
code |= ((distVal & 0x0800) << 15);
- code |= id->idReg1() << 8;
-
- dst += emitOutput_Thumb2Instr(dst, code);
+ code |= id->idReg1() << 8;
+
+ dst += emitOutput_Thumb2Instr(dst, code);
}
else if (fmt == IF_T2_N1)
{
@@ -5569,17 +5414,17 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
((instrDescJmp*)id)->idjTemp.idjAddr = (dstOffs > srcOffs) ? dst : NULL;
assert((imm & 0x0000ffff) == imm);
- code |= (imm & 0x00ff);
- code |= ((imm & 0x0700) << 4);
+ code |= (imm & 0x00ff);
+ code |= ((imm & 0x0700) << 4);
code |= ((imm & 0x0800) << 15);
- code |= ((imm & 0xf000) << 4);
- dst += emitOutput_Thumb2Instr(dst, code);
+ code |= ((imm & 0xf000) << 4);
+ dst += emitOutput_Thumb2Instr(dst, code);
if (id->idIsCnsReloc() || id->idIsDspReloc())
{
assert(ins == INS_movt || ins == INS_movw);
if ((ins == INS_movt) && emitComp->info.compMatchedVM)
- emitRecordRelocation((void*)(dst-8), (void *)distVal, IMAGE_REL_BASED_THUMB_MOV32);
+ emitRecordRelocation((void*)(dst - 8), (void*)distVal, IMAGE_REL_BASED_THUMB_MOV32);
}
}
else
@@ -5588,16 +5433,15 @@ BYTE * emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
}
}
- return dst;
+ return dst;
}
-
/*****************************************************************************
*
* Output a short branch instruction.
*/
-BYTE * emitter::emitOutputShortBranch(BYTE *dst, instruction ins, insFormat fmt, ssize_t distVal, instrDescJmp* id)
+BYTE* emitter::emitOutputShortBranch(BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, instrDescJmp* id)
{
size_t code;
@@ -5607,20 +5451,20 @@ BYTE * emitter::emitOutputShortBranch(BYTE *dst, instruction ins, i
{
assert((distVal & 1) == 0);
assert(distVal >= -256);
- assert(distVal <= 254);
+ assert(distVal <= 254);
if (distVal < 0)
- code |= 1<<7;
+ code |= 1 << 7;
code |= ((distVal >> 1) & 0x7f);
}
else if (fmt == IF_T1_M)
{
assert((distVal & 1) == 0);
assert(distVal >= -2048);
- assert(distVal <= 2046);
+ assert(distVal <= 2046);
if (distVal < 0)
- code |= 1<<10;
+ code |= 1 << 10;
code |= ((distVal >> 1) & 0x3ff);
}
else if (fmt == IF_T1_I)
@@ -5629,23 +5473,22 @@ BYTE * emitter::emitOutputShortBranch(BYTE *dst, instruction ins, i
assert(ins == INS_cbz || INS_cbnz);
assert((distVal & 1) == 0);
assert(distVal >= 0);
- assert(distVal <= 126);
+ assert(distVal <= 126);
code |= ((distVal << 3) & 0x0200);
code |= ((distVal << 2) & 0x00F8);
- code |= ( id->idReg1() & 0x0007);
+ code |= (id->idReg1() & 0x0007);
}
else
{
assert(!"Unknown fmt");
}
- dst += emitOutput_Thumb1Instr(dst, code);
+ dst += emitOutput_Thumb1Instr(dst, code);
return dst;
}
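
For the IF_T1_K case above, the branch displacement is a signed halfword count packed into the low byte: the sign goes to bit 7 and (distVal >> 1) fills bits 6:0, so the two pieces together form the two's-complement imm8. A worked example (offsets picked only for illustration):

//   distVal = +20 :                        (20 >> 1) & 0x7f == 0x0a   -> imm8 = 0x0a
//   distVal = -20 : code |= 1 << 7;  and  (-20 >> 1) & 0x7f == 0x76   -> imm8 = 0xf6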
-
#ifdef FEATURE_ITINSTRUCTION
/*****************************************************************************
@@ -5658,26 +5501,26 @@ BYTE * emitter::emitOutputShortBranch(BYTE *dst, instruction ins, i
* Output an IT instruction.
*/
-BYTE * emitter::emitOutputIT(BYTE *dst, instruction ins, insFormat fmt, ssize_t condcode)
+BYTE* emitter::emitOutputIT(BYTE* dst, instruction ins, insFormat fmt, ssize_t condcode)
{
ssize_t imm0;
- size_t code, mask, bit;
-
- code = emitInsCode(ins, fmt);
- code |= (condcode << 4); // encode firstcond
- imm0 = condcode & 1; // this is firstcond[0]
- mask = code & 0x0f; // initialize mask encoded in opcode
- bit = 0x08; // where in mask we are encoding
- while ((mask & (bit-1)) != 0) // are the remaining bits all zeros?
- { // then we are done
+ size_t code, mask, bit;
+
+ code = emitInsCode(ins, fmt);
+ code |= (condcode << 4); // encode firstcond
+ imm0 = condcode & 1; // this is firstcond[0]
+ mask = code & 0x0f; // initialize mask encoded in opcode
+ bit = 0x08; // where in mask we are encoding
+ while ((mask & (bit - 1)) != 0) // are the remaining bits all zeros?
+ { // then we are done
// otherwise determine the setting of bit
if ((imm0 == 1) ^ ((bit & mask) != 0))
{
- code |= bit; // set the current bit
+ code |= bit; // set the current bit
}
else
{
- code &= ~bit; // clear the current bit
+ code &= ~bit; // clear the current bit
}
bit >>= 1;
}
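For readers unfamiliar with the encoding the loop above is adjusting: the IT mask field selects Then/Else for the 2nd..4th instructions of the block and is terminated by a trailing 1 bit. A minimal standalone sketch of that rule, illustrative only and not JIT code (the helper name and the pattern-string interface are invented for the example):

static unsigned buildITMask(unsigned firstcond, const char* pattern)
{
    // pattern holds the letters after "IT": "" for IT, "T" for ITT, "TE" for ITTE, ...
    unsigned cond0 = firstcond & 1; // firstcond[0]
    unsigned mask  = 0;
    unsigned bit   = 0x8;           // mask bit for the 2nd instruction of the block
    for (const char* p = pattern; *p != '\0'; ++p)
    {
        // 'T' repeats firstcond, 'E' uses its inverse
        if ((*p == 'T') == (cond0 != 0))
            mask |= bit;
        bit >>= 1;
    }
    mask |= bit;  // terminating 1 after the last encoded slot
    return mask;  // 4-bit field placed at opcode bits [3:0]
}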
@@ -5693,10 +5536,10 @@ BYTE * emitter::emitOutputIT(BYTE *dst, instruction ins, insFormat
* Output a 32-bit nop instruction.
*/
-BYTE * emitter::emitOutputNOP (BYTE *dst, instruction ins, insFormat fmt)
-{
+BYTE* emitter::emitOutputNOP(BYTE* dst, instruction ins, insFormat fmt)
+{
size_t code = emitInsCode(ins, fmt);
-
+
dst += emitOutput_Thumb2Instr(dst, code);
return dst;
@@ -5711,21 +5554,20 @@ BYTE * emitter::emitOutputNOP (BYTE *dst, instruction ins, insForma
* descriptor in bytes.
*/
-size_t emitter::emitOutputInstr(insGroup *ig,
- instrDesc *id, BYTE **dp)
+size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
{
- BYTE * dst = *dp;
- BYTE * odst = dst;
- size_t code = 0;
- size_t sz = 0;
- instruction ins = id->idIns();
- insFormat fmt = id->idInsFmt();
- emitAttr size = id->idOpSize();
- unsigned char callInstrSize = 0;
- ssize_t condcode;
-
-#ifdef DEBUG
- bool dspOffs = emitComp->opts.dspGCtbls || !emitComp->opts.disDiffable;
+ BYTE* dst = *dp;
+ BYTE* odst = dst;
+ size_t code = 0;
+ size_t sz = 0;
+ instruction ins = id->idIns();
+ insFormat fmt = id->idInsFmt();
+ emitAttr size = id->idOpSize();
+ unsigned char callInstrSize = 0;
+ ssize_t condcode;
+
+#ifdef DEBUG
+ bool dspOffs = emitComp->opts.dspGCtbls || !emitComp->opts.disDiffable;
#endif // DEBUG
assert(REG_NA == (int)REG_NA);
@@ -5736,815 +5578,816 @@ size_t emitter::emitOutputInstr(insGroup *ig,
switch (fmt)
{
- int imm;
- int imm0;
- int mask;
- int bit;
- BYTE * addr;
- regMaskTP gcrefRegs;
- regMaskTP byrefRegs;
-
- case IF_T1_A: // T1_A ................
- sz = SMALL_IDSC_SIZE;
- code = emitInsCode(ins, fmt);
- dst += emitOutput_Thumb1Instr(dst, code);
- break;
+ int imm;
+ int imm0;
+ int mask;
+ int bit;
+ BYTE* addr;
+ regMaskTP gcrefRegs;
+ regMaskTP byrefRegs;
+
+ case IF_T1_A: // T1_A ................
+ sz = SMALL_IDSC_SIZE;
+ code = emitInsCode(ins, fmt);
+ dst += emitOutput_Thumb1Instr(dst, code);
+ break;
#ifdef FEATURE_ITINSTRUCTION
- case IF_T1_B: // T1_B ........cccc.... cond
- assert(id->idGCref() == GCT_NONE);
- condcode = emitGetInsSC(id);
- dst = emitOutputIT(dst, ins, fmt, condcode);
- sz = SMALL_IDSC_SIZE;
- break;
+ case IF_T1_B: // T1_B ........cccc.... cond
+ assert(id->idGCref() == GCT_NONE);
+ condcode = emitGetInsSC(id);
+ dst = emitOutputIT(dst, ins, fmt, condcode);
+ sz = SMALL_IDSC_SIZE;
+ break;
#endif // FEATURE_ITINSTRUCTION
- case IF_T1_C: // T1_C .....iiiiinnnddd R1 R2 imm5
- sz = SMALL_IDSC_SIZE;
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT1_D3(id->idReg1());
- code |= insEncodeRegT1_N3(id->idReg2());
- if (emitInsIsLoadOrStore(ins))
- {
- imm = insUnscaleImm(imm, size);
- }
- assert((imm & 0x001f) == imm);
- code |= (imm << 6);
- dst += emitOutput_Thumb1Instr(dst, code);
- break;
-
- case IF_T1_D0: // T1_D0 ........Dmmmmddd R1* R2*
- sz = SMALL_IDSC_SIZE;
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT1_D4(id->idReg1());
- code |= insEncodeRegT1_M4(id->idReg2());
- dst += emitOutput_Thumb1Instr(dst, code);
- break;
+ case IF_T1_C: // T1_C .....iiiiinnnddd R1 R2 imm5
+ sz = SMALL_IDSC_SIZE;
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT1_D3(id->idReg1());
+ code |= insEncodeRegT1_N3(id->idReg2());
+ if (emitInsIsLoadOrStore(ins))
+ {
+ imm = insUnscaleImm(imm, size);
+ }
+ assert((imm & 0x001f) == imm);
+ code |= (imm << 6);
+ dst += emitOutput_Thumb1Instr(dst, code);
+ break;
- case IF_T1_E: // T1_E ..........nnnddd R1 R2
- sz = SMALL_IDSC_SIZE;
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT1_D3(id->idReg1());
- code |= insEncodeRegT1_N3(id->idReg2());
- dst += emitOutput_Thumb1Instr(dst, code);
- break;
+ case IF_T1_D0: // T1_D0 ........Dmmmmddd R1* R2*
+ sz = SMALL_IDSC_SIZE;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT1_D4(id->idReg1());
+ code |= insEncodeRegT1_M4(id->idReg2());
+ dst += emitOutput_Thumb1Instr(dst, code);
+ break;
- case IF_T1_F: // T1_F .........iiiiiii SP imm7
- sz = emitGetInstrDescSize(id);
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- imm = insUnscaleImm(imm, size);
- assert((imm & 0x007F) == imm);
- code |= imm;
- dst += emitOutput_Thumb1Instr(dst, code);
- break;
+ case IF_T1_E: // T1_E ..........nnnddd R1 R2
+ sz = SMALL_IDSC_SIZE;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT1_D3(id->idReg1());
+ code |= insEncodeRegT1_N3(id->idReg2());
+ dst += emitOutput_Thumb1Instr(dst, code);
+ break;
- case IF_T1_G: // T1_G .......iiinnnddd R1 R2 imm3
- sz = SMALL_IDSC_SIZE;
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT1_D3(id->idReg1());
- code |= insEncodeRegT1_N3(id->idReg2());
- assert((imm & 0x0007) == imm);
- code |= (imm << 6);
- dst += emitOutput_Thumb1Instr(dst, code);
- break;
+ case IF_T1_F: // T1_F .........iiiiiii SP imm7
+ sz = emitGetInstrDescSize(id);
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ imm = insUnscaleImm(imm, size);
+ assert((imm & 0x007F) == imm);
+ code |= imm;
+ dst += emitOutput_Thumb1Instr(dst, code);
+ break;
- case IF_T1_H: // T1_H .......mmmnnnddd R1 R2 R3
- sz = emitGetInstrDescSize(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT1_D3(id->idReg1());
- code |= insEncodeRegT1_N3(id->idReg2());
- code |= insEncodeRegT1_M3(id->idReg3());
- dst += emitOutput_Thumb1Instr(dst, code);
- break;
+ case IF_T1_G: // T1_G .......iiinnnddd R1 R2 imm3
+ sz = SMALL_IDSC_SIZE;
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT1_D3(id->idReg1());
+ code |= insEncodeRegT1_N3(id->idReg2());
+ assert((imm & 0x0007) == imm);
+ code |= (imm << 6);
+ dst += emitOutput_Thumb1Instr(dst, code);
+ break;
- case IF_T1_I: // T1_I ......i.iiiiiddd R1 imm6
- assert(id->idIsBound());
+ case IF_T1_H: // T1_H .......mmmnnnddd R1 R2 R3
+ sz = emitGetInstrDescSize(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT1_D3(id->idReg1());
+ code |= insEncodeRegT1_N3(id->idReg2());
+ code |= insEncodeRegT1_M3(id->idReg3());
+ dst += emitOutput_Thumb1Instr(dst, code);
+ break;
- dst = emitOutputLJ(ig, dst, id);
- sz = sizeof(instrDescJmp);
- break;
+ case IF_T1_I: // T1_I ......i.iiiiiddd R1 imm6
+ assert(id->idIsBound());
- case IF_T1_J0: // T1_J0 .....dddiiiiiiii R1 imm8
- case IF_T1_J1: // T1_J1 .....dddiiiiiiii R1 <regmask8>
- case IF_T1_J2: // T1_J2 .....dddiiiiiiii R1 SP imm8
- sz = emitGetInstrDescSize(id);
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT1_DI(id->idReg1());
- if (fmt == IF_T1_J2)
- {
- imm = insUnscaleImm(imm, size);
- }
- assert((imm & 0x00ff) == imm);
- code |= imm;
- dst += emitOutput_Thumb1Instr(dst, code);
- break;
+ dst = emitOutputLJ(ig, dst, id);
+ sz = sizeof(instrDescJmp);
+ break;
- case IF_T1_L0: // T1_L0 ........iiiiiiii imm8
- case IF_T1_L1: // T1_L1 .......Rrrrrrrrr <regmask8>
- sz = emitGetInstrDescSize(id);
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- if (fmt == IF_T1_L1)
- {
- assert((imm & 0x3) != 0x3);
- if (imm & 0x3)
- code |= 0x0100; // R bit
- imm >>= 2;
- }
- assert((imm & 0x00ff) == imm);
- code |= imm;
- dst += emitOutput_Thumb1Instr(dst, code);
- break;
+ case IF_T1_J0: // T1_J0 .....dddiiiiiiii R1 imm8
+ case IF_T1_J1: // T1_J1 .....dddiiiiiiii R1 <regmask8>
+ case IF_T1_J2: // T1_J2 .....dddiiiiiiii R1 SP imm8
+ sz = emitGetInstrDescSize(id);
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT1_DI(id->idReg1());
+ if (fmt == IF_T1_J2)
+ {
+ imm = insUnscaleImm(imm, size);
+ }
+ assert((imm & 0x00ff) == imm);
+ code |= imm;
+ dst += emitOutput_Thumb1Instr(dst, code);
+ break;
- case IF_T2_A: // T2_A ................ ................
- sz = SMALL_IDSC_SIZE;
- code = emitInsCode(ins, fmt);
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T1_L0: // T1_L0 ........iiiiiiii imm8
+ case IF_T1_L1: // T1_L1 .......Rrrrrrrrr <regmask8>
+ sz = emitGetInstrDescSize(id);
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ if (fmt == IF_T1_L1)
+ {
+ assert((imm & 0x3) != 0x3);
+ if (imm & 0x3)
+ code |= 0x0100; // R bit
+ imm >>= 2;
+ }
+ assert((imm & 0x00ff) == imm);
+ code |= imm;
+ dst += emitOutput_Thumb1Instr(dst, code);
+ break;
- case IF_T2_B: // T2_B ................ ............iiii imm4
- sz = SMALL_IDSC_SIZE;
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- assert((imm & 0x000F) == imm);
- code |= imm;
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_A: // T2_A ................ ................
+ sz = SMALL_IDSC_SIZE;
+ code = emitInsCode(ins, fmt);
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_C0: // T2_C0 ...........Snnnn .iiiddddiishmmmm R1 R2 R3 S, imm5, sh
- case IF_T2_C4: // T2_C4 ...........Snnnn ....dddd....mmmm R1 R2 R3 S
- case IF_T2_C5: // T2_C5 ............nnnn ....dddd....mmmm R1 R2 R3
- sz = emitGetInstrDescSize(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_D(id->idReg1());
- code |= insEncodeRegT2_N(id->idReg2());
- code |= insEncodeRegT2_M(id->idReg3());
- if (fmt != IF_T2_C5)
- code |= insEncodeSetFlags (id->idInsFlags());
- if (fmt == IF_T2_C0)
- {
- imm = emitGetInsSC(id);
- code |= insEncodeShiftCount(imm);
- code |= insEncodeShiftOpts(id->idInsOpt());
- }
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_B: // T2_B ................ ............iiii imm4
+ sz = SMALL_IDSC_SIZE;
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ assert((imm & 0x000F) == imm);
+ code |= imm;
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_C1: // T2_C1 ...........S.... .iiiddddiishmmmm R1 R2 S, imm5, sh
- case IF_T2_C2: // T2_C2 ...........S.... .iiiddddii..mmmm R1 R2 S, imm5
- case IF_T2_C6: // T2_C6 ................ ....dddd..iimmmm R1 R2 imm2
- sz = SMALL_IDSC_SIZE;
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_D(id->idReg1());
- code |= insEncodeRegT2_M(id->idReg2());
- if (fmt == IF_T2_C6)
- {
- assert((imm & 0x0018) == imm);
- code |= (imm << 1);
- }
- else
- {
- code |= insEncodeSetFlags (id->idInsFlags());
- code |= insEncodeShiftCount(imm);
- if (fmt == IF_T2_C1)
+ case IF_T2_C0: // T2_C0 ...........Snnnn .iiiddddiishmmmm R1 R2 R3 S, imm5, sh
+ case IF_T2_C4: // T2_C4 ...........Snnnn ....dddd....mmmm R1 R2 R3 S
+ case IF_T2_C5: // T2_C5 ............nnnn ....dddd....mmmm R1 R2 R3
+ sz = emitGetInstrDescSize(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_D(id->idReg1());
+ code |= insEncodeRegT2_N(id->idReg2());
+ code |= insEncodeRegT2_M(id->idReg3());
+ if (fmt != IF_T2_C5)
+ code |= insEncodeSetFlags(id->idInsFlags());
+ if (fmt == IF_T2_C0)
+ {
+ imm = emitGetInsSC(id);
+ code |= insEncodeShiftCount(imm);
code |= insEncodeShiftOpts(id->idInsOpt());
- }
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ }
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_C3: // T2_C3 ...........S.... ....dddd....mmmm R1 R2 S
- sz = SMALL_IDSC_SIZE;
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_D(id->idReg1());
- code |= insEncodeRegT2_M(id->idReg2());
- code |= insEncodeSetFlags (id->idInsFlags());
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_C1: // T2_C1 ...........S.... .iiiddddiishmmmm R1 R2 S, imm5, sh
+ case IF_T2_C2: // T2_C2 ...........S.... .iiiddddii..mmmm R1 R2 S, imm5
+ case IF_T2_C6: // T2_C6 ................ ....dddd..iimmmm R1 R2 imm2
+ sz = SMALL_IDSC_SIZE;
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_D(id->idReg1());
+ code |= insEncodeRegT2_M(id->idReg2());
+ if (fmt == IF_T2_C6)
+ {
+ assert((imm & 0x0018) == imm);
+ code |= (imm << 1);
+ }
+ else
+ {
+ code |= insEncodeSetFlags(id->idInsFlags());
+ code |= insEncodeShiftCount(imm);
+ if (fmt == IF_T2_C1)
+ code |= insEncodeShiftOpts(id->idInsOpt());
+ }
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_C7: // T2_C7 ............nnnn ..........shmmmm R1 R2 imm2
- case IF_T2_C8: // T2_C8 ............nnnn .iii....iishmmmm R1 R2 imm5, sh
- sz = SMALL_IDSC_SIZE;
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_N(id->idReg1());
- code |= insEncodeRegT2_M(id->idReg2());
- if (fmt == IF_T2_C7)
- {
- assert((imm & 0x0003) == imm);
- code |= (imm << 4);
- }
- else if (fmt == IF_T2_C8)
- {
- code |= insEncodeShiftCount(imm);
- code |= insEncodeShiftOpts(id->idInsOpt());
- }
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_C3: // T2_C3 ...........S.... ....dddd....mmmm R1 R2 S
+ sz = SMALL_IDSC_SIZE;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_D(id->idReg1());
+ code |= insEncodeRegT2_M(id->idReg2());
+ code |= insEncodeSetFlags(id->idInsFlags());
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_C9: // T2_C9 ............nnnn ............mmmm R1 R2
- sz = SMALL_IDSC_SIZE;
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_N(id->idReg1());
- code |= insEncodeRegT2_M(id->idReg2());
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_C7: // T2_C7 ............nnnn ..........shmmmm R1 R2 imm2
+ case IF_T2_C8: // T2_C8 ............nnnn .iii....iishmmmm R1 R2 imm5, sh
+ sz = SMALL_IDSC_SIZE;
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_N(id->idReg1());
+ code |= insEncodeRegT2_M(id->idReg2());
+ if (fmt == IF_T2_C7)
+ {
+ assert((imm & 0x0003) == imm);
+ code |= (imm << 4);
+ }
+ else if (fmt == IF_T2_C8)
+ {
+ code |= insEncodeShiftCount(imm);
+ code |= insEncodeShiftOpts(id->idInsOpt());
+ }
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_C10: // T2_C10 ............mmmm ....dddd....mmmm R1 R2
- sz = SMALL_IDSC_SIZE;
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_D(id->idReg1());
- code |= insEncodeRegT2_M(id->idReg2());
- code |= insEncodeRegT2_N(id->idReg2());
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_C9: // T2_C9 ............nnnn ............mmmm R1 R2
+ sz = SMALL_IDSC_SIZE;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_N(id->idReg1());
+ code |= insEncodeRegT2_M(id->idReg2());
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_D0: // T2_D0 ............nnnn .iiiddddii.wwwww R1 R2 imm5, imm5
- case IF_T2_D1: // T2_D1 ................ .iiiddddii.wwwww R1 imm5, imm5
- sz = SMALL_IDSC_SIZE;
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_D(id->idReg1());
- if (fmt == IF_T2_D0)
+ case IF_T2_C10: // T2_C10 ............mmmm ....dddd....mmmm R1 R2
+ sz = SMALL_IDSC_SIZE;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_D(id->idReg1());
+ code |= insEncodeRegT2_M(id->idReg2());
code |= insEncodeRegT2_N(id->idReg2());
- code |= insEncodeBitFieldImm(imm);
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_E0: // T2_E0 ............nnnn tttt......shmmmm R1 R2 R3 imm2
- case IF_T2_E1: // T2_E1 ............nnnn tttt............ R1 R2
- case IF_T2_E2: // T2_E2 ................ tttt............ R1
+ case IF_T2_D0: // T2_D0 ............nnnn .iiiddddii.wwwww R1 R2 imm5, imm5
+ case IF_T2_D1: // T2_D1 ................ .iiiddddii.wwwww R1 imm5, imm5
+ sz = SMALL_IDSC_SIZE;
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_D(id->idReg1());
+ if (fmt == IF_T2_D0)
+ code |= insEncodeRegT2_N(id->idReg2());
+ code |= insEncodeBitFieldImm(imm);
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
+
+ case IF_T2_E0: // T2_E0 ............nnnn tttt......shmmmm R1 R2 R3 imm2
+ case IF_T2_E1: // T2_E1 ............nnnn tttt............ R1 R2
+ case IF_T2_E2: // T2_E2 ................ tttt............ R1
#ifdef ARM_HAZARD_AVOIDANCE
- if (id->idKraitNop())
- {
- // This is a pseudo-format representing a 32-bit nop followed by ldr pc
- // First emit the nop
+ if (id->idKraitNop())
+ {
+ // This is a pseudo-format representing a 32-bit nop followed by ldr pc
+ // First emit the nop
- dst = emitOutputNOP(dst, INS_nopw, IF_T2_A);
- }
+ dst = emitOutputNOP(dst, INS_nopw, IF_T2_A);
+ }
#endif
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_T(id->idReg1());
- if (fmt == IF_T2_E0)
- {
- sz = emitGetInstrDescSize(id);
- code |= insEncodeRegT2_N(id->idReg2());
- if (id->idIsLclVar())
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_T(id->idReg1());
+ if (fmt == IF_T2_E0)
{
- code |= insEncodeRegT2_M(codeGen->rsGetRsvdReg());
- imm = 0;
+ sz = emitGetInstrDescSize(id);
+ code |= insEncodeRegT2_N(id->idReg2());
+ if (id->idIsLclVar())
+ {
+ code |= insEncodeRegT2_M(codeGen->rsGetRsvdReg());
+ imm = 0;
+ }
+ else
+ {
+ code |= insEncodeRegT2_M(id->idReg3());
+ imm = emitGetInsSC(id);
+ assert((imm & 0x0003) == imm);
+ code |= (imm << 4);
+ }
}
else
{
- code |= insEncodeRegT2_M(id->idReg3());
- imm = emitGetInsSC(id);
- assert((imm & 0x0003) == imm);
- code |= (imm << 4);
- }
- }
- else
- {
- sz = SMALL_IDSC_SIZE;
- if (fmt != IF_T2_E2)
- {
- code |= insEncodeRegT2_N(id->idReg2());
+ sz = SMALL_IDSC_SIZE;
+ if (fmt != IF_T2_E2)
+ {
+ code |= insEncodeRegT2_N(id->idReg2());
+ }
}
- }
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_F1: // T2_F1 ............nnnn ttttdddd....mmmm R1 R2 R3 R4
- sz = emitGetInstrDescSize(id);;
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_T(id->idReg1());
- code |= insEncodeRegT2_D(id->idReg2());
- code |= insEncodeRegT2_N(id->idReg3());
- code |= insEncodeRegT2_M(id->idReg4());
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_F1: // T2_F1 ............nnnn ttttdddd....mmmm R1 R2 R3 R4
+ sz = emitGetInstrDescSize(id);
+ ;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_T(id->idReg1());
+ code |= insEncodeRegT2_D(id->idReg2());
+ code |= insEncodeRegT2_N(id->idReg3());
+ code |= insEncodeRegT2_M(id->idReg4());
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_F2: // T2_F2 ............nnnn aaaadddd....mmmm R1 R2 R3 R4
- sz = emitGetInstrDescSize(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_D(id->idReg1());
- code |= insEncodeRegT2_N(id->idReg2());
- code |= insEncodeRegT2_M(id->idReg3());
- code |= insEncodeRegT2_T(id->idReg4());
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_F2: // T2_F2 ............nnnn aaaadddd....mmmm R1 R2 R3 R4
+ sz = emitGetInstrDescSize(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_D(id->idReg1());
+ code |= insEncodeRegT2_N(id->idReg2());
+ code |= insEncodeRegT2_M(id->idReg3());
+ code |= insEncodeRegT2_T(id->idReg4());
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_G0: // T2_G0 .......PU.W.nnnn ttttTTTTiiiiiiii R1 R2 R3 imm8, PUW
- case IF_T2_G1: // T2_G1 ............nnnn ttttTTTT........ R1 R2 R3
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_T(id->idReg1());
- code |= insEncodeRegT2_D(id->idReg2());
- code |= insEncodeRegT2_N(id->idReg3());
- if (fmt == IF_T2_G0)
- {
- sz = emitGetInstrDescSizeSC(id);
- imm = emitGetInsSC(id);
- assert(unsigned_abs(imm) <= 0x00ff);
- code |= abs(imm);
- code |= insEncodePUW_G0(id->idInsOpt(), imm);
- }
- else
- {
- sz = emitGetInstrDescSize(id);
- }
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_G0: // T2_G0 .......PU.W.nnnn ttttTTTTiiiiiiii R1 R2 R3 imm8, PUW
+ case IF_T2_G1: // T2_G1 ............nnnn ttttTTTT........ R1 R2 R3
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_T(id->idReg1());
+ code |= insEncodeRegT2_D(id->idReg2());
+ code |= insEncodeRegT2_N(id->idReg3());
+ if (fmt == IF_T2_G0)
+ {
+ sz = emitGetInstrDescSizeSC(id);
+ imm = emitGetInsSC(id);
+ assert(unsigned_abs(imm) <= 0x00ff);
+ code |= abs(imm);
+ code |= insEncodePUW_G0(id->idInsOpt(), imm);
+ }
+ else
+ {
+ sz = emitGetInstrDescSize(id);
+ }
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_H0: // T2_H0 ............nnnn tttt.PUWiiiiiiii R1 R2 imm8, PUW
- case IF_T2_H1: // T2_H1 ............nnnn tttt....iiiiiiii R1 R2 imm8
- case IF_T2_H2: // T2_H2 ............nnnn ........iiiiiiii R1 imm8
- sz = emitGetInstrDescSizeSC(id);
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_T(id->idReg1());
+ case IF_T2_H0: // T2_H0 ............nnnn tttt.PUWiiiiiiii R1 R2 imm8, PUW
+ case IF_T2_H1: // T2_H1 ............nnnn tttt....iiiiiiii R1 R2 imm8
+ case IF_T2_H2: // T2_H2 ............nnnn ........iiiiiiii R1 imm8
+ sz = emitGetInstrDescSizeSC(id);
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_T(id->idReg1());
- if (fmt != IF_T2_H2)
- code |= insEncodeRegT2_N(id->idReg2());
+ if (fmt != IF_T2_H2)
+ code |= insEncodeRegT2_N(id->idReg2());
- if (fmt == IF_T2_H0)
- {
- assert(unsigned_abs(imm) <= 0x00ff);
- code |= insEncodePUW_H0(id->idInsOpt(), imm);
- code |= unsigned_abs(imm);
- }
- else
- {
- assert((imm & 0x00ff) == imm);
+ if (fmt == IF_T2_H0)
+ {
+ assert(unsigned_abs(imm) <= 0x00ff);
+ code |= insEncodePUW_H0(id->idInsOpt(), imm);
+ code |= unsigned_abs(imm);
+ }
+ else
+ {
+ assert((imm & 0x00ff) == imm);
+ code |= imm;
+ }
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
+
+ case IF_T2_I0: // T2_I0 ..........W.nnnn rrrrrrrrrrrrrrrr R1 W, imm16
+ case IF_T2_I1: // T2_I1 ................ rrrrrrrrrrrrrrrr imm16
+ sz = emitGetInstrDescSizeSC(id);
+ code = emitInsCode(ins, fmt);
+ if (fmt == IF_T2_I0)
+ {
+ code |= insEncodeRegT2_N(id->idReg1());
+ code |= (1 << 21); // W bit
+ }
+ imm = emitGetInsSC(id);
+ assert((imm & 0x3) != 0x3);
+ if (imm & 0x2)
+ code |= 0x8000; // PC bit
+ if (imm & 0x1)
+ code |= 0x4000; // LR bit
+ imm >>= 2;
+ assert(imm <= 0x1fff); // 13 bits
code |= imm;
- }
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_I0: // T2_I0 ..........W.nnnn rrrrrrrrrrrrrrrr R1 W, imm16
- case IF_T2_I1: // T2_I1 ................ rrrrrrrrrrrrrrrr imm16
- sz = emitGetInstrDescSizeSC(id);
- code = emitInsCode(ins, fmt);
- if (fmt == IF_T2_I0)
- {
- code |= insEncodeRegT2_N(id->idReg1());
- code |= (1 << 21); // W bit
- }
- imm = emitGetInsSC(id);
- assert((imm & 0x3) != 0x3);
- if (imm & 0x2)
- code |= 0x8000; // PC bit
- if (imm & 0x1)
- code |= 0x4000; // LR bit
- imm >>= 2;
- assert(imm <= 0x1fff); // 13 bits
- code |= imm;
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_K1: // T2_K1 ............nnnn ttttiiiiiiiiiiii R1 R2 imm12
+ case IF_T2_K4: // T2_K4 ........U....... ttttiiiiiiiiiiii R1 PC U, imm12
+ case IF_T2_K3: // T2_K3 ........U....... ....iiiiiiiiiiii PC U, imm12
+ sz = emitGetInstrDescSize(id);
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ if (fmt != IF_T2_K3)
+ {
+ code |= insEncodeRegT2_T(id->idReg1());
+ }
+ if (fmt == IF_T2_K1)
+ {
+ code |= insEncodeRegT2_N(id->idReg2());
+ assert(imm <= 0xfff); // 12 bits
+ code |= imm;
+ }
+ else
+ {
+ assert(unsigned_abs(imm) <= 0xfff); // 12 bits (signed)
+ code |= abs(imm);
+ if (imm >= 0)
+ code |= (1 << 23); // U bit
+ }
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_K1: // T2_K1 ............nnnn ttttiiiiiiiiiiii R1 R2 imm12
- case IF_T2_K4: // T2_K4 ........U....... ttttiiiiiiiiiiii R1 PC U, imm12
- case IF_T2_K3: // T2_K3 ........U....... ....iiiiiiiiiiii PC U, imm12
- sz = emitGetInstrDescSize(id);
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- if (fmt != IF_T2_K3)
- {
- code |= insEncodeRegT2_T(id->idReg1());
- }
- if (fmt == IF_T2_K1)
- {
- code |= insEncodeRegT2_N(id->idReg2());
- assert(imm <= 0xfff); // 12 bits
+ case IF_T2_K2: // T2_K2 ............nnnn ....iiiiiiiiiiii R1 imm12
+ sz = emitGetInstrDescSizeSC(id);
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_N(id->idReg1());
+ assert(imm <= 0xfff); // 12 bits
code |= imm;
- }
- else
- {
- assert(unsigned_abs(imm) <= 0xfff); // 12 bits (signed)
- code |= abs(imm);
- if (imm >= 0)
- code |= (1 << 23); // U bit
- }
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_K2: // T2_K2 ............nnnn ....iiiiiiiiiiii R1 imm12
- sz = emitGetInstrDescSizeSC(id);
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_N(id->idReg1());
- assert(imm <= 0xfff); // 12 bits
- code |= imm;
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_L0: // T2_L0 .....i.....Snnnn .iiiddddiiiiiiii R1 R2 S, imm8<<imm4
+ case IF_T2_L1: // T2_L1 .....i.....S.... .iiiddddiiiiiiii R1 S, imm8<<imm4
+ case IF_T2_L2: // T2_L2 .....i......nnnn .iii....iiiiiiii R1 imm8<<imm4
+ sz = emitGetInstrDescSize(id);
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
- case IF_T2_L0: // T2_L0 .....i.....Snnnn .iiiddddiiiiiiii R1 R2 S, imm8<<imm4
- case IF_T2_L1: // T2_L1 .....i.....S.... .iiiddddiiiiiiii R1 S, imm8<<imm4
- case IF_T2_L2: // T2_L2 .....i......nnnn .iii....iiiiiiii R1 imm8<<imm4
- sz = emitGetInstrDescSize(id);
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
+ if (fmt == IF_T2_L2)
+ code |= insEncodeRegT2_N(id->idReg1());
+ else
+ {
+ code |= insEncodeSetFlags(id->idInsFlags());
+ code |= insEncodeRegT2_D(id->idReg1());
+ if (fmt == IF_T2_L0)
+ code |= insEncodeRegT2_N(id->idReg2());
+ }
+ assert(isModImmConst(imm)); // Funky ARM imm encoding
+ imm = encodeModImmConst(imm);
+ assert(imm <= 0xfff); // 12 bits
+ code |= (imm & 0x00ff);
+ code |= (imm & 0x0700) << 4;
+ code |= (imm & 0x0800) << 15;
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- if (fmt == IF_T2_L2)
- code |= insEncodeRegT2_N(id->idReg1());
- else
- {
- code |= insEncodeSetFlags (id->idInsFlags());
+ case IF_T2_M0: // T2_M0 .....i......nnnn .iiiddddiiiiiiii R1 R2 imm12
+ sz = emitGetInstrDescSizeSC(id);
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
code |= insEncodeRegT2_D(id->idReg1());
- if (fmt == IF_T2_L0)
+ if (fmt == IF_T2_M0)
code |= insEncodeRegT2_N(id->idReg2());
- }
- assert(isModImmConst(imm)); // Funky ARM imm encoding
- imm = encodeModImmConst(imm);
- assert(imm <= 0xfff); // 12 bits
- code |= (imm & 0x00ff);
- code |= (imm & 0x0700) << 4;
- code |= (imm & 0x0800) << 15;
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
-
- case IF_T2_M0: // T2_M0 .....i......nnnn .iiiddddiiiiiiii R1 R2 imm12
- sz = emitGetInstrDescSizeSC(id);
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_D(id->idReg1());
- if (fmt == IF_T2_M0)
- code |= insEncodeRegT2_N(id->idReg2());
- imm = emitGetInsSC(id);
- assert(imm <= 0xfff); // 12 bits
- code |= (imm & 0x00ff);
- code |= (imm & 0x0700) << 4;
- code |= (imm & 0x0800) << 15;
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ imm = emitGetInsSC(id);
+ assert(imm <= 0xfff); // 12 bits
+ code |= (imm & 0x00ff);
+ code |= (imm & 0x0700) << 4;
+ code |= (imm & 0x0800) << 15;
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_N: // T2_N .....i......iiii .iiiddddiiiiiiii R1 imm16
- case IF_T2_N2: // T2_N2 .....i......iiii .iiiddddiiiiiiii R1 imm16
- sz = emitGetInstrDescSizeSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_D(id->idReg1());
- imm = emitGetInsSC(id);
- if (fmt == IF_T2_N2)
- {
- assert(!id->idIsLclVar());
- assert((ins == INS_movw) || (ins == INS_movt));
- imm += (size_t)emitConsBlock;
+ case IF_T2_N: // T2_N .....i......iiii .iiiddddiiiiiiii R1 imm16
+ case IF_T2_N2: // T2_N2 .....i......iiii .iiiddddiiiiiiii R1 imm16
+ sz = emitGetInstrDescSizeSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_D(id->idReg1());
+ imm = emitGetInsSC(id);
+ if (fmt == IF_T2_N2)
+ {
+ assert(!id->idIsLclVar());
+ assert((ins == INS_movw) || (ins == INS_movt));
+ imm += (size_t)emitConsBlock;
#ifdef RELOC_SUPPORT
- if (!id->idIsCnsReloc() && !id->idIsDspReloc())
+ if (!id->idIsCnsReloc() && !id->idIsDspReloc())
#endif
+ {
+ goto SPLIT_IMM;
+ }
+ }
+ else if (id->idIsLclVar())
{
- goto SPLIT_IMM;
+ SPLIT_IMM:
+ if (ins == INS_movw)
+ {
+ imm &= 0xffff;
+ }
+ else
+ {
+ imm = (imm >> 16) & 0xffff;
+ }
}
- }
- else if (id->idIsLclVar())
- {
-SPLIT_IMM:
- if (ins == INS_movw)
+
+#ifdef RELOC_SUPPORT
+ if (id->idIsCnsReloc() || id->idIsDspReloc())
{
- imm &= 0xffff;
+ assert((ins == INS_movt) || (ins == INS_movw));
+ dst += emitOutput_Thumb2Instr(dst, code);
+ if ((ins == INS_movt) && emitComp->info.compMatchedVM)
+ emitRecordRelocation((void*)(dst - 8), (void*)imm, IMAGE_REL_BASED_THUMB_MOV32);
}
else
+#endif // RELOC_SUPPORT
{
- imm = (imm >> 16) & 0xffff;
+ assert((imm & 0x0000ffff) == imm);
+ code |= (imm & 0x00ff);
+ code |= ((imm & 0x0700) << 4);
+ code |= ((imm & 0x0800) << 15);
+ code |= ((imm & 0xf000) << 4);
+ dst += emitOutput_Thumb2Instr(dst, code);
}
- }
+ break;
-#ifdef RELOC_SUPPORT
- if (id->idIsCnsReloc() || id->idIsDspReloc())
- {
- assert((ins == INS_movt) || (ins == INS_movw));
- dst += emitOutput_Thumb2Instr(dst, code);
- if ((ins == INS_movt) && emitComp->info.compMatchedVM)
- emitRecordRelocation((void*)(dst-8), (void *)imm, IMAGE_REL_BASED_THUMB_MOV32);
- }
- else
-#endif // RELOC_SUPPORT
- {
- assert((imm & 0x0000ffff) == imm);
- code |= (imm & 0x00ff);
- code |= ((imm & 0x0700) << 4);
- code |= ((imm & 0x0800) << 15);
- code |= ((imm & 0xf000) << 4);
- dst += emitOutput_Thumb2Instr(dst, code);
- }
- break;
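For reference, the imm16 scatter used by movw/movt just above (identical in the old and new formatting) places imm[7:0], imm[10:8], imm[11] and imm[15:12] into the Thumb-2 imm8, imm3, i and imm4 fields. A self-contained restatement of that packing, with the helper name invented for the example:

static unsigned insertThumb2Imm16(unsigned opcode, unsigned imm16)
{
    opcode |= (imm16 & 0x00ff);       // imm8 -> bits [7:0]
    opcode |= (imm16 & 0x0700) << 4;  // imm3 -> bits [14:12]
    opcode |= (imm16 & 0x0800) << 15; // i    -> bit  26
    opcode |= (imm16 & 0xf000) << 4;  // imm4 -> bits [19:16]
    return opcode;
}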
+ case IF_T2_VFP3:
+ // these are the binary operators
+ // d = n - m
+ sz = emitGetInstrDescSize(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_VectorN(id->idReg2(), size, true);
+ code |= insEncodeRegT2_VectorM(id->idReg3(), size, true);
+ code |= insEncodeRegT2_VectorD(id->idReg1(), size, true);
+ if (size == EA_8BYTE)
+ code |= 1 << 8;
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T2_VFP3:
- // these are the binary operators
- // d = n - m
- sz = emitGetInstrDescSize(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_VectorN(id->idReg2(), size, true);
- code |= insEncodeRegT2_VectorM(id->idReg3(), size, true);
- code |= insEncodeRegT2_VectorD(id->idReg1(), size, true);
- if (size == EA_8BYTE)
- code |= 1 << 8;
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_VFP2:
+ {
+ emitAttr srcSize;
+ emitAttr dstSize;
+ size_t szCode = 0;
- case IF_T2_VFP2:
- {
- emitAttr srcSize;
- emitAttr dstSize;
- size_t szCode = 0;
+ switch (ins)
+ {
+ case INS_vcvt_i2d:
+ case INS_vcvt_u2d:
+ case INS_vcvt_f2d:
+ srcSize = EA_4BYTE;
+ dstSize = EA_8BYTE;
+ break;
- switch (ins)
- {
- case INS_vcvt_i2d:
- case INS_vcvt_u2d:
- case INS_vcvt_f2d:
- srcSize = EA_4BYTE;
- dstSize = EA_8BYTE;
- break;
+ case INS_vcvt_d2i:
+ case INS_vcvt_d2u:
+ case INS_vcvt_d2f:
+ srcSize = EA_8BYTE;
+ dstSize = EA_4BYTE;
+ break;
- case INS_vcvt_d2i:
- case INS_vcvt_d2u:
- case INS_vcvt_d2f:
- srcSize = EA_8BYTE;
- dstSize = EA_4BYTE;
- break;
+ case INS_vmov:
+ case INS_vabs:
+ case INS_vsqrt:
+ case INS_vcmp:
+ case INS_vneg:
+ if (id->idOpSize() == EA_8BYTE)
+ szCode |= (1 << 8);
+ __fallthrough;
+
+ default:
+ srcSize = dstSize = id->idOpSize();
+ break;
+ }
- case INS_vmov:
- case INS_vabs:
- case INS_vsqrt:
- case INS_vcmp:
- case INS_vneg:
- if (id->idOpSize() == EA_8BYTE)
- szCode |= (1<<8);
- __fallthrough;
+ sz = emitGetInstrDescSize(id);
+ code = emitInsCode(ins, fmt);
+ code |= szCode;
+ code |= insEncodeRegT2_VectorD(id->idReg1(), dstSize, true);
+ code |= insEncodeRegT2_VectorM(id->idReg2(), srcSize, true);
- default:
- srcSize = dstSize = id->idOpSize();
+ dst += emitOutput_Thumb2Instr(dst, code);
break;
}
-
- sz = emitGetInstrDescSize(id);
- code = emitInsCode(ins, fmt);
- code |= szCode;
- code |= insEncodeRegT2_VectorD(id->idReg1(), dstSize, true);
- code |= insEncodeRegT2_VectorM(id->idReg2(), srcSize, true);
-
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
- }
-
- case IF_T2_VLDST:
- sz = emitGetInstrDescSizeSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT2_N(id->idReg2());
- code |= insEncodeRegT2_VectorD(id->idReg1(), size, true);
- imm = emitGetInsSC(id);
- if (imm < 0)
- imm = -imm; // bit 23 at 0 means negate
- else
- code |= 1 << 23; // set the positive bit
-
- // offset is +/- 1020
- assert(!(imm % 4));
- assert(imm >> 10 == 0);
- code |= imm >> 2;
- // bit 8 is set for doubles
- if (id->idOpSize() == EA_8BYTE)
- code |= (1<<8);
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_VLDST:
+ sz = emitGetInstrDescSizeSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT2_N(id->idReg2());
+ code |= insEncodeRegT2_VectorD(id->idReg1(), size, true);
- case IF_T2_VMOVD:
- // 3op assemble a double from two int regs (or back)
- sz = emitGetInstrDescSize(id);
- code = emitInsCode(ins, fmt);
- if (ins == INS_vmov_i2d)
- {
- code |= insEncodeRegT2_VectorM(id->idReg1(), size, true);
- code |= id->idReg2() << 12;
- code |= id->idReg3() << 16;
- }
- else
- {
- assert(ins == INS_vmov_d2i);
- code |= id->idReg1() << 12;
- code |= id->idReg2() << 16;
- code |= insEncodeRegT2_VectorM(id->idReg3(), size, true);
- }
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ imm = emitGetInsSC(id);
+ if (imm < 0)
+ imm = -imm; // bit 23 at 0 means negate
+ else
+ code |= 1 << 23; // set the positive bit
- case IF_T2_VMOVS:
- // 2op assemble a float from one int reg (or back)
- sz = emitGetInstrDescSize(id);
- code = emitInsCode(ins, fmt);
- if (ins == INS_vmov_f2i)
- {
- code |= insEncodeRegT2_VectorN(id->idReg2(), EA_4BYTE, true);
- code |= id->idReg1() << 12;
- }
- else
- {
- assert(ins == INS_vmov_i2f);
- code |= insEncodeRegT2_VectorN(id->idReg1(), EA_4BYTE, true);
- code |= id->idReg2() << 12;
- }
+ // offset is +/- 1020
+ assert(!(imm % 4));
+ assert(imm >> 10 == 0);
+ code |= imm >> 2;
+ // bit 8 is set for doubles
+ if (id->idOpSize() == EA_8BYTE)
+ code |= (1 << 8);
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- dst += emitOutput_Thumb2Instr(dst, code);
- break;
+ case IF_T2_VMOVD:
+ // 3op assemble a double from two int regs (or back)
+ sz = emitGetInstrDescSize(id);
+ code = emitInsCode(ins, fmt);
+ if (ins == INS_vmov_i2d)
+ {
+ code |= insEncodeRegT2_VectorM(id->idReg1(), size, true);
+ code |= id->idReg2() << 12;
+ code |= id->idReg3() << 16;
+ }
+ else
+ {
+ assert(ins == INS_vmov_d2i);
+ code |= id->idReg1() << 12;
+ code |= id->idReg2() << 16;
+ code |= insEncodeRegT2_VectorM(id->idReg3(), size, true);
+ }
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T1_J3: // T1_J3 .....dddiiiiiiii R1 PC imm8
- case IF_T2_M1: // T2_M1 .....i.......... .iiiddddiiiiiiii R1 PC imm12
- assert(id->idGCref() == GCT_NONE);
- assert(id->idIsBound());
+ case IF_T2_VMOVS:
+ // 2op assemble a float from one int reg (or back)
+ sz = emitGetInstrDescSize(id);
+ code = emitInsCode(ins, fmt);
+ if (ins == INS_vmov_f2i)
+ {
+ code |= insEncodeRegT2_VectorN(id->idReg2(), EA_4BYTE, true);
+ code |= id->idReg1() << 12;
+ }
+ else
+ {
+ assert(ins == INS_vmov_i2f);
+ code |= insEncodeRegT2_VectorN(id->idReg1(), EA_4BYTE, true);
+ code |= id->idReg2() << 12;
+ }
- dst = emitOutputLJ(ig, dst, id);
- sz = sizeof(instrDescLbl);
- break;
+ dst += emitOutput_Thumb2Instr(dst, code);
+ break;
- case IF_T1_K: // T1_K ....cccciiiiiiii Branch imm8, cond4
- case IF_T1_M: // T1_M .....iiiiiiiiiii Branch imm11
- case IF_T2_J1: // T2_J1 .....Scccciiiiii ..j.jiiiiiiiiiii Branch imm20, cond4
- case IF_T2_J2: // T2_J2 .....Siiiiiiiiii ..j.jiiiiiiiiii. Branch imm24
- case IF_T2_N1: // T2_N .....i......iiii .iiiddddiiiiiiii R1 imm16
- case IF_LARGEJMP:
- assert(id->idGCref() == GCT_NONE);
- assert(id->idIsBound());
-
- dst = emitOutputLJ(ig, dst, id);
- sz = sizeof(instrDescJmp);
- break;
+ case IF_T1_J3: // T1_J3 .....dddiiiiiiii R1 PC imm8
+ case IF_T2_M1: // T2_M1 .....i.......... .iiiddddiiiiiiii R1 PC imm12
+ assert(id->idGCref() == GCT_NONE);
+ assert(id->idIsBound());
- case IF_T1_D1: // T1_D1 .........mmmm... R1*
+ dst = emitOutputLJ(ig, dst, id);
+ sz = sizeof(instrDescLbl);
+ break;
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT1_M4(id->idReg1());
- dst += emitOutput_Thumb1Instr(dst, code);
- sz = SMALL_IDSC_SIZE;
- break;
+ case IF_T1_K: // T1_K ....cccciiiiiiii Branch imm8, cond4
+ case IF_T1_M: // T1_M .....iiiiiiiiiii Branch imm11
+ case IF_T2_J1: // T2_J1 .....Scccciiiiii ..j.jiiiiiiiiiii Branch imm20, cond4
+ case IF_T2_J2: // T2_J2 .....Siiiiiiiiii ..j.jiiiiiiiiii. Branch imm24
+ case IF_T2_N1: // T2_N .....i......iiii .iiiddddiiiiiiii R1 imm16
+ case IF_LARGEJMP:
+ assert(id->idGCref() == GCT_NONE);
+ assert(id->idIsBound());
+
+ dst = emitOutputLJ(ig, dst, id);
+ sz = sizeof(instrDescJmp);
+ break;
- case IF_T1_D2: // T1_D2 .........mmmm... R3*
+ case IF_T1_D1: // T1_D1 .........mmmm... R1*
- /* Is this a "fat" call descriptor? */
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT1_M4(id->idReg1());
+ dst += emitOutput_Thumb1Instr(dst, code);
+ sz = SMALL_IDSC_SIZE;
+ break;
- if (id->idIsLargeCall())
- {
- instrDescCGCA* idCall = (instrDescCGCA*) id;
- gcrefRegs = idCall->idcGcrefRegs;
- byrefRegs = idCall->idcByrefRegs;
- VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars);
- sz = sizeof(instrDescCGCA);
- }
- else
- {
- assert(!id->idIsLargeDsp());
- assert(!id->idIsLargeCns());
+ case IF_T1_D2: // T1_D2 .........mmmm... R3*
- gcrefRegs = emitDecodeCallGCregs(id);
- byrefRegs = 0;
- VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp));
- sz = sizeof(instrDesc);
- }
+ /* Is this a "fat" call descriptor? */
+
+ if (id->idIsLargeCall())
+ {
+ instrDescCGCA* idCall = (instrDescCGCA*)id;
+ gcrefRegs = idCall->idcGcrefRegs;
+ byrefRegs = idCall->idcByrefRegs;
+ VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars);
+ sz = sizeof(instrDescCGCA);
+ }
+ else
+ {
+ assert(!id->idIsLargeDsp());
+ assert(!id->idIsLargeCns());
- code = emitInsCode(ins, fmt);
- code |= insEncodeRegT1_M4(id->idReg3());
- callInstrSize = SafeCvtAssert<unsigned char>(emitOutput_Thumb1Instr(dst, code));
- dst += callInstrSize;
- goto DONE_CALL;
+ gcrefRegs = emitDecodeCallGCregs(id);
+ byrefRegs = 0;
+ VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp));
+ sz = sizeof(instrDesc);
+ }
+
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeRegT1_M4(id->idReg3());
+ callInstrSize = SafeCvtAssert<unsigned char>(emitOutput_Thumb1Instr(dst, code));
+ dst += callInstrSize;
+ goto DONE_CALL;
- case IF_T2_J3: // T2_J3 .....Siiiiiiiiii ..j.jiiiiiiiiii. Call imm24
+ case IF_T2_J3: // T2_J3 .....Siiiiiiiiii ..j.jiiiiiiiiii. Call imm24
#ifdef ARM_HAZARD_AVOIDANCE
- if (id->idKraitNop())
- {
- // This is a pseudo-format representing a 32-bit nop followed by unconditional call.
- // First emit the nop
+ if (id->idKraitNop())
+ {
+ // This is a pseudo-format representing a 32-bit nop followed by unconditional call.
+ // First emit the nop
- dst = emitOutputNOP(dst, INS_nopw, IF_T2_A);
- }
+ dst = emitOutputNOP(dst, INS_nopw, IF_T2_A);
+ }
#endif
- /* Is this a "fat" call descriptor? */
+ /* Is this a "fat" call descriptor? */
- if (id->idIsLargeCall())
- {
- instrDescCGCA* idCall = (instrDescCGCA*) id;
- gcrefRegs = idCall->idcGcrefRegs;
- byrefRegs = idCall->idcByrefRegs;
- VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars);
- sz = sizeof(instrDescCGCA);
- }
- else
- {
- assert(!id->idIsLargeDsp());
- assert(!id->idIsLargeCns());
+ if (id->idIsLargeCall())
+ {
+ instrDescCGCA* idCall = (instrDescCGCA*)id;
+ gcrefRegs = idCall->idcGcrefRegs;
+ byrefRegs = idCall->idcByrefRegs;
+ VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars);
+ sz = sizeof(instrDescCGCA);
+ }
+ else
+ {
+ assert(!id->idIsLargeDsp());
+ assert(!id->idIsLargeCns());
- gcrefRegs = emitDecodeCallGCregs(id);
- byrefRegs = 0;
- VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp));
- sz = sizeof(instrDesc);
- }
+ gcrefRegs = emitDecodeCallGCregs(id);
+ byrefRegs = 0;
+ VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp));
+ sz = sizeof(instrDesc);
+ }
- addr = id->idAddr()->iiaAddr;
- code = emitInsCode(ins, fmt);
+ addr = id->idAddr()->iiaAddr;
+ code = emitInsCode(ins, fmt);
#ifdef RELOC_SUPPORT
- if (id->idIsDspReloc())
- {
- callInstrSize = SafeCvtAssert<unsigned char>(emitOutput_Thumb2Instr(dst, code));
- dst += callInstrSize;
- if (emitComp->info.compMatchedVM)
- emitRecordRelocation((void*)(dst - 4), addr, IMAGE_REL_BASED_THUMB_BRANCH24);
- }
- else
+ if (id->idIsDspReloc())
+ {
+ callInstrSize = SafeCvtAssert<unsigned char>(emitOutput_Thumb2Instr(dst, code));
+ dst += callInstrSize;
+ if (emitComp->info.compMatchedVM)
+ emitRecordRelocation((void*)(dst - 4), addr, IMAGE_REL_BASED_THUMB_BRANCH24);
+ }
+ else
#endif // RELOC_SUPPORT
- {
- addr = (BYTE *)((size_t)addr & ~1); // Clear the lowest bit from target address
+ {
+ addr = (BYTE*)((size_t)addr & ~1); // Clear the lowest bit from target address
- /* Calculate PC relative displacement */
- int disp = addr - (dst + 4);
- bool S = (disp < 0);
- bool I1 = ((disp & 0x00800000) == 0);
- bool I2 = ((disp & 0x00400000) == 0);
+ /* Calculate PC relative displacement */
+ int disp = addr - (dst + 4);
+ bool S = (disp < 0);
+ bool I1 = ((disp & 0x00800000) == 0);
+ bool I2 = ((disp & 0x00400000) == 0);
- if (S)
- code |= (1 << 26); // S bit
- if (S ^ I1)
- code |= (1 << 13); // J1 bit
- if (S ^ I2)
- code |= (1 << 11); // J2 bit
+ if (S)
+ code |= (1 << 26); // S bit
+ if (S ^ I1)
+ code |= (1 << 13); // J1 bit
+ if (S ^ I2)
+ code |= (1 << 11); // J2 bit
- int immLo = (disp & 0x00000ffe) >> 1;
- int immHi = (disp & 0x003ff000) >> 12;
+ int immLo = (disp & 0x00000ffe) >> 1;
+ int immHi = (disp & 0x003ff000) >> 12;
- code |= (immHi << 16);
- code |= immLo;
+ code |= (immHi << 16);
+ code |= immLo;
- disp = abs(disp);
- assert((disp & 0x00fffffe) == disp);
+ disp = abs(disp);
+ assert((disp & 0x00fffffe) == disp);
- callInstrSize = SafeCvtAssert<unsigned char>(emitOutput_Thumb2Instr(dst, code));
- dst += callInstrSize;
- }
+ callInstrSize = SafeCvtAssert<unsigned char>(emitOutput_Thumb2Instr(dst, code));
+ dst += callInstrSize;
+ }
-DONE_CALL:
+ DONE_CALL:
- /* We update the GC info before the call as the variables cannot be
- used by the call. Killing variables before the call helps with
- boundary conditions if the call is CORINFO_HELP_THROW - see bug 50029.
- If we ever track aliased variables (which could be used by the
- call), we would have to keep them alive past the call. */
+ /* We update the GC info before the call as the variables cannot be
+ used by the call. Killing variables before the call helps with
+ boundary conditions if the call is CORINFO_HELP_THROW - see bug 50029.
+ If we ever track aliased variables (which could be used by the
+ call), we would have to keep them alive past the call. */
- emitUpdateLiveGCvars(GCvars, *dp);
+ emitUpdateLiveGCvars(GCvars, *dp);
- // If the method returns a GC ref, mark R0 appropriately.
- if (id->idGCref() == GCT_GCREF)
- gcrefRegs |= RBM_R0;
- else if (id->idGCref() == GCT_BYREF)
- byrefRegs |= RBM_R0;
+ // If the method returns a GC ref, mark R0 appropriately.
+ if (id->idGCref() == GCT_GCREF)
+ gcrefRegs |= RBM_R0;
+ else if (id->idGCref() == GCT_BYREF)
+ byrefRegs |= RBM_R0;
- // If the GC register set has changed, report the new set.
- if (gcrefRegs != emitThisGCrefRegs)
- emitUpdateLiveGCregs(GCT_GCREF, gcrefRegs, dst);
+ // If the GC register set has changed, report the new set.
+ if (gcrefRegs != emitThisGCrefRegs)
+ emitUpdateLiveGCregs(GCT_GCREF, gcrefRegs, dst);
- if (byrefRegs != emitThisByrefRegs)
- emitUpdateLiveGCregs(GCT_BYREF, byrefRegs, dst);
-
- // Some helper calls may be marked as not requiring GC info to be recorded.
- if ((!id->idIsNoGC()))
- {
- // On ARM, as on AMD64, we don't change the stack pointer to push/pop args.
- // So we're not really doing a "stack pop" here (note that "args" is 0), but we use this mechanism
- // to record the call for GC info purposes. (It might be best to use an alternate call,
- // and protect "emitStackPop" under the EMIT_TRACK_STACK_DEPTH preprocessor variable.)
- emitStackPop(dst, /*isCall*/true, callInstrSize, /*args*/0);
-
- /* Do we need to record a call location for GC purposes? */
+ if (byrefRegs != emitThisByrefRegs)
+ emitUpdateLiveGCregs(GCT_BYREF, byrefRegs, dst);
- if (!emitFullGCinfo)
+ // Some helper calls may be marked as not requiring GC info to be recorded.
+ if ((!id->idIsNoGC()))
{
- emitRecordGCcall(dst, callInstrSize);
+ // On ARM, as on AMD64, we don't change the stack pointer to push/pop args.
+ // So we're not really doing a "stack pop" here (note that "args" is 0), but we use this mechanism
+ // to record the call for GC info purposes. (It might be best to use an alternate call,
+ // and protect "emitStackPop" under the EMIT_TRACK_STACK_DEPTH preprocessor variable.)
+ emitStackPop(dst, /*isCall*/ true, callInstrSize, /*args*/ 0);
+
+ /* Do we need to record a call location for GC purposes? */
+
+ if (!emitFullGCinfo)
+ {
+ emitRecordGCcall(dst, callInstrSize);
+ }
}
- }
- break;
+ break;
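The displacement packing in the non-reloc call path above follows the Thumb-2 BL/B.W immediate layout (S, J1, J2, imm10, imm11). A standalone restatement of those masks and shifts, for reference only; the helper name is invented, and disp is the signed, even byte displacement the code computes as addr - (dst + 4):

static unsigned packT2Branch24(unsigned opcode, int disp)
{
    bool S  = (disp < 0);
    bool I1 = ((disp & 0x00800000) == 0);
    bool I2 = ((disp & 0x00400000) == 0);

    if (S)
        opcode |= (1 << 26); // S bit
    if (S ^ I1)
        opcode |= (1 << 13); // J1 bit
    if (S ^ I2)
        opcode |= (1 << 11); // J2 bit

    opcode |= ((disp & 0x003ff000) >> 12) << 16; // imm10 -> bits [25:16]
    opcode |= ((disp & 0x00000ffe) >> 1);        // imm11 -> bits [10:0]
    return opcode;
}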
/********************************************************************/
/* oops */
/********************************************************************/
- default:
+ default:
-#ifdef DEBUG
- printf("unexpected format %s\n", emitIfName(id->idInsFmt()));
- assert(!"don't know how to encode this instruction");
+#ifdef DEBUG
+ printf("unexpected format %s\n", emitIfName(id->idInsFmt()));
+ assert(!"don't know how to encode this instruction");
#endif
- break;
+ break;
}
// Determine if any registers now hold GC refs, or whether a register that was overwritten held a GC ref.
// We assume here that "id->idGCref()" is not GC_NONE only if the instruction described by "id" writes a
// GC ref to register "id->idReg1()". (It may, apparently, also not be GC_NONE in other cases, such as
// for stores, but we ignore those cases here.)
- if (emitInsMayWriteToGCReg(id)) // True if "id->idIns()" writes to a register than can hold GC ref.
+ if (emitInsMayWriteToGCReg(id)) // True if "id->idIns()" writes to a register than can hold GC ref.
{
// If we ever generate instructions that write to multiple registers (LDM, or POP),
// then we'd need to more work here to ensure that changes in the status of GC refs are
@@ -6555,15 +6398,17 @@ DONE_CALL:
// be emitted outside of the prolog and epilog here.
switch (ins)
{
- case INS_smull: case INS_umull:
- case INS_smlal: case INS_umlal:
- case INS_vmov_d2i:
- // For each of these, idReg1() and idReg2() are the destination registers.
- emitGCregDeadUpd(id->idReg1(), dst);
- emitGCregDeadUpd(id->idReg2(), dst);
- break;
- default:
- assert(false); // We need to recognize this multi-target instruction...
+ case INS_smull:
+ case INS_umull:
+ case INS_smlal:
+ case INS_umlal:
+ case INS_vmov_d2i:
+ // For each of these, idReg1() and idReg2() are the destination registers.
+ emitGCregDeadUpd(id->idReg1(), dst);
+ emitGCregDeadUpd(id->idReg2(), dst);
+ break;
+ default:
+ assert(false); // We need to recognize this multi-target instruction...
}
}
else
@@ -6584,10 +6429,10 @@ DONE_CALL:
// ref or overwritten one.
if (emitInsWritesToLclVarStackLoc(id))
{
- int varNum = id->idAddr()->iiaLclVar.lvaVarNum();
- unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), sizeof(size_t));
+ int varNum = id->idAddr()->iiaLclVar.lvaVarNum();
+ unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), sizeof(size_t));
regNumber regBase;
- int adr = emitComp->lvaFrameAddress(varNum, true, &regBase, ofs);
+ int adr = emitComp->lvaFrameAddress(varNum, true, &regBase, ofs);
if (id->idGCref() != GCT_NONE)
{
emitGCvarLiveUpd(adr + ofs, varNum, id->idGCref(), dst);
@@ -6604,39 +6449,40 @@ DONE_CALL:
else
{
TempDsc* tmpDsc = emitComp->tmpFindNum(varNum);
- vt = tmpDsc->tdTempType();
+ vt = tmpDsc->tdTempType();
}
if (vt == TYP_REF || vt == TYP_BYREF)
emitGCvarDeadUpd(adr + ofs, dst);
}
}
-#ifdef DEBUG
+#ifdef DEBUG
/* Make sure we set the instruction descriptor size correctly */
size_t expected = emitSizeOfInsDsc(id);
assert(sz == expected);
- if (emitComp->opts.disAsm || emitComp->opts.dspEmit || emitComp->verbose)
+ if (emitComp->opts.disAsm || emitComp->opts.dspEmit || emitComp->verbose)
{
- emitDispIns(id, false, dspOffs, true, emitCurCodeOffs(odst), *dp, (dst-*dp), ig);
+ emitDispIns(id, false, dspOffs, true, emitCurCodeOffs(odst), *dp, (dst - *dp), ig);
}
if (emitComp->compDebugBreak)
{
// set JitEmitPrintRefRegs=1 will print out emitThisGCrefRegs and emitThisByrefRegs
// at the beginning of this method.
- if (JitConfig.JitEmitPrintRefRegs() != 0) {
+ if (JitConfig.JitEmitPrintRefRegs() != 0)
+ {
printf("Before emitOutputInstr for id->idDebugOnlyInfo()->idNum=0x%02x\n", id->idDebugOnlyInfo()->idNum);
printf(" emitThisGCrefRegs(0x%p)=", dspPtr(&emitThisGCrefRegs));
- printRegMaskInt(emitThisGCrefRegs);
- emitDispRegSet (emitThisGCrefRegs);
- printf("\n");
+ printRegMaskInt(emitThisGCrefRegs);
+ emitDispRegSet(emitThisGCrefRegs);
+ printf("\n");
printf(" emitThisByrefRegs(0x%p)=", dspPtr(&emitThisByrefRegs));
- printRegMaskInt(emitThisByrefRegs);
- emitDispRegSet (emitThisByrefRegs);
- printf("\n");
- }
+ printRegMaskInt(emitThisByrefRegs);
+ emitDispRegSet(emitThisByrefRegs);
+ printf("\n");
+ }
// For example, set JitBreakEmitOutputInstr=a6 will break when this method is called for
// emitting instruction a6, (i.e. IN00a6 in jitdump).
@@ -6653,29 +6499,28 @@ DONE_CALL:
*dp = dst;
- return sz;
+ return sz;
}
-
/*****************************************************************************/
/*****************************************************************************/
#ifdef DEBUG
-static bool insAlwaysSetFlags(instruction ins)
+static bool insAlwaysSetFlags(instruction ins)
{
bool result = false;
switch (ins)
{
- case INS_cmp:
- case INS_cmn:
- case INS_teq:
- case INS_tst:
- result = true;
- break;
-
- default:
- break;
+ case INS_cmp:
+ case INS_cmn:
+ case INS_teq:
+ case INS_tst:
+ result = true;
+ break;
+
+ default:
+ break;
}
return result;
}
@@ -6685,10 +6530,10 @@ static bool insAlwaysSetFlags(instruction ins)
* Display the instruction name, optionally the instruction
* can add the "s" suffix if it must set the flags.
*/
-void emitter::emitDispInst(instruction ins, insFlags flags)
+void emitter::emitDispInst(instruction ins, insFlags flags)
{
- const char * insstr = codeGen->genInsName(ins);
- int len = strlen(insstr);
+ const char* insstr = codeGen->genInsName(ins);
+ int len = strlen(insstr);
/* Display the instruction name */
@@ -6702,11 +6547,11 @@ void emitter::emitDispInst(instruction ins, insFlags flags)
//
// Add at least one space after the instruction name
// and add spaces until we have reach the normal size of 8
- do {
- printf(" ");
- len++;
- }
- while (len < 8);
+ do
+ {
+ printf(" ");
+ len++;
+ } while (len < 8);
}
/*****************************************************************************
@@ -6715,7 +6560,7 @@ void emitter::emitDispInst(instruction ins, insFlags flags)
* If we are formatting for an assembly listing don't print the hex value
* since it will prevent us from doing assembly diffs
*/
-void emitter::emitDispReloc(int value, bool addComma)
+void emitter::emitDispReloc(int value, bool addComma)
{
if (emitComp->opts.disAsm)
{
@@ -6736,11 +6581,12 @@ void emitter::emitDispReloc(int value, bool addComma)
*
* Display an immediate value
*/
-void emitter::emitDispImm(int imm, bool addComma, bool alwaysHex /* =false */)
+void emitter::emitDispImm(int imm, bool addComma, bool alwaysHex /* =false */)
{
if (!alwaysHex && (imm > -1000) && (imm < 1000))
printf("%d", imm);
- else if ((imm > 0) || (imm == -imm) || // -0x80000000 == 0x80000000. So we don't want to add an extra "-" at the beginning.
+ else if ((imm > 0) ||
+ (imm == -imm) || // -0x80000000 == 0x80000000. So we don't want to add an extra "-" at the beginning.
(emitComp->opts.disDiffable && (imm == 0xD1FFAB1E))) // Don't display this as negative
printf("0x%02x", imm);
else // val <= -1000
@@ -6754,12 +6600,10 @@ void emitter::emitDispImm(int imm, bool addComma, bool alwaysHex
*
* Display an arm condition for the IT instructions
*/
-void emitter::emitDispCond(int cond)
+void emitter::emitDispCond(int cond)
{
- const static char* armCond[16] = { "eq", "ne", "hs", "lo",
- "mi", "pl", "vs", "vc",
- "hi", "ls", "ge", "lt",
- "gt", "le", "AL", "NV" }; // The last two are invalid
+ const static char* armCond[16] = {"eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "AL", "NV"}; // The last two are invalid
assert(0 <= cond && (unsigned)cond < ArrLen(armCond));
printf(armCond[cond]);
}
@@ -6768,14 +6612,14 @@ void emitter::emitDispCond(int cond)
*
* Display a register range in a range format
*/
-void emitter::emitDispRegRange(regNumber reg, int len, emitAttr attr)
+void emitter::emitDispRegRange(regNumber reg, int len, emitAttr attr)
{
printf("{");
emitDispReg(reg, attr, false);
if (len > 1)
{
printf("-");
- emitDispReg((regNumber) (reg + len - 1), attr, false);
+ emitDispReg((regNumber)(reg + len - 1), attr, false);
}
printf("}");
}
@@ -6784,7 +6628,7 @@ void emitter::emitDispRegRange(regNumber reg, int len, emitAttr a
*
* Display an register mask in a list format
*/
-void emitter::emitDispRegmask(int imm, bool encodedPC_LR)
+void emitter::emitDispRegmask(int imm, bool encodedPC_LR)
{
bool printedOne = false;
bool hasPC;
@@ -6798,13 +6642,13 @@ void emitter::emitDispRegmask(int imm, bool encodedPC_LR)
}
else
{
- hasPC = (imm & RBM_PC) != 0;
- hasLR = (imm & RBM_LR) != 0;
- imm &= ~(RBM_PC | RBM_LR);
+ hasPC = (imm & RBM_PC) != 0;
+ hasLR = (imm & RBM_LR) != 0;
+ imm &= ~(RBM_PC | RBM_LR);
}
regNumber reg = REG_R0;
- unsigned bit = 1;
+ unsigned bit = 1;
printf("{");
while (imm != 0)
@@ -6818,7 +6662,7 @@ void emitter::emitDispRegmask(int imm, bool encodedPC_LR)
imm -= bit;
}
- reg = regNumber (reg + 1);
+ reg = regNumber(reg + 1);
bit <<= 1;
}
@@ -6845,7 +6689,7 @@ void emitter::emitDispRegmask(int imm, bool encodedPC_LR)
* Returns the encoding for the Shift Type bits to be used in a Thumb-2 encoding
*/
-void emitter::emitDispShiftOpts(insOpts opt)
+void emitter::emitDispShiftOpts(insOpts opt)
{
if (opt == INS_OPTS_LSL)
printf(" LSL ");
@@ -6857,19 +6701,18 @@ void emitter::emitDispShiftOpts(insOpts opt)
printf(" ROR ");
else if (opt == INS_OPTS_RRX)
printf(" RRX ");
-}
-
+}
/*****************************************************************************
*
* Display a register
*/
-void emitter::emitDispReg(regNumber reg, emitAttr attr, bool addComma)
+void emitter::emitDispReg(regNumber reg, emitAttr attr, bool addComma)
{
if (isFloatReg(reg))
{
- const char *size = attr == EA_8BYTE ? "d" : "s";
- printf("%s%s", size, emitFloatRegName(reg, attr)+1);
+ const char* size = attr == EA_8BYTE ? "d" : "s";
+ printf("%s%s", size, emitFloatRegName(reg, attr) + 1);
}
else
{
@@ -6888,7 +6731,7 @@ void emitter::emitDispFloatReg(regNumber reg, emitAttr attr, bool addComma)
*
* Display an addressing operand [reg]
*/
-void emitter::emitDispAddrR(regNumber reg, emitAttr attr)
+void emitter::emitDispAddrR(regNumber reg, emitAttr attr)
{
printf("[");
emitDispReg(reg, attr, false);
@@ -6900,7 +6743,7 @@ void emitter::emitDispAddrR(regNumber reg, emitAttr attr)
*
* Display an addressing operand [reg + imm]
*/
-void emitter::emitDispAddrRI(regNumber reg, int imm, emitAttr attr)
+void emitter::emitDispAddrRI(regNumber reg, int imm, emitAttr attr)
{
bool regIsSPorFP = (reg == REG_SP) || (reg == REG_FP);
@@ -6926,7 +6769,7 @@ void emitter::emitDispAddrRI(regNumber reg, int imm, emitAttr at
*
* Display an addressing operand [reg + reg]
*/
-void emitter::emitDispAddrRR(regNumber reg1, regNumber reg2, emitAttr attr)
+void emitter::emitDispAddrRR(regNumber reg1, regNumber reg2, emitAttr attr)
{
printf("[");
emitDispReg(reg1, attr, false);
@@ -6944,8 +6787,7 @@ void emitter::emitDispAddrRR(regNumber reg1, regNumber reg2, emit
*
* Display an addressing operand [reg + reg * imm]
*/
-void emitter::emitDispAddrRRI(regNumber reg1, regNumber reg2,
- int imm, emitAttr attr)
+void emitter::emitDispAddrRRI(regNumber reg1, regNumber reg2, int imm, emitAttr attr)
{
printf("[");
emitDispReg(reg1, attr, false);
@@ -6974,8 +6816,7 @@ void emitter::emitDispAddrRRI(regNumber reg1, regNumber reg2,
*
* Display an addressing operand [reg + imm]
*/
-void emitter::emitDispAddrPUW(regNumber reg, int imm,
- insOpts opt, emitAttr attr)
+void emitter::emitDispAddrPUW(regNumber reg, int imm, insOpts opt, emitAttr attr)
{
bool regIsSPorFP = (reg == REG_SP) || (reg == REG_FP);
@@ -7003,9 +6844,9 @@ void emitter::emitDispAddrPUW(regNumber reg, int imm,
/*****************************************************************************
*
- * Display the gc-ness of the operand
+ * Display the gc-ness of the operand
*/
-void emitter::emitDispGC(emitAttr attr)
+void emitter::emitDispGC(emitAttr attr)
{
#if 0
// TODO-ARM-Cleanup: Fix or delete.
@@ -7018,22 +6859,21 @@ void emitter::emitDispGC(emitAttr attr)
/*****************************************************************************
*
- * Display (optionally) the instruction encoding in hex
+ * Display (optionally) the instruction encoding in hex
*/
-void emitter::emitDispInsHex(BYTE * code, size_t sz)
+void emitter::emitDispInsHex(BYTE* code, size_t sz)
{
// We do not display the instruction hex if we want diff-able disassembly
if (!emitComp->opts.disDiffable)
{
if (sz == 2)
{
- printf(" %04X ", (*((unsigned short *) code)));
+ printf(" %04X ", (*((unsigned short*)code)));
}
else if (sz == 4)
{
- printf(" %04X %04X", (*((unsigned short *) (code+0))),
- (*((unsigned short *) (code+2))));
+ printf(" %04X %04X", (*((unsigned short*)(code + 0))), (*((unsigned short*)(code + 2))));
}
}
}
@@ -7043,19 +6883,13 @@ void emitter::emitDispInsHex(BYTE * code, size_t sz)
* Display the given instruction.
*/
-void emitter::emitDispInsHelp(instrDesc * id,
- bool isNew,
- bool doffs,
- bool asmfm,
- unsigned offset,
- BYTE * code,
- size_t sz,
- insGroup * ig)
+void emitter::emitDispInsHelp(
+ instrDesc* id, bool isNew, bool doffs, bool asmfm, unsigned offset, BYTE* code, size_t sz, insGroup* ig)
{
if (EMITVERBOSE)
{
- unsigned idNum = id->idDebugOnlyInfo()->idNum; // Do not remove this! It is needed for VisualStudio
- // conditional breakpoints
+ unsigned idNum = id->idDebugOnlyInfo()->idNum; // Do not remove this! It is needed for VisualStudio
+ // conditional breakpoints
printf("IN%04x: ", idNum);
}
@@ -7063,7 +6897,7 @@ void emitter::emitDispInsHelp(instrDesc * id,
if (code == NULL)
sz = 0;
- if (!emitComp->opts.dspEmit && !isNew && !asmfm && sz)
+ if (!emitComp->opts.dspEmit && !isNew && !asmfm && sz)
doffs = true;
/* Display the instruction offset */
@@ -7078,8 +6912,8 @@ void emitter::emitDispInsHelp(instrDesc * id,
/* Get the instruction and format */
- instruction ins = id->idIns();
- insFormat fmt = id->idInsFmt();
+ instruction ins = id->idIns();
+ insFormat fmt = id->idInsFmt();
emitDispInst(ins, id->idInsFlags());
@@ -7088,495 +6922,493 @@ void emitter::emitDispInsHelp(instrDesc * id,
assert(isNew == false || (int)emitSizeOfInsDsc(id) == emitCurIGfreeNext - (BYTE*)id);
/* Figure out the operand size */
- emitAttr attr;
- if (id->idGCref() == GCT_GCREF)
+ emitAttr attr;
+ if (id->idGCref() == GCT_GCREF)
attr = EA_GCREF;
- else if (id->idGCref() == GCT_BYREF)
+ else if (id->idGCref() == GCT_BYREF)
attr = EA_BYREF;
else
attr = id->idOpSize();
switch (fmt)
{
- int imm;
- int offs;
- const char * methodName;
+ int imm;
+ int offs;
+ const char* methodName;
- case IF_T1_A: // None
- case IF_T2_A:
- break;
+ case IF_T1_A: // None
+ case IF_T2_A:
+ break;
- case IF_T1_L0: // Imm
- case IF_T2_B:
- emitDispImm(emitGetInsSC(id), false);
- break;
+ case IF_T1_L0: // Imm
+ case IF_T2_B:
+ emitDispImm(emitGetInsSC(id), false);
+ break;
- case IF_T1_B: // <cond>
- emitDispCond(emitGetInsSC(id));
- break;
+ case IF_T1_B: // <cond>
+ emitDispCond(emitGetInsSC(id));
+ break;
- case IF_T1_L1: // <regmask8>
- case IF_T2_I1: // <regmask16>
- emitDispRegmask(emitGetInsSC(id), true);
- break;
+ case IF_T1_L1: // <regmask8>
+ case IF_T2_I1: // <regmask16>
+ emitDispRegmask(emitGetInsSC(id), true);
+ break;
- case IF_T2_E2: // Reg
- if (id->idIns() == INS_vmrs)
- {
- if (id->idReg1() != REG_R15)
+ case IF_T2_E2: // Reg
+ if (id->idIns() == INS_vmrs)
{
- emitDispReg(id->idReg1(), attr, true);
- printf("FPSCR");
+ if (id->idReg1() != REG_R15)
+ {
+ emitDispReg(id->idReg1(), attr, true);
+ printf("FPSCR");
+ }
+ else
+ {
+ printf("APSR, FPSCR");
+ }
}
else
{
- printf("APSR, FPSCR");
+ emitDispReg(id->idReg1(), attr, false);
}
- }
- else
- {
- emitDispReg(id->idReg1(), attr, false);
- }
- break;
+ break;
- case IF_T1_D1:
- emitDispReg(id->idReg1(), attr, false);
- break;
+ case IF_T1_D1:
+ emitDispReg(id->idReg1(), attr, false);
+ break;
- case IF_T1_D2:
- emitDispReg(id->idReg3(), attr, false);
- {
- CORINFO_METHOD_HANDLE handle = (CORINFO_METHOD_HANDLE) id->idDebugOnlyInfo()->idMemCookie;
- if (handle != 0)
+ case IF_T1_D2:
+ emitDispReg(id->idReg3(), attr, false);
{
- methodName = emitComp->eeGetMethodFullName(handle);
- printf("\t\t// %s", methodName);
+ CORINFO_METHOD_HANDLE handle = (CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie;
+ if (handle != 0)
+ {
+ methodName = emitComp->eeGetMethodFullName(handle);
+ printf("\t\t// %s", methodName);
+ }
}
- }
- break;
+ break;
- case IF_T1_F: // SP, Imm
- emitDispReg(REG_SP, attr, true);
- emitDispImm(emitGetInsSC(id), false);
- break;
+ case IF_T1_F: // SP, Imm
+ emitDispReg(REG_SP, attr, true);
+ emitDispImm(emitGetInsSC(id), false);
+ break;
- case IF_T1_J0: // Reg, Imm
- case IF_T2_L1:
- case IF_T2_L2:
- case IF_T2_N:
- emitDispReg(id->idReg1(), attr, true);
- imm = emitGetInsSC(id);
- if (fmt == IF_T2_N)
- {
- if (emitComp->opts.disDiffable)
- imm = 0xD1FF;
-#if RELOC_SUPPORT
- if (id->idIsCnsReloc() || id->idIsDspReloc())
+ case IF_T1_J0: // Reg, Imm
+ case IF_T2_L1:
+ case IF_T2_L2:
+ case IF_T2_N:
+ emitDispReg(id->idReg1(), attr, true);
+ imm = emitGetInsSC(id);
+ if (fmt == IF_T2_N)
{
if (emitComp->opts.disDiffable)
- imm = 0xD1FFAB1E;
- printf("%s RELOC ", (id->idIns() == INS_movw) ? "LOW" : "HIGH");
- }
+ imm = 0xD1FF;
+#if RELOC_SUPPORT
+ if (id->idIsCnsReloc() || id->idIsDspReloc())
+ {
+ if (emitComp->opts.disDiffable)
+ imm = 0xD1FFAB1E;
+ printf("%s RELOC ", (id->idIns() == INS_movw) ? "LOW" : "HIGH");
+ }
#endif // RELOC_SUPPORT
- }
- emitDispImm(imm, false, (fmt == IF_T2_N));
- break;
-
- case IF_T2_N2:
- emitDispReg(id->idReg1(), attr, true);
- imm = emitGetInsSC(id);
- {
- dataSection * jdsc = 0;
- NATIVE_OFFSET offs = 0;
-
- /* Find the appropriate entry in the data section list */
+ }
+ emitDispImm(imm, false, (fmt == IF_T2_N));
+ break;
- for (jdsc = emitConsDsc.dsdList;
- jdsc;
- jdsc = jdsc->dsNext)
+ case IF_T2_N2:
+ emitDispReg(id->idReg1(), attr, true);
+ imm = emitGetInsSC(id);
{
- UNATIVE_OFFSET size = jdsc->dsSize;
+ dataSection* jdsc = 0;
+ NATIVE_OFFSET offs = 0;
- /* Is this a label table? */
+ /* Find the appropriate entry in the data section list */
- if (jdsc->dsType == dataSection::blockAbsoluteAddr)
+ for (jdsc = emitConsDsc.dsdList; jdsc; jdsc = jdsc->dsNext)
{
- if (offs == imm)
- break;
- }
+ UNATIVE_OFFSET size = jdsc->dsSize;
- offs += size;
- }
+ /* Is this a label table? */
- assert(jdsc != NULL);
+ if (jdsc->dsType == dataSection::blockAbsoluteAddr)
+ {
+ if (offs == imm)
+ break;
+ }
-#ifdef RELOC_SUPPORT
- if (id->idIsDspReloc())
- {
- printf("reloc ");
- }
-#endif
- printf("%s ADDRESS J_M%03u_DS%02u", (id->idIns() == INS_movw) ? "LOW" : "HIGH",
- Compiler::s_compMethodsCount,
- imm);
+ offs += size;
+ }
- // After the MOVT, dump the table
- if (id->idIns() == INS_movt)
- {
- unsigned cnt = jdsc->dsSize / TARGET_POINTER_SIZE;
- BasicBlock * * bbp = (BasicBlock**)jdsc->dsCont;
+ assert(jdsc != NULL);
- bool isBound = (emitCodeGetCookie(*bbp) != NULL);
+#ifdef RELOC_SUPPORT
+ if (id->idIsDspReloc())
+ {
+ printf("reloc ");
+ }
+#endif
+ printf("%s ADDRESS J_M%03u_DS%02u", (id->idIns() == INS_movw) ? "LOW" : "HIGH",
+ Compiler::s_compMethodsCount, imm);
- if (isBound)
+ // After the MOVT, dump the table
+ if (id->idIns() == INS_movt)
{
- printf("\n\n J_M%03u_DS%02u LABEL DWORD", Compiler::s_compMethodsCount, imm);
+ unsigned cnt = jdsc->dsSize / TARGET_POINTER_SIZE;
+ BasicBlock** bbp = (BasicBlock**)jdsc->dsCont;
- /* Display the label table (it's stored as "BasicBlock*" values) */
+ bool isBound = (emitCodeGetCookie(*bbp) != NULL);
- do
+ if (isBound)
{
- insGroup * lab;
+ printf("\n\n J_M%03u_DS%02u LABEL DWORD", Compiler::s_compMethodsCount, imm);
+
+ /* Display the label table (it's stored as "BasicBlock*" values) */
- /* Convert the BasicBlock* value to an IG address */
+ do
+ {
+ insGroup* lab;
- lab = (insGroup*)emitCodeGetCookie(*bbp++); assert(lab);
+ /* Convert the BasicBlock* value to an IG address */
- printf("\n DD G_M%03u_IG%02u", Compiler::s_compMethodsCount, lab->igNum);
+ lab = (insGroup*)emitCodeGetCookie(*bbp++);
+ assert(lab);
+
+ printf("\n DD G_M%03u_IG%02u", Compiler::s_compMethodsCount, lab->igNum);
+ } while (--cnt);
}
- while (--cnt);
}
}
- }
- break;
+ break;
- case IF_T2_H2: // [Reg+imm]
- case IF_T2_K2:
- emitDispAddrRI(id->idReg1(), emitGetInsSC(id), attr);
- break;
+ case IF_T2_H2: // [Reg+imm]
+ case IF_T2_K2:
+ emitDispAddrRI(id->idReg1(), emitGetInsSC(id), attr);
+ break;
- case IF_T2_K3: // [PC+imm]
- emitDispAddrRI(REG_PC, emitGetInsSC(id), attr);
- break;
+ case IF_T2_K3: // [PC+imm]
+ emitDispAddrRI(REG_PC, emitGetInsSC(id), attr);
+ break;
- case IF_T1_J1: // reg, <regmask8>
- case IF_T2_I0: // reg, <regmask16>
- emitDispReg(id->idReg1(), attr, false);
- printf("!, ");
- emitDispRegmask(emitGetInsSC(id), false);
- break;
+ case IF_T1_J1: // reg, <regmask8>
+ case IF_T2_I0: // reg, <regmask16>
+ emitDispReg(id->idReg1(), attr, false);
+ printf("!, ");
+ emitDispRegmask(emitGetInsSC(id), false);
+ break;
- case IF_T1_D0: // Reg, Reg
- case IF_T1_E:
- case IF_T2_C3:
- case IF_T2_C9:
- case IF_T2_C10:
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, false);
- if (fmt == IF_T1_E && id->idIns() == INS_rsb)
- {
- printf(", 0");
- }
- break;
+ case IF_T1_D0: // Reg, Reg
+ case IF_T1_E:
+ case IF_T2_C3:
+ case IF_T2_C9:
+ case IF_T2_C10:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispReg(id->idReg2(), attr, false);
+ if (fmt == IF_T1_E && id->idIns() == INS_rsb)
+ {
+ printf(", 0");
+ }
+ break;
- case IF_T2_E1: // Reg, [Reg]
- emitDispReg(id->idReg1(), attr, true);
- emitDispAddrR(id->idReg2(), attr);
- break;
+ case IF_T2_E1: // Reg, [Reg]
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispAddrR(id->idReg2(), attr);
+ break;
- case IF_T2_D1: // Reg, Imm, Imm
- emitDispReg(id->idReg1(), attr, true);
- imm = emitGetInsSC(id);
- {
- int lsb = (imm >> 5) & 0x1f;
- int msb = imm & 0x1f;
- int imm1 = lsb;
- int imm2 = msb + 1 - lsb;
- emitDispImm(imm1, true);
- emitDispImm(imm2, false);
- }
- break;
+ case IF_T2_D1: // Reg, Imm, Imm
+ emitDispReg(id->idReg1(), attr, true);
+ imm = emitGetInsSC(id);
+ {
+ int lsb = (imm >> 5) & 0x1f;
+ int msb = imm & 0x1f;
+ int imm1 = lsb;
+ int imm2 = msb + 1 - lsb;
+ emitDispImm(imm1, true);
+ emitDispImm(imm2, false);
+ }
+ break;
- case IF_T1_C: // Reg, Reg, Imm
- case IF_T1_G:
- case IF_T2_C2:
- case IF_T2_H1:
- case IF_T2_K1:
- case IF_T2_L0:
- case IF_T2_M0:
- emitDispReg(id->idReg1(), attr, true);
- imm = emitGetInsSC(id);
- if (emitInsIsLoadOrStore(ins))
- {
- emitDispAddrRI(id->idReg2(), imm, attr);
- }
- else
- {
- emitDispReg(id->idReg2(), attr, true);
- emitDispImm(imm, false);
- }
- break;
+ case IF_T1_C: // Reg, Reg, Imm
+ case IF_T1_G:
+ case IF_T2_C2:
+ case IF_T2_H1:
+ case IF_T2_K1:
+ case IF_T2_L0:
+ case IF_T2_M0:
+ emitDispReg(id->idReg1(), attr, true);
+ imm = emitGetInsSC(id);
+ if (emitInsIsLoadOrStore(ins))
+ {
+ emitDispAddrRI(id->idReg2(), imm, attr);
+ }
+ else
+ {
+ emitDispReg(id->idReg2(), attr, true);
+ emitDispImm(imm, false);
+ }
+ break;
- case IF_T1_J2:
- emitDispReg(id->idReg1(), attr, true);
- imm = emitGetInsSC(id);
- if (emitInsIsLoadOrStore(ins))
- {
- emitDispAddrRI(REG_SP, imm, attr);
- }
- else
- {
- emitDispReg(REG_SP, attr, true);
- emitDispImm(imm, false);
- }
- break;
+ case IF_T1_J2:
+ emitDispReg(id->idReg1(), attr, true);
+ imm = emitGetInsSC(id);
+ if (emitInsIsLoadOrStore(ins))
+ {
+ emitDispAddrRI(REG_SP, imm, attr);
+ }
+ else
+ {
+ emitDispReg(REG_SP, attr, true);
+ emitDispImm(imm, false);
+ }
+ break;
- case IF_T2_K4:
- emitDispReg(id->idReg1(), attr, true);
- emitDispAddrRI(REG_PC, emitGetInsSC(id), attr);
- break;
+ case IF_T2_K4:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispAddrRI(REG_PC, emitGetInsSC(id), attr);
+ break;
- case IF_T2_C1:
- case IF_T2_C8:
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, false);
- imm = emitGetInsSC(id);
- if (id->idInsOpt() == INS_OPTS_RRX)
- {
- emitDispShiftOpts(id->idInsOpt());
- assert(imm == 1);
- }
- else if (imm > 0)
- {
- emitDispShiftOpts(id->idInsOpt());
- emitDispImm(imm, false);
- }
- break;
+ case IF_T2_C1:
+ case IF_T2_C8:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispReg(id->idReg2(), attr, false);
+ imm = emitGetInsSC(id);
+ if (id->idInsOpt() == INS_OPTS_RRX)
+ {
+ emitDispShiftOpts(id->idInsOpt());
+ assert(imm == 1);
+ }
+ else if (imm > 0)
+ {
+ emitDispShiftOpts(id->idInsOpt());
+ emitDispImm(imm, false);
+ }
+ break;
- case IF_T2_C6:
- imm = emitGetInsSC(id);
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, (imm != 0));
- if (imm != 0)
- {
- emitDispImm(imm, false);
- }
- break;
+ case IF_T2_C6:
+ imm = emitGetInsSC(id);
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispReg(id->idReg2(), attr, (imm != 0));
+ if (imm != 0)
+ {
+ emitDispImm(imm, false);
+ }
+ break;
- case IF_T2_C7:
- emitDispAddrRRI(id->idReg1(), id->idReg2(), emitGetInsSC(id), attr);
- break;
+ case IF_T2_C7:
+ emitDispAddrRRI(id->idReg1(), id->idReg2(), emitGetInsSC(id), attr);
+ break;
- case IF_T2_H0:
- emitDispReg(id->idReg1(), attr, true);
- emitDispAddrPUW(id->idReg2(), emitGetInsSC(id), id->idInsOpt(), attr);
- break;
+ case IF_T2_H0:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispAddrPUW(id->idReg2(), emitGetInsSC(id), id->idInsOpt(), attr);
+ break;
- case IF_T1_H: // Reg, Reg, Reg
- emitDispReg(id->idReg1(), attr, true);
- if (emitInsIsLoadOrStore(ins))
- {
- emitDispAddrRR(id->idReg2(), id->idReg3(), attr);
- }
- else
- {
+ case IF_T1_H: // Reg, Reg, Reg
+ emitDispReg(id->idReg1(), attr, true);
+ if (emitInsIsLoadOrStore(ins))
+ {
+ emitDispAddrRR(id->idReg2(), id->idReg3(), attr);
+ }
+ else
+ {
+ emitDispReg(id->idReg2(), attr, true);
+ emitDispReg(id->idReg3(), attr, false);
+ }
+ break;
+
+ case IF_T2_C4:
+ case IF_T2_C5:
+ emitDispReg(id->idReg1(), attr, true);
emitDispReg(id->idReg2(), attr, true);
emitDispReg(id->idReg3(), attr, false);
- }
- break;
+ break;
- case IF_T2_C4:
- case IF_T2_C5:
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, true);
- emitDispReg(id->idReg3(), attr, false);
- break;
+ case IF_T2_VFP3:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispReg(id->idReg2(), attr, true);
+ emitDispReg(id->idReg3(), attr, false);
+ break;
- case IF_T2_VFP3:
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, true);
- emitDispReg(id->idReg3(), attr, false);
- break;
+ case IF_T2_VFP2:
+ switch (id->idIns())
+ {
+ case INS_vcvt_d2i:
+ case INS_vcvt_d2u:
+ case INS_vcvt_d2f:
+ emitDispReg(id->idReg1(), EA_4BYTE, true);
+ emitDispReg(id->idReg2(), EA_8BYTE, false);
+ break;
- case IF_T2_VFP2:
- switch (id->idIns())
- {
- case INS_vcvt_d2i:
- case INS_vcvt_d2u:
- case INS_vcvt_d2f:
- emitDispReg(id->idReg1(), EA_4BYTE, true);
- emitDispReg(id->idReg2(), EA_8BYTE, false);
- break;
+ case INS_vcvt_i2d:
+ case INS_vcvt_u2d:
+ case INS_vcvt_f2d:
+ emitDispReg(id->idReg1(), EA_8BYTE, true);
+ emitDispReg(id->idReg2(), EA_4BYTE, false);
+ break;
- case INS_vcvt_i2d:
- case INS_vcvt_u2d:
- case INS_vcvt_f2d:
- emitDispReg(id->idReg1(), EA_8BYTE, true);
- emitDispReg(id->idReg2(), EA_4BYTE, false);
+ // we just use the type on the instruction
+ // unless it is an asymmetrical one like the converts
+ default:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispReg(id->idReg2(), attr, false);
+ break;
+ }
break;
- // we just use the type on the instruction
- // unless it is an asymmetrical one like the converts
- default:
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, false);
- break;
- }
- break;
+ case IF_T2_VLDST:
+ imm = emitGetInsSC(id);
+ switch (id->idIns())
+ {
+ case INS_vldr:
+ case INS_vstr:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispAddrPUW(id->idReg2(), imm, id->idInsOpt(), attr);
+ break;
- case IF_T2_VLDST:
- imm = emitGetInsSC(id);
- switch (id->idIns())
- {
- case INS_vldr:
- case INS_vstr:
- emitDispReg(id->idReg1(), attr, true);
- emitDispAddrPUW(id->idReg2(), imm, id->idInsOpt(), attr);
- break;
+ case INS_vldm:
+ case INS_vstm:
+ emitDispReg(id->idReg2(), attr, false);
+ if (insOptAnyInc(id->idInsOpt()))
+ printf("!");
+ printf(", ");
+ emitDispRegRange(id->idReg1(), abs(imm) >> 2, attr);
+ break;
- case INS_vldm:
- case INS_vstm:
- emitDispReg(id->idReg2(), attr, false);
- if (insOptAnyInc(id->idInsOpt()))
- printf("!");
- printf(", ");
- emitDispRegRange(id->idReg1(), abs(imm) >> 2, attr);
- break;
+ case INS_vpush:
+ case INS_vpop:
+ emitDispRegRange(id->idReg1(), abs(imm) >> 2, attr);
+ break;
- case INS_vpush:
- case INS_vpop:
- emitDispRegRange(id->idReg1(), abs(imm) >> 2, attr);
- break;
+ default:
+ unreached();
+ }
+ break;
- default:
- unreached();
- }
- break;
+ case IF_T2_VMOVD:
+ switch (id->idIns())
+ {
+ case INS_vmov_i2d:
+ emitDispReg(id->idReg1(), attr, true); // EA_8BYTE
+ emitDispReg(id->idReg2(), EA_4BYTE, true);
+ emitDispReg(id->idReg3(), EA_4BYTE, false);
+ break;
+ case INS_vmov_d2i:
+ emitDispReg(id->idReg1(), EA_4BYTE, true);
+ emitDispReg(id->idReg2(), EA_4BYTE, true);
+ emitDispReg(id->idReg3(), attr, false); // EA_8BYTE
+ break;
+ default:
+ unreached();
+ }
+ break;
- case IF_T2_VMOVD:
- switch (id->idIns())
- {
- case INS_vmov_i2d:
- emitDispReg(id->idReg1(), attr, true); // EA_8BYTE
- emitDispReg(id->idReg2(), EA_4BYTE, true);
- emitDispReg(id->idReg3(), EA_4BYTE, false);
+ case IF_T2_VMOVS:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispReg(id->idReg2(), attr, false);
break;
- case INS_vmov_d2i:
- emitDispReg(id->idReg1(), EA_4BYTE, true);
- emitDispReg(id->idReg2(), EA_4BYTE, true);
- emitDispReg(id->idReg3(), attr, false); // EA_8BYTE
+
+ case IF_T2_G1:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispAddrRR(id->idReg2(), id->idReg3(), attr);
break;
- default: unreached();
- }
- break;
- case IF_T2_VMOVS:
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, false);
- break;
+ case IF_T2_D0: // Reg, Reg, Imm, Imm
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispReg(id->idReg2(), attr, true);
+ imm = emitGetInsSC(id);
+ if (ins == INS_bfi)
+ {
+ int lsb = (imm >> 5) & 0x1f;
+ int msb = imm & 0x1f;
+ int imm1 = lsb;
+ int imm2 = msb + 1 - lsb;
+ emitDispImm(imm1, true);
+ emitDispImm(imm2, false);
+ }
+ else
+ {
+ int lsb = (imm >> 5) & 0x1f;
+ int widthm1 = imm & 0x1f;
+ int imm1 = lsb;
+ int imm2 = widthm1 + 1;
+ emitDispImm(imm1, true);
+ emitDispImm(imm2, false);
+ }
+ break;
- case IF_T2_G1:
- emitDispReg(id->idReg1(), attr, true);
- emitDispAddrRR(id->idReg2(), id->idReg3(), attr);
- break;
-
- case IF_T2_D0: // Reg, Reg, Imm, Imm
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, true);
- imm = emitGetInsSC(id);
- if (ins == INS_bfi)
- {
- int lsb = (imm >> 5) & 0x1f;
- int msb = imm & 0x1f;
- int imm1 = lsb;
- int imm2 = msb + 1 - lsb;
- emitDispImm(imm1, true);
- emitDispImm(imm2, false);
- }
- else
- {
- int lsb = (imm >> 5) & 0x1f;
- int widthm1 = imm & 0x1f;
- int imm1 = lsb;
- int imm2 = widthm1 + 1;
- emitDispImm(imm1, true);
- emitDispImm(imm2, false);
- }
- break;
-
- case IF_T2_C0: // Reg, Reg, Reg, Imm
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, true);
- emitDispReg(id->idReg3(), attr, false);
- imm = emitGetInsSC(id);
- if (id->idInsOpt() == INS_OPTS_RRX)
- {
- emitDispShiftOpts(id->idInsOpt());
- assert(imm == 1);
- }
- else if (imm > 0)
- {
- emitDispShiftOpts(id->idInsOpt());
- emitDispImm(imm, false);
- }
- break;
+ case IF_T2_C0: // Reg, Reg, Reg, Imm
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispReg(id->idReg2(), attr, true);
+ emitDispReg(id->idReg3(), attr, false);
+ imm = emitGetInsSC(id);
+ if (id->idInsOpt() == INS_OPTS_RRX)
+ {
+ emitDispShiftOpts(id->idInsOpt());
+ assert(imm == 1);
+ }
+ else if (imm > 0)
+ {
+ emitDispShiftOpts(id->idInsOpt());
+ emitDispImm(imm, false);
+ }
+ break;
- case IF_T2_E0:
- emitDispReg(id->idReg1(), attr, true);
- if (id->idIsLclVar())
- {
- emitDispAddrRRI(id->idReg2(), codeGen->rsGetRsvdReg(), 0, attr);
- }
- else
- {
- emitDispAddrRRI(id->idReg2(), id->idReg3(), emitGetInsSC(id), attr);
- }
- break;
+ case IF_T2_E0:
+ emitDispReg(id->idReg1(), attr, true);
+ if (id->idIsLclVar())
+ {
+ emitDispAddrRRI(id->idReg2(), codeGen->rsGetRsvdReg(), 0, attr);
+ }
+ else
+ {
+ emitDispAddrRRI(id->idReg2(), id->idReg3(), emitGetInsSC(id), attr);
+ }
+ break;
- case IF_T2_G0:
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, true);
- emitDispAddrPUW(id->idReg3(), emitGetInsSC(id), id->idInsOpt(), attr);
- break;
+ case IF_T2_G0:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispReg(id->idReg2(), attr, true);
+ emitDispAddrPUW(id->idReg3(), emitGetInsSC(id), id->idInsOpt(), attr);
+ break;
- case IF_T2_F1: // Reg, Reg, Reg, Reg
- case IF_T2_F2:
- emitDispReg(id->idReg1(), attr, true);
- emitDispReg(id->idReg2(), attr, true);
- emitDispReg(id->idReg3(), attr, true);
- emitDispReg(id->idReg4(), attr, false);
- break;
+ case IF_T2_F1: // Reg, Reg, Reg, Reg
+ case IF_T2_F2:
+ emitDispReg(id->idReg1(), attr, true);
+ emitDispReg(id->idReg2(), attr, true);
+ emitDispReg(id->idReg3(), attr, true);
+ emitDispReg(id->idReg4(), attr, false);
+ break;
- case IF_T1_J3:
- case IF_T2_M1: // Load Label
- emitDispReg(id->idReg1(), attr, true);
- if (id->idIsBound())
- printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
- else
- printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
- break;
+ case IF_T1_J3:
+ case IF_T2_M1: // Load Label
+ emitDispReg(id->idReg1(), attr, true);
+ if (id->idIsBound())
+ printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
+ else
+ printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
+ break;
- case IF_T1_I: // Special Compare-and-branch
- emitDispReg(id->idReg1(), attr, true);
- __fallthrough;
+ case IF_T1_I: // Special Compare-and-branch
+ emitDispReg(id->idReg1(), attr, true);
+ __fallthrough;
- case IF_T1_K: // Special Branch, conditional
- case IF_T1_M:
- assert(((instrDescJmp*)id)->idjShort);
- printf("SHORT ");
- __fallthrough;
+ case IF_T1_K: // Special Branch, conditional
+ case IF_T1_M:
+ assert(((instrDescJmp*)id)->idjShort);
+ printf("SHORT ");
+ __fallthrough;
- case IF_T2_N1:
- if (fmt == IF_T2_N1)
- {
- emitDispReg(id->idReg1(), attr, true);
- printf("%s ADDRESS ", (id->idIns()==INS_movw)?"LOW":"HIGH");
- }
- __fallthrough;
-
- case IF_T2_J1:
- case IF_T2_J2:
- case IF_LARGEJMP:
+ case IF_T2_N1:
+ if (fmt == IF_T2_N1)
+ {
+ emitDispReg(id->idReg1(), attr, true);
+ printf("%s ADDRESS ", (id->idIns() == INS_movw) ? "LOW" : "HIGH");
+ }
+ __fallthrough;
+
+ case IF_T2_J1:
+ case IF_T2_J2:
+ case IF_LARGEJMP:
{
if (id->idAddr()->iiaHasInstrCount())
{
@@ -7584,79 +7416,72 @@ void emitter::emitDispInsHelp(instrDesc * id,
if (ig == NULL)
{
- printf("pc%s%d instructions", (instrCount >= 0) ? "+" : "" , instrCount);
+ printf("pc%s%d instructions", (instrCount >= 0) ? "+" : "", instrCount);
}
else
{
- unsigned insNum = emitFindInsNum(ig, id);
+ unsigned insNum = emitFindInsNum(ig, id);
UNATIVE_OFFSET srcOffs = ig->igOffs + emitFindOffset(ig, insNum + 1);
UNATIVE_OFFSET dstOffs = ig->igOffs + emitFindOffset(ig, insNum + 1 + instrCount);
- ssize_t relOffs = (ssize_t) (emitOffsetToPtr(dstOffs) - emitOffsetToPtr(srcOffs));
- printf("pc%s%d (%d instructions)", (relOffs >= 0) ? "+" : "" , relOffs, instrCount);
+ ssize_t relOffs = (ssize_t)(emitOffsetToPtr(dstOffs) - emitOffsetToPtr(srcOffs));
+ printf("pc%s%d (%d instructions)", (relOffs >= 0) ? "+" : "", relOffs, instrCount);
}
}
- else if (id->idIsBound())
+ else if (id->idIsBound())
printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
else
printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
}
break;
- case IF_T2_J3:
- if (id->idIsCallAddr())
- {
- offs = (ssize_t)id->idAddr()->iiaAddr;
- methodName = "";
- }
- else
- {
- offs = 0;
- methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie);
- }
+ case IF_T2_J3:
+ if (id->idIsCallAddr())
+ {
+ offs = (ssize_t)id->idAddr()->iiaAddr;
+ methodName = "";
+ }
+ else
+ {
+ offs = 0;
+ methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie);
+ }
- if (offs)
- {
- if (id->idIsDspReloc())
- printf("reloc ");
- printf("%08X", offs);
- }
- else
- {
- printf("%s", methodName);
- }
+ if (offs)
+ {
+ if (id->idIsDspReloc())
+ printf("reloc ");
+ printf("%08X", offs);
+ }
+ else
+ {
+ printf("%s", methodName);
+ }
- break;
+ break;
- default:
- printf("unexpected format %s", emitIfName(id->idInsFmt()));
- assert(!"unexpectedFormat");
- break;
+ default:
+ printf("unexpected format %s", emitIfName(id->idInsFmt()));
+ assert(!"unexpectedFormat");
+ break;
}
if (id->idDebugOnlyInfo()->idVarRefOffs)
{
printf("\t// ");
- emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(),
- id->idAddr()->iiaLclVar.lvaOffset(),
+ emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(),
id->idDebugOnlyInfo()->idVarRefOffs, asmfm);
}
printf("\n");
}
-void emitter::emitDispIns(instrDesc * id,
- bool isNew,
- bool doffs,
- bool asmfm,
- unsigned offset,
- BYTE * code,
- size_t sz,
- insGroup * ig)
+void emitter::emitDispIns(
+ instrDesc* id, bool isNew, bool doffs, bool asmfm, unsigned offset, BYTE* code, size_t sz, insGroup* ig)
{
- insFormat fmt = id->idInsFmt();
+ insFormat fmt = id->idInsFmt();
#ifdef ARM_HAZARD_AVOIDANCE
- if (id->idKraitNop())
+ if (id->idKraitNop())
{
assert(id->idIsKraitBranch());
@@ -7681,11 +7506,11 @@ void emitter::emitDispIns(instrDesc * id,
pidNOP->idInsSize(emitInsSize(IF_T2_A));
pidNOP->idDebugOnlyInfo(id->idDebugOnlyInfo()); // share the idDebugOnlyInfo() field
- size_t nopSizeOrZero = (code == NULL) ? 0 : 4; // NOPW is 4 bytes
- emitDispInsHelp(pidNOP, false, doffs, asmfm, offset, code, nopSizeOrZero, ig);
+ size_t nopSizeOrZero = (code == NULL) ? 0 : 4; // NOPW is 4 bytes
+ emitDispInsHelp(pidNOP, false, doffs, asmfm, offset, code, nopSizeOrZero, ig);
- code += nopSizeOrZero;
- sz -= nopSizeOrZero;
+ code += nopSizeOrZero;
+ sz -= nopSizeOrZero;
offset += 4;
}
}
@@ -7693,7 +7518,7 @@ void emitter::emitDispIns(instrDesc * id,
/* Special-case IF_LARGEJMP */
- if ((fmt == IF_LARGEJMP) && id->idIsBound())
+ if ((fmt == IF_LARGEJMP) && id->idIsBound())
{
// This is a pseudo-instruction format representing a large conditional branch. See the comment
// in emitter::emitOutputLJ() for the full description.
@@ -7703,29 +7528,32 @@ void emitter::emitDispIns(instrDesc * id,
// b<!cond> L_not // 2 bytes. Note that we reverse the condition.
// b L_target // 4 bytes
// L_not:
- //
+ //
// These instructions don't exist in the actual instruction stream, so we need to fake them
// up to display them.
//
// Note: don't touch the actual instrDesc. If we accidentally messed it up, it would create a very
// difficult to find bug.
- instrDescJmp idJmp;
+ instrDescJmp idJmp;
instrDescJmp* pidJmp = &idJmp;
memset(&idJmp, 0, sizeof(idJmp));
- pidJmp->idIns(emitJumpKindToIns(emitReverseJumpKind(emitInsToJumpKind(id->idIns())))); // reverse the conditional instruction
+ pidJmp->idIns(emitJumpKindToIns(emitReverseJumpKind(emitInsToJumpKind(id->idIns())))); // reverse the
+ // conditional
+ // instruction
pidJmp->idInsFmt(IF_T1_K);
pidJmp->idInsSize(emitInsSize(IF_T1_K));
pidJmp->idjShort = 1;
pidJmp->idAddr()->iiaSetInstrCount(1);
pidJmp->idDebugOnlyInfo(id->idDebugOnlyInfo()); // share the idDebugOnlyInfo() field
- size_t bcondSizeOrZero = (code == NULL) ? 0 : 2; // branch is 2 bytes
- emitDispInsHelp(pidJmp, false, doffs, asmfm, offset, code, bcondSizeOrZero, NULL /* force display of pc-relative branch */);
+ size_t bcondSizeOrZero = (code == NULL) ? 0 : 2; // branch is 2 bytes
+ emitDispInsHelp(pidJmp, false, doffs, asmfm, offset, code, bcondSizeOrZero,
+ NULL /* force display of pc-relative branch */);
- code += bcondSizeOrZero;
+ code += bcondSizeOrZero;
offset += 2;
// Next, display the unconditional branch
@@ -7737,7 +7565,7 @@ void emitter::emitDispIns(instrDesc * id,
pidJmp->idInsFmt(IF_T2_J2);
pidJmp->idInsSize(emitInsSize(IF_T2_J2));
pidJmp->idjShort = 0;
- if (id->idIsBound())
+ if (id->idIsBound())
{
pidJmp->idSetIsBound();
pidJmp->idAddr()->iiaIGlabel = id->idAddr()->iiaIGlabel;
@@ -7748,8 +7576,8 @@ void emitter::emitDispIns(instrDesc * id,
}
pidJmp->idDebugOnlyInfo(id->idDebugOnlyInfo()); // share the idDebugOnlyInfo() field
- size_t brSizeOrZero = (code == NULL) ? 0 : 4; // unconditional branch is 4 bytes
- emitDispInsHelp(pidJmp, isNew, doffs, asmfm, offset, code, brSizeOrZero, ig);
+ size_t brSizeOrZero = (code == NULL) ? 0 : 4; // unconditional branch is 4 bytes
+ emitDispInsHelp(pidJmp, isNew, doffs, asmfm, offset, code, brSizeOrZero, ig);
}
else
{
@@ -7762,46 +7590,45 @@ void emitter::emitDispIns(instrDesc * id,
* Display a stack frame reference.
*/
-void emitter::emitDispFrameRef(int varx, int disp, int offs, bool asmfm)
+void emitter::emitDispFrameRef(int varx, int disp, int offs, bool asmfm)
{
printf("[");
- if (varx < 0)
- printf("TEMP_%02u", -varx);
- else
- emitComp->gtDispLclVar(+varx, false);
+ if (varx < 0)
+ printf("TEMP_%02u", -varx);
+ else
+ emitComp->gtDispLclVar(+varx, false);
- if (disp < 0)
- printf("-0x%02x", -disp);
- else if (disp > 0)
- printf("+0x%02x", +disp);
+ if (disp < 0)
+ printf("-0x%02x", -disp);
+ else if (disp > 0)
+ printf("+0x%02x", +disp);
printf("]");
- if (varx >= 0 && emitComp->opts.varNames)
+ if (varx >= 0 && emitComp->opts.varNames)
{
LclVarDsc* varDsc;
- const char * varName;
+ const char* varName;
assert((unsigned)varx < emitComp->lvaCount);
varDsc = emitComp->lvaTable + varx;
varName = emitComp->compLocalVarName(varx, offs);
- if (varName)
+ if (varName)
{
printf("'%s", varName);
- if (disp < 0)
- printf("-%d", -disp);
+ if (disp < 0)
+ printf("-%d", -disp);
else if (disp > 0)
- printf("+%d", +disp);
+ printf("+%d", +disp);
printf("'");
}
}
}
-
#endif // DEBUG
#ifndef LEGACY_BACKEND
@@ -7813,7 +7640,7 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
{
switch (node->OperGet())
{
- case GT_IND:
+ case GT_IND:
{
GenTree* addr = node->gtGetOp1();
assert(!addr->isContained());
@@ -7822,7 +7649,7 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
}
break;
- case GT_STOREIND:
+ case GT_STOREIND:
{
GenTree* addr = node->gtGetOp1();
GenTree* data = node->gtOp.gtOp2;
@@ -7843,7 +7670,7 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
}
break;
- case GT_STORE_LCL_VAR:
+ case GT_STORE_LCL_VAR:
{
GenTreeLclVarCommon* varNode = node->AsLclVarCommon();
@@ -7853,7 +7680,7 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
if (data->isContainedIntOrIImmed())
{
- emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int) data->AsIntConCommon()->IconValue());
+ emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int)data->AsIntConCommon()->IconValue());
codeGen->genUpdateLife(varNode);
}
else
@@ -7864,10 +7691,10 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
codeGen->genUpdateLife(varNode);
}
}
- return;
+ return;
- default:
- unreached();
+ default:
+ unreached();
}
}
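The emitarm.cpp hunks above all follow from the same small set of clang-format rules rather than from any behavioral change: the pointer declarator binds to the type (BYTE* code instead of BYTE * code), case labels are indented one level inside the switch body, consecutive declarations are column-aligned, and wrapped parameter lists that fit within the column limit are joined onto a single line. As a minimal illustration only — describeKind is an invented stand-in, not a function from the JIT — this is the shape the formatter leaves behind:

    #include <cstdio>

    // Invented example (not from the JIT): a short parameter list stays on one
    // line, and the '*' in the return type binds to the type name.
    static const char* describeKind(int kind, bool verbose)
    {
        // Case labels are indented one level inside the switch body, with the
        // statements indented one further level.
        switch (kind)
        {
            case 0:
                return "none";
            default:
                return verbose ? "other (verbose)" : "other";
        }
    }

    int main()
    {
        std::printf("%s\n", describeKind(1, true));
        return 0;
    }
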
diff --git a/src/jit/emitarm.h b/src/jit/emitarm.h
index bfd9d3c0b2..69cae81b4e 100644
--- a/src/jit/emitarm.h
+++ b/src/jit/emitarm.h
@@ -2,408 +2,339 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#if defined(_TARGET_ARM_)
- /************************************************************************/
- /* Routines that compute the size of / encode instructions */
- /************************************************************************/
+/************************************************************************/
+/* Routines that compute the size of / encode instructions */
+/************************************************************************/
- struct CnsVal
- {
- int cnsVal;
+struct CnsVal
+{
+ int cnsVal;
#ifdef RELOC_SUPPORT
- bool cnsReloc;
+ bool cnsReloc;
#endif
- };
+};
- insSize emitInsSize (insFormat insFmt);
+insSize emitInsSize(insFormat insFmt);
- BYTE * emitOutputAM (BYTE *dst, instrDesc *id, size_t code,
- CnsVal * addc = NULL);
- BYTE * emitOutputSV (BYTE *dst, instrDesc *id, size_t code,
- CnsVal * addc = NULL);
- BYTE * emitOutputCV (BYTE *dst, instrDesc *id, size_t code,
- CnsVal * addc = NULL);
+BYTE* emitOutputAM(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc = NULL);
+BYTE* emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc = NULL);
+BYTE* emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc = NULL);
- BYTE * emitOutputR (BYTE *dst, instrDesc *id);
- BYTE * emitOutputRI (BYTE *dst, instrDesc *id);
- BYTE * emitOutputRR (BYTE *dst, instrDesc *id);
- BYTE * emitOutputIV (BYTE *dst, instrDesc *id);
+BYTE* emitOutputR(BYTE* dst, instrDesc* id);
+BYTE* emitOutputRI(BYTE* dst, instrDesc* id);
+BYTE* emitOutputRR(BYTE* dst, instrDesc* id);
+BYTE* emitOutputIV(BYTE* dst, instrDesc* id);
#ifdef FEATURE_ITINSTRUCTION
- BYTE * emitOutputIT (BYTE *dst, instruction ins, insFormat fmt, ssize_t condcode);
+BYTE* emitOutputIT(BYTE* dst, instruction ins, insFormat fmt, ssize_t condcode);
#endif // FEATURE_ITINSTRUCTION
- BYTE * emitOutputNOP (BYTE *dst, instruction ins, insFormat fmt);
-
- BYTE * emitOutputLJ (insGroup *ig, BYTE *dst, instrDesc *id);
- BYTE * emitOutputShortBranch(BYTE *dst, instruction ins, insFormat fmt, ssize_t distVal, instrDescJmp* id);
-
- static unsigned emitOutput_Thumb1Instr(BYTE *dst, ssize_t code);
- static unsigned emitOutput_Thumb2Instr(BYTE *dst, ssize_t code);
-
- /************************************************************************/
- /* Debug-only routines to display instructions */
- /************************************************************************/
-
-#ifdef DEBUG
-
- const char * emitFPregName (unsigned reg,
- bool varName = true);
-
- void emitDispInst (instruction ins, insFlags flags);
- void emitDispReloc (int value, bool addComma);
- void emitDispImm (int imm, bool addComma, bool alwaysHex = false);
- void emitDispCond (int cond);
- void emitDispShiftOpts(insOpts opt);
- void emitDispRegmask (int imm, bool encodedPC_LR);
- void emitDispRegRange(regNumber reg, int len, emitAttr attr);
- void emitDispReg (regNumber reg, emitAttr attr, bool addComma);
- void emitDispFloatReg(regNumber reg, emitAttr attr, bool addComma);
- void emitDispAddrR (regNumber reg, emitAttr attr);
- void emitDispAddrRI (regNumber reg, int imm, emitAttr attr);
- void emitDispAddrRR (regNumber reg1, regNumber reg2, emitAttr attr);
- void emitDispAddrRRI (regNumber reg1, regNumber reg2, int imm, emitAttr attr);
- void emitDispAddrPUW (regNumber reg, int imm, insOpts opt, emitAttr attr);
- void emitDispGC (emitAttr attr);
-
- void emitDispInsHelp (instrDesc *id, bool isNew, bool doffs, bool asmfm,
- unsigned offs = 0, BYTE * code = 0, size_t sz = 0,
- insGroup *ig = NULL);
- void emitDispIns (instrDesc *id, bool isNew, bool doffs, bool asmfm,
- unsigned offs = 0, BYTE * code = 0, size_t sz = 0,
- insGroup *ig = NULL);
+BYTE* emitOutputNOP(BYTE* dst, instruction ins, insFormat fmt);
+
+BYTE* emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* id);
+BYTE* emitOutputShortBranch(BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, instrDescJmp* id);
+
+static unsigned emitOutput_Thumb1Instr(BYTE* dst, ssize_t code);
+static unsigned emitOutput_Thumb2Instr(BYTE* dst, ssize_t code);
+
+/************************************************************************/
+/* Debug-only routines to display instructions */
+/************************************************************************/
+
+#ifdef DEBUG
+
+const char* emitFPregName(unsigned reg, bool varName = true);
+
+void emitDispInst(instruction ins, insFlags flags);
+void emitDispReloc(int value, bool addComma);
+void emitDispImm(int imm, bool addComma, bool alwaysHex = false);
+void emitDispCond(int cond);
+void emitDispShiftOpts(insOpts opt);
+void emitDispRegmask(int imm, bool encodedPC_LR);
+void emitDispRegRange(regNumber reg, int len, emitAttr attr);
+void emitDispReg(regNumber reg, emitAttr attr, bool addComma);
+void emitDispFloatReg(regNumber reg, emitAttr attr, bool addComma);
+void emitDispAddrR(regNumber reg, emitAttr attr);
+void emitDispAddrRI(regNumber reg, int imm, emitAttr attr);
+void emitDispAddrRR(regNumber reg1, regNumber reg2, emitAttr attr);
+void emitDispAddrRRI(regNumber reg1, regNumber reg2, int imm, emitAttr attr);
+void emitDispAddrPUW(regNumber reg, int imm, insOpts opt, emitAttr attr);
+void emitDispGC(emitAttr attr);
+
+void emitDispInsHelp(instrDesc* id,
+ bool isNew,
+ bool doffs,
+ bool asmfm,
+ unsigned offs = 0,
+ BYTE* code = 0,
+ size_t sz = 0,
+ insGroup* ig = NULL);
+void emitDispIns(instrDesc* id,
+ bool isNew,
+ bool doffs,
+ bool asmfm,
+ unsigned offs = 0,
+ BYTE* code = 0,
+ size_t sz = 0,
+ insGroup* ig = NULL);
#endif // DEBUG
- /************************************************************************/
- /* Private members that deal with target-dependent instr. descriptors */
- /************************************************************************/
+/************************************************************************/
+/* Private members that deal with target-dependent instr. descriptors */
+/************************************************************************/
private:
+instrDesc* emitNewInstrAmd(emitAttr attr, int dsp);
+instrDesc* emitNewInstrAmdCns(emitAttr attr, int dsp, int cns);
- instrDesc *emitNewInstrAmd (emitAttr attr, int dsp);
- instrDesc *emitNewInstrAmdCns (emitAttr attr, int dsp, int cns);
-
- instrDesc *emitNewInstrCallDir (int argCnt,
- VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- emitAttr retSize);
+instrDesc* emitNewInstrCallDir(
+ int argCnt, VARSET_VALARG_TP GCvars, regMaskTP gcrefRegs, regMaskTP byrefRegs, emitAttr retSize);
- instrDesc *emitNewInstrCallInd( int argCnt,
- ssize_t disp,
- VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- emitAttr retSize);
+instrDesc* emitNewInstrCallInd(
+ int argCnt, ssize_t disp, VARSET_VALARG_TP GCvars, regMaskTP gcrefRegs, regMaskTP byrefRegs, emitAttr retSize);
- void emitGetInsCns (instrDesc *id, CnsVal *cv);
- int emitGetInsAmdCns(instrDesc *id, CnsVal *cv);
- void emitGetInsDcmCns(instrDesc *id, CnsVal *cv);
- int emitGetInsAmdAny(instrDesc *id);
+void emitGetInsCns(instrDesc* id, CnsVal* cv);
+int emitGetInsAmdCns(instrDesc* id, CnsVal* cv);
+void emitGetInsDcmCns(instrDesc* id, CnsVal* cv);
+int emitGetInsAmdAny(instrDesc* id);
- /************************************************************************/
- /* Private helpers for instruction output */
- /************************************************************************/
+/************************************************************************/
+/* Private helpers for instruction output */
+/************************************************************************/
private:
+bool emitInsIsCompare(instruction ins);
+bool emitInsIsLoad(instruction ins);
+bool emitInsIsStore(instruction ins);
+bool emitInsIsLoadOrStore(instruction ins);
- bool emitInsIsCompare(instruction ins);
- bool emitInsIsLoad (instruction ins);
- bool emitInsIsStore (instruction ins);
- bool emitInsIsLoadOrStore(instruction ins);
-
- /*****************************************************************************
- *
- * Convert between an index scale in bytes to a smaller encoding used for
- * storage in instruction descriptors.
- */
+/*****************************************************************************
+*
+* Convert between an index scale in bytes to a smaller encoding used for
+* storage in instruction descriptors.
+*/
- inline emitter::opSize emitEncodeScale(size_t scale)
- {
- assert(scale == 1 || scale == 2 || scale == 4 || scale == 8);
+inline emitter::opSize emitEncodeScale(size_t scale)
+{
+ assert(scale == 1 || scale == 2 || scale == 4 || scale == 8);
- return emitSizeEncode[scale-1];
- }
+ return emitSizeEncode[scale - 1];
+}
- inline emitAttr emitDecodeScale(unsigned ensz)
- {
- assert(ensz < 4);
+inline emitAttr emitDecodeScale(unsigned ensz)
+{
+ assert(ensz < 4);
- return emitter::emitSizeDecode[ensz];
- }
+ return emitter::emitSizeDecode[ensz];
+}
- static bool isModImmConst (int imm);
+static bool isModImmConst(int imm);
- static int encodeModImmConst (int imm);
+static int encodeModImmConst(int imm);
- static int insUnscaleImm (int imm, emitAttr size);
+static int insUnscaleImm(int imm, emitAttr size);
- /************************************************************************/
- /* Public inline informational methods */
- /************************************************************************/
+/************************************************************************/
+/* Public inline informational methods */
+/************************************************************************/
public:
+inline static bool isLowRegister(regNumber reg)
+{
+ return (reg <= REG_R7);
+}
- inline static bool isLowRegister (regNumber reg)
- { return (reg <= REG_R7); }
-
- inline static bool isGeneralRegister (regNumber reg)
- { return (reg <= REG_R15); }
+inline static bool isGeneralRegister(regNumber reg)
+{
+ return (reg <= REG_R15);
+}
- inline static bool isFloatReg (regNumber reg)
- { return (reg >= REG_F0 && reg <= REG_F31); }
+inline static bool isFloatReg(regNumber reg)
+{
+ return (reg >= REG_F0 && reg <= REG_F31);
+}
- inline static bool isDoubleReg (regNumber reg)
- { return isFloatReg(reg) && ((reg % 2) == 0); }
+inline static bool isDoubleReg(regNumber reg)
+{
+ return isFloatReg(reg) && ((reg % 2) == 0);
+}
- inline static bool insSetsFlags (insFlags flags)
- { return (flags != INS_FLAGS_NOT_SET); }
+inline static bool insSetsFlags(insFlags flags)
+{
+ return (flags != INS_FLAGS_NOT_SET);
+}
- inline static bool insDoesNotSetFlags(insFlags flags)
- { return (flags != INS_FLAGS_SET); }
+inline static bool insDoesNotSetFlags(insFlags flags)
+{
+ return (flags != INS_FLAGS_SET);
+}
- inline static insFlags insMustSetFlags (insFlags flags)
- { return (flags == INS_FLAGS_SET) ? INS_FLAGS_SET
- : INS_FLAGS_NOT_SET; }
+inline static insFlags insMustSetFlags(insFlags flags)
+{
+ return (flags == INS_FLAGS_SET) ? INS_FLAGS_SET : INS_FLAGS_NOT_SET;
+}
- inline static insFlags insMustNotSetFlags(insFlags flags)
- { return (flags == INS_FLAGS_NOT_SET) ? INS_FLAGS_NOT_SET
- : INS_FLAGS_SET; }
+inline static insFlags insMustNotSetFlags(insFlags flags)
+{
+ return (flags == INS_FLAGS_NOT_SET) ? INS_FLAGS_NOT_SET : INS_FLAGS_SET;
+}
- inline static bool insOptsNone (insOpts opt)
- { return (opt == INS_OPTS_NONE); }
+inline static bool insOptsNone(insOpts opt)
+{
+ return (opt == INS_OPTS_NONE);
+}
- inline static bool insOptAnyInc (insOpts opt)
- { return (opt == INS_OPTS_LDST_PRE_DEC) ||
- (opt == INS_OPTS_LDST_POST_INC); }
+inline static bool insOptAnyInc(insOpts opt)
+{
+ return (opt == INS_OPTS_LDST_PRE_DEC) || (opt == INS_OPTS_LDST_POST_INC);
+}
- inline static bool insOptsPreDec (insOpts opt)
- { return (opt == INS_OPTS_LDST_PRE_DEC); }
+inline static bool insOptsPreDec(insOpts opt)
+{
+ return (opt == INS_OPTS_LDST_PRE_DEC);
+}
- inline static bool insOptsPostInc (insOpts opt)
- { return (opt == INS_OPTS_LDST_POST_INC); }
+inline static bool insOptsPostInc(insOpts opt)
+{
+ return (opt == INS_OPTS_LDST_POST_INC);
+}
- inline static bool insOptAnyShift (insOpts opt)
- { return ((opt >= INS_OPTS_RRX) &&
- (opt <= INS_OPTS_ROR) ); }
+inline static bool insOptAnyShift(insOpts opt)
+{
+ return ((opt >= INS_OPTS_RRX) && (opt <= INS_OPTS_ROR));
+}
- inline static bool insOptsRRX (insOpts opt)
- { return (opt == INS_OPTS_RRX); }
+inline static bool insOptsRRX(insOpts opt)
+{
+ return (opt == INS_OPTS_RRX);
+}
- inline static bool insOptsLSL (insOpts opt)
- { return (opt == INS_OPTS_LSL); }
+inline static bool insOptsLSL(insOpts opt)
+{
+ return (opt == INS_OPTS_LSL);
+}
- inline static bool insOptsLSR (insOpts opt)
- { return (opt == INS_OPTS_LSR); }
+inline static bool insOptsLSR(insOpts opt)
+{
+ return (opt == INS_OPTS_LSR);
+}
- inline static bool insOptsASR (insOpts opt)
- { return (opt == INS_OPTS_ASR); }
+inline static bool insOptsASR(insOpts opt)
+{
+ return (opt == INS_OPTS_ASR);
+}
- inline static bool insOptsROR (insOpts opt)
- { return (opt == INS_OPTS_ROR); }
+inline static bool insOptsROR(insOpts opt)
+{
+ return (opt == INS_OPTS_ROR);
+}
- /************************************************************************/
- /* The public entry points to output instructions */
- /************************************************************************/
+/************************************************************************/
+/* The public entry points to output instructions */
+/************************************************************************/
public:
-
- static bool emitIns_valid_imm_for_alu(int imm);
- static bool emitIns_valid_imm_for_mov(int imm);
- static bool emitIns_valid_imm_for_small_mov(regNumber reg, int imm, insFlags flags);
- static bool emitIns_valid_imm_for_add(int imm, insFlags flags);
- static bool emitIns_valid_imm_for_add_sp(int imm);
+static bool emitIns_valid_imm_for_alu(int imm);
+static bool emitIns_valid_imm_for_mov(int imm);
+static bool emitIns_valid_imm_for_small_mov(regNumber reg, int imm, insFlags flags);
+static bool emitIns_valid_imm_for_add(int imm, insFlags flags);
+static bool emitIns_valid_imm_for_add_sp(int imm);
#ifdef ARM_HAZARD_AVOIDANCE
- bool emitKraitHazardActive(instrDesc * id);
+bool emitKraitHazardActive(instrDesc* id);
#endif
- void emitIns (instruction ins);
-
- void emitIns_I (instruction ins,
- emitAttr attr,
- ssize_t imm);
-
- void emitIns_R (instruction ins,
- emitAttr attr,
- regNumber reg);
-
- void emitIns_R_I (instruction ins,
- emitAttr attr,
- regNumber reg,
- ssize_t imm,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void emitIns_R_R (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void emitIns_R_I_I (instruction ins,
- emitAttr attr,
- regNumber reg1,
- int imm1,
- int imm2,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void emitIns_R_R_I (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- int imm,
- insFlags flags = INS_FLAGS_DONT_CARE,
- insOpts opt = INS_OPTS_NONE);
-
- void emitIns_R_R_R (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void emitIns_R_R_I_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- int imm1,
- int imm2,
- insFlags flags = INS_FLAGS_DONT_CARE);
-
- void emitIns_R_R_R_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- int imm,
- insFlags flags = INS_FLAGS_DONT_CARE,
- insOpts opt = INS_OPTS_NONE);
-
- void emitIns_R_R_R_R(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- regNumber reg4);
-
- void emitIns_C (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fdlHnd,
- int offs);
-
- void emitIns_S (instruction ins,
- emitAttr attr,
- int varx,
- int offs);
-
- void emitIns_genStackOffset(regNumber r,
- int varx,
- int offs);
-
- void emitIns_S_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- int varx,
- int offs);
-
- void emitIns_R_S (instruction ins,
- emitAttr attr,
- regNumber ireg,
- int varx,
- int offs);
-
- void emitIns_S_I (instruction ins,
- emitAttr attr,
- int varx,
- int offs,
- int val);
-
- void emitIns_R_C (instruction ins,
- emitAttr attr,
- regNumber reg,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs);
-
- void emitIns_C_R (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- regNumber reg,
- int offs);
-
- void emitIns_C_I (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fdlHnd,
- ssize_t offs,
- ssize_t val);
-
- void emitIns_R_L (instruction ins,
- emitAttr attr,
- BasicBlock * dst,
- regNumber reg);
-
- void emitIns_R_D (instruction ins,
- emitAttr attr,
- unsigned offs,
- regNumber reg);
-
- void emitIns_J_R (instruction ins,
- emitAttr attr,
- BasicBlock *dst,
- regNumber reg);
-
- void emitIns_I_AR (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- int offs,
- int memCookie = 0,
- void * clsCookie = NULL);
-
- void emitIns_R_AR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- int offs,
- int memCookie = 0,
- void * clsCookie = NULL);
-
- void emitIns_R_AI (instruction ins,
- emitAttr attr,
- regNumber ireg,
- ssize_t disp);
-
- void emitIns_AR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- int offs,
- int memCookie = 0,
- void * clsCookie = NULL);
-
- void emitIns_R_ARR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- int disp);
-
- void emitIns_ARR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- int disp);
-
- void emitIns_R_ARX (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- unsigned mul,
- int disp);
-
- enum EmitCallType
- {
+void emitIns(instruction ins);
+
+void emitIns_I(instruction ins, emitAttr attr, ssize_t imm);
+
+void emitIns_R(instruction ins, emitAttr attr, regNumber reg);
+
+void emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t imm, insFlags flags = INS_FLAGS_DONT_CARE);
+
+void emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insFlags flags = INS_FLAGS_DONT_CARE);
+
+void emitIns_R_I_I(
+ instruction ins, emitAttr attr, regNumber reg1, int imm1, int imm2, insFlags flags = INS_FLAGS_DONT_CARE);
+
+void emitIns_R_R_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ int imm,
+ insFlags flags = INS_FLAGS_DONT_CARE,
+ insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R_R(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ insFlags flags = INS_FLAGS_DONT_CARE);
+
+void emitIns_R_R_I_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ int imm1,
+ int imm2,
+ insFlags flags = INS_FLAGS_DONT_CARE);
+
+void emitIns_R_R_R_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ int imm,
+ insFlags flags = INS_FLAGS_DONT_CARE,
+ insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, regNumber reg4);
+
+void emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fdlHnd, int offs);
+
+void emitIns_S(instruction ins, emitAttr attr, int varx, int offs);
+
+void emitIns_genStackOffset(regNumber r, int varx, int offs);
+
+void emitIns_S_R(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs);
+
+void emitIns_R_S(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs);
+
+void emitIns_S_I(instruction ins, emitAttr attr, int varx, int offs, int val);
+
+void emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO_FIELD_HANDLE fldHnd, int offs);
+
+void emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, regNumber reg, int offs);
+
+void emitIns_C_I(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fdlHnd, ssize_t offs, ssize_t val);
+
+void emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg);
+
+void emitIns_R_D(instruction ins, emitAttr attr, unsigned offs, regNumber reg);
+
+void emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg);
+
+void emitIns_I_AR(
+ instruction ins, emitAttr attr, int val, regNumber reg, int offs, int memCookie = 0, void* clsCookie = NULL);
+
+void emitIns_R_AR(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber reg, int offs, int memCookie = 0, void* clsCookie = NULL);
+
+void emitIns_R_AI(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp);
+
+void emitIns_AR_R(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber reg, int offs, int memCookie = 0, void* clsCookie = NULL);
+
+void emitIns_R_ARR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, int disp);
+
+void emitIns_ARR_R(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, int disp);
+
+void emitIns_R_ARX(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, unsigned mul, int disp);
+
+enum EmitCallType
+{
// I have included here, but commented out, all the values used by the x86 emitter.
// However, ARM has a much reduced instruction set, and so the ARM emitter only
@@ -412,56 +343,53 @@ public:
// and know why they are unavailible on ARM), while making it easier to stay
// in-sync with x86 and possibly add them back in if needed.
- EC_FUNC_TOKEN, // Direct call to a helper/static/nonvirtual/global method
- // EC_FUNC_TOKEN_INDIR, // Indirect call to a helper/static/nonvirtual/global method
- EC_FUNC_ADDR, // Direct call to an absolute address
+ EC_FUNC_TOKEN, // Direct call to a helper/static/nonvirtual/global method
+ // EC_FUNC_TOKEN_INDIR, // Indirect call to a helper/static/nonvirtual/global method
+ EC_FUNC_ADDR, // Direct call to an absolute address
// EC_FUNC_VIRTUAL, // Call to a virtual method (using the vtable)
- EC_INDIR_R, // Indirect call via register
- // EC_INDIR_SR, // Indirect call via stack-reference (local var)
- // EC_INDIR_C, // Indirect call via static class var
- // EC_INDIR_ARD, // Indirect call via an addressing mode
-
- EC_COUNT
- };
-
- void emitIns_Call (EmitCallType callType,
- CORINFO_METHOD_HANDLE methHnd, // used for pretty printing
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
- void* addr,
- ssize_t argSize,
- emitAttr retSize,
- VARSET_VALARG_TP ptrVars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- IL_OFFSETX ilOffset = BAD_IL_OFFSET,
- regNumber ireg = REG_NA,
- regNumber xreg = REG_NA,
- unsigned xmul = 0,
- int disp = 0,
- bool isJump = false,
- bool isNoGC = false,
- bool isProfLeaveCB = false);
+ EC_INDIR_R, // Indirect call via register
+ // EC_INDIR_SR, // Indirect call via stack-reference (local var)
+ // EC_INDIR_C, // Indirect call via static class var
+ // EC_INDIR_ARD, // Indirect call via an addressing mode
+
+ EC_COUNT
+};
+
+void emitIns_Call(EmitCallType callType,
+ CORINFO_METHOD_HANDLE methHnd, // used for pretty printing
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
+ void* addr,
+ ssize_t argSize,
+ emitAttr retSize,
+ VARSET_VALARG_TP ptrVars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET,
+ regNumber ireg = REG_NA,
+ regNumber xreg = REG_NA,
+ unsigned xmul = 0,
+ int disp = 0,
+ bool isJump = false,
+ bool isNoGC = false,
+ bool isProfLeaveCB = false);
/*****************************************************************************
*
* Given an instrDesc, return true if it's a conditional jump.
*/
-inline bool emitIsCondJump(instrDesc *jmp)
+inline bool emitIsCondJump(instrDesc* jmp)
{
- return (jmp->idInsFmt() == IF_T2_J1) ||
- (jmp->idInsFmt() == IF_T1_K) ||
- (jmp->idInsFmt() == IF_LARGEJMP);
+ return (jmp->idInsFmt() == IF_T2_J1) || (jmp->idInsFmt() == IF_T1_K) || (jmp->idInsFmt() == IF_LARGEJMP);
}
-
/*****************************************************************************
*
* Given an instrDesc, return true if it's a comapre and jump.
*/
-inline bool emitIsCmpJump(instrDesc *jmp)
+inline bool emitIsCmpJump(instrDesc* jmp)
{
return (jmp->idInsFmt() == IF_T1_I);
}
@@ -471,10 +399,9 @@ inline bool emitIsCmpJump(instrDesc *jmp)
* Given a instrDesc, return true if it's an unconditional jump.
*/
-inline bool emitIsUncondJump(instrDesc *jmp)
+inline bool emitIsUncondJump(instrDesc* jmp)
{
- return (jmp->idInsFmt() == IF_T2_J2) ||
- (jmp->idInsFmt() == IF_T1_M);
+ return (jmp->idInsFmt() == IF_T2_J2) || (jmp->idInsFmt() == IF_T1_M);
}
/*****************************************************************************
@@ -482,11 +409,9 @@ inline bool emitIsUncondJump(instrDesc *jmp)
* Given an instrDesc, return true if it's a load label instruction.
*/
-inline bool emitIsLoadLabel(instrDesc *jmp)
+inline bool emitIsLoadLabel(instrDesc* jmp)
{
- return (jmp->idInsFmt() == IF_T2_M1) ||
- (jmp->idInsFmt() == IF_T1_J3) ||
- (jmp->idInsFmt() == IF_T2_N1);
+ return (jmp->idInsFmt() == IF_T2_M1) || (jmp->idInsFmt() == IF_T1_J3) || (jmp->idInsFmt() == IF_T2_N1);
}
#endif // _TARGET_ARM_
diff --git a/src/jit/emitarm64.cpp b/src/jit/emitarm64.cpp
index 9bc8b14ac3..a632ec12c8 100644
--- a/src/jit/emitarm64.cpp
+++ b/src/jit/emitarm64.cpp
@@ -29,27 +29,25 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
-const instruction emitJumpKindInstructions[] =
-{
+const instruction emitJumpKindInstructions[] = {
INS_nop,
- #define JMP_SMALL(en, rev, ins) INS_##ins,
- #include "emitjmps.h"
+#define JMP_SMALL(en, rev, ins) INS_##ins,
+#include "emitjmps.h"
};
-const emitJumpKind emitReverseJumpKinds[] =
-{
+const emitJumpKind emitReverseJumpKinds[] = {
EJ_NONE,
- #define JMP_SMALL(en, rev, ins) EJ_##rev,
- #include "emitjmps.h"
+#define JMP_SMALL(en, rev, ins) EJ_##rev,
+#include "emitjmps.h"
};
/*****************************************************************************
* Look up the instruction for a jump kind
*/
-/*static*/ instruction emitter::emitJumpKindToIns(emitJumpKind jumpKind)
+/*static*/ instruction emitter::emitJumpKindToIns(emitJumpKind jumpKind)
{
assert((unsigned)jumpKind < ArrLen(emitJumpKindInstructions));
return emitJumpKindInstructions[jumpKind];
@@ -60,7 +58,7 @@ const emitJumpKind emitReverseJumpKinds[] =
* branch instruction with a jump kind!
*/
-/*static*/ emitJumpKind emitter::emitInsToJumpKind(instruction ins)
+/*static*/ emitJumpKind emitter::emitInsToJumpKind(instruction ins)
{
for (unsigned i = 0; i < ArrLen(emitJumpKindInstructions); i++)
{
@@ -78,7 +76,7 @@ const emitJumpKind emitReverseJumpKinds[] =
* Reverse the conditional jump
*/
-/*static*/ emitJumpKind emitter::emitReverseJumpKind(emitJumpKind jumpKind)
+/*static*/ emitJumpKind emitter::emitReverseJumpKind(emitJumpKind jumpKind)
{
assert(jumpKind < EJ_COUNT);
return emitReverseJumpKinds[jumpKind];
@@ -89,48 +87,46 @@ const emitJumpKind emitReverseJumpKinds[] =
* Return the allocated size (in bytes) of the given instruction descriptor.
*/
-size_t emitter::emitSizeOfInsDsc(instrDesc *id)
+size_t emitter::emitSizeOfInsDsc(instrDesc* id)
{
- assert (!emitIsTinyInsDsc(id));
+ assert(!emitIsTinyInsDsc(id));
- if (emitIsScnsInsDsc(id))
+ if (emitIsScnsInsDsc(id))
return SMALL_IDSC_SIZE;
assert((unsigned)id->idInsFmt() < emitFmtCount);
- ID_OPS idOp = (ID_OPS) emitFmtToOps[id->idInsFmt()];
- bool isCallIns = (id->idIns() == INS_bl)
- || (id->idIns() == INS_blr)
- || (id->idIns() == INS_b_tail)
- || (id->idIns() == INS_br_tail);
- bool maybeCallIns = (id->idIns() == INS_b) || (id->idIns() == INS_br);
+ ID_OPS idOp = (ID_OPS)emitFmtToOps[id->idInsFmt()];
+ bool isCallIns = (id->idIns() == INS_bl) || (id->idIns() == INS_blr) || (id->idIns() == INS_b_tail) ||
+ (id->idIns() == INS_br_tail);
+ bool maybeCallIns = (id->idIns() == INS_b) || (id->idIns() == INS_br);
switch (idOp)
{
- case ID_OP_NONE:
- break;
+ case ID_OP_NONE:
+ break;
- case ID_OP_JMP:
- return sizeof(instrDescJmp);
+ case ID_OP_JMP:
+ return sizeof(instrDescJmp);
- case ID_OP_CALL:
- assert(isCallIns || maybeCallIns);
- if (id->idIsLargeCall())
- {
- /* Must be a "fat" call descriptor */
- return sizeof(instrDescCGCA);
- }
- else
- {
- assert(!id->idIsLargeDsp());
- assert(!id->idIsLargeCns());
- return sizeof(instrDesc);
- }
- break;
+ case ID_OP_CALL:
+ assert(isCallIns || maybeCallIns);
+ if (id->idIsLargeCall())
+ {
+ /* Must be a "fat" call descriptor */
+ return sizeof(instrDescCGCA);
+ }
+ else
+ {
+ assert(!id->idIsLargeDsp());
+ assert(!id->idIsLargeCns());
+ return sizeof(instrDesc);
+ }
+ break;
- default:
- NO_WAY("unexpected instruction descriptor format");
- break;
+ default:
+ NO_WAY("unexpected instruction descriptor format");
+ break;
}
if (id->idIsLargeCns())
@@ -149,769 +145,769 @@ size_t emitter::emitSizeOfInsDsc(instrDesc *id)
}
}
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* The following called for each recorded instruction -- use for debugging.
*/
-void emitter::emitInsSanityCheck(instrDesc *id)
+void emitter::emitInsSanityCheck(instrDesc* id)
{
/* What instruction format have we got? */
switch (id->idInsFmt())
{
instruction ins;
- emitAttr elemsize;
- emitAttr datasize;
- emitAttr dstsize;
- emitAttr srcsize;
- ssize_t imm;
- unsigned immShift;
- ssize_t index;
- ssize_t index2;
-
- case IF_BI_0A: // BI_0A ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
- break;
+ emitAttr elemsize;
+ emitAttr datasize;
+ emitAttr dstsize;
+ emitAttr srcsize;
+ ssize_t imm;
+ unsigned immShift;
+ ssize_t index;
+ ssize_t index2;
+
+ case IF_BI_0A: // BI_0A ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
+ break;
- case IF_BI_0B: // BI_0B ......iiiiiiiiii iiiiiiiiiiii.... simm19:00
- break;
+ case IF_BI_0B: // BI_0B ......iiiiiiiiii iiiiiiiiiiii.... simm19:00
+ break;
- case IF_LARGEJMP:
- case IF_LARGEADR:
- case IF_LARGELDC:
- break;
+ case IF_LARGEJMP:
+ case IF_LARGEADR:
+ case IF_LARGELDC:
+ break;
- case IF_BI_0C: // BI_0C ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
- break;
+ case IF_BI_0C: // BI_0C ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
+ break;
- case IF_BI_1A: // BI_1A ......iiiiiiiiii iiiiiiiiiiittttt Rt simm19:00
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- break;
+ case IF_BI_1A: // BI_1A ......iiiiiiiiii iiiiiiiiiiittttt Rt simm19:00
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ break;
- case IF_BI_1B: // BI_1B B.......bbbbbiii iiiiiiiiiiittttt Rt imm6, simm14:00
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
- break;
+ case IF_BI_1B: // BI_1B B.......bbbbbiii iiiiiiiiiiittttt Rt imm6, simm14:00
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
+ break;
- case IF_BR_1A: // BR_1A ................ ......nnnnn..... Rn
- assert(isGeneralRegister(id->idReg1()));
- break;
+ case IF_BR_1A: // BR_1A ................ ......nnnnn..... Rn
+ assert(isGeneralRegister(id->idReg1()));
+ break;
- case IF_BR_1B: // BR_1B ................ ......nnnnn..... Rn
- assert(isGeneralRegister(id->idReg3()));
- break;
+ case IF_BR_1B: // BR_1B ................ ......nnnnn..... Rn
+ assert(isGeneralRegister(id->idReg3()));
+ break;
- case IF_LS_1A: // LS_1A .X......iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB)
- assert(isGeneralRegister(id->idReg1()) ||
- isVectorRegister(id->idReg1()));
- assert(insOptsNone(id->idInsOpt()));
- break;
+ case IF_LS_1A: // LS_1A .X......iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB)
+ assert(isGeneralRegister(id->idReg1()) || isVectorRegister(id->idReg1()));
+ assert(insOptsNone(id->idInsOpt()));
+ break;
- case IF_LS_2A: // LS_2A .X.......X...... ......nnnnnttttt Rt Rn
- assert(isIntegerRegister(id->idReg1()) || // ZR
- isVectorRegister(id->idReg1()));
- assert(isIntegerRegister(id->idReg2())); // SP
- assert(emitGetInsSC(id) == 0);
- assert(insOptsNone(id->idInsOpt()));
- break;
+ case IF_LS_2A: // LS_2A .X.......X...... ......nnnnnttttt Rt Rn
+ assert(isIntegerRegister(id->idReg1()) || // ZR
+ isVectorRegister(id->idReg1()));
+ assert(isIntegerRegister(id->idReg2())); // SP
+ assert(emitGetInsSC(id) == 0);
+ assert(insOptsNone(id->idInsOpt()));
+ break;
- case IF_LS_2B: // LS_2B .X.......Xiiiiii iiiiiinnnnnttttt Rt Rn imm(0-4095)
- assert(isIntegerRegister(id->idReg1()) || // ZR
- isVectorRegister(id->idReg1()));
- assert(isIntegerRegister(id->idReg2())); // SP
- assert(isValidUimm12(emitGetInsSC(id)));
- assert(insOptsNone(id->idInsOpt()));
- break;
+ case IF_LS_2B: // LS_2B .X.......Xiiiiii iiiiiinnnnnttttt Rt Rn imm(0-4095)
+ assert(isIntegerRegister(id->idReg1()) || // ZR
+ isVectorRegister(id->idReg1()));
+ assert(isIntegerRegister(id->idReg2())); // SP
+ assert(isValidUimm12(emitGetInsSC(id)));
+ assert(insOptsNone(id->idInsOpt()));
+ break;
- case IF_LS_2C: // LS_2C .X.......X.iiiii iiiiPPnnnnnttttt Rt Rn imm(-256..+255) no/pre/post inc
- assert(isIntegerRegister(id->idReg1()) || // ZR
- isVectorRegister(id->idReg1()));
- assert(isIntegerRegister(id->idReg2())); // SP
- assert(emitGetInsSC(id) >= -0x100);
- assert(emitGetInsSC(id) < 0x100);
- assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
- break;
+ case IF_LS_2C: // LS_2C .X.......X.iiiii iiiiPPnnnnnttttt Rt Rn imm(-256..+255) no/pre/post inc
+ assert(isIntegerRegister(id->idReg1()) || // ZR
+ isVectorRegister(id->idReg1()));
+ assert(isIntegerRegister(id->idReg2())); // SP
+ assert(emitGetInsSC(id) >= -0x100);
+ assert(emitGetInsSC(id) < 0x100);
+ assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
+ break;
- case IF_LS_3A: // LS_3A .X.......X.mmmmm oooS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
- assert(isIntegerRegister(id->idReg1()) || // ZR
- isVectorRegister(id->idReg1()));
- assert(isIntegerRegister(id->idReg2())); // SP
- if (id->idIsLclVar())
- {
- assert(isGeneralRegister(codeGen->rsGetRsvdReg()));
- }
- else
- {
- assert(isGeneralRegister(id->idReg3()));
- }
- assert(insOptsLSExtend(id->idInsOpt()));
- break;
+ case IF_LS_3A: // LS_3A .X.......X.mmmmm oooS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
+ assert(isIntegerRegister(id->idReg1()) || // ZR
+ isVectorRegister(id->idReg1()));
+ assert(isIntegerRegister(id->idReg2())); // SP
+ if (id->idIsLclVar())
+ {
+ assert(isGeneralRegister(codeGen->rsGetRsvdReg()));
+ }
+ else
+ {
+ assert(isGeneralRegister(id->idReg3()));
+ }
+ assert(insOptsLSExtend(id->idInsOpt()));
+ break;
- case IF_LS_3B: // LS_3B X............... .aaaaannnnnttttt Rt Ra Rn
- assert((isValidGeneralDatasize(id->idOpSize()) && isIntegerRegister(id->idReg1())) ||
- (isValidVectorLSPDatasize(id->idOpSize()) && isVectorRegister(id->idReg1())));
- assert(isIntegerRegister(id->idReg1()) || // ZR
- isVectorRegister(id->idReg1()));
- assert(isIntegerRegister(id->idReg2()) || // ZR
- isVectorRegister(id->idReg2()));
- assert(isIntegerRegister(id->idReg3())); // SP
- assert(emitGetInsSC(id) == 0);
- assert(insOptsNone(id->idInsOpt()));
- break;
+ case IF_LS_3B: // LS_3B X............... .aaaaannnnnttttt Rt Ra Rn
+ assert((isValidGeneralDatasize(id->idOpSize()) && isIntegerRegister(id->idReg1())) ||
+ (isValidVectorLSPDatasize(id->idOpSize()) && isVectorRegister(id->idReg1())));
+ assert(isIntegerRegister(id->idReg1()) || // ZR
+ isVectorRegister(id->idReg1()));
+ assert(isIntegerRegister(id->idReg2()) || // ZR
+ isVectorRegister(id->idReg2()));
+ assert(isIntegerRegister(id->idReg3())); // SP
+ assert(emitGetInsSC(id) == 0);
+ assert(insOptsNone(id->idInsOpt()));
+ break;
- case IF_LS_3C: // LS_3C X.........iiiiii iaaaaannnnnttttt Rt Ra Rn imm(im7,sh)
- assert((isValidGeneralDatasize(id->idOpSize()) && isIntegerRegister(id->idReg1())) ||
- (isValidVectorLSPDatasize(id->idOpSize()) && isVectorRegister(id->idReg1())));
- assert(isIntegerRegister(id->idReg1()) || // ZR
- isVectorRegister(id->idReg1()));
- assert(isIntegerRegister(id->idReg2()) || // ZR
- isVectorRegister(id->idReg2()));
- assert(isIntegerRegister(id->idReg3())); // SP
- assert(emitGetInsSC(id) >= -0x40);
- assert(emitGetInsSC(id) < 0x40);
- assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
- break;
+ case IF_LS_3C: // LS_3C X.........iiiiii iaaaaannnnnttttt Rt Ra Rn imm(im7,sh)
+ assert((isValidGeneralDatasize(id->idOpSize()) && isIntegerRegister(id->idReg1())) ||
+ (isValidVectorLSPDatasize(id->idOpSize()) && isVectorRegister(id->idReg1())));
+ assert(isIntegerRegister(id->idReg1()) || // ZR
+ isVectorRegister(id->idReg1()));
+ assert(isIntegerRegister(id->idReg2()) || // ZR
+ isVectorRegister(id->idReg2()));
+ assert(isIntegerRegister(id->idReg3())); // SP
+ assert(emitGetInsSC(id) >= -0x40);
+ assert(emitGetInsSC(id) < 0x40);
+ assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
+ break;
- case IF_DI_1A: // DI_1A X.......shiiiiii iiiiiinnnnn..... Rn imm(i12,sh)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isValidUimm12(emitGetInsSC(id)));
- assert(insOptsNone(id->idInsOpt()) || insOptsLSL12(id->idInsOpt()));
- break;
+ case IF_DI_1A: // DI_1A X.......shiiiiii iiiiiinnnnn..... Rn imm(i12,sh)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isValidUimm12(emitGetInsSC(id)));
+ assert(insOptsNone(id->idInsOpt()) || insOptsLSL12(id->idInsOpt()));
+ break;
- case IF_DI_1B: // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isValidImmHWVal(emitGetInsSC(id), id->idOpSize()));
- break;
+ case IF_DI_1B: // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isValidImmHWVal(emitGetInsSC(id), id->idOpSize()));
+ break;
- case IF_DI_1C: // DI_1C X........Nrrrrrr ssssssnnnnn..... Rn imm(N,r,s)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isValidImmNRS(emitGetInsSC(id), id->idOpSize()));
- break;
+ case IF_DI_1C: // DI_1C X........Nrrrrrr ssssssnnnnn..... Rn imm(N,r,s)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isValidImmNRS(emitGetInsSC(id), id->idOpSize()));
+ break;
- case IF_DI_1D: // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isIntegerRegister(id->idReg1())); // SP
- assert(isValidImmNRS(emitGetInsSC(id), id->idOpSize()));
- break;
+ case IF_DI_1D: // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isIntegerRegister(id->idReg1())); // SP
+ assert(isValidImmNRS(emitGetInsSC(id), id->idOpSize()));
+ break;
- case IF_DI_1E: // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
- assert(isGeneralRegister(id->idReg1()));
- break;
+ case IF_DI_1E: // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
+ assert(isGeneralRegister(id->idReg1()));
+ break;
- case IF_DI_1F: // DI_1F X..........iiiii cccc..nnnnn.nzcv Rn imm5 nzcv cond
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isValidImmCondFlagsImm5(emitGetInsSC(id)));
- break;
+ case IF_DI_1F: // DI_1F X..........iiiii cccc..nnnnn.nzcv Rn imm5 nzcv cond
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isValidImmCondFlagsImm5(emitGetInsSC(id)));
+ break;
- case IF_DI_2A: // DI_2A X.......shiiiiii iiiiiinnnnnddddd Rd Rn imm(i12,sh)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isIntegerRegister(id->idReg1())); // SP
- assert(isIntegerRegister(id->idReg2())); // SP
- assert(isValidUimm12(emitGetInsSC(id)));
- assert(insOptsNone(id->idInsOpt()) || insOptsLSL12(id->idInsOpt()));
- break;
+ case IF_DI_2A: // DI_2A X.......shiiiiii iiiiiinnnnnddddd Rd Rn imm(i12,sh)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isIntegerRegister(id->idReg1())); // SP
+ assert(isIntegerRegister(id->idReg2())); // SP
+ assert(isValidUimm12(emitGetInsSC(id)));
+ assert(insOptsNone(id->idInsOpt()) || insOptsLSL12(id->idInsOpt()));
+ break;
- case IF_DI_2B: // DI_2B X.........Xnnnnn ssssssnnnnnddddd Rd Rn imm(0-63)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
- break;
+ case IF_DI_2B: // DI_2B X.........Xnnnnn ssssssnnnnnddddd Rd Rn imm(0-63)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
+ break;
- case IF_DI_2C: // DI_2C X........Nrrrrrr ssssssnnnnnddddd Rd Rn imm(N,r,s)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isIntegerRegister(id->idReg1())); // SP
- assert(isGeneralRegister(id->idReg2()));
- assert(isValidImmNRS(emitGetInsSC(id), id->idOpSize()));
- break;
+ case IF_DI_2C: // DI_2C X........Nrrrrrr ssssssnnnnnddddd Rd Rn imm(N,r,s)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isIntegerRegister(id->idReg1())); // SP
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isValidImmNRS(emitGetInsSC(id), id->idOpSize()));
+ break;
- case IF_DI_2D: // DI_2D X........Nrrrrrr ssssssnnnnnddddd Rd Rn imr, imms (N,r,s)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isValidImmNRS(emitGetInsSC(id), id->idOpSize()));
- break;
+ case IF_DI_2D: // DI_2D X........Nrrrrrr ssssssnnnnnddddd Rd Rn imr, imms (N,r,s)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isValidImmNRS(emitGetInsSC(id), id->idOpSize()));
+ break;
- case IF_DR_1D: // DR_1D X............... cccc.......ddddd Rd cond
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isValidImmCond(emitGetInsSC(id)));
- break;
+ case IF_DR_1D: // DR_1D X............... cccc.......ddddd Rd cond
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isValidImmCond(emitGetInsSC(id)));
+ break;
- case IF_DR_2A: // DR_2A X..........mmmmm ......nnnnn..... Rn Rm
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- break;
+ case IF_DR_2A: // DR_2A X..........mmmmm ......nnnnn..... Rn Rm
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ break;
- case IF_DR_2B: // DR_2B X.......sh.mmmmm ssssssnnnnn..... Rn Rm {LSL,LSR,ASR,ROR} imm(0-63)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isIntegerRegister(id->idReg1())); // ZR
- assert(isGeneralRegister(id->idReg2()));
- assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
- if (!insOptsNone(id->idInsOpt()))
- {
- if (id->idIns() == INS_tst) // tst allows ROR, cmp/cmn don't
+ case IF_DR_2B: // DR_2B X.......sh.mmmmm ssssssnnnnn..... Rn Rm {LSL,LSR,ASR,ROR} imm(0-63)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isIntegerRegister(id->idReg1())); // ZR
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
+ if (!insOptsNone(id->idInsOpt()))
{
- assert(insOptsAnyShift(id->idInsOpt()));
+ if (id->idIns() == INS_tst) // tst allows ROR, cmp/cmn don't
+ {
+ assert(insOptsAnyShift(id->idInsOpt()));
+ }
+ else
+ {
+ assert(insOptsAluShift(id->idInsOpt()));
+ }
}
- else
+ assert(insOptsNone(id->idInsOpt()) || (emitGetInsSC(id) > 0));
+ break;
+
+ case IF_DR_2C: // DR_2C X..........mmmmm ooosssnnnnn..... Rn Rm ext(Rm) LSL imm(0-4)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isIntegerRegister(id->idReg1())); // SP
+ assert(isGeneralRegister(id->idReg2()));
+ assert(insOptsNone(id->idInsOpt()) || insOptsLSL(id->idInsOpt()) || insOptsAnyExtend(id->idInsOpt()));
+ assert(emitGetInsSC(id) >= 0);
+ assert(emitGetInsSC(id) <= 4);
+ if (insOptsLSL(id->idInsOpt()))
{
- assert(insOptsAluShift(id->idInsOpt()));
+ assert(emitGetInsSC(id) > 0);
}
- }
- assert(insOptsNone(id->idInsOpt()) || (emitGetInsSC(id) > 0));
- break;
+ break;
- case IF_DR_2C: // DR_2C X..........mmmmm ooosssnnnnn..... Rn Rm ext(Rm) LSL imm(0-4)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isIntegerRegister(id->idReg1())); // SP
- assert(isGeneralRegister(id->idReg2()));
- assert(insOptsNone(id->idInsOpt()) || insOptsLSL(id->idInsOpt()) || insOptsAnyExtend(id->idInsOpt()));
- assert(emitGetInsSC(id) >= 0);
- assert(emitGetInsSC(id) <= 4);
- if (insOptsLSL(id->idInsOpt()))
- {
- assert(emitGetInsSC(id) > 0);
- }
- break;
+ case IF_DR_2D: // DR_2D X..........nnnnn cccc..nnnnnmmmmm Rd Rn cond
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isValidImmCond(emitGetInsSC(id)));
+ break;
- case IF_DR_2D: // DR_2D X..........nnnnn cccc..nnnnnmmmmm Rd Rn cond
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isValidImmCond(emitGetInsSC(id)));
- break;
+ case IF_DR_2E: // DR_2E X..........mmmmm ...........ddddd Rd Rm
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isIntegerRegister(id->idReg2())); // ZR
+ break;
- case IF_DR_2E: // DR_2E X..........mmmmm ...........ddddd Rd Rm
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isIntegerRegister(id->idReg2())); // ZR
- break;
+ case IF_DR_2F: // DR_2F X.......sh.mmmmm ssssss.....ddddd Rd Rm {LSL,LSR,ASR} imm(0-63)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
+ assert(insOptsNone(id->idInsOpt()) || insOptsAluShift(id->idInsOpt()));
+ assert(insOptsNone(id->idInsOpt()) || (emitGetInsSC(id) > 0));
+ break;
- case IF_DR_2F: // DR_2F X.......sh.mmmmm ssssss.....ddddd Rd Rm {LSL,LSR,ASR} imm(0-63)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
- assert(insOptsNone(id->idInsOpt()) || insOptsAluShift(id->idInsOpt()));
- assert(insOptsNone(id->idInsOpt()) || (emitGetInsSC(id) > 0));
- break;
+ case IF_DR_2G: // DR_2G X............... ......nnnnnddddd Rd Rm
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isIntegerRegister(id->idReg1())); // SP
+ assert(isIntegerRegister(id->idReg2())); // SP
+ break;
- case IF_DR_2G: // DR_2G X............... ......nnnnnddddd Rd Rm
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isIntegerRegister(id->idReg1())); // SP
- assert(isIntegerRegister(id->idReg2())); // SP
- break;
+ case IF_DR_2H: // DR_2H X........X...... ......nnnnnddddd Rd Rn
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ break;
- case IF_DR_2H: // DR_2H X........X...... ......nnnnnddddd Rd Rn
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- break;
+ case IF_DR_2I: // DR_2I X..........mmmmm cccc..nnnnn.nzcv Rn Rm nzcv cond
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isValidImmCondFlags(emitGetInsSC(id)));
+ break;
- case IF_DR_2I: // DR_2I X..........mmmmm cccc..nnnnn.nzcv Rn Rm nzcv cond
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isValidImmCondFlags(emitGetInsSC(id)));
- break;
+ case IF_DR_3A: // DR_3A X..........mmmmm ......nnnnnmmmmm Rd Rn Rm
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isIntegerRegister(id->idReg1())); // SP
+ assert(isIntegerRegister(id->idReg2())); // SP
+ if (id->idIsLclVar())
+ {
+ assert(isGeneralRegister(codeGen->rsGetRsvdReg()));
+ }
+ else
+ {
+ assert(isGeneralRegister(id->idReg3()));
+ }
+ assert(insOptsNone(id->idInsOpt()));
+ break;
- case IF_DR_3A: // DR_3A X..........mmmmm ......nnnnnmmmmm Rd Rn Rm
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isIntegerRegister(id->idReg1())); // SP
- assert(isIntegerRegister(id->idReg2())); // SP
- if (id->idIsLclVar())
- {
- assert(isGeneralRegister(codeGen->rsGetRsvdReg()));
- }
- else
- {
+ case IF_DR_3B: // DR_3B X.......sh.mmmmm ssssssnnnnnddddd Rd Rn Rm {LSL,LSR,ASR,ROR} imm(0-63)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
assert(isGeneralRegister(id->idReg3()));
- }
- assert(insOptsNone(id->idInsOpt()));
- break;
-
- case IF_DR_3B: // DR_3B X.......sh.mmmmm ssssssnnnnnddddd Rd Rn Rm {LSL,LSR,ASR,ROR} imm(0-63)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isGeneralRegister(id->idReg3()));
- assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
- assert(insOptsNone(id->idInsOpt()) || insOptsAnyShift(id->idInsOpt()));
- assert(insOptsNone(id->idInsOpt()) || (emitGetInsSC(id) > 0));
- break;
-
- case IF_DR_3C: // DR_3C X..........mmmmm ooosssnnnnnddddd Rd Rn Rm ext(Rm) LSL imm(0-4)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isIntegerRegister(id->idReg1())); // SP
- assert(isIntegerRegister(id->idReg2())); // SP
- assert(isGeneralRegister(id->idReg3()));
- assert(insOptsNone(id->idInsOpt()) || insOptsLSL(id->idInsOpt()) || insOptsAnyExtend(id->idInsOpt()));
- assert(emitGetInsSC(id) >= 0);
- assert(emitGetInsSC(id) <= 4);
- if (insOptsLSL(id->idInsOpt()))
- {
- assert((emitGetInsSC(id) > 0) || (id->idReg2() == REG_ZR)); // REG_ZR encodes SP and we allow a shift of zero
- }
- break;
+ assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
+ assert(insOptsNone(id->idInsOpt()) || insOptsAnyShift(id->idInsOpt()));
+ assert(insOptsNone(id->idInsOpt()) || (emitGetInsSC(id) > 0));
+ break;
- case IF_DR_3D: // DR_3D X..........mmmmm cccc..nnnnnmmmmm Rd Rn Rm cond
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isGeneralRegister(id->idReg3()));
- assert(isValidImmCond(emitGetInsSC(id)));
- break;
+ case IF_DR_3C: // DR_3C X..........mmmmm ooosssnnnnnddddd Rd Rn Rm ext(Rm) LSL imm(0-4)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isIntegerRegister(id->idReg1())); // SP
+ assert(isIntegerRegister(id->idReg2())); // SP
+ assert(isGeneralRegister(id->idReg3()));
+ assert(insOptsNone(id->idInsOpt()) || insOptsLSL(id->idInsOpt()) || insOptsAnyExtend(id->idInsOpt()));
+ assert(emitGetInsSC(id) >= 0);
+ assert(emitGetInsSC(id) <= 4);
+ if (insOptsLSL(id->idInsOpt()))
+ {
+ assert((emitGetInsSC(id) > 0) ||
+ (id->idReg2() == REG_ZR)); // REG_ZR encodes SP and we allow a shift of zero
+ }
+ break;
- case IF_DR_3E: // DR_3E X........X.mmmmm ssssssnnnnnddddd Rd Rn Rm imm(0-63)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isGeneralRegister(id->idReg3()));
- assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
- assert(insOptsNone(id->idInsOpt()));
- break;
+ case IF_DR_3D: // DR_3D X..........mmmmm cccc..nnnnnmmmmm Rd Rn Rm cond
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isGeneralRegister(id->idReg3()));
+ assert(isValidImmCond(emitGetInsSC(id)));
+ break;
- case IF_DR_4A: // DR_4A X..........mmmmm .aaaaannnnnddddd Rd Rn Rm Ra
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isGeneralRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- assert(isGeneralRegister(id->idReg3()));
- assert(isGeneralRegister(id->idReg4()));
- break;
+ case IF_DR_3E: // DR_3E X........X.mmmmm ssssssnnnnnddddd Rd Rn Rm imm(0-63)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isGeneralRegister(id->idReg3()));
+ assert(isValidImmShift(emitGetInsSC(id), id->idOpSize()));
+ assert(insOptsNone(id->idInsOpt()));
+ break;
- case IF_DV_1A: // DV_1A .........X.iiiii iii........ddddd Vd imm8 (fmov - immediate scalar)
- assert(insOptsNone(id->idInsOpt()));
- elemsize = id->idOpSize();
- assert(isValidVectorElemsizeFloat(elemsize));
- assert(isVectorRegister(id->idReg1()));
- assert(isValidUimm8(emitGetInsSC(id)));
- break;
+ case IF_DR_4A: // DR_4A X..........mmmmm .aaaaannnnnddddd Rd Rn Rm Ra
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ assert(isGeneralRegister(id->idReg3()));
+ assert(isGeneralRegister(id->idReg4()));
+ break;
- case IF_DV_1B: // DV_1B .QX..........iii cmod..iiiiiddddd Vd imm8 (immediate vector)
- ins = id->idIns();
- imm = emitGetInsSC(id) & 0x0ff;
- immShift = (emitGetInsSC(id) & 0x700) >> 8;
- assert(immShift >= 0);
- datasize = id->idOpSize();
- assert(isValidVectorDatasize(datasize));
- assert(isValidArrangement(datasize, id->idInsOpt()));
- elemsize = optGetElemsize(id->idInsOpt());
- if (ins == INS_fmov)
- {
+ case IF_DV_1A: // DV_1A .........X.iiiii iii........ddddd Vd imm8 (fmov - immediate scalar)
+ assert(insOptsNone(id->idInsOpt()));
+ elemsize = id->idOpSize();
assert(isValidVectorElemsizeFloat(elemsize));
- assert(id->idInsOpt() != INS_OPTS_1D); // Reserved encoding
- assert(immShift == 0);
- }
- else
- {
- assert(isValidVectorElemsize(elemsize));
- assert((immShift != 4) && (immShift != 7)); // always invalid values
- if (ins != INS_movi) // INS_mvni, INS_orr, INS_bic
+ assert(isVectorRegister(id->idReg1()));
+ assert(isValidUimm8(emitGetInsSC(id)));
+ break;
+
+ case IF_DV_1B: // DV_1B .QX..........iii cmod..iiiiiddddd Vd imm8 (immediate vector)
+ ins = id->idIns();
+ imm = emitGetInsSC(id) & 0x0ff;
+ immShift = (emitGetInsSC(id) & 0x700) >> 8;
+ assert(immShift >= 0);
+ datasize = id->idOpSize();
+ assert(isValidVectorDatasize(datasize));
+ assert(isValidArrangement(datasize, id->idInsOpt()));
+ elemsize = optGetElemsize(id->idInsOpt());
+ if (ins == INS_fmov)
{
- assert((elemsize != EA_1BYTE) && (elemsize != EA_8BYTE)); // only H or S
- if (elemsize == EA_2BYTE)
- {
- assert(immShift < 2);
- }
- else // (elemsize == EA_4BYTE)
+ assert(isValidVectorElemsizeFloat(elemsize));
+ assert(id->idInsOpt() != INS_OPTS_1D); // Reserved encoding
+ assert(immShift == 0);
+ }
+ else
+ {
+ assert(isValidVectorElemsize(elemsize));
+ assert((immShift != 4) && (immShift != 7)); // always invalid values
+ if (ins != INS_movi) // INS_mvni, INS_orr, INS_bic
{
- if (ins != INS_mvni)
+ assert((elemsize != EA_1BYTE) && (elemsize != EA_8BYTE)); // only H or S
+ if (elemsize == EA_2BYTE)
{
- assert(immShift < 4);
+ assert(immShift < 2);
+ }
+ else // (elemsize == EA_4BYTE)
+ {
+ if (ins != INS_mvni)
+ {
+ assert(immShift < 4);
+ }
}
}
}
- }
- assert(isVectorRegister(id->idReg1()));
- assert(isValidUimm8(imm));
- break;
+ assert(isVectorRegister(id->idReg1()));
+ assert(isValidUimm8(imm));
+ break;
- case IF_DV_1C: // DV_1C .........X...... ......nnnnn..... Vn #0.0 (fcmp - with zero)
- assert(insOptsNone(id->idInsOpt()));
- elemsize = id->idOpSize();
- assert(isValidVectorElemsizeFloat(elemsize));
- assert(isVectorRegister(id->idReg1()));
- break;
+ case IF_DV_1C: // DV_1C .........X...... ......nnnnn..... Vn #0.0 (fcmp - with zero)
+ assert(insOptsNone(id->idInsOpt()));
+ elemsize = id->idOpSize();
+ assert(isValidVectorElemsizeFloat(elemsize));
+ assert(isVectorRegister(id->idReg1()));
+ break;
- case IF_DV_2A: // DV_2A .Q.......X...... ......nnnnnddddd Vd Vn (fabs, fcvt - vector)
- case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
- assert(isValidVectorDatasize(id->idOpSize()));
- assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- break;
+ case IF_DV_2A: // DV_2A .Q.......X...... ......nnnnnddddd Vd Vn (fabs, fcvt - vector)
+ case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
+ assert(isValidVectorDatasize(id->idOpSize()));
+ assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
- case IF_DV_2N: // DV_2N .........iiiiiii ......nnnnnddddd Vd Vn imm (shift - scalar)
- assert(id->idOpSize() == EA_8BYTE);
- assert(insOptsNone(id->idInsOpt()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- assert(isValidImmShift(emitGetInsSC(id), EA_8BYTE));
- break;
+ case IF_DV_2N: // DV_2N .........iiiiiii ......nnnnnddddd Vd Vn imm (shift - scalar)
+ assert(id->idOpSize() == EA_8BYTE);
+ assert(insOptsNone(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isValidImmShift(emitGetInsSC(id), EA_8BYTE));
+ break;
- case IF_DV_2O: // DV_2O .Q.......iiiiiii ......nnnnnddddd Vd Vn imm (shift - vector)
- assert(isValidVectorDatasize(id->idOpSize()));
- assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- elemsize = optGetElemsize(id->idInsOpt());
- assert(isValidImmShift(emitGetInsSC(id), elemsize));
- break;
+ case IF_DV_2O: // DV_2O .Q.......iiiiiii ......nnnnnddddd Vd Vn imm (shift - vector)
+ assert(isValidVectorDatasize(id->idOpSize()));
+ assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ elemsize = optGetElemsize(id->idInsOpt());
+ assert(isValidImmShift(emitGetInsSC(id), elemsize));
+ break;
- case IF_DV_2B: // DV_2B .Q.........iiiii ......nnnnnddddd Rd Vn[] (umov/smov - to general)
- elemsize = id->idOpSize();
- index = emitGetInsSC(id);
- assert(insOptsNone(id->idInsOpt()));
- assert(isValidVectorIndex(EA_16BYTE, elemsize, index));
- assert(isValidVectorElemsize(elemsize));
- assert(isGeneralRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- break;
+ case IF_DV_2B: // DV_2B .Q.........iiiii ......nnnnnddddd Rd Vn[] (umov/smov - to general)
+ elemsize = id->idOpSize();
+ index = emitGetInsSC(id);
+ assert(insOptsNone(id->idInsOpt()));
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, index));
+ assert(isValidVectorElemsize(elemsize));
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
- case IF_DV_2C: // DV_2C .Q.........iiiii ......nnnnnddddd Vd Rn (dup/ins - vector from general)
- if (id->idIns() == INS_dup)
- {
+ case IF_DV_2C: // DV_2C .Q.........iiiii ......nnnnnddddd Vd Rn (dup/ins - vector from general)
+ if (id->idIns() == INS_dup)
+ {
+ datasize = id->idOpSize();
+ assert(isValidVectorDatasize(datasize));
+ assert(isValidArrangement(datasize, id->idInsOpt()));
+ elemsize = optGetElemsize(id->idInsOpt());
+ }
+ else // INS_ins
+ {
+ datasize = EA_16BYTE;
+ elemsize = id->idOpSize();
+ assert(isValidVectorElemsize(elemsize));
+ }
+ assert(isVectorRegister(id->idReg1()));
+ assert(isGeneralRegisterOrZR(id->idReg2()));
+ break;
+
+ case IF_DV_2D: // DV_2D .Q.........iiiii ......nnnnnddddd Vd Vn[] (dup - vector)
datasize = id->idOpSize();
assert(isValidVectorDatasize(datasize));
assert(isValidArrangement(datasize, id->idInsOpt()));
elemsize = optGetElemsize(id->idInsOpt());
- }
- else // INS_ins
- {
- datasize = EA_16BYTE;
+ index = emitGetInsSC(id);
+ assert(isValidVectorIndex(datasize, elemsize, index));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
+
+ case IF_DV_2E: // DV_2E ...........iiiii ......nnnnnddddd Vd Vn[] (dup - scalar)
elemsize = id->idOpSize();
+ index = emitGetInsSC(id);
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, index));
assert(isValidVectorElemsize(elemsize));
- }
- assert(isVectorRegister(id->idReg1()));
- assert(isGeneralRegisterOrZR(id->idReg2()));
- break;
-
- case IF_DV_2D: // DV_2D .Q.........iiiii ......nnnnnddddd Vd Vn[] (dup - vector)
- datasize = id->idOpSize();
- assert(isValidVectorDatasize(datasize));
- assert(isValidArrangement(datasize, id->idInsOpt()));
- elemsize = optGetElemsize(id->idInsOpt());
- index = emitGetInsSC(id);
- assert(isValidVectorIndex(datasize, elemsize, index));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- break;
-
- case IF_DV_2E: // DV_2E ...........iiiii ......nnnnnddddd Vd Vn[] (dup - scalar)
- elemsize = id->idOpSize();
- index = emitGetInsSC(id);
- assert(isValidVectorIndex(EA_16BYTE, elemsize, index));
- assert(isValidVectorElemsize(elemsize));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- break;
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
- case IF_DV_2F: // DV_2F ...........iiiii .jjjj.nnnnnddddd Vd[] Vn[] (ins - element)
- imm = emitGetInsSC(id);
- index = (imm >> 4) & 0xf;
- index2 = imm & 0xf;
- elemsize = id->idOpSize();
- assert(isValidVectorElemsize(elemsize));
- assert(isValidVectorIndex(EA_16BYTE, elemsize, index));
- assert(isValidVectorIndex(EA_16BYTE, elemsize, index2));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- break;
+ case IF_DV_2F: // DV_2F ...........iiiii .jjjj.nnnnnddddd Vd[] Vn[] (ins - element)
+ imm = emitGetInsSC(id);
+ index = (imm >> 4) & 0xf;
+ index2 = imm & 0xf;
+ elemsize = id->idOpSize();
+ assert(isValidVectorElemsize(elemsize));
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, index));
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, index2));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
- case IF_DV_2L: // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
- assert(id->idOpSize() == EA_8BYTE); // only type D is supported
- __fallthrough;
+ case IF_DV_2L: // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
+ assert(id->idOpSize() == EA_8BYTE); // only type D is supported
+ __fallthrough;
- case IF_DV_2G: // DV_2G .........X...... ......nnnnnddddd Vd Vn (fmov, fcvtXX - register)
- case IF_DV_2K: // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
- assert(insOptsNone(id->idInsOpt()));
- assert(isValidVectorElemsizeFloat(id->idOpSize()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- break;
+ case IF_DV_2G: // DV_2G .........X...... ......nnnnnddddd Vd Vn (fmov, fcvtXX - register)
+ case IF_DV_2K: // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
+ assert(insOptsNone(id->idInsOpt()));
+ assert(isValidVectorElemsizeFloat(id->idOpSize()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
- case IF_DV_2H: // DV_2H X........X...... ......nnnnnddddd Rd Vn (fmov/fcvtXX - to general)
- assert(insOptsConvertFloatToInt(id->idInsOpt()));
- dstsize = optGetDstsize(id->idInsOpt());
- srcsize = optGetSrcsize(id->idInsOpt());
- assert(isValidGeneralDatasize(dstsize));
- assert(isValidVectorElemsizeFloat(srcsize));
- assert(dstsize == id->idOpSize());
- assert(isGeneralRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- break;
+ case IF_DV_2H: // DV_2H X........X...... ......nnnnnddddd Rd Vn (fmov/fcvtXX - to general)
+ assert(insOptsConvertFloatToInt(id->idInsOpt()));
+ dstsize = optGetDstsize(id->idInsOpt());
+ srcsize = optGetSrcsize(id->idInsOpt());
+ assert(isValidGeneralDatasize(dstsize));
+ assert(isValidVectorElemsizeFloat(srcsize));
+ assert(dstsize == id->idOpSize());
+ assert(isGeneralRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
- case IF_DV_2I: // DV_2I X........X...... ......nnnnnddddd Vd Rn (fmov/Xcvtf - from general)
- assert(insOptsConvertIntToFloat(id->idInsOpt()));
- dstsize = optGetDstsize(id->idInsOpt());
- srcsize = optGetSrcsize(id->idInsOpt());
- assert(isValidGeneralDatasize(srcsize));
- assert(isValidVectorElemsizeFloat(dstsize));
- assert(dstsize == id->idOpSize());
- assert(isVectorRegister(id->idReg1()));
- assert(isGeneralRegister(id->idReg2()));
- break;
+ case IF_DV_2I: // DV_2I X........X...... ......nnnnnddddd Vd Rn (fmov/Xcvtf - from general)
+ assert(insOptsConvertIntToFloat(id->idInsOpt()));
+ dstsize = optGetDstsize(id->idInsOpt());
+ srcsize = optGetSrcsize(id->idInsOpt());
+ assert(isValidGeneralDatasize(srcsize));
+ assert(isValidVectorElemsizeFloat(dstsize));
+ assert(dstsize == id->idOpSize());
+ assert(isVectorRegister(id->idReg1()));
+ assert(isGeneralRegister(id->idReg2()));
+ break;
- case IF_DV_2J: // DV_2J ........SS.....D D.....nnnnnddddd Vd Vn (fcvt)
- assert(insOptsConvertFloatToFloat(id->idInsOpt()));
- dstsize = optGetDstsize(id->idInsOpt());
- srcsize = optGetSrcsize(id->idInsOpt());
- assert(isValidVectorFcvtsize(srcsize));
- assert(isValidVectorFcvtsize(dstsize));
- assert(dstsize == id->idOpSize());
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- break;
+ case IF_DV_2J: // DV_2J ........SS.....D D.....nnnnnddddd Vd Vn (fcvt)
+ assert(insOptsConvertFloatToFloat(id->idInsOpt()));
+ dstsize = optGetDstsize(id->idInsOpt());
+ srcsize = optGetSrcsize(id->idInsOpt());
+ assert(isValidVectorFcvtsize(srcsize));
+ assert(isValidVectorFcvtsize(dstsize));
+ assert(dstsize == id->idOpSize());
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ break;
- case IF_DV_3A: // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- assert(isValidVectorDatasize(id->idOpSize()));
- assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- assert(isVectorRegister(id->idReg3()));
- elemsize = optGetElemsize(id->idInsOpt());
- ins = id->idIns();
- if (ins == INS_mul)
- {
- assert(elemsize != EA_8BYTE); // can't use 2D or 1D
- }
- else if (ins == INS_pmul)
- {
- assert(elemsize == EA_1BYTE); // only supports 8B or 16B
- }
- break;
+ case IF_DV_3A: // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ assert(isValidVectorDatasize(id->idOpSize()));
+ assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isVectorRegister(id->idReg3()));
+ elemsize = optGetElemsize(id->idInsOpt());
+ ins = id->idIns();
+ if (ins == INS_mul)
+ {
+ assert(elemsize != EA_8BYTE); // can't use 2D or 1D
+ }
+ else if (ins == INS_pmul)
+ {
+ assert(elemsize == EA_1BYTE); // only supports 8B or 16B
+ }
+ break;
- case IF_DV_3AI: // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
- assert(isValidVectorDatasize(id->idOpSize()));
- assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- assert(isVectorRegister(id->idReg3()));
- elemsize = optGetElemsize(id->idInsOpt());
- assert(isValidVectorIndex(EA_16BYTE, elemsize, emitGetInsSC(id)));
- // Only has encodings for H or S elemsize
- assert((elemsize == EA_2BYTE) || (elemsize == EA_4BYTE));
- break;
+ case IF_DV_3AI: // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
+ assert(isValidVectorDatasize(id->idOpSize()));
+ assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isVectorRegister(id->idReg3()));
+ elemsize = optGetElemsize(id->idInsOpt());
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, emitGetInsSC(id)));
+ // Only has encodings for H or S elemsize
+ assert((elemsize == EA_2BYTE) || (elemsize == EA_4BYTE));
+ break;
- case IF_DV_3B: // DV_3B .Q.......X.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- assert(isValidVectorDatasize(id->idOpSize()));
- assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- assert(isVectorRegister(id->idReg3()));
- break;
+ case IF_DV_3B: // DV_3B .Q.......X.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ assert(isValidVectorDatasize(id->idOpSize()));
+ assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isVectorRegister(id->idReg3()));
+ break;
- case IF_DV_3BI: // DV_3BI .Q.......XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
- assert(isValidVectorDatasize(id->idOpSize()));
- assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- assert(isVectorRegister(id->idReg3()));
- elemsize = optGetElemsize(id->idInsOpt());
- assert(isValidVectorIndex(id->idOpSize(), elemsize, emitGetInsSC(id)));
- break;
+ case IF_DV_3BI: // DV_3BI .Q.......XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
+ assert(isValidVectorDatasize(id->idOpSize()));
+ assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isVectorRegister(id->idReg3()));
+ elemsize = optGetElemsize(id->idInsOpt());
+ assert(isValidVectorIndex(id->idOpSize(), elemsize, emitGetInsSC(id)));
+ break;
- case IF_DV_3C: // DV_3C .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- assert(isValidVectorDatasize(id->idOpSize()));
- assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- assert(isVectorRegister(id->idReg3()));
- break;
+ case IF_DV_3C: // DV_3C .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ assert(isValidVectorDatasize(id->idOpSize()));
+ assert(isValidArrangement(id->idOpSize(), id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isVectorRegister(id->idReg3()));
+ break;
- case IF_DV_3D: // DV_3D .........X.mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
- assert(isValidScalarDatasize(id->idOpSize()));
- assert(insOptsNone(id->idInsOpt()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- assert(isVectorRegister(id->idReg3()));
- break;
+ case IF_DV_3D: // DV_3D .........X.mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
+ assert(isValidScalarDatasize(id->idOpSize()));
+ assert(insOptsNone(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isVectorRegister(id->idReg3()));
+ break;
- case IF_DV_3DI: // DV_3DI .........XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (scalar by elem)
- assert(isValidScalarDatasize(id->idOpSize()));
- assert(insOptsNone(id->idInsOpt()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- assert(isVectorRegister(id->idReg3()));
- elemsize = id->idOpSize();
- assert(isValidVectorIndex(EA_16BYTE, elemsize, emitGetInsSC(id)));
- break;
+ case IF_DV_3DI: // DV_3DI .........XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (scalar by elem)
+ assert(isValidScalarDatasize(id->idOpSize()));
+ assert(insOptsNone(id->idInsOpt()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isVectorRegister(id->idReg3()));
+ elemsize = id->idOpSize();
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, emitGetInsSC(id)));
+ break;
- case IF_DV_3E: // DV_3E ...........mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
- assert(insOptsNone(id->idInsOpt()));
- assert(id->idOpSize() == EA_8BYTE);
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- assert(isVectorRegister(id->idReg3()));
- break;
+ case IF_DV_3E: // DV_3E ...........mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
+ assert(insOptsNone(id->idInsOpt()));
+ assert(id->idOpSize() == EA_8BYTE);
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isVectorRegister(id->idReg3()));
+ break;
- case IF_DV_4A: // DR_4A .........X.mmmmm .aaaaannnnnddddd Rd Rn Rm Ra (scalar)
- assert(isValidGeneralDatasize(id->idOpSize()));
- assert(isVectorRegister(id->idReg1()));
- assert(isVectorRegister(id->idReg2()));
- assert(isVectorRegister(id->idReg3()));
- assert(isVectorRegister(id->idReg4()));
- break;
+ case IF_DV_4A: // DR_4A .........X.mmmmm .aaaaannnnnddddd Rd Rn Rm Ra (scalar)
+ assert(isValidGeneralDatasize(id->idOpSize()));
+ assert(isVectorRegister(id->idReg1()));
+ assert(isVectorRegister(id->idReg2()));
+ assert(isVectorRegister(id->idReg3()));
+ assert(isVectorRegister(id->idReg4()));
+ break;
- case IF_SN_0A: // SN_0A ................ ................
- case IF_SI_0A: // SI_0A ...........iiiii iiiiiiiiiii..... imm16
- case IF_SI_0B: // SI_0B ................ ....bbbb........ imm4 - barrier
- break;
+ case IF_SN_0A: // SN_0A ................ ................
+ case IF_SI_0A: // SI_0A ...........iiiii iiiiiiiiiii..... imm16
+ case IF_SI_0B: // SI_0B ................ ....bbbb........ imm4 - barrier
+ break;
- default:
- printf("unexpected format %s\n", emitIfName(id->idInsFmt()));
- assert(!"Unexpected format");
- break;
+ default:
+ printf("unexpected format %s\n", emitIfName(id->idInsFmt()));
+ assert(!"Unexpected format");
+ break;
}
}
-#endif // DEBUG
+#endif // DEBUG
-bool emitter::emitInsMayWriteToGCReg(instrDesc *id)
+bool emitter::emitInsMayWriteToGCReg(instrDesc* id)
{
- instruction ins = id->idIns();
- insFormat fmt = id->idInsFmt();
+ instruction ins = id->idIns();
+ insFormat fmt = id->idInsFmt();
switch (fmt)
{
- // These are the formats with "destination" registers:
+ // These are the formats with "destination" registers:
- case IF_DI_1B: // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw)
- case IF_DI_1D: // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s)
- case IF_DI_1E: // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
+ case IF_DI_1B: // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw)
+ case IF_DI_1D: // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s)
+ case IF_DI_1E: // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
- case IF_DI_2A: // DI_2A X.......shiiiiii iiiiiinnnnnddddd Rd Rn imm(i12,sh)
- case IF_DI_2B: // DI_2B X.........Xnnnnn ssssssnnnnnddddd Rd Rn imm(0-63)
- case IF_DI_2C: // DI_2C X........Nrrrrrr ssssssnnnnnddddd Rd Rn imm(N,r,s)
- case IF_DI_2D: // DI_2D X........Nrrrrrr ssssssnnnnnddddd Rd Rn imr, imms (N,r,s)
+ case IF_DI_2A: // DI_2A X.......shiiiiii iiiiiinnnnnddddd Rd Rn imm(i12,sh)
+ case IF_DI_2B: // DI_2B X.........Xnnnnn ssssssnnnnnddddd Rd Rn imm(0-63)
+ case IF_DI_2C: // DI_2C X........Nrrrrrr ssssssnnnnnddddd Rd Rn imm(N,r,s)
+ case IF_DI_2D: // DI_2D X........Nrrrrrr ssssssnnnnnddddd Rd Rn imr, imms (N,r,s)
- case IF_DR_1D: // DR_1D X............... cccc.......ddddd Rd cond
+ case IF_DR_1D: // DR_1D X............... cccc.......ddddd Rd cond
- case IF_DR_2D: // DR_2D X..........nnnnn cccc..nnnnnddddd Rd Rn cond
- case IF_DR_2E: // DR_2E X..........mmmmm ...........ddddd Rd Rm
- case IF_DR_2F: // DR_2F X.......sh.mmmmm ssssss.....ddddd Rd Rm {LSL,LSR,ASR} imm(0-63)
- case IF_DR_2G: // DR_2G X............... ......nnnnnddddd Rd Rn
- case IF_DR_2H: // DR_2H X........X...... ......nnnnnddddd Rd Rn
+ case IF_DR_2D: // DR_2D X..........nnnnn cccc..nnnnnddddd Rd Rn cond
+ case IF_DR_2E: // DR_2E X..........mmmmm ...........ddddd Rd Rm
+ case IF_DR_2F: // DR_2F X.......sh.mmmmm ssssss.....ddddd Rd Rm {LSL,LSR,ASR} imm(0-63)
+ case IF_DR_2G: // DR_2G X............... ......nnnnnddddd Rd Rn
+ case IF_DR_2H: // DR_2H X........X...... ......nnnnnddddd Rd Rn
- case IF_DR_3A: // DR_3A X..........mmmmm ......nnnnnddddd Rd Rn Rm
- case IF_DR_3B: // DR_3B X.......sh.mmmmm ssssssnnnnnddddd Rd Rn Rm {LSL,LSR,ASR} imm(0-63)
- case IF_DR_3C: // DR_3C X..........mmmmm xxxsssnnnnnddddd Rd Rn Rm ext(Rm) LSL imm(0-4)
- case IF_DR_3D: // DR_3D X..........mmmmm cccc..nnnnnddddd Rd Rn Rm cond
- case IF_DR_3E: // DR_3E X........X.mmmmm ssssssnnnnnddddd Rd Rn Rm imm(0-63)
+ case IF_DR_3A: // DR_3A X..........mmmmm ......nnnnnddddd Rd Rn Rm
+ case IF_DR_3B: // DR_3B X.......sh.mmmmm ssssssnnnnnddddd Rd Rn Rm {LSL,LSR,ASR} imm(0-63)
+ case IF_DR_3C: // DR_3C X..........mmmmm xxxsssnnnnnddddd Rd Rn Rm ext(Rm) LSL imm(0-4)
+ case IF_DR_3D: // DR_3D X..........mmmmm cccc..nnnnnddddd Rd Rn Rm cond
+ case IF_DR_3E: // DR_3E X........X.mmmmm ssssssnnnnnddddd Rd Rn Rm imm(0-63)
- case IF_DR_4A: // DR_4A X..........mmmmm .aaaaannnnnddddd Rd Rn Rm Ra
+ case IF_DR_4A: // DR_4A X..........mmmmm .aaaaannnnnddddd Rd Rn Rm Ra
- case IF_DV_2B: // DV_2B .Q.........iiiii ......nnnnnddddd Rd Vn[] (umov - to general)
- case IF_DV_2H: // DV_2H X........X...... ......nnnnnddddd Rd Vn (fmov - to general)
+ case IF_DV_2B: // DV_2B .Q.........iiiii ......nnnnnddddd Rd Vn[] (umov - to general)
+ case IF_DV_2H: // DV_2H X........X...... ......nnnnnddddd Rd Vn (fmov - to general)
- return true;
+ return true;
- case IF_DV_2C: // DV_2C .Q.........iiiii ......nnnnnddddd Vd Rn (dup/ins - vector from general)
- case IF_DV_2D: // DV_2D .Q.........iiiii ......nnnnnddddd Vd Vn[] (dup - vector)
- case IF_DV_2E: // DV_2E ...........iiiii ......nnnnnddddd Vd Vn[] (dup - scalar)
- case IF_DV_2F: // DV_2F ...........iiiii .jjjj.nnnnnddddd Vd[] Vn[] (ins - element)
- case IF_DV_2G: // DV_2G .........X...... ......nnnnnddddd Vd Vn (fmov, fcvtXX - register)
- case IF_DV_2I: // DV_2I X........X...... ......nnnnnddddd Vd Rn (fmov - from general)
- case IF_DV_2J: // DV_2J ........SS.....D D.....nnnnnddddd Vd Vn (fcvt)
- case IF_DV_2K: // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
- case IF_DV_2L: // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
- case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
- case IF_DV_3A: // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- case IF_DV_3AI: // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector)
- case IF_DV_3B: // DV_3B .Q.......X.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- case IF_DV_3BI: // DV_3BI .Q.......XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
- case IF_DV_3C: // DV_3C .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- case IF_DV_3D: // DV_3D .........X.mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
- case IF_DV_3DI: // DV_3DI .........XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (scalar by elem)
- case IF_DV_3E: // DV_3E ...........mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
- case IF_DV_4A: // DV_4A .........X.mmmmm .aaaaannnnnddddd Vd Va Vn Vm (scalar)
- // Tracked GC pointers cannot be placed into the SIMD registers.
- return false;
+ case IF_DV_2C: // DV_2C .Q.........iiiii ......nnnnnddddd Vd Rn (dup/ins - vector from general)
+ case IF_DV_2D: // DV_2D .Q.........iiiii ......nnnnnddddd Vd Vn[] (dup - vector)
+ case IF_DV_2E: // DV_2E ...........iiiii ......nnnnnddddd Vd Vn[] (dup - scalar)
+ case IF_DV_2F: // DV_2F ...........iiiii .jjjj.nnnnnddddd Vd[] Vn[] (ins - element)
+ case IF_DV_2G: // DV_2G .........X...... ......nnnnnddddd Vd Vn (fmov, fcvtXX - register)
+ case IF_DV_2I: // DV_2I X........X...... ......nnnnnddddd Vd Rn (fmov - from general)
+ case IF_DV_2J: // DV_2J ........SS.....D D.....nnnnnddddd Vd Vn (fcvt)
+ case IF_DV_2K: // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
+ case IF_DV_2L: // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
+ case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
+ case IF_DV_3A: // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ case IF_DV_3AI: // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector)
+ case IF_DV_3B: // DV_3B .Q.......X.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ case IF_DV_3BI: // DV_3BI .Q.......XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
+ case IF_DV_3C: // DV_3C .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ case IF_DV_3D: // DV_3D .........X.mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
+ case IF_DV_3DI: // DV_3DI .........XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (scalar by elem)
+ case IF_DV_3E: // DV_3E ...........mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
+ case IF_DV_4A: // DV_4A .........X.mmmmm .aaaaannnnnddddd Vd Va Vn Vm (scalar)
+ // Tracked GC pointers cannot be placed into the SIMD registers.
+ return false;
- // These are the load/store formats with "target" registers:
+ // These are the load/store formats with "target" registers:
- case IF_LS_1A: // LS_1A XX...V..iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB)
- case IF_LS_2A: // LS_2A .X.......X...... ......nnnnnttttt Rt Rn
- case IF_LS_2B: // LS_2B .X.......Xiiiiii iiiiiinnnnnttttt Rt Rn imm(0-4095)
- case IF_LS_2C: // LS_2C .X.......X.iiiii iiiiP.nnnnnttttt Rt Rn imm(-256..+255) pre/post inc
- case IF_LS_3A: // LS_3A .X.......X.mmmmm xxxS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
- case IF_LS_3B: // LS_3B X............... .aaaaannnnnttttt Rt Ra Rn
- case IF_LS_3C: // LS_3C X.........iiiiii iaaaaannnnnttttt Rt Ra Rn imm(im7,sh)
+ case IF_LS_1A: // LS_1A XX...V..iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB)
+ case IF_LS_2A: // LS_2A .X.......X...... ......nnnnnttttt Rt Rn
+ case IF_LS_2B: // LS_2B .X.......Xiiiiii iiiiiinnnnnttttt Rt Rn imm(0-4095)
+ case IF_LS_2C: // LS_2C .X.......X.iiiii iiiiP.nnnnnttttt Rt Rn imm(-256..+255) pre/post inc
+ case IF_LS_3A: // LS_3A .X.......X.mmmmm xxxS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
+ case IF_LS_3B: // LS_3B X............... .aaaaannnnnttttt Rt Ra Rn
+ case IF_LS_3C: // LS_3C X.........iiiiii iaaaaannnnnttttt Rt Ra Rn imm(im7,sh)
- // For the Store instructions the "target" register is actually a "source" value
+ // For the Store instructions the "target" register is actually a "source" value
- if (emitInsIsStore(ins))
- {
- return false;
- }
- else
- {
- assert(emitInsIsLoad(ins));
- return true;
- }
+ if (emitInsIsStore(ins))
+ {
+ return false;
+ }
+ else
+ {
+ assert(emitInsIsLoad(ins));
+ return true;
+ }
- default:
- return false;
+ default:
+ return false;
}
}
-bool emitter::emitInsWritesToLclVarStackLoc(instrDesc *id)
+bool emitter::emitInsWritesToLclVarStackLoc(instrDesc* id)
{
if (!id->idIsLclVar())
return false;
- instruction ins = id->idIns();
+ instruction ins = id->idIns();
// This list is related to the list of instructions used to store local vars in emitIns_S_R().
// We don't accept writing to float local vars.
switch (ins)
{
- case INS_strb:
- case INS_strh:
- case INS_str:
- case INS_stur:
- case INS_sturb:
- case INS_sturh:
- return true;
- default:
- return false;
+ case INS_strb:
+ case INS_strh:
+ case INS_str:
+ case INS_stur:
+ case INS_sturb:
+ case INS_sturh:
+ return true;
+ default:
+ return false;
}
}
-bool emitter::emitInsMayWriteMultipleRegs(instrDesc *id)
+bool emitter::emitInsMayWriteMultipleRegs(instrDesc* id)
{
- instruction ins = id->idIns();
+ instruction ins = id->idIns();
switch (ins)
{
- case INS_ldp:
- case INS_ldpsw:
- case INS_ldnp:
- return true;
- default:
- return false;
+ case INS_ldp:
+ case INS_ldpsw:
+ case INS_ldnp:
+ return true;
+ default:
+ return false;
}
}
-// For the small loads/store instruction we adjust the size 'attr'
+// For the small load/store instructions we adjust the size 'attr'
// depending upon whether we have a load or a store
//
-emitAttr emitter::emitInsAdjustLoadStoreAttr(instruction ins, emitAttr attr)
+emitAttr emitter::emitInsAdjustLoadStoreAttr(instruction ins, emitAttr attr)
{
if (EA_SIZE(attr) <= EA_4BYTE)
{
if (emitInsIsLoad(ins))
{
- // The value of 'ins' encodes the size to load
+ // The value of 'ins' encodes the size to load
// we use EA_8BYTE here because it is the size we will write (into dataReg)
// it is also required when ins is INS_ldrsw
//
@@ -921,9 +917,9 @@ emitAttr emitter::emitInsAdjustLoadStoreAttr(instruction ins, emitAttr attr)
{
assert(emitInsIsStore(ins));
- // The value of 'ins' encodes the size to store
+ // The value of 'ins' encodes the size to store
// we use EA_4BYTE here because it is the size of the register
- // that we want to display when storing small values
+ // that we want to display when storing small values
//
attr = EA_4BYTE;
}
@@ -931,74 +927,74 @@ emitAttr emitter::emitInsAdjustLoadStoreAttr(instruction ins, emitAttr attr)
return attr;
}
-// Takes an instrDesc 'id' and uses the instruction 'ins' to determine the
+// Takes an instrDesc 'id' and uses the instruction 'ins' to determine the
// size of the target register that is written or read by the instruction.
-// Note that even if EA_4BYTE is returned a load instruction will still
+// Note that even if EA_4BYTE is returned a load instruction will still
// always zero the upper 4 bytes of the target register.
// This method is required so that we can distinguish between loads that are
-// sign-extending as they can have two different sizes for their target register.
-// Additionally for instructions like 'ldr' and 'str' these can load/store
+// sign-extending as they can have two different sizes for their target register.
+// Additionally for instructions like 'ldr' and 'str' these can load/store
// either 4 byte or 8 bytes to/from the target register.
// By convention the small unsigned load instructions are considered to write
-// a 4 byte sized target register, though since these also zero the upper 4 bytes
+// a 4 byte sized target register, though since these also zero the upper 4 bytes
 // they could equally be considered to write the unsigned value to the full 8 byte register.
//
-emitAttr emitter::emitInsTargetRegSize(instrDesc *id)
+emitAttr emitter::emitInsTargetRegSize(instrDesc* id)
{
- instruction ins = id->idIns();
- emitAttr result = EA_UNKNOWN;
+ instruction ins = id->idIns();
+ emitAttr result = EA_UNKNOWN;
// This is used to determine the size of the target registers for a load/store instruction
switch (ins)
{
- case INS_ldrb:
- case INS_strb:
- case INS_ldurb:
- case INS_sturb:
- result = EA_4BYTE;
- break;
-
- case INS_ldrh:
- case INS_strh:
- case INS_ldurh:
- case INS_sturh:
- result = EA_4BYTE;
- break;
+ case INS_ldrb:
+ case INS_strb:
+ case INS_ldurb:
+ case INS_sturb:
+ result = EA_4BYTE;
+ break;
- case INS_ldrsb:
- case INS_ldursb:
- case INS_ldrsh:
- case INS_ldursh:
- if (id->idOpSize() == EA_8BYTE)
- result = EA_8BYTE;
- else
+ case INS_ldrh:
+ case INS_strh:
+ case INS_ldurh:
+ case INS_sturh:
result = EA_4BYTE;
- break;
+ break;
- case INS_ldrsw:
- case INS_ldursw:
- case INS_ldpsw:
- result = EA_8BYTE;
- break;
+ case INS_ldrsb:
+ case INS_ldursb:
+ case INS_ldrsh:
+ case INS_ldursh:
+ if (id->idOpSize() == EA_8BYTE)
+ result = EA_8BYTE;
+ else
+ result = EA_4BYTE;
+ break;
- case INS_ldp:
- case INS_stp:
- case INS_ldnp:
- case INS_stnp:
- result = id->idOpSize();
- break;
+ case INS_ldrsw:
+ case INS_ldursw:
+ case INS_ldpsw:
+ result = EA_8BYTE;
+ break;
- case INS_ldr:
- case INS_str:
- case INS_ldur:
- case INS_stur:
- result = id->idOpSize();
- break;
+ case INS_ldp:
+ case INS_stp:
+ case INS_ldnp:
+ case INS_stnp:
+ result = id->idOpSize();
+ break;
- default:
- NO_WAY("unexpected instruction");
- break;
+ case INS_ldr:
+ case INS_str:
+ case INS_ldur:
+ case INS_stur:
+ result = id->idOpSize();
+ break;
+
+ default:
+ NO_WAY("unexpected instruction");
+ break;
}
return result;
}
@@ -1006,62 +1002,62 @@ emitAttr emitter::emitInsTargetRegSize(instrDesc *id)
// Takes an instrDesc and uses the instruction to determine the 'size' of the
// data that is loaded from memory.
//
-emitAttr emitter::emitInsLoadStoreSize(instrDesc *id)
+emitAttr emitter::emitInsLoadStoreSize(instrDesc* id)
{
- instruction ins = id->idIns();
- emitAttr result = EA_UNKNOWN;
+ instruction ins = id->idIns();
+ emitAttr result = EA_UNKNOWN;
// The 'result' returned is the 'size' of the data that is loaded from memory.
switch (ins)
{
- case INS_ldrb:
- case INS_strb:
- case INS_ldurb:
- case INS_sturb:
- case INS_ldrsb:
- case INS_ldursb:
- result = EA_1BYTE;
- break;
+ case INS_ldrb:
+ case INS_strb:
+ case INS_ldurb:
+ case INS_sturb:
+ case INS_ldrsb:
+ case INS_ldursb:
+ result = EA_1BYTE;
+ break;
- case INS_ldrh:
- case INS_strh:
- case INS_ldurh:
- case INS_sturh:
- case INS_ldrsh:
- case INS_ldursh:
- result = EA_2BYTE;
- break;
+ case INS_ldrh:
+ case INS_strh:
+ case INS_ldurh:
+ case INS_sturh:
+ case INS_ldrsh:
+ case INS_ldursh:
+ result = EA_2BYTE;
+ break;
- case INS_ldrsw:
- case INS_ldursw:
- case INS_ldpsw:
- result = EA_4BYTE;
- break;
+ case INS_ldrsw:
+ case INS_ldursw:
+ case INS_ldpsw:
+ result = EA_4BYTE;
+ break;
- case INS_ldp:
- case INS_stp:
- case INS_ldnp:
- case INS_stnp:
- result = id->idOpSize();
- break;
+ case INS_ldp:
+ case INS_stp:
+ case INS_ldnp:
+ case INS_stnp:
+ result = id->idOpSize();
+ break;
- case INS_ldr:
- case INS_str:
- case INS_ldur:
- case INS_stur:
- result = id->idOpSize();
- break;
+ case INS_ldr:
+ case INS_str:
+ case INS_ldur:
+ case INS_stur:
+ result = id->idOpSize();
+ break;
- default:
- NO_WAY("unexpected instruction");
- break;
+ default:
+ NO_WAY("unexpected instruction");
+ break;
}
return result;
}
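The two helpers above answer different questions about the same instruction, which a hypothetical case makes concrete (my own example, consistent with the two switch statements; not taken from the sources):

    // ldrsb x0, [x1]   (ins == INS_ldrsb, idOpSize() == EA_8BYTE)
    //   emitInsTargetRegSize(id) -> EA_8BYTE   the whole register is written (sign-extended)
    //   emitInsLoadStoreSize(id) -> EA_1BYTE   only one byte is read from memory
    // ldrb w0, [x1]    (ins == INS_ldrb)
    //   emitInsTargetRegSize(id) -> EA_4BYTE   by convention; the upper half is zeroed anyway
    //   emitInsLoadStoreSize(id) -> EA_1BYTE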
/*****************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
// clang-format off
static const char * const xRegNames[] =
@@ -1125,13 +1121,11 @@ static const char * const bRegNames[] =
* Return a string that represents the given register.
*/
-const char * emitter::emitRegName(regNumber reg,
- emitAttr size,
- bool varName)
+const char* emitter::emitRegName(regNumber reg, emitAttr size, bool varName)
{
assert(reg < REG_COUNT);
- const char * rn = nullptr;
+ const char* rn = nullptr;
if (size == EA_8BYTE)
{
@@ -1153,13 +1147,13 @@ const char * emitter::emitRegName(regNumber reg,
}
else if (size == EA_1BYTE)
{
- rn = bRegNames[reg- REG_V0];
+ rn = bRegNames[reg - REG_V0];
}
}
assert(rn != nullptr);
- return rn;
+ return rn;
}
/*****************************************************************************
@@ -1167,13 +1161,13 @@ const char * emitter::emitRegName(regNumber reg,
* Return a string that represents the given register.
*/
-const char * emitter::emitVectorRegName(regNumber reg)
+const char* emitter::emitVectorRegName(regNumber reg)
{
assert((reg >= REG_V0) && (reg <= REG_V31));
- int index = (int) reg - (int) REG_V0;
+ int index = (int)reg - (int)REG_V0;
- return vRegNames[index];
+ return vRegNames[index];
}
#endif // DEBUG
@@ -1182,7 +1176,7 @@ const char * emitter::emitVectorRegName(regNumber reg)
* Returns the base encoding of the given CPU instruction.
*/
-emitter::insFormat emitter::emitInsFormat(instruction ins)
+emitter::insFormat emitter::emitInsFormat(instruction ins)
{
// clang-format off
const static insFormat insFormats[] =
@@ -1201,13 +1195,13 @@ emitter::insFormat emitter::emitInsFormat(instruction ins)
assert(ins < ArrLen(insFormats));
assert((insFormats[ins] != IF_NONE));
- return insFormats[ins];
+ return insFormats[ins];
}
// INST_FP is 1
-#define LD 2
-#define ST 4
-#define CMP 8
+#define LD 2
+#define ST 4
+#define CMP 8
// clang-format off
/*static*/ const BYTE CodeGenInterface::instInfo[] =
@@ -1228,7 +1222,7 @@ emitter::insFormat emitter::emitInsFormat(instruction ins)
* Returns true if the instruction is some kind of compare or test instruction
*/
-bool emitter::emitInsIsCompare(instruction ins)
+bool emitter::emitInsIsCompare(instruction ins)
{
// We have pseudo ins like lea which are not included in emitInsLdStTab.
if (ins < ArrLen(CodeGenInterface::instInfo))
@@ -1242,7 +1236,7 @@ bool emitter::emitInsIsCompare(instruction ins)
* Returns true if the instruction is some kind of load instruction
*/
-bool emitter::emitInsIsLoad(instruction ins)
+bool emitter::emitInsIsLoad(instruction ins)
{
// We have pseudo ins like lea which are not included in emitInsLdStTab.
if (ins < ArrLen(CodeGenInterface::instInfo))
@@ -1255,7 +1249,7 @@ bool emitter::emitInsIsLoad(instruction ins)
* Returns true if the instruction is some kind of store instruction
*/
-bool emitter::emitInsIsStore(instruction ins)
+bool emitter::emitInsIsStore(instruction ins)
{
// We have pseudo ins like lea which are not included in emitInsLdStTab.
if (ins < ArrLen(CodeGenInterface::instInfo))
@@ -1269,19 +1263,18 @@ bool emitter::emitInsIsStore(instruction ins)
* Returns true if the instruction is some kind of load/store instruction
*/
-bool emitter::emitInsIsLoadOrStore(instruction ins)
+bool emitter::emitInsIsLoadOrStore(instruction ins)
{
// We have pseudo ins like lea which are not included in emitInsLdStTab.
if (ins < ArrLen(CodeGenInterface::instInfo))
- return (CodeGenInterface::instInfo[ins] & (LD|ST)) ? true : false;
+ return (CodeGenInterface::instInfo[ins] & (LD | ST)) ? true : false;
else
return false;
-
}
-#undef LD
-#undef ST
-#undef CMP
+#undef LD
+#undef ST
+#undef CMP
/*****************************************************************************
*
@@ -1392,565 +1385,564 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
};
// clang-format on
- const static insFormat formatEncode9[9] = { IF_DR_2E, IF_DR_2G, IF_DI_1B, IF_DI_1D, IF_DV_3C,
- IF_DV_2B, IF_DV_2C, IF_DV_2E, IF_DV_2F };
- const static insFormat formatEncode6A[6] = { IF_DR_3A, IF_DR_3B, IF_DR_3C, IF_DI_2A, IF_DV_3A,
- IF_DV_3E };
- const static insFormat formatEncode5A[5] = { IF_LS_2A, IF_LS_2B, IF_LS_2C, IF_LS_3A, IF_LS_1A };
- const static insFormat formatEncode5B[5] = { IF_DV_2G, IF_DV_2H, IF_DV_2I, IF_DV_1A, IF_DV_1B };
- const static insFormat formatEncode5C[5] = { IF_DR_3A, IF_DR_3B, IF_DI_2C, IF_DV_3C, IF_DV_1B };
- const static insFormat formatEncode4A[4] = { IF_LS_2A, IF_LS_2B, IF_LS_2C, IF_LS_3A };
- const static insFormat formatEncode4B[4] = { IF_DR_3A, IF_DR_3B, IF_DR_3C, IF_DI_2A };
- const static insFormat formatEncode4C[4] = { IF_DR_2A, IF_DR_2B, IF_DR_2C, IF_DI_1A };
- const static insFormat formatEncode4D[4] = { IF_DV_3B, IF_DV_3D, IF_DV_3BI, IF_DV_3DI };
- const static insFormat formatEncode4E[4] = { IF_DR_3A, IF_DR_3B, IF_DI_2C, IF_DV_3C };
- const static insFormat formatEncode4F[4] = { IF_DR_3A, IF_DR_3B, IF_DV_3C, IF_DV_1B };
- const static insFormat formatEncode4G[4] = { IF_DR_2E, IF_DR_2F, IF_DV_2M, IF_DV_2L };
- const static insFormat formatEncode3A[3] = { IF_DR_3A, IF_DR_3B, IF_DI_2C };
- const static insFormat formatEncode3B[3] = { IF_DR_2A, IF_DR_2B, IF_DI_1C };
- const static insFormat formatEncode3C[3] = { IF_DR_3A, IF_DR_3B, IF_DV_3C };
- const static insFormat formatEncode3D[3] = { IF_DV_2C, IF_DV_2D, IF_DV_2E };
- const static insFormat formatEncode3E[3] = { IF_DV_3B, IF_DV_3BI, IF_DV_3DI };
- const static insFormat formatEncode3F[3] = { IF_DV_2A, IF_DV_2G, IF_DV_2H };
- const static insFormat formatEncode3G[3] = { IF_DV_2A, IF_DV_2G, IF_DV_2I };
- const static insFormat formatEncode3H[3] = { IF_DR_3A, IF_DV_3A, IF_DV_3AI };
- const static insFormat formatEncode3I[3] = { IF_DR_2E, IF_DR_2F, IF_DV_2M };
- const static insFormat formatEncode2A[2] = { IF_DR_2E, IF_DR_2F };
- const static insFormat formatEncode2B[2] = { IF_DR_3A, IF_DR_3B };
- const static insFormat formatEncode2C[2] = { IF_DR_3A, IF_DI_2D };
- const static insFormat formatEncode2D[2] = { IF_DR_3A, IF_DI_2B };
- const static insFormat formatEncode2E[2] = { IF_LS_3B, IF_LS_3C };
- const static insFormat formatEncode2F[2] = { IF_DR_2I, IF_DI_1F };
- const static insFormat formatEncode2G[2] = { IF_DV_3B, IF_DV_3D };
- const static insFormat formatEncode2H[2] = { IF_DV_2C, IF_DV_2F };
- const static insFormat formatEncode2I[2] = { IF_DV_2K, IF_DV_1C };
- const static insFormat formatEncode2J[2] = { IF_DV_2A, IF_DV_2G };
- const static insFormat formatEncode2K[2] = { IF_DV_2M, IF_DV_2L };
- const static insFormat formatEncode2L[2] = { IF_DV_2G, IF_DV_2M };
- const static insFormat formatEncode2M[2] = { IF_DV_3A, IF_DV_3AI };
- const static insFormat formatEncode2N[2] = { IF_DV_2N, IF_DV_2O };
-
- code_t code = BAD_CODE;
- insFormat insFmt = emitInsFormat(ins);
+ const static insFormat formatEncode9[9] = {IF_DR_2E, IF_DR_2G, IF_DI_1B, IF_DI_1D, IF_DV_3C,
+ IF_DV_2B, IF_DV_2C, IF_DV_2E, IF_DV_2F};
+ const static insFormat formatEncode6A[6] = {IF_DR_3A, IF_DR_3B, IF_DR_3C, IF_DI_2A, IF_DV_3A, IF_DV_3E};
+ const static insFormat formatEncode5A[5] = {IF_LS_2A, IF_LS_2B, IF_LS_2C, IF_LS_3A, IF_LS_1A};
+ const static insFormat formatEncode5B[5] = {IF_DV_2G, IF_DV_2H, IF_DV_2I, IF_DV_1A, IF_DV_1B};
+ const static insFormat formatEncode5C[5] = {IF_DR_3A, IF_DR_3B, IF_DI_2C, IF_DV_3C, IF_DV_1B};
+ const static insFormat formatEncode4A[4] = {IF_LS_2A, IF_LS_2B, IF_LS_2C, IF_LS_3A};
+ const static insFormat formatEncode4B[4] = {IF_DR_3A, IF_DR_3B, IF_DR_3C, IF_DI_2A};
+ const static insFormat formatEncode4C[4] = {IF_DR_2A, IF_DR_2B, IF_DR_2C, IF_DI_1A};
+ const static insFormat formatEncode4D[4] = {IF_DV_3B, IF_DV_3D, IF_DV_3BI, IF_DV_3DI};
+ const static insFormat formatEncode4E[4] = {IF_DR_3A, IF_DR_3B, IF_DI_2C, IF_DV_3C};
+ const static insFormat formatEncode4F[4] = {IF_DR_3A, IF_DR_3B, IF_DV_3C, IF_DV_1B};
+ const static insFormat formatEncode4G[4] = {IF_DR_2E, IF_DR_2F, IF_DV_2M, IF_DV_2L};
+ const static insFormat formatEncode3A[3] = {IF_DR_3A, IF_DR_3B, IF_DI_2C};
+ const static insFormat formatEncode3B[3] = {IF_DR_2A, IF_DR_2B, IF_DI_1C};
+ const static insFormat formatEncode3C[3] = {IF_DR_3A, IF_DR_3B, IF_DV_3C};
+ const static insFormat formatEncode3D[3] = {IF_DV_2C, IF_DV_2D, IF_DV_2E};
+ const static insFormat formatEncode3E[3] = {IF_DV_3B, IF_DV_3BI, IF_DV_3DI};
+ const static insFormat formatEncode3F[3] = {IF_DV_2A, IF_DV_2G, IF_DV_2H};
+ const static insFormat formatEncode3G[3] = {IF_DV_2A, IF_DV_2G, IF_DV_2I};
+ const static insFormat formatEncode3H[3] = {IF_DR_3A, IF_DV_3A, IF_DV_3AI};
+ const static insFormat formatEncode3I[3] = {IF_DR_2E, IF_DR_2F, IF_DV_2M};
+ const static insFormat formatEncode2A[2] = {IF_DR_2E, IF_DR_2F};
+ const static insFormat formatEncode2B[2] = {IF_DR_3A, IF_DR_3B};
+ const static insFormat formatEncode2C[2] = {IF_DR_3A, IF_DI_2D};
+ const static insFormat formatEncode2D[2] = {IF_DR_3A, IF_DI_2B};
+ const static insFormat formatEncode2E[2] = {IF_LS_3B, IF_LS_3C};
+ const static insFormat formatEncode2F[2] = {IF_DR_2I, IF_DI_1F};
+ const static insFormat formatEncode2G[2] = {IF_DV_3B, IF_DV_3D};
+ const static insFormat formatEncode2H[2] = {IF_DV_2C, IF_DV_2F};
+ const static insFormat formatEncode2I[2] = {IF_DV_2K, IF_DV_1C};
+ const static insFormat formatEncode2J[2] = {IF_DV_2A, IF_DV_2G};
+ const static insFormat formatEncode2K[2] = {IF_DV_2M, IF_DV_2L};
+ const static insFormat formatEncode2L[2] = {IF_DV_2G, IF_DV_2M};
+ const static insFormat formatEncode2M[2] = {IF_DV_3A, IF_DV_3AI};
+ const static insFormat formatEncode2N[2] = {IF_DV_2N, IF_DV_2O};
+
+ code_t code = BAD_CODE;
+ insFormat insFmt = emitInsFormat(ins);
bool encoding_found = false;
- int index = -1;
+ int index = -1;
switch (insFmt)
{
- case IF_EN9:
- for (index=0; index<9; index++)
- {
- if (fmt == formatEncode9[index])
+ case IF_EN9:
+ for (index = 0; index < 9; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode9[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN6A:
- for (index=0; index<6; index++)
- {
- if (fmt == formatEncode6A[index])
+ case IF_EN6A:
+ for (index = 0; index < 6; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode6A[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN5A:
- for (index=0; index<5; index++)
- {
- if (fmt == formatEncode5A[index])
+ case IF_EN5A:
+ for (index = 0; index < 5; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode5A[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN5B:
- for (index=0; index<5; index++)
- {
- if (fmt == formatEncode5B[index])
+ case IF_EN5B:
+ for (index = 0; index < 5; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode5B[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN5C:
- for (index=0; index<5; index++)
- {
- if (fmt == formatEncode5C[index])
+ case IF_EN5C:
+ for (index = 0; index < 5; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode5C[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN4A:
- for (index=0; index<4; index++)
- {
- if (fmt == formatEncode4A[index])
+ case IF_EN4A:
+ for (index = 0; index < 4; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode4A[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN4B:
- for (index=0; index<4; index++)
- {
- if (fmt == formatEncode4B[index])
+ case IF_EN4B:
+ for (index = 0; index < 4; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode4B[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN4C:
- for (index=0; index<4; index++)
- {
- if (fmt == formatEncode4C[index])
+ case IF_EN4C:
+ for (index = 0; index < 4; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode4C[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN4D:
- for (index=0; index<4; index++)
- {
- if (fmt == formatEncode4D[index])
+ case IF_EN4D:
+ for (index = 0; index < 4; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode4D[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN4E:
- for (index=0; index<4; index++)
- {
- if (fmt == formatEncode4E[index])
+ case IF_EN4E:
+ for (index = 0; index < 4; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode4E[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN4F:
- for (index=0; index<4; index++)
- {
- if (fmt == formatEncode4F[index])
+ case IF_EN4F:
+ for (index = 0; index < 4; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode4F[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN4G:
- for (index=0; index<4; index++)
- {
- if (fmt == formatEncode4G[index])
+ case IF_EN4G:
+ for (index = 0; index < 4; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode4G[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3A:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3A[index])
+ case IF_EN3A:
+ for (index = 0; index < 3; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode3A[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3B:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3B[index])
+ case IF_EN3B:
+ for (index = 0; index < 3; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode3B[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3C:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3C[index])
+ case IF_EN3C:
+ for (index = 0; index < 3; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode3C[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3D:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3D[index])
+ case IF_EN3D:
+ for (index = 0; index < 3; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode3D[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3E:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3E[index])
+ case IF_EN3E:
+ for (index = 0; index < 3; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode3E[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3F:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3F[index])
+ case IF_EN3F:
+ for (index = 0; index < 3; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode3F[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3G:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3G[index])
+ case IF_EN3G:
+ for (index = 0; index < 3; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode3G[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3H:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3H[index])
+ case IF_EN3H:
+ for (index = 0; index < 3; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode3H[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN3I:
- for (index=0; index<3; index++)
- {
- if (fmt == formatEncode3I[index])
+ case IF_EN3I:
+ for (index = 0; index < 3; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode3I[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2A:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2A[index])
+ case IF_EN2A:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2A[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2B:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2B[index])
+ case IF_EN2B:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2B[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2C:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2C[index])
+ case IF_EN2C:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2C[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2D:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2D[index])
+ case IF_EN2D:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2D[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2E:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2E[index])
+ case IF_EN2E:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2E[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2F:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2F[index])
+ case IF_EN2F:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2F[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2G:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2G[index])
+ case IF_EN2G:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2G[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2H:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2H[index])
+ case IF_EN2H:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2H[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2I:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2I[index])
+ case IF_EN2I:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2I[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2J:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2J[index])
+ case IF_EN2J:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2J[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2K:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2K[index])
+ case IF_EN2K:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2K[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2L:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2L[index])
+ case IF_EN2L:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2L[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2M:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2M[index])
+ case IF_EN2M:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2M[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_EN2N:
- for (index=0; index<2; index++)
- {
- if (fmt == formatEncode2N[index])
+ case IF_EN2N:
+ for (index = 0; index < 2; index++)
{
- encoding_found = true;
- break;
+ if (fmt == formatEncode2N[index])
+ {
+ encoding_found = true;
+ break;
+ }
}
- }
- break;
+ break;
- case IF_BI_0A:
- case IF_BI_0B:
- case IF_BI_0C:
- case IF_BI_1A:
- case IF_BI_1B:
- case IF_BR_1A:
- case IF_BR_1B:
- case IF_LS_1A:
- case IF_LS_2A:
- case IF_LS_2B:
- case IF_LS_2C:
- case IF_LS_3A:
- case IF_LS_3B:
- case IF_LS_3C:
- case IF_DI_1A:
- case IF_DI_1B:
- case IF_DI_1C:
- case IF_DI_1D:
- case IF_DI_1E:
- case IF_DI_1F:
- case IF_DI_2A:
- case IF_DI_2B:
- case IF_DI_2C:
- case IF_DI_2D:
- case IF_DR_1D:
- case IF_DR_2A:
- case IF_DR_2B:
- case IF_DR_2C:
- case IF_DR_2D:
- case IF_DR_2E:
- case IF_DR_2F:
- case IF_DR_2G:
- case IF_DR_2H:
- case IF_DR_2I:
- case IF_DR_3A:
- case IF_DR_3B:
- case IF_DR_3C:
- case IF_DR_3D:
- case IF_DR_3E:
- case IF_DR_4A:
- case IF_DV_1A:
- case IF_DV_1B:
- case IF_DV_1C:
- case IF_DV_2A:
- case IF_DV_2B:
- case IF_DV_2C:
- case IF_DV_2D:
- case IF_DV_2E:
- case IF_DV_2F:
- case IF_DV_2G:
- case IF_DV_2H:
- case IF_DV_2I:
- case IF_DV_2J:
- case IF_DV_2K:
- case IF_DV_2L:
- case IF_DV_2M:
- case IF_DV_2N:
- case IF_DV_2O:
- case IF_DV_3A:
- case IF_DV_3AI:
- case IF_DV_3B:
- case IF_DV_3BI:
- case IF_DV_3C:
- case IF_DV_3D:
- case IF_DV_3DI:
- case IF_DV_3E:
- case IF_DV_4A:
- case IF_SN_0A:
- case IF_SI_0A:
- case IF_SI_0B:
-
- index = 0;
- encoding_found = true;
- break;
+ case IF_BI_0A:
+ case IF_BI_0B:
+ case IF_BI_0C:
+ case IF_BI_1A:
+ case IF_BI_1B:
+ case IF_BR_1A:
+ case IF_BR_1B:
+ case IF_LS_1A:
+ case IF_LS_2A:
+ case IF_LS_2B:
+ case IF_LS_2C:
+ case IF_LS_3A:
+ case IF_LS_3B:
+ case IF_LS_3C:
+ case IF_DI_1A:
+ case IF_DI_1B:
+ case IF_DI_1C:
+ case IF_DI_1D:
+ case IF_DI_1E:
+ case IF_DI_1F:
+ case IF_DI_2A:
+ case IF_DI_2B:
+ case IF_DI_2C:
+ case IF_DI_2D:
+ case IF_DR_1D:
+ case IF_DR_2A:
+ case IF_DR_2B:
+ case IF_DR_2C:
+ case IF_DR_2D:
+ case IF_DR_2E:
+ case IF_DR_2F:
+ case IF_DR_2G:
+ case IF_DR_2H:
+ case IF_DR_2I:
+ case IF_DR_3A:
+ case IF_DR_3B:
+ case IF_DR_3C:
+ case IF_DR_3D:
+ case IF_DR_3E:
+ case IF_DR_4A:
+ case IF_DV_1A:
+ case IF_DV_1B:
+ case IF_DV_1C:
+ case IF_DV_2A:
+ case IF_DV_2B:
+ case IF_DV_2C:
+ case IF_DV_2D:
+ case IF_DV_2E:
+ case IF_DV_2F:
+ case IF_DV_2G:
+ case IF_DV_2H:
+ case IF_DV_2I:
+ case IF_DV_2J:
+ case IF_DV_2K:
+ case IF_DV_2L:
+ case IF_DV_2M:
+ case IF_DV_2N:
+ case IF_DV_2O:
+ case IF_DV_3A:
+ case IF_DV_3AI:
+ case IF_DV_3B:
+ case IF_DV_3BI:
+ case IF_DV_3C:
+ case IF_DV_3D:
+ case IF_DV_3DI:
+ case IF_DV_3E:
+ case IF_DV_4A:
+ case IF_SN_0A:
+ case IF_SI_0A:
+ case IF_SI_0B:
+
+ index = 0;
+ encoding_found = true;
+ break;
- default:
+ default:
- encoding_found = false;
- break;
+ encoding_found = false;
+ break;
}
assert(encoding_found);
switch (index)
{
- case 0:
- assert(ins < ArrLen(insCodes1));
- code = insCodes1[ins];
- break;
- case 1:
- assert(ins < ArrLen(insCodes2));
- code = insCodes2[ins];
- break;
- case 2:
- assert(ins < ArrLen(insCodes3));
- code = insCodes3[ins];
- break;
- case 3:
- assert(ins < ArrLen(insCodes4));
- code = insCodes4[ins];
- break;
- case 4:
- assert(ins < ArrLen(insCodes5));
- code = insCodes5[ins];
- break;
- case 5:
- assert(ins < ArrLen(insCodes6));
- code = insCodes6[ins];
- break;
- case 6:
- assert(ins < ArrLen(insCodes7));
- code = insCodes7[ins];
- break;
- case 7:
- assert(ins < ArrLen(insCodes8));
- code = insCodes8[ins];
- break;
- case 8:
- assert(ins < ArrLen(insCodes9));
- code = insCodes9[ins];
- break;
+ case 0:
+ assert(ins < ArrLen(insCodes1));
+ code = insCodes1[ins];
+ break;
+ case 1:
+ assert(ins < ArrLen(insCodes2));
+ code = insCodes2[ins];
+ break;
+ case 2:
+ assert(ins < ArrLen(insCodes3));
+ code = insCodes3[ins];
+ break;
+ case 3:
+ assert(ins < ArrLen(insCodes4));
+ code = insCodes4[ins];
+ break;
+ case 4:
+ assert(ins < ArrLen(insCodes5));
+ code = insCodes5[ins];
+ break;
+ case 5:
+ assert(ins < ArrLen(insCodes6));
+ code = insCodes6[ins];
+ break;
+ case 6:
+ assert(ins < ArrLen(insCodes7));
+ code = insCodes7[ins];
+ break;
+ case 7:
+ assert(ins < ArrLen(insCodes8));
+ code = insCodes8[ins];
+ break;
+ case 8:
+ assert(ins < ArrLen(insCodes9));
+ code = insCodes9[ins];
+ break;
}
-
+
assert((code != BAD_CODE));
- return code;
+ return code;
}
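For readers new to this dispatch, a minimal standalone sketch of the pattern (hypothetical table and function name, not the emitter's own): an instruction tagged with a multi-encoding IF_ENxx group uses the position of the requested format inside that group as the index that selects which insCodesN table supplies the opcode bits.

    #include <cstddef>

    // Hypothetical stand-in for one formatEncodeXX group lookup.
    static int FindEncodingIndex(const int* group, size_t groupLen, int fmt)
    {
        for (size_t i = 0; i < groupLen; i++)
        {
            if (group[i] == fmt)
            {
                return (int)i; // index 0 -> insCodes1, index 1 -> insCodes2, ...
            }
        }
        return -1; // 'fmt' is not a legal encoding for this instruction
    }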
-// true if this 'imm' can be encoded as a input operand to a mov instruction
+// true if this 'imm' can be encoded as an input operand to a mov instruction
/*static*/ bool emitter::emitIns_valid_imm_for_mov(INT64 imm, emitAttr size)
{
// Check for "MOV (wide immediate)".
@@ -1970,7 +1962,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
return false;
}
-// true if this 'imm' can be encoded as a input operand to a vector movi instruction
+// true if this 'imm' can be encoded as an input operand to a vector movi instruction
/*static*/ bool emitter::emitIns_valid_imm_for_movi(INT64 imm, emitAttr elemsize)
{
if (elemsize == EA_8BYTE)
@@ -1996,7 +1988,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
// First try the standard 'byteShifted immediate' imm(i8,bySh)
if (canEncodeByteShiftedImm(imm, elemsize, true))
return true;
-
+
// Next try the ones-complement form of the 'immediate' imm(i8,bySh)
ssize_t notOfImm = NOT_helper(imm, getBitWidth(elemsize));
if (canEncodeByteShiftedImm(notOfImm, elemsize, true))
@@ -2005,33 +1997,33 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
return false;
}
-// true if this 'imm' can be encoded as a input operand to a fmov instruction
+// true if this 'imm' can be encoded as an input operand to a fmov instruction
/*static*/ bool emitter::emitIns_valid_imm_for_fmov(double immDbl)
{
if (canEncodeFloatImm8(immDbl))
return true;
-
+
return false;
}
-// true if this 'imm' can be encoded as a input operand to an add instruction
+// true if this 'imm' can be encoded as an input operand to an add instruction
/*static*/ bool emitter::emitIns_valid_imm_for_add(INT64 imm, emitAttr size)
{
if (unsigned_abs(imm) <= 0x0fff)
return true;
- else if (canEncodeWithShiftImmBy12(imm)) // Try the shifted by 12 encoding
+ else if (canEncodeWithShiftImmBy12(imm)) // Try the shifted by 12 encoding
return true;
-
+
return false;
}
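A standalone restatement of the same test (illustrative only; FitsAddSubImmediate is my own name, and unsigned_abs/canEncodeWithShiftImmBy12 are the emitter internals it mirrors):

    #include <cstdint>

    // add/sub immediates: a 12-bit magnitude, optionally shifted left by 12.
    static bool FitsAddSubImmediate(int64_t imm)
    {
        uint64_t v = (imm < 0) ? (0ull - (uint64_t)imm) : (uint64_t)imm; // unsigned magnitude
        if (v <= 0x0FFF)
        {
            return true; // plain 12-bit form
        }
        return ((v & 0xFFF) == 0) && ((v >> 12) <= 0x0FFF); // "LSL #12" form
    }

For example, 0x123 and 0x123000 pass, while 0x123400 does not.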
-// true if this 'imm' can be encoded as a input operand to an non-add/sub alu instruction
+// true if this 'imm' can be encoded as an input operand to a non-add/sub alu instruction
/*static*/ bool emitter::emitIns_valid_imm_for_cmp(INT64 imm, emitAttr size)
{
return emitIns_valid_imm_for_add(imm, size);
}
-// true if this 'imm' can be encoded as a input operand to an non-add/sub alu instruction
+// true if this 'imm' can be encoded as an input operand to a non-add/sub alu instruction
/*static*/ bool emitter::emitIns_valid_imm_for_alu(INT64 imm, emitAttr size)
{
if (canEncodeBitMaskImm(imm, size))
@@ -2040,26 +2032,26 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
return false;
}
-// true if this 'imm' can be encoded as the offset in a ldr/str instruction
+// true if this 'imm' can be encoded as the offset in a ldr/str instruction
/*static*/ bool emitter::emitIns_valid_imm_for_ldst_offset(INT64 imm, emitAttr attr)
{
if (imm == 0)
- return true; // Encodable using IF_LS_2A
+ return true; // Encodable using IF_LS_2A
if ((imm >= -256) && (imm <= 255))
- return true; // Encodable using IF_LS_2C (or possibly IF_LS_2B)
+ return true; // Encodable using IF_LS_2C (or possibly IF_LS_2B)
if (imm < 0)
- return false; // not encodable
+ return false; // not encodable
- emitAttr size = EA_SIZE(attr);
- unsigned scale = NaturalScale_helper(size);
- ssize_t mask = size - 1; // the mask of low bits that must be zero to encode the immediate
+ emitAttr size = EA_SIZE(attr);
+ unsigned scale = NaturalScale_helper(size);
+ ssize_t mask = size - 1; // the mask of low bits that must be zero to encode the immediate
if (((imm & mask) == 0) && ((imm >> scale) < 0x1000))
- return true; // Encodable using IF_LS_2B
+ return true; // Encodable using IF_LS_2B
- return false; // not encodable
+ return false; // not encodable
}
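The same predicate, written as a standalone sketch (my own helper; 'sizeBytes' stands for the byte count that EA_SIZE(attr) denotes):

    #include <cstdint>

    // ldr/str offsets: zero, a 9-bit signed offset, or a size-scaled unsigned 12-bit offset.
    static bool FitsLdStOffset(int64_t imm, unsigned sizeBytes)
    {
        if (imm == 0)
        {
            return true; // register-indirect form (IF_LS_2A)
        }
        if ((imm >= -256) && (imm <= 255))
        {
            return true; // unscaled 9-bit signed form (IF_LS_2C)
        }
        if (imm < 0)
        {
            return false;
        }
        unsigned scale = 0; // log2(sizeBytes), as NaturalScale_helper computes below
        for (unsigned s = sizeBytes; s > 1; s >>= 1)
        {
            scale++;
        }
        int64_t mask = (int64_t)sizeBytes - 1; // low bits that must be zero
        return ((imm & mask) == 0) && ((imm >> scale) < 0x1000); // scaled 12-bit form (IF_LS_2B)
    }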
/************************************************************************
@@ -2069,25 +2061,24 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
/*static*/ unsigned emitter::NaturalScale_helper(emitAttr size)
{
- assert(size == EA_1BYTE || size == EA_2BYTE || size == EA_4BYTE ||
- size == EA_8BYTE || size == EA_16BYTE);
-
+ assert(size == EA_1BYTE || size == EA_2BYTE || size == EA_4BYTE || size == EA_8BYTE || size == EA_16BYTE);
+
unsigned result = 0;
- unsigned utemp = (unsigned) size;
-
+ unsigned utemp = (unsigned)size;
+
// Compute log base 2 of utemp (aka 'size')
while (utemp > 1)
{
result++;
utemp >>= 1;
}
-
+
return result;
}
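A quick worked example of the scale (my own numbers): for an 8-byte access, EA_8BYTE carries the byte count 8, so the loop shifts 8 -> 4 -> 2 -> 1 and returns 3; the offset check above then requires the low 3 bits of the offset to be clear and encodes imm >> 3 in the 12-bit field.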
/************************************************************************
*
- * A helper method to perform a Rotate-Right shift operation
+ * A helper method to perform a Rotate-Right shift operation
* the source is 'value' and it is rotated right by 'sh' bits
* 'value' is considered to be a fixed size 'width' set of bits.
*
@@ -2096,26 +2087,26 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* result is '11000011'
*/
-/*static*/ UINT64 emitter::ROR_helper(UINT64 value, unsigned sh, unsigned width)
+/*static*/ UINT64 emitter::ROR_helper(UINT64 value, unsigned sh, unsigned width)
{
assert(width <= 64);
// Check that 'value' fits in 'width' bits
assert((width == 64) || (value < (1ULL << width)));
- // We don't support shifts >= width
+ // We don't support shifts >= width
assert(sh < width);
UINT64 result;
unsigned rsh = sh;
- unsigned lsh = width-rsh;
+ unsigned lsh = width - rsh;
- result = (value >> rsh);
+ result = (value >> rsh);
result |= (value << lsh);
if (width < 64)
{
// mask off any extra bits that we got from the left shift
- result &= ((1ULL << width) - 1);
+ result &= ((1ULL << width) - 1);
}
return result;
}
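A standalone check of the rotate, with my own input values (the width-masked double shift mirrors the body above):

    #include <cassert>
    #include <cstdint>

    static uint64_t RorWidth(uint64_t value, unsigned sh, unsigned width)
    {
        assert((sh > 0) && (sh < width) && (width <= 64));
        assert((width == 64) || (value < (1ULL << width))); // 'value' must fit in 'width' bits
        uint64_t result = (value >> sh) | (value << (width - sh));
        if (width < 64)
        {
            result &= (1ULL << width) - 1; // drop the bits pushed above 'width'
        }
        return result;
    }

    int main()
    {
        assert(RorWidth(0x0F /*00001111*/, 2, 8) == 0xC3 /*11000011*/);
        return 0;
    }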
@@ -2129,7 +2120,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* result is '10110100'
*/
-/*static*/ UINT64 emitter::NOT_helper(UINT64 value, unsigned width)
+/*static*/ UINT64 emitter::NOT_helper(UINT64 value, unsigned width)
{
assert(width <= 64);
@@ -2138,12 +2129,11 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
if (width < 64)
{
// Check that 'value' fits in 'width' bits. Don't consider "sign" bits above width.
- UINT64 maxVal = 1ULL << width;
- UINT64 lowBitsMask = maxVal - 1;
+ UINT64 maxVal = 1ULL << width;
+ UINT64 lowBitsMask = maxVal - 1;
UINT64 signBitsMask = ~lowBitsMask | (1ULL << (width - 1)); // The high bits must be set, and the top bit
// (sign bit) must be set.
- assert((value < maxVal) ||
- ((value & signBitsMask) == signBitsMask));
+ assert((value < maxVal) || ((value & signBitsMask) == signBitsMask));
// mask off any extra bits that we got from the complement operation
result &= lowBitsMask;
@@ -2152,7 +2142,6 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
return result;
}
-
/************************************************************************
*
* A helper method to perform a bit Replicate operation
@@ -2165,14 +2154,14 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* 0xE3E3E3E3E3E3E3E3
*/
-/*static*/ UINT64 emitter::Replicate_helper(UINT64 value, unsigned width, emitAttr size)
+/*static*/ UINT64 emitter::Replicate_helper(UINT64 value, unsigned width, emitAttr size)
{
assert(emitter::isValidGeneralDatasize(size));
unsigned immWidth = (size == EA_8BYTE) ? 64 : 32;
assert(width <= immWidth);
- UINT64 result = value;
+ UINT64 result = value;
unsigned filledBits = width;
while (filledBits < immWidth)
@@ -2191,21 +2180,21 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* 'size' specifies the size of the result (64 or 32 bits)
*/
-/*static*/ INT64 emitter::emitDecodeBitMaskImm(const emitter::bitMaskImm bmImm, emitAttr size)
+/*static*/ INT64 emitter::emitDecodeBitMaskImm(const emitter::bitMaskImm bmImm, emitAttr size)
{
- assert(isValidGeneralDatasize(size)); // Only EA_4BYTE or EA_8BYTE forms
+ assert(isValidGeneralDatasize(size)); // Only EA_4BYTE or EA_8BYTE forms
- unsigned N = bmImm.immN; // read the N,R and S values from the 'bitMaskImm' encoding
+ unsigned N = bmImm.immN; // read the N,R and S values from the 'bitMaskImm' encoding
unsigned R = bmImm.immR;
unsigned S = bmImm.immS;
- unsigned elemWidth = 64; // used when immN == 1
+ unsigned elemWidth = 64; // used when immN == 1
- if (bmImm.immN == 0) // find the smaller elemWidth when immN == 0
+ if (bmImm.immN == 0) // find the smaller elemWidth when immN == 0
{
// Scan S for the highest bit not set
elemWidth = 32;
- for (unsigned bitNum=5; bitNum > 0; bitNum--)
+ for (unsigned bitNum = 5; bitNum > 0; bitNum--)
{
unsigned oneBit = elemWidth;
if ((S & oneBit) == 0)
@@ -2218,13 +2207,13 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
assert(size == EA_8BYTE);
}
- unsigned maskSR = elemWidth-1;
+ unsigned maskSR = elemWidth - 1;
S &= maskSR;
R &= maskSR;
- // encoding for S is one less than the number of consecutive one bits
- S++; // Number of consecutive ones to generate in 'welem'
+ // encoding for S is one less than the number of consecutive one bits
+ S++; // Number of consecutive ones to generate in 'welem'
// At this point:
//
@@ -2233,11 +2222,11 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
// 'R' is the number of bits that we will Rotate Right the immediate
// 'size' selects the final size of the immedate that we return (64 or 32 bits)
- assert(S < elemWidth); // 'elemWidth' consecutive one's is a reserved encoding
+ assert(S < elemWidth); // 'elemWidth' consecutive one's is a reserved encoding
+
+ UINT64 welem;
+ UINT64 wmask;
- UINT64 welem;
- UINT64 wmask;
-
welem = (1ULL << S) - 1;
wmask = ROR_helper(welem, R, elemWidth);
@@ -2246,7 +2235,6 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
return wmask;
}
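A standalone decode that follows the same recipe (illustrative; it assumes a valid encoding, as the asserts above do, and folds in the element replication that widens the pattern to the output size):

    #include <cassert>
    #include <cstdint>

    static uint64_t DecodeBitMaskImm(unsigned N, unsigned R, unsigned S, unsigned outWidth /* 32 or 64 */)
    {
        unsigned elemWidth = 64; // used when N == 1
        if (N == 0)
        {
            elemWidth = 32; // the highest clear bit of S picks the element width
            while ((S & elemWidth) != 0)
            {
                elemWidth /= 2;
            }
        }
        S &= (elemWidth - 1);
        R &= (elemWidth - 1);
        assert(S + 1 < elemWidth); // an all-ones element is a reserved encoding

        uint64_t welem = (1ULL << (S + 1)) - 1; // S+1 consecutive one bits
        uint64_t wmask = welem;
        if (R != 0) // rotate right by R within the element
        {
            wmask = (welem >> R) | (welem << (elemWidth - R));
        }
        if (elemWidth < 64)
        {
            wmask &= (1ULL << elemWidth) - 1;
        }
        for (unsigned w = elemWidth; w < outWidth; w *= 2) // replicate up to the output width
        {
            wmask |= (wmask << w);
        }
        return wmask;
    }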
-
/*****************************************************************************
*
* Check if an immediate can use the left shifted by 12 bits encoding
@@ -2256,31 +2244,30 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
{
if (imm < 0)
{
- imm = -imm; // convert to unsigned
+ imm = -imm; // convert to unsigned
}
- if (imm < 0)
+ if (imm < 0)
{
- return false; // Must be MIN_INT64
+ return false; // Must be MIN_INT64
}
- if ((imm & 0xfff) != 0) // Now the low 12 bits all have to be zero
+ if ((imm & 0xfff) != 0) // Now the low 12 bits all have to be zero
{
return false;
}
-
- imm >>= 12; // shift right by 12 bits
- return (imm <= 0x0fff); // Does it fit in 12 bits
-}
+ imm >>= 12; // shift right by 12 bits
+ return (imm <= 0x0fff); // Does it fit in 12 bits
+}
/*****************************************************************************
*
* Normalize the 'imm' so that the upper bits, as defined by 'size' are zero
*/
-/*static*/ INT64 emitter::normalizeImm64(INT64 imm, emitAttr size)
+/*static*/ INT64 emitter::normalizeImm64(INT64 imm, emitAttr size)
{
unsigned immWidth = getBitWidth(size);
INT64 result = imm;
@@ -2288,26 +2275,25 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
if (immWidth < 64)
{
// Check that 'imm' fits in 'immWidth' bits. Don't consider "sign" bits above width.
- INT64 maxVal = 1LL << immWidth;
- INT64 lowBitsMask = maxVal - 1;
- INT64 hiBitsMask = ~lowBitsMask;
- INT64 signBitsMask = hiBitsMask | (1LL << (immWidth - 1)); // The high bits must be set, and the top bit (sign bit) must be set.
- assert((imm < maxVal) ||
- ((imm & signBitsMask) == signBitsMask));
-
- // mask off the hiBits
+ INT64 maxVal = 1LL << immWidth;
+ INT64 lowBitsMask = maxVal - 1;
+ INT64 hiBitsMask = ~lowBitsMask;
+ INT64 signBitsMask =
+ hiBitsMask | (1LL << (immWidth - 1)); // The high bits must be set, and the top bit (sign bit) must be set.
+ assert((imm < maxVal) || ((imm & signBitsMask) == signBitsMask));
+
+ // mask off the hiBits
result &= lowBitsMask;
}
return result;
}
-
/*****************************************************************************
*
* Normalize the 'imm' so that the upper bits, as defined by 'size' are zero
*/
-/*static*/ INT32 emitter::normalizeImm32(INT32 imm, emitAttr size)
+/*static*/ INT32 emitter::normalizeImm32(INT32 imm, emitAttr size)
{
unsigned immWidth = getBitWidth(size);
INT32 result = imm;
@@ -2320,29 +2306,26 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
INT32 hiBitsMask = ~lowBitsMask;
INT32 signBitsMask = hiBitsMask | (1 << (immWidth - 1)); // The high bits must be set, and the top bit
// (sign bit) must be set.
- assert((imm < maxVal) ||
- ((imm & signBitsMask) == signBitsMask));
+ assert((imm < maxVal) || ((imm & signBitsMask) == signBitsMask));
- // mask off the hiBits
+ // mask off the hiBits
result &= lowBitsMask;
}
return result;
}
-
/************************************************************************
*
- * returns true if 'imm' of 'size bits (32/64) can be encoded
+ * returns true if 'imm' of 'size' bits (32/64) can be encoded
* using the ARM64 'bitmask immediate' form.
* When a non-null value is passed for 'wbBMI' then this method
 * writes back the 'N','S' and 'R' values used to encode this immediate
*
*/
-/*static*/ bool emitter::canEncodeBitMaskImm(INT64 imm, emitAttr size,
- emitter::bitMaskImm* wbBMI)
+/*static*/ bool emitter::canEncodeBitMaskImm(INT64 imm, emitAttr size, emitter::bitMaskImm* wbBMI)
{
- assert(isValidGeneralDatasize(size)); // Only EA_4BYTE or EA_8BYTE forms
+ assert(isValidGeneralDatasize(size)); // Only EA_4BYTE or EA_8BYTE forms
unsigned immWidth = (size == EA_8BYTE) ? 64 : 32;
unsigned maxLen = (size == EA_8BYTE) ? 6 : 5;
@@ -2356,19 +2339,19 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
// len=5, elemWidth is 32 bits
// (optionally) len=6, elemWidth is 64 bits
//
- for (unsigned len=1; (len <= maxLen); len++)
+ for (unsigned len = 1; (len <= maxLen); len++)
{
unsigned elemWidth = 1 << len;
- UINT64 elemMask = ((UINT64) -1) >> (64 - elemWidth);
- UINT64 tempImm = (UINT64) imm; // A working copy of 'imm' that we can mutate
- UINT64 elemVal = tempImm & elemMask; // The low 'elemWidth' bits of 'imm'
-
+ UINT64 elemMask = ((UINT64)-1) >> (64 - elemWidth);
+ UINT64 tempImm = (UINT64)imm; // A working copy of 'imm' that we can mutate
+ UINT64 elemVal = tempImm & elemMask; // The low 'elemWidth' bits of 'imm'
+
// Check for all 1's or 0's as these can't be encoded
if ((elemVal == 0) || (elemVal == elemMask))
continue;
// 'checkedBits' is the count of bits that are known to match 'elemVal' when replicated
- unsigned checkedBits = elemWidth; // by definition the first 'elemWidth' bits match
+ unsigned checkedBits = elemWidth; // by definition the first 'elemWidth' bits match
// Now check to see if each of the next bits match...
//
@@ -2380,7 +2363,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
if (nextElem != elemVal)
{
// Not matching, exit this loop and checkedBits will not be equal to immWidth
- break;
+ break;
}
// The 'nextElem' is matching, so increment 'checkedBits'
@@ -2391,13 +2374,13 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
if (checkedBits == immWidth)
{
// We are not quite done, since the only values that we can encode as a
- // 'bitmask immediate' are those that can be formed by starting with a
+ // 'bitmask immediate' are those that can be formed by starting with a
// bit string of 0*1* that is rotated by some number of bits.
- //
+ //
// We check to see if 'elemVal' can be formed using these restrictions.
//
- // Observation:
- // Rotating by one bit any value that passes these restrictions
+ // Observation:
+ // Rotating by one bit any value that passes these restrictions
         // can be xor-ed with the original value and will result in a string
// of bits that have exactly two 1 bits: 'elemRorXor'
// Further the distance between the two one bits tells us the value
@@ -2412,27 +2395,27 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
// compute S 45678--- ---5678- ---3210-
// compute R 01234567 ---34567 ------67
- UINT64 elemRor = ROR_helper(elemVal, 1, elemWidth); // Rotate 'elemVal' Right by one bit
- UINT64 elemRorXor = elemVal ^ elemRor; // Xor elemVal and elemRor
+ UINT64 elemRor = ROR_helper(elemVal, 1, elemWidth); // Rotate 'elemVal' Right by one bit
+ UINT64 elemRorXor = elemVal ^ elemRor; // Xor elemVal and elemRor
// If we only have a two-bit change in elemROR then we can form a mask for this value
unsigned bitCount = 0;
UINT64 oneBit = 0x1;
- unsigned R = elemWidth; // R is shift count for ROR (rotate right shift)
- unsigned S = 0; // S is number of consecutive one bits
+ unsigned R = elemWidth; // R is shift count for ROR (rotate right shift)
+ unsigned S = 0; // S is number of consecutive one bits
int incr = -1;
// Loop over the 'elemWidth' bits in 'elemRorXor'
//
- for (unsigned bitNum=0; bitNum<elemWidth; bitNum++)
+ for (unsigned bitNum = 0; bitNum < elemWidth; bitNum++)
{
if (incr == -1)
{
- R--; // We decrement R by one whenever incr is -1
+ R--; // We decrement R by one whenever incr is -1
}
if (bitCount == 1)
{
- S += incr; // We incr/decr S, after we find the first one bit in 'elemRorXor'
+ S += incr; // We incr/decr S, after we find the first one bit in 'elemRorXor'
}
// Is this bit position a 1 bit in 'elemRorXor'?
@@ -2448,20 +2431,20 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
if (toZeros)
{
// S :: Count down from elemWidth
- S = elemWidth;
+ S = elemWidth;
incr = -1;
}
- else // this 1 bit represent a transition to one bits.
+                    else // this 1 bit represents a transition to one bits.
{
// S :: Count up from zero
- S = 0;
+ S = 0;
incr = +1;
}
}
else // bitCount > 1
{
// We found the second (or third...) 1 bit in 'elemRorXor'
- incr = 0; // stop decrementing 'R'
+ incr = 0; // stop decrementing 'R'
if (bitCount > 2)
{
@@ -2469,7 +2452,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
// This means that 'elemVal' can't be encoded
// using a 'bitmask immediate'.
//
- // Furthermore, it will continue to fail
+ // Furthermore, it will continue to fail
// with any larger 'len' that we try.
// so just return false.
//
@@ -2488,7 +2471,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
assert(bitCount == 2);
if (bitCount != 2)
return false;
-
+
// Perform some sanity checks on the values of 'S' and 'R'
assert(S > 0);
assert(S < elemWidth);
@@ -2499,7 +2482,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
if (wbBMI != nullptr)
{
- // The encoding used for S is one less than the
+ // The encoding used for S is one less than the
// number of consecutive one bits
S--;
@@ -2514,8 +2497,8 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
//
// The upper bits need to be complemented, followed by a zero bit
// then the value of 'S-1'
- //
- unsigned upperBitsOfS = 64 - (1 << (len+1));
+ //
+ unsigned upperBitsOfS = 64 - (1 << (len + 1));
S |= upperBitsOfS;
}
wbBMI->immR = R;
@@ -2556,51 +2539,50 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* 'size' specifies the size of the result (64 or 32 bits)
*/
-/*static*/ INT64 emitter::emitDecodeHalfwordImm(const emitter::halfwordImm hwImm, emitAttr size)
+/*static*/ INT64 emitter::emitDecodeHalfwordImm(const emitter::halfwordImm hwImm, emitAttr size)
{
- assert(isValidGeneralDatasize(size)); // Only EA_4BYTE or EA_8BYTE forms
+ assert(isValidGeneralDatasize(size)); // Only EA_4BYTE or EA_8BYTE forms
- unsigned hw = hwImm.immHW;
- INT64 val = (INT64) hwImm.immVal;
+ unsigned hw = hwImm.immHW;
+ INT64 val = (INT64)hwImm.immVal;
assert((hw <= 1) || (size == EA_8BYTE));
-
- INT64 result = val << (16 * hw);
+
+ INT64 result = val << (16 * hw);
return result;
}
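The encode-side check that follows boils down to asking whether the value lives entirely in one aligned 16-bit lane; a standalone restatement (my own helper name):

    #include <cstdint>

    static bool FitsHalfwordImm(uint64_t imm, bool is64Bit)
    {
        if (!is64Bit)
        {
            imm &= 0xFFFFFFFFull; // only the low 32 bits participate
        }
        unsigned maxHW = is64Bit ? 4 : 2;
        for (unsigned hw = 0; hw < maxHW; hw++)
        {
            uint64_t lane = 0xFFFFull << (16 * hw); // the halfword selected by 'hw'
            if ((imm & ~lane) == 0)
            {
                return true; // every bit outside this lane is zero -> encodable as imm(i16,hw)
            }
        }
        return false;
    }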
/************************************************************************
*
- * returns true if 'imm' of 'size' bits (32/64) can be encoded
+ * returns true if 'imm' of 'size' bits (32/64) can be encoded
* using the ARM64 'halfword immediate' form.
* When a non-null value is passed for 'wbHWI' then this method
 * writes back the 'immHW' and 'immVal' values used to encode this immediate
*
*/
-/*static*/ bool emitter::canEncodeHalfwordImm(INT64 imm, emitAttr size,
- emitter::halfwordImm* wbHWI)
+/*static*/ bool emitter::canEncodeHalfwordImm(INT64 imm, emitAttr size, emitter::halfwordImm* wbHWI)
{
- assert(isValidGeneralDatasize(size)); // Only EA_4BYTE or EA_8BYTE forms
+ assert(isValidGeneralDatasize(size)); // Only EA_4BYTE or EA_8BYTE forms
- unsigned immWidth = (size == EA_8BYTE) ? 64 : 32;
- unsigned maxHW = (size == EA_8BYTE) ? 4 : 2;
+ unsigned immWidth = (size == EA_8BYTE) ? 64 : 32;
+ unsigned maxHW = (size == EA_8BYTE) ? 4 : 2;
// setup immMask to a (EA_4BYTE) 0x00000000_FFFFFFFF or (EA_8BYTE) 0xFFFFFFFF_FFFFFFFF
- const UINT64 immMask = ((UINT64) -1) >> (64 - immWidth);
- const INT64 mask16 = (INT64) 0xFFFF;
+ const UINT64 immMask = ((UINT64)-1) >> (64 - immWidth);
+ const INT64 mask16 = (INT64)0xFFFF;
imm = normalizeImm64(imm, size);
// Try each of the valid hw shift sizes
- for (unsigned hw=0; (hw < maxHW); hw++)
+ for (unsigned hw = 0; (hw < maxHW); hw++)
{
- INT64 curMask = mask16 << (hw * 16); // Represents the mask of the bits in the current halfword
+ INT64 curMask = mask16 << (hw * 16); // Represents the mask of the bits in the current halfword
INT64 checkBits = immMask & ~curMask;
-
+
// Excluding the current halfword (using ~curMask)
// does the immediate have zero bits in every other bit that we care about?
- // note we care about all 64-bits for EA_8BYTE
+ // note we care about all 64-bits for EA_8BYTE
// and we care about the lowest 32 bits for EA_4BYTE
//
if ((imm & checkBits) == 0)
@@ -2609,7 +2591,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
//
if (wbHWI != nullptr)
{
- INT64 val = ((imm & curMask) >> (hw * 16)) & mask16;
+ INT64 val = ((imm & curMask) >> (hw * 16)) & mask16;
wbHWI->immHW = hw;
wbHWI->immVal = val;
@@ -2630,7 +2612,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* Convert a 64-bit immediate into its 'halfword immediate' representation imm(i16,hw)
*/
-/*static*/ emitter::halfwordImm emitter::emitEncodeHalfwordImm(INT64 imm, emitAttr size)
+/*static*/ emitter::halfwordImm emitter::emitEncodeHalfwordImm(INT64 imm, emitAttr size)
{
emitter::halfwordImm result;
result.immHWVal = 0;
@@ -2648,16 +2630,16 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* 'size' specifies the size of the result (16 or 32 bits)
*/
-/*static*/ INT32 emitter::emitDecodeByteShiftedImm(const emitter::byteShiftedImm bsImm, emitAttr size)
+/*static*/ INT32 emitter::emitDecodeByteShiftedImm(const emitter::byteShiftedImm bsImm, emitAttr size)
{
- bool onesShift = (bsImm.immOnes == 1);
- unsigned bySh = bsImm.immBY; // Num Bytes to shift 0,1,2,3
- INT32 val = (INT32) bsImm.immVal; // 8-bit immediate
- INT32 result = val;
+ bool onesShift = (bsImm.immOnes == 1);
+ unsigned bySh = bsImm.immBY; // Num Bytes to shift 0,1,2,3
+ INT32 val = (INT32)bsImm.immVal; // 8-bit immediate
+ INT32 result = val;
if (bySh > 0)
{
- assert((size == EA_2BYTE) || (size == EA_4BYTE)); // Only EA_2BYTE or EA_4BYTE forms
+ assert((size == EA_2BYTE) || (size == EA_4BYTE)); // Only EA_2BYTE or EA_4BYTE forms
if (size == EA_2BYTE)
{
assert(bySh < 2);
@@ -2666,7 +2648,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
{
assert(bySh < 4);
}
-
+
result <<= (8 * bySh);
if (onesShift)
@@ -2679,57 +2661,59 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
/************************************************************************
*
- * returns true if 'imm' of 'size' bits (16/32) can be encoded
+ * returns true if 'imm' of 'size' bits (16/32) can be encoded
* using the ARM64 'byteShifted immediate' form.
* When a non-null value is passed for 'wbBSI' then this method
 * writes back the 'immBY' and 'immVal' values used to encode this immediate
*
*/
-/*static*/ bool emitter::canEncodeByteShiftedImm(INT64 imm, emitAttr size, bool allow_MSL,
- emitter::byteShiftedImm* wbBSI)
+/*static*/ bool emitter::canEncodeByteShiftedImm(INT64 imm,
+ emitAttr size,
+ bool allow_MSL,
+ emitter::byteShiftedImm* wbBSI)
{
bool canEncode = false;
- bool onesShift = false; // true if we use the shifting ones variant
- unsigned bySh = 0; // number of bytes to shift: 0, 1, 2, 3
- unsigned imm8 = 0; // immediate to use in the encoding
+ bool onesShift = false; // true if we use the shifting ones variant
+ unsigned bySh = 0; // number of bytes to shift: 0, 1, 2, 3
+ unsigned imm8 = 0; // immediate to use in the encoding
imm = normalizeImm64(imm, size);
if (size == EA_1BYTE)
{
- imm8 = (unsigned) imm;
+ imm8 = (unsigned)imm;
assert(imm8 < 0x100);
canEncode = true;
}
else if (size == EA_8BYTE)
{
- imm8 = (unsigned) imm;
+ imm8 = (unsigned)imm;
assert(imm8 < 0x100);
canEncode = true;
}
else
{
- assert((size == EA_2BYTE) || (size == EA_4BYTE)); // Only EA_2BYTE or EA_4BYTE forms
+ assert((size == EA_2BYTE) || (size == EA_4BYTE)); // Only EA_2BYTE or EA_4BYTE forms
- unsigned immWidth = (size == EA_4BYTE) ? 32 : 16;
- unsigned maxBY = (size == EA_4BYTE) ? 4 : 2;
+ unsigned immWidth = (size == EA_4BYTE) ? 32 : 16;
+ unsigned maxBY = (size == EA_4BYTE) ? 4 : 2;
// setup immMask to a (EA_2BYTE) 0x0000FFFF or (EA_4BYTE) 0xFFFFFFFF
- const UINT32 immMask = ((UINT32) -1) >> (32 - immWidth);
- const INT32 mask8 = (INT32) 0xFF;
+ const UINT32 immMask = ((UINT32)-1) >> (32 - immWidth);
+ const INT32 mask8 = (INT32)0xFF;
// Try each of the valid by shift sizes
- for (bySh=0; (bySh < maxBY); bySh++)
+ for (bySh = 0; (bySh < maxBY); bySh++)
{
- INT32 curMask = mask8 << (bySh * 8); // Represents the mask of the bits in the current byteShifted
+ INT32 curMask = mask8 << (bySh * 8); // Represents the mask of the bits in the current byteShifted
INT32 checkBits = immMask & ~curMask;
INT32 immCheck = (imm & checkBits);
-
+
// Excluding the current byte (using ~curMask)
// does the immediate have zero bits in every other bit that we care about?
// or can be use the shifted one variant?
- // note we care about all 32-bits for EA_4BYTE
+ // note we care about all 32-bits for EA_4BYTE
// and we care about the lowest 16 bits for EA_2BYTE
//
if (immCheck == 0)
@@ -2751,22 +2735,22 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
}
if (canEncode)
{
- imm8 = (unsigned) (((imm & curMask) >> (bySh * 8)) & mask8);
+ imm8 = (unsigned)(((imm & curMask) >> (bySh * 8)) & mask8);
break;
}
}
}
-
+
if (canEncode)
{
// Does the caller want us to return the imm(i8,bySh) encoding values?
//
if (wbBSI != nullptr)
{
- wbBSI->immOnes = onesShift;
+ wbBSI->immOnes = onesShift;
wbBSI->immBY = bySh;
wbBSI->immVal = imm8;
-
+
// Verify that what we are returning is correct.
assert(imm == emitDecodeByteShiftedImm(*wbBSI, size));
}
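
For reference, the 'byteShifted immediate' used by the vector movi/mvni/orr/bic paths packs an 8-bit value together with a byte shift amount and an optional 'shifting ones' (MSL) flag. A minimal standalone sketch of the decode step, using illustrative names rather than the emitter's own helpers:

#include <cassert>
#include <cstdint>

// Decode imm(i8,bySh): an 8-bit value shifted left by 0, 1, 2 or 3 bytes.
// With the MSL ("shifting ones") variant the vacated low bits are filled
// with ones instead of zeros.
int32_t decodeByteShifted(unsigned imm8, unsigned byteShift, bool shiftOnes)
{
    assert(imm8 <= 0xFF);
    assert(byteShift <= 3);
    int32_t result = (int32_t)(imm8 << (8 * byteShift));
    if (shiftOnes)
    {
        result |= (1 << (8 * byteShift)) - 1; // fill the low bits with ones (MSL)
    }
    return result;
}

// Example: decodeByteShifted(0xAB, 2, false) == 0x00AB0000
//          decodeByteShifted(0xAB, 1, true)  == 0x0000ABFF
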
@@ -2800,7 +2784,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* inputs 'fpImm' a floatImm8 struct
*/
-/*static*/ double emitter::emitDecodeFloatImm8(const emitter::floatImm8 fpImm)
+/*static*/ double emitter::emitDecodeFloatImm8(const emitter::floatImm8 fpImm)
{
unsigned sign = fpImm.immSign;
unsigned exp = fpImm.immExp ^ 0x4;
@@ -2813,7 +2797,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
exp--;
}
- double result = ((double) mant) / ((double) scale);
+ double result = ((double)mant) / ((double)scale);
if (sign == 1)
{
result = -result;
@@ -2829,23 +2813,22 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
*
*/
-/*static*/ bool emitter::canEncodeFloatImm8(double immDbl,
- emitter::floatImm8* wbFPI)
+/*static*/ bool emitter::canEncodeFloatImm8(double immDbl, emitter::floatImm8* wbFPI)
{
- bool canEncode = false;
- double val = immDbl;
+ bool canEncode = false;
+ double val = immDbl;
int sign = 0;
if (val < 0.0)
{
- val = -val;
+ val = -val;
sign = 1;
}
int exp = 0;
while ((val < 1.0) && (exp >= -4))
{
- val *= 2.0;
+ val *= 2.0;
exp--;
}
while ((val >= 2.0) && (exp <= 5))
@@ -2855,14 +2838,14 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
}
exp += 3;
val *= 16.0;
- int ival = (int) val;
-
+ int ival = (int)val;
+
if ((exp >= 0) && (exp <= 7))
{
- if (val == (double) ival)
+ if (val == (double)ival)
{
canEncode = true;
-
+
if (wbFPI != nullptr)
{
ival -= 16;
@@ -2871,12 +2854,12 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
wbFPI->immSign = sign;
wbFPI->immExp = exp ^ 0x4;
wbFPI->immMant = ival;
- unsigned imm8 = wbFPI->immFPIVal;
+ unsigned imm8 = wbFPI->immFPIVal;
assert((imm8 >= 0) && (imm8 <= 0xff));
}
}
}
-
+
return canEncode;
}
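
The 8-bit floating-point immediate that canEncodeFloatImm8 and emitEncodeFloatImm8 handle can only represent values of the form +/-(n/16) * 2^e with n in 16..31 and e in -3..4, i.e. magnitudes from 0.125 up to 31.0. A brute-force standalone check, assuming illustrative naming:

#include <cmath>

// Sketch: true if 'value' is representable as an ARM64 8-bit FP immediate,
// i.e. value == +/- (n / 16.0) * 2^e with integer n in [16,31] and e in [-3,4].
bool isEncodableFloatImm8(double value)
{
    double v = std::fabs(value);
    for (int e = -3; e <= 4; e++)
    {
        for (int n = 16; n <= 31; n++)
        {
            if (v == (n / 16.0) * std::ldexp(1.0, e))
            {
                return true;
            }
        }
    }
    return false;
}

// Examples: 1.0, -0.5, 2.5 and 31.0 are encodable; 0.1 and 64.0 are not.
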
@@ -2885,7 +2868,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* Convert a double into its 'float 8-bit immediate' representation
*/
-/*static*/ emitter::floatImm8 emitter::emitEncodeFloatImm8(double immDbl)
+/*static*/ emitter::floatImm8 emitter::emitEncodeFloatImm8(double immDbl)
{
emitter::floatImm8 result;
result.immFPIVal = 0;
@@ -2898,36 +2881,36 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
/*****************************************************************************
*
- * For the given 'ins' returns the reverse instruction
+ * For the given 'ins' returns the reverse instruction
* if one exists, otherwise returns INS_INVALID
*/
-/*static*/ instruction emitter::insReverse(instruction ins)
+/*static*/ instruction emitter::insReverse(instruction ins)
{
switch (ins)
{
- case INS_add:
- return INS_sub;
- case INS_adds:
- return INS_subs;
+ case INS_add:
+ return INS_sub;
+ case INS_adds:
+ return INS_subs;
- case INS_sub:
- return INS_add;
- case INS_subs:
- return INS_adds;
+ case INS_sub:
+ return INS_add;
+ case INS_subs:
+ return INS_adds;
- case INS_cmp:
- return INS_cmn;
- case INS_cmn:
- return INS_cmp;
-
- case INS_ccmp:
- return INS_ccmn;
- case INS_ccmn:
- return INS_ccmp;
-
- default:
- return INS_invalid;
+ case INS_cmp:
+ return INS_cmn;
+ case INS_cmn:
+ return INS_cmp;
+
+ case INS_ccmp:
+ return INS_ccmn;
+ case INS_ccmn:
+ return INS_ccmp;
+
+ default:
+ return INS_invalid;
}
}
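
A concrete use of insReverse appears in the immediate paths further down: a request such as cmp x0, #-5 is emitted as cmn x0, #5 by negating the constant and flipping the instruction, and the add/adds versus sub/subs pairs can be swapped the same way when a negative immediate is folded.
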
@@ -2938,7 +2921,7 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* if one does not exist returns INS_OPTS_NONE
*/
-/*static*/ insOpts emitter::optMakeArrangement(emitAttr datasize, emitAttr elemsize)
+/*static*/ insOpts emitter::optMakeArrangement(emitAttr datasize, emitAttr elemsize)
{
insOpts result = INS_OPTS_NONE;
@@ -2946,42 +2929,42 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
{
switch (elemsize)
{
- case EA_1BYTE:
- result = INS_OPTS_8B;
- break;
- case EA_2BYTE:
- result = INS_OPTS_4H;
- break;
- case EA_4BYTE:
- result = INS_OPTS_2S;
- break;
- case EA_8BYTE:
- result = INS_OPTS_1D;
- break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ case EA_1BYTE:
+ result = INS_OPTS_8B;
+ break;
+ case EA_2BYTE:
+ result = INS_OPTS_4H;
+ break;
+ case EA_4BYTE:
+ result = INS_OPTS_2S;
+ break;
+ case EA_8BYTE:
+ result = INS_OPTS_1D;
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
}
}
else if (datasize == EA_16BYTE)
{
switch (elemsize)
{
- case EA_1BYTE:
- result = INS_OPTS_16B;
- break;
- case EA_2BYTE:
- result = INS_OPTS_8H;
- break;
- case EA_4BYTE:
- result = INS_OPTS_4S;
- break;
- case EA_8BYTE:
- result = INS_OPTS_2D;
- break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ case EA_1BYTE:
+ result = INS_OPTS_16B;
+ break;
+ case EA_2BYTE:
+ result = INS_OPTS_8H;
+ break;
+ case EA_4BYTE:
+ result = INS_OPTS_4S;
+ break;
+ case EA_8BYTE:
+ result = INS_OPTS_2D;
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
}
}
return result;
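
The arrangement names produced here are just lane count times lane size: an 8-byte vector of 2-byte elements is 4H, a 16-byte vector of 4-byte elements is 4S, and so on. A compact sketch of the same mapping, with illustrative naming:

// Sketch: arrangement name from vector size and element size, mirroring
// optMakeArrangement above; returns "" for combinations with no arrangement.
const char* arrangementName(unsigned vectorBytes, unsigned elemBytes)
{
    if (vectorBytes != 8 && vectorBytes != 16)
    {
        return "";
    }
    unsigned lanes = (elemBytes != 0) ? (vectorBytes / elemBytes) : 0; // e.g. 8/2 = 4 lanes
    switch (elemBytes)
    {
        case 1:  return (lanes == 8) ? "8B" : "16B";
        case 2:  return (lanes == 4) ? "4H" : "8H";
        case 4:  return (lanes == 2) ? "2S" : "4S";
        case 8:  return (lanes == 1) ? "1D" : "2D";
        default: return "";
    }
}

// Example: arrangementName(8, 2) == "4H", arrangementName(16, 4) == "4S".
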
@@ -2989,30 +2972,24 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
/*****************************************************************************
*
- * For the given 'datasize' and arrangement 'opts'
+ * For the given 'datasize' and arrangement 'opts'
 * returns true if the pair specifies a valid arrangement
*/
-/*static*/ bool emitter::isValidArrangement(emitAttr datasize, insOpts opt)
+/*static*/ bool emitter::isValidArrangement(emitAttr datasize, insOpts opt)
{
- if (datasize == EA_8BYTE)
+ if (datasize == EA_8BYTE)
{
- if ((opt == INS_OPTS_8B) ||
- (opt == INS_OPTS_4H) ||
- (opt == INS_OPTS_2S) ||
- (opt == INS_OPTS_1D))
+ if ((opt == INS_OPTS_8B) || (opt == INS_OPTS_4H) || (opt == INS_OPTS_2S) || (opt == INS_OPTS_1D))
{
return true;
- }
+ }
}
- else if (datasize == EA_16BYTE)
+ else if (datasize == EA_16BYTE)
{
- if ((opt == INS_OPTS_16B) ||
- (opt == INS_OPTS_8H) ||
- (opt == INS_OPTS_4S) ||
- (opt == INS_OPTS_2D))
+ if ((opt == INS_OPTS_16B) || (opt == INS_OPTS_8H) || (opt == INS_OPTS_4S) || (opt == INS_OPTS_2D))
{
return true;
- }
+ }
}
return false;
}
@@ -3020,18 +2997,14 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
// For the given 'arrangement' returns the 'datasize' specified by the vector register arrangement
// asserts and returns EA_UNKNOWN if an invalid 'arrangement' value is passed
//
-/*static*/ emitAttr emitter::optGetDatasize(insOpts arrangement)
+/*static*/ emitAttr emitter::optGetDatasize(insOpts arrangement)
{
- if ((arrangement == INS_OPTS_8B) ||
- (arrangement == INS_OPTS_4H) ||
- (arrangement == INS_OPTS_2S) ||
+ if ((arrangement == INS_OPTS_8B) || (arrangement == INS_OPTS_4H) || (arrangement == INS_OPTS_2S) ||
(arrangement == INS_OPTS_1D))
{
return EA_8BYTE;
- }
- else if ((arrangement == INS_OPTS_16B) ||
- (arrangement == INS_OPTS_8H) ||
- (arrangement == INS_OPTS_4S) ||
+ }
+ else if ((arrangement == INS_OPTS_16B) || (arrangement == INS_OPTS_8H) || (arrangement == INS_OPTS_4S) ||
(arrangement == INS_OPTS_2D))
{
return EA_16BYTE;
@@ -3046,25 +3019,21 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
// For the given 'arrangement' returns the 'elemsize' specified by the vector register arrangement
// asserts and returns EA_UNKNOWN if an invalid 'arrangement' value is passed
//
-/*static*/ emitAttr emitter::optGetElemsize(insOpts arrangement)
+/*static*/ emitAttr emitter::optGetElemsize(insOpts arrangement)
{
- if ((arrangement == INS_OPTS_8B) ||
- (arrangement == INS_OPTS_16B))
+ if ((arrangement == INS_OPTS_8B) || (arrangement == INS_OPTS_16B))
{
return EA_1BYTE;
}
- else if ((arrangement == INS_OPTS_4H) ||
- (arrangement == INS_OPTS_8H))
+ else if ((arrangement == INS_OPTS_4H) || (arrangement == INS_OPTS_8H))
{
return EA_2BYTE;
}
- else if ((arrangement == INS_OPTS_2S) ||
- (arrangement == INS_OPTS_4S))
+ else if ((arrangement == INS_OPTS_2S) || (arrangement == INS_OPTS_4S))
{
return EA_4BYTE;
}
- else if ((arrangement == INS_OPTS_1D) ||
- (arrangement == INS_OPTS_2D))
+ else if ((arrangement == INS_OPTS_1D) || (arrangement == INS_OPTS_2D))
{
return EA_8BYTE;
}
@@ -3074,24 +3043,21 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
return EA_UNKNOWN;
}
}
-
+
// For the given 'arrangement' returns the 'widen-arrangement' specified by the vector register arrangement
// asserts and returns INS_OPTS_NONE if an invalid 'arrangement' value is passed
//
-/*static*/ insOpts emitter::optWidenElemsize(insOpts arrangement)
+/*static*/ insOpts emitter::optWidenElemsize(insOpts arrangement)
{
- if ((arrangement == INS_OPTS_8B) ||
- (arrangement == INS_OPTS_16B))
+ if ((arrangement == INS_OPTS_8B) || (arrangement == INS_OPTS_16B))
{
return INS_OPTS_8H;
}
- else if ((arrangement == INS_OPTS_4H) ||
- (arrangement == INS_OPTS_8H))
+ else if ((arrangement == INS_OPTS_4H) || (arrangement == INS_OPTS_8H))
{
return INS_OPTS_4S;
}
- else if ((arrangement == INS_OPTS_2S) ||
- (arrangement == INS_OPTS_4S))
+ else if ((arrangement == INS_OPTS_2S) || (arrangement == INS_OPTS_4S))
{
return INS_OPTS_2D;
}
@@ -3101,81 +3067,81 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
return INS_OPTS_NONE;
}
}
-
+
// For the given 'conversion' returns the 'dstsize' specified by the conversion option
-/*static*/ emitAttr emitter::optGetDstsize(insOpts conversion)
+/*static*/ emitAttr emitter::optGetDstsize(insOpts conversion)
{
switch (conversion)
{
- case INS_OPTS_S_TO_8BYTE:
- case INS_OPTS_D_TO_8BYTE:
- case INS_OPTS_4BYTE_TO_D:
- case INS_OPTS_8BYTE_TO_D:
- case INS_OPTS_S_TO_D:
- case INS_OPTS_H_TO_D:
+ case INS_OPTS_S_TO_8BYTE:
+ case INS_OPTS_D_TO_8BYTE:
+ case INS_OPTS_4BYTE_TO_D:
+ case INS_OPTS_8BYTE_TO_D:
+ case INS_OPTS_S_TO_D:
+ case INS_OPTS_H_TO_D:
- return EA_8BYTE;
+ return EA_8BYTE;
- case INS_OPTS_S_TO_4BYTE:
- case INS_OPTS_D_TO_4BYTE:
- case INS_OPTS_4BYTE_TO_S:
- case INS_OPTS_8BYTE_TO_S:
- case INS_OPTS_D_TO_S:
- case INS_OPTS_H_TO_S:
+ case INS_OPTS_S_TO_4BYTE:
+ case INS_OPTS_D_TO_4BYTE:
+ case INS_OPTS_4BYTE_TO_S:
+ case INS_OPTS_8BYTE_TO_S:
+ case INS_OPTS_D_TO_S:
+ case INS_OPTS_H_TO_S:
- return EA_4BYTE;
+ return EA_4BYTE;
- case INS_OPTS_S_TO_H:
- case INS_OPTS_D_TO_H:
+ case INS_OPTS_S_TO_H:
+ case INS_OPTS_D_TO_H:
- return EA_2BYTE;
+ return EA_2BYTE;
- default:
- assert(!" invalid 'conversion' value");
- return EA_UNKNOWN;
+ default:
+ assert(!" invalid 'conversion' value");
+ return EA_UNKNOWN;
}
}
// For the given 'conversion' returns the 'srcsize' specified by the conversion option
-/*static*/ emitAttr emitter::optGetSrcsize(insOpts conversion)
+/*static*/ emitAttr emitter::optGetSrcsize(insOpts conversion)
{
switch (conversion)
{
- case INS_OPTS_D_TO_8BYTE:
- case INS_OPTS_D_TO_4BYTE:
- case INS_OPTS_8BYTE_TO_D:
- case INS_OPTS_8BYTE_TO_S:
- case INS_OPTS_D_TO_S:
- case INS_OPTS_D_TO_H:
+ case INS_OPTS_D_TO_8BYTE:
+ case INS_OPTS_D_TO_4BYTE:
+ case INS_OPTS_8BYTE_TO_D:
+ case INS_OPTS_8BYTE_TO_S:
+ case INS_OPTS_D_TO_S:
+ case INS_OPTS_D_TO_H:
- return EA_8BYTE;
+ return EA_8BYTE;
- case INS_OPTS_S_TO_8BYTE:
- case INS_OPTS_S_TO_4BYTE:
- case INS_OPTS_4BYTE_TO_S:
- case INS_OPTS_4BYTE_TO_D:
- case INS_OPTS_S_TO_D:
- case INS_OPTS_S_TO_H:
+ case INS_OPTS_S_TO_8BYTE:
+ case INS_OPTS_S_TO_4BYTE:
+ case INS_OPTS_4BYTE_TO_S:
+ case INS_OPTS_4BYTE_TO_D:
+ case INS_OPTS_S_TO_D:
+ case INS_OPTS_S_TO_H:
- return EA_4BYTE;
+ return EA_4BYTE;
- case INS_OPTS_H_TO_S:
- case INS_OPTS_H_TO_D:
+ case INS_OPTS_H_TO_S:
+ case INS_OPTS_H_TO_D:
- return EA_2BYTE;
+ return EA_2BYTE;
- default:
- assert(!" invalid 'conversion' value");
- return EA_UNKNOWN;
+ default:
+ assert(!" invalid 'conversion' value");
+ return EA_UNKNOWN;
}
}
// For the given 'size' and 'index' returns true if it specifies a valid index for a vector register of 'size'
-/*static*/ bool emitter::isValidVectorIndex(emitAttr datasize, emitAttr elemsize, ssize_t index)
+/*static*/ bool emitter::isValidVectorIndex(emitAttr datasize, emitAttr elemsize, ssize_t index)
{
assert(isValidVectorDatasize(datasize));
assert(isValidVectorElemsize(elemsize));
-
+
bool result = false;
if (index >= 0)
{
@@ -3183,42 +3149,42 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
{
switch (elemsize)
{
- case EA_1BYTE:
- result = (index < 8);
- break;
- case EA_2BYTE:
- result = (index < 4);
- break;
- case EA_4BYTE:
- result = (index < 2);
- break;
- case EA_8BYTE:
- result = (index < 1);
- break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ case EA_1BYTE:
+ result = (index < 8);
+ break;
+ case EA_2BYTE:
+ result = (index < 4);
+ break;
+ case EA_4BYTE:
+ result = (index < 2);
+ break;
+ case EA_8BYTE:
+ result = (index < 1);
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
}
}
else if (datasize == EA_16BYTE)
{
switch (elemsize)
{
- case EA_1BYTE:
- result = (index < 16);
- break;
- case EA_2BYTE:
- result = (index < 8);
- break;
- case EA_4BYTE:
- result = (index < 4);
- break;
- case EA_8BYTE:
- result = (index < 2);
- break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ case EA_1BYTE:
+ result = (index < 16);
+ break;
+ case EA_2BYTE:
+ result = (index < 8);
+ break;
+ case EA_4BYTE:
+ result = (index < 4);
+ break;
+ case EA_8BYTE:
+ result = (index < 2);
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
}
}
}
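
Both switches above reduce to one rule: an element index is valid when it addresses a lane inside the register, i.e. 0 <= index < datasize / elemsize. A one-line equivalent with illustrative naming:

#include <cstddef>

// Sketch: equivalent of the isValidVectorIndex tables above.
// A 16-byte register with 2-byte lanes has 8 lanes, so valid indices are 0..7.
bool isValidLaneIndex(unsigned vectorBytes, unsigned elemBytes, std::ptrdiff_t index)
{
    return (index >= 0) && (index < (std::ptrdiff_t)(vectorBytes / elemBytes));
}
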
@@ -3230,10 +3196,10 @@ emitter::code_t emitter::emitInsCode(instruction ins, insFormat fmt)
* Add an instruction with no operands.
*/
-void emitter::emitIns(instruction ins)
+void emitter::emitIns(instruction ins)
{
- instrDesc * id = emitNewInstrSmall(EA_8BYTE);
- insFormat fmt = emitInsFormat(ins);
+ instrDesc* id = emitNewInstrSmall(EA_8BYTE);
+ insFormat fmt = emitInsFormat(ins);
assert(fmt == IF_SN_0A);
@@ -3244,38 +3210,35 @@ void emitter::emitIns(instruction ins)
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction with a single immediate value.
*/
-void emitter::emitIns_I(instruction ins,
- emitAttr attr,
- ssize_t imm)
+void emitter::emitIns_I(instruction ins, emitAttr attr, ssize_t imm)
{
- insFormat fmt = IF_NONE;
+ insFormat fmt = IF_NONE;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_brk:
- if ((imm & 0x0000ffff) == imm)
- {
- fmt = IF_SI_0A;
- }
- else
- {
- assert(!"Instruction cannot be encoded: IF_SI_0A");
- }
- break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ case INS_brk:
+ if ((imm & 0x0000ffff) == imm)
+ {
+ fmt = IF_SI_0A;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded: IF_SI_0A");
+ }
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
}
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrSC(attr, imm);
+ instrDesc* id = emitNewInstrSC(attr, imm);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3284,33 +3247,30 @@ void emitter::emitIns_I(instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction referencing a single register.
*/
-void emitter::emitIns_R(instruction ins,
- emitAttr attr,
- regNumber reg)
+void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
- instrDesc * id = nullptr;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
+ instrDesc* id = nullptr;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_br:
- case INS_ret:
- assert(isGeneralRegister(reg));
- id = emitNewInstrSmall(attr);
- id->idReg1(reg);
- fmt = IF_BR_1A;
- break;
+ case INS_br:
+ case INS_ret:
+ assert(isGeneralRegister(reg));
+ id = emitNewInstrSmall(attr);
+ id->idReg1(reg);
+ fmt = IF_BR_1A;
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
assert(fmt != IF_NONE);
@@ -3327,155 +3287,195 @@ void emitter::emitIns_R(instruction ins,
* Add an instruction referencing a register and a constant.
*/
-void emitter::emitIns_R_I(instruction ins,
- emitAttr attr,
- regNumber reg,
- ssize_t imm,
- insOpts opt /* = INS_OPTS_NONE */)
+void emitter::emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t imm, insOpts opt /* = INS_OPTS_NONE */)
{
- emitAttr size = EA_SIZE(attr);
- emitAttr elemsize = EA_UNKNOWN;
- insFormat fmt = IF_NONE;
- bool canEncode = false;
+ emitAttr size = EA_SIZE(attr);
+ emitAttr elemsize = EA_UNKNOWN;
+ insFormat fmt = IF_NONE;
+ bool canEncode = false;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- bitMaskImm bmi;
- halfwordImm hwi;
- byteShiftedImm bsi;
- ssize_t notOfImm;
+ bitMaskImm bmi;
+ halfwordImm hwi;
+ byteShiftedImm bsi;
+ ssize_t notOfImm;
- case INS_tst:
- assert(insOptsNone(opt));
- assert(isGeneralRegister(reg));
- bmi.immNRS = 0;
- canEncode = canEncodeBitMaskImm(imm, size, &bmi);
- if (canEncode)
- {
- imm = bmi.immNRS;
- assert(isValidImmNRS(imm, size));
- fmt = IF_DI_1C;
- }
- break;
+ case INS_tst:
+ assert(insOptsNone(opt));
+ assert(isGeneralRegister(reg));
+ bmi.immNRS = 0;
+ canEncode = canEncodeBitMaskImm(imm, size, &bmi);
+ if (canEncode)
+ {
+ imm = bmi.immNRS;
+ assert(isValidImmNRS(imm, size));
+ fmt = IF_DI_1C;
+ }
+ break;
- case INS_movk:
- case INS_movn:
- case INS_movz:
- assert(isValidGeneralDatasize(size));
- assert(insOptsNone(opt)); // No LSL here (you must use emitIns_R_I_I if a shift is needed)
- assert(isGeneralRegister(reg));
- assert(isValidUimm16(imm));
+ case INS_movk:
+ case INS_movn:
+ case INS_movz:
+ assert(isValidGeneralDatasize(size));
+ assert(insOptsNone(opt)); // No LSL here (you must use emitIns_R_I_I if a shift is needed)
+ assert(isGeneralRegister(reg));
+ assert(isValidUimm16(imm));
- hwi.immHW = 0;
- hwi.immVal = imm;
- assert(imm == emitDecodeHalfwordImm(hwi, size));
+ hwi.immHW = 0;
+ hwi.immVal = imm;
+ assert(imm == emitDecodeHalfwordImm(hwi, size));
- imm = hwi.immHWVal;
- canEncode = true;
- fmt = IF_DI_1B;
- break;
+ imm = hwi.immHWVal;
+ canEncode = true;
+ fmt = IF_DI_1B;
+ break;
- case INS_mov:
- assert(isValidGeneralDatasize(size));
- assert(insOptsNone(opt)); // No explicit LSL here
- // We will automatically determine the shift based upon the imm
+ case INS_mov:
+ assert(isValidGeneralDatasize(size));
+ assert(insOptsNone(opt)); // No explicit LSL here
+ // We will automatically determine the shift based upon the imm
- // First try the standard 'halfword immediate' imm(i16,hw)
- hwi.immHWVal = 0;
- canEncode = canEncodeHalfwordImm(imm, size, &hwi);
- if (canEncode)
- {
- // uses a movz encoding
- assert(isGeneralRegister(reg));
- imm = hwi.immHWVal;
- assert(isValidImmHWVal(imm, size));
- fmt = IF_DI_1B;
- break;
- }
+ // First try the standard 'halfword immediate' imm(i16,hw)
+ hwi.immHWVal = 0;
+ canEncode = canEncodeHalfwordImm(imm, size, &hwi);
+ if (canEncode)
+ {
+ // uses a movz encoding
+ assert(isGeneralRegister(reg));
+ imm = hwi.immHWVal;
+ assert(isValidImmHWVal(imm, size));
+ fmt = IF_DI_1B;
+ break;
+ }
- // Next try the ones-complement form of 'halfword immediate' imm(i16,hw)
- notOfImm = NOT_helper(imm, getBitWidth(size));
- canEncode = canEncodeHalfwordImm(notOfImm, size, &hwi);
- if (canEncode)
- {
- assert(isGeneralRegister(reg));
- imm = hwi.immHWVal;
- ins = INS_movn; // uses a movn encoding
- assert(isValidImmHWVal(imm, size));
- fmt = IF_DI_1B;
- break;
- }
+ // Next try the ones-complement form of 'halfword immediate' imm(i16,hw)
+ notOfImm = NOT_helper(imm, getBitWidth(size));
+ canEncode = canEncodeHalfwordImm(notOfImm, size, &hwi);
+ if (canEncode)
+ {
+ assert(isGeneralRegister(reg));
+ imm = hwi.immHWVal;
+ ins = INS_movn; // uses a movn encoding
+ assert(isValidImmHWVal(imm, size));
+ fmt = IF_DI_1B;
+ break;
+ }
+
+ // Finally try the 'bitmask immediate' imm(N,r,s)
+ bmi.immNRS = 0;
+ canEncode = canEncodeBitMaskImm(imm, size, &bmi);
+ if (canEncode)
+ {
+ assert(isGeneralRegisterOrSP(reg));
+ reg = encodingSPtoZR(reg);
+ imm = bmi.immNRS;
+ assert(isValidImmNRS(imm, size));
+ fmt = IF_DI_1D;
+ break;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded: mov imm");
+ }
- // Finally try the 'bitmask immediate' imm(N,r,s)
- bmi.immNRS = 0;
- canEncode = canEncodeBitMaskImm(imm, size, &bmi);
- if (canEncode)
- {
- assert(isGeneralRegisterOrSP(reg));
- reg = encodingSPtoZR(reg);
- imm = bmi.immNRS;
- assert(isValidImmNRS(imm, size));
- fmt = IF_DI_1D;
break;
- }
- else
- {
- assert(!"Instruction cannot be encoded: mov imm");
- }
- break;
+ case INS_movi:
+ assert(isValidVectorDatasize(size));
+ assert(isVectorRegister(reg));
+ if (insOptsNone(opt) && (size == EA_8BYTE))
+ {
+ opt = INS_OPTS_1D;
+ }
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
- case INS_movi:
- assert(isValidVectorDatasize(size));
- assert(isVectorRegister(reg));
- if (insOptsNone(opt) && (size == EA_8BYTE))
- {
- opt = INS_OPTS_1D;
- }
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
-
- if (elemsize == EA_8BYTE)
- {
- size_t uimm = imm;
- ssize_t imm8 = 0;
- unsigned pos = 0;
- canEncode = true;
- bool failed = false;
- while (uimm != 0)
+ if (elemsize == EA_8BYTE)
{
- INT64 loByte = uimm & 0xFF;
- if (((loByte == 0) || (loByte == 0xFF)) && (pos < 8))
+ size_t uimm = imm;
+ ssize_t imm8 = 0;
+ unsigned pos = 0;
+ canEncode = true;
+ bool failed = false;
+ while (uimm != 0)
{
- if (loByte == 0xFF)
+ INT64 loByte = uimm & 0xFF;
+ if (((loByte == 0) || (loByte == 0xFF)) && (pos < 8))
+ {
+ if (loByte == 0xFF)
+ {
+ imm8 |= (1 << pos);
+ }
+ uimm >>= 8;
+ pos++;
+ }
+ else
{
- imm8 |= (1 << pos);
+ canEncode = false;
+ break;
}
- uimm >>= 8;
- pos++;
}
- else
+ imm = imm8;
+ assert(isValidUimm8(imm));
+ fmt = IF_DV_1B;
+ break;
+ }
+ else
+ {
+ // Vector operation
+
+ // No explicit LSL/MSL is used for the immediate
+ // We will automatically determine the shift based upon the value of imm
+
+ // First try the standard 'byteShifted immediate' imm(i8,bySh)
+ bsi.immBSVal = 0;
+ canEncode = canEncodeByteShiftedImm(imm, elemsize, true, &bsi);
+ if (canEncode)
{
- canEncode = false;
+ imm = bsi.immBSVal;
+ assert(isValidImmBSVal(imm, size));
+ fmt = IF_DV_1B;
break;
}
+
+ // Next try the ones-complement form of the 'immediate' imm(i8,bySh)
+ if ((elemsize == EA_2BYTE) || (elemsize == EA_4BYTE)) // Only EA_2BYTE or EA_4BYTE forms
+ {
+ notOfImm = NOT_helper(imm, getBitWidth(elemsize));
+ canEncode = canEncodeByteShiftedImm(notOfImm, elemsize, true, &bsi);
+ if (canEncode)
+ {
+ imm = bsi.immBSVal;
+ ins = INS_mvni; // uses a mvni encoding
+ assert(isValidImmBSVal(imm, size));
+ fmt = IF_DV_1B;
+ break;
+ }
+ }
}
- imm = imm8;
- assert(isValidUimm8(imm));
- fmt = IF_DV_1B;
break;
- }
- else
- {
+
+ case INS_orr:
+ case INS_bic:
+ case INS_mvni:
+ assert(isValidVectorDatasize(size));
+ assert(isVectorRegister(reg));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert((elemsize == EA_2BYTE) || (elemsize == EA_4BYTE)); // Only EA_2BYTE or EA_4BYTE forms
+
// Vector operation
- // No explicit LSL/MSL is used for the immediate
+ // No explicit LSL/MSL is used for the immediate
// We will automatically determine the shift based upon the value of imm
-
+
// First try the standard 'byteShifted immediate' imm(i8,bySh)
bsi.immBSVal = 0;
- canEncode = canEncodeByteShiftedImm(imm, elemsize, true, &bsi);
+ canEncode = canEncodeByteShiftedImm(imm, elemsize,
+ (ins == INS_mvni), // mvni supports the ones shifting variant (aka MSL)
+ &bsi);
if (canEncode)
{
imm = bsi.immBSVal;
@@ -3483,99 +3483,55 @@ void emitter::emitIns_R_I(instruction ins,
fmt = IF_DV_1B;
break;
}
+ break;
- // Next try the ones-complement form of the 'immediate' imm(i8,bySh)
- if ((elemsize == EA_2BYTE) || (elemsize == EA_4BYTE)) // Only EA_2BYTE or EA_4BYTE forms
+ case INS_cmp:
+ case INS_cmn:
+ assert(insOptsNone(opt));
+ assert(isGeneralRegister(reg));
+
+ if (unsigned_abs(imm) <= 0x0fff)
{
- notOfImm = NOT_helper(imm, getBitWidth(elemsize));
- canEncode = canEncodeByteShiftedImm(notOfImm, elemsize, true, &bsi);
- if (canEncode)
+ if (imm < 0)
{
- imm = bsi.immBSVal;
- ins = INS_mvni; // uses a mvni encoding
- assert(isValidImmBSVal(imm, size));
- fmt = IF_DV_1B;
- break;
+ ins = insReverse(ins);
+ imm = -imm;
}
+ assert(isValidUimm12(imm));
+ canEncode = true;
+ fmt = IF_DI_1A;
}
- }
- break;
-
- case INS_orr:
- case INS_bic:
- case INS_mvni:
- assert(isValidVectorDatasize(size));
- assert(isVectorRegister(reg));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert((elemsize == EA_2BYTE) || (elemsize == EA_4BYTE)); // Only EA_2BYTE or EA_4BYTE forms
-
- // Vector operation
-
- // No explicit LSL/MSL is used for the immediate
- // We will automatically determine the shift based upon the value of imm
-
- // First try the standard 'byteShifted immediate' imm(i8,bySh)
- bsi.immBSVal = 0;
- canEncode = canEncodeByteShiftedImm(imm, elemsize,
- (ins == INS_mvni), // mvni supports the ones shifting variant (aka MSL)
- &bsi);
- if (canEncode)
- {
- imm = bsi.immBSVal;
- assert(isValidImmBSVal(imm, size));
- fmt = IF_DV_1B;
- break;
- }
- break;
-
- case INS_cmp:
- case INS_cmn:
- assert(insOptsNone(opt));
- assert(isGeneralRegister(reg));
-
- if (unsigned_abs(imm) <= 0x0fff)
- {
- if (imm < 0)
+ else if (canEncodeWithShiftImmBy12(imm)) // Try the shifted by 12 encoding
{
- ins = insReverse(ins);
- imm = -imm;
+ // Encoding will use a 12-bit left shift of the immediate
+ opt = INS_OPTS_LSL12;
+ if (imm < 0)
+ {
+ ins = insReverse(ins);
+ imm = -imm;
+ }
+ assert((imm & 0xfff) == 0);
+ imm >>= 12;
+ assert(isValidUimm12(imm));
+ canEncode = true;
+ fmt = IF_DI_1A;
}
- assert(isValidUimm12(imm));
- canEncode = true;
- fmt = IF_DI_1A;
- }
- else if (canEncodeWithShiftImmBy12(imm)) // Try the shifted by 12 encoding
- {
- // Encoding will use a 12-bit left shift of the immediate
- opt = INS_OPTS_LSL12;
- if (imm < 0)
+ else
{
- ins = insReverse(ins);
- imm = -imm;
+ assert(!"Instruction cannot be encoded: IF_DI_1A");
}
- assert((imm & 0xfff) == 0);
- imm >>= 12;
- assert(isValidUimm12(imm));
- canEncode = true;
- fmt = IF_DI_1A;
- }
- else
- {
- assert(!"Instruction cannot be encoded: IF_DI_1A");
- }
- break;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
- } // end switch (ins)
+ } // end switch (ins)
assert(canEncode);
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrSC(attr, imm);
+ instrDesc* id = emitNewInstrSC(attr, imm);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3587,88 +3543,84 @@ void emitter::emitIns_R_I(instruction ins,
appendToCurIG(id);
}
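
The INS_mov immediate case above tries three encodings in order: a movz-style halfword immediate, a movn of the bitwise complement, and finally the bitmask-immediate form (the orr-from-zero alias). A standalone sketch of the first two checks for the 64-bit case; the bitmask-immediate test is deliberately left out here because it is considerably more involved:

#include <cstdint>

// True if imm is one 16-bit chunk at bit position 0, 16, 32 or 48, with all
// other bits zero, i.e. directly encodable by a single movz.
bool fitsHalfwordImm(uint64_t imm)
{
    for (unsigned hw = 0; hw < 4; hw++)
    {
        if ((imm & ~(0xFFFFull << (hw * 16))) == 0)
        {
            return true;
        }
    }
    return false;
}

bool emitAsMovz(uint64_t imm) { return fitsHalfwordImm(imm); }  // first attempt
bool emitAsMovn(uint64_t imm) { return fitsHalfwordImm(~imm); } // second attempt

// Example: 0x0000F00000000000 -> movz (hw = 2);
//          0xFFFFFFFFFFFF1234 -> movn, since ~imm == 0x000000000000EDCB.
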
-
/*****************************************************************************
*
* Add an instruction referencing a register and a floating point constant.
*/
-void emitter::emitIns_R_F(instruction ins,
- emitAttr attr,
- regNumber reg,
- double immDbl,
- insOpts opt /* = INS_OPTS_NONE */)
+void emitter::emitIns_R_F(
+ instruction ins, emitAttr attr, regNumber reg, double immDbl, insOpts opt /* = INS_OPTS_NONE */)
{
- emitAttr size = EA_SIZE(attr);
- emitAttr elemsize = EA_UNKNOWN;
- insFormat fmt = IF_NONE;
- ssize_t imm = 0;
- bool canEncode = false;
+ emitAttr size = EA_SIZE(attr);
+ emitAttr elemsize = EA_UNKNOWN;
+ insFormat fmt = IF_NONE;
+ ssize_t imm = 0;
+ bool canEncode = false;
/* Figure out the encoding format of the instruction */
switch (ins)
{
floatImm8 fpi;
- case INS_fcmp:
- case INS_fcmpe:
- assert(insOptsNone(opt));
- assert(isValidVectorElemsizeFloat(size));
- assert(isVectorRegister(reg));
- if (immDbl == 0.0)
- {
- canEncode = true;
- fmt = IF_DV_1C;
- }
- break;
-
- case INS_fmov:
- assert(isVectorRegister(reg));
- fpi.immFPIVal = 0;
- canEncode = canEncodeFloatImm8(immDbl, &fpi);
+ case INS_fcmp:
+ case INS_fcmpe:
+ assert(insOptsNone(opt));
+ assert(isValidVectorElemsizeFloat(size));
+ assert(isVectorRegister(reg));
+ if (immDbl == 0.0)
+ {
+ canEncode = true;
+ fmt = IF_DV_1C;
+ }
+ break;
- if (insOptsAnyArrangement(opt))
- {
- // Vector operation
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(isValidVectorElemsizeFloat(elemsize));
- assert(opt != INS_OPTS_1D); // Reserved encoding
+ case INS_fmov:
+ assert(isVectorRegister(reg));
+ fpi.immFPIVal = 0;
+ canEncode = canEncodeFloatImm8(immDbl, &fpi);
- if (canEncode)
+ if (insOptsAnyArrangement(opt))
{
- imm = fpi.immFPIVal;
- assert((imm >= 0) && (imm <= 0xff));
- fmt = IF_DV_1B;
- }
- }
- else
- {
- // Scalar operation
- assert(insOptsNone(opt));
- assert(isValidVectorElemsizeFloat(size));
+ // Vector operation
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(isValidVectorElemsizeFloat(elemsize));
+ assert(opt != INS_OPTS_1D); // Reserved encoding
- if (canEncode)
+ if (canEncode)
+ {
+ imm = fpi.immFPIVal;
+ assert((imm >= 0) && (imm <= 0xff));
+ fmt = IF_DV_1B;
+ }
+ }
+ else
{
- imm = fpi.immFPIVal;
- assert((imm >= 0) && (imm <= 0xff));
- fmt = IF_DV_1A;
+ // Scalar operation
+ assert(insOptsNone(opt));
+ assert(isValidVectorElemsizeFloat(size));
+
+ if (canEncode)
+ {
+ imm = fpi.immFPIVal;
+ assert((imm >= 0) && (imm <= 0xff));
+ fmt = IF_DV_1A;
+ }
}
- }
- break;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
- } // end switch (ins)
+ } // end switch (ins)
assert(canEncode);
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrSC(attr, imm);
+ instrDesc* id = emitNewInstrSC(attr, imm);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3685,430 +3637,425 @@ void emitter::emitIns_R_F(instruction ins,
* Add an instruction referencing two registers
*/
-void emitter::emitIns_R_R(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- insOpts opt /* = INS_OPTS_NONE */)
+void emitter::emitIns_R_R(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insOpts opt /* = INS_OPTS_NONE */)
{
- emitAttr size = EA_SIZE(attr);
- emitAttr elemsize = EA_UNKNOWN;
- insFormat fmt = IF_NONE;
+ emitAttr size = EA_SIZE(attr);
+ emitAttr elemsize = EA_UNKNOWN;
+ insFormat fmt = IF_NONE;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_mov:
- assert(insOptsNone(opt));
- // Is the mov even necessary?
- if (reg1 == reg2)
- {
- // A mov with a EA_4BYTE has the side-effect of clearing the upper bits
- // So only eliminate mov instructions that are not clearing the upper bits
- //
- if (isGeneralRegisterOrSP(reg1) && (size == EA_8BYTE))
- {
- return;
- }
- else if (isVectorRegister(reg1) && (size == EA_16BYTE))
+ case INS_mov:
+ assert(insOptsNone(opt));
+ // Is the mov even necessary?
+ if (reg1 == reg2)
{
- return;
+ // A mov with a EA_4BYTE has the side-effect of clearing the upper bits
+ // So only eliminate mov instructions that are not clearing the upper bits
+ //
+ if (isGeneralRegisterOrSP(reg1) && (size == EA_8BYTE))
+ {
+ return;
+ }
+ else if (isVectorRegister(reg1) && (size == EA_16BYTE))
+ {
+ return;
+ }
}
- }
- // Check for the 'mov' aliases for the vector registers
- if (isVectorRegister(reg1))
- {
- if (isVectorRegister(reg2) && isValidVectorDatasize(size))
+ // Check for the 'mov' aliases for the vector registers
+ if (isVectorRegister(reg1))
{
- return emitIns_R_R_R(INS_mov, size, reg1, reg2, reg2);
+ if (isVectorRegister(reg2) && isValidVectorDatasize(size))
+ {
+ return emitIns_R_R_R(INS_mov, size, reg1, reg2, reg2);
+ }
+ else
+ {
+ return emitIns_R_R_I(INS_mov, size, reg1, reg2, 0);
+ }
}
else
{
- return emitIns_R_R_I(INS_mov, size, reg1, reg2, 0);
+ if (isVectorRegister(reg2))
+ {
+ assert(isGeneralRegister(reg1));
+ return emitIns_R_R_I(INS_mov, size, reg1, reg2, 0);
+ }
}
- }
- else
- {
- if (isVectorRegister(reg2))
+
+ // Is this a MOV to/from SP instruction?
+ if ((reg1 == REG_SP) || (reg2 == REG_SP))
{
- assert(isGeneralRegister(reg1));
- return emitIns_R_R_I(INS_mov, size, reg1, reg2, 0);
+ assert(isGeneralRegisterOrSP(reg1));
+ assert(isGeneralRegisterOrSP(reg2));
+ reg1 = encodingSPtoZR(reg1);
+ reg2 = encodingSPtoZR(reg2);
+ fmt = IF_DR_2G;
}
- }
-
- // Is this a MOV to/from SP instruction?
- if ((reg1 == REG_SP) || (reg2 == REG_SP))
- {
- assert(isGeneralRegisterOrSP(reg1));
- assert(isGeneralRegisterOrSP(reg2));
- reg1 = encodingSPtoZR(reg1);
- reg2 = encodingSPtoZR(reg2);
- fmt = IF_DR_2G;
- }
- else
- {
- assert(insOptsNone(opt));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegisterOrZR(reg2));
- fmt = IF_DR_2E;
- }
- break;
-
- case INS_abs:
- case INS_not:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- // for 'NOT' we can construct the arrangement: 8B or 16B
- if ((ins == INS_not) && insOptsNone(opt))
- {
- assert(isValidVectorDatasize(size));
- elemsize = EA_1BYTE;
- opt = optMakeArrangement(size, elemsize);
- }
- if (insOptsNone(opt))
- {
- // Scalar operation
- assert(size == EA_8BYTE); // Only type D is supported
- fmt = IF_DV_2L;
- }
- else
- {
- // Vector operation
- assert(insOptsAnyArrangement(opt));
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- if (ins == INS_not)
+ else
{
- assert(elemsize == EA_1BYTE);
+ assert(insOptsNone(opt));
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegisterOrZR(reg2));
+ fmt = IF_DR_2E;
}
- fmt = IF_DV_2M;
- }
- break;
+ break;
- case INS_mvn:
- case INS_neg:
- if (isVectorRegister(reg1))
- {
+ case INS_abs:
+ case INS_not:
+ assert(isVectorRegister(reg1));
assert(isVectorRegister(reg2));
- // for 'mvn' we can construct the arrangement: 8B or 16b
- if ((ins == INS_mvn) && insOptsNone(opt))
+ // for 'NOT' we can construct the arrangement: 8B or 16B
+ if ((ins == INS_not) && insOptsNone(opt))
{
assert(isValidVectorDatasize(size));
elemsize = EA_1BYTE;
- opt = optMakeArrangement(size, elemsize);
+ opt = optMakeArrangement(size, elemsize);
}
if (insOptsNone(opt))
{
// Scalar operation
- assert(size == EA_8BYTE); // Only type D is supported
+ assert(size == EA_8BYTE); // Only type D is supported
fmt = IF_DV_2L;
}
else
{
// Vector operation
+ assert(insOptsAnyArrangement(opt));
assert(isValidVectorDatasize(size));
assert(isValidArrangement(size, opt));
elemsize = optGetElemsize(opt);
- if (ins == INS_mvn)
+ if (ins == INS_not)
{
- assert(elemsize == EA_1BYTE); // Only supports 8B or 16B
+ assert(elemsize == EA_1BYTE);
}
fmt = IF_DV_2M;
}
break;
- }
- __fallthrough;
-
- case INS_negs:
- assert(insOptsNone(opt));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegisterOrZR(reg2));
- fmt = IF_DR_2E;
- break;
- case INS_sxtw:
- assert(size == EA_8BYTE);
- __fallthrough;
+ case INS_mvn:
+ case INS_neg:
+ if (isVectorRegister(reg1))
+ {
+ assert(isVectorRegister(reg2));
+ // for 'mvn' we can construct the arrangement: 8B or 16b
+ if ((ins == INS_mvn) && insOptsNone(opt))
+ {
+ assert(isValidVectorDatasize(size));
+ elemsize = EA_1BYTE;
+ opt = optMakeArrangement(size, elemsize);
+ }
+ if (insOptsNone(opt))
+ {
+ // Scalar operation
+ assert(size == EA_8BYTE); // Only type D is supported
+ fmt = IF_DV_2L;
+ }
+ else
+ {
+ // Vector operation
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ if (ins == INS_mvn)
+ {
+ assert(elemsize == EA_1BYTE); // Only supports 8B or 16B
+ }
+ fmt = IF_DV_2M;
+ }
+ break;
+ }
+ __fallthrough;
- case INS_sxtb:
- case INS_sxth:
- case INS_uxtb:
- case INS_uxth:
- assert(insOptsNone(opt));
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- fmt = IF_DR_2H;
- break;
+ case INS_negs:
+ assert(insOptsNone(opt));
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegisterOrZR(reg2));
+ fmt = IF_DR_2E;
+ break;
+
+ case INS_sxtw:
+ assert(size == EA_8BYTE);
+ __fallthrough;
- case INS_sxtl:
- case INS_sxtl2:
- case INS_uxtl:
- case INS_uxtl2:
- return emitIns_R_R_I(ins, size, reg1, reg2, 0, opt);
+ case INS_sxtb:
+ case INS_sxth:
+ case INS_uxtb:
+ case INS_uxth:
+ assert(insOptsNone(opt));
+ assert(isValidGeneralDatasize(size));
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ fmt = IF_DR_2H;
+ break;
- case INS_cls:
- case INS_clz:
- case INS_rbit:
- case INS_rev16:
- case INS_rev32:
- case INS_cnt:
- if (isVectorRegister(reg1))
- {
- assert(isVectorRegister(reg2));
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- if ((ins == INS_cls) || (ins == INS_clz))
+ case INS_sxtl:
+ case INS_sxtl2:
+ case INS_uxtl:
+ case INS_uxtl2:
+ return emitIns_R_R_I(ins, size, reg1, reg2, 0, opt);
+
+ case INS_cls:
+ case INS_clz:
+ case INS_rbit:
+ case INS_rev16:
+ case INS_rev32:
+ case INS_cnt:
+ if (isVectorRegister(reg1))
+ {
+ assert(isVectorRegister(reg2));
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ if ((ins == INS_cls) || (ins == INS_clz))
+ {
+ assert(elemsize != EA_8BYTE); // No encoding for type D
+ }
+ else if (ins == INS_rev32)
+ {
+ assert((elemsize == EA_2BYTE) || (elemsize == EA_1BYTE));
+ }
+ else
+ {
+ assert(elemsize == EA_1BYTE); // Only supports 8B or 16B
+ }
+ fmt = IF_DV_2M;
+ break;
+ }
+ if (ins == INS_cnt)
{
- assert(elemsize != EA_8BYTE); // No encoding for type D
+ // Doesn't have general register version(s)
+ break;
}
- else if (ins == INS_rev32)
+
+ __fallthrough;
+
+ case INS_rev:
+ assert(insOptsNone(opt));
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ if (ins == INS_rev32)
{
- assert((elemsize == EA_2BYTE) || (elemsize == EA_1BYTE));
+ assert(size == EA_8BYTE);
}
else
{
- assert(elemsize == EA_1BYTE); // Only supports 8B or 16B
+ assert(isValidGeneralDatasize(size));
}
- fmt = IF_DV_2M;
- break;
- }
- if (ins == INS_cnt)
- {
- // Doesn't have general register version(s)
+ fmt = IF_DR_2G;
break;
- }
- __fallthrough;
-
- case INS_rev:
- assert(insOptsNone(opt));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- if (ins == INS_rev32)
- {
- assert(size == EA_8BYTE);
- }
- else
- {
- assert(isValidGeneralDatasize(size));
- }
- fmt = IF_DR_2G;
- break;
+ case INS_rev64:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(elemsize != EA_8BYTE); // No encoding for type D
+ fmt = IF_DV_2M;
+ break;
- case INS_rev64:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(elemsize != EA_8BYTE); // No encoding for type D
- fmt = IF_DV_2M;
- break;
+ case INS_ldr:
+ case INS_ldrb:
+ case INS_ldrh:
+ case INS_ldrsb:
+ case INS_ldrsh:
+ case INS_ldrsw:
+ case INS_str:
+ case INS_strb:
+ case INS_strh:
+
+ case INS_cmp:
+ case INS_cmn:
+ case INS_tst:
+ assert(insOptsNone(opt));
+ emitIns_R_R_I(ins, attr, reg1, reg2, 0, INS_OPTS_NONE);
+ return;
- case INS_ldr:
- case INS_ldrb:
- case INS_ldrh:
- case INS_ldrsb:
- case INS_ldrsh:
- case INS_ldrsw:
- case INS_str:
- case INS_strb:
- case INS_strh:
-
- case INS_cmp:
- case INS_cmn:
- case INS_tst:
- assert(insOptsNone(opt));
- emitIns_R_R_I(ins, attr, reg1, reg2, 0, INS_OPTS_NONE);
- return;
+ case INS_fmov:
+ assert(isValidVectorElemsizeFloat(size));
- case INS_fmov:
- assert(isValidVectorElemsizeFloat(size));
+ // Is the mov even necessary?
+ if (reg1 == reg2)
+ {
+ return;
+ }
- // Is the mov even necessary?
- if (reg1 == reg2)
- {
- return;
- }
-
- if (isVectorRegister(reg1))
- {
- if (isVectorRegister(reg2))
+ if (isVectorRegister(reg1))
{
- assert(insOptsNone(opt));
- fmt = IF_DV_2G;
+ if (isVectorRegister(reg2))
+ {
+ assert(insOptsNone(opt));
+ fmt = IF_DV_2G;
+ }
+ else
+ {
+ assert(isGeneralRegister(reg2));
+
+ // if the optional conversion specifier is not present we calculate it
+ if (opt == INS_OPTS_NONE)
+ {
+ opt = (size == EA_4BYTE) ? INS_OPTS_4BYTE_TO_S : INS_OPTS_8BYTE_TO_D;
+ }
+ assert(insOptsConvertIntToFloat(opt));
+
+ fmt = IF_DV_2I;
+ }
}
else
{
- assert(isGeneralRegister(reg2));
+ assert(isGeneralRegister(reg1));
+ assert(isVectorRegister(reg2));
// if the optional conversion specifier is not present we calculate it
if (opt == INS_OPTS_NONE)
{
- opt = (size == EA_4BYTE) ? INS_OPTS_4BYTE_TO_S
- : INS_OPTS_8BYTE_TO_D;
+ opt = (size == EA_4BYTE) ? INS_OPTS_S_TO_4BYTE : INS_OPTS_D_TO_8BYTE;
}
- assert(insOptsConvertIntToFloat(opt));
-
- fmt = IF_DV_2I;
+ assert(insOptsConvertFloatToInt(opt));
+
+ fmt = IF_DV_2H;
}
- }
- else
- {
- assert(isGeneralRegister(reg1));
+ break;
+
+ case INS_fcmp:
+ case INS_fcmpe:
+ assert(insOptsNone(opt));
+ assert(isValidVectorElemsizeFloat(size));
+ assert(isVectorRegister(reg1));
assert(isVectorRegister(reg2));
+ fmt = IF_DV_2K;
+ break;
- // if the optional conversion specifier is not present we calculate it
- if (opt == INS_OPTS_NONE)
+ case INS_fcvtns:
+ case INS_fcvtnu:
+ case INS_fcvtas:
+ case INS_fcvtau:
+ case INS_fcvtps:
+ case INS_fcvtpu:
+ case INS_fcvtms:
+ case INS_fcvtmu:
+ case INS_fcvtzs:
+ case INS_fcvtzu:
+ if (insOptsAnyArrangement(opt))
+ {
+ // Vector operation
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(isValidVectorElemsizeFloat(elemsize));
+ assert(opt != INS_OPTS_1D); // Reserved encoding
+ fmt = IF_DV_2A;
+ }
+ else
{
- opt = (size == EA_4BYTE) ? INS_OPTS_S_TO_4BYTE
- : INS_OPTS_D_TO_8BYTE;
+ // Scalar operation
+ assert(isVectorRegister(reg2));
+ if (isVectorRegister(reg1))
+ {
+ assert(insOptsNone(opt));
+ assert(isValidVectorElemsizeFloat(size));
+ fmt = IF_DV_2G;
+ }
+ else
+ {
+ assert(isGeneralRegister(reg1));
+ assert(insOptsConvertFloatToInt(opt));
+ assert(isValidVectorElemsizeFloat(size));
+ fmt = IF_DV_2H;
+ }
}
- assert(insOptsConvertFloatToInt(opt));
-
- fmt = IF_DV_2H;
- }
- break;
-
- case INS_fcmp:
- case INS_fcmpe:
- assert(insOptsNone(opt));
- assert(isValidVectorElemsizeFloat(size));
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- fmt = IF_DV_2K;
- break;
+ break;
- case INS_fcvtns:
- case INS_fcvtnu:
- case INS_fcvtas:
- case INS_fcvtau:
- case INS_fcvtps:
- case INS_fcvtpu:
- case INS_fcvtms:
- case INS_fcvtmu:
- case INS_fcvtzs:
- case INS_fcvtzu:
- if (insOptsAnyArrangement(opt))
- {
- // Vector operation
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(isValidVectorElemsizeFloat(elemsize));
- assert(opt != INS_OPTS_1D); // Reserved encoding
- fmt = IF_DV_2A;
- }
- else
- {
- // Scalar operation
- assert(isVectorRegister(reg2));
- if (isVectorRegister(reg1))
+ case INS_scvtf:
+ case INS_ucvtf:
+ if (insOptsAnyArrangement(opt))
{
- assert(insOptsNone(opt));
- assert(isValidVectorElemsizeFloat(size));
- fmt = IF_DV_2G;
+ // Vector operation
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(isValidVectorElemsizeFloat(elemsize));
+ assert(opt != INS_OPTS_1D); // Reserved encoding
+ fmt = IF_DV_2A;
}
else
{
- assert(isGeneralRegister(reg1));
- assert(insOptsConvertFloatToInt(opt));
- assert(isValidVectorElemsizeFloat(size));
- fmt = IF_DV_2H;
+ // Scalar operation
+ assert(isVectorRegister(reg1));
+ if (isVectorRegister(reg2))
+ {
+ assert(insOptsNone(opt));
+ assert(isValidVectorElemsizeFloat(size));
+ fmt = IF_DV_2G;
+ }
+ else
+ {
+ assert(isGeneralRegister(reg2));
+ assert(insOptsConvertIntToFloat(opt));
+ assert(isValidVectorElemsizeFloat(size));
+ fmt = IF_DV_2I;
+ }
}
- }
- break;
+ break;
- case INS_scvtf:
- case INS_ucvtf:
- if (insOptsAnyArrangement(opt))
- {
- // Vector operation
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(isValidVectorElemsizeFloat(elemsize));
- assert(opt != INS_OPTS_1D); // Reserved encoding
- fmt = IF_DV_2A;
- }
- else
- {
- // Scalar operation
- assert(isVectorRegister(reg1));
- if (isVectorRegister(reg2))
+ case INS_fabs:
+ case INS_fneg:
+ case INS_fsqrt:
+ case INS_frinta:
+ case INS_frinti:
+ case INS_frintm:
+ case INS_frintn:
+ case INS_frintp:
+ case INS_frintx:
+ case INS_frintz:
+ if (insOptsAnyArrangement(opt))
{
- assert(insOptsNone(opt));
- assert(isValidVectorElemsizeFloat(size));
- fmt = IF_DV_2G;
+ // Vector operation
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(isValidVectorElemsizeFloat(elemsize));
+ assert(opt != INS_OPTS_1D); // Reserved encoding
+ fmt = IF_DV_2A;
}
else
{
- assert(isGeneralRegister(reg2));
- assert(insOptsConvertIntToFloat(opt));
+ // Scalar operation
+ assert(insOptsNone(opt));
assert(isValidVectorElemsizeFloat(size));
- fmt = IF_DV_2I;
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ fmt = IF_DV_2G;
}
- }
- break;
+ break;
- case INS_fabs:
- case INS_fneg:
- case INS_fsqrt:
- case INS_frinta:
- case INS_frinti:
- case INS_frintm:
- case INS_frintn:
- case INS_frintp:
- case INS_frintx:
- case INS_frintz:
- if (insOptsAnyArrangement(opt))
- {
- // Vector operation
+ case INS_fcvt:
+ assert(insOptsConvertFloatToFloat(opt));
+ assert(isValidVectorFcvtsize(size));
assert(isVectorRegister(reg1));
assert(isVectorRegister(reg2));
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(isValidVectorElemsizeFloat(elemsize));
- assert(opt != INS_OPTS_1D); // Reserved encoding
- fmt = IF_DV_2A;
- }
- else
- {
- // Scalar operation
- assert(insOptsNone(opt));
- assert(isValidVectorElemsizeFloat(size));
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- fmt = IF_DV_2G;
- }
- break;
-
- case INS_fcvt:
- assert(insOptsConvertFloatToFloat(opt));
- assert(isValidVectorFcvtsize(size));
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- fmt = IF_DV_2J;
- break;
+ fmt = IF_DV_2J;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
- } // end switch (ins)
+ } // end switch (ins)
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrSmall(attr);
+ instrDesc* id = emitNewInstrSmall(attr);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4121,97 +4068,93 @@ void emitter::emitIns_R_R(instruction ins,
appendToCurIG(id);
}
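
One detail worth calling out from the INS_mov register-to-register case above: a same-register mov is only dropped when it cannot change the register, because a 32-bit mov w0, w0 still zeroes the upper 32 bits of x0, and smaller vector self-moves likewise clear the upper lanes. A sketch of that rule, illustrative only:

// Same-register moves are removable only at full register width.
bool canDropSelfMove(bool isVectorReg, unsigned sizeBytes)
{
    return isVectorReg ? (sizeBytes == 16) : (sizeBytes == 8);
}
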
-
/*****************************************************************************
*
* Add an instruction referencing a register and two constants.
*/
-void emitter::emitIns_R_I_I(instruction ins,
- emitAttr attr,
- regNumber reg,
- ssize_t imm1,
- ssize_t imm2,
- insOpts opt /* = INS_OPTS_NONE */)
+void emitter::emitIns_R_I_I(
+ instruction ins, emitAttr attr, regNumber reg, ssize_t imm1, ssize_t imm2, insOpts opt /* = INS_OPTS_NONE */)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
- size_t immOut = 0; // composed from imm1 and imm2 and stored in the instrDesc
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
+ size_t immOut = 0; // composed from imm1 and imm2 and stored in the instrDesc
/* Figure out the encoding format of the instruction */
switch (ins)
{
- bool canEncode;
- halfwordImm hwi;
+ bool canEncode;
+ halfwordImm hwi;
- case INS_mov:
- ins = INS_movz; // INS_mov with LSL is an alias for INS_movz LSL
- __fallthrough;
+ case INS_mov:
+ ins = INS_movz; // INS_mov with LSL is an alias for INS_movz LSL
+ __fallthrough;
- case INS_movk:
- case INS_movn:
- case INS_movz:
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegister(reg));
- assert(isValidUimm16(imm1));
- assert(insOptsLSL(opt)); // Must be INS_OPTS_LSL
+ case INS_movk:
+ case INS_movn:
+ case INS_movz:
+ assert(isValidGeneralDatasize(size));
+ assert(isGeneralRegister(reg));
+ assert(isValidUimm16(imm1));
+ assert(insOptsLSL(opt)); // Must be INS_OPTS_LSL
- if (size == EA_8BYTE)
- {
- assert((imm2 == 0) || (imm2 == 16) || // shift amount: 0, 16, 32 or 48
- (imm2 == 32) || (imm2 == 48));
- }
- else // EA_4BYTE
- {
- assert((imm2 == 0) || (imm2 == 16)); // shift amount: 0 or 16
- }
+ if (size == EA_8BYTE)
+ {
+ assert((imm2 == 0) || (imm2 == 16) || // shift amount: 0, 16, 32 or 48
+ (imm2 == 32) || (imm2 == 48));
+ }
+ else // EA_4BYTE
+ {
+ assert((imm2 == 0) || (imm2 == 16)); // shift amount: 0 or 16
+ }
- hwi.immHWVal = 0;
+ hwi.immHWVal = 0;
- switch (imm2) {
- case 0:
- hwi.immHW = 0;
- canEncode = true;
- break;
-
- case 16:
- hwi.immHW = 1;
- canEncode = true;
- break;
-
- case 32:
- hwi.immHW = 2;
- canEncode = true;
- break;
-
- case 48:
- hwi.immHW = 3;
- canEncode = true;
- break;
+ switch (imm2)
+ {
+ case 0:
+ hwi.immHW = 0;
+ canEncode = true;
+ break;
- default:
- canEncode = false;
- }
+ case 16:
+ hwi.immHW = 1;
+ canEncode = true;
+ break;
- if (canEncode)
- {
- hwi.immVal = imm1;
+ case 32:
+ hwi.immHW = 2;
+ canEncode = true;
+ break;
- immOut = hwi.immHWVal;
- assert(isValidImmHWVal(immOut, size));
- fmt = IF_DI_1B;
- }
- break;
+ case 48:
+ hwi.immHW = 3;
+ canEncode = true;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ default:
+ canEncode = false;
+ }
- } // end switch (ins)
+ if (canEncode)
+ {
+ hwi.immVal = imm1;
+
+ immOut = hwi.immHWVal;
+ assert(isValidImmHWVal(immOut, size));
+ fmt = IF_DI_1B;
+ }
+ break;
+
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
+
+ } // end switch (ins)
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrSC(attr, immOut);
+ instrDesc* id = emitNewInstrSC(attr, immOut);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4222,436 +4165,430 @@ void emitter::emitIns_R_I_I(instruction ins,
appendToCurIG(id);
}
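
emitIns_R_I_I above only accepts LSL amounts that land on a halfword boundary, encoding them as hw = shift / 16. A larger constant is then typically materialised as one movz followed by a movk for each remaining non-zero halfword; a rough sketch of that split, with illustrative names (not from the sources):

#include <cstdint>

// Sketch: split a 64-bit constant into (imm16, shift) pieces for a movz/movk
// sequence; each shift is 0, 16, 32 or 48, i.e. hw = shift / 16.
struct HalfwordPiece
{
    uint16_t imm16;
    unsigned shift;
};

unsigned splitForMovzMovk(uint64_t value, HalfwordPiece out[4])
{
    unsigned count = 0;
    for (unsigned hw = 0; hw < 4; hw++)
    {
        uint16_t piece = (uint16_t)(value >> (hw * 16));
        if (piece != 0)
        {
            out[count++] = {piece, hw * 16}; // first piece -> movz, the rest -> movk
        }
    }
    if (count == 0)
    {
        out[count++] = {0, 0}; // the constant 0 still needs one movz
    }
    return count;
}

// Example: 0x123456789ABCDEF0 becomes
//   movz x0, #0xDEF0
//   movk x0, #0x9ABC, lsl #16
//   movk x0, #0x5678, lsl #32
//   movk x0, #0x1234, lsl #48
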
-
/*****************************************************************************
*
* Add an instruction referencing two registers and a constant.
*/
-void emitter::emitIns_R_R_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- ssize_t imm,
- insOpts opt /* = INS_OPTS_NONE */)
+void emitter::emitIns_R_R_I(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insOpts opt /* = INS_OPTS_NONE */)
{
- emitAttr size = EA_SIZE(attr);
- emitAttr elemsize = EA_UNKNOWN;
- insFormat fmt = IF_NONE;
- bool isLdSt = false;
- bool isSIMD = false;
- bool isAddSub = false;
- bool setFlags = false;
- unsigned scale = 0;
- bool unscaledOp = false;
+ emitAttr size = EA_SIZE(attr);
+ emitAttr elemsize = EA_UNKNOWN;
+ insFormat fmt = IF_NONE;
+ bool isLdSt = false;
+ bool isSIMD = false;
+ bool isAddSub = false;
+ bool setFlags = false;
+ unsigned scale = 0;
+ bool unscaledOp = false;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- bool canEncode;
- bitMaskImm bmi;
+ bool canEncode;
+ bitMaskImm bmi;
- case INS_mov:
- // Check for the 'mov' aliases for the vector registers
- assert(insOptsNone(opt));
- assert(isValidVectorElemsize(size));
- elemsize = size;
- assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
+ case INS_mov:
+ // Check for the 'mov' aliases for the vector registers
+ assert(insOptsNone(opt));
+ assert(isValidVectorElemsize(size));
+ elemsize = size;
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
- if (isVectorRegister(reg1))
- {
- if (isGeneralRegisterOrZR(reg2))
+ if (isVectorRegister(reg1))
{
- fmt = IF_DV_2C; // Alias for 'ins'
- break;
+ if (isGeneralRegisterOrZR(reg2))
+ {
+ fmt = IF_DV_2C; // Alias for 'ins'
+ break;
+ }
+ else if (isVectorRegister(reg2))
+ {
+ fmt = IF_DV_2E; // Alias for 'dup'
+ break;
+ }
}
- else if (isVectorRegister(reg2))
+ else // isGeneralRegister(reg1)
{
- fmt = IF_DV_2E; // Alias for 'dup'
- break;
+ assert(isGeneralRegister(reg1));
+ if (isVectorRegister(reg2))
+ {
+ fmt = IF_DV_2B; // Alias for 'umov'
+ break;
+ }
}
- }
- else // isGeneralRegister(reg1)
- {
+ assert(!" invalid INS_mov operands");
+ break;
+
+ case INS_lsl:
+ case INS_lsr:
+ case INS_asr:
+ assert(insOptsNone(opt));
+ assert(isValidGeneralDatasize(size));
assert(isGeneralRegister(reg1));
- if (isVectorRegister(reg2))
+ assert(isGeneralRegister(reg2));
+ assert(isValidImmShift(imm, size));
+ fmt = IF_DI_2D;
+ break;
+
+ case INS_ror:
+ assert(insOptsNone(opt));
+ assert(isValidGeneralDatasize(size));
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ assert(isValidImmShift(imm, size));
+ fmt = IF_DI_2B;
+ break;
+
+ case INS_sshr:
+ case INS_ssra:
+ case INS_srshr:
+ case INS_srsra:
+ case INS_shl:
+ case INS_ushr:
+ case INS_usra:
+ case INS_urshr:
+ case INS_ursra:
+ case INS_sri:
+ case INS_sli:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ if (insOptsAnyArrangement(opt))
{
- fmt = IF_DV_2B; // Alias for 'umov'
+ // Vector operation
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(isValidVectorElemsize(elemsize));
+ assert(isValidImmShift(imm, elemsize));
+ assert(opt != INS_OPTS_1D); // Reserved encoding
+ fmt = IF_DV_2O;
break;
}
- }
- assert(!" invalid INS_mov operands");
- break;
-
- case INS_lsl:
- case INS_lsr:
- case INS_asr:
- assert(insOptsNone(opt));
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- assert(isValidImmShift(imm, size));
- fmt = IF_DI_2D;
- break;
+ else
+ {
+ // Scalar operation
+ assert(insOptsNone(opt));
+ assert(size == EA_8BYTE); // only supported size
+ assert(isValidImmShift(imm, size));
+ fmt = IF_DV_2N;
+ }
+ break;
- case INS_ror:
- assert(insOptsNone(opt));
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- assert(isValidImmShift(imm, size));
- fmt = IF_DI_2B;
- break;
+ case INS_sxtl:
+ case INS_uxtl:
+ assert(imm == 0);
+ __fallthrough;
- case INS_sshr:
- case INS_ssra:
- case INS_srshr:
- case INS_srsra:
- case INS_shl:
- case INS_ushr:
- case INS_usra:
- case INS_urshr:
- case INS_ursra:
- case INS_sri:
- case INS_sli:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- if (insOptsAnyArrangement(opt))
- {
+ case INS_shrn:
+ case INS_rshrn:
+ case INS_sshll:
+ case INS_ushll:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
// Vector operation
- assert(isValidVectorDatasize(size));
+ assert(size == EA_8BYTE);
assert(isValidArrangement(size, opt));
elemsize = optGetElemsize(opt);
+ assert(elemsize != EA_8BYTE); // Reserved encodings
assert(isValidVectorElemsize(elemsize));
assert(isValidImmShift(imm, elemsize));
- assert(opt != INS_OPTS_1D); // Reserved encoding
fmt = IF_DV_2O;
break;
- }
- else
- {
- // Scalar operation
- assert(insOptsNone(opt));
- assert(size == EA_8BYTE); // only supported size
- assert(isValidImmShift(imm, size));
- fmt = IF_DV_2N;
- }
- break;
+ case INS_sxtl2:
+ case INS_uxtl2:
+ assert(imm == 0);
+ __fallthrough;
- case INS_sxtl:
- case INS_uxtl:
- assert(imm == 0);
- __fallthrough;
-
- case INS_shrn:
- case INS_rshrn:
- case INS_sshll:
- case INS_ushll:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- // Vector operation
- assert(size == EA_8BYTE);
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(elemsize != EA_8BYTE); // Reserved encodings
- assert(isValidVectorElemsize(elemsize));
- assert(isValidImmShift(imm, elemsize));
- fmt = IF_DV_2O;
- break;
+ case INS_shrn2:
+ case INS_rshrn2:
+ case INS_sshll2:
+ case INS_ushll2:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ // Vector operation
+ assert(size == EA_16BYTE);
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(elemsize != EA_8BYTE); // Reserved encodings
+ assert(isValidVectorElemsize(elemsize));
+ assert(isValidImmShift(imm, elemsize));
+ fmt = IF_DV_2O;
+ break;
- case INS_sxtl2:
- case INS_uxtl2:
- assert(imm == 0);
- __fallthrough;
+ case INS_mvn:
+ case INS_neg:
+ case INS_negs:
+ assert(isValidGeneralDatasize(size));
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegisterOrZR(reg2));
- case INS_shrn2:
- case INS_rshrn2:
- case INS_sshll2:
- case INS_ushll2:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- // Vector operation
- assert(size == EA_16BYTE);
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(elemsize != EA_8BYTE); // Reserved encodings
- assert(isValidVectorElemsize(elemsize));
- assert(isValidImmShift(imm, elemsize));
- fmt = IF_DV_2O;
- break;
+ if (imm == 0)
+ {
+ assert(insOptsNone(opt)); // a zero imm, means no alu shift kind
- case INS_mvn:
- case INS_neg:
- case INS_negs:
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegisterOrZR(reg2));
+ fmt = IF_DR_2E;
+ }
+ else
+ {
+ if (ins == INS_mvn)
+ {
+ assert(insOptsAnyShift(opt)); // a non-zero imm, must select shift kind
+ }
+ else // neg or negs
+ {
+ assert(insOptsAluShift(opt)); // a non-zero imm, must select shift kind, can't use ROR
+ }
+ assert(isValidImmShift(imm, size));
+ fmt = IF_DR_2F;
+ }
+ break;
- if (imm == 0)
- {
- assert(insOptsNone(opt)); // a zero imm, means no alu shift kind
+ case INS_tst:
+ assert(isValidGeneralDatasize(size));
+ assert(isGeneralRegisterOrZR(reg1));
+ assert(isGeneralRegister(reg2));
- fmt = IF_DR_2E;
- }
- else
- {
- if (ins == INS_mvn)
+ if (insOptsAnyShift(opt))
{
- assert(insOptsAnyShift(opt)); // a non-zero imm, must select shift kind
+ assert(isValidImmShift(imm, size) && (imm != 0));
+ fmt = IF_DR_2B;
}
- else // neg or negs
+ else
{
- assert(insOptsAluShift(opt)); // a non-zero imm, must select shift kind, can't use ROR
+ assert(insOptsNone(opt)); // a zero imm, means no alu shift kind
+ assert(imm == 0);
+ fmt = IF_DR_2A;
}
- assert(isValidImmShift(imm, size));
- fmt = IF_DR_2F;
- }
- break;
-
- case INS_tst:
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegisterOrZR(reg1));
- assert(isGeneralRegister(reg2));
-
- if (insOptsAnyShift(opt))
- {
- assert(isValidImmShift(imm, size) && (imm != 0));
- fmt = IF_DR_2B;
- }
- else
- {
- assert(insOptsNone(opt)); // a zero imm, means no alu shift kind
- assert(imm == 0);
- fmt = IF_DR_2A;
- }
- break;
-
- case INS_cmp:
- case INS_cmn:
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegisterOrSP(reg1));
- assert(isGeneralRegister(reg2));
-
- reg1 = encodingSPtoZR(reg1);
- if (insOptsAnyExtend(opt))
- {
- assert((imm >= 0) && (imm <= 4));
-
- fmt = IF_DR_2C;
- }
- else if (imm == 0)
- {
- assert(insOptsNone(opt)); // a zero imm, means no alu shift kind
-
- fmt = IF_DR_2A;
- }
- else
- {
- assert(insOptsAnyShift(opt)); // a non-zero imm, must select shift kind
- assert(isValidImmShift(imm, size));
- fmt = IF_DR_2B;
- }
- break;
+ break;
- case INS_ands:
- case INS_and:
- case INS_eor:
- case INS_orr:
- assert(insOptsNone(opt));
- assert(isGeneralRegister(reg2));
- if (ins == INS_ands)
- {
- assert(isGeneralRegister(reg1));
- }
- else
- {
+ case INS_cmp:
+ case INS_cmn:
+ assert(isValidGeneralDatasize(size));
assert(isGeneralRegisterOrSP(reg1));
+ assert(isGeneralRegister(reg2));
+
reg1 = encodingSPtoZR(reg1);
- }
+ if (insOptsAnyExtend(opt))
+ {
+ assert((imm >= 0) && (imm <= 4));
- bmi.immNRS = 0;
- canEncode = canEncodeBitMaskImm(imm, size, &bmi);
- if (canEncode)
- {
- imm = bmi.immNRS;
- assert(isValidImmNRS(imm, size));
- fmt = IF_DI_2C;
- }
- break;
+ fmt = IF_DR_2C;
+ }
+ else if (imm == 0)
+ {
+ assert(insOptsNone(opt)); // a zero imm, means no alu shift kind
- case INS_dup: // by element, imm selects the element of reg2
- assert(isVectorRegister(reg1));
- if (isVectorRegister(reg2))
- {
- if (insOptsAnyArrangement(opt))
+ fmt = IF_DR_2A;
+ }
+ else
{
- // Vector operation
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(isValidVectorElemsize(elemsize));
- assert(isValidVectorIndex(size, elemsize, imm));
- assert(opt != INS_OPTS_1D); // Reserved encoding
- fmt = IF_DV_2D;
- break;
+ assert(insOptsAnyShift(opt)); // a non-zero imm, must select shift kind
+ assert(isValidImmShift(imm, size));
+ fmt = IF_DR_2B;
+ }
+ break;
+
+ case INS_ands:
+ case INS_and:
+ case INS_eor:
+ case INS_orr:
+ assert(insOptsNone(opt));
+ assert(isGeneralRegister(reg2));
+ if (ins == INS_ands)
+ {
+ assert(isGeneralRegister(reg1));
}
else
{
- // Scalar operation
- assert(insOptsNone(opt));
- elemsize = size;
- assert(isValidVectorElemsize(elemsize));
- assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
- fmt = IF_DV_2E;
- break;
+ assert(isGeneralRegisterOrSP(reg1));
+ reg1 = encodingSPtoZR(reg1);
}
- }
- __fallthrough;
- case INS_ins: // (MOV from general)
- assert(insOptsNone(opt));
- assert(isValidVectorElemsize(size));
- assert(isVectorRegister(reg1));
- assert(isGeneralRegisterOrZR(reg2));
- elemsize = size;
- assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
- fmt = IF_DV_2C;
- break;
+ bmi.immNRS = 0;
+ canEncode = canEncodeBitMaskImm(imm, size, &bmi);
+ if (canEncode)
+ {
+ imm = bmi.immNRS;
+ assert(isValidImmNRS(imm, size));
+ fmt = IF_DI_2C;
+ }
+ break;
- case INS_umov: // (MOV to general)
- assert(insOptsNone(opt));
- assert(isValidVectorElemsize(size));
- assert(isGeneralRegister(reg1));
- assert(isVectorRegister(reg2));
- elemsize = size;
- assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
- fmt = IF_DV_2B;
- break;
+ case INS_dup: // by element, imm selects the element of reg2
+ assert(isVectorRegister(reg1));
+ if (isVectorRegister(reg2))
+ {
+ if (insOptsAnyArrangement(opt))
+ {
+ // Vector operation
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(isValidVectorElemsize(elemsize));
+ assert(isValidVectorIndex(size, elemsize, imm));
+ assert(opt != INS_OPTS_1D); // Reserved encoding
+ fmt = IF_DV_2D;
+ break;
+ }
+ else
+ {
+ // Scalar operation
+ assert(insOptsNone(opt));
+ elemsize = size;
+ assert(isValidVectorElemsize(elemsize));
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
+ fmt = IF_DV_2E;
+ break;
+ }
+ }
+ __fallthrough;
- case INS_smov:
- assert(insOptsNone(opt));
- assert(isValidVectorElemsize(size));
- assert(size != EA_8BYTE); // no encoding, use INS_umov
- assert(isGeneralRegister(reg1));
- assert(isVectorRegister(reg2));
- elemsize = size;
- assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
- fmt = IF_DV_2B;
- break;
+ case INS_ins: // (MOV from general)
+ assert(insOptsNone(opt));
+ assert(isValidVectorElemsize(size));
+ assert(isVectorRegister(reg1));
+ assert(isGeneralRegisterOrZR(reg2));
+ elemsize = size;
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
+ fmt = IF_DV_2C;
+ break;
- case INS_add:
- case INS_sub:
- setFlags = false;
- isAddSub = true;
- break;
+ case INS_umov: // (MOV to general)
+ assert(insOptsNone(opt));
+ assert(isValidVectorElemsize(size));
+ assert(isGeneralRegister(reg1));
+ assert(isVectorRegister(reg2));
+ elemsize = size;
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
+ fmt = IF_DV_2B;
+ break;
- case INS_adds:
- case INS_subs:
- setFlags = true;
- isAddSub = true;
- break;
+ case INS_smov:
+ assert(insOptsNone(opt));
+ assert(isValidVectorElemsize(size));
+ assert(size != EA_8BYTE); // no encoding, use INS_umov
+ assert(isGeneralRegister(reg1));
+ assert(isVectorRegister(reg2));
+ elemsize = size;
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
+ fmt = IF_DV_2B;
+ break;
- case INS_ldrsb:
- case INS_ldursb:
- // 'size' specifies how we sign-extend into 4 or 8 bytes of the target register
- assert(isValidGeneralDatasize(size));
- unscaledOp = (ins == INS_ldursb);
- scale = 0;
- isLdSt = true;
- break;
+ case INS_add:
+ case INS_sub:
+ setFlags = false;
+ isAddSub = true;
+ break;
- case INS_ldrsh:
- case INS_ldursh:
- // 'size' specifies how we sign-extend into 4 or 8 bytes of the target register
- assert(isValidGeneralDatasize(size));
- unscaledOp = (ins == INS_ldursh);
- scale = 1;
- isLdSt = true;
- break;
+ case INS_adds:
+ case INS_subs:
+ setFlags = true;
+ isAddSub = true;
+ break;
- case INS_ldrsw:
- case INS_ldursw:
- // 'size' specifies how we sign-extend into 4 or 8 bytes of the target register
- assert(size == EA_8BYTE);
- unscaledOp = (ins == INS_ldursw);
- scale = 2;
- isLdSt = true;
- break;
+ case INS_ldrsb:
+ case INS_ldursb:
+ // 'size' specifies how we sign-extend into 4 or 8 bytes of the target register
+ assert(isValidGeneralDatasize(size));
+ unscaledOp = (ins == INS_ldursb);
+ scale = 0;
+ isLdSt = true;
+ break;
- case INS_ldrb:
- case INS_strb:
- // size is ignored
- unscaledOp = false;
- scale = 0;
- isLdSt = true;
- break;
+ case INS_ldrsh:
+ case INS_ldursh:
+ // 'size' specifies how we sign-extend into 4 or 8 bytes of the target register
+ assert(isValidGeneralDatasize(size));
+ unscaledOp = (ins == INS_ldursh);
+ scale = 1;
+ isLdSt = true;
+ break;
- case INS_ldurb:
- case INS_sturb:
- // size is ignored
- unscaledOp = true;
- scale = 0;
- isLdSt = true;
- break;
+ case INS_ldrsw:
+ case INS_ldursw:
+ // 'size' specifies how we sign-extend into 4 or 8 bytes of the target register
+ assert(size == EA_8BYTE);
+ unscaledOp = (ins == INS_ldursw);
+ scale = 2;
+ isLdSt = true;
+ break;
- case INS_ldrh:
- case INS_strh:
- // size is ignored
- unscaledOp = false;
- scale = 1;
- isLdSt = true;
- break;
+ case INS_ldrb:
+ case INS_strb:
+ // size is ignored
+ unscaledOp = false;
+ scale = 0;
+ isLdSt = true;
+ break;
- case INS_ldurh:
- case INS_sturh:
- // size is ignored
- unscaledOp = true;
- scale = 0;
- isLdSt = true;
- break;
+ case INS_ldurb:
+ case INS_sturb:
+ // size is ignored
+ unscaledOp = true;
+ scale = 0;
+ isLdSt = true;
+ break;
- case INS_ldr:
- case INS_str:
- // Is the target a vector register?
- if (isVectorRegister(reg1))
- {
- assert(isValidVectorLSDatasize(size));
- assert(isGeneralRegisterOrSP(reg2));
- isSIMD = true;
- }
- else
- {
- assert(isValidGeneralDatasize(size));
- }
- unscaledOp = false;
- scale = NaturalScale_helper(size);
- isLdSt = true;
- break;
+ case INS_ldrh:
+ case INS_strh:
+ // size is ignored
+ unscaledOp = false;
+ scale = 1;
+ isLdSt = true;
+ break;
- case INS_ldur:
- case INS_stur:
- // Is the target a vector register?
- if (isVectorRegister(reg1))
- {
- assert(isValidVectorLSDatasize(size));
- assert(isGeneralRegisterOrSP(reg2));
- isSIMD = true;
- }
- else
- {
- assert(isValidGeneralDatasize(size));
- }
- unscaledOp = true;
- scale = 0;
- isLdSt = true;
- break;
+ case INS_ldurh:
+ case INS_sturh:
+ // size is ignored
+ unscaledOp = true;
+ scale = 0;
+ isLdSt = true;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ case INS_ldr:
+ case INS_str:
+ // Is the target a vector register?
+ if (isVectorRegister(reg1))
+ {
+ assert(isValidVectorLSDatasize(size));
+ assert(isGeneralRegisterOrSP(reg2));
+ isSIMD = true;
+ }
+ else
+ {
+ assert(isValidGeneralDatasize(size));
+ }
+ unscaledOp = false;
+ scale = NaturalScale_helper(size);
+ isLdSt = true;
+ break;
+
+ case INS_ldur:
+ case INS_stur:
+ // Is the target a vector register?
+ if (isVectorRegister(reg1))
+ {
+ assert(isValidVectorLSDatasize(size));
+ assert(isGeneralRegisterOrSP(reg2));
+ isSIMD = true;
+ }
+ else
+ {
+ assert(isValidGeneralDatasize(size));
+ }
+ unscaledOp = true;
+ scale = 0;
+ isLdSt = true;
+ break;
- } // end switch (ins)
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
+
+ } // end switch (ins)
if (isLdSt)
{
@@ -4671,7 +4608,7 @@ void emitter::emitIns_R_R_I(instruction ins,
}
assert(isGeneralRegisterOrSP(reg2));
-
+
// Load/Store reserved encodings:
if (insOptsIndexed(opt))
{
@@ -4680,10 +4617,10 @@ void emitter::emitIns_R_R_I(instruction ins,
reg2 = encodingSPtoZR(reg2);
- ssize_t mask = (1 << scale) - 1; // the mask of low bits that must be zero to encode the immediate
+ ssize_t mask = (1 << scale) - 1; // the mask of low bits that must be zero to encode the immediate
if (imm == 0)
{
- assert(insOptsNone(opt)); // PRE/POST Index doesn't make sense with an immediate of zero
+ assert(insOptsNone(opt)); // PRE/POST Index doesn't make sense with an immediate of zero
fmt = IF_LS_2A;
}
@@ -4696,7 +4633,7 @@ void emitter::emitIns_R_R_I(instruction ins,
else
{
assert(!"Instruction cannot be encoded: IF_LS_2C");
- }
+ }
}
else if (imm > 0)
{
@@ -4705,14 +4642,14 @@ void emitter::emitIns_R_R_I(instruction ins,
if (((imm & mask) == 0) && ((imm >> scale) < 0x1000))
{
- imm >>= scale; // The immediate is scaled by the size of the ld/st
+ imm >>= scale; // The immediate is scaled by the size of the ld/st
fmt = IF_LS_2B;
}
else
{
assert(!"Instruction cannot be encoded: IF_LS_2B");
- }
+ }
}
}
else if (isAddSub)
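
Aside on the load/store hunk above (illustrative, not part of the commit): an IF_LS_2B-style offset is only encodable when its low 'scale' bits are zero and the scaled value fits in 12 bits, which is exactly what the mask/shift test preserved by this reformatting checks. A minimal standalone sketch of that test, with a hypothetical helper name:

    #include <cstdint>

    // Illustrative sketch only (not part of this change): the unsigned scaled
    // 12-bit offset test used by the load/store path above. 'scale' is log2 of
    // the access size, so an 8-byte ldr/str uses scale 3. Assumes imm >= 0, as
    // in the surrounding code path.
    static bool canEncodeScaledUimm12(int64_t imm, unsigned scale)
    {
        int64_t mask = (int64_t(1) << scale) - 1; // low bits that must be zero
        if ((imm & mask) != 0)
        {
            return false; // offset is not a multiple of the access size
        }
        return (imm >> scale) < 0x1000; // scaled offset must fit in 12 bits
    }

For an 8-byte access this admits byte offsets 0 through 32760 in steps of 8; anything else reaches the "cannot be encoded" assert above.
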
@@ -4720,7 +4657,7 @@ void emitter::emitIns_R_R_I(instruction ins,
assert(!isLdSt);
assert(insOptsNone(opt));
- if (setFlags) // Can't encode SP with setFlags
+ if (setFlags) // Can't encode SP with setFlags
{
assert(isGeneralRegister(reg1));
assert(isGeneralRegister(reg2));
@@ -4755,7 +4692,7 @@ void emitter::emitIns_R_R_I(instruction ins,
assert(isValidUimm12(imm));
fmt = IF_DI_2A;
}
- else if (canEncodeWithShiftImmBy12(imm)) // Try the shifted by 12 encoding
+ else if (canEncodeWithShiftImmBy12(imm)) // Try the shifted by 12 encoding
{
// Encoding will use a 12-bit left shift of the immediate
opt = INS_OPTS_LSL12;
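
Aside (illustrative, not part of the commit): the fallback above relies on the A64 rule that an add/sub immediate is either a plain 12-bit value or a 12-bit value shifted left by 12, which is what selecting INS_OPTS_LSL12 expresses. A hedged sketch of that rule, with a made-up helper name:

    #include <cstdint>

    // Illustrative sketch only: an add/sub immediate is encodable either as a
    // plain 12-bit value or, when its low 12 bits are zero, as a 12-bit value
    // shifted left by 12 -- the INS_OPTS_LSL12 form chosen above.
    static bool isEncodableAddSubImm(uint64_t imm)
    {
        if (imm <= 0xFFF)
        {
            return true; // uimm12
        }
        return ((imm & 0xFFF) == 0) && ((imm >> 12) <= 0xFFF); // uimm12, LSL #12
    }

For example 0x567000 encodes as 0x567 with LSL #12, while 0x567800 does not and has to be expanded another way.
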
@@ -4777,7 +4714,7 @@ void emitter::emitIns_R_R_I(instruction ins,
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrSC(attr, imm);
+ instrDesc* id = emitNewInstrSC(attr, imm);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4800,11 +4737,7 @@ void emitter::emitIns_R_R_I(instruction ins,
* - Requires that reg1 is a general register and not SP or ZR
* - Requires that reg1 != reg2
*/
-void emitter::emitIns_R_R_Imm(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- ssize_t imm)
+void emitter::emitIns_R_R_Imm(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm)
{
assert(isGeneralRegister(reg1));
assert(reg1 != reg2);
@@ -4813,24 +4746,24 @@ void emitter::emitIns_R_R_Imm(instruction ins,
switch (ins)
{
- case INS_add:
- case INS_adds:
- case INS_sub:
- case INS_subs:
- immFits = emitter::emitIns_valid_imm_for_add(imm, attr);
- break;
+ case INS_add:
+ case INS_adds:
+ case INS_sub:
+ case INS_subs:
+ immFits = emitter::emitIns_valid_imm_for_add(imm, attr);
+ break;
- case INS_ands:
- case INS_and:
- case INS_eor:
- case INS_orr:
- immFits = emitter::emitIns_valid_imm_for_alu(imm, attr);
- break;
+ case INS_ands:
+ case INS_and:
+ case INS_eor:
+ case INS_orr:
+ immFits = emitter::emitIns_valid_imm_for_alu(imm, attr);
+ break;
- default:
- assert(!"Unsupported instruction in emitIns_R_R_Imm");
+ default:
+ assert(!"Unsupported instruction in emitIns_R_R_Imm");
}
-
+
if (immFits)
{
emitIns_R_R_I(ins, attr, reg1, reg2, imm);
@@ -4850,275 +4783,271 @@ void emitter::emitIns_R_R_Imm(instruction ins,
* Add an instruction referencing three registers.
*/
-void emitter::emitIns_R_R_R(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- insOpts opt) /* = INS_OPTS_NONE */
+void emitter::emitIns_R_R_R(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, insOpts opt) /* = INS_OPTS_NONE */
{
- emitAttr size = EA_SIZE(attr);
- emitAttr elemsize = EA_UNKNOWN;
- insFormat fmt = IF_NONE;
+ emitAttr size = EA_SIZE(attr);
+ emitAttr elemsize = EA_UNKNOWN;
+ insFormat fmt = IF_NONE;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_lsl:
- case INS_lsr:
- case INS_asr:
- case INS_ror:
- case INS_adc:
- case INS_adcs:
- case INS_sbc:
- case INS_sbcs:
- case INS_udiv:
- case INS_sdiv:
- case INS_mneg:
- case INS_smull:
- case INS_smnegl:
- case INS_smulh:
- case INS_umull:
- case INS_umnegl:
- case INS_umulh:
- case INS_lslv:
- case INS_lsrv:
- case INS_asrv:
- case INS_rorv:
- assert(insOptsNone(opt));
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- assert(isGeneralRegister(reg3));
- fmt = IF_DR_3A;
- break;
-
- case INS_mul:
- if (insOptsNone(opt))
- {
- // general register
+ case INS_lsl:
+ case INS_lsr:
+ case INS_asr:
+ case INS_ror:
+ case INS_adc:
+ case INS_adcs:
+ case INS_sbc:
+ case INS_sbcs:
+ case INS_udiv:
+ case INS_sdiv:
+ case INS_mneg:
+ case INS_smull:
+ case INS_smnegl:
+ case INS_smulh:
+ case INS_umull:
+ case INS_umnegl:
+ case INS_umulh:
+ case INS_lslv:
+ case INS_lsrv:
+ case INS_asrv:
+ case INS_rorv:
+ assert(insOptsNone(opt));
assert(isValidGeneralDatasize(size));
assert(isGeneralRegister(reg1));
assert(isGeneralRegister(reg2));
assert(isGeneralRegister(reg3));
fmt = IF_DR_3A;
break;
- }
- __fallthrough;
- case INS_mla:
- case INS_mls:
- case INS_pmul:
- assert(insOptsAnyArrangement(opt));
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isVectorRegister(reg3));
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- if (ins == INS_pmul)
- {
- assert(elemsize == EA_1BYTE); // only supports 8B or 16B
- }
- else // INS_mul, INS_mla, INS_mls
- {
- assert(elemsize != EA_8BYTE); // can't use 2D or 1D
- }
- fmt = IF_DV_3A;
- break;
+ case INS_mul:
+ if (insOptsNone(opt))
+ {
+ // general register
+ assert(isValidGeneralDatasize(size));
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ assert(isGeneralRegister(reg3));
+ fmt = IF_DR_3A;
+ break;
+ }
+ __fallthrough;
- case INS_add:
- case INS_sub:
- if (isVectorRegister(reg1))
- {
+ case INS_mla:
+ case INS_mls:
+ case INS_pmul:
+ assert(insOptsAnyArrangement(opt));
+ assert(isVectorRegister(reg1));
assert(isVectorRegister(reg2));
assert(isVectorRegister(reg3));
-
- if (insOptsAnyArrangement(opt))
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ if (ins == INS_pmul)
{
- // Vector operation
- assert(opt != INS_OPTS_1D); // Reserved encoding
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- fmt = IF_DV_3A;
+ assert(elemsize == EA_1BYTE); // only supports 8B or 16B
}
- else
+ else // INS_mul, INS_mla, INS_mls
{
- // Scalar operation
- assert(insOptsNone(opt));
- assert(size == EA_8BYTE);
- fmt = IF_DV_3E;
+ assert(elemsize != EA_8BYTE); // can't use 2D or 1D
}
+ fmt = IF_DV_3A;
break;
- }
- __fallthrough;
-
- case INS_adds:
- case INS_subs:
- emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0, INS_OPTS_NONE);
- return;
- case INS_saba:
- case INS_sabd:
- case INS_uaba:
- case INS_uabd:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isVectorRegister(reg3));
- assert(insOptsAnyArrangement(opt));
+ case INS_add:
+ case INS_sub:
+ if (isVectorRegister(reg1))
+ {
+ assert(isVectorRegister(reg2));
+ assert(isVectorRegister(reg3));
- // Vector operation
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(elemsize != EA_8BYTE); // can't use 2D or 1D
+ if (insOptsAnyArrangement(opt))
+ {
+ // Vector operation
+ assert(opt != INS_OPTS_1D); // Reserved encoding
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ fmt = IF_DV_3A;
+ }
+ else
+ {
+ // Scalar operation
+ assert(insOptsNone(opt));
+ assert(size == EA_8BYTE);
+ fmt = IF_DV_3E;
+ }
+ break;
+ }
+ __fallthrough;
- fmt = IF_DV_3A;
- break;
+ case INS_adds:
+ case INS_subs:
+ emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0, INS_OPTS_NONE);
+ return;
- case INS_mov:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(reg2 == reg3);
- assert(isValidVectorDatasize(size));
- // INS_mov is an alias for INS_orr (vector register)
- if (opt == INS_OPTS_NONE)
- {
- elemsize = EA_1BYTE;
- opt = optMakeArrangement(size, elemsize);
- }
- assert(isValidArrangement(size, opt));
- fmt = IF_DV_3C;
- break;
+ case INS_saba:
+ case INS_sabd:
+ case INS_uaba:
+ case INS_uabd:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isVectorRegister(reg3));
+ assert(insOptsAnyArrangement(opt));
- case INS_and:
- case INS_bic:
- case INS_eor:
- case INS_orr:
- case INS_orn:
- if (isVectorRegister(reg1))
- {
+ // Vector operation
assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(elemsize != EA_8BYTE); // can't use 2D or 1D
+
+ fmt = IF_DV_3A;
+ break;
+
+ case INS_mov:
+ assert(isVectorRegister(reg1));
assert(isVectorRegister(reg2));
- assert(isVectorRegister(reg3));
+ assert(reg2 == reg3);
+ assert(isValidVectorDatasize(size));
+ // INS_mov is an alias for INS_orr (vector register)
if (opt == INS_OPTS_NONE)
{
elemsize = EA_1BYTE;
- opt = optMakeArrangement(size, elemsize);
+ opt = optMakeArrangement(size, elemsize);
}
assert(isValidArrangement(size, opt));
fmt = IF_DV_3C;
break;
- }
- __fallthrough;
- case INS_ands:
- case INS_bics:
- case INS_eon:
- emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0, INS_OPTS_NONE);
- return;
+ case INS_and:
+ case INS_bic:
+ case INS_eor:
+ case INS_orr:
+ case INS_orn:
+ if (isVectorRegister(reg1))
+ {
+ assert(isValidVectorDatasize(size));
+ assert(isVectorRegister(reg2));
+ assert(isVectorRegister(reg3));
+ if (opt == INS_OPTS_NONE)
+ {
+ elemsize = EA_1BYTE;
+ opt = optMakeArrangement(size, elemsize);
+ }
+ assert(isValidArrangement(size, opt));
+ fmt = IF_DV_3C;
+ break;
+ }
+ __fallthrough;
- case INS_bsl:
- case INS_bit:
- case INS_bif:
- assert(isValidVectorDatasize(size));
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isVectorRegister(reg3));
- if (opt == INS_OPTS_NONE)
- {
- elemsize = EA_1BYTE;
- opt = optMakeArrangement(size, elemsize);
- }
- assert(isValidArrangement(size, opt));
- fmt = IF_DV_3C;
- break;
+ case INS_ands:
+ case INS_bics:
+ case INS_eon:
+ emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0, INS_OPTS_NONE);
+ return;
- case INS_fadd:
- case INS_fsub:
- case INS_fdiv:
- case INS_fmax:
- case INS_fmin:
- case INS_fabd:
- case INS_fmul:
- case INS_fmulx:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isVectorRegister(reg3));
- if (insOptsAnyArrangement(opt))
- {
- // Vector operation
+ case INS_bsl:
+ case INS_bit:
+ case INS_bif:
assert(isValidVectorDatasize(size));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isVectorRegister(reg3));
+ if (opt == INS_OPTS_NONE)
+ {
+ elemsize = EA_1BYTE;
+ opt = optMakeArrangement(size, elemsize);
+ }
assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(isValidVectorElemsizeFloat(elemsize));
- assert(opt != INS_OPTS_1D); // Reserved encoding
- fmt = IF_DV_3B;
- }
- else
- {
+ fmt = IF_DV_3C;
+ break;
+
+ case INS_fadd:
+ case INS_fsub:
+ case INS_fdiv:
+ case INS_fmax:
+ case INS_fmin:
+ case INS_fabd:
+ case INS_fmul:
+ case INS_fmulx:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isVectorRegister(reg3));
+ if (insOptsAnyArrangement(opt))
+ {
+ // Vector operation
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(isValidVectorElemsizeFloat(elemsize));
+ assert(opt != INS_OPTS_1D); // Reserved encoding
+ fmt = IF_DV_3B;
+ }
+ else
+ {
+ // Scalar operation
+ assert(insOptsNone(opt));
+ assert(isValidScalarDatasize(size));
+ fmt = IF_DV_3D;
+ }
+ break;
+
+ case INS_fnmul:
// Scalar operation
assert(insOptsNone(opt));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isVectorRegister(reg3));
assert(isValidScalarDatasize(size));
fmt = IF_DV_3D;
- }
- break;
+ break;
- case INS_fnmul:
- // Scalar operation
- assert(insOptsNone(opt));
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isVectorRegister(reg3));
- assert(isValidScalarDatasize(size));
- fmt = IF_DV_3D;
- break;
+ case INS_fmla:
+ case INS_fmls:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isVectorRegister(reg3));
+ assert(insOptsAnyArrangement(opt)); // no scalar encoding, use 4-operand 'fmadd' or 'fmsub'
- case INS_fmla:
- case INS_fmls:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isVectorRegister(reg3));
- assert(insOptsAnyArrangement(opt)); // no scalar encoding, use 4-operand 'fmadd' or 'fmsub'
-
- // Vector operation
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(isValidVectorElemsizeFloat(elemsize));
- assert(opt != INS_OPTS_1D); // Reserved encoding
- fmt = IF_DV_3B;
- break;
+ // Vector operation
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(isValidVectorElemsizeFloat(elemsize));
+ assert(opt != INS_OPTS_1D); // Reserved encoding
+ fmt = IF_DV_3B;
+ break;
- case INS_ldr:
- case INS_ldrb:
- case INS_ldrh:
- case INS_ldrsb:
- case INS_ldrsh:
- case INS_ldrsw:
- case INS_str:
- case INS_strb:
- case INS_strh:
- emitIns_R_R_R_Ext(ins, attr, reg1, reg2, reg3, opt);
- return;
+ case INS_ldr:
+ case INS_ldrb:
+ case INS_ldrh:
+ case INS_ldrsb:
+ case INS_ldrsh:
+ case INS_ldrsw:
+ case INS_str:
+ case INS_strb:
+ case INS_strh:
+ emitIns_R_R_R_Ext(ins, attr, reg1, reg2, reg3, opt);
+ return;
- case INS_ldp:
- case INS_ldpsw:
- case INS_ldnp:
- case INS_stp:
- case INS_stnp:
- emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0);
- return;
+ case INS_ldp:
+ case INS_ldpsw:
+ case INS_ldnp:
+ case INS_stp:
+ case INS_stnp:
+ emitIns_R_R_R_I(ins, attr, reg1, reg2, reg3, 0);
+ return;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
- } // end switch (ins)
+ } // end switch (ins)
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstr(attr);
+ instrDesc* id = emitNewInstr(attr);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -5132,160 +5061,159 @@ void emitter::emitIns_R_R_R(instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction referencing three registers and a constant.
*/
-void emitter::emitIns_R_R_R_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- ssize_t imm,
- insOpts opt /* = INS_OPTS_NONE */)
+void emitter::emitIns_R_R_R_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ ssize_t imm,
+ insOpts opt /* = INS_OPTS_NONE */)
{
- emitAttr size = EA_SIZE(attr);
- emitAttr elemsize = EA_UNKNOWN;
- insFormat fmt = IF_NONE;
- bool isLdSt = false;
- bool isSIMD = false;
- bool isAddSub = false;
- bool setFlags = false;
- unsigned scale = 0;
+ emitAttr size = EA_SIZE(attr);
+ emitAttr elemsize = EA_UNKNOWN;
+ insFormat fmt = IF_NONE;
+ bool isLdSt = false;
+ bool isSIMD = false;
+ bool isAddSub = false;
+ bool setFlags = false;
+ unsigned scale = 0;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_extr:
- assert(insOptsNone(opt));
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- assert(isGeneralRegister(reg3));
- assert(isValidImmShift(imm, size));
- fmt = IF_DR_3E;
- break;
+ case INS_extr:
+ assert(insOptsNone(opt));
+ assert(isValidGeneralDatasize(size));
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ assert(isGeneralRegister(reg3));
+ assert(isValidImmShift(imm, size));
+ fmt = IF_DR_3E;
+ break;
- case INS_and:
- case INS_ands:
- case INS_eor:
- case INS_orr:
- case INS_bic:
- case INS_bics:
- case INS_eon:
- case INS_orn:
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- assert(isGeneralRegister(reg3));
- assert(isValidImmShift(imm, size));
- if (imm == 0)
- {
- assert(insOptsNone(opt)); // a zero imm, means no shift kind
- fmt = IF_DR_3A;
- }
- else
- {
- assert(insOptsAnyShift(opt)); // a non-zero imm, must select shift kind
- fmt = IF_DR_3B;
- }
- break;
+ case INS_and:
+ case INS_ands:
+ case INS_eor:
+ case INS_orr:
+ case INS_bic:
+ case INS_bics:
+ case INS_eon:
+ case INS_orn:
+ assert(isValidGeneralDatasize(size));
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ assert(isGeneralRegister(reg3));
+ assert(isValidImmShift(imm, size));
+ if (imm == 0)
+ {
+ assert(insOptsNone(opt)); // a zero imm, means no shift kind
+ fmt = IF_DR_3A;
+ }
+ else
+ {
+ assert(insOptsAnyShift(opt)); // a non-zero imm, must select shift kind
+ fmt = IF_DR_3B;
+ }
+ break;
- case INS_fmul: // by element, imm[0..3] selects the element of reg3
- case INS_fmla:
- case INS_fmls:
- case INS_fmulx:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isVectorRegister(reg3));
- if (insOptsAnyArrangement(opt))
- {
+ case INS_fmul: // by element, imm[0..3] selects the element of reg3
+ case INS_fmla:
+ case INS_fmls:
+ case INS_fmulx:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isVectorRegister(reg3));
+ if (insOptsAnyArrangement(opt))
+ {
+ // Vector operation
+ assert(isValidVectorDatasize(size));
+ assert(isValidArrangement(size, opt));
+ elemsize = optGetElemsize(opt);
+ assert(isValidVectorElemsizeFloat(elemsize));
+ assert(isValidVectorIndex(size, elemsize, imm));
+ assert(opt != INS_OPTS_1D); // Reserved encoding
+ fmt = IF_DV_3BI;
+ }
+ else
+ {
+ // Scalar operation
+ assert(insOptsNone(opt));
+ assert(isValidScalarDatasize(size));
+ elemsize = size;
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
+ fmt = IF_DV_3DI;
+ }
+ break;
+
+ case INS_mul: // by element, imm[0..7] selects the element of reg3
+ case INS_mla:
+ case INS_mls:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isVectorRegister(reg3));
// Vector operation
+ assert(insOptsAnyArrangement(opt));
assert(isValidVectorDatasize(size));
assert(isValidArrangement(size, opt));
elemsize = optGetElemsize(opt);
- assert(isValidVectorElemsizeFloat(elemsize));
- assert(isValidVectorIndex(size, elemsize, imm));
- assert(opt != INS_OPTS_1D); // Reserved encoding
- fmt = IF_DV_3BI;
- }
- else
- {
- // Scalar operation
- assert(insOptsNone(opt));
- assert(isValidScalarDatasize(size));
- elemsize = size;
assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
- fmt = IF_DV_3DI;
- }
- break;
+ // Only has encodings for H or S elemsize
+ assert((elemsize == EA_2BYTE) || (elemsize == EA_4BYTE));
+ // Only has encodings for V0..V15
+ if ((elemsize == EA_2BYTE) && (reg3 >= REG_V16))
+ {
+ noway_assert(!"Invalid reg3");
+ }
+ fmt = IF_DV_3AI;
+ break;
- case INS_mul: // by element, imm[0..7] selects the element of reg3
- case INS_mla:
- case INS_mls:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isVectorRegister(reg3));
- // Vector operation
- assert(insOptsAnyArrangement(opt));
- assert(isValidVectorDatasize(size));
- assert(isValidArrangement(size, opt));
- elemsize = optGetElemsize(opt);
- assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
- // Only has encodings for H or S elemsize
- assert((elemsize == EA_2BYTE) || (elemsize == EA_4BYTE));
- // Only has encodings for V0..V15
- if ((elemsize == EA_2BYTE) && (reg3 >= REG_V16))
- {
- noway_assert(!"Invalid reg3");
- }
- fmt = IF_DV_3AI;
- break;
+ case INS_add:
+ case INS_sub:
+ setFlags = false;
+ isAddSub = true;
+ break;
- case INS_add:
- case INS_sub:
- setFlags = false;
- isAddSub = true;
- break;
+ case INS_adds:
+ case INS_subs:
+ setFlags = true;
+ isAddSub = true;
+ break;
- case INS_adds:
- case INS_subs:
- setFlags = true;
- isAddSub = true;
- break;
-
- case INS_ldpsw:
- scale = 2;
- isLdSt = true;
- break;
+ case INS_ldpsw:
+ scale = 2;
+ isLdSt = true;
+ break;
- case INS_ldnp:
- case INS_stnp:
- assert(insOptsNone(opt)); // Can't use Pre/Post index on these two instructions
- __fallthrough;
+ case INS_ldnp:
+ case INS_stnp:
+ assert(insOptsNone(opt)); // Can't use Pre/Post index on these two instructions
+ __fallthrough;
- case INS_ldp:
- case INS_stp:
- // Is the target a vector register?
- if (isVectorRegister(reg1))
- {
- scale = NaturalScale_helper(size);
- isSIMD = true;
- }
- else
- {
- scale = (size == EA_8BYTE) ? 3 : 2;
- }
- isLdSt = true;
- break;
+ case INS_ldp:
+ case INS_stp:
+ // Is the target a vector register?
+ if (isVectorRegister(reg1))
+ {
+ scale = NaturalScale_helper(size);
+ isSIMD = true;
+ }
+ else
+ {
+ scale = (size == EA_8BYTE) ? 3 : 2;
+ }
+ isLdSt = true;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
- } // end switch (ins)
+ } // end switch (ins)
if (isLdSt)
{
@@ -5321,10 +5249,10 @@ void emitter::emitIns_R_R_R_I(instruction ins,
reg3 = encodingSPtoZR(reg3);
- ssize_t mask = (1 << scale) - 1; // the mask of low bits that must be zero to encode the immediate
+ ssize_t mask = (1 << scale) - 1; // the mask of low bits that must be zero to encode the immediate
if (imm == 0)
{
- assert(insOptsNone(opt)); // PRE/POST Index doesn't make sense with an immediate of zero
+ assert(insOptsNone(opt)); // PRE/POST Index doesn't make sense with an immediate of zero
fmt = IF_LS_3B;
}
@@ -5332,7 +5260,7 @@ void emitter::emitIns_R_R_R_I(instruction ins,
{
if ((imm & mask) == 0)
{
- imm >>= scale; // The immediate is scaled by the size of the ld/st
+ imm >>= scale; // The immediate is scaled by the size of the ld/st
if ((imm >= -64) && (imm <= 63))
{
@@ -5353,8 +5281,8 @@ void emitter::emitIns_R_R_R_I(instruction ins,
assert(!isLdSt);
assert(isValidGeneralDatasize(size));
assert(isGeneralRegister(reg3));
-
- if (setFlags || insOptsAluShift(opt)) // Can't encode SP in reg1 with setFlags or AluShift option
+
+ if (setFlags || insOptsAluShift(opt)) // Can't encode SP in reg1 with setFlags or AluShift option
{
assert(isGeneralRegisterOrZR(reg1));
}
@@ -5364,7 +5292,7 @@ void emitter::emitIns_R_R_R_I(instruction ins,
reg1 = encodingSPtoZR(reg1);
}
- if (insOptsAluShift(opt)) // Can't encode SP in reg2 with AluShift option
+ if (insOptsAluShift(opt)) // Can't encode SP in reg2 with AluShift option
{
assert(isGeneralRegister(reg2));
}
@@ -5394,7 +5322,7 @@ void emitter::emitIns_R_R_R_I(instruction ins,
{
// To encode the SP register as reg2 we must use the IF_DR_3C encoding
// and also specify a LSL of zero (imm == 0)
- opt = INS_OPTS_LSL;
+ opt = INS_OPTS_LSL;
fmt = IF_DR_3C;
}
else
@@ -5409,7 +5337,7 @@ void emitter::emitIns_R_R_R_I(instruction ins,
}
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrCns(attr, imm);
+ instrDesc* id = emitNewInstrCns(attr, imm);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -5423,66 +5351,65 @@ void emitter::emitIns_R_R_R_I(instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
- * Add an instruction referencing three registers, with an extend option
+ * Add an instruction referencing three registers, with an extend option
*/
-void emitter::emitIns_R_R_R_Ext(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- insOpts opt, /* = INS_OPTS_NONE */
- int shiftAmount) /* = -1 -- unset */
+void emitter::emitIns_R_R_R_Ext(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ insOpts opt, /* = INS_OPTS_NONE */
+ int shiftAmount) /* = -1 -- unset */
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
- bool isSIMD = false;
- int scale = -1;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
+ bool isSIMD = false;
+ int scale = -1;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_ldrb:
- case INS_ldrsb:
- case INS_strb:
- scale = 0;
- break;
+ case INS_ldrb:
+ case INS_ldrsb:
+ case INS_strb:
+ scale = 0;
+ break;
- case INS_ldrh:
- case INS_ldrsh:
- case INS_strh:
- scale = 1;
- break;
+ case INS_ldrh:
+ case INS_ldrsh:
+ case INS_strh:
+ scale = 1;
+ break;
- case INS_ldrsw:
- scale = 2;
- break;
+ case INS_ldrsw:
+ scale = 2;
+ break;
- case INS_ldr:
- case INS_str:
- // Is the target a vector register?
- if (isVectorRegister(reg1))
- {
- assert(isValidVectorLSDatasize(size));
- scale = NaturalScale_helper(size);
- isSIMD = true;
- }
- else
- {
- assert(isValidGeneralDatasize(size));
- scale = (size == EA_8BYTE) ? 3 : 2;
- }
+ case INS_ldr:
+ case INS_str:
+ // Is the target a vector register?
+ if (isVectorRegister(reg1))
+ {
+ assert(isValidVectorLSDatasize(size));
+ scale = NaturalScale_helper(size);
+ isSIMD = true;
+ }
+ else
+ {
+ assert(isValidGeneralDatasize(size));
+ scale = (size == EA_8BYTE) ? 3 : 2;
+ }
- break;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
- } // end switch (ins)
+ } // end switch (ins)
assert(scale != -1);
assert(insOptsLSExtend(opt));
@@ -5508,7 +5435,7 @@ void emitter::emitIns_R_R_R_Ext(instruction ins,
}
if (shiftAmount == -1)
- {
+ {
shiftAmount = insOptsLSL(opt) ? scale : 0;
}
assert((shiftAmount == scale) || (shiftAmount == 0));
@@ -5516,7 +5443,7 @@ void emitter::emitIns_R_R_R_Ext(instruction ins,
reg2 = encodingSPtoZR(reg2);
fmt = IF_LS_3A;
- instrDesc * id = emitNewInstr(attr);
+ instrDesc* id = emitNewInstr(attr);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -5531,100 +5458,93 @@ void emitter::emitIns_R_R_R_Ext(instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction referencing two registers and two constants.
*/
-void emitter::emitIns_R_R_I_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- int imm1,
- int imm2)
+void emitter::emitIns_R_R_I_I(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int imm1, int imm2)
{
- emitAttr size = EA_SIZE(attr);
- emitAttr elemsize = EA_UNKNOWN;
- insFormat fmt = IF_NONE;
- size_t immOut = 0; // composed from imm1 and imm2 and stored in the instrDesc
+ emitAttr size = EA_SIZE(attr);
+ emitAttr elemsize = EA_UNKNOWN;
+ insFormat fmt = IF_NONE;
+ size_t immOut = 0; // composed from imm1 and imm2 and stored in the instrDesc
/* Figure out the encoding format of the instruction */
switch (ins)
{
- int lsb;
- int width;
- bitMaskImm bmi;
-
- case INS_bfm:
- case INS_sbfm:
- case INS_ubfm:
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- assert(isValidImmShift(imm1, size));
- assert(isValidImmShift(imm2, size));
- bmi.immNRS = 0;
- bmi.immN = (size == EA_8BYTE);
- bmi.immR = imm1;
- bmi.immS = imm2;
- immOut = bmi.immNRS;
- fmt = IF_DI_2D;
- break;
+ int lsb;
+ int width;
+ bitMaskImm bmi;
- case INS_bfi:
- case INS_sbfiz:
- case INS_ubfiz:
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- lsb = getBitWidth(size) - imm1;
- width = imm2 - 1;
- assert(isValidImmShift(lsb, size));
- assert(isValidImmShift(width, size));
- bmi.immNRS = 0;
- bmi.immN = (size == EA_8BYTE);
- bmi.immR = lsb;
- bmi.immS = width;
- immOut = bmi.immNRS;
- fmt = IF_DI_2D;
- break;
+ case INS_bfm:
+ case INS_sbfm:
+ case INS_ubfm:
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ assert(isValidImmShift(imm1, size));
+ assert(isValidImmShift(imm2, size));
+ bmi.immNRS = 0;
+ bmi.immN = (size == EA_8BYTE);
+ bmi.immR = imm1;
+ bmi.immS = imm2;
+ immOut = bmi.immNRS;
+ fmt = IF_DI_2D;
+ break;
- case INS_bfxil:
- case INS_sbfx:
- case INS_ubfx:
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- lsb = imm1;
- width = imm2 + imm1 - 1;
- assert(isValidImmShift(lsb, size));
- assert(isValidImmShift(width, size));
- bmi.immNRS = 0;
- bmi.immN = (size == EA_8BYTE);
- bmi.immR = imm1;
- bmi.immS = imm2 + imm1 - 1;
- immOut = bmi.immNRS;
- fmt = IF_DI_2D;
- break;
+ case INS_bfi:
+ case INS_sbfiz:
+ case INS_ubfiz:
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ lsb = getBitWidth(size) - imm1;
+ width = imm2 - 1;
+ assert(isValidImmShift(lsb, size));
+ assert(isValidImmShift(width, size));
+ bmi.immNRS = 0;
+ bmi.immN = (size == EA_8BYTE);
+ bmi.immR = lsb;
+ bmi.immS = width;
+ immOut = bmi.immNRS;
+ fmt = IF_DI_2D;
+ break;
- case INS_mov:
- case INS_ins:
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- elemsize = size;
- assert(isValidVectorElemsize(elemsize));
- assert(isValidVectorIndex(EA_16BYTE, elemsize, imm1));
- assert(isValidVectorIndex(EA_16BYTE, elemsize, imm2));
- immOut = (imm1 << 4) + imm2;
- fmt = IF_DV_2F;
- break;
+ case INS_bfxil:
+ case INS_sbfx:
+ case INS_ubfx:
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ lsb = imm1;
+ width = imm2 + imm1 - 1;
+ assert(isValidImmShift(lsb, size));
+ assert(isValidImmShift(width, size));
+ bmi.immNRS = 0;
+ bmi.immN = (size == EA_8BYTE);
+ bmi.immR = imm1;
+ bmi.immS = imm2 + imm1 - 1;
+ immOut = bmi.immNRS;
+ fmt = IF_DI_2D;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ case INS_mov:
+ case INS_ins:
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ elemsize = size;
+ assert(isValidVectorElemsize(elemsize));
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, imm1));
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, imm2));
+ immOut = (imm1 << 4) + imm2;
+ fmt = IF_DV_2F;
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
}
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrSC(attr, immOut);
+ instrDesc* id = emitNewInstrSC(attr, immOut);
id->idIns(ins);
id->idInsFmt(fmt);
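
Aside on the emitIns_R_R_I_I cases above (illustrative, not part of the commit): the bfi/sbfiz/ubfiz and bfxil/sbfx/ubfx cases fold their (lsb, width) operands into the underlying bfm-style immN/immR/immS fields. A small sketch of the 64-bit bfi mapping, with made-up names; the % 64 wrap below is the general architectural mapping, whereas the emitter path above relies on its shift-range asserts to keep the computed values in bounds:

    // Illustrative sketch only: how a 64-bit "bfi Rd, Rn, #lsb, #width" maps
    // onto bfm-style fields, mirroring the arithmetic in the INS_bfi case:
    //   immN = 1 (64-bit form), immR = (64 - lsb) % 64, immS = width - 1
    // e.g. bfi x0, x1, #8, #16  ->  immR = 56, immS = 15
    struct BitfieldImm
    {
        unsigned immN;
        unsigned immR;
        unsigned immS;
    };

    static BitfieldImm bfiFields64(unsigned lsb, unsigned width)
    {
        BitfieldImm f;
        f.immN = 1;               // 64-bit variant
        f.immR = (64 - lsb) % 64; // rotate amount
        f.immS = width - 1;       // field width minus one
        return f;
    }
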
@@ -5641,58 +5561,53 @@ void emitter::emitIns_R_R_I_I(instruction ins,
* Add an instruction referencing four registers.
*/
-void emitter::emitIns_R_R_R_R(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- regNumber reg4)
+void emitter::emitIns_R_R_R_R(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, regNumber reg4)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_madd:
- case INS_msub:
- case INS_smaddl:
- case INS_smsubl:
- case INS_umaddl:
- case INS_umsubl:
- assert(isValidGeneralDatasize(size));
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- assert(isGeneralRegister(reg3));
- assert(isGeneralRegister(reg4));
- fmt = IF_DR_4A;
- break;
-
- case INS_fmadd:
- case INS_fmsub:
- case INS_fnmadd:
- case INS_fnmsub:
- // Scalar operation
- assert(isValidScalarDatasize(size));
- assert(isVectorRegister(reg1));
- assert(isVectorRegister(reg2));
- assert(isVectorRegister(reg3));
- assert(isVectorRegister(reg4));
- fmt = IF_DV_4A;
- break;
+ case INS_madd:
+ case INS_msub:
+ case INS_smaddl:
+ case INS_smsubl:
+ case INS_umaddl:
+ case INS_umsubl:
+ assert(isValidGeneralDatasize(size));
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ assert(isGeneralRegister(reg3));
+ assert(isGeneralRegister(reg4));
+ fmt = IF_DR_4A;
+ break;
- case INS_invalid:
- fmt = IF_NONE;
- break;
+ case INS_fmadd:
+ case INS_fmsub:
+ case INS_fnmadd:
+ case INS_fnmsub:
+ // Scalar operation
+ assert(isValidScalarDatasize(size));
+ assert(isVectorRegister(reg1));
+ assert(isVectorRegister(reg2));
+ assert(isVectorRegister(reg3));
+ assert(isVectorRegister(reg4));
+ fmt = IF_DV_4A;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ case INS_invalid:
+ fmt = IF_NONE;
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
}
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstr(attr);
+ instrDesc* id = emitNewInstr(attr);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -5706,43 +5621,38 @@ void emitter::emitIns_R_R_R_R(instruction ins,
appendToCurIG(id);
}
-
-
/*****************************************************************************
*
* Add an instruction referencing a register and a condition code
*/
-void emitter::emitIns_R_COND(instruction ins,
- emitAttr attr,
- regNumber reg,
- insCond cond)
+void emitter::emitIns_R_COND(instruction ins, emitAttr attr, regNumber reg, insCond cond)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
condFlagsImm cfi;
cfi.immCFVal = 0;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_cset:
- case INS_csetm:
- assert(isGeneralRegister(reg));
- cfi.cond = cond;
- fmt = IF_DR_1D;
- break;
+ case INS_cset:
+ case INS_csetm:
+ assert(isGeneralRegister(reg));
+ cfi.cond = cond;
+ fmt = IF_DR_1D;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
- } // end switch (ins)
+ } // end switch (ins)
assert(fmt != IF_NONE);
assert(isValidImmCond(cfi.immCFVal));
- instrDesc * id = emitNewInstrSC(attr, cfi.immCFVal);
+ instrDesc* id = emitNewInstrSC(attr, cfi.immCFVal);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -5759,38 +5669,34 @@ void emitter::emitIns_R_COND(instruction ins,
* Add an instruction referencing two registers and a condition code
*/
-void emitter::emitIns_R_R_COND(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- insCond cond)
+void emitter::emitIns_R_R_COND(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insCond cond)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
condFlagsImm cfi;
cfi.immCFVal = 0;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_cinc:
- case INS_cinv:
- case INS_cneg:
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- cfi.cond = cond;
- fmt = IF_DR_2D;
- break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ case INS_cinc:
+ case INS_cinv:
+ case INS_cneg:
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ cfi.cond = cond;
+ fmt = IF_DR_2D;
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
- } // end switch (ins)
+ } // end switch (ins)
assert(fmt != IF_NONE);
assert(isValidImmCond(cfi.immCFVal));
- instrDesc * id = emitNewInstrSC(attr, cfi.immCFVal);
+ instrDesc* id = emitNewInstrSC(attr, cfi.immCFVal);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -5808,42 +5714,38 @@ void emitter::emitIns_R_R_COND(instruction ins,
* Add an instruction referencing two registers and a condition code
*/
-void emitter::emitIns_R_R_R_COND(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- insCond cond)
+void emitter::emitIns_R_R_R_COND(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, insCond cond)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
condFlagsImm cfi;
cfi.immCFVal = 0;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_csel:
- case INS_csinc:
- case INS_csinv:
- case INS_csneg:
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- assert(isGeneralRegister(reg3));
- cfi.cond = cond;
- fmt = IF_DR_3D;
- break;
+ case INS_csel:
+ case INS_csinc:
+ case INS_csinv:
+ case INS_csneg:
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ assert(isGeneralRegister(reg3));
+ cfi.cond = cond;
+ fmt = IF_DR_3D;
+ break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
- } // end switch (ins)
+ } // end switch (ins)
assert(fmt != IF_NONE);
assert(isValidImmCond(cfi.immCFVal));
- instrDesc * id = emitNewInstr(attr);
+ instrDesc* id = emitNewInstr(attr);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -5863,38 +5765,34 @@ void emitter::emitIns_R_R_R_COND(instruction ins,
* Add an instruction referencing two registers the flags and a condition code
*/
-void emitter::emitIns_R_R_FLAGS_COND (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- insCflags flags,
- insCond cond)
+void emitter::emitIns_R_R_FLAGS_COND(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insCflags flags, insCond cond)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
condFlagsImm cfi;
cfi.immCFVal = 0;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_ccmp:
- case INS_ccmn:
- assert(isGeneralRegister(reg1));
- assert(isGeneralRegister(reg2));
- cfi.flags = flags;
- cfi.cond = cond;
- fmt = IF_DR_2I;
- break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
- } // end switch (ins)
+ case INS_ccmp:
+ case INS_ccmn:
+ assert(isGeneralRegister(reg1));
+ assert(isGeneralRegister(reg2));
+ cfi.flags = flags;
+ cfi.cond = cond;
+ fmt = IF_DR_2I;
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
+ } // end switch (ins)
assert(fmt != IF_NONE);
assert(isValidImmCondFlags(cfi.immCFVal));
- instrDesc * id = emitNewInstrSC(attr, cfi.immCFVal);
+ instrDesc* id = emitNewInstrSC(attr, cfi.immCFVal);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -5912,50 +5810,46 @@ void emitter::emitIns_R_R_FLAGS_COND (instruction ins,
* Add an instruction referencing a register, an immediate, the flags and a condition code
*/
-void emitter::emitIns_R_I_FLAGS_COND (instruction ins,
- emitAttr attr,
- regNumber reg,
- int imm,
- insCflags flags,
- insCond cond)
+void emitter::emitIns_R_I_FLAGS_COND(
+ instruction ins, emitAttr attr, regNumber reg, int imm, insCflags flags, insCond cond)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
condFlagsImm cfi;
cfi.immCFVal = 0;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_ccmp:
- case INS_ccmn:
- assert(isGeneralRegister(reg));
- if (imm < 0)
- {
- ins = insReverse(ins);
- imm = -imm;
- }
- if ((imm >= 0) && (imm <= 31))
- {
- cfi.imm5 = imm;
- cfi.flags = flags;
- cfi.cond = cond;
- fmt = IF_DI_1F;
- }
- else
- {
- assert(!"Instruction cannot be encoded: ccmp/ccmn imm5");
- }
- break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
- } // end switch (ins)
+ case INS_ccmp:
+ case INS_ccmn:
+ assert(isGeneralRegister(reg));
+ if (imm < 0)
+ {
+ ins = insReverse(ins);
+ imm = -imm;
+ }
+ if ((imm >= 0) && (imm <= 31))
+ {
+ cfi.imm5 = imm;
+ cfi.flags = flags;
+ cfi.cond = cond;
+ fmt = IF_DI_1F;
+ }
+ else
+ {
+ assert(!"Instruction cannot be encoded: ccmp/ccmn imm5");
+ }
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
+ } // end switch (ins)
assert(fmt != IF_NONE);
assert(isValidImmCondFlagsImm5(cfi.immCFVal));
- instrDesc * id = emitNewInstrSC(attr, cfi.immCFVal);
+ instrDesc* id = emitNewInstrSC(attr, cfi.immCFVal);
id->idIns(ins);
id->idInsFmt(fmt);
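
Aside on the ccmp/ccmn case above (illustrative, not part of the commit): these instructions carry only a 5-bit unsigned immediate, so a negative operand is folded away by switching to the complementary instruction and negating the value, which is what the insReverse() plus negation step does. A hedged sketch with made-up types and helper name:

    enum CondCmpIns
    {
        CCMP,
        CCMN
    };

    // Illustrative sketch only: normalize a conditional-compare immediate into
    // the 0..31 range by swapping ccmp <-> ccmn when the value is negative.
    // Returns false when the value still cannot be encoded.
    static bool normalizeCondCmpImm(CondCmpIns& ins, int& imm)
    {
        if (imm < 0)
        {
            ins = (ins == CCMP) ? CCMN : CCMP; // ccmp Rn, -n behaves like ccmn Rn, n
            imm = -imm;
        }
        return (imm >= 0) && (imm <= 31); // must fit the imm5 field
    }

Values still outside 0..31 after normalization hit the "cannot be encoded" assert above.
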
@@ -5969,33 +5863,32 @@ void emitter::emitIns_R_I_FLAGS_COND (instruction ins,
/*****************************************************************************
*
- * Add a memory barrier instruction with a 'barrier' immediate
+ * Add a memory barrier instruction with a 'barrier' immediate
*/
-void emitter::emitIns_BARR (instruction ins,
- insBarrier barrier)
+void emitter::emitIns_BARR(instruction ins, insBarrier barrier)
{
- insFormat fmt = IF_NONE;
- ssize_t imm = 0;
+ insFormat fmt = IF_NONE;
+ ssize_t imm = 0;
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_dsb:
- case INS_dmb:
- case INS_isb:
+ case INS_dsb:
+ case INS_dmb:
+ case INS_isb:
- fmt = IF_SI_0B;
- imm = (ssize_t) barrier;
- break;
- default:
- // TODO-Cleanup: add unreached() here
- break;
- } // end switch (ins)
+ fmt = IF_SI_0B;
+ imm = (ssize_t)barrier;
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
+ } // end switch (ins)
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrSC(EA_8BYTE, imm);
+ instrDesc* id = emitNewInstrSC(EA_8BYTE, imm);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -6012,43 +5905,31 @@ void emitter::emitIns_BARR (instruction ins,
* value (e.g. "push offset clsvar", rather than "push dword ptr [clsvar]").
*/
-void emitter::emitIns_C (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs)
+void emitter::emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
NYI("emitIns_C");
}
-
/*****************************************************************************
*
* Add an instruction referencing stack-based local variable.
*/
-void emitter::emitIns_S (instruction ins,
- emitAttr attr,
- int varx,
- int offs)
+void emitter::emitIns_S(instruction ins, emitAttr attr, int varx, int offs)
{
NYI("emitIns_S");
}
-
/*****************************************************************************
*
* Add an instruction referencing a register and a stack-based local variable.
*/
-void emitter::emitIns_R_S (instruction ins,
- emitAttr attr,
- regNumber reg1,
- int varx,
- int offs)
+void emitter::emitIns_R_S(instruction ins, emitAttr attr, regNumber reg1, int varx, int offs)
{
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
- int disp = 0;
- unsigned scale = 0;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
+ int disp = 0;
+ unsigned scale = 0;
assert(offs >= 0);
@@ -6056,50 +5937,50 @@ void emitter::emitIns_R_S (instruction ins,
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_strb:
- case INS_ldrb:
- case INS_ldrsb:
- scale = 0;
- break;
+ case INS_strb:
+ case INS_ldrb:
+ case INS_ldrsb:
+ scale = 0;
+ break;
- case INS_strh:
- case INS_ldrh:
- case INS_ldrsh:
- scale = 1;
- break;
+ case INS_strh:
+ case INS_ldrh:
+ case INS_ldrsh:
+ scale = 1;
+ break;
- case INS_ldrsw:
- scale = 2;
- break;
+ case INS_ldrsw:
+ scale = 2;
+ break;
- case INS_str:
- case INS_ldr:
- assert(isValidGeneralDatasize(size));
- scale = (size == EA_8BYTE) ? 3 : 2;
- break;
+ case INS_str:
+ case INS_ldr:
+ assert(isValidGeneralDatasize(size));
+ scale = (size == EA_8BYTE) ? 3 : 2;
+ break;
- case INS_lea:
- assert(size == EA_8BYTE);
- scale = 0;
- break;
+ case INS_lea:
+ assert(size == EA_8BYTE);
+ scale = 0;
+ break;
- default:
- NYI("emitIns_R_S"); // FP locals?
- return;
+ default:
+ NYI("emitIns_R_S"); // FP locals?
+ return;
- } // end switch (ins)
+ } // end switch (ins)
/* Figure out the variable's frame position */
ssize_t imm;
- int base;
- bool FPbased;
+ int base;
+ bool FPbased;
- base = emitComp->lvaFrameAddress(varx, &FPbased);
- disp = base + offs;
+ base = emitComp->lvaFrameAddress(varx, &FPbased);
+ disp = base + offs;
assert((scale >= 0) && (scale <= 3));
regNumber reg2 = FPbased ? REG_FPBASE : REG_SPBASE;
- reg2 = encodingSPtoZR(reg2);
+ reg2 = encodingSPtoZR(reg2);
if (ins == INS_lea)
{
@@ -6111,24 +5992,24 @@ void emitter::emitIns_R_S (instruction ins,
else
{
ins = INS_sub;
- imm = -disp;
+ imm = -disp;
}
if (imm <= 0x0fff)
{
- fmt = IF_DI_2A; // add reg1,reg2,#disp
+ fmt = IF_DI_2A; // add reg1,reg2,#disp
}
else
{
regNumber rsvdReg = codeGen->rsGetRsvdReg();
codeGen->instGen_Set_Reg_To_Imm(size, rsvdReg, imm);
- fmt = IF_DR_3A; // add reg1,reg2,rsvdReg
+ fmt = IF_DR_3A; // add reg1,reg2,rsvdReg
}
}
else
{
- bool useRegForImm = false;
- ssize_t mask = (1 << scale) - 1; // the mask of low bits that must be zero to encode the immediate
+ bool useRegForImm = false;
+ ssize_t mask = (1 << scale) - 1; // the mask of low bits that must be zero to encode the immediate
imm = disp;
if (imm == 0)
@@ -6144,20 +6025,20 @@ void emitter::emitIns_R_S (instruction ins,
else
{
useRegForImm = true;
- }
+ }
}
else if (imm > 0)
{
if (((imm & mask) == 0) && ((imm >> scale) < 0x1000))
{
- imm >>= scale; // The immediate is scaled by the size of the ld/st
+ imm >>= scale; // The immediate is scaled by the size of the ld/st
fmt = IF_LS_2B;
}
else
{
useRegForImm = true;
- }
+ }
}
if (useRegForImm)
@@ -6170,7 +6051,7 @@ void emitter::emitIns_R_S (instruction ins,
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrCns(attr, imm);
+ instrDesc* id = emitNewInstrCns(attr, imm);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -6193,60 +6074,56 @@ void emitter::emitIns_R_S (instruction ins,
*
* Add an instruction referencing a stack-based local variable and a register
*/
-void emitter::emitIns_S_R (instruction ins,
- emitAttr attr,
- regNumber reg1,
- int varx,
- int offs)
+void emitter::emitIns_S_R(instruction ins, emitAttr attr, regNumber reg1, int varx, int offs)
{
assert(offs >= 0);
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_NONE;
- int disp = 0;
- unsigned scale = 0;
- bool isVectorStore = false;
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_NONE;
+ int disp = 0;
+ unsigned scale = 0;
+ bool isVectorStore = false;
// TODO-ARM64-CQ: use unscaled loads?
/* Figure out the encoding format of the instruction */
switch (ins)
{
- case INS_strb:
- scale = 0;
- assert(isGeneralRegisterOrZR(reg1));
- break;
+ case INS_strb:
+ scale = 0;
+ assert(isGeneralRegisterOrZR(reg1));
+ break;
- case INS_strh:
- scale = 1;
- assert(isGeneralRegisterOrZR(reg1));
- break;
+ case INS_strh:
+ scale = 1;
+ assert(isGeneralRegisterOrZR(reg1));
+ break;
- case INS_str:
- if (isGeneralRegisterOrZR(reg1))
- {
- assert(isValidGeneralDatasize(size));
- scale = (size == EA_8BYTE) ? 3 : 2;
- }
- else
- {
- assert(isVectorRegister(reg1));
- assert(isValidVectorLSDatasize(size));
- scale = NaturalScale_helper(size);
- isVectorStore = true;
- }
- break;
+ case INS_str:
+ if (isGeneralRegisterOrZR(reg1))
+ {
+ assert(isValidGeneralDatasize(size));
+ scale = (size == EA_8BYTE) ? 3 : 2;
+ }
+ else
+ {
+ assert(isVectorRegister(reg1));
+ assert(isValidVectorLSDatasize(size));
+ scale = NaturalScale_helper(size);
+ isVectorStore = true;
+ }
+ break;
- default:
- NYI("emitIns_S_R"); // FP locals?
- return;
+ default:
+ NYI("emitIns_S_R"); // FP locals?
+ return;
- } // end switch (ins)
+ } // end switch (ins)
/* Figure out the variable's frame position */
int base;
bool FPbased;
- base = emitComp->lvaFrameAddress(varx, &FPbased);
- disp = base + offs;
+ base = emitComp->lvaFrameAddress(varx, &FPbased);
+ disp = base + offs;
assert(scale >= 0);
if (isVectorStore)
{
@@ -6254,16 +6131,16 @@ void emitter::emitIns_S_R (instruction ins,
}
else
{
- assert(scale <= 3);
+ assert(scale <= 3);
}
// TODO-ARM64-CQ: with compLocallocUsed, should we use REG_SAVED_LOCALLOC_SP instead?
regNumber reg2 = FPbased ? REG_FPBASE : REG_SPBASE;
- reg2 = encodingSPtoZR(reg2);
+ reg2 = encodingSPtoZR(reg2);
- bool useRegForImm = false;
- ssize_t imm = disp;
- ssize_t mask = (1 << scale) - 1; // the mask of low bits that must be zero to encode the immediate
+ bool useRegForImm = false;
+ ssize_t imm = disp;
+ ssize_t mask = (1 << scale) - 1; // the mask of low bits that must be zero to encode the immediate
if (imm == 0)
{
fmt = IF_LS_2A;
@@ -6277,20 +6154,20 @@ void emitter::emitIns_S_R (instruction ins,
else
{
useRegForImm = true;
- }
+ }
}
else if (imm > 0)
{
if (((imm & mask) == 0) && ((imm >> scale) < 0x1000))
{
- imm >>= scale; // The immediate is scaled by the size of the ld/st
+ imm >>= scale; // The immediate is scaled by the size of the ld/st
fmt = IF_LS_2B;
}
else
{
useRegForImm = true;
- }
+ }
}
if (useRegForImm)
@@ -6304,7 +6181,7 @@ void emitter::emitIns_S_R (instruction ins,
assert(fmt != IF_NONE);
- instrDesc * id = emitNewInstrCns(attr, imm);
+ instrDesc* id = emitNewInstrCns(attr, imm);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -6323,21 +6200,15 @@ void emitter::emitIns_S_R (instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction referencing stack-based local variable and an immediate
*/
-void emitter::emitIns_S_I (instruction ins,
- emitAttr attr,
- int varx,
- int offs,
- int val)
+void emitter::emitIns_S_I(instruction ins, emitAttr attr, int varx, int offs, int val)
{
NYI("emitIns_S_I");
}
-
/*****************************************************************************
*
* Add an instruction with a register + static member operands.
@@ -6345,12 +6216,8 @@ void emitter::emitIns_S_I (instruction ins,
* No relocation is needed. PC-relative offset will be encoded directly into instruction.
*
*/
-void emitter::emitIns_R_C (instruction ins,
- emitAttr attr,
- regNumber reg,
- regNumber addrReg,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs)
+void emitter::emitIns_R_C(
+ instruction ins, emitAttr attr, regNumber reg, regNumber addrReg, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
assert(offs >= 0);
assert(instrDesc::fitsInSmallCns(offs));
@@ -6362,32 +6229,32 @@ void emitter::emitIns_R_C (instruction ins,
switch (ins)
{
- case INS_adr:
- // This case is used to get the address of the constant data.
- fmt = IF_LARGEADR;
- assert(isGeneralRegister(reg));
- assert(isValidGeneralDatasize(size));
- break;
-
- case INS_ldr:
- fmt = IF_LARGELDC;
- if (isVectorRegister(reg))
- {
- assert(isValidScalarDatasize(size));
- // For vector (float/double) register, we should have an integer address reg to
- // compute long address which consists of page address and page offset.
- // For integer constant, this is not needed since the dest reg can be used to
- // compute address as well as contain the final contents.
- assert(isGeneralRegister(reg) || (addrReg != REG_NA));
- }
- else
- {
+ case INS_adr:
+ // This case is used to get the address of the constant data.
+ fmt = IF_LARGEADR;
assert(isGeneralRegister(reg));
assert(isValidGeneralDatasize(size));
- }
- break;
- default:
- unreached();
+ break;
+
+ case INS_ldr:
+ fmt = IF_LARGELDC;
+ if (isVectorRegister(reg))
+ {
+ assert(isValidScalarDatasize(size));
+ // For vector (float/double) register, we should have an integer address reg to
+ // compute long address which consists of page address and page offset.
+ // For integer constant, this is not needed since the dest reg can be used to
+ // compute address as well as contain the final contents.
+ assert(isGeneralRegister(reg) || (addrReg != REG_NA));
+ }
+ else
+ {
+ assert(isGeneralRegister(reg));
+ assert(isValidGeneralDatasize(size));
+ }
+ break;
+ default:
+ unreached();
}
assert(fmt != IF_NONE);
@@ -6398,14 +6265,16 @@ void emitter::emitIns_R_C (instruction ins,
id->idSmallCns(offs);
id->idOpSize(size);
id->idAddr()->iiaFieldHnd = fldHnd;
- id->idSetIsBound(); // We won't patch address since we will know the exact distance once JIT code and data are allocated together.
+ id->idSetIsBound(); // We won't patch address since we will know the exact distance once JIT code and data are
+ // allocated together.
- id->idReg1(reg); // destination register that will get the constant value.
+ id->idReg1(reg); // destination register that will get the constant value.
if (addrReg != REG_NA)
{
- id->idReg2(addrReg); // integer register to compute long address (used for vector dest when we end up with long address)
+ id->idReg2(addrReg); // integer register to compute long address (used for vector dest when we end up with long
+ // address)
}
- id->idjShort = false; // Assume loading constant from long address
+ id->idjShort = false; // Assume loading constant from long address
// Keep it long if it's in cold code.
id->idjKeepLong = emitComp->fgIsBlockCold(emitComp->compCurBB);
@@ -6420,11 +6289,11 @@ void emitter::emitIns_R_C (instruction ins,
if (!id->idjKeepLong)
{
/* Record the jump's IG and offset within it */
- id->idjIG = emitCurIG;
+ id->idjIG = emitCurIG;
id->idjOffs = emitCurIGsize;
/* Append this jump to this IG's jump list */
- id->idjNext = emitCurIGjmpList;
+ id->idjNext = emitCurIGjmpList;
emitCurIGjmpList = id;
#if EMITTER_STATS
@@ -6436,70 +6305,57 @@ void emitter::emitIns_R_C (instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add an instruction with a static member + constant.
*/
-void emitter::emitIns_C_I (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- ssize_t offs,
- ssize_t val)
+void emitter::emitIns_C_I(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, ssize_t offs, ssize_t val)
{
NYI("emitIns_C_I");
}
-
/*****************************************************************************
*
* Add an instruction with a static member + register operands.
*/
-void emitter::emitIns_C_R (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- regNumber reg,
- int offs)
+void emitter::emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, regNumber reg, int offs)
{
assert(!"emitIns_C_R not supported for RyuJIT backend");
}
-void emitter::emitIns_R_AR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- int offs,
- int memCookie /* = 0 */,
- void * clsCookie /* = NULL */)
+void emitter::emitIns_R_AR(instruction ins,
+ emitAttr attr,
+ regNumber ireg,
+ regNumber reg,
+ int offs,
+ int memCookie /* = 0 */,
+ void* clsCookie /* = NULL */)
{
NYI("emitIns_R_AR");
}
// This computes address from the immediate which is relocatable.
-void emitter::emitIns_R_AI(instruction ins,
- emitAttr attr,
- regNumber ireg,
- ssize_t addr)
+void emitter::emitIns_R_AI(instruction ins, emitAttr attr, regNumber ireg, ssize_t addr)
{
assert(EA_IS_RELOC(attr));
- emitAttr size = EA_SIZE(attr);
- insFormat fmt = IF_DI_1E;
- bool needAdd = false;
- instrDescJmp* id = emitNewInstrJmp();
+ emitAttr size = EA_SIZE(attr);
+ insFormat fmt = IF_DI_1E;
+ bool needAdd = false;
+ instrDescJmp* id = emitNewInstrJmp();
switch (ins)
{
- case INS_adrp:
- // This computes the page address;
- // the page offset is applied afterwards with an add.
- needAdd = true;
- break;
- case INS_adr:
- break;
- default:
- unreached();
+ case INS_adrp:
+ // This computes the page address;
+ // the page offset is applied afterwards with an add.
+ needAdd = true;
+ break;
+ case INS_adr:
+ break;
+ default:
+ unreached();
}
id->idIns(ins);
@@ -6516,8 +6372,8 @@ void emitter::emitIns_R_AI(instruction ins,
if (needAdd)
{
// add reg, reg, imm
- ins = INS_add;
- fmt = IF_DI_2A;
+ ins = INS_add;
+ fmt = IF_DI_2A;
instrDesc* id = emitAllocInstr(attr);
assert(id->idIsReloc());
@@ -6534,44 +6390,29 @@ void emitter::emitIns_R_AI(instruction ins,
}
}
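emitIns_R_AI above splits a relocatable address across two instructions: adrp materializes the 4KB page address, and when needAdd is set a following add (IF_DI_2A) supplies the low 12 bits; both halves carry relocations. A rough sketch of the address split the pair reconstructs (the address value is hypothetical, not from the sources):

    // How an adrp/add pair rebuilds a full address; the value is made up.
    void splitAddressExample()
    {
        unsigned long long target = 0x00007FFF12345678ULL;
        unsigned long long page   = target & ~0xFFFULL; // what adrp materializes (PC-relative in reality)
        unsigned long long low12  = target & 0xFFFULL;  // what the trailing add supplies
        // page + low12 == target
    }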
-void emitter::emitIns_AR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- int offs,
- int memCookie /* = 0 */,
- void * clsCookie /* = NULL */)
+void emitter::emitIns_AR_R(instruction ins,
+ emitAttr attr,
+ regNumber ireg,
+ regNumber reg,
+ int offs,
+ int memCookie /* = 0 */,
+ void* clsCookie /* = NULL */)
{
NYI("emitIns_AR_R");
}
-void emitter::emitIns_R_ARR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- int disp)
+void emitter::emitIns_R_ARR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, int disp)
{
NYI("emitIns_R_ARR");
}
-void emitter::emitIns_ARR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- int disp)
+void emitter::emitIns_ARR_R(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, int disp)
{
NYI("emitIns_R_ARR");
}
-void emitter::emitIns_R_ARX (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- unsigned mul,
- int disp)
+void emitter::emitIns_R_ARX(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, unsigned mul, int disp)
{
NYI("emitIns_R_ARR");
}
@@ -6581,7 +6422,7 @@ void emitter::emitIns_R_ARX (instruction ins,
* Record that a jump instruction uses the short encoding
*
*/
-void emitter::emitSetShortJump(instrDescJmp * id)
+void emitter::emitSetShortJump(instrDescJmp* id)
{
if (id->idjKeepLong)
return;
@@ -6599,7 +6440,8 @@ void emitter::emitSetShortJump(instrDescJmp * id)
{
fmt = IF_LS_1A;
}
- else {
+ else
+ {
unreached();
}
@@ -6612,10 +6454,7 @@ void emitter::emitSetShortJump(instrDescJmp * id)
* Add a label instruction.
*/
-void emitter::emitIns_R_L (instruction ins,
- emitAttr attr,
- BasicBlock * dst,
- regNumber reg)
+void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
assert(dst->bbFlags & BBF_JMP_TARGET);
@@ -6623,19 +6462,19 @@ void emitter::emitIns_R_L (instruction ins,
switch (ins)
{
- case INS_adr:
- fmt = IF_LARGEADR;
- break;
- default:
- unreached();
+ case INS_adr:
+ fmt = IF_LARGEADR;
+ break;
+ default:
+ unreached();
}
instrDescJmp* id = emitNewInstrJmp();
id->idIns(ins);
id->idInsFmt(fmt);
- id->idjShort = false;
- id->idAddr()->iiaBBlabel = dst;
+ id->idjShort = false;
+ id->idAddr()->iiaBBlabel = dst;
id->idReg1(reg);
id->idOpSize(EA_PTRSIZE);
@@ -6647,7 +6486,7 @@ void emitter::emitIns_R_L (instruction ins,
}
#endif // DEBUG
- id->idjKeepLong = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
+ id->idjKeepLong = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
#ifdef DEBUG
if (emitComp->opts.compLongAddress)
@@ -6656,13 +6495,13 @@ void emitter::emitIns_R_L (instruction ins,
/* Record the jump's IG and offset within it */
- id->idjIG = emitCurIG;
- id->idjOffs = emitCurIGsize;
+ id->idjIG = emitCurIG;
+ id->idjOffs = emitCurIGsize;
/* Append this jump to this IG's jump list */
- id->idjNext = emitCurIGjmpList;
- emitCurIGjmpList = id;
+ id->idjNext = emitCurIGjmpList;
+ emitCurIGjmpList = id;
#if EMITTER_STATS
emitTotalIGjmps++;
@@ -6672,33 +6511,24 @@ void emitter::emitIns_R_L (instruction ins,
appendToCurIG(id);
}
-
/*****************************************************************************
*
* Add a data label instruction.
*/
-void emitter::emitIns_R_D (instruction ins,
- emitAttr attr,
- unsigned offs,
- regNumber reg)
+void emitter::emitIns_R_D(instruction ins, emitAttr attr, unsigned offs, regNumber reg)
{
NYI("emitIns_R_D");
}
-void emitter::emitIns_J_R (instruction ins,
- emitAttr attr,
- BasicBlock * dst,
- regNumber reg)
+void emitter::emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
NYI("emitIns_J_R");
}
-void emitter::emitIns_J(instruction ins,
- BasicBlock * dst,
- int instrCount)
+void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount)
{
- insFormat fmt = IF_NONE;
+ insFormat fmt = IF_NONE;
if (dst != nullptr)
{
@@ -6714,37 +6544,37 @@ void emitter::emitIns_J(instruction ins,
bool idjShort = false;
switch (ins)
{
- case INS_bl_local:
- case INS_b:
- // Unconditional jump is a single form.
- idjShort = true;
- fmt = IF_BI_0A;
- break;
+ case INS_bl_local:
+ case INS_b:
+ // Unconditional jump is a single form.
+ idjShort = true;
+ fmt = IF_BI_0A;
+ break;
- case INS_beq:
- case INS_bne:
- case INS_bhs:
- case INS_blo:
- case INS_bmi:
- case INS_bpl:
- case INS_bvs:
- case INS_bvc:
- case INS_bhi:
- case INS_bls:
- case INS_bge:
- case INS_blt:
- case INS_bgt:
- case INS_ble:
- // Assume conditional jump is long.
- fmt = IF_LARGEJMP;
- break;
+ case INS_beq:
+ case INS_bne:
+ case INS_bhs:
+ case INS_blo:
+ case INS_bmi:
+ case INS_bpl:
+ case INS_bvs:
+ case INS_bvc:
+ case INS_bhi:
+ case INS_bls:
+ case INS_bge:
+ case INS_blt:
+ case INS_bgt:
+ case INS_ble:
+ // Assume conditional jump is long.
+ fmt = IF_LARGEJMP;
+ break;
- default:
- unreached();
- break;
+ default:
+ unreached();
+ break;
}
- instrDescJmp* id = emitNewInstrJmp();
+ instrDescJmp* id = emitNewInstrJmp();
id->idIns(ins);
id->idInsFmt(fmt);
@@ -6770,7 +6600,7 @@ void emitter::emitIns_J(instruction ins,
id->idjKeepLong = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
#ifdef DEBUG
- if (emitComp->opts.compLongAddress) // Force long branches
+ if (emitComp->opts.compLongAddress) // Force long branches
id->idjKeepLong = 1;
#endif // DEBUG
}
@@ -6786,13 +6616,13 @@ void emitter::emitIns_J(instruction ins,
/* Record the jump's IG and offset within it */
- id->idjIG = emitCurIG;
- id->idjOffs = emitCurIGsize;
+ id->idjIG = emitCurIG;
+ id->idjOffs = emitCurIGsize;
/* Append this jump to this IG's jump list */
- id->idjNext = emitCurIGjmpList;
- emitCurIGjmpList = id;
+ id->idjNext = emitCurIGjmpList;
+ emitCurIGjmpList = id;
#if EMITTER_STATS
emitTotalIGjmps++;
@@ -6821,24 +6651,24 @@ void emitter::emitIns_J(instruction ins,
* Please consult the "debugger team notification" comment in genFnProlog().
*/
-void emitter::emitIns_Call(EmitCallType callType,
- CORINFO_METHOD_HANDLE methHnd,
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
- void* addr,
- ssize_t argSize,
- emitAttr retSize,
- emitAttr secondRetSize,
- VARSET_VALARG_TP ptrVars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- IL_OFFSETX ilOffset /* = BAD_IL_OFFSET */,
- regNumber ireg /* = REG_NA */,
- regNumber xreg /* = REG_NA */,
- unsigned xmul /* = 0 */,
- ssize_t disp /* = 0 */,
- bool isJump /* = false */,
- bool isNoGC /* = false */,
- bool isProfLeaveCB /* = false */)
+void emitter::emitIns_Call(EmitCallType callType,
+ CORINFO_METHOD_HANDLE methHnd,
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
+ void* addr,
+ ssize_t argSize,
+ emitAttr retSize,
+ emitAttr secondRetSize,
+ VARSET_VALARG_TP ptrVars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ IL_OFFSETX ilOffset /* = BAD_IL_OFFSET */,
+ regNumber ireg /* = REG_NA */,
+ regNumber xreg /* = REG_NA */,
+ unsigned xmul /* = 0 */,
+ ssize_t disp /* = 0 */,
+ bool isJump /* = false */,
+ bool isNoGC /* = false */,
+ bool isProfLeaveCB /* = false */)
{
/* Sanity check the arguments depending on callType */
@@ -6846,18 +6676,17 @@ void emitter::emitIns_Call(EmitCallType callType,
assert((callType != EC_FUNC_TOKEN && callType != EC_FUNC_ADDR) ||
(ireg == REG_NA && xreg == REG_NA && xmul == 0 && disp == 0));
assert(callType < EC_INDIR_R || addr == NULL);
- assert(callType != EC_INDIR_R ||
- (ireg < REG_COUNT && xreg == REG_NA && xmul == 0 && disp == 0));
+ assert(callType != EC_INDIR_R || (ireg < REG_COUNT && xreg == REG_NA && xmul == 0 && disp == 0));
// ARM never uses these
assert(xreg == REG_NA && xmul == 0 && disp == 0);
// Our stack level should always be at least the number of bytes of arguments we push. Just
// a sanity test.
- assert((unsigned) abs(argSize) <= codeGen->genStackLevel);
+ assert((unsigned)abs(argSize) <= codeGen->genStackLevel);
- int argCnt;
- instrDesc* id;
+ int argCnt;
+ instrDesc* id;
/* This is the saved set of registers after a normal call */
regMaskTP savedSet = RBM_CALLEE_SAVED;
@@ -6891,7 +6720,7 @@ void emitter::emitIns_Call(EmitCallType callType,
gcrefRegs &= savedSet;
byrefRegs &= savedSet;
-#ifdef DEBUG
+#ifdef DEBUG
if (EMIT_GC_VERBOSE)
{
printf("Call: GCvars=%s ", VarSetOps::ToString(emitComp, ptrVars));
@@ -6908,7 +6737,7 @@ void emitter::emitIns_Call(EmitCallType callType,
assert(argSize % REGSIZE_BYTES == 0);
argCnt = (int)(argSize / (int)sizeof(void*));
-
+
#ifdef DEBUGGING_SUPPORT
/* Managed RetVal: emit sequence point for the call */
if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
@@ -6923,34 +6752,22 @@ void emitter::emitIns_Call(EmitCallType callType,
record an updated set of live GC variables.
*/
- if (callType >= EC_INDIR_R)
+ if (callType >= EC_INDIR_R)
{
/* Indirect call, virtual calls */
assert(callType == EC_INDIR_R);
- id = emitNewInstrCallInd(argCnt,
- disp,
- ptrVars,
- gcrefRegs,
- byrefRegs,
- retSize,
- secondRetSize);
+ id = emitNewInstrCallInd(argCnt, disp, ptrVars, gcrefRegs, byrefRegs, retSize, secondRetSize);
}
else
{
/* Helper/static/nonvirtual/function calls (direct or through handle),
and calls to an absolute addr. */
- assert(callType == EC_FUNC_TOKEN ||
- callType == EC_FUNC_ADDR);
+ assert(callType == EC_FUNC_TOKEN || callType == EC_FUNC_ADDR);
- id = emitNewInstrCallDir(argCnt,
- ptrVars,
- gcrefRegs,
- byrefRegs,
- retSize,
- secondRetSize);
+ id = emitNewInstrCallDir(argCnt, ptrVars, gcrefRegs, byrefRegs, retSize, secondRetSize);
}
/* Update the emitter's live GC ref sets */
@@ -6967,36 +6784,36 @@ void emitter::emitIns_Call(EmitCallType callType,
/* Record the address: method, indirection, or funcptr */
- if (callType > EC_FUNC_ADDR)
+ if (callType > EC_FUNC_ADDR)
{
/* This is an indirect call (either a virtual call or func ptr call) */
switch (callType)
{
- case EC_INDIR_R: // the address is in a register
+ case EC_INDIR_R: // the address is in a register
- id->idSetIsCallRegPtr();
+ id->idSetIsCallRegPtr();
- if (isJump)
- {
- ins = INS_br_tail; // INS_br_tail Reg
- }
- else
- {
- ins = INS_blr; // INS_blr Reg
- }
- fmt = IF_BR_1B;
+ if (isJump)
+ {
+ ins = INS_br_tail; // INS_br_tail Reg
+ }
+ else
+ {
+ ins = INS_blr; // INS_blr Reg
+ }
+ fmt = IF_BR_1B;
- id->idIns(ins);
- id->idInsFmt(fmt);
+ id->idIns(ins);
+ id->idInsFmt(fmt);
- id->idReg3(ireg);
- assert(xreg == REG_NA);
- break;
+ id->idReg3(ireg);
+ assert(xreg == REG_NA);
+ break;
- default:
- NO_WAY("unexpected instruction");
- break;
+ default:
+ NO_WAY("unexpected instruction");
+ break;
}
}
else
@@ -7013,12 +6830,12 @@ void emitter::emitIns_Call(EmitCallType callType,
}
else
{
- ins = INS_bl; // INS_bl imm28
+ ins = INS_bl; // INS_bl imm28
}
fmt = IF_BI_0C;
id->idIns(ins);
- id->idInsFmt(fmt);
+ id->idInsFmt(fmt);
id->idAddr()->iiaAddr = (BYTE*)addr;
@@ -7035,20 +6852,21 @@ void emitter::emitIns_Call(EmitCallType callType,
#endif
}
-#ifdef DEBUG
+#ifdef DEBUG
if (EMIT_GC_VERBOSE)
{
if (id->idIsLargeCall())
{
- printf("[%02u] Rec call GC vars = %s\n", id->idDebugOnlyInfo()->idNum, VarSetOps::ToString(emitComp, ((instrDescCGCA*)id)->idcGCvars));
+ printf("[%02u] Rec call GC vars = %s\n", id->idDebugOnlyInfo()->idNum,
+ VarSetOps::ToString(emitComp, ((instrDescCGCA*)id)->idcGCvars));
}
}
#endif
#if defined(DEBUG) || defined(LATE_DISASM)
- id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token
+ id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token
id->idDebugOnlyInfo()->idClsCookie = 0;
- id->idDebugOnlyInfo()->idCallSig = sigInfo;
+ id->idDebugOnlyInfo()->idCallSig = sigInfo;
#endif
#if defined(LATE_DISASM)
@@ -7064,10 +6882,10 @@ void emitter::emitIns_Call(EmitCallType callType,
/*****************************************************************************
*
- * Returns true if 'imm' is a valid Cond encoding
+ * Returns true if 'imm' is a valid Cond encoding
*/
-/*static*/ bool emitter::isValidImmCond(ssize_t imm)
+/*static*/ bool emitter::isValidImmCond(ssize_t imm)
{
// range check the ssize_t value, to make sure it is a small unsigned value
// and that only the bits in the cfi.cond are set
@@ -7075,17 +6893,17 @@ void emitter::emitIns_Call(EmitCallType callType,
return false;
condFlagsImm cfi;
- cfi.immCFVal = (unsigned) imm;
+ cfi.immCFVal = (unsigned)imm;
- return (cfi.cond <= INS_COND_LE); // Don't allow 14 & 15 (AL & NV).
+ return (cfi.cond <= INS_COND_LE); // Don't allow 14 & 15 (AL & NV).
}
/*****************************************************************************
*
- * Returns true if 'imm' is a valid Cond/Flags encoding
+ * Returns true if 'imm' is a valid Cond/Flags encoding
*/
-/*static*/ bool emitter::isValidImmCondFlags(ssize_t imm)
+/*static*/ bool emitter::isValidImmCondFlags(ssize_t imm)
{
// range check the ssize_t value, to make sure it is a small unsigned value
// and that only the bits in the cfi.cond or cfi.flags are set
@@ -7093,17 +6911,17 @@ void emitter::emitIns_Call(EmitCallType callType,
return false;
condFlagsImm cfi;
- cfi.immCFVal = (unsigned) imm;
+ cfi.immCFVal = (unsigned)imm;
- return (cfi.cond <= INS_COND_LE); // Don't allow 14 & 15 (AL & NV).
+ return (cfi.cond <= INS_COND_LE); // Don't allow 14 & 15 (AL & NV).
}
/*****************************************************************************
*
- * Returns true if 'imm' is a valid Cond/Flags/Imm5 encoding
+ * Returns true if 'imm' is a valid Cond/Flags/Imm5 encoding
*/
-/*static*/ bool emitter::isValidImmCondFlagsImm5(ssize_t imm)
+/*static*/ bool emitter::isValidImmCondFlagsImm5(ssize_t imm)
{
// range check the ssize_t value, to make sure it is a small unsigned value
// and that only the bits in the cfi.cond, cfi.flags or cfi.imm5 are set
@@ -7111,9 +6929,9 @@ void emitter::emitIns_Call(EmitCallType callType,
return false;
condFlagsImm cfi;
- cfi.immCFVal = (unsigned) imm;
+ cfi.immCFVal = (unsigned)imm;
- return (cfi.cond <= INS_COND_LE); // Don't allow 14 & 15 (AL & NV).
+ return (cfi.cond <= INS_COND_LE); // Don't allow 14 & 15 (AL & NV).
}
/*****************************************************************************
@@ -7121,11 +6939,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified register used in the 'Rd' position
*/
-/*static*/ emitter::code_t emitter::insEncodeReg_Rd(regNumber reg)
+/*static*/ emitter::code_t emitter::insEncodeReg_Rd(regNumber reg)
{
assert(isIntegerRegister(reg));
- emitter::code_t ureg = (emitter::code_t) reg;
- assert((ureg >=0) && (ureg <= 31));
+ emitter::code_t ureg = (emitter::code_t)reg;
+ assert((ureg >= 0) && (ureg <= 31));
return ureg;
}
@@ -7134,11 +6952,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified register used in the 'Rt' position
*/
-/*static*/ emitter::code_t emitter::insEncodeReg_Rt(regNumber reg)
+/*static*/ emitter::code_t emitter::insEncodeReg_Rt(regNumber reg)
{
assert(isIntegerRegister(reg));
- emitter::code_t ureg = (emitter::code_t) reg;
- assert((ureg >=0) && (ureg <= 31));
+ emitter::code_t ureg = (emitter::code_t)reg;
+ assert((ureg >= 0) && (ureg <= 31));
return ureg;
}
@@ -7147,11 +6965,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified register used in the 'Rn' position
*/
-/*static*/ emitter::code_t emitter::insEncodeReg_Rn(regNumber reg)
+/*static*/ emitter::code_t emitter::insEncodeReg_Rn(regNumber reg)
{
assert(isIntegerRegister(reg));
- emitter::code_t ureg = (emitter::code_t) reg;
- assert((ureg >=0) && (ureg <= 31));
+ emitter::code_t ureg = (emitter::code_t)reg;
+ assert((ureg >= 0) && (ureg <= 31));
return ureg << 5;
}
@@ -7160,11 +6978,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified register used in the 'Rm' position
*/
-/*static*/ emitter::code_t emitter::insEncodeReg_Rm(regNumber reg)
+/*static*/ emitter::code_t emitter::insEncodeReg_Rm(regNumber reg)
{
assert(isIntegerRegister(reg));
- emitter::code_t ureg = (emitter::code_t) reg;
- assert((ureg >=0) && (ureg <= 31));
+ emitter::code_t ureg = (emitter::code_t)reg;
+ assert((ureg >= 0) && (ureg <= 31));
return ureg << 16;
}
@@ -7173,11 +6991,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified register used in the 'Ra' position
*/
-/*static*/ emitter::code_t emitter::insEncodeReg_Ra(regNumber reg)
+/*static*/ emitter::code_t emitter::insEncodeReg_Ra(regNumber reg)
{
assert(isIntegerRegister(reg));
- emitter::code_t ureg = (emitter::code_t) reg;
- assert((ureg >=0) && (ureg <= 31));
+ emitter::code_t ureg = (emitter::code_t)reg;
+ assert((ureg >= 0) && (ureg <= 31));
return ureg << 10;
}
@@ -7186,11 +7004,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified register used in the 'Vd' position
*/
-/*static*/ emitter::code_t emitter::insEncodeReg_Vd(regNumber reg)
+/*static*/ emitter::code_t emitter::insEncodeReg_Vd(regNumber reg)
{
assert(emitter::isVectorRegister(reg));
- emitter::code_t ureg = (emitter::code_t) reg - (emitter::code_t) REG_V0;
- assert((ureg >=0) && (ureg <= 31));
+ emitter::code_t ureg = (emitter::code_t)reg - (emitter::code_t)REG_V0;
+ assert((ureg >= 0) && (ureg <= 31));
return ureg;
}
@@ -7199,11 +7017,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified register used in the 'Vt' position
*/
-/*static*/ emitter::code_t emitter::insEncodeReg_Vt(regNumber reg)
+/*static*/ emitter::code_t emitter::insEncodeReg_Vt(regNumber reg)
{
assert(emitter::isVectorRegister(reg));
- emitter::code_t ureg = (emitter::code_t) reg - (emitter::code_t) REG_V0;
- assert((ureg >=0) && (ureg <= 31));
+ emitter::code_t ureg = (emitter::code_t)reg - (emitter::code_t)REG_V0;
+ assert((ureg >= 0) && (ureg <= 31));
return ureg;
}
@@ -7212,11 +7030,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified register used in the 'Vn' position
*/
-/*static*/ emitter::code_t emitter::insEncodeReg_Vn(regNumber reg)
+/*static*/ emitter::code_t emitter::insEncodeReg_Vn(regNumber reg)
{
assert(emitter::isVectorRegister(reg));
- emitter::code_t ureg = (emitter::code_t) reg - (emitter::code_t) REG_V0;
- assert((ureg >=0) && (ureg <= 31));
+ emitter::code_t ureg = (emitter::code_t)reg - (emitter::code_t)REG_V0;
+ assert((ureg >= 0) && (ureg <= 31));
return ureg << 5;
}
@@ -7225,11 +7043,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified register used in the 'Vm' position
*/
-/*static*/ emitter::code_t emitter::insEncodeReg_Vm(regNumber reg)
+/*static*/ emitter::code_t emitter::insEncodeReg_Vm(regNumber reg)
{
assert(emitter::isVectorRegister(reg));
- emitter::code_t ureg = (emitter::code_t) reg - (emitter::code_t) REG_V0;
- assert((ureg >=0) && (ureg <= 31));
+ emitter::code_t ureg = (emitter::code_t)reg - (emitter::code_t)REG_V0;
+ assert((ureg >= 0) && (ureg <= 31));
return ureg << 16;
}
@@ -7238,11 +7056,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified register used in the 'Va' position
*/
-/*static*/ emitter::code_t emitter::insEncodeReg_Va(regNumber reg)
+/*static*/ emitter::code_t emitter::insEncodeReg_Va(regNumber reg)
{
assert(emitter::isVectorRegister(reg));
- emitter::code_t ureg = (emitter::code_t) reg - (emitter::code_t) REG_V0;
- assert((ureg >=0) && (ureg <= 31));
+ emitter::code_t ureg = (emitter::code_t)reg - (emitter::code_t)REG_V0;
+ assert((ureg >= 0) && (ureg <= 31));
return ureg << 10;
}
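The insEncodeReg_* helpers above simply place a 5-bit register number at its operand field: Rd/Rt and Vd/Vt at bits [4:0], Rn/Vn at [9:5], Ra/Va at [14:10], and Rm/Vm at [20:16]. A toy sketch of how those fields OR together into an instruction word; the base opcode parameter is a placeholder rather than a real encoding:

    typedef unsigned int code_t;
    // Combine a placeholder opcode with three register fields, the same way the
    // emitter ORs emitInsCode() with insEncodeReg_Rd/Rn/Rm results.
    code_t encodeThreeRegs(code_t baseOpcode, unsigned rd, unsigned rn, unsigned rm)
    {
        return baseOpcode | (rd & 31) | ((rn & 31) << 5) | ((rm & 31) << 16);
    }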
@@ -7251,9 +7069,9 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified condition code.
*/
-/*static*/ emitter::code_t emitter::insEncodeCond(insCond cond)
+/*static*/ emitter::code_t emitter::insEncodeCond(insCond cond)
{
- emitter::code_t uimm = (emitter::code_t) cond;
+ emitter::code_t uimm = (emitter::code_t)cond;
return uimm << 12;
}
@@ -7263,9 +7081,9 @@ void emitter::emitIns_Call(EmitCallType callType,
* architecture manual).
*/
-/*static*/ emitter::code_t emitter::insEncodeInvertedCond(insCond cond)
+/*static*/ emitter::code_t emitter::insEncodeInvertedCond(insCond cond)
{
- emitter::code_t uimm = (emitter::code_t) cond;
+ emitter::code_t uimm = (emitter::code_t)cond;
uimm ^= 1; // invert the lowest bit
return uimm << 12;
}
@@ -7275,9 +7093,9 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns an encoding for the specified flags.
*/
-/*static*/ emitter::code_t emitter::insEncodeFlags(insCflags flags)
+/*static*/ emitter::code_t emitter::insEncodeFlags(insCflags flags)
{
- emitter::code_t uimm = (emitter::code_t) flags;
+ emitter::code_t uimm = (emitter::code_t)flags;
return uimm;
}
@@ -7286,12 +7104,12 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns the encoding for the Shift Count bits to be used for Arm64 encodings
*/
-/*static*/ emitter::code_t emitter::insEncodeShiftCount(ssize_t imm, emitAttr size)
+/*static*/ emitter::code_t emitter::insEncodeShiftCount(ssize_t imm, emitAttr size)
{
assert((imm & 0x003F) == imm);
assert(((imm & 0x0020) == 0) || (size == EA_8BYTE));
- return (emitter::code_t) imm << 10;
+ return (emitter::code_t)imm << 10;
}
/*****************************************************************************
@@ -7299,11 +7117,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns the encoding to select a 64-bit datasize for an Arm64 instruction
*/
-/*static*/ emitter::code_t emitter::insEncodeDatasize(emitAttr size)
+/*static*/ emitter::code_t emitter::insEncodeDatasize(emitAttr size)
{
if (size == EA_8BYTE)
{
- return 0x80000000; // set the bit at location 31
+ return 0x80000000; // set the bit at location 31
}
else
{
@@ -7315,28 +7133,28 @@ void emitter::emitIns_Call(EmitCallType callType,
/*****************************************************************************
*
* Returns the encoding to select the datasize for the general load/store Arm64 instructions
- *
+ *
*/
-/*static*/ emitter::code_t emitter::insEncodeDatasizeLS(emitter::code_t code, emitAttr size)
+/*static*/ emitter::code_t emitter::insEncodeDatasizeLS(emitter::code_t code, emitAttr size)
{
- if (code & 0x00800000) // Is this a sign-extending opcode? (i.e. ldrsw, ldrsh, ldrsb)
+ if (code & 0x00800000) // Is this a sign-extending opcode? (i.e. ldrsw, ldrsh, ldrsb)
{
assert((size == EA_4BYTE) || (size == EA_8BYTE));
- if ((code & 0x80000000) == 0) // Is it a ldrsh or ldrsb and not ldrsw ?
+ if ((code & 0x80000000) == 0) // Is it a ldrsh or ldrsb and not ldrsw ?
{
- if (size == EA_4BYTE) // Do we need to encode the 32-bit Rt size bit?
+ if (size == EA_4BYTE) // Do we need to encode the 32-bit Rt size bit?
{
- return 0x00400000; // set the bit at location 22
+ return 0x00400000; // set the bit at location 22
}
}
}
- else if (code & 0x80000000) // Is this a ldr/str/ldur/stur opcode?
+ else if (code & 0x80000000) // Is this a ldr/str/ldur/stur opcode?
{
assert((size == EA_4BYTE) || (size == EA_8BYTE));
- if (size == EA_8BYTE) // Do we need to encode the 64-bit size bit?
+ if (size == EA_8BYTE) // Do we need to encode the 64-bit size bit?
{
- return 0x40000000; // set the bit at location 30
+ return 0x40000000; // set the bit at location 30
}
}
return 0;
@@ -7345,15 +7163,15 @@ void emitter::emitIns_Call(EmitCallType callType,
/*****************************************************************************
*
* Returns the encoding to select the datasize for the vector load/store Arm64 instructions
- *
+ *
*/
-/*static*/ emitter::code_t emitter::insEncodeDatasizeVLS(emitter::code_t code, emitAttr size)
+/*static*/ emitter::code_t emitter::insEncodeDatasizeVLS(emitter::code_t code, emitAttr size)
{
code_t result = 0;
// Check bit 29
- if ((code & 0x20000000) == 0)
+ if ((code & 0x20000000) == 0)
{
// LDR literal
@@ -7367,7 +7185,7 @@ void emitter::emitIns_Call(EmitCallType callType,
// set the operation size in bit 30
result = 0x40000000;
}
- else
+ else
{
assert(size == EA_4BYTE);
// no bits are set
@@ -7377,7 +7195,7 @@ void emitter::emitIns_Call(EmitCallType callType,
else
{
// LDR non-literal
-
+
if (size == EA_16BYTE)
{
// The operation size in bits 31 and 30 are zero
@@ -7399,7 +7217,7 @@ void emitter::emitIns_Call(EmitCallType callType,
// set the operation size in bit 30
result = 0x40000000;
}
- else
+ else
{
assert(size == EA_1BYTE);
// The operation size in bits 31 and 30 are zero
@@ -7416,13 +7234,13 @@ void emitter::emitIns_Call(EmitCallType callType,
/*****************************************************************************
*
* Returns the encoding to select the datasize for the vector load/store pair Arm64 instructions
- *
+ *
*/
-/*static*/ emitter::code_t emitter::insEncodeDatasizeVPLS(emitter::code_t code, emitAttr size)
+/*static*/ emitter::code_t emitter::insEncodeDatasizeVPLS(emitter::code_t code, emitAttr size)
{
code_t result = 0;
-
+
if (size == EA_16BYTE)
{
// The operation size in bits 31 and 30 are zero
@@ -7449,20 +7267,20 @@ void emitter::emitIns_Call(EmitCallType callType,
/*****************************************************************************
*
* Returns the encoding to set the size bit and the N bits for a 'bitfield' instruction
- *
+ *
*/
-/*static*/ emitter::code_t emitter::insEncodeDatasizeBF(emitter::code_t code, emitAttr size)
+/*static*/ emitter::code_t emitter::insEncodeDatasizeBF(emitter::code_t code, emitAttr size)
{
// is bit 30 equal to 0?
- if ((code & 0x40000000) == 0) // is the opcode one of extr, sxtb, sxth or sxtw
+ if ((code & 0x40000000) == 0) // is the opcode one of extr, sxtb, sxth or sxtw
{
- if (size == EA_8BYTE) // Do we need to set the sf and N bits?
+ if (size == EA_8BYTE) // Do we need to set the sf and N bits?
{
- return 0x80400000; // set the sf-bit at location 31 and the N-bit at location 22
+ return 0x80400000; // set the sf-bit at location 31 and the N-bit at location 22
}
}
- return 0; // don't set any bits
+ return 0; // don't set any bits
}
/*****************************************************************************
@@ -7470,11 +7288,11 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns the encoding to select the 64/128-bit datasize for an Arm64 vector instruction
*/
-/*static*/ emitter::code_t emitter::insEncodeVectorsize(emitAttr size)
+/*static*/ emitter::code_t emitter::insEncodeVectorsize(emitAttr size)
{
if (size == EA_16BYTE)
{
- return 0x40000000; // set the bit at location 30
+ return 0x40000000; // set the bit at location 30
}
else
{
@@ -7487,20 +7305,20 @@ void emitter::emitIns_Call(EmitCallType callType,
*
* Returns the encoding to select 'index' for an Arm64 vector elem instruction
*/
-/*static*/ emitter::code_t emitter::insEncodeVectorIndex(emitAttr elemsize, ssize_t index)
+/*static*/ emitter::code_t emitter::insEncodeVectorIndex(emitAttr elemsize, ssize_t index)
{
- code_t bits = (code_t) index;
+ code_t bits = (code_t)index;
if (elemsize == EA_1BYTE)
{
bits <<= 1;
bits |= 1;
}
- else if (elemsize == EA_2BYTE)
+ else if (elemsize == EA_2BYTE)
{
bits <<= 2;
bits |= 2;
}
- else if (elemsize == EA_4BYTE)
+ else if (elemsize == EA_4BYTE)
{
bits <<= 3;
bits |= 4;
@@ -7513,25 +7331,25 @@ void emitter::emitIns_Call(EmitCallType callType,
}
assert((bits >= 1) && (bits <= 0x1f));
- return (bits << 16); // bits at locations [20,19,18,17,16]
+ return (bits << 16); // bits at locations [20,19,18,17,16]
}
/*****************************************************************************
*
* Returns the encoding to select 'index2' for an Arm64 'ins' elem instruction
*/
-/*static*/ emitter::code_t emitter::insEncodeVectorIndex2(emitAttr elemsize, ssize_t index2)
+/*static*/ emitter::code_t emitter::insEncodeVectorIndex2(emitAttr elemsize, ssize_t index2)
{
- code_t bits = (code_t) index2;
+ code_t bits = (code_t)index2;
if (elemsize == EA_1BYTE)
{
// bits are correct
}
- else if (elemsize == EA_2BYTE)
+ else if (elemsize == EA_2BYTE)
{
bits <<= 1;
}
- else if (elemsize == EA_4BYTE)
+ else if (elemsize == EA_4BYTE)
{
bits <<= 2;
}
@@ -7542,43 +7360,43 @@ void emitter::emitIns_Call(EmitCallType callType,
}
assert((bits >= 0) && (bits <= 0xf));
- return (bits << 11); // bits at locations [14,13,12,11]
+ return (bits << 11); // bits at locations [14,13,12,11]
}
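insEncodeVectorIndex folds the element size into the index by appending a marker bit below it (B: (index << 1) | 1, H: (index << 2) | 2, S: (index << 3) | 4) and places the result at bits [20:16], while insEncodeVectorIndex2 places the plain index at bits [14:11]. Worked arithmetic for a 4-byte element at index 2:

    bits = (2 << 3) | 4 = 0x14                  (binary 10100)
    encoding contribution = 0x14 << 16 = 0x00140000

The position of the lowest set bit in the imm5 field is what tells the decoder which element size was selected.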
/*****************************************************************************
*
* Returns the encoding to select the 'index' for an Arm64 'mul' by elem instruction
*/
-/*static*/ emitter::code_t emitter::insEncodeVectorIndexLMH(emitAttr elemsize, ssize_t index)
+/*static*/ emitter::code_t emitter::insEncodeVectorIndexLMH(emitAttr elemsize, ssize_t index)
{
code_t bits = 0;
-
+
if (elemsize == EA_2BYTE)
{
assert((index >= 0) && (index <= 7));
if (index & 0x4)
{
- bits |= (1 << 11); // set bit 11 'H'
+ bits |= (1 << 11); // set bit 11 'H'
}
if (index & 0x2)
{
- bits |= (1 << 21); // set bit 21 'L'
+ bits |= (1 << 21); // set bit 21 'L'
}
if (index & 0x1)
{
- bits |= (1 << 20); // set bit 20 'M'
+ bits |= (1 << 20); // set bit 20 'M'
}
}
- else if (elemsize == EA_4BYTE)
+ else if (elemsize == EA_4BYTE)
{
assert((index >= 0) && (index <= 3));
if (index & 0x2)
{
- bits |= (1 << 11); // set bit 11 'H'
+ bits |= (1 << 11); // set bit 11 'H'
}
if (index & 0x1)
{
- bits |= (1 << 21); // set bit 21 'L'
+ bits |= (1 << 21); // set bit 21 'L'
}
}
else
@@ -7594,36 +7412,36 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns the encoding to shift by 'shift' for an Arm64 vector or scalar instruction
*/
-/*static*/ emitter::code_t emitter::insEncodeVectorShift(emitAttr size, ssize_t shift)
+/*static*/ emitter::code_t emitter::insEncodeVectorShift(emitAttr size, ssize_t shift)
{
assert(shift < getBitWidth(size));
- code_t imm = (code_t) (getBitWidth(size) + shift);
+ code_t imm = (code_t)(getBitWidth(size) + shift);
return imm << 16;
}
-
+
/*****************************************************************************
*
* Returns the encoding to select the 1/2/4/8 byte elemsize for an Arm64 vector instruction
*/
-/*static*/ emitter::code_t emitter::insEncodeElemsize(emitAttr size)
+/*static*/ emitter::code_t emitter::insEncodeElemsize(emitAttr size)
{
if (size == EA_8BYTE)
{
- return 0x00C00000; // set the bit at location 23 and 22
+ return 0x00C00000; // set the bit at location 23 and 22
}
- else if (size == EA_4BYTE)
+ else if (size == EA_4BYTE)
{
- return 0x00800000; // set the bit at location 23
+ return 0x00800000; // set the bit at location 23
}
- else if (size == EA_2BYTE)
+ else if (size == EA_2BYTE)
{
- return 0x00400000; // set the bit at location 22
+ return 0x00400000; // set the bit at location 22
}
assert(size == EA_1BYTE);
- return 0x00000000;
+ return 0x00000000;
}
/*****************************************************************************
@@ -7631,18 +7449,18 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns the encoding to select the 4/8 byte elemsize for an Arm64 float vector instruction
*/
-/*static*/ emitter::code_t emitter::insEncodeFloatElemsize(emitAttr size)
+/*static*/ emitter::code_t emitter::insEncodeFloatElemsize(emitAttr size)
{
if (size == EA_8BYTE)
{
- return 0x00400000; // set the bit at location 22
+ return 0x00400000; // set the bit at location 22
}
assert(size == EA_4BYTE);
- return 0x00000000;
+ return 0x00000000;
}
// Returns the encoding to select the index for an Arm64 float vector by elem instruction
-/*static*/ emitter::code_t emitter::insEncodeFloatIndex(emitAttr elemsize, ssize_t index)
+/*static*/ emitter::code_t emitter::insEncodeFloatIndex(emitAttr elemsize, ssize_t index)
{
code_t result = 0x00000000;
if (elemsize == EA_8BYTE)
@@ -7650,20 +7468,20 @@ void emitter::emitIns_Call(EmitCallType callType,
assert((index >= 0) && (index <= 1));
if (index == 1)
{
- result |= 0x00000800; // 'H' - set the bit at location 11
+ result |= 0x00000800; // 'H' - set the bit at location 11
}
}
- else
+ else
{
assert(elemsize == EA_4BYTE);
assert((index >= 0) && (index <= 3));
if (index & 2)
{
- result |= 0x00000800; // 'H' - set the bit at location 11
+ result |= 0x00000800; // 'H' - set the bit at location 11
}
if (index & 1)
{
- result |= 0x00200000; // 'L' - set the bit at location 21
+ result |= 0x00200000; // 'L' - set the bit at location 21
}
}
return result;
@@ -7673,95 +7491,95 @@ void emitter::emitIns_Call(EmitCallType callType,
*
* Returns the encoding to select the fcvt operation for Arm64 instructions
*/
-/*static*/ emitter::code_t emitter::insEncodeConvertOpt(insFormat fmt, insOpts conversion)
+/*static*/ emitter::code_t emitter::insEncodeConvertOpt(insFormat fmt, insOpts conversion)
{
code_t result = 0;
switch (conversion)
{
- case INS_OPTS_S_TO_D: // Single to Double
- assert(fmt == IF_DV_2J);
- result = 0x00008000; // type=00, opc=01
- break;
+ case INS_OPTS_S_TO_D: // Single to Double
+ assert(fmt == IF_DV_2J);
+ result = 0x00008000; // type=00, opc=01
+ break;
- case INS_OPTS_D_TO_S: // Double to Single
- assert(fmt == IF_DV_2J);
- result = 0x00400000; // type=01, opc=00
- break;
+ case INS_OPTS_D_TO_S: // Double to Single
+ assert(fmt == IF_DV_2J);
+ result = 0x00400000; // type=01, opc=00
+ break;
- case INS_OPTS_H_TO_S: // Half to Single
- assert(fmt == IF_DV_2J);
- result = 0x00C00000; // type=11, opc=00
- break;
+ case INS_OPTS_H_TO_S: // Half to Single
+ assert(fmt == IF_DV_2J);
+ result = 0x00C00000; // type=11, opc=00
+ break;
- case INS_OPTS_H_TO_D: // Half to Double
- assert(fmt == IF_DV_2J);
- result = 0x00C08000; // type=11, opc=01
- break;
+ case INS_OPTS_H_TO_D: // Half to Double
+ assert(fmt == IF_DV_2J);
+ result = 0x00C08000; // type=11, opc=01
+ break;
- case INS_OPTS_S_TO_H: // Single to Half
- assert(fmt == IF_DV_2J);
- result = 0x00018000; // type=00, opc=11
- break;
+ case INS_OPTS_S_TO_H: // Single to Half
+ assert(fmt == IF_DV_2J);
+ result = 0x00018000; // type=00, opc=11
+ break;
- case INS_OPTS_D_TO_H: // Double to Half
- assert(fmt == IF_DV_2J);
- result = 0x00418000; // type=01, opc=11
- break;
+ case INS_OPTS_D_TO_H: // Double to Half
+ assert(fmt == IF_DV_2J);
+ result = 0x00418000; // type=01, opc=11
+ break;
- case INS_OPTS_S_TO_4BYTE: // Single to INT32
- assert(fmt == IF_DV_2H);
- result = 0x00000000; // sf=0, type=00
- break;
+ case INS_OPTS_S_TO_4BYTE: // Single to INT32
+ assert(fmt == IF_DV_2H);
+ result = 0x00000000; // sf=0, type=00
+ break;
- case INS_OPTS_D_TO_4BYTE: // Double to INT32
- assert(fmt == IF_DV_2H);
- result = 0x00400000; // sf=0, type=01
- break;
+ case INS_OPTS_D_TO_4BYTE: // Double to INT32
+ assert(fmt == IF_DV_2H);
+ result = 0x00400000; // sf=0, type=01
+ break;
- case INS_OPTS_S_TO_8BYTE: // Single to INT64
- assert(fmt == IF_DV_2H);
- result = 0x80000000; // sf=1, type=00
- break;
+ case INS_OPTS_S_TO_8BYTE: // Single to INT64
+ assert(fmt == IF_DV_2H);
+ result = 0x80000000; // sf=1, type=00
+ break;
- case INS_OPTS_D_TO_8BYTE: // Double to INT64
- assert(fmt == IF_DV_2H);
- result = 0x80400000; // sf=1, type=01
- break;
+ case INS_OPTS_D_TO_8BYTE: // Double to INT64
+ assert(fmt == IF_DV_2H);
+ result = 0x80400000; // sf=1, type=01
+ break;
- case INS_OPTS_4BYTE_TO_S: // INT32 to Single
- assert(fmt == IF_DV_2I);
- result = 0x00000000; // sf=0, type=00
- break;
+ case INS_OPTS_4BYTE_TO_S: // INT32 to Single
+ assert(fmt == IF_DV_2I);
+ result = 0x00000000; // sf=0, type=00
+ break;
- case INS_OPTS_4BYTE_TO_D: // INT32 to Double
- assert(fmt == IF_DV_2I);
- result = 0x00400000; // sf=0, type=01
- break;
+ case INS_OPTS_4BYTE_TO_D: // INT32 to Double
+ assert(fmt == IF_DV_2I);
+ result = 0x00400000; // sf=0, type=01
+ break;
- case INS_OPTS_8BYTE_TO_S: // INT64 to Single
- assert(fmt == IF_DV_2I);
- result = 0x80000000; // sf=1, type=00
- break;
+ case INS_OPTS_8BYTE_TO_S: // INT64 to Single
+ assert(fmt == IF_DV_2I);
+ result = 0x80000000; // sf=1, type=00
+ break;
- case INS_OPTS_8BYTE_TO_D: // INT64 to Double
- assert(fmt == IF_DV_2I);
- result = 0x80400000; // sf=1, type=01
- break;
+ case INS_OPTS_8BYTE_TO_D: // INT64 to Double
+ assert(fmt == IF_DV_2I);
+ result = 0x80400000; // sf=1, type=01
+ break;
- default:
- assert(!"Invalid 'conversion' value");
- break;
+ default:
+ assert(!"Invalid 'conversion' value");
+ break;
}
return result;
}
/*****************************************************************************
*
- * Returns the encoding to have the Rn register be updated Pre/Post indexed
+ * Returns the encoding to have the Rn register be updated Pre/Post indexed
* or not updated
*/
-/*static*/ emitter::code_t emitter::insEncodeIndexedOpt(insOpts opt)
+/*static*/ emitter::code_t emitter::insEncodeIndexedOpt(insOpts opt)
{
assert(emitter::insOptsNone(opt) || emitter::insOptsIndexed(opt));
@@ -7769,35 +7587,35 @@ void emitter::emitIns_Call(EmitCallType callType,
{
if (emitter::insOptsPostIndex(opt))
{
- return 0x00000400; // set the bit at location 10
+ return 0x00000400; // set the bit at location 10
}
else
- {
+ {
assert(emitter::insOptsPreIndex(opt));
- return 0x00000C00; // set the bit at location 10 and 11
+ return 0x00000C00; // set the bit at location 10 and 11
}
}
else
{
assert(emitter::insOptsNone(opt));
- return 0; // bits 10 and 11 are zero
+ return 0; // bits 10 and 11 are zero
}
}
/*****************************************************************************
*
* Returns the encoding for a ldp/stp instruction to have the Rn register
- * be updated Pre/Post indexed or not updated
+ * be updated Pre/Post indexed or not updated
*/
-/*static*/ emitter::code_t emitter::insEncodePairIndexedOpt(instruction ins, insOpts opt)
+/*static*/ emitter::code_t emitter::insEncodePairIndexedOpt(instruction ins, insOpts opt)
{
assert(emitter::insOptsNone(opt) || emitter::insOptsIndexed(opt));
if ((ins == INS_ldnp) || (ins == INS_stnp))
{
assert(emitter::insOptsNone(opt));
- return 0; // bits 23 and 24 are zero
+ return 0; // bits 23 and 24 are zero
}
else
{
@@ -7805,18 +7623,18 @@ void emitter::emitIns_Call(EmitCallType callType,
{
if (emitter::insOptsPostIndex(opt))
{
- return 0x00800000; // set the bit at location 23
+ return 0x00800000; // set the bit at location 23
}
else
- {
+ {
assert(emitter::insOptsPreIndex(opt));
- return 0x01800000; // set the bit at location 24 and 23
+ return 0x01800000; // set the bit at location 24 and 23
}
}
else
{
assert(emitter::insOptsNone(opt));
- return 0x01000000; // set the bit at location 24
+ return 0x01000000; // set the bit at location 24
}
}
}
@@ -7826,7 +7644,7 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns the encoding to apply a Shift Type on the Rm register
*/
-/*static*/ emitter::code_t emitter::insEncodeShiftType(insOpts opt)
+/*static*/ emitter::code_t emitter::insEncodeShiftType(insOpts opt)
{
if (emitter::insOptsNone(opt))
{
@@ -7835,10 +7653,10 @@ void emitter::emitIns_Call(EmitCallType callType,
}
assert(emitter::insOptsAnyShift(opt));
- emitter::code_t option = (emitter::code_t) opt - (emitter::code_t) INS_OPTS_LSL;
+ emitter::code_t option = (emitter::code_t)opt - (emitter::code_t)INS_OPTS_LSL;
assert(option <= 3);
- return option << 22; // bits 23, 22
+ return option << 22; // bits 23, 22
}
/*****************************************************************************
@@ -7846,21 +7664,21 @@ void emitter::emitIns_Call(EmitCallType callType,
* Returns the encoding to apply a 12 bit left shift to the immediate
*/
-/*static*/ emitter::code_t emitter::insEncodeShiftImm12(insOpts opt)
+/*static*/ emitter::code_t emitter::insEncodeShiftImm12(insOpts opt)
{
if (emitter::insOptsLSL12(opt))
{
- return 0x00400000; // set the bit at location 22
+ return 0x00400000; // set the bit at location 22
}
return 0;
}
/*****************************************************************************
*
- * Returns the encoding to have the Rm register use an extend operation
+ * Returns the encoding to have the Rm register use an extend operation
*/
-/*static*/ emitter::code_t emitter::insEncodeExtend(insOpts opt)
+/*static*/ emitter::code_t emitter::insEncodeExtend(insOpts opt)
{
if (emitter::insOptsNone(opt) || (opt == INS_OPTS_LSL))
{
@@ -7869,36 +7687,35 @@ void emitter::emitIns_Call(EmitCallType callType,
}
assert(emitter::insOptsAnyExtend(opt));
- emitter::code_t option = (emitter::code_t) opt - (emitter::code_t) INS_OPTS_UXTB;
+ emitter::code_t option = (emitter::code_t)opt - (emitter::code_t)INS_OPTS_UXTB;
assert(option <= 7);
- return option << 13; // bits 15,14,13
+ return option << 13; // bits 15,14,13
}
/*****************************************************************************
*
- * Returns the encoding to scale the Rm register by {0,1,2,3,4}
- * when using an extend operation
+ * Returns the encoding to scale the Rm register by {0,1,2,3,4}
+ * when using an extend operation
*/
-/*static*/ emitter::code_t emitter::insEncodeExtendScale(ssize_t imm)
+/*static*/ emitter::code_t emitter::insEncodeExtendScale(ssize_t imm)
{
assert((imm >= 0) && (imm <= 4));
- return (emitter::code_t) imm << 10; // bits 12,11,10
+ return (emitter::code_t)imm << 10; // bits 12,11,10
}
-
/*****************************************************************************
*
- * Returns the encoding to have the Rm register be auto scaled by the ld/st size
+ * Returns the encoding to have the Rm register be auto scaled by the ld/st size
*/
-/*static*/ emitter::code_t emitter::insEncodeReg3Scale(bool isScaled)
+/*static*/ emitter::code_t emitter::insEncodeReg3Scale(bool isScaled)
{
if (isScaled)
{
- return 0x00001000; // set the bit at location 12
+ return 0x00001000; // set the bit at location 12
}
else
{
@@ -7906,34 +7723,36 @@ void emitter::emitIns_Call(EmitCallType callType,
}
}
-BYTE* emitter::emitOutputLoadLabel(BYTE* dst, BYTE* srcAddr, BYTE* dstAddr, instrDescJmp *id)
+BYTE* emitter::emitOutputLoadLabel(BYTE* dst, BYTE* srcAddr, BYTE* dstAddr, instrDescJmp* id)
{
- instruction ins = id->idIns();
- insFormat fmt = id->idInsFmt();
- regNumber dstReg = id->idReg1();
+ instruction ins = id->idIns();
+ insFormat fmt = id->idInsFmt();
+ regNumber dstReg = id->idReg1();
if (id->idjShort)
{
// adr x, [rel addr] -- compute address: current addr(ip) + rel addr.
assert(ins == INS_adr);
assert(fmt == IF_DI_1E);
ssize_t distVal = (ssize_t)(dstAddr - srcAddr);
- dst = emitOutputShortAddress(dst, ins, fmt, distVal, dstReg);
+ dst = emitOutputShortAddress(dst, ins, fmt, distVal, dstReg);
}
else
{
// adrp x, [rel page addr] -- compute page address: current page addr + rel page addr
assert(fmt == IF_LARGEADR);
- ssize_t relPageAddr = (((ssize_t)dstAddr & 0xFFFFFFFFFFFFF000LL) - ((ssize_t)srcAddr & 0xFFFFFFFFFFFFF000LL)) >> 12;
+ ssize_t relPageAddr =
+ (((ssize_t)dstAddr & 0xFFFFFFFFFFFFF000LL) - ((ssize_t)srcAddr & 0xFFFFFFFFFFFFF000LL)) >> 12;
dst = emitOutputShortAddress(dst, INS_adrp, IF_DI_1E, relPageAddr, dstReg);
// add x, x, page offs -- compute address = page addr + page offs
- ssize_t imm12 = (ssize_t)dstAddr & 0xFFF; // 12 bits
+ ssize_t imm12 = (ssize_t)dstAddr & 0xFFF; // 12 bits
assert(isValidUimm12(imm12));
- code_t code = emitInsCode(INS_add, IF_DI_2A); // DI_2A X0010001shiiiiii iiiiiinnnnnddddd 1100 0000 imm(i12, sh)
- code |= insEncodeDatasize(EA_8BYTE); // X
- code |= ((code_t)imm12 << 10); // iiiiiiiiiiii
- code |= insEncodeReg_Rd(dstReg); // ddddd
- code |= insEncodeReg_Rn(dstReg); // nnnnn
+ code_t code =
+ emitInsCode(INS_add, IF_DI_2A); // DI_2A X0010001shiiiiii iiiiiinnnnnddddd 1100 0000 imm(i12, sh)
+ code |= insEncodeDatasize(EA_8BYTE); // X
+ code |= ((code_t)imm12 << 10); // iiiiiiiiiiii
+ code |= insEncodeReg_Rd(dstReg); // ddddd
+ code |= insEncodeReg_Rn(dstReg); // nnnnn
dst += emitOutput_Instr(dst, code);
}
return dst;
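For the long form above, the label address is reached with adrp plus add: the adrp immediate is the signed difference between the 4KB pages of the target and of the instruction, and the add immediate is the target's low 12 bits. A standalone sketch of that arithmetic using the same masks as the code (the addresses are hypothetical):

    // Page-relative split used by the adrp/add sequence in emitOutputLoadLabel.
    long long relPageAddr(unsigned long long srcAddr, unsigned long long dstAddr)
    {
        return (long long)((dstAddr & 0xFFFFFFFFFFFFF000ULL) - (srcAddr & 0xFFFFFFFFFFFFF000ULL)) >> 12;
    }
    unsigned long long pageOffset(unsigned long long dstAddr)
    {
        return dstAddr & 0xFFFULL; // imm12 for the trailing add
    }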
@@ -7946,47 +7765,47 @@ BYTE* emitter::emitOutputLoadLabel(BYTE* dst, BYTE* srcAddr, BYTE*
* to handle forward branch patching.
*/
-BYTE* emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i)
+BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i)
{
- instrDescJmp * id = (instrDescJmp*)i;
+ instrDescJmp* id = (instrDescJmp*)i;
- unsigned srcOffs;
- unsigned dstOffs;
- BYTE * srcAddr;
- BYTE * dstAddr;
- ssize_t distVal;
- ssize_t loBits;
+ unsigned srcOffs;
+ unsigned dstOffs;
+ BYTE* srcAddr;
+ BYTE* dstAddr;
+ ssize_t distVal;
+ ssize_t loBits;
// Set default ins/fmt from id.
- instruction ins = id->idIns();
- insFormat fmt = id->idInsFmt();
+ instruction ins = id->idIns();
+ insFormat fmt = id->idInsFmt();
- bool loadLabel = false;
- bool isJump = false;
- bool loadConstant = false;
+ bool loadLabel = false;
+ bool isJump = false;
+ bool loadConstant = false;
switch (ins)
{
- default:
- isJump = true;
- break;
+ default:
+ isJump = true;
+ break;
- case INS_tbz:
- case INS_tbnz:
- case INS_cbz:
- case INS_cbnz:
- isJump = true;
- break;
+ case INS_tbz:
+ case INS_tbnz:
+ case INS_cbz:
+ case INS_cbnz:
+ isJump = true;
+ break;
- case INS_ldr:
- case INS_ldrsw:
- loadConstant = true;
- break;
+ case INS_ldr:
+ case INS_ldrsw:
+ loadConstant = true;
+ break;
- case INS_adr:
- case INS_adrp:
- loadLabel = true;
- break;
+ case INS_adr:
+ case INS_adrp:
+ loadLabel = true;
+ break;
}
/* Figure out the distance to the target */
@@ -8000,15 +7819,15 @@ BYTE* emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
int doff = id->idAddr()->iiaGetJitDataOffset();
assert(doff >= 0);
ssize_t imm = emitGetInsSC(id);
- assert((imm >= 0) && (imm < 0x1000)); // 0x1000 is arbitrary, currently 'imm' is always 0
+ assert((imm >= 0) && (imm < 0x1000)); // 0x1000 is arbitrary, currently 'imm' is always 0
unsigned dataOffs = (unsigned)(doff + imm);
assert(dataOffs < emitDataSize());
dstAddr = emitDataOffsetToPtr(dataOffs);
- regNumber dstReg = id->idReg1();
+ regNumber dstReg = id->idReg1();
regNumber addrReg = dstReg; // an integer register to compute long address.
- emitAttr opSize = id->idOpSize();
+ emitAttr opSize = id->idOpSize();
if (loadConstant)
{
@@ -8018,13 +7837,14 @@ BYTE* emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
assert(ins == INS_ldr);
assert(fmt == IF_LS_1A);
distVal = (ssize_t)(dstAddr - srcAddr);
- dst = emitOutputShortConstant(dst, ins, fmt, distVal, dstReg, opSize);
+ dst = emitOutputShortConstant(dst, ins, fmt, distVal, dstReg, opSize);
}
else
{
// adrp x, [rel page addr] -- compute page address: current page addr + rel page addr
assert(fmt == IF_LARGELDC);
- ssize_t relPageAddr = (((ssize_t)dstAddr & 0xFFFFFFFFFFFFF000LL) - ((ssize_t)srcAddr & 0xFFFFFFFFFFFFF000LL)) >> 12;
+ ssize_t relPageAddr =
+ (((ssize_t)dstAddr & 0xFFFFFFFFFFFFF000LL) - ((ssize_t)srcAddr & 0xFFFFFFFFFFFFF000LL)) >> 12;
if (isVectorRegister(dstReg))
{
// Update addrReg with the reserved integer register
@@ -8047,17 +7867,18 @@ BYTE* emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
// This is needed only for vector constant.
if (addrReg != dstReg)
{
- // fmov Vd,Rn DV_2I X00111100X100111 000000nnnnnddddd 1E27 0000 Vd,Rn (scalar, from general)
+ // fmov Vd,Rn DV_2I X00111100X100111 000000nnnnnddddd 1E27 0000 Vd,Rn
+ // (scalar, from general)
assert(isVectorRegister(dstReg) && isGeneralRegister(addrReg));
- ins = INS_fmov;
- fmt = IF_DV_2I;
+ ins = INS_fmov;
+ fmt = IF_DV_2I;
code_t code = emitInsCode(ins, fmt);
- code |= insEncodeReg_Vd(dstReg); // ddddd
- code |= insEncodeReg_Rn(addrReg); // nnnnn
+ code |= insEncodeReg_Vd(dstReg); // ddddd
+ code |= insEncodeReg_Rn(addrReg); // nnnnn
if (id->idOpSize() == EA_8BYTE)
{
- code |= 0x80400000; // X ... X
+ code |= 0x80400000; // X ... X
}
dst += emitOutput_Instr(dst, code);
}
@@ -8093,19 +7914,19 @@ BYTE* emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
dstAddr = emitOffsetToPtr(dstOffs);
}
- distVal = (ssize_t) (dstAddr - srcAddr);
+ distVal = (ssize_t)(dstAddr - srcAddr);
- if (dstOffs <= srcOffs)
+ if (dstOffs <= srcOffs)
{
-#if DEBUG_EMIT
+#if DEBUG_EMIT
/* This is a backward jump - distance is known at this point */
- if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+ if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- size_t blkOffs = id->idjIG->igOffs;
+ size_t blkOffs = id->idjIG->igOffs;
- if (INTERESTING_JUMP_NUM == 0)
- printf("[3] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
+ if (INTERESTING_JUMP_NUM == 0)
+ printf("[3] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
printf("[3] Jump block is at %08X - %02X = %08X\n", blkOffs, emitOffsAdj, blkOffs - emitOffsAdj);
printf("[3] Jump is at %08X - %02X = %08X\n", srcOffs, emitOffsAdj, srcOffs - emitOffsAdj);
printf("[3] Label block is at %08X - %02X = %08X\n", dstOffs, emitOffsAdj, dstOffs - emitOffsAdj);
@@ -8116,7 +7937,7 @@ BYTE* emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
{
/* This is a forward jump - distance will be an upper limit */
- emitFwdJumps = true;
+ emitFwdJumps = true;
/* The target offset will be closer by at least 'emitOffsAdj', but only if this
jump doesn't cross the hot-cold boundary. */
@@ -8133,33 +7954,29 @@ BYTE* emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
/* Are we overflowing the id->idjOffs bitfield? */
if (id->idjOffs != dstOffs)
- IMPL_LIMITATION("Method is too large");
+ IMPL_LIMITATION("Method is too large");
-#if DEBUG_EMIT
- if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+#if DEBUG_EMIT
+ if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- size_t blkOffs = id->idjIG->igOffs;
+ size_t blkOffs = id->idjIG->igOffs;
- if (INTERESTING_JUMP_NUM == 0)
- printf("[4] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
- printf("[4] Jump block is at %08X\n" , blkOffs);
- printf("[4] Jump is at %08X\n" , srcOffs);
+ if (INTERESTING_JUMP_NUM == 0)
+ printf("[4] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
+ printf("[4] Jump block is at %08X\n", blkOffs);
+ printf("[4] Jump is at %08X\n", srcOffs);
printf("[4] Label block is at %08X - %02X = %08X\n", dstOffs + emitOffsAdj, emitOffsAdj, dstOffs);
}
#endif
-
}
-#ifdef DEBUG
+#ifdef DEBUG
if (0 && emitComp->verbose)
{
- size_t sz = 4;
- int distValSize = id->idjShort ? 4 : 8;
- printf("; %s jump [%08X/%03u] from %0*X to %0*X: dist = %08XH\n",
- (dstOffs <= srcOffs)?"Fwd":"Bwd", dspPtr(id), id->idDebugOnlyInfo()->idNum,
- distValSize, srcOffs + sz,
- distValSize, dstOffs,
- distVal);
+ size_t sz = 4;
+ int distValSize = id->idjShort ? 4 : 8;
+ printf("; %s jump [%08X/%03u] from %0*X to %0*X: dist = %08XH\n", (dstOffs <= srcOffs) ? "Fwd" : "Bwd",
+ dspPtr(id), id->idDebugOnlyInfo()->idNum, distValSize, srcOffs + sz, distValSize, dstOffs, distVal);
}
#endif
@@ -8206,11 +8023,13 @@ BYTE* emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
// the correct offset. Note also that this works for both integer and floating-point conditions, because
// the condition inversion takes ordered/unordered into account, preserving NaN behavior. For example,
// "GT" (greater than) is inverted to "LE" (less than, equal, or unordered).
- dst = emitOutputShortBranch(dst,
- emitJumpKindToIns(emitReverseJumpKind(emitInsToJumpKind(ins))), // reverse the conditional instruction
- IF_BI_0B,
- 8, /* 8 bytes from start of this large conditional pseudo-instruction to L_not. */
- nullptr /* only used for tbz/tbnzcbz/cbnz */);
+ dst =
+ emitOutputShortBranch(dst,
+ emitJumpKindToIns(emitReverseJumpKind(
+ emitInsToJumpKind(ins))), // reverse the conditional instruction
+ IF_BI_0B,
+ 8, /* 8 bytes from start of this large conditional pseudo-instruction to L_not. */
+ nullptr /* only used for tbz/tbnzcbz/cbnz */);
// Now, pretend we've got a normal unconditional branch, and fall through to the code to emit that.
ins = INS_b;
@@ -8229,14 +8048,14 @@ BYTE* emitter::emitOutputLJ(insGroup *ig, BYTE *dst, instrDesc *i
dst = emitOutputLoadLabel(dst, srcAddr, dstAddr, id);
}
- return dst;
+ return dst;
}
/*****************************************************************************
*
* Output a short branch instruction.
*/
-BYTE* emitter::emitOutputShortBranch(BYTE *dst, instruction ins, insFormat fmt, ssize_t distVal, instrDescJmp* id)
+BYTE* emitter::emitOutputShortBranch(BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, instrDescJmp* id)
{
code_t code = emitInsCode(ins, fmt);
@@ -8251,40 +8070,40 @@ BYTE* emitter::emitOutputShortBranch(BYTE *dst, instruction ins, i
distVal &= 0x3FFFFFFLL;
code |= distVal;
}
- else if (fmt == IF_BI_0B) // BI_0B 01010100iiiiiiii iiiiiiiiiiiXXXXX simm19:00
+ else if (fmt == IF_BI_0B) // BI_0B 01010100iiiiiiii iiiiiiiiiiiXXXXX simm19:00
{
// INS_beq, INS_bne, etc...
noway_assert(isValidSimm19(distVal));
distVal &= 0x7FFFFLL;
code |= distVal << 5;
}
- else if (fmt == IF_BI_1A) // BI_1A X.......iiiiiiii iiiiiiiiiiittttt Rt simm19:00
+ else if (fmt == IF_BI_1A) // BI_1A X.......iiiiiiii iiiiiiiiiiittttt Rt simm19:00
{
// INS_cbz or INS_cbnz
assert(id != nullptr);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rt(id->idReg1()); // ttttt
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rt(id->idReg1()); // ttttt
noway_assert(isValidSimm19(distVal));
- distVal &= 0x7FFFFLL; // 19 bits
+ distVal &= 0x7FFFFLL; // 19 bits
code |= distVal << 5;
}
- else if (fmt == IF_BI_1B) // BI_1B B.......bbbbbiii iiiiiiiiiiittttt Rt imm6, simm14:00
+ else if (fmt == IF_BI_1B) // BI_1B B.......bbbbbiii iiiiiiiiiiittttt Rt imm6, simm14:00
{
// INS_tbz or INS_tbnz
assert(id != nullptr);
ssize_t imm = emitGetInsSC(id);
assert(isValidImmShift(imm, id->idOpSize()));
- if (imm & 0x20) // test bit 32-63 ?
+ if (imm & 0x20) // test bit 32-63 ?
{
- code |= 0x80000000; // B
+ code |= 0x80000000; // B
}
- code |= ((imm & 0x1F) << 19); // bbbbb
- code |= insEncodeReg_Rt(id->idReg1()); // ttttt
+ code |= ((imm & 0x1F) << 19); // bbbbb
+ code |= insEncodeReg_Rt(id->idReg1()); // ttttt
noway_assert(isValidSimm14(distVal));
- distVal &= 0x3FFFLL; // 14 bits
+ distVal &= 0x3FFFLL; // 14 bits
code |= distVal << 5;
}
else
@@ -8301,21 +8120,21 @@ BYTE* emitter::emitOutputShortBranch(BYTE *dst, instruction ins, i
*
* Output a short address instruction.
*/
-BYTE* emitter::emitOutputShortAddress(BYTE *dst, instruction ins, insFormat fmt, ssize_t distVal, regNumber reg)
+BYTE* emitter::emitOutputShortAddress(BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, regNumber reg)
{
ssize_t loBits = (distVal & 3);
distVal >>= 2;
code_t code = emitInsCode(ins, fmt);
- if (fmt == IF_DI_1E) // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
+ if (fmt == IF_DI_1E) // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
{
// INS_adr or INS_adrp
- code |= insEncodeReg_Rd(reg); // ddddd
+ code |= insEncodeReg_Rd(reg); // ddddd
noway_assert(isValidSimm19(distVal));
- distVal &= 0x7FFFFLL; // 19 bits
+ distVal &= 0x7FFFFLL; // 19 bits
code |= distVal << 5;
- code |= loBits << 29; // 2 bits
+ code |= loBits << 29; // 2 bits
}
else
{
@@ -8331,7 +8150,8 @@ BYTE* emitter::emitOutputShortAddress(BYTE *dst, instruction ins,
*
* Output a short constant instruction.
*/
-BYTE* emitter::emitOutputShortConstant(BYTE *dst, instruction ins, insFormat fmt, ssize_t imm, regNumber reg, emitAttr opSize)
+BYTE* emitter::emitOutputShortConstant(
+ BYTE* dst, instruction ins, insFormat fmt, ssize_t imm, regNumber reg, emitAttr opSize)
{
code_t code = emitInsCode(ins, fmt);
@@ -8349,8 +8169,8 @@ BYTE* emitter::emitOutputShortConstant(BYTE *dst, instruction ins,
// Is the target a vector register?
if (isVectorRegister(reg))
{
- code |= insEncodeDatasizeVLS(code, opSize); // XX V
- code |= insEncodeReg_Vt(reg); // ttttt
+ code |= insEncodeDatasizeVLS(code, opSize); // XX V
+ code |= insEncodeReg_Vt(reg); // ttttt
}
else
{
@@ -8363,10 +8183,10 @@ BYTE* emitter::emitOutputShortConstant(BYTE *dst, instruction ins,
code |= 0x40000000;
}
- code |= insEncodeReg_Rt(reg); // ttttt
+ code |= insEncodeReg_Rt(reg); // ttttt
}
- distVal &= 0x7FFFFLL; // 19 bits
+ distVal &= 0x7FFFFLL; // 19 bits
code |= distVal << 5;
}
else if (fmt == IF_LS_2B)
@@ -8397,8 +8217,8 @@ BYTE* emitter::emitOutputShortConstant(BYTE *dst, instruction ins,
imm >>= 2;
}
- code |= insEncodeReg_Rt(reg); // ttttt
- code |= insEncodeReg_Rn(reg); // nnnnn
+ code |= insEncodeReg_Rt(reg); // ttttt
+ code |= insEncodeReg_Rn(reg); // nnnnn
code |= imm << 10;
}
else
@@ -8415,20 +8235,20 @@ BYTE* emitter::emitOutputShortConstant(BYTE *dst, instruction ins,
* Output a call instruction.
*/
-unsigned emitter::emitOutputCall(insGroup *ig, BYTE *dst, instrDesc *id, code_t code)
+unsigned emitter::emitOutputCall(insGroup* ig, BYTE* dst, instrDesc* id, code_t code)
{
- const unsigned char callInstrSize = sizeof(code_t); // 4 bytes
- regMaskTP gcrefRegs;
- regMaskTP byrefRegs;
+ const unsigned char callInstrSize = sizeof(code_t); // 4 bytes
+ regMaskTP gcrefRegs;
+ regMaskTP byrefRegs;
VARSET_TP VARSET_INIT_NOCOPY(GCvars, VarSetOps::UninitVal());
- // Is this a "fat" call descriptor?
- if (id->idIsLargeCall())
+ // Is this a "fat" call descriptor?
+ if (id->idIsLargeCall())
{
- instrDescCGCA* idCall = (instrDescCGCA*) id;
- gcrefRegs = idCall->idcGcrefRegs;
- byrefRegs = idCall->idcByrefRegs;
+ instrDescCGCA* idCall = (instrDescCGCA*)id;
+ gcrefRegs = idCall->idcGcrefRegs;
+ byrefRegs = idCall->idcByrefRegs;
VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars);
}
else
@@ -8436,8 +8256,8 @@ unsigned emitter::emitOutputCall(insGroup *ig, BYTE *dst, instrDesc *id, code_t
assert(!id->idIsLargeDsp());
assert(!id->idIsLargeCns());
- gcrefRegs = emitDecodeCallGCregs(id);
- byrefRegs = 0;
+ gcrefRegs = emitDecodeCallGCregs(id);
+ byrefRegs = 0;
VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp));
}
@@ -8459,11 +8279,11 @@ unsigned emitter::emitOutputCall(insGroup *ig, BYTE *dst, instrDesc *id, code_t
assert(outputInstrSize == callInstrSize);
// If the method returns a GC ref, mark INTRET (R0) appropriately.
- if (id->idGCref() == GCT_GCREF)
+ if (id->idGCref() == GCT_GCREF)
{
gcrefRegs |= RBM_INTRET;
}
- else if (id->idGCref() == GCT_BYREF)
+ else if (id->idGCref() == GCT_BYREF)
{
byrefRegs |= RBM_INTRET;
}
@@ -8483,28 +8303,28 @@ unsigned emitter::emitOutputCall(insGroup *ig, BYTE *dst, instrDesc *id, code_t
}
// If the GC register set has changed, report the new set.
- if (gcrefRegs != emitThisGCrefRegs)
+ if (gcrefRegs != emitThisGCrefRegs)
{
emitUpdateLiveGCregs(GCT_GCREF, gcrefRegs, dst);
}
// If the Byref register set has changed, report the new set.
- if (byrefRegs != emitThisByrefRegs)
+ if (byrefRegs != emitThisByrefRegs)
{
emitUpdateLiveGCregs(GCT_BYREF, byrefRegs, dst);
}
// Some helper calls may be marked as not requiring GC info to be recorded.
- if ((!id->idIsNoGC()))
+ if ((!id->idIsNoGC()))
{
// On ARM64, as on AMD64, we don't change the stack pointer to push/pop args.
// So we're not really doing a "stack pop" here (note that "args" is 0), but we use this mechanism
// to record the call for GC info purposes. (It might be best to use an alternate call,
// and protect "emitStackPop" under the EMIT_TRACK_STACK_DEPTH preprocessor variable.)
- emitStackPop(dst, /*isCall*/true, callInstrSize, /*args*/0);
-
+ emitStackPop(dst, /*isCall*/ true, callInstrSize, /*args*/ 0);
+
// Do we need to record a call location for GC purposes?
//
- if (!emitFullGCinfo)
+ if (!emitFullGCinfo)
{
emitRecordGCcall(dst, callInstrSize);
}
@@ -8517,10 +8337,10 @@ unsigned emitter::emitOutputCall(insGroup *ig, BYTE *dst, instrDesc *id, code_t
* Emit a 32-bit Arm64 instruction
*/
-/*static*/ unsigned emitter::emitOutput_Instr(BYTE *dst, code_t code)
+/*static*/ unsigned emitter::emitOutput_Instr(BYTE* dst, code_t code)
{
assert(sizeof(code_t) == 4);
- *((code_t *) dst) = code;
+ *((code_t*)dst) = code;
return sizeof(code_t);
}
@@ -8534,24 +8354,23 @@ unsigned emitter::emitOutputCall(insGroup *ig, BYTE *dst, instrDesc *id, code_t
* descriptor in bytes.
*/
-size_t emitter::emitOutputInstr(insGroup *ig,
- instrDesc *id, BYTE **dp)
+size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
{
- BYTE * dst = *dp;
- BYTE * odst = dst;
- code_t code = 0;
- size_t sz = emitGetInstrDescSize(id); // TODO-ARM64-Cleanup: on ARM, this is set in each case. why?
- instruction ins = id->idIns();
- insFormat fmt = id->idInsFmt();
- emitAttr size = id->idOpSize();
- unsigned char callInstrSize = 0;
- unsigned condcode;
+ BYTE* dst = *dp;
+ BYTE* odst = dst;
+ code_t code = 0;
+ size_t sz = emitGetInstrDescSize(id); // TODO-ARM64-Cleanup: on ARM, this is set in each case. why?
+ instruction ins = id->idIns();
+ insFormat fmt = id->idInsFmt();
+ emitAttr size = id->idOpSize();
+ unsigned char callInstrSize = 0;
+ unsigned condcode;
-#ifdef DEBUG
-#if DUMP_GC_TABLES
- bool dspOffs = emitComp->opts.dspGCtbls;
+#ifdef DEBUG
+#if DUMP_GC_TABLES
+ bool dspOffs = emitComp->opts.dspGCtbls;
#else
- bool dspOffs = !emitComp->opts.disDiffable;
+ bool dspOffs = !emitComp->opts.disDiffable;
#endif
#endif // DEBUG
@@ -8563,969 +8382,969 @@ size_t emitter::emitOutputInstr(insGroup *ig,
switch (fmt)
{
- ssize_t imm;
- ssize_t index;
- ssize_t index2;
- unsigned scale;
- unsigned cmode;
- unsigned immShift;
- bool hasShift;
- emitAttr extSize;
- emitAttr elemsize;
- emitAttr datasize;
-
- case IF_BI_0A: // BI_0A ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
- case IF_BI_0B: // BI_0B ......iiiiiiiiii iiiiiiiiiii..... simm19:00
- case IF_LARGEJMP:
- assert(id->idGCref() == GCT_NONE);
- assert(id->idIsBound());
- dst = emitOutputLJ(ig, dst, id);
- sz = sizeof(instrDescJmp);
- break;
-
- case IF_BI_0C: // BI_0C ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
- code = emitInsCode(ins, fmt);
- sz = id->idIsLargeCall() ? sizeof(instrDescCGCA) : sizeof(instrDesc);
- dst += emitOutputCall(ig, dst, id, code);
- // Always call RecordRelocation so that we wire in a JumpStub when we don't reach
- emitRecordRelocation(odst, id->idAddr()->iiaAddr, IMAGE_REL_ARM64_BRANCH26);
- break;
+ ssize_t imm;
+ ssize_t index;
+ ssize_t index2;
+ unsigned scale;
+ unsigned cmode;
+ unsigned immShift;
+ bool hasShift;
+ emitAttr extSize;
+ emitAttr elemsize;
+ emitAttr datasize;
+
+ case IF_BI_0A: // BI_0A ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
+ case IF_BI_0B: // BI_0B ......iiiiiiiiii iiiiiiiiiii..... simm19:00
+ case IF_LARGEJMP:
+ assert(id->idGCref() == GCT_NONE);
+ assert(id->idIsBound());
+ dst = emitOutputLJ(ig, dst, id);
+ sz = sizeof(instrDescJmp);
+ break;
- case IF_BI_1A: // BI_1A ......iiiiiiiiii iiiiiiiiiiittttt Rt simm19:00
- assert(insOptsNone(id->idInsOpt()));
- assert(id->idIsBound());
+ case IF_BI_0C: // BI_0C ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
+ code = emitInsCode(ins, fmt);
+ sz = id->idIsLargeCall() ? sizeof(instrDescCGCA) : sizeof(instrDesc);
+ dst += emitOutputCall(ig, dst, id, code);
+ // Always call RecordRelocation so that we wire in a JumpStub when we don't reach
+ emitRecordRelocation(odst, id->idAddr()->iiaAddr, IMAGE_REL_ARM64_BRANCH26);
+ break;
- dst = emitOutputLJ(ig, dst, id);
- sz = sizeof(instrDescJmp);
- break;
+ case IF_BI_1A: // BI_1A ......iiiiiiiiii iiiiiiiiiiittttt Rt simm19:00
+ assert(insOptsNone(id->idInsOpt()));
+ assert(id->idIsBound());
- case IF_BI_1B: // BI_1B B.......bbbbbiii iiiiiiiiiiittttt Rt imm6, simm14:00
- assert(insOptsNone(id->idInsOpt()));
- assert(id->idIsBound());
+ dst = emitOutputLJ(ig, dst, id);
+ sz = sizeof(instrDescJmp);
+ break;
- dst = emitOutputLJ(ig, dst, id);
- sz = sizeof(instrDescJmp);
- break;
+ case IF_BI_1B: // BI_1B B.......bbbbbiii iiiiiiiiiiittttt Rt imm6, simm14:00
+ assert(insOptsNone(id->idInsOpt()));
+ assert(id->idIsBound());
- case IF_BR_1A: // BR_1A ................ ......nnnnn..... Rn
- assert(insOptsNone(id->idInsOpt()));
- assert((ins == INS_ret) || (ins == INS_br));
- code = emitInsCode(ins, fmt);
- code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
+ dst = emitOutputLJ(ig, dst, id);
+ sz = sizeof(instrDescJmp);
+ break;
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_BR_1A: // BR_1A ................ ......nnnnn..... Rn
+ assert(insOptsNone(id->idInsOpt()));
+ assert((ins == INS_ret) || (ins == INS_br));
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
- case IF_BR_1B: // BR_1B ................ ......nnnnn..... Rn
- assert(insOptsNone(id->idInsOpt()));
- assert((ins == INS_br_tail) || (ins == INS_blr));
- code = emitInsCode(ins, fmt);
- code |= insEncodeReg_Rn(id->idReg3()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- sz = id->idIsLargeCall() ? sizeof(instrDescCGCA) : sizeof(instrDesc);
- dst += emitOutputCall(ig, dst, id, code);
- break;
+ case IF_BR_1B: // BR_1B ................ ......nnnnn..... Rn
+ assert(insOptsNone(id->idInsOpt()));
+ assert((ins == INS_br_tail) || (ins == INS_blr));
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeReg_Rn(id->idReg3()); // nnnnn
- case IF_LS_1A: // LS_1A XX...V..iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB)
- case IF_LARGELDC:
- assert(insOptsNone(id->idInsOpt()));
- assert(id->idIsBound());
+ sz = id->idIsLargeCall() ? sizeof(instrDescCGCA) : sizeof(instrDesc);
+ dst += emitOutputCall(ig, dst, id, code);
+ break;
- dst = emitOutputLJ(ig, dst, id);
- sz = sizeof(instrDescJmp);
- break;
+ case IF_LS_1A: // LS_1A XX...V..iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB)
+ case IF_LARGELDC:
+ assert(insOptsNone(id->idInsOpt()));
+ assert(id->idIsBound());
- case IF_LS_2A: // LS_2A .X.......X...... ......nnnnnttttt Rt Rn
- assert(insOptsNone(id->idInsOpt()));
- code = emitInsCode(ins, fmt);
- // Is the target a vector register?
- if (isVectorRegister(id->idReg1()))
- {
- code &= 0x3FFFFFFF; // clear the size bits
- code |= insEncodeDatasizeVLS(code, id->idOpSize()); // XX
- code |= insEncodeReg_Vt(id->idReg1()); // ttttt
- }
- else
- {
- code |= insEncodeDatasizeLS(code, id->idOpSize()); // .X.......X
- code |= insEncodeReg_Rt(id->idReg1()); // ttttt
- }
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ dst = emitOutputLJ(ig, dst, id);
+ sz = sizeof(instrDescJmp);
+ break;
- case IF_LS_2B: // LS_2B .X.......Xiiiiii iiiiiinnnnnttttt Rt Rn imm(0-4095)
- assert(insOptsNone(id->idInsOpt()));
- imm = emitGetInsSC(id);
- assert(isValidUimm12(imm));
- code = emitInsCode(ins, fmt);
- // Is the target a vector register?
- if (isVectorRegister(id->idReg1()))
- {
- code &= 0x3FFFFFFF; // clear the size bits
- code |= insEncodeDatasizeVLS(code, id->idOpSize()); // XX
- code |= insEncodeReg_Vt(id->idReg1()); // ttttt
- }
- else
- {
- code |= insEncodeDatasizeLS(code, id->idOpSize()); // .X.......X
- code |= insEncodeReg_Rt(id->idReg1()); // ttttt
- }
- code |= ((code_t) imm << 10); // iiiiiiiiiiii
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_LS_2A: // LS_2A .X.......X...... ......nnnnnttttt Rt Rn
+ assert(insOptsNone(id->idInsOpt()));
+ code = emitInsCode(ins, fmt);
+ // Is the target a vector register?
+ if (isVectorRegister(id->idReg1()))
+ {
+ code &= 0x3FFFFFFF; // clear the size bits
+ code |= insEncodeDatasizeVLS(code, id->idOpSize()); // XX
+ code |= insEncodeReg_Vt(id->idReg1()); // ttttt
+ }
+ else
+ {
+ code |= insEncodeDatasizeLS(code, id->idOpSize()); // .X.......X
+ code |= insEncodeReg_Rt(id->idReg1()); // ttttt
+ }
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_LS_2C: // LS_2C .X.......X.iiiii iiiiPPnnnnnttttt Rt Rn imm(-256..+255) no/pre/post inc
- assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
- imm = emitGetInsSC(id);
- assert((imm >= -256) && (imm <= 255)); // signed 9 bits
- imm &= 0x1ff; // force into unsigned 9 bit representation
- code = emitInsCode(ins, fmt);
- // Is the target a vector register?
- if (isVectorRegister(id->idReg1()))
- {
- code &= 0x3FFFFFFF; // clear the size bits
- code |= insEncodeDatasizeVLS(code, id->idOpSize()); // XX
- code |= insEncodeReg_Vt(id->idReg1()); // ttttt
- }
- else
- {
- code |= insEncodeDatasizeLS(code, id->idOpSize()); // .X.......X
- code |= insEncodeReg_Rt(id->idReg1()); // ttttt
- }
- code |= insEncodeIndexedOpt(id->idInsOpt()); // PP
- code |= ((code_t) imm << 12); // iiiiiiiii
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_LS_2B: // LS_2B .X.......Xiiiiii iiiiiinnnnnttttt Rt Rn imm(0-4095)
+ assert(insOptsNone(id->idInsOpt()));
+ imm = emitGetInsSC(id);
+ assert(isValidUimm12(imm));
+ code = emitInsCode(ins, fmt);
+ // Is the target a vector register?
+ if (isVectorRegister(id->idReg1()))
+ {
+ code &= 0x3FFFFFFF; // clear the size bits
+ code |= insEncodeDatasizeVLS(code, id->idOpSize()); // XX
+ code |= insEncodeReg_Vt(id->idReg1()); // ttttt
+ }
+ else
+ {
+ code |= insEncodeDatasizeLS(code, id->idOpSize()); // .X.......X
+ code |= insEncodeReg_Rt(id->idReg1()); // ttttt
+ }
+ code |= ((code_t)imm << 10); // iiiiiiiiiiii
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_LS_3A: // LS_3A .X.......X.mmmmm oooS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
- assert(insOptsLSExtend(id->idInsOpt()));
- code = emitInsCode(ins, fmt);
- // Is the target a vector register?
- if (isVectorRegister(id->idReg1()))
- {
- code &= 0x3FFFFFFF; // clear the size bits
- code |= insEncodeDatasizeVLS(code, id->idOpSize()); // XX
- code |= insEncodeReg_Vt(id->idReg1()); // ttttt
- }
- else
- {
- code |= insEncodeDatasizeLS(code, id->idOpSize()); // .X.......X
- code |= insEncodeReg_Rt(id->idReg1()); // ttttt
- }
- code |= insEncodeExtend(id->idInsOpt()); // ooo
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- if (id->idIsLclVar())
- {
- code |= insEncodeReg_Rm(codeGen->rsGetRsvdReg()); // mmmmm
- }
- else
- {
- code |= insEncodeReg3Scale(id->idReg3Scaled()); // S
- code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
- }
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_LS_2C: // LS_2C .X.......X.iiiii iiiiPPnnnnnttttt Rt Rn imm(-256..+255) no/pre/post inc
+ assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
+ imm = emitGetInsSC(id);
+ assert((imm >= -256) && (imm <= 255)); // signed 9 bits
+ imm &= 0x1ff; // force into unsigned 9 bit representation
+ code = emitInsCode(ins, fmt);
+ // Is the target a vector register?
+ if (isVectorRegister(id->idReg1()))
+ {
+ code &= 0x3FFFFFFF; // clear the size bits
+ code |= insEncodeDatasizeVLS(code, id->idOpSize()); // XX
+ code |= insEncodeReg_Vt(id->idReg1()); // ttttt
+ }
+ else
+ {
+ code |= insEncodeDatasizeLS(code, id->idOpSize()); // .X.......X
+ code |= insEncodeReg_Rt(id->idReg1()); // ttttt
+ }
+ code |= insEncodeIndexedOpt(id->idInsOpt()); // PP
+ code |= ((code_t)imm << 12); // iiiiiiiii
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_LS_3B: // LS_3B X............... .aaaaannnnnddddd Rd Ra Rn
- assert(insOptsNone(id->idInsOpt()));
- code = emitInsCode(ins, fmt);
- // Is the target a vector register?
- if (isVectorRegister(id->idReg1()))
- {
- code &= 0x3FFFFFFF; // clear the size bits
- code |= insEncodeDatasizeVPLS(code, id->idOpSize()); // XX
- code |= insEncodeReg_Vt(id->idReg1()); // ttttt
- code |= insEncodeReg_Va(id->idReg2()); // aaaaa
- }
- else
- {
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rt(id->idReg1()); // ttttt
- code |= insEncodeReg_Ra(id->idReg2()); // aaaaa
- }
- code |= insEncodeReg_Rn(id->idReg3()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_LS_3A: // LS_3A .X.......X.mmmmm oooS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
+ assert(insOptsLSExtend(id->idInsOpt()));
+ code = emitInsCode(ins, fmt);
+ // Is the target a vector register?
+ if (isVectorRegister(id->idReg1()))
+ {
+ code &= 0x3FFFFFFF; // clear the size bits
+ code |= insEncodeDatasizeVLS(code, id->idOpSize()); // XX
+ code |= insEncodeReg_Vt(id->idReg1()); // ttttt
+ }
+ else
+ {
+ code |= insEncodeDatasizeLS(code, id->idOpSize()); // .X.......X
+ code |= insEncodeReg_Rt(id->idReg1()); // ttttt
+ }
+ code |= insEncodeExtend(id->idInsOpt()); // ooo
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ if (id->idIsLclVar())
+ {
+ code |= insEncodeReg_Rm(codeGen->rsGetRsvdReg()); // mmmmm
+ }
+ else
+ {
+ code |= insEncodeReg3Scale(id->idReg3Scaled()); // S
+ code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
+ }
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_LS_3C: // LS_3C X......PP.iiiiii iaaaaannnnnddddd Rd Ra Rn imm(im7,sh)
- assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
- imm = emitGetInsSC(id);
- assert((imm >= -64) && (imm <= 63)); // signed 7 bits
- imm &= 0x7f; // force into unsigned 7 bit representation
- code = emitInsCode(ins, fmt);
- // Is the target a vector register?
- if (isVectorRegister(id->idReg1()))
- {
- code &= 0x3FFFFFFF; // clear the size bits
- code |= insEncodeDatasizeVPLS(code, id->idOpSize()); // XX
- code |= insEncodeReg_Vt(id->idReg1()); // ttttt
- code |= insEncodeReg_Va(id->idReg2()); // aaaaa
- }
- else
- {
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rt(id->idReg1()); // ttttt
- code |= insEncodeReg_Ra(id->idReg2()); // aaaaa
- }
- code |= insEncodePairIndexedOpt(ins, id->idInsOpt()); // PP
- code |= ((code_t) imm << 15); // iiiiiiiii
- code |= insEncodeReg_Rn(id->idReg3()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_LS_3B: // LS_3B X............... .aaaaannnnnddddd Rd Ra Rn
+ assert(insOptsNone(id->idInsOpt()));
+ code = emitInsCode(ins, fmt);
+ // Is the target a vector register?
+ if (isVectorRegister(id->idReg1()))
+ {
+ code &= 0x3FFFFFFF; // clear the size bits
+ code |= insEncodeDatasizeVPLS(code, id->idOpSize()); // XX
+ code |= insEncodeReg_Vt(id->idReg1()); // ttttt
+ code |= insEncodeReg_Va(id->idReg2()); // aaaaa
+ }
+ else
+ {
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rt(id->idReg1()); // ttttt
+ code |= insEncodeReg_Ra(id->idReg2()); // aaaaa
+ }
+ code |= insEncodeReg_Rn(id->idReg3()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DI_1A: // DI_1A X.......shiiiiii iiiiiinnnnn..... Rn imm(i12,sh)
- assert(insOptsNone(id->idInsOpt()) || insOptsLSL12(id->idInsOpt()));
- imm = emitGetInsSC(id);
- assert(isValidUimm12(imm));
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeShiftImm12(id->idInsOpt()); // sh
- code |= ((code_t) imm << 10); // iiiiiiiiiiii
- code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_LS_3C: // LS_3C X......PP.iiiiii iaaaaannnnnddddd Rd Ra Rn imm(im7,sh)
+ assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
+ imm = emitGetInsSC(id);
+ assert((imm >= -64) && (imm <= 63)); // signed 7 bits
+ imm &= 0x7f; // force into unsigned 7 bit representation
+ code = emitInsCode(ins, fmt);
+ // Is the target a vector register?
+ if (isVectorRegister(id->idReg1()))
+ {
+ code &= 0x3FFFFFFF; // clear the size bits
+ code |= insEncodeDatasizeVPLS(code, id->idOpSize()); // XX
+ code |= insEncodeReg_Vt(id->idReg1()); // ttttt
+ code |= insEncodeReg_Va(id->idReg2()); // aaaaa
+ }
+ else
+ {
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rt(id->idReg1()); // ttttt
+ code |= insEncodeReg_Ra(id->idReg2()); // aaaaa
+ }
+ code |= insEncodePairIndexedOpt(ins, id->idInsOpt()); // PP
+ code |= ((code_t)imm << 15); // iiiiiiiii
+ code |= insEncodeReg_Rn(id->idReg3()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DI_1B: // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw)
- imm = emitGetInsSC(id);
- assert(isValidImmHWVal(imm, id->idOpSize()));
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= ((code_t) imm << 5); // hwiiiii iiiiiiiiiii
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DI_1A: // DI_1A X.......shiiiiii iiiiiinnnnn..... Rn imm(i12,sh)
+ assert(insOptsNone(id->idInsOpt()) || insOptsLSL12(id->idInsOpt()));
+ imm = emitGetInsSC(id);
+ assert(isValidUimm12(imm));
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeShiftImm12(id->idInsOpt()); // sh
+ code |= ((code_t)imm << 10); // iiiiiiiiiiii
+ code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DI_1C: // DI_1C X........Nrrrrrr ssssssnnnnn..... Rn imm(N,r,s)
- imm = emitGetInsSC(id);
- assert(isValidImmNRS(imm, id->idOpSize()));
- code = emitInsCode(ins, fmt);
- code |= ((code_t) imm << 10); // Nrrrrrrssssss
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DI_1B: // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw)
+ imm = emitGetInsSC(id);
+ assert(isValidImmHWVal(imm, id->idOpSize()));
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= ((code_t)imm << 5); // hwiiiii iiiiiiiiiii
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DI_1D: // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s)
- imm = emitGetInsSC(id);
- assert(isValidImmNRS(imm, id->idOpSize()));
- code = emitInsCode(ins, fmt);
- code |= ((code_t) imm << 10); // Nrrrrrrssssss
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DI_1C: // DI_1C X........Nrrrrrr ssssssnnnnn..... Rn imm(N,r,s)
+ imm = emitGetInsSC(id);
+ assert(isValidImmNRS(imm, id->idOpSize()));
+ code = emitInsCode(ins, fmt);
+ code |= ((code_t)imm << 10); // Nrrrrrrssssss
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DI_1E: // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
- case IF_LARGEADR:
- assert(insOptsNone(id->idInsOpt()));
- if (id->idIsReloc())
- {
+ case IF_DI_1D: // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s)
+ imm = emitGetInsSC(id);
+ assert(isValidImmNRS(imm, id->idOpSize()));
code = emitInsCode(ins, fmt);
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= ((code_t)imm << 10); // Nrrrrrrssssss
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
dst += emitOutput_Instr(dst, code);
- emitRecordRelocation(odst, id->idAddr()->iiaAddr, IMAGE_REL_ARM64_PAGEBASE_REL21);
- }
- else
- {
- // Local jmp/load case which does not need a relocation.
- assert(id->idIsBound());
- dst = emitOutputLJ(ig, dst, id);
- }
- sz = sizeof(instrDescJmp);
- break;
+ break;
- case IF_DI_1F: // DI_1F X..........iiiii cccc..nnnnn.nzcv Rn imm5 nzcv cond
- imm = emitGetInsSC(id);
- assert(isValidImmCondFlagsImm5(imm));
- {
- condFlagsImm cfi;
- cfi.immCFVal = (unsigned) imm;
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
- code |= ((code_t) cfi.imm5 << 16); // iiiii
- code |= insEncodeFlags(cfi.flags); // nzcv
- code |= insEncodeCond(cfi.cond); // cccc
- dst += emitOutput_Instr(dst, code);
- }
- break;
+ case IF_DI_1E: // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
+ case IF_LARGEADR:
+ assert(insOptsNone(id->idInsOpt()));
+ if (id->idIsReloc())
+ {
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ dst += emitOutput_Instr(dst, code);
+ emitRecordRelocation(odst, id->idAddr()->iiaAddr, IMAGE_REL_ARM64_PAGEBASE_REL21);
+ }
+ else
+ {
+ // Local jmp/load case which does not need a relocation.
+ assert(id->idIsBound());
+ dst = emitOutputLJ(ig, dst, id);
+ }
+ sz = sizeof(instrDescJmp);
+ break;
- case IF_DI_2A: // DI_2A X.......shiiiiii iiiiiinnnnnddddd Rd Rn imm(i12,sh)
- assert(insOptsNone(id->idInsOpt()) || insOptsLSL12(id->idInsOpt()));
- imm = emitGetInsSC(id);
- assert(isValidUimm12(imm));
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeShiftImm12(id->idInsOpt()); // sh
- code |= ((code_t) imm << 10); // iiiiiiiiiiii
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
+ case IF_DI_1F: // DI_1F X..........iiiii cccc..nnnnn.nzcv Rn imm5 nzcv cond
+ imm = emitGetInsSC(id);
+ assert(isValidImmCondFlagsImm5(imm));
+ {
+ condFlagsImm cfi;
+ cfi.immCFVal = (unsigned)imm;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
+ code |= ((code_t)cfi.imm5 << 16); // iiiii
+ code |= insEncodeFlags(cfi.flags); // nzcv
+ code |= insEncodeCond(cfi.cond); // cccc
+ dst += emitOutput_Instr(dst, code);
+ }
+ break;
- if (id->idIsReloc())
- {
- assert(sz == sizeof(instrDesc));
- assert(id->idAddr()->iiaAddr != nullptr);
- emitRecordRelocation(odst, id->idAddr()->iiaAddr, IMAGE_REL_ARM64_PAGEOFFSET_12A);
- }
- break;
+ case IF_DI_2A: // DI_2A X.......shiiiiii iiiiiinnnnnddddd Rd Rn imm(i12,sh)
+ assert(insOptsNone(id->idInsOpt()) || insOptsLSL12(id->idInsOpt()));
+ imm = emitGetInsSC(id);
+ assert(isValidUimm12(imm));
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeShiftImm12(id->idInsOpt()); // sh
+ code |= ((code_t)imm << 10); // iiiiiiiiiiii
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
- case IF_DI_2B: // DI_2B X.........Xnnnnn ssssssnnnnnddddd Rd Rn imm(0-63)
- code = emitInsCode(ins, fmt);
- imm = emitGetInsSC(id);
- assert(isValidImmShift(imm, id->idOpSize()));
- code |= insEncodeDatasizeBF(code, id->idOpSize()); // X........X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg2()); // Reg2 also in mmmmm
- code |= insEncodeShiftCount(imm, id->idOpSize()); // ssssss
- dst += emitOutput_Instr(dst, code);
- break;
+ if (id->idIsReloc())
+ {
+ assert(sz == sizeof(instrDesc));
+ assert(id->idAddr()->iiaAddr != nullptr);
+ emitRecordRelocation(odst, id->idAddr()->iiaAddr, IMAGE_REL_ARM64_PAGEOFFSET_12A);
+ }
+ break;
- case IF_DI_2C: // DI_2C X........Nrrrrrr ssssssnnnnnddddd Rd Rn imm(N,r,s)
- imm = emitGetInsSC(id);
- assert(isValidImmNRS(imm, id->idOpSize()));
- code = emitInsCode(ins, fmt);
- code |= ((code_t) imm << 10); // Nrrrrrrssssss
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DI_2B: // DI_2B X.........Xnnnnn ssssssnnnnnddddd Rd Rn imm(0-63)
+ code = emitInsCode(ins, fmt);
+ imm = emitGetInsSC(id);
+ assert(isValidImmShift(imm, id->idOpSize()));
+ code |= insEncodeDatasizeBF(code, id->idOpSize()); // X........X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg2()); // Reg2 also in mmmmm
+ code |= insEncodeShiftCount(imm, id->idOpSize()); // ssssss
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DI_2D: // DI_2D X........Nrrrrrr ssssssnnnnnddddd Rd Rn imr, imms (N,r,s)
- if (ins == INS_asr || ins == INS_lsl || ins == INS_lsr)
- {
+ case IF_DI_2C: // DI_2C X........Nrrrrrr ssssssnnnnnddddd Rd Rn imm(N,r,s)
imm = emitGetInsSC(id);
- assert(isValidImmShift(imm, id->idOpSize()));
+ assert(isValidImmNRS(imm, id->idOpSize()));
+ code = emitInsCode(ins, fmt);
+ code |= ((code_t)imm << 10); // Nrrrrrrssssss
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
+
+ case IF_DI_2D: // DI_2D X........Nrrrrrr ssssssnnnnnddddd Rd Rn imr, imms (N,r,s)
+ if (ins == INS_asr || ins == INS_lsl || ins == INS_lsr)
+ {
+ imm = emitGetInsSC(id);
+ assert(isValidImmShift(imm, id->idOpSize()));
- // Shift immediates are aliases of the SBFM/UBFM instructions
- // that actually take 2 registers and 2 constants,
- // Since we stored the shift immediate value
- // we need to calculate the N,R and S values here.
+ // Shift immediates are aliases of the SBFM/UBFM instructions
+ // that actually take 2 registers and 2 constants,
+ // Since we stored the shift immediate value
+ // we need to calculate the N,R and S values here.
- bitMaskImm bmi;
- bmi.immNRS = 0;
+ bitMaskImm bmi;
+ bmi.immNRS = 0;
- bmi.immN = (size == EA_8BYTE) ? 1 : 0;
- bmi.immR = imm;
- bmi.immS = (size == EA_8BYTE) ? 0x3f : 0x1f;
+ bmi.immN = (size == EA_8BYTE) ? 1 : 0;
+ bmi.immR = imm;
+ bmi.immS = (size == EA_8BYTE) ? 0x3f : 0x1f;
- // immR and immS are now set correctly for INS_asr and INS_lsr
- // but for INS_lsl we have to adjust the values for immR and immS
- //
- if (ins == INS_lsl)
+ // immR and immS are now set correctly for INS_asr and INS_lsr
+ // but for INS_lsl we have to adjust the values for immR and immS
+ //
+ if (ins == INS_lsl)
+ {
+ bmi.immR = -imm & bmi.immS;
+ bmi.immS = bmi.immS - imm;
+ }
+
+ // setup imm with the proper 13 bit value N:R:S
+ //
+ imm = bmi.immNRS;
+ }
+ else
{
- bmi.immR = -imm & bmi.immS;
- bmi.immS = bmi.immS - imm;
+ // The other instructions have already have encoded N,R and S values
+ imm = emitGetInsSC(id);
}
+ assert(isValidImmNRS(imm, id->idOpSize()));
- // setup imm with the proper 13 bit value N:R:S
- //
- imm = bmi.immNRS;
- }
- else
- {
- // The other instructions have already have encoded N,R and S values
- imm = emitGetInsSC(id);
- }
- assert(isValidImmNRS(imm, id->idOpSize()));
-
- code = emitInsCode(ins, fmt);
- code |= ((code_t)imm << 10); // Nrrrrrrssssss
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ code = emitInsCode(ins, fmt);
+ code |= ((code_t)imm << 10); // Nrrrrrrssssss
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_1D: // DR_1D X............... cccc.......ddddd Rd cond
- imm = emitGetInsSC(id);
- assert(isValidImmCond(imm));
- {
- condFlagsImm cfi;
- cfi.immCFVal = (unsigned) imm;
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeInvertedCond(cfi.cond); // cccc
- dst += emitOutput_Instr(dst, code);
- }
- break;
+ case IF_DR_1D: // DR_1D X............... cccc.......ddddd Rd cond
+ imm = emitGetInsSC(id);
+ assert(isValidImmCond(imm));
+ {
+ condFlagsImm cfi;
+ cfi.immCFVal = (unsigned)imm;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeInvertedCond(cfi.cond); // cccc
+ dst += emitOutput_Instr(dst, code);
+ }
+ break;
- case IF_DR_2A: // DR_2A X..........mmmmm ......nnnnn..... Rn Rm
- assert(insOptsNone(id->idInsOpt()));
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DR_2A: // DR_2A X..........mmmmm ......nnnnn..... Rn Rm
+ assert(insOptsNone(id->idInsOpt()));
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_2B: // DR_2B X.......sh.mmmmm ssssssnnnnn..... Rn Rm {LSL,LSR,ASR,ROR} imm(0-63)
- code = emitInsCode(ins, fmt);
- imm = emitGetInsSC(id);
- assert(isValidImmShift(imm, id->idOpSize()));
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeShiftType(id->idInsOpt()); // sh
- code |= insEncodeShiftCount(imm, id->idOpSize()); // ssssss
- code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DR_2B: // DR_2B X.......sh.mmmmm ssssssnnnnn..... Rn Rm {LSL,LSR,ASR,ROR} imm(0-63)
+ code = emitInsCode(ins, fmt);
+ imm = emitGetInsSC(id);
+ assert(isValidImmShift(imm, id->idOpSize()));
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeShiftType(id->idInsOpt()); // sh
+ code |= insEncodeShiftCount(imm, id->idOpSize()); // ssssss
+ code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_2C: // DR_2C X..........mmmmm ooosssnnnnn..... Rn Rm ext(Rm) LSL imm(0-4)
- code = emitInsCode(ins, fmt);
- imm = emitGetInsSC(id);
- assert((imm >= 0) && (imm <= 4)); // imm [0..4]
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeExtend(id->idInsOpt()); // ooo
- code |= insEncodeExtendScale(imm); // sss
- code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DR_2C: // DR_2C X..........mmmmm ooosssnnnnn..... Rn Rm ext(Rm) LSL imm(0-4)
+ code = emitInsCode(ins, fmt);
+ imm = emitGetInsSC(id);
+ assert((imm >= 0) && (imm <= 4)); // imm [0..4]
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeExtend(id->idInsOpt()); // ooo
+ code |= insEncodeExtendScale(imm); // sss
+ code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_2D: // DR_2D X..........nnnnn cccc..nnnnnddddd Rd Rn cond
- imm = emitGetInsSC(id);
- assert(isValidImmCond(imm));
- {
- condFlagsImm cfi;
- cfi.immCFVal = (unsigned) imm;
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
- code |= insEncodeInvertedCond(cfi.cond); // cccc
- dst += emitOutput_Instr(dst, code);
- }
- break;
+ case IF_DR_2D: // DR_2D X..........nnnnn cccc..nnnnnddddd Rd Rn cond
+ imm = emitGetInsSC(id);
+ assert(isValidImmCond(imm));
+ {
+ condFlagsImm cfi;
+ cfi.immCFVal = (unsigned)imm;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
+ code |= insEncodeInvertedCond(cfi.cond); // cccc
+ dst += emitOutput_Instr(dst, code);
+ }
+ break;
- case IF_DR_2E: // DR_2E X..........mmmmm ...........ddddd Rd Rm
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DR_2E: // DR_2E X..........mmmmm ...........ddddd Rd Rm
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_2F: // DR_2F X.......sh.mmmmm ssssss.....ddddd Rd Rm {LSL,LSR,ASR} imm(0-63)
- code = emitInsCode(ins, fmt);
- imm = emitGetInsSC(id);
- assert(isValidImmShift(imm, id->idOpSize()));
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeShiftType(id->idInsOpt()); // sh
- code |= insEncodeShiftCount(imm, id->idOpSize()); // ssssss
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DR_2F: // DR_2F X.......sh.mmmmm ssssss.....ddddd Rd Rm {LSL,LSR,ASR} imm(0-63)
+ code = emitInsCode(ins, fmt);
+ imm = emitGetInsSC(id);
+ assert(isValidImmShift(imm, id->idOpSize()));
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeShiftType(id->idInsOpt()); // sh
+ code |= insEncodeShiftCount(imm, id->idOpSize()); // ssssss
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_2G: // DR_2G X............... .....xnnnnnddddd Rd Rn
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- if (ins == INS_rev)
- {
- if (size == EA_8BYTE)
+ case IF_DR_2G: // DR_2G X............... .....xnnnnnddddd Rd Rn
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ if (ins == INS_rev)
{
- code |= 0x00000400; // x - bit at location 10
- }
- }
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
-
- case IF_DR_2H: // DR_2H X........X...... ......nnnnnddddd Rd Rn
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasizeBF(code, id->idOpSize()); // X........X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ if (size == EA_8BYTE)
+ {
+ code |= 0x00000400; // x - bit at location 10
+ }
+ }
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_2I: // DR_2I X..........mmmmm cccc..nnnnn.nzcv Rn Rm nzcv cond
- imm = emitGetInsSC(id);
- assert(isValidImmCondFlags(imm));
- {
- condFlagsImm cfi;
- cfi.immCFVal = (unsigned) imm;
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
- code |= insEncodeFlags(cfi.flags); // nzcv
- code |= insEncodeCond(cfi.cond); // cccc
- dst += emitOutput_Instr(dst, code);
- }
- break;
+ case IF_DR_2H: // DR_2H X........X...... ......nnnnnddddd Rd Rn
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasizeBF(code, id->idOpSize()); // X........X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_3A: // DR_3A X..........mmmmm ......nnnnnmmmmm Rd Rn Rm
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- if (id->idIsLclVar())
- {
- code |= insEncodeReg_Rm(codeGen->rsGetRsvdReg()); // mmmmm
- }
- else
- {
- code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
- }
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DR_2I: // DR_2I X..........mmmmm cccc..nnnnn.nzcv Rn Rm nzcv cond
+ imm = emitGetInsSC(id);
+ assert(isValidImmCondFlags(imm));
+ {
+ condFlagsImm cfi;
+ cfi.immCFVal = (unsigned)imm;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rn(id->idReg1()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg2()); // mmmmm
+ code |= insEncodeFlags(cfi.flags); // nzcv
+ code |= insEncodeCond(cfi.cond); // cccc
+ dst += emitOutput_Instr(dst, code);
+ }
+ break;
- case IF_DR_3B: // DR_3B X.......sh.mmmmm ssssssnnnnnddddd Rd Rn Rm {LSL,LSR,ASR} imm(0-63)
- code = emitInsCode(ins, fmt);
- imm = emitGetInsSC(id);
- assert(isValidImmShift(imm, id->idOpSize()));
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
- code |= insEncodeShiftType(id->idInsOpt()); // sh
- code |= insEncodeShiftCount(imm, id->idOpSize()); // ssssss
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DR_3A: // DR_3A X..........mmmmm ......nnnnnmmmmm Rd Rn Rm
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ if (id->idIsLclVar())
+ {
+ code |= insEncodeReg_Rm(codeGen->rsGetRsvdReg()); // mmmmm
+ }
+ else
+ {
+ code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
+ }
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_3C: // DR_3C X..........mmmmm ooosssnnnnnddddd Rd Rn Rm ext(Rm) LSL imm(0-4)
- code = emitInsCode(ins, fmt);
- imm = emitGetInsSC(id);
- assert((imm >= 0) && (imm <= 4)); // imm [0..4]
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeExtend(id->idInsOpt()); // ooo
- code |= insEncodeExtendScale(imm); // sss
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DR_3B: // DR_3B X.......sh.mmmmm ssssssnnnnnddddd Rd Rn Rm {LSL,LSR,ASR} imm(0-63)
+ code = emitInsCode(ins, fmt);
+ imm = emitGetInsSC(id);
+ assert(isValidImmShift(imm, id->idOpSize()));
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
+ code |= insEncodeShiftType(id->idInsOpt()); // sh
+ code |= insEncodeShiftCount(imm, id->idOpSize()); // ssssss
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_3D: // DR_3D X..........mmmmm cccc..nnnnnddddd Rd Rn Rm cond
- imm = emitGetInsSC(id);
- assert(isValidImmCond(imm));
- {
- condFlagsImm cfi;
- cfi.immCFVal = (unsigned) imm;
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
- code |= insEncodeCond(cfi.cond); // cccc
- dst += emitOutput_Instr(dst, code);
- }
- break;
+ case IF_DR_3C: // DR_3C X..........mmmmm ooosssnnnnnddddd Rd Rn Rm ext(Rm) LSL imm(0-4)
+ code = emitInsCode(ins, fmt);
+ imm = emitGetInsSC(id);
+ assert((imm >= 0) && (imm <= 4)); // imm [0..4]
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeExtend(id->idInsOpt()); // ooo
+ code |= insEncodeExtendScale(imm); // sss
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DR_3E: // DR_3E X........X.mmmmm ssssssnnnnnddddd Rd Rn Rm imm(0-63)
- code = emitInsCode(ins, fmt);
- imm = emitGetInsSC(id);
- assert(isValidImmShift(imm, id->idOpSize()));
- code |= insEncodeDatasizeBF(code, id->idOpSize()); // X........X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
- code |= insEncodeShiftCount(imm, id->idOpSize()); // ssssss
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DR_3D: // DR_3D X..........mmmmm cccc..nnnnnddddd Rd Rn Rm cond
+ imm = emitGetInsSC(id);
+ assert(isValidImmCond(imm));
+ {
+ condFlagsImm cfi;
+ cfi.immCFVal = (unsigned)imm;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
+ code |= insEncodeCond(cfi.cond); // cccc
+ dst += emitOutput_Instr(dst, code);
+ }
+ break;
- case IF_DR_4A: // DR_4A X..........mmmmm .aaaaannnnnmmmmm Rd Rn Rm Ra
- code = emitInsCode(ins, fmt);
- code |= insEncodeDatasize(id->idOpSize()); // X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
- code |= insEncodeReg_Ra(id->idReg4()); // aaaaa
- dst += emitOutput_Instr(dst, code);
- break;
-
- case IF_DV_1A: // DV_1A .........X.iiiii iii........ddddd Vd imm8 (fmov - immediate scalar)
- imm = emitGetInsSC(id);
- elemsize = id->idOpSize();
- code = emitInsCode(ins, fmt);
- code |= insEncodeFloatElemsize(elemsize); // X
- code |= ((code_t) imm << 13); // iiiii iii
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DR_3E: // DR_3E X........X.mmmmm ssssssnnnnnddddd Rd Rn Rm imm(0-63)
+ code = emitInsCode(ins, fmt);
+ imm = emitGetInsSC(id);
+ assert(isValidImmShift(imm, id->idOpSize()));
+ code |= insEncodeDatasizeBF(code, id->idOpSize()); // X........X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
+ code |= insEncodeShiftCount(imm, id->idOpSize()); // ssssss
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_1B: // DV_1B .QX..........iii cmod..iiiiiddddd Vd imm8 (immediate vector)
- imm = emitGetInsSC(id) & 0x0ff;
- immShift = (emitGetInsSC(id) & 0x700) >> 8;
- elemsize = optGetElemsize(id->idInsOpt());
- cmode = 0;
- switch (elemsize)
- { // cmode
- case EA_1BYTE:
- cmode = 0xE; // 1110
+ case IF_DR_4A: // DR_4A X..........mmmmm .aaaaannnnnmmmmm Rd Rn Rm Ra
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeDatasize(id->idOpSize()); // X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Rm(id->idReg3()); // mmmmm
+ code |= insEncodeReg_Ra(id->idReg4()); // aaaaa
+ dst += emitOutput_Instr(dst, code);
break;
- case EA_2BYTE:
- cmode = 0x8;
- cmode |= (immShift << 1); // 10x0
+
+ case IF_DV_1A: // DV_1A .........X.iiiii iii........ddddd Vd imm8 (fmov - immediate scalar)
+ imm = emitGetInsSC(id);
+ elemsize = id->idOpSize();
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeFloatElemsize(elemsize); // X
+ code |= ((code_t)imm << 13); // iiiii iii
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ dst += emitOutput_Instr(dst, code);
break;
- case EA_4BYTE:
- if (immShift < 4)
+
+ case IF_DV_1B: // DV_1B .QX..........iii cmod..iiiiiddddd Vd imm8 (immediate vector)
+ imm = emitGetInsSC(id) & 0x0ff;
+ immShift = (emitGetInsSC(id) & 0x700) >> 8;
+ elemsize = optGetElemsize(id->idInsOpt());
+ cmode = 0;
+ switch (elemsize)
+ { // cmode
+ case EA_1BYTE:
+ cmode = 0xE; // 1110
+ break;
+ case EA_2BYTE:
+ cmode = 0x8;
+ cmode |= (immShift << 1); // 10x0
+ break;
+ case EA_4BYTE:
+ if (immShift < 4)
+ {
+ cmode = 0x0;
+ cmode |= (immShift << 1); // 0xx0
+ }
+ else // MSL
+ {
+ cmode = 0xC;
+ if (immShift & 2)
+ cmode |= 1; // 110x
+ }
+ break;
+ case EA_8BYTE:
+ cmode = 0xE; // 1110
+ break;
+ default:
+ // TODO-Cleanup: add unreached() here
+ break;
+ }
+
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorsize(id->idOpSize()); // Q
+ if ((ins == INS_fmov) || (ins == INS_movi))
{
- cmode = 0x0;
- cmode |= (immShift << 1); // 0xx0
+ if (elemsize == EA_8BYTE)
+ {
+ code |= 0x20000000; // X
+ }
}
- else // MSL
+ if (ins != INS_fmov)
{
- cmode = 0xC;
- if (immShift & 2)
- cmode |= 1; // 110x
+ assert((cmode >= 0) && (cmode <= 0xF));
+ code |= (cmode << 12); // cmod
}
+ code |= (((code_t)imm >> 5) << 16); // iii
+ code |= (((code_t)imm & 0x1f) << 5); // iiiii
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ dst += emitOutput_Instr(dst, code);
break;
- case EA_8BYTE:
- cmode = 0xE; // 1110
+
+ case IF_DV_1C: // DV_1C .........X...... ......nnnnn..... Vn #0.0 (fcmp - with zero)
+ elemsize = id->idOpSize();
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeFloatElemsize(elemsize); // X
+ code |= insEncodeReg_Vn(id->idReg1()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
break;
- default:
- // TODO-Cleanup: add unreached() here
+
+ case IF_DV_2A: // DV_2A .Q.......X...... ......nnnnnddddd Vd Vn (fabs, fcvt - vector)
+ elemsize = optGetElemsize(id->idInsOpt());
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorsize(id->idOpSize()); // Q
+ code |= insEncodeFloatElemsize(elemsize); // X
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
break;
- }
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorsize(id->idOpSize()); // Q
- if ((ins == INS_fmov) || (ins== INS_movi))
- {
- if (elemsize == EA_8BYTE)
+ case IF_DV_2B: // DV_2B .Q.........iiiii ......nnnnnddddd Rd Vn[] (umov/smov - to general)
+ elemsize = id->idOpSize();
+ index = emitGetInsSC(id);
+ datasize = (elemsize == EA_8BYTE) ? EA_16BYTE : EA_8BYTE;
+ if (ins == INS_smov)
{
- code |= 0x20000000; // X
+ datasize = EA_16BYTE;
}
- }
- if (ins != INS_fmov)
- {
- assert((cmode >= 0) && (cmode <= 0xF));
- code |= (cmode << 12); // cmod
- }
- code |= (((code_t) imm >> 5) << 16); // iii
- code |= (((code_t) imm & 0x1f) << 5); // iiiii
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- dst += emitOutput_Instr(dst, code);
- break;
-
- case IF_DV_1C: // DV_1C .........X...... ......nnnnn..... Vn #0.0 (fcmp - with zero)
- elemsize = id->idOpSize();
- code = emitInsCode(ins, fmt);
- code |= insEncodeFloatElemsize(elemsize); // X
- code |= insEncodeReg_Vn(id->idReg1()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
-
- case IF_DV_2A: // DV_2A .Q.......X...... ......nnnnnddddd Vd Vn (fabs, fcvt - vector)
- elemsize = optGetElemsize(id->idInsOpt());
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorsize(id->idOpSize()); // Q
- code |= insEncodeFloatElemsize(elemsize); // X
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorsize(datasize); // Q
+ code |= insEncodeVectorIndex(elemsize, index); // iiiii
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2B: // DV_2B .Q.........iiiii ......nnnnnddddd Rd Vn[] (umov/smov - to general)
- elemsize = id->idOpSize();
- index = emitGetInsSC(id);
- datasize = (elemsize == EA_8BYTE) ? EA_16BYTE : EA_8BYTE;
- if (ins == INS_smov)
- {
- datasize = EA_16BYTE;
- }
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorsize(datasize); // Q
- code |= insEncodeVectorIndex(elemsize, index); // iiiii
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2C: // DV_2C .Q.........iiiii ......nnnnnddddd Vd Rn (dup/ins - vector from general)
+ if (ins == INS_dup)
+ {
+ datasize = id->idOpSize();
+ elemsize = optGetElemsize(id->idInsOpt());
+ index = 0;
+ }
+ else // INS_ins
+ {
+ datasize = EA_16BYTE;
+ elemsize = id->idOpSize();
+ index = emitGetInsSC(id);
+ }
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorsize(datasize); // Q
+ code |= insEncodeVectorIndex(elemsize, index); // iiiii
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2C: // DV_2C .Q.........iiiii ......nnnnnddddd Vd Rn (dup/ins - vector from general)
- if (ins == INS_dup)
- {
- datasize = id->idOpSize();
- elemsize = optGetElemsize(id->idInsOpt());
- index = 0;
- }
- else // INS_ins
- {
- datasize = EA_16BYTE;
- elemsize = id->idOpSize();
+ case IF_DV_2D: // DV_2D .Q.........iiiii ......nnnnnddddd Vd Vn[] (dup - vector)
index = emitGetInsSC(id);
- }
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorsize(datasize); // Q
- code |= insEncodeVectorIndex(elemsize, index); // iiiii
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ elemsize = optGetElemsize(id->idInsOpt());
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorsize(id->idOpSize()); // Q
+ code |= insEncodeVectorIndex(elemsize, index); // iiiii
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2D: // DV_2D .Q.........iiiii ......nnnnnddddd Vd Vn[] (dup - vector)
- index = emitGetInsSC(id);
- elemsize = optGetElemsize(id->idInsOpt());
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorsize(id->idOpSize()); // Q
- code |= insEncodeVectorIndex(elemsize, index); // iiiii
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2E: // DV_2E ...........iiiii ......nnnnnddddd Vd Vn[] (dup - scalar)
+ index = emitGetInsSC(id);
+ elemsize = id->idOpSize();
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorIndex(elemsize, index); // iiiii
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2E: // DV_2E ...........iiiii ......nnnnnddddd Vd Vn[] (dup - scalar)
- index = emitGetInsSC(id);
- elemsize = id->idOpSize();
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorIndex(elemsize, index); // iiiii
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2F: // DV_2F ...........iiiii .jjjj.nnnnnddddd Vd[] Vn[] (ins - element)
+ elemsize = id->idOpSize();
+ imm = emitGetInsSC(id);
+ index = (imm >> 4) & 0xf;
+ index2 = imm & 0xf;
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorIndex(elemsize, index); // iiiii
+ code |= insEncodeVectorIndex2(elemsize, index2); // jjjj
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2F: // DV_2F ...........iiiii .jjjj.nnnnnddddd Vd[] Vn[] (ins - element)
- elemsize = id->idOpSize();
- imm = emitGetInsSC(id);
- index = (imm >> 4) & 0xf;
- index2 = imm & 0xf;
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorIndex(elemsize, index); // iiiii
- code |= insEncodeVectorIndex2(elemsize, index2); // jjjj
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2G: // DV_2G .........X...... ......nnnnnddddd Vd Vn (fmov,fcvtXX - register)
+ elemsize = id->idOpSize();
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeFloatElemsize(elemsize); // X
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2G: // DV_2G .........X...... ......nnnnnddddd Vd Vn (fmov,fcvtXX - register)
- elemsize = id->idOpSize();
- code = emitInsCode(ins, fmt);
- code |= insEncodeFloatElemsize(elemsize); // X
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2H: // DV_2H X........X...... ......nnnnnddddd Rd Vn (fmov - to general)
+ elemsize = id->idOpSize();
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeConvertOpt(fmt, id->idInsOpt()); // X X
+ code |= insEncodeReg_Rd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2H: // DV_2H X........X...... ......nnnnnddddd Rd Vn (fmov - to general)
- elemsize = id->idOpSize();
- code = emitInsCode(ins, fmt);
- code |= insEncodeConvertOpt(fmt, id->idInsOpt()); // X X
- code |= insEncodeReg_Rd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2I: // DV_2I X........X...... ......nnnnnddddd Vd Rn (fmov - from general)
+ elemsize = id->idOpSize();
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeConvertOpt(fmt, id->idInsOpt()); // X X
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2I: // DV_2I X........X...... ......nnnnnddddd Vd Rn (fmov - from general)
- elemsize = id->idOpSize();
- code = emitInsCode(ins, fmt);
- code |= insEncodeConvertOpt(fmt, id->idInsOpt()); // X X
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Rn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2J: // DV_2J ........SS.....D D.....nnnnnddddd Vd Vn (fcvt)
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeConvertOpt(fmt, id->idInsOpt()); // SS DD
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2J: // DV_2J ........SS.....D D.....nnnnnddddd Vd Vn (fcvt)
- code = emitInsCode(ins, fmt);
- code |= insEncodeConvertOpt(fmt, id->idInsOpt()); // SS DD
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2K: // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
+ elemsize = id->idOpSize();
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeFloatElemsize(elemsize); // X
+ code |= insEncodeReg_Vn(id->idReg1()); // nnnnn
+ code |= insEncodeReg_Vm(id->idReg2()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2K: // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
- elemsize = id->idOpSize();
- code = emitInsCode(ins, fmt);
- code |= insEncodeFloatElemsize(elemsize); // X
- code |= insEncodeReg_Vn(id->idReg1()); // nnnnn
- code |= insEncodeReg_Vm(id->idReg2()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2L: // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
+ elemsize = id->idOpSize();
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeElemsize(elemsize); // XX
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2L: // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
- elemsize = id->idOpSize();
- code = emitInsCode(ins, fmt);
- code |= insEncodeElemsize(elemsize); // XX
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
+ elemsize = optGetElemsize(id->idInsOpt());
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorsize(id->idOpSize()); // Q
+ code |= insEncodeElemsize(elemsize); // XX
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
- elemsize = optGetElemsize(id->idInsOpt());
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorsize(id->idOpSize()); // Q
- code |= insEncodeElemsize(elemsize); // XX
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
-
- case IF_DV_2N: // DV_2N .........iiiiiii ......nnnnnddddd Vd Vn imm (shift - scalar)
- imm = emitGetInsSC(id);
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorShift(EA_8BYTE, imm); // iiiiiii
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2N: // DV_2N .........iiiiiii ......nnnnnddddd Vd Vn imm (shift - scalar)
+ imm = emitGetInsSC(id);
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorShift(EA_8BYTE, imm); // iiiiiii
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_2O: // DV_2O .Q.......iiiiiii ......nnnnnddddd Vd Vn imm (shift - vector)
- imm = emitGetInsSC(id);
- elemsize = optGetElemsize(id->idInsOpt());
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorsize(id->idOpSize()); // Q
- code |= insEncodeVectorShift(elemsize, imm); // iiiiiii
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_2O: // DV_2O .Q.......iiiiiii ......nnnnnddddd Vd Vn imm (shift - vector)
+ imm = emitGetInsSC(id);
+ elemsize = optGetElemsize(id->idInsOpt());
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorsize(id->idOpSize()); // Q
+ code |= insEncodeVectorShift(elemsize, imm); // iiiiiii
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_3A: // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- code = emitInsCode(ins, fmt);
- elemsize = optGetElemsize(id->idInsOpt());
- code |= insEncodeVectorsize(id->idOpSize()); // Q
- code |= insEncodeElemsize(elemsize); // XX
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_3A: // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ code = emitInsCode(ins, fmt);
+ elemsize = optGetElemsize(id->idInsOpt());
+ code |= insEncodeVectorsize(id->idOpSize()); // Q
+ code |= insEncodeElemsize(elemsize); // XX
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_3AI: // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector)
- code = emitInsCode(ins, fmt);
- imm = emitGetInsSC(id);
- elemsize = optGetElemsize(id->idInsOpt());
- assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
- code |= insEncodeVectorsize(id->idOpSize()); // Q
- code |= insEncodeElemsize(elemsize); // XX
- code |= insEncodeVectorIndexLMH(elemsize, imm); // LM H
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_3AI: // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector)
+ code = emitInsCode(ins, fmt);
+ imm = emitGetInsSC(id);
+ elemsize = optGetElemsize(id->idInsOpt());
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
+ code |= insEncodeVectorsize(id->idOpSize()); // Q
+ code |= insEncodeElemsize(elemsize); // XX
+ code |= insEncodeVectorIndexLMH(elemsize, imm); // LM H
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_3B: // DV_3B .Q.......X.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- code = emitInsCode(ins, fmt);
- elemsize = optGetElemsize(id->idInsOpt());
- code |= insEncodeVectorsize(id->idOpSize()); // Q
- code |= insEncodeFloatElemsize(elemsize); // X
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_3B: // DV_3B .Q.......X.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ code = emitInsCode(ins, fmt);
+ elemsize = optGetElemsize(id->idInsOpt());
+ code |= insEncodeVectorsize(id->idOpSize()); // Q
+ code |= insEncodeFloatElemsize(elemsize); // X
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_3BI: // DV_3BI .Q.......XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
- code = emitInsCode(ins, fmt);
- imm = emitGetInsSC(id);
- elemsize = optGetElemsize(id->idInsOpt());
- assert(isValidVectorIndex(id->idOpSize(), elemsize, imm));
- code |= insEncodeVectorsize(id->idOpSize()); // Q
- code |= insEncodeFloatElemsize(elemsize); // X
- code |= insEncodeFloatIndex(elemsize, imm); // L H
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_3BI: // DV_3BI .Q.......XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
+ code = emitInsCode(ins, fmt);
+ imm = emitGetInsSC(id);
+ elemsize = optGetElemsize(id->idInsOpt());
+ assert(isValidVectorIndex(id->idOpSize(), elemsize, imm));
+ code |= insEncodeVectorsize(id->idOpSize()); // Q
+ code |= insEncodeFloatElemsize(elemsize); // X
+ code |= insEncodeFloatIndex(elemsize, imm); // L H
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_3C: // DV_3C .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- code = emitInsCode(ins, fmt);
- code |= insEncodeVectorsize(id->idOpSize()); // Q
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_3C: // DV_3C .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeVectorsize(id->idOpSize()); // Q
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_3D: // DV_3D .........X.mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
- code = emitInsCode(ins, fmt);
- code |= insEncodeFloatElemsize(id->idOpSize()); // X
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_3D: // DV_3D .........X.mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeFloatElemsize(id->idOpSize()); // X
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_3DI: // DV_3DI .........XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (scalar by elem)
- code = emitInsCode(ins, fmt);
- imm = emitGetInsSC(id);
- elemsize = id->idOpSize();
- assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
- code |= insEncodeFloatElemsize(elemsize); // X
- code |= insEncodeFloatIndex(elemsize, imm); // L H
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_3DI: // DV_3DI .........XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (scalar by elem)
+ code = emitInsCode(ins, fmt);
+ imm = emitGetInsSC(id);
+ elemsize = id->idOpSize();
+ assert(isValidVectorIndex(EA_16BYTE, elemsize, imm));
+ code |= insEncodeFloatElemsize(elemsize); // X
+ code |= insEncodeFloatIndex(elemsize, imm); // L H
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_3E: // DV_3E ...........mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
- code = emitInsCode(ins, fmt);
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_3E: // DV_3E ...........mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
+ code = emitInsCode(ins, fmt);
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_DV_4A: // DV_4A .........X.mmmmm .aaaaannnnnddddd Vd Va Vn Vm (scalar)
- code = emitInsCode(ins, fmt);
- elemsize = id->idOpSize();
- code |= insEncodeFloatElemsize(elemsize); // X
- code |= insEncodeReg_Vd(id->idReg1()); // ddddd
- code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
- code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
- code |= insEncodeReg_Va(id->idReg4()); // aaaaa
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_DV_4A: // DV_4A .........X.mmmmm .aaaaannnnnddddd Vd Va Vn Vm (scalar)
+ code = emitInsCode(ins, fmt);
+ elemsize = id->idOpSize();
+ code |= insEncodeFloatElemsize(elemsize); // X
+ code |= insEncodeReg_Vd(id->idReg1()); // ddddd
+ code |= insEncodeReg_Vn(id->idReg2()); // nnnnn
+ code |= insEncodeReg_Vm(id->idReg3()); // mmmmm
+ code |= insEncodeReg_Va(id->idReg4()); // aaaaa
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_SN_0A: // SN_0A ................ ................
- code = emitInsCode(ins, fmt);
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_SN_0A: // SN_0A ................ ................
+ code = emitInsCode(ins, fmt);
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_SI_0A: // SI_0A ...........iiiii iiiiiiiiiii..... imm16
- imm = emitGetInsSC(id);
- assert(isValidUimm16(imm));
- code = emitInsCode(ins, fmt);
- code |= ((code_t) imm << 5); // iiiii iiiiiiiiiii
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_SI_0A: // SI_0A ...........iiiii iiiiiiiiiii..... imm16
+ imm = emitGetInsSC(id);
+ assert(isValidUimm16(imm));
+ code = emitInsCode(ins, fmt);
+ code |= ((code_t)imm << 5); // iiiii iiiiiiiiiii
+ dst += emitOutput_Instr(dst, code);
+ break;
- case IF_SI_0B: // SI_0B ................ ....bbbb........ imm4 - barrier
- imm = emitGetInsSC(id);
- assert((imm >= 0) && (imm <= 15));
- code = emitInsCode(ins, fmt);
- code |= ((code_t) imm << 8); // bbbb
- dst += emitOutput_Instr(dst, code);
- break;
+ case IF_SI_0B: // SI_0B ................ ....bbbb........ imm4 - barrier
+ imm = emitGetInsSC(id);
+ assert((imm >= 0) && (imm <= 15));
+ code = emitInsCode(ins, fmt);
+ code |= ((code_t)imm << 8); // bbbb
+ dst += emitOutput_Instr(dst, code);
+ break;
- default:
- assert(!"Unexpected format");
- break;
+ default:
+ assert(!"Unexpected format");
+ break;
}
// Determine if any registers now hold GC refs, or whether a register that was overwritten held a GC ref.
// We assume here that "id->idGCref()" is not GC_NONE only if the instruction described by "id" writes a
// GC ref to register "id->idReg1()". (It may, apparently, also not be GC_NONE in other cases, such as
// for stores, but we ignore those cases here.)
-    if (emitInsMayWriteToGCReg(id)) // True if "id->idIns()" writes to a register that can hold a GC ref.
+    if (emitInsMayWriteToGCReg(id)) // True if "id->idIns()" writes to a register that can hold a GC ref.
{
// If we ever generate instructions that write to multiple registers,
            // then we'd need to do more work here to ensure that changes in the status of GC refs are
// tracked properly.
if (emitInsMayWriteMultipleRegs(id))
{
- // INS_ldp etc...
-                // We assume that "idReg1" and "idReg2" are the destination registers for all instructions
+            // INS_ldp etc...
+            // We assume that "idReg1" and "idReg2" are the destination registers for all instructions
emitGCregDeadUpd(id->idReg1(), dst);
emitGCregDeadUpd(id->idReg2(), dst);
}
else
{
- // We assume that "idReg1" is the destination register for all instructions
+ // We assume that "idReg1" is the destination register for all instructions
if (id->idGCref() != GCT_NONE)
{
emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
@@ -9541,10 +9360,10 @@ size_t emitter::emitOutputInstr(insGroup *ig,
// ref or overwritten one.
if (emitInsWritesToLclVarStackLoc(id))
{
- int varNum = id->idAddr()->iiaLclVar.lvaVarNum();
+ int varNum = id->idAddr()->iiaLclVar.lvaVarNum();
unsigned ofs = AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), sizeof(size_t));
- bool FPbased;
- int adr = emitComp->lvaFrameAddress(varNum, &FPbased);
+ bool FPbased;
+ int adr = emitComp->lvaFrameAddress(varNum, &FPbased);
if (id->idGCref() != GCT_NONE)
{
emitGCvarLiveUpd(adr + ofs, varNum, id->idGCref(), dst);
@@ -9561,22 +9380,22 @@ size_t emitter::emitOutputInstr(insGroup *ig,
else
{
TempDsc* tmpDsc = emitComp->tmpFindNum(varNum);
- vt = tmpDsc->tdTempType();
+ vt = tmpDsc->tdTempType();
}
if (vt == TYP_REF || vt == TYP_BYREF)
emitGCvarDeadUpd(adr + ofs, dst);
}
}
-#ifdef DEBUG
+#ifdef DEBUG
/* Make sure we set the instruction descriptor size correctly */
size_t expected = emitSizeOfInsDsc(id);
assert(sz == expected);
- if (emitComp->opts.disAsm || emitComp->opts.dspEmit || emitComp->verbose)
+ if (emitComp->opts.disAsm || emitComp->opts.dspEmit || emitComp->verbose)
{
- emitDispIns(id, false, dspOffs, true, emitCurCodeOffs(odst), *dp, (dst-*dp), ig);
+ emitDispIns(id, false, dspOffs, true, emitCurCodeOffs(odst), *dp, (dst - *dp), ig);
}
if (emitComp->compDebugBreak)
@@ -9596,10 +9415,9 @@ size_t emitter::emitOutputInstr(insGroup *ig,
*dp = dst;
- return sz;
+ return sz;
}
-
/*****************************************************************************/
/*****************************************************************************/
@@ -9609,10 +9427,10 @@ size_t emitter::emitOutputInstr(insGroup *ig,
*
* Display the instruction name
*/
-void emitter::emitDispInst(instruction ins)
+void emitter::emitDispInst(instruction ins)
{
- const char * insstr = codeGen->genInsName(ins);
- size_t len = strlen(insstr);
+ const char* insstr = codeGen->genInsName(ins);
+ size_t len = strlen(insstr);
/* Display the instruction name */
@@ -9621,11 +9439,11 @@ void emitter::emitDispInst(instruction ins)
//
// Add at least one space after the instruction name
    // and add spaces until we have reached the normal size of 8
- do {
- printf(" ");
- len++;
- }
- while (len < 8);
+ do
+ {
+ printf(" ");
+ len++;
+ } while (len < 8);
}
/*****************************************************************************
@@ -9634,7 +9452,7 @@ void emitter::emitDispInst(instruction ins)
 * If we are formatting for an assembly listing, don't print the hex value
* since it will prevent us from doing assembly diffs
*/
-void emitter::emitDispReloc(int value, bool addComma)
+void emitter::emitDispReloc(int value, bool addComma)
{
if (emitComp->opts.disAsm)
{
@@ -9649,12 +9467,11 @@ void emitter::emitDispReloc(int value, bool addComma)
printf(", ");
}
-
/*****************************************************************************
*
* Display an immediate value
*/
-void emitter::emitDispImm(ssize_t imm, bool addComma, bool alwaysHex /* =false */)
+void emitter::emitDispImm(ssize_t imm, bool addComma, bool alwaysHex /* =false */)
{
if (strictArmAsm)
{
@@ -9673,7 +9490,7 @@ void emitter::emitDispImm(ssize_t imm, bool addComma, bool always
{
printf("%d", imm);
}
- else
+ else
{
if ((imm < 0) && ((imm & 0xFFFFFFFF00000000LL) == 0xFFFFFFFF00000000LL))
{
@@ -9699,7 +9516,7 @@ void emitter::emitDispImm(ssize_t imm, bool addComma, bool always
*
* Display a float zero constant
*/
-void emitter::emitDispFloatZero()
+void emitter::emitDispFloatZero()
{
if (strictArmAsm)
{
@@ -9712,7 +9529,7 @@ void emitter::emitDispFloatZero()
*
* Display an encoded float constant value
*/
-void emitter::emitDispFloatImm(ssize_t imm8)
+void emitter::emitDispFloatImm(ssize_t imm8)
{
assert((0 <= imm8) && (imm8 <= 0x0ff));
if (strictArmAsm)
@@ -9721,8 +9538,8 @@ void emitter::emitDispFloatImm(ssize_t imm8)
}
floatImm8 fpImm;
- fpImm.immFPIVal = (unsigned) imm8;
- double result = emitDecodeFloatImm8(fpImm);
+ fpImm.immFPIVal = (unsigned)imm8;
+ double result = emitDecodeFloatImm8(fpImm);
printf("%.4f", result);
}
@@ -9731,7 +9548,7 @@ void emitter::emitDispFloatImm(ssize_t imm8)
*
* Display an immediate that is optionally LSL12.
*/
-void emitter::emitDispImmOptsLSL12(ssize_t imm, insOpts opt)
+void emitter::emitDispImmOptsLSL12(ssize_t imm, insOpts opt)
{
if (!strictArmAsm && insOptsLSL12(opt))
{
@@ -9748,13 +9565,11 @@ void emitter::emitDispImmOptsLSL12(ssize_t imm, insOpts opt)
*
* Display an ARM64 condition code for the conditional instructions
*/
-void emitter::emitDispCond(insCond cond)
+void emitter::emitDispCond(insCond cond)
{
- const static char* armCond[16] = { "eq", "ne", "hs", "lo",
- "mi", "pl", "vs", "vc",
- "hi", "ls", "ge", "lt",
- "gt", "le", "AL", "NV" }; // The last two are invalid
- unsigned imm = (unsigned) cond;
+ const static char* armCond[16] = {"eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "AL", "NV"}; // The last two are invalid
+ unsigned imm = (unsigned)cond;
assert((0 <= imm) && (imm < ArrLen(armCond)));
printf(armCond[imm]);
}
@@ -9763,13 +9578,11 @@ void emitter::emitDispCond(insCond cond)
*
 * Display the ARM64 flags for the conditional instructions
*/
-void emitter::emitDispFlags(insCflags flags)
+void emitter::emitDispFlags(insCflags flags)
{
- const static char* armFlags[16] = { "0", "v", "c", "cv",
- "z", "zv", "zc", "zcv",
- "n", "nv", "nc", "ncv",
- "nz", "nzv", "nzc", "nzcv" };
- unsigned imm = (unsigned) flags;
+ const static char* armFlags[16] = {"0", "v", "c", "cv", "z", "zv", "zc", "zcv",
+ "n", "nv", "nc", "ncv", "nz", "nzv", "nzc", "nzcv"};
+ unsigned imm = (unsigned)flags;
assert((0 <= imm) && (imm < ArrLen(armFlags)));
printf(armFlags[imm]);
}
@@ -9778,13 +9591,11 @@ void emitter::emitDispFlags(insCflags flags)
*
* Display an ARM64 'barrier' for the memory barrier instructions
*/
-void emitter::emitDispBarrier (insBarrier barrier)
+void emitter::emitDispBarrier(insBarrier barrier)
{
- const static char* armBarriers[16] = { "#0", "oshld", "oshst", "osh",
- "#4", "nshld", "nshst", "nsh",
- "#8", "ishld", "ishst", "ish",
- "#12", "ld", "st", "sy" };
- unsigned imm = (unsigned) barrier;
+ const static char* armBarriers[16] = {"#0", "oshld", "oshst", "osh", "#4", "nshld", "nshst", "nsh",
+ "#8", "ishld", "ishst", "ish", "#12", "ld", "st", "sy"};
+ unsigned imm = (unsigned)barrier;
assert((0 <= imm) && (imm < ArrLen(armBarriers)));
printf(armBarriers[imm]);
}
@@ -9794,7 +9605,7 @@ void emitter::emitDispBarrier (insBarrier barrier)
* Prints the encoding for the Shift Type encoding
*/
-void emitter::emitDispShiftOpts(insOpts opt)
+void emitter::emitDispShiftOpts(insOpts opt)
{
if (opt == INS_OPTS_LSL)
printf(" LSL ");
@@ -9815,7 +9626,7 @@ void emitter::emitDispShiftOpts(insOpts opt)
* Prints the encoding for the Extend Type encoding
*/
-void emitter::emitDispExtendOpts(insOpts opt)
+void emitter::emitDispExtendOpts(insOpts opt)
{
if (opt == INS_OPTS_UXTB)
printf("UXTB");
@@ -9842,7 +9653,7 @@ void emitter::emitDispExtendOpts(insOpts opt)
* Prints the encoding for the Extend Type encoding in loads/stores
*/
-void emitter::emitDispLSExtendOpts(insOpts opt)
+void emitter::emitDispLSExtendOpts(insOpts opt)
{
if (opt == INS_OPTS_LSL)
printf("LSL");
@@ -9862,7 +9673,7 @@ void emitter::emitDispLSExtendOpts(insOpts opt)
*
* Display a register
*/
-void emitter::emitDispReg(regNumber reg, emitAttr attr, bool addComma)
+void emitter::emitDispReg(regNumber reg, emitAttr attr, bool addComma)
{
emitAttr size = EA_SIZE(attr);
printf(emitRegName(reg, size));
@@ -9875,7 +9686,7 @@ void emitter::emitDispReg(regNumber reg, emitAttr attr, bool addC
*
* Display a vector register with an arrangement suffix
*/
-void emitter::emitDispVectorReg(regNumber reg, insOpts opt, bool addComma)
+void emitter::emitDispVectorReg(regNumber reg, insOpts opt, bool addComma)
{
assert(isVectorRegister(reg));
printf(emitVectorRegName(reg));
@@ -9889,28 +9700,28 @@ void emitter::emitDispVectorReg(regNumber reg, insOpts opt, bool
*
 * Display a vector register index suffix
*/
-void emitter::emitDispVectorRegIndex(regNumber reg, emitAttr elemsize, ssize_t index, bool addComma)
+void emitter::emitDispVectorRegIndex(regNumber reg, emitAttr elemsize, ssize_t index, bool addComma)
{
assert(isVectorRegister(reg));
printf(emitVectorRegName(reg));
switch (elemsize)
{
- case EA_1BYTE:
- printf(".b");
- break;
- case EA_2BYTE:
- printf(".h");
- break;
- case EA_4BYTE:
- printf(".s");
- break;
- case EA_8BYTE:
- printf(".d");
- break;
- default:
- assert(!"invalid elemsize");
- break;
+ case EA_1BYTE:
+ printf(".b");
+ break;
+ case EA_2BYTE:
+ printf(".h");
+ break;
+ case EA_4BYTE:
+ printf(".s");
+ break;
+ case EA_8BYTE:
+ printf(".d");
+ break;
+ default:
+ assert(!"invalid elemsize");
+ break;
}
printf("[%d]", index);
@@ -9923,39 +9734,39 @@ void emitter::emitDispVectorRegIndex(regNumber reg, emitAttr elem
*
* Display an arrangement suffix
*/
-void emitter::emitDispArrangement(insOpts opt)
+void emitter::emitDispArrangement(insOpts opt)
{
- const char * str = "???";
+ const char* str = "???";
switch (opt)
{
- case INS_OPTS_8B:
- str = "8b";
- break;
- case INS_OPTS_16B:
- str = "16b";
- break;
- case INS_OPTS_4H:
- str = "4h";
- break;
- case INS_OPTS_8H:
- str = "8h";
- break;
- case INS_OPTS_2S:
- str = "2s";
- break;
- case INS_OPTS_4S:
- str = "4s";
- break;
- case INS_OPTS_1D:
- str = "1d";
- break;
- case INS_OPTS_2D:
- str = "2d";
- break;
+ case INS_OPTS_8B:
+ str = "8b";
+ break;
+ case INS_OPTS_16B:
+ str = "16b";
+ break;
+ case INS_OPTS_4H:
+ str = "4h";
+ break;
+ case INS_OPTS_8H:
+ str = "8h";
+ break;
+ case INS_OPTS_2S:
+ str = "2s";
+ break;
+ case INS_OPTS_4S:
+ str = "4s";
+ break;
+ case INS_OPTS_1D:
+ str = "1d";
+ break;
+ case INS_OPTS_2D:
+ str = "2d";
+ break;
- default:
- assert(!"Invalid insOpt for vector register");
+ default:
+ assert(!"Invalid insOpt for vector register");
}
printf(".");
printf(str);
@@ -9963,9 +9774,9 @@ void emitter::emitDispArrangement(insOpts opt)
/*****************************************************************************
*
- * Display a register with an optional shift operation
+ * Display a register with an optional shift operation
*/
-void emitter::emitDispShiftedReg(regNumber reg, insOpts opt, ssize_t imm, emitAttr attr)
+void emitter::emitDispShiftedReg(regNumber reg, insOpts opt, ssize_t imm, emitAttr attr)
{
emitAttr size = EA_SIZE(attr);
assert((imm & 0x003F) == imm);
@@ -9981,23 +9792,23 @@ void emitter::emitDispShiftedReg(regNumber reg, insOpts opt, ssiz
}
emitDispShiftOpts(opt);
emitDispImm(imm, false);
- }
+ }
}
/*****************************************************************************
*
 * Display a register with optional extend and scale operations
*/
-void emitter::emitDispExtendReg(regNumber reg, insOpts opt, ssize_t imm)
+void emitter::emitDispExtendReg(regNumber reg, insOpts opt, ssize_t imm)
{
assert((imm >= 0) && (imm <= 4));
assert(insOptsNone(opt) || insOptsAnyExtend(opt) || (opt == INS_OPTS_LSL));
- // size is based on the extend option, not the instr size.
+ // size is based on the extend option, not the instr size.
emitAttr size = insOpts32BitExtend(opt) ? EA_4BYTE : EA_8BYTE;
if (strictArmAsm)
- {
+ {
if (insOptsNone(opt))
{
emitDispReg(reg, size, false);
@@ -10016,7 +9827,7 @@ void emitter::emitDispExtendReg(regNumber reg, insOpts opt, ssize
}
}
}
- else // !strictArmAsm
+ else // !strictArmAsm
{
if (insOptsNone(opt))
{
@@ -10035,33 +9846,32 @@ void emitter::emitDispExtendReg(regNumber reg, insOpts opt, ssize
if (imm > 0)
{
printf("*");
- emitDispImm(1<<imm, false);
+ emitDispImm(1 << imm, false);
}
}
}
-
/*****************************************************************************
*
* Display an addressing operand [reg + imm]
*/
-void emitter::emitDispAddrRI(regNumber reg, insOpts opt, ssize_t imm)
+void emitter::emitDispAddrRI(regNumber reg, insOpts opt, ssize_t imm)
{
- reg = encodingZRtoSP(reg); // ZR (R31) encodes the SP register
+ reg = encodingZRtoSP(reg); // ZR (R31) encodes the SP register
if (strictArmAsm)
{
printf("[");
-
+
emitDispReg(reg, EA_8BYTE, false);
-
+
if (!insOptsPostIndex(opt) && (imm != 0))
{
printf(",");
emitDispImm(imm, false);
}
printf("]");
-
+
if (insOptsPreIndex(opt))
{
printf("!");
@@ -10072,15 +9882,15 @@ void emitter::emitDispAddrRI(regNumber reg, insOpts opt, ssize_t
emitDispImm(imm, false);
}
}
- else // !strictArmAsm
+ else // !strictArmAsm
{
printf("[");
-
- const char* operStr = "++";
+
+ const char* operStr = "++";
if (imm < 0)
{
- operStr = "--";
- imm = -imm;
+ operStr = "--";
+ imm = -imm;
}
if (insOptsPreIndex(opt))
@@ -10094,7 +9904,7 @@ void emitter::emitDispAddrRI(regNumber reg, insOpts opt, ssize_t
{
printf(operStr);
}
-
+
if (insOptsIndexed(opt))
{
printf(", ");
@@ -10105,20 +9915,16 @@ void emitter::emitDispAddrRI(regNumber reg, insOpts opt, ssize_t
}
emitDispImm(imm, false);
printf("]");
- }
+ }
}
/*****************************************************************************
*
* Display an addressing operand [reg + extended reg]
*/
-void emitter::emitDispAddrRRExt(regNumber reg1,
- regNumber reg2,
- insOpts opt,
- bool isScaled,
- emitAttr size)
+void emitter::emitDispAddrRRExt(regNumber reg1, regNumber reg2, insOpts opt, bool isScaled, emitAttr size)
{
- reg1 = encodingZRtoSP(reg1); // ZR (R31) encodes the SP register
+ reg1 = encodingZRtoSP(reg1); // ZR (R31) encodes the SP register
unsigned scale = 0;
if (isScaled)
@@ -10133,7 +9939,7 @@ void emitter::emitDispAddrRRExt(regNumber reg1,
emitDispReg(reg1, EA_8BYTE, true);
emitDispExtendReg(reg2, opt, scale);
}
- else // !strictArmAsm
+ else // !strictArmAsm
{
emitDispReg(reg1, EA_8BYTE, false);
printf("+");
@@ -10145,17 +9951,17 @@ void emitter::emitDispAddrRRExt(regNumber reg1,
/*****************************************************************************
*
- * Display (optionally) the instruction encoding in hex
+ * Display (optionally) the instruction encoding in hex
*/
-void emitter::emitDispInsHex(BYTE * code, size_t sz)
+void emitter::emitDispInsHex(BYTE* code, size_t sz)
{
// We do not display the instruction hex if we want diff-able disassembly
if (!emitComp->opts.disDiffable)
{
if (sz == 4)
{
- printf(" %08X ", (*((code_t *) code)));
+ printf(" %08X ", (*((code_t*)code)));
}
else
{
@@ -10169,18 +9975,13 @@ void emitter::emitDispInsHex(BYTE * code, size_t sz)
* Display the given instruction.
*/
-void emitter::emitDispIns(instrDesc * id,
- bool isNew,
- bool doffs,
- bool asmfm,
- unsigned offset,
- BYTE * pCode,
- size_t sz,
- insGroup * ig)
+void emitter::emitDispIns(
+ instrDesc* id, bool isNew, bool doffs, bool asmfm, unsigned offset, BYTE* pCode, size_t sz, insGroup* ig)
{
if (EMITVERBOSE)
{
- unsigned idNum = id->idDebugOnlyInfo()->idNum; // Do not remove this! It is needed for VisualStudio conditional breakpoints
+ unsigned idNum =
+ id->idDebugOnlyInfo()->idNum; // Do not remove this! It is needed for VisualStudio conditional breakpoints
printf("IN%04x: ", idNum);
}
@@ -10188,7 +9989,7 @@ void emitter::emitDispIns(instrDesc * id,
if (pCode == NULL)
sz = 0;
- if (!emitComp->opts.dspEmit && !isNew && !asmfm && sz)
+ if (!emitComp->opts.dspEmit && !isNew && !asmfm && sz)
doffs = true;
/* Display the instruction offset */
@@ -10203,8 +10004,8 @@ void emitter::emitDispIns(instrDesc * id,
/* Get the instruction and format */
- instruction ins = id->idIns();
- insFormat fmt = id->idInsFmt();
+ instruction ins = id->idIns();
+ insFormat fmt = id->idInsFmt();
emitDispInst(ins);
@@ -10213,38 +10014,38 @@ void emitter::emitDispIns(instrDesc * id,
assert(isNew == false || (int)emitSizeOfInsDsc(id) == emitCurIGfreeNext - (BYTE*)id);
/* Figure out the operand size */
- emitAttr size = id->idOpSize();
- emitAttr attr = size;
- if (id->idGCref() == GCT_GCREF)
+ emitAttr size = id->idOpSize();
+ emitAttr attr = size;
+ if (id->idGCref() == GCT_GCREF)
attr = EA_GCREF;
- else if (id->idGCref() == GCT_BYREF)
+ else if (id->idGCref() == GCT_BYREF)
attr = EA_BYREF;
switch (fmt)
{
- code_t code;
- ssize_t imm;
- int doffs;
- bool isExtendAlias;
- bool canEncode;
- bitMaskImm bmi;
- halfwordImm hwi;
- condFlagsImm cfi;
- unsigned scale;
- unsigned immShift;
- bool hasShift;
- ssize_t offs;
- const char * methodName;
- emitAttr elemsize;
- emitAttr datasize;
- emitAttr srcsize;
- emitAttr dstsize;
- ssize_t index;
- ssize_t index2;
-
- case IF_BI_0A: // BI_0A ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
- case IF_BI_0B: // BI_0B ......iiiiiiiiii iiiiiiiiiii..... simm19:00
- case IF_LARGEJMP:
+ code_t code;
+ ssize_t imm;
+ int doffs;
+ bool isExtendAlias;
+ bool canEncode;
+ bitMaskImm bmi;
+ halfwordImm hwi;
+ condFlagsImm cfi;
+ unsigned scale;
+ unsigned immShift;
+ bool hasShift;
+ ssize_t offs;
+ const char* methodName;
+ emitAttr elemsize;
+ emitAttr datasize;
+ emitAttr srcsize;
+ emitAttr dstsize;
+ ssize_t index;
+ ssize_t index2;
+
+ case IF_BI_0A: // BI_0A ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
+ case IF_BI_0B: // BI_0B ......iiiiiiiiii iiiiiiiiiii..... simm19:00
+ case IF_LARGEJMP:
{
if (fmt == IF_LARGEJMP)
{
@@ -10256,18 +10057,18 @@ void emitter::emitDispIns(instrDesc * id,
if (ig == nullptr)
{
- printf("pc%s%d instructions", (instrCount >= 0) ? "+" : "" , instrCount);
+ printf("pc%s%d instructions", (instrCount >= 0) ? "+" : "", instrCount);
}
else
{
- unsigned insNum = emitFindInsNum(ig, id);
+ unsigned insNum = emitFindInsNum(ig, id);
UNATIVE_OFFSET srcOffs = ig->igOffs + emitFindOffset(ig, insNum + 1);
UNATIVE_OFFSET dstOffs = ig->igOffs + emitFindOffset(ig, insNum + 1 + instrCount);
- ssize_t relOffs = (ssize_t) (emitOffsetToPtr(dstOffs) - emitOffsetToPtr(srcOffs));
- printf("pc%s%d (%d instructions)", (relOffs >= 0) ? "+" : "" , relOffs, instrCount);
+ ssize_t relOffs = (ssize_t)(emitOffsetToPtr(dstOffs) - emitOffsetToPtr(srcOffs));
+ printf("pc%s%d (%d instructions)", (relOffs >= 0) ? "+" : "", relOffs, instrCount);
}
}
- else if (id->idIsBound())
+ else if (id->idIsBound())
{
printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
}
@@ -10278,657 +10079,656 @@ void emitter::emitDispIns(instrDesc * id,
}
break;
- case IF_BI_0C: // BI_0C ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
- if (id->idIsCallAddr())
- {
- offs = (ssize_t)id->idAddr()->iiaAddr;
- methodName = "";
- }
- else
- {
- offs = 0;
- methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie);
- }
+ case IF_BI_0C: // BI_0C ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00
+ if (id->idIsCallAddr())
+ {
+ offs = (ssize_t)id->idAddr()->iiaAddr;
+ methodName = "";
+ }
+ else
+ {
+ offs = 0;
+ methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie);
+ }
- if (offs)
- {
- if (id->idIsDspReloc())
- printf("reloc ");
- printf("%08X", offs);
- }
- else
- {
- printf("%s", methodName);
- }
- break;
+ if (offs)
+ {
+ if (id->idIsDspReloc())
+ printf("reloc ");
+ printf("%08X", offs);
+ }
+ else
+ {
+ printf("%s", methodName);
+ }
+ break;
- case IF_BI_1A: // BI_1A ......iiiiiiiiii iiiiiiiiiiittttt Rt simm19:00
- assert(insOptsNone(id->idInsOpt()));
- emitDispReg(id->idReg1(), size, true);
- if (id->idIsBound())
- {
- printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
- }
- else
- {
- printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
- }
- break;
+ case IF_BI_1A: // BI_1A ......iiiiiiiiii iiiiiiiiiiittttt Rt simm19:00
+ assert(insOptsNone(id->idInsOpt()));
+ emitDispReg(id->idReg1(), size, true);
+ if (id->idIsBound())
+ {
+ printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
+ }
+ else
+ {
+ printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
+ }
+ break;
- case IF_BI_1B: // BI_1B B.......bbbbbiii iiiiiiiiiiittttt Rt imm6, simm14:00
- assert(insOptsNone(id->idInsOpt()));
- emitDispReg(id->idReg1(), size, true);
- emitDispImm(emitGetInsSC(id), true);
- if (id->idIsBound())
- {
- printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
- }
- else
- {
- printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
- }
- break;
+ case IF_BI_1B: // BI_1B B.......bbbbbiii iiiiiiiiiiittttt Rt imm6, simm14:00
+ assert(insOptsNone(id->idInsOpt()));
+ emitDispReg(id->idReg1(), size, true);
+ emitDispImm(emitGetInsSC(id), true);
+ if (id->idIsBound())
+ {
+ printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
+ }
+ else
+ {
+ printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
+ }
+ break;
- case IF_BR_1A: // BR_1A ................ ......nnnnn..... Rn
- assert(insOptsNone(id->idInsOpt()));
- emitDispReg(id->idReg1(), size, false);
- break;
+ case IF_BR_1A: // BR_1A ................ ......nnnnn..... Rn
+ assert(insOptsNone(id->idInsOpt()));
+ emitDispReg(id->idReg1(), size, false);
+ break;
- case IF_BR_1B: // BR_1B ................ ......nnnnn..... Rn
- assert(insOptsNone(id->idInsOpt()));
- emitDispReg(id->idReg3(), size, false);
- break;
+ case IF_BR_1B: // BR_1B ................ ......nnnnn..... Rn
+ assert(insOptsNone(id->idInsOpt()));
+ emitDispReg(id->idReg3(), size, false);
+ break;
- case IF_LS_1A: // LS_1A XX...V..iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB)
- case IF_DI_1E: // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
- case IF_LARGELDC:
- case IF_LARGEADR:
- assert(insOptsNone(id->idInsOpt()));
- emitDispReg(id->idReg1(), size, true);
- imm = emitGetInsSC(id);
+ case IF_LS_1A: // LS_1A XX...V..iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB)
+ case IF_DI_1E: // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
+ case IF_LARGELDC:
+ case IF_LARGEADR:
+ assert(insOptsNone(id->idInsOpt()));
+ emitDispReg(id->idReg1(), size, true);
+ imm = emitGetInsSC(id);
- /* Is this actually a reference to a data section? */
- if (fmt == IF_LARGEADR)
- {
- printf("(LARGEADR)");
- }
- else if (fmt == IF_LARGELDC)
- {
- printf("(LARGELDC)");
- }
+ /* Is this actually a reference to a data section? */
+ if (fmt == IF_LARGEADR)
+ {
+ printf("(LARGEADR)");
+ }
+ else if (fmt == IF_LARGELDC)
+ {
+ printf("(LARGELDC)");
+ }
- printf("[");
- if (id->idAddr()->iiaIsJitDataOffset())
- {
- doffs = Compiler::eeGetJitDataOffs(id->idAddr()->iiaFieldHnd);
- /* Display a data section reference */
+ printf("[");
+ if (id->idAddr()->iiaIsJitDataOffset())
+ {
+ doffs = Compiler::eeGetJitDataOffs(id->idAddr()->iiaFieldHnd);
+ /* Display a data section reference */
- if (doffs & 1)
- printf("@CNS%02u", doffs - 1);
- else
- printf("@RWD%02u", doffs);
+ if (doffs & 1)
+ printf("@CNS%02u", doffs - 1);
+ else
+ printf("@RWD%02u", doffs);
- if (imm != 0)
- printf("%+Id", imm);
- }
- else
- {
- assert(imm == 0);
- if (id->idIsReloc())
+ if (imm != 0)
+ printf("%+Id", imm);
+ }
+ else
{
- printf("RELOC ");
- emitDispImm((ssize_t)id->idAddr()->iiaAddr, false);
+ assert(imm == 0);
+ if (id->idIsReloc())
+ {
+ printf("RELOC ");
+ emitDispImm((ssize_t)id->idAddr()->iiaAddr, false);
+ }
+ else if (id->idIsBound())
+ {
+ printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
+ }
+ else
+ {
+ printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
+ }
}
- else if (id->idIsBound())
+ printf("]");
+ break;
+
+ case IF_LS_2A: // LS_2A .X.......X...... ......nnnnnttttt Rt Rn
+ assert(insOptsNone(id->idInsOpt()));
+ assert(emitGetInsSC(id) == 0);
+ emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
+ emitDispAddrRI(id->idReg2(), id->idInsOpt(), 0);
+ break;
+
+ case IF_LS_2B: // LS_2B .X.......Xiiiiii iiiiiinnnnnttttt Rt Rn imm(0-4095)
+ assert(insOptsNone(id->idInsOpt()));
+ imm = emitGetInsSC(id);
+ scale = NaturalScale_helper(emitInsLoadStoreSize(id));
+ imm <<= scale; // The immediate is scaled by the size of the ld/st
+ emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
+ emitDispAddrRI(id->idReg2(), id->idInsOpt(), imm);
+ break;
+
+ case IF_LS_2C: // LS_2C .X.......X.iiiii iiiiPPnnnnnttttt Rt Rn imm(-256..+255) no/pre/post inc
+ assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
+ imm = emitGetInsSC(id);
+ emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
+ emitDispAddrRI(id->idReg2(), id->idInsOpt(), imm);
+ break;
+
+ case IF_LS_3A: // LS_3A .X.......X.mmmmm oooS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
+ assert(insOptsLSExtend(id->idInsOpt()));
+ emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
+ if (id->idIsLclVar())
{
- printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
+ emitDispAddrRRExt(id->idReg2(), codeGen->rsGetRsvdReg(), id->idInsOpt(), false, size);
}
else
{
- printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
+ emitDispAddrRRExt(id->idReg2(), id->idReg3(), id->idInsOpt(), id->idReg3Scaled(), size);
}
- }
- printf("]");
- break;
-
- case IF_LS_2A: // LS_2A .X.......X...... ......nnnnnttttt Rt Rn
- assert(insOptsNone(id->idInsOpt()));
- assert(emitGetInsSC(id) == 0);
- emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
- emitDispAddrRI(id->idReg2(), id->idInsOpt(), 0);
- break;
+ break;
- case IF_LS_2B: // LS_2B .X.......Xiiiiii iiiiiinnnnnttttt Rt Rn imm(0-4095)
- assert(insOptsNone(id->idInsOpt()));
- imm = emitGetInsSC(id);
- scale = NaturalScale_helper(emitInsLoadStoreSize(id));
- imm <<= scale; // The immediate is scaled by the size of the ld/st
- emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
- emitDispAddrRI(id->idReg2(), id->idInsOpt(), imm);
- break;
+ case IF_LS_3B: // LS_3B X............... .aaaaannnnnddddd Rt Ra Rn
+ assert(insOptsNone(id->idInsOpt()));
+ assert(emitGetInsSC(id) == 0);
+ emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
+ emitDispReg(id->idReg2(), emitInsTargetRegSize(id), true);
+ emitDispAddrRI(id->idReg3(), id->idInsOpt(), 0);
+ break;
- case IF_LS_2C: // LS_2C .X.......X.iiiii iiiiPPnnnnnttttt Rt Rn imm(-256..+255) no/pre/post inc
- assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
- imm = emitGetInsSC(id);
- emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
- emitDispAddrRI(id->idReg2(), id->idInsOpt(), imm);
- break;
+ case IF_LS_3C: // LS_3C X.........iiiiii iaaaaannnnnddddd Rt Ra Rn imm(im7,sh)
+ assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
+ imm = emitGetInsSC(id);
+ scale = NaturalScale_helper(emitInsLoadStoreSize(id));
+ imm <<= scale;
+ emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
+ emitDispReg(id->idReg2(), emitInsTargetRegSize(id), true);
+ emitDispAddrRI(id->idReg3(), id->idInsOpt(), imm);
+ break;
- case IF_LS_3A: // LS_3A .X.......X.mmmmm oooS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
- assert(insOptsLSExtend(id->idInsOpt()));
- emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
- if (id->idIsLclVar())
- {
- emitDispAddrRRExt(id->idReg2(), codeGen->rsGetRsvdReg(), id->idInsOpt(), false, size);
- }
- else
- {
- emitDispAddrRRExt(id->idReg2(), id->idReg3(), id->idInsOpt(), id->idReg3Scaled(), size);
- }
- break;
+ case IF_DI_1A: // DI_1A X.......shiiiiii iiiiiinnnnn..... Rn imm(i12,sh)
+ emitDispReg(id->idReg1(), size, true);
+ emitDispImmOptsLSL12(emitGetInsSC(id), id->idInsOpt());
+ break;
- case IF_LS_3B: // LS_3B X............... .aaaaannnnnddddd Rt Ra Rn
- assert(insOptsNone(id->idInsOpt()));
- assert(emitGetInsSC(id) == 0);
- emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
- emitDispReg(id->idReg2(), emitInsTargetRegSize(id), true);
- emitDispAddrRI(id->idReg3(), id->idInsOpt(), 0);
- break;
+ case IF_DI_1B: // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw)
+ emitDispReg(id->idReg1(), size, true);
+ hwi.immHWVal = (unsigned)emitGetInsSC(id);
+ if (ins == INS_mov)
+ {
+ emitDispImm(emitDecodeHalfwordImm(hwi, size), false);
+ }
+ else // movz, movn, movk
+ {
+ emitDispImm(hwi.immVal, false);
+ if (hwi.immHW != 0)
+ {
+ emitDispShiftOpts(INS_OPTS_LSL);
+ emitDispImm(hwi.immHW * 16, false);
+ }
+ }
+ break;
- case IF_LS_3C: // LS_3C X.........iiiiii iaaaaannnnnddddd Rt Ra Rn imm(im7,sh)
- assert(insOptsNone(id->idInsOpt()) || insOptsIndexed(id->idInsOpt()));
- imm = emitGetInsSC(id);
- scale = NaturalScale_helper(emitInsLoadStoreSize(id));
- imm <<= scale;
- emitDispReg(id->idReg1(), emitInsTargetRegSize(id), true);
- emitDispReg(id->idReg2(), emitInsTargetRegSize(id), true);
- emitDispAddrRI(id->idReg3(), id->idInsOpt(), imm);
- break;
+ case IF_DI_1C: // DI_1C X........Nrrrrrr ssssssnnnnn..... Rn imm(N,r,s)
+ emitDispReg(id->idReg1(), size, true);
+ bmi.immNRS = (unsigned)emitGetInsSC(id);
+ emitDispImm(emitDecodeBitMaskImm(bmi, size), false);
+ break;
- case IF_DI_1A: // DI_1A X.......shiiiiii iiiiiinnnnn..... Rn imm(i12,sh)
- emitDispReg(id->idReg1(), size, true);
- emitDispImmOptsLSL12(emitGetInsSC(id), id->idInsOpt());
- break;
+ case IF_DI_1D: // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s)
+ emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
+ bmi.immNRS = (unsigned)emitGetInsSC(id);
+ emitDispImm(emitDecodeBitMaskImm(bmi, size), false);
+ break;
- case IF_DI_1B: // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw)
- emitDispReg(id->idReg1(), size, true);
- hwi.immHWVal = (unsigned) emitGetInsSC(id);
- if (ins == INS_mov)
- {
- emitDispImm(emitDecodeHalfwordImm(hwi, size), false);
- }
- else // movz, movn, movk
- {
- emitDispImm(hwi.immVal, false);
- if (hwi.immHW != 0)
+ case IF_DI_2A: // DI_2A X.......shiiiiii iiiiiinnnnnddddd Rd Rn imm(i12,sh)
+ if ((ins == INS_add) || (ins == INS_sub))
{
- emitDispShiftOpts(INS_OPTS_LSL);
- emitDispImm(hwi.immHW * 16, false);
+ emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
+ emitDispReg(encodingZRtoSP(id->idReg2()), size, true);
}
- }
- break;
+ else
+ {
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ }
+ emitDispImmOptsLSL12(emitGetInsSC(id), id->idInsOpt());
+ break;
- case IF_DI_1C: // DI_1C X........Nrrrrrr ssssssnnnnn..... Rn imm(N,r,s)
- emitDispReg(id->idReg1(), size, true);
- bmi.immNRS = (unsigned) emitGetInsSC(id);
- emitDispImm(emitDecodeBitMaskImm(bmi, size), false);
- break;
+ case IF_DI_2B: // DI_2B X........X.nnnnn ssssssnnnnnddddd Rd Rn imm(0-63)
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ emitDispImm(emitGetInsSC(id), false);
+ break;
- case IF_DI_1D: // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s)
- emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
- bmi.immNRS = (unsigned) emitGetInsSC(id);
- emitDispImm(emitDecodeBitMaskImm(bmi, size), false);
- break;
+ case IF_DI_2C: // DI_2C X........Nrrrrrr ssssssnnnnnddddd Rd Rn imm(N,r,s)
+ if (ins == INS_ands)
+ {
+ emitDispReg(id->idReg1(), size, true);
+ }
+ else
+ {
+ emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
+ }
+ emitDispReg(id->idReg2(), size, true);
+ bmi.immNRS = (unsigned)emitGetInsSC(id);
+ emitDispImm(emitDecodeBitMaskImm(bmi, size), false);
+ break;
- case IF_DI_2A: // DI_2A X.......shiiiiii iiiiiinnnnnddddd Rd Rn imm(i12,sh)
- if ((ins == INS_add) || (ins == INS_sub))
- {
- emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
- emitDispReg(encodingZRtoSP(id->idReg2()), size, true);
- }
- else
- {
+ case IF_DI_2D: // DI_2D X........Nrrrrrr ssssssnnnnnddddd Rd Rn imr, ims (N,r,s)
emitDispReg(id->idReg1(), size, true);
emitDispReg(id->idReg2(), size, true);
- }
- emitDispImmOptsLSL12(emitGetInsSC(id), id->idInsOpt());
- break;
- case IF_DI_2B: // DI_2B X........X.nnnnn ssssssnnnnnddddd Rd Rn imm(0-63)
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
- emitDispImm(emitGetInsSC(id), false);
- break;
+ imm = emitGetInsSC(id);
+ bmi.immNRS = (unsigned)imm;
- case IF_DI_2C: // DI_2C X........Nrrrrrr ssssssnnnnnddddd Rd Rn imm(N,r,s)
- if (ins == INS_ands)
- {
- emitDispReg(id->idReg1(), size, true);
- }
- else
- {
- emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
- }
- emitDispReg(id->idReg2(), size, true);
- bmi.immNRS = (unsigned) emitGetInsSC(id);
- emitDispImm(emitDecodeBitMaskImm(bmi, size), false);
- break;
+ switch (ins)
+ {
+ case INS_bfm:
+ case INS_sbfm:
+ case INS_ubfm:
+ emitDispImm(bmi.immR, true);
+ emitDispImm(bmi.immS, false);
+ break;
- case IF_DI_2D: // DI_2D X........Nrrrrrr ssssssnnnnnddddd Rd Rn imr, ims (N,r,s)
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
+ case INS_bfi:
+ case INS_sbfiz:
+ case INS_ubfiz:
+ emitDispImm(getBitWidth(size) - bmi.immR, true);
+ emitDispImm(bmi.immS + 1, false);
+ break;
- imm = emitGetInsSC(id);
- bmi.immNRS = (unsigned)imm;
+ case INS_bfxil:
+ case INS_sbfx:
+ case INS_ubfx:
+ emitDispImm(bmi.immR, true);
+ emitDispImm(bmi.immS - bmi.immR + 1, false);
+ break;
- switch (ins)
- {
- case INS_bfm:
- case INS_sbfm:
- case INS_ubfm:
- emitDispImm(bmi.immR, true);
- emitDispImm(bmi.immS, false);
- break;
+ case INS_asr:
+ case INS_lsr:
+ case INS_lsl:
+ emitDispImm(imm, false);
+ break;
- case INS_bfi:
- case INS_sbfiz:
- case INS_ubfiz:
- emitDispImm(getBitWidth(size) - bmi.immR, true);
- emitDispImm(bmi.immS + 1, false);
- break;
+ default:
+ assert(!"Unexpected instruction in IF_DI_2D");
+ }
- case INS_bfxil:
- case INS_sbfx:
- case INS_ubfx:
- emitDispImm(bmi.immR, true);
- emitDispImm(bmi.immS - bmi.immR + 1, false);
break;
- case INS_asr:
- case INS_lsr:
- case INS_lsl:
- emitDispImm(imm, false);
+ case IF_DI_1F: // DI_1F X..........iiiii cccc..nnnnn.nzcv Rn imm5 nzcv cond
+ emitDispReg(id->idReg1(), size, true);
+ cfi.immCFVal = (unsigned)emitGetInsSC(id);
+ emitDispImm(cfi.imm5, true);
+ emitDispFlags(cfi.flags);
+ printf(",");
+ emitDispCond(cfi.cond);
break;
- default:
- assert(!"Unexpected instruction in IF_DI_2D");
- }
+ case IF_DR_1D: // DR_1D X............... cccc.......mmmmm Rd cond
+ emitDispReg(id->idReg1(), size, true);
+ cfi.immCFVal = (unsigned)emitGetInsSC(id);
+ emitDispCond(cfi.cond);
+ break;
- break;
+ case IF_DR_2A: // DR_2A X..........mmmmm ......nnnnn..... Rn Rm
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, false);
+ break;
- case IF_DI_1F: // DI_1F X..........iiiii cccc..nnnnn.nzcv Rn imm5 nzcv cond
- emitDispReg(id->idReg1(), size, true);
- cfi.immCFVal = (unsigned) emitGetInsSC(id);
- emitDispImm(cfi.imm5, true);
- emitDispFlags(cfi.flags);
- printf(",");
- emitDispCond(cfi.cond);
- break;
+ case IF_DR_2B: // DR_2B X.......sh.mmmmm ssssssnnnnn..... Rn Rm {LSL,LSR,ASR,ROR} imm(0-63)
+ emitDispReg(id->idReg1(), size, true);
+ emitDispShiftedReg(id->idReg2(), id->idInsOpt(), emitGetInsSC(id), size);
+ break;
- case IF_DR_1D: // DR_1D X............... cccc.......mmmmm Rd cond
- emitDispReg(id->idReg1(), size, true);
- cfi.immCFVal = (unsigned) emitGetInsSC(id);
- emitDispCond(cfi.cond);
- break;
+ case IF_DR_2C: // DR_2C X..........mmmmm ooosssnnnnn..... Rn Rm ext(Rm) LSL imm(0-4)
+ emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
+ imm = emitGetInsSC(id);
+ emitDispExtendReg(id->idReg2(), id->idInsOpt(), imm);
+ break;
- case IF_DR_2A: // DR_2A X..........mmmmm ......nnnnn..... Rn Rm
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, false);
- break;
+ case IF_DR_2D: // DR_2D X..........nnnnn cccc..nnnnnddddd Rd Rn cond
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ cfi.immCFVal = (unsigned)emitGetInsSC(id);
+ emitDispCond(cfi.cond);
+ break;
- case IF_DR_2B: // DR_2B X.......sh.mmmmm ssssssnnnnn..... Rn Rm {LSL,LSR,ASR,ROR} imm(0-63)
- emitDispReg(id->idReg1(), size, true);
- emitDispShiftedReg(id->idReg2(), id->idInsOpt(), emitGetInsSC(id), size);
- break;
+ case IF_DR_2E: // DR_2E X..........mmmmm ...........ddddd Rd Rm
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, false);
+ break;
- case IF_DR_2C: // DR_2C X..........mmmmm ooosssnnnnn..... Rn Rm ext(Rm) LSL imm(0-4)
- emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
- imm = emitGetInsSC(id);
- emitDispExtendReg(id->idReg2(), id->idInsOpt(), imm);
- break;
+ case IF_DR_2F: // DR_2F X.......sh.mmmmm ssssss.....ddddd Rd Rm {LSL,LSR,ASR} imm(0-63)
+ emitDispReg(id->idReg1(), size, true);
+ emitDispShiftedReg(id->idReg2(), id->idInsOpt(), emitGetInsSC(id), size);
+ break;
- case IF_DR_2D: // DR_2D X..........nnnnn cccc..nnnnnddddd Rd Rn cond
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
- cfi.immCFVal = (unsigned) emitGetInsSC(id);
- emitDispCond(cfi.cond);
- break;
+ case IF_DR_2G: // DR_2G X............... ......nnnnnddddd Rd Rn
+ emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
+ emitDispReg(encodingZRtoSP(id->idReg2()), size, false);
+ break;
- case IF_DR_2E: // DR_2E X..........mmmmm ...........ddddd Rd Rm
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, false);
- break;
+ case IF_DR_2H: // DR_2H X........X...... ......nnnnnddddd Rd Rn
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, false);
+ break;
- case IF_DR_2F: // DR_2F X.......sh.mmmmm ssssss.....ddddd Rd Rm {LSL,LSR,ASR} imm(0-63)
- emitDispReg(id->idReg1(), size, true);
- emitDispShiftedReg(id->idReg2(), id->idInsOpt(), emitGetInsSC(id), size);
- break;
+ case IF_DR_2I: // DR_2I X..........mmmmm cccc..nnnnn.nzcv Rn Rm nzcv cond
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ cfi.immCFVal = (unsigned)emitGetInsSC(id);
+ emitDispFlags(cfi.flags);
+ printf(",");
+ emitDispCond(cfi.cond);
+ break;
- case IF_DR_2G: // DR_2G X............... ......nnnnnddddd Rd Rn
- emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
- emitDispReg(encodingZRtoSP(id->idReg2()), size, false);
- break;
+ case IF_DR_3A: // DR_3A X..........mmmmm ......nnnnnmmmmm Rd Rn Rm
+ if ((ins == INS_add) || (ins == INS_sub))
+ {
+ emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
+ emitDispReg(encodingZRtoSP(id->idReg2()), size, true);
+ }
+ else
+ {
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ }
+ if (id->idIsLclVar())
+ {
+ emitDispReg(codeGen->rsGetRsvdReg(), size, false);
+ }
+ else
+ {
+ emitDispReg(id->idReg3(), size, false);
+ }
- case IF_DR_2H: // DR_2H X........X...... ......nnnnnddddd Rd Rn
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, false);
- break;
+ break;
- case IF_DR_2I: // DR_2I X..........mmmmm cccc..nnnnn.nzcv Rn Rm nzcv cond
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
- cfi.immCFVal = (unsigned) emitGetInsSC(id);
- emitDispFlags(cfi.flags);
- printf(",");
- emitDispCond(cfi.cond);
- break;
+ case IF_DR_3B: // DR_3B X.......sh.mmmmm ssssssnnnnnddddd Rd Rn Rm {LSL,LSR,ASR} imm(0-63)
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ emitDispShiftedReg(id->idReg3(), id->idInsOpt(), emitGetInsSC(id), size);
+ break;
- case IF_DR_3A: // DR_3A X..........mmmmm ......nnnnnmmmmm Rd Rn Rm
- if ((ins == INS_add) || (ins == INS_sub) )
- {
+ case IF_DR_3C: // DR_3C X..........mmmmm ooosssnnnnnddddd Rd Rn Rm ext(Rm) LSL imm(0-4)
emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
emitDispReg(encodingZRtoSP(id->idReg2()), size, true);
- }
- else
- {
+ imm = emitGetInsSC(id);
+ emitDispExtendReg(id->idReg3(), id->idInsOpt(), imm);
+ break;
+
+ case IF_DR_3D: // DR_3D X..........mmmmm cccc..nnnnnmmmmm Rd Rn Rm cond
emitDispReg(id->idReg1(), size, true);
emitDispReg(id->idReg2(), size, true);
- }
- if (id->idIsLclVar())
- {
- emitDispReg(codeGen->rsGetRsvdReg(), size, false);
- }
- else
- {
- emitDispReg(id->idReg3(), size, false);
- }
-
- break;
+ emitDispReg(id->idReg3(), size, true);
+ cfi.immCFVal = (unsigned)emitGetInsSC(id);
+ emitDispCond(cfi.cond);
+ break;
- case IF_DR_3B: // DR_3B X.......sh.mmmmm ssssssnnnnnddddd Rd Rn Rm {LSL,LSR,ASR} imm(0-63)
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
- emitDispShiftedReg(id->idReg3(), id->idInsOpt(), emitGetInsSC(id), size);
- break;
+ case IF_DR_3E: // DR_3E X........X.mmmmm ssssssnnnnnddddd Rd Rn Rm imm(0-63)
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ emitDispReg(id->idReg3(), size, true);
+ emitDispImm(emitGetInsSC(id), false);
+ break;
- case IF_DR_3C: // DR_3C X..........mmmmm ooosssnnnnnddddd Rd Rn Rm ext(Rm) LSL imm(0-4)
- emitDispReg(encodingZRtoSP(id->idReg1()), size, true);
- emitDispReg(encodingZRtoSP(id->idReg2()), size, true);
- imm = emitGetInsSC(id);
- emitDispExtendReg(id->idReg3(), id->idInsOpt(), imm);
- break;
+ case IF_DR_4A: // DR_4A X..........mmmmm .aaaaannnnnmmmmm Rd Rn Rm Ra
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ emitDispReg(id->idReg3(), size, true);
+ emitDispReg(id->idReg4(), size, false);
+ break;
- case IF_DR_3D: // DR_3D X..........mmmmm cccc..nnnnnmmmmm Rd Rn Rm cond
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
- emitDispReg(id->idReg3(), size, true);
- cfi.immCFVal = (unsigned) emitGetInsSC(id);
- emitDispCond(cfi.cond);
- break;
+ case IF_DV_1A: // DV_1A .........X.iiiii iii........ddddd Vd imm8 (fmov - immediate scalar)
+ elemsize = id->idOpSize();
+ emitDispReg(id->idReg1(), elemsize, true);
+ emitDispFloatImm(emitGetInsSC(id));
+ break;
- case IF_DR_3E: // DR_3E X........X.mmmmm ssssssnnnnnddddd Rd Rn Rm imm(0-63)
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
- emitDispReg(id->idReg3(), size, true);
- emitDispImm(emitGetInsSC(id), false);
- break;
-
- case IF_DR_4A: // DR_4A X..........mmmmm .aaaaannnnnmmmmm Rd Rn Rm Ra
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
- emitDispReg(id->idReg3(), size, true);
- emitDispReg(id->idReg4(), size, false);
- break;
+ case IF_DV_1B: // DV_1B .QX..........iii cmod..iiiiiddddd Vd imm8 (immediate vector)
+ imm = emitGetInsSC(id) & 0x0ff;
+ immShift = (emitGetInsSC(id) & 0x700) >> 8;
+ hasShift = (immShift != 0);
+ elemsize = optGetElemsize(id->idInsOpt());
+ if (id->idInsOpt() == INS_OPTS_1D)
+ {
+ assert(elemsize == size);
+ emitDispReg(id->idReg1(), size, true);
+ }
+ else
+ {
+ emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
+ }
+ if (ins == INS_fmov)
+ {
+ emitDispFloatImm(imm);
+ assert(hasShift == false);
+ }
+ else
+ {
+ if (elemsize == EA_8BYTE)
+ {
+ assert(ins == INS_movi);
+ ssize_t imm64 = 0;
+ const ssize_t mask8 = 0xFF;
+ for (unsigned b = 0; b < 8; b++)
+ {
+ if (imm & (1 << b))
+ {
+ imm64 |= (mask8 << (b * 8));
+ }
+ }
+ emitDispImm(imm64, hasShift, true);
+ }
+ else
+ {
+ emitDispImm(imm, hasShift, true);
+ }
+ if (hasShift)
+ {
+ insOpts opt = (immShift & 0x4) ? INS_OPTS_MSL : INS_OPTS_LSL;
+ unsigned shift = (immShift & 0x3) * 8;
+ emitDispShiftOpts(opt);
+ emitDispImm(shift, false);
+ }
+ }
+ break;
- case IF_DV_1A: // DV_1A .........X.iiiii iii........ddddd Vd imm8 (fmov - immediate scalar)
- elemsize = id->idOpSize();
- emitDispReg(id->idReg1(), elemsize, true);
- emitDispFloatImm(emitGetInsSC(id));
- break;
+ case IF_DV_1C: // DV_1C .........X...... ......nnnnn..... Vn #0.0 (fcmp - with zero)
+ elemsize = id->idOpSize();
+ emitDispReg(id->idReg1(), elemsize, true);
+ emitDispFloatZero();
+ break;
- case IF_DV_1B: // DV_1B .QX..........iii cmod..iiiiiddddd Vd imm8 (immediate vector)
- imm = emitGetInsSC(id) & 0x0ff;
- immShift = (emitGetInsSC(id) & 0x700) >> 8;
- hasShift = (immShift != 0);
- elemsize = optGetElemsize(id->idInsOpt());
- if (id->idInsOpt() == INS_OPTS_1D)
- {
- assert(elemsize == size);
- emitDispReg(id->idReg1(), size, true);
- }
- else
- {
+ case IF_DV_2A: // DV_2A .Q.......X...... ......nnnnnddddd Vd Vn (fabs, fcvt - vector)
+ case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
- }
- if (ins == INS_fmov)
- {
- emitDispFloatImm(imm);
- assert(hasShift == false);
- }
- else
- {
- if (elemsize == EA_8BYTE)
+ emitDispVectorReg(id->idReg2(), id->idInsOpt(), false);
+ break;
+
+ case IF_DV_2N: // DV_2N .........iiiiiii ......nnnnnddddd Vd Vn imm (shift - scalar)
+ elemsize = id->idOpSize();
+ emitDispReg(id->idReg1(), elemsize, true);
+ emitDispReg(id->idReg2(), elemsize, true);
+ emitDispImm(emitGetInsSC(id), false);
+ break;
+
+ case IF_DV_2O: // DV_2O .Q.......iiiiiii ......nnnnnddddd Vd Vn imm (shift - vector)
+ imm = emitGetInsSC(id);
+ // Do we have a sxtl or uxtl instruction?
+ isExtendAlias = ((ins == INS_sxtl) || (ins == INS_sxtl2) || (ins == INS_uxtl) || (ins == INS_uxtl2));
+ code = emitInsCode(ins, fmt);
+ if (code & 0x00008000) // widen/narrow opcodes
{
- assert(ins == INS_movi);
- ssize_t imm64 = 0;
- const ssize_t mask8 = 0xFF;
- for (unsigned b=0; b<8; b++)
+ if (code & 0x00002000) // SHL opcodes
{
- if (imm & (1<<b))
- {
- imm64 |= (mask8 << (b*8));
- }
+ emitDispVectorReg(id->idReg1(), optWidenElemsize(id->idInsOpt()), true);
+ emitDispVectorReg(id->idReg2(), id->idInsOpt(), !isExtendAlias);
+ }
+ else // SHR opcodes
+ {
+ emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispVectorReg(id->idReg2(), optWidenElemsize(id->idInsOpt()), !isExtendAlias);
}
- emitDispImm(imm64, hasShift, true);
}
else
{
- emitDispImm(imm, hasShift, true);
+ emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispVectorReg(id->idReg2(), id->idInsOpt(), !isExtendAlias);
}
- if (hasShift)
+ // Print the immediate unless we have a sxtl or uxtl instruction
+ if (!isExtendAlias)
{
- insOpts opt = (immShift & 0x4) ? INS_OPTS_MSL : INS_OPTS_LSL;
- unsigned shift = (immShift & 0x3) * 8;
- emitDispShiftOpts(opt);
- emitDispImm(shift, false);
+ emitDispImm(imm, false);
}
- }
- break;
-
- case IF_DV_1C: // DV_1C .........X...... ......nnnnn..... Vn #0.0 (fcmp - with zero)
- elemsize = id->idOpSize();
- emitDispReg(id->idReg1(), elemsize, true);
- emitDispFloatZero();
- break;
-
- case IF_DV_2A: // DV_2A .Q.......X...... ......nnnnnddddd Vd Vn (fabs, fcvt - vector)
- case IF_DV_2M: // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
- emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
- emitDispVectorReg(id->idReg2(), id->idInsOpt(), false);
- break;
-
- case IF_DV_2N: // DV_2N .........iiiiiii ......nnnnnddddd Vd Vn imm (shift - scalar)
- elemsize = id->idOpSize();
- emitDispReg(id->idReg1(), elemsize, true);
- emitDispReg(id->idReg2(), elemsize, true);
- emitDispImm(emitGetInsSC(id), false);
- break;
+ break;
- case IF_DV_2O: // DV_2O .Q.......iiiiiii ......nnnnnddddd Vd Vn imm (shift - vector)
- imm = emitGetInsSC(id);
- // Do we have a sxtl or uxtl instruction?
- isExtendAlias = ((ins == INS_sxtl) || (ins == INS_sxtl2) || (ins == INS_uxtl) || (ins == INS_uxtl2));
- code = emitInsCode(ins, fmt);
- if (code & 0x00008000) // widen/narrow opcodes
- {
- if (code & 0x00002000) // SHL opcodes
+ case IF_DV_2B: // DV_2B .Q.........iiiii ......nnnnnddddd Rd Vn[] (umov/smov - to general)
+ srcsize = id->idOpSize();
+ index = emitGetInsSC(id);
+ if (ins == INS_smov)
{
- emitDispVectorReg(id->idReg1(), optWidenElemsize(id->idInsOpt()), true);
- emitDispVectorReg(id->idReg2(), id->idInsOpt(), !isExtendAlias);
+ dstsize = EA_8BYTE;
}
- else // SHR opcodes
+ else // INS_umov or INS_mov
{
- emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
- emitDispVectorReg(id->idReg2(), optWidenElemsize(id->idInsOpt()), !isExtendAlias);
+ dstsize = (srcsize == EA_8BYTE) ? EA_8BYTE : EA_4BYTE;
}
- }
- else
- {
- emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
- emitDispVectorReg(id->idReg2(), id->idInsOpt(), !isExtendAlias);
- }
- // Print the immediate unless we have a sxtl or uxtl instruction
- if (!isExtendAlias)
- {
- emitDispImm(imm, false);
- }
- break;
+ emitDispReg(id->idReg1(), dstsize, true);
+ emitDispVectorRegIndex(id->idReg2(), srcsize, index, false);
+ break;
- case IF_DV_2B: // DV_2B .Q.........iiiii ......nnnnnddddd Rd Vn[] (umov/smov - to general)
- srcsize = id->idOpSize();
- index = emitGetInsSC(id);
- if (ins == INS_smov)
- {
- dstsize = EA_8BYTE;
- }
- else // INS_umov or INS_mov
- {
- dstsize = (srcsize == EA_8BYTE) ? EA_8BYTE : EA_4BYTE;
- }
- emitDispReg(id->idReg1(), dstsize, true);
- emitDispVectorRegIndex(id->idReg2(), srcsize, index, false);
- break;
+ case IF_DV_2C: // DV_2C .Q.........iiiii ......nnnnnddddd Vd Rn (dup/ins - vector from general)
+ if (ins == INS_dup)
+ {
+ datasize = id->idOpSize();
+ assert(isValidVectorDatasize(datasize));
+ assert(isValidArrangement(datasize, id->idInsOpt()));
+ elemsize = optGetElemsize(id->idInsOpt());
+ emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
+ }
+ else // INS_ins
+ {
+ elemsize = id->idOpSize();
+ index = emitGetInsSC(id);
+ assert(isValidVectorElemsize(elemsize));
+ emitDispVectorRegIndex(id->idReg1(), elemsize, index, true);
+ }
+ emitDispReg(id->idReg2(), (elemsize == EA_8BYTE) ? EA_8BYTE : EA_4BYTE, false);
+ break;
- case IF_DV_2C: // DV_2C .Q.........iiiii ......nnnnnddddd Vd Rn (dup/ins - vector from general)
- if (ins == INS_dup)
- {
+ case IF_DV_2D: // DV_2D .Q.........iiiii ......nnnnnddddd Vd Vn[] (dup - vector)
datasize = id->idOpSize();
assert(isValidVectorDatasize(datasize));
assert(isValidArrangement(datasize, id->idInsOpt()));
elemsize = optGetElemsize(id->idInsOpt());
+ index = emitGetInsSC(id);
emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
- }
- else // INS_ins
- {
+ emitDispVectorRegIndex(id->idReg2(), elemsize, index, false);
+ break;
+
+ case IF_DV_2E: // DV_2E ...........iiiii ......nnnnnddddd Vd Vn[] (dup - scalar)
elemsize = id->idOpSize();
- index = emitGetInsSC(id);
- assert(isValidVectorElemsize(elemsize));
- emitDispVectorRegIndex(id->idReg1(), elemsize, index, true);
- }
- emitDispReg(id->idReg2(), (elemsize == EA_8BYTE) ? EA_8BYTE : EA_4BYTE, false);
- break;
+ index = emitGetInsSC(id);
+ emitDispReg(id->idReg1(), elemsize, true);
+ emitDispVectorRegIndex(id->idReg2(), elemsize, index, false);
+ break;
- case IF_DV_2D: // DV_2D .Q.........iiiii ......nnnnnddddd Vd Vn[] (dup - vector)
- datasize = id->idOpSize();
- assert(isValidVectorDatasize(datasize));
- assert(isValidArrangement(datasize, id->idInsOpt()));
- elemsize = optGetElemsize(id->idInsOpt());
- index = emitGetInsSC(id);
- emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
- emitDispVectorRegIndex(id->idReg2(), elemsize, index, false);
- break;
+ case IF_DV_2F: // DV_2F ...........iiiii .jjjj.nnnnnddddd Vd[] Vn[] (ins - element)
+ imm = emitGetInsSC(id);
+ index = (imm >> 4) & 0xf;
+ index2 = imm & 0xf;
+ elemsize = id->idOpSize();
+ emitDispVectorRegIndex(id->idReg1(), elemsize, index, true);
+ emitDispVectorRegIndex(id->idReg2(), elemsize, index2, false);
+ break;
- case IF_DV_2E: // DV_2E ...........iiiii ......nnnnnddddd Vd Vn[] (dup - scalar)
- elemsize = id->idOpSize();
- index = emitGetInsSC(id);
- emitDispReg(id->idReg1(), elemsize, true);
- emitDispVectorRegIndex(id->idReg2(), elemsize, index, false);
- break;
+ case IF_DV_2G: // DV_2G .........X...... ......nnnnnddddd Vd Vn (fmov, fcvtXX - register)
+ case IF_DV_2K: // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
+ case IF_DV_2L: // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
+ elemsize = id->idOpSize();
+ emitDispReg(id->idReg1(), elemsize, true);
+ emitDispReg(id->idReg2(), elemsize, false);
+ break;
- case IF_DV_2F: // DV_2F ...........iiiii .jjjj.nnnnnddddd Vd[] Vn[] (ins - element)
- imm = emitGetInsSC(id);
- index = (imm >> 4) & 0xf;
- index2 = imm & 0xf;
- elemsize = id->idOpSize();
- emitDispVectorRegIndex(id->idReg1(), elemsize, index, true);
- emitDispVectorRegIndex(id->idReg2(), elemsize, index2, false);
- break;
+ case IF_DV_2H: // DV_2H X........X...... ......nnnnnddddd Rd Vn (fmov, fcvtXX - to general)
+ case IF_DV_2I: // DV_2I X........X...... ......nnnnnddddd Vd Rn (fmov, Xcvtf - from general)
+ case IF_DV_2J: // DV_2J ........SS.....D D.....nnnnnddddd Vd Vn (fcvt)
+ dstsize = optGetDstsize(id->idInsOpt());
+ srcsize = optGetSrcsize(id->idInsOpt());
- case IF_DV_2G: // DV_2G .........X...... ......nnnnnddddd Vd Vn (fmov, fcvtXX - register)
- case IF_DV_2K: // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
- case IF_DV_2L: // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
- elemsize = id->idOpSize();
- emitDispReg(id->idReg1(), elemsize, true);
- emitDispReg(id->idReg2(), elemsize, false);
- break;
+ emitDispReg(id->idReg1(), dstsize, true);
+ emitDispReg(id->idReg2(), srcsize, false);
+ break;
- case IF_DV_2H: // DV_2H X........X...... ......nnnnnddddd Rd Vn (fmov, fcvtXX - to general)
- case IF_DV_2I: // DV_2I X........X...... ......nnnnnddddd Vd Rn (fmov, Xcvtf - from general)
- case IF_DV_2J: // DV_2J ........SS.....D D.....nnnnnddddd Vd Vn (fcvt)
- dstsize = optGetDstsize(id->idInsOpt());
- srcsize = optGetSrcsize(id->idInsOpt());
-
- emitDispReg(id->idReg1(), dstsize, true);
- emitDispReg(id->idReg2(), srcsize, false);
- break;
+ case IF_DV_3A: // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ case IF_DV_3B: // DV_3B .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
+ emitDispVectorReg(id->idReg2(), id->idInsOpt(), true);
+ emitDispVectorReg(id->idReg3(), id->idInsOpt(), false);
+ break;
- case IF_DV_3A: // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- case IF_DV_3B: // DV_3B .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
- emitDispVectorReg(id->idReg2(), id->idInsOpt(), true);
- emitDispVectorReg(id->idReg3(), id->idInsOpt(), false);
- break;
+ case IF_DV_3C: // DV_3C .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+ emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
+ if (ins != INS_mov)
+ {
+ emitDispVectorReg(id->idReg2(), id->idInsOpt(), true);
+ }
+ emitDispVectorReg(id->idReg3(), id->idInsOpt(), false);
+ break;
- case IF_DV_3C: // DV_3C .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
- emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
- if (ins != INS_mov)
- {
+ case IF_DV_3AI: // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
+ case IF_DV_3BI: // DV_3BI .Q........Lmmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
+ emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
emitDispVectorReg(id->idReg2(), id->idInsOpt(), true);
- }
- emitDispVectorReg(id->idReg3(), id->idInsOpt(), false);
- break;
-
- case IF_DV_3AI: // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
- case IF_DV_3BI: // DV_3BI .Q........Lmmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
- emitDispVectorReg(id->idReg1(), id->idInsOpt(), true);
- emitDispVectorReg(id->idReg2(), id->idInsOpt(), true);
- elemsize = optGetElemsize(id->idInsOpt());
- emitDispVectorRegIndex(id->idReg3(), elemsize, emitGetInsSC(id), false);
- break;
+ elemsize = optGetElemsize(id->idInsOpt());
+ emitDispVectorRegIndex(id->idReg3(), elemsize, emitGetInsSC(id), false);
+ break;
- case IF_DV_3D: // DV_3D .........X.mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
- case IF_DV_3E: // DV_3E ...........mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
- emitDispReg(id->idReg3(), size, false);
- break;
+ case IF_DV_3D: // DV_3D .........X.mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
+ case IF_DV_3E: // DV_3E ...........mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ emitDispReg(id->idReg3(), size, false);
+ break;
- case IF_DV_3DI: // DV_3DI .........XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (scalar by elem)
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
- elemsize = size;
- emitDispVectorRegIndex(id->idReg3(), elemsize, emitGetInsSC(id), false);
- break;
+ case IF_DV_3DI: // DV_3DI .........XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (scalar by elem)
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ elemsize = size;
+ emitDispVectorRegIndex(id->idReg3(), elemsize, emitGetInsSC(id), false);
+ break;
- case IF_DV_4A: // DV_4A .........X.mmmmm .aaaaannnnnddddd Vd Va Vn Vm (scalar)
- emitDispReg(id->idReg1(), size, true);
- emitDispReg(id->idReg2(), size, true);
- emitDispReg(id->idReg3(), size, true);
- emitDispReg(id->idReg4(), size, false);
- break;
+ case IF_DV_4A: // DV_4A .........X.mmmmm .aaaaannnnnddddd Vd Va Vn Vm (scalar)
+ emitDispReg(id->idReg1(), size, true);
+ emitDispReg(id->idReg2(), size, true);
+ emitDispReg(id->idReg3(), size, true);
+ emitDispReg(id->idReg4(), size, false);
+ break;
- case IF_SN_0A: // SN_0A ................ ................
- break;
+ case IF_SN_0A: // SN_0A ................ ................
+ break;
- case IF_SI_0A: // SI_0A ...........iiiii iiiiiiiiiii..... imm16
- emitDispImm(emitGetInsSC(id), false);
- break;
+ case IF_SI_0A: // SI_0A ...........iiiii iiiiiiiiiii..... imm16
+ emitDispImm(emitGetInsSC(id), false);
+ break;
- case IF_SI_0B: // SI_0B ................ ....bbbb........ imm4 - barrier
- emitDispBarrier((insBarrier)emitGetInsSC(id));
- break;
+ case IF_SI_0B: // SI_0B ................ ....bbbb........ imm4 - barrier
+ emitDispBarrier((insBarrier)emitGetInsSC(id));
+ break;
- default:
- printf("unexpected format %s", emitIfName(id->idInsFmt()));
- assert(!"unexpectedFormat");
- break;
+ default:
+ printf("unexpected format %s", emitIfName(id->idInsFmt()));
+ assert(!"unexpectedFormat");
+ break;
}
if (id->idDebugOnlyInfo()->idVarRefOffs)
{
printf("\t// ");
- emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(),
- id->idAddr()->iiaLclVar.lvaOffset(),
+ emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(),
id->idDebugOnlyInfo()->idVarRefOffs, asmfm);
}
@@ -10940,69 +10740,68 @@ void emitter::emitDispIns(instrDesc * id,
* Display a stack frame reference.
*/
-void emitter::emitDispFrameRef(int varx, int disp, int offs, bool asmfm)
+void emitter::emitDispFrameRef(int varx, int disp, int offs, bool asmfm)
{
printf("[");
- if (varx < 0)
- printf("TEMP_%02u", -varx);
- else
- emitComp->gtDispLclVar(+varx, false);
+ if (varx < 0)
+ printf("TEMP_%02u", -varx);
+ else
+ emitComp->gtDispLclVar(+varx, false);
- if (disp < 0)
- printf("-0x%02x", -disp);
- else if (disp > 0)
- printf("+0x%02x", +disp);
+ if (disp < 0)
+ printf("-0x%02x", -disp);
+ else if (disp > 0)
+ printf("+0x%02x", +disp);
printf("]");
- if (varx >= 0 && emitComp->opts.varNames)
+ if (varx >= 0 && emitComp->opts.varNames)
{
LclVarDsc* varDsc;
- const char * varName;
+ const char* varName;
assert((unsigned)varx < emitComp->lvaCount);
varDsc = emitComp->lvaTable + varx;
varName = emitComp->compLocalVarName(varx, offs);
- if (varName)
+ if (varName)
{
printf("'%s", varName);
- if (disp < 0)
- printf("-%d", -disp);
+ if (disp < 0)
+ printf("-%d", -disp);
else if (disp > 0)
- printf("+%d", +disp);
+ printf("+%d", +disp);
printf("'");
}
}
}
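For orientation, a hedged illustration of the output this routine produces (the register/variable formatting of gtDispLclVar and the names are assumptions, not taken from the commit): a reference to local number 2 at frame offset 0x18 whose name resolves to 'myLocal' would print roughly as

    // Hypothetical usage sketch; values are illustrative only.
    // emitDispFrameRef(2, 0x18, 0, false) would print something like
    //     [V02+0x18]'myLocal+24'
    // Note the bracketed displacement uses the hex format ("+0x%02x") while the
    // displacement appended to the name uses decimal ("+%d"), and negative var
    // numbers print as spill temps ("TEMP_%02u"), matching the printf calls above.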
-
#endif // DEBUG
// Generate code for a load or store operation with a potentially complex addressing mode
// This method handles the case of a GT_IND with contained GT_LEA op1 of the x86 form [base + index*scale + offset]
// Since Arm64 does not directly support such a complex addressing mode
// we may generate up to three instructions for this on Arm64
-//
+//
void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataReg, GenTreeIndir* indir)
{
emitAttr ldstAttr = isVectorRegister(dataReg) ? attr : emitInsAdjustLoadStoreAttr(ins, attr);
- GenTree* addr = indir->Addr();
+ GenTree* addr = indir->Addr();
if (addr->isContained())
{
assert(addr->OperGet() == GT_LCL_VAR_ADDR || addr->OperGet() == GT_LEA);
- int offset = 0;
- DWORD lsl = 0;
+ int offset = 0;
+ DWORD lsl = 0;
if (addr->OperGet() == GT_LEA)
{
- offset = (int) addr->AsAddrMode()->gtOffset;
+ offset = (int)addr->AsAddrMode()->gtOffset;
if (addr->AsAddrMode()->gtScale > 0)
{
assert(isPow2(addr->AsAddrMode()->gtScale));
@@ -11010,16 +10809,16 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
}
}
- GenTree* memBase = indir->Base();
+ GenTree* memBase = indir->Base();
if (indir->HasIndex())
{
- GenTree* index = indir->Index();
+ GenTree* index = indir->Index();
if (offset != 0)
{
regMaskTP tmpRegMask = indir->gtRsvdRegs;
- regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask);
noway_assert(tmpReg != REG_NA);
if (emitIns_valid_imm_for_add(offset, EA_8BYTE))
@@ -11027,9 +10826,10 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
if (lsl > 0)
{
// Generate code to set tmpReg = base + index*scale
- emitIns_R_R_R_I(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum, lsl, INS_OPTS_LSL);
+ emitIns_R_R_R_I(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum, lsl,
+ INS_OPTS_LSL);
}
- else // no scale
+ else // no scale
{
// Generate code to set tmpReg = base + index
emitIns_R_R_R(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum);
@@ -11038,12 +10838,13 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
noway_assert(emitInsIsLoad(ins) || (tmpReg != dataReg));
// Then load/store dataReg from/to [tmpReg + offset]
- emitIns_R_R_I(ins, ldstAttr, dataReg, tmpReg, offset);;
+ emitIns_R_R_I(ins, ldstAttr, dataReg, tmpReg, offset);
+ ;
}
else // large offset
{
// First load/store tmpReg with the large offset constant
- codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
+ codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then add the base register
// rd = rd + base
emitIns_R_R_R(INS_add, EA_PTRSIZE, tmpReg, tmpReg, memBase->gtRegNum);
@@ -11055,12 +10856,12 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
emitIns_R_R_R_I(ins, ldstAttr, dataReg, tmpReg, index->gtRegNum, lsl, INS_OPTS_LSL);
}
}
- else // (offset == 0)
+ else // (offset == 0)
{
if (lsl > 0)
{
// Then load/store dataReg from/to [memBase + index*scale]
- emitIns_R_R_R_I(ins, ldstAttr, dataReg, memBase->gtRegNum, index->gtRegNum, lsl, INS_OPTS_LSL);
+ emitIns_R_R_R_I(ins, ldstAttr, dataReg, memBase->gtRegNum, index->gtRegNum, lsl, INS_OPTS_LSL);
}
else // no scale
{
@@ -11069,7 +10870,7 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
}
}
}
- else // no Index register
+ else // no Index register
{
if (emitIns_valid_imm_for_ldst_offset(offset, EA_SIZE(attr)))
{
@@ -11080,18 +10881,18 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
{
// We require a tmpReg to hold the offset
regMaskTP tmpRegMask = indir->gtRsvdRegs;
- regNumber tmpReg = genRegNumFromMask(tmpRegMask);
+ regNumber tmpReg = genRegNumFromMask(tmpRegMask);
noway_assert(tmpReg != REG_NA);
// First load/store tmpReg with the large offset constant
- codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
+ codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then load/store dataReg from/to [memBase + tmpReg]
emitIns_R_R_R(ins, ldstAttr, dataReg, memBase->gtRegNum, tmpReg);
}
}
}
- else // addr is not contained, so we evaluate it into a register
+ else // addr is not contained, so we evaluate it into a register
{
codeGen->genConsumeReg(addr);
// Then load/store dataReg from/to [addrReg]
@@ -11099,19 +10900,18 @@ void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataR
}
}
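A hedged sketch of the instruction sequences this routine emits for the contained [base + index*scale + offset] form, assembled from the emit calls visible in the hunks above (the register variables are whatever the nodes' registers happen to be; this is an illustration, not new code in the commit):

    // Small offset: tmpReg = base + (index << lsl), then load/store at [tmpReg + offset].
    emitIns_R_R_R_I(INS_add, EA_PTRSIZE, tmpReg, memBase->gtRegNum, index->gtRegNum, lsl, INS_OPTS_LSL);
    emitIns_R_R_I(ins, ldstAttr, dataReg, tmpReg, offset);

    // Large offset: materialize the offset, add the base, then load/store at [tmpReg + (index << lsl)].
    codeGen->instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
    emitIns_R_R_R(INS_add, EA_PTRSIZE, tmpReg, tmpReg, memBase->gtRegNum);
    emitIns_R_R_R_I(ins, ldstAttr, dataReg, tmpReg, index->gtRegNum, lsl, INS_OPTS_LSL);

This is where the "up to three instructions" in the comment above comes from.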
-
// Generates an integer data section constant and returns a field handle representing
-// the data offset to access the constant via a load instruction.
+// the data offset to access the constant via a load instruction.
// This is called during ngen for any relocatable constants
//
CORINFO_FIELD_HANDLE emitter::emitLiteralConst(ssize_t cnsValIn, emitAttr attr /*=EA_8BYTE*/)
{
ssize_t constValue = cnsValIn;
- void * cnsAddr = &constValue;
+ void* cnsAddr = &constValue;
bool dblAlign;
if (attr == EA_4BYTE)
- {
+ {
dblAlign = false;
}
else
@@ -11125,7 +10925,7 @@ CORINFO_FIELD_HANDLE emitter::emitLiteralConst(ssize_t cnsValIn, emitAttr attr /
// to constant data, not a real static field.
UNATIVE_OFFSET cnsSize = (attr == EA_4BYTE) ? 4 : 8;
- UNATIVE_OFFSET cnum = emitDataConst(cnsAddr, cnsSize, dblAlign);
+ UNATIVE_OFFSET cnum = emitDataConst(cnsAddr, cnsSize, dblAlign);
return emitComp->eeFindJitDataOffs(cnum);
}
@@ -11140,23 +10940,23 @@ CORINFO_FIELD_HANDLE emitter::emitFltOrDblConst(GenTreeDblCon* tree, emitAttr at
}
else
{
- assert(emitTypeSize(tree->TypeGet()) == attr);
+ assert(emitTypeSize(tree->TypeGet()) == attr);
}
double constValue = tree->gtDblCon.gtDconVal;
- void *cnsAddr;
- float f;
- bool dblAlign;
+ void* cnsAddr;
+ float f;
+ bool dblAlign;
if (attr == EA_4BYTE)
- {
- f = forceCastToFloat(constValue);
- cnsAddr = &f;
+ {
+ f = forceCastToFloat(constValue);
+ cnsAddr = &f;
dblAlign = false;
}
else
{
- cnsAddr = &constValue;
+ cnsAddr = &constValue;
dblAlign = true;
}
@@ -11165,7 +10965,7 @@ CORINFO_FIELD_HANDLE emitter::emitFltOrDblConst(GenTreeDblCon* tree, emitAttr at
// to constant data, not a real static field.
UNATIVE_OFFSET cnsSize = (attr == EA_4BYTE) ? 4 : 8;
- UNATIVE_OFFSET cnum = emitDataConst(cnsAddr, cnsSize, dblAlign);
+ UNATIVE_OFFSET cnum = emitDataConst(cnsAddr, cnsSize, dblAlign);
return emitComp->eeFindJitDataOffs(cnum);
}
@@ -11213,9 +11013,9 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// find immed (if any) - it cannot be a dst
// Only one src can be an int.
- GenTreeIntConCommon* intConst = nullptr;
- GenTree* nonIntReg = nullptr;
-
+ GenTreeIntConCommon* intConst = nullptr;
+ GenTree* nonIntReg = nullptr;
+
if (varTypeIsFloating(dst))
{
// src1 can only be a reg
@@ -11231,7 +11031,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// Check src2 first as we can always allow it to be a contained immediate
if (src2->isContainedIntOrIImmed())
{
- intConst = src2->AsIntConCommon();
+ intConst = src2->AsIntConCommon();
nonIntReg = src1;
}
// Only for commutative operations do we check src1 and allow it to be a contained immediate
@@ -11244,7 +11044,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
if (src1->isContainedIntOrIImmed())
{
assert(!src2->isContainedIntOrIImmed());
- intConst = src1->AsIntConCommon();
+ intConst = src1->AsIntConCommon();
nonIntReg = src2;
}
}
@@ -11254,9 +11054,9 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
assert(!src1->isContained());
}
}
- bool isMulOverflow = false;
- bool isUnsignedMul = false;
- regNumber extraReg = REG_NA;
+ bool isMulOverflow = false;
+ bool isUnsignedMul = false;
+ regNumber extraReg = REG_NA;
if (dst->gtOverflowEx())
{
if (ins == INS_add)
@@ -11271,7 +11071,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
{
isMulOverflow = true;
isUnsignedMul = ((dst->gtFlags & GTF_UNSIGNED) != 0);
- assert(intConst == nullptr); // overflow format doesn't support an int constant operand
+ assert(intConst == nullptr); // overflow format doesn't support an int constant operand
}
else
{
@@ -11293,8 +11093,8 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// Remove the bit for 'dst->gtRegNum' from 'tmpRegsMask'
regMaskTP tmpRegsMask = dst->gtRsvdRegs & ~genRegMask(dst->gtRegNum);
assert(tmpRegsMask != RBM_NONE);
- regMaskTP tmpRegMask = genFindLowestBit(tmpRegsMask); // set tmpRegMsk to a one-bit mask
- extraReg = genRegNumFromMask(tmpRegMask); // set tmpReg from that mask
+ regMaskTP tmpRegMask = genFindLowestBit(tmpRegsMask); // set tmpRegMsk to a one-bit mask
+ extraReg = genRegNumFromMask(tmpRegMask); // set tmpReg from that mask
if (isUnsignedMul)
{
diff --git a/src/jit/emitarm64.h b/src/jit/emitarm64.h
index b35af26311..5459a0d6c8 100644
--- a/src/jit/emitarm64.h
+++ b/src/jit/emitarm64.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#if defined(_TARGET_ARM64_)
// The ARM64 instructions are all 32 bits in size.
@@ -11,856 +10,792 @@
//
typedef unsigned int code_t;
+static bool strictArmAsm;
- static bool strictArmAsm;
-
- /************************************************************************/
- /* Routines that compute the size of / encode instructions */
- /************************************************************************/
+/************************************************************************/
+/* Routines that compute the size of / encode instructions */
+/************************************************************************/
- struct CnsVal
- {
- ssize_t cnsVal;
+struct CnsVal
+{
+ ssize_t cnsVal;
#ifdef RELOC_SUPPORT
- bool cnsReloc;
+ bool cnsReloc;
#endif
- };
-
-#ifdef DEBUG
-
- /************************************************************************/
- /* Debug-only routines to display instructions */
- /************************************************************************/
-
- const char * emitFPregName (unsigned reg,
- bool varName = true);
- const char * emitVectorRegName(regNumber reg);
-
- void emitDispInst (instruction ins);
- void emitDispReloc (int value, bool addComma);
- void emitDispImm (ssize_t imm, bool addComma, bool alwaysHex = false);
- void emitDispFloatZero();
- void emitDispFloatImm(ssize_t imm8);
- void emitDispImmOptsLSL12(ssize_t imm, insOpts opt);
- void emitDispCond (insCond cond);
- void emitDispFlags (insCflags flags);
- void emitDispBarrier (insBarrier barrier);
- void emitDispShiftOpts(insOpts opt);
- void emitDispExtendOpts(insOpts opt);
- void emitDispLSExtendOpts(insOpts opt);
- void emitDispReg (regNumber reg, emitAttr attr, bool addComma);
- void emitDispVectorReg (regNumber reg, insOpts opt, bool addComma);
- void emitDispVectorRegIndex(regNumber reg, emitAttr elemsize, ssize_t index, bool addComma);
- void emitDispArrangement(insOpts opt);
- void emitDispShiftedReg(regNumber reg, insOpts opt, ssize_t imm, emitAttr attr);
- void emitDispExtendReg(regNumber reg, insOpts opt, ssize_t imm);
- void emitDispAddrRI (regNumber reg, insOpts opt, ssize_t imm);
- void emitDispAddrRRExt(regNumber reg1, regNumber reg2, insOpts opt, bool isScaled, emitAttr size);
-
- void emitDispIns (instrDesc *id, bool isNew, bool doffs, bool asmfm,
- unsigned offs = 0, BYTE * pCode = 0, size_t sz = 0,
- insGroup *ig = NULL);
+};
+
+#ifdef DEBUG
+
+/************************************************************************/
+/* Debug-only routines to display instructions */
+/************************************************************************/
+
+const char* emitFPregName(unsigned reg, bool varName = true);
+const char* emitVectorRegName(regNumber reg);
+
+void emitDispInst(instruction ins);
+void emitDispReloc(int value, bool addComma);
+void emitDispImm(ssize_t imm, bool addComma, bool alwaysHex = false);
+void emitDispFloatZero();
+void emitDispFloatImm(ssize_t imm8);
+void emitDispImmOptsLSL12(ssize_t imm, insOpts opt);
+void emitDispCond(insCond cond);
+void emitDispFlags(insCflags flags);
+void emitDispBarrier(insBarrier barrier);
+void emitDispShiftOpts(insOpts opt);
+void emitDispExtendOpts(insOpts opt);
+void emitDispLSExtendOpts(insOpts opt);
+void emitDispReg(regNumber reg, emitAttr attr, bool addComma);
+void emitDispVectorReg(regNumber reg, insOpts opt, bool addComma);
+void emitDispVectorRegIndex(regNumber reg, emitAttr elemsize, ssize_t index, bool addComma);
+void emitDispArrangement(insOpts opt);
+void emitDispShiftedReg(regNumber reg, insOpts opt, ssize_t imm, emitAttr attr);
+void emitDispExtendReg(regNumber reg, insOpts opt, ssize_t imm);
+void emitDispAddrRI(regNumber reg, insOpts opt, ssize_t imm);
+void emitDispAddrRRExt(regNumber reg1, regNumber reg2, insOpts opt, bool isScaled, emitAttr size);
+
+void emitDispIns(instrDesc* id,
+ bool isNew,
+ bool doffs,
+ bool asmfm,
+ unsigned offs = 0,
+ BYTE* pCode = 0,
+ size_t sz = 0,
+ insGroup* ig = NULL);
#endif // DEBUG
- /************************************************************************/
- /* Private members that deal with target-dependent instr. descriptors */
- /************************************************************************/
+/************************************************************************/
+/* Private members that deal with target-dependent instr. descriptors */
+/************************************************************************/
private:
-
- instrDesc *emitNewInstrAmd (emitAttr attr, int dsp);
- instrDesc *emitNewInstrAmdCns (emitAttr attr, int dsp, int cns);
-
- instrDesc *emitNewInstrCallDir (int argCnt,
- VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- emitAttr retSize,
- emitAttr secondRetSize);
-
- instrDesc *emitNewInstrCallInd( int argCnt,
- ssize_t disp,
- VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- emitAttr retSize,
- emitAttr secondRetSize);
-
- void emitGetInsCns (instrDesc *id, CnsVal *cv);
- ssize_t emitGetInsAmdCns(instrDesc *id, CnsVal *cv);
- void emitGetInsDcmCns(instrDesc *id, CnsVal *cv);
- ssize_t emitGetInsAmdAny(instrDesc *id);
-
- /************************************************************************/
- /* Private helpers for instruction output */
- /************************************************************************/
+instrDesc* emitNewInstrAmd(emitAttr attr, int dsp);
+instrDesc* emitNewInstrAmdCns(emitAttr attr, int dsp, int cns);
+
+instrDesc* emitNewInstrCallDir(int argCnt,
+ VARSET_VALARG_TP GCvars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ emitAttr retSize,
+ emitAttr secondRetSize);
+
+instrDesc* emitNewInstrCallInd(int argCnt,
+ ssize_t disp,
+ VARSET_VALARG_TP GCvars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ emitAttr retSize,
+ emitAttr secondRetSize);
+
+void emitGetInsCns(instrDesc* id, CnsVal* cv);
+ssize_t emitGetInsAmdCns(instrDesc* id, CnsVal* cv);
+void emitGetInsDcmCns(instrDesc* id, CnsVal* cv);
+ssize_t emitGetInsAmdAny(instrDesc* id);
+
+/************************************************************************/
+/* Private helpers for instruction output */
+/************************************************************************/
private:
+bool emitInsIsCompare(instruction ins);
+bool emitInsIsLoad(instruction ins);
+bool emitInsIsStore(instruction ins);
+bool emitInsIsLoadOrStore(instruction ins);
+emitAttr emitInsAdjustLoadStoreAttr(instruction ins, emitAttr attr);
+emitAttr emitInsTargetRegSize(instrDesc* id);
+emitAttr emitInsLoadStoreSize(instrDesc* id);
- bool emitInsIsCompare(instruction ins);
- bool emitInsIsLoad (instruction ins);
- bool emitInsIsStore (instruction ins);
- bool emitInsIsLoadOrStore(instruction ins);
- emitAttr emitInsAdjustLoadStoreAttr(instruction ins, emitAttr attr);
- emitAttr emitInsTargetRegSize(instrDesc *id);
- emitAttr emitInsLoadStoreSize(instrDesc *id);
-
- emitter::insFormat emitInsFormat(instruction ins);
- emitter::code_t emitInsCode(instruction ins, insFormat fmt);
+emitter::insFormat emitInsFormat(instruction ins);
+emitter::code_t emitInsCode(instruction ins, insFormat fmt);
- // Generate code for a load or store operation and handle the case of contained GT_LEA op1 with [base + index<<scale + offset]
- void emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataReg, GenTreeIndir* indir);
+// Generate code for a load or store operation and handle the case of contained GT_LEA op1 with [base + index<<scale +
+// offset]
+void emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataReg, GenTreeIndir* indir);
- // Emit the 32-bit Arm64 instruction 'code' into the 'dst' buffer
- static unsigned emitOutput_Instr(BYTE *dst, code_t code);
+// Emit the 32-bit Arm64 instruction 'code' into the 'dst' buffer
+static unsigned emitOutput_Instr(BYTE* dst, code_t code);
- // A helper method to return the natural scale for an EA 'size'
- static unsigned NaturalScale_helper(emitAttr size);
+// A helper method to return the natural scale for an EA 'size'
+static unsigned NaturalScale_helper(emitAttr size);
- // A helper method to perform a Rotate-Right shift operation
- static UINT64 ROR_helper(UINT64 value, unsigned sh, unsigned width);
+// A helper method to perform a Rotate-Right shift operation
+static UINT64 ROR_helper(UINT64 value, unsigned sh, unsigned width);
- // A helper method to perform a 'NOT' bitwise complement operation
- static UINT64 NOT_helper(UINT64 value, unsigned width);
+// A helper method to perform a 'NOT' bitwise complement operation
+static UINT64 NOT_helper(UINT64 value, unsigned width);
- // A helper method to perform a bit Replicate operation
- static UINT64 Replicate_helper(UINT64 value, unsigned width, emitAttr size);
+// A helper method to perform a bit Replicate operation
+static UINT64 Replicate_helper(UINT64 value, unsigned width, emitAttr size);
+/************************************************************************
+*
+* This union is used to encode/decode the special ARM64 immediate values
+* that are listed as imm(N,r,s) and referred to as 'bitmask immediate'
+*/
- /************************************************************************
- *
- * This union is used to to encode/decode the special ARM64 immediate values
- * that is listed as imm(N,r,s) and referred to as 'bitmask immediate'
- */
-
- union bitMaskImm
+union bitMaskImm {
+ struct
{
- struct {
- unsigned immS:6; // bits 0..5
- unsigned immR:6; // bits 6..11
- unsigned immN:1; // bits 12
- };
- unsigned immNRS; // concat N:R:S forming a 13-bit unsigned immediate
+ unsigned immS : 6; // bits 0..5
+ unsigned immR : 6; // bits 6..11
+ unsigned immN : 1; // bits 12
};
+ unsigned immNRS; // concat N:R:S forming a 13-bit unsigned immediate
+};
- /************************************************************************
- *
- * Convert between a 64-bit immediate and its 'bitmask immediate'
- * representation imm(i16,hw)
- */
-
- static emitter::bitMaskImm emitEncodeBitMaskImm(INT64 imm, emitAttr size);
+/************************************************************************
+*
+* Convert between a 64-bit immediate and its 'bitmask immediate'
+* representation imm(N,r,s)
+*/
- static INT64 emitDecodeBitMaskImm(const emitter::bitMaskImm bmImm,
- emitAttr size);
+static emitter::bitMaskImm emitEncodeBitMaskImm(INT64 imm, emitAttr size);
+static INT64 emitDecodeBitMaskImm(const emitter::bitMaskImm bmImm, emitAttr size);
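To make the imm(N,r,s) encoding concrete, here is a minimal, self-contained sketch of the decode direction. It is an assumption based on the ARM ARM DecodeBitMasks pseudocode, not the body of emitDecodeBitMaskImm: the element size is derived from N and the inverted low bits of s, an element of S+1 ones is rotated right by R, and the element is replicated across the operand width (the same steps the ROR_helper/NOT_helper/Replicate_helper declarations above hint at).

    #include <cstdint>
    #include <cassert>

    static uint64_t RotateRight64(uint64_t value, unsigned rot, unsigned width)
    {
        uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);
        value &= mask;
        rot %= width;
        if (rot == 0)
            return value;
        return ((value >> rot) | (value << (width - rot))) & mask;
    }

    // 'size' is the operand width in bits: 32 or 64.
    static uint64_t DecodeBitMaskImmSketch(unsigned N, unsigned immR, unsigned immS, unsigned size)
    {
        // Element size: the highest set bit of N:NOT(immS) selects 2, 4, ..., 64.
        unsigned combined = (N << 6) | (~immS & 0x3f);
        unsigned len      = 0;
        for (unsigned i = 0; i < 7; i++)
        {
            if (combined & (1u << i))
                len = i;
        }
        assert(len >= 1); // len == 0 is a reserved encoding

        unsigned esize  = 1u << len;
        unsigned levels = esize - 1;
        unsigned S      = immS & levels; // the element has S+1 consecutive ones...
        unsigned R      = immR & levels; // ...rotated right by R bit positions
        assert(S != levels);             // an all-ones element is not encodable

        uint64_t welem = (1ULL << (S + 1)) - 1;
        uint64_t elem  = RotateRight64(welem, R, esize);

        // Replicate the element across the full operand width.
        uint64_t result = 0;
        for (unsigned pos = 0; pos < size; pos += esize)
            result |= (elem << pos);
        return result;
    }

For example, N=0, r=0, s=0b111100 selects 2-bit elements with a single one bit each, and DecodeBitMaskImmSketch(0, 0, 0x3c, 64) yields 0x5555555555555555.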
- /************************************************************************
- *
- * This union is used to to encode/decode the special ARM64 immediate values
- * that is listed as imm(i16,hw) and referred to as 'halfword immediate'
- */
+/************************************************************************
+*
+* This union is used to encode/decode the special ARM64 immediate values
+* that are listed as imm(i16,hw) and referred to as 'halfword immediate'
+*/
- union halfwordImm
+union halfwordImm {
+ struct
{
- struct {
- unsigned immVal:16; // bits 0..15
- unsigned immHW:2; // bits 16..17
- };
- unsigned immHWVal; // concat HW:Val forming a 18-bit unsigned immediate
+ unsigned immVal : 16; // bits 0..15
+ unsigned immHW : 2; // bits 16..17
};
+ unsigned immHWVal; // concat HW:Val forming an 18-bit unsigned immediate
+};
- /************************************************************************
- *
- * Convert between a 64-bit immediate and its 'halfword immediate'
- * representation imm(i16,hw)
- */
+/************************************************************************
+*
+* Convert between a 64-bit immediate and its 'halfword immediate'
+* representation imm(i16,hw)
+*/
- static emitter::halfwordImm emitEncodeHalfwordImm(INT64 imm, emitAttr size);
+static emitter::halfwordImm emitEncodeHalfwordImm(INT64 imm, emitAttr size);
- static INT64 emitDecodeHalfwordImm(const emitter::halfwordImm hwImm,
- emitAttr size);
+static INT64 emitDecodeHalfwordImm(const emitter::halfwordImm hwImm, emitAttr size);
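The halfword form is much simpler. As a hedged sketch of the basic decode (assumed from the MOVZ/MOVK-style encoding the comment describes, not copied from the implementation), imm(i16,hw) just places the 16-bit value at one of the halfword positions:

    #include <cstdint>

    // 'immHW' selects which 16-bit lane of the destination the value occupies (0..3 for 64-bit).
    static int64_t DecodeHalfwordImmSketch(unsigned immVal, unsigned immHW)
    {
        return (int64_t)((uint64_t)(immVal & 0xffff) << (immHW * 16));
    }

So immVal=0x1234 with immHW=2 decodes to 0x0000123400000000.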
- /************************************************************************
- *
- * This union is used to encode/decode the special ARM64 immediate values
- * that is listed as imm(i16,by) and referred to as 'byteShifted immediate'
- */
+/************************************************************************
+*
+* This union is used to encode/decode the special ARM64 immediate values
+* that are listed as imm(i16,by) and referred to as 'byteShifted immediate'
+*/
- union byteShiftedImm
+union byteShiftedImm {
+ struct
{
- struct {
- unsigned immVal:8; // bits 0..7
- unsigned immBY:2; // bits 8..9
- unsigned immOnes:1; // bit 10
- };
- unsigned immBSVal; // concat Ones:BY:Val forming a 10-bit unsigned immediate
+ unsigned immVal : 8; // bits 0..7
+ unsigned immBY : 2; // bits 8..9
+ unsigned immOnes : 1; // bit 10
};
+ unsigned immBSVal; // concat Ones:BY:Val forming a 10-bit unsigned immediate
+};
- /************************************************************************
- *
- * Convert between a 16/32-bit immediate and its 'byteShifted immediate'
- * representation imm(i8,by)
- */
+/************************************************************************
+*
+* Convert between a 16/32-bit immediate and its 'byteShifted immediate'
+* representation imm(i8,by)
+*/
- static emitter::byteShiftedImm emitEncodeByteShiftedImm(INT64 imm, emitAttr size, bool allow_MSL);
+static emitter::byteShiftedImm emitEncodeByteShiftedImm(INT64 imm, emitAttr size, bool allow_MSL);
- static INT32 emitDecodeByteShiftedImm(const emitter::byteShiftedImm bsImm,
- emitAttr size);
+static INT32 emitDecodeByteShiftedImm(const emitter::byteShiftedImm bsImm, emitAttr size);
- /************************************************************************
- *
- * This union is used to to encode/decode the special ARM64 immediate values
- * that are use for FMOV immediate and referred to as 'float 8-bit immediate'
- */
+/************************************************************************
+*
+* This union is used to encode/decode the special ARM64 immediate values
+* that are used for FMOV immediate and referred to as 'float 8-bit immediate'
+*/
- union floatImm8
+union floatImm8 {
+ struct
{
- struct {
- unsigned immMant:4; // bits 0..3
- unsigned immExp:3; // bits 4..6
- unsigned immSign:1; // bits 7
- };
- unsigned immFPIVal; // concat Sign:Exp:Mant forming an 8-bit unsigned immediate
+ unsigned immMant : 4; // bits 0..3
+ unsigned immExp : 3; // bits 4..6
+ unsigned immSign : 1; // bits 7
};
+ unsigned immFPIVal; // concat Sign:Exp:Mant forming an 8-bit unsigned immediate
+};
- /************************************************************************
- *
- * Convert between a double and its 'float 8-bit immediate' representation
- */
+/************************************************************************
+*
+* Convert between a double and its 'float 8-bit immediate' representation
+*/
- static emitter::floatImm8 emitEncodeFloatImm8(double immDbl);
+static emitter::floatImm8 emitEncodeFloatImm8(double immDbl);
- static double emitDecodeFloatImm8(const emitter::floatImm8 fpImm);
+static double emitDecodeFloatImm8(const emitter::floatImm8 fpImm);
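As with the integer immediates, a small sketch helps here: the FMOV 8-bit immediate packs a sign bit, a 3-bit exponent and a 4-bit mantissa, and the representable values are of the form ±(16..31)/16 × 2^(-3..4). The decode below is an illustration based on the architectural VFPExpandImm rules, assuming the union's fields map directly onto the imm8 sign/exponent/mantissa bits; it is not the body of emitDecodeFloatImm8.

    #include <cmath>

    // sign: 1 bit, exp: 3 bits, mant: 4 bits (the floatImm8 bitfields above).
    static double DecodeFloatImm8Sketch(unsigned sign, unsigned exp, unsigned mant)
    {
        // The top exponent bit selects the 2^-3..2^0 range; otherwise 2^1..2^4.
        int    e   = (exp & 4) ? (int)(exp & 3) - 3 : (int)(exp & 3) + 1;
        double mag = (1.0 + mant / 16.0) * std::ldexp(1.0, e);
        return sign ? -mag : mag;
    }

For example, sign=0, exp=7, mant=0 decodes to 1.0, and sign=1, exp=0, mant=8 decodes to -3.0.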
- /************************************************************************
- *
- * This union is used to to encode/decode the cond, nzcv and imm5 values for
- * instructions that use them in the small constant immediate field
- */
+/************************************************************************
+*
+* This union is used to encode/decode the cond, nzcv and imm5 values for
+* instructions that use them in the small constant immediate field
+*/
- union condFlagsImm
+union condFlagsImm {
+ struct
{
- struct {
- insCond cond :4; // bits 0..3
- insCflags flags:4; // bits 4..7
- unsigned imm5 :5; // bits 8..12
- };
- unsigned immCFVal; // concat imm5:flags:cond forming an 13-bit unsigned immediate
+ insCond cond : 4; // bits 0..3
+ insCflags flags : 4; // bits 4..7
+ unsigned imm5 : 5; // bits 8..12
};
+ unsigned immCFVal; // concat imm5:flags:cond forming a 13-bit unsigned immediate
+};
- // Returns an encoding for the specified register used in the 'Rd' position
- static code_t insEncodeReg_Rd(regNumber reg);
+// Returns an encoding for the specified register used in the 'Rd' position
+static code_t insEncodeReg_Rd(regNumber reg);
- // Returns an encoding for the specified register used in the 'Rt' position
- static code_t insEncodeReg_Rt(regNumber reg);
+// Returns an encoding for the specified register used in the 'Rt' position
+static code_t insEncodeReg_Rt(regNumber reg);
- // Returns an encoding for the specified register used in the 'Rn' position
- static code_t insEncodeReg_Rn(regNumber reg);
+// Returns an encoding for the specified register used in the 'Rn' position
+static code_t insEncodeReg_Rn(regNumber reg);
- // Returns an encoding for the specified register used in the 'Rm' position
- static code_t insEncodeReg_Rm(regNumber reg);
+// Returns an encoding for the specified register used in the 'Rm' position
+static code_t insEncodeReg_Rm(regNumber reg);
- // Returns an encoding for the specified register used in the 'Ra' position
- static code_t insEncodeReg_Ra(regNumber reg);
+// Returns an encoding for the specified register used in the 'Ra' position
+static code_t insEncodeReg_Ra(regNumber reg);
- // Returns an encoding for the specified register used in the 'Vd' position
- static code_t insEncodeReg_Vd(regNumber reg);
+// Returns an encoding for the specified register used in the 'Vd' position
+static code_t insEncodeReg_Vd(regNumber reg);
- // Returns an encoding for the specified register used in the 'Vt' position
- static code_t insEncodeReg_Vt(regNumber reg);
+// Returns an encoding for the specified register used in the 'Vt' position
+static code_t insEncodeReg_Vt(regNumber reg);
- // Returns an encoding for the specified register used in the 'Vn' position
- static code_t insEncodeReg_Vn(regNumber reg);
+// Returns an encoding for the specified register used in the 'Vn' position
+static code_t insEncodeReg_Vn(regNumber reg);
- // Returns an encoding for the specified register used in the 'Vm' position
- static code_t insEncodeReg_Vm(regNumber reg);
+// Returns an encoding for the specified register used in the 'Vm' position
+static code_t insEncodeReg_Vm(regNumber reg);
- // Returns an encoding for the specified register used in the 'Va' position
- static code_t insEncodeReg_Va(regNumber reg);
+// Returns an encoding for the specified register used in the 'Va' position
+static code_t insEncodeReg_Va(regNumber reg);
- // Returns an encoding for the imm which represents the condition code.
- static code_t insEncodeCond(insCond cond);
+// Returns an encoding for the imm which represents the condition code.
+static code_t insEncodeCond(insCond cond);
- // Returns an encoding for the imm whioch represents the 'condition code'
- // with the lowest bit inverted (marked by invert(<cond>) in the architecture manual.
- static code_t insEncodeInvertedCond(insCond cond);
+// Returns an encoding for the imm which represents the 'condition code'
+// with the lowest bit inverted (marked by invert(<cond>) in the architecture manual).
+static code_t insEncodeInvertedCond(insCond cond);
- // Returns an encoding for the imm which represents the flags.
- static code_t insEncodeFlags(insCflags flags);
+// Returns an encoding for the imm which represents the flags.
+static code_t insEncodeFlags(insCflags flags);
- // Returns the encoding for the Shift Count bits to be used for Arm64 encodings
- static code_t insEncodeShiftCount(ssize_t imm, emitAttr size);
+// Returns the encoding for the Shift Count bits to be used for Arm64 encodings
+static code_t insEncodeShiftCount(ssize_t imm, emitAttr size);
- // Returns the encoding to select the datasize for most Arm64 instructions
- static code_t insEncodeDatasize(emitAttr size);
+// Returns the encoding to select the datasize for most Arm64 instructions
+static code_t insEncodeDatasize(emitAttr size);
- // Returns the encoding to select the datasize for the general load/store Arm64 instructions
- static code_t insEncodeDatasizeLS(code_t code, emitAttr size);
+// Returns the encoding to select the datasize for the general load/store Arm64 instructions
+static code_t insEncodeDatasizeLS(code_t code, emitAttr size);
- // Returns the encoding to select the datasize for the vector load/store Arm64 instructions
- static code_t insEncodeDatasizeVLS(code_t code, emitAttr size);
+// Returns the encoding to select the datasize for the vector load/store Arm64 instructions
+static code_t insEncodeDatasizeVLS(code_t code, emitAttr size);
- // Returns the encoding to select the datasize for the vector load/store pair Arm64 instructions
- static code_t insEncodeDatasizeVPLS(code_t code, emitAttr size);
+// Returns the encoding to select the datasize for the vector load/store pair Arm64 instructions
+static code_t insEncodeDatasizeVPLS(code_t code, emitAttr size);
- // Returns the encoding to select the datasize for bitfield Arm64 instructions
- static code_t insEncodeDatasizeBF(code_t code, emitAttr size);
+// Returns the encoding to select the datasize for bitfield Arm64 instructions
+static code_t insEncodeDatasizeBF(code_t code, emitAttr size);
- // Returns the encoding to select the vectorsize for SIMD Arm64 instructions
- static code_t insEncodeVectorsize(emitAttr size);
+// Returns the encoding to select the vectorsize for SIMD Arm64 instructions
+static code_t insEncodeVectorsize(emitAttr size);
- // Returns the encoding to select 'index' for an Arm64 vector elem instruction
- static code_t insEncodeVectorIndex(emitAttr elemsize, ssize_t index);
+// Returns the encoding to select 'index' for an Arm64 vector elem instruction
+static code_t insEncodeVectorIndex(emitAttr elemsize, ssize_t index);
- // Returns the encoding to select 'index2' for an Arm64 'ins' elem instruction
- static code_t insEncodeVectorIndex2(emitAttr elemsize, ssize_t index2);
+// Returns the encoding to select 'index2' for an Arm64 'ins' elem instruction
+static code_t insEncodeVectorIndex2(emitAttr elemsize, ssize_t index2);
- // Returns the encoding to select 'index' for an Arm64 'mul' elem instruction
- static code_t insEncodeVectorIndexLMH(emitAttr elemsize, ssize_t index);
+// Returns the encoding to select 'index' for an Arm64 'mul' elem instruction
+static code_t insEncodeVectorIndexLMH(emitAttr elemsize, ssize_t index);
- // Returns the encoding to shift by 'shift' bits for an Arm64 vector or scalar instruction
- static code_t insEncodeVectorShift(emitAttr size, ssize_t shift);
+// Returns the encoding to shift by 'shift' bits for an Arm64 vector or scalar instruction
+static code_t insEncodeVectorShift(emitAttr size, ssize_t shift);
- // Returns the encoding to select the 1/2/4/8 byte elemsize for an Arm64 vector instruction
- static code_t insEncodeElemsize(emitAttr size);
+// Returns the encoding to select the 1/2/4/8 byte elemsize for an Arm64 vector instruction
+static code_t insEncodeElemsize(emitAttr size);
- // Returns the encoding to select the 4/8 byte elemsize for an Arm64 float vector instruction
- static code_t insEncodeFloatElemsize(emitAttr size);
+// Returns the encoding to select the 4/8 byte elemsize for an Arm64 float vector instruction
+static code_t insEncodeFloatElemsize(emitAttr size);
- // Returns the encoding to select the index for an Arm64 float vector by elem instruction
- static code_t insEncodeFloatIndex(emitAttr elemsize, ssize_t index);
+// Returns the encoding to select the index for an Arm64 float vector by elem instruction
+static code_t insEncodeFloatIndex(emitAttr elemsize, ssize_t index);
- // Returns the encoding to select the 'conversion' operation for a type 'fmt' Arm64 instruction
- static code_t insEncodeConvertOpt(insFormat fmt, insOpts conversion);
+// Returns the encoding to select the 'conversion' operation for a type 'fmt' Arm64 instruction
+static code_t insEncodeConvertOpt(insFormat fmt, insOpts conversion);
- // Returns the encoding to have the Rn register of a ld/st reg be Pre/Post/Not indexed updated
- static code_t insEncodeIndexedOpt(insOpts opt);
+// Returns the encoding to have the Rn register of a ld/st reg be Pre/Post/Not indexed updated
+static code_t insEncodeIndexedOpt(insOpts opt);
- // Returns the encoding to have the Rn register of a ld/st pair be Pre/Post/Not indexed updated
- static code_t insEncodePairIndexedOpt(instruction ins, insOpts opt);
+// Returns the encoding to have the Rn register of a ld/st pair be Pre/Post/Not indexed updated
+static code_t insEncodePairIndexedOpt(instruction ins, insOpts opt);
- // Returns the encoding to apply a Shift Type on the Rm register
- static code_t insEncodeShiftType(insOpts opt);
+// Returns the encoding to apply a Shift Type on the Rm register
+static code_t insEncodeShiftType(insOpts opt);
- // Returns the encoding to apply a 12 bit left shift to the immediate
- static code_t insEncodeShiftImm12(insOpts opt);
+// Returns the encoding to apply a 12 bit left shift to the immediate
+static code_t insEncodeShiftImm12(insOpts opt);
- // Returns the encoding to have the Rm register use an extend operation
- static code_t insEncodeExtend(insOpts opt);
+// Returns the encoding to have the Rm register use an extend operation
+static code_t insEncodeExtend(insOpts opt);
- // Returns the encoding to scale the Rm register by {0,1,2,3,4} in an extend operation
- static code_t insEncodeExtendScale(ssize_t imm);
+// Returns the encoding to scale the Rm register by {0,1,2,3,4} in an extend operation
+static code_t insEncodeExtendScale(ssize_t imm);
- // Returns the encoding to have the Rm register be auto scaled by the ld/st size
- static code_t insEncodeReg3Scale(bool isScaled);
+// Returns the encoding to have the Rm register be auto scaled by the ld/st size
+static code_t insEncodeReg3Scale(bool isScaled);
- // Returns true if 'reg' represents an integer register.
- static bool isIntegerRegister (regNumber reg)
- { return (reg >= REG_INT_FIRST) && (reg <= REG_INT_LAST); }
+// Returns true if 'reg' represents an integer register.
+static bool isIntegerRegister(regNumber reg)
+{
+ return (reg >= REG_INT_FIRST) && (reg <= REG_INT_LAST);
+}
- // Returns true if 'value' is a legal unsigned immediate 8 bit encoding (such as for fMOV).
- static bool isValidUimm8(ssize_t value)
- { return (0 <= value) && (value <= 0xFFLL); };
+// Returns true if 'value' is a legal unsigned immediate 8 bit encoding (such as for fMOV).
+static bool isValidUimm8(ssize_t value)
+{
+ return (0 <= value) && (value <= 0xFFLL);
+};
- // Returns true if 'value' is a legal unsigned immediate 12 bit encoding (such as for CMP, CMN).
- static bool isValidUimm12(ssize_t value)
- { return (0 <= value) && (value <= 0xFFFLL); };
+// Returns true if 'value' is a legal unsigned immediate 12 bit encoding (such as for CMP, CMN).
+static bool isValidUimm12(ssize_t value)
+{
+ return (0 <= value) && (value <= 0xFFFLL);
+};
- // Returns true if 'value' is a legal unsigned immediate 16 bit encoding (such as for MOVZ, MOVN, MOVK).
- static bool isValidUimm16(ssize_t value)
- { return (0 <= value) && (value <= 0xFFFFLL); };
+// Returns true if 'value' is a legal unsigned immediate 16 bit encoding (such as for MOVZ, MOVN, MOVK).
+static bool isValidUimm16(ssize_t value)
+{
+ return (0 <= value) && (value <= 0xFFFFLL);
+};
- // Returns true if 'value' is a legal signed immediate 26 bit encoding (such as for B or BL).
- static bool isValidSimm26(ssize_t value)
- { return (-0x2000000LL <= value) && (value <= 0x1FFFFFFLL); };
+// Returns true if 'value' is a legal signed immediate 26 bit encoding (such as for B or BL).
+static bool isValidSimm26(ssize_t value)
+{
+ return (-0x2000000LL <= value) && (value <= 0x1FFFFFFLL);
+};
- // Returns true if 'value' is a legal signed immediate 19 bit encoding (such as for B.cond, CBNZ, CBZ).
- static bool isValidSimm19(ssize_t value)
- { return (-0x40000LL <= value) && (value <= 0x3FFFFLL); };
+// Returns true if 'value' is a legal signed immediate 19 bit encoding (such as for B.cond, CBNZ, CBZ).
+static bool isValidSimm19(ssize_t value)
+{
+ return (-0x40000LL <= value) && (value <= 0x3FFFFLL);
+};
- // Returns true if 'value' is a legal signed immediate 14 bit encoding (such as for TBNZ, TBZ).
- static bool isValidSimm14(ssize_t value)
- { return (-0x2000LL <= value) && (value <= 0x1FFFLL); };
+// Returns true if 'value' is a legal signed immediate 14 bit encoding (such as for TBNZ, TBZ).
+static bool isValidSimm14(ssize_t value)
+{
+ return (-0x2000LL <= value) && (value <= 0x1FFFLL);
+};
- // Returns true if 'value' represents a valid 'bitmask immediate' encoding.
- static bool isValidImmNRS (size_t value, emitAttr size)
- { return (value >= 0) && (value < 0x2000); } // any unsigned 13-bit immediate
+// Returns true if 'value' represents a valid 'bitmask immediate' encoding.
+static bool isValidImmNRS(size_t value, emitAttr size)
+{
+ return (value >= 0) && (value < 0x2000);
+} // any unsigned 13-bit immediate
- // Returns true if 'value' represents a valid 'halfword immediate' encoding.
- static bool isValidImmHWVal (size_t value, emitAttr size)
- { return (value >= 0) && (value < 0x40000); } // any unsigned 18-bit immediate
+// Returns true if 'value' represents a valid 'halfword immediate' encoding.
+static bool isValidImmHWVal(size_t value, emitAttr size)
+{
+ return (value >= 0) && (value < 0x40000);
+} // any unsigned 18-bit immediate
- // Returns true if 'value' represents a valid 'byteShifted immediate' encoding.
- static bool isValidImmBSVal (size_t value, emitAttr size)
- { return (value >= 0) && (value < 0x800); } // any unsigned 11-bit immediate
+// Returns true if 'value' represents a valid 'byteShifted immediate' encoding.
+static bool isValidImmBSVal(size_t value, emitAttr size)
+{
+ return (value >= 0) && (value < 0x800);
+} // any unsigned 11-bit immediate
- // The return value replaces REG_ZR with REG_SP
- static regNumber encodingZRtoSP(regNumber reg)
- { return (reg == REG_ZR) ? REG_SP : reg; } // ZR (R31) encodes the SP register
+// The return value replaces REG_ZR with REG_SP
+static regNumber encodingZRtoSP(regNumber reg)
+{
+ return (reg == REG_ZR) ? REG_SP : reg;
+} // ZR (R31) encodes the SP register
- // The return value replaces REG_SP with REG_ZR
- static regNumber encodingSPtoZR(regNumber reg)
- { return (reg == REG_SP) ? REG_ZR : reg; } // SP is encoded using ZR (R31)
+// The return value replaces REG_SP with REG_ZR
+static regNumber encodingSPtoZR(regNumber reg)
+{
+ return (reg == REG_SP) ? REG_ZR : reg;
+} // SP is encoded using ZR (R31)
- // For the given 'ins' returns the reverse instruction, if one exists, otherwise returns INS_INVALID
- static instruction insReverse(instruction ins);
+// For the given 'ins' returns the reverse instruction, if one exists, otherwise returns INS_INVALID
+static instruction insReverse(instruction ins);
- // For the given 'datasize' and 'elemsize' returns the insOpts that specifies the vector register arrangement
- static insOpts optMakeArrangement(emitAttr datasize, emitAttr elemsize);
+// For the given 'datasize' and 'elemsize' returns the insOpts that specifies the vector register arrangement
+static insOpts optMakeArrangement(emitAttr datasize, emitAttr elemsize);
- // For the given 'datasize' and 'opt' returns true if it specifies a valid vector register arrangement
- static bool isValidArrangement(emitAttr datasize, insOpts opt);
+// For the given 'datasize' and 'opt' returns true if it specifies a valid vector register arrangement
+static bool isValidArrangement(emitAttr datasize, insOpts opt);
- // For the given 'arrangement' returns the 'datasize' specified by the vector register arrangement
- static emitAttr optGetDatasize(insOpts arrangement);
+// For the given 'arrangement' returns the 'datasize' specified by the vector register arrangement
+static emitAttr optGetDatasize(insOpts arrangement);
- // For the given 'arrangement' returns the 'elemsize' specified by the vector register arrangement
- static emitAttr optGetElemsize(insOpts arrangement);
+// For the given 'arrangement' returns the 'elemsize' specified by the vector register arrangement
+static emitAttr optGetElemsize(insOpts arrangement);
- // For the given 'arrangement' returns the 'widen-arrangement' specified by the vector register arrangement
- static insOpts optWidenElemsize(insOpts arrangement);
+// For the given 'arrangement' returns the 'widen-arrangement' specified by the vector register arrangement
+static insOpts optWidenElemsize(insOpts arrangement);
- // For the given 'conversion' returns the 'dstsize' specified by the conversion option
- static emitAttr optGetDstsize(insOpts conversion);
+// For the given 'conversion' returns the 'dstsize' specified by the conversion option
+static emitAttr optGetDstsize(insOpts conversion);
- // For the given 'conversion' returns the 'srcsize' specified by the conversion option
- static emitAttr optGetSrcsize(insOpts conversion);
+// For the given 'conversion' returns the 'srcsize' specified by the conversion option
+static emitAttr optGetSrcsize(insOpts conversion);
- // For the given 'datasize', 'elemsize' and 'index' returns true, if it specifies a valid 'index'
- // for an element of size 'elemsize' in a vector register of size 'datasize'
- static bool isValidVectorIndex(emitAttr datasize, emitAttr elemsize, ssize_t index);
+// For the given 'datasize', 'elemsize' and 'index' returns true, if it specifies a valid 'index'
+// for an element of size 'elemsize' in a vector register of size 'datasize'
+static bool isValidVectorIndex(emitAttr datasize, emitAttr elemsize, ssize_t index);
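Aside (not part of this change): the numeric bounds used by the isValidSimm*/isValidUimm* predicates above are simply the two's-complement ranges of an N-bit field. A minimal standalone sketch of that arithmetic, not the JIT's code:

    // A signed N-bit field holds [-2^(N-1), 2^(N-1)-1]; an unsigned one holds [0, 2^N-1].
    #include <cassert>
    #include <cstdint>

    static bool isValidSimmN(int64_t value, unsigned n)
    {
        const int64_t lo = -(int64_t(1) << (n - 1));
        const int64_t hi = (int64_t(1) << (n - 1)) - 1;
        return (lo <= value) && (value <= hi);
    }

    static bool isValidUimmN(int64_t value, unsigned n)
    {
        return (0 <= value) && (value <= (int64_t(1) << n) - 1);
    }

    int main()
    {
        // Reproduces the constants above for simm26 (B/BL) and uimm12 (CMP/CMN).
        assert(isValidSimmN(0x1FFFFFF, 26) && !isValidSimmN(0x2000000, 26));
        assert(isValidUimmN(0xFFF, 12) && !isValidUimmN(0x1000, 12));
        return 0;
    }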
- /************************************************************************/
- /* Public inline informational methods */
- /************************************************************************/
+/************************************************************************/
+/* Public inline informational methods */
+/************************************************************************/
public:
+// true if this 'imm' can be encoded as an input operand to a mov instruction
+static bool emitIns_valid_imm_for_mov(INT64 imm, emitAttr size);
- // true if this 'imm' can be encoded as an input operand to a mov instruction
- static bool emitIns_valid_imm_for_mov(INT64 imm, emitAttr size);
+// true if this 'imm' can be encoded as an input operand to a vector movi instruction
+static bool emitIns_valid_imm_for_movi(INT64 imm, emitAttr size);
- // true if this 'imm' can be encoded as an input operand to a vector movi instruction
- static bool emitIns_valid_imm_for_movi(INT64 imm, emitAttr size);
+// true if this 'immDbl' can be encoded as an input operand to a fmov instruction
+static bool emitIns_valid_imm_for_fmov(double immDbl);
- // true if this 'immDbl' can be encoded as an input operand to a fmov instruction
- static bool emitIns_valid_imm_for_fmov(double immDbl);
+// true if this 'imm' can be encoded as an input operand to an add instruction
+static bool emitIns_valid_imm_for_add(INT64 imm, emitAttr size);
- // true if this 'imm' can be encoded as an input operand to an add instruction
- static bool emitIns_valid_imm_for_add(INT64 imm, emitAttr size);
+// true if this 'imm' can be encoded as an input operand to a cmp instruction
+static bool emitIns_valid_imm_for_cmp(INT64 imm, emitAttr size);
- // true if this 'imm' can be encoded as an input operand to a cmp instruction
- static bool emitIns_valid_imm_for_cmp(INT64 imm, emitAttr size);
+// true if this 'imm' can be encoded as an input operand to an alu instruction
+static bool emitIns_valid_imm_for_alu(INT64 imm, emitAttr size);
- // true if this 'imm' can be encoded as an input operand to an alu instruction
- static bool emitIns_valid_imm_for_alu(INT64 imm, emitAttr size);
+// true if this 'imm' can be encoded as the offset in a ldr/str instruction
+static bool emitIns_valid_imm_for_ldst_offset(INT64 imm, emitAttr size);
- // true if this 'imm' can be encoded as the offset in a ldr/str instruction
- static bool emitIns_valid_imm_for_ldst_offset(INT64 imm, emitAttr size);
+// true if 'imm' can use the left shifted by 12 bits encoding
+static bool canEncodeWithShiftImmBy12(INT64 imm);
- // true if 'imm' can use the left shifted by 12 bits encoding
- static bool canEncodeWithShiftImmBy12(INT64 imm);
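Aside (not the JIT's implementation): the "left shifted by 12 bits" form referenced by canEncodeWithShiftImmBy12 and insEncodeShiftImm12 above is the AArch64 add/sub/cmp immediate with an optional LSL #12, which lets any multiple of 4096 up to 0xFFF000 be encoded in a single instruction. A standalone sketch of that idea (the real predicate may differ in detail):

    #include <cstdint>
    #include <cstdio>

    // Plain 12-bit unsigned immediate.
    static bool fitsUimm12(int64_t imm)
    {
        return (imm >= 0) && (imm <= 0xFFF);
    }

    // Qualifies for the LSL #12 form: the low 12 bits are zero and the
    // shifted-down value still fits the plain 12-bit field.
    static bool fitsUimm12ShiftedBy12(int64_t imm)
    {
        return ((imm & 0xFFF) == 0) && fitsUimm12(imm >> 12);
    }

    int main()
    {
        printf("%d %d\n", (int)fitsUimm12(0x1000), (int)fitsUimm12ShiftedBy12(0x1000)); // prints "0 1"
        return 0;
    }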
+// Normalize the 'imm' so that the upper bits, as defined by 'size' are zero
+static INT64 normalizeImm64(INT64 imm, emitAttr size);
- // Normalize the 'imm' so that the upper bits, as defined by 'size' are zero
- static INT64 normalizeImm64(INT64 imm, emitAttr size);
+// Normalize the 'imm' so that the upper bits, as defined by 'size' are zero
+static INT32 normalizeImm32(INT32 imm, emitAttr size);
- // Normalize the 'imm' so that the upper bits, as defined by 'size' are zero
- static INT32 normalizeImm32(INT32 imm, emitAttr size);
+// true if 'imm' can be encoded using a 'bitmask immediate', also returns the encoding if wbBMI is non-null
+static bool canEncodeBitMaskImm(INT64 imm, emitAttr size, emitter::bitMaskImm* wbBMI = nullptr);
- // true if 'imm' can be encoded using a 'bitmask immediate', also returns the encoding if wbBMI is non-null
- static bool canEncodeBitMaskImm(INT64 imm, emitAttr size,
- emitter::bitMaskImm* wbBMI = nullptr);
+// true if 'imm' can be encoded using a 'halfword immediate', also returns the encoding if wbHWI is non-null
+static bool canEncodeHalfwordImm(INT64 imm, emitAttr size, emitter::halfwordImm* wbHWI = nullptr);
- // true if 'imm' can be encoded using a 'halfword immediate', also returns the encoding if wbHWI is non-null
- static bool canEncodeHalfwordImm(INT64 imm, emitAttr size,
- emitter::halfwordImm* wbHWI = nullptr);
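Aside (an editorial reading, not the JIT's algorithm): a "halfword immediate" in the movz/movn/movk sense is a constant that a single 16-bit halfword write can materialize, so a 64-bit value qualifies when every halfword but one is 0x0000 (movz), or 0xFFFF for the inverted movn form. A hedged standalone sketch of the 64-bit case only; canEncodeHalfwordImm above additionally handles operand size and writes back the actual encoding:

    #include <cstdint>

    // True when all 16-bit halfwords of 'imm' except at most one are zero (movz case).
    static bool encodableByMovz(uint64_t imm)
    {
        for (int shift = 0; shift < 64; shift += 16)
        {
            if ((imm & ~(0xFFFFULL << shift)) == 0)
            {
                return true;
            }
        }
        return false;
    }

    // The movn form writes the bitwise NOT of a single halfword.
    static bool encodableByMovn(uint64_t imm)
    {
        return encodableByMovz(~imm);
    }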
+// true if 'imm' can be encoded using a 'byteShifted immediate', also returns the encoding if wbBSI is non-null
+static bool canEncodeByteShiftedImm(INT64 imm, emitAttr size, bool allow_MSL, emitter::byteShiftedImm* wbBSI = nullptr);
- // true if 'imm' can be encoded using a 'byteShifted immediate', also returns the encoding if wbBSI is non-null
- static bool canEncodeByteShiftedImm(INT64 imm, emitAttr size, bool allow_MSL,
- emitter::byteShiftedImm* wbBSI = nullptr);
+// true if 'immDbl' can be encoded using a 'float immediate', also returns the encoding if wbFPI is non-null
+static bool canEncodeFloatImm8(double immDbl, emitter::floatImm8* wbFPI = nullptr);
- // true if 'immDbl' can be encoded using a 'float immediate', also returns the encoding if wbFPI is non-null
- static bool canEncodeFloatImm8(double immDbl,
- emitter::floatImm8* wbFPI = nullptr);
+// Returns the number of bits used by the given 'size'.
+inline static unsigned getBitWidth(emitAttr size)
+{
+ assert(size <= EA_8BYTE);
+ return (unsigned)size * BITS_PER_BYTE;
+}
- // Returns the number of bits used by the given 'size'.
- inline static unsigned getBitWidth (emitAttr size)
- { assert(size <= EA_8BYTE); return (unsigned)size * BITS_PER_BYTE; }
+// Returns true if the imm represents a valid bit shift or bit position for the given 'size' [0..31] or [0..63]
+inline static unsigned isValidImmShift(ssize_t imm, emitAttr size)
+{
+ return (imm >= 0) && (imm < getBitWidth(size));
+}
- // Returns true if the imm represents a valid bit shift or bit position for the given 'size' [0..31] or [0..63]
- inline static unsigned isValidImmShift (ssize_t imm, emitAttr size)
- { return (imm >= 0) && (imm < getBitWidth(size)); }
+inline static bool isValidGeneralDatasize(emitAttr size)
+{
+ return (size == EA_8BYTE) || (size == EA_4BYTE);
+}
- inline static bool isValidGeneralDatasize (emitAttr size)
- { return (size == EA_8BYTE) || (size == EA_4BYTE); }
+inline static bool isValidScalarDatasize(emitAttr size)
+{
+ return (size == EA_8BYTE) || (size == EA_4BYTE);
+}
- inline static bool isValidScalarDatasize (emitAttr size)
- { return (size == EA_8BYTE) || (size == EA_4BYTE); }
+inline static bool isValidVectorDatasize(emitAttr size)
+{
+ return (size == EA_16BYTE) || (size == EA_8BYTE);
+}
- inline static bool isValidVectorDatasize (emitAttr size)
- { return (size == EA_16BYTE) || (size == EA_8BYTE); }
+inline static bool isValidGeneralLSDatasize(emitAttr size)
+{
+ return (size == EA_8BYTE) || (size == EA_4BYTE) || (size == EA_2BYTE) || (size == EA_1BYTE);
+}
- inline static bool isValidGeneralLSDatasize (emitAttr size)
- { return (size == EA_8BYTE) || (size == EA_4BYTE) ||
- (size == EA_2BYTE) || (size == EA_1BYTE); }
+inline static bool isValidVectorLSDatasize(emitAttr size)
+{
+ return (size == EA_16BYTE) || (size == EA_8BYTE) || (size == EA_4BYTE) || (size == EA_2BYTE) || (size == EA_1BYTE);
+}
- inline static bool isValidVectorLSDatasize (emitAttr size)
- { return (size == EA_16BYTE) ||
- (size == EA_8BYTE) || (size == EA_4BYTE) ||
- (size == EA_2BYTE) || (size == EA_1BYTE); }
+inline static bool isValidVectorLSPDatasize(emitAttr size)
+{
+ return (size == EA_16BYTE) || (size == EA_8BYTE) || (size == EA_4BYTE);
+}
- inline static bool isValidVectorLSPDatasize (emitAttr size)
- { return (size == EA_16BYTE) ||
- (size == EA_8BYTE) || (size == EA_4BYTE); }
+inline static bool isValidVectorElemsize(emitAttr size)
+{
+ return (size == EA_8BYTE) || (size == EA_4BYTE) || (size == EA_2BYTE) || (size == EA_1BYTE);
+}
- inline static bool isValidVectorElemsize (emitAttr size)
- { return (size == EA_8BYTE) || (size == EA_4BYTE) ||
- (size == EA_2BYTE) || (size == EA_1BYTE); }
+inline static bool isValidVectorFcvtsize(emitAttr size)
+{
+ return (size == EA_8BYTE) || (size == EA_4BYTE) || (size == EA_2BYTE);
+}
- inline static bool isValidVectorFcvtsize(emitAttr size)
- { return (size == EA_8BYTE) || (size == EA_4BYTE) ||
- (size == EA_2BYTE); }
+inline static bool isValidVectorElemsizeFloat(emitAttr size)
+{
+ return (size == EA_8BYTE) || (size == EA_4BYTE);
+}
- inline static bool isValidVectorElemsizeFloat (emitAttr size)
- { return (size == EA_8BYTE) || (size == EA_4BYTE); }
+inline static bool isGeneralRegister(regNumber reg)
+{
+ return (reg >= REG_INT_FIRST) && (reg <= REG_LR);
+} // Excludes REG_ZR
- inline static bool isGeneralRegister (regNumber reg)
- { return (reg >= REG_INT_FIRST) && (reg <= REG_LR); } // Excludes REG_ZR
+inline static bool isGeneralRegisterOrZR(regNumber reg)
+{
+ return (reg >= REG_INT_FIRST) && (reg <= REG_ZR);
+} // Includes REG_ZR
- inline static bool isGeneralRegisterOrZR (regNumber reg)
- { return (reg >= REG_INT_FIRST) && (reg <= REG_ZR); } // Includes REG_ZR
+inline static bool isGeneralRegisterOrSP(regNumber reg)
+{
+ return isGeneralRegister(reg) || (reg == REG_SP);
+} // Includes REG_SP, Excludes REG_ZR
- inline static bool isGeneralRegisterOrSP (regNumber reg)
- { return isGeneralRegister(reg) || (reg == REG_SP); } // Includes REG_SP, Excludes REG_ZR
+inline static bool isVectorRegister(regNumber reg)
+{
+ return (reg >= REG_FP_FIRST && reg <= REG_FP_LAST);
+}
- inline static bool isVectorRegister (regNumber reg)
- { return (reg >= REG_FP_FIRST && reg <= REG_FP_LAST); }
+inline static bool isFloatReg(regNumber reg)
+{
+ return isVectorRegister(reg);
+}
- inline static bool isFloatReg (regNumber reg)
- { return isVectorRegister(reg); }
-
- inline static bool insOptsNone (insOpts opt)
- { return (opt == INS_OPTS_NONE); }
+inline static bool insOptsNone(insOpts opt)
+{
+ return (opt == INS_OPTS_NONE);
+}
- inline static bool insOptsIndexed (insOpts opt)
- { return (opt == INS_OPTS_PRE_INDEX) ||
- (opt == INS_OPTS_POST_INDEX); }
+inline static bool insOptsIndexed(insOpts opt)
+{
+ return (opt == INS_OPTS_PRE_INDEX) || (opt == INS_OPTS_POST_INDEX);
+}
- inline static bool insOptsPreIndex (insOpts opt)
- { return (opt == INS_OPTS_PRE_INDEX); }
+inline static bool insOptsPreIndex(insOpts opt)
+{
+ return (opt == INS_OPTS_PRE_INDEX);
+}
- inline static bool insOptsPostIndex (insOpts opt)
- { return (opt == INS_OPTS_POST_INDEX); }
+inline static bool insOptsPostIndex(insOpts opt)
+{
+ return (opt == INS_OPTS_POST_INDEX);
+}
- inline static bool insOptsLSL12 (insOpts opt) // special 12-bit shift only used for imm12
- { return (opt == INS_OPTS_LSL12); }
+inline static bool insOptsLSL12(insOpts opt) // special 12-bit shift only used for imm12
+{
+ return (opt == INS_OPTS_LSL12);
+}
- inline static bool insOptsAnyShift (insOpts opt)
- { return ((opt >= INS_OPTS_LSL) &&
- (opt <= INS_OPTS_ROR) ); }
+inline static bool insOptsAnyShift(insOpts opt)
+{
+ return ((opt >= INS_OPTS_LSL) && (opt <= INS_OPTS_ROR));
+}
- inline static bool insOptsAluShift (insOpts opt) // excludes ROR
- { return ((opt >= INS_OPTS_LSL) &&
- (opt <= INS_OPTS_ASR) ); }
+inline static bool insOptsAluShift(insOpts opt) // excludes ROR
+{
+ return ((opt >= INS_OPTS_LSL) && (opt <= INS_OPTS_ASR));
+}
- inline static bool insOptsVectorImmShift (insOpts opt)
- { return ((opt == INS_OPTS_LSL) ||
- (opt == INS_OPTS_MSL) ); }
+inline static bool insOptsVectorImmShift(insOpts opt)
+{
+ return ((opt == INS_OPTS_LSL) || (opt == INS_OPTS_MSL));
+}
- inline static bool insOptsLSL (insOpts opt)
- { return (opt == INS_OPTS_LSL); }
+inline static bool insOptsLSL(insOpts opt)
+{
+ return (opt == INS_OPTS_LSL);
+}
- inline static bool insOptsLSR (insOpts opt)
- { return (opt == INS_OPTS_LSR); }
+inline static bool insOptsLSR(insOpts opt)
+{
+ return (opt == INS_OPTS_LSR);
+}
- inline static bool insOptsASR (insOpts opt)
- { return (opt == INS_OPTS_ASR); }
+inline static bool insOptsASR(insOpts opt)
+{
+ return (opt == INS_OPTS_ASR);
+}
- inline static bool insOptsROR (insOpts opt)
- { return (opt == INS_OPTS_ROR); }
+inline static bool insOptsROR(insOpts opt)
+{
+ return (opt == INS_OPTS_ROR);
+}
- inline static bool insOptsAnyExtend (insOpts opt)
- { return ((opt >= INS_OPTS_UXTB) &&
- (opt <= INS_OPTS_SXTX) ); }
+inline static bool insOptsAnyExtend(insOpts opt)
+{
+ return ((opt >= INS_OPTS_UXTB) && (opt <= INS_OPTS_SXTX));
+}
- inline static bool insOptsLSExtend (insOpts opt)
- { return ((opt == INS_OPTS_NONE) || (opt == INS_OPTS_LSL) ||
- (opt == INS_OPTS_UXTW) || (opt == INS_OPTS_SXTW) ||
- (opt == INS_OPTS_UXTX) || (opt == INS_OPTS_SXTX) ); }
+inline static bool insOptsLSExtend(insOpts opt)
+{
+ return ((opt == INS_OPTS_NONE) || (opt == INS_OPTS_LSL) || (opt == INS_OPTS_UXTW) || (opt == INS_OPTS_SXTW) ||
+ (opt == INS_OPTS_UXTX) || (opt == INS_OPTS_SXTX));
+}
- inline static bool insOpts32BitExtend (insOpts opt)
- { return ((opt == INS_OPTS_UXTW) || (opt == INS_OPTS_SXTW)); }
+inline static bool insOpts32BitExtend(insOpts opt)
+{
+ return ((opt == INS_OPTS_UXTW) || (opt == INS_OPTS_SXTW));
+}
- inline static bool insOpts64BitExtend (insOpts opt)
- { return ((opt == INS_OPTS_UXTX) || (opt == INS_OPTS_SXTX)); }
+inline static bool insOpts64BitExtend(insOpts opt)
+{
+ return ((opt == INS_OPTS_UXTX) || (opt == INS_OPTS_SXTX));
+}
- inline static bool insOptsAnyArrangement(insOpts opt)
- { return ((opt >= INS_OPTS_8B) && (opt <= INS_OPTS_2D)); }
+inline static bool insOptsAnyArrangement(insOpts opt)
+{
+ return ((opt >= INS_OPTS_8B) && (opt <= INS_OPTS_2D));
+}
- inline static bool insOptsConvertFloatToFloat (insOpts opt)
- { return ((opt >= INS_OPTS_S_TO_D) && (opt <= INS_OPTS_D_TO_H)); }
+inline static bool insOptsConvertFloatToFloat(insOpts opt)
+{
+ return ((opt >= INS_OPTS_S_TO_D) && (opt <= INS_OPTS_D_TO_H));
+}
- inline static bool insOptsConvertFloatToInt (insOpts opt)
- { return ((opt >= INS_OPTS_S_TO_4BYTE) && (opt <= INS_OPTS_D_TO_8BYTE)); }
+inline static bool insOptsConvertFloatToInt(insOpts opt)
+{
+ return ((opt >= INS_OPTS_S_TO_4BYTE) && (opt <= INS_OPTS_D_TO_8BYTE));
+}
- inline static bool insOptsConvertIntToFloat (insOpts opt)
- { return ((opt >= INS_OPTS_4BYTE_TO_S) && (opt <= INS_OPTS_8BYTE_TO_D)); }
+inline static bool insOptsConvertIntToFloat(insOpts opt)
+{
+ return ((opt >= INS_OPTS_4BYTE_TO_S) && (opt <= INS_OPTS_8BYTE_TO_D));
+}
- static bool isValidImmCond (ssize_t imm);
- static bool isValidImmCondFlags (ssize_t imm);
- static bool isValidImmCondFlagsImm5 (ssize_t imm);
+static bool isValidImmCond(ssize_t imm);
+static bool isValidImmCondFlags(ssize_t imm);
+static bool isValidImmCondFlagsImm5(ssize_t imm);
- /************************************************************************/
- /* The public entry points to output instructions */
- /************************************************************************/
+/************************************************************************/
+/* The public entry points to output instructions */
+/************************************************************************/
public:
+void emitIns(instruction ins);
- void emitIns (instruction ins);
-
- void emitIns_I (instruction ins,
- emitAttr attr,
- ssize_t imm);
-
- void emitIns_R (instruction ins,
- emitAttr attr,
- regNumber reg);
-
- void emitIns_R_I (instruction ins,
- emitAttr attr,
- regNumber reg,
- ssize_t imm,
- insOpts opt = INS_OPTS_NONE);
-
- void emitIns_R_F (instruction ins,
- emitAttr attr,
- regNumber reg,
- double immDbl,
- insOpts opt = INS_OPTS_NONE);
-
- void emitIns_R_R (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- insOpts opt = INS_OPTS_NONE);
-
- void emitIns_R_R (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- insFlags flags)
- {
- emitIns_R_R(ins,attr,reg1,reg2);
- }
-
- void emitIns_R_I_I (instruction ins,
- emitAttr attr,
- regNumber reg1,
- ssize_t imm1,
- ssize_t imm2,
- insOpts opt = INS_OPTS_NONE);
-
- void emitIns_R_R_I (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- ssize_t imm,
- insOpts opt = INS_OPTS_NONE);
-
- // Checks for a large immediate that needs a second instruction
- void emitIns_R_R_Imm(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- ssize_t imm);
-
- void emitIns_R_R_R (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- insOpts opt = INS_OPTS_NONE);
-
- void emitIns_R_R_R_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- ssize_t imm,
- insOpts opt = INS_OPTS_NONE);
-
- void emitIns_R_R_R_Ext(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- insOpts opt = INS_OPTS_NONE,
- int shiftAmount = -1);
-
- void emitIns_R_R_I_I(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- int imm1,
- int imm2);
-
- void emitIns_R_R_R_R(instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- regNumber reg4);
-
- void emitIns_R_COND (instruction ins,
- emitAttr attr,
- regNumber reg,
- insCond cond);
-
- void emitIns_R_R_COND (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- insCond cond);
-
- void emitIns_R_R_R_COND (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- insCond cond);
-
- void emitIns_R_R_FLAGS_COND (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- insCflags flags,
- insCond cond);
-
- void emitIns_R_I_FLAGS_COND (instruction ins,
- emitAttr attr,
- regNumber reg1,
- int imm,
- insCflags flags,
- insCond cond);
-
- void emitIns_BARR (instruction ins,
- insBarrier barrier);
-
- void emitIns_C (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fdlHnd,
- int offs);
-
- void emitIns_S (instruction ins,
- emitAttr attr,
- int varx,
- int offs);
-
- void emitIns_S_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- int varx,
- int offs);
-
- void emitIns_R_S (instruction ins,
- emitAttr attr,
- regNumber ireg,
- int varx,
- int offs);
-
- void emitIns_S_I (instruction ins,
- emitAttr attr,
- int varx,
- int offs,
- int val);
-
- void emitIns_R_C (instruction ins,
- emitAttr attr,
- regNumber reg,
- regNumber tmpReg,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs);
-
- void emitIns_C_R (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- regNumber reg,
- int offs);
-
- void emitIns_C_I (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fdlHnd,
- ssize_t offs,
- ssize_t val);
-
- void emitIns_R_L (instruction ins,
- emitAttr attr,
- BasicBlock * dst,
- regNumber reg);
-
- void emitIns_R_D (instruction ins,
- emitAttr attr,
- unsigned offs,
- regNumber reg);
-
- void emitIns_J_R (instruction ins,
- emitAttr attr,
- BasicBlock *dst,
- regNumber reg);
-
- void emitIns_I_AR (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- int offs,
- int memCookie = 0,
- void * clsCookie = NULL);
-
- void emitIns_R_AR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- int offs,
- int memCookie = 0,
- void * clsCookie = NULL);
-
- void emitIns_R_AI (instruction ins,
- emitAttr attr,
- regNumber ireg,
- ssize_t disp);
-
- void emitIns_AR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- int offs,
- int memCookie = 0,
- void * clsCookie = NULL);
-
- void emitIns_R_ARR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- int disp);
-
- void emitIns_ARR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- int disp);
-
- void emitIns_R_ARX (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- unsigned mul,
- int disp);
-
- enum EmitCallType
- {
+void emitIns_I(instruction ins, emitAttr attr, ssize_t imm);
+
+void emitIns_R(instruction ins, emitAttr attr, regNumber reg);
+
+void emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t imm, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_F(instruction ins, emitAttr attr, regNumber reg, double immDbl, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insFlags flags)
+{
+ emitIns_R_R(ins, attr, reg1, reg2);
+}
+
+void emitIns_R_I_I(
+ instruction ins, emitAttr attr, regNumber reg1, ssize_t imm1, ssize_t imm2, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R_I(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insOpts opt = INS_OPTS_NONE);
+
+// Checks for a large immediate that needs a second instruction
+void emitIns_R_R_Imm(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm);
+
+void emitIns_R_R_R(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R_R_I(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ ssize_t imm,
+ insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R_R_Ext(instruction ins,
+ emitAttr attr,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ insOpts opt = INS_OPTS_NONE,
+ int shiftAmount = -1);
+
+void emitIns_R_R_I_I(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int imm1, int imm2);
+
+void emitIns_R_R_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, regNumber reg4);
+
+void emitIns_R_COND(instruction ins, emitAttr attr, regNumber reg, insCond cond);
+
+void emitIns_R_R_COND(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insCond cond);
+
+void emitIns_R_R_R_COND(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, insCond cond);
+
+void emitIns_R_R_FLAGS_COND(
+ instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insCflags flags, insCond cond);
+
+void emitIns_R_I_FLAGS_COND(instruction ins, emitAttr attr, regNumber reg1, int imm, insCflags flags, insCond cond);
+
+void emitIns_BARR(instruction ins, insBarrier barrier);
+
+void emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fdlHnd, int offs);
+
+void emitIns_S(instruction ins, emitAttr attr, int varx, int offs);
+
+void emitIns_S_R(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs);
+
+void emitIns_R_S(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs);
+
+void emitIns_S_I(instruction ins, emitAttr attr, int varx, int offs, int val);
+
+void emitIns_R_C(
+ instruction ins, emitAttr attr, regNumber reg, regNumber tmpReg, CORINFO_FIELD_HANDLE fldHnd, int offs);
+
+void emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, regNumber reg, int offs);
+
+void emitIns_C_I(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fdlHnd, ssize_t offs, ssize_t val);
+
+void emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg);
+
+void emitIns_R_D(instruction ins, emitAttr attr, unsigned offs, regNumber reg);
+
+void emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg);
+
+void emitIns_I_AR(
+ instruction ins, emitAttr attr, int val, regNumber reg, int offs, int memCookie = 0, void* clsCookie = NULL);
+
+void emitIns_R_AR(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber reg, int offs, int memCookie = 0, void* clsCookie = NULL);
+
+void emitIns_R_AI(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp);
+
+void emitIns_AR_R(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber reg, int offs, int memCookie = 0, void* clsCookie = NULL);
+
+void emitIns_R_ARR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, int disp);
+
+void emitIns_ARR_R(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, int disp);
+
+void emitIns_R_ARX(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, unsigned mul, int disp);
+
+enum EmitCallType
+{
// I have included here, but commented out, all the values used by the x86 emitter.
// However, ARM has a much reduced instruction set, and so the ARM emitter only
@@ -869,66 +804,64 @@ public:
// and know why they are unavailable on ARM), while making it easier to stay
// in-sync with x86 and possibly add them back in if needed.
- EC_FUNC_TOKEN, // Direct call to a helper/static/nonvirtual/global method
- // EC_FUNC_TOKEN_INDIR, // Indirect call to a helper/static/nonvirtual/global method
- EC_FUNC_ADDR, // Direct call to an absolute address
+ EC_FUNC_TOKEN, // Direct call to a helper/static/nonvirtual/global method
+ // EC_FUNC_TOKEN_INDIR, // Indirect call to a helper/static/nonvirtual/global method
+ EC_FUNC_ADDR, // Direct call to an absolute address
// EC_FUNC_VIRTUAL, // Call to a virtual method (using the vtable)
- EC_INDIR_R, // Indirect call via register
- // EC_INDIR_SR, // Indirect call via stack-reference (local var)
- // EC_INDIR_C, // Indirect call via static class var
- // EC_INDIR_ARD, // Indirect call via an addressing mode
-
- EC_COUNT
- };
-
- void emitIns_Call (EmitCallType callType,
- CORINFO_METHOD_HANDLE methHnd,
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
- void* addr,
- ssize_t argSize,
- emitAttr retSize,
- emitAttr secondRetSize,
- VARSET_VALARG_TP ptrVars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- IL_OFFSETX ilOffset = BAD_IL_OFFSET,
- regNumber ireg = REG_NA,
- regNumber xreg = REG_NA,
- unsigned xmul = 0,
- ssize_t disp = 0,
- bool isJump = false,
- bool isNoGC = false,
- bool isProfLeaveCB = false);
-
- BYTE* emitOutputLJ (insGroup *ig, BYTE *dst, instrDesc *i);
- unsigned emitOutputCall(insGroup *ig, BYTE *dst, instrDesc *i, code_t code);
- BYTE* emitOutputLoadLabel(BYTE* dst, BYTE* srcAddr, BYTE* dstAddr, instrDescJmp* id);
- BYTE* emitOutputShortBranch(BYTE *dst, instruction ins, insFormat fmt, ssize_t distVal, instrDescJmp* id);
- BYTE* emitOutputShortAddress(BYTE *dst, instruction ins, insFormat fmt, ssize_t distVal, regNumber reg);
- BYTE* emitOutputShortConstant(BYTE *dst, instruction ins, insFormat fmt, ssize_t distVal, regNumber reg, emitAttr opSize);
+ EC_INDIR_R, // Indirect call via register
+ // EC_INDIR_SR, // Indirect call via stack-reference (local var)
+ // EC_INDIR_C, // Indirect call via static class var
+ // EC_INDIR_ARD, // Indirect call via an addressing mode
+
+ EC_COUNT
+};
+
+void emitIns_Call(EmitCallType callType,
+ CORINFO_METHOD_HANDLE methHnd,
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
+ void* addr,
+ ssize_t argSize,
+ emitAttr retSize,
+ emitAttr secondRetSize,
+ VARSET_VALARG_TP ptrVars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET,
+ regNumber ireg = REG_NA,
+ regNumber xreg = REG_NA,
+ unsigned xmul = 0,
+ ssize_t disp = 0,
+ bool isJump = false,
+ bool isNoGC = false,
+ bool isProfLeaveCB = false);
+
+BYTE* emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i);
+unsigned emitOutputCall(insGroup* ig, BYTE* dst, instrDesc* i, code_t code);
+BYTE* emitOutputLoadLabel(BYTE* dst, BYTE* srcAddr, BYTE* dstAddr, instrDescJmp* id);
+BYTE* emitOutputShortBranch(BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, instrDescJmp* id);
+BYTE* emitOutputShortAddress(BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, regNumber reg);
+BYTE* emitOutputShortConstant(
+ BYTE* dst, instruction ins, insFormat fmt, ssize_t distVal, regNumber reg, emitAttr opSize);
/*****************************************************************************
*
* Given an instrDesc, return true if it's a conditional jump.
*/
-inline bool emitIsCondJump(instrDesc *jmp)
+inline bool emitIsCondJump(instrDesc* jmp)
{
- return ((jmp->idInsFmt() == IF_BI_0B) ||
- (jmp->idInsFmt() == IF_LARGEJMP));
+ return ((jmp->idInsFmt() == IF_BI_0B) || (jmp->idInsFmt() == IF_LARGEJMP));
}
-
/*****************************************************************************
*
* Given an instrDesc, return true if it's a compare and jump.
*/
-inline bool emitIsCmpJump(instrDesc *jmp)
+inline bool emitIsCmpJump(instrDesc* jmp)
{
- return ((jmp->idInsFmt() == IF_BI_1A) ||
- (jmp->idInsFmt() == IF_BI_1B));
+ return ((jmp->idInsFmt() == IF_BI_1A) || (jmp->idInsFmt() == IF_BI_1B));
}
/*****************************************************************************
@@ -936,7 +869,7 @@ inline bool emitIsCmpJump(instrDesc *jmp)
* Given a instrDesc, return true if it's an unconditional jump.
*/
-inline bool emitIsUncondJump(instrDesc *jmp)
+inline bool emitIsUncondJump(instrDesc* jmp)
{
return (jmp->idInsFmt() == IF_BI_0A);
}
@@ -946,7 +879,7 @@ inline bool emitIsUncondJump(instrDesc *jmp)
* Given a instrDesc, return true if it's a direct call.
*/
-inline bool emitIsDirectCall(instrDesc *call)
+inline bool emitIsDirectCall(instrDesc* call)
{
return (call->idInsFmt() == IF_BI_0C);
}
@@ -956,7 +889,7 @@ inline bool emitIsDirectCall(instrDesc *call)
* Given a instrDesc, return true if it's a load label instruction.
*/
-inline bool emitIsLoadLabel(instrDesc *jmp)
+inline bool emitIsLoadLabel(instrDesc* jmp)
{
return ((jmp->idInsFmt() == IF_DI_1E) || // adr or adrp
(jmp->idInsFmt() == IF_LARGEADR));
@@ -967,7 +900,7 @@ inline bool emitIsLoadLabel(instrDesc *jmp)
* Given a instrDesc, return true if it's a load constant instruction.
*/
-inline bool emitIsLoadConstant(instrDesc *jmp)
+inline bool emitIsLoadConstant(instrDesc* jmp)
{
return ((jmp->idInsFmt() == IF_LS_1A) || // ldr
(jmp->idInsFmt() == IF_LARGELDC));
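Aside (not from the source): combining isValidSimm26 above ("such as for B or BL") with the architectural fact that AArch64 branch immediates are word offsets gives the familiar reach of a direct b/bl, roughly +/-128MB. A tiny worked computation:

    #include <cstdio>

    int main()
    {
        long long maxWords = (1LL << 25) - 1; // 0x1FFFFFF, cf. isValidSimm26
        long long maxBytes = maxWords * 4;    // each instruction slot is 4 bytes
        printf("%lld bytes (~%lld MB)\n", maxBytes, maxBytes >> 20); // ~128 MB forward
        return 0;
    }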
diff --git a/src/jit/emitdef.h b/src/jit/emitdef.h
index df39951884..f7f9325b79 100644
--- a/src/jit/emitdef.h
+++ b/src/jit/emitdef.h
@@ -14,9 +14,9 @@
#elif defined(_TARGET_ARM64_)
#include "emitarm64.h"
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif
/*****************************************************************************/
-#endif//_EMITDEF_H_
+#endif //_EMITDEF_H_
/*****************************************************************************/
diff --git a/src/jit/emitfmts.h b/src/jit/emitfmts.h
index 246a2a4efc..587033f2e9 100644
--- a/src/jit/emitfmts.h
+++ b/src/jit/emitfmts.h
@@ -10,5 +10,5 @@
#elif defined(_TARGET_ARM64_)
#include "emitfmtsarm64.h"
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif // target type
diff --git a/src/jit/emitfmtsarm64.h b/src/jit/emitfmtsarm64.h
index 3ea1168915..c4be8ae45a 100644
--- a/src/jit/emitfmtsarm64.h
+++ b/src/jit/emitfmtsarm64.h
@@ -3,23 +3,23 @@
// See the LICENSE file in the project root for more information.
//////////////////////////////////////////////////////////////////////////////
-//clang-format off
+// clang-format off
#if !defined(_TARGET_ARM64_)
- #error Unexpected target type
+#error Unexpected target type
#endif
-#ifdef DEFINE_ID_OPS
+#ifdef DEFINE_ID_OPS
//////////////////////////////////////////////////////////////////////////////
-#undef DEFINE_ID_OPS
+#undef DEFINE_ID_OPS
-enum ID_OPS
+enum ID_OPS
{
- ID_OP_NONE, // no additional arguments
- ID_OP_SCNS, // small const operand (21-bits or less, no reloc)
- ID_OP_JMP, // local jump
- ID_OP_CALL, // method call
- ID_OP_SPEC, // special handling required
+ ID_OP_NONE, // no additional arguments
+ ID_OP_SCNS, // small const operand (21-bits or less, no reloc)
+ ID_OP_JMP, // local jump
+ ID_OP_CALL, // method call
+ ID_OP_SPEC, // special handling required
};
//////////////////////////////////////////////////////////////////////////////
@@ -27,60 +27,60 @@ enum ID_OPS
//////////////////////////////////////////////////////////////////////////////
#ifndef IF_DEF
-#error Must define IF_DEF macro before including this file
+#error Must define IF_DEF macro before including this file
#endif
//////////////////////////////////////////////////////////////////////////////
//
-// enum insFormat instruction enum ID_OPS
-// scheduling
-// (unused)
+// enum insFormat instruction enum ID_OPS
+// scheduling
+// (unused)
//////////////////////////////////////////////////////////////////////////////
-IF_DEF(NONE, IS_NONE, NONE) //
+IF_DEF(NONE, IS_NONE, NONE) //
-IF_DEF(LABEL, IS_NONE, JMP ) // label
-IF_DEF(LARGEJMP, IS_NONE, JMP) // large conditional branch pseudo-op (cond branch + uncond branch)
-IF_DEF(LARGEADR, IS_NONE, JMP) // large address pseudo-op (adrp + add)
-IF_DEF(LARGELDC, IS_NONE, JMP) // large constant pseudo-op (adrp + ldr)
+IF_DEF(LABEL, IS_NONE, JMP) // label
+IF_DEF(LARGEJMP, IS_NONE, JMP) // large conditional branch pseudo-op (cond branch + uncond branch)
+IF_DEF(LARGEADR, IS_NONE, JMP) // large address pseudo-op (adrp + add)
+IF_DEF(LARGELDC, IS_NONE, JMP) // large constant pseudo-op (adrp + ldr)
/////////////////////////////////////////////////////////////////////////////////////////////////////////
-IF_DEF(EN9, IS_NONE, NONE) // Instruction has 9 possible encoding types
-IF_DEF(EN6A, IS_NONE, NONE) // Instruction has 6 possible encoding types, type A
-IF_DEF(EN5A, IS_NONE, NONE) // Instruction has 5 possible encoding types, type A
-IF_DEF(EN5B, IS_NONE, NONE) // Instruction has 5 possible encoding types, type B
-IF_DEF(EN5C, IS_NONE, NONE) // Instruction has 5 possible encoding types, type C
-IF_DEF(EN4A, IS_NONE, NONE) // Instruction has 4 possible encoding types, type A
-IF_DEF(EN4B, IS_NONE, NONE) // Instruction has 4 possible encoding types, type B
-IF_DEF(EN4C, IS_NONE, NONE) // Instruction has 4 possible encoding types, type C
-IF_DEF(EN4D, IS_NONE, NONE) // Instruction has 4 possible encoding types, type D
-IF_DEF(EN4E, IS_NONE, NONE) // Instruction has 4 possible encoding types, type E
-IF_DEF(EN4F, IS_NONE, NONE) // Instruction has 4 possible encoding types, type F
-IF_DEF(EN4G, IS_NONE, NONE) // Instruction has 4 possible encoding types, type G
-IF_DEF(EN3A, IS_NONE, NONE) // Instruction has 3 possible encoding types, type A
-IF_DEF(EN3B, IS_NONE, NONE) // Instruction has 3 possible encoding types, type B
-IF_DEF(EN3C, IS_NONE, NONE) // Instruction has 3 possible encoding types, type C
-IF_DEF(EN3D, IS_NONE, NONE) // Instruction has 3 possible encoding types, type D
-IF_DEF(EN3E, IS_NONE, NONE) // Instruction has 3 possible encoding types, type E
-IF_DEF(EN3F, IS_NONE, NONE) // Instruction has 3 possible encoding types, type F
-IF_DEF(EN3G, IS_NONE, NONE) // Instruction has 3 possible encoding types, type G
-IF_DEF(EN3H, IS_NONE, NONE) // Instruction has 3 possible encoding types, type H
-IF_DEF(EN3I, IS_NONE, NONE) // Instruction has 3 possible encoding types, type I
-IF_DEF(EN2A, IS_NONE, NONE) // Instruction has 2 possible encoding types, type A
-IF_DEF(EN2B, IS_NONE, NONE) // Instruction has 2 possible encoding types, type B
-IF_DEF(EN2C, IS_NONE, NONE) // Instruction has 2 possible encoding types, type C
-IF_DEF(EN2D, IS_NONE, NONE) // Instruction has 2 possible encoding types, type D
-IF_DEF(EN2E, IS_NONE, NONE) // Instruction has 2 possible encoding types, type E
-IF_DEF(EN2F, IS_NONE, NONE) // Instruction has 2 possible encoding types, type F
-IF_DEF(EN2G, IS_NONE, NONE) // Instruction has 2 possible encoding types, type G
-IF_DEF(EN2H, IS_NONE, NONE) // Instruction has 2 possible encoding types, type H
-IF_DEF(EN2I, IS_NONE, NONE) // Instruction has 2 possible encoding types, type I
-IF_DEF(EN2J, IS_NONE, NONE) // Instruction has 2 possible encoding types, type J
-IF_DEF(EN2K, IS_NONE, NONE) // Instruction has 2 possible encoding types, type K
-IF_DEF(EN2L, IS_NONE, NONE) // Instruction has 2 possible encoding types, type L
-IF_DEF(EN2M, IS_NONE, NONE) // Instruction has 2 possible encoding types, type M
-IF_DEF(EN2N, IS_NONE, NONE) // Instruction has 2 possible encoding types, type N
+IF_DEF(EN9, IS_NONE, NONE) // Instruction has 9 possible encoding types
+IF_DEF(EN6A, IS_NONE, NONE) // Instruction has 6 possible encoding types, type A
+IF_DEF(EN5A, IS_NONE, NONE) // Instruction has 5 possible encoding types, type A
+IF_DEF(EN5B, IS_NONE, NONE) // Instruction has 5 possible encoding types, type B
+IF_DEF(EN5C, IS_NONE, NONE) // Instruction has 5 possible encoding types, type C
+IF_DEF(EN4A, IS_NONE, NONE) // Instruction has 4 possible encoding types, type A
+IF_DEF(EN4B, IS_NONE, NONE) // Instruction has 4 possible encoding types, type B
+IF_DEF(EN4C, IS_NONE, NONE) // Instruction has 4 possible encoding types, type C
+IF_DEF(EN4D, IS_NONE, NONE) // Instruction has 4 possible encoding types, type D
+IF_DEF(EN4E, IS_NONE, NONE) // Instruction has 4 possible encoding types, type E
+IF_DEF(EN4F, IS_NONE, NONE) // Instruction has 4 possible encoding types, type F
+IF_DEF(EN4G, IS_NONE, NONE) // Instruction has 4 possible encoding types, type G
+IF_DEF(EN3A, IS_NONE, NONE) // Instruction has 3 possible encoding types, type A
+IF_DEF(EN3B, IS_NONE, NONE) // Instruction has 3 possible encoding types, type B
+IF_DEF(EN3C, IS_NONE, NONE) // Instruction has 3 possible encoding types, type C
+IF_DEF(EN3D, IS_NONE, NONE) // Instruction has 3 possible encoding types, type D
+IF_DEF(EN3E, IS_NONE, NONE) // Instruction has 3 possible encoding types, type E
+IF_DEF(EN3F, IS_NONE, NONE) // Instruction has 3 possible encoding types, type F
+IF_DEF(EN3G, IS_NONE, NONE) // Instruction has 3 possible encoding types, type G
+IF_DEF(EN3H, IS_NONE, NONE) // Instruction has 3 possible encoding types, type H
+IF_DEF(EN3I, IS_NONE, NONE) // Instruction has 3 possible encoding types, type I
+IF_DEF(EN2A, IS_NONE, NONE) // Instruction has 2 possible encoding types, type A
+IF_DEF(EN2B, IS_NONE, NONE) // Instruction has 2 possible encoding types, type B
+IF_DEF(EN2C, IS_NONE, NONE) // Instruction has 2 possible encoding types, type C
+IF_DEF(EN2D, IS_NONE, NONE) // Instruction has 2 possible encoding types, type D
+IF_DEF(EN2E, IS_NONE, NONE) // Instruction has 2 possible encoding types, type E
+IF_DEF(EN2F, IS_NONE, NONE) // Instruction has 2 possible encoding types, type F
+IF_DEF(EN2G, IS_NONE, NONE) // Instruction has 2 possible encoding types, type G
+IF_DEF(EN2H, IS_NONE, NONE) // Instruction has 2 possible encoding types, type H
+IF_DEF(EN2I, IS_NONE, NONE) // Instruction has 2 possible encoding types, type I
+IF_DEF(EN2J, IS_NONE, NONE) // Instruction has 2 possible encoding types, type J
+IF_DEF(EN2K, IS_NONE, NONE) // Instruction has 2 possible encoding types, type K
+IF_DEF(EN2L, IS_NONE, NONE) // Instruction has 2 possible encoding types, type L
+IF_DEF(EN2M, IS_NONE, NONE) // Instruction has 2 possible encoding types, type M
+IF_DEF(EN2N, IS_NONE, NONE) // Instruction has 2 possible encoding types, type N
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
@@ -92,7 +92,7 @@ IF_DEF(EN2N, IS_NONE, NONE) // Instruction has 2 possib
// (? is a unique letter A,B,C...)
//
// Below (Specifies an exact instruction encoding)
-//
+//
// -- the first two characters are
//
// DI :: Data Processing - Immediate
@@ -102,12 +102,12 @@ IF_DEF(EN2N, IS_NONE, NONE) // Instruction has 2 possib
// BI :: Branches - Immediate
// BR :: Branches - Register
// SN :: System - No Registers or Immediates
-// SI :: System - Immediate
+// SI :: System - Immediate
//
// _ :: a separator char '_'
//
// -- the next two characters are
-//
+//
// # :: number of registers in the encoding
// ? :: A unique letter A,B,C,...
// -- optional third character
@@ -115,90 +115,91 @@ IF_DEF(EN2N, IS_NONE, NONE) // Instruction has 2 possib
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
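As a worked illustration of the naming scheme just described (an editorial example, not part of the source comment):

    //   DR_3A -> DR = Data Processing - Register, 3 = three registers, A = variant letter  (Rd Rn Rm below)
    //   LS_2C -> LS = Load/Store,                 2 = two registers,   C = variant letter  (Rt Rn, pre/post inc below)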
-IF_DEF(BI_0A, IS_NONE, JMP) // BI_0A ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00 b
-IF_DEF(BI_0B, IS_NONE, JMP) // BI_0B ......iiiiiiiiii iiiiiiiiiii..... simm19:00 b<cond>
-IF_DEF(BI_0C, IS_NONE, CALL) // BI_0C ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00 bl
-IF_DEF(BI_1A, IS_NONE, JMP) // BI_1A X.......iiiiiiii iiiiiiiiiiittttt Rt simm19:00 cbz cbnz
-IF_DEF(BI_1B, IS_NONE, JMP) // BI_1B B.......bbbbbiii iiiiiiiiiiittttt Rt imm6 simm14:00 tbz tbnz
-IF_DEF(BR_1A, IS_NONE, CALL) // BR_1A ................ ......nnnnn..... Rn ret
-IF_DEF(BR_1B, IS_NONE, CALL) // BR_1B ................ ......nnnnn..... Rn br blr
-
-IF_DEF(LS_1A, IS_NONE, JMP) // LS_1A XX...V..iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB)
-IF_DEF(LS_2A, IS_NONE, NONE) // LS_2A .X.......X...... ......nnnnnttttt Rt Rn
-IF_DEF(LS_2B, IS_NONE, NONE) // LS_2B .X.......Xiiiiii iiiiiinnnnnttttt Rt Rn imm(0-4095)
-IF_DEF(LS_2C, IS_NONE, NONE) // LS_2C .X.......X.iiiii iiiiP.nnnnnttttt Rt Rn imm(-256..+255) pre/post inc
-IF_DEF(LS_3A, IS_NONE, NONE) // LS_3A .X.......X.mmmmm xxxS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
-IF_DEF(LS_3B, IS_NONE, NONE) // LS_3B X............... .aaaaannnnnddddd Rd Ra Rn
-IF_DEF(LS_3C, IS_NONE, NONE) // LS_3C X.........iiiiii iaaaaannnnnddddd Rd Ra Rn imm(im7,sh)
-
-IF_DEF(DI_1A, IS_NONE, NONE) // DI_1A X.......shiiiiii iiiiiinnnnn..... Rn imm(i12,sh)
-IF_DEF(DI_1B, IS_NONE, NONE) // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw)
-IF_DEF(DI_1C, IS_NONE, NONE) // DI_1C X........Nrrrrrr ssssssnnnnn..... Rn imm(N,r,s)
-IF_DEF(DI_1D, IS_NONE, NONE) // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s)
-IF_DEF(DI_1E, IS_NONE, JMP) // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
-IF_DEF(DI_1F, IS_NONE, NONE) // DI_1F X..........iiiii cccc..nnnnn.nzcv Rn imm5 nzcv cond
-
-IF_DEF(DI_2A, IS_NONE, NONE) // DI_2A X.......shiiiiii iiiiiinnnnnddddd Rd Rn imm(i12,sh)
-IF_DEF(DI_2B, IS_NONE, NONE) // DI_2B X.........Xnnnnn ssssssnnnnnddddd Rd Rn imm(0-63)
-IF_DEF(DI_2C, IS_NONE, NONE) // DI_2C X........Nrrrrrr ssssssnnnnnddddd Rd Rn imm(N,r,s)
-IF_DEF(DI_2D, IS_NONE, NONE) // DI_2D X........Nrrrrrr ssssssnnnnnddddd Rd Rn imr, imms (N,r,s)
-
-IF_DEF(DR_1D, IS_NONE, NONE) // DR_1D X............... cccc.......ddddd Rd cond
-
-IF_DEF(DR_2A, IS_NONE, NONE) // DR_2A X..........mmmmm ......nnnnn..... Rn Rm
-IF_DEF(DR_2B, IS_NONE, NONE) // DR_2B X.......sh.mmmmm ssssssnnnnn..... Rn Rm {LSL,LSR,ASR} imm(0-63)
-IF_DEF(DR_2C, IS_NONE, NONE) // DR_2C X..........mmmmm xxxsssnnnnn..... Rn Rm ext(Rm) LSL imm(0-4)
-IF_DEF(DR_2D, IS_NONE, NONE) // DR_2D X..........nnnnn cccc..nnnnnddddd Rd Rn cond
-IF_DEF(DR_2E, IS_NONE, NONE) // DR_2E X..........mmmmm ...........ddddd Rd Rm
-IF_DEF(DR_2F, IS_NONE, NONE) // DR_2F X.......sh.mmmmm ssssss.....ddddd Rd Rm {LSL,LSR,ASR} imm(0-63)
-IF_DEF(DR_2G, IS_NONE, NONE) // DR_2G X............... ......nnnnnddddd Rd Rn
-IF_DEF(DR_2H, IS_NONE, NONE) // DR_2H X........X...... ......nnnnnddddd Rd Rn
-IF_DEF(DR_2I, IS_NONE, NONE) // DR_2I X..........mmmmm cccc..nnnnn.nzcv Rn Rm nzcv cond
-
-IF_DEF(DR_3A, IS_NONE, NONE) // DR_3A X..........mmmmm ......nnnnnddddd Rd Rn Rm
-IF_DEF(DR_3B, IS_NONE, NONE) // DR_3B X.......sh.mmmmm ssssssnnnnnddddd Rd Rn Rm {LSL,LSR,ASR} imm(0-63)
-IF_DEF(DR_3C, IS_NONE, NONE) // DR_3C X..........mmmmm xxxsssnnnnnddddd Rd Rn Rm ext(Rm) LSL imm(0-4)
-IF_DEF(DR_3D, IS_NONE, NONE) // DR_3D X..........mmmmm cccc..nnnnnddddd Rd Rn Rm cond
-IF_DEF(DR_3E, IS_NONE, NONE) // DR_3E X........X.mmmmm ssssssnnnnnddddd Rd Rn Rm imm(0-63)
-
-IF_DEF(DR_4A, IS_NONE, NONE) // DR_4A X..........mmmmm .aaaaannnnnddddd Rd Rn Rm Ra
-
-IF_DEF(DV_1A, IS_NONE, NONE) // DV_1A .........X.iiiii iii........ddddd Vd imm8 (fmov - immediate scalar)
-IF_DEF(DV_1B, IS_NONE, NONE) // DV_1B .QX..........iii jjjj..iiiiiddddd Vd imm8 (fmov/movi - immediate vector)
-IF_DEF(DV_1C, IS_NONE, NONE) // DV_1C .........X...... ......nnnnn..... Vn #0.0 (fcmp - with zero)
-
-IF_DEF(DV_2A, IS_NONE, NONE) // DV_2A .Q.......X...... ......nnnnnddddd Vd Vn (fabs, fcvtXX - vector)
-IF_DEF(DV_2B, IS_NONE, NONE) // DV_2B .Q.........iiiii ......nnnnnddddd Rd Vn[] (umov/smov - to general)
-IF_DEF(DV_2C, IS_NONE, NONE) // DV_2C .Q.........iiiii ......nnnnnddddd Vd Rn (dup/ins - vector from general)
-IF_DEF(DV_2D, IS_NONE, NONE) // DV_2D .Q.........iiiii ......nnnnnddddd Vd Vn[] (dup - vector)
-IF_DEF(DV_2E, IS_NONE, NONE) // DV_2E ...........iiiii ......nnnnnddddd Vd Vn[] (dup - scalar)
-IF_DEF(DV_2F, IS_NONE, NONE) // DV_2F ...........iiiii .jjjj.nnnnnddddd Vd[] Vn[] (ins - element)
-IF_DEF(DV_2G, IS_NONE, NONE) // DV_2G .........X...... ......nnnnnddddd Vd Vn (fmov, fcvtXX - register)
-IF_DEF(DV_2H, IS_NONE, NONE) // DV_2H X........X...... ......nnnnnddddd Rd Vn (fmov, fcvtXX - to general)
-IF_DEF(DV_2I, IS_NONE, NONE) // DV_2I X........X...... ......nnnnnddddd Vd Rn (fmov, fcvtXX - from general)
-IF_DEF(DV_2J, IS_NONE, NONE) // DV_2J .........d...... D.....nnnnnddddd Vd Vn (fcvt)
-IF_DEF(DV_2K, IS_NONE, NONE) // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
-IF_DEF(DV_2L, IS_NONE, NONE) // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
-IF_DEF(DV_2M, IS_NONE, NONE) // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
-IF_DEF(DV_2N, IS_NONE, NONE) // DV_2N .........iiiiiii ......nnnnnddddd Vd Vn imm (shift - scalar)
-IF_DEF(DV_2O, IS_NONE, NONE) // DV_2O .Q.......iiiiiii ......nnnnnddddd Vd Vn imm (shift - vector)
-
-IF_DEF(DV_3A, IS_NONE, NONE) // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
-IF_DEF(DV_3AI, IS_NONE, NONE) // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
-IF_DEF(DV_3B, IS_NONE, NONE) // DV_3B .Q.......X.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
-IF_DEF(DV_3BI, IS_NONE, NONE) // DV_3BI .Q.......XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
-IF_DEF(DV_3C, IS_NONE, NONE) // DV_3C .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
-IF_DEF(DV_3D, IS_NONE, NONE) // DV_3D .........X.mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
-IF_DEF(DV_3DI, IS_NONE, NONE) // DV_3DI .........XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (scalar by elem)
-IF_DEF(DV_3E, IS_NONE, NONE) // DV_3E ...........mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
-
-IF_DEF(DV_4A, IS_NONE, NONE) // DV_4A .........X.mmmmm .aaaaannnnnddddd Vd Vn Vm Va (scalar)
-
-IF_DEF(SN_0A, IS_NONE, NONE) // SN_0A ................ ................
-IF_DEF(SI_0A, IS_NONE, NONE) // SI_0A ...........iiiii iiiiiiiiiii..... imm16
-IF_DEF(SI_0B, IS_NONE, NONE) // SI_0B ................ ....bbbb........ imm4 - barrier
-
-IF_DEF(INVALID, IS_NONE, NONE) //
+IF_DEF(BI_0A, IS_NONE, JMP) // BI_0A ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00 b
+IF_DEF(BI_0B, IS_NONE, JMP) // BI_0B ......iiiiiiiiii iiiiiiiiiii..... simm19:00 b<cond>
+IF_DEF(BI_0C, IS_NONE, CALL) // BI_0C ......iiiiiiiiii iiiiiiiiiiiiiiii simm26:00 bl
+IF_DEF(BI_1A, IS_NONE, JMP) // BI_1A X.......iiiiiiii iiiiiiiiiiittttt Rt simm19:00 cbz cbnz
+IF_DEF(BI_1B, IS_NONE, JMP) // BI_1B B.......bbbbbiii iiiiiiiiiiittttt Rt imm6 simm14:00 tbz tbnz
+IF_DEF(BR_1A, IS_NONE, CALL) // BR_1A ................ ......nnnnn..... Rn ret
+IF_DEF(BR_1B, IS_NONE, CALL) // BR_1B ................ ......nnnnn..... Rn br blr
+
+IF_DEF(LS_1A, IS_NONE, JMP) // LS_1A XX...V..iiiiiiii iiiiiiiiiiittttt Rt PC imm(1MB)
+IF_DEF(LS_2A, IS_NONE, NONE) // LS_2A .X.......X...... ......nnnnnttttt Rt Rn
+IF_DEF(LS_2B, IS_NONE, NONE) // LS_2B .X.......Xiiiiii iiiiiinnnnnttttt Rt Rn imm(0-4095)
+IF_DEF(LS_2C, IS_NONE, NONE) // LS_2C .X.......X.iiiii iiiiP.nnnnnttttt Rt Rn imm(-256..+255) pre/post inc
+IF_DEF(LS_3A, IS_NONE, NONE) // LS_3A .X.......X.mmmmm xxxS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
+IF_DEF(LS_3B, IS_NONE, NONE) // LS_3B X............... .aaaaannnnnddddd Rd Ra Rn
+IF_DEF(LS_3C, IS_NONE, NONE) // LS_3C X.........iiiiii iaaaaannnnnddddd Rd Ra Rn imm(im7,sh)
+
+IF_DEF(DI_1A, IS_NONE, NONE) // DI_1A X.......shiiiiii iiiiiinnnnn..... Rn imm(i12,sh)
+IF_DEF(DI_1B, IS_NONE, NONE) // DI_1B X........hwiiiii iiiiiiiiiiiddddd Rd imm(i16,hw)
+IF_DEF(DI_1C, IS_NONE, NONE) // DI_1C X........Nrrrrrr ssssssnnnnn..... Rn imm(N,r,s)
+IF_DEF(DI_1D, IS_NONE, NONE) // DI_1D X........Nrrrrrr ssssss.....ddddd Rd imm(N,r,s)
+IF_DEF(DI_1E, IS_NONE, JMP) // DI_1E .ii.....iiiiiiii iiiiiiiiiiiddddd Rd simm21
+IF_DEF(DI_1F, IS_NONE, NONE) // DI_1F X..........iiiii cccc..nnnnn.nzcv Rn imm5 nzcv cond
+
+IF_DEF(DI_2A, IS_NONE, NONE) // DI_2A X.......shiiiiii iiiiiinnnnnddddd Rd Rn imm(i12,sh)
+IF_DEF(DI_2B, IS_NONE, NONE) // DI_2B X.........Xnnnnn ssssssnnnnnddddd Rd Rn imm(0-63)
+IF_DEF(DI_2C, IS_NONE, NONE) // DI_2C X........Nrrrrrr ssssssnnnnnddddd Rd Rn imm(N,r,s)
+IF_DEF(DI_2D, IS_NONE, NONE) // DI_2D X........Nrrrrrr ssssssnnnnnddddd Rd Rn imr, imms (N,r,s)
+
+IF_DEF(DR_1D, IS_NONE, NONE) // DR_1D X............... cccc.......ddddd Rd cond
+
+IF_DEF(DR_2A, IS_NONE, NONE) // DR_2A X..........mmmmm ......nnnnn..... Rn Rm
+IF_DEF(DR_2B, IS_NONE, NONE) // DR_2B X.......sh.mmmmm ssssssnnnnn..... Rn Rm {LSL,LSR,ASR} imm(0-63)
+IF_DEF(DR_2C, IS_NONE, NONE) // DR_2C X..........mmmmm xxxsssnnnnn..... Rn Rm ext(Rm) LSL imm(0-4)
+IF_DEF(DR_2D, IS_NONE, NONE) // DR_2D X..........nnnnn cccc..nnnnnddddd Rd Rn cond
+IF_DEF(DR_2E, IS_NONE, NONE) // DR_2E X..........mmmmm ...........ddddd Rd Rm
+IF_DEF(DR_2F, IS_NONE, NONE) // DR_2F X.......sh.mmmmm ssssss.....ddddd Rd Rm {LSL,LSR,ASR} imm(0-63)
+IF_DEF(DR_2G, IS_NONE, NONE) // DR_2G X............... ......nnnnnddddd Rd Rn
+IF_DEF(DR_2H, IS_NONE, NONE) // DR_2H X........X...... ......nnnnnddddd Rd Rn
+IF_DEF(DR_2I, IS_NONE, NONE) // DR_2I X..........mmmmm cccc..nnnnn.nzcv Rn Rm nzcv cond
+
+IF_DEF(DR_3A, IS_NONE, NONE) // DR_3A X..........mmmmm ......nnnnnddddd Rd Rn Rm
+IF_DEF(DR_3B, IS_NONE, NONE) // DR_3B X.......sh.mmmmm ssssssnnnnnddddd Rd Rn Rm {LSL,LSR,ASR} imm(0-63)
+IF_DEF(DR_3C, IS_NONE, NONE) // DR_3C X..........mmmmm xxxsssnnnnnddddd Rd Rn Rm ext(Rm) LSL imm(0-4)
+IF_DEF(DR_3D, IS_NONE, NONE) // DR_3D X..........mmmmm cccc..nnnnnddddd Rd Rn Rm cond
+IF_DEF(DR_3E, IS_NONE, NONE) // DR_3E X........X.mmmmm ssssssnnnnnddddd Rd Rn Rm imm(0-63)
+
+IF_DEF(DR_4A, IS_NONE, NONE) // DR_4A X..........mmmmm .aaaaannnnnddddd Rd Rn Rm Ra
+
+IF_DEF(DV_1A, IS_NONE, NONE) // DV_1A .........X.iiiii iii........ddddd Vd imm8 (fmov - immediate scalar)
+IF_DEF(DV_1B, IS_NONE, NONE) // DV_1B .QX..........iii jjjj..iiiiiddddd Vd imm8 (fmov/movi - immediate vector)
+IF_DEF(DV_1C, IS_NONE, NONE) // DV_1C .........X...... ......nnnnn..... Vn #0.0 (fcmp - with zero)
+
+IF_DEF(DV_2A, IS_NONE, NONE) // DV_2A .Q.......X...... ......nnnnnddddd Vd Vn (fabs, fcvtXX - vector)
+IF_DEF(DV_2B, IS_NONE, NONE) // DV_2B .Q.........iiiii ......nnnnnddddd Rd Vn[] (umov/smov - to general)
+IF_DEF(DV_2C, IS_NONE, NONE) // DV_2C .Q.........iiiii ......nnnnnddddd Vd Rn (dup/ins - vector from
+ // general)
+IF_DEF(DV_2D, IS_NONE, NONE) // DV_2D .Q.........iiiii ......nnnnnddddd Vd Vn[] (dup - vector)
+IF_DEF(DV_2E, IS_NONE, NONE) // DV_2E ...........iiiii ......nnnnnddddd Vd Vn[] (dup - scalar)
+IF_DEF(DV_2F, IS_NONE, NONE) // DV_2F ...........iiiii .jjjj.nnnnnddddd Vd[] Vn[] (ins - element)
+IF_DEF(DV_2G, IS_NONE, NONE) // DV_2G .........X...... ......nnnnnddddd Vd Vn (fmov, fcvtXX - register)
+IF_DEF(DV_2H, IS_NONE, NONE) // DV_2H X........X...... ......nnnnnddddd Rd Vn (fmov, fcvtXX - to general)
+IF_DEF(DV_2I, IS_NONE, NONE) // DV_2I X........X...... ......nnnnnddddd Vd Rn (fmov, fcvtXX - from general)
+IF_DEF(DV_2J, IS_NONE, NONE) // DV_2J .........d...... D.....nnnnnddddd Vd Vn (fcvt)
+IF_DEF(DV_2K, IS_NONE, NONE) // DV_2K .........X.mmmmm ......nnnnn..... Vn Vm (fcmp)
+IF_DEF(DV_2L, IS_NONE, NONE) // DV_2L ........XX...... ......nnnnnddddd Vd Vn (abs, neg - scalar)
+IF_DEF(DV_2M, IS_NONE, NONE) // DV_2M .Q......XX...... ......nnnnnddddd Vd Vn (abs, neg - vector)
+IF_DEF(DV_2N, IS_NONE, NONE) // DV_2N .........iiiiiii ......nnnnnddddd Vd Vn imm (shift - scalar)
+IF_DEF(DV_2O, IS_NONE, NONE) // DV_2O .Q.......iiiiiii ......nnnnnddddd Vd Vn imm (shift - vector)
+
+IF_DEF(DV_3A, IS_NONE, NONE) // DV_3A .Q......XX.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+IF_DEF(DV_3AI, IS_NONE, NONE) // DV_3AI .Q......XXLMmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
+IF_DEF(DV_3B, IS_NONE, NONE) // DV_3B .Q.......X.mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+IF_DEF(DV_3BI, IS_NONE, NONE) // DV_3BI .Q.......XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (vector by elem)
+IF_DEF(DV_3C, IS_NONE, NONE) // DV_3C .Q.........mmmmm ......nnnnnddddd Vd Vn Vm (vector)
+IF_DEF(DV_3D, IS_NONE, NONE) // DV_3D .........X.mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
+IF_DEF(DV_3DI, IS_NONE, NONE) // DV_3DI .........XLmmmmm ....H.nnnnnddddd Vd Vn Vm[] (scalar by elem)
+IF_DEF(DV_3E, IS_NONE, NONE) // DV_3E ...........mmmmm ......nnnnnddddd Vd Vn Vm (scalar)
+
+IF_DEF(DV_4A, IS_NONE, NONE) // DV_4A .........X.mmmmm .aaaaannnnnddddd Vd Vn Vm Va (scalar)
+
+IF_DEF(SN_0A, IS_NONE, NONE) // SN_0A ................ ................
+IF_DEF(SI_0A, IS_NONE, NONE) // SI_0A ...........iiiii iiiiiiiiiii..... imm16
+IF_DEF(SI_0B, IS_NONE, NONE) // SI_0B ................ ....bbbb........ imm4 - barrier
+
+IF_DEF(INVALID, IS_NONE, NONE) //
//////////////////////////////////////////////////////////////////////////////
#undef IF_DEF
diff --git a/src/jit/emitinl.h b/src/jit/emitinl.h
index 7bbbf06998..302b8ea448 100644
--- a/src/jit/emitinl.h
+++ b/src/jit/emitinl.h
@@ -12,24 +12,20 @@
* produce.
*/
-inline
-UNATIVE_OFFSET emitter::emitInstCodeSz(instrDesc *id)
+inline UNATIVE_OFFSET emitter::emitInstCodeSz(instrDesc* id)
{
- return id->idCodeSize();
+ return id->idCodeSize();
}
-inline
-UNATIVE_OFFSET emitter::emitSizeOfJump(instrDescJmp *jmp)
+inline UNATIVE_OFFSET emitter::emitSizeOfJump(instrDescJmp* jmp)
{
- return jmp->idCodeSize();
+ return jmp->idCodeSize();
}
-
#ifdef _TARGET_XARCH_
/* static */
-inline
-bool emitter::instrIs3opImul(instruction ins)
+inline bool emitter::instrIs3opImul(instruction ins)
{
#ifdef _TARGET_X86_
return ((ins >= INS_imul_AX) && (ins <= INS_imul_DI));
@@ -39,8 +35,7 @@ bool emitter::instrIs3opImul(instruction ins)
}
/* static */
-inline
-bool emitter::instrIsExtendedReg3opImul(instruction ins)
+inline bool emitter::instrIsExtendedReg3opImul(instruction ins)
{
#ifdef _TARGET_X86_
return false;
@@ -50,8 +45,7 @@ bool emitter::instrIsExtendedReg3opImul(instruction ins)
}
/* static */
-inline
-bool emitter::instrHasImplicitRegPairDest(instruction ins)
+inline bool emitter::instrHasImplicitRegPairDest(instruction ins)
{
return (ins == INS_mulEAX) || (ins == INS_imulEAX) || (ins == INS_div) || (ins == INS_idiv);
}
@@ -60,8 +54,7 @@ bool emitter::instrHasImplicitRegPairDest(instruction ins)
// multiplies we fake it with special opcodes. Make sure they are
// contiguous.
/* static */
-inline
-void emitter::check3opImulValues()
+inline void emitter::check3opImulValues()
{
assert(INS_imul_AX - INS_imul_AX == REG_EAX);
assert(INS_imul_BX - INS_imul_AX == REG_EBX);
@@ -88,8 +81,7 @@ void emitter::check3opImulValues()
*/
/* static */
-inline
-instruction emitter::inst3opImulForReg(regNumber reg)
+inline instruction emitter::inst3opImulForReg(regNumber reg)
{
assert(genIsValidIntReg(reg));
@@ -106,10 +98,9 @@ instruction emitter::inst3opImulForReg(regNumber reg)
*/
/* static */
-inline
-regNumber emitter::inst3opImulReg(instruction ins)
+inline regNumber emitter::inst3opImulReg(instruction ins)
{
- regNumber reg = ((regNumber) (ins - INS_imul_AX));
+ regNumber reg = ((regNumber)(ins - INS_imul_AX));
assert(genIsValidIntReg(reg));
@@ -127,16 +118,14 @@ regNumber emitter::inst3opImulReg(instruction ins)
* get stored in different places within the instruction descriptor.
*/
-inline ssize_t emitter::emitGetInsAmd (instrDesc *id)
+inline ssize_t emitter::emitGetInsAmd(instrDesc* id)
{
- return id->idIsLargeDsp() ? ((instrDescAmd*)id)->idaAmdVal
- : id->idAddr()->iiaAddrMode.amDisp;
+ return id->idIsLargeDsp() ? ((instrDescAmd*)id)->idaAmdVal : id->idAddr()->iiaAddrMode.amDisp;
}
-inline
-int emitter::emitGetInsCDinfo(instrDesc *id)
+inline int emitter::emitGetInsCDinfo(instrDesc* id)
{
- if (id->idIsLargeCall())
+ if (id->idIsLargeCall())
{
return ((instrDescCGCA*)id)->idcArgCnt;
}
@@ -153,75 +142,77 @@ int emitter::emitGetInsCDinfo(instrDesc *id)
}
}
-inline void emitter::emitGetInsCns (instrDesc *id, CnsVal *cv)
+inline void emitter::emitGetInsCns(instrDesc* id, CnsVal* cv)
{
#ifdef RELOC_SUPPORT
- cv->cnsReloc = id ->idIsCnsReloc();
+ cv->cnsReloc = id->idIsCnsReloc();
#endif
- if (id->idIsLargeCns())
+ if (id->idIsLargeCns())
{
- cv->cnsVal = ((instrDescCns*)id)->idcCnsVal;
+ cv->cnsVal = ((instrDescCns*)id)->idcCnsVal;
}
else
{
- cv->cnsVal = id ->idSmallCns();
+ cv->cnsVal = id->idSmallCns();
}
}
-inline ssize_t emitter::emitGetInsAmdCns(instrDesc *id, CnsVal *cv)
+inline ssize_t emitter::emitGetInsAmdCns(instrDesc* id, CnsVal* cv)
{
#ifdef RELOC_SUPPORT
- cv->cnsReloc = id ->idIsCnsReloc();
+ cv->cnsReloc = id->idIsCnsReloc();
#endif
- if (id->idIsLargeDsp())
+ if (id->idIsLargeDsp())
{
- if (id->idIsLargeCns())
+ if (id->idIsLargeCns())
{
- cv->cnsVal = ((instrDescCnsAmd*) id)->idacCnsVal;
- return ((instrDescCnsAmd*) id)->idacAmdVal;
+ cv->cnsVal = ((instrDescCnsAmd*)id)->idacCnsVal;
+ return ((instrDescCnsAmd*)id)->idacAmdVal;
}
else
{
- cv->cnsVal = id ->idSmallCns();
- return ((instrDescAmd*) id)->idaAmdVal;
+ cv->cnsVal = id->idSmallCns();
+ return ((instrDescAmd*)id)->idaAmdVal;
}
}
else
{
- if (id->idIsLargeCns())
- cv->cnsVal = ((instrDescCns *) id)->idcCnsVal;
+ if (id->idIsLargeCns())
+ {
+ cv->cnsVal = ((instrDescCns*)id)->idcCnsVal;
+ }
else
- cv->cnsVal = id ->idSmallCns();
+ {
+ cv->cnsVal = id->idSmallCns();
+ }
- return id->idAddr()->iiaAddrMode.amDisp;
+ return id->idAddr()->iiaAddrMode.amDisp;
}
}
-inline
-void emitter::emitGetInsDcmCns(instrDesc *id, CnsVal *cv)
+inline void emitter::emitGetInsDcmCns(instrDesc* id, CnsVal* cv)
{
#ifdef RELOC_SUPPORT
- cv->cnsReloc = id ->idIsCnsReloc();
+ cv->cnsReloc = id->idIsCnsReloc();
#endif
- if (id->idIsLargeCns())
+ if (id->idIsLargeCns())
{
- if (id->idIsLargeDsp())
+ if (id->idIsLargeDsp())
{
- cv->cnsVal = ((instrDescCnsDsp *) id)->iddcCnsVal;
+ cv->cnsVal = ((instrDescCnsDsp*)id)->iddcCnsVal;
}
else
{
- cv->cnsVal = ((instrDescCns *) id)->idcCnsVal;
+ cv->cnsVal = ((instrDescCns*)id)->idcCnsVal;
}
}
else
{
- cv->cnsVal = id ->idSmallCns();
+ cv->cnsVal = id->idSmallCns();
}
}
-inline
-ssize_t emitter::emitGetInsAmdAny(instrDesc *id)
+inline ssize_t emitter::emitGetInsAmdAny(instrDesc* id)
{
if (id->idIsLargeDsp())
{
@@ -235,18 +226,16 @@ ssize_t emitter::emitGetInsAmdAny(instrDesc *id)
return id->idAddr()->iiaAddrMode.amDisp;
}
-
/*****************************************************************************
*
* Convert between a register mask and a smaller version for storage.
*/
-
-/*static*/ inline void emitter::emitEncodeCallGCregs(regMaskTP regmask, instrDesc *id)
+/*static*/ inline void emitter::emitEncodeCallGCregs(regMaskTP regmask, instrDesc* id)
{
assert((regmask & RBM_CALLEE_TRASH) == 0);
- unsigned encodeMask;
+ unsigned encodeMask;
#ifdef _TARGET_X86_
assert(REGNUM_BITS >= 3);
@@ -259,35 +248,51 @@ ssize_t emitter::emitGetInsAmdAny(instrDesc *id)
if ((regmask & RBM_EBX) != RBM_NONE)
encodeMask |= 0x04;
- id->idReg1((regNumber)encodeMask); // Save in idReg1
+ id->idReg1((regNumber)encodeMask); // Save in idReg1
#elif defined(_TARGET_AMD64_)
assert(REGNUM_BITS >= 4);
encodeMask = 0;
if ((regmask & RBM_RSI) != RBM_NONE)
+ {
encodeMask |= 0x01;
+ }
if ((regmask & RBM_RDI) != RBM_NONE)
+ {
encodeMask |= 0x02;
+ }
if ((regmask & RBM_RBX) != RBM_NONE)
+ {
encodeMask |= 0x04;
+ }
if ((regmask & RBM_RBP) != RBM_NONE)
+ {
encodeMask |= 0x08;
+ }
- id->idReg1((regNumber)encodeMask); // Save in idReg1
+ id->idReg1((regNumber)encodeMask); // Save in idReg1
encodeMask = 0;
if ((regmask & RBM_R12) != RBM_NONE)
+ {
encodeMask |= 0x01;
+ }
if ((regmask & RBM_R13) != RBM_NONE)
+ {
encodeMask |= 0x02;
+ }
if ((regmask & RBM_R14) != RBM_NONE)
+ {
encodeMask |= 0x04;
+ }
if ((regmask & RBM_R15) != RBM_NONE)
+ {
encodeMask |= 0x08;
+ }
- id->idReg2((regNumber)encodeMask); // Save in idReg2
+ id->idReg2((regNumber)encodeMask); // Save in idReg2
#elif defined(_TARGET_ARM_)
assert(REGNUM_BITS >= 4);
@@ -302,7 +307,7 @@ ssize_t emitter::emitGetInsAmdAny(instrDesc *id)
if ((regmask & RBM_R7) != RBM_NONE)
encodeMask |= 0x08;
- id->idReg1((regNumber)encodeMask); // Save in idReg1
+ id->idReg1((regNumber)encodeMask); // Save in idReg1
encodeMask = 0;
@@ -315,49 +320,49 @@ ssize_t emitter::emitGetInsAmdAny(instrDesc *id)
if ((regmask & RBM_R11) != RBM_NONE)
encodeMask |= 0x08;
- id->idReg2((regNumber)encodeMask); // Save in idReg2
+ id->idReg2((regNumber)encodeMask); // Save in idReg2
#elif defined(_TARGET_ARM64_)
- assert(REGNUM_BITS >= 5);
- encodeMask = 0;
-
- if ((regmask & RBM_R19) != RBM_NONE)
- encodeMask |= 0x01;
- if ((regmask & RBM_R20) != RBM_NONE)
- encodeMask |= 0x02;
- if ((regmask & RBM_R21) != RBM_NONE)
- encodeMask |= 0x04;
- if ((regmask & RBM_R22) != RBM_NONE)
- encodeMask |= 0x08;
- if ((regmask & RBM_R23) != RBM_NONE)
- encodeMask |= 0x10;
-
- id->idReg1((regNumber)encodeMask); // Save in idReg1
-
- encodeMask = 0;
-
- if ((regmask & RBM_R24) != RBM_NONE)
- encodeMask |= 0x01;
- if ((regmask & RBM_R25) != RBM_NONE)
- encodeMask |= 0x02;
- if ((regmask & RBM_R26) != RBM_NONE)
- encodeMask |= 0x04;
- if ((regmask & RBM_R27) != RBM_NONE)
- encodeMask |= 0x08;
- if ((regmask & RBM_R28) != RBM_NONE)
- encodeMask |= 0x10;
-
- id->idReg2((regNumber)encodeMask); // Save in idReg2
+ assert(REGNUM_BITS >= 5);
+ encodeMask = 0;
+
+ if ((regmask & RBM_R19) != RBM_NONE)
+ encodeMask |= 0x01;
+ if ((regmask & RBM_R20) != RBM_NONE)
+ encodeMask |= 0x02;
+ if ((regmask & RBM_R21) != RBM_NONE)
+ encodeMask |= 0x04;
+ if ((regmask & RBM_R22) != RBM_NONE)
+ encodeMask |= 0x08;
+ if ((regmask & RBM_R23) != RBM_NONE)
+ encodeMask |= 0x10;
+
+ id->idReg1((regNumber)encodeMask); // Save in idReg1
+
+ encodeMask = 0;
+
+ if ((regmask & RBM_R24) != RBM_NONE)
+ encodeMask |= 0x01;
+ if ((regmask & RBM_R25) != RBM_NONE)
+ encodeMask |= 0x02;
+ if ((regmask & RBM_R26) != RBM_NONE)
+ encodeMask |= 0x04;
+ if ((regmask & RBM_R27) != RBM_NONE)
+ encodeMask |= 0x08;
+ if ((regmask & RBM_R28) != RBM_NONE)
+ encodeMask |= 0x10;
+
+ id->idReg2((regNumber)encodeMask); // Save in idReg2
#else
NYI("unknown target");
#endif
}
-/*static*/ inline unsigned emitter::emitDecodeCallGCregs(instrDesc *id)
+/*static*/ inline unsigned emitter::emitDecodeCallGCregs(instrDesc* id)
{
- unsigned regmask = 0;
- unsigned encodeMask;
+ unsigned regmask = 0;
+ unsigned encodeMask;
#ifdef _TARGET_X86_
assert(REGNUM_BITS >= 3);
@@ -374,24 +379,40 @@ ssize_t emitter::emitGetInsAmdAny(instrDesc *id)
encodeMask = id->idReg1();
if ((encodeMask & 0x01) != 0)
+ {
regmask |= RBM_RSI;
+ }
if ((encodeMask & 0x02) != 0)
+ {
regmask |= RBM_RDI;
+ }
if ((encodeMask & 0x04) != 0)
+ {
regmask |= RBM_RBX;
+ }
if ((encodeMask & 0x08) != 0)
+ {
regmask |= RBM_RBP;
+ }
encodeMask = id->idReg2();
if ((encodeMask & 0x01) != 0)
+ {
regmask |= RBM_R12;
+ }
if ((encodeMask & 0x02) != 0)
+ {
regmask |= RBM_R13;
+ }
if ((encodeMask & 0x04) != 0)
+ {
regmask |= RBM_R14;
+ }
if ((encodeMask & 0x08) != 0)
+ {
regmask |= RBM_R15;
+ }
#elif defined(_TARGET_ARM_)
assert(REGNUM_BITS >= 4);
@@ -449,31 +470,28 @@ ssize_t emitter::emitGetInsAmdAny(instrDesc *id)
NYI("unknown target");
#endif
- return regmask;
+ return regmask;
}
#ifdef _TARGET_XARCH_
-inline bool insIsCMOV(instruction ins)
+inline bool insIsCMOV(instruction ins)
{
return ((ins >= INS_cmovo) && (ins <= INS_cmovg));
}
#endif
-
/*****************************************************************************
*
* Call the specified function pointer for each insGroup in the current
* method that is marked IGF_NOGCINTERRUPT. Stops if the callback returns
* false. Returns the final result of the callback.
*/
-template<typename Callback>
-bool emitter::emitGenNoGCLst(Callback & cb)
+template <typename Callback>
+bool emitter::emitGenNoGCLst(Callback& cb)
{
- for (insGroup * ig = emitIGlist;
- ig;
- ig = ig->igNext)
+ for (insGroup* ig = emitIGlist; ig; ig = ig->igNext)
{
- if (ig->igFlags & IGF_NOGCINTERRUPT)
+ if (ig->igFlags & IGF_NOGCINTERRUPT)
{
if (!cb(ig->igFuncIdx, ig->igOffs, ig->igSize))
{
@@ -486,5 +504,5 @@ bool emitter::emitGenNoGCLst(Callback & cb)
}
/*****************************************************************************/
-#endif//_EMITINL_H_
+#endif //_EMITINL_H_
/*****************************************************************************/
diff --git a/src/jit/emitpub.h b/src/jit/emitpub.h
index e76ed2540a..a2f041a5f3 100644
--- a/src/jit/emitpub.h
+++ b/src/jit/emitpub.h
@@ -2,172 +2,161 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
- /************************************************************************/
- /* Overall emitter control (including startup and shutdown) */
- /************************************************************************/
+/************************************************************************/
+/* Overall emitter control (including startup and shutdown) */
+/************************************************************************/
- static
- void emitInit();
- static
- void emitDone();
+static void emitInit();
+static void emitDone();
- void emitBegCG(Compiler * comp,
- COMP_HANDLE cmpHandle);
- void emitEndCG();
+void emitBegCG(Compiler* comp, COMP_HANDLE cmpHandle);
+void emitEndCG();
- void emitBegFN(bool hasFramePtr
+void emitBegFN(bool hasFramePtr
#if defined(DEBUG)
- , bool checkAlign
+ ,
+ bool checkAlign
#endif
#ifdef LEGACY_BACKEND
- , unsigned lclSize
+ ,
+ unsigned lclSize
#endif // LEGACY_BACKEND
- , unsigned maxTmpSize
- );
+ ,
+ unsigned maxTmpSize);
- void emitEndFN();
+void emitEndFN();
- void emitComputeCodeSizes();
+void emitComputeCodeSizes();
- unsigned emitEndCodeGen(Compiler *comp,
- bool contTrkPtrLcls,
- bool fullyInt,
- bool fullPtrMap,
- bool returnsGCr,
- unsigned xcptnsCount,
- unsigned *prologSize,
- unsigned *epilogSize, void **codeAddr,
- void **coldCodeAddr,
- void **consAddr);
+unsigned emitEndCodeGen(Compiler* comp,
+ bool contTrkPtrLcls,
+ bool fullyInt,
+ bool fullPtrMap,
+ bool returnsGCr,
+ unsigned xcptnsCount,
+ unsigned* prologSize,
+ unsigned* epilogSize,
+ void** codeAddr,
+ void** coldCodeAddr,
+ void** consAddr);
- /************************************************************************/
- /* Method prolog and epilog */
- /************************************************************************/
+/************************************************************************/
+/* Method prolog and epilog */
+/************************************************************************/
- unsigned emitGetEpilogCnt();
+unsigned emitGetEpilogCnt();
- template<typename Callback>
- bool emitGenNoGCLst (Callback & cb);
+template <typename Callback>
+bool emitGenNoGCLst(Callback& cb);
- void emitBegProlog();
- unsigned emitGetPrologOffsetEstimate();
- void emitMarkPrologEnd();
- void emitEndProlog();
+void emitBegProlog();
+unsigned emitGetPrologOffsetEstimate();
+void emitMarkPrologEnd();
+void emitEndProlog();
- void emitCreatePlaceholderIG(insGroupPlaceholderType igType,
- BasicBlock* igBB,
- VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- bool last);
+void emitCreatePlaceholderIG(insGroupPlaceholderType igType,
+ BasicBlock* igBB,
+ VARSET_VALARG_TP GCvars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ bool last);
- void emitGeneratePrologEpilog();
- void emitStartPrologEpilogGeneration();
- void emitFinishPrologEpilogGeneration();
+void emitGeneratePrologEpilog();
+void emitStartPrologEpilogGeneration();
+void emitFinishPrologEpilogGeneration();
- /************************************************************************/
- /* Record a code position and later convert it to offset */
- /************************************************************************/
+/************************************************************************/
+/* Record a code position and later convert it to offset */
+/************************************************************************/
- void * emitCurBlock ();
- unsigned emitCurOffset();
+void* emitCurBlock();
+unsigned emitCurOffset();
- UNATIVE_OFFSET emitCodeOffset(void *blockPtr, unsigned codeOffs);
+UNATIVE_OFFSET emitCodeOffset(void* blockPtr, unsigned codeOffs);
#ifdef DEBUG
- const char* emitOffsetToLabel(unsigned offs);
+const char* emitOffsetToLabel(unsigned offs);
#endif // DEBUG
- /************************************************************************/
- /* Output target-independent instructions */
- /************************************************************************/
+/************************************************************************/
+/* Output target-independent instructions */
+/************************************************************************/
- void emitIns_J(instruction ins,
- BasicBlock * dst,
- int instrCount = 0);
+void emitIns_J(instruction ins, BasicBlock* dst, int instrCount = 0);
- /************************************************************************/
- /* Emit initialized data sections */
- /************************************************************************/
+/************************************************************************/
+/* Emit initialized data sections */
+/************************************************************************/
- UNATIVE_OFFSET emitDataGenBeg (UNATIVE_OFFSET size,
- bool dblAlign,
- bool codeLtab);
+UNATIVE_OFFSET emitDataGenBeg(UNATIVE_OFFSET size, bool dblAlign, bool codeLtab);
- UNATIVE_OFFSET emitBBTableDataGenBeg(unsigned numEntries,
- bool relativeAddr);
+UNATIVE_OFFSET emitBBTableDataGenBeg(unsigned numEntries, bool relativeAddr);
- void emitDataGenData(unsigned offs,
- const void * data,
- size_t size);
+void emitDataGenData(unsigned offs, const void* data, size_t size);
- void emitDataGenData(unsigned offs,
- BasicBlock * label);
+void emitDataGenData(unsigned offs, BasicBlock* label);
- void emitDataGenEnd();
+void emitDataGenEnd();
- UNATIVE_OFFSET emitDataConst(const void* cnsAddr,
- unsigned cnsSize,
- bool dblAlign);
+UNATIVE_OFFSET emitDataConst(const void* cnsAddr, unsigned cnsSize, bool dblAlign);
- UNATIVE_OFFSET emitDataSize();
+UNATIVE_OFFSET emitDataSize();
- /************************************************************************/
- /* Instruction information */
- /************************************************************************/
+/************************************************************************/
+/* Instruction information */
+/************************************************************************/
#ifdef _TARGET_XARCH_
- static bool instrIs3opImul (instruction ins);
- static bool instrIsExtendedReg3opImul (instruction ins);
- static bool instrHasImplicitRegPairDest (instruction ins);
- static void check3opImulValues ();
- static regNumber inst3opImulReg (instruction ins);
- static instruction inst3opImulForReg (regNumber reg);
+static bool instrIs3opImul(instruction ins);
+static bool instrIsExtendedReg3opImul(instruction ins);
+static bool instrHasImplicitRegPairDest(instruction ins);
+static void check3opImulValues();
+static regNumber inst3opImulReg(instruction ins);
+static instruction inst3opImulForReg(regNumber reg);
#endif
- /************************************************************************/
- /* Emit PDB offset translation information */
- /************************************************************************/
-
-#ifdef TRANSLATE_PDB
-
- static void SetILBaseOfCode ( BYTE *pTextBase );
- static void SetILMethodBase ( BYTE *pMethodEntry );
- static void SetILMethodStart( BYTE *pMethodCode );
- static void SetImgBaseOfCode( BYTE *pTextBase );
-
- void SetIDBaseToProlog();
- void SetIDBaseToOffset( int methodOffset );
-
- static void DisablePDBTranslation();
- static bool IsPDBEnabled();
-
- static void InitTranslationMaps( int ilCodeSize );
- static void DeleteTranslationMaps();
- static void InitTranslator( PDBRewriter * pPDB,
- int * rgSecMap,
- IMAGE_SECTION_HEADER **rgpHeader,
- int numSections );
-#endif
+/************************************************************************/
+/* Emit PDB offset translation information */
+/************************************************************************/
+
+#ifdef TRANSLATE_PDB
+static void SetILBaseOfCode(BYTE* pTextBase);
+static void SetILMethodBase(BYTE* pMethodEntry);
+static void SetILMethodStart(BYTE* pMethodCode);
+static void SetImgBaseOfCode(BYTE* pTextBase);
- /************************************************************************/
- /* Interface for generating unwind information */
- /************************************************************************/
+void SetIDBaseToProlog();
+void SetIDBaseToOffset(int methodOffset);
+
+static void DisablePDBTranslation();
+static bool IsPDBEnabled();
+
+static void InitTranslationMaps(int ilCodeSize);
+static void DeleteTranslationMaps();
+static void InitTranslator(PDBRewriter* pPDB, int* rgSecMap, IMAGE_SECTION_HEADER** rgpHeader, int numSections);
+#endif
+
+/************************************************************************/
+/* Interface for generating unwind information */
+/************************************************************************/
#ifdef _TARGET_ARMARCH_
- bool emitIsFuncEnd(emitLocation* emitLoc, emitLocation* emitLocNextFragment = NULL);
+bool emitIsFuncEnd(emitLocation* emitLoc, emitLocation* emitLocNextFragment = NULL);
- void emitSplit(emitLocation* startLoc, emitLocation* endLoc, UNATIVE_OFFSET maxSplitSize, void* context, emitSplitCallbackType callbackFunc);
+void emitSplit(emitLocation* startLoc,
+ emitLocation* endLoc,
+ UNATIVE_OFFSET maxSplitSize,
+ void* context,
+ emitSplitCallbackType callbackFunc);
- void emitUnwindNopPadding(emitLocation* locFrom, Compiler* comp);
+void emitUnwindNopPadding(emitLocation* locFrom, Compiler* comp);
#endif // _TARGET_ARMARCH_
#if defined(_TARGET_ARM_)
- unsigned emitGetInstructionSize(emitLocation* emitLoc);
+unsigned emitGetInstructionSize(emitLocation* emitLoc);
#endif // defined(_TARGET_ARM_)
-
diff --git a/src/jit/emitxarch.cpp b/src/jit/emitxarch.cpp
index 2687b4984e..3ab7e1567d 100644
--- a/src/jit/emitxarch.cpp
+++ b/src/jit/emitxarch.cpp
@@ -25,16 +25,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "emit.h"
#include "codegen.h"
-bool IsSSE2Instruction(instruction ins)
-{
+bool IsSSE2Instruction(instruction ins)
+{
return (ins >= INS_FIRST_SSE2_INSTRUCTION && ins <= INS_LAST_SSE2_INSTRUCTION);
}
-bool IsSSEOrAVXInstruction(instruction ins)
+bool IsSSEOrAVXInstruction(instruction ins)
{
#ifdef FEATURE_AVX_SUPPORT
return (ins >= INS_FIRST_SSE2_INSTRUCTION && ins <= INS_LAST_AVX_INSTRUCTION);
-#else // !FEATURE_AVX_SUPPORT
+#else // !FEATURE_AVX_SUPPORT
return IsSSE2Instruction(ins);
#endif // !FEATURE_AVX_SUPPORT
}
@@ -48,56 +48,34 @@ bool emitter::IsAVXInstruction(instruction ins)
#endif
}
-#define REX_PREFIX_MASK 0xFF00000000LL
+#define REX_PREFIX_MASK 0xFF00000000LL
#ifdef FEATURE_AVX_SUPPORT
// Returns true if the AVX instruction is a binary operator that requires 3 operands.
// When we emit an instruction with only two operands, we will duplicate the destination
// as a source.
// TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this needs to
-// be formalized by adding an additional field to instruction table to
+// be formalized by adding an additional field to the instruction table
// to indicate whether it is a 3-operand instruction.
bool emitter::IsThreeOperandBinaryAVXInstruction(instruction ins)
{
return IsAVXInstruction(ins) &&
- (
- ins == INS_cvtsi2ss || ins == INS_cvtsi2sd ||
- ins == INS_cvtss2sd || ins == INS_cvtsd2ss ||
- ins == INS_addss || ins == INS_addsd ||
- ins == INS_subss || ins == INS_subsd ||
- ins == INS_mulss || ins == INS_mulsd ||
- ins == INS_divss || ins == INS_divsd ||
- ins == INS_addps || ins == INS_addpd ||
- ins == INS_subps || ins == INS_subpd ||
- ins == INS_mulps || ins == INS_mulpd ||
- ins == INS_cmpps || ins == INS_cmppd ||
- ins == INS_andps || ins == INS_andpd ||
- ins == INS_orps || ins == INS_orpd ||
- ins == INS_xorps || ins == INS_xorpd ||
- ins == INS_dpps || ins == INS_dppd ||
- ins == INS_haddpd || ins == INS_por ||
- ins == INS_pand || ins == INS_pandn ||
- ins == INS_pcmpeqd || ins == INS_pcmpgtd ||
- ins == INS_pcmpeqw || ins == INS_pcmpgtw ||
- ins == INS_pcmpeqb || ins == INS_pcmpgtb ||
- ins == INS_pcmpeqq || ins == INS_pcmpgtq ||
- ins == INS_pmulld || ins == INS_pmullw ||
-
- ins == INS_shufps || ins == INS_shufpd ||
- ins == INS_minps || ins == INS_minss ||
- ins == INS_minpd || ins == INS_minsd ||
- ins == INS_divps || ins == INS_divpd ||
- ins == INS_maxps || ins == INS_maxpd ||
- ins == INS_maxss || ins == INS_maxsd ||
- ins == INS_andnps || ins == INS_andnpd ||
- ins == INS_paddb || ins == INS_paddw ||
- ins == INS_paddd || ins == INS_paddq ||
- ins == INS_psubb || ins == INS_psubw ||
- ins == INS_psubd || ins == INS_psubq ||
- ins == INS_pmuludq || ins == INS_pxor ||
- ins == INS_pmaxub || ins == INS_pminub ||
- ins == INS_pmaxsw || ins == INS_pminsw ||
- ins == INS_insertps || ins == INS_vinsertf128
+ (ins == INS_cvtsi2ss || ins == INS_cvtsi2sd || ins == INS_cvtss2sd || ins == INS_cvtsd2ss ||
+ ins == INS_addss || ins == INS_addsd || ins == INS_subss || ins == INS_subsd || ins == INS_mulss ||
+ ins == INS_mulsd || ins == INS_divss || ins == INS_divsd || ins == INS_addps || ins == INS_addpd ||
+ ins == INS_subps || ins == INS_subpd || ins == INS_mulps || ins == INS_mulpd || ins == INS_cmpps ||
+ ins == INS_cmppd || ins == INS_andps || ins == INS_andpd || ins == INS_orps || ins == INS_orpd ||
+ ins == INS_xorps || ins == INS_xorpd || ins == INS_dpps || ins == INS_dppd || ins == INS_haddpd ||
+ ins == INS_por || ins == INS_pand || ins == INS_pandn || ins == INS_pcmpeqd || ins == INS_pcmpgtd ||
+ ins == INS_pcmpeqw || ins == INS_pcmpgtw || ins == INS_pcmpeqb || ins == INS_pcmpgtb ||
+ ins == INS_pcmpeqq || ins == INS_pcmpgtq || ins == INS_pmulld || ins == INS_pmullw ||
+
+ ins == INS_shufps || ins == INS_shufpd || ins == INS_minps || ins == INS_minss || ins == INS_minpd ||
+ ins == INS_minsd || ins == INS_divps || ins == INS_divpd || ins == INS_maxps || ins == INS_maxpd ||
+ ins == INS_maxss || ins == INS_maxsd || ins == INS_andnps || ins == INS_andnpd || ins == INS_paddb ||
+ ins == INS_paddw || ins == INS_paddd || ins == INS_paddq || ins == INS_psubb || ins == INS_psubw ||
+ ins == INS_psubd || ins == INS_psubq || ins == INS_pmuludq || ins == INS_pxor || ins == INS_pmaxub ||
+ ins == INS_pminub || ins == INS_pmaxsw || ins == INS_pminsw || ins == INS_insertps || ins == INS_vinsertf128
);
}
@@ -106,16 +84,12 @@ bool emitter::IsThreeOperandBinaryAVXInstruction(instruction ins)
// When we emit an instruction with only two operands, we will duplicate the source
// register in the vvvv field. This is because these merge sources into the dest.
// TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this needs to
-// be formalized by adding an additional field to instruction table to
+// be formalized by adding an additional field to the instruction table
// to indicate whether it is a 3-operand instruction.
bool emitter::IsThreeOperandMoveAVXInstruction(instruction ins)
{
return IsAVXInstruction(ins) &&
- (
- ins == INS_movlpd || ins == INS_movlps ||
- ins == INS_movhpd || ins == INS_movhps ||
- ins == INS_movss
- );
+ (ins == INS_movlpd || ins == INS_movlps || ins == INS_movhpd || ins == INS_movhps || ins == INS_movss);
}
#endif // FEATURE_AVX_SUPPORT
@@ -127,28 +101,15 @@ bool emitter::IsThreeOperandMoveAVXInstruction(instruction ins)
bool Is4ByteAVXInstruction(instruction ins)
{
#ifdef FEATURE_AVX_SUPPORT
- return (
- ins == INS_dpps ||
- ins == INS_dppd ||
- ins == INS_insertps ||
- ins == INS_pcmpeqq ||
- ins == INS_pcmpgtq ||
- ins == INS_vbroadcastss ||
- ins == INS_vbroadcastsd ||
- ins == INS_vpbroadcastb ||
- ins == INS_vpbroadcastw ||
- ins == INS_vpbroadcastd ||
- ins == INS_vpbroadcastq ||
- ins == INS_vextractf128 ||
- ins == INS_vinsertf128 ||
- ins == INS_pmulld
- );
+ return (ins == INS_dpps || ins == INS_dppd || ins == INS_insertps || ins == INS_pcmpeqq || ins == INS_pcmpgtq ||
+ ins == INS_vbroadcastss || ins == INS_vbroadcastsd || ins == INS_vpbroadcastb || ins == INS_vpbroadcastw ||
+ ins == INS_vpbroadcastd || ins == INS_vpbroadcastq || ins == INS_vextractf128 || ins == INS_vinsertf128 ||
+ ins == INS_pmulld);
#else
return false;
#endif
}
-
#ifdef FEATURE_AVX_SUPPORT
// Returns true if this instruction requires a VEX prefix
// All AVX instructions require a VEX prefix
@@ -160,7 +121,7 @@ bool emitter::TakesVexPrefix(instruction ins)
return false;
}
- return IsAVXInstruction(ins);
+ return IsAVXInstruction(ins);
}
// Add base VEX prefix without setting W, R, X, or B bits
@@ -187,8 +148,8 @@ bool emitter::TakesVexPrefix(instruction ins)
// TODO-AMD64-CQ: for simplicity of implementation this routine always adds 3-byte VEX
// prefix. Based on 'attr' param we could add 2-byte VEX prefix in case of scalar
// and AVX-128 bit operations.
-#define DEFAULT_3BYTE_VEX_PREFIX 0xC4E07800000000ULL
-#define LBIT_IN_3BYTE_VEX_PREFIX 0X00000400000000ULL
+#define DEFAULT_3BYTE_VEX_PREFIX 0xC4E07800000000ULL
+#define LBIT_IN_3BYTE_VEX_PREFIX 0X00000400000000ULL
size_t emitter::AddVexPrefix(instruction ins, size_t code, emitAttr attr)
{
// Only AVX instructions require VEX prefix
@@ -231,17 +192,11 @@ bool TakesRexWPrefix(instruction ins, emitAttr attr)
if (IsSSEOrAVXInstruction(ins))
{
- if (ins == INS_cvttsd2si ||
- ins == INS_cvttss2si ||
- ins == INS_cvtsd2si ||
- ins == INS_cvtss2si ||
- ins == INS_cvtsi2sd ||
- ins == INS_cvtsi2ss ||
- ins == INS_mov_xmm2i ||
- ins == INS_mov_i2xmm)
+ if (ins == INS_cvttsd2si || ins == INS_cvttss2si || ins == INS_cvtsd2si || ins == INS_cvtss2si ||
+ ins == INS_cvtsi2sd || ins == INS_cvtsi2ss || ins == INS_mov_xmm2i || ins == INS_mov_i2xmm)
{
return true;
- }
+ }
return false;
}
@@ -251,15 +206,8 @@ bool TakesRexWPrefix(instruction ins, emitAttr attr)
// These are all the instructions that default to 8-byte operand without the REX.W bit
// With 1 special case: movzx because the 4 byte version still zeros-out the hi 4 bytes
// so we never need it
- if ((ins != INS_push) &&
- (ins != INS_pop) &&
- (ins != INS_movq) &&
- (ins != INS_movzx) &&
- (ins != INS_push_hide) &&
- (ins != INS_pop_hide) &&
- (ins != INS_ret) &&
- (ins != INS_call) &&
- !((ins >= INS_i_jmp) && (ins <= INS_l_jg)))
+ if ((ins != INS_push) && (ins != INS_pop) && (ins != INS_movq) && (ins != INS_movzx) && (ins != INS_push_hide) &&
+ (ins != INS_pop_hide) && (ins != INS_ret) && (ins != INS_call) && !((ins >= INS_i_jmp) && (ins <= INS_l_jg)))
{
return true;
}
@@ -267,20 +215,19 @@ bool TakesRexWPrefix(instruction ins, emitAttr attr)
{
return false;
}
-#else //!_TARGET_AMD64 = _TARGET_X86_
+#else //!_TARGET_AMD64 = _TARGET_X86_
return false;
#endif //!_TARGET_AMD64_
}
// Returns true if using this register will require a REX.* prefix.
// Since XMM registers overlap with YMM registers, this routine
-// can also be used to know whether a YMM register if the
+// can also be used to know whether a register is a YMM register if the
// instruction in question is AVX.
bool IsExtendedReg(regNumber reg)
{
#ifdef _TARGET_AMD64_
- return ((reg >= REG_R8) && (reg <= REG_R15)) ||
- ((reg >= REG_XMM8) && (reg <= REG_XMM15));
+ return ((reg >= REG_R8) && (reg <= REG_R15)) || ((reg >= REG_XMM8) && (reg <= REG_XMM15));
#else
// X86 JIT operates in 32-bit mode and hence extended reg are not available.
return false;
@@ -293,15 +240,21 @@ bool IsExtendedReg(regNumber reg, emitAttr attr)
#ifdef _TARGET_AMD64_
// Not a register, so doesn't need a prefix
if (reg > REG_XMM15)
+ {
return false;
+ }
// Opcode field only has 3 bits for the register, these high registers
// need a 4th bit, which comes from the REX prefix (either REX.X, REX.R, or REX.B)
if (IsExtendedReg(reg))
+ {
return true;
+ }
if (EA_SIZE(attr) != EA_1BYTE)
+ {
return false;
+ }
// There are 12 one-byte registers addressable 'below' r8b:
// al, cl, dl, bl, ah, ch, dh, bh, spl, bpl, sil, dil.
@@ -325,7 +278,7 @@ bool IsExtendedReg(regNumber reg, emitAttr attr)
// Since XMM registers overlap with YMM registers, this routine
// can also be used to know whether a register is a YMM register in the case of AVX instructions.
//
-// Legacy X86: we have XMM0-XMM7 available but this routine cannot be used to
+// Legacy X86: we have XMM0-XMM7 available but this routine cannot be used to
// determine whether a reg is XMM because they share the same reg numbers
// with integer registers. Hence always return false.
bool IsXMMReg(regNumber reg)
@@ -333,10 +286,10 @@ bool IsXMMReg(regNumber reg)
#ifndef LEGACY_BACKEND
#ifdef _TARGET_AMD64_
return (reg >= REG_XMM0) && (reg <= REG_XMM15);
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
return (reg >= REG_XMM0) && (reg <= REG_XMM7);
#endif // !_TARGET_AMD64_
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
return false;
#endif // LEGACY_BACKEND
}
@@ -347,13 +300,13 @@ regNumber RegEncoding(regNumber reg)
#ifndef LEGACY_BACKEND
// XMM registers do not share the same reg numbers as integer registers.
// But register encoding of integer and XMM registers is the same.
- // Therefore, subtract XMMBASE from regNumber to get the register encoding
+ // Therefore, subtract XMMBASE from regNumber to get the register encoding
// in case of XMM registers.
- return (regNumber)((IsXMMReg(reg) ? reg-XMMBASE : reg) & 0x7);
-#else // LEGACY_BACKEND
+ return (regNumber)((IsXMMReg(reg) ? reg - XMMBASE : reg) & 0x7);
+#else // LEGACY_BACKEND
// Legacy X86: XMM registers share the same reg numbers as integer registers and
// hence nothing to do to get reg encoding.
- return (regNumber) (reg & 0x7);
+ return (regNumber)(reg & 0x7);
#endif // LEGACY_BACKEND
}
@@ -374,7 +327,7 @@ size_t emitter::AddRexWPrefix(instruction ins, size_t code)
return code | 0x4800000000ULL;
#else
- assert(!"UNREACHED");
+ assert(!"UNREACHED");
return code;
#endif
}
@@ -399,7 +352,7 @@ size_t emitter::AddRexXPrefix(instruction ins, size_t code)
{
if (UseAVX() && IsAVXInstruction(ins))
{
- //Right now support 3-byte VEX prefix
+ // Right now support 3-byte VEX prefix
assert(hasVexPrefix(code));
// X-bit is added in bit-inverted form.
@@ -432,7 +385,7 @@ size_t emitter::AddRexPrefix(instruction ins, size_t code)
bool isPrefix(BYTE b)
{
- assert(b != 0); // Caller should check this
+ assert(b != 0); // Caller should check this
assert(b != 0x67); // We don't use the address size prefix
assert(b != 0x65); // The GS segment override prefix is emitted separately
assert(b != 0x64); // The FS segment override prefix is emitted separately
@@ -450,7 +403,7 @@ bool isPrefix(BYTE b)
#endif //_TARGET_AMD64_
// Outputs VEX prefix (in case of AVX instructions) and REX.R/X/W/B otherwise.
-unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, size_t & code)
+unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, size_t& code)
{
#ifdef _TARGET_AMD64_ // TODO-x86: This needs to be enabled for AVX support on x86.
if (hasVexPrefix(code))
@@ -461,41 +414,41 @@ unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, s
code &= 0x00000000FFFFFFFFLL;
WORD leadingBytes = 0;
- BYTE check = (code >> 24) & 0xFF;
+ BYTE check = (code >> 24) & 0xFF;
if (check != 0)
{
- // 3-byte opcode: with the bytes ordered as 0x2211RM33 or
+ // 3-byte opcode: with the bytes ordered as 0x2211RM33 or
// 4-byte opcode: with the bytes ordered as 0x22114433
// check for a prefix in the 11 position
BYTE sizePrefix = (code >> 16) & 0xFF;
if (sizePrefix != 0 && isPrefix(sizePrefix))
{
// 'pp' bits in byte2 of VEX prefix allows us to encode SIMD size prefixes as two bits
- //
+ //
// 00 - None (0F - packed float)
// 01 - 66 (66 0F - packed double)
// 10 - F3 (F3 0F - scalar float)
// 11 - F2 (F2 0F - scalar double)
switch (sizePrefix)
{
- case 0x66:
- vexPrefix |= 0x01;
- break;
- case 0xF3:
- vexPrefix |= 0x02;
- break;
- case 0xF2:
- vexPrefix |= 0x03;
- break;
- default:
- assert(!"unrecognized SIMD size prefix");
- unreached();
+ case 0x66:
+ vexPrefix |= 0x01;
+ break;
+ case 0xF3:
+ vexPrefix |= 0x02;
+ break;
+ case 0xF2:
+ vexPrefix |= 0x03;
+ break;
+ default:
+ assert(!"unrecognized SIMD size prefix");
+ unreached();
}
// Now the byte in the 22 position must be an escape byte 0F
leadingBytes = check;
assert(leadingBytes == 0x0F);
-
+
// Get rid of both sizePrefix and escape byte
code &= 0x0000FFFFLL;
@@ -517,37 +470,37 @@ unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, s
assert(leadingBytes == 0x0F || leadingBytes == 0x00);
code &= 0xFFFF;
}
-
+
// If there is an escape byte it must be 0x0F or 0x0F3A or 0x0F38
- // m-mmmmm bits in byte 1 of VEX prefix allows us to encode these
+ // m-mmmmm bits in byte 1 of VEX prefix allows us to encode these
// implied leading bytes
switch (leadingBytes)
{
- case 0x00:
- // there is no leading byte
- break;
- case 0x0F:
- vexPrefix |= 0x0100;
- break;
- case 0x0F38:
- vexPrefix |= 0x0200;
- break;
- case 0x0F3A:
- vexPrefix |= 0x0300;
- break;
- default:
- assert(!"encountered unknown leading bytes");
- unreached();
+ case 0x00:
+ // there is no leading byte
+ break;
+ case 0x0F:
+ vexPrefix |= 0x0100;
+ break;
+ case 0x0F38:
+ vexPrefix |= 0x0200;
+ break;
+ case 0x0F3A:
+ vexPrefix |= 0x0300;
+ break;
+ default:
+ assert(!"encountered unknown leading bytes");
+ unreached();
}
- // At this point
+ // At this point
// VEX.2211RM33 got transformed as VEX.0000RM33
// VEX.0011RM22 got transformed as VEX.0000RM22
- //
+ //
// Now output VEX prefix leaving the 4-byte opcode
emitOutputByte(dst, ((vexPrefix >> 16) & 0xFF));
- emitOutputByte(dst+1, ((vexPrefix >> 8) & 0xFF));
- emitOutputByte(dst+2, vexPrefix & 0xFF);
+ emitOutputByte(dst + 1, ((vexPrefix >> 8) & 0xFF));
+ emitOutputByte(dst + 2, vexPrefix & 0xFF);
return 3;
}
else if (code > 0x00FFFFFFFFLL)
@@ -612,10 +565,12 @@ unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, s
/*****************************************************************************
* Is the last instruction emitted a call instruction?
*/
-bool emitter::emitIsLastInsCall()
+bool emitter::emitIsLastInsCall()
{
if ((emitLastIns != nullptr) && (emitLastIns->idIns() == INS_call))
+ {
return true;
+ }
return false;
}
@@ -624,7 +579,7 @@ bool emitter::emitIsLastInsCall()
* We're about to create an epilog. If the last instruction we output was a 'call',
* then we need to insert a NOP, to allow for proper exception-handling behavior.
*/
-void emitter::emitOutputPreEpilogNOP()
+void emitter::emitOutputPreEpilogNOP()
{
if (emitIsLastInsCall())
{
@@ -635,39 +590,43 @@ void emitter::emitOutputPreEpilogNOP()
#endif //_TARGET_AMD64_
// Size of rex prefix in bytes
-unsigned emitter::emitGetRexPrefixSize(instruction ins)
+unsigned emitter::emitGetRexPrefixSize(instruction ins)
{
// In case of AVX instructions, REX prefixes are part of VEX prefix.
// And hence requires no additional byte to encode REX prefixes.
if (IsAVXInstruction(ins))
+ {
return 0;
+ }
// If not AVX, then we would need 1-byte to encode REX prefix.
return 1;
}
// Size of vex prefix in bytes
-unsigned emitter::emitGetVexPrefixSize(instruction ins, emitAttr attr)
+unsigned emitter::emitGetVexPrefixSize(instruction ins, emitAttr attr)
{
- // TODO-XArch-CQ: right now we default to 3-byte VEX prefix. There is a
- // scope for size win by using 2-byte vex prefix for some of the
+ // TODO-XArch-CQ: right now we default to 3-byte VEX prefix. There is a
+ // scope for size win by using 2-byte vex prefix for some of the
// scalar, avx-128 and most common avx-256 instructions.
if (IsAVXInstruction(ins))
+ {
return 3;
+ }
// If not AVX, then we don't need to encode vex prefix.
return 0;
}
-// VEX prefix encodes some bytes of the opcode and as a result, overall size of the instruction reduces.
-// Therefore, to estimate the size adding VEX prefix size and size of instruction opcode bytes will always overstimate.
-// Instead this routine will adjust the size of VEX prefix based on the number of bytes of opcode it encodes so that
+// VEX prefix encodes some bytes of the opcode and as a result, overall size of the instruction reduces.
+// Therefore, to estimate the size, adding VEX prefix size and size of instruction opcode bytes will always overestimate.
+// Instead this routine will adjust the size of VEX prefix based on the number of bytes of opcode it encodes so that
// instruction size estimate will be accurate.
-// Basically this function will decrease the vexPrefixSize,
+// Basically this function will decrease the vexPrefixSize,
// so that opcodeSize + vexPrefixAdjustedSize will be the right size.
// rightOpcodeSize + vexPrefixSize
-//=(opcodeSize - ExtrabytesSize) + vexPrefixSize
+//=(opcodeSize - ExtrabytesSize) + vexPrefixSize
//=opcodeSize + (vexPrefixSize - ExtrabytesSize)
//=opcodeSize + vexPrefixAdjustedSize
unsigned emitter::emitGetVexPrefixAdjustedSize(instruction ins, emitAttr attr, size_t code)
@@ -676,20 +635,20 @@ unsigned emitter::emitGetVexPrefixAdjustedSize(instruction ins, emitAttr attr, s
if (IsAVXInstruction(ins))
{
unsigned vexPrefixAdjustedSize = emitGetVexPrefixSize(ins, attr);
- // Currently vex prefix size is hard coded as 3 bytes,
- // In future we should support 2 bytes vex prefix.
+ // Currently vex prefix size is hard coded as 3 bytes,
+ // In future we should support 2 bytes vex prefix.
assert(vexPrefixAdjustedSize == 3);
-
+
// In this case, opcode will contains escape prefix at least one byte,
- // vexPrefixAdjustedSize should be minus one.
+ // vexPrefixAdjustedSize should be minus one.
vexPrefixAdjustedSize -= 1;
-
- // Get the fourth byte in Opcode.
- // If this byte is non-zero, then we should check whether the opcode contains SIMD prefix or not.
+
+ // Get the fourth byte in Opcode.
+ // If this byte is non-zero, then we should check whether the opcode contains SIMD prefix or not.
BYTE check = (code >> 24) & 0xFF;
if (check != 0)
{
- // 3-byte opcode: with the bytes ordered as 0x2211RM33 or
+ // 3-byte opcode: with the bytes ordered as 0x2211RM33 or
// 4-byte opcode: with the bytes ordered as 0x22114433
// Simd prefix is at the first byte.
BYTE sizePrefix = (code >> 16) & 0xFF;
@@ -701,11 +660,11 @@ unsigned emitter::emitGetVexPrefixAdjustedSize(instruction ins, emitAttr attr, s
// If the opcode size is 4 bytes, then the second escape prefix is at fourth byte in opcode.
// But in this case the opcode has not counted R\M part.
// opcodeSize + VexPrefixAdjustedSize - ExtraEscapePrefixSize + ModR\MSize
- //=opcodeSize + VexPrefixAdjustedSize -1 + 1
+ //=opcodeSize + VexPrefixAdjustedSize -1 + 1
//=opcodeSize + VexPrefixAdjustedSize
// So although we may have second byte escape prefix, we won't decrease vexPrefixAjustedSize.
}
-
+
return vexPrefixAdjustedSize;
}
#endif // FEATURE_AVX_SUPPORT
@@ -714,16 +673,16 @@ unsigned emitter::emitGetVexPrefixAdjustedSize(instruction ins, emitAttr attr, s
}
// Get size of rex or vex prefix emitted in code
-unsigned emitter::emitGetPrefixSize(size_t code)
+unsigned emitter::emitGetPrefixSize(size_t code)
{
#ifdef FEATURE_AVX_SUPPORT
if (code & VEX_PREFIX_MASK_3BYTE)
{
return 3;
}
- else
-#endif
- if (code & REX_PREFIX_MASK)
+ else
+#endif
+ if (code & REX_PREFIX_MASK)
{
return 1;
}
@@ -731,18 +690,16 @@ unsigned emitter::emitGetPrefixSize(size_t code)
return 0;
}
-
-
#ifdef _TARGET_X86_
/*****************************************************************************
*
* Record a non-empty stack
*/
-void emitter::emitMarkStackLvl(unsigned stackLevel)
+void emitter::emitMarkStackLvl(unsigned stackLevel)
{
- assert(int(stackLevel) >= 0);
- assert(emitCurStackLvl == 0);
+ assert(int(stackLevel) >= 0);
+ assert(emitCurStackLvl == 0);
assert(emitCurIG->igStkLvl == 0);
assert(emitCurIGfreeNext == emitCurIGfreeBase);
@@ -760,19 +717,18 @@ void emitter::emitMarkStackLvl(unsigned stackLevel)
* Get hold of the address mode displacement value for an indirect call.
*/
-inline
-ssize_t emitter::emitGetInsCIdisp(instrDesc* id)
+inline ssize_t emitter::emitGetInsCIdisp(instrDesc* id)
{
- if (id->idIsLargeCall())
+ if (id->idIsLargeCall())
{
- return ((instrDescCGCA*)id)->idcDisp;
+ return ((instrDescCGCA*)id)->idcDisp;
}
else
{
assert(!id->idIsLargeDsp());
assert(!id->idIsLargeCns());
- return id->idAddr()->iiaAddrMode.amDisp;
+ return id->idAddr()->iiaAddrMode.amDisp;
}
}
@@ -781,8 +737,8 @@ ssize_t emitter::emitGetInsCIdisp(instrDesc* id)
* The following table is used by the instIsFP()/instUse/DefFlags() helpers.
*/
-#define INST_DEF_FL 0x20 // does the instruction set flags?
-#define INST_USE_FL 0x40 // does the instruction use flags?
+#define INST_DEF_FL 0x20 // does the instruction set flags?
+#define INST_USE_FL 0x40 // does the instruction use flags?
// clang-format off
const BYTE CodeGenInterface::instInfo[] =
@@ -827,9 +783,8 @@ const BYTE emitter::emitInsModeFmtTab[] =
};
// clang-format on
-#ifdef DEBUG
-unsigned const emitter::emitInsModeFmtCnt = sizeof(emitInsModeFmtTab)/
- sizeof(emitInsModeFmtTab[0]);
+#ifdef DEBUG
+unsigned const emitter::emitInsModeFmtCnt = sizeof(emitInsModeFmtTab) / sizeof(emitInsModeFmtTab[0]);
#endif
/*****************************************************************************
@@ -837,15 +792,13 @@ unsigned const emitter::emitInsModeFmtCnt = sizeof(emitInsModeFmtTab)/
* Combine the given base format with the update mode of the instruction.
*/
-
-inline
-emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base)
+inline emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base)
{
assert(IF_RRD + IUM_RD == IF_RRD);
assert(IF_RRD + IUM_WR == IF_RWR);
assert(IF_RRD + IUM_RW == IF_RRW);
- return (insFormat)(base + emitInsUpdateMode(ins));
+ return (insFormat)(base + emitInsUpdateMode(ins));
}
/*****************************************************************************
@@ -854,11 +807,9 @@ emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base)
*/
#if FEATURE_STACK_FP_X87
-emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base,
- insFormat FPld,
- insFormat FPst)
+emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base, insFormat FPld, insFormat FPst)
{
- if (CodeGen::instIsFP(ins))
+ if (CodeGen::instIsFP(ins))
{
assert(IF_TRD_SRD + 1 == IF_TWR_SRD);
assert(IF_TRD_SRD + 2 == IF_TRW_SRD);
@@ -871,33 +822,32 @@ emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base,
switch (ins)
{
- case INS_fst:
- case INS_fstp:
- case INS_fistp:
- case INS_fistpl:
- return (insFormat)(FPst );
+ case INS_fst:
+ case INS_fstp:
+ case INS_fistp:
+ case INS_fistpl:
+ return (insFormat)(FPst);
- case INS_fld:
- case INS_fild:
- return (insFormat)(FPld+1);
+ case INS_fld:
+ case INS_fild:
+ return (insFormat)(FPld + 1);
- case INS_fcomp:
- case INS_fcompp:
- case INS_fcomip:
- return (insFormat)(FPld );
+ case INS_fcomp:
+ case INS_fcompp:
+ case INS_fcomip:
+ return (insFormat)(FPld);
- default:
- return (insFormat)(FPld+2);
+ default:
+ return (insFormat)(FPld + 2);
}
}
else
{
- return emitInsModeFormat(ins, base);
+ return emitInsModeFormat(ins, base);
}
}
#endif // FEATURE_STACK_FP_X87
-
// This is a helper we need due to Vs Whidbey #254016 in order to distinguish
// if we cannot possibly be updating an integer register. This is not the best
// solution, but the other ones (see bug) are going to be much more complicated.
@@ -906,16 +856,12 @@ emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base,
// We really only need this for x86 where this issue exists.
bool emitter::emitInsCanOnlyWriteSSE2OrAVXReg(instrDesc* id)
{
- instruction ins = id->idIns();
-
+ instruction ins = id->idIns();
+
// The following SSE2 instructions write to a general purpose integer register.
- if (!IsSSEOrAVXInstruction(ins)
- || ins == INS_mov_xmm2i
- || ins == INS_cvttsd2si
+ if (!IsSSEOrAVXInstruction(ins) || ins == INS_mov_xmm2i || ins == INS_cvttsd2si
#ifndef LEGACY_BACKEND
- || ins == INS_cvttss2si
- || ins == INS_cvtsd2si
- || ins == INS_cvtss2si
+ || ins == INS_cvttss2si || ins == INS_cvtsd2si || ins == INS_cvtss2si
#endif // !LEGACY_BACKEND
)
{
@@ -930,8 +876,7 @@ bool emitter::emitInsCanOnlyWriteSSE2OrAVXReg(instrDesc* id)
* Returns the base encoding of the given CPU instruction.
*/
-inline
-size_t insCode(instruction ins)
+inline size_t insCode(instruction ins)
{
// clang-format off
const static
@@ -953,10 +898,10 @@ size_t insCode(instruction ins)
};
// clang-format on
- assert((unsigned)ins < sizeof(insCodes)/sizeof(insCodes[0]));
+ assert((unsigned)ins < sizeof(insCodes) / sizeof(insCodes[0]));
assert((insCodes[ins] != BAD_CODE));
- return insCodes[ins];
+ return insCodes[ins];
}
/*****************************************************************************
@@ -964,8 +909,7 @@ size_t insCode(instruction ins)
* Returns the "[r/m], 32-bit icon" encoding of the given CPU instruction.
*/
-inline
-size_t insCodeMI(instruction ins)
+inline size_t insCodeMI(instruction ins)
{
// clang-format off
const static
@@ -987,10 +931,10 @@ size_t insCodeMI(instruction ins)
};
// clang-format on
- assert((unsigned)ins < sizeof(insCodesMI)/sizeof(insCodesMI[0]));
+ assert((unsigned)ins < sizeof(insCodesMI) / sizeof(insCodesMI[0]));
assert((insCodesMI[ins] != BAD_CODE));
- return insCodesMI[ins];
+ return insCodesMI[ins];
}
/*****************************************************************************
@@ -998,8 +942,7 @@ size_t insCodeMI(instruction ins)
* Returns the "reg, [r/m]" encoding of the given CPU instruction.
*/
-inline
-size_t insCodeRM(instruction ins)
+inline size_t insCodeRM(instruction ins)
{
// clang-format off
const static
@@ -1021,10 +964,10 @@ size_t insCodeRM(instruction ins)
};
// clang-format on
- assert((unsigned)ins < sizeof(insCodesRM)/sizeof(insCodesRM[0]));
+ assert((unsigned)ins < sizeof(insCodesRM) / sizeof(insCodesRM[0]));
assert((insCodesRM[ins] != BAD_CODE));
- return insCodesRM[ins];
+ return insCodesRM[ins];
}
/*****************************************************************************
@@ -1032,8 +975,7 @@ size_t insCodeRM(instruction ins)
* Returns the "AL/AX/EAX, imm" accumulator encoding of the given instruction.
*/
-inline
-size_t insCodeACC(instruction ins)
+inline size_t insCodeACC(instruction ins)
{
// clang-format off
const static
@@ -1055,10 +997,10 @@ size_t insCodeACC(instruction ins)
};
// clang-format on
- assert((unsigned)ins < sizeof(insCodesACC)/sizeof(insCodesACC[0]));
+ assert((unsigned)ins < sizeof(insCodesACC) / sizeof(insCodesACC[0]));
assert((insCodesACC[ins] != BAD_CODE));
- return insCodesACC[ins];
+ return insCodesACC[ins];
}
/*****************************************************************************
@@ -1066,8 +1008,7 @@ size_t insCodeACC(instruction ins)
* Returns the "register" encoding of the given CPU instruction.
*/
-inline
-size_t insCodeRR(instruction ins)
+inline size_t insCodeRR(instruction ins)
{
// clang-format off
const static
@@ -1089,10 +1030,10 @@ size_t insCodeRR(instruction ins)
};
// clang-format on
- assert((unsigned)ins < sizeof(insCodesRR)/sizeof(insCodesRR[0]));
+ assert((unsigned)ins < sizeof(insCodesRR) / sizeof(insCodesRR[0]));
assert((insCodesRR[ins] != BAD_CODE));
- return insCodesRR[ins];
+ return insCodesRR[ins];
}
// clang-format off
@@ -1116,26 +1057,23 @@ size_t insCodesMR[] =
// clang-format on
// Returns true iff the given CPU instruction has an MR encoding.
-inline
-size_t hasCodeMR(instruction ins)
+inline size_t hasCodeMR(instruction ins)
{
- assert((unsigned)ins < sizeof(insCodesMR)/sizeof(insCodesMR[0]));
- return((insCodesMR[ins] != BAD_CODE));
+ assert((unsigned)ins < sizeof(insCodesMR) / sizeof(insCodesMR[0]));
+ return ((insCodesMR[ins] != BAD_CODE));
}
-
/*****************************************************************************
*
* Returns the "[r/m], reg" or "[r/m]" encoding of the given CPU instruction.
*/
-inline
-size_t insCodeMR(instruction ins)
+inline size_t insCodeMR(instruction ins)
{
- assert((unsigned)ins < sizeof(insCodesMR)/sizeof(insCodesMR[0]));
+ assert((unsigned)ins < sizeof(insCodesMR) / sizeof(insCodesMR[0]));
assert((insCodesMR[ins] != BAD_CODE));
- return insCodesMR[ins];
+ return insCodesMR[ins];
}
/*****************************************************************************
@@ -1144,8 +1082,7 @@ size_t insCodeMR(instruction ins)
* part of an opcode.
*/
-inline
-unsigned emitter::insEncodeReg012(instruction ins, regNumber reg, emitAttr size, size_t* code)
+inline unsigned emitter::insEncodeReg012(instruction ins, regNumber reg, emitAttr size, size_t* code)
{
assert(reg < REG_STK);
@@ -1186,8 +1123,7 @@ unsigned emitter::insEncodeReg012(instruction ins, regNumber reg, emi
* part of an opcode.
*/
-inline
-unsigned emitter::insEncodeReg345(instruction ins, regNumber reg, emitAttr size, size_t* code)
+inline unsigned emitter::insEncodeReg345(instruction ins, regNumber reg, emitAttr size, size_t* code)
{
assert(reg < REG_STK);
@@ -1212,11 +1148,11 @@ unsigned emitter::insEncodeReg345(instruction ins, regNumber reg, emi
reg = RegEncoding(reg);
assert(reg < 8);
- return(reg<< 3);
+ return (reg << 3);
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
assert(reg < 8);
- return(reg<< 3);
+ return (reg << 3);
#endif // LEGACY_BACKEND
}
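// --- Illustrative sketch only (not emitter code, plain standard C++): the two
// helpers above produce a 3-bit register number destined for the r/m field
// (bits 2:0, insEncodeReg012) or the reg field (bits 5:3, insEncodeReg345) of
// an x86 ModRM byte. This shows how those pieces combine with the mod field.
#include <cstdint>
#include <cassert>

static uint8_t BuildModRM(unsigned mod, unsigned reg, unsigned rm)
{
    assert(mod < 4 && reg < 8 && rm < 8);
    // ModRM layout: mod in bits 7:6, reg in bits 5:3, r/m in bits 2:0.
    return static_cast<uint8_t>((mod << 6) | (reg << 3) | rm);
}
// Example: register-direct "add eax, ecx" (opcode 01 /r) uses mod=3, reg=ECX(1),
// rm=EAX(0), giving ModRM 0xC8.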
@@ -1225,18 +1161,17 @@ unsigned emitter::insEncodeReg345(instruction ins, regNumber reg, emi
* Returns modified AVX opcode with the specified register encoded in bits 3-6 of
* byte 2 of VEX prefix.
*/
-inline
-size_t emitter::insEncodeReg3456(instruction ins, regNumber reg, emitAttr size, size_t code)
-{
-#ifdef FEATURE_AVX_SUPPORT
+inline size_t emitter::insEncodeReg3456(instruction ins, regNumber reg, emitAttr size, size_t code)
+{
+#ifdef FEATURE_AVX_SUPPORT
assert(reg < REG_STK);
assert(IsAVXInstruction(ins));
assert(hasVexPrefix(code));
- // Get 4-bit register encoding
+ // Get 4-bit register encoding
// RegEncoding() gives lower 3 bits
// IsExtendedReg() gives MSB.
- size_t regBits = RegEncoding(reg);
+ size_t regBits = RegEncoding(reg);
if (IsExtendedReg(reg))
{
regBits |= 0x08;
@@ -1253,7 +1188,6 @@ size_t emitter::insEncodeReg3456(instruction ins, regNumber reg, emit
#endif
}
-
/*****************************************************************************
*
* Returns an encoding for the specified register to be used in the bit3-5
@@ -1261,8 +1195,7 @@ size_t emitter::insEncodeReg3456(instruction ins, regNumber reg, emit
* Used exclusively to generate the REX.X bit and truncate the register.
*/
-inline
-unsigned emitter::insEncodeRegSIB(instruction ins, regNumber reg, size_t* code)
+inline unsigned emitter::insEncodeRegSIB(instruction ins, regNumber reg, size_t* code)
{
assert(reg < REG_STK);
@@ -1270,7 +1203,7 @@ unsigned emitter::insEncodeRegSIB(instruction ins, regNumber reg, siz
// Either code is not NULL or reg is not an extended reg.
// If reg is an extended reg, instruction needs to be prefixed with 'REX'
// which would require code != NULL.
- assert(code != NULL || reg < REG_R8 || (reg >= REG_XMM0 && reg < REG_XMM8));
+ assert(code != nullptr || reg < REG_R8 || (reg >= REG_XMM0 && reg < REG_XMM8));
if (IsExtendedReg(reg))
{
@@ -1288,8 +1221,7 @@ unsigned emitter::insEncodeRegSIB(instruction ins, regNumber reg, siz
* Returns the "[r/m]" opcode with the mod/RM field set to register.
*/
-inline
-size_t emitter::insEncodeMRreg(instruction ins, size_t code)
+inline size_t emitter::insEncodeMRreg(instruction ins, size_t code)
{
// If Byte 4 (which is 0xFF00) is 0, that's where the RM encoding goes.
// Otherwise, it will be placed after the 4 byte encoding.
@@ -1302,14 +1234,12 @@ size_t emitter::insEncodeMRreg(instruction ins, size_t code)
return code;
}
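// --- Sketch (assumption-labelled, not emitter code): the insEncodeMRreg /
// insEncodeMIreg / insEncodeRMreg helpers in this area mark the operand as a
// register by OR-ing 0xC000 into the opcode word. Per the comments above, the
// emitter keeps the ModRM byte in the 0xFF00 byte of that word, and mod=0b11
// ("register-direct") shifted into bits 7:6 of that byte is exactly 0xC000.
#include <cstdint>
#include <cassert>

static uint16_t MakeRegisterDirect(uint16_t code)
{
    assert((code & 0xC000) == 0);                       // mod bits must still be free
    return static_cast<uint16_t>(code | (0x3u << 14));  // 0x3 << 14 == 0xC000
}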
-
/*****************************************************************************
*
* Returns the "[r/m], icon" opcode with the mod/RM field set to register.
*/
-inline
-size_t insEncodeMIreg(instruction ins, size_t code)
+inline size_t insEncodeMIreg(instruction ins, size_t code)
{
assert((code & 0xC000) == 0);
code |= 0xC000;
@@ -1321,8 +1251,7 @@ size_t insEncodeMIreg(instruction ins, size_t code)
* Returns the given "[r/m]" opcode with the mod/RM field set to register.
*/
-inline
-size_t insEncodeRMreg(instruction ins, size_t code)
+inline size_t insEncodeRMreg(instruction ins, size_t code)
{
// If Byte 4 (which is 0xFF00) is 0, that's where the RM encoding goes.
// Otherwise, it will be placed after the 4 byte encoding.
@@ -1340,8 +1269,7 @@ size_t insEncodeRMreg(instruction ins, size_t code)
* the given register.
*/
-inline
-size_t emitter::insEncodeMRreg(instruction ins, regNumber reg, emitAttr size, size_t code)
+inline size_t emitter::insEncodeMRreg(instruction ins, regNumber reg, emitAttr size, size_t code)
{
assert((code & 0xC000) == 0);
code |= 0xC000;
@@ -1356,8 +1284,7 @@ size_t emitter::insEncodeMRreg(instruction ins, regNumber reg, emit
* the given register.
*/
-inline
-size_t emitter::insEncodeMIreg(instruction ins, regNumber reg, emitAttr size, size_t code)
+inline size_t emitter::insEncodeMIreg(instruction ins, regNumber reg, emitAttr size, size_t code)
{
assert((code & 0xC000) == 0);
code |= 0xC000;
@@ -1371,8 +1298,7 @@ size_t emitter::insEncodeMIreg(instruction ins, regNumber reg, emit
* Returns true iff the given instruction does not have a "[r/m], icon" form, but *does* have a
* "reg,reg,imm8" form.
*/
-inline
-bool insNeedsRRIb(instruction ins)
+inline bool insNeedsRRIb(instruction ins)
{
// If this list gets longer, use a switch or a table.
return ins == INS_imul;
@@ -1383,13 +1309,12 @@ bool insNeedsRRIb(instruction ins)
* Returns the "reg,reg,imm8" opcode with both the reg's set to the
* the given register.
*/
-inline
-size_t emitter::insEncodeRRIb(instruction ins, regNumber reg, emitAttr size)
+inline size_t emitter::insEncodeRRIb(instruction ins, regNumber reg, emitAttr size)
{
- assert(size == EA_4BYTE); // All we handle for now.
+ assert(size == EA_4BYTE); // All we handle for now.
assert(insNeedsRRIb(ins));
// If this list gets longer, use a switch, or a table lookup.
- size_t code = 0x69c0;
+ size_t code = 0x69c0;
unsigned regcode = insEncodeReg012(ins, reg, size, &code);
// We use the same register as source and destination. (Could have another version that does both regs...)
code |= regcode;
@@ -1403,8 +1328,7 @@ size_t emitter::insEncodeRRIb(instruction ins, regNumber reg, emitAt
* nibble of the opcode
*/
-inline
-size_t emitter::insEncodeOpreg(instruction ins, regNumber reg, emitAttr size)
+inline size_t emitter::insEncodeOpreg(instruction ins, regNumber reg, emitAttr size)
{
size_t code = insCodeRR(ins);
unsigned regcode = insEncodeReg012(ins, reg, size, &code);
@@ -1417,55 +1341,43 @@ size_t emitter::insEncodeOpreg(instruction ins, regNumber reg, emit
* Return the 'SS' field value for the given index scale factor.
*/
-inline
-unsigned insSSval(unsigned scale)
+inline unsigned insSSval(unsigned scale)
{
- assert(scale == 1 ||
- scale == 2 ||
- scale == 4 ||
- scale == 8);
+ assert(scale == 1 || scale == 2 || scale == 4 || scale == 8);
- const static
- BYTE scales[] =
- {
- 0x00, // 1
- 0x40, // 2
- 0xFF, // 3
- 0x80, // 4
- 0xFF, // 5
- 0xFF, // 6
- 0xFF, // 7
- 0xC0, // 8
+ const static BYTE scales[] = {
+ 0x00, // 1
+ 0x40, // 2
+ 0xFF, // 3
+ 0x80, // 4
+ 0xFF, // 5
+ 0xFF, // 6
+ 0xFF, // 7
+ 0xC0, // 8
};
- return scales[scale-1];
+ return scales[scale - 1];
}
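// --- Sketch only: the table above returns the scale factor pre-shifted into
// SIB bits 7:6 (1 -> 0x00, 2 -> 0x40, 4 -> 0x80, 8 -> 0xC0). A full SIB byte
// then just ORs in the index (bits 5:3) and base (bits 2:0) register numbers.
#include <cstdint>
#include <cassert>

static uint8_t BuildSIB(unsigned ssField, unsigned indexReg, unsigned baseReg)
{
    assert((ssField & ~0xC0u) == 0 && indexReg < 8 && baseReg < 8);
    return static_cast<uint8_t>(ssField | (indexReg << 3) | baseReg);
}
// Example: [rax + rcx*4] -> BuildSIB(0x80, /*rcx*/ 1, /*rax*/ 0) == 0x88.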
+const instruction emitJumpKindInstructions[] = {INS_nop,
+#define JMP_SMALL(en, rev, ins) INS_##ins,
+#include "emitjmps.h"
-const instruction emitJumpKindInstructions[] =
-{
- INS_nop,
-
- #define JMP_SMALL(en, rev, ins) INS_##ins,
- #include "emitjmps.h"
-
- INS_call
-};
+ INS_call};
-const emitJumpKind emitReverseJumpKinds[] =
-{
+const emitJumpKind emitReverseJumpKinds[] = {
EJ_NONE,
- #define JMP_SMALL(en, rev, ins) EJ_##rev,
- #include "emitjmps.h"
+#define JMP_SMALL(en, rev, ins) EJ_##rev,
+#include "emitjmps.h"
};
/*****************************************************************************
* Look up the instruction for a jump kind
*/
-/*static*/ instruction emitter::emitJumpKindToIns(emitJumpKind jumpKind)
+/*static*/ instruction emitter::emitJumpKindToIns(emitJumpKind jumpKind)
{
assert((unsigned)jumpKind < ArrLen(emitJumpKindInstructions));
return emitJumpKindInstructions[jumpKind];
@@ -1475,7 +1387,7 @@ const emitJumpKind emitReverseJumpKinds[] =
* Reverse the conditional jump
*/
-/* static */ emitJumpKind emitter::emitReverseJumpKind(emitJumpKind jumpKind)
+/* static */ emitJumpKind emitter::emitReverseJumpKind(emitJumpKind jumpKind)
{
assert(jumpKind < EJ_COUNT);
return emitReverseJumpKinds[jumpKind];
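// --- Usage sketch under assumptions: EJ_je / EJ_jne are taken to be among the
// jump kinds generated from emitjmps.h (hypothetical names here, not verified
// against that header). Reversing a condition and mapping it back to an
// instruction would look roughly like:
//
//   emitJumpKind cond    = EJ_je;
//   emitJumpKind negated = emitter::emitReverseJumpKind(cond); // e.g. EJ_jne
//   instruction  ins     = emitter::emitJumpKindToIns(negated);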
@@ -1486,35 +1398,32 @@ const emitJumpKind emitReverseJumpKinds[] =
* but the target register need not be byte-addressable
*/
-inline
-bool emitInstHasNoCode(instruction ins)
+inline bool emitInstHasNoCode(instruction ins)
{
if (ins == INS_align)
+ {
return true;
+ }
return false;
}
-
/*****************************************************************************
* When encoding instructions that operate on byte registers
* we have to ensure that we use a low register (EAX, EBX, ECX or EDX)
* otherwise we will incorrectly encode the instruction
*/
-bool emitter::emitVerifyEncodable(instruction ins,
- emitAttr size,
- regNumber reg1,
- regNumber reg2 /* = REG_NA */)
+bool emitter::emitVerifyEncodable(instruction ins, emitAttr size, regNumber reg1, regNumber reg2 /* = REG_NA */)
{
#if CPU_HAS_BYTE_REGS
- if (size != EA_1BYTE) // Not operating on a byte register is fine
+ if (size != EA_1BYTE) // Not operating on a byte register is fine
{
return true;
}
- if ((ins != INS_movsx) && // These two instructions support high register
- (ins != INS_movzx) ) // encodings for reg1
+ if ((ins != INS_movsx) && // These two instructions support high register
+ (ins != INS_movzx)) // encodings for reg1
{
// reg1 must be a byte-able register
if ((genRegMask(reg1) & RBM_BYTE_REGS) == 0)
@@ -1523,8 +1432,7 @@ bool emitter::emitVerifyEncodable(instruction ins,
}
}
// if reg2 is not REG_NA then reg2 must be a byte-able register
- if ((reg2 != REG_NA) &&
- ((genRegMask(reg2) & RBM_BYTE_REGS) == 0))
+ if ((reg2 != REG_NA) && ((genRegMask(reg2) & RBM_BYTE_REGS) == 0))
{
return false;
}
@@ -1538,8 +1446,7 @@ bool emitter::emitVerifyEncodable(instruction ins,
* Estimate the size (in bytes of generated code) of the given instruction.
*/
-inline
-UNATIVE_OFFSET emitter::emitInsSize(size_t code)
+inline UNATIVE_OFFSET emitter::emitInsSize(size_t code)
{
UNATIVE_OFFSET size = (code & 0xFF000000) ? 4 : (code & 0x00FF0000) ? 3 : 2;
#ifdef _TARGET_AMD64_
@@ -1548,18 +1455,16 @@ UNATIVE_OFFSET emitter::emitInsSize(size_t code)
return size;
}
-inline
-UNATIVE_OFFSET emitter::emitInsSizeRM(instruction ins)
+inline UNATIVE_OFFSET emitter::emitInsSizeRM(instruction ins)
{
- return emitInsSize(insCodeRM(ins));
+ return emitInsSize(insCodeRM(ins));
}
-inline
-UNATIVE_OFFSET emitter::emitInsSizeRR(instruction ins, regNumber reg1, regNumber reg2, emitAttr attr)
+inline UNATIVE_OFFSET emitter::emitInsSizeRR(instruction ins, regNumber reg1, regNumber reg2, emitAttr attr)
{
- emitAttr size = EA_SIZE(attr);
+ emitAttr size = EA_SIZE(attr);
- UNATIVE_OFFSET sz;
+ UNATIVE_OFFSET sz;
#ifdef _TARGET_AMD64_
// If Byte 4 (which is 0xFF00) is non-zero, that's where the RM encoding goes.
// Otherwise, it will be placed after the 4 byte encoding, making the total 5 bytes.
@@ -1572,12 +1477,11 @@ UNATIVE_OFFSET emitter::emitInsSizeRR(instruction ins, regNumber reg1, regN
#endif // _TARGET_AMD64_
{
size_t code = insCodeRM(ins);
- sz = emitInsSize(insEncodeRMreg(ins, code));
+ sz = emitInsSize(insEncodeRMreg(ins, code));
}
// Most 16-bit operand instructions will need a prefix
- if (size == EA_2BYTE && ins != INS_movsx
- && ins != INS_movzx)
+ if (size == EA_2BYTE && ins != INS_movsx && ins != INS_movzx)
{
sz += 1;
}
@@ -1586,9 +1490,8 @@ UNATIVE_OFFSET emitter::emitInsSizeRR(instruction ins, regNumber reg1, regN
sz += emitGetVexPrefixAdjustedSize(ins, size, insCodeRM(ins));
// REX prefix
- if ((TakesRexWPrefix(ins, size) && ((ins != INS_xor) || (reg1 != reg2)))
- || IsExtendedReg(reg1, attr)
- || IsExtendedReg(reg2, attr))
+ if ((TakesRexWPrefix(ins, size) && ((ins != INS_xor) || (reg1 != reg2))) || IsExtendedReg(reg1, attr) ||
+ IsExtendedReg(reg2, attr))
{
sz += emitGetRexPrefixSize(ins);
}
@@ -1598,22 +1501,23 @@ UNATIVE_OFFSET emitter::emitInsSizeRR(instruction ins, regNumber reg1, regN
/*****************************************************************************/
-inline
-UNATIVE_OFFSET emitter::emitInsSizeSV(size_t code, int var, int dsp)
+inline UNATIVE_OFFSET emitter::emitInsSizeSV(size_t code, int var, int dsp)
{
- UNATIVE_OFFSET size = emitInsSize(code);
- UNATIVE_OFFSET offs;
- bool offsIsUpperBound = true;
- bool EBPbased = true;
+ UNATIVE_OFFSET size = emitInsSize(code);
+ UNATIVE_OFFSET offs;
+ bool offsIsUpperBound = true;
+ bool EBPbased = true;
/* Is this a temporary? */
- if (var < 0)
+ if (var < 0)
{
/* An address off of ESP takes an extra byte */
- if (!emitHasFramePtr)
+ if (!emitHasFramePtr)
+ {
size++;
+ }
#ifndef LEGACY_BACKEND
// The offset is already assigned. Find the temp.
@@ -1639,7 +1543,7 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(size_t code, int var, int dsp)
// SP-based offsets must already be positive.
assert((int)offs >= 0);
}
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
/* We'll have to estimate the max. possible offset of this temp */
// TODO: Get an estimate of the temp offset instead of assuming
@@ -1673,15 +1577,15 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(size_t code, int var, int dsp)
/* Is this a stack parameter reference? */
- if ( emitComp->lvaIsParameter (var)
+ if (emitComp->lvaIsParameter(var)
#if !defined(_TARGET_AMD64_) || defined(UNIX_AMD64_ABI)
- && !emitComp->lvaIsRegArgument(var)
+ && !emitComp->lvaIsRegArgument(var)
#endif // !_TARGET_AMD64_ || UNIX_AMD64_ABI
- )
+ )
{
/* If no EBP frame, arguments are off of ESP, above temps */
- if (!EBPbased)
+ if (!EBPbased)
{
assert((int)offs >= 0);
@@ -1693,7 +1597,7 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(size_t code, int var, int dsp)
{
/* Locals off of EBP are at negative offsets */
- if (EBPbased)
+ if (EBPbased)
{
#if defined(_TARGET_AMD64_) && !defined(PLATFORM_UNIX)
// If localloc is not used, then ebp chaining is done and hence
@@ -1714,15 +1618,14 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(size_t code, int var, int dsp)
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef UNIX_AMD64_ABI
- LclVarDsc* varDsc = emitComp->lvaTable + var;
- bool isRegPassedArg = varDsc->lvIsParam && varDsc->lvIsRegArg;
+ LclVarDsc* varDsc = emitComp->lvaTable + var;
+ bool isRegPassedArg = varDsc->lvIsParam && varDsc->lvIsRegArg;
// Register passed args could have a stack offset of 0.
noway_assert((int)offs < 0 || isRegPassedArg);
-#else // !UNIX_AMD64_ABI
+#else // !UNIX_AMD64_ABI
noway_assert((int)offs < 0);
#endif // !UNIX_AMD64_ABI
}
-
assert(emitComp->lvaTempsHaveLargerOffsetThanVars());
@@ -1733,17 +1636,17 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(size_t code, int var, int dsp)
offs -= emitMaxTmpSize;
}
- if ((int) offs < 0)
+ if ((int)offs < 0)
{
// offset is negative
- return size + ((int(offs) >= SCHAR_MIN) ? sizeof(char) : sizeof( int));
+ return size + ((int(offs) >= SCHAR_MIN) ? sizeof(char) : sizeof(int));
}
#ifdef _TARGET_AMD64_
// This case arises for localloc frames
else
{
- return size + ((offs <= SCHAR_MAX) ? sizeof(char) : sizeof(int));
- }
+ return size + ((offs <= SCHAR_MAX) ? sizeof(char) : sizeof(int));
+ }
#endif
}
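// --- Sketch (not emitter code) of the disp8-vs-disp32 choice made above: a
// stack offset that fits in a signed byte costs one displacement byte, any
// other offset costs a full 32-bit displacement.
#include <climits>
#include <cstddef>

static size_t DispBytes(int offs)
{
    return (offs >= SCHAR_MIN && offs <= SCHAR_MAX) ? sizeof(char) : sizeof(int);
}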
@@ -1760,11 +1663,11 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(size_t code, int var, int dsp)
/* Are we addressing off of ESP? */
- if (!emitHasFramePtr)
+ if (!emitHasFramePtr)
{
/* Adjust the effective offset if necessary */
- if (emitCntStackDepth)
+ if (emitCntStackDepth)
offs += emitCurStackLvl;
// we could (and used to) check for the special case [sp] here but the stack offset
@@ -1795,7 +1698,7 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(size_t code, int var, int dsp)
emitGrowableMaxByteOffs = offs;
#ifdef DEBUG
// Remember which instruction this is
- emitMaxByteOffsIdNum = emitInsCount;
+ emitMaxByteOffsIdNum = emitInsCount;
#endif
}
}
@@ -1808,13 +1711,11 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(size_t code, int var, int dsp)
}
else
{
- return size + (useSmallEncoding ? sizeof(char)
- : sizeof( int));
+ return size + (useSmallEncoding ? sizeof(char) : sizeof(int));
}
}
-inline
-UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, int var, int dsp, int val)
+inline UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, int var, int dsp, int val)
{
instruction ins = id->idIns();
UNATIVE_OFFSET valSize = EA_SIZE_IN_BYTES(id->idOpSize());
@@ -1827,7 +1728,7 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, int var, int dsp, int
noway_assert(valSize <= sizeof(int) || !id->idIsCnsReloc());
#endif // _TARGET_AMD64_
- if (valSize > sizeof(int))
+ if (valSize > sizeof(int))
{
valSize = sizeof(int);
}
@@ -1835,12 +1736,12 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, int var, int dsp, int
#ifdef RELOC_SUPPORT
if (id->idIsCnsReloc())
{
- valInByte = false; // relocs can't be placed in a byte
+ valInByte = false; // relocs can't be placed in a byte
assert(valSize == sizeof(int));
}
#endif
- if (valInByte)
+ if (valInByte)
{
valSize = sizeof(char);
}
@@ -1848,7 +1749,7 @@ UNATIVE_OFFSET emitter::emitInsSizeSV(instrDesc* id, int var, int dsp, int
// 16-bit operand instructions need a prefix.
// This referes to 66h size prefix override
if (id->idOpSize() == EA_2BYTE)
- {
+ {
prefix = 1;
}
@@ -1875,22 +1776,21 @@ static bool baseRegisterRequiresDisplacement(regNumber base)
#endif
}
-UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code)
+UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code)
{
- emitAttr attrSize = id->idOpSize();
+ emitAttr attrSize = id->idOpSize();
instruction ins = id->idIns();
/* The displacement field is in an unusual place for calls */
- ssize_t dsp = (ins == INS_call) ? emitGetInsCIdisp(id)
- : emitGetInsAmdAny(id);
- bool dspInByte = ((signed char)dsp == (ssize_t)dsp);
- bool dspIsZero = (dsp == 0);
+ ssize_t dsp = (ins == INS_call) ? emitGetInsCIdisp(id) : emitGetInsAmdAny(id);
+ bool dspInByte = ((signed char)dsp == (ssize_t)dsp);
+ bool dspIsZero = (dsp == 0);
UNATIVE_OFFSET size;
// Note that the values in reg and rgx are used in this method to decide
- // how many bytes will be needed by the address [reg+rgx+cns]
+ // how many bytes will be needed by the address [reg+rgx+cns]
// this includes the prefix bytes when reg or rgx are registers R8-R15
- regNumber reg;
- regNumber rgx;
+ regNumber reg;
+ regNumber rgx;
// The idAddr field is a union and only some of the instruction formats use the iiaAddrMode variant
// these are IF_AWR_*, IF_ARD_*, IF_ARW_* and IF_*_ARD
@@ -1901,39 +1801,37 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code)
switch (id->idInsFmt())
{
- case IF_RWR_LABEL:
- case IF_MRW_CNS:
- case IF_MRW_RRD:
- case IF_MRW_SHF:
- reg = REG_NA;
- rgx = REG_NA;
- break;
+ case IF_RWR_LABEL:
+ case IF_MRW_CNS:
+ case IF_MRW_RRD:
+ case IF_MRW_SHF:
+ reg = REG_NA;
+ rgx = REG_NA;
+ break;
- default:
- reg = id->idAddr()->iiaAddrMode.amBaseReg;
- rgx = id->idAddr()->iiaAddrMode.amIndxReg;
- break;
+ default:
+ reg = id->idAddr()->iiaAddrMode.amBaseReg;
+ rgx = id->idAddr()->iiaAddrMode.amIndxReg;
+ break;
}
#ifdef RELOC_SUPPORT
if (id->idIsDspReloc())
{
- dspInByte = false; // relocs can't be placed in a byte
- dspIsZero = false; // relocs won't always be zero
+ dspInByte = false; // relocs can't be placed in a byte
+ dspIsZero = false; // relocs won't always be zero
}
#endif
- if (code & 0xFF000000)
+ if (code & 0xFF000000)
{
size = 4;
}
- else if(code & 0x00FF0000)
+ else if (code & 0x00FF0000)
{
- assert( (attrSize == EA_4BYTE)
- || (attrSize == EA_PTRSIZE) // Only for x64
- || (attrSize == EA_16BYTE) // only for x64
- || (ins == INS_movzx)
- || (ins == INS_movsx));
+ assert((attrSize == EA_4BYTE) || (attrSize == EA_PTRSIZE) // Only for x64
+ || (attrSize == EA_16BYTE) // only for x64
+ || (ins == INS_movzx) || (ins == INS_movsx));
size = 3;
}
@@ -1947,7 +1845,7 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code)
#if FEATURE_STACK_FP_X87
if ((attrSize == EA_2BYTE) && (ins != INS_fldcw) && (ins != INS_fnstcw))
-#else // FEATURE_STACK_FP_X87
+#else // FEATURE_STACK_FP_X87
if (attrSize == EA_2BYTE)
#endif // FEATURE_STACK_FP_X87
{
@@ -1958,7 +1856,7 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code)
#ifdef _TARGET_AMD64_
size += emitGetVexPrefixAdjustedSize(ins, attrSize, code);
- if (code & REX_PREFIX_MASK)
+ if (code & REX_PREFIX_MASK)
{
// REX prefix
size += emitGetRexPrefixSize(ins);
@@ -1968,15 +1866,14 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code)
// REX.W prefix
size += emitGetRexPrefixSize(ins);
}
- else if (IsExtendedReg(reg, EA_PTRSIZE) || IsExtendedReg(rgx, EA_PTRSIZE) ||
- IsExtendedReg(id->idReg1(), attrSize))
+ else if (IsExtendedReg(reg, EA_PTRSIZE) || IsExtendedReg(rgx, EA_PTRSIZE) || IsExtendedReg(id->idReg1(), attrSize))
{
// Should have a REX byte
size += emitGetRexPrefixSize(ins);
}
#endif // _TARGET_AMD64_
- if (rgx == REG_NA)
+ if (rgx == REG_NA)
{
/* The address is of the form "[reg+disp]" */
@@ -2011,10 +1908,14 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code)
/* Does the offset fit in a byte? */
- if (dspInByte)
+ if (dspInByte)
+ {
size += sizeof(char);
+ }
else
+ {
size += sizeof(INT32);
+ }
}
else
{
@@ -2024,27 +1925,30 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code)
/* Is the index value scaled? */
- if (emitDecodeScale(id->idAddr()->iiaAddrMode.amScale) > 1)
+ if (emitDecodeScale(id->idAddr()->iiaAddrMode.amScale) > 1)
{
/* Is there a base register? */
- if (reg != REG_NA)
+ if (reg != REG_NA)
{
/* The address is "[reg + {2/4/8} * rgx + icon]" */
- if (dspIsZero && !baseRegisterRequiresDisplacement(reg))
+ if (dspIsZero && !baseRegisterRequiresDisplacement(reg))
{
/* The address is "[reg + {2/4/8} * rgx]" */
-
}
else
{
/* The address is "[reg + {2/4/8} * rgx + disp]" */
- if (dspInByte)
+ if (dspInByte)
+ {
size += sizeof(char);
+ }
else
- size += sizeof(int );
+ {
+ size += sizeof(int);
+ }
}
}
else
@@ -2056,38 +1960,40 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code)
}
else
{
- if (dspIsZero && baseRegisterRequiresDisplacement(reg) && !baseRegisterRequiresDisplacement(rgx))
+ if (dspIsZero && baseRegisterRequiresDisplacement(reg) && !baseRegisterRequiresDisplacement(rgx))
{
/* Swap reg and rgx, such that reg is not EBP/R13 */
- regNumber tmp = reg;
+ regNumber tmp = reg;
id->idAddr()->iiaAddrMode.amBaseReg = reg = rgx;
id->idAddr()->iiaAddrMode.amIndxReg = rgx = tmp;
}
/* The address is "[reg+rgx+dsp]" */
- if (dspIsZero && !baseRegisterRequiresDisplacement(reg))
+ if (dspIsZero && !baseRegisterRequiresDisplacement(reg))
{
/* This is [reg+rgx]" */
-
}
else
{
/* This is [reg+rgx+dsp]" */
- if (dspInByte)
+ if (dspInByte)
+ {
size += sizeof(char);
+ }
else
- size += sizeof(int );
+ {
+ size += sizeof(int);
+ }
}
}
}
- return size;
+ return size;
}
-inline
-UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code, int val)
+inline UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code, int val)
{
instruction ins = id->idIns();
UNATIVE_OFFSET valSize = EA_SIZE_IN_BYTES(id->idOpSize());
@@ -2099,7 +2005,7 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code, int val)
noway_assert(valSize <= sizeof(INT32) || !id->idIsCnsReloc());
#endif // _TARGET_AMD64_
- if (valSize > sizeof(INT32))
+ if (valSize > sizeof(INT32))
{
valSize = sizeof(INT32);
}
@@ -2107,44 +2013,40 @@ UNATIVE_OFFSET emitter::emitInsSizeAM(instrDesc* id, size_t code, int val)
#ifdef RELOC_SUPPORT
if (id->idIsCnsReloc())
{
- valInByte = false; // relocs can't be placed in a byte
+ valInByte = false; // relocs can't be placed in a byte
assert(valSize == sizeof(INT32));
}
#endif
- if (valInByte)
+ if (valInByte)
{
valSize = sizeof(char);
}
- return valSize + emitInsSizeAM(id, code);
+ return valSize + emitInsSizeAM(id, code);
}
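// --- Sketch of the immediate-size rule used by emitInsSizeAM/emitInsSizeCV
// above (illustrative only): a relocatable constant always takes a full 32-bit
// immediate; otherwise a value that fits in a signed byte is emitted as imm8.
#include <climits>
#include <cstddef>

static size_t ImmBytes(int val, bool isReloc)
{
    if (isReloc)
    {
        return sizeof(int); // relocs can't be placed in a byte
    }
    return (val >= SCHAR_MIN && val <= SCHAR_MAX) ? sizeof(char) : sizeof(int);
}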
-inline
-UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, size_t code)
+inline UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, size_t code)
{
- instruction ins = id->idIns();
+ instruction ins = id->idIns();
// fgMorph changes any statics that won't fit into 32-bit addresses
// into constants with an indir, rather than GT_CLS_VAR
// so we should only hit this path for statics that are RIP-relative
- UNATIVE_OFFSET size = sizeof(INT32);
+ UNATIVE_OFFSET size = sizeof(INT32);
// Most 16-bit operand instructions will need a prefix.
// This refers to 66h size prefix override.
- if (id->idOpSize() == EA_2BYTE &&
- ins != INS_movzx &&
- ins != INS_movsx)
+ if (id->idOpSize() == EA_2BYTE && ins != INS_movzx && ins != INS_movsx)
{
size++;
}
- return size + emitInsSize(code);
+ return size + emitInsSize(code);
}
-inline
-UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, size_t code, int val)
+inline UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, size_t code, int val)
{
instruction ins = id->idIns();
UNATIVE_OFFSET valSize = EA_SIZE_IN_BYTES(id->idOpSize());
@@ -2152,19 +2054,19 @@ UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, size_t code, int val)
#ifndef _TARGET_AMD64_
// occasionally longs get here on x86
- if (valSize > sizeof(INT32))
+ if (valSize > sizeof(INT32))
valSize = sizeof(INT32);
#endif // !_TARGET_AMD64_
#ifdef RELOC_SUPPORT
if (id->idIsCnsReloc())
{
- valInByte = false; // relocs can't be placed in a byte
+ valInByte = false; // relocs can't be placed in a byte
assert(valSize == sizeof(INT32));
}
#endif
- if (valInByte)
+ if (valInByte)
{
valSize = sizeof(char);
}
@@ -2177,29 +2079,28 @@ UNATIVE_OFFSET emitter::emitInsSizeCV(instrDesc* id, size_t code, int val)
* Allocate instruction descriptors for instructions with address modes.
*/
-inline
-emitter::instrDesc* emitter::emitNewInstrAmd (emitAttr size, ssize_t dsp)
+inline emitter::instrDesc* emitter::emitNewInstrAmd(emitAttr size, ssize_t dsp)
{
- if (dsp < AM_DISP_MIN || dsp > AM_DISP_MAX)
+ if (dsp < AM_DISP_MIN || dsp > AM_DISP_MAX)
{
- instrDescAmd* id = emitAllocInstrAmd (size);
+ instrDescAmd* id = emitAllocInstrAmd(size);
id->idSetIsLargeDsp();
-#ifdef DEBUG
- id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
+#ifdef DEBUG
+ id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
#endif
- id->idaAmdVal = dsp;
+ id->idaAmdVal = dsp;
- return id;
+ return id;
}
else
{
- instrDesc* id = emitAllocInstr (size);
+ instrDesc* id = emitAllocInstr(size);
- id->idAddr()->iiaAddrMode.amDisp = dsp;
- assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit
+ id->idAddr()->iiaAddrMode.amDisp = dsp;
+ assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit
- return id;
+ return id;
}
}
@@ -2208,97 +2109,92 @@ emitter::instrDesc* emitter::emitNewInstrAmd (emitAttr size, ssize_t dsp)
* Set the displacement field in an instruction. Only handles instrDescAmd type.
*/
-inline
-void emitter::emitSetAmdDisp(instrDescAmd* id, ssize_t dsp)
+inline void emitter::emitSetAmdDisp(instrDescAmd* id, ssize_t dsp)
{
- if (dsp < AM_DISP_MIN || dsp > AM_DISP_MAX)
+ if (dsp < AM_DISP_MIN || dsp > AM_DISP_MAX)
{
id->idSetIsLargeDsp();
-#ifdef DEBUG
- id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
+#ifdef DEBUG
+ id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
#endif
- id->idaAmdVal = dsp;
+ id->idaAmdVal = dsp;
}
else
{
id->idSetIsSmallDsp();
- id->idAddr()->iiaAddrMode.amDisp = dsp;
- assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit
+ id->idAddr()->iiaAddrMode.amDisp = dsp;
+ assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit
}
}
-
/*****************************************************************************
*
* Allocate an instruction descriptor for an instruction that uses both
* an address mode displacement and a constant.
*/
-emitter::instrDesc* emitter::emitNewInstrAmdCns(emitAttr size, ssize_t dsp, int cns)
+emitter::instrDesc* emitter::emitNewInstrAmdCns(emitAttr size, ssize_t dsp, int cns)
{
- if (dsp >= AM_DISP_MIN && dsp <= AM_DISP_MAX)
+ if (dsp >= AM_DISP_MIN && dsp <= AM_DISP_MAX)
{
- if (cns >= ID_MIN_SMALL_CNS &&
- cns <= ID_MAX_SMALL_CNS)
+ if (cns >= ID_MIN_SMALL_CNS && cns <= ID_MAX_SMALL_CNS)
{
- instrDesc* id = emitAllocInstr (size);
+ instrDesc* id = emitAllocInstr(size);
id->idSmallCns(cns);
- id->idAddr()->iiaAddrMode.amDisp = dsp;
- assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit
+ id->idAddr()->iiaAddrMode.amDisp = dsp;
+ assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit
- return id;
+ return id;
}
else
{
- instrDescCns* id = emitAllocInstrCns (size);
+ instrDescCns* id = emitAllocInstrCns(size);
id->idSetIsLargeCns();
- id->idcCnsVal = cns;
+ id->idcCnsVal = cns;
- id->idAddr()->iiaAddrMode.amDisp = dsp;
- assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit
+ id->idAddr()->iiaAddrMode.amDisp = dsp;
+ assert(id->idAddr()->iiaAddrMode.amDisp == dsp); // make sure the value fit
- return id;
+ return id;
}
}
else
{
- if (cns >= ID_MIN_SMALL_CNS &&
- cns <= ID_MAX_SMALL_CNS)
+ if (cns >= ID_MIN_SMALL_CNS && cns <= ID_MAX_SMALL_CNS)
{
- instrDescAmd* id = emitAllocInstrAmd (size);
+ instrDescAmd* id = emitAllocInstrAmd(size);
id->idSetIsLargeDsp();
-#ifdef DEBUG
- id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
+#ifdef DEBUG
+ id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
#endif
- id->idaAmdVal = dsp;
+ id->idaAmdVal = dsp;
id->idSmallCns(cns);
- return id;
+ return id;
}
else
{
instrDescCnsAmd* id = emitAllocInstrCnsAmd(size);
id->idSetIsLargeCns();
- id->idacCnsVal = cns;
+ id->idacCnsVal = cns;
id->idSetIsLargeDsp();
-#ifdef DEBUG
- id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
+#ifdef DEBUG
+ id->idAddr()->iiaAddrMode.amDisp = AM_DISP_BIG_VAL;
#endif
- id->idacAmdVal = dsp;
+ id->idacAmdVal = dsp;
- return id;
+ return id;
}
}
}
-
/*****************************************************************************
*
* The next instruction will be a loop head entry point
@@ -2306,24 +2202,23 @@ emitter::instrDesc* emitter::emitNewInstrAmdCns(emitAttr size, ssize_t dsp, in
* the x86 I-cache alignment rule is followed.
*/
-void emitter::emitLoopAlign()
+void emitter::emitLoopAlign()
{
/* Insert a pseudo-instruction to ensure that we align
the next instruction properly */
- instrDesc* id = emitNewInstrTiny(EA_1BYTE);
+ instrDesc* id = emitNewInstrTiny(EA_1BYTE);
id->idIns(INS_align);
- id->idCodeSize(15); // We may need to skip up to 15 bytes of code
+ id->idCodeSize(15); // We may need to skip up to 15 bytes of code
emitCurIGsize += 15;
}
-
/*****************************************************************************
*
* Add a NOP instruction of the given size.
*/
-void emitter::emitIns_Nop(unsigned size)
+void emitter::emitIns_Nop(unsigned size)
{
assert(size <= 15);
@@ -2341,83 +2236,64 @@ void emitter::emitIns_Nop(unsigned size)
* Add an instruction with no operands.
*/
#ifdef DEBUG
-static bool isX87InsWithNoOperands(instruction ins)
+static bool isX87InsWithNoOperands(instruction ins)
{
#if FEATURE_STACK_FP_X87
- return (
- ins == INS_f2xm1 ||
- ins == INS_fchs ||
- ins == INS_fld1 ||
- ins == INS_fld1 ||
- ins == INS_fldl2e ||
- ins == INS_fldz ||
- ins == INS_fprem ||
- ins == INS_frndint ||
- ins == INS_fscale );
-#else // !FEATURE_STACK_FP_X87
+ return (ins == INS_f2xm1 || ins == INS_fchs || ins == INS_fld1 || ins == INS_fld1 || ins == INS_fldl2e ||
+ ins == INS_fldz || ins == INS_fprem || ins == INS_frndint || ins == INS_fscale);
+#else // !FEATURE_STACK_FP_X87
return false;
#endif // !FEATURE_STACK_FP_X87
}
#endif // DEBUG
-void emitter::emitIns(instruction ins)
+void emitter::emitIns(instruction ins)
{
UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstr();
- size_t code = insCodeMR(ins);
+ instrDesc* id = emitNewInstr();
+ size_t code = insCodeMR(ins);
-#ifdef DEBUG
+#ifdef DEBUG
#if FEATURE_STACK_FP_X87
- if (ins != INS_fabs &&
- ins != INS_fsqrt &&
- ins != INS_fsin &&
- ins != INS_fcos)
+ if (ins != INS_fabs && ins != INS_fsqrt && ins != INS_fsin && ins != INS_fcos)
#endif // FEATURE_STACK_FP_X87
{
// We cannot have #ifdef inside macro expansion.
- bool assertCond =
- (ins == INS_cdq ||
- isX87InsWithNoOperands(ins) ||
- ins == INS_int3 ||
- ins == INS_lock ||
- ins == INS_leave ||
- ins == INS_movsb ||
- ins == INS_movsd ||
- ins == INS_movsp ||
- ins == INS_nop ||
- ins == INS_r_movsb ||
- ins == INS_r_movsd ||
- ins == INS_r_movsp ||
- ins == INS_r_stosb ||
- ins == INS_r_stosd ||
- ins == INS_r_stosp ||
- ins == INS_ret ||
- ins == INS_sahf ||
- ins == INS_stosb ||
- ins == INS_stosd ||
- ins == INS_stosp
+ bool assertCond = (ins == INS_cdq || isX87InsWithNoOperands(ins) || ins == INS_int3 || ins == INS_lock ||
+ ins == INS_leave || ins == INS_movsb || ins == INS_movsd || ins == INS_movsp ||
+ ins == INS_nop || ins == INS_r_movsb || ins == INS_r_movsd || ins == INS_r_movsp ||
+ ins == INS_r_stosb || ins == INS_r_stosd || ins == INS_r_stosp || ins == INS_ret ||
+ ins == INS_sahf || ins == INS_stosb || ins == INS_stosd || ins == INS_stosp
#ifndef LEGACY_BACKEND
- || ins == INS_vzeroupper
+ || ins == INS_vzeroupper
#endif
- );
+ );
- assert(assertCond);
+ assert(assertCond);
}
-#endif //DEBUG
+#endif // DEBUG
#ifdef _TARGET_AMD64_
assert((code & REX_PREFIX_MASK) == 0); // Can't have a REX bit with no operands, right?
-#endif // _TARGET_AMD64_
+#endif // _TARGET_AMD64_
if (code & 0xFF000000)
+ {
sz = 2; // TODO-XArch-Bug?: Shouldn't this be 4? Or maybe we should assert that we don't see this case.
+ }
else if (code & 0x00FF0000)
+ {
sz = 3;
+ }
else if (code & 0x0000FF00)
+ {
sz = 2;
+ }
else
+ {
sz = 1;
+ }
#ifndef LEGACY_BACKEND
// Account for 2-byte VEX prefix in case of vzeroupper
@@ -2450,8 +2326,8 @@ void emitter::emitIns(instruction ins)
void emitter::emitIns(instruction ins, emitAttr attr)
{
UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstr(attr);
- size_t code = insCodeMR(ins);
+ instrDesc* id = emitNewInstr(attr);
+ size_t code = insCodeMR(ins);
assert(ins == INS_cdq);
assert((code & 0xFFFFFF00) == 0);
sz = 1;
@@ -2460,7 +2336,9 @@ void emitter::emitIns(instruction ins, emitAttr attr)
sz += emitGetVexPrefixAdjustedSize(ins, attr, code);
if (TakesRexWPrefix(ins, attr))
+ {
sz += emitGetRexPrefixSize(ins);
+ }
id->idIns(ins);
id->idInsFmt(fmt);
@@ -2470,7 +2348,6 @@ void emitter::emitIns(instruction ins, emitAttr attr)
emitCurIGsize += sz;
}
-
//------------------------------------------------------------------------
// emitMapFmtForIns: map the instruction format based on the instruction.
// Shift-by-a-constant instructions have a special format.
@@ -2486,30 +2363,34 @@ emitter::insFormat emitter::emitMapFmtForIns(insFormat fmt, instruction ins)
{
switch (ins)
{
- case INS_rol_N:
- case INS_ror_N:
- case INS_rcl_N:
- case INS_rcr_N:
- case INS_shl_N:
- case INS_shr_N:
- case INS_sar_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
{
switch (fmt)
{
- case IF_RRW_CNS: return IF_RRW_SHF;
- case IF_MRW_CNS: return IF_MRW_SHF;
- case IF_SRW_CNS: return IF_SRW_SHF;
- case IF_ARW_CNS: return IF_ARW_SHF;
- default: unreached();
+ case IF_RRW_CNS:
+ return IF_RRW_SHF;
+ case IF_MRW_CNS:
+ return IF_MRW_SHF;
+ case IF_SRW_CNS:
+ return IF_SRW_SHF;
+ case IF_ARW_CNS:
+ return IF_ARW_SHF;
+ default:
+ unreached();
}
}
- default:
- return fmt;
+ default:
+ return fmt;
}
}
-
//------------------------------------------------------------------------
// emitMapFmtAtoM: map the address mode formats ARD, ARW, and AWR to their direct address equivalents.
//
@@ -2518,34 +2399,47 @@ emitter::insFormat emitter::emitMapFmtForIns(insFormat fmt, instruction ins)
//
// Returns:
// The mapped instruction format.
-//
+//
emitter::insFormat emitter::emitMapFmtAtoM(insFormat fmt)
{
switch (fmt)
{
- case IF_ARD: return IF_MRD;
- case IF_AWR: return IF_MWR;
- case IF_ARW: return IF_MRW;
+ case IF_ARD:
+ return IF_MRD;
+ case IF_AWR:
+ return IF_MWR;
+ case IF_ARW:
+ return IF_MRW;
- case IF_RRD_ARD: return IF_RRD_MRD;
- case IF_RWR_ARD: return IF_RWR_MRD;
- case IF_RRW_ARD: return IF_RRW_MRD;
+ case IF_RRD_ARD:
+ return IF_RRD_MRD;
+ case IF_RWR_ARD:
+ return IF_RWR_MRD;
+ case IF_RRW_ARD:
+ return IF_RRW_MRD;
- case IF_ARD_RRD: return IF_MRD_RRD;
- case IF_AWR_RRD: return IF_MWR_RRD;
- case IF_ARW_RRD: return IF_MRW_RRD;
+ case IF_ARD_RRD:
+ return IF_MRD_RRD;
+ case IF_AWR_RRD:
+ return IF_MWR_RRD;
+ case IF_ARW_RRD:
+ return IF_MRW_RRD;
- case IF_ARD_CNS: return IF_MRD_CNS;
- case IF_AWR_CNS: return IF_MWR_CNS;
- case IF_ARW_CNS: return IF_MRW_CNS;
+ case IF_ARD_CNS:
+ return IF_MRD_CNS;
+ case IF_AWR_CNS:
+ return IF_MWR_CNS;
+ case IF_ARW_CNS:
+ return IF_MRW_CNS;
- case IF_ARW_SHF: return IF_MRW_SHF;
+ case IF_ARW_SHF:
+ return IF_MRW_SHF;
- default: unreached();
+ default:
+ unreached();
}
}
-
//------------------------------------------------------------------------
// emitHandleMemOp: For a memory operand, fill in the relevant fields of the instrDesc.
//
@@ -2571,7 +2465,7 @@ emitter::insFormat emitter::emitMapFmtAtoM(insFormat fmt)
// The instruction format is set.
//
// idSetIsDspReloc() is called if necessary.
-//
+//
void emitter::emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt, instruction ins)
{
assert(fmt != IF_NONE);
@@ -2585,12 +2479,12 @@ void emitter::emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt,
// Static always need relocs
if (!jitStaticFldIsGlobAddr(fldHnd))
{
- // Contract:
+ // Contract:
// fgMorphField() changes any statics that won't fit into 32-bit addresses into
// constants with an indir, rather than GT_CLS_VAR, based on reloc type hint given
// by VM. Hence emitter should always mark GT_CLS_VAR_ADDR as relocatable.
//
- // Data section constants: these get allocated close to code block of the method and
+ // Data section constants: these get allocated close to code block of the method and
// always addressable IP relative. These too should be marked as relocatable.
id->idSetIsDspReloc();
@@ -2603,7 +2497,7 @@ void emitter::emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt,
{
// Absolute addresses marked as contained should fit within the base of addr mode.
assert(memBase->AsIntConCommon()->FitsInAddrBase(emitComp));
-
+
// Either not generating relocatable code or addr must be an icon handle
assert(!emitComp->opts.compReloc || memBase->IsIconHandle());
@@ -2640,7 +2534,7 @@ void emitter::emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt,
{
id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;
}
- id->idAddr()->iiaAddrMode.amScale = emitEncodeScale(indir->Scale());
+ id->idAddr()->iiaAddrMode.amScale = emitEncodeScale(indir->Scale());
id->idInsFmt(emitMapFmtForIns(fmt, ins));
@@ -2653,8 +2547,8 @@ void emitter::emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt,
// into its corresponding shadow space (defined by the x64 ABI)
void emitter::spillIntArgRegsToShadowSlots()
{
- unsigned argNum;
- instrDesc* id;
+ unsigned argNum;
+ instrDesc* id;
UNATIVE_OFFSET sz;
assert(emitComp->compGeneratingProlog);
@@ -2674,9 +2568,9 @@ void emitter::spillIntArgRegsToShadowSlots()
id->idAddr()->iiaAddrMode.amIndxReg = REG_NA;
id->idAddr()->iiaAddrMode.amScale = emitEncodeScale(1);
- // The offset has already been set in the intrDsc ctor,
+ // The offset has already been set in the intrDsc ctor,
// make sure we got it right.
- assert(emitGetInsAmdAny(id) == ssize_t(offset));
+ assert(emitGetInsAmdAny(id) == ssize_t(offset));
id->idReg1(argReg);
sz = emitInsSizeAM(id, insCodeMR(INS_mov));
@@ -2691,15 +2585,15 @@ void emitter::spillIntArgRegsToShadowSlots()
void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
{
UNATIVE_OFFSET sz;
- instrDesc* id;
+ instrDesc* id;
switch (node->OperGet())
{
- case GT_IND:
+ case GT_IND:
{
- GenTreeIndir* mem = node->AsIndir();
- GenTreePtr addr = mem->Addr();
-
+ GenTreeIndir* mem = node->AsIndir();
+ GenTreePtr addr = mem->Addr();
+
if (addr->OperGet() == GT_CLS_VAR_ADDR)
{
emitIns_R_C(ins, attr, mem->gtRegNum, addr->gtClsVar.gtClsVarHnd, 0);
@@ -2714,11 +2608,9 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
}
else
{
- assert (addr->OperIsAddrMode() ||
- (addr->IsCnsIntOrI() && addr->isContained()) ||
- !addr->isContained());
+ assert(addr->OperIsAddrMode() || (addr->IsCnsIntOrI() && addr->isContained()) || !addr->isContained());
size_t offset = mem->Offset();
- id = emitNewInstrAmd(attr, offset);
+ id = emitNewInstrAmd(attr, offset);
id->idIns(ins);
id->idReg1(mem->gtRegNum);
emitHandleMemOp(mem, id, IF_RWR_ARD, ins);
@@ -2728,18 +2620,18 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
}
break;
- case GT_STOREIND:
+ case GT_STOREIND:
{
- GenTreeStoreInd* mem = node->AsStoreInd();
- GenTreePtr addr = mem->Addr();
- size_t offset = mem->Offset();
- GenTree* data = mem->Data();
+ GenTreeStoreInd* mem = node->AsStoreInd();
+ GenTreePtr addr = mem->Addr();
+ size_t offset = mem->Offset();
+ GenTree* data = mem->Data();
if (addr->OperGet() == GT_CLS_VAR_ADDR)
{
if (data->isContainedIntOrIImmed())
{
- emitIns_C_I(ins, attr, addr->gtClsVar.gtClsVarHnd, 0, (int) data->AsIntConCommon()->IconValue());
+ emitIns_C_I(ins, attr, addr->gtClsVar.gtClsVarHnd, 0, (int)data->AsIntConCommon()->IconValue());
}
else
{
@@ -2753,7 +2645,7 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
GenTreeLclVarCommon* varNode = addr->AsLclVarCommon();
if (data->isContainedIntOrIImmed())
{
- emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int) data->AsIntConCommon()->IconValue());
+ emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int)data->AsIntConCommon()->IconValue());
}
else
{
@@ -2765,8 +2657,8 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
}
else if (data->isContainedIntOrIImmed())
{
- int icon = (int) data->AsIntConCommon()->IconValue();
- id = emitNewInstrAmdCns(attr, offset, icon);
+ int icon = (int)data->AsIntConCommon()->IconValue();
+ id = emitNewInstrAmdCns(attr, offset, icon);
id->idIns(ins);
emitHandleMemOp(mem, id, IF_AWR_CNS, ins);
sz = emitInsSizeAM(id, insCodeMI(ins), icon);
@@ -2785,16 +2677,16 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
}
break;
- case GT_STORE_LCL_VAR:
+ case GT_STORE_LCL_VAR:
{
GenTreeLclVarCommon* varNode = node->AsLclVarCommon();
- GenTree* data = varNode->gtOp.gtOp1->gtEffectiveVal();
+ GenTree* data = varNode->gtOp.gtOp1->gtEffectiveVal();
codeGen->inst_set_SV_var(varNode);
assert(varNode->gtRegNum == REG_NA); // stack store
if (data->isContainedIntOrIImmed())
{
- emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int) data->AsIntConCommon()->IconValue());
+ emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int)data->AsIntConCommon()->IconValue());
}
else
{
@@ -2803,10 +2695,10 @@ void emitter::emitInsMov(instruction ins, emitAttr attr, GenTree* node)
}
codeGen->genUpdateLife(varNode);
}
- return;
+ return;
- default:
- unreached();
+ default:
+ unreached();
}
dispIns(id);
@@ -2830,23 +2722,23 @@ CORINFO_FIELD_HANDLE emitter::emitFltOrDblConst(GenTreeDblCon* tree, emitAttr at
}
else
{
- assert(emitTypeSize(tree->TypeGet()) == attr);
+ assert(emitTypeSize(tree->TypeGet()) == attr);
}
double constValue = tree->gtDblCon.gtDconVal;
- void *cnsAddr;
- float f;
- bool dblAlign;
+ void* cnsAddr;
+ float f;
+ bool dblAlign;
if (attr == EA_4BYTE)
- {
- f = forceCastToFloat(constValue);
- cnsAddr = &f;
+ {
+ f = forceCastToFloat(constValue);
+ cnsAddr = &f;
dblAlign = false;
}
else
{
- cnsAddr = &constValue;
+ cnsAddr = &constValue;
dblAlign = true;
}
@@ -2855,7 +2747,7 @@ CORINFO_FIELD_HANDLE emitter::emitFltOrDblConst(GenTreeDblCon* tree, emitAttr at
// to constant data, not a real static field.
UNATIVE_OFFSET cnsSize = (attr == EA_4BYTE) ? 4 : 8;
- UNATIVE_OFFSET cnum = emitDataConst(cnsAddr, cnsSize, dblAlign);
+ UNATIVE_OFFSET cnum = emitDataConst(cnsAddr, cnsSize, dblAlign);
return emitComp->eeFindJitDataOffs(cnum);
}
@@ -2865,8 +2757,7 @@ CORINFO_FIELD_HANDLE emitter::emitFltOrDblConst(GenTreeDblCon* tree, emitAttr at
regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src)
{
// dst can only be a reg or modrm
- assert(!dst->isContained() ||
- dst->isContainedMemoryOp() ||
+ assert(!dst->isContained() || dst->isContainedMemoryOp() ||
instrIs3opImul(ins)); // dst on these isn't really the dst
#ifdef DEBUG
@@ -2885,8 +2776,8 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
// find which operand is a memory op (if any)
// and what its base is
- GenTreeIndir* mem = nullptr;
- GenTree* memBase = nullptr;
+ GenTreeIndir* mem = nullptr;
+ GenTree* memBase = nullptr;
if (dst->isContainedIndir())
{
@@ -2905,18 +2796,18 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
// Find immed (if any) - it cannot be the dst
// SSE2 instructions allow only the second operand to be a memory operand.
GenTreeIntConCommon* intConst = nullptr;
- GenTreeDblCon *dblConst = nullptr;
+ GenTreeDblCon* dblConst = nullptr;
if (src->isContainedIntOrIImmed())
{
intConst = src->AsIntConCommon();
- }
- else if(src->isContainedFltOrDblImmed())
+ }
+ else if (src->isContainedFltOrDblImmed())
{
dblConst = src->AsDblCon();
}
// find local field if any
- GenTreeLclFld* lclField = nullptr;
+ GenTreeLclFld* lclField = nullptr;
if (src->isContainedLclField())
{
lclField = src->AsLclFld();
@@ -2954,10 +2845,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
// First handle the simple non-memory cases
//
- if ((mem == nullptr) &&
- (lclField == nullptr) &&
- (lclVar == nullptr) &&
- (tmpDsc == nullptr))
+ if ((mem == nullptr) && (lclField == nullptr) && (lclVar == nullptr) && (tmpDsc == nullptr))
{
if (intConst != nullptr)
{
@@ -2965,14 +2853,15 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
assert(!dst->isContained());
emitIns_R_I(ins, attr, dst->gtRegNum, intConst->IconValue());
- // TODO-XArch-Bug?: does the caller call regTracker.rsTrackRegTrash(dst->gtRegNum) or rsTrackRegIntCns(dst->gtRegNum, intConst->IconValue()) (as appropriate)?
+ // TODO-XArch-Bug?: does the caller call regTracker.rsTrackRegTrash(dst->gtRegNum) or
+ // rsTrackRegIntCns(dst->gtRegNum, intConst->IconValue()) (as appropriate)?
}
else if (dblConst != nullptr)
{
// Emit a data section constant for float or double constant.
CORINFO_FIELD_HANDLE hnd = emitFltOrDblConst(dblConst);
- emitIns_R_C(ins, attr, dst->gtRegNum, hnd, 0);
+ emitIns_R_C(ins, attr, dst->gtRegNum, hnd, 0);
}
else
{
@@ -2987,7 +2876,8 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
{
emitIns_R_R(ins, attr, dst->gtRegNum, src->gtRegNum);
}
- // ToDo-XArch-Bug?: does the caller call regTracker.rsTrackRegTrash(dst->gtRegNum) or, for ins=MOV: regTracker.rsTrackRegCopy(dst->gtRegNum, src->gtRegNum); ?
+ // ToDo-XArch-Bug?: does the caller call regTracker.rsTrackRegTrash(dst->gtRegNum) or, for ins=MOV:
+ // regTracker.rsTrackRegCopy(dst->gtRegNum, src->gtRegNum); ?
}
return dst->gtRegNum;
@@ -3017,13 +2907,10 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
// Spill temp numbers are negative and start with -1
// which also happens to be BAD_VAR_NUM. For this reason
// we also need to check 'tmpDsc != nullptr' here.
- if (varNum != BAD_VAR_NUM ||
- tmpDsc != nullptr)
+ if (varNum != BAD_VAR_NUM || tmpDsc != nullptr)
{
// Is the memory op in the source position?
- if (src->isContainedLclField() ||
- src->isContainedLclVar() ||
- src->isContainedSpillTemp())
+ if (src->isContainedLclField() || src->isContainedLclVar() || src->isContainedSpillTemp())
{
if (instrHasImplicitRegPairDest(ins))
{
@@ -3038,16 +2925,16 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
emitIns_R_S(ins, attr, dst->gtRegNum, varNum, offset);
}
}
- else // The memory op is in the dest position.
+ else // The memory op is in the dest position.
{
assert(dst->gtRegNum == REG_NA || dst->IsRegOptional());
// src could be int or reg
if (src->isContainedIntOrIImmed())
{
- // src is an contained immediate
+ // src is an contained immediate
// dst is a stack based local variable
- emitIns_S_I(ins, attr, varNum, offset, (int) src->gtIntConCommon.IconValue());
+ emitIns_S_I(ins, attr, varNum, offset, (int)src->gtIntConCommon.IconValue());
}
else
{
@@ -3066,7 +2953,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
return dst->gtRegNum;
}
- // Now we are left with only the cases where the instruction has some kind of a memory operand
+ // Now we are left with only the cases where the instruction has some kind of a memory operand
//
assert(mem != nullptr);
@@ -3091,13 +2978,13 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
codeGen->genProduceReg(dst);
}
}
- else // The memory op is in the dest position.
+ else // The memory op is in the dest position.
{
if (src->isContained())
{
- // src is an contained immediate
+ // src is an contained immediate
// dst is a class static variable
- emitIns_C_I(ins, attr, memBase->gtClsVar.gtClsVarHnd, 0, (int) src->gtIntConCommon.IconValue());
+ emitIns_C_I(ins, attr, memBase->gtClsVar.gtClsVarHnd, 0, (int)src->gtIntConCommon.IconValue());
}
else
{
@@ -3113,19 +3000,19 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
// Finally we handle addressing modes case [regBase + regIndex*scale + const]
//
// We will have to construct and fill in the instruction descriptor for this case
- //
+ //
instrDesc* id = nullptr;
// Is the src an immediate constant?
if (intConst)
{
// [mem], imm
- id = emitNewInstrAmdCns(attr, mem->Offset(), (int) intConst->IconValue());
+ id = emitNewInstrAmdCns(attr, mem->Offset(), (int)intConst->IconValue());
}
else // [mem], reg OR reg, [mem]
- {
+ {
size_t offset = mem->Offset();
- id = emitNewInstrAmd(attr, offset);
+ id = emitNewInstrAmd(attr, offset);
id->idIns(ins);
GenTree* regTree = (src == mem) ? dst : src;
@@ -3136,7 +3023,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
}
assert(id != nullptr);
- id->idIns(ins); // Set the instruction.
+ id->idIns(ins); // Set the instruction.
// Determine the instruction format
//
@@ -3169,10 +3056,10 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
// Determine the instruction size
//
- UNATIVE_OFFSET sz = 0;
+ UNATIVE_OFFSET sz = 0;
if (intConst)
{
- sz = emitInsSizeAM(id, insCodeMI(ins), (int) intConst->IconValue());
+ sz = emitInsSizeAM(id, insCodeMI(ins), (int)intConst->IconValue());
}
else
{
@@ -3190,7 +3077,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
{
sz = emitInsSizeAM(id, insCodeRM(ins));
}
- }
+ }
}
assert(sz != 0);
@@ -3234,14 +3121,11 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeInd, GenTree* src)
{
GenTreePtr addr = storeInd->Addr();
- addr = addr->gtSkipReloadOrCopy();
- assert(addr->OperGet() == GT_LCL_VAR ||
- addr->OperGet() == GT_LCL_VAR_ADDR ||
- addr->OperGet() == GT_LEA ||
- addr->OperGet() == GT_CLS_VAR_ADDR ||
- addr->OperGet() == GT_CNS_INT);
+ addr = addr->gtSkipReloadOrCopy();
+ assert(addr->OperGet() == GT_LCL_VAR || addr->OperGet() == GT_LCL_VAR_ADDR || addr->OperGet() == GT_LEA ||
+ addr->OperGet() == GT_CLS_VAR_ADDR || addr->OperGet() == GT_CNS_INT);
- instrDesc* id = nullptr;
+ instrDesc* id = nullptr;
UNATIVE_OFFSET sz;
size_t offset = 0;
@@ -3253,10 +3137,10 @@ void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeI
if (src->isContainedIntOrIImmed())
{
GenTreeIntConCommon* intConst = src->AsIntConCommon();
- id = emitNewInstrAmdCns(attr, offset, (int) intConst->IconValue());
+ id = emitNewInstrAmdCns(attr, offset, (int)intConst->IconValue());
emitHandleMemOp(storeInd, id, IF_ARW_CNS, ins);
id->idIns(ins);
- sz = emitInsSizeAM(id, insCodeMI(ins), (int) intConst->IconValue());
+ sz = emitInsSizeAM(id, insCodeMI(ins), (int)intConst->IconValue());
}
else
{
@@ -3276,7 +3160,6 @@ void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeI
emitCurIGsize += sz;
}
-
//------------------------------------------------------------------------
// emitInsRMW: Emit logic for Read-Modify-Write unary instructions.
//
@@ -3302,12 +3185,9 @@ void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeI
void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeInd)
{
GenTreePtr addr = storeInd->Addr();
- addr = addr->gtSkipReloadOrCopy();
- assert(addr->OperGet() == GT_LCL_VAR ||
- addr->OperGet() == GT_LCL_VAR_ADDR ||
- addr->OperGet() == GT_CLS_VAR_ADDR ||
- addr->OperGet() == GT_LEA ||
- addr->OperGet() == GT_CNS_INT);
+ addr = addr->gtSkipReloadOrCopy();
+ assert(addr->OperGet() == GT_LCL_VAR || addr->OperGet() == GT_LCL_VAR_ADDR || addr->OperGet() == GT_CLS_VAR_ADDR ||
+ addr->OperGet() == GT_LEA || addr->OperGet() == GT_CNS_INT);
size_t offset = 0;
if (addr->OperGet() != GT_CLS_VAR_ADDR)
@@ -3333,11 +3213,11 @@ void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeI
* Add an instruction of the form "op ST(0),ST(n)".
*/
-void emitter::emitIns_F0_F(instruction ins, unsigned fpreg)
+void emitter::emitIns_F0_F(instruction ins, unsigned fpreg)
{
- UNATIVE_OFFSET sz = 2;
- instrDesc* id = emitNewInstr();
- insFormat fmt = emitInsModeFormat(ins, IF_TRD_FRD);
+ UNATIVE_OFFSET sz = 2;
+ instrDesc* id = emitNewInstr();
+ insFormat fmt = emitInsModeFormat(ins, IF_TRD_FRD);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3353,11 +3233,11 @@ void emitter::emitIns_F0_F(instruction ins, unsigned fpreg)
* Add an instruction of the form "op ST(n),ST(0)".
*/
-void emitter::emitIns_F_F0(instruction ins, unsigned fpreg)
+void emitter::emitIns_F_F0(instruction ins, unsigned fpreg)
{
- UNATIVE_OFFSET sz = 2;
- instrDesc* id = emitNewInstr();
- insFormat fmt = emitInsModeFormat(ins, IF_FRD_TRD);
+ UNATIVE_OFFSET sz = 2;
+ instrDesc* id = emitNewInstr();
+ insFormat fmt = emitInsModeFormat(ins, IF_FRD_TRD);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3374,11 +3254,9 @@ void emitter::emitIns_F_F0(instruction ins, unsigned fpreg)
* Add an instruction referencing a single register.
*/
-void emitter::emitIns_R(instruction ins,
- emitAttr attr,
- regNumber reg)
+void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg)
{
- emitAttr size = EA_SIZE(attr);
+ emitAttr size = EA_SIZE(attr);
assert(size <= EA_PTRSIZE);
noway_assert(emitVerifyEncodable(ins, size, reg));
@@ -3388,65 +3266,65 @@ void emitter::emitIns_R(instruction ins,
switch (ins)
{
- case INS_inc:
- case INS_dec:
+ case INS_inc:
+ case INS_dec:
#ifdef _TARGET_AMD64_
- sz = 2; // x64 has no 1-byte opcode (it is the same encoding as the REX prefix)
+ sz = 2; // x64 has no 1-byte opcode (it is the same encoding as the REX prefix)
#else // !_TARGET_AMD64_
- if (size == EA_1BYTE)
- sz = 2; // Use the long form as the small one has no 'w' bit
- else
- sz = 1; // Use short form
+ if (size == EA_1BYTE)
+ sz = 2; // Use the long form as the small one has no 'w' bit
+ else
+ sz = 1; // Use short form
#endif // !_TARGET_AMD64_
- break;
+ break;
- case INS_pop:
- case INS_pop_hide:
- case INS_push:
- case INS_push_hide:
+ case INS_pop:
+ case INS_pop_hide:
+ case INS_push:
+ case INS_push_hide:
- /* We don't currently push/pop small values */
+ /* We don't currently push/pop small values */
- assert(size == EA_PTRSIZE);
+ assert(size == EA_PTRSIZE);
- sz = 1;
- break;
+ sz = 1;
+ break;
- default:
+ default:
- /* All the sixteen INS_setCCs are contiguous. */
+ /* All the sixteen INS_setCCs are contiguous. */
- if (INS_seto <= ins && ins <= INS_setg)
- {
- // Rough check that we used the endpoints for the range check
+ if (INS_seto <= ins && ins <= INS_setg)
+ {
+ // Rough check that we used the endpoints for the range check
- assert(INS_seto + 0xF == INS_setg);
+ assert(INS_seto + 0xF == INS_setg);
- // The caller must specify EA_1BYTE for 'attr'
+ // The caller must specify EA_1BYTE for 'attr'
- assert(attr == EA_1BYTE);
+ assert(attr == EA_1BYTE);
- /* We expect this to always be a 'big' opcode */
+ /* We expect this to always be a 'big' opcode */
- assert(insEncodeMRreg(ins, reg, attr, insCodeMR(ins)) & 0x00FF0000);
+ assert(insEncodeMRreg(ins, reg, attr, insCodeMR(ins)) & 0x00FF0000);
- size = attr;
+ size = attr;
- sz = 3;
- break;
- }
- else
- {
- sz = 2;
- break;
- }
+ sz = 3;
+ break;
+ }
+ else
+ {
+ sz = 2;
+ break;
+ }
}
- insFormat fmt = emitInsModeFormat(ins, IF_RRD);
+ insFormat fmt = emitInsModeFormat(ins, IF_RRD);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -3464,7 +3342,9 @@ void emitter::emitIns_R(instruction ins,
// REX byte
if (IsExtendedReg(reg, attr) || TakesRexWPrefix(ins, attr))
+ {
sz += emitGetRexPrefixSize(ins);
+ }
id->idCodeSize(sz);
@@ -3473,20 +3353,20 @@ void emitter::emitIns_R(instruction ins,
#if !FEATURE_FIXED_OUT_ARGS
- if (ins == INS_push)
+ if (ins == INS_push)
{
emitCurStackLvl += emitCntStackDepth;
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
else if (ins == INS_pop)
{
- emitCurStackLvl -= emitCntStackDepth; assert((int)emitCurStackLvl >= 0);
+ emitCurStackLvl -= emitCntStackDepth;
+ assert((int)emitCurStackLvl >= 0);
}
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
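
For reference, the size computed in emitIns_R is only an upper-bound estimate used to reserve space in the current instruction group; the switch above encodes a few fixed x86 facts: inc/dec need opcode + ModR/M on x64 because the one-byte 0x40-0x4F forms became the REX prefixes, push/pop of a full-width register is a single opcode byte, and the setcc family is a two-byte 0x0F opcode plus ModR/M. A rough standalone sketch of that sizing (illustrative names only, not the emitter's real types):

#include <cstdio>

// Illustrative instruction enum; the real emitter uses its own 'instruction' enum.
enum Ins
{
    INS_inc,
    INS_dec,
    INS_push,
    INS_pop,
    INS_seto,
    INS_setg,
    INS_not
};

// Rough byte count for "op reg" on x64, before any REX/operand-size prefixes.
unsigned insSizeR(Ins ins)
{
    switch (ins)
    {
        case INS_inc:
        case INS_dec:
            return 2; // opcode + ModR/M; the 1-byte 0x40-0x4F forms are REX prefixes on x64
        case INS_push:
        case INS_pop:
            return 1; // 0x50+r / 0x58+r encode the register in the opcode byte
        case INS_seto:
        case INS_setg:
            return 3; // two-byte 0x0F 0x9x opcode plus ModR/M
        default:
            return 2; // generic opcode + ModR/M
    }
}

int main()
{
    printf("inc  reg: %u bytes\n", insSizeR(INS_inc));
    printf("push reg: %u bytes\n", insSizeR(INS_push));
    printf("seto reg: %u bytes\n", insSizeR(INS_seto));
    return 0;
}
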
/*****************************************************************************
@@ -3494,12 +3374,9 @@ void emitter::emitIns_R(instruction ins,
* Add an instruction referencing a register and a constant.
*/
-void emitter::emitIns_R_I(instruction ins,
- emitAttr attr,
- regNumber reg,
- ssize_t val)
+void emitter::emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t val)
{
- emitAttr size = EA_SIZE(attr);
+ emitAttr size = EA_SIZE(attr);
// Allow emitting SSE2/AVX SIMD instructions of R_I form that can specify EA_16BYTE or EA_32BYTE
assert(size <= EA_PTRSIZE || IsSSEOrAVXInstruction(ins));
@@ -3513,81 +3390,87 @@ void emitter::emitIns_R_I(instruction ins,
#endif
UNATIVE_OFFSET sz;
- instrDesc* id;
- insFormat fmt = emitInsModeFormat(ins, IF_RRD_CNS);
- bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test);
+ instrDesc* id;
+ insFormat fmt = emitInsModeFormat(ins, IF_RRD_CNS);
+ bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test);
// Figure out the size of the instruction
switch (ins)
{
- case INS_mov:
+ case INS_mov:
#ifdef _TARGET_AMD64_
- // mov reg, imm64 is equivalent to mov reg, imm32 if the high order bits are all 0
- // and this isn't a reloc constant.
- if (((size > EA_4BYTE) && (0 == (val & 0xFFFFFFFF00000000LL))) && !EA_IS_CNS_RELOC(attr))
- {
- attr = size = EA_4BYTE;
- }
+ // mov reg, imm64 is equivalent to mov reg, imm32 if the high order bits are all 0
+ // and this isn't a reloc constant.
+ if (((size > EA_4BYTE) && (0 == (val & 0xFFFFFFFF00000000LL))) && !EA_IS_CNS_RELOC(attr))
+ {
+ attr = size = EA_4BYTE;
+ }
- if (size > EA_4BYTE)
- {
- sz = 9; // Really it is 10, but we'll add one more later
- break;
- }
+ if (size > EA_4BYTE)
+ {
+ sz = 9; // Really it is 10, but we'll add one more later
+ break;
+ }
#endif // _TARGET_AMD64_
- sz = 5;
- break;
+ sz = 5;
+ break;
- case INS_rcl_N:
- case INS_rcr_N:
- case INS_rol_N:
- case INS_ror_N:
- case INS_shl_N:
- case INS_shr_N:
- case INS_sar_N:
- assert(val != 1);
- fmt = IF_RRW_SHF;
- sz = 3;
- val &= 0x7F;
- valInByte = true; // shift amount always placed in a byte
- break;
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
+ assert(val != 1);
+ fmt = IF_RRW_SHF;
+ sz = 3;
+ val &= 0x7F;
+ valInByte = true; // shift amount always placed in a byte
+ break;
- default:
+ default:
- if (EA_IS_CNS_RELOC(attr))
- valInByte = false; // relocs can't be placed in a byte
+ if (EA_IS_CNS_RELOC(attr))
+ {
+ valInByte = false; // relocs can't be placed in a byte
+ }
- if (valInByte)
- {
- if (IsSSEOrAVXInstruction(ins))
- sz = 5;
- else
- sz = 3;
- }
- else
- {
- if (reg == REG_EAX && !instrIs3opImul(ins))
+ if (valInByte)
{
- sz = 1;
+ if (IsSSEOrAVXInstruction(ins))
+ {
+ sz = 5;
+ }
+ else
+ {
+ sz = 3;
+ }
}
else
{
- sz = 2;
- }
+ if (reg == REG_EAX && !instrIs3opImul(ins))
+ {
+ sz = 1;
+ }
+ else
+ {
+ sz = 2;
+ }
#ifdef _TARGET_AMD64_
- if (size > EA_4BYTE)
- {
- // We special-case anything that takes a full 8-byte constant.
- sz += 4;
- }
- else
+ if (size > EA_4BYTE)
+ {
+ // We special-case anything that takes a full 8-byte constant.
+ sz += 4;
+ }
+ else
#endif // _TARGET_AMD64_
- {
- sz += EA_SIZE_IN_BYTES(attr);
+ {
+ sz += EA_SIZE_IN_BYTES(attr);
+ }
}
- }
- break;
+ break;
}
// Vex prefix size
@@ -3605,14 +3488,16 @@ void emitter::emitIns_R_I(instruction ins,
assert(reg < 8);
#endif
- id = emitNewInstrSC(attr, val);
+ id = emitNewInstrSC(attr, val);
id->idIns(ins);
id->idInsFmt(fmt);
id->idReg1(reg);
// 16-bit operand instructions will need a prefix
if (size == EA_2BYTE)
+ {
sz += 1;
+ }
id->idCodeSize(sz);
@@ -3621,11 +3506,11 @@ void emitter::emitIns_R_I(instruction ins,
#if !FEATURE_FIXED_OUT_ARGS
- if (reg == REG_ESP)
+ if (reg == REG_ESP)
{
- if (emitCntStackDepth)
+ if (emitCntStackDepth)
{
- if (ins == INS_sub)
+ if (ins == INS_sub)
{
S_UINT32 newStackLvl(emitCurStackLvl);
newStackLvl += S_UINT32(val);
@@ -3633,8 +3518,8 @@ void emitter::emitIns_R_I(instruction ins,
emitCurStackLvl = newStackLvl.Value();
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
else if (ins == INS_add)
{
@@ -3647,7 +3532,6 @@ void emitter::emitIns_R_I(instruction ins,
}
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
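
Two checks in emitIns_R_I recur throughout this file: whether an immediate survives sign-extension from 8 bits (which selects the short imm8 encodings, and is deliberately disabled for mov/test since they have no sign-extended imm8 form), and, on x64, whether a 64-bit mov immediate can be narrowed to the 5-byte 32-bit form because its upper half is zero. A minimal sketch of both tests, using plain integer types rather than the emitter's ssize_t/emitAttr plumbing (function names are invented):

#include <cassert>
#include <cstdint>

// True if 'val' round-trips through a signed byte, so the sign-extended
// imm8 encoding reproduces the same value.
bool fitsInSignedByte(int64_t val)
{
    return (int8_t)val == val;
}

// True if a 64-bit mov immediate can use the shorter "mov r32, imm32"
// form: writing a 32-bit register zero-extends into the full register.
bool movImm64FitsIn32(uint64_t val)
{
    return (val & 0xFFFFFFFF00000000ULL) == 0;
}

int main()
{
    assert(fitsInSignedByte(127) && fitsInSignedByte(-128));
    assert(!fitsInSignedByte(128));
    assert(movImm64FitsIn32(0x7FFFFFFFULL));
    assert(!movImm64FitsIn32(0x100000000ULL));
    return 0;
}
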
/*****************************************************************************
@@ -3655,14 +3539,11 @@ void emitter::emitIns_R_I(instruction ins,
* Add an instruction referencing an integer constant.
*/
-void emitter::emitIns_I(instruction ins,
- emitAttr attr,
- int val
- )
+void emitter::emitIns_I(instruction ins, emitAttr attr, int val)
{
UNATIVE_OFFSET sz;
- instrDesc* id;
- bool valInByte = ((signed char)val == val);
+ instrDesc* id;
+ bool valInByte = ((signed char)val == val);
#ifdef _TARGET_AMD64_
// mov reg, imm64 is the only opcode which takes a full 8 byte immediate
@@ -3671,29 +3552,31 @@ void emitter::emitIns_I(instruction ins,
#endif
if (EA_IS_CNS_RELOC(attr))
- valInByte = false; // relocs can't be placed in a byte
+ {
+ valInByte = false; // relocs can't be placed in a byte
+ }
switch (ins)
{
- case INS_loop:
- case INS_jge:
- sz = 2;
- break;
+ case INS_loop:
+ case INS_jge:
+ sz = 2;
+ break;
- case INS_ret:
- sz = 3;
- break;
+ case INS_ret:
+ sz = 3;
+ break;
- case INS_push_hide:
- case INS_push:
- sz = valInByte ? 2 : 5;
- break;
+ case INS_push_hide:
+ case INS_push:
+ sz = valInByte ? 2 : 5;
+ break;
- default:
- NO_WAY("unexpected instruction");
+ default:
+ NO_WAY("unexpected instruction");
}
- id = emitNewInstrSC(attr, val);
+ id = emitNewInstrSC(attr, val);
id->idIns(ins);
id->idInsFmt(IF_CNS);
id->idCodeSize(sz);
@@ -3703,16 +3586,15 @@ void emitter::emitIns_I(instruction ins,
#if !FEATURE_FIXED_OUT_ARGS
- if (ins == INS_push)
+ if (ins == INS_push)
{
emitCurStackLvl += emitCntStackDepth;
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
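
When the outgoing argument area is not fixed (!FEATURE_FIXED_OUT_ARGS, i.e. the x86 push/pop argument model), each push/pop recorded here also updates a running stack level and a high-water mark that later sizes the frame. The bookkeeping amounts to the small model below; the struct and its members are invented stand-ins for the emitter's emitCurStackLvl/emitMaxStackDepth fields:

#include <cassert>
#include <cstdio>

// Stand-in for the emitter's emitCurStackLvl / emitMaxStackDepth tracking.
struct StackLevelTracker
{
    unsigned curLvl   = 0; // bytes currently pushed
    unsigned maxDepth = 0; // high-water mark used to size the frame
    unsigned slotSize = 4; // bytes per push on x86

    void push()
    {
        curLvl += slotSize;
        if (maxDepth < curLvl)
        {
            maxDepth = curLvl;
        }
    }

    void pop()
    {
        assert(curLvl >= slotSize); // mirrors assert((int)emitCurStackLvl >= 0)
        curLvl -= slotSize;
    }
};

int main()
{
    StackLevelTracker t;
    t.push();
    t.push();
    t.pop();
    printf("current=%u max=%u\n", t.curLvl, t.maxDepth); // current=4 max=8
    return 0;
}
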
/*****************************************************************************
@@ -3720,19 +3602,19 @@ void emitter::emitIns_I(instruction ins,
* Add a "jump through a table" instruction.
*/
-void emitter::emitIns_IJ(emitAttr attr,
- regNumber reg,
- unsigned base)
+void emitter::emitIns_IJ(emitAttr attr, regNumber reg, unsigned base)
{
assert(EA_SIZE(attr) == EA_4BYTE);
- UNATIVE_OFFSET sz = 3 + 4;
- const instruction ins = INS_i_jmp;
+ UNATIVE_OFFSET sz = 3 + 4;
+ const instruction ins = INS_i_jmp;
if (IsExtendedReg(reg, attr))
+ {
sz += emitGetRexPrefixSize(ins);
+ }
- instrDesc* id = emitNewInstrAmd(attr, base);
+ instrDesc* id = emitNewInstrAmd(attr, base);
id->idIns(ins);
id->idInsFmt(IF_ARD);
@@ -3740,8 +3622,8 @@ void emitter::emitIns_IJ(emitAttr attr,
id->idAddr()->iiaAddrMode.amIndxReg = reg;
id->idAddr()->iiaAddrMode.amScale = emitter::OPSZP;
-#ifdef DEBUG
- id->idDebugOnlyInfo()->idMemCookie = base;
+#ifdef DEBUG
+ id->idDebugOnlyInfo()->idMemCookie = base;
#endif
id->idCodeSize(sz);
@@ -3757,28 +3639,27 @@ void emitter::emitIns_IJ(emitAttr attr,
* value (e.g. "push offset clsvar", rather than "push dword ptr [clsvar]").
*/
-void emitter::emitIns_C(instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs)
+void emitter::emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
#if RELOC_SUPPORT
// Static always need relocs
if (!jitStaticFldIsGlobAddr(fldHnd))
+ {
attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
+ }
#endif
- UNATIVE_OFFSET sz;
- instrDesc* id;
+ UNATIVE_OFFSET sz;
+ instrDesc* id;
/* Are we pushing the offset of the class variable? */
- if (EA_IS_OFFSET(attr))
+ if (EA_IS_OFFSET(attr))
{
assert(ins == INS_push);
sz = 1 + sizeof(void*);
- id = emitNewInstrDsp(EA_1BYTE, offs);
+ id = emitNewInstrDsp(EA_1BYTE, offs);
id->idIns(ins);
id->idInsFmt(IF_MRD_OFF);
}
@@ -3786,14 +3667,14 @@ void emitter::emitIns_C(instruction ins,
{
#if FEATURE_STACK_FP_X87
insFormat fmt = emitInsModeFormat(ins, IF_MRD, IF_TRD_MRD, IF_MWR_TRD);
-#else // !FEATURE_STACK_FP_X87
+#else // !FEATURE_STACK_FP_X87
insFormat fmt = emitInsModeFormat(ins, IF_MRD);
#endif // !FEATURE_STACK_FP_X87
- id = emitNewInstrDsp(attr, offs);
+ id = emitNewInstrDsp(attr, offs);
id->idIns(ins);
id->idInsFmt(fmt);
- sz = emitInsSizeCV(id, insCodeMR(ins));
+ sz = emitInsSizeCV(id, insCodeMR(ins));
}
// Vex prefix size
@@ -3812,15 +3693,14 @@ void emitter::emitIns_C(instruction ins,
dispIns(id);
emitCurIGsize += sz;
-
#if !FEATURE_FIXED_OUT_ARGS
- if (ins == INS_push)
+ if (ins == INS_push)
{
emitCurStackLvl += emitCntStackDepth;
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
else if (ins == INS_pop)
{
@@ -3829,7 +3709,6 @@ void emitter::emitIns_C(instruction ins,
}
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
/*****************************************************************************
@@ -3837,12 +3716,9 @@ void emitter::emitIns_C(instruction ins,
* Add an instruction with two register operands.
*/
-void emitter::emitIns_R_R (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2)
+void emitter::emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2)
{
- emitAttr size = EA_SIZE(attr);
+ emitAttr size = EA_SIZE(attr);
/* We don't want to generate any useless mov instructions! */
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -3858,11 +3734,10 @@ void emitter::emitIns_R_R (instruction ins,
assert(size <= EA_32BYTE);
noway_assert(emitVerifyEncodable(ins, size, reg1, reg2));
- UNATIVE_OFFSET sz = emitInsSizeRR(ins, reg1, reg2, attr);
+ UNATIVE_OFFSET sz = emitInsSizeRR(ins, reg1, reg2, attr);
/* Special case: "XCHG" uses a different format */
- insFormat fmt = (ins == INS_xchg) ? IF_RRW_RRW
- : emitInsModeFormat(ins, IF_RRD_RRD);
+ insFormat fmt = (ins == INS_xchg) ? IF_RRW_RRW : emitInsModeFormat(ins, IF_RRD_RRD);
instrDesc* id = emitNewInstrTiny(attr);
id->idIns(ins);
@@ -3880,14 +3755,10 @@ void emitter::emitIns_R_R (instruction ins,
* Add an instruction with two register operands and an integer constant.
*/
-void emitter::emitIns_R_R_I (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- int ival)
+void emitter::emitIns_R_R_I(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int ival)
{
// SSE2 version requires 5 bytes and AVX version 6 bytes
- UNATIVE_OFFSET sz = 4;
+ UNATIVE_OFFSET sz = 4;
if (IsSSEOrAVXInstruction(ins))
{
sz = UseAVX() ? 6 : 5;
@@ -3899,7 +3770,7 @@ void emitter::emitIns_R_R_I (instruction ins,
noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif
- instrDesc* id = emitNewInstrSC(attr, ival);
+ instrDesc* id = emitNewInstrSC(attr, ival);
// REX prefix
if (IsExtendedReg(reg1, attr) || IsExtendedReg(reg2, attr))
@@ -3922,19 +3793,15 @@ void emitter::emitIns_R_R_I (instruction ins,
* Add an instruction with three register operands.
*/
-void emitter::emitIns_R_R_R (instruction ins,
- emitAttr attr,
- regNumber targetReg,
- regNumber reg1,
- regNumber reg2)
+void emitter::emitIns_R_R_R(instruction ins, emitAttr attr, regNumber targetReg, regNumber reg1, regNumber reg2)
{
assert(IsSSEOrAVXInstruction(ins));
assert(IsThreeOperandAVXInstruction(ins));
- //Currently vex prefix only use three bytes mode.
- //size = vex + opcode + ModR/M = 3 + 1 + 1 = 5
- //TODO-XArch-CQ: We should create function which can calculate all kinds of AVX instructions size in future
- UNATIVE_OFFSET sz = 5;
-
+    // Currently the vex prefix only uses the three-byte mode.
+ // size = vex + opcode + ModR/M = 3 + 1 + 1 = 5
+    // TODO-XArch-CQ: We should create a function that can calculate the size of all kinds of AVX instructions in the future
+ UNATIVE_OFFSET sz = 5;
+
instrDesc* id = emitNewInstr(attr);
id->idIns(ins);
id->idInsFmt(IF_RWR_RRD_RRD);
@@ -3947,36 +3814,33 @@ void emitter::emitIns_R_R_R (instruction ins,
emitCurIGsize += sz;
}
-
#endif
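
The 5-byte estimate in emitIns_R_R_R is simple arithmetic over the encoding: a register-only three-operand AVX instruction needs the 3-byte VEX prefix, one opcode byte, and one ModR/M byte. A trivial standalone version of that accounting (the function name is illustrative):

#include <cstdio>

// Upper-bound size of a register-only three-operand AVX instruction,
// assuming the 3-byte VEX prefix form is always used.
unsigned avxThreeRegSize()
{
    const unsigned vexPrefix = 3; // three-byte VEX
    const unsigned opcode    = 1;
    const unsigned modRM     = 1;
    return vexPrefix + opcode + modRM; // = 5
}

int main()
{
    printf("vaddps xmm0, xmm1, xmm2 reserves %u bytes\n", avxThreeRegSize());
    return 0;
}
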
/*****************************************************************************
*
* Add an instruction with a register + static member operands.
*/
-void emitter::emitIns_R_C(instruction ins,
- emitAttr attr,
- regNumber reg,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs)
+void emitter::emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO_FIELD_HANDLE fldHnd, int offs)
{
#if RELOC_SUPPORT
// Static always need relocs
if (!jitStaticFldIsGlobAddr(fldHnd))
+ {
attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
+ }
#endif
- emitAttr size = EA_SIZE(attr);
+ emitAttr size = EA_SIZE(attr);
assert(size <= EA_32BYTE);
noway_assert(emitVerifyEncodable(ins, size, reg));
- UNATIVE_OFFSET sz;
- instrDesc* id;
+ UNATIVE_OFFSET sz;
+ instrDesc* id;
// Are we MOV'ing the offset of the class variable into EAX?
- if (EA_IS_OFFSET(attr))
+ if (EA_IS_OFFSET(attr))
{
- id = emitNewInstrDsp(EA_1BYTE, offs);
+ id = emitNewInstrDsp(EA_1BYTE, offs);
id->idIns(ins);
id->idInsFmt(IF_RWR_MRD_OFF);
@@ -3989,7 +3853,7 @@ void emitter::emitIns_R_C(instruction ins,
{
insFormat fmt = emitInsModeFormat(ins, IF_RRD_MRD);
- id = emitNewInstrDsp(attr, offs);
+ id = emitNewInstrDsp(attr, offs);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4012,7 +3876,9 @@ void emitter::emitIns_R_C(instruction ins,
// Special case: mov reg, fs:[ddd]
if (fldHnd == FLD_GLOBAL_FS)
+ {
sz += 1;
+ }
}
// VEX prefix
@@ -4038,19 +3904,17 @@ void emitter::emitIns_R_C(instruction ins,
* Add an instruction with a static member + register operands.
*/
-void emitter::emitIns_C_R (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- regNumber reg,
- int offs)
+void emitter::emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, regNumber reg, int offs)
{
#if RELOC_SUPPORT
// Static always need relocs
if (!jitStaticFldIsGlobAddr(fldHnd))
+ {
attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
+ }
#endif
- emitAttr size = EA_SIZE(attr);
+ emitAttr size = EA_SIZE(attr);
#if defined(_TARGET_X86_) && !FEATURE_STACK_FP_X87
// For x86 RyuJIT it is valid to storeind a double sized operand in an xmm reg to memory
@@ -4061,13 +3925,13 @@ void emitter::emitIns_C_R (instruction ins,
noway_assert(emitVerifyEncodable(ins, size, reg));
- instrDesc* id = emitNewInstrDsp(attr, offs);
- insFormat fmt = emitInsModeFormat(ins, IF_MRD_RRD);
+ instrDesc* id = emitNewInstrDsp(attr, offs);
+ insFormat fmt = emitInsModeFormat(ins, IF_MRD_RRD);
id->idIns(ins);
id->idInsFmt(fmt);
- UNATIVE_OFFSET sz;
+ UNATIVE_OFFSET sz;
#ifdef _TARGET_X86_
// Special case: "mov [addr], EAX" is smaller.
@@ -4115,45 +3979,43 @@ void emitter::emitIns_C_R (instruction ins,
* Add an instruction with a static member + constant.
*/
-void emitter::emitIns_C_I (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs,
- int val)
+void emitter::emitIns_C_I(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, int offs, int val)
{
#if RELOC_SUPPORT
// Static always need relocs
if (!jitStaticFldIsGlobAddr(fldHnd))
+ {
attr = EA_SET_FLG(attr, EA_DSP_RELOC_FLG);
+ }
#endif
- insFormat fmt;
+ insFormat fmt;
switch (ins)
{
- case INS_rcl_N:
- case INS_rcr_N:
- case INS_rol_N:
- case INS_ror_N:
- case INS_shl_N:
- case INS_shr_N:
- case INS_sar_N:
- assert(val != 1);
- fmt = IF_MRW_SHF;
- val &= 0x7F;
- break;
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
+ assert(val != 1);
+ fmt = IF_MRW_SHF;
+ val &= 0x7F;
+ break;
- default:
- fmt = emitInsModeFormat(ins, IF_MRD_CNS);
- break;
+ default:
+ fmt = emitInsModeFormat(ins, IF_MRD_CNS);
+ break;
}
- instrDesc* id = emitNewInstrCnsDsp(attr, val, offs);
+ instrDesc* id = emitNewInstrCnsDsp(attr, val, offs);
id->idIns(ins);
id->idInsFmt(fmt);
- size_t code = insCodeMI(ins);
- UNATIVE_OFFSET sz = emitInsSizeCV(id, code, val);
+ size_t code = insCodeMI(ins);
+ UNATIVE_OFFSET sz = emitInsSizeCV(id, code, val);
#ifdef _TARGET_AMD64_
// Vex prefix
@@ -4173,37 +4035,33 @@ void emitter::emitIns_C_I (instruction ins,
emitCurIGsize += sz;
}
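
The shift/rotate-by-immediate cases above follow the same pattern used throughout this file: the *_N instruction variants are only for counts other than 1 (presumably because a count of 1 has its own dedicated, shorter opcode and callers are expected to use that form, hence the assert), the format switches to the dedicated shift form, and the count is masked to 7 bits before being stored as a small immediate. A hedged sketch of that form selection, with invented names and the standard x86 opcode bytes noted in comments:

#include <cassert>
#include <cstdio>

enum ShiftForm
{
    SHIFT_BY_ONE,  // D1 /r: dedicated, immediate-free "shift by 1" form
    SHIFT_BY_IMM8, // C1 /r ib: count carried as an 8-bit immediate
    SHIFT_BY_CL    // D3 /r: count taken from CL at run time
};

// Pick an encoding form for a shift/rotate; illustrative only.
ShiftForm pickShiftForm(int count, bool countInCL)
{
    if (countInCL)
    {
        return SHIFT_BY_CL;
    }
    if (count == 1)
    {
        return SHIFT_BY_ONE; // the *_N variants assert this case away
    }
    return SHIFT_BY_IMM8;
}

int main()
{
    assert(pickShiftForm(1, false) == SHIFT_BY_ONE);
    assert(pickShiftForm(3, false) == SHIFT_BY_IMM8);
    assert(pickShiftForm(0, true) == SHIFT_BY_CL);

    int storedCount = 200 & 0x7F; // the emitter masks the stored count to 7 bits
    printf("stored count = %d\n", storedCount);
    return 0;
}
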
-void emitter::emitIns_J_S (instruction ins,
- emitAttr attr,
- BasicBlock* dst,
- int varx,
- int offs)
+void emitter::emitIns_J_S(instruction ins, emitAttr attr, BasicBlock* dst, int varx, int offs)
{
assert(ins == INS_mov);
assert(dst->bbFlags & BBF_JMP_TARGET);
- instrDescLbl* id = emitNewInstrLbl();
+ instrDescLbl* id = emitNewInstrLbl();
id->idIns(ins);
id->idInsFmt(IF_SWR_LABEL);
- id->idAddr()->iiaBBlabel = dst;
+ id->idAddr()->iiaBBlabel = dst;
/* The label reference is always long */
- id->idjShort = 0;
- id->idjKeepLong = 1;
+ id->idjShort = 0;
+ id->idjKeepLong = 1;
/* Record the current IG and offset within it */
- id->idjIG = emitCurIG;
- id->idjOffs = emitCurIGsize;
+ id->idjIG = emitCurIG;
+ id->idjOffs = emitCurIGsize;
/* Append this instruction to this IG's jump list */
- id->idjNext = emitCurIGjmpList;
- emitCurIGjmpList = id;
+ id->idjNext = emitCurIGjmpList;
+ emitCurIGjmpList = id;
- UNATIVE_OFFSET sz = sizeof(INT32) + emitInsSizeSV(insCodeMI(ins), varx, offs);
+ UNATIVE_OFFSET sz = sizeof(INT32) + emitInsSizeSV(insCodeMI(ins), varx, offs);
id->dstLclVar.initLclVarAddr(varx, offs);
#ifdef DEBUG
id->idDebugOnlyInfo()->idVarRefOffs = emitVarRefOffs;
@@ -4220,13 +4078,13 @@ void emitter::emitIns_J_S (instruction ins,
// not a relative address.
//
// On Amd64, Absolute code addresses should always go through a reloc to
- // to be encoded as RIP rel32 offset.
+    // be encoded as a RIP rel32 offset.
if (emitComp->opts.compReloc)
#endif
{
id->idSetIsDspReloc();
}
-#endif //RELOC_SUPPORT
+#endif // RELOC_SUPPORT
id->idCodeSize(sz);
@@ -4234,41 +4092,37 @@ void emitter::emitIns_J_S (instruction ins,
emitCurIGsize += sz;
}
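
Label-relative instructions such as the one above are threaded onto a per-group jump list (idjNext / emitCurIGjmpList) so that the pass that later resolves label distances can find them. The list is just an intrusive singly linked list grown by prepending; a minimal standalone equivalent with hypothetical names:

#include <cstdio>

// Stand-in for instrDescJmp: every jump carries its own link field.
struct JmpDesc
{
    unsigned offs; // offset of the jump within its group
    JmpDesc* next; // intrusive link, like idjNext
};

int main()
{
    JmpDesc  jumps[3] = {{0x10, nullptr}, {0x24, nullptr}, {0x30, nullptr}};
    JmpDesc* jmpList  = nullptr; // like emitCurIGjmpList

    // Prepend each jump as it is emitted: O(1), newest first.
    for (JmpDesc& j : jumps)
    {
        j.next  = jmpList;
        jmpList = &j;
    }

    // A later pass walks the list, most recently emitted jump first.
    for (JmpDesc* j = jmpList; j != nullptr; j = j->next)
    {
        printf("jump at offset 0x%02X\n", j->offs);
    }
    return 0;
}
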
-
/*****************************************************************************
*
* Add a label instruction.
*/
-void emitter::emitIns_R_L (instruction ins,
- emitAttr attr,
- BasicBlock* dst,
- regNumber reg)
+void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
assert(ins == INS_lea);
assert(dst->bbFlags & BBF_JMP_TARGET);
- instrDescJmp* id = emitNewInstrJmp();
+ instrDescJmp* id = emitNewInstrJmp();
id->idIns(ins);
id->idReg1(reg);
id->idInsFmt(IF_RWR_LABEL);
- id->idOpSize(EA_SIZE(attr)); // emitNewInstrJmp() sets the size (incorrectly) to EA_1BYTE
- id->idAddr()->iiaBBlabel = dst;
+ id->idOpSize(EA_SIZE(attr)); // emitNewInstrJmp() sets the size (incorrectly) to EA_1BYTE
+ id->idAddr()->iiaBBlabel = dst;
/* The label reference is always long */
- id->idjShort = 0;
- id->idjKeepLong = 1;
+ id->idjShort = 0;
+ id->idjKeepLong = 1;
/* Record the current IG and offset within it */
- id->idjIG = emitCurIG;
- id->idjOffs = emitCurIGsize;
+ id->idjIG = emitCurIG;
+ id->idjOffs = emitCurIGsize;
/* Append this instruction to this IG's jump list */
- id->idjNext = emitCurIGjmpList;
- emitCurIGjmpList = id;
+ id->idjNext = emitCurIGjmpList;
+ emitCurIGjmpList = id;
#ifdef DEBUG
// Mark the catch return
@@ -4282,7 +4136,7 @@ void emitter::emitIns_R_L (instruction ins,
emitTotalIGjmps++;
#endif
- UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins));
+ UNATIVE_OFFSET sz = emitInsSizeAM(id, insCodeRM(ins));
id->idCodeSize(sz);
// Set the relocation flags - these give hint to zap to perform
@@ -4298,13 +4152,8 @@ void emitter::emitIns_R_L (instruction ins,
* The following adds instructions referencing address modes.
*/
-void emitter::emitIns_I_AR (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- int disp,
- int memCookie,
- void* clsCookie)
+void emitter::emitIns_I_AR(
+ instruction ins, emitAttr attr, int val, regNumber reg, int disp, int memCookie, void* clsCookie)
{
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));
@@ -4314,25 +4163,25 @@ void emitter::emitIns_I_AR (instruction ins,
noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif
- insFormat fmt;
+ insFormat fmt;
switch (ins)
{
- case INS_rcl_N:
- case INS_rcr_N:
- case INS_rol_N:
- case INS_ror_N:
- case INS_shl_N:
- case INS_shr_N:
- case INS_sar_N:
- assert(val != 1);
- fmt = IF_ARW_SHF;
- val &= 0x7F;
- break;
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
+ assert(val != 1);
+ fmt = IF_ARW_SHF;
+ val &= 0x7F;
+ break;
- default:
- fmt = emitInsModeFormat(ins, IF_ARD_CNS);
- break;
+ default:
+ fmt = emitInsModeFormat(ins, IF_ARD_CNS);
+ break;
}
/*
@@ -4343,16 +4192,16 @@ void emitter::emitIns_I_AR (instruction ins,
}
*/
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmdCns(attr, disp, val);
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmdCns(attr, disp, val);
id->idIns(ins);
id->idInsFmt(fmt);
- assert((memCookie == NULL) == (clsCookie == NULL));
+ assert((memCookie == NULL) == (clsCookie == nullptr));
-#ifdef DEBUG
- id->idDebugOnlyInfo()->idMemCookie = memCookie;
- id->idDebugOnlyInfo()->idClsCookie = clsCookie;
+#ifdef DEBUG
+ id->idDebugOnlyInfo()->idMemCookie = memCookie;
+ id->idDebugOnlyInfo()->idClsCookie = clsCookie;
#endif
id->idAddr()->iiaAddrMode.amBaseReg = reg;
@@ -4367,10 +4216,7 @@ void emitter::emitIns_I_AR (instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_I_AI (instruction ins,
- emitAttr attr,
- int val,
- ssize_t disp)
+void emitter::emitIns_I_AI(instruction ins, emitAttr attr, int val, ssize_t disp)
{
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));
@@ -4380,25 +4226,25 @@ void emitter::emitIns_I_AI (instruction ins,
noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif
- insFormat fmt;
+ insFormat fmt;
switch (ins)
{
- case INS_rcl_N:
- case INS_rcr_N:
- case INS_rol_N:
- case INS_ror_N:
- case INS_shl_N:
- case INS_shr_N:
- case INS_sar_N:
- assert(val != 1);
- fmt = IF_ARW_SHF;
- val &= 0x7F;
- break;
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
+ assert(val != 1);
+ fmt = IF_ARW_SHF;
+ val &= 0x7F;
+ break;
- default:
- fmt = emitInsModeFormat(ins, IF_ARD_CNS);
- break;
+ default:
+ fmt = emitInsModeFormat(ins, IF_ARD_CNS);
+ break;
}
/*
@@ -4409,8 +4255,8 @@ void emitter::emitIns_I_AI (instruction ins,
}
*/
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmdCns(attr, disp, val);
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmdCns(attr, disp, val);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4426,18 +4272,13 @@ void emitter::emitIns_I_AI (instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_R_AR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber base,
- int disp,
- int memCookie,
- void* clsCookie)
+void emitter::emitIns_R_AR(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber base, int disp, int memCookie, void* clsCookie)
{
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_32BYTE) && (ireg != REG_NA));
noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));
- if (ins == INS_lea)
+ if (ins == INS_lea)
{
if (ireg == base && disp == 0)
{
@@ -4448,19 +4289,19 @@ void emitter::emitIns_R_AR (instruction ins,
}
}
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmd(attr, disp);
- insFormat fmt = emitInsModeFormat(ins, IF_RRD_ARD);
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmd(attr, disp);
+ insFormat fmt = emitInsModeFormat(ins, IF_RRD_ARD);
id->idIns(ins);
id->idInsFmt(fmt);
id->idReg1(ireg);
- assert((memCookie == NULL) == (clsCookie == NULL));
+ assert((memCookie == NULL) == (clsCookie == nullptr));
-#ifdef DEBUG
- id->idDebugOnlyInfo()->idMemCookie = memCookie;
- id->idDebugOnlyInfo()->idClsCookie = clsCookie;
+#ifdef DEBUG
+ id->idDebugOnlyInfo()->idMemCookie = memCookie;
+ id->idDebugOnlyInfo()->idClsCookie = clsCookie;
#endif
id->idAddr()->iiaAddrMode.amBaseReg = base;
@@ -4475,17 +4316,14 @@ void emitter::emitIns_R_AR (instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_R_AI (instruction ins,
- emitAttr attr,
- regNumber ireg,
- ssize_t disp)
+void emitter::emitIns_R_AI(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp)
{
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE) && (ireg != REG_NA));
noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmd(attr, disp);
- insFormat fmt = emitInsModeFormat(ins, IF_RRD_ARD);
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmd(attr, disp);
+ insFormat fmt = emitInsModeFormat(ins, IF_RRD_ARD);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4503,24 +4341,19 @@ void emitter::emitIns_R_AI (instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_AR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber base,
- int disp,
- int memCookie,
- void* clsCookie)
+void emitter::emitIns_AR_R(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber base, int disp, int memCookie, void* clsCookie)
{
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmd(attr, disp);
- insFormat fmt;
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmd(attr, disp);
+ insFormat fmt;
- if (ireg == REG_NA)
+ if (ireg == REG_NA)
{
#if FEATURE_STACK_FP_X87
fmt = emitInsModeFormat(ins, IF_ARD, IF_TRD_ARD, IF_AWR_TRD);
-#else // !FEATURE_STACK_FP_X87
- fmt = emitInsModeFormat(ins, IF_ARD);
+#else // !FEATURE_STACK_FP_X87
+ fmt = emitInsModeFormat(ins, IF_ARD);
#endif // !FEATURE_STACK_FP_X87
}
else
@@ -4536,11 +4369,11 @@ void emitter::emitIns_AR_R (instruction ins,
id->idIns(ins);
id->idInsFmt(fmt);
- assert((memCookie == NULL) == (clsCookie == NULL));
+ assert((memCookie == NULL) == (clsCookie == nullptr));
-#ifdef DEBUG
- id->idDebugOnlyInfo()->idMemCookie = memCookie;
- id->idDebugOnlyInfo()->idClsCookie = clsCookie;
+#ifdef DEBUG
+ id->idDebugOnlyInfo()->idMemCookie = memCookie;
+ id->idDebugOnlyInfo()->idClsCookie = clsCookie;
#endif
id->idAddr()->iiaAddrMode.amBaseReg = base;
@@ -4556,36 +4389,34 @@ void emitter::emitIns_AR_R (instruction ins,
#if !FEATURE_FIXED_OUT_ARGS
- if (ins == INS_push)
+ if (ins == INS_push)
{
emitCurStackLvl += emitCntStackDepth;
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
else if (ins == INS_pop)
{
- emitCurStackLvl -= emitCntStackDepth; assert((int)emitCurStackLvl >= 0);
+ emitCurStackLvl -= emitCntStackDepth;
+ assert((int)emitCurStackLvl >= 0);
}
#endif // !FEATURE_FIXED_OUT_ARGS
}
-void emitter::emitIns_AI_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- ssize_t disp)
+void emitter::emitIns_AI_R(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp)
{
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmd(attr, disp);
- insFormat fmt;
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmd(attr, disp);
+ insFormat fmt;
- if (ireg == REG_NA)
+ if (ireg == REG_NA)
{
#if FEATURE_STACK_FP_X87
fmt = emitInsModeFormat(ins, IF_ARD, IF_TRD_ARD, IF_AWR_TRD);
-#else // FEATURE_STACK_FP_X87
- fmt = emitInsModeFormat(ins, IF_ARD);
+#else // FEATURE_STACK_FP_X87
+ fmt = emitInsModeFormat(ins, IF_ARD);
#endif // FEATURE_STACK_FP_X87
}
else
@@ -4618,23 +4449,19 @@ void emitter::emitIns_AI_R (instruction ins,
{
emitCurStackLvl += emitCntStackDepth;
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
else if (ins == INS_pop)
{
- emitCurStackLvl -= emitCntStackDepth; assert((int)emitCurStackLvl >= 0);
+ emitCurStackLvl -= emitCntStackDepth;
+ assert((int)emitCurStackLvl >= 0);
}
#endif // !FEATURE_FIXED_OUT_ARGS
}
-void emitter::emitIns_I_ARR (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- regNumber rg2,
- int disp)
+void emitter::emitIns_I_ARR(instruction ins, emitAttr attr, int val, regNumber reg, regNumber rg2, int disp)
{
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));
@@ -4644,29 +4471,29 @@ void emitter::emitIns_I_ARR (instruction ins,
noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif
- insFormat fmt;
+ insFormat fmt;
switch (ins)
{
- case INS_rcl_N:
- case INS_rcr_N:
- case INS_rol_N:
- case INS_ror_N:
- case INS_shl_N:
- case INS_shr_N:
- case INS_sar_N:
- assert(val != 1);
- fmt = IF_ARW_SHF;
- val &= 0x7F;
- break;
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
+ assert(val != 1);
+ fmt = IF_ARW_SHF;
+ val &= 0x7F;
+ break;
- default:
- fmt = emitInsModeFormat(ins, IF_ARD_CNS);
- break;
+ default:
+ fmt = emitInsModeFormat(ins, IF_ARD_CNS);
+ break;
}
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmdCns(attr, disp, val);
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmdCns(attr, disp, val);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4683,19 +4510,14 @@ void emitter::emitIns_I_ARR (instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_R_ARR(instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber base,
- regNumber index,
- int disp)
+void emitter::emitIns_R_ARR(instruction ins, emitAttr attr, regNumber ireg, regNumber base, regNumber index, int disp)
{
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE) && (ireg != REG_NA));
noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmd(attr, disp);
- insFormat fmt = emitInsModeFormat(ins, IF_RRD_ARD);
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmd(attr, disp);
+ insFormat fmt = emitInsModeFormat(ins, IF_RRD_ARD);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4714,28 +4536,23 @@ void emitter::emitIns_R_ARR(instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_ARR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber index,
- int disp)
+void emitter::emitIns_ARR_R(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber index, int disp)
{
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmd(attr, disp);
- insFormat fmt;
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmd(attr, disp);
+ insFormat fmt;
- if (ireg == REG_NA)
+ if (ireg == REG_NA)
{
#if FEATURE_STACK_FP_X87
- fmt = emitInsModeFormat(ins, IF_ARD, IF_TRD_ARD, IF_AWR_TRD);
-#else // FEATURE_STACK_FP_X87
- fmt = emitInsModeFormat(ins, IF_ARD);
+ fmt = emitInsModeFormat(ins, IF_ARD, IF_TRD_ARD, IF_AWR_TRD);
+#else // FEATURE_STACK_FP_X87
+ fmt = emitInsModeFormat(ins, IF_ARD);
#endif // FEATURE_STACK_FP_X87
}
else
{
- fmt = emitInsModeFormat(ins, IF_ARD_RRD);
+ fmt = emitInsModeFormat(ins, IF_ARD_RRD);
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));
noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));
@@ -4760,29 +4577,24 @@ void emitter::emitIns_ARR_R (instruction ins,
#if !FEATURE_FIXED_OUT_ARGS
- if (ins == INS_push)
+ if (ins == INS_push)
{
emitCurStackLvl += emitCntStackDepth;
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
else if (ins == INS_pop)
{
- emitCurStackLvl -= emitCntStackDepth; assert((int)emitCurStackLvl >= 0);
+ emitCurStackLvl -= emitCntStackDepth;
+ assert((int)emitCurStackLvl >= 0);
}
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
-void emitter::emitIns_I_ARX (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- regNumber rg2,
- unsigned mul,
- int disp)
+void emitter::emitIns_I_ARX(
+ instruction ins, emitAttr attr, int val, regNumber reg, regNumber rg2, unsigned mul, int disp)
{
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));
@@ -4792,29 +4604,29 @@ void emitter::emitIns_I_ARX (instruction ins,
noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif
- insFormat fmt;
+ insFormat fmt;
switch (ins)
{
- case INS_rcl_N:
- case INS_rcr_N:
- case INS_rol_N:
- case INS_ror_N:
- case INS_shl_N:
- case INS_shr_N:
- case INS_sar_N:
- assert(val != 1);
- fmt = IF_ARW_SHF;
- val &= 0x7F;
- break;
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
+ assert(val != 1);
+ fmt = IF_ARW_SHF;
+ val &= 0x7F;
+ break;
- default:
- fmt = emitInsModeFormat(ins, IF_ARD_CNS);
- break;
+ default:
+ fmt = emitInsModeFormat(ins, IF_ARD_CNS);
+ break;
}
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmdCns(attr, disp, val);
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmdCns(attr, disp, val);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4832,20 +4644,15 @@ void emitter::emitIns_I_ARX (instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_R_ARX (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber base,
- regNumber index,
- unsigned mul,
- int disp)
+void emitter::emitIns_R_ARX(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber base, regNumber index, unsigned mul, int disp)
{
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE) && (ireg != REG_NA));
noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmd(attr, disp);
- insFormat fmt = emitInsModeFormat(ins, IF_RRD_ARD);
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmd(attr, disp);
+ insFormat fmt = emitInsModeFormat(ins, IF_RRD_ARD);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4864,24 +4671,19 @@ void emitter::emitIns_R_ARX (instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_ARX_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber base,
- regNumber index,
- unsigned mul,
- int disp)
+void emitter::emitIns_ARX_R(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber base, regNumber index, unsigned mul, int disp)
{
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmd(attr, disp);
- insFormat fmt;
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmd(attr, disp);
+ insFormat fmt;
- if (ireg == REG_NA)
+ if (ireg == REG_NA)
{
#if FEATURE_STACK_FP_X87
- fmt = emitInsModeFormat(ins, IF_ARD, IF_TRD_ARD, IF_AWR_TRD);
-#else // !FEATURE_STACK_FP_X87
- fmt = emitInsModeFormat(ins, IF_ARD);
+ fmt = emitInsModeFormat(ins, IF_ARD, IF_TRD_ARD, IF_AWR_TRD);
+#else // !FEATURE_STACK_FP_X87
+ fmt = emitInsModeFormat(ins, IF_ARD);
#endif // !FEATURE_STACK_FP_X87
}
else
@@ -4911,28 +4713,23 @@ void emitter::emitIns_ARX_R (instruction ins,
#if !FEATURE_FIXED_OUT_ARGS
- if (ins == INS_push)
+ if (ins == INS_push)
{
emitCurStackLvl += emitCntStackDepth;
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
else if (ins == INS_pop)
{
- emitCurStackLvl -= emitCntStackDepth; assert((int)emitCurStackLvl >= 0);
+ emitCurStackLvl -= emitCntStackDepth;
+ assert((int)emitCurStackLvl >= 0);
}
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
-void emitter::emitIns_I_AX (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- unsigned mul,
- int disp)
+void emitter::emitIns_I_AX(instruction ins, emitAttr attr, int val, regNumber reg, unsigned mul, int disp)
{
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));
@@ -4942,29 +4739,29 @@ void emitter::emitIns_I_AX (instruction ins,
noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif
- insFormat fmt;
+ insFormat fmt;
switch (ins)
{
- case INS_rcl_N:
- case INS_rcr_N:
- case INS_rol_N:
- case INS_ror_N:
- case INS_shl_N:
- case INS_shr_N:
- case INS_sar_N:
- assert(val != 1);
- fmt = IF_ARW_SHF;
- val &= 0x7F;
- break;
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
+ assert(val != 1);
+ fmt = IF_ARW_SHF;
+ val &= 0x7F;
+ break;
- default:
- fmt = emitInsModeFormat(ins, IF_ARD_CNS);
- break;
+ default:
+ fmt = emitInsModeFormat(ins, IF_ARD_CNS);
+ break;
}
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmdCns(attr, disp, val);
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmdCns(attr, disp, val);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -4981,19 +4778,14 @@ void emitter::emitIns_I_AX (instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_R_AX (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- unsigned mul,
- int disp)
+void emitter::emitIns_R_AX(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, unsigned mul, int disp)
{
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE) && (ireg != REG_NA));
noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmd(attr, disp);
- insFormat fmt = emitInsModeFormat(ins, IF_RRD_ARD);
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmd(attr, disp);
+ insFormat fmt = emitInsModeFormat(ins, IF_RRD_ARD);
id->idIns(ins);
id->idInsFmt(fmt);
@@ -5012,28 +4804,23 @@ void emitter::emitIns_R_AX (instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_AX_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- unsigned mul,
- int disp)
+void emitter::emitIns_AX_R(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, unsigned mul, int disp)
{
- UNATIVE_OFFSET sz;
- instrDesc* id = emitNewInstrAmd(attr, disp);
- insFormat fmt;
+ UNATIVE_OFFSET sz;
+ instrDesc* id = emitNewInstrAmd(attr, disp);
+ insFormat fmt;
- if (ireg == REG_NA)
+ if (ireg == REG_NA)
{
#if FEATURE_STACK_FP_X87
- fmt = emitInsModeFormat(ins, IF_ARD, IF_TRD_ARD, IF_AWR_TRD);
-#else // !FEATURE_STACK_FP_X87
- fmt = emitInsModeFormat(ins, IF_ARD);
+ fmt = emitInsModeFormat(ins, IF_ARD, IF_TRD_ARD, IF_AWR_TRD);
+#else // !FEATURE_STACK_FP_X87
+ fmt = emitInsModeFormat(ins, IF_ARD);
#endif // !FEATURE_STACK_FP_X87
}
else
{
- fmt = emitInsModeFormat(ins, IF_ARD_RRD);
+ fmt = emitInsModeFormat(ins, IF_ARD_RRD);
noway_assert(emitVerifyEncodable(ins, EA_SIZE(attr), ireg));
assert((CodeGen::instIsFP(ins) == false) && (EA_SIZE(attr) <= EA_8BYTE));
@@ -5057,20 +4844,20 @@ void emitter::emitIns_AX_R (instruction ins,
#if !FEATURE_FIXED_OUT_ARGS
- if (ins == INS_push)
+ if (ins == INS_push)
{
emitCurStackLvl += emitCntStackDepth;
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
else if (ins == INS_pop)
{
- emitCurStackLvl -= emitCntStackDepth; assert((int)emitCurStackLvl >= 0);
+ emitCurStackLvl -= emitCntStackDepth;
+ assert((int)emitCurStackLvl >= 0);
}
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
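
The _ARR/_ARX/_AX families describe scaled address modes of the form [base + index*scale + disp]. x86 can only scale an index register by 1, 2, 4 or 8, and the SIB byte stores the factor as its log2, which is essentially what emitEncodeScale has to produce (the real function returns the emitter's OPSZ* values; the name below is a stand-in). A small sketch of that mapping:

#include <cassert>

// Map a scale factor of 1/2/4/8 to the 2-bit SIB.scale field (its log2).
unsigned encodeScale(unsigned mul)
{
    switch (mul)
    {
        case 1:
            return 0;
        case 2:
            return 1;
        case 4:
            return 2;
        case 8:
            return 3;
        default:
            assert(!"unsupported scale factor");
            return 0;
    }
}

int main()
{
    assert(encodeScale(1) == 0);
    assert(encodeScale(4) == 2);
    assert(encodeScale(8) == 3);
    return 0;
}
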
/*****************************************************************************
@@ -5078,17 +4865,14 @@ void emitter::emitIns_AX_R (instruction ins,
* The following add instructions referencing stack-based local variables.
*/
-void emitter::emitIns_S (instruction ins,
- emitAttr attr,
- int varx,
- int offs)
+void emitter::emitIns_S(instruction ins, emitAttr attr, int varx, int offs)
{
- instrDesc* id = emitNewInstr(attr);
- UNATIVE_OFFSET sz = emitInsSizeSV(insCodeMR(ins), varx, offs);
+ instrDesc* id = emitNewInstr(attr);
+ UNATIVE_OFFSET sz = emitInsSizeSV(insCodeMR(ins), varx, offs);
#if FEATURE_STACK_FP_X87
- insFormat fmt = emitInsModeFormat(ins, IF_SRD, IF_TRD_SRD, IF_SWR_TRD);
-#else // !FEATURE_STACK_FP_X87
- insFormat fmt = emitInsModeFormat(ins, IF_SRD);
+ insFormat fmt = emitInsModeFormat(ins, IF_SRD, IF_TRD_SRD, IF_SWR_TRD);
+#else // !FEATURE_STACK_FP_X87
+ insFormat fmt = emitInsModeFormat(ins, IF_SRD);
#endif // !FEATURE_STACK_FP_X87
// 16-bit operand instructions will need a prefix
@@ -5123,27 +4907,23 @@ void emitter::emitIns_S (instruction ins,
{
emitCurStackLvl += emitCntStackDepth;
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
else if (ins == INS_pop)
{
- emitCurStackLvl -= emitCntStackDepth; assert((int)emitCurStackLvl >= 0);
+ emitCurStackLvl -= emitCntStackDepth;
+ assert((int)emitCurStackLvl >= 0);
}
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
-void emitter::emitIns_S_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- int varx,
- int offs)
+void emitter::emitIns_S_R(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs)
{
- instrDesc* id = emitNewInstr(attr);
- UNATIVE_OFFSET sz = emitInsSizeSV(insCodeMR(ins), varx, offs);
- insFormat fmt = emitInsModeFormat(ins, IF_SRD_RRD);
+ instrDesc* id = emitNewInstr(attr);
+ UNATIVE_OFFSET sz = emitInsSizeSV(insCodeMR(ins), varx, offs);
+ insFormat fmt = emitInsModeFormat(ins, IF_SRD_RRD);
// 16-bit operand instructions will need a prefix
if (EA_SIZE(attr) == EA_2BYTE)
@@ -5155,8 +4935,7 @@ void emitter::emitIns_S_R (instruction ins,
sz += emitGetVexPrefixAdjustedSize(ins, attr, insCodeMR(ins));
// 64-bit operand instructions will need a REX.W prefix
- if (TakesRexWPrefix(ins, attr)
- || IsExtendedReg(ireg, attr))
+ if (TakesRexWPrefix(ins, attr) || IsExtendedReg(ireg, attr))
{
sz += emitGetRexPrefixSize(ins);
}
@@ -5173,22 +4952,17 @@ void emitter::emitIns_S_R (instruction ins,
emitCurIGsize += sz;
}
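
The prefix adjustments in these locals-based forms are pure size accounting: a 16-bit operand needs the 0x66 operand-size override (elsewhere in this file movsx/movzx are excepted because their narrow source width is encoded in the opcode itself), and on x64 an 8-byte operand or any use of r8-r15 / xmm8-xmm15 needs one REX byte, which carries both the W bit and the extended-register bits. A simplified sketch of that accounting, assuming a bare register index where values 8 and up stand for the extended registers:

#include <cstdio>

// Extra prefix bytes for an instruction, simplified: opSize is the operand
// size in bytes; reg indices 8..15 stand for r8-r15 / xmm8-xmm15.
unsigned prefixBytes(unsigned opSize, unsigned reg)
{
    unsigned sz = 0;
    if (opSize == 2)
    {
        sz += 1; // 0x66 operand-size override for 16-bit operands
    }
    if (opSize == 8 || reg >= 8)
    {
        sz += 1; // a single REX byte covers REX.W and the extended-register bits
    }
    return sz;
}

int main()
{
    printf("mov ax,  [fp] : +%u prefix bytes\n", prefixBytes(2, 0)); // 0x66
    printf("mov rax, [fp] : +%u prefix bytes\n", prefixBytes(8, 0)); // REX.W
    printf("mov r9d, [fp] : +%u prefix bytes\n", prefixBytes(4, 9)); // REX.R
    printf("mov r9,  [fp] : +%u prefix bytes\n", prefixBytes(8, 9)); // one REX
    return 0;
}
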
-void emitter::emitIns_R_S (instruction ins,
- emitAttr attr,
- regNumber ireg,
- int varx,
- int offs)
+void emitter::emitIns_R_S(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs)
{
- emitAttr size = EA_SIZE(attr);
+ emitAttr size = EA_SIZE(attr);
noway_assert(emitVerifyEncodable(ins, size, ireg));
- instrDesc* id = emitNewInstr(attr);
- UNATIVE_OFFSET sz = emitInsSizeSV(insCodeRM(ins), varx, offs);
- insFormat fmt = emitInsModeFormat(ins, IF_RRD_SRD);
+ instrDesc* id = emitNewInstr(attr);
+ UNATIVE_OFFSET sz = emitInsSizeSV(insCodeRM(ins), varx, offs);
+ insFormat fmt = emitInsModeFormat(ins, IF_RRD_SRD);
// Most 16-bit operand instructions need a prefix
- if (size == EA_2BYTE && ins != INS_movsx
- && ins != INS_movzx)
+ if (size == EA_2BYTE && ins != INS_movsx && ins != INS_movzx)
{
sz++;
}
@@ -5214,11 +4988,7 @@ void emitter::emitIns_R_S (instruction ins,
emitCurIGsize += sz;
}
-void emitter::emitIns_S_I (instruction ins,
- emitAttr attr,
- int varx,
- int offs,
- int val)
+void emitter::emitIns_S_I(instruction ins, emitAttr attr, int varx, int offs, int val)
{
#ifdef _TARGET_AMD64_
// mov reg, imm64 is the only opcode which takes a full 8 byte immediate
@@ -5226,31 +4996,31 @@ void emitter::emitIns_S_I (instruction ins,
noway_assert(EA_SIZE(attr) < EA_8BYTE || !EA_IS_CNS_RELOC(attr));
#endif
- insFormat fmt;
+ insFormat fmt;
switch (ins)
{
- case INS_rcl_N:
- case INS_rcr_N:
- case INS_rol_N:
- case INS_ror_N:
- case INS_shl_N:
- case INS_shr_N:
- case INS_sar_N:
- assert(val != 1);
- fmt = IF_SRW_SHF;
- val &= 0x7F;
- break;
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
+ assert(val != 1);
+ fmt = IF_SRW_SHF;
+ val &= 0x7F;
+ break;
- default:
- fmt = emitInsModeFormat(ins, IF_SRD_CNS);
- break;
+ default:
+ fmt = emitInsModeFormat(ins, IF_SRD_CNS);
+ break;
}
- instrDesc* id = emitNewInstrCns(attr, val);
+ instrDesc* id = emitNewInstrCns(attr, val);
id->idIns(ins);
id->idInsFmt(fmt);
- UNATIVE_OFFSET sz = emitInsSizeSV(id, varx, offs, val);
+ UNATIVE_OFFSET sz = emitInsSizeSV(id, varx, offs, val);
// VEX prefix
sz += emitGetVexPrefixAdjustedSize(ins, attr, insCodeMI(ins));
@@ -5275,10 +5045,12 @@ void emitter::emitIns_S_I (instruction ins,
* Record that a jump instruction uses the short encoding
*
*/
-void emitter::emitSetShortJump(instrDescJmp* id)
+void emitter::emitSetShortJump(instrDescJmp* id)
{
if (id->idjKeepLong)
+ {
return;
+ }
id->idjShort = true;
}
@@ -5288,18 +5060,16 @@ void emitter::emitSetShortJump(instrDescJmp* id)
* Add a jmp instruction.
*/
-void emitter::emitIns_J(instruction ins,
- BasicBlock* dst,
- int instrCount /* = 0 */)
+void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0 */)
{
- UNATIVE_OFFSET sz;
- instrDescJmp* id = emitNewInstrJmp();
+ UNATIVE_OFFSET sz;
+ instrDescJmp* id = emitNewInstrJmp();
assert(dst->bbFlags & BBF_JMP_TARGET);
id->idIns(ins);
id->idInsFmt(IF_LABEL);
- id->idAddr()->iiaBBlabel = dst;
+ id->idAddr()->iiaBBlabel = dst;
#ifdef DEBUG
// Mark the finally call
@@ -5311,18 +5081,18 @@ void emitter::emitIns_J(instruction ins,
/* Assume the jump will be long */
- id->idjShort = 0;
- id->idjKeepLong = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
+ id->idjShort = 0;
+ id->idjKeepLong = emitComp->fgInDifferentRegions(emitComp->compCurBB, dst);
/* Record the jump's IG and offset within it */
- id->idjIG = emitCurIG;
- id->idjOffs = emitCurIGsize;
+ id->idjIG = emitCurIG;
+ id->idjOffs = emitCurIGsize;
/* Append this jump to this IG's jump list */
- id->idjNext = emitCurIGjmpList;
- emitCurIGjmpList = id;
+ id->idjNext = emitCurIGjmpList;
+ emitCurIGjmpList = id;
#if EMITTER_STATS
emitTotalIGjmps++;
@@ -5330,7 +5100,7 @@ void emitter::emitIns_J(instruction ins,
/* Figure out the max. size of the jump/call instruction */
- if (ins == INS_call)
+ if (ins == INS_call)
{
sz = CALL_INST_SIZE;
}
@@ -5341,10 +5111,11 @@ void emitter::emitIns_J(instruction ins,
// as the instruction uses the absolute address,
// not a relative address
if (emitComp->opts.compReloc)
+ {
id->idSetIsDspReloc();
+ }
#endif
sz = PUSH_INST_SIZE;
-
}
else
{
@@ -5352,18 +5123,17 @@ void emitter::emitIns_J(instruction ins,
/* This is a jump - assume the worst */
- sz = (ins == INS_jmp) ? JMP_SIZE_LARGE
- : JCC_SIZE_LARGE;
+ sz = (ins == INS_jmp) ? JMP_SIZE_LARGE : JCC_SIZE_LARGE;
/* Can we guess at the jump distance? */
tgt = (insGroup*)emitCodeGetCookie(dst);
- if (tgt)
+ if (tgt)
{
- int extra;
- UNATIVE_OFFSET srcOffs;
- int jmpDist;
+ int extra;
+ UNATIVE_OFFSET srcOffs;
+ int jmpDist;
assert(JMP_SIZE_SMALL == JCC_SIZE_SMALL);
@@ -5373,26 +5143,31 @@ void emitter::emitIns_J(instruction ins,
/* Compute the distance estimate */
- jmpDist = srcOffs - tgt->igOffs; assert((int)jmpDist > 0);
+ jmpDist = srcOffs - tgt->igOffs;
+ assert((int)jmpDist > 0);
/* How much beyond the max. short distance does the jump go? */
extra = jmpDist + JMP_DIST_SMALL_MAX_NEG;
-#if DEBUG_EMIT
- if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+#if DEBUG_EMIT
+ if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- if (INTERESTING_JUMP_NUM == 0)
- printf("[0] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
+ if (INTERESTING_JUMP_NUM == 0)
+ {
+ printf("[0] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
+ }
printf("[0] Jump source is at %08X\n", srcOffs);
printf("[0] Label block is at %08X\n", tgt->igOffs);
printf("[0] Jump distance - %04X\n", jmpDist);
- if (extra > 0)
- printf("[0] Distance excess = %d \n", extra);
+ if (extra > 0)
+ {
+ printf("[0] Distance excess = %d \n", extra);
+ }
}
#endif
- if (extra <= 0 && !id->idjKeepLong)
+ if (extra <= 0 && !id->idjKeepLong)
{
/* Wonderful - this jump surely will be short */
@@ -5400,14 +5175,17 @@ void emitter::emitIns_J(instruction ins,
sz = JMP_SIZE_SMALL;
}
}
-#if DEBUG_EMIT
+#if DEBUG_EMIT
else
{
- if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+ if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- if (INTERESTING_JUMP_NUM == 0)
- printf("[0] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
- printf("[0] Jump source is at %04X/%08X\n", emitCurIGsize, emitCurCodeOffset + emitCurIGsize + JMP_SIZE_SMALL);
+ if (INTERESTING_JUMP_NUM == 0)
+ {
+ printf("[0] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
+ }
+ printf("[0] Jump source is at %04X/%08X\n", emitCurIGsize,
+ emitCurCodeOffset + emitCurIGsize + JMP_SIZE_SMALL);
printf("[0] Label block is unknown\n");
}
}
@@ -5421,16 +5199,15 @@ void emitter::emitIns_J(instruction ins,
#if !FEATURE_FIXED_OUT_ARGS
- if (ins == INS_push)
+ if (ins == INS_push)
{
emitCurStackLvl += emitCntStackDepth;
- if (emitMaxStackDepth < emitCurStackLvl)
- emitMaxStackDepth = emitCurStackLvl;
+ if (emitMaxStackDepth < emitCurStackLvl)
+ emitMaxStackDepth = emitCurStackLvl;
}
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
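
For a backward jump whose target group is already laid out, emitIns_J can decide up front whether the 2-byte rel8 form is safe: it computes the distance back to the target and adds the most negative displacement the short form can reach (JMP_DIST_SMALL_MAX_NEG, roughly -128 adjusted for the jump's own length); a non-positive result means the short form fits, unless the jump was already marked keep-long because source and target sit in different hot/cold regions. A simplified standalone version of that test; the -126 bound and the function name are illustrative:

#include <cstdio>

// Can a backward jump from 'srcOffs' to 'dstOffs' (byte offsets within the
// code stream, dstOffs <= srcOffs) use the 2-byte rel8 form?  'maxNeg' is
// the most negative displacement the short form can reach once the jump's
// own length is accounted for.
bool backwardJumpFitsShort(unsigned srcOffs, unsigned dstOffs, int maxNeg = -126)
{
    int jmpDist = (int)(srcOffs - dstOffs); // distance back to the target
    int extra   = jmpDist + maxNeg;         // bytes beyond the short range
    return extra <= 0;
}

int main()
{
    printf("50 bytes back : %s\n", backwardJumpFitsShort(0x0232, 0x0200) ? "short" : "long");
    printf("300 bytes back: %s\n", backwardJumpFitsShort(0x032C, 0x0200) ? "short" : "long");
    return 0;
}
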
/*****************************************************************************
@@ -5454,44 +5231,38 @@ void emitter::emitIns_J(instruction ins,
*
*/
-void emitter::emitIns_Call(EmitCallType callType,
- CORINFO_METHOD_HANDLE methHnd,
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
- void* addr,
- ssize_t argSize,
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
- VARSET_VALARG_TP ptrVars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- IL_OFFSETX ilOffset, // = BAD_IL_OFFSET
- regNumber ireg, // = REG_NA
- regNumber xreg, // = REG_NA
- unsigned xmul, // = 0
- ssize_t disp, // = 0
- bool isJump, // = false
- bool isNoGC) // = false
+void emitter::emitIns_Call(EmitCallType callType,
+ CORINFO_METHOD_HANDLE methHnd,
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
+ void* addr,
+ ssize_t argSize,
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
+ VARSET_VALARG_TP ptrVars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ IL_OFFSETX ilOffset, // = BAD_IL_OFFSET
+ regNumber ireg, // = REG_NA
+ regNumber xreg, // = REG_NA
+ unsigned xmul, // = 0
+ ssize_t disp, // = 0
+ bool isJump, // = false
+ bool isNoGC) // = false
{
/* Sanity check the arguments depending on callType */
assert(callType < EC_COUNT);
assert((callType != EC_FUNC_TOKEN && callType != EC_FUNC_TOKEN_INDIR && callType != EC_FUNC_ADDR) ||
(ireg == REG_NA && xreg == REG_NA && xmul == 0 && disp == 0));
- assert(callType != EC_FUNC_VIRTUAL ||
- (ireg < REG_COUNT && xreg == REG_NA && xmul == 0));
- assert(callType < EC_INDIR_R || callType == EC_INDIR_ARD || callType == EC_INDIR_C || addr == NULL);
- assert(callType != EC_INDIR_R ||
- (ireg < REG_COUNT && xreg == REG_NA && xmul == 0 && disp == 0));
+ assert(callType != EC_FUNC_VIRTUAL || (ireg < REG_COUNT && xreg == REG_NA && xmul == 0));
+ assert(callType < EC_INDIR_R || callType == EC_INDIR_ARD || callType == EC_INDIR_C || addr == nullptr);
+ assert(callType != EC_INDIR_R || (ireg < REG_COUNT && xreg == REG_NA && xmul == 0 && disp == 0));
assert(callType != EC_INDIR_SR ||
(ireg == REG_NA && xreg == REG_NA && xmul == 0 && disp < (int)emitComp->lvaCount));
- assert(callType != EC_INDIR_C ||
- (ireg == REG_NA && xreg == REG_NA && xmul == 0 && disp != 0));
-
-
+ assert(callType != EC_INDIR_C || (ireg == REG_NA && xreg == REG_NA && xmul == 0 && disp != 0));
// Our stack level should be always greater than the bytes of arguments we push. Just
// a sanity test.
- assert((unsigned) abs((signed)argSize) <= codeGen->genStackLevel);
+ assert((unsigned)abs((signed)argSize) <= codeGen->genStackLevel);
#if STACK_PROBES
if (emitComp->opts.compNeedStackProbes)
@@ -5510,20 +5281,19 @@ void emitter::emitIns_Call(EmitCallType callType,
//
//
//
- if ( (sizeof(void*) + // return address for call
- emitComp->genStackLevel +
- // Current stack level. This gets resetted on every
- // localloc and on the prolog (invariant is that
- // genStackLevel is 0 on basic block entry and exit and
- // after any alloca). genStackLevel will include any arguments
- // to the call, so we will insert an aditional probe if
- // we've consumed more than JIT_RESERVED_STACK bytes
- // of stack, which is what the prolog probe covers (in
- // addition to the EE requested size)
- (emitComp->compHndBBtabCount * sizeof(void*))
- // Hidden slots for calling finallys
- )
- >= JIT_RESERVED_STACK)
+ if ((sizeof(void*) + // return address for call
+ emitComp->genStackLevel +
+         // Current stack level. This gets reset on every
+ // localloc and on the prolog (invariant is that
+ // genStackLevel is 0 on basic block entry and exit and
+ // after any alloca). genStackLevel will include any arguments
+         // to the call, so we will insert an additional probe if
+ // we've consumed more than JIT_RESERVED_STACK bytes
+ // of stack, which is what the prolog probe covers (in
+ // addition to the EE requested size)
+ (emitComp->compHndBBtabCount * sizeof(void*))
+ // Hidden slots for calling finallys
+ ) >= JIT_RESERVED_STACK)
{
// This happens when you have a call with a lot of arguments or a call is done
        // when there's a lot of stuff pushed on the stack (for example a call whose returned
@@ -5580,11 +5350,10 @@ void emitter::emitIns_Call(EmitCallType callType,
}
#endif // STACK_PROBES
+ int argCnt;
- int argCnt;
-
- UNATIVE_OFFSET sz;
- instrDesc* id;
+ UNATIVE_OFFSET sz;
+ instrDesc* id;
/* This is the saved set of registers after a normal call */
unsigned savedSet = RBM_CALLEE_SAVED;
@@ -5606,24 +5375,24 @@ void emitter::emitIns_Call(EmitCallType callType,
gcrefRegs &= savedSet;
byrefRegs &= savedSet;
-#ifdef DEBUG
- if (EMIT_GC_VERBOSE)
+#ifdef DEBUG
+ if (EMIT_GC_VERBOSE)
{
printf("\t\t\t\t\t\t\tCall: GCvars=%s ", VarSetOps::ToString(emitComp, ptrVars));
dumpConvertedVarSet(emitComp, ptrVars);
printf(", gcrefRegs=");
printRegMaskInt(gcrefRegs);
- emitDispRegSet (gcrefRegs);
+ emitDispRegSet(gcrefRegs);
printf(", byrefRegs=");
printRegMaskInt(byrefRegs);
- emitDispRegSet (byrefRegs);
+ emitDispRegSet(byrefRegs);
printf("\n");
}
#endif
- assert( argSize % sizeof(void*) == 0);
+ assert(argSize % sizeof(void*) == 0);
argCnt = (int)(argSize / (ssize_t)sizeof(void*)); // we need a signed-divide
-
+
#ifdef DEBUGGING_SUPPORT
/* Managed RetVal: emit sequence point for the call */
if (emitComp->opts.compDbgInfo && ilOffset != BAD_IL_OFFSET)
@@ -5646,37 +5415,25 @@ void emitter::emitIns_Call(EmitCallType callType,
Indir. call with GC vars 5,768
*/
- if (callType >= EC_FUNC_VIRTUAL)
+ if (callType >= EC_FUNC_VIRTUAL)
{
/* Indirect call, virtual calls */
- assert(callType == EC_FUNC_VIRTUAL || callType == EC_INDIR_R ||
- callType == EC_INDIR_SR || callType == EC_INDIR_C ||
- callType == EC_INDIR_ARD);
+ assert(callType == EC_FUNC_VIRTUAL || callType == EC_INDIR_R || callType == EC_INDIR_SR ||
+ callType == EC_INDIR_C || callType == EC_INDIR_ARD);
- id = emitNewInstrCallInd(argCnt,
- disp,
- ptrVars,
- gcrefRegs,
- byrefRegs,
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize));
+ id = emitNewInstrCallInd(argCnt, disp, ptrVars, gcrefRegs, byrefRegs,
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize));
}
else
{
// Helper/static/nonvirtual/function calls (direct or through handle),
// and calls to an absolute addr.
- assert(callType == EC_FUNC_TOKEN ||
- callType == EC_FUNC_TOKEN_INDIR ||
- callType == EC_FUNC_ADDR);
+ assert(callType == EC_FUNC_TOKEN || callType == EC_FUNC_TOKEN_INDIR || callType == EC_FUNC_ADDR);
- id = emitNewInstrCallDir(argCnt,
- ptrVars,
- gcrefRegs,
- byrefRegs,
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize));
+ id = emitNewInstrCallDir(argCnt, ptrVars, gcrefRegs, byrefRegs,
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize));
}
/* Update the emitter's live GC ref sets */
@@ -5692,89 +5449,92 @@ void emitter::emitIns_Call(EmitCallType callType,
{
assert(callType == EC_FUNC_TOKEN || callType == EC_FUNC_TOKEN_INDIR);
if (callType == EC_FUNC_TOKEN)
+ {
ins = INS_l_jmp;
+ }
else
+ {
ins = INS_i_jmp;
+ }
}
id->idIns(ins);
id->idSetIsNoGC(isNoGC);
// Record the address: method, indirection, or funcptr
- if (callType >= EC_FUNC_VIRTUAL)
+ if (callType >= EC_FUNC_VIRTUAL)
{
- // This is an indirect call (either a virtual call or func ptr call)
+ // This is an indirect call (either a virtual call or func ptr call)
switch (callType)
{
- case EC_INDIR_C:
- // Indirect call using an absolute code address.
- // Must be marked as relocatable and is done at the
- // branch target location.
- goto CALL_ADDR_MODE;
+ case EC_INDIR_C:
+ // Indirect call using an absolute code address.
+ // Must be marked as relocatable and is done at the
+ // branch target location.
+ goto CALL_ADDR_MODE;
- case EC_INDIR_R: // the address is in a register
+ case EC_INDIR_R: // the address is in a register
- id->idSetIsCallRegPtr();
+ id->idSetIsCallRegPtr();
- __fallthrough;
+ __fallthrough;
- case EC_INDIR_ARD: // the address is an indirection
+ case EC_INDIR_ARD: // the address is an indirection
- goto CALL_ADDR_MODE;
+ goto CALL_ADDR_MODE;
- case EC_INDIR_SR: // the address is in a lcl var
+ case EC_INDIR_SR: // the address is in a lcl var
- id->idInsFmt(IF_SRD);
- // disp is really a lclVarNum
- noway_assert((unsigned)disp == (size_t)disp);
- id->idAddr()->iiaLclVar.initLclVarAddr((unsigned)disp, 0);
- sz = emitInsSizeSV(insCodeMR(INS_call), (unsigned)disp, 0);
+ id->idInsFmt(IF_SRD);
+ // disp is really a lclVarNum
+ noway_assert((unsigned)disp == (size_t)disp);
+ id->idAddr()->iiaLclVar.initLclVarAddr((unsigned)disp, 0);
+ sz = emitInsSizeSV(insCodeMR(INS_call), (unsigned)disp, 0);
- break;
+ break;
- case EC_FUNC_VIRTUAL:
+ case EC_FUNC_VIRTUAL:
- CALL_ADDR_MODE:
+ CALL_ADDR_MODE:
- // fall-through
+ // fall-through
- // The function is "ireg" if id->idIsCallRegPtr(),
- // else [ireg+xmul*xreg+disp]
+ // The function is "ireg" if id->idIsCallRegPtr(),
+ // else [ireg+xmul*xreg+disp]
- id->idInsFmt(IF_ARD);
+ id->idInsFmt(IF_ARD);
- id->idAddr()->iiaAddrMode.amBaseReg = ireg;
- id->idAddr()->iiaAddrMode.amIndxReg = xreg;
- id->idAddr()->iiaAddrMode.amScale = xmul ? emitEncodeScale(xmul) : emitter::OPSZ1;
+ id->idAddr()->iiaAddrMode.amBaseReg = ireg;
+ id->idAddr()->iiaAddrMode.amIndxReg = xreg;
+ id->idAddr()->iiaAddrMode.amScale = xmul ? emitEncodeScale(xmul) : emitter::OPSZ1;
- sz = emitInsSizeAM(id, insCodeMR(INS_call));
+ sz = emitInsSizeAM(id, insCodeMR(INS_call));
- if (ireg == REG_NA && xreg == REG_NA)
- {
- if (codeGen->genCodeIndirAddrNeedsReloc(disp))
+ if (ireg == REG_NA && xreg == REG_NA)
{
- id->idSetIsDspReloc();
- }
+ if (codeGen->genCodeIndirAddrNeedsReloc(disp))
+ {
+ id->idSetIsDspReloc();
+ }
#ifdef _TARGET_AMD64_
- else
- {
- // An absolute indir address that doesn't need reloc should fit within 32-bits
- // to be encoded as offset relative to zero. This addr mode requires an extra
- // SIB byte
- noway_assert(static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (size_t)addr);
- sz++;
- }
+ else
+ {
+ // An absolute indir address that doesn't need reloc should fit within 32-bits
+ // to be encoded as offset relative to zero. This addr mode requires an extra
+ // SIB byte
+ noway_assert(static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (size_t)addr);
+ sz++;
+ }
#endif //_TARGET_AMD64_
- }
+ }
- break;
+ break;
- default:
- NO_WAY("unexpected instruction");
- break;
+ default:
+ NO_WAY("unexpected instruction");
+ break;
}
-
}
else if (callType == EC_FUNC_TOKEN_INDIR)
{
@@ -5783,8 +5543,8 @@ void emitter::emitIns_Call(EmitCallType callType,
assert(addr != nullptr);
id->idInsFmt(IF_METHPTR);
- id->idAddr()->iiaAddr = (BYTE*)addr;
- sz = 6;
+ id->idAddr()->iiaAddr = (BYTE*)addr;
+ sz = 6;
#if RELOC_SUPPORT
// Since this is an indirect call through a pointer and we don't
@@ -5804,8 +5564,7 @@ void emitter::emitIns_Call(EmitCallType callType,
sz++;
}
#endif //_TARGET_AMD64_
-#endif //RELOC_SUPPORT
-
+#endif // RELOC_SUPPORT
}
else
{
@@ -5816,7 +5575,7 @@ void emitter::emitIns_Call(EmitCallType callType,
assert(addr != nullptr);
id->idInsFmt(IF_METHOD);
- sz = 5;
+ sz = 5;
id->idAddr()->iiaAddr = (BYTE*)addr;
@@ -5826,7 +5585,7 @@ void emitter::emitIns_Call(EmitCallType callType,
}
#if RELOC_SUPPORT
- // Direct call to a method and no addr indirection is needed.
+ // Direct call to a method and no addr indirection is needed.
if (codeGen->genCodeAddrNeedsReloc((size_t)addr))
{
id->idSetIsDspReloc();
@@ -5834,22 +5593,28 @@ void emitter::emitIns_Call(EmitCallType callType,
#endif
}
-#ifdef DEBUG
- if (emitComp->verbose&&0)
+#ifdef DEBUG
+ if (emitComp->verbose && 0)
{
- if (id->idIsLargeCall())
+ if (id->idIsLargeCall())
{
- if (callType >= EC_FUNC_VIRTUAL)
- printf("[%02u] Rec call GC vars = %s\n", id->idDebugOnlyInfo()->idNum, VarSetOps::ToString(emitComp,((instrDescCGCA*)id)->idcGCvars));
+ if (callType >= EC_FUNC_VIRTUAL)
+ {
+ printf("[%02u] Rec call GC vars = %s\n", id->idDebugOnlyInfo()->idNum,
+ VarSetOps::ToString(emitComp, ((instrDescCGCA*)id)->idcGCvars));
+ }
else
- printf("[%02u] Rec call GC vars = %s\n", id->idDebugOnlyInfo()->idNum, VarSetOps::ToString(emitComp,((instrDescCGCA*)id)->idcGCvars));
+ {
+ printf("[%02u] Rec call GC vars = %s\n", id->idDebugOnlyInfo()->idNum,
+ VarSetOps::ToString(emitComp, ((instrDescCGCA*)id)->idcGCvars));
+ }
}
}
#endif
#if defined(DEBUG) || defined(LATE_DISASM)
- id->idDebugOnlyInfo()->idMemCookie = (size_t) methHnd; // method token
- id->idDebugOnlyInfo()->idClsCookie = 0;
+ id->idDebugOnlyInfo()->idMemCookie = (size_t)methHnd; // method token
+ id->idDebugOnlyInfo()->idClsCookie = nullptr;
id->idDebugOnlyInfo()->idCallSig = sigInfo;
#endif
@@ -5863,31 +5628,31 @@ void emitter::emitIns_Call(EmitCallType callType,
id->idCodeSize(sz);
dispIns(id);
- emitCurIGsize += sz;
+ emitCurIGsize += sz;
#if !FEATURE_FIXED_OUT_ARGS
/* The call will pop the arguments */
- if (emitCntStackDepth && argSize > 0)
+ if (emitCntStackDepth && argSize > 0)
{
noway_assert((ssize_t)emitCurStackLvl >= argSize);
- emitCurStackLvl -= (int)argSize; assert((int)emitCurStackLvl >= 0);
+ emitCurStackLvl -= (int)argSize;
+ assert((int)emitCurStackLvl >= 0);
}
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* The following called for each recorded instruction -- use for debugging.
*/
-void emitter::emitInsSanityCheck(instrDesc* id)
+void emitter::emitInsSanityCheck(instrDesc* id)
{
// make certain you only try to put relocs on things that can have them.
- ID_OPS idOp = (ID_OPS) emitFmtToOps[id->idInsFmt()];
+ ID_OPS idOp = (ID_OPS)emitFmtToOps[id->idInsFmt()];
if ((idOp == ID_OP_SCNS) && id->idIsLargeCns())
{
idOp = ID_OP_CNS;
@@ -5897,25 +5662,15 @@ void emitter::emitInsSanityCheck(instrDesc* id)
{
if (id->idIsDspReloc())
{
- assert(idOp == ID_OP_NONE ||
- idOp == ID_OP_AMD ||
- idOp == ID_OP_DSP ||
- idOp == ID_OP_DSP_CNS ||
- idOp == ID_OP_AMD_CNS ||
- idOp == ID_OP_SPEC ||
- idOp == ID_OP_CALL ||
- idOp == ID_OP_JMP ||
+ assert(idOp == ID_OP_NONE || idOp == ID_OP_AMD || idOp == ID_OP_DSP || idOp == ID_OP_DSP_CNS ||
+ idOp == ID_OP_AMD_CNS || idOp == ID_OP_SPEC || idOp == ID_OP_CALL || idOp == ID_OP_JMP ||
idOp == ID_OP_LBL);
}
if (id->idIsCnsReloc())
{
- assert(idOp == ID_OP_CNS ||
- idOp == ID_OP_AMD_CNS ||
- idOp == ID_OP_DSP_CNS ||
- idOp == ID_OP_SPEC ||
- idOp == ID_OP_CALL ||
- idOp == ID_OP_JMP);
+ assert(idOp == ID_OP_CNS || idOp == ID_OP_AMD_CNS || idOp == ID_OP_DSP_CNS || idOp == ID_OP_SPEC ||
+ idOp == ID_OP_CALL || idOp == ID_OP_JMP);
}
}
}
@@ -5926,17 +5681,21 @@ void emitter::emitInsSanityCheck(instrDesc* id)
* Return the allocated size (in bytes) of the given instruction descriptor.
*/
-size_t emitter::emitSizeOfInsDsc(instrDesc* id)
+size_t emitter::emitSizeOfInsDsc(instrDesc* id)
{
- if (emitIsTinyInsDsc(id))
- return TINY_IDSC_SIZE;
+ if (emitIsTinyInsDsc(id))
+ {
+ return TINY_IDSC_SIZE;
+ }
- if (emitIsScnsInsDsc(id))
+ if (emitIsScnsInsDsc(id))
+ {
return SMALL_IDSC_SIZE;
+ }
assert((unsigned)id->idInsFmt() < emitFmtCount);
- ID_OPS idOp = (ID_OPS) emitFmtToOps[id->idInsFmt()];
+ ID_OPS idOp = (ID_OPS)emitFmtToOps[id->idInsFmt()];
// An INS_call instruction may use a "fat" direct/indirect call descriptor
// except for a local call to a label (i.e. call to a finally)
@@ -5945,69 +5704,77 @@ size_t emitter::emitSizeOfInsDsc(instrDesc* id)
if (id->idIns() == INS_call)
{
- assert(idOp == ID_OP_CALL || // is a direct call
- idOp == ID_OP_SPEC || // is a indirect call
- idOp == ID_OP_JMP ); // is a local call to finally clause
+ assert(idOp == ID_OP_CALL || // is a direct call
+ idOp == ID_OP_SPEC || // is a indirect call
+ idOp == ID_OP_JMP); // is a local call to finally clause
}
switch (idOp)
{
- case ID_OP_NONE:
- break;
+ case ID_OP_NONE:
+ break;
- case ID_OP_LBL:
- return sizeof(instrDescLbl);
+ case ID_OP_LBL:
+ return sizeof(instrDescLbl);
- case ID_OP_JMP:
- return sizeof(instrDescJmp);
+ case ID_OP_JMP:
+ return sizeof(instrDescJmp);
- case ID_OP_CALL:
- case ID_OP_SPEC:
- if (id->idIsLargeCall())
- {
- /* Must be a "fat" indirect call descriptor */
- return sizeof(instrDescCGCA);
- }
+ case ID_OP_CALL:
+ case ID_OP_SPEC:
+ if (id->idIsLargeCall())
+ {
+ /* Must be a "fat" indirect call descriptor */
+ return sizeof(instrDescCGCA);
+ }
- __fallthrough;
+ __fallthrough;
- case ID_OP_SCNS:
- case ID_OP_CNS:
- case ID_OP_DSP:
- case ID_OP_DSP_CNS:
- case ID_OP_AMD:
- case ID_OP_AMD_CNS:
- if (id->idIsLargeCns())
- {
- if (id->idIsLargeDsp())
- return sizeof(instrDescCnsDsp);
- else
- return sizeof(instrDescCns);
- }
- else
- {
- if (id->idIsLargeDsp())
- return sizeof(instrDescDsp);
+ case ID_OP_SCNS:
+ case ID_OP_CNS:
+ case ID_OP_DSP:
+ case ID_OP_DSP_CNS:
+ case ID_OP_AMD:
+ case ID_OP_AMD_CNS:
+ if (id->idIsLargeCns())
+ {
+ if (id->idIsLargeDsp())
+ {
+ return sizeof(instrDescCnsDsp);
+ }
+ else
+ {
+ return sizeof(instrDescCns);
+ }
+ }
else
- return sizeof(instrDesc);
- }
+ {
+ if (id->idIsLargeDsp())
+ {
+ return sizeof(instrDescDsp);
+ }
+ else
+ {
+ return sizeof(instrDesc);
+ }
+ }
- default:
- NO_WAY("unexpected instruction descriptor format");
- break;
+ default:
+ NO_WAY("unexpected instruction descriptor format");
+ break;
}
- return sizeof(instrDesc);
+ return sizeof(instrDesc);
}
/*****************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* Return a string that represents the given register.
*/
-const char* emitter::emitRegName(regNumber reg, emitAttr attr, bool varName)
+const char* emitter::emitRegName(regNumber reg, emitAttr attr, bool varName)
{
static char rb[2][128];
static unsigned char rbc = 0;
@@ -6019,84 +5786,86 @@ const char* emitter::emitRegName(regNumber reg, emitAttr attr, bool varN
switch (EA_SIZE(attr))
{
- case EA_32BYTE:
- return emitYMMregName(reg);
+ case EA_32BYTE:
+ return emitYMMregName(reg);
- case EA_16BYTE:
- return emitXMMregName(reg);
+ case EA_16BYTE:
+ return emitXMMregName(reg);
- case EA_8BYTE:
- break;
-
- case EA_4BYTE:
- if (reg > REG_R15)
+ case EA_8BYTE:
break;
- if (reg > REG_RDI)
- {
- suffix = 'd';
- goto APPEND_SUFFIX;
- }
- rbc = (rbc+1)%2;
- rb[rbc][0] = 'e';
- rb[rbc][1] = rn[1];
- rb[rbc][2] = rn[2];
- rb[rbc][3] = 0;
- rn = rb[rbc];
- break;
-
- case EA_2BYTE:
- if (reg > REG_RDI)
- {
- suffix = 'w';
- goto APPEND_SUFFIX;
- }
- rn++;
- break;
+ case EA_4BYTE:
+ if (reg > REG_R15)
+ {
+ break;
+ }
- case EA_1BYTE:
- if (reg > REG_RDI)
- {
- suffix = 'b';
-APPEND_SUFFIX:
- rbc = (rbc+1)%2;
- rb[rbc][0] = rn[0];
- rb[rbc][1] = rn[1];
- if (rn[2])
+ if (reg > REG_RDI)
{
- assert(rn[3] == 0);
- rb[rbc][2] = rn[2];
- rb[rbc][3] = suffix;
- rb[rbc][4] = 0;
+ suffix = 'd';
+ goto APPEND_SUFFIX;
}
- else
+ rbc = (rbc + 1) % 2;
+ rb[rbc][0] = 'e';
+ rb[rbc][1] = rn[1];
+ rb[rbc][2] = rn[2];
+ rb[rbc][3] = 0;
+ rn = rb[rbc];
+ break;
+
+ case EA_2BYTE:
+ if (reg > REG_RDI)
{
- rb[rbc][2] = suffix;
- rb[rbc][3] = 0;
+ suffix = 'w';
+ goto APPEND_SUFFIX;
}
- }
- else
- {
- rbc = (rbc+1)%2;
- rb[rbc][0] = rn[1];
- if (reg < 4)
+ rn++;
+ break;
+
+ case EA_1BYTE:
+ if (reg > REG_RDI)
{
- rb[rbc][1] = 'l';
- rb[rbc][2] = 0;
+ suffix = 'b';
+ APPEND_SUFFIX:
+ rbc = (rbc + 1) % 2;
+ rb[rbc][0] = rn[0];
+ rb[rbc][1] = rn[1];
+ if (rn[2])
+ {
+ assert(rn[3] == 0);
+ rb[rbc][2] = rn[2];
+ rb[rbc][3] = suffix;
+ rb[rbc][4] = 0;
+ }
+ else
+ {
+ rb[rbc][2] = suffix;
+ rb[rbc][3] = 0;
+ }
}
else
{
- rb[rbc][1] = rn[2];
- rb[rbc][2] = 'l';
- rb[rbc][3] = 0;
+ rbc = (rbc + 1) % 2;
+ rb[rbc][0] = rn[1];
+ if (reg < 4)
+ {
+ rb[rbc][1] = 'l';
+ rb[rbc][2] = 0;
+ }
+ else
+ {
+ rb[rbc][1] = rn[2];
+ rb[rbc][2] = 'l';
+ rb[rbc][3] = 0;
+ }
}
- }
- rn = rb[rbc];
- break;
+ rn = rb[rbc];
+ break;
- default:
- break;
+ default:
+ break;
}
#endif // _TARGET_AMD64_
@@ -6106,31 +5875,31 @@ APPEND_SUFFIX:
switch (EA_SIZE(attr))
{
#ifndef LEGACY_BACKEND
- case EA_32BYTE:
- return emitYMMregName(reg);
+ case EA_32BYTE:
+ return emitYMMregName(reg);
- case EA_16BYTE:
- return emitXMMregName(reg);
+ case EA_16BYTE:
+ return emitXMMregName(reg);
#endif // LEGACY_BACKEND
- case EA_4BYTE:
- break;
+ case EA_4BYTE:
+ break;
- case EA_2BYTE:
- rn++;
- break;
+ case EA_2BYTE:
+ rn++;
+ break;
- case EA_1BYTE:
- rbc = (rbc+1)%2;
- rb[rbc][0] = rn[1];
- rb[rbc][1] = 'l';
- strcpy_s(&rb[rbc][2], sizeof(rb[0])-2, rn+3);
+ case EA_1BYTE:
+ rbc = (rbc + 1) % 2;
+ rb[rbc][0] = rn[1];
+ rb[rbc][1] = 'l';
+ strcpy_s(&rb[rbc][2], sizeof(rb[0]) - 2, rn + 3);
- rn = rb[rbc];
- break;
+ rn = rb[rbc];
+ break;
- default:
- break;
+ default:
+ break;
}
#endif // _TARGET_X86_
@@ -6157,7 +5926,7 @@ APPEND_SUFFIX:
}
#endif // 0
- return rn;
+ return rn;
}
/*****************************************************************************
@@ -6165,7 +5934,7 @@ APPEND_SUFFIX:
* Return a string that represents the given FP register.
*/
-const char* emitter::emitFPregName(unsigned reg, bool varName)
+const char* emitter::emitFPregName(unsigned reg, bool varName)
{
assert(reg < REG_COUNT);
@@ -6177,20 +5946,19 @@ const char* emitter::emitFPregName(unsigned reg, bool varName)
* Return a string that represents the given XMM register.
*/
-const char* emitter::emitXMMregName(unsigned reg)
+const char* emitter::emitXMMregName(unsigned reg)
{
- static const char* const regNames[] =
- {
- #define REGDEF(name, rnum, mask, sname) "x" sname,
+ static const char* const regNames[] = {
+#define REGDEF(name, rnum, mask, sname) "x" sname,
#ifndef LEGACY_BACKEND
- #include "register.h"
+#include "register.h"
#else // LEGACY_BACKEND
- #include "registerxmm.h"
+#include "registerxmm.h"
#endif // LEGACY_BACKEND
};
assert(reg < REG_COUNT);
- assert(reg < sizeof(regNames)/sizeof(regNames[0]));
+ assert(reg < sizeof(regNames) / sizeof(regNames[0]));
return regNames[reg];
}
@@ -6200,20 +5968,19 @@ const char* emitter::emitXMMregName(unsigned reg)
* Return a string that represents the given YMM register.
*/
-const char* emitter::emitYMMregName(unsigned reg)
+const char* emitter::emitYMMregName(unsigned reg)
{
- static const char* const regNames[] =
- {
- #define REGDEF(name, rnum, mask, sname) "y" sname,
+ static const char* const regNames[] = {
+#define REGDEF(name, rnum, mask, sname) "y" sname,
#ifndef LEGACY_BACKEND
- #include "register.h"
+#include "register.h"
#else // LEGACY_BACKEND
- #include "registerxmm.h"
+#include "registerxmm.h"
#endif // LEGACY_BACKEND
};
assert(reg < REG_COUNT);
- assert(reg < sizeof(regNames)/sizeof(regNames[0]));
+ assert(reg < sizeof(regNames) / sizeof(regNames[0]));
return regNames[reg];
}
@@ -6223,7 +5990,7 @@ const char* emitter::emitYMMregName(unsigned reg)
* Display a static data member reference.
*/
-void emitter::emitDispClsVar(CORINFO_FIELD_HANDLE fldHnd, ssize_t offs, bool reloc /* = false */)
+void emitter::emitDispClsVar(CORINFO_FIELD_HANDLE fldHnd, ssize_t offs, bool reloc /* = false */)
{
int doffs;
@@ -6234,7 +6001,9 @@ void emitter::emitDispClsVar(CORINFO_FIELD_HANDLE fldHnd, ssize_t
{
ssize_t top12bits = (offs >> 20);
if ((top12bits != 0) && (top12bits != -1))
+ {
offs = 0xD1FFAB1E;
+ }
}
if (fldHnd == FLD_GLOBAL_FS)
@@ -6255,33 +6024,46 @@ void emitter::emitDispClsVar(CORINFO_FIELD_HANDLE fldHnd, ssize_t
#ifdef RELOC_SUPPORT
if (reloc)
+ {
printf("reloc ");
+ }
#endif
if (doffs >= 0)
{
- if (doffs & 1)
- printf("@CNS%02u", doffs-1);
+ if (doffs & 1)
+ {
+ printf("@CNS%02u", doffs - 1);
+ }
else
+ {
printf("@RWD%02u", doffs);
+ }
- if (offs)
+ if (offs)
+ {
printf("%+Id", offs);
+ }
}
else
{
printf("classVar[%#x]", emitComp->dspPtr(fldHnd));
- if (offs)
+ if (offs)
+ {
printf("%+Id", offs);
+ }
}
printf("]");
- if (emitComp->opts.varNames && offs < 0)
+ if (emitComp->opts.varNames && offs < 0)
{
printf("'%s", emitComp->eeGetFieldName(fldHnd));
- if (offs) printf("%+Id", offs);
+ if (offs)
+ {
+ printf("%+Id", offs);
+ }
printf("'");
}
}
@@ -6291,41 +6073,55 @@ void emitter::emitDispClsVar(CORINFO_FIELD_HANDLE fldHnd, ssize_t
* Display a stack frame reference.
*/
-void emitter::emitDispFrameRef(int varx, int disp, int offs, bool asmfm)
+void emitter::emitDispFrameRef(int varx, int disp, int offs, bool asmfm)
{
- int addr;
- bool bEBP;
+ int addr;
+ bool bEBP;
printf("[");
- if (!asmfm || emitComp->lvaDoneFrameLayout == Compiler::NO_FRAME_LAYOUT)
+ if (!asmfm || emitComp->lvaDoneFrameLayout == Compiler::NO_FRAME_LAYOUT)
{
- if (varx < 0)
+ if (varx < 0)
+ {
printf("TEMP_%02u", -varx);
+ }
else
+ {
printf("V%02u", +varx);
+ }
- if (disp < 0)
- printf("-0x%X", -disp);
+ if (disp < 0)
+ {
+ printf("-0x%X", -disp);
+ }
else if (disp > 0)
- printf("+0x%X", +disp);
+ {
+ printf("+0x%X", +disp);
+ }
}
- if (emitComp->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
+ if (emitComp->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
{
- if (!asmfm)
+ if (!asmfm)
+ {
printf(" ");
+ }
addr = emitComp->lvaFrameAddress(varx, &bEBP) + disp;
- if (bEBP)
+ if (bEBP)
{
printf(STR_FPBASE);
- if (addr < 0)
+ if (addr < 0)
+ {
printf("-%02XH", -addr);
+ }
else if (addr > 0)
- printf("+%02XH", addr);
+ {
+ printf("+%02XH", addr);
+ }
}
else
{
@@ -6333,24 +6129,27 @@ void emitter::emitDispFrameRef(int varx, int disp, int offs, bool
printf(STR_SPBASE);
- if (addr < 0)
+ if (addr < 0)
+ {
printf("-%02XH", -addr);
+ }
else if (addr > 0)
- printf("+%02XH", addr);
+ {
+ printf("+%02XH", addr);
+ }
#if !FEATURE_FIXED_OUT_ARGS
- if (emitCurStackLvl)
+ if (emitCurStackLvl)
printf("+%02XH", emitCurStackLvl);
#endif // !FEATURE_FIXED_OUT_ARGS
-
}
}
printf("]");
- if (varx >= 0 && emitComp->opts.varNames)
+ if (varx >= 0 && emitComp->opts.varNames)
{
LclVarDsc* varDsc;
const char* varName;
@@ -6359,14 +6158,18 @@ void emitter::emitDispFrameRef(int varx, int disp, int offs, bool
varDsc = emitComp->lvaTable + varx;
varName = emitComp->compLocalVarName(varx, offs);
- if (varName)
+ if (varName)
{
printf("'%s", varName);
- if (disp < 0)
- printf("-%d", -disp);
+ if (disp < 0)
+ {
+ printf("-%d", -disp);
+ }
else if (disp > 0)
- printf("+%d", +disp);
+ {
+ printf("+%d", +disp);
+ }
printf("'");
}
@@ -6379,7 +6182,7 @@ void emitter::emitDispFrameRef(int varx, int disp, int offs, bool
* If we are formatting for an assembly listing don't print the hex value
* since it will prevent us from doing assembly diffs
*/
-void emitter::emitDispReloc(ssize_t value)
+void emitter::emitDispReloc(ssize_t value)
{
if (emitComp->opts.disAsm)
{
@@ -6396,42 +6199,41 @@ void emitter::emitDispReloc(ssize_t value)
* Display an address mode.
*/
-void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
+void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
{
- bool nsep = false;
- ssize_t disp;
+ bool nsep = false;
+ ssize_t disp;
- unsigned jtno = 0;
- dataSection* jdsc = 0;
+ unsigned jtno = 0;
+ dataSection* jdsc = nullptr;
/* The displacement field is in an unusual place for calls */
- disp = (id->idIns() == INS_call) ? emitGetInsCIdisp(id)
- : emitGetInsAmdAny(id);
+ disp = (id->idIns() == INS_call) ? emitGetInsCIdisp(id) : emitGetInsAmdAny(id);
/* Display a jump table label if this is a switch table jump */
- if (id->idIns() == INS_i_jmp)
+ if (id->idIns() == INS_i_jmp)
{
- UNATIVE_OFFSET offs = 0;
+ UNATIVE_OFFSET offs = 0;
/* Find the appropriate entry in the data section list */
- for (jdsc = emitConsDsc.dsdList, jtno = 0;
- jdsc;
- jdsc = jdsc->dsNext)
+ for (jdsc = emitConsDsc.dsdList, jtno = 0; jdsc; jdsc = jdsc->dsNext)
{
- UNATIVE_OFFSET size = jdsc->dsSize;
+ UNATIVE_OFFSET size = jdsc->dsSize;
/* Is this a label table? */
- if (size & 1)
+ if (size & 1)
{
size--;
jtno++;
- if (offs == id->idDebugOnlyInfo()->idMemCookie)
+ if (offs == id->idDebugOnlyInfo()->idMemCookie)
+ {
break;
+ }
}
offs += size;
@@ -6439,7 +6241,7 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
/* If we've found a matching entry then is a table jump */
- if (jdsc)
+ if (jdsc)
{
#ifdef RELOC_SUPPORT
if (id->idIsDspReloc())
@@ -6457,24 +6259,32 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
printf("[");
- if (id->idAddr()->iiaAddrMode.amBaseReg != REG_NA)
+ if (id->idAddr()->iiaAddrMode.amBaseReg != REG_NA)
{
printf("%s", emitRegName(id->idAddr()->iiaAddrMode.amBaseReg));
nsep = true;
if (id->idAddr()->iiaAddrMode.amBaseReg == REG_ESP)
+ {
frameRef = true;
+ }
else if (emitComp->isFramePointerUsed() && id->idAddr()->iiaAddrMode.amBaseReg == REG_EBP)
+ {
frameRef = true;
+ }
}
- if (id->idAddr()->iiaAddrMode.amIndxReg != REG_NA)
+ if (id->idAddr()->iiaAddrMode.amIndxReg != REG_NA)
{
- size_t scale = emitDecodeScale(id->idAddr()->iiaAddrMode.amScale);
+ size_t scale = emitDecodeScale(id->idAddr()->iiaAddrMode.amScale);
- if (nsep)
+ if (nsep)
+ {
printf("+");
- if (scale > 1)
+ }
+ if (scale > 1)
+ {
printf("%u*", scale);
+ }
printf("%s", emitRegName(id->idAddr()->iiaAddrMode.amIndxReg));
nsep = true;
}
@@ -6482,8 +6292,10 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
#ifdef RELOC_SUPPORT
if ((id->idIsDspReloc()) && (id->idIns() != INS_i_jmp))
{
- if (nsep)
+ if (nsep)
+ {
printf("+");
+ }
emitDispReloc(disp);
}
else
@@ -6494,67 +6306,95 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
{
ssize_t top12bits = (disp >> 20);
if ((top12bits != 0) && (top12bits != -1))
+ {
disp = 0xD1FFAB1E;
+ }
}
if (disp > 0)
{
- if (nsep)
+ if (nsep)
+ {
printf("+");
+ }
if (frameRef)
+ {
printf("%02XH", disp);
+ }
else if (disp < 1000)
+ {
printf("%d", disp);
+ }
else if (disp <= 0xFFFF)
+ {
printf("%04XH", disp);
+ }
else
+ {
printf("%08XH", disp);
+ }
}
else if (disp < 0)
{
if (frameRef)
+ {
printf("-%02XH", -disp);
+ }
else if (disp > -1000)
+ {
printf("-%d", -disp);
+ }
else if (disp >= -0xFFFF)
+ {
printf("-%04XH", -disp);
+ }
else if ((disp & 0x7F000000) != 0x7F000000)
+ {
printf("%08XH", disp);
+ }
else
+ {
printf("-%08XH", -disp);
+ }
}
else if (!nsep)
{
- printf("%04XH", disp);
+ printf("%04XH", disp);
}
}
printf("]");
- if (id->idDebugOnlyInfo()->idClsCookie)
+ if (id->idDebugOnlyInfo()->idClsCookie)
{
- if (id->idIns() == INS_call)
- printf("%s", emitFncName((CORINFO_METHOD_HANDLE) id->idDebugOnlyInfo()->idMemCookie));
+ if (id->idIns() == INS_call)
+ {
+ printf("%s", emitFncName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie));
+ }
else
- printf("%s", emitFldName((CORINFO_FIELD_HANDLE) id->idDebugOnlyInfo()->idMemCookie));
+ {
+ printf("%s", emitFldName((CORINFO_FIELD_HANDLE)id->idDebugOnlyInfo()->idMemCookie));
+ }
}
- // pretty print string if it looks like one
- else if (id->idGCref() == GCT_GCREF && id->idIns() == INS_mov &&
- id->idAddr()->iiaAddrMode.amBaseReg == REG_NA) {
+ // pretty print string if it looks like one
+ else if (id->idGCref() == GCT_GCREF && id->idIns() == INS_mov && id->idAddr()->iiaAddrMode.amBaseReg == REG_NA)
+ {
const wchar_t* str = emitComp->eeGetCPString(disp);
- if (str != 0)
+ if (str != nullptr)
+ {
printf(" '%S'", str);
+ }
}
- if (jdsc && !noDetail)
+ if (jdsc && !noDetail)
{
- unsigned cnt = (jdsc->dsSize - 1) / sizeof(void*);
- BasicBlock** bbp = (BasicBlock**)jdsc->dsCont;
+ unsigned cnt = (jdsc->dsSize - 1) / sizeof(void*);
+ BasicBlock** bbp = (BasicBlock**)jdsc->dsCont;
#ifdef _TARGET_AMD64_
-# define SIZE_LETTER "Q"
+#define SIZE_LETTER "Q"
#else
-# define SIZE_LETTER "D"
+#define SIZE_LETTER "D"
#endif
printf("\n\n J_M%03u_DS%02u LABEL " SIZE_LETTER "WORD", Compiler::s_compMethodsCount, jtno);
@@ -6566,11 +6406,11 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
/* Convert the BasicBlock* value to an IG address */
- lab = (insGroup*)emitCodeGetCookie(*bbp++); assert(lab);
+ lab = (insGroup*)emitCodeGetCookie(*bbp++);
+ assert(lab);
printf("\n D" SIZE_LETTER " G_M%03u_IG%02u", Compiler::s_compMethodsCount, lab->igNum);
- }
- while (--cnt);
+ } while (--cnt);
}
}
@@ -6579,51 +6419,51 @@ void emitter::emitDispAddrMode(instrDesc* id, bool noDetail)
* If the given instruction is a shift, display the 2nd operand.
*/
-void emitter::emitDispShift(instruction ins, int cnt)
+void emitter::emitDispShift(instruction ins, int cnt)
{
switch (ins)
{
- case INS_rcl_1:
- case INS_rcr_1:
- case INS_rol_1:
- case INS_ror_1:
- case INS_shl_1:
- case INS_shr_1:
- case INS_sar_1:
- printf(", 1");
- break;
+ case INS_rcl_1:
+ case INS_rcr_1:
+ case INS_rol_1:
+ case INS_ror_1:
+ case INS_shl_1:
+ case INS_shr_1:
+ case INS_sar_1:
+ printf(", 1");
+ break;
- case INS_rcl:
- case INS_rcr:
- case INS_rol:
- case INS_ror:
- case INS_shl:
- case INS_shr:
- case INS_sar:
- printf(", cl");
- break;
+ case INS_rcl:
+ case INS_rcr:
+ case INS_rol:
+ case INS_ror:
+ case INS_shl:
+ case INS_shr:
+ case INS_sar:
+ printf(", cl");
+ break;
- case INS_rcl_N:
- case INS_rcr_N:
- case INS_rol_N:
- case INS_ror_N:
- case INS_shl_N:
- case INS_shr_N:
- case INS_sar_N:
- printf(", %d", cnt);
- break;
+ case INS_rcl_N:
+ case INS_rcr_N:
+ case INS_rol_N:
+ case INS_ror_N:
+ case INS_shl_N:
+ case INS_shr_N:
+ case INS_sar_N:
+ printf(", %d", cnt);
+ break;
- default:
- break;
+ default:
+ break;
}
}
/*****************************************************************************
*
- * Display (optionally) the bytes for the instruction encoding in hex
+ * Display (optionally) the bytes for the instruction encoding in hex
*/
-void emitter::emitDispInsHex(BYTE * code, size_t sz)
+void emitter::emitDispInsHex(BYTE* code, size_t sz)
{
// We do not display the instruction hex if we want diff-able disassembly
if (!emitComp->opts.disDiffable)
@@ -6635,9 +6475,9 @@ void emitter::emitDispInsHex(BYTE * code, size_t sz)
const size_t digits = 6;
#endif
printf(" ");
- for (unsigned i=0; i < sz; i++)
+ for (unsigned i = 0; i < sz; i++)
{
- printf("%02X", (*((BYTE *) (code+i))));
+ printf("%02X", (*((BYTE*)(code + i))));
}
if (sz < digits)
@@ -6647,25 +6487,18 @@ void emitter::emitDispInsHex(BYTE * code, size_t sz)
}
}
-
/*****************************************************************************
*
* Display the given instruction.
*/
-void emitter::emitDispIns(instrDesc* id,
- bool isNew,
- bool doffs,
- bool asmfm,
- unsigned offset,
- BYTE* code,
- size_t sz,
- insGroup* ig)
+void emitter::emitDispIns(
+ instrDesc* id, bool isNew, bool doffs, bool asmfm, unsigned offset, BYTE* code, size_t sz, insGroup* ig)
{
- emitAttr attr;
- const char* sstr;
+ emitAttr attr;
+ const char* sstr;
- instruction ins = id->idIns();
+ instruction ins = id->idIns();
if (emitComp->verbose)
{
@@ -6674,61 +6507,60 @@ void emitter::emitDispIns(instrDesc* id,
}
#ifdef RELOC_SUPPORT
-# define ID_INFO_DSP_RELOC ((bool) (id->idIsDspReloc()))
+#define ID_INFO_DSP_RELOC ((bool)(id->idIsDspReloc()))
#else
-# define ID_INFO_DSP_RELOC false
+#define ID_INFO_DSP_RELOC false
#endif
/* Display a constant value if the instruction references one */
- if (!isNew)
+ if (!isNew)
{
switch (id->idInsFmt())
{
- int offs;
+ int offs;
- case IF_MRD_RRD:
- case IF_MWR_RRD:
- case IF_MRW_RRD:
+ case IF_MRD_RRD:
+ case IF_MWR_RRD:
+ case IF_MRW_RRD:
- case IF_RRD_MRD:
- case IF_RWR_MRD:
- case IF_RRW_MRD:
+ case IF_RRD_MRD:
+ case IF_RWR_MRD:
+ case IF_RRW_MRD:
- case IF_MRD_CNS:
- case IF_MWR_CNS:
- case IF_MRW_CNS:
- case IF_MRW_SHF:
+ case IF_MRD_CNS:
+ case IF_MWR_CNS:
+ case IF_MRW_CNS:
+ case IF_MRW_SHF:
- case IF_MRD:
- case IF_MWR:
- case IF_MRW:
+ case IF_MRD:
+ case IF_MWR:
+ case IF_MRW:
#if FEATURE_STACK_FP_X87
- case IF_TRD_MRD:
- case IF_TWR_MRD:
- case IF_TRW_MRD:
+ case IF_TRD_MRD:
+ case IF_TWR_MRD:
+ case IF_TRW_MRD:
- // case IF_MRD_TRD:
- // case IF_MRW_TRD:
- case IF_MWR_TRD:
+ // case IF_MRD_TRD:
+ // case IF_MRW_TRD:
+ case IF_MWR_TRD:
#endif // FEATURE_STACK_FP_X87
- case IF_MRD_OFF:
-
+ case IF_MRD_OFF:
- /* Is this actually a reference to a data section? */
+ /* Is this actually a reference to a data section? */
- offs = Compiler::eeGetJitDataOffs(id->idAddr()->iiaFieldHnd);
+ offs = Compiler::eeGetJitDataOffs(id->idAddr()->iiaFieldHnd);
- if (offs >= 0)
- {
- void* addr;
+ if (offs >= 0)
+ {
+ void* addr;
- /* Display a data section reference */
+ /* Display a data section reference */
- assert((unsigned)offs < emitConsDsc.dsdOffs);
- addr = emitConsBlock ? emitConsBlock + offs : nullptr;
+ assert((unsigned)offs < emitConsDsc.dsdOffs);
+ addr = emitConsBlock ? emitConsBlock + offs : nullptr;
#if 0
// TODO-XArch-Cleanup: Fix or remove this code.
@@ -6783,11 +6615,11 @@ void emitter::emitDispIns(instrDesc* id,
}
}
#endif
- }
- break;
+ }
+ break;
- default:
- break;
+ default:
+ break;
}
}
@@ -6799,8 +6631,10 @@ void emitter::emitDispIns(instrDesc* id,
// printf("[A=%08X] " , emitSimpleByrefStkMask);
// printf("[L=%02u] " , id->idCodeSize());
- if (!emitComp->opts.dspEmit && !isNew && !asmfm)
+ if (!emitComp->opts.dspEmit && !isNew && !asmfm)
+ {
doffs = true;
+ }
/* Display the instruction offset */
@@ -6828,7 +6662,7 @@ void emitter::emitDispIns(instrDesc* id,
}
#ifndef FEATURE_PAL
if (strnlen_s(sstr, 10) > 8)
-#else // FEATURE_PAL
+#else // FEATURE_PAL
if (strnlen(sstr, 10) > 8)
#endif // FEATURE_PAL
{
@@ -6841,12 +6675,12 @@ void emitter::emitDispIns(instrDesc* id,
/* Figure out the operand size */
- if (id->idGCref() == GCT_GCREF)
+ if (id->idGCref() == GCT_GCREF)
{
attr = EA_GCREF;
sstr = "gword ptr ";
}
- else if (id->idGCref() == GCT_BYREF)
+ else if (id->idGCref() == GCT_BYREF)
{
attr = EA_BYREF;
sstr = "bword ptr ";
@@ -6868,10 +6702,9 @@ void emitter::emitDispIns(instrDesc* id,
}
/* Now see what instruction format we've got */
-
// First print the implicit register usage
- if (instrHasImplicitRegPairDest(ins))
+ if (instrHasImplicitRegPairDest(ins))
{
printf("%s:%s, ", emitRegName(REG_EDX, id->idOpSize()), emitRegName(REG_EAX, id->idOpSize()));
}
@@ -6883,610 +6716,644 @@ void emitter::emitDispIns(instrDesc* id,
switch (id->idInsFmt())
{
- ssize_t val;
- ssize_t offs;
- CnsVal cnsVal;
- const char* methodName;
+ ssize_t val;
+ ssize_t offs;
+ CnsVal cnsVal;
+ const char* methodName;
- case IF_CNS:
- val = emitGetInsSC(id);
+ case IF_CNS:
+ val = emitGetInsSC(id);
#ifdef _TARGET_AMD64_
- // no 8-byte immediates allowed here!
- assert((val >= 0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL));
+ // no 8-byte immediates allowed here!
+ assert((val >= 0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL));
#endif
#ifdef RELOC_SUPPORT
- if (id->idIsCnsReloc())
- emitDispReloc(val);
- else
+ if (id->idIsCnsReloc())
+ {
+ emitDispReloc(val);
+ }
+ else
#endif
- {
-PRINT_CONSTANT:
- // Munge any pointers if we want diff-able disassembly
- if (emitComp->opts.disDiffable)
{
- ssize_t top12bits = (val >> 20);
- if ((top12bits != 0) && (top12bits != -1))
- val = 0xD1FFAB1E;
+ PRINT_CONSTANT:
+ // Munge any pointers if we want diff-able disassembly
+ if (emitComp->opts.disDiffable)
+ {
+ ssize_t top12bits = (val >> 20);
+ if ((top12bits != 0) && (top12bits != -1))
+ {
+ val = 0xD1FFAB1E;
+ }
+ }
+ if ((val > -1000) && (val < 1000))
+ {
+ printf("%d", val);
+ }
+ else if ((val > 0) || ((val & 0x7F000000) != 0x7F000000))
+ {
+ printf("0x%IX", val);
+ }
+ else
+ { // (val < 0)
+ printf("-0x%IX", -val);
+ }
}
- if ((val > -1000) && (val < 1000))
- printf("%d", val);
- else if ((val > 0) || ((val & 0x7F000000) != 0x7F000000))
- printf("0x%IX", val);
- else // (val < 0)
- printf("-0x%IX", -val);
- }
- break;
+ break;
- case IF_ARD:
- case IF_AWR:
- case IF_ARW:
+ case IF_ARD:
+ case IF_AWR:
+ case IF_ARW:
#if FEATURE_STACK_FP_X87
- case IF_TRD_ARD:
- case IF_TWR_ARD:
- case IF_TRW_ARD:
+ case IF_TRD_ARD:
+ case IF_TWR_ARD:
+ case IF_TRW_ARD:
- // case IF_ARD_TRD:
- case IF_AWR_TRD:
- // case IF_ARW_TRD:
+ // case IF_ARD_TRD:
+ case IF_AWR_TRD:
+// case IF_ARW_TRD:
#endif // FEATURE_STACK_FP_X87
- if (ins == INS_call && id->idIsCallRegPtr())
- {
- printf("%s", emitRegName(id->idAddr()->iiaAddrMode.amBaseReg));
- break;
- }
+ if (ins == INS_call && id->idIsCallRegPtr())
+ {
+ printf("%s", emitRegName(id->idAddr()->iiaAddrMode.amBaseReg));
+ break;
+ }
- printf("%s", sstr);
- emitDispAddrMode(id, isNew);
- emitDispShift(ins);
+ printf("%s", sstr);
+ emitDispAddrMode(id, isNew);
+ emitDispShift(ins);
- if (ins == INS_call)
- {
- assert(id->idInsFmt() == IF_ARD);
+ if (ins == INS_call)
+ {
+ assert(id->idInsFmt() == IF_ARD);
- /* Ignore indirect calls */
+ /* Ignore indirect calls */
- if (id->idDebugOnlyInfo()->idMemCookie == 0)
- break;
+ if (id->idDebugOnlyInfo()->idMemCookie == 0)
+ {
+ break;
+ }
- assert(id->idDebugOnlyInfo()->idMemCookie);
+ assert(id->idDebugOnlyInfo()->idMemCookie);
- /* This is a virtual call */
+ /* This is a virtual call */
- methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie);
- printf("%s", methodName);
- }
- break;
+ methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie);
+ printf("%s", methodName);
+ }
+ break;
- case IF_RRD_ARD:
- case IF_RWR_ARD:
- case IF_RRW_ARD:
- if (IsAVXInstruction(ins))
- {
- printf("%s, %s", emitYMMregName((unsigned)id->idReg1()), sstr);
- }
- else if (IsSSE2Instruction(ins))
- {
- printf("%s, %s", emitXMMregName((unsigned)id->idReg1()), sstr);
- }
- else
+ case IF_RRD_ARD:
+ case IF_RWR_ARD:
+ case IF_RRW_ARD:
+ if (IsAVXInstruction(ins))
+ {
+ printf("%s, %s", emitYMMregName((unsigned)id->idReg1()), sstr);
+ }
+ else if (IsSSE2Instruction(ins))
+ {
+ printf("%s, %s", emitXMMregName((unsigned)id->idReg1()), sstr);
+ }
+ else
#ifdef _TARGET_AMD64_
- if (ins == INS_movsxd)
- {
- printf("%s, %s", emitRegName(id->idReg1(), EA_8BYTE), sstr);
- }
- else
+ if (ins == INS_movsxd)
+ {
+ printf("%s, %s", emitRegName(id->idReg1(), EA_8BYTE), sstr);
+ }
+ else
#endif
- if (ins == INS_movsx || ins == INS_movzx)
- {
- printf("%s, %s", emitRegName(id->idReg1(), EA_PTRSIZE), sstr);
- }
- else
- {
- printf("%s, %s", emitRegName(id->idReg1(), attr), sstr);
- }
- emitDispAddrMode(id);
- break;
+ if (ins == INS_movsx || ins == INS_movzx)
+ {
+ printf("%s, %s", emitRegName(id->idReg1(), EA_PTRSIZE), sstr);
+ }
+ else
+ {
+ printf("%s, %s", emitRegName(id->idReg1(), attr), sstr);
+ }
+ emitDispAddrMode(id);
+ break;
- case IF_ARD_RRD:
- case IF_AWR_RRD:
- case IF_ARW_RRD:
+ case IF_ARD_RRD:
+ case IF_AWR_RRD:
+ case IF_ARW_RRD:
- printf("%s", sstr);
- emitDispAddrMode(id);
- if (IsAVXInstruction(ins))
- {
- printf(", %s", emitYMMregName((unsigned)id->idReg1()));
- }
- else if (IsSSE2Instruction(ins))
- {
- printf(", %s", emitXMMregName((unsigned)id->idReg1()));
- }
- else
- {
- printf(", %s", emitRegName(id->idReg1(), attr));
- }
- break;
+ printf("%s", sstr);
+ emitDispAddrMode(id);
+ if (IsAVXInstruction(ins))
+ {
+ printf(", %s", emitYMMregName((unsigned)id->idReg1()));
+ }
+ else if (IsSSE2Instruction(ins))
+ {
+ printf(", %s", emitXMMregName((unsigned)id->idReg1()));
+ }
+ else
+ {
+ printf(", %s", emitRegName(id->idReg1(), attr));
+ }
+ break;
- case IF_ARD_CNS:
- case IF_AWR_CNS:
- case IF_ARW_CNS:
- case IF_ARW_SHF:
+ case IF_ARD_CNS:
+ case IF_AWR_CNS:
+ case IF_ARW_CNS:
+ case IF_ARW_SHF:
- printf("%s", sstr);
- emitDispAddrMode(id);
- emitGetInsAmdCns(id, &cnsVal);
- val = cnsVal.cnsVal;
+ printf("%s", sstr);
+ emitDispAddrMode(id);
+ emitGetInsAmdCns(id, &cnsVal);
+ val = cnsVal.cnsVal;
#ifdef _TARGET_AMD64_
- // no 8-byte immediates allowed here!
- assert((val >= 0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL));
+ // no 8-byte immediates allowed here!
+ assert((val >= 0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL));
#endif
- if (id->idInsFmt() == IF_ARW_SHF)
- emitDispShift(ins, (BYTE)val);
- else
- {
- printf(", ");
-#ifdef RELOC_SUPPORT
- if (cnsVal.cnsReloc)
- emitDispReloc(val);
+ if (id->idInsFmt() == IF_ARW_SHF)
+ {
+ emitDispShift(ins, (BYTE)val);
+ }
else
+ {
+ printf(", ");
+#ifdef RELOC_SUPPORT
+ if (cnsVal.cnsReloc)
+ {
+ emitDispReloc(val);
+ }
+ else
#endif
- goto PRINT_CONSTANT;
- }
- break;
+ {
+ goto PRINT_CONSTANT;
+ }
+ }
+ break;
- case IF_SRD:
- case IF_SWR:
- case IF_SRW:
+ case IF_SRD:
+ case IF_SWR:
+ case IF_SRW:
#if FEATURE_STACK_FP_X87
- case IF_TRD_SRD:
- case IF_TWR_SRD:
- case IF_TRW_SRD:
+ case IF_TRD_SRD:
+ case IF_TWR_SRD:
+ case IF_TRW_SRD:
- // case IF_SRD_TRD:
- // case IF_SRW_TRD:
- case IF_SWR_TRD:
+ // case IF_SRD_TRD:
+ // case IF_SRW_TRD:
+ case IF_SWR_TRD:
#endif // FEATURE_STACK_FP_X87
- printf("%s", sstr);
+ printf("%s", sstr);
#if !FEATURE_FIXED_OUT_ARGS
- if (ins == INS_pop) emitCurStackLvl -= sizeof(int);
+ if (ins == INS_pop)
+ emitCurStackLvl -= sizeof(int);
#endif
- emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(),
- id->idAddr()->iiaLclVar.lvaOffset(),
- id->idDebugOnlyInfo()->idVarRefOffs, asmfm);
+ emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(),
+ id->idDebugOnlyInfo()->idVarRefOffs, asmfm);
#if !FEATURE_FIXED_OUT_ARGS
- if (ins == INS_pop) emitCurStackLvl += sizeof(int);
+ if (ins == INS_pop)
+ emitCurStackLvl += sizeof(int);
#endif
- emitDispShift(ins);
- break;
+ emitDispShift(ins);
+ break;
- case IF_SRD_RRD:
- case IF_SWR_RRD:
- case IF_SRW_RRD:
+ case IF_SRD_RRD:
+ case IF_SWR_RRD:
+ case IF_SRW_RRD:
- printf("%s", sstr);
+ printf("%s", sstr);
- emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(),
- id->idAddr()->iiaLclVar.lvaOffset(),
- id->idDebugOnlyInfo()->idVarRefOffs, asmfm);
+ emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(),
+ id->idDebugOnlyInfo()->idVarRefOffs, asmfm);
- if (IsAVXInstruction(ins))
- {
- printf(", %s", emitYMMregName((unsigned)id->idReg1()));
- }
- else if (IsSSE2Instruction(ins))
- {
- printf(", %s", emitXMMregName((unsigned)id->idReg1()));
- }
- else
- {
- printf(", %s", emitRegName(id->idReg1(), attr));
- }
- break;
+ if (IsAVXInstruction(ins))
+ {
+ printf(", %s", emitYMMregName((unsigned)id->idReg1()));
+ }
+ else if (IsSSE2Instruction(ins))
+ {
+ printf(", %s", emitXMMregName((unsigned)id->idReg1()));
+ }
+ else
+ {
+ printf(", %s", emitRegName(id->idReg1(), attr));
+ }
+ break;
- case IF_SRD_CNS:
- case IF_SWR_CNS:
- case IF_SRW_CNS:
- case IF_SRW_SHF:
+ case IF_SRD_CNS:
+ case IF_SWR_CNS:
+ case IF_SRW_CNS:
+ case IF_SRW_SHF:
- printf("%s", sstr);
+ printf("%s", sstr);
- emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(),
- id->idAddr()->iiaLclVar.lvaOffset(),
- id->idDebugOnlyInfo()->idVarRefOffs, asmfm);
+ emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(),
+ id->idDebugOnlyInfo()->idVarRefOffs, asmfm);
- emitGetInsCns(id, &cnsVal);
- val = cnsVal.cnsVal;
+ emitGetInsCns(id, &cnsVal);
+ val = cnsVal.cnsVal;
#ifdef _TARGET_AMD64_
- // no 8-byte immediates allowed here!
- assert((val >= 0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL));
+ // no 8-byte immediates allowed here!
+ assert((val >= 0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL));
#endif
- if (id->idInsFmt() == IF_SRW_SHF)
- emitDispShift(ins, (BYTE)val);
- else
- {
- printf(", ");
-#ifdef RELOC_SUPPORT
- if (cnsVal.cnsReloc)
+ if (id->idInsFmt() == IF_SRW_SHF)
{
- emitDispReloc(val);
+ emitDispShift(ins, (BYTE)val);
}
else
-#endif
{
- goto PRINT_CONSTANT;
+ printf(", ");
+#ifdef RELOC_SUPPORT
+ if (cnsVal.cnsReloc)
+ {
+ emitDispReloc(val);
+ }
+ else
+#endif
+ {
+ goto PRINT_CONSTANT;
+ }
}
- }
- break;
+ break;
- case IF_RRD_SRD:
- case IF_RWR_SRD:
- case IF_RRW_SRD:
- if (IsAVXInstruction(ins))
- {
- printf("%s, %s", emitYMMregName((unsigned)id->idReg1()), sstr);
- }
- else if (IsSSE2Instruction(ins))
- {
- printf("%s, %s", emitXMMregName((unsigned)id->idReg1()), sstr);
- }
- else
+ case IF_RRD_SRD:
+ case IF_RWR_SRD:
+ case IF_RRW_SRD:
+ if (IsAVXInstruction(ins))
+ {
+ printf("%s, %s", emitYMMregName((unsigned)id->idReg1()), sstr);
+ }
+ else if (IsSSE2Instruction(ins))
+ {
+ printf("%s, %s", emitXMMregName((unsigned)id->idReg1()), sstr);
+ }
+ else
#ifdef _TARGET_AMD64_
- if (ins == INS_movsxd)
- {
- printf("%s, %s", emitRegName(id->idReg1(), EA_8BYTE), sstr);
- }
- else
+ if (ins == INS_movsxd)
+ {
+ printf("%s, %s", emitRegName(id->idReg1(), EA_8BYTE), sstr);
+ }
+ else
#endif
- if (ins == INS_movsx || ins == INS_movzx)
- {
- printf("%s, %s", emitRegName(id->idReg1(), EA_PTRSIZE), sstr);
- }
- else
- {
- printf("%s, %s", emitRegName(id->idReg1(), attr), sstr);
- }
+ if (ins == INS_movsx || ins == INS_movzx)
+ {
+ printf("%s, %s", emitRegName(id->idReg1(), EA_PTRSIZE), sstr);
+ }
+ else
+ {
+ printf("%s, %s", emitRegName(id->idReg1(), attr), sstr);
+ }
- emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(),
- id->idAddr()->iiaLclVar.lvaOffset(),
- id->idDebugOnlyInfo()->idVarRefOffs, asmfm);
+ emitDispFrameRef(id->idAddr()->iiaLclVar.lvaVarNum(), id->idAddr()->iiaLclVar.lvaOffset(),
+ id->idDebugOnlyInfo()->idVarRefOffs, asmfm);
- break;
+ break;
- case IF_RRD_RRD:
- case IF_RWR_RRD:
- case IF_RRW_RRD:
+ case IF_RRD_RRD:
+ case IF_RWR_RRD:
+ case IF_RRW_RRD:
- if (ins == INS_mov_i2xmm)
- {
- printf("%s, %s", emitXMMregName((unsigned)id->idReg1()), emitRegName(id->idReg2(), attr));
- }
- else if (ins == INS_mov_xmm2i)
- {
- printf("%s, %s", emitRegName(id->idReg2(), attr), emitXMMregName((unsigned)id->idReg1()));
- }
+ if (ins == INS_mov_i2xmm)
+ {
+ printf("%s, %s", emitXMMregName((unsigned)id->idReg1()), emitRegName(id->idReg2(), attr));
+ }
+ else if (ins == INS_mov_xmm2i)
+ {
+ printf("%s, %s", emitRegName(id->idReg2(), attr), emitXMMregName((unsigned)id->idReg1()));
+ }
#ifndef LEGACY_BACKEND
- else if ((ins == INS_cvtsi2ss) || (ins == INS_cvtsi2sd))
- {
- printf(" %s, %s", emitXMMregName((unsigned)id->idReg1()), emitRegName(id->idReg2(), attr));
- }
+ else if ((ins == INS_cvtsi2ss) || (ins == INS_cvtsi2sd))
+ {
+ printf(" %s, %s", emitXMMregName((unsigned)id->idReg1()), emitRegName(id->idReg2(), attr));
+ }
#endif
- else if ((ins == INS_cvttsd2si)
+ else if ((ins == INS_cvttsd2si)
#ifndef LEGACY_BACKEND
- || (ins == INS_cvtss2si) || (ins == INS_cvtsd2si) || (ins == INS_cvttss2si)
+ || (ins == INS_cvtss2si) || (ins == INS_cvtsd2si) || (ins == INS_cvttss2si)
#endif
- )
- {
- printf(" %s, %s", emitRegName(id->idReg1(), attr), emitXMMregName((unsigned)id->idReg2()));
- }
- else if (IsAVXInstruction(ins))
- {
- printf("%s, %s", emitYMMregName((unsigned)id->idReg1()), emitYMMregName((unsigned)id->idReg2()));
- }
- else if (IsSSE2Instruction(ins))
- {
- printf("%s, %s", emitXMMregName((unsigned)id->idReg1()), emitXMMregName((unsigned)id->idReg2()));
- }
+ )
+ {
+ printf(" %s, %s", emitRegName(id->idReg1(), attr), emitXMMregName((unsigned)id->idReg2()));
+ }
+ else if (IsAVXInstruction(ins))
+ {
+ printf("%s, %s", emitYMMregName((unsigned)id->idReg1()), emitYMMregName((unsigned)id->idReg2()));
+ }
+ else if (IsSSE2Instruction(ins))
+ {
+ printf("%s, %s", emitXMMregName((unsigned)id->idReg1()), emitXMMregName((unsigned)id->idReg2()));
+ }
#ifdef _TARGET_AMD64_
- else if (ins == INS_movsxd)
- {
- printf("%s, %s", emitRegName(id->idReg1(), EA_8BYTE),
- emitRegName(id->idReg2(), EA_4BYTE));
- }
+ else if (ins == INS_movsxd)
+ {
+ printf("%s, %s", emitRegName(id->idReg1(), EA_8BYTE), emitRegName(id->idReg2(), EA_4BYTE));
+ }
#endif // _TARGET_AMD64_
- else if (ins == INS_movsx || ins == INS_movzx)
- {
- printf("%s, %s", emitRegName(id->idReg1(), EA_PTRSIZE),
- emitRegName(id->idReg2(), attr));
- }
- else
- {
- printf("%s, %s", emitRegName(id->idReg1(), attr),
- emitRegName(id->idReg2(), attr));
- }
- break;
+ else if (ins == INS_movsx || ins == INS_movzx)
+ {
+ printf("%s, %s", emitRegName(id->idReg1(), EA_PTRSIZE), emitRegName(id->idReg2(), attr));
+ }
+ else
+ {
+ printf("%s, %s", emitRegName(id->idReg1(), attr), emitRegName(id->idReg2(), attr));
+ }
+ break;
- case IF_RRW_RRW:
- assert(ins == INS_xchg);
- printf("%s,", emitRegName(id->idReg1(), attr));
- printf(" %s", emitRegName(id->idReg2(), attr));
- break;
+ case IF_RRW_RRW:
+ assert(ins == INS_xchg);
+ printf("%s,", emitRegName(id->idReg1(), attr));
+ printf(" %s", emitRegName(id->idReg2(), attr));
+ break;
#ifdef FEATURE_AVX_SUPPORT
- case IF_RWR_RRD_RRD:
- assert(IsAVXInstruction(ins));
- assert(IsThreeOperandAVXInstruction(ins));
- printf("%s, ", emitRegName(id->idReg1(), attr));
- printf("%s, ", emitRegName(id->idReg2(), attr));
- printf("%s", emitRegName(id->idReg3(), attr));
- break;
+ case IF_RWR_RRD_RRD:
+ assert(IsAVXInstruction(ins));
+ assert(IsThreeOperandAVXInstruction(ins));
+ printf("%s, ", emitRegName(id->idReg1(), attr));
+ printf("%s, ", emitRegName(id->idReg2(), attr));
+ printf("%s", emitRegName(id->idReg3(), attr));
+ break;
#endif
- case IF_RRW_RRW_CNS:
- if (IsAVXInstruction(ins))
- {
- printf("%s,", emitYMMregName((unsigned)id->idReg1()), attr);
- printf(" %s", emitYMMregName((unsigned)id->idReg2()), attr);
- }
- else
- {
- printf("%s,", emitRegName(id->idReg1(), attr));
- printf(" %s", emitRegName(id->idReg2(), attr));
- }
- val = emitGetInsSC(id);
+ case IF_RRW_RRW_CNS:
+ if (IsAVXInstruction(ins))
+ {
+ printf("%s,", emitYMMregName((unsigned)id->idReg1()), attr);
+ printf(" %s", emitYMMregName((unsigned)id->idReg2()), attr);
+ }
+ else
+ {
+ printf("%s,", emitRegName(id->idReg1(), attr));
+ printf(" %s", emitRegName(id->idReg2(), attr));
+ }
+ val = emitGetInsSC(id);
#ifdef _TARGET_AMD64_
- // no 8-byte immediates allowed here!
- assert((val >= 0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL));
+ // no 8-byte immediates allowed here!
+ assert((val >= 0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL));
#endif
- printf(", ");
+ printf(", ");
#ifdef RELOC_SUPPORT
- if (id->idIsCnsReloc())
- emitDispReloc(val);
- else
+ if (id->idIsCnsReloc())
+ {
+ emitDispReloc(val);
+ }
+ else
#endif
- goto PRINT_CONSTANT;
- break;
+ {
+ goto PRINT_CONSTANT;
+ }
+ break;
- case IF_RRD:
- case IF_RWR:
- case IF_RRW:
- printf("%s", emitRegName(id->idReg1(), attr));
- emitDispShift(ins);
- break;
+ case IF_RRD:
+ case IF_RWR:
+ case IF_RRW:
+ printf("%s", emitRegName(id->idReg1(), attr));
+ emitDispShift(ins);
+ break;
- case IF_RRW_SHF:
- printf("%s", emitRegName(id->idReg1(), attr));
- emitDispShift(ins, (BYTE)emitGetInsSC(id));
- break;
+ case IF_RRW_SHF:
+ printf("%s", emitRegName(id->idReg1(), attr));
+ emitDispShift(ins, (BYTE)emitGetInsSC(id));
+ break;
- case IF_RRD_MRD:
- case IF_RWR_MRD:
- case IF_RRW_MRD:
+ case IF_RRD_MRD:
+ case IF_RWR_MRD:
+ case IF_RRW_MRD:
- if (ins == INS_movsx || ins == INS_movzx)
- attr = EA_PTRSIZE;
+ if (ins == INS_movsx || ins == INS_movzx)
+ {
+ attr = EA_PTRSIZE;
+ }
#ifdef _TARGET_AMD64_
- else if (ins == INS_movsxd)
- {
- attr = EA_PTRSIZE;
- }
+ else if (ins == INS_movsxd)
+ {
+ attr = EA_PTRSIZE;
+ }
#endif
- if (IsAVXInstruction(ins))
- {
- printf("%s, %s", emitYMMregName((unsigned)id->idReg1()), sstr);
- }
- else if (IsSSE2Instruction(ins))
- {
- printf("%s, %s", emitXMMregName((unsigned)id->idReg1()), sstr);
- }
- else
- {
- printf("%s, %s", emitRegName(id->idReg1(), attr), sstr);
- }
- offs = emitGetInsDsp(id);
- emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
- break;
+ if (IsAVXInstruction(ins))
+ {
+ printf("%s, %s", emitYMMregName((unsigned)id->idReg1()), sstr);
+ }
+ else if (IsSSE2Instruction(ins))
+ {
+ printf("%s, %s", emitXMMregName((unsigned)id->idReg1()), sstr);
+ }
+ else
+ {
+ printf("%s, %s", emitRegName(id->idReg1(), attr), sstr);
+ }
+ offs = emitGetInsDsp(id);
+ emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
+ break;
- case IF_RWR_MRD_OFF:
+ case IF_RWR_MRD_OFF:
- printf("%s, %s", emitRegName(id->idReg1(), attr), "offset");
- offs = emitGetInsDsp(id);
- emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
- break;
+ printf("%s, %s", emitRegName(id->idReg1(), attr), "offset");
+ offs = emitGetInsDsp(id);
+ emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
+ break;
- case IF_MRD_RRD:
- case IF_MWR_RRD:
- case IF_MRW_RRD:
+ case IF_MRD_RRD:
+ case IF_MWR_RRD:
+ case IF_MRW_RRD:
- printf("%s", sstr);
- offs = emitGetInsDsp(id);
- emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
- printf(", %s", emitRegName(id->idReg1(), attr));
- break;
+ printf("%s", sstr);
+ offs = emitGetInsDsp(id);
+ emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
+ printf(", %s", emitRegName(id->idReg1(), attr));
+ break;
- case IF_MRD_CNS:
- case IF_MWR_CNS:
- case IF_MRW_CNS:
- case IF_MRW_SHF:
+ case IF_MRD_CNS:
+ case IF_MWR_CNS:
+ case IF_MRW_CNS:
+ case IF_MRW_SHF:
- printf("%s", sstr);
- offs = emitGetInsDsp(id);
- emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
- emitGetInsDcmCns(id, &cnsVal);
- val = cnsVal.cnsVal;
+ printf("%s", sstr);
+ offs = emitGetInsDsp(id);
+ emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
+ emitGetInsDcmCns(id, &cnsVal);
+ val = cnsVal.cnsVal;
#ifdef _TARGET_AMD64_
- // no 8-byte immediates allowed here!
- assert((val >= 0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL));
+ // no 8-byte immediates allowed here!
+ assert((val >= 0xFFFFFFFF80000000LL) && (val <= 0x000000007FFFFFFFLL));
#endif
#ifdef RELOC_SUPPORT
- if (cnsVal.cnsReloc)
- emitDispReloc(val);
- else
+ if (cnsVal.cnsReloc)
+ {
+ emitDispReloc(val);
+ }
+ else
#endif
- if (id->idInsFmt() == IF_MRW_SHF)
- emitDispShift(ins, (BYTE)val);
- else
- {
- printf(", ");
- goto PRINT_CONSTANT;
- }
- break;
+ if (id->idInsFmt() == IF_MRW_SHF)
+ {
+ emitDispShift(ins, (BYTE)val);
+ }
+ else
+ {
+ printf(", ");
+ goto PRINT_CONSTANT;
+ }
+ break;
- case IF_MRD:
- case IF_MWR:
- case IF_MRW:
+ case IF_MRD:
+ case IF_MWR:
+ case IF_MRW:
#if FEATURE_STACK_FP_X87
- case IF_TRD_MRD:
- case IF_TWR_MRD:
- case IF_TRW_MRD:
+ case IF_TRD_MRD:
+ case IF_TWR_MRD:
+ case IF_TRW_MRD:
- // case IF_MRD_TRD:
- // case IF_MRW_TRD:
- case IF_MWR_TRD:
+ // case IF_MRD_TRD:
+ // case IF_MRW_TRD:
+ case IF_MWR_TRD:
#endif // FEATURE_STACK_FP_X87
- printf("%s", sstr);
- offs = emitGetInsDsp(id);
- emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
- emitDispShift(ins);
- break;
+ printf("%s", sstr);
+ offs = emitGetInsDsp(id);
+ emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
+ emitDispShift(ins);
+ break;
- case IF_MRD_OFF:
+ case IF_MRD_OFF:
- printf("offset ");
- offs = emitGetInsDsp(id);
- emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
- break;
+ printf("offset ");
+ offs = emitGetInsDsp(id);
+ emitDispClsVar(id->idAddr()->iiaFieldHnd, offs, ID_INFO_DSP_RELOC);
+ break;
- case IF_RRD_CNS:
- case IF_RWR_CNS:
- case IF_RRW_CNS:
- printf("%s, ", emitRegName(id->idReg1(), attr));
- val = emitGetInsSC(id);
+ case IF_RRD_CNS:
+ case IF_RWR_CNS:
+ case IF_RRW_CNS:
+ printf("%s, ", emitRegName(id->idReg1(), attr));
+ val = emitGetInsSC(id);
#ifdef RELOC_SUPPORT
- if (id->idIsCnsReloc())
- emitDispReloc(val);
- else
+ if (id->idIsCnsReloc())
+ {
+ emitDispReloc(val);
+ }
+ else
#endif
- goto PRINT_CONSTANT;
- break;
+ {
+ goto PRINT_CONSTANT;
+ }
+ break;
#if FEATURE_STACK_FP_X87
- case IF_TRD_FRD:
- case IF_TWR_FRD:
- case IF_TRW_FRD:
- switch (ins)
- {
- case INS_fld:
- case INS_fxch:
- break;
+ case IF_TRD_FRD:
+ case IF_TWR_FRD:
+ case IF_TRW_FRD:
+ switch (ins)
+ {
+ case INS_fld:
+ case INS_fxch:
+ break;
- default:
- printf("%s, ", emitFPregName(0));
+ default:
+ printf("%s, ", emitFPregName(0));
+ break;
+ }
+ printf("%s", emitFPregName((unsigned)id->idReg1()));
break;
- }
- printf("%s", emitFPregName((unsigned)id->idReg1()));
- break;
- case IF_FRD_TRD:
- case IF_FWR_TRD:
- case IF_FRW_TRD:
- printf("%s", emitFPregName((unsigned)id->idReg1()));
- if (ins != INS_fst && ins != INS_fstp)
- printf(", %s", emitFPregName(0));
- break;
+ case IF_FRD_TRD:
+ case IF_FWR_TRD:
+ case IF_FRW_TRD:
+ printf("%s", emitFPregName((unsigned)id->idReg1()));
+ if (ins != INS_fst && ins != INS_fstp)
+ printf(", %s", emitFPregName(0));
+ break;
#endif // FEATURE_STACK_FP_X87
- case IF_LABEL:
- case IF_RWR_LABEL:
- case IF_SWR_LABEL:
+ case IF_LABEL:
+ case IF_RWR_LABEL:
+ case IF_SWR_LABEL:
- if (ins == INS_lea)
- {
- printf("%s, ", emitRegName(id->idReg1(), attr));
- }
- else if (ins == INS_mov)
- {
- /* mov dword ptr [frame.callSiteReturnAddress], label */
- assert(id->idInsFmt() == IF_SWR_LABEL);
- instrDescLbl* idlbl = (instrDescLbl*)id;
+ if (ins == INS_lea)
+ {
+ printf("%s, ", emitRegName(id->idReg1(), attr));
+ }
+ else if (ins == INS_mov)
+ {
+ /* mov dword ptr [frame.callSiteReturnAddress], label */
+ assert(id->idInsFmt() == IF_SWR_LABEL);
+ instrDescLbl* idlbl = (instrDescLbl*)id;
- emitDispFrameRef(idlbl->dstLclVar.lvaVarNum(),
- idlbl->dstLclVar.lvaOffset(),
- 0,
- asmfm);
+ emitDispFrameRef(idlbl->dstLclVar.lvaVarNum(), idlbl->dstLclVar.lvaOffset(), 0, asmfm);
- printf(", ");
- }
+ printf(", ");
+ }
- if (((instrDescJmp*)id)->idjShort)
- printf("SHORT ");
+ if (((instrDescJmp*)id)->idjShort)
+ {
+ printf("SHORT ");
+ }
- if (id->idIsBound())
- {
- printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
- }
- else
- {
- printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
- }
- break;
+ if (id->idIsBound())
+ {
+ printf("G_M%03u_IG%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaIGlabel->igNum);
+ }
+ else
+ {
+ printf("L_M%03u_BB%02u", Compiler::s_compMethodsCount, id->idAddr()->iiaBBlabel->bbNum);
+ }
+ break;
- case IF_METHOD:
- case IF_METHPTR:
- if (id->idIsCallAddr())
- {
- offs = (ssize_t)id->idAddr()->iiaAddr;
- methodName = "";
- }
- else
- {
- offs = 0;
- methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie);
- }
+ case IF_METHOD:
+ case IF_METHPTR:
+ if (id->idIsCallAddr())
+ {
+ offs = (ssize_t)id->idAddr()->iiaAddr;
+ methodName = "";
+ }
+ else
+ {
+ offs = 0;
+ methodName = emitComp->eeGetMethodFullName((CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie);
+ }
- if (id->idInsFmt() == IF_METHPTR)
- printf("[");
+ if (id->idInsFmt() == IF_METHPTR)
+ {
+ printf("[");
+ }
- if (offs)
- {
- if (id->idIsDspReloc())
- printf("reloc ");
- printf("%08X", offs);
- }
- else
- {
- printf("%s", methodName);
- }
+ if (offs)
+ {
+ if (id->idIsDspReloc())
+ {
+ printf("reloc ");
+ }
+ printf("%08X", offs);
+ }
+ else
+ {
+ printf("%s", methodName);
+ }
- if (id->idInsFmt() == IF_METHPTR)
- printf("]");
+ if (id->idInsFmt() == IF_METHPTR)
+ {
+ printf("]");
+ }
- break;
+ break;
#if FEATURE_STACK_FP_X87
- case IF_TRD:
- case IF_TWR:
- case IF_TRW:
+ case IF_TRD:
+ case IF_TWR:
+ case IF_TRW:
#endif // FEATURE_STACK_FP_X87
- case IF_NONE:
- break;
+ case IF_NONE:
+ break;
- default:
- printf("unexpected format %s", emitIfName(id->idInsFmt()));
- assert(!"unexpectedFormat");
- break;
+ default:
+ printf("unexpected format %s", emitIfName(id->idInsFmt()));
+ assert(!"unexpectedFormat");
+ break;
}
if (sz != 0 && sz != id->idCodeSize() && (!asmfm || emitComp->verbose))
@@ -7519,154 +7386,152 @@ static BYTE* emitOutputNOP(BYTE* dst, size_t nBytes)
switch (nBytes)
{
- case 15:
- *dst++ = 0x90;
- __fallthrough;
- case 14:
- *dst++ = 0x90;
- __fallthrough;
- case 13:
- *dst++ = 0x90;
- __fallthrough;
- case 12:
- *dst++ = 0x90;
- __fallthrough;
- case 11:
- *dst++ = 0x90;
- __fallthrough;
- case 10:
- *dst++ = 0x90;
- __fallthrough;
- case 9:
- *dst++ = 0x90;
- __fallthrough;
- case 8:
- *dst++ = 0x90;
- __fallthrough;
- case 7:
- *dst++ = 0x90;
- __fallthrough;
- case 6:
- *dst++ = 0x90;
- __fallthrough;
- case 5:
- *dst++ = 0x90;
- __fallthrough;
- case 4:
- *dst++ = 0x90;
- __fallthrough;
- case 3:
- *dst++ = 0x90;
- __fallthrough;
- case 2:
- *dst++ = 0x90;
- __fallthrough;
- case 1:
- *dst++ = 0x90;
- break;
- case 0:
- break;
+ case 15:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 14:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 13:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 12:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 11:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 10:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 9:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 8:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 7:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 6:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 5:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 4:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 3:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 2:
+ *dst++ = 0x90;
+ __fallthrough;
+ case 1:
+ *dst++ = 0x90;
+ break;
+ case 0:
+ break;
}
-#else // _TARGET_AMD64_
+#else // _TARGET_AMD64_
switch (nBytes)
{
- case 2:
- *dst++ = 0x66;
- __fallthrough;
- case 1:
- *dst++ = 0x90;
- break;
- case 0:
- break;
- case 3:
- *dst++ = 0x0F;
- *dst++ = 0x1F;
- *dst++ = 0x00;
- break;
- case 4:
- *dst++ = 0x0F;
- *dst++ = 0x1F;
- *dst++ = 0x40;
- *dst++ = 0x00;
- break;
- case 6:
- *dst++ = 0x66;
- __fallthrough;
- case 5:
- *dst++ = 0x0F;
- *dst++ = 0x1F;
- *dst++ = 0x44;
- *dst++ = 0x00;
- *dst++ = 0x00;
- break;
- case 7:
- *dst++ = 0x0F;
- *dst++ = 0x1F;
- *dst++ = 0x80;
- *dst++ = 0x00;
- *dst++ = 0x00;
- *dst++ = 0x00;
- *dst++ = 0x00;
- break;
- case 15:
- // More than 3 prefixes is slower than just 2 NOPs
- dst = emitOutputNOP(emitOutputNOP(dst, 7), 8);
- break;
- case 14:
- // More than 3 prefixes is slower than just 2 NOPs
- dst = emitOutputNOP(emitOutputNOP(dst, 7), 7);
- break;
- case 13:
- // More than 3 prefixes is slower than just 2 NOPs
- dst = emitOutputNOP(emitOutputNOP(dst, 5), 8);
- break;
- case 12:
- // More than 3 prefixes is slower than just 2 NOPs
- dst = emitOutputNOP(emitOutputNOP(dst, 4), 8);
- break;
- case 11:
- *dst++ = 0x66;
- __fallthrough;
- case 10:
- *dst++ = 0x66;
- __fallthrough;
- case 9:
- *dst++ = 0x66;
- __fallthrough;
- case 8:
- *dst++ = 0x0F;
- *dst++ = 0x1F;
- *dst++ = 0x84;
- *dst++ = 0x00;
- *dst++ = 0x00;
- *dst++ = 0x00;
- *dst++ = 0x00;
- *dst++ = 0x00;
- break;
+ case 2:
+ *dst++ = 0x66;
+ __fallthrough;
+ case 1:
+ *dst++ = 0x90;
+ break;
+ case 0:
+ break;
+ case 3:
+ *dst++ = 0x0F;
+ *dst++ = 0x1F;
+ *dst++ = 0x00;
+ break;
+ case 4:
+ *dst++ = 0x0F;
+ *dst++ = 0x1F;
+ *dst++ = 0x40;
+ *dst++ = 0x00;
+ break;
+ case 6:
+ *dst++ = 0x66;
+ __fallthrough;
+ case 5:
+ *dst++ = 0x0F;
+ *dst++ = 0x1F;
+ *dst++ = 0x44;
+ *dst++ = 0x00;
+ *dst++ = 0x00;
+ break;
+ case 7:
+ *dst++ = 0x0F;
+ *dst++ = 0x1F;
+ *dst++ = 0x80;
+ *dst++ = 0x00;
+ *dst++ = 0x00;
+ *dst++ = 0x00;
+ *dst++ = 0x00;
+ break;
+ case 15:
+ // More than 3 prefixes is slower than just 2 NOPs
+ dst = emitOutputNOP(emitOutputNOP(dst, 7), 8);
+ break;
+ case 14:
+ // More than 3 prefixes is slower than just 2 NOPs
+ dst = emitOutputNOP(emitOutputNOP(dst, 7), 7);
+ break;
+ case 13:
+ // More than 3 prefixes is slower than just 2 NOPs
+ dst = emitOutputNOP(emitOutputNOP(dst, 5), 8);
+ break;
+ case 12:
+ // More than 3 prefixes is slower than just 2 NOPs
+ dst = emitOutputNOP(emitOutputNOP(dst, 4), 8);
+ break;
+ case 11:
+ *dst++ = 0x66;
+ __fallthrough;
+ case 10:
+ *dst++ = 0x66;
+ __fallthrough;
+ case 9:
+ *dst++ = 0x66;
+ __fallthrough;
+ case 8:
+ *dst++ = 0x0F;
+ *dst++ = 0x1F;
+ *dst++ = 0x84;
+ *dst++ = 0x00;
+ *dst++ = 0x00;
+ *dst++ = 0x00;
+ *dst++ = 0x00;
+ *dst++ = 0x00;
+ break;
}
#endif // _TARGET_AMD64_
return dst;
}
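
The two switch statements above encode the same policy two ways: one simply repeats single-byte 0x90 NOPs, the other uses the recommended multi-byte 0F 1F forms plus 0x66 prefixes and splits very long requests into two NOPs. Below is a simplified, self-contained sketch of the table-driven equivalent that caps a single NOP at nine bytes; padWithNops and the table layout are illustrative assumptions, not emitter APIs.

    // Simplified sketch (not the JIT's emitOutputNOP): pad a code buffer with
    // recommended multi-byte NOP encodings, at most one prefix, chunks of <= 9 bytes.
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static uint8_t* padWithNops(uint8_t* dst, size_t nBytes)
    {
        // Recommended 1..9 byte NOPs: 90, 66 90, and the 0F 1F /0 family.
        static const uint8_t nops[9][9] = {
            {0x90},
            {0x66, 0x90},
            {0x0F, 0x1F, 0x00},
            {0x0F, 0x1F, 0x40, 0x00},
            {0x0F, 0x1F, 0x44, 0x00, 0x00},
            {0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00},
            {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},
            {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
            {0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
        };

        while (nBytes > 0)
        {
            size_t chunk = (nBytes > 9) ? 9 : nBytes; // split long pads into several NOPs
            std::memcpy(dst, nops[chunk - 1], chunk);
            dst += chunk;
            nBytes -= chunk;
        }
        return dst;
    }
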
-
-
/*****************************************************************************
*
* Output an instruction involving an address mode.
*/
-BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc)
+BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc)
{
- regNumber reg;
- regNumber rgx;
- ssize_t dsp;
- bool dspInByte;
- bool dspIsZero;
+ regNumber reg;
+ regNumber rgx;
+ ssize_t dsp;
+ bool dspInByte;
+ bool dspIsZero;
- instruction ins = id->idIns();
- emitAttr size = id->idOpSize();
- size_t opsz = EA_SIZE_IN_BYTES(size);
+ instruction ins = id->idIns();
+ emitAttr size = id->idOpSize();
+ size_t opsz = EA_SIZE_IN_BYTES(size);
// Get the base/index registers
reg = id->idAddr()->iiaAddrMode.amBaseReg;
@@ -7676,7 +7541,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, size_t code, CnsVal*
if (ins == INS_call)
{
// Special case: call via a register
- if (id->idIsCallRegPtr())
+ if (id->idIsCallRegPtr())
{
size_t opcode = insEncodeMRreg(INS_call, reg, EA_PTRSIZE, insCodeMR(INS_call));
@@ -7711,21 +7576,22 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, size_t code, CnsVal*
goto GOT_DSP;
}
- // Is there a large constant operand?
+ // Is there a large constant operand?
if (addc && (size > EA_1BYTE))
{
ssize_t cval = addc->cnsVal;
// Does the constant fit in a byte?
- if ((signed char)cval == cval &&
+ if ((signed char)cval == cval &&
#ifdef RELOC_SUPPORT
- addc->cnsReloc == false &&
+ addc->cnsReloc == false &&
#endif
- ins != INS_mov &&
- ins != INS_test)
+ ins != INS_mov && ins != INS_test)
{
- if (id->idInsFmt() != IF_ARW_SHF)
+ if (id->idInsFmt() != IF_ARW_SHF)
+ {
code |= 2;
+ }
opsz = 1;
}
@@ -7741,7 +7607,7 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, size_t code, CnsVal*
{
        // Encode source operand reg in 'vvvv' bits in 1's complement form
        // The order of operands is reversed; therefore, use reg2 as the source.
- code = insEncodeReg3456(ins, id->idReg1(), size, code);
+ code = insEncodeReg3456(ins, id->idReg1(), size, code);
}
// Emit the REX prefix if required
@@ -7769,11 +7635,12 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, size_t code, CnsVal*
dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
// Output the highest word of the opcode
- // We need to check again as in case of AVX instructions leading opcode bytes are stripped off
+        // We need to check again because, for AVX instructions, the leading opcode bytes are stripped off
// and encoded as part of VEX prefix.
if (code & 0xFF000000)
{
- dst += emitOutputWord(dst, code >> 16); code &= 0x0000FFFF;
+ dst += emitOutputWord(dst, code >> 16);
+ code &= 0x0000FFFF;
}
}
else if (code & 0x00FF0000)
@@ -7784,28 +7651,29 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, size_t code, CnsVal*
// Output the highest byte of the opcode
if (code & 0x00FF0000)
{
- dst += emitOutputByte(dst, code >> 16); code &= 0x0000FFFF;
+ dst += emitOutputByte(dst, code >> 16);
+ code &= 0x0000FFFF;
}
// Use the large version if this is not a byte. This trick will not
// work in case of SSE2 and AVX instructions.
if ((size != EA_1BYTE) && (ins != INS_imul) && !IsSSE2Instruction(ins) && !IsAVXInstruction(ins))
+ {
code++;
+ }
}
else if (CodeGen::instIsFP(ins))
{
#if FEATURE_STACK_FP_X87
- assert(size == EA_4BYTE ||
- size == EA_8BYTE ||
- ins == INS_fldcw ||
- ins == INS_fnstcw);
-#else // !FEATURE_STACK_FP_X87
- assert(size == EA_4BYTE ||
- size == EA_8BYTE);
+ assert(size == EA_4BYTE || size == EA_8BYTE || ins == INS_fldcw || ins == INS_fnstcw);
+#else // !FEATURE_STACK_FP_X87
+ assert(size == EA_4BYTE || size == EA_8BYTE);
#endif // ! FEATURE_STACK_FP_X87
- if (size == EA_8BYTE)
+ if (size == EA_8BYTE)
+ {
code += 4;
+ }
}
else if (!IsSSE2Instruction(ins) && !IsAVXInstruction(ins))
{
@@ -7813,40 +7681,40 @@ BYTE* emitter::emitOutputAM(BYTE* dst, instrDesc* id, size_t code, CnsVal*
switch (size)
{
- case EA_1BYTE:
- break;
+ case EA_1BYTE:
+ break;
- case EA_2BYTE:
+ case EA_2BYTE:
- /* Output a size prefix for a 16-bit operand */
+ /* Output a size prefix for a 16-bit operand */
- dst += emitOutputByte(dst, 0x66);
+ dst += emitOutputByte(dst, 0x66);
- __fallthrough;
+ __fallthrough;
- case EA_4BYTE:
+ case EA_4BYTE:
#ifdef _TARGET_AMD64_
- case EA_8BYTE:
+ case EA_8BYTE:
#endif
- /* Set the 'w' bit to get the large version */
+ /* Set the 'w' bit to get the large version */
- code |= 0x1;
- break;
+ code |= 0x1;
+ break;
#ifdef _TARGET_X86_
- case EA_8BYTE:
+ case EA_8BYTE:
- /* Double operand - set the appropriate bit */
+ /* Double operand - set the appropriate bit */
- code |= 0x04;
- break;
+ code |= 0x04;
+ break;
#endif // _TARGET_X86_
- default:
- NO_WAY("unexpected size");
- break;
+ default:
+ NO_WAY("unexpected size");
+ break;
}
}
@@ -7864,172 +7732,141 @@ GOT_DSP:
#ifdef RELOC_SUPPORT
if (id->idIsDspReloc())
{
- dspInByte = false; // relocs can't be placed in a byte
+ dspInByte = false; // relocs can't be placed in a byte
}
#endif
// Is there a [scaled] index component?
- if (rgx == REG_NA)
+ if (rgx == REG_NA)
{
// The address is of the form "[reg+disp]"
switch (reg)
{
- case REG_NA:
- if (id->idIsDspReloc())
- {
- INT32 addlDelta = 0;
+ case REG_NA:
+ if (id->idIsDspReloc())
+ {
+ INT32 addlDelta = 0;
- // The address is of the form "[disp]"
- // On x86 - disp is relative to zero
- // On Amd64 - disp is relative to RIP
- dst += emitOutputWord(dst, code | 0x0500);
+ // The address is of the form "[disp]"
+ // On x86 - disp is relative to zero
+ // On Amd64 - disp is relative to RIP
+ dst += emitOutputWord(dst, code | 0x0500);
- if (addc)
- {
- // It is of the form "ins [disp], immed"
- // For emitting relocation, we also need to take into account of the
- // additional bytes of code emitted for immed val.
+ if (addc)
+ {
+ // It is of the form "ins [disp], immed"
+                        // For emitting relocation, we also need to take into account the
+                        // additional bytes of code emitted for the immediate value.
- ssize_t cval = addc->cnsVal;
+ ssize_t cval = addc->cnsVal;
#ifdef _TARGET_AMD64_
- // all these opcodes only take a sign-extended 4-byte immediate
- noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
-#else
- noway_assert(opsz <= 4);
+ // all these opcodes only take a sign-extended 4-byte immediate
+ noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
+#else
+ noway_assert(opsz <= 4);
#endif
- switch (opsz)
- {
- case 0:
- case 4:
- case 8: addlDelta = -4; break;
- case 2: addlDelta = -2; break;
- case 1: addlDelta = -1; break;
-
- default:
- assert(!"unexpected operand size");
- unreached();
+ switch (opsz)
+ {
+ case 0:
+ case 4:
+ case 8:
+ addlDelta = -4;
+ break;
+ case 2:
+ addlDelta = -2;
+ break;
+ case 1:
+ addlDelta = -1;
+ break;
+
+ default:
+ assert(!"unexpected operand size");
+ unreached();
+ }
}
- }
#ifdef _TARGET_AMD64_
- // We emit zero on Amd64, to avoid the assert in emitOutputLong()
- dst += emitOutputLong(dst, 0);
+ // We emit zero on Amd64, to avoid the assert in emitOutputLong()
+ dst += emitOutputLong(dst, 0);
#else
- dst += emitOutputLong(dst, dsp);
+ dst += emitOutputLong(dst, dsp);
#endif
- emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_DISP32, 0, addlDelta);
- }
- else
- {
+ emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_DISP32, 0,
+ addlDelta);
+ }
+ else
+ {
#ifdef _TARGET_X86_
- dst += emitOutputWord(dst, code | 0x0500);
-#else //_TARGET_AMD64_
- // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero.
- // This addr mode should never be used while generating relocatable ngen code nor if
- // the addr can be encoded as pc-relative address.
- noway_assert(!emitComp->opts.compReloc);
- noway_assert(codeGen->genAddrRelocTypeHint((size_t)dsp) != IMAGE_REL_BASED_REL32);
- noway_assert((int)dsp == dsp);
-
- // This requires, specifying a SIB byte after ModRM byte.
- dst += emitOutputWord(dst, code | 0x0400);
- dst += emitOutputByte(dst, 0x25);
+ dst += emitOutputWord(dst, code | 0x0500);
+#else //_TARGET_AMD64_
+ // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero.
+ // This addr mode should never be used while generating relocatable ngen code nor if
+ // the addr can be encoded as pc-relative address.
+ noway_assert(!emitComp->opts.compReloc);
+ noway_assert(codeGen->genAddrRelocTypeHint((size_t)dsp) != IMAGE_REL_BASED_REL32);
+ noway_assert((int)dsp == dsp);
+
+                    // This requires specifying a SIB byte after the ModRM byte.
+ dst += emitOutputWord(dst, code | 0x0400);
+ dst += emitOutputByte(dst, 0x25);
#endif //_TARGET_AMD64_
- dst += emitOutputLong(dst, dsp);
- }
- break;
-
-
- case REG_EBP:
- // Does the offset fit in a byte?
- if (dspInByte)
- {
- dst += emitOutputWord(dst, code | 0x4500);
- dst += emitOutputByte(dst, dsp);
- }
- else
- {
- dst += emitOutputWord(dst, code | 0x8500);
- dst += emitOutputLong(dst, dsp);
+ dst += emitOutputLong(dst, dsp);
+ }
+ break;
-#ifdef RELOC_SUPPORT
- if (id->idIsDspReloc())
+ case REG_EBP:
+ // Does the offset fit in a byte?
+ if (dspInByte)
{
- emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW);
+ dst += emitOutputWord(dst, code | 0x4500);
+ dst += emitOutputByte(dst, dsp);
}
+ else
+ {
+ dst += emitOutputWord(dst, code | 0x8500);
+ dst += emitOutputLong(dst, dsp);
+
+#ifdef RELOC_SUPPORT
+ if (id->idIsDspReloc())
+ {
+ emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW);
+ }
#endif
- }
- break;
+ }
+ break;
- case REG_ESP:
-#ifdef LEGACY_BACKEND
- // REG_ESP could be REG_R12, which applies to any instruction
- //
- // This assert isn't too helpful from the OptJit point of view
- //
- // a better question is why is it here at all
- //
- assert((ins == INS_lea) ||
- (ins == INS_mov) ||
- (ins == INS_test) ||
- (ins == INS_cmp) ||
- (ins == INS_fld && dspIsZero) ||
- (ins == INS_fstp && dspIsZero) ||
- (ins == INS_fistp && dspIsZero) ||
- IsSSE2Instruction(ins) ||
- IsAVXInstruction(ins) ||
- (ins == INS_or));
+ case REG_ESP:
+#ifdef LEGACY_BACKEND
+ // REG_ESP could be REG_R12, which applies to any instruction
+ //
+ // This assert isn't too helpful from the OptJit point of view
+ //
+ // a better question is why is it here at all
+ //
+ assert((ins == INS_lea) || (ins == INS_mov) || (ins == INS_test) || (ins == INS_cmp) ||
+ (ins == INS_fld && dspIsZero) || (ins == INS_fstp && dspIsZero) ||
+ (ins == INS_fistp && dspIsZero) || IsSSE2Instruction(ins) || IsAVXInstruction(ins) ||
+ (ins == INS_or));
#endif // LEGACY_BACKEND
- // Is the offset 0 or does it at least fit in a byte?
- if (dspIsZero)
- {
- dst += emitOutputWord(dst, code | 0x0400);
- dst += emitOutputByte(dst, 0x24);
- }
- else if (dspInByte)
- {
- dst += emitOutputWord(dst, code | 0x4400);
- dst += emitOutputByte(dst, 0x24);
- dst += emitOutputByte(dst, dsp);
- }
- else
- {
- dst += emitOutputWord(dst, code | 0x8400);
- dst += emitOutputByte(dst, 0x24);
- dst += emitOutputLong(dst, dsp);
-#ifdef RELOC_SUPPORT
- if (id->idIsDspReloc())
+ // Is the offset 0 or does it at least fit in a byte?
+ if (dspIsZero)
{
- emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW);
+ dst += emitOutputWord(dst, code | 0x0400);
+ dst += emitOutputByte(dst, 0x24);
}
-#endif
- }
- break;
-
- default:
- // Put the register in the opcode
- code |= insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) << 8;
-
- // Is there a displacement?
- if (dspIsZero)
- {
- // This is simply "[reg]"
- dst += emitOutputWord(dst, code);
- }
- else
- {
- // This is [reg + dsp]" -- does the offset fit in a byte?
- if (dspInByte)
+ else if (dspInByte)
{
- dst += emitOutputWord(dst, code | 0x4000);
+ dst += emitOutputWord(dst, code | 0x4400);
+ dst += emitOutputByte(dst, 0x24);
dst += emitOutputByte(dst, dsp);
}
else
{
- dst += emitOutputWord(dst, code | 0x8000);
+ dst += emitOutputWord(dst, code | 0x8400);
+ dst += emitOutputByte(dst, 0x24);
dst += emitOutputLong(dst, dsp);
#ifdef RELOC_SUPPORT
if (id->idIsDspReloc())
@@ -8038,17 +7875,48 @@ GOT_DSP:
}
#endif
}
- }
+ break;
- break;
+ default:
+ // Put the register in the opcode
+ code |= insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) << 8;
+
+ // Is there a displacement?
+ if (dspIsZero)
+ {
+ // This is simply "[reg]"
+ dst += emitOutputWord(dst, code);
+ }
+ else
+ {
+ // This is [reg + dsp]" -- does the offset fit in a byte?
+ if (dspInByte)
+ {
+ dst += emitOutputWord(dst, code | 0x4000);
+ dst += emitOutputByte(dst, dsp);
+ }
+ else
+ {
+ dst += emitOutputWord(dst, code | 0x8000);
+ dst += emitOutputLong(dst, dsp);
+#ifdef RELOC_SUPPORT
+ if (id->idIsDspReloc())
+ {
+ emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)dsp, IMAGE_REL_BASED_HIGHLOW);
+ }
+#endif
+ }
+ }
+
+ break;
}
}
else
{
- unsigned regByte;
+ unsigned regByte;
// We have a scaled index operand
- unsigned mul = emitDecodeScale(id->idAddr()->iiaAddrMode.amScale);
+ unsigned mul = emitDecodeScale(id->idAddr()->iiaAddrMode.amScale);
// Is the index operand scaled?
if (mul > 1)
@@ -8056,12 +7924,12 @@ GOT_DSP:
// Is there a base register?
if (reg != REG_NA)
{
- // The address is "[reg + {2/4/8} * rgx + icon]"
+ // The address is "[reg + {2/4/8} * rgx + icon]"
regByte = insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) |
insEncodeReg345(ins, rgx, EA_PTRSIZE, nullptr) | insSSval(mul);
// Emit [ebp + {2/4/8} * rgz] as [ebp + {2/4/8} * rgx + 0]
- if (dspIsZero && reg != REG_EBP)
+ if (dspIsZero && reg != REG_EBP)
{
// The address is "[reg + {2/4/8} * rgx]"
dst += emitOutputWord(dst, code | 0x0400);
@@ -8070,7 +7938,7 @@ GOT_DSP:
else
{
// The address is "[reg + {2/4/8} * rgx + disp]"
- if (dspInByte)
+ if (dspInByte)
{
dst += emitOutputWord(dst, code | 0x4400);
dst += emitOutputByte(dst, regByte);
@@ -8094,14 +7962,16 @@ GOT_DSP:
{
// The address is "[{2/4/8} * rgx + icon]"
regByte = insEncodeReg012(ins, REG_EBP, EA_PTRSIZE, nullptr) |
- insEncodeReg345(ins, rgx , EA_PTRSIZE, nullptr) | insSSval(mul);
+ insEncodeReg345(ins, rgx, EA_PTRSIZE, nullptr) | insSSval(mul);
dst += emitOutputWord(dst, code | 0x0400);
dst += emitOutputByte(dst, regByte);
// Special case: jump through a jump table
- if (ins == INS_i_jmp)
+ if (ins == INS_i_jmp)
+ {
dsp += (size_t)emitConsBlock;
+ }
dst += emitOutputLong(dst, dsp);
#ifdef RELOC_SUPPORT
@@ -8115,10 +7985,9 @@ GOT_DSP:
else
{
// The address is "[reg+rgx+dsp]"
- regByte = insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) |
- insEncodeReg345(ins, rgx, EA_PTRSIZE, nullptr);
+ regByte = insEncodeReg012(ins, reg, EA_PTRSIZE, nullptr) | insEncodeReg345(ins, rgx, EA_PTRSIZE, nullptr);
- if (dspIsZero && reg != REG_EBP)
+ if (dspIsZero && reg != REG_EBP)
{
                // This is "[reg+rgx]"
dst += emitOutputWord(dst, code | 0x0400);
@@ -8127,7 +7996,7 @@ GOT_DSP:
else
{
                // This is "[reg+rgx+dsp]" -- does the offset fit in a byte?
- if (dspInByte)
+ if (dspInByte)
{
dst += emitOutputWord(dst, code | 0x4400);
dst += emitOutputByte(dst, regByte);
@@ -8150,25 +8019,31 @@ GOT_DSP:
}
// Now generate the constant value, if present
- if (addc)
+ if (addc)
{
ssize_t cval = addc->cnsVal;
#ifdef _TARGET_AMD64_
// all these opcodes only take a sign-extended 4-byte immediate
- noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
+ noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
#endif
switch (opsz)
{
- case 0:
- case 4:
- case 8: dst += emitOutputLong(dst, cval); break;
- case 2: dst += emitOutputWord(dst, cval); break;
- case 1: dst += emitOutputByte(dst, cval); break;
+ case 0:
+ case 4:
+ case 8:
+ dst += emitOutputLong(dst, cval);
+ break;
+ case 2:
+ dst += emitOutputWord(dst, cval);
+ break;
+ case 1:
+ dst += emitOutputByte(dst, cval);
+ break;
- default:
- assert(!"unexpected operand size");
+ default:
+ assert(!"unexpected operand size");
}
#ifdef RELOC_SUPPORT
@@ -8183,85 +8058,86 @@ GOT_DSP:
DONE:
// Does this instruction operate on a GC ref value?
- if (id->idGCref())
+ if (id->idGCref())
{
switch (id->idInsFmt())
{
- case IF_ARD:
- case IF_AWR:
- case IF_ARW:
- break;
+ case IF_ARD:
+ case IF_AWR:
+ case IF_ARW:
+ break;
- case IF_RRD_ARD:
- break;
+ case IF_RRD_ARD:
+ break;
- case IF_RWR_ARD:
- emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
- break;
+ case IF_RWR_ARD:
+ emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
+ break;
- case IF_RRW_ARD:
- assert(id->idGCref() == GCT_BYREF);
+ case IF_RRW_ARD:
+ assert(id->idGCref() == GCT_BYREF);
#ifdef DEBUG
- regMaskTP regMask;
- regMask = genRegMask(id->idReg1());
-
- // <BUGNUM> VSW 335101 </BUGNUM>
- // Either id->idReg1(), id->idAddr()->iiaAddrMode.amBaseReg, or id->idAddr()->iiaAddrMode.amIndxReg
- // could be a BYREF.
- // For example in the following case:
- // mov EDX, bword ptr [EBP-78H] ; EDX becomes BYREF after this instr.
- // add EAX, bword ptr [EDX+8] ; It is the EDX that's causing id->idGCref to be GCT_BYREF.
- // ; EAX becomes BYREF after this instr.
- // <BUGNUM> DD 273707 </BUGNUM>
- // add EDX, bword ptr [036464E0H] ; int + static field (technically a GCREF)=BYREF
- regMaskTP baseRegMask;
- if (reg == REG_NA)
- {
- baseRegMask = RBM_NONE;
- }
- else
- {
- baseRegMask = genRegMask(reg);
- }
- regMaskTP indexRegMask;
- if (rgx == REG_NA)
- {
- indexRegMask = RBM_NONE;
- }
- else
- {
- indexRegMask = genRegMask(rgx);
- }
+ regMaskTP regMask;
+ regMask = genRegMask(id->idReg1());
+
+ // <BUGNUM> VSW 335101 </BUGNUM>
+ // Either id->idReg1(), id->idAddr()->iiaAddrMode.amBaseReg, or id->idAddr()->iiaAddrMode.amIndxReg
+ // could be a BYREF.
+ // For example in the following case:
+ // mov EDX, bword ptr [EBP-78H] ; EDX becomes BYREF after this instr.
+ // add EAX, bword ptr [EDX+8] ; It is the EDX that's causing id->idGCref to be GCT_BYREF.
+ // ; EAX becomes BYREF after this instr.
+ // <BUGNUM> DD 273707 </BUGNUM>
+ // add EDX, bword ptr [036464E0H] ; int + static field (technically a GCREF)=BYREF
+ regMaskTP baseRegMask;
+ if (reg == REG_NA)
+ {
+ baseRegMask = RBM_NONE;
+ }
+ else
+ {
+ baseRegMask = genRegMask(reg);
+ }
+ regMaskTP indexRegMask;
+ if (rgx == REG_NA)
+ {
+ indexRegMask = RBM_NONE;
+ }
+ else
+ {
+ indexRegMask = genRegMask(rgx);
+ }
- // r1 could have been a GCREF as GCREF + int=BYREF
- // or BYREF+/-int=BYREF
- assert(((reg == REG_NA) && (rgx == REG_NA) && (ins == INS_add || ins == INS_sub)) ||
- (( (regMask | baseRegMask | indexRegMask) & emitThisGCrefRegs) && (ins == INS_add )) ||
- (( (regMask | baseRegMask | indexRegMask) & emitThisByrefRegs) && (ins == INS_add || ins == INS_sub)));
+ // r1 could have been a GCREF as GCREF + int=BYREF
+ // or BYREF+/-int=BYREF
+ assert(((reg == REG_NA) && (rgx == REG_NA) && (ins == INS_add || ins == INS_sub)) ||
+ (((regMask | baseRegMask | indexRegMask) & emitThisGCrefRegs) && (ins == INS_add)) ||
+ (((regMask | baseRegMask | indexRegMask) & emitThisByrefRegs) &&
+ (ins == INS_add || ins == INS_sub)));
#endif
- // Mark it as holding a GCT_BYREF
- emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
- break;
+ // Mark it as holding a GCT_BYREF
+ emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
+ break;
- case IF_ARD_RRD:
- case IF_AWR_RRD:
- break;
+ case IF_ARD_RRD:
+ case IF_AWR_RRD:
+ break;
- case IF_ARD_CNS:
- case IF_AWR_CNS:
- break;
+ case IF_ARD_CNS:
+ case IF_AWR_CNS:
+ break;
- case IF_ARW_RRD:
- case IF_ARW_CNS:
- assert(id->idGCref() == GCT_BYREF && (ins == INS_add || ins == INS_sub));
- break;
+ case IF_ARW_RRD:
+ case IF_ARW_CNS:
+ assert(id->idGCref() == GCT_BYREF && (ins == INS_add || ins == INS_sub));
+ break;
- default:
-#ifdef DEBUG
- emitDispIns(id, false, false, false);
+ default:
+#ifdef DEBUG
+ emitDispIns(id, false, false, false);
#endif
- assert(!"unexpected GC ref instruction format");
+ assert(!"unexpected GC ref instruction format");
}
// mul can never produce a GC ref
@@ -8277,11 +8153,11 @@ DONE:
{
switch (id->idInsFmt())
{
- case IF_RWR_ARD:
- emitGCregDeadUpd(id->idReg1(), dst);
- break;
- default:
- break;
+ case IF_RWR_ARD:
+ emitGCregDeadUpd(id->idReg1(), dst);
+ break;
+ default:
+ break;
}
if (ins == INS_mulEAX || ins == INS_imulEAX)
@@ -8301,7 +8177,7 @@ DONE:
}
}
- return dst;
+ return dst;
}
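
Most of the reindented switch above is picking a ModRM "mod" field for "[reg + disp]": mod=00 when the displacement is zero (except for an EBP base, which has no disp-less form), mod=01 with a disp8 when the offset fits in a signed byte, and mod=10 with a disp32 otherwise; an ESP base additionally forces a SIB byte of 0x24. Here is a hedged sketch of just that displacement decision, leaving out SIB and relocation handling; encodeDisp and the constant names are illustrative only.

    #include <cstdint>
    #include <vector>

    enum : uint8_t
    {
        MOD_NO_DISP = 0x0, // [reg]
        MOD_DISP8   = 0x1, // [reg + disp8]
        MOD_DISP32  = 0x2  // [reg + disp32]
    };

    // Pick the ModRM.mod value and append the matching displacement bytes.
    static uint8_t encodeDisp(int32_t disp, bool baseIsEbp, std::vector<uint8_t>& out)
    {
        const bool dspIsZero = (disp == 0);
        const bool dspInByte = (disp == (int8_t)disp);

        if (dspIsZero && !baseIsEbp) // EBP has no disp-less encoding; emit [ebp+0] instead
        {
            return MOD_NO_DISP;
        }
        if (dspInByte)
        {
            out.push_back((uint8_t)disp);
            return MOD_DISP8;
        }
        for (int i = 0; i < 4; i++) // little-endian 32-bit displacement
        {
            out.push_back((uint8_t)((uint32_t)disp >> (8 * i)));
        }
        return MOD_DISP32;
    }
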
/*****************************************************************************
@@ -8309,35 +8185,36 @@ DONE:
* Output an instruction involving a stack frame value.
*/
-BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc)
+BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc)
{
- int adr;
- int dsp;
- bool EBPbased;
- bool dspInByte;
- bool dspIsZero;
+ int adr;
+ int dsp;
+ bool EBPbased;
+ bool dspInByte;
+ bool dspIsZero;
- instruction ins = id->idIns();
- emitAttr size = id->idOpSize();
- size_t opsz = EA_SIZE_IN_BYTES(size);
+ instruction ins = id->idIns();
+ emitAttr size = id->idOpSize();
+ size_t opsz = EA_SIZE_IN_BYTES(size);
assert(ins != INS_imul || id->idReg1() == REG_EAX || size == EA_4BYTE || size == EA_8BYTE);
// Is there a large constant operand?
- if (addc && (size > EA_1BYTE))
+ if (addc && (size > EA_1BYTE))
{
ssize_t cval = addc->cnsVal;
// Does the constant fit in a byte?
- if ((signed char)cval == cval &&
+ if ((signed char)cval == cval &&
#ifdef RELOC_SUPPORT
- addc->cnsReloc == false &&
+ addc->cnsReloc == false &&
#endif
- ins != INS_mov &&
- ins != INS_test)
+ ins != INS_mov && ins != INS_test)
{
- if (id->idInsFmt() != IF_SRW_SHF)
+ if (id->idInsFmt() != IF_SRW_SHF)
+ {
code |= 2;
+ }
opsz = 1;
}
@@ -8353,30 +8230,31 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
{
code = AddRexWPrefix(ins, code);
}
-
+
// Special case emitting AVX instructions
if (Is4ByteAVXInstruction(ins))
{
size_t regcode = insEncodeReg345(ins, id->idReg1(), size, &code);
dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
-
+
// Emit last opcode byte
assert((code & 0xFF) == 0);
dst += emitOutputByte(dst, (code >> 8) & 0xFF);
code = regcode;
}
// Is this a 'big' opcode?
- else if (code & 0xFF000000)
+ else if (code & 0xFF000000)
{
// Output the REX prefix
dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
- // Output the highest word of the opcode
+ // Output the highest word of the opcode
// We need to check again because in case of AVX instructions the leading
// escape byte(s) (e.g. 0x0F) will be encoded as part of VEX prefix.
if (code & 0xFF000000)
{
- dst += emitOutputWord(dst, code >> 16); code &= 0x0000FFFF;
+ dst += emitOutputWord(dst, code >> 16);
+ code &= 0x0000FFFF;
}
}
else if (code & 0x00FF0000)
@@ -8389,64 +8267,68 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
// escape byte(s) (e.g. 0x0F) will be encoded as part of VEX prefix.
if (code & 0x00FF0000)
{
- dst += emitOutputByte(dst, code >> 16);
+ dst += emitOutputByte(dst, code >> 16);
code &= 0x0000FFFF;
}
// Use the large version if this is not a byte
- if ((size != EA_1BYTE) && (ins != INS_imul) && (!insIsCMOV(ins))
- && !IsSSE2Instruction(ins) && !IsAVXInstruction(ins))
+ if ((size != EA_1BYTE) && (ins != INS_imul) && (!insIsCMOV(ins)) && !IsSSE2Instruction(ins) &&
+ !IsAVXInstruction(ins))
+ {
code |= 0x1;
+ }
}
else if (CodeGen::instIsFP(ins))
{
assert(size == EA_4BYTE || size == EA_8BYTE);
- if (size == EA_8BYTE)
+ if (size == EA_8BYTE)
+ {
code += 4;
+ }
}
else if (!IsSSE2Instruction(ins) && !IsAVXInstruction(ins))
{
// Is the operand size larger than a byte?
switch (size)
{
- case EA_1BYTE:
- break;
+ case EA_1BYTE:
+ break;
- case EA_2BYTE:
- // Output a size prefix for a 16-bit operand
- dst += emitOutputByte(dst, 0x66);
- __fallthrough;
+ case EA_2BYTE:
+ // Output a size prefix for a 16-bit operand
+ dst += emitOutputByte(dst, 0x66);
+ __fallthrough;
- case EA_4BYTE:
+ case EA_4BYTE:
#ifdef _TARGET_AMD64_
- case EA_8BYTE:
+ case EA_8BYTE:
#endif // _TARGET_AMD64_
- /* Set the 'w' size bit to indicate 32-bit operation
- * Note that incrementing "code" for INS_call (0xFF) would
- * overflow, whereas setting the lower bit to 1 just works out
- */
+ /* Set the 'w' size bit to indicate 32-bit operation
+ * Note that incrementing "code" for INS_call (0xFF) would
+ * overflow, whereas setting the lower bit to 1 just works out
+ */
- code |= 0x01;
- break;
+ code |= 0x01;
+ break;
#ifdef _TARGET_X86_
- case EA_8BYTE:
-
- // Double operand - set the appropriate bit.
- // I don't know what a legitimate reason to end up in this case would be
- // considering that FP is taken care of above...
- // what is an instruction that takes a double which is not covered by the
- // above instIsFP? Of the list in instrsxarch, only INS_fprem
- code |= 0x04;
- NO_WAY("bad 8 byte op");
- break;
+ case EA_8BYTE:
+
+ // Double operand - set the appropriate bit.
+ // I don't know what a legitimate reason to end up in this case would be
+ // considering that FP is taken care of above...
+ // what is an instruction that takes a double which is not covered by the
+ // above instIsFP? Of the list in instrsxarch, only INS_fprem
+ code |= 0x04;
+ NO_WAY("bad 8 byte op");
+ break;
#endif // _TARGET_X86_
- default:
- NO_WAY("unexpected size");
- break;
+ default:
+ NO_WAY("unexpected size");
+ break;
}
}
@@ -8467,13 +8349,13 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
assert(id->idIsDspReloc() == 0);
#endif
- if (EBPbased)
+ if (EBPbased)
{
// EBP-based variable: does the offset fit in a byte?
if (Is4ByteAVXInstruction(ins))
{
- if (dspInByte)
- {
+ if (dspInByte)
+ {
dst += emitOutputByte(dst, code | 0x45);
dst += emitOutputByte(dst, dsp);
}
@@ -8483,10 +8365,10 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
dst += emitOutputLong(dst, dsp);
}
}
- else
+ else
{
- if (dspInByte)
- {
+ if (dspInByte)
+ {
dst += emitOutputWord(dst, code | 0x4500);
dst += emitOutputByte(dst, dsp);
}
@@ -8513,7 +8395,7 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
{
if (dspInByte)
{
- if (dspIsZero)
+ if (dspIsZero)
{
dst += emitOutputByte(dst, code | 0x04);
dst += emitOutputByte(dst, 0x24);
@@ -8536,7 +8418,7 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
{
if (dspInByte)
{
- if (dspIsZero)
+ if (dspIsZero)
{
dst += emitOutputWord(dst, code | 0x0400);
dst += emitOutputByte(dst, 0x24);
@@ -8558,25 +8440,31 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
}
// Now generate the constant value, if present
- if (addc)
+ if (addc)
{
ssize_t cval = addc->cnsVal;
#ifdef _TARGET_AMD64_
// all these opcodes only take a sign-extended 4-byte immediate
- noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
+ noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
#endif
switch (opsz)
{
- case 0:
- case 4:
- case 8: dst += emitOutputLong(dst, cval); break;
- case 2: dst += emitOutputWord(dst, cval); break;
- case 1: dst += emitOutputByte(dst, cval); break;
+ case 0:
+ case 4:
+ case 8:
+ dst += emitOutputLong(dst, cval);
+ break;
+ case 2:
+ dst += emitOutputWord(dst, cval);
+ break;
+ case 1:
+ dst += emitOutputByte(dst, cval);
+ break;
- default:
- assert(!"unexpected operand size");
+ default:
+ assert(!"unexpected operand size");
}
#ifdef RELOC_SUPPORT
@@ -8589,68 +8477,66 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
}
// Does this instruction operate on a GC ref value?
- if (id->idGCref())
+ if (id->idGCref())
{
// Factor in the sub-variable offset
adr += AlignDown(id->idAddr()->iiaLclVar.lvaOffset(), TARGET_POINTER_SIZE);
switch (id->idInsFmt())
{
- case IF_SRD:
- // Read stack -- no change
- break;
+ case IF_SRD:
+ // Read stack -- no change
+ break;
- case IF_SWR: // Stack Write (So we need to update GC live for stack var)
- // Write stack -- GC var may be born
- emitGCvarLiveUpd(adr, varNum, id->idGCref(), dst);
- break;
+ case IF_SWR: // Stack Write (So we need to update GC live for stack var)
+ // Write stack -- GC var may be born
+ emitGCvarLiveUpd(adr, varNum, id->idGCref(), dst);
+ break;
- case IF_SRD_CNS:
- // Read stack -- no change
- break;
+ case IF_SRD_CNS:
+ // Read stack -- no change
+ break;
- case IF_SWR_CNS:
- // Write stack -- no change
- break;
+ case IF_SWR_CNS:
+ // Write stack -- no change
+ break;
- case IF_SRD_RRD:
- case IF_RRD_SRD:
- // Read stack , read register -- no change
- break;
+ case IF_SRD_RRD:
+ case IF_RRD_SRD:
+ // Read stack , read register -- no change
+ break;
- case IF_RWR_SRD: // Register Write, Stack Read (So we need to update GC live for register)
+ case IF_RWR_SRD: // Register Write, Stack Read (So we need to update GC live for register)
- // Read stack , write register -- GC reg may be born
- emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
- break;
+ // Read stack , write register -- GC reg may be born
+ emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
+ break;
- case IF_SWR_RRD: // Stack Write, Register Read (So we need to update GC live for stack var)
- // Read register, write stack -- GC var may be born
- emitGCvarLiveUpd(adr, varNum, id->idGCref(), dst);
- break;
+ case IF_SWR_RRD: // Stack Write, Register Read (So we need to update GC live for stack var)
+ // Read register, write stack -- GC var may be born
+ emitGCvarLiveUpd(adr, varNum, id->idGCref(), dst);
+ break;
- case IF_RRW_SRD: // Register Read/Write, Stack Read (So we need to update GC live for register)
+ case IF_RRW_SRD: // Register Read/Write, Stack Read (So we need to update GC live for register)
- // reg could have been a GCREF as GCREF + int=BYREF
- // or BYREF+/-int=BYREF
- assert(id->idGCref() == GCT_BYREF &&
- (ins == INS_add || ins == INS_sub));
- emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
- break;
+ // reg could have been a GCREF as GCREF + int=BYREF
+ // or BYREF+/-int=BYREF
+ assert(id->idGCref() == GCT_BYREF && (ins == INS_add || ins == INS_sub));
+ emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
+ break;
- case IF_SRW_CNS:
- case IF_SRW_RRD:
+ case IF_SRW_CNS:
+ case IF_SRW_RRD:
// += -= of a byref, no change
- case IF_SRW:
- break;
-
+ case IF_SRW:
+ break;
- default:
-#ifdef DEBUG
- emitDispIns(id, false, false, false);
+ default:
+#ifdef DEBUG
+ emitDispIns(id, false, false, false);
#endif
- assert(!"unexpected GC ref instruction format");
+ assert(!"unexpected GC ref instruction format");
}
}
else
@@ -8662,12 +8548,12 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
{
switch (id->idInsFmt())
{
- case IF_RWR_SRD: // Register Write, Stack Read
- case IF_RRW_SRD: // Register Read/Write, Stack Read
- emitGCregDeadUpd(id->idReg1(), dst);
- break;
- default:
- break;
+ case IF_RWR_SRD: // Register Write, Stack Read
+ case IF_RRW_SRD: // Register Read/Write, Stack Read
+ emitGCregDeadUpd(id->idReg1(), dst);
+ break;
+ default:
+ break;
}
if (ins == INS_mulEAX || ins == INS_imulEAX)
@@ -8687,7 +8573,7 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
}
}
- return dst;
+ return dst;
}
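
emitOutputAM, emitOutputSV and emitOutputCV all finish with the same switch on opsz that appends the trailing immediate: byte, word, or dword, with 8-byte operands still limited to a sign-extended 4-byte immediate (hence the noway_assert above). Below is a standalone sketch of that step; emitImmediate is an illustrative name, not an emitter API.

    #include <cstddef>
    #include <cstdint>

    // Append the instruction's immediate after the addressing bytes and return
    // how many bytes were written. Callers are assumed to have checked that an
    // 8-byte operand's value fits in a sign-extended 32-bit immediate.
    static size_t emitImmediate(uint8_t* dst, int64_t cval, size_t opsz)
    {
        switch (opsz)
        {
            case 0: // "default" operand size
            case 4:
            case 8: // still only 4 immediate bytes on x64
                for (int i = 0; i < 4; i++)
                {
                    *dst++ = (uint8_t)(cval >> (8 * i));
                }
                return 4;
            case 2:
                *dst++ = (uint8_t)cval;
                *dst++ = (uint8_t)(cval >> 8);
                return 2;
            case 1:
                *dst++ = (uint8_t)cval;
                return 1;
            default:
                return 0; // unexpected operand size
        }
    }
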
/*****************************************************************************
@@ -8695,17 +8581,17 @@ BYTE* emitter::emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
* Output an instruction with a static data member (class variable).
*/
-BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc)
+BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc)
{
- BYTE* addr;
- CORINFO_FIELD_HANDLE fldh;
- ssize_t offs;
- int doff;
+ BYTE* addr;
+ CORINFO_FIELD_HANDLE fldh;
+ ssize_t offs;
+ int doff;
- emitAttr size = id->idOpSize();
- size_t opsz = EA_SIZE_IN_BYTES(size);
- instruction ins = id->idIns();
- bool isMoffset = false;
+ emitAttr size = id->idOpSize();
+ size_t opsz = EA_SIZE_IN_BYTES(size);
+ instruction ins = id->idIns();
+ bool isMoffset = false;
// Get hold of the field handle and offset
fldh = id->idAddr()->iiaFieldHnd;
@@ -8729,19 +8615,20 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
}
// Is there a large constant operand?
- if (addc && (size > EA_1BYTE))
+ if (addc && (size > EA_1BYTE))
{
ssize_t cval = addc->cnsVal;
// Does the constant fit in a byte?
- if ((signed char)cval == cval &&
+ if ((signed char)cval == cval &&
#ifdef RELOC_SUPPORT
- addc->cnsReloc == false &&
+ addc->cnsReloc == false &&
#endif
- ins != INS_mov &&
- ins != INS_test)
+ ins != INS_mov && ins != INS_test)
{
- if (id->idInsFmt() != IF_MRW_SHF)
+ if (id->idInsFmt() != IF_MRW_SHF)
+ {
code |= 2;
+ }
opsz = 1;
}
@@ -8757,26 +8644,28 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
{
switch (id->idInsFmt())
{
- case IF_RWR_MRD:
+ case IF_RWR_MRD:
- assert((unsigned)code == (insCodeRM(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8) | 0x0500));
+ assert((unsigned)code ==
+ (insCodeRM(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8) | 0x0500));
- code &= ~((size_t)0xFFFFFFFF);
- code |= 0xA0;
- isMoffset = true;
- break;
+ code &= ~((size_t)0xFFFFFFFF);
+ code |= 0xA0;
+ isMoffset = true;
+ break;
- case IF_MWR_RRD:
+ case IF_MWR_RRD:
- assert((unsigned)code == (insCodeMR(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8) | 0x0500));
+ assert((unsigned)code ==
+ (insCodeMR(ins) | (insEncodeReg345(ins, REG_EAX, EA_PTRSIZE, NULL) << 8) | 0x0500));
- code &= ~((size_t)0xFFFFFFFF);
- code |= 0xA2;
- isMoffset = true;
- break;
+ code &= ~((size_t)0xFFFFFFFF);
+ code |= 0xA2;
+ isMoffset = true;
+ break;
- default:
- break;
+ default:
+ break;
}
}
}
@@ -8787,7 +8676,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
{
size_t regcode = insEncodeReg345(ins, id->idReg1(), size, &code);
dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
-
+
// Emit last opcode byte
// TODO-XArch-CQ: Right now support 4-byte opcode instructions only
assert((code & 0xFF) == 0);
@@ -8798,20 +8687,20 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
dst += emitOutputByte(dst, regcode | 0x05);
}
// Is this a 'big' opcode?
- else if (code & 0xFF000000)
+ else if (code & 0xFF000000)
{
// Output the REX prefix
dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
// Output the highest word of the opcode.
// Check again since AVX instructions encode leading opcode bytes as part of VEX prefix.
- if (code & 0xFF000000)
+ if (code & 0xFF000000)
{
- dst += emitOutputWord(dst, code >> 16);
+ dst += emitOutputWord(dst, code >> 16);
}
code &= 0x0000FFFF;
}
- else if(code & 0x00FF0000)
+ else if (code & 0x00FF0000)
{
// Output the REX prefix
dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
@@ -8819,11 +8708,13 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
// Check again as VEX prefix would have encoded leading opcode byte
if (code & 0x00FF0000)
{
- dst += emitOutputByte(dst, code >> 16); code &= 0x0000FFFF;
+ dst += emitOutputByte(dst, code >> 16);
+ code &= 0x0000FFFF;
}
- if ((ins == INS_movsx || ins == INS_movzx || ins == INS_cmpxchg || ins == INS_xchg || ins == INS_xadd || insIsCMOV(ins)) &&
- size != EA_1BYTE)
+ if ((ins == INS_movsx || ins == INS_movzx || ins == INS_cmpxchg || ins == INS_xchg || ins == INS_xadd ||
+ insIsCMOV(ins)) &&
+ size != EA_1BYTE)
{
// movsx and movzx are 'big' opcodes but also have the 'w' bit
code++;
@@ -8833,39 +8724,41 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
{
assert(size == EA_4BYTE || size == EA_8BYTE);
- if (size == EA_8BYTE)
+ if (size == EA_8BYTE)
+ {
code += 4;
+ }
}
else
{
// Is the operand size larger than a byte?
switch (size)
{
- case EA_1BYTE:
- break;
+ case EA_1BYTE:
+ break;
- case EA_2BYTE:
- // Output a size prefix for a 16-bit operand
- dst += emitOutputByte(dst, 0x66);
- __fallthrough;
+ case EA_2BYTE:
+ // Output a size prefix for a 16-bit operand
+ dst += emitOutputByte(dst, 0x66);
+ __fallthrough;
- case EA_4BYTE:
+ case EA_4BYTE:
#ifdef _TARGET_AMD64_
- case EA_8BYTE:
+ case EA_8BYTE:
#endif
- // Set the 'w' bit to get the large version
- code |= 0x1;
- break;
+ // Set the 'w' bit to get the large version
+ code |= 0x1;
+ break;
#ifdef _TARGET_X86_
- case EA_8BYTE:
- // Double operand - set the appropriate bit
- code |= 0x04;
- break;
+ case EA_8BYTE:
+ // Double operand - set the appropriate bit
+ code |= 0x04;
+ break;
#endif // _TARGET_X86_
- default:
- assert(!"unexpected size");
+ default:
+ assert(!"unexpected size");
}
}
@@ -8874,34 +8767,39 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
if (code)
{
- if (id->idInsFmt() == IF_MRD_OFF ||
- id->idInsFmt() == IF_RWR_MRD_OFF ||
- isMoffset)
+ if (id->idInsFmt() == IF_MRD_OFF || id->idInsFmt() == IF_RWR_MRD_OFF || isMoffset)
+ {
dst += emitOutputByte(dst, code);
+ }
else
+ {
dst += emitOutputWord(dst, code);
+ }
}
// Do we have a constant or a static data member?
doff = Compiler::eeGetJitDataOffs(fldh);
- if (doff >= 0)
+ if (doff >= 0)
{
addr = emitConsBlock + doff;
int byteSize = EA_SIZE_IN_BYTES(size);
#ifndef LEGACY_BACKEND
- // this instruction has a fixed size (4) src.
+ // this instruction has a fixed size (4) src.
if (ins == INS_cvttss2si || ins == INS_cvtss2sd || ins == INS_vbroadcastss)
+ {
byteSize = 4;
+ }
// This has a fixed size (8) source.
if (ins == INS_vbroadcastsd)
+ {
byteSize = 8;
+ }
#endif // !LEGACY_BACKEND
// Check that the offset is properly aligned (i.e. the ddd in [ddd])
- assert((emitChkAlign==false) || (ins == INS_lea)
- || (((size_t)addr & (byteSize - 1)) == 0));
+ assert((emitChkAlign == false) || (ins == INS_lea) || (((size_t)addr & (byteSize - 1)) == 0));
}
else
{
@@ -8928,7 +8826,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
if (addc)
{
- // It is of the form "ins [disp], immed"
+ // It is of the form "ins [disp], immed"
            // For emitting relocation, we also need to take into account the
            // additional bytes of code emitted for the immediate value.
@@ -8937,21 +8835,27 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
#ifdef _TARGET_AMD64_
// all these opcodes only take a sign-extended 4-byte immediate
noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
-#else
+#else
noway_assert(opsz <= 4);
#endif
switch (opsz)
{
- case 0:
- case 4:
- case 8: addlDelta = -4; break;
- case 2: addlDelta = -2; break;
- case 1: addlDelta = -1; break;
+ case 0:
+ case 4:
+ case 8:
+ addlDelta = -4;
+ break;
+ case 2:
+ addlDelta = -2;
+ break;
+ case 1:
+ addlDelta = -1;
+ break;
- default:
- assert(!"unexpected operand size");
- unreached();
+ default:
+ assert(!"unexpected operand size");
+ unreached();
}
}
@@ -8959,7 +8863,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
// All static field and data section constant accesses should be marked as relocatable
noway_assert(id->idIsDspReloc());
dst += emitOutputLong(dst, 0);
-#else //_TARGET_X86_
+#else //_TARGET_X86_
dst += emitOutputLong(dst, (int)target);
#endif //_TARGET_X86_
@@ -8992,25 +8896,31 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
}
// Now generate the constant value, if present
- if (addc)
+ if (addc)
{
ssize_t cval = addc->cnsVal;
#ifdef _TARGET_AMD64_
// all these opcodes only take a sign-extended 4-byte immediate
- noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
+ noway_assert(opsz < 8 || ((int)cval == cval && !addc->cnsReloc));
#endif
switch (opsz)
{
- case 0:
- case 4:
- case 8: dst += emitOutputLong(dst, cval); break;
- case 2: dst += emitOutputWord(dst, cval); break;
- case 1: dst += emitOutputByte(dst, cval); break;
+ case 0:
+ case 4:
+ case 8:
+ dst += emitOutputLong(dst, cval);
+ break;
+ case 2:
+ dst += emitOutputWord(dst, cval);
+ break;
+ case 1:
+ dst += emitOutputByte(dst, cval);
+ break;
- default:
- assert(!"unexpected operand size");
+ default:
+ assert(!"unexpected operand size");
}
#ifdef RELOC_SUPPORT
if (addc->cnsReloc)
@@ -9022,46 +8932,46 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
}
// Does this instruction operate on a GC ref value?
- if (id->idGCref())
+ if (id->idGCref())
{
switch (id->idInsFmt())
{
- case IF_MRD:
- case IF_MRW:
- case IF_MWR:
- break;
+ case IF_MRD:
+ case IF_MRW:
+ case IF_MWR:
+ break;
- case IF_RRD_MRD:
- break;
+ case IF_RRD_MRD:
+ break;
- case IF_RWR_MRD:
- emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
- break;
+ case IF_RWR_MRD:
+ emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
+ break;
- case IF_MRD_RRD:
- case IF_MWR_RRD:
- case IF_MRW_RRD:
- break;
+ case IF_MRD_RRD:
+ case IF_MWR_RRD:
+ case IF_MRW_RRD:
+ break;
- case IF_MRD_CNS:
- case IF_MWR_CNS:
- case IF_MRW_CNS:
- break;
+ case IF_MRD_CNS:
+ case IF_MWR_CNS:
+ case IF_MRW_CNS:
+ break;
- case IF_RRW_MRD:
+ case IF_RRW_MRD:
- assert(id->idGCref() == GCT_BYREF);
- assert(ins == INS_add || ins == INS_sub);
+ assert(id->idGCref() == GCT_BYREF);
+ assert(ins == INS_add || ins == INS_sub);
- // Mark it as holding a GCT_BYREF
- emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
- break;
+ // Mark it as holding a GCT_BYREF
+ emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
+ break;
- default:
-#ifdef DEBUG
- emitDispIns(id, false, false, false);
+ default:
+#ifdef DEBUG
+ emitDispIns(id, false, false, false);
#endif
- assert(!"unexpected GC ref instruction format");
+ assert(!"unexpected GC ref instruction format");
}
}
else
@@ -9073,11 +8983,11 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
{
switch (id->idInsFmt())
{
- case IF_RWR_MRD:
- emitGCregDeadUpd(id->idReg1(), dst);
- break;
- default:
- break;
+ case IF_RWR_MRD:
+ emitGCregDeadUpd(id->idReg1(), dst);
+ break;
+ default:
+ break;
}
if (ins == INS_mulEAX || ins == INS_imulEAX)
@@ -9097,7 +9007,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
}
}
- return dst;
+ return dst;
}
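
Earlier in emitOutputCV, a mov between the accumulator and a direct address is rewritten onto the short A0..A3 "moffset" opcodes: the normal opcode is masked off, 0xA0 (load) or 0xA2 (store) is OR-ed in, and the later size handling sets the 'w' bit to reach A1/A3. A small hedged sketch of that opcode choice follows; moffsetOpcode is a made-up helper for illustration.

    #include <cstdint>

    // A0: AL <- [moffs], A2: [moffs] <- AL; setting the 'w' bit gives A1/A3
    // for the full-width accumulator.
    static uint8_t moffsetOpcode(bool loadIntoAccumulator, bool byteSized)
    {
        uint8_t op = loadIntoAccumulator ? 0xA0 : 0xA2;
        if (!byteSized)
        {
            op |= 0x1; // 'w' bit: A1 / A3
        }
        return op;
    }
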
/*****************************************************************************
@@ -9105,185 +9015,189 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal*
* Output an instruction with one register operand.
*/
-BYTE* emitter::emitOutputR(BYTE* dst, instrDesc* id)
+BYTE* emitter::emitOutputR(BYTE* dst, instrDesc* id)
{
- size_t code;
+ size_t code;
- instruction ins = id->idIns();
- regNumber reg = id->idReg1();
- emitAttr size = id->idOpSize();
+ instruction ins = id->idIns();
+ regNumber reg = id->idReg1();
+ emitAttr size = id->idOpSize();
    // We would like to update GC info correctly
- assert(!IsSSE2Instruction(ins));
- assert(!IsAVXInstruction(ins));
+ assert(!IsSSE2Instruction(ins));
+ assert(!IsAVXInstruction(ins));
- // Get the 'base' opcode
+ // Get the 'base' opcode
switch (ins)
{
- case INS_inc:
- case INS_dec:
+ case INS_inc:
+ case INS_dec:
#ifdef _TARGET_AMD64_
- if (true)
+ if (true)
#else
- if (size == EA_1BYTE)
+ if (size == EA_1BYTE)
#endif
- {
- assert(INS_inc_l == INS_inc + 1);
- assert(INS_dec_l == INS_dec + 1);
-
- // Can't use the compact form, use the long form
- ins = (instruction)(ins + 1);
- if (size == EA_2BYTE)
{
- // Output a size prefix for a 16-bit operand
- dst += emitOutputByte(dst, 0x66);
- }
+ assert(INS_inc_l == INS_inc + 1);
+ assert(INS_dec_l == INS_dec + 1);
- code = insCodeRR(ins);
- if (size != EA_1BYTE)
- {
- // Set the 'w' bit to get the large version
- code |= 0x1;
- }
+ // Can't use the compact form, use the long form
+ ins = (instruction)(ins + 1);
+ if (size == EA_2BYTE)
+ {
+ // Output a size prefix for a 16-bit operand
+ dst += emitOutputByte(dst, 0x66);
+ }
- if (TakesRexWPrefix(ins, size))
- {
- code = AddRexWPrefix(ins, code);
- }
+ code = insCodeRR(ins);
+ if (size != EA_1BYTE)
+ {
+ // Set the 'w' bit to get the large version
+ code |= 0x1;
+ }
- // Register...
- unsigned regcode = insEncodeReg012(ins, reg, size, &code);
+ if (TakesRexWPrefix(ins, size))
+ {
+ code = AddRexWPrefix(ins, code);
+ }
- // Output the REX prefix
- dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
+ // Register...
+ unsigned regcode = insEncodeReg012(ins, reg, size, &code);
- dst += emitOutputWord(dst, code | (regcode << 8));
- }
- else
- {
- if (size == EA_2BYTE)
+ // Output the REX prefix
+ dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
+
+ dst += emitOutputWord(dst, code | (regcode << 8));
+ }
+ else
{
- // Output a size prefix for a 16-bit operand
- dst += emitOutputByte(dst, 0x66);
+ if (size == EA_2BYTE)
+ {
+ // Output a size prefix for a 16-bit operand
+ dst += emitOutputByte(dst, 0x66);
+ }
+ dst += emitOutputByte(dst, insCodeRR(ins) | insEncodeReg012(ins, reg, size, nullptr));
}
- dst += emitOutputByte(dst, insCodeRR(ins ) | insEncodeReg012(ins, reg, size, NULL));
- }
- break;
+ break;
- case INS_pop:
- case INS_pop_hide:
- case INS_push:
- case INS_push_hide:
+ case INS_pop:
+ case INS_pop_hide:
+ case INS_push:
+ case INS_push_hide:
- assert(size == EA_PTRSIZE);
- code = insEncodeOpreg(ins, reg, size);
+ assert(size == EA_PTRSIZE);
+ code = insEncodeOpreg(ins, reg, size);
- assert(!TakesVexPrefix(ins));
- assert(!TakesRexWPrefix(ins, size));
+ assert(!TakesVexPrefix(ins));
+ assert(!TakesRexWPrefix(ins, size));
- // Output the REX prefix
- dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
+ // Output the REX prefix
+ dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
- dst += emitOutputByte(dst, code);
- break;
+ dst += emitOutputByte(dst, code);
+ break;
- case INS_seto:
- case INS_setno:
- case INS_setb:
- case INS_setae:
- case INS_sete:
- case INS_setne:
- case INS_setbe:
- case INS_seta:
- case INS_sets:
- case INS_setns:
- case INS_setpe:
- case INS_setpo:
- case INS_setl:
- case INS_setge:
- case INS_setle:
- case INS_setg:
+ case INS_seto:
+ case INS_setno:
+ case INS_setb:
+ case INS_setae:
+ case INS_sete:
+ case INS_setne:
+ case INS_setbe:
+ case INS_seta:
+ case INS_sets:
+ case INS_setns:
+ case INS_setpe:
+ case INS_setpo:
+ case INS_setl:
+ case INS_setge:
+ case INS_setle:
+ case INS_setg:
+
+ assert(id->idGCref() == GCT_NONE);
+ assert(size == EA_1BYTE);
+
+ code = insEncodeMRreg(ins, reg, EA_1BYTE, insCodeMR(ins));
- assert(id->idGCref() == GCT_NONE);
- assert(size == EA_1BYTE);
+ // Output the REX prefix
+ dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
- code = insEncodeMRreg(ins, reg, EA_1BYTE, insCodeMR(ins));
+ // We expect this to always be a 'big' opcode
+ assert(code & 0x00FF0000);
- // Output the REX prefix
- dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
+ dst += emitOutputByte(dst, code >> 16);
+ dst += emitOutputWord(dst, code & 0x0000FFFF);
- // We expect this to always be a 'big' opcode
- assert(code & 0x00FF0000);
+ break;
- dst += emitOutputByte(dst, code >> 16);
- dst += emitOutputWord(dst, code & 0x0000FFFF);
+ case INS_mulEAX:
+ case INS_imulEAX:
- break;
+ // Kill off any GC refs in EAX or EDX
+ emitGCregDeadUpd(REG_EAX, dst);
+ emitGCregDeadUpd(REG_EDX, dst);
- case INS_mulEAX:
- case INS_imulEAX:
+ __fallthrough;
- // Kill off any GC refs in EAX or EDX
- emitGCregDeadUpd(REG_EAX, dst);
- emitGCregDeadUpd(REG_EDX, dst);
+ default:
- __fallthrough;
+ assert(id->idGCref() == GCT_NONE);
- default:
+ code = insEncodeMRreg(ins, reg, size, insCodeMR(ins));
- assert(id->idGCref() == GCT_NONE);
+ if (size != EA_1BYTE)
+ {
+ // Set the 'w' bit to get the large version
+ code |= 0x1;
- code = insEncodeMRreg(ins, reg, size, insCodeMR(ins));
+ if (size == EA_2BYTE)
+ {
+ // Output a size prefix for a 16-bit operand
+ dst += emitOutputByte(dst, 0x66);
+ }
+ }
- if (size != EA_1BYTE)
- {
- // Set the 'w' bit to get the large version
- code |= 0x1;
+ code = AddVexPrefixIfNeeded(ins, code, size);
- if (size == EA_2BYTE)
+ if (TakesRexWPrefix(ins, size))
{
- // Output a size prefix for a 16-bit operand
- dst += emitOutputByte(dst, 0x66);
+ code = AddRexWPrefix(ins, code);
}
- }
-
- code = AddVexPrefixIfNeeded(ins, code, size);
-
- if (TakesRexWPrefix(ins, size))
- {
- code = AddRexWPrefix(ins, code);
- }
- // Output the REX prefix
- dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
+ // Output the REX prefix
+ dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
- dst += emitOutputWord(dst, code);
- break;
+ dst += emitOutputWord(dst, code);
+ break;
}
// Are we writing the register? if so then update the GC information
switch (id->idInsFmt())
{
- case IF_RRD:
- break;
- case IF_RWR:
- if (id->idGCref())
- emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
- else
- emitGCregDeadUpd(id->idReg1(), dst);
- break;
- case IF_RRW:
+ case IF_RRD:
+ break;
+ case IF_RWR:
+ if (id->idGCref())
+ {
+ emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
+ }
+ else
+ {
+ emitGCregDeadUpd(id->idReg1(), dst);
+ }
+ break;
+ case IF_RRW:
{
#ifdef DEBUG
regMaskTP regMask = genRegMask(reg);
#endif
- if (id->idGCref())
+ if (id->idGCref())
{
// The reg must currently be holding either a gcref or a byref
// and the instruction must be inc or dec
assert(((emitThisGCrefRegs | emitThisByrefRegs) & regMask) &&
- (ins == INS_inc || ins == INS_dec || ins == INS_inc_l || ins == INS_dec_l));
+ (ins == INS_inc || ins == INS_dec || ins == INS_inc_l || ins == INS_dec_l));
assert(id->idGCref() == GCT_BYREF);
// Mark it as holding a GCT_BYREF
emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
@@ -9296,15 +9210,15 @@ BYTE* emitter::emitOutputR(BYTE* dst, instrDesc* id)
}
}
break;
- default:
-#ifdef DEBUG
- emitDispIns(id, false, false, false);
+ default:
+#ifdef DEBUG
+ emitDispIns(id, false, false, false);
#endif
- assert(!"unexpected instruction format");
- break;
+ assert(!"unexpected instruction format");
+ break;
}
- return dst;
+ return dst;
}
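
The INS_push/INS_pop case above goes through insEncodeOpreg, which folds the register number into the opcode byte itself instead of spending a ModRM byte. A self-contained sketch of that form, assuming hypothetical emitPushReg/emitPopReg helpers and 0-based register numbering (r8-r15 need a REX.B prefix):

    #include <cstdint>
    #include <vector>

    // push r64 is encoded as 0x50 + (reg & 7); registers 8..15 need REX.B.
    static void emitPushReg(std::vector<uint8_t>& out, unsigned regNum /* 0..15 */)
    {
        if (regNum >= 8)
        {
            out.push_back(0x41); // REX.B
        }
        out.push_back((uint8_t)(0x50 + (regNum & 7)));
    }

    // pop r64 works the same way from base opcode 0x58.
    static void emitPopReg(std::vector<uint8_t>& out, unsigned regNum /* 0..15 */)
    {
        if (regNum >= 8)
        {
            out.push_back(0x41);
        }
        out.push_back((uint8_t)(0x58 + (regNum & 7)));
    }
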
/*****************************************************************************
@@ -9312,14 +9226,14 @@ BYTE* emitter::emitOutputR(BYTE* dst, instrDesc* id)
* Output an instruction with two register operands.
*/
-BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id)
+BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id)
{
- size_t code;
+ size_t code;
- instruction ins = id->idIns();
- regNumber reg1 = id->idReg1();
- regNumber reg2 = id->idReg2();
- emitAttr size = id->idOpSize();
+ instruction ins = id->idIns();
+ regNumber reg1 = id->idReg1();
+ regNumber reg2 = id->idReg2();
+ emitAttr size = id->idOpSize();
// Get the 'base' opcode
code = insCodeRM(ins);
@@ -9361,42 +9275,41 @@ BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id)
switch (size)
{
- case EA_1BYTE:
- noway_assert(RBM_BYTE_REGS & genRegMask(reg1));
- noway_assert(RBM_BYTE_REGS & genRegMask(reg2));
- break;
+ case EA_1BYTE:
+ noway_assert(RBM_BYTE_REGS & genRegMask(reg1));
+ noway_assert(RBM_BYTE_REGS & genRegMask(reg2));
+ break;
- case EA_2BYTE:
- // Output a size prefix for a 16-bit operand
- dst += emitOutputByte(dst, 0x66);
- __fallthrough;
+ case EA_2BYTE:
+ // Output a size prefix for a 16-bit operand
+ dst += emitOutputByte(dst, 0x66);
+ __fallthrough;
- case EA_4BYTE:
- // Set the 'w' bit to get the large version
- code |= 0x1;
- break;
+ case EA_4BYTE:
+ // Set the 'w' bit to get the large version
+ code |= 0x1;
+ break;
#ifdef _TARGET_AMD64_
- case EA_8BYTE:
- // TODO-AMD64-CQ: Better way to not emit REX.W when we don't need it
- // Don't need to zero out the high bits explicitly
- if ((ins != INS_xor) || (reg1 != reg2))
- {
- code = AddRexWPrefix(ins, code);
- }
+ case EA_8BYTE:
+ // TODO-AMD64-CQ: Better way to not emit REX.W when we don't need it
+ // Don't need to zero out the high bits explicitly
+ if ((ins != INS_xor) || (reg1 != reg2))
+ {
+ code = AddRexWPrefix(ins, code);
+ }
- // Set the 'w' bit to get the large version
- code |= 0x1;
- break;
+ // Set the 'w' bit to get the large version
+ code |= 0x1;
+ break;
#endif // _TARGET_AMD64_
- default:
- assert(!"unexpected size");
+ default:
+ assert(!"unexpected size");
}
}
-
unsigned regCode = insEncodeReg345(ins, reg1, size, &code);
regCode |= insEncodeReg012(ins, reg2, size, &code);
@@ -9425,11 +9338,13 @@ BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id)
if (code & 0xFF000000)
{
// Output the highest word of the opcode
- dst += emitOutputWord(dst, code >> 16); code &= 0x0000FFFF;
+ dst += emitOutputWord(dst, code >> 16);
+ code &= 0x0000FFFF;
}
- else if(code & 0x00FF0000)
+ else if (code & 0x00FF0000)
{
- dst += emitOutputByte(dst, code >> 16); code &= 0x0000FFFF;
+ dst += emitOutputByte(dst, code >> 16);
+ code &= 0x0000FFFF;
}
// If byte 4 is 0xC0, then it contains the Mod/RM encoding for a 3-byte
@@ -9462,133 +9377,139 @@ BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id)
}
// Does this instruction operate on a GC ref value?
- if (id->idGCref())
+ if (id->idGCref())
{
switch (id->idInsFmt())
{
- case IF_RRD_RRD:
- break;
-
- case IF_RWR_RRD:
+ case IF_RRD_RRD:
+ break;
- if (emitSyncThisObjReg != REG_NA && emitIGisInProlog(emitCurIG) &&
- reg2 == (int)REG_ARG_0)
- {
- // We're relocating "this" in the prolog
- assert(emitComp->lvaIsOriginalThisArg(0));
- assert(emitComp->lvaTable[0].lvRegister);
- assert(emitComp->lvaTable[0].lvRegNum == reg1);
+ case IF_RWR_RRD:
- if (emitFullGCinfo)
- {
- emitGCregLiveSet(id->idGCref(), genRegMask(reg1), dst, true);
- break;
- }
- else
+ if (emitSyncThisObjReg != REG_NA && emitIGisInProlog(emitCurIG) && reg2 == (int)REG_ARG_0)
{
- /* If emitFullGCinfo==false, the we don't use any
- regPtrDsc's and so explictly note the location
- of "this" in GCEncode.cpp
- */
- }
- }
+ // We're relocating "this" in the prolog
+ assert(emitComp->lvaIsOriginalThisArg(0));
+ assert(emitComp->lvaTable[0].lvRegister);
+ assert(emitComp->lvaTable[0].lvRegNum == reg1);
- emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
- break;
+ if (emitFullGCinfo)
+ {
+ emitGCregLiveSet(id->idGCref(), genRegMask(reg1), dst, true);
+ break;
+ }
+ else
+ {
+                    /* If emitFullGCinfo==false, then we don't use any
+                       regPtrDsc's and so explicitly note the location
+                       of "this" in GCEncode.cpp
+ */
+ }
+ }
- case IF_RRW_RRD:
+ emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
+ break;
+ case IF_RRW_RRD:
- switch (id->idIns())
- {
- /*
- This must be one of the following cases:
+ switch (id->idIns())
+ {
+ /*
+ This must be one of the following cases:
- xor reg, reg to assign NULL
+ xor reg, reg to assign NULL
- and r1 , r2 if (ptr1 && ptr2) ...
- or r1 , r2 if (ptr1 || ptr2) ...
+ and r1 , r2 if (ptr1 && ptr2) ...
+ or r1 , r2 if (ptr1 || ptr2) ...
- add r1 , r2 to compute a normal byref
- sub r1 , r2 to compute a strange byref (VC only)
+ add r1 , r2 to compute a normal byref
+ sub r1 , r2 to compute a strange byref (VC only)
- */
- case INS_xor:
- assert(id->idReg1() == id->idReg2());
- emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
- break;
+ */
+ case INS_xor:
+ assert(id->idReg1() == id->idReg2());
+ emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
+ break;
- case INS_or:
- case INS_and:
- emitGCregDeadUpd(id->idReg1(), dst);
- break;
+ case INS_or:
+ case INS_and:
+ emitGCregDeadUpd(id->idReg1(), dst);
+ break;
- case INS_add:
- case INS_sub:
- assert(id->idGCref() == GCT_BYREF);
+ case INS_add:
+ case INS_sub:
+ assert(id->idGCref() == GCT_BYREF);
#ifdef DEBUG
- regMaskTP regMask;
- regMask = genRegMask(reg1) | genRegMask(reg2);
+ regMaskTP regMask;
+ regMask = genRegMask(reg1) | genRegMask(reg2);
- // r1/r2 could have been a GCREF as GCREF + int=BYREF
- // or BYREF+/-int=BYREF
- assert(((regMask & emitThisGCrefRegs) && (ins == INS_add )) ||
- ((regMask & emitThisByrefRegs) && (ins == INS_add || ins == INS_sub)));
+ // r1/r2 could have been a GCREF as GCREF + int=BYREF
+ // or BYREF+/-int=BYREF
+ assert(((regMask & emitThisGCrefRegs) && (ins == INS_add)) ||
+ ((regMask & emitThisByrefRegs) && (ins == INS_add || ins == INS_sub)));
#endif
- // Mark r1 as holding a byref
- emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
- break;
+ // Mark r1 as holding a byref
+ emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
+ break;
- default:
-#ifdef DEBUG
- emitDispIns(id, false, false, false);
+ default:
+#ifdef DEBUG
+ emitDispIns(id, false, false, false);
#endif
- assert(!"unexpected GC reg update instruction");
- }
+ assert(!"unexpected GC reg update instruction");
+ }
- break;
+ break;
- case IF_RRW_RRW:
- // This must be "xchg reg1, reg2"
- assert(id->idIns() == INS_xchg);
+ case IF_RRW_RRW:
+ // This must be "xchg reg1, reg2"
+ assert(id->idIns() == INS_xchg);
- // If we got here, the GC-ness of the registers doesn't match, so we have to "swap" them in the GC
- // register pointer mask.
- CLANG_FORMAT_COMMENT_ANCHOR;
+ // If we got here, the GC-ness of the registers doesn't match, so we have to "swap" them in the GC
+ // register pointer mask.
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef LEGACY_BACKEND
- GCtype gc1, gc2;
+ GCtype gc1, gc2;
- gc1 = emitRegGCtype(reg1);
- gc2 = emitRegGCtype(reg2);
+ gc1 = emitRegGCtype(reg1);
+ gc2 = emitRegGCtype(reg2);
- if (gc1 != gc2)
- {
- // Kill the GC-info about the GC registers
+ if (gc1 != gc2)
+ {
+ // Kill the GC-info about the GC registers
- if (needsGC(gc1))
- emitGCregDeadUpd(reg1, dst);
+ if (needsGC(gc1))
+ {
+ emitGCregDeadUpd(reg1, dst);
+ }
- if (needsGC(gc2))
- emitGCregDeadUpd(reg2, dst);
+ if (needsGC(gc2))
+ {
+ emitGCregDeadUpd(reg2, dst);
+ }
- // Now, swap the info
+ // Now, swap the info
- if (needsGC(gc1))
- emitGCregLiveUpd(gc1, reg2, dst);
+ if (needsGC(gc1))
+ {
+ emitGCregLiveUpd(gc1, reg2, dst);
+ }
- if (needsGC(gc2))
- emitGCregLiveUpd(gc2, reg1, dst);
- }
+ if (needsGC(gc2))
+ {
+ emitGCregLiveUpd(gc2, reg1, dst);
+ }
+ }
#endif // !LEGACY_BACKEND
- break;
+ break;
- default:
-#ifdef DEBUG
- emitDispIns(id, false, false, false);
+ default:
+#ifdef DEBUG
+ emitDispIns(id, false, false, false);
#endif
- assert(!"unexpected GC ref instruction format");
+ assert(!"unexpected GC ref instruction format");
}
}
else
@@ -9600,54 +9521,54 @@ BYTE* emitter::emitOutputRR(BYTE* dst, instrDesc* id)
{
switch (id->idInsFmt())
{
- case IF_RRD_CNS:
- // INS_mulEAX can not be used with any of these formats
- assert(ins != INS_mulEAX && ins != INS_imulEAX);
+ case IF_RRD_CNS:
+ // INS_mulEAX can not be used with any of these formats
+ assert(ins != INS_mulEAX && ins != INS_imulEAX);
- // For the three operand imul instruction the target
- // register is encoded in the opcode
+ // For the three operand imul instruction the target
+ // register is encoded in the opcode
- if (instrIs3opImul(ins))
- {
- regNumber tgtReg = inst3opImulReg(ins);
- emitGCregDeadUpd(tgtReg, dst);
- }
- break;
+ if (instrIs3opImul(ins))
+ {
+ regNumber tgtReg = inst3opImulReg(ins);
+ emitGCregDeadUpd(tgtReg, dst);
+ }
+ break;
- case IF_RWR_RRD:
- case IF_RRW_RRD:
- // INS_movxmm2i writes to reg2.
- if (ins == INS_mov_xmm2i)
- {
- emitGCregDeadUpd(id->idReg2(), dst);
- }
- else
- {
- emitGCregDeadUpd(id->idReg1(), dst);
- }
- break;
+ case IF_RWR_RRD:
+ case IF_RRW_RRD:
+ // INS_movxmm2i writes to reg2.
+ if (ins == INS_mov_xmm2i)
+ {
+ emitGCregDeadUpd(id->idReg2(), dst);
+ }
+ else
+ {
+ emitGCregDeadUpd(id->idReg1(), dst);
+ }
+ break;
- default:
- break;
+ default:
+ break;
}
}
}
- return dst;
+ return dst;
}
#ifdef FEATURE_AVX_SUPPORT
-BYTE* emitter::emitOutputRRR(BYTE* dst, instrDesc* id)
+BYTE* emitter::emitOutputRRR(BYTE* dst, instrDesc* id)
{
- size_t code;
+ size_t code;
- instruction ins = id->idIns();
+ instruction ins = id->idIns();
assert(IsAVXInstruction(ins));
assert(IsThreeOperandAVXInstruction(ins));
- regNumber targetReg = id->idReg1();
- regNumber src1 = id->idReg2();
- regNumber src2 = id->idReg3();
- emitAttr size = id->idOpSize();
+ regNumber targetReg = id->idReg1();
+ regNumber src1 = id->idReg2();
+ regNumber src2 = id->idReg3();
+ emitAttr size = id->idOpSize();
code = insCodeRM(ins);
code = AddVexPrefixIfNeeded(ins, code, size);
@@ -9670,11 +9591,13 @@ BYTE* emitter::emitOutputRRR(BYTE* dst, instrDesc* id)
if (code & 0xFF000000)
{
// Output the highest word of the opcode
- dst += emitOutputWord(dst, code >> 16); code &= 0x0000FFFF;
+ dst += emitOutputWord(dst, code >> 16);
+ code &= 0x0000FFFF;
}
else if (code & 0x00FF0000)
{
- dst += emitOutputByte(dst, code >> 16); code &= 0x0000FFFF;
+ dst += emitOutputByte(dst, code >> 16);
+ code &= 0x0000FFFF;
}
// If byte 4 is 0xC0, then it contains the Mod/RM encoding for a 3-byte
@@ -9712,25 +9635,24 @@ BYTE* emitter::emitOutputRRR(BYTE* dst, instrDesc* id)
}
#endif
-
/*****************************************************************************
*
* Output an instruction with a register and constant operands.
*/
-BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
+BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
{
- size_t code;
- emitAttr size = id->idOpSize();
- instruction ins = id->idIns();
- regNumber reg = id->idReg1();
- ssize_t val = emitGetInsSC(id);
- bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test);
+ size_t code;
+ emitAttr size = id->idOpSize();
+ instruction ins = id->idIns();
+ regNumber reg = id->idReg1();
+ ssize_t val = emitGetInsSC(id);
+ bool valInByte = ((signed char)val == val) && (ins != INS_mov) && (ins != INS_test);
#ifdef RELOC_SUPPORT
if (id->idIsCnsReloc())
{
- valInByte = false; // relocs can't be placed in a byte
+ valInByte = false; // relocs can't be placed in a byte
}
#endif
@@ -9754,7 +9676,7 @@ BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
{
// The 'vvvv' bits encode the destination register, which for this case (RI)
// is the same as the source.
- code = insEncodeReg3456(ins, reg, size, code);
+ code = insEncodeReg3456(ins, reg, size, code);
}
// In case of psrldq
@@ -9764,16 +9686,20 @@ BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
// In case of pslldq
// Reg/Opcode = 7
// R/M = reg1
- regNumber regOpcode = (regNumber) ((ins == INS_psrldq) ? 3 : 7);
+ regNumber regOpcode = (regNumber)((ins == INS_psrldq) ? 3 : 7);
unsigned regcode = (insEncodeReg345(ins, regOpcode, size, &code) | insEncodeReg012(ins, reg, size, &code)) << 8;
// Output the REX prefix
dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
- if (code & 0xFF000000)
+ if (code & 0xFF000000)
+ {
dst += emitOutputWord(dst, code >> 16);
+ }
else if (code & 0xFF0000)
+ {
dst += emitOutputByte(dst, code >> 16);
+ }
dst += emitOutputWord(dst, code | regcode);
@@ -9784,12 +9710,12 @@ BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
#endif // !LEGACY_BACKEND
// The 'mov' opcode is special
- if (ins == INS_mov)
+ if (ins == INS_mov)
{
code = insCodeACC(ins);
assert(code < 0x100);
- code |= 0x08; // Set the 'w' bit
+ code |= 0x08; // Set the 'w' bit
unsigned regcode = insEncodeReg012(ins, reg, size, &code);
code |= regcode;
@@ -9827,46 +9753,56 @@ BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
}
// Decide which encoding is the shortest
- bool useSigned, useACC;
+ bool useSigned, useACC;
if (reg == REG_EAX && !instrIs3opImul(ins))
{
- if (size == EA_1BYTE || (ins == INS_test))
- {
- // For al, ACC encoding is always the smallest
- useSigned = false; useACC = true;
- }
- else
- {
- /* For ax/eax, we avoid ACC encoding for small constants as we
- * can emit the small constant and have it sign-extended.
- * For big constants, the ACC encoding is better as we can use
- * the 1 byte opcode
- */
-
- if (valInByte)
- {
- // avoid using ACC encoding
- useSigned = true; useACC = false;
- }
- else
- {
- useSigned = false; useACC = true;
- }
- }
+ if (size == EA_1BYTE || (ins == INS_test))
+ {
+ // For al, ACC encoding is always the smallest
+ useSigned = false;
+ useACC = true;
+ }
+ else
+ {
+ /* For ax/eax, we avoid ACC encoding for small constants as we
+ * can emit the small constant and have it sign-extended.
+ * For big constants, the ACC encoding is better as we can use
+ * the 1 byte opcode
+ */
+
+ if (valInByte)
+ {
+ // avoid using ACC encoding
+ useSigned = true;
+ useACC = false;
+ }
+ else
+ {
+ useSigned = false;
+ useACC = true;
+ }
+ }
}
else
{
useACC = false;
if (valInByte)
+ {
useSigned = true;
+ }
else
+ {
useSigned = false;
+ }
}
// "test" has no 's' bit
- if (ins == INS_test) useSigned = false;
+ if (ins == INS_test)
+ {
+ useSigned = false;
+ }
// Get the 'base' opcode
if (useACC)
@@ -9894,65 +9830,78 @@ BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
switch (size)
{
- case EA_1BYTE:
- break;
+ case EA_1BYTE:
+ break;
- case EA_2BYTE:
- // Output a size prefix for a 16-bit operand
- dst += emitOutputByte(dst, 0x66);
- __fallthrough;
+ case EA_2BYTE:
+ // Output a size prefix for a 16-bit operand
+ dst += emitOutputByte(dst, 0x66);
+ __fallthrough;
- case EA_4BYTE:
- // Set the 'w' bit to get the large version
- code |= 0x1;
- break;
+ case EA_4BYTE:
+ // Set the 'w' bit to get the large version
+ code |= 0x1;
+ break;
#ifdef _TARGET_AMD64_
- case EA_8BYTE:
- /* Set the 'w' bit to get the large version */
- /* and the REX.W bit to get the really large version */
+ case EA_8BYTE:
+ /* Set the 'w' bit to get the large version */
+ /* and the REX.W bit to get the really large version */
- code = AddRexWPrefix(ins, code);
- code |= 0x1;
- break;
+ code = AddRexWPrefix(ins, code);
+ code |= 0x1;
+ break;
#endif
- default:
- assert(!"unexpected size");
+ default:
+ assert(!"unexpected size");
}
// Output the REX prefix
dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
- // Does the value fit in a sign-extended byte?
+ // Does the value fit in a sign-extended byte?
// Important! Only set the 's' bit when we have a size larger than EA_1BYTE.
// Note: A sign-extending immediate when (size == EA_1BYTE) is invalid in 64-bit mode.
- if (useSigned && (size > EA_1BYTE))
+ if (useSigned && (size > EA_1BYTE))
{
// We can just set the 's' bit, and issue an immediate byte
- code |= 0x2; // Set the 's' bit to use a sign-extended immediate byte.
+ code |= 0x2; // Set the 's' bit to use a sign-extended immediate byte.
dst += emitOutputWord(dst, code);
dst += emitOutputByte(dst, val);
}
else
{
// Can we use an accumulator (EAX) encoding?
- if (useACC)
+ if (useACC)
+ {
dst += emitOutputByte(dst, code);
+ }
else
+ {
dst += emitOutputWord(dst, code);
+ }
switch (size)
{
- case EA_1BYTE: dst += emitOutputByte(dst, val); break;
- case EA_2BYTE: dst += emitOutputWord(dst, val); break;
- case EA_4BYTE: dst += emitOutputLong(dst, val); break;
+ case EA_1BYTE:
+ dst += emitOutputByte(dst, val);
+ break;
+ case EA_2BYTE:
+ dst += emitOutputWord(dst, val);
+ break;
+ case EA_4BYTE:
+ dst += emitOutputLong(dst, val);
+ break;
#ifdef _TARGET_AMD64_
- case EA_8BYTE: dst += emitOutputLong(dst, val); break;
+ case EA_8BYTE:
+ dst += emitOutputLong(dst, val);
+ break;
#endif // _TARGET_AMD64_
- default: break;
+ default:
+ break;
}
#ifdef RELOC_SUPPORT
@@ -9967,41 +9916,45 @@ BYTE* emitter::emitOutputRI(BYTE* dst, instrDesc* id)
DONE:
// Does this instruction operate on a GC ref value?
- if (id->idGCref())
+ if (id->idGCref())
{
switch (id->idInsFmt())
{
- case IF_RRD_CNS:
- break;
+ case IF_RRD_CNS:
+ break;
- case IF_RWR_CNS:
- emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
- break;
+ case IF_RWR_CNS:
+ emitGCregLiveUpd(id->idGCref(), id->idReg1(), dst);
+ break;
- case IF_RRW_CNS:
- assert(id->idGCref() == GCT_BYREF);
+ case IF_RRW_CNS:
+ assert(id->idGCref() == GCT_BYREF);
#ifdef DEBUG
- regMaskTP regMask;
- regMask = genRegMask(reg);
- // FIXNOW review the other places and relax the assert there too
-
- // The reg must currently be holding either a gcref or a byref
- // GCT_GCREF+int = GCT_BYREF, and GCT_BYREF+/-int = GCT_BYREF
- if (emitThisGCrefRegs & regMask)
- assert(ins == INS_add);
- if (emitThisByrefRegs & regMask)
- assert(ins == INS_add || ins == INS_sub);
+ regMaskTP regMask;
+ regMask = genRegMask(reg);
+ // FIXNOW review the other places and relax the assert there too
+
+ // The reg must currently be holding either a gcref or a byref
+ // GCT_GCREF+int = GCT_BYREF, and GCT_BYREF+/-int = GCT_BYREF
+ if (emitThisGCrefRegs & regMask)
+ {
+ assert(ins == INS_add);
+ }
+ if (emitThisByrefRegs & regMask)
+ {
+ assert(ins == INS_add || ins == INS_sub);
+ }
#endif
- // Mark it as holding a GCT_BYREF
- emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
- break;
+ // Mark it as holding a GCT_BYREF
+ emitGCregLiveUpd(GCT_BYREF, id->idReg1(), dst);
+ break;
- default:
-#ifdef DEBUG
- emitDispIns(id, false, false, false);
+ default:
+#ifdef DEBUG
+ emitDispIns(id, false, false, false);
#endif
- assert(!"unexpected GC ref instruction format");
+ assert(!"unexpected GC ref instruction format");
}
// mul can never produce a GC ref
@@ -10012,34 +9965,33 @@ DONE:
{
switch (id->idInsFmt())
{
- case IF_RRD_CNS:
- // INS_mulEAX can not be used with any of these formats
- assert(ins != INS_mulEAX && ins != INS_imulEAX);
+ case IF_RRD_CNS:
+ // INS_mulEAX can not be used with any of these formats
+ assert(ins != INS_mulEAX && ins != INS_imulEAX);
- // For the three operand imul instruction the target
- // register is encoded in the opcode
+ // For the three operand imul instruction the target
+ // register is encoded in the opcode
- if (instrIs3opImul(ins))
- {
- regNumber tgtReg = inst3opImulReg(ins);
- emitGCregDeadUpd(tgtReg, dst);
- }
- break;
+ if (instrIs3opImul(ins))
+ {
+ regNumber tgtReg = inst3opImulReg(ins);
+ emitGCregDeadUpd(tgtReg, dst);
+ }
+ break;
- case IF_RRW_CNS:
- case IF_RWR_CNS:
- assert(!instrIs3opImul(ins));
+ case IF_RRW_CNS:
+ case IF_RWR_CNS:
+ assert(!instrIs3opImul(ins));
- emitGCregDeadUpd(id->idReg1(), dst);
- break;
+ emitGCregDeadUpd(id->idReg1(), dst);
+ break;
- default:
-#ifdef DEBUG
- emitDispIns(id, false, false, false);
+ default:
+#ifdef DEBUG
+ emitDispIns(id, false, false, false);
#endif
- assert(!"unexpected GC ref instruction format");
+ assert(!"unexpected GC ref instruction format");
}
-
}
return dst;
@@ -10050,17 +10002,17 @@ DONE:
* Output an instruction with a constant operand.
*/
-BYTE* emitter::emitOutputIV(BYTE* dst, instrDesc* id)
+BYTE* emitter::emitOutputIV(BYTE* dst, instrDesc* id)
{
- size_t code;
- instruction ins = id->idIns();
- emitAttr size = id->idOpSize();
- ssize_t val = emitGetInsSC(id);
- bool valInByte = ((signed char)val == val);
+ size_t code;
+ instruction ins = id->idIns();
+ emitAttr size = id->idOpSize();
+ ssize_t val = emitGetInsSC(id);
+ bool valInByte = ((signed char)val == val);
    // We would like to update GC info correctly
- assert(!IsSSE2Instruction(ins));
- assert(!IsAVXInstruction(ins));
+ assert(!IsSSE2Instruction(ins));
+ assert(!IsAVXInstruction(ins));
#ifdef _TARGET_AMD64_
// all these opcodes take a sign-extended 4-byte immediate, max
@@ -10070,7 +10022,7 @@ BYTE* emitter::emitOutputIV(BYTE* dst, instrDesc* id)
#ifdef RELOC_SUPPORT
if (id->idIsCnsReloc())
{
- valInByte = false; // relocs can't be placed in a byte
+ valInByte = false; // relocs can't be placed in a byte
// Of these instructions only the push instruction can have reloc
assert(ins == INS_push || ins == INS_push_hide);
@@ -10079,67 +10031,67 @@ BYTE* emitter::emitOutputIV(BYTE* dst, instrDesc* id)
switch (ins)
{
- case INS_jge:
- assert((val >= -128) && (val <= 127));
- dst += emitOutputByte(dst, insCode(ins));
- dst += emitOutputByte(dst, val);
- break;
+ case INS_jge:
+ assert((val >= -128) && (val <= 127));
+ dst += emitOutputByte(dst, insCode(ins));
+ dst += emitOutputByte(dst, val);
+ break;
- case INS_loop:
- assert((val >= -128) && (val <= 127));
- dst += emitOutputByte(dst, insCodeMI(ins));
- dst += emitOutputByte(dst, val);
- break;
+ case INS_loop:
+ assert((val >= -128) && (val <= 127));
+ dst += emitOutputByte(dst, insCodeMI(ins));
+ dst += emitOutputByte(dst, val);
+ break;
- case INS_ret:
- assert(val);
- dst += emitOutputByte(dst, insCodeMI(ins));
- dst += emitOutputWord(dst, val);
- break;
+ case INS_ret:
+ assert(val);
+ dst += emitOutputByte(dst, insCodeMI(ins));
+ dst += emitOutputWord(dst, val);
+ break;
- case INS_push_hide:
- case INS_push:
- code = insCodeMI(ins);
+ case INS_push_hide:
+ case INS_push:
+ code = insCodeMI(ins);
- // Does the operand fit in a byte?
- if (valInByte)
- {
- dst += emitOutputByte(dst, code|2);
- dst += emitOutputByte(dst, val);
- }
- else
- {
- if (TakesRexWPrefix(ins, size))
+ // Does the operand fit in a byte?
+ if (valInByte)
{
- code = AddRexWPrefix(ins, code);
- dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
+ dst += emitOutputByte(dst, code | 2);
+ dst += emitOutputByte(dst, val);
}
+ else
+ {
+ if (TakesRexWPrefix(ins, size))
+ {
+ code = AddRexWPrefix(ins, code);
+ dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
+ }
- dst += emitOutputByte(dst, code);
- dst += emitOutputLong(dst, val);
+ dst += emitOutputByte(dst, code);
+ dst += emitOutputLong(dst, val);
#ifdef RELOC_SUPPORT
- if (id->idIsCnsReloc())
- {
- emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)val, IMAGE_REL_BASED_HIGHLOW);
- }
+ if (id->idIsCnsReloc())
+ {
+ emitRecordRelocation((void*)(dst - sizeof(INT32)), (void*)(size_t)val, IMAGE_REL_BASED_HIGHLOW);
+ }
#endif
- }
+ }
- // Did we push a GC ref value?
- if (id->idGCref())
- {
-#ifdef DEBUG
- printf("UNDONE: record GCref push [cns]\n");
+ // Did we push a GC ref value?
+ if (id->idGCref())
+ {
+#ifdef DEBUG
+ printf("UNDONE: record GCref push [cns]\n");
#endif
- }
+ }
- break;
+ break;
- default:
- assert(!"unexpected instruction");
+ default:
+ assert(!"unexpected instruction");
}
- return dst;
+ return dst;
}
/*****************************************************************************
@@ -10149,59 +10101,56 @@ BYTE* emitter::emitOutputIV(BYTE* dst, instrDesc* id)
* needs to get bound to an actual address and processed by branch shortening.
*/
-BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
+BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
{
- unsigned srcOffs;
- unsigned dstOffs;
- ssize_t distVal;
+ unsigned srcOffs;
+ unsigned dstOffs;
+ ssize_t distVal;
- instrDescJmp* id = (instrDescJmp*)i;
- instruction ins = id->idIns();
- bool jmp;
- bool relAddr = true; // does the instruction use relative-addressing?
+ instrDescJmp* id = (instrDescJmp*)i;
+ instruction ins = id->idIns();
+ bool jmp;
+ bool relAddr = true; // does the instruction use relative-addressing?
    // SSE2 doesn't make any sense here
- assert(!IsSSE2Instruction(ins));
- assert(!IsAVXInstruction(ins));
+ assert(!IsSSE2Instruction(ins));
+ assert(!IsAVXInstruction(ins));
- size_t ssz;
- size_t lsz;
+ size_t ssz;
+ size_t lsz;
switch (ins)
{
- default:
- ssz = JCC_SIZE_SMALL;
- lsz = JCC_SIZE_LARGE;
- jmp = true;
- break;
+ default:
+ ssz = JCC_SIZE_SMALL;
+ lsz = JCC_SIZE_LARGE;
+ jmp = true;
+ break;
- case INS_jmp:
- ssz = JMP_SIZE_SMALL;
- lsz = JMP_SIZE_LARGE;
- jmp = true;
- break;
+ case INS_jmp:
+ ssz = JMP_SIZE_SMALL;
+ lsz = JMP_SIZE_LARGE;
+ jmp = true;
+ break;
- case INS_call:
- ssz =
- lsz = CALL_INST_SIZE;
- jmp = false;
- break;
+ case INS_call:
+ ssz = lsz = CALL_INST_SIZE;
+ jmp = false;
+ break;
- case INS_push_hide:
- case INS_push:
- ssz =
- lsz = 5;
- jmp = false;
- relAddr = false;
- break;
+ case INS_push_hide:
+ case INS_push:
+ ssz = lsz = 5;
+ jmp = false;
+ relAddr = false;
+ break;
- case INS_mov:
- case INS_lea:
- ssz =
- lsz = id->idCodeSize();
- jmp = false;
- relAddr = false;
- break;
+ case INS_mov:
+ case INS_lea:
+ ssz = lsz = id->idCodeSize();
+ jmp = false;
+ relAddr = false;
+ break;
}
// Figure out the distance to the target
@@ -10209,22 +10158,28 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
dstOffs = id->idAddr()->iiaIGlabel->igOffs;
if (relAddr)
- distVal = (ssize_t) (emitOffsetToPtr(dstOffs) - emitOffsetToPtr(srcOffs));
+ {
+ distVal = (ssize_t)(emitOffsetToPtr(dstOffs) - emitOffsetToPtr(srcOffs));
+ }
else
- distVal = (ssize_t) emitOffsetToPtr(dstOffs);
+ {
+ distVal = (ssize_t)emitOffsetToPtr(dstOffs);
+ }
- if (dstOffs <= srcOffs)
+ if (dstOffs <= srcOffs)
{
// This is a backward jump - distance is known at this point
CLANG_FORMAT_COMMENT_ANCHOR;
-#if DEBUG_EMIT
- if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+#if DEBUG_EMIT
+ if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- size_t blkOffs = id->idjIG->igOffs;
+ size_t blkOffs = id->idjIG->igOffs;
- if (INTERESTING_JUMP_NUM == 0)
- printf("[3] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
+ if (INTERESTING_JUMP_NUM == 0)
+ {
+ printf("[3] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
+ }
printf("[3] Jump block is at %08X - %02X = %08X\n", blkOffs, emitOffsAdj, blkOffs - emitOffsAdj);
printf("[3] Jump is at %08X - %02X = %08X\n", srcOffs, emitOffsAdj, srcOffs - emitOffsAdj);
printf("[3] Label block is at %08X - %02X = %08X\n", dstOffs, emitOffsAdj, dstOffs - emitOffsAdj);
@@ -10232,7 +10187,7 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
#endif
// Can we use a short jump?
- if (jmp && distVal - ssz >= (size_t)JMP_DIST_SMALL_MAX_NEG)
+ if (jmp && distVal - ssz >= (size_t)JMP_DIST_SMALL_MAX_NEG)
{
emitSetShortJump(id);
}
@@ -10240,10 +10195,10 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
else
{
// This is a forward jump - distance will be an upper limit
- emitFwdJumps = true;
+ emitFwdJumps = true;
// The target offset will be closer by at least 'emitOffsAdj', but only if this
- // jump doesn't cross the hot-cold boundary.
+ // jump doesn't cross the hot-cold boundary.
if (!emitJumpCrossHotColdBoundary(srcOffs, dstOffs))
{
dstOffs -= emitOffsAdj;
@@ -10255,23 +10210,27 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
// Are we overflowing the id->idjOffs bitfield?
if (id->idjOffs != dstOffs)
+ {
IMPL_LIMITATION("Method is too large");
+ }
-#if DEBUG_EMIT
- if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
+#if DEBUG_EMIT
+ if (id->idDebugOnlyInfo()->idNum == (unsigned)INTERESTING_JUMP_NUM || INTERESTING_JUMP_NUM == 0)
{
- size_t blkOffs = id->idjIG->igOffs;
+ size_t blkOffs = id->idjIG->igOffs;
- if (INTERESTING_JUMP_NUM == 0)
- printf("[4] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
- printf("[4] Jump block is at %08X\n" , blkOffs);
- printf("[4] Jump is at %08X\n" , srcOffs);
+ if (INTERESTING_JUMP_NUM == 0)
+ {
+ printf("[4] Jump %u:\n", id->idDebugOnlyInfo()->idNum);
+ }
+ printf("[4] Jump block is at %08X\n", blkOffs);
+ printf("[4] Jump is at %08X\n", srcOffs);
printf("[4] Label block is at %08X - %02X = %08X\n", dstOffs + emitOffsAdj, emitOffsAdj, dstOffs);
}
#endif
// Can we use a short jump?
- if (jmp && distVal - ssz <= (size_t)JMP_DIST_SMALL_MAX_POS)
+ if (jmp && distVal - ssz <= (size_t)JMP_DIST_SMALL_MAX_POS)
{
emitSetShortJump(id);
}
@@ -10279,22 +10238,23 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
// Adjust the offset to emit relative to the end of the instruction
if (relAddr)
+ {
distVal -= id->idjShort ? ssz : lsz;
+ }
-#ifdef DEBUG
- if (0&&emitComp->verbose)
+#ifdef DEBUG
+ if (0 && emitComp->verbose)
{
- size_t sz = id->idjShort ?ssz:lsz;
- int distValSize = id->idjShort ? 4 : 8;
- printf("; %s jump [%08X/%03u] from %0*X to %0*X: dist = %08XH\n",
- (dstOffs <= srcOffs)?"Fwd":"Bwd", emitComp->dspPtr(id), id->idDebugOnlyInfo()->idNum,
- distValSize, srcOffs+sz, distValSize, dstOffs,
- distVal);
+ size_t sz = id->idjShort ? ssz : lsz;
+ int distValSize = id->idjShort ? 4 : 8;
+ printf("; %s jump [%08X/%03u] from %0*X to %0*X: dist = %08XH\n", (dstOffs <= srcOffs) ? "Fwd" : "Bwd",
+ emitComp->dspPtr(id), id->idDebugOnlyInfo()->idNum, distValSize, srcOffs + sz, distValSize, dstOffs,
+ distVal);
}
#endif
// What size jump should we use?
- if (id->idjShort)
+ if (id->idjShort)
{
// Short jump
assert(!id->idjKeepLong);
@@ -10309,9 +10269,11 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
{
emitOffsAdj += emitInstCodeSz(id) - JMP_SIZE_SMALL;
-#ifdef DEBUG
+#ifdef DEBUG
if (emitComp->verbose)
+ {
printf("; NOTE: size of jump [%08X] mis-predicted\n", emitComp->dspPtr(id));
+ }
#endif
}
@@ -10327,24 +10289,24 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
size_t code;
// Long jump
- if (jmp)
+ if (jmp)
{
assert(INS_jmp + (INS_l_jmp - INS_jmp) == INS_l_jmp);
- assert(INS_jo + (INS_l_jmp - INS_jmp) == INS_l_jo );
- assert(INS_jb + (INS_l_jmp - INS_jmp) == INS_l_jb );
+ assert(INS_jo + (INS_l_jmp - INS_jmp) == INS_l_jo);
+ assert(INS_jb + (INS_l_jmp - INS_jmp) == INS_l_jb);
assert(INS_jae + (INS_l_jmp - INS_jmp) == INS_l_jae);
- assert(INS_je + (INS_l_jmp - INS_jmp) == INS_l_je );
+ assert(INS_je + (INS_l_jmp - INS_jmp) == INS_l_je);
assert(INS_jne + (INS_l_jmp - INS_jmp) == INS_l_jne);
assert(INS_jbe + (INS_l_jmp - INS_jmp) == INS_l_jbe);
- assert(INS_ja + (INS_l_jmp - INS_jmp) == INS_l_ja );
- assert(INS_js + (INS_l_jmp - INS_jmp) == INS_l_js );
+ assert(INS_ja + (INS_l_jmp - INS_jmp) == INS_l_ja);
+ assert(INS_js + (INS_l_jmp - INS_jmp) == INS_l_js);
assert(INS_jns + (INS_l_jmp - INS_jmp) == INS_l_jns);
assert(INS_jpe + (INS_l_jmp - INS_jmp) == INS_l_jpe);
assert(INS_jpo + (INS_l_jmp - INS_jmp) == INS_l_jpo);
- assert(INS_jl + (INS_l_jmp - INS_jmp) == INS_l_jl );
+ assert(INS_jl + (INS_l_jmp - INS_jmp) == INS_l_jl);
assert(INS_jge + (INS_l_jmp - INS_jmp) == INS_l_jge);
assert(INS_jle + (INS_l_jmp - INS_jmp) == INS_l_jle);
- assert(INS_jg + (INS_l_jmp - INS_jmp) == INS_l_jg );
+ assert(INS_jg + (INS_l_jmp - INS_jmp) == INS_l_jg);
code = insCode((instruction)(ins + (INS_l_jmp - INS_jmp)));
}
@@ -10358,10 +10320,10 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
// Make it look like IF_SWR_CNS so that emitOutputSV emits the r/m32 for us
insFormat tmpInsFmt = id->idInsFmt();
insGroup* tmpIGlabel = id->idAddr()->iiaIGlabel;
- bool tmpDspReloc = id->idIsDspReloc();
+ bool tmpDspReloc = id->idIsDspReloc();
id->idInsFmt(IF_SWR_CNS);
- id->idAddr()->iiaLclVar = ((instrDescLbl*)id)->dstLclVar;
+ id->idAddr()->iiaLclVar = ((instrDescLbl*)id)->dstLclVar;
id->idSetIsDspReloc(false);
dst = emitOutputSV(dst, id, insCodeMI(ins));
@@ -10377,15 +10339,16 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
// Make an instrDesc that looks like IF_RWR_ARD so that emitOutputAM emits the r/m32 for us.
// We basically are doing what emitIns_R_AI does.
// TODO-XArch-Cleanup: revisit this.
- instrDescAmd idAmdStackLocal;
+ instrDescAmd idAmdStackLocal;
instrDescAmd* idAmd = &idAmdStackLocal;
- *(instrDesc*)idAmd = *(instrDesc*)id; // copy all the "core" fields
- memset((BYTE*)idAmd + sizeof(instrDesc), 0, sizeof(instrDescAmd) - sizeof(instrDesc)); // zero out the tail that wasn't copied
+ *(instrDesc*)idAmd = *(instrDesc*)id; // copy all the "core" fields
+ memset((BYTE*)idAmd + sizeof(instrDesc), 0,
+ sizeof(instrDescAmd) - sizeof(instrDesc)); // zero out the tail that wasn't copied
idAmd->idInsFmt(IF_RWR_ARD);
idAmd->idAddr()->iiaAddrMode.amBaseReg = REG_NA;
idAmd->idAddr()->iiaAddrMode.amIndxReg = REG_NA;
- emitSetAmdDisp(idAmd, distVal); // set the displacement
+ emitSetAmdDisp(idAmd, distVal); // set the displacement
idAmd->idSetIsDspReloc(id->idIsDspReloc());
assert(emitGetInsAmdAny(idAmd) == distVal); // make sure "disp" is stored properly
@@ -10415,12 +10378,14 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
{
dst += emitOutputByte(dst, code);
- if (code & 0xFF00)
+ if (code & 0xFF00)
+ {
dst += emitOutputByte(dst, code >> 8);
+ }
}
// For forward jumps, record the address of the distance value
- id->idjTemp.idjAddr = (dstOffs > srcOffs) ? dst : NULL;
+ id->idjTemp.idjAddr = (dstOffs > srcOffs) ? dst : nullptr;
dst += emitOutputLong(dst, distVal);
@@ -10441,10 +10406,12 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
}
// Local calls kill all registers
- if (ins == INS_call && (emitThisGCrefRegs|emitThisByrefRegs))
- emitGCregDeadUpdMask(emitThisGCrefRegs|emitThisByrefRegs, dst);
+ if (ins == INS_call && (emitThisGCrefRegs | emitThisByrefRegs))
+ {
+ emitGCregDeadUpdMask(emitThisGCrefRegs | emitThisByrefRegs, dst);
+ }
- return dst;
+ return dst;
}
/*****************************************************************************
@@ -10458,26 +10425,26 @@ BYTE* emitter::emitOutputLJ(BYTE* dst, instrDesc* i)
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
+size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
{
assert(emitIssuing);
- BYTE* dst = *dp;
- size_t sz = sizeof(instrDesc);
- instruction ins = id->idIns();
- unsigned char callInstrSize = 0;
+ BYTE* dst = *dp;
+ size_t sz = sizeof(instrDesc);
+ instruction ins = id->idIns();
+ unsigned char callInstrSize = 0;
#ifdef DEBUG
- bool dspOffs = emitComp->opts.dspGCtbls;
+ bool dspOffs = emitComp->opts.dspGCtbls;
#endif // DEBUG
- emitAttr size = id->idOpSize();
+ emitAttr size = id->idOpSize();
assert(REG_NA == (int)REG_NA);
- assert(ins != INS_imul || size >= EA_4BYTE); // Has no 'w' bit
+ assert(ins != INS_imul || size >= EA_4BYTE); // Has no 'w' bit
assert(instrIs3opImul(id->idIns()) == 0 || size >= EA_4BYTE); // Has no 'w' bit
VARSET_TP VARSET_INIT_NOCOPY(GCvars, VarSetOps::UninitVal());
@@ -10485,620 +10452,653 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
// What instruction format have we got?
switch (id->idInsFmt())
{
- size_t code;
- size_t regcode;
- int args;
- CnsVal cnsVal;
+ size_t code;
+ size_t regcode;
+ int args;
+ CnsVal cnsVal;
- BYTE* addr;
- bool recCall;
+ BYTE* addr;
+ bool recCall;
- regMaskTP gcrefRegs;
- regMaskTP byrefRegs;
+ regMaskTP gcrefRegs;
+ regMaskTP byrefRegs;
/********************************************************************/
/* No operands */
/********************************************************************/
- case IF_NONE:
- // the loop alignment pseudo instruction
- if (ins == INS_align)
- {
- sz = TINY_IDSC_SIZE;
- dst = emitOutputNOP(dst, (-(int)(size_t)dst) & 0x0f);
- assert(((size_t)dst & 0x0f) == 0);
- break;
- }
+ case IF_NONE:
+ // the loop alignment pseudo instruction
+ if (ins == INS_align)
+ {
+ sz = TINY_IDSC_SIZE;
+ dst = emitOutputNOP(dst, (-(int)(size_t)dst) & 0x0f);
+ assert(((size_t)dst & 0x0f) == 0);
+ break;
+ }
- if (ins == INS_nop)
- {
- dst = emitOutputNOP(dst, id->idCodeSize());
- break;
- }
+ if (ins == INS_nop)
+ {
+ dst = emitOutputNOP(dst, id->idCodeSize());
+ break;
+ }
- // the cdq instruction kills the EDX register implicitly
- if (ins == INS_cdq)
- emitGCregDeadUpd(REG_EDX, dst);
+ // the cdq instruction kills the EDX register implicitly
+ if (ins == INS_cdq)
+ {
+ emitGCregDeadUpd(REG_EDX, dst);
+ }
- __fallthrough;
+ __fallthrough;
#if FEATURE_STACK_FP_X87
- case IF_TRD:
- case IF_TWR:
- case IF_TRW:
+ case IF_TRD:
+ case IF_TWR:
+ case IF_TRW:
#endif // FEATURE_STACK_FP_X87
- assert(id->idGCref() == GCT_NONE);
+ assert(id->idGCref() == GCT_NONE);
- code = insCodeMR(ins);
+ code = insCodeMR(ins);
#ifdef _TARGET_AMD64_
- // Support only scalar AVX instructions and hence size is hard coded to 4-byte.
- code = AddVexPrefixIfNeeded(ins, code, EA_4BYTE);
+ // Support only scalar AVX instructions and hence size is hard coded to 4-byte.
+ code = AddVexPrefixIfNeeded(ins, code, EA_4BYTE);
- if (ins == INS_cdq && TakesRexWPrefix(ins,id->idOpSize()))
- {
- code = AddRexWPrefix(ins, code);
- }
- dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
+ if (ins == INS_cdq && TakesRexWPrefix(ins, id->idOpSize()))
+ {
+ code = AddRexWPrefix(ins, code);
+ }
+ dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
#endif
- // Is this a 'big' opcode?
- if (code & 0xFF000000)
- {
- // The high word and then the low word
- dst += emitOutputWord(dst, code >> 16);
- code &= 0x0000FFFF;
- dst += emitOutputWord(dst, code);
- }
- else if (code & 0x00FF0000)
- {
- // The high byte and then the low word
- dst += emitOutputByte(dst, code >> 16);
- code &= 0x0000FFFF;
- dst += emitOutputWord(dst, code);
- }
- else if (code & 0xFF00)
- {
- // The 2 byte opcode
- dst += emitOutputWord(dst, code);
- }
- else
- {
- // The 1 byte opcode
- dst += emitOutputByte(dst, code);
- }
+ // Is this a 'big' opcode?
+ if (code & 0xFF000000)
+ {
+ // The high word and then the low word
+ dst += emitOutputWord(dst, code >> 16);
+ code &= 0x0000FFFF;
+ dst += emitOutputWord(dst, code);
+ }
+ else if (code & 0x00FF0000)
+ {
+ // The high byte and then the low word
+ dst += emitOutputByte(dst, code >> 16);
+ code &= 0x0000FFFF;
+ dst += emitOutputWord(dst, code);
+ }
+ else if (code & 0xFF00)
+ {
+ // The 2 byte opcode
+ dst += emitOutputWord(dst, code);
+ }
+ else
+ {
+ // The 1 byte opcode
+ dst += emitOutputByte(dst, code);
+ }
- break;
+ break;
/********************************************************************/
/* Simple constant, local label, method */
/********************************************************************/
- case IF_CNS:
- dst = emitOutputIV(dst, id);
- sz = emitSizeOfInsDsc(id);
- break;
-
- case IF_LABEL:
- case IF_RWR_LABEL:
- case IF_SWR_LABEL:
- assert(id->idGCref() == GCT_NONE);
- assert(id->idIsBound());
-
- // TODO-XArch-Cleanup: handle IF_RWR_LABEL in emitOutputLJ() or change it to emitOutputAM()?
- dst = emitOutputLJ(dst, id);
- sz = (id->idInsFmt() == IF_SWR_LABEL ? sizeof(instrDescLbl) : sizeof(instrDescJmp));
- break;
+ case IF_CNS:
+ dst = emitOutputIV(dst, id);
+ sz = emitSizeOfInsDsc(id);
+ break;
- case IF_METHOD:
- case IF_METHPTR:
- // Assume we'll be recording this call
- recCall = true;
+ case IF_LABEL:
+ case IF_RWR_LABEL:
+ case IF_SWR_LABEL:
+ assert(id->idGCref() == GCT_NONE);
+ assert(id->idIsBound());
- // Get hold of the argument count and field Handle
- args = emitGetInsCDinfo(id);
+ // TODO-XArch-Cleanup: handle IF_RWR_LABEL in emitOutputLJ() or change it to emitOutputAM()?
+ dst = emitOutputLJ(dst, id);
+ sz = (id->idInsFmt() == IF_SWR_LABEL ? sizeof(instrDescLbl) : sizeof(instrDescJmp));
+ break;
- // Is this a "fat" call descriptor?
- if (id->idIsLargeCall())
- {
- instrDescCGCA* idCall = (instrDescCGCA*) id;
- gcrefRegs = idCall->idcGcrefRegs;
- byrefRegs = idCall->idcByrefRegs;
- VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars);
- sz = sizeof(instrDescCGCA);
- }
- else
- {
- assert(!id->idIsLargeDsp());
- assert(!id->idIsLargeCns());
+ case IF_METHOD:
+ case IF_METHPTR:
+ // Assume we'll be recording this call
+ recCall = true;
- gcrefRegs = emitDecodeCallGCregs(id);
- byrefRegs = 0;
- VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp));
- sz = sizeof(instrDesc);
- }
+ // Get hold of the argument count and field Handle
+ args = emitGetInsCDinfo(id);
- addr = (BYTE*)id->idAddr()->iiaAddr;
- assert(addr != nullptr);
+ // Is this a "fat" call descriptor?
+ if (id->idIsLargeCall())
+ {
+ instrDescCGCA* idCall = (instrDescCGCA*)id;
+ gcrefRegs = idCall->idcGcrefRegs;
+ byrefRegs = idCall->idcByrefRegs;
+ VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars);
+ sz = sizeof(instrDescCGCA);
+ }
+ else
+ {
+ assert(!id->idIsLargeDsp());
+ assert(!id->idIsLargeCns());
- // Some helpers don't get recorded in GC tables
- if (id->idIsNoGC())
- {
- recCall = false;
- }
+ gcrefRegs = emitDecodeCallGCregs(id);
+ byrefRegs = 0;
+ VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp));
+ sz = sizeof(instrDesc);
+ }
- // What kind of a call do we have here?
- if (id->idInsFmt() == IF_METHPTR)
- {
- // This is call indirect via a method pointer
+ addr = (BYTE*)id->idAddr()->iiaAddr;
+ assert(addr != nullptr);
- code = insCodeMR(ins);
- if (ins == INS_i_jmp)
+ // Some helpers don't get recorded in GC tables
+ if (id->idIsNoGC())
{
- code |= 1;
+ recCall = false;
}
- if (id->idIsDspReloc())
+ // What kind of a call do we have here?
+ if (id->idInsFmt() == IF_METHPTR)
{
- dst += emitOutputWord(dst, code | 0x0500);
+ // This is call indirect via a method pointer
+
+ code = insCodeMR(ins);
+ if (ins == INS_i_jmp)
+ {
+ code |= 1;
+ }
+
+ if (id->idIsDspReloc())
+ {
+ dst += emitOutputWord(dst, code | 0x0500);
#ifdef _TARGET_AMD64_
- dst += emitOutputLong(dst, 0);
+ dst += emitOutputLong(dst, 0);
#else
- dst += emitOutputLong(dst, (int)addr);
+ dst += emitOutputLong(dst, (int)addr);
#endif
- emitRecordRelocation((void*)(dst - sizeof(int)), addr, IMAGE_REL_BASED_DISP32);
- }
- else
- {
+ emitRecordRelocation((void*)(dst - sizeof(int)), addr, IMAGE_REL_BASED_DISP32);
+ }
+ else
+ {
#ifdef _TARGET_X86_
- dst += emitOutputWord(dst, code | 0x0500);
-#else //_TARGET_AMD64_
- // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero.
- // This addr mode should never be used while generating relocatable ngen code nor if
- // the addr can be encoded as pc-relative address.
- noway_assert(!emitComp->opts.compReloc);
- noway_assert(codeGen->genAddrRelocTypeHint((size_t)addr) != IMAGE_REL_BASED_REL32);
- noway_assert(static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (ssize_t)addr);
-
- // This requires, specifying a SIB byte after ModRM byte.
- dst += emitOutputWord(dst, code | 0x0400);
- dst += emitOutputByte(dst, 0x25);
+ dst += emitOutputWord(dst, code | 0x0500);
+#else //_TARGET_AMD64_
+ // Amd64: addr fits within 32-bits and can be encoded as a displacement relative to zero.
+ // This addr mode should never be used while generating relocatable ngen code nor if
+                    // the addr can be encoded as a pc-relative address.
+ noway_assert(!emitComp->opts.compReloc);
+ noway_assert(codeGen->genAddrRelocTypeHint((size_t)addr) != IMAGE_REL_BASED_REL32);
+ noway_assert(static_cast<int>(reinterpret_cast<intptr_t>(addr)) == (ssize_t)addr);
+
+                    // This requires specifying a SIB byte after the ModRM byte.
+ dst += emitOutputWord(dst, code | 0x0400);
+ dst += emitOutputByte(dst, 0x25);
#endif //_TARGET_AMD64_
- dst += emitOutputLong(dst, static_cast<int>(reinterpret_cast<intptr_t>(addr)));
+ dst += emitOutputLong(dst, static_cast<int>(reinterpret_cast<intptr_t>(addr)));
+ }
+ goto DONE_CALL;
}
- goto DONE_CALL;
- }
- // Else
- // This is call direct where we know the target, thus we can
- // use a direct call; the target to jump to is in iiaAddr.
- assert(id->idInsFmt() == IF_METHOD);
+ // Else
+ // This is call direct where we know the target, thus we can
+ // use a direct call; the target to jump to is in iiaAddr.
+ assert(id->idInsFmt() == IF_METHOD);
- // Output the call opcode followed by the target distance
- dst += (ins == INS_l_jmp) ? emitOutputByte(dst, insCode(ins)) : emitOutputByte(dst, insCodeMI(ins));
+ // Output the call opcode followed by the target distance
+ dst += (ins == INS_l_jmp) ? emitOutputByte(dst, insCode(ins)) : emitOutputByte(dst, insCodeMI(ins));
- ssize_t offset;
+ ssize_t offset;
#ifdef _TARGET_AMD64_
- // All REL32 on Amd64 go through recordRelocation. Here we will output zero to advance dst.
- offset = 0;
- assert(id->idIsDspReloc());
+ // All REL32 on Amd64 go through recordRelocation. Here we will output zero to advance dst.
+ offset = 0;
+ assert(id->idIsDspReloc());
#else
- // Calculate PC relative displacement.
- // Although you think we should be using sizeof(void*), the x86 and x64 instruction set
- // only allow a 32-bit offset, so we correctly use sizeof(INT32)
- offset = addr - (dst + sizeof(INT32));
+ // Calculate PC relative displacement.
+            // Although you might think we should be using sizeof(void*), the x86 and x64 instruction sets
+            // only allow a 32-bit offset, so we correctly use sizeof(INT32)
+ offset = addr - (dst + sizeof(INT32));
#endif
- dst += emitOutputLong(dst, offset);
+ dst += emitOutputLong(dst, offset);
#ifdef RELOC_SUPPORT
- if (id->idIsDspReloc())
- {
- emitRecordRelocation((void*)(dst - sizeof(INT32)), addr, IMAGE_REL_BASED_REL32);
- }
+ if (id->idIsDspReloc())
+ {
+ emitRecordRelocation((void*)(dst - sizeof(INT32)), addr, IMAGE_REL_BASED_REL32);
+ }
#endif
- DONE_CALL:
+ DONE_CALL:
- /* We update the GC info before the call as the variables cannot be
- used by the call. Killing variables before the call helps with
- boundary conditions if the call is CORINFO_HELP_THROW - see bug 50029.
- If we ever track aliased variables (which could be used by the
- call), we would have to keep them alive past the call.
- */
- assert(FitsIn<unsigned char>(dst - *dp));
- callInstrSize = static_cast<unsigned char>(dst - *dp);
- emitUpdateLiveGCvars(GCvars, *dp);
+ /* We update the GC info before the call as the variables cannot be
+ used by the call. Killing variables before the call helps with
+ boundary conditions if the call is CORINFO_HELP_THROW - see bug 50029.
+ If we ever track aliased variables (which could be used by the
+ call), we would have to keep them alive past the call.
+ */
+ assert(FitsIn<unsigned char>(dst - *dp));
+ callInstrSize = static_cast<unsigned char>(dst - *dp);
+ emitUpdateLiveGCvars(GCvars, *dp);
- // If the method returns a GC ref, mark EAX appropriately
- if (id->idGCref() == GCT_GCREF)
- {
- gcrefRegs |= RBM_EAX;
- }
- else if (id->idGCref() == GCT_BYREF)
- {
- byrefRegs |= RBM_EAX;
- }
-
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- // If is a multi-register return method is called, mark RDX appropriately (for System V AMD64).
- if (id->idIsLargeCall())
- {
- instrDescCGCA* idCall = (instrDescCGCA*)id;
- if (idCall->idSecondGCref() == GCT_GCREF)
+ // If the method returns a GC ref, mark EAX appropriately
+ if (id->idGCref() == GCT_GCREF)
{
- gcrefRegs |= RBM_RDX;
+ gcrefRegs |= RBM_EAX;
}
- else if (idCall->idSecondGCref() == GCT_BYREF)
+ else if (id->idGCref() == GCT_BYREF)
{
- byrefRegs |= RBM_RDX;
+ byrefRegs |= RBM_EAX;
}
- }
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- // If the GC register set has changed, report the new set
- if (gcrefRegs != emitThisGCrefRegs)
- emitUpdateLiveGCregs(GCT_GCREF, gcrefRegs, dst);
+#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+            // If a multi-register return method is called, mark RDX appropriately (for System V AMD64).
+ if (id->idIsLargeCall())
+ {
+ instrDescCGCA* idCall = (instrDescCGCA*)id;
+ if (idCall->idSecondGCref() == GCT_GCREF)
+ {
+ gcrefRegs |= RBM_RDX;
+ }
+ else if (idCall->idSecondGCref() == GCT_BYREF)
+ {
+ byrefRegs |= RBM_RDX;
+ }
+ }
+#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- if (byrefRegs != emitThisByrefRegs)
- emitUpdateLiveGCregs(GCT_BYREF, byrefRegs, dst);
+ // If the GC register set has changed, report the new set
+ if (gcrefRegs != emitThisGCrefRegs)
+ {
+ emitUpdateLiveGCregs(GCT_GCREF, gcrefRegs, dst);
+ }
- if (recCall || args)
- {
- // For callee-pop, all arguments will be popped after the call.
- // For caller-pop, any GC arguments will go dead after the call.
+ if (byrefRegs != emitThisByrefRegs)
+ {
+ emitUpdateLiveGCregs(GCT_BYREF, byrefRegs, dst);
+ }
- assert(callInstrSize != 0);
+ if (recCall || args)
+ {
+ // For callee-pop, all arguments will be popped after the call.
+ // For caller-pop, any GC arguments will go dead after the call.
- if (args >= 0)
- emitStackPop(dst, /*isCall*/true, callInstrSize, args);
- else
- emitStackKillArgs(dst, -args, callInstrSize);
- }
+ assert(callInstrSize != 0);
- // Do we need to record a call location for GC purposes?
- if (!emitFullGCinfo && recCall)
- {
- assert(callInstrSize != 0);
- emitRecordGCcall(dst, callInstrSize);
- }
+ if (args >= 0)
+ {
+ emitStackPop(dst, /*isCall*/ true, callInstrSize, args);
+ }
+ else
+ {
+ emitStackKillArgs(dst, -args, callInstrSize);
+ }
+ }
+
+ // Do we need to record a call location for GC purposes?
+ if (!emitFullGCinfo && recCall)
+ {
+ assert(callInstrSize != 0);
+ emitRecordGCcall(dst, callInstrSize);
+ }
#ifdef DEBUG
- if (ins == INS_call)
- {
- emitRecordCallSite(emitCurCodeOffs(*dp),
- id->idDebugOnlyInfo()->idCallSig,
- (CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie);
- }
+ if (ins == INS_call)
+ {
+ emitRecordCallSite(emitCurCodeOffs(*dp), id->idDebugOnlyInfo()->idCallSig,
+ (CORINFO_METHOD_HANDLE)id->idDebugOnlyInfo()->idMemCookie);
+ }
#endif // DEBUG
- break;
+ break;
/********************************************************************/
/* One register operand */
/********************************************************************/
- case IF_RRD:
- case IF_RWR:
- case IF_RRW:
- dst = emitOutputR(dst, id);
- sz = TINY_IDSC_SIZE;
- break;
+ case IF_RRD:
+ case IF_RWR:
+ case IF_RRW:
+ dst = emitOutputR(dst, id);
+ sz = TINY_IDSC_SIZE;
+ break;
/********************************************************************/
/* Register and register/constant */
/********************************************************************/
- case IF_RRW_SHF:
- code = insCodeMR(ins);
- // Emit the VEX prefix if it exists
- code = AddVexPrefixIfNeeded(ins, code, size);
- code = insEncodeMRreg(ins, id->idReg1(), size, code);
+ case IF_RRW_SHF:
+ code = insCodeMR(ins);
+ // Emit the VEX prefix if it exists
+ code = AddVexPrefixIfNeeded(ins, code, size);
+ code = insEncodeMRreg(ins, id->idReg1(), size, code);
- // set the W bit
- if (size != EA_1BYTE)
- {
- code |= 1;
- }
+ // set the W bit
+ if (size != EA_1BYTE)
+ {
+ code |= 1;
+ }
- // Emit the REX prefix if it exists
- if (TakesRexWPrefix(ins, size))
- {
- code = AddRexWPrefix(ins, code);
- }
+ // Emit the REX prefix if it exists
+ if (TakesRexWPrefix(ins, size))
+ {
+ code = AddRexWPrefix(ins, code);
+ }
- // Output a size prefix for a 16-bit operand
- if (size == EA_2BYTE)
- {
- dst += emitOutputByte(dst, 0x66);
- }
+ // Output a size prefix for a 16-bit operand
+ if (size == EA_2BYTE)
+ {
+ dst += emitOutputByte(dst, 0x66);
+ }
- dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
- dst += emitOutputWord(dst, code);
- dst += emitOutputByte(dst, emitGetInsSC(id));
- sz = emitSizeOfInsDsc(id);
- break;
+ dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
+ dst += emitOutputWord(dst, code);
+ dst += emitOutputByte(dst, emitGetInsSC(id));
+ sz = emitSizeOfInsDsc(id);
+ break;
- case IF_RRD_RRD:
- case IF_RWR_RRD:
- case IF_RRW_RRD:
- case IF_RRW_RRW:
- dst = emitOutputRR(dst, id);
- sz = TINY_IDSC_SIZE;
- break;
+ case IF_RRD_RRD:
+ case IF_RWR_RRD:
+ case IF_RRW_RRD:
+ case IF_RRW_RRW:
+ dst = emitOutputRR(dst, id);
+ sz = TINY_IDSC_SIZE;
+ break;
- case IF_RRD_CNS:
- case IF_RWR_CNS:
- case IF_RRW_CNS:
- dst = emitOutputRI(dst, id);
- sz = emitSizeOfInsDsc(id);
- break;
+ case IF_RRD_CNS:
+ case IF_RWR_CNS:
+ case IF_RRW_CNS:
+ dst = emitOutputRI(dst, id);
+ sz = emitSizeOfInsDsc(id);
+ break;
#ifdef FEATURE_AVX_SUPPORT
- case IF_RWR_RRD_RRD:
- dst = emitOutputRRR(dst, id);
- sz = emitSizeOfInsDsc(id);
- break;
+ case IF_RWR_RRD_RRD:
+ dst = emitOutputRRR(dst, id);
+ sz = emitSizeOfInsDsc(id);
+ break;
#endif
- case IF_RRW_RRW_CNS:
- assert(id->idGCref() == GCT_NONE);
-
- // Get the 'base' opcode (it's a big one)
- // Also, determine which operand goes where in the ModRM byte.
- regNumber mReg;
- regNumber rReg;
- // if (ins == INS_shld || ins == INS_shrd || ins == INS_vextractf128 || ins == INS_vinsertf128)
- if (hasCodeMR(ins))
- {
- code = insCodeMR(ins);
- // Emit the VEX prefix if it exists
- code = AddVexPrefixIfNeeded(ins, code, size);
- code = insEncodeMRreg(ins, code);
- mReg = id->idReg1();
- rReg = id->idReg2();
- }
- else
- {
- code = insCodeRM(ins);
- // Emit the VEX prefix if it exists
- code = AddVexPrefixIfNeeded(ins, code, size);
- code = insEncodeRMreg(ins, code);
- mReg = id->idReg2();
- rReg = id->idReg1();
- }
- assert(code & 0x00FF0000);
+ case IF_RRW_RRW_CNS:
+ assert(id->idGCref() == GCT_NONE);
-#ifdef FEATURE_AVX_SUPPORT
- if (TakesVexPrefix(ins))
- {
- if (IsThreeOperandBinaryAVXInstruction(ins))
+ // Get the 'base' opcode (it's a big one)
+ // Also, determine which operand goes where in the ModRM byte.
+ regNumber mReg;
+ regNumber rReg;
+ // if (ins == INS_shld || ins == INS_shrd || ins == INS_vextractf128 || ins == INS_vinsertf128)
+ if (hasCodeMR(ins))
{
- // Encode source/dest operand reg in 'vvvv' bits in 1's complement form
- // This code will have to change when we support 3 operands.
- // For now, we always overload this source with the destination (always reg1).
- // (Though we will need to handle the few ops that can have the 'vvvv' bits as destination,
- // e.g. pslldq, when/if we support those instructions with 2 registers.)
- // (see x64 manual Table 2-9. Instructions with a VEX.vvvv destination)
- code = insEncodeReg3456(ins, id->idReg1(), size, code);
+ code = insCodeMR(ins);
+ // Emit the VEX prefix if it exists
+ code = AddVexPrefixIfNeeded(ins, code, size);
+ code = insEncodeMRreg(ins, code);
+ mReg = id->idReg1();
+ rReg = id->idReg2();
}
- else if (IsThreeOperandMoveAVXInstruction(ins))
+ else
{
- // This is a "merge" move instruction.
- // Encode source operand reg in 'vvvv' bits in 1's complement form
- code = insEncodeReg3456(ins, id->idReg2(), size, code);
+ code = insCodeRM(ins);
+ // Emit the VEX prefix if it exists
+ code = AddVexPrefixIfNeeded(ins, code, size);
+ code = insEncodeRMreg(ins, code);
+ mReg = id->idReg2();
+ rReg = id->idReg1();
+ }
+ assert(code & 0x00FF0000);
+
+#ifdef FEATURE_AVX_SUPPORT
+ if (TakesVexPrefix(ins))
+ {
+ if (IsThreeOperandBinaryAVXInstruction(ins))
+ {
+ // Encode source/dest operand reg in 'vvvv' bits in 1's complement form
+ // This code will have to change when we support 3 operands.
+ // For now, we always overload this source with the destination (always reg1).
+ // (Though we will need to handle the few ops that can have the 'vvvv' bits as destination,
+ // e.g. pslldq, when/if we support those instructions with 2 registers.)
+ // (see x64 manual Table 2-9. Instructions with a VEX.vvvv destination)
+ code = insEncodeReg3456(ins, id->idReg1(), size, code);
+ }
+ else if (IsThreeOperandMoveAVXInstruction(ins))
+ {
+ // This is a "merge" move instruction.
+ // Encode source operand reg in 'vvvv' bits in 1's complement form
+ code = insEncodeReg3456(ins, id->idReg2(), size, code);
+ }
}
- }
#endif // FEATURE_AVX_SUPPORT
- regcode = (insEncodeReg345(ins, rReg, size, &code) |
- insEncodeReg012(ins, mReg, size, &code)) << 8;
+ regcode = (insEncodeReg345(ins, rReg, size, &code) | insEncodeReg012(ins, mReg, size, &code)) << 8;
- // Output the REX prefix
- dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
+ // Output the REX prefix
+ dst += emitOutputRexOrVexPrefixIfNeeded(ins, dst, code);
- if (UseAVX() && Is4ByteAVXInstruction(ins))
- {
- // We just need to output the last byte of the opcode.
- assert((code & 0xFF) == 0);
- assert((code & 0xFF00) != 0xC000);
- dst += emitOutputByte(dst, (code >> 8) & 0xFF);
- code = 0;
- }
- else if (code & 0xFF000000)
- {
- dst += emitOutputWord(dst, code >> 16);
- code &= 0x0000FFFF;
- }
- else if (code & 0x00FF0000)
- {
- dst += emitOutputByte(dst, code >> 16);
- code &= 0x0000FFFF;
- }
+ if (UseAVX() && Is4ByteAVXInstruction(ins))
+ {
+ // We just need to output the last byte of the opcode.
+ assert((code & 0xFF) == 0);
+ assert((code & 0xFF00) != 0xC000);
+ dst += emitOutputByte(dst, (code >> 8) & 0xFF);
+ code = 0;
+ }
+ else if (code & 0xFF000000)
+ {
+ dst += emitOutputWord(dst, code >> 16);
+ code &= 0x0000FFFF;
+ }
+ else if (code & 0x00FF0000)
+ {
+ dst += emitOutputByte(dst, code >> 16);
+ code &= 0x0000FFFF;
+ }
- // Note that regcode is shifted by 8-bits above to align with RM byte.
- if (code != 0)
- {
- assert((code & 0xFF00) == 0xC000);
- dst += emitOutputWord(dst, code | regcode);
- }
- else
- {
- // This case occurs for AVX instructions.
- // Note that regcode is left shifted by 8-bits.
- assert(Is4ByteAVXInstruction(ins));
- dst += emitOutputByte(dst, 0xC0 | (regcode >> 8));
- }
+ // Note that regcode is shifted by 8-bits above to align with RM byte.
+ if (code != 0)
+ {
+ assert((code & 0xFF00) == 0xC000);
+ dst += emitOutputWord(dst, code | regcode);
+ }
+ else
+ {
+ // This case occurs for AVX instructions.
+ // Note that regcode is left shifted by 8-bits.
+ assert(Is4ByteAVXInstruction(ins));
+ dst += emitOutputByte(dst, 0xC0 | (regcode >> 8));
+ }
- dst += emitOutputByte(dst, emitGetInsSC(id));
- sz = emitSizeOfInsDsc(id);
- break;
+ dst += emitOutputByte(dst, emitGetInsSC(id));
+ sz = emitSizeOfInsDsc(id);
+ break;
/********************************************************************/
/* Address mode operand */
/********************************************************************/
- case IF_ARD:
- case IF_AWR:
- case IF_ARW:
+ case IF_ARD:
+ case IF_AWR:
+ case IF_ARW:
#if FEATURE_STACK_FP_X87
- case IF_TRD_ARD:
- case IF_TWR_ARD:
- case IF_TRW_ARD:
+ case IF_TRD_ARD:
+ case IF_TWR_ARD:
+ case IF_TRW_ARD:
- // case IF_ARD_TRD:
- // case IF_ARW_TRD:
- case IF_AWR_TRD:
+ // case IF_ARD_TRD:
+ // case IF_ARW_TRD:
+ case IF_AWR_TRD:
#endif // FEATURE_STACK_FP_X87
- dst = emitCodeWithInstructionSize(dst, emitOutputAM(dst, id, insCodeMR(ins)), &callInstrSize);
+ dst = emitCodeWithInstructionSize(dst, emitOutputAM(dst, id, insCodeMR(ins)), &callInstrSize);
- switch (ins)
- {
- case INS_call:
+ switch (ins)
+ {
+ case INS_call:
- IND_CALL:
- // Get hold of the argument count and method handle
- args = emitGetInsCIargs(id);
+ IND_CALL:
+ // Get hold of the argument count and method handle
+ args = emitGetInsCIargs(id);
- // Is this a "fat" call descriptor?
- if (id->idIsLargeCall())
- {
- instrDescCGCA* idCall = (instrDescCGCA*) id;
+ // Is this a "fat" call descriptor?
+ if (id->idIsLargeCall())
+ {
+ instrDescCGCA* idCall = (instrDescCGCA*)id;
- gcrefRegs = idCall->idcGcrefRegs;
- byrefRegs = idCall->idcByrefRegs;
- VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars);
- sz = sizeof(instrDescCGCA);
- }
- else
- {
- assert(!id->idIsLargeDsp());
- assert(!id->idIsLargeCns());
+ gcrefRegs = idCall->idcGcrefRegs;
+ byrefRegs = idCall->idcByrefRegs;
+ VarSetOps::Assign(emitComp, GCvars, idCall->idcGCvars);
+ sz = sizeof(instrDescCGCA);
+ }
+ else
+ {
+ assert(!id->idIsLargeDsp());
+ assert(!id->idIsLargeCns());
- gcrefRegs = emitDecodeCallGCregs(id);
- byrefRegs = 0;
- VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp));
- sz = sizeof(instrDesc);
- }
+ gcrefRegs = emitDecodeCallGCregs(id);
+ byrefRegs = 0;
+ VarSetOps::AssignNoCopy(emitComp, GCvars, VarSetOps::MakeEmpty(emitComp));
+ sz = sizeof(instrDesc);
+ }
- recCall = true;
+ recCall = true;
- goto DONE_CALL;
+ goto DONE_CALL;
- default:
- sz = emitSizeOfInsDsc(id);
+ default:
+ sz = emitSizeOfInsDsc(id);
+ break;
+ }
break;
- }
- break;
- case IF_RRD_ARD:
- case IF_RWR_ARD:
- case IF_RRW_ARD:
- code = insCodeRM(ins);
- code = AddVexPrefixIfNeeded(ins, code, size);
- regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
- dst = emitOutputAM(dst, id, code | regcode);
- sz = emitSizeOfInsDsc(id);
- break;
+ case IF_RRD_ARD:
+ case IF_RWR_ARD:
+ case IF_RRW_ARD:
+ code = insCodeRM(ins);
+ code = AddVexPrefixIfNeeded(ins, code, size);
+ regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
+ dst = emitOutputAM(dst, id, code | regcode);
+ sz = emitSizeOfInsDsc(id);
+ break;
- case IF_ARD_RRD:
- case IF_AWR_RRD:
- case IF_ARW_RRD:
- code = insCodeMR(ins);
- code = AddVexPrefixIfNeeded(ins, code, size);
- regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
- dst = emitOutputAM(dst, id, code | regcode);
- sz = emitSizeOfInsDsc(id);
- break;
+ case IF_ARD_RRD:
+ case IF_AWR_RRD:
+ case IF_ARW_RRD:
+ code = insCodeMR(ins);
+ code = AddVexPrefixIfNeeded(ins, code, size);
+ regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
+ dst = emitOutputAM(dst, id, code | regcode);
+ sz = emitSizeOfInsDsc(id);
+ break;
- case IF_ARD_CNS:
- case IF_AWR_CNS:
- case IF_ARW_CNS:
- emitGetInsAmdCns(id, &cnsVal);
- dst = emitOutputAM(dst, id, insCodeMI(ins), &cnsVal);
- sz = emitSizeOfInsDsc(id);
- break;
+ case IF_ARD_CNS:
+ case IF_AWR_CNS:
+ case IF_ARW_CNS:
+ emitGetInsAmdCns(id, &cnsVal);
+ dst = emitOutputAM(dst, id, insCodeMI(ins), &cnsVal);
+ sz = emitSizeOfInsDsc(id);
+ break;
- case IF_ARW_SHF:
- emitGetInsAmdCns(id, &cnsVal);
- dst = emitOutputAM(dst, id, insCodeMR(ins), &cnsVal);
- sz = emitSizeOfInsDsc(id);
- break;
+ case IF_ARW_SHF:
+ emitGetInsAmdCns(id, &cnsVal);
+ dst = emitOutputAM(dst, id, insCodeMR(ins), &cnsVal);
+ sz = emitSizeOfInsDsc(id);
+ break;
/********************************************************************/
/* Stack-based operand */
/********************************************************************/
- case IF_SRD:
- case IF_SWR:
- case IF_SRW:
+ case IF_SRD:
+ case IF_SWR:
+ case IF_SRW:
#if FEATURE_STACK_FP_X87
- case IF_TRD_SRD:
- case IF_TWR_SRD:
- case IF_TRW_SRD:
+ case IF_TRD_SRD:
+ case IF_TWR_SRD:
+ case IF_TRW_SRD:
- // case IF_SRD_TRD:
- // case IF_SRW_TRD:
- case IF_SWR_TRD:
+ // case IF_SRD_TRD:
+ // case IF_SRW_TRD:
+ case IF_SWR_TRD:
#endif // FEATURE_STACK_FP_X87
- assert(ins != INS_pop_hide);
- if (ins == INS_pop)
- {
- // The offset in "pop [ESP+xxx]" is relative to the new ESP value
- CLANG_FORMAT_COMMENT_ANCHOR;
+ assert(ins != INS_pop_hide);
+ if (ins == INS_pop)
+ {
+ // The offset in "pop [ESP+xxx]" is relative to the new ESP value
+ CLANG_FORMAT_COMMENT_ANCHOR;
#if !FEATURE_FIXED_OUT_ARGS
- emitCurStackLvl -= sizeof(int);
+ emitCurStackLvl -= sizeof(int);
#endif
- dst = emitOutputSV(dst, id, insCodeMR(ins));
+ dst = emitOutputSV(dst, id, insCodeMR(ins));
#if !FEATURE_FIXED_OUT_ARGS
- emitCurStackLvl += sizeof(int);
+ emitCurStackLvl += sizeof(int);
#endif
- break;
- }
+ break;
+ }
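A toy illustration of the temporary stack-level adjustment above (made-up addresses, not emitter state): for "pop [esp+disp]" the CPU forms the memory address with ESP already popped one slot higher, so a displacement derived from the tracked level has to drop one slot first to land on the same local.

#include <cstdio>

int main()
{
    const unsigned slot   = 4;      // x86 stack slot, matching the sizeof(int) adjustment above
    unsigned espBeforePop = 0x1000; // hypothetical ESP when the pop starts
    unsigned espUsedByPop = espBeforePop + slot; // pop raises ESP before computing the address
    unsigned localAddr    = 0x1010; // hypothetical address of the destination local

    unsigned dispFromOldLevel = localAddr - espBeforePop; // off by one slot
    unsigned dispToEncode     = localAddr - espUsedByPop; // matches what the hardware does
    printf("disp from pre-pop ESP: 0x%X, disp to encode: 0x%X\n", dispFromOldLevel, dispToEncode);
    return 0;
}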
- dst = emitCodeWithInstructionSize(dst, emitOutputSV(dst, id, insCodeMR(ins)), &callInstrSize);
+ dst = emitCodeWithInstructionSize(dst, emitOutputSV(dst, id, insCodeMR(ins)), &callInstrSize);
- if (ins == INS_call)
- goto IND_CALL;
+ if (ins == INS_call)
+ {
+ goto IND_CALL;
+ }
- break;
+ break;
- case IF_SRD_CNS:
- case IF_SWR_CNS:
- case IF_SRW_CNS:
- emitGetInsCns(id, &cnsVal);
- dst = emitOutputSV(dst, id, insCodeMI(ins), &cnsVal);
- sz = emitSizeOfInsDsc(id);
- break;
+ case IF_SRD_CNS:
+ case IF_SWR_CNS:
+ case IF_SRW_CNS:
+ emitGetInsCns(id, &cnsVal);
+ dst = emitOutputSV(dst, id, insCodeMI(ins), &cnsVal);
+ sz = emitSizeOfInsDsc(id);
+ break;
- case IF_SRW_SHF:
- emitGetInsCns(id, &cnsVal);
- dst = emitOutputSV(dst, id, insCodeMR(ins), &cnsVal);
- sz = emitSizeOfInsDsc(id);
- break;
+ case IF_SRW_SHF:
+ emitGetInsCns(id, &cnsVal);
+ dst = emitOutputSV(dst, id, insCodeMR(ins), &cnsVal);
+ sz = emitSizeOfInsDsc(id);
+ break;
- case IF_RRD_SRD:
- case IF_RWR_SRD:
- case IF_RRW_SRD:
- code = insCodeRM(ins);
+ case IF_RRD_SRD:
+ case IF_RWR_SRD:
+ case IF_RRW_SRD:
+ code = insCodeRM(ins);
- // 4-byte AVX instructions are special cased inside emitOutputSV
- // since they do not have space to encode ModRM byte.
- if (Is4ByteAVXInstruction(ins))
- {
- dst = emitOutputSV(dst, id, code);
- }
- else
- {
+ // 4-byte AVX instructions are special cased inside emitOutputSV
+ // since they do not have space to encode ModRM byte.
+ if (Is4ByteAVXInstruction(ins))
+ {
+ dst = emitOutputSV(dst, id, code);
+ }
+ else
+ {
+ code = AddVexPrefixIfNeeded(ins, code, size);
+
+ // In case of AVX instructions that take 3 operands, encode reg1 as first source.
+ // Note that reg1 is both a source and a destination.
+ //
+ // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For
+ // now we use the single source as source1 and source2.
+ // For this format, moves do not support a third operand, so we only need to handle the binary ops.
+ if (IsThreeOperandBinaryAVXInstruction(ins))
+ {
+ // encode source operand reg in 'vvvv' bits in 1's complement form
+ code = insEncodeReg3456(ins, id->idReg1(), size, code);
+ }
+
+ regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
+ dst = emitOutputSV(dst, id, code | regcode);
+ }
+ break;
+
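A minimal sketch of the 'vvvv' encoding the comment above refers to (simple 0-15 register indices assumed, not the JIT's regNumber mapping): the extra source register of a three-operand AVX instruction is stored inverted, i.e. in one's-complement form, in four bits of the VEX prefix.

#include <cstdint>
#include <cstdio>

// Return the inverted 4-bit register index positioned at bits 6..3,
// where a VEX byte keeps its 'vvvv' field.
static uint8_t encodeVvvv(unsigned regIndex)
{
    return static_cast<uint8_t>((~regIndex & 0xF) << 3);
}

int main()
{
    // Register index 6 -> ~6 & 0xF = 9 -> 0b1001 shifted into bits 6..3.
    printf("vvvv field for reg 6: 0x%02X\n", encodeVvvv(6));
    return 0;
}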
+ case IF_SRD_RRD:
+ case IF_SWR_RRD:
+ case IF_SRW_RRD:
+ code = insCodeMR(ins);
code = AddVexPrefixIfNeeded(ins, code, size);
// In case of AVX instructions that take 3 operands, encode reg1 as first source.
@@ -11114,72 +11114,71 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
}
regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
- dst = emitOutputSV(dst, id, code | regcode);
- }
- break;
-
- case IF_SRD_RRD:
- case IF_SWR_RRD:
- case IF_SRW_RRD:
- code = insCodeMR(ins);
- code = AddVexPrefixIfNeeded(ins, code, size);
-
- // In case of AVX instructions that take 3 operands, encode reg1 as first source.
- // Note that reg1 is both a source and a destination.
- //
- // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For
- // now we use the single source as source1 and source2.
- // For this format, moves do not support a third operand, so we only need to handle the binary ops.
- if (IsThreeOperandBinaryAVXInstruction(ins))
- {
- // encode source operand reg in 'vvvv' bits in 1's compliement form
- code = insEncodeReg3456(ins, id->idReg1(), size, code);
- }
-
- regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
- dst = emitOutputSV(dst, id, code | regcode);
- break;
+ dst = emitOutputSV(dst, id, code | regcode);
+ break;
/********************************************************************/
/* Direct memory address */
/********************************************************************/
- case IF_MRD:
- case IF_MRW:
- case IF_MWR:
+ case IF_MRD:
+ case IF_MRW:
+ case IF_MWR:
#if FEATURE_STACK_FP_X87
- case IF_TRD_MRD:
- case IF_TWR_MRD:
- case IF_TRW_MRD:
+ case IF_TRD_MRD:
+ case IF_TWR_MRD:
+ case IF_TRW_MRD:
- // case IF_MRD_TRD:
- // case IF_MRW_TRD:
- case IF_MWR_TRD:
+ // case IF_MRD_TRD:
+ // case IF_MRW_TRD:
+ case IF_MWR_TRD:
#endif // FEATURE_STACK_FP_X87
- noway_assert(ins != INS_call);
- dst = emitOutputCV(dst, id, insCodeMR(ins) | 0x0500);
- sz = emitSizeOfInsDsc(id);
- break;
+ noway_assert(ins != INS_call);
+ dst = emitOutputCV(dst, id, insCodeMR(ins) | 0x0500);
+ sz = emitSizeOfInsDsc(id);
+ break;
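A hedged sketch of why 0x0500 is OR'd in above (hypothetical opcode value, not the emitter's real tables): the emitter carries the ModRM byte in bits 8..15 of 'code', and ModRM 0x05 (mod=00, reg=000, r/m=101) selects the disp32 / RIP-relative form used for a direct memory operand.

#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t code    = 0x8B;          // hypothetical opcode byte
    uint32_t encoded = code | 0x0500; // plant ModRM = 0x05 into the second byte

    printf("opcode byte: 0x%02X, ModRM byte: 0x%02X\n",
           (unsigned)(encoded & 0xFF), (unsigned)((encoded >> 8) & 0xFF));
    return 0;
}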
- case IF_MRD_OFF:
- dst = emitOutputCV(dst, id, insCodeMI(ins));
- break;
+ case IF_MRD_OFF:
+ dst = emitOutputCV(dst, id, insCodeMI(ins));
+ break;
- case IF_RRD_MRD:
- case IF_RWR_MRD:
- case IF_RRW_MRD:
- code = insCodeRM(ins);
- // Special case 4-byte AVX instructions
- if (Is4ByteAVXInstruction(ins))
- {
- dst = emitOutputCV(dst, id, code);
- }
- else
- {
+ case IF_RRD_MRD:
+ case IF_RWR_MRD:
+ case IF_RRW_MRD:
+ code = insCodeRM(ins);
+ // Special case 4-byte AVX instructions
+ if (Is4ByteAVXInstruction(ins))
+ {
+ dst = emitOutputCV(dst, id, code);
+ }
+ else
+ {
+ code = AddVexPrefixIfNeeded(ins, code, size);
+
+ // In case of AVX instructions that take 3 operands, encode reg1 as first source.
+ // Note that reg1 is both a source and a destination.
+ //
+ // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For
+ // now we use the single source as source1 and source2.
+ // For this format, moves do not support a third operand, so we only need to handle the binary ops.
+ if (IsThreeOperandBinaryAVXInstruction(ins))
+ {
+ // encode source operand reg in 'vvvv' bits in 1's complement form
+ code = insEncodeReg3456(ins, id->idReg1(), size, code);
+ }
+
+ regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
+ dst = emitOutputCV(dst, id, code | regcode | 0x0500);
+ }
+ sz = emitSizeOfInsDsc(id);
+ break;
+
+ case IF_RWR_MRD_OFF:
+ code = insCode(ins);
code = AddVexPrefixIfNeeded(ins, code, size);
// In case of AVX instructions that take 3 operands, encode reg1 as first source.
@@ -11194,71 +11193,49 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
code = insEncodeReg3456(ins, id->idReg1(), size, code);
}
- regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
- dst = emitOutputCV(dst, id, code | regcode | 0x0500);
- }
- sz = emitSizeOfInsDsc(id);
- break;
-
- case IF_RWR_MRD_OFF:
- code = insCode(ins);
- code = AddVexPrefixIfNeeded(ins, code, size);
-
- // In case of AVX instructions that take 3 operands, encode reg1 as first source.
- // Note that reg1 is both a source and a destination.
- //
- // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For
- // now we use the single source as source1 and source2.
- // For this format, moves do not support a third operand, so we only need to handle the binary ops.
- if (IsThreeOperandBinaryAVXInstruction(ins))
- {
- // encode source operand reg in 'vvvv' bits in 1's compliement form
- code = insEncodeReg3456(ins, id->idReg1(), size, code);
- }
-
- regcode = insEncodeReg012(id->idIns(), id->idReg1(), size, &code);
- dst = emitOutputCV(dst, id, code | 0x30 | regcode);
- sz = emitSizeOfInsDsc(id);
- break;
+ regcode = insEncodeReg012(id->idIns(), id->idReg1(), size, &code);
+ dst = emitOutputCV(dst, id, code | 0x30 | regcode);
+ sz = emitSizeOfInsDsc(id);
+ break;
- case IF_MRD_RRD:
- case IF_MWR_RRD:
- case IF_MRW_RRD:
- code = insCodeMR(ins);
+ case IF_MRD_RRD:
+ case IF_MWR_RRD:
+ case IF_MRW_RRD:
+ code = insCodeMR(ins);
#ifdef FEATURE_AVX_SUPPORT
- code = AddVexPrefixIfNeeded(ins, code, size);
+ code = AddVexPrefixIfNeeded(ins, code, size);
- // In case of AVX instructions that take 3 operands, encode reg1 as first source.
- // Note that reg1 is both a source and a destination.
- //
- // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For
- // now we use the single source as source1 and source2.
- // For this format, moves do not support a third operand, so we only need to handle the binary ops.
- if (IsThreeOperandBinaryAVXInstruction(ins))
- {
- // encode source operand reg in 'vvvv' bits in 1's compliement form
- code = insEncodeReg3456(ins, id->idReg1(), size, code);
- }
+ // In case of AVX instructions that take 3 operands, encode reg1 as first source.
+ // Note that reg1 is both a source and a destination.
+ //
+ // TODO-XArch-CQ: Eventually we need to support 3 operand instruction formats. For
+ // now we use the single source as source1 and source2.
+ // For this format, moves do not support a third operand, so we only need to handle the binary ops.
+ if (IsThreeOperandBinaryAVXInstruction(ins))
+ {
+ // encode source operand reg in 'vvvv' bits in 1's complement form
+ code = insEncodeReg3456(ins, id->idReg1(), size, code);
+ }
#endif // FEATURE_AVX_SUPPORT
- regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
- dst = emitOutputCV(dst, id, code | regcode | 0x0500);
- sz = emitSizeOfInsDsc(id);
- break;
+ regcode = (insEncodeReg345(ins, id->idReg1(), size, &code) << 8);
+ dst = emitOutputCV(dst, id, code | regcode | 0x0500);
+ sz = emitSizeOfInsDsc(id);
+ break;
- case IF_MRD_CNS:
- case IF_MWR_CNS:
- case IF_MRW_CNS:
- emitGetInsDcmCns(id, &cnsVal);
- dst = emitOutputCV(dst, id, insCodeMI(ins) | 0x0500, &cnsVal);
- sz = emitSizeOfInsDsc(id);
- break;
+ case IF_MRD_CNS:
+ case IF_MWR_CNS:
+ case IF_MRW_CNS:
+ emitGetInsDcmCns(id, &cnsVal);
+ dst = emitOutputCV(dst, id, insCodeMI(ins) | 0x0500, &cnsVal);
+ sz = emitSizeOfInsDsc(id);
+ break;
- case IF_MRW_SHF:
- emitGetInsDcmCns(id, &cnsVal);
- dst = emitOutputCV(dst, id, insCodeMR(ins) | 0x0500, &cnsVal);
- sz = emitSizeOfInsDsc(id);
- break;
+ case IF_MRW_SHF:
+ emitGetInsDcmCns(id, &cnsVal);
+ dst = emitOutputCV(dst, id, insCodeMR(ins) | 0x0500, &cnsVal);
+ sz = emitSizeOfInsDsc(id);
+ break;
#if FEATURE_STACK_FP_X87
@@ -11266,19 +11243,19 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
/* FP coprocessor stack operands */
/********************************************************************/
- case IF_TRD_FRD:
- case IF_TWR_FRD:
- case IF_TRW_FRD:
- assert(id->idGCref() == GCT_NONE);
- dst += emitOutputWord(dst, insCodeMR(ins) | 0xC000 | (id->idReg1() << 8));
- break;
+ case IF_TRD_FRD:
+ case IF_TWR_FRD:
+ case IF_TRW_FRD:
+ assert(id->idGCref() == GCT_NONE);
+ dst += emitOutputWord(dst, insCodeMR(ins) | 0xC000 | (id->idReg1() << 8));
+ break;
- case IF_FRD_TRD:
- case IF_FWR_TRD:
- case IF_FRW_TRD:
- assert(id->idGCref() == GCT_NONE);
- dst += emitOutputWord(dst, insCodeMR(ins) | 0xC004 | (id->idReg1() << 8));
- break;
+ case IF_FRD_TRD:
+ case IF_FWR_TRD:
+ case IF_FRW_TRD:
+ assert(id->idGCref() == GCT_NONE);
+ dst += emitOutputWord(dst, insCodeMR(ins) | 0xC004 | (id->idReg1() << 8));
+ break;
#endif // FEATURE_STACK_FP_X87
@@ -11286,13 +11263,13 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
/* oops */
/********************************************************************/
- default:
+ default:
-#ifdef DEBUG
- printf("unexpected format %s\n", emitIfName(id->idInsFmt()));
- assert(!"don't know how to encode this instruction");
+#ifdef DEBUG
+ printf("unexpected format %s\n", emitIfName(id->idInsFmt()));
+ assert(!"don't know how to encode this instruction");
#endif
- break;
+ break;
}
// Make sure we set the instruction descriptor size correctly
@@ -11301,42 +11278,41 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
#if !FEATURE_FIXED_OUT_ARGS
// Make sure we keep the current stack level up to date
- if (!emitIGisInProlog(ig) && !emitIGisInEpilog(ig))
+ if (!emitIGisInProlog(ig) && !emitIGisInEpilog(ig))
{
switch (ins)
{
- case INS_push:
- // Please note: {INS_push_hide,IF_LABEL} is used to push the address of the
- // finally block for calling it locally for an op_leave.
- emitStackPush(dst, id->idGCref());
- break;
+ case INS_push:
+ // Please note: {INS_push_hide,IF_LABEL} is used to push the address of the
+ // finally block for calling it locally for an op_leave.
+ emitStackPush(dst, id->idGCref());
+ break;
- case INS_pop:
- emitStackPop(dst, false, /*callInstrSize*/0, 1);
- break;
+ case INS_pop:
+ emitStackPop(dst, false, /*callInstrSize*/ 0, 1);
+ break;
- case INS_sub:
- // Check for "sub ESP, icon"
- if (ins == INS_sub && id->idInsFmt() == IF_RRW_CNS
- && id->idReg1() == REG_ESP)
- {
- assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL);
- emitStackPushN(dst, (unsigned)(emitGetInsSC(id) / sizeof(void*)));
- }
- break;
+ case INS_sub:
+ // Check for "sub ESP, icon"
+ if (ins == INS_sub && id->idInsFmt() == IF_RRW_CNS && id->idReg1() == REG_ESP)
+ {
+ assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL);
+ emitStackPushN(dst, (unsigned)(emitGetInsSC(id) / sizeof(void*)));
+ }
+ break;
- case INS_add:
- // Check for "add ESP, icon"
- if (ins == INS_add && id->idInsFmt() == IF_RRW_CNS
- && id->idReg1() == REG_ESP)
- {
- assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL);
- emitStackPop(dst, /*isCall*/false, /*callInstrSize*/0, (unsigned)(emitGetInsSC(id) / sizeof(void*)));
- }
- break;
+ case INS_add:
+ // Check for "add ESP, icon"
+ if (ins == INS_add && id->idInsFmt() == IF_RRW_CNS && id->idReg1() == REG_ESP)
+ {
+ assert((size_t)emitGetInsSC(id) < 0x00000000FFFFFFFFLL);
+ emitStackPop(dst, /*isCall*/ false, /*callInstrSize*/ 0,
+ (unsigned)(emitGetInsSC(id) / sizeof(void*)));
+ }
+ break;
- default:
- break;
+ default:
+ break;
}
}
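An illustrative sketch of the bookkeeping in the switch above (a stand-alone tracker, not emitCurStackLvl itself): push and pop move the tracked level by one pointer-sized slot, while "sub esp, N" and "add esp, N" move it by N divided by the slot size.

#include <cassert>
#include <cstddef>
#include <cstdio>

struct StackLevelTracker
{
    size_t slots = 0; // tracked depth in pointer-sized slots

    void push()               { slots += 1; }
    void pop(size_t n = 1)    { assert(slots >= n); slots -= n; }
    void subEsp(size_t bytes) { slots += bytes / sizeof(void*); } // stack space allocated
    void addEsp(size_t bytes) { pop(bytes / sizeof(void*)); }     // stack space released
};

int main()
{
    StackLevelTracker t;
    t.push();     // push eax
    t.subEsp(16); // sub esp, 16
    t.addEsp(16); // add esp, 16
    t.pop();      // pop eax
    printf("final depth: %zu slots\n", t.slots);
    return 0;
}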
@@ -11345,12 +11321,12 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
assert((int)emitCurStackLvl >= 0);
// Only epilog "instructions" and some pseudo-instrs
- // are allowed not to generate any code
+ // are allowed not to generate any code
assert(*dp != dst || emitInstHasNoCode(ins));
-#ifdef DEBUG
- if (emitComp->opts.disAsm || emitComp->opts.dspEmit || emitComp->verbose)
+#ifdef DEBUG
+ if (emitComp->opts.disAsm || emitComp->opts.dspEmit || emitComp->verbose)
{
emitDispIns(id, false, dspOffs, true, emitCurCodeOffs(*dp), *dp, (dst - *dp));
}
@@ -11363,13 +11339,13 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
{
printf("Before emitOutputInstr for id->idDebugOnlyInfo()->idNum=0x%02x\n", id->idDebugOnlyInfo()->idNum);
printf(" emitThisGCrefRegs(0x%p)=", emitComp->dspPtr(&emitThisGCrefRegs));
- printRegMaskInt(emitThisGCrefRegs);
- emitDispRegSet (emitThisGCrefRegs);
- printf("\n");
+ printRegMaskInt(emitThisGCrefRegs);
+ emitDispRegSet(emitThisGCrefRegs);
+ printf("\n");
printf(" emitThisByrefRegs(0x%p)=", emitComp->dspPtr(&emitThisByrefRegs));
- printRegMaskInt(emitThisByrefRegs);
- emitDispRegSet (emitThisByrefRegs);
- printf("\n");
+ printRegMaskInt(emitThisByrefRegs);
+ emitDispRegSet(emitThisByrefRegs);
+ printf("\n");
}
// For example, set JitBreakEmitOutputInstr=a6 will break when this method is called for
@@ -11381,7 +11357,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
}
#endif
-#ifdef TRANSLATE_PDB
+#ifdef TRANSLATE_PDB
if (*dp != dst)
{
// only map instruction groups to instruction groups
@@ -11397,7 +11373,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
// INS_mulEAX has implicit target of Edx:Eax. Make sure
// that we detected this cleared its GC-status.
- assert(((RBM_EAX|RBM_EDX) & (emitThisGCrefRegs|emitThisByrefRegs)) == 0);
+ assert(((RBM_EAX | RBM_EDX) & (emitThisGCrefRegs | emitThisByrefRegs)) == 0);
}
if (instrIs3opImul(ins))
@@ -11406,11 +11382,11 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE**
// that we detected the implicit register and cleared its GC-status.
regMaskTP regMask = genRegMask(inst3opImulReg(ins));
- assert((regMask & (emitThisGCrefRegs|emitThisByrefRegs)) == 0);
+ assert((regMask & (emitThisGCrefRegs | emitThisByrefRegs)) == 0);
}
#endif
- return sz;
+ return sz;
}
#ifdef _PREFAST_
#pragma warning(pop)
diff --git a/src/jit/emitxarch.h b/src/jit/emitxarch.h
index 7d0e648c26..dfd7e6ec50 100644
--- a/src/jit/emitxarch.h
+++ b/src/jit/emitxarch.h
@@ -2,508 +2,408 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#if defined(_TARGET_XARCH_)
- /************************************************************************/
- /* Public inline informational methods */
- /************************************************************************/
+/************************************************************************/
+/* Public inline informational methods */
+/************************************************************************/
public:
+inline static bool isGeneralRegister(regNumber reg)
+{
+ return (reg <= REG_INT_LAST);
+}
- inline static bool isGeneralRegister (regNumber reg)
- { return (reg <= REG_INT_LAST); }
-
- inline static bool isFloatReg (regNumber reg)
- { return (reg >= REG_FP_FIRST && reg <= REG_FP_LAST); }
+inline static bool isFloatReg(regNumber reg)
+{
+ return (reg >= REG_FP_FIRST && reg <= REG_FP_LAST);
+}
- inline static bool isDoubleReg (regNumber reg)
- { return isFloatReg(reg); }
+inline static bool isDoubleReg(regNumber reg)
+{
+ return isFloatReg(reg);
+}
- /************************************************************************/
- /* Routines that compute the size of / encode instructions */
- /************************************************************************/
+/************************************************************************/
+/* Routines that compute the size of / encode instructions */
+/************************************************************************/
- struct CnsVal
- {
- ssize_t cnsVal;
+struct CnsVal
+{
+ ssize_t cnsVal;
#ifdef RELOC_SUPPORT
- bool cnsReloc;
+ bool cnsReloc;
#endif
- };
-
- UNATIVE_OFFSET emitInsSize (size_t code);
- UNATIVE_OFFSET emitInsSizeRM (instruction ins);
- UNATIVE_OFFSET emitInsSizeSV (size_t code, int var, int dsp);
- UNATIVE_OFFSET emitInsSizeSV (instrDesc * id, int var, int dsp, int val);
- UNATIVE_OFFSET emitInsSizeRR (instruction ins, regNumber reg1, regNumber reg2, emitAttr attr);
- UNATIVE_OFFSET emitInsSizeAM (instrDesc * id, size_t code);
- UNATIVE_OFFSET emitInsSizeAM (instrDesc * id, size_t code, int val);
- UNATIVE_OFFSET emitInsSizeCV (instrDesc * id, size_t code);
- UNATIVE_OFFSET emitInsSizeCV (instrDesc * id, size_t code, int val);
-
- BYTE * emitOutputAM (BYTE *dst, instrDesc *id, size_t code,
- CnsVal * addc = NULL);
- BYTE * emitOutputSV (BYTE *dst, instrDesc *id, size_t code,
- CnsVal * addc = NULL);
- BYTE * emitOutputCV (BYTE *dst, instrDesc *id, size_t code,
- CnsVal * addc = NULL);
-
- BYTE * emitOutputR (BYTE *dst, instrDesc *id);
- BYTE * emitOutputRI (BYTE *dst, instrDesc *id);
- BYTE * emitOutputRR (BYTE *dst, instrDesc *id);
- BYTE * emitOutputIV (BYTE *dst, instrDesc *id);
+};
+
+UNATIVE_OFFSET emitInsSize(size_t code);
+UNATIVE_OFFSET emitInsSizeRM(instruction ins);
+UNATIVE_OFFSET emitInsSizeSV(size_t code, int var, int dsp);
+UNATIVE_OFFSET emitInsSizeSV(instrDesc* id, int var, int dsp, int val);
+UNATIVE_OFFSET emitInsSizeRR(instruction ins, regNumber reg1, regNumber reg2, emitAttr attr);
+UNATIVE_OFFSET emitInsSizeAM(instrDesc* id, size_t code);
+UNATIVE_OFFSET emitInsSizeAM(instrDesc* id, size_t code, int val);
+UNATIVE_OFFSET emitInsSizeCV(instrDesc* id, size_t code);
+UNATIVE_OFFSET emitInsSizeCV(instrDesc* id, size_t code, int val);
+
+BYTE* emitOutputAM(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc = nullptr);
+BYTE* emitOutputSV(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc = nullptr);
+BYTE* emitOutputCV(BYTE* dst, instrDesc* id, size_t code, CnsVal* addc = nullptr);
+
+BYTE* emitOutputR(BYTE* dst, instrDesc* id);
+BYTE* emitOutputRI(BYTE* dst, instrDesc* id);
+BYTE* emitOutputRR(BYTE* dst, instrDesc* id);
+BYTE* emitOutputIV(BYTE* dst, instrDesc* id);
#ifdef FEATURE_AVX_SUPPORT
- BYTE * emitOutputRRR(BYTE *dst, instrDesc *id);
+BYTE* emitOutputRRR(BYTE* dst, instrDesc* id);
#endif
- BYTE * emitOutputLJ (BYTE *dst, instrDesc *id);
+BYTE* emitOutputLJ(BYTE* dst, instrDesc* id);
- unsigned emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE *dst, size_t &code);
- unsigned emitGetRexPrefixSize(instruction ins);
- unsigned emitGetVexPrefixSize(instruction ins, emitAttr attr);
- unsigned emitGetPrefixSize(size_t code);
- unsigned emitGetVexPrefixAdjustedSize(instruction ins, emitAttr attr, size_t code);
+unsigned emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, size_t& code);
+unsigned emitGetRexPrefixSize(instruction ins);
+unsigned emitGetVexPrefixSize(instruction ins, emitAttr attr);
+unsigned emitGetPrefixSize(size_t code);
+unsigned emitGetVexPrefixAdjustedSize(instruction ins, emitAttr attr, size_t code);
- unsigned insEncodeReg345(instruction ins, regNumber reg, emitAttr size, size_t* code);
- unsigned insEncodeReg012(instruction ins, regNumber reg, emitAttr size, size_t* code);
- size_t insEncodeReg3456(instruction ins, regNumber reg, emitAttr size, size_t code);
- unsigned insEncodeRegSIB(instruction ins, regNumber reg, size_t* code);
+unsigned insEncodeReg345(instruction ins, regNumber reg, emitAttr size, size_t* code);
+unsigned insEncodeReg012(instruction ins, regNumber reg, emitAttr size, size_t* code);
+size_t insEncodeReg3456(instruction ins, regNumber reg, emitAttr size, size_t code);
+unsigned insEncodeRegSIB(instruction ins, regNumber reg, size_t* code);
- size_t insEncodeMRreg(instruction ins, size_t code);
- size_t insEncodeMRreg(instruction ins, regNumber reg, emitAttr size, size_t code);
- size_t insEncodeRRIb(instruction ins, regNumber reg, emitAttr size);
- size_t insEncodeOpreg(instruction ins, regNumber reg, emitAttr size);
+size_t insEncodeMRreg(instruction ins, size_t code);
+size_t insEncodeMRreg(instruction ins, regNumber reg, emitAttr size, size_t code);
+size_t insEncodeRRIb(instruction ins, regNumber reg, emitAttr size);
+size_t insEncodeOpreg(instruction ins, regNumber reg, emitAttr size);
- bool IsAVXInstruction(instruction ins);
- size_t insEncodeMIreg(instruction ins, regNumber reg, emitAttr size, size_t code);
+bool IsAVXInstruction(instruction ins);
+size_t insEncodeMIreg(instruction ins, regNumber reg, emitAttr size, size_t code);
- size_t AddRexWPrefix(instruction ins, size_t code);
- size_t AddRexRPrefix(instruction ins, size_t code);
- size_t AddRexXPrefix(instruction ins, size_t code);
- size_t AddRexBPrefix(instruction ins, size_t code);
- size_t AddRexPrefix(instruction ins, size_t code);
+size_t AddRexWPrefix(instruction ins, size_t code);
+size_t AddRexRPrefix(instruction ins, size_t code);
+size_t AddRexXPrefix(instruction ins, size_t code);
+size_t AddRexBPrefix(instruction ins, size_t code);
+size_t AddRexPrefix(instruction ins, size_t code);
#ifdef FEATURE_AVX_SUPPORT
- // 3-byte VEX prefix starts with byte 0xC4
-#define VEX_PREFIX_MASK_3BYTE 0xC4000000000000LL
- bool TakesVexPrefix(instruction ins);
- // Returns true if the instruction encoding already contains VEX prefix
- bool hasVexPrefix(size_t code)
- {
- return (code & VEX_PREFIX_MASK_3BYTE) != 0;
- }
- size_t AddVexPrefix(instruction ins, size_t code, emitAttr attr);
- size_t AddVexPrefixIfNeeded(instruction ins, size_t code, emitAttr size)
- {
- if (TakesVexPrefix(ins))
- {
- code = AddVexPrefix(ins, code, size);
- }
- return code;
- }
- size_t AddVexPrefixIfNeededAndNotPresent(instruction ins, size_t code, emitAttr size)
+// 3-byte VEX prefix starts with byte 0xC4
+#define VEX_PREFIX_MASK_3BYTE 0xC4000000000000LL
+bool TakesVexPrefix(instruction ins);
+// Returns true if the instruction encoding already contains VEX prefix
+bool hasVexPrefix(size_t code)
+{
+ return (code & VEX_PREFIX_MASK_3BYTE) != 0;
+}
+size_t AddVexPrefix(instruction ins, size_t code, emitAttr attr);
+size_t AddVexPrefixIfNeeded(instruction ins, size_t code, emitAttr size)
+{
+ if (TakesVexPrefix(ins))
{
- if (TakesVexPrefix(ins) && !hasVexPrefix(code))
- {
- code = AddVexPrefix(ins, code, size);
- }
- return code;
+ code = AddVexPrefix(ins, code, size);
}
- bool useAVXEncodings;
- bool UseAVX() { return useAVXEncodings; }
- void SetUseAVX(bool value) { useAVXEncodings = value; }
- bool IsThreeOperandBinaryAVXInstruction(instruction ins);
- bool IsThreeOperandMoveAVXInstruction(instruction ins);
- bool IsThreeOperandAVXInstruction(instruction ins)
+ return code;
+}
+size_t AddVexPrefixIfNeededAndNotPresent(instruction ins, size_t code, emitAttr size)
+{
+ if (TakesVexPrefix(ins) && !hasVexPrefix(code))
{
- return (IsThreeOperandBinaryAVXInstruction(ins) || IsThreeOperandMoveAVXInstruction(ins));
+ code = AddVexPrefix(ins, code, size);
}
-#else // !FEATURE_AVX_SUPPORT
- bool UseAVX() { return false; }
- bool hasVexPrefix(size_t code) { return false; }
- bool IsThreeOperandBinaryAVXInstruction(instruction ins) { return false; }
- bool IsThreeOperandMoveAVXInstruction(instruction ins) { return false; }
- bool IsThreeOperandAVXInstruction(instruction ins) { return false; }
- bool TakesVexPrefix(instruction ins) { return false; }
- size_t AddVexPrefixIfNeeded(instruction ins, size_t code, emitAttr attr) { return code; }
- size_t AddVexPrefixIfNeededAndNotPresent(instruction ins, size_t code, emitAttr size) { return code; }
+ return code;
+}
+bool useAVXEncodings;
+bool UseAVX()
+{
+ return useAVXEncodings;
+}
+void SetUseAVX(bool value)
+{
+ useAVXEncodings = value;
+}
+bool IsThreeOperandBinaryAVXInstruction(instruction ins);
+bool IsThreeOperandMoveAVXInstruction(instruction ins);
+bool IsThreeOperandAVXInstruction(instruction ins)
+{
+ return (IsThreeOperandBinaryAVXInstruction(ins) || IsThreeOperandMoveAVXInstruction(ins));
+}
+#else // !FEATURE_AVX_SUPPORT
+bool UseAVX()
+{
+ return false;
+}
+bool hasVexPrefix(size_t code)
+{
+ return false;
+}
+bool IsThreeOperandBinaryAVXInstruction(instruction ins)
+{
+ return false;
+}
+bool IsThreeOperandMoveAVXInstruction(instruction ins)
+{
+ return false;
+}
+bool IsThreeOperandAVXInstruction(instruction ins)
+{
+ return false;
+}
+bool TakesVexPrefix(instruction ins)
+{
+ return false;
+}
+size_t AddVexPrefixIfNeeded(instruction ins, size_t code, emitAttr attr)
+{
+ return code;
+}
+size_t AddVexPrefixIfNeededAndNotPresent(instruction ins, size_t code, emitAttr size)
+{
+ return code;
+}
#endif // !FEATURE_AVX_SUPPORT
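A rough, self-contained rendering of the prefix bookkeeping declared above (constants assumed to mirror VEX_PREFIX_MASK_3BYTE, not the emitter's real code path): the 3-byte VEX escape 0xC4 is parked in the upper bytes of 'code', so a mask test shows whether a prefix has already been attached, which keeps the add-if-needed helper idempotent.

#include <cstdint>
#include <cstdio>

constexpr uint64_t kVexMask3Byte = 0xC4000000000000ULL; // 0xC4 escape parked high

static bool hasVex(uint64_t code)
{
    return (code & kVexMask3Byte) != 0;
}

static uint64_t addVexIfMissing(uint64_t code)
{
    return hasVex(code) ? code : (code | kVexMask3Byte);
}

int main()
{
    uint64_t code = 0x660F58;     // hypothetical SSE opcode bytes
    code = addVexIfMissing(code);
    code = addVexIfMissing(code); // second call is a no-op
    printf("has VEX: %d\n", hasVex(code) ? 1 : 0);
    return 0;
}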
- /************************************************************************/
- /* Debug-only routines to display instructions */
- /************************************************************************/
+/************************************************************************/
+/* Debug-only routines to display instructions */
+/************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
- const char * emitFPregName (unsigned reg,
- bool varName = true);
+const char* emitFPregName(unsigned reg, bool varName = true);
- void emitDispReloc (ssize_t value);
- void emitDispAddrMode(instrDesc *id, bool noDetail = false);
- void emitDispShift (instruction ins, int cnt = 0);
+void emitDispReloc(ssize_t value);
+void emitDispAddrMode(instrDesc* id, bool noDetail = false);
+void emitDispShift(instruction ins, int cnt = 0);
- void emitDispIns (instrDesc *id, bool isNew, bool doffs, bool asmfm,
- unsigned offs = 0, BYTE * code = 0, size_t sz = 0,
- insGroup *ig = NULL);
+void emitDispIns(instrDesc* id,
+ bool isNew,
+ bool doffs,
+ bool asmfm,
+ unsigned offs = 0,
+ BYTE* code = nullptr,
+ size_t sz = 0,
+ insGroup* ig = nullptr);
- const char * emitXMMregName (unsigned reg);
- const char * emitYMMregName (unsigned reg);
+const char* emitXMMregName(unsigned reg);
+const char* emitYMMregName(unsigned reg);
#endif
- /************************************************************************/
- /* Private members that deal with target-dependent instr. descriptors */
- /************************************************************************/
+/************************************************************************/
+/* Private members that deal with target-dependent instr. descriptors */
+/************************************************************************/
private:
-
- void emitSetAmdDisp (instrDescAmd* id, ssize_t dsp);
- instrDesc *emitNewInstrAmd (emitAttr attr, ssize_t dsp);
- instrDesc *emitNewInstrAmdCns (emitAttr attr, ssize_t dsp, int cns);
-
- instrDesc *emitNewInstrCallDir (int argCnt,
- VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize));
-
- instrDesc *emitNewInstrCallInd( int argCnt,
- ssize_t disp,
- VARSET_VALARG_TP GCvars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize));
-
- void emitGetInsCns (instrDesc *id, CnsVal *cv);
- ssize_t emitGetInsAmdCns(instrDesc *id, CnsVal *cv);
- void emitGetInsDcmCns(instrDesc *id, CnsVal *cv);
- ssize_t emitGetInsAmdAny(instrDesc *id);
-
- /************************************************************************/
- /* Private helpers for instruction output */
- /************************************************************************/
+void emitSetAmdDisp(instrDescAmd* id, ssize_t dsp);
+instrDesc* emitNewInstrAmd(emitAttr attr, ssize_t dsp);
+instrDesc* emitNewInstrAmdCns(emitAttr attr, ssize_t dsp, int cns);
+
+instrDesc* emitNewInstrCallDir(int argCnt,
+ VARSET_VALARG_TP GCvars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize));
+
+instrDesc* emitNewInstrCallInd(int argCnt,
+ ssize_t disp,
+ VARSET_VALARG_TP GCvars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize));
+
+void emitGetInsCns(instrDesc* id, CnsVal* cv);
+ssize_t emitGetInsAmdCns(instrDesc* id, CnsVal* cv);
+void emitGetInsDcmCns(instrDesc* id, CnsVal* cv);
+ssize_t emitGetInsAmdAny(instrDesc* id);
+
+/************************************************************************/
+/* Private helpers for instruction output */
+/************************************************************************/
private:
+insFormat emitInsModeFormat(instruction ins, insFormat base, insFormat FPld, insFormat FPst);
- insFormat emitInsModeFormat(instruction ins, insFormat base,
- insFormat FPld,
- insFormat FPst);
+bool emitVerifyEncodable(instruction ins, emitAttr size, regNumber reg1, regNumber reg2 = REG_NA);
- bool emitVerifyEncodable(instruction ins,
- emitAttr size,
- regNumber reg1,
- regNumber reg2 = REG_NA);
+bool emitInsCanOnlyWriteSSE2OrAVXReg(instrDesc* id);
- bool emitInsCanOnlyWriteSSE2OrAVXReg(instrDesc *id);
+/*****************************************************************************
+*
+* Convert between an index scale in bytes to a smaller encoding used for
+* storage in instruction descriptors.
+*/
- /*****************************************************************************
- *
- * Convert between an index scale in bytes to a smaller encoding used for
- * storage in instruction descriptors.
- */
+inline emitter::opSize emitEncodeScale(size_t scale)
+{
+ assert(scale == 1 || scale == 2 || scale == 4 || scale == 8);
- inline emitter::opSize emitEncodeScale(size_t scale)
- {
- assert(scale == 1 || scale == 2 || scale == 4 || scale == 8);
+ return emitSizeEncode[scale - 1];
+}
- return emitSizeEncode[scale-1];
- }
+inline emitAttr emitDecodeScale(unsigned ensz)
+{
+ assert(ensz < 4);
- inline emitAttr emitDecodeScale(unsigned ensz)
- {
- assert(ensz < 4);
+ return emitter::emitSizeDecode[ensz];
+}
- return emitter::emitSizeDecode[ensz];
- }
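A compact sketch of the scale mapping behind the two helpers above (table contents assumed; the real emitSizeEncode/emitSizeDecode live elsewhere in the emitter): an index scale of 1, 2, 4, or 8 bytes collapses to a 2-bit code for storage in the descriptor and expands back on decode.

#include <cassert>
#include <cstdio>

static unsigned encodeScale(unsigned scale)
{
    assert(scale == 1 || scale == 2 || scale == 4 || scale == 8);
    static const unsigned table[8] = {0, 1, 0, 2, 0, 0, 0, 3}; // indexed by scale - 1
    return table[scale - 1];
}

static unsigned decodeScale(unsigned enc)
{
    assert(enc < 4);
    return 1u << enc; // 0->1, 1->2, 2->4, 3->8
}

int main()
{
    const unsigned scales[] = {1, 2, 4, 8};
    for (unsigned s : scales)
    {
        printf("scale %u -> code %u -> %u\n", s, encodeScale(s), decodeScale(encodeScale(s)));
    }
    return 0;
}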
+/************************************************************************/
+/* The public entry points to output instructions */
+/************************************************************************/
+
+public:
+void emitLoopAlign();
+void emitIns(instruction ins);
- /************************************************************************/
- /* The public entry points to output instructions */
- /************************************************************************/
+void emitIns(instruction ins, emitAttr attr);
-public:
+void emitInsRMW(instruction inst, emitAttr attr, GenTreeStoreInd* storeInd, GenTreePtr src);
+
+void emitInsRMW(instruction inst, emitAttr attr, GenTreeStoreInd* storeInd);
+
+void emitIns_Nop(unsigned size);
+
+void emitIns_I(instruction ins, emitAttr attr, int val);
+
+void emitIns_R(instruction ins, emitAttr attr, regNumber reg);
+
+void emitIns_C(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fdlHnd, int offs);
+
+void emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t val);
+
+void emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2);
+
+void emitIns_R_R_I(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int ival);
+
+#ifdef FEATURE_AVX_SUPPORT
+void emitIns_R_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3);
+#endif
+
+void emitIns_S(instruction ins, emitAttr attr, int varx, int offs);
+
+void emitIns_S_R(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs);
+
+void emitIns_R_S(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs);
+
+void emitIns_S_I(instruction ins, emitAttr attr, int varx, int offs, int val);
- void emitLoopAlign ();
-
- void emitIns (instruction ins);
-
- void emitIns (instruction ins, emitAttr attr);
-
- void emitInsRMW (instruction inst, emitAttr attr, GenTreeStoreInd* storeInd, GenTreePtr src);
-
- void emitInsRMW (instruction inst, emitAttr attr, GenTreeStoreInd* storeInd);
-
- void emitIns_Nop (unsigned size);
-
- void emitIns_I (instruction ins,
- emitAttr attr,
- int val);
-
- void emitIns_R (instruction ins,
- emitAttr attr,
- regNumber reg);
-
- void emitIns_C (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fdlHnd,
- int offs);
-
- void emitIns_R_I (instruction ins,
- emitAttr attr,
- regNumber reg,
- ssize_t val);
-
- void emitIns_R_R (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2);
-
- void emitIns_R_R_I (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- int ival);
-
-#ifdef FEATURE_AVX_SUPPORT
- void emitIns_R_R_R (instruction ins,
- emitAttr attr,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3);
-#endif
-
- void emitIns_S (instruction ins,
- emitAttr attr,
- int varx,
- int offs);
-
- void emitIns_S_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- int varx,
- int offs);
-
- void emitIns_R_S (instruction ins,
- emitAttr attr,
- regNumber ireg,
- int varx,
- int offs);
-
- void emitIns_S_I (instruction ins,
- emitAttr attr,
- int varx,
- int offs,
- int val);
-
- void emitIns_R_C (instruction ins,
- emitAttr attr,
- regNumber reg,
- CORINFO_FIELD_HANDLE fldHnd,
- int offs);
-
- void emitIns_C_R (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fldHnd,
- regNumber reg,
- int offs);
-
- void emitIns_C_I (instruction ins,
- emitAttr attr,
- CORINFO_FIELD_HANDLE fdlHnd,
- int offs,
- int val);
-
- void emitIns_IJ (emitAttr attr,
- regNumber reg,
- unsigned base);
-
- void emitIns_J_S (instruction ins,
- emitAttr attr,
- BasicBlock *dst,
- int varx,
- int offs);
-
- void emitIns_R_L (instruction ins,
- emitAttr attr,
- BasicBlock * dst,
- regNumber reg);
-
- void emitIns_R_D (instruction ins,
- emitAttr attr,
- unsigned offs,
- regNumber reg);
-
- void emitIns_I_AR (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- int offs,
- int memCookie = 0,
- void * clsCookie = NULL);
-
- void emitIns_I_AI (instruction ins,
- emitAttr attr,
- int val,
- ssize_t disp);
-
- void emitIns_R_AR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- int offs,
- int memCookie = 0,
- void * clsCookie = NULL);
-
- void emitIns_R_AI (instruction ins,
- emitAttr attr,
- regNumber ireg,
- ssize_t disp);
-
- void emitIns_AR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- int offs,
- int memCookie = 0,
- void * clsCookie = NULL);
-
- void emitIns_AI_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- ssize_t disp);
-
- void emitIns_I_ARR (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- regNumber rg2,
- int disp);
-
- void emitIns_R_ARR (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- int disp);
-
- void emitIns_ARR_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- int disp);
-
- void emitIns_I_ARX (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- regNumber rg2,
- unsigned mul,
- int disp);
-
- void emitIns_R_ARX (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- unsigned mul,
- int disp);
-
- void emitIns_ARX_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- regNumber rg2,
- unsigned mul,
- int disp);
-
- void emitIns_I_AX (instruction ins,
- emitAttr attr,
- int val,
- regNumber reg,
- unsigned mul,
- int disp);
-
- void emitIns_R_AX (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- unsigned mul,
- int disp);
-
- void emitIns_AX_R (instruction ins,
- emitAttr attr,
- regNumber ireg,
- regNumber reg,
- unsigned mul,
- int disp);
+void emitIns_R_C(instruction ins, emitAttr attr, regNumber reg, CORINFO_FIELD_HANDLE fldHnd, int offs);
+
+void emitIns_C_R(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fldHnd, regNumber reg, int offs);
+
+void emitIns_C_I(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE fdlHnd, int offs, int val);
+
+void emitIns_IJ(emitAttr attr, regNumber reg, unsigned base);
+
+void emitIns_J_S(instruction ins, emitAttr attr, BasicBlock* dst, int varx, int offs);
+
+void emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg);
+
+void emitIns_R_D(instruction ins, emitAttr attr, unsigned offs, regNumber reg);
+
+void emitIns_I_AR(
+ instruction ins, emitAttr attr, int val, regNumber reg, int offs, int memCookie = 0, void* clsCookie = nullptr);
+
+void emitIns_I_AI(instruction ins, emitAttr attr, int val, ssize_t disp);
+
+void emitIns_R_AR(instruction ins,
+ emitAttr attr,
+ regNumber ireg,
+ regNumber reg,
+ int offs,
+ int memCookie = 0,
+ void* clsCookie = nullptr);
+
+void emitIns_R_AI(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp);
+
+void emitIns_AR_R(instruction ins,
+ emitAttr attr,
+ regNumber ireg,
+ regNumber reg,
+ int offs,
+ int memCookie = 0,
+ void* clsCookie = nullptr);
+
+void emitIns_AI_R(instruction ins, emitAttr attr, regNumber ireg, ssize_t disp);
+
+void emitIns_I_ARR(instruction ins, emitAttr attr, int val, regNumber reg, regNumber rg2, int disp);
+
+void emitIns_R_ARR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, int disp);
+
+void emitIns_ARR_R(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, int disp);
+
+void emitIns_I_ARX(instruction ins, emitAttr attr, int val, regNumber reg, regNumber rg2, unsigned mul, int disp);
+
+void emitIns_R_ARX(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, unsigned mul, int disp);
+
+void emitIns_ARX_R(
+ instruction ins, emitAttr attr, regNumber ireg, regNumber reg, regNumber rg2, unsigned mul, int disp);
+
+void emitIns_I_AX(instruction ins, emitAttr attr, int val, regNumber reg, unsigned mul, int disp);
+
+void emitIns_R_AX(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, unsigned mul, int disp);
+
+void emitIns_AX_R(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, unsigned mul, int disp);
#if FEATURE_STACK_FP_X87
- void emitIns_F_F0 (instruction ins,
- unsigned fpreg);
+void emitIns_F_F0(instruction ins, unsigned fpreg);
- void emitIns_F0_F (instruction ins,
- unsigned fpreg);
+void emitIns_F0_F(instruction ins, unsigned fpreg);
#endif // FEATURE_STACK_FP_X87
- enum EmitCallType
- {
- EC_FUNC_TOKEN, // Direct call to a helper/static/nonvirtual/global method
- EC_FUNC_TOKEN_INDIR, // Indirect call to a helper/static/nonvirtual/global method
- EC_FUNC_ADDR, // Direct call to an absolute address
-
- EC_FUNC_VIRTUAL, // Call to a virtual method (using the vtable)
- EC_INDIR_R, // Indirect call via register
- EC_INDIR_SR, // Indirect call via stack-reference (local var)
- EC_INDIR_C, // Indirect call via static class var
- EC_INDIR_ARD, // Indirect call via an addressing mode
-
- EC_COUNT
- };
-
- void emitIns_Call (EmitCallType callType,
- CORINFO_METHOD_HANDLE methHnd,
- CORINFO_SIG_INFO* sigInfo, // used to report call sites to the EE
- void* addr,
- ssize_t argSize,
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
- VARSET_VALARG_TP ptrVars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- GenTreeIndir * indir,
- bool isJump = false,
- bool isNoGC = false);
-
- void emitIns_Call (EmitCallType callType,
- CORINFO_METHOD_HANDLE methHnd,
- INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
- void* addr,
- ssize_t argSize,
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
- VARSET_VALARG_TP ptrVars,
- regMaskTP gcrefRegs,
- regMaskTP byrefRegs,
- IL_OFFSETX ilOffset = BAD_IL_OFFSET,
- regNumber ireg = REG_NA,
- regNumber xreg = REG_NA,
- unsigned xmul = 0,
- ssize_t disp = 0,
- bool isJump = false,
- bool isNoGC = false);
+enum EmitCallType
+{
+ EC_FUNC_TOKEN, // Direct call to a helper/static/nonvirtual/global method
+ EC_FUNC_TOKEN_INDIR, // Indirect call to a helper/static/nonvirtual/global method
+ EC_FUNC_ADDR, // Direct call to an absolute address
+
+ EC_FUNC_VIRTUAL, // Call to a virtual method (using the vtable)
+ EC_INDIR_R, // Indirect call via register
+ EC_INDIR_SR, // Indirect call via stack-reference (local var)
+ EC_INDIR_C, // Indirect call via static class var
+ EC_INDIR_ARD, // Indirect call via an addressing mode
+
+ EC_COUNT
+};
+
+void emitIns_Call(EmitCallType callType,
+ CORINFO_METHOD_HANDLE methHnd,
+ CORINFO_SIG_INFO* sigInfo, // used to report call sites to the EE
+ void* addr,
+ ssize_t argSize,
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
+ VARSET_VALARG_TP ptrVars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ GenTreeIndir* indir,
+ bool isJump = false,
+ bool isNoGC = false);
+
+void emitIns_Call(EmitCallType callType,
+ CORINFO_METHOD_HANDLE methHnd,
+ INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
+ void* addr,
+ ssize_t argSize,
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
+ VARSET_VALARG_TP ptrVars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ IL_OFFSETX ilOffset = BAD_IL_OFFSET,
+ regNumber ireg = REG_NA,
+ regNumber xreg = REG_NA,
+ unsigned xmul = 0,
+ ssize_t disp = 0,
+ bool isJump = false,
+ bool isNoGC = false);
#ifdef _TARGET_AMD64_
- // Is the last instruction emitted a call instruction?
- bool emitIsLastInsCall();
+// Is the last instruction emitted a call instruction?
+bool emitIsLastInsCall();
- // Insert a NOP at the end of the the current instruction group if the last emitted instruction was a 'call',
- // because the next instruction group will be an epilog.
- void emitOutputPreEpilogNOP();
+// Insert a NOP at the end of the current instruction group if the last emitted instruction was a 'call',
+// because the next instruction group will be an epilog.
+void emitOutputPreEpilogNOP();
#endif // _TARGET_AMD64_
/*****************************************************************************
@@ -511,13 +411,13 @@ public:
* Given a jump, return true if it's a conditional jump.
*/
-inline bool emitIsCondJump(instrDesc *jmp)
+inline bool emitIsCondJump(instrDesc* jmp)
{
- instruction ins = jmp->idIns();
+ instruction ins = jmp->idIns();
assert(jmp->idInsFmt() == IF_LABEL);
- return (ins != INS_call && ins != INS_jmp);
+ return (ins != INS_call && ins != INS_jmp);
}
/*****************************************************************************
@@ -525,13 +425,13 @@ inline bool emitIsCondJump(instrDesc *jmp)
* Given a jump, return true if it's an unconditional jump.
*/
-inline bool emitIsUncondJump(instrDesc *jmp)
+inline bool emitIsUncondJump(instrDesc* jmp)
{
- instruction ins = jmp->idIns();
+ instruction ins = jmp->idIns();
assert(jmp->idInsFmt() == IF_LABEL);
- return (ins == INS_jmp);
+ return (ins == INS_jmp);
}
#endif // _TARGET_XARCH_
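A tiny hedged sketch of the classification the two inline helpers above perform, using a stand-in instruction enum rather than the JIT's instruction set: under the IF_LABEL format, anything that is neither a call nor an unconditional jmp is treated as a conditional jump.

#include <cstdio>

enum Ins // stand-in values, not the JIT's INS_* enumeration
{
    CALL,
    JMP,
    JE,
    JNE
};

static bool isCondJump(Ins ins)
{
    return ins != CALL && ins != JMP;
}

static bool isUncondJump(Ins ins)
{
    return ins == JMP;
}

int main()
{
    printf("je:  cond=%d uncond=%d\n", isCondJump(JE), isUncondJump(JE));
    printf("jmp: cond=%d uncond=%d\n", isCondJump(JMP), isUncondJump(JMP));
    return 0;
}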
diff --git a/src/jit/error.cpp b/src/jit/error.cpp
index 3d50f67e81..71c3301045 100644
--- a/src/jit/error.cpp
+++ b/src/jit/error.cpp
@@ -98,19 +98,18 @@ void DECLSPEC_NORETURN noWayAssertBody()
fatal(CORJIT_RECOVERABLEERROR);
}
-
inline static bool ShouldThrowOnNoway(
#ifdef FEATURE_TRACELOGGING
- const char* filename, unsigned line
+ const char* filename, unsigned line
#endif
-)
+ )
{
- return JitTls::GetCompiler() == NULL ||
- JitTls::GetCompiler()->compShouldThrowOnNoway(
+ return JitTls::GetCompiler() == nullptr ||
+ JitTls::GetCompiler()->compShouldThrowOnNoway(
#ifdef FEATURE_TRACELOGGING
- filename, line
+ filename, line
#endif
- );
+ );
}
/*****************************************************************************/
@@ -118,7 +117,7 @@ void noWayAssertBodyConditional(
#ifdef FEATURE_TRACELOGGING
const char* filename, unsigned line
#endif
-)
+ )
{
#ifdef FEATURE_TRACELOGGING
if (ShouldThrowOnNoway(filename, line))
@@ -133,38 +132,29 @@ void noWayAssertBodyConditional(
#if !defined(_TARGET_X86_) || !defined(LEGACY_BACKEND)
/*****************************************************************************/
-void notYetImplemented(const char * msg, const char * filename, unsigned line)
+void notYetImplemented(const char* msg, const char* filename, unsigned line)
{
#if FUNC_INFO_LOGGING
#ifdef DEBUG
LogEnv* env = JitTls::GetLogEnv();
- if (env != NULL)
+ if (env != nullptr)
{
const Compiler* const pCompiler = env->compiler;
if (pCompiler->verbose)
{
- printf("\n\n%s - NYI (%s:%d - %s)\n", pCompiler->info.compFullName,
- filename,
- line,
- msg);
+ printf("\n\n%s - NYI (%s:%d - %s)\n", pCompiler->info.compFullName, filename, line, msg);
}
}
- if (Compiler::compJitFuncInfoFile != NULL)
+ if (Compiler::compJitFuncInfoFile != nullptr)
{
fprintf(Compiler::compJitFuncInfoFile, "%s - NYI (%s:%d - %s)\n",
- (env == NULL) ? "UNKNOWN" : env->compiler->info.compFullName,
- filename,
- line,
- msg);
+ (env == nullptr) ? "UNKNOWN" : env->compiler->info.compFullName, filename, line, msg);
fflush(Compiler::compJitFuncInfoFile);
}
-#else // !DEBUG
- if (Compiler::compJitFuncInfoFile != NULL)
+#else // !DEBUG
+ if (Compiler::compJitFuncInfoFile != nullptr)
{
- fprintf(Compiler::compJitFuncInfoFile, "NYI (%s:%d - %s)\n",
- filename,
- line,
- msg);
+ fprintf(Compiler::compJitFuncInfoFile, "NYI (%s:%d - %s)\n", filename, line, msg);
fflush(Compiler::compJitFuncInfoFile);
}
#endif // !DEBUG
@@ -177,7 +167,7 @@ void notYetImplemented(const char * msg, const char * filename, unsigned line)
// Assume we're within a compFunctionTrace boundary, which might not be true.
pCompiler->compFunctionTraceEnd(nullptr, 0, true);
}
-#endif // DEBUG
+#endif // DEBUG
DWORD value = JitConfig.AltJitAssertOnNYI();
@@ -208,19 +198,21 @@ void notYetImplemented(const char * msg, const char * filename, unsigned line)
/*****************************************************************************/
LONG __JITfilter(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
- DWORD exceptCode = pExceptionPointers->ExceptionRecord->ExceptionCode;
+ DWORD exceptCode = pExceptionPointers->ExceptionRecord->ExceptionCode;
- if (exceptCode == FATAL_JIT_EXCEPTION)
+ if (exceptCode == FATAL_JIT_EXCEPTION)
{
- ErrorTrapParam * pParam = (ErrorTrapParam *)lpvParam;
+ ErrorTrapParam* pParam = (ErrorTrapParam*)lpvParam;
assert(pExceptionPointers->ExceptionRecord->NumberParameters == 1);
pParam->errc = (int)pExceptionPointers->ExceptionRecord->ExceptionInformation[0];
- ICorJitInfo * jitInfo = pParam->jitInfo;
+ ICorJitInfo* jitInfo = pParam->jitInfo;
- if (jitInfo != NULL)
+ if (jitInfo != nullptr)
+ {
jitInfo->reportFatalError((CorJitResult)pParam->errc);
+ }
return EXCEPTION_EXECUTE_HANDLER;
}
@@ -237,62 +229,65 @@ DWORD getBreakOnBadCode()
}
/*****************************************************************************/
-void debugError(const char* msg, const char* file, unsigned line)
+void debugError(const char* msg, const char* file, unsigned line)
{
const char* tail = strrchr(file, '\\');
- if (tail) file = tail+1;
+ if (tail)
+ {
+ file = tail + 1;
+ }
LogEnv* env = JitTls::GetLogEnv();
- logf(LL_ERROR, "COMPILATION FAILED: file: %s:%d compiling method %s reason %s\n", file, line, env->compiler->info.compFullName, msg);
+ logf(LL_ERROR, "COMPILATION FAILED: file: %s:%d compiling method %s reason %s\n", file, line,
+ env->compiler->info.compFullName, msg);
// We now only assert when user explicitly set ComPlus_JitRequired=1
// If ComPlus_JitRequired is 0 or is not set, we will not assert.
if (JitConfig.JitRequired() == 1 || getBreakOnBadCode())
{
- // Don't assert if verification is done.
+ // Don't assert if verification is done.
if (!env->compiler->tiVerificationNeeded || getBreakOnBadCode())
+ {
assertAbort(msg, "NO-FILE", 0);
+ }
}
BreakIfDebuggerPresent();
}
-
/*****************************************************************************/
-LogEnv::LogEnv(ICorJitInfo* aCompHnd)
- : compHnd(aCompHnd)
- , compiler(nullptr)
+LogEnv::LogEnv(ICorJitInfo* aCompHnd) : compHnd(aCompHnd), compiler(nullptr)
{
}
/*****************************************************************************/
-extern "C"
-void __cdecl assertAbort(const char *why, const char *file, unsigned line)
+extern "C" void __cdecl assertAbort(const char* why, const char* file, unsigned line)
{
- const char* msg = why;
- LogEnv* env = JitTls::GetLogEnv();
- const int BUFF_SIZE = 8192;
- char *buff = (char*)alloca(BUFF_SIZE);
- if (env->compiler) {
- _snprintf_s(buff, BUFF_SIZE, _TRUNCATE, "Assertion failed '%s' in '%s' (IL size %d)\n", why, env->compiler->info.compFullName, env->compiler->info.compILCodeSize);
+ const char* msg = why;
+ LogEnv* env = JitTls::GetLogEnv();
+ const int BUFF_SIZE = 8192;
+ char* buff = (char*)alloca(BUFF_SIZE);
+ if (env->compiler)
+ {
+ _snprintf_s(buff, BUFF_SIZE, _TRUNCATE, "Assertion failed '%s' in '%s' (IL size %d)\n", why,
+ env->compiler->info.compFullName, env->compiler->info.compILCodeSize);
msg = buff;
}
- printf(""); // null string means flush
+ printf(""); // null string means flush
#if FUNC_INFO_LOGGING
- if (Compiler::compJitFuncInfoFile != NULL)
+ if (Compiler::compJitFuncInfoFile != nullptr)
{
fprintf(Compiler::compJitFuncInfoFile, "%s - Assertion failed (%s:%d - %s)\n",
- (env == NULL) ? "UNKNOWN" : env->compiler->info.compFullName,
- file,
- line,
- why);
+ (env == nullptr) ? "UNKNOWN" : env->compiler->info.compFullName, file, line, why);
}
#endif // FUNC_INFO_LOGGING
if (env->compHnd->doAssert(file, line, msg))
- DebugBreak();
+ {
+ DebugBreak();
+ }
#ifdef ALT_JIT
// If we hit an assert, and we got here, it's either because the user hit "ignore" on the
@@ -310,7 +305,7 @@ void __cdecl assertAbort(const char *why, const char *file, unsigned line)
#elif defined(_TARGET_ARM64_)
// TODO-ARM64-NYI: remove this after the JIT no longer asserts during startup
//
- // When we are bringing up the new Arm64 JIT we set COMPlus_ContinueOnAssert=1
+ // When we are bringing up the new Arm64 JIT we set COMPlus_ContinueOnAssert=1
// We only want to hit one assert then we will fall back to the interpreter.
//
bool interpreterFallback = (JitConfig.InterpreterFallback() != 0);
@@ -319,14 +314,14 @@ void __cdecl assertAbort(const char *why, const char *file, unsigned line)
{
fatal(CORJIT_SKIPPED);
}
-#endif
+#endif
}
/*********************************************************************/
-BOOL vlogf(unsigned level, const char* fmt, va_list args)
+BOOL vlogf(unsigned level, const char* fmt, va_list args)
{
return JitTls::GetLogEnv()->compHnd->logMsg(level, fmt, args);
-}
+}
int vflogf(FILE* file, const char* fmt, va_list args)
{
@@ -338,14 +333,14 @@ int vflogf(FILE* file, const char* fmt, va_list args)
}
const int BUFF_SIZE = 8192;
- char buffer[BUFF_SIZE];
- int written = _vsnprintf_s(&buffer[0], BUFF_SIZE, _TRUNCATE, fmt, args);
+ char buffer[BUFF_SIZE];
+ int written = _vsnprintf_s(&buffer[0], BUFF_SIZE, _TRUNCATE, fmt, args);
if (JitConfig.JitDumpToDebugger())
{
OutputDebugStringA(buffer);
}
-
+
// We use fputs here so that this executes as fast a possible
fputs(&buffer[0], file);
return written;
@@ -363,24 +358,26 @@ int flogf(FILE* file, const char* fmt, ...)
/*********************************************************************/
int logf(const char* fmt, ...)
{
- va_list args;
+ va_list args;
static bool logToEEfailed = false;
- int written = 0;
+ int written = 0;
//
// We remember when the EE failed to log, because vlogf()
// is very slow in a checked build.
//
- // If it fails to log an LL_INFO1000 message once
+ // If it fails to log an LL_INFO1000 message once
// it will always fail when logging an LL_INFO1000 message.
//
if (!logToEEfailed)
{
va_start(args, fmt);
if (!vlogf(LL_INFO1000, fmt, args))
+ {
logToEEfailed = true;
+ }
va_end(args);
}
-
+
if (logToEEfailed)
{
// if the EE refuses to log it, we try to send it to stdout
@@ -423,23 +420,25 @@ int logf(const char* fmt, ...)
/*********************************************************************/
void gcDump_logf(const char* fmt, ...)
{
- va_list args;
+ va_list args;
static bool logToEEfailed = false;
//
// We remember when the EE failed to log, because vlogf()
// is very slow in a checked build.
//
- // If it fails to log an LL_INFO1000 message once
+ // If it fails to log an LL_INFO1000 message once
// it will always fail when logging an LL_INFO1000 message.
//
if (!logToEEfailed)
{
va_start(args, fmt);
if (!vlogf(LL_INFO1000, fmt, args))
+ {
logToEEfailed = true;
+ }
va_end(args);
}
-
+
if (logToEEfailed)
{
// if the EE refuses to log it, we try to send it to stdout
@@ -486,12 +485,11 @@ void logf(unsigned level, const char* fmt, ...)
va_end(args);
}
-void DECLSPEC_NORETURN badCode3(const char* msg, const char* msg2, int arg,
- __in_z const char* file, unsigned line)
+void DECLSPEC_NORETURN badCode3(const char* msg, const char* msg2, int arg, __in_z const char* file, unsigned line)
{
const int BUFF_SIZE = 512;
- char buf1[BUFF_SIZE];
- char buf2[BUFF_SIZE];
+ char buf1[BUFF_SIZE];
+ char buf2[BUFF_SIZE];
sprintf_s(buf1, BUFF_SIZE, "%s%s", msg, msg2);
sprintf_s(buf2, BUFF_SIZE, buf1, arg);
@@ -499,7 +497,7 @@ void DECLSPEC_NORETURN badCode3(const char* msg, const char* msg2, int arg,
badCode();
}
-void noWayAssertAbortHelper(const char * cond, const char * file, unsigned line)
+void noWayAssertAbortHelper(const char* cond, const char* file, unsigned line)
{
// Show the assert UI.
if (JitConfig.JitEnableNoWayAssert())
@@ -508,7 +506,7 @@ void noWayAssertAbortHelper(const char * cond, const char * file, unsigned line)
}
}
-void noWayAssertBodyConditional(const char * cond, const char * file, unsigned line)
+void noWayAssertBodyConditional(const char* cond, const char* file, unsigned line)
{
#ifdef FEATURE_TRACELOGGING
if (ShouldThrowOnNoway(file, line))
@@ -525,7 +523,7 @@ void noWayAssertBodyConditional(const char * cond, const char * file, unsigned l
}
}
-void DECLSPEC_NORETURN noWayAssertBody(const char * cond, const char * file, unsigned line)
+void DECLSPEC_NORETURN noWayAssertBody(const char* cond, const char* file, unsigned line)
{
#if MEASURE_FATAL
fatal_noWayAssertBodyArgs += 1;
diff --git a/src/jit/error.h b/src/jit/error.h
index fa4ba0d636..c56971aaf7 100644
--- a/src/jit/error.h
+++ b/src/jit/error.h
@@ -7,108 +7,150 @@
#define _ERROR_H_
/*****************************************************************************/
-#include <corjit.h> // for CORJIT_INTERNALERROR
-#include <safemath.h> // For FitsIn, used by SafeCvt methods.
+#include <corjit.h> // for CORJIT_INTERNALERROR
+#include <safemath.h> // For FitsIn, used by SafeCvt methods.
-#define FATAL_JIT_EXCEPTION 0x02345678
+#define FATAL_JIT_EXCEPTION 0x02345678
class Compiler;
struct ErrorTrapParam
{
- int errc;
- ICorJitInfo *jitInfo;
+ int errc;
+ ICorJitInfo* jitInfo;
EXCEPTION_POINTERS exceptionPointers;
- ErrorTrapParam() { jitInfo = NULL; }
+ ErrorTrapParam()
+ {
+ jitInfo = nullptr;
+ }
};
// Only catch JIT internal errors (will not catch EE generated Errors)
-extern LONG __JITfilter(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam);
-
-#define setErrorTrap(compHnd, ParamType, paramDef, paramRef) \
- struct __JITParam : ErrorTrapParam \
- { \
- ParamType param; \
- } __JITparam; \
- __JITparam.errc = CORJIT_INTERNALERROR; \
- __JITparam.jitInfo = compHnd; \
- __JITparam.param = paramRef; \
- PAL_TRY(__JITParam *, __JITpParam, &__JITparam) \
- { \
+extern LONG __JITfilter(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam);
+
+#define setErrorTrap(compHnd, ParamType, paramDef, paramRef) \
+ struct __JITParam : ErrorTrapParam \
+ { \
+ ParamType param; \
+ } __JITparam; \
+ __JITparam.errc = CORJIT_INTERNALERROR; \
+ __JITparam.jitInfo = compHnd; \
+ __JITparam.param = paramRef; \
+ PAL_TRY(__JITParam*, __JITpParam, &__JITparam) \
+ { \
ParamType paramDef = __JITpParam->param;
// Only catch JIT internal errors (will not catch EE generated Errors)
-#define impJitErrorTrap() \
- } \
- PAL_EXCEPT_FILTER(__JITfilter) \
- { \
- int __errc = __JITparam.errc; (void) __errc;
-
-#define endErrorTrap() \
- } \
+#define impJitErrorTrap() \
+ } \
+ PAL_EXCEPT_FILTER(__JITfilter) \
+ { \
+ int __errc = __JITparam.errc; \
+ (void)__errc;
+
+#define endErrorTrap() \
+ } \
PAL_ENDTRY
-#define finallyErrorTrap() \
- } \
- PAL_FINALLY \
+#define finallyErrorTrap() \
+ } \
+ PAL_FINALLY \
{
-
/*****************************************************************************/
extern void debugError(const char* msg, const char* file, unsigned line);
extern void DECLSPEC_NORETURN badCode();
-extern void DECLSPEC_NORETURN badCode3(const char* msg, const char* msg2, int arg, __in_z const char* file, unsigned line);
+extern void DECLSPEC_NORETURN
+badCode3(const char* msg, const char* msg2, int arg, __in_z const char* file, unsigned line);
extern void DECLSPEC_NORETURN noWay();
extern void DECLSPEC_NORETURN NOMEM();
extern void DECLSPEC_NORETURN fatal(int errCode);
extern void DECLSPEC_NORETURN noWayAssertBody();
-extern void DECLSPEC_NORETURN noWayAssertBody(const char * cond, const char * file, unsigned line);
+extern void DECLSPEC_NORETURN noWayAssertBody(const char* cond, const char* file, unsigned line);
// Conditionally invoke the noway assert body. The conditional predicate is evaluated using a method on the tlsCompiler.
// If a noway_assert is hit, we ask the Compiler whether to raise an exception (i.e., conditionally raise exception.)
// To have backward compatibility between v4.5 and v4.0, in min-opts we take a shot at codegen rather than rethrow.
extern void noWayAssertBodyConditional(
#ifdef FEATURE_TRACELOGGING
- const char * file, unsigned line
+ const char* file, unsigned line
#endif
-);
-extern void noWayAssertBodyConditional(const char * cond, const char * file, unsigned line);
+ );
+extern void noWayAssertBodyConditional(const char* cond, const char* file, unsigned line);
#if !defined(_TARGET_X86_) || !defined(LEGACY_BACKEND)
// This guy can return based on Config flag/Debugger
-extern void notYetImplemented(const char * msg, const char * file, unsigned line);
-#define NYI(msg) notYetImplemented("NYI: " #msg, __FILE__, __LINE__)
-#define NYI_IF(cond, msg) if (cond) notYetImplemented("NYI: " #msg, __FILE__, __LINE__)
+extern void notYetImplemented(const char* msg, const char* file, unsigned line);
+#define NYI(msg) notYetImplemented("NYI: " #msg, __FILE__, __LINE__)
+#define NYI_IF(cond, msg) \
+ if (cond) \
+ notYetImplemented("NYI: " #msg, __FILE__, __LINE__)
#ifdef _TARGET_AMD64_
-#define NYI_AMD64(msg) notYetImplemented("NYI_AMD64: " # msg, __FILE__, __LINE__)
-#define NYI_X86(msg) do { } while (0)
-#define NYI_ARM(msg) do { } while (0)
-#define NYI_ARM64(msg) do { } while (0)
+#define NYI_AMD64(msg) notYetImplemented("NYI_AMD64: " #msg, __FILE__, __LINE__)
+#define NYI_X86(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_ARM(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_ARM64(msg) \
+ do \
+ { \
+ } while (0)
#elif defined(_TARGET_X86_)
-#define NYI_AMD64(msg) do { } while (0)
-#define NYI_X86(msg) notYetImplemented("NYI_X86: " # msg, __FILE__, __LINE__)
-#define NYI_ARM(msg) do { } while (0)
-#define NYI_ARM64(msg) do { } while (0)
+#define NYI_AMD64(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_X86(msg) notYetImplemented("NYI_X86: " #msg, __FILE__, __LINE__)
+#define NYI_ARM(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_ARM64(msg) \
+ do \
+ { \
+ } while (0)
#elif defined(_TARGET_ARM_)
-#define NYI_AMD64(msg) do { } while (0)
-#define NYI_X86(msg) do { } while (0)
-#define NYI_ARM(msg) notYetImplemented("NYI_ARM: " # msg, __FILE__, __LINE__)
-#define NYI_ARM64(msg) do { } while (0)
+#define NYI_AMD64(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_X86(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_ARM(msg) notYetImplemented("NYI_ARM: " #msg, __FILE__, __LINE__)
+#define NYI_ARM64(msg) \
+ do \
+ { \
+ } while (0)
#elif defined(_TARGET_ARM64_)
-#define NYI_AMD64(msg) do { } while (0)
-#define NYI_X86(msg) do { } while (0)
-#define NYI_ARM(msg) do { } while (0)
-#define NYI_ARM64(msg) notYetImplemented("NYI_ARM64: " # msg, __FILE__, __LINE__)
+#define NYI_AMD64(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_X86(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_ARM(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_ARM64(msg) notYetImplemented("NYI_ARM64: " #msg, __FILE__, __LINE__)
#else
@@ -118,37 +160,57 @@ extern void notYetImplemented(const char * msg, const char * file, unsigned line
#else // defined(_TARGET_X86_) && defined(LEGACY_BACKEND)
-
-#define NYI(msg) assert(!msg)
-#define NYI_AMD64(msg) do { } while (0)
-#define NYI_ARM(msg) do { } while (0)
-#define NYI_ARM64(msg) do { } while (0)
+#define NYI(msg) assert(!msg)
+#define NYI_AMD64(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_ARM(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_ARM64(msg) \
+ do \
+ { \
+ } while (0)
#endif // _TARGET_X86_
#if !defined(_TARGET_X86_) && !defined(FEATURE_STACK_FP_X87)
-#define NYI_FLAT_FP_X87(msg) notYetImplemented("NYI: " #msg, __FILE__, __LINE__)
+#define NYI_FLAT_FP_X87(msg) notYetImplemented("NYI: " #msg, __FILE__, __LINE__)
#define NYI_FLAT_FP_X87_NC(msg) notYetImplemented("NYI: " #msg, __FILE__, __LINE__)
#else
-#define NYI_FLAT_FP_X87(msg) do { } while (0)
-#define NYI_FLAT_FP_X87_NC(msg) do { } while (0)
+#define NYI_FLAT_FP_X87(msg) \
+ do \
+ { \
+ } while (0)
+#define NYI_FLAT_FP_X87_NC(msg) \
+ do \
+ { \
+ } while (0)
#endif // !_TARGET_X86_ && !FEATURE_STACK_FP_X87
-
#ifdef DEBUG
#define NO_WAY(msg) (debugError(msg, __FILE__, __LINE__), noWay())
// Used for fallback stress mode
#define NO_WAY_NOASSERT(msg) noWay()
#define BADCODE(msg) (debugError(msg, __FILE__, __LINE__), badCode())
#define BADCODE3(msg, msg2, arg) badCode3(msg, msg2, arg, __FILE__, __LINE__)
-//Used for an assert that we want to convert into BADCODE to force minopts, or in minopts to force codegen.
-#define noway_assert(cond) do { if (!(cond)) { noWayAssertBodyConditional( # cond , __FILE__, __LINE__); } } while (0)
+// Used for an assert that we want to convert into BADCODE to force minopts, or in minopts to force codegen.
+#define noway_assert(cond) \
+ do \
+ { \
+ if (!(cond)) \
+ { \
+ noWayAssertBodyConditional(#cond, __FILE__, __LINE__); \
+ } \
+ } while (0)
#define unreached() noWayAssertBody("unreached", __FILE__, __LINE__)
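As a minimal sketch (not part of the patch, and assuming error.h above is included), this is how the reflowed DEBUG-build noway_assert reads at a hypothetical call site:

// Sketch only: "p != nullptr" stands in for any condition. In DEBUG builds the
// macro expands to
//   do { if (!(p != nullptr)) { noWayAssertBodyConditional("p != nullptr", __FILE__, __LINE__); } } while (0)
// so the failure can be converted into BADCODE (forcing minopts) rather than a hard assert.
void exampleUse(int* p)
{
    noway_assert(p != nullptr);
}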
-#else
+#else
#define NO_WAY(msg) noWay()
#define BADCODE(msg) badCode()
@@ -160,30 +222,47 @@ extern void notYetImplemented(const char * msg, const char * file, unsigned line
#define NOWAY_ASSERT_BODY_ARGUMENTS
#endif
-#define noway_assert(cond) do { if (!(cond)) { noWayAssertBodyConditional(NOWAY_ASSERT_BODY_ARGUMENTS); } } while (0)
+#define noway_assert(cond) \
+ do \
+ { \
+ if (!(cond)) \
+ { \
+ noWayAssertBodyConditional(NOWAY_ASSERT_BODY_ARGUMENTS); \
+ } \
+ } while (0)
#define unreached() noWayAssertBody()
#endif
- // IMPL_LIMITATION is called when we encounter valid IL that is not
- // supported by our current implementation because of various
- // limitations (that could be removed in the future)
+// IMPL_LIMITATION is called when we encounter valid IL that is not
+// supported by our current implementation because of various
+// limitations (that could be removed in the future)
#define IMPL_LIMITATION(msg) NO_WAY(msg)
-
#if defined(_HOST_X86_)
// While debugging in a debugger, the "int 3" will cause the program to break
// Outside, the exception handler will just filter out the "int 3".
-#define BreakIfDebuggerPresent() \
- do { __try { __asm {int 3} } __except(EXCEPTION_EXECUTE_HANDLER) {} } \
- while (0)
+#define BreakIfDebuggerPresent() \
+ do \
+ { \
+ __try \
+ { \
+ __asm {int 3} \
+ } \
+ __except (EXCEPTION_EXECUTE_HANDLER) \
+ { \
+ } \
+ } while (0)
#else
-#define BreakIfDebuggerPresent() \
- do { if (IsDebuggerPresent()) DebugBreak(); } \
- while (0)
+#define BreakIfDebuggerPresent() \
+ do \
+ { \
+ if (IsDebuggerPresent()) \
+ DebugBreak(); \
+ } while (0)
#endif
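A small sketch (not part of the patch; it assumes the Windows IsDebuggerPresent/DebugBreak declarations are in scope) of why both BreakIfDebuggerPresent variants keep the do { } while (0) wrapper:

// Sketch only: the do/while(0) wrapper makes the multi-statement expansion a
// single statement, so the macro is safe as the sole body of an unbraced if.
void breakOnFailure(bool failed)
{
    if (failed)
        BreakIfDebuggerPresent();
}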
#ifdef DEBUG
@@ -213,5 +292,4 @@ inline Dst SafeCvtNowayAssert(Src val)
return static_cast<Dst>(val);
}
-
#endif
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index 0e29f18470..4659f47dc7 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -16,28 +16,27 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#pragma hdrstop
#endif
-#include "allocacheck.h" // for alloca
+#include "allocacheck.h" // for alloca
/*****************************************************************************/
-
-void Compiler::fgInit()
+void Compiler::fgInit()
{
impInit();
/* Initialization for fgWalkTreePre() and fgWalkTreePost() */
- fgFirstBBScratch = nullptr;
+ fgFirstBBScratch = nullptr;
#ifdef DEBUG
fgPrintInlinedMethods = JitConfig.JitPrintInlinedMethods() == 1;
#endif // DEBUG
/* We haven't yet computed the bbPreds lists */
- fgComputePredsDone = false;
+ fgComputePredsDone = false;
/* We haven't yet computed the bbCheapPreds lists */
- fgCheapPredsValid = false;
+ fgCheapPredsValid = false;
/* We haven't yet computed the edge weight */
fgEdgeWeightsComputed = false;
@@ -48,7 +47,7 @@ void Compiler::fgInit()
fgCalledWeight = BB_ZERO_WEIGHT;
/* We haven't yet computed the dominator sets */
- fgDomsComputed = false;
+ fgDomsComputed = false;
#ifdef DEBUG
fgReachabilitySetsValid = false;
@@ -62,16 +61,16 @@ void Compiler::fgInit()
/* Initialize the basic block list */
- fgFirstBB = NULL;
- fgLastBB = NULL;
- fgFirstColdBlock = NULL;
+ fgFirstBB = nullptr;
+ fgLastBB = nullptr;
+ fgFirstColdBlock = nullptr;
#if FEATURE_EH_FUNCLETS
- fgFirstFuncletBB = NULL;
+ fgFirstFuncletBB = nullptr;
fgFuncletsCreated = false;
#endif // FEATURE_EH_FUNCLETS
- fgBBcount = 0;
+ fgBBcount = 0;
#ifdef DEBUG
fgBBcountAtCodegen = 0;
@@ -88,12 +87,12 @@ void Compiler::fgInit()
fgCurBBEpochSize = 0;
fgBBSetCountInSizeTUnits = 0;
- genReturnBB = NULL;
+ genReturnBB = nullptr;
/* We haven't reached the global morphing phase */
- fgGlobalMorph = false;
- fgExpandInline = false;
- fgModified = false;
+ fgGlobalMorph = false;
+ fgExpandInline = false;
+ fgModified = false;
#ifdef DEBUG
fgSafeBasicBlockCreation = true;
@@ -108,35 +107,35 @@ void Compiler::fgInit()
// Initialize the logic for adding code. This is used to insert code such
// as the code that raises an exception when an array range check fails.
- fgAddCodeList = 0;
- fgAddCodeModf = false;
+ fgAddCodeList = nullptr;
+ fgAddCodeModf = false;
for (int i = 0; i < SCK_COUNT; i++)
{
- fgExcptnTargetCache[i] = NULL;
+ fgExcptnTargetCache[i] = nullptr;
}
/* Keep track of the max count of pointer arguments */
- fgPtrArgCntCur = 0;
- fgPtrArgCntMax = 0;
+ fgPtrArgCntCur = 0;
+ fgPtrArgCntMax = 0;
/* This global flag is set whenever we remove a statement */
- fgStmtRemoved = false;
+ fgStmtRemoved = false;
/* This global flag is set whenever we add a throw block for a RngChk */
fgRngChkThrowAdded = false; /* reset flag for fgIsCodeAdded() */
- fgIncrCount = 0;
+ fgIncrCount = 0;
/* We will record a list of all BBJ_RETURN blocks here */
- fgReturnBlocks = NULL;
+ fgReturnBlocks = nullptr;
/* This is set by fgComputeReachability */
- fgEnterBlks = BlockSetOps::UninitVal();
+ fgEnterBlks = BlockSetOps::UninitVal();
#ifdef DEBUG
- fgEnterBlksSetValid = false;
+ fgEnterBlksSetValid = false;
#endif // DEBUG
#if !FEATURE_EH_FUNCLETS
@@ -152,7 +151,7 @@ void Compiler::fgInit()
fgNoStructPromotion = false;
fgNoStructParamPromotion = false;
- optValnumCSE_phase = false; // referenced in fgMorphSmpOp()
+ optValnumCSE_phase = false; // referenced in fgMorphSmpOp()
#ifdef DEBUG
fgNormalizeEHDone = false;
@@ -161,37 +160,39 @@ void Compiler::fgInit()
#ifdef DEBUG
if (!compIsForInlining())
{
- if ((JitConfig.JitNoStructPromotion() & 1) == 1)
- {
- fgNoStructPromotion = true;
- }
- if ((JitConfig.JitNoStructPromotion() & 2) == 2)
- {
- fgNoStructParamPromotion = true;
- }
+ if ((JitConfig.JitNoStructPromotion() & 1) == 1)
+ {
+ fgNoStructPromotion = true;
+ }
+ if ((JitConfig.JitNoStructPromotion() & 2) == 2)
+ {
+ fgNoStructParamPromotion = true;
+ }
}
#endif // DEBUG
if (!compIsForInlining())
{
- m_promotedStructDeathVars = NULL;
+ m_promotedStructDeathVars = nullptr;
}
#ifdef FEATURE_SIMD
fgPreviousCandidateSIMDFieldAsgStmt = nullptr;
#endif
}
-bool Compiler::fgHaveProfileData()
+bool Compiler::fgHaveProfileData()
{
if (compIsForInlining() || compIsForImportOnly())
+ {
return false;
+ }
- return (fgProfileBuffer != NULL);
+ return (fgProfileBuffer != nullptr);
}
-bool Compiler::fgGetProfileWeightForBasicBlock(IL_OFFSET offset, unsigned* weightWB)
+bool Compiler::fgGetProfileWeightForBasicBlock(IL_OFFSET offset, unsigned* weightWB)
{
- noway_assert(weightWB != NULL);
+ noway_assert(weightWB != nullptr);
unsigned weight = 0;
#ifdef DEBUG
@@ -250,13 +251,13 @@ bool Compiler::fgGetProfileWeightForBasicBlock(IL_OFFSET offset,
return true;
}
-void Compiler::fgInstrumentMethod()
+void Compiler::fgInstrumentMethod()
{
noway_assert(!compIsForInlining());
// Count the number of basic blocks in the method
- int countOfBlocks = 0;
+ int countOfBlocks = 0;
BasicBlock* block;
for (block = fgFirstBB; block; block = block->bbNext)
{
@@ -273,7 +274,7 @@ void Compiler::fgInstrumentMethod()
HRESULT res = info.compCompHnd->allocBBProfileBuffer(countOfBlocks, &bbProfileBuffer);
- ICorJitInfo::ProfileBuffer *bbProfileBufferStart = bbProfileBuffer;
+ ICorJitInfo::ProfileBuffer* bbProfileBufferStart = bbProfileBuffer;
GenTreePtr stmt;
@@ -282,10 +283,10 @@ void Compiler::fgInstrumentMethod()
// The E_NOTIMPL status is returned when we are profiling a generic method from a different assembly
if (res == E_NOTIMPL)
{
- // In such cases we still want to add the method entry callback node
+ // In such cases we still want to add the method entry callback node
GenTreeArgList* args = gtNewArgList(gtNewIconEmbMethHndNode(info.compMethodHnd));
- GenTreePtr call = gtNewHelperCallNode(CORINFO_HELP_BBT_FCN_ENTER, TYP_VOID, 0, args);
+ GenTreePtr call = gtNewHelperCallNode(CORINFO_HELP_BBT_FCN_ENTER, TYP_VOID, 0, args);
stmt = gtNewStmt(call);
}
@@ -308,17 +309,17 @@ void Compiler::fgInstrumentMethod()
bbProfileBuffer->ILOffset = block->bbCodeOffs;
- GenTreePtr addr;
- GenTreePtr value;
+ GenTreePtr addr;
+ GenTreePtr value;
- value = gtNewOperNode(GT_IND, TYP_INT,
- gtNewIconEmbHndNode((void*) &bbProfileBuffer->ExecutionCount, NULL, GTF_ICON_BBC_PTR));
- value = gtNewOperNode(GT_ADD, TYP_INT, value, gtNewIconNode(1));
+ value = gtNewOperNode(GT_IND, TYP_INT, gtNewIconEmbHndNode((void*)&bbProfileBuffer->ExecutionCount, nullptr,
+ GTF_ICON_BBC_PTR));
+ value = gtNewOperNode(GT_ADD, TYP_INT, value, gtNewIconNode(1));
- addr = gtNewOperNode(GT_IND, TYP_INT,
- gtNewIconEmbHndNode((void*) &bbProfileBuffer->ExecutionCount, NULL, GTF_ICON_BBC_PTR));
+ addr = gtNewOperNode(GT_IND, TYP_INT, gtNewIconEmbHndNode((void*)&bbProfileBuffer->ExecutionCount, nullptr,
+ GTF_ICON_BBC_PTR));
- addr = gtNewAssignNode(addr, value);
+ addr = gtNewAssignNode(addr, value);
fgInsertStmtAtBeg(block, addr);
@@ -329,16 +330,17 @@ void Compiler::fgInstrumentMethod()
// Add the method entry callback node
- GenTreeArgList* args = gtNewArgList( gtNewIconEmbMethHndNode(info.compMethodHnd));
- GenTreePtr call = gtNewHelperCallNode(CORINFO_HELP_BBT_FCN_ENTER, TYP_VOID, 0, args);
+ GenTreeArgList* args = gtNewArgList(gtNewIconEmbMethHndNode(info.compMethodHnd));
+ GenTreePtr call = gtNewHelperCallNode(CORINFO_HELP_BBT_FCN_ENTER, TYP_VOID, 0, args);
- GenTreePtr handle = gtNewIconEmbHndNode((void*) &bbProfileBufferStart->ExecutionCount, NULL, GTF_ICON_BBC_PTR);
- GenTreePtr value = gtNewOperNode(GT_IND, TYP_INT, handle);
- GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, value, gtNewIconNode(0, TYP_INT));
- relop->gtFlags |= GTF_RELOP_QMARK;
- GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), call);
- GenTreePtr cond = gtNewQmarkNode(TYP_VOID, relop, colon);
- stmt = gtNewStmt(cond);
+ GenTreePtr handle =
+ gtNewIconEmbHndNode((void*)&bbProfileBufferStart->ExecutionCount, nullptr, GTF_ICON_BBC_PTR);
+ GenTreePtr value = gtNewOperNode(GT_IND, TYP_INT, handle);
+ GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, value, gtNewIconNode(0, TYP_INT));
+ relop->gtFlags |= GTF_RELOP_QMARK;
+ GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), call);
+ GenTreePtr cond = gtNewQmarkNode(TYP_VOID, relop, colon);
+ stmt = gtNewStmt(cond);
}
fgEnsureFirstBBisScratch();
@@ -351,7 +353,7 @@ void Compiler::fgInstrumentMethod()
* Create a basic block and append it to the current BB list.
*/
-BasicBlock * Compiler::fgNewBasicBlock(BBjumpKinds jumpKind)
+BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind)
{
// This method must not be called after the exception table has been
// constructed, because it does not provide support for patching
@@ -368,20 +370,19 @@ BasicBlock * Compiler::fgNewBasicBlock(BBjumpKinds jumpKind)
/* Append the block to the end of the global basic block list */
- if (fgFirstBB)
+ if (fgFirstBB)
{
fgLastBB->setNext(block);
}
else
{
- fgFirstBB = block;
+ fgFirstBB = block;
block->bbPrev = nullptr;
}
fgLastBB = block;
return block;
-
}
/*****************************************************************************
@@ -394,18 +395,20 @@ BasicBlock * Compiler::fgNewBasicBlock(BBjumpKinds jumpKind)
* added to fgEnsureFirstBBisScratch in a way as to change semantics.
*/
-void Compiler::fgEnsureFirstBBisScratch()
+void Compiler::fgEnsureFirstBBisScratch()
{
// Have we already allocated a scratch block?
if (fgFirstBBisScratch())
+ {
return;
+ }
assert(fgFirstBBScratch == nullptr);
BasicBlock* block = bbNewBasicBlock(BBJ_NONE);
- if (fgFirstBB != NULL)
+ if (fgFirstBB != nullptr)
{
// If we have profile data the new block will inherit fgFirstBlock's weight
if (fgFirstBB->bbFlags & BBF_PROF_WEIGHT)
@@ -416,12 +419,12 @@ void Compiler::fgEnsureFirstBBisScratch()
}
else
{
- noway_assert(fgLastBB == NULL);
+ noway_assert(fgLastBB == nullptr);
fgFirstBB = block;
- fgLastBB = block;
+ fgLastBB = block;
}
-
- noway_assert(fgLastBB != NULL);
+
+ noway_assert(fgLastBB != nullptr);
block->bbFlags |= (BBF_INTERNAL | BBF_IMPORTED);
@@ -435,7 +438,7 @@ void Compiler::fgEnsureFirstBBisScratch()
#endif
}
-bool Compiler::fgFirstBBisScratch()
+bool Compiler::fgFirstBBisScratch()
{
if (fgFirstBBScratch != nullptr)
{
@@ -446,8 +449,7 @@ bool Compiler::fgFirstBBisScratch()
// Normally, the first scratch block is a fall-through block. However, if the block after it was an empty
// BBJ_ALWAYS block, it might get removed, and the code that removes it will make the first scratch block
// a BBJ_ALWAYS block.
- assert((fgFirstBBScratch->bbJumpKind == BBJ_NONE) ||
- (fgFirstBBScratch->bbJumpKind == BBJ_ALWAYS));
+ assert((fgFirstBBScratch->bbJumpKind == BBJ_NONE) || (fgFirstBBScratch->bbJumpKind == BBJ_ALWAYS));
return true;
}
@@ -457,7 +459,7 @@ bool Compiler::fgFirstBBisScratch()
}
}
-bool Compiler::fgBBisScratch(BasicBlock* block)
+bool Compiler::fgBBisScratch(BasicBlock* block)
{
return fgFirstBBisScratch() && (block == fgFirstBB);
}
@@ -473,21 +475,24 @@ bool Compiler::fgBlockContainsStatementBounded(BasicBlock* block, GenTree* stmt,
assert(stmt->gtOper == GT_STMT);
- __int64 *numTraversed = &JitTls::GetCompiler()->compNumStatementLinksTraversed;
+ __int64* numTraversed = &JitTls::GetCompiler()->compNumStatementLinksTraversed;
if (*numTraversed > maxLinks)
+ {
return answerOnBoundExceeded;
+ }
GenTree* curr = block->firstStmt();
do
{
(*numTraversed)++;
if (curr == stmt)
+ {
break;
+ }
curr = curr->gtNext;
- }
- while (curr);
- return curr != NULL;
+ } while (curr);
+ return curr != nullptr;
}
#endif // DEBUG
@@ -507,8 +512,7 @@ bool Compiler::fgBlockContainsStatementBounded(BasicBlock* block, GenTree* stmt,
// In other cases, if there are any phi assignments and/or an assignment of
// the GT_CATCH_ARG, we insert after those.
-GenTreePtr Compiler::fgInsertStmtAtBeg(BasicBlock* block,
- GenTreePtr stmt)
+GenTreePtr Compiler::fgInsertStmtAtBeg(BasicBlock* block, GenTreePtr stmt)
{
if (stmt->gtOper != GT_STMT)
{
@@ -538,7 +542,7 @@ GenTreePtr Compiler::fgInsertStmtAtBeg(BasicBlock* block,
/* Are there any statements in the block? */
- if (list)
+ if (list)
{
GenTreePtr last;
@@ -549,14 +553,14 @@ GenTreePtr Compiler::fgInsertStmtAtBeg(BasicBlock* block,
/* Insert the statement in front of the first one */
- list->gtPrev = stmt;
- stmt->gtPrev = last;
+ list->gtPrev = stmt;
+ stmt->gtPrev = last;
}
else
{
/* The block was completely empty */
- stmt->gtPrev = stmt;
+ stmt->gtPrev = stmt;
}
return stmt;
@@ -569,12 +573,11 @@ GenTreePtr Compiler::fgInsertStmtAtBeg(BasicBlock* block,
* If the block can be a conditional block, use fgInsertStmtNearEnd.
*/
-GenTreeStmt* Compiler::fgInsertStmtAtEnd(BasicBlock* block,
- GenTreePtr node)
+GenTreeStmt* Compiler::fgInsertStmtAtEnd(BasicBlock* block, GenTreePtr node)
{
- GenTreePtr list = block->firstStmt();
- GenTreeStmt* stmt;
-
+ GenTreePtr list = block->firstStmt();
+ GenTreeStmt* stmt;
+
if (node->gtOper != GT_STMT)
{
stmt = gtNewStmt(node);
@@ -586,7 +589,7 @@ GenTreeStmt* Compiler::fgInsertStmtAtEnd(BasicBlock* block,
assert(stmt->gtNext == nullptr); // We don't set it, and it needs to be this after the insert
- if (list)
+ if (list)
{
GenTreePtr last;
@@ -619,16 +622,14 @@ GenTreeStmt* Compiler::fgInsertStmtAtEnd(BasicBlock* block,
* Returns the (potentially) new GT_STMT node.
*/
-GenTreeStmt* Compiler::fgInsertStmtNearEnd(BasicBlock* block, GenTreePtr node)
+GenTreeStmt* Compiler::fgInsertStmtNearEnd(BasicBlock* block, GenTreePtr node)
{
GenTreeStmt* stmt;
-
+
// This routine is not aware of embedded stmts and can only be used when in tree order.
assert(fgOrder == FGOrderTree);
- if ((block->bbJumpKind == BBJ_COND) ||
- (block->bbJumpKind == BBJ_SWITCH) ||
- (block->bbJumpKind == BBJ_RETURN))
+ if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH) || (block->bbJumpKind == BBJ_RETURN))
{
if (node->gtOper != GT_STMT)
{
@@ -639,9 +640,11 @@ GenTreeStmt* Compiler::fgInsertStmtNearEnd(BasicBlock* block, GenTreePtr node)
stmt = node->AsStmt();
}
- GenTreeStmt* first = block->firstStmt(); noway_assert(first);
- GenTreeStmt* last = block->lastStmt(); noway_assert(last && last->gtNext == NULL);
- GenTreePtr after = last->gtPrev;
+ GenTreeStmt* first = block->firstStmt();
+ noway_assert(first);
+ GenTreeStmt* last = block->lastStmt();
+ noway_assert(last && last->gtNext == nullptr);
+ GenTreePtr after = last->gtPrev;
#if DEBUG
if (block->bbJumpKind == BBJ_COND)
@@ -650,15 +653,13 @@ GenTreeStmt* Compiler::fgInsertStmtNearEnd(BasicBlock* block, GenTreePtr node)
}
else if (block->bbJumpKind == BBJ_RETURN)
{
- noway_assert((last->gtStmtExpr->gtOper == GT_RETURN) ||
- (last->gtStmtExpr->gtOper == GT_JMP) ||
+ noway_assert((last->gtStmtExpr->gtOper == GT_RETURN) || (last->gtStmtExpr->gtOper == GT_JMP) ||
// BBJ_RETURN blocks in functions returning void do not get a GT_RETURN node if they
// have a .tail prefix (even if canTailCall returns false for these calls)
// code:Compiler::impImportBlockCode (search for the RET: label)
// Ditto for real tail calls (all code after them has been removed)
((last->gtStmtExpr->gtOper == GT_CALL) &&
- ((info.compRetType == TYP_VOID) || last->gtStmtExpr->AsCall()->IsTailCall()))
- );
+ ((info.compRetType == TYP_VOID) || last->gtStmtExpr->AsCall()->IsTailCall())));
}
else
{
@@ -697,7 +698,6 @@ GenTreeStmt* Compiler::fgInsertStmtNearEnd(BasicBlock* block, GenTreePtr node)
}
}
-
/*****************************************************************************
*
* Insert the given statement "stmt" after GT_STMT node "insertionPoint".
@@ -705,9 +705,7 @@ GenTreeStmt* Compiler::fgInsertStmtNearEnd(BasicBlock* block, GenTreePtr node)
* Note that the gtPrev list of statement nodes is circular, but the gtNext list is not.
*/
-GenTreePtr Compiler::fgInsertStmtAfter(BasicBlock* block,
- GenTreePtr insertionPoint,
- GenTreePtr stmt)
+GenTreePtr Compiler::fgInsertStmtAfter(BasicBlock* block, GenTreePtr insertionPoint, GenTreePtr stmt)
{
assert(block->bbTreeList != nullptr);
noway_assert(insertionPoint->gtOper == GT_STMT);
@@ -743,9 +741,7 @@ GenTreePtr Compiler::fgInsertStmtAfter(BasicBlock* block,
// Insert the given tree or statement before GT_STMT node "insertionPoint".
// Returns the newly inserted GT_STMT node.
-GenTreePtr Compiler::fgInsertStmtBefore(BasicBlock* block,
- GenTreePtr insertionPoint,
- GenTreePtr stmt)
+GenTreePtr Compiler::fgInsertStmtBefore(BasicBlock* block, GenTreePtr insertionPoint, GenTreePtr stmt)
{
assert(block->bbTreeList != nullptr);
noway_assert(insertionPoint->gtOper == GT_STMT);
@@ -770,8 +766,8 @@ GenTreePtr Compiler::fgInsertStmtBefore(BasicBlock* block,
stmt->gtNext = insertionPoint;
stmt->gtPrev = insertionPoint->gtPrev;
- insertionPoint->gtPrev->gtNext = stmt;
- insertionPoint->gtPrev = stmt;
+ insertionPoint->gtPrev->gtNext = stmt;
+ insertionPoint->gtPrev = stmt;
}
return stmt;
@@ -783,24 +779,25 @@ GenTreePtr Compiler::fgInsertStmtBefore(BasicBlock* block,
* Return the last statement stmtList.
*/
-GenTreePtr Compiler::fgInsertStmtListAfter(BasicBlock * block, // the block where stmtAfter is in.
- GenTreePtr stmtAfter, // the statement where stmtList should be inserted after.
- GenTreePtr stmtList)
+GenTreePtr Compiler::fgInsertStmtListAfter(BasicBlock* block, // the block where stmtAfter is in.
+ GenTreePtr stmtAfter, // the statement where stmtList should be inserted
+ // after.
+ GenTreePtr stmtList)
{
// Currently we can handle when stmtAfter and stmtList are non-NULL. This makes everything easy.
noway_assert(stmtAfter && stmtAfter->gtOper == GT_STMT);
- noway_assert(stmtList && stmtList->gtOper == GT_STMT);
+ noway_assert(stmtList && stmtList->gtOper == GT_STMT);
GenTreePtr stmtLast = stmtList->gtPrev; // Last statement in a non-empty list, circular in the gtPrev list.
noway_assert(stmtLast);
- noway_assert(stmtLast->gtNext == NULL);
+ noway_assert(stmtLast->gtNext == nullptr);
GenTreePtr stmtNext = stmtAfter->gtNext;
if (!stmtNext)
{
- stmtAfter->gtNext = stmtList;
- stmtList->gtPrev = stmtAfter;
+ stmtAfter->gtNext = stmtList;
+ stmtList->gtPrev = stmtAfter;
block->bbTreeList->gtPrev = stmtLast;
goto _Done;
}
@@ -808,13 +805,12 @@ GenTreePtr Compiler::fgInsertStmtListAfter(BasicBlock * block, // t
stmtAfter->gtNext = stmtList;
stmtList->gtPrev = stmtAfter;
- stmtLast->gtNext = stmtNext;
- stmtNext->gtPrev = stmtLast;
+ stmtLast->gtNext = stmtNext;
+ stmtNext->gtPrev = stmtLast;
_Done:
- noway_assert(block->bbTreeList == NULL ||
- block->bbTreeList->gtPrev->gtNext == NULL);
+ noway_assert(block->bbTreeList == nullptr || block->bbTreeList->gtPrev->gtNext == nullptr);
return stmtLast;
}
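An illustrative sketch (not part of the patch, using a hypothetical StmtNode type) of the statement-list shape the insertion routines above rely on: gtNext is a null-terminated forward list, while gtPrev is circular, so the head's gtPrev reaches the last statement in O(1):

// Sketch only. The real GenTree statement list keeps the same shape: walking
// gtNext ends at nullptr, while head->gtPrev gives constant-time access to the
// last statement.
struct StmtNode
{
    StmtNode* gtNext;
    StmtNode* gtPrev;
};

StmtNode* appendStmt(StmtNode* head, StmtNode* stmt)
{
    stmt->gtNext = nullptr;
    if (head == nullptr)
    {
        stmt->gtPrev = stmt; // single element: prev points at itself
        return stmt;
    }
    StmtNode* last = head->gtPrev; // circular prev: head->gtPrev is the tail
    last->gtNext   = stmt;
    stmt->gtPrev   = last;
    head->gtPrev   = stmt; // stmt becomes the new tail
    return head;
}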
@@ -822,10 +818,12 @@ _Done:
/*
Removes a block from the return block list
*/
-void Compiler::fgRemoveReturnBlock(BasicBlock* block)
+void Compiler::fgRemoveReturnBlock(BasicBlock* block)
{
if (fgReturnBlocks == nullptr)
+ {
return;
+ }
if (fgReturnBlocks->block == block)
{
@@ -878,7 +876,7 @@ void Compiler::fgRemoveReturnBlock(BasicBlock* block)
// TODO-Cleanup: This should probably simply replace the tree so that the information
// (such as IL offsets) is preserved, but currently it creates a new statement.
-void Compiler::fgReplaceStmt(BasicBlock* block, GenTreeStmt* stmt, GenTreePtr newTree)
+void Compiler::fgReplaceStmt(BasicBlock* block, GenTreeStmt* stmt, GenTreePtr newTree)
{
// fgNewStmtFromTree will sequence the nodes in newTree. Thus, if we are in FGOrderLinear,
// we will need to fixup any embedded statements after this call.
@@ -904,31 +902,30 @@ void Compiler::fgReplaceStmt(BasicBlock* block, GenTreeStmt* stmt
// BEFORE something (not after).
// TODO-Cleanup: Consider finding an alternate approach to this - it seems risky
- for (GenTreeStmt* embeddedStmt = newStmt->gtNextStmt;
- embeddedStmt != nullptr && embeddedStmt->gtStmtIsEmbedded();
- embeddedStmt = embeddedStmt->gtNextStmt)
+ for (GenTreeStmt* embeddedStmt = newStmt->gtNextStmt;
+ embeddedStmt != nullptr && embeddedStmt->gtStmtIsEmbedded(); embeddedStmt = embeddedStmt->gtNextStmt)
{
GenTreePtr firstEmbeddedNode = embeddedStmt->gtStmtList;
GenTreePtr lastEmbeddedNode = embeddedStmt->gtStmtExpr;
- GenTreePtr nextNode = lastEmbeddedNode->gtNext;
- GenTreePtr prevNode = nextNode->gtPrev;
+ GenTreePtr nextNode = lastEmbeddedNode->gtNext;
+ GenTreePtr prevNode = nextNode->gtPrev;
assert(nextNode != nullptr);
if (prevNode == nullptr)
{
// We've reordered the nodes such that the embedded statement is now first.
// Extract it.
firstEmbeddedNode->gtPrev = nullptr;
- lastEmbeddedNode->gtNext = nullptr;
+ lastEmbeddedNode->gtNext = nullptr;
fgRemoveStmt(block, embeddedStmt);
fgInsertStmtBefore(block, stmt, embeddedStmt);
embeddedStmt->gtFlags |= GTF_STMT_TOP_LEVEL;
}
else
{
- prevNode->gtNext = firstEmbeddedNode;
+ prevNode->gtNext = firstEmbeddedNode;
firstEmbeddedNode->gtPrev = prevNode;
- nextNode->gtPrev = lastEmbeddedNode;
- lastEmbeddedNode->gtNext = nextNode;
+ nextNode->gtPrev = lastEmbeddedNode;
+ lastEmbeddedNode->gtNext = nextNode;
}
}
}
@@ -973,7 +970,6 @@ void Compiler::fgReplaceStmt(BasicBlock* block, GenTreeStmt* stmt
}
}
-
//------------------------------------------------------------------------
// fgGetPredForBlock: Find and return the predecessor edge corresponding to a given predecessor block.
//
@@ -988,8 +984,7 @@ void Compiler::fgReplaceStmt(BasicBlock* block, GenTreeStmt* stmt
// Assumptions:
// -- This only works on the full predecessor lists, not the cheap preds lists.
-flowList* Compiler::fgGetPredForBlock(BasicBlock* block,
- BasicBlock* blockPred)
+flowList* Compiler::fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred)
{
noway_assert(block);
noway_assert(blockPred);
@@ -1008,7 +1003,6 @@ flowList* Compiler::fgGetPredForBlock(BasicBlock* block,
return nullptr;
}
-
//------------------------------------------------------------------------
// fgGetPredForBlock: Find and return the predecessor edge corresponding to a given predecessor block.
// Also returns the address of the pointer that points to this edge, to make it possible to remove this edge from the
@@ -1026,9 +1020,7 @@ flowList* Compiler::fgGetPredForBlock(BasicBlock* block,
// Assumptions:
// -- This only works on the full predecessor lists, not the cheap preds lists.
-flowList* Compiler::fgGetPredForBlock(BasicBlock* block,
- BasicBlock* blockPred,
- flowList*** ptrToPred)
+flowList* Compiler::fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred)
{
assert(block);
assert(blockPred);
@@ -1036,9 +1028,10 @@ flowList* Compiler::fgGetPredForBlock(BasicBlock* block,
assert(!fgCheapPredsValid);
flowList** predPrevAddr;
- flowList* pred;
+ flowList* pred;
- for (predPrevAddr = &block->bbPreds, pred = *predPrevAddr; pred != nullptr; predPrevAddr = &pred->flNext, pred = *predPrevAddr)
+ for (predPrevAddr = &block->bbPreds, pred = *predPrevAddr; pred != nullptr;
+ predPrevAddr = &pred->flNext, pred = *predPrevAddr)
{
if (blockPred == pred->flBlock)
{
@@ -1051,7 +1044,6 @@ flowList* Compiler::fgGetPredForBlock(BasicBlock* block,
return nullptr;
}
-
//------------------------------------------------------------------------
// fgSpliceOutPred: Removes a predecessor edge for a block from the predecessor list.
//
@@ -1074,8 +1066,7 @@ flowList* Compiler::fgGetPredForBlock(BasicBlock* block,
// address instead, to avoid this search.
// -- Marks fgModified = true, since the flow graph has changed.
-flowList* Compiler::fgSpliceOutPred(BasicBlock* block,
- BasicBlock* blockPred)
+flowList* Compiler::fgSpliceOutPred(BasicBlock* block, BasicBlock* blockPred)
{
assert(!fgCheapPredsValid);
noway_assert(block->bbPreds);
@@ -1083,16 +1074,15 @@ flowList* Compiler::fgSpliceOutPred(BasicBlock* block,
flowList* oldEdge = nullptr;
// Is this the first block in the pred list?
- if (blockPred == block->bbPreds->flBlock)
+ if (blockPred == block->bbPreds->flBlock)
{
- oldEdge = block->bbPreds;
+ oldEdge = block->bbPreds;
block->bbPreds = block->bbPreds->flNext;
}
else
{
flowList* pred;
- for (pred = block->bbPreds;
- (pred->flNext != nullptr) && (blockPred != pred->flNext->flBlock);
+ for (pred = block->bbPreds; (pred->flNext != nullptr) && (blockPred != pred->flNext->flBlock);
pred = pred->flNext)
{
// empty
@@ -1111,7 +1101,6 @@ flowList* Compiler::fgSpliceOutPred(BasicBlock* block,
return oldEdge;
}
-
//------------------------------------------------------------------------
// fgAddRefPred: Increment block->bbRefs by one and add "blockPred" to the predecessor list of "block".
//
@@ -1120,7 +1109,8 @@ flowList* Compiler::fgSpliceOutPred(BasicBlock* block,
// blockPred -- The predecessor block to add to the predecessor list.
// oldEdge -- Optional (default: nullptr). If non-nullptr, and a new edge is created (and the dup count
// of an existing edge is not just incremented), the edge weights are copied from this edge.
-// initializingPreds -- Optional (default: false). Only set to "true" when the initial preds computation is happening.
+// initializingPreds -- Optional (default: false). Only set to "true" when the initial preds computation is
+// happening.
//
// Return Value:
// The flow edge representing the predecessor.
@@ -1135,10 +1125,10 @@ flowList* Compiler::fgSpliceOutPred(BasicBlock* block,
// -- fgModified is set if a new flow edge is created (but not if an existing flow edge dup count is incremented),
// indicating that the flow graph shape has changed.
-flowList* Compiler::fgAddRefPred(BasicBlock* block,
- BasicBlock* blockPred,
- flowList* oldEdge /* = nullptr */,
- bool initializingPreds /* = false */)
+flowList* Compiler::fgAddRefPred(BasicBlock* block,
+ BasicBlock* blockPred,
+ flowList* oldEdge /* = nullptr */,
+ bool initializingPreds /* = false */)
{
assert(block != nullptr);
assert(blockPred != nullptr);
@@ -1166,7 +1156,7 @@ flowList* Compiler::fgAddRefPred(BasicBlock* block,
flow = new (this, CMK_FlowList) flowList();
#if MEASURE_BLOCK_SIZE
- genFlowNodeCnt += 1;
+ genFlowNodeCnt += 1;
genFlowNodeSize += sizeof(flowList);
#endif // MEASURE_BLOCK_SIZE
@@ -1182,16 +1172,16 @@ flowList* Compiler::fgAddRefPred(BasicBlock* block,
// debuggable code, or sort in optFindNaturalLoops, or better, make
// the code in optFindNaturalLoops not depend on order.
- flowList** listp= &block->bbPreds;
+ flowList** listp = &block->bbPreds;
while (*listp && ((*listp)->flBlock->bbNum < blockPred->bbNum))
{
- listp = & (*listp)->flNext;
+ listp = &(*listp)->flNext;
}
- flow->flNext = *listp;
- *listp = flow;
+ flow->flNext = *listp;
+ *listp = flow;
- flow->flBlock = blockPred;
+ flow->flBlock = blockPred;
flow->flDupCount = 1;
if (fgHaveValidEdgeWeights)
@@ -1235,7 +1225,6 @@ flowList* Compiler::fgAddRefPred(BasicBlock* block,
return flow;
}
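A small sketch (not part of the patch, with hypothetical types) of the pointer-to-pointer insertion pattern fgAddRefPred uses above to keep the pred list sorted by bbNum without a special case for the head:

// Sketch only. Walking an Edge** lets the new edge be linked in front of the
// first entry whose number is not smaller, whether that position is the head
// or the middle of the list.
struct Edge
{
    Edge*    next;
    unsigned num;
};

void insertSorted(Edge** head, Edge* edge)
{
    Edge** listp = head;
    while ((*listp != nullptr) && ((*listp)->num < edge->num))
    {
        listp = &(*listp)->next;
    }
    edge->next = *listp;
    *listp     = edge;
}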
-
//------------------------------------------------------------------------
// fgRemoveRefPred: Decrements the reference count of a predecessor edge from "blockPred" to "block",
// removing the edge if it is no longer necessary.
@@ -1262,8 +1251,7 @@ flowList* Compiler::fgAddRefPred(BasicBlock* block,
// -- fgModified is set if a flow edge is removed (but not if an existing flow edge dup count is decremented),
// indicating that the flow graph shape has changed.
-flowList* Compiler::fgRemoveRefPred(BasicBlock* block,
- BasicBlock* blockPred)
+flowList* Compiler::fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred)
{
noway_assert(block != nullptr);
noway_assert(blockPred != nullptr);
@@ -1283,7 +1271,7 @@ flowList* Compiler::fgRemoveRefPred(BasicBlock* block,
assert(!fgCheapPredsValid);
flowList** ptrToPred;
- flowList* pred = fgGetPredForBlock(block, blockPred, &ptrToPred);
+ flowList* pred = fgGetPredForBlock(block, blockPred, &ptrToPred);
noway_assert(pred);
noway_assert(pred->flDupCount > 0);
@@ -1305,7 +1293,6 @@ flowList* Compiler::fgRemoveRefPred(BasicBlock* block,
}
}
-
//------------------------------------------------------------------------
// fgRemoveAllRefPreds: Removes a predecessor edge from one block to another, no matter what the "dup count" is.
//
@@ -1323,8 +1310,7 @@ flowList* Compiler::fgRemoveRefPred(BasicBlock* block,
// Notes:
// block->bbRefs is decremented to account for the reduction in incoming edges.
-flowList* Compiler::fgRemoveAllRefPreds(BasicBlock* block,
- BasicBlock* blockPred)
+flowList* Compiler::fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred)
{
assert(block != nullptr);
assert(blockPred != nullptr);
@@ -1333,7 +1319,7 @@ flowList* Compiler::fgRemoveAllRefPreds(BasicBlock* block,
assert(block->countOfInEdges() > 0);
flowList** ptrToPred;
- flowList* pred = fgGetPredForBlock(block, blockPred, &ptrToPred);
+ flowList* pred = fgGetPredForBlock(block, blockPred, &ptrToPred);
assert(pred != nullptr);
assert(pred->flDupCount > 0);
@@ -1349,7 +1335,6 @@ flowList* Compiler::fgRemoveAllRefPreds(BasicBlock* block,
return pred;
}
-
//------------------------------------------------------------------------
// fgRemoveAllRefPreds: Remove a predecessor edge, given the address of a pointer to it in the
// predecessor list, no matter what the "dup count" is.
@@ -1366,10 +1351,10 @@ flowList* Compiler::fgRemoveAllRefPreds(BasicBlock* block,
// -- This only works on the full predecessor lists, not the cheap preds lists.
//
// Notes:
-// block->bbRefs is decremented by the dup count of the predecessor edge, to account for the reduction in incoming edges.
+// block->bbRefs is decremented by the dup count of the predecessor edge, to account for the reduction in incoming
+// edges.
-flowList* Compiler::fgRemoveAllRefPreds(BasicBlock* block,
- flowList** ptrToPred)
+flowList* Compiler::fgRemoveAllRefPreds(BasicBlock* block, flowList** ptrToPred)
{
assert(block != nullptr);
assert(ptrToPred != nullptr);
@@ -1397,7 +1382,7 @@ flowList* Compiler::fgRemoveAllRefPreds(BasicBlock* block,
Removes all the appearances of block as predecessor of others
*/
-void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
+void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
{
assert(!fgCheapPredsValid);
@@ -1407,58 +1392,60 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
switch (block->bbJumpKind)
{
- case BBJ_CALLFINALLY:
- if (!(block->bbFlags & BBF_RETLESS_CALL))
- {
- assert(block->isBBCallAlwaysPair());
+ case BBJ_CALLFINALLY:
+ if (!(block->bbFlags & BBF_RETLESS_CALL))
+ {
+ assert(block->isBBCallAlwaysPair());
- /* The block after the BBJ_CALLFINALLY block is not reachable */
- bNext = block->bbNext;
+ /* The block after the BBJ_CALLFINALLY block is not reachable */
+ bNext = block->bbNext;
- /* bNext is an unreachable BBJ_ALWAYS block */
- noway_assert(bNext->bbJumpKind == BBJ_ALWAYS);
+ /* bNext is an unreachable BBJ_ALWAYS block */
+ noway_assert(bNext->bbJumpKind == BBJ_ALWAYS);
- while (bNext->countOfInEdges() > 0)
- {
- fgRemoveRefPred(bNext, bNext->bbPreds->flBlock);
+ while (bNext->countOfInEdges() > 0)
+ {
+ fgRemoveRefPred(bNext, bNext->bbPreds->flBlock);
+ }
}
- }
- __fallthrough;
+ __fallthrough;
- case BBJ_COND:
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
+ case BBJ_COND:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
- /* Update the predecessor list for 'block->bbJumpDest' and 'block->bbNext' */
- fgRemoveRefPred(block->bbJumpDest, block);
+ /* Update the predecessor list for 'block->bbJumpDest' and 'block->bbNext' */
+ fgRemoveRefPred(block->bbJumpDest, block);
- if (block->bbJumpKind != BBJ_COND)
- break;
+ if (block->bbJumpKind != BBJ_COND)
+ {
+ break;
+ }
- /* If BBJ_COND fall through */
- __fallthrough;
+ /* If BBJ_COND fall through */
+ __fallthrough;
- case BBJ_NONE:
+ case BBJ_NONE:
- /* Update the predecessor list for 'block->bbNext' */
- fgRemoveRefPred(block->bbNext, block);
- break;
+ /* Update the predecessor list for 'block->bbNext' */
+ fgRemoveRefPred(block->bbNext, block);
+ break;
- case BBJ_EHFILTERRET:
+ case BBJ_EHFILTERRET:
- block->bbJumpDest->bbRefs++; // To compensate the bbRefs-- inside fgRemoveRefPred
- fgRemoveRefPred(block->bbJumpDest, block);
- break;
+ block->bbJumpDest->bbRefs++; // To compensate the bbRefs-- inside fgRemoveRefPred
+ fgRemoveRefPred(block->bbJumpDest, block);
+ break;
- case BBJ_EHFINALLYRET:
+ case BBJ_EHFINALLYRET:
{
/* Remove block as the predecessor of the bbNext of all
BBJ_CALLFINALLY blocks calling this finally. No need
to look for BBJ_CALLFINALLY for fault handlers. */
- unsigned hndIndex = block->getHndIndex();
- EHblkDsc* ehDsc = ehGetDsc(hndIndex);
+ unsigned hndIndex = block->getHndIndex();
+ EHblkDsc* ehDsc = ehGetDsc(hndIndex);
if (ehDsc->HasFinallyHandler())
{
@@ -1470,9 +1457,8 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
{
- if ((bcall->bbFlags & BBF_REMOVED) ||
- bcall->bbJumpKind != BBJ_CALLFINALLY ||
- bcall->bbJumpDest != finBeg)
+ if ((bcall->bbFlags & BBF_REMOVED) || bcall->bbJumpKind != BBJ_CALLFINALLY ||
+ bcall->bbJumpDest != finBeg)
{
continue;
}
@@ -1484,27 +1470,26 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
}
break;
- case BBJ_THROW:
- case BBJ_RETURN:
- break;
-
- case BBJ_SWITCH:
- {
- unsigned jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab = block->bbJumpSwt->bbsDstTab;
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ break;
- do
+ case BBJ_SWITCH:
{
- fgRemoveRefPred(*jumpTab, block);
- }
- while (++jumpTab, --jumpCnt);
+ unsigned jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab = block->bbJumpSwt->bbsDstTab;
- break;
- }
+ do
+ {
+ fgRemoveRefPred(*jumpTab, block);
+ } while (++jumpTab, --jumpCnt);
- default:
- noway_assert(!"Block doesn't have a valid bbJumpKind!!!!");
- break;
+ break;
+ }
+
+ default:
+ noway_assert(!"Block doesn't have a valid bbJumpKind!!!!");
+ break;
}
}
@@ -1517,15 +1502,14 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
* the 'oldSwitchBlock' and adding 'newSwitchBlock'.
*/
-void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock,
- BasicBlock* newSwitchBlock)
+void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock)
{
noway_assert(oldSwitchBlock != nullptr);
noway_assert(newSwitchBlock != nullptr);
noway_assert(oldSwitchBlock->bbJumpKind == BBJ_SWITCH);
- unsigned jumpCnt = oldSwitchBlock->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab = oldSwitchBlock->bbJumpSwt->bbsDstTab;
+ unsigned jumpCnt = oldSwitchBlock->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab = oldSwitchBlock->bbJumpSwt->bbsDstTab;
unsigned i;
@@ -1550,7 +1534,7 @@ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock,
//
fgAddRefPred(bJump, newSwitchBlock);
}
-
+
if (m_switchDescMap != nullptr)
{
SwitchUniqueSuccSet uniqueSuccSet;
@@ -1578,28 +1562,26 @@ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock,
* We also must update the predecessor lists for 'oldTarget' and 'newPred'.
*/
-void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch,
- BasicBlock* newTarget,
- BasicBlock* oldTarget)
+void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget)
{
noway_assert(blockSwitch != nullptr);
- noway_assert(newTarget != nullptr);
- noway_assert(oldTarget != nullptr);
+ noway_assert(newTarget != nullptr);
+ noway_assert(oldTarget != nullptr);
noway_assert(blockSwitch->bbJumpKind == BBJ_SWITCH);
// For the jump targets values that match oldTarget of our BBJ_SWITCH
// replace predecessor 'blockSwitch' with 'newTarget'
//
- unsigned jumpCnt = blockSwitch->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab = blockSwitch->bbJumpSwt->bbsDstTab;
+ unsigned jumpCnt = blockSwitch->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab = blockSwitch->bbJumpSwt->bbsDstTab;
unsigned i = 0;
// Walk the switch's jump table looking for blocks to update the preds for
while (i < jumpCnt)
{
- if (jumpTab[i] == oldTarget) // We will update when jumpTab[i] matches
+ if (jumpTab[i] == oldTarget) // We will update when jumpTab[i] matches
{
// Remove the old edge [oldTarget from blockSwitch]
//
@@ -1634,7 +1616,7 @@ void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch,
//
newEdge->flDupCount++;
}
- i++; // Check the next entry in jumpTab[]
+ i++; // Check the next entry in jumpTab[]
}
// Maintain, if necessary, the set of unique targets of "block."
@@ -1643,9 +1625,9 @@ void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch,
// Make sure the new target has the proper bits set for being a branch target.
newTarget->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
- return; // We have replaced the jumps to oldTarget with newTarget
+ return; // We have replaced the jumps to oldTarget with newTarget
}
- i++; // Check the next entry in jumpTab[] for a match
+ i++; // Check the next entry in jumpTab[] for a match
}
noway_assert(!"Did not find oldTarget in jumpTab[]");
}
@@ -1668,51 +1650,51 @@ void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch,
//
// This function is most useful early, before the full predecessor lists have been computed.
//
-void Compiler::fgReplaceJumpTarget(BasicBlock* block,
- BasicBlock* newTarget,
- BasicBlock* oldTarget)
+void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget)
{
assert(block != nullptr);
switch (block->bbJumpKind)
{
- case BBJ_CALLFINALLY:
- case BBJ_COND:
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- case BBJ_EHFILTERRET:
- case BBJ_LEAVE: // This function will be called before import, so we still have BBJ_LEAVE
+ case BBJ_CALLFINALLY:
+ case BBJ_COND:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ case BBJ_EHFILTERRET:
+ case BBJ_LEAVE: // This function will be called before import, so we still have BBJ_LEAVE
- if (block->bbJumpDest == oldTarget)
- {
- block->bbJumpDest = newTarget;
- }
- break;
+ if (block->bbJumpDest == oldTarget)
+ {
+ block->bbJumpDest = newTarget;
+ }
+ break;
- case BBJ_NONE:
- case BBJ_EHFINALLYRET:
- case BBJ_THROW:
- case BBJ_RETURN:
- break;
+ case BBJ_NONE:
+ case BBJ_EHFINALLYRET:
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ break;
- case BBJ_SWITCH:
- unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab;
+ case BBJ_SWITCH:
+ unsigned jumpCnt;
+ jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab;
+ jumpTab = block->bbJumpSwt->bbsDstTab;
- for (unsigned i = 0; i < jumpCnt; i++)
- {
- if (jumpTab[i] == oldTarget)
+ for (unsigned i = 0; i < jumpCnt; i++)
{
- jumpTab[i] = newTarget;
- break;
+ if (jumpTab[i] == oldTarget)
+ {
+ jumpTab[i] = newTarget;
+ break;
+ }
}
- }
- break;
+ break;
- default:
- assert(!"Block doesn't have a valid bbJumpKind!!!!");
- unreached();
- break;
+ default:
+ assert(!"Block doesn't have a valid bbJumpKind!!!!");
+ unreached();
+ break;
}
}
@@ -1729,11 +1711,9 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block,
* references as before, just from a different predecessor block.
*/
-void Compiler::fgReplacePred(BasicBlock* block,
- BasicBlock* oldPred,
- BasicBlock* newPred)
+void Compiler::fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred)
{
- noway_assert(block != nullptr);
+ noway_assert(block != nullptr);
noway_assert(oldPred != nullptr);
noway_assert(newPred != nullptr);
assert(!fgCheapPredsValid);
@@ -1755,7 +1735,7 @@ void Compiler::fgReplacePred(BasicBlock* block,
* Returns true if block b1 dominates block b2.
*/
-bool Compiler::fgDominate(BasicBlock* b1, BasicBlock* b2)
+bool Compiler::fgDominate(BasicBlock* b1, BasicBlock* b2)
{
noway_assert(fgDomsComputed);
assert(!fgCheapPredsValid);
@@ -1773,15 +1753,19 @@ bool Compiler::fgDominate(BasicBlock* b1, BasicBlock* b2)
if (b2->bbNum > fgDomBBcount)
{
if (b1 == b2)
+ {
return true;
+ }
- for (flowList* pred = b2->bbPreds; pred != NULL; pred = pred->flNext)
+ for (flowList* pred = b2->bbPreds; pred != nullptr; pred = pred->flNext)
{
if (!fgDominate(b1, pred->flBlock))
+ {
return false;
+ }
}
- return b2->bbPreds != NULL;
+ return b2->bbPreds != nullptr;
}
if (b1->bbNum > fgDomBBcount)
@@ -1802,8 +1786,10 @@ bool Compiler::fgDominate(BasicBlock* b1, BasicBlock* b2)
}
/* Check if b1 dominates b2 */
- unsigned numA = b1->bbNum; noway_assert(numA <= fgDomBBcount);
- unsigned numB = b2->bbNum; noway_assert(numB <= fgDomBBcount);
+ unsigned numA = b1->bbNum;
+ noway_assert(numA <= fgDomBBcount);
+ unsigned numB = b2->bbNum;
+ noway_assert(numB <= fgDomBBcount);
// What we want to ask here is basically if A is in the middle of the path from B to the root (the entry node)
// in the dominator tree. Turns out that can be translated as:
@@ -1812,8 +1798,7 @@ bool Compiler::fgDominate(BasicBlock* b1, BasicBlock* b2)
//
// where the equality holds when you ask if A dominates itself.
bool treeDom =
- fgDomTreePreOrder[numA] <= fgDomTreePreOrder[numB] &&
- fgDomTreePostOrder[numA] >= fgDomTreePostOrder[numB];
+ fgDomTreePreOrder[numA] <= fgDomTreePreOrder[numB] && fgDomTreePostOrder[numA] >= fgDomTreePostOrder[numB];
return treeDom;
}
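A brief illustrative sketch (not part of the patch; the numbering is made up for a root -> A -> B chain) of the preorder/postorder containment test used in fgDominate above:

// Sketch only: in a dominator tree, A dominates B exactly when B's DFS visit
// is nested inside A's, i.e. pre(A) <= pre(B) and post(A) >= post(B).
// Example numbering for the chain root -> A -> B:
//   node   preOrder  postOrder
//   root      1          3
//   A         2          2
//   B         3          1
// A dominates B since 2 <= 3 and 2 >= 1; B does not dominate A since 3 > 2.
bool treeDominates(unsigned preA, unsigned postA, unsigned preB, unsigned postB)
{
    return (preA <= preB) && (postA >= postB);
}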
@@ -1823,7 +1808,7 @@ bool Compiler::fgDominate(BasicBlock* b1, BasicBlock* b2)
* Returns true if block b1 can reach block b2.
*/
-bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2)
+bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2)
{
noway_assert(fgDomsComputed);
assert(!fgCheapPredsValid);
@@ -1841,12 +1826,16 @@ bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2)
if (b2->bbNum > fgDomBBcount)
{
if (b1 == b2)
+ {
return true;
+ }
- for (flowList* pred = b2->bbPreds; pred != NULL; pred = pred->flNext)
+ for (flowList* pred = b2->bbPreds; pred != nullptr; pred = pred->flNext)
{
if (fgReachable(b1, pred->flBlock))
+ {
return true;
+ }
}
return false;
@@ -1857,10 +1846,14 @@ bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2)
noway_assert(b1->bbJumpKind == BBJ_NONE || b1->bbJumpKind == BBJ_ALWAYS || b1->bbJumpKind == BBJ_COND);
if (b1->bbFallsThrough() && fgReachable(b1->bbNext, b2))
+ {
return true;
+ }
if (b1->bbJumpKind == BBJ_ALWAYS || b1->bbJumpKind == BBJ_COND)
+ {
return fgReachable(b1->bbJumpDest, b2);
+ }
return false;
}
@@ -1871,7 +1864,6 @@ bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2)
return BlockSetOps::IsMember(this, b2->bbReach, b1->bbNum);
}
-
/*****************************************************************************
* Update changed flow graph information.
*
@@ -1879,7 +1871,7 @@ bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2)
* it again.
*/
-void Compiler::fgUpdateChangedFlowGraph()
+void Compiler::fgUpdateChangedFlowGraph()
{
// We need to clear this so we don't hit an assert calling fgRenumberBlocks().
fgDomsComputed = false;
@@ -1910,7 +1902,7 @@ void Compiler::fgUpdateChangedFlowGraph()
* linear memory to label every block with its SCC.
*/
-void Compiler::fgComputeReachabilitySets()
+void Compiler::fgComputeReachabilitySets()
{
assert(fgComputePredsDone);
assert(!fgCheapPredsValid);
@@ -1919,7 +1911,7 @@ void Compiler::fgComputeReachabilitySets()
fgReachabilitySetsValid = false;
#endif // DEBUG
- BasicBlock* block;
+ BasicBlock* block;
for (block = fgFirstBB; block != nullptr; block = block->bbNext)
{
@@ -1935,7 +1927,7 @@ void Compiler::fgComputeReachabilitySets()
/* Find the reachable blocks */
// Also, set BBF_GC_SAFE_POINT.
- bool change;
+ bool change;
BlockSet BLOCKSET_INIT_NOCOPY(newReach, BlockSetOps::MakeEmpty(this));
do
{
@@ -1960,7 +1952,7 @@ void Compiler::fgComputeReachabilitySets()
}
}
- if (predGcSafe)
+ if (predGcSafe)
{
block->bbFlags |= BBF_GC_SAFE_POINT;
}
@@ -1971,11 +1963,10 @@ void Compiler::fgComputeReachabilitySets()
change = true;
}
}
- }
- while (change);
+ } while (change);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nAfter computing reachability sets:\n");
fgDispReach();
@@ -1995,7 +1986,7 @@ void Compiler::fgComputeReachabilitySets()
* of unwinding, even if the call doesn't return (due to an explicit throw, for example).
*/
-void Compiler::fgComputeEnterBlocksSet()
+void Compiler::fgComputeEnterBlocksSet()
{
#ifdef DEBUG
fgEnterBlksSetValid = false;
@@ -2012,9 +2003,7 @@ void Compiler::fgComputeEnterBlocksSet()
/* Also 'or' in the handler basic blocks */
EHblkDsc* HBtab;
EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount;
- HBtab < HBtabEnd;
- HBtab++)
+ for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
{
if (HBtab->HasFilter())
{
@@ -2066,19 +2055,17 @@ void Compiler::fgComputeEnterBlocksSet()
* Return true if any unreachable blocks were removed.
*/
-bool Compiler::fgRemoveUnreachableBlocks()
+bool Compiler::fgRemoveUnreachableBlocks()
{
assert(!fgCheapPredsValid);
assert(fgReachabilitySetsValid);
- bool hasLoops = false;
- bool hasUnreachableBlocks = false;
- BasicBlock* block;
+ bool hasLoops = false;
+ bool hasUnreachableBlocks = false;
+ BasicBlock* block;
/* Record unreachable blocks */
- for (block = fgFirstBB;
- block != NULL;
- block = block->bbNext)
+ for (block = fgFirstBB; block != nullptr; block = block->bbNext)
{
/* Internal throw blocks are also reachable */
if (fgIsThrowHlpBlk(block))
@@ -2119,9 +2106,9 @@ bool Compiler::fgRemoveUnreachableBlocks()
/* Unmark the block as removed, */
/* clear BBF_INTERNAL as well and set BBJ_IMPORTED */
- block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL | BBF_NEEDS_GCPOLL);
- block->bbFlags |= BBF_IMPORTED;
- block->bbJumpKind = BBJ_THROW;
+ block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL | BBF_NEEDS_GCPOLL);
+ block->bbFlags |= BBF_IMPORTED;
+ block->bbJumpKind = BBJ_THROW;
block->bbSetRunRarely();
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
@@ -2141,12 +2128,14 @@ bool Compiler::fgRemoveUnreachableBlocks()
}
continue;
-SKIP_BLOCK:;
+ SKIP_BLOCK:;
- //if (block->isRunRarely())
+ // if (block->isRunRarely())
// continue;
if (block->bbJumpKind == BBJ_RETURN)
+ {
continue;
+ }
/* Set BBF_LOOP_HEAD if we have backwards branches to this block */
@@ -2157,7 +2146,9 @@ SKIP_BLOCK:;
if (blockNum <= predBlock->bbNum)
{
if (predBlock->bbJumpKind == BBJ_CALLFINALLY)
+ {
continue;
+ }
/* If block can reach predBlock then we have a loop head */
if (BlockSetOps::IsMember(this, predBlock->bbReach, blockNum))
@@ -2172,14 +2163,12 @@ SKIP_BLOCK:;
}
}
- fgHasLoops = hasLoops;
+ fgHasLoops = hasLoops;
if (hasUnreachableBlocks)
{
// Now remove the unreachable blocks
- for (block = fgFirstBB;
- block != NULL;
- block = block->bbNext)
+ for (block = fgFirstBB; block != nullptr; block = block->bbNext)
{
// If we mark the block with BBF_REMOVED then
// we need to call fgRemovedBlock() on it
@@ -2209,11 +2198,13 @@ SKIP_BLOCK:;
* Assumes the predecessor lists are computed and correct.
*/
-void Compiler::fgComputeReachability()
+void Compiler::fgComputeReachability()
{
#ifdef DEBUG
if (verbose)
+ {
printf("*************** In fgComputeReachability\n");
+ }
fgVerifyHandlerTab();
@@ -2231,7 +2222,7 @@ void Compiler::fgComputeReachability()
// used to find return blocks.
if (block->bbJumpKind == BBJ_RETURN)
{
- fgReturnBlocks = new(this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks);
+ fgReturnBlocks = new (this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks);
}
}
@@ -2243,7 +2234,7 @@ void Compiler::fgComputeReachability()
// call to the finally to stay rooted, until a second round of reachability is done.
// The dominator algorithm expects that all blocks can be reached from the fgEnterBlks set.
unsigned passNum = 1;
- bool changed;
+ bool changed;
do
{
// Just to be paranoid, avoid infinite loops; fall back to minopts.
@@ -2256,7 +2247,7 @@ void Compiler::fgComputeReachability()
JITDUMP("\nRenumbering the basic blocks for fgComputeReachability pass #%u\n", passNum);
passNum++;
fgRenumberBlocks();
-
+
//
// Compute fgEnterBlks
//
@@ -2265,7 +2256,7 @@ void Compiler::fgComputeReachability()
//
// Compute bbReach
- //
+ //
fgComputeReachabilitySets();
@@ -2280,7 +2271,7 @@ void Compiler::fgComputeReachability()
} while (changed);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nAfter computing reachability:\n");
fgDispBasicBlocks(verboseTrees);
@@ -2298,7 +2289,6 @@ void Compiler::fgComputeReachability()
fgComputeDoms();
}
-
/** In order to be able to compute dominance, we need to first get a DFS reverse post order sort on the basic flow graph
* for the dominance algorithm to operate correctly. The reason why we need the DFS sort is because
* we will build the dominance sets using the partial order induced by the DFS sorting. With this
@@ -2370,13 +2360,14 @@ void Compiler::fgDfsInvPostOrder()
#endif // DEBUG
}
-BlockSet_ValRet_T Compiler::fgDomFindStartNodes()
+BlockSet_ValRet_T Compiler::fgDomFindStartNodes()
{
- unsigned j;
+ unsigned j;
BasicBlock* block;
// startNodes :: A set that represents which basic blocks in the flow graph don't have incoming edges.
- // We begin assuming everything is a start block and remove any block that is being referenced by another in its successor list.
+ // We begin assuming everything is a start block and remove any block that is being referenced by another in its
+ // successor list.
BlockSet BLOCKSET_INIT_NOCOPY(startNodes, BlockSetOps::MakeFull(this));
@@ -2435,7 +2426,7 @@ void Compiler::fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, uns
// The search is terminated once all the actions have been processed.
while (stack.Height() != 0)
{
- DfsBlockEntry current = stack.Pop();
+ DfsBlockEntry current = stack.Pop();
BasicBlock* currentBlock = current.dfsBlock;
if (current.dfsStackState == DSS_Pre)
@@ -2475,7 +2466,7 @@ void Compiler::fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, uns
unsigned invCount = fgBBcount - *count + 1;
assert(1 <= invCount && invCount <= fgBBNumMax);
fgBBInvPostOrder[invCount] = currentBlock;
- currentBlock->bbDfsNum = invCount;
+ currentBlock->bbDfsNum = invCount;
++(*count);
}
}
@@ -2487,7 +2478,9 @@ void Compiler::fgComputeDoms()
#ifdef DEBUG
if (verbose)
+ {
printf("*************** In fgComputeDoms\n");
+ }
fgVerifyHandlerTab();
@@ -2502,7 +2495,7 @@ void Compiler::fgComputeDoms()
BlockSet BLOCKSET_INIT_NOCOPY(processedBlks, BlockSetOps::MakeEmpty(this));
- fgBBInvPostOrder = new(this, CMK_DominatorMemory) BasicBlock*[fgBBNumMax + 1];
+ fgBBInvPostOrder = new (this, CMK_DominatorMemory) BasicBlock*[fgBBNumMax + 1];
memset(fgBBInvPostOrder, 0, sizeof(BasicBlock*) * (fgBBNumMax + 1));
fgDfsInvPostOrder();
@@ -2513,27 +2506,27 @@ void Compiler::fgComputeDoms()
// (with bbRoot as the only basic block in it) set as flRoot.
    // Later on, we clear their predecessors and let them be nullptr again.
// Since we number basic blocks starting at one, the imaginary entry block is conveniently numbered as zero.
- flowList flRoot;
+ flowList flRoot;
BasicBlock bbRoot;
- bbRoot.bbPreds = nullptr;
- bbRoot.bbNum = 0;
- bbRoot.bbIDom = &bbRoot;
+ bbRoot.bbPreds = nullptr;
+ bbRoot.bbNum = 0;
+ bbRoot.bbIDom = &bbRoot;
bbRoot.bbDfsNum = 0;
- flRoot.flNext = 0;
- flRoot.flBlock = &bbRoot;
+ flRoot.flNext = nullptr;
+ flRoot.flBlock = &bbRoot;
fgBBInvPostOrder[0] = &bbRoot;
// Mark both bbRoot and fgFirstBB processed
- BlockSetOps::AddElemD(this, processedBlks, 0); // bbRoot == block #0
- BlockSetOps::AddElemD(this, processedBlks, 1); // fgFirstBB == block #1
+ BlockSetOps::AddElemD(this, processedBlks, 0); // bbRoot == block #0
+ BlockSetOps::AddElemD(this, processedBlks, 1); // fgFirstBB == block #1
assert(fgFirstBB->bbNum == 1);
// Special case fgFirstBB to say its IDom is bbRoot.
fgFirstBB->bbIDom = &bbRoot;
- BasicBlock* block = nullptr;
+ BasicBlock* block = nullptr;
for (block = fgFirstBB->bbNext; block != nullptr; block = block->bbNext)
{
@@ -2543,7 +2536,7 @@ void Compiler::fgComputeDoms()
if (block->bbPreds == nullptr)
{
block->bbPreds = &flRoot;
- block->bbIDom = &bbRoot;
+ block->bbIDom = &bbRoot;
BlockSetOps::AddElemD(this, processedBlks, block->bbNum);
}
else
@@ -2555,11 +2548,9 @@ void Compiler::fgComputeDoms()
// Mark the EH blocks as entry blocks and also flag them as processed.
if (compHndBBtabCount > 0)
{
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount;
- HBtab < HBtabEnd;
- HBtab++)
+ EHblkDsc* HBtab;
+ EHblkDsc* HBtabEnd;
+ for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
{
if (HBtab->HasFilter())
{
@@ -2576,17 +2567,20 @@ void Compiler::fgComputeDoms()
while (changed)
{
changed = false;
- for (unsigned i = 1; i <= fgBBNumMax; ++i) // Process each actual block; don't process the imaginary predecessor block.
+ for (unsigned i = 1; i <= fgBBNumMax;
+ ++i) // Process each actual block; don't process the imaginary predecessor block.
{
- flowList* first = nullptr;
+ flowList* first = nullptr;
BasicBlock* newidom = nullptr;
- block = fgBBInvPostOrder[i];
+ block = fgBBInvPostOrder[i];
// If we have a block that has bbRoot as its bbIDom
// it means we flag it as processed and as an entry block so
// in this case we're all set.
if (block->bbIDom == &bbRoot)
+ {
continue;
+ }
        // Pick up the first processed predecessor of the current block.
for (first = block->bbPreds; first != nullptr; first = first->flNext)
@@ -2624,7 +2618,7 @@ void Compiler::fgComputeDoms()
{
noway_assert(newidom != nullptr);
block->bbIDom = newidom;
- changed = true;
+ changed = true;
}
BlockSetOps::AddElemD(this, processedBlks, block->bbNum);
}
@@ -2650,8 +2644,8 @@ void Compiler::fgComputeDoms()
fgBuildDomTree();
- fgModified = false;
- fgDomBBcount = fgBBcount;
+ fgModified = false;
+ fgDomBBcount = fgBBcount;
assert(fgBBcount == fgBBNumMax);
assert(BasicBlockBitSetTraits::GetSize(this) == fgDomBBcount + 1);
@@ -2660,7 +2654,7 @@ void Compiler::fgComputeDoms()
void Compiler::fgBuildDomTree()
{
- unsigned i;
+ unsigned i;
BasicBlock* block;
#ifdef DEBUG
@@ -2670,17 +2664,18 @@ void Compiler::fgBuildDomTree()
}
#endif // DEBUG
- // domTree :: The dominance tree represented using adjacency lists. We use BasicBlockList to represent edges. Indexed by basic block number.
- unsigned bbArraySize = fgBBNumMax + 1;
- BasicBlockList** domTree = new(this, CMK_DominatorMemory) BasicBlockList*[bbArraySize];
+ // domTree :: The dominance tree represented using adjacency lists. We use BasicBlockList to represent edges.
+ // Indexed by basic block number.
+ unsigned bbArraySize = fgBBNumMax + 1;
+ BasicBlockList** domTree = new (this, CMK_DominatorMemory) BasicBlockList*[bbArraySize];
- fgDomTreePreOrder = new(this, CMK_DominatorMemory) unsigned[bbArraySize];
- fgDomTreePostOrder = new(this, CMK_DominatorMemory) unsigned[bbArraySize];
+ fgDomTreePreOrder = new (this, CMK_DominatorMemory) unsigned[bbArraySize];
+ fgDomTreePostOrder = new (this, CMK_DominatorMemory) unsigned[bbArraySize];
// Initialize all the data structures.
for (i = 0; i < bbArraySize; ++i)
{
- domTree[i] = nullptr;
+ domTree[i] = nullptr;
fgDomTreePreOrder[i] = fgDomTreePostOrder[i] = 0;
}
@@ -2691,8 +2686,8 @@ void Compiler::fgBuildDomTree()
// we proceed to append this block to the children of the dominator node.
if (block->bbIDom->bbNum != 0)
{
- int bbNum = block->bbIDom->bbNum;
- domTree[bbNum] = new(this, CMK_DominatorMemory) BasicBlockList(block, domTree[bbNum]);
+ int bbNum = block->bbIDom->bbNum;
+ domTree[bbNum] = new (this, CMK_DominatorMemory) BasicBlockList(block, domTree[bbNum]);
}
else
{
@@ -2749,7 +2744,7 @@ void Compiler::fgBuildDomTree()
}
}
- noway_assert(preNum == domTreeReachable + 1);
+ noway_assert(preNum == domTreeReachable + 1);
noway_assert(postNum == domTreeReachable + 1);
// Once we have all the reachable nodes numbered, we proceed to
@@ -2768,9 +2763,9 @@ void Compiler::fgBuildDomTree()
}
}
- noway_assert(preNum == fgBBNumMax + 1);
+ noway_assert(preNum == fgBBNumMax + 1);
noway_assert(postNum == fgBBNumMax + 1);
- noway_assert(fgDomTreePreOrder[0] == 0); // Unused first element
+ noway_assert(fgDomTreePreOrder[0] == 0); // Unused first element
noway_assert(fgDomTreePostOrder[0] == 0); // Unused first element
#ifdef DEBUG
@@ -2845,10 +2840,7 @@ void Compiler::fgDispDomTree(BasicBlockList** domTree)
// These numberings are used to provide constant time lookup for
// ancestor/descendant tests between pairs of nodes in the tree.
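
Concretely, once each node receives a preorder number when the walk enters it and a postorder number when it leaves, an ancestor query in the dominator tree reduces to two integer comparisons. A minimal sketch of that containment test follows; preNum/postNum are hypothetical stand-ins for fgDomTreePreOrder/fgDomTreePostOrder, indexed by basic block number and filled in by fgTraverseDomTree below.

    // Sketch only: 'a' dominates 'b' exactly when a's DFS interval encloses b's.
    // In the JIT the arrays are fgDomTreePreOrder/fgDomTreePostOrder; here they
    // are plain arrays purely for illustration.
    extern unsigned preNum[];  // preorder number of each block in the dominator tree
    extern unsigned postNum[]; // postorder number of each block in the dominator tree

    inline bool dominates(unsigned a, unsigned b)
    {
        return (preNum[a] <= preNum[b]) && (postNum[a] >= postNum[b]);
    }
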
-void Compiler::fgTraverseDomTree(unsigned bbNum,
- BasicBlockList** domTree,
- unsigned* preNum,
- unsigned* postNum)
+void Compiler::fgTraverseDomTree(unsigned bbNum, BasicBlockList** domTree, unsigned* preNum, unsigned* postNum)
{
noway_assert(bbNum <= fgBBNumMax);
@@ -2870,7 +2862,7 @@ void Compiler::fgTraverseDomTree(unsigned bbNum,
// The search is terminated once all the actions have been processed.
while (stack.Height() != 0)
{
- DfsNumEntry current = stack.Pop();
+ DfsNumEntry current = stack.Pop();
unsigned currentNum = current.dfsNum;
if (current.dfsStackState == DSS_Pre)
@@ -2921,7 +2913,7 @@ void Compiler::fgTraverseDomTree(unsigned bbNum,
// dominator tree between two basic blocks. The LCA in the Dominance tree
// represents the closest dominator between the two basic blocks. Used to
// adjust the IDom value in fgComputeDoms.
-BasicBlock* Compiler::fgIntersectDom(BasicBlock* a, BasicBlock* b)
+BasicBlock* Compiler::fgIntersectDom(BasicBlock* a, BasicBlock* b)
{
BasicBlock* finger1 = a;
BasicBlock* finger2 = b;
@@ -2939,9 +2931,8 @@ BasicBlock* Compiler::fgIntersectDom(BasicBlock* a, BasicBlock* b)
return finger1;
}
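
For reference, the walk described above is the standard intersection step of the Cooper-Harvey-Kennedy dominator construction: both fingers are moved up the partially built dominator tree until they meet, and the meeting point is the closest common dominator. A self-contained sketch, using a hypothetical Node with an idom pointer and a reverse post-order number rather than the JIT's BasicBlock/bbIDom/bbDfsNum, is:

    // Sketch only, not the JIT's types. Assumes idom is already set for processed
    // nodes, the entry block's idom is itself, and the entry has the smallest
    // reverse post-order number.
    struct Node
    {
        Node*    idom;
        unsigned rpoNum;
    };

    Node* intersect(Node* a, Node* b)
    {
        while (a != b)
        {
            while (a->rpoNum > b->rpoNum) // 'a' sits lower in the order; hop to its idom
            {
                a = a->idom;
            }
            while (b->rpoNum > a->rpoNum) // then advance 'b' the same way
            {
                b = b->idom;
            }
        }
        return a; // closest common dominator of the original 'a' and 'b'
    }
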
-
// Return a BlockSet containing all the blocks that dominate 'block'.
-BlockSet_ValRet_T Compiler::fgGetDominatorSet(BasicBlock* block)
+BlockSet_ValRet_T Compiler::fgGetDominatorSet(BasicBlock* block)
{
assert(block != nullptr);
@@ -2955,8 +2946,7 @@ BlockSet_ValRet_T Compiler::fgGetDominatorSet(BasicBlock* block)
break; // We found a cycle in the IDom list, so we're done.
}
block = block->bbIDom;
- }
- while (block != nullptr);
+ } while (block != nullptr);
return domSet;
}
@@ -2977,15 +2967,15 @@ BlockSet_ValRet_T Compiler::fgGetDominatorSet(BasicBlock* block)
* 4. The cheap preds lists will contain duplicates if a single switch table has multiple branches
* to the same block. Thus, we don't spend the time looking for duplicates for every edge we insert.
*/
-void Compiler::fgComputeCheapPreds()
+void Compiler::fgComputeCheapPreds()
{
- noway_assert(!fgComputePredsDone); // We can't do this if we've got the full preds.
+ noway_assert(!fgComputePredsDone); // We can't do this if we've got the full preds.
noway_assert(fgFirstBB != nullptr);
BasicBlock* block;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\n*************** In fgComputeCheapPreds()\n");
fgDispBasicBlocks();
@@ -3000,59 +2990,63 @@ void Compiler::fgComputeCheapPreds()
{
switch (block->bbJumpKind)
{
- case BBJ_COND:
- fgAddCheapPred(block->bbJumpDest, block);
- fgAddCheapPred(block->bbNext, block);
- break;
+ case BBJ_COND:
+ fgAddCheapPred(block->bbJumpDest, block);
+ fgAddCheapPred(block->bbNext, block);
+ break;
- case BBJ_CALLFINALLY:
- case BBJ_LEAVE: // If fgComputeCheapPreds is called before all blocks are imported, BBJ_LEAVE blocks are still in the BB list.
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- fgAddCheapPred(block->bbJumpDest, block);
- break;
+ case BBJ_CALLFINALLY:
+ case BBJ_LEAVE: // If fgComputeCheapPreds is called before all blocks are imported, BBJ_LEAVE blocks are
+ // still in the BB list.
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ fgAddCheapPred(block->bbJumpDest, block);
+ break;
- case BBJ_NONE:
- fgAddCheapPred(block->bbNext, block);
- break;
+ case BBJ_NONE:
+ fgAddCheapPred(block->bbNext, block);
+ break;
- case BBJ_EHFILTERRET:
- // Connect end of filter to catch handler.
- // In a well-formed program, this cannot be null. Tolerate here, so that we can call
- // fgComputeCheapPreds before fgImport on an ill-formed program; the problem will be detected in fgImport.
- if (block->bbJumpDest != nullptr)
- {
- fgAddCheapPred(block->bbJumpDest, block);
- }
- break;
+ case BBJ_EHFILTERRET:
+ // Connect end of filter to catch handler.
+ // In a well-formed program, this cannot be null. Tolerate here, so that we can call
+ // fgComputeCheapPreds before fgImport on an ill-formed program; the problem will be detected in
+ // fgImport.
+ if (block->bbJumpDest != nullptr)
+ {
+ fgAddCheapPred(block->bbJumpDest, block);
+ }
+ break;
- case BBJ_SWITCH:
- unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab;
+ case BBJ_SWITCH:
+ unsigned jumpCnt;
+ jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab;
+ jumpTab = block->bbJumpSwt->bbsDstTab;
- do
- {
- fgAddCheapPred(*jumpTab, block);
- }
- while (++jumpTab, --jumpCnt);
+ do
+ {
+ fgAddCheapPred(*jumpTab, block);
+ } while (++jumpTab, --jumpCnt);
- break;
+ break;
- case BBJ_EHFINALLYRET: // It's expensive to compute the preds for this case, so we don't for the cheap preds.
- case BBJ_THROW:
- case BBJ_RETURN:
- break;
+ case BBJ_EHFINALLYRET: // It's expensive to compute the preds for this case, so we don't for the cheap
+ // preds.
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
}
fgCheapPredsValid = true;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\n*************** After fgComputeCheapPreds()\n");
fgDispBasicBlocks();
@@ -3065,8 +3059,7 @@ void Compiler::fgComputeCheapPreds()
* Add 'blockPred' to the cheap predecessor list of 'block'.
*/
-void Compiler::fgAddCheapPred(BasicBlock* block,
- BasicBlock* blockPred)
+void Compiler::fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred)
{
assert(!fgComputePredsDone);
assert(block != nullptr);
@@ -3075,7 +3068,7 @@ void Compiler::fgAddCheapPred(BasicBlock* block,
block->bbCheapPreds = new (this, CMK_FlowList) BasicBlockList(blockPred, block->bbCheapPreds);
#if MEASURE_BLOCK_SIZE
- genFlowNodeCnt += 1;
+ genFlowNodeCnt += 1;
genFlowNodeSize += sizeof(BasicBlockList);
#endif // MEASURE_BLOCK_SIZE
}
@@ -3084,13 +3077,12 @@ void Compiler::fgAddCheapPred(BasicBlock* block,
* Remove 'blockPred' from the cheap predecessor list of 'block'.
* If there are duplicate edges, only remove one of them.
*/
-void Compiler::fgRemoveCheapPred(BasicBlock* block,
- BasicBlock* blockPred)
+void Compiler::fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred)
{
assert(!fgComputePredsDone);
assert(fgCheapPredsValid);
- flowList* oldEdge = NULL;
+ flowList* oldEdge = nullptr;
assert(block != nullptr);
assert(blockPred != nullptr);
@@ -3104,43 +3096,45 @@ void Compiler::fgRemoveCheapPred(BasicBlock* block,
else
{
BasicBlockList* pred;
- for (pred = block->bbCheapPreds;
- pred->next != nullptr;
- pred = pred->next)
+ for (pred = block->bbCheapPreds; pred->next != nullptr; pred = pred->next)
{
if (blockPred == pred->next->block)
+ {
break;
+ }
}
noway_assert(pred->next != nullptr); // we better have found it!
- pred->next = pred->next->next; // splice it out
+ pred->next = pred->next->next; // splice it out
}
}
-void Compiler::fgRemovePreds()
+void Compiler::fgRemovePreds()
{
- C_ASSERT(offsetof(BasicBlock, bbPreds) == offsetof(BasicBlock, bbCheapPreds)); // bbPreds and bbCheapPreds are at the same place in a union,
- C_ASSERT(sizeof( ((BasicBlock*)0)->bbPreds ) == sizeof( ((BasicBlock*)0)->bbCheapPreds )); // and are the same size. So, this function removes both.
+ C_ASSERT(offsetof(BasicBlock, bbPreds) ==
+ offsetof(BasicBlock, bbCheapPreds)); // bbPreds and bbCheapPreds are at the same place in a union,
+ C_ASSERT(sizeof(((BasicBlock*)0)->bbPreds) ==
+ sizeof(((BasicBlock*)0)->bbCheapPreds)); // and are the same size. So, this function removes both.
for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
{
block->bbPreds = nullptr;
}
fgComputePredsDone = false;
- fgCheapPredsValid = false;
+ fgCheapPredsValid = false;
}
/*****************************************************************************
*
* Function called to compute the bbPreds lists.
*/
-void Compiler::fgComputePreds()
+void Compiler::fgComputePreds()
{
noway_assert(fgFirstBB);
BasicBlock* block;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\n*************** In fgComputePreds()\n");
fgDispBasicBlocks();
@@ -3167,69 +3161,75 @@ void Compiler::fgComputePreds()
{
switch (block->bbJumpKind)
{
- case BBJ_CALLFINALLY:
- if (!(block->bbFlags & BBF_RETLESS_CALL))
- {
- assert(block->isBBCallAlwaysPair());
+ case BBJ_CALLFINALLY:
+ if (!(block->bbFlags & BBF_RETLESS_CALL))
+ {
+ assert(block->isBBCallAlwaysPair());
- /* Mark the next block as being a jump target,
- since the call target will return there */
- PREFIX_ASSUME(block->bbNext != NULL);
- block->bbNext->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
- }
+ /* Mark the next block as being a jump target,
+ since the call target will return there */
+ PREFIX_ASSUME(block->bbNext != nullptr);
+ block->bbNext->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
+ }
- __fallthrough;
+ __fallthrough;
- case BBJ_LEAVE: // Sometimes fgComputePreds is called before all blocks are imported, so BBJ_LEAVE
- // blocks are still in the BB list.
- case BBJ_COND:
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
+ case BBJ_LEAVE: // Sometimes fgComputePreds is called before all blocks are imported, so BBJ_LEAVE
+ // blocks are still in the BB list.
+ case BBJ_COND:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
- /* Mark the jump dest block as being a jump target */
- block->bbJumpDest->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
+ /* Mark the jump dest block as being a jump target */
+ block->bbJumpDest->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
- fgAddRefPred(block->bbJumpDest, block, nullptr, true);
+ fgAddRefPred(block->bbJumpDest, block, nullptr, true);
- /* Is the next block reachable? */
+ /* Is the next block reachable? */
- if (block->bbJumpKind != BBJ_COND)
- break;
+ if (block->bbJumpKind != BBJ_COND)
+ {
+ break;
+ }
- noway_assert(block->bbNext);
+ noway_assert(block->bbNext);
- /* Fall through, the next block is also reachable */
- __fallthrough;
+ /* Fall through, the next block is also reachable */
+ __fallthrough;
- case BBJ_NONE:
+ case BBJ_NONE:
- fgAddRefPred(block->bbNext, block, nullptr, true);
- break;
+ fgAddRefPred(block->bbNext, block, nullptr, true);
+ break;
- case BBJ_EHFILTERRET:
+ case BBJ_EHFILTERRET:
- // Connect end of filter to catch handler.
- // In a well-formed program, this cannot be null. Tolerate here, so that we can call
- // fgComputePreds before fgImport on an ill-formed program; the problem will be detected in fgImport.
- if (block->bbJumpDest != NULL)
- {
- fgAddRefPred(block->bbJumpDest, block, nullptr, true);
- }
- break;
+ // Connect end of filter to catch handler.
+ // In a well-formed program, this cannot be null. Tolerate here, so that we can call
+ // fgComputePreds before fgImport on an ill-formed program; the problem will be detected in fgImport.
+ if (block->bbJumpDest != nullptr)
+ {
+ fgAddRefPred(block->bbJumpDest, block, nullptr, true);
+ }
+ break;
- case BBJ_EHFINALLYRET:
+ case BBJ_EHFINALLYRET:
{
/* Connect the end of the finally to the successor of
the call to this finally */
if (!block->hasHndIndex())
+ {
NO_WAY("endfinally outside a finally/fault block.");
+ }
- unsigned hndIndex = block->getHndIndex();
- EHblkDsc* ehDsc = ehGetDsc(hndIndex);
+ unsigned hndIndex = block->getHndIndex();
+ EHblkDsc* ehDsc = ehGetDsc(hndIndex);
if (!ehDsc->HasFinallyOrFaultHandler())
+ {
NO_WAY("endfinally outside a finally/fault block.");
+ }
if (ehDsc->HasFinallyHandler())
{
@@ -3241,8 +3241,10 @@ void Compiler::fgComputePreds()
BasicBlock* finBeg = ehDsc->ebdHndBeg;
for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
{
- if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
+ if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
+ {
continue;
+ }
noway_assert(bcall->isBBCallAlwaysPair());
fgAddRefPred(bcall->bbNext, block, nullptr, true);
@@ -3251,28 +3253,29 @@ void Compiler::fgComputePreds()
}
break;
- case BBJ_THROW:
- case BBJ_RETURN:
- break;
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ break;
- case BBJ_SWITCH:
- unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab;
+ case BBJ_SWITCH:
+ unsigned jumpCnt;
+ jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab;
+ jumpTab = block->bbJumpSwt->bbsDstTab;
- do
- {
- /* Mark the target block as being a jump target */
- (*jumpTab)->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
+ do
+ {
+ /* Mark the target block as being a jump target */
+ (*jumpTab)->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
- fgAddRefPred(*jumpTab, block, nullptr, true);
- }
- while (++jumpTab, --jumpCnt);
+ fgAddRefPred(*jumpTab, block, nullptr, true);
+ } while (++jumpTab, --jumpCnt);
- break;
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
}
@@ -3288,11 +3291,11 @@ void Compiler::fgComputePreds()
ehDsc->ebdHndBeg->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
}
- fgModified = false;
+ fgModified = false;
fgComputePredsDone = true;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\n*************** After fgComputePreds()\n");
fgDispBasicBlocks();
@@ -3301,32 +3304,32 @@ void Compiler::fgComputePreds()
#endif
}
-unsigned Compiler::fgNSuccsOfFinallyRet(BasicBlock* block)
+unsigned Compiler::fgNSuccsOfFinallyRet(BasicBlock* block)
{
BasicBlock* bb;
- unsigned res;
+ unsigned res;
fgSuccOfFinallyRetWork(block, ~0, &bb, &res);
return res;
}
-BasicBlock* Compiler::fgSuccOfFinallyRet(BasicBlock* block, unsigned i)
+BasicBlock* Compiler::fgSuccOfFinallyRet(BasicBlock* block, unsigned i)
{
BasicBlock* bb;
- unsigned res;
+ unsigned res;
fgSuccOfFinallyRetWork(block, i, &bb, &res);
return bb;
}
-void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres)
+void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres)
{
assert(block->hasHndIndex()); // Otherwise, endfinally outside a finally/fault block?
- unsigned hndIndex = block->getHndIndex();
- EHblkDsc* ehDsc = ehGetDsc(hndIndex);
+ unsigned hndIndex = block->getHndIndex();
+ EHblkDsc* ehDsc = ehGetDsc(hndIndex);
- assert(ehDsc->HasFinallyOrFaultHandler()); // Otherwise, endfinally outside a finally/fault block.
+ assert(ehDsc->HasFinallyOrFaultHandler()); // Otherwise, endfinally outside a finally/fault block.
- *bres = NULL;
+ *bres = nullptr;
unsigned succNum = 0;
if (ehDsc->HasFinallyHandler())
@@ -3335,12 +3338,14 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned
BasicBlock* endBlk;
ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk);
- BasicBlock* finBeg = ehDsc->ebdHndBeg;
+ BasicBlock* finBeg = ehDsc->ebdHndBeg;
for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
{
- if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
+ if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
+ {
continue;
+ }
assert(bcall->isBBCallAlwaysPair());
@@ -3352,7 +3357,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned
succNum++;
}
}
- assert(i == ~0u || ehDsc->HasFaultHandler()); // Should reach here only for fault blocks.
+ assert(i == ~0u || ehDsc->HasFaultHandler()); // Should reach here only for fault blocks.
if (i == ~0u)
{
*nres = succNum;
@@ -3363,7 +3368,7 @@ Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switc
{
assert(switchBlk->bbJumpKind == BBJ_SWITCH);
BlockToSwitchDescMap* switchMap = GetSwitchDescMap();
- SwitchUniqueSuccSet res;
+ SwitchUniqueSuccSet res;
if (switchMap->Lookup(switchBlk, &res))
{
return res;
@@ -3379,9 +3384,9 @@ Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switc
// reachability information stored in the blocks. To avoid that, we just use a local BitVec.
BitVecTraits blockVecTraits(fgBBNumMax + 1, this);
- BitVec BITVEC_INIT_NOCOPY(uniqueSuccBlocks, BitVecOps::MakeEmpty(&blockVecTraits));
+ BitVec BITVEC_INIT_NOCOPY(uniqueSuccBlocks, BitVecOps::MakeEmpty(&blockVecTraits));
BasicBlock** jumpTable = switchBlk->bbJumpSwt->bbsDstTab;
- unsigned jumpCount = switchBlk->bbJumpSwt->bbsCount;
+ unsigned jumpCount = switchBlk->bbJumpSwt->bbsCount;
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* targ = jumpTable[i];
@@ -3391,7 +3396,7 @@ Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switc
unsigned numNonDups = BitVecOps::Count(&blockVecTraits, uniqueSuccBlocks);
typedef BasicBlock* BasicBlockPtr;
- BasicBlockPtr* nonDups = new (getAllocator()) BasicBlockPtr[numNonDups];
+ BasicBlockPtr* nonDups = new (getAllocator()) BasicBlockPtr[numNonDups];
unsigned nonDupInd = 0;
// At this point, all unique targets are in "uniqueSuccBlocks". As we encounter each,
@@ -3410,17 +3415,20 @@ Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switc
assert(nonDupInd == numNonDups);
assert(BitVecOps::Count(&blockVecTraits, uniqueSuccBlocks) == 0);
res.numDistinctSuccs = numNonDups;
- res.nonDuplicates = nonDups;
+ res.nonDuplicates = nonDups;
switchMap->Set(switchBlk, res);
return res;
}
}
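
The pattern used here (one pass that records every target in a bit set and counts the distinct ones, then a second pass that emits a target only while it is still in the set) deduplicates without a hash table while preserving first-occurrence order. A small sketch over plain integer ids, with std::vector<bool> standing in for the JIT's BitVec and arena allocation, is:

    #include <cstddef>
    #include <vector>

    // Sketch only: deduplicate small non-negative ids, keeping first-occurrence order.
    std::vector<unsigned> uniqueTargets(const std::vector<unsigned>& targets, unsigned maxId)
    {
        std::vector<bool> inSet(maxId + 1, false);
        size_t numUnique = 0;
        for (unsigned t : targets) // pass 1: membership and count
        {
            if (!inSet[t])
            {
                inSet[t] = true;
                numUnique++;
            }
        }
        std::vector<unsigned> result;
        result.reserve(numUnique);
        for (unsigned t : targets) // pass 2: emit each id the first time it is seen, then clear it
        {
            if (inSet[t])
            {
                inSet[t] = false;
                result.push_back(t);
            }
        }
        return result;
    }
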
-void Compiler::SwitchUniqueSuccSet::UpdateTarget(IAllocator* alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to)
+void Compiler::SwitchUniqueSuccSet::UpdateTarget(IAllocator* alloc,
+ BasicBlock* switchBlk,
+ BasicBlock* from,
+ BasicBlock* to)
{
assert(switchBlk->bbJumpKind == BBJ_SWITCH); // Precondition.
- unsigned jmpTabCnt = switchBlk->bbJumpSwt->bbsCount;
- BasicBlock** jmpTab = switchBlk->bbJumpSwt->bbsDstTab;
+ unsigned jmpTabCnt = switchBlk->bbJumpSwt->bbsCount;
+ BasicBlock** jmpTab = switchBlk->bbJumpSwt->bbsDstTab;
// Is "from" still in the switch table (because it had more than one entry before?)
bool fromStillPresent = false;
@@ -3428,7 +3436,8 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(IAllocator* alloc, BasicBlock*
{
if (jmpTab[i] == from)
{
- fromStillPresent = true; break;
+ fromStillPresent = true;
+ break;
}
}
@@ -3438,7 +3447,8 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(IAllocator* alloc, BasicBlock*
{
if (nonDuplicates[i] == to)
{
- toAlreadyPresent = true; break;
+ toAlreadyPresent = true;
+ break;
}
}
@@ -3455,8 +3465,8 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(IAllocator* alloc, BasicBlock*
{
// reallocate to add an entry
typedef BasicBlock* BasicBlockPtr;
- BasicBlockPtr* newNonDups = new (alloc) BasicBlockPtr[numDistinctSuccs+1];
- memcpy(newNonDups, nonDuplicates, numDistinctSuccs*sizeof(BasicBlock*));
+ BasicBlockPtr* newNonDups = new (alloc) BasicBlockPtr[numDistinctSuccs + 1];
+ memcpy(newNonDups, nonDuplicates, numDistinctSuccs * sizeof(BasicBlock*));
newNonDups[numDistinctSuccs] = to;
numDistinctSuccs++;
nonDuplicates = newNonDups;
@@ -3491,7 +3501,7 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(IAllocator* alloc, BasicBlock*
{
if (nonDuplicates[i] == from)
{
- nonDuplicates[i] = nonDuplicates[numDistinctSuccs-1];
+ nonDuplicates[i] = nonDuplicates[numDistinctSuccs - 1];
numDistinctSuccs--;
#ifdef DEBUG
foundFrom = true;
@@ -3504,7 +3514,7 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(IAllocator* alloc, BasicBlock*
}
/*****************************************************************************
- *
+ *
* Simple utility function to remove an entry for a block in the switch desc
* map. So it can be called from other phases.
*
@@ -3520,12 +3530,14 @@ void Compiler::fgInvalidateSwitchDescMapEntry(BasicBlock* block)
void Compiler::UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to)
{
- if (m_switchDescMap == NULL)
- return; // No mappings, nothing to do.
+ if (m_switchDescMap == nullptr)
+ {
+ return; // No mappings, nothing to do.
+ }
// Otherwise...
BlockToSwitchDescMap* switchMap = GetSwitchDescMap();
- SwitchUniqueSuccSet* res = switchMap->LookupPointer(switchBlk);
+ SwitchUniqueSuccSet* res = switchMap->LookupPointer(switchBlk);
if (res != nullptr)
{
// If no result, nothing to do. Otherwise, update it.
@@ -3533,27 +3545,27 @@ void Compiler::UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from,
}
}
-
/*****************************************************************************
* For a block that is in a handler region, find the first block of the most-nested
* handler containing the block.
*/
-BasicBlock* Compiler::fgFirstBlockOfHandler(BasicBlock* block)
+BasicBlock* Compiler::fgFirstBlockOfHandler(BasicBlock* block)
{
assert(block->hasHndIndex());
return ehGetDsc(block->getHndIndex())->ebdHndBeg;
}
-
/*****************************************************************************
*
* Function called to find back edges and return blocks and mark them as needing GC Polls. This marks all
* blocks.
*/
-void Compiler::fgMarkGCPollBlocks()
+void Compiler::fgMarkGCPollBlocks()
{
if (GCPOLL_NONE == opts.compGCPollType)
+ {
return;
+ }
#ifdef DEBUG
/* Check that the flowgraph data (bbNum, bbRefs, bbPreds) is up-to-date */
@@ -3562,40 +3574,41 @@ void Compiler::fgMarkGCPollBlocks()
BasicBlock* block;
- //Return blocks always need GC polls. In addition, all back edges (including those from switch
- //statements) need GC polls. The poll is on the block with the outgoing back edge (or ret), rather than
- //on the destination or on the edge itself.
+ // Return blocks always need GC polls. In addition, all back edges (including those from switch
+ // statements) need GC polls. The poll is on the block with the outgoing back edge (or ret), rather than
+ // on the destination or on the edge itself.
for (block = fgFirstBB; block; block = block->bbNext)
{
bool blockNeedsPoll = false;
switch (block->bbJumpKind)
{
- case BBJ_COND:
- case BBJ_ALWAYS:
- blockNeedsPoll = (block->bbJumpDest->bbNum <= block->bbNum);
- break;
+ case BBJ_COND:
+ case BBJ_ALWAYS:
+ blockNeedsPoll = (block->bbJumpDest->bbNum <= block->bbNum);
+ break;
- case BBJ_RETURN:
- blockNeedsPoll = true;
- break;
+ case BBJ_RETURN:
+ blockNeedsPoll = true;
+ break;
- case BBJ_SWITCH:
- unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab;
+ case BBJ_SWITCH:
+ unsigned jumpCnt;
+ jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab;
+ jumpTab = block->bbJumpSwt->bbsDstTab;
- do
- {
- if ((*jumpTab)->bbNum <= block->bbNum)
+ do
{
- blockNeedsPoll = true;
- break;
- }
- }
- while (++jumpTab, --jumpCnt);
- break;
+ if ((*jumpTab)->bbNum <= block->bbNum)
+ {
+ blockNeedsPoll = true;
+ break;
+ }
+ } while (++jumpTab, --jumpCnt);
+ break;
- default:
- break;
+ default:
+ break;
}
if (blockNeedsPoll)
@@ -3605,7 +3618,7 @@ void Compiler::fgMarkGCPollBlocks()
}
}
-void Compiler::fgInitBlockVarSets()
+void Compiler::fgInitBlockVarSets()
{
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
@@ -3632,18 +3645,19 @@ void Compiler::fgInitBlockVarSets()
*
* The following does the final pass on BBF_NEEDS_GCPOLL and then actually creates the GC Polls.
*/
-void Compiler::fgCreateGCPolls()
+void Compiler::fgCreateGCPolls()
{
if (GCPOLL_NONE == opts.compGCPollType)
+ {
return;
+ }
bool createdPollBlocks = false;
#ifdef DEBUG
if (verbose)
{
- printf("*************** In fgCreateGCPolls() for %s\n",
- info.compFullName);
+ printf("*************** In fgCreateGCPolls() for %s\n", info.compFullName);
}
#endif // DEBUG
@@ -3663,17 +3677,23 @@ void Compiler::fgCreateGCPolls()
// Filter out loops not meeting the obvious preconditions.
//
if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED)
+ {
continue;
+ }
if (!(optLoopTable[lnum].lpFlags & LPFLG_CONST))
+ {
continue;
+ }
BasicBlock* head = optLoopTable[lnum].lpHead;
BasicBlock* bottom = optLoopTable[lnum].lpBottom;
// Loops dominated by GC_SAFE_POINT won't have this set.
if (!(bottom->bbFlags & BBF_NEEDS_GCPOLL))
+ {
continue;
+ }
/* Get the loop data:
- initial constant
@@ -3684,38 +3704,37 @@ void Compiler::fgCreateGCPolls()
- loop test type (i.e. GT_GE, GT_LT, etc...)
*/
- int lbeg = optLoopTable[lnum].lpConstInit;
- int llim = optLoopTable[lnum].lpConstLimit();
- genTreeOps testOper = optLoopTable[lnum].lpTestOper();
+ int lbeg = optLoopTable[lnum].lpConstInit;
+ int llim = optLoopTable[lnum].lpConstLimit();
+ genTreeOps testOper = optLoopTable[lnum].lpTestOper();
- int lvar = optLoopTable[lnum].lpIterVar();
- int iterInc = optLoopTable[lnum].lpIterConst();
- genTreeOps iterOper = optLoopTable[lnum].lpIterOper();
+ int lvar = optLoopTable[lnum].lpIterVar();
+ int iterInc = optLoopTable[lnum].lpIterConst();
+ genTreeOps iterOper = optLoopTable[lnum].lpIterOper();
- var_types iterOperType = optLoopTable[lnum].lpIterOperType();
- bool unsTest = (optLoopTable[lnum].lpTestTree->gtFlags & GTF_UNSIGNED) != 0;
- if (lvaTable[lvar].lvAddrExposed) // Can't reason about the value of the iteration variable.
+ var_types iterOperType = optLoopTable[lnum].lpIterOperType();
+ bool unsTest = (optLoopTable[lnum].lpTestTree->gtFlags & GTF_UNSIGNED) != 0;
+ if (lvaTable[lvar].lvAddrExposed)
+ { // Can't reason about the value of the iteration variable.
continue;
+ }
unsigned totalIter;
/* Find the number of iterations - the function returns false if not a constant number */
- if (!optComputeLoopRep(lbeg, llim,
- iterInc, iterOper, iterOperType,
- testOper, unsTest,
+ if (!optComputeLoopRep(lbeg, llim, iterInc, iterOper, iterOperType, testOper, unsTest,
// The value here doesn't matter for this variation of the optimization
- true,
- &totalIter))
+ true, &totalIter))
{
#ifdef DEBUG
if (verbose)
{
- printf( "Could not compute loop iterations for loop from BB%02u to BB%02u",
- head->bbNum, bottom->bbNum );
+ printf("Could not compute loop iterations for loop from BB%02u to BB%02u", head->bbNum,
+ bottom->bbNum);
}
-#endif // DEBUG
- (void)head; //suppress gcc error.
+#endif // DEBUG
+ (void)head; // suppress gcc error.
continue;
}
@@ -3723,17 +3742,18 @@ void Compiler::fgCreateGCPolls()
/* Forget it if there are too many repetitions or not a constant loop */
static const unsigned ITER_LIMIT = 256;
- if (totalIter > ITER_LIMIT)
+ if (totalIter > ITER_LIMIT)
+ {
continue;
+ }
- //It is safe to elminate the poll from this loop.
+ // It is safe to eliminate the poll from this loop.
bottom->bbFlags &= ~BBF_NEEDS_GCPOLL;
#ifdef DEBUG
if (verbose)
{
- printf( "Removing poll in block BB%02u because it forms a bounded counted loop\n",
- bottom->bbNum );
+ printf("Removing poll in block BB%02u because it forms a bounded counted loop\n", bottom->bbNum);
}
#endif // DEBUG
}
@@ -3747,29 +3767,33 @@ void Compiler::fgCreateGCPolls()
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
if (!(block->bbFlags & BBF_NEEDS_GCPOLL))
+ {
continue;
+ }
- if (block->bbJumpKind == BBJ_COND ||
- block->bbJumpKind == BBJ_ALWAYS)
+ if (block->bbJumpKind == BBJ_COND || block->bbJumpKind == BBJ_ALWAYS)
{
- //make sure that this is loop-like
+ // make sure that this is loop-like
if (!fgReachable(block->bbJumpDest, block))
{
block->bbFlags &= ~BBF_NEEDS_GCPOLL;
#ifdef DEBUG
if (verbose)
+ {
printf("Removing poll in block BB%02u because it is not loop\n", block->bbNum);
+ }
#endif // DEBUG
continue;
}
}
- else if (!(block->bbJumpKind == BBJ_RETURN ||
- block->bbJumpKind == BBJ_SWITCH))
+ else if (!(block->bbJumpKind == BBJ_RETURN || block->bbJumpKind == BBJ_SWITCH))
{
noway_assert(!"GC Poll on a block that has no control transfer.");
#ifdef DEBUG
if (verbose)
+ {
printf("Removing poll in block BB%02u because it is not a jump\n", block->bbNum);
+ }
#endif // DEBUG
block->bbFlags &= ~BBF_NEEDS_GCPOLL;
continue;
@@ -3782,7 +3806,9 @@ void Compiler::fgCreateGCPolls()
{
#ifdef DEBUG
if (verbose)
+ {
printf("Removing poll in return block BB%02u because it is GC Safe\n", block->bbNum);
+ }
#endif // DEBUG
block->bbFlags &= ~BBF_NEEDS_GCPOLL;
continue;
@@ -3792,8 +3818,8 @@ void Compiler::fgCreateGCPolls()
{
if (!optReachWithoutCall(fgFirstBB, block))
{
- //check to see if there is a call along the path between the first block and the return
- //block.
+ // check to see if there is a call along the path between the first block and the return
+ // block.
block->bbFlags &= ~BBF_NEEDS_GCPOLL;
#ifdef DEBUG
if (verbose)
@@ -3803,13 +3829,12 @@ void Compiler::fgCreateGCPolls()
#endif // DEBUG
continue;
}
-
}
}
}
noway_assert(!fgGCPollsCreated);
- BasicBlock* block;
+ BasicBlock* block;
fgGCPollsCreated = true;
// Walk through the blocks and hunt for a block that has BBF_NEEDS_GCPOLL
@@ -3818,7 +3843,9 @@ void Compiler::fgCreateGCPolls()
// Because of block compaction, it's possible to end up with a block that is both poll and safe.
// And if !fgDomsComputed, we won't have cleared them, so skip them now
if (!(block->bbFlags & BBF_NEEDS_GCPOLL) || (block->bbFlags & BBF_GC_SAFE_POINT))
+ {
continue;
+ }
// This block needs a poll. We either just insert a callout or we split the block and inline part of
// the test. This depends on the value of opts.compGCPollType.
@@ -3829,29 +3856,29 @@ void Compiler::fgCreateGCPolls()
#ifdef DEBUG
switch (block->bbJumpKind)
{
- case BBJ_RETURN:
- case BBJ_ALWAYS:
- case BBJ_COND:
- case BBJ_SWITCH:
- break;
- default:
- noway_assert(!"Unknown block type for BBF_NEEDS_GCPOLL");
+ case BBJ_RETURN:
+ case BBJ_ALWAYS:
+ case BBJ_COND:
+ case BBJ_SWITCH:
+ break;
+ default:
+ noway_assert(!"Unknown block type for BBF_NEEDS_GCPOLL");
}
#endif // DEBUG
noway_assert(opts.compGCPollType);
GCPollType pollType = opts.compGCPollType;
- //pollType is set to either CALL or INLINE at this point. Below is the list of places where we
- //can't or don't want to emit an inline check. Check all of those. If after all of that we still
- //have INLINE, then emit an inline check.
+ // pollType is set to either CALL or INLINE at this point. Below is the list of places where we
+ // can't or don't want to emit an inline check. Check all of those. If after all of that we still
+ // have INLINE, then emit an inline check.
if (opts.MinOpts() || opts.compDbgCode)
{
#ifdef DEBUG
if (verbose)
{
- printf( "Selecting CALL poll in block BB%02u because of debug/minopts\n", block->bbNum );
+ printf("Selecting CALL poll in block BB%02u because of debug/minopts\n", block->bbNum);
}
#endif // DEBUG
@@ -3867,7 +3894,7 @@ void Compiler::fgCreateGCPolls()
}
#endif // DEBUG
- //we don't want to split the single return block
+ // we don't want to split the single return block
pollType = GCPOLL_CALL;
}
else if (BBJ_SWITCH == block->bbJumpKind)
@@ -3903,7 +3930,7 @@ void Compiler::fgCreateGCPolls()
* a basic block.
*/
-bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
+bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
{
assert(!(block->bbFlags & BBF_GC_SAFE_POINT));
bool createdPollBlocks;
@@ -3926,10 +3953,10 @@ bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* bl
if (GCPOLL_CALL == pollType)
{
createdPollBlocks = false;
- GenTreePtr tree = gtNewHelperCallNode(CORINFO_HELP_POLL_GC, TYP_VOID);
+ GenTreePtr tree = gtNewHelperCallNode(CORINFO_HELP_POLL_GC, TYP_VOID);
#if GTF_CALL_REG_SAVE
tree->gtCall.gtCallMoreFlags |= GTF_CALL_REG_SAVE;
-#endif //GTF_CALL_REG_SAVE
+#endif // GTF_CALL_REG_SAVE
// for BBJ_ALWAYS I don't need to insert it before the condition. Just append it.
if (block->bbJumpKind == BBJ_ALWAYS)
@@ -3984,9 +4011,9 @@ bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* bl
// I want to create:
// top -> poll -> bottom (lexically)
// so that we jump over poll to get to bottom.
- BasicBlock* top = block;
- BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true);
- BasicBlock* bottom = fgNewBBafter(top->bbJumpKind, poll, true);
+ BasicBlock* top = block;
+ BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true);
+ BasicBlock* bottom = fgNewBBafter(top->bbJumpKind, poll, true);
BBjumpKinds oldJumpKind = top->bbJumpKind;
// Update block flags
@@ -3995,9 +4022,8 @@ bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* bl
// Unlike Fei's inliner from puclr, I'm allowed to split loops.
// And we keep a few other flags...
- noway_assert((originalFlags & (BBF_SPLIT_NONEXIST
- & ~(BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1))) == 0);
- top->bbFlags = originalFlags & (~BBF_SPLIT_LOST | BBF_GC_SAFE_POINT);
+ noway_assert((originalFlags & (BBF_SPLIT_NONEXIST & ~(BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1))) == 0);
+ top->bbFlags = originalFlags & (~BBF_SPLIT_LOST | BBF_GC_SAFE_POINT);
bottom->bbFlags |= originalFlags & (BBF_SPLIT_GAINED | BBF_IMPORTED | BBF_GC_SAFE_POINT);
bottom->inheritWeight(top);
poll->bbFlags |= originalFlags & (BBF_SPLIT_GAINED | BBF_IMPORTED | BBF_GC_SAFE_POINT);
@@ -4012,7 +4038,7 @@ bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* bl
GenTreePtr tree = gtNewHelperCallNode(CORINFO_HELP_POLL_GC, TYP_VOID);
#if GTF_CALL_REG_SAVE
tree->gtCall.gtCallMoreFlags |= GTF_CALL_REG_SAVE;
-#endif //GTF_CALL_REG_SAVE
+#endif // GTF_CALL_REG_SAVE
fgInsertStmtAtEnd(poll, tree);
// 3) Remove the last statement from Top and add it to Bottom.
@@ -4026,7 +4052,6 @@ bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* bl
}
fgRemoveStmt(top, stmt);
fgInsertStmtAtEnd(bottom, stmt);
-
}
// for BBJ_ALWAYS blocks, bottom is an empty block.
@@ -4044,7 +4069,8 @@ bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* bl
GenTreePtr trap;
if (pAddrOfCaptureThreadGlobal != nullptr)
{
- trap = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewIconHandleNode((size_t)pAddrOfCaptureThreadGlobal, GTF_ICON_PTR_HDL));
+ trap = gtNewOperNode(GT_IND, TYP_I_IMPL,
+ gtNewIconHandleNode((size_t)pAddrOfCaptureThreadGlobal, GTF_ICON_PTR_HDL));
}
else
{
@@ -4052,12 +4078,11 @@ bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* bl
}
GenTreePtr trapRelop = gtNewOperNode(GT_EQ, TYP_INT,
- //lhs [g_TrapReturningThreads]
+ // lhs [g_TrapReturningThreads]
gtNewOperNode(GT_IND, TYP_INT, trap),
- //rhs 0
- gtNewIconNode(0, TYP_INT)
- );
- trapRelop->gtFlags |= GTF_RELOP_JMP_USED | GTF_DONT_CSE; //Treat reading g_TrapReturningThreads as volatile.
+ // rhs 0
+ gtNewIconNode(0, TYP_INT));
+ trapRelop->gtFlags |= GTF_RELOP_JMP_USED | GTF_DONT_CSE; // Treat reading g_TrapReturningThreads as volatile.
GenTreePtr trapCheck = gtNewOperNode(GT_JTRUE, TYP_VOID, trapRelop);
fgInsertStmtAtEnd(top, trapCheck);
top->bbJumpDest = bottom;
@@ -4073,25 +4098,25 @@ bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* bl
// jumps, 2 for conditional branches, N for switches).
switch (oldJumpKind)
{
- case BBJ_RETURN:
- // no successors
- break;
- case BBJ_COND:
- // replace predecessor in the fall through block.
- noway_assert(bottom->bbNext);
- fgReplacePred(bottom->bbNext, top, bottom);
+ case BBJ_RETURN:
+ // no successors
+ break;
+ case BBJ_COND:
+ // replace predecessor in the fall through block.
+ noway_assert(bottom->bbNext);
+ fgReplacePred(bottom->bbNext, top, bottom);
- // fall through for the jump target
- __fallthrough;
+ // fall through for the jump target
+ __fallthrough;
- case BBJ_ALWAYS:
- fgReplacePred(bottom->bbJumpDest, top, bottom);
- break;
- case BBJ_SWITCH:
- NO_WAY("SWITCH should be a call rather than an inlined poll.");
- break;
- default:
- NO_WAY("Unknown block type for updating predecessor lists.");
+ case BBJ_ALWAYS:
+ fgReplacePred(bottom->bbJumpDest, top, bottom);
+ break;
+ case BBJ_SWITCH:
+ NO_WAY("SWITCH should be a call rather than an inlined poll.");
+ break;
+ default:
+ NO_WAY("Unknown block type for updating predecessor lists.");
}
top->bbFlags &= ~BBF_NEEDS_GCPOLL;
@@ -4119,16 +4144,15 @@ bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* bl
return createdPollBlocks;
}
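
In source-level terms, the inline variant built above (splitting the block into top, poll and bottom) behaves like the following sketch; g_TrapReturningThreads and GCPoll are hypothetical stand-ins for the location pAddrOfCaptureThreadGlobal resolves to and for the CORINFO_HELP_POLL_GC helper.

    #include <atomic>

    extern std::atomic<int> g_TrapReturningThreads; // stand-in for the runtime's trap flag
    void GCPoll();                                  // stand-in for the CORINFO_HELP_POLL_GC helper

    // Shape of the split produced by fgCreateGCPoll for the inline case:
    // 'top' tests the flag, 'poll' is skipped when no GC is pending, and the
    // block's original terminating statement ends up in 'bottom'.
    void inlineGCPollShape()
    {
        if (g_TrapReturningThreads.load() != 0) // top: volatile-style read of the trap flag
        {
            GCPoll();                           // poll: reached only when a GC is pending
        }
        // bottom: the original jump/return of the block goes here.
    }
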
-
/*****************************************************************************
*
* The following helps find a basic block given its PC offset.
*/
-void Compiler::fgInitBBLookup()
+void Compiler::fgInitBBLookup()
{
- BasicBlock** dscBBptr;
- BasicBlock* tmpBBdesc;
+ BasicBlock** dscBBptr;
+ BasicBlock* tmpBBdesc;
/* Allocate the basic block table */
@@ -4144,21 +4168,22 @@ void Compiler::fgInitBBLookup()
noway_assert(dscBBptr == fgBBs + fgBBcount);
}
-
-BasicBlock* Compiler::fgLookupBB(unsigned addr)
+BasicBlock* Compiler::fgLookupBB(unsigned addr)
{
- unsigned lo;
- unsigned hi;
+ unsigned lo;
+ unsigned hi;
/* Do a binary search */
for (lo = 0, hi = fgBBcount - 1;;)
{
-AGAIN:;
+ AGAIN:;
if (lo > hi)
+ {
break;
+ }
unsigned mid = (lo + hi) / 2;
BasicBlock* dsc = fgBBs[mid];
@@ -4175,31 +4200,31 @@ AGAIN:;
if (mid > hi)
{
mid = (lo + hi) / 2;
- hi = mid - 1;
+ hi = mid - 1;
goto AGAIN;
}
}
- unsigned pos = dsc->bbCodeOffs;
+ unsigned pos = dsc->bbCodeOffs;
- if (pos < addr)
+ if (pos < addr)
{
if ((lo == hi) && (lo == (fgBBcount - 1)))
{
noway_assert(addr == dsc->bbCodeOffsEnd);
- return NULL; // NULL means the end of method
+ return nullptr; // NULL means the end of method
}
lo = mid + 1;
continue;
}
- if (pos > addr)
+ if (pos > addr)
{
hi = mid - 1;
continue;
}
- return dsc;
+ return dsc;
}
#ifdef DEBUG
printf("ERROR: Couldn't find basic block at offset %04X\n", addr);
@@ -4207,20 +4232,18 @@ AGAIN:;
NO_WAY("fgLookupBB failed.");
}
-
/*****************************************************************************
*
* The 'jump target' array uses the following flags to indicate what kind
* of label is present.
*/
-#define JT_NONE 0x00 // This IL offset is never used
-#define JT_ADDR 0x01 // merely make sure this is an OK address
-#define JT_JUMP 0x02 // 'normal' jump target
-#define JT_MULTI 0x04 // target of multiple jumps
+#define JT_NONE 0x00 // This IL offset is never used
+#define JT_ADDR 0x01 // merely make sure this is an OK address
+#define JT_JUMP 0x02 // 'normal' jump target
+#define JT_MULTI 0x04 // target of multiple jumps
-inline
-void Compiler::fgMarkJumpTarget(BYTE* jumpTarget, unsigned offs)
+inline void Compiler::fgMarkJumpTarget(BYTE* jumpTarget, unsigned offs)
{
/* Make sure we set JT_MULTI if target of multiple jumps */
@@ -4238,36 +4261,72 @@ void Compiler::fgMarkJumpTarget(BYTE* jumpTarget, unsigned offs)
class FgStack
{
public:
-
- FgStack()
- : slot0(SLOT_INVALID)
- , slot1(SLOT_INVALID)
- , depth(0)
+ FgStack() : slot0(SLOT_INVALID), slot1(SLOT_INVALID), depth(0)
{
// Empty
}
- void Clear() { depth = 0; }
- void PushUnknown() { Push(SLOT_UNKNOWN); }
- void PushConstant() { Push(SLOT_CONSTANT); }
- void PushArrayLen() { Push(SLOT_ARRAYLEN); }
- void PushArgument(unsigned arg) { Push(SLOT_ARGUMENT + arg); }
- unsigned GetSlot0() const { assert(depth >= 1); return slot0; }
- unsigned GetSlot1() const { assert(depth >= 2); return slot1; }
- static bool IsConstant(unsigned value) { return value == SLOT_CONSTANT; }
- static bool IsArrayLen(unsigned value) { return value == SLOT_ARRAYLEN; }
- static bool IsArgument(unsigned value) { return value >= SLOT_ARGUMENT; }
+ void Clear()
+ {
+ depth = 0;
+ }
+ void PushUnknown()
+ {
+ Push(SLOT_UNKNOWN);
+ }
+ void PushConstant()
+ {
+ Push(SLOT_CONSTANT);
+ }
+ void PushArrayLen()
+ {
+ Push(SLOT_ARRAYLEN);
+ }
+ void PushArgument(unsigned arg)
+ {
+ Push(SLOT_ARGUMENT + arg);
+ }
+ unsigned GetSlot0() const
+ {
+ assert(depth >= 1);
+ return slot0;
+ }
+ unsigned GetSlot1() const
+ {
+ assert(depth >= 2);
+ return slot1;
+ }
+ static bool IsConstant(unsigned value)
+ {
+ return value == SLOT_CONSTANT;
+ }
+ static bool IsArrayLen(unsigned value)
+ {
+ return value == SLOT_ARRAYLEN;
+ }
+ static bool IsArgument(unsigned value)
+ {
+ return value >= SLOT_ARGUMENT;
+ }
static unsigned SlotTypeToArgNum(unsigned value)
{
assert(IsArgument(value));
return value - SLOT_ARGUMENT;
}
- bool IsStackTwoDeep() const { return depth == 2; }
- bool IsStackOneDeep() const { return depth == 1; }
- bool IsStackAtLeastOneDeep() const { return depth >= 1; }
+ bool IsStackTwoDeep() const
+ {
+ return depth == 2;
+ }
+ bool IsStackOneDeep() const
+ {
+ return depth == 1;
+ }
+ bool IsStackAtLeastOneDeep() const
+ {
+ return depth >= 1;
+ }
private:
-
enum
{
SLOT_INVALID = UINT_MAX,
@@ -4281,16 +4340,16 @@ private:
{
switch (depth)
{
- case 0:
- ++depth;
- slot0 = type;
- break;
- case 1:
- ++depth;
- __fallthrough;
- case 2:
- slot1 = slot0;
- slot0 = type;
+ case 0:
+ ++depth;
+ slot0 = type;
+ break;
+ case 1:
+ ++depth;
+ __fallthrough;
+ case 2:
+ slot1 = slot0;
+ slot0 = type;
}
}
@@ -4314,31 +4373,29 @@ private:
//
// May throw an exception if the IL is malformed.
//
-// jumpTarget[N] is set to a JT_* value if IL offset N is a
+// jumpTarget[N] is set to a JT_* value if IL offset N is a
// jump target in the method.
//
// Also sets lvAddrExposed and lvArgWrite in lvaTable[].
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-void Compiler::fgFindJumpTargets(const BYTE* codeAddr,
- IL_OFFSET codeSize,
- BYTE* jumpTarget)
+void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, BYTE* jumpTarget)
{
- const BYTE* codeBegp = codeAddr;
- const BYTE* codeEndp = codeAddr + codeSize;
+ const BYTE* codeBegp = codeAddr;
+ const BYTE* codeEndp = codeAddr + codeSize;
unsigned varNum;
- bool seenJump = false;
- var_types varType = DUMMY_INIT(TYP_UNDEF); // TYP_ type
- typeInfo ti; // Verifier type.
+ bool seenJump = false;
+ var_types varType = DUMMY_INIT(TYP_UNDEF); // TYP_ type
+ typeInfo ti; // Verifier type.
bool typeIsNormed = false;
FgStack pushedStack;
- const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0;
+ const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0;
const bool makeInlineObservations = (compInlineResult != nullptr);
- const bool isInlining = compIsForInlining();
+ const bool isInlining = compIsForInlining();
if (makeInlineObservations)
{
@@ -4362,27 +4419,24 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr,
while (codeAddr < codeEndp)
{
- OPCODE opcode = (OPCODE) getU1LittleEndian(codeAddr);
+ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
codeAddr += sizeof(__int8);
opts.instrCount++;
typeIsNormed = false;
-DECODE_OPCODE:
+ DECODE_OPCODE:
if (opcode >= CEE_COUNT)
{
- BADCODE3("Illegal opcode", ": %02X", (int) opcode);
+ BADCODE3("Illegal opcode", ": %02X", (int)opcode);
}
- if ((opcode >= CEE_LDARG_0 && opcode <= CEE_STLOC_S) ||
- (opcode >= CEE_LDARG && opcode <= CEE_STLOC))
+ if ((opcode >= CEE_LDARG_0 && opcode <= CEE_STLOC_S) || (opcode >= CEE_LDARG && opcode <= CEE_STLOC))
{
opts.lvRefCount++;
}
- if (makeInlineObservations &&
- (opcode >= CEE_LDNULL) &&
- (opcode <= CEE_LDC_R8))
+ if (makeInlineObservations && (opcode >= CEE_LDNULL) && (opcode <= CEE_LDC_R8))
{
pushedStack.PushConstant();
}
@@ -4391,30 +4445,30 @@ DECODE_OPCODE:
switch (opcode)
{
- case CEE_PREFIX1:
+ case CEE_PREFIX1:
{
if (codeAddr >= codeEndp)
{
goto TOO_FAR;
}
- opcode = (OPCODE) (256 + getU1LittleEndian(codeAddr));
+ opcode = (OPCODE)(256 + getU1LittleEndian(codeAddr));
codeAddr += sizeof(__int8);
goto DECODE_OPCODE;
}
- case CEE_PREFIX2:
- case CEE_PREFIX3:
- case CEE_PREFIX4:
- case CEE_PREFIX5:
- case CEE_PREFIX6:
- case CEE_PREFIX7:
- case CEE_PREFIXREF:
+ case CEE_PREFIX2:
+ case CEE_PREFIX3:
+ case CEE_PREFIX4:
+ case CEE_PREFIX5:
+ case CEE_PREFIX6:
+ case CEE_PREFIX7:
+ case CEE_PREFIXREF:
{
- BADCODE3("Illegal opcode", ": %02X", (int) opcode);
+ BADCODE3("Illegal opcode", ": %02X", (int)opcode);
}
- case CEE_CALL:
- case CEE_CALLVIRT:
+ case CEE_CALL:
+ case CEE_CALLVIRT:
{
// There has to be code after the call, otherwise the inlinee is unverifiable.
if (isInlining)
@@ -4427,7 +4481,7 @@ DECODE_OPCODE:
// it is a wrapper method.
if (makeInlineObservations)
{
- if ((OPCODE) getU1LittleEndian(codeAddr + sz) == CEE_RET)
+ if ((OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET)
{
compInlineResult->Note(InlineObservation::CALLEE_LOOKS_LIKE_WRAPPER);
}
@@ -4435,34 +4489,34 @@ DECODE_OPCODE:
}
break;
- case CEE_LEAVE:
- case CEE_LEAVE_S:
- case CEE_BR:
- case CEE_BR_S:
- case CEE_BRFALSE:
- case CEE_BRFALSE_S:
- case CEE_BRTRUE:
- case CEE_BRTRUE_S:
- case CEE_BEQ:
- case CEE_BEQ_S:
- case CEE_BGE:
- case CEE_BGE_S:
- case CEE_BGE_UN:
- case CEE_BGE_UN_S:
- case CEE_BGT:
- case CEE_BGT_S:
- case CEE_BGT_UN:
- case CEE_BGT_UN_S:
- case CEE_BLE:
- case CEE_BLE_S:
- case CEE_BLE_UN:
- case CEE_BLE_UN_S:
- case CEE_BLT:
- case CEE_BLT_S:
- case CEE_BLT_UN:
- case CEE_BLT_UN_S:
- case CEE_BNE_UN:
- case CEE_BNE_UN_S:
+ case CEE_LEAVE:
+ case CEE_LEAVE_S:
+ case CEE_BR:
+ case CEE_BR_S:
+ case CEE_BRFALSE:
+ case CEE_BRFALSE_S:
+ case CEE_BRTRUE:
+ case CEE_BRTRUE_S:
+ case CEE_BEQ:
+ case CEE_BEQ_S:
+ case CEE_BGE:
+ case CEE_BGE_S:
+ case CEE_BGE_UN:
+ case CEE_BGE_UN_S:
+ case CEE_BGT:
+ case CEE_BGT_S:
+ case CEE_BGT_UN:
+ case CEE_BGT_UN_S:
+ case CEE_BLE:
+ case CEE_BLE_S:
+ case CEE_BLE_UN:
+ case CEE_BLE_UN_S:
+ case CEE_BLT:
+ case CEE_BLT_S:
+ case CEE_BLT_UN:
+ case CEE_BLT_UN_S:
+ case CEE_BNE_UN:
+ case CEE_BNE_UN_S:
{
seenJump = true;
@@ -4472,22 +4526,20 @@ DECODE_OPCODE:
}
// Compute jump target address
- signed jmpDist = (sz==1) ? getI1LittleEndian(codeAddr)
- : getI4LittleEndian(codeAddr);
+ signed jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
- if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_LEAVE || opcode == CEE_LEAVE_S
- || opcode == CEE_BR || opcode == CEE_BR_S))
+ if (compIsForInlining() && jmpDist == 0 &&
+ (opcode == CEE_LEAVE || opcode == CEE_LEAVE_S || opcode == CEE_BR || opcode == CEE_BR_S))
{
- break; /* NOP */
+ break; /* NOP */
}
unsigned jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist;
// Make sure target is reasonable
- if (jmpAddr >= codeSize)
+ if (jmpAddr >= codeSize)
{
- BADCODE3("code jumps to outer space",
- " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
+ BADCODE3("code jumps to outer space", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
}
// Mark the jump target
@@ -4501,7 +4553,7 @@ DECODE_OPCODE:
}
break;
- case CEE_SWITCH:
+ case CEE_SWITCH:
{
seenJump = true;
@@ -4532,7 +4584,7 @@ DECODE_OPCODE:
}
// Find the end of the switch table
- unsigned jmpBase = (unsigned)((codeAddr - codeBegp) + jmpCnt*sizeof(DWORD));
+ unsigned jmpBase = (unsigned)((codeAddr - codeBegp) + jmpCnt * sizeof(DWORD));
// Make sure there is more code after the switch
if (jmpBase >= codeSize)
@@ -4551,8 +4603,7 @@ DECODE_OPCODE:
if (jmpAddr >= codeSize)
{
- BADCODE3("jump target out of range",
- " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
+ BADCODE3("jump target out of range", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
}
fgMarkJumpTarget(jumpTarget, jmpAddr);
@@ -4564,11 +4615,11 @@ DECODE_OPCODE:
}
break;
- case CEE_UNALIGNED:
- case CEE_CONSTRAINED:
- case CEE_READONLY:
- case CEE_VOLATILE:
- case CEE_TAILCALL:
+ case CEE_UNALIGNED:
+ case CEE_CONSTRAINED:
+ case CEE_READONLY:
+ case CEE_VOLATILE:
+ case CEE_TAILCALL:
{
if (codeAddr >= codeEndp)
{
@@ -4577,8 +4628,8 @@ DECODE_OPCODE:
}
break;
- case CEE_STARG:
- case CEE_STARG_S:
+ case CEE_STARG:
+ case CEE_STARG_S:
{
noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD));
@@ -4587,31 +4638,30 @@ DECODE_OPCODE:
goto TOO_FAR;
}
- varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr)
- : getU2LittleEndian(codeAddr);
+ varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr);
varNum = compMapILargNum(varNum); // account for possible hidden param
- // This check is only intended to prevent an AV. Bad varNum values will later
- // be handled properly by the verifier.
- if (varNum < lvaTableCnt)
- {
- if (isInlining)
- {
- impInlineInfo->inlArgInfo[varNum].argHasStargOp = true;
- }
- else
- {
- // In non-inline cases, note written-to locals.
- lvaTable[varNum].lvArgWrite = 1;
- }
- }
+ // This check is only intended to prevent an AV. Bad varNum values will later
+ // be handled properly by the verifier.
+ if (varNum < lvaTableCnt)
+ {
+ if (isInlining)
+ {
+ impInlineInfo->inlArgInfo[varNum].argHasStargOp = true;
+ }
+ else
+ {
+ // In non-inline cases, note written-to locals.
+ lvaTable[varNum].lvArgWrite = 1;
+ }
+ }
}
break;
- case CEE_LDARGA:
- case CEE_LDARGA_S:
- case CEE_LDLOCA:
- case CEE_LDLOCA_S:
+ case CEE_LDARGA:
+ case CEE_LDARGA_S:
+ case CEE_LDLOCA:
+ case CEE_LDLOCA_S:
{
// Handle address-taken args or locals
noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD));
@@ -4621,8 +4671,7 @@ DECODE_OPCODE:
goto TOO_FAR;
}
- varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr)
- : getU2LittleEndian(codeAddr);
+ varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr);
if (isInlining)
{
@@ -4715,72 +4764,74 @@ DECODE_OPCODE:
}
break;
- case CEE_JMP:
+ case CEE_JMP:
#if !defined(_TARGET_X86_) && !defined(_TARGET_ARM_)
- if (!isInlining)
- {
- // We transform this into a set of ldarg's + tail call and
- // thus may push more onto the stack than originally thought.
- // This doesn't interfere with verification because CEE_JMP
- // is never verifiable, and there's nothing unsafe you can
- // do with a an IL stack overflow if the JIT is expecting it.
- info.compMaxStack = max(info.compMaxStack, info.compILargsCount);
- break;
- }
+ if (!isInlining)
+ {
+ // We transform this into a set of ldarg's + tail call and
+ // thus may push more onto the stack than originally thought.
+ // This doesn't interfere with verification because CEE_JMP
+ // is never verifiable, and there's nothing unsafe you can
+ // do with an IL stack overflow if the JIT is expecting it.
+ info.compMaxStack = max(info.compMaxStack, info.compILargsCount);
+ break;
+ }
#endif // !_TARGET_X86_ && !_TARGET_ARM_
- // If we are inlining, we need to fail for a CEE_JMP opcode, just like
- // the list of other opcodes (for all platforms).
+ // If we are inlining, we need to fail for a CEE_JMP opcode, just like
+ // the list of other opcodes (for all platforms).
- __fallthrough;
+ __fallthrough;
- case CEE_CALLI:
- case CEE_LOCALLOC:
- case CEE_MKREFANY:
- case CEE_RETHROW:
- // CEE_CALLI should not be inlined because the JIT cannot generate an inlined call frame. If the call target
- // is a no-marshal CALLI P/Invoke we end up calling the IL stub. We don't NGEN these stubs, so we'll have to
- // JIT an IL stub for a trivial func. It's almost certainly a better choice to leave out the inline
- // candidate so we can generate an inlined call frame. It might be nice to call getCallInfo to figure out
- // what kind of call we have here.
+ case CEE_CALLI:
+ case CEE_LOCALLOC:
+ case CEE_MKREFANY:
+ case CEE_RETHROW:
+ // CEE_CALLI should not be inlined because the JIT cannot generate an inlined call frame. If the call
+ // target
+ // is a no-marshal CALLI P/Invoke we end up calling the IL stub. We don't NGEN these stubs, so we'll
+ // have to
+ // JIT an IL stub for a trivial func. It's almost certainly a better choice to leave out the inline
+ // candidate so we can generate an inlined call frame. It might be nice to call getCallInfo to figure
+ // out
+ // what kind of call we have here.
+
+ // Consider making this only for methods that are not force inline.
+ if (makeInlineObservations)
+ {
+ // Arguably this should be NoteFatal, but the legacy behavior is
+ // to ignore this for the prejit root.
+ compInlineResult->Note(InlineObservation::CALLEE_UNSUPPORTED_OPCODE);
- //Consider making this only for not force inline.
- if (makeInlineObservations)
- {
- // Arguably this should be NoteFatal, but the legacy behavior is
- // to ignore this for the prejit root.
- compInlineResult->Note(InlineObservation::CALLEE_UNSUPPORTED_OPCODE);
+ // Fail fast if we're inlining...
+ if (isInlining)
+ {
+ assert(compInlineResult->IsFailure());
+ return;
+ }
+ }
+ break;
- // Fail fast if we're inlining...
- if (isInlining)
+ case CEE_LDARG_0:
+ case CEE_LDARG_1:
+ case CEE_LDARG_2:
+ case CEE_LDARG_3:
+ if (makeInlineObservations)
{
- assert(compInlineResult->IsFailure());
- return;
+ pushedStack.PushArgument(opcode - CEE_LDARG_0);
}
- }
- break;
-
- case CEE_LDARG_0:
- case CEE_LDARG_1:
- case CEE_LDARG_2:
- case CEE_LDARG_3:
- if (makeInlineObservations)
- {
- pushedStack.PushArgument(opcode - CEE_LDARG_0);
- }
- break;
+ break;
- case CEE_LDARG_S:
- case CEE_LDARG:
+ case CEE_LDARG_S:
+ case CEE_LDARG:
{
if (codeAddr > codeEndp - sz)
{
goto TOO_FAR;
}
- varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr)
- : getU2LittleEndian(codeAddr);
+ varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr);
if (makeInlineObservations)
{
@@ -4789,26 +4840,26 @@ DECODE_OPCODE:
}
break;
- case CEE_LDLEN:
- if (makeInlineObservations)
- {
- pushedStack.PushArrayLen();
- }
- break;
+ case CEE_LDLEN:
+ if (makeInlineObservations)
+ {
+ pushedStack.PushArrayLen();
+ }
+ break;
- case CEE_CEQ:
- case CEE_CGT:
- case CEE_CGT_UN:
- case CEE_CLT:
- case CEE_CLT_UN:
- if (makeInlineObservations)
- {
- fgObserveInlineConstants(opcode, pushedStack, isInlining);
- }
- break;
+ case CEE_CEQ:
+ case CEE_CGT:
+ case CEE_CGT_UN:
+ case CEE_CLT:
+ case CEE_CLT_UN:
+ if (makeInlineObservations)
+ {
+ fgObserveInlineConstants(opcode, pushedStack, isInlining);
+ }
+ break;
- default:
- break;
+ default:
+ break;
}
// Skip any remaining operands this opcode may have
@@ -4817,15 +4868,15 @@ DECODE_OPCODE:
// Note the opcode we just saw
if (makeInlineObservations)
{
- InlineObservation obs = typeIsNormed ?
- InlineObservation::CALLEE_OPCODE_NORMED : InlineObservation::CALLEE_OPCODE;
+ InlineObservation obs =
+ typeIsNormed ? InlineObservation::CALLEE_OPCODE_NORMED : InlineObservation::CALLEE_OPCODE;
compInlineResult->NoteInt(obs, opcode);
}
}
if (codeAddr != codeEndp)
{
-TOO_FAR:
+ TOO_FAR:
BADCODE3("Code ends in the middle of an opcode, or there is a branch past the end of the method",
" at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
}
@@ -4851,7 +4902,7 @@ TOO_FAR:
// Assess profitability...
CORINFO_METHOD_INFO* methodInfo = &impInlineInfo->inlineCandidateInfo->methInfo;
compInlineResult->DetermineProfitability(methodInfo);
-
+
if (compInlineResult->IsFailure())
{
impInlineRoot()->m_inlineStrategy->NoteUnprofitable();
@@ -4908,23 +4959,23 @@ void Compiler::fgAdjustForAddressExposedOrWrittenThis()
// If there is a "ldarga 0" or "starg 0", grab and use the temp.
lvaArg0Var = lvaGrabTemp(false DEBUGARG("Address-exposed, or written this pointer"));
noway_assert(lvaArg0Var > (unsigned)info.compThisArg);
- lvaTable[lvaArg0Var].lvType = lvaTable[info.compThisArg].TypeGet();
- lvaTable[lvaArg0Var].lvAddrExposed = lvaTable[info.compThisArg].lvAddrExposed;
+ lvaTable[lvaArg0Var].lvType = lvaTable[info.compThisArg].TypeGet();
+ lvaTable[lvaArg0Var].lvAddrExposed = lvaTable[info.compThisArg].lvAddrExposed;
lvaTable[lvaArg0Var].lvDoNotEnregister = lvaTable[info.compThisArg].lvDoNotEnregister;
#ifdef DEBUG
lvaTable[lvaArg0Var].lvVMNeedsStackAddr = lvaTable[info.compThisArg].lvVMNeedsStackAddr;
lvaTable[lvaArg0Var].lvLiveInOutOfHndlr = lvaTable[info.compThisArg].lvLiveInOutOfHndlr;
- lvaTable[lvaArg0Var].lvLclFieldExpr = lvaTable[info.compThisArg].lvLclFieldExpr;
- lvaTable[lvaArg0Var].lvLiveAcrossUCall = lvaTable[info.compThisArg].lvLiveAcrossUCall;
+ lvaTable[lvaArg0Var].lvLclFieldExpr = lvaTable[info.compThisArg].lvLclFieldExpr;
+ lvaTable[lvaArg0Var].lvLiveAcrossUCall = lvaTable[info.compThisArg].lvLiveAcrossUCall;
#endif
- lvaTable[lvaArg0Var].lvArgWrite = lvaTable[info.compThisArg].lvArgWrite;
+ lvaTable[lvaArg0Var].lvArgWrite = lvaTable[info.compThisArg].lvArgWrite;
lvaTable[lvaArg0Var].lvVerTypeInfo = lvaTable[info.compThisArg].lvVerTypeInfo;
// Clear the TI_FLAG_THIS_PTR in the original 'this' pointer.
noway_assert(lvaTable[lvaArg0Var].lvVerTypeInfo.IsThisPtr());
lvaTable[info.compThisArg].lvVerTypeInfo.ClearThisPtr();
lvaTable[info.compThisArg].lvAddrExposed = false;
- lvaTable[info.compThisArg].lvArgWrite = false;
+ lvaTable[info.compThisArg].lvArgWrite = false;
}
}
@@ -4963,8 +5014,7 @@ void Compiler::fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, boo
if (lookForBranchCases)
{
- if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S ||
- opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S)
+ if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S || opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S)
{
unsigned slot0 = stack.GetSlot0();
if (FgStack::IsArgument(slot0))
@@ -4997,15 +5047,15 @@ void Compiler::fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, boo
unsigned slot1 = stack.GetSlot1();
// Arg feeds constant test
- if ((FgStack::IsConstant(slot0) && FgStack::IsArgument(slot1))
- ||(FgStack::IsConstant(slot1) && FgStack::IsArgument(slot0)))
+ if ((FgStack::IsConstant(slot0) && FgStack::IsArgument(slot1)) ||
+ (FgStack::IsConstant(slot1) && FgStack::IsArgument(slot0)))
{
compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST);
}
// Arg feeds range check
- if ((FgStack::IsArrayLen(slot0) && FgStack::IsArgument(slot1))
- ||(FgStack::IsArrayLen(slot1) && FgStack::IsArgument(slot0)))
+ if ((FgStack::IsArrayLen(slot0) && FgStack::IsArgument(slot1)) ||
+ (FgStack::IsArrayLen(slot1) && FgStack::IsArgument(slot0)))
{
compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK);
}
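// Illustrative sketch (simplified, hypothetical types; not the real FgStack API): the kind of
// two-slot tracking that the "arg feeds constant test" / "arg feeds range check" checks above
// rely on. Only the two most recently pushed abstract stack values need to be remembered.
enum class SlotKind
{
    Unknown,
    Constant,
    Argument,
    ArrayLen
};
struct TwoSlotStack
{
    SlotKind slot0 = SlotKind::Unknown; // most recently pushed value
    SlotKind slot1 = SlotKind::Unknown; // value pushed just before slot0
    void Push(SlotKind kind)
    {
        slot1 = slot0;
        slot0 = kind;
    }
};
// "Arg feeds constant test" is then the symmetric check
//     (slot0 == Constant && slot1 == Argument) || (slot1 == Constant && slot0 == Argument)
// and "arg feeds range check" is the same with ArrayLen in place of Constant.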
@@ -5038,16 +5088,16 @@ void Compiler::fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, boo
* Finally link up the bbJumpDest of the blocks together
*/
-void Compiler::fgMarkBackwardJump(BasicBlock * startBlock, BasicBlock * endBlock)
+void Compiler::fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock)
{
noway_assert(startBlock->bbNum <= endBlock->bbNum);
- for (BasicBlock * block = startBlock;
- block != endBlock->bbNext;
- block = block->bbNext)
+ for (BasicBlock* block = startBlock; block != endBlock->bbNext; block = block->bbNext)
{
if ((block->bbFlags & BBF_BACKWARD_JUMP) == 0)
+ {
block->bbFlags |= BBF_BACKWARD_JUMP;
+ }
}
}
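// Illustrative use (hypothetical block names): if BB04 ends in a branch whose target BB02 comes
// earlier in the block list, fgLinkBasicBlocks (below) does roughly
//
//     if (jumpDest->bbNum <= curBlock->bbNum)
//     {
//         fgMarkBackwardJump(jumpDest, curBlock); // BB02, BB03 and BB04 all get BBF_BACKWARD_JUMP
//     }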
@@ -5056,7 +5106,7 @@ void Compiler::fgMarkBackwardJump(BasicBlock * startBlock, BasicBlock
* Finally link up the bbJumpDest of the blocks together
*/
-void Compiler::fgLinkBasicBlocks()
+void Compiler::fgLinkBasicBlocks()
{
/* Create the basic block lookup tables */
@@ -5068,88 +5118,87 @@ void Compiler::fgLinkBasicBlocks()
/* Walk all the basic blocks, filling in the target addresses */
- for (BasicBlock * curBBdesc = fgFirstBB;
- curBBdesc;
- curBBdesc = curBBdesc->bbNext)
+ for (BasicBlock* curBBdesc = fgFirstBB; curBBdesc; curBBdesc = curBBdesc->bbNext)
{
switch (curBBdesc->bbJumpKind)
{
- case BBJ_COND:
- case BBJ_ALWAYS:
- case BBJ_LEAVE:
- curBBdesc->bbJumpDest = fgLookupBB(curBBdesc->bbJumpOffs);
- curBBdesc->bbJumpDest->bbRefs++;
- if (curBBdesc->bbJumpDest->bbNum <= curBBdesc->bbNum)
- {
- fgMarkBackwardJump(curBBdesc->bbJumpDest, curBBdesc);
- }
+ case BBJ_COND:
+ case BBJ_ALWAYS:
+ case BBJ_LEAVE:
+ curBBdesc->bbJumpDest = fgLookupBB(curBBdesc->bbJumpOffs);
+ curBBdesc->bbJumpDest->bbRefs++;
+ if (curBBdesc->bbJumpDest->bbNum <= curBBdesc->bbNum)
+ {
+ fgMarkBackwardJump(curBBdesc->bbJumpDest, curBBdesc);
+ }
- /* Is the next block reachable? */
+ /* Is the next block reachable? */
- if (curBBdesc->bbJumpKind == BBJ_ALWAYS ||
- curBBdesc->bbJumpKind == BBJ_LEAVE)
- break;
+ if (curBBdesc->bbJumpKind == BBJ_ALWAYS || curBBdesc->bbJumpKind == BBJ_LEAVE)
+ {
+ break;
+ }
- if (!curBBdesc->bbNext)
- BADCODE("Fall thru the end of a method");
+ if (!curBBdesc->bbNext)
+ {
+ BADCODE("Fall thru the end of a method");
+ }
// Fall through, the next block is also reachable
- case BBJ_NONE:
- curBBdesc->bbNext->bbRefs++;
- break;
+ case BBJ_NONE:
+ curBBdesc->bbNext->bbRefs++;
+ break;
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- case BBJ_THROW:
- case BBJ_RETURN:
- break;
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ break;
- case BBJ_SWITCH:
+ case BBJ_SWITCH:
- unsigned jumpCnt; jumpCnt = curBBdesc->bbJumpSwt->bbsCount;
- BasicBlock * * jumpPtr; jumpPtr = curBBdesc->bbJumpSwt->bbsDstTab;
+ unsigned jumpCnt;
+ jumpCnt = curBBdesc->bbJumpSwt->bbsCount;
+ BasicBlock** jumpPtr;
+ jumpPtr = curBBdesc->bbJumpSwt->bbsDstTab;
- do
- {
- *jumpPtr = fgLookupBB((unsigned)*(size_t*)jumpPtr);
- (*jumpPtr)->bbRefs++;
- if ((*jumpPtr)->bbNum <= curBBdesc->bbNum)
+ do
{
- fgMarkBackwardJump(*jumpPtr, curBBdesc);
- }
- }
- while (++jumpPtr, --jumpCnt);
+ *jumpPtr = fgLookupBB((unsigned)*(size_t*)jumpPtr);
+ (*jumpPtr)->bbRefs++;
+ if ((*jumpPtr)->bbNum <= curBBdesc->bbNum)
+ {
+ fgMarkBackwardJump(*jumpPtr, curBBdesc);
+ }
+ } while (++jumpPtr, --jumpCnt);
- /* Default case of CEE_SWITCH (next block), is at end of jumpTab[] */
+ /* Default case of CEE_SWITCH (next block), is at end of jumpTab[] */
- noway_assert(*(jumpPtr-1) == curBBdesc->bbNext);
- break;
+ noway_assert(*(jumpPtr - 1) == curBBdesc->bbNext);
+ break;
- case BBJ_CALLFINALLY: // BBJ_CALLFINALLY and BBJ_EHCATCHRET don't appear until later
- case BBJ_EHCATCHRET:
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ case BBJ_CALLFINALLY: // BBJ_CALLFINALLY and BBJ_EHCATCHRET don't appear until later
+ case BBJ_EHCATCHRET:
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
}
}
-
/*****************************************************************************
*
* Walk the instrs to create the basic blocks.
*/
-void Compiler::fgMakeBasicBlocks(const BYTE * codeAddr,
- IL_OFFSET codeSize,
- BYTE * jumpTarget)
+void Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, BYTE* jumpTarget)
{
- const BYTE * codeBegp = codeAddr;
- const BYTE * codeEndp = codeAddr + codeSize;
- bool tailCall = false;
- unsigned curBBoffs;
- BasicBlock * curBBdesc;
+ const BYTE* codeBegp = codeAddr;
+ const BYTE* codeEndp = codeAddr + codeSize;
+ bool tailCall = false;
+ unsigned curBBoffs;
+ BasicBlock* curBBdesc;
/* Clear the beginning offset for the first BB */
@@ -5161,28 +5210,31 @@ void Compiler::fgMakeBasicBlocks(const BYTE * codeAddr,
compResetScopeLists();
// Ignore scopes beginning at offset 0
- while (compGetNextEnterScope(0)) { /* do nothing */ }
- while (compGetNextExitScope(0)) { /* do nothing */ }
+ while (compGetNextEnterScope(0))
+ { /* do nothing */
+ }
+ while (compGetNextExitScope(0))
+ { /* do nothing */
+ }
}
#endif
-
BBjumpKinds jmpKind;
do
{
- OPCODE opcode;
- unsigned sz;
- unsigned jmpAddr = DUMMY_INIT(BAD_IL_OFFSET);
- unsigned bbFlags = 0;
- BBswtDesc * swtDsc = 0;
- unsigned nxtBBoffs;
+ OPCODE opcode;
+ unsigned sz;
+ unsigned jmpAddr = DUMMY_INIT(BAD_IL_OFFSET);
+ unsigned bbFlags = 0;
+ BBswtDesc* swtDsc = nullptr;
+ unsigned nxtBBoffs;
- opcode = (OPCODE) getU1LittleEndian(codeAddr);
+ opcode = (OPCODE)getU1LittleEndian(codeAddr);
codeAddr += sizeof(__int8);
- jmpKind = BBJ_NONE;
+ jmpKind = BBJ_NONE;
-DECODE_OPCODE:
+ DECODE_OPCODE:
/* Get the size of additional parameters */
@@ -5192,83 +5244,83 @@ DECODE_OPCODE:
switch (opcode)
{
- signed jmpDist;
-
-
- case CEE_PREFIX1:
- if (jumpTarget[codeAddr - codeBegp] != JT_NONE)
- BADCODE3("jump target between prefix 0xFE and opcode",
- " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
-
- opcode = (OPCODE) (256+getU1LittleEndian(codeAddr));
- codeAddr += sizeof(__int8);
- goto DECODE_OPCODE;
-
- /* Check to see if we have a jump/return opcode */
-
- case CEE_BRFALSE:
- case CEE_BRFALSE_S:
- case CEE_BRTRUE:
- case CEE_BRTRUE_S:
-
- case CEE_BEQ:
- case CEE_BEQ_S:
- case CEE_BGE:
- case CEE_BGE_S:
- case CEE_BGE_UN:
- case CEE_BGE_UN_S:
- case CEE_BGT:
- case CEE_BGT_S:
- case CEE_BGT_UN:
- case CEE_BGT_UN_S:
- case CEE_BLE:
- case CEE_BLE_S:
- case CEE_BLE_UN:
- case CEE_BLE_UN_S:
- case CEE_BLT:
- case CEE_BLT_S:
- case CEE_BLT_UN:
- case CEE_BLT_UN_S:
- case CEE_BNE_UN:
- case CEE_BNE_UN_S:
-
- jmpKind = BBJ_COND;
- goto JMP;
-
-
- case CEE_LEAVE:
- case CEE_LEAVE_S:
-
- // We need to check if we are jumping out of a finally-protected try.
- jmpKind = BBJ_LEAVE;
- goto JMP;
-
+ signed jmpDist;
- case CEE_BR:
- case CEE_BR_S:
- jmpKind = BBJ_ALWAYS;
- goto JMP;
-
- JMP:
-
- /* Compute the target address of the jump */
+ case CEE_PREFIX1:
+ if (jumpTarget[codeAddr - codeBegp] != JT_NONE)
+ {
+ BADCODE3("jump target between prefix 0xFE and opcode", " at offset %04X",
+ (IL_OFFSET)(codeAddr - codeBegp));
+ }
- jmpDist = (sz==1) ? getI1LittleEndian(codeAddr)
- : getI4LittleEndian(codeAddr);
+ opcode = (OPCODE)(256 + getU1LittleEndian(codeAddr));
+ codeAddr += sizeof(__int8);
+ goto DECODE_OPCODE;
- if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_BR || opcode == CEE_BR_S))
- continue; /* NOP */
+ /* Check to see if we have a jump/return opcode */
+
+ case CEE_BRFALSE:
+ case CEE_BRFALSE_S:
+ case CEE_BRTRUE:
+ case CEE_BRTRUE_S:
+
+ case CEE_BEQ:
+ case CEE_BEQ_S:
+ case CEE_BGE:
+ case CEE_BGE_S:
+ case CEE_BGE_UN:
+ case CEE_BGE_UN_S:
+ case CEE_BGT:
+ case CEE_BGT_S:
+ case CEE_BGT_UN:
+ case CEE_BGT_UN_S:
+ case CEE_BLE:
+ case CEE_BLE_S:
+ case CEE_BLE_UN:
+ case CEE_BLE_UN_S:
+ case CEE_BLT:
+ case CEE_BLT_S:
+ case CEE_BLT_UN:
+ case CEE_BLT_UN_S:
+ case CEE_BNE_UN:
+ case CEE_BNE_UN_S:
+
+ jmpKind = BBJ_COND;
+ goto JMP;
+
+ case CEE_LEAVE:
+ case CEE_LEAVE_S:
+
+ // We need to check if we are jumping out of a finally-protected try.
+ jmpKind = BBJ_LEAVE;
+ goto JMP;
+
+ case CEE_BR:
+ case CEE_BR_S:
+ jmpKind = BBJ_ALWAYS;
+ goto JMP;
+
+ JMP:
+
+ /* Compute the target address of the jump */
+
+ jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
+
+ if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_BR || opcode == CEE_BR_S))
+ {
+ continue; /* NOP */
+ }
- jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist;
- break;
+ jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist;
+ break;
- case CEE_SWITCH:
+ case CEE_SWITCH:
{
- unsigned jmpBase;
- unsigned jmpCnt; // # of switch cases (excluding defualt)
+ unsigned jmpBase;
+ unsigned jmpCnt; // # of switch cases (excluding default)
- BasicBlock * * jmpTab;
- BasicBlock * * jmpPtr;
+ BasicBlock** jmpTab;
+ BasicBlock** jmpPtr;
/* Allocate the switch descriptor */
@@ -5276,24 +5328,25 @@ DECODE_OPCODE:
/* Read the number of entries in the table */
- jmpCnt = getU4LittleEndian(codeAddr); codeAddr += 4;
+ jmpCnt = getU4LittleEndian(codeAddr);
+ codeAddr += 4;
/* Compute the base offset for the opcode */
- jmpBase = (IL_OFFSET)((codeAddr - codeBegp) + jmpCnt*sizeof(DWORD));
+ jmpBase = (IL_OFFSET)((codeAddr - codeBegp) + jmpCnt * sizeof(DWORD));
/* Allocate the jump table */
- jmpPtr = jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jmpCnt+1];
+ jmpPtr = jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jmpCnt + 1];
/* Fill in the jump table */
for (unsigned count = jmpCnt; count; count--)
{
- jmpDist = getI4LittleEndian(codeAddr);
+ jmpDist = getI4LittleEndian(codeAddr);
codeAddr += 4;
- //store the offset in the pointer. We change these in fgLinkBasicBlocks().
+ // store the offset in the pointer. We change these in fgLinkBasicBlocks().
*jmpPtr++ = (BasicBlock*)(size_t)(jmpBase + jmpDist);
}
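// Illustrative sketch (simplified): the two-phase fixup used for switch targets. While scanning
// the IL only the target offsets are known, so each table slot temporarily carries an offset
// smuggled through the BasicBlock* type; fgLinkBasicBlocks later swaps in the real block:
//
//     table[i] = (BasicBlock*)(size_t)(jmpBase + jmpDist);   // phase 1: here, store the IL offset
//     table[i] = fgLookupBB((unsigned)(size_t)table[i]);     // phase 2: in fgLinkBasicBlocks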
@@ -5307,7 +5360,7 @@ DECODE_OPCODE:
/* Compute the size of the switch opcode operands */
- sz = sizeof(DWORD) + jmpCnt*sizeof(DWORD);
+ sz = sizeof(DWORD) + jmpCnt * sizeof(DWORD);
/* Fill in the remaining fields of the switch descriptor */
@@ -5316,7 +5369,7 @@ DECODE_OPCODE:
/* This is definitely a jump */
- jmpKind = BBJ_SWITCH;
+ jmpKind = BBJ_SWITCH;
fgHasSwitch = true;
#ifndef LEGACY_BACKEND
@@ -5332,43 +5385,44 @@ DECODE_OPCODE:
// (maybe immediately after the switch jump), and make the "base" address be also in that section,
// probably the address after the switch jump.
opts.compProcedureSplitting = false;
- JITDUMP("Turning off procedure splitting for this method, as it might need switch tables; implementation limitation.\n");
+ JITDUMP("Turning off procedure splitting for this method, as it might need switch tables; "
+ "implementation limitation.\n");
}
#endif // !LEGACY_BACKEND
-
}
- goto GOT_ENDP;
+ goto GOT_ENDP;
- case CEE_ENDFILTER:
- bbFlags |= BBF_DONT_REMOVE;
- jmpKind = BBJ_EHFILTERRET;
- break;
+ case CEE_ENDFILTER:
+ bbFlags |= BBF_DONT_REMOVE;
+ jmpKind = BBJ_EHFILTERRET;
+ break;
- case CEE_ENDFINALLY:
- jmpKind = BBJ_EHFINALLYRET;
- break;
+ case CEE_ENDFINALLY:
+ jmpKind = BBJ_EHFINALLYRET;
+ break;
- case CEE_READONLY:
- case CEE_CONSTRAINED:
- case CEE_TAILCALL:
- case CEE_VOLATILE:
- case CEE_UNALIGNED:
- // fgFindJumpTargets should have ruled out this possibility
- // (i.e. a prefix opcodes as last intruction in a block)
- noway_assert(codeAddr < codeEndp);
-
- if (jumpTarget[codeAddr - codeBegp] != JT_NONE)
- BADCODE3("jump target between prefix and an opcode",
- " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
- break;
+ case CEE_READONLY:
+ case CEE_CONSTRAINED:
+ case CEE_TAILCALL:
+ case CEE_VOLATILE:
+ case CEE_UNALIGNED:
+ // fgFindJumpTargets should have ruled out this possibility
+ // (i.e. a prefix opcode as the last instruction in a block)
+ noway_assert(codeAddr < codeEndp);
- case CEE_CALL:
- case CEE_CALLVIRT:
- case CEE_CALLI:
+ if (jumpTarget[codeAddr - codeBegp] != JT_NONE)
+ {
+ BADCODE3("jump target between prefix and an opcode", " at offset %04X",
+ (IL_OFFSET)(codeAddr - codeBegp));
+ }
+ break;
+
+ case CEE_CALL:
+ case CEE_CALLVIRT:
+ case CEE_CALLI:
{
- if (compIsForInlining() || // Ignore tail call in the inlinee. Period.
- (!tailCall &&
- !compTailCallStress()) // A new BB with BBJ_RETURN would have been created
+ if (compIsForInlining() || // Ignore tail call in the inlinee. Period.
+ (!tailCall && !compTailCallStress()) // A new BB with BBJ_RETURN would have been created
// after a tailcall statement.
// We need to keep this invariant if we want to stress the tailcall.
@@ -5385,11 +5439,12 @@ DECODE_OPCODE:
// Make sure the code sequence is legal for the tail call.
// If so, mark this BB as having a BBJ_RETURN.
- if (codeAddr >= codeEndp - sz) {
- BADCODE3("No code found after the call instruction",
- " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
+ if (codeAddr >= codeEndp - sz)
+ {
+ BADCODE3("No code found after the call instruction", " at offset %04X",
+ (IL_OFFSET)(codeAddr - codeBegp));
}
-
+
if (tailCall)
{
bool isCallPopAndRet = false;
@@ -5399,14 +5454,14 @@ DECODE_OPCODE:
// false. This will only affect explicit tail calls when IL verification is not needed for the
// method.
bool isRecursive = false;
- if (!impIsTailCallILPattern(tailCall, opcode, codeAddr+sz, codeEndp, isRecursive, &isCallPopAndRet))
+ if (!impIsTailCallILPattern(tailCall, opcode, codeAddr + sz, codeEndp, isRecursive,
+ &isCallPopAndRet))
{
#ifdef _TARGET_AMD64_
- BADCODE3("tail call not followed by ret or pop+ret",
- " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
+ BADCODE3("tail call not followed by ret or pop+ret", " at offset %04X",
+ (IL_OFFSET)(codeAddr - codeBegp));
#else
- BADCODE3("tail call not followed by ret",
- " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
+ BADCODE3("tail call not followed by ret", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
#endif //_TARGET_AMD64_
}
@@ -5423,7 +5478,7 @@ DECODE_OPCODE:
}
else
{
- OPCODE nextOpcode = (OPCODE) getU1LittleEndian(codeAddr + sz);
+ OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr + sz);
if (nextOpcode != CEE_RET)
{
@@ -5448,59 +5503,63 @@ DECODE_OPCODE:
// fall-through
- case CEE_JMP:
+ case CEE_JMP:
/* These are equivalent to a return from the current method
But instead of directly returning to the caller we jump and
execute something else in between */
- case CEE_RET:
- jmpKind = BBJ_RETURN;
- break;
+ case CEE_RET:
+ jmpKind = BBJ_RETURN;
+ break;
- case CEE_THROW:
- case CEE_RETHROW:
- jmpKind = BBJ_THROW;
- break;
+ case CEE_THROW:
+ case CEE_RETHROW:
+ jmpKind = BBJ_THROW;
+ break;
#ifdef DEBUG
- // make certain we did not forget any flow of control instructions
- // by checking the 'ctrl' field in opcode.def. First filter out all
- // non-ctrl instructions
-# define BREAK(name) case name: break;
-# define NEXT(name) case name: break;
-# define CALL(name)
-# define THROW(name)
-# undef RETURN // undef contract RETURN macro
-# define RETURN(name)
-# define META(name)
-# define BRANCH(name)
-# define COND_BRANCH(name)
-# define PHI(name)
-
-# define OPDEF(name,string,pop,push,oprType,opcType,l,s1,s2,ctrl) ctrl(name)
-# include "opcode.def"
-# undef OPDEF
-
-# undef PHI
-# undef BREAK
-# undef CALL
-# undef NEXT
-# undef THROW
-# undef RETURN
-# undef META
-# undef BRANCH
-# undef COND_BRANCH
-
- // These ctrl-flow opcodes don't need any special handling
- case CEE_NEWOBJ: // CTRL_CALL
- break;
+// make certain we did not forget any flow of control instructions
+// by checking the 'ctrl' field in opcode.def. First filter out all
+// non-ctrl instructions
+#define BREAK(name) \
+ case name: \
+ break;
+#define NEXT(name) \
+ case name: \
+ break;
+#define CALL(name)
+#define THROW(name)
+#undef RETURN // undef contract RETURN macro
+#define RETURN(name)
+#define META(name)
+#define BRANCH(name)
+#define COND_BRANCH(name)
+#define PHI(name)
+
+#define OPDEF(name, string, pop, push, oprType, opcType, l, s1, s2, ctrl) ctrl(name)
+#include "opcode.def"
+#undef OPDEF
+
+#undef PHI
+#undef BREAK
+#undef CALL
+#undef NEXT
+#undef THROW
+#undef RETURN
+#undef META
+#undef BRANCH
+#undef COND_BRANCH
+
+ // These ctrl-flow opcodes don't need any special handling
+ case CEE_NEWOBJ: // CTRL_CALL
+ break;
- // what's left are forgotten instructions
- default:
- BADCODE("Unrecognized control Opcode");
- break;
-#else // !DEBUG
- default:
- break;
+ // what's left are forgotten instructions
+ default:
+ BADCODE("Unrecognized control Opcode");
+ break;
+#else // !DEBUG
+ default:
+ break;
#endif // !DEBUG
}
@@ -5508,21 +5567,22 @@ DECODE_OPCODE:
codeAddr += sz;
-GOT_ENDP:
+ GOT_ENDP:
tailCall = (opcode == CEE_TAILCALL);
/* Make sure a jump target isn't in the middle of our opcode */
- if (sz)
+ if (sz)
{
IL_OFFSET offs = (IL_OFFSET)(codeAddr - codeBegp) - sz; // offset of the operand
- for (unsigned i=0; i<sz; i++, offs++)
+ for (unsigned i = 0; i < sz; i++, offs++)
{
- if (jumpTarget[offs] != JT_NONE)
- BADCODE3("jump into the middle of an opcode",
- " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
+ if (jumpTarget[offs] != JT_NONE)
+ {
+ BADCODE3("jump into the middle of an opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
+ }
}
}
@@ -5532,24 +5592,31 @@ GOT_ENDP:
#ifdef DEBUGGING_SUPPORT
- bool foundScope = false;
+ bool foundScope = false;
if (opts.compDbgCode && (info.compVarScopesCount > 0))
{
- while (compGetNextEnterScope(nxtBBoffs)) foundScope = true;
- while (compGetNextExitScope(nxtBBoffs)) foundScope = true;
+ while (compGetNextEnterScope(nxtBBoffs))
+ {
+ foundScope = true;
+ }
+ while (compGetNextExitScope(nxtBBoffs))
+ {
+ foundScope = true;
+ }
}
#endif
/* Do we have a jump? */
- if (jmpKind == BBJ_NONE)
+ if (jmpKind == BBJ_NONE)
{
/* No jump; make sure we don't fall off the end of the function */
- if (codeAddr == codeEndp)
- BADCODE3("missing return opcode",
- " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
+ if (codeAddr == codeEndp)
+ {
+ BADCODE3("missing return opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp));
+ }
/* If a label follows this opcode, we'll have to make a new BB */
@@ -5561,21 +5628,25 @@ GOT_ENDP:
makeBlock = true;
#ifdef DEBUG
if (verbose)
+ {
printf("Splitting at BBoffs = %04u\n", nxtBBoffs);
+ }
#endif // DEBUG
}
#endif // DEBUGGING_SUPPORT
if (!makeBlock)
+ {
continue;
+ }
}
/* We need to create a new basic block */
curBBdesc = fgNewBasicBlock(jmpKind);
- curBBdesc->bbFlags |= bbFlags;
- curBBdesc->bbRefs = 0;
+ curBBdesc->bbFlags |= bbFlags;
+ curBBdesc->bbRefs = 0;
curBBdesc->bbCodeOffs = curBBoffs;
curBBdesc->bbCodeOffsEnd = nxtBBoffs;
@@ -5600,19 +5671,19 @@ GOT_ENDP:
switch (jmpKind)
{
- case BBJ_SWITCH:
- curBBdesc->bbJumpSwt = swtDsc;
- break;
+ case BBJ_SWITCH:
+ curBBdesc->bbJumpSwt = swtDsc;
+ break;
- case BBJ_COND:
- case BBJ_ALWAYS:
- case BBJ_LEAVE:
- noway_assert(jmpAddr != DUMMY_INIT(BAD_IL_OFFSET));
- curBBdesc->bbJumpOffs = jmpAddr;
- break;
+ case BBJ_COND:
+ case BBJ_ALWAYS:
+ case BBJ_LEAVE:
+ noway_assert(jmpAddr != DUMMY_INIT(BAD_IL_OFFSET));
+ curBBdesc->bbJumpOffs = jmpAddr;
+ break;
- default:
- break;
+ default:
+ break;
}
DBEXEC(verbose, curBBdesc->dspBlockHeader(this, false, false, false));
@@ -5620,8 +5691,7 @@ GOT_ENDP:
/* Remember where the next BB will start */
curBBoffs = nxtBBoffs;
- }
- while (codeAddr < codeEndp);
+ } while (codeAddr < codeEndp);
noway_assert(codeAddr == codeEndp);
@@ -5630,18 +5700,18 @@ GOT_ENDP:
fgLinkBasicBlocks();
}
-
/*****************************************************************************
*
* Main entry point to discover the basic blocks for the current function.
*/
-void Compiler::fgFindBasicBlocks()
+void Compiler::fgFindBasicBlocks()
{
#ifdef DEBUG
- if (verbose)
- printf("*************** In fgFindBasicBlocks() for %s\n",
- info.compFullName);
+ if (verbose)
+ {
+ printf("*************** In fgFindBasicBlocks() for %s\n", info.compFullName);
+ }
#endif
/* Allocate the 'jump target' vector
@@ -5651,21 +5721,23 @@ void Compiler::fgFindBasicBlocks()
* when we need to add a dummy block
* to record the end of a try or handler region.
*/
- BYTE* jumpTarget = new (this, CMK_Unknown) BYTE[info.compILCodeSize+1];
- memset(jumpTarget, JT_NONE, info.compILCodeSize+1);
+ BYTE* jumpTarget = new (this, CMK_Unknown) BYTE[info.compILCodeSize + 1];
+ memset(jumpTarget, JT_NONE, info.compILCodeSize + 1);
noway_assert(JT_NONE == 0);
/* Walk the instrs to find all jump targets */
fgFindJumpTargets(info.compCode, info.compILCodeSize, jumpTarget);
if (compDonotInline())
+ {
return;
+ }
- unsigned XTnum;
+ unsigned XTnum;
/* Are there any exception handlers? */
- if (info.compXcptnsCount > 0)
+ if (info.compXcptnsCount > 0)
{
noway_assert(!compIsForInlining());
@@ -5673,44 +5745,66 @@ void Compiler::fgFindBasicBlocks()
for (XTnum = 0; XTnum < info.compXcptnsCount; XTnum++)
{
- DWORD tmpOffset;
+ DWORD tmpOffset;
CORINFO_EH_CLAUSE clause;
info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause);
- noway_assert(clause.HandlerLength !=(unsigned) -1);
+ noway_assert(clause.HandlerLength != (unsigned)-1);
if (clause.TryLength <= 0)
+ {
BADCODE("try block length <=0");
+ }
/* Mark the 'try' block extent and the handler itself */
if (clause.TryOffset > info.compILCodeSize)
+ {
BADCODE("try offset is > codesize");
- if (jumpTarget[clause.TryOffset ] == JT_NONE)
- jumpTarget[clause.TryOffset ] = JT_ADDR;
+ }
+ if (jumpTarget[clause.TryOffset] == JT_NONE)
+ {
+ jumpTarget[clause.TryOffset] = JT_ADDR;
+ }
tmpOffset = clause.TryOffset + clause.TryLength;
if (tmpOffset > info.compILCodeSize)
+ {
BADCODE("try end is > codesize");
- if (jumpTarget[tmpOffset ] == JT_NONE)
- jumpTarget[tmpOffset ] = JT_ADDR;
+ }
+ if (jumpTarget[tmpOffset] == JT_NONE)
+ {
+ jumpTarget[tmpOffset] = JT_ADDR;
+ }
if (clause.HandlerOffset > info.compILCodeSize)
+ {
BADCODE("handler offset > codesize");
- if (jumpTarget[clause.HandlerOffset ] == JT_NONE)
- jumpTarget[clause.HandlerOffset ] = JT_ADDR;
+ }
+ if (jumpTarget[clause.HandlerOffset] == JT_NONE)
+ {
+ jumpTarget[clause.HandlerOffset] = JT_ADDR;
+ }
tmpOffset = clause.HandlerOffset + clause.HandlerLength;
if (tmpOffset > info.compILCodeSize)
+ {
BADCODE("handler end > codesize");
- if (jumpTarget[tmpOffset ] == JT_NONE)
- jumpTarget[tmpOffset ] = JT_ADDR;
+ }
+ if (jumpTarget[tmpOffset] == JT_NONE)
+ {
+ jumpTarget[tmpOffset] = JT_ADDR;
+ }
if (clause.Flags & CORINFO_EH_CLAUSE_FILTER)
{
if (clause.FilterOffset > info.compILCodeSize)
+ {
BADCODE("filter offset > codesize");
- if (jumpTarget[clause.FilterOffset ] == JT_NONE)
- jumpTarget[clause.FilterOffset ] = JT_ADDR;
+ }
+ if (jumpTarget[clause.FilterOffset] == JT_NONE)
+ {
+ jumpTarget[clause.FilterOffset] = JT_ADDR;
+ }
}
}
}
@@ -5723,7 +5817,9 @@ void Compiler::fgFindBasicBlocks()
for (unsigned i = 0; i < info.compILCodeSize + 1; i++)
{
if (jumpTarget[i] == JT_NONE)
+ {
continue;
+ }
anyJumpTargets = true;
printf(" IL_%04x", i);
@@ -5751,7 +5847,7 @@ void Compiler::fgFindBasicBlocks()
if (compIsForInlining())
{
- bool hasReturnBlocks = false;
+ bool hasReturnBlocks = false;
bool hasMoreThanOneReturnBlock = false;
for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
@@ -5787,10 +5883,11 @@ void Compiler::fgFindBasicBlocks()
}
noway_assert(info.compXcptnsCount == 0);
- compHndBBtab = impInlineInfo->InlinerCompiler->compHndBBtab;
- compHndBBtabAllocCount = impInlineInfo->InlinerCompiler->compHndBBtabAllocCount; // we probably only use the table, not add to it.
- compHndBBtabCount = impInlineInfo->InlinerCompiler->compHndBBtabCount;
- info.compXcptnsCount = impInlineInfo->InlinerCompiler->info.compXcptnsCount;
+ compHndBBtab = impInlineInfo->InlinerCompiler->compHndBBtab;
+ compHndBBtabAllocCount =
+ impInlineInfo->InlinerCompiler->compHndBBtabAllocCount; // we probably only use the table, not add to it.
+ compHndBBtabCount = impInlineInfo->InlinerCompiler->compHndBBtabCount;
+ info.compXcptnsCount = impInlineInfo->InlinerCompiler->info.compXcptnsCount;
if (info.compRetNativeType != TYP_VOID && hasMoreThanOneReturnBlock)
{
@@ -5803,11 +5900,15 @@ void Compiler::fgFindBasicBlocks()
/* Mark all blocks within 'try' blocks as such */
- if (info.compXcptnsCount == 0)
+ if (info.compXcptnsCount == 0)
+ {
return;
+ }
if (info.compXcptnsCount > MAX_XCPTN_INDEX)
+ {
IMPL_LIMITATION("too many exception clauses");
+ }
/* Allocate the exception handler table */
@@ -5827,11 +5928,9 @@ void Compiler::fgFindBasicBlocks()
// Annotate BBs with exception handling information required for generating correct eh code
// as well as checking for correct IL
- EHblkDsc * HBtab;
+ EHblkDsc* HBtab;
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
CORINFO_EH_CLAUSE clause;
info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause);
@@ -5844,23 +5943,25 @@ void Compiler::fgFindBasicBlocks()
}
#endif // DEBUG
- IL_OFFSET tryBegOff = clause.TryOffset;
- IL_OFFSET tryEndOff = tryBegOff + clause.TryLength;
- IL_OFFSET filterBegOff = 0;
- IL_OFFSET hndBegOff = clause.HandlerOffset;
- IL_OFFSET hndEndOff = hndBegOff + clause.HandlerLength;
+ IL_OFFSET tryBegOff = clause.TryOffset;
+ IL_OFFSET tryEndOff = tryBegOff + clause.TryLength;
+ IL_OFFSET filterBegOff = 0;
+ IL_OFFSET hndBegOff = clause.HandlerOffset;
+ IL_OFFSET hndEndOff = hndBegOff + clause.HandlerLength;
if (clause.Flags & CORINFO_EH_CLAUSE_FILTER)
{
filterBegOff = clause.FilterOffset;
}
- if (tryEndOff > info.compILCodeSize)
- BADCODE3("end of try block beyond end of method for try",
- " at offset %04X",tryBegOff);
- if (hndEndOff > info.compILCodeSize)
- BADCODE3("end of hnd block beyond end of method for try",
- " at offset %04X",tryBegOff);
+ if (tryEndOff > info.compILCodeSize)
+ {
+ BADCODE3("end of try block beyond end of method for try", " at offset %04X", tryBegOff);
+ }
+ if (hndEndOff > info.compILCodeSize)
+ {
+ BADCODE3("end of hnd block beyond end of method for try", " at offset %04X", tryBegOff);
+ }
HBtab->ebdTryBegOffset = tryBegOff;
HBtab->ebdTryEndOffset = tryEndOff;
@@ -5870,22 +5971,23 @@ void Compiler::fgFindBasicBlocks()
/* Convert the various addresses to basic blocks */
- BasicBlock * tryBegBB = fgLookupBB(tryBegOff);
- BasicBlock * tryEndBB = fgLookupBB(tryEndOff); // note: this can be NULL if the try region is at the end of the function
- BasicBlock * hndBegBB = fgLookupBB(hndBegOff);
- BasicBlock * hndEndBB = NULL;
- BasicBlock * filtBB = NULL;
- BasicBlock * block;
+ BasicBlock* tryBegBB = fgLookupBB(tryBegOff);
+ BasicBlock* tryEndBB =
+ fgLookupBB(tryEndOff); // note: this can be NULL if the try region is at the end of the function
+ BasicBlock* hndBegBB = fgLookupBB(hndBegOff);
+ BasicBlock* hndEndBB = nullptr;
+ BasicBlock* filtBB = nullptr;
+ BasicBlock* block;
//
// Assert that the try/hnd beginning blocks are set up correctly
//
- if (tryBegBB == NULL)
+ if (tryBegBB == nullptr)
{
BADCODE("Try Clause is invalid");
}
- if (hndBegBB == NULL)
+ if (hndBegBB == nullptr)
{
BADCODE("Handler Clause is invalid");
}
@@ -5898,7 +6000,7 @@ void Compiler::fgFindBasicBlocks()
// and clear the rarely run flag
hndBegBB->makeBlockHot();
#else
- hndBegBB->bbSetRunRarely(); // handler entry points are rarely executed
+ hndBegBB->bbSetRunRarely(); // handler entry points are rarely executed
#endif
if (hndEndOff < info.compILCodeSize)
@@ -5910,8 +6012,8 @@ void Compiler::fgFindBasicBlocks()
{
filtBB = HBtab->ebdFilter = fgLookupBB(clause.FilterOffset);
- filtBB->bbCatchTyp = BBCT_FILTER;
- filtBB->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
+ filtBB->bbCatchTyp = BBCT_FILTER;
+ filtBB->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
hndBegBB->bbCatchTyp = BBCT_FILTER_HANDLER;
@@ -5920,16 +6022,15 @@ void Compiler::fgFindBasicBlocks()
// and clear the rarely run flag
filtBB->makeBlockHot();
#else
- filtBB->bbSetRunRarely(); // filter entry points are rarely executed
+ filtBB->bbSetRunRarely(); // filter entry points are rarely executed
#endif
// Mark all BBs that belong to the filter with the XTnum of the corresponding handler
for (block = filtBB; /**/; block = block->bbNext)
{
- if (block == NULL)
+ if (block == nullptr)
{
- BADCODE3("Missing endfilter for filter",
- " at offset %04X", filtBB->bbCodeOffs);
+ BADCODE3("Missing endfilter for filter", " at offset %04X", filtBB->bbCodeOffs);
return;
}
@@ -5946,8 +6047,10 @@ void Compiler::fgFindBasicBlocks()
}
if (!block->bbNext || block->bbNext != hndBegBB)
- BADCODE3("Filter does not immediately precede handler for filter",
- " at offset %04X", filtBB->bbCodeOffs);
+ {
+ BADCODE3("Filter does not immediately precede handler for filter", " at offset %04X",
+ filtBB->bbCodeOffs);
+ }
}
else
{
@@ -5957,22 +6060,24 @@ void Compiler::fgFindBasicBlocks()
if (clause.Flags & CORINFO_EH_CLAUSE_FINALLY)
{
- hndBegBB->bbCatchTyp = BBCT_FINALLY;
+ hndBegBB->bbCatchTyp = BBCT_FINALLY;
}
else
{
if (clause.Flags & CORINFO_EH_CLAUSE_FAULT)
{
- hndBegBB->bbCatchTyp = BBCT_FAULT;
+ hndBegBB->bbCatchTyp = BBCT_FAULT;
}
else
{
- hndBegBB->bbCatchTyp = clause.ClassToken;
+ hndBegBB->bbCatchTyp = clause.ClassToken;
// These values should be non-zero value that will
// not collide with real tokens for bbCatchTyp
if (clause.ClassToken == 0)
+ {
BADCODE("Exception catch type is Null");
+ }
noway_assert(clause.ClassToken != BBCT_FAULT);
noway_assert(clause.ClassToken != BBCT_FINALLY);
@@ -5989,18 +6094,18 @@ void Compiler::fgFindBasicBlocks()
/* Prevent future optimizations of removing the first block */
/* of a TRY block and the first block of an exception handler */
- tryBegBB->bbFlags |= BBF_DONT_REMOVE;
- hndBegBB->bbFlags |= BBF_DONT_REMOVE;
- hndBegBB->bbRefs++; // The first block of a handler gets an extra, "artificial" reference count.
+ tryBegBB->bbFlags |= BBF_DONT_REMOVE;
+ hndBegBB->bbFlags |= BBF_DONT_REMOVE;
+ hndBegBB->bbRefs++; // The first block of a handler gets an extra, "artificial" reference count.
if (clause.Flags & CORINFO_EH_CLAUSE_FILTER)
{
filtBB->bbFlags |= BBF_DONT_REMOVE;
- filtBB->bbRefs++; // The first block of a filter gets an extra, "artificial" reference count.
+ filtBB->bbRefs++; // The first block of a filter gets an extra, "artificial" reference count.
}
- tryBegBB->bbFlags |= BBF_DONT_REMOVE;
- hndBegBB->bbFlags |= BBF_DONT_REMOVE;
+ tryBegBB->bbFlags |= BBF_DONT_REMOVE;
+ hndBegBB->bbFlags |= BBF_DONT_REMOVE;
//
// Store the info to the table of EH block handlers
@@ -6039,45 +6144,45 @@ void Compiler::fgFindBasicBlocks()
// Next, set things related to nesting that depend on the sorting being complete.
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
/* Mark all blocks in the finally/fault or catch clause */
- BasicBlock * tryBegBB = HBtab->ebdTryBeg;
- BasicBlock * hndBegBB = HBtab->ebdHndBeg;
+ BasicBlock* tryBegBB = HBtab->ebdTryBeg;
+ BasicBlock* hndBegBB = HBtab->ebdHndBeg;
- IL_OFFSET tryBegOff = HBtab->ebdTryBegOffset;
- IL_OFFSET tryEndOff = HBtab->ebdTryEndOffset;
+ IL_OFFSET tryBegOff = HBtab->ebdTryBegOffset;
+ IL_OFFSET tryEndOff = HBtab->ebdTryEndOffset;
- IL_OFFSET hndBegOff = HBtab->ebdHndBegOffset;
- IL_OFFSET hndEndOff = HBtab->ebdHndEndOffset;
+ IL_OFFSET hndBegOff = HBtab->ebdHndBegOffset;
+ IL_OFFSET hndEndOff = HBtab->ebdHndEndOffset;
- BasicBlock * block;
+ BasicBlock* block;
- for (block = hndBegBB;
- block && (block->bbCodeOffs < hndEndOff);
- block = block->bbNext)
+ for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->bbNext)
{
if (!block->hasHndIndex())
+ {
block->setHndIndex(XTnum);
+ }
// All blocks in a catch handler or filter are rarely run, except the entry
if ((block != hndBegBB) && (hndBegBB->bbCatchTyp != BBCT_FINALLY))
+ {
block->bbSetRunRarely();
+ }
}
/* Mark all blocks within the covered range of the try */
- for (block = tryBegBB;
- block && (block->bbCodeOffs < tryEndOff);
- block = block->bbNext)
+ for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->bbNext)
{
/* Mark this BB as belonging to a 'try' block */
if (!block->hasTryIndex())
+ {
block->setTryIndex(XTnum);
+ }
#ifdef DEBUG
/* Note: the BB can't span the 'try' block */
@@ -6085,29 +6190,28 @@ void Compiler::fgFindBasicBlocks()
if (!(block->bbFlags & BBF_INTERNAL))
{
noway_assert(tryBegOff <= block->bbCodeOffs);
- noway_assert(tryEndOff >= block->bbCodeOffsEnd ||
- tryEndOff == tryBegOff );
+ noway_assert(tryEndOff >= block->bbCodeOffsEnd || tryEndOff == tryBegOff);
}
#endif
}
- /* Init ebdHandlerNestingLevel of current clause, and bump up value for all
- * enclosed clauses (which have to be before it in the table).
- * Innermost try-finally blocks must precede outermost
- * try-finally blocks.
- */
+/* Init ebdHandlerNestingLevel of current clause, and bump up value for all
+ * enclosed clauses (which have to be before it in the table).
+ * Innermost try-finally blocks must precede outermost
+ * try-finally blocks.
+ */
#if !FEATURE_EH_FUNCLETS
- HBtab->ebdHandlerNestingLevel = 0;
+ HBtab->ebdHandlerNestingLevel = 0;
#endif // !FEATURE_EH_FUNCLETS
- HBtab->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX;
- HBtab->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX;
+ HBtab->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX;
+ HBtab->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX;
- noway_assert(XTnum < compHndBBtabCount);
+ noway_assert(XTnum < compHndBBtabCount);
noway_assert(XTnum == ehGetIndex(HBtab));
- for (EHblkDsc * xtab = compHndBBtab; xtab < HBtab; xtab++)
+ for (EHblkDsc* xtab = compHndBBtab; xtab < HBtab; xtab++)
{
#if !FEATURE_EH_FUNCLETS
if (jitIsBetween(xtab->ebdHndBegOffs(), hndBegOff, hndEndOff))
@@ -6150,9 +6254,7 @@ void Compiler::fgFindBasicBlocks()
#if !FEATURE_EH_FUNCLETS
EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount;
- HBtab < HBtabEnd;
- HBtab++)
+ for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
{
if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel)
ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1;
@@ -6192,119 +6294,118 @@ void Compiler::fgFindBasicBlocks()
fgNormalizeEH();
}
-
/*****************************************************************************
* Check control flow constraints for well formed IL. Bail if any of the constraints
* are violated.
*/
-void Compiler::fgCheckBasicBlockControlFlow()
+void Compiler::fgCheckBasicBlockControlFlow()
{
assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks
- EHblkDsc *HBtab;
+ EHblkDsc* HBtab;
for (BasicBlock* blk = fgFirstBB; blk; blk = blk->bbNext)
{
if (blk->bbFlags & BBF_INTERNAL)
+ {
continue;
+ }
switch (blk->bbJumpKind)
{
- case BBJ_NONE: // block flows into the next one (no jump)
+ case BBJ_NONE: // block flows into the next one (no jump)
- fgControlFlowPermitted(blk,blk->bbNext);
+ fgControlFlowPermitted(blk, blk->bbNext);
- break;
+ break;
- case BBJ_ALWAYS: // block does unconditional jump to target
+ case BBJ_ALWAYS: // block does unconditional jump to target
- fgControlFlowPermitted(blk,blk->bbJumpDest);
+ fgControlFlowPermitted(blk, blk->bbJumpDest);
- break;
+ break;
- case BBJ_COND: // block conditionally jumps to the target
+ case BBJ_COND: // block conditionally jumps to the target
- fgControlFlowPermitted(blk,blk->bbNext);
+ fgControlFlowPermitted(blk, blk->bbNext);
- fgControlFlowPermitted(blk,blk->bbJumpDest);
+ fgControlFlowPermitted(blk, blk->bbJumpDest);
- break;
+ break;
- case BBJ_RETURN: // block ends with 'ret'
+ case BBJ_RETURN: // block ends with 'ret'
- if (blk->hasTryIndex() || blk->hasHndIndex())
- {
- BADCODE3("Return from a protected block",
- ". Before offset %04X", blk->bbCodeOffsEnd);
- }
- break;
+ if (blk->hasTryIndex() || blk->hasHndIndex())
+ {
+ BADCODE3("Return from a protected block", ". Before offset %04X", blk->bbCodeOffsEnd);
+ }
+ break;
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
- if (!blk->hasHndIndex()) // must be part of a handler
- {
- BADCODE3("Missing handler",
- ". Before offset %04X", blk->bbCodeOffsEnd);
- }
+ if (!blk->hasHndIndex()) // must be part of a handler
+ {
+ BADCODE3("Missing handler", ". Before offset %04X", blk->bbCodeOffsEnd);
+ }
- HBtab = ehGetDsc(blk->getHndIndex());
+ HBtab = ehGetDsc(blk->getHndIndex());
- // Endfilter allowed only in a filter block
- if (blk->bbJumpKind == BBJ_EHFILTERRET)
- {
- if (!HBtab->HasFilter())
+ // Endfilter allowed only in a filter block
+ if (blk->bbJumpKind == BBJ_EHFILTERRET)
{
- BADCODE("Unexpected endfilter");
+ if (!HBtab->HasFilter())
+ {
+ BADCODE("Unexpected endfilter");
+ }
+ }
+ // endfinally allowed only in a finally/fault block
+ else if (!HBtab->HasFinallyOrFaultHandler())
+ {
+ BADCODE("Unexpected endfinally");
}
- }
- // endfinally allowed only in a finally/fault block
- else if (!HBtab->HasFinallyOrFaultHandler())
- {
- BADCODE("Unexpected endfinally");
- }
- // The handler block should be the innermost block
- // Exception blocks are listed, innermost first.
- if (blk->hasTryIndex() && (blk->getTryIndex() < blk->getHndIndex()))
- {
- BADCODE("endfinally / endfilter in nested try block");
- }
+ // The handler block should be the innermost block
+ // Exception blocks are listed, innermost first.
+ if (blk->hasTryIndex() && (blk->getTryIndex() < blk->getHndIndex()))
+ {
+ BADCODE("endfinally / endfilter in nested try block");
+ }
- break;
+ break;
- case BBJ_THROW: // block ends with 'throw'
- /* throw is permitted from every BB, so nothing to check */
- /* importer makes sure that rethrow is done from a catch */
- break;
+ case BBJ_THROW: // block ends with 'throw'
+ /* throw is permitted from every BB, so nothing to check */
+ /* importer makes sure that rethrow is done from a catch */
+ break;
- case BBJ_LEAVE: // block always jumps to the target, maybe out of guarded
- // region. Used temporarily until importing
- fgControlFlowPermitted(blk, blk->bbJumpDest,TRUE);
+ case BBJ_LEAVE: // block always jumps to the target, maybe out of guarded
+ // region. Used temporarily until importing
+ fgControlFlowPermitted(blk, blk->bbJumpDest, TRUE);
- break;
+ break;
- case BBJ_SWITCH: // block ends with a switch statement
+ case BBJ_SWITCH: // block ends with a switch statement
- BBswtDesc* swtDesc;
- swtDesc = blk->bbJumpSwt;
+ BBswtDesc* swtDesc;
+ swtDesc = blk->bbJumpSwt;
- assert (swtDesc);
+ assert(swtDesc);
- unsigned i;
- for (i=0; i<swtDesc->bbsCount; i++)
- {
- fgControlFlowPermitted(blk,swtDesc->bbsDstTab[i]);
- }
+ unsigned i;
+ for (i = 0; i < swtDesc->bbsCount; i++)
+ {
+ fgControlFlowPermitted(blk, swtDesc->bbsDstTab[i]);
+ }
- break;
+ break;
- case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only #if FEATURE_EH_FUNCLETS)
- case BBJ_CALLFINALLY: // block always calls the target finally
- default:
- noway_assert(!"Unexpected bbJumpKind"); // these blocks don't get created until importing
- break;
+ case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only #if FEATURE_EH_FUNCLETS)
+ case BBJ_CALLFINALLY: // block always calls the target finally
+ default:
+ noway_assert(!"Unexpected bbJumpKind"); // these blocks don't get created until importing
+ break;
}
}
}
@@ -6314,21 +6415,19 @@ void Compiler::fgCheckBasicBlockControlFlow()
* Consider removing this check here if we can do it cheaply during importing
*/
-void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
- BasicBlock* blkDest,
- BOOL isLeave)
+void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, BOOL isLeave)
{
assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks
- unsigned srcHndBeg, destHndBeg;
- unsigned srcHndEnd, destHndEnd;
- bool srcInFilter, destInFilter;
- bool srcInCatch = false;
+ unsigned srcHndBeg, destHndBeg;
+ unsigned srcHndEnd, destHndEnd;
+ bool srcInFilter, destInFilter;
+ bool srcInCatch = false;
- EHblkDsc* srcHndTab;
+ EHblkDsc* srcHndTab;
- srcHndTab = ehInitHndRange(blkSrc, &srcHndBeg, &srcHndEnd, &srcInFilter);
- ehInitHndRange(blkDest, &destHndBeg, &destHndEnd, &destInFilter);
+ srcHndTab = ehInitHndRange(blkSrc, &srcHndBeg, &srcHndEnd, &srcInFilter);
+ ehInitHndRange(blkDest, &destHndBeg, &destHndEnd, &destInFilter);
/* Impose the rules for leaving or jumping from handler blocks */
@@ -6339,7 +6438,7 @@ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
/* Are we jumping within the same handler index? */
if (BasicBlock::sameHndRegion(blkSrc, blkDest))
{
- /* Do we have a filter clause? */
+ /* Do we have a filter clause? */
if (srcHndTab->HasFilter())
{
/* filters and catch handlers share same eh index */
@@ -6347,8 +6446,10 @@ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
if (srcInFilter != destInFilter)
{
if (!jitIsBetween(blkDest->bbCodeOffs, srcHndBeg, srcHndEnd))
- BADCODE3("Illegal control flow between filter and handler",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ {
+ BADCODE3("Illegal control flow between filter and handler", ". Before offset %04X",
+ blkSrc->bbCodeOffsEnd);
+ }
}
}
}
@@ -6359,14 +6460,14 @@ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
{
/* Any leave instructions must not enter the dest handler from outside*/
if (!jitIsBetween(srcHndBeg, destHndBeg, destHndEnd))
- BADCODE3("Illegal use of leave to enter handler",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ {
+ BADCODE3("Illegal use of leave to enter handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ }
}
else
{
/* We must use a leave to exit a handler */
- BADCODE3("Illegal control flow out of a handler",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("Illegal control flow out of a handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
}
/* Do we have a filter clause? */
@@ -6376,31 +6477,27 @@ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
/* but not from the filter block of a filter */
if (srcInFilter != destInFilter)
{
- BADCODE3("Illegal to leave a filter handler",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("Illegal to leave a filter handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
}
}
/* We should never leave a finally handler */
if (srcHndTab->HasFinallyHandler())
{
- BADCODE3("Illegal to leave a finally handler",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("Illegal to leave a finally handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
}
/* We should never leave a fault handler */
if (srcHndTab->HasFaultHandler())
{
- BADCODE3("Illegal to leave a fault handler",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("Illegal to leave a fault handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
}
}
}
else if (blkDest->hasHndIndex())
{
/* blkSrc was not inside a handler, but blkDst is inside a handler */
- BADCODE3("Illegal control flow into a handler",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("Illegal control flow into a handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
}
/* Are we jumping from a catch handler into the corresponding try? */
@@ -6410,12 +6507,13 @@ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
{
// inspect all handlers containing the jump source
- bool bValidJumpToTry = false; // are we jumping in a valid way from a catch to the corresponding try?
- bool bCatchHandlerOnly = true; // false if we are jumping out of a non-catch handler
+ bool bValidJumpToTry = false; // are we jumping in a valid way from a catch to the corresponding try?
+ bool bCatchHandlerOnly = true; // false if we are jumping out of a non-catch handler
EHblkDsc* ehTableEnd;
EHblkDsc* ehDsc;
- for (ehDsc = compHndBBtab, ehTableEnd = compHndBBtab + compHndBBtabCount; bCatchHandlerOnly && ehDsc < ehTableEnd; ehDsc++)
+ for (ehDsc = compHndBBtab, ehTableEnd = compHndBBtab + compHndBBtabCount;
+ bCatchHandlerOnly && ehDsc < ehTableEnd; ehDsc++)
{
if (ehDsc->InHndRegionILRange(blkSrc))
{
@@ -6477,27 +6575,33 @@ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
// If it's a handler, but not a catch handler, it must be either a finally or fault
if (!ehDsc->HasFinallyOrFaultHandler())
{
- BADCODE3("Handlers must be catch, finally, or fault",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("Handlers must be catch, finally, or fault", ". Before offset %04X",
+ blkSrc->bbCodeOffsEnd);
}
// Are we jumping out of this handler?
if (!ehDsc->InHndRegionILRange(blkDest))
+ {
bCatchHandlerOnly = false;
+ }
}
}
else if (ehDsc->InFilterRegionILRange(blkSrc))
{
- // Are we jumping out of a filter?
- if (!ehDsc->InFilterRegionILRange(blkDest))
+ // Are we jumping out of a filter?
+ if (!ehDsc->InFilterRegionILRange(blkDest))
+ {
bCatchHandlerOnly = false;
+ }
}
}
if (bCatchHandlerOnly)
{
if (bValidJumpToTry)
+ {
return;
+ }
else
{
// FALL THROUGH
@@ -6510,33 +6614,29 @@ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
}
else
{
- BADCODE3("illegal leave to exit a finally, fault or filter",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("illegal leave to exit a finally, fault or filter", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
}
-
}
/* Check all the try block rules */
- IL_OFFSET srcTryBeg;
- IL_OFFSET srcTryEnd;
- IL_OFFSET destTryBeg;
- IL_OFFSET destTryEnd;
+ IL_OFFSET srcTryBeg;
+ IL_OFFSET srcTryEnd;
+ IL_OFFSET destTryBeg;
+ IL_OFFSET destTryEnd;
- ehInitTryRange(blkSrc, &srcTryBeg, &srcTryEnd);
+ ehInitTryRange(blkSrc, &srcTryBeg, &srcTryEnd);
ehInitTryRange(blkDest, &destTryBeg, &destTryEnd);
/* Are we jumping between try indexes? */
if (!BasicBlock::sameTryRegion(blkSrc, blkDest))
{
// Are we exiting from an inner to outer try?
- if (jitIsBetween(srcTryBeg, destTryBeg, destTryEnd) &&
- jitIsBetween(srcTryEnd-1, destTryBeg, destTryEnd) )
+ if (jitIsBetween(srcTryBeg, destTryBeg, destTryEnd) && jitIsBetween(srcTryEnd - 1, destTryBeg, destTryEnd))
{
if (!isLeave)
{
- BADCODE3("exit from try block without a leave",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("exit from try block without a leave", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
}
}
else if (jitIsBetween(destTryBeg, srcTryBeg, srcTryEnd))
@@ -6544,8 +6644,7 @@ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
// check that the dest Try is first instruction of an inner try
if (!fgFlowToFirstBlockOfInnerTry(blkSrc, blkDest, false))
{
- BADCODE3("control flow into middle of try",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("control flow into middle of try", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
}
}
else // there is no nesting relationship between src and dest
@@ -6555,14 +6654,12 @@ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
// check that the dest Try is first instruction of an inner try sibling
if (!fgFlowToFirstBlockOfInnerTry(blkSrc, blkDest, true))
{
- BADCODE3("illegal leave into middle of try",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("illegal leave into middle of try", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
}
}
else
{
- BADCODE3("illegal control flow in to/out of try block",
- ". Before offset %04X", blkSrc->bbCodeOffsEnd);
+ BADCODE3("illegal control flow in to/out of try block", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
}
}
}
@@ -6573,21 +6670,18 @@ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc,
* with no intervening trys in between
*/
-bool Compiler::fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc,
- BasicBlock* blkDest,
- bool sibling)
+bool Compiler::fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling)
{
assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks
noway_assert(blkDest->hasTryIndex());
- unsigned XTnum = blkDest->getTryIndex();
- unsigned lastXTnum = blkSrc->hasTryIndex() ? blkSrc->getTryIndex()
- : compHndBBtabCount;
- noway_assert(XTnum < compHndBBtabCount);
+ unsigned XTnum = blkDest->getTryIndex();
+ unsigned lastXTnum = blkSrc->hasTryIndex() ? blkSrc->getTryIndex() : compHndBBtabCount;
+ noway_assert(XTnum < compHndBBtabCount);
noway_assert(lastXTnum <= compHndBBtabCount);
- EHblkDsc* HBtab = ehGetDsc(XTnum);
+ EHblkDsc* HBtab = ehGetDsc(XTnum);
// check that we are not jumping into middle of try
if (HBtab->ebdTryBeg != blkDest)
@@ -6604,13 +6698,9 @@ bool Compiler::fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc,
HBtab = ehGetDsc(lastXTnum);
- for (lastXTnum++, HBtab++;
- lastXTnum < compHndBBtabCount;
- lastXTnum++, HBtab++)
+ for (lastXTnum++, HBtab++; lastXTnum < compHndBBtabCount; lastXTnum++, HBtab++)
{
- if (jitIsBetweenInclusive(blkDest->bbNum,
- HBtab->ebdTryBeg->bbNum,
- HBtab->ebdTryLast->bbNum))
+ if (jitIsBetweenInclusive(blkDest->bbNum, HBtab->ebdTryBeg->bbNum, HBtab->ebdTryLast->bbNum))
{
break;
}
@@ -6623,9 +6713,7 @@ bool Compiler::fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc,
HBtab = ehGetDsc(XTnum);
- for (XTnum++, HBtab++;
- XTnum < lastXTnum;
- XTnum++, HBtab++)
+ for (XTnum++, HBtab++; XTnum < lastXTnum; XTnum++, HBtab++)
{
if (HBtab->ebdTryBeg->bbNum < blkDest->bbNum && blkDest->bbNum <= HBtab->ebdTryLast->bbNum)
{
@@ -6642,26 +6730,21 @@ bool Compiler::fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc,
* finally-protected try the block is in.
*/
-unsigned Compiler::fgGetNestingLevel(BasicBlock * block,
- unsigned * pFinallyNesting)
+unsigned Compiler::fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting)
{
- unsigned curNesting = 0; // How many handlers is the block in
- unsigned tryFin = (unsigned)-1; // curNesting when we see innermost finally-protected try
- unsigned XTnum;
- EHblkDsc * HBtab;
+ unsigned curNesting = 0; // How many handlers is the block in
+ unsigned tryFin = (unsigned)-1; // curNesting when we see innermost finally-protected try
+ unsigned XTnum;
+ EHblkDsc* HBtab;
/* We find the block's handler nesting level by walking over the
complete exception table and find enclosing clauses. */
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++, HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
noway_assert(HBtab->ebdTryBeg && HBtab->ebdHndBeg);
- if (HBtab->HasFinallyHandler() &&
- (tryFin == (unsigned)-1) &&
- bbInTryRegions(XTnum, block))
+ if (HBtab->HasFinallyHandler() && (tryFin == (unsigned)-1) && bbInTryRegions(XTnum, block))
{
tryFin = curNesting;
}
@@ -6671,11 +6754,15 @@ unsigned Compiler::fgGetNestingLevel(BasicBlock * block,
}
}
- if (tryFin == (unsigned)-1)
+ if (tryFin == (unsigned)-1)
+ {
tryFin = curNesting;
+ }
- if (pFinallyNesting)
+ if (pFinallyNesting)
+ {
*pFinallyNesting = curNesting - tryFin;
+ }
return curNesting;
}
@@ -6685,7 +6772,7 @@ unsigned Compiler::fgGetNestingLevel(BasicBlock * block,
* Import the basic blocks of the procedure.
*/
-void Compiler::fgImport()
+void Compiler::fgImport()
{
fgHasPostfix = false;
@@ -6694,37 +6781,34 @@ void Compiler::fgImport()
if (!(opts.eeFlags & CORJIT_FLG_SKIP_VERIFICATION))
{
CorInfoMethodRuntimeFlags verFlag;
- verFlag = tiIsVerifiableCode ? CORINFO_FLG_VERIFIABLE
- : CORINFO_FLG_UNVERIFIABLE;
+ verFlag = tiIsVerifiableCode ? CORINFO_FLG_VERIFIABLE : CORINFO_FLG_UNVERIFIABLE;
info.compCompHnd->setMethodAttribs(info.compMethodHnd, verFlag);
}
}
-
/*****************************************************************************
* This function returns true if tree is a node with a call
* that unconditionally throws an exception
*/
-bool Compiler::fgIsThrow(GenTreePtr tree)
+bool Compiler::fgIsThrow(GenTreePtr tree)
{
- if ((tree->gtOper != GT_CALL ) ||
- (tree->gtCall.gtCallType != CT_HELPER) )
+ if ((tree->gtOper != GT_CALL) || (tree->gtCall.gtCallType != CT_HELPER))
{
return false;
}
// TODO-Throughput: Replace all these calls to eeFindHelper() with a table based lookup
- if ((tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_OVERFLOW) ) ||
+ if ((tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_OVERFLOW)) ||
(tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_VERIFICATION)) ||
- (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_RNGCHKFAIL) ) ||
+ (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_RNGCHKFAIL)) ||
(tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROWDIVZERO)) ||
#if COR_JIT_EE_VERSION > 460
(tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROWNULLREF)) ||
#endif // COR_JIT_EE_VERSION
- (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROW) ) ||
- (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_RETHROW) ) )
+ (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROW)) ||
+ (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_RETHROW)))
{
noway_assert(tree->gtFlags & GTF_CALL);
noway_assert(tree->gtFlags & GTF_EXCEPT);
@@ -6732,7 +6816,7 @@ bool Compiler::fgIsThrow(GenTreePtr tree)
}
// TODO-CQ: there are a bunch of managed methods in [mscorlib]System.ThrowHelper
- // that would be nice to recognize.
+ // that would be nice to recognize.
return false;
}
@@ -6742,25 +6826,25 @@ bool Compiler::fgIsThrow(GenTreePtr tree)
* It returns false when the blocks are both in the same regions
*/
-bool Compiler::fgInDifferentRegions(BasicBlock *blk1, BasicBlock *blk2)
+bool Compiler::fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2)
{
- noway_assert(blk1 != NULL);
- noway_assert(blk2 != NULL);
+ noway_assert(blk1 != nullptr);
+ noway_assert(blk2 != nullptr);
- if (fgFirstColdBlock == NULL)
+ if (fgFirstColdBlock == nullptr)
{
return false;
}
// If one block is Hot and the other is Cold then we are in different regions
- return ((blk1->bbFlags & BBF_COLD)!= (blk2->bbFlags & BBF_COLD));
+ return ((blk1->bbFlags & BBF_COLD) != (blk2->bbFlags & BBF_COLD));
}
-bool Compiler::fgIsBlockCold(BasicBlock *blk)
+bool Compiler::fgIsBlockCold(BasicBlock* blk)
{
- noway_assert(blk != NULL);
+ noway_assert(blk != nullptr);
- if (fgFirstColdBlock == NULL)
+ if (fgFirstColdBlock == nullptr)
{
return false;
}
@@ -6768,27 +6852,23 @@ bool Compiler::fgIsBlockCold(BasicBlock *blk)
return ((blk->bbFlags & BBF_COLD) != 0);
}
-
/*****************************************************************************
* This function returns true if tree is a GT_COMMA node with a call
* that unconditionally throws an exception
*/
-bool Compiler::fgIsCommaThrow(GenTreePtr tree,
- bool forFolding /* = false */)
+bool Compiler::fgIsCommaThrow(GenTreePtr tree, bool forFolding /* = false */)
{
// Instead of always folding comma throws,
// with stress enabled we only fold half the time
if (forFolding && compStressCompile(STRESS_FOLD, 50))
{
- return false; /* Don't fold */
+ return false; /* Don't fold */
}
/* Check for cast of a GT_COMMA with a throw overflow */
- if ((tree->gtOper == GT_COMMA) &&
- (tree->gtFlags & GTF_CALL) &&
- (tree->gtFlags & GTF_EXCEPT))
+ if ((tree->gtOper == GT_COMMA) && (tree->gtFlags & GTF_CALL) && (tree->gtFlags & GTF_EXCEPT))
{
return (fgIsThrow(tree->gtOp.gtOp1));
}
@@ -6806,7 +6886,7 @@ bool Compiler::fgIsCommaThrow(GenTreePtr tree,
// whose arg in turn is a LCL_VAR, return that LCL_VAR node, else nullptr.
//
// static
-GenTreePtr Compiler::fgIsIndirOfAddrOfLocal(GenTreePtr tree)
+GenTreePtr Compiler::fgIsIndirOfAddrOfLocal(GenTreePtr tree)
{
GenTreePtr res = nullptr;
if (tree->OperIsIndir())
@@ -6822,8 +6902,8 @@ GenTreePtr Compiler::fgIsIndirOfAddrOfLocal(GenTreePtr tree)
// Therefore it is critical that we don't miss 'uses' of any local. It may seem this method overlooks
// if the index part of the LEA has indir( someAddrOperator ( lclVar ) ) to search for a use but it's
// covered by the fact we're traversing the expression in execution order and we also visit the index.
- GenTreeAddrMode* lea = addr->AsAddrMode();
- GenTreePtr base = lea->Base();
+ GenTreeAddrMode* lea = addr->AsAddrMode();
+ GenTreePtr base = lea->Base();
if (base != nullptr)
{
@@ -6843,7 +6923,7 @@ GenTreePtr Compiler::fgIsIndirOfAddrOfLocal(GenTreePtr tree)
{
res = lclvar;
}
- }
+ }
else if (addr->OperGet() == GT_LCL_VAR_ADDR)
{
res = addr;
@@ -6852,11 +6932,10 @@ GenTreePtr Compiler::fgIsIndirOfAddrOfLocal(GenTreePtr tree)
return res;
}
-
-GenTreePtr Compiler::fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper)
+GenTreePtr Compiler::fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper)
{
- bool bNeedClassID = true;
- unsigned callFlags = 0;
+ bool bNeedClassID = true;
+ unsigned callFlags = 0;
var_types type = TYP_BYREF;
@@ -6864,56 +6943,56 @@ GenTreePtr Compiler::fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls,
// We need the return type.
switch (helper)
{
- case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
- bNeedClassID = false;
- __fallthrough;
+ case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
+ bNeedClassID = false;
+ __fallthrough;
- case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
- callFlags |= GTF_CALL_HOISTABLE;
- __fallthrough;
+ case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
+ callFlags |= GTF_CALL_HOISTABLE;
+ __fallthrough;
- case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
- case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
- case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS:
- case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
- case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
- // type = TYP_BYREF;
- break;
+ case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
+ case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
+ case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS:
+ case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
+ case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
+ // type = TYP_BYREF;
+ break;
- case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR:
- bNeedClassID = false;
- __fallthrough;
+ case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR:
+ bNeedClassID = false;
+ __fallthrough;
- case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR:
- callFlags |= GTF_CALL_HOISTABLE;
- __fallthrough;
+ case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR:
+ callFlags |= GTF_CALL_HOISTABLE;
+ __fallthrough;
- case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE:
- case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE:
- case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS:
- case CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS:
- type = TYP_I_IMPL;
- break;
+ case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE:
+ case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE:
+ case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS:
+ case CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS:
+ type = TYP_I_IMPL;
+ break;
- default:
- assert(!"unknown shared statics helper");
- break;
+ default:
+ assert(!"unknown shared statics helper");
+ break;
}
- GenTreeArgList* argList = NULL;
+ GenTreeArgList* argList = nullptr;
GenTreePtr opModuleIDArg;
GenTreePtr opClassIDArg;
// Get the class ID
unsigned clsID;
- size_t moduleID;
- void* pclsID;
- void* pmoduleID;
+ size_t moduleID;
+ void* pclsID;
+ void* pmoduleID;
- clsID = info.compCompHnd->getClassDomainID(cls, &pclsID);
+ clsID = info.compCompHnd->getClassDomainID(cls, &pclsID);
- moduleID = info.compCompHnd->getClassModuleIdForStatics(cls, NULL, &pmoduleID);
+ moduleID = info.compCompHnd->getClassModuleIdForStatics(cls, nullptr, &pmoduleID);
if (!(callFlags & GTF_CALL_HOISTABLE))
{
@@ -6925,7 +7004,7 @@ GenTreePtr Compiler::fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls,
if (pmoduleID)
{
- opModuleIDArg = gtNewIconHandleNode((size_t) pmoduleID, GTF_ICON_CIDMID_HDL);
+ opModuleIDArg = gtNewIconHandleNode((size_t)pmoduleID, GTF_ICON_CIDMID_HDL);
opModuleIDArg = gtNewOperNode(GT_IND, TYP_I_IMPL, opModuleIDArg);
opModuleIDArg->gtFlags |= GTF_IND_INVARIANT;
}
@@ -6938,7 +7017,7 @@ GenTreePtr Compiler::fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls,
{
if (pclsID)
{
- opClassIDArg = gtNewIconHandleNode((size_t) pclsID, GTF_ICON_CIDMID_HDL);
+ opClassIDArg = gtNewIconHandleNode((size_t)pclsID, GTF_ICON_CIDMID_HDL);
opClassIDArg = gtNewOperNode(GT_IND, TYP_INT, opClassIDArg);
opClassIDArg->gtFlags |= GTF_IND_INVARIANT;
}
@@ -6963,7 +7042,7 @@ GenTreePtr Compiler::fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls,
return gtNewHelperCallNode(helper, type, callFlags, argList);
}
-GenTreePtr Compiler::fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls)
+GenTreePtr Compiler::fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls)
{
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
@@ -6980,7 +7059,6 @@ GenTreePtr Compiler::fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls)
return fgGetStaticsCCtorHelper(cls, info.compCompHnd->getSharedCCtorHelper(cls));
}
-
//
// Returns true unless the address expression could
// never represent a NULL
@@ -7003,7 +7081,7 @@ bool Compiler::fgAddrCouldBeNull(GenTreePtr addr)
{
return false;
}
- return false; // we can't have a null address
+ return false; // we can't have a null address
}
else if (addr->gtOper == GT_ADD)
{
@@ -7018,7 +7096,7 @@ bool Compiler::fgAddrCouldBeNull(GenTreePtr addr)
return fgAddrCouldBeNull(addr->gtOp.gtOp2);
}
}
- else // Op1 was a handle represented as a constant
+ else // Op1 was a handle represented as a constant
{
// Is Op2 also a constant?
if (addr->gtOp.gtOp2->gtOper == GT_CNS_INT)
@@ -7029,7 +7107,7 @@ bool Compiler::fgAddrCouldBeNull(GenTreePtr addr)
{
if (!fgIsBigOffset(cns2Tree->gtIntCon.gtIconVal))
{
- // Op2 was an ordinary small constant
+ // Op2 was an ordinary small constant
return false; // we can't have a null address
}
}
@@ -7055,29 +7133,28 @@ bool Compiler::fgAddrCouldBeNull(GenTreePtr addr)
}
}
}
- return true; // default result: addr could be null
+ return true; // default result: addr could be null
}
-
/*****************************************************************************
* Optimize the call to the delegate constructor.
*/
-GenTreePtr Compiler::fgOptimizeDelegateConstructor(GenTreePtr call, CORINFO_CONTEXT_HANDLE * ExactContextHnd)
+GenTreePtr Compiler::fgOptimizeDelegateConstructor(GenTreePtr call, CORINFO_CONTEXT_HANDLE* ExactContextHnd)
{
noway_assert(call->gtOper == GT_CALL);
noway_assert(call->gtCall.gtCallType == CT_USER_FUNC);
CORINFO_METHOD_HANDLE methHnd = call->gtCall.gtCallMethHnd;
- CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getMethodClass(methHnd);
+ CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getMethodClass(methHnd);
GenTreePtr targetMethod = call->gtCall.gtCallArgs->gtOp.gtOp2->gtOp.gtOp1;
noway_assert(targetMethod->TypeGet() == TYP_I_IMPL);
genTreeOps oper = targetMethod->OperGet();
if (oper == GT_FTN_ADDR || oper == GT_CALL || oper == GT_QMARK)
{
- CORINFO_METHOD_HANDLE targetMethodHnd = NULL;
- GenTreePtr qmarkNode = NULL;
+ CORINFO_METHOD_HANDLE targetMethodHnd = nullptr;
+ GenTreePtr qmarkNode = nullptr;
if (oper == GT_FTN_ADDR)
{
targetMethodHnd = targetMethod->gtFptrVal.gtFptrMethod;
@@ -7091,15 +7168,15 @@ GenTreePtr Compiler::fgOptimizeDelegateConstructor(GenTreePtr call, CORINFO_C
// it's a ldvirtftn case, fetch the methodhandle off the helper for ldvirtftn. It's the 3rd arg
targetMethodHnd = CORINFO_METHOD_HANDLE(handleNode->gtIntCon.gtCompileTimeHandle);
}
- //Sometimes the argument to this is the result of a generic dictionary lookup, which shows
- //up as a GT_QMARK.
+ // Sometimes the argument to this is the result of a generic dictionary lookup, which shows
+ // up as a GT_QMARK.
else if (handleNode->OperGet() == GT_QMARK)
{
qmarkNode = handleNode;
}
}
- //Sometimes we don't call CORINFO_HELP_VIRTUAL_FUNC_PTR but instead just call
- //CORINFO_HELP_RUNTIMEHANDLE_METHOD directly.
+ // Sometimes we don't call CORINFO_HELP_VIRTUAL_FUNC_PTR but instead just call
+ // CORINFO_HELP_RUNTIMEHANDLE_METHOD directly.
else if (oper == GT_QMARK)
{
qmarkNode = targetMethod;
@@ -7107,22 +7184,22 @@ GenTreePtr Compiler::fgOptimizeDelegateConstructor(GenTreePtr call, CORINFO_C
if (qmarkNode)
{
noway_assert(qmarkNode->OperGet() == GT_QMARK);
- //The argument is actually a generic dictionary lookup. For delegate creation it looks
- //like:
- //GT_QMARK
+ // The argument is actually a generic dictionary lookup. For delegate creation it looks
+ // like:
+ // GT_QMARK
// GT_COLON
// op1 -> call
// Arg 1 -> token (has compile time handle)
// op2 -> lclvar
//
//
- //In this case I can find the token (which is a method handle) and that is the compile time
- //handle.
+ // In this case I can find the token (which is a method handle) and that is the compile time
+ // handle.
noway_assert(qmarkNode->gtOp.gtOp2->OperGet() == GT_COLON);
noway_assert(qmarkNode->gtOp.gtOp2->gtOp.gtOp1->OperGet() == GT_CALL);
GenTreePtr runtimeLookupCall = qmarkNode->gtOp.gtOp2->gtOp.gtOp1;
- //This could be any of CORINFO_HELP_RUNTIMEHANDLE_(METHOD|CLASS)(_LOG?)
+ // This could be any of CORINFO_HELP_RUNTIMEHANDLE_(METHOD|CLASS)(_LOG?)
GenTreePtr tokenNode = runtimeLookupCall->gtCall.gtCallArgs->gtOp.gtOp2->gtOp.gtOp1;
noway_assert(tokenNode->OperGet() == GT_CNS_INT);
targetMethodHnd = CORINFO_METHOD_HANDLE(tokenNode->gtIntCon.gtCompileTimeHandle);
@@ -7135,7 +7212,7 @@ GenTreePtr Compiler::fgOptimizeDelegateConstructor(GenTreePtr call, CORINFO_C
if (oper == GT_FTN_ADDR)
{
// The first argument of the helper is delegate this pointer
- GenTreeArgList* helperArgs = gtNewArgList(call->gtCall.gtCallObjp);
+ GenTreeArgList* helperArgs = gtNewArgList(call->gtCall.gtCallObjp);
CORINFO_CONST_LOOKUP entryPoint;
// The second argument of the helper is the target object pointers
@@ -7143,35 +7220,36 @@ GenTreePtr Compiler::fgOptimizeDelegateConstructor(GenTreePtr call, CORINFO_C
call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_DELEGATE_CTOR, TYP_VOID, GTF_EXCEPT, helperArgs);
#if COR_JIT_EE_VERSION > 460
- info.compCompHnd->getReadyToRunDelegateCtorHelper(targetMethod->gtFptrVal.gtLdftnResolvedToken, clsHnd, &entryPoint);
+ info.compCompHnd->getReadyToRunDelegateCtorHelper(targetMethod->gtFptrVal.gtLdftnResolvedToken, clsHnd,
+ &entryPoint);
#else
info.compCompHnd->getReadyToRunHelper(targetMethod->gtFptrVal.gtLdftnResolvedToken,
- CORINFO_HELP_READYTORUN_DELEGATE_CTOR, &entryPoint);
+ CORINFO_HELP_READYTORUN_DELEGATE_CTOR, &entryPoint);
#endif
call->gtCall.setEntryPoint(entryPoint);
}
}
else
#endif
- if (targetMethodHnd != NULL)
+ if (targetMethodHnd != nullptr)
{
- CORINFO_METHOD_HANDLE alternateCtor = NULL;
- DelegateCtorArgs ctorData;
+ CORINFO_METHOD_HANDLE alternateCtor = nullptr;
+ DelegateCtorArgs ctorData;
ctorData.pMethod = info.compMethodHnd;
- ctorData.pArg3 = NULL;
- ctorData.pArg4 = NULL;
- ctorData.pArg5 = NULL;
+ ctorData.pArg3 = nullptr;
+ ctorData.pArg4 = nullptr;
+ ctorData.pArg5 = nullptr;
alternateCtor = info.compCompHnd->GetDelegateCtor(methHnd, clsHnd, targetMethodHnd, &ctorData);
if (alternateCtor != methHnd)
{
// we erase any inline info that may have been set for generics has it is not needed here,
// and in fact it will pass the wrong info to the inliner code
- * ExactContextHnd = 0;
+ *ExactContextHnd = nullptr;
call->gtCall.gtCallMethHnd = alternateCtor;
- noway_assert(call->gtCall.gtCallArgs->gtOp.gtOp2->gtOp.gtOp2 == NULL);
+ noway_assert(call->gtCall.gtCallArgs->gtOp.gtOp2->gtOp.gtOp2 == nullptr);
if (ctorData.pArg3)
{
call->gtCall.gtCallArgs->gtOp.gtOp2->gtOp.gtOp2 =
@@ -7196,7 +7274,6 @@ GenTreePtr Compiler::fgOptimizeDelegateConstructor(GenTreePtr call, CORINFO_C
return call;
}
-
bool Compiler::fgCastNeeded(GenTreePtr tree, var_types toType)
{
//
@@ -7204,7 +7281,9 @@ bool Compiler::fgCastNeeded(GenTreePtr tree, var_types toType)
// then we never need to insert a cast
//
if ((tree->OperKind() & GTK_RELOP) && (genActualType(toType) == TYP_INT))
+ {
return false;
+ }
var_types fromType;
@@ -7217,7 +7296,7 @@ bool Compiler::fgCastNeeded(GenTreePtr tree, var_types toType)
}
else if (tree->OperGet() == GT_CALL)
{
- fromType = (var_types) tree->gtCall.gtReturnType;
+ fromType = (var_types)tree->gtCall.gtReturnType;
}
else
{
@@ -7228,17 +7307,23 @@ bool Compiler::fgCastNeeded(GenTreePtr tree, var_types toType)
// If both types are the same then an additional cast is not necessary
//
if (toType == fromType)
+ {
return false;
+ }
//
// If the sign-ness of the two types are different then a cast is necessary
//
if (varTypeIsUnsigned(toType) != varTypeIsUnsigned(fromType))
+ {
return true;
+ }
//
// If the from type is the same size or smaller then an additional cast is not necessary
//
if (genTypeSize(toType) >= genTypeSize(fromType))
+ {
return false;
+ }
//
// Looks like we will need the cast
@@ -7255,18 +7340,18 @@ GenTreePtr Compiler::fgDoNormalizeOnStore(GenTreePtr tree)
//
if (fgGlobalMorph)
{
- noway_assert(tree->OperGet()==GT_ASG);
+ noway_assert(tree->OperGet() == GT_ASG);
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
if (op1->gtOper == GT_LCL_VAR && genActualType(op1->TypeGet()) == TYP_INT)
{
// Small-typed arguments and aliased locals are normalized on load.
// Other small-typed locals are normalized on store.
// If it is an assignment to one of the latter, insert the cast on RHS
- unsigned varNum = op1->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &lvaTable[varNum];
+ unsigned varNum = op1->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[varNum];
if (varDsc->lvNormalizeOnStore())
{
@@ -7275,11 +7360,11 @@ GenTreePtr Compiler::fgDoNormalizeOnStore(GenTreePtr tree)
if (fgCastNeeded(op2, varDsc->TypeGet()))
{
- op2 = gtNewCastNode(TYP_INT, op2, varDsc->TypeGet());
+ op2 = gtNewCastNode(TYP_INT, op2, varDsc->TypeGet());
tree->gtOp.gtOp2 = op2;
// Propagate GTF_COLON_COND
- op2->gtFlags|=(tree->gtFlags & GTF_COLON_COND);
+ op2->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
}
}
}
@@ -7288,37 +7373,38 @@ GenTreePtr Compiler::fgDoNormalizeOnStore(GenTreePtr tree)
return tree;
}
-
/*****************************************************************************
*
* Mark whether the edge "srcBB -> dstBB" forms a loop that will always
* execute a call or not.
*/
-inline
-void Compiler::fgLoopCallTest(BasicBlock *srcBB,
- BasicBlock *dstBB)
+inline void Compiler::fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB)
{
/* Bail if this is not a backward edge */
- if (srcBB->bbNum < dstBB->bbNum)
+ if (srcBB->bbNum < dstBB->bbNum)
+ {
return;
+ }
/* Unless we already know that there is a loop without a call here ... */
- if (!(dstBB->bbFlags & BBF_LOOP_CALL0))
+ if (!(dstBB->bbFlags & BBF_LOOP_CALL0))
{
/* Check whether there is a loop path that doesn't call */
- if (optReachWithoutCall(dstBB, srcBB))
+ if (optReachWithoutCall(dstBB, srcBB))
{
- dstBB->bbFlags |= BBF_LOOP_CALL0;
+ dstBB->bbFlags |= BBF_LOOP_CALL0;
dstBB->bbFlags &= ~BBF_LOOP_CALL1;
}
else
- dstBB->bbFlags |= BBF_LOOP_CALL1;
+ {
+ dstBB->bbFlags |= BBF_LOOP_CALL1;
+ }
}
- //if this loop will always call, then we can omit the GC Poll
+ // if this loop will always call, then we can omit the GC Poll
if ((GCPOLL_NONE != opts.compGCPollType) && (dstBB->bbFlags & BBF_LOOP_CALL1))
{
srcBB->bbFlags &= ~BBF_NEEDS_GCPOLL;
@@ -7330,14 +7416,16 @@ void Compiler::fgLoopCallTest(BasicBlock *srcBB,
* Mark which loops are guaranteed to execute a call.
*/
-void Compiler::fgLoopCallMark()
+void Compiler::fgLoopCallMark()
{
- BasicBlock * block;
+ BasicBlock* block;
/* If we've already marked all the block, bail */
- if (fgLoopCallMarked)
+ if (fgLoopCallMarked)
+ {
return;
+ }
fgLoopCallMarked = true;
@@ -7347,28 +7435,29 @@ void Compiler::fgLoopCallMark()
{
switch (block->bbJumpKind)
{
- case BBJ_COND:
- case BBJ_CALLFINALLY:
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- fgLoopCallTest(block, block->bbJumpDest);
- break;
+ case BBJ_COND:
+ case BBJ_CALLFINALLY:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ fgLoopCallTest(block, block->bbJumpDest);
+ break;
- case BBJ_SWITCH:
+ case BBJ_SWITCH:
- unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock * * jumpPtr; jumpPtr = block->bbJumpSwt->bbsDstTab;
+ unsigned jumpCnt;
+ jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpPtr;
+ jumpPtr = block->bbJumpSwt->bbsDstTab;
- do
- {
- fgLoopCallTest(block, *jumpPtr);
- }
- while (++jumpPtr, --jumpCnt);
+ do
+ {
+ fgLoopCallTest(block, *jumpPtr);
+ } while (++jumpPtr, --jumpCnt);
- break;
+ break;
- default:
- break;
+ default:
+ break;
}
}
}
@@ -7378,32 +7467,37 @@ void Compiler::fgLoopCallMark()
* Note the fact that the given block is a loop header.
*/
-inline
-void Compiler::fgMarkLoopHead(BasicBlock *block)
+inline void Compiler::fgMarkLoopHead(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
+ {
printf("fgMarkLoopHead: Checking loop head block BB%02u: ", block->bbNum);
+ }
#endif
/* Have we decided to generate fully interruptible code already? */
- if (genInterruptible)
+ if (genInterruptible)
{
#ifdef DEBUG
if (verbose)
+ {
printf("method is already fully interruptible\n");
+ }
#endif
return;
}
/* Is the loop head block known to execute a method call? */
- if (block->bbFlags & BBF_GC_SAFE_POINT)
+ if (block->bbFlags & BBF_GC_SAFE_POINT)
{
#ifdef DEBUG
if (verbose)
+ {
printf("this block will execute a call\n");
+ }
#endif
// single block loops that contain GC safe points don't need polls.
block->bbFlags &= ~BBF_NEEDS_GCPOLL;
@@ -7412,20 +7506,24 @@ void Compiler::fgMarkLoopHead(BasicBlock *block)
/* Are dominator sets available? */
- if (fgDomsComputed)
+ if (fgDomsComputed)
{
/* Make sure that we know which loops will always execute calls */
- if (!fgLoopCallMarked)
+ if (!fgLoopCallMarked)
+ {
fgLoopCallMark();
+ }
/* Will every trip through our loop execute a call? */
- if (block->bbFlags & BBF_LOOP_CALL1)
+ if (block->bbFlags & BBF_LOOP_CALL1)
{
#ifdef DEBUG
if (verbose)
+ {
printf("this block dominates a block that will execute a call\n");
+ }
#endif
return;
}
@@ -7444,38 +7542,41 @@ void Compiler::fgMarkLoopHead(BasicBlock *block)
{
#ifdef DEBUG
if (verbose)
+ {
printf("a callsite with more than 1023 pushed args exists\n");
+ }
#endif
return;
}
#ifdef DEBUG
if (verbose)
+ {
printf("no guaranteed callsite exits, marking method as fully interruptible\n");
+ }
#endif
- //only enable fully interruptible code for if we're hijacking.
+ // only enable fully interruptible code for if we're hijacking.
if (GCPOLL_NONE == opts.compGCPollType)
{
genInterruptible = true;
}
}
-
GenTreePtr Compiler::fgGetCritSectOfStaticMethod()
{
noway_assert(!compIsForInlining());
- noway_assert(info.compIsStatic); // This method should only be called for static methods.
+ noway_assert(info.compIsStatic); // This method should only be called for static methods.
- GenTreePtr tree = NULL;
+ GenTreePtr tree = nullptr;
CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
if (!kind.needsRuntimeLookup)
{
- void * critSect = 0, **pCrit = 0;
- critSect = info.compCompHnd->getMethodSync(info.compMethodHnd, (void**) &pCrit);
+ void *critSect = nullptr, **pCrit = nullptr;
+ critSect = info.compCompHnd->getMethodSync(info.compMethodHnd, (void**)&pCrit);
noway_assert((!critSect) != (!pCrit));
tree = gtNewIconEmbHndNode(critSect, pCrit, GTF_ICON_METHOD_HDL);
@@ -7489,28 +7590,26 @@ GenTreePtr Compiler::fgGetCritSectOfStaticMethod()
switch (kind.runtimeLookupKind)
{
- case CORINFO_LOOKUP_THISOBJ :
+ case CORINFO_LOOKUP_THISOBJ:
{
noway_assert(!"Should never get this for static method.");
break;
}
- case CORINFO_LOOKUP_CLASSPARAM :
+ case CORINFO_LOOKUP_CLASSPARAM:
{
// In this case, the hidden param is the class handle.
tree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
break;
}
- case CORINFO_LOOKUP_METHODPARAM :
+ case CORINFO_LOOKUP_METHODPARAM:
{
// In this case, the hidden param is the method handle.
tree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
// Call helper CORINFO_HELP_GETCLASSFROMMETHODPARAM to get the class handle
// from the method handle.
- tree = gtNewHelperCallNode(CORINFO_HELP_GETCLASSFROMMETHODPARAM,
- TYP_I_IMPL, 0,
- gtNewArgList(tree));
+ tree = gtNewHelperCallNode(CORINFO_HELP_GETCLASSFROMMETHODPARAM, TYP_I_IMPL, 0, gtNewArgList(tree));
break;
}
@@ -7521,20 +7620,16 @@ GenTreePtr Compiler::fgGetCritSectOfStaticMethod()
}
}
- noway_assert(tree); // tree should now contain the CORINFO_CLASS_HANDLE for the exact class.
+ noway_assert(tree); // tree should now contain the CORINFO_CLASS_HANDLE for the exact class.
// Given the class handle, get the pointer to the Monitor.
- tree = gtNewHelperCallNode(CORINFO_HELP_GETSYNCFROMCLASSHANDLE,
- TYP_I_IMPL, 0,
- gtNewArgList(tree));
+ tree = gtNewHelperCallNode(CORINFO_HELP_GETSYNCFROMCLASSHANDLE, TYP_I_IMPL, 0, gtNewArgList(tree));
}
noway_assert(tree);
return tree;
-
}
-
#if !defined(_TARGET_X86_)
/*****************************************************************************
@@ -7574,7 +7669,7 @@ GenTreePtr Compiler::fgGetCritSectOfStaticMethod()
* to release the lock. Otherwise, 'acquired' will be 1, and the lock will be
* released during exception processing.
*
- * For synchronized methods, we generate a single return block.
+ * For synchronized methods, we generate a single return block.
* We can do this without creating additional "step" blocks because "ret" blocks
* must occur at the top-level (of the original code), not nested within any EH
* constructs. From the CLI spec, 12.4.2.8.2.3 "ret": "Shall not be enclosed in any
@@ -7597,7 +7692,7 @@ GenTreePtr Compiler::fgGetCritSectOfStaticMethod()
* be handled naturally; no additional work is required.
*/
-void Compiler::fgAddSyncMethodEnterExit()
+void Compiler::fgAddSyncMethodEnterExit()
{
assert((info.compFlags & CORINFO_FLG_SYNCH) != 0);
@@ -7626,7 +7721,7 @@ void Compiler::fgAddSyncMethodEnterExit()
assert(fgFirstBB->bbFallsThrough());
- BasicBlock* tryBegBB = fgNewBBafter(BBJ_NONE, fgFirstBB, false);
+ BasicBlock* tryBegBB = fgNewBBafter(BBJ_NONE, fgFirstBB, false);
BasicBlock* tryLastBB = fgLastBB;
// Create a block for the fault.
@@ -7635,7 +7730,7 @@ void Compiler::fgAddSyncMethodEnterExit()
BasicBlock* faultBB = fgNewBBafter(BBJ_EHFINALLYRET, tryLastBB, false);
assert(tryLastBB->bbNext == faultBB);
- assert(faultBB->bbNext == NULL);
+ assert(faultBB->bbNext == nullptr);
assert(faultBB == fgLastBB);
{ // Scope the EH region creation
@@ -7643,40 +7738,40 @@ void Compiler::fgAddSyncMethodEnterExit()
// Add the new EH region at the end, since it is the least nested,
// and thus should be last.
- EHblkDsc * newEntry;
- unsigned XTnew = compHndBBtabCount;
+ EHblkDsc* newEntry;
+ unsigned XTnew = compHndBBtabCount;
newEntry = fgAddEHTableEntry(XTnew);
// Initialize the new entry
- newEntry->ebdHandlerType = EH_HANDLER_FAULT;
+ newEntry->ebdHandlerType = EH_HANDLER_FAULT;
- newEntry->ebdTryBeg = tryBegBB;
- newEntry->ebdTryLast = tryLastBB;
+ newEntry->ebdTryBeg = tryBegBB;
+ newEntry->ebdTryLast = tryLastBB;
- newEntry->ebdHndBeg = faultBB;
- newEntry->ebdHndLast = faultBB;
+ newEntry->ebdHndBeg = faultBB;
+ newEntry->ebdHndLast = faultBB;
- newEntry->ebdTyp = 0; // unused for fault
+ newEntry->ebdTyp = 0; // unused for fault
- newEntry->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX;
- newEntry->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX;
+ newEntry->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX;
+ newEntry->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX;
newEntry->ebdTryBegOffset = tryBegBB->bbCodeOffs;
newEntry->ebdTryEndOffset = tryLastBB->bbCodeOffsEnd;
newEntry->ebdFilterBegOffset = 0;
- newEntry->ebdHndBegOffset = 0; // handler doesn't correspond to any IL
- newEntry->ebdHndEndOffset = 0; // handler doesn't correspond to any IL
+ newEntry->ebdHndBegOffset = 0; // handler doesn't correspond to any IL
+ newEntry->ebdHndEndOffset = 0; // handler doesn't correspond to any IL
// Set some flags on the new region. This is the same as when we set up
// EH regions in fgFindBasicBlocks(). Note that the try has no enclosing
// handler, and the fault has no enclosing try.
- tryBegBB->bbFlags |= BBF_HAS_LABEL | BBF_DONT_REMOVE | BBF_TRY_BEG | BBF_IMPORTED;
+ tryBegBB->bbFlags |= BBF_HAS_LABEL | BBF_DONT_REMOVE | BBF_TRY_BEG | BBF_IMPORTED;
- faultBB->bbFlags |= BBF_HAS_LABEL | BBF_DONT_REMOVE | BBF_IMPORTED;
- faultBB->bbCatchTyp = BBCT_FAULT;
+ faultBB->bbFlags |= BBF_HAS_LABEL | BBF_DONT_REMOVE | BBF_IMPORTED;
+ faultBB->bbCatchTyp = BBCT_FAULT;
tryBegBB->setTryIndex(XTnew);
tryBegBB->clearHndIndex();
@@ -7687,7 +7782,7 @@ void Compiler::fgAddSyncMethodEnterExit()
// Walk the user code blocks and set all blocks that don't already have a try handler
// to point to the new try handler.
- BasicBlock * tmpBB;
+ BasicBlock* tmpBB;
for (tmpBB = tryBegBB->bbNext; tmpBB != faultBB; tmpBB = tmpBB->bbNext)
{
if (!tmpBB->hasTryIndex())
@@ -7699,39 +7794,37 @@ void Compiler::fgAddSyncMethodEnterExit()
// Walk the EH table. Make every EH entry that doesn't already have an enclosing
// try index mark this new entry as their enclosing try index.
- unsigned XTnum;
- EHblkDsc * HBtab;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < XTnew;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < XTnew; XTnum++, HBtab++)
{
if (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
{
- HBtab->ebdEnclosingTryIndex = (unsigned short)XTnew; // This EH region wasn't previously nested, but now it is.
+ HBtab->ebdEnclosingTryIndex =
+ (unsigned short)XTnew; // This EH region wasn't previously nested, but now it is.
}
}
#ifdef DEBUG
if (verbose)
{
- JITDUMP("Synchronized method - created additional EH descriptor EH#%u for try/fault wrapping monitor enter/exit\n",
- XTnew);
+ JITDUMP("Synchronized method - created additional EH descriptor EH#%u for try/fault wrapping monitor "
+ "enter/exit\n",
+ XTnew);
fgDispBasicBlocks();
fgDispHandlerTab();
}
fgVerifyHandlerTab();
#endif // DEBUG
-
}
// Create a 'monitor acquired' boolean (actually, an unsigned byte: 1 = acquired, 0 = not acquired).
var_types typeMonAcquired = TYP_UBYTE;
- this->lvaMonAcquired = lvaGrabTemp(true DEBUGARG("Synchronized method monitor acquired boolean"));
-
-
+ this->lvaMonAcquired = lvaGrabTemp(true DEBUGARG("Synchronized method monitor acquired boolean"));
+
lvaTable[lvaMonAcquired].lvType = typeMonAcquired;
{ // Scope the variables of the variable initialization
@@ -7747,12 +7840,12 @@ void Compiler::fgAddSyncMethodEnterExit()
#ifdef DEBUG
if (verbose)
{
- printf("\nSynchronized method - Add 'acquired' initialization in first block BB%02u [%08p]\n", fgFirstBB, dspPtr(fgFirstBB));
+ printf("\nSynchronized method - Add 'acquired' initialization in first block BB%02u [%08p]\n", fgFirstBB,
+ dspPtr(fgFirstBB));
gtDispTree(initNode);
printf("\n");
}
#endif
-
}
// Make a copy of the 'this' pointer to be used in the handler so it does not inhibit enregistration
@@ -7760,11 +7853,11 @@ void Compiler::fgAddSyncMethodEnterExit()
unsigned lvaCopyThis = 0;
if (!info.compIsStatic)
{
- lvaCopyThis = lvaGrabTemp(true DEBUGARG("Synchronized method monitor acquired boolean"));
+ lvaCopyThis = lvaGrabTemp(true DEBUGARG("Synchronized method monitor acquired boolean"));
lvaTable[lvaCopyThis].lvType = TYP_REF;
- GenTreePtr thisNode = gtNewLclvNode(info.compThisArg, TYP_REF);
- GenTreePtr copyNode = gtNewLclvNode(lvaCopyThis, TYP_REF);
+ GenTreePtr thisNode = gtNewLclvNode(info.compThisArg, TYP_REF);
+ GenTreePtr copyNode = gtNewLclvNode(lvaCopyThis, TYP_REF);
GenTreePtr initNode = gtNewAssignNode(copyNode, thisNode);
fgInsertStmtAtEnd(tryBegBB, initNode);
@@ -7774,7 +7867,7 @@ void Compiler::fgAddSyncMethodEnterExit()
// exceptional case
fgCreateMonitorTree(lvaMonAcquired, lvaCopyThis, faultBB, false /*exit*/);
-
+
// non-exceptional cases
for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
{
@@ -7787,57 +7880,56 @@ void Compiler::fgAddSyncMethodEnterExit()
// fgCreateMonitorTree: Create tree to execute a monitor enter or exit operation for synchronized methods
// lvaMonAcquired: lvaNum of boolean variable that tracks if monitor has been acquired.
-// lvaThisVar: lvaNum of variable being used as 'this' pointer, may not be the original one. Is only used for nonstatic methods
-// block: block to insert the tree in. It is inserted at the end or in the case of a return, immediately before the GT_RETURN
+// lvaThisVar: lvaNum of variable being used as 'this' pointer, may not be the original one. Is only used for
+// nonstatic methods
+// block: block to insert the tree in. It is inserted at the end or in the case of a return, immediately before the
+// GT_RETURN
// enter: whether to create a monitor enter or exit
GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThisVar, BasicBlock* block, bool enter)
{
// Insert the expression "enter/exitCrit(this, &acquired)" or "enter/exitCrit(handle, &acquired)"
- var_types typeMonAcquired = TYP_UBYTE;
- GenTreePtr varNode = gtNewLclvNode(lvaMonAcquired, typeMonAcquired);
- GenTreePtr varAddrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, varNode);
+ var_types typeMonAcquired = TYP_UBYTE;
+ GenTreePtr varNode = gtNewLclvNode(lvaMonAcquired, typeMonAcquired);
+ GenTreePtr varAddrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, varNode);
GenTreePtr tree;
- if (info.compIsStatic)
+ if (info.compIsStatic)
{
tree = fgGetCritSectOfStaticMethod();
- tree = gtNewHelperCallNode(enter ? CORINFO_HELP_MON_ENTER_STATIC : CORINFO_HELP_MON_EXIT_STATIC,
- TYP_VOID, 0,
+ tree = gtNewHelperCallNode(enter ? CORINFO_HELP_MON_ENTER_STATIC : CORINFO_HELP_MON_EXIT_STATIC, TYP_VOID, 0,
gtNewArgList(tree, varAddrNode));
}
else
{
tree = gtNewLclvNode(lvaThisVar, TYP_REF);
- tree = gtNewHelperCallNode(enter ? CORINFO_HELP_MON_ENTER : CORINFO_HELP_MON_EXIT,
- TYP_VOID, 0,
+ tree = gtNewHelperCallNode(enter ? CORINFO_HELP_MON_ENTER : CORINFO_HELP_MON_EXIT, TYP_VOID, 0,
gtNewArgList(tree, varAddrNode));
}
#ifdef DEBUG
if (verbose)
{
- printf("\nSynchronized method - Add monitor %s call to block BB%02u [%08p]\n", enter?"enter":"exit", block, dspPtr(block));
+ printf("\nSynchronized method - Add monitor %s call to block BB%02u [%08p]\n", enter ? "enter" : "exit", block,
+ dspPtr(block));
gtDispTree(tree);
printf("\n");
}
#endif
-
- if (block->bbJumpKind == BBJ_RETURN &&
- block->lastStmt()->gtStmtExpr->gtOper == GT_RETURN)
+ if (block->bbJumpKind == BBJ_RETURN && block->lastStmt()->gtStmtExpr->gtOper == GT_RETURN)
{
GenTree* retNode = block->lastStmt()->gtStmtExpr;
GenTree* retExpr = retNode->gtOp.gtOp1;
-
+
if (retExpr != nullptr)
{
// have to insert this immediately before the GT_RETURN so we transform:
// ret(...) ->
// ret(comma(comma(tmp=...,call mon_exit), tmp)
//
- //
+ //
// Before morph stage, it is possible to have a case of GT_RETURN(TYP_LONG, op1) where op1's type is
// TYP_STRUCT (of 8-bytes) and op1 is call node. See the big comment block in impReturnInstruction()
// for details for the case where info.compRetType is not the same as info.compRetNativeType. For
@@ -7847,7 +7939,7 @@ GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThis
// in turn passes it to VM to know the size of value type.
GenTree* temp = fgInsertCommaFormTemp(&retNode->gtOp.gtOp1, info.compMethodInfo->args.retTypeClass);
- GenTree* lclVar = retNode->gtOp.gtOp1->gtOp.gtOp2;
+ GenTree* lclVar = retNode->gtOp.gtOp1->gtOp.gtOp2;
retNode->gtOp.gtOp1->gtOp.gtOp2 = gtNewOperNode(GT_COMMA, retExpr->TypeGet(), tree, lclVar);
}
else
@@ -7864,22 +7956,19 @@ GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThis
return tree;
}
-
-
-
// Convert a BBJ_RETURN block in a synchronized method to a BBJ_ALWAYS.
// We've previously added a 'try' block around the original program code using fgAddSyncMethodEnterExit().
// Thus, we put BBJ_RETURN blocks inside a 'try'. In IL this is illegal. Instead, we would
// see a 'leave' inside a 'try' that would get transformed into BBJ_CALLFINALLY/BBJ_ALWAYS blocks
// during importing, and the BBJ_ALWAYS would point at an outer block with the BBJ_RETURN.
// Here, we mimic some of the logic of importing a LEAVE to get the same effect for synchronized methods.
-void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block)
+void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block)
{
assert(!fgFuncletsCreated);
assert(info.compFlags & CORINFO_FLG_SYNCH);
assert(genReturnBB != nullptr);
assert(genReturnBB != block);
- assert(fgReturnCount <= 1); // We have a single return for synchronized methods
+ assert(fgReturnCount <= 1); // We have a single return for synchronized methods
assert(block->bbJumpKind == BBJ_RETURN);
assert((block->bbFlags & BBF_HAS_JMP) == 0);
assert(block->hasTryIndex());
@@ -7887,22 +7976,24 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block)
assert(compHndBBtabCount >= 1);
unsigned tryIndex = block->getTryIndex();
- assert(tryIndex == compHndBBtabCount - 1); // The BBJ_RETURN must be at the top-level before we inserted the try/finally, which must be the last EH region.
+ assert(tryIndex == compHndBBtabCount - 1); // The BBJ_RETURN must be at the top-level before we inserted the
+ // try/finally, which must be the last EH region.
EHblkDsc* ehDsc = ehGetDsc(tryIndex);
- assert(ehDsc->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX); // There are no enclosing regions of the BBJ_RETURN block
+ assert(ehDsc->ebdEnclosingTryIndex ==
+ EHblkDsc::NO_ENCLOSING_INDEX); // There are no enclosing regions of the BBJ_RETURN block
assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX);
- // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB.
- block->bbJumpKind = BBJ_ALWAYS;
- block->bbJumpDest = genReturnBB;
+ // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB.
+ block->bbJumpKind = BBJ_ALWAYS;
+ block->bbJumpDest = genReturnBB;
block->bbJumpDest->bbRefs++;
#ifdef DEBUG
if (verbose)
{
- printf("Synchronized method - convert block BB%02u to BBJ_ALWAYS [targets BB%02u]\n",
- block->bbNum, block->bbJumpDest->bbNum);
+ printf("Synchronized method - convert block BB%02u to BBJ_ALWAYS [targets BB%02u]\n", block->bbNum,
+ block->bbJumpDest->bbNum);
}
#endif
}
@@ -7925,19 +8016,17 @@ void Compiler::fgAddReversePInvokeEnterExit()
#if COR_JIT_EE_VERSION > 460
lvaReversePInvokeFrameVar = lvaGrabTempWithImplicitUse(false DEBUGARG("Reverse Pinvoke FrameVar"));
- LclVarDsc* varDsc = &lvaTable[lvaReversePInvokeFrameVar];
- varDsc->lvType = TYP_BLK;
+ LclVarDsc* varDsc = &lvaTable[lvaReversePInvokeFrameVar];
+ varDsc->lvType = TYP_BLK;
varDsc->lvExactSize = eeGetEEInfo()->sizeOfReversePInvokeFrame;
- GenTreePtr tree;
+ GenTreePtr tree;
// Add enter pinvoke exit callout at the start of prolog
tree = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaReversePInvokeFrameVar, TYP_BLK));
- tree = gtNewHelperCallNode(CORINFO_HELP_JIT_REVERSE_PINVOKE_ENTER,
- TYP_VOID, 0,
- gtNewArgList(tree));
+ tree = gtNewHelperCallNode(CORINFO_HELP_JIT_REVERSE_PINVOKE_ENTER, TYP_VOID, 0, gtNewArgList(tree));
fgEnsureFirstBBisScratch();
@@ -7956,9 +8045,7 @@ void Compiler::fgAddReversePInvokeEnterExit()
tree = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaReversePInvokeFrameVar, TYP_BLK));
- tree = gtNewHelperCallNode(CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT,
- TYP_VOID, 0,
- gtNewArgList(tree));
+ tree = gtNewHelperCallNode(CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT, TYP_VOID, 0, gtNewArgList(tree));
assert(genReturnBB != nullptr);
@@ -7967,7 +8054,8 @@ void Compiler::fgAddReversePInvokeEnterExit()
#ifdef DEBUG
if (verbose)
{
- printf("\nReverse PInvoke method - Add reverse pinvoke exit in return basic block [%08p]\n", dspPtr(genReturnBB));
+ printf("\nReverse PInvoke method - Add reverse pinvoke exit in return basic block [%08p]\n",
+ dspPtr(genReturnBB));
gtDispTree(tree);
printf("\n");
}
@@ -7981,13 +8069,13 @@ void Compiler::fgAddReversePInvokeEnterExit()
* Return 'true' if there is more than one BBJ_RETURN block.
*/
-bool Compiler::fgMoreThanOneReturnBlock()
+bool Compiler::fgMoreThanOneReturnBlock()
{
- unsigned retCnt = 0;
+ unsigned retCnt = 0;
- for (BasicBlock * block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
- if (block->bbJumpKind == BBJ_RETURN)
+ if (block->bbJumpKind == BBJ_RETURN)
{
retCnt++;
if (retCnt > 1)
@@ -8005,7 +8093,7 @@ bool Compiler::fgMoreThanOneReturnBlock()
* Add any internal blocks/trees we may need
*/
-void Compiler::fgAddInternal()
+void Compiler::fgAddInternal()
{
noway_assert(!compIsForInlining());
@@ -8033,14 +8121,15 @@ void Compiler::fgAddInternal()
bool lva0CopiedForGenericsCtxt;
#ifndef JIT32_GCENCODER
lva0CopiedForGenericsCtxt = ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0);
-#else // JIT32_GCENCODER
+#else // JIT32_GCENCODER
lva0CopiedForGenericsCtxt = false;
-#endif // JIT32_GCENCODER
+#endif // JIT32_GCENCODER
noway_assert(lva0CopiedForGenericsCtxt || !lvaTable[info.compThisArg].lvAddrExposed);
noway_assert(!lvaTable[info.compThisArg].lvArgWrite);
- noway_assert(lvaTable[lvaArg0Var].lvAddrExposed || lvaTable[lvaArg0Var].lvArgWrite || lva0CopiedForGenericsCtxt);
+ noway_assert(lvaTable[lvaArg0Var].lvAddrExposed || lvaTable[lvaArg0Var].lvArgWrite ||
+ lva0CopiedForGenericsCtxt);
- var_types thisType = lvaTable[info.compThisArg].TypeGet();
+ var_types thisType = lvaTable[info.compThisArg].TypeGet();
// Now assign the original input "this" to the temp
@@ -8048,9 +8137,9 @@ void Compiler::fgAddInternal()
tree = gtNewLclvNode(lvaArg0Var, thisType);
- tree = gtNewAssignNode(tree, // dst
- gtNewLclvNode(info.compThisArg, thisType) // src
- );
+ tree = gtNewAssignNode(tree, // dst
+ gtNewLclvNode(info.compThisArg, thisType) // src
+ );
/* Create a new basic block and stick the assignment in it */
@@ -8074,15 +8163,15 @@ void Compiler::fgAddInternal()
if (opts.compNeedSecurityCheck)
{
noway_assert(lvaSecurityObject == BAD_VAR_NUM);
- lvaSecurityObject = lvaGrabTempWithImplicitUse(false DEBUGARG("security check"));
+ lvaSecurityObject = lvaGrabTempWithImplicitUse(false DEBUGARG("security check"));
lvaTable[lvaSecurityObject].lvType = TYP_REF;
}
/* Assume we will generate a single shared return sequence */
- ULONG returnWeight = 0;
- bool oneReturn;
- bool allProfWeight;
+ ULONG returnWeight = 0;
+ bool oneReturn;
+ bool allProfWeight;
//
// We will generate just one epilog (return block)
@@ -8091,10 +8180,8 @@ void Compiler::fgAddInternal()
// or for methods calling into unmanaged code
// or for synchronized methods.
//
- if ( compIsProfilerHookNeeded() ||
- (info.compCallUnmanaged != 0) ||
- opts.IsReversePInvoke() ||
- ((info.compFlags & CORINFO_FLG_SYNCH) != 0))
+ if (compIsProfilerHookNeeded() || (info.compCallUnmanaged != 0) || opts.IsReversePInvoke() ||
+ ((info.compFlags & CORINFO_FLG_SYNCH) != 0))
{
// We will generate only one return block
// We will transform the BBJ_RETURN blocks
@@ -8116,9 +8203,9 @@ void Compiler::fgAddInternal()
// sum of all these blocks.
//
fgReturnCount = 0;
- for (BasicBlock * block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
- if (block->bbJumpKind == BBJ_RETURN)
+ if (block->bbJumpKind == BBJ_RETURN)
{
//
// returnCount is the count of BBJ_RETURN blocks in this method
@@ -8177,10 +8264,10 @@ void Compiler::fgAddInternal()
}
#endif // !_TARGET_X86_
- if (oneReturn)
+ if (oneReturn)
{
- genReturnBB = fgNewBBinRegion(BBJ_RETURN);
- genReturnBB->bbRefs = 1; // bbRefs gets update later, for now it should be 1
+ genReturnBB = fgNewBBinRegion(BBJ_RETURN);
+ genReturnBB->bbRefs = 1; // bbRefs gets update later, for now it should be 1
fgReturnCount++;
if (allProfWeight)
@@ -8221,13 +8308,15 @@ void Compiler::fgAddInternal()
genReturnBB->bbFlags &= ~BBF_RUN_RARELY;
}
- genReturnBB->bbFlags |= (BBF_INTERNAL | BBF_DONT_REMOVE);
+ genReturnBB->bbFlags |= (BBF_INTERNAL | BBF_DONT_REMOVE);
- noway_assert(genReturnBB->bbNext == NULL);
+ noway_assert(genReturnBB->bbNext == nullptr);
#ifdef DEBUG
if (verbose)
+ {
printf("\n genReturnBB [BB%02u] created\n", genReturnBB->bbNum);
+ }
#endif
}
else
@@ -8239,7 +8328,7 @@ void Compiler::fgAddInternal()
}
// If there is a return value, then create a temp for it. Real returns will store the value in there and
- // it'll be reloaded by the single return.
+ // it'll be reloaded by the single return.
if (genReturnBB && compMethodHasRetVal())
{
genReturnLocal = lvaGrabTemp(true DEBUGARG("Single return block return value"));
@@ -8269,10 +8358,14 @@ void Compiler::fgAddInternal()
}
if (!varTypeIsFloating(info.compRetType))
+ {
lvaTable[genReturnLocal].setPrefReg(REG_INTRET, this);
#ifdef REG_FLOATRET
+ }
else
+ {
lvaTable[genReturnLocal].setPrefReg(REG_FLOATRET, this);
+ }
#endif
#ifdef DEBUG
@@ -8297,7 +8390,7 @@ void Compiler::fgAddInternal()
lvaInlinedPInvokeFrameVar = lvaGrabTempWithImplicitUse(false DEBUGARG("Pinvoke FrameVar"));
- LclVarDsc * varDsc = &lvaTable[lvaInlinedPInvokeFrameVar];
+ LclVarDsc* varDsc = &lvaTable[lvaInlinedPInvokeFrameVar];
varDsc->addPrefReg(RBM_PINVOKE_TCB, this);
varDsc->lvType = TYP_BLK;
// Make room for the inlined frame.
@@ -8309,17 +8402,17 @@ void Compiler::fgAddInternal()
if (!opts.ShouldUsePInvokeHelpers() && compJmpOpUsed)
{
lvaPInvokeFrameRegSaveVar = lvaGrabTempWithImplicitUse(false DEBUGARG("PInvokeFrameRegSave Var"));
- varDsc = &lvaTable[lvaPInvokeFrameRegSaveVar];
- varDsc->lvType = TYP_BLK;
- varDsc->lvExactSize = 2 * REGSIZE_BYTES;
+ varDsc = &lvaTable[lvaPInvokeFrameRegSaveVar];
+ varDsc->lvType = TYP_BLK;
+ varDsc->lvExactSize = 2 * REGSIZE_BYTES;
}
#endif
}
// Do we need to insert a "JustMyCode" callback?
- CORINFO_JUST_MY_CODE_HANDLE *pDbgHandle = NULL;
- CORINFO_JUST_MY_CODE_HANDLE dbgHandle = NULL;
+ CORINFO_JUST_MY_CODE_HANDLE* pDbgHandle = nullptr;
+ CORINFO_JUST_MY_CODE_HANDLE dbgHandle = nullptr;
if (opts.compDbgCode && !(opts.eeFlags & CORJIT_FLG_IL_STUB))
{
dbgHandle = info.compCompHnd->getJustMyCodeHandle(info.compMethodHnd, &pDbgHandle);
@@ -8327,7 +8420,7 @@ void Compiler::fgAddInternal()
#ifdef _TARGET_ARM64_
// TODO-ARM64-NYI: don't do just-my-code
- dbgHandle = nullptr;
+ dbgHandle = nullptr;
pDbgHandle = nullptr;
#endif // _TARGET_ARM64_
@@ -8335,16 +8428,15 @@ void Compiler::fgAddInternal()
if (dbgHandle || pDbgHandle)
{
- GenTreePtr guardCheckVal = gtNewOperNode(GT_IND, TYP_INT,
- gtNewIconEmbHndNode(dbgHandle, pDbgHandle, GTF_ICON_TOKEN_HDL));
- GenTreePtr guardCheckCond = gtNewOperNode(GT_EQ, TYP_INT,
- guardCheckVal, gtNewZeroConNode(TYP_INT));
+ GenTreePtr guardCheckVal =
+ gtNewOperNode(GT_IND, TYP_INT, gtNewIconEmbHndNode(dbgHandle, pDbgHandle, GTF_ICON_TOKEN_HDL));
+ GenTreePtr guardCheckCond = gtNewOperNode(GT_EQ, TYP_INT, guardCheckVal, gtNewZeroConNode(TYP_INT));
guardCheckCond->gtFlags |= GTF_RELOP_QMARK;
// Create the callback which will yield the final answer
GenTreePtr callback = gtNewHelperCallNode(CORINFO_HELP_DBG_IS_JUST_MY_CODE, TYP_VOID);
- callback = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), callback);
+ callback = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), callback);
// Stick the conditional call at the start of the method
@@ -8354,25 +8446,21 @@ void Compiler::fgAddInternal()
/* Do we need to call out for security ? */
- if (tiSecurityCalloutNeeded)
+ if (tiSecurityCalloutNeeded)
{
// We must have grabbed this local.
noway_assert(opts.compNeedSecurityCheck);
noway_assert(lvaSecurityObject != BAD_VAR_NUM);
- GenTreePtr tree;
+ GenTreePtr tree;
/* Insert the expression "call JIT_Security_Prolog(MethodHnd, &SecurityObject)" */
tree = gtNewIconEmbMethHndNode(info.compMethodHnd);
- tree = gtNewHelperCallNode(info.compCompHnd->getSecurityPrologHelper(info.compMethodHnd),
- TYP_VOID,
- 0,
- gtNewArgList(tree,
- gtNewOperNode(GT_ADDR,
- TYP_BYREF,
- gtNewLclvNode(lvaSecurityObject, TYP_REF))));
+ tree = gtNewHelperCallNode(info.compCompHnd->getSecurityPrologHelper(info.compMethodHnd), TYP_VOID, 0,
+ gtNewArgList(tree, gtNewOperNode(GT_ADDR, TYP_BYREF,
+ gtNewLclvNode(lvaSecurityObject, TYP_REF))));
/* Create a new basic block and stick the call in it */
@@ -8383,34 +8471,31 @@ void Compiler::fgAddInternal()
#ifdef DEBUG
if (verbose)
{
- printf("\ntiSecurityCalloutNeeded - Add call JIT_Security_Prolog(%08p) statement ", dspPtr(info.compMethodHnd));
+ printf("\ntiSecurityCalloutNeeded - Add call JIT_Security_Prolog(%08p) statement ",
+ dspPtr(info.compMethodHnd));
printTreeID(tree);
printf(" in first basic block [%08p]\n", dspPtr(fgFirstBB));
gtDispTree(tree);
printf("\n");
}
#endif
-
}
-
#if defined(_TARGET_X86_)
/* Is this a 'synchronized' method? */
- if (info.compFlags & CORINFO_FLG_SYNCH)
+ if (info.compFlags & CORINFO_FLG_SYNCH)
{
- GenTreePtr tree = NULL;
+ GenTreePtr tree = NULL;
/* Insert the expression "enterCrit(this)" or "enterCrit(handle)" */
- if (info.compIsStatic)
+ if (info.compIsStatic)
{
tree = fgGetCritSectOfStaticMethod();
- tree = gtNewHelperCallNode(CORINFO_HELP_MON_ENTER_STATIC,
- TYP_VOID, 0,
- gtNewArgList(tree));
+ tree = gtNewHelperCallNode(CORINFO_HELP_MON_ENTER_STATIC, TYP_VOID, 0, gtNewArgList(tree));
}
else
{
@@ -8418,9 +8503,7 @@ void Compiler::fgAddInternal()
tree = gtNewLclvNode(info.compThisArg, TYP_REF);
- tree = gtNewHelperCallNode(CORINFO_HELP_MON_ENTER,
- TYP_VOID, 0,
- gtNewArgList(tree));
+ tree = gtNewHelperCallNode(CORINFO_HELP_MON_ENTER, TYP_VOID, 0, gtNewArgList(tree));
}
/* Create a new basic block and stick the call in it */
@@ -8445,21 +8528,17 @@ void Compiler::fgAddInternal()
/* Create the expression "exitCrit(this)" or "exitCrit(handle)" */
- if (info.compIsStatic)
+ if (info.compIsStatic)
{
tree = fgGetCritSectOfStaticMethod();
- tree = gtNewHelperCallNode(CORINFO_HELP_MON_EXIT_STATIC,
- TYP_VOID, 0,
- gtNewArgList(tree));
+ tree = gtNewHelperCallNode(CORINFO_HELP_MON_EXIT_STATIC, TYP_VOID, 0, gtNewArgList(tree));
}
else
{
tree = gtNewLclvNode(info.compThisArg, TYP_REF);
- tree = gtNewHelperCallNode(CORINFO_HELP_MON_EXIT,
- TYP_VOID, 0,
- gtNewArgList(tree));
+ tree = gtNewHelperCallNode(CORINFO_HELP_MON_EXIT, TYP_VOID, 0, gtNewArgList(tree));
}
fgInsertStmtAtEnd(genReturnBB, tree);
@@ -8475,25 +8554,22 @@ void Compiler::fgAddInternal()
// Reset cookies used to track start and end of the protected region in synchronized methods
syncStartEmitCookie = NULL;
- syncEndEmitCookie = NULL;
+ syncEndEmitCookie = NULL;
}
#endif // _TARGET_X86_
-
/* Do we need to do runtime call out to check the security? */
- if (tiRuntimeCalloutNeeded)
+ if (tiRuntimeCalloutNeeded)
{
- GenTreePtr tree;
+ GenTreePtr tree;
/* Insert the expression "call verificationRuntimeCheck(MethodHnd)" */
tree = gtNewIconEmbMethHndNode(info.compMethodHnd);
- tree = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION_RUNTIME_CHECK,
- TYP_VOID, 0,
- gtNewArgList(tree));
+ tree = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION_RUNTIME_CHECK, TYP_VOID, 0, gtNewArgList(tree));
/* Create a new basic block and stick the call in it */
@@ -8504,12 +8580,13 @@ void Compiler::fgAddInternal()
#ifdef DEBUG
if (verbose)
{
- printf("\ntiRuntimeCalloutNeeded - Call verificationRuntimeCheck(%08p) statement in first basic block [%08p]\n", dspPtr(info.compMethodHnd), dspPtr(fgFirstBB));
+ printf("\ntiRuntimeCalloutNeeded - Call verificationRuntimeCheck(%08p) statement in first basic block "
+ "[%08p]\n",
+ dspPtr(info.compMethodHnd), dspPtr(fgFirstBB));
gtDispTree(tree);
printf("\n");
}
#endif
-
}
if (opts.IsReversePInvoke())
@@ -8528,14 +8605,14 @@ void Compiler::fgAddInternal()
// Make the 'return' expression.
//
- //make sure to reload the return value as part of the return (it is saved by the "real return").
+ // make sure to reload the return value as part of the return (it is saved by the "real return").
if (genReturnLocal != BAD_VAR_NUM)
{
noway_assert(compMethodHasRetVal());
GenTreePtr retTemp = gtNewLclvNode(genReturnLocal, lvaTable[genReturnLocal].TypeGet());
- //make sure copy prop ignores this node (make sure it always does a reload from the temp).
+ // make sure copy prop ignores this node (make sure it always does a reload from the temp).
retTemp->gtFlags |= GTF_DONT_CSE;
tree = gtNewOperNode(GT_RETURN, retTemp->gtType, retTemp);
}
@@ -8543,7 +8620,7 @@ void Compiler::fgAddInternal()
{
noway_assert(info.compRetType == TYP_VOID || varTypeIsStruct(info.compRetType));
// return void
- tree = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID );
+ tree = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
}
/* Add 'return' expression to the return block */
@@ -8562,7 +8639,6 @@ void Compiler::fgAddInternal()
printf("\n");
}
#endif
-
}
#ifdef DEBUG
@@ -8575,7 +8651,6 @@ void Compiler::fgAddInternal()
#endif
}
-
/*****************************************************************************
*
* Create a new statement from tree and wire the links up.
@@ -8672,20 +8747,21 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
BasicBlock* succ = curr->GetSucc(i, this);
if (succ != newBlock)
{
- JITDUMP("BB%02u previous predecessor was BB%02u, now is BB%02u\n", succ->bbNum, curr->bbNum, newBlock->bbNum);
+ JITDUMP("BB%02u previous predecessor was BB%02u, now is BB%02u\n", succ->bbNum, curr->bbNum,
+ newBlock->bbNum);
fgReplacePred(succ, curr, newBlock);
}
}
newBlock->bbJumpDest = curr->bbJumpDest;
- curr->bbJumpDest = nullptr;
+ curr->bbJumpDest = nullptr;
}
else
{
// In the case of a switch statement there's more complicated logic in order to wire up the predecessor lists
// but fortunately there's an existing method that implements this functionality.
newBlock->bbJumpSwt = curr->bbJumpSwt;
-
+
fgChangeSwitchBlock(curr, newBlock);
curr->bbJumpSwt = nullptr;
@@ -8694,11 +8770,11 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
newBlock->inheritWeight(curr);
// Set the new block's flags. Note that the new block isn't BBF_INTERNAL unless the old block is.
- newBlock->bbFlags = curr->bbFlags;
+ newBlock->bbFlags = curr->bbFlags;
// Remove flags that the new block can't have.
- newBlock->bbFlags &= ~(BBF_TRY_BEG | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_HAS_LABEL | BBF_JMP_TARGET |
- BBF_FUNCLET_BEG | BBF_LOOP_PREHEADER | BBF_KEEP_BBJ_ALWAYS);
+ newBlock->bbFlags &= ~(BBF_TRY_BEG | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_HAS_LABEL |
+ BBF_JMP_TARGET | BBF_FUNCLET_BEG | BBF_LOOP_PREHEADER | BBF_KEEP_BBJ_ALWAYS);
// Remove the GC safe bit on the new block. It seems clear that if we split 'curr' at the end,
// such that all the code is left in 'curr', and 'newBlock' just gets the control flow, then
@@ -8707,10 +8783,10 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
// be careful about updating this flag appropriately. So, removing the GC safe bit is simply
    // conservative: some functions that could have been partially interruptible might end up
    // being fully interruptible if we exercised more care here.
- newBlock->bbFlags &= ~BBF_GC_SAFE_POINT;
+ newBlock->bbFlags &= ~BBF_GC_SAFE_POINT;
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- newBlock->bbFlags &= ~(BBF_FINALLY_TARGET);
+ newBlock->bbFlags &= ~(BBF_FINALLY_TARGET);
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
// The new block has no code, so we leave bbCodeOffs/bbCodeOffsEnd set to BAD_IL_OFFSET. If a caller
@@ -8718,10 +8794,10 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
// Insert the new block in the block list after the 'curr' block.
fgInsertBBafter(curr, newBlock);
- fgExtendEHRegionAfter(curr); // The new block is in the same EH region as the old block.
+ fgExtendEHRegionAfter(curr); // The new block is in the same EH region as the old block.
// Remove flags from the old block that are no longer possible.
- curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL);
+ curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL);
// Default to fallthru, and add the arc for that.
curr->bbJumpKind = BBJ_NONE;
@@ -8746,11 +8822,11 @@ BasicBlock* Compiler::fgSplitBlockAfterStatement(BasicBlock* curr, GenTree* stmt
newBlock->bbTreeList->gtPrev = curr->bbTreeList->gtPrev;
}
curr->bbTreeList->gtPrev = stmt;
- stmt->gtNext = NULL;
+ stmt->gtNext = nullptr;
// Update the IL offsets of the blocks to match the split.
- assert(newBlock->bbCodeOffs == BAD_IL_OFFSET);
+ assert(newBlock->bbCodeOffs == BAD_IL_OFFSET);
assert(newBlock->bbCodeOffsEnd == BAD_IL_OFFSET);
// curr->bbCodeOffs remains the same
@@ -8780,7 +8856,7 @@ BasicBlock* Compiler::fgSplitBlockAtBeginning(BasicBlock* curr)
BasicBlock* newBlock = fgSplitBlockAtEnd(curr);
newBlock->bbTreeList = curr->bbTreeList;
- curr->bbTreeList = NULL;
+ curr->bbTreeList = nullptr;
// The new block now has all the code, and the old block has none. Update the
// IL offsets for the block to reflect this.
@@ -8873,19 +8949,20 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
return newBlock;
}
-
/*****************************************************************************/
/*****************************************************************************/
-void Compiler::fgFindOperOrder()
+void Compiler::fgFindOperOrder()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In fgFindOperOrder()\n");
+ }
#endif
- BasicBlock* block;
- GenTreeStmt* stmt;
+ BasicBlock* block;
+ GenTreeStmt* stmt;
/* Walk the basic blocks and for each statement determine
* the evaluation order, cost, FP levels, etc... */
@@ -8917,10 +8994,10 @@ void Compiler::fgSimpleLowering()
{
if (tree->gtOper == GT_ARR_LENGTH)
{
- GenTreeArrLen* arrLen = tree->AsArrLen();
- GenTreePtr arr = arrLen->gtArrLen.ArrRef();
- GenTreePtr add;
- GenTreePtr con;
+ GenTreeArrLen* arrLen = tree->AsArrLen();
+ GenTreePtr arr = arrLen->gtArrLen.ArrRef();
+ GenTreePtr add;
+ GenTreePtr con;
/* Create the expression "*(array_addr + ArrLenOffs)" */
@@ -8929,8 +9006,7 @@ void Compiler::fgSimpleLowering()
noway_assert(arrLen->ArrLenOffset() == offsetof(CORINFO_Array, length) ||
arrLen->ArrLenOffset() == offsetof(CORINFO_String, stringLen));
- if ((arr->gtOper == GT_CNS_INT) &&
- (arr->gtIntCon.gtIconVal == 0))
+ if ((arr->gtOper == GT_CNS_INT) && (arr->gtIntCon.gtIconVal == 0))
{
// If the array is NULL, then we should get a NULL reference
// exception when computing its length. We need to maintain
@@ -8941,10 +9017,10 @@ void Compiler::fgSimpleLowering()
}
else
{
- con = gtNewIconNode(arrLen->ArrLenOffset(), TYP_I_IMPL);
+ con = gtNewIconNode(arrLen->ArrLenOffset(), TYP_I_IMPL);
con->gtRsvdRegs = 0;
con->gtCopyFPlvl(arr);
- add = gtNewOperNode(GT_ADD, TYP_REF, arr, con);
+ add = gtNewOperNode(GT_ADD, TYP_REF, arr, con);
add->gtRsvdRegs = arr->gtRsvdRegs;
add->gtCopyFPlvl(arr);
add->CopyCosts(arr);
@@ -8954,7 +9030,7 @@ void Compiler::fgSimpleLowering()
con->gtNext = add;
add->gtPrev = con;
- add->gtNext = tree;
+ add->gtNext = tree;
tree->gtPrev = add;
}
@@ -8967,7 +9043,7 @@ void Compiler::fgSimpleLowering()
#ifdef FEATURE_SIMD
|| tree->OperGet() == GT_SIMD_CHK
#endif // FEATURE_SIMD
- )
+ )
{
// Add in a call to an error routine.
fgSetRngChkTarget(tree, false);
@@ -8990,23 +9066,21 @@ void Compiler::fgSimpleLowering()
/*****************************************************************************
*/
-void Compiler::fgUpdateRefCntForClone(BasicBlock* addedToBlock,
- GenTreePtr clonedTree)
+void Compiler::fgUpdateRefCntForClone(BasicBlock* addedToBlock, GenTreePtr clonedTree)
{
assert(clonedTree->gtOper != GT_STMT);
if (lvaLocalVarRefCounted)
{
compCurBB = addedToBlock;
- fgWalkTreePre(&clonedTree, Compiler::lvaIncRefCntsCB, (void *)this, true);
+ fgWalkTreePre(&clonedTree, Compiler::lvaIncRefCntsCB, (void*)this, true);
}
}
/*****************************************************************************
*/
-void Compiler::fgUpdateRefCntForExtract(GenTreePtr wholeTree,
- GenTreePtr keptTree)
+void Compiler::fgUpdateRefCntForExtract(GenTreePtr wholeTree, GenTreePtr keptTree)
{
if (lvaLocalVarRefCounted)
{
@@ -9017,20 +9091,18 @@ void Compiler::fgUpdateRefCntForExtract(GenTreePtr wholeTree,
*/
if (keptTree)
{
- fgWalkTreePre(&keptTree, Compiler::lvaIncRefCntsCB, (void *)this, true);
+ fgWalkTreePre(&keptTree, Compiler::lvaIncRefCntsCB, (void*)this, true);
}
- fgWalkTreePre( &wholeTree, Compiler::lvaDecRefCntsCB, (void *)this, true);
+ fgWalkTreePre(&wholeTree, Compiler::lvaDecRefCntsCB, (void*)this, true);
}
}
-VARSET_VALRET_TP Compiler::fgGetVarBits(GenTreePtr tree)
+VARSET_VALRET_TP Compiler::fgGetVarBits(GenTreePtr tree)
{
VARSET_TP VARSET_INIT_NOCOPY(varBits, VarSetOps::MakeEmpty(this));
- assert(tree->gtOper == GT_LCL_VAR ||
- tree->gtOper == GT_LCL_FLD ||
- tree->gtOper == GT_REG_VAR);
+ assert(tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_FLD || tree->gtOper == GT_REG_VAR);
unsigned int lclNum = tree->gtLclVarCommon.gtLclNum;
LclVarDsc* varDsc = lvaTable + lclNum;
@@ -9040,12 +9112,10 @@ VARSET_VALRET_TP Compiler::fgGetVarBits(GenTreePtr tree)
}
else if (varDsc->lvType == TYP_STRUCT && varDsc->lvPromoted)
{
- for (unsigned i = varDsc->lvFieldLclStart;
- i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
- ++i)
+ for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
noway_assert(lvaTable[i].lvIsStructField);
- if (lvaTable[i].lvTracked)
+ if (lvaTable[i].lvTracked)
{
unsigned varIndex = lvaTable[i].lvVarIndex;
noway_assert(varIndex < lvaTrackedCount);
@@ -9062,14 +9132,14 @@ VARSET_VALRET_TP Compiler::fgGetVarBits(GenTreePtr tree)
* imported because they are not reachable, or they have been optimized away).
*/
-void Compiler::fgRemoveEmptyBlocks()
+void Compiler::fgRemoveEmptyBlocks()
{
- BasicBlock* cur;
- BasicBlock* nxt;
+ BasicBlock* cur;
+ BasicBlock* nxt;
/* If we remove any blocks, we'll have to do additional work */
- unsigned removedBlks = 0;
+ unsigned removedBlks = 0;
for (cur = fgFirstBB; cur != nullptr; cur = nxt)
{
@@ -9079,7 +9149,7 @@ void Compiler::fgRemoveEmptyBlocks()
/* Should this block be removed? */
- if (!(cur->bbFlags & BBF_IMPORTED))
+ if (!(cur->bbFlags & BBF_IMPORTED))
{
noway_assert(cur->isEmpty());
@@ -9096,8 +9166,7 @@ void Compiler::fgRemoveEmptyBlocks()
#ifdef DEBUG
if (verbose)
{
- printf("BB%02u was not imported, marked as removed (%d)\n",
- cur->bbNum, removedBlks);
+ printf("BB%02u was not imported, marked as removed (%d)\n", cur->bbNum, removedBlks);
}
#endif // DEBUG
@@ -9115,8 +9184,10 @@ void Compiler::fgRemoveEmptyBlocks()
/* If no blocks were removed, we're done */
- if (removedBlks == 0)
+ if (removedBlks == 0)
+ {
return;
+ }
/* Update all references in the exception handler table.
* Mark the new blocks as non-removable.
@@ -9125,15 +9196,13 @@ void Compiler::fgRemoveEmptyBlocks()
* Check for this case and remove the entry from the EH table.
*/
- unsigned XTnum;
- EHblkDsc* HBtab;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
INDEBUG(unsigned delCnt = 0;)
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
-AGAIN:
+ AGAIN:
/* If the beginning of the try block was not imported, we
* need to remove the entry from the EH table. */
@@ -9145,8 +9214,7 @@ AGAIN:
{
printf("Beginning of try block (BB%02u) not imported "
"- remove index #%u from the EH table\n",
- HBtab->ebdTryBeg->bbNum,
- XTnum + delCnt);
+ HBtab->ebdTryBeg->bbNum, XTnum + delCnt);
}
delCnt++;
#endif // DEBUG
@@ -9164,7 +9232,7 @@ AGAIN:
break; // no more entries (we deleted the last one), so exit the loop
}
- /* At this point we know we have a valid try block */
+/* At this point we know we have a valid try block */
#ifdef DEBUG
assert(HBtab->ebdTryBeg->bbFlags & BBF_IMPORTED);
@@ -9219,17 +9287,15 @@ void Compiler::fgRemoveLinearOrderDependencies(GenTreePtr tree)
{
assert(fgOrder == FGOrderLinear);
GenTreeStmt* stmt = tree->AsStmt();
-
+
// No embedded statements.
- if (stmt->gtStmtIsTopLevel() &&
- (stmt->gtNext == nullptr || stmt->gtNextStmt->gtStmtIsTopLevel()))
+ if (stmt->gtStmtIsTopLevel() && (stmt->gtNext == nullptr || stmt->gtNextStmt->gtStmtIsTopLevel()))
{
return;
}
// stmt is last embedded statement, assume we have a tree order: prevStmt->stmt->nextStmt.
// We are dropping "stmt". So fix the next link for "prevStmt" and prev link for "nextStmt".
- if (stmt->gtStmtIsEmbedded() &&
- (stmt->gtNext == nullptr || stmt->gtNextStmt->gtStmtIsTopLevel()))
+ if (stmt->gtStmtIsEmbedded() && (stmt->gtNext == nullptr || stmt->gtNextStmt->gtStmtIsTopLevel()))
{
if (stmt->gtStmtList->gtPrev)
{
@@ -9268,7 +9334,7 @@ void Compiler::fgRemoveLinearOrderDependencies(GenTreePtr tree)
//
// In the end we should obtain:
// (top (emb2, emb3) ). Callers should fix bbTreeList. We only fix tree order.
- //
+ //
// So in tree order:
// BEFORE: top:t1 -> stmt:t1 -> emb2:t1 -> stmt:t2 -> emb3:t1 -> stmt:t3 -> top:t2
// AFTER : top:t1 -> emb2:t1 -> emb3:t1 -> top:t2
@@ -9309,7 +9375,7 @@ void Compiler::fgRemoveLinearOrderDependencies(GenTreePtr tree)
// ... -> emb2List -> emb2Expr -> -> emb3List -> emb3Expr -> stmtNode -> ... -> stmtExpr
// clang-format on
- // Drop stmtNodes that occur between emb2Expr and emb3List.
+ // Drop stmtNodes that occur between emb2Expr and emb3List.
if (lastNestEmbedNode)
{
lastNestEmbedNode->gtNext = node;
@@ -9334,14 +9400,16 @@ void Compiler::fgRemoveLinearOrderDependencies(GenTreePtr tree)
{
// We are just done visiting the last node of a first level embedded stmt.
- // Before:
- // stmtList -> emb2List -> emb2Expr -> stmtNode -> stmtNode -> emb3List -> emb3Expr -> stmtNode -> ... -> stmtExpr
+ // Before:
+ // stmtList -> emb2List -> emb2Expr -> stmtNode -> stmtNode -> emb3List -> emb3Expr -> stmtNode -> ...
+ // -> stmtExpr
// "stmt" is top level.
- //
+ //
// Currently, "node" is emb2Expr and "lastNestEmbedNode" is "don't care".
//
// After:
- // node = stmtNode -> stmtNode -> emb3List -> emb3Expr -> stmtNode -> ... -> stmtExpr
+ // node = stmtNode -> stmtNode -> emb3List -> emb3Expr -> stmtNode -> ... ->
+ // stmtExpr
// nullptr <- emb2List -> emb2Expr -> nullptr
//
// stmtList -> emb2List -> emb2Expr -> ...
@@ -9372,13 +9440,15 @@ void Compiler::fgRemoveLinearOrderDependencies(GenTreePtr tree)
if (stmt->gtStmtIsEmbedded())
{
//
- // Before:
- // ... -> emb2List -> emb2Expr -> stmtNode -> stmtNode -> emb3List -> emb3Expr -> stmtNode -> ... -> stmtExpr -> topNode
- //
+ // Before:
+ // ... -> emb2List -> emb2Expr -> stmtNode -> stmtNode -> emb3List -> emb3Expr -> stmtNode -> ... -> stmtExpr ->
+ // topNode
+ //
// Currently, "node" is topNode (i.e., stmtExpr->gtNext) and "lastNestEmbedNode" is emb3Expr.
//
// After:
- // ... -> emb2List -> emb2Expr -> -> emb3List -> emb3Expr -> -> topNode
+ // ... -> emb2List -> emb2Expr -> -> emb3List -> emb3Expr -> ->
+ // topNode
//
if (node)
{
@@ -9398,18 +9468,19 @@ void Compiler::fgRemoveLinearOrderDependencies(GenTreePtr tree)
*
*/
-void Compiler::fgRemoveStmt(BasicBlock* block,
- GenTreePtr node,
- // whether to decrement ref counts for tracked vars in statement
- bool updateRefCount)
+void Compiler::fgRemoveStmt(BasicBlock* block,
+ GenTreePtr node,
+ // whether to decrement ref counts for tracked vars in statement
+ bool updateRefCount)
{
noway_assert(node);
- GenTreeStmt* tree = block->firstStmt();
- GenTreeStmt* stmt = node->AsStmt();
+ GenTreeStmt* tree = block->firstStmt();
+ GenTreeStmt* stmt = node->AsStmt();
#ifdef DEBUG
- if (verbose && stmt->gtStmtExpr->gtOper != GT_NOP) // Don't print if it is a GT_NOP. Too much noise from the inliner.
+ if (verbose &&
+ stmt->gtStmtExpr->gtOper != GT_NOP) // Don't print if it is a GT_NOP. Too much noise from the inliner.
{
printf("\nRemoving statement ");
printTreeID(stmt);
@@ -9418,9 +9489,7 @@ void Compiler::fgRemoveStmt(BasicBlock* block,
}
#endif // DEBUG
- if (opts.compDbgCode &&
- stmt->gtPrev != stmt &&
- stmt->gtStmtILoffsx != BAD_IL_OFFSET)
+ if (opts.compDbgCode && stmt->gtPrev != stmt && stmt->gtStmtILoffsx != BAD_IL_OFFSET)
{
/* TODO: For debuggable code, should we remove significant
statement boundaries. Or should we leave a GT_NO_OP in its place? */
@@ -9430,11 +9499,11 @@ void Compiler::fgRemoveStmt(BasicBlock* block,
{
fgRemoveLinearOrderDependencies(stmt);
}
-
+
/* Is it the first statement in the list? */
GenTreeStmt* firstStmt = block->firstStmt();
- if (firstStmt == stmt)
+ if (firstStmt == stmt)
{
if (firstStmt->gtNext == nullptr)
{
@@ -9453,7 +9522,7 @@ void Compiler::fgRemoveStmt(BasicBlock* block,
/* Is it the last statement in the list? */
- if (stmt == block->lastStmt())
+ if (stmt == block->lastStmt())
{
stmt->gtPrev->gtNext = nullptr;
block->bbTreeList->gtPrev = stmt->gtPrev;
@@ -9479,10 +9548,7 @@ DONE:
{
if (fgStmtListThreaded)
{
- fgWalkTreePre(&stmt->gtStmtExpr,
- Compiler::lvaDecRefCntsCB,
- (void*) this,
- true);
+ fgWalkTreePre(&stmt->gtStmtExpr, Compiler::lvaDecRefCntsCB, (void*)this, true);
}
}
}
@@ -9490,7 +9556,7 @@ DONE:
#ifdef DEBUG
if (verbose)
{
- if (block->bbTreeList == 0)
+ if (block->bbTreeList == nullptr)
{
printf("\nBB%02u becomes empty", block->bbNum);
}
@@ -9503,27 +9569,26 @@ DONE:
// Returns true if the operator is involved in control-flow
// TODO-Cleanup: Move this into genTreeKinds in genTree.h
-inline
-bool OperIsControlFlow(genTreeOps oper)
+inline bool OperIsControlFlow(genTreeOps oper)
{
switch (oper)
{
- case GT_JTRUE:
- case GT_SWITCH:
- case GT_LABEL:
+ case GT_JTRUE:
+ case GT_SWITCH:
+ case GT_LABEL:
- case GT_CALL:
- case GT_JMP:
+ case GT_CALL:
+ case GT_JMP:
- case GT_RETURN:
- case GT_RETFILT:
+ case GT_RETURN:
+ case GT_RETFILT:
#if !FEATURE_EH_FUNCLETS
- case GT_END_LFIN:
+ case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
- return true;
+ return true;
- default:
- return false;
+ default:
+ return false;
}
}
@@ -9532,10 +9597,12 @@ bool OperIsControlFlow(genTreeOps oper)
* Returns true if it did remove the statement.
*/
-bool Compiler::fgCheckRemoveStmt(BasicBlock* block, GenTreePtr node)
+bool Compiler::fgCheckRemoveStmt(BasicBlock* block, GenTreePtr node)
{
if (opts.compDbgCode)
+ {
return false;
+ }
GenTreeStmt* stmt = node->AsStmt();
@@ -9543,11 +9610,15 @@ bool Compiler::fgCheckRemoveStmt(BasicBlock* block, GenTreePtr no
genTreeOps oper = tree->OperGet();
if (OperIsControlFlow(oper) || oper == GT_NO_OP)
+ {
return false;
+ }
// TODO: Use a recursive version of gtNodeHasSideEffects()
if (tree->gtFlags & GTF_SIDE_EFFECT)
+ {
return false;
+ }
fgRemoveStmt(block, stmt);
return true;
@@ -9557,34 +9628,45 @@ bool Compiler::fgCheckRemoveStmt(BasicBlock* block, GenTreePtr no
*
*
*/
-bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext)
+bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext)
{
- if ((block == NULL) || (bNext == NULL))
+ if ((block == nullptr) || (bNext == nullptr))
+ {
return false;
+ }
noway_assert(block->bbNext == bNext);
if (block->bbJumpKind != BBJ_NONE)
+ {
return false;
+ }
// If the next block has multiple incoming edges, we can still compact if the first block is empty.
// However, not if it is the beginning of a handler.
- if (bNext->countOfInEdges() != 1 &&
+ if (bNext->countOfInEdges() != 1 &&
(!block->isEmpty() || (block->bbFlags & BBF_FUNCLET_BEG) || (block->bbCatchTyp != BBCT_NONE)))
+ {
return false;
+ }
if (bNext->bbFlags & BBF_DONT_REMOVE)
+ {
return false;
+ }
// Don't compact the first block if it was specially created as a scratch block.
if (fgBBisScratch(block))
+ {
return false;
+ }
#if defined(_TARGET_ARM_)
- // We can't compact a finally target block, as we need to generate special code for such blocks during code generation
+ // We can't compact a finally target block, as we need to generate special code for such blocks during code
+ // generation
if ((bNext->bbFlags & BBF_FINALLY_TARGET) != 0)
return false;
-#endif
+#endif
// We don't want to compact blocks that are in different Hot/Cold regions
//
@@ -9607,13 +9689,14 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock*
for (flowList* pred = bNext->bbPreds; pred; pred = pred->flNext)
{
if (pred->flBlock->bbJumpKind == BBJ_SWITCH)
+ {
return false;
+ }
}
return true;
}
-
/*****************************************************************************************************
*
* Function called to compact two given blocks in the flowgraph
@@ -9624,14 +9707,14 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock*
* It will keep the flowgraph data in synch - bbNum, bbRefs, bbPreds
*/
-void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
+void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
{
- noway_assert(block != NULL);
+ noway_assert(block != nullptr);
noway_assert((block->bbFlags & BBF_REMOVED) == 0);
noway_assert(block->bbJumpKind == BBJ_NONE);
noway_assert(bNext == block->bbNext);
- noway_assert(bNext != NULL);
+ noway_assert(bNext != nullptr);
noway_assert((bNext->bbFlags & BBF_REMOVED) == 0);
noway_assert(bNext->countOfInEdges() == 1 || block->isEmpty());
noway_assert(bNext->bbPreds);
@@ -9643,14 +9726,14 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
// Make sure the second block is not the start of a TRY block or an exception handler
noway_assert(bNext->bbCatchTyp == BBCT_NONE);
- noway_assert((bNext->bbFlags & BBF_TRY_BEG) == 0);
+ noway_assert((bNext->bbFlags & BBF_TRY_BEG) == 0);
noway_assert((bNext->bbFlags & BBF_DONT_REMOVE) == 0);
/* both or none must have an exception handler */
noway_assert(block->hasTryIndex() == bNext->hasTryIndex());
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nCompacting blocks BB%02u and BB%02u:\n", block->bbNum, bNext->bbNum);
}
@@ -9665,7 +9748,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
for (flowList* pred = bNext->bbPreds; pred; pred = pred->flNext)
{
fgReplaceJumpTarget(pred->flBlock, block, bNext);
-
+
if (pred->flBlock != block)
{
fgAddRefPred(block, pred->flBlock);
@@ -9691,10 +9774,10 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
GenTreePtr bNextFirst = bNext->firstStmt();
// Does the second have any phis?
- if (bNextFirst != NULL && bNextFirst != bNextNonPhi1)
+ if (bNextFirst != nullptr && bNextFirst != bNextNonPhi1)
{
GenTreePtr bNextLast = bNextFirst->gtPrev;
- assert(bNextLast->gtNext == NULL);
+ assert(bNextLast->gtNext == nullptr);
// Does "blk" have phis?
if (blkNonPhi1 != blkFirst)
@@ -9703,7 +9786,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
// Insert after the last phi of "block."
// First, bNextPhis after last phi of block.
GenTreePtr blkLastPhi;
- if (blkNonPhi1 != NULL)
+ if (blkNonPhi1 != nullptr)
{
blkLastPhi = blkNonPhi1->gtPrev;
}
@@ -9716,8 +9799,8 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
bNextFirst->gtPrev = blkLastPhi;
// Now, rest of "block" after last phi of "bNext".
- GenTreePtr bNextLastPhi = NULL;
- if (bNextNonPhi1 != NULL)
+ GenTreePtr bNextLastPhi = nullptr;
+ if (bNextNonPhi1 != nullptr)
{
bNextLastPhi = bNextNonPhi1->gtPrev;
}
@@ -9727,7 +9810,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
}
bNextLastPhi->gtNext = blkNonPhi1;
- if (blkNonPhi1 != NULL)
+ if (blkNonPhi1 != nullptr)
{
blkNonPhi1->gtPrev = bNextLastPhi;
}
@@ -9739,21 +9822,21 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
// Now update the bbTreeList of "bNext".
bNext->bbTreeList = bNextNonPhi1;
- if (bNextNonPhi1 != NULL)
+ if (bNextNonPhi1 != nullptr)
{
bNextNonPhi1->gtPrev = bNextLast;
}
}
else
{
- if (blkFirst != NULL) // If "block" has no statements, fusion will work fine...
+ if (blkFirst != nullptr) // If "block" has no statements, fusion will work fine...
{
// First, bNextPhis at start of block.
GenTreePtr blkLast = blkFirst->gtPrev;
- block->bbTreeList = bNextFirst;
+ block->bbTreeList = bNextFirst;
// Now, rest of "block" (if it exists) after last phi of "bNext".
- GenTreePtr bNextLastPhi = NULL;
- if (bNextNonPhi1 != NULL)
+ GenTreePtr bNextLastPhi = nullptr;
+ if (bNextNonPhi1 != nullptr)
{
// There is a first non phi, so the last phi is before it.
bNextLastPhi = bNextNonPhi1->gtPrev;
@@ -9763,12 +9846,12 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
// All the statements are phi defns, so the last one is the prev of the first.
bNextLastPhi = bNextFirst->gtPrev;
}
- bNextFirst->gtPrev = blkLast;
+ bNextFirst->gtPrev = blkLast;
bNextLastPhi->gtNext = blkFirst;
- blkFirst->gtPrev = bNextLastPhi;
+ blkFirst->gtPrev = bNextLastPhi;
// Now update the bbTreeList of "bNext"
bNext->bbTreeList = bNextNonPhi1;
- if (bNextNonPhi1 != NULL)
+ if (bNextNonPhi1 != nullptr)
{
bNextNonPhi1->gtPrev = bNextLast;
}
@@ -9794,7 +9877,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
/* append list2 to list 1 */
stmtLast1->gtNext = stmtList2;
- stmtList2->gtPrev = stmtLast1;
+ stmtList2->gtPrev = stmtLast1;
stmtList1->gtPrev = stmtLast2;
}
}
@@ -9811,9 +9894,8 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
// or if both block and bNext have non-zero weights
// then we select the highest weight block.
- if ((block->bbFlags & BBF_PROF_WEIGHT) ||
- (bNext->bbFlags & BBF_PROF_WEIGHT) ||
- (block->bbWeight && bNext->bbWeight) )
+ if ((block->bbFlags & BBF_PROF_WEIGHT) || (bNext->bbFlags & BBF_PROF_WEIGHT) ||
+ (block->bbWeight && bNext->bbWeight))
{
// We are keeping block so update its fields
// when bNext has a greater weight
@@ -9825,7 +9907,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
block->bbFlags |= (bNext->bbFlags & BBF_PROF_WEIGHT); // Set the profile weight flag (if necessary)
if (block->bbWeight != 0)
{
- block->bbFlags &= ~BBF_RUN_RARELY; // Clear any RarelyRun flag
+ block->bbFlags &= ~BBF_RUN_RARELY; // Clear any RarelyRun flag
}
}
}
@@ -9834,7 +9916,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
{
noway_assert((block->bbWeight == BB_ZERO_WEIGHT) || (bNext->bbWeight == BB_ZERO_WEIGHT));
block->bbWeight = BB_ZERO_WEIGHT;
- block->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag
+ block->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag
}
/* set the right links */
@@ -9874,12 +9956,11 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
}
}
- if (((block->bbFlags & BBF_INTERNAL) != 0) &&
- ((bNext->bbFlags & BBF_INTERNAL) == 0) )
+ if (((block->bbFlags & BBF_INTERNAL) != 0) && ((bNext->bbFlags & BBF_INTERNAL) == 0))
{
// If 'block' is an internal block and 'bNext' isn't, then adjust the flags set on 'block'.
- block->bbFlags &= ~BBF_INTERNAL; // Clear the BBF_INTERNAL flag
- block->bbFlags |= BBF_IMPORTED; // Set the BBF_IMPORTED flag
+ block->bbFlags &= ~BBF_INTERNAL; // Clear the BBF_INTERNAL flag
+ block->bbFlags |= BBF_IMPORTED; // Set the BBF_IMPORTED flag
}
/* Update the flags for block with those found in bNext */
@@ -9918,40 +9999,40 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
switch (bNext->bbJumpKind)
{
- case BBJ_CALLFINALLY:
- // Propagate RETLESS property
- block->bbFlags |= (bNext->bbFlags & BBF_RETLESS_CALL);
+ case BBJ_CALLFINALLY:
+ // Propagate RETLESS property
+ block->bbFlags |= (bNext->bbFlags & BBF_RETLESS_CALL);
- __fallthrough;
+ __fallthrough;
+
+ case BBJ_COND:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ block->bbJumpDest = bNext->bbJumpDest;
- case BBJ_COND:
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- block->bbJumpDest = bNext->bbJumpDest;
+ /* Update the predecessor list for 'bNext->bbJumpDest' */
+ fgReplacePred(bNext->bbJumpDest, bNext, block);
- /* Update the predecessor list for 'bNext->bbJumpDest' */
- fgReplacePred(bNext->bbJumpDest, bNext, block);
+ /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */
+ if (bNext->bbJumpKind == BBJ_COND && bNext->bbJumpDest != bNext->bbNext)
+ {
+ fgReplacePred(bNext->bbNext, bNext, block);
+ }
+ break;
- /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */
- if (bNext->bbJumpKind == BBJ_COND && bNext->bbJumpDest != bNext->bbNext)
- {
+ case BBJ_NONE:
+ /* Update the predecessor list for 'bNext->bbNext' */
fgReplacePred(bNext->bbNext, bNext, block);
- }
- break;
-
- case BBJ_NONE:
- /* Update the predecessor list for 'bNext->bbNext' */
- fgReplacePred(bNext->bbNext, bNext, block);
- break;
+ break;
- case BBJ_EHFILTERRET:
- fgReplacePred(bNext->bbJumpDest, bNext, block);
- break;
+ case BBJ_EHFILTERRET:
+ fgReplacePred(bNext->bbJumpDest, bNext, block);
+ break;
- case BBJ_EHFINALLYRET:
+ case BBJ_EHFINALLYRET:
{
- unsigned hndIndex = block->getHndIndex();
- EHblkDsc* ehDsc = ehGetDsc(hndIndex);
+ unsigned hndIndex = block->getHndIndex();
+ EHblkDsc* ehDsc = ehGetDsc(hndIndex);
if (ehDsc->HasFinallyHandler()) // No need to do this for fault handlers
{
@@ -9963,8 +10044,10 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
{
- if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
+ if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
+ {
continue;
+ }
noway_assert(bcall->isBBCallAlwaysPair());
fgReplacePred(bcall->bbNext, bNext, block);
@@ -9973,21 +10056,21 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
}
break;
- case BBJ_THROW:
- case BBJ_RETURN:
- /* no jumps or fall through blocks to set here */
- break;
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ /* no jumps or fall through blocks to set here */
+ break;
- case BBJ_SWITCH:
- block->bbJumpSwt = bNext->bbJumpSwt;
- // We are moving the switch jump from bNext to block. Examine the jump targets
- // of the BBJ_SWITCH at bNext and replace the predecessor to 'bNext' with ones to 'block'
- fgChangeSwitchBlock(bNext, block);
- break;
+ case BBJ_SWITCH:
+ block->bbJumpSwt = bNext->bbJumpSwt;
+ // We are moving the switch jump from bNext to block. Examine the jump targets
+ // of the BBJ_SWITCH at bNext and replace the predecessor to 'bNext' with ones to 'block'
+ fgChangeSwitchBlock(bNext, block);
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
fgUpdateLoopsAfterCompacting(block, bNext);
@@ -10009,7 +10092,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNe
#endif // DEBUG
}
-void Compiler::fgUpdateLoopsAfterCompacting(BasicBlock * block, BasicBlock* bNext)
+void Compiler::fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext)
{
    /* Check if the removed block is not part of the loop table */
noway_assert(bNext);
@@ -10020,25 +10103,27 @@ void Compiler::fgUpdateLoopsAfterCompacting(BasicBlock * block, BasicBlock* bNex
* loop unrolling or conditional folding */
if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED)
+ {
continue;
+ }
/* Check the loop head (i.e. the block preceding the loop) */
- if (optLoopTable[loopNum].lpHead == bNext)
+ if (optLoopTable[loopNum].lpHead == bNext)
{
optLoopTable[loopNum].lpHead = block;
}
/* Check the loop bottom */
- if (optLoopTable[loopNum].lpBottom == bNext)
+ if (optLoopTable[loopNum].lpBottom == bNext)
{
optLoopTable[loopNum].lpBottom = block;
}
/* Check the loop exit */
- if (optLoopTable[loopNum].lpExit == bNext)
+ if (optLoopTable[loopNum].lpExit == bNext)
{
noway_assert(optLoopTable[loopNum].lpExitCnt == 1);
optLoopTable[loopNum].lpExit = block;
@@ -10046,7 +10131,7 @@ void Compiler::fgUpdateLoopsAfterCompacting(BasicBlock * block, BasicBlock* bNex
/* Check the loop entry */
- if (optLoopTable[loopNum].lpEntry == bNext)
+ if (optLoopTable[loopNum].lpEntry == bNext)
{
optLoopTable[loopNum].lpEntry = block;
}
@@ -10060,7 +10145,7 @@ void Compiler::fgUpdateLoopsAfterCompacting(BasicBlock * block, BasicBlock* bNex
* This function cannot remove the first block.
*/
-void Compiler::fgUnreachableBlock(BasicBlock* block)
+void Compiler::fgUnreachableBlock(BasicBlock* block)
{
// genReturnBB should never be removed, as we might have special hookups there.
// Therefore, we should never come here to remove the statements in the genReturnBB block.
@@ -10070,12 +10155,14 @@ void Compiler::fgUnreachableBlock(BasicBlock* block)
noway_assert(block != genReturnBB);
if (block->bbFlags & BBF_REMOVED)
+ {
return;
+ }
- /* Removing an unreachable block */
+/* Removing an unreachable block */
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nRemoving unreachable BB%02u\n", block->bbNum);
}
@@ -10085,7 +10172,7 @@ void Compiler::fgUnreachableBlock(BasicBlock* block)
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
assert(!block->bbPrev->isBBCallAlwaysPair()); // can't remove the BBJ_ALWAYS of a BBJ_CALLFINALLY / BBJ_ALWAYS pair
-#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
+#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
/* First walk the statement trees in this basic block and delete each stmt */
@@ -10097,7 +10184,7 @@ void Compiler::fgUnreachableBlock(BasicBlock* block)
GenTreePtr firstNonPhi = block->FirstNonPhiDef();
if (block->bbTreeList != firstNonPhi)
{
- if (firstNonPhi != NULL)
+ if (firstNonPhi != nullptr)
{
firstNonPhi->gtPrev = block->lastStmt();
}
@@ -10108,7 +10195,7 @@ void Compiler::fgUnreachableBlock(BasicBlock* block)
{
fgRemoveStmt(block, stmt);
}
- noway_assert(block->bbTreeList == 0);
+ noway_assert(block->bbTreeList == nullptr);
/* Next update the loop table and bbWeights */
optUpdateLoopsBeforeRemoveBlock(block);
@@ -10120,18 +10207,17 @@ void Compiler::fgUnreachableBlock(BasicBlock* block)
fgRemoveBlockAsPred(block);
}
-
/*****************************************************************************************************
*
* Function called to remove or morph a GT_JTRUE statement when we jump to the same
 * block whether the condition is true or false.
*/
-void Compiler::fgRemoveJTrue(BasicBlock *block)
+void Compiler::fgRemoveJTrue(BasicBlock* block)
{
noway_assert(block->bbJumpKind == BBJ_COND && block->bbJumpDest == block->bbNext);
flowList* flow = fgGetPredForBlock(block->bbNext, block);
- noway_assert(flow->flDupCount==2);
+ noway_assert(flow->flDupCount == 2);
// Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount.
block->bbJumpKind = BBJ_NONE;
@@ -10140,10 +10226,13 @@ void Compiler::fgRemoveJTrue(BasicBlock *block)
--flow->flDupCount;
#ifdef DEBUG
- block->bbJumpDest = NULL;
+ block->bbJumpDest = nullptr;
if (verbose)
- printf("Block BB%02u becoming a BBJ_NONE to BB%02u (jump target is the same whether the condition is true or false)\n",
- block->bbNum, block->bbNext->bbNum);
+ {
+ printf("Block BB%02u becoming a BBJ_NONE to BB%02u (jump target is the same whether the condition is true or "
+ "false)\n",
+ block->bbNum, block->bbNext->bbNum);
+ }
#endif
/* Remove the block jump condition */
@@ -10154,16 +10243,16 @@ void Compiler::fgRemoveJTrue(BasicBlock *block)
noway_assert(tree->gtOper == GT_JTRUE);
- GenTree* sideEffList = NULL;
+ GenTree* sideEffList = nullptr;
if (tree->gtFlags & GTF_SIDE_EFFECT)
{
if (compRationalIRForm)
{
// if we are in rational form don't try to extract the side effects
- // because gtExtractSideEffList will create new comma nodes
- // (which we would have to rationalize) and fgMorphBlockStmt can't
- // handle embedded statements.
+ // because gtExtractSideEffList will create new comma nodes
+ // (which we would have to rationalize) and fgMorphBlockStmt can't
+ // handle embedded statements.
// Instead just transform the JTRUE into a NEG which has the effect of
// evaluating the side-effecting tree and perform a benign operation on it.
@@ -10181,7 +10270,8 @@ void Compiler::fgRemoveJTrue(BasicBlock *block)
if (verbose)
{
printf("Extracted side effects list from condition...\n");
- gtDispTree(sideEffList); printf("\n");
+ gtDispTree(sideEffList);
+ printf("\n");
}
#endif
}
@@ -10189,10 +10279,12 @@ void Compiler::fgRemoveJTrue(BasicBlock *block)
}
// Delete the cond test or replace it with the side effect tree
- if (sideEffList == NULL)
+ if (sideEffList == nullptr)
{
if (!compRationalIRForm || (tree->gtFlags & GTF_SIDE_EFFECT) == 0)
+ {
fgRemoveStmt(block, test);
+ }
}
else
{
@@ -10202,7 +10294,6 @@ void Compiler::fgRemoveJTrue(BasicBlock *block)
}
}
-
/*****************************************************************************************************
*
* Function to return the last basic block in the main part of the function. With funclets, it is
@@ -10210,18 +10301,18 @@ void Compiler::fgRemoveJTrue(BasicBlock *block)
* An inclusive end of the main method.
*/
-BasicBlock* Compiler::fgLastBBInMainFunction()
+BasicBlock* Compiler::fgLastBBInMainFunction()
{
#if FEATURE_EH_FUNCLETS
- if (fgFirstFuncletBB != NULL)
+ if (fgFirstFuncletBB != nullptr)
{
return fgFirstFuncletBB->bbPrev;
}
#endif // FEATURE_EH_FUNCLETS
- assert(fgLastBB->bbNext == NULL);
+ assert(fgLastBB->bbNext == nullptr);
return fgLastBB;
}
@@ -10234,23 +10325,22 @@ BasicBlock* Compiler::fgLastBBInMainFunction()
* An exclusive end of the main method.
*/
-BasicBlock* Compiler::fgEndBBAfterMainFunction()
+BasicBlock* Compiler::fgEndBBAfterMainFunction()
{
#if FEATURE_EH_FUNCLETS
- if (fgFirstFuncletBB != NULL)
+ if (fgFirstFuncletBB != nullptr)
{
return fgFirstFuncletBB;
}
#endif // FEATURE_EH_FUNCLETS
- assert(fgLastBB->bbNext == NULL);
+ assert(fgLastBB->bbNext == nullptr);
- return NULL;
+ return nullptr;
}
-
// Removes the block from the bbPrev/bbNext chain
// Updates fgFirstBB and fgLastBB if necessary
// Does not update fgFirstFuncletBB or fgFirstColdBlock (fgUnlinkRange does)
@@ -10275,7 +10365,7 @@ void Compiler::fgUnlinkBlock(BasicBlock* block)
assert(block != fgLastBB);
assert((fgFirstBBScratch == nullptr) || (fgFirstBBScratch == fgFirstBB));
- fgFirstBB = block->bbNext;
+ fgFirstBB = block->bbNext;
fgFirstBB->bbPrev = nullptr;
if (fgFirstBBScratch != nullptr)
@@ -10292,7 +10382,6 @@ void Compiler::fgUnlinkBlock(BasicBlock* block)
}
}
-
/*****************************************************************************************************
*
* Function called to unlink basic block range [bBeg .. bEnd] from the basic block list.
@@ -10300,8 +10389,7 @@ void Compiler::fgUnlinkBlock(BasicBlock* block)
* 'bBeg' can't be the first block.
*/
-void Compiler::fgUnlinkRange(BasicBlock* bBeg,
- BasicBlock* bEnd)
+void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd)
{
assert(bBeg != nullptr);
assert(bEnd != nullptr);
@@ -10315,7 +10403,7 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg,
if (fgLastBB == bEnd)
{
fgLastBB = bPrev;
- noway_assert(fgLastBB->bbNext == NULL);
+ noway_assert(fgLastBB->bbNext == nullptr);
}
// If bEnd was the first Cold basic block update fgFirstColdBlock
@@ -10338,20 +10426,18 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg,
#endif // FEATURE_EH_FUNCLETS
}
-
/*****************************************************************************************************
*
* Function called to remove a basic block
*/
-void Compiler::fgRemoveBlock(BasicBlock* block,
- bool unreachable)
+void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
{
BasicBlock* bPrev = block->bbPrev;
/* The block has to be either unreachable or empty */
- PREFIX_ASSUME(block != NULL);
+ PREFIX_ASSUME(block != nullptr);
JITDUMP("fgRemoveBlock BB%02u\n", block->bbNum);
@@ -10373,12 +10459,12 @@ void Compiler::fgRemoveBlock(BasicBlock* block,
if (unreachable)
{
- PREFIX_ASSUME(bPrev != NULL);
+ PREFIX_ASSUME(bPrev != nullptr);
fgUnreachableBlock(block);
/* If this is the last basic block update fgLastBB */
- if (block == fgLastBB)
+ if (block == fgLastBB)
{
fgLastBB = bPrev;
}
@@ -10400,10 +10486,8 @@ void Compiler::fgRemoveBlock(BasicBlock* block,
NO_WAY("No retless call finally blocks; need unwind target instead");
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
}
- else if (bPrev->bbJumpKind == BBJ_ALWAYS &&
- bPrev->bbJumpDest == block->bbNext &&
- !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) &&
- (block != fgFirstColdBlock) &&
+ else if (bPrev->bbJumpKind == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext &&
+ !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) &&
(block->bbNext != fgFirstColdBlock))
{
// previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE.
@@ -10435,8 +10519,8 @@ void Compiler::fgRemoveBlock(BasicBlock* block,
noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS);
leaveBlk->bbFlags &= ~BBF_DONT_REMOVE;
- leaveBlk->bbRefs = 0;
- leaveBlk->bbPreds = nullptr;
+ leaveBlk->bbRefs = 0;
+ leaveBlk->bbPreds = nullptr;
fgRemoveBlock(leaveBlk, true);
@@ -10460,7 +10544,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block,
noway_assert(block != fgLastBB);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Removing empty BB%02u\n", block->bbNum);
}
@@ -10471,20 +10555,20 @@ void Compiler::fgRemoveBlock(BasicBlock* block,
switch (block->bbJumpKind)
{
- case BBJ_NONE:
- break;
+ case BBJ_NONE:
+ break;
- case BBJ_ALWAYS:
- /* Do not remove a block that jumps to itself - used for while (true){} */
- noway_assert(block->bbJumpDest != block);
+ case BBJ_ALWAYS:
+ /* Do not remove a block that jumps to itself - used for while (true){} */
+ noway_assert(block->bbJumpDest != block);
- /* Empty GOTO can be removed iff bPrev is BBJ_NONE */
- noway_assert(bPrev && bPrev->bbJumpKind == BBJ_NONE);
- break;
+ /* Empty GOTO can be removed iff bPrev is BBJ_NONE */
+ noway_assert(bPrev && bPrev->bbJumpKind == BBJ_NONE);
+ break;
- default:
- noway_assert(!"Empty block of this type cannot be removed!");
- break;
+ default:
+ noway_assert(!"Empty block of this type cannot be removed!");
+ break;
}
#endif // DEBUG
@@ -10547,7 +10631,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block,
/* Remove the block */
- if (bPrev == NULL)
+ if (bPrev == nullptr)
{
/* special case if this is the first BB */
@@ -10589,9 +10673,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block,
/* Are we changing a loop backedge into a forward jump? */
- if ( block->isLoopHead() &&
- (predBlock->bbNum >= block->bbNum) &&
- (predBlock->bbNum <= succBlock->bbNum) )
+ if (block->isLoopHead() && (predBlock->bbNum >= block->bbNum) && (predBlock->bbNum <= succBlock->bbNum))
{
/* First update the loop table and bbWeights */
optUpdateLoopsBeforeRemoveBlock(predBlock);
@@ -10603,130 +10685,130 @@ void Compiler::fgRemoveBlock(BasicBlock* block,
{
// Even if the pred is not a switch, we could have a conditional branch
                    // to the fallthrough, so there could be duplicate preds
- for (unsigned i=0; i<pred->flDupCount; i++)
+ for (unsigned i = 0; i < pred->flDupCount; i++)
+ {
fgAddRefPred(succBlock, predBlock);
+ }
}
/* change all jumps to the removed block */
switch (predBlock->bbJumpKind)
{
- default:
- noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()");
- break;
-
- case BBJ_NONE:
- noway_assert(predBlock == bPrev);
- PREFIX_ASSUME(bPrev != NULL);
+ default:
+ noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()");
+ break;
- /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */
- if (block->bbJumpKind == BBJ_ALWAYS)
- {
- /* bPrev now becomes a BBJ_ALWAYS */
- bPrev->bbJumpKind = BBJ_ALWAYS;
- bPrev->bbJumpDest = succBlock;
- }
- break;
+ case BBJ_NONE:
+ noway_assert(predBlock == bPrev);
+ PREFIX_ASSUME(bPrev != nullptr);
- case BBJ_COND:
- /* The links for the direct predecessor case have already been updated above */
- if (predBlock->bbJumpDest != block)
- {
- succBlock->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
+ /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */
+ if (block->bbJumpKind == BBJ_ALWAYS)
+ {
+ /* bPrev now becomes a BBJ_ALWAYS */
+ bPrev->bbJumpKind = BBJ_ALWAYS;
+ bPrev->bbJumpDest = succBlock;
+ }
break;
- }
- /* Check if both side of the BBJ_COND now jump to the same block */
- if (predBlock->bbNext == succBlock)
- {
- // Make sure we are replacing "block" with "succBlock" in predBlock->bbJumpDest.
+ case BBJ_COND:
+ /* The links for the direct predecessor case have already been updated above */
+ if (predBlock->bbJumpDest != block)
+ {
+ succBlock->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
+ break;
+ }
+
+ /* Check if both side of the BBJ_COND now jump to the same block */
+ if (predBlock->bbNext == succBlock)
+ {
+ // Make sure we are replacing "block" with "succBlock" in predBlock->bbJumpDest.
+ noway_assert(predBlock->bbJumpDest == block);
+ predBlock->bbJumpDest = succBlock;
+ fgRemoveJTrue(predBlock);
+ break;
+ }
+
+ /* Fall through for the jump case */
+ __fallthrough;
+
+ case BBJ_CALLFINALLY:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
noway_assert(predBlock->bbJumpDest == block);
predBlock->bbJumpDest = succBlock;
- fgRemoveJTrue(predBlock);
+ succBlock->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
break;
- }
-
- /* Fall through for the jump case */
- __fallthrough;
-
- case BBJ_CALLFINALLY:
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- noway_assert(predBlock->bbJumpDest == block);
- predBlock->bbJumpDest = succBlock;
- succBlock->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
- break;
- case BBJ_SWITCH:
- // Change any jumps from 'predBlock' (a BBJ_SWITCH) to 'block' to jump to 'succBlock'
- //
- // For the jump targets of 'predBlock' (a BBJ_SWITCH) that jump to 'block'
- // remove the old predecessor at 'block' from 'predBlock' and
- // add the new predecessor at 'succBlock' from 'predBlock'
- //
- fgReplaceSwitchJumpTarget(predBlock, succBlock, block);
- break;
+ case BBJ_SWITCH:
+ // Change any jumps from 'predBlock' (a BBJ_SWITCH) to 'block' to jump to 'succBlock'
+ //
+ // For the jump targets of 'predBlock' (a BBJ_SWITCH) that jump to 'block'
+ // remove the old predecessor at 'block' from 'predBlock' and
+ // add the new predecessor at 'succBlock' from 'predBlock'
+ //
+ fgReplaceSwitchJumpTarget(predBlock, succBlock, block);
+ break;
}
}
}
- if (bPrev != NULL)
+ if (bPrev != nullptr)
{
switch (bPrev->bbJumpKind)
{
- case BBJ_CALLFINALLY:
- // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS
- noway_assert(bPrev->bbFlags & BBF_RETLESS_CALL);
- break;
-
- case BBJ_ALWAYS:
- // Check for branch to next block. Just make sure the BBJ_ALWAYS block is not
- // part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. We do this here and don't rely on fgUpdateFlowGraph
- // because we can be called by ComputeDominators and it expects it to remove this jump to
- // the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS
- // pairing.
+ case BBJ_CALLFINALLY:
+ // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS
+ noway_assert(bPrev->bbFlags & BBF_RETLESS_CALL);
+ break;
- if ((bPrev->bbJumpDest == bPrev->bbNext) &&
- !fgInDifferentRegions(bPrev, bPrev->bbJumpDest)) // We don't remove a branch from Hot -> Cold
- {
- if ((bPrev == fgFirstBB) || !bPrev->bbPrev->isBBCallAlwaysPair())
+ case BBJ_ALWAYS:
+ // Check for branch to next block. Just make sure the BBJ_ALWAYS block is not
+ // part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. We do this here and don't rely on fgUpdateFlowGraph
+ // because we can be called by ComputeDominators and it expects it to remove this jump to
+ // the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS
+ // pairing.
+
+ if ((bPrev->bbJumpDest == bPrev->bbNext) &&
+ !fgInDifferentRegions(bPrev, bPrev->bbJumpDest)) // We don't remove a branch from Hot -> Cold
{
- // It's safe to change the jump type
- bPrev->bbJumpKind = BBJ_NONE;
- bPrev->bbFlags &= ~BBF_NEEDS_GCPOLL;
+ if ((bPrev == fgFirstBB) || !bPrev->bbPrev->isBBCallAlwaysPair())
+ {
+ // It's safe to change the jump type
+ bPrev->bbJumpKind = BBJ_NONE;
+ bPrev->bbFlags &= ~BBF_NEEDS_GCPOLL;
+ }
}
- }
- break;
+ break;
- case BBJ_COND:
- /* Check for branch to next block */
- if (bPrev->bbJumpDest == bPrev->bbNext)
- {
- fgRemoveJTrue(bPrev);
- }
- break;
+ case BBJ_COND:
+ /* Check for branch to next block */
+ if (bPrev->bbJumpDest == bPrev->bbNext)
+ {
+ fgRemoveJTrue(bPrev);
+ }
+ break;
- default:
- break;
+ default:
+ break;
}
ehUpdateForDeletedBlock(block);
}
}
-
/*****************************************************************************
*
 * Function called to connect to a block that previously had a fall through
*/
-BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc,
- BasicBlock* bDst)
+BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)
{
- BasicBlock* jmpBlk = NULL;
+ BasicBlock* jmpBlk = nullptr;
/* If bSrc is non-NULL */
- if (bSrc != NULL)
+ if (bSrc != nullptr)
{
/* If bSrc falls through to a block that is not bDst, we will insert a jump to bDst */
@@ -10735,100 +10817,100 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc,
switch (bSrc->bbJumpKind)
{
- case BBJ_NONE:
- bSrc->bbJumpKind = BBJ_ALWAYS;
- bSrc->bbJumpDest = bDst;
- bSrc->bbJumpDest->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
+ case BBJ_NONE:
+ bSrc->bbJumpKind = BBJ_ALWAYS;
+ bSrc->bbJumpDest = bDst;
+ bSrc->bbJumpDest->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
#ifdef DEBUG
- if (verbose)
- {
- printf("Block BB%02u ended with a BBJ_NONE, Changed to an unconditional jump to BB%02u\n",
- bSrc->bbNum, bSrc->bbJumpDest->bbNum);
- }
+ if (verbose)
+ {
+ printf("Block BB%02u ended with a BBJ_NONE, Changed to an unconditional jump to BB%02u\n",
+ bSrc->bbNum, bSrc->bbJumpDest->bbNum);
+ }
#endif
- break;
-
- case BBJ_CALLFINALLY:
- case BBJ_COND:
-
- // Add a new block after bSrc which jumps to 'bDst'
- jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true);
-
- if (fgComputePredsDone)
- {
- fgAddRefPred(jmpBlk, bSrc, fgGetPredForBlock(bDst, bSrc));
- }
+ break;
- // When adding a new jmpBlk we will set the bbWeight and bbFlags
- //
- if (fgHaveValidEdgeWeights)
- {
- noway_assert(fgComputePredsDone);
+ case BBJ_CALLFINALLY:
+ case BBJ_COND:
- flowList* newEdge = fgGetPredForBlock(jmpBlk, bSrc);
+ // Add a new block after bSrc which jumps to 'bDst'
+ jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true);
- jmpBlk->bbWeight = (newEdge->flEdgeWeightMin + newEdge->flEdgeWeightMax) / 2;
- if (bSrc->bbWeight == 0)
+ if (fgComputePredsDone)
{
- jmpBlk->bbWeight = 0;
+ fgAddRefPred(jmpBlk, bSrc, fgGetPredForBlock(bDst, bSrc));
}
- if (jmpBlk->bbWeight == 0)
+ // When adding a new jmpBlk we will set the bbWeight and bbFlags
+ //
+ if (fgHaveValidEdgeWeights)
{
- jmpBlk->bbFlags |= BBF_RUN_RARELY;
- }
+ noway_assert(fgComputePredsDone);
- BasicBlock::weight_t weightDiff = (newEdge->flEdgeWeightMax - newEdge->flEdgeWeightMin);
- BasicBlock::weight_t slop = BasicBlock::GetSlopFraction(bSrc, bDst);
+ flowList* newEdge = fgGetPredForBlock(jmpBlk, bSrc);
- //
- // If the [min/max] values for our edge weight is within the slop factor
- // then we will set the BBF_PROF_WEIGHT flag for the block
- //
- if (weightDiff <= slop)
+ jmpBlk->bbWeight = (newEdge->flEdgeWeightMin + newEdge->flEdgeWeightMax) / 2;
+ if (bSrc->bbWeight == 0)
+ {
+ jmpBlk->bbWeight = 0;
+ }
+
+ if (jmpBlk->bbWeight == 0)
+ {
+ jmpBlk->bbFlags |= BBF_RUN_RARELY;
+ }
+
+ BasicBlock::weight_t weightDiff = (newEdge->flEdgeWeightMax - newEdge->flEdgeWeightMin);
+ BasicBlock::weight_t slop = BasicBlock::GetSlopFraction(bSrc, bDst);
+
+ //
+                    // If the [min/max] values for our edge weight are within the slop factor
+ // then we will set the BBF_PROF_WEIGHT flag for the block
+ //
+ if (weightDiff <= slop)
+ {
+ jmpBlk->bbFlags |= BBF_PROF_WEIGHT;
+ }
+ }
+ else
{
- jmpBlk->bbFlags |= BBF_PROF_WEIGHT;
+ // We set the bbWeight to the smaller of bSrc->bbWeight or bDst->bbWeight
+ if (bSrc->bbWeight < bDst->bbWeight)
+ {
+ jmpBlk->bbWeight = bSrc->bbWeight;
+ jmpBlk->bbFlags |= (bSrc->bbFlags & BBF_RUN_RARELY);
+ }
+ else
+ {
+ jmpBlk->bbWeight = bDst->bbWeight;
+ jmpBlk->bbFlags |= (bDst->bbFlags & BBF_RUN_RARELY);
+ }
}
- }
- else
- {
- // We set the bbWeight to the smaller of bSrc->bbWeight or bDst->bbWeight
- if (bSrc->bbWeight < bDst->bbWeight)
+
+ jmpBlk->bbJumpDest = bDst;
+ jmpBlk->bbJumpDest->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
+
+ if (fgComputePredsDone)
{
- jmpBlk->bbWeight = bSrc->bbWeight;
- jmpBlk->bbFlags |= (bSrc->bbFlags & BBF_RUN_RARELY);
+ fgReplacePred(bDst, bSrc, jmpBlk);
}
else
{
- jmpBlk->bbWeight = bDst->bbWeight;
- jmpBlk->bbFlags |= (bDst->bbFlags & BBF_RUN_RARELY);
+ jmpBlk->bbFlags |= BBF_IMPORTED;
}
- }
-
- jmpBlk->bbJumpDest = bDst;
- jmpBlk->bbJumpDest->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
-
- if (fgComputePredsDone)
- {
- fgReplacePred(bDst, bSrc, jmpBlk);
- }
- else
- {
- jmpBlk->bbFlags |= BBF_IMPORTED;
- }
#ifdef DEBUG
- if (verbose)
- {
- printf("Added an unconditional jump to BB%02u after block BB%02u\n",
- jmpBlk->bbJumpDest->bbNum, bSrc->bbNum);
- }
+ if (verbose)
+ {
+ printf("Added an unconditional jump to BB%02u after block BB%02u\n", jmpBlk->bbJumpDest->bbNum,
+ bSrc->bbNum);
+ }
#endif // DEBUG
- break;
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
}
else
@@ -10836,14 +10918,13 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc,
// If bSrc is an unconditional branch to the next block
// then change it to a BBJ_NONE block
//
- if ( (bSrc->bbJumpKind == BBJ_ALWAYS) &&
- !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) &&
- (bSrc->bbJumpDest == bSrc->bbNext))
+ if ((bSrc->bbJumpKind == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) &&
+ (bSrc->bbJumpDest == bSrc->bbNext))
{
bSrc->bbJumpKind = BBJ_NONE;
bSrc->bbFlags &= ~BBF_NEEDS_GCPOLL;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Changed an unconditional jump from BB%02u to the next block BB%02u into a BBJ_NONE block\n",
bSrc->bbNum, bSrc->bbNext->bbNum);
@@ -10865,7 +10946,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc,
maximum assigned block number. This affects the block set epoch).
*/
-bool Compiler::fgRenumberBlocks()
+bool Compiler::fgRenumberBlocks()
{
// If we renumber the blocks the dominator information will be out-of-date
if (fgDomsComputed)
@@ -10882,16 +10963,14 @@ bool Compiler::fgRenumberBlocks()
}
#endif // DEBUG
- bool renumbered = false;
- bool newMaxBBNum = false;
- BasicBlock* block;
+ bool renumbered = false;
+ bool newMaxBBNum = false;
+ BasicBlock* block;
unsigned numStart = 1 + (compIsForInlining() ? impInlineInfo->InlinerCompiler->fgBBNumMax : 0);
unsigned num;
- for (block = fgFirstBB , num = numStart;
- block != NULL;
- block = block->bbNext, num++)
+ for (block = fgFirstBB, num = numStart; block != nullptr; block = block->bbNext, num++)
{
noway_assert((block->bbFlags & BBF_REMOVED) == 0);
@@ -10907,7 +10986,7 @@ bool Compiler::fgRenumberBlocks()
block->bbNum = num;
}
- if (block->bbNext == NULL)
+ if (block->bbNext == nullptr)
{
fgLastBB = block;
fgBBcount = num - numStart + 1;
@@ -10916,14 +10995,14 @@ bool Compiler::fgRenumberBlocks()
if (impInlineInfo->InlinerCompiler->fgBBNumMax != num)
{
impInlineInfo->InlinerCompiler->fgBBNumMax = num;
- newMaxBBNum = true;
+ newMaxBBNum = true;
}
}
else
{
if (fgBBNumMax != num)
{
- fgBBNumMax = num;
+ fgBBNumMax = num;
newMaxBBNum = true;
}
}
@@ -10980,22 +11059,23 @@ bool Compiler::fgRenumberBlocks()
* Optionally bSrc can be supplied to indicate that
* bJump must be forward with respect to bSrc
*/
-bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc /* = NULL */)
+bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc /* = NULL */)
{
bool result = false;
- if ((bJump->bbJumpKind == BBJ_COND) ||
- (bJump->bbJumpKind == BBJ_ALWAYS) )
+ if ((bJump->bbJumpKind == BBJ_COND) || (bJump->bbJumpKind == BBJ_ALWAYS))
{
BasicBlock* bDest = bJump->bbJumpDest;
- BasicBlock* bTemp = (bSrc == NULL) ? bJump : bSrc;
+ BasicBlock* bTemp = (bSrc == nullptr) ? bJump : bSrc;
while (true)
{
bTemp = bTemp->bbNext;
- if (bTemp == NULL)
+ if (bTemp == nullptr)
+ {
break;
+ }
if (bTemp == bDest)
{
@@ -11013,15 +11093,17 @@ bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* b
* Function called to expand the set of rarely run blocks
*/
-bool Compiler::fgExpandRarelyRunBlocks()
+bool Compiler::fgExpandRarelyRunBlocks()
{
bool result = false;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("\n*************** In fgExpandRarelyRunBlocks()\n");
+ }
- const char* reason = NULL;
+ const char* reason = nullptr;
#endif
// We expand the number of rarely run blocks by observing
@@ -11030,185 +11112,182 @@ bool Compiler::fgExpandRarelyRunBlocks()
// jump in which both branches go to rarely run blocks then
// the block must itself be rarely run
- BasicBlock* block;
- BasicBlock* bPrev;
+ BasicBlock* block;
+ BasicBlock* bPrev;
- for (bPrev = fgFirstBB, block = bPrev->bbNext;
- block != NULL;
- bPrev = block, block = block->bbNext)
+ for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext)
{
if (bPrev->isRunRarely())
+ {
continue;
+ }
/* bPrev is known to be a normal block here */
switch (bPrev->bbJumpKind)
{
- case BBJ_ALWAYS:
-
- /* Is the jump target rarely run? */
- if (bPrev->bbJumpDest->isRunRarely())
- {
- INDEBUG(reason = "Unconditional jump to a rarely run block";)
- goto NEW_RARELY_RUN;
- }
- break;
-
- case BBJ_CALLFINALLY:
+ case BBJ_ALWAYS:
- // Check for a BBJ_CALLFINALLY followed by a rarely run paired BBJ_ALWAYS
- //
- if (bPrev->isBBCallAlwaysPair())
- {
- /* Is the next block rarely run? */
- if (block->isRunRarely())
+ /* Is the jump target rarely run? */
+ if (bPrev->bbJumpDest->isRunRarely())
{
- INDEBUG(reason = "Call of finally followed by a rarely run block";)
+ INDEBUG(reason = "Unconditional jump to a rarely run block";)
goto NEW_RARELY_RUN;
}
- }
- break;
+ break;
- case BBJ_NONE:
+ case BBJ_CALLFINALLY:
- /* is fall through target rarely run? */
- if (block->isRunRarely())
- {
- INDEBUG(reason = "Falling into a rarely run block";)
- goto NEW_RARELY_RUN;
- }
- break;
+ // Check for a BBJ_CALLFINALLY followed by a rarely run paired BBJ_ALWAYS
+ //
+ if (bPrev->isBBCallAlwaysPair())
+ {
+ /* Is the next block rarely run? */
+ if (block->isRunRarely())
+ {
+ INDEBUG(reason = "Call of finally followed by a rarely run block";)
+ goto NEW_RARELY_RUN;
+ }
+ }
+ break;
- case BBJ_COND:
+ case BBJ_NONE:
- if (!block->isRunRarely())
- continue;
+ /* is fall through target rarely run? */
+ if (block->isRunRarely())
+ {
+ INDEBUG(reason = "Falling into a rarely run block";)
+ goto NEW_RARELY_RUN;
+ }
+ break;
- /* If both targets of the BBJ_COND are run rarely then don't reorder */
- if (bPrev->bbJumpDest->isRunRarely())
- {
- /* bPrev should also be marked as run rarely */
- if (!bPrev->isRunRarely())
+ case BBJ_COND:
+
+ if (!block->isRunRarely())
{
- INDEBUG(reason = "Both sides of a conditional jump are rarely run";)
+ continue;
+ }
-NEW_RARELY_RUN:
- /* If the weight of the block was obtained from a profile run,
- than it's more accurate than our static analysis */
- if (bPrev->bbFlags & BBF_PROF_WEIGHT)
+ /* If both targets of the BBJ_COND are run rarely then don't reorder */
+ if (bPrev->bbJumpDest->isRunRarely())
+ {
+ /* bPrev should also be marked as run rarely */
+ if (!bPrev->isRunRarely())
{
- continue;
- }
- result = true;
+ INDEBUG(reason = "Both sides of a conditional jump are rarely run";)
+
+ NEW_RARELY_RUN:
+ /* If the weight of the block was obtained from a profile run,
+                           then it's more accurate than our static analysis */
+ if (bPrev->bbFlags & BBF_PROF_WEIGHT)
+ {
+ continue;
+ }
+ result = true;
#ifdef DEBUG
- assert(reason != NULL);
- if (verbose)
- {
- printf("%s, marking BB%02u as rarely run\n", reason, bPrev->bbNum);
- }
+ assert(reason != nullptr);
+ if (verbose)
+ {
+ printf("%s, marking BB%02u as rarely run\n", reason, bPrev->bbNum);
+ }
#endif // DEBUG
- /* Must not have previously been marked */
- noway_assert(!bPrev->isRunRarely());
+ /* Must not have previously been marked */
+ noway_assert(!bPrev->isRunRarely());
- /* Mark bPrev as a new rarely run block */
- bPrev->bbSetRunRarely();
+ /* Mark bPrev as a new rarely run block */
+ bPrev->bbSetRunRarely();
- BasicBlock* bPrevPrev = NULL;
- BasicBlock* tmpbb;
+ BasicBlock* bPrevPrev = nullptr;
+ BasicBlock* tmpbb;
- if ((bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0)
- {
- // If we've got a BBJ_CALLFINALLY/BBJ_ALWAYS pair, treat the BBJ_CALLFINALLY as an additional predecessor for the BBJ_ALWAYS block
- tmpbb = bPrev->bbPrev;
- noway_assert(tmpbb != NULL);
-#if FEATURE_EH_FUNCLETS
- noway_assert(tmpbb->isBBCallAlwaysPair());
- bPrevPrev = tmpbb;
-#else
- if (tmpbb->bbJumpKind == BBJ_CALLFINALLY)
+ if ((bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0)
{
+ // If we've got a BBJ_CALLFINALLY/BBJ_ALWAYS pair, treat the BBJ_CALLFINALLY as an
+ // additional predecessor for the BBJ_ALWAYS block
+ tmpbb = bPrev->bbPrev;
+ noway_assert(tmpbb != nullptr);
+#if FEATURE_EH_FUNCLETS
+ noway_assert(tmpbb->isBBCallAlwaysPair());
bPrevPrev = tmpbb;
- }
+#else
+ if (tmpbb->bbJumpKind == BBJ_CALLFINALLY)
+ {
+ bPrevPrev = tmpbb;
+ }
#endif
- }
+ }
- /* Now go back to it's earliest predecessor to see */
- /* if it too should now be marked as rarely run */
- flowList* pred = bPrev->bbPreds;
+ /* Now go back to its earliest predecessor to see */
+ /* if it too should now be marked as rarely run */
+ flowList* pred = bPrev->bbPreds;
- if ((pred != NULL) || (bPrevPrev != NULL))
- {
- // bPrevPrev will be set to the lexically
- // earliest predecessor of bPrev.
-
- while (pred != NULL)
+ if ((pred != nullptr) || (bPrevPrev != nullptr))
{
- if (bPrevPrev == NULL)
- {
- // Initially we select the first block in the bbPreds list
- bPrevPrev = pred->flBlock;
- continue;
- }
+ // bPrevPrev will be set to the lexically
+ // earliest predecessor of bPrev.
- // Walk the flow graph lexically forward from pred->flBlock
- // if we find (block == bPrevPrev) then
- // pred->flBlock is an earlier predecessor.
- for (tmpbb = pred->flBlock;
- tmpbb != NULL;
- tmpbb = tmpbb->bbNext)
+ while (pred != nullptr)
{
- if (tmpbb == bPrevPrev)
+ if (bPrevPrev == nullptr)
{
- /* We found an ealier predecessor */
+ // Initially we select the first block in the bbPreds list
bPrevPrev = pred->flBlock;
- break;
+ continue;
}
- else if (tmpbb == bPrev)
+
+ // Walk the flow graph lexically forward from pred->flBlock
+ // if we find (block == bPrevPrev) then
+ // pred->flBlock is an earlier predecessor.
+ for (tmpbb = pred->flBlock; tmpbb != nullptr; tmpbb = tmpbb->bbNext)
{
- // We have reached bPrev so stop walking
- // as this cannot be an earlier predecessor
- break;
+ if (tmpbb == bPrevPrev)
+ {
+ /* We found an earlier predecessor */
+ bPrevPrev = pred->flBlock;
+ break;
+ }
+ else if (tmpbb == bPrev)
+ {
+ // We have reached bPrev so stop walking
+ // as this cannot be an earlier predecessor
+ break;
+ }
}
- }
- // Onto the next predecessor
- pred = pred->flNext;
- }
+ // Onto the next predecessor
+ pred = pred->flNext;
+ }
- // Walk the flow graph forward from bPrevPrev
- // if we don't find (tmpbb == bPrev) then our candidate
- // bPrevPrev is lexically after bPrev and we do not
- // want to select it as our new block
+ // Walk the flow graph forward from bPrevPrev
+ // if we don't find (tmpbb == bPrev) then our candidate
+ // bPrevPrev is lexically after bPrev and we do not
+ // want to select it as our new block
- for (tmpbb = bPrevPrev;
- tmpbb != NULL;
- tmpbb = tmpbb->bbNext)
- {
- if (tmpbb == bPrev)
+ for (tmpbb = bPrevPrev; tmpbb != nullptr; tmpbb = tmpbb->bbNext)
{
- // Set up block back to the lexically
- // earliest predecessor of pPrev
+ if (tmpbb == bPrev)
+ {
+ // Set up block back to the lexically
+ // earliest predecessor of pPrev
- block = bPrevPrev;
+ block = bPrevPrev;
+ }
}
}
}
- }
- break;
+ break;
- default:
- break;
- }
+ default:
+ break;
+ }
}
}
// Now iterate over every block to see if we can prove that a block is rarely run
// (i.e. when all predecessors to the block are rarely run)
//
- for (bPrev = fgFirstBB, block = bPrev->bbNext;
- block != NULL;
- bPrev = block, block = block->bbNext)
+ for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext)
{
// If block is not run rarely, then check to make sure that it has
// at least one non-rarely run block.
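This second pass applies a simple closure rule: a block whose predecessors are all rarely run can itself be marked rarely run. A minimal standalone sketch of that rule, using a hypothetical Blk type and an invented AllPredsRarelyRun helper (neither is part of the JIT sources):

// Sketch only: hypothetical simplified types, not the JIT's BasicBlock/flowList.
#include <vector>

struct Blk
{
    bool              runRarely = false;
    std::vector<Blk*> preds; // predecessor blocks
};

// A block with at least one predecessor is rarely run if every predecessor is.
bool AllPredsRarelyRun(const Blk& b)
{
    if (b.preds.empty())
    {
        return false; // entry-like block: keep it hot
    }
    for (const Blk* p : b.preds)
    {
        if (!p->runRarely)
        {
            return false;
        }
    }
    return true;
}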
@@ -11218,9 +11297,7 @@ NEW_RARELY_RUN:
bool rare = true;
/* Make sure that block has at least one normal predecessor */
- for (flowList* pred = block->bbPreds;
- pred != NULL;
- pred = pred->flNext)
+ for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
{
/* Find the fall through predecessor, if any */
if (!pred->flBlock->isRunRarely())
@@ -11248,10 +11325,9 @@ NEW_RARELY_RUN:
result = true;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("All branches to BB%02u are from rarely run blocks, marking as rarely run\n",
- block->bbNum);
+ printf("All branches to BB%02u are from rarely run blocks, marking as rarely run\n", block->bbNum);
}
#endif // DEBUG
@@ -11261,13 +11337,12 @@ NEW_RARELY_RUN:
if (block->isBBCallAlwaysPair())
{
BasicBlock* bNext = block->bbNext;
- PREFIX_ASSUME(bNext != NULL);
+ PREFIX_ASSUME(bNext != nullptr);
bNext->bbSetRunRarely();
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("Also marking the BBJ_ALWAYS at BB%02u as rarely run\n",
- bNext->bbNum);
+ printf("Also marking the BBJ_ALWAYS at BB%02u as rarely run\n", bNext->bbNum);
}
#endif // DEBUG
}
@@ -11277,28 +11352,30 @@ NEW_RARELY_RUN:
/* COMPACT blocks if possible */
if (bPrev->bbJumpKind == BBJ_NONE)
{
- if (fgCanCompactBlocks(bPrev, block))
- {
- fgCompactBlocks(bPrev, block);
-
- block = bPrev;
- continue;
- }
+ if (fgCanCompactBlocks(bPrev, block))
+ {
+ fgCompactBlocks(bPrev, block);
+
+ block = bPrev;
+ continue;
+ }
}
//
// if bPrev->bbWeight is not based upon profile data we can adjust
// the weights of bPrev and block
//
- else if ( bPrev->isBBCallAlwaysPair() && // we must have a BBJ_CALLFINALLY and BBK_ALWAYS pair
- (bPrev->bbWeight != block->bbWeight) && // the weights are currently different
- ((bPrev->bbFlags & BBF_PROF_WEIGHT) == 0)) // and the BBJ_CALLFINALLY block is not using profiled weights
+ else if (bPrev->isBBCallAlwaysPair() && // we must have a BBJ_CALLFINALLY and BBK_ALWAYS pair
+ (bPrev->bbWeight != block->bbWeight) && // the weights are currently different
+ ((bPrev->bbFlags & BBF_PROF_WEIGHT) == 0)) // and the BBJ_CALLFINALLY block is not using profiled
+ // weights
{
if (block->isRunRarely())
{
- bPrev->bbWeight = block->bbWeight; // the BBJ_CALLFINALLY block now has the same weight as the BBJ_ALWAYS block
- bPrev->bbFlags |= BBF_RUN_RARELY; // and is now rarely run
+ bPrev->bbWeight =
+ block->bbWeight; // the BBJ_CALLFINALLY block now has the same weight as the BBJ_ALWAYS block
+ bPrev->bbFlags |= BBF_RUN_RARELY; // and is now rarely run
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Marking the BBJ_CALLFINALLY block at BB%02u as rarely run because BB%02u is rarely run\n",
bPrev->bbNum, block->bbNum);
@@ -11307,19 +11384,21 @@ NEW_RARELY_RUN:
}
else if (bPrev->isRunRarely())
{
- block->bbWeight = bPrev->bbWeight; // the BBJ_ALWAYS block now has the same weight as the BBJ_CALLFINALLY block
- block->bbFlags |= BBF_RUN_RARELY; // and is now rarely run
+ block->bbWeight =
+ bPrev->bbWeight; // the BBJ_ALWAYS block now has the same weight as the BBJ_CALLFINALLY block
+ block->bbFlags |= BBF_RUN_RARELY; // and is now rarely run
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Marking the BBJ_ALWAYS block at BB%02u as rarely run because BB%02u is rarely run\n",
block->bbNum, bPrev->bbNum);
}
#endif // DEBUG
}
- else // Both blocks are hot, bPrev is known not to be using profiled weight
+ else // Both blocks are hot, bPrev is known not to be using profiled weight
{
- bPrev->bbWeight = block->bbWeight; // the BBJ_CALLFINALLY block now has the same weight as the BBJ_ALWAYS block
+ bPrev->bbWeight =
+ block->bbWeight; // the BBJ_CALLFINALLY block now has the same weight as the BBJ_ALWAYS block
}
noway_assert(block->bbWeight == bPrev->bbWeight);
}
@@ -11335,8 +11414,7 @@ NEW_RARELY_RUN:
* if the 'bBefore' and 'bAfter' blocks are in the exact same EH region.
*/
-bool Compiler::fgEhAllowsMoveBlock(BasicBlock* bBefore,
- BasicBlock* bAfter)
+bool Compiler::fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter)
{
return BasicBlock::sameEHRegion(bBefore, bAfter);
}
@@ -11354,28 +11432,28 @@ void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBloc
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("Relocated block%s [BB%02u..BB%02u] inserted after BB%02u%s\n",
- (bStart == bEnd) ? "" : "s",
- bStart->bbNum, bEnd->bbNum,
- insertAfterBlk->bbNum,
- (insertAfterBlk->bbNext == NULL) ? " at the end of method" : "");
+ printf("Relocated block%s [BB%02u..BB%02u] inserted after BB%02u%s\n", (bStart == bEnd) ? "" : "s",
+ bStart->bbNum, bEnd->bbNum, insertAfterBlk->bbNum,
+ (insertAfterBlk->bbNext == nullptr) ? " at the end of method" : "");
}
#endif // DEBUG
/* relink [bStart .. bEnd] into the flow graph */
- bEnd->bbNext = insertAfterBlk->bbNext;
+ bEnd->bbNext = insertAfterBlk->bbNext;
if (insertAfterBlk->bbNext)
+ {
insertAfterBlk->bbNext->bbPrev = bEnd;
+ }
insertAfterBlk->setNext(bStart);
/* If insertAfterBlk was fgLastBB then update fgLastBB */
if (insertAfterBlk == fgLastBB)
{
fgLastBB = bEnd;
- noway_assert(fgLastBB->bbNext == NULL);
+ noway_assert(fgLastBB->bbNext == nullptr);
}
}
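fgMoveBlocksAfter, shown in the hunk above, splices the already-detached range [bStart..bEnd] back into the block list right after insertAfterBlk. The same splice on a hypothetical doubly linked Node type (a sketch, not the JIT's list code):

// Sketch only: a hypothetical doubly linked node standing in for BasicBlock.
struct Node
{
    Node* next = nullptr;
    Node* prev = nullptr;
};

// Relink the detached range [start..end] so that it follows 'after'.
void SpliceAfter(Node* after, Node* start, Node* end)
{
    end->next = after->next;
    if (after->next != nullptr)
    {
        after->next->prev = end;
    }
    after->next = start;
    start->prev = after;
}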
@@ -11392,18 +11470,18 @@ void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBloc
=============================================================
*/
-BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType)
+BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType)
{
INDEBUG(const char* reason = "None";)
// Figure out the range of blocks we're going to move
- unsigned XTnum;
- EHblkDsc* HBtab;
- BasicBlock* bStart = NULL;
- BasicBlock* bMiddle = NULL;
- BasicBlock* bLast = NULL;
- BasicBlock* bPrev = NULL;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
+ BasicBlock* bStart = nullptr;
+ BasicBlock* bMiddle = nullptr;
+ BasicBlock* bLast = nullptr;
+ BasicBlock* bPrev = nullptr;
#if FEATURE_EH_FUNCLETS
// We don't support moving try regions... yet?
@@ -11415,7 +11493,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
if (relocateType == FG_RELOCATE_TRY)
{
bStart = HBtab->ebdTryBeg;
- bLast = HBtab->ebdTryLast;
+ bLast = HBtab->ebdTryLast;
}
else if (relocateType == FG_RELOCATE_HANDLER)
{
@@ -11428,8 +11506,8 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
}
else
{
- bStart = HBtab->ebdHndBeg;
- bLast = HBtab->ebdHndLast;
+ bStart = HBtab->ebdHndBeg;
+ bLast = HBtab->ebdHndLast;
}
}
@@ -11437,9 +11515,9 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
bool inTheRange = false;
bool validRange = false;
- BasicBlock* block;
+ BasicBlock* block;
- noway_assert(bStart != NULL && bLast != NULL);
+ noway_assert(bStart != nullptr && bLast != nullptr);
if (bStart == fgFirstBB)
{
INDEBUG(reason = "can not relocate first block";)
@@ -11494,8 +11572,10 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
validRange = true;
}
- if (block == NULL)
+ if (block == nullptr)
+ {
break;
+ }
block = block->bbNext;
}
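The walk above confirms that bStart..bLast form one contiguous run of blocks before anything is moved. Reduced to a sketch over a hypothetical singly linked Node list (IsContiguousRange is an invented name; the real code also tracks filter and handler boundaries):

// Sketch only: hypothetical list walk.
struct Node
{
    Node* next = nullptr;
};

// True when 'start' appears no later than 'last' in the list headed by 'head'.
bool IsContiguousRange(Node* head, Node* start, Node* last)
{
    bool inRange = false;
    for (Node* n = head; n != nullptr; n = n->next)
    {
        if (n == start)
        {
            inRange = true;
        }
        if (n == last)
        {
            return inRange; // only valid if we entered the range first
        }
    }
    return false; // never reached 'last'
}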
@@ -11503,13 +11583,10 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
noway_assert((validRange == true) && (inTheRange == false));
bPrev = bStart->bbPrev;
- noway_assert(bPrev != nullptr); // Can't move a range that includes the first block of the function.
+ noway_assert(bPrev != nullptr); // Can't move a range that includes the first block of the function.
JITDUMP("Relocating %s range BB%02u..BB%02u (EH#%u) to end of BBlist\n",
- (relocateType == FG_RELOCATE_TRY) ? "try" : "handler",
- bStart->bbNum,
- bLast->bbNum,
- regionIndex);
+ (relocateType == FG_RELOCATE_TRY) ? "try" : "handler", bStart->bbNum, bLast->bbNum, regionIndex);
#ifdef DEBUG
if (verbose)
@@ -11533,7 +11610,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
bStart->bbFlags |= BBF_FUNCLET_BEG; // Mark the start block of the funclet
- if (bMiddle != NULL)
+ if (bMiddle != nullptr)
{
bMiddle->bbFlags |= BBF_FUNCLET_BEG; // Also mark the start block of a filter handler as a funclet
}
@@ -11582,9 +11659,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
// the beginning of A can't be strictly within the range of X (that is, the beginning
// of A isn't shared with the beginning of X) and the end of A outside the range.
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
if (XTnum != regionIndex) // we don't need to update our 'last' pointer
{
@@ -11592,7 +11667,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
{
// If we moved a set of blocks that were at the end of
// a different try region then we may need to update ebdTryLast
- for (block = HBtab->ebdTryBeg; block != NULL; block = block->bbNext)
+ for (block = HBtab->ebdTryBeg; block != nullptr; block = block->bbNext)
{
if (block == bPrev)
{
@@ -11613,7 +11688,7 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
{
// If we moved a set of blocks that were at the end of
// a different handler region then we must update ebdHndLast
- for (block = HBtab->ebdHndBeg; block != NULL; block = block->bbNext)
+ for (block = HBtab->ebdHndBeg; block != nullptr; block = block->bbNext)
{
if (block == bPrev)
{
@@ -11633,13 +11708,14 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
// Insert the block(s) we are moving after fgLastBlock
fgMoveBlocksAfter(bStart, bLast, insertAfterBlk);
- if (fgFirstFuncletBB == nullptr) // The funclet region isn't set yet
+ if (fgFirstFuncletBB == nullptr) // The funclet region isn't set yet
{
fgFirstFuncletBB = bStart;
}
else
{
- assert(fgFirstFuncletBB != insertAfterBlk->bbNext); // We insert at the end, not at the beginning, of the funclet region.
+ assert(fgFirstFuncletBB !=
+ insertAfterBlk->bbNext); // We insert at the end, not at the beginning, of the funclet region.
}
// These asserts assume we aren't moving try regions (which we might need to do). Only
@@ -11670,62 +11746,60 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
#else // FEATURE_EH_FUNCLETS
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
- {
- if (XTnum == regionIndex)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
- // Don't update our handler's Last info
- continue;
- }
+ if (XTnum == regionIndex)
+ {
+ // Don't update our handler's Last info
+ continue;
+ }
- if (HBtab->ebdTryLast == bLast)
- {
- // If we moved a set of blocks that were at the end of
- // a different try region then we may need to update ebdTryLast
- for (block = HBtab->ebdTryBeg; block != NULL; block = block->bbNext)
+ if (HBtab->ebdTryLast == bLast)
{
- if (block == bPrev)
- {
- fgSetTryEnd(HBtab, bPrev);
- break;
- }
- else if (block == HBtab->ebdTryLast->bbNext)
+ // If we moved a set of blocks that were at the end of
+ // a different try region then we may need to update ebdTryLast
+ for (block = HBtab->ebdTryBeg; block != NULL; block = block->bbNext)
{
- // bPrev does not come after the TryBeg
- break;
+ if (block == bPrev)
+ {
+ fgSetTryEnd(HBtab, bPrev);
+ break;
+ }
+ else if (block == HBtab->ebdTryLast->bbNext)
+ {
+ // bPrev does not come after the TryBeg
+ break;
+ }
}
}
- }
- if (HBtab->ebdHndLast == bLast)
- {
- // If we moved a set of blocks that were at the end of
- // a different handler region then we must update ebdHndLast
- for (block = HBtab->ebdHndBeg; block != NULL; block = block->bbNext)
+ if (HBtab->ebdHndLast == bLast)
{
- if (block == bPrev)
- {
- fgSetHndEnd(HBtab, bPrev);
- break;
- }
- else if (block == HBtab->ebdHndLast->bbNext)
+ // If we moved a set of blocks that were at the end of
+ // a different handler region then we must update ebdHndLast
+ for (block = HBtab->ebdHndBeg; block != NULL; block = block->bbNext)
{
- // bPrev does not come after the HndBeg
- break;
+ if (block == bPrev)
+ {
+ fgSetHndEnd(HBtab, bPrev);
+ break;
+ }
+ else if (block == HBtab->ebdHndLast->bbNext)
+ {
+ // bPrev does not come after the HndBeg
+ break;
+ }
}
}
- }
- } // end exception table iteration
+ } // end exception table iteration
- // We have decided to insert the block(s) after fgLastBlock
- fgMoveBlocksAfter(bStart, bLast, insertAfterBlk);
+ // We have decided to insert the block(s) after fgLastBlock
+ fgMoveBlocksAfter(bStart, bLast, insertAfterBlk);
- // If bPrev falls through, we will insert a jump to block
- fgConnectFallThrough(bPrev, bStart);
+ // If bPrev falls through, we will insert a jump to block
+ fgConnectFallThrough(bPrev, bStart);
- // If bLast falls through, we will insert a jump to bNext
- fgConnectFallThrough(bLast, bNext);
+ // If bLast falls through, we will insert a jump to bNext
+ fgConnectFallThrough(bLast, bNext);
#endif // FEATURE_EH_FUNCLETS
@@ -11734,20 +11808,20 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE
FAILURE:
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("*************** Failed fgRelocateEHRange(BB%02u..BB%02u) because %s\n", bStart->bbNum, bLast->bbNum, reason);
+ printf("*************** Failed fgRelocateEHRange(BB%02u..BB%02u) because %s\n", bStart->bbNum, bLast->bbNum,
+ reason);
}
#endif // DEBUG
- bLast = NULL;
+ bLast = nullptr;
DONE:
return bLast;
}
-
#if FEATURE_EH_FUNCLETS
#if defined(_TARGET_ARM_)
@@ -11759,14 +11833,13 @@ DONE:
* removed the BBJ_ALWAYS, it better have the BBF_FINALLY_TARGET bit set.
*/
-void Compiler::fgClearFinallyTargetBit(BasicBlock* block)
+void Compiler::fgClearFinallyTargetBit(BasicBlock* block)
{
assert((block->bbFlags & BBF_FINALLY_TARGET) != 0);
for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
{
- if (pred->flBlock->bbJumpKind == BBJ_ALWAYS &&
- pred->flBlock->bbJumpDest == block)
+ if (pred->flBlock->bbJumpKind == BBJ_ALWAYS && pred->flBlock->bbJumpDest == block)
{
BasicBlock* pPrev = pred->flBlock->bbPrev;
if (pPrev != NULL)
@@ -11807,11 +11880,11 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block)
*
* Return 'true' for case #1, and 'false' otherwise.
*/
-bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block)
+bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block)
{
// Some simple preconditions (as stated above)
assert(!fgFuncletsCreated);
- assert(fgGetPredForBlock(block, predBlock) != NULL);
+ assert(fgGetPredForBlock(block, predBlock) != nullptr);
assert(block->hasHndIndex());
EHblkDsc* xtab = ehGetDsc(block->getHndIndex());
@@ -11819,8 +11892,10 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicB
#if FEATURE_EH_CALLFINALLY_THUNKS
if (xtab->HasFinallyHandler())
{
- assert((xtab->ebdHndBeg == block) || // The normal case
- ((xtab->ebdHndBeg->bbNext == block) && (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're trying to decide how to split up the predecessor edges.
+ assert((xtab->ebdHndBeg == block) || // The normal case
+ ((xtab->ebdHndBeg->bbNext == block) &&
+ (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're
+ // trying to decide how to split up the predecessor edges.
if (predBlock->bbJumpKind == BBJ_CALLFINALLY)
{
assert(predBlock->bbJumpDest == block);
@@ -11905,13 +11980,12 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicB
return true;
}
-
/*****************************************************************************
* Does this block, first block of a handler region, have any predecessor edges
* that are not from its corresponding try region?
*/
-bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block)
+bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block)
{
assert(block->hasHndIndex());
assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler
@@ -11932,18 +12006,19 @@ bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block)
return false;
}
-
/*****************************************************************************
* Introduce a new head block of the handler for the prolog to be put in, ahead
* of the current handler head 'block'.
* Note that this code has some similarities to fgCreateLoopPreHeader().
*/
-void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
+void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
+ {
printf("\nCreating funclet prolog header for BB%02u\n", block->bbNum);
+ }
#endif
assert(block->hasHndIndex());
@@ -11957,10 +12032,11 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
//
newHead->bbFlags |= (BBF_INTERNAL | BBF_JMP_TARGET | BBF_HAS_LABEL);
newHead->inheritWeight(block);
- newHead->bbRefs = 0;
+ newHead->bbRefs = 0;
fgInsertBBbefore(block, newHead); // insert the new block in the block list
- fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's EH block.
+ fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's EH
+ // block.
// fgExtendEHRegionBefore mucks with the bbRefs without updating the pred list, which we will
// do below for this block. So, undo that change.
@@ -11982,29 +12058,29 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
switch (predBlock->bbJumpKind)
{
- case BBJ_CALLFINALLY:
- noway_assert(predBlock->bbJumpDest == block);
- predBlock->bbJumpDest = newHead;
- fgRemoveRefPred(block, predBlock);
- fgAddRefPred(newHead, predBlock);
- break;
+ case BBJ_CALLFINALLY:
+ noway_assert(predBlock->bbJumpDest == block);
+ predBlock->bbJumpDest = newHead;
+ fgRemoveRefPred(block, predBlock);
+ fgAddRefPred(newHead, predBlock);
+ break;
- default:
- // The only way into the handler is via a BBJ_CALLFINALLY (to a finally handler), or
- // via exception handling.
- noway_assert(false);
- break;
+ default:
+ // The only way into the handler is via a BBJ_CALLFINALLY (to a finally handler), or
+ // via exception handling.
+ noway_assert(false);
+ break;
}
}
}
- assert(NULL == fgGetPredForBlock(block, newHead));
+ assert(nullptr == fgGetPredForBlock(block, newHead));
fgAddRefPred(block, newHead);
- assert((newHead->bbFlags & (BBF_INTERNAL|BBF_JMP_TARGET|BBF_HAS_LABEL)) == (BBF_INTERNAL|BBF_JMP_TARGET|BBF_HAS_LABEL));
+ assert((newHead->bbFlags & (BBF_INTERNAL | BBF_JMP_TARGET | BBF_HAS_LABEL)) ==
+ (BBF_INTERNAL | BBF_JMP_TARGET | BBF_HAS_LABEL));
}
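fgInsertFuncletPrologBlock retargets each BBJ_CALLFINALLY predecessor of the old handler head at the freshly inserted header block. A sketch of that retargeting with hypothetical Block/pred fields (the JIT's real version goes through fgRemoveRefPred/fgAddRefPred and the flowList machinery):

// Sketch only: hypothetical block shape; assumes every predecessor reaches oldHead via an explicit jump.
#include <vector>

struct Block
{
    Block*              jumpDest = nullptr; // jump target, if any
    std::vector<Block*> preds;              // predecessor blocks
};

void RedirectPredsToNewHead(Block* oldHead, Block* newHead)
{
    for (Block* pred : oldHead->preds)
    {
        pred->jumpDest = newHead;       // predecessors now jump to the new header
        newHead->preds.push_back(pred);
    }
    oldHead->preds.clear();
    oldHead->preds.push_back(newHead);  // the new header falls into the old head
}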
-
/*****************************************************************************
*
* Every funclet will have a prolog. That prolog will be inserted as the first instructions
@@ -12015,19 +12091,17 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
* handler's try region, since the only way to get into a handler is via that try region.
*/
-void Compiler::fgCreateFuncletPrologBlocks()
+void Compiler::fgCreateFuncletPrologBlocks()
{
noway_assert(fgComputePredsDone);
noway_assert(!fgDomsComputed); // this function doesn't maintain the dom sets
assert(!fgFuncletsCreated);
- bool prologBlocksCreated = false;
- EHblkDsc* HBtabEnd;
- EHblkDsc* HBtab;
+ bool prologBlocksCreated = false;
+ EHblkDsc* HBtabEnd;
+ EHblkDsc* HBtab;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount;
- HBtab < HBtabEnd;
- HBtab++)
+ for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
{
BasicBlock* head = HBtab->ebdHndBeg;
@@ -12081,13 +12155,15 @@ void Compiler::fgCreateFuncletPrologBlocks()
* We only move filter and handler blocks, not try blocks.
*/
-void Compiler::fgCreateFunclets()
+void Compiler::fgCreateFunclets()
{
assert(!fgFuncletsCreated);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In fgCreateFunclets()\n");
+ }
#endif
fgCreateFuncletPrologBlocks();
@@ -12101,9 +12177,9 @@ void Compiler::fgCreateFunclets()
IMPL_LIMITATION("Too many funclets");
}
- FuncInfoDsc* funcInfo = new (this, CMK_BasicBlock) FuncInfoDsc[funcCnt];
+ FuncInfoDsc* funcInfo = new (this, CMK_BasicBlock) FuncInfoDsc[funcCnt];
- unsigned short funcIdx;
+ unsigned short funcIdx;
// Setup the root FuncInfoDsc and prepare to start associating
// FuncInfoDsc's with their corresponding EH region
@@ -12121,22 +12197,20 @@ void Compiler::fgCreateFunclets()
// be added *after* the current index, so our iteration here is not invalidated.
// It *can* invalidate the compHndBBtab pointer itself, though, if it gets reallocated!
- for (XTnum = 0;
- XTnum < compHndBBtabCount;
- XTnum++)
+ for (XTnum = 0; XTnum < compHndBBtabCount; XTnum++)
{
HBtab = ehGetDsc(XTnum); // must re-compute this every loop, since fgRelocateEHRange changes the table
if (HBtab->HasFilter())
{
assert(funcIdx < funcCnt);
- funcInfo[funcIdx].funKind = FUNC_FILTER;
+ funcInfo[funcIdx].funKind = FUNC_FILTER;
funcInfo[funcIdx].funEHIndex = (unsigned short)XTnum;
funcIdx++;
}
assert(funcIdx < funcCnt);
- funcInfo[funcIdx].funKind = FUNC_HANDLER;
+ funcInfo[funcIdx].funKind = FUNC_HANDLER;
funcInfo[funcIdx].funEHIndex = (unsigned short)XTnum;
- HBtab->ebdFuncIndex = funcIdx;
+ HBtab->ebdFuncIndex = funcIdx;
funcIdx++;
fgRelocateEHRange(XTnum, FG_RELOCATE_HANDLER);
}
@@ -12145,8 +12219,8 @@ void Compiler::fgCreateFunclets()
assert(funcIdx == funcCnt);
// Publish
- compCurrFuncIdx = 0;
- compFuncInfos = funcInfo;
+ compCurrFuncIdx = 0;
+ compFuncInfos = funcInfo;
compFuncInfoCount = (unsigned short)funcCnt;
fgFuncletsCreated = true;
@@ -12166,72 +12240,70 @@ void Compiler::fgCreateFunclets()
#else // !FEATURE_EH_FUNCLETS
-/*****************************************************************************
- *
- * Function called to relocate any and all EH regions.
- * Only entire consecutive EH regions will be moved and they will be kept together.
- * Except for the first block, the range can not have any blocks that jump into or out of the region.
- */
+ /*****************************************************************************
+ *
+ * Function called to relocate any and all EH regions.
+ * Only entire consecutive EH regions will be moved and they will be kept together.
+ * Except for the first block, the range can not have any blocks that jump into or out of the region.
+ */
-bool Compiler::fgRelocateEHRegions()
-{
- bool result = false; // Our return value
+ bool Compiler::fgRelocateEHRegions()
+ {
+ bool result = false; // Our return value
#ifdef DEBUG
- if (verbose)
- printf("*************** In fgRelocateEHRegions()\n");
+ if (verbose)
+ printf("*************** In fgRelocateEHRegions()\n");
#endif
- if (fgCanRelocateEHRegions)
- {
- unsigned XTnum;
- EHblkDsc* HBtab;
-
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ if (fgCanRelocateEHRegions)
{
- // Nested EH regions cannot be moved.
- // Also we don't want to relocate an EH region that has a filter
- if ((HBtab->ebdHandlerNestingLevel == 0) && !HBtab->HasFilter())
+ unsigned XTnum;
+ EHblkDsc* HBtab;
+
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
- bool movedTry = false;
+ // Nested EH regions cannot be moved.
+ // Also we don't want to relocate an EH region that has a filter
+ if ((HBtab->ebdHandlerNestingLevel == 0) && !HBtab->HasFilter())
+ {
+ bool movedTry = false;
#if DEBUG
- bool movedHnd = false;
+ bool movedHnd = false;
#endif // DEBUG
- // Only try to move the outermost try region
- if (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
- {
- // Move the entire try region if it can be moved
- if (HBtab->ebdTryBeg->isRunRarely())
+ // Only try to move the outermost try region
+ if (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
{
- BasicBlock* bTryLastBB = fgRelocateEHRange(XTnum, FG_RELOCATE_TRY);
- if (bTryLastBB != NULL)
+ // Move the entire try region if it can be moved
+ if (HBtab->ebdTryBeg->isRunRarely())
{
- result = true;
- movedTry = true;
+ BasicBlock* bTryLastBB = fgRelocateEHRange(XTnum, FG_RELOCATE_TRY);
+ if (bTryLastBB != NULL)
+ {
+ result = true;
+ movedTry = true;
+ }
}
- }
#if DEBUG
- if (verbose && movedTry)
- {
- printf("\nAfter relocating an EH try region");
- fgDispBasicBlocks();
- fgDispHandlerTab();
-
- // Make sure that the predecessor lists are accurate
- if (expensiveDebugCheckLevel >= 2)
+ if (verbose && movedTry)
{
- fgDebugCheckBBlist();
+ printf("\nAfter relocating an EH try region");
+ fgDispBasicBlocks();
+ fgDispHandlerTab();
+
+ // Make sure that the predecessor lists are accurate
+ if (expensiveDebugCheckLevel >= 2)
+ {
+ fgDebugCheckBBlist();
+ }
}
- }
#endif // DEBUG
- }
+ }
- // Currently it is not good to move the rarely run handler regions to the end of the method
- // because fgDetermineFirstColdBlock() must put the start of any handler region in the hot section.
- CLANG_FORMAT_COMMENT_ANCHOR;
+ // Currently it is not good to move the rarely run handler regions to the end of the method
+ // because fgDetermineFirstColdBlock() must put the start of any handler region in the hot section.
+ CLANG_FORMAT_COMMENT_ANCHOR;
#if 0
// Now try to move the entire handler region if it can be moved.
@@ -12250,50 +12322,48 @@ bool Compiler::fgRelocateEHRegions()
#endif // 0
#if DEBUG
- if (verbose && movedHnd)
- {
- printf("\nAfter relocating an EH handler region");
- fgDispBasicBlocks();
- fgDispHandlerTab();
-
- // Make sure that the predecessor lists are accurate
- if (expensiveDebugCheckLevel >= 2)
+ if (verbose && movedHnd)
{
- fgDebugCheckBBlist();
+ printf("\nAfter relocating an EH handler region");
+ fgDispBasicBlocks();
+ fgDispHandlerTab();
+
+ // Make sure that the predecessor lists are accurate
+ if (expensiveDebugCheckLevel >= 2)
+ {
+ fgDebugCheckBBlist();
+ }
}
- }
#endif // DEBUG
+ }
}
}
- }
#if DEBUG
- fgVerifyHandlerTab();
+ fgVerifyHandlerTab();
- if (verbose && result)
- {
- printf("\nAfter fgRelocateEHRegions()");
- fgDispBasicBlocks();
- fgDispHandlerTab();
- // Make sure that the predecessor lists are accurate
- fgDebugCheckBBlist();
- }
+ if (verbose && result)
+ {
+ printf("\nAfter fgRelocateEHRegions()");
+ fgDispBasicBlocks();
+ fgDispHandlerTab();
+ // Make sure that the predecessor lists are accurate
+ fgDebugCheckBBlist();
+ }
#endif // DEBUG
- return result;
-}
+ return result;
+ }
#endif // !FEATURE_EH_FUNCLETS
-bool flowList::setEdgeWeightMinChecked(BasicBlock::weight_t newWeight,
- BasicBlock::weight_t slop,
- bool* wbUsedSlop)
+bool flowList::setEdgeWeightMinChecked(BasicBlock::weight_t newWeight, BasicBlock::weight_t slop, bool* wbUsedSlop)
{
bool result = false;
if ((newWeight <= flEdgeWeightMax) && (newWeight >= flEdgeWeightMin))
{
flEdgeWeightMin = newWeight;
- result = true;
+ result = true;
}
else if (slop > 0)
{
@@ -12313,7 +12383,7 @@ bool flowList::setEdgeWeightMinChecked(BasicBlock::weight_t newWeight,
flEdgeWeightMax = newWeight;
}
- if (wbUsedSlop != NULL)
+ if (wbUsedSlop != nullptr)
{
*wbUsedSlop = true;
}
@@ -12334,7 +12404,7 @@ bool flowList::setEdgeWeightMinChecked(BasicBlock::weight_t newWeight,
// We will lower flEdgeWeightMin towards newWeight
flEdgeWeightMin = newWeight;
- if (wbUsedSlop != NULL)
+ if (wbUsedSlop != nullptr)
{
*wbUsedSlop = true;
}
@@ -12346,10 +12416,9 @@ bool flowList::setEdgeWeightMinChecked(BasicBlock::weight_t newWeight,
// Also we should have set wbUsedSlop to true.
if (result == true)
{
- assert( (flEdgeWeightMax == 0) ||
- ((newWeight <= flEdgeWeightMax) && (newWeight >= flEdgeWeightMin)));
+ assert((flEdgeWeightMax == 0) || ((newWeight <= flEdgeWeightMax) && (newWeight >= flEdgeWeightMin)));
- if (wbUsedSlop != NULL)
+ if (wbUsedSlop != nullptr)
{
assert(*wbUsedSlop == true);
}
@@ -12359,22 +12428,20 @@ bool flowList::setEdgeWeightMinChecked(BasicBlock::weight_t newWeight,
#if DEBUG
if (result == false)
{
- result = false; // break here
+ result = false; // break here
}
#endif // DEBUG
return result;
}
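setEdgeWeightMinChecked keeps each edge's weight bracketed by a [min..max] interval and tolerates a small 'slop' before declaring the update inconsistent. The shape of that bookkeeping, simplified to the overshoot case on a hypothetical WeightRange type (the real code also handles undershooting the minimum):

// Sketch only: hypothetical interval type.
struct WeightRange
{
    unsigned min = 0;
    unsigned max = 0xFFFFFFFF;

    // Try to raise 'min' to newWeight, allowing it to exceed 'max' by at most 'slop'.
    bool TrySetMin(unsigned newWeight, unsigned slop, bool* usedSlop)
    {
        if ((newWeight >= min) && (newWeight <= max))
        {
            min = newWeight;
            return true;
        }
        if ((slop > 0) && (newWeight > max) && (newWeight <= max + slop))
        {
            min = newWeight;
            max = newWeight; // widen the interval within the slop allowance
            if (usedSlop != nullptr)
            {
                *usedSlop = true;
            }
            return true;
        }
        return false;
    }
};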
-bool flowList::setEdgeWeightMaxChecked(BasicBlock::weight_t newWeight,
- BasicBlock::weight_t slop,
- bool* wbUsedSlop)
+bool flowList::setEdgeWeightMaxChecked(BasicBlock::weight_t newWeight, BasicBlock::weight_t slop, bool* wbUsedSlop)
{
bool result = false;
if ((newWeight >= flEdgeWeightMin) && (newWeight <= flEdgeWeightMax))
{
flEdgeWeightMax = newWeight;
- result = true;
+ result = true;
}
else if (slop > 0)
{
@@ -12393,7 +12460,7 @@ bool flowList::setEdgeWeightMaxChecked(BasicBlock::weight_t newWeight,
flEdgeWeightMax = newWeight;
}
- if (wbUsedSlop != NULL)
+ if (wbUsedSlop != nullptr)
{
*wbUsedSlop = true;
}
@@ -12415,7 +12482,7 @@ bool flowList::setEdgeWeightMaxChecked(BasicBlock::weight_t newWeight,
flEdgeWeightMax = flEdgeWeightMin;
flEdgeWeightMin = newWeight;
- if (wbUsedSlop != NULL)
+ if (wbUsedSlop != nullptr)
{
*wbUsedSlop = true;
}
@@ -12427,17 +12494,16 @@ bool flowList::setEdgeWeightMaxChecked(BasicBlock::weight_t newWeight,
// Also we should have set wbUsedSlop to true, unless it is NULL
if (result == true)
{
- assert( (flEdgeWeightMax == 0) ||
- ((newWeight <= flEdgeWeightMax) && (newWeight >= flEdgeWeightMin)));
+ assert((flEdgeWeightMax == 0) || ((newWeight <= flEdgeWeightMax) && (newWeight >= flEdgeWeightMin)));
- assert((wbUsedSlop == NULL) || (*wbUsedSlop == true));
+ assert((wbUsedSlop == nullptr) || (*wbUsedSlop == true));
}
}
#if DEBUG
if (result == false)
{
- result = false; // break here
+ result = false; // break here
}
#endif // DEBUG
@@ -12445,19 +12511,19 @@ bool flowList::setEdgeWeightMaxChecked(BasicBlock::weight_t newWeight,
}
#ifdef DEBUG
-void Compiler::fgPrintEdgeWeights()
+void Compiler::fgPrintEdgeWeights()
{
- BasicBlock* bSrc;
- BasicBlock* bDst;
- flowList* edge;
+ BasicBlock* bSrc;
+ BasicBlock* bDst;
+ flowList* edge;
// Print out all of the edge weights
- for (bDst = fgFirstBB; bDst != NULL; bDst = bDst->bbNext)
+ for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
{
- if (bDst->bbPreds != NULL)
+ if (bDst->bbPreds != nullptr)
{
printf(" Edge weights into BB%02u :", bDst->bbNum);
- for (edge = bDst->bbPreds; edge != NULL; edge = edge->flNext)
+ for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
{
bSrc = edge->flBlock;
// This is the control flow edge (bSrc -> bDst)
@@ -12484,7 +12550,7 @@ void Compiler::fgPrintEdgeWeights()
}
}
printf(")");
- if (edge->flNext != NULL)
+ if (edge->flNext != nullptr)
{
printf(", ");
}
@@ -12502,7 +12568,7 @@ bool Compiler::fgMightHaveLoop()
// and potentially change the block epoch.
BitVecTraits blockVecTraits(fgBBNumMax + 1, this);
- BitVec BLOCKSET_INIT_NOCOPY(blocksSeen, BitVecOps::MakeEmpty(&blockVecTraits));
+ BitVec BLOCKSET_INIT_NOCOPY(blocksSeen, BitVecOps::MakeEmpty(&blockVecTraits));
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
@@ -12513,30 +12579,33 @@ bool Compiler::fgMightHaveLoop()
{
BasicBlock* succ = (*succs);
if (BitVecOps::IsMember(&blockVecTraits, blocksSeen, succ->bbNum))
+ {
return true;
+ }
}
}
return false;
}
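fgMightHaveLoop walks blocks in layout order and reports a possible loop as soon as a successor has already been visited. The same idea over a hypothetical adjacency-list graph (MightHaveLoop and succ are invented names):

// Sketch only: nodes numbered 0..n-1 in layout order; succ[i] lists the successors of node i.
#include <vector>

bool MightHaveLoop(const std::vector<std::vector<int>>& succ)
{
    std::vector<bool> seen(succ.size(), false);
    for (size_t i = 0; i < succ.size(); i++)
    {
        seen[i] = true;
        for (int s : succ[i])
        {
            if (seen[s])
            {
                return true; // an edge back to an already-seen node may form a loop
            }
        }
    }
    return false;
}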
-
-void Compiler::fgComputeEdgeWeights()
+void Compiler::fgComputeEdgeWeights()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In fgComputeEdgeWeights()\n");
+ }
#endif // DEBUG
if (fgIsUsingProfileWeights() == false)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("fgComputeEdgeWeights() we do not have any profile data so we are not using the edge weights\n");
}
#endif // DEBUG
fgHaveValidEdgeWeights = false;
- fgCalledWeight = BB_UNITY_WEIGHT;
+ fgCalledWeight = BB_UNITY_WEIGHT;
}
#if DEBUG
@@ -12547,18 +12616,18 @@ void Compiler::fgComputeEdgeWeights()
}
#endif // DEBUG
- BasicBlock* bSrc;
- BasicBlock* bDst;
- flowList* edge;
- unsigned iterations = 0;
- unsigned goodEdgeCountCurrent = 0;
- unsigned goodEdgeCountPrevious = 0;
- bool inconsistentProfileData = false;
- bool hasIncompleteEdgeWeights = false;
- unsigned numEdges = 0;
- bool usedSlop = false;
- bool changed;
- bool modified;
+ BasicBlock* bSrc;
+ BasicBlock* bDst;
+ flowList* edge;
+ unsigned iterations = 0;
+ unsigned goodEdgeCountCurrent = 0;
+ unsigned goodEdgeCountPrevious = 0;
+ bool inconsistentProfileData = false;
+ bool hasIncompleteEdgeWeights = false;
+ unsigned numEdges = 0;
+ bool usedSlop = false;
+ bool changed;
+ bool modified;
BasicBlock::weight_t returnWeight;
BasicBlock::weight_t slop;
@@ -12567,15 +12636,15 @@ void Compiler::fgComputeEdgeWeights()
// we will try to fix their weight up here
//
modified = false;
- do // while (changed)
+ do // while (changed)
{
- changed = false;
+ changed = false;
returnWeight = 0;
iterations++;
- for (bDst = fgFirstBB; bDst != NULL; bDst = bDst->bbNext)
+ for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
{
- if (((bDst->bbFlags & BBF_PROF_WEIGHT) == 0) && (bDst->bbPreds != NULL))
+ if (((bDst->bbFlags & BBF_PROF_WEIGHT) == 0) && (bDst->bbPreds != nullptr))
{
BasicBlock* bOnlyNext;
@@ -12599,7 +12668,7 @@ void Compiler::fgComputeEdgeWeights()
}
else
{
- bOnlyNext = NULL;
+ bOnlyNext = nullptr;
}
if ((bOnlyNext == bDst) && ((bSrc->bbFlags & BBF_PROF_WEIGHT) != 0))
@@ -12620,10 +12689,10 @@ void Compiler::fgComputeEdgeWeights()
}
else
{
- bOnlyNext = NULL;
+ bOnlyNext = nullptr;
}
- if ((bOnlyNext != NULL) && (bOnlyNext->bbPreds != NULL))
+ if ((bOnlyNext != nullptr) && (bOnlyNext->bbPreds != nullptr))
{
// Does only one block flow into bOnlyNext
if (bOnlyNext->countOfInEdges() == 1)
@@ -12637,8 +12706,8 @@ void Compiler::fgComputeEdgeWeights()
if ((newWeight != BB_MAX_WEIGHT) && (bDst->bbWeight != newWeight))
{
- changed = true;
- modified = true;
+ changed = true;
+ modified = true;
bDst->bbWeight = newWeight;
if (newWeight == 0)
{
@@ -12660,9 +12729,9 @@ void Compiler::fgComputeEdgeWeights()
returnWeight += bDst->bbWeight;
}
}
- }
+ }
// Generally when we synthesize profile estimates we do it in a way where this algorithm will converge
- // but downstream opts that remove conditional branches may create a situation where this is not the case.
+ // but downstream opts that remove conditional branches may create a situation where this is not the case.
// For instance a loop that becomes unreachable creates a sort of 'ring oscillator' (See test b539509)
while (changed && iterations < 10);
@@ -12697,7 +12766,7 @@ void Compiler::fgComputeEdgeWeights()
}
// Now we will compute the initial flEdgeWeightMin and flEdgeWeightMax values
- for (bDst = fgFirstBB; bDst != NULL; bDst = bDst->bbNext)
+ for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
{
BasicBlock::weight_t bDstWeight = bDst->bbWeight;
@@ -12709,7 +12778,7 @@ void Compiler::fgComputeEdgeWeights()
bDstWeight -= fgCalledWeight;
}
- for (edge = bDst->bbPreds; edge != NULL; edge = edge->flNext)
+ for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
{
bool assignOK = true;
@@ -12723,8 +12792,7 @@ void Compiler::fgComputeEdgeWeights()
// then we must reset any values that they currently have
//
- if (((bSrc->bbFlags & BBF_PROF_WEIGHT) == 0) ||
- ((bDst->bbFlags & BBF_PROF_WEIGHT) == 0))
+ if (((bSrc->bbFlags & BBF_PROF_WEIGHT) == 0) || ((bDst->bbFlags & BBF_PROF_WEIGHT) == 0))
{
edge->flEdgeWeightMin = BB_ZERO_WEIGHT;
edge->flEdgeWeightMax = BB_MAX_WEIGHT;
@@ -12733,30 +12801,30 @@ void Compiler::fgComputeEdgeWeights()
slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1;
switch (bSrc->bbJumpKind)
{
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- case BBJ_NONE:
- case BBJ_CALLFINALLY:
- // We know the exact edge weight
- assignOK &= edge->setEdgeWeightMinChecked(bSrc->bbWeight, slop, &usedSlop);
- assignOK &= edge->setEdgeWeightMaxChecked(bSrc->bbWeight, slop, &usedSlop);
- break;
-
- case BBJ_COND:
- case BBJ_SWITCH:
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- if (edge->flEdgeWeightMax > bSrc->bbWeight)
- {
- // The maximum edge weight to block can't be greater than the weight of bSrc
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ case BBJ_NONE:
+ case BBJ_CALLFINALLY:
+ // We know the exact edge weight
+ assignOK &= edge->setEdgeWeightMinChecked(bSrc->bbWeight, slop, &usedSlop);
assignOK &= edge->setEdgeWeightMaxChecked(bSrc->bbWeight, slop, &usedSlop);
- }
- break;
+ break;
- default:
- // We should never have an edge that starts from one of these jump kinds
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ case BBJ_COND:
+ case BBJ_SWITCH:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ if (edge->flEdgeWeightMax > bSrc->bbWeight)
+ {
+ // The maximum edge weight to block can't be greater than the weight of bSrc
+ assignOK &= edge->setEdgeWeightMaxChecked(bSrc->bbWeight, slop, &usedSlop);
+ }
+ break;
+
+ default:
+ // We should never have an edge that starts from one of these jump kinds
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
// The maximum edge weight to block can't be greater than the weight of bDst
@@ -12786,9 +12854,9 @@ void Compiler::fgComputeEdgeWeights()
goodEdgeCountCurrent = 0;
hasIncompleteEdgeWeights = false;
- for (bDst = fgFirstBB; bDst != NULL; bDst = bDst->bbNext)
+ for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
{
- for (edge = bDst->bbPreds; edge != NULL; edge = edge->flNext)
+ for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
{
bool assignOK = true;
@@ -12798,7 +12866,7 @@ void Compiler::fgComputeEdgeWeights()
slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1;
if (bSrc->bbJumpKind == BBJ_COND)
{
- int diff;
+ int diff;
flowList* otherEdge;
if (bSrc->bbNext == bDst)
{
@@ -12812,21 +12880,23 @@ void Compiler::fgComputeEdgeWeights()
noway_assert(otherEdge->flEdgeWeightMin <= otherEdge->flEdgeWeightMax);
// Adjust edge->flEdgeWeightMin up or adjust otherEdge->flEdgeWeightMax down
- diff = ((int) bSrc->bbWeight) - ((int) edge->flEdgeWeightMin + (int) otherEdge->flEdgeWeightMax);
+ diff = ((int)bSrc->bbWeight) - ((int)edge->flEdgeWeightMin + (int)otherEdge->flEdgeWeightMax);
if (diff > 0)
{
assignOK &= edge->setEdgeWeightMinChecked(edge->flEdgeWeightMin + diff, slop, &usedSlop);
}
else if (diff < 0)
{
- assignOK &= otherEdge->setEdgeWeightMaxChecked(otherEdge->flEdgeWeightMax + diff, slop, &usedSlop);
+ assignOK &=
+ otherEdge->setEdgeWeightMaxChecked(otherEdge->flEdgeWeightMax + diff, slop, &usedSlop);
}
// Adjust otherEdge->flEdgeWeightMin up or adjust edge->flEdgeWeightMax down
- diff = ((int) bSrc->bbWeight) - ((int) otherEdge->flEdgeWeightMin + (int) edge->flEdgeWeightMax);
+ diff = ((int)bSrc->bbWeight) - ((int)otherEdge->flEdgeWeightMin + (int)edge->flEdgeWeightMax);
if (diff > 0)
{
- assignOK &= otherEdge->setEdgeWeightMinChecked(otherEdge->flEdgeWeightMin + diff, slop, &usedSlop);
+ assignOK &=
+ otherEdge->setEdgeWeightMinChecked(otherEdge->flEdgeWeightMin + diff, slop, &usedSlop);
}
else if (diff < 0)
{
@@ -12842,18 +12912,18 @@ void Compiler::fgComputeEdgeWeights()
}
#ifdef DEBUG
// Now edge->flEdgeWeightMin and otherEdge->flEdgeWeightMax) should add up to bSrc->bbWeight
- diff = ((int) bSrc->bbWeight) - ((int) edge->flEdgeWeightMin + (int) otherEdge->flEdgeWeightMax);
- noway_assert((-((int) slop) <= diff) && (diff <= ((int) slop)));
+ diff = ((int)bSrc->bbWeight) - ((int)edge->flEdgeWeightMin + (int)otherEdge->flEdgeWeightMax);
+ noway_assert((-((int)slop) <= diff) && (diff <= ((int)slop)));
// Now otherEdge->flEdgeWeightMin and edge->flEdgeWeightMax) should add up to bSrc->bbWeight
- diff = ((int) bSrc->bbWeight) - ((int) otherEdge->flEdgeWeightMin + (int) edge->flEdgeWeightMax);
- noway_assert((-((int) slop) <= diff) && (diff <= ((int) slop)));
+ diff = ((int)bSrc->bbWeight) - ((int)otherEdge->flEdgeWeightMin + (int)edge->flEdgeWeightMax);
+ noway_assert((-((int)slop) <= diff) && (diff <= ((int)slop)));
#endif // DEBUG
}
}
}
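For a BBJ_COND source, the code above relies on the two outgoing edge weights summing to the source block's weight, nudging one edge's minimum up or the other's maximum down until the pair is consistent. In isolation, with plain ints standing in for the JIT's weight type and slop machinery:

// Sketch only: plain integers in place of BasicBlock::weight_t.
struct EdgeRange
{
    int min;
    int max;
};

// The two out-edges of a conditional block must sum to blockWeight.
void TightenConditionalEdges(int blockWeight, EdgeRange& edge, EdgeRange& other)
{
    int diff = blockWeight - (edge.min + other.max);
    if (diff > 0)
    {
        edge.min += diff;  // this edge must carry at least the remainder
    }
    else if (diff < 0)
    {
        other.max += diff; // the other edge cannot carry more than what is left
    }
}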
- for (bDst = fgFirstBB; bDst != NULL; bDst = bDst->bbNext)
+ for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
{
BasicBlock::weight_t bDstWeight = bDst->bbWeight;
@@ -12877,7 +12947,7 @@ void Compiler::fgComputeEdgeWeights()
UINT64 maxEdgeWeightSum = 0;
// Calculate the sums of the minimum and maximum edge weights
- for (edge = bDst->bbPreds; edge != NULL; edge = edge->flNext)
+ for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
{
// We are processing the control flow edge (bSrc -> bDst)
bSrc = edge->flBlock;
@@ -12889,7 +12959,7 @@ void Compiler::fgComputeEdgeWeights()
// maxEdgeWeightSum is the sum of all flEdgeWeightMax values into bDst
// minEdgeWeightSum is the sum of all flEdgeWeightMin values into bDst
- for (edge = bDst->bbPreds; edge != NULL; edge = edge->flNext)
+ for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
{
bool assignOK = true;
@@ -12910,7 +12980,8 @@ void Compiler::fgComputeEdgeWeights()
if (bDstWeight >= otherMaxEdgesWeightSum)
{
// minWeightCalc is our minWeight when every other path to bDst takes its flEdgeWeightMax value
- BasicBlock::weight_t minWeightCalc = (BasicBlock::weight_t)(bDstWeight - otherMaxEdgesWeightSum);
+ BasicBlock::weight_t minWeightCalc =
+ (BasicBlock::weight_t)(bDstWeight - otherMaxEdgesWeightSum);
if (minWeightCalc > edge->flEdgeWeightMin)
{
assignOK &= edge->setEdgeWeightMinChecked(minWeightCalc, slop, &usedSlop);
@@ -12920,7 +12991,8 @@ void Compiler::fgComputeEdgeWeights()
if (bDstWeight >= otherMinEdgesWeightSum)
{
// maxWeightCalc is our maxWeight when every other path to bDst takes its flEdgeWeightMin value
- BasicBlock::weight_t maxWeightCalc = (BasicBlock::weight_t)(bDstWeight - otherMinEdgesWeightSum);
+ BasicBlock::weight_t maxWeightCalc =
+ (BasicBlock::weight_t)(bDstWeight - otherMinEdgesWeightSum);
if (maxWeightCalc < edge->flEdgeWeightMax)
{
assignOK &= edge->setEdgeWeightMaxChecked(maxWeightCalc, slop, &usedSlop);
@@ -12951,7 +13023,6 @@ void Compiler::fgComputeEdgeWeights()
}
}
}
-
}
if (inconsistentProfileData)
@@ -12971,7 +13042,7 @@ void Compiler::fgComputeEdgeWeights()
EARLY_EXIT:;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
if (inconsistentProfileData)
{
@@ -12981,13 +13052,15 @@ EARLY_EXIT:;
{
if (hasIncompleteEdgeWeights)
{
- printf("fgComputeEdgeWeights() was able to compute exact edge weights for %3d of the %3d edges, using %d passes.\n",
- goodEdgeCountCurrent, numEdges, iterations);
+ printf("fgComputeEdgeWeights() was able to compute exact edge weights for %3d of the %3d edges, using "
+ "%d passes.\n",
+ goodEdgeCountCurrent, numEdges, iterations);
}
else
{
- printf("fgComputeEdgeWeights() was able to compute exact edge weights for all of the %3d edges, using %d passes.\n",
- numEdges, iterations);
+ printf("fgComputeEdgeWeights() was able to compute exact edge weights for all of the %3d edges, using "
+ "%d passes.\n",
+ numEdges, iterations);
}
fgPrintEdgeWeights();
@@ -12995,16 +13068,16 @@ EARLY_EXIT:;
}
#endif // DEBUG
- fgSlopUsedInEdgeWeights = usedSlop;
+ fgSlopUsedInEdgeWeights = usedSlop;
fgRangeUsedInEdgeWeights = false;
// See if any edge weight are expressed in [min..max] form
- for (bDst = fgFirstBB; bDst != NULL; bDst = bDst->bbNext)
+ for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
{
- if (bDst->bbPreds != NULL)
+ if (bDst->bbPreds != nullptr)
{
- for (edge = bDst->bbPreds; edge != NULL; edge = edge->flNext)
+ for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
{
bSrc = edge->flBlock;
// This is the control flow edge (bSrc -> bDst)
@@ -13022,15 +13095,15 @@ EARLY_EXIT:;
}
}
- fgHaveValidEdgeWeights = !inconsistentProfileData;
- fgEdgeWeightsComputed = true;
+ fgHaveValidEdgeWeights = !inconsistentProfileData;
+ fgEdgeWeightsComputed = true;
}
// fgOptimizeBranchToEmptyUnconditional:
// optimize a jump to an empty block which ends in an unconditional branch.
// Args:
// block: source block
-// bDest: destination
+// bDest: destination
// Returns: true if we changed the code
//
bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest)
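In the hunks below, the optimization boils down to: when a block jumps to an empty block whose only job is to jump somewhere else, retarget the first jump directly. A sketch of that rewrite with a hypothetical Blk type (statementsEmpty and jumpDest are invented stand-ins for the real flags):

// Sketch only: hypothetical block shape, not the JIT's BasicBlock.
struct Blk
{
    bool statementsEmpty = true; // the block contains no real statements
    Blk* jumpDest = nullptr;     // unconditional jump target, if any
};

// If 'block' jumps to an empty block that just jumps on to a third block,
// point 'block' straight at the final destination.
bool SkipEmptyJump(Blk* block)
{
    Blk* dest = block->jumpDest;
    if ((dest != nullptr) && dest->statementsEmpty && (dest->jumpDest != nullptr) && (dest->jumpDest != dest))
    {
        block->jumpDest = dest->jumpDest;
        return true;
    }
    return false;
}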
@@ -13080,16 +13153,16 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc
if (fgIsUsingProfileWeights() && !fgEdgeWeightsComputed)
{
fgNeedsUpdateFlowGraph = true;
- optimizeJump = false;
+ optimizeJump = false;
}
if (optimizeJump)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("\nOptimizing a jump to an unconditional jump (BB%02u -> BB%02u -> BB%02u)\n",
- block->bbNum, bDest->bbNum, bDest->bbJumpDest->bbNum);
+ printf("\nOptimizing a jump to an unconditional jump (BB%02u -> BB%02u -> BB%02u)\n", block->bbNum,
+ bDest->bbNum, bDest->bbJumpDest->bbNum);
}
#endif // DEBUG
@@ -13100,7 +13173,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc
if (fgHaveValidEdgeWeights && ((bDest->bbFlags & BBF_PROF_WEIGHT) != 0))
{
flowList* edge1 = fgGetPredForBlock(bDest, block);
- noway_assert(edge1 != NULL);
+ noway_assert(edge1 != nullptr);
BasicBlock::weight_t edgeWeight;
@@ -13113,7 +13186,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc
//
// Clear the profile weight flag
//
- bDest->bbFlags &= ~BBF_PROF_WEIGHT;
+ bDest->bbFlags &= ~BBF_PROF_WEIGHT;
}
else
{
@@ -13133,12 +13206,12 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc
else
{
bDest->bbWeight = BB_ZERO_WEIGHT;
- bDest->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag
+ bDest->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag
}
flowList* edge2 = fgGetPredForBlock(bDest->bbJumpDest, bDest);
- if (edge2 != NULL)
+ if (edge2 != nullptr)
{
//
// Update the edge2 min/max weights
@@ -13188,163 +13261,169 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
switch (block->bbJumpKind)
{
- case BBJ_COND:
- case BBJ_SWITCH:
- case BBJ_THROW:
-
- /* can never happen */
- noway_assert(!"Conditional, switch, or throw block with empty body!");
- break;
-
- case BBJ_CALLFINALLY:
- case BBJ_RETURN:
- case BBJ_EHCATCHRET:
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
-
- /* leave them as is */
- /* some compilers generate multiple returns and put all of them at the end -
- * to solve that we need the predecessor list */
-
- break;
+ case BBJ_COND:
+ case BBJ_SWITCH:
+ case BBJ_THROW:
- case BBJ_ALWAYS:
+ /* can never happen */
+ noway_assert(!"Conditional, switch, or throw block with empty body!");
+ break;
- // A GOTO cannot be to the next block since that
- // should have been fixed by the optimization above
- // An exception is made for a jump from Hot to Cold
- noway_assert(block->bbJumpDest != block->bbNext ||
- ((bPrev != nullptr) && bPrev->isBBCallAlwaysPair()) ||
- fgInDifferentRegions(block, block->bbNext));
+ case BBJ_CALLFINALLY:
+ case BBJ_RETURN:
+ case BBJ_EHCATCHRET:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
- /* Cannot remove the first BB */
- if (!bPrev)
- break;
+ /* leave them as is */
+ /* some compilers generate multiple returns and put all of them at the end -
+ * to solve that we need the predecessor list */
- /* Do not remove a block that jumps to itself - used for while (true){} */
- if (block->bbJumpDest == block)
break;
- /* Empty GOTO can be removed iff bPrev is BBJ_NONE */
- if (bPrev->bbJumpKind != BBJ_NONE)
- break;
+ case BBJ_ALWAYS:
- // can't allow fall through into cold code
- if (block->bbNext == fgFirstColdBlock)
- break;
+ // A GOTO cannot be to the next block since that
+ // should have been fixed by the optimization above
+ // An exception is made for a jump from Hot to Cold
+ noway_assert(block->bbJumpDest != block->bbNext || ((bPrev != nullptr) && bPrev->isBBCallAlwaysPair()) ||
+ fgInDifferentRegions(block, block->bbNext));
- /* Can fall through since this is similar with removing
- * a BBJ_NONE block, only the successor is different */
+ /* Cannot remove the first BB */
+ if (!bPrev)
+ {
+ break;
+ }
- __fallthrough;
+ /* Do not remove a block that jumps to itself - used for while (true){} */
+ if (block->bbJumpDest == block)
+ {
+ break;
+ }
- case BBJ_NONE:
+ /* Empty GOTO can be removed iff bPrev is BBJ_NONE */
+ if (bPrev->bbJumpKind != BBJ_NONE)
+ {
+ break;
+ }
- /* special case if this is the first BB */
- if (!bPrev)
- {
- assert (block == fgFirstBB);
- }
- else
- {
- /* If this block follows a BBJ_CALLFINALLY do not remove it
- * (because we don't know who may jump to it) */
- if (bPrev->bbJumpKind == BBJ_CALLFINALLY)
+ // can't allow fall through into cold code
+ if (block->bbNext == fgFirstColdBlock)
+ {
break;
- }
+ }
-#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- /* Don't remove finally targets */
- if (block->bbFlags & BBF_FINALLY_TARGET)
- break;
-#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
+ /* Can fall through since this is similar with removing
+ * a BBJ_NONE block, only the successor is different */
-#if FEATURE_EH_FUNCLETS
- /* Don't remove an empty block that is in a different EH region
- * from its successor block, if the block is the target of a
- * catch return. It is required that the return address of a
- * catch be in the correct EH region, for re-raise of thread
- * abort exceptions to work. Insert a NOP in the empty block
- * to ensure we generate code for the block, if we keep it.
- */
- {
- BasicBlock* succBlock;
+ __fallthrough;
- if (block->bbJumpKind == BBJ_ALWAYS)
+ case BBJ_NONE:
+
+ /* special case if this is the first BB */
+ if (!bPrev)
{
- succBlock = block->bbJumpDest;
+ assert(block == fgFirstBB);
}
else
{
- succBlock = block->bbNext;
+ /* If this block follows a BBJ_CALLFINALLY do not remove it
+ * (because we don't know who may jump to it) */
+ if (bPrev->bbJumpKind == BBJ_CALLFINALLY)
+ {
+ break;
+ }
}
+#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
+ /* Don't remove finally targets */
+ if (block->bbFlags & BBF_FINALLY_TARGET)
+ break;
+#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- if ((succBlock != NULL) &&
- !BasicBlock::sameEHRegion(block, succBlock))
+#if FEATURE_EH_FUNCLETS
+ /* Don't remove an empty block that is in a different EH region
+ * from its successor block, if the block is the target of a
+ * catch return. It is required that the return address of a
+ * catch be in the correct EH region, for re-raise of thread
+ * abort exceptions to work. Insert a NOP in the empty block
+ * to ensure we generate code for the block, if we keep it.
+ */
{
- // The empty block and the block that follows it are in different
- // EH regions. Is this a case where they can't be merged?
+ BasicBlock* succBlock;
- bool okToMerge = true; // assume it's ok
- for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
+ if (block->bbJumpKind == BBJ_ALWAYS)
{
- if (pred->flBlock->bbJumpKind == BBJ_EHCATCHRET)
- {
- assert(pred->flBlock->bbJumpDest == block);
- okToMerge = false; // we can't get rid of the empty block
- break;
- }
+ succBlock = block->bbJumpDest;
+ }
+ else
+ {
+ succBlock = block->bbNext;
}
- if (!okToMerge)
+ if ((succBlock != nullptr) && !BasicBlock::sameEHRegion(block, succBlock))
{
- // Insert a NOP in the empty block to ensure we generate code
- // for the catchret target in the right EH region.
- GenTreePtr nopStmt =
- fgInsertStmtAtEnd(block, new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID));
- fgSetStmtSeq(nopStmt);
- gtSetStmtInfo(nopStmt);
+ // The empty block and the block that follows it are in different
+ // EH regions. Is this a case where they can't be merged?
-#ifdef DEBUG
- if (verbose)
+ bool okToMerge = true; // assume it's ok
+ for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
{
- printf("\nKeeping empty block BB%02u - it is the target of a catch return\n",
- block->bbNum);
+ if (pred->flBlock->bbJumpKind == BBJ_EHCATCHRET)
+ {
+ assert(pred->flBlock->bbJumpDest == block);
+ okToMerge = false; // we can't get rid of the empty block
+ break;
+ }
}
+
+ if (!okToMerge)
+ {
+ // Insert a NOP in the empty block to ensure we generate code
+ // for the catchret target in the right EH region.
+ GenTreePtr nopStmt = fgInsertStmtAtEnd(block, new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID));
+ fgSetStmtSeq(nopStmt);
+ gtSetStmtInfo(nopStmt);
+
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\nKeeping empty block BB%02u - it is the target of a catch return\n", block->bbNum);
+ }
#endif // DEBUG
- break; // go to the next block
+ break; // go to the next block
+ }
}
}
- }
#endif // FEATURE_EH_FUNCLETS
- if (!ehCanDeleteEmptyBlock(block))
- {
- // We're not allowed to remove this block due to reasons related to the EH table.
- break;
- }
-
- /* special case if this is the last BB */
- if (block == fgLastBB)
- {
- if (!bPrev)
+ if (!ehCanDeleteEmptyBlock(block))
+ {
+ // We're not allowed to remove this block due to reasons related to the EH table.
break;
- fgLastBB = bPrev;
- }
+ }
- /* Remove the block */
- compCurBB = block;
- fgRemoveBlock(block, false);
- return true;
+ /* special case if this is the last BB */
+ if (block == fgLastBB)
+ {
+ if (!bPrev)
+ {
+ break;
+ }
+ fgLastBB = bPrev;
+ }
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ /* Remove the block */
+ compCurBB = block;
+ fgRemoveBlock(block, false);
+ return true;
+
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
return false;
-
}
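fgOptimizeEmptyBlock, reindented above, only removes an empty fall-through or goto block when a series of safety checks pass: it is not the first block, not a self-jump, not a kept finally/catch-return target, and its successor sits in the same EH region. Condensed into a hypothetical predicate (the field names are invented; the real checks also consult the EH table and the cold-code boundary):

// Sketch only: a condensed stand-in for the checks in fgOptimizeEmptyBlock.
struct Blk
{
    bool isFirst    = false;
    bool isEmpty    = false;
    bool keepAlways = false;   // e.g. finally targets and catch-return targets
    Blk* jumpDest   = nullptr; // nullptr means plain fall-through
    Blk* next       = nullptr;
    int  ehRegion   = 0;
};

bool CanRemoveEmptyBlock(const Blk* b)
{
    if (!b->isEmpty || b->isFirst || b->keepAlways)
    {
        return false;
    }
    if (b->jumpDest == b) // self-jump, used for while (true) {}
    {
        return false;
    }
    const Blk* succ = (b->jumpDest != nullptr) ? b->jumpDest : b->next;
    return (succ != nullptr) && (succ->ehRegion == b->ehRegion);
}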
// fgOptimizeSwitchBranches:
@@ -13359,11 +13438,11 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
{
assert(block->bbJumpKind == BBJ_SWITCH);
- unsigned jmpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab;
- BasicBlock* bNewDest; // the new jump target for the current switch case
- BasicBlock* bDest;
- bool returnvalue = false;
+ unsigned jmpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab;
+ BasicBlock* bNewDest; // the new jump target for the current switch case
+ BasicBlock* bDest;
+ bool returnvalue = false;
do
{
@@ -13372,9 +13451,8 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
bNewDest = bDest;
// Do we have a JUMP to an empty unconditional JUMP block?
- if (bDest->isEmpty() &&
- (bDest->bbJumpKind == BBJ_ALWAYS) &&
- (bDest != bDest->bbJumpDest)) // special case for self jumps
+ if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) &&
+ (bDest != bDest->bbJumpDest)) // special case for self jumps
{
bool optimizeJump = true;
@@ -13393,16 +13471,17 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
if (fgIsUsingProfileWeights() && !fgEdgeWeightsComputed)
{
fgNeedsUpdateFlowGraph = true;
- optimizeJump = false;
+ optimizeJump = false;
}
if (optimizeJump)
{
bNewDest = bDest->bbJumpDest;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("\nOptimizing a switch jump to an empty block with an unconditional jump (BB%02u -> BB%02u -> BB%02u)\n",
+ printf("\nOptimizing a switch jump to an empty block with an unconditional jump (BB%02u -> BB%02u "
+ "-> BB%02u)\n",
block->bbNum, bDest->bbNum, bNewDest->bbNum);
}
#endif // DEBUG
@@ -13419,10 +13498,10 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
{
if (fgHaveValidEdgeWeights)
{
- flowList* edge = fgGetPredForBlock(bDest, block);
+ flowList* edge = fgGetPredForBlock(bDest, block);
BasicBlock::weight_t branchThroughWeight = edge->flEdgeWeightMin;
- if (bDest->bbWeight > branchThroughWeight)
+ if (bDest->bbWeight > branchThroughWeight)
{
bDest->bbWeight -= branchThroughWeight;
}
@@ -13447,8 +13526,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
goto REPEAT_SWITCH;
}
- }
- while (++jmpTab, --jmpCnt);
+ } while (++jmpTab, --jmpCnt);
GenTreeStmt* switchStmt = block->lastTopLevelStmt();
GenTreePtr switchTree = switchStmt->gtStmtExpr;
@@ -13458,8 +13536,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
if (switchTree->gtOper == GT_SWITCH_TABLE)
{
noway_assert(fgOrder == FGOrderLinear);
- assert(switchStmt->AsStmt()->gtStmtIsTopLevel() &&
- (switchStmt->gtNext == nullptr));
+ assert(switchStmt->AsStmt()->gtStmtIsTopLevel() && (switchStmt->gtNext == nullptr));
}
else
{
@@ -13467,7 +13544,6 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
}
noway_assert(switchTree->gtType == TYP_VOID);
-
// At this point all of the case jump targets have been updated such
// that none of them go to a block that is an empty unconditional block
//
@@ -13481,10 +13557,9 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
BasicBlock* uniqueSucc = jmpTab[0];
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("\nRemoving a switch jump with a single target (BB%02u)\n",
- block->bbNum);
+ printf("\nRemoving a switch jump with a single target (BB%02u)\n", block->bbNum);
printf("BEFORE:\n");
}
#endif // DEBUG
@@ -13494,21 +13569,25 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
if (switchTree->gtFlags & GTF_SIDE_EFFECT)
{
/* Extract the side effects from the conditional */
- GenTreePtr sideEffList = NULL;
+ GenTreePtr sideEffList = nullptr;
gtExtractSideEffList(switchTree, &sideEffList);
- if (sideEffList == NULL)
+ if (sideEffList == nullptr)
+ {
goto NO_SWITCH_SIDE_EFFECT;
+ }
noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nSwitch expression has side effects! Extracting side effects...\n");
- gtDispTree(switchTree); printf("\n");
- gtDispTree(sideEffList); printf("\n");
+ gtDispTree(switchTree);
+ printf("\n");
+ gtDispTree(sideEffList);
+ printf("\n");
}
#endif // DEBUG
@@ -13547,14 +13626,13 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
{
for (unsigned i = 1; i < jmpCnt; ++i)
{
- (void) fgRemoveRefPred(jmpTab[i], block);
+ (void)fgRemoveRefPred(jmpTab[i], block);
}
}
return true;
}
- else if (block->bbJumpSwt->bbsCount == 2 &&
- block->bbJumpSwt->bbsDstTab[1] == block->bbNext)
+ else if (block->bbJumpSwt->bbsCount == 2 && block->bbJumpSwt->bbsDstTab[1] == block->bbNext)
{
/* Use a BBJ_COND(switchVal==0) for a switch with only one
significant clause besides the default clause, if the
@@ -13571,19 +13649,18 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("\nConverting a switch (BB%02u) with only one significant clause besides a default target to a conditional branch\n",
+ printf("\nConverting a switch (BB%02u) with only one significant clause besides a default target to a "
+ "conditional branch\n",
block->bbNum);
}
#endif // DEBUG
switchTree->ChangeOper(GT_JTRUE);
- GenTree* zeroConstNode = gtNewZeroConNode(genActualType(switchVal->TypeGet()));
- GenTree* condNode = gtNewOperNode(GT_EQ, TYP_INT,
- switchVal,
- zeroConstNode);
- switchTree->gtOp.gtOp1 = condNode;
+ GenTree* zeroConstNode = gtNewZeroConNode(genActualType(switchVal->TypeGet()));
+ GenTree* condNode = gtNewOperNode(GT_EQ, TYP_INT, switchVal, zeroConstNode);
+ switchTree->gtOp.gtOp1 = condNode;
switchTree->gtOp.gtOp1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// Re-link the nodes for this statement.
@@ -13604,32 +13681,36 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block)
// for tail-duplicating its successor (such as assigning a constant to a local).
// Args:
// block: BasicBlock we are considering duplicating the successor of
-// Returns:
+// Returns:
// true if it seems like a good idea
//
-bool Compiler::fgBlockEndFavorsTailDuplication(BasicBlock *block)
+bool Compiler::fgBlockEndFavorsTailDuplication(BasicBlock* block)
{
if (block->isRunRarely())
+ {
return false;
+ }
if (!block->lastTopLevelStmt())
+ {
return false;
+ }
else
{
// Tail duplication tends to pay off when the last statement
// is an assignment of a constant, arraylength, or a relop.
// This is because these statements produce information about values
- // that would otherwise be lost at the upcoming merge point.
+ // that would otherwise be lost at the upcoming merge point.
GenTreeStmt* lastStmt = block->lastTopLevelStmt();
- GenTree* tree = lastStmt->gtStmtExpr;
+ GenTree* tree = lastStmt->gtStmtExpr;
if (tree->gtOper != GT_ASG)
+ {
return false;
+ }
GenTree* op2 = tree->gtOp.gtOp2;
- if (op2->gtOper != GT_ARR_LENGTH
- && !op2->OperIsConst()
- && ((op2->OperKind() & GTK_RELOP) == 0))
+ if (op2->gtOper != GT_ARR_LENGTH && !op2->OperIsConst() && ((op2->OperKind() & GTK_RELOP) == 0))
{
return false;
}
@@ -13637,55 +13718,70 @@ bool Compiler::fgBlockEndFavorsTailDuplication(BasicBlock *block)
return true;
}
-
// fgBlockIsGoodTailDuplicationCandidate:
-// Heuristic function that examines a block (presumably one that is a merge point) to determine
+// Heuristic function that examines a block (presumably one that is a merge point) to determine
// if it should be duplicated.
// args:
// target - the tail block (candidate for duplication)
// returns:
// true if this block seems like a good candidate for duplication
//
-bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock *target)
+bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target)
{
GenTreeStmt* stmt = target->FirstNonPhiDef();
// Here we are looking for blocks with a single statement feeding a conditional branch.
- // These blocks are small, and when duplicated onto the tail of blocks that end in
+ // These blocks are small, and when duplicated onto the tail of blocks that end in
// assignments, there is a high probability of the branch completely going away.
// This is by no means the only kind of tail that it is beneficial to duplicate,
// just the only one we recognize for now.
-
+
if (stmt != target->lastStmt())
+ {
return false;
+ }
if (target->bbJumpKind != BBJ_COND)
+ {
return false;
+ }
GenTree* tree = stmt->gtStmtExpr;
if (tree->gtOper != GT_JTRUE)
+ {
return false;
+ }
// must be some kind of relational operator
GenTree* cond = tree->gtOp.gtOp1;
if (!(cond->OperKind() & GTK_RELOP))
+ {
return false;
+ }
// op1 must be some combination of casts of a local or constant
GenTree* op1 = cond->gtOp.gtOp1;
while (op1->gtOper == GT_CAST)
+ {
op1 = op1->gtOp.gtOp1;
+ }
if (!op1->IsLocal() && !op1->OperIsConst())
+ {
return false;
+ }
// op2 must be some combination of casts of a local or constant
GenTree* op2 = cond->gtOp.gtOp2;
while (op2->gtOper == GT_CAST)
+ {
op2 = op2->gtOp.gtOp1;
+ }
if (!op2->IsLocal() && !op2->OperIsConst())
+ {
return false;
+ }
return true;
}
@@ -13697,7 +13793,7 @@ bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock *target)
// Args:
// block - block with uncond branch
// target - block which is target of first block
-//
+//
// returns: true if changes were made
bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target)
@@ -13707,21 +13803,27 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
// TODO-Review: OK if they are in the same region?
if (compHndBBtabCount > 0)
+ {
return false;
+ }
if (!fgBlockIsGoodTailDuplicationCandidate(target))
+ {
return false;
+ }
if (!fgBlockEndFavorsTailDuplication(block))
+ {
return false;
+ }
GenTreeStmt* stmt = target->FirstNonPhiDef();
// Duplicate the target block at the end of this block
-
+
GenTree* cloned = gtCloneExpr(stmt->gtStmtExpr);
noway_assert(cloned);
- GenTree *jmpStmt = gtNewStmt(cloned);
+ GenTree* jmpStmt = gtNewStmt(cloned);
block->bbJumpKind = BBJ_COND;
block->bbJumpDest = target->bbJumpDest;
@@ -13730,21 +13832,21 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
// add an unconditional block after this block to jump to the target block's fallthrough block
- BasicBlock *next = fgNewBBafter(BBJ_ALWAYS, block, true);
- next->bbFlags = block->bbFlags | BBF_INTERNAL;
+ BasicBlock* next = fgNewBBafter(BBJ_ALWAYS, block, true);
+ next->bbFlags = block->bbFlags | BBF_INTERNAL;
next->bbFlags &= ~(BBF_TRY_BEG | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_HAS_LABEL | BBF_JMP_TARGET |
- BBF_FUNCLET_BEG | BBF_LOOP_PREHEADER | BBF_KEEP_BBJ_ALWAYS);
-
+ BBF_FUNCLET_BEG | BBF_LOOP_PREHEADER | BBF_KEEP_BBJ_ALWAYS);
+
next->bbJumpDest = target->bbNext;
target->bbNext->bbFlags |= BBF_JMP_TARGET;
- fgAddRefPred(next, block);
+ fgAddRefPred(next, block);
fgAddRefPred(next->bbJumpDest, next);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("fgOptimizeUncondBranchToSimpleCond(from BB%02u to cond BB%02u), created new uncond BB%02u\n",
- block->bbNum, target->bbNum, next->bbNum);
+ printf("fgOptimizeUncondBranchToSimpleCond(from BB%02u to cond BB%02u), created new uncond BB%02u\n",
+ block->bbNum, target->bbNum, next->bbNum);
}
#endif // DEBUG
@@ -13758,7 +13860,6 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
return true;
}
-
// fgOptimizeBranchToNext:
// Optimize a block which has a branch to the following block
// Args:
@@ -13784,15 +13885,16 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi
if (!(block->bbFlags & BBF_KEEP_BBJ_ALWAYS))
{
// We can't remove if the BBJ_ALWAYS is part of a BBJ_CALLFINALLY pair
- if ( (bPrev == nullptr) || !bPrev->isBBCallAlwaysPair() )
+ if ((bPrev == nullptr) || !bPrev->isBBCallAlwaysPair())
{
/* the unconditional jump is to the next BB */
block->bbJumpKind = BBJ_NONE;
block->bbFlags &= ~BBF_NEEDS_GCPOLL;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("\nRemoving unconditional jump to next block (BB%02u -> BB%02u) (converted BB%02u to fall-through)\n",
+ printf("\nRemoving unconditional jump to next block (BB%02u -> BB%02u) (converted BB%02u to "
+ "fall-through)\n",
block->bbNum, bNext->bbNum, block->bbNum);
}
#endif // DEBUG
@@ -13811,10 +13913,9 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi
noway_assert(cond->gtStmtExpr->gtOper == GT_JTRUE);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("\nRemoving conditional jump to next block (BB%02u -> BB%02u)\n",
- block->bbNum, bNext->bbNum);
+ printf("\nRemoving conditional jump to next block (BB%02u -> BB%02u)\n", block->bbNum, bNext->bbNum);
}
#endif // DEBUG
@@ -13835,11 +13936,11 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi
else
{
/* Extract the side effects from the conditional */
- GenTreePtr sideEffList = NULL;
+ GenTreePtr sideEffList = nullptr;
gtExtractSideEffList(cond->gtStmtExpr, &sideEffList);
- if (sideEffList == NULL)
+ if (sideEffList == nullptr)
{
compCurBB = block;
fgRemoveStmt(block, cond);
@@ -13851,8 +13952,10 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi
if (verbose)
{
printf("\nConditional has side effects! Extracting side effects...\n");
- gtDispTree(cond); printf("\n");
- gtDispTree(sideEffList); printf("\n");
+ gtDispTree(cond);
+ printf("\n");
+ gtDispTree(sideEffList);
+ printf("\n");
}
#endif // DEBUG
@@ -13874,14 +13977,14 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi
/* Re-link the nodes for this statement */
fgSetStmtSeq(cond);
}
- }
+ }
}
}
else
{
- compCurBB = block;
- /* conditional has NO side effect - remove it */
- fgRemoveStmt(block, cond);
+ compCurBB = block;
+ /* conditional has NO side effect - remove it */
+ fgRemoveStmt(block, cond);
}
/* Conditional is gone - simply fall into the next block */
@@ -13916,42 +14019,58 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi
*
*/
-bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
+bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
{
if (opts.MinOpts())
+ {
return false;
+ }
if (bJump->bbJumpKind != BBJ_ALWAYS)
+ {
return false;
+ }
if (bJump->bbFlags & BBF_KEEP_BBJ_ALWAYS)
+ {
return false;
+ }
// Don't hoist a conditional branch into the scratch block; we'd prefer it stay
// either BBJ_NONE or BBJ_ALWAYS.
if (fgBBisScratch(bJump))
+ {
return false;
+ }
BasicBlock* bDest = bJump->bbJumpDest;
if (bDest->bbJumpKind != BBJ_COND)
+ {
return false;
+ }
if (bDest->bbJumpDest != bJump->bbNext)
+ {
return false;
+ }
// 'bJump' must be in the same try region as the condition, since we're going to insert
// a duplicated condition in 'bJump', and the condition might include exception throwing code.
if (!BasicBlock::sameTryRegion(bJump, bDest))
+ {
return false;
+ }
// do not jump into another try region
BasicBlock* bDestNext = bDest->bbNext;
if (bDestNext->hasTryIndex() && !BasicBlock::sameTryRegion(bJump, bDestNext))
+ {
return false;
+ }
GenTreeStmt* stmt;
- unsigned estDupCostSz = 0;
+ unsigned estDupCostSz = 0;
for (stmt = bDest->firstStmt(); stmt; stmt = stmt->gtNextStmt)
{
GenTreePtr expr = stmt->gtStmtExpr;
@@ -13962,13 +14081,13 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
estDupCostSz += expr->gtCostSz;
}
- bool allProfileWeightsAreValid = false;
- BasicBlock::weight_t weightJump = bJump->bbWeight;
- BasicBlock::weight_t weightDest = bDest->bbWeight;
- BasicBlock::weight_t weightNext = bJump->bbNext->bbWeight;
- bool rareJump = bJump->isRunRarely();
- bool rareDest = bDest->isRunRarely();
- bool rareNext = bJump->bbNext->isRunRarely();
+ bool allProfileWeightsAreValid = false;
+ BasicBlock::weight_t weightJump = bJump->bbWeight;
+ BasicBlock::weight_t weightDest = bDest->bbWeight;
+ BasicBlock::weight_t weightNext = bJump->bbNext->bbWeight;
+ bool rareJump = bJump->isRunRarely();
+ bool rareDest = bDest->isRunRarely();
+ bool rareNext = bJump->bbNext->isRunRarely();
// If we have profile data then we calculate the number of times
// the loop will iterate into loopIterations
@@ -13977,8 +14096,8 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
// Only rely upon the profile weight when all three of these blocks
// have either good profile weights or are rarelyRun
//
- if ((bJump->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) &&
- (bDest->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) &&
+ if ((bJump->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) &&
+ (bDest->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) &&
(bJump->bbNext->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)))
{
allProfileWeightsAreValid = true;
@@ -13993,8 +14112,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
rareNext = true;
}
- if (((weightDest * 100) < weightJump) &&
- ((weightDest * 100) < weightNext))
+ if (((weightDest * 100) < weightJump) && ((weightDest * 100) < weightNext))
{
rareDest = true;
}
@@ -14031,24 +14149,19 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
}
}
-
// If the compare has too high cost then we don't want to dup
bool costIsTooHigh = (estDupCostSz > maxDupCostSz);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("\nDuplication of the conditional block BB%02u (always branch from BB%02u) %s, because the cost of duplication (%i) is %s than %i,"
+ printf("\nDuplication of the conditional block BB%02u (always branch from BB%02u) %s, because the cost of "
+ "duplication (%i) is %s than %i,"
" validProfileWeights = %s\n",
- bDest->bbNum,
- bJump->bbNum,
- costIsTooHigh ? "not done" : "performed",
- estDupCostSz,
- costIsTooHigh ? "greater" : "less or equal",
- maxDupCostSz,
- allProfileWeightsAreValid ? "true" : "false");
- }
+ bDest->bbNum, bJump->bbNum, costIsTooHigh ? "not done" : "performed", estDupCostSz,
+ costIsTooHigh ? "greater" : "less or equal", maxDupCostSz, allProfileWeightsAreValid ? "true" : "false");
+ }
#endif // DEBUG
if (costIsTooHigh)
@@ -14058,9 +14171,9 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
/* Looks good - duplicate the conditional block */
- GenTree* newStmtList = NULL; // new stmt list to be added to bJump
- GenTree* newStmtLast = NULL;
- bool cloneExprFailed = false;
+ GenTree* newStmtList = nullptr; // new stmt list to be added to bJump
+ GenTree* newStmtLast = nullptr;
+ bool cloneExprFailed = false;
/* Visit all the statements in bDest */
@@ -14072,7 +14185,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
// cloneExpr doesn't handle everything
- if (stmt == NULL)
+ if (stmt == nullptr)
{
cloneExprFailed = true;
break;
@@ -14080,13 +14193,13 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
/* Append the expression to our list */
- if (newStmtList != NULL)
+ if (newStmtList != nullptr)
{
newStmtLast->gtNext = stmt;
}
else
{
- newStmtList = stmt;
+ newStmtList = stmt;
}
stmt->gtPrev = newStmtLast;
@@ -14094,14 +14207,18 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
}
if (cloneExprFailed)
+ {
return false;
+ }
- noway_assert(newStmtLast != NULL);
- noway_assert(stmt != NULL);
+ noway_assert(newStmtLast != nullptr);
+ noway_assert(stmt != nullptr);
noway_assert(stmt->gtOper == GT_STMT);
- if ((newStmtLast == NULL) || (stmt == NULL) || (stmt->gtOper != GT_STMT))
+ if ((newStmtLast == nullptr) || (stmt == nullptr) || (stmt->gtOper != GT_STMT))
+ {
return false;
+ }
/* Get to the condition node from the statement tree */
@@ -14109,7 +14226,9 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
noway_assert(condTree->gtOper == GT_JTRUE);
if (condTree->gtOper != GT_JTRUE)
+ {
return false;
+ }
//
// Set condTree to the operand to the GT_JTRUE
@@ -14120,7 +14239,9 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
// This condTree has to be a RelOp comparison
//
if (condTree->OperIsCompare() == false)
+ {
return false;
+ }
// Bump up the ref-counts of any variables in 'stmt'
fgUpdateRefCntForClone(bJump, stmt->gtStmtExpr);
@@ -14128,7 +14249,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
//
// Find the last statement in the bJump block
//
- GenTreeStmt* lastStmt = NULL;
+ GenTreeStmt* lastStmt = nullptr;
for (stmt = bJump->firstStmt(); stmt; stmt = stmt->gtNextStmt)
{
lastStmt = stmt;
@@ -14136,9 +14257,9 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
stmt = bJump->firstStmt();
/* Join the two linked lists */
- newStmtLast->gtNext = NULL;
+ newStmtLast->gtNext = nullptr;
- if (lastStmt != NULL)
+ if (lastStmt != nullptr)
{
stmt->gtPrev = newStmtLast;
lastStmt->gtNext = newStmtList;
@@ -14146,8 +14267,8 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
}
else
{
- bJump->bbTreeList = newStmtList;
- newStmtList->gtPrev = newStmtLast;
+ bJump->bbTreeList = newStmtList;
+ newStmtList->gtPrev = newStmtLast;
}
//
@@ -14159,7 +14280,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
bJump->bbJumpDest = bDest->bbNext;
/* Mark the jump dest block as being a jump target */
- bJump->bbJumpDest->bbFlags |= BBF_JMP_TARGET|BBF_HAS_LABEL;
+ bJump->bbJumpDest->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
/* Update bbRefs and bbPreds */
@@ -14190,16 +14311,16 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
}
else
{
- BasicBlock::weight_t newWeightDest = 0;
+ BasicBlock::weight_t newWeightDest = 0;
BasicBlock::weight_t unloopWeightDest = 0;
if (weightDest > weightJump)
{
newWeightDest = (weightDest - weightJump);
}
- if (weightDest >= (BB_LOOP_WEIGHT*BB_UNITY_WEIGHT)/2)
+ if (weightDest >= (BB_LOOP_WEIGHT * BB_UNITY_WEIGHT) / 2)
{
- newWeightDest = (weightDest*2) / (BB_LOOP_WEIGHT*BB_UNITY_WEIGHT);
+ newWeightDest = (weightDest * 2) / (BB_LOOP_WEIGHT * BB_UNITY_WEIGHT);
}
if ((newWeightDest > 0) || (unloopWeightDest > 0))
{
@@ -14225,7 +14346,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
* Function called to optimize switch statements
*/
-bool Compiler::fgOptimizeSwitchJumps()
+bool Compiler::fgOptimizeSwitchJumps()
{
bool result = false; // Our return value
@@ -14265,7 +14386,7 @@ bool Compiler::fgOptimizeSwitchJumps()
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
/*****************************************************************************
*
@@ -14275,7 +14396,7 @@ bool Compiler::fgOptimizeSwitchJumps()
* all conditional jumps that would benefit.
*/
-void Compiler::fgReorderBlocks()
+void Compiler::fgReorderBlocks()
{
noway_assert(opts.compDbgCode == false);
@@ -14284,7 +14405,7 @@ void Compiler::fgReorderBlocks()
#endif // FEATURE_EH_FUNCLETS
// We can't relocate anything if we only have one block
- if (fgFirstBB->bbNext == NULL)
+ if (fgFirstBB->bbNext == nullptr)
{
return;
}
@@ -14297,7 +14418,7 @@ void Compiler::fgReorderBlocks()
newRarelyRun |= fgExpandRarelyRunBlocks();
#if !FEATURE_EH_FUNCLETS
- movedBlocks |= fgRelocateEHRegions();
+ movedBlocks |= fgRelocateEHRegions();
#endif // !FEATURE_EH_FUNCLETS
//
@@ -14317,7 +14438,7 @@ void Compiler::fgReorderBlocks()
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** In fgReorderBlocks()\n");
@@ -14327,16 +14448,14 @@ void Compiler::fgReorderBlocks()
}
#endif // DEBUG
- BasicBlock* bNext;
- BasicBlock* bPrev;
- BasicBlock* block;
- unsigned XTnum;
- EHblkDsc* HBtab;
+ BasicBlock* bNext;
+ BasicBlock* bPrev;
+ BasicBlock* block;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
// Iterate over every block, remembering our previous block in bPrev
- for (bPrev = fgFirstBB, block = bPrev->bbNext;
- block != NULL;
- bPrev = block, block = block->bbNext)
+ for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext)
{
//
// Consider relocating the rarely run blocks such that they are at the end of the method.
@@ -14345,23 +14464,27 @@ void Compiler::fgReorderBlocks()
// If block is marked with a BBF_KEEP_BBJ_ALWAYS flag then we don't move the block
if ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0)
+ {
continue;
+ }
// Finally and handler blocks are to be kept contiguous.
// TODO-CQ: Allow reordering within the handler region
if (block->hasHndIndex() == true)
+ {
continue;
+ }
- bool reorderBlock = true; // This is set to false if we decide not to reorder 'block'
- bool isRare = block->isRunRarely();
- BasicBlock* bDest = NULL;
- bool forwardBranch = false;
- bool backwardBranch = false;
+ bool reorderBlock = true; // This is set to false if we decide not to reorder 'block'
+ bool isRare = block->isRunRarely();
+ BasicBlock* bDest = nullptr;
+ bool forwardBranch = false;
+ bool backwardBranch = false;
// Setup bDest
if ((bPrev->bbJumpKind == BBJ_COND) || (bPrev->bbJumpKind == BBJ_ALWAYS))
{
- bDest = bPrev->bbJumpDest;
+ bDest = bPrev->bbJumpDest;
forwardBranch = fgIsForwardBranch(bPrev);
backwardBranch = !forwardBranch;
}
@@ -14378,9 +14501,8 @@ void Compiler::fgReorderBlocks()
BasicBlock::weight_t profHotWeight = -1;
- if ((bPrev->bbFlags & BBF_PROF_WEIGHT) &&
- (block->bbFlags & BBF_PROF_WEIGHT) &&
- ((bDest == NULL) || (bDest->bbFlags & BBF_PROF_WEIGHT)))
+ if ((bPrev->bbFlags & BBF_PROF_WEIGHT) && (block->bbFlags & BBF_PROF_WEIGHT) &&
+ ((bDest == nullptr) || (bDest->bbFlags & BBF_PROF_WEIGHT)))
{
//
// All blocks have profile information
@@ -14410,11 +14532,11 @@ void Compiler::fgReorderBlocks()
// The edge bPrev -> bDest must have a higher minimum weight
// than every other edge into bDest
//
- flowList* edgeFromPrev = fgGetPredForBlock(bDest, bPrev);
- noway_assert(edgeFromPrev != NULL);
+ flowList* edgeFromPrev = fgGetPredForBlock(bDest, bPrev);
+ noway_assert(edgeFromPrev != nullptr);
// Examine all of the other edges into bDest
- for (flowList* edge = bDest->bbPreds; edge != NULL; edge = edge->flNext)
+ for (flowList* edge = bDest->bbPreds; edge != nullptr; edge = edge->flNext)
{
if (edge != edgeFromPrev)
{
@@ -14434,7 +14556,7 @@ void Compiler::fgReorderBlocks()
//
// Examine all of the other edges into bDest
- for (flowList* edge = bDest->bbPreds; edge != NULL; edge = edge->flNext)
+ for (flowList* edge = bDest->bbPreds; edge != nullptr; edge = edge->flNext)
{
BasicBlock* bTemp = edge->flBlock;
@@ -14468,7 +14590,7 @@ void Compiler::fgReorderBlocks()
profHotWeight = block->bbWeight + 1;
}
// But we won't try to connect with bDest
- bDest = NULL;
+ bDest = nullptr;
}
}
}
@@ -14506,18 +14628,20 @@ void Compiler::fgReorderBlocks()
//
flowList* edgeToDest = fgGetPredForBlock(bDest, bPrev);
flowList* edgeToBlock = fgGetPredForBlock(block, bPrev);
- noway_assert(edgeToDest != NULL);
- noway_assert(edgeToBlock != NULL);
+ noway_assert(edgeToDest != nullptr);
+ noway_assert(edgeToBlock != nullptr);
//
// Calculate the taken ratio
// A takenRatio of 0.10 means taken 10% of the time, not taken 90% of the time
// A takenRatio of 0.50 means taken 50% of the time, not taken 50% of the time
// A takenRatio of 0.90 means taken 90% of the time, not taken 10% of the time
//
- double takenCount = ((double) edgeToDest->flEdgeWeightMin + (double) edgeToDest->flEdgeWeightMax) / 2.0;
- double notTakenCount = ((double) edgeToBlock->flEdgeWeightMin + (double) edgeToBlock->flEdgeWeightMax) / 2.0;
- double totalCount = takenCount + notTakenCount;
- double takenRatio = takenCount / totalCount;
+ double takenCount =
+ ((double)edgeToDest->flEdgeWeightMin + (double)edgeToDest->flEdgeWeightMax) / 2.0;
+ double notTakenCount =
+ ((double)edgeToBlock->flEdgeWeightMin + (double)edgeToBlock->flEdgeWeightMax) / 2.0;
+ double totalCount = takenCount + notTakenCount;
+ double takenRatio = takenCount / totalCount;
// If the takenRatio is greater than or equal to 51% then we will reverse the branch
if (takenRatio < 0.51)
@@ -14527,7 +14651,7 @@ void Compiler::fgReorderBlocks()
else
{
// set profHotWeight
- profHotWeight = (edgeToBlock->flEdgeWeightMin + edgeToBlock->flEdgeWeightMax)/2 - 1;
+ profHotWeight = (edgeToBlock->flEdgeWeightMin + edgeToBlock->flEdgeWeightMax) / 2 - 1;
}
}
else
@@ -14554,8 +14678,10 @@ void Compiler::fgReorderBlocks()
// Generally both weightDest and weightPrev should calculate
// the same value unless bPrev or bDest are part of a loop
//
- BasicBlock::weight_t weightDest = bDest->isMaxBBWeight() ? bDest->bbWeight : (bDest->bbWeight+1) / 2;
- BasicBlock::weight_t weightPrev = bPrev->isMaxBBWeight() ? bPrev->bbWeight : (bPrev->bbWeight+2) / 3;
+ BasicBlock::weight_t weightDest =
+ bDest->isMaxBBWeight() ? bDest->bbWeight : (bDest->bbWeight + 1) / 2;
+ BasicBlock::weight_t weightPrev =
+ bPrev->isMaxBBWeight() ? bPrev->bbWeight : (bPrev->bbWeight + 2) / 3;
// select the lower of weightDest and weightPrev
profHotWeight = (weightDest < weightPrev) ? weightDest : weightPrev;
@@ -14579,11 +14705,11 @@ void Compiler::fgReorderBlocks()
// and place it here since bPrev does not fall through.
BasicBlock::weight_t highestWeight = 0;
- BasicBlock* candidateBlock = NULL;
+ BasicBlock* candidateBlock = nullptr;
BasicBlock* lastNonFallThroughBlock = bPrev;
BasicBlock* bTmp = bPrev->bbNext;
- while (bTmp != NULL)
+ while (bTmp != nullptr)
{
// Don't try to split a Call/Always pair
//
@@ -14596,8 +14722,10 @@ void Compiler::fgReorderBlocks()
//
// Check for loop exit condition
//
- if (bTmp == NULL)
+ if (bTmp == nullptr)
+ {
break;
+ }
//
// if its weight is the highest one we've seen and
@@ -14609,7 +14737,7 @@ void Compiler::fgReorderBlocks()
// to bTmp (which is a higher weighted block) then it is better to keep our current
// candidateBlock and have it fall into bTmp
//
- if ((candidateBlock == NULL) ||
+ if ((candidateBlock == nullptr) ||
((candidateBlock->bbJumpKind != BBJ_COND) && (candidateBlock->bbJumpKind != BBJ_ALWAYS)) ||
(candidateBlock->bbJumpDest != bTmp))
{
@@ -14635,7 +14763,7 @@ void Compiler::fgReorderBlocks()
}
else
{
- noway_assert(candidateBlock != NULL);
+ noway_assert(candidateBlock != nullptr);
// If the candidateBlock is the same as block then skip this
if (candidateBlock == block)
@@ -14648,7 +14776,7 @@ void Compiler::fgReorderBlocks()
bDest = candidateBlock;
// set profHotWeight
- profHotWeight = highestWeight-1;
+ profHotWeight = highestWeight - 1;
}
}
}
@@ -14656,7 +14784,7 @@ void Compiler::fgReorderBlocks()
else // we don't have good profile info (or we are falling through)
{
-CHECK_FOR_RARE:;
+ CHECK_FOR_RARE:;
/* We only want to reorder when we have a rarely run */
/* block right after a normal block, */
@@ -14670,7 +14798,7 @@ CHECK_FOR_RARE:;
/* If the jump target bDest is also a rarely run block then we don't want to do the reversal */
if (bDest && bDest->isRunRarely())
{
- reorderBlock = false; /* Both block and bDest are rarely run */
+ reorderBlock = false; /* Both block and bDest are rarely run */
}
else
{
@@ -14717,16 +14845,16 @@ CHECK_FOR_RARE:;
// We set bStart to the first block that will be relocated
// and bEnd to the last block that will be relocated
- BasicBlock* bStart = block;
- BasicBlock* bEnd = bStart;
- bNext = bEnd->bbNext;
- bool connected_bDest = false;
+ BasicBlock* bStart = block;
+ BasicBlock* bEnd = bStart;
+ bNext = bEnd->bbNext;
+ bool connected_bDest = false;
if ((backwardBranch && !isRare) ||
- ((block->bbFlags & BBF_DONT_REMOVE) != 0)) // Don't choose option #1 when block is the start of a try region
+ ((block->bbFlags & BBF_DONT_REMOVE) != 0)) // Don't choose option #1 when block is the start of a try region
{
- bStart = NULL;
- bEnd = NULL;
+ bStart = nullptr;
+ bEnd = nullptr;
}
else
{
@@ -14744,13 +14872,17 @@ CHECK_FOR_RARE:;
//
// Check for loop exit condition
//
- if (bNext == NULL)
+ if (bNext == nullptr)
+ {
break;
+ }
#if FEATURE_EH_FUNCLETS
// Check if we've reached the funclets region, at the end of the function
if (fgFirstFuncletBB == bEnd->bbNext)
+ {
break;
+ }
#endif // FEATURE_EH_FUNCLETS
if (bNext == bDest)
@@ -14762,8 +14894,7 @@ CHECK_FOR_RARE:;
// All the blocks must have the same try index
// and must not have the BBF_DONT_REMOVE flag set
- if ( !BasicBlock::sameTryRegion(bStart, bNext) ||
- ((bNext->bbFlags & BBF_DONT_REMOVE) != 0) )
+ if (!BasicBlock::sameTryRegion(bStart, bNext) || ((bNext->bbFlags & BBF_DONT_REMOVE) != 0))
{
// exit the loop, bEnd is now set to the
// last block that we want to relocate
@@ -14819,13 +14950,12 @@ CHECK_FOR_RARE:;
// If after this calculation bStart2 is NULL we cannot use option #2,
// otherwise bStart2, bEnd2 and bPrev2 are all non-NULL and we will use option #2
- BasicBlock* bStart2 = NULL;
- BasicBlock* bEnd2 = NULL;
- BasicBlock* bPrev2 = NULL;
+ BasicBlock* bStart2 = nullptr;
+ BasicBlock* bEnd2 = nullptr;
+ BasicBlock* bPrev2 = nullptr;
// If option #1 didn't connect bDest and bDest isn't NULL
- if ((connected_bDest == false) &&
- (bDest != NULL) &&
+ if ((connected_bDest == false) && (bDest != nullptr) &&
// The jump target cannot be moved if it has the BBF_DONT_REMOVE flag set
((bDest->bbFlags & BBF_DONT_REMOVE) == 0))
{
@@ -14834,15 +14964,17 @@ CHECK_FOR_RARE:;
// setup bPrev2 to be the lexical pred of bDest
bPrev2 = block;
- while (bPrev2 != NULL)
+ while (bPrev2 != nullptr)
{
if (bPrev2->bbNext == bDest)
+ {
break;
+ }
bPrev2 = bPrev2->bbNext;
}
- if ((bPrev2 != NULL) && fgEhAllowsMoveBlock(bPrev, bDest))
+ if ((bPrev2 != nullptr) && fgEhAllowsMoveBlock(bPrev, bDest))
{
// We have decided that relocating bDest to be after bPrev is best
// Set bStart2 to the first block that will be relocated
@@ -14868,18 +15000,21 @@ CHECK_FOR_RARE:;
// Check for the Loop exit conditions
- if (bNext == NULL)
+ if (bNext == nullptr)
+ {
break;
+ }
if (bEnd2->bbFallsThrough() == false)
+ {
break;
+ }
// If we are relocating rarely run blocks..
// All the blocks must have the same try index,
// and must not have the BBF_DONT_REMOVE flag set
- if ( !BasicBlock::sameTryRegion(bStart2, bNext) ||
- ((bNext->bbFlags & BBF_DONT_REMOVE) != 0) )
+ if (!BasicBlock::sameTryRegion(bStart2, bNext) || ((bNext->bbFlags & BBF_DONT_REMOVE) != 0))
{
// exit the loop, bEnd2 is now set to the
// last block that we want to relocate
@@ -14916,33 +15051,40 @@ CHECK_FOR_RARE:;
}
// If we are using option #1 then ...
- if (bStart2 == NULL)
+ if (bStart2 == nullptr)
{
// Don't use option #1 for a backwards branch
- if (bStart == NULL)
+ if (bStart == nullptr)
+ {
continue;
+ }
// .... Don't move a set of blocks that are already at the end of the main method
if (bEnd == fgLastBBInMainFunction())
+ {
continue;
+ }
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- if (bDest != NULL)
+ if (bDest != nullptr)
{
if (bPrev->bbJumpKind == BBJ_COND)
{
- printf("Decided to reverse conditional branch at block BB%02u branch to BB%02u ", bPrev->bbNum, bDest->bbNum);
+ printf("Decided to reverse conditional branch at block BB%02u branch to BB%02u ", bPrev->bbNum,
+ bDest->bbNum);
}
else if (bPrev->bbJumpKind == BBJ_ALWAYS)
{
- printf("Decided to straighten unconditional branch at block BB%02u branch to BB%02u ", bPrev->bbNum, bDest->bbNum);
+ printf("Decided to straighten unconditional branch at block BB%02u branch to BB%02u ", bPrev->bbNum,
+ bDest->bbNum);
}
else
{
- printf("Decided to place hot code after BB%02u, placed BB%02u after this block ", bPrev->bbNum, bDest->bbNum);
+ printf("Decided to place hot code after BB%02u, placed BB%02u after this block ", bPrev->bbNum,
+ bDest->bbNum);
}
if (profHotWeight > 0)
@@ -14963,8 +15105,8 @@ CHECK_FOR_RARE:;
}
else
{
- printf("Decided to relocate block(s) after block BB%02u since they are %s block(s)\n",
- bPrev->bbNum, block->isRunRarely() ? "rarely run" : "uncommonly run");
+ printf("Decided to relocate block(s) after block BB%02u since they are %s block(s)\n", bPrev->bbNum,
+ block->isRunRarely() ? "rarely run" : "uncommonly run");
}
}
#endif // DEBUG
@@ -14974,13 +15116,13 @@ CHECK_FOR_RARE:;
BasicBlock* insertAfterBlk;
BasicBlock* bStartPrev;
- if (bStart2 != NULL)
+ if (bStart2 != nullptr)
{
// Option #2: relocating blocks starting at 'bDest' to follow bPrev
// Update bStart and bEnd so that we can use these two for all later operations
- bStart = bStart2;
- bEnd = bEnd2;
+ bStart = bStart2;
+ bEnd = bEnd2;
// Set bStartPrev to be the block that comes before bStart
bStartPrev = bPrev2;
@@ -14996,12 +15138,12 @@ CHECK_FOR_RARE:;
bStartPrev = bPrev;
// We will move [bStart..bEnd] but we will pick the insert location later
- insertAfterBlk = NULL;
+ insertAfterBlk = nullptr;
}
// We are going to move [bStart..bEnd] so they can't be NULL
- noway_assert(bStart != NULL);
- noway_assert(bEnd != NULL);
+ noway_assert(bStart != nullptr);
+ noway_assert(bEnd != nullptr);
// bEnd can't be a BBJ_CALLFINALLY unless it is a RETLESS call
noway_assert((bEnd->bbJumpKind != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL));
@@ -15013,17 +15155,15 @@ CHECK_FOR_RARE:;
// we need to compute and remember if bStart is in each of
// the try and handler regions
//
- bool* fStartIsInTry = NULL;
- bool* fStartIsInHnd = NULL;
+ bool* fStartIsInTry = nullptr;
+ bool* fStartIsInHnd = nullptr;
if (compHndBBtabCount > 0)
{
fStartIsInTry = new (this, CMK_Unknown) bool[compHndBBtabCount];
fStartIsInHnd = new (this, CMK_Unknown) bool[compHndBBtabCount];
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
fStartIsInTry[XTnum] = HBtab->InTryRegionBBRange(bStart);
fStartIsInHnd[XTnum] = HBtab->InHndRegionBBRange(bStart);
@@ -15033,7 +15173,7 @@ CHECK_FOR_RARE:;
/* Temporarily unlink [bStart..bEnd] from the flow graph */
fgUnlinkRange(bStart, bEnd);
- if (insertAfterBlk == NULL)
+ if (insertAfterBlk == nullptr)
{
// Find new location for the unlinked block(s)
// Set insertAfterBlk to the block which will precede the insertion point
@@ -15052,7 +15192,7 @@ CHECK_FOR_RARE:;
{
BasicBlock* startBlk;
BasicBlock* lastBlk;
- EHblkDsc* ehDsc = ehInitTryBlockRange(bStart, &startBlk, &lastBlk);
+ EHblkDsc* ehDsc = ehInitTryBlockRange(bStart, &startBlk, &lastBlk);
BasicBlock* endBlk;
@@ -15085,7 +15225,9 @@ CHECK_FOR_RARE:;
// startBlk cannot equal endBlk as it must come before endBlk
if (startBlk == endBlk)
+ {
goto CANNOT_MOVE;
+ }
// we also can't start searching the try region at bStart
if (startBlk == bStart)
@@ -15094,8 +15236,10 @@ CHECK_FOR_RARE:;
// or if bEnd->bbNext is in a different try region
// then we cannot move the blocks
//
- if ((bEnd->bbNext == NULL) || !BasicBlock::sameTryRegion(startBlk, bEnd->bbNext))
+ if ((bEnd->bbNext == nullptr) || !BasicBlock::sameTryRegion(startBlk, bEnd->bbNext))
+ {
goto CANNOT_MOVE;
+ }
startBlk = bEnd->bbNext;
@@ -15103,18 +15247,22 @@ CHECK_FOR_RARE:;
// startBlk cannot equal endBlk as it must come before endBlk
if (startBlk == endBlk)
+ {
goto CANNOT_MOVE;
+ }
BasicBlock* tmpBlk = startBlk;
- while ((tmpBlk != endBlk) && (tmpBlk != NULL))
+ while ((tmpBlk != endBlk) && (tmpBlk != nullptr))
{
tmpBlk = tmpBlk->bbNext;
}
// when tmpBlk is NULL that means startBlk is after endBlk
// so there is no way to move bStart..bEnd within the try region
- if (tmpBlk == NULL)
+ if (tmpBlk == nullptr)
+ {
goto CANNOT_MOVE;
+ }
}
}
else
@@ -15134,11 +15282,10 @@ CHECK_FOR_RARE:;
// another [rarely run] block that comes after bPrev (forward branch)
// then we can set up nearBlk to eliminate this jump sometimes
//
- BasicBlock* nearBlk = NULL;
- BasicBlock* jumpBlk = NULL;
+ BasicBlock* nearBlk = nullptr;
+ BasicBlock* jumpBlk = nullptr;
- if ((bEnd->bbJumpKind == BBJ_ALWAYS) &&
- (!isRare || bEnd->bbJumpDest->isRunRarely()) &&
+ if ((bEnd->bbJumpKind == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) &&
fgIsForwardBranch(bEnd, bPrev))
{
// Set nearBlk to be the block in [startBlk..endBlk]
@@ -15155,27 +15302,29 @@ CHECK_FOR_RARE:;
{
// Check if nearBlk satisfies our requirement
if (nearBlk->bbNext == bEnd->bbJumpDest)
+ {
break;
+ }
}
// Did we reach the endBlk?
if (nearBlk == endBlk)
{
- nearBlk = NULL;
+ nearBlk = nullptr;
break;
}
// advance nearBlk to the next block
nearBlk = nearBlk->bbNext;
- } while (nearBlk != NULL);
+ } while (nearBlk != nullptr);
}
// if nearBlk is NULL then we set nearBlk to be the
// first block that we want to insert after.
- if (nearBlk == NULL)
+ if (nearBlk == nullptr)
{
- if (bDest != NULL)
+ if (bDest != nullptr)
{
// we want to insert after bDest
nearBlk = bDest;
@@ -15189,38 +15338,36 @@ CHECK_FOR_RARE:;
/* Set insertAfterBlk to the block which we will insert after. */
- insertAfterBlk = fgFindInsertPoint(bStart->bbTryIndex,
- true, // Insert in the try region.
- startBlk, endBlk,
- nearBlk, jumpBlk,
- bStart->bbWeight == BB_ZERO_WEIGHT);
+ insertAfterBlk =
+ fgFindInsertPoint(bStart->bbTryIndex,
+ true, // Insert in the try region.
+ startBlk, endBlk, nearBlk, jumpBlk, bStart->bbWeight == BB_ZERO_WEIGHT);
}
/* See if insertAfterBlk is the same as where we started, */
/* or if we could not find any insertion point */
- if ((insertAfterBlk == bPrev) ||
- (insertAfterBlk == NULL))
+ if ((insertAfterBlk == bPrev) || (insertAfterBlk == nullptr))
{
-CANNOT_MOVE:;
+ CANNOT_MOVE:;
/* We couldn't move the blocks, so put everything back */
/* relink [bStart .. bEnd] into the flow graph */
bPrev->setNext(bStart);
if (bEnd->bbNext)
+ {
bEnd->bbNext->bbPrev = bEnd;
+ }
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
if (bStart != bEnd)
{
- printf("Could not relocate blocks (BB%02u .. BB%02u)\n",
- bStart->bbNum, bEnd->bbNum);
+ printf("Could not relocate blocks (BB%02u .. BB%02u)\n", bStart->bbNum, bEnd->bbNum);
}
else
{
- printf("Could not relocate block BB%02u\n",
- bStart->bbNum);
+ printf("Could not relocate block BB%02u\n", bStart->bbNum);
}
}
#endif // DEBUG
@@ -15229,17 +15376,17 @@ CANNOT_MOVE:;
}
}
- noway_assert(insertAfterBlk != NULL);
- noway_assert(bStartPrev != NULL);
+ noway_assert(insertAfterBlk != nullptr);
+ noway_assert(bStartPrev != nullptr);
noway_assert(bStartPrev != insertAfterBlk);
#ifdef DEBUG
movedBlocks = true;
- if (verbose)
+ if (verbose)
{
- const char * msg;
- if (bStart2 != NULL)
+ const char* msg;
+ if (bStart2 != nullptr)
{
msg = "hot";
}
@@ -15258,13 +15405,11 @@ CANNOT_MOVE:;
printf("Relocated %s ", msg);
if (bStart != bEnd)
{
- printf("blocks (BB%02u .. BB%02u)",
- bStart->bbNum, bEnd->bbNum);
+ printf("blocks (BB%02u .. BB%02u)", bStart->bbNum, bEnd->bbNum);
}
else
{
- printf("block BB%02u",
- bStart->bbNum);
+ printf("block BB%02u", bStart->bbNum);
}
if (bPrev->bbJumpKind == BBJ_COND)
@@ -15288,11 +15433,11 @@ CANNOT_MOVE:;
condTest->gtOp.gtOp1 = gtReverseCond(condTest->gtOp.gtOp1);
- if (bStart2 == NULL)
+ if (bStart2 == nullptr)
{
/* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */
- bPrev->bbJumpDest = bStart;
- bStart->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
+ bPrev->bbJumpDest = bStart;
+ bStart->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
}
else
{
@@ -15300,8 +15445,8 @@ CANNOT_MOVE:;
noway_assert(insertAfterBlk->bbNext == block);
/* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */
- bPrev->bbJumpDest = block;
- block->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
+ bPrev->bbJumpDest = block;
+ block->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
}
}
@@ -15314,14 +15459,12 @@ CANNOT_MOVE:;
// we will need to extend ebdTryLast or ebdHndLast so the blocks that we
// are moving are part of this try or handler region.
//
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
// Are we moving blocks to the end of a try region?
if (HBtab->ebdTryLast == insertAfterBlk)
{
- if (fStartIsInTry[XTnum])
+ if (fStartIsInTry[XTnum])
{
// bStart..bEnd is in the try, so extend the try region
fgSetTryEnd(HBtab, bEnd);
@@ -15358,7 +15501,7 @@ CANNOT_MOVE:;
/* If bEnd falls through, we must insert a jump to bNext */
fgConnectFallThrough(bEnd, bNext);
- if (bStart2 == NULL)
+ if (bStart2 == nullptr)
{
/* If insertAfterBlk falls through, we are forced to */
/* add a jump around the block(s) we just inserted */
@@ -15418,11 +15561,13 @@ CANNOT_MOVE:;
* or are rarely executed.
*/
-void Compiler::fgDetermineFirstColdBlock()
+void Compiler::fgDetermineFirstColdBlock()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("\n*************** In fgDetermineFirstColdBlock()\n");
+ }
#endif // DEBUG
// Since we may need to create a new transition block
@@ -15430,7 +15575,7 @@ void Compiler::fgDetermineFirstColdBlock()
//
assert(fgSafeBasicBlockCreation);
- fgFirstColdBlock = NULL;
+ fgFirstColdBlock = nullptr;
#if FEATURE_STACK_FP_X87
if (compMayHaveTransitionBlocks)
@@ -15467,14 +15612,12 @@ void Compiler::fgDetermineFirstColdBlock()
}
#endif // FEATURE_EH_FUNCLETS
- BasicBlock* firstColdBlock = NULL;
- BasicBlock* prevToFirstColdBlock = NULL;
+ BasicBlock* firstColdBlock = nullptr;
+ BasicBlock* prevToFirstColdBlock = nullptr;
BasicBlock* block;
BasicBlock* lblk;
- for (lblk = NULL, block = fgFirstBB;
- block != NULL;
- lblk = block, block = block->bbNext)
+ for (lblk = nullptr, block = fgFirstBB; block != nullptr; lblk = block, block = block->bbNext)
{
bool blockMustBeInHotSection = false;
@@ -15486,7 +15629,7 @@ void Compiler::fgDetermineFirstColdBlock()
#endif // HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION
// Do we have a candidate for the first cold block?
- if (firstColdBlock != NULL)
+ if (firstColdBlock != nullptr)
{
// We have a candidate for first cold block
@@ -15494,8 +15637,8 @@ void Compiler::fgDetermineFirstColdBlock()
if (blockMustBeInHotSection || (block->isRunRarely() == false))
{
// We have to restart the search for the first cold block
- firstColdBlock = NULL;
- prevToFirstColdBlock = NULL;
+ firstColdBlock = nullptr;
+ prevToFirstColdBlock = nullptr;
}
}
else // (firstColdBlock == NULL)
@@ -15511,9 +15654,7 @@ void Compiler::fgDetermineFirstColdBlock()
// so the code size for block needs to be large
// enough to make it worth our while
//
- if ((lblk == NULL) ||
- (lblk->bbJumpKind != BBJ_COND) ||
- (fgGetCodeEstimate(block) >= 8) )
+ if ((lblk == nullptr) || (lblk->bbJumpKind != BBJ_COND) || (fgGetCodeEstimate(block) >= 8))
{
// This block is now a candidate for first cold block
// Also remember the predecessor to this block
@@ -15529,29 +15670,31 @@ void Compiler::fgDetermineFirstColdBlock()
// If the first block is Cold then we can't move any blocks
// into the cold section
- firstColdBlock = NULL;
+ firstColdBlock = nullptr;
}
- if (firstColdBlock != NULL)
+ if (firstColdBlock != nullptr)
{
- noway_assert(prevToFirstColdBlock != NULL);
+ noway_assert(prevToFirstColdBlock != nullptr);
- if (prevToFirstColdBlock == NULL)
+ if (prevToFirstColdBlock == nullptr)
+ {
return; // To keep Prefast happy
+ }
// If we only have one cold block
// then it may not be worth it to move it
// into the Cold section as a jump to the
// Cold section is 5 bytes in size.
//
- if (firstColdBlock->bbNext == NULL)
+ if (firstColdBlock->bbNext == nullptr)
{
// If the size of the cold block is 7 or less
// then we will keep it in the Hot section.
//
if (fgGetCodeEstimate(firstColdBlock) < 8)
{
- firstColdBlock = NULL;
+ firstColdBlock = nullptr;
goto EXIT;
}
}
@@ -15563,57 +15706,57 @@ void Compiler::fgDetermineFirstColdBlock()
{
switch (prevToFirstColdBlock->bbJumpKind)
{
- default:
- noway_assert(!"Unhandled jumpkind in fgDetermineFirstColdBlock()");
+ default:
+ noway_assert(!"Unhandled jumpkind in fgDetermineFirstColdBlock()");
- case BBJ_CALLFINALLY:
- // A BBJ_CALLFINALLY that falls through is always followed
- // by an empty BBJ_ALWAYS.
- //
- assert(prevToFirstColdBlock->isBBCallAlwaysPair());
- firstColdBlock = firstColdBlock->bbNext; // Note that this assignment could make firstColdBlock == nullptr
- break;
+ case BBJ_CALLFINALLY:
+ // A BBJ_CALLFINALLY that falls through is always followed
+ // by an empty BBJ_ALWAYS.
+ //
+ assert(prevToFirstColdBlock->isBBCallAlwaysPair());
+ firstColdBlock =
+ firstColdBlock->bbNext; // Note that this assignment could make firstColdBlock == nullptr
+ break;
- case BBJ_COND:
- //
- // This is a slightly more complicated case, because we will
- // probably need to insert a block to jump to the cold section.
- //
- if (firstColdBlock->isEmpty() &&
- (firstColdBlock->bbJumpKind == BBJ_ALWAYS))
- {
- // We can just use this block as the transitionBlock
- firstColdBlock = firstColdBlock->bbNext;
- // Note that this assignment could make firstColdBlock == NULL
- }
- else
- {
- BasicBlock* transitionBlock = fgNewBBafter(BBJ_ALWAYS, prevToFirstColdBlock, true);
- transitionBlock->bbJumpDest = firstColdBlock;
- transitionBlock->inheritWeight(firstColdBlock);
+ case BBJ_COND:
+ //
+ // This is a slightly more complicated case, because we will
+ // probably need to insert a block to jump to the cold section.
+ //
+ if (firstColdBlock->isEmpty() && (firstColdBlock->bbJumpKind == BBJ_ALWAYS))
+ {
+ // We can just use this block as the transitionBlock
+ firstColdBlock = firstColdBlock->bbNext;
+ // Note that this assignment could make firstColdBlock == NULL
+ }
+ else
+ {
+ BasicBlock* transitionBlock = fgNewBBafter(BBJ_ALWAYS, prevToFirstColdBlock, true);
+ transitionBlock->bbJumpDest = firstColdBlock;
+ transitionBlock->inheritWeight(firstColdBlock);
- noway_assert(fgComputePredsDone);
+ noway_assert(fgComputePredsDone);
- // Update the predecessor list for firstColdBlock
- fgReplacePred(firstColdBlock, prevToFirstColdBlock, transitionBlock);
+ // Update the predecessor list for firstColdBlock
+ fgReplacePred(firstColdBlock, prevToFirstColdBlock, transitionBlock);
- // Add prevToFirstColdBlock as a predecessor for transitionBlock
- fgAddRefPred(transitionBlock, prevToFirstColdBlock);
- }
- break;
+ // Add prevToFirstColdBlock as a predecessor for transitionBlock
+ fgAddRefPred(transitionBlock, prevToFirstColdBlock);
+ }
+ break;
- case BBJ_NONE:
- // If the block preceding the first cold block is BBJ_NONE,
- // convert it to BBJ_ALWAYS to force an explicit jump.
+ case BBJ_NONE:
+ // If the block preceding the first cold block is BBJ_NONE,
+ // convert it to BBJ_ALWAYS to force an explicit jump.
- prevToFirstColdBlock->bbJumpDest = firstColdBlock;
- prevToFirstColdBlock->bbJumpKind = BBJ_ALWAYS;
- break;
+ prevToFirstColdBlock->bbJumpDest = firstColdBlock;
+ prevToFirstColdBlock->bbJumpKind = BBJ_ALWAYS;
+ break;
}
}
}
- if (firstColdBlock != NULL)
+ if (firstColdBlock != nullptr)
{
firstColdBlock->bbFlags |= BBF_JMP_TARGET;
@@ -15648,14 +15791,14 @@ EXIT:;
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
/*****************************************************************************
*
* Function called to "comb" the basic block list.
* Removes any empty blocks, unreachable blocks and redundant jumps.
* Most of those appear after dead store removal and folding of conditionals.
- *
+ *
* Returns: true if the flowgraph has been modified
*
* It also compacts basic blocks
@@ -15666,11 +15809,13 @@ EXIT:;
* but we do not optimize those!
*/
-bool Compiler::fgUpdateFlowGraph(bool doTailDuplication)
+bool Compiler::fgUpdateFlowGraph(bool doTailDuplication)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("\n*************** In fgUpdateFlowGraph()");
+ }
#endif // DEBUG
/* This should never be called for debuggable code */
@@ -15678,7 +15823,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication)
noway_assert(!opts.MinOpts() && !opts.compDbgCode);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nBefore updating the flow graph:\n");
fgDispBasicBlocks(verboseTrees);
@@ -15699,14 +15844,12 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication)
{
change = false;
- BasicBlock* block; // the current block
- BasicBlock* bPrev = NULL; // the previous non-worthless block
- BasicBlock* bNext; // the successor of the current block
- BasicBlock* bDest; // the jump target of the current block
+ BasicBlock* block; // the current block
+ BasicBlock* bPrev = nullptr; // the previous non-worthless block
+ BasicBlock* bNext; // the successor of the current block
+ BasicBlock* bDest; // the jump target of the current block
- for (block = fgFirstBB;
- block != NULL;
- block = block->bbNext)
+ for (block = fgFirstBB; block != nullptr; block = block->bbNext)
{
/* Some blocks may be already marked removed by other optimizations
* (e.g worthless loop removal), without being explicitly removed
@@ -15729,57 +15872,55 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication)
continue;
}
- /* We jump to the REPEAT label if we performed a change involving the current block
- * This is in case there are other optimizations that can show up
- * (e.g. - compact 3 blocks in a row)
- * If nothing happens, we then finish the iteration and move to the next block
- */
+ /* We jump to the REPEAT label if we performed a change involving the current block
+ * This is in case there are other optimizations that can show up
+ * (e.g. - compact 3 blocks in a row)
+ * If nothing happens, we then finish the iteration and move to the next block
+ */
-REPEAT:;
+ REPEAT:;
bNext = block->bbNext;
- bDest = NULL;
+ bDest = nullptr;
if (block->bbJumpKind == BBJ_ALWAYS)
{
bDest = block->bbJumpDest;
if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest))
{
- change = true;
+ change = true;
modified = true;
- bDest = block->bbJumpDest;
- bNext = block->bbNext;
+ bDest = block->bbJumpDest;
+ bNext = block->bbNext;
}
}
// Remove JUMPS to the following block
// and optimize any JUMPS to JUMPS
- if (block->bbJumpKind == BBJ_COND ||
- block->bbJumpKind == BBJ_ALWAYS )
+ if (block->bbJumpKind == BBJ_COND || block->bbJumpKind == BBJ_ALWAYS)
{
bDest = block->bbJumpDest;
if (bDest == bNext)
{
if (fgOptimizeBranchToNext(block, bNext, bPrev))
{
- change = true;
+ change = true;
modified = true;
- bDest = NULL;
+ bDest = nullptr;
}
}
}
- if (bDest != NULL)
+ if (bDest != nullptr)
{
// Do we have a JUMP to an empty unconditional JUMP block?
- if (bDest->isEmpty() &&
- (bDest->bbJumpKind == BBJ_ALWAYS) &&
- (bDest != bDest->bbJumpDest)) // special case for self jumps
+ if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) &&
+ (bDest != bDest->bbJumpDest)) // special case for self jumps
{
if (fgOptimizeBranchToEmptyUnconditional(block, bDest))
{
- change = true;
+ change = true;
modified = true;
goto REPEAT;
}
@@ -15787,13 +15928,13 @@ REPEAT:;
// Check for a conditional branch that just skips over an empty BBJ_ALWAYS block
- if ((block->bbJumpKind == BBJ_COND) && // block is a BBJ_COND block
- (bNext != NULL) && // block is not the last block
- (bNext->bbRefs == 1) && // No other block jumps to bNext
- (bNext->bbNext == bDest) && // The block after bNext is the BBJ_COND jump dest
- (bNext->bbJumpKind == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block
- bNext->isEmpty() && // and it is an an empty block
- (bNext != bNext->bbJumpDest) && // special case for self jumps
+ if ((block->bbJumpKind == BBJ_COND) && // block is a BBJ_COND block
+ (bNext != nullptr) && // block is not the last block
+ (bNext->bbRefs == 1) && // No other block jumps to bNext
+ (bNext->bbNext == bDest) && // The block after bNext is the BBJ_COND jump dest
+ (bNext->bbJumpKind == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block
+ bNext->isEmpty() && // and it is an empty block
+ (bNext != bNext->bbJumpDest) && // special case for self jumps
(bDest != fgFirstColdBlock))
{
bool optimizeJump = true;
@@ -15820,21 +15961,22 @@ REPEAT:;
if (fgIsUsingProfileWeights())
{
// if block and bDest are in different hot/cold regions we can't do this optimization
- // because we can't allow fall-through into the cold region.
+ // because we can't allow fall-through into the cold region.
if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest))
{
fgNeedsUpdateFlowGraph = true;
- optimizeJump = false;
+ optimizeJump = false;
}
}
if (optimizeJump)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("\nReversing a conditional jump around an unconditional jump (BB%02u -> BB%02u -> BB%02u)\n",
- block->bbNum, bDest->bbNum, bNext->bbJumpDest->bbNum);
+ printf("\nReversing a conditional jump around an unconditional jump (BB%02u -> BB%02u -> "
+ "BB%02u)\n",
+ block->bbNum, bDest->bbNum, bNext->bbJumpDest->bbNum);
}
#endif // DEBUG
/* Reverse the jump condition */
@@ -15878,15 +16020,13 @@ REPEAT:;
// we will need to update ebdTryLast or ebdHndLast.
//
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
+ EHblkDsc* HBtab;
+ EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount;
- HBtab < HBtabEnd;
+ for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd;
HBtab++)
{
- if ((HBtab->ebdTryLast == bNext) ||
- (HBtab->ebdHndLast == bNext))
+ if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext))
{
fgSkipRmvdBlocks(HBtab);
}
@@ -15897,7 +16037,7 @@ REPEAT:;
modified = true;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nAfter reversing the jump:\n");
fgDispBasicBlocks(verboseTrees);
@@ -15914,7 +16054,9 @@ REPEAT:;
unreachable.
*/
if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1))
+ {
continue;
+ }
goto REPEAT;
}
@@ -15951,9 +16093,7 @@ REPEAT:;
/* Remove unreachable or empty blocks - do not consider blocks marked BBF_DONT_REMOVE or genReturnBB block
* These include first and last block of a TRY, exception handlers and RANGE_CHECK_FAIL THROW blocks */
- if ((block->bbFlags & BBF_DONT_REMOVE) == BBF_DONT_REMOVE ||
- block == genReturnBB
- )
+ if ((block->bbFlags & BBF_DONT_REMOVE) == BBF_DONT_REMOVE || block == genReturnBB)
{
bPrev = block;
continue;
@@ -15999,24 +16139,24 @@ REPEAT:;
{
switch (block->bbJumpKind)
{
- case BBJ_COND:
- case BBJ_ALWAYS:
- if (block->bbJumpDest == block)
- {
- fgRemoveBlock(block, true);
+ case BBJ_COND:
+ case BBJ_ALWAYS:
+ if (block->bbJumpDest == block)
+ {
+ fgRemoveBlock(block, true);
- change = true;
- modified = true;
+ change = true;
+ modified = true;
- /* we removed the current block - the rest of the optimizations
- * won't have a target so continue with the next block */
+ /* we removed the current block - the rest of the optimizations
+ * won't have a target so continue with the next block */
- continue;
- }
- break;
+ continue;
+ }
+ break;
- default:
- break;
+ default:
+ break;
}
}
@@ -16035,7 +16175,7 @@ REPEAT:;
/* Have we removed the block? */
- if (block->bbFlags & BBF_REMOVED)
+ if (block->bbFlags & BBF_REMOVED)
{
/* block was removed - no change to bPrev */
continue;
@@ -16050,13 +16190,12 @@ REPEAT:;
bPrev = block;
}
- }
- while (change);
+ } while (change);
fgNeedsUpdateFlowGraph = false;
#ifdef DEBUG
- if (verbose && modified)
+ if (verbose && modified)
{
printf("\nAfter updating the flow graph:\n");
fgDispBasicBlocks(verboseTrees);
@@ -16080,10 +16219,12 @@ REPEAT:;
#ifdef DEBUG
-void Compiler::fgDebugCheckUpdate()
+void Compiler::fgDebugCheckUpdate()
{
if (!compStressCompile(STRESS_CHK_FLOW_UPDATE, 30))
- return;
+ {
+ return;
+ }
/* We check for these conditions:
* no unreachable blocks -> no blocks have countOfInEdges() = 0
@@ -16095,65 +16236,65 @@ void Compiler::fgDebugCheckUpdate()
BasicBlock* prev;
BasicBlock* block;
- for (prev = NULL , block = fgFirstBB;
- block != NULL;
- prev = block, block = block->bbNext)
+ for (prev = nullptr, block = fgFirstBB; block != nullptr; prev = block, block = block->bbNext)
{
/* no unreachable blocks */
- if ((block->countOfInEdges() == 0) &&
- !(block->bbFlags & BBF_DONT_REMOVE)
+ if ((block->countOfInEdges() == 0) && !(block->bbFlags & BBF_DONT_REMOVE)
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
- // With funclets, we never get rid of the BBJ_ALWAYS part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair,
- // even if we can prove that the finally block never returns.
- && (prev == NULL || block->bbJumpKind != BBJ_ALWAYS || !prev->isBBCallAlwaysPair())
+ // With funclets, we never get rid of the BBJ_ALWAYS part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair,
+ // even if we can prove that the finally block never returns.
+ && (prev == NULL || block->bbJumpKind != BBJ_ALWAYS || !prev->isBBCallAlwaysPair())
#endif // FEATURE_EH_FUNCLETS
- )
+ )
{
noway_assert(!"Unreachable block not removed!");
}
/* no empty blocks */
- if (block->isEmpty() &&
- !(block->bbFlags & BBF_DONT_REMOVE))
+ if (block->isEmpty() && !(block->bbFlags & BBF_DONT_REMOVE))
{
switch (block->bbJumpKind)
{
- case BBJ_CALLFINALLY:
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- case BBJ_RETURN:
+ case BBJ_CALLFINALLY:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ case BBJ_RETURN:
/* a BBJ_ALWAYS is probably just a GOTO, but it will still have to be treated */
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- /* These jump kinds are allowed to have empty tree lists */
- break;
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ /* These jump kinds are allowed to have empty tree lists */
+ break;
- default:
- /* it may be the case that the block had more than one reference to it
- * so we couldn't remove it */
+ default:
+ /* it may be the case that the block had more than one reference to it
+ * so we couldn't remove it */
- if (block->countOfInEdges() == 0)
- noway_assert(!"Empty block not removed!");
- break;
+ if (block->countOfInEdges() == 0)
+ {
+ noway_assert(!"Empty block not removed!");
+ }
+ break;
}
}
/* no un-imported blocks */
- if (!(block->bbFlags & BBF_IMPORTED))
+ if (!(block->bbFlags & BBF_IMPORTED))
{
/* internal blocks do not count */
if (!(block->bbFlags & BBF_INTERNAL))
+ {
noway_assert(!"Non IMPORTED block not removed!");
+ }
}
- bool prevIsCallAlwaysPair = ((prev != NULL) && prev->isBBCallAlwaysPair());
+ bool prevIsCallAlwaysPair = ((prev != nullptr) && prev->isBBCallAlwaysPair());
// Check for an unnecessary jumps to the next block
- bool doAssertOnJumpToNextBlock = false; // unless we have a BBJ_COND or BBJ_ALWAYS we can not assert
+ bool doAssertOnJumpToNextBlock = false; // unless we have a BBJ_COND or BBJ_ALWAYS we can not assert
if (block->bbJumpKind == BBJ_COND)
{
@@ -16180,7 +16321,7 @@ void Compiler::fgDebugCheckUpdate()
// We are allowed to have a branch from a hot 'block' to a cold 'bbNext'
//
- if ((block->bbNext != NULL) && fgInDifferentRegions(block, block->bbNext))
+ if ((block->bbNext != nullptr) && fgInDifferentRegions(block, block->bbNext))
{
doAssertOnJumpToNextBlock = false;
}
@@ -16226,7 +16367,7 @@ void Compiler::fgDebugCheckUpdate()
* (copy the bbTryIndex, bbHndIndex, and bbCatchTyp from 'block' to the new predecessor, and clear
* 'bbCatchTyp' from 'block').
*/
-void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
+void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
{
assert(block->bbPrev != nullptr);
@@ -16238,15 +16379,13 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
bPrev->bbCatchTyp = block->bbCatchTyp;
block->bbCatchTyp = BBCT_NONE;
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
+ EHblkDsc* HBtab;
+ EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount;
- HBtab < HBtabEnd;
- HBtab++)
+ for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
{
/* Multiple pointers in EHblkDsc can point to the same block. We cannot early out after the first match. */
- if (HBtab->ebdTryBeg == block)
+ if (HBtab->ebdTryBeg == block)
{
#ifdef DEBUG
if (verbose)
@@ -16255,7 +16394,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
}
#endif // DEBUG
HBtab->ebdTryBeg = bPrev;
- bPrev->bbFlags |= BBF_TRY_BEG | BBF_DONT_REMOVE | BBF_HAS_LABEL;
+ bPrev->bbFlags |= BBF_TRY_BEG | BBF_DONT_REMOVE | BBF_HAS_LABEL;
// clear the TryBeg flag unless it begins another try region
if (!bbIsTryBeg(block))
{
@@ -16263,7 +16402,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
}
}
- if (HBtab->ebdHndBeg == block)
+ if (HBtab->ebdHndBeg == block)
{
#ifdef DEBUG
if (verbose)
@@ -16277,11 +16416,11 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
block->bbRefs--;
HBtab->ebdHndBeg = bPrev;
- bPrev->bbFlags |= BBF_DONT_REMOVE | BBF_HAS_LABEL;
+ bPrev->bbFlags |= BBF_DONT_REMOVE | BBF_HAS_LABEL;
bPrev->bbRefs++;
- // If this is a handler for a filter, the last block of the filter will end with
- // a BBJ_EHFILTERRET block that has a bbJumpDest that jumps to the first block of
+ // If this is a handler for a filter, the last block of the filter will end with
+ // a BBJ_EHFILTERRET block that has a bbJumpDest that jumps to the first block of
// its handler. So we need to update it to keep things in sync.
//
if (HBtab->HasFilter())
@@ -16293,8 +16432,8 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
#ifdef DEBUG
if (verbose)
{
- printf("EH#%u: Updating bbJumpDest for filter ret block: BB%02u => BB%02u\n",
- ehGetIndex(HBtab), bFilterLast->bbNum, bPrev->bbNum);
+ printf("EH#%u: Updating bbJumpDest for filter ret block: BB%02u => BB%02u\n", ehGetIndex(HBtab),
+ bFilterLast->bbNum, bPrev->bbNum);
}
#endif // DEBUG
// Change the bbJumpDest for bFilterLast from the old first 'block' to the new first 'bPrev'
@@ -16316,7 +16455,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
block->bbRefs--;
HBtab->ebdFilter = bPrev;
- bPrev->bbFlags |= BBF_DONT_REMOVE | BBF_HAS_LABEL;
+ bPrev->bbFlags |= BBF_DONT_REMOVE | BBF_HAS_LABEL;
bPrev->bbRefs++;
}
}
@@ -16327,15 +16466,17 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
* Update the EH table to make this so. Also, set the new block to have the right EH region data.
*/
-void Compiler::fgExtendEHRegionAfter(BasicBlock* block)
+void Compiler::fgExtendEHRegionAfter(BasicBlock* block)
{
BasicBlock* newBlk = block->bbNext;
assert(newBlk != nullptr);
newBlk->copyEHRegion(block);
- newBlk->bbCatchTyp = BBCT_NONE; // Only the first block of a catch has this set, and 'newBlk' can't be the first block of a catch.
+ newBlk->bbCatchTyp =
+ BBCT_NONE; // Only the first block of a catch has this set, and 'newBlk' can't be the first block of a catch.
- // TODO-Throughput: if the block is not in an EH region, then we don't need to walk the EH table looking for 'last' block pointers to update.
+ // TODO-Throughput: if the block is not in an EH region, then we don't need to walk the EH table looking for 'last'
+ // block pointers to update.
ehUpdateLastBlocks(block, newBlk);
}
@@ -16344,14 +16485,12 @@ void Compiler::fgExtendEHRegionAfter(BasicBlock* block)
* Insert a BasicBlock before the given block.
*/
-BasicBlock* Compiler::fgNewBBbefore(BBjumpKinds jumpKind,
- BasicBlock* block,
- bool extendRegion)
+BasicBlock* Compiler::fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion)
{
// Create a new BasicBlock and chain it in
BasicBlock* newBlk = bbNewBasicBlock(jumpKind);
- newBlk->bbFlags |= BBF_INTERNAL;
+ newBlk->bbFlags |= BBF_INTERNAL;
fgInsertBBbefore(block, newBlk);
@@ -16385,14 +16524,12 @@ BasicBlock* Compiler::fgNewBBbefore(BBjumpKinds jumpKind,
* Insert a BasicBlock after the given block.
*/
-BasicBlock* Compiler::fgNewBBafter(BBjumpKinds jumpKind,
- BasicBlock* block,
- bool extendRegion)
+BasicBlock* Compiler::fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion)
{
// Create a new BasicBlock and chain it in
BasicBlock* newBlk = bbNewBasicBlock(jumpKind);
- newBlk->bbFlags |= BBF_INTERNAL;
+ newBlk->bbFlags |= BBF_INTERNAL;
fgInsertBBafter(block, newBlk);
@@ -16428,8 +16565,7 @@ BasicBlock* Compiler::fgNewBBafter(BBjumpKinds jumpKind,
* (If insertBeforeBlk is the first block of the funclet region, then 'newBlk' will be the
* new first block of the funclet region.)
*/
-void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk,
- BasicBlock* newBlk)
+void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk)
{
if (insertBeforeBlk->bbPrev)
{
@@ -16439,7 +16575,7 @@ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk,
{
newBlk->setNext(fgFirstBB);
- fgFirstBB = newBlk;
+ fgFirstBB = newBlk;
newBlk->bbPrev = nullptr;
}
@@ -16461,21 +16597,22 @@ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk,
* If insertBeforeBlk is in the funclet region, then newBlk will be in the funclet region.
* (It can't be used to insert a block as the first block of the funclet region).
*/
-void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk,
- BasicBlock* newBlk)
+void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk)
{
newBlk->bbNext = insertAfterBlk->bbNext;
if (insertAfterBlk->bbNext)
+ {
insertAfterBlk->bbNext->bbPrev = newBlk;
-
+ }
+
insertAfterBlk->bbNext = newBlk;
- newBlk->bbPrev = insertAfterBlk;
+ newBlk->bbPrev = insertAfterBlk;
if (fgLastBB == insertAfterBlk)
{
fgLastBB = newBlk;
- assert(fgLastBB->bbNext == NULL);
+ assert(fgLastBB->bbNext == nullptr);
}
}
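
fgInsertBBafter, shown above, is a standard doubly linked list splice with a tail (fgLastBB) fix-up. A generic sketch of the same splice over a hypothetical Node type:

    #include <cassert>

    struct Node
    {
        Node* next = nullptr;
        Node* prev = nullptr;
    };

    // Insert 'newNode' immediately after 'after', updating 'tail' if 'after' was the last node.
    void insertAfter(Node* after, Node* newNode, Node*& tail)
    {
        newNode->next = after->next;
        if (after->next != nullptr)
        {
            after->next->prev = newNode;
        }
        after->next   = newNode;
        newNode->prev = after;
        if (tail == after)
        {
            tail = newNode;
            assert(tail->next == nullptr); // same invariant the code above asserts on fgLastBB
        }
    }

    int main()
    {
        Node  a, b;
        Node* tail = &a;
        insertAfter(&a, &b, tail);
        assert((a.next == &b) && (b.prev == &a) && (tail == &b));
        return 0;
    }
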
@@ -16489,34 +16626,35 @@ void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk,
bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt)
{
// bCur can't be NULL and must be a fall through bbJumpKind
- noway_assert(bCur != NULL);
+ noway_assert(bCur != nullptr);
noway_assert(bCur->bbFallsThrough());
- noway_assert(bAlt != NULL);
+ noway_assert(bAlt != nullptr);
// We only handle the cases when bAlt is a BBJ_ALWAYS or a BBJ_COND
- if ((bAlt->bbJumpKind != BBJ_ALWAYS) &&
- (bAlt->bbJumpKind != BBJ_COND))
+ if ((bAlt->bbJumpKind != BBJ_ALWAYS) && (bAlt->bbJumpKind != BBJ_COND))
{
return false;
}
// if bAlt doesn't jump to bCur it can't be a better fall through than bCur
if (bAlt->bbJumpDest != bCur)
+ {
return false;
+ }
// Currently bNext is the fall through for bCur
BasicBlock* bNext = bCur->bbNext;
- noway_assert(bNext != NULL);
+ noway_assert(bNext != nullptr);
// We will set result to true if bAlt is a better fall through than bCur
bool result;
if (fgHaveValidEdgeWeights)
{
// We will compare the edge weight for our two choices
- flowList* edgeFromAlt = fgGetPredForBlock(bCur, bAlt);
+ flowList* edgeFromAlt = fgGetPredForBlock(bCur, bAlt);
flowList* edgeFromCur = fgGetPredForBlock(bNext, bCur);
- noway_assert(edgeFromCur != NULL);
- noway_assert(edgeFromAlt != NULL);
+ noway_assert(edgeFromCur != nullptr);
+ noway_assert(edgeFromAlt != nullptr);
result = (edgeFromAlt->flEdgeWeightMin > edgeFromCur->flEdgeWeightMax);
}
@@ -16531,7 +16669,7 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt)
{
noway_assert(bAlt->bbJumpKind == BBJ_COND);
// Our result is true if bAlt's weight is more than twice bCur's weight
- result = (bAlt->bbWeight > (2*bCur->bbWeight));
+ result = (bAlt->bbWeight > (2 * bCur->bbWeight));
}
}
return result;
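
When valid edge weights are available, the code above only prefers bAlt if its edge weight is better even in the worst case: the minimum of one edge's weight range must exceed the maximum of the other. A minimal numeric sketch of that conservative comparison (the struct and field names below are invented for the example, loosely modelled on flEdgeWeightMin/Max):

    #include <cassert>
    #include <cstdint>

    // A weight known only as a [min, max] range.
    struct EdgeWeight
    {
        std::uint64_t min;
        std::uint64_t max;
    };

    // 'alt' is a better fall-through only if even its smallest possible weight
    // exceeds the largest possible weight of the current fall-through edge.
    bool altIsBetterFallThrough(const EdgeWeight& alt, const EdgeWeight& cur)
    {
        return alt.min > cur.max;
    }

    int main()
    {
        assert(altIsBetterFallThrough({100, 120}, {10, 90}));  // clearly heavier
        assert(!altIsBetterFallThrough({50, 200}, {10, 90}));  // ranges overlap: stay conservative
        return 0;
    }
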
@@ -16563,12 +16701,12 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt)
// will all return 'true'. Here are the cases:
// 1. Insert into try1: the most nested EH region BB05 is in is already try1, so we can insert after
// it and extend try1 (and try2).
-// 2. Insert into try2: we can extend try2, but leave try1 alone.
+// 2. Insert into try2: we can extend try2, but leave try1 alone.
// 3. Insert into try3: we can leave try1 and try2 alone, and put the new block just in try3. Note that
// in this case, after we "loop outwards" in the EH nesting, we get to a place where we're in the middle
// of the try3 region, not at the end of it.
// In all cases, it is possible to put a block after BB05 and put it in any of these three 'try' regions legally.
-//
+//
// Filters are ignored; if 'blk' is in a filter, the answer will be false.
//
// Arguments:
@@ -16581,7 +16719,7 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt)
// Return Value:
// 'true' if a block can be inserted after 'blk' and put in EH region 'regionIndex', else 'false'.
//
-bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion)
+bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion)
{
assert(blk != nullptr);
assert(regionIndex <= compHndBBtabCount);
@@ -16591,7 +16729,7 @@ bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionI
assert(putInTryRegion);
}
- bool inTryRegion;
+ bool inTryRegion;
unsigned nestedRegionIndex = ehGetMostNestedRegionIndex(blk, &inTryRegion);
bool insertOK = true;
@@ -16616,7 +16754,7 @@ bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionI
}
assert(nestedRegionIndex > 0);
- EHblkDsc* ehDsc = ehGetDsc(nestedRegionIndex - 1); // ehGetDsc uses [0..compHndBBtabCount) form.
+ EHblkDsc* ehDsc = ehGetDsc(nestedRegionIndex - 1); // ehGetDsc uses [0..compHndBBtabCount) form.
if (inTryRegion)
{
@@ -16632,7 +16770,8 @@ bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionI
// We ignore filters.
if (blk != ehDsc->ebdHndLast)
{
- // Not the last block? Then it must be somewhere else within the handler region, so we can't insert here.
+ // Not the last block? Then it must be somewhere else within the handler region, so we can't insert
+ // here.
insertOK = false;
break; // exit the 'for' loop
}
@@ -16640,7 +16779,9 @@ bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionI
// Things look good for this region; check the enclosing regions, if any.
- nestedRegionIndex = ehGetEnclosingRegionIndex(nestedRegionIndex - 1, &inTryRegion); // ehGetEnclosingRegionIndex uses [0..compHndBBtabCount) form.
+ nestedRegionIndex =
+ ehGetEnclosingRegionIndex(nestedRegionIndex - 1,
+ &inTryRegion); // ehGetEnclosingRegionIndex uses [0..compHndBBtabCount) form.
// Convert to [0..compHndBBtabCount] form.
nestedRegionIndex = (nestedRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : nestedRegionIndex + 1;
@@ -16714,7 +16855,8 @@ bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionI
// the end of the function.
// nearBlk - If non-nullptr, try to find an insertion location closely after this block. If nullptr, we insert
// at the best location found towards the end of the acceptable block range.
-// jumpBlk - When nearBlk is set, this can be set to the block which jumps to bNext->bbNext (TODO: need to review this?)
+// jumpBlk - When nearBlk is set, this can be set to the block which jumps to bNext->bbNext (TODO: need to review
+// this?)
// runRarely - true if the block being inserted is expected to be rarely run. This helps determine
// the best place to put the new block, by putting in a place that has the same 'rarely run' characteristic.
//
@@ -16722,19 +16864,21 @@ bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionI
// A block with the desired characteristics, so the new block will be inserted after this one.
// If there is no suitable location, return nullptr. This should basically never happen.
-BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
- bool putInTryRegion,
- BasicBlock* startBlk,
- BasicBlock* endBlk,
- BasicBlock* nearBlk,
- BasicBlock* jumpBlk,
- bool runRarely)
+BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
+ bool putInTryRegion,
+ BasicBlock* startBlk,
+ BasicBlock* endBlk,
+ BasicBlock* nearBlk,
+ BasicBlock* jumpBlk,
+ bool runRarely)
{
noway_assert(startBlk != nullptr);
noway_assert(startBlk != endBlk);
- noway_assert((regionIndex == 0 && putInTryRegion) || // Search in the main method
- (putInTryRegion && regionIndex > 0 && startBlk->bbTryIndex == regionIndex) || // Search in the specified try region
- (!putInTryRegion && regionIndex > 0 && startBlk->bbHndIndex == regionIndex)); // Search in the specified handler region
+ noway_assert((regionIndex == 0 && putInTryRegion) || // Search in the main method
+ (putInTryRegion && regionIndex > 0 &&
+ startBlk->bbTryIndex == regionIndex) || // Search in the specified try region
+ (!putInTryRegion && regionIndex > 0 &&
+ startBlk->bbHndIndex == regionIndex)); // Search in the specified handler region
#ifdef DEBUG
// Assert that startBlk precedes endBlk in the block list.
@@ -16746,20 +16890,18 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
}
#endif // DEBUG
- JITDUMP("fgFindInsertPoint(regionIndex=%u, putInTryRegion=%s, startBlk=BB%02u, endBlk=BB%02u, nearBlk=BB%02u, jumpBlk=BB%02u, runRarely=%s)\n",
- regionIndex,
- dspBool(putInTryRegion),
- startBlk->bbNum,
- (endBlk == nullptr) ? 0 : endBlk->bbNum,
- (nearBlk == nullptr) ? 0 : nearBlk->bbNum,
- (jumpBlk == nullptr) ? 0 : jumpBlk->bbNum,
- dspBool(runRarely));
-
- bool reachedNear = false; // Have we reached 'nearBlk' in our search? If not, we'll keep searching.
- bool inFilter = false; // Are we in a filter region that we need to skip?
- BasicBlock* bestBlk = nullptr; // Set to the best insertion point we've found so far that meets all the EH requirements.
- BasicBlock* goodBlk = nullptr; // Set to an acceptable insertion point that we'll use if we don't find a 'best' option.
- BasicBlock* blk;
+ JITDUMP("fgFindInsertPoint(regionIndex=%u, putInTryRegion=%s, startBlk=BB%02u, endBlk=BB%02u, nearBlk=BB%02u, "
+ "jumpBlk=BB%02u, runRarely=%s)\n",
+ regionIndex, dspBool(putInTryRegion), startBlk->bbNum, (endBlk == nullptr) ? 0 : endBlk->bbNum,
+ (nearBlk == nullptr) ? 0 : nearBlk->bbNum, (jumpBlk == nullptr) ? 0 : jumpBlk->bbNum, dspBool(runRarely));
+
+ bool reachedNear = false; // Have we reached 'nearBlk' in our search? If not, we'll keep searching.
+ bool inFilter = false; // Are we in a filter region that we need to skip?
+ BasicBlock* bestBlk =
+ nullptr; // Set to the best insertion point we've found so far that meets all the EH requirements.
+ BasicBlock* goodBlk =
+ nullptr; // Set to an acceptable insertion point that we'll use if we don't find a 'best' option.
+ BasicBlock* blk;
if (nearBlk != nullptr)
{
@@ -16795,10 +16937,8 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
// Record the fact that we entered a filter region, so we don't insert into filters...
// Unless the caller actually wanted the block inserted in this exact filter region.
// Detect this by the fact that startBlk and endBlk point to the filter begin and end.
- if (putInTryRegion ||
- (blk != startBlk) ||
- (startBlk != ehGetDsc(regionIndex - 1)->ebdFilter) ||
- (endBlk != ehGetDsc(regionIndex - 1)->ebdHndBeg))
+ if (putInTryRegion || (blk != startBlk) || (startBlk != ehGetDsc(regionIndex - 1)->ebdFilter) ||
+ (endBlk != ehGetDsc(regionIndex - 1)->ebdHndBeg))
{
inFilter = true;
}
@@ -16811,7 +16951,9 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
// Don't insert a block inside this filter region.
if (inFilter)
+ {
continue;
+ }
// Note that the new block will be inserted AFTER "blk". We check to make sure that doing so
// would put the block in the correct EH region. We make an assumption here that you can
@@ -16856,7 +16998,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
// 2. Also, when blk equals nearBlk we may want to insert here.
if (!blk->bbFallsThrough() || (blk == nearBlk))
{
- bool updateBestBlk = true; // We will probably update the bestBlk
+ bool updateBestBlk = true; // We will probably update the bestBlk
// If blk falls through then we must decide whether to use the nearBlk
// hint
@@ -16881,11 +17023,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
// want a non-rarely-run block), but bestBlock->isRunRarely() is true. In that
// case, we should update the block, also. Probably what we want is:
// (bestBlk->isRunRarely() != runRarely) && (blk->isRunRarely() == runRarely)
- if (updateBestBlk &&
- (bestBlk != nullptr) &&
- runRarely &&
- bestBlk->isRunRarely() &&
- !blk->isRunRarely())
+ if (updateBestBlk && (bestBlk != nullptr) && runRarely && bestBlk->isRunRarely() && !blk->isRunRarely())
{
updateBestBlk = false;
}
@@ -16898,7 +17036,9 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
// If we've reached nearBlk, we've satisfied all the criteria,
// so we're done.
if (reachedNear)
+ {
goto DONE;
+ }
// If we haven't reached nearBlk, keep looking for a 'best' location, just
// in case we'll find one at or after nearBlk. If no nearBlk was specified,
@@ -16910,7 +17050,9 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
// No need to update goodBlk after we have set bestBlk, but we could still find a better
// bestBlk, so keep looking.
if (bestBlk != nullptr)
+ {
continue;
+ }
// Set the current block as a "good enough" insertion point, if it meets certain criteria.
// We'll return this block if we don't find a "best" block in the search range. The block
@@ -16925,8 +17067,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
{
goodBlk = blk;
}
- else if ((goodBlk->bbJumpKind == BBJ_COND) ||
- (blk->bbJumpKind != BBJ_COND))
+ else if ((goodBlk->bbJumpKind == BBJ_COND) || (blk->bbJumpKind != BBJ_COND))
{
if ((blk == nearBlk) || !reachedNear)
{
@@ -16948,7 +17089,6 @@ DONE:;
return bestBlk;
}
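
fgFindInsertPoint tracks two candidates while scanning the range: a bestBlk that meets every requirement and a goodBlk fallback used only when no best candidate turns up. A stripped-down sketch of that scan-with-fallback pattern over a plain array, with placeholder predicates standing in for the real EH and rarely-run checks:

    #include <cstdio>
    #include <vector>

    // Returns the preferred element, falling back to an acceptable one, or -1 if neither exists.
    // The real search keeps refining its best candidate; this sketch stops at the first one.
    int findInsertPoint(const std::vector<int>& blocks, bool (*isBest)(int), bool (*isGood)(int))
    {
        int best = -1;
        int good = -1;
        for (int b : blocks)
        {
            if (isBest(b))
            {
                best = b;
                break; // fully satisfied; stop searching
            }
            if ((good == -1) && isGood(b))
            {
                good = b; // remember a fallback, keep looking for a best
            }
        }
        return (best != -1) ? best : good;
    }

    int main()
    {
        std::vector<int> blocks = {3, 7, 12, 20};
        auto isBest = [](int b) { return b > 100; };    // never satisfied here
        auto isGood = [](int b) { return b % 2 == 0; };
        std::printf("%d\n", findInsertPoint(blocks, isBest, isGood)); // prints 12 (the fallback)
        return 0;
    }
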
-
//------------------------------------------------------------------------
// Creates a new BasicBlock and inserts it in a specific EH region, given by 'tryIndex', 'hndIndex', and 'putInFilter'.
//
@@ -16956,7 +17096,8 @@ DONE:;
// must be a less nested EH region (that is, tryIndex > hndIndex).
//
// Otherwise, the block is inserted in either the try region or the handler region, depending on which one is the inner
-// region. In other words, if the try region indicated by tryIndex is nested in the handler region indicated by hndIndex,
+// region. In other words, if the try region indicated by tryIndex is nested in the handler region indicated by
+// hndIndex,
// then the new BB will be created in the try region. Vice versa.
//
// Note that tryIndex and hndIndex are numbered the same as BasicBlock::bbTryIndex and BasicBlock::bbHndIndex, that is,
@@ -16970,10 +17111,10 @@ DONE:;
// 3. When tryIndex == 0 and hndIndex != 0:
// The new BB will be created in the handler region indicated by hndIndex.
// 4. When tryIndex != 0 and hndIndex != 0 and tryIndex < hndIndex:
-// In this case, the try region is nested inside the handler region. Therefore, the new BB will be created
+// In this case, the try region is nested inside the handler region. Therefore, the new BB will be created
// in the try region indicated by tryIndex.
// 5. When tryIndex != 0 and hndIndex != 0 and tryIndex > hndIndex:
-// In this case, the handler region is nested inside the try region. Therefore, the new BB will be created
+// In this case, the handler region is nested inside the try region. Therefore, the new BB will be created
// in the handler region indicated by hndIndex.
//
// Note that if tryIndex != 0 and hndIndex != 0 then tryIndex must not be equal to hndIndex (this makes sense because
@@ -17003,33 +17144,33 @@ DONE:;
// Return Value:
// The new block.
-BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
- unsigned tryIndex,
- unsigned hndIndex,
- BasicBlock* nearBlk,
- bool putInFilter /* = false */,
- bool runRarely /* = false */,
- bool insertAtEnd /* = false */)
+BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
+ unsigned tryIndex,
+ unsigned hndIndex,
+ BasicBlock* nearBlk,
+ bool putInFilter /* = false */,
+ bool runRarely /* = false */,
+ bool insertAtEnd /* = false */)
{
assert(tryIndex <= compHndBBtabCount);
assert(hndIndex <= compHndBBtabCount);
/* afterBlk is the block which will precede the newBB */
- BasicBlock* afterBlk;
+ BasicBlock* afterBlk;
// start and end limit for inserting the block
- BasicBlock* startBlk = nullptr;
- BasicBlock* endBlk = nullptr;
+ BasicBlock* startBlk = nullptr;
+ BasicBlock* endBlk = nullptr;
- bool putInTryRegion = true;
- unsigned regionIndex = 0;
+ bool putInTryRegion = true;
+ unsigned regionIndex = 0;
// First, figure out which region (the "try" region or the "handler" region) to put the newBB in.
if ((tryIndex == 0) && (hndIndex == 0))
{
assert(!putInFilter);
- endBlk = fgEndBBAfterMainFunction(); // don't put new BB in funclet region
+ endBlk = fgEndBBAfterMainFunction(); // don't put new BB in funclet region
if (insertAtEnd || (nearBlk == nullptr))
{
@@ -17067,7 +17208,7 @@ BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
else
{
noway_assert(tryIndex > 0 && hndIndex > 0 && tryIndex != hndIndex);
- putInTryRegion = (tryIndex < hndIndex);
+ putInTryRegion = (tryIndex < hndIndex);
}
if (putInTryRegion)
@@ -17095,49 +17236,41 @@ BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
{
// We will put the newBB in the try region.
EHblkDsc* ehDsc = ehGetDsc(tryIndex - 1);
- startBlk = ehDsc->ebdTryBeg;
- endBlk = ehDsc->ebdTryLast->bbNext;
- regionIndex = tryIndex;
+ startBlk = ehDsc->ebdTryBeg;
+ endBlk = ehDsc->ebdTryLast->bbNext;
+ regionIndex = tryIndex;
}
else if (putInFilter)
{
// We will put the newBB in the filter region.
EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1);
- startBlk = ehDsc->ebdFilter;
- endBlk = ehDsc->ebdHndBeg;
- regionIndex = hndIndex;
+ startBlk = ehDsc->ebdFilter;
+ endBlk = ehDsc->ebdHndBeg;
+ regionIndex = hndIndex;
}
else
{
// We will put the newBB in the handler region.
EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1);
- startBlk = ehDsc->ebdHndBeg;
- endBlk = ehDsc->ebdHndLast->bbNext;
- regionIndex = hndIndex;
+ startBlk = ehDsc->ebdHndBeg;
+ endBlk = ehDsc->ebdHndLast->bbNext;
+ regionIndex = hndIndex;
}
noway_assert(regionIndex > 0);
}
// Now find the insertion point.
- afterBlk = fgFindInsertPoint(regionIndex,
- putInTryRegion,
- startBlk, endBlk,
- nearBlk, nullptr,
- runRarely);
+ afterBlk = fgFindInsertPoint(regionIndex, putInTryRegion, startBlk, endBlk, nearBlk, nullptr, runRarely);
_FoundAfterBlk:;
/* We have decided to insert the block after 'afterBlk'. */
noway_assert(afterBlk != nullptr);
- JITDUMP("fgNewBBinRegion(jumpKind=%u, tryIndex=%u, hndIndex=%u, putInFilter=%s, runRarely=%s, insertAtEnd=%s): inserting after BB%02u\n",
- jumpKind,
- tryIndex,
- hndIndex,
- dspBool(putInFilter),
- dspBool(runRarely),
- dspBool(insertAtEnd),
+ JITDUMP("fgNewBBinRegion(jumpKind=%u, tryIndex=%u, hndIndex=%u, putInFilter=%s, runRarely=%s, insertAtEnd=%s): "
+ "inserting after BB%02u\n",
+ jumpKind, tryIndex, hndIndex, dspBool(putInFilter), dspBool(runRarely), dspBool(insertAtEnd),
afterBlk->bbNum);
return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion);
@@ -17155,16 +17288,16 @@ _FoundAfterBlk:;
// Return Value:
// The new block.
-BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
- BasicBlock* srcBlk,
- bool runRarely /* = false */,
- bool insertAtEnd /* = false */)
+BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
+ BasicBlock* srcBlk,
+ bool runRarely /* = false */,
+ bool insertAtEnd /* = false */)
{
assert(srcBlk != nullptr);
- const unsigned tryIndex = srcBlk->bbTryIndex;
- const unsigned hndIndex = srcBlk->bbHndIndex;
- bool putInFilter = false;
+ const unsigned tryIndex = srcBlk->bbTryIndex;
+ const unsigned hndIndex = srcBlk->bbHndIndex;
+ bool putInFilter = false;
// Check to see if we need to put the new block in a filter. We do if srcBlk is in a filter.
// This can only be true if there is a handler index, and the handler region is more nested than the
@@ -17189,12 +17322,12 @@ BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
// Return Value:
// The new block.
-BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind)
+BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind)
{
- return fgNewBBinRegion(jumpKind, 0, 0, nullptr, /* putInFilter */ false, /* runRarely */ false, /* insertAtEnd */ true);
+ return fgNewBBinRegion(jumpKind, 0, 0, nullptr, /* putInFilter */ false, /* runRarely */ false,
+ /* insertAtEnd */ true);
}
-
//------------------------------------------------------------------------
// Creates a new BasicBlock, and inserts it after 'afterBlk'.
//
@@ -17214,14 +17347,14 @@ BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind)
// Return Value:
// The new block.
-BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
- BasicBlock* afterBlk,
- unsigned regionIndex,
- bool putInTryRegion)
+BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
+ BasicBlock* afterBlk,
+ unsigned regionIndex,
+ bool putInTryRegion)
{
/* Insert the new block */
BasicBlock* afterBlkNext = afterBlk->bbNext;
- (void)afterBlkNext; //prevent "unused variable" error from GCC
+ (void)afterBlkNext; // prevent "unused variable" error from GCC
BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false);
if (putInTryRegion)
@@ -17245,12 +17378,10 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
BasicBlock* newTryLast;
(void)ehInitTryBlockRange(newBlk, &newTryBeg, &newTryLast);
- unsigned XTnum;
- EHblkDsc* HBtab;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
// Is afterBlk at the end of a try region?
if (HBtab->ebdTryLast == afterBlk)
@@ -17278,8 +17409,7 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
// the same block, and we're extending 2. Thus, we must also extend 1. If we
// only extended 2, we would break proper nesting. (Dev11 bug 137967)
- extendTryRegion = HBtab->ebdIsSameTry(newTryBeg, newTryLast) ||
- bbInTryRegions(XTnum, newBlk);
+ extendTryRegion = HBtab->ebdIsSameTry(newTryBeg, newTryLast) || bbInTryRegions(XTnum, newBlk);
}
// Does newBlk extend this try region?
@@ -17342,23 +17472,29 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
return newBlk;
}
-
/*****************************************************************************
*/
/* static */
-unsigned Compiler::acdHelper(SpecialCodeKind codeKind)
+unsigned Compiler::acdHelper(SpecialCodeKind codeKind)
{
switch (codeKind)
{
- case SCK_RNGCHK_FAIL: return CORINFO_HELP_RNGCHKFAIL;
+ case SCK_RNGCHK_FAIL:
+ return CORINFO_HELP_RNGCHKFAIL;
#if COR_JIT_EE_VERSION > 460
- case SCK_ARG_EXCPN: return CORINFO_HELP_THROW_ARGUMENTEXCEPTION;
- case SCK_ARG_RNG_EXCPN: return CORINFO_HELP_THROW_ARGUMENTOUTOFRANGEEXCEPTION;
-#endif //COR_JIT_EE_VERSION
- case SCK_DIV_BY_ZERO: return CORINFO_HELP_THROWDIVZERO;
- case SCK_ARITH_EXCPN: return CORINFO_HELP_OVERFLOW;
- default: assert(!"Bad codeKind"); return 0;
+ case SCK_ARG_EXCPN:
+ return CORINFO_HELP_THROW_ARGUMENTEXCEPTION;
+ case SCK_ARG_RNG_EXCPN:
+ return CORINFO_HELP_THROW_ARGUMENTOUTOFRANGEEXCEPTION;
+#endif // COR_JIT_EE_VERSION
+ case SCK_DIV_BY_ZERO:
+ return CORINFO_HELP_THROWDIVZERO;
+ case SCK_ARITH_EXCPN:
+ return CORINFO_HELP_OVERFLOW;
+ default:
+ assert(!"Bad codeKind");
+ return 0;
}
}
@@ -17368,10 +17504,7 @@ unsigned Compiler::acdHelper(SpecialCodeKind codeKind)
* the given kind.
*/
-BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk,
- unsigned refData,
- SpecialCodeKind kind,
- unsigned stkDepth)
+BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind, unsigned stkDepth)
{
// Record that the code will call a THROW_HELPER
// so on Windows Amd64 we can allocate the 4 outgoing
@@ -17385,16 +17518,14 @@ BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk,
return nullptr;
}
- const static
- BBjumpKinds jumpKinds[] =
- {
- BBJ_NONE, // SCK_NONE
- BBJ_THROW, // SCK_RNGCHK_FAIL
- BBJ_ALWAYS, // SCK_PAUSE_EXEC
- BBJ_THROW, // SCK_DIV_BY_ZERO
- BBJ_THROW, // SCK_ARITH_EXCP, SCK_OVERFLOW
- BBJ_THROW, // SCK_ARG_EXCPN
- BBJ_THROW, // SCK_ARG_RNG_EXCPN
+ const static BBjumpKinds jumpKinds[] = {
+ BBJ_NONE, // SCK_NONE
+ BBJ_THROW, // SCK_RNGCHK_FAIL
+ BBJ_ALWAYS, // SCK_PAUSE_EXEC
+ BBJ_THROW, // SCK_DIV_BY_ZERO
+ BBJ_THROW, // SCK_ARITH_EXCP, SCK_OVERFLOW
+ BBJ_THROW, // SCK_ARG_EXCPN
+ BBJ_THROW, // SCK_ARG_RNG_EXCPN
};
noway_assert(sizeof(jumpKinds) == SCK_COUNT); // sanity check
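
The jumpKinds table above uses a common idiom: a static array indexed by an enum plus a sanity check that the table and the enum stay in sync. A small self-contained version of the pattern (the enum values below are made up, not the real SpecialCodeKind):

    #include <cassert>

    enum SpecialKind : unsigned char
    {
        SK_NONE,
        SK_RANGE_FAIL,
        SK_DIV_BY_ZERO,
        SK_COUNT // must stay last
    };

    enum JumpKind : unsigned char
    {
        JK_NONE,
        JK_THROW
    };

    static const JumpKind jumpKinds[] = {
        JK_NONE,  // SK_NONE
        JK_THROW, // SK_RANGE_FAIL
        JK_THROW, // SK_DIV_BY_ZERO
    };

    int main()
    {
        // The JIT's version checks sizeof(jumpKinds) == SCK_COUNT, which works because each
        // entry is one byte wide; here the element count is compared directly.
        static_assert(sizeof(jumpKinds) / sizeof(jumpKinds[0]) == SK_COUNT, "table out of sync");
        assert(jumpKinds[SK_DIV_BY_ZERO] == JK_THROW);
        return 0;
    }
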
@@ -17416,33 +17547,32 @@ BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk,
// this restriction could be removed with more careful code
// generation for BBJ_THROW (i.e. range check failed).
//
- if (add->acdStkLvl != stkDepth)
+ if (add->acdStkLvl != stkDepth)
{
codeGen->setFrameRequired(true);
}
#endif // _TARGET_X86_
- return add->acdDstBlk;
+ return add->acdDstBlk;
}
/* We have to allocate a new entry and prepend it to the list */
- add = new (this, CMK_Unknown) AddCodeDsc;
+ add = new (this, CMK_Unknown) AddCodeDsc;
add->acdData = refData;
add->acdKind = kind;
add->acdStkLvl = (unsigned short)stkDepth;
noway_assert(add->acdStkLvl == stkDepth);
- add->acdNext = fgAddCodeList;
- fgAddCodeList = add;
+ add->acdNext = fgAddCodeList;
+ fgAddCodeList = add;
/* Create the target basic block */
- BasicBlock* newBlk;
+ BasicBlock* newBlk;
- newBlk =
- add->acdDstBlk = fgNewBBinRegion(jumpKinds[kind], srcBlk, /* runRarely */ true, /* insertAtEnd */ true);
+ newBlk = add->acdDstBlk = fgNewBBinRegion(jumpKinds[kind], srcBlk, /* runRarely */ true, /* insertAtEnd */ true);
- add->acdDstBlk->bbFlags |= BBF_JMP_TARGET|BBF_HAS_LABEL;
+ add->acdDstBlk->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
#ifdef DEBUG
if (verbose)
@@ -17472,15 +17602,29 @@ BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk,
const char* msg;
switch (kind)
{
- case SCK_RNGCHK_FAIL: msg = " for RNGCHK_FAIL"; break;
- case SCK_PAUSE_EXEC: msg = " for PAUSE_EXEC"; break;
- case SCK_DIV_BY_ZERO: msg = " for DIV_BY_ZERO"; break;
- case SCK_OVERFLOW: msg = " for OVERFLOW"; break;
+ case SCK_RNGCHK_FAIL:
+ msg = " for RNGCHK_FAIL";
+ break;
+ case SCK_PAUSE_EXEC:
+ msg = " for PAUSE_EXEC";
+ break;
+ case SCK_DIV_BY_ZERO:
+ msg = " for DIV_BY_ZERO";
+ break;
+ case SCK_OVERFLOW:
+ msg = " for OVERFLOW";
+ break;
#if COR_JIT_EE_VERSION > 460
- case SCK_ARG_EXCPN: msg = " for ARG_EXCPN"; break;
- case SCK_ARG_RNG_EXCPN: msg = " for ARG_RNG_EXCPN"; break;
-#endif //COR_JIT_EE_VERSION
- default: msg = " for ??"; break;
+ case SCK_ARG_EXCPN:
+ msg = " for ARG_EXCPN";
+ break;
+ case SCK_ARG_RNG_EXCPN:
+ msg = " for ARG_RNG_EXCPN";
+ break;
+#endif // COR_JIT_EE_VERSION
+ default:
+ msg = " for ??";
+ break;
}
printf("\nfgAddCodeRef -"
@@ -17489,7 +17633,6 @@ BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk,
}
#endif // DEBUG
-
#ifdef DEBUG
newBlk->bbTgtStkDepth = stkDepth;
#endif // DEBUG
@@ -17512,30 +17655,35 @@ BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk,
switch (kind)
{
- case SCK_RNGCHK_FAIL: helper = CORINFO_HELP_RNGCHKFAIL;
- break;
+ case SCK_RNGCHK_FAIL:
+ helper = CORINFO_HELP_RNGCHKFAIL;
+ break;
- case SCK_DIV_BY_ZERO: helper = CORINFO_HELP_THROWDIVZERO;
- break;
+ case SCK_DIV_BY_ZERO:
+ helper = CORINFO_HELP_THROWDIVZERO;
+ break;
- case SCK_ARITH_EXCPN: helper = CORINFO_HELP_OVERFLOW;
- noway_assert(SCK_OVERFLOW == SCK_ARITH_EXCPN);
- break;
+ case SCK_ARITH_EXCPN:
+ helper = CORINFO_HELP_OVERFLOW;
+ noway_assert(SCK_OVERFLOW == SCK_ARITH_EXCPN);
+ break;
#if COR_JIT_EE_VERSION > 460
- case SCK_ARG_EXCPN: helper = CORINFO_HELP_THROW_ARGUMENTEXCEPTION;
- break;
+ case SCK_ARG_EXCPN:
+ helper = CORINFO_HELP_THROW_ARGUMENTEXCEPTION;
+ break;
- case SCK_ARG_RNG_EXCPN: helper = CORINFO_HELP_THROW_ARGUMENTOUTOFRANGEEXCEPTION;
- break;
+ case SCK_ARG_RNG_EXCPN:
+ helper = CORINFO_HELP_THROW_ARGUMENTOUTOFRANGEEXCEPTION;
+ break;
#endif // COR_JIT_EE_VERSION
- // case SCK_PAUSE_EXEC:
- // noway_assert(!"add code to pause exec");
+ // case SCK_PAUSE_EXEC:
+ // noway_assert(!"add code to pause exec");
- default:
- noway_assert(!"unexpected code addition kind");
- return nullptr;
+ default:
+ noway_assert(!"unexpected code addition kind");
+ return nullptr;
}
noway_assert(helper != CORINFO_HELP_UNDEF);
@@ -17543,8 +17691,8 @@ BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk,
// Add the appropriate helper call.
tree = gtNewHelperCallNode(helper, TYP_VOID, GTF_EXCEPT);
- // There are no args here but fgMorphArgs has side effects
- // such as setting the outgoing arg area (which is necessary
+ // There are no args here but fgMorphArgs has side effects
+ // such as setting the outgoing arg area (which is necessary
// on AMD64 if there are any calls).
tree = fgMorphArgs(tree);
@@ -17552,7 +17700,7 @@ BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk,
fgInsertStmtAtEnd(newBlk, fgNewStmtFromTree(tree));
- return add->acdDstBlk;
+ return add->acdDstBlk;
}
/*****************************************************************************
@@ -17562,20 +17710,21 @@ BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk,
* a given type of exception
*/
-Compiler::AddCodeDsc* Compiler::fgFindExcptnTarget(SpecialCodeKind kind,
- unsigned refData)
+Compiler::AddCodeDsc* Compiler::fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData)
{
- if (!(fgExcptnTargetCache[kind] && // Try the cached value first
+ if (!(fgExcptnTargetCache[kind] && // Try the cached value first
fgExcptnTargetCache[kind]->acdData == refData))
{
// Too bad, have to search for the jump target for the exception
- AddCodeDsc* add = NULL;
+ AddCodeDsc* add = nullptr;
- for (add = fgAddCodeList; add != NULL; add = add->acdNext)
+ for (add = fgAddCodeList; add != nullptr; add = add->acdNext)
{
- if (add->acdData == refData && add->acdKind == kind)
+ if (add->acdData == refData && add->acdKind == kind)
+ {
break;
+ }
}
fgExcptnTargetCache[kind] = add; // Cache it
@@ -17590,7 +17739,7 @@ Compiler::AddCodeDsc* Compiler::fgFindExcptnTarget(SpecialCodeKind kind,
* range check is to jump to upon failure.
*/
-BasicBlock* Compiler::fgRngChkTarget(BasicBlock* block, unsigned stkDepth, SpecialCodeKind kind)
+BasicBlock* Compiler::fgRngChkTarget(BasicBlock* block, unsigned stkDepth, SpecialCodeKind kind)
{
#ifdef DEBUG
if (verbose)
@@ -17602,13 +17751,13 @@ BasicBlock* Compiler::fgRngChkTarget(BasicBlock* block, unsigned stkDept
/* We attach the target label to the containing try block (if any) */
noway_assert(!compIsForInlining());
- return fgAddCodeRef(block, bbThrowIndex(block), kind, stkDepth);
+ return fgAddCodeRef(block, bbThrowIndex(block), kind, stkDepth);
}
// Sequences the tree.
// prevTree is what gtPrev of the first node in execution order gets set to.
// Returns the first node (execution order) in the sequenced tree.
-GenTree* Compiler::fgSetTreeSeq(GenTree* tree, GenTree* prevTree)
+GenTree* Compiler::fgSetTreeSeq(GenTree* tree, GenTree* prevTree)
{
GenTree list;
@@ -17637,10 +17786,10 @@ GenTree* Compiler::fgSetTreeSeq(GenTree* tree, GenTree* prevTree)
* Uses 'global' - fgTreeSeqLst
*/
-void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
+void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
{
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
noway_assert(tree);
assert(!IsUninitialized(tree));
@@ -17653,7 +17802,7 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
/* Is this a leaf/constant node? */
- if (kind & (GTK_CONST|GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
{
fgSetTreeSeqFinish(tree);
return;
@@ -17661,17 +17810,16 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
// Special handling for GT_LIST
if (tree->OperGet() == GT_LIST)
{
-
- if (tree->gtOp.gtOp2 != nullptr
- && tree->gtOp.gtOp2->gtOper != GT_LIST)
+
+ if (tree->gtOp.gtOp2 != nullptr && tree->gtOp.gtOp2->gtOper != GT_LIST)
{
// This is a special kind of GT_LIST that only occurs under initBlk and copyBlk.
// It is used as a pair, where op1 is the dst and op2 is the src (value or location)
@@ -17688,10 +17836,10 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
// As we go, we will link the GT_LIST nodes in reverse order - we will number
// them and update fgTreeSeqList in a subsequent traversal.
GenTreePtr nextList = tree;
- GenTreePtr list = nullptr;
- while(nextList != nullptr && nextList->OperGet() == GT_LIST)
+ GenTreePtr list = nullptr;
+ while (nextList != nullptr && nextList->OperGet() == GT_LIST)
{
- list = nextList;
+ list = nextList;
GenTreePtr listItem = list->gtOp.gtOp1;
fgSetTreeSeqHelper(listItem);
nextList = list->gtOp.gtOp2;
@@ -17708,7 +17856,7 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
do
{
assert(list != nullptr);
- list = nextList;
+ list = nextList;
nextList = list->gtNext;
fgSetTreeSeqFinish(list);
} while (list != tree);
@@ -17721,14 +17869,14 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
bool reverse = ((tree->gtFlags & GTF_REVERSE_OPS) != 0);
if (reverse)
{
- assert(op1 != NULL && op2 != NULL);
+ assert(op1 != nullptr && op2 != nullptr);
fgSetTreeSeqHelper(op2);
}
- if (op1 != NULL)
+ if (op1 != nullptr)
{
fgSetTreeSeqHelper(op1);
}
- if (!reverse && op2 != NULL)
+ if (!reverse && op2 != nullptr)
{
fgSetTreeSeqHelper(op2);
}
@@ -17739,9 +17887,9 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
/* Check for a nilary operator */
- if (op1 == NULL)
+ if (op1 == nullptr)
{
- noway_assert(op2 == NULL);
+ noway_assert(op2 == nullptr);
fgSetTreeSeqFinish(tree);
return;
}
@@ -17749,7 +17897,7 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
/* Is this a unary operator?
* Although UNARY GT_IND has a special structure */
- if (oper == GT_IND)
+ if (oper == GT_IND)
{
/* Visit the indirection first - op2 may point to the
* jump Label for array-index-out-of-range */
@@ -17761,7 +17909,7 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
/* Now this is REALLY a unary operator */
- if (!op2)
+ if (!op2)
{
/* Visit the (only) operand and we're done */
@@ -17770,18 +17918,18 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
return;
}
- /*
- For "real" ?: operators, we make sure the order is
- as follows:
+ /*
+ For "real" ?: operators, we make sure the order is
+ as follows:
- condition
- 1st operand
- GT_COLON
- 2nd operand
- GT_QMARK
- */
+ condition
+ 1st operand
+ GT_COLON
+ 2nd operand
+ GT_QMARK
+ */
- if (oper == GT_QMARK)
+ if (oper == GT_QMARK)
{
noway_assert((tree->gtFlags & GTF_REVERSE_OPS) == 0);
@@ -17798,7 +17946,7 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
return;
}
- if (oper == GT_COLON)
+ if (oper == GT_COLON)
{
fgSetTreeSeqFinish(tree);
return;
@@ -17806,7 +17954,7 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
/* This is a binary operator */
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
fgSetTreeSeqHelper(op2);
fgSetTreeSeqHelper(op1);
@@ -17823,101 +17971,100 @@ void Compiler::fgSetTreeSeqHelper(GenTreePtr tree)
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
- case GT_FIELD:
- noway_assert(tree->gtField.gtFldObj == NULL);
- break;
+ case GT_FIELD:
+ noway_assert(tree->gtField.gtFldObj == nullptr);
+ break;
- case GT_CALL:
+ case GT_CALL:
- /* We'll evaluate the 'this' argument value first */
- if (tree->gtCall.gtCallObjp)
- {
- fgSetTreeSeqHelper(tree->gtCall.gtCallObjp);
- }
+ /* We'll evaluate the 'this' argument value first */
+ if (tree->gtCall.gtCallObjp)
+ {
+ fgSetTreeSeqHelper(tree->gtCall.gtCallObjp);
+ }
- /* We'll evaluate the arguments next, left to right
- * NOTE: setListOrder needs cleanup - eliminate the #ifdef afterwards */
+ /* We'll evaluate the arguments next, left to right
+ * NOTE: setListOrder needs cleanup - eliminate the #ifdef afterwards */
- if (tree->gtCall.gtCallArgs)
- {
- fgSetTreeSeqHelper(tree->gtCall.gtCallArgs);
- }
+ if (tree->gtCall.gtCallArgs)
+ {
+ fgSetTreeSeqHelper(tree->gtCall.gtCallArgs);
+ }
- /* Evaluate the temp register arguments list
- * This is a "hidden" list and its only purpose is to
- * extend the life of temps until we make the call */
+ /* Evaluate the temp register arguments list
+ * This is a "hidden" list and its only purpose is to
+ * extend the life of temps until we make the call */
- if (tree->gtCall.gtCallLateArgs)
- {
- fgSetTreeSeqHelper(tree->gtCall.gtCallLateArgs);
- }
+ if (tree->gtCall.gtCallLateArgs)
+ {
+ fgSetTreeSeqHelper(tree->gtCall.gtCallLateArgs);
+ }
- if ((tree->gtCall.gtCallType == CT_INDIRECT) && (tree->gtCall.gtCallCookie != NULL))
- {
- fgSetTreeSeqHelper(tree->gtCall.gtCallCookie);
- }
+ if ((tree->gtCall.gtCallType == CT_INDIRECT) && (tree->gtCall.gtCallCookie != nullptr))
+ {
+ fgSetTreeSeqHelper(tree->gtCall.gtCallCookie);
+ }
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- {
- fgSetTreeSeqHelper(tree->gtCall.gtCallAddr);
- }
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
+ {
+ fgSetTreeSeqHelper(tree->gtCall.gtCallAddr);
+ }
- if (tree->gtCall.gtControlExpr)
- {
- fgSetTreeSeqHelper(tree->gtCall.gtControlExpr);
- }
+ if (tree->gtCall.gtControlExpr)
+ {
+ fgSetTreeSeqHelper(tree->gtCall.gtControlExpr);
+ }
- break;
+ break;
- case GT_ARR_ELEM:
+ case GT_ARR_ELEM:
- fgSetTreeSeqHelper(tree->gtArrElem.gtArrObj);
+ fgSetTreeSeqHelper(tree->gtArrElem.gtArrObj);
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- {
- fgSetTreeSeqHelper(tree->gtArrElem.gtArrInds[dim]);
- }
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ fgSetTreeSeqHelper(tree->gtArrElem.gtArrInds[dim]);
+ }
- break;
+ break;
- case GT_ARR_OFFSET:
- fgSetTreeSeqHelper(tree->gtArrOffs.gtOffset);
- fgSetTreeSeqHelper(tree->gtArrOffs.gtIndex);
- fgSetTreeSeqHelper(tree->gtArrOffs.gtArrObj);
- break;
+ case GT_ARR_OFFSET:
+ fgSetTreeSeqHelper(tree->gtArrOffs.gtOffset);
+ fgSetTreeSeqHelper(tree->gtArrOffs.gtIndex);
+ fgSetTreeSeqHelper(tree->gtArrOffs.gtArrObj);
+ break;
- case GT_CMPXCHG:
- //Evaluate the trees left to right
- fgSetTreeSeqHelper(tree->gtCmpXchg.gtOpLocation);
- fgSetTreeSeqHelper(tree->gtCmpXchg.gtOpValue);
- fgSetTreeSeqHelper(tree->gtCmpXchg.gtOpComparand);
- break;
+ case GT_CMPXCHG:
+ // Evaluate the trees left to right
+ fgSetTreeSeqHelper(tree->gtCmpXchg.gtOpLocation);
+ fgSetTreeSeqHelper(tree->gtCmpXchg.gtOpValue);
+ fgSetTreeSeqHelper(tree->gtCmpXchg.gtOpComparand);
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- //Evaluate the trees left to right
- fgSetTreeSeqHelper(tree->gtBoundsChk.gtArrLen);
- fgSetTreeSeqHelper(tree->gtBoundsChk.gtIndex);
- break;
+ // Evaluate the trees left to right
+ fgSetTreeSeqHelper(tree->gtBoundsChk.gtArrLen);
+ fgSetTreeSeqHelper(tree->gtBoundsChk.gtIndex);
+ break;
- default:
+ default:
#ifdef DEBUG
- gtDispTree(tree);
- noway_assert(!"unexpected operator");
+ gtDispTree(tree);
+ noway_assert(!"unexpected operator");
#endif // DEBUG
- break;
+ break;
}
fgSetTreeSeqFinish(tree);
}
-void
-Compiler::fgSetTreeSeqFinish(GenTreePtr tree)
+void Compiler::fgSetTreeSeqFinish(GenTreePtr tree)
{
/* Append to the node list */
++fgTreeSeqNum;
@@ -17925,7 +18072,7 @@ Compiler::fgSetTreeSeqFinish(GenTreePtr tree)
#ifdef DEBUG
tree->gtSeqNum = fgTreeSeqNum;
- if (verbose & 0)
+ if (verbose & 0)
{
printf("SetTreeOrder: ");
printTreeID(fgTreeSeqLst);
@@ -17936,13 +18083,13 @@ Compiler::fgSetTreeSeqFinish(GenTreePtr tree)
#endif // DEBUG
fgTreeSeqLst->gtNext = tree;
- tree->gtNext = nullptr;
- tree->gtPrev = fgTreeSeqLst;
- fgTreeSeqLst = tree;
+ tree->gtNext = nullptr;
+ tree->gtPrev = fgTreeSeqLst;
+ fgTreeSeqLst = tree;
/* Remember the very first node */
- if (!fgTreeSeqBeg)
+ if (!fgTreeSeqBeg)
{
fgTreeSeqBeg = tree;
assert(tree->gtSeqNum == 1);
@@ -17956,10 +18103,10 @@ Compiler::fgSetTreeSeqFinish(GenTreePtr tree)
* Also finds blocks that need GC polls and inserts them as needed.
*/
-void Compiler::fgSetBlockOrder()
+void Compiler::fgSetBlockOrder()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** In fgSetBlockOrder()\n");
}
@@ -17974,15 +18121,17 @@ void Compiler::fgSetBlockOrder()
/* If we don't compute the doms, then we never mark blocks as loops. */
if (fgDomsComputed)
{
- for (BasicBlock * block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
/* If this block is a loop header, mark it appropriately */
- if (block->isLoopHead())
+ if (block->isLoopHead())
+ {
fgMarkLoopHead(block);
+ }
}
}
- //only enable fully interruptible code if we're hijacking.
+ // only enable fully interruptible code if we're hijacking.
else if (GCPOLL_NONE == opts.compGCPollType)
{
/* If we don't have the dominators, use an abbreviated test for fully interruptible. If there are
@@ -17994,32 +18143,34 @@ void Compiler::fgSetBlockOrder()
*/
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
- //true if the edge is forward, or if it is a back edge and either the source or the dest is GC safe.
-#define EDGE_IS_GC_SAFE(src, dst) (((src)->bbNum < (dst)->bbNum) || (((src)->bbFlags | (dst)->bbFlags) & BBF_GC_SAFE_POINT))
+// true if the edge is forward, or if it is a back edge and either the source or the dest is GC safe.
+#define EDGE_IS_GC_SAFE(src, dst) \
+ (((src)->bbNum < (dst)->bbNum) || (((src)->bbFlags | (dst)->bbFlags) & BBF_GC_SAFE_POINT))
bool partiallyInterruptible = true;
switch (block->bbJumpKind)
{
- case BBJ_COND:
- case BBJ_ALWAYS:
- partiallyInterruptible = EDGE_IS_GC_SAFE(block, block->bbJumpDest);
- break;
+ case BBJ_COND:
+ case BBJ_ALWAYS:
+ partiallyInterruptible = EDGE_IS_GC_SAFE(block, block->bbJumpDest);
+ break;
- case BBJ_SWITCH:
+ case BBJ_SWITCH:
- unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpPtr; jumpPtr = block->bbJumpSwt->bbsDstTab;
+ unsigned jumpCnt;
+ jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpPtr;
+ jumpPtr = block->bbJumpSwt->bbsDstTab;
- do
- {
- partiallyInterruptible &= EDGE_IS_GC_SAFE(block, *jumpPtr);
- }
- while (++jumpPtr, --jumpCnt);
+ do
+ {
+ partiallyInterruptible &= EDGE_IS_GC_SAFE(block, *jumpPtr);
+ } while (++jumpPtr, --jumpCnt);
- break;
+ break;
- default:
- break;
+ default:
+ break;
}
if (!partiallyInterruptible)
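
The EDGE_IS_GC_SAFE macro above accepts forward edges unconditionally and accepts back edges only when either endpoint already carries the GC-safe-point flag. The same test written out as a plain function over a hypothetical block type:

    #include <cassert>

    const unsigned BLOCK_GC_SAFE_POINT = 0x1; // stand-in for BBF_GC_SAFE_POINT

    struct Block
    {
        unsigned num;   // position in the block list, like bbNum
        unsigned flags;
    };

    // Forward edges are fine; back edges are fine only if either end is a GC safe point.
    bool edgeIsGcSafe(const Block& src, const Block& dst)
    {
        return (src.num < dst.num) || ((src.flags | dst.flags) & BLOCK_GC_SAFE_POINT) != 0;
    }

    int main()
    {
        Block a{1, 0}, b{5, 0}, c{2, BLOCK_GC_SAFE_POINT};
        assert(edgeIsGcSafe(a, b));  // forward edge
        assert(!edgeIsGcSafe(b, a)); // back edge, neither end is a safe point
        assert(edgeIsGcSafe(b, c));  // back edge, but the destination is a safe point
        return 0;
    }
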
@@ -18049,8 +18200,7 @@ void Compiler::fgSetBlockOrder()
#if FEATURE_FASTTAILCALL
#ifndef JIT32_GCENCODER
- if (block->endsWithTailCallOrJmp(this, true) &&
- !(block->bbFlags & BBF_GC_SAFE_POINT) &&
+ if (block->endsWithTailCallOrJmp(this, true) && !(block->bbFlags & BBF_GC_SAFE_POINT) &&
optReachWithoutCall(fgFirstBB, block))
{
// We have a tail call that is reachable without making any other
@@ -18087,13 +18237,12 @@ void Compiler::fgSetBlockOrder()
#endif // DEBUG
}
-
/*****************************************************************************/
-void Compiler::fgSetStmtSeq(GenTreePtr tree)
+void Compiler::fgSetStmtSeq(GenTreePtr tree)
{
- GenTree list; // helper node that we use to start the StmtList
- // It's located in front of the first node in the list
+ GenTree list; // helper node that we use to start the StmtList
+ // It's located in front of the first node in the list
noway_assert(tree->gtOper == GT_STMT);
noway_assert(tree->gtNext == nullptr || tree->gtNext->gtFlags & GTF_STMT_TOP_LEVEL);
@@ -18102,7 +18251,7 @@ void Compiler::fgSetStmtSeq(GenTreePtr tree)
fgTreeSeqNum = 0;
fgTreeSeqLst = &list;
- fgTreeSeqBeg = NULL;
+ fgTreeSeqBeg = nullptr;
fgSetTreeSeqHelper(tree->gtStmt.gtStmtExpr);
@@ -18112,7 +18261,7 @@ void Compiler::fgSetStmtSeq(GenTreePtr tree)
#ifdef DEBUG
- if (list.gtNext->gtPrev != &list)
+ if (list.gtNext->gtPrev != &list)
{
printf("&list ");
printTreeID(&list);
@@ -18135,7 +18284,7 @@ void Compiler::fgSetStmtSeq(GenTreePtr tree)
printTreeID(last);
printf("\n");
-BAD_LIST:;
+ BAD_LIST:;
printf("\n");
gtDispTree(tree->gtStmt.gtStmtExpr);
@@ -18162,28 +18311,29 @@ BAD_LIST:;
/* Fix the first node's 'prev' link */
noway_assert(list.gtNext->gtPrev == &list);
- list.gtNext->gtPrev = NULL;
+ list.gtNext->gtPrev = nullptr;
#ifdef DEBUG
/* Keep track of the highest # of tree nodes */
- if (BasicBlock::s_nMaxTrees < fgTreeSeqNum)
+ if (BasicBlock::s_nMaxTrees < fgTreeSeqNum)
{
- BasicBlock::s_nMaxTrees = fgTreeSeqNum;
+ BasicBlock::s_nMaxTrees = fgTreeSeqNum;
}
#endif // DEBUG
-
}
/*****************************************************************************/
-void Compiler::fgSetBlockOrder(BasicBlock* block)
+void Compiler::fgSetBlockOrder(BasicBlock* block)
{
- GenTreePtr tree;
+ GenTreePtr tree;
tree = block->bbTreeList;
- if (!tree)
+ if (!tree)
+ {
return;
+ }
for (;;)
{
@@ -18243,12 +18393,12 @@ void Compiler::fgSetBlockOrder(BasicBlock* block)
* switched order.
*/
-void Compiler::fgOrderBlockOps(GenTreePtr tree,
- regMaskTP reg0,
- regMaskTP reg1,
- regMaskTP reg2,
- GenTreePtr* opsPtr, // OUT
- regMaskTP* regsPtr) // OUT
+void Compiler::fgOrderBlockOps(GenTreePtr tree,
+ regMaskTP reg0,
+ regMaskTP reg1,
+ regMaskTP reg2,
+ GenTreePtr* opsPtr, // OUT
+ regMaskTP* regsPtr) // OUT
{
assert(tree->OperIsBlkOp());
@@ -18256,26 +18406,26 @@ void Compiler::fgOrderBlockOps(GenTreePtr tree,
assert(tree->gtOp.gtOp1->gtOp.gtOp1 && tree->gtOp.gtOp1->gtOp.gtOp2);
assert(tree->gtOp.gtOp2);
- GenTreePtr ops[3] =
- {
- tree->gtOp.gtOp1->gtOp.gtOp1, // Dest address
- tree->gtOp.gtOp1->gtOp.gtOp2, // Val / Src address
- tree->gtOp.gtOp2 // Size of block
+ GenTreePtr ops[3] = {
+ tree->gtOp.gtOp1->gtOp.gtOp1, // Dest address
+ tree->gtOp.gtOp1->gtOp.gtOp2, // Val / Src address
+ tree->gtOp.gtOp2 // Size of block
};
- regMaskTP regs[3] = { reg0, reg1, reg2 };
+ regMaskTP regs[3] = {reg0, reg1, reg2};
static int blockOpsOrder[4][3] =
- // tree->gtFlags | tree->gtOp.gtOp1->gtFlags
- { // ---------------------+----------------------------
- { 0, 1, 2 }, // - | -
- { 2, 0, 1 }, // GTF_REVERSE_OPS | -
- { 1, 0, 2 }, // - | GTF_REVERSE_OPS
- { 2, 1, 0 } // GTF_REVERSE_OPS | GTF_REVERSE_OPS
- };
+ // tree->gtFlags | tree->gtOp.gtOp1->gtFlags
+ {
+ // ---------------------+----------------------------
+ {0, 1, 2}, // - | -
+ {2, 0, 1}, // GTF_REVERSE_OPS | -
+ {1, 0, 2}, // - | GTF_REVERSE_OPS
+ {2, 1, 0} // GTF_REVERSE_OPS | GTF_REVERSE_OPS
+ };
- int orderNum = ((tree->gtFlags & GTF_REVERSE_OPS) != 0) * 1 +
- ((tree->gtOp.gtOp1->gtFlags & GTF_REVERSE_OPS) != 0) * 2;
+ int orderNum =
+ ((tree->gtFlags & GTF_REVERSE_OPS) != 0) * 1 + ((tree->gtOp.gtOp1->gtFlags & GTF_REVERSE_OPS) != 0) * 2;
assert(orderNum < 4);
@@ -18285,13 +18435,13 @@ void Compiler::fgOrderBlockOps(GenTreePtr tree,
// Fill in the OUT arrays according to the order we have selected
- opsPtr[0] = ops[ order[0] ];
- opsPtr[1] = ops[ order[1] ];
- opsPtr[2] = ops[ order[2] ];
+ opsPtr[0] = ops[order[0]];
+ opsPtr[1] = ops[order[1]];
+ opsPtr[2] = ops[order[2]];
- regsPtr[0] = regs[ order[0] ];
- regsPtr[1] = regs[ order[1] ];
- regsPtr[2] = regs[ order[2] ];
+ regsPtr[0] = regs[order[0]];
+ regsPtr[1] = regs[order[1]];
+ regsPtr[2] = regs[order[2]];
}
#endif // LEGACY_BACKEND
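
fgOrderBlockOps, above, packs the two GTF_REVERSE_OPS bits into a 2-bit index and uses it to pick one of four operand permutations from a table. A minimal sketch of the same table-driven selection with stand-in flags:

    #include <cassert>

    // Two independent "reverse" bits select one of four permutations of {dest, src, size}.
    static const int blockOpsOrder[4][3] = {
        {0, 1, 2}, // neither reversed
        {2, 0, 1}, // outer node reversed
        {1, 0, 2}, // inner node reversed
        {2, 1, 0}  // both reversed
    };

    void orderOps(bool outerReversed, bool innerReversed, const int in[3], int out[3])
    {
        int orderNum = (outerReversed ? 1 : 0) + (innerReversed ? 2 : 0);
        for (int i = 0; i < 3; i++)
        {
            out[i] = in[blockOpsOrder[orderNum][i]];
        }
    }

    int main()
    {
        const int ops[3] = {10, 20, 30}; // dest, src, size stand-ins
        int       out[3];
        orderOps(true, false, ops, out); // "reverse" flag set on the outer node only
        assert((out[0] == 30) && (out[1] == 10) && (out[2] == 20));
        return 0;
    }
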
@@ -18458,8 +18608,8 @@ void Compiler::fgDeleteTreeFromList(GenTreeStmt* stmt, GenTreePtr tree)
assert(fgTreeIsInStmt(tree, stmt));
GenTreePtr firstNode = fgGetFirstNode(tree);
- GenTreePtr prevNode = firstNode->gtPrev;
- GenTreePtr nextNode = tree->gtNext;
+ GenTreePtr prevNode = firstNode->gtPrev;
+ GenTreePtr nextNode = tree->gtNext;
if (prevNode != nullptr)
{
@@ -18484,7 +18634,6 @@ void Compiler::fgDeleteTreeFromList(GenTreeStmt* stmt, GenTreePtr tree)
}
}
-
//------------------------------------------------------------------------
// fgTreeIsInStmt: return 'true' if 'tree' is in the execution order list of statement 'stmt'.
// This works for a single node or an entire tree, assuming a well-formed tree, where the entire
@@ -18500,7 +18649,9 @@ bool Compiler::fgTreeIsInStmt(GenTree* tree, GenTreeStmt* stmt)
for (GenTree* curr = stmt->gtStmtList; curr != nullptr; curr = curr->gtNext)
{
if (tree == curr)
+ {
return true;
+ }
}
return false;
}
@@ -18524,7 +18675,7 @@ void Compiler::fgInsertTreeInListBefore(GenTree* tree, GenTree* insertionPoint,
GenTree* beforeTree = insertionPoint->gtPrev;
insertionPoint->gtPrev = tree;
- tree->gtNext = insertionPoint;
+ tree->gtNext = insertionPoint;
GenTree* first = fgGetFirstNode(tree);
@@ -18565,10 +18716,10 @@ void Compiler::fgInsertTreeInListAfter(GenTree* tree, GenTree* insertionPoint, G
assert(!fgTreeIsInStmt(tree, stmt));
GenTree* afterTree = insertionPoint->gtNext;
- GenTree* first = fgGetFirstNode(tree);
+ GenTree* first = fgGetFirstNode(tree);
insertionPoint->gtNext = first;
- first->gtPrev = insertionPoint;
+ first->gtPrev = insertionPoint;
tree->gtNext = afterTree;
@@ -18586,7 +18737,10 @@ void Compiler::fgInsertTreeInListAfter(GenTree* tree, GenTree* insertionPoint, G
//------------------------------------------------------------------------
// fgInsertTreeBeforeAsEmbedded: Insert a tree before 'insertionPoint' as an embedded statement under 'stmt'.
//
-GenTreeStmt* Compiler::fgInsertTreeBeforeAsEmbedded(GenTree* tree, GenTree* insertionPoint, GenTreeStmt* stmt, BasicBlock* block)
+GenTreeStmt* Compiler::fgInsertTreeBeforeAsEmbedded(GenTree* tree,
+ GenTree* insertionPoint,
+ GenTreeStmt* stmt,
+ BasicBlock* block)
{
assert(tree->gtOper != GT_STMT);
assert(insertionPoint->gtOper != GT_STMT);
@@ -18603,7 +18757,7 @@ GenTreeStmt* Compiler::fgInsertTreeBeforeAsEmbedded(GenTree* tree, GenTree* inse
// since we could be inserting it ahead of an already existing embedded statement
// in execution order.
GenTreeStmt* topStmt = fgFindTopLevelStmtBackwards(stmt);
- GenTreeStmt* result = fgMakeEmbeddedStmt(block, tree, topStmt);
+ GenTreeStmt* result = fgMakeEmbeddedStmt(block, tree, topStmt);
DBEXEC(true, fgDebugCheckNodeLinks(block, result));
return result;
@@ -18612,7 +18766,10 @@ GenTreeStmt* Compiler::fgInsertTreeBeforeAsEmbedded(GenTree* tree, GenTree* inse
//------------------------------------------------------------------------
// fgInsertTreeAfterAsEmbedded: Insert a tree after 'insertionPoint' as an embedded statement under 'stmt'.
// If it is inserted after all nodes in the given tree, just make it a new statement.
-GenTreeStmt* Compiler::fgInsertTreeAfterAsEmbedded(GenTree* tree, GenTree* insertionPoint, GenTreeStmt* stmt, BasicBlock* block)
+GenTreeStmt* Compiler::fgInsertTreeAfterAsEmbedded(GenTree* tree,
+ GenTree* insertionPoint,
+ GenTreeStmt* stmt,
+ BasicBlock* block)
{
assert(tree->gtOper != GT_STMT);
assert(insertionPoint->gtOper != GT_STMT);
@@ -18656,46 +18813,45 @@ GenTreeStmt* Compiler::fgInsertTreeAfterAsEmbedded(GenTree* tree, GenTree* inser
return result;
}
-
// Examine the bbTreeList and return the estimated code size for this block
unsigned Compiler::fgGetCodeEstimate(BasicBlock* block)
{
- unsigned costSz = 0; // estimate of blocks code size cost
+ unsigned costSz = 0; // estimate of blocks code size cost
switch (block->bbJumpKind)
{
- case BBJ_NONE:
- costSz = 0;
- break;
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- case BBJ_LEAVE:
- case BBJ_COND:
- costSz = 2;
- break;
- case BBJ_CALLFINALLY:
- costSz = 5;
- break;
- case BBJ_SWITCH:
- costSz = 10;
- break;
- case BBJ_THROW:
- costSz = 1; // We place a int3 after the code for a throw block
- break;
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- costSz = 1;
- break;
- case BBJ_RETURN: // return from method
- costSz = 3;
- break;
- default:
- noway_assert(!"Bad bbJumpKind");
- break;
+ case BBJ_NONE:
+ costSz = 0;
+ break;
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ case BBJ_LEAVE:
+ case BBJ_COND:
+ costSz = 2;
+ break;
+ case BBJ_CALLFINALLY:
+ costSz = 5;
+ break;
+ case BBJ_SWITCH:
+ costSz = 10;
+ break;
+ case BBJ_THROW:
+ costSz = 1; // We place a int3 after the code for a throw block
+ break;
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ costSz = 1;
+ break;
+ case BBJ_RETURN: // return from method
+ costSz = 3;
+ break;
+ default:
+ noway_assert(!"Bad bbJumpKind");
+ break;
}
- GenTreePtr tree = block->FirstNonPhiDef();
- if (tree)
+ GenTreePtr tree = block->FirstNonPhiDef();
+ if (tree)
{
do
{
@@ -18713,8 +18869,7 @@ unsigned Compiler::fgGetCodeEstimate(BasicBlock* block)
}
tree = tree->gtNext;
- }
- while (tree);
+ } while (tree);
}
return costSz;
@@ -18739,7 +18894,7 @@ static escapeMapping_t s_EscapeFileMapping[] =
{'&', "~amp~"},
{'"', "~quot~"},
{'*', "~star~"},
- {0, 0}
+ {0, nullptr}
};
static escapeMapping_t s_EscapeMapping[] =
@@ -18748,7 +18903,7 @@ static escapeMapping_t s_EscapeMapping[] =
{'>', "&gt;"},
{'&', "&amp;"},
{'"', "&quot;"},
- {0, 0}
+ {0, nullptr}
};
// clang-format on
@@ -18859,9 +19014,9 @@ static void fprintfDouble(FILE* fgxFile, double value)
FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, LPCWSTR type)
{
FILE* fgxFile;
- LPCWSTR pattern = NULL;
- LPCWSTR filename = NULL;
- LPCWSTR pathname = NULL;
+ LPCWSTR pattern = nullptr;
+ LPCWSTR filename = nullptr;
+ LPCWSTR pathname = nullptr;
const char* escapedString;
bool createDuplicateFgxFiles = true;
@@ -18880,18 +19035,21 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phas
}
#endif // DEBUG
- if (fgBBcount <= 1)
- return NULL;
+ if (fgBBcount <= 1) {
+ return nullptr;
+}
- if (pattern == NULL)
- return NULL;
+ if (pattern == nullptr) {
+ return nullptr;
+}
- if (wcslen(pattern) == 0)
- return NULL;
+ if (wcslen(pattern) == 0) {
+ return nullptr;
+}
LPCWSTR phasePattern = JitConfig.JitDumpFgPhase();
LPCWSTR phaseName = PhaseShortNames[phase];
- if (phasePattern == 0)
+ if (phasePattern == nullptr)
{
if (phase != PHASE_DETERMINE_FIRST_COLD_BLOCK)
{
@@ -18908,7 +19066,7 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phas
if (*pattern != W('*'))
{
- bool hasColon = (wcschr(pattern, W(':')) != 0);
+ bool hasColon = (wcschr(pattern, W(':')) != nullptr);
if (hasColon)
{
@@ -18921,8 +19079,9 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phas
{
while ((*pattern != W(':')) && (*pattern != W('*')))
{
- if (*pattern != *className)
- return NULL;
+ if (*pattern != *className) {
+ return nullptr;
+}
pattern++;
className++;
@@ -18933,12 +19092,14 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phas
}
else
{
- if (*className != 0)
- return NULL;
+ if (*className != 0) {
+ return nullptr;
+}
}
}
- if (*pattern != W(':'))
- return NULL;
+ if (*pattern != W(':')) {
+ return nullptr;
+}
pattern++;
}
@@ -18952,8 +19113,9 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phas
{
while ((*pattern != 0) && (*pattern != W('*')))
{
- if (*pattern != *methodName)
- return NULL;
+ if (*pattern != *methodName) {
+ return nullptr;
+}
pattern++;
methodName++;
@@ -18964,15 +19126,17 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phas
}
else
{
- if (*methodName != 0)
- return NULL;
+ if (*methodName != 0) {
+ return nullptr;
+}
}
}
- if (*pattern != 0)
- return NULL;
+ if (*pattern != 0) {
+ return nullptr;
+}
}
- if (filename == NULL)
+ if (filename == nullptr)
{
filename = W("default");
}
@@ -18986,7 +19150,7 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phas
}
else
{
- return NULL;
+ return nullptr;
}
}
if (wcscmp(filename, W("hot")) == 0)
@@ -18999,7 +19163,7 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phas
}
else
{
- return NULL;
+ return nullptr;
}
}
else if (wcscmp(filename, W("cold")) == 0)
@@ -19011,7 +19175,7 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phas
}
else
{
- return NULL;
+ return nullptr;
}
}
else if (wcscmp(filename, W("jit")) == 0)
@@ -19023,7 +19187,7 @@ FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phas
}
else
{
- return NULL;
+ return nullptr;
}
}
else if (wcscmp(filename, W("all")) == 0)
@@ -19034,12 +19198,12 @@ ONE_FILE_PER_METHOD:;
escapedString = fgProcessEscapes(info.compFullName, s_EscapeFileMapping);
size_t wCharCount = strlen(escapedString) + wcslen(phaseName) + 1 + strlen("~999") + wcslen(type) + 1;
- if (pathname != NULL)
+ if (pathname != nullptr)
{
wCharCount += wcslen(pathname) + 1;
}
filename = (LPCWSTR) alloca(wCharCount * sizeof(WCHAR));
- if (pathname != NULL)
+ if (pathname != nullptr)
{
swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%S-%s.%s"), pathname, escapedString, phaseName, type);
}
@@ -19048,19 +19212,19 @@ ONE_FILE_PER_METHOD:;
swprintf_s((LPWSTR)filename, wCharCount, W("%S.%s"), escapedString, type);
}
fgxFile = _wfopen(filename, W("r")); // Check if this file already exists
- if (fgxFile != NULL)
+ if (fgxFile != nullptr)
{
// For Generic methods we will have both hot and cold versions
if (createDuplicateFgxFiles == false)
{
fclose(fgxFile);
- return NULL;
+ return nullptr;
}
// Yes, this filename already exists, so create a different one by appending ~2, ~3, etc...
for (int i = 2; i < 1000; i++)
{
fclose(fgxFile);
- if (pathname != NULL)
+ if (pathname != nullptr)
{
swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%S~%d.%s"), pathname, escapedString, i, type);
}
@@ -19069,14 +19233,15 @@ ONE_FILE_PER_METHOD:;
swprintf_s((LPWSTR)filename, wCharCount, W("%S~%d.%s"), escapedString, i, type);
}
fgxFile = _wfopen(filename, W("r")); // Check if this file exists
- if (fgxFile == NULL)
+ if (fgxFile == nullptr) {
break;
+}
}
// If we have already created 1000 files with this name then just fail
- if (fgxFile != NULL)
+ if (fgxFile != nullptr)
{
fclose(fgxFile);
- return NULL;
+ return nullptr;
}
}
fgxFile = _wfopen(filename, W("a+"));
@@ -19096,12 +19261,12 @@ ONE_FILE_PER_METHOD:;
{
LPCWSTR origFilename = filename;
size_t wCharCount = wcslen(origFilename) + wcslen(type) + 2;
- if (pathname != NULL)
+ if (pathname != nullptr)
{
wCharCount += wcslen(pathname) + 1;
}
filename = (LPCWSTR) alloca(wCharCount * sizeof(WCHAR));
- if (pathname != NULL)
+ if (pathname != nullptr)
{
swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%s.%s"), pathname, origFilename, type);
}
@@ -19162,7 +19327,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase)
FILE* fgxFile = fgOpenFlowGraphFile(&dontClose, phase, createDotFile ? W("dot") : W("fgx"));
- if (fgxFile == NULL)
+ if (fgxFile == nullptr)
{
return false;
}
@@ -19228,7 +19393,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase)
fprintf(fgxFile, "\n exactEdgeWeights=\"true\"");
}
}
- if (fgFirstColdBlock != NULL)
+ if (fgFirstColdBlock != nullptr)
{
fprintf(fgxFile, "\n firstColdBlock=\"%d\"", fgFirstColdBlock->bbNum);
}
@@ -19247,7 +19412,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase)
BasicBlock* block;
unsigned blockOrdinal;
for (block = fgFirstBB , blockOrdinal = 1;
- block != NULL;
+ block != nullptr;
block = block->bbNext, blockOrdinal++)
{
if (createDotFile)
@@ -19318,7 +19483,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase)
unsigned edgeNum = 1;
BasicBlock* bTarget;
- for (bTarget = fgFirstBB; bTarget != NULL; bTarget = bTarget->bbNext)
+ for (bTarget = fgFirstBB; bTarget != nullptr; bTarget = bTarget->bbNext)
{
double targetWeightDivisor;
if (bTarget->bbWeight == BB_ZERO_WEIGHT)
@@ -19331,7 +19496,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase)
}
flowList* edge;
- for (edge = bTarget->bbPreds; edge != NULL; edge = edge->flNext, edgeNum++)
+ for (edge = bTarget->bbPreds; edge != nullptr; edge = edge->flNext, edgeNum++)
{
BasicBlock* bSource = edge->flBlock;
double sourceWeightDivisor;
@@ -19821,8 +19986,9 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock,
ibcColWidth = max(ibcColWidth, thisIbcWidth);
}
- if (block == lastBlock)
+ if (block == lastBlock) {
break;
+}
}
if (ibcColWidth > 0)
{
@@ -19892,8 +20058,9 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock,
fgTableDispBasicBlock(block, ibcColWidth);
- if (block == lastBlock)
+ if (block == lastBlock) {
break;
+}
}
printf("------%*s------------------------------------%*s-----------------------%*s----------------------------------------\n",
@@ -19911,7 +20078,7 @@ void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock,
void Compiler::fgDispBasicBlocks(bool dumpTrees)
{
- fgDispBasicBlocks(fgFirstBB, NULL, dumpTrees);
+ fgDispBasicBlocks(fgFirstBB, nullptr, dumpTrees);
}
/*****************************************************************************/
@@ -19960,8 +20127,9 @@ void Compiler::fgDumpTrees(BasicBlock* firstBlock,
block->bbStmtNum = compCurStmtNum; // Set the block->bbStmtNum
}
}
- if (block == lastBlock)
+ if (block == lastBlock) {
break;
+}
}
printf("\n-------------------------------------------------------------------------------------------------------------------\n");
}
@@ -19978,8 +20146,9 @@ Compiler::fgWalkResult Compiler::fgStress64RsltMulCB(GenTreePtr* pTree, fgW
GenTreePtr tree = *pTree;
Compiler* pComp = data->compiler;
- if (tree->gtOper != GT_MUL || tree->gtType != TYP_INT || (tree->gtOverflow()))
+ if (tree->gtOper != GT_MUL || tree->gtType != TYP_INT || (tree->gtOverflow())) {
return WALK_CONTINUE;
+}
// To ensure optNarrowTree() doesn't fold back to the original tree.
tree->gtOp.gtOp1 = pComp->gtNewOperNode(GT_NOP, TYP_LONG, tree->gtOp.gtOp1);
@@ -19993,8 +20162,9 @@ Compiler::fgWalkResult Compiler::fgStress64RsltMulCB(GenTreePtr* pTree, fgW
void Compiler::fgStress64RsltMul()
{
- if (!compStressCompile(STRESS_64RSLT_MUL, 20))
+ if (!compStressCompile(STRESS_64RSLT_MUL, 20)) {
return;
+}
fgWalkAllTreesPre(fgStress64RsltMulCB, (void*)this);
}
@@ -20083,7 +20253,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */,
if (checkBBNum)
{
// Check that bbNum is sequential
- noway_assert(block->bbNext == NULL || (block->bbNum + 1 == block->bbNext->bbNum));
+ noway_assert(block->bbNext == nullptr || (block->bbNum + 1 == block->bbNext->bbNum));
}
// If the block is a BBJ_COND, a BBJ_SWITCH or a
@@ -20092,13 +20262,13 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */,
if (block->bbJumpKind == BBJ_COND)
{
- noway_assert(block->lastStmt()->gtNext == NULL &&
+ noway_assert(block->lastStmt()->gtNext == nullptr &&
block->lastTopLevelStmt()->gtStmtExpr->gtOper == GT_JTRUE);
}
else if (block->bbJumpKind == BBJ_SWITCH)
{
#ifndef LEGACY_BACKEND
- noway_assert(block->lastStmt()->gtNext == NULL &&
+ noway_assert(block->lastStmt()->gtNext == nullptr &&
(block->lastTopLevelStmt()->gtStmtExpr->gtOper == GT_SWITCH ||
block->lastTopLevelStmt()->gtStmtExpr->gtOper == GT_SWITCH_TABLE));
#else // LEGACY_BACKEND
@@ -20118,7 +20288,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */,
if (!fgCheapPredsValid) // Don't check cheap preds
{
// A filter has no predecessors
- noway_assert(block->bbPreds == NULL);
+ noway_assert(block->bbPreds == nullptr);
}
}
@@ -20163,16 +20333,19 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */,
if (ehTryDsc != nullptr)
{
// You can jump to the start of a try
- if (ehTryDsc->ebdTryBeg == block)
+ if (ehTryDsc->ebdTryBeg == block) {
goto CHECK_HND;
+}
// You can jump within the same try region
- if (bbInTryRegions(block->getTryIndex(), blockPred))
+ if (bbInTryRegions(block->getTryIndex(), blockPred)) {
goto CHECK_HND;
+}
// The catch block can jump back into the middle of the try
- if (bbInCatchHandlerRegions(block, blockPred))
+ if (bbInCatchHandlerRegions(block, blockPred)) {
goto CHECK_HND;
+}
// The end of a finally region is a BBJ_EHFINALLYRET block (during importing, BBJ_LEAVE) which
// is marked as "returning" to the BBJ_ALWAYS block following the BBJ_CALLFINALLY
@@ -20180,8 +20353,9 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */,
// the try region protected by the finally (for x86, ARM), but that's ok.
if (prevBlock->bbJumpKind == BBJ_CALLFINALLY &&
block->bbJumpKind == BBJ_ALWAYS &&
- blockPred->bbJumpKind == BBJ_EHFINALLYRET)
+ blockPred->bbJumpKind == BBJ_EHFINALLYRET) {
goto CHECK_HND;
+}
printf("Jump into the middle of try region: BB%02u branches to BB%02u\n", blockPred->bbNum, block->bbNum);
noway_assert(!"Jump into middle of try region");
@@ -20194,8 +20368,9 @@ CHECK_HND:;
{
// You can do a BBJ_EHFINALLYRET or BBJ_EHFILTERRET into a handler region
if ( (blockPred->bbJumpKind == BBJ_EHFINALLYRET)
- || (blockPred->bbJumpKind == BBJ_EHFILTERRET))
+ || (blockPred->bbJumpKind == BBJ_EHFILTERRET)) {
goto CHECK_JUMP;
+}
// Our try block can call our finally block
if ((block->bbCatchTyp == BBCT_FINALLY) &&
@@ -20206,12 +20381,14 @@ CHECK_HND:;
}
// You can jump within the same handler region
- if (bbInHandlerRegions(block->getHndIndex(), blockPred))
+ if (bbInHandlerRegions(block->getHndIndex(), blockPred)) {
goto CHECK_JUMP;
+}
// A filter can jump to the start of the filter handler
- if (ehHndDsc->HasFilter())
+ if (ehHndDsc->HasFilter()) {
goto CHECK_JUMP;
+}
printf("Jump into the middle of handler region: BB%02u branches to BB%02u\n", blockPred->bbNum, block->bbNum);
noway_assert(!"Jump into the middle of handler region");
@@ -20256,11 +20433,13 @@ CHECK_JUMP:;
for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext)
{
- if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
+ if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) {
continue;
+}
- if (block == bcall->bbNext)
+ if (block == bcall->bbNext) {
goto PRED_OK;
+}
}
#if FEATURE_EH_FUNCLETS
@@ -20274,14 +20453,17 @@ CHECK_JUMP:;
for (BasicBlock* bcall = fgFirstFuncletBB; bcall; bcall = bcall->bbNext)
{
- if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
+ if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) {
continue;
+}
- if (block != bcall->bbNext)
+ if (block != bcall->bbNext) {
continue;
+}
- if (ehCallFinallyInCorrectRegion(bcall, hndIndex))
+ if (ehCallFinallyInCorrectRegion(bcall, hndIndex)) {
goto PRED_OK;
+}
}
}
@@ -20465,11 +20647,15 @@ void Compiler::fgDebugCheckFlags(GenTreePtr tree)
/* Recursively check the subtrees */
- if (op1) fgDebugCheckFlags(op1);
- if (op2) fgDebugCheckFlags(op2);
+ if (op1) { fgDebugCheckFlags(op1);
+}
+ if (op2) { fgDebugCheckFlags(op2);
+}
- if (op1) chkFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
- if (op2) chkFlags |= (op2->gtFlags & GTF_ALL_EFFECT);
+ if (op1) { chkFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
+}
+ if (op2) { chkFlags |= (op2->gtFlags & GTF_ALL_EFFECT);
+}
// We reuse the value of GTF_REVERSE_OPS for a GT_IND-specific flag,
// so exempt that (unary) operator.
@@ -20531,7 +20717,7 @@ void Compiler::fgDebugCheckFlags(GenTreePtr tree)
/* See what kind of a special operator we have here */
- else switch (tree->OperGet())
+ else { switch (tree->OperGet())
{
case GT_CALL:
@@ -20598,7 +20784,7 @@ void Compiler::fgDebugCheckFlags(GenTreePtr tree)
}
}
- if ((call->gtCallType == CT_INDIRECT) && (call->gtCallCookie != NULL))
+ if ((call->gtCallType == CT_INDIRECT) && (call->gtCallCookie != nullptr))
{
fgDebugCheckFlags(call->gtCallCookie);
chkFlags |= (call->gtCallCookie->gtFlags & GTF_SIDE_EFFECT);
@@ -20654,6 +20840,7 @@ void Compiler::fgDebugCheckFlags(GenTreePtr tree)
default:
break;
}
+}
if (chkFlags & ~treeFlags)
{
@@ -20746,7 +20933,7 @@ void Compiler::fgDebugCheckNodeLinks(BasicBlock* block, GenTree* node)
/* Cross-check gtPrev,gtNext with gtOp for simple trees */
- GenTreePtr expectedPrevTree = NULL;
+ GenTreePtr expectedPrevTree = nullptr;
if (tree->OperIsLeaf())
{
@@ -20807,7 +20994,7 @@ void Compiler::fgDebugCheckNodeLinks(BasicBlock* block, GenTree* node)
}
}
- noway_assert(expectedPrevTree == NULL || // No expectations about the prev node
+ noway_assert(expectedPrevTree == nullptr || // No expectations about the prev node
tree->gtPrev == expectedPrevTree); // The "normal" case
}
}
@@ -20901,8 +21088,9 @@ void Compiler::fgDebugCheckLinearNodeLinks(BasicBlock* block,
// Consider whether we should do some alternate checking in that case (e.g. just validate
// the list is correct OR validate the corresponding top-level statement, which we probably
// just finished doing, OR fix all callees to check whether it's top-level before calling this).
- if ((topLevelStmt->gtFlags & GTF_STMT_TOP_LEVEL) == 0)
+ if ((topLevelStmt->gtFlags & GTF_STMT_TOP_LEVEL) == 0) {
return;
+}
// We're first going to traverse the statements in linear order, counting the nodes and ensuring that
// the links are consistent.
@@ -21002,8 +21190,9 @@ bool Compiler::fgStmtContainsNode(GenTreeStmt* stmt, GenTree* tree)
actual != nullptr;
actual = actual->gtNext)
{
- if (actual == tree)
+ if (actual == tree) {
return true;
+}
}
return false;
}
@@ -21182,8 +21371,9 @@ bool Compiler::fgNodeContainsEmbeddedStatement(GenTree* tree, GenTreeStmt* topLe
curStmt != nullptr && curStmt->gtStmt.gtStmtIsEmbedded();
curStmt = curStmt->gtNext)
{
- if (curStmt->gtStmt.gtStmtList == actual)
+ if (curStmt->gtStmt.gtStmtList == actual) {
return true;
+}
}
}
return false;
@@ -21288,12 +21478,14 @@ unsigned Compiler::fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo)
void Compiler::fgInline()
{
- if (!opts.OptEnabled(CLFLG_INLINING))
+ if (!opts.OptEnabled(CLFLG_INLINING)) {
return;
+}
#ifdef DEBUG
- if (verbose)
+ if (verbose) {
printf("*************** In fgInline()\n");
+}
#endif // DEBUG
BasicBlock* block = fgFirstBB;
@@ -21778,7 +21970,7 @@ void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call,
noway_assert(opts.OptEnabled(CLFLG_INLINING));
// This is the InlineInfo struct representing a method to be inlined.
- InlineInfo inlineInfo = {0};
+ InlineInfo inlineInfo = {nullptr};
CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
@@ -21787,7 +21979,7 @@ void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call,
inlineInfo.iciStmt = fgMorphStmt;
inlineInfo.iciBlock = compCurBB;
inlineInfo.thisDereferencedFirst = false;
- inlineInfo.retExpr = NULL;
+ inlineInfo.retExpr = nullptr;
inlineInfo.inlineResult = inlineResult;
#ifdef FEATURE_SIMD
inlineInfo.hasSIMDTypeArgLocalOrReturn = false;
@@ -21819,7 +22011,7 @@ void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call,
CORINFO_METHOD_HANDLE fncHandle;
InlineCandidateInfo* inlineCandidateInfo;
InlineInfo* inlineInfo;
- } param = {0};
+ } param = {nullptr};
param.pThis = this;
param.call = call;
@@ -21841,7 +22033,7 @@ void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call,
//
pParam->inlineInfo->InlinerCompiler = pParam->pThis;
- if (pParam->pThis->impInlineInfo == NULL)
+ if (pParam->pThis->impInlineInfo == nullptr)
{
pParam->inlineInfo->InlineRoot = pParam->pThis;
}
@@ -21876,7 +22068,7 @@ void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call,
pParam->pThis->info.compCompHnd,
&pParam->inlineCandidateInfo->methInfo,
(void**)pParam->inlineInfo,
- NULL,
+ nullptr,
&compileFlagsForInlinee,
pParam->inlineInfo);
@@ -21929,7 +22121,7 @@ void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call,
// (This could happen for example for a BBJ_THROW block fall through a BBJ_RETURN block which
// causes the BBJ_RETURN block not to be imported at all.)
// Fail the inlining attempt
- if (inlineCandidateInfo->fncRetType != TYP_VOID && inlineInfo.retExpr == NULL)
+ if (inlineCandidateInfo->fncRetType != TYP_VOID && inlineInfo.retExpr == nullptr)
{
#ifdef DEBUG
if (verbose)
@@ -21946,7 +22138,7 @@ void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call,
{
// we defer the call to initClass() until inlining is completed in case it fails. If inlining succeeds,
// we will call initClass().
- if (!(info.compCompHnd->initClass(NULL /* field */, fncHandle /* method */,
+ if (!(info.compCompHnd->initClass(nullptr /* field */, fncHandle /* method */,
inlineCandidateInfo->exactContextHnd /* context */) & CORINFO_INITCLASS_INITIALIZED))
{
inlineResult->NoteFatal(InlineObservation::CALLEE_CLASS_INIT_FAILURE);
@@ -21999,13 +22191,13 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
// We can write better assert here. For example, we can check that
// iciBlock contains iciStmt, which in turn contains iciCall.
- noway_assert(iciBlock->bbTreeList != NULL);
- noway_assert(iciStmt->gtStmt.gtStmtExpr != NULL);
+ noway_assert(iciBlock->bbTreeList != nullptr);
+ noway_assert(iciStmt->gtStmt.gtStmtExpr != nullptr);
noway_assert(iciCall->gtOper == GT_CALL);
#ifdef DEBUG
- GenTreePtr currentDumpStmt = NULL;
+ GenTreePtr currentDumpStmt = nullptr;
if (verbose)
{
@@ -22052,7 +22244,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
{
// When fgBBCount is 1 we will always have a non-NULL fgFirstBB
//
- PREFAST_ASSUME(InlineeCompiler->fgFirstBB != NULL);
+ PREFAST_ASSUME(InlineeCompiler->fgFirstBB != nullptr);
// DDB 91389: Don't throw away the (only) inlinee block
// when its return type is not BBJ_RETURN.
@@ -22129,10 +22321,10 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
GenTreePtr bottomBlock_Begin;
GenTreePtr bottomBlock_End;
- topBlock_Begin = NULL;
- topBlock_End = NULL;
- bottomBlock_Begin = NULL;
- bottomBlock_End = NULL;
+ topBlock_Begin = nullptr;
+ topBlock_End = nullptr;
+ bottomBlock_Begin = nullptr;
+ bottomBlock_End = nullptr;
//
// First figure out bottomBlock_Begin
@@ -22140,13 +22332,13 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
bottomBlock_Begin = stmtAfter->gtNext;
- if (topBlock->bbTreeList == NULL)
+ if (topBlock->bbTreeList == nullptr)
{
// topBlock is empty before the split.
// In this case, both topBlock and bottomBlock should be empty
- noway_assert(bottomBlock_Begin == NULL);
- topBlock->bbTreeList = NULL;
- bottomBlock->bbTreeList = NULL;
+ noway_assert(bottomBlock_Begin == nullptr);
+ topBlock->bbTreeList = nullptr;
+ bottomBlock->bbTreeList = nullptr;
}
else if (topBlock->bbTreeList == bottomBlock_Begin)
{
@@ -22156,9 +22348,9 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
// And the split is before the first statement.
// In this case, topBlock should be empty, and everything else should be moved to the bottomBlock.
bottomBlock->bbTreeList = topBlock->bbTreeList;
- topBlock->bbTreeList = NULL;
+ topBlock->bbTreeList = nullptr;
}
- else if (bottomBlock_Begin == NULL)
+ else if (bottomBlock_Begin == nullptr)
{
noway_assert(topBlock->bbTreeList);
@@ -22166,7 +22358,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
// And the split is at the end of the topBlock.
// In this case, everything should be kept in the topBlock, and the bottomBlock should be empty
- bottomBlock->bbTreeList = NULL;
+ bottomBlock->bbTreeList = nullptr;
}
else
{
@@ -22182,7 +22374,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
noway_assert(bottomBlock_End);
// Break the linkage between 2 blocks.
- topBlock_End->gtNext = NULL;
+ topBlock_End->gtNext = nullptr;
// Fix up all the pointers.
topBlock->bbTreeList = topBlock_Begin;
@@ -22200,7 +22392,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
inheritWeight = true; // The firstBB does inherit the weight from the iciBlock
for (block = InlineeCompiler->fgFirstBB;
- block != NULL;
+ block != nullptr;
block = block->bbNext)
{
noway_assert(!block->hasTryIndex());
@@ -22372,7 +22564,7 @@ GenTreePtr Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo)
// Note: Here we no longer do the optimization that was done by thisDereferencedFirst in the old inliner.
// However the assertionProp logic will remove any unnecessary null checks that we may have added
//
- GenTreePtr nullcheck = NULL;
+ GenTreePtr nullcheck = nullptr;
if (call->gtFlags & GTF_CALL_NULLCHECK && !inlineInfo->thisDereferencedFirst)
{
@@ -22640,13 +22832,15 @@ Compiler::fgWalkResult Compiler::fgChkThrowCB(GenTreePtr* pTree,
case GT_ASG_ADD:
case GT_ASG_SUB:
case GT_CAST:
- if (tree->gtOverflow())
+ if (tree->gtOverflow()) {
return Compiler::WALK_ABORT;
+}
break;
case GT_INDEX:
- if (tree->gtFlags & GTF_INX_RNGCHK)
+ if (tree->gtFlags & GTF_INX_RNGCHK) {
return Compiler::WALK_ABORT;
+}
break;
case GT_ARR_BOUNDS_CHECK:
@@ -22666,8 +22860,9 @@ Compiler::fgWalkResult Compiler::fgChkLocAllocCB(GenTreePtr* pTree,
{
GenTreePtr tree = *pTree;
- if (tree->gtOper == GT_LCLHEAP)
+ if (tree->gtOper == GT_LCLHEAP) {
return Compiler::WALK_ABORT;
+}
return Compiler::WALK_CONTINUE;
}
@@ -22679,8 +22874,9 @@ Compiler::fgWalkResult Compiler::fgChkQmarkCB(GenTreePtr* pTree,
{
GenTreePtr tree = *pTree;
- if (tree->gtOper == GT_QMARK)
+ if (tree->gtOper == GT_QMARK) {
return Compiler::WALK_ABORT;
+}
return Compiler::WALK_CONTINUE;
}
diff --git a/src/jit/fp.h b/src/jit/fp.h
index eb8a79613d..f1cee9581a 100644
--- a/src/jit/fp.h
+++ b/src/jit/fp.h
@@ -11,62 +11,62 @@
enum dummyFPenum
{
-#define REGDEF(name, rnum, mask, sname) dummmy_##name = rnum,
+#define REGDEF(name, rnum, mask, sname) dummmy_##name = rnum,
#include "registerfp.h"
FP_VIRTUALREGISTERS,
};
// FlatFPStateX87 holds the state of the virtual register file. For each
-// virtual register we keep track to which physical register we're
-// virtual register we keep track to which physical register we're
+// virtual register we keep track to which physical register we're
// mapping. We also keep track of the physical stack.
#define FP_PHYSICREGISTERS FP_VIRTUALREGISTERS
-#define FP_VRNOTMAPPED -1
+#define FP_VRNOTMAPPED -1
struct FlatFPStateX87
{
-public:
- void Init (FlatFPStateX87* pFrom = 0);
- bool Mapped (unsigned uEntry); // Is virtual register mapped
- void Unmap (unsigned uEntry); // Unmaps a virtual register
- void Associate (unsigned uEntry, unsigned uStack);
- unsigned StackToST (unsigned uEntry); // Maps the stack to a ST(x) entry
- unsigned VirtualToST (unsigned uEntry);
- unsigned STToVirtual (unsigned uST);
- unsigned TopIndex ();
- unsigned TopVirtual ();
- void Rename (unsigned uVirtualTo, unsigned uVirtualFrom);
- unsigned Pop ();
- void Push (unsigned uEntry);
- bool IsEmpty ();
+public:
+ void Init(FlatFPStateX87* pFrom = 0);
+ bool Mapped(unsigned uEntry); // Is virtual register mapped
+ void Unmap(unsigned uEntry); // Unmaps a virtual register
+ void Associate(unsigned uEntry, unsigned uStack);
+ unsigned StackToST(unsigned uEntry); // Maps the stack to a ST(x) entry
+ unsigned VirtualToST(unsigned uEntry);
+ unsigned STToVirtual(unsigned uST);
+ unsigned TopIndex();
+ unsigned TopVirtual();
+ void Rename(unsigned uVirtualTo, unsigned uVirtualFrom);
+ unsigned Pop();
+ void Push(unsigned uEntry);
+ bool IsEmpty();
// Debug/test methods
- static bool AreEqual (FlatFPStateX87* pSrc, FlatFPStateX87* pDst);
+ static bool AreEqual(FlatFPStateX87* pSrc, FlatFPStateX87* pDst);
#ifdef DEBUG
- bool IsValidEntry (unsigned uEntry);
- bool IsConsistent ();
- void UpdateMappingFromStack ();
- void Dump ();
+ bool IsValidEntry(unsigned uEntry);
+ bool IsConsistent();
+ void UpdateMappingFromStack();
+ void Dump();
// In some optimizations the stack will be inconsistent in some transactions. We want to keep
// the checks for everything else, so if you have the stack in an inconsistent state, you must
// ignore it on purpose.
- bool m_bIgnoreConsistencyChecks;
+ bool m_bIgnoreConsistencyChecks;
- inline void IgnoreConsistencyChecks(bool bIgnore)
+ inline void IgnoreConsistencyChecks(bool bIgnore)
{
m_bIgnoreConsistencyChecks = bIgnore;
}
#else
- inline void IgnoreConsistencyChecks(bool bIgnore)
+ inline void IgnoreConsistencyChecks(bool bIgnore)
{
}
#endif
- unsigned m_uVirtualMap[FP_VIRTUALREGISTERS];
- unsigned m_uStack[FP_PHYSICREGISTERS];
- unsigned m_uStackSize;
+ unsigned m_uVirtualMap[FP_VIRTUALREGISTERS];
+ unsigned m_uStack[FP_PHYSICREGISTERS];
+ unsigned m_uStackSize;
};
#endif // FEATURE_STACK_FP_X87
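Before moving on to gcencode.cpp, a reading aid for the FlatFPStateX87 declaration above: the sketch below is a toy C++ illustration (not the codegen implementation; TinyFPState and its members are made-up names) of the bookkeeping the header comment describes, where one array maps a virtual register to its slot on the simulated x87 stack and a second array records which virtual register occupies each slot.

#include <cassert>

// Illustrative only. Invariant: stack[map[v]] == v for every mapped virtual register v.
struct TinyFPState
{
    static const unsigned kRegs      = 8;
    static const unsigned kNotMapped = 0xFFFFFFFF; // plays the role of FP_VRNOTMAPPED

    unsigned map[kRegs];   // virtual register -> stack slot
    unsigned stack[kRegs]; // stack slot       -> virtual register
    unsigned size;

    TinyFPState() : size(0)
    {
        for (unsigned i = 0; i < kRegs; i++)
        {
            map[i] = kNotMapped;
        }
    }

    bool mapped(unsigned vreg) const
    {
        return map[vreg] != kNotMapped;
    }

    void push(unsigned vreg) // analogous to Push(): vreg becomes the new top of stack
    {
        assert(size < kRegs && !mapped(vreg));
        stack[size] = vreg;
        map[vreg]   = size++;
    }

    unsigned topVirtual() const // analogous to TopVirtual()
    {
        assert(size > 0);
        return stack[size - 1];
    }
};

int main()
{
    TinyFPState s;
    s.push(3); // virtual register 3 now occupies stack slot 0
    s.push(5);
    assert(s.topVirtual() == 5 && s.map[3] == 0);
    return 0;
}

Going by the declared methods (Pop, Rename, StackToST), the real struct presumably layers ST(x) numbering and renaming on top of the same m_uVirtualMap / m_uStack / m_uStackSize bookkeeping, but that is beyond this sketch.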
diff --git a/src/jit/gcencode.cpp b/src/jit/gcencode.cpp
index 1f9065e2a6..c13754dcc7 100644
--- a/src/jit/gcencode.cpp
+++ b/src/jit/gcencode.cpp
@@ -17,7 +17,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#ifdef _MSC_VER
#pragma hdrstop
-#pragma warning(disable:4244) // loss of data int -> char ..
+#pragma warning(disable : 4244) // loss of data int -> char ..
#endif
@@ -45,23 +45,26 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
//
// find . -name regen.txt | xargs cat | grep CallSite | sort | uniq -c | sort -r | head -80
-
#if REGEN_SHORTCUTS || REGEN_CALLPAT
-static FILE* logFile = NULL;
+static FILE* logFile = NULL;
CRITICAL_SECTION logFileLock;
#endif
#if REGEN_CALLPAT
-static void regenLog(unsigned codeDelta, unsigned argMask,
- unsigned regMask, unsigned argCnt,
- unsigned byrefArgMask, unsigned byrefRegMask,
- BYTE* base, unsigned enSize)
+static void regenLog(unsigned codeDelta,
+ unsigned argMask,
+ unsigned regMask,
+ unsigned argCnt,
+ unsigned byrefArgMask,
+ unsigned byrefRegMask,
+ BYTE* base,
+ unsigned enSize)
{
CallPattern pat;
- pat.fld.argCnt = (argCnt < 0xff) ? argCnt : 0xff;
- pat.fld.regMask = (regMask < 0xff) ? regMask : 0xff;
- pat.fld.argMask = (argMask < 0xff) ? argMask : 0xff;
+ pat.fld.argCnt = (argCnt < 0xff) ? argCnt : 0xff;
+ pat.fld.regMask = (regMask < 0xff) ? regMask : 0xff;
+ pat.fld.argMask = (argMask < 0xff) ? argMask : 0xff;
pat.fld.codeDelta = (codeDelta < 0xff) ? codeDelta : 0xff;
if (logFile == NULL)
@@ -70,12 +73,11 @@ static void regenLog(unsigned codeDelta, unsigned argMask,
InitializeCriticalSection(&logFileLock);
}
- assert(((enSize>0) && (enSize<256)) && ((pat.val & 0xffffff) != 0xffffff));
+ assert(((enSize > 0) && (enSize < 256)) && ((pat.val & 0xffffff) != 0xffffff));
EnterCriticalSection(&logFileLock);
- fprintf(logFile, "CallSite( 0x%08x, 0x%02x%02x, 0x",
- pat.val, byrefArgMask, byrefRegMask);
+ fprintf(logFile, "CallSite( 0x%08x, 0x%02x%02x, 0x", pat.val, byrefArgMask, byrefRegMask);
while (enSize > 0)
{
@@ -101,35 +103,19 @@ static void regenLog(unsigned encoding, InfoHdr* header, InfoHdr* state)
EnterCriticalSection(&logFileLock);
fprintf(logFile, "InfoHdr( %2d, %2d, %1d, %1d, %1d,"
- " %1d, %1d, %1d, %1d, %1d,"
- " %1d, %1d, %1d, %1d, %1d,"
- " %1d, %2d, %2d, %2d, %2d,"
- " %2d, %2d), \n",
- state->prologSize,
- state->epilogSize,
- state->epilogCount,
- state->epilogAtEnd,
- state->ediSaved,
- state->esiSaved,
- state->ebxSaved,
- state->ebpSaved,
- state->ebpFrame,
- state->interruptible,
- state->doubleAlign,
- state->security,
- state->handlers,
- state->localloc,
- state->editNcontinue,
- state->varargs,
- state->profCallbacks,
- state->argCount,
- state->frameSize,
- (state->untrackedCnt <= SET_UNTRACKED_MAX) ? state->untrackedCnt : HAS_UNTRACKED,
- (state->varPtrTableSize == 0) ? 0 : HAS_VARPTR,
- (state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET) ? 0 : HAS_GS_COOKIE_OFFSET,
- (state->syncStartOffset == INVALID_SYNC_OFFSET) ? 0 : HAS_SYNC_OFFSET,
- (state->syncStartOffset == INVALID_SYNC_OFFSET) ? 0 : HAS_SYNC_OFFSET
- );
+ " %1d, %1d, %1d, %1d, %1d,"
+ " %1d, %1d, %1d, %1d, %1d,"
+ " %1d, %2d, %2d, %2d, %2d,"
+ " %2d, %2d), \n",
+ state->prologSize, state->epilogSize, state->epilogCount, state->epilogAtEnd, state->ediSaved,
+ state->esiSaved, state->ebxSaved, state->ebpSaved, state->ebpFrame, state->interruptible,
+ state->doubleAlign, state->security, state->handlers, state->localloc, state->editNcontinue, state->varargs,
+ state->profCallbacks, state->argCount, state->frameSize,
+ (state->untrackedCnt <= SET_UNTRACKED_MAX) ? state->untrackedCnt : HAS_UNTRACKED,
+ (state->varPtrTableSize == 0) ? 0 : HAS_VARPTR,
+ (state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET) ? 0 : HAS_GS_COOKIE_OFFSET,
+ (state->syncStartOffset == INVALID_SYNC_OFFSET) ? 0 : HAS_SYNC_OFFSET,
+ (state->syncStartOffset == INVALID_SYNC_OFFSET) ? 0 : HAS_SYNC_OFFSET);
fflush(logFile);
@@ -150,15 +136,14 @@ static void regenLog(unsigned encoding, InfoHdr* header, InfoHdr* state)
* rest of the delta plus the other three items are encoded in the
* second byte.
*/
-int FASTCALL lookupCallPattern(unsigned argCnt, unsigned regMask,
- unsigned argMask, unsigned codeDelta)
+int FASTCALL lookupCallPattern(unsigned argCnt, unsigned regMask, unsigned argMask, unsigned codeDelta)
{
if ((argCnt <= CP_MAX_ARG_CNT) && (argMask <= CP_MAX_ARG_MASK))
{
CallPattern pat;
pat.fld.argCnt = argCnt;
- pat.fld.regMask = regMask; // EBP,EBX,ESI,EDI
+ pat.fld.regMask = regMask; // EBP,EBX,ESI,EDI
pat.fld.argMask = argMask;
pat.fld.codeDelta = codeDelta;
@@ -168,10 +153,8 @@ int FASTCALL lookupCallPattern(unsigned argCnt, unsigned regMask,
unsigned patval = pat.val;
assert(sizeof(CallPattern) == sizeof(unsigned));
- const unsigned * curp = &callPatternTable[0];
- for (unsigned inx = 0;
- inx < 80;
- inx++,curp++)
+ const unsigned* curp = &callPatternTable[0];
+ for (unsigned inx = 0; inx < 80; inx++, curp++)
{
unsigned curval = *curp;
if ((patval == curval) && codeDeltaOK)
@@ -182,8 +165,8 @@ int FASTCALL lookupCallPattern(unsigned argCnt, unsigned regMask,
unsigned delta2 = codeDelta - (curval >> 24);
if (delta2 < bestDelta2)
{
- bestDelta2 = delta2;
- bestPattern = inx;
+ bestDelta2 = delta2;
+ bestPattern = inx;
}
}
}
@@ -196,10 +179,7 @@ int FASTCALL lookupCallPattern(unsigned argCnt, unsigned regMask,
return -1;
}
-
-
-static bool initNeeded3(unsigned cur, unsigned tgt,
- unsigned max, unsigned* hint)
+static bool initNeeded3(unsigned cur, unsigned tgt, unsigned max, unsigned* hint)
{
assert(cur != tgt);
@@ -223,8 +203,7 @@ static bool initNeeded3(unsigned cur, unsigned tgt,
return true;
}
-static bool initNeeded4(unsigned cur, unsigned tgt,
- unsigned max, unsigned* hint)
+static bool initNeeded4(unsigned cur, unsigned tgt, unsigned max, unsigned* hint)
{
assert(cur != tgt);
@@ -296,25 +275,24 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state)
if (header.argCount <= SET_ARGCOUNT_MAX)
{
state->argCount = header.argCount;
- encoding = SET_ARGCOUNT + header.argCount;
+ encoding = SET_ARGCOUNT + header.argCount;
goto DO_RETURN;
}
else
{
unsigned hint;
- if (initNeeded4(state->argCount, header.argCount,
- SET_ARGCOUNT_MAX, &hint))
+ if (initNeeded4(state->argCount, header.argCount, SET_ARGCOUNT_MAX, &hint))
{
assert(hint <= SET_ARGCOUNT_MAX);
state->argCount = hint;
- encoding = SET_ARGCOUNT + hint;
+ encoding = SET_ARGCOUNT + hint;
goto DO_RETURN;
}
else
{
assert(hint <= 0xf);
state->argCount <<= 4;
- state->argCount += hint;
+ state->argCount += hint;
encoding = NEXT_FOUR_ARGCOUNT + hint;
goto DO_RETURN;
}
@@ -327,40 +305,38 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state)
if (header.frameSize <= SET_FRAMESIZE_MAX)
{
state->frameSize = header.frameSize;
- encoding = SET_FRAMESIZE + header.frameSize;
+ encoding = SET_FRAMESIZE + header.frameSize;
goto DO_RETURN;
}
else
{
unsigned hint;
- if (initNeeded4(state->frameSize, header.frameSize,
- SET_FRAMESIZE_MAX, &hint))
+ if (initNeeded4(state->frameSize, header.frameSize, SET_FRAMESIZE_MAX, &hint))
{
assert(hint <= SET_FRAMESIZE_MAX);
state->frameSize = hint;
- encoding = SET_FRAMESIZE + hint;
+ encoding = SET_FRAMESIZE + hint;
goto DO_RETURN;
}
else
{
assert(hint <= 0xf);
state->frameSize <<= 4;
- state->frameSize += hint;
+ state->frameSize += hint;
encoding = NEXT_FOUR_FRAMESIZE + hint;
goto DO_RETURN;
}
}
}
- if ((state->epilogCount != header.epilogCount) ||
- (state->epilogAtEnd != header.epilogAtEnd))
+ if ((state->epilogCount != header.epilogCount) || (state->epilogAtEnd != header.epilogAtEnd))
{
if (header.epilogCount > SET_EPILOGCNT_MAX)
IMPL_LIMITATION("More than SET_EPILOGCNT_MAX epilogs");
state->epilogCount = header.epilogCount;
state->epilogAtEnd = header.epilogAtEnd;
- encoding = SET_EPILOGCNT + header.epilogCount*2;
+ encoding = SET_EPILOGCNT + header.epilogCount * 2;
if (header.epilogAtEnd)
encoding++;
goto DO_RETURN;
@@ -369,37 +345,36 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state)
if (state->varPtrTableSize != header.varPtrTableSize)
{
assert(state->varPtrTableSize == 0 || state->varPtrTableSize == HAS_VARPTR);
-
+
if (state->varPtrTableSize == 0)
{
state->varPtrTableSize = HAS_VARPTR;
- encoding = FLIP_VAR_PTR_TABLE_SZ;
+ encoding = FLIP_VAR_PTR_TABLE_SZ;
goto DO_RETURN;
}
else if (header.varPtrTableSize == 0)
{
state->varPtrTableSize = 0;
- encoding = FLIP_VAR_PTR_TABLE_SZ;
+ encoding = FLIP_VAR_PTR_TABLE_SZ;
goto DO_RETURN;
}
}
-
+
if (state->untrackedCnt != header.untrackedCnt)
{
- assert(state->untrackedCnt <= SET_UNTRACKED_MAX ||
- state->untrackedCnt == HAS_UNTRACKED);
-
+ assert(state->untrackedCnt <= SET_UNTRACKED_MAX || state->untrackedCnt == HAS_UNTRACKED);
+
// We have one-byte encodings for 0..3
if (header.untrackedCnt <= SET_UNTRACKED_MAX)
{
state->untrackedCnt = header.untrackedCnt;
- encoding = SET_UNTRACKED + header.untrackedCnt;
+ encoding = SET_UNTRACKED + header.untrackedCnt;
goto DO_RETURN;
}
else if (state->untrackedCnt != HAS_UNTRACKED)
{
state->untrackedCnt = HAS_UNTRACKED;
- encoding = FFFF_UNTRACKED_CNT;
+ encoding = FFFF_UNTRACKED_CNT;
goto DO_RETURN;
}
}
@@ -410,25 +385,24 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state)
if (header.epilogSize <= SET_EPILOGSIZE_MAX)
{
state->epilogSize = header.epilogSize;
- encoding = SET_EPILOGSIZE + header.epilogSize;
+ encoding = SET_EPILOGSIZE + header.epilogSize;
goto DO_RETURN;
}
else
{
unsigned hint;
- if (initNeeded3(state->epilogSize, header.epilogSize,
- SET_EPILOGSIZE_MAX, &hint))
+ if (initNeeded3(state->epilogSize, header.epilogSize, SET_EPILOGSIZE_MAX, &hint))
{
assert(hint <= SET_EPILOGSIZE_MAX);
state->epilogSize = hint;
- encoding = SET_EPILOGSIZE + hint;
+ encoding = SET_EPILOGSIZE + hint;
goto DO_RETURN;
}
else
{
assert(hint <= 0x7);
state->epilogSize <<= 3;
- state->epilogSize += hint;
+ state->epilogSize += hint;
encoding = NEXT_THREE_EPILOGSIZE + hint;
goto DO_RETURN;
}
@@ -441,7 +415,7 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state)
if (header.prologSize <= SET_PROLOGSIZE_MAX)
{
state->prologSize = header.prologSize;
- encoding = SET_PROLOGSIZE + header.prologSize;
+ encoding = SET_PROLOGSIZE + header.prologSize;
goto DO_RETURN;
}
else
@@ -452,14 +426,14 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state)
{
assert(hint <= 15);
state->prologSize = hint;
- encoding = SET_PROLOGSIZE + hint;
+ encoding = SET_PROLOGSIZE + hint;
goto DO_RETURN;
}
else
{
assert(hint <= 0x7);
state->prologSize <<= 3;
- state->prologSize += hint;
+ state->prologSize += hint;
encoding = NEXT_THREE_PROLOGSIZE + hint;
goto DO_RETURN;
}
@@ -469,42 +443,42 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state)
if (state->ediSaved != header.ediSaved)
{
state->ediSaved = header.ediSaved;
- encoding = FLIP_EDI_SAVED;
+ encoding = FLIP_EDI_SAVED;
goto DO_RETURN;
}
if (state->esiSaved != header.esiSaved)
{
state->esiSaved = header.esiSaved;
- encoding = FLIP_ESI_SAVED;
+ encoding = FLIP_ESI_SAVED;
goto DO_RETURN;
}
if (state->ebxSaved != header.ebxSaved)
{
state->ebxSaved = header.ebxSaved;
- encoding = FLIP_EBX_SAVED;
+ encoding = FLIP_EBX_SAVED;
goto DO_RETURN;
}
if (state->ebpSaved != header.ebpSaved)
{
state->ebpSaved = header.ebpSaved;
- encoding = FLIP_EBP_SAVED;
+ encoding = FLIP_EBP_SAVED;
goto DO_RETURN;
}
if (state->ebpFrame != header.ebpFrame)
{
state->ebpFrame = header.ebpFrame;
- encoding = FLIP_EBP_FRAME;
+ encoding = FLIP_EBP_FRAME;
goto DO_RETURN;
}
if (state->interruptible != header.interruptible)
{
state->interruptible = header.interruptible;
- encoding = FLIP_INTERRUPTIBLE;
+ encoding = FLIP_INTERRUPTIBLE;
goto DO_RETURN;
}
@@ -512,7 +486,7 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state)
if (state->doubleAlign != header.doubleAlign)
{
state->doubleAlign = header.doubleAlign;
- encoding = FLIP_DOUBLE_ALIGN;
+ encoding = FLIP_DOUBLE_ALIGN;
goto DO_RETURN;
}
#endif
@@ -520,97 +494,95 @@ BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state)
if (state->security != header.security)
{
state->security = header.security;
- encoding = FLIP_SECURITY;
+ encoding = FLIP_SECURITY;
goto DO_RETURN;
}
if (state->handlers != header.handlers)
{
state->handlers = header.handlers;
- encoding = FLIP_HANDLERS;
+ encoding = FLIP_HANDLERS;
goto DO_RETURN;
}
if (state->localloc != header.localloc)
{
state->localloc = header.localloc;
- encoding = FLIP_LOCALLOC;
+ encoding = FLIP_LOCALLOC;
goto DO_RETURN;
}
if (state->editNcontinue != header.editNcontinue)
{
state->editNcontinue = header.editNcontinue;
- encoding = FLIP_EDITnCONTINUE;
+ encoding = FLIP_EDITnCONTINUE;
goto DO_RETURN;
}
if (state->varargs != header.varargs)
{
state->varargs = header.varargs;
- encoding = FLIP_VARARGS;
+ encoding = FLIP_VARARGS;
goto DO_RETURN;
}
if (state->profCallbacks != header.profCallbacks)
{
state->profCallbacks = header.profCallbacks;
- encoding = FLIP_PROF_CALLBACKS;
+ encoding = FLIP_PROF_CALLBACKS;
goto DO_RETURN;
}
if (state->genericsContext != header.genericsContext)
{
state->genericsContext = header.genericsContext;
- encoding = FLIP_HAS_GENERICS_CONTEXT;
+ encoding = FLIP_HAS_GENERICS_CONTEXT;
goto DO_RETURN;
}
if (state->genericsContextIsMethodDesc != header.genericsContextIsMethodDesc)
{
state->genericsContextIsMethodDesc = header.genericsContextIsMethodDesc;
- encoding = FLIP_GENERICS_CONTEXT_IS_METHODDESC;
+ encoding = FLIP_GENERICS_CONTEXT_IS_METHODDESC;
goto DO_RETURN;
}
if (state->gsCookieOffset != header.gsCookieOffset)
{
- assert(state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET ||
- state->gsCookieOffset == HAS_GS_COOKIE_OFFSET);
-
+ assert(state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET || state->gsCookieOffset == HAS_GS_COOKIE_OFFSET);
+
if (state->gsCookieOffset == INVALID_GS_COOKIE_OFFSET)
{
- // header.gsCookieOffset is non-zero. We can set it
+ // header.gsCookieOffset is non-zero. We can set it
// to zero using FLIP_HAS_GS_COOKIE
state->gsCookieOffset = HAS_GS_COOKIE_OFFSET;
- encoding = FLIP_HAS_GS_COOKIE;
+ encoding = FLIP_HAS_GS_COOKIE;
goto DO_RETURN;
}
else if (header.gsCookieOffset == INVALID_GS_COOKIE_OFFSET)
{
state->gsCookieOffset = INVALID_GS_COOKIE_OFFSET;
- encoding = FLIP_HAS_GS_COOKIE;
+ encoding = FLIP_HAS_GS_COOKIE;
goto DO_RETURN;
}
}
if (state->syncStartOffset != header.syncStartOffset)
{
- assert(state->syncStartOffset == INVALID_SYNC_OFFSET ||
- state->syncStartOffset == HAS_SYNC_OFFSET);
-
+ assert(state->syncStartOffset == INVALID_SYNC_OFFSET || state->syncStartOffset == HAS_SYNC_OFFSET);
+
if (state->syncStartOffset == INVALID_SYNC_OFFSET)
{
- // header.syncStartOffset is non-zero. We can set it
+ // header.syncStartOffset is non-zero. We can set it
// to zero using FLIP_SYNC
state->syncStartOffset = HAS_SYNC_OFFSET;
- encoding = FLIP_SYNC;
+ encoding = FLIP_SYNC;
goto DO_RETURN;
}
else if (header.syncStartOffset == INVALID_SYNC_OFFSET)
{
state->syncStartOffset = INVALID_SYNC_OFFSET;
- encoding = FLIP_SYNC;
+ encoding = FLIP_SYNC;
goto DO_RETURN;
}
}
@@ -622,7 +594,7 @@ DO_RETURN:
return encoding;
}
-static int measureDistance(const InfoHdr& header, const InfoHdrSmall * p, int closeness)
+static int measureDistance(const InfoHdr& header, const InfoHdrSmall* p, int closeness)
{
int distance = 0;
@@ -637,7 +609,8 @@ static int measureDistance(const InfoHdr& header, const InfoHdrSmall * p, int cl
{
distance += 1;
}
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->varPtrTableSize != header.varPtrTableSize)
@@ -652,69 +625,75 @@ static int measureDistance(const InfoHdr& header, const InfoHdrSmall * p, int cl
assert(p->varPtrTableSize == HAS_VARPTR);
distance += 1;
}
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->frameSize != header.frameSize)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
// We have one-byte encodings for 0..7
if (header.frameSize > SET_FRAMESIZE_MAX)
{
- distance += bigEncoding4(p->frameSize, header.frameSize,
- SET_FRAMESIZE_MAX);
- if (distance >= closeness) return distance;
+ distance += bigEncoding4(p->frameSize, header.frameSize, SET_FRAMESIZE_MAX);
+ if (distance >= closeness)
+ return distance;
}
}
if (p->argCount != header.argCount)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
// We have one-byte encodings for 0..8
if (header.argCount > SET_ARGCOUNT_MAX)
{
- distance += bigEncoding4(p->argCount, header.argCount,
- SET_ARGCOUNT_MAX);
- if (distance >= closeness) return distance;
+ distance += bigEncoding4(p->argCount, header.argCount, SET_ARGCOUNT_MAX);
+ if (distance >= closeness)
+ return distance;
}
}
if (p->prologSize != header.prologSize)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
// We have one-byte encodings for 0..16
if (header.prologSize > SET_PROLOGSIZE_MAX)
{
assert(SET_PROLOGSIZE_MAX > 15);
distance += bigEncoding3(p->prologSize, header.prologSize, 15);
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
}
if (p->epilogSize != header.epilogSize)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
// We have one-byte encodings for 0..10
if (header.epilogSize > SET_EPILOGSIZE_MAX)
{
- distance += bigEncoding3(p->epilogSize, header.epilogSize,
- SET_EPILOGSIZE_MAX);
- if (distance >= closeness) return distance;
+ distance += bigEncoding3(p->epilogSize, header.epilogSize, SET_EPILOGSIZE_MAX);
+ if (distance >= closeness)
+ return distance;
}
}
- if ((p->epilogCount != header.epilogCount) ||
- (p->epilogAtEnd != header.epilogAtEnd))
+ if ((p->epilogCount != header.epilogCount) || (p->epilogAtEnd != header.epilogAtEnd))
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
if (header.epilogCount > SET_EPILOGCNT_MAX)
IMPL_LIMITATION("More than SET_EPILOGCNT_MAX epilogs");
@@ -723,119 +702,136 @@ static int measureDistance(const InfoHdr& header, const InfoHdrSmall * p, int cl
if (p->ediSaved != header.ediSaved)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->esiSaved != header.esiSaved)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->ebxSaved != header.ebxSaved)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->ebpSaved != header.ebpSaved)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->ebpFrame != header.ebpFrame)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->interruptible != header.interruptible)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
#if DOUBLE_ALIGN
if (p->doubleAlign != header.doubleAlign)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
#endif
if (p->security != header.security)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->handlers != header.handlers)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->localloc != header.localloc)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->editNcontinue != header.editNcontinue)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->varargs != header.varargs)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->profCallbacks != header.profCallbacks)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->genericsContext != header.genericsContext)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (p->genericsContextIsMethodDesc != header.genericsContextIsMethodDesc)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (header.gsCookieOffset != INVALID_GS_COOKIE_OFFSET)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
if (header.syncStartOffset != INVALID_SYNC_OFFSET)
{
distance += 1;
- if (distance >= closeness) return distance;
+ if (distance >= closeness)
+ return distance;
}
return distance;
}
// DllMain calls gcInitEncoderLookupTable to fill in this table
-/* extern */ int infoHdrLookup[IH_MAX_PROLOG_SIZE+2];
+/* extern */ int infoHdrLookup[IH_MAX_PROLOG_SIZE + 2];
/* static */ void GCInfo::gcInitEncoderLookupTable()
{
- const InfoHdrSmall * p = &infoHdrShortcut[0];
- int lo = -1;
- int hi = 0;
- int n;
+ const InfoHdrSmall* p = &infoHdrShortcut[0];
+ int lo = -1;
+ int hi = 0;
+ int n;
for (n = 0; n < 128; n++, p++)
{
@@ -881,19 +877,22 @@ static int measureDistance(const InfoHdr& header, const InfoHdrSmall * p, int cl
assert(callCommonDelta[2] < callCommonDelta[3]);
assert(sizeof(CallPattern) == sizeof(unsigned));
unsigned maxMarks = 0;
- for (unsigned inx=0; inx < 80; inx++)
+ for (unsigned inx = 0; inx < 80; inx++)
{
CallPattern pat;
pat.val = callPatternTable[inx];
assert(pat.fld.codeDelta <= CP_MAX_CODE_DELTA);
- if (pat.fld.codeDelta == CP_MAX_CODE_DELTA) maxMarks |= 0x01;
+ if (pat.fld.codeDelta == CP_MAX_CODE_DELTA)
+ maxMarks |= 0x01;
- assert(pat.fld.argCnt <= CP_MAX_ARG_CNT);
- if (pat.fld.argCnt == CP_MAX_ARG_CNT) maxMarks |= 0x02;
+ assert(pat.fld.argCnt <= CP_MAX_ARG_CNT);
+ if (pat.fld.argCnt == CP_MAX_ARG_CNT)
+ maxMarks |= 0x02;
- assert(pat.fld.argMask <= CP_MAX_ARG_MASK);
- if (pat.fld.argMask == CP_MAX_ARG_MASK) maxMarks |= 0x04;
+ assert(pat.fld.argMask <= CP_MAX_ARG_MASK);
+ if (pat.fld.argMask == CP_MAX_ARG_MASK)
+ maxMarks |= 0x04;
}
assert(maxMarks == 0x07);
#endif
@@ -905,8 +904,8 @@ BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more
{
// First try the cached value for an exact match, if there is one
//
- int n = *pCached;
- const InfoHdrSmall * p;
+ int n = *pCached;
+ const InfoHdrSmall* p;
if (n != NO_CACHED_HEADER)
{
@@ -915,7 +914,7 @@ BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more
{
// exact match found
GetInfoHdr(n, state);
- *more = 0;
+ *more = 0;
return n;
}
}
@@ -926,23 +925,23 @@ BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more
// range of entries that have the correct prolog size
//
unsigned psz = header.prologSize;
- int lo = 0;
- int hi = 0;
+ int lo = 0;
+ int hi = 0;
if (psz <= IH_MAX_PROLOG_SIZE)
{
lo = infoHdrLookup[psz];
- hi = infoHdrLookup[psz+1];
+ hi = infoHdrLookup[psz + 1];
p = &infoHdrShortcut[lo];
- for (n=lo; n<hi; n++,p++)
+ for (n = lo; n < hi; n++, p++)
{
assert(psz == p->prologSize);
if (p->isHeaderMatch(header))
{
// exact match found
GetInfoHdr(n, state);
- *pCached = n; // cache the value
- *more = 0;
+ *pCached = n; // cache the value
+ *more = 0;
return n;
}
}
@@ -954,7 +953,7 @@ BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more
// find the nearest entry in the table
//
int nearest = -1;
- int closeness = 255; // (i.e. not very close)
+ int closeness = 255; // (i.e. not very close)
//
// Calculate the minimum acceptable distance
@@ -981,13 +980,13 @@ BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more
//
if (*pCached != NO_CACHED_HEADER)
{
- p = &infoHdrShortcut[*pCached];
+ p = &infoHdrShortcut[*pCached];
int distance = measureDistance(header, p, closeness);
assert(distance > 0);
if (distance <= min_acceptable_distance)
{
GetInfoHdr(*pCached, state);
- *more = distance;
+ *more = distance;
return 0x80 | *pCached;
}
else
@@ -1001,17 +1000,17 @@ BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more
// (i.e. the ones that have the correct prolog size)
//
p = &infoHdrShortcut[lo];
- for (n=lo; n<hi; n++,p++)
+ for (n = lo; n < hi; n++, p++)
{
if (n == *pCached)
- continue; // already tried this one
+ continue; // already tried this one
int distance = measureDistance(header, p, closeness);
assert(distance > 0);
if (distance <= min_acceptable_distance)
{
GetInfoHdr(n, state);
- *pCached = n; // Cache this value
- *more = distance;
+ *pCached = n; // Cache this value
+ *more = distance;
return 0x80 | n;
}
else if (distance < closeness)
@@ -1021,30 +1020,30 @@ BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more
}
}
- int last = infoHdrLookup[IH_MAX_PROLOG_SIZE+1];
+ int last = infoHdrLookup[IH_MAX_PROLOG_SIZE + 1];
assert(last <= 128);
// Then try all the rest [0..last-1]
p = &infoHdrShortcut[0];
- for (n=0; n<last; n++,p++)
+ for (n = 0; n < last; n++, p++)
{
if (n == *pCached)
- continue; // already tried this one
- if ((n>=lo) && (n<hi))
- continue; // already tried these
+ continue; // already tried this one
+ if ((n >= lo) && (n < hi))
+ continue; // already tried these
int distance = measureDistance(header, p, closeness);
assert(distance > 0);
if (distance <= min_acceptable_distance)
{
GetInfoHdr(n, state);
- *pCached = n; // Cache this value
- *more = distance;
+ *pCached = n; // Cache this value
+ *more = distance;
return 0x80 | n;
}
else if (distance < closeness)
{
closeness = distance;
- nearest = n;
+ nearest = n;
}
}
@@ -1055,8 +1054,8 @@ BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more
//
assert((nearest >= 0) && (nearest <= 127));
GetInfoHdr(nearest, state);
- *pCached = nearest; // Cache this value
- *more = closeness;
+ *pCached = nearest; // Cache this value
+ *more = closeness;
return 0x80 | nearest;
}
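// A minimal caller-side sketch (illustrative only, not part of this change) of how the
// byte returned above is meant to be read: an exact table hit comes back as the plain
// infoHdrShortcut[] index with *more set to 0, while a nearest-match has bit 0x80 set
// and *more holds the distance that still has to be encoded afterwards.
static bool gcHeaderEncodingIsExact(BYTE headerEncoding)
{
    // Bit 0x80 marks a nearest-match; the low seven bits are the table index (0..127).
    return (headerEncoding & 0x80) == 0;
}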
@@ -1067,33 +1066,32 @@ BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more
* to actually generate the contents of the table (mask=-1,dest!=NULL).
*/
-size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
- int mask,
- unsigned methodSize,
- unsigned prologSize,
- unsigned epilogSize,
- InfoHdr* header,
- int* pCached)
+size_t GCInfo::gcInfoBlockHdrSave(
+ BYTE* dest, int mask, unsigned methodSize, unsigned prologSize, unsigned epilogSize, InfoHdr* header, int* pCached)
{
#ifdef DEBUG
- if (compiler->verbose)
+ if (compiler->verbose)
printf("*************** In gcInfoBlockHdrSave()\n");
#endif
- size_t size = 0;
+ size_t size = 0;
#if VERIFY_GC_TABLES
- *castto(dest, unsigned short *)++ = 0xFEEF; size += sizeof(short);
+ *castto(dest, unsigned short*)++ = 0xFEEF;
+ size += sizeof(short);
#endif
/* Write the method size first (using between 1 and 5 bytes) */
CLANG_FORMAT_COMMENT_ANCHOR;
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
- if (mask) printf("GCINFO: methodSize = %04X\n", methodSize);
- if (mask) printf("GCINFO: prologSize = %04X\n", prologSize);
- if (mask) printf("GCINFO: epilogSize = %04X\n", epilogSize);
+ if (mask)
+ printf("GCINFO: methodSize = %04X\n", methodSize);
+ if (mask)
+ printf("GCINFO: prologSize = %04X\n", prologSize);
+ if (mask)
+ printf("GCINFO: epilogSize = %04X\n", epilogSize);
}
#endif
@@ -1107,14 +1105,14 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
// Typically only uses one-byte to store everything.
//
- if (mask==0)
+ if (mask == 0)
{
memset(header, 0, sizeof(InfoHdr));
*pCached = NO_CACHED_HEADER;
}
assert(FitsIn<unsigned char>(prologSize));
- header->prologSize = static_cast<unsigned char>(prologSize);
+ header->prologSize = static_cast<unsigned char>(prologSize);
assert(FitsIn<unsigned char>(epilogSize));
header->epilogSize = static_cast<unsigned char>(epilogSize);
header->epilogCount = compiler->getEmitter()->emitGetEpilogCnt();
@@ -1122,22 +1120,19 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
IMPL_LIMITATION("emitGetEpilogCnt() does not fit in InfoHdr::epilogCount");
header->epilogAtEnd = compiler->getEmitter()->emitHasEpilogEnd();
-
if (compiler->codeGen->regSet.rsRegsModified(RBM_EDI))
header->ediSaved = 1;
if (compiler->codeGen->regSet.rsRegsModified(RBM_ESI))
- header->esiSaved = 1;
+ header->esiSaved = 1;
if (compiler->codeGen->regSet.rsRegsModified(RBM_EBX))
header->ebxSaved = 1;
-
header->interruptible = compiler->codeGen->genInterruptible;
-
- if (!compiler->isFramePointerUsed())
+ if (!compiler->isFramePointerUsed())
{
#if DOUBLE_ALIGN
- if (compiler->genDoubleAlign())
+ if (compiler->genDoubleAlign())
{
header->ebpSaved = true;
assert(!compiler->codeGen->regSet.rsRegsModified(RBM_EBP));
@@ -1154,7 +1149,6 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
header->ebpFrame = true;
}
-
#if DOUBLE_ALIGN
header->doubleAlign = compiler->genDoubleAlign();
#endif
@@ -1164,22 +1158,23 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
header->handlers = compiler->ehHasCallableHandlers();
header->localloc = compiler->compLocallocUsed;
- header->varargs = compiler->info.compIsVarArgs;
- header->profCallbacks = compiler->info.compProfilerCallback;
- header->editNcontinue = compiler->opts.compDbgEnC;
+ header->varargs = compiler->info.compIsVarArgs;
+ header->profCallbacks = compiler->info.compProfilerCallback;
+ header->editNcontinue = compiler->opts.compDbgEnC;
header->genericsContext = compiler->lvaReportParamTypeArg();
- header->genericsContextIsMethodDesc = header->genericsContext && (compiler->info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC));
+ header->genericsContextIsMethodDesc =
+ header->genericsContext && (compiler->info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC));
header->gsCookieOffset = INVALID_GS_COOKIE_OFFSET;
if (compiler->getNeedsGSSecurityCookie())
{
assert(compiler->lvaGSSecurityCookie != BAD_VAR_NUM);
- int stkOffs = compiler->lvaTable[compiler->lvaGSSecurityCookie].lvStkOffs;
+ int stkOffs = compiler->lvaTable[compiler->lvaGSSecurityCookie].lvStkOffs;
header->gsCookieOffset = compiler->isFramePointerUsed() ? -stkOffs : stkOffs;
assert(header->gsCookieOffset != INVALID_GS_COOKIE_OFFSET);
}
header->syncStartOffset = INVALID_SYNC_OFFSET;
- header->syncEndOffset = INVALID_SYNC_OFFSET;
+ header->syncEndOffset = INVALID_SYNC_OFFSET;
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
assert(compiler->syncStartEmitCookie != NULL);
@@ -1197,17 +1192,19 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
assert((compiler->compArgSize & 0x3) == 0);
- size_t argCount = (compiler->compArgSize - (compiler->codeGen->intRegState.rsCalleeRegArgCount * sizeof(void *))) / sizeof(void*);
+ size_t argCount =
+ (compiler->compArgSize - (compiler->codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*))) / sizeof(void*);
assert(argCount <= MAX_USHORT_SIZE_T);
- header->argCount = static_cast<unsigned short>(argCount);
+ header->argCount = static_cast<unsigned short>(argCount);
header->frameSize = compiler->compLclFrameSize / sizeof(int);
if (header->frameSize != (compiler->compLclFrameSize / sizeof(int)))
IMPL_LIMITATION("compLclFrameSize does not fit in InfoHdr::frameSize");
- if (mask==0)
+ if (mask == 0)
{
- gcCountForHeader((UNALIGNED unsigned int*)&header->untrackedCnt, (UNALIGNED unsigned int*)&header->varPtrTableSize);
+ gcCountForHeader((UNALIGNED unsigned int*)&header->untrackedCnt,
+ (UNALIGNED unsigned int*)&header->varPtrTableSize);
}
//
@@ -1216,8 +1213,8 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
// until the fully state is encoded
//
InfoHdr state;
- int more = 0;
- BYTE headerEncoding = encodeHeaderFirst(*header, &state, &more, pCached);
+ int more = 0;
+ BYTE headerEncoding = encodeHeaderFirst(*header, &state, &more, pCached);
++size;
if (mask)
{
@@ -1245,7 +1242,7 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
if (header->untrackedCnt > SET_UNTRACKED_MAX)
{
unsigned count = header->untrackedCnt;
- unsigned sz = encodeUnsigned(mask ? dest : NULL, count);
+ unsigned sz = encodeUnsigned(mask ? dest : NULL, count);
size += sz;
dest += (sz & mask);
}
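// Illustrative note (not part of this change): the recurring "dest += (sz & mask)"
// idiom relies on mask being either 0 (pure sizing pass, encodeUnsigned is handed NULL
// and writes nothing) or -1 with all bits set (emission pass, mask=-1 and dest!=NULL as
// described in the function header). For example, with sz == 3:
//     mask ==  0  ->  dest += (3 &  0) == 0   // size is counted, nothing advances
//     mask == -1  ->  dest += (3 & -1) == 3   // bytes were really written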
@@ -1253,7 +1250,7 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
if (header->varPtrTableSize != 0)
{
unsigned count = header->varPtrTableSize;
- unsigned sz = encodeUnsigned(mask ? dest : NULL, count);
+ unsigned sz = encodeUnsigned(mask ? dest : NULL, count);
size += sz;
dest += (sz & mask);
}
@@ -1262,7 +1259,7 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
{
assert(mask == 0 || state.gsCookieOffset == HAS_GS_COOKIE_OFFSET);
unsigned offset = header->gsCookieOffset;
- unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
+ unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
size += sz;
dest += (sz & mask);
}
@@ -1273,29 +1270,28 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
{
unsigned offset = header->syncStartOffset;
- unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
+ unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
size += sz;
dest += (sz & mask);
}
{
unsigned offset = header->syncEndOffset;
- unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
+ unsigned sz = encodeUnsigned(mask ? dest : NULL, offset);
size += sz;
dest += (sz & mask);
}
}
-
if (header->epilogCount)
{
/* Generate table unless one epilog at the end of the method */
- if (header->epilogAtEnd == 0 ||
- header->epilogCount != 1)
+ if (header->epilogAtEnd == 0 || header->epilogCount != 1)
{
#if VERIFY_GC_TABLES
- *castto(dest, unsigned short *)++ = 0xFACE; size += sizeof(short);
+ *castto(dest, unsigned short*)++ = 0xFACE;
+ size += sizeof(short);
#endif
/* Simply write a sorted array of offsets using encodeUDelta */
@@ -1328,7 +1324,7 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
#endif // DISPLAY_SIZES
- return size;
+ return size;
}
/*****************************************************************************
@@ -1336,9 +1332,9 @@ size_t GCInfo::gcInfoBlockHdrSave(BYTE* dest,
* Return the size of the pointer tracking tables.
*/
-size_t GCInfo::gcPtrTableSize(const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset)
+size_t GCInfo::gcPtrTableSize(const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset)
{
- BYTE temp[16+1];
+ BYTE temp[16 + 1];
#ifdef DEBUG
temp[16] = 0xAB; // Set some marker
#endif
@@ -1356,13 +1352,16 @@ size_t GCInfo::gcPtrTableSize(const InfoHdr& header, unsigned codeS
* Encode the callee-saved registers into 3 bits.
*/
-unsigned gceEncodeCalleeSavedRegs(unsigned regs)
+unsigned gceEncodeCalleeSavedRegs(unsigned regs)
{
- unsigned encodedRegs = 0;
+ unsigned encodedRegs = 0;
- if (regs & RBM_EBX) encodedRegs |= 0x04;
- if (regs & RBM_ESI) encodedRegs |= 0x02;
- if (regs & RBM_EDI) encodedRegs |= 0x01;
+ if (regs & RBM_EBX)
+ encodedRegs |= 0x04;
+ if (regs & RBM_ESI)
+ encodedRegs |= 0x02;
+ if (regs & RBM_EDI)
+ encodedRegs |= 0x01;
return encodedRegs;
}
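// A small illustrative check of the 3-bit packing above (annotation only, not part of
// this change), using the register masks referenced in the function body:
//     gceEncodeCalleeSavedRegs(RBM_EBX | RBM_EDI) == 0x05   // EBX -> bit 2, EDI -> bit 0
//     gceEncodeCalleeSavedRegs(RBM_ESI)           == 0x02   // ESI -> bit 1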
@@ -1372,11 +1371,10 @@ unsigned gceEncodeCalleeSavedRegs(unsigned regs)
* interruptible encoding. Check only for pushes and registers
*/
-inline
-BYTE * gceByrefPrefixI(GCInfo::regPtrDsc * rpd, BYTE * dest)
+inline BYTE* gceByrefPrefixI(GCInfo::regPtrDsc* rpd, BYTE* dest)
{
// For registers, we don't need a prefix if it is going dead.
- assert(rpd->rpdArg || rpd->rpdCompiler.rpdDel==0);
+ assert(rpd->rpdArg || rpd->rpdCompiler.rpdDel == 0);
if (!rpd->rpdArg || rpd->rpdArgType == GCInfo::rpdARG_PUSH)
if (rpd->rpdGCtypeGet() == GCT_BYREF)
@@ -1389,80 +1387,94 @@ BYTE * gceByrefPrefixI(GCInfo::regPtrDsc * rpd, BYTE * dest)
/* These functions are needed to work around a VC5.0 compiler bug */
/* DO NOT REMOVE, unless you are sure that the free build works */
-static int zeroFN() { return 0; }
-static int (*zeroFunc)() = zeroFN;
+static int zeroFN()
+{
+ return 0;
+}
+static int (*zeroFunc)() = zeroFN;
/*****************************************************************************
* Modelling of the GC ptrs pushed on the stack
*/
-typedef unsigned pasMaskType;
-#define BITS_IN_pasMask (BITS_IN_BYTE * sizeof(pasMaskType))
-#define HIGHEST_pasMask_BIT (((pasMaskType)0x1) << (BITS_IN_pasMask-1))
+typedef unsigned pasMaskType;
+#define BITS_IN_pasMask (BITS_IN_BYTE * sizeof(pasMaskType))
+#define HIGHEST_pasMask_BIT (((pasMaskType)0x1) << (BITS_IN_pasMask - 1))
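// Illustrative arithmetic (not part of this change): with the usual 32-bit unsigned on
// the targets this file builds for, BITS_IN_pasMask is 8 * 4 == 32 and
// HIGHEST_pasMask_BIT is 0x80000000, so the first 32 pushed arguments live in the
// bottom masks and only deeper stacks spill into pasTopArray (see the class below).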
//-----------------------------------------------------------------------------
-class PendingArgsStack
+class PendingArgsStack
{
public:
+ PendingArgsStack(unsigned maxDepth, Compiler* pComp);
- PendingArgsStack (unsigned maxDepth, Compiler * pComp);
-
- void pasPush (GCtype gcType);
- void pasPop (unsigned count);
- void pasKill (unsigned gcCount);
+ void pasPush(GCtype gcType);
+ void pasPop(unsigned count);
+ void pasKill(unsigned gcCount);
- unsigned pasCurDepth () { return pasDepth; }
- pasMaskType pasArgMask () { assert(pasDepth <= BITS_IN_pasMask); return pasBottomMask; }
- pasMaskType pasByrefArgMask () { assert(pasDepth <= BITS_IN_pasMask); return pasByrefBottomMask; }
- bool pasHasGCptrs ();
+ unsigned pasCurDepth()
+ {
+ return pasDepth;
+ }
+ pasMaskType pasArgMask()
+ {
+ assert(pasDepth <= BITS_IN_pasMask);
+ return pasBottomMask;
+ }
+ pasMaskType pasByrefArgMask()
+ {
+ assert(pasDepth <= BITS_IN_pasMask);
+ return pasByrefBottomMask;
+ }
+ bool pasHasGCptrs();
// Use these in the case where there actually are more ptrs than pasArgMask
- unsigned pasEnumGCoffsCount();
- #define pasENUM_START ((unsigned)-1)
- #define pasENUM_LAST ((unsigned)-2)
- #define pasENUM_END ((unsigned)-3)
- unsigned pasEnumGCoffs (unsigned iter, unsigned * offs);
+ unsigned pasEnumGCoffsCount();
+#define pasENUM_START ((unsigned)-1)
+#define pasENUM_LAST ((unsigned)-2)
+#define pasENUM_END ((unsigned)-3)
+ unsigned pasEnumGCoffs(unsigned iter, unsigned* offs);
protected:
+ unsigned pasMaxDepth;
- unsigned pasMaxDepth;
-
- unsigned pasDepth;
+ unsigned pasDepth;
pasMaskType pasBottomMask; // The first 32 args
pasMaskType pasByrefBottomMask; // byref qualifier for pasBottomMask
- BYTE * pasTopArray; // More than 32 args are represented here
- unsigned pasPtrsInTopArray; // How many GCptrs here
+ BYTE* pasTopArray; // More than 32 args are represented here
+ unsigned pasPtrsInTopArray; // How many GCptrs here
};
-
//-----------------------------------------------------------------------------
-PendingArgsStack::PendingArgsStack(unsigned maxDepth, Compiler * pComp) :
- pasMaxDepth(maxDepth), pasDepth(0),
- pasBottomMask(0), pasByrefBottomMask(0),
- pasTopArray(NULL), pasPtrsInTopArray(0)
+PendingArgsStack::PendingArgsStack(unsigned maxDepth, Compiler* pComp)
+ : pasMaxDepth(maxDepth)
+ , pasDepth(0)
+ , pasBottomMask(0)
+ , pasByrefBottomMask(0)
+ , pasTopArray(NULL)
+ , pasPtrsInTopArray(0)
{
/* Do we need an array as well as the mask ? */
if (pasMaxDepth > BITS_IN_pasMask)
- pasTopArray = (BYTE *)pComp->compGetMemA(pasMaxDepth - BITS_IN_pasMask);
+ pasTopArray = (BYTE*)pComp->compGetMemA(pasMaxDepth - BITS_IN_pasMask);
}
//-----------------------------------------------------------------------------
-void PendingArgsStack::pasPush(GCtype gcType)
+void PendingArgsStack::pasPush(GCtype gcType)
{
assert(pasDepth < pasMaxDepth);
- if (pasDepth < BITS_IN_pasMask)
+ if (pasDepth < BITS_IN_pasMask)
{
/* Shift the mask */
- pasBottomMask <<= 1;
- pasByrefBottomMask <<= 1;
+ pasBottomMask <<= 1;
+ pasByrefBottomMask <<= 1;
if (needsGC(gcType))
{
@@ -1487,22 +1499,23 @@ void PendingArgsStack::pasPush(GCtype gcType)
//-----------------------------------------------------------------------------
-void PendingArgsStack::pasPop(unsigned count)
+void PendingArgsStack::pasPop(unsigned count)
{
assert(pasDepth >= count);
/* First pop from array (if applicable) */
- for (/**/; (pasDepth > BITS_IN_pasMask) && count; pasDepth--,count--)
+ for (/**/; (pasDepth > BITS_IN_pasMask) && count; pasDepth--, count--)
{
- unsigned topIndex = pasDepth - BITS_IN_pasMask - 1;
+ unsigned topIndex = pasDepth - BITS_IN_pasMask - 1;
- GCtype topArg = (GCtype)pasTopArray[topIndex];
+ GCtype topArg = (GCtype)pasTopArray[topIndex];
if (needsGC(topArg))
pasPtrsInTopArray--;
}
- if (count == 0) return;
+ if (count == 0)
+ return;
/* Now un-shift the mask */
@@ -1511,13 +1524,12 @@ void PendingArgsStack::pasPop(unsigned count)
if (count == BITS_IN_pasMask) // (x>>32) is a nop on x86. So special-case it
{
- pasBottomMask =
- pasByrefBottomMask = 0;
- pasDepth = 0;
+ pasBottomMask = pasByrefBottomMask = 0;
+ pasDepth = 0;
}
else
{
- pasBottomMask >>= count;
+ pasBottomMask >>= count;
pasByrefBottomMask >>= count;
pasDepth -= count;
}
@@ -1526,7 +1538,7 @@ void PendingArgsStack::pasPop(unsigned count)
//-----------------------------------------------------------------------------
// Kill (but don't pop) the top 'gcCount' args
-void PendingArgsStack::pasKill(unsigned gcCount)
+void PendingArgsStack::pasKill(unsigned gcCount)
{
assert(gcCount != 0);
@@ -1534,9 +1546,9 @@ void PendingArgsStack::pasKill(unsigned gcCount)
for (unsigned curPos = pasDepth; (curPos > BITS_IN_pasMask) && gcCount; curPos--)
{
- unsigned curIndex = curPos - BITS_IN_pasMask - 1;
+ unsigned curIndex = curPos - BITS_IN_pasMask - 1;
- GCtype curArg = (GCtype)pasTopArray[curIndex];
+ GCtype curArg = (GCtype)pasTopArray[curIndex];
if (needsGC(curArg))
{
@@ -1551,7 +1563,7 @@ void PendingArgsStack::pasKill(unsigned gcCount)
assert(pasPtrsInTopArray == 0);
assert(gcCount <= BITS_IN_pasMask);
- for (unsigned bitPos = 1; gcCount; bitPos<<=1)
+ for (unsigned bitPos = 1; gcCount; bitPos <<= 1)
{
assert(pasBottomMask != 0);
@@ -1572,7 +1584,7 @@ void PendingArgsStack::pasKill(unsigned gcCount)
// Used for the case where there are more than BITS_IN_pasMask args on stack,
// but none are any pointers. May avoid reporting anything to GCinfo
-bool PendingArgsStack::pasHasGCptrs()
+bool PendingArgsStack::pasHasGCptrs()
{
if (pasDepth <= BITS_IN_pasMask)
return pasBottomMask != 0;
@@ -1584,7 +1596,7 @@ bool PendingArgsStack::pasHasGCptrs()
// Iterates over mask and array to return total count.
// Use only when you are going to emit a table of the offsets
-unsigned PendingArgsStack::pasEnumGCoffsCount()
+unsigned PendingArgsStack::pasEnumGCoffsCount()
{
/* Should only be used in the worst case, when just the mask can't be used */
@@ -1594,7 +1606,7 @@ unsigned PendingArgsStack::pasEnumGCoffsCount()
unsigned count = 0;
- for (pasMaskType mask = 0x1, i = 0; i < BITS_IN_pasMask; mask<<=1, i++)
+ for (pasMaskType mask = 0x1, i = 0; i < BITS_IN_pasMask; mask <<= 1, i++)
{
if (mask & pasBottomMask)
count++;
@@ -1609,21 +1621,22 @@ unsigned PendingArgsStack::pasEnumGCoffsCount()
// End of enumeration when pasENUM_END is returned
// If return value != pasENUM_END, *offs is set to the offset for GCinfo
-unsigned PendingArgsStack::pasEnumGCoffs(unsigned iter, unsigned * offs)
+unsigned PendingArgsStack::pasEnumGCoffs(unsigned iter, unsigned* offs)
{
- if (iter == pasENUM_LAST) return pasENUM_END;
+ if (iter == pasENUM_LAST)
+ return pasENUM_END;
unsigned i = (iter == pasENUM_START) ? pasDepth : iter;
for (/**/; i > BITS_IN_pasMask; i--)
{
- GCtype curArg = (GCtype)pasTopArray[i-BITS_IN_pasMask-1];
+ GCtype curArg = (GCtype)pasTopArray[i - BITS_IN_pasMask - 1];
if (needsGC(curArg))
{
- unsigned offset;
+ unsigned offset;
- offset = (pasDepth - i) * sizeof(void*);
- if (curArg==GCT_BYREF)
+ offset = (pasDepth - i) * sizeof(void*);
+ if (curArg == GCT_BYREF)
offset |= byref_OFFSET_FLAG;
*offs = offset;
@@ -1631,28 +1644,29 @@ unsigned PendingArgsStack::pasEnumGCoffs(unsigned iter, unsigned * offs)
}
}
- if (!pasBottomMask) return pasENUM_END;
+ if (!pasBottomMask)
+ return pasENUM_END;
// Have we already processed some of the bits in pasBottomMask ?
- i = (iter == pasENUM_START || iter >= BITS_IN_pasMask) ? 0 // no
- : iter; // yes
+ i = (iter == pasENUM_START || iter >= BITS_IN_pasMask) ? 0 // no
+ : iter; // yes
- for (pasMaskType mask = 0x1 << i; mask; i++, mask<<=1)
+ for (pasMaskType mask = 0x1 << i; mask; i++, mask <<= 1)
{
if (mask & pasBottomMask)
{
- unsigned lvl = (pasDepth>BITS_IN_pasMask) ? (pasDepth-BITS_IN_pasMask) : 0; // How many in pasTopArray[]
+ unsigned lvl = (pasDepth > BITS_IN_pasMask) ? (pasDepth - BITS_IN_pasMask) : 0; // How many in pasTopArray[]
lvl += i;
- unsigned offset;
- offset = lvl * sizeof(void*);
+ unsigned offset;
+ offset = lvl * sizeof(void*);
if (mask & pasByrefBottomMask)
offset |= byref_OFFSET_FLAG;
*offs = offset;
- unsigned remMask = -int(mask<<1);
+ unsigned remMask = -int(mask << 1);
return ((pasBottomMask & remMask) ? (i + 1) : pasENUM_LAST);
}
}
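// Illustrative consumption pattern for the pasENUM_* protocol above (an assumed usage
// sketch, not code from this commit): seed the iterator with pasENUM_START, feed each
// returned value back in, and stop when pasENUM_END comes back; every other return has
// filled in *offs.
//
//     unsigned iter = pasENUM_START;
//     unsigned offs;
//     while ((iter = pasStk.pasEnumGCoffs(iter, &offs)) != pasENUM_END)
//     {
//         // 'offs' is the stack offset of one pushed GC pointer, with
//         // byref_OFFSET_FLAG or'ed in when it is a by-ref pointer.
//     }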
@@ -1671,25 +1685,21 @@ unsigned PendingArgsStack::pasEnumGCoffs(unsigned iter, unsigned * offs)
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
- int mask,
- const InfoHdr& header,
- unsigned codeSize,
- size_t* pArgTabOffset)
+size_t GCInfo::gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset)
{
- unsigned count;
+ unsigned count;
- unsigned varNum;
- LclVarDsc* varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- unsigned pass;
+ unsigned pass;
- size_t totalSize = 0;
- unsigned lastOffset;
+ size_t totalSize = 0;
+ unsigned lastOffset;
- bool thisKeptAliveIsInUntracked = false;
+ bool thisKeptAliveIsInUntracked = false;
/* The mask should be all 0's or all 1's */
@@ -1702,14 +1712,14 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
{
assert(*pArgTabOffset <= MAX_UNSIGNED_SIZE_T);
unsigned sz = encodeUnsigned(dest, static_cast<unsigned>(*pArgTabOffset));
- dest += sz;
+ dest += sz;
totalSize += sz;
}
#if VERIFY_GC_TABLES
if (mask)
{
- *(short *)dest = (short)0xBEEF;
+ *(short*)dest = (short)0xBEEF;
dest += sizeof(short);
}
totalSize += sizeof(short);
@@ -1730,35 +1740,33 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
*/
int lastoffset = 0;
- if (pass==1)
+ if (pass == 1)
{
assert(count == header.untrackedCnt);
- if (header.untrackedCnt==0)
- break; // No entries, break exits the loop since pass==1
+ if (header.untrackedCnt == 0)
+ break; // No entries, break exits the loop since pass==1
}
/* Count&Write untracked locals and non-enregistered args */
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
- {
- if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
- {
- // Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
+ {
+ if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
+ {
+ // Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
// reported through its parent local
continue;
- }
+ }
- if (varTypeIsGC(varDsc->TypeGet()))
+ if (varTypeIsGC(varDsc->TypeGet()))
{
/* Do we have an argument or local variable? */
- if (!varDsc->lvIsParam)
+ if (!varDsc->lvIsParam)
{
                    // If it is pinned, it must be an untracked local
assert(!varDsc->lvPinned || !varDsc->lvTracked);
- if (varDsc->lvTracked || !varDsc->lvOnFrame)
+ if (varDsc->lvTracked || !varDsc->lvOnFrame)
continue;
}
else
@@ -1769,7 +1777,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
*/
/* Has this argument been enregistered? */
- if (varDsc->lvRegister)
+ if (varDsc->lvRegister)
{
/* if a CEE_JMP has been used, then we need to report all the arguments
even if they are enregistered, since we will be using this value
@@ -1781,15 +1789,15 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
}
else
{
- if (!varDsc->lvOnFrame)
+ if (!varDsc->lvOnFrame)
{
/* If this non-enregistered pointer arg is never
* used, we don't need to report it
*/
- assert(varDsc->lvRefCnt == 0); // This assert is currently a known issue for X86-RyuJit
+ assert(varDsc->lvRefCnt == 0); // This assert is currently a known issue for X86-RyuJit
continue;
}
- else if (varDsc->lvIsRegArg && varDsc->lvTracked)
+ else if (varDsc->lvIsRegArg && varDsc->lvTracked)
{
/* If this register-passed arg is tracked, then
* it has been allocated space near the other
@@ -1813,12 +1821,12 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
continue;
}
- if (pass==0)
+ if (pass == 0)
count++;
else
{
int offset;
- assert(pass==1);
+ assert(pass == 1);
offset = varDsc->lvStkOffs;
#if DOUBLE_ALIGN
@@ -1846,59 +1854,57 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
}
int encodedoffset = lastoffset - offset;
- lastoffset = offset;
-
+ lastoffset = offset;
+
if (mask == 0)
- totalSize += encodeSigned(NULL, encodedoffset);
+ totalSize += encodeSigned(NULL, encodedoffset);
else
{
unsigned sz = encodeSigned(dest, encodedoffset);
- dest += sz;
+ dest += sz;
totalSize += sz;
}
}
}
// A struct will have gcSlots only if it is at least TARGET_POINTER_SIZE.
- if (varDsc->lvType == TYP_STRUCT &&
- varDsc->lvOnFrame &&
- (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
+ if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
unsigned slots = compiler->lvaLclSize(varNum) / sizeof(void*);
- BYTE * gcPtrs = compiler->lvaGetGcLayout(varNum);
+ BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
// walk each member of the array
for (unsigned i = 0; i < slots; i++)
{
- if (gcPtrs[i] == TYPE_GC_NONE) // skip non-gc slots
+ if (gcPtrs[i] == TYPE_GC_NONE) // skip non-gc slots
continue;
- if (pass==0)
+ if (pass == 0)
count++;
else
{
- assert(pass==1);
+ assert(pass == 1);
unsigned offset = varDsc->lvStkOffs + i * sizeof(void*);
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
- // arguments are addressed relative to EBP.
+ // arguments are addressed relative to EBP.
if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)
offset += compiler->codeGen->genTotalFrameSize();
#endif
if (gcPtrs[i] == TYPE_GC_BYREF)
- offset |= byref_OFFSET_FLAG; // indicate it is a byref GC pointer
+ offset |= byref_OFFSET_FLAG; // indicate it is a byref GC pointer
int encodedoffset = lastoffset - offset;
- lastoffset = offset;
-
+ lastoffset = offset;
+
if (mask == 0)
- totalSize += encodeSigned(NULL, encodedoffset);
+ totalSize += encodeSigned(NULL, encodedoffset);
else
{
unsigned sz = encodeSigned(dest, encodedoffset);
- dest += sz;
+ dest += sz;
totalSize += sz;
}
}
@@ -1909,18 +1915,16 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
/* Count&Write spill temps that hold pointers */
assert(compiler->tmpAllFree());
- for (TempDsc* tempItem = compiler->tmpListBeg();
- tempItem != nullptr;
- tempItem = compiler->tmpListNxt(tempItem))
+ for (TempDsc* tempItem = compiler->tmpListBeg(); tempItem != nullptr; tempItem = compiler->tmpListNxt(tempItem))
{
- if (varTypeIsGC(tempItem->tdTempType()))
+ if (varTypeIsGC(tempItem->tdTempType()))
{
- if (pass==0)
+ if (pass == 0)
count++;
else
{
int offset;
- assert(pass==1);
+ assert(pass == 1);
offset = tempItem->tdTempOffs();
@@ -1930,16 +1934,16 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
}
int encodedoffset = lastoffset - offset;
- lastoffset = offset;
-
+ lastoffset = offset;
+
if (mask == 0)
{
- totalSize += encodeSigned(NULL, encodedoffset);
+ totalSize += encodeSigned(NULL, encodedoffset);
}
else
{
unsigned sz = encodeSigned(dest, encodedoffset);
- dest += sz;
+ dest += sz;
totalSize += sz;
}
}
@@ -1950,13 +1954,12 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
#if VERIFY_GC_TABLES
if (mask)
{
- *(short *)dest = (short)0xCAFE;
+ *(short*)dest = (short)0xCAFE;
dest += sizeof(short);
}
totalSize += sizeof(short);
#endif
-
/**************************************************************************
*
* Generate the table of stack pointer variable lifetimes.
@@ -1970,10 +1973,10 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
// First we check for the most common case - no lifetimes at all.
- if (header.varPtrTableSize == 0)
+ if (header.varPtrTableSize == 0)
goto DONE_VLT;
- varPtrDsc * varTmp;
+ varPtrDsc* varTmp;
count = 0;
if (thisKeptAliveIsInUntracked)
@@ -1986,20 +1989,20 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
assert(compiler->lvaTable[compiler->info.compThisArg].TypeGet() == TYP_REF);
- unsigned varOffs = compiler->lvaTable[compiler->info.compThisArg].lvStkOffs;
+ unsigned varOffs = compiler->lvaTable[compiler->info.compThisArg].lvStkOffs;
/* For negative stack offsets we must reset the low bits,
* take abs and then set them back */
- varOffs = abs(static_cast<int>(varOffs));
+ varOffs = abs(static_cast<int>(varOffs));
varOffs |= this_OFFSET_FLAG;
size_t sz = 0;
- sz = encodeUnsigned(mask?(dest+sz):NULL, varOffs);
- sz += encodeUDelta (mask?(dest+sz):NULL, 0, 0);
- sz += encodeUDelta (mask?(dest+sz):NULL, codeSize, 0);
+ sz = encodeUnsigned(mask ? (dest + sz) : NULL, varOffs);
+ sz += encodeUDelta(mask ? (dest + sz) : NULL, 0, 0);
+ sz += encodeUDelta(mask ? (dest + sz) : NULL, codeSize, 0);
- dest += (sz & mask);
+ dest += (sz & mask);
totalSize += sz;
}
@@ -2007,7 +2010,7 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
{
/* If second pass, generate the count */
- if (pass)
+ if (pass)
{
assert(header.varPtrTableSize > 0);
assert(header.varPtrTableSize == count);
@@ -2019,22 +2022,22 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
for (varTmp = gcVarPtrList; varTmp; varTmp = varTmp->vpdNext)
{
- unsigned varOffs;
- unsigned lowBits;
+ unsigned varOffs;
+ unsigned lowBits;
- unsigned begOffs;
- unsigned endOffs;
+ unsigned begOffs;
+ unsigned endOffs;
assert(~OFFSET_MASK % sizeof(void*) == 0);
/* Get hold of the variable's stack offset */
- lowBits = varTmp->vpdVarNum & OFFSET_MASK;
+ lowBits = varTmp->vpdVarNum & OFFSET_MASK;
/* For negative stack offsets we must reset the low bits,
* take abs and then set them back */
- varOffs = abs(static_cast<int>(varTmp->vpdVarNum & ~OFFSET_MASK));
+ varOffs = abs(static_cast<int>(varTmp->vpdVarNum & ~OFFSET_MASK));
varOffs |= lowBits;
/* Compute the actual lifetime offsets */
@@ -2044,23 +2047,23 @@ size_t GCInfo::gcMakeRegPtrTable(BYTE* dest,
/* Special case: skip any 0-length lifetimes */
- if (endOffs == begOffs)
+ if (endOffs == begOffs)
continue;
/* Are we counting or generating? */
- if (!pass)
+ if (!pass)
{
count++;
}
else
{
size_t sz = 0;
- sz = encodeUnsigned(mask?(dest+sz):NULL, varOffs);
- sz += encodeUDelta (mask?(dest+sz):NULL, begOffs, lastOffset);
- sz += encodeUDelta (mask?(dest+sz):NULL, endOffs, begOffs);
+ sz = encodeUnsigned(mask ? (dest + sz) : NULL, varOffs);
+ sz += encodeUDelta(mask ? (dest + sz) : NULL, begOffs, lastOffset);
+ sz += encodeUDelta(mask ? (dest + sz) : NULL, endOffs, begOffs);
- dest += (sz & mask);
+ dest += (sz & mask);
totalSize += sz;
}
@@ -2078,7 +2081,7 @@ DONE_VLT:
#if VERIFY_GC_TABLES
if (mask)
{
- *(short *)dest = (short)0xBABE;
+ *(short*)dest = (short)0xBABE;
dest += sizeof(short);
}
totalSize += sizeof(short);
@@ -2087,10 +2090,9 @@ DONE_VLT:
if (!mask && emitArgTabOffset)
{
assert(*pArgTabOffset <= MAX_UNSIGNED_SIZE_T);
- totalSize += encodeUnsigned(NULL, static_cast<unsigned>(*pArgTabOffset));
+ totalSize += encodeUnsigned(NULL, static_cast<unsigned>(*pArgTabOffset));
}
-
/**************************************************************************
*
* Prepare to generate the pointer register/argument map
@@ -2100,88 +2102,87 @@ DONE_VLT:
lastOffset = 0;
- if (compiler->codeGen->genInterruptible)
+ if (compiler->codeGen->genInterruptible)
{
#ifdef _TARGET_X86_
assert(compiler->genFullPtrRegMap);
- unsigned ptrRegs = 0;
+ unsigned ptrRegs = 0;
- regPtrDsc * genRegPtrTemp;
+ regPtrDsc* genRegPtrTemp;
/* Walk the list of pointer register/argument entries */
- for (genRegPtrTemp = gcRegPtrList;
- genRegPtrTemp;
- genRegPtrTemp = genRegPtrTemp->rpdNext)
+ for (genRegPtrTemp = gcRegPtrList; genRegPtrTemp; genRegPtrTemp = genRegPtrTemp->rpdNext)
{
- BYTE * base = dest;
+ BYTE* base = dest;
- unsigned nextOffset;
- DWORD codeDelta;
+ unsigned nextOffset;
+ DWORD codeDelta;
nextOffset = genRegPtrTemp->rpdOffs;
- /*
- Encoding table for methods that are fully interruptible
+ /*
+ Encoding table for methods that are fully interruptible
- The encoding used is as follows:
+ The encoding used is as follows:
- ptr reg dead 00RRRDDD [RRR != 100]
- ptr reg live 01RRRDDD [RRR != 100]
+ ptr reg dead 00RRRDDD [RRR != 100]
+ ptr reg live 01RRRDDD [RRR != 100]
- non-ptr arg push 10110DDD [SSS == 110]
- ptr arg push 10SSSDDD [SSS != 110] && [SSS != 111]
- ptr arg pop 11CCCDDD [CCC != 000] && [CCC != 110] && [CCC != 111]
- little skip 11000DDD [CCC == 000]
- bigger skip 11110BBB [CCC == 110]
+ non-ptr arg push 10110DDD [SSS == 110]
+ ptr arg push 10SSSDDD [SSS != 110] && [SSS != 111]
+ ptr arg pop 11CCCDDD [CCC != 000] && [CCC != 110] && [CCC != 111]
+ little skip 11000DDD [CCC == 000]
+ bigger skip 11110BBB [CCC == 110]
- The values used in the above encodings are as follows:
+ The values used in the above encodings are as follows:
- DDD code offset delta from previous entry (0-7)
- BBB bigger delta 000=8,001=16,010=24,...,111=64
- RRR register number (EAX=000,ECX=001,EDX=010,EBX=011,
- EBP=101,ESI=110,EDI=111), ESP=100 is reserved
- SSS argument offset from base of stack. This is
- redundant for frameless methods as we can
- infer it from the previous pushes+pops. However,
- for EBP-methods, we only report GC pushes, and
- so we need SSS
- CCC argument count being popped (includes only ptrs for EBP methods)
+ DDD code offset delta from previous entry (0-7)
+ BBB bigger delta 000=8,001=16,010=24,...,111=64
+ RRR register number (EAX=000,ECX=001,EDX=010,EBX=011,
+ EBP=101,ESI=110,EDI=111), ESP=100 is reserved
+ SSS argument offset from base of stack. This is
+ redundant for frameless methods as we can
+ infer it from the previous pushes+pops. However,
+ for EBP-methods, we only report GC pushes, and
+ so we need SSS
+ CCC argument count being popped (includes only ptrs for EBP methods)
- The following are the 'large' versions:
+ The following are the 'large' versions:
- large delta skip 10111000 [0xB8] , encodeUnsigned(delta)
+ large delta skip 10111000 [0xB8] , encodeUnsigned(delta)
- large ptr arg push 11111000 [0xF8] , encodeUnsigned(pushCount)
- large non-ptr arg push 11111001 [0xF9] , encodeUnsigned(pushCount)
- large ptr arg pop 11111100 [0xFC] , encodeUnsigned(popCount)
- large arg dead 11111101 [0xFD] , encodeUnsigned(popCount) for caller-pop args.
- Any GC args go dead after the call,
- but are still sitting on the stack
+ large ptr arg push 11111000 [0xF8] , encodeUnsigned(pushCount)
+ large non-ptr arg push 11111001 [0xF9] , encodeUnsigned(pushCount)
+ large ptr arg pop 11111100 [0xFC] , encodeUnsigned(popCount)
+ large arg dead 11111101 [0xFD] , encodeUnsigned(popCount) for caller-pop args.
+ Any GC args go dead after the call,
+ but are still sitting on the stack
- this pointer prefix 10111100 [0xBC] the next encoding is a ptr live
- or a ptr arg push
- and contains the this pointer
+ this pointer prefix 10111100 [0xBC] the next encoding is a ptr live
+ or a ptr arg push
+ and contains the this pointer
- interior or by-ref 10111111 [0xBF] the next encoding is a ptr live
- pointer prefix or a ptr arg push
- and contains an interior
- or by-ref pointer
+ interior or by-ref 10111111 [0xBF] the next encoding is a ptr live
+ pointer prefix or a ptr arg push
+ and contains an interior
+ or by-ref pointer
- The value 11111111 [0xFF] indicates the end of the table.
- */
+ The value 11111111 [0xFF] indicates the end of the table.
+ */
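/* Illustrative decode of two small entries from the table above (annotation only,
   not part of this change):
       0x0B == 00 001 011  ->  ptr reg dead,  RRR == 001 (ECX), code delta 3
       0x93 == 10 010 011  ->  ptr arg push,  SSS == 010,       code delta 3
   The escape bytes (0xB8, 0xBC, 0xBF, 0xF8, 0xF9, 0xFC, 0xFD) and the 0xFF
   terminator are recognised by their full first byte. */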
- codeDelta = nextOffset - lastOffset; assert((int)codeDelta >= 0);
+ codeDelta = nextOffset - lastOffset;
+ assert((int)codeDelta >= 0);
// If the code delta is between 8 and (64+7),
// generate a 'bigger delta' encoding
- if ((codeDelta >= 8) && (codeDelta <= (64+7)))
+ if ((codeDelta >= 8) && (codeDelta <= (64 + 7)))
{
- unsigned biggerDelta = ((codeDelta-8) & 0x38) + 8;
- *dest++ = 0xF0 | ((biggerDelta-8) >> 3);
+ unsigned biggerDelta = ((codeDelta - 8) & 0x38) + 8;
+ *dest++ = 0xF0 | ((biggerDelta - 8) >> 3);
lastOffset += biggerDelta;
codeDelta &= 0x07;
}
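// Worked example of the 'bigger delta' encoding above (annotation only, not part of
// this change): for codeDelta == 37, biggerDelta = ((37 - 8) & 0x38) + 8 == 32, the
// emitted byte is 0xF0 | ((32 - 8) >> 3) == 0xF3 (BBB == 011, i.e. a skip of 32), and
// the remaining delta 37 & 0x07 == 5 is carried by the next entry's DDD bits.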
@@ -2189,10 +2190,10 @@ DONE_VLT:
// If the code delta is still bigger than 7,
// generate a 'large code delta' encoding
- if (codeDelta > 7)
+ if (codeDelta > 7)
{
- *dest++ = 0xB8;
- dest += encodeUnsigned(dest, codeDelta);
+ *dest++ = 0xB8;
+ dest += encodeUnsigned(dest, codeDelta);
codeDelta = 0;
/* Remember the new 'last' offset */
@@ -2200,13 +2201,13 @@ DONE_VLT:
lastOffset = nextOffset;
}
- /* Is this a pointer argument or register entry? */
+ /* Is this a pointer argument or register entry? */
- if (genRegPtrTemp->rpdArg)
+ if (genRegPtrTemp->rpdArg)
{
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_KILL)
{
- if (codeDelta)
+ if (codeDelta)
{
/*
Use the small encoding:
@@ -2224,18 +2225,17 @@ DONE_VLT:
/* Caller-pop arguments are dead after call but are still
sitting on the stack */
- *dest++ = 0xFD;
+ *dest++ = 0xFD;
assert(genRegPtrTemp->rpdPtrArg != 0);
- dest += encodeUnsigned(dest, genRegPtrTemp->rpdPtrArg);
+ dest += encodeUnsigned(dest, genRegPtrTemp->rpdPtrArg);
}
- else if (genRegPtrTemp->rpdPtrArg < 6 && genRegPtrTemp->rpdGCtypeGet())
+ else if (genRegPtrTemp->rpdPtrArg < 6 && genRegPtrTemp->rpdGCtypeGet())
{
/* Is the argument offset/count smaller than 6 ? */
dest = gceByrefPrefixI(genRegPtrTemp, dest);
- if ( genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH ||
- (genRegPtrTemp->rpdPtrArg!=0))
+ if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH || (genRegPtrTemp->rpdPtrArg != 0))
{
/*
Use the small encoding:
@@ -2246,9 +2246,7 @@ DONE_VLT:
bool isPop = genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP;
- *dest++ = 0x80 | (BYTE)codeDelta
- | genRegPtrTemp->rpdPtrArg << 3
- | isPop << 6;
+ *dest++ = 0x80 | (BYTE)codeDelta | genRegPtrTemp->rpdPtrArg << 3 | isPop << 6;
/* Remember the new 'last' offset */
@@ -2258,7 +2256,6 @@ DONE_VLT:
{
assert(!"Check this");
}
-
}
else if (genRegPtrTemp->rpdGCtypeGet() == GCT_NONE)
{
@@ -2281,7 +2278,7 @@ DONE_VLT:
* first do the code delta
*/
- if (codeDelta)
+ if (codeDelta)
{
/*
Use the small encoding:
@@ -2303,8 +2300,8 @@ DONE_VLT:
dest = gceByrefPrefixI(genRegPtrTemp, dest);
- *dest++ = 0xF8 | (isPop << 2);
- dest += encodeUnsigned(dest, genRegPtrTemp->rpdPtrArg);
+ *dest++ = 0xF8 | (isPop << 2);
+ dest += encodeUnsigned(dest, genRegPtrTemp->rpdPtrArg);
/* Remember the new 'last' offset */
@@ -2313,28 +2310,30 @@ DONE_VLT:
}
else
{
- unsigned regMask;
+ unsigned regMask;
/* Record any registers that are becoming dead */
regMask = genRegPtrTemp->rpdCompiler.rpdDel & ptrRegs;
- while (regMask) // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
+ while (regMask) // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
{
- unsigned tmpMask;
- regNumber regNum;
+ unsigned tmpMask;
+ regNumber regNum;
/* Get hold of the next register bit */
- tmpMask = genFindLowestReg(regMask); assert(tmpMask);
+ tmpMask = genFindLowestReg(regMask);
+ assert(tmpMask);
/* Remember the new state of this register */
- ptrRegs&= ~tmpMask;
+ ptrRegs &= ~tmpMask;
/* Figure out which register the next bit corresponds to */
- regNum = genRegNumFromMask(tmpMask); assert(regNum <= 7);
+ regNum = genRegNumFromMask(tmpMask);
+ assert(regNum <= 7);
/* Reserve ESP, regNum==4 for future use */
@@ -2347,8 +2346,7 @@ DONE_VLT:
*/
assert((codeDelta & 0x7) == codeDelta);
- *dest++ = 0x00 | regNum << 3
- | (BYTE)codeDelta;
+ *dest++ = 0x00 | regNum << 3 | (BYTE)codeDelta;
/* Turn the bit we've just generated off and continue */
@@ -2360,21 +2358,22 @@ DONE_VLT:
/* Any entries that follow will be at the same offset */
- codeDelta = zeroFunc(); /* DO NOT REMOVE */
+ codeDelta = zeroFunc(); /* DO NOT REMOVE */
}
/* Record any registers that are becoming live */
regMask = genRegPtrTemp->rpdCompiler.rpdAdd & ~ptrRegs;
- while (regMask) // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
+ while (regMask) // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
{
- unsigned tmpMask;
- regNumber regNum;
+ unsigned tmpMask;
+ regNumber regNum;
/* Get hold of the next register bit */
- tmpMask = genFindLowestReg(regMask); assert(tmpMask);
+ tmpMask = genFindLowestReg(regMask);
+ assert(tmpMask);
/* Remember the new state of this register */
@@ -2382,7 +2381,8 @@ DONE_VLT:
/* Figure out which register the next bit corresponds to */
- regNum = genRegNumFromMask(tmpMask); assert(regNum <= 7);
+ regNum = genRegNumFromMask(tmpMask);
+ assert(regNum <= 7);
/*
Generate a small encoding:
@@ -2401,12 +2401,11 @@ DONE_VLT:
}
assert((codeDelta & 0x7) == codeDelta);
- *dest++ = 0x40 | (regNum << 3)
- | (BYTE)codeDelta;
+ *dest++ = 0x40 | (regNum << 3) | (BYTE)codeDelta;
/* Turn the bit we've just generated off and continue */
- regMask -= tmpMask; // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
+ regMask -= tmpMask; // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
/* Remember the new 'last' offset */
@@ -2414,7 +2413,7 @@ DONE_VLT:
/* Any entries that follow will be at the same offset */
- codeDelta = zeroFunc(); /* DO NOT REMOVE */
+ codeDelta = zeroFunc(); /* DO NOT REMOVE */
}
}
@@ -2424,131 +2423,133 @@ DONE_VLT:
/* Go back to the buffer start if we're not generating a table */
- if (!mask)
+ if (!mask)
dest = base;
}
#endif // _TARGET_X86_
/* Terminate the table with 0xFF */
- *dest = 0xFF; dest -= mask; totalSize++;
+ *dest = 0xFF;
+ dest -= mask;
+ totalSize++;
}
- else if (compiler->isFramePointerUsed()) // genInterruptible is false
+ else if (compiler->isFramePointerUsed()) // genInterruptible is false
{
#ifdef _TARGET_X86_
- /*
- Encoding table for methods with an EBP frame and
- that are not fully interruptible
+ /*
+ Encoding table for methods with an EBP frame and
+ that are not fully interruptible
- The encoding used is as follows:
+ The encoding used is as follows:
- this pointer encodings:
+ this pointer encodings:
- 01000000 this pointer in EBX
- 00100000 this pointer in ESI
- 00010000 this pointer in EDI
+ 01000000 this pointer in EBX
+ 00100000 this pointer in ESI
+ 00010000 this pointer in EDI
- tiny encoding:
+ tiny encoding:
- 0bsdDDDD
- requires code delta > 0 & delta < 16 (4-bits)
- requires pushed argmask == 0
+ 0bsdDDDD
+ requires code delta > 0 & delta < 16 (4-bits)
+ requires pushed argmask == 0
- where DDDD is code delta
- b indicates that register EBX is a live pointer
- s indicates that register ESI is a live pointer
- d indicates that register EDI is a live pointer
+ where DDDD is code delta
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
- small encoding:
+ small encoding:
- 1DDDDDDD bsdAAAAA
+ 1DDDDDDD bsdAAAAA
- requires code delta < 120 (7-bits)
- requires pushed argmask < 64 (5-bits)
+ requires code delta < 120 (7-bits)
+ requires pushed argmask < 64 (5-bits)
- where DDDDDDD is code delta
- AAAAA is the pushed args mask
- b indicates that register EBX is a live pointer
- s indicates that register ESI is a live pointer
- d indicates that register EDI is a live pointer
+ where DDDDDDD is code delta
+ AAAAA is the pushed args mask
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
- medium encoding
+ medium encoding
- 0xFD aaaaaaaa AAAAdddd bseDDDDD
+ 0xFD aaaaaaaa AAAAdddd bseDDDDD
- requires code delta < 512 (9-bits)
- requires pushed argmask < 2048 (12-bits)
+ requires code delta < 512 (9-bits)
+ requires pushed argmask < 2048 (12-bits)
- where DDDDD is the upper 5-bits of the code delta
- dddd is the low 4-bits of the code delta
- AAAA is the upper 4-bits of the pushed arg mask
- aaaaaaaa is the low 8-bits of the pushed arg mask
- b indicates that register EBX is a live pointer
- s indicates that register ESI is a live pointer
- e indicates that register EDI is a live pointer
+ where DDDDD is the upper 5-bits of the code delta
+ dddd is the low 4-bits of the code delta
+ AAAA is the upper 4-bits of the pushed arg mask
+ aaaaaaaa is the low 8-bits of the pushed arg mask
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ e indicates that register EDI is a live pointer
- medium encoding with interior pointers
+ medium encoding with interior pointers
- 0xF9 DDDDDDDD bsdAAAAAA iiiIIIII
+ 0xF9 DDDDDDDD bsdAAAAAA iiiIIIII
- requires code delta < 256 (8-bits)
- requires pushed argmask < 64 (5-bits)
+ requires code delta < 256 (8-bits)
+ requires pushed argmask < 64 (5-bits)
- where DDDDDDD is the code delta
- b indicates that register EBX is a live pointer
- s indicates that register ESI is a live pointer
- d indicates that register EDI is a live pointer
- AAAAA is the pushed arg mask
- iii indicates that EBX,EDI,ESI are interior pointers
- IIIII indicates that bits in the arg mask are interior
- pointers
+ where DDDDDDD is the code delta
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
+ AAAAA is the pushed arg mask
+ iii indicates that EBX,EDI,ESI are interior pointers
+ IIIII indicates that bits in the arg mask are interior
+ pointers
- large encoding
+ large encoding
- 0xFE [0BSD0bsd][32-bit code delta][32-bit argMask]
+ 0xFE [0BSD0bsd][32-bit code delta][32-bit argMask]
- b indicates that register EBX is a live pointer
- s indicates that register ESI is a live pointer
- d indicates that register EDI is a live pointer
- B indicates that register EBX is an interior pointer
- S indicates that register ESI is an interior pointer
- D indicates that register EDI is an interior pointer
- requires pushed argmask < 32-bits
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
+ B indicates that register EBX is an interior pointer
+ S indicates that register ESI is an interior pointer
+ D indicates that register EDI is an interior pointer
+ requires pushed argmask < 32-bits
- large encoding with interior pointers
+ large encoding with interior pointers
- 0xFA [0BSD0bsd][32-bit code delta][32-bit argMask][32-bit interior pointer mask]
+ 0xFA [0BSD0bsd][32-bit code delta][32-bit argMask][32-bit interior pointer mask]
- b indicates that register EBX is a live pointer
- s indicates that register ESI is a live pointer
- d indicates that register EDI is a live pointer
- B indicates that register EBX is an interior pointer
- S indicates that register ESI is an interior pointer
- D indicates that register EDI is an interior pointer
- requires pushed argmask < 32-bits
- requires pushed iArgmask < 32-bits
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
+ B indicates that register EBX is an interior pointer
+ S indicates that register ESI is an interior pointer
+ D indicates that register EDI is an interior pointer
+ requires pushed argmask < 32-bits
+ requires pushed iArgmask < 32-bits
- huge encoding This is the only encoding that supports
- a pushed argmask which is greater than
- 32-bits.
+ huge encoding This is the only encoding that supports
+ a pushed argmask which is greater than
+ 32-bits.
- 0xFB [0BSD0bsd][32-bit code delta]
- [32-bit table count][32-bit table size]
- [pushed ptr offsets table...]
+ 0xFB [0BSD0bsd][32-bit code delta]
+ [32-bit table count][32-bit table size]
+ [pushed ptr offsets table...]
- b indicates that register EBX is a live pointer
- s indicates that register ESI is a live pointer
- d indicates that register EDI is a live pointer
- B indicates that register EBX is an interior pointer
- S indicates that register ESI is an interior pointer
- D indicates that register EDI is an interior pointer
- the list count is the number of entries in the list
- the list size gives the byte-length of the list
- the offsets in the list are variable-length
- */
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
+ B indicates that register EBX is an interior pointer
+ S indicates that register ESI is an interior pointer
+ D indicates that register EDI is an interior pointer
+ the list count is the number of entries in the list
+ the list size gives the byte-length of the list
+ the offsets in the list are variable-length
+ */
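/* Illustrative decode of one tiny encoding from the table above (annotation only,
   not part of this change):
       0x65 == 0 1 1 0 0101  ->  b=1, s=1, d=0, DDDD=5
   i.e. EBX and ESI hold live GC pointers, the pushed argmask is empty, and the
   code delta is 5. */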
/* If "this" is enregistered, note it. We do this explicitly here as
genFullPtrRegMap==false, and so we don't have any regPtrDsc's. */
@@ -2566,7 +2567,7 @@ DONE_VLT:
}
}
- CallDsc * call;
+ CallDsc* call;
assert(compiler->genFullPtrRegMap == false);
@@ -2574,8 +2575,8 @@ DONE_VLT:
for (call = gcCallDescList; call; call = call->cdNext)
{
- BYTE * base = dest;
- unsigned nextOffset;
+ BYTE* base = dest;
+ unsigned nextOffset;
/* Figure out the code offset of this entry */
@@ -2583,7 +2584,7 @@ DONE_VLT:
/* Compute the distance from the previous call */
- DWORD codeDelta = nextOffset - lastOffset;
+ DWORD codeDelta = nextOffset - lastOffset;
assert((int)codeDelta >= 0);
@@ -2608,34 +2609,38 @@ DONE_VLT:
/* Check for the really large argument offset case */
/* The very rare Huge encodings */
- if (call->cdArgCnt)
+ if (call->cdArgCnt)
{
- unsigned argNum;
- DWORD argCnt = call->cdArgCnt;
- DWORD argBytes = 0;
- BYTE * pArgBytes = DUMMY_INIT(NULL);
+ unsigned argNum;
+ DWORD argCnt = call->cdArgCnt;
+ DWORD argBytes = 0;
+ BYTE* pArgBytes = DUMMY_INIT(NULL);
if (mask != 0)
{
- *dest++ = 0xFB;
- *dest++ = (byrefRegMask << 4) | regMask;
- *(DWORD*)dest = codeDelta; dest += sizeof(DWORD);
- *(DWORD*)dest = argCnt; dest += sizeof(DWORD);
+ *dest++ = 0xFB;
+ *dest++ = (byrefRegMask << 4) | regMask;
+ *(DWORD*)dest = codeDelta;
+ dest += sizeof(DWORD);
+ *(DWORD*)dest = argCnt;
+ dest += sizeof(DWORD);
// skip the byte-size for now. Just note where it will go
- pArgBytes = dest; dest += sizeof(DWORD);
+ pArgBytes = dest;
+ dest += sizeof(DWORD);
}
for (argNum = 0; argNum < argCnt; argNum++)
{
- unsigned eltSize;
+ unsigned eltSize;
eltSize = encodeUnsigned(dest, call->cdArgTable[argNum]);
argBytes += eltSize;
- if (mask) dest += eltSize;
+ if (mask)
+ dest += eltSize;
}
if (mask == 0)
{
- dest = base + 1 + 1 + 3*sizeof(DWORD) + argBytes;
+ dest = base + 1 + 1 + 3 * sizeof(DWORD) + argBytes;
}
else
{
@@ -2658,7 +2663,7 @@ DONE_VLT:
}
/* Check if we can use the medium encoding */
- else if (codeDelta <= 0x01FF && call->u1.cdArgMask <= 0x0FFF && !byref)
+ else if (codeDelta <= 0x01FF && call->u1.cdArgMask <= 0x0FFF && !byref)
{
*dest++ = 0xFD;
*dest++ = call->u1.cdArgMask;
@@ -2667,31 +2672,36 @@ DONE_VLT:
}
/* Check if we can use the medium encoding with byrefs */
- else if (codeDelta <= 0x0FF && call->u1.cdArgMask <= 0x01F)
+ else if (codeDelta <= 0x0FF && call->u1.cdArgMask <= 0x01F)
{
*dest++ = 0xF9;
*dest++ = (BYTE)codeDelta;
- *dest++ = ( regMask << 5) | call->u1.cdArgMask;
+ *dest++ = (regMask << 5) | call->u1.cdArgMask;
*dest++ = (byrefRegMask << 5) | call->u1.cdByrefArgMask;
}
/* We'll use the large encoding */
else if (!byref)
{
- *dest++ = 0xFE;
- *dest++ = (byrefRegMask << 4) | regMask;
- *(DWORD*)dest = codeDelta; dest += sizeof(DWORD);
- *(DWORD*)dest = call->u1.cdArgMask; dest += sizeof(DWORD);
+ *dest++ = 0xFE;
+ *dest++ = (byrefRegMask << 4) | regMask;
+ *(DWORD*)dest = codeDelta;
+ dest += sizeof(DWORD);
+ *(DWORD*)dest = call->u1.cdArgMask;
+ dest += sizeof(DWORD);
}
/* We'll use the large encoding with byrefs */
else
{
- *dest++ = 0xFA;
- *dest++ = (byrefRegMask << 4) | regMask;
- *(DWORD*)dest = codeDelta; dest += sizeof(DWORD);
- *(DWORD*)dest = call->u1.cdArgMask; dest += sizeof(DWORD);
- *(DWORD*)dest = call->u1.cdByrefArgMask; dest += sizeof(DWORD);
+ *dest++ = 0xFA;
+ *dest++ = (byrefRegMask << 4) | regMask;
+ *(DWORD*)dest = codeDelta;
+ dest += sizeof(DWORD);
+ *(DWORD*)dest = call->u1.cdArgMask;
+ dest += sizeof(DWORD);
+ *(DWORD*)dest = call->u1.cdByrefArgMask;
+ dest += sizeof(DWORD);
}
/* Keep track of the total amount of generated stuff */
@@ -2700,14 +2710,16 @@ DONE_VLT:
/* Go back to the buffer start if we're not generating a table */
- if (!mask)
+ if (!mask)
dest = base;
}
#endif // _TARGET_X86_
/* Terminate the table with 0xFF */
- *dest = 0xFF; dest -= mask; totalSize++;
+ *dest = 0xFF;
+ dest -= mask;
+ totalSize++;
}
else // genInterruptible is false and we have an EBP-less frame
{
@@ -2715,141 +2727,140 @@ DONE_VLT:
#ifdef _TARGET_X86_
- regPtrDsc * genRegPtrTemp;
- regNumber thisRegNum = regNumber(0);
+ regPtrDsc* genRegPtrTemp;
+ regNumber thisRegNum = regNumber(0);
PendingArgsStack pasStk(compiler->getEmitter()->emitMaxStackDepth, compiler);
/* Walk the list of pointer register/argument entries */
- for (genRegPtrTemp = gcRegPtrList;
- genRegPtrTemp;
- genRegPtrTemp = genRegPtrTemp->rpdNext)
+ for (genRegPtrTemp = gcRegPtrList; genRegPtrTemp; genRegPtrTemp = genRegPtrTemp->rpdNext)
{
-/*
- * Encoding table for methods without an EBP frame and
- * that are not fully interruptible
- *
- * The encoding used is as follows:
- *
- * push 000DDDDD ESP push one item with 5-bit delta
- * push 00100000 [pushCount] ESP push multiple items
- * reserved 0010xxxx xxxx != 0000
- * reserved 0011xxxx
- * skip 01000000 [Delta] Skip Delta, arbitrary sized delta
- * skip 0100DDDD Skip small Delta, for call (DDDD != 0)
- * pop 01CCDDDD ESP pop CC items with 4-bit delta (CC != 00)
- * call 1PPPPPPP Call Pattern, P=[0..79]
- * call 1101pbsd DDCCCMMM Call RegMask=pbsd,ArgCnt=CCC,
- * ArgMask=MMM Delta=commonDelta[DD]
- * call 1110pbsd [ArgCnt] [ArgMask] Call ArgCnt,RegMask=pbsd,ArgMask
- * call 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
- * [32-bit PndCnt][32-bit PndSize][PndOffs...]
- * iptr 11110000 [IPtrMask] Arbitrary Interior Pointer Mask
- * thisptr 111101RR This pointer is in Register RR
- * 00=EDI,01=ESI,10=EBX,11=EBP
- * reserved 111100xx xx != 00
- * reserved 111110xx xx != 00
- * reserved 11111xxx xxx != 000 && xxx != 111(EOT)
- *
- * The value 11111111 [0xFF] indicates the end of the table. (EOT)
- *
- * An offset (at which stack-walking is performed) without an explicit encoding
- * is assumed to be a trivial call-site (no GC registers, stack empty before and
- * after) to avoid having to encode all trivial calls.
- *
- * Note on the encoding used for interior pointers
- *
- * The iptr encoding must immediately precede a call encoding. It is used
- * to transform a normal GC pointer addresses into an interior pointers for
- * GC purposes. The mask supplied to the iptr encoding is read from the
- * least signicant bit to the most signicant bit. (i.e the lowest bit is
- * read first)
- *
- * p indicates that register EBP is a live pointer
- * b indicates that register EBX is a live pointer
- * s indicates that register ESI is a live pointer
- * d indicates that register EDI is a live pointer
- * P indicates that register EBP is an interior pointer
- * B indicates that register EBX is an interior pointer
- * S indicates that register ESI is an interior pointer
- * D indicates that register EDI is an interior pointer
- *
- * As an example the following sequence indicates that EDI.ESI and the
- * second pushed pointer in ArgMask are really interior pointers. The
- * pointer in ESI in a normal pointer:
- *
- * iptr 11110000 00010011 => read Interior Ptr, Interior Ptr,
- * Normal Ptr, Normal Ptr, Interior Ptr
- *
- * call 11010011 DDCCC011 RRRR=1011 => read EDI is a GC-pointer,
- * ESI is a GC-pointer.
- * EBP is a GC-pointer
- * MMM=0011 => read two GC-pointers arguments
- * on the stack (nested call)
- *
- * Since the call instruction mentions 5 GC-pointers we list them in
- * the required order: EDI, ESI, EBP, 1st-pushed pointer, 2nd-pushed pointer
- *
- * And we apply the Interior Pointer mask mmmm=10011 to the five GC-pointers
- * we learn that EDI and ESI are interior GC-pointers and that
- * the second push arg is an interior GC-pointer.
- */
+ /*
+ * Encoding table for methods without an EBP frame and
+ * that are not fully interruptible
+ *
+ * The encoding used is as follows:
+ *
+ * push 000DDDDD ESP push one item with 5-bit delta
+ * push 00100000 [pushCount] ESP push multiple items
+ * reserved 0010xxxx xxxx != 0000
+ * reserved 0011xxxx
+ * skip 01000000 [Delta] Skip Delta, arbitrary sized delta
+ * skip 0100DDDD Skip small Delta, for call (DDDD != 0)
+ * pop 01CCDDDD ESP pop CC items with 4-bit delta (CC != 00)
+ * call 1PPPPPPP Call Pattern, P=[0..79]
+ * call 1101pbsd DDCCCMMM Call RegMask=pbsd,ArgCnt=CCC,
+ * ArgMask=MMM Delta=commonDelta[DD]
+ * call 1110pbsd [ArgCnt] [ArgMask] Call ArgCnt,RegMask=pbsd,ArgMask
+ * call 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
+ * [32-bit PndCnt][32-bit PndSize][PndOffs...]
+ * iptr 11110000 [IPtrMask] Arbitrary Interior Pointer Mask
+ * thisptr 111101RR This pointer is in Register RR
+ * 00=EDI,01=ESI,10=EBX,11=EBP
+ * reserved 111100xx xx != 00
+ * reserved 111110xx xx != 00
+ * reserved 11111xxx xxx != 000 && xxx != 111(EOT)
+ *
+ * The value 11111111 [0xFF] indicates the end of the table. (EOT)
+ *
+ * An offset (at which stack-walking is performed) without an explicit encoding
+ * is assumed to be a trivial call-site (no GC registers, stack empty before and
+ * after) to avoid having to encode all trivial calls.
+ *
+ * Note on the encoding used for interior pointers
+ *
+ * The iptr encoding must immediately precede a call encoding. It is used
+             * to transform normal GC pointer addresses into interior pointers for
+             * GC purposes. The mask supplied to the iptr encoding is read from the
+             * least significant bit to the most significant bit. (i.e. the lowest bit is
+ * read first)
+ *
+ * p indicates that register EBP is a live pointer
+ * b indicates that register EBX is a live pointer
+ * s indicates that register ESI is a live pointer
+ * d indicates that register EDI is a live pointer
+ * P indicates that register EBP is an interior pointer
+ * B indicates that register EBX is an interior pointer
+ * S indicates that register ESI is an interior pointer
+ * D indicates that register EDI is an interior pointer
+ *
+ * As an example the following sequence indicates that EDI.ESI and the
+ * second pushed pointer in ArgMask are really interior pointers. The
+ * pointer in EBP is a normal pointer:
+ *
+ * iptr 11110000 00010011 => read Interior Ptr, Interior Ptr,
+ * Normal Ptr, Normal Ptr, Interior Ptr
+ *
+ * call 11010011 DDCCC011 RRRR=1011 => read EDI is a GC-pointer,
+ * ESI is a GC-pointer.
+ * EBP is a GC-pointer
+ * MMM=0011 => read two GC-pointer arguments
+ * on the stack (nested call)
+ *
+ * Since the call instruction mentions 5 GC-pointers we list them in
+ * the required order: EDI, ESI, EBP, 1st-pushed pointer, 2nd-pushed pointer
+ *
+ * Applying the Interior Pointer mask mmmm=10011 to the five GC-pointers,
+ * we learn that EDI and ESI are interior GC-pointers and that
+ * the second pushed arg is an interior GC-pointer.
+ */
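Editorial aside, not part of this patch: a minimal sketch of how the iptr mask from the example above would be applied, assuming the pointer order EDI, ESI, EBP, then the pushed arguments described in the comment; decodeIPtrMask and the names below are hypothetical and exist only to illustrate the LSB-first reading of the mask.

    #include <cstdio>

    // Hypothetical helper: apply an iptr mask (read from the least significant
    // bit first) to a call site's GC pointers, listed in the order EDI, ESI,
    // EBP, then the pushed arguments.
    static void decodeIPtrMask(unsigned mask, const char* const* names, unsigned count)
    {
        for (unsigned i = 0; i < count; i++)
        {
            bool isInterior = ((mask >> i) & 1) != 0;
            printf("%-10s : %s\n", names[i], isInterior ? "interior pointer" : "normal pointer");
        }
    }

    int main()
    {
        // Mask 00010011 from the example, applied to the five GC pointers.
        const char* names[] = {"EDI", "ESI", "EBP", "1st pushed", "2nd pushed"};
        decodeIPtrMask(0x13, names, 5); // EDI, ESI and the 2nd pushed arg come out interior
        return 0;
    }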
- BYTE * base = dest;
+ BYTE* base = dest;
- bool usePopEncoding;
- unsigned regMask;
- unsigned argMask;
- unsigned byrefRegMask;
- unsigned byrefArgMask;
- DWORD callArgCnt;
+ bool usePopEncoding;
+ unsigned regMask;
+ unsigned argMask;
+ unsigned byrefRegMask;
+ unsigned byrefArgMask;
+ DWORD callArgCnt;
- unsigned nextOffset;
- DWORD codeDelta;
+ unsigned nextOffset;
+ DWORD codeDelta;
nextOffset = genRegPtrTemp->rpdOffs;
/* Compute the distance from the previous call */
- codeDelta = nextOffset - lastOffset; assert((int)codeDelta >= 0);
+ codeDelta = nextOffset - lastOffset;
+ assert((int)codeDelta >= 0);
#if REGEN_CALLPAT
// Must initialize this flag to true when REGEN_CALLPAT is on
- usePopEncoding = true;
+ usePopEncoding = true;
unsigned origCodeDelta = codeDelta;
#endif
if (!thisKeptAliveIsInUntracked && genRegPtrTemp->rpdIsThis)
{
- unsigned tmpMask = genRegPtrTemp->rpdCompiler.rpdAdd;
+ unsigned tmpMask = genRegPtrTemp->rpdCompiler.rpdAdd;
/* tmpMask must have exactly one bit set */
- assert(tmpMask && ((tmpMask & (tmpMask-1)) == 0));
+ assert(tmpMask && ((tmpMask & (tmpMask - 1)) == 0));
- thisRegNum = genRegNumFromMask(tmpMask);
+ thisRegNum = genRegNumFromMask(tmpMask);
switch (thisRegNum)
{
- case 0: // EAX
- case 1: // ECX
- case 2: // EDX
- case 4: // ESP
- break;
- case 7: // EDI
- *dest++ = 0xF4; /* 11110100 This pointer is in EDI */
- break;
- case 6: // ESI
- *dest++ = 0xF5; /* 11110101 This pointer is in ESI */
- break;
- case 3: // EBX
- *dest++ = 0xF6; /* 11110110 This pointer is in EBX */
- break;
- case 5: // EBP
- *dest++ = 0xF7; /* 11110111 This pointer is in EBP */
- break;
- default:
- break;
+ case 0: // EAX
+ case 1: // ECX
+ case 2: // EDX
+ case 4: // ESP
+ break;
+ case 7: // EDI
+ *dest++ = 0xF4; /* 11110100 This pointer is in EDI */
+ break;
+ case 6: // ESI
+ *dest++ = 0xF5; /* 11110101 This pointer is in ESI */
+ break;
+ case 3: // EBX
+ *dest++ = 0xF6; /* 11110110 This pointer is in EBX */
+ break;
+ case 5: // EBP
+ *dest++ = 0xF7; /* 11110111 This pointer is in EBP */
+ break;
+ default:
+ break;
}
}
@@ -2862,7 +2873,7 @@ DONE_VLT:
// kill 'rpdPtrArg' number of pointer variables in pasStk
pasStk.pasKill(genRegPtrTemp->rpdPtrArg);
}
- /* Is this a call site? */
+ /* Is this a call site? */
else if (genRegPtrTemp->rpdCall)
{
/* This is a true call site */
@@ -2886,44 +2897,47 @@ DONE_VLT:
/* Do we have to use the fat encoding */
- if (pasStk.pasCurDepth() > BITS_IN_pasMask &&
- pasStk.pasHasGCptrs())
+ if (pasStk.pasCurDepth() > BITS_IN_pasMask && pasStk.pasHasGCptrs())
{
/* use fat encoding:
* 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
* [32-bit PndCnt][32-bit PndSize][PndOffs...]
*/
- DWORD pndCount = pasStk.pasEnumGCoffsCount();
- DWORD pndSize = 0;
- BYTE * pPndSize = DUMMY_INIT(NULL);
+ DWORD pndCount = pasStk.pasEnumGCoffsCount();
+ DWORD pndSize = 0;
+ BYTE* pPndSize = DUMMY_INIT(NULL);
if (mask)
{
- *dest++ = 0xF8;
- *dest++ = (byrefRegMask << 4) | regMask;
- *(DWORD*)dest = codeDelta; dest += sizeof(DWORD);
- *(DWORD*)dest = callArgCnt; dest += sizeof(DWORD);
- *(DWORD*)dest = pndCount; dest += sizeof(DWORD);
- pPndSize = dest; dest += sizeof(DWORD); // Leave space for pndSize
+ *dest++ = 0xF8;
+ *dest++ = (byrefRegMask << 4) | regMask;
+ *(DWORD*)dest = codeDelta;
+ dest += sizeof(DWORD);
+ *(DWORD*)dest = callArgCnt;
+ dest += sizeof(DWORD);
+ *(DWORD*)dest = pndCount;
+ dest += sizeof(DWORD);
+ pPndSize = dest;
+ dest += sizeof(DWORD); // Leave space for pndSize
}
unsigned offs, iter;
- for (iter = pasStk.pasEnumGCoffs(pasENUM_START, &offs);
- pndCount;
- iter = pasStk.pasEnumGCoffs(iter, &offs), pndCount--)
+ for (iter = pasStk.pasEnumGCoffs(pasENUM_START, &offs); pndCount;
+ iter = pasStk.pasEnumGCoffs(iter, &offs), pndCount--)
{
unsigned eltSize = encodeUnsigned(dest, offs);
pndSize += eltSize;
- if (mask) dest += eltSize;
+ if (mask)
+ dest += eltSize;
}
assert(iter == pasENUM_END);
if (mask == 0)
{
- dest = base + 2 + 4*sizeof(DWORD) + pndSize;
+ dest = base + 2 + 4 * sizeof(DWORD) + pndSize;
}
else
{
@@ -2940,7 +2954,7 @@ DONE_VLT:
{
assert(pasStk.pasCurDepth() <= BITS_IN_pasMask);
- argMask = pasStk.pasArgMask();
+ argMask = pasStk.pasArgMask();
byrefArgMask = pasStk.pasByrefArgMask();
}
@@ -2948,16 +2962,16 @@ DONE_VLT:
assert(regMask || argMask || callArgCnt || pasStk.pasCurDepth());
- // Emit IPtrMask if needed
+// Emit IPtrMask if needed
-#define CHK_NON_INTRPT_ESP_IPtrMask \
- \
- if (byrefRegMask || byrefArgMask) \
- { \
- *dest++ = 0xF0; \
- unsigned imask = (byrefArgMask << 4) | byrefRegMask;\
- dest += encodeUnsigned(dest, imask); \
- }
+#define CHK_NON_INTRPT_ESP_IPtrMask \
+ \
+ if (byrefRegMask || byrefArgMask) \
+ { \
+ *dest++ = 0xF0; \
+ unsigned imask = (byrefArgMask << 4) | byrefRegMask; \
+ dest += encodeUnsigned(dest, imask); \
+ }
/* When usePopEncoding is true:
* this is not an interesting call site
@@ -2967,8 +2981,7 @@ DONE_VLT:
if (!usePopEncoding)
{
- int pattern = lookupCallPattern(callArgCnt, regMask,
- argMask, codeDelta);
+ int pattern = lookupCallPattern(callArgCnt, regMask, argMask, codeDelta);
if (pattern != -1)
{
if (pattern > 0xff)
@@ -3004,42 +3017,40 @@ DONE_VLT:
if ((callArgCnt <= 7) && (argMask <= 7))
{
- unsigned inx; // callCommonDelta[] index
+ unsigned inx; // callCommonDelta[] index
unsigned maxCommonDelta = callCommonDelta[3];
if (codeDelta > maxCommonDelta)
{
- if (codeDelta > maxCommonDelta+15)
+ if (codeDelta > maxCommonDelta + 15)
{
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
- dest += encodeUnsigned(dest, codeDelta-maxCommonDelta);
+ dest += encodeUnsigned(dest, codeDelta - maxCommonDelta);
}
else
{
/* use encoding: */
/* skip 0100DDDD small delta=DDDD */
- *dest++ = 0x40 | (BYTE)(codeDelta-maxCommonDelta);
+ *dest++ = 0x40 | (BYTE)(codeDelta - maxCommonDelta);
}
codeDelta = maxCommonDelta;
- inx = 3;
+ inx = 3;
goto EMIT_2ND_CALL_ENCODING;
}
- for (inx=0; inx<4; inx++)
+ for (inx = 0; inx < 4; inx++)
{
if (codeDelta == callCommonDelta[inx])
{
-EMIT_2ND_CALL_ENCODING:
+ EMIT_2ND_CALL_ENCODING:
// Emit IPtrMask if needed
CHK_NON_INTRPT_ESP_IPtrMask;
*dest++ = 0xD0 | regMask;
- *dest++ = (inx << 6)
- | (callArgCnt << 3)
- | argMask;
+ *dest++ = (inx << 6) | (callArgCnt << 3) | argMask;
goto NEXT_RPD;
}
}
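Editorial aside, not part of this patch: the two bytes emitted above pack a call site into the 1101pbsd DDCCCMMM form from the encoding table. A small hypothetical decoder (decodeSmallCall is not a JIT function) showing how those fields unpack:

    #include <cassert>
    #include <cstdio>

    // Hypothetical decoder for the two-byte form "call 1101pbsd DDCCCMMM":
    // byte 0 carries the pbsd register mask, byte 1 packs the common-delta
    // index (DD), the argument count (CCC) and the argument mask (MMM).
    static void decodeSmallCall(unsigned char b0, unsigned char b1)
    {
        assert((b0 & 0xF0) == 0xD0);
        unsigned regMask    = b0 & 0x0F;       // pbsd
        unsigned deltaIndex = (b1 >> 6) & 0x3; // DD, indexes callCommonDelta[]
        unsigned argCnt     = (b1 >> 3) & 0x7; // CCC
        unsigned argMask    = b1 & 0x7;        // MMM
        printf("regMask=0x%X deltaIndex=%u argCnt=%u argMask=0x%X\n", regMask, deltaIndex, argCnt, argMask);
    }

    int main()
    {
        decodeSmallCall(0xDB, 0x93); // made-up values: pbsd=1011, DD=10, CCC=010, MMM=011
        return 0;
    }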
@@ -3048,16 +3059,15 @@ EMIT_2ND_CALL_ENCODING:
if ((codeDelta > minCommonDelta) && (codeDelta < maxCommonDelta))
{
- assert((minCommonDelta+16) > maxCommonDelta);
+ assert((minCommonDelta + 16) > maxCommonDelta);
/* use encoding: */
/* skip 0100DDDD small delta=DDDD */
- *dest++ = 0x40 | (BYTE)(codeDelta-minCommonDelta);
+ *dest++ = 0x40 | (BYTE)(codeDelta - minCommonDelta);
codeDelta = minCommonDelta;
- inx = 0;
+ inx = 0;
goto EMIT_2ND_CALL_ENCODING;
}
-
}
}
@@ -3067,7 +3077,7 @@ EMIT_2ND_CALL_ENCODING:
/* use encoding: */
/* skip 01000000 [Delta] arbitrary sized delta */
*dest++ = 0x40;
- dest += encodeUnsigned(dest, codeDelta-i);
+ dest += encodeUnsigned(dest, codeDelta - i);
codeDelta = i;
}
@@ -3089,16 +3099,16 @@ EMIT_2ND_CALL_ENCODING:
}
}
- //Emit IPtrMask if needed
+ // Emit IPtrMask if needed
CHK_NON_INTRPT_ESP_IPtrMask;
/* use encoding: */
/* call 1110RRRR [ArgCnt] [ArgMask] */
*dest++ = 0xE0 | regMask;
- dest += encodeUnsigned(dest, callArgCnt);
+ dest += encodeUnsigned(dest, callArgCnt);
- dest += encodeUnsigned(dest, argMask);
+ dest += encodeUnsigned(dest, argMask);
}
else
{
@@ -3108,7 +3118,7 @@ EMIT_2ND_CALL_ENCODING:
lastOffset = nextOffset;
- if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP)
+ if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP)
{
/* This must be a gcArgPopSingle */
@@ -3119,7 +3129,7 @@ EMIT_2ND_CALL_ENCODING:
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
- dest += encodeUnsigned(dest, codeDelta-15);
+ dest += encodeUnsigned(dest, codeDelta - 15);
codeDelta = 15;
}
@@ -3140,7 +3150,7 @@ EMIT_2ND_CALL_ENCODING:
/* use encoding: */
/* skip 01000000 [Delta] */
*dest++ = 0x40;
- dest += encodeUnsigned(dest, codeDelta-31);
+ dest += encodeUnsigned(dest, codeDelta - 31);
codeDelta = 31;
}
@@ -3157,25 +3167,23 @@ EMIT_2ND_CALL_ENCODING:
}
}
- /* We ignore the register live/dead information, since the
- * rpdCallRegMask contains all the liveness information
- * that we need
- */
-NEXT_RPD:
+ /* We ignore the register live/dead information, since the
+ * rpdCallRegMask contains all the liveness information
+ * that we need
+ */
+ NEXT_RPD:
totalSize += dest - base;
/* Go back to the buffer start if we're not generating a table */
- if (!mask)
+ if (!mask)
dest = base;
#if REGEN_CALLPAT
- if ((mask==-1) && (usePopEncoding==false) && ((dest-base) > 0))
- regenLog(origCodeDelta, argMask, regMask, callArgCnt,
- byrefArgMask, byrefRegMask, base, (dest-base));
+ if ((mask == -1) && (usePopEncoding == false) && ((dest - base) > 0))
+ regenLog(origCodeDelta, argMask, regMask, callArgCnt, byrefArgMask, byrefRegMask, base, (dest - base));
#endif
-
}
/* Verify that we pop every arg that was pushed and that argMask is 0 */
@@ -3186,13 +3194,15 @@ NEXT_RPD:
/* Terminate the table with 0xFF */
- *dest = 0xFF; dest -= mask; totalSize++;
+ *dest = 0xFF;
+ dest -= mask;
+ totalSize++;
}
#if VERIFY_GC_TABLES
if (mask)
{
- *(short *)dest = (short)0xBEEB;
+ *(short*)dest = (short)0xBEEB;
dest += sizeof(short);
}
totalSize += sizeof(short);
@@ -3205,8 +3215,7 @@ NEXT_RPD:
#endif
-
- return totalSize;
+ return totalSize;
}
#ifdef _PREFAST_
#pragma warning(pop)
@@ -3222,24 +3231,21 @@ NEXT_RPD:
#include "gcdump.h"
#if VERIFY_GC_TABLES
-const bool verifyGCTables = true;
+const bool verifyGCTables = true;
#else
-const bool verifyGCTables = false;
+const bool verifyGCTables = false;
#endif
-
/*****************************************************************************
*
* Dump the info block header.
*/
-unsigned GCInfo::gcInfoBlockHdrDump(const BYTE* table,
- InfoHdr* header,
- unsigned* methodSize)
+unsigned GCInfo::gcInfoBlockHdrDump(const BYTE* table, InfoHdr* header, unsigned* methodSize)
{
GCDump gcDump(GCINFO_VERSION);
- gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
+ gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
printf("Method info block:\n");
return gcDump.DumpInfoHdr(table, header, methodSize, verifyGCTables);
@@ -3247,30 +3253,25 @@ unsigned GCInfo::gcInfoBlockHdrDump(const BYTE* table,
/*****************************************************************************/
-unsigned GCInfo::gcDumpPtrTable(const BYTE* table,
- const InfoHdr& header,
- unsigned methodSize)
+unsigned GCInfo::gcDumpPtrTable(const BYTE* table, const InfoHdr& header, unsigned methodSize)
{
printf("Pointer table:\n");
GCDump gcDump(GCINFO_VERSION);
- gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
+ gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
return gcDump.DumpGCTable(table, header, methodSize, verifyGCTables);
}
-
/*****************************************************************************
*
* Find all the live pointers in a stack frame.
*/
-void GCInfo::gcFindPtrsInFrame(const void* infoBlock,
- const void* codeBlock,
- unsigned offs)
+void GCInfo::gcFindPtrsInFrame(const void* infoBlock, const void* codeBlock, unsigned offs)
{
GCDump gcDump(GCINFO_VERSION);
- gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
+ gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
gcDump.DumpPtrsInFrame((const BYTE*)infoBlock, (const BYTE*)codeBlock, offs, verifyGCTables);
}
@@ -3288,71 +3289,65 @@ template class SimplerHashTable<StackSlotIdKey, StackSlotIdKey, GcSlotId, JitSim
#ifdef DEBUG
-void GCInfo::gcDumpVarPtrDsc(varPtrDsc * desc)
+void GCInfo::gcDumpVarPtrDsc(varPtrDsc* desc)
{
const int offs = (desc->vpdVarNum & ~OFFSET_MASK);
const GCtype gcType = (desc->vpdVarNum & byref_OFFSET_FLAG) ? GCT_BYREF : GCT_GCREF;
const bool isPin = (desc->vpdVarNum & pinned_OFFSET_FLAG) != 0;
- printf("[%08X] %s%s var at [%s",
- dspPtr(desc),
- GCtypeStr(gcType),
- isPin ? "pinned-ptr" : "",
+ printf("[%08X] %s%s var at [%s", dspPtr(desc), GCtypeStr(gcType), isPin ? "pinned-ptr" : "",
compiler->isFramePointerUsed() ? STR_FPBASE : STR_SPBASE);
- if (offs < 0)
+ if (offs < 0)
+ {
printf("-%02XH", -offs);
+ }
else if (offs > 0)
+ {
printf("+%02XH", +offs);
+ }
- printf("] live from %04X to %04X\n",
- desc->vpdBegOfs,
- desc->vpdEndOfs);
+ printf("] live from %04X to %04X\n", desc->vpdBegOfs, desc->vpdEndOfs);
}
-static const char * const GcSlotFlagsNames[] = {
- "",
- "(byref) ",
- "(pinned) ",
- "(byref, pinned) ",
- "(untracked) ",
- "(byref, untracked) ",
- "(pinned, untracked) ",
- "(byref, pinned, untracked) "
-};
+static const char* const GcSlotFlagsNames[] = {"",
+ "(byref) ",
+ "(pinned) ",
+ "(byref, pinned) ",
+ "(untracked) ",
+ "(byref, untracked) ",
+ "(pinned, untracked) ",
+ "(byref, pinned, untracked) "};
// I'm making a local wrapper class for GcInfoEncoder so that I can add logging of my own (DLD).
class GcInfoEncoderWithLogging
{
GcInfoEncoder* m_gcInfoEncoder;
- bool m_doLogging;
-
-public:
+ bool m_doLogging;
- GcInfoEncoderWithLogging(GcInfoEncoder* gcInfoEncoder, bool verbose) :
- m_gcInfoEncoder(gcInfoEncoder),
- m_doLogging(verbose || JitConfig.JitGCInfoLogging() != 0)
- {}
+public:
+ GcInfoEncoderWithLogging(GcInfoEncoder* gcInfoEncoder, bool verbose)
+ : m_gcInfoEncoder(gcInfoEncoder), m_doLogging(verbose || JitConfig.JitGCInfoLogging() != 0)
+ {
+ }
- GcSlotId GetStackSlotId( INT32 spOffset, GcSlotFlags flags, GcStackSlotBase spBase = GC_CALLER_SP_REL )
+ GcSlotId GetStackSlotId(INT32 spOffset, GcSlotFlags flags, GcStackSlotBase spBase = GC_CALLER_SP_REL)
{
GcSlotId newSlotId = m_gcInfoEncoder->GetStackSlotId(spOffset, flags, spBase);
if (m_doLogging)
{
- printf("Stack slot id for offset %d (0x%x) (%s) %s= %d.\n",
- spOffset, spOffset, GcStackSlotBaseNames[spBase],
- GcSlotFlagsNames[flags & 7], newSlotId);
+ printf("Stack slot id for offset %d (0x%x) (%s) %s= %d.\n", spOffset, spOffset,
+ GcStackSlotBaseNames[spBase], GcSlotFlagsNames[flags & 7], newSlotId);
}
return newSlotId;
}
- GcSlotId GetRegisterSlotId( UINT32 regNum, GcSlotFlags flags )
+ GcSlotId GetRegisterSlotId(UINT32 regNum, GcSlotFlags flags)
{
GcSlotId newSlotId = m_gcInfoEncoder->GetRegisterSlotId(regNum, flags);
if (m_doLogging)
{
- printf("Register slot id for reg %s %s= %d.\n",
- getRegName(regNum), GcSlotFlagsNames[flags & 7], newSlotId);
+ printf("Register slot id for reg %s %s= %d.\n", getRegName(regNum), GcSlotFlagsNames[flags & 7], newSlotId);
}
return newSlotId;
}
@@ -3362,8 +3357,8 @@ public:
m_gcInfoEncoder->SetSlotState(instructionOffset, slotId, slotState);
if (m_doLogging)
{
- printf("Set state of slot %d at instr offset 0x%x to %s.\n",
- slotId, instructionOffset, (slotState == GC_SLOT_LIVE ? "Live" : "Dead"));
+ printf("Set state of slot %d at instr offset 0x%x to %s.\n", slotId, instructionOffset,
+ (slotState == GC_SLOT_LIVE ? "Live" : "Dead"));
}
}
@@ -3385,7 +3380,8 @@ public:
m_gcInfoEncoder->DefineInterruptibleRange(startInstructionOffset, length);
if (m_doLogging)
{
- printf("Defining interruptible range: [0x%x, 0x%x).\n", startInstructionOffset, startInstructionOffset + length);
+ printf("Defining interruptible range: [0x%x, 0x%x).\n", startInstructionOffset,
+ startInstructionOffset + length);
}
}
@@ -3416,25 +3412,26 @@ public:
}
}
- void SetPrologSize( UINT32 prologSize )
+ void SetPrologSize(UINT32 prologSize)
{
- m_gcInfoEncoder->SetPrologSize( prologSize );
+ m_gcInfoEncoder->SetPrologSize(prologSize);
if (m_doLogging)
{
printf("Set prolog size 0x%x.\n", prologSize);
}
}
- void SetGSCookieStackSlot( INT32 spOffsetGSCookie, UINT32 validRangeStart, UINT32 validRangeEnd )
+ void SetGSCookieStackSlot(INT32 spOffsetGSCookie, UINT32 validRangeStart, UINT32 validRangeEnd)
{
m_gcInfoEncoder->SetGSCookieStackSlot(spOffsetGSCookie, validRangeStart, validRangeEnd);
if (m_doLogging)
{
- printf("Set GS Cookie stack slot to %d, valid from 0x%x to 0x%x.\n", spOffsetGSCookie, validRangeStart, validRangeEnd);
+ printf("Set GS Cookie stack slot to %d, valid from 0x%x to 0x%x.\n", spOffsetGSCookie, validRangeStart,
+ validRangeEnd);
}
}
- void SetPSPSymStackSlot( INT32 spOffsetPSPSym )
+ void SetPSPSymStackSlot(INT32 spOffsetPSPSym)
{
m_gcInfoEncoder->SetPSPSymStackSlot(spOffsetPSPSym);
if (m_doLogging)
@@ -3443,19 +3440,20 @@ public:
}
}
- void SetGenericsInstContextStackSlot( INT32 spOffsetGenericsContext, GENERIC_CONTEXTPARAM_TYPE type )
+ void SetGenericsInstContextStackSlot(INT32 spOffsetGenericsContext, GENERIC_CONTEXTPARAM_TYPE type)
{
m_gcInfoEncoder->SetGenericsInstContextStackSlot(spOffsetGenericsContext, type);
if (m_doLogging)
{
printf("Set generic instantiation context stack slot to %d, type is %s.\n", spOffsetGenericsContext,
- (type == GENERIC_CONTEXTPARAM_THIS ? "THIS" :
- (type == GENERIC_CONTEXTPARAM_MT ? "MT" :
- (type == GENERIC_CONTEXTPARAM_MD ? "MD" : "UNKNOWN!"))));
+ (type == GENERIC_CONTEXTPARAM_THIS
+ ? "THIS"
+ : (type == GENERIC_CONTEXTPARAM_MT ? "MT"
+ : (type == GENERIC_CONTEXTPARAM_MD ? "MD" : "UNKNOWN!"))));
}
}
- void SetSecurityObjectStackSlot( INT32 spOffset )
+ void SetSecurityObjectStackSlot(INT32 spOffset)
{
m_gcInfoEncoder->SetSecurityObjectStackSlot(spOffset);
if (m_doLogging)
@@ -3490,40 +3488,42 @@ public:
printf("Set Outgoing stack arg area size to %d.\n", size);
}
}
-
};
-#define GCENCODER_WITH_LOGGING(withLog, realEncoder) \
- GcInfoEncoderWithLogging withLog ## Var(realEncoder, compiler->verbose || compiler->opts.dspGCtbls); \
- GcInfoEncoderWithLogging * withLog = &withLog ## Var;
+#define GCENCODER_WITH_LOGGING(withLog, realEncoder) \
+ GcInfoEncoderWithLogging withLog##Var(realEncoder, compiler->verbose || compiler->opts.dspGCtbls); \
+ GcInfoEncoderWithLogging* withLog = &withLog##Var;
#else // DEBUG
-#define GCENCODER_WITH_LOGGING(withLog, realEncoder) \
- GcInfoEncoder * withLog = realEncoder;
+#define GCENCODER_WITH_LOGGING(withLog, realEncoder) GcInfoEncoder* withLog = realEncoder;
#endif // DEBUG
ReturnKind GCTypeToReturnKind(CorInfoGCType gcType)
{
- switch (gcType) {
- case TYPE_GC_NONE: return RT_Scalar;
- case TYPE_GC_REF: return RT_Object;
- case TYPE_GC_BYREF: return RT_ByRef;
- default:
- _ASSERTE(!"TYP_GC_OTHER is unexpected");
- return RT_Illegal;
+ switch (gcType)
+ {
+ case TYPE_GC_NONE:
+ return RT_Scalar;
+ case TYPE_GC_REF:
+ return RT_Object;
+ case TYPE_GC_BYREF:
+ return RT_ByRef;
+ default:
+ _ASSERTE(!"TYP_GC_OTHER is unexpected");
+ return RT_Illegal;
}
}
-void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
- unsigned methodSize,
- unsigned prologSize)
+void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSize, unsigned prologSize)
{
#ifdef DEBUG
if (compiler->verbose)
+ {
printf("*************** In gcInfoBlockHdrSave()\n");
+ }
#endif
GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
@@ -3534,41 +3534,41 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
ReturnKind returnKind = RT_Illegal;
- switch (compiler->info.compRetType)
- {
- case TYP_REF:
- returnKind = RT_Object;
- break;
- case TYP_BYREF:
- returnKind = RT_ByRef;
- break;
- case TYP_STRUCT:
+ switch (compiler->info.compRetType)
{
- CORINFO_CLASS_HANDLE structType = compiler->info.compMethodInfo->args.retTypeClass;
- if (compiler->IsMultiRegReturnedType(structType))
+ case TYP_REF:
+ returnKind = RT_Object;
+ break;
+ case TYP_BYREF:
+ returnKind = RT_ByRef;
+ break;
+ case TYP_STRUCT:
{
- BYTE gcPtrs[2] = { TYPE_GC_NONE, TYPE_GC_NONE };
- compiler->info.compCompHnd->getClassGClayout(structType, gcPtrs);
+ CORINFO_CLASS_HANDLE structType = compiler->info.compMethodInfo->args.retTypeClass;
+ if (compiler->IsMultiRegReturnedType(structType))
+ {
+ BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
+ compiler->info.compCompHnd->getClassGClayout(structType, gcPtrs);
- ReturnKind first = GCTypeToReturnKind((CorInfoGCType)gcPtrs[0]);
- ReturnKind second = GCTypeToReturnKind((CorInfoGCType)gcPtrs[1]);
+ ReturnKind first = GCTypeToReturnKind((CorInfoGCType)gcPtrs[0]);
+ ReturnKind second = GCTypeToReturnKind((CorInfoGCType)gcPtrs[1]);
- returnKind = GetStructReturnKind(first, second);
+ returnKind = GetStructReturnKind(first, second);
+ }
+ else
+ {
+ returnKind = RT_Scalar;
+ }
+ break;
}
- else
- {
+ default:
returnKind = RT_Scalar;
- }
- break;
- }
- default:
- returnKind = RT_Scalar;
}
_ASSERTE(returnKind != RT_Illegal);
gcInfoEncoderWithLog->SetReturnKind(returnKind);
- if (compiler->isFramePointerUsed())
+ if (compiler->isFramePointerUsed())
{
gcInfoEncoderWithLog->SetStackBaseRegister(REG_FPBASE);
}
@@ -3580,7 +3580,7 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
// No equivalents.
// header->profCallbacks = compiler->info.compProfilerCallback;
// header->editNcontinue = compiler->opts.compDbgEnC;
- //
+ //
if (compiler->lvaReportParamTypeArg())
{
// The predicate above is true only if there is an extra generic context parameter, not for
@@ -3589,23 +3589,25 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
GENERIC_CONTEXTPARAM_TYPE ctxtParamType = GENERIC_CONTEXTPARAM_NONE;
switch (compiler->info.compMethodInfo->options & CORINFO_GENERICS_CTXT_MASK)
{
- case CORINFO_GENERICS_CTXT_FROM_METHODDESC:
- ctxtParamType = GENERIC_CONTEXTPARAM_MD; break;
- case CORINFO_GENERICS_CTXT_FROM_METHODTABLE:
- ctxtParamType = GENERIC_CONTEXTPARAM_MT; break;
+ case CORINFO_GENERICS_CTXT_FROM_METHODDESC:
+ ctxtParamType = GENERIC_CONTEXTPARAM_MD;
+ break;
+ case CORINFO_GENERICS_CTXT_FROM_METHODTABLE:
+ ctxtParamType = GENERIC_CONTEXTPARAM_MT;
+ break;
- case CORINFO_GENERICS_CTXT_FROM_THIS: // See comment above.
- default:
- // If we have a generic context parameter, then we should have
- // one of the two options flags handled above.
- assert(false);
+ case CORINFO_GENERICS_CTXT_FROM_THIS: // See comment above.
+ default:
+ // If we have a generic context parameter, then we should have
+ // one of the two options flags handled above.
+ assert(false);
}
gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(
compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
compiler->isFramePointerUsed()),
ctxtParamType);
- }
+ }
// As discussed above, handle the case where the generics context is obtained via
// the method table of "this".
else if (compiler->lvaKeepAliveAndReportThis())
@@ -3625,9 +3627,12 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
// The code offset ranges assume that the GS Cookie slot is initialized in the prolog, and is valid
// through the remainder of the method. We will not query for the GS Cookie while we're in an epilog,
// so the question of where in the epilog it becomes invalid is moot.
- gcInfoEncoderWithLog->SetGSCookieStackSlot(compiler->lvaGetCallerSPRelativeOffset(compiler->lvaGSSecurityCookie), prologSize, methodSize);
+ gcInfoEncoderWithLog->SetGSCookieStackSlot(compiler->lvaGetCallerSPRelativeOffset(
+ compiler->lvaGSSecurityCookie),
+ prologSize, methodSize);
}
- else if (compiler->opts.compNeedSecurityCheck || compiler->lvaReportParamTypeArg() || compiler->lvaKeepAliveAndReportThis())
+ else if (compiler->opts.compNeedSecurityCheck || compiler->lvaReportParamTypeArg() ||
+ compiler->lvaKeepAliveAndReportThis())
{
gcInfoEncoderWithLog->SetPrologSize(prologSize);
}
@@ -3644,7 +3649,8 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
// The normal GC lifetime reporting mechanisms will report a proper lifetime to the GC.
// The security subsystem can safely assume that anywhere it might walk the stack, it will be
// valid (null or a live GC ref).
- gcInfoEncoderWithLog->SetSecurityObjectStackSlot(compiler->lvaGetCallerSPRelativeOffset(compiler->lvaSecurityObject));
+ gcInfoEncoderWithLog->SetSecurityObjectStackSlot(
+ compiler->lvaGetCallerSPRelativeOffset(compiler->lvaSecurityObject));
}
#if FEATURE_EH_FUNCLETS
@@ -3655,7 +3661,7 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
#ifdef _TARGET_AMD64_
// The PSPSym is relative to InitialSP on X64 and CallerSP on other platforms.
gcInfoEncoderWithLog->SetPSPSymStackSlot(compiler->lvaGetInitialSPRelativeOffset(compiler->lvaPSPSym));
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
gcInfoEncoderWithLog->SetPSPSymStackSlot(compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
#endif // !_TARGET_AMD64_
}
@@ -3667,7 +3673,7 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
}
#endif // FEATURE_EH_FUNCLETS
- //outgoing stack area size
+ // outgoing stack area size
gcInfoEncoderWithLog->SetSizeOfStackOutgoingAndScratchArea(compiler->lvaOutgoingArgSpaceSize);
#if DISPLAY_SIZES
@@ -3682,7 +3688,6 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
}
#endif // DISPLAY_SIZES
-
}
#ifdef DEBUG
@@ -3698,13 +3703,11 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
//
struct InterruptibleRangeReporter
{
- unsigned prevStart;
- Encoder * gcInfoEncoderWithLog;
+ unsigned prevStart;
+ Encoder* gcInfoEncoderWithLog;
- InterruptibleRangeReporter(unsigned _prevStart,
- Encoder * _gcInfo) :
- prevStart(_prevStart),
- gcInfoEncoderWithLog(_gcInfo)
+ InterruptibleRangeReporter(unsigned _prevStart, Encoder* _gcInfo)
+ : prevStart(_prevStart), gcInfoEncoderWithLog(_gcInfo)
{
}
@@ -3734,11 +3737,10 @@ struct InterruptibleRangeReporter
}
};
-
-void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
- unsigned codeSize,
- unsigned prologSize,
- MakeRegPtrMode mode)
+void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
+ unsigned codeSize,
+ unsigned prologSize,
+ MakeRegPtrMode mode)
{
GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
@@ -3756,34 +3758,34 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
*/
unsigned count = 0;
-
+
int lastoffset = 0;
/* Count&Write untracked locals and non-enregistered args */
- unsigned varNum;
+ unsigned varNum;
LclVarDsc* varDsc;
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
- {
- if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
- {
- // Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
+ {
+ if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
+ {
+ // Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
// reported through its parent local.
continue;
- }
+ }
- if (varTypeIsGC(varDsc->TypeGet()))
+ if (varTypeIsGC(varDsc->TypeGet()))
{
// Do we have an argument or local variable?
- if (!varDsc->lvIsParam)
+ if (!varDsc->lvIsParam)
{
// If it is pinned, it must be an untracked local.
assert(!varDsc->lvPinned || !varDsc->lvTracked);
- if (varDsc->lvTracked || !varDsc->lvOnFrame)
+ if (varDsc->lvTracked || !varDsc->lvOnFrame)
+ {
continue;
+ }
}
else
{
@@ -3796,8 +3798,8 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
#ifndef LEGACY_BACKEND
if (!varDsc->lvOnFrame)
-#else // LEGACY_BACKEND
- if (varDsc->lvRegister)
+#else // LEGACY_BACKEND
+ if (varDsc->lvRegister)
#endif // LEGACY_BACKEND
{
// If a CEE_JMP has been used, then we need to report all the arguments
@@ -3806,18 +3808,20 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
// argument offsets are always fixed up properly even if lvRegister
// is set.
if (!compiler->compJmpOpUsed)
+ {
continue;
+ }
}
else
{
- if (!varDsc->lvOnFrame)
+ if (!varDsc->lvOnFrame)
{
// If this non-enregistered pointer arg is never
// used, we don't need to report it.
assert(varDsc->lvRefCnt == 0);
continue;
}
- else if (varDsc->lvIsRegArg && varDsc->lvTracked)
+ else if (varDsc->lvIsRegArg && varDsc->lvTracked)
{
// If this register-passed arg is tracked, then
// it has been allocated space near the other
@@ -3853,9 +3857,12 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
flags = (GcSlotFlags)(flags | GC_SLOT_PINNED);
}
GcStackSlotBase stackSlotBase = GC_SP_REL;
- if (varDsc->lvFramePointerBased) stackSlotBase = GC_FRAMEREG_REL;
+ if (varDsc->lvFramePointerBased)
+ {
+ stackSlotBase = GC_FRAMEREG_REL;
+ }
StackSlotIdKey sskey(varDsc->lvStkOffs, (stackSlotBase == GC_FRAMEREG_REL), flags);
- GcSlotId varSlotId;
+ GcSlotId varSlotId;
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
@@ -3868,21 +3875,23 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
// If this is a TYP_STRUCT, handle its GC pointers.
// Note that the enregisterable struct types cannot have GC pointers in them.
- if ((varDsc->lvType == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
+ if ((varDsc->lvType == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
unsigned slots = compiler->lvaLclSize(varNum) / sizeof(void*);
- BYTE * gcPtrs = compiler->lvaGetGcLayout(varNum);
+ BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
// walk each member of the array
for (unsigned i = 0; i < slots; i++)
{
- if (gcPtrs[i] == TYPE_GC_NONE) // skip non-gc slots
+ if (gcPtrs[i] == TYPE_GC_NONE)
+ { // skip non-gc slots
continue;
+ }
int offset = varDsc->lvStkOffs + i * sizeof(void*);
#if DOUBLE_ALIGN
// For genDoubleAlign(), locals are addressed relative to ESP and
- // arguments are addressed relative to EBP.
+ // arguments are addressed relative to EBP.
if (genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)
offset += compiler->codeGen->genTotalFrameSize();
@@ -3894,9 +3903,12 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
}
GcStackSlotBase stackSlotBase = GC_SP_REL;
- if (varDsc->lvFramePointerBased) stackSlotBase = GC_FRAMEREG_REL;
+ if (varDsc->lvFramePointerBased)
+ {
+ stackSlotBase = GC_FRAMEREG_REL;
+ }
StackSlotIdKey sskey(offset, (stackSlotBase == GC_FRAMEREG_REL), flags);
- GcSlotId varSlotId;
+ GcSlotId varSlotId;
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
@@ -3914,11 +3926,9 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
// Count&Write spill temps that hold pointers.
assert(compiler->tmpAllFree());
- for (TempDsc* tempItem = compiler->tmpListBeg();
- tempItem != nullptr;
- tempItem = compiler->tmpListNxt(tempItem))
+ for (TempDsc* tempItem = compiler->tmpListBeg(); tempItem != nullptr; tempItem = compiler->tmpListNxt(tempItem))
{
- if (varTypeIsGC(tempItem->tdTempType()))
+ if (varTypeIsGC(tempItem->tdTempType()))
{
int offset = tempItem->tdTempOffs();
@@ -3929,9 +3939,12 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
}
GcStackSlotBase stackSlotBase = GC_SP_REL;
- if (compiler->isFramePointerUsed()) stackSlotBase = GC_FRAMEREG_REL;
+ if (compiler->isFramePointerUsed())
+ {
+ stackSlotBase = GC_FRAMEREG_REL;
+ }
StackSlotIdKey sskey(offset, (stackSlotBase == GC_FRAMEREG_REL), flags);
- GcSlotId varSlotId;
+ GcSlotId varSlotId;
if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
{
varSlotId = gcInfoEncoderWithLog->GetStackSlotId(offset, flags, stackSlotBase);
@@ -3957,13 +3970,11 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
gcInfoEncoderWithLog->GetStackSlotId(compiler->lvaCachedGenericContextArgOffset(), flags, stackSlotBase);
}
-
}
// Generate the table of tracked stack pointer variable lifetimes.
gcMakeVarPtrTable(gcInfoEncoder, mode);
-
/**************************************************************************
*
* Prepare to generate the pointer register/argument map
@@ -3971,43 +3982,43 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
**************************************************************************
*/
- if (compiler->codeGen->genInterruptible)
+ if (compiler->codeGen->genInterruptible)
{
assert(compiler->genFullPtrRegMap);
- regMaskSmall ptrRegs = 0;
- regPtrDsc* regStackArgFirst = NULL;
+ regMaskSmall ptrRegs = 0;
+ regPtrDsc* regStackArgFirst = nullptr;
// Walk the list of pointer register/argument entries.
- for (regPtrDsc* genRegPtrTemp = gcRegPtrList;
- genRegPtrTemp != NULL;
- genRegPtrTemp = genRegPtrTemp->rpdNext)
+ for (regPtrDsc* genRegPtrTemp = gcRegPtrList; genRegPtrTemp != nullptr; genRegPtrTemp = genRegPtrTemp->rpdNext)
{
int nextOffset = genRegPtrTemp->rpdOffs;
- if (genRegPtrTemp->rpdArg)
+ if (genRegPtrTemp->rpdArg)
{
if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_KILL)
{
// Kill all arguments for a call
- if ((mode == MAKE_REG_PTR_MODE_DO_WORK) && (regStackArgFirst != NULL))
+ if ((mode == MAKE_REG_PTR_MODE_DO_WORK) && (regStackArgFirst != nullptr))
{
// Record any outgoing arguments as becoming dead
- gcInfoRecordGCStackArgsDead(gcInfoEncoder, genRegPtrTemp->rpdOffs, regStackArgFirst, genRegPtrTemp);
+ gcInfoRecordGCStackArgsDead(gcInfoEncoder, genRegPtrTemp->rpdOffs, regStackArgFirst,
+ genRegPtrTemp);
}
- regStackArgFirst = NULL;
+ regStackArgFirst = nullptr;
}
- else if (genRegPtrTemp->rpdGCtypeGet() != GCT_NONE)
+ else if (genRegPtrTemp->rpdGCtypeGet() != GCT_NONE)
{
- if ( genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH ||
- (genRegPtrTemp->rpdPtrArg!=0))
+ if (genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH || (genRegPtrTemp->rpdPtrArg != 0))
{
bool isPop = genRegPtrTemp->rpdArgTypeGet() == rpdARG_POP;
assert(!isPop);
gcInfoRecordGCStackArgLive(gcInfoEncoder, mode, genRegPtrTemp);
- if (regStackArgFirst == NULL)
+ if (regStackArgFirst == nullptr)
+ {
regStackArgFirst = genRegPtrTemp;
+ }
}
else
{
@@ -4019,32 +4030,38 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
assert(genRegPtrTemp->rpdArg && genRegPtrTemp->rpdIsCallInstr());
// Kill all arguments for a call
- if ((mode == MAKE_REG_PTR_MODE_DO_WORK) && (regStackArgFirst != NULL))
+ if ((mode == MAKE_REG_PTR_MODE_DO_WORK) && (regStackArgFirst != nullptr))
{
// Record any outgoing arguments as becoming dead
- gcInfoRecordGCStackArgsDead(gcInfoEncoder, genRegPtrTemp->rpdOffs, regStackArgFirst, genRegPtrTemp);
+ gcInfoRecordGCStackArgsDead(gcInfoEncoder, genRegPtrTemp->rpdOffs, regStackArgFirst,
+ genRegPtrTemp);
}
- regStackArgFirst = NULL;
+ regStackArgFirst = nullptr;
}
-
}
}
else
{
// Record any registers that are becoming dead.
- regMaskSmall regMask = genRegPtrTemp->rpdCompiler.rpdDel & ptrRegs;
+ regMaskSmall regMask = genRegPtrTemp->rpdCompiler.rpdDel & ptrRegs;
regMaskSmall byRefMask = 0;
- if (genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF) byRefMask = regMask;
- gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_DEAD,
+ if (genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF)
+ {
+ byRefMask = regMask;
+ }
+ gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_DEAD,
byRefMask, &ptrRegs);
-
+
// Record any registers that are becoming live.
- regMask = genRegPtrTemp->rpdCompiler.rpdAdd & ~ptrRegs;
+ regMask = genRegPtrTemp->rpdCompiler.rpdAdd & ~ptrRegs;
byRefMask = 0;
// As far as I (DLD, 2010) can tell, there's one GCtype for the entire genRegPtrTemp, so if
// it says byref then all the registers in "regMask" contain byrefs.
- if (genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF) byRefMask = regMask;
+ if (genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF)
+ {
+ byRefMask = regMask;
+ }
gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_LIVE,
byRefMask, &ptrRegs);
}
@@ -4067,10 +4084,9 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
{
gcInfoEncoderWithLog->DefineInterruptibleRange(prologSize, codeSize - prologSize);
}
-
}
}
- else if (compiler->isFramePointerUsed()) // genInterruptible is false, and we're using EBP as a frame pointer.
+ else if (compiler->isFramePointerUsed()) // genInterruptible is false, and we're using EBP as a frame pointer.
{
assert(compiler->genFullPtrRegMap == false);
@@ -4080,31 +4096,34 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
unsigned numCallSites = 0;
// Now we can allocate the information.
- unsigned* pCallSites = NULL;
- BYTE* pCallSiteSizes = NULL;
- unsigned callSiteNum = 0;
+ unsigned* pCallSites = nullptr;
+ BYTE* pCallSiteSizes = nullptr;
+ unsigned callSiteNum = 0;
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
- if (gcCallDescList != NULL)
+ if (gcCallDescList != nullptr)
{
- for (CallDsc* call = gcCallDescList; call != NULL; call = call->cdNext) numCallSites++;
- pCallSites = new (compiler, CMK_GC) unsigned[numCallSites];
+ for (CallDsc* call = gcCallDescList; call != nullptr; call = call->cdNext)
+ {
+ numCallSites++;
+ }
+ pCallSites = new (compiler, CMK_GC) unsigned[numCallSites];
pCallSiteSizes = new (compiler, CMK_GC) BYTE[numCallSites];
}
}
// Now consider every call.
- for (CallDsc* call = gcCallDescList; call != NULL; call = call->cdNext)
+ for (CallDsc* call = gcCallDescList; call != nullptr; call = call->cdNext)
{
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
- pCallSites[callSiteNum] = call->cdOffs - call->cdCallInstrSize;
+ pCallSites[callSiteNum] = call->cdOffs - call->cdCallInstrSize;
pCallSiteSizes[callSiteNum] = call->cdCallInstrSize;
callSiteNum++;
}
-
- unsigned nextOffset;
+
+ unsigned nextOffset;
// Figure out the code offset of this entry.
nextOffset = call->cdOffs;
@@ -4134,9 +4153,10 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
// the call instruction size to get the offset of the actual call instruction...
unsigned callOffset = call->cdOffs - call->cdCallInstrSize;
// Record that these registers are live before the call...
- gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, callOffset, regMask, GC_SLOT_LIVE, byrefRegMask, NULL);
+ gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, callOffset, regMask, GC_SLOT_LIVE, byrefRegMask, nullptr);
// ...and dead after.
- gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, call->cdOffs, regMask, GC_SLOT_DEAD, byrefRegMask, NULL);
+ gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, call->cdOffs, regMask, GC_SLOT_DEAD, byrefRegMask,
+ nullptr);
}
// OK, define the call sites.
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
@@ -4153,29 +4173,29 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
unsigned numCallSites = 0;
// Now we can allocate the information (if we're in the "DO_WORK" pass...)
- unsigned* pCallSites = NULL;
- BYTE* pCallSiteSizes = NULL;
- unsigned callSiteNum = 0;
+ unsigned* pCallSites = nullptr;
+ BYTE* pCallSiteSizes = nullptr;
+ unsigned callSiteNum = 0;
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
- for (regPtrDsc* genRegPtrTemp = gcRegPtrList;
- genRegPtrTemp != NULL;
- genRegPtrTemp = genRegPtrTemp->rpdNext)
+ for (regPtrDsc* genRegPtrTemp = gcRegPtrList; genRegPtrTemp != nullptr;
+ genRegPtrTemp = genRegPtrTemp->rpdNext)
{
- if (genRegPtrTemp->rpdArg && genRegPtrTemp->rpdIsCallInstr()) numCallSites++;
+ if (genRegPtrTemp->rpdArg && genRegPtrTemp->rpdIsCallInstr())
+ {
+ numCallSites++;
+ }
}
if (numCallSites > 0)
{
- pCallSites = new (compiler, CMK_GC) unsigned[numCallSites];
+ pCallSites = new (compiler, CMK_GC) unsigned[numCallSites];
pCallSiteSizes = new (compiler, CMK_GC) BYTE[numCallSites];
}
}
- for (regPtrDsc* genRegPtrTemp = gcRegPtrList;
- genRegPtrTemp != NULL;
- genRegPtrTemp = genRegPtrTemp->rpdNext)
+ for (regPtrDsc* genRegPtrTemp = gcRegPtrList; genRegPtrTemp != nullptr; genRegPtrTemp = genRegPtrTemp->rpdNext)
{
if (genRegPtrTemp->rpdArg)
{
@@ -4196,17 +4216,19 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
// GcInfoEncoder wants the call instruction, so subtract the width of the call instruction.
assert(genRegPtrTemp->rpdOffs >= genRegPtrTemp->rpdCallInstrSize);
unsigned callOffset = genRegPtrTemp->rpdOffs - genRegPtrTemp->rpdCallInstrSize;
-
+
// Tell the GCInfo encoder about these registers. We say that the registers become live
// before the call instruction, and dead after.
- gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, callOffset, regMask, GC_SLOT_LIVE, byrefRegMask, NULL);
- gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_DEAD, byrefRegMask, NULL);
+ gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, callOffset, regMask, GC_SLOT_LIVE, byrefRegMask,
+ nullptr);
+ gcInfoRecordGCRegStateChange(gcInfoEncoder, mode, genRegPtrTemp->rpdOffs, regMask, GC_SLOT_DEAD,
+ byrefRegMask, nullptr);
// Also remember the call site.
if (mode == MAKE_REG_PTR_MODE_DO_WORK)
{
- assert(pCallSites != NULL && pCallSiteSizes != NULL);
- pCallSites[callSiteNum] = callOffset;
+ assert(pCallSites != nullptr && pCallSiteSizes != nullptr);
+ pCallSites[callSiteNum] = callOffset;
pCallSiteSizes[callSiteNum] = genRegPtrTemp->rpdCallInstrSize;
callSiteNum++;
}
@@ -4228,13 +4250,13 @@ void GCInfo::gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder,
}
}
-void GCInfo::gcInfoRecordGCRegStateChange(GcInfoEncoder* gcInfoEncoder,
- MakeRegPtrMode mode,
- unsigned instrOffset,
- regMaskSmall regMask,
- GcSlotState newState,
- regMaskSmall byRefMask,
- regMaskSmall* pPtrRegs)
+void GCInfo::gcInfoRecordGCRegStateChange(GcInfoEncoder* gcInfoEncoder,
+ MakeRegPtrMode mode,
+ unsigned instrOffset,
+ regMaskSmall regMask,
+ GcSlotState newState,
+ regMaskSmall byRefMask,
+ regMaskSmall* pPtrRegs)
{
// Precondition: byRefMask is a subset of regMask.
assert((byRefMask & ~regMask) == 0);
@@ -4244,31 +4266,36 @@ void GCInfo::gcInfoRecordGCRegStateChange(GcInfoEncoder* gcInfoEn
while (regMask)
{
// Get hold of the next register bit.
- regMaskTP tmpMask = genFindLowestReg(regMask); assert(tmpMask);
+ regMaskTP tmpMask = genFindLowestReg(regMask);
+ assert(tmpMask);
// Remember the new state of this register.
- if (pPtrRegs != NULL)
+ if (pPtrRegs != nullptr)
{
if (newState == GC_SLOT_DEAD)
+ {
*pPtrRegs &= ~tmpMask;
+ }
else
+ {
*pPtrRegs |= tmpMask;
+ }
}
// Figure out which register the next bit corresponds to.
- regNumber regNum = genRegNumFromMask(tmpMask);
+ regNumber regNum = genRegNumFromMask(tmpMask);
/* Reserve SP for future use */
assert(regNum != REG_SPBASE);
GcSlotFlags regFlags = GC_SLOT_BASE;
- if ((tmpMask & byRefMask) != 0)
+ if ((tmpMask & byRefMask) != 0)
{
- regFlags = (GcSlotFlags) (regFlags | GC_SLOT_INTERIOR);
+ regFlags = (GcSlotFlags)(regFlags | GC_SLOT_INTERIOR);
}
RegSlotIdKey rskey(regNum, regFlags);
- GcSlotId regSlotId;
+ GcSlotId regSlotId;
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
if (!m_regSlotMap->Lookup(rskey, &regSlotId))
@@ -4280,7 +4307,7 @@ void GCInfo::gcInfoRecordGCRegStateChange(GcInfoEncoder* gcInfoEn
else
{
BOOL b = m_regSlotMap->Lookup(rskey, &regSlotId);
- assert(b); // Should have been added in the first pass.
+ assert(b); // Should have been added in the first pass.
gcInfoEncoderWithLog->SetSlotState(instrOffset, regSlotId, newState);
}
@@ -4300,14 +4327,13 @@ void GCInfo::gcInfoRecordGCRegStateChange(GcInfoEncoder* gcInfoEn
**************************************************************************
*/
-void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder,
- MakeRegPtrMode mode)
+void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode)
{
GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
// Make sure any flags we hide in the offset are in the bits guaranteed
// unused by alignment
- C_ASSERT((OFFSET_MASK+1) <= sizeof(int));
+ C_ASSERT((OFFSET_MASK + 1) <= sizeof(int));
#ifdef DEBUG
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
@@ -4316,7 +4342,7 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder,
// advantage of that by using the same bit for 'pinned' and 'this'
// Since we don't track 'this', we should never see either flag here.
// Check it now before we potentially add some pinned flags.
- for (varPtrDsc* varTmp = gcVarPtrList; varTmp != NULL; varTmp = varTmp->vpdNext)
+ for (varPtrDsc* varTmp = gcVarPtrList; varTmp != nullptr; varTmp = varTmp->vpdNext)
{
const unsigned flags = varTmp->vpdVarNum & OFFSET_MASK;
assert((flags & pinned_OFFSET_FLAG) == 0);
@@ -4331,24 +4357,26 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder,
gcMarkFilterVarsPinned();
}
- for (varPtrDsc* varTmp = gcVarPtrList; varTmp != NULL; varTmp = varTmp->vpdNext)
+ for (varPtrDsc* varTmp = gcVarPtrList; varTmp != nullptr; varTmp = varTmp->vpdNext)
{
- C_ASSERT((OFFSET_MASK+1) <= sizeof(int));
+ C_ASSERT((OFFSET_MASK + 1) <= sizeof(int));
// Get hold of the variable's stack offset.
- unsigned lowBits = varTmp->vpdVarNum & OFFSET_MASK;
+ unsigned lowBits = varTmp->vpdVarNum & OFFSET_MASK;
// For negative stack offsets we must reset the low bits
- int varOffs = static_cast<int>(varTmp->vpdVarNum & ~OFFSET_MASK);
+ int varOffs = static_cast<int>(varTmp->vpdVarNum & ~OFFSET_MASK);
// Compute the actual lifetime offsets.
unsigned begOffs = varTmp->vpdBegOfs;
unsigned endOffs = varTmp->vpdEndOfs;
// Special case: skip any 0-length lifetimes.
- if (endOffs == begOffs)
+ if (endOffs == begOffs)
+ {
continue;
+ }
GcSlotFlags flags = GC_SLOT_BASE;
if ((lowBits & byref_OFFSET_FLAG) != 0)
@@ -4361,12 +4389,12 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder,
}
GcStackSlotBase stackSlotBase = GC_SP_REL;
- if (compiler->isFramePointerUsed())
+ if (compiler->isFramePointerUsed())
{
stackSlotBase = GC_FRAMEREG_REL;
}
StackSlotIdKey sskey(varOffs, (stackSlotBase == GC_FRAMEREG_REL), flags);
- GcSlotId varSlotId;
+ GcSlotId varSlotId;
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
if (!m_stackSlotMap->Lookup(sskey, &varSlotId))
@@ -4378,7 +4406,7 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder,
else
{
BOOL b = m_stackSlotMap->Lookup(sskey, &varSlotId);
- assert(b); // Should have been added in the first pass.
+ assert(b); // Should have been added in the first pass.
// Live from the beginning to the end.
gcInfoEncoderWithLog->SetSlotState(begOffs, varSlotId, GC_SLOT_LIVE);
gcInfoEncoderWithLog->SetSlotState(endOffs, varSlotId, GC_SLOT_DEAD);
@@ -4386,7 +4414,6 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder,
}
}
-
// gcMarkFilterVarsPinned - Walk all lifetimes and make it so that anything
// live in a filter is marked as pinned (often by splitting the lifetime
// so that *only* the filter region is pinned). This should only be
@@ -4407,34 +4434,38 @@ void GCInfo::gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder,
// variables as pinned inside the filter. Thus if they are double reported, it
// won't be a problem since they won't be double relocated.
//
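Editorial aside, not part of this patch: a tiny sketch of the splitting described above for a lifetime that straddles a filter region; the Lifetime struct and the offsets are made up for illustration and do not correspond to the JIT's varPtrDsc fields.

    #include <cstdio>

    // Hypothetical model of a tracked lifetime [beg, end) in code offsets.
    struct Lifetime
    {
        unsigned beg;
        unsigned end;
        bool     pinned;
    };

    int main()
    {
        Lifetime       var       = {0x10, 0x80, false}; // straddles the filter
        const unsigned filterBeg = 0x30;
        const unsigned filterEnd = 0x50;

        // The original lifetime is trimmed to end at the filter, and two new
        // descriptors are added: a pinned one covering exactly the filter and
        // an unpinned one covering the remainder after the filter.
        Lifetime before = {var.beg, filterBeg, false};
        Lifetime inside = {filterBeg, filterEnd, true};
        Lifetime after  = {filterEnd, var.end, false};

        const Lifetime pieces[] = {before, inside, after};
        for (const Lifetime& lt : pieces)
        {
            printf("[%02X, %02X) pinned=%d\n", lt.beg, lt.end, lt.pinned);
        }
        return 0;
    }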
-void GCInfo::gcMarkFilterVarsPinned()
+void GCInfo::gcMarkFilterVarsPinned()
{
assert(compiler->ehAnyFunclets());
- const EHblkDsc * endHBtab = &(compiler->compHndBBtab[compiler->compHndBBtabCount]);
+ const EHblkDsc* endHBtab = &(compiler->compHndBBtab[compiler->compHndBBtabCount]);
- for (EHblkDsc * HBtab = compiler->compHndBBtab; HBtab < endHBtab; HBtab++)
+ for (EHblkDsc* HBtab = compiler->compHndBBtab; HBtab < endHBtab; HBtab++)
{
if (HBtab->HasFilter())
{
const UNATIVE_OFFSET filterBeg = compiler->ehCodeOffset(HBtab->ebdFilter);
const UNATIVE_OFFSET filterEnd = compiler->ehCodeOffset(HBtab->ebdHndBeg);
- for (varPtrDsc* varTmp = gcVarPtrList; varTmp != NULL; varTmp = varTmp->vpdNext)
+ for (varPtrDsc* varTmp = gcVarPtrList; varTmp != nullptr; varTmp = varTmp->vpdNext)
{
// Get hold of the variable's flags.
- const unsigned lowBits = varTmp->vpdVarNum & OFFSET_MASK;
+ const unsigned lowBits = varTmp->vpdVarNum & OFFSET_MASK;
// Compute the actual lifetime offsets.
const unsigned begOffs = varTmp->vpdBegOfs;
const unsigned endOffs = varTmp->vpdEndOfs;
// Special case: skip any 0-length lifetimes.
- if (endOffs == begOffs)
+ if (endOffs == begOffs)
+ {
continue;
+ }
// Skip lifetimes with no overlap with the filter
if ((endOffs <= filterBeg) || (begOffs >= filterEnd))
+ {
continue;
+ }
// Because there is no nesting within filters, nothing
// should be already pinned.
@@ -4460,18 +4491,18 @@ void GCInfo::gcMarkFilterVarsPinned()
}
#endif // DEBUG
- varPtrDsc * desc1 = new (compiler, CMK_GC) varPtrDsc;
- desc1->vpdNext = gcVarPtrList;
+ varPtrDsc* desc1 = new (compiler, CMK_GC) varPtrDsc;
+ desc1->vpdNext = gcVarPtrList;
desc1->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
desc1->vpdBegOfs = filterBeg;
desc1->vpdEndOfs = filterEnd;
- varPtrDsc *desc2 = new (compiler, CMK_GC) varPtrDsc;
- desc2->vpdNext = desc1;
+ varPtrDsc* desc2 = new (compiler, CMK_GC) varPtrDsc;
+ desc2->vpdNext = desc1;
desc2->vpdVarNum = varTmp->vpdVarNum;
desc2->vpdBegOfs = filterEnd;
desc2->vpdEndOfs = endOffs;
- gcVarPtrList = desc2;
+ gcVarPtrList = desc2;
varTmp->vpdEndOfs = filterBeg;
#ifdef DEBUG
@@ -4502,12 +4533,12 @@ void GCInfo::gcMarkFilterVarsPinned()
}
#endif // DEBUG
- varPtrDsc * desc = new (compiler, CMK_GC) varPtrDsc;
- desc->vpdNext = gcVarPtrList;
+ varPtrDsc* desc = new (compiler, CMK_GC) varPtrDsc;
+ desc->vpdNext = gcVarPtrList;
desc->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
desc->vpdBegOfs = filterBeg;
desc->vpdEndOfs = endOffs;
- gcVarPtrList = desc;
+ gcVarPtrList = desc;
varTmp->vpdEndOfs = filterBeg;
@@ -4540,12 +4571,12 @@ void GCInfo::gcMarkFilterVarsPinned()
}
#endif // DEBUG
- varPtrDsc * desc = new (compiler, CMK_GC) varPtrDsc;
- desc->vpdNext = gcVarPtrList;
+ varPtrDsc* desc = new (compiler, CMK_GC) varPtrDsc;
+ desc->vpdNext = gcVarPtrList;
desc->vpdVarNum = varTmp->vpdVarNum | pinned_OFFSET_FLAG;
desc->vpdBegOfs = begOffs;
desc->vpdEndOfs = filterEnd;
- gcVarPtrList = desc;
+ gcVarPtrList = desc;
varTmp->vpdBegOfs = filterEnd;
@@ -4584,12 +4615,10 @@ void GCInfo::gcMarkFilterVarsPinned()
}
}
} // HasFilter
- } // Foreach EH
+ } // Foreach EH
}
-void GCInfo::gcInfoRecordGCStackArgLive(GcInfoEncoder* gcInfoEncoder,
- MakeRegPtrMode mode,
- regPtrDsc* genStackPtr)
+void GCInfo::gcInfoRecordGCStackArgLive(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode, regPtrDsc* genStackPtr)
{
// On non-x86 platforms, we don't have pointer argument push/pop/kill declarations.
// But we use the same mechanism to record writes into the outgoing argument space...
@@ -4602,7 +4631,8 @@ void GCInfo::gcInfoRecordGCStackArgLive(GcInfoEncoder* gcInfoEnco
GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
- StackSlotIdKey sskey(genStackPtr->rpdPtrArg, FALSE, GcSlotFlags(genStackPtr->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE));
+ StackSlotIdKey sskey(genStackPtr->rpdPtrArg, FALSE,
+ GcSlotFlags(genStackPtr->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE));
GcSlotId varSlotId;
if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS)
{
@@ -4615,16 +4645,16 @@ void GCInfo::gcInfoRecordGCStackArgLive(GcInfoEncoder* gcInfoEnco
else
{
BOOL b = m_stackSlotMap->Lookup(sskey, &varSlotId);
- assert(b); // Should have been added in the first pass.
+ assert(b); // Should have been added in the first pass.
// Live until the call.
gcInfoEncoderWithLog->SetSlotState(genStackPtr->rpdOffs, varSlotId, GC_SLOT_LIVE);
}
}
-void GCInfo::gcInfoRecordGCStackArgsDead(GcInfoEncoder* gcInfoEncoder,
- unsigned instrOffset,
- regPtrDsc* genStackPtrFirst,
- regPtrDsc* genStackPtrLast)
+void GCInfo::gcInfoRecordGCStackArgsDead(GcInfoEncoder* gcInfoEncoder,
+ unsigned instrOffset,
+ regPtrDsc* genStackPtrFirst,
+ regPtrDsc* genStackPtrLast)
{
// After a call all of the outgoing arguments are marked as dead.
// The calling loop keeps track of the first argument pushed for this call
@@ -4638,20 +4668,22 @@ void GCInfo::gcInfoRecordGCStackArgsDead(GcInfoEncoder* gcInfoEnc
GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder);
- for (regPtrDsc* genRegPtrTemp = genStackPtrFirst;
- genRegPtrTemp != genStackPtrLast;
- genRegPtrTemp = genRegPtrTemp->rpdNext)
+ for (regPtrDsc* genRegPtrTemp = genStackPtrFirst; genRegPtrTemp != genStackPtrLast;
+ genRegPtrTemp = genRegPtrTemp->rpdNext)
{
if (!genRegPtrTemp->rpdArg)
+ {
continue;
+ }
assert(genRegPtrTemp->rpdGCtypeGet() != GCT_NONE);
assert(genRegPtrTemp->rpdArgTypeGet() == rpdARG_PUSH);
- StackSlotIdKey sskey(genRegPtrTemp->rpdPtrArg, FALSE, genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE);
+ StackSlotIdKey sskey(genRegPtrTemp->rpdPtrArg, FALSE,
+ genRegPtrTemp->rpdGCtypeGet() == GCT_BYREF ? GC_SLOT_INTERIOR : GC_SLOT_BASE);
GcSlotId varSlotId;
- BOOL b = m_stackSlotMap->Lookup(sskey, &varSlotId);
- assert(b); // Should have been added in the first pass.
+ BOOL b = m_stackSlotMap->Lookup(sskey, &varSlotId);
+ assert(b); // Should have been added in the first pass.
// Live until the call.
gcInfoEncoderWithLog->SetSlotState(instrOffset, varSlotId, GC_SLOT_DEAD);
}
diff --git a/src/jit/gcinfo.cpp b/src/jit/gcinfo.cpp
index e458e86d19..b64fd0a174 100644
--- a/src/jit/gcinfo.cpp
+++ b/src/jit/gcinfo.cpp
@@ -29,12 +29,12 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
-extern int JITGcBarrierCall;
+extern int JITGcBarrierCall;
/*****************************************************************************/
#if MEASURE_PTRTAB_SIZE
-/* static */ size_t GCInfo::s_gcRegPtrDscSize = 0;
+/* static */ size_t GCInfo::s_gcRegPtrDscSize = 0;
/* static */ size_t GCInfo::s_gcTotalPtrTabSize = 0;
#endif // MEASURE_PTRTAB_SIZE
@@ -47,8 +47,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-GCInfo::GCInfo(Compiler *theCompiler) :
- compiler(theCompiler)
+GCInfo::GCInfo(Compiler* theCompiler) : compiler(theCompiler)
{
regSet = nullptr;
gcVarPtrList = nullptr;
@@ -59,12 +58,11 @@ GCInfo::GCInfo(Compiler *theCompiler) :
gcCallDescList = nullptr;
gcCallDescLast = nullptr;
#ifdef JIT32_GCENCODER
- gcEpilogTable = nullptr;
-#else // !JIT32_GCENCODER
+ gcEpilogTable = nullptr;
+#else // !JIT32_GCENCODER
m_regSlotMap = nullptr;
m_stackSlotMap = nullptr;
#endif // JIT32_GCENCODER
-
}
/*****************************************************************************/
@@ -72,21 +70,21 @@ GCInfo::GCInfo(Compiler *theCompiler) :
* Reset tracking info at the start of a basic block.
*/
-void GCInfo::gcResetForBB()
+void GCInfo::gcResetForBB()
{
gcRegGCrefSetCur = RBM_NONE;
gcRegByrefSetCur = RBM_NONE;
VarSetOps::AssignNoCopy(compiler, gcVarPtrSetCur, VarSetOps::MakeEmpty(compiler));
}
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* Print the changes in the gcRegGCrefSetCur sets.
*/
-void GCInfo::gcDspGCrefSetChanges(regMaskTP gcRegGCrefSetNew DEBUGARG(bool forceOutput))
+void GCInfo::gcDspGCrefSetChanges(regMaskTP gcRegGCrefSetNew DEBUGARG(bool forceOutput))
{
if (compiler->verbose)
{
@@ -115,7 +113,7 @@ void GCInfo::gcDspGCrefSetChanges(regMaskTP gcRegGCrefSetNew DEBU
* Print the changes in the gcRegByrefSetCur sets.
*/
-void GCInfo::gcDspByrefSetChanges(regMaskTP gcRegByrefSetNew DEBUGARG(bool forceOutput))
+void GCInfo::gcDspByrefSetChanges(regMaskTP gcRegByrefSetNew DEBUGARG(bool forceOutput))
{
if (compiler->verbose)
{
@@ -147,9 +145,9 @@ void GCInfo::gcDspByrefSetChanges(regMaskTP gcRegByrefSetNew DEBU
* GCref pointer values.
*/
-void GCInfo::gcMarkRegSetGCref(regMaskTP regMask DEBUGARG(bool forceOutput))
+void GCInfo::gcMarkRegSetGCref(regMaskTP regMask DEBUGARG(bool forceOutput))
{
-#ifdef DEBUG
+#ifdef DEBUG
if (compiler->compRegSetCheckLevel == 0)
{
// This set of registers are going to hold REFs.
@@ -158,8 +156,8 @@ void GCInfo::gcMarkRegSetGCref(regMaskTP regMask DEBUGARG(bool fo
}
#endif
- regMaskTP gcRegByrefSetNew = gcRegByrefSetCur & ~regMask; // Clear it if set in Byref mask
- regMaskTP gcRegGCrefSetNew = gcRegGCrefSetCur | regMask; // Set it in GCref mask
+ regMaskTP gcRegByrefSetNew = gcRegByrefSetCur & ~regMask; // Clear it if set in Byref mask
+ regMaskTP gcRegGCrefSetNew = gcRegGCrefSetCur | regMask; // Set it in GCref mask
INDEBUG(gcDspGCrefSetChanges(gcRegGCrefSetNew, forceOutput));
INDEBUG(gcDspByrefSetChanges(gcRegByrefSetNew));
@@ -174,10 +172,10 @@ void GCInfo::gcMarkRegSetGCref(regMaskTP regMask DEBUGARG(bool fo
* Byref pointer values.
*/
-void GCInfo::gcMarkRegSetByref(regMaskTP regMask DEBUGARG(bool forceOutput))
+void GCInfo::gcMarkRegSetByref(regMaskTP regMask DEBUGARG(bool forceOutput))
{
- regMaskTP gcRegByrefSetNew = gcRegByrefSetCur | regMask; // Set it in Byref mask
- regMaskTP gcRegGCrefSetNew = gcRegGCrefSetCur & ~regMask; // Clear it if set in GCref mask
+ regMaskTP gcRegByrefSetNew = gcRegByrefSetCur | regMask; // Set it in Byref mask
+ regMaskTP gcRegGCrefSetNew = gcRegGCrefSetCur & ~regMask; // Clear it if set in GCref mask
INDEBUG(gcDspGCrefSetChanges(gcRegGCrefSetNew));
INDEBUG(gcDspByrefSetChanges(gcRegByrefSetNew, forceOutput));
@@ -192,7 +190,7 @@ void GCInfo::gcMarkRegSetByref(regMaskTP regMask DEBUGARG(bool fo
* non-pointer values.
*/
-void GCInfo::gcMarkRegSetNpt(regMaskTP regMask DEBUGARG(bool forceOutput))
+void GCInfo::gcMarkRegSetNpt(regMaskTP regMask DEBUGARG(bool forceOutput))
{
/* NOTE: don't unmark any live register variables */
@@ -211,40 +209,50 @@ void GCInfo::gcMarkRegSetNpt(regMaskTP regMask DEBUGARG(bool forc
* Mark the specified register as now holding a value of the given type.
*/
-void GCInfo::gcMarkRegPtrVal(regNumber reg, var_types type)
+void GCInfo::gcMarkRegPtrVal(regNumber reg, var_types type)
{
- regMaskTP regMask = genRegMask(reg);
+ regMaskTP regMask = genRegMask(reg);
switch (type)
{
- case TYP_REF: gcMarkRegSetGCref(regMask); break;
- case TYP_BYREF: gcMarkRegSetByref(regMask); break;
- default: gcMarkRegSetNpt (regMask); break;
+ case TYP_REF:
+ gcMarkRegSetGCref(regMask);
+ break;
+ case TYP_BYREF:
+ gcMarkRegSetByref(regMask);
+ break;
+ default:
+ gcMarkRegSetNpt(regMask);
+ break;
}
}
-
/*****************************************************************************/
-
GCInfo::WriteBarrierForm GCInfo::gcIsWriteBarrierCandidate(GenTreePtr tgt, GenTreePtr assignVal)
{
#if FEATURE_WRITE_BARRIER
/* Are we storing a GC ptr? */
- if (!varTypeIsGC(tgt->TypeGet()))
+ if (!varTypeIsGC(tgt->TypeGet()))
+ {
return WBF_NoBarrier;
+ }
/* Ignore any assignments of NULL */
// 'assignVal' can be the constant Null or something else (LclVar, etc..)
// that is known to be null via Value Numbering.
if (assignVal->GetVN(VNK_Liberal) == ValueNumStore::VNForNull())
+ {
return WBF_NoBarrier;
+ }
if (assignVal->gtOper == GT_CNS_INT && assignVal->gtIntCon.gtIconVal == 0)
+ {
return WBF_NoBarrier;
+ }
/* Where are we storing into? */
@@ -254,27 +262,27 @@ GCInfo::WriteBarrierForm GCInfo::gcIsWriteBarrierCandidate(GenTreePtr tgt, GenTr
{
#ifndef LEGACY_BACKEND
- case GT_STOREIND:
-#endif // !LEGACY_BACKEND
- case GT_IND: /* Could be the managed heap */
- return gcWriteBarrierFormFromTargetAddress(tgt->gtOp.gtOp1);
-
- case GT_LEA:
- return gcWriteBarrierFormFromTargetAddress(tgt->AsAddrMode()->Base());
-
- case GT_ARR_ELEM: /* Definitely in the managed heap */
- case GT_CLS_VAR:
- return WBF_BarrierUnchecked;
-
- case GT_REG_VAR: /* Definitely not in the managed heap */
- case GT_LCL_VAR:
- case GT_LCL_FLD:
- case GT_STORE_LCL_VAR:
- case GT_STORE_LCL_FLD:
- return WBF_NoBarrier;
-
- default:
- break;
+ case GT_STOREIND:
+#endif // !LEGACY_BACKEND
+ case GT_IND: /* Could be the managed heap */
+ return gcWriteBarrierFormFromTargetAddress(tgt->gtOp.gtOp1);
+
+ case GT_LEA:
+ return gcWriteBarrierFormFromTargetAddress(tgt->AsAddrMode()->Base());
+
+ case GT_ARR_ELEM: /* Definitely in the managed heap */
+ case GT_CLS_VAR:
+ return WBF_BarrierUnchecked;
+
+ case GT_REG_VAR: /* Definitely not in the managed heap */
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
+ case GT_STORE_LCL_VAR:
+ case GT_STORE_LCL_FLD:
+ return WBF_NoBarrier;
+
+ default:
+ break;
}
assert(!"Missing case in gcIsWriteBarrierCandidate");
@@ -301,23 +309,24 @@ bool GCInfo::gcIsWriteBarrierAsgNode(GenTreePtr op)
}
}
-
/*****************************************************************************/
/*****************************************************************************
*
* If the given tree value is sitting in a register, free it now.
*/
-void GCInfo::gcMarkRegPtrVal(GenTreePtr tree)
+void GCInfo::gcMarkRegPtrVal(GenTreePtr tree)
{
- if (varTypeIsGC(tree->TypeGet()))
+ if (varTypeIsGC(tree->TypeGet()))
{
#ifdef LEGACY_BACKEND
- if (tree->gtOper == GT_LCL_VAR)
+ if (tree->gtOper == GT_LCL_VAR)
compiler->codeGen->genMarkLclVar(tree);
#endif // LEGACY_BACKEND
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
+ {
gcMarkRegSetNpt(genRegMask(tree->gtRegNum));
+ }
}
}
@@ -327,12 +336,12 @@ void GCInfo::gcMarkRegPtrVal(GenTreePtr tree)
* Initialize the non-register pointer variable tracking logic.
*/
-void GCInfo::gcVarPtrSetInit()
+void GCInfo::gcVarPtrSetInit()
{
VarSetOps::AssignNoCopy(compiler, gcVarPtrSetCur, VarSetOps::MakeEmpty(compiler));
/* Initialize the list of lifetime entries */
- gcVarPtrList = gcVarPtrLast = NULL;
+ gcVarPtrList = gcVarPtrLast = nullptr;
}
/*****************************************************************************
@@ -341,9 +350,9 @@ void GCInfo::gcVarPtrSetInit()
* it to the list.
*/
-GCInfo::regPtrDsc * GCInfo::gcRegPtrAllocDsc()
+GCInfo::regPtrDsc* GCInfo::gcRegPtrAllocDsc()
{
- regPtrDsc * regPtrNext;
+ regPtrDsc* regPtrNext;
assert(compiler->genFullPtrRegMap);
@@ -351,29 +360,29 @@ GCInfo::regPtrDsc * GCInfo::gcRegPtrAllocDsc()
regPtrNext = new (compiler, CMK_GC) regPtrDsc;
- regPtrNext->rpdIsThis = FALSE;
+ regPtrNext->rpdIsThis = FALSE;
- regPtrNext->rpdOffs = 0;
- regPtrNext->rpdNext = NULL;
+ regPtrNext->rpdOffs = 0;
+ regPtrNext->rpdNext = nullptr;
// Append the entry to the end of the list.
- if (gcRegPtrLast == NULL)
+ if (gcRegPtrLast == nullptr)
{
- assert(gcRegPtrList == NULL);
+ assert(gcRegPtrList == nullptr);
gcRegPtrList = gcRegPtrLast = regPtrNext;
}
else
{
- assert(gcRegPtrList != NULL);
- gcRegPtrLast->rpdNext = regPtrNext;
- gcRegPtrLast = regPtrNext;
+ assert(gcRegPtrList != nullptr);
+ gcRegPtrLast->rpdNext = regPtrNext;
+ gcRegPtrLast = regPtrNext;
}
#if MEASURE_PTRTAB_SIZE
s_gcRegPtrDscSize += sizeof(*regPtrNext);
#endif
- return regPtrNext;
+ return regPtrNext;
}
/*****************************************************************************
@@ -381,36 +390,35 @@ GCInfo::regPtrDsc * GCInfo::gcRegPtrAllocDsc()
* Compute the various counts that get stored in the info block header.
*/
-void GCInfo::gcCountForHeader(UNALIGNED unsigned int * untrackedCount,
- UNALIGNED unsigned int * varPtrTableSize)
+void GCInfo::gcCountForHeader(UNALIGNED unsigned int* untrackedCount, UNALIGNED unsigned int* varPtrTableSize)
{
- unsigned varNum;
- LclVarDsc* varDsc;
- varPtrDsc* varTmp;
+ unsigned varNum;
+ LclVarDsc* varDsc;
+ varPtrDsc* varTmp;
- bool thisKeptAliveIsInUntracked = false; // did we track "this" in a synchronized method?
- unsigned int count = 0;
+ bool thisKeptAliveIsInUntracked = false; // did we track "this" in a synchronized method?
+ unsigned int count = 0;
/* Count the untracked locals and non-enregistered args */
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
- if (varTypeIsGC(varDsc->TypeGet()))
+ if (varTypeIsGC(varDsc->TypeGet()))
{
- if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
- {
- // Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
+ if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
+ {
+ // Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
// reported through its parent local
continue;
- }
-
+ }
+
/* Do we have an argument or local variable? */
- if (!varDsc->lvIsParam)
+ if (!varDsc->lvIsParam)
{
- if (varDsc->lvTracked || !varDsc->lvOnFrame)
+ if (varDsc->lvTracked || !varDsc->lvOnFrame)
+ {
continue;
+ }
}
else
{
@@ -424,8 +432,8 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int * untrackedC
#ifndef LEGACY_BACKEND
if (!varDsc->lvOnFrame)
-#else // LEGACY_BACKEND
- if (varDsc->lvRegister)
+#else // LEGACY_BACKEND
+ if (varDsc->lvRegister)
#endif // LEGACY_BACKEND
{
/* if a CEE_JMP has been used, then we need to report all the arguments
@@ -434,11 +442,13 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int * untrackedC
argument offsets are always fixed up properly even if lvRegister
is set */
if (!compiler->compJmpOpUsed)
+ {
continue;
+ }
}
else
{
- if (!varDsc->lvOnFrame)
+ if (!varDsc->lvOnFrame)
{
/* If this non-enregistered pointer arg is never
* used, we don't need to report it
@@ -446,7 +456,7 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int * untrackedC
assert(varDsc->lvRefCnt == 0);
continue;
}
- else if (varDsc->lvIsRegArg && varDsc->lvTracked)
+ else if (varDsc->lvIsRegArg && varDsc->lvTracked)
{
/* If this register-passed arg is tracked, then
* it has been allocated space near the other
@@ -463,26 +473,29 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int * untrackedC
if (compiler->lvaIsOriginalThisArg(varNum) && compiler->lvaKeepAliveAndReportThis())
{
// Encoding of untracked variables does not support reporting
- // "this". So report it as a tracked variable with a liveness
+ // "this". So report it as a tracked variable with a liveness
// extending over the entire method.
thisKeptAliveIsInUntracked = true;
continue;
}
-#ifdef DEBUG
- if (compiler->verbose)
+#ifdef DEBUG
+ if (compiler->verbose)
{
- int offs = varDsc->lvStkOffs;
+ int offs = varDsc->lvStkOffs;
- printf("GCINFO: untrckd %s lcl at [%s",
- varTypeGCstring(varDsc->TypeGet()),
- compiler->genEmitter->emitGetFrameReg());
+ printf("GCINFO: untrckd %s lcl at [%s", varTypeGCstring(varDsc->TypeGet()),
+ compiler->genEmitter->emitGetFrameReg());
- if (offs < 0)
+ if (offs < 0)
+ {
printf("-%02XH", -offs);
+ }
else if (offs > 0)
+ {
printf("+%02XH", +offs);
+ }
printf("]\n");
}
@@ -490,43 +503,48 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int * untrackedC
count++;
}
- else if (varDsc->lvType == TYP_STRUCT &&
- varDsc->lvOnFrame &&
- (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
+ else if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
unsigned slots = compiler->lvaLclSize(varNum) / sizeof(void*);
- BYTE * gcPtrs = compiler->lvaGetGcLayout(varNum);
+ BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
// walk each member of the array
for (unsigned i = 0; i < slots; i++)
- if (gcPtrs[i] != TYPE_GC_NONE) // count only gc slots
+ {
+ if (gcPtrs[i] != TYPE_GC_NONE)
+ { // count only gc slots
count++;
+ }
+ }
}
}
/* Also count spill temps that hold pointers */
assert(compiler->tmpAllFree());
- for (TempDsc* tempThis = compiler->tmpListBeg();
- tempThis != nullptr;
- tempThis = compiler->tmpListNxt(tempThis))
+ for (TempDsc* tempThis = compiler->tmpListBeg(); tempThis != nullptr; tempThis = compiler->tmpListNxt(tempThis))
{
- if (varTypeIsGC(tempThis->tdTempType()) == false)
+ if (varTypeIsGC(tempThis->tdTempType()) == false)
+ {
continue;
+ }
-#ifdef DEBUG
- if (compiler->verbose)
+#ifdef DEBUG
+ if (compiler->verbose)
{
- int offs = tempThis->tdTempOffs();
+ int offs = tempThis->tdTempOffs();
- printf("GCINFO: untrck %s Temp at [%s",
- varTypeGCstring(varDsc->TypeGet()),
- compiler->genEmitter->emitGetFrameReg());
+ printf("GCINFO: untrck %s Temp at [%s", varTypeGCstring(varDsc->TypeGet()),
+ compiler->genEmitter->emitGetFrameReg());
- if (offs < 0)
+ if (offs < 0)
+ {
printf("-%02XH", -offs);
+ }
else if (offs > 0)
+ {
printf("+%02XH", +offs);
+ }
printf("]\n");
}
@@ -535,8 +553,11 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int * untrackedC
count++;
}
-#ifdef DEBUG
- if (compiler->verbose) printf("GCINFO: untrckVars = %u\n", count);
+#ifdef DEBUG
+ if (compiler->verbose)
+ {
+ printf("GCINFO: untrckVars = %u\n", count);
+ }
#endif
*untrackedCount = count;
@@ -547,9 +568,11 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int * untrackedC
count = 0;
if (thisKeptAliveIsInUntracked)
+ {
count++;
+ }
- if (gcVarPtrList)
+ if (gcVarPtrList)
{
/* We'll use a delta encoding for the lifetime offsets */
@@ -557,15 +580,20 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int * untrackedC
{
/* Special case: skip any 0-length lifetimes */
- if (varTmp->vpdBegOfs == varTmp->vpdEndOfs)
+ if (varTmp->vpdBegOfs == varTmp->vpdEndOfs)
+ {
continue;
+ }
count++;
}
}
-#ifdef DEBUG
- if (compiler->verbose) printf("GCINFO: trackdLcls = %u\n", count);
+#ifdef DEBUG
+ if (compiler->verbose)
+ {
+ printf("GCINFO: trackdLcls = %u\n", count);
+ }
#endif
*varPtrTableSize = count;
@@ -580,14 +608,11 @@ void GCInfo::gcCountForHeader(UNALIGNED unsigned int * untrackedC
* returned from gcPtrTableSize().
*/
-BYTE* GCInfo::gcPtrTableSave(BYTE* destPtr,
- const InfoHdr& header,
- unsigned codeSize,
- size_t* pArgTabOffset)
+BYTE* GCInfo::gcPtrTableSave(BYTE* destPtr, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset)
{
/* Write the tables to the info block */
- return destPtr + gcMakeRegPtrTable(destPtr, -1, header, codeSize, pArgTabOffset);
+ return destPtr + gcMakeRegPtrTable(destPtr, -1, header, codeSize, pArgTabOffset);
}
#endif
@@ -596,19 +621,18 @@ BYTE* GCInfo::gcPtrTableSave(BYTE* destPtr,
* Initialize the 'pointer value' register/argument tracking logic.
*/
-void GCInfo::gcRegPtrSetInit()
+void GCInfo::gcRegPtrSetInit()
{
- gcRegGCrefSetCur =
- gcRegByrefSetCur = 0;
+ gcRegGCrefSetCur = gcRegByrefSetCur = 0;
- if (compiler->genFullPtrRegMap)
+ if (compiler->genFullPtrRegMap)
{
- gcRegPtrList = gcRegPtrLast = NULL;
+ gcRegPtrList = gcRegPtrLast = nullptr;
}
else
{
/* Initialize the 'call descriptor' list */
- gcCallDescList = gcCallDescLast = NULL;
+ gcCallDescList = gcCallDescLast = nullptr;
}
}
@@ -620,16 +644,13 @@ void GCInfo::gcRegPtrSetInit()
* the table of epilogs.
*/
-/* static */ size_t GCInfo::gcRecordEpilog(void* pCallBackData,
- unsigned offset)
+/* static */ size_t GCInfo::gcRecordEpilog(void* pCallBackData, unsigned offset)
{
- GCInfo* gcInfo = (GCInfo *)pCallBackData;
+ GCInfo* gcInfo = (GCInfo*)pCallBackData;
assert(gcInfo);
- size_t result = encodeUDelta(gcInfo->gcEpilogTable,
- offset,
- gcInfo->gcEpilogPrevOffset);
+ size_t result = encodeUDelta(gcInfo->gcEpilogTable, offset, gcInfo->gcEpilogPrevOffset);
if (gcInfo->gcEpilogTable)
gcInfo->gcEpilogTable += result;
@@ -641,15 +662,14 @@ void GCInfo::gcRegPtrSetInit()
#endif // JIT32_GCENCODER
-
GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr tgtAddr)
{
- GCInfo::WriteBarrierForm result = GCInfo::WBF_BarrierUnknown; // Default case, we have no information.
+ GCInfo::WriteBarrierForm result = GCInfo::WBF_BarrierUnknown; // Default case, we have no information.
// If we store through an int to a GC_REF field, we'll assume that needs to use a checked barriers.
if (tgtAddr->TypeGet() == TYP_I_IMPL)
{
- return GCInfo::WBF_BarrierChecked; // Why isn't this GCInfo::WBF_BarrierUnknown?
+ return GCInfo::WBF_BarrierChecked; // Why isn't this GCInfo::WBF_BarrierUnknown?
}
// Otherwise...
@@ -663,11 +683,12 @@ GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr
while (tgtAddr->OperGet() == GT_ADDR && tgtAddr->gtOp.gtOp1->OperGet() == GT_IND)
{
- tgtAddr = tgtAddr->gtOp.gtOp1->gtOp.gtOp1;
+ tgtAddr = tgtAddr->gtOp.gtOp1->gtOp.gtOp1;
simplifiedExpr = true;
assert(tgtAddr->TypeGet() == TYP_BYREF);
}
- // For additions, one of the operands is a byref or a ref (and the other is not). Follow this down to its source.
+ // For additions, one of the operands is a byref or a ref (and the other is not). Follow this down to its
+ // source.
while (tgtAddr->OperGet() == GT_ADD || tgtAddr->OperGet() == GT_LEA)
{
if (tgtAddr->OperGet() == GT_ADD)
@@ -675,12 +696,12 @@ GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr
if (tgtAddr->gtOp.gtOp1->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp1->TypeGet() == TYP_REF)
{
assert(!(tgtAddr->gtOp.gtOp2->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp2->TypeGet() == TYP_REF));
- tgtAddr = tgtAddr->gtOp.gtOp1;
+ tgtAddr = tgtAddr->gtOp.gtOp1;
simplifiedExpr = true;
}
else if (tgtAddr->gtOp.gtOp2->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp2->TypeGet() == TYP_REF)
{
- tgtAddr = tgtAddr->gtOp.gtOp2;
+ tgtAddr = tgtAddr->gtOp.gtOp2;
simplifiedExpr = true;
}
else
@@ -692,7 +713,7 @@ GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr
return GCInfo::WBF_BarrierUnknown;
}
}
- else
+ else
{
// Must be an LEA (i.e., an AddrMode)
assert(tgtAddr->OperGet() == GT_LEA);
@@ -718,20 +739,22 @@ GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr
{
unsigned lclNum = 0;
if (tgtAddr->gtOper == GT_LCL_VAR)
+ {
lclNum = tgtAddr->gtLclVar.gtLclNum;
- else
+ }
+ else
{
assert(tgtAddr->gtOper == GT_REG_VAR);
lclNum = tgtAddr->gtRegVar.gtLclNum;
}
- LclVarDsc * varDsc = &compiler->lvaTable[lclNum];
+ LclVarDsc* varDsc = &compiler->lvaTable[lclNum];
- // Instead of marking LclVar with 'lvStackByref',
+ // Instead of marking LclVar with 'lvStackByref',
// Consider decomposing the Value Number given to this LclVar to see if it was
// created using a GT_ADDR(GT_LCLVAR) or a GT_ADD( GT_ADDR(GT_LCLVAR), Constant)
- // We may have an internal compiler temp created in fgMorphCopyBlock() that we know
+ // We may have an internal compiler temp created in fgMorphCopyBlock() that we know
// points at one of our stack local variables, it will have lvStackByref set to true.
//
if (varDsc->lvStackByref)
@@ -743,7 +766,7 @@ GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr
// We don't eliminate for inlined methods, where we (can) know where the "retBuff" points.
if (!compiler->compIsForInlining() && lclNum == compiler->info.compRetBuffArg)
{
- assert(compiler->info.compRetType == TYP_STRUCT); // Else shouldn't have a ret buff.
+ assert(compiler->info.compRetType == TYP_STRUCT); // Else shouldn't have a ret buff.
// Are we assured that the ret buff pointer points into the stack of a caller?
if (compiler->info.compRetBuffDefStack)
@@ -757,7 +780,7 @@ GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr
#else
return WBF_NoBarrier;
#endif
-#else // 0
+#else // 0
return GCInfo::WBF_NoBarrier;
#endif // 0
}
@@ -788,12 +811,11 @@ GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr
// It is also called by LinearScan::recordVarLocationAtStartOfBB() which is in turn called by
// CodeGen::genCodeForBBList() at the block boundary.
-void
-GCInfo::gcUpdateForRegVarMove(regMaskTP srcMask, regMaskTP dstMask, LclVarDsc *varDsc)
+void GCInfo::gcUpdateForRegVarMove(regMaskTP srcMask, regMaskTP dstMask, LclVarDsc* varDsc)
{
- var_types type = varDsc->TypeGet();
- bool isGCRef = (type == TYP_REF);
- bool isByRef = (type == TYP_BYREF);
+ var_types type = varDsc->TypeGet();
+ bool isGCRef = (type == TYP_REF);
+ bool isByRef = (type == TYP_BYREF);
if (srcMask != RBM_NONE)
{
@@ -811,7 +833,7 @@ GCInfo::gcUpdateForRegVarMove(regMaskTP srcMask, regMaskTP dstMask, LclVarDsc *v
gcRegByrefSetCur |= dstMask; // safe if no dst, i.e. RBM_NONE
}
}
- else if (isGCRef || isByRef)
+ else if (isGCRef || isByRef)
{
// In this case, we are moving it from the stack to a register,
// so remove it from the set of live stack gc refs
@@ -824,7 +846,7 @@ GCInfo::gcUpdateForRegVarMove(regMaskTP srcMask, regMaskTP dstMask, LclVarDsc *v
// Otherwise, we have to determine whether to set them
if (srcMask == RBM_NONE)
{
- if (isGCRef)
+ if (isGCRef)
{
gcRegGCrefSetCur |= dstMask;
}
@@ -834,7 +856,7 @@ GCInfo::gcUpdateForRegVarMove(regMaskTP srcMask, regMaskTP dstMask, LclVarDsc *v
}
}
}
- else if (isGCRef || isByRef)
+ else if (isGCRef || isByRef)
{
VarSetOps::AddElemD(compiler, gcVarPtrSetCur, varDsc->lvVarIndex);
}
diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp
index 6b2c18eb78..4094a4614e 100644
--- a/src/jit/gentree.cpp
+++ b/src/jit/gentree.cpp
@@ -20,41 +20,53 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
-const
-unsigned short GenTree::gtOperKindTable[] =
-{
- #define GTNODE(en,sn,cm,ok) ok + GTK_COMMUTE*cm,
- #include "gtlist.h"
+const unsigned short GenTree::gtOperKindTable[] = {
+#define GTNODE(en, sn, cm, ok) ok + GTK_COMMUTE *cm,
+#include "gtlist.h"
};
/*****************************************************************************/
// static
-genTreeOps GenTree::OpAsgToOper(genTreeOps op)
+genTreeOps GenTree::OpAsgToOper(genTreeOps op)
{
// Precondition.
assert(OperIsAssignment(op) && op != GT_ASG);
switch (op)
{
- case GT_ASG_ADD: return GT_ADD;
- case GT_ASG_SUB: return GT_SUB;
- case GT_ASG_MUL: return GT_MUL;
- case GT_ASG_DIV: return GT_DIV;
- case GT_ASG_MOD: return GT_MOD;
-
- case GT_ASG_UDIV: return GT_UDIV;
- case GT_ASG_UMOD: return GT_UMOD;
-
- case GT_ASG_OR: return GT_OR;
- case GT_ASG_XOR: return GT_XOR;
- case GT_ASG_AND: return GT_AND;
- case GT_ASG_LSH: return GT_LSH;
- case GT_ASG_RSH: return GT_RSH;
- case GT_ASG_RSZ: return GT_RSZ;
+ case GT_ASG_ADD:
+ return GT_ADD;
+ case GT_ASG_SUB:
+ return GT_SUB;
+ case GT_ASG_MUL:
+ return GT_MUL;
+ case GT_ASG_DIV:
+ return GT_DIV;
+ case GT_ASG_MOD:
+ return GT_MOD;
+
+ case GT_ASG_UDIV:
+ return GT_UDIV;
+ case GT_ASG_UMOD:
+ return GT_UMOD;
+
+ case GT_ASG_OR:
+ return GT_OR;
+ case GT_ASG_XOR:
+ return GT_XOR;
+ case GT_ASG_AND:
+ return GT_AND;
+ case GT_ASG_LSH:
+ return GT_LSH;
+ case GT_ASG_RSH:
+ return GT_RSH;
+ case GT_ASG_RSZ:
+ return GT_RSZ;
- case GT_CHS: return GT_NEG;
+ case GT_CHS:
+ return GT_NEG;
- default:
- unreached(); // Precondition implies we don't get here.
+ default:
+ unreached(); // Precondition implies we don't get here.
}
}
@@ -65,10 +77,10 @@ genTreeOps GenTree::OpAsgToOper(genTreeOps op)
#ifdef DEBUG
-#define INDENT_SIZE 3
+#define INDENT_SIZE 3
//--------------------------------------------
-//
+//
// IndentStack: This struct is used, along with its related enums and strings,
// to control both the indendtation and the printing of arcs.
//
@@ -78,7 +90,18 @@ genTreeOps GenTree::OpAsgToOper(genTreeOps op)
// The type of arc to print is specified by the IndentInfo enum, and is controlled
// by the caller of the Push() method.
-enum IndentChars {ICVertical, ICBottom, ICTop, ICMiddle, ICDash, ICEmbedded, ICTerminal, ICError, IndentCharCount };
+enum IndentChars
+{
+ ICVertical,
+ ICBottom,
+ ICTop,
+ ICMiddle,
+ ICDash,
+ ICEmbedded,
+ ICTerminal,
+ ICError,
+ IndentCharCount
+};
// clang-format off
// Sets of strings for different dumping options vert bot top mid dash embedded terminal error
@@ -90,12 +113,11 @@ static const char* unicodeIndents[IndentCharCount] = { "\xe2\x94\x82", "\xe2\x9
typedef ArrayStack<Compiler::IndentInfo> IndentInfoStack;
struct IndentStack
{
- IndentInfoStack stack;
- const char** indents;
+ IndentInfoStack stack;
+ const char** indents;
// Constructor for IndentStack. Uses 'compiler' to determine the mode of printing.
- IndentStack(Compiler* compiler) :
- stack(compiler)
+ IndentStack(Compiler* compiler) : stack(compiler)
{
if (compiler->asciiTrees)
{
@@ -131,36 +153,36 @@ struct IndentStack
unsigned indentCount = Depth();
for (unsigned i = 0; i < indentCount; i++)
{
- unsigned index = indentCount-1-i;
+ unsigned index = indentCount - 1 - i;
switch (stack.Index(index))
{
- case Compiler::IndentInfo::IINone:
- printf(" ");
- break;
- case Compiler::IndentInfo::IIEmbedded:
- printf("%s ", indents[ICEmbedded]);
- break;
- case Compiler::IndentInfo::IIArc:
- if (index == 0)
- {
- printf("%s%s%s", indents[ICMiddle], indents[ICDash], indents[ICDash]);
- }
- else
- {
- printf("%s ", indents[ICVertical]);
- }
- break;
- case Compiler::IndentInfo::IIArcBottom:
- printf("%s%s%s", indents[ICBottom], indents[ICDash], indents[ICDash]);
- break;
- case Compiler::IndentInfo::IIArcTop:
- printf("%s%s%s", indents[ICTop], indents[ICDash], indents[ICDash]);
- break;
- case Compiler::IndentInfo::IIError:
- printf("%s%s%s", indents[ICError], indents[ICDash], indents[ICDash]);
- break;
- default:
- unreached();
+ case Compiler::IndentInfo::IINone:
+ printf(" ");
+ break;
+ case Compiler::IndentInfo::IIEmbedded:
+ printf("%s ", indents[ICEmbedded]);
+ break;
+ case Compiler::IndentInfo::IIArc:
+ if (index == 0)
+ {
+ printf("%s%s%s", indents[ICMiddle], indents[ICDash], indents[ICDash]);
+ }
+ else
+ {
+ printf("%s ", indents[ICVertical]);
+ }
+ break;
+ case Compiler::IndentInfo::IIArcBottom:
+ printf("%s%s%s", indents[ICBottom], indents[ICDash], indents[ICDash]);
+ break;
+ case Compiler::IndentInfo::IIArcTop:
+ printf("%s%s%s", indents[ICTop], indents[ICDash], indents[ICDash]);
+ break;
+ case Compiler::IndentInfo::IIError:
+ printf("%s%s%s", indents[ICError], indents[ICDash], indents[ICDash]);
+ break;
+ default:
+ unreached();
}
}
printf("%s", indents[ICTerminal]);
@@ -180,34 +202,34 @@ struct IndentStack
static void printIndent(IndentStack* indentStack)
{
if (indentStack == nullptr)
+ {
return;
+ }
indentStack->print();
}
-static const char * nodeNames[] =
-{
- #define GTNODE(en,sn,cm,ok) sn,
- #include "gtlist.h"
+static const char* nodeNames[] = {
+#define GTNODE(en, sn, cm, ok) sn,
+#include "gtlist.h"
};
-const char * GenTree::NodeName(genTreeOps op)
+const char* GenTree::NodeName(genTreeOps op)
{
- assert((unsigned)op < sizeof(nodeNames)/sizeof(nodeNames[0]));
+ assert((unsigned)op < sizeof(nodeNames) / sizeof(nodeNames[0]));
- return nodeNames[op];
+ return nodeNames[op];
}
-static const char * opNames[] =
-{
- #define GTNODE(en,sn,cm,ok) #en,
- #include "gtlist.h"
+static const char* opNames[] = {
+#define GTNODE(en, sn, cm, ok) #en,
+#include "gtlist.h"
};
-const char * GenTree::OpName(genTreeOps op)
+const char* GenTree::OpName(genTreeOps op)
{
- assert((unsigned)op < sizeof(opNames)/sizeof(opNames[0]));
+ assert((unsigned)op < sizeof(opNames) / sizeof(opNames[0]));
- return opNames[op];
+ return opNames[op];
}
#endif
@@ -224,10 +246,10 @@ const char * GenTree::OpName(genTreeOps op)
/* GT_COUNT'th oper is overloaded as 'undefined oper', so allocate storage for GT_COUNT'th oper also */
/* static */
-unsigned char GenTree::s_gtNodeSizes[GT_COUNT+1];
+unsigned char GenTree::s_gtNodeSizes[GT_COUNT + 1];
/* static */
-void GenTree::InitNodeSize()
+void GenTree::InitNodeSize()
{
/* 'GT_LCL_VAR' often gets changed to 'GT_REG_VAR' */
@@ -244,43 +266,43 @@ void GenTree::InitNodeSize()
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(FEATURE_HFA) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- // On ARM32, ARM64 and System V for struct returning
+ // On ARM32, ARM64 and System V for struct returning
// there is code that does GT_ASG-tree.CopyObj call.
// CopyObj is a large node and the GT_ASG is small, which triggers an exception.
- GenTree::s_gtNodeSizes[GT_ASG ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_RETURN ] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_ASG] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_RETURN] = TREE_NODE_SZ_LARGE;
#endif // defined(FEATURE_HFA) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- GenTree::s_gtNodeSizes[GT_CALL ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_CAST ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_FTN_ADDR ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_BOX ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_INDEX ] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_CALL] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_CAST] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_FTN_ADDR] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_BOX] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_BOUNDS_CHECK] = TREE_NODE_SZ_LARGE;
#ifdef FEATURE_SIMD
- GenTree::s_gtNodeSizes[GT_SIMD_CHK ] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_SIMD_CHK] = TREE_NODE_SZ_LARGE;
#endif // FEATURE_SIMD
- GenTree::s_gtNodeSizes[GT_ARR_ELEM ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_ARR_INDEX ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_ARR_OFFSET ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_RET_EXPR ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_OBJ ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_FIELD ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_STMT ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_CMPXCHG ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_QMARK ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_LEA ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_COPYOBJ ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_INTRINSIC ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_ALLOCOBJ ] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_ARR_ELEM] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_ARR_INDEX] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_ARR_OFFSET] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_RET_EXPR] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_OBJ] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_FIELD] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_STMT] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_CMPXCHG] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_QMARK] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_LEA] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_COPYOBJ] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_INTRINSIC] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_ALLOCOBJ] = TREE_NODE_SZ_LARGE;
#if USE_HELPERS_FOR_INT_DIV
- GenTree::s_gtNodeSizes[GT_DIV ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_UDIV ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_MOD ] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_UMOD ] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_DIV] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_UDIV] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_MOD] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_UMOD] = TREE_NODE_SZ_LARGE;
#endif
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- GenTree::s_gtNodeSizes[GT_PUTARG_STK ] = TREE_NODE_SZ_LARGE;
+ GenTree::s_gtNodeSizes[GT_PUTARG_STK] = TREE_NODE_SZ_LARGE;
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
#if defined(FEATURE_HFA) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// In importer for Hfa and register returned structs we rewrite GT_ASG to GT_COPYOBJ/GT_CPYBLK
@@ -296,79 +318,79 @@ void GenTree::InitNodeSize()
assert(sizeof(GenTreeLclFld) <= GenTree::s_gtNodeSizes[GT_LCL_FLD]);
assert(sizeof(GenTreeLclVar) <= GenTree::s_gtNodeSizes[GT_LCL_VAR]);
- static_assert_no_msg(sizeof(GenTree) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeUnOp) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeOp) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeVal) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeIntConCommon) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreePhysReg) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTree) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeUnOp) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeOp) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeVal) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeIntConCommon) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreePhysReg) <= TREE_NODE_SZ_SMALL);
#ifndef LEGACY_BACKEND
- static_assert_no_msg(sizeof(GenTreeJumpTable) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeJumpTable) <= TREE_NODE_SZ_SMALL);
#endif // !LEGACY_BACKEND
- static_assert_no_msg(sizeof(GenTreeIntCon) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeLngCon) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeDblCon) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeStrCon) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeLclVarCommon) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeLclVar) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeLclFld) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeRegVar) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeCast) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeBox) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeField) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeArgList) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeColon) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeCall) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeCmpXchg) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeFptrVal) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeQmark) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeIntrinsic) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeIndex) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeArrLen) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeBoundsChk) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeArrElem) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeArrIndex) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeArrOffs) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeIndir) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeBlkOp) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeCpBlk) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeInitBlk) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeCpObj) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeStmt) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_LARGE); // *** large node
- static_assert_no_msg(sizeof(GenTreeClsVar) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeArgPlace) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeLabel) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreePhiArg) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeAllocObj) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeIntCon) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeLngCon) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeDblCon) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeStrCon) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeLclVarCommon) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeLclVar) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeLclFld) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeRegVar) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeCast) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeBox) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeField) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeArgList) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeColon) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeCall) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeCmpXchg) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeFptrVal) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeQmark) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeIntrinsic) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeIndex) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeArrLen) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeBoundsChk) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeArrElem) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeArrIndex) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeArrOffs) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeIndir) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeBlkOp) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeCpBlk) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeInitBlk) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeCpObj) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeStmt) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeClsVar) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeArgPlace) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeLabel) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreePhiArg) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeAllocObj) <= TREE_NODE_SZ_LARGE); // *** large node
#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
- static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_SMALL);
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
- static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_LARGE);
+ static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_SMALL);
+#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+ static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_LARGE);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
#ifdef FEATURE_SIMD
- static_assert_no_msg(sizeof(GenTreeSIMD) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeSIMD) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_SIMD
}
-size_t GenTree::GetNodeSize() const
+size_t GenTree::GetNodeSize() const
{
return GenTree::s_gtNodeSizes[gtOper];
}
#ifdef DEBUG
-bool GenTree::IsNodeProperlySized() const
+bool GenTree::IsNodeProperlySized() const
{
size_t size;
- if (gtDebugFlags & GTF_DEBUG_NODE_SMALL)
+ if (gtDebugFlags & GTF_DEBUG_NODE_SMALL)
{
size = TREE_NODE_SZ_SMALL;
}
- else
+ else
{
assert(gtDebugFlags & GTF_DEBUG_NODE_LARGE);
size = TREE_NODE_SZ_LARGE;
@@ -381,9 +403,9 @@ bool GenTree::IsNodeProperlySized() const
#else // SMALL_TREE_NODES
#ifdef DEBUG
-bool GenTree::IsNodeProperlySized() const
+bool GenTree::IsNodeProperlySized() const
{
- return true;
+ return true;
}
#endif
@@ -394,14 +416,14 @@ bool GenTree::IsNodeProperlySized() const
// make sure these get instantiated, because it's not in a header file
// (emulating the c++ 'export' keyword here)
// VC appears to be somewhat unpredictable about whether they end up in the .obj file without this
-template Compiler::fgWalkResult Compiler::fgWalkTreePostRec<true> (GenTreePtr *pTree, fgWalkData *fgWalkData);
-template Compiler::fgWalkResult Compiler::fgWalkTreePostRec<false> (GenTreePtr *pTree, fgWalkData *fgWalkData);
-template Compiler::fgWalkResult Compiler::fgWalkTreePreRec<true> (GenTreePtr *pTree, fgWalkData *fgWalkData);
-template Compiler::fgWalkResult Compiler::fgWalkTreePreRec<false> (GenTreePtr *pTree, fgWalkData *fgWalkData);
-template Compiler::fgWalkResult Compiler::fgWalkTreeRec<true,true> (GenTreePtr *pTree, fgWalkData *fgWalkData);
-template Compiler::fgWalkResult Compiler::fgWalkTreeRec<false,false>(GenTreePtr *pTree, fgWalkData *fgWalkData);
-template Compiler::fgWalkResult Compiler::fgWalkTreeRec<true,false> (GenTreePtr *pTree, fgWalkData *fgWalkData);
-template Compiler::fgWalkResult Compiler::fgWalkTreeRec<false,true> (GenTreePtr *pTree, fgWalkData *fgWalkData);
+template Compiler::fgWalkResult Compiler::fgWalkTreePostRec<true>(GenTreePtr* pTree, fgWalkData* fgWalkData);
+template Compiler::fgWalkResult Compiler::fgWalkTreePostRec<false>(GenTreePtr* pTree, fgWalkData* fgWalkData);
+template Compiler::fgWalkResult Compiler::fgWalkTreePreRec<true>(GenTreePtr* pTree, fgWalkData* fgWalkData);
+template Compiler::fgWalkResult Compiler::fgWalkTreePreRec<false>(GenTreePtr* pTree, fgWalkData* fgWalkData);
+template Compiler::fgWalkResult Compiler::fgWalkTreeRec<true, true>(GenTreePtr* pTree, fgWalkData* fgWalkData);
+template Compiler::fgWalkResult Compiler::fgWalkTreeRec<false, false>(GenTreePtr* pTree, fgWalkData* fgWalkData);
+template Compiler::fgWalkResult Compiler::fgWalkTreeRec<true, false>(GenTreePtr* pTree, fgWalkData* fgWalkData);
+template Compiler::fgWalkResult Compiler::fgWalkTreeRec<false, true>(GenTreePtr* pTree, fgWalkData* fgWalkData);
//******************************************************************************
// fgWalkTreePreRec - Helper function for fgWalkTreePre.
@@ -409,36 +431,40 @@ template Compiler::fgWalkResult Compiler::fgWalkTreeRec<false,true> (GenTreePtr
// Template parameter 'computeStack' specifies whether to maintain
// a stack of ancestor nodes which can be viewed in the callback.
//
-template<bool computeStack>
+template <bool computeStack>
// static
-Compiler::fgWalkResult Compiler::fgWalkTreePreRec(GenTreePtr *pTree, fgWalkData *fgWalkData)
+Compiler::fgWalkResult Compiler::fgWalkTreePreRec(GenTreePtr* pTree, fgWalkData* fgWalkData)
{
- fgWalkResult result = WALK_CONTINUE;
- GenTreePtr currentParent = fgWalkData->parent;
+ fgWalkResult result = WALK_CONTINUE;
+ GenTreePtr currentParent = fgWalkData->parent;
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
- do
+ do
{
GenTreePtr tree = *pTree;
assert(tree);
assert(tree->gtOper != GT_STMT);
- GenTreeArgList* args; // For call node arg lists.
+ GenTreeArgList* args; // For call node arg lists.
if (computeStack)
+ {
fgWalkData->parentStack->Push(tree);
+ }
/* Visit this node */
// if we are not in the mode where we only do the callback for local var nodes,
// visit the node unconditionally. Otherwise we will visit it under leaf handling.
- if (!fgWalkData->wtprLclsOnly)
+ if (!fgWalkData->wtprLclsOnly)
{
assert(tree == *pTree);
result = fgWalkData->wtprVisitorFn(pTree, fgWalkData);
- if (result != WALK_CONTINUE)
+ if (result != WALK_CONTINUE)
+ {
break;
+ }
}
/* Figure out what kind of a node we have */
@@ -448,32 +474,38 @@ Compiler::fgWalkResult Compiler::fgWalkTreePreRec(GenTreePtr *pTree, fgWalk
/* Is this a constant or leaf node? */
- if (kind & (GTK_CONST|GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
{
- if (fgWalkData->wtprLclsOnly && (oper == GT_LCL_VAR || oper == GT_LCL_FLD))
+ if (fgWalkData->wtprLclsOnly && (oper == GT_LCL_VAR || oper == GT_LCL_FLD))
+ {
result = fgWalkData->wtprVisitorFn(pTree, fgWalkData);
+ }
break;
}
else if (fgWalkData->wtprLclsOnly && GenTree::OperIsLocalStore(oper))
{
result = fgWalkData->wtprVisitorFn(pTree, fgWalkData);
- if (result != WALK_CONTINUE)
+ if (result != WALK_CONTINUE)
+ {
break;
+ }
}
fgWalkData->parent = tree;
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
- if (tree->gtGetOp2())
+ if (tree->gtGetOp2())
{
- if (tree->gtOp.gtOp1 != NULL)
+ if (tree->gtOp.gtOp1 != nullptr)
{
result = fgWalkTreePreRec<computeStack>(&tree->gtOp.gtOp1, fgWalkData);
- if (result == WALK_ABORT)
+ if (result == WALK_ABORT)
+ {
return result;
+ }
}
else
{
@@ -486,7 +518,8 @@ Compiler::fgWalkResult Compiler::fgWalkTreePreRec(GenTreePtr *pTree, fgWalk
else
{
pTree = &tree->gtOp.gtOp1;
- if (*pTree){
+ if (*pTree)
+ {
continue;
}
@@ -496,139 +529,174 @@ Compiler::fgWalkResult Compiler::fgWalkTreePreRec(GenTreePtr *pTree, fgWalk
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
- case GT_FIELD:
- pTree = &tree->gtField.gtFldObj;
- break;
-
- case GT_CALL:
+ case GT_FIELD:
+ pTree = &tree->gtField.gtFldObj;
+ break;
- assert(tree->gtFlags & GTF_CALL);
+ case GT_CALL:
- /* Is this a call to unmanaged code ? */
- if (fgWalkData->wtprLclsOnly && (tree->gtFlags & GTF_CALL_UNMANAGED))
- {
- result = fgWalkData->wtprVisitorFn(pTree, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
+ assert(tree->gtFlags & GTF_CALL);
- if (tree->gtCall.gtCallObjp)
- {
- result = fgWalkTreePreRec<computeStack>(&tree->gtCall.gtCallObjp, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
+ /* Is this a call to unmanaged code ? */
+ if (fgWalkData->wtprLclsOnly && (tree->gtFlags & GTF_CALL_UNMANAGED))
+ {
+ result = fgWalkData->wtprVisitorFn(pTree, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- for (args = tree->gtCall.gtCallArgs; args; args = args->Rest())
- {
- result = fgWalkTreePreRec<computeStack>(args->pCurrent(), fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
+ if (tree->gtCall.gtCallObjp)
+ {
+ result = fgWalkTreePreRec<computeStack>(&tree->gtCall.gtCallObjp, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- for (args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
- {
- result = fgWalkTreePreRec<computeStack>(args->pCurrent(), fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
+ for (args = tree->gtCall.gtCallArgs; args; args = args->Rest())
+ {
+ result = fgWalkTreePreRec<computeStack>(args->pCurrent(), fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- if (tree->gtCall.gtControlExpr)
- {
- result = fgWalkTreePreRec<computeStack>(&tree->gtCall.gtControlExpr, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
+ for (args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
+ {
+ result = fgWalkTreePreRec<computeStack>(args->pCurrent(), fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- {
- if (tree->gtCall.gtCallCookie)
+ if (tree->gtCall.gtControlExpr)
{
- result = fgWalkTreePreRec<computeStack>(&tree->gtCall.gtCallCookie, fgWalkData);
- if (result == WALK_ABORT)
+ result = fgWalkTreePreRec<computeStack>(&tree->gtCall.gtControlExpr, fgWalkData);
+ if (result == WALK_ABORT)
+ {
return result;
+ }
}
- pTree = &tree->gtCall.gtCallAddr;
- }
- else
- pTree = NULL;
- break;
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
+ {
+ if (tree->gtCall.gtCallCookie)
+ {
+ result = fgWalkTreePreRec<computeStack>(&tree->gtCall.gtCallCookie, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
+ pTree = &tree->gtCall.gtCallAddr;
+ }
+ else
+ {
+ pTree = nullptr;
+ }
- case GT_ARR_ELEM:
+ break;
- result = fgWalkTreePreRec<computeStack>(&tree->gtArrElem.gtArrObj, fgWalkData);
- if (result == WALK_ABORT)
- return result;
+ case GT_ARR_ELEM:
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- {
- result = fgWalkTreePreRec<computeStack>(&tree->gtArrElem.gtArrInds[dim], fgWalkData);
- if (result == WALK_ABORT)
+ result = fgWalkTreePreRec<computeStack>(&tree->gtArrElem.gtArrObj, fgWalkData);
+ if (result == WALK_ABORT)
+ {
return result;
- }
- pTree = NULL;
- break;
+ }
- case GT_ARR_OFFSET:
- result = fgWalkTreePreRec<computeStack>(&tree->gtArrOffs.gtOffset, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreePreRec<computeStack>(&tree->gtArrOffs.gtIndex, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreePreRec<computeStack>(&tree->gtArrOffs.gtArrObj, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- pTree = nullptr;
- break;
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ result = fgWalkTreePreRec<computeStack>(&tree->gtArrElem.gtArrInds[dim], fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
+ pTree = nullptr;
+ break;
- case GT_CMPXCHG:
- result = fgWalkTreePreRec<computeStack>(&tree->gtCmpXchg.gtOpLocation, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreePreRec<computeStack>(&tree->gtCmpXchg.gtOpValue, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreePreRec<computeStack>(&tree->gtCmpXchg.gtOpComparand, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- pTree = NULL;
- break;
+ case GT_ARR_OFFSET:
+ result = fgWalkTreePreRec<computeStack>(&tree->gtArrOffs.gtOffset, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreePreRec<computeStack>(&tree->gtArrOffs.gtIndex, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreePreRec<computeStack>(&tree->gtArrOffs.gtArrObj, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ pTree = nullptr;
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_CMPXCHG:
+ result = fgWalkTreePreRec<computeStack>(&tree->gtCmpXchg.gtOpLocation, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreePreRec<computeStack>(&tree->gtCmpXchg.gtOpValue, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreePreRec<computeStack>(&tree->gtCmpXchg.gtOpComparand, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ pTree = nullptr;
+ break;
+
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- result = fgWalkTreePreRec<computeStack>(&tree->gtBoundsChk.gtArrLen, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreePreRec<computeStack>(&tree->gtBoundsChk.gtIndex, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- pTree = NULL;
- break;
+ result = fgWalkTreePreRec<computeStack>(&tree->gtBoundsChk.gtArrLen, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreePreRec<computeStack>(&tree->gtBoundsChk.gtIndex, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ pTree = nullptr;
+ break;
- default:
-#ifdef DEBUG
- fgWalkData->compiler->gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ fgWalkData->compiler->gtDispTree(tree);
#endif
- assert(!"unexpected operator");
+ assert(!"unexpected operator");
}
- }
- while (pTree != NULL && *pTree != NULL);
+ } while (pTree != nullptr && *pTree != nullptr);
if (computeStack)
+ {
fgWalkData->parentStack->Pop();
+ }
if (result != WALK_ABORT)
{
//
// Restore fgWalkData->parent
- //
+ //
fgWalkData->parent = currentParent;
}
return result;
@@ -640,14 +708,13 @@ Compiler::fgWalkResult Compiler::fgWalkTreePreRec(GenTreePtr *pTree, fgWalk
* nodes contained therein.
*/
-void Compiler::fgWalkAllTreesPre(fgWalkPreFn * visitor,
- void * pCallBackData)
+void Compiler::fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData)
{
- BasicBlock * block;
+ BasicBlock* block;
for (block = fgFirstBB; block; block = block->bbNext)
{
- GenTreePtr tree;
+ GenTreePtr tree;
for (tree = block->bbTreeList; tree; tree = tree->gtNext)
{
@@ -658,24 +725,23 @@ void Compiler::fgWalkAllTreesPre(fgWalkPreFn * visitor,
}
}
-
//******************************************************************************
// fgWalkTreePostRec - Helper function for fgWalkTreePost.
// Walk tree in post order, executing callback on every node
// template parameter 'computeStack' specifies whether to maintain
// a stack of ancestor nodes which can be viewed in the callback.
//
-template<bool computeStack>
+template <bool computeStack>
// static
-Compiler::fgWalkResult Compiler::fgWalkTreePostRec(GenTreePtr *pTree, fgWalkData *fgWalkData)
+Compiler::fgWalkResult Compiler::fgWalkTreePostRec(GenTreePtr* pTree, fgWalkData* fgWalkData)
{
- fgWalkResult result;
- GenTreePtr currentParent = fgWalkData->parent;
+ fgWalkResult result;
+ GenTreePtr currentParent = fgWalkData->parent;
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
- GenTree *tree = *pTree;
+ GenTree* tree = *pTree;
assert(tree);
assert(tree->gtOper != GT_STMT);
GenTreeArgList* args;
@@ -686,12 +752,16 @@ Compiler::fgWalkResult Compiler::fgWalkTreePostRec(GenTreePtr *pTree, fgWalkData
kind = tree->OperKind();
if (computeStack)
+ {
fgWalkData->parentStack->Push(tree);
+ }
/* Is this a constant or leaf node? */
- if (kind & (GTK_CONST|GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
+ {
goto DONE;
+ }
/* Is it a 'simple' unary/binary operator? */
@@ -722,15 +792,19 @@ Compiler::fgWalkResult Compiler::fgWalkTreePostRec(GenTreePtr *pTree, fgWalkData
if (*op1Slot != nullptr)
{
result = fgWalkTreePostRec<computeStack>(op1Slot, fgWalkData);
- if (result == WALK_ABORT)
+ if (result == WALK_ABORT)
+ {
return result;
+ }
}
if (op2Slot != nullptr && *op2Slot != nullptr)
{
result = fgWalkTreePostRec<computeStack>(op2Slot, fgWalkData);
- if (result == WALK_ABORT)
+ if (result == WALK_ABORT)
+ {
return result;
+ }
}
goto DONE;
@@ -738,119 +812,153 @@ Compiler::fgWalkResult Compiler::fgWalkTreePostRec(GenTreePtr *pTree, fgWalkData
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
- case GT_FIELD:
- if (tree->gtField.gtFldObj)
- {
- result = fgWalkTreePostRec<computeStack>(&tree->gtField.gtFldObj, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
-
- break;
+ case GT_FIELD:
+ if (tree->gtField.gtFldObj)
+ {
+ result = fgWalkTreePostRec<computeStack>(&tree->gtField.gtFldObj, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- case GT_CALL:
+ break;
- assert(tree->gtFlags & GTF_CALL);
+ case GT_CALL:
- if (tree->gtCall.gtCallObjp)
- {
- result = fgWalkTreePostRec<computeStack>(&tree->gtCall.gtCallObjp, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
+ assert(tree->gtFlags & GTF_CALL);
- for (args = tree->gtCall.gtCallArgs; args; args = args->Rest())
- {
- result = fgWalkTreePostRec<computeStack>(args->pCurrent(), fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
+ if (tree->gtCall.gtCallObjp)
+ {
+ result = fgWalkTreePostRec<computeStack>(&tree->gtCall.gtCallObjp, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- for (args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
- {
- result = fgWalkTreePostRec<computeStack>(args->pCurrent(), fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- {
- if (tree->gtCall.gtCallCookie)
+ for (args = tree->gtCall.gtCallArgs; args; args = args->Rest())
{
- result = fgWalkTreePostRec<computeStack>(&tree->gtCall.gtCallCookie, fgWalkData);
- if (result == WALK_ABORT)
+ result = fgWalkTreePostRec<computeStack>(args->pCurrent(), fgWalkData);
+ if (result == WALK_ABORT)
+ {
return result;
+ }
}
- result = fgWalkTreePostRec<computeStack>(&tree->gtCall.gtCallAddr, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
- if (tree->gtCall.gtControlExpr != nullptr)
- {
- result = fgWalkTreePostRec<computeStack>(&tree->gtCall.gtControlExpr, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
- break;
+ for (args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
+ {
+ result = fgWalkTreePostRec<computeStack>(args->pCurrent(), fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
+ {
+ if (tree->gtCall.gtCallCookie)
+ {
+ result = fgWalkTreePostRec<computeStack>(&tree->gtCall.gtCallCookie, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
+ result = fgWalkTreePostRec<computeStack>(&tree->gtCall.gtCallAddr, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- case GT_ARR_ELEM:
+ if (tree->gtCall.gtControlExpr != nullptr)
+ {
+ result = fgWalkTreePostRec<computeStack>(&tree->gtCall.gtControlExpr, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
+ break;
- result = fgWalkTreePostRec<computeStack>(&tree->gtArrElem.gtArrObj, fgWalkData);
- if (result == WALK_ABORT)
- return result;
+ case GT_ARR_ELEM:
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- {
- result = fgWalkTreePostRec<computeStack>(&tree->gtArrElem.gtArrInds[dim], fgWalkData);
- if (result == WALK_ABORT)
+ result = fgWalkTreePostRec<computeStack>(&tree->gtArrElem.gtArrObj, fgWalkData);
+ if (result == WALK_ABORT)
+ {
return result;
- }
- break;
+ }
- case GT_ARR_OFFSET:
- result = fgWalkTreePostRec<computeStack>(&tree->gtArrOffs.gtOffset, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreePostRec<computeStack>(&tree->gtArrOffs.gtIndex, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreePostRec<computeStack>(&tree->gtArrOffs.gtArrObj, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- break;
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ result = fgWalkTreePostRec<computeStack>(&tree->gtArrElem.gtArrInds[dim], fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
+ break;
- case GT_CMPXCHG:
- result = fgWalkTreePostRec<computeStack>(&tree->gtCmpXchg.gtOpComparand, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreePostRec<computeStack>(&tree->gtCmpXchg.gtOpValue, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreePostRec<computeStack>(&tree->gtCmpXchg.gtOpLocation, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- break;
+ case GT_ARR_OFFSET:
+ result = fgWalkTreePostRec<computeStack>(&tree->gtArrOffs.gtOffset, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreePostRec<computeStack>(&tree->gtArrOffs.gtIndex, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreePostRec<computeStack>(&tree->gtArrOffs.gtArrObj, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ break;
+
+ case GT_CMPXCHG:
+ result = fgWalkTreePostRec<computeStack>(&tree->gtCmpXchg.gtOpComparand, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreePostRec<computeStack>(&tree->gtCmpXchg.gtOpValue, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreePostRec<computeStack>(&tree->gtCmpXchg.gtOpLocation, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- result = fgWalkTreePostRec<computeStack>(&tree->gtBoundsChk.gtArrLen, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreePostRec<computeStack>(&tree->gtBoundsChk.gtIndex, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- break;
+ result = fgWalkTreePostRec<computeStack>(&tree->gtBoundsChk.gtArrLen, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreePostRec<computeStack>(&tree->gtBoundsChk.gtIndex, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ break;
- default:
-#ifdef DEBUG
- fgWalkData->compiler->gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ fgWalkData->compiler->gtDispTree(tree);
#endif
- assert(!"unexpected operator");
+ assert(!"unexpected operator");
}
DONE:
@@ -861,7 +969,9 @@ DONE:
result = fgWalkData->wtpoVisitorFn(pTree, fgWalkData);
if (computeStack)
+ {
fgWalkData->parentStack->Pop();
+ }
return result;
}
@@ -869,17 +979,16 @@ DONE:
// ****************************************************************************
// walk tree doing callbacks in both pre- and post- order (both optional)
-template<bool doPreOrder, bool doPostOrder>
+template <bool doPreOrder, bool doPostOrder>
// static
-Compiler::fgWalkResult
-Compiler::fgWalkTreeRec(GenTreePtr *pTree, fgWalkData *fgWalkData)
+Compiler::fgWalkResult Compiler::fgWalkTreeRec(GenTreePtr* pTree, fgWalkData* fgWalkData)
{
- fgWalkResult result = WALK_CONTINUE;
+ fgWalkResult result = WALK_CONTINUE;
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
- GenTree *tree = *pTree;
+ GenTree* tree = *pTree;
assert(tree);
assert(tree->gtOper != GT_STMT);
GenTreeArgList* args;
@@ -894,8 +1003,10 @@ Compiler::fgWalkTreeRec(GenTreePtr *pTree, fgWalkData *fgWalkData)
if (doPreOrder)
{
result = fgWalkData->wtprVisitorFn(pTree, fgWalkData);
- if (result == WALK_ABORT)
+ if (result == WALK_ABORT)
+ {
return result;
+ }
else
{
tree = *pTree;
@@ -906,29 +1017,37 @@ Compiler::fgWalkTreeRec(GenTreePtr *pTree, fgWalkData *fgWalkData)
// If we're skipping subtrees, we're done.
if (result == WALK_SKIP_SUBTREES)
+ {
goto DONE;
+ }
/* Is this a constant or leaf node? */
- if ((kind & (GTK_CONST|GTK_LEAF)) != 0)
+ if ((kind & (GTK_CONST | GTK_LEAF)) != 0)
+ {
goto DONE;
+ }
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
- if (tree->gtOp.gtOp1)
+ if (tree->gtOp.gtOp1)
{
result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtOp.gtOp1, fgWalkData);
- if (result == WALK_ABORT)
+ if (result == WALK_ABORT)
+ {
return result;
+ }
}
- if (tree->gtGetOp2())
+ if (tree->gtGetOp2())
{
result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtOp.gtOp2, fgWalkData);
- if (result == WALK_ABORT)
+ if (result == WALK_ABORT)
+ {
return result;
+ }
}
goto DONE;
@@ -936,120 +1055,154 @@ Compiler::fgWalkTreeRec(GenTreePtr *pTree, fgWalkData *fgWalkData)
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
- case GT_FIELD:
- if (tree->gtField.gtFldObj)
- {
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtField.gtFldObj, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
-
- break;
+ case GT_FIELD:
+ if (tree->gtField.gtFldObj)
+ {
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtField.gtFldObj, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- case GT_CALL:
+ break;
- assert(tree->gtFlags & GTF_CALL);
+ case GT_CALL:
- if (tree->gtCall.gtCallObjp)
- {
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCall.gtCallObjp, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
+ assert(tree->gtFlags & GTF_CALL);
- for (args = tree->gtCall.gtCallArgs; args; args = args->Rest())
- {
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(args->pCurrent(), fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
+ if (tree->gtCall.gtCallObjp)
+ {
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCall.gtCallObjp, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- for (args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
- {
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(args->pCurrent(), fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- {
- if (tree->gtCall.gtCallCookie)
+ for (args = tree->gtCall.gtCallArgs; args; args = args->Rest())
{
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCall.gtCallCookie, fgWalkData);
- if (result == WALK_ABORT)
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(args->pCurrent(), fgWalkData);
+ if (result == WALK_ABORT)
+ {
return result;
+ }
}
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCall.gtCallAddr, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
- if (tree->gtCall.gtControlExpr)
- {
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCall.gtControlExpr, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- }
+ for (args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
+ {
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(args->pCurrent(), fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
+ {
+ if (tree->gtCall.gtCallCookie)
+ {
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCall.gtCallCookie, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCall.gtCallAddr, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- break;
+ if (tree->gtCall.gtControlExpr)
+ {
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCall.gtControlExpr, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
- case GT_ARR_ELEM:
+ break;
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtArrElem.gtArrObj, fgWalkData);
- if (result == WALK_ABORT)
- return result;
+ case GT_ARR_ELEM:
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- {
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtArrElem.gtArrInds[dim], fgWalkData);
- if (result == WALK_ABORT)
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtArrElem.gtArrObj, fgWalkData);
+ if (result == WALK_ABORT)
+ {
return result;
- }
- break;
+ }
- case GT_ARR_OFFSET:
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtArrOffs.gtOffset, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtArrOffs.gtIndex, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtArrOffs.gtArrObj, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- break;
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtArrElem.gtArrInds[dim], fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ }
+ break;
- case GT_CMPXCHG:
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCmpXchg.gtOpComparand, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCmpXchg.gtOpValue, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCmpXchg.gtOpLocation, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- break;
+ case GT_ARR_OFFSET:
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtArrOffs.gtOffset, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtArrOffs.gtIndex, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtArrOffs.gtArrObj, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ break;
+
+ case GT_CMPXCHG:
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCmpXchg.gtOpComparand, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCmpXchg.gtOpValue, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtCmpXchg.gtOpLocation, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtBoundsChk.gtArrLen, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtBoundsChk.gtIndex, fgWalkData);
- if (result == WALK_ABORT)
- return result;
- break;
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtBoundsChk.gtArrLen, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ result = fgWalkTreeRec<doPreOrder, doPostOrder>(&tree->gtBoundsChk.gtIndex, fgWalkData);
+ if (result == WALK_ABORT)
+ {
+ return result;
+ }
+ break;
- default:
-#ifdef DEBUG
- fgWalkData->compiler->gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ fgWalkData->compiler->gtDispTree(tree);
#endif
- assert(!"unexpected operator");
+ assert(!"unexpected operator");
}
DONE:
@@ -1075,37 +1228,42 @@ DONE:
* WALK_SKIP_SUBTREES don't walk any subtrees of the node just visited
*/
-Compiler::fgWalkResult Compiler::fgWalkTree(GenTreePtr * pTree,
- fgWalkPreFn * preVisitor,
- fgWalkPreFn * postVisitor,
- void * callBackData)
+Compiler::fgWalkResult Compiler::fgWalkTree(GenTreePtr* pTree,
+ fgWalkPreFn* preVisitor,
+ fgWalkPreFn* postVisitor,
+ void* callBackData)
{
fgWalkData walkData;
- walkData.compiler = this;
- walkData.wtprVisitorFn = preVisitor;
- walkData.wtpoVisitorFn = postVisitor;
- walkData.pCallbackData = callBackData;
- walkData.parent = NULL;
- walkData.wtprLclsOnly = false;
+ walkData.compiler = this;
+ walkData.wtprVisitorFn = preVisitor;
+ walkData.wtpoVisitorFn = postVisitor;
+ walkData.pCallbackData = callBackData;
+ walkData.parent = nullptr;
+ walkData.wtprLclsOnly = false;
#ifdef DEBUG
- walkData.printModified = false;
+ walkData.printModified = false;
#endif
- ArrayStack<GenTree *> parentStack(this);
+ ArrayStack<GenTree*> parentStack(this);
walkData.parentStack = &parentStack;
fgWalkResult result;
- assert (preVisitor || postVisitor);
+ assert(preVisitor || postVisitor);
if (preVisitor && postVisitor)
- result = fgWalkTreeRec<true,true>(pTree, &walkData);
+ {
+ result = fgWalkTreeRec<true, true>(pTree, &walkData);
+ }
else if (preVisitor)
- result = fgWalkTreeRec<true,false>(pTree, &walkData);
+ {
+ result = fgWalkTreeRec<true, false>(pTree, &walkData);
+ }
else
- result = fgWalkTreeRec<false,true>(pTree, &walkData);
-
+ {
+ result = fgWalkTreeRec<false, true>(pTree, &walkData);
+ }
#ifdef DEBUG
if (verbose && walkData.printModified)
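(For reference, a minimal sketch of how this walker is typically driven, modeled on the gtHasLocalsWithAddrOpCB callback further down in this file; the callback and payload names here are illustrative only and are not part of this change.)

    struct CountCallsDsc
    {
        unsigned callCount; // number of GT_CALL nodes visited
    };

    /* static */
    Compiler::fgWalkResult Compiler::CountCallsCB(GenTreePtr* pTree, fgWalkData* data)
    {
        if ((*pTree)->gtOper == GT_CALL)
        {
            ((CountCallsDsc*)data->pCallbackData)->callCount++;
        }
        return WALK_CONTINUE;
    }

    // ... inside some Compiler member; fgWalkTree accepts a null pre- or post-visitor,
    // as long as at least one is supplied.
    CountCallsDsc desc;
    desc.callCount = 0;
    fgWalkTree(&stmt->gtStmt.gtStmtExpr, CountCallsCB, nullptr, &desc);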
@@ -1127,8 +1285,7 @@ Compiler::fgWalkResult Compiler::fgWalkTree(GenTreePtr * pTree,
//
// Return Value:
// None
-void
-GenTree::gtClearReg(Compiler* compiler)
+void GenTree::gtClearReg(Compiler* compiler)
{
#if CPU_LONG_USES_REGPAIR
if (isRegPairType(TypeGet()) ||
@@ -1162,8 +1319,7 @@ GenTree::gtClearReg(Compiler* compiler)
//
// Return Value:
// None
-void
-GenTree::CopyReg(GenTreePtr from)
+void GenTree::CopyReg(GenTreePtr from)
{
// To do the copy, use _gtRegPair, which must be bigger than _gtRegNum. Note that the values
// might be undefined (so gtRegTag == GT_REGTAG_NONE).
@@ -1224,10 +1380,10 @@ bool GenTree::gtHasReg() const
if (IsMultiRegCall())
{
// Has to cast away const-ness because GetReturnTypeDesc() is a non-const method
- GenTree* tree = const_cast<GenTree*>(this);
- GenTreeCall* call = tree->AsCall();
- unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
- hasReg = false;
+ GenTree* tree = const_cast<GenTree*>(this);
+ GenTreeCall* call = tree->AsCall();
+ unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
+ hasReg = false;
// A Multi-reg call node is said to have regs, if it has
// reg assigned to each of its result registers.
@@ -1242,11 +1398,11 @@ bool GenTree::gtHasReg() const
}
else if (IsCopyOrReloadOfMultiRegCall())
{
- GenTree* tree = const_cast<GenTree*>(this);
+ GenTree* tree = const_cast<GenTree*>(this);
GenTreeCopyOrReload* copyOrReload = tree->AsCopyOrReload();
- GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
- unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
- hasReg = false;
+ GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
+ unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
+ hasReg = false;
// A Multi-reg copy or reload node is said to have regs,
// if it has valid regs in any of the positions.
@@ -1277,8 +1433,7 @@ bool GenTree::gtHasReg() const
// Return Value:
// Reg Mask of GenTree node.
//
-regMaskTP
-GenTree::gtGetRegMask() const
+regMaskTP GenTree::gtGetRegMask() const
{
regMaskTP resultMask;
@@ -1293,20 +1448,20 @@ GenTree::gtGetRegMask() const
if (IsMultiRegCall())
{
// temporarily cast away const-ness as AsCall() method is not declared const
- resultMask = genRegMask(gtRegNum);
+ resultMask = genRegMask(gtRegNum);
GenTree* temp = const_cast<GenTree*>(this);
resultMask |= temp->AsCall()->GetOtherRegMask();
}
else if (IsCopyOrReloadOfMultiRegCall())
{
- // A multi-reg copy or reload, will have valid regs for only those
+ // A multi-reg copy or reload, will have valid regs for only those
// positions that need to be copied or reloaded. Hence we need
// to consider only those registers for computing reg mask.
- GenTree* tree = const_cast<GenTree*>(this);
+ GenTree* tree = const_cast<GenTree*>(this);
GenTreeCopyOrReload* copyOrReload = tree->AsCopyOrReload();
- GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
- unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
+ GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
+ unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
@@ -1336,8 +1491,7 @@ GenTree::gtGetRegMask() const
// Return Value:
// Reg mask of gtOtherRegs of call node.
//
-regMaskTP
-GenTreeCall::GetOtherRegMask() const
+regMaskTP GenTreeCall::GetOtherRegMask() const
{
regMaskTP resultMask = RBM_NONE;
@@ -1374,7 +1528,6 @@ bool GenTreeCall::HasNonStandardAddedArgs(Compiler* compiler) const
return GetNonStandardAddedArgCount(compiler) != 0;
}
-
//-------------------------------------------------------------------------
// GetNonStandardAddedArgCount: Get the count of non-standard arguments that have been added
// during call argument morphing (fgMorphArgs). Do not count non-standard args that are already
@@ -1415,7 +1568,7 @@ int GenTreeCall::GetNonStandardAddedArgCount(Compiler* compiler) const
// R11 = PInvoke cookie param
return 2;
}
- }
+ }
return 0;
}
@@ -1429,13 +1582,13 @@ int GenTreeCall::GetNonStandardAddedArgCount(Compiler* compiler) const
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
-// This method may actually have a retBuf argument
-// or it could be a JIT helper that we are still transforming during
+// This method may actually have a retBuf argument
+// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
-// will make HasRetBufArg() return true, but will also force the
+// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
// These two Jit Helpers that we handle here by returning true
@@ -1476,7 +1629,6 @@ bool GenTreeCall::TreatAsHasRetBufArg(Compiler* compiler) const
}
return false;
}
-
//-------------------------------------------------------------------------
// IsHelperCall: Determine if this GT_CALL node is a specific helper call.
@@ -1492,25 +1644,33 @@ bool GenTreeCall::IsHelperCall(Compiler* compiler, unsigned helper) const
return IsHelperCall(compiler->eeFindHelper(helper));
}
-
/*****************************************************************************
*
* Returns non-zero if the two trees are identical.
*/
-bool GenTree::Compare(GenTreePtr op1, GenTreePtr op2, bool swapOK)
+bool GenTree::Compare(GenTreePtr op1, GenTreePtr op2, bool swapOK)
{
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
// printf("tree1:\n"); gtDispTree(op1);
// printf("tree2:\n"); gtDispTree(op2);
AGAIN:
- if (op1 == NULL) return (op2 == NULL);
- if (op2 == NULL) return false;
- if (op1 == op2) return true;
+ if (op1 == nullptr)
+ {
+ return (op2 == nullptr);
+ }
+ if (op2 == nullptr)
+ {
+ return false;
+ }
+ if (op1 == op2)
+ {
+ return true;
+ }
assert(op1->gtOper != GT_STMT);
assert(op2->gtOper != GT_STMT);
@@ -1519,43 +1679,46 @@ AGAIN:
/* The operators must be equal */
- if (oper != op2->gtOper)
+ if (oper != op2->gtOper)
+ {
return false;
+ }
/* The types must be equal */
- if (op1->gtType != op2->gtType)
+ if (op1->gtType != op2->gtType)
+ {
return false;
+ }
/* Overflow must be equal */
- if (op1->gtOverflowEx() != op2->gtOverflowEx())
+ if (op1->gtOverflowEx() != op2->gtOverflowEx())
{
return false;
}
-
/* Sensible flags must be equal */
- if ( (op1->gtFlags & (GTF_UNSIGNED )) !=
- (op2->gtFlags & (GTF_UNSIGNED )) )
+ if ((op1->gtFlags & (GTF_UNSIGNED)) != (op2->gtFlags & (GTF_UNSIGNED)))
{
return false;
}
-
/* Figure out what kind of nodes we're comparing */
kind = op1->OperKind();
/* Is this a constant node? */
- if (kind & GTK_CONST)
+ if (kind & GTK_CONST)
{
switch (oper)
{
- case GT_CNS_INT:
- if (op1->gtIntCon.gtIconVal == op2->gtIntCon.gtIconVal)
- return true;
- break;
+ case GT_CNS_INT:
+ if (op1->gtIntCon.gtIconVal == op2->gtIntCon.gtIconVal)
+ {
+ return true;
+ }
+ break;
#if 0
// TODO-CQ: Enable this in the future
case GT_CNS_LNG:
@@ -1568,51 +1731,57 @@ AGAIN:
return true;
break;
#endif
- default:
- break;
+ default:
+ break;
}
- return false;
+ return false;
}
/* Is this a leaf node? */
- if (kind & GTK_LEAF)
+ if (kind & GTK_LEAF)
{
switch (oper)
{
- case GT_LCL_VAR:
- if (op1->gtLclVarCommon.gtLclNum != op2->gtLclVarCommon.gtLclNum)
- break;
+ case GT_LCL_VAR:
+ if (op1->gtLclVarCommon.gtLclNum != op2->gtLclVarCommon.gtLclNum)
+ {
+ break;
+ }
- return true;
+ return true;
- case GT_LCL_FLD:
- if (op1->gtLclFld.gtLclNum != op2->gtLclFld.gtLclNum ||
- op1->gtLclFld.gtLclOffs != op2->gtLclFld.gtLclOffs)
- break;
+ case GT_LCL_FLD:
+ if (op1->gtLclFld.gtLclNum != op2->gtLclFld.gtLclNum ||
+ op1->gtLclFld.gtLclOffs != op2->gtLclFld.gtLclOffs)
+ {
+ break;
+ }
- return true;
+ return true;
- case GT_CLS_VAR:
- if (op1->gtClsVar.gtClsVarHnd != op2->gtClsVar.gtClsVarHnd)
- break;
+ case GT_CLS_VAR:
+ if (op1->gtClsVar.gtClsVarHnd != op2->gtClsVar.gtClsVarHnd)
+ {
+ break;
+ }
- return true;
+ return true;
- case GT_LABEL:
- return true;
+ case GT_LABEL:
+ return true;
- case GT_ARGPLACE:
- if ((op1->gtType == TYP_STRUCT) &&
- (op1->gtArgPlace.gtArgPlaceClsHnd != op2->gtArgPlace.gtArgPlaceClsHnd))
- {
- break;
- }
- return true;
+ case GT_ARGPLACE:
+ if ((op1->gtType == TYP_STRUCT) &&
+ (op1->gtArgPlace.gtArgPlaceClsHnd != op2->gtArgPlace.gtArgPlaceClsHnd))
+ {
+ break;
+ }
+ return true;
- default:
- break;
+ default:
+ break;
}
return false;
@@ -1620,7 +1789,7 @@ AGAIN:
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_UNOP)
+ if (kind & GTK_UNOP)
{
if (IsExOp(kind))
{
@@ -1628,28 +1797,37 @@ AGAIN:
// these should be included in the comparison.
switch (oper)
{
- case GT_ARR_LENGTH:
- if (op1->gtArrLen.ArrLenOffset() != op2->gtArrLen.ArrLenOffset()) return false;
- break;
- case GT_CAST:
- if (op1->gtCast.gtCastType != op2->gtCast.gtCastType) return false;
- break;
- case GT_OBJ:
- if (op1->AsObj()->gtClass != op2->AsObj()->gtClass) return false;
- break;
+ case GT_ARR_LENGTH:
+ if (op1->gtArrLen.ArrLenOffset() != op2->gtArrLen.ArrLenOffset())
+ {
+ return false;
+ }
+ break;
+ case GT_CAST:
+ if (op1->gtCast.gtCastType != op2->gtCast.gtCastType)
+ {
+ return false;
+ }
+ break;
+ case GT_OBJ:
+ if (op1->AsObj()->gtClass != op2->AsObj()->gtClass)
+ {
+ return false;
+ }
+ break;
// For the ones below no extra argument matters for comparison.
- case GT_BOX:
- break;
+ case GT_BOX:
+ break;
- default:
- assert(!"unexpected unary ExOp operator");
+ default:
+ assert(!"unexpected unary ExOp operator");
}
}
return Compare(op1->gtOp.gtOp1, op2->gtOp.gtOp1);
}
- if (kind & GTK_BINOP)
+ if (kind & GTK_BINOP)
{
if (IsExOp(kind))
{
@@ -1657,34 +1835,48 @@ AGAIN:
// these should be included in the hash code.
switch (oper)
{
- case GT_INTRINSIC:
- if (op1->gtIntrinsic.gtIntrinsicId != op2->gtIntrinsic.gtIntrinsicId) return false;
- break;
- case GT_LEA:
- if (op1->gtAddrMode.gtScale != op2->gtAddrMode.gtScale) return false;
- if (op1->gtAddrMode.gtOffset != op2->gtAddrMode.gtOffset) return false;
- break;
- case GT_INDEX:
- if (op1->gtIndex.gtIndElemSize != op2->gtIndex.gtIndElemSize) return false;
- break;
+ case GT_INTRINSIC:
+ if (op1->gtIntrinsic.gtIntrinsicId != op2->gtIntrinsic.gtIntrinsicId)
+ {
+ return false;
+ }
+ break;
+ case GT_LEA:
+ if (op1->gtAddrMode.gtScale != op2->gtAddrMode.gtScale)
+ {
+ return false;
+ }
+ if (op1->gtAddrMode.gtOffset != op2->gtAddrMode.gtOffset)
+ {
+ return false;
+ }
+ break;
+ case GT_INDEX:
+ if (op1->gtIndex.gtIndElemSize != op2->gtIndex.gtIndElemSize)
+ {
+ return false;
+ }
+ break;
// For the ones below no extra argument matters for comparison.
- case GT_QMARK:
- break;
+ case GT_QMARK:
+ break;
- default:
- assert(!"unexpected binary ExOp operator");
+ default:
+ assert(!"unexpected binary ExOp operator");
}
}
- if (op1->gtOp.gtOp2)
+ if (op1->gtOp.gtOp2)
{
- if (!Compare(op1->gtOp.gtOp1, op2->gtOp.gtOp1, swapOK))
+ if (!Compare(op1->gtOp.gtOp1, op2->gtOp.gtOp1, swapOK))
{
- if (swapOK && OperIsCommutative(oper) &&
- ((op1->gtOp.gtOp1->gtFlags | op1->gtOp.gtOp2->gtFlags | op2->gtOp.gtOp1->gtFlags | op2->gtOp.gtOp2->gtFlags) & GTF_ALL_EFFECT) == 0)
+ if (swapOK && OperIsCommutative(oper) &&
+ ((op1->gtOp.gtOp1->gtFlags | op1->gtOp.gtOp2->gtFlags | op2->gtOp.gtOp1->gtFlags |
+ op2->gtOp.gtOp2->gtFlags) &
+ GTF_ALL_EFFECT) == 0)
{
- if (Compare(op1->gtOp.gtOp1, op2->gtOp.gtOp2, swapOK))
+ if (Compare(op1->gtOp.gtOp1, op2->gtOp.gtOp2, swapOK))
{
op1 = op1->gtOp.gtOp2;
op2 = op2->gtOp.gtOp1;
@@ -1706,8 +1898,14 @@ AGAIN:
op1 = op1->gtOp.gtOp1;
op2 = op2->gtOp.gtOp1;
- if (!op1) return (op2 == 0);
- if (!op2) return false;
+ if (!op1)
+ {
+ return (op2 == nullptr);
+ }
+ if (!op2)
+ {
+ return false;
+ }
goto AGAIN;
}
@@ -1715,94 +1913,110 @@ AGAIN:
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
- case GT_FIELD:
- if (op1->gtField.gtFldHnd != op2->gtField.gtFldHnd)
- break;
+ case GT_FIELD:
+ if (op1->gtField.gtFldHnd != op2->gtField.gtFldHnd)
+ {
+ break;
+ }
- op1 = op1->gtField.gtFldObj;
- op2 = op2->gtField.gtFldObj;
+ op1 = op1->gtField.gtFldObj;
+ op2 = op2->gtField.gtFldObj;
- if (op1 || op2)
- {
- if (op1 && op2)
- goto AGAIN;
- }
-
- return true;
+ if (op1 || op2)
+ {
+ if (op1 && op2)
+ {
+ goto AGAIN;
+ }
+ }
- case GT_CALL:
+ return true;
- if (op1->gtCall.gtCallType != op2->gtCall.gtCallType)
- return false;
+ case GT_CALL:
- if (op1->gtCall.gtCallType != CT_INDIRECT)
- {
- if (op1->gtCall.gtCallMethHnd != op2->gtCall.gtCallMethHnd)
+ if (op1->gtCall.gtCallType != op2->gtCall.gtCallType)
+ {
return false;
+ }
+
+ if (op1->gtCall.gtCallType != CT_INDIRECT)
+ {
+ if (op1->gtCall.gtCallMethHnd != op2->gtCall.gtCallMethHnd)
+ {
+ return false;
+ }
#ifdef FEATURE_READYTORUN_COMPILER
- if (op1->gtCall.gtEntryPoint.addr != op2->gtCall.gtEntryPoint.addr)
- return false;
+ if (op1->gtCall.gtEntryPoint.addr != op2->gtCall.gtEntryPoint.addr)
+ return false;
#endif
- }
- else
- {
- if (!Compare(op1->gtCall.gtCallAddr, op2->gtCall.gtCallAddr))
- return false;
- }
+ }
+ else
+ {
+ if (!Compare(op1->gtCall.gtCallAddr, op2->gtCall.gtCallAddr))
+ {
+ return false;
+ }
+ }
- if (Compare(op1->gtCall.gtCallLateArgs, op2->gtCall.gtCallLateArgs) &&
- Compare(op1->gtCall.gtCallArgs, op2->gtCall.gtCallArgs) &&
- Compare(op1->gtCall.gtControlExpr, op2->gtCall.gtControlExpr) &&
- Compare(op1->gtCall.gtCallObjp, op2->gtCall.gtCallObjp))
- return true;
- break;
+ if (Compare(op1->gtCall.gtCallLateArgs, op2->gtCall.gtCallLateArgs) &&
+ Compare(op1->gtCall.gtCallArgs, op2->gtCall.gtCallArgs) &&
+ Compare(op1->gtCall.gtControlExpr, op2->gtCall.gtControlExpr) &&
+ Compare(op1->gtCall.gtCallObjp, op2->gtCall.gtCallObjp))
+ {
+ return true;
+ }
+ break;
- case GT_ARR_ELEM:
+ case GT_ARR_ELEM:
- if (op1->gtArrElem.gtArrRank != op2->gtArrElem.gtArrRank)
- return false;
+ if (op1->gtArrElem.gtArrRank != op2->gtArrElem.gtArrRank)
+ {
+ return false;
+ }
- // NOTE: gtArrElemSize may need to be handled
+ // NOTE: gtArrElemSize may need to be handled
- unsigned dim;
- for (dim = 0; dim < op1->gtArrElem.gtArrRank; dim++)
- {
- if (!Compare(op1->gtArrElem.gtArrInds[dim], op2->gtArrElem.gtArrInds[dim]))
- return false;
- }
+ unsigned dim;
+ for (dim = 0; dim < op1->gtArrElem.gtArrRank; dim++)
+ {
+ if (!Compare(op1->gtArrElem.gtArrInds[dim], op2->gtArrElem.gtArrInds[dim]))
+ {
+ return false;
+ }
+ }
- op1 = op1->gtArrElem.gtArrObj;
- op2 = op2->gtArrElem.gtArrObj;
- goto AGAIN;
+ op1 = op1->gtArrElem.gtArrObj;
+ op2 = op2->gtArrElem.gtArrObj;
+ goto AGAIN;
- case GT_ARR_OFFSET:
- if (op1->gtArrOffs.gtCurrDim != op2->gtArrOffs.gtCurrDim ||
- op1->gtArrOffs.gtArrRank != op2->gtArrOffs.gtArrRank)
- {
- return false;
- }
- return (Compare(op1->gtArrOffs.gtOffset, op2->gtArrOffs.gtOffset) &&
- Compare(op1->gtArrOffs.gtIndex, op2->gtArrOffs.gtIndex) &&
- Compare(op1->gtArrOffs.gtArrObj, op2->gtArrOffs.gtArrObj));
+ case GT_ARR_OFFSET:
+ if (op1->gtArrOffs.gtCurrDim != op2->gtArrOffs.gtCurrDim ||
+ op1->gtArrOffs.gtArrRank != op2->gtArrOffs.gtArrRank)
+ {
+ return false;
+ }
+ return (Compare(op1->gtArrOffs.gtOffset, op2->gtArrOffs.gtOffset) &&
+ Compare(op1->gtArrOffs.gtIndex, op2->gtArrOffs.gtIndex) &&
+ Compare(op1->gtArrOffs.gtArrObj, op2->gtArrOffs.gtArrObj));
- case GT_CMPXCHG:
- return Compare(op1->gtCmpXchg.gtOpLocation, op2->gtCmpXchg.gtOpLocation)
- && Compare(op1->gtCmpXchg.gtOpValue, op2->gtCmpXchg.gtOpValue)
- && Compare(op1->gtCmpXchg.gtOpComparand, op2->gtCmpXchg.gtOpComparand);
+ case GT_CMPXCHG:
+ return Compare(op1->gtCmpXchg.gtOpLocation, op2->gtCmpXchg.gtOpLocation) &&
+ Compare(op1->gtCmpXchg.gtOpValue, op2->gtCmpXchg.gtOpValue) &&
+ Compare(op1->gtCmpXchg.gtOpComparand, op2->gtCmpXchg.gtOpComparand);
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- return Compare(op1->gtBoundsChk.gtArrLen, op2->gtBoundsChk.gtArrLen)
- && Compare(op1->gtBoundsChk.gtIndex, op2->gtBoundsChk.gtIndex)
- && (op1->gtBoundsChk.gtThrowKind == op2->gtBoundsChk.gtThrowKind);
+ return Compare(op1->gtBoundsChk.gtArrLen, op2->gtBoundsChk.gtArrLen) &&
+ Compare(op1->gtBoundsChk.gtIndex, op2->gtBoundsChk.gtIndex) &&
+ (op1->gtBoundsChk.gtThrowKind == op2->gtBoundsChk.gtThrowKind);
- default:
- assert(!"unexpected operator");
+ default:
+ assert(!"unexpected operator");
}
return false;
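As a rough illustration of the swapOK path above: with swapOK set, comparing GT_ADD(a, b) against GT_ADD(b, a) can still succeed, because for a commutative operator whose four operands carry no GTF_ALL_EFFECT flags the code retries op1's first operand against op2's second operand before giving up; with swapOK false, or with side-effecting operands, the initial gtOp1 mismatch makes Compare return false.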
@@ -1813,10 +2027,10 @@ AGAIN:
* Returns non-zero if the given tree contains a use of a local #lclNum.
*/
-bool Compiler::gtHasRef(GenTreePtr tree, ssize_t lclNum, bool defOnly)
+bool Compiler::gtHasRef(GenTreePtr tree, ssize_t lclNum, bool defOnly)
{
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
AGAIN:
@@ -1829,37 +2043,43 @@ AGAIN:
/* Is this a constant node? */
- if (kind & GTK_CONST)
- return false;
+ if (kind & GTK_CONST)
+ {
+ return false;
+ }
/* Is this a leaf node? */
- if (kind & GTK_LEAF)
+ if (kind & GTK_LEAF)
{
- if (oper == GT_LCL_VAR)
+ if (oper == GT_LCL_VAR)
{
- if (tree->gtLclVarCommon.gtLclNum == (unsigned)lclNum)
+ if (tree->gtLclVarCommon.gtLclNum == (unsigned)lclNum)
{
- if (!defOnly)
+ if (!defOnly)
+ {
return true;
+ }
}
}
else if (oper == GT_RET_EXPR)
{
return gtHasRef(tree->gtRetExpr.gtInlineCandidate, lclNum, defOnly);
- }
+ }
return false;
}
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
- if (tree->gtGetOp2())
+ if (tree->gtGetOp2())
{
- if (gtHasRef(tree->gtOp.gtOp1, lclNum, defOnly))
+ if (gtHasRef(tree->gtOp.gtOp1, lclNum, defOnly))
+ {
return true;
+ }
tree = tree->gtOp.gtOp2;
goto AGAIN;
@@ -1868,23 +2088,23 @@ AGAIN:
{
tree = tree->gtOp.gtOp1;
- if (!tree)
- return false;
+ if (!tree)
+ {
+ return false;
+ }
- if (kind & GTK_ASGOP)
+ if (kind & GTK_ASGOP)
{
// 'tree' is the gtOp1 of an assignment node. So we can handle
// the case where defOnly is either true or false.
- if (tree->gtOper == GT_LCL_VAR &&
- tree->gtLclVarCommon.gtLclNum == (unsigned)lclNum)
+ if (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == (unsigned)lclNum)
{
return true;
}
- else if (tree->gtOper == GT_FIELD &&
- lclNum == (ssize_t)tree->gtField.gtFldHnd)
+ else if (tree->gtOper == GT_FIELD && lclNum == (ssize_t)tree->gtField.gtFldHnd)
{
- return true;
+ return true;
}
}
@@ -1894,101 +2114,138 @@ AGAIN:
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
- case GT_FIELD:
- if (lclNum == (ssize_t)tree->gtField.gtFldHnd)
- {
- if (!defOnly)
- return true;
- }
+ case GT_FIELD:
+ if (lclNum == (ssize_t)tree->gtField.gtFldHnd)
+ {
+ if (!defOnly)
+ {
+ return true;
+ }
+ }
- tree = tree->gtField.gtFldObj;
- if (tree)
- goto AGAIN;
- break;
+ tree = tree->gtField.gtFldObj;
+ if (tree)
+ {
+ goto AGAIN;
+ }
+ break;
- case GT_CALL:
+ case GT_CALL:
- if (tree->gtCall.gtCallObjp)
- if (gtHasRef(tree->gtCall.gtCallObjp, lclNum, defOnly))
- return true;
+ if (tree->gtCall.gtCallObjp)
+ {
+ if (gtHasRef(tree->gtCall.gtCallObjp, lclNum, defOnly))
+ {
+ return true;
+ }
+ }
- if (tree->gtCall.gtCallArgs)
- if (gtHasRef(tree->gtCall.gtCallArgs, lclNum, defOnly))
- return true;
+ if (tree->gtCall.gtCallArgs)
+ {
+ if (gtHasRef(tree->gtCall.gtCallArgs, lclNum, defOnly))
+ {
+ return true;
+ }
+ }
- if (tree->gtCall.gtCallLateArgs)
- if (gtHasRef(tree->gtCall.gtCallLateArgs, lclNum, defOnly))
- return true;
+ if (tree->gtCall.gtCallLateArgs)
+ {
+ if (gtHasRef(tree->gtCall.gtCallLateArgs, lclNum, defOnly))
+ {
+ return true;
+ }
+ }
- if (tree->gtCall.gtCallLateArgs)
- if (gtHasRef(tree->gtCall.gtControlExpr, lclNum, defOnly))
- return true;
+ if (tree->gtCall.gtCallLateArgs)
+ {
+ if (gtHasRef(tree->gtCall.gtControlExpr, lclNum, defOnly))
+ {
+ return true;
+ }
+ }
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- {
- // pinvoke-calli cookie is a constant, or constant indirection
- assert(tree->gtCall.gtCallCookie == NULL ||
- tree->gtCall.gtCallCookie->gtOper == GT_CNS_INT ||
- tree->gtCall.gtCallCookie->gtOper == GT_IND);
-
- tree = tree->gtCall.gtCallAddr;
- }
- else
- tree = NULL;
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
+ {
+ // pinvoke-calli cookie is a constant, or constant indirection
+ assert(tree->gtCall.gtCallCookie == nullptr || tree->gtCall.gtCallCookie->gtOper == GT_CNS_INT ||
+ tree->gtCall.gtCallCookie->gtOper == GT_IND);
- if (tree)
- goto AGAIN;
+ tree = tree->gtCall.gtCallAddr;
+ }
+ else
+ {
+ tree = nullptr;
+ }
- break;
+ if (tree)
+ {
+ goto AGAIN;
+ }
- case GT_ARR_ELEM:
- if (gtHasRef(tree->gtArrElem.gtArrObj, lclNum, defOnly))
- return true;
+ break;
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- {
- if (gtHasRef(tree->gtArrElem.gtArrInds[dim], lclNum, defOnly))
+ case GT_ARR_ELEM:
+ if (gtHasRef(tree->gtArrElem.gtArrObj, lclNum, defOnly))
+ {
return true;
- }
+ }
- break;
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ if (gtHasRef(tree->gtArrElem.gtArrInds[dim], lclNum, defOnly))
+ {
+ return true;
+ }
+ }
- case GT_ARR_OFFSET:
- if (gtHasRef(tree->gtArrOffs.gtOffset, lclNum, defOnly) ||
- gtHasRef(tree->gtArrOffs.gtIndex, lclNum, defOnly) ||
- gtHasRef(tree->gtArrOffs.gtArrObj, lclNum, defOnly))
- {
- return true;
- }
- break;
+ break;
- case GT_CMPXCHG:
- if (gtHasRef(tree->gtCmpXchg.gtOpLocation, lclNum, defOnly))
- return true;
- if (gtHasRef(tree->gtCmpXchg.gtOpValue, lclNum, defOnly))
- return true;
- if (gtHasRef(tree->gtCmpXchg.gtOpComparand, lclNum, defOnly))
- return true;
- break;
+ case GT_ARR_OFFSET:
+ if (gtHasRef(tree->gtArrOffs.gtOffset, lclNum, defOnly) ||
+ gtHasRef(tree->gtArrOffs.gtIndex, lclNum, defOnly) ||
+ gtHasRef(tree->gtArrOffs.gtArrObj, lclNum, defOnly))
+ {
+ return true;
+ }
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_CMPXCHG:
+ if (gtHasRef(tree->gtCmpXchg.gtOpLocation, lclNum, defOnly))
+ {
+ return true;
+ }
+ if (gtHasRef(tree->gtCmpXchg.gtOpValue, lclNum, defOnly))
+ {
+ return true;
+ }
+ if (gtHasRef(tree->gtCmpXchg.gtOpComparand, lclNum, defOnly))
+ {
+ return true;
+ }
+ break;
+
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- if (gtHasRef(tree->gtBoundsChk.gtArrLen, lclNum, defOnly))
- return true;
- if (gtHasRef(tree->gtBoundsChk.gtIndex, lclNum, defOnly))
- return true;
- break;
+ if (gtHasRef(tree->gtBoundsChk.gtArrLen, lclNum, defOnly))
+ {
+ return true;
+ }
+ if (gtHasRef(tree->gtBoundsChk.gtIndex, lclNum, defOnly))
+ {
+ return true;
+ }
+ break;
- default:
-#ifdef DEBUG
- gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ gtDispTree(tree);
#endif
- assert(!"unexpected operator");
+ assert(!"unexpected operator");
}
return false;
@@ -1996,29 +2253,28 @@ AGAIN:
struct AddrTakenDsc
{
- Compiler * comp;
- bool hasAddrTakenLcl;
+ Compiler* comp;
+ bool hasAddrTakenLcl;
};
/* static */
-Compiler::fgWalkResult Compiler::gtHasLocalsWithAddrOpCB(GenTreePtr *pTree,
- fgWalkData *data)
-{
+Compiler::fgWalkResult Compiler::gtHasLocalsWithAddrOpCB(GenTreePtr* pTree, fgWalkData* data)
+{
GenTreePtr tree = *pTree;
- Compiler * comp = data->compiler;
+ Compiler* comp = data->compiler;
if (tree->gtOper == GT_LCL_VAR)
{
- unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &comp->lvaTable[lclNum];
+ unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &comp->lvaTable[lclNum];
if (varDsc->lvHasLdAddrOp || varDsc->lvAddrExposed)
{
- ((AddrTakenDsc *)data->pCallbackData)->hasAddrTakenLcl = true;
- return WALK_ABORT;
+ ((AddrTakenDsc*)data->pCallbackData)->hasAddrTakenLcl = true;
+ return WALK_ABORT;
}
}
-
+
return WALK_CONTINUE;
}
@@ -2028,42 +2284,36 @@ Compiler::fgWalkResult Compiler::gtHasLocalsWithAddrOpCB(GenTreePtr *pTree,
* flag(s) set.
*/
-bool Compiler::gtHasLocalsWithAddrOp(GenTreePtr tree)
+bool Compiler::gtHasLocalsWithAddrOp(GenTreePtr tree)
{
- AddrTakenDsc desc;
-
+ AddrTakenDsc desc;
+
desc.comp = this;
desc.hasAddrTakenLcl = false;
-
- fgWalkTreePre(&tree,
- gtHasLocalsWithAddrOpCB,
- &desc);
+
+ fgWalkTreePre(&tree, gtHasLocalsWithAddrOpCB, &desc);
return desc.hasAddrTakenLcl;
-}
+}
/*****************************************************************************
*
* Helper used to compute hash values for trees.
*/
-inline
-unsigned genTreeHashAdd(unsigned old, unsigned add)
+inline unsigned genTreeHashAdd(unsigned old, unsigned add)
{
- return (old + old/2) ^ add;
+ return (old + old / 2) ^ add;
}
-inline
-unsigned genTreeHashAdd(unsigned old, void * add)
+inline unsigned genTreeHashAdd(unsigned old, void* add)
{
- return genTreeHashAdd(old, (unsigned) (size_t)add);
+ return genTreeHashAdd(old, (unsigned)(size_t)add);
}
-inline
-unsigned genTreeHashAdd(unsigned old, unsigned add1,
- unsigned add2)
+inline unsigned genTreeHashAdd(unsigned old, unsigned add1, unsigned add2)
{
- return (old + old/2) ^ add1 ^ add2;
+ return (old + old / 2) ^ add1 ^ add2;
}
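A concrete instance of the combining step (purely illustrative): genTreeHashAdd(8, 3) evaluates (8 + 8 / 2) ^ 3 = 12 ^ 3 = 15. Scaling the old hash by roughly 1.5 before XORing in the new contribution makes the result order-sensitive, which a plain running XOR of the contributions would not be.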
/*****************************************************************************
@@ -2071,14 +2321,14 @@ unsigned genTreeHashAdd(unsigned old, unsigned add1,
* Given an arbitrary expression tree, compute a hash value for it.
*/
-unsigned Compiler::gtHashValue(GenTree * tree)
+unsigned Compiler::gtHashValue(GenTree* tree)
{
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
- unsigned hash = 0;
+ unsigned hash = 0;
- GenTreePtr temp;
+ GenTreePtr temp;
AGAIN:
assert(tree);
@@ -2095,34 +2345,50 @@ AGAIN:
/* Is this a constant or leaf node? */
- if (kind & (GTK_CONST|GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
{
- size_t add;
+ size_t add;
switch (oper)
{
- case GT_LCL_VAR: add = tree->gtLclVar.gtLclNum; break;
- case GT_LCL_FLD: hash = genTreeHashAdd(hash, tree->gtLclFld.gtLclNum);
- add = tree->gtLclFld.gtLclOffs; break;
+ case GT_LCL_VAR:
+ add = tree->gtLclVar.gtLclNum;
+ break;
+ case GT_LCL_FLD:
+ hash = genTreeHashAdd(hash, tree->gtLclFld.gtLclNum);
+ add = tree->gtLclFld.gtLclOffs;
+ break;
- case GT_CNS_INT: add = (int)tree->gtIntCon.gtIconVal; break;
- case GT_CNS_LNG: add = (int)tree->gtLngCon.gtLconVal; break;
- case GT_CNS_DBL: add = (int)tree->gtDblCon.gtDconVal; break;
- case GT_CNS_STR: add = (int)tree->gtStrCon.gtSconCPX; break;
+ case GT_CNS_INT:
+ add = (int)tree->gtIntCon.gtIconVal;
+ break;
+ case GT_CNS_LNG:
+ add = (int)tree->gtLngCon.gtLconVal;
+ break;
+ case GT_CNS_DBL:
+ add = (int)tree->gtDblCon.gtDconVal;
+ break;
+ case GT_CNS_STR:
+ add = (int)tree->gtStrCon.gtSconCPX;
+ break;
- case GT_JMP: add = tree->gtVal.gtVal1; break;
+ case GT_JMP:
+ add = tree->gtVal.gtVal1;
+ break;
- default: add = 0; break;
+ default:
+ add = 0;
+ break;
}
- //narrowing cast, but for hashing.
+ // narrowing cast, but for hashing.
hash = genTreeHashAdd(hash, (unsigned)add);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
- GenTreePtr op1;
+ GenTreePtr op1;
if (kind & GTK_UNOP)
{
@@ -2135,40 +2401,43 @@ AGAIN:
// these should be included in the hash code.
switch (oper)
{
- case GT_ARR_LENGTH:
- hash += tree->gtArrLen.ArrLenOffset();
- break;
- case GT_CAST:
- hash ^= tree->gtCast.gtCastType;
- break;
- case GT_OBJ:
- hash ^= static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->gtObj.gtClass));
- break;
- case GT_INDEX:
- hash += tree->gtIndex.gtIndElemSize;
- break;
- case GT_ALLOCOBJ:
- hash = genTreeHashAdd(hash, static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->gtAllocObj.gtAllocObjClsHnd)));
- hash = genTreeHashAdd(hash, tree->gtAllocObj.gtNewHelper);
- break;
-
+ case GT_ARR_LENGTH:
+ hash += tree->gtArrLen.ArrLenOffset();
+ break;
+ case GT_CAST:
+ hash ^= tree->gtCast.gtCastType;
+ break;
+ case GT_OBJ:
+ hash ^= static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->gtObj.gtClass));
+ break;
+ case GT_INDEX:
+ hash += tree->gtIndex.gtIndElemSize;
+ break;
+ case GT_ALLOCOBJ:
+ hash = genTreeHashAdd(hash, static_cast<unsigned>(
+ reinterpret_cast<uintptr_t>(tree->gtAllocObj.gtAllocObjClsHnd)));
+ hash = genTreeHashAdd(hash, tree->gtAllocObj.gtNewHelper);
+ break;
+
// For the ones below no extra argument matters for comparison.
- case GT_BOX:
- break;
+ case GT_BOX:
+ break;
- default:
- assert(!"unexpected unary ExOp operator");
+ default:
+ assert(!"unexpected unary ExOp operator");
}
}
- if (!op1)
+ if (!op1)
+ {
goto DONE;
+ }
tree = op1;
goto AGAIN;
}
- if (kind & GTK_BINOP)
+ if (kind & GTK_BINOP)
{
if (GenTree::IsExOp(kind))
{
@@ -2176,42 +2445,44 @@ AGAIN:
// these should be included in the hash code.
switch (oper)
{
- case GT_INTRINSIC:
- hash += tree->gtIntrinsic.gtIntrinsicId;
- break;
- case GT_LEA:
- hash += (tree->gtAddrMode.gtOffset << 3) + tree->gtAddrMode.gtScale;
- break;
+ case GT_INTRINSIC:
+ hash += tree->gtIntrinsic.gtIntrinsicId;
+ break;
+ case GT_LEA:
+ hash += (tree->gtAddrMode.gtOffset << 3) + tree->gtAddrMode.gtScale;
+ break;
// For the ones below no extra argument matters for comparison.
- case GT_ARR_INDEX:
- case GT_QMARK:
- case GT_INDEX:
- break;
+ case GT_ARR_INDEX:
+ case GT_QMARK:
+ case GT_INDEX:
+ break;
#ifdef FEATURE_SIMD
- case GT_SIMD:
- hash += tree->gtSIMD.gtSIMDIntrinsicID;
- hash += tree->gtSIMD.gtSIMDBaseType;
- break;
+ case GT_SIMD:
+ hash += tree->gtSIMD.gtSIMDIntrinsicID;
+ hash += tree->gtSIMD.gtSIMDBaseType;
+ break;
#endif // FEATURE_SIMD
- default:
- assert(!"unexpected binary ExOp operator");
+ default:
+ assert(!"unexpected binary ExOp operator");
}
}
- op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
+ op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
/* Is there a second sub-operand? */
- if (!op2)
+ if (!op2)
{
/* Special case: no sub-operands at all */
- if (!op1)
+ if (!op1)
+ {
goto DONE;
+ }
/* This is a unary operator */
@@ -2221,13 +2492,13 @@ AGAIN:
/* This is a binary operator */
- unsigned hsh1 = gtHashValue(op1);
+ unsigned hsh1 = gtHashValue(op1);
/* Special case: addition of two values */
- if (GenTree::OperIsCommutative(oper))
+ if (GenTree::OperIsCommutative(oper))
{
- unsigned hsh2 = gtHashValue(op2);
+ unsigned hsh2 = gtHashValue(op2);
/* Produce a hash that allows swapping the operands */
@@ -2244,89 +2515,97 @@ AGAIN:
}
/* See what kind of a special operator we have here */
- switch (tree->gtOper)
+ switch (tree->gtOper)
{
- case GT_FIELD:
- if (tree->gtField.gtFldObj)
- {
- temp = tree->gtField.gtFldObj; assert(temp);
- hash = genTreeHashAdd(hash, gtHashValue(temp));
- }
- break;
+ case GT_FIELD:
+ if (tree->gtField.gtFldObj)
+ {
+ temp = tree->gtField.gtFldObj;
+ assert(temp);
+ hash = genTreeHashAdd(hash, gtHashValue(temp));
+ }
+ break;
- case GT_STMT:
- temp = tree->gtStmt.gtStmtExpr; assert(temp);
- hash = genTreeHashAdd(hash, gtHashValue(temp));
- break;
+ case GT_STMT:
+ temp = tree->gtStmt.gtStmtExpr;
+ assert(temp);
+ hash = genTreeHashAdd(hash, gtHashValue(temp));
+ break;
- case GT_ARR_ELEM:
+ case GT_ARR_ELEM:
- hash = genTreeHashAdd(hash, gtHashValue(tree->gtArrElem.gtArrObj));
+ hash = genTreeHashAdd(hash, gtHashValue(tree->gtArrElem.gtArrObj));
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- hash = genTreeHashAdd(hash, gtHashValue(tree->gtArrElem.gtArrInds[dim]));
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ hash = genTreeHashAdd(hash, gtHashValue(tree->gtArrElem.gtArrInds[dim]));
+ }
- break;
+ break;
- case GT_ARR_OFFSET:
- hash = genTreeHashAdd(hash, gtHashValue(tree->gtArrOffs.gtOffset));
- hash = genTreeHashAdd(hash, gtHashValue(tree->gtArrOffs.gtIndex));
- hash = genTreeHashAdd(hash, gtHashValue(tree->gtArrOffs.gtArrObj));
- break;
+ case GT_ARR_OFFSET:
+ hash = genTreeHashAdd(hash, gtHashValue(tree->gtArrOffs.gtOffset));
+ hash = genTreeHashAdd(hash, gtHashValue(tree->gtArrOffs.gtIndex));
+ hash = genTreeHashAdd(hash, gtHashValue(tree->gtArrOffs.gtArrObj));
+ break;
- case GT_CALL:
+ case GT_CALL:
- if (tree->gtCall.gtCallObjp && tree->gtCall.gtCallObjp->gtOper != GT_NOP)
- {
- temp = tree->gtCall.gtCallObjp; assert(temp);
- hash = genTreeHashAdd(hash, gtHashValue(temp));
- }
+ if (tree->gtCall.gtCallObjp && tree->gtCall.gtCallObjp->gtOper != GT_NOP)
+ {
+ temp = tree->gtCall.gtCallObjp;
+ assert(temp);
+ hash = genTreeHashAdd(hash, gtHashValue(temp));
+ }
- if (tree->gtCall.gtCallArgs)
- {
- temp = tree->gtCall.gtCallArgs; assert(temp);
- hash = genTreeHashAdd(hash, gtHashValue(temp));
- }
+ if (tree->gtCall.gtCallArgs)
+ {
+ temp = tree->gtCall.gtCallArgs;
+ assert(temp);
+ hash = genTreeHashAdd(hash, gtHashValue(temp));
+ }
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- {
- temp = tree->gtCall.gtCallAddr; assert(temp);
- hash = genTreeHashAdd(hash, gtHashValue(temp));
- }
- else
- {
- hash = genTreeHashAdd(hash, tree->gtCall.gtCallMethHnd);
- }
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
+ {
+ temp = tree->gtCall.gtCallAddr;
+ assert(temp);
+ hash = genTreeHashAdd(hash, gtHashValue(temp));
+ }
+ else
+ {
+ hash = genTreeHashAdd(hash, tree->gtCall.gtCallMethHnd);
+ }
- if (tree->gtCall.gtCallLateArgs)
- {
- temp = tree->gtCall.gtCallLateArgs; assert(temp);
- hash = genTreeHashAdd(hash, gtHashValue(temp));
- }
- break;
+ if (tree->gtCall.gtCallLateArgs)
+ {
+ temp = tree->gtCall.gtCallLateArgs;
+ assert(temp);
+ hash = genTreeHashAdd(hash, gtHashValue(temp));
+ }
+ break;
- case GT_CMPXCHG:
- hash = genTreeHashAdd(hash, gtHashValue(tree->gtCmpXchg.gtOpLocation));
- hash = genTreeHashAdd(hash, gtHashValue(tree->gtCmpXchg.gtOpValue));
- hash = genTreeHashAdd(hash, gtHashValue(tree->gtCmpXchg.gtOpComparand));
- break;
+ case GT_CMPXCHG:
+ hash = genTreeHashAdd(hash, gtHashValue(tree->gtCmpXchg.gtOpLocation));
+ hash = genTreeHashAdd(hash, gtHashValue(tree->gtCmpXchg.gtOpValue));
+ hash = genTreeHashAdd(hash, gtHashValue(tree->gtCmpXchg.gtOpComparand));
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- hash = genTreeHashAdd(hash, gtHashValue(tree->gtBoundsChk.gtArrLen));
- hash = genTreeHashAdd(hash, gtHashValue(tree->gtBoundsChk.gtIndex));
- hash = genTreeHashAdd(hash, tree->gtBoundsChk.gtThrowKind);
- break;
+ hash = genTreeHashAdd(hash, gtHashValue(tree->gtBoundsChk.gtArrLen));
+ hash = genTreeHashAdd(hash, gtHashValue(tree->gtBoundsChk.gtIndex));
+ hash = genTreeHashAdd(hash, tree->gtBoundsChk.gtThrowKind);
+ break;
- default:
-#ifdef DEBUG
- gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ gtDispTree(tree);
#endif
- assert(!"unexpected operator");
- break;
+ assert(!"unexpected operator");
+ break;
}
DONE:
@@ -2337,7 +2616,7 @@ DONE:
/*****************************************************************************
*
* Given an arbitrary expression tree, attempts to find the set of all local variables
- * referenced by the tree, and return them as "*result".
+ * referenced by the tree, and return them as "*result".
* If "findPtr" is null, this is a tracked variable set;
* if it is non-null, this is an "all var set."
* The "*result" value is valid only if the call returns "true." It may return "false"
@@ -2346,7 +2625,7 @@ DONE:
* If "findPtr" is non-NULL, and the expression contains a variable that can't be represented
* in an "all var set."
* If the expression accesses address-exposed variables.
- *
+ *
* If there
* are any indirections or global refs in the expression, the "*refsPtr" argument
* will be assigned the appropriate bit set based on the 'varRefKinds' type.
@@ -2355,20 +2634,21 @@ DONE:
* If we encounter an expression that is equal to *findPtr we set *findPtr
* to NULL.
*/
-bool Compiler::lvaLclVarRefs(GenTreePtr tree,
- GenTreePtr * findPtr,
- varRefKinds* refsPtr,
- void* result)
-{
- genTreeOps oper;
- unsigned kind;
- varRefKinds refs = VR_NONE;
- ALLVARSET_TP ALLVARSET_INIT_NOCOPY(allVars, AllVarSetOps::UninitVal());
- VARSET_TP VARSET_INIT_NOCOPY(trkdVars, VarSetOps::UninitVal());
+bool Compiler::lvaLclVarRefs(GenTreePtr tree, GenTreePtr* findPtr, varRefKinds* refsPtr, void* result)
+{
+ genTreeOps oper;
+ unsigned kind;
+ varRefKinds refs = VR_NONE;
+ ALLVARSET_TP ALLVARSET_INIT_NOCOPY(allVars, AllVarSetOps::UninitVal());
+ VARSET_TP VARSET_INIT_NOCOPY(trkdVars, VarSetOps::UninitVal());
if (findPtr)
+ {
AllVarSetOps::AssignNoCopy(this, allVars, AllVarSetOps::MakeEmpty(this));
+ }
else
+ {
VarSetOps::AssignNoCopy(this, trkdVars, VarSetOps::MakeEmpty(this));
+ }
AGAIN:
@@ -2377,7 +2657,10 @@ AGAIN:
/* Remember whether we've come across the expression we're looking for */
- if (findPtr && *findPtr == tree) *findPtr = NULL;
+ if (findPtr && *findPtr == tree)
+ {
+ *findPtr = nullptr;
+ }
/* Figure out what kind of a node we have */
@@ -2386,32 +2669,38 @@ AGAIN:
/* Is this a constant or leaf node? */
- if (kind & (GTK_CONST|GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
{
- if (oper == GT_LCL_VAR)
+ if (oper == GT_LCL_VAR)
{
- unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
+ unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
/* Should we use the variable table? */
- if (findPtr)
+ if (findPtr)
{
if (lclNum >= lclMAX_ALLSET_TRACKED)
+ {
return false;
+ }
AllVarSetOps::AddElemD(this, allVars, lclNum);
}
else
{
assert(lclNum < lvaCount);
- LclVarDsc * varDsc = lvaTable + lclNum;
+ LclVarDsc* varDsc = lvaTable + lclNum;
if (varDsc->lvTracked == false)
- return false;
+ {
+ return false;
+ }
// Don't deal with expressions with address-exposed variables.
if (varDsc->lvAddrExposed)
- return false;
+ {
+ return false;
+ }
VarSetOps::AddElemD(this, trkdVars, varDsc->lvVarIndex);
}
@@ -2423,9 +2712,13 @@ AGAIN:
overlapping) fields. So just treat them as indirect accesses */
if (varTypeIsGC(tree->TypeGet()))
+ {
refs = VR_IND_REF;
+ }
else
+ {
refs = VR_IND_SCL;
+ }
}
else if (oper == GT_CLS_VAR)
{
@@ -2443,27 +2736,31 @@ AGAIN:
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
- if (oper == GT_IND)
+ if (oper == GT_IND)
{
- assert(tree->gtOp.gtOp2 == NULL);
+ assert(tree->gtOp.gtOp2 == nullptr);
/* Set the proper indirection bit */
if ((tree->gtFlags & GTF_IND_INVARIANT) == 0)
{
if (varTypeIsGC(tree->TypeGet()))
+ {
refs = VR_IND_REF;
+ }
else
+ {
refs = VR_IND_SCL;
+ }
// If the flag GTF_IND_TGTANYWHERE is set this indirection
// could also point at a global variable
if (tree->gtFlags & GTF_IND_TGTANYWHERE)
{
- refs = varRefKinds( ((int) refs) | ((int) VR_GLB_VAR) );
+ refs = varRefKinds(((int)refs) | ((int)VR_GLB_VAR));
}
}
@@ -2479,16 +2776,20 @@ AGAIN:
//
if (tree->gtFlags & GTF_DONT_CSE)
{
- return false;
+ return false;
}
}
- if (tree->gtGetOp2())
+ if (tree->gtGetOp2())
{
/* It's a binary operator */
- if (!lvaLclVarRefsAccum(tree->gtOp.gtOp1, findPtr, refsPtr, &allVars, &trkdVars)) return false;
+ if (!lvaLclVarRefsAccum(tree->gtOp.gtOp1, findPtr, refsPtr, &allVars, &trkdVars))
+ {
+ return false;
+ }
// Otherwise...
- tree = tree->gtOp.gtOp2; assert(tree);
+ tree = tree->gtOp.gtOp2;
+ assert(tree);
goto AGAIN;
}
else
@@ -2496,8 +2797,10 @@ AGAIN:
/* It's a unary (or nilary) operator */
tree = tree->gtOp.gtOp1;
- if (tree)
+ if (tree)
+ {
goto AGAIN;
+ }
lvaLclVarRefsAccumIntoRes(findPtr, result, allVars, trkdVars);
return true;
@@ -2506,88 +2809,109 @@ AGAIN:
switch (oper)
{
- case GT_ARR_ELEM:
- if (!lvaLclVarRefsAccum(tree->gtArrElem.gtArrObj, findPtr, refsPtr, &allVars, &trkdVars)) return false;
+ case GT_ARR_ELEM:
+ if (!lvaLclVarRefsAccum(tree->gtArrElem.gtArrObj, findPtr, refsPtr, &allVars, &trkdVars))
+ {
+ return false;
+ }
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- {
- VARSET_TP VARSET_INIT_NOCOPY(tmpVs, VarSetOps::UninitVal());
- if (!lvaLclVarRefsAccum(tree->gtArrElem.gtArrInds[dim], findPtr, refsPtr, &allVars, &trkdVars)) return false;
- }
- lvaLclVarRefsAccumIntoRes(findPtr, result, allVars, trkdVars);
- return true;
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ VARSET_TP VARSET_INIT_NOCOPY(tmpVs, VarSetOps::UninitVal());
+ if (!lvaLclVarRefsAccum(tree->gtArrElem.gtArrInds[dim], findPtr, refsPtr, &allVars, &trkdVars))
+ {
+ return false;
+ }
+ }
+ lvaLclVarRefsAccumIntoRes(findPtr, result, allVars, trkdVars);
+ return true;
- case GT_ARR_OFFSET:
- if (!lvaLclVarRefsAccum(tree->gtArrOffs.gtOffset, findPtr, refsPtr, &allVars, &trkdVars))
- return false;
- // Otherwise...
- if (!lvaLclVarRefsAccum(tree->gtArrOffs.gtIndex, findPtr, refsPtr, &allVars, &trkdVars))
- return false;
- // Otherwise...
- if (!lvaLclVarRefsAccum(tree->gtArrOffs.gtArrObj, findPtr, refsPtr, &allVars, &trkdVars))
- return false;
- // Otherwise...
- lvaLclVarRefsAccumIntoRes(findPtr, result, allVars, trkdVars);
- return true;
+ case GT_ARR_OFFSET:
+ if (!lvaLclVarRefsAccum(tree->gtArrOffs.gtOffset, findPtr, refsPtr, &allVars, &trkdVars))
+ {
+ return false;
+ }
+ // Otherwise...
+ if (!lvaLclVarRefsAccum(tree->gtArrOffs.gtIndex, findPtr, refsPtr, &allVars, &trkdVars))
+ {
+ return false;
+ }
+ // Otherwise...
+ if (!lvaLclVarRefsAccum(tree->gtArrOffs.gtArrObj, findPtr, refsPtr, &allVars, &trkdVars))
+ {
+ return false;
+ }
+ // Otherwise...
+ lvaLclVarRefsAccumIntoRes(findPtr, result, allVars, trkdVars);
+ return true;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
{
- if (!lvaLclVarRefsAccum(tree->gtBoundsChk.gtArrLen, findPtr, refsPtr, &allVars, &trkdVars)) return false;
+ if (!lvaLclVarRefsAccum(tree->gtBoundsChk.gtArrLen, findPtr, refsPtr, &allVars, &trkdVars))
+ {
+ return false;
+ }
// Otherwise...
- if (!lvaLclVarRefsAccum(tree->gtBoundsChk.gtIndex, findPtr, refsPtr, &allVars, &trkdVars)) return false;
+ if (!lvaLclVarRefsAccum(tree->gtBoundsChk.gtIndex, findPtr, refsPtr, &allVars, &trkdVars))
+ {
+ return false;
+ }
// Otherwise...
lvaLclVarRefsAccumIntoRes(findPtr, result, allVars, trkdVars);
return true;
}
- case GT_CALL:
- /* Allow calls to the Shared Static helper */
- if (IsSharedStaticHelper(tree))
- {
- *refsPtr = varRefKinds((*refsPtr) | VR_INVARIANT);
- lvaLclVarRefsAccumIntoRes(findPtr, result, allVars, trkdVars);
- return true;
- }
- break;
- default:
- break;
+ case GT_CALL:
+ /* Allow calls to the Shared Static helper */
+ if (IsSharedStaticHelper(tree))
+ {
+ *refsPtr = varRefKinds((*refsPtr) | VR_INVARIANT);
+ lvaLclVarRefsAccumIntoRes(findPtr, result, allVars, trkdVars);
+ return true;
+ }
+ break;
+ default:
+ break;
} // end switch (oper)
return false;
}
-bool Compiler::lvaLclVarRefsAccum(GenTreePtr tree,
- GenTreePtr * findPtr,
- varRefKinds * refsPtr,
- ALLVARSET_TP* allVars,
- VARSET_TP* trkdVars)
+bool Compiler::lvaLclVarRefsAccum(
+ GenTreePtr tree, GenTreePtr* findPtr, varRefKinds* refsPtr, ALLVARSET_TP* allVars, VARSET_TP* trkdVars)
{
if (findPtr)
{
ALLVARSET_TP ALLVARSET_INIT_NOCOPY(tmpVs, AllVarSetOps::UninitVal());
- if (!lvaLclVarRefs(tree, findPtr, refsPtr, &tmpVs)) return false;
+ if (!lvaLclVarRefs(tree, findPtr, refsPtr, &tmpVs))
+ {
+ return false;
+ }
// Otherwise...
AllVarSetOps::UnionD(this, *allVars, tmpVs);
}
else
{
VARSET_TP VARSET_INIT_NOCOPY(tmpVs, VarSetOps::UninitVal());
- if (!lvaLclVarRefs(tree, findPtr, refsPtr, &tmpVs)) return false;
+ if (!lvaLclVarRefs(tree, findPtr, refsPtr, &tmpVs))
+ {
+ return false;
+ }
// Otherwise...
VarSetOps::UnionD(this, *trkdVars, tmpVs);
}
return true;
}
-void Compiler::lvaLclVarRefsAccumIntoRes(GenTreePtr * findPtr,
- void* result,
- ALLVARSET_VALARG_TP allVars,
- VARSET_VALARG_TP trkdVars)
+void Compiler::lvaLclVarRefsAccumIntoRes(GenTreePtr* findPtr,
+ void* result,
+ ALLVARSET_VALARG_TP allVars,
+ VARSET_VALARG_TP trkdVars)
{
if (findPtr)
{
@@ -2607,17 +2931,15 @@ void Compiler::lvaLclVarRefsAccumIntoRes(GenTreePtr * findPtr,
*/
/* static */
-genTreeOps GenTree::ReverseRelop(genTreeOps relop)
-{
- static const
- genTreeOps reverseOps[] =
- {
- GT_NE, // GT_EQ
- GT_EQ, // GT_NE
- GT_GE, // GT_LT
- GT_GT, // GT_LE
- GT_LT, // GT_GE
- GT_LE, // GT_GT
+genTreeOps GenTree::ReverseRelop(genTreeOps relop)
+{
+ static const genTreeOps reverseOps[] = {
+ GT_NE, // GT_EQ
+ GT_EQ, // GT_NE
+ GT_GE, // GT_LT
+ GT_GT, // GT_LE
+ GT_LT, // GT_GE
+ GT_LE, // GT_GT
};
assert(reverseOps[GT_EQ - GT_EQ] == GT_NE);
@@ -2640,17 +2962,15 @@ genTreeOps GenTree::ReverseRelop(genTreeOps relop)
*/
/* static */
-genTreeOps GenTree::SwapRelop(genTreeOps relop)
-{
- static const
- genTreeOps swapOps[] =
- {
- GT_EQ, // GT_EQ
- GT_NE, // GT_NE
- GT_GT, // GT_LT
- GT_GE, // GT_LE
- GT_LE, // GT_GE
- GT_LT, // GT_GT
+genTreeOps GenTree::SwapRelop(genTreeOps relop)
+{
+ static const genTreeOps swapOps[] = {
+ GT_EQ, // GT_EQ
+ GT_NE, // GT_NE
+ GT_GT, // GT_LT
+ GT_GE, // GT_LE
+ GT_LE, // GT_GE
+ GT_LT, // GT_GT
};
assert(swapOps[GT_EQ - GT_EQ] == GT_EQ);
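Side by side, the two tables differ as follows (sample entries, for illustration):

    // Reversing negates the predicate; swapping just reorders the operands:
    //   ReverseRelop(GT_LT) == GT_GE     (a < b)  is equivalent to  !(a >= b)
    //   SwapRelop(GT_LT)    == GT_GT     (a < b)  is equivalent to   (b > a)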
@@ -2672,9 +2992,9 @@ genTreeOps GenTree::SwapRelop(genTreeOps relop)
* Reverse the meaning of the given test condition.
*/
-GenTreePtr Compiler::gtReverseCond(GenTree * tree)
+GenTreePtr Compiler::gtReverseCond(GenTree* tree)
{
- if (tree->OperIsCompare())
+ if (tree->OperIsCompare())
{
tree->SetOper(GenTree::ReverseRelop(tree->OperGet()));
@@ -2683,7 +3003,9 @@ GenTreePtr Compiler::gtReverseCond(GenTree * tree)
// a unord b === (a == NaN || b == NaN)
// => !(a ord b) === (a unord b)
if (varTypeIsFloating(tree->gtOp.gtOp1->TypeGet()))
+ {
tree->gtFlags ^= GTF_RELOP_NAN_UN;
+ }
}
else
{
@@ -2693,46 +3015,53 @@ GenTreePtr Compiler::gtReverseCond(GenTree * tree)
return tree;
}
-
/*****************************************************************************/
#ifdef DEBUG
-
-bool GenTree::gtIsValid64RsltMul()
+bool GenTree::gtIsValid64RsltMul()
{
if ((gtOper != GT_MUL) || !(gtFlags & GTF_MUL_64RSLT))
+ {
return false;
+ }
- GenTreePtr op1 = gtOp.gtOp1;
- GenTreePtr op2 = gtOp.gtOp2;
+ GenTreePtr op1 = gtOp.gtOp1;
+ GenTreePtr op2 = gtOp.gtOp2;
- if (TypeGet() != TYP_LONG ||
- op1->TypeGet() != TYP_LONG ||
- op2->TypeGet() != TYP_LONG)
+ if (TypeGet() != TYP_LONG || op1->TypeGet() != TYP_LONG || op2->TypeGet() != TYP_LONG)
+ {
return false;
+ }
if (gtOverflow())
+ {
return false;
+ }
// op1 has to be conv.i8(i4Expr)
- if ((op1->gtOper != GT_CAST) ||
- (genActualType(op1->CastFromType()) != TYP_INT))
+ if ((op1->gtOper != GT_CAST) || (genActualType(op1->CastFromType()) != TYP_INT))
+ {
return false;
+ }
// op2 has to be conv.i8(i4Expr)
- if ((op2->gtOper != GT_CAST) ||
- (genActualType(op2->CastFromType()) != TYP_INT))
+ if ((op2->gtOper != GT_CAST) || (genActualType(op2->CastFromType()) != TYP_INT))
+ {
return false;
+ }
// The signedness of both casts must be the same
- if (((op1->gtFlags & GTF_UNSIGNED) != 0) !=
- ((op2->gtFlags & GTF_UNSIGNED) != 0))
+ if (((op1->gtFlags & GTF_UNSIGNED) != 0) != ((op2->gtFlags & GTF_UNSIGNED) != 0))
+ {
return false;
+ }
// Do unsigned mul iff both the casts are unsigned
if (((op1->gtFlags & GTF_UNSIGNED) != 0) != ((gtFlags & GTF_UNSIGNED) != 0))
+ {
return false;
+ }
return true;
}
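// Shape accepted by the check above (informal sketch): a 64-bit multiply whose
// operands are both int-to-long casts with matching signedness,
//
//   GT_MUL (TYP_LONG, GTF_MUL_64RSLT)
//     +-- GT_CAST long <- int   (op1)
//     +-- GT_CAST long <- int   (op2)
//
// i.e. exactly the pattern a 32x32 -> 64 multiply instruction can consume.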
@@ -2744,37 +3073,39 @@ bool GenTree::gtIsValid64RsltMul()
* Figure out the evaluation order for a list of values.
*/
-unsigned Compiler::gtSetListOrder(GenTree *list, bool regs)
+unsigned Compiler::gtSetListOrder(GenTree* list, bool regs)
{
assert(list && list->IsList());
- unsigned level = 0;
- unsigned ftreg = 0;
- unsigned costSz = 0;
- unsigned costEx = 0;
+ unsigned level = 0;
+ unsigned ftreg = 0;
+ unsigned costSz = 0;
+ unsigned costEx = 0;
#if FEATURE_STACK_FP_X87
/* Save the current FP stack level since an argument list
* will implicitly pop the FP stack when pushing the argument */
- unsigned FPlvlSave = codeGen->genGetFPstkLevel();
+ unsigned FPlvlSave = codeGen->genGetFPstkLevel();
#endif // FEATURE_STACK_FP_X87
- GenTreePtr next = list->gtOp.gtOp2;
+ GenTreePtr next = list->gtOp.gtOp2;
- if (next)
+ if (next)
{
- unsigned nxtlvl = gtSetListOrder(next, regs);
+ unsigned nxtlvl = gtSetListOrder(next, regs);
ftreg |= next->gtRsvdRegs;
- if (level < nxtlvl)
- level = nxtlvl;
+ if (level < nxtlvl)
+ {
+ level = nxtlvl;
+ }
costEx += next->gtCostEx;
costSz += next->gtCostSz;
}
- GenTreePtr op1 = list->gtOp.gtOp1;
- unsigned lvl = gtSetEvalOrder(op1);
+ GenTreePtr op1 = list->gtOp.gtOp1;
+ unsigned lvl = gtSetEvalOrder(op1);
#if FEATURE_STACK_FP_X87
/* restore the FP level */
@@ -2783,8 +3114,10 @@ unsigned Compiler::gtSetListOrder(GenTree *list, bool regs)
list->gtRsvdRegs = (regMaskSmall)(ftreg | op1->gtRsvdRegs);
- if (level < lvl)
- level = lvl;
+ if (level < lvl)
+ {
+ level = lvl;
+ }
if (op1->gtCostEx != 0)
{
@@ -2796,7 +3129,7 @@ unsigned Compiler::gtSetListOrder(GenTree *list, bool regs)
{
costSz += op1->gtCostSz;
#ifdef _TARGET_XARCH_
- if (regs) // push is smaller than mov to reg
+ if (regs) // push is smaller than mov to reg
#endif
{
costSz += 1;
@@ -2814,11 +3147,11 @@ unsigned Compiler::gtSetListOrder(GenTree *list, bool regs)
* mark the interior address computation nodes with the GTF_ADDRMODE_NO_CSE flag
* which prevents them from being considered for CSE's.
*
- * Furthermore this routine is a factoring of the logic used to walk down
+ * Furthermore this routine is a factoring of the logic used to walk down
* the child nodes of a GT_IND tree, similar to optParseArrayRef().
*
* Previously we had this logic repeated three times inside of gtSetEvalOrder().
- * Here we combine those three repeats into this routine and use the
+ * Here we combine those three repeats into this routine and use the
* bool constOnly to modify the behavior of this routine for the first call.
*
* The object here is to mark all of the interior GT_ADD's and GT_NOP's
@@ -2837,10 +3170,7 @@ unsigned Compiler::gtSetListOrder(GenTree *list, bool regs)
* case (which really seems like a bug) is very confusing.
*/
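// Example of the marking this walk performs (informal sketch): for an address
// such as
//
//   GT_ADD( GT_ADD( base, GT_LSH(idx, 2) ), 8 )      // [base + idx*4 + 8]
//
// the interior GT_ADDs (and any GT_NOPs) receive GTF_ADDRMODE_NO_CSE here, and
// the caller marks the scaling GT_LSH/GT_MUL the same way, so CSE cannot hoist
// a partial address computation out of the addressing mode codegen will fold.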
-void Compiler::gtWalkOp(GenTree * * op1WB,
- GenTree * * op2WB,
- GenTree * adr,
- bool constOnly)
+void Compiler::gtWalkOp(GenTree** op1WB, GenTree** op2WB, GenTree* adr, bool constOnly)
{
GenTreePtr op1 = *op1WB;
GenTreePtr op2 = *op2WB;
@@ -2849,8 +3179,7 @@ void Compiler::gtWalkOp(GenTree * * op1WB,
if (op1->gtOper == GT_COMMA)
{
op1EffectiveVal = op1->gtEffectiveVal();
- if ((op1EffectiveVal->gtOper == GT_ADD) &&
- (!op1EffectiveVal->gtOverflow()) &&
+ if ((op1EffectiveVal->gtOper == GT_ADD) && (!op1EffectiveVal->gtOverflow()) &&
(!constOnly || (op1EffectiveVal->gtOp.gtOp2->IsCnsIntOrI())))
{
op1 = op1EffectiveVal;
@@ -2858,17 +3187,17 @@ void Compiler::gtWalkOp(GenTree * * op1WB,
}
// Now we look for op1's with non-overflow GT_ADDs [of constants]
- while ((op1->gtOper == GT_ADD) &&
- (!op1->gtOverflow()) &&
- (!constOnly || (op1->gtOp.gtOp2->IsCnsIntOrI())))
+ while ((op1->gtOper == GT_ADD) && (!op1->gtOverflow()) && (!constOnly || (op1->gtOp.gtOp2->IsCnsIntOrI())))
{
// mark it with GTF_ADDRMODE_NO_CSE
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
- if (!constOnly) // TODO-Cleanup: It seems bizarre that this is !constOnly
+ if (!constOnly)
+ { // TODO-Cleanup: It seems bizarre that this is !constOnly
op2 = op1->gtOp.gtOp2;
+ }
op1 = op1->gtOp.gtOp1;
-
+
// If op1 is a GT_NOP then swap op1 and op2.
// (Why? Also, presumably op2 is not a GT_NOP in this case?)
if (op1->gtOper == GT_NOP)
@@ -2883,8 +3212,7 @@ void Compiler::gtWalkOp(GenTree * * op1WB,
if (op1->gtOper == GT_COMMA)
{
op1EffectiveVal = op1->gtEffectiveVal();
- if ((op1EffectiveVal->gtOper == GT_ADD) &&
- (!op1EffectiveVal->gtOverflow()) &&
+ if ((op1EffectiveVal->gtOper == GT_ADD) && (!op1EffectiveVal->gtOverflow()) &&
(!constOnly || (op1EffectiveVal->gtOp.gtOp2->IsCnsIntOrI())))
{
op1 = op1EffectiveVal;
@@ -2892,7 +3220,9 @@ void Compiler::gtWalkOp(GenTree * * op1WB,
}
if (!constOnly && ((op2 == adr) || (!op2->IsCnsIntOrI())))
+ {
break;
+ }
}
*op1WB = op1;
@@ -2929,18 +3259,17 @@ GenTreePtr Compiler::gtWalkOpEffectiveVal(GenTreePtr op)
if (op->gtOper == GT_COMMA)
{
GenTreePtr opEffectiveVal = op->gtEffectiveVal();
- if ((opEffectiveVal->gtOper == GT_ADD) &&
- (!opEffectiveVal->gtOverflow()) &&
+ if ((opEffectiveVal->gtOper == GT_ADD) && (!opEffectiveVal->gtOverflow()) &&
(opEffectiveVal->gtOp.gtOp2->IsCnsIntOrI()))
{
op = opEffectiveVal;
}
}
- if ((op->gtOper != GT_ADD) ||
- op->gtOverflow() ||
- !op->gtOp.gtOp2->IsCnsIntOrI())
- break;
+ if ((op->gtOper != GT_ADD) || op->gtOverflow() || !op->gtOp.gtOp2->IsCnsIntOrI())
+ {
+ break;
+ }
op = op->gtOp.gtOp1;
}
@@ -2952,11 +3281,11 @@ GenTreePtr Compiler::gtWalkOpEffectiveVal(GenTreePtr op)
/*****************************************************************************
*
* Given a tree, set the gtCostEx and gtCostSz fields which
- * are used to measure the relative costs of the codegen of the tree
+ * are used to measure the relative costs of the codegen of the tree
*
*/
-void Compiler::gtPrepareCost(GenTree * tree)
+void Compiler::gtPrepareCost(GenTree* tree)
{
#if FEATURE_STACK_FP_X87
codeGen->genResetFPstkLevel();
@@ -2964,19 +3293,25 @@ void Compiler::gtPrepareCost(GenTree * tree)
gtSetEvalOrder(tree);
}
-bool Compiler::gtIsLikelyRegVar(GenTree * tree)
+bool Compiler::gtIsLikelyRegVar(GenTree* tree)
{
if (tree->gtOper != GT_LCL_VAR)
+ {
return false;
+ }
assert(tree->gtLclVar.gtLclNum < lvaTableCnt);
- LclVarDsc * varDsc = lvaTable + tree->gtLclVar.gtLclNum;
+ LclVarDsc* varDsc = lvaTable + tree->gtLclVar.gtLclNum;
if (varDsc->lvDoNotEnregister)
+ {
return false;
+ }
if (varDsc->lvRefCntWtd < (BB_UNITY_WEIGHT * 3))
+ {
return false;
+ }
#ifdef _TARGET_X86_
if (varTypeIsFloating(tree->TypeGet()))
@@ -3000,22 +3335,20 @@ bool Compiler::gtIsLikelyRegVar(GenTree * tree)
// order of the two trees, considering any exception, global effects, or
// ordering constraints.
//
-bool
-Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
+bool Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
{
    // The relative order of global / side effects can't be swapped.
- bool canSwap = true;
+ bool canSwap = true;
if (optValnumCSE_phase)
{
canSwap = optCSE_canSwap(firstNode, secondNode);
}
-
+
// We cannot swap in the presence of special side effects such as GT_CATCH_ARG.
- if (canSwap &&
- (firstNode->gtFlags & GTF_ORDER_SIDEEFF))
+ if (canSwap && (firstNode->gtFlags & GTF_ORDER_SIDEEFF))
{
canSwap = false;
}
@@ -3026,8 +3359,7 @@ Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
unsigned strictEffects = GTF_GLOB_EFFECT;
- if (canSwap &&
- (firstNode->gtFlags & strictEffects))
+ if (canSwap && (firstNode->gtFlags & strictEffects))
{
        // op1 has side effects that can't be reordered.
// Check for some special cases where we still may be able to swap.
@@ -3046,7 +3378,9 @@ Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
{
// We have to be conservative - can swap iff op2 is constant.
if (!secondNode->OperIsConst())
+ {
canSwap = false;
+ }
}
}
}
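// Informal example of the rule above: with
//   firstNode  = CALL Foo()   // has GTF_GLOB_EFFECT
//   secondNode = 42           // constant, no effects
// the swap can still be allowed, because a constant cannot observe Foo's side
// effects; but if secondNode read a global that Foo() may write, reordering
// would change behavior, so canSwap is forced to false.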
@@ -3076,9 +3410,9 @@ Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-unsigned Compiler::gtSetEvalOrder(GenTree * tree)
+unsigned Compiler::gtSetEvalOrder(GenTree* tree)
{
assert(tree);
assert(tree->gtOper != GT_STMT);
@@ -3090,22 +3424,22 @@ unsigned Compiler::gtSetEvalOrder(GenTree * tree)
/* Is this a FP value? */
- bool isflt = varTypeIsFloating(tree->TypeGet());
- unsigned FPlvlSave;
+ bool isflt = varTypeIsFloating(tree->TypeGet());
+ unsigned FPlvlSave;
/* Figure out what kind of a node we have */
- genTreeOps oper = tree->OperGet();
- unsigned kind = tree->OperKind();
+ genTreeOps oper = tree->OperGet();
+ unsigned kind = tree->OperKind();
/* Assume no fixed registers will be trashed */
- regMaskTP ftreg = RBM_NONE; // Set of registers that will be used by the subtree
- unsigned level;
- int costEx;
- int costSz;
+ regMaskTP ftreg = RBM_NONE; // Set of registers that will be used by the subtree
+ unsigned level;
+ int costEx;
+ int costSz;
- bool bRngChk;
+ bool bRngChk;
#ifdef DEBUG
costEx = -1;
@@ -3114,212 +3448,212 @@ unsigned Compiler::gtSetEvalOrder(GenTree * tree)
/* Is this a constant or a leaf node? */
- if (kind & (GTK_LEAF|GTK_CONST))
+ if (kind & (GTK_LEAF | GTK_CONST))
{
switch (oper)
{
- bool iconNeedsReloc;
+ bool iconNeedsReloc;
#ifdef _TARGET_ARM_
- case GT_CNS_LNG:
- costSz = 9;
- costEx = 4;
- goto COMMON_CNS;
+ case GT_CNS_LNG:
+ costSz = 9;
+ costEx = 4;
+ goto COMMON_CNS;
- case GT_CNS_STR:
- // Uses movw/movt
- costSz = 7;
- costEx = 3;
- goto COMMON_CNS;
-
- case GT_CNS_INT:
-
- // If the constant is a handle then it will need to have a relocation
- // applied to it.
- // Any constant that requires a reloc must use the movw/movt sequence
- //
- iconNeedsReloc = opts.compReloc && tree->IsIconHandle() && !tree->IsIconHandle(GTF_ICON_FIELD_HDL);
-
- if (iconNeedsReloc || !codeGen->validImmForInstr(INS_mov, tree->gtIntCon.gtIconVal))
- {
+ case GT_CNS_STR:
// Uses movw/movt
costSz = 7;
costEx = 3;
- }
- else if (((unsigned) tree->gtIntCon.gtIconVal) <= 0x00ff)
- {
- // mov Rd, <const8>
- costSz = 1;
- costEx = 1;
- }
- else
- {
- // Uses movw/mvn
- costSz = 3;
- costEx = 1;
- }
- goto COMMON_CNS;
+ goto COMMON_CNS;
-#elif defined _TARGET_XARCH_
+ case GT_CNS_INT:
- case GT_CNS_LNG:
- costSz = 10;
- costEx = 3;
- goto COMMON_CNS;
-
- case GT_CNS_STR:
- costSz = 4;
- costEx = 1;
- goto COMMON_CNS;
+ // If the constant is a handle then it will need to have a relocation
+ // applied to it.
+ // Any constant that requires a reloc must use the movw/movt sequence
+ //
+ iconNeedsReloc = opts.compReloc && tree->IsIconHandle() && !tree->IsIconHandle(GTF_ICON_FIELD_HDL);
- case GT_CNS_INT:
+ if (iconNeedsReloc || !codeGen->validImmForInstr(INS_mov, tree->gtIntCon.gtIconVal))
+ {
+ // Uses movw/movt
+ costSz = 7;
+ costEx = 3;
+ }
+ else if (((unsigned)tree->gtIntCon.gtIconVal) <= 0x00ff)
+ {
+ // mov Rd, <const8>
+ costSz = 1;
+ costEx = 1;
+ }
+ else
+ {
+ // Uses movw/mvn
+ costSz = 3;
+ costEx = 1;
+ }
+ goto COMMON_CNS;
- // If the constant is a handle then it will need to have a relocation
- // applied to it.
- // Any constant that requires a reloc must use the movw/movt sequence
- //
- iconNeedsReloc = opts.compReloc && tree->IsIconHandle() && !tree->IsIconHandle(GTF_ICON_FIELD_HDL);
+#elif defined _TARGET_XARCH_
- if (!iconNeedsReloc && (((signed char) tree->gtIntCon.gtIconVal) == tree->gtIntCon.gtIconVal))
- {
- costSz = 1;
- costEx = 1;
- }
-#if defined(_TARGET_AMD64_)
- else if (iconNeedsReloc || ((tree->gtIntCon.gtIconVal & 0xFFFFFFFF00000000LL) != 0))
- {
+ case GT_CNS_LNG:
costSz = 10;
costEx = 3;
- }
-#endif // _TARGET_AMD64_
- else
- {
+ goto COMMON_CNS;
+
+ case GT_CNS_STR:
costSz = 4;
costEx = 1;
- }
- goto COMMON_CNS;
+ goto COMMON_CNS;
+
+ case GT_CNS_INT:
+
+ // If the constant is a handle then it will need to have a relocation
+ // applied to it.
+ // Any constant that requires a reloc must use the movw/movt sequence
+ //
+ iconNeedsReloc = opts.compReloc && tree->IsIconHandle() && !tree->IsIconHandle(GTF_ICON_FIELD_HDL);
+
+ if (!iconNeedsReloc && (((signed char)tree->gtIntCon.gtIconVal) == tree->gtIntCon.gtIconVal))
+ {
+ costSz = 1;
+ costEx = 1;
+ }
+#if defined(_TARGET_AMD64_)
+ else if (iconNeedsReloc || ((tree->gtIntCon.gtIconVal & 0xFFFFFFFF00000000LL) != 0))
+ {
+ costSz = 10;
+ costEx = 3;
+ }
+#endif // _TARGET_AMD64_
+ else
+ {
+ costSz = 4;
+ costEx = 1;
+ }
+ goto COMMON_CNS;
#elif defined(_TARGET_ARM64_)
- case GT_CNS_LNG:
- case GT_CNS_STR:
- case GT_CNS_INT:
- // TODO-ARM64-NYI: Need cost estimates.
- costSz = 1;
- costEx = 1;
- goto COMMON_CNS;
+ case GT_CNS_LNG:
+ case GT_CNS_STR:
+ case GT_CNS_INT:
+ // TODO-ARM64-NYI: Need cost estimates.
+ costSz = 1;
+ costEx = 1;
+ goto COMMON_CNS;
#else
- case GT_CNS_LNG:
- case GT_CNS_STR:
- case GT_CNS_INT:
+ case GT_CNS_LNG:
+ case GT_CNS_STR:
+ case GT_CNS_INT:
#error "Unknown _TARGET_"
#endif
-COMMON_CNS:
- /*
- Note that some code below depends on constants always getting
- moved to be the second operand of a binary operator. This is
- easily accomplished by giving constants a level of 0, which
- we do on the next line. If you ever decide to change this, be
- aware that unless you make other arrangements for integer
- constants to be moved, stuff will break.
- */
+ COMMON_CNS:
+ /*
+ Note that some code below depends on constants always getting
+ moved to be the second operand of a binary operator. This is
+ easily accomplished by giving constants a level of 0, which
+ we do on the next line. If you ever decide to change this, be
+ aware that unless you make other arrangements for integer
+ constants to be moved, stuff will break.
+ */
- level = 0;
- break;
+ level = 0;
+ break;
- case GT_CNS_DBL:
- level = 0;
- /* We use fldz and fld1 to load 0.0 and 1.0, but all other */
- /* floating point constants are loaded using an indirection */
- if ((*((__int64 *)&(tree->gtDblCon.gtDconVal)) == 0) ||
- (*((__int64 *)&(tree->gtDblCon.gtDconVal)) == I64(0x3ff0000000000000)))
- {
- costEx = 1;
- costSz = 1;
- }
- else
- {
- costEx = IND_COST_EX;
- costSz = 4;
- }
- break;
-
- case GT_LCL_VAR:
- level = 1;
- if (gtIsLikelyRegVar(tree))
- {
- costEx = 1;
- costSz = 1;
- /* Sign-extend and zero-extend are more expensive to load */
- if (lvaTable[tree->gtLclVar.gtLclNum].lvNormalizeOnLoad())
+ case GT_CNS_DBL:
+ level = 0;
+ /* We use fldz and fld1 to load 0.0 and 1.0, but all other */
+ /* floating point constants are loaded using an indirection */
+ if ((*((__int64*)&(tree->gtDblCon.gtDconVal)) == 0) ||
+ (*((__int64*)&(tree->gtDblCon.gtDconVal)) == I64(0x3ff0000000000000)))
{
- costEx += 1;
- costSz += 1;
+ costEx = 1;
+ costSz = 1;
}
- }
- else
- {
- costEx = IND_COST_EX;
- costSz = 2;
- /* Sign-extend and zero-extend are more expensive to load */
- if (varTypeIsSmall(tree->TypeGet()))
+ else
{
- costEx += 1;
- costSz += 1;
+ costEx = IND_COST_EX;
+ costSz = 4;
+ }
+ break;
+
+ case GT_LCL_VAR:
+ level = 1;
+ if (gtIsLikelyRegVar(tree))
+ {
+ costEx = 1;
+ costSz = 1;
+ /* Sign-extend and zero-extend are more expensive to load */
+ if (lvaTable[tree->gtLclVar.gtLclNum].lvNormalizeOnLoad())
+ {
+ costEx += 1;
+ costSz += 1;
+ }
+ }
+ else
+ {
+ costEx = IND_COST_EX;
+ costSz = 2;
+ /* Sign-extend and zero-extend are more expensive to load */
+ if (varTypeIsSmall(tree->TypeGet()))
+ {
+ costEx += 1;
+ costSz += 1;
+ }
}
- }
#if defined(_TARGET_AMD64_)
- // increase costSz for floating point locals
- if (isflt)
- {
- costSz += 1;
- if (!gtIsLikelyRegVar(tree))
+ // increase costSz for floating point locals
+ if (isflt)
{
costSz += 1;
+ if (!gtIsLikelyRegVar(tree))
+ {
+ costSz += 1;
+ }
}
- }
#endif
#if CPU_LONG_USES_REGPAIR
- if (varTypeIsLong(tree->TypeGet()))
- {
- costEx *= 2; // Longs are twice as expensive
- costSz *= 2;
- }
+ if (varTypeIsLong(tree->TypeGet()))
+ {
+ costEx *= 2; // Longs are twice as expensive
+ costSz *= 2;
+ }
#endif
- break;
+ break;
- case GT_CLS_VAR:
+ case GT_CLS_VAR:
#ifdef _TARGET_ARM_
- // We generate movw/movt/ldr
- level = 1;
- costEx = 3 + IND_COST_EX; // 6
- costSz = 4 + 4 + 2; // 10
- break;
+ // We generate movw/movt/ldr
+ level = 1;
+ costEx = 3 + IND_COST_EX; // 6
+ costSz = 4 + 4 + 2; // 10
+ break;
#endif
- case GT_LCL_FLD:
- level = 1;
- costEx = IND_COST_EX;
- costSz = 4;
- if (varTypeIsSmall(tree->TypeGet()))
- {
- costEx += 1;
- costSz += 1;
- }
- break;
+ case GT_LCL_FLD:
+ level = 1;
+ costEx = IND_COST_EX;
+ costSz = 4;
+ if (varTypeIsSmall(tree->TypeGet()))
+ {
+ costEx += 1;
+ costSz += 1;
+ }
+ break;
- case GT_PHI_ARG:
- case GT_ARGPLACE:
- level = 0;
- costEx = 0;
- costSz = 0;
- break;
+ case GT_PHI_ARG:
+ case GT_ARGPLACE:
+ level = 0;
+ costEx = 0;
+ costSz = 0;
+ break;
- default:
- level = 1;
- costEx = 1;
- costSz = 1;
- break;
+ default:
+ level = 1;
+ costEx = 1;
+ costSz = 1;
+ break;
}
#if FEATURE_STACK_FP_X87
if (isflt && (oper != GT_PHI_ARG))
@@ -3332,13 +3666,13 @@ COMMON_CNS:
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
- int lvlb; // preference for op2
- unsigned lvl2; // scratch variable
+ int lvlb; // preference for op2
+ unsigned lvl2; // scratch variable
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
costEx = 0;
costSz = 0;
@@ -3358,14 +3692,14 @@ COMMON_CNS:
{
assert(op2 == nullptr);
- level = 0;
+ level = 0;
goto DONE;
}
/* Is this a unary operator? */
- if (op2 == nullptr)
+ if (op2 == nullptr)
{
/* Process the operand of the operator */
@@ -3373,573 +3707,588 @@ COMMON_CNS:
costEx = 1;
costSz = 1;
- level = gtSetEvalOrder(op1);
+ level = gtSetEvalOrder(op1);
ftreg |= op1->gtRsvdRegs;
/* Special handling for some operators */
switch (oper)
{
- case GT_JTRUE:
- costEx = 2;
- costSz = 2;
- break;
+ case GT_JTRUE:
+ costEx = 2;
+ costSz = 2;
+ break;
- case GT_SWITCH:
- costEx = 10;
- costSz = 5;
- break;
+ case GT_SWITCH:
+ costEx = 10;
+ costSz = 5;
+ break;
- case GT_CAST:
+ case GT_CAST:
#if defined(_TARGET_ARM_)
- costEx = 1;
- costSz = 1;
- if (isflt || varTypeIsFloating(op1->TypeGet()))
- {
- costEx = 3;
- costSz = 4;
- }
+ costEx = 1;
+ costSz = 1;
+ if (isflt || varTypeIsFloating(op1->TypeGet()))
+ {
+ costEx = 3;
+ costSz = 4;
+ }
#elif defined(_TARGET_ARM64_)
- costEx = 1;
- costSz = 2;
- if (isflt || varTypeIsFloating(op1->TypeGet()))
- {
- costEx = 2;
- costSz = 4;
- }
+ costEx = 1;
+ costSz = 2;
+ if (isflt || varTypeIsFloating(op1->TypeGet()))
+ {
+ costEx = 2;
+ costSz = 4;
+ }
#elif defined(_TARGET_XARCH_)
- costEx = 1;
- costSz = 2;
+ costEx = 1;
+ costSz = 2;
- if (isflt || varTypeIsFloating(op1->TypeGet()))
- {
-                /* casts involving floats always go through memory */
- costEx = IND_COST_EX * 2;
- costSz = 6;
+ if (isflt || varTypeIsFloating(op1->TypeGet()))
+ {
+                    /* casts involving floats always go through memory */
+ costEx = IND_COST_EX * 2;
+ costSz = 6;
#if FEATURE_STACK_FP_X87
- if (isflt != varTypeIsFloating(op1->TypeGet()))
- {
- isflt ? codeGen->genIncrementFPstkLevel() // Cast from int to float
- : codeGen->genDecrementFPstkLevel(); // Cast from float to int
- }
+ if (isflt != varTypeIsFloating(op1->TypeGet()))
+ {
+ isflt ? codeGen->genIncrementFPstkLevel() // Cast from int to float
+ : codeGen->genDecrementFPstkLevel(); // Cast from float to int
+ }
#endif // FEATURE_STACK_FP_X87
- }
+ }
#else
#error "Unknown _TARGET_"
#endif
#if CPU_LONG_USES_REGPAIR
- if (varTypeIsLong(tree->TypeGet()))
- {
- if (varTypeIsUnsigned(tree->TypeGet()))
- {
- /* Cast to unsigned long */
- costEx += 1;
- costSz += 2;
- }
- else
+ if (varTypeIsLong(tree->TypeGet()))
{
- /* Cast to signed long is slightly more costly */
- costEx += 2;
- costSz += 3;
+ if (varTypeIsUnsigned(tree->TypeGet()))
+ {
+ /* Cast to unsigned long */
+ costEx += 1;
+ costSz += 2;
+ }
+ else
+ {
+ /* Cast to signed long is slightly more costly */
+ costEx += 2;
+ costSz += 3;
+ }
}
- }
#endif // CPU_LONG_USES_REGPAIR
- /* Overflow casts are a lot more expensive */
- if (tree->gtOverflow())
- {
- costEx += 6;
- costSz += 6;
- }
-
- break;
-
-
- case GT_LIST:
- case GT_NOP:
- costEx = 0;
- costSz = 0;
- break;
+ /* Overflow casts are a lot more expensive */
+ if (tree->gtOverflow())
+ {
+ costEx += 6;
+ costSz += 6;
+ }
- case GT_INTRINSIC:
- // GT_INTRINSIC intrinsics Sin, Cos, Sqrt, Abs ... have higher costs.
- // TODO: tune these costs target specific as some of these are
- // target intrinsics and would cost less to generate code.
- switch (tree->gtIntrinsic.gtIntrinsicId)
- {
- default:
- assert(!"missing case for gtIntrinsicId");
- costEx = 12;
- costSz = 12;
break;
- case CORINFO_INTRINSIC_Sin:
- case CORINFO_INTRINSIC_Cos:
- case CORINFO_INTRINSIC_Sqrt:
- case CORINFO_INTRINSIC_Cosh:
- case CORINFO_INTRINSIC_Sinh:
- case CORINFO_INTRINSIC_Tan:
- case CORINFO_INTRINSIC_Tanh:
- case CORINFO_INTRINSIC_Asin:
- case CORINFO_INTRINSIC_Acos:
- case CORINFO_INTRINSIC_Atan:
- case CORINFO_INTRINSIC_Atan2:
- case CORINFO_INTRINSIC_Log10:
- case CORINFO_INTRINSIC_Pow:
- case CORINFO_INTRINSIC_Exp:
- case CORINFO_INTRINSIC_Ceiling:
- case CORINFO_INTRINSIC_Floor:
- case CORINFO_INTRINSIC_Object_GetType:
-                // We give intrinsics a large fixed execution cost because we'd like to CSE
-                // them, even if they are implemented by calls. This is different from modeling
-                // user calls, since we never CSE user calls.
- costEx = 36;
- costSz = 4;
- break;
-
- case CORINFO_INTRINSIC_Abs:
- costEx = 5;
- costSz = 15;
+ case GT_LIST:
+ case GT_NOP:
+ costEx = 0;
+ costSz = 0;
break;
- case CORINFO_INTRINSIC_Round:
- costEx = 3;
- costSz = 4;
-#if FEATURE_STACK_FP_X87
- if (tree->TypeGet() == TYP_INT)
+ case GT_INTRINSIC:
+ // GT_INTRINSIC intrinsics Sin, Cos, Sqrt, Abs ... have higher costs.
+ // TODO: tune these costs target specific as some of these are
+ // target intrinsics and would cost less to generate code.
+ switch (tree->gtIntrinsic.gtIntrinsicId)
{
- // This is a special case to handle the following
- // optimization: conv.i4(round.d(d)) -> round.i(d)
- codeGen->genDecrementFPstkLevel();
- }
+ default:
+ assert(!"missing case for gtIntrinsicId");
+ costEx = 12;
+ costSz = 12;
+ break;
+
+ case CORINFO_INTRINSIC_Sin:
+ case CORINFO_INTRINSIC_Cos:
+ case CORINFO_INTRINSIC_Sqrt:
+ case CORINFO_INTRINSIC_Cosh:
+ case CORINFO_INTRINSIC_Sinh:
+ case CORINFO_INTRINSIC_Tan:
+ case CORINFO_INTRINSIC_Tanh:
+ case CORINFO_INTRINSIC_Asin:
+ case CORINFO_INTRINSIC_Acos:
+ case CORINFO_INTRINSIC_Atan:
+ case CORINFO_INTRINSIC_Atan2:
+ case CORINFO_INTRINSIC_Log10:
+ case CORINFO_INTRINSIC_Pow:
+ case CORINFO_INTRINSIC_Exp:
+ case CORINFO_INTRINSIC_Ceiling:
+ case CORINFO_INTRINSIC_Floor:
+ case CORINFO_INTRINSIC_Object_GetType:
+                        // We give intrinsics a large fixed execution cost because we'd like to CSE
+                        // them, even if they are implemented by calls. This is different from modeling
+                        // user calls, since we never CSE user calls.
+ costEx = 36;
+ costSz = 4;
+ break;
+
+ case CORINFO_INTRINSIC_Abs:
+ costEx = 5;
+ costSz = 15;
+ break;
+
+ case CORINFO_INTRINSIC_Round:
+ costEx = 3;
+ costSz = 4;
+#if FEATURE_STACK_FP_X87
+ if (tree->TypeGet() == TYP_INT)
+ {
+ // This is a special case to handle the following
+ // optimization: conv.i4(round.d(d)) -> round.i(d)
+ codeGen->genDecrementFPstkLevel();
+ }
#endif // FEATURE_STACK_FP_X87
+ break;
+ }
+ level++;
break;
- }
- level++;
- break;
- case GT_NOT:
- case GT_NEG:
- // We need to ensure that -x is evaluated before x or else
- // we get burned while adjusting genFPstkLevel in x*-x where
-            // the rhs x is the last use of the enregistered x.
- //
- // Even in the integer case we want to prefer to
- // evaluate the side without the GT_NEG node, all other things
- // being equal. Also a GT_NOT requires a scratch register
+ case GT_NOT:
+ case GT_NEG:
+ // We need to ensure that -x is evaluated before x or else
+ // we get burned while adjusting genFPstkLevel in x*-x where
+                // the rhs x is the last use of the enregistered x.
+ //
+ // Even in the integer case we want to prefer to
+ // evaluate the side without the GT_NEG node, all other things
+ // being equal. Also a GT_NOT requires a scratch register
- level++;
- break;
+ level++;
+ break;
- case GT_ADDR:
+ case GT_ADDR:
#if FEATURE_STACK_FP_X87
- /* If the operand was floating point, pop the value from the stack */
+ /* If the operand was floating point, pop the value from the stack */
- if (varTypeIsFloating(op1->TypeGet()))
- {
- codeGen->genDecrementFPstkLevel();
- }
+ if (varTypeIsFloating(op1->TypeGet()))
+ {
+ codeGen->genDecrementFPstkLevel();
+ }
#endif // FEATURE_STACK_FP_X87
- costEx = 0;
- costSz = 1;
+ costEx = 0;
+ costSz = 1;
-            // If we have a GT_ADDR of a GT_IND we can just copy the costs from indOp1
- if (op1->OperGet() == GT_IND)
- {
- GenTreePtr indOp1 = op1->gtOp.gtOp1;
- costEx = indOp1->gtCostEx;
- costSz = indOp1->gtCostSz;
- }
- break;
+                // If we have a GT_ADDR of a GT_IND we can just copy the costs from indOp1
+ if (op1->OperGet() == GT_IND)
+ {
+ GenTreePtr indOp1 = op1->gtOp.gtOp1;
+ costEx = indOp1->gtCostEx;
+ costSz = indOp1->gtCostSz;
+ }
+ break;
- case GT_ARR_LENGTH:
- level++;
+ case GT_ARR_LENGTH:
+ level++;
-            /* Array Len should be the same as an indirection, which has a costEx of IND_COST_EX */
- costEx = IND_COST_EX - 1;
- costSz = 2;
- break;
+                /* Array Len should be the same as an indirection, which has a costEx of IND_COST_EX */
+ costEx = IND_COST_EX - 1;
+ costSz = 2;
+ break;
- case GT_MKREFANY:
- case GT_OBJ:
- // We estimate the cost of a GT_OBJ or GT_MKREFANY to be two loads (GT_INDs)
- costEx = 2*IND_COST_EX;
- costSz = 2*2;
- break;
+ case GT_MKREFANY:
+ case GT_OBJ:
+ // We estimate the cost of a GT_OBJ or GT_MKREFANY to be two loads (GT_INDs)
+ costEx = 2 * IND_COST_EX;
+ costSz = 2 * 2;
+ break;
- case GT_BOX:
- // We estimate the cost of a GT_BOX to be two stores (GT_INDs)
- costEx = 2*IND_COST_EX;
- costSz = 2*2;
- break;
+ case GT_BOX:
+ // We estimate the cost of a GT_BOX to be two stores (GT_INDs)
+ costEx = 2 * IND_COST_EX;
+ costSz = 2 * 2;
+ break;
- case GT_IND:
+ case GT_IND:
- /* An indirection should always have a non-zero level.
- * Only constant leaf nodes have level 0.
- */
+ /* An indirection should always have a non-zero level.
+ * Only constant leaf nodes have level 0.
+ */
- if (level == 0)
- level = 1;
+ if (level == 0)
+ {
+ level = 1;
+ }
- /* Indirections have a costEx of IND_COST_EX */
- costEx = IND_COST_EX;
- costSz = 2;
+ /* Indirections have a costEx of IND_COST_EX */
+ costEx = IND_COST_EX;
+ costSz = 2;
- /* If we have to sign-extend or zero-extend, bump the cost */
- if (varTypeIsSmall(tree->TypeGet()))
- {
- costEx += 1;
- costSz += 1;
- }
+ /* If we have to sign-extend or zero-extend, bump the cost */
+ if (varTypeIsSmall(tree->TypeGet()))
+ {
+ costEx += 1;
+ costSz += 1;
+ }
- if (isflt)
- {
+ if (isflt)
+ {
#if FEATURE_STACK_FP_X87
- /* Indirect loads of FP values push a new value on the FP stack */
- codeGen->genIncrementFPstkLevel();
+ /* Indirect loads of FP values push a new value on the FP stack */
+ codeGen->genIncrementFPstkLevel();
#endif // FEATURE_STACK_FP_X87
- if (tree->TypeGet() == TYP_DOUBLE)
- costEx += 1;
+ if (tree->TypeGet() == TYP_DOUBLE)
+ {
+ costEx += 1;
+ }
#ifdef _TARGET_ARM_
- costSz += 2;
+ costSz += 2;
#endif // _TARGET_ARM_
- }
+ }
- /* Can we form an addressing mode with this indirection? */
+ /* Can we form an addressing mode with this indirection? */
- if (op1->gtOper == GT_ADD)
- {
- bool rev;
+ if (op1->gtOper == GT_ADD)
+ {
+ bool rev;
#if SCALED_ADDR_MODES
- unsigned mul;
+ unsigned mul;
#endif
- unsigned cns;
- GenTreePtr base;
- GenTreePtr idx;
-
- /* See if we can form a complex addressing mode? */
-
- GenTreePtr addr = op1;
- if (codeGen->genCreateAddrMode(addr, // address
- 0, // mode
- false, // fold
- RBM_NONE, // reg mask
- &rev, // reverse ops
- &base, // base addr
- &idx, // index val
+ unsigned cns;
+ GenTreePtr base;
+ GenTreePtr idx;
+
+ /* See if we can form a complex addressing mode? */
+
+ GenTreePtr addr = op1;
+ if (codeGen->genCreateAddrMode(addr, // address
+ 0, // mode
+ false, // fold
+ RBM_NONE, // reg mask
+ &rev, // reverse ops
+ &base, // base addr
+ &idx, // index val
#if SCALED_ADDR_MODES
- &mul, // scaling
+ &mul, // scaling
#endif
- &cns, // displacement
- true)) // don't generate code
- {
- // We can form a complex addressing mode, so mark each of the interior
- // nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost.
-
- addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
-#ifdef _TARGET_XARCH_
- // addrmodeCount is the count of items that we used to form
- // an addressing mode. The maximum value is 4 when we have
- // all of these: { base, idx, cns, mul }
- //
- unsigned addrmodeCount = 0;
- if (base)
+ &cns, // displacement
+ true)) // don't generate code
{
- costEx += base->gtCostEx;
- costSz += base->gtCostSz;
- addrmodeCount++;
- }
+ // We can form a complex addressing mode, so mark each of the interior
+ // nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost.
- if (idx)
- {
- costEx += idx->gtCostEx;
- costSz += idx->gtCostSz;
- addrmodeCount++;
- }
-
- if (cns)
- {
- if (((signed char)cns) == ((int)cns))
- costSz += 1;
- else
- costSz += 4;
- addrmodeCount++;
- }
- if (mul)
- {
- addrmodeCount++;
- }
-                    // When we form a complex addressing mode we can reduce the costs
- // associated with the interior GT_ADD and GT_LSH nodes:
- //
- // GT_ADD -- reduce this interior GT_ADD by (-3,-3)
- // / \ --
- // GT_ADD 'cns' -- reduce this interior GT_ADD by (-2,-2)
- // / \ --
- // 'base' GT_LSL -- reduce this interior GT_LSL by (-1,-1)
- // / \ --
- // 'idx' 'mul'
- //
- if (addrmodeCount > 1)
- {
- // The number of interior GT_ADD and GT_LSL will always be one less than addrmodeCount
+ addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
+#ifdef _TARGET_XARCH_
+ // addrmodeCount is the count of items that we used to form
+ // an addressing mode. The maximum value is 4 when we have
+ // all of these: { base, idx, cns, mul }
//
- addrmodeCount--;
+ unsigned addrmodeCount = 0;
+ if (base)
+ {
+ costEx += base->gtCostEx;
+ costSz += base->gtCostSz;
+ addrmodeCount++;
+ }
- GenTreePtr tmp = addr;
- while (addrmodeCount > 0)
+ if (idx)
{
- // decrement the gtCosts for the interior GT_ADD or GT_LSH node by the remaining addrmodeCount
- tmp->SetCosts(tmp->gtCostEx - addrmodeCount, tmp->gtCostSz - addrmodeCount);
+ costEx += idx->gtCostEx;
+ costSz += idx->gtCostSz;
+ addrmodeCount++;
+ }
+ if (cns)
+ {
+ if (((signed char)cns) == ((int)cns))
+ {
+ costSz += 1;
+ }
+ else
+ {
+ costSz += 4;
+ }
+ addrmodeCount++;
+ }
+ if (mul)
+ {
+ addrmodeCount++;
+ }
+                        // When we form a complex addressing mode we can reduce the costs
+ // associated with the interior GT_ADD and GT_LSH nodes:
+ //
+ // GT_ADD -- reduce this interior GT_ADD by (-3,-3)
+ // / \ --
+ // GT_ADD 'cns' -- reduce this interior GT_ADD by (-2,-2)
+ // / \ --
+ // 'base' GT_LSL -- reduce this interior GT_LSL by (-1,-1)
+ // / \ --
+ // 'idx' 'mul'
+ //
+ if (addrmodeCount > 1)
+ {
+ // The number of interior GT_ADD and GT_LSL will always be one less than addrmodeCount
+ //
addrmodeCount--;
- if (addrmodeCount > 0)
+
+ GenTreePtr tmp = addr;
+ while (addrmodeCount > 0)
{
- GenTreePtr tmpOp1 = tmp->gtOp.gtOp1;
- GenTreePtr tmpOp2 = tmp->gtGetOp2();
- assert(tmpOp2 != nullptr);
+ // decrement the gtCosts for the interior GT_ADD or GT_LSH node by the remaining
+ // addrmodeCount
+ tmp->SetCosts(tmp->gtCostEx - addrmodeCount, tmp->gtCostSz - addrmodeCount);
- if ((tmpOp1 != base) && (tmpOp1->OperGet() == GT_ADD))
- {
- tmp = tmpOp1;
- }
- else if (tmpOp2->OperGet() == GT_LSH)
+ addrmodeCount--;
+ if (addrmodeCount > 0)
{
- tmp = tmpOp2;
- }
- else if (tmpOp1->OperGet() == GT_LSH)
- {
- tmp = tmpOp1;
- }
- else if (tmpOp2->OperGet() == GT_ADD)
- {
- tmp = tmpOp2;
- }
- else
- {
- // We can very rarely encounter a tree that has a GT_COMMA node
- // that is difficult to walk, so we just early out without decrementing.
- addrmodeCount = 0;
+ GenTreePtr tmpOp1 = tmp->gtOp.gtOp1;
+ GenTreePtr tmpOp2 = tmp->gtGetOp2();
+ assert(tmpOp2 != nullptr);
+
+ if ((tmpOp1 != base) && (tmpOp1->OperGet() == GT_ADD))
+ {
+ tmp = tmpOp1;
+ }
+ else if (tmpOp2->OperGet() == GT_LSH)
+ {
+ tmp = tmpOp2;
+ }
+ else if (tmpOp1->OperGet() == GT_LSH)
+ {
+ tmp = tmpOp1;
+ }
+ else if (tmpOp2->OperGet() == GT_ADD)
+ {
+ tmp = tmpOp2;
+ }
+ else
+ {
+ // We can very rarely encounter a tree that has a GT_COMMA node
+ // that is difficult to walk, so we just early out without decrementing.
+ addrmodeCount = 0;
+ }
}
}
}
- }
#elif defined _TARGET_ARM_
- if (base)
- {
- costEx += base->gtCostEx;
- costSz += base->gtCostSz;
- if ((base->gtOper == GT_LCL_VAR) &&
- ((idx==NULL) || (cns==0)))
+ if (base)
{
- costSz -= 1;
+ costEx += base->gtCostEx;
+ costSz += base->gtCostSz;
+ if ((base->gtOper == GT_LCL_VAR) && ((idx == NULL) || (cns == 0)))
+ {
+ costSz -= 1;
+ }
}
- }
- if (idx)
- {
- costEx += idx->gtCostEx;
- costSz += idx->gtCostSz;
- if (mul > 0)
+ if (idx)
{
- costSz += 2;
+ costEx += idx->gtCostEx;
+ costSz += idx->gtCostSz;
+ if (mul > 0)
+ {
+ costSz += 2;
+ }
}
- }
- if (cns)
- {
-                if (cns >= 128) // small offsets fit into a 16-bit instruction
+ if (cns)
{
- if (cns < 4096) // medium offsets require a 32-bit instruction
+                    if (cns >= 128) // small offsets fit into a 16-bit instruction
{
- if (!isflt)
- costSz += 2;
- }
- else
- {
- costEx += 2; // Very large offsets require movw/movt instructions
- costSz += 8;
+ if (cns < 4096) // medium offsets require a 32-bit instruction
+ {
+ if (!isflt)
+ costSz += 2;
+ }
+ else
+ {
+ costEx += 2; // Very large offsets require movw/movt instructions
+ costSz += 8;
+ }
}
}
- }
#elif defined _TARGET_ARM64_
- if (base)
- {
- costEx += base->gtCostEx;
- costSz += base->gtCostSz;
- }
+ if (base)
+ {
+ costEx += base->gtCostEx;
+ costSz += base->gtCostSz;
+ }
- if (idx)
- {
- costEx += idx->gtCostEx;
- costSz += idx->gtCostSz;
- }
+ if (idx)
+ {
+ costEx += idx->gtCostEx;
+ costSz += idx->gtCostSz;
+ }
- if (cns != 0)
- {
- if (cns >= (4096 * genTypeSize(tree->TypeGet())))
+ if (cns != 0)
{
- costEx += 1;
- costSz += 4;
+ if (cns >= (4096 * genTypeSize(tree->TypeGet())))
+ {
+ costEx += 1;
+ costSz += 4;
+ }
}
- }
#else
#error "Unknown _TARGET_"
#endif
- assert(addr->gtOper == GT_ADD);
- assert(!addr->gtOverflow());
- assert(op2 == NULL);
- assert(mul != 1);
-
- // If we have an addressing mode, we have one of:
- // [base + cns]
- // [ idx * mul ] // mul >= 2, else we would use base instead of idx
- // [ idx * mul + cns] // mul >= 2, else we would use base instead of idx
- // [base + idx * mul ] // mul can be 0, 2, 4, or 8
- // [base + idx * mul + cns] // mul can be 0, 2, 4, or 8
- // Note that mul == 0 is semantically equivalent to mul == 1.
- // Note that cns can be zero.
- CLANG_FORMAT_COMMENT_ANCHOR;
+ assert(addr->gtOper == GT_ADD);
+ assert(!addr->gtOverflow());
+ assert(op2 == nullptr);
+ assert(mul != 1);
+
+ // If we have an addressing mode, we have one of:
+ // [base + cns]
+ // [ idx * mul ] // mul >= 2, else we would use base instead of idx
+ // [ idx * mul + cns] // mul >= 2, else we would use base instead of idx
+ // [base + idx * mul ] // mul can be 0, 2, 4, or 8
+ // [base + idx * mul + cns] // mul can be 0, 2, 4, or 8
+ // Note that mul == 0 is semantically equivalent to mul == 1.
+ // Note that cns can be zero.
+ CLANG_FORMAT_COMMENT_ANCHOR;
#if SCALED_ADDR_MODES
- assert((base != NULL) || (idx != NULL && mul >= 2));
+ assert((base != nullptr) || (idx != nullptr && mul >= 2));
#else
- assert(base != NULL);
+ assert(base != NULL);
#endif
- INDEBUG(GenTreePtr op1Save = addr);
+ INDEBUG(GenTreePtr op1Save = addr);
- /* Walk addr looking for non-overflow GT_ADDs */
- gtWalkOp(&addr, &op2, base, false);
+ /* Walk addr looking for non-overflow GT_ADDs */
+ gtWalkOp(&addr, &op2, base, false);
- // addr and op2 are now children of the root GT_ADD of the addressing mode
- assert(addr != op1Save);
- assert(op2 != NULL);
+ // addr and op2 are now children of the root GT_ADD of the addressing mode
+ assert(addr != op1Save);
+ assert(op2 != nullptr);
- /* Walk addr looking for non-overflow GT_ADDs of constants */
- gtWalkOp(&addr, &op2, NULL, true);
+ /* Walk addr looking for non-overflow GT_ADDs of constants */
+ gtWalkOp(&addr, &op2, nullptr, true);
- // TODO-Cleanup: It seems very strange that we might walk down op2 now, even though the prior
- // call to gtWalkOp() may have altered op2.
+ // TODO-Cleanup: It seems very strange that we might walk down op2 now, even though the
+ // prior
+ // call to gtWalkOp() may have altered op2.
- /* Walk op2 looking for non-overflow GT_ADDs of constants */
- gtWalkOp(&op2, &addr, NULL, true);
+ /* Walk op2 looking for non-overflow GT_ADDs of constants */
+ gtWalkOp(&op2, &addr, nullptr, true);
- // OK we are done walking the tree
- // Now assert that addr and op2 correspond with base and idx
- // in one of the several acceptable ways.
+ // OK we are done walking the tree
+ // Now assert that addr and op2 correspond with base and idx
+ // in one of the several acceptable ways.
- // Note that sometimes addr/op2 is equal to idx/base
- // and other times addr/op2 is a GT_COMMA node with
- // an effective value that is idx/base
+ // Note that sometimes addr/op2 is equal to idx/base
+ // and other times addr/op2 is a GT_COMMA node with
+ // an effective value that is idx/base
- if (mul > 1)
- {
- if ((addr != base) && (addr->gtOper == GT_LSH))
+ if (mul > 1)
{
- addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
- if (addr->gtOp.gtOp1->gtOper == GT_MUL)
+ if ((addr != base) && (addr->gtOper == GT_LSH))
{
- addr->gtOp.gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ if (addr->gtOp.gtOp1->gtOper == GT_MUL)
+ {
+ addr->gtOp.gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ }
+ assert((base == nullptr) || (op2 == base) ||
+ (op2->gtEffectiveVal() == base->gtEffectiveVal()) ||
+ (gtWalkOpEffectiveVal(op2) == gtWalkOpEffectiveVal(base)));
}
- assert((base == NULL) || (op2 == base) || (op2->gtEffectiveVal() == base->gtEffectiveVal()) ||
- (gtWalkOpEffectiveVal(op2) == gtWalkOpEffectiveVal(base)));
- }
- else
- {
- assert(op2);
- assert(op2->gtOper == GT_LSH || op2->gtOper == GT_MUL);
- op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
- // We may have eliminated multiple shifts and multiplies in the addressing mode,
- // so navigate down through them to get to "idx".
- GenTreePtr op2op1 = op2->gtOp.gtOp1;
- while ((op2op1->gtOper == GT_LSH || op2op1->gtOper == GT_MUL) && op2op1 != idx)
+ else
{
- op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
- op2op1 = op2op1->gtOp.gtOp1;
+ assert(op2);
+ assert(op2->gtOper == GT_LSH || op2->gtOper == GT_MUL);
+ op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ // We may have eliminated multiple shifts and multiplies in the addressing mode,
+ // so navigate down through them to get to "idx".
+ GenTreePtr op2op1 = op2->gtOp.gtOp1;
+ while ((op2op1->gtOper == GT_LSH || op2op1->gtOper == GT_MUL) && op2op1 != idx)
+ {
+ op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ op2op1 = op2op1->gtOp.gtOp1;
+ }
+ assert(addr->gtEffectiveVal() == base);
+ assert(op2op1 == idx);
}
- assert(addr->gtEffectiveVal() == base);
- assert(op2op1 == idx);
}
- }
- else
- {
- assert(mul == 0);
-
- if ((addr == idx) || (addr->gtEffectiveVal() == idx))
+ else
{
- if (idx != NULL)
+ assert(mul == 0);
+
+ if ((addr == idx) || (addr->gtEffectiveVal() == idx))
{
- if ((addr->gtOper == GT_MUL) || (addr->gtOper == GT_LSH))
+ if (idx != nullptr)
{
- if ((addr->gtOp.gtOp1->gtOper == GT_NOP) ||
- (addr->gtOp.gtOp1->gtOper == GT_MUL && addr->gtOp.gtOp1->gtOp.gtOp1->gtOper == GT_NOP))
+ if ((addr->gtOper == GT_MUL) || (addr->gtOper == GT_LSH))
{
- addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
- if (addr->gtOp.gtOp1->gtOper == GT_MUL)
- addr->gtOp.gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ if ((addr->gtOp.gtOp1->gtOper == GT_NOP) ||
+ (addr->gtOp.gtOp1->gtOper == GT_MUL &&
+ addr->gtOp.gtOp1->gtOp.gtOp1->gtOper == GT_NOP))
+ {
+ addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ if (addr->gtOp.gtOp1->gtOper == GT_MUL)
+ {
+ addr->gtOp.gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ }
+ }
}
}
+ assert((op2 == base) || (op2->gtEffectiveVal() == base));
}
- assert((op2 == base) || (op2->gtEffectiveVal() == base));
- }
- else if ((addr == base) || (addr->gtEffectiveVal() == base))
- {
- if (idx != NULL)
+ else if ((addr == base) || (addr->gtEffectiveVal() == base))
{
- assert(op2);
- if ((op2->gtOper == GT_MUL) || (op2->gtOper == GT_LSH))
+ if (idx != nullptr)
{
- if ((op2->gtOp.gtOp1->gtOper == GT_NOP) ||
- (op2->gtOp.gtOp1->gtOper == GT_MUL && op2->gtOp.gtOp1->gtOp.gtOp1->gtOper == GT_NOP))
+ assert(op2);
+ if ((op2->gtOper == GT_MUL) || (op2->gtOper == GT_LSH))
{
- // assert(bRngChk);
- op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
- if (op2->gtOp.gtOp1->gtOper == GT_MUL)
- op2->gtOp.gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ if ((op2->gtOp.gtOp1->gtOper == GT_NOP) ||
+ (op2->gtOp.gtOp1->gtOper == GT_MUL &&
+ op2->gtOp.gtOp1->gtOp.gtOp1->gtOper == GT_NOP))
+ {
+ // assert(bRngChk);
+ op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ if (op2->gtOp.gtOp1->gtOper == GT_MUL)
+ {
+ op2->gtOp.gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ }
+ }
}
+ assert((op2 == idx) || (op2->gtEffectiveVal() == idx));
}
- assert((op2 == idx) || (op2->gtEffectiveVal() == idx));
+ }
+ else
+ {
+ // addr isn't base or idx. Is this possible? Or should there be an assert?
}
}
- else
- {
- // addr isn't base or idx. Is this possible? Or should there be an assert?
- }
- }
- goto DONE;
+ goto DONE;
- } // end if (genCreateAddrMode(...))
+ } // end if (genCreateAddrMode(...))
- } // end if (op1->gtOper == GT_ADD)
- else if (gtIsLikelyRegVar(op1))
- {
-            /* Indirection of an enregistered LCL_VAR, don't increase costEx/costSz */
- goto DONE;
- }
+ } // end if (op1->gtOper == GT_ADD)
+ else if (gtIsLikelyRegVar(op1))
+ {
+                    /* Indirection of an enregistered LCL_VAR, don't increase costEx/costSz */
+ goto DONE;
+ }
#ifdef _TARGET_XARCH_
- else if (op1->IsCnsIntOrI())
- {
- // Indirection of a CNS_INT, subtract 1 from costEx
- // makes costEx 3 for x86 and 4 for amd64
- //
- costEx += (op1->gtCostEx - 1);
- costSz += op1->gtCostSz;
- goto DONE;
- }
+ else if (op1->IsCnsIntOrI())
+ {
+ // Indirection of a CNS_INT, subtract 1 from costEx
+ // makes costEx 3 for x86 and 4 for amd64
+ //
+ costEx += (op1->gtCostEx - 1);
+ costSz += op1->gtCostSz;
+ goto DONE;
+ }
#endif
- break;
+ break;
- default:
- break;
+ default:
+ break;
}
- costEx += op1->gtCostEx;
- costSz += op1->gtCostSz;
+ costEx += op1->gtCostEx;
+ costSz += op1->gtCostSz;
goto DONE;
}
@@ -3963,147 +4312,152 @@ COMMON_CNS:
/* Operations on longs are more expensive */
costEx += 3;
costSz += 3;
- }
+ }
#endif
switch (oper)
{
- case GT_MOD:
- case GT_UMOD:
+ case GT_MOD:
+ case GT_UMOD:
- /* Modulo by a power of 2 is easy */
+ /* Modulo by a power of 2 is easy */
- if (op2->IsCnsIntOrI())
- {
- size_t ival = op2->gtIntConCommon.IconValue();
+ if (op2->IsCnsIntOrI())
+ {
+ size_t ival = op2->gtIntConCommon.IconValue();
- if (ival > 0 && ival == genFindLowestBit(ival))
- break;
- }
+ if (ival > 0 && ival == genFindLowestBit(ival))
+ {
+ break;
+ }
+ }
- __fallthrough;
+ __fallthrough;
- case GT_DIV:
- case GT_UDIV:
+ case GT_DIV:
+ case GT_UDIV:
- if (isflt)
- {
- /* fp division is very expensive to execute */
- costEx = 36; // TYP_DOUBLE
- costSz += 3;
- }
- else
- {
- /* integer division is also very expensive */
- costEx = 20;
- costSz += 2;
+ if (isflt)
+ {
+ /* fp division is very expensive to execute */
+ costEx = 36; // TYP_DOUBLE
+ costSz += 3;
+ }
+ else
+ {
+ /* integer division is also very expensive */
+ costEx = 20;
+ costSz += 2;
-                // Encourage the first operand to be evaluated (into EAX/EDX) first
- lvlb -= 3;
+                    // Encourage the first operand to be evaluated (into EAX/EDX) first
+ lvlb -= 3;
#ifdef _TARGET_XARCH_
- // the idiv and div instruction requires EAX/EDX
- ftreg |= RBM_EAX|RBM_EDX;
+ // the idiv and div instruction requires EAX/EDX
+ ftreg |= RBM_EAX | RBM_EDX;
#endif
- }
- break;
+ }
+ break;
- case GT_MUL:
+ case GT_MUL:
- if (isflt)
- {
- /* FP multiplication instructions are more expensive */
- costEx += 4;
- costSz += 3;
- }
- else
- {
- /* Integer multiplication instructions are more expensive */
- costEx += 3;
- costSz += 2;
-
- if (tree->gtOverflow())
+ if (isflt)
{
-                    /* Overflow checks are more expensive */
- costEx += 3;
+ /* FP multiplication instructions are more expensive */
+ costEx += 4;
costSz += 3;
}
-
-#ifdef _TARGET_X86_
- if ((tree->gtType == TYP_LONG) || tree->gtOverflow())
+ else
{
- /* We use imulEAX for TYP_LONG and overflow multiplications */
-                // Encourage the first operand to be evaluated (into EAX/EDX) first
- lvlb -= 4;
+ /* Integer multiplication instructions are more expensive */
+ costEx += 3;
+ costSz += 2;
-                // the imulEAX instruction on x86 requires EDX:EAX
- ftreg |= (RBM_EAX|RBM_EDX);
+ if (tree->gtOverflow())
+ {
+                        /* Overflow checks are more expensive */
+ costEx += 3;
+ costSz += 3;
+ }
- /* The 64-bit imul instruction costs more */
- costEx += 4;
- }
-#endif // _TARGET_X86_
- }
- break;
+#ifdef _TARGET_X86_
+ if ((tree->gtType == TYP_LONG) || tree->gtOverflow())
+ {
+ /* We use imulEAX for TYP_LONG and overflow multiplications */
+                        // Encourage the first operand to be evaluated (into EAX/EDX) first
+ lvlb -= 4;
- case GT_ADD:
- case GT_SUB:
- case GT_ASG_ADD:
- case GT_ASG_SUB:
+                        // the imulEAX instruction on x86 requires EDX:EAX
+ ftreg |= (RBM_EAX | RBM_EDX);
- if (isflt)
- {
- /* FP instructions are a bit more expensive */
- costEx += 4;
- costSz += 3;
+ /* The 64-bit imul instruction costs more */
+ costEx += 4;
+ }
+#endif // _TARGET_X86_
+ }
break;
- }
-            /* Overflow checks are more expensive */
- if (tree->gtOverflow())
- {
- costEx += 3;
- costSz += 3;
- }
- break;
+ case GT_ADD:
+ case GT_SUB:
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ if (isflt)
+ {
+ /* FP instructions are a bit more expensive */
+ costEx += 4;
+ costSz += 3;
+ break;
+ }
- case GT_COMMA:
+                /* Overflow checks are more expensive */
+ if (tree->gtOverflow())
+ {
+ costEx += 3;
+ costSz += 3;
+ }
+ break;
- /* Comma tosses the result of the left operand */
- gtSetEvalOrderAndRestoreFPstkLevel(op1);
- level = gtSetEvalOrder(op2);
+ case GT_COMMA:
- ftreg |= op1->gtRsvdRegs|op2->gtRsvdRegs;
+ /* Comma tosses the result of the left operand */
+ gtSetEvalOrderAndRestoreFPstkLevel(op1);
+ level = gtSetEvalOrder(op2);
- /* GT_COMMA cost is the sum of op1 and op2 costs */
- costEx = (op1->gtCostEx + op2->gtCostEx);
- costSz = (op1->gtCostSz + op2->gtCostSz);
+ ftreg |= op1->gtRsvdRegs | op2->gtRsvdRegs;
- goto DONE;
+ /* GT_COMMA cost is the sum of op1 and op2 costs */
+ costEx = (op1->gtCostEx + op2->gtCostEx);
+ costSz = (op1->gtCostSz + op2->gtCostSz);
+
+ goto DONE;
- case GT_COLON:
+ case GT_COLON:
- level = gtSetEvalOrderAndRestoreFPstkLevel(op1);
- lvl2 = gtSetEvalOrder(op2);
+ level = gtSetEvalOrderAndRestoreFPstkLevel(op1);
+ lvl2 = gtSetEvalOrder(op2);
- if (level < lvl2)
- level = lvl2;
- else if (level == lvl2)
- level += 1;
+ if (level < lvl2)
+ {
+ level = lvl2;
+ }
+ else if (level == lvl2)
+ {
+ level += 1;
+ }
- ftreg |= op1->gtRsvdRegs|op2->gtRsvdRegs;
- costEx = op1->gtCostEx + op2->gtCostEx;
- costSz = op1->gtCostSz + op2->gtCostSz;
+ ftreg |= op1->gtRsvdRegs | op2->gtRsvdRegs;
+ costEx = op1->gtCostEx + op2->gtCostEx;
+ costSz = op1->gtCostSz + op2->gtCostSz;
- goto DONE;
+ goto DONE;
- default:
- break;
+ default:
+ break;
}
/* Assignments need a bit of special handling */
- if (kind & GTK_ASGOP)
+ if (kind & GTK_ASGOP)
{
/* Process the target */
@@ -4113,7 +4467,7 @@ COMMON_CNS:
/* If assigning an FP value, the target won't get pushed */
- if (isflt && !tree->IsPhiDefn())
+ if (isflt && !tree->IsPhiDefn())
{
op1->gtFPlvl--;
codeGen->genDecrementFPstkLevel();
@@ -4126,14 +4480,16 @@ COMMON_CNS:
assert(lvlb == 0);
lvl2 = gtSetEvalOrder(op2);
if (oper != GT_ASG)
+ {
ftreg |= op2->gtRsvdRegs;
+ }
/* Assignment to an enregistered LCL_VAR */
costEx = op2->gtCostEx;
- costSz = max(3, op2->gtCostSz); // 3 is an estimate for a reg-reg assignment
+ costSz = max(3, op2->gtCostSz); // 3 is an estimate for a reg-reg assignment
goto DONE_OP1_AFTER_COST;
}
- else if (oper != GT_ASG)
+ else if (oper != GT_ASG)
{
// Assign-Op instructions read and write op1
//
@@ -4148,19 +4504,21 @@ COMMON_CNS:
/* Process the sub-operands */
- level = gtSetEvalOrder(op1);
+ level = gtSetEvalOrder(op1);
if (lvlb < 0)
{
- level -= lvlb; // lvlb is negative, so this increases level
- lvlb = 0;
+ level -= lvlb; // lvlb is negative, so this increases level
+ lvlb = 0;
}
DONE_OP1:
assert(lvlb >= 0);
- lvl2 = gtSetEvalOrder(op2) + lvlb;
- ftreg |= op1->gtRsvdRegs;
+ lvl2 = gtSetEvalOrder(op2) + lvlb;
+ ftreg |= op1->gtRsvdRegs;
if (oper != GT_ASG)
+ {
ftreg |= op2->gtRsvdRegs;
+ }
costEx += (op1->gtCostEx + op2->gtCostEx);
costSz += (op1->gtCostSz + op2->gtCostSz);
@@ -4173,7 +4531,7 @@ COMMON_CNS:
assignments consume 1 value and don't produce anything.
*/
- if (isflt && !tree->IsPhiDefn())
+ if (isflt && !tree->IsPhiDefn())
{
assert(oper != GT_COMMA);
codeGen->genDecrementFPstkLevel();
@@ -4181,7 +4539,7 @@ COMMON_CNS:
#endif // FEATURE_STACK_FP_X87
bool bReverseInAssignment = false;
- if (kind & GTK_ASGOP)
+ if (kind & GTK_ASGOP)
{
GenTreePtr op1Val = op1;
@@ -4197,34 +4555,40 @@ COMMON_CNS:
switch (op1Val->gtOper)
{
- case GT_IND:
+ case GT_IND:
- // If we have any side effects on the GT_IND child node
- // we have to evaluate op1 first
- if (op1Val->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT)
- break;
+ // If we have any side effects on the GT_IND child node
+ // we have to evaluate op1 first
+ if (op1Val->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT)
+ {
+ break;
+ }
- // In case op2 assigns to a local var that is used in op1Val, we have to evaluate op1Val first.
- if (op2->gtFlags & GTF_ASG)
- break;
+ // In case op2 assigns to a local var that is used in op1Val, we have to evaluate op1Val first.
+ if (op2->gtFlags & GTF_ASG)
+ {
+ break;
+ }
- // If op2 is simple then evaluate op1 first
+ // If op2 is simple then evaluate op1 first
- if (op2->OperKind() & GTK_LEAF)
- break;
+ if (op2->OperKind() & GTK_LEAF)
+ {
+ break;
+ }
// fall through and set GTF_REVERSE_OPS
- case GT_LCL_VAR:
- case GT_LCL_FLD:
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
- // We evaluate op2 before op1
- bReverseInAssignment = true;
- tree->gtFlags |= GTF_REVERSE_OPS;
- break;
+ // We evaluate op2 before op1
+ bReverseInAssignment = true;
+ tree->gtFlags |= GTF_REVERSE_OPS;
+ break;
- default:
- break;
+ default:
+ break;
}
}
else if (kind & GTK_RELOP)
@@ -4232,22 +4596,23 @@ COMMON_CNS:
/* Float compares remove both operands from the FP stack */
/* Also FP comparison uses EAX for flags */
- if (varTypeIsFloating(op1->TypeGet()))
+ if (varTypeIsFloating(op1->TypeGet()))
{
#if FEATURE_STACK_FP_X87
codeGen->genDecrementFPstkLevel(2);
#endif // FEATURE_STACK_FP_X87
#ifdef _TARGET_XARCH_
- ftreg |= RBM_EAX;
+ ftreg |= RBM_EAX;
#endif
- level++; lvl2++;
+ level++;
+ lvl2++;
}
#if CPU_LONG_USES_REGPAIR
if (varTypeIsLong(op1->TypeGet()))
{
- costEx *= 2; // Longs are twice as expensive
+ costEx *= 2; // Longs are twice as expensive
costSz *= 2;
- }
+ }
#endif
if ((tree->gtFlags & GTF_RELOP_JMP_USED) == 0)
{
@@ -4260,61 +4625,60 @@ COMMON_CNS:
switch (oper)
{
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROL:
- case GT_ROR:
- case GT_ASG_LSH:
- case GT_ASG_RSH:
- case GT_ASG_RSZ:
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROL:
+ case GT_ROR:
+ case GT_ASG_LSH:
+ case GT_ASG_RSH:
+ case GT_ASG_RSZ:
- /* Variable sized shifts are more expensive and use REG_SHIFT */
+ /* Variable sized shifts are more expensive and use REG_SHIFT */
- if (!op2->IsCnsIntOrI())
- {
- costEx += 3;
- if (REG_SHIFT != REG_NA)
+ if (!op2->IsCnsIntOrI())
{
- ftreg |= RBM_SHIFT;
- }
+ costEx += 3;
+ if (REG_SHIFT != REG_NA)
+ {
+ ftreg |= RBM_SHIFT;
+ }
#ifndef _TARGET_64BIT_
- // Variable sized LONG shifts require the use of a helper call
- //
- if (tree->gtType == TYP_LONG)
- {
- level += 5;
- lvl2 += 5;
- costEx += 3 * IND_COST_EX;
- costSz += 4;
- ftreg |= RBM_CALLEE_TRASH;
- }
+ // Variable sized LONG shifts require the use of a helper call
+ //
+ if (tree->gtType == TYP_LONG)
+ {
+ level += 5;
+ lvl2 += 5;
+ costEx += 3 * IND_COST_EX;
+ costSz += 4;
+ ftreg |= RBM_CALLEE_TRASH;
+ }
#endif // !_TARGET_64BIT_
+ }
+ break;
- }
- break;
+ case GT_INTRINSIC:
- case GT_INTRINSIC:
+ switch (tree->gtIntrinsic.gtIntrinsicId)
+ {
+ case CORINFO_INTRINSIC_Atan2:
+ case CORINFO_INTRINSIC_Pow:
+ // These math intrinsics are actually implemented by user calls.
+ // Increase the Sethi 'complexity' by two to reflect the argument
+ // register requirement.
+ level += 2;
+ break;
+ default:
+ assert(!"Unknown binary GT_INTRINSIC operator");
+ break;
+ }
- switch (tree->gtIntrinsic.gtIntrinsicId)
- {
- case CORINFO_INTRINSIC_Atan2:
- case CORINFO_INTRINSIC_Pow:
- // These math intrinsics are actually implemented by user calls.
- // Increase the Sethi 'complexity' by two to reflect the argument
- // register requirement.
- level += 2;
break;
+
default:
- assert(!"Unknown binary GT_INTRINSIC operator");
break;
- }
-
- break;
-
- default:
- break;
}
    /* We need to evaluate constants later as many places in codegen
@@ -4324,16 +4688,15 @@ COMMON_CNS:
such cases, both sides have a level of 0. So encourage constants
to be evaluated last in such cases */
- if ((level == 0) && (level == lvl2) &&
- (op1->OperKind() & GTK_CONST) &&
+ if ((level == 0) && (level == lvl2) && (op1->OperKind() & GTK_CONST) &&
(tree->OperIsCommutative() || tree->OperIsCompare()))
{
lvl2++;
}
/* We try to swap operands if the second one is more expensive */
- bool tryToSwap;
- GenTreePtr opA,opB;
+ bool tryToSwap;
+ GenTreePtr opA, opB;
if (tree->gtFlags & GTF_REVERSE_OPS)
{
@@ -4345,7 +4708,7 @@ COMMON_CNS:
opA = op1;
opB = op2;
}
-
+
if (fgOrder == FGOrderLinear)
{
// Don't swap anything if we're in linear order; we're really just interested in the costs.
@@ -4354,7 +4717,7 @@ COMMON_CNS:
else if (bReverseInAssignment)
{
// Assignments are special, we want the reverseops flags
- // so if possible it was set above.
+ // so if possible it was set above.
tryToSwap = false;
}
else
@@ -4367,106 +4730,104 @@ COMMON_CNS:
{
tryToSwap = (level < lvl2);
}
-
+
// Try to force extra swapping when in the stress mode:
- if (compStressCompile(STRESS_REVERSE_FLAG, 60) &&
- ((tree->gtFlags & GTF_REVERSE_OPS) == 0) &&
- ((op2->OperKind() & GTK_CONST) == 0) )
+ if (compStressCompile(STRESS_REVERSE_FLAG, 60) && ((tree->gtFlags & GTF_REVERSE_OPS) == 0) &&
+ ((op2->OperKind() & GTK_CONST) == 0))
{
tryToSwap = true;
}
-
}
if (tryToSwap)
{
bool canSwap = gtCanSwapOrder(opA, opB);
- if (canSwap)
+ if (canSwap)
{
/* Can we swap the order by commuting the operands? */
switch (oper)
{
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
- if (GenTree::SwapRelop(oper) != oper)
- {
- // SetOper will obliterate the VN for the underlying expression.
- // If we're in VN CSE phase, we don't want to lose that information,
- // so save the value numbers and put them back after the SetOper.
- ValueNumPair vnp = tree->gtVNPair;
- tree->SetOper(GenTree::SwapRelop(oper));
- if (optValnumCSE_phase)
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
+ if (GenTree::SwapRelop(oper) != oper)
{
- tree->gtVNPair = vnp;
+ // SetOper will obliterate the VN for the underlying expression.
+ // If we're in VN CSE phase, we don't want to lose that information,
+ // so save the value numbers and put them back after the SetOper.
+ ValueNumPair vnp = tree->gtVNPair;
+ tree->SetOper(GenTree::SwapRelop(oper));
+ if (optValnumCSE_phase)
+ {
+ tree->gtVNPair = vnp;
+ }
}
- }
- __fallthrough;
+ __fallthrough;
- case GT_ADD:
- case GT_MUL:
+ case GT_ADD:
+ case GT_MUL:
- case GT_OR:
- case GT_XOR:
- case GT_AND:
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
- /* Swap the operands */
+ /* Swap the operands */
- tree->gtOp.gtOp1 = op2;
- tree->gtOp.gtOp2 = op1;
+ tree->gtOp.gtOp1 = op2;
+ tree->gtOp.gtOp2 = op1;
#if FEATURE_STACK_FP_X87
- /* We may have to recompute FP levels */
- if (op1->gtFPlvl || op2->gtFPlvl)
- gtFPstLvlRedo = true;
+ /* We may have to recompute FP levels */
+ if (op1->gtFPlvl || op2->gtFPlvl)
+ gtFPstLvlRedo = true;
#endif // FEATURE_STACK_FP_X87
- break;
+ break;
- case GT_QMARK:
- case GT_COLON:
- case GT_MKREFANY:
- break;
-
- case GT_LIST:
- break;
+ case GT_QMARK:
+ case GT_COLON:
+ case GT_MKREFANY:
+ break;
- case GT_SUB:
-#ifdef LEGACY_BACKEND
- // For LSRA we require that LclVars be "evaluated" just prior to their use,
- // so that if they must be reloaded, it is done at the right place.
- // This means that we allow reverse evaluation for all BINOPs.
- // (Note that this doesn't affect the order of the operands in the instruction).
- if (!isflt)
+ case GT_LIST:
break;
+
+ case GT_SUB:
+#ifdef LEGACY_BACKEND
+ // For LSRA we require that LclVars be "evaluated" just prior to their use,
+ // so that if they must be reloaded, it is done at the right place.
+ // This means that we allow reverse evaluation for all BINOPs.
+ // (Note that this doesn't affect the order of the operands in the instruction).
+ if (!isflt)
+ break;
#endif // LEGACY_BACKEND
- __fallthrough;
+ __fallthrough;
- default:
+ default:
- /* Mark the operand's evaluation order to be swapped */
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- tree->gtFlags &= ~GTF_REVERSE_OPS;
- }
- else
- {
- tree->gtFlags |= GTF_REVERSE_OPS;
- }
+ /* Mark the operand's evaluation order to be swapped */
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ tree->gtFlags &= ~GTF_REVERSE_OPS;
+ }
+ else
+ {
+ tree->gtFlags |= GTF_REVERSE_OPS;
+ }
#if FEATURE_STACK_FP_X87
- /* We may have to recompute FP levels */
- if (op1->gtFPlvl || op2->gtFPlvl)
- gtFPstLvlRedo = true;
+ /* We may have to recompute FP levels */
+ if (op1->gtFPlvl || op2->gtFPlvl)
+ gtFPstLvlRedo = true;
#endif // FEATURE_STACK_FP_X87
- break;
+ break;
}
}
}
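
The hunk above is the operand-swap logic of gtSetEvalOrder: relational operators are rewritten to their swapped form (preserving value numbers during the VN CSE phase), commutative operators have their two children physically exchanged, and every other operator only has GTF_REVERSE_OPS toggled so codegen evaluates the second operand first. A minimal standalone model of that decision, with illustrative names that are not the JIT's:

    // Simplified model of the swap decision above; names are illustrative, not the JIT's.
    enum Oper { OP_ADD, OP_MUL, OP_LT, OP_GT, OP_SUB };

    struct Node
    {
        Oper  oper;
        Node* op1;
        Node* op2;
        bool  reverseOps; // stands in for GTF_REVERSE_OPS
    };

    Oper SwapRelop(Oper o) // the comparison that holds after exchanging its operands
    {
        return (o == OP_LT) ? OP_GT : (o == OP_GT) ? OP_LT : o;
    }

    void SwapEvalOrder(Node* n)
    {
        switch (n->oper)
        {
            case OP_LT:
            case OP_GT:
                n->oper = SwapRelop(n->oper); // relop: flip the comparison direction, then
                [[fallthrough]];              // exchange the children like a commutative op
            case OP_ADD:
            case OP_MUL:
            {
                Node* tmp = n->op1; // commutative: physically exchange the children
                n->op1    = n->op2;
                n->op2    = tmp;
                break;
            }
            default:
                n->reverseOps = !n->reverseOps; // otherwise only flip the evaluation-order flag
                break;
        }
    }
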
@@ -4476,18 +4837,18 @@ COMMON_CNS:
{
unsigned tmpl;
- tmpl = level;
- level = lvl2;
- lvl2 = tmpl;
+ tmpl = level;
+ level = lvl2;
+ lvl2 = tmpl;
}
/* Compute the sethi number for this binary operator */
- if (level < 1)
+ if (level < 1)
{
- level = lvl2;
+ level = lvl2;
}
- else if (level == lvl2)
+ else if (level == lvl2)
{
level += 1;
}
@@ -4497,238 +4858,264 @@ COMMON_CNS:
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
unsigned lvl2; // Scratch variable
- case GT_CALL:
+ case GT_CALL:
- assert(tree->gtFlags & GTF_CALL);
+ assert(tree->gtFlags & GTF_CALL);
- level = 0;
- costEx = 5;
- costSz = 2;
+ level = 0;
+ costEx = 5;
+ costSz = 2;
- /* Evaluate the 'this' argument, if present */
+ /* Evaluate the 'this' argument, if present */
- if (tree->gtCall.gtCallObjp)
- {
- GenTreePtr thisVal = tree->gtCall.gtCallObjp;
+ if (tree->gtCall.gtCallObjp)
+ {
+ GenTreePtr thisVal = tree->gtCall.gtCallObjp;
- lvl2 = gtSetEvalOrder(thisVal);
- if (level < lvl2) level = lvl2;
- costEx += thisVal->gtCostEx;
- costSz += thisVal->gtCostSz + 1;
- ftreg |= thisVal->gtRsvdRegs;
- }
+ lvl2 = gtSetEvalOrder(thisVal);
+ if (level < lvl2)
+ {
+ level = lvl2;
+ }
+ costEx += thisVal->gtCostEx;
+ costSz += thisVal->gtCostSz + 1;
+ ftreg |= thisVal->gtRsvdRegs;
+ }
- /* Evaluate the arguments, right to left */
+ /* Evaluate the arguments, right to left */
- if (tree->gtCall.gtCallArgs)
- {
+ if (tree->gtCall.gtCallArgs)
+ {
#if FEATURE_STACK_FP_X87
- FPlvlSave = codeGen->genGetFPstkLevel();
+ FPlvlSave = codeGen->genGetFPstkLevel();
#endif // FEATURE_STACK_FP_X87
- lvl2 = gtSetListOrder(tree->gtCall.gtCallArgs, false);
- if (level < lvl2) level = lvl2;
- costEx += tree->gtCall.gtCallArgs->gtCostEx;
- costSz += tree->gtCall.gtCallArgs->gtCostSz;
- ftreg |= tree->gtCall.gtCallArgs->gtRsvdRegs;
+ lvl2 = gtSetListOrder(tree->gtCall.gtCallArgs, false);
+ if (level < lvl2)
+ {
+ level = lvl2;
+ }
+ costEx += tree->gtCall.gtCallArgs->gtCostEx;
+ costSz += tree->gtCall.gtCallArgs->gtCostSz;
+ ftreg |= tree->gtCall.gtCallArgs->gtRsvdRegs;
#if FEATURE_STACK_FP_X87
- codeGen->genResetFPstkLevel(FPlvlSave);
+ codeGen->genResetFPstkLevel(FPlvlSave);
#endif // FEATURE_STACK_FP_X87
- }
+ }
- /* Evaluate the temp register arguments list
- * This is a "hidden" list and its only purpose is to
- * extend the life of temps until we make the call */
+ /* Evaluate the temp register arguments list
+ * This is a "hidden" list and its only purpose is to
+ * extend the life of temps until we make the call */
- if (tree->gtCall.gtCallLateArgs)
- {
+ if (tree->gtCall.gtCallLateArgs)
+ {
#if FEATURE_STACK_FP_X87
- FPlvlSave = codeGen->genGetFPstkLevel();
+ FPlvlSave = codeGen->genGetFPstkLevel();
#endif // FEATURE_STACK_FP_X87
- lvl2 = gtSetListOrder(tree->gtCall.gtCallLateArgs, true);
- if (level < lvl2) level = lvl2;
- costEx += tree->gtCall.gtCallLateArgs->gtCostEx;
- costSz += tree->gtCall.gtCallLateArgs->gtCostSz;
- ftreg |= tree->gtCall.gtCallLateArgs->gtRsvdRegs;
+ lvl2 = gtSetListOrder(tree->gtCall.gtCallLateArgs, true);
+ if (level < lvl2)
+ {
+ level = lvl2;
+ }
+ costEx += tree->gtCall.gtCallLateArgs->gtCostEx;
+ costSz += tree->gtCall.gtCallLateArgs->gtCostSz;
+ ftreg |= tree->gtCall.gtCallLateArgs->gtRsvdRegs;
#if FEATURE_STACK_FP_X87
- codeGen->genResetFPstkLevel(FPlvlSave);
+ codeGen->genResetFPstkLevel(FPlvlSave);
#endif // FEATURE_STACK_FP_X87
- }
-
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- {
- // pinvoke-calli cookie is a constant, or constant indirection
- assert(tree->gtCall.gtCallCookie == NULL ||
- tree->gtCall.gtCallCookie->gtOper == GT_CNS_INT ||
- tree->gtCall.gtCallCookie->gtOper == GT_IND);
-
- GenTreePtr indirect = tree->gtCall.gtCallAddr;
+ }
- lvl2 = gtSetEvalOrder(indirect);
- if (level < lvl2) level = lvl2;
- costEx += indirect->gtCostEx + IND_COST_EX;
- costSz += indirect->gtCostSz;
- ftreg |= indirect->gtRsvdRegs;
- }
- else
- {
-#ifdef _TARGET_ARM_
- if ((tree->gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB)
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
{
- // We generate movw/movt/ldr
- costEx += (1 + IND_COST_EX);
- costSz += 8;
- if (tree->gtCall.gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
+ // pinvoke-calli cookie is a constant, or constant indirection
+ assert(tree->gtCall.gtCallCookie == nullptr || tree->gtCall.gtCallCookie->gtOper == GT_CNS_INT ||
+ tree->gtCall.gtCallCookie->gtOper == GT_IND);
+
+ GenTreePtr indirect = tree->gtCall.gtCallAddr;
+
+ lvl2 = gtSetEvalOrder(indirect);
+ if (level < lvl2)
{
- // Must use R12 for the ldr target -- REG_JUMP_THUNK_PARAM
- costSz += 2;
+ level = lvl2;
}
+ costEx += indirect->gtCostEx + IND_COST_EX;
+ costSz += indirect->gtCostSz;
+ ftreg |= indirect->gtRsvdRegs;
}
- else if ((opts.eeFlags & CORJIT_FLG_PREJIT) == 0)
+ else
{
- costEx += 2;
- costSz += 6;
- }
- costSz += 2;
+#ifdef _TARGET_ARM_
+ if ((tree->gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB)
+ {
+ // We generate movw/movt/ldr
+ costEx += (1 + IND_COST_EX);
+ costSz += 8;
+ if (tree->gtCall.gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
+ {
+ // Must use R12 for the ldr target -- REG_JUMP_THUNK_PARAM
+ costSz += 2;
+ }
+ }
+ else if ((opts.eeFlags & CORJIT_FLG_PREJIT) == 0)
+ {
+ costEx += 2;
+ costSz += 6;
+ }
+ costSz += 2;
#endif
#ifdef _TARGET_XARCH_
- costSz += 3;
+ costSz += 3;
#endif
- }
+ }
- level += 1;
+ level += 1;
- unsigned callKind; callKind = (tree->gtFlags & GTF_CALL_VIRT_KIND_MASK);
+ unsigned callKind;
+ callKind = (tree->gtFlags & GTF_CALL_VIRT_KIND_MASK);
- /* Virtual calls are a bit more expensive */
- if (callKind != GTF_CALL_NONVIRT)
- {
- costEx += 2 * IND_COST_EX;
- costSz += 2;
- }
+ /* Virtual calls are a bit more expensive */
+ if (callKind != GTF_CALL_NONVIRT)
+ {
+ costEx += 2 * IND_COST_EX;
+ costSz += 2;
+ }
- /* Virtual stub calls also must reserve the VIRTUAL_STUB_PARAM reg */
- if (callKind == GTF_CALL_VIRT_STUB)
- {
- ftreg |= RBM_VIRTUAL_STUB_PARAM;
- }
+ /* Virtual stub calls also must reserve the VIRTUAL_STUB_PARAM reg */
+ if (callKind == GTF_CALL_VIRT_STUB)
+ {
+ ftreg |= RBM_VIRTUAL_STUB_PARAM;
+ }
#ifdef FEATURE_READYTORUN_COMPILER
#ifdef _TARGET_ARM64_
- if (tree->gtCall.IsR2RRelativeIndir())
- {
- ftreg |= RBM_R2R_INDIRECT_PARAM;
- }
+ if (tree->gtCall.IsR2RRelativeIndir())
+ {
+ ftreg |= RBM_R2R_INDIRECT_PARAM;
+ }
#endif
#endif
#if GTF_CALL_REG_SAVE
- // Normally function calls don't preserve caller save registers
- // and thus are much more expensive.
- // However a few function calls do preserve these registers
- // such as the GC WriteBarrier helper calls.
+ // Normally function calls don't preserve caller save registers
+ // and thus are much more expensive.
+ // However a few function calls do preserve these registers
+ // such as the GC WriteBarrier helper calls.
- if (!(tree->gtFlags & GTF_CALL_REG_SAVE))
+ if (!(tree->gtFlags & GTF_CALL_REG_SAVE))
#endif
- {
- level += 5;
- costEx += 3 * IND_COST_EX;
- ftreg |= RBM_CALLEE_TRASH;
- }
+ {
+ level += 5;
+ costEx += 3 * IND_COST_EX;
+ ftreg |= RBM_CALLEE_TRASH;
+ }
#if FEATURE_STACK_FP_X87
- if (isflt) codeGen->genIncrementFPstkLevel();
+ if (isflt)
+ codeGen->genIncrementFPstkLevel();
#endif // FEATURE_STACK_FP_X87
- break;
+ break;
- case GT_ARR_ELEM:
+ case GT_ARR_ELEM:
- level = gtSetEvalOrder(tree->gtArrElem.gtArrObj);
- costEx = tree->gtArrElem.gtArrObj->gtCostEx;
- costSz = tree->gtArrElem.gtArrObj->gtCostSz;
+ level = gtSetEvalOrder(tree->gtArrElem.gtArrObj);
+ costEx = tree->gtArrElem.gtArrObj->gtCostEx;
+ costSz = tree->gtArrElem.gtArrObj->gtCostSz;
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- {
- lvl2 = gtSetEvalOrder(tree->gtArrElem.gtArrInds[dim]);
- if (level < lvl2) level = lvl2;
- costEx += tree->gtArrElem.gtArrInds[dim]->gtCostEx;
- costSz += tree->gtArrElem.gtArrInds[dim]->gtCostSz;
- }
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ lvl2 = gtSetEvalOrder(tree->gtArrElem.gtArrInds[dim]);
+ if (level < lvl2)
+ {
+ level = lvl2;
+ }
+ costEx += tree->gtArrElem.gtArrInds[dim]->gtCostEx;
+ costSz += tree->gtArrElem.gtArrInds[dim]->gtCostSz;
+ }
#if FEATURE_STACK_FP_X87
- if (isflt) codeGen->genIncrementFPstkLevel();
+ if (isflt)
+ codeGen->genIncrementFPstkLevel();
#endif // FEATURE_STACK_FP_X87
- level += tree->gtArrElem.gtArrRank;
- costEx += 2 + (tree->gtArrElem.gtArrRank * (IND_COST_EX+1));
- costSz += 2 + (tree->gtArrElem.gtArrRank * 2);
- break;
+ level += tree->gtArrElem.gtArrRank;
+ costEx += 2 + (tree->gtArrElem.gtArrRank * (IND_COST_EX + 1));
+ costSz += 2 + (tree->gtArrElem.gtArrRank * 2);
+ break;
- case GT_ARR_OFFSET:
- level = gtSetEvalOrder(tree->gtArrOffs.gtOffset);
- costEx = tree->gtArrOffs.gtOffset->gtCostEx;
- costSz = tree->gtArrOffs.gtOffset->gtCostSz;
- lvl2 = gtSetEvalOrder(tree->gtArrOffs.gtIndex);
- level = max(level, lvl2);
- costEx += tree->gtArrOffs.gtIndex->gtCostEx;
- costSz += tree->gtArrOffs.gtIndex->gtCostSz;
- lvl2 = gtSetEvalOrder(tree->gtArrOffs.gtArrObj);
- level = max(level, lvl2);
- costEx += tree->gtArrOffs.gtArrObj->gtCostEx;
- costSz += tree->gtArrOffs.gtArrObj->gtCostSz;
- break;
+ case GT_ARR_OFFSET:
+ level = gtSetEvalOrder(tree->gtArrOffs.gtOffset);
+ costEx = tree->gtArrOffs.gtOffset->gtCostEx;
+ costSz = tree->gtArrOffs.gtOffset->gtCostSz;
+ lvl2 = gtSetEvalOrder(tree->gtArrOffs.gtIndex);
+ level = max(level, lvl2);
+ costEx += tree->gtArrOffs.gtIndex->gtCostEx;
+ costSz += tree->gtArrOffs.gtIndex->gtCostSz;
+ lvl2 = gtSetEvalOrder(tree->gtArrOffs.gtArrObj);
+ level = max(level, lvl2);
+ costEx += tree->gtArrOffs.gtArrObj->gtCostEx;
+ costSz += tree->gtArrOffs.gtArrObj->gtCostSz;
+ break;
- case GT_CMPXCHG:
+ case GT_CMPXCHG:
- level = gtSetEvalOrder(tree->gtCmpXchg.gtOpLocation);
- costSz = tree->gtCmpXchg.gtOpLocation->gtCostSz;
+ level = gtSetEvalOrder(tree->gtCmpXchg.gtOpLocation);
+ costSz = tree->gtCmpXchg.gtOpLocation->gtCostSz;
- lvl2 = gtSetEvalOrder(tree->gtCmpXchg.gtOpValue);
- if (level < lvl2) level = lvl2;
- costSz += tree->gtCmpXchg.gtOpValue->gtCostSz;
+ lvl2 = gtSetEvalOrder(tree->gtCmpXchg.gtOpValue);
+ if (level < lvl2)
+ {
+ level = lvl2;
+ }
+ costSz += tree->gtCmpXchg.gtOpValue->gtCostSz;
- lvl2 = gtSetEvalOrder(tree->gtCmpXchg.gtOpComparand);
- if (level < lvl2) level = lvl2;
- costSz += tree->gtCmpXchg.gtOpComparand->gtCostSz;
+ lvl2 = gtSetEvalOrder(tree->gtCmpXchg.gtOpComparand);
+ if (level < lvl2)
+ {
+ level = lvl2;
+ }
+ costSz += tree->gtCmpXchg.gtOpComparand->gtCostSz;
- costEx = MAX_COST; //Seriously, what could be more expensive than lock cmpxchg?
- costSz += 5; //size of lock cmpxchg [reg+C], reg
+ costEx = MAX_COST; // Seriously, what could be more expensive than lock cmpxchg?
+ costSz += 5; // size of lock cmpxchg [reg+C], reg
#ifdef _TARGET_XARCH_
- ftreg |= RBM_EAX; //cmpxchg must be evaluated into eax.
+ ftreg |= RBM_EAX; // cmpxchg must be evaluated into eax.
#endif
- break;
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
-#endif // FEATURE_SIMD
- costEx = 4; // cmp reg,reg and jae throw (not taken)
- costSz = 7; // jump to cold section
+ case GT_SIMD_CHK:
+#endif // FEATURE_SIMD
+ costEx = 4; // cmp reg,reg and jae throw (not taken)
+ costSz = 7; // jump to cold section
- level = gtSetEvalOrder(tree->gtBoundsChk.gtArrLen);
- costEx += tree->gtBoundsChk.gtArrLen->gtCostEx;
- costSz += tree->gtBoundsChk.gtArrLen->gtCostSz;
+ level = gtSetEvalOrder(tree->gtBoundsChk.gtArrLen);
+ costEx += tree->gtBoundsChk.gtArrLen->gtCostEx;
+ costSz += tree->gtBoundsChk.gtArrLen->gtCostSz;
- lvl2 = gtSetEvalOrder(tree->gtBoundsChk.gtIndex);
- if (level < lvl2) level = lvl2;
- costEx += tree->gtBoundsChk.gtIndex->gtCostEx;
- costSz += tree->gtBoundsChk.gtIndex->gtCostSz;
+ lvl2 = gtSetEvalOrder(tree->gtBoundsChk.gtIndex);
+ if (level < lvl2)
+ {
+ level = lvl2;
+ }
+ costEx += tree->gtBoundsChk.gtIndex->gtCostEx;
+ costSz += tree->gtBoundsChk.gtIndex->gtCostSz;
- break;
+ break;
- default:
-#ifdef DEBUG
- if (verbose)
- {
- printf("unexpected operator in this tree:\n");
- gtDispTree(tree);
- }
+ default:
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("unexpected operator in this tree:\n");
+ gtDispTree(tree);
+ }
#endif
- NO_WAY("unexpected operator");
+ NO_WAY("unexpected operator");
}
DONE:
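
The large hunk above re-indents the GT_CALL, GT_ARR_ELEM, GT_ARR_OFFSET, GT_CMPXCHG and bounds-check cases of gtSetEvalOrder; structurally they all accumulate child costs the same way: take the maximum child level, sum the children's execution and size costs, then add a fixed overhead for the operator itself. A compact standalone model of the call case (illustrative types; it omits the virtual-call and callee-trash adjustments shown above):

    #include <algorithm>
    #include <vector>

    struct CostedNode
    {
        unsigned level, costEx, costSz;
    };

    // Fold already-costed children into a call-like parent, mirroring the
    // "if (level < lvl2) level = lvl2; costEx += ...; costSz += ..." pattern above.
    CostedNode CostCall(const std::vector<CostedNode>& children)
    {
        CostedNode call{0, 5, 2}; // base cost of the call itself, as in the GT_CALL case
        for (const CostedNode& c : children)
        {
            call.level = std::max(call.level, c.level);
            call.costEx += c.costEx;
            call.costSz += c.costSz;
        }
        call.level += 1; // a call always needs at least one register of its own
        return call;
    }
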
@@ -4759,12 +5146,12 @@ DONE:
#if FEATURE_STACK_FP_X87
/*****************************************************************************/
-void Compiler::gtComputeFPlvls(GenTreePtr tree)
+void Compiler::gtComputeFPlvls(GenTreePtr tree)
{
- genTreeOps oper;
- unsigned kind;
- bool isflt;
- unsigned savFPstkLevel;
+ genTreeOps oper;
+ unsigned kind;
+ bool isflt;
+ unsigned savFPstkLevel;
noway_assert(tree);
noway_assert(tree->gtOper != GT_STMT);
@@ -4777,7 +5164,7 @@ void Compiler::gtComputeFPlvls(GenTreePtr tree)
/* Is this a constant or leaf node? */
- if (kind & (GTK_CONST|GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
{
codeGen->genFPstkLevel += isflt;
goto DONE;
@@ -4785,69 +5172,69 @@ void Compiler::gtComputeFPlvls(GenTreePtr tree)
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
/* Check for some special cases */
switch (oper)
{
- case GT_IND:
+ case GT_IND:
- gtComputeFPlvls(op1);
+ gtComputeFPlvls(op1);
- /* Indirect loads of FP values push a new value on the FP stack */
+ /* Indirect loads of FP values push a new value on the FP stack */
- codeGen->genFPstkLevel += isflt;
- goto DONE;
+ codeGen->genFPstkLevel += isflt;
+ goto DONE;
- case GT_CAST:
+ case GT_CAST:
- gtComputeFPlvls(op1);
+ gtComputeFPlvls(op1);
- /* Casts between non-FP and FP push on / pop from the FP stack */
+ /* Casts between non-FP and FP push on / pop from the FP stack */
- if (varTypeIsFloating(op1->TypeGet()))
- {
- if (isflt == false)
- codeGen->genFPstkLevel--;
- }
- else
- {
- if (isflt != false)
- codeGen->genFPstkLevel++;
- }
+ if (varTypeIsFloating(op1->TypeGet()))
+ {
+ if (isflt == false)
+ codeGen->genFPstkLevel--;
+ }
+ else
+ {
+ if (isflt != false)
+ codeGen->genFPstkLevel++;
+ }
- goto DONE;
+ goto DONE;
- case GT_LIST: /* GT_LIST presumably part of an argument list */
- case GT_COMMA: /* Comma tosses the result of the left operand */
+ case GT_LIST: /* GT_LIST presumably part of an argument list */
+ case GT_COMMA: /* Comma tosses the result of the left operand */
- savFPstkLevel = codeGen->genFPstkLevel;
- gtComputeFPlvls(op1);
- codeGen->genFPstkLevel = savFPstkLevel;
+ savFPstkLevel = codeGen->genFPstkLevel;
+ gtComputeFPlvls(op1);
+ codeGen->genFPstkLevel = savFPstkLevel;
- if (op2)
- gtComputeFPlvls(op2);
+ if (op2)
+ gtComputeFPlvls(op2);
- goto DONE;
+ goto DONE;
- default:
- break;
+ default:
+ break;
}
- if (!op1)
+ if (!op1)
{
- if (!op2)
+ if (!op2)
goto DONE;
gtComputeFPlvls(op2);
goto DONE;
}
- if (!op2)
+ if (!op2)
{
gtComputeFPlvls(op1);
if (oper == GT_ADDR)
@@ -4863,8 +5250,8 @@ void Compiler::gtComputeFPlvls(GenTreePtr tree)
// This is a special case to handle the following
// optimization: conv.i4(round.d(d)) -> round.i(d)
- if (oper== GT_INTRINSIC && tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round &&
- tree->TypeGet()==TYP_INT)
+ if (oper == GT_INTRINSIC && tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round &&
+ tree->TypeGet() == TYP_INT)
{
codeGen->genFPstkLevel--;
}
@@ -4874,15 +5261,15 @@ void Compiler::gtComputeFPlvls(GenTreePtr tree)
/* FP assignments need a bit special handling */
- if (isflt && (kind & GTK_ASGOP))
+ if (isflt && (kind & GTK_ASGOP))
{
/* The target of the assignment won't get pushed */
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
gtComputeFPlvls(op2);
gtComputeFPlvls(op1);
- op1->gtFPlvl--;
+ op1->gtFPlvl--;
codeGen->genFPstkLevel--;
}
else
@@ -4899,7 +5286,7 @@ void Compiler::gtComputeFPlvls(GenTreePtr tree)
/* Here we have a binary operator; visit operands in proper order */
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
gtComputeFPlvls(op2);
gtComputeFPlvls(op1);
@@ -4915,14 +5302,14 @@ void Compiler::gtComputeFPlvls(GenTreePtr tree)
assignments consume 1 value and don't produce any.
*/
- if (isflt)
+ if (isflt)
codeGen->genFPstkLevel--;
/* Float compares remove both operands from the FP stack */
- if (kind & GTK_RELOP)
+ if (kind & GTK_RELOP)
{
- if (varTypeIsFloating(op1->TypeGet()))
+ if (varTypeIsFloating(op1->TypeGet()))
codeGen->genFPstkLevel -= 2;
}
@@ -4931,65 +5318,65 @@ void Compiler::gtComputeFPlvls(GenTreePtr tree)
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
- case GT_FIELD:
- gtComputeFPlvls(tree->gtField.gtFldObj);
- codeGen->genFPstkLevel += isflt;
- break;
+ case GT_FIELD:
+ gtComputeFPlvls(tree->gtField.gtFldObj);
+ codeGen->genFPstkLevel += isflt;
+ break;
- case GT_CALL:
+ case GT_CALL:
- if (tree->gtCall.gtCallObjp)
- gtComputeFPlvls(tree->gtCall.gtCallObjp);
+ if (tree->gtCall.gtCallObjp)
+ gtComputeFPlvls(tree->gtCall.gtCallObjp);
- if (tree->gtCall.gtCallArgs)
- {
- savFPstkLevel = codeGen->genFPstkLevel;
- gtComputeFPlvls(tree->gtCall.gtCallArgs);
- codeGen->genFPstkLevel = savFPstkLevel;
- }
+ if (tree->gtCall.gtCallArgs)
+ {
+ savFPstkLevel = codeGen->genFPstkLevel;
+ gtComputeFPlvls(tree->gtCall.gtCallArgs);
+ codeGen->genFPstkLevel = savFPstkLevel;
+ }
- if (tree->gtCall.gtCallLateArgs)
- {
- savFPstkLevel = codeGen->genFPstkLevel;
- gtComputeFPlvls(tree->gtCall.gtCallLateArgs);
- codeGen->genFPstkLevel = savFPstkLevel;
- }
+ if (tree->gtCall.gtCallLateArgs)
+ {
+ savFPstkLevel = codeGen->genFPstkLevel;
+ gtComputeFPlvls(tree->gtCall.gtCallLateArgs);
+ codeGen->genFPstkLevel = savFPstkLevel;
+ }
- codeGen->genFPstkLevel += isflt;
- break;
+ codeGen->genFPstkLevel += isflt;
+ break;
- case GT_ARR_ELEM:
+ case GT_ARR_ELEM:
- gtComputeFPlvls(tree->gtArrElem.gtArrObj);
+ gtComputeFPlvls(tree->gtArrElem.gtArrObj);
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- gtComputeFPlvls(tree->gtArrElem.gtArrInds[dim]);
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ gtComputeFPlvls(tree->gtArrElem.gtArrInds[dim]);
- /* Loads of FP values push a new value on the FP stack */
- codeGen->genFPstkLevel += isflt;
- break;
+ /* Loads of FP values push a new value on the FP stack */
+ codeGen->genFPstkLevel += isflt;
+ break;
- case GT_CMPXCHG:
- //Evaluate the trees left to right
- gtComputeFPlvls(tree->gtCmpXchg.gtOpLocation);
- gtComputeFPlvls(tree->gtCmpXchg.gtOpValue);
- gtComputeFPlvls(tree->gtCmpXchg.gtOpComparand);
- noway_assert(!isflt);
- break;
+ case GT_CMPXCHG:
+ // Evaluate the trees left to right
+ gtComputeFPlvls(tree->gtCmpXchg.gtOpLocation);
+ gtComputeFPlvls(tree->gtCmpXchg.gtOpValue);
+ gtComputeFPlvls(tree->gtCmpXchg.gtOpComparand);
+ noway_assert(!isflt);
+ break;
- case GT_ARR_BOUNDS_CHECK:
- gtComputeFPlvls(tree->gtBoundsChk.gtArrLen);
- gtComputeFPlvls(tree->gtBoundsChk.gtIndex);
- noway_assert(!isflt);
- break;
+ case GT_ARR_BOUNDS_CHECK:
+ gtComputeFPlvls(tree->gtBoundsChk.gtArrLen);
+ gtComputeFPlvls(tree->gtBoundsChk.gtIndex);
+ noway_assert(!isflt);
+ break;
#ifdef DEBUG
- default:
- noway_assert(!"Unhandled special operator in gtComputeFPlvls()");
- break;
+ default:
+ noway_assert(!"Unhandled special operator in gtComputeFPlvls()");
+ break;
#endif
}
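
gtComputeFPlvls above tracks the x87 FP stack depth through the tree: FP constants, leaves and indirections push one value, FP assignments and casts to integer pop one, FP compares pop both operands, and GT_COMMA/argument lists restore the depth that was current before their discarded operand. A toy model of that bookkeeping (illustrative; the real walk is over GenTree nodes):

    // Toy model of the x87 stack-depth bookkeeping performed by gtComputeFPlvls above.
    struct FpStackTracker
    {
        unsigned depth = 0;

        void LoadFp()    { ++depth; }    // FP constant, leaf or indirection pushes a value
        void StoreFp()   { --depth; }    // FP assignment (or cast to integer) consumes one
        void CompareFp() { depth -= 2; } // FP relop pops both operands

        // GT_COMMA / argument lists restore the depth seen before the discarded operand:
        unsigned Save() const        { return depth; }
        void     Restore(unsigned d) { depth = d; }
    };
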
@@ -5002,7 +5389,6 @@ DONE:
#endif // FEATURE_STACK_FP_X87
-
/*****************************************************************************
*
* If the given tree is an integer constant that can be used
@@ -5011,10 +5397,12 @@ DONE:
* to match the behavior of GetScaleIndexShf().
*/
-unsigned GenTree::GetScaleIndexMul()
+unsigned GenTree::GetScaleIndexMul()
{
- if (IsCnsIntOrI() && jitIsScaleIndexMul(gtIntConCommon.IconValue()) && gtIntConCommon.IconValue()!=1)
+ if (IsCnsIntOrI() && jitIsScaleIndexMul(gtIntConCommon.IconValue()) && gtIntConCommon.IconValue() != 1)
+ {
return (unsigned)gtIntConCommon.IconValue();
+ }
return 0;
}
@@ -5027,10 +5415,12 @@ unsigned GenTree::GetScaleIndexMul()
* the scale factor: 2, 4, or 8. Otherwise, return 0.
*/
-unsigned GenTree::GetScaleIndexShf()
+unsigned GenTree::GetScaleIndexShf()
{
- if (IsCnsIntOrI() && jitIsScaleIndexShift(gtIntConCommon.IconValue()))
+ if (IsCnsIntOrI() && jitIsScaleIndexShift(gtIntConCommon.IconValue()))
+ {
return (unsigned)(1 << gtIntConCommon.IconValue());
+ }
return 0;
}
@@ -5042,25 +5432,27 @@ unsigned GenTree::GetScaleIndexShf()
* returned.
*/
-unsigned GenTree::GetScaledIndex()
+unsigned GenTree::GetScaledIndex()
{
// with (!opts.OptEnabled(CLFLG_CONSTANTFOLD) we can have
// CNS_INT * CNS_INT
- //
+ //
if (gtOp.gtOp1->IsCnsIntOrI())
+ {
return 0;
+ }
switch (gtOper)
{
- case GT_MUL:
- return gtOp.gtOp2->GetScaleIndexMul();
+ case GT_MUL:
+ return gtOp.gtOp2->GetScaleIndexMul();
- case GT_LSH:
- return gtOp.gtOp2->GetScaleIndexShf();
+ case GT_LSH:
+ return gtOp.gtOp2->GetScaleIndexShf();
- default:
- assert(!"GenTree::GetScaledIndex() called with illegal gtOper");
- break;
+ default:
+ assert(!"GenTree::GetScaledIndex() called with illegal gtOper");
+ break;
}
return 0;
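
The three helpers above recognize address-mode scale factors, assuming the usual x86 scales of 2, 4 and 8: GetScaleIndexMul answers for a multiply by a constant, GetScaleIndexShf for a left shift by a constant, and GetScaledIndex dispatches on GT_MUL versus GT_LSH. The same relationship written out directly (a sketch, not the JIT code):

    // Address-mode scales: "index * 8" carries the scale directly, "index << 3"
    // carries log2 of it; both describe the same [base + index*8] addressing mode.
    unsigned ScaleFromMul(long long cns)
    {
        return (cns == 2 || cns == 4 || cns == 8) ? (unsigned)cns : 0;
    }

    unsigned ScaleFromShift(long long cns)
    {
        return (cns == 1 || cns == 2 || cns == 3) ? (unsigned)(1 << cns) : 0;
    }
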
@@ -5073,20 +5465,20 @@ unsigned GenTree::GetScaledIndex()
* "*addr" to the other argument.
*/
-bool GenTree::IsAddWithI32Const(GenTreePtr* addr, int* offset)
+bool GenTree::IsAddWithI32Const(GenTreePtr* addr, int* offset)
{
if (OperGet() == GT_ADD)
{
if (gtOp.gtOp1->IsIntCnsFitsInI32())
{
*offset = (int)gtOp.gtOp1->gtIntCon.gtIconVal;
- *addr = gtOp.gtOp2;
+ *addr = gtOp.gtOp2;
return true;
}
else if (gtOp.gtOp2->IsIntCnsFitsInI32())
{
*offset = (int)gtOp.gtOp2->gtIntCon.gtIconVal;
- *addr = gtOp.gtOp1;
+ *addr = gtOp.gtOp1;
return true;
}
}
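
IsAddWithI32Const above splits an expression of the form ADD(base, 32-bit constant), with the constant in either operand position, into the base tree plus a displacement, which is exactly the shape an [reg + disp] addressing mode wants. A standalone analogue of that split over a toy node type:

    // Standalone analogue of the split above, over a toy expression node.
    struct Expr
    {
        bool  isAdd;
        bool  isConst;
        int   value; // valid only when isConst
        Expr* op1;
        Expr* op2;
    };

    bool SplitAddConst(Expr* e, Expr** base, int* offset)
    {
        if (!e->isAdd)
        {
            return false;
        }
        if (e->op1->isConst)
        {
            *offset = e->op1->value;
            *base   = e->op2;
            return true;
        }
        if (e->op2->isConst)
        {
            *offset = e->op2->value;
            *base   = e->op1;
            return true;
        }
        return false; // neither operand is a constant; nothing to fold into a displacement
    }
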
@@ -5112,7 +5504,7 @@ bool GenTree::IsAddWithI32Const(GenTreePtr* addr, int* offset)
// Notes:
// Use Compiler::fgInsertTreeInListAfter() to insert a whole tree.
-void GenTree::InsertAfterSelf(GenTree* node, GenTreeStmt* stmt /* = nullptr */)
+void GenTree::InsertAfterSelf(GenTree* node, GenTreeStmt* stmt /* = nullptr */)
{
// statements have crazy requirements
assert(this->gtOper != GT_STMT);
@@ -5153,87 +5545,155 @@ void GenTree::InsertAfterSelf(GenTree* node, GenTreeStmt* stmt /* = n
// 'parent' must be non-null
//
// Notes:
-// When FEATURE_MULTIREG_ARGS is defined we can get here with GT_LDOBJ tree.
+// When FEATURE_MULTIREG_ARGS is defined we can get here with GT_LDOBJ tree.
// This happens when we have a struct that is passed in multiple registers.
//
-// Also note that when FEATURE_UNIX_AMD64_STRUCT_PASSING is defined the GT_LDOBJ
+// Also note that when FEATURE_UNIX_AMD64_STRUCT_PASSING is defined the GT_LDOBJ
// later gets converted to a GT_LIST with two GT_LCL_FLDs in Lower/LowerXArch.
//
-GenTreePtr* GenTree::gtGetChildPointer(GenTreePtr parent)
+GenTreePtr* GenTree::gtGetChildPointer(GenTreePtr parent)
{
switch (parent->OperGet())
{
- default:
- if (!parent->OperIsSimple()) return nullptr;
- if (this == parent->gtOp.gtOp1) return &(parent->gtOp.gtOp1);
- if (this == parent->gtOp.gtOp2) return &(parent->gtOp.gtOp2);
- break;
+ default:
+ if (!parent->OperIsSimple())
+ {
+ return nullptr;
+ }
+ if (this == parent->gtOp.gtOp1)
+ {
+ return &(parent->gtOp.gtOp1);
+ }
+ if (this == parent->gtOp.gtOp2)
+ {
+ return &(parent->gtOp.gtOp2);
+ }
+ break;
#if !FEATURE_MULTIREG_ARGS
- // Note that when FEATURE_MULTIREG_ARGS==1
+ // Note that when FEATURE_MULTIREG_ARGS==1
// a GT_OBJ node is handled above by the default case
- case GT_OBJ:
- // Any GT_OBJ with a field must be lowered before this point.
- noway_assert(!"GT_OBJ encountered in GenTree::gtGetChildPointer");
- break;
+ case GT_OBJ:
+ // Any GT_OBJ with a field must be lowered before this point.
+ noway_assert(!"GT_OBJ encountered in GenTree::gtGetChildPointer");
+ break;
#endif // !FEATURE_MULTIREG_ARGS
- case GT_CMPXCHG:
- if (this == parent->gtCmpXchg.gtOpLocation) return &(parent->gtCmpXchg.gtOpLocation);
- if (this == parent->gtCmpXchg.gtOpValue) return &(parent->gtCmpXchg.gtOpValue);
- if (this == parent->gtCmpXchg.gtOpComparand) return &(parent->gtCmpXchg.gtOpComparand);
- break;
+ case GT_CMPXCHG:
+ if (this == parent->gtCmpXchg.gtOpLocation)
+ {
+ return &(parent->gtCmpXchg.gtOpLocation);
+ }
+ if (this == parent->gtCmpXchg.gtOpValue)
+ {
+ return &(parent->gtCmpXchg.gtOpValue);
+ }
+ if (this == parent->gtCmpXchg.gtOpComparand)
+ {
+ return &(parent->gtCmpXchg.gtOpComparand);
+ }
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- if (this == parent->gtBoundsChk.gtArrLen) return &(parent->gtBoundsChk.gtArrLen);
- if (this == parent->gtBoundsChk.gtIndex) return &(parent->gtBoundsChk.gtIndex);
- if (this == parent->gtBoundsChk.gtIndRngFailBB) return &(parent->gtBoundsChk.gtIndRngFailBB);
- break;
+ if (this == parent->gtBoundsChk.gtArrLen)
+ {
+ return &(parent->gtBoundsChk.gtArrLen);
+ }
+ if (this == parent->gtBoundsChk.gtIndex)
+ {
+ return &(parent->gtBoundsChk.gtIndex);
+ }
+ if (this == parent->gtBoundsChk.gtIndRngFailBB)
+ {
+ return &(parent->gtBoundsChk.gtIndRngFailBB);
+ }
+ break;
- case GT_ARR_ELEM:
- if (this == parent->gtArrElem.gtArrObj) return &(parent->gtArrElem.gtArrObj);
- for (int i = 0; i < GT_ARR_MAX_RANK; i++)
- if (this == parent->gtArrElem.gtArrInds[i]) return &(parent->gtArrElem.gtArrInds[i]);
- break;
+ case GT_ARR_ELEM:
+ if (this == parent->gtArrElem.gtArrObj)
+ {
+ return &(parent->gtArrElem.gtArrObj);
+ }
+ for (int i = 0; i < GT_ARR_MAX_RANK; i++)
+ {
+ if (this == parent->gtArrElem.gtArrInds[i])
+ {
+ return &(parent->gtArrElem.gtArrInds[i]);
+ }
+ }
+ break;
- case GT_ARR_OFFSET:
- if (this == parent->gtArrOffs.gtOffset) return &(parent->gtArrOffs.gtOffset);
- if (this == parent->gtArrOffs.gtIndex) return &(parent->gtArrOffs.gtIndex);
- if (this == parent->gtArrOffs.gtArrObj) return &(parent->gtArrOffs.gtArrObj);
- break;
+ case GT_ARR_OFFSET:
+ if (this == parent->gtArrOffs.gtOffset)
+ {
+ return &(parent->gtArrOffs.gtOffset);
+ }
+ if (this == parent->gtArrOffs.gtIndex)
+ {
+ return &(parent->gtArrOffs.gtIndex);
+ }
+ if (this == parent->gtArrOffs.gtArrObj)
+ {
+ return &(parent->gtArrOffs.gtArrObj);
+ }
+ break;
- case GT_FIELD:
- if (this == parent->AsField()->gtFldObj) return &(parent->AsField()->gtFldObj);
- break;
+ case GT_FIELD:
+ if (this == parent->AsField()->gtFldObj)
+ {
+ return &(parent->AsField()->gtFldObj);
+ }
+ break;
- case GT_RET_EXPR:
- if (this == parent->gtRetExpr.gtInlineCandidate) return &(parent->gtRetExpr.gtInlineCandidate);
- break;
+ case GT_RET_EXPR:
+ if (this == parent->gtRetExpr.gtInlineCandidate)
+ {
+ return &(parent->gtRetExpr.gtInlineCandidate);
+ }
+ break;
- case GT_CALL:
+ case GT_CALL:
{
GenTreeCall* call = parent->AsCall();
- if (this == call->gtCallObjp) return &(call->gtCallObjp);
- if (this == call->gtCallArgs) return reinterpret_cast<GenTreePtr*>(&(call->gtCallArgs));
- if (this == call->gtCallLateArgs) return reinterpret_cast<GenTreePtr*>(&(call->gtCallLateArgs));
- if (this == call->gtControlExpr) return &(call->gtControlExpr);
+ if (this == call->gtCallObjp)
+ {
+ return &(call->gtCallObjp);
+ }
+ if (this == call->gtCallArgs)
+ {
+ return reinterpret_cast<GenTreePtr*>(&(call->gtCallArgs));
+ }
+ if (this == call->gtCallLateArgs)
+ {
+ return reinterpret_cast<GenTreePtr*>(&(call->gtCallLateArgs));
+ }
+ if (this == call->gtControlExpr)
+ {
+ return &(call->gtControlExpr);
+ }
if (call->gtCallType == CT_INDIRECT)
{
- if (this == call->gtCallCookie) return &(call->gtCallCookie);
- if (this == call->gtCallAddr) return &(call->gtCallAddr);
+ if (this == call->gtCallCookie)
+ {
+ return &(call->gtCallCookie);
+ }
+ if (this == call->gtCallAddr)
+ {
+ return &(call->gtCallAddr);
+ }
}
}
break;
- case GT_STMT:
- noway_assert(!"Illegal node for gtGetChildPointer()");
- unreached();
+ case GT_STMT:
+ noway_assert(!"Illegal node for gtGetChildPointer()");
+ unreached();
}
return nullptr;
@@ -5258,17 +5718,23 @@ GenTreePtr* GenTree::gtGetChildPointer(GenTreePtr parent)
// To enable the child to be replaced, it accepts an argument, parentChildPointer that, if non-null,
// will be set to point to the child pointer in the parent that points to this node.
-GenTreePtr GenTree::gtGetParent(GenTreePtr** parentChildPtrPtr)
+GenTreePtr GenTree::gtGetParent(GenTreePtr** parentChildPtrPtr)
{
// Find the parent node; it must be after this node in the execution order.
- GenTreePtr * parentChildPtr = nullptr;
- GenTreePtr parent;
+ GenTreePtr* parentChildPtr = nullptr;
+ GenTreePtr parent;
for (parent = gtNext; parent != nullptr; parent = parent->gtNext)
{
parentChildPtr = gtGetChildPointer(parent);
- if (parentChildPtr != nullptr) break;
+ if (parentChildPtr != nullptr)
+ {
+ break;
+ }
+ }
+ if (parentChildPtrPtr != nullptr)
+ {
+ *parentChildPtrPtr = parentChildPtr;
}
- if (parentChildPtrPtr != nullptr) *parentChildPtrPtr = parentChildPtr;
return parent;
}
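
gtGetChildPointer returns the address of the slot in the parent that refers to this node, and gtGetParent locates that parent by scanning forward in gtNext execution order; handing back a GenTreePtr* rather than a GenTreePtr is what lets a caller splice in a replacement without knowing which operand slot the node occupied. A standalone analogue of that idiom:

    // Standalone analogue of the child-pointer idiom above: the parent hands back the
    // address of the slot holding 'child', so a caller can replace the child in place.
    struct TNode
    {
        TNode* op1;
        TNode* op2;

        TNode** GetChildPointer(TNode* child)
        {
            if (child == op1)
            {
                return &op1;
            }
            if (child == op2)
            {
                return &op2;
            }
            return nullptr; // 'child' is not an operand of this node
        }
    };

    void ReplaceChild(TNode* parent, TNode* oldChild, TNode* newChild)
    {
        if (TNode** slot = parent->GetChildPointer(oldChild))
        {
            *slot = newChild; // splice without caring whether it was op1 or op2
        }
    }
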
@@ -5277,134 +5743,154 @@ GenTreePtr GenTree::gtGetParent(GenTreePtr** parentChildPtrPtr)
* Returns true if the given operator may cause an exception.
*/
-bool GenTree::OperMayThrow()
+bool GenTree::OperMayThrow()
{
- GenTreePtr op;
+ GenTreePtr op;
switch (gtOper)
{
- case GT_MOD:
- case GT_DIV:
- case GT_UMOD:
- case GT_UDIV:
+ case GT_MOD:
+ case GT_DIV:
+ case GT_UMOD:
+ case GT_UDIV:
- /* Division with a non-zero, non-minus-one constant does not throw an exception */
+ /* Division with a non-zero, non-minus-one constant does not throw an exception */
- op = gtOp.gtOp2;
+ op = gtOp.gtOp2;
- if (varTypeIsFloating(op->TypeGet()))
- return false; // Floating point division does not throw.
+ if (varTypeIsFloating(op->TypeGet()))
+ {
+ return false; // Floating point division does not throw.
+ }
- // For integers only division by 0 or by -1 can throw
- if (op->IsIntegralConst() && !op->IsIntegralConst(0) && !op->IsIntegralConst(-1))
- return false;
- return true;
+ // For integers only division by 0 or by -1 can throw
+ if (op->IsIntegralConst() && !op->IsIntegralConst(0) && !op->IsIntegralConst(-1))
+ {
+ return false;
+ }
+ return true;
- case GT_IND:
- op = gtOp.gtOp1;
+ case GT_IND:
+ op = gtOp.gtOp1;
- /* Indirections of handles are known to be safe */
- if (op->gtOper == GT_CNS_INT)
- {
- if (op->IsIconHandle())
+ /* Indirections of handles are known to be safe */
+ if (op->gtOper == GT_CNS_INT)
+ {
+ if (op->IsIconHandle())
+ {
+ /* No exception is thrown on this indirection */
+ return false;
+ }
+ }
+ if (this->gtFlags & GTF_IND_NONFAULTING)
+ {
+ return false;
+ }
+ // Non-Null AssertionProp will remove the GTF_EXCEPT flag and mark the GT_IND with GTF_ORDER_SIDEEFF flag
+ if ((this->gtFlags & GTF_ALL_EFFECT) == GTF_ORDER_SIDEEFF)
{
- /* No exception is thrown on this indirection */
return false;
}
- }
- if (this->gtFlags & GTF_IND_NONFAULTING)
- {
- return false;
- }
- // Non-Null AssertionProp will remove the GTF_EXCEPT flag and mark the GT_IND with GTF_ORDER_SIDEEFF flag
- if ((this->gtFlags & GTF_ALL_EFFECT) == GTF_ORDER_SIDEEFF)
- {
- return false;
- }
-
- return true;
- case GT_INTRINSIC:
-        // If this is an intrinsic that represents the object.GetType(), it can throw a NullReferenceException.
- // Report it as may throw.
- // Note: Some of the rest of the existing intrinsics could potentially throw an exception (for example
- // the array and string element access ones). They are handled differently than the GetType intrinsic
- // and are not marked with GTF_EXCEPT. If these are revisited at some point to be marked as GTF_EXCEPT,
- // the code below might need to be specialized to handle them properly.
- if ((this->gtFlags & GTF_EXCEPT) != 0)
- {
return true;
- }
- break;
+ case GT_INTRINSIC:
+            // If this is an intrinsic that represents the object.GetType(), it can throw a NullReferenceException.
+ // Report it as may throw.
+ // Note: Some of the rest of the existing intrinsics could potentially throw an exception (for example
+ // the array and string element access ones). They are handled differently than the GetType intrinsic
+ // and are not marked with GTF_EXCEPT. If these are revisited at some point to be marked as
+ // GTF_EXCEPT,
+ // the code below might need to be specialized to handle them properly.
+ if ((this->gtFlags & GTF_EXCEPT) != 0)
+ {
+ return true;
+ }
+
+ break;
- case GT_OBJ:
- return !Compiler::fgIsIndirOfAddrOfLocal(this);
-
- case GT_ARR_BOUNDS_CHECK:
- case GT_ARR_ELEM:
- case GT_ARR_INDEX:
- case GT_CATCH_ARG:
- case GT_ARR_LENGTH:
- case GT_LCLHEAP:
- case GT_CKFINITE:
- case GT_NULLCHECK:
+ case GT_OBJ:
+ return !Compiler::fgIsIndirOfAddrOfLocal(this);
+
+ case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_ELEM:
+ case GT_ARR_INDEX:
+ case GT_CATCH_ARG:
+ case GT_ARR_LENGTH:
+ case GT_LCLHEAP:
+ case GT_CKFINITE:
+ case GT_NULLCHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- return true;
- default:
- break;
+ return true;
+ default:
+ break;
}
/* Overflow arithmetic operations also throw exceptions */
if (gtOverflowEx())
+ {
return true;
+ }
- return false;
+ return false;
}
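
OperMayThrow above records that floating-point division never throws and that integer division can only throw for a divisor of 0 (DivideByZeroException) or -1 (the INT_MIN / -1 overflow case), so division by any other constant needs no exception edge. That divisor test, stated on its own:

    #include <cstdint>

    // Mirrors the divisor check above: integer division can only throw for a divisor
    // of 0 (DivideByZeroException) or -1 (the INT_MIN / -1 overflow case).
    bool IntDivMayThrow(bool divisorIsConst, int64_t divisor)
    {
        if (!divisorIsConst)
        {
            return true; // unknown divisor: conservatively assume it may throw
        }
        return (divisor == 0) || (divisor == -1);
    }
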
#if DEBUGGABLE_GENTREE
// static
-GenTree::VtablePtr GenTree::s_vtablesForOpers[] = { NULL };
-GenTree::VtablePtr GenTree::s_vtableForOp = NULL;
+GenTree::VtablePtr GenTree::s_vtablesForOpers[] = {nullptr};
+GenTree::VtablePtr GenTree::s_vtableForOp = nullptr;
GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper)
{
- noway_assert (oper < GT_COUNT);
+ noway_assert(oper < GT_COUNT);
- if (s_vtablesForOpers[oper] != NULL) return s_vtablesForOpers[oper];
+ if (s_vtablesForOpers[oper] != nullptr)
+ {
+ return s_vtablesForOpers[oper];
+ }
// Otherwise...
- VtablePtr res = NULL;
+ VtablePtr res = nullptr;
switch (oper)
{
-#define GTSTRUCT_0(nm, tag) /*handle explicitly*/
-#define GTSTRUCT_1(nm, tag) case tag: { GenTree##nm gt; res = *reinterpret_cast<VtablePtr*>(&gt); } break;
-#define GTSTRUCT_2(nm, tag, tag2) /*handle explicitly*/
-#define GTSTRUCT_3(nm, tag, tag2, tag3) /*handle explicitly*/
+#define GTSTRUCT_0(nm, tag) /*handle explicitly*/
+#define GTSTRUCT_1(nm, tag) \
+ case tag: \
+ { \
+ GenTree##nm gt; \
+ res = *reinterpret_cast<VtablePtr*>(&gt); \
+ } \
+ break;
+#define GTSTRUCT_2(nm, tag, tag2) /*handle explicitly*/
+#define GTSTRUCT_3(nm, tag, tag2, tag3) /*handle explicitly*/
#define GTSTRUCT_4(nm, tag, tag2, tag3, tag4) /*handle explicitly*/
-#define GTSTRUCT_N(nm, ...) /*handle explicitly*/
+#define GTSTRUCT_N(nm, ...) /*handle explicitly*/
#include "gtstructs.h"
#if !FEATURE_EH_FUNCLETS
// If FEATURE_EH_FUNCLETS is set, then GT_JMP becomes the only member of Val, and will be handled above.
- case GT_END_LFIN:
- case GT_JMP:
- { GenTreeVal gt(GT_JMP, TYP_INT, 0); res = *reinterpret_cast<VtablePtr*>(&gt); break; }
+ case GT_END_LFIN:
+ case GT_JMP:
+ {
+ GenTreeVal gt(GT_JMP, TYP_INT, 0);
+ res = *reinterpret_cast<VtablePtr*>(&gt);
+ break;
+ }
#endif
- default:
+ default:
{
// Should be unary or binary op.
- if (s_vtableForOp == NULL)
+ if (s_vtableForOp == nullptr)
{
unsigned opKind = OperKind(oper);
assert(!IsExOp(opKind));
assert(OperIsSimple(oper) || OperIsLeaf(oper));
// Need to provide non-null operands.
- Compiler* comp = (Compiler*)_alloca(sizeof(Compiler));
+ Compiler* comp = (Compiler*)_alloca(sizeof(Compiler));
GenTreeIntCon dummyOp(TYP_INT, 0);
- GenTreeOp gt(oper, TYP_INT, &dummyOp, ((opKind & GTK_UNOP) ? NULL : &dummyOp));
+ GenTreeOp gt(oper, TYP_INT, &dummyOp, ((opKind & GTK_UNOP) ? nullptr : &dummyOp));
s_vtableForOp = *reinterpret_cast<VtablePtr*>(&gt);
}
res = s_vtableForOp;
@@ -5421,26 +5907,24 @@ void GenTree::SetVtableForOper(genTreeOps oper)
}
#endif // DEBUGGABLE_GENTREE
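
GetVtableForOper above supports DEBUGGABLE_GENTREE by constructing a throwaway instance of each GenTree subclass and reading its first pointer-sized word as the vtable pointer, caching one result per operator. A minimal illustration of that idiom, shown only to explain the trick; it relies on the common object layout and is not portable C++:

    // Illustration of the "read the vptr out of a stack temporary" idiom above.
    struct Base
    {
        virtual ~Base() {}
        virtual int Tag() { return 0; }
    };
    struct Derived : Base
    {
        int Tag() override { return 1; }
    };

    typedef void* VtablePtr;

    VtablePtr VtableOfDerived()
    {
        Derived d;                                // a throwaway instance on the stack...
        return *reinterpret_cast<VtablePtr*>(&d); // ...whose first word is its vptr on
    }                                             // typical implementations (not portable)
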
-GenTreePtr Compiler::gtNewOperNode(genTreeOps oper,
- var_types type, GenTreePtr op1,
- GenTreePtr op2)
+GenTreePtr Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTreePtr op1, GenTreePtr op2)
{
- assert(op1 != NULL);
- assert(op2 != NULL);
+ assert(op1 != nullptr);
+ assert(op2 != nullptr);
// We should not be allocating nodes that extend GenTreeOp with this;
// should call the appropriate constructor for the extended type.
assert(!GenTree::IsExOp(GenTree::OperKind(oper)));
- GenTreePtr node = new(this, oper) GenTreeOp(oper, type, op1, op2);
+ GenTreePtr node = new (this, oper) GenTreeOp(oper, type, op1, op2);
return node;
}
-GenTreePtr Compiler::gtNewQmarkNode(var_types type, GenTreePtr cond, GenTreePtr colon)
+GenTreePtr Compiler::gtNewQmarkNode(var_types type, GenTreePtr cond, GenTreePtr colon)
{
- compQmarkUsed = true;
- GenTree* result = new(this, GT_QMARK) GenTreeQmark(type, cond, colon, this);
+ compQmarkUsed = true;
+ GenTree* result = new (this, GT_QMARK) GenTreeQmark(type, cond, colon, this);
#ifdef DEBUG
if (compQmarkRationalized)
{
@@ -5450,47 +5934,46 @@ GenTreePtr Compiler::gtNewQmarkNode(var_types type, GenTreePtr cond, G
return result;
}
-GenTreeQmark::GenTreeQmark(var_types type, GenTreePtr cond, GenTreePtr colonOp, Compiler* comp) :
- GenTreeOp(GT_QMARK, type, cond, colonOp),
- gtThenLiveSet(VarSetOps::UninitVal()),
- gtElseLiveSet(VarSetOps::UninitVal())
- {
- // These must follow a specific form.
- assert(cond != NULL && cond->TypeGet() == TYP_INT);
- assert(colonOp != NULL && colonOp->OperGet() == GT_COLON);
-
- comp->impInlineRoot()->compQMarks->Push(this);
- }
+GenTreeQmark::GenTreeQmark(var_types type, GenTreePtr cond, GenTreePtr colonOp, Compiler* comp)
+ : GenTreeOp(GT_QMARK, type, cond, colonOp)
+ , gtThenLiveSet(VarSetOps::UninitVal())
+ , gtElseLiveSet(VarSetOps::UninitVal())
+{
+ // These must follow a specific form.
+ assert(cond != nullptr && cond->TypeGet() == TYP_INT);
+ assert(colonOp != nullptr && colonOp->OperGet() == GT_COLON);
+ comp->impInlineRoot()->compQMarks->Push(this);
+}
-GenTreeIntCon* Compiler::gtNewIconNode(ssize_t value, var_types type)
+GenTreeIntCon* Compiler::gtNewIconNode(ssize_t value, var_types type)
{
- return new(this, GT_CNS_INT) GenTreeIntCon(type, value);
+ return new (this, GT_CNS_INT) GenTreeIntCon(type, value);
}
// return a new node representing the value in a physical register
GenTree* Compiler::gtNewPhysRegNode(regNumber reg, var_types type)
{
assert(genIsValidIntReg(reg) || (reg == REG_SPBASE));
- GenTree *result = new(this, GT_PHYSREG) GenTreePhysReg(reg, type);
+ GenTree* result = new (this, GT_PHYSREG) GenTreePhysReg(reg, type);
return result;
}
-// Return a new node representing a store of a value to a physical register
+// Return a new node representing a store of a value to a physical register
// modifies: child's gtRegNum
GenTree* Compiler::gtNewPhysRegNode(regNumber reg, GenTree* src)
{
assert(genIsValidIntReg(reg));
- GenTree *result = new(this, GT_PHYSREGDST) GenTreeOp(GT_PHYSREGDST, TYP_I_IMPL, src, nullptr);
+ GenTree* result = new (this, GT_PHYSREGDST) GenTreeOp(GT_PHYSREGDST, TYP_I_IMPL, src, nullptr);
result->gtRegNum = reg;
- src->gtRegNum = reg;
+ src->gtRegNum = reg;
return result;
}
#ifndef LEGACY_BACKEND
GenTreePtr Compiler::gtNewJmpTableNode()
{
- GenTreePtr node = new(this, GT_JMPTABLE) GenTreeJumpTable(TYP_INT);
+ GenTreePtr node = new (this, GT_JMPTABLE) GenTreeJumpTable(TYP_INT);
node->gtJumpTable.gtJumpTableAddr = 0;
return node;
}
@@ -5503,29 +5986,29 @@ GenTreePtr Compiler::gtNewJmpTableNode()
* node)
*/
-unsigned Compiler::gtTokenToIconFlags(unsigned token)
+unsigned Compiler::gtTokenToIconFlags(unsigned token)
{
unsigned flags = 0;
switch (TypeFromToken(token))
{
- case mdtTypeRef:
- case mdtTypeDef:
- case mdtTypeSpec:
- flags = GTF_ICON_CLASS_HDL;
- break;
+ case mdtTypeRef:
+ case mdtTypeDef:
+ case mdtTypeSpec:
+ flags = GTF_ICON_CLASS_HDL;
+ break;
- case mdtMethodDef:
- flags = GTF_ICON_METHOD_HDL;
- break;
+ case mdtMethodDef:
+ flags = GTF_ICON_METHOD_HDL;
+ break;
- case mdtFieldDef:
- flags = GTF_ICON_FIELD_HDL;
- break;
+ case mdtFieldDef:
+ flags = GTF_ICON_FIELD_HDL;
+ break;
- default:
- flags = GTF_ICON_TOKEN_HDL;
- break;
+ default:
+ flags = GTF_ICON_TOKEN_HDL;
+ break;
}
return flags;
@@ -5539,64 +6022,59 @@ unsigned Compiler::gtTokenToIconFlags(unsigned token)
* If the handle needs to be accessed via an indirection, pValue points to it.
*/
-GenTreePtr Compiler::gtNewIconEmbHndNode(void * value,
- void * pValue,
- unsigned flags,
- unsigned handle1,
- void * handle2,
- void * compileTimeHandle)
+GenTreePtr Compiler::gtNewIconEmbHndNode(
+ void* value, void* pValue, unsigned flags, unsigned handle1, void* handle2, void* compileTimeHandle)
{
- GenTreePtr node;
+ GenTreePtr node;
assert((!value) != (!pValue));
if (value)
{
- node = gtNewIconHandleNode((size_t)value, flags, /*fieldSeq*/FieldSeqStore::NotAField(), handle1, handle2);
- node->gtIntCon.gtCompileTimeHandle = (size_t) compileTimeHandle;
+ node = gtNewIconHandleNode((size_t)value, flags, /*fieldSeq*/ FieldSeqStore::NotAField(), handle1, handle2);
+ node->gtIntCon.gtCompileTimeHandle = (size_t)compileTimeHandle;
}
else
{
- node = gtNewIconHandleNode((size_t)pValue, flags, /*fieldSeq*/FieldSeqStore::NotAField(), handle1, handle2);
- node->gtIntCon.gtCompileTimeHandle = (size_t) compileTimeHandle;
- node = gtNewOperNode(GT_IND, TYP_I_IMPL, node);
+ node = gtNewIconHandleNode((size_t)pValue, flags, /*fieldSeq*/ FieldSeqStore::NotAField(), handle1, handle2);
+ node->gtIntCon.gtCompileTimeHandle = (size_t)compileTimeHandle;
+ node = gtNewOperNode(GT_IND, TYP_I_IMPL, node);
}
-
return node;
}
/*****************************************************************************/
-GenTreePtr Compiler::gtNewStringLiteralNode(InfoAccessType iat, void * pValue)
+GenTreePtr Compiler::gtNewStringLiteralNode(InfoAccessType iat, void* pValue)
{
- GenTreePtr tree = NULL;
+ GenTreePtr tree = nullptr;
switch (iat)
{
- case IAT_VALUE: // The info value is directly available
- tree = gtNewIconEmbHndNode(pValue, NULL, GTF_ICON_STR_HDL);
- tree->gtType = TYP_REF;
- tree = gtNewOperNode(GT_NOP, TYP_REF, tree); // prevents constant folding
- break;
+ case IAT_VALUE: // The info value is directly available
+ tree = gtNewIconEmbHndNode(pValue, nullptr, GTF_ICON_STR_HDL);
+ tree->gtType = TYP_REF;
+ tree = gtNewOperNode(GT_NOP, TYP_REF, tree); // prevents constant folding
+ break;
- case IAT_PVALUE: // The value needs to be accessed via an indirection
- tree = gtNewIconHandleNode((size_t)pValue, GTF_ICON_STR_HDL);
- // An indirection of a string handle can't cause an exception so don't set GTF_EXCEPT
- tree = gtNewOperNode(GT_IND, TYP_REF, tree);
- tree->gtFlags |= GTF_GLOB_REF;
- break;
+ case IAT_PVALUE: // The value needs to be accessed via an indirection
+ tree = gtNewIconHandleNode((size_t)pValue, GTF_ICON_STR_HDL);
+ // An indirection of a string handle can't cause an exception so don't set GTF_EXCEPT
+ tree = gtNewOperNode(GT_IND, TYP_REF, tree);
+ tree->gtFlags |= GTF_GLOB_REF;
+ break;
- case IAT_PPVALUE: // The value needs to be accessed via a double indirection
- tree = gtNewIconHandleNode((size_t)pValue, GTF_ICON_PSTR_HDL);
- tree = gtNewOperNode(GT_IND, TYP_I_IMPL, tree);
- tree->gtFlags |= GTF_IND_INVARIANT;
- // An indirection of a string handle can't cause an exception so don't set GTF_EXCEPT
- tree = gtNewOperNode(GT_IND, TYP_REF, tree);
- tree->gtFlags |= GTF_GLOB_REF;
- break;
+ case IAT_PPVALUE: // The value needs to be accessed via a double indirection
+ tree = gtNewIconHandleNode((size_t)pValue, GTF_ICON_PSTR_HDL);
+ tree = gtNewOperNode(GT_IND, TYP_I_IMPL, tree);
+ tree->gtFlags |= GTF_IND_INVARIANT;
+ // An indirection of a string handle can't cause an exception so don't set GTF_EXCEPT
+ tree = gtNewOperNode(GT_IND, TYP_REF, tree);
+ tree->gtFlags |= GTF_GLOB_REF;
+ break;
- default:
- assert(!"Unexpected InfoAccessType");
+ default:
+ assert(!"Unexpected InfoAccessType");
}
return tree;
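
gtNewStringLiteralNode above builds one of three shapes depending on how the runtime exposes the literal: the handle itself (IAT_VALUE, wrapped in a GT_NOP to block constant folding), a pointer to the handle (IAT_PVALUE, one non-faulting indirection), or a pointer to a pointer (IAT_PPVALUE, two indirections with the first load marked invariant). The three access patterns, spelled out as plain loads in an illustrative sketch:

    #include <cstdint>

    // The three access shapes produced above, spelled out as plain loads.
    // 'slot' stands for whatever the runtime handed back for the literal.
    intptr_t LoadStringHandle(int accessType, intptr_t slot)
    {
        switch (accessType)
        {
            case 0: // IAT_VALUE: the handle is available directly
                return slot;
            case 1: // IAT_PVALUE: one indirection through a pointer to the handle
                return *reinterpret_cast<intptr_t*>(slot);
            case 2: // IAT_PPVALUE: double indirection; the first load is invariant
                return **reinterpret_cast<intptr_t**>(slot);
            default:
                return 0;
        }
    }
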
@@ -5604,27 +6082,25 @@ GenTreePtr Compiler::gtNewStringLiteralNode(InfoAccessType iat, void *
/*****************************************************************************/
-GenTreePtr Compiler::gtNewLconNode(__int64 value)
+GenTreePtr Compiler::gtNewLconNode(__int64 value)
{
#ifdef _TARGET_64BIT_
- GenTreePtr node = new(this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value);
+ GenTreePtr node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value);
#else
- GenTreePtr node = new(this, GT_CNS_LNG) GenTreeLngCon(value);
+ GenTreePtr node = new (this, GT_CNS_LNG) GenTreeLngCon(value);
#endif
return node;
}
-
-GenTreePtr Compiler::gtNewDconNode(double value)
+GenTreePtr Compiler::gtNewDconNode(double value)
{
- GenTreePtr node = new(this, GT_CNS_DBL) GenTreeDblCon(value);
+ GenTreePtr node = new (this, GT_CNS_DBL) GenTreeDblCon(value);
return node;
}
-
-GenTreePtr Compiler::gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle)
+GenTreePtr Compiler::gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle)
{
#if SMALL_TREE_NODES
@@ -5633,102 +6109,95 @@ GenTreePtr Compiler::gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHa
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_CNS_STR]);
- GenTreePtr node = new(this, GT_CALL) GenTreeStrCon(CPX, scpHandle
- DEBUGARG(/*largeNode*/true));
+ GenTreePtr node = new (this, GT_CALL) GenTreeStrCon(CPX, scpHandle DEBUGARG(/*largeNode*/ true));
#else
- GenTreePtr node = new(this, GT_CNS_STR) GenTreeStrCon(CPX, scpHandle
- DEBUGARG(/*largeNode*/true));
+ GenTreePtr node = new (this, GT_CNS_STR) GenTreeStrCon(CPX, scpHandle DEBUGARG(/*largeNode*/ true));
#endif
return node;
}
-
-GenTreePtr Compiler::gtNewZeroConNode(var_types type)
+GenTreePtr Compiler::gtNewZeroConNode(var_types type)
{
- GenTreePtr zero;
+ GenTreePtr zero;
switch (type)
{
- case TYP_INT:
- zero = gtNewIconNode(0);
- break;
+ case TYP_INT:
+ zero = gtNewIconNode(0);
+ break;
- case TYP_BYREF:
- __fallthrough;
+ case TYP_BYREF:
+ __fallthrough;
- case TYP_REF:
- zero = gtNewIconNode(0);
- zero->gtType = type;
- break;
+ case TYP_REF:
+ zero = gtNewIconNode(0);
+ zero->gtType = type;
+ break;
- case TYP_LONG:
- zero = gtNewLconNode(0);
- break;
+ case TYP_LONG:
+ zero = gtNewLconNode(0);
+ break;
- case TYP_FLOAT:
- zero = gtNewDconNode(0.0);
- zero->gtType = type;
- break;
+ case TYP_FLOAT:
+ zero = gtNewDconNode(0.0);
+ zero->gtType = type;
+ break;
- case TYP_DOUBLE:
- zero = gtNewDconNode(0.0);
- break;
+ case TYP_DOUBLE:
+ zero = gtNewDconNode(0.0);
+ break;
- default:
- assert(!"Bad type");
- zero = nullptr;
- break;
+ default:
+ assert(!"Bad type");
+ zero = nullptr;
+ break;
}
return zero;
}
-GenTreePtr Compiler::gtNewOneConNode(var_types type)
+GenTreePtr Compiler::gtNewOneConNode(var_types type)
{
switch (type)
{
- case TYP_INT:
- case TYP_UINT:
- return gtNewIconNode(1);
+ case TYP_INT:
+ case TYP_UINT:
+ return gtNewIconNode(1);
- case TYP_LONG:
- case TYP_ULONG:
- return gtNewLconNode(1);
-
- case TYP_FLOAT:
+ case TYP_LONG:
+ case TYP_ULONG:
+ return gtNewLconNode(1);
+
+ case TYP_FLOAT:
{
GenTreePtr one = gtNewDconNode(1.0);
- one->gtType = type;
+ one->gtType = type;
return one;
}
- case TYP_DOUBLE:
- return gtNewDconNode(1.0);
+ case TYP_DOUBLE:
+ return gtNewDconNode(1.0);
- default:
- assert(!"Bad type");
- return nullptr;
+ default:
+ assert(!"Bad type");
+ return nullptr;
}
}
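
gtNewZeroConNode and gtNewOneConNode above choose the constant node kind from the requested type: an integer constant for int/ref/byref (retyped as needed), a long constant for TYP_LONG, and a double constant for float/double (retyped for TYP_FLOAT). A standalone sketch of that dispatch with illustrative names:

    // Standalone sketch of the type dispatch above; names are illustrative only.
    enum VarType   { VT_INT, VT_REF, VT_BYREF, VT_LONG, VT_FLOAT, VT_DOUBLE };
    enum ConstKind { CK_INT, CK_LONG, CK_DOUBLE };

    ConstKind ZeroConstKind(VarType t)
    {
        switch (t)
        {
            case VT_INT:
            case VT_REF:
            case VT_BYREF:
                return CK_INT;    // references/byrefs use an integer zero, retyped
            case VT_LONG:
                return CK_LONG;
            case VT_FLOAT:
            case VT_DOUBLE:
                return CK_DOUBLE; // float constants are held as a double node, retyped
        }
        return CK_INT;
    }
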
-GenTreeCall* Compiler::gtNewIndCallNode(GenTreePtr addr,
- var_types type,
- GenTreeArgList* args,
- IL_OFFSETX ilOffset)
+GenTreeCall* Compiler::gtNewIndCallNode(GenTreePtr addr, var_types type, GenTreeArgList* args, IL_OFFSETX ilOffset)
{
- return gtNewCallNode(CT_INDIRECT,(CORINFO_METHOD_HANDLE)addr, type, args, ilOffset);
+ return gtNewCallNode(CT_INDIRECT, (CORINFO_METHOD_HANDLE)addr, type, args, ilOffset);
}
-GenTreeCall* Compiler::gtNewCallNode(gtCallTypes callType,
- CORINFO_METHOD_HANDLE callHnd,
- var_types type,
- GenTreeArgList* args,
- IL_OFFSETX ilOffset)
+GenTreeCall* Compiler::gtNewCallNode(
+ gtCallTypes callType, CORINFO_METHOD_HANDLE callHnd, var_types type, GenTreeArgList* args, IL_OFFSETX ilOffset)
{
- GenTreeCall* node = new(this, GT_CALL) GenTreeCall(genActualType(type));
+ GenTreeCall* node = new (this, GT_CALL) GenTreeCall(genActualType(type));
- node->gtFlags |= (GTF_CALL|GTF_GLOB_REF);
+ node->gtFlags |= (GTF_CALL | GTF_GLOB_REF);
if (args)
- node->gtFlags |= (args->gtFlags & GTF_ALL_EFFECT);
+ {
+ node->gtFlags |= (args->gtFlags & GTF_ALL_EFFECT);
+ }
node->gtCallType = callType;
node->gtCallMethHnd = callHnd;
node->gtCallArgs = args;
@@ -5741,14 +6210,14 @@ GenTreeCall* Compiler::gtNewCallNode(gtCallTypes callType,
if (callType == CT_INDIRECT)
{
- node->gtCallCookie = NULL;
+ node->gtCallCookie = nullptr;
}
else
{
- node->gtInlineCandidateInfo = NULL;
+ node->gtInlineCandidateInfo = nullptr;
}
- node->gtCallLateArgs = nullptr;
- node->gtReturnType = type;
+ node->gtCallLateArgs = nullptr;
+ node->gtReturnType = type;
#ifdef LEGACY_BACKEND
node->gtCallRegUsedMask = RBM_NONE;
@@ -5761,13 +6230,13 @@ GenTreeCall* Compiler::gtNewCallNode(gtCallTypes callType,
#if defined(DEBUG) || defined(INLINE_DATA)
// These get updated after call node is built.
node->gtCall.gtInlineObservation = InlineObservation::CALLEE_UNUSED_INITIAL;
- node->gtCall.gtRawILOffset = BAD_IL_OFFSET;
+ node->gtCall.gtRawILOffset = BAD_IL_OFFSET;
#endif
#ifdef DEBUGGING_SUPPORT
    // Spec: Managed Retval sequence points need to be generated while generating debug info for debuggable code.
//
- // Implementation note: if not generating MRV info genCallSite2ILOffsetMap will be NULL and
+ // Implementation note: if not generating MRV info genCallSite2ILOffsetMap will be NULL and
// codegen will pass BAD_IL_OFFSET as IL offset of a call node to emitter, which will cause emitter
// not to emit IP mapping entry.
if (opts.compDbgCode && opts.compDbgInfo)
@@ -5775,14 +6244,14 @@ GenTreeCall* Compiler::gtNewCallNode(gtCallTypes callType,
// Managed Retval - IL offset of the call. This offset is used to emit a
// CALL_INSTRUCTION type sequence point while emitting corresponding native call.
//
- // TODO-Cleanup:
- // a) (Opt) We need not store this offset if the method doesn't return a
+ // TODO-Cleanup:
+ // a) (Opt) We need not store this offset if the method doesn't return a
// value. Rather it can be made BAD_IL_OFFSET to prevent a sequence
// point being emitted.
//
// b) (Opt) Add new sequence points only if requested by debugger through
// a new boundary type - ICorDebugInfo::BoundaryTypes
- if (genCallSite2ILOffsetMap == NULL)
+ if (genCallSite2ILOffsetMap == nullptr)
{
genCallSite2ILOffsetMap = new (getAllocator()) CallSiteILOffsetTable(getAllocator());
}
@@ -5794,7 +6263,7 @@ GenTreeCall* Compiler::gtNewCallNode(gtCallTypes callType,
}
#endif
- // Initialize gtOtherRegs
+ // Initialize gtOtherRegs
node->ClearOtherRegs();
// Initialize spill flags of gtOtherRegs
@@ -5803,9 +6272,7 @@ GenTreeCall* Compiler::gtNewCallNode(gtCallTypes callType,
return node;
}
-GenTreePtr Compiler::gtNewLclvNode(unsigned lnum,
- var_types type,
- IL_OFFSETX ILoffs)
+GenTreePtr Compiler::gtNewLclvNode(unsigned lnum, var_types type, IL_OFFSETX ILoffs)
{
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
@@ -5813,20 +6280,17 @@ GenTreePtr Compiler::gtNewLclvNode(unsigned lnum,
{
assert(type == lvaTable[lnum].lvType);
}
- GenTreePtr node = new(this, GT_LCL_VAR) GenTreeLclVar(type, lnum, ILoffs
- );
+ GenTreePtr node = new (this, GT_LCL_VAR) GenTreeLclVar(type, lnum, ILoffs);
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
- //assert(lnum < lvaCount);
+ // assert(lnum < lvaCount);
return node;
-}
+}
-GenTreePtr Compiler::gtNewLclLNode(unsigned lnum,
- var_types type,
- IL_OFFSETX ILoffs)
+GenTreePtr Compiler::gtNewLclLNode(unsigned lnum, var_types type, IL_OFFSETX ILoffs)
{
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
@@ -5839,40 +6303,33 @@ GenTreePtr Compiler::gtNewLclLNode(unsigned lnum,
// assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_LCL_VAR]);
- GenTreePtr node = new(this, GT_CALL) GenTreeLclVar(type, lnum, ILoffs
- DEBUGARG(/*largeNode*/true));
+ GenTreePtr node = new (this, GT_CALL) GenTreeLclVar(type, lnum, ILoffs DEBUGARG(/*largeNode*/ true));
#else
- GenTreePtr node = new(this, GT_LCL_VAR) GenTreeLclVar(type, lnum, ILoffs
- DEBUGARG(/*largeNode*/true));
+ GenTreePtr node = new (this, GT_LCL_VAR) GenTreeLclVar(type, lnum, ILoffs DEBUGARG(/*largeNode*/ true));
#endif
return node;
}
-GenTreeLclFld* Compiler::gtNewLclFldNode(unsigned lnum,
- var_types type,
- unsigned offset)
+GenTreeLclFld* Compiler::gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset)
{
- GenTreeLclFld* node = new(this, GT_LCL_FLD) GenTreeLclFld(type, lnum, offset);
+ GenTreeLclFld* node = new (this, GT_LCL_FLD) GenTreeLclFld(type, lnum, offset);
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
- //assert(lnum < lvaCount);
+ // assert(lnum < lvaCount);
node->gtFieldSeq = FieldSeqStore::NotAField();
return node;
}
-
-
-GenTreePtr Compiler::gtNewInlineCandidateReturnExpr(GenTreePtr inlineCandidate,
- var_types type)
+GenTreePtr Compiler::gtNewInlineCandidateReturnExpr(GenTreePtr inlineCandidate, var_types type)
{
assert(GenTree::s_gtNodeSizes[GT_RET_EXPR] == TREE_NODE_SZ_LARGE);
- GenTreePtr node = new(this, GT_RET_EXPR) GenTreeRetExpr(type);
-
+ GenTreePtr node = new (this, GT_RET_EXPR) GenTreeRetExpr(type);
+
node->gtRetExpr.gtInlineCandidate = inlineCandidate;
if (varTypeIsStruct(inlineCandidate))
@@ -5884,18 +6341,17 @@ GenTreePtr Compiler::gtNewInlineCandidateReturnExpr(GenTreePtr inline
// Therefore it should carry the GTF_CALL flag so that all the rules about spilling can apply to it as well.
// For example, impImportLeave or CEE_POP need to spill GT_RET_EXPR before empty the evaluation stack.
node->gtFlags |= GTF_CALL;
-
+
return node;
}
GenTreeArgList* Compiler::gtNewListNode(GenTreePtr op1, GenTreeArgList* op2)
{
- assert((op1 != NULL) && (op1->OperGet() != GT_LIST));
+ assert((op1 != nullptr) && (op1->OperGet() != GT_LIST));
return new (this, GT_LIST) GenTreeArgList(op1, op2);
}
-
/*****************************************************************************
*
* Create a list out of one value.
@@ -5926,20 +6382,22 @@ fgArgTabEntryPtr Compiler::gtArgEntryByArgNum(GenTreePtr call, unsigned argNum)
{
noway_assert(call->IsCall());
fgArgInfoPtr argInfo = call->gtCall.fgArgInfo;
- noway_assert(argInfo != NULL);
+ noway_assert(argInfo != nullptr);
- unsigned argCount = argInfo->ArgCount();
- fgArgTabEntryPtr * argTable = argInfo->ArgTable();
- fgArgTabEntryPtr curArgTabEntry = NULL;
+ unsigned argCount = argInfo->ArgCount();
+ fgArgTabEntryPtr* argTable = argInfo->ArgTable();
+ fgArgTabEntryPtr curArgTabEntry = nullptr;
- for (unsigned i=0; i < argCount; i++)
+ for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->argNum == argNum)
+ {
return curArgTabEntry;
+ }
}
noway_assert(!"gtArgEntryByArgNum: argNum not found");
- return NULL;
+ return nullptr;
}
/*****************************************************************************
@@ -5952,16 +6410,16 @@ fgArgTabEntryPtr Compiler::gtArgEntryByNode(GenTreePtr call, GenTreePtr node)
{
noway_assert(call->IsCall());
fgArgInfoPtr argInfo = call->gtCall.fgArgInfo;
- noway_assert(argInfo != NULL);
+ noway_assert(argInfo != nullptr);
- unsigned argCount = argInfo->ArgCount();
- fgArgTabEntryPtr * argTable = argInfo->ArgTable();
- fgArgTabEntryPtr curArgTabEntry = NULL;
+ unsigned argCount = argInfo->ArgCount();
+ fgArgTabEntryPtr* argTable = argInfo->ArgTable();
+ fgArgTabEntryPtr curArgTabEntry = nullptr;
- for (unsigned i=0; i < argCount; i++)
+ for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
-
+
if (curArgTabEntry->node == node)
{
return curArgTabEntry;
@@ -5972,20 +6430,24 @@ fgArgTabEntryPtr Compiler::gtArgEntryByNode(GenTreePtr call, GenTreePtr node)
return curArgTabEntry;
}
#endif // PROTO_JIT
- else if (curArgTabEntry->parent != NULL)
+ else if (curArgTabEntry->parent != nullptr)
{
assert(curArgTabEntry->parent->IsList());
if (curArgTabEntry->parent->Current() == node)
+ {
return curArgTabEntry;
+ }
}
else // (curArgTabEntry->parent == NULL)
{
if (call->gtCall.gtCallObjp == node)
+ {
return curArgTabEntry;
+ }
}
}
noway_assert(!"gtArgEntryByNode: node not found");
- return NULL;
+ return nullptr;
}
/*****************************************************************************
@@ -5997,30 +6459,31 @@ fgArgTabEntryPtr Compiler::gtArgEntryByLateArgIndex(GenTreePtr call, unsigned la
{
noway_assert(call->IsCall());
fgArgInfoPtr argInfo = call->gtCall.fgArgInfo;
- noway_assert(argInfo != NULL);
+ noway_assert(argInfo != nullptr);
- unsigned argCount = argInfo->ArgCount();
- fgArgTabEntryPtr * argTable = argInfo->ArgTable();
- fgArgTabEntryPtr curArgTabEntry = NULL;
+ unsigned argCount = argInfo->ArgCount();
+ fgArgTabEntryPtr* argTable = argInfo->ArgTable();
+ fgArgTabEntryPtr curArgTabEntry = nullptr;
- for (unsigned i=0; i < argCount; i++)
+ for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->lateArgInx == lateArgInx)
+ {
return curArgTabEntry;
+ }
}
noway_assert(!"gtArgEntryByNode: node not found");
- return NULL;
+ return nullptr;
}
-
/*****************************************************************************
*
* Given an fgArgTabEntryPtr, return true if it is the 'this' pointer argument.
*/
-bool Compiler::gtArgIsThisPtr(fgArgTabEntryPtr argEntry)
+bool Compiler::gtArgIsThisPtr(fgArgTabEntryPtr argEntry)
{
- return (argEntry->parent == NULL);
+ return (argEntry->parent == nullptr);
}
/*****************************************************************************
@@ -6028,7 +6491,7 @@ bool Compiler::gtArgIsThisPtr(fgArgTabEntryPtr argEntry)
* Create a node that will assign 'src' to 'dst'.
*/
-GenTreePtr Compiler::gtNewAssignNode(GenTreePtr dst, GenTreePtr src)
+GenTreePtr Compiler::gtNewAssignNode(GenTreePtr dst, GenTreePtr src)
{
/* Mark the target as being assigned */
@@ -6057,9 +6520,9 @@ GenTreePtr Compiler::gtNewAssignNode(GenTreePtr dst, GenTreePtr src)
// Creates a new Obj node.
GenTreeObj* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
- var_types nodeType = impNormStructType(structHnd);
+ var_types nodeType = impNormStructType(structHnd);
assert(varTypeIsStruct(nodeType));
- GenTreeObj *objNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, structHnd);
+ GenTreeObj* objNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, structHnd);
// An Obj is not a global reference, if it is known to be a local struct.
GenTreeLclVarCommon* lclNode = addr->IsLocalAddrExpr();
if ((lclNode != nullptr) && !lvaIsImplicitByRefLocal(lclNode->gtLclNum))
@@ -6077,21 +6540,18 @@ GenTreeObj* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr
// structHnd - A class token that represents the type of object being copied. May be null
// if FEATURE_SIMD is enabled and the source has a SIMD type.
// isVolatile - Is this marked as volatile memory?
-GenTreeBlkOp* Compiler::gtNewCpObjNode(GenTreePtr dst,
- GenTreePtr src,
- CORINFO_CLASS_HANDLE structHnd,
- bool isVolatile)
+GenTreeBlkOp* Compiler::gtNewCpObjNode(GenTreePtr dst, GenTreePtr src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile)
{
- size_t size = 0;
- unsigned slots = 0;
- unsigned gcPtrCount = 0;
- BYTE * gcPtrs = nullptr;
- var_types type = TYP_STRUCT;
+ size_t size = 0;
+ unsigned slots = 0;
+ unsigned gcPtrCount = 0;
+ BYTE* gcPtrs = nullptr;
+ var_types type = TYP_STRUCT;
- GenTreePtr hndOrSize = nullptr;
+ GenTreePtr hndOrSize = nullptr;
GenTreeBlkOp* result = nullptr;
-
+
bool useCopyObj = false;
// Intermediate SIMD operations may use SIMD types that are not used by the input IL.
@@ -6119,8 +6579,8 @@ GenTreeBlkOp* Compiler::gtNewCpObjNode(GenTreePtr dst,
if (size >= TARGET_POINTER_SIZE)
{
- slots = (unsigned)(roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE);
- gcPtrs = new (this, CMK_ASTNode) BYTE[slots];
+ slots = (unsigned)(roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE);
+ gcPtrs = new (this, CMK_ASTNode) BYTE[slots];
type = impNormStructType(structHnd, gcPtrs, &gcPtrCount);
if (varTypeIsEnregisterableStruct(type))
@@ -6143,26 +6603,26 @@ GenTreeBlkOp* Compiler::gtNewCpObjNode(GenTreePtr dst,
}
}
- // If the class being copied contains any GC pointer we store a class handle
+ // If the class being copied contains any GC pointer we store a class handle
// in the icon, otherwise we store the size in bytes to copy
- //
+ //
genTreeOps op;
if (useCopyObj)
{
        // This will be treated as a cpobj as we need to note GC info.
// Store the class handle and mark the node
- op = GT_COPYOBJ;
+ op = GT_COPYOBJ;
hndOrSize = gtNewIconHandleNode((size_t)structHnd, GTF_ICON_CLASS_HDL);
- result = new (this, GT_COPYOBJ) GenTreeCpObj(gcPtrCount, slots, gcPtrs);
+ result = new (this, GT_COPYOBJ) GenTreeCpObj(gcPtrCount, slots, gcPtrs);
}
else
{
assert(gcPtrCount == 0);
// Doesn't need GC info. Treat operation as a cpblk
- op = GT_COPYBLK;
- hndOrSize = gtNewIconNode(size);
- result = new (this, GT_COPYBLK) GenTreeCpBlk();
+ op = GT_COPYBLK;
+ hndOrSize = gtNewIconNode(size);
+ result = new (this, GT_COPYBLK) GenTreeCpBlk();
result->gtBlkOpGcUnsafe = false;
}
@@ -6186,15 +6646,14 @@ GenTreeBlkOp* Compiler::gtNewCpObjNode(GenTreePtr dst,
// to an assignment of a primitive type.
// This performs the appropriate extension.
-void
-GenTreeIntCon::FixupInitBlkValue(var_types asgType)
+void GenTreeIntCon::FixupInitBlkValue(var_types asgType)
{
assert(varTypeIsIntegralOrI(asgType));
unsigned size = genTypeSize(asgType);
if (size > 1)
{
size_t cns = gtIconVal;
- cns = cns & 0xFF;
+ cns = cns & 0xFF;
cns |= cns << 8;
if (size >= 4)
{
@@ -6231,13 +6690,9 @@ GenTreeIntCon::FixupInitBlkValue(var_types asgType)
// - volatil flag specifies if this node is a volatile memory operation.
//
// This procedure centralizes all the logic to both enforce proper structure and
-// to properly construct any InitBlk/CpBlk node.
-void Compiler::gtBlockOpInit(GenTreePtr result,
- genTreeOps oper,
- GenTreePtr dst,
- GenTreePtr srcOrFillVal,
- GenTreePtr hndOrSize,
- bool volatil)
+// to properly construct any InitBlk/CpBlk node.
+void Compiler::gtBlockOpInit(
+ GenTreePtr result, genTreeOps oper, GenTreePtr dst, GenTreePtr srcOrFillVal, GenTreePtr hndOrSize, bool volatil)
{
assert(GenTree::OperIsBlkOp(oper));
@@ -6247,7 +6702,7 @@ void Compiler::gtBlockOpInit(GenTreePtr result,
#ifdef DEBUG
// If this is a CpObj node, the caller must have already set
// the node additional members (gtGcPtrs, gtGcPtrCount, gtSlots).
- if(hndOrSize->OperGet() == GT_CNS_INT && hndOrSize->IsIconHandle(GTF_ICON_CLASS_HDL))
+ if (hndOrSize->OperGet() == GT_CNS_INT && hndOrSize->IsIconHandle(GTF_ICON_CLASS_HDL))
{
GenTreeCpObj* cpObjNode = result->AsCpObj();
@@ -6259,26 +6714,26 @@ void Compiler::gtBlockOpInit(GenTreePtr result,
for (unsigned i = 0; i < cpObjNode->gtGcPtrCount; ++i)
{
CorInfoGCType t = (CorInfoGCType)cpObjNode->gtGcPtrs[i];
- switch(t)
+ switch (t)
{
- case TYPE_GC_NONE:
- case TYPE_GC_REF:
- case TYPE_GC_BYREF:
- case TYPE_GC_OTHER:
- break;
- default:
- unreached();
+ case TYPE_GC_NONE:
+ case TYPE_GC_REF:
+ case TYPE_GC_BYREF:
+ case TYPE_GC_OTHER:
+ break;
+ default:
+ unreached();
}
}
}
#endif // DEBUG
- /* In the case of CpBlk, we want to avoid generating
+ /* In the case of CpBlk, we want to avoid generating
* nodes where the source and destination are the same
- * because of two reasons, first, is useless, second
- * it introduces issues in liveness and also copying
- * memory from an overlapping memory location is
- * undefined both as per the ECMA standard and also
+     * for two reasons: first, it is useless; second,
+     * it introduces issues in liveness, and copying
+     * memory from an overlapping memory location is
+     * undefined both as per the ECMA standard and as per
     * the memcpy semantics.
*
* NOTE: In this case we'll only detect the case for addr of a local
@@ -6288,7 +6743,7 @@ void Compiler::gtBlockOpInit(GenTreePtr result,
* TODO-Cleanup: though having this logic is goodness (i.e. avoids self-assignment
* of struct vars very early), it was added because fgInterBlockLocalVarLiveness()
* isn't handling self-assignment of struct variables correctly. This issue may not
- * surface if struct promotion is ON (which is the case on x86/arm). But still the
+ * surface if struct promotion is ON (which is the case on x86/arm). But still the
* fundamental issue exists that needs to be addressed.
*/
GenTreePtr currSrc = srcOrFillVal;
@@ -6299,8 +6754,7 @@ void Compiler::gtBlockOpInit(GenTreePtr result,
currDst = currDst->gtOp.gtOp1;
}
- if (currSrc->OperGet() == GT_LCL_VAR &&
- currDst->OperGet() == GT_LCL_VAR &&
+ if (currSrc->OperGet() == GT_LCL_VAR && currDst->OperGet() == GT_LCL_VAR &&
currSrc->gtLclVarCommon.gtLclNum == currDst->gtLclVarCommon.gtLclNum)
{
// Make this a NOP
@@ -6308,7 +6762,6 @@ void Compiler::gtBlockOpInit(GenTreePtr result,
return;
}
-
/* Note that this use of a GT_LIST is different than all others */
    /* in that the GT_LIST is used as a tuple [dest,src] rather */
    /* than being a NULL-terminated list of GT_LIST nodes */
@@ -6328,23 +6781,21 @@ void Compiler::gtBlockOpInit(GenTreePtr result,
result->gtOp.gtOp1->gtFlags |= GTF_REVERSE_OPS;
if (result->gtOper == GT_INITBLK)
- {
- result->gtFlags |= (dst->gtFlags & GTF_EXCEPT) |
- (hndOrSize->gtFlags & GTF_EXCEPT);
+ {
+ result->gtFlags |= (dst->gtFlags & GTF_EXCEPT) | (hndOrSize->gtFlags & GTF_EXCEPT);
}
- else
+ else
{
- result->gtFlags |= (dst->gtFlags & GTF_EXCEPT) |
- (srcOrFillVal->gtFlags & GTF_EXCEPT) |
- (hndOrSize->gtFlags & GTF_EXCEPT);
+ result->gtFlags |=
+ (dst->gtFlags & GTF_EXCEPT) | (srcOrFillVal->gtFlags & GTF_EXCEPT) | (hndOrSize->gtFlags & GTF_EXCEPT);
- // If the class being copied contains any GC pointer we store a class handle
+ // If the class being copied contains any GC pointer we store a class handle
// and we must set the flag GTF_BLK_HASGCPTR, so that the register predictor
// knows that this GT_COPYBLK will use calls to the ByRef Assign helper
//
if ((hndOrSize->OperGet() == GT_CNS_INT) && hndOrSize->IsIconHandle(GTF_ICON_CLASS_HDL))
{
- hndOrSize->gtFlags |= GTF_DONT_CSE; // We can't CSE the class handle
+ hndOrSize->gtFlags |= GTF_DONT_CSE; // We can't CSE the class handle
result->gtFlags |= GTF_BLK_HASGCPTR;
}
}
@@ -6355,30 +6806,25 @@ void Compiler::gtBlockOpInit(GenTreePtr result,
}
#ifdef FEATURE_SIMD
- if (oper == GT_COPYBLK &&
- srcOrFillVal->OperGet() == GT_ADDR &&
- dst->OperGet() == GT_ADDR)
- {
- // If the source is a GT_SIMD node of SIMD type, then the dst lclvar struct
+ if (oper == GT_COPYBLK && srcOrFillVal->OperGet() == GT_ADDR && dst->OperGet() == GT_ADDR)
+ {
+ // If the source is a GT_SIMD node of SIMD type, then the dst lclvar struct
// should be labeled as simd intrinsic related struct.
// This is done so that the morpher can transform any field accesses into
// intrinsics, thus avoiding conflicting access methods (fields vs. whole-register).
-
+
GenTreePtr srcChild = srcOrFillVal->gtGetOp1();
GenTreePtr dstChild = dst->gtGetOp1();
- if (dstChild->OperIsLocal() &&
- varTypeIsStruct(dstChild) &&
- srcChild->OperGet() == GT_SIMD &&
+ if (dstChild->OperIsLocal() && varTypeIsStruct(dstChild) && srcChild->OperGet() == GT_SIMD &&
varTypeIsSIMD(srcChild))
{
- unsigned lclNum = dst->gtGetOp1()->AsLclVarCommon()->GetLclNum();
- LclVarDsc* lclVarDsc = &lvaTable[lclNum];
+ unsigned lclNum = dst->gtGetOp1()->AsLclVarCommon()->GetLclNum();
+ LclVarDsc* lclVarDsc = &lvaTable[lclNum];
lclVarDsc->lvUsedInSIMDIntrinsic = true;
}
-
}
-#endif //FEATURE_SIMD
+#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
@@ -6394,12 +6840,8 @@ void Compiler::gtBlockOpInit(GenTreePtr result,
// Return Value:
// Returns the newly constructed and initialized block operation.
-GenTreeBlkOp*
-Compiler::gtNewBlkOpNode(genTreeOps oper,
- GenTreePtr dst,
- GenTreePtr srcOrFillVal,
- GenTreePtr sizeOrClsTok,
- bool isVolatile)
+GenTreeBlkOp* Compiler::gtNewBlkOpNode(
+ genTreeOps oper, GenTreePtr dst, GenTreePtr srcOrFillVal, GenTreePtr sizeOrClsTok, bool isVolatile)
{
GenTreeBlkOp* result = new (this, oper) GenTreeBlkOp(oper);
gtBlockOpInit(result, oper, dst, srcOrFillVal, sizeOrClsTok, isVolatile);
@@ -6418,118 +6860,121 @@ Compiler::gtNewBlkOpNode(genTreeOps oper,
* complete job if you can't handle this function failing.
*/
-GenTreePtr Compiler::gtClone(GenTree * tree, bool complexOK)
+GenTreePtr Compiler::gtClone(GenTree* tree, bool complexOK)
{
- GenTreePtr copy;
+ GenTreePtr copy;
switch (tree->gtOper)
{
- case GT_CNS_INT:
+ case GT_CNS_INT:
-#if defined (LATE_DISASM)
- if (tree->IsIconHandle())
- {
- copy = gtNewIconHandleNode(tree->gtIntCon.gtIconVal,
- tree->gtFlags,
- tree->gtIntCon.gtFieldSeq,
- tree->gtIntCon.gtIconHdl.gtIconHdl1,
- tree->gtIntCon.gtIconHdl.gtIconHdl2);
- copy->gtIntCon.gtCompileTimeHandle = tree->gtIntCon.gtCompileTimeHandle;
- copy->gtType = tree->gtType;
- }
- else
+#if defined(LATE_DISASM)
+ if (tree->IsIconHandle())
+ {
+ copy = gtNewIconHandleNode(tree->gtIntCon.gtIconVal, tree->gtFlags, tree->gtIntCon.gtFieldSeq,
+ tree->gtIntCon.gtIconHdl.gtIconHdl1, tree->gtIntCon.gtIconHdl.gtIconHdl2);
+ copy->gtIntCon.gtCompileTimeHandle = tree->gtIntCon.gtCompileTimeHandle;
+ copy->gtType = tree->gtType;
+ }
+ else
#endif
- {
- copy = new(this, GT_CNS_INT) GenTreeIntCon(tree->gtType, tree->gtIntCon.gtIconVal, tree->gtIntCon.gtFieldSeq
- );
- copy->gtIntCon.gtCompileTimeHandle = tree->gtIntCon.gtCompileTimeHandle;
- }
- break;
+ {
+ copy = new (this, GT_CNS_INT)
+ GenTreeIntCon(tree->gtType, tree->gtIntCon.gtIconVal, tree->gtIntCon.gtFieldSeq);
+ copy->gtIntCon.gtCompileTimeHandle = tree->gtIntCon.gtCompileTimeHandle;
+ }
+ break;
- case GT_LCL_VAR:
- // Remember that the LclVar node has been cloned. The flag will be set
- // on 'copy' as well.
- tree->gtFlags |= GTF_VAR_CLONED;
- copy = gtNewLclvNode(tree->gtLclVarCommon.gtLclNum, tree->gtType,
- tree->gtLclVar.gtLclILoffs);
- break;
+ case GT_LCL_VAR:
+ // Remember that the LclVar node has been cloned. The flag will be set
+ // on 'copy' as well.
+ tree->gtFlags |= GTF_VAR_CLONED;
+ copy = gtNewLclvNode(tree->gtLclVarCommon.gtLclNum, tree->gtType, tree->gtLclVar.gtLclILoffs);
+ break;
- case GT_LCL_FLD:
- case GT_LCL_FLD_ADDR:
- // Remember that the LclVar node has been cloned. The flag will be set
- // on 'copy' as well.
- tree->gtFlags |= GTF_VAR_CLONED;
- copy = new (this, tree->gtOper)
- GenTreeLclFld(tree->gtOper, tree->TypeGet(), tree->gtLclFld.gtLclNum, tree->gtLclFld.gtLclOffs);
- copy->gtLclFld.gtFieldSeq = tree->gtLclFld.gtFieldSeq;
- break;
+ case GT_LCL_FLD:
+ case GT_LCL_FLD_ADDR:
+ // Remember that the LclVar node has been cloned. The flag will be set
+ // on 'copy' as well.
+ tree->gtFlags |= GTF_VAR_CLONED;
+ copy = new (this, tree->gtOper)
+ GenTreeLclFld(tree->gtOper, tree->TypeGet(), tree->gtLclFld.gtLclNum, tree->gtLclFld.gtLclOffs);
+ copy->gtLclFld.gtFieldSeq = tree->gtLclFld.gtFieldSeq;
+ break;
- case GT_CLS_VAR:
- copy = new(this, GT_CLS_VAR) GenTreeClsVar(tree->gtType, tree->gtClsVar.gtClsVarHnd, tree->gtClsVar.gtFieldSeq);
- break;
+ case GT_CLS_VAR:
+ copy = new (this, GT_CLS_VAR)
+ GenTreeClsVar(tree->gtType, tree->gtClsVar.gtClsVarHnd, tree->gtClsVar.gtFieldSeq);
+ break;
- case GT_REG_VAR:
- assert(!"clone regvar");
+ case GT_REG_VAR:
+ assert(!"clone regvar");
- default:
- if (!complexOK)
- return NULL;
+ default:
+ if (!complexOK)
+ {
+ return nullptr;
+ }
- if (tree->gtOper == GT_FIELD)
- {
- GenTreePtr objp;
+ if (tree->gtOper == GT_FIELD)
+ {
+ GenTreePtr objp;
- // copied from line 9850
+ // copied from line 9850
- objp = 0;
- if (tree->gtField.gtFldObj)
- {
- objp = gtClone(tree->gtField.gtFldObj, false);
- if (!objp)
- return objp;
+ objp = nullptr;
+ if (tree->gtField.gtFldObj)
+ {
+ objp = gtClone(tree->gtField.gtFldObj, false);
+ if (!objp)
+ {
+ return objp;
+ }
+ }
+
+ copy = gtNewFieldRef(tree->TypeGet(), tree->gtField.gtFldHnd, objp, tree->gtField.gtFldOffset);
+ copy->gtField.gtFldMayOverlap = tree->gtField.gtFldMayOverlap;
}
+ else if (tree->gtOper == GT_ADD)
+ {
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
- copy = gtNewFieldRef(tree->TypeGet(),
- tree->gtField.gtFldHnd,
- objp,
- tree->gtField.gtFldOffset);
- copy->gtField.gtFldMayOverlap = tree->gtField.gtFldMayOverlap;
- }
- else if (tree->gtOper == GT_ADD)
- {
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
+ if (op1->OperIsLeaf() && op2->OperIsLeaf())
+ {
+ op1 = gtClone(op1);
+ if (op1 == nullptr)
+ {
+ return nullptr;
+ }
+ op2 = gtClone(op2);
+ if (op2 == nullptr)
+ {
+ return nullptr;
+ }
- if (op1->OperIsLeaf() &&
- op2->OperIsLeaf())
+ copy = gtNewOperNode(GT_ADD, tree->TypeGet(), op1, op2);
+ }
+ else
+ {
+ return nullptr;
+ }
+ }
+ else if (tree->gtOper == GT_ADDR)
{
- op1 = gtClone(op1);
- if (op1 == 0)
- return 0;
- op2 = gtClone(op2);
- if (op2 == 0)
- return 0;
-
- copy = gtNewOperNode(GT_ADD, tree->TypeGet(), op1, op2);
+ GenTreePtr op1 = gtClone(tree->gtOp.gtOp1);
+ if (op1 == nullptr)
+ {
+ return nullptr;
+ }
+ copy = gtNewOperNode(GT_ADDR, tree->TypeGet(), op1);
}
else
{
- return NULL;
+ return nullptr;
}
- }
- else if (tree->gtOper == GT_ADDR)
- {
- GenTreePtr op1 = gtClone(tree->gtOp.gtOp1);
- if (op1 == 0)
- return NULL;
- copy = gtNewOperNode(GT_ADDR, tree->TypeGet(), op1);
- }
- else
- {
- return NULL;
- }
- break;
+ break;
}
copy->gtFlags |= tree->gtFlags & ~GTF_NODE_MASK;
@@ -6547,152 +6992,149 @@ GenTreePtr Compiler::gtClone(GenTree * tree, bool complexOK)
* constant varVal.
*/
-GenTreePtr Compiler::gtCloneExpr(GenTree * tree,
- unsigned addFlags,
- unsigned varNum, // = (unsigned)-1
- int varVal)
+GenTreePtr Compiler::gtCloneExpr(GenTree* tree,
+ unsigned addFlags,
+ unsigned varNum, // = (unsigned)-1
+ int varVal)
{
- if (tree == NULL)
- return NULL;
+ if (tree == nullptr)
+ {
+ return nullptr;
+ }
/* Figure out what kind of a node we have */
- genTreeOps oper = tree->OperGet();
- unsigned kind = tree->OperKind();
- GenTree * copy;
+ genTreeOps oper = tree->OperGet();
+ unsigned kind = tree->OperKind();
+ GenTree* copy;
/* Is this a constant or leaf node? */
- if (kind & (GTK_CONST|GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
{
switch (oper)
{
- case GT_CNS_INT:
+ case GT_CNS_INT:
-#if defined (LATE_DISASM)
- if (tree->IsIconHandle())
- {
- copy = gtNewIconHandleNode(tree->gtIntCon.gtIconVal,
- tree->gtFlags,
- tree->gtIntCon.gtFieldSeq,
- tree->gtIntCon.gtIconFld.gtIconCPX,
- tree->gtIntCon.gtIconFld.gtIconCls);
- copy->gtIntCon.gtCompileTimeHandle = tree->gtIntCon.gtCompileTimeHandle;
- copy->gtType = tree->gtType;
- }
- else
+#if defined(LATE_DISASM)
+ if (tree->IsIconHandle())
+ {
+ copy = gtNewIconHandleNode(tree->gtIntCon.gtIconVal, tree->gtFlags, tree->gtIntCon.gtFieldSeq,
+ tree->gtIntCon.gtIconFld.gtIconCPX, tree->gtIntCon.gtIconFld.gtIconCls);
+ copy->gtIntCon.gtCompileTimeHandle = tree->gtIntCon.gtCompileTimeHandle;
+ copy->gtType = tree->gtType;
+ }
+ else
#endif
- {
- copy = gtNewIconNode (tree->gtIntCon.gtIconVal,
- tree->gtType);
- copy->gtIntCon.gtCompileTimeHandle = tree->gtIntCon.gtCompileTimeHandle;
- copy->gtIntCon.gtFieldSeq = tree->gtIntCon.gtFieldSeq;
- }
- goto DONE;
-
- case GT_CNS_LNG:
- copy = gtNewLconNode(tree->gtLngCon.gtLconVal);
- goto DONE;
+ {
+ copy = gtNewIconNode(tree->gtIntCon.gtIconVal, tree->gtType);
+ copy->gtIntCon.gtCompileTimeHandle = tree->gtIntCon.gtCompileTimeHandle;
+ copy->gtIntCon.gtFieldSeq = tree->gtIntCon.gtFieldSeq;
+ }
+ goto DONE;
- case GT_CNS_DBL:
- copy = gtNewDconNode(tree->gtDblCon.gtDconVal);
- copy->gtType = tree->gtType; // keep the same type
- goto DONE;
+ case GT_CNS_LNG:
+ copy = gtNewLconNode(tree->gtLngCon.gtLconVal);
+ goto DONE;
- case GT_CNS_STR:
- copy = gtNewSconNode(tree->gtStrCon.gtSconCPX, tree->gtStrCon.gtScpHnd);
- goto DONE;
+ case GT_CNS_DBL:
+ copy = gtNewDconNode(tree->gtDblCon.gtDconVal);
+ copy->gtType = tree->gtType; // keep the same type
+ goto DONE;
- case GT_LCL_VAR:
+ case GT_CNS_STR:
+ copy = gtNewSconNode(tree->gtStrCon.gtSconCPX, tree->gtStrCon.gtScpHnd);
+ goto DONE;
- if (tree->gtLclVarCommon.gtLclNum == varNum)
- {
- copy = gtNewIconNode(varVal, tree->gtType);
- }
- else
- {
- // Remember that the LclVar node has been cloned. The flag will
- // be set on 'copy' as well.
- tree->gtFlags |= GTF_VAR_CLONED;
- copy = gtNewLclvNode(tree->gtLclVar.gtLclNum, tree->gtType,
- tree->gtLclVar.gtLclILoffs);
- copy->AsLclVarCommon()->SetSsaNum(tree->AsLclVarCommon()->GetSsaNum());
- }
- copy->gtFlags = tree->gtFlags;
- goto DONE;
+ case GT_LCL_VAR:
- case GT_LCL_FLD:
- if (tree->gtLclFld.gtLclNum == varNum)
- {
- IMPL_LIMITATION("replacing GT_LCL_FLD with a constant");
- }
- else
- {
- // Remember that the LclVar node has been cloned. The flag will
- // be set on 'copy' as well.
- tree->gtFlags |= GTF_VAR_CLONED;
- copy = new(this, GT_LCL_FLD) GenTreeLclFld(tree->TypeGet(), tree->gtLclFld.gtLclNum, tree->gtLclFld.gtLclOffs
- );
- copy->gtLclFld.gtFieldSeq = tree->gtLclFld.gtFieldSeq;
+ if (tree->gtLclVarCommon.gtLclNum == varNum)
+ {
+ copy = gtNewIconNode(varVal, tree->gtType);
+ }
+ else
+ {
+ // Remember that the LclVar node has been cloned. The flag will
+ // be set on 'copy' as well.
+ tree->gtFlags |= GTF_VAR_CLONED;
+ copy = gtNewLclvNode(tree->gtLclVar.gtLclNum, tree->gtType, tree->gtLclVar.gtLclILoffs);
+ copy->AsLclVarCommon()->SetSsaNum(tree->AsLclVarCommon()->GetSsaNum());
+ }
copy->gtFlags = tree->gtFlags;
- }
- goto DONE;
+ goto DONE;
- case GT_CLS_VAR:
- copy = new(this, GT_CLS_VAR) GenTreeClsVar(tree->TypeGet(), tree->gtClsVar.gtClsVarHnd, tree->gtClsVar.gtFieldSeq);
- goto DONE;
+ case GT_LCL_FLD:
+ if (tree->gtLclFld.gtLclNum == varNum)
+ {
+ IMPL_LIMITATION("replacing GT_LCL_FLD with a constant");
+ }
+ else
+ {
+ // Remember that the LclVar node has been cloned. The flag will
+ // be set on 'copy' as well.
+ tree->gtFlags |= GTF_VAR_CLONED;
+ copy = new (this, GT_LCL_FLD)
+ GenTreeLclFld(tree->TypeGet(), tree->gtLclFld.gtLclNum, tree->gtLclFld.gtLclOffs);
+ copy->gtLclFld.gtFieldSeq = tree->gtLclFld.gtFieldSeq;
+ copy->gtFlags = tree->gtFlags;
+ }
+ goto DONE;
- case GT_RET_EXPR:
- copy = gtNewInlineCandidateReturnExpr(tree->gtRetExpr.gtInlineCandidate, tree->gtType);
- goto DONE;
+ case GT_CLS_VAR:
+ copy = new (this, GT_CLS_VAR)
+ GenTreeClsVar(tree->TypeGet(), tree->gtClsVar.gtClsVarHnd, tree->gtClsVar.gtFieldSeq);
+ goto DONE;
- case GT_MEMORYBARRIER:
- copy = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
- goto DONE;
+ case GT_RET_EXPR:
+ copy = gtNewInlineCandidateReturnExpr(tree->gtRetExpr.gtInlineCandidate, tree->gtType);
+ goto DONE;
- case GT_ARGPLACE:
- copy = gtNewArgPlaceHolderNode(tree->gtType, tree->gtArgPlace.gtArgPlaceClsHnd);
- goto DONE;
+ case GT_MEMORYBARRIER:
+ copy = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
+ goto DONE;
- case GT_REG_VAR:
- NO_WAY("Cloning of GT_REG_VAR node not supported");
- goto DONE;
+ case GT_ARGPLACE:
+ copy = gtNewArgPlaceHolderNode(tree->gtType, tree->gtArgPlace.gtArgPlaceClsHnd);
+ goto DONE;
- case GT_FTN_ADDR:
- copy = new (this, oper) GenTreeFptrVal(tree->gtType, tree->gtFptrVal.gtFptrMethod);
+ case GT_REG_VAR:
+ NO_WAY("Cloning of GT_REG_VAR node not supported");
+ goto DONE;
+
+ case GT_FTN_ADDR:
+ copy = new (this, oper) GenTreeFptrVal(tree->gtType, tree->gtFptrVal.gtFptrMethod);
#ifdef FEATURE_READYTORUN_COMPILER
- copy->gtFptrVal.gtEntryPoint = tree->gtFptrVal.gtEntryPoint;
- copy->gtFptrVal.gtLdftnResolvedToken = tree->gtFptrVal.gtLdftnResolvedToken;
+ copy->gtFptrVal.gtEntryPoint = tree->gtFptrVal.gtEntryPoint;
+ copy->gtFptrVal.gtLdftnResolvedToken = tree->gtFptrVal.gtLdftnResolvedToken;
#endif
- goto DONE;
+ goto DONE;
- case GT_CATCH_ARG:
- case GT_NO_OP:
- copy = new (this, oper) GenTree(oper, tree->gtType);
- goto DONE;
+ case GT_CATCH_ARG:
+ case GT_NO_OP:
+ copy = new (this, oper) GenTree(oper, tree->gtType);
+ goto DONE;
#if !FEATURE_EH_FUNCLETS
- case GT_END_LFIN:
+ case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
- case GT_JMP:
- copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->gtVal.gtVal1);
- goto DONE;
-
- case GT_LABEL:
- copy = new (this, oper) GenTreeLabel(tree->gtLabel.gtLabBB);
- goto DONE;
+ case GT_JMP:
+ copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->gtVal.gtVal1);
+ goto DONE;
- default:
- NO_WAY("Cloning of node not supported");
- goto DONE;
+ case GT_LABEL:
+ copy = new (this, oper) GenTreeLabel(tree->gtLabel.gtLabBB);
+ goto DONE;
+ default:
+ NO_WAY("Cloning of node not supported");
+ goto DONE;
}
}
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
/* If necessary, make sure we allocate a "fat" tree node */
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -6702,107 +7144,110 @@ GenTreePtr Compiler::gtCloneExpr(GenTree * tree,
{
/* These nodes sometimes get bashed to "fat" ones */
- case GT_MUL:
- case GT_DIV:
- case GT_MOD:
-
- case GT_UDIV:
- case GT_UMOD:
+ case GT_MUL:
+ case GT_DIV:
+ case GT_MOD:
- // In the implementation of gtNewLargeOperNode you have
- // to give an oper that will create a small node,
- // otherwise it asserts.
- //
- if (GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL)
- {
- copy = gtNewLargeOperNode(oper, tree->TypeGet(), tree->gtOp.gtOp1,
- tree->OperIsBinary() ? tree->gtOp.gtOp2 : NULL);
- }
- else // Always a large tree
- {
- if (tree->OperIsBinary())
+ case GT_UDIV:
+ case GT_UMOD:
+
+ // In the implementation of gtNewLargeOperNode you have
+ // to give an oper that will create a small node,
+ // otherwise it asserts.
+ //
+ if (GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL)
{
- copy = gtNewOperNode(oper, tree->TypeGet(), tree->gtOp.gtOp1, tree->gtOp.gtOp2);
+ copy = gtNewLargeOperNode(oper, tree->TypeGet(), tree->gtOp.gtOp1,
+ tree->OperIsBinary() ? tree->gtOp.gtOp2 : nullptr);
}
- else
+ else // Always a large tree
{
- copy = gtNewOperNode(oper, tree->TypeGet(), tree->gtOp.gtOp1);
+ if (tree->OperIsBinary())
+ {
+ copy = gtNewOperNode(oper, tree->TypeGet(), tree->gtOp.gtOp1, tree->gtOp.gtOp2);
+ }
+ else
+ {
+ copy = gtNewOperNode(oper, tree->TypeGet(), tree->gtOp.gtOp1);
+ }
}
- }
- break;
+ break;
- case GT_CAST:
- copy = new (this, LargeOpOpcode()) GenTreeCast(tree->TypeGet(), tree->gtCast.CastOp(), tree->gtCast.gtCastType
- DEBUGARG(/*largeNode*/TRUE));
- break;
+ case GT_CAST:
+ copy = new (this, LargeOpOpcode()) GenTreeCast(tree->TypeGet(), tree->gtCast.CastOp(),
+ tree->gtCast.gtCastType DEBUGARG(/*largeNode*/ TRUE));
+ break;
// The nodes below this are not bashed, so they can be allocated at their individual sizes.
- case GT_LIST:
- // This is ridiculous, but would go away if we made a stronger distinction between argument lists, whose
- // second argument *must* be an arglist*, and the uses of LIST in copyblk and initblk.
- if (tree->gtOp.gtOp2 != NULL && tree->gtOp.gtOp2->OperGet() == GT_LIST)
- {
- copy = new (this, GT_LIST) GenTreeArgList(tree->gtOp.gtOp1, tree->gtOp.gtOp2->AsArgList());
- }
- else
- {
- copy = new (this, GT_LIST) GenTreeOp(GT_LIST, TYP_VOID, tree->gtOp.gtOp1, tree->gtOp.gtOp2);
- }
- break;
+ case GT_LIST:
+ // This is ridiculous, but would go away if we made a stronger distinction between argument lists, whose
+ // second argument *must* be an arglist*, and the uses of LIST in copyblk and initblk.
+ if (tree->gtOp.gtOp2 != nullptr && tree->gtOp.gtOp2->OperGet() == GT_LIST)
+ {
+ copy = new (this, GT_LIST) GenTreeArgList(tree->gtOp.gtOp1, tree->gtOp.gtOp2->AsArgList());
+ }
+ else
+ {
+ copy = new (this, GT_LIST) GenTreeOp(GT_LIST, TYP_VOID, tree->gtOp.gtOp1, tree->gtOp.gtOp2);
+ }
+ break;
- case GT_INDEX:
+ case GT_INDEX:
{
GenTreeIndex* asInd = tree->AsIndex();
- copy = new (this, GT_INDEX) GenTreeIndex(asInd->TypeGet(), asInd->Arr(), asInd->Index(), asInd->gtIndElemSize);
+ copy = new (this, GT_INDEX)
+ GenTreeIndex(asInd->TypeGet(), asInd->Arr(), asInd->Index(), asInd->gtIndElemSize);
copy->AsIndex()->gtStructElemClass = asInd->gtStructElemClass;
}
break;
- case GT_ALLOCOBJ:
+ case GT_ALLOCOBJ:
{
GenTreeAllocObj* asAllocObj = tree->AsAllocObj();
- copy = new (this, GT_ALLOCOBJ) GenTreeAllocObj(tree->TypeGet(), asAllocObj->gtNewHelper, asAllocObj->gtAllocObjClsHnd, asAllocObj->gtOp1);
+ copy = new (this, GT_ALLOCOBJ) GenTreeAllocObj(tree->TypeGet(), asAllocObj->gtNewHelper,
+ asAllocObj->gtAllocObjClsHnd, asAllocObj->gtOp1);
}
break;
- case GT_ARR_LENGTH:
- copy = new (this, GT_ARR_LENGTH) GenTreeArrLen(tree->TypeGet(), tree->gtOp.gtOp1, tree->gtArrLen.ArrLenOffset());
- break;
+ case GT_ARR_LENGTH:
+ copy = new (this, GT_ARR_LENGTH)
+ GenTreeArrLen(tree->TypeGet(), tree->gtOp.gtOp1, tree->gtArrLen.ArrLenOffset());
+ break;
- case GT_ARR_INDEX:
- copy = new (this, GT_ARR_INDEX)
- GenTreeArrIndex(tree->TypeGet(),
- gtCloneExpr(tree->gtArrIndex.ArrObj(), addFlags, varNum, varVal),
- gtCloneExpr(tree->gtArrIndex.IndexExpr(), addFlags, varNum, varVal),
- tree->gtArrIndex.gtCurrDim,
- tree->gtArrIndex.gtArrRank,
- tree->gtArrIndex.gtArrElemType);
- break;
-
- case GT_QMARK:
- copy = new (this, GT_QMARK) GenTreeQmark(tree->TypeGet(), tree->gtOp.gtOp1, tree->gtOp.gtOp2, this);
- VarSetOps::AssignAllowUninitRhs(this, copy->gtQmark.gtThenLiveSet, tree->gtQmark.gtThenLiveSet);
- VarSetOps::AssignAllowUninitRhs(this, copy->gtQmark.gtElseLiveSet, tree->gtQmark.gtElseLiveSet);
- break;
+ case GT_ARR_INDEX:
+ copy = new (this, GT_ARR_INDEX)
+ GenTreeArrIndex(tree->TypeGet(), gtCloneExpr(tree->gtArrIndex.ArrObj(), addFlags, varNum, varVal),
+ gtCloneExpr(tree->gtArrIndex.IndexExpr(), addFlags, varNum, varVal),
+ tree->gtArrIndex.gtCurrDim, tree->gtArrIndex.gtArrRank,
+ tree->gtArrIndex.gtArrElemType);
+ break;
- case GT_OBJ:
- copy = new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->gtOp.gtOp1, tree->AsObj()->gtClass);
- break;
+ case GT_QMARK:
+ copy = new (this, GT_QMARK) GenTreeQmark(tree->TypeGet(), tree->gtOp.gtOp1, tree->gtOp.gtOp2, this);
+ VarSetOps::AssignAllowUninitRhs(this, copy->gtQmark.gtThenLiveSet, tree->gtQmark.gtThenLiveSet);
+ VarSetOps::AssignAllowUninitRhs(this, copy->gtQmark.gtElseLiveSet, tree->gtQmark.gtElseLiveSet);
+ break;
- case GT_BOX:
- copy = new (this, GT_BOX) GenTreeBox(tree->TypeGet(), tree->gtOp.gtOp1, tree->gtBox.gtAsgStmtWhenInlinedBoxValue);
- break;
+ case GT_OBJ:
+ copy = new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->gtOp.gtOp1, tree->AsObj()->gtClass);
+ break;
- case GT_INTRINSIC:
- copy = new (this, GT_INTRINSIC) GenTreeIntrinsic(tree->TypeGet(), tree->gtOp.gtOp1, tree->gtOp.gtOp2, tree->gtIntrinsic.gtIntrinsicId,
- tree->gtIntrinsic.gtMethodHandle);
+ case GT_BOX:
+ copy = new (this, GT_BOX)
+ GenTreeBox(tree->TypeGet(), tree->gtOp.gtOp1, tree->gtBox.gtAsgStmtWhenInlinedBoxValue);
+ break;
+
+ case GT_INTRINSIC:
+ copy = new (this, GT_INTRINSIC)
+ GenTreeIntrinsic(tree->TypeGet(), tree->gtOp.gtOp1, tree->gtOp.gtOp2,
+ tree->gtIntrinsic.gtIntrinsicId, tree->gtIntrinsic.gtMethodHandle);
#ifdef FEATURE_READYTORUN_COMPILER
- copy->gtIntrinsic.gtEntryPoint = tree->gtIntrinsic.gtEntryPoint;
+ copy->gtIntrinsic.gtEntryPoint = tree->gtIntrinsic.gtEntryPoint;
#endif
- break;
+ break;
- case GT_COPYOBJ:
+ case GT_COPYOBJ:
{
GenTreeCpObj* cpObjOp = tree->AsCpObj();
assert(cpObjOp->gtGcPtrCount > 0);
@@ -6810,70 +7255,61 @@ GenTreePtr Compiler::gtCloneExpr(GenTree * tree,
}
break;
- case GT_INITBLK:
+ case GT_INITBLK:
{
GenTreeInitBlk* initBlkOp = tree->AsInitBlk();
- copy = gtNewBlkOpNode(oper,
- initBlkOp->Dest(),
- initBlkOp->InitVal(),
- initBlkOp->Size(),
- initBlkOp->IsVolatile());
+ copy = gtNewBlkOpNode(oper, initBlkOp->Dest(), initBlkOp->InitVal(), initBlkOp->Size(),
+ initBlkOp->IsVolatile());
}
break;
- case GT_COPYBLK:
+ case GT_COPYBLK:
{
GenTreeCpBlk* cpBlkOp = tree->AsCpBlk();
- copy = gtNewBlkOpNode(oper,
- cpBlkOp->Dest(),
- cpBlkOp->Source(),
- cpBlkOp->Size(),
- cpBlkOp->IsVolatile());
+ copy = gtNewBlkOpNode(oper, cpBlkOp->Dest(), cpBlkOp->Source(), cpBlkOp->Size(), cpBlkOp->IsVolatile());
copy->AsCpBlk()->gtBlkOpGcUnsafe = cpBlkOp->gtBlkOpGcUnsafe;
}
break;
- case GT_LEA:
+ case GT_LEA:
{
GenTreeAddrMode* addrModeOp = tree->AsAddrMode();
- copy = new(this, GT_LEA) GenTreeAddrMode(addrModeOp->TypeGet(),
- addrModeOp->Base(),
- addrModeOp->Index(),
- addrModeOp->gtScale,
- addrModeOp->gtOffset);
+ copy =
+ new (this, GT_LEA) GenTreeAddrMode(addrModeOp->TypeGet(), addrModeOp->Base(), addrModeOp->Index(),
+ addrModeOp->gtScale, addrModeOp->gtOffset);
}
break;
- case GT_COPY:
- case GT_RELOAD:
+ case GT_COPY:
+ case GT_RELOAD:
{
- copy = new(this, oper) GenTreeCopyOrReload(oper, tree->TypeGet(), tree->gtGetOp1());
+ copy = new (this, oper) GenTreeCopyOrReload(oper, tree->TypeGet(), tree->gtGetOp1());
}
break;
#ifdef FEATURE_SIMD
- case GT_SIMD:
+ case GT_SIMD:
{
- GenTreeSIMD *simdOp = tree->AsSIMD();
- copy = gtNewSIMDNode(simdOp->TypeGet(), simdOp->gtGetOp1(), simdOp->gtGetOp2(),
+ GenTreeSIMD* simdOp = tree->AsSIMD();
+ copy = gtNewSIMDNode(simdOp->TypeGet(), simdOp->gtGetOp1(), simdOp->gtGetOp2(),
simdOp->gtSIMDIntrinsicID, simdOp->gtSIMDBaseType, simdOp->gtSIMDSize);
}
break;
#endif
- default:
- assert(!GenTree::IsExOp(tree->OperKind()) && tree->OperIsSimple());
- // We're in the SimpleOp case, so it's always unary or binary.
- if (GenTree::OperIsUnary(tree->OperGet()))
- {
- copy = gtNewOperNode(oper, tree->TypeGet(), tree->gtOp.gtOp1, /*doSimplifications*/false);
- }
- else
- {
- assert(GenTree::OperIsBinary(tree->OperGet()));
- copy = gtNewOperNode(oper, tree->TypeGet(), tree->gtOp.gtOp1, tree->gtOp.gtOp2);
- }
- break;
+ default:
+ assert(!GenTree::IsExOp(tree->OperKind()) && tree->OperIsSimple());
+ // We're in the SimpleOp case, so it's always unary or binary.
+ if (GenTree::OperIsUnary(tree->OperGet()))
+ {
+ copy = gtNewOperNode(oper, tree->TypeGet(), tree->gtOp.gtOp1, /*doSimplifications*/ false);
+ }
+ else
+ {
+ assert(GenTree::OperIsBinary(tree->OperGet()));
+ copy = gtNewOperNode(oper, tree->TypeGet(), tree->gtOp.gtOp1, tree->gtOp.gtOp2);
+ }
+ break;
}
#else
// We're in the SimpleOp case, so it's always unary or binary.
@@ -6890,62 +7326,64 @@ GenTreePtr Compiler::gtCloneExpr(GenTree * tree,
copy->gtFlags |= (tree->gtFlags & GTF_UNSIGNED);
}
- if (tree->gtOp.gtOp1)
+ if (tree->gtOp.gtOp1)
{
copy->gtOp.gtOp1 = gtCloneExpr(tree->gtOp.gtOp1, addFlags, varNum, varVal);
}
- if (tree->gtGetOp2())
+ if (tree->gtGetOp2())
{
copy->gtOp.gtOp2 = gtCloneExpr(tree->gtOp.gtOp2, addFlags, varNum, varVal);
}
-
/* Flags */
addFlags |= tree->gtFlags;
// Copy any node annotations, if necessary.
switch (tree->gtOper)
{
- case GT_ASG:
+ case GT_ASG:
{
IndirectAssignmentAnnotation* pIndirAnnot = nullptr;
- if (m_indirAssignMap != NULL && GetIndirAssignMap()->Lookup(tree, &pIndirAnnot))
+ if (m_indirAssignMap != nullptr && GetIndirAssignMap()->Lookup(tree, &pIndirAnnot))
{
- IndirectAssignmentAnnotation* pNewIndirAnnot =
- new (this, CMK_Unknown) IndirectAssignmentAnnotation(pIndirAnnot->m_lclNum,
- pIndirAnnot->m_fieldSeq,
- pIndirAnnot->m_isEntire);
+ IndirectAssignmentAnnotation* pNewIndirAnnot = new (this, CMK_Unknown)
+ IndirectAssignmentAnnotation(pIndirAnnot->m_lclNum, pIndirAnnot->m_fieldSeq,
+ pIndirAnnot->m_isEntire);
GetIndirAssignMap()->Set(copy, pNewIndirAnnot);
}
}
break;
- case GT_STOREIND:
- case GT_IND:
- if (tree->gtFlags & GTF_IND_ARR_INDEX)
- {
- ArrayInfo arrInfo;
- bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
- assert(b);
- GetArrayInfoMap()->Set(copy, arrInfo);
- }
- break;
+ case GT_STOREIND:
+ case GT_IND:
+ if (tree->gtFlags & GTF_IND_ARR_INDEX)
+ {
+ ArrayInfo arrInfo;
+ bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
+ assert(b);
+ GetArrayInfoMap()->Set(copy, arrInfo);
+ }
+ break;
- default:
- break;
+ default:
+ break;
}
-#ifdef DEBUG
+#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
// Effects flags propagate upwards.
if (copy->gtOp.gtOp1 != nullptr)
+ {
copy->gtFlags |= (copy->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
+ }
if (copy->gtGetOp2() != nullptr)
+ {
copy->gtFlags |= (copy->gtGetOp2()->gtFlags & GTF_ALL_EFFECT);
+ }
// The early morph for TailCall creates a GT_NOP with GTF_REG_VAL flag set
// Thus we have to copy the gtRegNum/gtRegPair value if we clone it here.
@@ -6958,10 +7396,10 @@ GenTreePtr Compiler::gtCloneExpr(GenTree * tree,
// We can call gtCloneExpr() before we have called fgMorph when we expand a GT_INDEX node in fgMorphArrayIndex()
// The method gtFoldExpr() expects to be run after fgMorph so it will set the GTF_DEBUG_NODE_MORPHED
// flag on nodes that it adds/modifies. Then when we call fgMorph we will assert.
- // We really only will need to fold when this method is used to replace references to
+    // We really only need to fold when this method is used to replace references to a
// local variable with an integer.
//
- if (varNum != (unsigned) -1)
+ if (varNum != (unsigned)-1)
{
/* Try to do some folding */
copy = gtFoldExpr(copy);
@@ -6972,154 +7410,152 @@ GenTreePtr Compiler::gtCloneExpr(GenTree * tree,
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
- case GT_STMT:
- copy = gtCloneExpr(tree->gtStmt.gtStmtExpr, addFlags, varNum, varVal);
- copy = gtNewStmt(copy, tree->gtStmt.gtStmtILoffsx);
- goto DONE;
+ case GT_STMT:
+ copy = gtCloneExpr(tree->gtStmt.gtStmtExpr, addFlags, varNum, varVal);
+ copy = gtNewStmt(copy, tree->gtStmt.gtStmtILoffsx);
+ goto DONE;
- case GT_CALL:
+ case GT_CALL:
- copy = new(this, GT_CALL) GenTreeCall(tree->TypeGet());
+ copy = new (this, GT_CALL) GenTreeCall(tree->TypeGet());
- copy->gtCall.gtCallObjp = tree->gtCall.gtCallObjp ? gtCloneExpr(tree->gtCall.gtCallObjp, addFlags, varNum, varVal) : NULL;
- copy->gtCall.gtCallArgs = tree->gtCall.gtCallArgs ? gtCloneExpr(tree->gtCall.gtCallArgs, addFlags, varNum, varVal)->AsArgList() : NULL;
- copy->gtCall.gtCallMoreFlags= tree->gtCall.gtCallMoreFlags;
- copy->gtCall.gtCallLateArgs = tree->gtCall.gtCallLateArgs ? gtCloneExpr(tree->gtCall.gtCallLateArgs, addFlags, varNum, varVal)->AsArgList() : NULL;
+ copy->gtCall.gtCallObjp =
+ tree->gtCall.gtCallObjp ? gtCloneExpr(tree->gtCall.gtCallObjp, addFlags, varNum, varVal) : nullptr;
+ copy->gtCall.gtCallArgs = tree->gtCall.gtCallArgs
+ ? gtCloneExpr(tree->gtCall.gtCallArgs, addFlags, varNum, varVal)->AsArgList()
+ : nullptr;
+ copy->gtCall.gtCallMoreFlags = tree->gtCall.gtCallMoreFlags;
+ copy->gtCall.gtCallLateArgs =
+ tree->gtCall.gtCallLateArgs
+ ? gtCloneExpr(tree->gtCall.gtCallLateArgs, addFlags, varNum, varVal)->AsArgList()
+ : nullptr;
#if !FEATURE_FIXED_OUT_ARGS
- copy->gtCall.regArgList = tree->gtCall.regArgList;
- copy->gtCall.regArgListCount= tree->gtCall.regArgListCount;
+ copy->gtCall.regArgList = tree->gtCall.regArgList;
+ copy->gtCall.regArgListCount = tree->gtCall.regArgListCount;
#endif
- // The call sig comes from the EE and doesn't change throughout the compilation process, meaning
- // we only really need one physical copy of it. Therefore a shallow pointer copy will suffice.
- // (Note that this still holds even if the tree we are cloning was created by an inlinee compiler,
- // because the inlinee still uses the inliner's memory allocator anyway.)
- copy->gtCall.callSig = tree->gtCall.callSig;
+ // The call sig comes from the EE and doesn't change throughout the compilation process, meaning
+ // we only really need one physical copy of it. Therefore a shallow pointer copy will suffice.
+ // (Note that this still holds even if the tree we are cloning was created by an inlinee compiler,
+ // because the inlinee still uses the inliner's memory allocator anyway.)
+ copy->gtCall.callSig = tree->gtCall.callSig;
- copy->gtCall.gtCallType = tree->gtCall.gtCallType;
- copy->gtCall.gtReturnType = tree->gtCall.gtReturnType;
- copy->gtCall.gtControlExpr = tree->gtCall.gtControlExpr;
+ copy->gtCall.gtCallType = tree->gtCall.gtCallType;
+ copy->gtCall.gtReturnType = tree->gtCall.gtReturnType;
+ copy->gtCall.gtControlExpr = tree->gtCall.gtControlExpr;
- /* Copy the union */
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- {
- copy->gtCall.gtCallCookie = tree->gtCall.gtCallCookie ? gtCloneExpr(tree->gtCall.gtCallCookie, addFlags, varNum, varVal) : NULL;
- copy->gtCall.gtCallAddr = tree->gtCall.gtCallAddr ? gtCloneExpr(tree->gtCall.gtCallAddr, addFlags, varNum, varVal) : NULL;
- }
- else if (tree->gtFlags & GTF_CALL_VIRT_STUB)
- {
- copy->gtCall.gtCallMethHnd = tree->gtCall.gtCallMethHnd;
- copy->gtCall.gtStubCallStubAddr = tree->gtCall.gtStubCallStubAddr;
- }
- else
- {
- copy->gtCall.gtCallMethHnd = tree->gtCall.gtCallMethHnd;
- copy->gtCall.gtInlineCandidateInfo = tree->gtCall.gtInlineCandidateInfo;
- }
+ /* Copy the union */
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
+ {
+ copy->gtCall.gtCallCookie = tree->gtCall.gtCallCookie
+ ? gtCloneExpr(tree->gtCall.gtCallCookie, addFlags, varNum, varVal)
+ : nullptr;
+ copy->gtCall.gtCallAddr =
+ tree->gtCall.gtCallAddr ? gtCloneExpr(tree->gtCall.gtCallAddr, addFlags, varNum, varVal) : nullptr;
+ }
+ else if (tree->gtFlags & GTF_CALL_VIRT_STUB)
+ {
+ copy->gtCall.gtCallMethHnd = tree->gtCall.gtCallMethHnd;
+ copy->gtCall.gtStubCallStubAddr = tree->gtCall.gtStubCallStubAddr;
+ }
+ else
+ {
+ copy->gtCall.gtCallMethHnd = tree->gtCall.gtCallMethHnd;
+ copy->gtCall.gtInlineCandidateInfo = tree->gtCall.gtInlineCandidateInfo;
+ }
- if (tree->gtCall.fgArgInfo)
- {
- // Create and initialize the fgArgInfo for our copy of the call tree
- copy->gtCall.fgArgInfo = new (this, CMK_Unknown) fgArgInfo(copy, tree);
- }
- else
- {
- copy->gtCall.fgArgInfo = NULL;
- }
- copy->gtCall.gtRetClsHnd = tree->gtCall.gtRetClsHnd;
+ if (tree->gtCall.fgArgInfo)
+ {
+ // Create and initialize the fgArgInfo for our copy of the call tree
+ copy->gtCall.fgArgInfo = new (this, CMK_Unknown) fgArgInfo(copy, tree);
+ }
+ else
+ {
+ copy->gtCall.fgArgInfo = nullptr;
+ }
+ copy->gtCall.gtRetClsHnd = tree->gtCall.gtRetClsHnd;
#if FEATURE_MULTIREG_RET
- copy->gtCall.gtReturnTypeDesc = tree->gtCall.gtReturnTypeDesc;
+ copy->gtCall.gtReturnTypeDesc = tree->gtCall.gtReturnTypeDesc;
#endif
#ifdef LEGACY_BACKEND
- copy->gtCall.gtCallRegUsedMask = tree->gtCall.gtCallRegUsedMask;
+ copy->gtCall.gtCallRegUsedMask = tree->gtCall.gtCallRegUsedMask;
#endif // LEGACY_BACKEND
#ifdef FEATURE_READYTORUN_COMPILER
- copy->gtCall.setEntryPoint(tree->gtCall.gtEntryPoint);
+ copy->gtCall.setEntryPoint(tree->gtCall.gtEntryPoint);
#endif
#ifdef DEBUG
- copy->gtCall.gtInlineObservation = tree->gtCall.gtInlineObservation;
+ copy->gtCall.gtInlineObservation = tree->gtCall.gtInlineObservation;
#endif
- copy->AsCall()->CopyOtherRegFlags(tree->AsCall());
- break;
+ copy->AsCall()->CopyOtherRegFlags(tree->AsCall());
+ break;
- case GT_FIELD:
+ case GT_FIELD:
- copy = gtNewFieldRef(tree->TypeGet(),
- tree->gtField.gtFldHnd,
- NULL,
- tree->gtField.gtFldOffset);
+ copy = gtNewFieldRef(tree->TypeGet(), tree->gtField.gtFldHnd, nullptr, tree->gtField.gtFldOffset);
- copy->gtField.gtFldObj = tree->gtField.gtFldObj ? gtCloneExpr(tree->gtField.gtFldObj , addFlags, varNum, varVal) : 0;
- copy->gtField.gtFldMayOverlap = tree->gtField.gtFldMayOverlap;
+ copy->gtField.gtFldObj =
+ tree->gtField.gtFldObj ? gtCloneExpr(tree->gtField.gtFldObj, addFlags, varNum, varVal) : nullptr;
+ copy->gtField.gtFldMayOverlap = tree->gtField.gtFldMayOverlap;
#ifdef FEATURE_READYTORUN_COMPILER
- copy->gtField.gtFieldLookup = tree->gtField.gtFieldLookup;
+ copy->gtField.gtFieldLookup = tree->gtField.gtFieldLookup;
#endif
- break;
+ break;
- case GT_ARR_ELEM:
+ case GT_ARR_ELEM:
{
GenTreePtr inds[GT_ARR_MAX_RANK];
- for (unsigned dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ for (unsigned dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
inds[dim] = gtCloneExpr(tree->gtArrElem.gtArrInds[dim], addFlags, varNum, varVal);
- copy = new (this, GT_ARR_ELEM)
- GenTreeArrElem(tree->TypeGet(),
- gtCloneExpr(tree->gtArrElem.gtArrObj, addFlags, varNum, varVal),
- tree->gtArrElem.gtArrRank,
- tree->gtArrElem.gtArrElemSize,
- tree->gtArrElem.gtArrElemType,
- &inds[0]
- );
+ }
+ copy = new (this, GT_ARR_ELEM)
+ GenTreeArrElem(tree->TypeGet(), gtCloneExpr(tree->gtArrElem.gtArrObj, addFlags, varNum, varVal),
+ tree->gtArrElem.gtArrRank, tree->gtArrElem.gtArrElemSize, tree->gtArrElem.gtArrElemType,
+ &inds[0]);
}
break;
- case GT_ARR_OFFSET:
+ case GT_ARR_OFFSET:
{
- copy = new (this, GT_ARR_OFFSET)
- GenTreeArrOffs(tree->TypeGet(),
- gtCloneExpr(tree->gtArrOffs.gtOffset, addFlags, varNum, varVal),
+ copy = new (this, GT_ARR_OFFSET)
+ GenTreeArrOffs(tree->TypeGet(), gtCloneExpr(tree->gtArrOffs.gtOffset, addFlags, varNum, varVal),
gtCloneExpr(tree->gtArrOffs.gtIndex, addFlags, varNum, varVal),
gtCloneExpr(tree->gtArrOffs.gtArrObj, addFlags, varNum, varVal),
- tree->gtArrOffs.gtCurrDim,
- tree->gtArrOffs.gtArrRank,
- tree->gtArrOffs.gtArrElemType);
+ tree->gtArrOffs.gtCurrDim, tree->gtArrOffs.gtArrRank, tree->gtArrOffs.gtArrElemType);
}
break;
- case GT_CMPXCHG:
- copy = new (this, GT_CMPXCHG)
- GenTreeCmpXchg(tree->TypeGet(),
- gtCloneExpr(tree->gtCmpXchg.gtOpLocation, addFlags, varNum, varVal),
- gtCloneExpr(tree->gtCmpXchg.gtOpValue, addFlags, varNum, varVal),
- gtCloneExpr(tree->gtCmpXchg.gtOpComparand, addFlags, varNum, varVal));
- break;
+ case GT_CMPXCHG:
+ copy = new (this, GT_CMPXCHG)
+ GenTreeCmpXchg(tree->TypeGet(), gtCloneExpr(tree->gtCmpXchg.gtOpLocation, addFlags, varNum, varVal),
+ gtCloneExpr(tree->gtCmpXchg.gtOpValue, addFlags, varNum, varVal),
+ gtCloneExpr(tree->gtCmpXchg.gtOpComparand, addFlags, varNum, varVal));
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- copy = new (this, oper)
- GenTreeBoundsChk(oper,
- tree->TypeGet(),
- gtCloneExpr(tree->gtBoundsChk.gtArrLen, addFlags, varNum, varVal),
- gtCloneExpr(tree->gtBoundsChk.gtIndex, addFlags, varNum, varVal),
- tree->gtBoundsChk.gtThrowKind);
- break;
-
+ copy = new (this, oper) GenTreeBoundsChk(oper, tree->TypeGet(),
+ gtCloneExpr(tree->gtBoundsChk.gtArrLen, addFlags, varNum, varVal),
+ gtCloneExpr(tree->gtBoundsChk.gtIndex, addFlags, varNum, varVal),
+ tree->gtBoundsChk.gtThrowKind);
+ break;
- default:
-#ifdef DEBUG
- gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ gtDispTree(tree);
#endif
- NO_WAY("unexpected operator");
+ NO_WAY("unexpected operator");
}
DONE:
@@ -7129,10 +7565,12 @@ DONE:
{
FieldSeqNode* fldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &fldSeq))
+ {
GetZeroOffsetFieldMap()->Set(copy, fldSeq);
+ }
}
- copy->gtVNPair = tree->gtVNPair; // A cloned tree gets the orginal's Value number pair
+    copy->gtVNPair = tree->gtVNPair; // A cloned tree gets the original's Value number pair
/* We assume the FP stack level will be identical */
@@ -7145,14 +7583,16 @@ DONE:
{
addFlags |= tree->gtFlags;
-#ifdef DEBUG
+#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
// Some other flags depend on the context of the expression, and should not be preserved.
// For example, GTF_RELOP_QMARK:
if (copy->OperKind() & GTK_RELOP)
+ {
addFlags &= ~GTF_RELOP_QMARK;
+ }
// On the other hand, if we're creating such a context, restore this flag.
if (copy->OperGet() == GT_QMARK)
{
@@ -7175,7 +7615,7 @@ DONE:
copy->CopyRawCosts(tree);
copy->gtRsvdRegs = tree->gtRsvdRegs;
copy->CopyReg(tree);
- return copy;
+ return copy;
}
//------------------------------------------------------------------------
@@ -7194,20 +7634,18 @@ DONE:
// The sequencing of the stmt has been done.
//
// Notes:
-// The caller must ensure that the original statement has been sequenced,
-// but this method will sequence 'replacementTree', and insert it into the
+// The caller must ensure that the original statement has been sequenced,
+// but this method will sequence 'replacementTree', and insert it into the
// proper place in the statement sequence.
-GenTreePtr Compiler::gtReplaceTree(GenTreePtr stmt,
- GenTreePtr tree,
- GenTreePtr replacementTree)
+GenTreePtr Compiler::gtReplaceTree(GenTreePtr stmt, GenTreePtr tree, GenTreePtr replacementTree)
{
assert(fgStmtListThreaded);
assert(tree != nullptr);
assert(stmt != nullptr);
assert(replacementTree != nullptr);
- GenTreePtr* treePtr = nullptr;
+ GenTreePtr* treePtr = nullptr;
GenTreePtr treeParent = tree->gtGetParent(&treePtr);
assert(treeParent != nullptr || tree == stmt->gtStmt.gtStmtExpr);
@@ -7225,25 +7663,25 @@ GenTreePtr Compiler::gtReplaceTree(GenTreePtr stmt,
assert(treeParent != nullptr);
GenTreePtr treeFirstNode = fgGetFirstNode(tree);
- GenTreePtr treeLastNode = tree;
- GenTreePtr treePrevNode = treeFirstNode->gtPrev;
- GenTreePtr treeNextNode = treeLastNode->gtNext;
+ GenTreePtr treeLastNode = tree;
+ GenTreePtr treePrevNode = treeFirstNode->gtPrev;
+ GenTreePtr treeNextNode = treeLastNode->gtNext;
*treePtr = replacementTree;
-
+
// Build the linear order for "replacementTree".
fgSetTreeSeq(replacementTree, treePrevNode);
// Restore linear-order Prev and Next for "replacementTree".
if (treePrevNode != nullptr)
{
- treeFirstNode = fgGetFirstNode(replacementTree);
+ treeFirstNode = fgGetFirstNode(replacementTree);
treeFirstNode->gtPrev = treePrevNode;
- treePrevNode->gtNext = treeFirstNode;
+ treePrevNode->gtNext = treeFirstNode;
}
else
{
- // Update the linear oder start of "stmt" if treeFirstNode
+            // Update the linear order start of "stmt" if treeFirstNode
// appears to have replaced the original first node.
assert(treeFirstNode == stmt->gtStmt.gtStmtList);
stmt->gtStmt.gtStmtList = fgGetFirstNode(replacementTree);
@@ -7251,13 +7689,13 @@ GenTreePtr Compiler::gtReplaceTree(GenTreePtr stmt,
if (treeNextNode != nullptr)
{
- treeLastNode = replacementTree;
+ treeLastNode = replacementTree;
treeLastNode->gtNext = treeNextNode;
treeNextNode->gtPrev = treeLastNode;
}
- bool needFixupCallArg = false;
- GenTreePtr node = treeParent;
+ bool needFixupCallArg = false;
+ GenTreePtr node = treeParent;
// If we have replaced an arg, then update pointers in argtable.
do
@@ -7302,7 +7740,7 @@ GenTreePtr Compiler::gtReplaceTree(GenTreePtr stmt,
gtUpdateSideEffects(treeParent, tree->gtFlags, replacementTree->gtFlags);
}
- return replacementTree;
+ return replacementTree;
}
//------------------------------------------------------------------------
@@ -7318,12 +7756,10 @@ GenTreePtr Compiler::gtReplaceTree(GenTreePtr stmt,
// Linear order of the stmt has been established.
//
// Notes:
-// The routine is used for updating the stale side effect flags for ancestor
+// The routine is used for updating the stale side effect flags for ancestor
// nodes starting from treeParent up to the top-level stmt expr.
-void Compiler::gtUpdateSideEffects(GenTreePtr treeParent,
- unsigned oldGtFlags,
- unsigned newGtFlags)
+void Compiler::gtUpdateSideEffects(GenTreePtr treeParent, unsigned oldGtFlags, unsigned newGtFlags)
{
assert(fgStmtListThreaded);
@@ -7337,7 +7773,7 @@ void Compiler::gtUpdateSideEffects(GenTreePtr treeParent,
treeParent->gtFlags &= ~oldGtFlags;
treeParent->gtFlags |= newGtFlags;
treeParent = treeParent->gtGetParent(nullptr);
- }
+ }
}
}
@@ -7350,78 +7786,79 @@ void Compiler::gtUpdateSideEffects(GenTreePtr treeParent,
 * The current implementation only compares a limited set of LEAF/CONST nodes
 * and returns false for all other trees.
*/
-bool Compiler::gtCompareTree(GenTree * op1,
- GenTree * op2)
+bool Compiler::gtCompareTree(GenTree* op1, GenTree* op2)
{
/* Make sure that both trees are of the same GT node kind */
if (op1->OperGet() != op2->OperGet())
+ {
return false;
+ }
/* Make sure that both trees are returning the same type */
if (op1->gtType != op2->gtType)
+ {
return false;
+ }
/* Figure out what kind of a node we have */
- genTreeOps oper = op1->OperGet();
- unsigned kind = op1->OperKind();
+ genTreeOps oper = op1->OperGet();
+ unsigned kind = op1->OperKind();
/* Is this a constant or leaf node? */
- if (kind & (GTK_CONST|GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
{
switch (oper)
{
- case GT_CNS_INT:
- if ((op1->gtIntCon.gtIconVal == op2->gtIntCon.gtIconVal) &&
- GenTree::SameIconHandleFlag(op1, op2))
- {
- return true;
- }
- break;
+ case GT_CNS_INT:
+ if ((op1->gtIntCon.gtIconVal == op2->gtIntCon.gtIconVal) && GenTree::SameIconHandleFlag(op1, op2))
+ {
+ return true;
+ }
+ break;
- case GT_CNS_LNG:
- if (op1->gtLngCon.gtLconVal == op2->gtLngCon.gtLconVal)
- {
- return true;
- }
- break;
+ case GT_CNS_LNG:
+ if (op1->gtLngCon.gtLconVal == op2->gtLngCon.gtLconVal)
+ {
+ return true;
+ }
+ break;
- case GT_CNS_STR:
- if (op1->gtStrCon.gtSconCPX == op2->gtStrCon.gtSconCPX)
- {
- return true;
- }
- break;
+ case GT_CNS_STR:
+ if (op1->gtStrCon.gtSconCPX == op2->gtStrCon.gtSconCPX)
+ {
+ return true;
+ }
+ break;
- case GT_LCL_VAR:
- if (op1->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum)
- {
- return true;
- }
- break;
+ case GT_LCL_VAR:
+ if (op1->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum)
+ {
+ return true;
+ }
+ break;
- case GT_CLS_VAR:
- if (op1->gtClsVar.gtClsVarHnd == op2->gtClsVar.gtClsVarHnd)
- {
- return true;
- }
- break;
+ case GT_CLS_VAR:
+ if (op1->gtClsVar.gtClsVarHnd == op2->gtClsVar.gtClsVarHnd)
+ {
+ return true;
+ }
+ break;
- default:
- // we return false for these unhandled 'oper' kinds
- break;
+ default:
+ // we return false for these unhandled 'oper' kinds
+ break;
}
}
return false;
}
-
GenTreePtr Compiler::gtGetThisArg(GenTreePtr call)
{
assert(call->gtOper == GT_CALL);
- if (call->gtCall.gtCallObjp != NULL)
+ if (call->gtCall.gtCallObjp != nullptr)
{
if (call->gtCall.gtCallObjp->gtOper != GT_NOP && call->gtCall.gtCallObjp->gtOper != GT_ASG)
{
@@ -7433,15 +7870,15 @@ GenTreePtr Compiler::gtGetThisArg(GenTreePtr call)
if (call->gtCall.gtCallLateArgs)
{
- regNumber thisReg = REG_ARG_0;
- unsigned argNum = 0;
+ regNumber thisReg = REG_ARG_0;
+ unsigned argNum = 0;
fgArgTabEntryPtr thisArgTabEntry = gtArgEntryByArgNum(call, argNum);
- GenTreePtr result = thisArgTabEntry->node;
+ GenTreePtr result = thisArgTabEntry->node;
#if !FEATURE_FIXED_OUT_ARGS
GenTreePtr lateArgs = call->gtCall.gtCallLateArgs;
- regList list = call->gtCall.regArgList;
- int index = 0;
+ regList list = call->gtCall.regArgList;
+ int index = 0;
while (lateArgs != NULL)
{
assert(lateArgs->gtOper == GT_LIST);
@@ -7462,27 +7899,29 @@ GenTreePtr Compiler::gtGetThisArg(GenTreePtr call)
return result;
}
}
- return NULL;
+ return nullptr;
}
-bool GenTree::gtSetFlags() const
+bool GenTree::gtSetFlags() const
{
//
// When FEATURE_SET_FLAGS (_TARGET_ARM_) is active the method returns true
// when the gtFlags has the flag GTF_SET_FLAGS set
- // otherwise the architecture will be have instructions that typically set
+    // otherwise the architecture will have instructions that typically set
// the flags and this method will return true.
//
- // Exceptions: GT_IND (load/store) is not allowed to set the flags
+ // Exceptions: GT_IND (load/store) is not allowed to set the flags
// and on XARCH the GT_MUL/GT_DIV and all overflow instructions
// do not set the condition flags
- //
+ //
// Precondition we have a GTK_SMPOP
- //
+ //
assert(OperIsSimple());
if (!varTypeIsIntegralOrI(TypeGet()))
+ {
return false;
+ }
#if FEATURE_SET_FLAGS
@@ -7496,18 +7935,24 @@ bool GenTree::gtSetFlags() const
return false;
}
-#else // !FEATURE_SET_FLAGS
+#else // !FEATURE_SET_FLAGS
#ifdef _TARGET_XARCH_
// Return true if/when the codegen for this node will set the flags
- //
+ //
//
if ((gtOper == GT_IND) || (gtOper == GT_MUL) || (gtOper == GT_DIV))
+ {
return false;
+ }
else if (gtOverflowEx())
+ {
return false;
+ }
else
+ {
return true;
+ }
#else
// Otherwise for other architectures we should return false
return false;
@@ -7516,47 +7961,48 @@ bool GenTree::gtSetFlags() const
#endif // !FEATURE_SET_FLAGS
}
-bool GenTree::gtRequestSetFlags()
+bool GenTree::gtRequestSetFlags()
{
bool result = false;
#if FEATURE_SET_FLAGS
// This method is a Nop unless FEATURE_SET_FLAGS is defined
- // In order to set GTF_SET_FLAGS
+ // In order to set GTF_SET_FLAGS
// we must have a GTK_SMPOP
// and we have an integer or machine size type (not floating point or TYP_LONG on 32-bit)
- //
+ //
if (!OperIsSimple())
return false;
if (!varTypeIsIntegralOrI(TypeGet()))
return false;
- switch (gtOper) {
- case GT_IND:
- case GT_ARR_LENGTH:
- // These will turn into simple load from memory instructions
- // and we can't force the setting of the flags on load from memory
- break;
-
- case GT_MUL:
- case GT_DIV:
- // These instructions don't set the flags (on x86/x64)
- //
- break;
-
- default:
- // Otherwise we can set the flags for this gtOper
- // and codegen must set the condition flags.
- //
- gtFlags |= GTF_SET_FLAGS;
- result = true;
- break;
+ switch (gtOper)
+ {
+ case GT_IND:
+ case GT_ARR_LENGTH:
+ // These will turn into simple load from memory instructions
+ // and we can't force the setting of the flags on load from memory
+ break;
+
+ case GT_MUL:
+ case GT_DIV:
+ // These instructions don't set the flags (on x86/x64)
+ //
+ break;
+
+ default:
+ // Otherwise we can set the flags for this gtOper
+ // and codegen must set the condition flags.
+ //
+ gtFlags |= GTF_SET_FLAGS;
+ result = true;
+ break;
}
#endif // FEATURE_SET_FLAGS
- // Codegen for this tree must set the condition flags if
+ // Codegen for this tree must set the condition flags if
// this method returns true.
//
return result;
@@ -7565,8 +8011,8 @@ bool GenTree::gtRequestSetFlags()
/*****************************************************************************/
void GenTree::CopyTo(class Compiler* comp, const GenTree& gt)
{
- gtOper = gt.gtOper;
- gtType = gt.gtType;
+ gtOper = gt.gtOper;
+ gtType = gt.gtType;
gtAssertionNum = gt.gtAssertionNum;
gtRegNum = gt.gtRegNum; // one union member.
@@ -7616,9 +8062,13 @@ unsigned GenTree::NumChildren()
if (OperGet() == GT_NOP || OperGet() == GT_RETURN || OperGet() == GT_RETFILT)
{
if (gtOp.gtOp1 == nullptr)
+ {
return 0;
+ }
else
+ {
return 1;
+ }
}
else
{
@@ -7656,45 +8106,63 @@ unsigned GenTree::NumChildren()
// Special
switch (OperGet())
{
- case GT_CMPXCHG:
- return 3;
+ case GT_CMPXCHG:
+ return 3;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- return 2;
+ return 2;
- case GT_FIELD:
- case GT_STMT:
- return 1;
+ case GT_FIELD:
+ case GT_STMT:
+ return 1;
- case GT_ARR_ELEM:
- return 1 + AsArrElem()->gtArrRank;
+ case GT_ARR_ELEM:
+ return 1 + AsArrElem()->gtArrRank;
- case GT_ARR_OFFSET:
- return 3;
+ case GT_ARR_OFFSET:
+ return 3;
- case GT_CALL:
+ case GT_CALL:
{
GenTreeCall* call = AsCall();
- unsigned res = 0; // arg list(s) (including late args).
- if (call->gtCallObjp != nullptr) res++; // Add objp?
- if (call->gtCallArgs != nullptr) res++; // Add args?
- if (call->gtCallLateArgs != nullptr) res++; // Add late args?
- if (call->gtControlExpr != nullptr) res++;
+ unsigned res = 0; // arg list(s) (including late args).
+ if (call->gtCallObjp != nullptr)
+ {
+ res++; // Add objp?
+ }
+ if (call->gtCallArgs != nullptr)
+ {
+ res++; // Add args?
+ }
+ if (call->gtCallLateArgs != nullptr)
+ {
+ res++; // Add late args?
+ }
+ if (call->gtControlExpr != nullptr)
+ {
+ res++;
+ }
if (call->gtCallType == CT_INDIRECT)
{
- if (call->gtCallCookie != nullptr) res++;
- if (call->gtCallAddr != nullptr) res++;
+ if (call->gtCallCookie != nullptr)
+ {
+ res++;
+ }
+ if (call->gtCallAddr != nullptr)
+ {
+ res++;
+ }
}
return res;
}
- case GT_NONE:
- return 0;
- default:
- unreached();
+ case GT_NONE:
+ return 0;
+ default:
+ unreached();
}
}
}
@@ -7715,7 +8183,9 @@ GenTreePtr GenTree::GetChild(unsigned childNum)
// If this is the first (0th) child, only return op1 if it is non-null
// Otherwise, we return gtOp2.
if (childNum == 0 && AsOp()->gtOp1 != nullptr)
+ {
return AsOp()->gtOp1;
+ }
return AsOp()->gtOp2;
}
// TODO-Cleanup: Consider handling ReverseOps here, and then we wouldn't have to handle it in
@@ -7734,79 +8204,90 @@ GenTreePtr GenTree::GetChild(unsigned childNum)
// Special
switch (OperGet())
{
- case GT_CMPXCHG:
- switch (childNum)
- {
- case 0:
- return AsCmpXchg()->gtOpLocation;
- case 1:
- return AsCmpXchg()->gtOpValue;
- case 2:
- return AsCmpXchg()->gtOpComparand;
- default:
- unreached();
- }
- case GT_ARR_BOUNDS_CHECK:
+ case GT_CMPXCHG:
+ switch (childNum)
+ {
+ case 0:
+ return AsCmpXchg()->gtOpLocation;
+ case 1:
+ return AsCmpXchg()->gtOpValue;
+ case 2:
+ return AsCmpXchg()->gtOpComparand;
+ default:
+ unreached();
+ }
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- switch (childNum)
- {
- case 0:
- return AsBoundsChk()->gtArrLen;
- case 1:
- return AsBoundsChk()->gtIndex;
- default:
- unreached();
- }
+ switch (childNum)
+ {
+ case 0:
+ return AsBoundsChk()->gtArrLen;
+ case 1:
+ return AsBoundsChk()->gtIndex;
+ default:
+ unreached();
+ }
- case GT_FIELD:
- return AsField()->gtFldObj;
+ case GT_FIELD:
+ return AsField()->gtFldObj;
- case GT_STMT:
- return AsStmt()->gtStmtExpr;
+ case GT_STMT:
+ return AsStmt()->gtStmtExpr;
- case GT_ARR_ELEM:
- if (childNum == 0)
- {
- return AsArrElem()->gtArrObj;
- }
- else
- {
- return AsArrElem()->gtArrInds[childNum-1];
- }
+ case GT_ARR_ELEM:
+ if (childNum == 0)
+ {
+ return AsArrElem()->gtArrObj;
+ }
+ else
+ {
+ return AsArrElem()->gtArrInds[childNum - 1];
+ }
- case GT_ARR_OFFSET:
- switch (childNum)
- {
- case 0:
- return AsArrOffs()->gtOffset;
- case 1:
- return AsArrOffs()->gtIndex;
- case 2:
- return AsArrOffs()->gtArrObj;
- default:
- unreached();
- }
+ case GT_ARR_OFFSET:
+ switch (childNum)
+ {
+ case 0:
+ return AsArrOffs()->gtOffset;
+ case 1:
+ return AsArrOffs()->gtIndex;
+ case 2:
+ return AsArrOffs()->gtArrObj;
+ default:
+ unreached();
+ }
- case GT_CALL:
+ case GT_CALL:
{
// The if chain below assumes that all possible children are non-null.
// If some are null, "virtually skip them."
// If one isn't present, "virtually skip it."
GenTreeCall* call = AsCall();
- if (call->gtCallObjp == nullptr)
+ if (call->gtCallObjp == nullptr)
+ {
childNum++;
- if (childNum >= 1 && call->gtCallArgs == nullptr)
+ }
+ if (childNum >= 1 && call->gtCallArgs == nullptr)
+ {
childNum++;
+ }
if (childNum >= 2 && call->gtCallLateArgs == nullptr)
+ {
childNum++;
- if (childNum >= 3 && call->gtControlExpr == nullptr)
+ }
+ if (childNum >= 3 && call->gtControlExpr == nullptr)
+ {
childNum++;
+ }
if (call->gtCallType == CT_INDIRECT)
{
- if (childNum >= 4 && call->gtCallCookie == nullptr) childNum++;
+ if (childNum >= 4 && call->gtCallCookie == nullptr)
+ {
+ childNum++;
+ }
}
if (childNum == 0)
@@ -7834,15 +8315,15 @@ GenTreePtr GenTree::GetChild(unsigned childNum)
}
else
{
- assert (childNum == 5);
+ assert(childNum == 5);
return call->gtCallAddr;
}
}
}
- case GT_NONE:
- unreached();
- default:
- unreached();
+ case GT_NONE:
+ unreached();
+ default:
+ unreached();
}
}
}
@@ -7885,119 +8366,119 @@ GenTree* GenTreeOperandIterator::GetNextOperand() const
{
switch (m_node->OperGet())
{
- case GT_CMPXCHG:
- switch (m_state)
- {
- case 0:
- return m_node->AsCmpXchg()->gtOpLocation;
- case 1:
- return m_node->AsCmpXchg()->gtOpValue;
- case 2:
- return m_node->AsCmpXchg()->gtOpComparand;
- default:
- return nullptr;
- }
- case GT_ARR_BOUNDS_CHECK:
+ case GT_CMPXCHG:
+ switch (m_state)
+ {
+ case 0:
+ return m_node->AsCmpXchg()->gtOpLocation;
+ case 1:
+ return m_node->AsCmpXchg()->gtOpValue;
+ case 2:
+ return m_node->AsCmpXchg()->gtOpComparand;
+ default:
+ return nullptr;
+ }
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- switch (m_state)
- {
- case 0:
- return m_node->AsBoundsChk()->gtArrLen;
- case 1:
- return m_node->AsBoundsChk()->gtIndex;
- default:
- return nullptr;
- }
-
- case GT_FIELD:
- if (m_state == 0)
- {
- return m_node->AsField()->gtFldObj;
- }
- return nullptr;
+ switch (m_state)
+ {
+ case 0:
+ return m_node->AsBoundsChk()->gtArrLen;
+ case 1:
+ return m_node->AsBoundsChk()->gtIndex;
+ default:
+ return nullptr;
+ }
- case GT_STMT:
- if (m_state == 0)
- {
- return m_node->AsStmt()->gtStmtExpr;
- }
- return nullptr;
+ case GT_FIELD:
+ if (m_state == 0)
+ {
+ return m_node->AsField()->gtFldObj;
+ }
+ return nullptr;
- case GT_ARR_ELEM:
- if (m_state == 0)
- {
- return m_node->AsArrElem()->gtArrObj;
- }
- else if (m_state <= m_node->AsArrElem()->gtArrRank)
- {
- return m_node->AsArrElem()->gtArrInds[m_state-1];
- }
- return nullptr;
+ case GT_STMT:
+ if (m_state == 0)
+ {
+ return m_node->AsStmt()->gtStmtExpr;
+ }
+ return nullptr;
- case GT_ARR_OFFSET:
- switch (m_state)
- {
- case 0:
- return m_node->AsArrOffs()->gtOffset;
- case 1:
- return m_node->AsArrOffs()->gtIndex;
- case 2:
- return m_node->AsArrOffs()->gtArrObj;
- default:
+ case GT_ARR_ELEM:
+ if (m_state == 0)
+ {
+ return m_node->AsArrElem()->gtArrObj;
+ }
+ else if (m_state <= m_node->AsArrElem()->gtArrRank)
+ {
+ return m_node->AsArrElem()->gtArrInds[m_state - 1];
+ }
return nullptr;
- }
- // Call, phi, and SIMD nodes are handled by MoveNext{Call,Phi,SIMD}Operand, respectively.
- case GT_CALL:
- case GT_PHI:
+ case GT_ARR_OFFSET:
+ switch (m_state)
+ {
+ case 0:
+ return m_node->AsArrOffs()->gtOffset;
+ case 1:
+ return m_node->AsArrOffs()->gtIndex;
+ case 2:
+ return m_node->AsArrOffs()->gtArrObj;
+ default:
+ return nullptr;
+ }
+
+ // Call, phi, and SIMD nodes are handled by MoveNext{Call,Phi,SIMD}Operand, respectively.
+ case GT_CALL:
+ case GT_PHI:
#ifdef FEATURE_SIMD
- case GT_SIMD:
+ case GT_SIMD:
#endif
- break;
+ break;
- case GT_INITBLK:
- case GT_COPYBLK:
- case GT_COPYOBJ:
+ case GT_INITBLK:
+ case GT_COPYBLK:
+ case GT_COPYOBJ:
{
GenTreeBlkOp* blkOp = m_node->AsBlkOp();
- bool blkOpReversed = (blkOp->gtFlags & GTF_REVERSE_OPS) != 0;
+ bool blkOpReversed = (blkOp->gtFlags & GTF_REVERSE_OPS) != 0;
bool srcDstReversed = (blkOp->gtOp1->gtFlags & GTF_REVERSE_OPS) != 0;
if (!blkOpReversed)
{
switch (m_state)
{
- case 0:
- return !srcDstReversed ? blkOp->gtOp1->AsArgList()->gtOp1 : blkOp->gtOp1->AsArgList()->gtOp2;
- case 1:
- return !srcDstReversed ? blkOp->gtOp1->AsArgList()->gtOp2 : blkOp->gtOp1->AsArgList()->gtOp1;
- case 2:
- return blkOp->gtOp2;
- default:
- return nullptr;
+ case 0:
+ return !srcDstReversed ? blkOp->gtOp1->AsArgList()->gtOp1 : blkOp->gtOp1->AsArgList()->gtOp2;
+ case 1:
+ return !srcDstReversed ? blkOp->gtOp1->AsArgList()->gtOp2 : blkOp->gtOp1->AsArgList()->gtOp1;
+ case 2:
+ return blkOp->gtOp2;
+ default:
+ return nullptr;
}
}
else
{
switch (m_state)
{
- case 0:
- return blkOp->gtOp2;
- case 1:
- return !srcDstReversed ? blkOp->gtOp1->AsArgList()->gtOp1 : blkOp->gtOp1->AsArgList()->gtOp2;
- case 2:
- return !srcDstReversed ? blkOp->gtOp1->AsArgList()->gtOp2 : blkOp->gtOp1->AsArgList()->gtOp1;
- default:
- return nullptr;
+ case 0:
+ return blkOp->gtOp2;
+ case 1:
+ return !srcDstReversed ? blkOp->gtOp1->AsArgList()->gtOp1 : blkOp->gtOp1->AsArgList()->gtOp2;
+ case 2:
+ return !srcDstReversed ? blkOp->gtOp1->AsArgList()->gtOp2 : blkOp->gtOp1->AsArgList()->gtOp1;
+ default:
+ return nullptr;
}
}
}
break;
- case GT_LEA:
+ case GT_LEA:
{
GenTreeAddrMode* lea = m_node->AsAddrMode();
@@ -8020,28 +8501,28 @@ GenTree* GenTreeOperandIterator::GetNextOperand() const
}
break;
- default:
- if (m_node->OperIsConst() || m_node->OperIsLeaf())
- {
- return nullptr;
- }
- else if (m_node->OperIsUnary())
- {
- return m_state == 0 ? m_node->AsUnOp()->gtOp1 : nullptr;
- }
- else if (m_node->OperIsBinary())
- {
- bool operandsReversed = (m_node->gtFlags & GTF_REVERSE_OPS) != 0;
- switch (m_state)
+ default:
+ if (m_node->OperIsConst() || m_node->OperIsLeaf())
{
- case 0:
- return !operandsReversed ? m_node->AsOp()->gtOp1 : m_node->AsOp()->gtOp2;
- case 1:
- return !operandsReversed ? m_node->AsOp()->gtOp2 : m_node->AsOp()->gtOp1;
- default:
- return nullptr;
+ return nullptr;
+ }
+ else if (m_node->OperIsUnary())
+ {
+ return m_state == 0 ? m_node->AsUnOp()->gtOp1 : nullptr;
+ }
+ else if (m_node->OperIsBinary())
+ {
+ bool operandsReversed = (m_node->gtFlags & GTF_REVERSE_OPS) != 0;
+ switch (m_state)
+ {
+ case 0:
+ return !operandsReversed ? m_node->AsOp()->gtOp1 : m_node->AsOp()->gtOp2;
+ case 1:
+ return !operandsReversed ? m_node->AsOp()->gtOp2 : m_node->AsOp()->gtOp1;
+ default:
+ return nullptr;
+ }
}
- }
}
unreached();
@@ -8063,7 +8544,7 @@ void GenTreeOperandIterator::MoveToNextCallOperand()
switch (m_state)
{
case 0:
- m_state = 1;
+ m_state = 1;
m_argList = call->gtCallArgs;
if (call->gtCallObjp != nullptr)
@@ -8111,13 +8592,12 @@ void GenTreeOperandIterator::MoveToNextCallOperand()
else
{
GenTreeArgList* regNode = m_multiRegArg->AsArgList();
- m_operand = regNode->gtOp1;
- m_multiRegArg = regNode->Rest();
+ m_operand = regNode->gtOp1;
+ m_multiRegArg = regNode->Rest();
return;
}
break;
-
case 5:
m_state = call->gtCallType == CT_INDIRECT ? 6 : 8;
@@ -8152,10 +8632,10 @@ void GenTreeOperandIterator::MoveToNextCallOperand()
break;
default:
- m_node = nullptr;
+ m_node = nullptr;
m_operand = nullptr;
m_argList = nullptr;
- m_state = -1;
+ m_state = -1;
return;
}
}
@@ -8177,7 +8657,7 @@ void GenTreeOperandIterator::MoveToNextPhiOperand()
switch (m_state)
{
case 0:
- m_state = 1;
+ m_state = 1;
m_argList = phi->gtOp1;
break;
@@ -8189,17 +8669,17 @@ void GenTreeOperandIterator::MoveToNextPhiOperand()
else
{
GenTreeArgList* argNode = m_argList->AsArgList();
- m_operand = argNode->gtOp1;
- m_argList = argNode->Rest();
+ m_operand = argNode->gtOp1;
+ m_argList = argNode->Rest();
return;
}
break;
default:
- m_node = nullptr;
+ m_node = nullptr;
m_operand = nullptr;
m_argList = nullptr;
- m_state = -1;
+ m_state = -1;
return;
}
}
@@ -8240,7 +8720,7 @@ void GenTreeOperandIterator::MoveToNextSIMDOperand()
}
else
{
- m_node = nullptr;
+ m_node = nullptr;
m_state = -1;
}
@@ -8252,7 +8732,7 @@ void GenTreeOperandIterator::MoveToNextSIMDOperand()
switch (m_state)
{
case 0:
- m_state = 1;
+ m_state = 1;
m_argList = simd->gtOp1;
break;
@@ -8264,17 +8744,17 @@ void GenTreeOperandIterator::MoveToNextSIMDOperand()
else
{
GenTreeArgList* argNode = m_argList->AsArgList();
- m_operand = argNode->gtOp1;
- m_argList = argNode->Rest();
+ m_operand = argNode->gtOp1;
+ m_argList = argNode->Rest();
return;
}
break;
default:
- m_node = nullptr;
+ m_node = nullptr;
m_operand = nullptr;
m_argList = nullptr;
- m_state = -1;
+ m_state = -1;
return;
}
}
@@ -8321,7 +8801,7 @@ GenTreeOperandIterator& GenTreeOperandIterator::operator++()
}
else
{
- m_node = nullptr;
+ m_node = nullptr;
m_state = -1;
}
}
@@ -8349,44 +8829,44 @@ IteratorPair<GenTreeOperandIterator> GenTree::Operands(bool expandMultiRegArgs)
/* static */ int GenTree::gtDispFlags(unsigned flags, unsigned debugFlags)
{
- printf("%c", (flags & GTF_ASG ) ? 'A' : '-');
- printf("%c", (flags & GTF_CALL ) ? 'C' : '-');
- printf("%c", (flags & GTF_EXCEPT ) ? 'X' : '-');
- printf("%c", (flags & GTF_GLOB_REF ) ? 'G' : '-');
- printf("%c", (debugFlags & GTF_DEBUG_NODE_MORPHED) ? '+' : // First print '+' if GTF_DEBUG_NODE_MORPHED is set
- (flags & GTF_ORDER_SIDEEFF ) ? 'O' : '-'); // otherwise print 'O' or '-'
- printf("%c", (flags & GTF_COLON_COND ) ? '?' : '-');
- printf("%c", (flags & GTF_DONT_CSE ) ? 'N' : // N is for No cse
- (flags & GTF_MAKE_CSE ) ? 'H' : '-'); // H is for Hoist this expr
- printf("%c", (flags & GTF_REVERSE_OPS ) ? 'R' : '-');
- printf("%c", (flags & GTF_UNSIGNED ) ? 'U' :
- (flags & GTF_BOOLEAN ) ? 'B' : '-');
+ printf("%c", (flags & GTF_ASG) ? 'A' : '-');
+ printf("%c", (flags & GTF_CALL) ? 'C' : '-');
+ printf("%c", (flags & GTF_EXCEPT) ? 'X' : '-');
+ printf("%c", (flags & GTF_GLOB_REF) ? 'G' : '-');
+ printf("%c", (debugFlags & GTF_DEBUG_NODE_MORPHED) ? '+' : // First print '+' if GTF_DEBUG_NODE_MORPHED is set
+ (flags & GTF_ORDER_SIDEEFF) ? 'O' : '-'); // otherwise print 'O' or '-'
+ printf("%c", (flags & GTF_COLON_COND) ? '?' : '-');
+ printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse
+ (flags & GTF_MAKE_CSE) ? 'H' : '-'); // H is for Hoist this expr
+ printf("%c", (flags & GTF_REVERSE_OPS) ? 'R' : '-');
+ printf("%c", (flags & GTF_UNSIGNED) ? 'U' : (flags & GTF_BOOLEAN) ? 'B' : '-');
#if FEATURE_SET_FLAGS
- printf("%c", (flags & GTF_SET_FLAGS ) ? 'S' : '-');
+ printf("%c", (flags & GTF_SET_FLAGS) ? 'S' : '-');
#endif
- printf("%c", (flags & GTF_LATE_ARG ) ? 'L' : '-');
- printf("%c", (flags & GTF_SPILLED ) ? 'z' :
- (flags & GTF_SPILL ) ? 'Z' : '-');
+ printf("%c", (flags & GTF_LATE_ARG) ? 'L' : '-');
+ printf("%c", (flags & GTF_SPILLED) ? 'z' : (flags & GTF_SPILL) ? 'Z' : '-');
return 12; // displayed 12 flag characters
}
/*****************************************************************************/
-void
-Compiler::gtDispNodeName(GenTree *tree)
+void Compiler::gtDispNodeName(GenTree* tree)
{
/* print the node name */
- const char * name;
+ const char* name;
assert(tree);
if (tree->gtOper < GT_COUNT)
+ {
name = GenTree::NodeName(tree->OperGet());
+ }
else
+ {
name = "<ERROR>";
-
- char buf[32];
- char * bufp = &buf[0];
+ }
+ char buf[32];
+ char* bufp = &buf[0];
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
@@ -8398,58 +8878,73 @@ Compiler::gtDispNodeName(GenTree *tree)
}
else if (tree->gtOper == GT_CALL)
{
- const char * callType = "call";
- const char * gtfType = "";
- const char * ctType = "";
- char gtfTypeBuf[100];
+ const char* callType = "call";
+ const char* gtfType = "";
+ const char* ctType = "";
+ char gtfTypeBuf[100];
if (tree->gtCall.gtCallType == CT_USER_FUNC)
{
if ((tree->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
- callType = "callv";
+ {
+ callType = "callv";
+ }
}
else if (tree->gtCall.gtCallType == CT_HELPER)
- ctType = " help";
+ {
+ ctType = " help";
+ }
else if (tree->gtCall.gtCallType == CT_INDIRECT)
- ctType = " ind";
+ {
+ ctType = " ind";
+ }
else
+ {
assert(!"Unknown gtCallType");
+ }
if (tree->gtFlags & GTF_CALL_NULLCHECK)
+ {
gtfType = " nullcheck";
-
+ }
if (tree->gtFlags & GTF_CALL_VIRT_VTABLE)
+ {
gtfType = " ind";
+ }
else if (tree->gtFlags & GTF_CALL_VIRT_STUB)
+ {
gtfType = " stub";
+ }
#ifdef FEATURE_READYTORUN_COMPILER
else if (tree->gtCall.IsR2RRelativeIndir())
+ {
gtfType = " r2r_ind";
+ }
#endif // FEATURE_READYTORUN_COMPILER
else if (tree->gtFlags & GTF_CALL_UNMANAGED)
{
- char * gtfTypeBufWalk = gtfTypeBuf;
- gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf,
- sizeof(gtfTypeBuf), " unman");
+ char* gtfTypeBufWalk = gtfTypeBuf;
+ gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " unman");
if (tree->gtFlags & GTF_CALL_POP_ARGS)
- gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf,
- sizeof(gtfTypeBuf), " popargs");
+ {
+ gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " popargs");
+ }
if (tree->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
- gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf,
- sizeof(gtfTypeBuf),
- " thiscall");
+ gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " thiscall");
}
gtfType = gtfTypeBuf;
- }
+ }
sprintf_s(bufp, sizeof(buf), " %s%s%s%c", callType, ctType, gtfType, 0);
}
else if (tree->gtOper == GT_ARR_ELEM)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
- for (unsigned rank = tree->gtArrElem.gtArrRank-1; rank; rank--)
+ for (unsigned rank = tree->gtArrElem.gtArrRank - 1; rank; rank--)
+ {
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
+ }
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_ARR_OFFSET || tree->gtOper == GT_ARR_INDEX)
@@ -8460,12 +8955,12 @@ Compiler::gtDispNodeName(GenTree *tree)
if (tree->gtOper == GT_ARR_OFFSET)
{
currDim = tree->gtArrOffs.gtCurrDim;
- rank = tree->gtArrOffs.gtArrRank;
+ rank = tree->gtArrOffs.gtArrRank;
}
else
{
currDim = tree->gtArrIndex.gtCurrDim;
- rank = tree->gtArrIndex.gtArrRank;
+ rank = tree->gtArrIndex.gtArrRank;
}
for (unsigned char dim = 0; dim < rank; dim++)
@@ -8483,7 +8978,7 @@ Compiler::gtDispNodeName(GenTree *tree)
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%c", dimChar);
- if (dim != rank-1)
+ if (dim != rank - 1)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
@@ -8492,20 +8987,33 @@ Compiler::gtDispNodeName(GenTree *tree)
}
else if (tree->gtOper == GT_LEA)
{
- GenTreeAddrMode * lea = tree->AsAddrMode();
+ GenTreeAddrMode* lea = tree->AsAddrMode();
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s(", name);
- if (lea->Base() != NULL) bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "b+");
- if (lea->Index() != NULL) bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "(i*%d)+", lea->gtScale);
+ if (lea->Base() != nullptr)
+ {
+ bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "b+");
+ }
+ if (lea->Index() != nullptr)
+ {
+ bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "(i*%d)+", lea->gtScale);
+ }
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%d)", lea->gtOffset);
}
else if (tree->gtOper == GT_ARR_BOUNDS_CHECK)
{
- switch(tree->gtBoundsChk.gtThrowKind)
+ switch (tree->gtBoundsChk.gtThrowKind)
{
- case SCK_RNGCHK_FAIL: sprintf_s(bufp, sizeof(buf), " %s_Rng", name); break;
- case SCK_ARG_EXCPN: sprintf_s(bufp, sizeof(buf), " %s_Arg", name); break;
- case SCK_ARG_RNG_EXCPN: sprintf_s(bufp, sizeof(buf), " %s_ArgRng", name); break;
- default: unreached();
+ case SCK_RNGCHK_FAIL:
+ sprintf_s(bufp, sizeof(buf), " %s_Rng", name);
+ break;
+ case SCK_ARG_EXCPN:
+ sprintf_s(bufp, sizeof(buf), " %s_Arg", name);
+ break;
+ case SCK_ARG_RNG_EXCPN:
+ sprintf_s(bufp, sizeof(buf), " %s_ArgRng", name);
+ break;
+ default:
+ unreached();
}
}
else if (tree->gtOverflowEx())
@@ -8518,12 +9026,16 @@ Compiler::gtDispNodeName(GenTree *tree)
}
if (strlen(buf) < 10)
+ {
printf(" %-10s", buf);
+ }
else
+ {
printf(" %s", buf);
+ }
}
-void Compiler::gtDispVN(GenTree* tree)
+void Compiler::gtDispVN(GenTree* tree)
{
if (tree->gtVNPair.GetLiberal() != ValueNumStore::NoVN)
{
@@ -8548,19 +9060,17 @@ void Compiler::gtDispVN(GenTree* tree)
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' may be null
-void Compiler::gtDispNode(GenTreePtr tree,
- IndentStack* indentStack,
- __in __in_z __in_opt const char * msg)
-{
- bool printPointer = true; // always true..
- bool printFlags = true; // always true..
- bool printCost = true; // always true..
+void Compiler::gtDispNode(GenTreePtr tree, IndentStack* indentStack, __in __in_z __in_opt const char* msg)
+{
+ bool printPointer = true; // always true..
+ bool printFlags = true; // always true..
+ bool printCost = true; // always true..
int msgLength = 25;
- GenTree * prev;
+ GenTree* prev;
- if (tree->gtSeqNum)
+ if (tree->gtSeqNum)
{
printf("N%03u ", tree->gtSeqNum);
if (tree->gtCostsInitialized)
@@ -8569,7 +9079,9 @@ void Compiler::gtDispNode(GenTreePtr tree,
}
else
{
- printf("(???" ",???" ") "); // This probably indicates a bug: the node has a sequence number, but not costs.
+ printf("(???"
+ ",???"
+ ") "); // This probably indicates a bug: the node has a sequence number, but not costs.
}
}
else
@@ -8583,14 +9095,14 @@ void Compiler::gtDispNode(GenTreePtr tree,
prev = tree;
}
- bool hasSeqNum = true;
- unsigned dotNum = 0;
+ bool hasSeqNum = true;
+ unsigned dotNum = 0;
do
{
dotNum++;
prev = prev->gtPrev;
- if ((prev == NULL) || (prev == tree))
+ if ((prev == nullptr) || (prev == tree))
{
hasSeqNum = false;
break;
@@ -8633,8 +9145,7 @@ void Compiler::gtDispNode(GenTreePtr tree,
{
if (IS_CSE_INDEX(tree->gtCSEnum))
{
- printf("CSE #%02d (%s)", GET_CSE_INDEX(tree->gtCSEnum),
- (IS_CSE_USE(tree->gtCSEnum) ? "use" : "def"));
+ printf("CSE #%02d (%s)", GET_CSE_INDEX(tree->gtCSEnum), (IS_CSE_USE(tree->gtCSEnum) ? "use" : "def"));
}
else
{
@@ -8652,111 +9163,199 @@ void Compiler::gtDispNode(GenTreePtr tree,
return;
}
- if (printFlags)
+ if (printFlags)
{
/* First print the flags associated with the node */
switch (tree->gtOper)
{
- case GT_LEA:
- case GT_IND:
- // We prefer printing R, V or U
- if ((tree->gtFlags & (GTF_IND_REFARR_LAYOUT | GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0)
- {
- if (tree->gtFlags & GTF_IND_TGTANYWHERE)
- { printf("*"); --msgLength; break; }
- if (tree->gtFlags & GTF_IND_INVARIANT)
- { printf("#"); --msgLength; break; }
- if (tree->gtFlags & GTF_IND_ARR_INDEX)
- { printf("a"); --msgLength; break; }
- }
- __fallthrough;
+ case GT_LEA:
+ case GT_IND:
+ // We prefer printing R, V or U
+ if ((tree->gtFlags & (GTF_IND_REFARR_LAYOUT | GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0)
+ {
+ if (tree->gtFlags & GTF_IND_TGTANYWHERE)
+ {
+ printf("*");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_IND_INVARIANT)
+ {
+ printf("#");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_IND_ARR_INDEX)
+ {
+ printf("a");
+ --msgLength;
+ break;
+ }
+ }
+ __fallthrough;
- case GT_INDEX:
+ case GT_INDEX:
- if ((tree->gtFlags & (GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0) // We prefer printing V or U over R
- {
- if (tree->gtFlags & GTF_IND_REFARR_LAYOUT)
- { printf("R"); --msgLength; break; } // R means RefArray
- }
- __fallthrough;
+ if ((tree->gtFlags & (GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0) // We prefer printing V or U over R
+ {
+ if (tree->gtFlags & GTF_IND_REFARR_LAYOUT)
+ {
+ printf("R");
+ --msgLength;
+ break;
+ } // R means RefArray
+ }
+ __fallthrough;
- case GT_FIELD:
- case GT_CLS_VAR:
- if (tree->gtFlags & GTF_IND_VOLATILE)
- { printf("V"); --msgLength; break; }
- if (tree->gtFlags & GTF_IND_UNALIGNED)
- { printf("U"); --msgLength; break; }
- goto DASH;
+ case GT_FIELD:
+ case GT_CLS_VAR:
+ if (tree->gtFlags & GTF_IND_VOLATILE)
+ {
+ printf("V");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_IND_UNALIGNED)
+ {
+ printf("U");
+ --msgLength;
+ break;
+ }
+ goto DASH;
- case GT_INITBLK:
- case GT_COPYBLK:
- case GT_COPYOBJ:
- if (tree->AsBlkOp()->IsVolatile())
- { printf("V"); --msgLength; break; }
- if (tree->gtFlags & GTF_BLK_UNALIGNED)
- { printf("U"); --msgLength; break; }
- goto DASH;
+ case GT_INITBLK:
+ case GT_COPYBLK:
+ case GT_COPYOBJ:
+ if (tree->AsBlkOp()->IsVolatile())
+ {
+ printf("V");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_BLK_UNALIGNED)
+ {
+ printf("U");
+ --msgLength;
+ break;
+ }
+ goto DASH;
- case GT_CALL:
- if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE)
- { printf("I"); --msgLength; break; }
- if (tree->gtCall.gtCallMoreFlags & GTF_CALL_M_RETBUFFARG)
- { printf("S"); --msgLength; break; }
- if (tree->gtFlags & GTF_CALL_HOISTABLE)
- { printf("H"); --msgLength; break; }
-
- goto DASH;
+ case GT_CALL:
+ if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE)
+ {
+ printf("I");
+ --msgLength;
+ break;
+ }
+ if (tree->gtCall.gtCallMoreFlags & GTF_CALL_M_RETBUFFARG)
+ {
+ printf("S");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_CALL_HOISTABLE)
+ {
+ printf("H");
+ --msgLength;
+ break;
+ }
- case GT_MUL:
- if (tree->gtFlags & GTF_MUL_64RSLT)
- { printf("L"); --msgLength; break; }
- goto DASH;
+ goto DASH;
- case GT_ADDR:
- if (tree->gtFlags & GTF_ADDR_ONSTACK)
- { printf("L"); --msgLength; break; } // L means LclVar
- goto DASH;
+ case GT_MUL:
+ if (tree->gtFlags & GTF_MUL_64RSLT)
+ {
+ printf("L");
+ --msgLength;
+ break;
+ }
+ goto DASH;
- case GT_LCL_FLD:
- case GT_LCL_VAR:
- case GT_LCL_VAR_ADDR:
- case GT_LCL_FLD_ADDR:
- case GT_STORE_LCL_FLD:
- case GT_STORE_LCL_VAR:
- case GT_REG_VAR:
- if (tree->gtFlags & GTF_VAR_USEASG)
- { printf("U"); --msgLength; break; }
- if (tree->gtFlags & GTF_VAR_USEDEF)
- { printf("B"); --msgLength; break; }
- if (tree->gtFlags & GTF_VAR_DEF)
- { printf("D"); --msgLength; break; }
- if (tree->gtFlags & GTF_VAR_CAST)
- { printf("C"); --msgLength; break; }
- if (tree->gtFlags & GTF_VAR_ARR_INDEX)
- { printf("i"); --msgLength; break; }
- goto DASH;
+ case GT_ADDR:
+ if (tree->gtFlags & GTF_ADDR_ONSTACK)
+ {
+ printf("L");
+ --msgLength;
+ break;
+ } // L means LclVar
+ goto DASH;
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
- if (tree->gtFlags & GTF_RELOP_NAN_UN)
- { printf("N"); --msgLength; break; }
- if (tree->gtFlags & GTF_RELOP_JMP_USED)
- { printf("J"); --msgLength; break; }
- if (tree->gtFlags & GTF_RELOP_QMARK)
- { printf("Q"); --msgLength; break; }
- if (tree->gtFlags & GTF_RELOP_SMALL)
- { printf("S"); --msgLength; break; }
- goto DASH;
+ case GT_LCL_FLD:
+ case GT_LCL_VAR:
+ case GT_LCL_VAR_ADDR:
+ case GT_LCL_FLD_ADDR:
+ case GT_STORE_LCL_FLD:
+ case GT_STORE_LCL_VAR:
+ case GT_REG_VAR:
+ if (tree->gtFlags & GTF_VAR_USEASG)
+ {
+ printf("U");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_VAR_USEDEF)
+ {
+ printf("B");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_VAR_DEF)
+ {
+ printf("D");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_VAR_CAST)
+ {
+ printf("C");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_VAR_ARR_INDEX)
+ {
+ printf("i");
+ --msgLength;
+ break;
+ }
+ goto DASH;
- default:
-DASH:
- printf("-");
- --msgLength;
- break;
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
+ if (tree->gtFlags & GTF_RELOP_NAN_UN)
+ {
+ printf("N");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_RELOP_JMP_USED)
+ {
+ printf("J");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_RELOP_QMARK)
+ {
+ printf("Q");
+ --msgLength;
+ break;
+ }
+ if (tree->gtFlags & GTF_RELOP_SMALL)
+ {
+ printf("S");
+ --msgLength;
+ break;
+ }
+ goto DASH;
+
+ default:
+ DASH:
+ printf("-");
+ --msgLength;
+ break;
}
/* Then print the general purpose flags */
@@ -8771,33 +9370,33 @@ DASH:
{
if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)
{
- flags |= GTF_DONT_CSE; // Force the GTF_ADDRMODE_NO_CSE flag to print out like GTF_DONT_CSE
+ flags |= GTF_DONT_CSE; // Force the GTF_ADDRMODE_NO_CSE flag to print out like GTF_DONT_CSE
}
}
}
- else // !tree->OperIsBinary()
+ else // !tree->OperIsBinary()
{
// the GTF_REVERSE flag only applies to binary operations
- flags &= ~GTF_REVERSE_OPS; // we use this value for GTF_VAR_ARR_INDEX above
+ flags &= ~GTF_REVERSE_OPS; // we use this value for GTF_VAR_ARR_INDEX above
}
msgLength -= GenTree::gtDispFlags(flags, tree->gtDebugFlags);
- /*
- printf("%c", (flags & GTF_ASG ) ? 'A' : '-');
- printf("%c", (flags & GTF_CALL ) ? 'C' : '-');
- printf("%c", (flags & GTF_EXCEPT ) ? 'X' : '-');
- printf("%c", (flags & GTF_GLOB_REF ) ? 'G' : '-');
- printf("%c", (flags & GTF_ORDER_SIDEEFF ) ? 'O' : '-');
- printf("%c", (flags & GTF_COLON_COND ) ? '?' : '-');
- printf("%c", (flags & GTF_DONT_CSE ) ? 'N' : // N is for No cse
- (flags & GTF_MAKE_CSE ) ? 'H' : '-'); // H is for Hoist this expr
- printf("%c", (flags & GTF_REVERSE_OPS ) ? 'R' : '-');
- printf("%c", (flags & GTF_UNSIGNED ) ? 'U' :
- (flags & GTF_BOOLEAN ) ? 'B' : '-');
- printf("%c", (flags & GTF_SET_FLAGS ) ? 'S' : '-');
- printf("%c", (flags & GTF_SPILLED ) ? 'z' : '-');
- printf("%c", (flags & GTF_SPILL ) ? 'Z' : '-');
- */
+/*
+ printf("%c", (flags & GTF_ASG ) ? 'A' : '-');
+ printf("%c", (flags & GTF_CALL ) ? 'C' : '-');
+ printf("%c", (flags & GTF_EXCEPT ) ? 'X' : '-');
+ printf("%c", (flags & GTF_GLOB_REF ) ? 'G' : '-');
+ printf("%c", (flags & GTF_ORDER_SIDEEFF ) ? 'O' : '-');
+ printf("%c", (flags & GTF_COLON_COND ) ? '?' : '-');
+ printf("%c", (flags & GTF_DONT_CSE ) ? 'N' : // N is for No cse
+ (flags & GTF_MAKE_CSE ) ? 'H' : '-'); // H is for Hoist this expr
+ printf("%c", (flags & GTF_REVERSE_OPS ) ? 'R' : '-');
+ printf("%c", (flags & GTF_UNSIGNED ) ? 'U' :
+ (flags & GTF_BOOLEAN ) ? 'B' : '-');
+ printf("%c", (flags & GTF_SET_FLAGS ) ? 'S' : '-');
+ printf("%c", (flags & GTF_SPILLED ) ? 'z' : '-');
+ printf("%c", (flags & GTF_SPILL ) ? 'Z' : '-');
+*/
#if FEATURE_STACK_FP_X87
BYTE fpLvl = (BYTE)tree->gtFPlvl;
@@ -8814,10 +9413,14 @@ DASH:
/* print the msg associated with the node */
- if (msg == NULL)
+ if (msg == nullptr)
+ {
msg = "";
+ }
if (msgLength < 0)
+ {
msgLength = 0;
+ }
printf(" %-*s", msgLength, msg);
@@ -8826,9 +9429,9 @@ DASH:
gtDispNodeName(tree);
- assert(tree == 0 || tree->gtOper < GT_COUNT);
+ assert(tree == nullptr || tree->gtOper < GT_COUNT);
- if (tree)
+ if (tree)
{
/* print the type of the node */
if (tree->gtOper != GT_CAST)
@@ -8836,10 +9439,10 @@ DASH:
printf(" %-6s", varTypeName(tree->TypeGet()));
if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_STORE_LCL_VAR)
{
- LclVarDsc * varDsc = &lvaTable[tree->gtLclVarCommon.gtLclNum];
+ LclVarDsc* varDsc = &lvaTable[tree->gtLclVarCommon.gtLclNum];
if (varDsc->lvAddrExposed)
{
- printf("(AX)"); // Variable has address exposed.
+ printf("(AX)"); // Variable has address exposed.
}
if (varDsc->lvUnusedStruct)
@@ -8857,9 +9460,13 @@ DASH:
if (tree->gtOper == GT_STMT)
{
if (tree->gtFlags & GTF_STMT_TOP_LEVEL)
+ {
printf("(top level) ");
+ }
else
+ {
printf("(embedded) ");
+ }
if (opts.compDbgInfo)
{
@@ -8867,19 +9474,27 @@ DASH:
printf("(IL ");
if (tree->gtStmt.gtStmtILoffsx == BAD_IL_OFFSET)
+ {
printf(" ???");
+ }
else
+ {
printf("0x%03X", jitGetILoffs(tree->gtStmt.gtStmtILoffsx));
+ }
printf("...");
if (endIL == BAD_IL_OFFSET)
+ {
printf(" ???");
+ }
else
+ {
printf("0x%03X", endIL);
+ }
printf(")");
}
}
- if (tree->IsArgPlaceHolderNode() && (tree->gtArgPlace.gtArgPlaceClsHnd != NULL))
+ if (tree->IsArgPlaceHolderNode() && (tree->gtArgPlace.gtArgPlaceClsHnd != nullptr))
{
printf(" => [clsHnd=%08X]", dspPtr(tree->gtArgPlace.gtArgPlaceClsHnd));
}
@@ -8887,77 +9502,79 @@ DASH:
// for tracking down problems in reguse prediction or liveness tracking
- if (verbose&&0)
+ if (verbose && 0)
{
- printf(" RR="); dspRegMask(tree->gtRsvdRegs);
+ printf(" RR=");
+ dspRegMask(tree->gtRsvdRegs);
#ifdef LEGACY_BACKEND
- printf(",UR="); dspRegMask(tree->gtUsedRegs);
+ printf(",UR=");
+ dspRegMask(tree->gtUsedRegs);
#endif // LEGACY_BACKEND
printf("\n");
}
}
}
-void Compiler::gtDispRegVal(GenTree * tree)
+void Compiler::gtDispRegVal(GenTree* tree)
{
switch (tree->GetRegTag())
{
- // Don't display NOREG; the absence of this tag will imply this state
- //case GenTree::GT_REGTAG_NONE: printf(" NOREG"); break;
+ // Don't display NOREG; the absence of this tag will imply this state
+ // case GenTree::GT_REGTAG_NONE: printf(" NOREG"); break;
- case GenTree::GT_REGTAG_REG:
- printf(" REG %s", compRegVarName(tree->gtRegNum));
- break;
+ case GenTree::GT_REGTAG_REG:
+ printf(" REG %s", compRegVarName(tree->gtRegNum));
+ break;
#if CPU_LONG_USES_REGPAIR
- case GenTree::GT_REGTAG_REGPAIR:
- printf(" PAIR %s", compRegPairName(tree->gtRegPair));
- break;
+ case GenTree::GT_REGTAG_REGPAIR:
+ printf(" PAIR %s", compRegPairName(tree->gtRegPair));
+ break;
#endif
- default:
- break;
+ default:
+ break;
}
if (tree->IsMultiRegCall())
{
// 0th reg is gtRegNum, which is already printed above.
// Print the remaining regs of a multi-reg call node.
- GenTreeCall* call = tree->AsCall();
- unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
+ GenTreeCall* call = tree->AsCall();
+ unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
for (unsigned i = 1; i < regCount; ++i)
{
printf(",%s", compRegVarName(call->GetRegNumByIdx(i)));
- }
+ }
}
else if (tree->IsCopyOrReloadOfMultiRegCall())
{
GenTreeCopyOrReload* copyOrReload = tree->AsCopyOrReload();
- GenTreeCall* call = tree->gtGetOp1()->AsCall();
- unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
+ GenTreeCall* call = tree->gtGetOp1()->AsCall();
+ unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
for (unsigned i = 1; i < regCount; ++i)
{
printf(",%s", compRegVarName(copyOrReload->GetRegNumByIdx(i)));
}
}
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
printf(" RV");
}
}
// We usually/commonly don't expect to print anything longer than this string,
-#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame"
-#define LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH (sizeof(LONGEST_COMMON_LCL_VAR_DISPLAY))
-#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH*2)
+#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame"
+#define LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH (sizeof(LONGEST_COMMON_LCL_VAR_DISPLAY))
+#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2)
-void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned * ilNumOut)
+void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut)
{
const char* ilKind = nullptr;
const char* ilName = nullptr;
- unsigned ilNum = compMap2ILvarNum(lclNum);
+ unsigned ilNum = compMap2ILvarNum(lclNum);
if (ilNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM)
{
@@ -8981,68 +9598,89 @@ void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char**
}
else if (lclNum >= optCSEstart)
{
- // Currently any new LclVars introduced after the CSE phase
+ // Currently any new LclVars introduced after the CSE phase
// are believed to be created by the "rationalizer"; that is what is meant by the "rat" prefix.
ilKind = "rat";
- ilNum = lclNum - (optCSEstart+optCSEcount);
+ ilNum = lclNum - (optCSEstart + optCSEcount);
}
else
#endif // FEATURE_ANYCSE
- {
+ {
if (lclNum == info.compLvFrameListRoot)
+ {
ilName = "FramesRoot";
+ }
else if (lclNum == lvaInlinedPInvokeFrameVar)
+ {
ilName = "PInvokeFrame";
+ }
else if (lclNum == lvaGSSecurityCookie)
+ {
ilName = "GsCookie";
+ }
#if FEATURE_FIXED_OUT_ARGS
else if (lclNum == lvaPInvokeFrameRegSaveVar)
+ {
ilName = "PInvokeFrameRegSave";
+ }
else if (lclNum == lvaOutgoingArgSpaceVar)
+ {
ilName = "OutArgs";
+ }
#endif // FEATURE_FIXED_OUT_ARGS
#ifdef _TARGET_ARM_
else if (lclNum == lvaPromotedStructAssemblyScratchVar)
+ {
ilName = "PromotedStructScratch";
+ }
#endif // _TARGET_ARM_
#if !FEATURE_EH_FUNCLETS
else if (lclNum == lvaShadowSPslotsVar)
+ {
ilName = "EHSlots";
+ }
#endif // !FEATURE_EH_FUNCLETS
else if (lclNum == lvaLocAllocSPvar)
+ {
ilName = "LocAllocSP";
+ }
#if FEATURE_EH_FUNCLETS
else if (lclNum == lvaPSPSym)
+ {
ilName = "PSPSym";
+ }
#endif // FEATURE_EH_FUNCLETS
- else
+ else
{
- ilKind = "tmp";
+ ilKind = "tmp";
if (compIsForInlining())
{
- ilNum = lclNum - impInlineInfo->InlinerCompiler->info.compLocalsCount;
+ ilNum = lclNum - impInlineInfo->InlinerCompiler->info.compLocalsCount;
}
else
{
- ilNum = lclNum - info.compLocalsCount;
+ ilNum = lclNum - info.compLocalsCount;
}
}
}
}
- else if (lclNum < (compIsForInlining()
- ? impInlineInfo->InlinerCompiler->info.compArgsCount
- : info.compArgsCount))
+ else if (lclNum < (compIsForInlining() ? impInlineInfo->InlinerCompiler->info.compArgsCount : info.compArgsCount))
{
if (ilNum == 0 && !info.compIsStatic)
+ {
ilName = "this";
+ }
else
+ {
ilKind = "arg";
+ }
}
else
{
- if (!lvaTable[lclNum].lvIsStructField)
- ilKind = "loc";
-
+ if (!lvaTable[lclNum].lvIsStructField)
+ {
+ ilKind = "loc";
+ }
if (compIsForInlining())
{
ilNum -= impInlineInfo->InlinerCompiler->info.compILargsCount;
@@ -9055,63 +9693,73 @@ void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char**
*ilKindOut = ilKind;
*ilNameOut = ilName;
- *ilNumOut = ilNum;
+ *ilNumOut = ilNum;
}
-
+
/*****************************************************************************/
-int Compiler::gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining)
+int Compiler::gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining)
{
- char* bufp_next = buf;
- unsigned charsPrinted = 0;
+ char* bufp_next = buf;
+ unsigned charsPrinted = 0;
int sprintf_result;
sprintf_result = sprintf_s(bufp_next, buf_remaining, "V%02u", lclNum);
if (sprintf_result < 0)
+ {
return sprintf_result;
+ }
- charsPrinted += sprintf_result;
- bufp_next += sprintf_result;
+ charsPrinted += sprintf_result;
+ bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
const char* ilKind = nullptr;
const char* ilName = nullptr;
- unsigned ilNum = 0;
+ unsigned ilNum = 0;
Compiler::gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
if (ilName != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s", ilName);
- if (sprintf_result < 0) return sprintf_result;
- charsPrinted += sprintf_result;
- bufp_next += sprintf_result;
+ if (sprintf_result < 0)
+ {
+ return sprintf_result;
+ }
+ charsPrinted += sprintf_result;
+ bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
else if (ilKind != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s%d", ilKind, ilNum);
- if (sprintf_result < 0) return sprintf_result;
- charsPrinted += sprintf_result;
- bufp_next += sprintf_result;
+ if (sprintf_result < 0)
+ {
+ return sprintf_result;
+ }
+ charsPrinted += sprintf_result;
+ bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
- assert(charsPrinted > 0);
+ assert(charsPrinted > 0);
assert(buf_remaining > 0);
-
+
return (int)charsPrinted;
}
/*****************************************************************************
* Get the local var name, and create a copy of the string that can be used in debug output.
*/
-char* Compiler::gtGetLclVarName(unsigned lclNum)
+char* Compiler::gtGetLclVarName(unsigned lclNum)
{
- char buf[BUF_SIZE];
- int charsPrinted = gtGetLclVarName(lclNum, buf, sizeof(buf)/sizeof(buf[0]));
+ char buf[BUF_SIZE];
+ int charsPrinted = gtGetLclVarName(lclNum, buf, sizeof(buf) / sizeof(buf[0]));
if (charsPrinted < 0)
+ {
return nullptr;
+ }
char* retBuf = new (this, CMK_DebugOnly) char[charsPrinted + 1];
strcpy_s(retBuf, charsPrinted + 1, buf);
@@ -9119,131 +9767,149 @@ char* Compiler::gtGetLclVarName(unsigned lclNum)
}
/*****************************************************************************/
-void Compiler::gtDispLclVar(unsigned lclNum, bool padForBiggestDisp)
+void Compiler::gtDispLclVar(unsigned lclNum, bool padForBiggestDisp)
{
- char buf[BUF_SIZE];
- int charsPrinted = gtGetLclVarName(lclNum, buf, sizeof(buf)/sizeof(buf[0]));
+ char buf[BUF_SIZE];
+ int charsPrinted = gtGetLclVarName(lclNum, buf, sizeof(buf) / sizeof(buf[0]));
if (charsPrinted < 0)
+ {
return;
-
+ }
+
printf("%s", buf);
if (padForBiggestDisp && (charsPrinted < LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH))
+ {
printf("%*c", LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH - charsPrinted, ' ');
+ }
}
/*****************************************************************************/
-void
-Compiler::gtDispConst(GenTree *tree)
+void Compiler::gtDispConst(GenTree* tree)
{
assert(tree->OperKind() & GTK_CONST);
- switch (tree->gtOper)
+ switch (tree->gtOper)
{
- case GT_CNS_INT:
- if (tree->IsIconHandle(GTF_ICON_STR_HDL))
- {
- printf(" 0x%X \"%S\"", dspPtr(tree->gtIntCon.gtIconVal), eeGetCPString(tree->gtIntCon.gtIconVal));
- }
- else
- {
- ssize_t dspIconVal = tree->IsIconHandle() ? dspPtr(tree->gtIntCon.gtIconVal) : tree->gtIntCon.gtIconVal;
-
- if (tree->TypeGet() == TYP_REF)
+ case GT_CNS_INT:
+ if (tree->IsIconHandle(GTF_ICON_STR_HDL))
{
- assert(tree->gtIntCon.gtIconVal == 0);
- printf(" null");
+ printf(" 0x%X \"%S\"", dspPtr(tree->gtIntCon.gtIconVal), eeGetCPString(tree->gtIntCon.gtIconVal));
}
- else if ((tree->gtIntCon.gtIconVal > -1000) && (tree->gtIntCon.gtIconVal < 1000))
- printf(" %ld", dspIconVal);
+ else
+ {
+ ssize_t dspIconVal = tree->IsIconHandle() ? dspPtr(tree->gtIntCon.gtIconVal) : tree->gtIntCon.gtIconVal;
+
+ if (tree->TypeGet() == TYP_REF)
+ {
+ assert(tree->gtIntCon.gtIconVal == 0);
+ printf(" null");
+ }
+ else if ((tree->gtIntCon.gtIconVal > -1000) && (tree->gtIntCon.gtIconVal < 1000))
+ {
+ printf(" %ld", dspIconVal);
#ifdef _TARGET_64BIT_
- else if ((tree->gtIntCon.gtIconVal & 0xFFFFFFFF00000000LL) != 0)
- printf(" 0x%llx", dspIconVal);
+ }
+ else if ((tree->gtIntCon.gtIconVal & 0xFFFFFFFF00000000LL) != 0)
+ {
+ printf(" 0x%llx", dspIconVal);
#endif
- else
- printf(" 0x%X", dspIconVal);
+ }
+ else
+ {
+ printf(" 0x%X", dspIconVal);
+ }
- if (tree->IsIconHandle())
- {
- switch (tree->GetIconHandleFlag())
+ if (tree->IsIconHandle())
{
- case GTF_ICON_SCOPE_HDL:
- printf(" scope");
- break;
- case GTF_ICON_CLASS_HDL:
- printf(" class");
- break;
- case GTF_ICON_METHOD_HDL:
- printf(" method");
- break;
- case GTF_ICON_FIELD_HDL:
- printf(" field");
- break;
- case GTF_ICON_STATIC_HDL:
- printf(" static");
- break;
- case GTF_ICON_STR_HDL:
- unreached(); // This case is handled above
- break;
- case GTF_ICON_PSTR_HDL:
- printf(" pstr");
- break;
- case GTF_ICON_PTR_HDL:
- printf(" ptr");
- break;
- case GTF_ICON_VARG_HDL:
- printf(" vararg");
- break;
- case GTF_ICON_PINVKI_HDL:
- printf(" pinvoke");
- break;
- case GTF_ICON_TOKEN_HDL:
- printf(" token");
- break;
- case GTF_ICON_TLS_HDL:
- printf(" tls");
- break;
- case GTF_ICON_FTN_ADDR:
- printf(" ftn");
- break;
- case GTF_ICON_CIDMID_HDL:
- printf(" cid");
- break;
- case GTF_ICON_BBC_PTR:
- printf(" bbc");
- break;
- default:
- printf(" UNKNOWN");
- break;
+ switch (tree->GetIconHandleFlag())
+ {
+ case GTF_ICON_SCOPE_HDL:
+ printf(" scope");
+ break;
+ case GTF_ICON_CLASS_HDL:
+ printf(" class");
+ break;
+ case GTF_ICON_METHOD_HDL:
+ printf(" method");
+ break;
+ case GTF_ICON_FIELD_HDL:
+ printf(" field");
+ break;
+ case GTF_ICON_STATIC_HDL:
+ printf(" static");
+ break;
+ case GTF_ICON_STR_HDL:
+ unreached(); // This case is handled above
+ break;
+ case GTF_ICON_PSTR_HDL:
+ printf(" pstr");
+ break;
+ case GTF_ICON_PTR_HDL:
+ printf(" ptr");
+ break;
+ case GTF_ICON_VARG_HDL:
+ printf(" vararg");
+ break;
+ case GTF_ICON_PINVKI_HDL:
+ printf(" pinvoke");
+ break;
+ case GTF_ICON_TOKEN_HDL:
+ printf(" token");
+ break;
+ case GTF_ICON_TLS_HDL:
+ printf(" tls");
+ break;
+ case GTF_ICON_FTN_ADDR:
+ printf(" ftn");
+ break;
+ case GTF_ICON_CIDMID_HDL:
+ printf(" cid");
+ break;
+ case GTF_ICON_BBC_PTR:
+ printf(" bbc");
+ break;
+ default:
+ printf(" UNKNOWN");
+ break;
+ }
+ }
+
+ if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
+ {
+ printf(" field offset");
}
- }
- if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
- printf(" field offset");
+ if ((tree->IsReuseRegVal()) != 0)
+ {
+ printf(" reuse reg val");
+ }
+ }
- if ((tree->IsReuseRegVal()) != 0)
- printf(" reuse reg val");
- }
+ gtDispFieldSeq(tree->gtIntCon.gtFieldSeq);
- gtDispFieldSeq(tree->gtIntCon.gtFieldSeq);
-
- break;
+ break;
- case GT_CNS_LNG:
- printf(" 0x%016I64x", tree->gtLngCon.gtLconVal);
- break;
+ case GT_CNS_LNG:
+ printf(" 0x%016I64x", tree->gtLngCon.gtLconVal);
+ break;
- case GT_CNS_DBL:
- if (*((__int64 *)&tree->gtDblCon.gtDconVal) == (__int64)I64(0x8000000000000000))
- printf(" -0.00000");
- else
- printf(" %#.17g", tree->gtDblCon.gtDconVal);
- break;
- case GT_CNS_STR:
- printf("<string constant>");
- break;
- default: assert(!"unexpected constant node");
+ case GT_CNS_DBL:
+ if (*((__int64*)&tree->gtDblCon.gtDconVal) == (__int64)I64(0x8000000000000000))
+ {
+ printf(" -0.00000");
+ }
+ else
+ {
+ printf(" %#.17g", tree->gtDblCon.gtDconVal);
+ }
+ break;
+ case GT_CNS_STR:
+ printf("<string constant>");
+ break;
+ default:
+ assert(!"unexpected constant node");
}
gtDispRegVal(tree);
@@ -9258,7 +9924,7 @@ void Compiler::gtDispFieldSeq(FieldSeqNode* pfsn)
// Otherwise...
printf(" Fseq[");
- while (pfsn != NULL)
+ while (pfsn != nullptr)
{
assert(pfsn != FieldSeqStore::NotAField()); // Can't exist in a field sequence list except alone
CORINFO_FIELD_HANDLE fldHnd = pfsn->m_fieldHnd;
@@ -9276,7 +9942,10 @@ void Compiler::gtDispFieldSeq(FieldSeqNode* pfsn)
printf("%s", eeGetFieldName(fldHnd));
}
pfsn = pfsn->m_next;
- if (pfsn != NULL) printf(", ");
+ if (pfsn != nullptr)
+ {
+ printf(", ");
+ }
}
printf("]");
}
@@ -9294,8 +9963,7 @@ void Compiler::gtDispFieldSeq(FieldSeqNode* pfsn)
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
-void
-Compiler::gtDispLeaf(GenTree *tree, IndentStack* indentStack)
+void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack)
{
if (tree->OperKind() & GTK_CONST)
{
@@ -9305,216 +9973,209 @@ Compiler::gtDispLeaf(GenTree *tree, IndentStack* indentStack)
bool isLclFld = false;
- switch (tree->gtOper)
+ switch (tree->gtOper)
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- case GT_LCL_FLD:
- case GT_LCL_FLD_ADDR:
- case GT_STORE_LCL_FLD:
- isLclFld = true;
- __fallthrough;
+ case GT_LCL_FLD:
+ case GT_LCL_FLD_ADDR:
+ case GT_STORE_LCL_FLD:
+ isLclFld = true;
+ __fallthrough;
- case GT_PHI_ARG:
- case GT_LCL_VAR:
- case GT_LCL_VAR_ADDR:
- case GT_STORE_LCL_VAR:
- printf(" ");
- varNum = tree->gtLclVarCommon.gtLclNum;
- varDsc = &lvaTable[varNum];
- gtDispLclVar(varNum);
- if (tree->gtLclVarCommon.HasSsaName())
- {
- if (tree->gtFlags & GTF_VAR_USEASG)
+ case GT_PHI_ARG:
+ case GT_LCL_VAR:
+ case GT_LCL_VAR_ADDR:
+ case GT_STORE_LCL_VAR:
+ printf(" ");
+ varNum = tree->gtLclVarCommon.gtLclNum;
+ varDsc = &lvaTable[varNum];
+ gtDispLclVar(varNum);
+ if (tree->gtLclVarCommon.HasSsaName())
{
- assert(tree->gtFlags & GTF_VAR_DEF);
- printf("ud:%d->%d", tree->gtLclVarCommon.gtSsaNum, GetSsaNumForLocalVarDef(tree));
+ if (tree->gtFlags & GTF_VAR_USEASG)
+ {
+ assert(tree->gtFlags & GTF_VAR_DEF);
+ printf("ud:%d->%d", tree->gtLclVarCommon.gtSsaNum, GetSsaNumForLocalVarDef(tree));
+ }
+ else
+ {
+ printf("%s:%d", (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u", tree->gtLclVarCommon.gtSsaNum);
+ }
}
- else
+
+ if (isLclFld)
{
- printf("%s:%d", (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u", tree->gtLclVarCommon.gtSsaNum);
+ printf("[+%u]", tree->gtLclFld.gtLclOffs);
+ gtDispFieldSeq(tree->gtLclFld.gtFieldSeq);
}
- }
-
- if (isLclFld)
- {
- printf("[+%u]", tree->gtLclFld.gtLclOffs);
- gtDispFieldSeq(tree->gtLclFld.gtFieldSeq);
- }
- if (varDsc->lvRegister)
- {
- printf(" ");
- varDsc->PrintVarReg();
- }
+ if (varDsc->lvRegister)
+ {
+ printf(" ");
+ varDsc->PrintVarReg();
+ }
#ifndef LEGACY_BACKEND
- else if (tree->InReg())
- {
+ else if (tree->InReg())
+ {
#if CPU_LONG_USES_REGPAIR
- if (isRegPairType(tree->TypeGet()))
- printf(" %s", compRegPairName(tree->gtRegPair));
- else
+ if (isRegPairType(tree->TypeGet()))
+ printf(" %s", compRegPairName(tree->gtRegPair));
+ else
#endif
- printf(" %s", compRegVarName(tree->gtRegNum));
- }
+ printf(" %s", compRegVarName(tree->gtRegNum));
+ }
#endif // !LEGACY_BACKEND
-
- if (varDsc->lvPromoted)
- {
- assert(varTypeIsPromotable(varDsc) || varDsc->lvUnusedStruct);
-
- CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
- CORINFO_FIELD_HANDLE fldHnd;
-
- for (unsigned i = varDsc->lvFieldLclStart;
- i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
- ++i)
- {
- LclVarDsc * fieldVarDsc = &lvaTable[i];
- const char* fieldName;
-#if !defined(_TARGET_64BIT_)
- if (varTypeIsLong(varDsc))
+
+ if (varDsc->lvPromoted)
+ {
+ assert(varTypeIsPromotable(varDsc) || varDsc->lvUnusedStruct);
+
+ CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
+ CORINFO_FIELD_HANDLE fldHnd;
+
+ for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
- fieldName = (i == 0) ? "lo" : "hi";
- }
- else
+ LclVarDsc* fieldVarDsc = &lvaTable[i];
+ const char* fieldName;
+#if !defined(_TARGET_64BIT_)
+ if (varTypeIsLong(varDsc))
+ {
+ fieldName = (i == 0) ? "lo" : "hi";
+ }
+ else
#endif // !defined(_TARGET_64BIT_)
- {
- fldHnd = info.compCompHnd->getFieldInClass(typeHnd, fieldVarDsc->lvFldOrdinal);
- fieldName = eeGetFieldName(fldHnd);
- }
-
- printf("\n");
- printf(" ");
- printIndent(indentStack);
- printf(" %-6s V%02u.%s (offs=0x%02x) -> ",
- varTypeName(fieldVarDsc->TypeGet()),
- tree->gtLclVarCommon.gtLclNum,
- fieldName,
- fieldVarDsc->lvFldOffset
- );
- gtDispLclVar(i);
+ {
+ fldHnd = info.compCompHnd->getFieldInClass(typeHnd, fieldVarDsc->lvFldOrdinal);
+ fieldName = eeGetFieldName(fldHnd);
+ }
- if (fieldVarDsc->lvRegister)
- {
- printf(" ");
- fieldVarDsc->PrintVarReg();
- }
+ printf("\n");
+ printf(" ");
+ printIndent(indentStack);
+ printf(" %-6s V%02u.%s (offs=0x%02x) -> ", varTypeName(fieldVarDsc->TypeGet()),
+ tree->gtLclVarCommon.gtLclNum, fieldName, fieldVarDsc->lvFldOffset);
+ gtDispLclVar(i);
- if (fieldVarDsc->lvTracked &&
- fgLocalVarLivenessDone && // Includes local variable liveness
- ((tree->gtFlags & GTF_VAR_DEATH) != 0))
+ if (fieldVarDsc->lvRegister)
+ {
+ printf(" ");
+ fieldVarDsc->PrintVarReg();
+ }
+
+ if (fieldVarDsc->lvTracked && fgLocalVarLivenessDone && // Includes local variable liveness
+ ((tree->gtFlags & GTF_VAR_DEATH) != 0))
+ {
+ printf(" (last use)");
+ }
+ }
+ }
+ else // a normal not-promoted lclvar
+ {
+ if (varDsc->lvTracked && fgLocalVarLivenessDone && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
printf(" (last use)");
}
}
- }
- else // a normal not-promoted lclvar
- {
- if (varDsc->lvTracked &&
- fgLocalVarLivenessDone &&
- ((tree->gtFlags & GTF_VAR_DEATH) != 0))
+ break;
+
+ case GT_REG_VAR:
+ printf(" ");
+ gtDispLclVar(tree->gtRegVar.gtLclNum);
+ if (isFloatRegType(tree->gtType))
{
- printf(" (last use)");
+ assert(tree->gtRegVar.gtRegNum == tree->gtRegNum);
+ printf(" FPV%u", tree->gtRegNum);
+ }
+ else
+ {
+ printf(" %s", compRegVarName(tree->gtRegVar.gtRegNum));
}
- }
- break;
- case GT_REG_VAR:
- printf(" ");
- gtDispLclVar(tree->gtRegVar.gtLclNum);
- if (isFloatRegType(tree->gtType))
- {
- assert(tree->gtRegVar.gtRegNum == tree->gtRegNum);
- printf(" FPV%u", tree->gtRegNum);
- }
- else
- {
- printf(" %s", compRegVarName(tree->gtRegVar.gtRegNum));
- }
+ varNum = tree->gtRegVar.gtLclNum;
+ varDsc = &lvaTable[varNum];
- varNum = tree->gtRegVar.gtLclNum;
- varDsc = &lvaTable[varNum];
+ if (varDsc->lvTracked && fgLocalVarLivenessDone && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
+ {
+ printf(" (last use)");
+ }
+
+ break;
- if (varDsc->lvTracked &&
- fgLocalVarLivenessDone &&
- ((tree->gtFlags & GTF_VAR_DEATH) != 0))
+ case GT_JMP:
{
- printf(" (last use)");
- }
+ const char* methodName;
+ const char* className;
+ methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->gtVal.gtVal1, &className);
+ printf(" %s.%s\n", className, methodName);
+ }
break;
- case GT_JMP:
- {
- const char * methodName;
- const char * className;
-
- methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->gtVal.gtVal1, &className);
- printf(" %s.%s\n", className, methodName);
- }
- break;
+ case GT_CLS_VAR:
+ printf(" Hnd=%#x", dspPtr(tree->gtClsVar.gtClsVarHnd));
+ gtDispFieldSeq(tree->gtClsVar.gtFieldSeq);
+ break;
- case GT_CLS_VAR:
- printf(" Hnd=%#x" , dspPtr(tree->gtClsVar.gtClsVarHnd));
- gtDispFieldSeq(tree->gtClsVar.gtFieldSeq);
- break;
+ case GT_CLS_VAR_ADDR:
+ printf(" Hnd=%#x", dspPtr(tree->gtClsVar.gtClsVarHnd));
+ break;
- case GT_CLS_VAR_ADDR:
- printf(" Hnd=%#x" , dspPtr(tree->gtClsVar.gtClsVarHnd));
- break;
+ case GT_LABEL:
+ if (tree->gtLabel.gtLabBB)
+ {
+ printf(" dst=BB%02u", tree->gtLabel.gtLabBB->bbNum);
+ }
+ else
+ {
+ printf(" dst=<null>");
+ }
- case GT_LABEL:
- if (tree->gtLabel.gtLabBB)
- printf(" dst=BB%02u" , tree->gtLabel.gtLabBB->bbNum);
- else
- printf(" dst=<null>");
-
- break;
+ break;
- case GT_FTN_ADDR:
- {
- const char * methodName;
- const char * className;
+ case GT_FTN_ADDR:
+ {
+ const char* methodName;
+ const char* className;
- methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->gtFptrVal.gtFptrMethod, &className);
- printf(" %s.%s\n", className, methodName);
- }
- break;
+ methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->gtFptrVal.gtFptrMethod, &className);
+ printf(" %s.%s\n", className, methodName);
+ }
+ break;
#if !FEATURE_EH_FUNCLETS
- case GT_END_LFIN:
- printf(" endNstLvl=%d", tree->gtVal.gtVal1);
- break;
+ case GT_END_LFIN:
+ printf(" endNstLvl=%d", tree->gtVal.gtVal1);
+ break;
#endif // !FEATURE_EH_FUNCLETS
// Vanilla leaves. No qualifying information available. So do nothing
- case GT_NO_OP:
- case GT_START_NONGC:
- case GT_PROF_HOOK:
- case GT_CATCH_ARG:
- case GT_MEMORYBARRIER:
- case GT_ARGPLACE:
- case GT_PINVOKE_PROLOG:
+ case GT_NO_OP:
+ case GT_START_NONGC:
+ case GT_PROF_HOOK:
+ case GT_CATCH_ARG:
+ case GT_MEMORYBARRIER:
+ case GT_ARGPLACE:
+ case GT_PINVOKE_PROLOG:
#ifndef LEGACY_BACKEND
- case GT_JMPTABLE:
+ case GT_JMPTABLE:
#endif // !LEGACY_BACKEND
- break;
+ break;
- case GT_RET_EXPR:
- printf("(inl return from call ");
- printTreeID(tree->gtRetExpr.gtInlineCandidate);
- printf(")");
- break;
+ case GT_RET_EXPR:
+ printf("(inl return from call ");
+ printTreeID(tree->gtRetExpr.gtInlineCandidate);
+ printf(")");
+ break;
- case GT_PHYSREG:
- printf(" %s", getRegName(tree->gtPhysReg.gtSrcReg, varTypeIsFloating(tree)));
- break;
+ case GT_PHYSREG:
+ printf(" %s", getRegName(tree->gtPhysReg.gtSrcReg, varTypeIsFloating(tree)));
+ break;
- default:
- assert(!"don't know how to display tree leaf node");
+ default:
+ assert(!"don't know how to display tree leaf node");
}
gtDispRegVal(tree);
@@ -9538,13 +10199,13 @@ Compiler::gtDispLeaf(GenTree *tree, IndentStack* indentStack)
// 'msg' has a default value of null
// 'topOnly' is an optional argument that defaults to false
-void Compiler::gtDispChild(GenTreePtr child,
- IndentStack* indentStack,
- IndentInfo arcType,
- __in_opt const char* msg, /* = nullptr */
- bool topOnly) /* = false */
+void Compiler::gtDispChild(GenTreePtr child,
+ IndentStack* indentStack,
+ IndentInfo arcType,
+ __in_opt const char* msg, /* = nullptr */
+ bool topOnly) /* = false */
{
- IndentInfo info;
+ IndentInfo info;
indentStack->Push(arcType);
gtDispTree(child, indentStack, msg, topOnly);
indentStack->Pop();
@@ -9552,26 +10213,23 @@ void Compiler::gtDispChild(GenTreePtr child,
#ifdef FEATURE_SIMD
// Intrinsic Id to name map
-extern
-const char * const simdIntrinsicNames[] =
-{
- #define SIMD_INTRINSIC(mname, inst, id, name, r, ac, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) name,
- #include "simdintrinsiclist.h"
+extern const char* const simdIntrinsicNames[] = {
+#define SIMD_INTRINSIC(mname, inst, id, name, r, ac, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) name,
+#include "simdintrinsiclist.h"
};
-#endif //FEATURE_SIMD
-
+#endif // FEATURE_SIMD
/*****************************************************************************/
-void Compiler::gtDispTree(GenTreePtr tree,
- IndentStack* indentStack, /* = nullptr */
- __in __in_z __in_opt const char * msg, /* = nullptr */
- bool topOnly) /* = false */
+void Compiler::gtDispTree(GenTreePtr tree,
+ IndentStack* indentStack, /* = nullptr */
+ __in __in_z __in_opt const char* msg, /* = nullptr */
+ bool topOnly) /* = false */
{
- if (tree == NULL)
+ if (tree == nullptr)
{
printf(" [%08X] <NULL>\n", tree);
- printf(""); // null string means flush
+ printf(""); // null string means flush
return;
}
@@ -9579,7 +10237,7 @@ void Compiler::gtDispTree(GenTreePtr tree,
{
if (tree->gtOper == GT_STMT)
{
- (void) gtDispLinearStmt(tree->AsStmt());
+ (void)gtDispLinearStmt(tree->AsStmt());
}
else
{
@@ -9600,17 +10258,16 @@ void Compiler::gtDispTree(GenTreePtr tree,
return;
}
- if (tree->gtOper >= GT_COUNT)
+ if (tree->gtOper >= GT_COUNT)
{
- gtDispNode(tree, indentStack, msg);
+ gtDispNode(tree, indentStack, msg);
printf("Bogus operator!");
return;
}
/* Is tree a leaf node? */
- if (tree->OperIsLeaf()
- || tree->OperIsLocalStore()) // local stores used to be leaves
+ if (tree->OperIsLeaf() || tree->OperIsLocalStore()) // local stores used to be leaves
{
gtDispNode(tree, indentStack, msg);
gtDispLeaf(tree, indentStack);
@@ -9624,32 +10281,32 @@ void Compiler::gtDispTree(GenTreePtr tree,
}
// Determine what kind of arc to propagate.
- IndentInfo myArc = IINone;
+ IndentInfo myArc = IINone;
IndentInfo lowerArc = IINone;
if (indentStack->Depth() > 0)
{
myArc = indentStack->Pop();
- switch(myArc)
+ switch (myArc)
{
- case IIArcBottom:
- indentStack->Push(IIArc);
- lowerArc = IINone;
- break;
- case IIArc:
- indentStack->Push(IIArc);
- lowerArc = IIArc;
- break;
- case IIArcTop:
- indentStack->Push(IINone);
- lowerArc = IIArc;
- break;
- case IIEmbedded:
- indentStack->Push(IIEmbedded);
- lowerArc = IIEmbedded;
- break;
- default:
- // Should never get here; just use IINone.
- break;
+ case IIArcBottom:
+ indentStack->Push(IIArc);
+ lowerArc = IINone;
+ break;
+ case IIArc:
+ indentStack->Push(IIArc);
+ lowerArc = IIArc;
+ break;
+ case IIArcTop:
+ indentStack->Push(IINone);
+ lowerArc = IIArc;
+ break;
+ case IIEmbedded:
+ indentStack->Push(IIEmbedded);
+ lowerArc = IIEmbedded;
+ break;
+ default:
+ // Should never get here; just use IINone.
+ break;
}
}
@@ -9661,14 +10318,14 @@ void Compiler::gtDispTree(GenTreePtr tree,
gtDispVN(tree);
printf("\n");
- if (tree->gtOp.gtOp1 != NULL)
+ if (tree->gtOp.gtOp1 != nullptr)
{
IndentInfo arcType = IIArcTop;
- for (GenTreeArgList* args = tree->gtOp.gtOp1->AsArgList(); args != NULL; args = args->Rest())
+ for (GenTreeArgList* args = tree->gtOp.gtOp1->AsArgList(); args != nullptr; args = args->Rest())
{
if (args->Rest() == nullptr)
{
- arcType = IIArcBottom;
+ arcType = IIArcBottom;
}
gtDispChild(args->Current(), indentStack, arcType);
arcType = IIArc;
@@ -9679,20 +10336,21 @@ void Compiler::gtDispTree(GenTreePtr tree,
/* Is it a 'simple' unary/binary operator? */
- const char * childMsg = NULL;
+ const char* childMsg = nullptr;
- if (tree->OperIsSimple())
+ if (tree->OperIsSimple())
{
if (!topOnly)
{
- if (tree->gtGetOp2())
+ if (tree->gtGetOp2())
{
// Label the childMsgs of the GT_COLON operator
// op2 is the then part
if (tree->gtOper == GT_COLON)
+ {
childMsg = "then";
-
+ }
gtDispChild(tree->gtOp.gtOp2, indentStack, IIArcTop, childMsg, topOnly);
}
}
@@ -9708,10 +10366,10 @@ void Compiler::gtDispTree(GenTreePtr tree,
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
- (void) indentStack->Pop();
+ (void)indentStack->Pop();
indentStack->Push(lowerArc);
}
-
+
if (tree->gtOper == GT_CAST)
{
/* Format a message that explains the effect of this GT_CAST */
@@ -9722,10 +10380,14 @@ void Compiler::gtDispTree(GenTreePtr tree,
/* if GTF_UNSIGNED is set then force fromType to an unsigned type */
if (tree->gtFlags & GTF_UNSIGNED)
+ {
fromType = genUnsignedType(fromType);
+ }
if (finalType != toType)
+ {
printf(" %s <-", varTypeName(finalType));
+ }
printf(" %s <- %s", varTypeName(toType), varTypeName(fromType));
}
@@ -9753,35 +10415,74 @@ void Compiler::gtDispTree(GenTreePtr tree,
{
switch (tree->gtIntrinsic.gtIntrinsicId)
{
- case CORINFO_INTRINSIC_Sin: printf(" sin"); break;
- case CORINFO_INTRINSIC_Cos: printf(" cos"); break;
- case CORINFO_INTRINSIC_Sqrt: printf(" sqrt"); break;
- case CORINFO_INTRINSIC_Abs: printf(" abs"); break;
- case CORINFO_INTRINSIC_Round: printf(" round"); break;
- case CORINFO_INTRINSIC_Cosh: printf(" cosh"); break;
- case CORINFO_INTRINSIC_Sinh: printf(" sinh"); break;
- case CORINFO_INTRINSIC_Tan: printf(" tan"); break;
- case CORINFO_INTRINSIC_Tanh: printf(" tanh"); break;
- case CORINFO_INTRINSIC_Asin: printf(" asin"); break;
- case CORINFO_INTRINSIC_Acos: printf(" acos"); break;
- case CORINFO_INTRINSIC_Atan: printf(" atan"); break;
- case CORINFO_INTRINSIC_Atan2: printf(" atan2"); break;
- case CORINFO_INTRINSIC_Log10: printf(" log10"); break;
- case CORINFO_INTRINSIC_Pow: printf(" pow"); break;
- case CORINFO_INTRINSIC_Exp: printf(" exp"); break;
- case CORINFO_INTRINSIC_Ceiling: printf(" ceiling"); break;
- case CORINFO_INTRINSIC_Floor: printf(" floor"); break;
- case CORINFO_INTRINSIC_Object_GetType: printf(" objGetType"); break;
+ case CORINFO_INTRINSIC_Sin:
+ printf(" sin");
+ break;
+ case CORINFO_INTRINSIC_Cos:
+ printf(" cos");
+ break;
+ case CORINFO_INTRINSIC_Sqrt:
+ printf(" sqrt");
+ break;
+ case CORINFO_INTRINSIC_Abs:
+ printf(" abs");
+ break;
+ case CORINFO_INTRINSIC_Round:
+ printf(" round");
+ break;
+ case CORINFO_INTRINSIC_Cosh:
+ printf(" cosh");
+ break;
+ case CORINFO_INTRINSIC_Sinh:
+ printf(" sinh");
+ break;
+ case CORINFO_INTRINSIC_Tan:
+ printf(" tan");
+ break;
+ case CORINFO_INTRINSIC_Tanh:
+ printf(" tanh");
+ break;
+ case CORINFO_INTRINSIC_Asin:
+ printf(" asin");
+ break;
+ case CORINFO_INTRINSIC_Acos:
+ printf(" acos");
+ break;
+ case CORINFO_INTRINSIC_Atan:
+ printf(" atan");
+ break;
+ case CORINFO_INTRINSIC_Atan2:
+ printf(" atan2");
+ break;
+ case CORINFO_INTRINSIC_Log10:
+ printf(" log10");
+ break;
+ case CORINFO_INTRINSIC_Pow:
+ printf(" pow");
+ break;
+ case CORINFO_INTRINSIC_Exp:
+ printf(" exp");
+ break;
+ case CORINFO_INTRINSIC_Ceiling:
+ printf(" ceiling");
+ break;
+ case CORINFO_INTRINSIC_Floor:
+ printf(" floor");
+ break;
+ case CORINFO_INTRINSIC_Object_GetType:
+ printf(" objGetType");
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
}
#ifdef FEATURE_SIMD
if (tree->gtOper == GT_SIMD)
- {
- printf(" %s %s", varTypeName(tree->gtSIMD.gtSIMDBaseType), simdIntrinsicNames[tree->gtSIMD.gtSIMDIntrinsicID]);
+ {
+ printf(" %s %s", varTypeName(tree->gtSIMD.gtSIMDBaseType),
+ simdIntrinsicNames[tree->gtSIMD.gtSIMDIntrinsicID]);
}
#endif // FEATURE_SIMD
@@ -9789,24 +10490,26 @@ void Compiler::gtDispTree(GenTreePtr tree,
gtDispVN(tree);
printf("\n");
- if (!topOnly && tree->gtOp.gtOp1)
+ if (!topOnly && tree->gtOp.gtOp1)
{
// Label the child of the GT_COLON operator
// op1 is the else part
if (tree->gtOper == GT_COLON)
+ {
childMsg = "else";
+ }
else if (tree->gtOper == GT_QMARK)
- childMsg = " if";
-
+ {
+ childMsg = " if";
+ }
gtDispChild(tree->gtOp.gtOp1, indentStack, IIArcBottom, childMsg, topOnly);
}
return;
}
-
// Now, get the right type of arc for this node
if (myArc != IINone)
{
@@ -9818,36 +10521,36 @@ void Compiler::gtDispTree(GenTreePtr tree,
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
- (void) indentStack->Pop();
+ (void)indentStack->Pop();
indentStack->Push(lowerArc);
}
// See what kind of a special operator we have here, and handle its special children.
- switch (tree->gtOper)
+ switch (tree->gtOper)
{
- case GT_FIELD:
- printf(" %s", eeGetFieldName(tree->gtField.gtFldHnd), 0);
+ case GT_FIELD:
+ printf(" %s", eeGetFieldName(tree->gtField.gtFldHnd), 0);
- if (tree->gtField.gtFldObj && !topOnly)
- {
- gtDispVN(tree);
- printf("\n");
- gtDispChild(tree->gtField.gtFldObj, indentStack, IIArcBottom);
- }
- else
- {
- gtDispRegVal(tree);
- gtDispVN(tree);
- printf("\n");
- }
- break;
+ if (tree->gtField.gtFldObj && !topOnly)
+ {
+ gtDispVN(tree);
+ printf("\n");
+ gtDispChild(tree->gtField.gtFldObj, indentStack, IIArcBottom);
+ }
+ else
+ {
+ gtDispRegVal(tree);
+ gtDispVN(tree);
+ printf("\n");
+ }
+ break;
- case GT_CALL:
+ case GT_CALL:
{
assert(tree->gtFlags & GTF_CALL);
unsigned numChildren = tree->NumChildren();
- GenTree* lastChild = nullptr;
+ GenTree* lastChild = nullptr;
if (numChildren != 0)
{
lastChild = tree->GetChild(numChildren - 1);
@@ -9855,8 +10558,8 @@ void Compiler::gtDispTree(GenTreePtr tree,
if (tree->gtCall.gtCallType != CT_INDIRECT)
{
- const char * methodName;
- const char * className;
+ const char* methodName;
+ const char* className;
methodName = eeGetMethodName(tree->gtCall.gtCallMethHnd, &className);
@@ -9868,9 +10571,8 @@ void Compiler::gtDispTree(GenTreePtr tree,
printf(" (FramesRoot last use)");
}
- if (((tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0) &&
- (tree->gtCall.gtInlineCandidateInfo != NULL) &&
- (tree->gtCall.gtInlineCandidateInfo->exactContextHnd != NULL))
+ if (((tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0) && (tree->gtCall.gtInlineCandidateInfo != nullptr) &&
+ (tree->gtCall.gtInlineCandidateInfo->exactContextHnd != nullptr))
{
printf(" (exactContextHnd=0x%p)", dspPtr(tree->gtCall.gtInlineCandidateInfo->exactContextHnd));
}
@@ -9884,41 +10586,55 @@ void Compiler::gtDispTree(GenTreePtr tree,
if (!topOnly)
{
- char buf[64];
- char * bufp;
+ char buf[64];
+ char* bufp;
bufp = &buf[0];
- if ((tree->gtCall.gtCallObjp != NULL) &&
- (tree->gtCall.gtCallObjp->gtOper != GT_NOP) &&
- (!tree->gtCall.gtCallObjp->IsArgPlaceHolderNode()))
+ if ((tree->gtCall.gtCallObjp != nullptr) && (tree->gtCall.gtCallObjp->gtOper != GT_NOP) &&
+ (!tree->gtCall.gtCallObjp->IsArgPlaceHolderNode()))
{
if (tree->gtCall.gtCallObjp->gtOper == GT_ASG)
+ {
sprintf_s(bufp, sizeof(buf), "this SETUP%c", 0);
+ }
else
+ {
sprintf_s(bufp, sizeof(buf), "this in %s%c", compRegVarName(REG_ARG_0), 0);
- gtDispChild(tree->gtCall.gtCallObjp, indentStack, (tree->gtCall.gtCallObjp == lastChild) ? IIArcBottom : IIArc, bufp, topOnly);
+ }
+ gtDispChild(tree->gtCall.gtCallObjp, indentStack,
+ (tree->gtCall.gtCallObjp == lastChild) ? IIArcBottom : IIArc, bufp, topOnly);
}
if (tree->gtCall.gtCallArgs)
+ {
gtDispArgList(tree, indentStack);
+ }
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- gtDispChild(tree->gtCall.gtCallAddr, indentStack, (tree->gtCall.gtCallAddr == lastChild) ? IIArcBottom : IIArc, "calli tgt", topOnly);
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
+ {
+ gtDispChild(tree->gtCall.gtCallAddr, indentStack,
+ (tree->gtCall.gtCallAddr == lastChild) ? IIArcBottom : IIArc, "calli tgt", topOnly);
+ }
- if (tree->gtCall.gtControlExpr != nullptr)
- gtDispChild(tree->gtCall.gtControlExpr, indentStack, (tree->gtCall.gtControlExpr == lastChild) ? IIArcBottom : IIArc, "control expr", topOnly);
+ if (tree->gtCall.gtControlExpr != nullptr)
+ {
+ gtDispChild(tree->gtCall.gtControlExpr, indentStack,
+ (tree->gtCall.gtControlExpr == lastChild) ? IIArcBottom : IIArc, "control expr",
+ topOnly);
+ }
- #if !FEATURE_FIXED_OUT_ARGS
+#if !FEATURE_FIXED_OUT_ARGS
regList list = tree->gtCall.regArgList;
- #endif
+#endif
/* process the late argument list */
- int lateArgIndex=0;
- for (GenTreeArgList* lateArgs = tree->gtCall.gtCallLateArgs; lateArgs; (lateArgIndex++, lateArgs = lateArgs->Rest()))
+ int lateArgIndex = 0;
+ for (GenTreeArgList* lateArgs = tree->gtCall.gtCallLateArgs; lateArgs;
+ (lateArgIndex++, lateArgs = lateArgs->Rest()))
{
GenTreePtr argx;
- argx = lateArgs->Current();
+ argx = lateArgs->Current();
IndentInfo arcType = (lateArgs->Rest() == nullptr) ? IIArcBottom : IIArc;
gtGetLateArgMsg(tree, argx, lateArgIndex, -1, bufp, sizeof(buf));
@@ -9928,69 +10644,71 @@ void Compiler::gtDispTree(GenTreePtr tree,
}
break;
- case GT_STMT:
- printf("\n");
-
- if (!topOnly)
- gtDispChild(tree->gtStmt.gtStmtExpr, indentStack, IIArcBottom);
- break;
+ case GT_STMT:
+ printf("\n");
- case GT_ARR_ELEM:
- gtDispVN(tree);
- printf("\n");
+ if (!topOnly)
+ {
+ gtDispChild(tree->gtStmt.gtStmtExpr, indentStack, IIArcBottom);
+ }
+ break;
- if (!topOnly)
- {
- gtDispChild(tree->gtArrElem.gtArrObj, indentStack, IIArc, nullptr, topOnly);
+ case GT_ARR_ELEM:
+ gtDispVN(tree);
+ printf("\n");
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ if (!topOnly)
{
- IndentInfo arcType = ((dim + 1) == tree->gtArrElem.gtArrRank) ? IIArcBottom : IIArc;
- gtDispChild(tree->gtArrElem.gtArrInds[dim], indentStack, arcType, nullptr, topOnly);
+ gtDispChild(tree->gtArrElem.gtArrObj, indentStack, IIArc, nullptr, topOnly);
+
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ IndentInfo arcType = ((dim + 1) == tree->gtArrElem.gtArrRank) ? IIArcBottom : IIArc;
+ gtDispChild(tree->gtArrElem.gtArrInds[dim], indentStack, arcType, nullptr, topOnly);
+ }
}
- }
- break;
+ break;
- case GT_ARR_OFFSET:
- gtDispVN(tree);
- printf("\n");
- if (!topOnly)
- {
- gtDispChild(tree->gtArrOffs.gtOffset, indentStack, IIArc, nullptr, topOnly);
- gtDispChild(tree->gtArrOffs.gtIndex, indentStack, IIArc, nullptr, topOnly);
- gtDispChild(tree->gtArrOffs.gtArrObj, indentStack, IIArcBottom, nullptr, topOnly);
- }
- break;
+ case GT_ARR_OFFSET:
+ gtDispVN(tree);
+ printf("\n");
+ if (!topOnly)
+ {
+ gtDispChild(tree->gtArrOffs.gtOffset, indentStack, IIArc, nullptr, topOnly);
+ gtDispChild(tree->gtArrOffs.gtIndex, indentStack, IIArc, nullptr, topOnly);
+ gtDispChild(tree->gtArrOffs.gtArrObj, indentStack, IIArcBottom, nullptr, topOnly);
+ }
+ break;
- case GT_CMPXCHG:
- gtDispVN(tree);
- printf("\n");
- if (!topOnly)
- {
- gtDispChild(tree->gtCmpXchg.gtOpLocation, indentStack, IIArc, nullptr, topOnly);
- gtDispChild(tree->gtCmpXchg.gtOpValue, indentStack, IIArc, nullptr, topOnly);
- gtDispChild(tree->gtCmpXchg.gtOpComparand, indentStack, IIArcBottom, nullptr, topOnly);
- }
- break;
+ case GT_CMPXCHG:
+ gtDispVN(tree);
+ printf("\n");
+ if (!topOnly)
+ {
+ gtDispChild(tree->gtCmpXchg.gtOpLocation, indentStack, IIArc, nullptr, topOnly);
+ gtDispChild(tree->gtCmpXchg.gtOpValue, indentStack, IIArc, nullptr, topOnly);
+ gtDispChild(tree->gtCmpXchg.gtOpComparand, indentStack, IIArcBottom, nullptr, topOnly);
+ }
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
- gtDispVN(tree);
- printf("\n");
- if (!topOnly)
- {
- gtDispChild(tree->gtBoundsChk.gtArrLen, indentStack, IIArc, nullptr, topOnly);
- gtDispChild(tree->gtBoundsChk.gtIndex, indentStack, IIArcBottom, nullptr, topOnly);
- }
- break;
+ gtDispVN(tree);
+ printf("\n");
+ if (!topOnly)
+ {
+ gtDispChild(tree->gtBoundsChk.gtArrLen, indentStack, IIArc, nullptr, topOnly);
+ gtDispChild(tree->gtBoundsChk.gtIndex, indentStack, IIArcBottom, nullptr, topOnly);
+ }
+ break;
- default:
- printf("<DON'T KNOW HOW TO DISPLAY THIS NODE> :");
- printf(""); // null string means flush
- break;
+ default:
+ printf("<DON'T KNOW HOW TO DISPLAY THIS NODE> :");
+ printf(""); // null string means flush
+ break;
}
}
@@ -10001,7 +10719,7 @@ void Compiler::gtDispTree(GenTreePtr tree,
// call - The call for which 'arg' is an argument
// arg - The argument for which a message should be constructed
// argNum - The ordinal number of the arg in the argument list
-// listCount - When printing in Linear form this is the count for a multireg GT_LIST
+// listCount - When printing in Linear form this is the count for a multireg GT_LIST
// or -1 if we are not printing in Linear form
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
@@ -10013,14 +10731,10 @@ void Compiler::gtDispTree(GenTreePtr tree,
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
-void Compiler::gtGetArgMsg(GenTreePtr call,
- GenTreePtr arg,
- unsigned argNum,
- int listCount,
- char* bufp,
- unsigned bufLength)
+void Compiler::gtGetArgMsg(
+ GenTreePtr call, GenTreePtr arg, unsigned argNum, int listCount, char* bufp, unsigned bufLength)
{
- if (call->gtCall.gtCallLateArgs != NULL)
+ if (call->gtCall.gtCallLateArgs != nullptr)
{
fgArgTabEntryPtr curArgTabEntry = gtArgEntryByArgNum(call, argNum);
assert(curArgTabEntry);
@@ -10029,17 +10743,19 @@ void Compiler::gtGetArgMsg(GenTreePtr call,
{
sprintf_s(bufp, bufLength, "arg%d SETUP%c", argNum, 0);
}
- else
+ else
{
#if FEATURE_FIXED_OUT_ARGS
if (listCount == -1)
{
- sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->slotNum * TARGET_POINTER_SIZE, 0);
+ sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->slotNum * TARGET_POINTER_SIZE,
+ 0);
}
else // listCount is 0,1,2 or 3
{
assert(listCount <= MAX_ARG_REG_COUNT);
- sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, (curArgTabEntry->slotNum + listCount) * TARGET_POINTER_SIZE, 0);
+ sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum,
+ (curArgTabEntry->slotNum + listCount) * TARGET_POINTER_SIZE, 0);
}
#else
sprintf_s(bufp, bufLength, "arg%d on STK%c", argNum, 0);
@@ -10059,7 +10775,7 @@ void Compiler::gtGetArgMsg(GenTreePtr call,
// call - The call for which 'arg' is an argument
// argx - The argument for which a message should be constructed
// lateArgIndex - The ordinal number of the arg in the lastArg list
-// listCount - When printing in Linear form this is the count for a multireg GT_LIST
+// listCount - When printing in Linear form this is the count for a multireg GT_LIST
// or -1 if we are not printing in Linear form
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
@@ -10071,14 +10787,10 @@ void Compiler::gtGetArgMsg(GenTreePtr call,
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
-void Compiler::gtGetLateArgMsg(GenTreePtr call,
- GenTreePtr argx,
- int lateArgIndex,
- int listCount,
- char* bufp,
- unsigned bufLength)
+void Compiler::gtGetLateArgMsg(
+ GenTreePtr call, GenTreePtr argx, int lateArgIndex, int listCount, char* bufp, unsigned bufLength)
{
-    assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
+    assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
fgArgTabEntryPtr curArgTabEntry = gtArgEntryByLateArgIndex(call, lateArgIndex);
assert(curArgTabEntry);
@@ -10090,7 +10802,8 @@ void Compiler::gtGetLateArgMsg(GenTreePtr call,
#else
if (argReg == REG_STK)
{
- sprintf_s(bufp, bufLength, "arg%d in out+%02x%c", curArgTabEntry->argNum, curArgTabEntry->slotNum * TARGET_POINTER_SIZE, 0);
+ sprintf_s(bufp, bufLength, "arg%d in out+%02x%c", curArgTabEntry->argNum,
+ curArgTabEntry->slotNum * TARGET_POINTER_SIZE, 0);
}
else
#endif
@@ -10108,7 +10821,7 @@ void Compiler::gtGetLateArgMsg(GenTreePtr call,
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
assert(curArgTabEntry->numRegs == 2);
otherRegNum = curArgTabEntry->otherRegNum;
-#else
+#else
otherRegNum = (regNumber)(((unsigned)curArgTabEntry->regNum) + curArgTabEntry->numRegs - 1);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -10116,14 +10829,15 @@ void Compiler::gtGetLateArgMsg(GenTreePtr call,
{
char seperator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
- sprintf_s(bufp, bufLength, "arg%d %s%c%s%c", curArgTabEntry->argNum,
- compRegVarName(argReg), seperator, compRegVarName(otherRegNum), 0);
+ sprintf_s(bufp, bufLength, "arg%d %s%c%s%c", curArgTabEntry->argNum, compRegVarName(argReg),
+ seperator, compRegVarName(otherRegNum), 0);
}
else // listCount is 0,1,2 or 3
{
assert(listCount <= MAX_ARG_REG_COUNT);
- regNumber curReg = (listCount == 1) ? otherRegNum : (regNumber)((unsigned)(argReg)+listCount);
- sprintf_s(bufp, bufLength, "arg%d m%d %s%c", curArgTabEntry->argNum, listCount, compRegVarName(curReg), 0);
+ regNumber curReg = (listCount == 1) ? otherRegNum : (regNumber)((unsigned)(argReg) + listCount);
+ sprintf_s(bufp, bufLength, "arg%d m%d %s%c", curArgTabEntry->argNum, listCount,
+ compRegVarName(curReg), 0);
}
}
else
@@ -10148,23 +10862,24 @@ void Compiler::gtGetLateArgMsg(GenTreePtr call,
// Assumptions:
// 'tree' must be a call node
-void Compiler::gtDispArgList(GenTreePtr tree,
- IndentStack* indentStack)
+void Compiler::gtDispArgList(GenTreePtr tree, IndentStack* indentStack)
{
- GenTree * args = tree->gtCall.gtCallArgs;
- unsigned argnum = 0;
- const int BufLength = 256;
- char buf[BufLength];
- char * bufp = &buf[0];
- unsigned numChildren = tree->NumChildren();
+ GenTree* args = tree->gtCall.gtCallArgs;
+ unsigned argnum = 0;
+ const int BufLength = 256;
+ char buf[BufLength];
+ char* bufp = &buf[0];
+ unsigned numChildren = tree->NumChildren();
assert(numChildren != 0);
bool argListIsLastChild = (args == tree->GetChild(numChildren - 1));
IndentInfo arcType = IIArc;
- if (tree->gtCall.gtCallObjp != NULL)
+ if (tree->gtCall.gtCallObjp != nullptr)
+ {
argnum++;
+ }
- while (args != 0)
+ while (args != nullptr)
{
assert(args->gtOper == GT_LIST);
GenTree* arg = args->gtOp.gtOp1;
@@ -10195,8 +10910,7 @@ void Compiler::gtDispArgList(GenTreePtr tree,
// Assumptions:
// 'tree' must be a GT_LIST node
-void Compiler::gtDispTreeList(GenTreePtr tree,
- IndentStack* indentStack /* = nullptr */)
+void Compiler::gtDispTreeList(GenTreePtr tree, IndentStack* indentStack /* = nullptr */)
{
for (/*--*/; tree != nullptr; tree = tree->gtNext)
{
@@ -10250,15 +10964,15 @@ GenTree* nextPrintable(GenTree* next, GenTree* tree)
// have an embedded statement, so that statement is then printed within
// the dump for this statement.
-GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
- GenTreePtr nextLinearNode,
- GenTreePtr tree,
- IndentStack* indentStack,
- __in __in_z __in_opt const char * msg /* = nullptr */)
+GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
+ GenTreePtr nextLinearNode,
+ GenTreePtr tree,
+ IndentStack* indentStack,
+ __in __in_z __in_opt const char* msg /* = nullptr */)
{
- const int BufLength = 256;
- char buf[BufLength];
- char * bufp = &buf[0];
+ const int BufLength = 256;
+ char buf[BufLength];
+ char* bufp = &buf[0];
// Determine what kind of arc to propagate
IndentInfo myArc = IINone;
@@ -10277,11 +10991,9 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
}
// Visit children
- unsigned childCount = tree->NumChildren();
+ unsigned childCount = tree->NumChildren();
GenTreePtr deferChild = nullptr;
- for (unsigned i = 0;
- i < childCount;
- i++)
+ for (unsigned i = 0; i < childCount; i++)
{
unsigned childIndex = i;
if (tree->OperIsBinary() && tree->IsReverseOp())
@@ -10289,7 +11001,7 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
childIndex = (i == 0) ? 1 : 0;
}
- GenTreePtr child = tree->GetChild(childIndex);
+ GenTreePtr child = tree->GetChild(childIndex);
IndentInfo indentInfo = (i == 0) ? IIArcTop : IIArc;
if (tree->OperGet() == GT_COLON && i == 1)
@@ -10298,8 +11010,8 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
continue;
}
- unsigned listElemNum = 0;
- const char* childMsg = nullptr;
+ unsigned listElemNum = 0;
+ const char* childMsg = nullptr;
if (tree->IsCall())
{
if (child == tree->gtCall.gtCallObjp)
@@ -10330,7 +11042,9 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
{
// List is handled below, but adjust listElemNum to account for "this" if necessary
if (tree->gtCall.gtCallObjp != nullptr)
+ {
listElemNum = 1;
+ }
}
else
{
@@ -10343,8 +11057,7 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
{
// For each list element
GenTreePtr nextList = nullptr;
- if (child->gtOp.gtOp2 != nullptr
- && child->gtOp.gtOp2->gtOper != GT_LIST)
+ if (child->gtOp.gtOp2 != nullptr && child->gtOp.gtOp2->gtOper != GT_LIST)
{
// special case for child of initblk and cpblk
// op1 is dst, op2 is src, and op2 must show up first
@@ -10386,7 +11099,7 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
// multiple registers) print the nodes of the nested list and continue to the next argument.
if (listElem->gtOper == GT_LIST)
{
- int listCount = 0;
+ int listCount = 0;
GenTreePtr nextListNested = nullptr;
for (GenTreePtr listNested = listElem; listNested != nullptr; listNested = nextListNested)
{
@@ -10398,7 +11111,8 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
}
else
{
- // GT_LIST nodes (under initBlk, others?) can have a non-null op2 that's not a GT_LIST
+ // GT_LIST nodes (under initBlk, others?) can have a non-null op2 that's not a
+ // GT_LIST
nextListNested = nullptr;
listElemNested = listNested;
}
@@ -10414,7 +11128,8 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
gtGetLateArgMsg(tree, listNested, listElemNum, listCount, bufp, BufLength);
}
listCount++;
- nextLinearNode = gtDispLinearTree(curStmt, nextLinearNode, listElemNested, indentStack, bufp);
+ nextLinearNode =
+ gtDispLinearTree(curStmt, nextLinearNode, listElemNested, indentStack, bufp);
indentStack->Pop();
}
@@ -10451,7 +11166,7 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
listElemNum++;
}
}
-
+
// Skip the GT_LIST nodes, as we do not print them, and the next node to print will occur
// after the list.
while (nextLinearNode->OperGet() == GT_LIST)
@@ -10482,14 +11197,13 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
{
// Get the next statement, which had better be embedded
GenTreePtr nextStmt = curStmt->gtNext;
- while (nextStmt != nullptr &&
- nextStmt->gtStmt.gtStmtIsEmbedded() &&
+ while (nextStmt != nullptr && nextStmt->gtStmt.gtStmtIsEmbedded() &&
nextStmt->gtStmt.gtStmtList != nextLinearNode)
{
nextStmt = nextStmt->gtNext;
}
- if(nextStmt != nullptr && nextStmt->gtStmt.gtStmtList == nextLinearNode)
+ if (nextStmt != nullptr && nextStmt->gtStmt.gtStmtList == nextLinearNode)
{
indentStack->Push(IIEmbedded);
nextLinearNode = gtDispLinearStmt(nextStmt->AsStmt(), indentStack);
@@ -10531,7 +11245,7 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
nextLinearNode = gtDispLinearTree(curStmt, nextLinearNode, deferChild, indentStack);
indentStack->Pop();
}
-
+
return nextLinearNode;
}
@@ -10549,7 +11263,7 @@ GenTreePtr Compiler::gtDispLinearTree(GenTreeStmt* curStmt,
// Assumptions:
// 'stmt' must be a GT_STMT node
-GenTreePtr Compiler::gtDispLinearStmt(GenTreeStmt* stmt, IndentStack *indentStack /* = nullptr */)
+GenTreePtr Compiler::gtDispLinearStmt(GenTreeStmt* stmt, IndentStack* indentStack /* = nullptr */)
{
if (indentStack == nullptr)
{
@@ -10571,53 +11285,58 @@ GenTreePtr Compiler::gtDispLinearStmt(GenTreeStmt* stmt, IndentStack
* and call the methods to perform the folding
*/
-GenTreePtr Compiler::gtFoldExpr(GenTreePtr tree)
+GenTreePtr Compiler::gtFoldExpr(GenTreePtr tree)
{
- unsigned kind = tree->OperKind();
+ unsigned kind = tree->OperKind();
/* We must have a simple operation to fold */
-
- // If we're in CSE, it's not safe to perform tree
-    // folding given that it will potentially
+
+ // If we're in CSE, it's not safe to perform tree
+    // folding given that it will potentially
// change considered CSE candidates.
- if(optValnumCSE_phase)
+ if (optValnumCSE_phase)
+ {
return tree;
+ }
if (!(kind & GTK_SMPOP))
+ {
return tree;
+ }
- GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
/* Filter out non-foldable trees that can have constant children */
- assert (kind & (GTK_UNOP | GTK_BINOP));
+ assert(kind & (GTK_UNOP | GTK_BINOP));
switch (tree->gtOper)
{
- case GT_RETFILT:
- case GT_RETURN:
- case GT_IND:
- return tree;
- default:
- break;
+ case GT_RETFILT:
+ case GT_RETURN:
+ case GT_IND:
+ return tree;
+ default:
+ break;
}
/* try to fold the current node */
- if ((kind & GTK_UNOP) && op1)
+ if ((kind & GTK_UNOP) && op1)
{
- if (op1->OperKind() & GTK_CONST)
+ if (op1->OperKind() & GTK_CONST)
+ {
return gtFoldExprConst(tree);
+ }
}
else if ((kind & GTK_BINOP) && op1 && tree->gtOp.gtOp2 &&
// Don't take out conditionals for debugging
- !((opts.compDbgCode || opts.MinOpts()) &&
- tree->OperIsCompare()))
+ !((opts.compDbgCode || opts.MinOpts()) && tree->OperIsCompare()))
{
- GenTreePtr op2 = tree->gtOp.gtOp2;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
// The atomic operations are exempted here because they are never computable statically;
// one of their arguments is an address.
- if (((op1->OperKind() & op2->OperKind()) & GTK_CONST) && !tree->OperIsAtomicOp())
+ if (((op1->OperKind() & op2->OperKind()) & GTK_CONST) && !tree->OperIsAtomicOp())
{
/* both nodes are constants - fold the expression */
return gtFoldExprConst(tree);
@@ -10640,37 +11359,38 @@ GenTreePtr Compiler::gtFoldExpr(GenTreePtr tree)
{
assert(tree->OperGet() == GT_QMARK);
- GenTreePtr colon_op1 = op2->gtOp.gtOp1;
- GenTreePtr colon_op2 = op2->gtOp.gtOp2;
+ GenTreePtr colon_op1 = op2->gtOp.gtOp1;
+ GenTreePtr colon_op2 = op2->gtOp.gtOp2;
if (gtCompareTree(colon_op1, colon_op2))
{
// Both sides of the GT_COLON are the same tree
- GenTreePtr sideEffList = NULL;
+ GenTreePtr sideEffList = nullptr;
gtExtractSideEffList(op1, &sideEffList);
fgUpdateRefCntForExtract(op1, sideEffList); // Decrement refcounts for op1, Keeping any side-effects
- fgUpdateRefCntForExtract(colon_op1, NULL); // Decrement refcounts for colon_op1
+ fgUpdateRefCntForExtract(colon_op1, nullptr); // Decrement refcounts for colon_op1
// Clear colon flags only if the qmark itself is not conditionally executed
- if ( (tree->gtFlags & GTF_COLON_COND)==0 )
+ if ((tree->gtFlags & GTF_COLON_COND) == 0)
{
fgWalkTreePre(&colon_op2, gtClearColonCond);
}
- if (sideEffList == NULL)
+ if (sideEffList == nullptr)
{
// No side-effects, just return colon_op2
return colon_op2;
}
else
{
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("\nIdentical GT_COLON trees with side effects! Extracting side effects...\n");
- gtDispTree(sideEffList); printf("\n");
+ gtDispTree(sideEffList);
+ printf("\n");
}
#endif
// Change the GT_COLON into a GT_COMMA node with the side-effects
@@ -10679,7 +11399,6 @@ GenTreePtr Compiler::gtFoldExpr(GenTreePtr tree)
op2->gtOp.gtOp1 = sideEffList;
return op2;
}
-
}
}
}
@@ -10699,10 +11418,10 @@ GenTreePtr Compiler::gtFoldExpr(GenTreePtr tree)
*
*/
-GenTreePtr Compiler::gtFoldExprCompare(GenTreePtr tree)
+GenTreePtr Compiler::gtFoldExprCompare(GenTreePtr tree)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
assert(tree->OperIsCompare());
@@ -10710,33 +11429,37 @@ GenTreePtr Compiler::gtFoldExprCompare(GenTreePtr tree)
/* Do not fold floats or doubles (e.g. NaN != NaN) */
- if (varTypeIsFloating(op1->TypeGet()))
+ if (varTypeIsFloating(op1->TypeGet()))
+ {
return tree;
+ }
/* Currently we can only fold when the two subtrees exactly match */
if ((tree->gtFlags & GTF_SIDE_EFFECT) || GenTree::Compare(op1, op2, true) == false)
- return tree; /* return unfolded tree */
+ {
+ return tree; /* return unfolded tree */
+ }
GenTreePtr cons;
switch (tree->gtOper)
{
- case GT_EQ:
- case GT_LE:
- case GT_GE:
- cons = gtNewIconNode(true); /* Folds to GT_CNS_INT(true) */
- break;
+ case GT_EQ:
+ case GT_LE:
+ case GT_GE:
+ cons = gtNewIconNode(true); /* Folds to GT_CNS_INT(true) */
+ break;
- case GT_NE:
- case GT_LT:
- case GT_GT:
- cons = gtNewIconNode(false); /* Folds to GT_CNS_INT(false) */
- break;
+ case GT_NE:
+ case GT_LT:
+ case GT_GT:
+ cons = gtNewIconNode(false); /* Folds to GT_CNS_INT(false) */
+ break;
- default:
- assert(!"Unexpected relOp");
- return tree;
+ default:
+ assert(!"Unexpected relOp");
+ return tree;
}
/* The node has been folded into 'cons' */
@@ -10744,7 +11467,9 @@ GenTreePtr Compiler::gtFoldExprCompare(GenTreePtr tree)
if (fgGlobalMorph)
{
if (!fgIsInlining())
+ {
fgMorphTreeDone(cons);
+ }
}
else
{
@@ -10758,7 +11483,6 @@ GenTreePtr Compiler::gtFoldExprCompare(GenTreePtr tree)
return cons;
}
-
/*****************************************************************************
*
* Some binary operators can be folded even if they have only one
@@ -10766,41 +11490,47 @@ GenTreePtr Compiler::gtFoldExprCompare(GenTreePtr tree)
* multiply with 1, etc
*/
-GenTreePtr Compiler::gtFoldExprSpecial(GenTreePtr tree)
+GenTreePtr Compiler::gtFoldExprSpecial(GenTreePtr tree)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
- genTreeOps oper = tree->OperGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
+ genTreeOps oper = tree->OperGet();
- GenTreePtr op, cons;
- ssize_t val;
+ GenTreePtr op, cons;
+ ssize_t val;
assert(tree->OperKind() & GTK_BINOP);
/* Filter out operators that cannot be folded here */
- if (oper == GT_CAST)
- return tree;
+ if (oper == GT_CAST)
+ {
+ return tree;
+ }
/* We only consider TYP_INT for folding
* Do not fold pointer arithmetic (e.g. addressing modes!) */
if (oper != GT_QMARK && !varTypeIsIntOrI(tree->gtType))
+ {
return tree;
+ }
/* Find out which is the constant node */
if (op1->IsCnsIntOrI())
{
- op = op2;
- cons = op1;
+ op = op2;
+ cons = op1;
}
else if (op2->IsCnsIntOrI())
{
- op = op1;
- cons = op2;
+ op = op1;
+ cons = op2;
}
else
+ {
return tree;
+ }
/* Get the constant value */
@@ -10809,170 +11539,181 @@ GenTreePtr Compiler::gtFoldExprSpecial(GenTreePtr tree)
/* Here op is the non-constant operand, val is the constant,
first is true if the constant is op1 */
- switch (oper)
+ switch (oper)
{
- case GT_EQ:
- case GT_NE:
- // Optimize boxed value classes; these are always false. This IL is
- // generated when a generic value is tested against null:
- // <T> ... foo(T x) { ... if ((object)x == null) ...
- if (val == 0 && op->IsBoxedValue())
- {
- // Change the assignment node so we don't generate any code for it.
+ case GT_EQ:
+ case GT_NE:
+ // Optimize boxed value classes; these are always false. This IL is
+ // generated when a generic value is tested against null:
+ // <T> ... foo(T x) { ... if ((object)x == null) ...
+ if (val == 0 && op->IsBoxedValue())
+ {
+ // Change the assignment node so we don't generate any code for it.
- GenTreePtr asgStmt = op->gtBox.gtAsgStmtWhenInlinedBoxValue;
- assert(asgStmt->gtOper == GT_STMT);
- GenTreePtr asg = asgStmt->gtStmt.gtStmtExpr;
- assert(asg->gtOper == GT_ASG);
+ GenTreePtr asgStmt = op->gtBox.gtAsgStmtWhenInlinedBoxValue;
+ assert(asgStmt->gtOper == GT_STMT);
+ GenTreePtr asg = asgStmt->gtStmt.gtStmtExpr;
+ assert(asg->gtOper == GT_ASG);
#ifdef DEBUG
- if (verbose)
- {
- printf("Bashing ");
- printTreeID(asg);
- printf(" to NOP as part of dead box operation\n");
- gtDispTree(tree);
- }
+ if (verbose)
+ {
+ printf("Bashing ");
+ printTreeID(asg);
+ printf(" to NOP as part of dead box operation\n");
+ gtDispTree(tree);
+ }
#endif
- asg->gtBashToNOP();
-
- op = gtNewIconNode(oper == GT_NE);
- if (fgGlobalMorph)
- {
- if (!fgIsInlining())
- fgMorphTreeDone(op);
+ asg->gtBashToNOP();
+
+ op = gtNewIconNode(oper == GT_NE);
+ if (fgGlobalMorph)
+ {
+ if (!fgIsInlining())
+ {
+ fgMorphTreeDone(op);
+ }
+ }
+ else
+ {
+ op->gtNext = tree->gtNext;
+ op->gtPrev = tree->gtPrev;
+ }
+ fgSetStmtSeq(asgStmt);
+ return op;
}
- else
+ break;
+
+ case GT_ADD:
+ case GT_ASG_ADD:
+ if (val == 0)
{
- op->gtNext = tree->gtNext;
- op->gtPrev = tree->gtPrev;
+ goto DONE_FOLD;
}
- fgSetStmtSeq(asgStmt);
- return op;
- }
- break;
-
- case GT_ADD:
- case GT_ASG_ADD:
- if (val == 0) goto DONE_FOLD;
- break;
+ break;
- case GT_MUL:
- case GT_ASG_MUL:
- if (val == 1)
- goto DONE_FOLD;
- else if (val == 0)
- {
- /* Multiply by zero - return the 'zero' node, but not if side effects */
- if (!(op->gtFlags & GTF_SIDE_EFFECT))
+ case GT_MUL:
+ case GT_ASG_MUL:
+ if (val == 1)
{
- if (lvaLocalVarRefCounted)
+ goto DONE_FOLD;
+ }
+ else if (val == 0)
+ {
+ /* Multiply by zero - return the 'zero' node, but not if side effects */
+ if (!(op->gtFlags & GTF_SIDE_EFFECT))
{
- lvaRecursiveDecRefCounts(op);
+ if (lvaLocalVarRefCounted)
+ {
+ lvaRecursiveDecRefCounts(op);
+ }
+ op = cons;
+ goto DONE_FOLD;
}
- op = cons;
- goto DONE_FOLD;
}
- }
- break;
+ break;
- case GT_DIV:
- case GT_UDIV:
- case GT_ASG_DIV:
- if ((op2 == cons) && (val == 1) && !(op1->OperKind() & GTK_CONST))
- {
- goto DONE_FOLD;
- }
- break;
+ case GT_DIV:
+ case GT_UDIV:
+ case GT_ASG_DIV:
+ if ((op2 == cons) && (val == 1) && !(op1->OperKind() & GTK_CONST))
+ {
+ goto DONE_FOLD;
+ }
+ break;
- case GT_SUB:
- case GT_ASG_SUB:
- if ((op2 == cons) && (val == 0) && !(op1->OperKind() & GTK_CONST))
- {
- goto DONE_FOLD;
- }
- break;
+ case GT_SUB:
+ case GT_ASG_SUB:
+ if ((op2 == cons) && (val == 0) && !(op1->OperKind() & GTK_CONST))
+ {
+ goto DONE_FOLD;
+ }
+ break;
- case GT_AND:
- if (val == 0)
- {
- /* AND with zero - return the 'zero' node, but not if side effects */
+ case GT_AND:
+ if (val == 0)
+ {
+ /* AND with zero - return the 'zero' node, but not if side effects */
- if (!(op->gtFlags & GTF_SIDE_EFFECT))
+ if (!(op->gtFlags & GTF_SIDE_EFFECT))
+ {
+ if (lvaLocalVarRefCounted)
+ {
+ lvaRecursiveDecRefCounts(op);
+ }
+ op = cons;
+ goto DONE_FOLD;
+ }
+ }
+ else
{
- if (lvaLocalVarRefCounted)
+ /* The GTF_BOOLEAN flag is set for nodes that are part
+ * of a boolean expression, thus all their children
+ * are known to evaluate to only 0 or 1 */
+
+ if (tree->gtFlags & GTF_BOOLEAN)
{
- lvaRecursiveDecRefCounts(op);
+
+ /* The constant value must be 1
+ * AND with 1 stays the same */
+ assert(val == 1);
+ goto DONE_FOLD;
}
- op = cons;
- goto DONE_FOLD;
}
- }
- else
- {
- /* The GTF_BOOLEAN flag is set for nodes that are part
- * of a boolean expression, thus all their children
- * are known to evaluate to only 0 or 1 */
+ break;
- if (tree->gtFlags & GTF_BOOLEAN)
+ case GT_OR:
+ if (val == 0)
{
-
- /* The constant value must be 1
- * AND with 1 stays the same */
- assert(val == 1);
goto DONE_FOLD;
}
- }
- break;
-
- case GT_OR:
- if (val == 0)
- goto DONE_FOLD;
- else if (tree->gtFlags & GTF_BOOLEAN)
- {
- /* The constant value must be 1 - OR with 1 is 1 */
+ else if (tree->gtFlags & GTF_BOOLEAN)
+ {
+ /* The constant value must be 1 - OR with 1 is 1 */
- assert(val == 1);
+ assert(val == 1);
- /* OR with one - return the 'one' node, but not if side effects */
+ /* OR with one - return the 'one' node, but not if side effects */
- if (!(op->gtFlags & GTF_SIDE_EFFECT))
- {
- if (lvaLocalVarRefCounted)
+ if (!(op->gtFlags & GTF_SIDE_EFFECT))
{
- lvaRecursiveDecRefCounts(op);
+ if (lvaLocalVarRefCounted)
+ {
+ lvaRecursiveDecRefCounts(op);
+ }
+ op = cons;
+ goto DONE_FOLD;
}
- op = cons;
- goto DONE_FOLD;
}
- }
- break;
+ break;
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROL:
- case GT_ROR:
- case GT_ASG_LSH:
- case GT_ASG_RSH:
- case GT_ASG_RSZ:
- if (val == 0)
- {
- if (op2 == cons)
- goto DONE_FOLD;
- else if (!(op->gtFlags & GTF_SIDE_EFFECT))
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROL:
+ case GT_ROR:
+ case GT_ASG_LSH:
+ case GT_ASG_RSH:
+ case GT_ASG_RSZ:
+ if (val == 0)
{
- if (lvaLocalVarRefCounted)
+ if (op2 == cons)
{
- lvaRecursiveDecRefCounts(op);
+ goto DONE_FOLD;
+ }
+ else if (!(op->gtFlags & GTF_SIDE_EFFECT))
+ {
+ if (lvaLocalVarRefCounted)
+ {
+ lvaRecursiveDecRefCounts(op);
+ }
+ op = cons;
+ goto DONE_FOLD;
}
- op = cons;
- goto DONE_FOLD;
}
- }
- break;
+ break;
- case GT_QMARK:
+ case GT_QMARK:
{
assert(op1 == cons && op2 == op && op2->gtOper == GT_COLON);
assert(op2->gtOp.gtOp1 && op2->gtOp.gtOp2);
@@ -10982,30 +11723,30 @@ GenTreePtr Compiler::gtFoldExprSpecial(GenTreePtr tree)
GenTree* opToDelete;
if (val)
{
- op = op2->AsColon()->ThenNode();
+ op = op2->AsColon()->ThenNode();
opToDelete = op2->AsColon()->ElseNode();
}
else
{
- op = op2->AsColon()->ElseNode();
+ op = op2->AsColon()->ElseNode();
opToDelete = op2->AsColon()->ThenNode();
}
if (lvaLocalVarRefCounted)
{
lvaRecursiveDecRefCounts(opToDelete);
}
-
+
// Clear colon flags only if the qmark itself is not conditionally executed
- if ( (tree->gtFlags & GTF_COLON_COND)==0 )
+ if ((tree->gtFlags & GTF_COLON_COND) == 0)
{
fgWalkTreePre(&op, gtClearColonCond);
}
}
- goto DONE_FOLD;
+ goto DONE_FOLD;
- default:
- break;
+ default:
+ break;
}
/* The node is not foldable */
@@ -11015,13 +11756,12 @@ GenTreePtr Compiler::gtFoldExprSpecial(GenTreePtr tree)
DONE_FOLD:
/* The node has been folded into 'op' */
-
+
// If there was an assignment update, we just morphed it into
// a use, update the flags appropriately
if (op->gtOper == GT_LCL_VAR)
{
- assert ((tree->OperKind() & GTK_ASGOP) ||
- (op->gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF | GTF_VAR_DEF)) == 0);
+ assert((tree->OperKind() & GTK_ASGOP) || (op->gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF | GTF_VAR_DEF)) == 0);
op->gtFlags &= ~(GTF_VAR_USEASG | GTF_VAR_USEDEF | GTF_VAR_DEF);
}
@@ -11039,23 +11779,23 @@ DONE_FOLD:
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-GenTreePtr Compiler::gtFoldExprConst(GenTreePtr tree)
+GenTreePtr Compiler::gtFoldExprConst(GenTreePtr tree)
{
- unsigned kind = tree->OperKind();
+ unsigned kind = tree->OperKind();
- SSIZE_T i1, i2, itemp;
- INT64 lval1, lval2, ltemp;
- float f1, f2;
- double d1, d2;
- var_types switchType;
- FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // default unless we override it when folding
+ SSIZE_T i1, i2, itemp;
+ INT64 lval1, lval2, ltemp;
+ float f1, f2;
+ double d1, d2;
+ var_types switchType;
+ FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // default unless we override it when folding
- assert (kind & (GTK_UNOP | GTK_BINOP));
+ assert(kind & (GTK_UNOP | GTK_BINOP));
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
if (!opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
@@ -11079,342 +11819,401 @@ GenTreePtr Compiler::gtFoldExprConst(GenTreePtr tree)
return tree;
}
- if (kind & GTK_UNOP)
+ if (kind & GTK_UNOP)
{
assert(op1->OperKind() & GTK_CONST);
switch (op1->gtType)
{
- case TYP_INT:
+ case TYP_INT:
- /* Fold constant INT unary operator */
- assert(op1->gtIntCon.ImmedValCanBeFolded(this, tree->OperGet()));
- i1 = (int) op1->gtIntCon.gtIconVal;
+ /* Fold constant INT unary operator */
+ assert(op1->gtIntCon.ImmedValCanBeFolded(this, tree->OperGet()));
+ i1 = (int)op1->gtIntCon.gtIconVal;
- // If we fold a unary oper, then the folded constant
- // is considered a ConstantIndexField if op1 was one
- //
+ // If we fold a unary oper, then the folded constant
+ // is considered a ConstantIndexField if op1 was one
+ //
- if ((op1->gtIntCon.gtFieldSeq != nullptr) &&
- op1->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
- {
- fieldSeq = op1->gtIntCon.gtFieldSeq;
- }
+ if ((op1->gtIntCon.gtFieldSeq != nullptr) && op1->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
+ {
+ fieldSeq = op1->gtIntCon.gtFieldSeq;
+ }
- switch (tree->gtOper)
- {
- case GT_NOT: i1 = ~i1; break;
+ switch (tree->gtOper)
+ {
+ case GT_NOT:
+ i1 = ~i1;
+ break;
- case GT_NEG:
- case GT_CHS: i1 = -i1; break;
+ case GT_NEG:
+ case GT_CHS:
+ i1 = -i1;
+ break;
- case GT_CAST:
- // assert (genActualType(tree->CastToType()) == tree->gtType);
- switch (tree->CastToType())
- {
- case TYP_BYTE:
- itemp = INT32(INT8(i1));
- goto CHK_OVF;
+ case GT_CAST:
+ // assert (genActualType(tree->CastToType()) == tree->gtType);
+ switch (tree->CastToType())
+ {
+ case TYP_BYTE:
+ itemp = INT32(INT8(i1));
+ goto CHK_OVF;
+
+ case TYP_SHORT:
+ itemp = INT32(INT16(i1));
+ CHK_OVF:
+ if (tree->gtOverflow() && ((itemp != i1) || ((tree->gtFlags & GTF_UNSIGNED) && i1 < 0)))
+ {
+ goto INT_OVF;
+ }
+ i1 = itemp;
+ goto CNS_INT;
- case TYP_SHORT:
- itemp = INT32(INT16(i1));
-CHK_OVF:
- if (tree->gtOverflow() &&
- ((itemp != i1) ||
- ((tree->gtFlags & GTF_UNSIGNED) && i1 < 0)))
- {
- goto INT_OVF;
- }
- i1 = itemp; goto CNS_INT;
+ case TYP_CHAR:
+ itemp = INT32(UINT16(i1));
+ if (tree->gtOverflow())
+ {
+ if (itemp != i1)
+ {
+ goto INT_OVF;
+ }
+ }
+ i1 = itemp;
+ goto CNS_INT;
- case TYP_CHAR:
- itemp = INT32(UINT16(i1));
- if (tree->gtOverflow())
- if (itemp != i1) goto INT_OVF;
- i1 = itemp;
- goto CNS_INT;
+ case TYP_BOOL:
+ case TYP_UBYTE:
+ itemp = INT32(UINT8(i1));
+ if (tree->gtOverflow())
+ {
+ if (itemp != i1)
+ {
+ goto INT_OVF;
+ }
+ }
+ i1 = itemp;
+ goto CNS_INT;
- case TYP_BOOL:
- case TYP_UBYTE:
- itemp = INT32(UINT8(i1));
- if (tree->gtOverflow()) if (itemp != i1) goto INT_OVF;
- i1 = itemp; goto CNS_INT;
+ case TYP_UINT:
+ if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
+ {
+ goto INT_OVF;
+ }
+ goto CNS_INT;
- case TYP_UINT:
- if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
- goto INT_OVF;
- goto CNS_INT;
+ case TYP_INT:
+ if ((tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
+ {
+ goto INT_OVF;
+ }
+ goto CNS_INT;
- case TYP_INT:
- if ((tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
- goto INT_OVF;
- goto CNS_INT;
+ case TYP_ULONG:
+ if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
+ {
+ op1->ChangeOperConst(GT_CNS_NATIVELONG); // need type of oper to be same as tree
+ op1->gtType = TYP_LONG;
+ // We don't care about the value as we are throwing an exception
+ goto LNG_OVF;
+ }
+ lval1 = UINT64(UINT32(i1));
+ goto CNS_LONG;
- case TYP_ULONG:
- if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && i1 < 0)
- {
- op1->ChangeOperConst(GT_CNS_NATIVELONG); // need type of oper to be same as tree
- op1->gtType = TYP_LONG;
- // We don't care about the value as we are throwing an exception
- goto LNG_OVF;
- }
- lval1 = UINT64(UINT32(i1));
- goto CNS_LONG;
+ case TYP_LONG:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ lval1 = INT64(UINT32(i1));
+ }
+ else
+ {
+ lval1 = INT64(INT32(i1));
+ }
+ goto CNS_LONG;
- case TYP_LONG:
- if (tree->gtFlags & GTF_UNSIGNED)
- {
- lval1 = INT64(UINT32(i1));
- }
- else
- {
- lval1 = INT64(INT32(i1));
- }
- goto CNS_LONG;
+ case TYP_FLOAT:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ f1 = forceCastToFloat(UINT32(i1));
+ }
+ else
+ {
+ f1 = forceCastToFloat(INT32(i1));
+ }
+ d1 = f1;
+ goto CNS_DOUBLE;
- case TYP_FLOAT:
- if (tree->gtFlags & GTF_UNSIGNED)
- f1 = forceCastToFloat(UINT32(i1));
- else
- f1 = forceCastToFloat(INT32(i1));
- d1 = f1;
- goto CNS_DOUBLE;
-
- case TYP_DOUBLE:
- if (tree->gtFlags & GTF_UNSIGNED)
- d1 = (double) UINT32(i1);
- else
- d1 = (double) INT32(i1);
- goto CNS_DOUBLE;
+ case TYP_DOUBLE:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ d1 = (double)UINT32(i1);
+ }
+ else
+ {
+ d1 = (double)INT32(i1);
+ }
+ goto CNS_DOUBLE;
- default:
- assert(!"BAD_TYP");
- break;
+ default:
+ assert(!"BAD_TYP");
+ break;
+ }
+ return tree;
+
+ default:
+ return tree;
}
- return tree;
- default:
- return tree;
- }
+ goto CNS_INT;
- goto CNS_INT;
+ case TYP_LONG:
- case TYP_LONG:
+ /* Fold constant LONG unary operator */
- /* Fold constant LONG unary operator */
+ assert(op1->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
+ lval1 = op1->gtIntConCommon.LngValue();
- assert(op1->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
- lval1 = op1->gtIntConCommon.LngValue();
+ switch (tree->gtOper)
+ {
+ case GT_NOT:
+ lval1 = ~lval1;
+ break;
- switch (tree->gtOper)
- {
- case GT_NOT: lval1 = ~lval1; break;
+ case GT_NEG:
+ case GT_CHS:
+ lval1 = -lval1;
+ break;
- case GT_NEG:
- case GT_CHS: lval1 = -lval1; break;
+ case GT_CAST:
+ assert(genActualType(tree->CastToType()) == tree->gtType);
+ switch (tree->CastToType())
+ {
+ case TYP_BYTE:
+ i1 = INT32(INT8(lval1));
+ goto CHECK_INT_OVERFLOW;
- case GT_CAST:
- assert (genActualType(tree->CastToType()) == tree->gtType);
- switch (tree->CastToType())
- {
- case TYP_BYTE:
- i1 = INT32(INT8(lval1));
- goto CHECK_INT_OVERFLOW;
+ case TYP_SHORT:
+ i1 = INT32(INT16(lval1));
+ goto CHECK_INT_OVERFLOW;
- case TYP_SHORT:
- i1 = INT32(INT16(lval1));
- goto CHECK_INT_OVERFLOW;
+ case TYP_CHAR:
+ i1 = INT32(UINT16(lval1));
+ goto CHECK_UINT_OVERFLOW;
- case TYP_CHAR:
- i1 = INT32(UINT16(lval1));
- goto CHECK_UINT_OVERFLOW;
+ case TYP_UBYTE:
+ i1 = INT32(UINT8(lval1));
+ goto CHECK_UINT_OVERFLOW;
- case TYP_UBYTE:
- i1 = INT32(UINT8(lval1));
- goto CHECK_UINT_OVERFLOW;
+ case TYP_INT:
+ i1 = INT32(lval1);
- case TYP_INT:
- i1 = INT32(lval1);
+ CHECK_INT_OVERFLOW:
+ if (tree->gtOverflow())
+ {
+ if (i1 != lval1)
+ {
+ goto INT_OVF;
+ }
+ if ((tree->gtFlags & GTF_UNSIGNED) && i1 < 0)
+ {
+ goto INT_OVF;
+ }
+ }
+ goto CNS_INT;
- CHECK_INT_OVERFLOW:
- if (tree->gtOverflow())
- {
- if (i1 != lval1)
- goto INT_OVF;
- if ((tree->gtFlags & GTF_UNSIGNED) && i1 < 0)
- goto INT_OVF;
- }
- goto CNS_INT;
+ case TYP_UINT:
+ i1 = UINT32(lval1);
- case TYP_UINT:
- i1 = UINT32(lval1);
+ CHECK_UINT_OVERFLOW:
+ if (tree->gtOverflow() && UINT32(i1) != lval1)
+ {
+ goto INT_OVF;
+ }
+ goto CNS_INT;
- CHECK_UINT_OVERFLOW:
- if (tree->gtOverflow() && UINT32(i1) != lval1)
- goto INT_OVF;
- goto CNS_INT;
+ case TYP_ULONG:
+ if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && lval1 < 0)
+ {
+ goto LNG_OVF;
+ }
+ goto CNS_LONG;
- case TYP_ULONG:
- if (!(tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && lval1 < 0)
- goto LNG_OVF;
- goto CNS_LONG;
+ case TYP_LONG:
+ if ((tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && lval1 < 0)
+ {
+ goto LNG_OVF;
+ }
+ goto CNS_LONG;
- case TYP_LONG:
- if ( (tree->gtFlags & GTF_UNSIGNED) && tree->gtOverflow() && lval1 < 0)
- goto LNG_OVF;
- goto CNS_LONG;
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ if ((tree->gtFlags & GTF_UNSIGNED) && lval1 < 0)
+ {
+ d1 = FloatingPointUtils::convertUInt64ToDouble((unsigned __int64)lval1);
+ }
+ else
+ {
+ d1 = (double)lval1;
+ }
- case TYP_FLOAT:
- case TYP_DOUBLE:
- if ((tree->gtFlags & GTF_UNSIGNED) && lval1 < 0)
- {
- d1 = FloatingPointUtils::convertUInt64ToDouble((unsigned __int64)lval1);
- }
- else
- {
- d1 = (double)lval1;
- }
+ if (tree->CastToType() == TYP_FLOAT)
+ {
+ f1 = forceCastToFloat(d1); // truncate precision
+ d1 = f1;
+ }
+ goto CNS_DOUBLE;
+ default:
+ assert(!"BAD_TYP");
+ break;
+ }
+ return tree;
- if (tree->CastToType() == TYP_FLOAT)
- {
- f1 = forceCastToFloat(d1); // truncate precision
- d1 = f1;
- }
- goto CNS_DOUBLE;
- default:
- assert(!"BAD_TYP");
- break;
+ default:
+ return tree;
}
- return tree;
- default:
- return tree;
- }
+ goto CNS_LONG;
- goto CNS_LONG;
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ assert(op1->gtOper == GT_CNS_DBL);
- case TYP_FLOAT:
- case TYP_DOUBLE:
- assert(op1->gtOper == GT_CNS_DBL);
+ /* Fold constant DOUBLE unary operator */
- /* Fold constant DOUBLE unary operator */
-
- d1 = op1->gtDblCon.gtDconVal;
-
- switch (tree->gtOper)
- {
- case GT_NEG:
- case GT_CHS:
- d1 = -d1;
- break;
+ d1 = op1->gtDblCon.gtDconVal;
- case GT_CAST:
+ switch (tree->gtOper)
+ {
+ case GT_NEG:
+ case GT_CHS:
+ d1 = -d1;
+ break;
- if (tree->gtOverflowEx())
- return tree;
+ case GT_CAST:
- assert (genActualType(tree->CastToType()) == tree->gtType);
+ if (tree->gtOverflowEx())
+ {
+ return tree;
+ }
- if ((op1->gtType == TYP_FLOAT && !_finite(forceCastToFloat(d1))) ||
- (op1->gtType == TYP_DOUBLE && !_finite(d1)))
- {
- // The floating point constant is not finite. The ECMA spec says, in
- // III 3.27, that "...if overflow occurs converting a floating point type
- // to an integer, ..., the value returned is unspecified." However, it would
- // at least be desirable to have the same value returned for casting an overflowing
-                        // constant to an int as would be obtained by passing that constant as a parameter
- // then casting that parameter to an int type. We will assume that the C compiler's
- // cast logic will yield the desired result (and trust testing to tell otherwise).
- // Cross-compilation is an issue here; if that becomes an important scenario, we should
- // capture the target-specific values of overflow casts to the various integral types as
- // constants in a target-specific function.
- CLANG_FORMAT_COMMENT_ANCHOR;
+ assert(genActualType(tree->CastToType()) == tree->gtType);
+
+ if ((op1->gtType == TYP_FLOAT && !_finite(forceCastToFloat(d1))) ||
+ (op1->gtType == TYP_DOUBLE && !_finite(d1)))
+ {
+ // The floating point constant is not finite. The ECMA spec says, in
+ // III 3.27, that "...if overflow occurs converting a floating point type
+ // to an integer, ..., the value returned is unspecified." However, it would
+ // at least be desirable to have the same value returned for casting an overflowing
+ // constant to an int as would obtained by passing that constant as a parameter
+ // then casting that parameter to an int type. We will assume that the C compiler's
+ // cast logic will yield the desired result (and trust testing to tell otherwise).
+ // Cross-compilation is an issue here; if that becomes an important scenario, we should
+ // capture the target-specific values of overflow casts to the various integral types as
+ // constants in a target-specific function.
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef _TARGET_XARCH_
- // Don't fold conversions of +inf/-inf to integral value as the value returned by JIT helper
- // doesn't match with the C compiler's cast result.
- return tree;
-#else //!_TARGET_XARCH_
-
- switch (tree->CastToType())
- {
- case TYP_BYTE:
- i1 = ssize_t(INT8(d1)); goto CNS_INT;
- case TYP_UBYTE:
- i1 = ssize_t(UINT8(d1)); goto CNS_INT;
- case TYP_SHORT:
- i1 = ssize_t(INT16(d1)); goto CNS_INT;
- case TYP_CHAR:
- i1 = ssize_t(UINT16(d1)); goto CNS_INT;
- case TYP_INT:
- i1 = ssize_t(INT32(d1)); goto CNS_INT;
- case TYP_UINT:
- i1 = ssize_t(UINT32(d1)); goto CNS_INT;
- case TYP_LONG:
- lval1 = INT64(d1); goto CNS_LONG;
- case TYP_ULONG:
- lval1 = UINT64(d1); goto CNS_LONG;
- case TYP_FLOAT:
- case TYP_DOUBLE:
- if (op1->gtType == TYP_FLOAT)
- d1 = forceCastToFloat(d1); // it's only !_finite() after this conversion
- goto CNS_DOUBLE;
- default:
- unreached();
- }
+ // Don't fold conversions of +inf/-inf to integral value as the value returned by JIT helper
+ // doesn't match with the C compiler's cast result.
+ return tree;
+#else //!_TARGET_XARCH_
+
+ switch (tree->CastToType())
+ {
+ case TYP_BYTE:
+ i1 = ssize_t(INT8(d1));
+ goto CNS_INT;
+ case TYP_UBYTE:
+ i1 = ssize_t(UINT8(d1));
+ goto CNS_INT;
+ case TYP_SHORT:
+ i1 = ssize_t(INT16(d1));
+ goto CNS_INT;
+ case TYP_CHAR:
+ i1 = ssize_t(UINT16(d1));
+ goto CNS_INT;
+ case TYP_INT:
+ i1 = ssize_t(INT32(d1));
+ goto CNS_INT;
+ case TYP_UINT:
+ i1 = ssize_t(UINT32(d1));
+ goto CNS_INT;
+ case TYP_LONG:
+ lval1 = INT64(d1);
+ goto CNS_LONG;
+ case TYP_ULONG:
+ lval1 = UINT64(d1);
+ goto CNS_LONG;
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ if (op1->gtType == TYP_FLOAT)
+ d1 = forceCastToFloat(d1); // it's only !_finite() after this conversion
+ goto CNS_DOUBLE;
+ default:
+ unreached();
+ }
#endif //!_TARGET_XARCH_
- }
-
- switch (tree->CastToType())
- {
- case TYP_BYTE:
- i1 = INT32(INT8(d1)); goto CNS_INT;
+ }
- case TYP_SHORT:
- i1 = INT32(INT16(d1)); goto CNS_INT;
+ switch (tree->CastToType())
+ {
+ case TYP_BYTE:
+ i1 = INT32(INT8(d1));
+ goto CNS_INT;
- case TYP_CHAR:
- i1 = INT32(UINT16(d1)); goto CNS_INT;
+ case TYP_SHORT:
+ i1 = INT32(INT16(d1));
+ goto CNS_INT;
- case TYP_UBYTE:
- i1 = INT32(UINT8(d1)); goto CNS_INT;
+ case TYP_CHAR:
+ i1 = INT32(UINT16(d1));
+ goto CNS_INT;
- case TYP_INT:
- i1 = INT32(d1); goto CNS_INT;
+ case TYP_UBYTE:
+ i1 = INT32(UINT8(d1));
+ goto CNS_INT;
- case TYP_UINT:
- i1 = forceCastToUInt32(d1); goto CNS_INT;
+ case TYP_INT:
+ i1 = INT32(d1);
+ goto CNS_INT;
- case TYP_LONG:
- lval1 = INT64(d1); goto CNS_LONG;
+ case TYP_UINT:
+ i1 = forceCastToUInt32(d1);
+ goto CNS_INT;
- case TYP_ULONG:
- lval1 = FloatingPointUtils::convertDoubleToUInt64(d1);
- goto CNS_LONG;
+ case TYP_LONG:
+ lval1 = INT64(d1);
+ goto CNS_LONG;
- case TYP_FLOAT:
- d1 = forceCastToFloat(d1);
- goto CNS_DOUBLE;
+ case TYP_ULONG:
+ lval1 = FloatingPointUtils::convertDoubleToUInt64(d1);
+ goto CNS_LONG;
- case TYP_DOUBLE:
- if (op1->gtType == TYP_FLOAT)
- d1 = forceCastToFloat(d1); // truncate precision
- goto CNS_DOUBLE; // redundant cast
+ case TYP_FLOAT:
+ d1 = forceCastToFloat(d1);
+ goto CNS_DOUBLE;
- default:
- assert(!"BAD_TYP");
- break;
+ case TYP_DOUBLE:
+ if (op1->gtType == TYP_FLOAT)
+ {
+ d1 = forceCastToFloat(d1); // truncate precision
+ }
+ goto CNS_DOUBLE; // redundant cast
+
+ default:
+ assert(!"BAD_TYP");
+ break;
+ }
+ return tree;
+
+ default:
+ return tree;
}
- return tree;
+ goto CNS_DOUBLE;
default:
+ /* not a foldable typ - e.g. RET const */
return tree;
- }
- goto CNS_DOUBLE;
-
- default:
- /* not a foldable typ - e.g. RET const */
- return tree;
}
}
@@ -11426,10 +12225,14 @@ CHK_OVF:
assert(op2->OperKind() & GTK_CONST);
if (tree->gtOper == GT_COMMA)
+ {
return op2;
+ }
if (tree->gtOper == GT_LIST)
+ {
return tree;
+ }
switchType = op1->gtType;
@@ -11445,296 +12248,343 @@ CHK_OVF:
switch (switchType)
{
- /*-------------------------------------------------------------------------
- * Fold constant REF of BYREF binary operator
- * These can only be comparisons or null pointers
- */
+ /*-------------------------------------------------------------------------
+ * Fold constant REF of BYREF binary operator
+ * These can only be comparisons or null pointers
+ */
- case TYP_REF:
+ case TYP_REF:
- /* String nodes are an RVA at this point */
+ /* String nodes are an RVA at this point */
- if (op1->gtOper == GT_CNS_STR || op2->gtOper == GT_CNS_STR)
- return tree;
+ if (op1->gtOper == GT_CNS_STR || op2->gtOper == GT_CNS_STR)
+ {
+ return tree;
+ }
- __fallthrough;
+ __fallthrough;
- case TYP_BYREF:
+ case TYP_BYREF:
- i1 = op1->gtIntConCommon.IconValue();
- i2 = op2->gtIntConCommon.IconValue();
+ i1 = op1->gtIntConCommon.IconValue();
+ i2 = op2->gtIntConCommon.IconValue();
- switch (tree->gtOper)
- {
- case GT_EQ:
- i1 = (i1 == i2);
- goto FOLD_COND;
-
- case GT_NE:
- i1 = (i1 != i2);
- goto FOLD_COND;
-
- case GT_ADD:
- noway_assert(tree->gtType != TYP_REF);
- // We only fold a GT_ADD that involves a null reference.
- if (((op1->TypeGet() == TYP_REF) && (i1 == 0)) ||
- ((op2->TypeGet() == TYP_REF) && (i2 == 0)))
+ switch (tree->gtOper)
{
-#ifdef DEBUG
- if (verbose)
- {
- printf("\nFolding operator with constant nodes into a constant:\n");
- gtDispTree(tree);
- }
+ case GT_EQ:
+ i1 = (i1 == i2);
+ goto FOLD_COND;
+
+ case GT_NE:
+ i1 = (i1 != i2);
+ goto FOLD_COND;
+
+ case GT_ADD:
+ noway_assert(tree->gtType != TYP_REF);
+ // We only fold a GT_ADD that involves a null reference.
+ if (((op1->TypeGet() == TYP_REF) && (i1 == 0)) || ((op2->TypeGet() == TYP_REF) && (i2 == 0)))
+ {
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\nFolding operator with constant nodes into a constant:\n");
+ gtDispTree(tree);
+ }
#endif
- // Fold into GT_IND of null byref
- tree->ChangeOperConst(GT_CNS_INT);
- tree->gtType = TYP_BYREF;
- tree->gtIntCon.gtIconVal = 0;
- tree->gtIntCon.gtFieldSeq = FieldSeqStore::NotAField();
- if (vnStore != nullptr)
- {
- fgValueNumberTreeConst(tree);
- }
-#ifdef DEBUG
- if (verbose)
- {
- printf("\nFolded to null byref:\n");
- gtDispTree(tree);
- }
+ // Fold into GT_IND of null byref
+ tree->ChangeOperConst(GT_CNS_INT);
+ tree->gtType = TYP_BYREF;
+ tree->gtIntCon.gtIconVal = 0;
+ tree->gtIntCon.gtFieldSeq = FieldSeqStore::NotAField();
+ if (vnStore != nullptr)
+ {
+ fgValueNumberTreeConst(tree);
+ }
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\nFolded to null byref:\n");
+ gtDispTree(tree);
+ }
#endif
- goto DONE;
- }
+ goto DONE;
+ }
- default:
- break;
- }
-
- return tree;
+ default:
+ break;
+ }
- /*-------------------------------------------------------------------------
- * Fold constant INT binary operator
- */
+ return tree;
- case TYP_INT:
+ /*-------------------------------------------------------------------------
+ * Fold constant INT binary operator
+ */
- if (tree->OperIsCompare() && (tree->gtType == TYP_BYTE))
- tree->gtType = TYP_INT;
+ case TYP_INT:
- assert (tree->gtType == TYP_INT ||
- varTypeIsGC(tree->TypeGet()) ||
- tree->gtOper == GT_MKREFANY);
+ if (tree->OperIsCompare() && (tree->gtType == TYP_BYTE))
+ {
+ tree->gtType = TYP_INT;
+ }
- // No GC pointer types should be folded here...
- //
- assert(!varTypeIsGC(op1->gtType) && !varTypeIsGC(op2->gtType));
+ assert(tree->gtType == TYP_INT || varTypeIsGC(tree->TypeGet()) || tree->gtOper == GT_MKREFANY);
- assert(op1->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
- assert(op2->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
+ // No GC pointer types should be folded here...
+ //
+ assert(!varTypeIsGC(op1->gtType) && !varTypeIsGC(op2->gtType));
- i1 = op1->gtIntConCommon.IconValue();
- i2 = op2->gtIntConCommon.IconValue();
+ assert(op1->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
+ assert(op2->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
- switch (tree->gtOper)
- {
- case GT_EQ : i1 = (INT32(i1) == INT32(i2)); break;
- case GT_NE : i1 = (INT32(i1) != INT32(i2)); break;
+ i1 = op1->gtIntConCommon.IconValue();
+ i2 = op2->gtIntConCommon.IconValue();
- case GT_LT :
- if (tree->gtFlags & GTF_UNSIGNED)
- i1 = (UINT32(i1) < UINT32(i2));
- else
- i1 = (INT32(i1) < INT32(i2));
- break;
+ switch (tree->gtOper)
+ {
+ case GT_EQ:
+ i1 = (INT32(i1) == INT32(i2));
+ break;
+ case GT_NE:
+ i1 = (INT32(i1) != INT32(i2));
+ break;
- case GT_LE :
- if (tree->gtFlags & GTF_UNSIGNED)
- i1 = (UINT32(i1) <= UINT32(i2));
- else
- i1 = (INT32(i1) <= INT32(i2));
- break;
+ case GT_LT:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ i1 = (UINT32(i1) < UINT32(i2));
+ }
+ else
+ {
+ i1 = (INT32(i1) < INT32(i2));
+ }
+ break;
- case GT_GE :
- if (tree->gtFlags & GTF_UNSIGNED)
- i1 = (UINT32(i1) >= UINT32(i2));
- else
- i1 = (INT32(i1) >= INT32(i2));
- break;
+ case GT_LE:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ i1 = (UINT32(i1) <= UINT32(i2));
+ }
+ else
+ {
+ i1 = (INT32(i1) <= INT32(i2));
+ }
+ break;
- case GT_GT :
- if (tree->gtFlags & GTF_UNSIGNED)
- i1 = (UINT32(i1) > UINT32(i2));
- else
- i1 = (INT32(i1) > INT32(i2));
- break;
+ case GT_GE:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ i1 = (UINT32(i1) >= UINT32(i2));
+ }
+ else
+ {
+ i1 = (INT32(i1) >= INT32(i2));
+ }
+ break;
- case GT_ADD:
- itemp = i1 + i2;
- if (tree->gtOverflow())
- {
- if (tree->gtFlags & GTF_UNSIGNED)
- {
- if (INT64(UINT32(itemp)) != INT64(UINT32(i1)) + INT64(UINT32(i2)))
- goto INT_OVF;
- }
- else
- {
- if (INT64(INT32(itemp)) != INT64(INT32(i1))+INT64(INT32(i2)))
- goto INT_OVF;
- }
- }
- i1 = itemp;
- fieldSeq = GetFieldSeqStore()->Append(op1->gtIntCon.gtFieldSeq,
- op2->gtIntCon.gtFieldSeq);
- break;
- case GT_SUB:
- itemp = i1 - i2;
- if (tree->gtOverflow())
- {
- if (tree->gtFlags & GTF_UNSIGNED)
- {
- if (INT64(UINT32(itemp)) != ((INT64)((UINT32)i1) - (INT64)((UINT32)i2)))
- goto INT_OVF;
- }
- else
- {
- if (INT64(INT32(itemp)) != INT64(INT32(i1)) - INT64(INT32(i2)))
- goto INT_OVF;
- }
- }
- i1 = itemp; break;
- case GT_MUL:
- itemp = i1 * i2;
- if (tree->gtOverflow())
- {
- if (tree->gtFlags & GTF_UNSIGNED)
- {
- if (INT64(UINT32(itemp)) != ((INT64)((UINT32)i1) * (INT64)((UINT32)i2)))
- goto INT_OVF;
- }
- else
- {
- if (INT64(INT32(itemp)) != INT64(INT32(i1)) * INT64(INT32(i2)))
- goto INT_OVF;
- }
- }
- // For the very particular case of the "constant array index" pseudo-field, we
- // assume that multiplication is by the field width, and preserves that field.
- // This could obviously be made more robust by a more complicated set of annotations...
- if ((op1->gtIntCon.gtFieldSeq != nullptr) &&
- op1->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
- {
- assert(op2->gtIntCon.gtFieldSeq == FieldSeqStore::NotAField());
- fieldSeq = op1->gtIntCon.gtFieldSeq;
- }
- else if ((op2->gtIntCon.gtFieldSeq != nullptr) &&
- op2->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
- {
- assert(op1->gtIntCon.gtFieldSeq == FieldSeqStore::NotAField());
- fieldSeq = op2->gtIntCon.gtFieldSeq;
- }
- i1 = itemp;
- break;
-
- case GT_OR : i1 |= i2; break;
- case GT_XOR: i1 ^= i2; break;
- case GT_AND: i1 &= i2; break;
+ case GT_GT:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ i1 = (UINT32(i1) > UINT32(i2));
+ }
+ else
+ {
+ i1 = (INT32(i1) > INT32(i2));
+ }
+ break;
- case GT_LSH: i1 <<= (i2 & 0x1f); break;
- case GT_RSH: i1 >>= (i2 & 0x1f); break;
- case GT_RSZ:
- /* logical shift -> make it unsigned to not propagate the sign bit */
- i1 = UINT32(i1) >> (i2 & 0x1f);
- break;
- case GT_ROL: i1 = (i1 << (i2 & 0x1f)) | (UINT32(i1) >> ((32 - i2) & 0x1f));
- break;
- case GT_ROR: i1 = (i1 << ((32 - i2) & 0x1f)) | (UINT32(i1) >> (i2 & 0x1f));
- break;
+ case GT_ADD:
+ itemp = i1 + i2;
+ if (tree->gtOverflow())
+ {
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ if (INT64(UINT32(itemp)) != INT64(UINT32(i1)) + INT64(UINT32(i2)))
+ {
+ goto INT_OVF;
+ }
+ }
+ else
+ {
+ if (INT64(INT32(itemp)) != INT64(INT32(i1)) + INT64(INT32(i2)))
+ {
+ goto INT_OVF;
+ }
+ }
+ }
+ i1 = itemp;
+ fieldSeq = GetFieldSeqStore()->Append(op1->gtIntCon.gtFieldSeq, op2->gtIntCon.gtFieldSeq);
+ break;
+ case GT_SUB:
+ itemp = i1 - i2;
+ if (tree->gtOverflow())
+ {
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ if (INT64(UINT32(itemp)) != ((INT64)((UINT32)i1) - (INT64)((UINT32)i2)))
+ {
+ goto INT_OVF;
+ }
+ }
+ else
+ {
+ if (INT64(INT32(itemp)) != INT64(INT32(i1)) - INT64(INT32(i2)))
+ {
+ goto INT_OVF;
+ }
+ }
+ }
+ i1 = itemp;
+ break;
+ case GT_MUL:
+ itemp = i1 * i2;
+ if (tree->gtOverflow())
+ {
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ if (INT64(UINT32(itemp)) != ((INT64)((UINT32)i1) * (INT64)((UINT32)i2)))
+ {
+ goto INT_OVF;
+ }
+ }
+ else
+ {
+ if (INT64(INT32(itemp)) != INT64(INT32(i1)) * INT64(INT32(i2)))
+ {
+ goto INT_OVF;
+ }
+ }
+ }
+ // For the very particular case of the "constant array index" pseudo-field, we
+ // assume that multiplication is by the field width, and preserves that field.
+ // This could obviously be made more robust by a more complicated set of annotations...
+ if ((op1->gtIntCon.gtFieldSeq != nullptr) && op1->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
+ {
+ assert(op2->gtIntCon.gtFieldSeq == FieldSeqStore::NotAField());
+ fieldSeq = op1->gtIntCon.gtFieldSeq;
+ }
+ else if ((op2->gtIntCon.gtFieldSeq != nullptr) &&
+ op2->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
+ {
+ assert(op1->gtIntCon.gtFieldSeq == FieldSeqStore::NotAField());
+ fieldSeq = op2->gtIntCon.gtFieldSeq;
+ }
+ i1 = itemp;
+ break;
- /* DIV and MOD can generate an INT 0 - if division by 0
- * or overflow - when dividing MIN by -1 */
+ case GT_OR:
+ i1 |= i2;
+ break;
+ case GT_XOR:
+ i1 ^= i2;
+ break;
+ case GT_AND:
+ i1 &= i2;
+ break;
- case GT_DIV:
- case GT_MOD:
- case GT_UDIV:
- case GT_UMOD:
- if (INT32(i2) == 0)
- {
- // Division by zero:
- // We have to evaluate this expression and throw an exception
- return tree;
- }
- else if ((INT32(i2) == -1) &&
- (UINT32(i1) == 0x80000000))
- {
- // Overflow Division:
- // We have to evaluate this expression and throw an exception
- return tree;
- }
+ case GT_LSH:
+ i1 <<= (i2 & 0x1f);
+ break;
+ case GT_RSH:
+ i1 >>= (i2 & 0x1f);
+ break;
+ case GT_RSZ:
+ /* logical shift -> make it unsigned to not propagate the sign bit */
+ i1 = UINT32(i1) >> (i2 & 0x1f);
+ break;
+ case GT_ROL:
+ i1 = (i1 << (i2 & 0x1f)) | (UINT32(i1) >> ((32 - i2) & 0x1f));
+ break;
+ case GT_ROR:
+ i1 = (i1 << ((32 - i2) & 0x1f)) | (UINT32(i1) >> (i2 & 0x1f));
+ break;
- if (tree->gtOper == GT_DIV)
- {
- i1 = INT32(i1) / INT32(i2);
- }
- else if (tree->gtOper == GT_MOD)
- {
- i1 = INT32(i1) % INT32(i2);
- }
- else if (tree->gtOper == GT_UDIV)
- {
- i1 = UINT32(i1) / UINT32(i2);
- }
- else
- {
- assert(tree->gtOper == GT_UMOD);
- i1 = UINT32(i1) % UINT32(i2);
- }
- break;
+ /* DIV and MOD can generate an INT 0 - if division by 0
+ * or overflow - when dividing MIN by -1 */
- default:
- return tree;
- }
+ case GT_DIV:
+ case GT_MOD:
+ case GT_UDIV:
+ case GT_UMOD:
+ if (INT32(i2) == 0)
+ {
+ // Division by zero:
+ // We have to evaluate this expression and throw an exception
+ return tree;
+ }
+ else if ((INT32(i2) == -1) && (UINT32(i1) == 0x80000000))
+ {
+ // Overflow Division:
+ // We have to evaluate this expression and throw an exception
+ return tree;
+ }
+
+ if (tree->gtOper == GT_DIV)
+ {
+ i1 = INT32(i1) / INT32(i2);
+ }
+ else if (tree->gtOper == GT_MOD)
+ {
+ i1 = INT32(i1) % INT32(i2);
+ }
+ else if (tree->gtOper == GT_UDIV)
+ {
+ i1 = UINT32(i1) / UINT32(i2);
+ }
+ else
+ {
+ assert(tree->gtOper == GT_UMOD);
+ i1 = UINT32(i1) % UINT32(i2);
+ }
+ break;
+
+ default:
+ return tree;
+ }
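Editor's note: the checked GT_ADD, GT_SUB, and GT_MUL cases above all use the same widen-and-compare test before jumping to INT_OVF: redo the 32-bit operation in 64-bit arithmetic and flag any mismatch. A minimal sketch of that pattern for signed addition (illustrative only; the JIT's INT32/UINT32 macros are approximated with casts, and unsigned arithmetic is used so the wrapped result is computed without relying on signed overflow):

    #include <cstdint>

    static bool SignedAdd32Overflows(int32_t a, int32_t b)
    {
        // Wrapped 32-bit result, computed via unsigned arithmetic.
        int32_t wrapped = static_cast<int32_t>(static_cast<uint32_t>(a) + static_cast<uint32_t>(b));
        // Compare against the exact 64-bit sum; a mismatch is what sends the fold to INT_OVF.
        return static_cast<int64_t>(wrapped) != static_cast<int64_t>(a) + static_cast<int64_t>(b);
    }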
/* We get here after folding to a GT_CNS_INT type
* change the node to the new type / value and make sure the node sizes are OK */
-CNS_INT:
-FOLD_COND:
+ CNS_INT:
+ FOLD_COND:
-#ifdef DEBUG
- if (verbose)
- {
- printf("\nFolding operator with constant nodes into a constant:\n");
- gtDispTree(tree);
- }
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\nFolding operator with constant nodes into a constant:\n");
+ gtDispTree(tree);
+ }
#endif
#ifdef _TARGET_64BIT_
- // we need to properly re-sign-extend or truncate as needed.
- if (tree->gtFlags & GTF_UNSIGNED)
- i1 = UINT32(i1);
- else
- i1 = INT32(i1);
+ // we need to properly re-sign-extend or truncate as needed.
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ i1 = UINT32(i1);
+ }
+ else
+ {
+ i1 = INT32(i1);
+ }
#endif // _TARGET_64BIT_
- /* Also all conditional folding jumps here since the node hanging from
- * GT_JTRUE has to be a GT_CNS_INT - value 0 or 1 */
+ /* Also all conditional folding jumps here since the node hanging from
+ * GT_JTRUE has to be a GT_CNS_INT - value 0 or 1 */
- tree->ChangeOperConst(GT_CNS_INT);
- tree->gtType = TYP_INT;
- tree->gtIntCon.gtIconVal = i1;
- tree->gtIntCon.gtFieldSeq = fieldSeq;
- if (vnStore != nullptr)
- {
- fgValueNumberTreeConst(tree);
- }
-#ifdef DEBUG
- if (verbose)
- {
- printf("Bashed to int constant:\n");
- gtDispTree(tree);
- }
+ tree->ChangeOperConst(GT_CNS_INT);
+ tree->gtType = TYP_INT;
+ tree->gtIntCon.gtIconVal = i1;
+ tree->gtIntCon.gtFieldSeq = fieldSeq;
+ if (vnStore != nullptr)
+ {
+ fgValueNumberTreeConst(tree);
+ }
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("Bashed to int constant:\n");
+ gtDispTree(tree);
+ }
#endif
- goto DONE;
+ goto DONE;
/* This operation is going to cause an overflow exception. Morph into
an overflow helper. Put a dummy constant value for code generation.
@@ -11747,489 +12597,596 @@ FOLD_COND:
was successful - instead use one of the operands, e.g. op1
*/
-LNG_OVF:
- // Don't fold overflow operations if not global morph phase.
- // The reason for this is that this optimization is replacing a gentree node
- // with another new gentree node. Say a GT_CALL(arglist) has one 'arg'
- // involving overflow arithmetic. During assertion prop, it is possible
- // that the 'arg' could be constant folded and the result could lead to an
- // overflow. In such a case 'arg' will get replaced with GT_COMMA node
- // but fgMorphArgs() - see the logic around "if(lateArgsComputed)" - doesn't
- // update args table. For this reason this optimization is enabled only
- // for global morphing phase.
- //
- // X86/Arm32 legacy codegen note: This is not an issue on x86 for the reason that
- // it doesn't use arg table for calls. In addition x86/arm32 legacy codegen doesn't
- // expect long constants to show up as an operand of overflow cast operation.
- //
- // TODO-CQ: Once fgMorphArgs() is fixed this restriction could be removed.
- CLANG_FORMAT_COMMENT_ANCHOR;
+ LNG_OVF:
+ // Don't fold overflow operations if not global morph phase.
+ // The reason for this is that this optimization is replacing a gentree node
+ // with another new gentree node. Say a GT_CALL(arglist) has one 'arg'
+ // involving overflow arithmetic. During assertion prop, it is possible
+ // that the 'arg' could be constant folded and the result could lead to an
+ // overflow. In such a case 'arg' will get replaced with GT_COMMA node
+ // but fgMorphArgs() - see the logic around "if(lateArgsComputed)" - doesn't
+ // update args table. For this reason this optimization is enabled only
+ // for global morphing phase.
+ //
+ // X86/Arm32 legacy codegen note: This is not an issue on x86 for the reason that
+ // it doesn't use arg table for calls. In addition x86/arm32 legacy codegen doesn't
+ // expect long constants to show up as an operand of overflow cast operation.
+ //
+ // TODO-CQ: Once fgMorphArgs() is fixed this restriction could be removed.
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef LEGACY_BACKEND
- if (!fgGlobalMorph)
- {
- assert(tree->gtOverflow());
- return tree;
- }
+ if (!fgGlobalMorph)
+ {
+ assert(tree->gtOverflow());
+ return tree;
+ }
#endif // !LEGACY_BACKEND
- op1 = gtNewLconNode(0);
- if (vnStore != nullptr)
- {
- op1->gtVNPair.SetBoth(vnStore->VNZeroForType(TYP_LONG));
- }
- goto OVF;
+ op1 = gtNewLconNode(0);
+ if (vnStore != nullptr)
+ {
+ op1->gtVNPair.SetBoth(vnStore->VNZeroForType(TYP_LONG));
+ }
+ goto OVF;
-INT_OVF:
+ INT_OVF:
#ifndef LEGACY_BACKEND
- // Don't fold overflow operations if not global morph phase.
- // The reason for this is that this optimization is replacing a gentree node
- // with another new gentree node. Say a GT_CALL(arglist) has one 'arg'
- // involving overflow arithmetic. During assertion prop, it is possible
- // that the 'arg' could be constant folded and the result could lead to an
- // overflow. In such a case 'arg' will get replaced with GT_COMMA node
- // but fgMorphArgs() - see the logic around "if(lateArgsComputed)" - doesn't
- // update args table. For this reason this optimization is enabled only
- // for global morphing phase.
- //
- // X86/Arm32 legacy codegen note: This is not an issue on x86 for the reason that
- // it doesn't use arg table for calls. In addition x86/arm32 legacy codegen doesn't
- // expect long constants to show up as an operand of overflow cast operation.
- //
- // TODO-CQ: Once fgMorphArgs() is fixed this restriction could be removed.
+ // Don't fold overflow operations if not global morph phase.
+ // The reason for this is that this optimization is replacing a gentree node
+ // with another new gentree node. Say a GT_CALL(arglist) has one 'arg'
+ // involving overflow arithmetic. During assertion prop, it is possible
+ // that the 'arg' could be constant folded and the result could lead to an
+ // overflow. In such a case 'arg' will get replaced with GT_COMMA node
+ // but fgMorphArgs() - see the logic around "if(lateArgsComputed)" - doesn't
+ // update args table. For this reason this optimization is enabled only
+ // for global morphing phase.
+ //
+ // X86/Arm32 legacy codegen note: This is not an issue on x86 for the reason that
+ // it doesn't use arg table for calls. In addition x86/arm32 legacy codegen doesn't
+ // expect long constants to show up as an operand of overflow cast operation.
+ //
+ // TODO-CQ: Once fgMorphArgs() is fixed this restriction could be removed.
- if (!fgGlobalMorph)
- {
- assert(tree->gtOverflow());
- return tree;
- }
+ if (!fgGlobalMorph)
+ {
+ assert(tree->gtOverflow());
+ return tree;
+ }
#endif // !LEGACY_BACKEND
- op1 = gtNewIconNode(0);
- if (vnStore != nullptr)
- {
- op1->gtVNPair.SetBoth(vnStore->VNZeroForType(TYP_INT));
- }
- goto OVF;
+ op1 = gtNewIconNode(0);
+ if (vnStore != nullptr)
+ {
+ op1->gtVNPair.SetBoth(vnStore->VNZeroForType(TYP_INT));
+ }
+ goto OVF;
-OVF:
-#ifdef DEBUG
- if (verbose)
- {
- printf("\nFolding binary operator with constant nodes into a comma throw:\n");
- gtDispTree(tree);
- }
+ OVF:
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\nFolding binary operator with constant nodes into a comma throw:\n");
+ gtDispTree(tree);
+ }
#endif
- /* We will change the cast to a GT_COMMA and attach the exception helper as gtOp.gtOp1.
- * The constant expression zero becomes op2. */
+ /* We will change the cast to a GT_COMMA and attach the exception helper as gtOp.gtOp1.
+ * The constant expression zero becomes op2. */
- assert(tree->gtOverflow());
- assert(tree->gtOper == GT_ADD || tree->gtOper == GT_SUB ||
- tree->gtOper == GT_CAST || tree->gtOper == GT_MUL);
- assert(op1);
+ assert(tree->gtOverflow());
+ assert(tree->gtOper == GT_ADD || tree->gtOper == GT_SUB || tree->gtOper == GT_CAST ||
+ tree->gtOper == GT_MUL);
+ assert(op1);
- op2 = op1;
- op1 = gtNewHelperCallNode(CORINFO_HELP_OVERFLOW,
- TYP_VOID,
- GTF_EXCEPT,
- gtNewArgList(gtNewIconNode(compCurBB->bbTryIndex)));
+ op2 = op1;
+ op1 = gtNewHelperCallNode(CORINFO_HELP_OVERFLOW, TYP_VOID, GTF_EXCEPT,
+ gtNewArgList(gtNewIconNode(compCurBB->bbTryIndex)));
- if (vnStore != nullptr)
- {
- op1->gtVNPair = vnStore->VNPWithExc(ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()), vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc)));
- }
-
- tree = gtNewOperNode(GT_COMMA, tree->gtType, op1, op2);
+ if (vnStore != nullptr)
+ {
+ op1->gtVNPair =
+ vnStore->VNPWithExc(ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()),
+ vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc)));
+ }
- return tree;
+ tree = gtNewOperNode(GT_COMMA, tree->gtType, op1, op2);
- /*-------------------------------------------------------------------------
- * Fold constant LONG binary operator
- */
+ return tree;
- case TYP_LONG:
+ /*-------------------------------------------------------------------------
+ * Fold constant LONG binary operator
+ */
- // No GC pointer types should be folded here...
- //
- assert(!varTypeIsGC(op1->gtType) && !varTypeIsGC(op2->gtType));
+ case TYP_LONG:
- // op1 is known to be a TYP_LONG, op2 is normally a TYP_LONG, unless we have a shift operator in which case it is a TYP_INT
- //
- assert((op2->gtType == TYP_LONG) || (op2->gtType == TYP_INT));
+ // No GC pointer types should be folded here...
+ //
+ assert(!varTypeIsGC(op1->gtType) && !varTypeIsGC(op2->gtType));
- assert(op1->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
- assert(op2->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
+ // op1 is known to be a TYP_LONG, op2 is normally a TYP_LONG, unless we have a shift operator in which case
+ // it is a TYP_INT
+ //
+ assert((op2->gtType == TYP_LONG) || (op2->gtType == TYP_INT));
- lval1 = op1->gtIntConCommon.LngValue();
-
- // For the shift operators we can have a op2 that is a TYP_INT and thus will be GT_CNS_INT
- if (op2->OperGet() == GT_CNS_INT)
- lval2 = op2->gtIntConCommon.IconValue();
- else
- lval2 = op2->gtIntConCommon.LngValue();
+ assert(op1->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
+ assert(op2->gtIntConCommon.ImmedValCanBeFolded(this, tree->OperGet()));
- switch (tree->gtOper)
- {
- case GT_EQ : i1 = (lval1 == lval2); goto FOLD_COND;
- case GT_NE : i1 = (lval1 != lval2); goto FOLD_COND;
+ lval1 = op1->gtIntConCommon.LngValue();
- case GT_LT :
- if (tree->gtFlags & GTF_UNSIGNED)
- i1 = (UINT64(lval1) < UINT64(lval2));
+            // For the shift operators we can have an op2 that is a TYP_INT and thus will be GT_CNS_INT
+ if (op2->OperGet() == GT_CNS_INT)
+ {
+ lval2 = op2->gtIntConCommon.IconValue();
+ }
else
- i1 = (lval1 < lval2);
- goto FOLD_COND;
+ {
+ lval2 = op2->gtIntConCommon.LngValue();
+ }
- case GT_LE :
- if (tree->gtFlags & GTF_UNSIGNED)
- i1 = (UINT64(lval1) <= UINT64(lval2));
- else
- i1 = (lval1 <= lval2);
- goto FOLD_COND;
+ switch (tree->gtOper)
+ {
+ case GT_EQ:
+ i1 = (lval1 == lval2);
+ goto FOLD_COND;
+ case GT_NE:
+ i1 = (lval1 != lval2);
+ goto FOLD_COND;
- case GT_GE :
- if (tree->gtFlags & GTF_UNSIGNED)
- i1 = (UINT64(lval1) >= UINT64(lval2));
- else
- i1 = (lval1 >= lval2);
- goto FOLD_COND;
+ case GT_LT:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ i1 = (UINT64(lval1) < UINT64(lval2));
+ }
+ else
+ {
+ i1 = (lval1 < lval2);
+ }
+ goto FOLD_COND;
- case GT_GT :
- if (tree->gtFlags & GTF_UNSIGNED)
- i1 = (UINT64(lval1) > UINT64(lval2));
- else
- i1 = (lval1 > lval2);
- goto FOLD_COND;
+ case GT_LE:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ i1 = (UINT64(lval1) <= UINT64(lval2));
+ }
+ else
+ {
+ i1 = (lval1 <= lval2);
+ }
+ goto FOLD_COND;
- case GT_ADD:
- ltemp = lval1 + lval2;
+ case GT_GE:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ i1 = (UINT64(lval1) >= UINT64(lval2));
+ }
+ else
+ {
+ i1 = (lval1 >= lval2);
+ }
+ goto FOLD_COND;
-LNG_ADD_CHKOVF:
- /* For the SIGNED case - If there is one positive and one negative operand, there can be no overflow
- * If both are positive, the result has to be positive, and similary for negatives.
- *
- * For the UNSIGNED case - If a UINT32 operand is bigger than the result then OVF */
+ case GT_GT:
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ i1 = (UINT64(lval1) > UINT64(lval2));
+ }
+ else
+ {
+ i1 = (lval1 > lval2);
+ }
+ goto FOLD_COND;
- if (tree->gtOverflow())
- {
- if (tree->gtFlags & GTF_UNSIGNED)
- {
- if ( (UINT64(lval1) > UINT64(ltemp)) ||
- (UINT64(lval2) > UINT64(ltemp)) )
- goto LNG_OVF;
- }
- else
- if ( ((lval1<0) == (lval2<0)) && ((lval1<0) != (ltemp<0)) )
- goto LNG_OVF;
- }
- lval1 = ltemp; break;
+ case GT_ADD:
+ ltemp = lval1 + lval2;
- case GT_SUB:
- ltemp = lval1 - lval2;
- if (tree->gtOverflow())
- {
- if (tree->gtFlags & GTF_UNSIGNED)
- {
- if (UINT64(lval2) > UINT64(lval1))
- goto LNG_OVF;
- }
- else
- {
- /* If both operands are +ve or both are -ve, there can be no
- overflow. Else use the logic for : lval1 + (-lval2) */
+ LNG_ADD_CHKOVF:
+ /* For the SIGNED case - If there is one positive and one negative operand, there can be no overflow
+                    * If both are positive, the result has to be positive, and similarly for negatives.
+ *
+ * For the UNSIGNED case - If a UINT32 operand is bigger than the result then OVF */
- if ((lval1<0) != (lval2<0))
+ if (tree->gtOverflow())
{
- if (lval2 == INT64_MIN) goto LNG_OVF;
- lval2 = -lval2; goto LNG_ADD_CHKOVF;
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ if ((UINT64(lval1) > UINT64(ltemp)) || (UINT64(lval2) > UINT64(ltemp)))
+ {
+ goto LNG_OVF;
+ }
+ }
+ else if (((lval1 < 0) == (lval2 < 0)) && ((lval1 < 0) != (ltemp < 0)))
+ {
+ goto LNG_OVF;
+ }
}
- }
- }
- lval1 = ltemp; break;
+ lval1 = ltemp;
+ break;
- case GT_MUL:
- ltemp = lval1 * lval2;
+ case GT_SUB:
+ ltemp = lval1 - lval2;
+ if (tree->gtOverflow())
+ {
+ if (tree->gtFlags & GTF_UNSIGNED)
+ {
+ if (UINT64(lval2) > UINT64(lval1))
+ {
+ goto LNG_OVF;
+ }
+ }
+ else
+ {
+ /* If both operands are +ve or both are -ve, there can be no
+ overflow. Else use the logic for : lval1 + (-lval2) */
- if (tree->gtOverflow() && lval2 != 0)
- {
+ if ((lval1 < 0) != (lval2 < 0))
+ {
+ if (lval2 == INT64_MIN)
+ {
+ goto LNG_OVF;
+ }
+ lval2 = -lval2;
+ goto LNG_ADD_CHKOVF;
+ }
+ }
+ }
+ lval1 = ltemp;
+ break;
- if (tree->gtFlags & GTF_UNSIGNED)
- {
- UINT64 ultemp = ltemp;
- UINT64 ulval1 = lval1;
- UINT64 ulval2 = lval2;
- if ((ultemp/ulval2) != ulval1) goto LNG_OVF;
- }
- else
- {
- //This does a multiply and then reverses it. This test works great except for MIN_INT *
- //-1. In that case we mess up the sign on ltmp. Make sure to double check the sign.
- //if either is 0, then no overflow
- if (lval1 != 0) //lval2 checked above.
+ case GT_MUL:
+ ltemp = lval1 * lval2;
+
+ if (tree->gtOverflow() && lval2 != 0)
{
- if (((lval1<0) == (lval2<0)) && (ltemp<0))
+
+ if (tree->gtFlags & GTF_UNSIGNED)
{
- goto LNG_OVF;
+ UINT64 ultemp = ltemp;
+ UINT64 ulval1 = lval1;
+ UINT64 ulval2 = lval2;
+ if ((ultemp / ulval2) != ulval1)
+ {
+ goto LNG_OVF;
+ }
}
- if (((lval1<0) != (lval2<0)) && (ltemp>0))
+ else
{
- goto LNG_OVF;
- }
+ // This does a multiply and then reverses it. This test works great except for MIN_INT *
+ //-1. In that case we mess up the sign on ltmp. Make sure to double check the sign.
+ // if either is 0, then no overflow
+ if (lval1 != 0) // lval2 checked above.
+ {
+ if (((lval1 < 0) == (lval2 < 0)) && (ltemp < 0))
+ {
+ goto LNG_OVF;
+ }
+ if (((lval1 < 0) != (lval2 < 0)) && (ltemp > 0))
+ {
+ goto LNG_OVF;
+ }
- // TODO-Amd64-Unix: Remove the code that disables optimizations for this method when the clang
- // optimizer is fixed and/or the method implementation is refactored in a simpler code.
- // There is a bug in the clang-3.5 optimizer. The issue is that in release build the
- // optimizer is mistyping (or just wrongly decides to use 32 bit operation for a corner
- // case of MIN_LONG) the args of the (ltemp / lval2) to int (it does a 32 bit div
- // operation instead of 64 bit.). For the case of lval1 and lval2 equal to MIN_LONG
- // (0x8000000000000000) this results in raising a SIGFPE.
- // Optimizations disabled for now. See compiler.h.
- if ((ltemp/lval2) != lval1) goto LNG_OVF;
+ // TODO-Amd64-Unix: Remove the code that disables optimizations for this method when the
+ // clang
+ // optimizer is fixed and/or the method implementation is refactored in a simpler code.
+ // There is a bug in the clang-3.5 optimizer. The issue is that in release build the
+ // optimizer is mistyping (or just wrongly decides to use 32 bit operation for a corner
+ // case of MIN_LONG) the args of the (ltemp / lval2) to int (it does a 32 bit div
+ // operation instead of 64 bit.). For the case of lval1 and lval2 equal to MIN_LONG
+ // (0x8000000000000000) this results in raising a SIGFPE.
+ // Optimizations disabled for now. See compiler.h.
+ if ((ltemp / lval2) != lval1)
+ {
+ goto LNG_OVF;
+ }
+ }
+ }
}
- }
- }
- lval1 = ltemp; break;
+ lval1 = ltemp;
+ break;
- case GT_OR : lval1 |= lval2; break;
- case GT_XOR: lval1 ^= lval2; break;
- case GT_AND: lval1 &= lval2; break;
+ case GT_OR:
+ lval1 |= lval2;
+ break;
+ case GT_XOR:
+ lval1 ^= lval2;
+ break;
+ case GT_AND:
+ lval1 &= lval2;
+ break;
- case GT_LSH: lval1 <<= (lval2 & 0x3f); break;
- case GT_RSH: lval1 >>= (lval2 & 0x3f); break;
- case GT_RSZ:
- /* logical shift -> make it unsigned to not propagate the sign bit */
- lval1 = UINT64(lval1) >> (lval2 & 0x3f);
- break;
- case GT_ROL: lval1 = (lval1 << (lval2 & 0x3f)) | (UINT64(lval1) >> ((64 - lval2) & 0x3f));
- break;
- case GT_ROR: lval1 = (lval1 << ((64 - lval2) & 0x3f)) | (UINT64(lval1) >> (lval2 & 0x3f));
- break;
+ case GT_LSH:
+ lval1 <<= (lval2 & 0x3f);
+ break;
+ case GT_RSH:
+ lval1 >>= (lval2 & 0x3f);
+ break;
+ case GT_RSZ:
+ /* logical shift -> make it unsigned to not propagate the sign bit */
+ lval1 = UINT64(lval1) >> (lval2 & 0x3f);
+ break;
+ case GT_ROL:
+ lval1 = (lval1 << (lval2 & 0x3f)) | (UINT64(lval1) >> ((64 - lval2) & 0x3f));
+ break;
+ case GT_ROR:
+ lval1 = (lval1 << ((64 - lval2) & 0x3f)) | (UINT64(lval1) >> (lval2 & 0x3f));
+ break;
- //Both DIV and IDIV on x86 raise an exception for min_int (and min_long) / -1. So we preserve
- //that behavior here.
- case GT_DIV:
- if (!lval2) return tree;
+ // Both DIV and IDIV on x86 raise an exception for min_int (and min_long) / -1. So we preserve
+ // that behavior here.
+ case GT_DIV:
+ if (!lval2)
+ {
+ return tree;
+ }
- if (UINT64(lval1) == UI64(0x8000000000000000) && lval2 == INT64(-1))
- {
- return tree;
- }
- lval1 /= lval2; break;
+ if (UINT64(lval1) == UI64(0x8000000000000000) && lval2 == INT64(-1))
+ {
+ return tree;
+ }
+ lval1 /= lval2;
+ break;
- case GT_MOD:
- if (!lval2) return tree;
- if (UINT64(lval1) == UI64(0x8000000000000000) && lval2 == INT64(-1))
- {
- return tree;
- }
- lval1 %= lval2; break;
+ case GT_MOD:
+ if (!lval2)
+ {
+ return tree;
+ }
+ if (UINT64(lval1) == UI64(0x8000000000000000) && lval2 == INT64(-1))
+ {
+ return tree;
+ }
+ lval1 %= lval2;
+ break;
- case GT_UDIV:
- if (!lval2) return tree;
- if (UINT64(lval1) == UI64(0x8000000000000000) && lval2 == INT64(-1)) return tree;
- lval1 = UINT64(lval1) / UINT64(lval2); break;
+ case GT_UDIV:
+ if (!lval2)
+ {
+ return tree;
+ }
+ if (UINT64(lval1) == UI64(0x8000000000000000) && lval2 == INT64(-1))
+ {
+ return tree;
+ }
+ lval1 = UINT64(lval1) / UINT64(lval2);
+ break;
- case GT_UMOD:
- if (!lval2) return tree;
- if (UINT64(lval1) == UI64(0x8000000000000000) && lval2 == INT64(-1)) return tree;
- lval1 = UINT64(lval1) % UINT64(lval2); break;
- default:
- return tree;
- }
+ case GT_UMOD:
+ if (!lval2)
+ {
+ return tree;
+ }
+ if (UINT64(lval1) == UI64(0x8000000000000000) && lval2 == INT64(-1))
+ {
+ return tree;
+ }
+ lval1 = UINT64(lval1) % UINT64(lval2);
+ break;
+ default:
+ return tree;
+ }
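Editor's note: the checked long multiply above cannot widen any further, so it multiplies, checks the sign relationship between the operands and the product, and then divides the product back; the sign checks also catch the MIN_LONG * -1 corner case before the division that the clang-3.5 note worries about. A sketch of the signed variant of that test (hypothetical helper name; the raw product is taken through unsigned arithmetic to avoid signed overflow):

    #include <cstdint>

    static bool SignedMul64Overflows(int64_t a, int64_t b)
    {
        if (a == 0 || b == 0)
        {
            return false; // a zero operand can never overflow
        }
        int64_t p = static_cast<int64_t>(static_cast<uint64_t>(a) * static_cast<uint64_t>(b));
        if (((a < 0) == (b < 0)) && (p < 0))
        {
            return true; // like signs must produce a non-negative product
        }
        if (((a < 0) != (b < 0)) && (p > 0))
        {
            return true; // unlike signs must produce a non-positive product
        }
        return (p / b) != a; // divide back; MIN_LONG * -1 was already caught by the sign tests
    }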
-CNS_LONG:
+ CNS_LONG:
-#ifdef DEBUG
- if (verbose)
- {
- printf("\nFolding long operator with constant nodes into a constant:\n");
- gtDispTree(tree);
- }
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\nFolding long operator with constant nodes into a constant:\n");
+ gtDispTree(tree);
+ }
#endif
- assert ((GenTree::s_gtNodeSizes[GT_CNS_NATIVELONG] == TREE_NODE_SZ_SMALL) ||
- (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) );
+ assert((GenTree::s_gtNodeSizes[GT_CNS_NATIVELONG] == TREE_NODE_SZ_SMALL) ||
+ (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
- tree->ChangeOperConst(GT_CNS_NATIVELONG);
- tree->gtIntConCommon.SetLngValue(lval1);
- if (vnStore != nullptr)
- {
- fgValueNumberTreeConst(tree);
- }
+ tree->ChangeOperConst(GT_CNS_NATIVELONG);
+ tree->gtIntConCommon.SetLngValue(lval1);
+ if (vnStore != nullptr)
+ {
+ fgValueNumberTreeConst(tree);
+ }
-#ifdef DEBUG
- if (verbose)
- {
- printf("Bashed to long constant:\n");
- gtDispTree(tree);
- }
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("Bashed to long constant:\n");
+ gtDispTree(tree);
+ }
#endif
- goto DONE;
+ goto DONE;
- /*-------------------------------------------------------------------------
- * Fold constant FLOAT or DOUBLE binary operator
- */
+ /*-------------------------------------------------------------------------
+ * Fold constant FLOAT or DOUBLE binary operator
+ */
- case TYP_FLOAT:
- case TYP_DOUBLE:
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
- if (tree->gtOverflowEx())
- return tree;
+ if (tree->gtOverflowEx())
+ {
+ return tree;
+ }
- assert(op1->gtOper == GT_CNS_DBL);
- d1 = op1->gtDblCon.gtDconVal;
+ assert(op1->gtOper == GT_CNS_DBL);
+ d1 = op1->gtDblCon.gtDconVal;
- assert(varTypeIsFloating(op2->gtType));
- assert(op2->gtOper == GT_CNS_DBL);
- d2 = op2->gtDblCon.gtDconVal;
+ assert(varTypeIsFloating(op2->gtType));
+ assert(op2->gtOper == GT_CNS_DBL);
+ d2 = op2->gtDblCon.gtDconVal;
- /* Special case - check if we have NaN operands.
- * For comparisons if not an unordered operation always return 0.
- * For unordered operations (i.e. the GTF_RELOP_NAN_UN flag is set)
- * the result is always true - return 1. */
+ /* Special case - check if we have NaN operands.
+ * For comparisons if not an unordered operation always return 0.
+ * For unordered operations (i.e. the GTF_RELOP_NAN_UN flag is set)
+ * the result is always true - return 1. */
- if (_isnan(d1) || _isnan(d2))
- {
-#ifdef DEBUG
- if (verbose)
- printf("Double operator(s) is NaN\n");
-#endif
- if (tree->OperKind() & GTK_RELOP)
+ if (_isnan(d1) || _isnan(d2))
{
- if (tree->gtFlags & GTF_RELOP_NAN_UN)
+#ifdef DEBUG
+ if (verbose)
{
- /* Unordered comparison with NaN always succeeds */
- i1 = 1; goto FOLD_COND;
+ printf("Double operator(s) is NaN\n");
}
- else
+#endif
+ if (tree->OperKind() & GTK_RELOP)
{
- /* Normal comparison with NaN always fails */
- i1 = 0; goto FOLD_COND;
+ if (tree->gtFlags & GTF_RELOP_NAN_UN)
+ {
+ /* Unordered comparison with NaN always succeeds */
+ i1 = 1;
+ goto FOLD_COND;
+ }
+ else
+ {
+ /* Normal comparison with NaN always fails */
+ i1 = 0;
+ goto FOLD_COND;
+ }
}
}
- }
- switch (tree->gtOper)
- {
- case GT_EQ : i1 = (d1 == d2); goto FOLD_COND;
- case GT_NE : i1 = (d1 != d2); goto FOLD_COND;
+ switch (tree->gtOper)
+ {
+ case GT_EQ:
+ i1 = (d1 == d2);
+ goto FOLD_COND;
+ case GT_NE:
+ i1 = (d1 != d2);
+ goto FOLD_COND;
- case GT_LT : i1 = (d1 < d2); goto FOLD_COND;
- case GT_LE : i1 = (d1 <= d2); goto FOLD_COND;
- case GT_GE : i1 = (d1 >= d2); goto FOLD_COND;
- case GT_GT : i1 = (d1 > d2); goto FOLD_COND;
+ case GT_LT:
+ i1 = (d1 < d2);
+ goto FOLD_COND;
+ case GT_LE:
+ i1 = (d1 <= d2);
+ goto FOLD_COND;
+ case GT_GE:
+ i1 = (d1 >= d2);
+ goto FOLD_COND;
+ case GT_GT:
+ i1 = (d1 > d2);
+ goto FOLD_COND;
#if FEATURE_STACK_FP_X87
- case GT_ADD: d1 += d2; break;
- case GT_SUB: d1 -= d2; break;
- case GT_MUL: d1 *= d2; break;
- case GT_DIV: if (!d2) return tree;
- d1 /= d2; break;
-#else //!FEATURE_STACK_FP_X87
- // non-x86 arch: floating point arithmetic should be done in declared
- // precision while doing constant folding. For this reason though TYP_FLOAT
- // constants are stored as double constants, while performing float arithmetic,
- // double constants should be converted to float. Here is an example case
- // where performing arithmetic in double precision would lead to incorrect
- // results.
- //
- // Example:
- // float a = float.MaxValue;
- // float b = a*a; This will produce +inf in single precision and 1.1579207543382391e+077 in double
- // precision.
- // flaot c = b/b; This will produce NaN in single precision and 1 in double precision.
- case GT_ADD:
- if (op1->TypeGet() == TYP_FLOAT)
- {
- f1 = forceCastToFloat(d1);
- f2 = forceCastToFloat(d2);
- d1 = f1+f2;
- }
- else
- {
- d1 += d2;
- }
- break;
+ case GT_ADD:
+ d1 += d2;
+ break;
+ case GT_SUB:
+ d1 -= d2;
+ break;
+ case GT_MUL:
+ d1 *= d2;
+ break;
+ case GT_DIV:
+ if (!d2)
+ return tree;
+ d1 /= d2;
+ break;
+#else //! FEATURE_STACK_FP_X87
+ // non-x86 arch: floating point arithmetic should be done in declared
+ // precision while doing constant folding. For this reason though TYP_FLOAT
+ // constants are stored as double constants, while performing float arithmetic,
+ // double constants should be converted to float. Here is an example case
+ // where performing arithmetic in double precision would lead to incorrect
+ // results.
+ //
+ // Example:
+ // float a = float.MaxValue;
+ // float b = a*a; This will produce +inf in single precision and 1.1579207543382391e+077 in double
+ // precision.
+                    // float c = b/b; This will produce NaN in single precision and 1 in double precision.
+ case GT_ADD:
+ if (op1->TypeGet() == TYP_FLOAT)
+ {
+ f1 = forceCastToFloat(d1);
+ f2 = forceCastToFloat(d2);
+ d1 = f1 + f2;
+ }
+ else
+ {
+ d1 += d2;
+ }
+ break;
- case GT_SUB:
- if (op1->TypeGet() == TYP_FLOAT)
- {
- f1 = forceCastToFloat(d1);
- f2 = forceCastToFloat(d2);
- d1 = f1 - f2;
- }
- else
- {
- d1 -= d2;
- }
- break;
+ case GT_SUB:
+ if (op1->TypeGet() == TYP_FLOAT)
+ {
+ f1 = forceCastToFloat(d1);
+ f2 = forceCastToFloat(d2);
+ d1 = f1 - f2;
+ }
+ else
+ {
+ d1 -= d2;
+ }
+ break;
- case GT_MUL:
- if (op1->TypeGet() == TYP_FLOAT)
- {
- f1 = forceCastToFloat(d1);
- f2 = forceCastToFloat(d2);
- d1 = f1 * f2;
+ case GT_MUL:
+ if (op1->TypeGet() == TYP_FLOAT)
+ {
+ f1 = forceCastToFloat(d1);
+ f2 = forceCastToFloat(d2);
+ d1 = f1 * f2;
+ }
+ else
+ {
+ d1 *= d2;
+ }
+ break;
+
+ case GT_DIV:
+ if (!d2)
+ {
+ return tree;
+ }
+ if (op1->TypeGet() == TYP_FLOAT)
+ {
+ f1 = forceCastToFloat(d1);
+ f2 = forceCastToFloat(d2);
+ d1 = f1 / f2;
+ }
+ else
+ {
+ d1 /= d2;
+ }
+ break;
+#endif //! FEATURE_STACK_FP_X87
+
+ default:
+ return tree;
}
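Editor's note: the non-x87 arms above force TYP_FLOAT operands back to single precision before folding, for exactly the reason the float.MaxValue example in the comment gives: folding in double precision would quietly produce a finite constant where the generated float code produces +inf (and then 1 instead of NaN for the follow-up divide). A tiny sketch of the difference, assuming forceCastToFloat is just a narrowing cast:

    #include <cfloat>

    static double FoldMulAsFloat(double d1, double d2)
    {
        float f1 = static_cast<float>(d1);
        float f2 = static_cast<float>(d2);
        return static_cast<double>(f1 * f2); // FLT_MAX * FLT_MAX -> +inf, matching float codegen
    }

    static double FoldMulAsDouble(double d1, double d2)
    {
        return d1 * d2; // FLT_MAX * FLT_MAX -> ~1.1579e+77, not what the float code computes
    }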
- else
+
+ CNS_DOUBLE:
+
+#ifdef DEBUG
+ if (verbose)
{
- d1 *= d2;
+ printf("\nFolding fp operator with constant nodes into a fp constant:\n");
+ gtDispTree(tree);
}
- break;
+#endif
+
+ assert((GenTree::s_gtNodeSizes[GT_CNS_DBL] == TREE_NODE_SZ_SMALL) ||
+ (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
- case GT_DIV:
- if (!d2) return tree;
- if (op1->TypeGet() == TYP_FLOAT)
+ tree->ChangeOperConst(GT_CNS_DBL);
+ tree->gtDblCon.gtDconVal = d1;
+ if (vnStore != nullptr)
{
- f1 = forceCastToFloat(d1);
- f2 = forceCastToFloat(d2);
- d1 = f1/f2;
+ fgValueNumberTreeConst(tree);
}
- else
+#ifdef DEBUG
+ if (verbose)
{
- d1 /= d2;
+ printf("Bashed to fp constant:\n");
+ gtDispTree(tree);
}
- break;
-#endif //!FEATURE_STACK_FP_X87
+#endif
+ goto DONE;
default:
+ /* not a foldable typ */
return tree;
- }
-
-CNS_DOUBLE:
-
-#ifdef DEBUG
- if (verbose)
- {
- printf("\nFolding fp operator with constant nodes into a fp constant:\n");
- gtDispTree(tree);
- }
-#endif
-
- assert ((GenTree::s_gtNodeSizes[GT_CNS_DBL] == TREE_NODE_SZ_SMALL) ||
- (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) );
-
- tree->ChangeOperConst(GT_CNS_DBL);
- tree->gtDblCon.gtDconVal = d1;
- if (vnStore != nullptr)
- {
- fgValueNumberTreeConst(tree);
- }
-#ifdef DEBUG
- if (verbose)
- {
- printf("Bashed to fp constant:\n");
- gtDispTree(tree);
- }
-#endif
- goto DONE;
-
- default:
- /* not a foldable typ */
- return tree;
}
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
DONE:
@@ -12248,24 +13205,25 @@ DONE:
* Create an assignment of the given value to a temp.
*/
-GenTreePtr Compiler::gtNewTempAssign(unsigned tmp, GenTreePtr val)
+GenTreePtr Compiler::gtNewTempAssign(unsigned tmp, GenTreePtr val)
{
- LclVarDsc * varDsc = lvaTable + tmp;
+ LclVarDsc* varDsc = lvaTable + tmp;
if (varDsc->TypeGet() == TYP_I_IMPL && val->TypeGet() == TYP_BYREF)
+ {
impBashVarAddrsToI(val);
+ }
- var_types valTyp = val->TypeGet();
- if (val->OperGet() == GT_LCL_VAR
- && lvaTable[val->gtLclVar.gtLclNum].lvNormalizeOnLoad())
+ var_types valTyp = val->TypeGet();
+ if (val->OperGet() == GT_LCL_VAR && lvaTable[val->gtLclVar.gtLclNum].lvNormalizeOnLoad())
{
valTyp = lvaGetRealType(val->gtLclVar.gtLclNum);
- val = gtNewLclvNode(val->gtLclVar.gtLclNum, valTyp, val->gtLclVar.gtLclILoffs);
+ val = gtNewLclvNode(val->gtLclVar.gtLclNum, valTyp, val->gtLclVar.gtLclILoffs);
}
- var_types dstTyp = varDsc->TypeGet();
-
+ var_types dstTyp = varDsc->TypeGet();
+
/* If the variable's lvType is not yet set then set it here */
- if (dstTyp == TYP_UNDEF)
+ if (dstTyp == TYP_UNDEF)
{
varDsc->lvType = dstTyp = genActualType(valTyp);
if (varTypeIsGC(dstTyp))
@@ -12280,7 +13238,7 @@ GenTreePtr Compiler::gtNewTempAssign(unsigned tmp, GenTreePtr val)
#endif
}
-#ifdef DEBUG
+#ifdef DEBUG
/* Make sure the actual types match */
if (genActualType(valTyp) != genActualType(dstTyp))
{
@@ -12305,7 +13263,7 @@ GenTreePtr Compiler::gtNewTempAssign(unsigned tmp, GenTreePtr val)
}
#endif
- // Floating Point assignments can be created during inlining
+ // Floating Point assignments can be created during inlining
// see "Zero init inlinee locals:" in fgInlinePrependStatements
// thus we may need to set compFloatingPointUsed to true here.
//
@@ -12315,11 +13273,11 @@ GenTreePtr Compiler::gtNewTempAssign(unsigned tmp, GenTreePtr val)
}
/* Create the assignment node */
-
+
GenTreePtr asg;
GenTreePtr dest = gtNewLclvNode(tmp, dstTyp);
dest->gtFlags |= GTF_VAR_DEF;
-
+
// With first-class structs, we should be propagating the class handle on all non-primitive
// struct types. We don't have a convenient way to do that for all SIMD temps, since some
// internal trees use SIMD types that are not used by the input IL. In this case, we allow
@@ -12328,7 +13286,7 @@ GenTreePtr Compiler::gtNewTempAssign(unsigned tmp, GenTreePtr val)
if (varTypeIsStruct(valTyp) && ((structHnd != NO_CLASS_HANDLE) || (varTypeIsSIMD(valTyp))))
{
// The GT_OBJ may be be a child of a GT_COMMA.
- GenTreePtr valx = val->gtEffectiveVal(/*commaOnly*/true);
+ GenTreePtr valx = val->gtEffectiveVal(/*commaOnly*/ true);
if (valx->gtOper == GT_OBJ)
{
@@ -12337,10 +13295,7 @@ GenTreePtr Compiler::gtNewTempAssign(unsigned tmp, GenTreePtr val)
}
dest->gtFlags |= GTF_DONT_CSE;
valx->gtFlags |= GTF_DONT_CSE;
- asg = impAssignStruct(dest,
- val,
- structHnd,
- (unsigned)CHECK_SPILL_NONE);
+ asg = impAssignStruct(dest, val, structHnd, (unsigned)CHECK_SPILL_NONE);
}
else
{
@@ -12363,46 +13318,50 @@ GenTreePtr Compiler::gtNewTempAssign(unsigned tmp, GenTreePtr val)
* an assignment and 'assg' is the new value).
*/
-GenTreePtr Compiler::gtNewRefCOMfield(GenTreePtr objPtr,
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_ACCESS_FLAGS access,
- CORINFO_FIELD_INFO * pFieldInfo,
- var_types lclTyp,
- CORINFO_CLASS_HANDLE structType,
- GenTreePtr assg)
+GenTreePtr Compiler::gtNewRefCOMfield(GenTreePtr objPtr,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_ACCESS_FLAGS access,
+ CORINFO_FIELD_INFO* pFieldInfo,
+ var_types lclTyp,
+ CORINFO_CLASS_HANDLE structType,
+ GenTreePtr assg)
{
assert(pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_ADDR_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDR_HELPER);
/* If we can't access it directly, we need to call a helper function */
- GenTreeArgList* args = NULL;
- var_types helperType = TYP_BYREF;
+ GenTreeArgList* args = nullptr;
+ var_types helperType = TYP_BYREF;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
- if (access & CORINFO_ACCESS_SET)
+ if (access & CORINFO_ACCESS_SET)
{
- assert(assg != 0);
+ assert(assg != nullptr);
// helper needs pointer to struct, not struct itself
if (pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
- assert(structType != 0);
+ assert(structType != nullptr);
assg = impGetStructAddr(assg, structType, (unsigned)CHECK_SPILL_ALL, true);
}
else if (lclTyp == TYP_DOUBLE && assg->TypeGet() == TYP_FLOAT)
+ {
assg = gtNewCastNode(TYP_DOUBLE, assg, TYP_DOUBLE);
+ }
else if (lclTyp == TYP_FLOAT && assg->TypeGet() == TYP_DOUBLE)
+ {
assg = gtNewCastNode(TYP_FLOAT, assg, TYP_FLOAT);
+ }
- args = gtNewArgList(assg);
+ args = gtNewArgList(assg);
helperType = TYP_VOID;
}
else if (access & CORINFO_ACCESS_GET)
{
helperType = lclTyp;
- // The calling convention for the helper does not take into
+ // The calling convention for the helper does not take into
// account optimization of primitive structs.
if ((pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT) && !varTypeIsStruct(lclTyp))
{
@@ -12413,22 +13372,26 @@ GenTreePtr Compiler::gtNewRefCOMfield(GenTreePtr objPtr,
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT || pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
- assert(pFieldInfo->structType != NULL);
+ assert(pFieldInfo->structType != nullptr);
args = gtNewListNode(gtNewIconEmbClsHndNode(pFieldInfo->structType), args);
}
GenTreePtr fieldHnd = impTokenToHandle(pResolvedToken);
- if (fieldHnd == NULL) // compDonotInline()
- return NULL;
+ if (fieldHnd == nullptr)
+ { // compDonotInline()
+ return nullptr;
+ }
args = gtNewListNode(fieldHnd, args);
// If it's a static field, we shouldn't have an object node
// If it's an instance field, we have an object node
- assert( (pFieldInfo->fieldAccessor != CORINFO_FIELD_STATIC_ADDR_HELPER) ^ (objPtr == 0) );
+ assert((pFieldInfo->fieldAccessor != CORINFO_FIELD_STATIC_ADDR_HELPER) ^ (objPtr == nullptr));
- if (objPtr != NULL)
+ if (objPtr != nullptr)
+ {
args = gtNewListNode(objPtr, args);
+ }
GenTreePtr tree = gtNewHelperCallNode(pFieldInfo->helper, genActualType(helperType), 0, args);
@@ -12470,7 +13433,9 @@ GenTreePtr Compiler::gtNewRefCOMfield(GenTreePtr objPtr,
else if (access & CORINFO_ACCESS_SET)
{
if (varTypeIsStruct(lclTyp))
+ {
tree = impAssignStructPtr(tree, assg, structType, (unsigned)CHECK_SPILL_ALL);
+ }
else
{
tree = gtNewOperNode(GT_IND, lclTyp, tree);
@@ -12480,7 +13445,7 @@ GenTreePtr Compiler::gtNewRefCOMfield(GenTreePtr objPtr,
}
}
- return(tree);
+ return (tree);
}
/*****************************************************************************
@@ -12493,21 +13458,19 @@ GenTreePtr Compiler::gtNewRefCOMfield(GenTreePtr objPtr,
* assignments too.
*/
-bool Compiler::gtNodeHasSideEffects(GenTreePtr tree, unsigned flags)
+bool Compiler::gtNodeHasSideEffects(GenTreePtr tree, unsigned flags)
{
if (flags & GTF_ASG)
{
- if ((tree->OperKind() & GTK_ASGOP) ||
- (tree->gtOper == GT_INITBLK ||
- tree->gtOper == GT_COPYBLK ||
- tree->gtOper == GT_COPYOBJ))
+ if ((tree->OperKind() & GTK_ASGOP) ||
+ (tree->gtOper == GT_INITBLK || tree->gtOper == GT_COPYBLK || tree->gtOper == GT_COPYOBJ))
{
- return true;
+ return true;
}
}
// Are there only GTF_CALL side effects remaining? (and no other side effect kinds)
- if (flags & GTF_CALL)
+ if (flags & GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
@@ -12523,33 +13486,46 @@ bool Compiler::gtNodeHasSideEffects(GenTreePtr tree, unsigned fla
// We definitely care about the side effects if MutatesHeap is true
//
if (s_helperCallProperties.MutatesHeap(helper))
+ {
return true;
+ }
// with GTF_PERSISTENT_SIDE_EFFECTS_IN_CSE we will CSE helper calls that can run cctors.
//
if ((flags != GTF_PERSISTENT_SIDE_EFFECTS_IN_CSE) && (s_helperCallProperties.MayRunCctor(helper)))
+ {
return true;
+ }
// If we also care about exceptions then check if the helper can throw
//
if (((flags & GTF_EXCEPT) != 0) && !s_helperCallProperties.NoThrow(helper))
+ {
return true;
+ }
// If this is a Pure helper call or an allocator (that will not need to run a finalizer)
- // then we don't need to preserve the side effects (of this call -- we may care about those of the arguments).
- if ( s_helperCallProperties.IsPure(helper)
- || (s_helperCallProperties.IsAllocator(helper) && !s_helperCallProperties.MayFinalize(helper)))
+ // then we don't need to preserve the side effects (of this call -- we may care about those of the
+ // arguments).
+ if (s_helperCallProperties.IsPure(helper) ||
+ (s_helperCallProperties.IsAllocator(helper) && !s_helperCallProperties.MayFinalize(helper)))
{
GenTreeCall* call = tree->AsCall();
for (GenTreeArgList* args = call->gtCallArgs; args != nullptr; args = args->Rest())
{
- if (gtTreeHasSideEffects(args->Current(), flags)) return true;
+ if (gtTreeHasSideEffects(args->Current(), flags))
+ {
+ return true;
+ }
}
// I'm a little worried that args that assign to temps that are late args will look like
// side effects...but better to be conservative for now.
for (GenTreeArgList* args = call->gtCallLateArgs; args != nullptr; args = args->Rest())
{
- if (gtTreeHasSideEffects(args->Current(), flags)) return true;
+ if (gtTreeHasSideEffects(args->Current(), flags))
+ {
+ return true;
+ }
}
// Otherwise:
return false;
@@ -12564,13 +13540,17 @@ bool Compiler::gtNodeHasSideEffects(GenTreePtr tree, unsigned fla
if (flags & GTF_EXCEPT)
{
if (tree->OperMayThrow())
+ {
return true;
+ }
}
// Expressions declared as CSE by (e.g.) hoisting code are considered to have relevant side
// effects (if we care about GTF_MAKE_CSE).
if ((flags & GTF_MAKE_CSE) && (tree->gtFlags & GTF_MAKE_CSE))
+ {
return true;
+ }
return false;
}
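Editor's note: for a pure helper or a non-finalizing allocator helper, gtNodeHasSideEffects treats the call itself as droppable and only asks whether any argument (early or late) carries side effects. A compressed sketch of that argument scan (hypothetical, simplified types; the real code walks GenTreeArgList via Rest()/Current()):

    // Hypothetical stand-ins for the arg-list walk in the pure-helper case above.
    template <typename ArgList, typename Pred>
    static bool AnyArgHasSideEffects(ArgList* args, Pred hasSideEffects)
    {
        for (; args != nullptr; args = args->Rest())
        {
            if (hasSideEffects(args->Current()))
            {
                return true; // one side-effecting argument is enough to report the node as side-effecting
            }
        }
        return false; // pure call with effect-free arguments: nothing to preserve
    }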
@@ -12579,8 +13559,7 @@ bool Compiler::gtNodeHasSideEffects(GenTreePtr tree, unsigned fla
* Returns true if the expr tree has any side effects.
*/
-bool Compiler::gtTreeHasSideEffects(GenTreePtr tree,
- unsigned flags /* = GTF_SIDE_EFFECT*/)
+bool Compiler::gtTreeHasSideEffects(GenTreePtr tree, unsigned flags /* = GTF_SIDE_EFFECT*/)
{
// These are the side effect flags that we care about for this tree
unsigned sideEffectFlags = tree->gtFlags & flags;
@@ -12607,15 +13586,21 @@ bool Compiler::gtTreeHasSideEffects(GenTreePtr tree,
}
}
else if (tree->OperGet() == GT_INTRINSIC)
- {
+ {
if (gtNodeHasSideEffects(tree, flags))
+ {
return true;
+ }
if (gtNodeHasSideEffects(tree->gtOp.gtOp1, flags))
+ {
return true;
+ }
if ((tree->gtOp.gtOp2 != nullptr) && gtNodeHasSideEffects(tree->gtOp.gtOp2, flags))
+ {
return true;
+ }
return false;
}
@@ -12626,7 +13611,7 @@ bool Compiler::gtTreeHasSideEffects(GenTreePtr tree,
GenTreePtr Compiler::gtBuildCommaList(GenTreePtr list, GenTreePtr expr)
{
- // 'list' starts off as null,
+ // 'list' starts off as null,
// and when it is null we haven't started the list yet.
//
if (list != nullptr)
@@ -12634,13 +13619,13 @@ GenTreePtr Compiler::gtBuildCommaList(GenTreePtr list, GenTreePtr expr)
// Create a GT_COMMA that appends 'expr' in front of the remaining set of expressions in (*list)
GenTreePtr result = gtNewOperNode(GT_COMMA, TYP_VOID, expr, list);
- // Set the flags in the comma node
+ // Set the flags in the comma node
result->gtFlags |= (list->gtFlags & GTF_ALL_EFFECT);
result->gtFlags |= (expr->gtFlags & GTF_ALL_EFFECT);
// 'list' and 'expr' should have valuenumbers defined for both or for neither one
noway_assert(list->gtVNPair.BothDefined() == expr->gtVNPair.BothDefined());
-
+
// Set the ValueNumber 'gtVNPair' for the new GT_COMMA node
//
if (expr->gtVNPair.BothDefined())
@@ -12648,20 +13633,20 @@ GenTreePtr Compiler::gtBuildCommaList(GenTreePtr list, GenTreePtr expr)
// The result of a GT_COMMA node is op2, the normal value number is op2vnp
// But we also need to include the union of side effects from op1 and op2.
// we compute this value into exceptions_vnp.
- ValueNumPair op1vnp;
- ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
- ValueNumPair op2vnp;
- ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
-
+ ValueNumPair op1vnp;
+ ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair op2vnp;
+ ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
+
vnStore->VNPUnpackExc(expr->gtVNPair, &op1vnp, &op1Xvnp);
vnStore->VNPUnpackExc(list->gtVNPair, &op2vnp, &op2Xvnp);
- ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet();
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op1Xvnp);
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp);
- result->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp);
+ result->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp);
}
return result;
@@ -12671,7 +13656,6 @@ GenTreePtr Compiler::gtBuildCommaList(GenTreePtr list, GenTreePtr expr)
// The 'expr' will start the list of expressions
return expr;
}
-
}
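Editor's note: gtBuildCommaList wraps each newly extracted side effect as op1 of a fresh GT_COMMA whose op2 is the list built so far, and gtExtractSideEffList (below) visits operands in the opposite of their evaluation order, so the repeated prepending still leaves the effects evaluating in their original order. A shape-only sketch of that construction (hypothetical pair type, not GenTree):

    #include <memory>
    #include <utility>

    struct CommaNode
    {
        int                        effect; // stands in for the side-effecting subtree (op1)
        std::shared_ptr<CommaNode> rest;   // the previously built list (op2)
    };

    // Analogue of gtBuildCommaList: COMMA(effect, list), i.e. prepend the new effect.
    static std::shared_ptr<CommaNode> BuildCommaList(std::shared_ptr<CommaNode> list, int effect)
    {
        return std::make_shared<CommaNode>(CommaNode{effect, std::move(list)});
    }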
/*****************************************************************************
@@ -12682,21 +13666,25 @@ GenTreePtr Compiler::gtBuildCommaList(GenTreePtr list, GenTreePtr expr)
* level tree node as having side-effect.
*/
-void Compiler::gtExtractSideEffList(GenTreePtr expr, GenTreePtr * pList,
- unsigned flags /* = GTF_SIDE_EFFECT*/,
- bool ignoreRoot /* = false */)
+void Compiler::gtExtractSideEffList(GenTreePtr expr,
+ GenTreePtr* pList,
+ unsigned flags /* = GTF_SIDE_EFFECT*/,
+ bool ignoreRoot /* = false */)
{
- assert(expr); assert(expr->gtOper != GT_STMT);
+ assert(expr);
+ assert(expr->gtOper != GT_STMT);
/* If no side effect in the expression return */
if (!gtTreeHasSideEffects(expr, flags))
+ {
return;
+ }
- genTreeOps oper = expr->OperGet();
- unsigned kind = expr->OperKind();
+ genTreeOps oper = expr->OperGet();
+ unsigned kind = expr->OperKind();
- // Look for any side effects that we care about
+ // Look for any side effects that we care about
//
if (!ignoreRoot && gtNodeHasSideEffects(expr, flags))
{
@@ -12707,12 +13695,14 @@ void Compiler::gtExtractSideEffList(GenTreePtr expr, GenTreePtr *
}
if (kind & GTK_LEAF)
+ {
return;
+ }
if (oper == GT_LOCKADD || oper == GT_XADD || oper == GT_XCHG || oper == GT_CMPXCHG)
{
- //XADD both adds to the memory location and also fetches the old value. If we only need the side
- //effect of this instruction, change it into a GT_LOCKADD node (the add only)
+ // XADD both adds to the memory location and also fetches the old value. If we only need the side
+ // effect of this instruction, change it into a GT_LOCKADD node (the add only)
if (oper == GT_XADD)
{
expr->gtOper = GT_LOCKADD;
@@ -12726,8 +13716,8 @@ void Compiler::gtExtractSideEffList(GenTreePtr expr, GenTreePtr *
if (kind & GTK_SMPOP)
{
- GenTreePtr op1 = expr->gtOp.gtOp1;
- GenTreePtr op2 = expr->gtGetOp2();
+ GenTreePtr op1 = expr->gtOp.gtOp1;
+ GenTreePtr op2 = expr->gtGetOp2();
if (flags & GTF_EXCEPT)
{
@@ -12738,9 +13728,11 @@ void Compiler::gtExtractSideEffList(GenTreePtr expr, GenTreePtr *
{
*pList = gtBuildCommaList(*pList, expr);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
+ {
printf("Keep the GT_ADDR and GT_IND together:\n");
+ }
#endif
return;
}
@@ -12751,19 +13743,31 @@ void Compiler::gtExtractSideEffList(GenTreePtr expr, GenTreePtr *
* to the list */
/* Continue searching for side effects in the subtrees of the expression
- * NOTE: Be careful to preserve the right ordering
+ * NOTE: Be careful to preserve the right ordering
* as side effects are prepended to the list */
if (expr->gtFlags & GTF_REVERSE_OPS)
{
assert(oper != GT_COMMA);
- if (op1) gtExtractSideEffList(op1, pList, flags);
- if (op2) gtExtractSideEffList(op2, pList, flags);
+ if (op1)
+ {
+ gtExtractSideEffList(op1, pList, flags);
+ }
+ if (op2)
+ {
+ gtExtractSideEffList(op2, pList, flags);
+ }
}
else
{
- if (op2) gtExtractSideEffList(op2, pList, flags);
- if (op1) gtExtractSideEffList(op1, pList, flags);
+ if (op2)
+ {
+ gtExtractSideEffList(op2, pList, flags);
+ }
+ if (op1)
+ {
+ gtExtractSideEffList(op1, pList, flags);
+ }
}
}
@@ -12775,9 +13779,9 @@ void Compiler::gtExtractSideEffList(GenTreePtr expr, GenTreePtr *
//
assert(expr->gtCall.gtCallType == CT_HELPER);
- // We can remove this Helper call, but there still could be
- // side-effects in the arguments that we may need to keep
- //
+ // We can remove this Helper call, but there still could be
+ // side-effects in the arguments that we may need to keep
+ //
GenTreePtr args;
for (args = expr->gtCall.gtCallArgs; args; args = args->gtOp.gtOp2)
{
@@ -12802,41 +13806,46 @@ void Compiler::gtExtractSideEffList(GenTreePtr expr, GenTreePtr *
}
}
-
/*****************************************************************************
*
* For debugging only - displays a tree node list and makes sure all the
* links are correctly set.
*/
-#ifdef DEBUG
+#ifdef DEBUG
-void dispNodeList(GenTreePtr list, bool verbose)
+void dispNodeList(GenTreePtr list, bool verbose)
{
- GenTreePtr last = 0;
- GenTreePtr next;
+ GenTreePtr last = nullptr;
+ GenTreePtr next;
- if (!list)
+ if (!list)
+ {
return;
+ }
for (;;)
{
next = list->gtNext;
- if (verbose)
+ if (verbose)
+ {
printf("%08X -> %08X -> %08X\n", last, list, next);
+ }
assert(!last || last->gtNext == list);
- assert(next == 0 || next->gtPrev == list);
+ assert(next == nullptr || next->gtPrev == list);
- if (!next)
+ if (!next)
+ {
break;
+ }
last = list;
list = next;
}
- printf(""); // null string means flush
+ printf(""); // null string means flush
}
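A standalone sketch of the same link-consistency walk over a plain doubly linked list (illustrative Node type, not GenTree):

// Illustrative only: walk a doubly linked list and assert that every node's
// links agree with its neighbours, the check dispNodeList performs.
#include <cassert>
#include <cstdio>

struct Node
{
    Node* prev = nullptr;
    Node* next = nullptr;
    int   id   = 0;
};

void checkList(Node* list)
{
    Node* last = nullptr;
    for (Node* cur = list; cur != nullptr; cur = cur->next)
    {
        assert(last == nullptr || last->next == cur);
        assert(cur->next == nullptr || cur->next->prev == cur);
        std::printf("node %d\n", cur->id);
        last = cur;
    }
}

int main()
{
    Node a, b, c;
    a.id = 1; b.id = 2; c.id = 3;
    a.next = &b; b.prev = &a;
    b.next = &c; c.prev = &b;
    checkList(&a);
    return 0;
}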
/*****************************************************************************
@@ -12844,10 +13853,9 @@ void dispNodeList(GenTreePtr list, bool verbose)
*/
/* static */
-Compiler::fgWalkResult Compiler::gtAssertColonCond(GenTreePtr *pTree,
- fgWalkData *data)
+Compiler::fgWalkResult Compiler::gtAssertColonCond(GenTreePtr* pTree, fgWalkData* data)
{
- assert(data->pCallbackData == NULL);
+ assert(data->pCallbackData == nullptr);
assert((*pTree)->gtFlags & GTF_COLON_COND);
@@ -12856,15 +13864,14 @@ Compiler::fgWalkResult Compiler::gtAssertColonCond(GenTreePtr *pTree,
#endif // DEBUG
/*****************************************************************************
- * Callback to mark the nodes of a qmark-colon subtree that are conditionally
+ * Callback to mark the nodes of a qmark-colon subtree that are conditionally
* executed.
*/
/* static */
-Compiler::fgWalkResult Compiler::gtMarkColonCond(GenTreePtr *pTree,
- fgWalkData *data)
+Compiler::fgWalkResult Compiler::gtMarkColonCond(GenTreePtr* pTree, fgWalkData* data)
{
- assert(data->pCallbackData == NULL);
+ assert(data->pCallbackData == nullptr);
(*pTree)->gtFlags |= GTF_COLON_COND;
@@ -12873,20 +13880,19 @@ Compiler::fgWalkResult Compiler::gtMarkColonCond(GenTreePtr *pTree,
/*****************************************************************************
* Callback to clear the conditionally executed flags of nodes that no longer
- will be conditionally executed. Note that when we find another colon we must
+ will be conditionally executed. Note that when we find another colon we must
stop, as the nodes below this one WILL be conditionally executed. This callback
is called when folding a qmark condition (ie the condition is constant).
*/
/* static */
-Compiler::fgWalkResult Compiler::gtClearColonCond(GenTreePtr *pTree,
- fgWalkData *data)
+Compiler::fgWalkResult Compiler::gtClearColonCond(GenTreePtr* pTree, fgWalkData* data)
{
GenTreePtr tree = *pTree;
- assert(data->pCallbackData == NULL);
+ assert(data->pCallbackData == nullptr);
- if (tree->OperGet()==GT_COLON)
+ if (tree->OperGet() == GT_COLON)
{
// Nodes below this will be conditionally executed.
return WALK_SKIP_SUBTREES;
@@ -12896,20 +13902,19 @@ Compiler::fgWalkResult Compiler::gtClearColonCond(GenTreePtr *pTree,
return WALK_CONTINUE;
}
-
-struct FindLinkData {
- GenTreePtr nodeToFind;
- GenTreePtr * result;
+struct FindLinkData
+{
+ GenTreePtr nodeToFind;
+ GenTreePtr* result;
};
/*****************************************************************************
*
* Callback used by the tree walker to implement fgFindLink()
*/
-static Compiler::fgWalkResult gtFindLinkCB(GenTreePtr * pTree,
- Compiler::fgWalkData * cbData)
+static Compiler::fgWalkResult gtFindLinkCB(GenTreePtr* pTree, Compiler::fgWalkData* cbData)
{
- FindLinkData * data = (FindLinkData*) cbData->pCallbackData;
+ FindLinkData* data = (FindLinkData*)cbData->pCallbackData;
if (*pTree == data->nodeToFind)
{
data->result = pTree;
@@ -12919,11 +13924,11 @@ static Compiler::fgWalkResult gtFindLinkCB(GenTreePtr * pTree,
return Compiler::WALK_CONTINUE;
}
-GenTreePtr * Compiler::gtFindLink(GenTreePtr stmt, GenTreePtr node)
+GenTreePtr* Compiler::gtFindLink(GenTreePtr stmt, GenTreePtr node)
{
assert(stmt->gtOper == GT_STMT);
- FindLinkData data = {node, NULL};
+ FindLinkData data = {node, nullptr};
fgWalkResult result = fgWalkTreePre(&stmt->gtStmt.gtStmtExpr, gtFindLinkCB, &data);
@@ -12933,7 +13938,9 @@ GenTreePtr * Compiler::gtFindLink(GenTreePtr stmt, GenTreePtr node)
return data.result;
}
else
- return NULL;
+ {
+ return nullptr;
+ }
}
/*****************************************************************************
@@ -12941,25 +13948,21 @@ GenTreePtr * Compiler::gtFindLink(GenTreePtr stmt, GenTreePtr node)
* Callback that checks if a tree node has oper type GT_CATCH_ARG
*/
-static Compiler::fgWalkResult gtFindCatchArg(GenTreePtr * pTree,
- Compiler::fgWalkData * /* data */)
+static Compiler::fgWalkResult gtFindCatchArg(GenTreePtr* pTree, Compiler::fgWalkData* /* data */)
{
- return ((*pTree)->OperGet() == GT_CATCH_ARG) ? Compiler::WALK_ABORT
- : Compiler::WALK_CONTINUE;
+ return ((*pTree)->OperGet() == GT_CATCH_ARG) ? Compiler::WALK_ABORT : Compiler::WALK_CONTINUE;
}
/*****************************************************************************/
-bool Compiler::gtHasCatchArg(GenTreePtr tree)
+bool Compiler::gtHasCatchArg(GenTreePtr tree)
{
- if (((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0) &&
- (fgWalkTreePre(&tree, gtFindCatchArg) == WALK_ABORT))
+ if (((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0) && (fgWalkTreePre(&tree, gtFindCatchArg) == WALK_ABORT))
{
return true;
}
return false;
}
-
//------------------------------------------------------------------------
// gtHasCallOnStack:
//
@@ -12974,13 +13977,11 @@ bool Compiler::gtHasCatchArg(GenTreePtr tree)
// we are performing a recursive tree walk using struct fgWalkData
//
//------------------------------------------------------------------------
-/* static */ bool Compiler::gtHasCallOnStack(GenTreeStack *parentStack)
+/* static */ bool Compiler::gtHasCallOnStack(GenTreeStack* parentStack)
{
- for (int i = 0;
- i < parentStack->Height();
- i++)
+ for (int i = 0; i < parentStack->Height(); i++)
{
- GenTree *node = parentStack->Index(i);
+ GenTree* node = parentStack->Index(i);
if (node->OperGet() == GT_CALL)
{
return true;
@@ -13003,10 +14004,10 @@ bool Compiler::gtHasCatchArg(GenTreePtr tree)
// Notes:
// When allocation size of this LclVar is 32-bits we will quirk the size to 64-bits
// because some PInvoke signatures incorrectly specify a ByRef to an INT32
-// when they actually write a SIZE_T or INT64. There are cases where overwriting
-// these extra 4 bytes corrupts some data (such as a saved register) that leads to A/V
-// Wheras previously the JIT64 codegen did not lead to an A/V
-//
+// when they actually write a SIZE_T or INT64. There are cases where overwriting
+// these extra 4 bytes corrupts some data (such as a saved register) that leads to A/V
+// Whereas previously the JIT64 codegen did not lead to an A/V
+//
// Assumptions:
// 'tree' is known to be address taken and that we have a stack
// of parent nodes. Both of these generally require that
@@ -13024,9 +14025,9 @@ void Compiler::gtCheckQuirkAddrExposedLclVar(GenTreePtr tree, GenTreeStack* pare
return;
}
noway_assert(tree->gtOper == GT_LCL_VAR);
- unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &lvaTable[lclNum];
- var_types vartype = varDsc->TypeGet();
+ unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[lclNum];
+ var_types vartype = varDsc->TypeGet();
if (varDsc->lvIsParam)
{
@@ -13049,8 +14050,8 @@ void Compiler::gtCheckQuirkAddrExposedLclVar(GenTreePtr tree, GenTreeStack* pare
#endif
}
-//Checks to see if we're allowed to optimize Type::op_Equality or Type::op_Inequality on this operand.
-//We're allowed to convert to GT_EQ/GT_NE if one of the operands is:
+// Checks to see if we're allowed to optimize Type::op_Equality or Type::op_Inequality on this operand.
+// We're allowed to convert to GT_EQ/GT_NE if one of the operands is:
// 1) The result of Object::GetType
// 2) The result of typeof(...)
// 3) a local variable of type RuntimeType.
@@ -13061,30 +14062,31 @@ bool Compiler::gtCanOptimizeTypeEquality(GenTreePtr tree)
if (tree->gtCall.gtCallType == CT_HELPER)
{
if (gtIsTypeHandleToRuntimeTypeHelper(tree))
+ {
return true;
+ }
}
else if (tree->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
- if (info.compCompHnd->getIntrinsicID(tree->gtCall.gtCallMethHnd)
- == CORINFO_INTRINSIC_Object_GetType)
+ if (info.compCompHnd->getIntrinsicID(tree->gtCall.gtCallMethHnd) == CORINFO_INTRINSIC_Object_GetType)
{
return true;
}
}
}
- else if ((tree->gtOper == GT_INTRINSIC) &&
- (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType))
+ else if ((tree->gtOper == GT_INTRINSIC) && (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType))
{
return true;
}
else if (tree->gtOper == GT_LCL_VAR)
{
- LclVarDsc * lcl = &(lvaTable[tree->gtLclVarCommon.gtLclNum]);
+ LclVarDsc* lcl = &(lvaTable[tree->gtLclVarCommon.gtLclNum]);
if (lcl->TypeGet() == TYP_REF)
{
- if (lcl->lvVerTypeInfo.GetClassHandle()
- == info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE))
+ if (lcl->lvVerTypeInfo.GetClassHandle() == info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE))
+ {
return true;
+ }
}
}
return false;
@@ -13093,7 +14095,7 @@ bool Compiler::gtCanOptimizeTypeEquality(GenTreePtr tree)
bool Compiler::gtIsTypeHandleToRuntimeTypeHelper(GenTreePtr tree)
{
return tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) ||
- tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL);
+ tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL);
}
bool Compiler::gtIsActiveCSE_Candidate(GenTreePtr tree)
@@ -13105,22 +14107,37 @@ bool Compiler::gtIsActiveCSE_Candidate(GenTreePtr tree)
struct ComplexityStruct
{
- unsigned m_numNodes; unsigned m_nodeLimit;
- ComplexityStruct(unsigned nodeLimit) : m_numNodes(0), m_nodeLimit(nodeLimit) {}
+ unsigned m_numNodes;
+ unsigned m_nodeLimit;
+ ComplexityStruct(unsigned nodeLimit) : m_numNodes(0), m_nodeLimit(nodeLimit)
+ {
+ }
};
static Compiler::fgWalkResult ComplexityExceedsWalker(GenTreePtr* pTree, Compiler::fgWalkData* data)
{
ComplexityStruct* pComplexity = (ComplexityStruct*)data->pCallbackData;
- if (++pComplexity->m_numNodes > pComplexity->m_nodeLimit) return Compiler::WALK_ABORT;
- else return Compiler::WALK_CONTINUE;
+ if (++pComplexity->m_numNodes > pComplexity->m_nodeLimit)
+ {
+ return Compiler::WALK_ABORT;
+ }
+ else
+ {
+ return Compiler::WALK_CONTINUE;
+ }
}
-bool Compiler::gtComplexityExceeds(GenTreePtr* tree, unsigned limit)
+bool Compiler::gtComplexityExceeds(GenTreePtr* tree, unsigned limit)
{
ComplexityStruct complexity(limit);
- if (fgWalkTreePre(tree, &ComplexityExceedsWalker, &complexity) == WALK_ABORT) return true;
- else return false;
+ if (fgWalkTreePre(tree, &ComplexityExceedsWalker, &complexity) == WALK_ABORT)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
}
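The walker above simply counts nodes and aborts the walk once the count passes the limit, so gtComplexityExceeds never has to finish traversing a very large tree. A standalone sketch of that early-out counting pattern on a toy binary tree (illustrative types only):

// Illustrative only: abort a recursive tree walk as soon as the visited node
// count exceeds a limit, mirroring ComplexityExceedsWalker.
#include <cstdio>

struct Tree
{
    Tree* left  = nullptr;
    Tree* right = nullptr;
};

// Returns true (and stops walking) once more than 'limit' nodes were seen.
bool exceedsLimit(const Tree* t, unsigned limit, unsigned& numNodes)
{
    if (t == nullptr)
    {
        return false;
    }
    if (++numNodes > limit)
    {
        return true; // early out, no need to visit the rest of the tree
    }
    return exceedsLimit(t->left, limit, numNodes) || exceedsLimit(t->right, limit, numNodes);
}

int main()
{
    Tree leaf1, leaf2, root;
    root.left  = &leaf1;
    root.right = &leaf2;

    unsigned count = 0;
    std::printf("exceeds 2: %d\n", exceedsLimit(&root, 2, count)); // 1 (tree has three nodes)
    count = 0;
    std::printf("exceeds 3: %d\n", exceedsLimit(&root, 3, count)); // 0
    return 0;
}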
/*
@@ -13133,38 +14150,36 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-
-#if MEASURE_BLOCK_SIZE
+#if MEASURE_BLOCK_SIZE
/* static */
-size_t BasicBlock::s_Size;
+size_t BasicBlock::s_Size;
/* static */
-size_t BasicBlock::s_Count;
+size_t BasicBlock::s_Count;
#endif // MEASURE_BLOCK_SIZE
#ifdef DEBUG
// The max # of tree nodes in any BB
/* static */
-unsigned BasicBlock::s_nMaxTrees;
+unsigned BasicBlock::s_nMaxTrees;
#endif // DEBUG
-
/*****************************************************************************
*
* Allocate a basic block but don't append it to the current BB list.
*/
-BasicBlock * Compiler::bbNewBasicBlock(BBjumpKinds jumpKind)
+BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind)
{
- BasicBlock * block;
+ BasicBlock* block;
/* Allocate the block descriptor and zero it out */
assert(fgSafeBasicBlockCreation);
block = new (this, CMK_BasicBlock) BasicBlock;
-#if MEASURE_BLOCK_SIZE
+#if MEASURE_BLOCK_SIZE
BasicBlock::s_Count += 1;
- BasicBlock::s_Size += sizeof(*block);
+ BasicBlock::s_Size += sizeof(*block);
#endif
#ifdef DEBUG
@@ -13185,34 +14200,38 @@ BasicBlock * Compiler::bbNewBasicBlock(BBjumpKinds jumpKind)
/* Give the block a number, set the ancestor count and weight */
++fgBBcount;
-
+
if (compIsForInlining())
{
block->bbNum = ++impInlineInfo->InlinerCompiler->fgBBNumMax;
}
else
- {
+ {
block->bbNum = ++fgBBNumMax;
}
- block->bbRefs = 1;
- block->bbWeight = BB_UNITY_WEIGHT;
+ block->bbRefs = 1;
+ block->bbWeight = BB_UNITY_WEIGHT;
block->bbStkTempsIn = NO_BASE_TMP;
block->bbStkTempsOut = NO_BASE_TMP;
- block->bbEntryState = NULL;
+ block->bbEntryState = nullptr;
/* Record the jump kind in the block */
block->bbJumpKind = jumpKind;
if (jumpKind == BBJ_THROW)
+ {
block->bbSetRunRarely();
+ }
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("New Basic Block BB%02u [%p] created.\n", block->bbNum, dspPtr(block));
+ }
#endif
// We will give all the blocks var sets after the number of tracked variables
@@ -13237,12 +14256,12 @@ BasicBlock * Compiler::bbNewBasicBlock(BBjumpKinds jumpKind)
VarSetOps::AssignNoCopy(this, block->bbScope, VarSetOps::UninitVal());
}
- block->bbHeapUse = false;
- block->bbHeapDef = false;
- block->bbHeapLiveIn = false;
+ block->bbHeapUse = false;
+ block->bbHeapDef = false;
+ block->bbHeapLiveIn = false;
block->bbHeapLiveOut = false;
- block->bbHeapSsaPhiFunc = NULL;
+ block->bbHeapSsaPhiFunc = nullptr;
block->bbHeapSsaNumIn = 0;
block->bbHeapSsaNumOut = 0;
@@ -13254,36 +14273,42 @@ BasicBlock * Compiler::bbNewBasicBlock(BBjumpKinds jumpKind)
return block;
}
-
//------------------------------------------------------------------------------
// containsStatement - return true if the block contains the given statement
//------------------------------------------------------------------------------
-bool BasicBlock::containsStatement(GenTree *statement)
+bool BasicBlock::containsStatement(GenTree* statement)
{
assert(statement->gtOper == GT_STMT);
- GenTree *curr = bbTreeList;
- do
+ GenTree* curr = bbTreeList;
+ do
{
if (curr == statement)
+ {
break;
- curr = curr->gtNext;
- }
- while (curr);
- return curr != NULL;
+ }
+ curr = curr->gtNext;
+ } while (curr);
+ return curr != nullptr;
}
GenTreeStmt* BasicBlock::FirstNonPhiDef()
{
GenTreePtr stmt = bbTreeList;
- if (stmt == nullptr) return nullptr;
+ if (stmt == nullptr)
+ {
+ return nullptr;
+ }
GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
- while ((tree->OperGet() == GT_ASG && tree->gtOp.gtOp2->OperGet() == GT_PHI)
- || (tree->OperGet() == GT_STORE_LCL_VAR && tree->gtOp.gtOp1->OperGet() == GT_PHI))
+ while ((tree->OperGet() == GT_ASG && tree->gtOp.gtOp2->OperGet() == GT_PHI) ||
+ (tree->OperGet() == GT_STORE_LCL_VAR && tree->gtOp.gtOp1->OperGet() == GT_PHI))
{
stmt = stmt->gtNext;
- if (stmt == nullptr) return nullptr;
+ if (stmt == nullptr)
+ {
+ return nullptr;
+ }
tree = stmt->gtStmt.gtStmtExpr;
}
return stmt->AsStmt();
@@ -13292,10 +14317,13 @@ GenTreeStmt* BasicBlock::FirstNonPhiDef()
GenTreePtr BasicBlock::FirstNonPhiDefOrCatchArgAsg()
{
GenTreePtr stmt = FirstNonPhiDef();
- if (stmt == nullptr) return nullptr;
+ if (stmt == nullptr)
+ {
+ return nullptr;
+ }
GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
- if ((tree->OperGet() == GT_ASG && tree->gtOp.gtOp2->OperGet() == GT_CATCH_ARG)
- || (tree->OperGet() == GT_STORE_LCL_VAR && tree->gtOp.gtOp1->OperGet() == GT_CATCH_ARG))
+ if ((tree->OperGet() == GT_ASG && tree->gtOp.gtOp2->OperGet() == GT_CATCH_ARG) ||
+ (tree->OperGet() == GT_STORE_LCL_VAR && tree->gtOp.gtOp1->OperGet() == GT_CATCH_ARG))
{
stmt = stmt->gtNext;
}
@@ -13308,12 +14336,12 @@ GenTreePtr BasicBlock::FirstNonPhiDefOrCatchArgAsg()
* rarely run block, and we set its weight to zero.
*/
-void BasicBlock::bbSetRunRarely()
+void BasicBlock::bbSetRunRarely()
{
setBBWeight(BB_ZERO_WEIGHT);
- if (bbWeight == BB_ZERO_WEIGHT)
+ if (bbWeight == BB_ZERO_WEIGHT)
{
- bbFlags |= BBF_RUN_RARELY; // This block is never/rarely run
+ bbFlags |= BBF_RUN_RARELY; // This block is never/rarely run
}
}
@@ -13322,56 +14350,60 @@ void BasicBlock::bbSetRunRarely()
* Can a BasicBlock be inserted after this without altering the flowgraph
*/
-bool BasicBlock::bbFallsThrough()
+bool BasicBlock::bbFallsThrough()
{
switch (bbJumpKind)
{
- case BBJ_THROW:
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- case BBJ_EHCATCHRET:
- case BBJ_RETURN:
- case BBJ_ALWAYS:
- case BBJ_LEAVE:
- case BBJ_SWITCH:
- return false;
+ case BBJ_THROW:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ case BBJ_EHCATCHRET:
+ case BBJ_RETURN:
+ case BBJ_ALWAYS:
+ case BBJ_LEAVE:
+ case BBJ_SWITCH:
+ return false;
- case BBJ_NONE:
- case BBJ_COND:
- return true;
+ case BBJ_NONE:
+ case BBJ_COND:
+ return true;
- case BBJ_CALLFINALLY:
- return ((bbFlags & BBF_RETLESS_CALL) == 0);
-
- default:
- assert(!"Unknown bbJumpKind in bbFallsThrough()");
- return true;
+ case BBJ_CALLFINALLY:
+ return ((bbFlags & BBF_RETLESS_CALL) == 0);
+
+ default:
+ assert(!"Unknown bbJumpKind in bbFallsThrough()");
+ return true;
}
}
-unsigned BasicBlock::NumSucc(Compiler * comp)
+unsigned BasicBlock::NumSucc(Compiler* comp)
{
// As described in the spec comment of NumSucc at its declaration, whether "comp" is null determines
- // whether NumSucc and GetSucc yield successors of finally blocks.
+ // whether NumSucc and GetSucc yield successors of finally blocks.
switch (bbJumpKind)
{
- case BBJ_THROW:
- case BBJ_RETURN:
- return 0;
-
- case BBJ_EHFILTERRET:
- if (comp == NULL)
+ case BBJ_THROW:
+ case BBJ_RETURN:
return 0;
- else
- return 1;
- case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ if (comp == nullptr)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+
+ case BBJ_EHFINALLYRET:
{
- if (comp == NULL)
- {
+ if (comp == nullptr)
+ {
return 0;
}
else
@@ -13384,103 +14416,103 @@ unsigned BasicBlock::NumSucc(Compiler * comp)
}
else
{
- assert(hndBeg->bbCatchTyp == BBCT_FAULT); // We can only BBJ_EHFINALLYRET from FINALLY and FAULT.
+ assert(hndBeg->bbCatchTyp == BBCT_FAULT); // We can only BBJ_EHFINALLYRET from FINALLY and FAULT.
// A FAULT block has no successors.
return 0;
}
}
}
- case BBJ_CALLFINALLY:
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- case BBJ_LEAVE:
- case BBJ_NONE:
- return 1;
- case BBJ_COND:
- if (bbJumpDest == bbNext)
- {
+ case BBJ_CALLFINALLY:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ case BBJ_LEAVE:
+ case BBJ_NONE:
return 1;
- }
- else
- {
- return 2;
- }
- case BBJ_SWITCH:
- if (comp == NULL)
- {
- return bbJumpSwt->bbsCount;
- } else
- {
- Compiler::SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(this);
- return sd.numDistinctSuccs;
- }
+ case BBJ_COND:
+ if (bbJumpDest == bbNext)
+ {
+ return 1;
+ }
+ else
+ {
+ return 2;
+ }
+ case BBJ_SWITCH:
+ if (comp == nullptr)
+ {
+ return bbJumpSwt->bbsCount;
+ }
+ else
+ {
+ Compiler::SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(this);
+ return sd.numDistinctSuccs;
+ }
- default:
- unreached();
+ default:
+ unreached();
}
}
-
-BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler * comp)
+BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp)
{
// As described in the spec comment of GetSucc at its declaration, whether "comp" is null determines
- // whether NumSucc and GetSucc yield successors of finally blocks.
+ // whether NumSucc and GetSucc yield successors of finally blocks.
- assert(i < NumSucc(comp)); // Index bounds check.
- //printf("bbjk=%d\n", bbJumpKind);
+ assert(i < NumSucc(comp)); // Index bounds check.
+ // printf("bbjk=%d\n", bbJumpKind);
switch (bbJumpKind)
{
- case BBJ_THROW:
- case BBJ_RETURN:
- unreached(); // Should have been covered by assert above.
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ unreached(); // Should have been covered by assert above.
- case BBJ_EHFILTERRET:
- {
- assert(comp != NULL); // Or else we're not looking for successors.
- BasicBlock* result = comp->fgFirstBlockOfHandler(this);
- noway_assert(result == bbJumpDest);
- // Handler is the (sole) normal successor of the filter.
- return result;
- }
+ case BBJ_EHFILTERRET:
+ {
+ assert(comp != nullptr); // Or else we're not looking for successors.
+ BasicBlock* result = comp->fgFirstBlockOfHandler(this);
+ noway_assert(result == bbJumpDest);
+ // Handler is the (sole) normal successor of the filter.
+ return result;
+ }
- case BBJ_EHFINALLYRET:
- return comp->fgSuccOfFinallyRet(this, i);
+ case BBJ_EHFINALLYRET:
+ return comp->fgSuccOfFinallyRet(this, i);
- case BBJ_CALLFINALLY:
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- case BBJ_LEAVE:
- return bbJumpDest;
+ case BBJ_CALLFINALLY:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ case BBJ_LEAVE:
+ return bbJumpDest;
- case BBJ_NONE:
- return bbNext;
- case BBJ_COND:
- if (i == 0)
- {
+ case BBJ_NONE:
return bbNext;
- }
- else
- {
- assert(i == 1);
- return bbJumpDest;
- };
- case BBJ_SWITCH:
- if (comp == NULL)
- {
- assert(i < bbJumpSwt->bbsCount); // Range check.
- return bbJumpSwt->bbsDstTab[i];
- }
- else
- {
- // Remove duplicates.
- Compiler::SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(this);
- assert(i < sd.numDistinctSuccs); // Range check.
- return sd.nonDuplicates[i];
- }
+ case BBJ_COND:
+ if (i == 0)
+ {
+ return bbNext;
+ }
+ else
+ {
+ assert(i == 1);
+ return bbJumpDest;
+ };
+ case BBJ_SWITCH:
+ if (comp == nullptr)
+ {
+ assert(i < bbJumpSwt->bbsCount); // Range check.
+ return bbJumpSwt->bbsDstTab[i];
+ }
+ else
+ {
+ // Remove duplicates.
+ Compiler::SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(this);
+ assert(i < sd.numDistinctSuccs); // Range check.
+ return sd.nonDuplicates[i];
+ }
- default:
- unreached();
+ default:
+ unreached();
}
}
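For BBJ_SWITCH the raw jump table can name the same target more than once; when a Compiler is supplied, NumSucc/GetSucc go through GetDescriptorForSwitch and report only distinct successors. A standalone sketch of that deduplication, with ints standing in for BasicBlock pointers:

// Illustrative only: a switch jump table with duplicate targets has fewer
// distinct successors than it has table entries.
#include <cstdio>
#include <set>
#include <vector>

int main()
{
    // Jump table: five cases, but only three distinct target blocks.
    std::vector<int> jumpTable = {2, 3, 2, 5, 3};

    std::set<int> distinct(jumpTable.begin(), jumpTable.end());

    std::printf("table entries: %zu\n", jumpTable.size());      // 5
    std::printf("distinct successors: %zu\n", distinct.size()); // 3
    return 0;
}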
@@ -13499,17 +14531,17 @@ bool GenTree::IsRegOptional() const
bool GenTree::IsPhiDefn()
{
- bool res =
- OperGet() == GT_ASG
- && gtOp.gtOp2 != NULL
- && gtOp.gtOp2->OperGet() == GT_PHI;
+ bool res = OperGet() == GT_ASG && gtOp.gtOp2 != nullptr && gtOp.gtOp2->OperGet() == GT_PHI;
assert(!res || gtOp.gtOp1->OperGet() == GT_LCL_VAR);
return res;
}
bool GenTree::IsPhiDefnStmt()
{
- if (OperGet() != GT_STMT) return false;
+ if (OperGet() != GT_STMT)
+ {
+ return false;
+ }
GenTreePtr asg = gtStmt.gtStmtExpr;
return asg->IsPhiDefn();
}
@@ -13526,7 +14558,7 @@ bool GenTree::IsPhiDefnStmt()
bool GenTree::IsPartialLclFld(Compiler* comp)
{
return ((gtOper == GT_LCL_FLD) &&
- (comp->lvaTable[this->gtLclVarCommon.gtLclNum].lvExactSize != genTypeSize(gtType)));
+ (comp->lvaTable[this->gtLclVarCommon.gtLclNum].lvExactSize != genTypeSize(gtType)));
}
bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
@@ -13536,7 +14568,7 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo
if (gtOp.gtOp1->IsLocal())
{
GenTreeLclVarCommon* lclVarTree = gtOp.gtOp1->AsLclVarCommon();
- *pLclVarTree = lclVarTree;
+ *pLclVarTree = lclVarTree;
if (pIsEntire != nullptr)
{
if (lclVarTree->IsPartialLclFld(comp))
@@ -13559,9 +14591,9 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo
else if (OperIsBlkOp())
{
GenTreePtr destAddr = gtOp.gtOp1->gtOp.gtOp1;
- unsigned width = 0;
+ unsigned width = 0;
// Do we care about whether this assigns the entire variable?
- if (pIsEntire != NULL)
+ if (pIsEntire != nullptr)
{
GenTreePtr blockWidth = gtOp.gtOp2;
if (blockWidth->IsCnsIntOrI())
@@ -13572,7 +14604,8 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo
// for initialization of struct types, so the type of the argument(s) will match this
// type, by construction, and be "entire".
assert(blockWidth->IsIconHandle(GTF_ICON_CLASS_HDL));
- width = comp->info.compCompHnd->getClassSize(CORINFO_CLASS_HANDLE(blockWidth->gtIntConCommon.IconValue()));
+ width = comp->info.compCompHnd->getClassSize(
+ CORINFO_CLASS_HANDLE(blockWidth->gtIntConCommon.IconValue()));
}
else
{
@@ -13580,7 +14613,9 @@ bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bo
assert(swidth >= 0);
// cpblk of size zero exists in the wild (in yacc-generated code in SQL) and is valid IL.
if (swidth == 0)
+ {
return false;
+ }
width = unsigned(swidth);
}
}
@@ -13605,8 +14640,8 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm
if (addrArg->IsLocal() || addrArg->OperIsLocalAddr())
{
GenTreeLclVarCommon* addrArgLcl = addrArg->AsLclVarCommon();
- *pLclVarTree = addrArgLcl;
- if (pIsEntire != NULL)
+ *pLclVarTree = addrArgLcl;
+ if (pIsEntire != nullptr)
{
unsigned lclOffset = 0;
if (addrArg->OperIsLocalField())
@@ -13621,7 +14656,7 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm
}
else
{
- unsigned lclNum = addrArgLcl->GetLclNum();
+ unsigned lclNum = addrArgLcl->GetLclNum();
unsigned varWidth = comp->lvaLclExactSize(lclNum);
if (comp->lvaTable[lclNum].lvNormalizeOnStore())
{
@@ -13645,14 +14680,16 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm
if (gtOp.gtOp1->IsCnsIntOrI())
{
// If we are just adding a zero then we allow an IsEntire match against width
- // otherwise we change width to zero to disallow an IsEntire Match
- return gtOp.gtOp2->DefinesLocalAddr(comp, gtOp.gtOp1->IsIntegralConst(0) ? width : 0, pLclVarTree, pIsEntire);
+ // otherwise we change width to zero to disallow an IsEntire Match
+ return gtOp.gtOp2->DefinesLocalAddr(comp, gtOp.gtOp1->IsIntegralConst(0) ? width : 0, pLclVarTree,
+ pIsEntire);
}
else if (gtOp.gtOp2->IsCnsIntOrI())
{
// If we are just adding a zero then we allow an IsEntire match against width
- // otherwise we change width to zero to disallow an IsEntire Match
- return gtOp.gtOp1->DefinesLocalAddr(comp, gtOp.gtOp2->IsIntegralConst(0) ? width : 0, pLclVarTree, pIsEntire);
+ // otherwise we change width to zero to disallow an IsEntire Match
+ return gtOp.gtOp1->DefinesLocalAddr(comp, gtOp.gtOp2->IsIntegralConst(0) ? width : 0, pLclVarTree,
+ pIsEntire);
}
}
// Post rationalization we could have GT_IND(GT_LEA(..)) trees.
@@ -13670,7 +14707,7 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm
{
assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire));
}
-#endif //DEBUG
+#endif // DEBUG
// base
GenTreePtr base = gtOp.gtOp1;
@@ -13705,7 +14742,7 @@ bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarComm
bool GenTree::IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq)
{
- if (IsLocal()) // Note that this covers "GT_LCL_FLD."
+ if (IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = AsLclVarCommon();
if (OperGet() == GT_LCL_FLD)
@@ -13749,14 +14786,13 @@ GenTreeLclVarCommon* GenTree::IsLocalAddrExpr()
return nullptr;
}
-
bool GenTree::IsLocalAddrExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq)
{
if (OperGet() == GT_ADDR)
{
assert(!comp->compRationalIRForm);
GenTreePtr addrArg = gtOp.gtOp1;
- if (addrArg->IsLocal()) // Note that this covers "GT_LCL_FLD."
+ if (addrArg->IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = addrArg->AsLclVarCommon();
if (addrArg->OperGet() == GT_LCL_FLD)
@@ -13784,16 +14820,20 @@ bool GenTree::IsLocalAddrExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree,
{
if (gtOp.gtOp1->OperGet() == GT_CNS_INT)
{
- if (gtOp.gtOp1->AsIntCon()->gtFieldSeq == NULL)
+ if (gtOp.gtOp1->AsIntCon()->gtFieldSeq == nullptr)
+ {
return false;
+ }
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(gtOp.gtOp1->AsIntCon()->gtFieldSeq, *pFldSeq);
return gtOp.gtOp2->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq);
}
else if (gtOp.gtOp2->OperGet() == GT_CNS_INT)
{
- if (gtOp.gtOp2->AsIntCon()->gtFieldSeq == NULL)
+ if (gtOp.gtOp2->AsIntCon()->gtFieldSeq == nullptr)
+ {
return false;
+ }
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(gtOp.gtOp2->AsIntCon()->gtFieldSeq, *pFldSeq);
return gtOp.gtOp1->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq);
@@ -13825,8 +14865,7 @@ bool GenTree::IsLocalAddrExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree,
// whether gtSetEvalOrder will already have put the lclVar on the lhs in
// the cases of interest.
-unsigned
-GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps *pOper)
+unsigned GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps* pOper)
{
unsigned lclNum = BAD_VAR_NUM;
if (OperIsAssignment())
@@ -13838,19 +14877,18 @@ GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps *pOper)
if (gtOper == GT_ASG)
{
GenTree* rhs = gtOp.gtOp2;
- if (rhs->OperIsBinary() &&
- (rhs->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
+ if (rhs->OperIsBinary() && (rhs->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
(rhs->gtOp.gtOp1->AsLclVarCommon()->gtLclNum == lhsLclNum))
{
- lclNum = lhsLclNum;
+ lclNum = lhsLclNum;
*pOtherTree = rhs->gtOp.gtOp2;
- *pOper = rhs->gtOper;
+ *pOper = rhs->gtOper;
}
}
else
{
- lclNum = lhsLclNum;
- *pOper = GenTree::OpAsgToOper(gtOper);
+ lclNum = lhsLclNum;
+ *pOper = GenTree::OpAsgToOper(gtOper);
*pOtherTree = gtOp.gtOp2;
}
}
@@ -13860,8 +14898,8 @@ GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps *pOper)
// return true if this tree node is a subcomponent of parent for codegen purposes
// (essentially, will be rolled into the same instruction)
-// Note that this method relies upon the value of gtRegNum field to determine
-// if the treenode is contained or not. Therefore you can not call this method
+// Note that this method relies upon the value of gtRegNum field to determine
+// if the treenode is contained or not. Therefore you can not call this method
// until after the LSRA phase has allocated physical registers to the treenodes.
bool GenTree::isContained() const
{
@@ -13885,83 +14923,85 @@ bool GenTree::isContained() const
// TODO-Cleanup : this is not clean, would be nice to have some way of marking this.
switch (OperGet())
{
- case GT_STOREIND:
- case GT_JTRUE:
- case GT_RETURN:
- case GT_STORE_LCL_FLD:
- case GT_STORE_LCL_VAR:
- case GT_ARR_BOUNDS_CHECK:
- case GT_LOCKADD:
- case GT_NOP:
- case GT_NO_OP:
- case GT_START_NONGC:
- case GT_PROF_HOOK:
- case GT_RETURNTRAP:
- case GT_COMMA:
- case GT_PINVOKE_PROLOG:
- case GT_PHYSREGDST:
- case GT_PUTARG_STK:
- case GT_MEMORYBARRIER:
- case GT_COPYBLK:
- case GT_INITBLK:
- case GT_COPYOBJ:
- case GT_SWITCH:
- case GT_JMPTABLE:
- case GT_SWITCH_TABLE:
- case GT_SWAP:
- case GT_LCLHEAP:
- case GT_CKFINITE:
- case GT_JMP:
+ case GT_STOREIND:
+ case GT_JTRUE:
+ case GT_RETURN:
+ case GT_STORE_LCL_FLD:
+ case GT_STORE_LCL_VAR:
+ case GT_ARR_BOUNDS_CHECK:
+ case GT_LOCKADD:
+ case GT_NOP:
+ case GT_NO_OP:
+ case GT_START_NONGC:
+ case GT_PROF_HOOK:
+ case GT_RETURNTRAP:
+ case GT_COMMA:
+ case GT_PINVOKE_PROLOG:
+ case GT_PHYSREGDST:
+ case GT_PUTARG_STK:
+ case GT_MEMORYBARRIER:
+ case GT_COPYBLK:
+ case GT_INITBLK:
+ case GT_COPYOBJ:
+ case GT_SWITCH:
+ case GT_JMPTABLE:
+ case GT_SWITCH_TABLE:
+ case GT_SWAP:
+ case GT_LCLHEAP:
+ case GT_CKFINITE:
+ case GT_JMP:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
#if !FEATURE_EH_FUNCLETS
- case GT_END_LFIN:
+ case GT_END_LFIN:
#endif
- return false;
+ return false;
#if !defined(LEGACY_BACKEND) && !defined(_TARGET_64BIT_)
- case GT_LONG:
- // GT_LONG nodes are normally contained. The only exception is when the result
- // of a TYP_LONG operation is not used and this can only happen if the GT_LONG
- // is the last node in the statement (in linear order).
- return gtNext != nullptr;
+ case GT_LONG:
+ // GT_LONG nodes are normally contained. The only exception is when the result
+ // of a TYP_LONG operation is not used and this can only happen if the GT_LONG
+ // is the last node in the statement (in linear order).
+ return gtNext != nullptr;
#endif
- case GT_CALL:
- // Note: if you hit this assert you are probably calling isContained()
- // before the LSRA phase has allocated physical register to the tree nodes
- //
- assert(gtType == TYP_VOID);
- return false;
- case GT_RETFILT:
- if (gtType == TYP_VOID)
- return false; // endfinally case
+ case GT_CALL:
+ // Note: if you hit this assert you are probably calling isContained()
+ // before the LSRA phase has allocated physical register to the tree nodes
+ //
+ assert(gtType == TYP_VOID);
+ return false;
+ case GT_RETFILT:
+ if (gtType == TYP_VOID)
+ {
+ return false; // endfinally case
+ }
- __fallthrough;
+ __fallthrough;
- default:
- // if it's contained it better have a parent
- assert(gtNext || OperIsLocal());
- return true;
+ default:
+ // if it's contained it better have a parent
+ assert(gtNext || OperIsLocal());
+ return true;
}
}
// return true if node is contained and an indir
bool GenTree::isContainedIndir() const
-{
- return isContained() && isIndir();
+{
+ return isContained() && isIndir();
}
bool GenTree::isIndirAddrMode()
-{
- return isIndir() && AsIndir()->Addr()->OperIsAddrMode() && AsIndir()->Addr()->isContained();
+{
+ return isIndir() && AsIndir()->Addr()->OperIsAddrMode() && AsIndir()->Addr()->isContained();
}
bool GenTree::isIndir() const
-{
- return OperGet() == GT_IND || OperGet() == GT_STOREIND;
+{
+ return OperGet() == GT_IND || OperGet() == GT_STOREIND;
}
bool GenTreeIndir::HasBase()
@@ -13989,7 +15029,7 @@ GenTreePtr GenTreeIndir::Base()
}
else
{
- return addr; // TODO: why do we return 'addr' here, but we return 'nullptr' in the equivalent Index() case?
+ return addr; // TODO: why do we return 'addr' here, but we return 'nullptr' in the equivalent Index() case?
}
}
@@ -14013,21 +15053,33 @@ GenTree* GenTreeIndir::Index()
unsigned GenTreeIndir::Scale()
{
if (HasIndex())
+ {
return Addr()->AsAddrMode()->gtScale;
+ }
else
+ {
return 1;
+ }
}
size_t GenTreeIndir::Offset()
{
if (isIndirAddrMode())
+ {
return Addr()->AsAddrMode()->gtOffset;
+ }
else if (Addr()->gtOper == GT_CLS_VAR_ADDR)
- return (size_t) Addr()->gtClsVar.gtClsVarHnd;
+ {
+ return (size_t)Addr()->gtClsVar.gtClsVarHnd;
+ }
else if (Addr()->IsCnsIntOrI() && Addr()->isContained())
+ {
return Addr()->AsIntConCommon()->IconValue();
+ }
else
+ {
return 0;
+ }
}
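Scale() and Offset() above pick apart an address of the form base + index * scale + offset. A standalone sketch of evaluating such an address mode (hypothetical AddrMode struct, not the JIT's GenTreeAddrMode):

// Illustrative only: effective address of a base + index*scale + offset
// addressing mode, the shape GenTreeIndir::Scale()/Offset() decompose.
#include <cstddef>
#include <cstdio>

struct AddrMode
{
    size_t   base;
    size_t   index;
    unsigned scale; // 1, 2, 4 or 8
    size_t   offset;
};

size_t effectiveAddress(const AddrMode& am)
{
    return am.base + am.index * am.scale + am.offset;
}

int main()
{
    AddrMode am{0x1000, 3, 8, 16}; // e.g. an array element: base + i*8 + header
    std::printf("address = 0x%zx\n", effectiveAddress(am)); // 0x1028
    return 0;
}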
//------------------------------------------------------------------------
@@ -14080,13 +15132,13 @@ bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
return false;
}
#endif
-#endif //!LEGACY_BACKEND
+#endif //! LEGACY_BACKEND
if (comp->opts.compReloc)
{
// During Ngen JIT is always asked to generate relocatable code.
// Hence JIT will try to encode only icon handles as pc-relative offsets.
- return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void *)IconValue()));
+ return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
@@ -14097,7 +15149,7 @@ bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
// based on reloc hint provided by VM. RIP relative encoding is preferred over relative
// to zero, because the former is one byte smaller than the latter. For this reason
// we check for reloc hint first and then whether addr fits in 32-bits next.
- //
+ //
// VM starts off with an initial state to allow both data and code address to be encoded as
// pc-relative offsets. Hence JIT will attempt to encode all absolute addresses as pc-relative
// offsets. It is possible while jitting a method, an address could not be encoded as a
@@ -14107,8 +15159,8 @@ bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
// offsets. Note that JIT will always attempt to relocate code addresses (e.g. call addr).
// After an overflow, VM will assume any relocation recorded is for a code address and will
// emit jump thunk if it cannot be encoded as pc-relative offset.
- return (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void *)IconValue())) || FitsInI32();
- }
+ return (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue())) || FitsInI32();
+ }
}
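When no relocation is requested, the code above falls back to asking whether the absolute address fits in a sign-extended 32-bit displacement. A standalone sketch of that kind of fits-in-int32 test (an assumption about the check's shape, not the JIT's actual FitsInI32 helper):

// Illustrative only: does a 64-bit constant survive truncation to a
// sign-extended 32-bit immediate?
#include <cstdint>
#include <cstdio>

bool fitsInI32(int64_t value)
{
    return value == static_cast<int64_t>(static_cast<int32_t>(value));
}

int main()
{
    std::printf("%d\n", fitsInI32(0x7fffffff));     // 1
    std::printf("%d\n", fitsInI32(-0x80000000LL));  // 1
    std::printf("%d\n", fitsInI32(0x80000000LL));   // 0
    std::printf("%d\n", fitsInI32(0x123456789LL));  // 0
    return 0;
}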
// Returns true if this icon value is encoded as addr needs recording a relocation with VM
@@ -14118,11 +15170,11 @@ bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
// During Ngen JIT is always asked to generate relocatable code.
// Hence JIT will try to encode only icon handles as pc-relative offsets.
- return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void *)IconValue()));
+ return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
- return IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void *)IconValue());
+ return IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue());
}
}
@@ -14139,17 +15191,17 @@ bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
return false;
}
#endif
-#endif //!LEGACY_BACKEND
+#endif //! LEGACY_BACKEND
- //TODO-x86 - TLS field handles are excluded for now as they are accessed relative to FS segment.
- //Handling of TLS field handles is a NYI and this needs to be relooked after implementing it.
+ // TODO-x86 - TLS field handles are excluded for now as they are accessed relative to FS segment.
+ // Handling of TLS field handles is a NYI and this needs to be revisited after implementing it.
return IsCnsIntOrI() && !IsIconHandle(GTF_ICON_TLS_HDL);
}
// Returns true if this icon value is encoded as addr needs recording a relocation with VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
- //If generating relocatable code, icons should be reported for recording relocatons.
+ // If generating relocatable code, icons should be reported for recording relocations.
return comp->opts.compReloc && IsIconHandle();
}
#endif //_TARGET_X86_
@@ -14166,7 +15218,7 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTreePtr* pObj, GenTreePtr* pStatic,
// Recognize struct static field patterns...
if (OperGet() == GT_IND)
{
- GenTreePtr addr = gtOp.gtOp1;
+ GenTreePtr addr = gtOp.gtOp1;
GenTreeIntCon* icon = nullptr;
if (addr->OperGet() == GT_CNS_INT)
{
@@ -14181,14 +15233,13 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTreePtr* pObj, GenTreePtr* pStatic,
icon = addr->gtOp.gtOp2->AsIntCon();
}
}
- if ( icon != nullptr
- && !icon->IsIconHandle(GTF_ICON_STR_HDL) // String handles are a source of TYP_REFs.
- && icon->gtFieldSeq != nullptr
- && icon->gtFieldSeq->m_next == nullptr // A static field should be a singleton
+ if (icon != nullptr && !icon->IsIconHandle(GTF_ICON_STR_HDL) // String handles are a source of TYP_REFs.
+ && icon->gtFieldSeq != nullptr &&
+ icon->gtFieldSeq->m_next == nullptr // A static field should be a singleton
// TODO-Review: A pseudoField here indicates an issue - this requires investigation
// See test case src\ddsuites\src\clr\x86\CoreMangLib\Dev\Globalization\CalendarRegressions.exe
- && !(FieldSeqStore::IsPseudoField(icon->gtFieldSeq->m_fieldHnd))
- && icon->gtFieldSeq != FieldSeqStore::NotAField()) // Ignore non-fields.
+ && !(FieldSeqStore::IsPseudoField(icon->gtFieldSeq->m_fieldHnd)) &&
+ icon->gtFieldSeq != FieldSeqStore::NotAField()) // Ignore non-fields.
{
statStructFldSeq = icon->gtFieldSeq;
}
@@ -14203,7 +15254,9 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTreePtr* pObj, GenTreePtr* pStatic,
if (comp->GetZeroOffsetFieldMap()->Lookup(addr, &zeroFieldSeq))
{
if (zeroFieldSeq->m_next == nullptr)
+ {
statStructFldSeq = zeroFieldSeq;
+ }
}
}
}
@@ -14225,36 +15278,36 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTreePtr* pObj, GenTreePtr* pStatic,
// The CSE could be a pointer to a boxed struct
//
GenTreeLclVarCommon* lclVar = AsLclVarCommon();
- ValueNum vn = gtVNPair.GetLiberal();
+ ValueNum vn = gtVNPair.GetLiberal();
if (vn != ValueNumStore::NoVN)
{
// Is the ValueNum a MapSelect involving a SharedStatic helper?
VNFuncApp funcApp1;
- if (comp->vnStore->GetVNFunc(vn, &funcApp1) &&
- (funcApp1.m_func == VNF_MapSelect) &&
+ if (comp->vnStore->GetVNFunc(vn, &funcApp1) && (funcApp1.m_func == VNF_MapSelect) &&
(comp->vnStore->IsSharedStatic(funcApp1.m_args[1])))
{
- ValueNum mapVN = funcApp1.m_args[0];
+ ValueNum mapVN = funcApp1.m_args[0];
// Is this new 'mapVN' ValueNum, a MapSelect involving a handle?
VNFuncApp funcApp2;
- if (comp->vnStore->GetVNFunc(mapVN, &funcApp2) &&
- (funcApp2.m_func == VNF_MapSelect) &&
+ if (comp->vnStore->GetVNFunc(mapVN, &funcApp2) && (funcApp2.m_func == VNF_MapSelect) &&
(comp->vnStore->IsVNHandle(funcApp2.m_args[1])))
{
- ValueNum fldHndVN = funcApp2.m_args[1];
+ ValueNum fldHndVN = funcApp2.m_args[1];
// Is this new 'fldHndVN' VNhandle a FieldHandle?
unsigned flags = comp->vnStore->GetHandleFlags(fldHndVN);
if (flags == GTF_ICON_FIELD_HDL)
{
- CORINFO_FIELD_HANDLE fieldHnd = CORINFO_FIELD_HANDLE(comp->vnStore->ConstantValue<ssize_t>(fldHndVN));
+ CORINFO_FIELD_HANDLE fieldHnd =
+ CORINFO_FIELD_HANDLE(comp->vnStore->ConstantValue<ssize_t>(fldHndVN));
- // Record this field sequence in 'statStructFldSeq' as it is likely to be a Boxed Struct field access.
+ // Record this field sequence in 'statStructFldSeq' as it is likely to be a Boxed Struct
+ // field access.
statStructFldSeq = comp->GetFieldSeqStore()->CreateSingleton(fieldHnd);
}
}
}
}
- }
+ }
if (statStructFldSeq != nullptr)
{
@@ -14263,14 +15316,14 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTreePtr* pObj, GenTreePtr* pStatic,
if (comp->gtIsStaticFieldPtrToBoxedStruct(TYP_REF, statStructFldSeq->m_fieldHnd))
{
*pFldSeq = comp->GetFieldSeqStore()->Append(statStructFldSeq, *pFldSeq);
- *pObj = nullptr;
+ *pObj = nullptr;
*pStatic = this;
return true;
}
}
// Otherwise...
- *pObj = this;
+ *pObj = this;
*pStatic = nullptr;
return true;
}
@@ -14281,10 +15334,10 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTreePtr* pObj, GenTreePtr* pStatic,
if (gtOp.gtOp2->OperGet() == GT_CNS_INT)
{
newFldSeq = gtOp.gtOp2->AsIntCon()->gtFieldSeq;
- baseAddr = gtOp.gtOp1;
+ baseAddr = gtOp.gtOp1;
}
}
- else
+ else
{
// Check if "this" has a zero-offset annotation.
if (!comp->GetZeroOffsetFieldMap()->Lookup(this, &newFldSeq))
@@ -14294,24 +15347,26 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTreePtr* pObj, GenTreePtr* pStatic,
}
else
{
- baseAddr = this;
+ baseAddr = this;
mustBeStatic = true;
}
}
// If not we don't have a field seq, it's not a field address.
if (newFldSeq == nullptr || newFldSeq == FieldSeqStore::NotAField())
+ {
return false;
+ }
// Prepend this field to whatever we've already accumulated (outside-in).
*pFldSeq = comp->GetFieldSeqStore()->Append(newFldSeq, *pFldSeq);
// Is it a static or instance field?
if (!FieldSeqStore::IsPseudoField(newFldSeq->m_fieldHnd) &&
- comp->info.compCompHnd->isFieldStatic(newFldSeq->m_fieldHnd))
+ comp->info.compCompHnd->isFieldStatic(newFldSeq->m_fieldHnd))
{
// It is a static field. We're done.
- *pObj = nullptr;
+ *pObj = nullptr;
*pStatic = baseAddr;
return true;
}
@@ -14328,62 +15383,79 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTreePtr* pObj, GenTreePtr* pStatic,
bool Compiler::gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd)
{
- if (fieldNodeType != TYP_REF) return false;
+ if (fieldNodeType != TYP_REF)
+ {
+ return false;
+ }
CORINFO_CLASS_HANDLE fldCls = nullptr;
noway_assert(fldHnd != nullptr);
- CorInfoType cit = info.compCompHnd->getFieldType(fldHnd, &fldCls);
- var_types fieldTyp = JITtype2varType(cit);
+ CorInfoType cit = info.compCompHnd->getFieldType(fldHnd, &fldCls);
+ var_types fieldTyp = JITtype2varType(cit);
return fieldTyp != TYP_REF;
}
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
- tree = tree->gtEffectiveVal();
+ tree = tree->gtEffectiveVal();
if (varTypeIsStruct(tree->gtType))
{
- switch(tree->gtOper)
+ switch (tree->gtOper)
{
- default:
- break;
- case GT_MKREFANY: structHnd = impGetRefAnyClass(); break;
- case GT_OBJ: structHnd = tree->gtObj.gtClass; break;
- case GT_CALL: structHnd = tree->gtCall.gtRetClsHnd; break;
- case GT_RET_EXPR: structHnd = tree->gtRetExpr.gtRetClsHnd; break;
- case GT_ARGPLACE: structHnd = tree->gtArgPlace.gtArgPlaceClsHnd; break;
- case GT_INDEX: structHnd = tree->gtIndex.gtStructElemClass; break;
- case GT_FIELD: info.compCompHnd->getFieldType(tree->gtField.gtFldHnd, &structHnd); break;
- case GT_ASG:
- structHnd = gtGetStructHandle(tree->gtGetOp1());
- break;
- case GT_LCL_VAR:
- case GT_LCL_FLD:
- structHnd = lvaTable[tree->AsLclVarCommon()->gtLclNum].lvVerTypeInfo.GetClassHandle();
- break;
- case GT_RETURN:
- structHnd = gtGetStructHandleIfPresent(tree->gtOp.gtOp1);
- break;
- case GT_IND:
+ default:
+ break;
+ case GT_MKREFANY:
+ structHnd = impGetRefAnyClass();
+ break;
+ case GT_OBJ:
+ structHnd = tree->gtObj.gtClass;
+ break;
+ case GT_CALL:
+ structHnd = tree->gtCall.gtRetClsHnd;
+ break;
+ case GT_RET_EXPR:
+ structHnd = tree->gtRetExpr.gtRetClsHnd;
+ break;
+ case GT_ARGPLACE:
+ structHnd = tree->gtArgPlace.gtArgPlaceClsHnd;
+ break;
+ case GT_INDEX:
+ structHnd = tree->gtIndex.gtStructElemClass;
+ break;
+ case GT_FIELD:
+ info.compCompHnd->getFieldType(tree->gtField.gtFldHnd, &structHnd);
+ break;
+ case GT_ASG:
+ structHnd = gtGetStructHandle(tree->gtGetOp1());
+ break;
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
+ structHnd = lvaTable[tree->AsLclVarCommon()->gtLclNum].lvVerTypeInfo.GetClassHandle();
+ break;
+ case GT_RETURN:
+ structHnd = gtGetStructHandleIfPresent(tree->gtOp.gtOp1);
+ break;
+ case GT_IND:
#ifdef FEATURE_SIMD
- if (varTypeIsSIMD(tree))
- {
- structHnd = gtGetStructHandleForSIMD(tree->gtType, TYP_FLOAT);
- }
- else
+ if (varTypeIsSIMD(tree))
+ {
+ structHnd = gtGetStructHandleForSIMD(tree->gtType, TYP_FLOAT);
+ }
+ else
#endif
- if (tree->gtFlags & GTF_IND_ARR_INDEX)
- {
- ArrayInfo arrInfo;
- bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
- assert(b);
- structHnd = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
- }
- break;
+ if (tree->gtFlags & GTF_IND_ARR_INDEX)
+ {
+ ArrayInfo arrInfo;
+ bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
+ assert(b);
+ structHnd = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
+ }
+ break;
#ifdef FEATURE_SIMD
- case GT_SIMD:
- structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->gtSIMDBaseType);
+ case GT_SIMD:
+ structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->gtSIMDBaseType);
#endif // FEATURE_SIMD
- break;
+ break;
}
}
return structHnd;
@@ -14396,22 +15468,26 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandle(GenTree* tree)
return structHnd;
}
-void GenTree::ParseArrayAddress(Compiler* comp, ArrayInfo* arrayInfo, GenTreePtr* pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq)
+void GenTree::ParseArrayAddress(
+ Compiler* comp, ArrayInfo* arrayInfo, GenTreePtr* pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq)
{
- *pArr = nullptr;
- ValueNum inxVN = ValueNumStore::NoVN;
- ssize_t offset = 0;
+ *pArr = nullptr;
+ ValueNum inxVN = ValueNumStore::NoVN;
+ ssize_t offset = 0;
FieldSeqNode* fldSeq = nullptr;
ParseArrayAddressWork(comp, 1, pArr, &inxVN, &offset, &fldSeq);
// If we didn't find an array reference (perhaps it is the constant null?) we will give up.
- if (*pArr == nullptr) return;
+ if (*pArr == nullptr)
+ {
+ return;
+ }
// OK, now we have to figure out if any part of the "offset" is a constant contribution to the index.
// First, sum the offsets of any fields in fldSeq.
- unsigned fieldOffsets = 0;
- FieldSeqNode* fldSeqIter = fldSeq;
+ unsigned fieldOffsets = 0;
+ FieldSeqNode* fldSeqIter = fldSeq;
// Also, find the first non-pseudo field...
assert(*pFldSeq == nullptr);
while (fldSeqIter != nullptr)
@@ -14427,7 +15503,9 @@ void GenTree::ParseArrayAddress(Compiler* comp, ArrayInfo* arrayInfo, GenTreePtr
if (!FieldSeqStore::IsPseudoField(fldSeqIter->m_fieldHnd))
{
if (*pFldSeq == nullptr)
+ {
*pFldSeq = fldSeqIter;
+ }
CORINFO_CLASS_HANDLE fldCls = nullptr;
noway_assert(fldSeqIter->m_fieldHnd != nullptr);
CorInfoType cit = comp->info.compCompHnd->getFieldType(fldSeqIter->m_fieldHnd, &fldCls);
@@ -14437,8 +15515,7 @@ void GenTree::ParseArrayAddress(Compiler* comp, ArrayInfo* arrayInfo, GenTreePtr
}
// Is there some portion of the "offset" beyond the first-elem offset and the struct field suffix we just computed?
- if ( !FitsIn<ssize_t>(fieldOffsets + arrayInfo->m_elemOffset)
- || !FitsIn<ssize_t>(arrayInfo->m_elemSize))
+ if (!FitsIn<ssize_t>(fieldOffsets + arrayInfo->m_elemOffset) || !FitsIn<ssize_t>(arrayInfo->m_elemSize))
{
// This seems unlikely, but no harm in being safe...
*pInxVN = comp->GetValueNumStore()->VNForExpr(TYP_INT);
@@ -14446,8 +15523,8 @@ void GenTree::ParseArrayAddress(Compiler* comp, ArrayInfo* arrayInfo, GenTreePtr
}
// Otherwise...
ssize_t offsetAccountedFor = static_cast<ssize_t>(fieldOffsets + arrayInfo->m_elemOffset);
- ssize_t elemSize = static_cast<ssize_t>(arrayInfo->m_elemSize);
-
+ ssize_t elemSize = static_cast<ssize_t>(arrayInfo->m_elemSize);
+
ssize_t constIndOffset = offset - offsetAccountedFor;
// This should be divisible by the element size...
assert((constIndOffset % elemSize) == 0);
@@ -14477,11 +15554,11 @@ void GenTree::ParseArrayAddress(Compiler* comp, ArrayInfo* arrayInfo, GenTreePtr
else
{
bool canFoldDiv = false;
-
- // If the index VN is a MUL by elemSize, see if we can eliminate it instead of adding
+
+ // If the index VN is a MUL by elemSize, see if we can eliminate it instead of adding
// the division by elemSize.
VNFuncApp funcApp;
- if (vnStore->GetVNFunc(inxVN, &funcApp) && funcApp.m_func == (VNFunc) GT_MUL)
+ if (vnStore->GetVNFunc(inxVN, &funcApp) && funcApp.m_func == (VNFunc)GT_MUL)
{
ValueNum vnForElemSize = vnStore->VNForLongCon(elemSize);
@@ -14489,12 +15566,12 @@ void GenTree::ParseArrayAddress(Compiler* comp, ArrayInfo* arrayInfo, GenTreePtr
// index VN should simply be the other operand.
if (funcApp.m_args[1] == vnForElemSize)
{
- *pInxVN = funcApp.m_args[0];
+ *pInxVN = funcApp.m_args[0];
canFoldDiv = true;
}
else if (funcApp.m_args[0] == vnForElemSize)
{
- *pInxVN = funcApp.m_args[1];
+ *pInxVN = funcApp.m_args[1];
canFoldDiv = true;
}
}
@@ -14503,70 +15580,77 @@ void GenTree::ParseArrayAddress(Compiler* comp, ArrayInfo* arrayInfo, GenTreePtr
if (!canFoldDiv)
{
ValueNum vnForElemSize = vnStore->VNForPtrSizeIntCon(elemSize);
- ValueNum vnForScaledInx = vnStore->VNForFunc(TYP_I_IMPL, GetVNFuncForOper(GT_DIV, false), inxVN, vnForElemSize);
+ ValueNum vnForScaledInx =
+ vnStore->VNForFunc(TYP_I_IMPL, GetVNFuncForOper(GT_DIV, false), inxVN, vnForElemSize);
*pInxVN = vnForScaledInx;
}
if (constInd != 0)
{
ValueNum vnForConstInd = comp->GetValueNumStore()->VNForPtrSizeIntCon(constInd);
- *pInxVN = comp->GetValueNumStore()->VNForFunc(TYP_I_IMPL, GetVNFuncForOper(GT_ADD, (gtFlags & GTF_UNSIGNED) != 0), *pInxVN, vnForConstInd);
+ *pInxVN = comp->GetValueNumStore()->VNForFunc(TYP_I_IMPL,
+ GetVNFuncForOper(GT_ADD, (gtFlags & GTF_UNSIGNED) != 0),
+ *pInxVN, vnForConstInd);
}
}
}
}
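The constant part of the index is recovered by subtracting the first-element offset and any struct field offsets from the total byte offset, then dividing by the element size. A standalone sketch of that arithmetic with made-up numbers (names are illustrative, not the JIT's):

// Illustrative only: recovering a constant array index from a byte offset,
// as ParseArrayAddress does after summing the field offsets.
#include <cassert>
#include <cstdio>

int main()
{
    long offset       = 64; // total constant byte offset seen in the address
    long elemOffset   = 16; // offset of the first element (array header)
    long fieldOffsets = 0;  // struct field offsets inside an element, if any
    long elemSize     = 8;  // size of one element

    long constIndOffset = offset - (elemOffset + fieldOffsets);
    assert(constIndOffset % elemSize == 0); // must be a whole number of elements

    long constInd = constIndOffset / elemSize;
    std::printf("constant index contribution: %ld\n", constInd); // 6
    return 0;
}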
-void GenTree::ParseArrayAddressWork(Compiler* comp, ssize_t inputMul, GenTreePtr* pArr, ValueNum* pInxVN, ssize_t* pOffset, FieldSeqNode** pFldSeq)
+void GenTree::ParseArrayAddressWork(
+ Compiler* comp, ssize_t inputMul, GenTreePtr* pArr, ValueNum* pInxVN, ssize_t* pOffset, FieldSeqNode** pFldSeq)
{
if (TypeGet() == TYP_REF)
{
// This must be the array pointer.
*pArr = this;
- assert(inputMul == 1); // Can't multiply the array pointer by anything.
+ assert(inputMul == 1); // Can't multiply the array pointer by anything.
}
else
{
switch (OperGet())
{
- case GT_CNS_INT:
- *pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, gtIntCon.gtFieldSeq);
- *pOffset += (inputMul * gtIntCon.gtIconVal);
- return;
-
- case GT_ADD:
- case GT_SUB:
- gtOp.gtOp1->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
- if (OperGet() == GT_SUB)
- inputMul = -inputMul;
- gtOp.gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
- return;
+ case GT_CNS_INT:
+ *pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, gtIntCon.gtFieldSeq);
+ *pOffset += (inputMul * gtIntCon.gtIconVal);
+ return;
- case GT_MUL:
+ case GT_ADD:
+ case GT_SUB:
+ gtOp.gtOp1->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
+ if (OperGet() == GT_SUB)
+ {
+ inputMul = -inputMul;
+ }
+ gtOp.gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
+ return;
+
+ case GT_MUL:
{
// If one op is a constant, continue parsing down.
- ssize_t subMul = 0;
+ ssize_t subMul = 0;
GenTreePtr nonConst = nullptr;
if (gtOp.gtOp1->IsCnsIntOrI())
{
// If the other arg is an int constant, and is a "not-a-field", choose
// that as the multiplier, thus preserving constant index offsets...
- if (gtOp.gtOp2->OperGet() == GT_CNS_INT && gtOp.gtOp2->gtIntCon.gtFieldSeq == FieldSeqStore::NotAField())
+ if (gtOp.gtOp2->OperGet() == GT_CNS_INT &&
+ gtOp.gtOp2->gtIntCon.gtFieldSeq == FieldSeqStore::NotAField())
{
- subMul = gtOp.gtOp2->gtIntConCommon.IconValue();
+ subMul = gtOp.gtOp2->gtIntConCommon.IconValue();
nonConst = gtOp.gtOp1;
}
- else
+ else
{
- subMul = gtOp.gtOp1->gtIntConCommon.IconValue();
+ subMul = gtOp.gtOp1->gtIntConCommon.IconValue();
nonConst = gtOp.gtOp2;
}
}
else if (gtOp.gtOp2->IsCnsIntOrI())
{
- subMul = gtOp.gtOp2->gtIntConCommon.IconValue();
+ subMul = gtOp.gtOp2->gtIntConCommon.IconValue();
nonConst = gtOp.gtOp1;
}
- if (nonConst != NULL)
+ if (nonConst != nullptr)
{
nonConst->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
@@ -14575,26 +15659,27 @@ void GenTree::ParseArrayAddressWork(Compiler* comp, ssize_t inputMul, GenTreePtr
}
break;
- case GT_LSH:
- // If one op is a constant, continue parsing down.
- if (gtOp.gtOp2->IsCnsIntOrI())
- {
- ssize_t subMul = 1 << gtOp.gtOp2->gtIntConCommon.IconValue();
- gtOp.gtOp1->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
- return;
- }
- // Otherwise, exit the switch, treat as a contribution to the index.
- break;
-
- default:
- break;
+ case GT_LSH:
+ // If one op is a constant, continue parsing down.
+ if (gtOp.gtOp2->IsCnsIntOrI())
+ {
+ ssize_t subMul = 1 << gtOp.gtOp2->gtIntConCommon.IconValue();
+ gtOp.gtOp1->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
+ return;
+ }
+ // Otherwise, exit the switch, treat as a contribution to the index.
+ break;
+
+ default:
+ break;
}
// If we didn't return above, must be a contribution to the non-constant part of the index VN.
- ValueNum vn = comp->GetValueNumStore()->VNNormVal(gtVNPair.GetLiberal()); // We don't care about exceptions for this purpose.
+ ValueNum vn = comp->GetValueNumStore()->VNNormVal(gtVNPair.GetLiberal()); // We don't care about exceptions for
+ // this purpose.
if (inputMul != 1)
{
ValueNum mulVN = comp->GetValueNumStore()->VNForLongCon(inputMul);
- vn = comp->GetValueNumStore()->VNForFunc(TypeGet(), GetVNFuncForOper(GT_MUL, false), mulVN, vn);
+ vn = comp->GetValueNumStore()->VNForFunc(TypeGet(), GetVNFuncForOper(GT_MUL, false), mulVN, vn);
}
if (*pInxVN == ValueNumStore::NoVN)
{
@@ -14632,30 +15717,32 @@ bool GenTree::ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, Field
{
switch (OperGet())
{
- case GT_ADD:
+ case GT_ADD:
{
GenTreePtr arrAddr = nullptr;
- GenTreePtr offset = nullptr;
+ GenTreePtr offset = nullptr;
if (gtOp.gtOp1->TypeGet() == TYP_BYREF)
{
arrAddr = gtOp.gtOp1;
- offset = gtOp.gtOp2;
+ offset = gtOp.gtOp2;
}
else if (gtOp.gtOp2->TypeGet() == TYP_BYREF)
{
arrAddr = gtOp.gtOp2;
- offset = gtOp.gtOp1;
+ offset = gtOp.gtOp1;
}
else
{
return false;
}
- if (!offset->ParseOffsetForm(comp, pFldSeq)) return false;
+ if (!offset->ParseOffsetForm(comp, pFldSeq))
+ {
+ return false;
+ }
return arrAddr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
-
- case GT_ADDR:
+ case GT_ADDR:
{
GenTreePtr addrArg = gtOp.gtOp1;
if (addrArg->OperGet() != GT_IND)
@@ -14674,8 +15761,8 @@ bool GenTree::ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, Field
}
}
- default:
- return false;
+ default:
+ return false;
}
}
@@ -14683,19 +15770,22 @@ bool GenTree::ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
- case GT_CNS_INT:
+ case GT_CNS_INT:
{
GenTreeIntCon* icon = AsIntCon();
- *pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, icon->gtFieldSeq);
+ *pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, icon->gtFieldSeq);
return true;
}
- case GT_ADD:
- if (!gtOp.gtOp1->ParseOffsetForm(comp, pFldSeq)) return false;
- return gtOp.gtOp2->ParseOffsetForm(comp, pFldSeq);
+ case GT_ADD:
+ if (!gtOp.gtOp1->ParseOffsetForm(comp, pFldSeq))
+ {
+ return false;
+ }
+ return gtOp.gtOp2->ParseOffsetForm(comp, pFldSeq);
- default:
- return false;
+ default:
+ return false;
}
}
@@ -14703,70 +15793,78 @@ void GenTree::LabelIndex(Compiler* comp, bool isConst)
{
switch (OperGet())
{
- case GT_CNS_INT:
- // If we got here, this is a contribution to the constant part of the index.
- if (isConst)
- gtIntCon.gtFieldSeq = comp->GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
- return;
+ case GT_CNS_INT:
+ // If we got here, this is a contribution to the constant part of the index.
+ if (isConst)
+ {
+ gtIntCon.gtFieldSeq =
+ comp->GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
+ }
+ return;
- case GT_LCL_VAR:
- gtFlags |= GTF_VAR_ARR_INDEX;
- return;
+ case GT_LCL_VAR:
+ gtFlags |= GTF_VAR_ARR_INDEX;
+ return;
- case GT_ADD:
- case GT_SUB:
- gtOp.gtOp1->LabelIndex(comp, isConst);
- gtOp.gtOp2->LabelIndex(comp, isConst);
- break;
+ case GT_ADD:
+ case GT_SUB:
+ gtOp.gtOp1->LabelIndex(comp, isConst);
+ gtOp.gtOp2->LabelIndex(comp, isConst);
+ break;
- case GT_CAST:
- gtOp.gtOp1->LabelIndex(comp, isConst);
- break;
+ case GT_CAST:
+ gtOp.gtOp1->LabelIndex(comp, isConst);
+ break;
- case GT_ARR_LENGTH:
- gtFlags |= GTF_ARRLEN_ARR_IDX;
- return;
+ case GT_ARR_LENGTH:
+ gtFlags |= GTF_ARRLEN_ARR_IDX;
+ return;
- default:
- // For all other operators, peel off one constant; and then label the other if it's also a constant.
- if (OperIsArithmetic() || OperIsCompare())
- {
- if (gtOp.gtOp2->OperGet() == GT_CNS_INT)
+ default:
+ // For all other operators, peel off one constant; and then label the other if it's also a constant.
+ if (OperIsArithmetic() || OperIsCompare())
{
- gtOp.gtOp1->LabelIndex(comp, isConst);
- break;
- }
- else if (gtOp.gtOp1->OperGet() == GT_CNS_INT)
- {
- gtOp.gtOp2->LabelIndex(comp, isConst);
+                if (gtOp.gtOp2->OperGet() == GT_CNS_INT)
+                {
+                    gtOp.gtOp1->LabelIndex(comp, isConst);
+                    break;
+                }
+                else if (gtOp.gtOp1->OperGet() == GT_CNS_INT)
+                {
+                    gtOp.gtOp2->LabelIndex(comp, isConst);
+                    break;
+                }
+                // Otherwise continue downward on both, labeling vars.
+                gtOp.gtOp1->LabelIndex(comp, false);
+                gtOp.gtOp2->LabelIndex(comp, false);
break;
}
- // Otherwise continue downward on both, labeling vars.
- gtOp.gtOp1->LabelIndex(comp, false);
- gtOp.gtOp2->LabelIndex(comp, false);
- }
- break;
}
}
-// static
-FieldSeqNode FieldSeqStore::s_notAField(NULL, NULL); // Value doesn't matter; exists only to provide a distinguished address.
+// static
+FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr); // Value doesn't matter; exists only to provide a
+ // distinguished address.
// FieldSeqStore methods.
FieldSeqStore::FieldSeqStore(IAllocator* alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
-{}
+{
+}
FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd)
{
- FieldSeqNode fsn(fieldHnd, NULL);
- FieldSeqNode* res = NULL;
+ FieldSeqNode fsn(fieldHnd, nullptr);
+ FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
- res = reinterpret_cast<FieldSeqNode*>(m_alloc->Alloc(sizeof(FieldSeqNode)));
+ res = reinterpret_cast<FieldSeqNode*>(m_alloc->Alloc(sizeof(FieldSeqNode)));
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
@@ -14775,34 +15873,41 @@ FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd)
FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b)
{
- if (a == NULL)
+ if (a == nullptr)
+ {
return b;
- else if (a == NotAField())
+ }
+ else if (a == NotAField())
+ {
return NotAField();
- else if (b == NULL)
+ }
+ else if (b == nullptr)
+ {
return a;
- else if (b == NotAField())
+ }
+ else if (b == NotAField())
+ {
return NotAField();
- // Extremely special case for ConstantIndex pseudo-fields -- appending consecutive such
- // together collapse to one.
- else if ( a->m_next == nullptr
- && a->m_fieldHnd == ConstantIndexPseudoField
- && b->m_fieldHnd == ConstantIndexPseudoField)
+ // Extremely special case for ConstantIndex pseudo-fields -- appending consecutive such
+ // together collapse to one.
+ }
+ else if (a->m_next == nullptr && a->m_fieldHnd == ConstantIndexPseudoField &&
+ b->m_fieldHnd == ConstantIndexPseudoField)
{
return b;
}
else
{
FieldSeqNode* tmp = Append(a->m_next, b);
- FieldSeqNode fsn(a->m_fieldHnd, tmp);
- FieldSeqNode* res = NULL;
+ FieldSeqNode fsn(a->m_fieldHnd, tmp);
+ FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
- res = reinterpret_cast<FieldSeqNode*>(m_alloc->Alloc(sizeof(FieldSeqNode)));
+ res = reinterpret_cast<FieldSeqNode*>(m_alloc->Alloc(sizeof(FieldSeqNode)));
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
@@ -14814,8 +15919,10 @@ FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b)
int FieldSeqStore::FirstElemPseudoFieldStruct;
int FieldSeqStore::ConstantIndexPseudoFieldStruct;
-CORINFO_FIELD_HANDLE FieldSeqStore::FirstElemPseudoField = (CORINFO_FIELD_HANDLE)&FieldSeqStore::FirstElemPseudoFieldStruct;
-CORINFO_FIELD_HANDLE FieldSeqStore::ConstantIndexPseudoField = (CORINFO_FIELD_HANDLE)&FieldSeqStore::ConstantIndexPseudoFieldStruct;
+CORINFO_FIELD_HANDLE FieldSeqStore::FirstElemPseudoField =
+ (CORINFO_FIELD_HANDLE)&FieldSeqStore::FirstElemPseudoFieldStruct;
+CORINFO_FIELD_HANDLE FieldSeqStore::ConstantIndexPseudoField =
+ (CORINFO_FIELD_HANDLE)&FieldSeqStore::ConstantIndexPseudoFieldStruct;
bool FieldSeqNode::IsFirstElemFieldSeq()
{
@@ -14832,45 +15939,49 @@ bool FieldSeqNode::IsConstantIndexFieldSeq()
bool FieldSeqNode::IsPseudoField()
{
if (this == nullptr)
+ {
return false;
+ }
return m_fieldHnd == FieldSeqStore::FirstElemPseudoField || m_fieldHnd == FieldSeqStore::ConstantIndexPseudoField;
}
#ifdef FEATURE_SIMD
-GenTreeSIMD* Compiler::gtNewSIMDNode(var_types type, GenTreePtr op1, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size)
-{
- // TODO-CQ: An operand may be a GT_OBJ(GT_ADDR(GT_LCL_VAR))), in which case it should be
- // marked lvUsedInSIMDIntrinsic.
- assert(op1 != nullptr);
+GenTreeSIMD* Compiler::gtNewSIMDNode(
+ var_types type, GenTreePtr op1, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size)
+{
+ // TODO-CQ: An operand may be a GT_OBJ(GT_ADDR(GT_LCL_VAR))), in which case it should be
+ // marked lvUsedInSIMDIntrinsic.
+ assert(op1 != nullptr);
if (op1->OperGet() == GT_LCL_VAR)
{
- unsigned lclNum = op1->AsLclVarCommon()->GetLclNum();
- LclVarDsc* lclVarDsc = &lvaTable[lclNum];
+ unsigned lclNum = op1->AsLclVarCommon()->GetLclNum();
+ LclVarDsc* lclVarDsc = &lvaTable[lclNum];
lclVarDsc->lvUsedInSIMDIntrinsic = true;
}
-
+
return new (this, GT_SIMD) GenTreeSIMD(type, op1, simdIntrinsicID, baseType, size);
}
-GenTreeSIMD* Compiler::gtNewSIMDNode(var_types type, GenTreePtr op1, GenTreePtr op2, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size)
+GenTreeSIMD* Compiler::gtNewSIMDNode(
+ var_types type, GenTreePtr op1, GenTreePtr op2, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size)
{
// TODO-CQ: An operand may be a GT_OBJ(GT_ADDR(GT_LCL_VAR))), in which case it should be
// marked lvUsedInSIMDIntrinsic.
assert(op1 != nullptr);
if (op1->OperIsLocal())
{
- unsigned lclNum = op1->AsLclVarCommon()->GetLclNum();
- LclVarDsc* lclVarDsc = &lvaTable[lclNum];
+ unsigned lclNum = op1->AsLclVarCommon()->GetLclNum();
+ LclVarDsc* lclVarDsc = &lvaTable[lclNum];
lclVarDsc->lvUsedInSIMDIntrinsic = true;
}
if (op2 != nullptr && op2->OperIsLocal())
{
- unsigned lclNum = op2->AsLclVarCommon()->GetLclNum();
- LclVarDsc* lclVarDsc = &lvaTable[lclNum];
+ unsigned lclNum = op2->AsLclVarCommon()->GetLclNum();
+ LclVarDsc* lclVarDsc = &lvaTable[lclNum];
lclVarDsc->lvUsedInSIMDIntrinsic = true;
}
-
+
return new (this, GT_SIMD) GenTreeSIMD(type, op1, op2, simdIntrinsicID, baseType, size);
}
@@ -14879,27 +15990,27 @@ bool GenTree::isCommutativeSIMDIntrinsic()
assert(gtOper == GT_SIMD);
switch (AsSIMD()->gtSIMDIntrinsicID)
{
- case SIMDIntrinsicAdd:
- case SIMDIntrinsicBitwiseAnd:
- case SIMDIntrinsicBitwiseOr:
- case SIMDIntrinsicBitwiseXor:
- case SIMDIntrinsicEqual:
- case SIMDIntrinsicMax:
- case SIMDIntrinsicMin:
- case SIMDIntrinsicMul:
- case SIMDIntrinsicOpEquality:
- case SIMDIntrinsicOpInEquality:
- return true;
- default:
- return false;
+ case SIMDIntrinsicAdd:
+ case SIMDIntrinsicBitwiseAnd:
+ case SIMDIntrinsicBitwiseOr:
+ case SIMDIntrinsicBitwiseXor:
+ case SIMDIntrinsicEqual:
+ case SIMDIntrinsicMax:
+ case SIMDIntrinsicMin:
+ case SIMDIntrinsicMul:
+ case SIMDIntrinsicOpEquality:
+ case SIMDIntrinsicOpInEquality:
+ return true;
+ default:
+ return false;
}
}
-#endif //FEATURE_SIMD
+#endif // FEATURE_SIMD
//---------------------------------------------------------------------------------------
// InitializeStructReturnType:
// Initialize the Return Type Descriptor for a method that returns a struct type
-//
+//
// Arguments
// comp - Compiler Instance
// retClsHnd - VM handle to the struct type returned by the method
@@ -14914,10 +16025,10 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA
#if FEATURE_MULTIREG_RET
assert(retClsHnd != NO_CLASS_HANDLE);
- unsigned structSize = comp->info.compCompHnd->getClassSize(retClsHnd);
+ unsigned structSize = comp->info.compCompHnd->getClassSize(retClsHnd);
- Compiler::structPassingKind howToReturnStruct;
- var_types returnType = comp->getReturnTypeForStruct(retClsHnd, &howToReturnStruct, structSize);
+ Compiler::structPassingKind howToReturnStruct;
+ var_types returnType = comp->getReturnTypeForStruct(retClsHnd, &howToReturnStruct, structSize);
switch (howToReturnStruct)
{
@@ -14942,8 +16053,8 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA
// The size of this struct should be evenly divisible by elemSize
assert((structSize % elemSize) == 0);
-
- unsigned hfaCount = (structSize / elemSize);
+
+ unsigned hfaCount = (structSize / elemSize);
for (unsigned i = 0; i < hfaCount; ++i)
{
m_regType[i] = hfaType;
@@ -14975,10 +16086,10 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA
#elif defined(_TARGET_ARM64_)
// a non-HFA struct returned using two registers
- //
+ //
assert((structSize > TARGET_POINTER_SIZE) && (structSize <= (2 * TARGET_POINTER_SIZE)));
- BYTE gcPtrs[2] = { TYPE_GC_NONE, TYPE_GC_NONE };
+ BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
@@ -14991,15 +16102,14 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA
//
NYI("Unsupported TARGET returning a TYP_STRUCT in InitializeStructReturnType");
-
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- break; // for case SPK_ByValue
+ break; // for case SPK_ByValue
}
case Compiler::SPK_ByReference:
- // We are returning using the return buffer argument
+ // We are returning using the return buffer argument
// There are no return registers
break;
@@ -15007,7 +16117,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA
unreached(); // By the contract of getReturnTypeForStruct we should never get here.
- } // end of switch (howToReturnStruct)
+ } // end of switch (howToReturnStruct)
#endif // FEATURE_MULTIREG_RET
@@ -15019,7 +16129,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA
//---------------------------------------------------------------------------------------
// InitializeLongReturnType:
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
-//
+//
// Arguments
// comp - Compiler Instance
//
@@ -15036,7 +16146,7 @@ void ReturnTypeDesc::InitializeLongReturnType(Compiler* comp)
m_regType[0] = TYP_INT;
m_regType[1] = TYP_INT;
-#else // not _TARGET_X86_
+#else // not _TARGET_X86_
m_regType[0] = TYP_LONG;
@@ -15080,7 +16190,8 @@ regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx)
{
resultReg = REG_INTRET;
}
- else {
+ else
+ {
noway_assert(varTypeIsFloating(regType0));
resultReg = REG_FLOATRET;
}
@@ -15100,7 +16211,7 @@ regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx)
resultReg = REG_INTRET;
}
}
- else
+ else
{
noway_assert(varTypeIsFloating(regType1));
@@ -15131,13 +16242,13 @@ regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
- noway_assert(idx < 2); // Up to 2 return registers for 16-byte structs
- resultReg = (idx == 0) ? REG_INTRET : REG_INTRET_1; // X0 or X1
+ noway_assert(idx < 2); // Up to 2 return registers for 16-byte structs
+ resultReg = (idx == 0) ? REG_INTRET : REG_INTRET_1; // X0 or X1
}
else
{
- noway_assert(idx < 4); // Up to 4 return registers for HFA's
- resultReg = (regNumber)((unsigned)(REG_FLOATRET)+idx); // V0, V1, V2 or V3
+ noway_assert(idx < 4); // Up to 4 return registers for HFA's
+ resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // V0, V1, V2 or V3
}
#endif // TARGET_XXX
@@ -15151,7 +16262,7 @@ regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx)
//
// Arguments:
// None
-//
+//
// Return Value:
// reg mask of return registers in which the return type is returned.
//
@@ -15166,7 +16277,7 @@ regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx)
// TODO-ARM64: Implement this routine to support HFA returns.
// TODO-X86: Implement this routine to support long returns.
//
-//static
+// static
regMaskTP ReturnTypeDesc::GetABIReturnRegs()
{
regMaskTP resultMask = RBM_NONE;
diff --git a/src/jit/gentree.h b/src/jit/gentree.h
index a8baa9690d..bf114f4fc3 100644
--- a/src/jit/gentree.h
+++ b/src/jit/gentree.h
@@ -19,9 +19,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#define _GENTREE_H_
/*****************************************************************************/
-#include "vartype.h" // For "var_types"
-#include "target.h" // For "regNumber"
-#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
+#include "vartype.h" // For "var_types"
+#include "target.h" // For "regNumber"
+#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
#include "reglist.h"
#include "valuenumtype.h"
#include "simplerhash.h"
@@ -35,9 +35,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// build flags correctly.
#ifndef DEBUGGABLE_GENTREE
#ifdef DEBUG
-#define DEBUGGABLE_GENTREE 1
+#define DEBUGGABLE_GENTREE 1
#else // !DEBUG
-#define DEBUGGABLE_GENTREE 0
+#define DEBUGGABLE_GENTREE 0
#endif // !DEBUG
#endif // !DEBUGGABLE_GENTREE
@@ -50,8 +50,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// - it will use the appropriate kind based on the opcode, though it's not
// clear why SCK_OVERFLOW == SCK_ARITH_EXCPN
// SCK_PAUSE_EXEC is not currently used.
-//
-enum SpecialCodeKind
+//
+enum SpecialCodeKind
{
SCK_NONE,
SCK_RNGCHK_FAIL, // target when range check fails
@@ -66,24 +66,24 @@ enum SpecialCodeKind
/*****************************************************************************/
-DECLARE_TYPED_ENUM(genTreeOps,BYTE)
+DECLARE_TYPED_ENUM(genTreeOps, BYTE)
{
- #define GTNODE(en,sn,cm,ok) GT_ ## en,
- #include "gtlist.h"
+#define GTNODE(en, sn, cm, ok) GT_##en,
+#include "gtlist.h"
GT_COUNT,
#ifdef _TARGET_64BIT_
- // GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
- // For the 64-bit targets we will only use GT_CNS_INT as it used to represent all the possible sizes
- GT_CNS_NATIVELONG = GT_CNS_INT,
+ // GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
+ // For the 64-bit targets we will only use GT_CNS_INT as it used to represent all the possible sizes
+ GT_CNS_NATIVELONG = GT_CNS_INT,
#else
- // For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
- // In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
- GT_CNS_NATIVELONG = GT_CNS_LNG,
+ // For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
+ // In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
+ GT_CNS_NATIVELONG = GT_CNS_LNG,
#endif
}
-END_DECLARE_TYPED_ENUM(genTreeOps,BYTE)
+END_DECLARE_TYPED_ENUM(genTreeOps, BYTE)
/*****************************************************************************
*
@@ -98,52 +98,51 @@ END_DECLARE_TYPED_ENUM(genTreeOps,BYTE)
enum genTreeKinds
{
- GTK_SPECIAL = 0x0000, // unclassified operator (special handling reqd)
+ GTK_SPECIAL = 0x0000, // unclassified operator (special handling reqd)
- GTK_CONST = 0x0001, // constant operator
- GTK_LEAF = 0x0002, // leaf operator
- GTK_UNOP = 0x0004, // unary operator
- GTK_BINOP = 0x0008, // binary operator
- GTK_RELOP = 0x0010, // comparison operator
- GTK_LOGOP = 0x0020, // logical operator
- GTK_ASGOP = 0x0040, // assignment operator
+ GTK_CONST = 0x0001, // constant operator
+ GTK_LEAF = 0x0002, // leaf operator
+ GTK_UNOP = 0x0004, // unary operator
+ GTK_BINOP = 0x0008, // binary operator
+ GTK_RELOP = 0x0010, // comparison operator
+ GTK_LOGOP = 0x0020, // logical operator
+ GTK_ASGOP = 0x0040, // assignment operator
- GTK_KINDMASK= 0x007F, // operator kind mask
+ GTK_KINDMASK = 0x007F, // operator kind mask
- GTK_COMMUTE = 0x0080, // commutative operator
+ GTK_COMMUTE = 0x0080, // commutative operator
- GTK_EXOP = 0x0100, // Indicates that an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
- // by adding non-node fields to unary or binary operator.
+ GTK_EXOP = 0x0100, // Indicates that an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
+ // by adding non-node fields to unary or binary operator.
- GTK_LOCAL = 0x0200, // is a local access (load, store, phi)
+ GTK_LOCAL = 0x0200, // is a local access (load, store, phi)
/* Define composite value(s) */
- GTK_SMPOP = (GTK_UNOP|GTK_BINOP|GTK_RELOP|GTK_LOGOP)
+ GTK_SMPOP = (GTK_UNOP | GTK_BINOP | GTK_RELOP | GTK_LOGOP)
};
/*****************************************************************************/
-#define SMALL_TREE_NODES 1
+#define SMALL_TREE_NODES 1
/*****************************************************************************/
-DECLARE_TYPED_ENUM(gtCallTypes,BYTE)
+DECLARE_TYPED_ENUM(gtCallTypes, BYTE)
{
- CT_USER_FUNC, // User function
- CT_HELPER, // Jit-helper
- CT_INDIRECT, // Indirect call
+ CT_USER_FUNC, // User function
+ CT_HELPER, // Jit-helper
+ CT_INDIRECT, // Indirect call
- CT_COUNT // fake entry (must be last)
+ CT_COUNT // fake entry (must be last)
}
-END_DECLARE_TYPED_ENUM(gtCallTypes,BYTE)
-
+END_DECLARE_TYPED_ENUM(gtCallTypes, BYTE)
/*****************************************************************************/
-struct BasicBlock;
-
-struct InlineCandidateInfo;
+struct BasicBlock;
+
+struct InlineCandidateInfo;
/*****************************************************************************/
@@ -158,7 +157,9 @@ struct FieldSeqNode
CORINFO_FIELD_HANDLE m_fieldHnd;
FieldSeqNode* m_next;
- FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next) : m_fieldHnd(fieldHnd), m_next(next) {}
+ FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next) : m_fieldHnd(fieldHnd), m_next(next)
+ {
+ }
// returns true when this is the pseudo #FirstElem field sequence
bool IsFirstElemFieldSeq();
@@ -172,7 +173,8 @@ struct FieldSeqNode
// Make sure this provides methods that allow it to be used as a KeyFuncs type in SimplerHash.
static int GetHashCode(FieldSeqNode fsn)
{
- return static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_fieldHnd)) ^ static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
+ return static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_fieldHnd)) ^
+ static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
}
static bool Equals(FieldSeqNode fsn1, FieldSeqNode fsn2)
@@ -184,12 +186,13 @@ struct FieldSeqNode
// This class canonicalizes field sequences.
class FieldSeqStore
{
- typedef SimplerHashTable<FieldSeqNode, /*KeyFuncs*/FieldSeqNode, FieldSeqNode*, JitSimplerHashBehavior> FieldSeqNodeCanonMap;
+ typedef SimplerHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*, JitSimplerHashBehavior>
+ FieldSeqNodeCanonMap;
IAllocator* m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
- static FieldSeqNode s_notAField; // No value, just exists to provide an address.
+ static FieldSeqNode s_notAField; // No value, just exists to provide an address.
// Dummy variables to provide the addresses for the "pseudo field handle" statics below.
static int FirstElemPseudoFieldStruct;
@@ -204,7 +207,10 @@ public:
// This is a special distinguished FieldSeqNode indicating that a constant does *not*
// represent a valid field sequence. This is "infectious", in the sense that appending it
// (on either side) to any field sequence yields the "NotAField()" sequence.
- static FieldSeqNode* NotAField() { return &s_notAField; }
+ static FieldSeqNode* NotAField()
+ {
+ return &s_notAField;
+ }
// Returns the (canonical in the store) field sequence representing the concatenation of
// the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
@@ -233,14 +239,14 @@ class GenTreeOperandIterator;
/*****************************************************************************/
-typedef struct GenTree * GenTreePtr;
+typedef struct GenTree* GenTreePtr;
struct GenTreeArgList;
// Forward declarations of the subtypes
-#define GTSTRUCT_0(fn, en) struct GenTree##fn;
-#define GTSTRUCT_1(fn, en) struct GenTree##fn;
-#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
-#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
+#define GTSTRUCT_0(fn, en) struct GenTree##fn;
+#define GTSTRUCT_1(fn, en) struct GenTree##fn;
+#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
+#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
#define GTSTRUCT_N(fn, ...) struct GenTree##fn;
#include "gtstructs.h"
@@ -253,118 +259,177 @@ struct GenTreeArgList;
struct GenTree
{
- // We use GT_STRUCT_0 only for the category of simple ops.
-#define GTSTRUCT_0(fn, en) GenTree##fn* As##fn() \
- { \
- assert(this->OperIsSimple()); \
- return reinterpret_cast<GenTree##fn*>(this); \
- } \
- GenTree##fn& As##fn##Ref() { return *As##fn(); } \
- __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
-#define GTSTRUCT_1(fn, en) GenTree##fn* As##fn() \
- { \
- assert(this->gtOper == en); \
- return reinterpret_cast<GenTree##fn*>(this); \
- } \
- GenTree##fn& As##fn##Ref() { return *As##fn(); } \
- __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
-#define GTSTRUCT_2(fn, en, en2) GenTree##fn* As##fn() \
- { \
- assert(this->gtOper == en || this->gtOper == en2); \
- return reinterpret_cast<GenTree##fn*>(this); \
- } \
- GenTree##fn& As##fn##Ref() { return *As##fn(); } \
- __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
-#define GTSTRUCT_3(fn, en, en2, en3) GenTree##fn* As##fn() \
- { \
- assert(this->gtOper == en || this->gtOper == en2 || this->gtOper == en3); \
- return reinterpret_cast<GenTree##fn*>(this); \
- } \
- GenTree##fn& As##fn##Ref() { return *As##fn(); } \
- __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
-
-#define GTSTRUCT_4(fn, en, en2, en3, en4) GenTree##fn* As##fn() \
- { \
- assert(this->gtOper == en || this->gtOper == en2 || this->gtOper == en3 || this->gtOper == en4); \
- return reinterpret_cast<GenTree##fn*>(this); \
- } \
- GenTree##fn& As##fn##Ref() { return *As##fn(); } \
- __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
+// We use GT_STRUCT_0 only for the category of simple ops.
+#define GTSTRUCT_0(fn, en) \
+ GenTree##fn* As##fn() \
+ { \
+ assert(this->OperIsSimple()); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() \
+ { \
+ return *As##fn(); \
+ } \
+ __declspec(property(get = As##fn##Ref)) GenTree##fn& gt##fn;
+#define GTSTRUCT_1(fn, en) \
+ GenTree##fn* As##fn() \
+ { \
+ assert(this->gtOper == en); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() \
+ { \
+ return *As##fn(); \
+ } \
+ __declspec(property(get = As##fn##Ref)) GenTree##fn& gt##fn;
+#define GTSTRUCT_2(fn, en, en2) \
+ GenTree##fn* As##fn() \
+ { \
+ assert(this->gtOper == en || this->gtOper == en2); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() \
+ { \
+ return *As##fn(); \
+ } \
+ __declspec(property(get = As##fn##Ref)) GenTree##fn& gt##fn;
+#define GTSTRUCT_3(fn, en, en2, en3) \
+ GenTree##fn* As##fn() \
+ { \
+ assert(this->gtOper == en || this->gtOper == en2 || this->gtOper == en3); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() \
+ { \
+ return *As##fn(); \
+ } \
+ __declspec(property(get = As##fn##Ref)) GenTree##fn& gt##fn;
+
+#define GTSTRUCT_4(fn, en, en2, en3, en4) \
+ GenTree##fn* As##fn() \
+ { \
+ assert(this->gtOper == en || this->gtOper == en2 || this->gtOper == en3 || this->gtOper == en4); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() \
+ { \
+ return *As##fn(); \
+ } \
+ __declspec(property(get = As##fn##Ref)) GenTree##fn& gt##fn;
#ifdef DEBUG
// VC does not optimize out this loop in retail even though the value it computes is unused
// so we need a separate version for non-debug
-#define GTSTRUCT_N(fn, ...) GenTree##fn* As##fn() \
- { \
- genTreeOps validOps[] = {__VA_ARGS__}; \
- bool found = false; \
- for (unsigned i=0; i<ArrLen(validOps); i++) { \
- if (this->gtOper == validOps[i]) \
- { \
- found = true; \
- break; \
- } \
- } \
- assert(found); \
- return reinterpret_cast<GenTree##fn*>(this); \
- } \
- GenTree##fn& As##fn##Ref() { return *As##fn(); } \
- __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
+#define GTSTRUCT_N(fn, ...) \
+ GenTree##fn* As##fn() \
+ { \
+ genTreeOps validOps[] = {__VA_ARGS__}; \
+ bool found = false; \
+ for (unsigned i = 0; i < ArrLen(validOps); i++) \
+ { \
+ if (this->gtOper == validOps[i]) \
+ { \
+ found = true; \
+ break; \
+ } \
+ } \
+ assert(found); \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() \
+ { \
+ return *As##fn(); \
+ } \
+ __declspec(property(get = As##fn##Ref)) GenTree##fn& gt##fn;
#else
-#define GTSTRUCT_N(fn, ...) GenTree##fn* As##fn() \
- { \
- return reinterpret_cast<GenTree##fn*>(this); \
- } \
- GenTree##fn& As##fn##Ref() { return *As##fn(); } \
- __declspec(property(get=As##fn##Ref)) GenTree##fn& gt##fn;
+#define GTSTRUCT_N(fn, ...) \
+ GenTree##fn* As##fn() \
+ { \
+ return reinterpret_cast<GenTree##fn*>(this); \
+ } \
+ GenTree##fn& As##fn##Ref() \
+ { \
+ return *As##fn(); \
+ } \
+ __declspec(property(get = As##fn##Ref)) GenTree##fn& gt##fn;
#endif
#include "gtstructs.h"
- genTreeOps gtOper; // enum subtype BYTE
- var_types gtType; // enum subtype BYTE
+ genTreeOps gtOper; // enum subtype BYTE
+ var_types gtType; // enum subtype BYTE
- genTreeOps OperGet() const { return gtOper; }
- var_types TypeGet() const { return gtType; }
+ genTreeOps OperGet() const
+ {
+ return gtOper;
+ }
+ var_types TypeGet() const
+ {
+ return gtType;
+ }
#ifdef DEBUG
- genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
+ genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
#endif
#if FEATURE_ANYCSE
-#define NO_CSE (0)
+#define NO_CSE (0)
-#define IS_CSE_INDEX(x) (x != 0)
-#define IS_CSE_USE(x) (x > 0)
-#define IS_CSE_DEF(x) (x < 0)
+#define IS_CSE_INDEX(x) (x != 0)
+#define IS_CSE_USE(x) (x > 0)
+#define IS_CSE_DEF(x) (x < 0)
#define GET_CSE_INDEX(x) ((x > 0) ? x : -x)
-#define TO_CSE_DEF(x) (-x)
+#define TO_CSE_DEF(x) (-x)
- signed char gtCSEnum; // 0 or the CSE index (negated if def)
- // valid only for CSE expressions
+ signed char gtCSEnum; // 0 or the CSE index (negated if def)
+ // valid only for CSE expressions
#endif // FEATURE_ANYCSE
#if ASSERTION_PROP
- unsigned short gtAssertionNum; // 0 or Assertion table index
- // valid only for non-GT_STMT nodes
+ unsigned short gtAssertionNum; // 0 or Assertion table index
+ // valid only for non-GT_STMT nodes
- bool HasAssertion() const { return gtAssertionNum != 0; }
- void ClearAssertion() { gtAssertionNum = 0; }
+ bool HasAssertion() const
+ {
+ return gtAssertionNum != 0;
+ }
+ void ClearAssertion()
+ {
+ gtAssertionNum = 0;
+ }
- unsigned short GetAssertion() const { return gtAssertionNum; }
- void SetAssertion(unsigned short value) { assert((unsigned short)value == value); gtAssertionNum = (unsigned short)value; }
+ unsigned short GetAssertion() const
+ {
+ return gtAssertionNum;
+ }
+ void SetAssertion(unsigned short value)
+ {
+ assert((unsigned short)value == value);
+ gtAssertionNum = (unsigned short)value;
+ }
#endif
#if FEATURE_STACK_FP_X87
- unsigned char gtFPlvl; // x87 stack depth at this node
- void gtCopyFPlvl(GenTree * other) { gtFPlvl = other->gtFPlvl; }
- void gtSetFPlvl(unsigned level) { noway_assert(FitsIn<unsigned char>(level)); gtFPlvl = (unsigned char)level; }
-#else // FEATURE_STACK_FP_X87
- void gtCopyFPlvl(GenTree * other) { }
- void gtSetFPlvl(unsigned level) { }
+ unsigned char gtFPlvl; // x87 stack depth at this node
+ void gtCopyFPlvl(GenTree* other)
+ {
+ gtFPlvl = other->gtFPlvl;
+ }
+ void gtSetFPlvl(unsigned level)
+ {
+ noway_assert(FitsIn<unsigned char>(level));
+ gtFPlvl = (unsigned char)level;
+ }
+#else // FEATURE_STACK_FP_X87
+ void gtCopyFPlvl(GenTree* other)
+ {
+ }
+ void gtSetFPlvl(unsigned level)
+ {
+ }
#endif // FEATURE_STACK_FP_X87
//
@@ -372,7 +437,6 @@ struct GenTree
//
public:
-
#ifdef DEBUG
// You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
// Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
@@ -382,23 +446,29 @@ public:
bool gtCostsInitialized;
#endif // DEBUG
-#define MAX_COST UCHAR_MAX
-#define IND_COST_EX 3 // execution cost for an indirection
+#define MAX_COST UCHAR_MAX
+#define IND_COST_EX 3 // execution cost for an indirection
- __declspec(property(get=GetCostEx))
- unsigned char gtCostEx; // estimate of expression execution cost
+ __declspec(property(get = GetCostEx)) unsigned char gtCostEx; // estimate of expression execution cost
- __declspec(property(get=GetCostSz))
- unsigned char gtCostSz; // estimate of expression code size cost
+ __declspec(property(get = GetCostSz)) unsigned char gtCostSz; // estimate of expression code size cost
- unsigned char GetCostEx() const { assert(gtCostsInitialized); return _gtCostEx; }
- unsigned char GetCostSz() const { assert(gtCostsInitialized); return _gtCostSz; }
+ unsigned char GetCostEx() const
+ {
+ assert(gtCostsInitialized);
+ return _gtCostEx;
+ }
+ unsigned char GetCostSz() const
+ {
+ assert(gtCostsInitialized);
+ return _gtCostSz;
+ }
// Set the costs. They are always both set at the same time.
// Don't use the "put" property: force calling this function, to make it more obvious in the few places
// that set the values.
// Note that costs are only set in gtSetEvalOrder() and its callees.
- void SetCosts(unsigned costEx, unsigned costSz)
+ void SetCosts(unsigned costEx, unsigned costSz)
{
assert(costEx != (unsigned)-1); // looks bogus
assert(costSz != (unsigned)-1); // looks bogus
@@ -408,10 +478,12 @@ public:
_gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
}
- // Opimized copy function, to avoid the SetCosts() function comparisons, and make it more clear that a node copy is happening.
- void CopyCosts(const GenTree* const tree)
+    // Optimized copy function, to avoid the SetCosts() function comparisons, and make it more clear that a node copy is
+ // happening.
+ void CopyCosts(const GenTree* const tree)
{
- INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;) // If the 'tree' costs aren't initialized, we'll hit an assert below.
+ INDEBUG(gtCostsInitialized =
+ tree->gtCostsInitialized;) // If the 'tree' costs aren't initialized, we'll hit an assert below.
_gtCostEx = tree->gtCostEx;
_gtCostSz = tree->gtCostSz;
}
@@ -420,7 +492,7 @@ public:
// This is because the importer, for example, clones nodes, before these costs have been initialized.
// Note that we directly access the 'tree' costs, not going through the accessor functions (either
// directly or through the properties).
- void CopyRawCosts(const GenTree* const tree)
+ void CopyRawCosts(const GenTree* const tree)
{
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->_gtCostEx;
@@ -428,9 +500,8 @@ public:
}
private:
-
- unsigned char _gtCostEx; // estimate of expression execution cost
- unsigned char _gtCostSz; // estimate of expression code size cost
+ unsigned char _gtCostEx; // estimate of expression execution cost
+ unsigned char _gtCostSz; // estimate of expression code size cost
//
// Register or register pair number of the node.
@@ -441,10 +512,10 @@ private:
public:
enum genRegTag
{
- GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum/_gtRegPair
- GT_REGTAG_REG, // _gtRegNum has been assigned
+ GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum/_gtRegPair
+ GT_REGTAG_REG, // _gtRegNum has been assigned
#if CPU_LONG_USES_REGPAIR
- GT_REGTAG_REGPAIR // _gtRegPair has been assigned
+ GT_REGTAG_REGPAIR // _gtRegPair has been assigned
#endif
};
genRegTag GetRegTag() const
@@ -456,28 +527,25 @@ public:
#endif
return gtRegTag;
}
-private:
- genRegTag gtRegTag; // What is in _gtRegNum/_gtRegPair?
-#endif // DEBUG
private:
+ genRegTag gtRegTag; // What is in _gtRegNum/_gtRegPair?
+#endif // DEBUG
- union
- {
+private:
+ union {
// NOTE: After LSRA, one of these values may be valid even if GTF_REG_VAL is not set in gtFlags.
// They store the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA
// or _gtRegPair is set to REG_PAIR_NONE, depending on the node type.
- regNumberSmall _gtRegNum; // which register the value is in
- regPairNoSmall _gtRegPair; // which register pair the value is in
+ regNumberSmall _gtRegNum; // which register the value is in
+ regPairNoSmall _gtRegPair; // which register pair the value is in
};
public:
-
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
- __declspec(property(get=GetRegNum,put=SetRegNum))
- regNumber gtRegNum;
+ __declspec(property(get = GetRegNum, put = SetRegNum)) regNumber gtRegNum;
// for codegen purposes, is this node a subnode of its parent
bool isContained() const;
@@ -488,71 +556,88 @@ public:
bool isIndir() const;
- bool isContainedIntOrIImmed() const { return isContained() && IsCnsIntOrI(); }
+ bool isContainedIntOrIImmed() const
+ {
+ return isContained() && IsCnsIntOrI();
+ }
- bool isContainedFltOrDblImmed() const { return isContained() && (OperGet() == GT_CNS_DBL); }
+ bool isContainedFltOrDblImmed() const
+ {
+ return isContained() && (OperGet() == GT_CNS_DBL);
+ }
- bool isLclField() const { return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD; }
+ bool isLclField() const
+ {
+ return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD;
+ }
- bool isContainedLclField() const { return isContained() && isLclField(); }
+ bool isContainedLclField() const
+ {
+ return isContained() && isLclField();
+ }
- bool isContainedLclVar() const { return isContained() && (OperGet() == GT_LCL_VAR); }
+ bool isContainedLclVar() const
+ {
+ return isContained() && (OperGet() == GT_LCL_VAR);
+ }
bool isContainedSpillTemp() const;
// Indicates whether it is a memory op.
// Right now it includes Indir and LclField ops.
- bool isMemoryOp() const { return isIndir() || isLclField(); }
+ bool isMemoryOp() const
+ {
+ return isIndir() || isLclField();
+ }
- bool isContainedMemoryOp() const
- {
- return (isContained() && isMemoryOp()) || isContainedLclVar() || isContainedSpillTemp();
+ bool isContainedMemoryOp() const
+ {
+ return (isContained() && isMemoryOp()) || isContainedLclVar() || isContainedSpillTemp();
}
regNumber GetRegNum() const
{
- assert((gtRegTag == GT_REGTAG_REG) ||
- (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads undefined values
- regNumber reg = (regNumber) _gtRegNum;
- assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads undefined values
- (reg >= REG_FIRST &&
- reg <= REG_COUNT));
+ assert((gtRegTag == GT_REGTAG_REG) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case,
+ // and fix everyplace that reads undefined
+ // values
+ regNumber reg = (regNumber)_gtRegNum;
+ assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
+ // undefined values
+ (reg >= REG_FIRST && reg <= REG_COUNT));
return reg;
}
void SetRegNum(regNumber reg)
{
- assert(reg >= REG_FIRST &&
- reg <= REG_COUNT);
+ assert(reg >= REG_FIRST && reg <= REG_COUNT);
// Make sure the upper bits of _gtRegPair are clear
- _gtRegPair = (regPairNoSmall) 0;
- _gtRegNum = (regNumberSmall) reg;
+ _gtRegPair = (regPairNoSmall)0;
+ _gtRegNum = (regNumberSmall)reg;
INDEBUG(gtRegTag = GT_REGTAG_REG;)
assert(_gtRegNum == reg);
}
#if CPU_LONG_USES_REGPAIR
- __declspec(property(get=GetRegPair,put=SetRegPair))
- regPairNo gtRegPair;
+ __declspec(property(get = GetRegPair, put = SetRegPair)) regPairNo gtRegPair;
regPairNo GetRegPair() const
{
- assert((gtRegTag == GT_REGTAG_REGPAIR) ||
- (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads undefined values
- regPairNo regPair = (regPairNo) _gtRegPair;
- assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads undefined values
- (regPair >= REG_PAIR_FIRST &&
- regPair <= REG_PAIR_LAST) ||
- (regPair == REG_PAIR_NONE)); // allow initializing to an undefined value
+ assert((gtRegTag == GT_REGTAG_REGPAIR) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE
+ // case, and fix everyplace that reads
+ // undefined values
+ regPairNo regPair = (regPairNo)_gtRegPair;
+ assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
+ // undefined values
+ (regPair >= REG_PAIR_FIRST && regPair <= REG_PAIR_LAST) ||
+ (regPair == REG_PAIR_NONE)); // allow initializing to an undefined value
return regPair;
}
void SetRegPair(regPairNo regPair)
{
- assert((regPair >= REG_PAIR_FIRST &&
- regPair <= REG_PAIR_LAST) ||
- (regPair == REG_PAIR_NONE)); // allow initializing to an undefined value
- _gtRegPair = (regPairNoSmall) regPair;
+ assert((regPair >= REG_PAIR_FIRST && regPair <= REG_PAIR_LAST) ||
+ (regPair == REG_PAIR_NONE)); // allow initializing to an undefined value
+ _gtRegPair = (regPairNoSmall)regPair;
INDEBUG(gtRegTag = GT_REGTAG_REGPAIR;)
assert(_gtRegPair == regPair);
}
@@ -567,29 +652,29 @@ public:
regMaskTP gtGetRegMask() const;
- unsigned gtFlags; // see GTF_xxxx below
+ unsigned gtFlags; // see GTF_xxxx below
#if defined(DEBUG)
- unsigned gtDebugFlags; // see GTF_DEBUG_xxx below
-#endif // defined(DEBUG)
+ unsigned gtDebugFlags; // see GTF_DEBUG_xxx below
+#endif // defined(DEBUG)
- ValueNumPair gtVNPair;
+ ValueNumPair gtVNPair;
- regMaskSmall gtRsvdRegs; // set of fixed trashed registers
+ regMaskSmall gtRsvdRegs; // set of fixed trashed registers
#ifdef LEGACY_BACKEND
- regMaskSmall gtUsedRegs; // set of used (trashed) registers
-#endif // LEGACY_BACKEND
+ regMaskSmall gtUsedRegs; // set of used (trashed) registers
+#endif // LEGACY_BACKEND
#ifndef LEGACY_BACKEND
- TreeNodeInfo gtLsraInfo;
+ TreeNodeInfo gtLsraInfo;
#endif // !LEGACY_BACKEND
- void SetVNsFromNode(GenTreePtr tree)
+ void SetVNsFromNode(GenTreePtr tree)
{
gtVNPair = tree->gtVNPair;
}
- ValueNum GetVN(ValueNumKind vnk) const
+ ValueNum GetVN(ValueNumKind vnk) const
{
if (vnk == VNK_Liberal)
{
@@ -601,7 +686,7 @@ public:
return gtVNPair.GetConservative();
}
}
- void SetVN(ValueNumKind vnk, ValueNum vn)
+ void SetVN(ValueNumKind vnk, ValueNum vn)
{
if (vnk == VNK_Liberal)
{
@@ -613,289 +698,290 @@ public:
return gtVNPair.SetConservative(vn);
}
}
- void SetVNs(ValueNumPair vnp)
+ void SetVNs(ValueNumPair vnp)
{
gtVNPair = vnp;
}
- void ClearVN()
+ void ClearVN()
{
- gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
+ gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
}
- //---------------------------------------------------------------------
- // The first set of flags can be used with a large set of nodes, and
- // thus they must all have distinct values. That is, one can test any
- // expression node for one of these flags.
- //---------------------------------------------------------------------
+//---------------------------------------------------------------------
+// The first set of flags can be used with a large set of nodes, and
+// thus they must all have distinct values. That is, one can test any
+// expression node for one of these flags.
+//---------------------------------------------------------------------
- #define GTF_ASG 0x00000001 // sub-expression contains an assignment
- #define GTF_CALL 0x00000002 // sub-expression contains a func. call
- #define GTF_EXCEPT 0x00000004 // sub-expression might throw an exception
- #define GTF_GLOB_REF 0x00000008 // sub-expression uses global variable(s)
- #define GTF_ORDER_SIDEEFF 0x00000010 // sub-expression has a re-ordering side effect
-
- // If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
- // otherwise the C# (run csc /o-)
- // var v = side_eff_operation
- // with no use of v will drop your tree on the floor.
- #define GTF_PERSISTENT_SIDE_EFFECTS (GTF_ASG|GTF_CALL)
- #define GTF_SIDE_EFFECT (GTF_PERSISTENT_SIDE_EFFECTS|GTF_EXCEPT)
- #define GTF_GLOB_EFFECT (GTF_SIDE_EFFECT|GTF_GLOB_REF)
- #define GTF_ALL_EFFECT (GTF_GLOB_EFFECT|GTF_ORDER_SIDEEFF)
-
- // The extra flag GTF_IS_IN_CSE is used to tell the consumer of these flags
- // that we are calling in the context of performing a CSE, thus we
- // should allow the run-once side effects of running a class constructor.
- //
- // The only requirement of this flag is that it not overlap any of the
- // side-effect flags. The actual bit used is otherwise arbitrary.
- #define GTF_IS_IN_CSE GTF_MAKE_CSE
- #define GTF_PERSISTENT_SIDE_EFFECTS_IN_CSE (GTF_ASG|GTF_CALL|GTF_IS_IN_CSE)
-
- // Can any side-effects be observed externally, say by a caller method?
- // For assignments, only assignments to global memory can be observed
- // externally, whereas simple assignments to local variables can not.
- //
- // Be careful when using this inside a "try" protected region as the
- // order of assignments to local variables would need to be preserved
- // wrt side effects if the variables are alive on entry to the
- // "catch/finally" region. In such cases, even assignments to locals
- // will have to be restricted.
- #define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
- (((flags) & (GTF_CALL|GTF_EXCEPT)) || \
- (((flags) & (GTF_ASG|GTF_GLOB_REF)) == (GTF_ASG|GTF_GLOB_REF)))
-
- #define GTF_REVERSE_OPS 0x00000020 // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
- #define GTF_REG_VAL 0x00000040 // operand is sitting in a register (or part of a TYP_LONG operand is sitting in a register)
-
- #define GTF_SPILLED 0x00000080 // the value has been spilled
+#define GTF_ASG 0x00000001 // sub-expression contains an assignment
+#define GTF_CALL 0x00000002 // sub-expression contains a func. call
+#define GTF_EXCEPT 0x00000004 // sub-expression might throw an exception
+#define GTF_GLOB_REF 0x00000008 // sub-expression uses global variable(s)
+#define GTF_ORDER_SIDEEFF 0x00000010 // sub-expression has a re-ordering side effect
+
+// If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
+// otherwise the C# (run csc /o-)
+// var v = side_eff_operation
+// with no use of v will drop your tree on the floor.
+#define GTF_PERSISTENT_SIDE_EFFECTS (GTF_ASG | GTF_CALL)
+#define GTF_SIDE_EFFECT (GTF_PERSISTENT_SIDE_EFFECTS | GTF_EXCEPT)
+#define GTF_GLOB_EFFECT (GTF_SIDE_EFFECT | GTF_GLOB_REF)
+#define GTF_ALL_EFFECT (GTF_GLOB_EFFECT | GTF_ORDER_SIDEEFF)
+
+// The extra flag GTF_IS_IN_CSE is used to tell the consumer of these flags
+// that we are calling in the context of performing a CSE, thus we
+// should allow the run-once side effects of running a class constructor.
+//
+// The only requirement of this flag is that it not overlap any of the
+// side-effect flags. The actual bit used is otherwise arbitrary.
+#define GTF_IS_IN_CSE GTF_MAKE_CSE
+#define GTF_PERSISTENT_SIDE_EFFECTS_IN_CSE (GTF_ASG | GTF_CALL | GTF_IS_IN_CSE)
+
+// Can any side-effects be observed externally, say by a caller method?
+// For assignments, only assignments to global memory can be observed
+// externally, whereas simple assignments to local variables can not.
+//
+// Be careful when using this inside a "try" protected region as the
+// order of assignments to local variables would need to be preserved
+// wrt side effects if the variables are alive on entry to the
+// "catch/finally" region. In such cases, even assignments to locals
+// will have to be restricted.
+#define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
+ (((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF)))
+
+#define GTF_REVERSE_OPS \
+ 0x00000020 // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated
+ // second)
+#define GTF_REG_VAL \
+ 0x00000040 // operand is sitting in a register (or part of a TYP_LONG operand is sitting in a register)
+
+#define GTF_SPILLED 0x00000080 // the value has been spilled
#ifdef LEGACY_BACKEND
- #define GTF_SPILLED_OPER 0x00000100 // op1 has been spilled
- #define GTF_SPILLED_OP2 0x00000200 // op2 has been spilled
+#define GTF_SPILLED_OPER 0x00000100 // op1 has been spilled
+#define GTF_SPILLED_OP2 0x00000200 // op2 has been spilled
#else
- #define GTF_NOREG_AT_USE 0x00000100 // tree node is in memory at the point of use
-#endif // LEGACY_BACKEND
+#define GTF_NOREG_AT_USE 0x00000100 // tree node is in memory at the point of use
+#endif // LEGACY_BACKEND
- #define GTF_ZSF_SET 0x00000400 // the zero(ZF) and sign(SF) flags set to the operand
+#define GTF_ZSF_SET 0x00000400 // the zero(ZF) and sign(SF) flags set to the operand
#if FEATURE_SET_FLAGS
- #define GTF_SET_FLAGS 0x00000800 // Requires that codegen for this node set the flags
- // Use gtSetFlags() to check this flags
+#define GTF_SET_FLAGS 0x00000800 // Requires that codegen for this node set the flags
+ // Use gtSetFlags() to check this flags
#endif
- #define GTF_IND_NONFAULTING 0x00000800 // An indir that cannot fault. GTF_SET_FLAGS is not used on indirs
-
- #define GTF_MAKE_CSE 0x00002000 // Hoisted Expression: try hard to make this into CSE (see optPerformHoistExpr)
- #define GTF_DONT_CSE 0x00004000 // don't bother CSE'ing this expr
- #define GTF_COLON_COND 0x00008000 // this node is conditionally executed (part of ? :)
-
- #define GTF_NODE_MASK (GTF_COLON_COND)
-
- #define GTF_BOOLEAN 0x00040000 // value is known to be 0/1
-
- #define GTF_SMALL_OK 0x00080000 // actual small int sufficient
-
- #define GTF_UNSIGNED 0x00100000 // with GT_CAST: the source operand is an unsigned type
- // with operators: the specified node is an unsigned operator
-
- #define GTF_LATE_ARG 0x00200000 // the specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
-
- #define GTF_SPILL 0x00400000 // needs to be spilled here
- #define GTF_SPILL_HIGH 0x00040000 // shared with GTF_BOOLEAN
-
- #define GTF_COMMON_MASK 0x007FFFFF // mask of all the flags above
-
- #define GTF_REUSE_REG_VAL 0x00800000 // This is set by the register allocator on nodes whose value already exists in the
- // register assigned to this node, so the code generator does not have to generate
- // code to produce the value.
- // It is currently used only on constant nodes.
- // It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
- // it is not needed for lclVars and is highly unlikely to be useful for indir nodes
-
- //---------------------------------------------------------------------
- // The following flags can be used only with a small set of nodes, and
- // thus their values need not be distinct (other than within the set
- // that goes with a particular node/nodes, of course). That is, one can
- // only test for one of these flags if the 'gtOper' value is tested as
- // well to make sure it's the right operator for the particular flag.
- //---------------------------------------------------------------------
-
- // NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags, because
- // GT_LCL_VAR nodes may be changed to GT_REG_VAR nodes without resetting
- // the flags. These are also used by GT_LCL_FLD.
- #define GTF_VAR_DEF 0x80000000 // GT_LCL_VAR -- this is a definition
- #define GTF_VAR_USEASG 0x40000000 // GT_LCL_VAR -- this is a use/def for a x<op>=y
- #define GTF_VAR_USEDEF 0x20000000 // GT_LCL_VAR -- this is a use/def as in x=x+y (only the lhs x is tagged)
- #define GTF_VAR_CAST 0x10000000 // GT_LCL_VAR -- has been explictly cast (variable node may not be type of local)
- #define GTF_VAR_ITERATOR 0x08000000 // GT_LCL_VAR -- this is a iterator reference in the loop condition
- #define GTF_VAR_CLONED 0x01000000 // GT_LCL_VAR -- this node has been cloned or is a clone
- // Relevant for inlining optimizations (see fgInlinePrependStatements)
-
- // TODO-Cleanup: Currently, GTF_REG_BIRTH is used only by stackfp
- // We should consider using it more generally for VAR_BIRTH, instead of
- // GTF_VAR_DEF && !GTF_VAR_USEASG
- #define GTF_REG_BIRTH 0x04000000 // GT_REG_VAR -- enregistered variable born here
- #define GTF_VAR_DEATH 0x02000000 // GT_LCL_VAR, GT_REG_VAR -- variable dies here (last use)
-
- #define GTF_VAR_ARR_INDEX 0x00000020 // The variable is part of (the index portion of) an array index expression.
- // Shares a value with GTF_REVERSE_OPS, which is meaningless for local var.
-
- #define GTF_LIVENESS_MASK (GTF_VAR_DEF|GTF_VAR_USEASG|GTF_VAR_USEDEF|GTF_REG_BIRTH|GTF_VAR_DEATH)
-
- #define GTF_CALL_UNMANAGED 0x80000000 // GT_CALL -- direct call to unmanaged code
- #define GTF_CALL_INLINE_CANDIDATE 0x40000000 // GT_CALL -- this call has been marked as an inline candidate
-
- #define GTF_CALL_VIRT_KIND_MASK 0x30000000
- #define GTF_CALL_NONVIRT 0x00000000 // GT_CALL -- a non virtual call
- #define GTF_CALL_VIRT_STUB 0x10000000 // GT_CALL -- a stub-dispatch virtual call
- #define GTF_CALL_VIRT_VTABLE 0x20000000 // GT_CALL -- a vtable-based virtual call
-
- #define GTF_CALL_NULLCHECK 0x08000000 // GT_CALL -- must check instance pointer for null
- #define GTF_CALL_POP_ARGS 0x04000000 // GT_CALL -- caller pop arguments?
- #define GTF_CALL_HOISTABLE 0x02000000 // GT_CALL -- call is hoistable
- #define GTF_CALL_REG_SAVE 0x01000000 // GT_CALL -- This call preserves all integer regs
- // For additional flags for GT_CALL node see GTF_CALL_M_
-
- #define GTF_NOP_DEATH 0x40000000 // GT_NOP -- operand dies here
-
- #define GTF_FLD_NULLCHECK 0x80000000 // GT_FIELD -- need to nullcheck the "this" pointer
- #define GTF_FLD_VOLATILE 0x40000000 // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
-
- #define GTF_INX_RNGCHK 0x80000000 // GT_INDEX -- the array reference should be range-checked.
- #define GTF_INX_REFARR_LAYOUT 0x20000000 // GT_INDEX -- same as GTF_IND_REFARR_LAYOUT
- #define GTF_INX_STRING_LAYOUT 0x40000000 // GT_INDEX -- this uses the special string array layout
-
- #define GTF_IND_VOLATILE 0x40000000 // GT_IND -- the load or store must use volatile sematics (this is a nop
- // on X86)
- #define GTF_IND_REFARR_LAYOUT 0x20000000 // GT_IND -- the array holds object refs (only effects layout of Arrays)
- #define GTF_IND_TGTANYWHERE 0x10000000 // GT_IND -- the target could be anywhere
- #define GTF_IND_TLS_REF 0x08000000 // GT_IND -- the target is accessed via TLS
- #define GTF_IND_ASG_LHS 0x04000000 // GT_IND -- this GT_IND node is (the effective val) of the LHS of an
- // assignment; don't evaluate it independently.
- #define GTF_IND_UNALIGNED 0x02000000 // GT_IND -- the load or store is unaligned (we assume worst case
- // alignment of 1 byte)
- #define GTF_IND_INVARIANT 0x01000000 // GT_IND -- the target is invariant (a prejit indirection)
- #define GTF_IND_ARR_LEN 0x80000000 // GT_IND -- the indirection represents an array length (of the REF
- // contribution to its argument).
- #define GTF_IND_ARR_INDEX 0x00800000 // GT_IND -- the indirection represents an (SZ) array index
-
- #define GTF_IND_FLAGS (GTF_IND_VOLATILE|GTF_IND_REFARR_LAYOUT|GTF_IND_TGTANYWHERE|GTF_IND_NONFAULTING|\
- GTF_IND_TLS_REF|GTF_IND_UNALIGNED|GTF_IND_INVARIANT|GTF_IND_ARR_INDEX)
-
- #define GTF_CLS_VAR_ASG_LHS 0x04000000 // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS
- // of an assignment; don't evaluate it independently.
-
- #define GTF_ADDR_ONSTACK 0x80000000 // GT_ADDR -- this expression is guaranteed to be on the stack
-
-
- #define GTF_ADDRMODE_NO_CSE 0x80000000 // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
- // addressing mode
-
- #define GTF_MUL_64RSLT 0x40000000 // GT_MUL -- produce 64-bit result
-
- #define GTF_MOD_INT_RESULT 0x80000000 // GT_MOD, -- the real tree represented by this
- // GT_UMOD node evaluates to an int even though
- // its type is long. The result is
- // placed in the low member of the
- // reg pair
-
- #define GTF_RELOP_NAN_UN 0x80000000 // GT_<relop> -- Is branch taken if ops are NaN?
- #define GTF_RELOP_JMP_USED 0x40000000 // GT_<relop> -- result of compare used for jump or ?:
- #define GTF_RELOP_QMARK 0x20000000 // GT_<relop> -- the node is the condition for ?:
- #define GTF_RELOP_SMALL 0x10000000 // GT_<relop> -- We should use a byte or short sized compare (op1->gtType
- // is the small type)
- #define GTF_RELOP_ZTT 0x08000000 // GT_<relop> -- Loop test cloned for converting while-loops into do-while
- // with explicit "loop test" in the header block.
-
- #define GTF_QMARK_CAST_INSTOF 0x80000000 // GT_QMARK -- Is this a top (not nested) level qmark created for
- // castclass or instanceof?
-
- #define GTF_BOX_VALUE 0x80000000 // GT_BOX -- "box" is on a value type
-
- #define GTF_ICON_HDL_MASK 0xF0000000 // Bits used by handle types below
-
- #define GTF_ICON_SCOPE_HDL 0x10000000 // GT_CNS_INT -- constant is a scope handle
- #define GTF_ICON_CLASS_HDL 0x20000000 // GT_CNS_INT -- constant is a class handle
- #define GTF_ICON_METHOD_HDL 0x30000000 // GT_CNS_INT -- constant is a method handle
- #define GTF_ICON_FIELD_HDL 0x40000000 // GT_CNS_INT -- constant is a field handle
- #define GTF_ICON_STATIC_HDL 0x50000000 // GT_CNS_INT -- constant is a handle to static data
- #define GTF_ICON_STR_HDL 0x60000000 // GT_CNS_INT -- constant is a string handle
- #define GTF_ICON_PSTR_HDL 0x70000000 // GT_CNS_INT -- constant is a ptr to a string handle
- #define GTF_ICON_PTR_HDL 0x80000000 // GT_CNS_INT -- constant is a ldptr handle
- #define GTF_ICON_VARG_HDL 0x90000000 // GT_CNS_INT -- constant is a var arg cookie handle
- #define GTF_ICON_PINVKI_HDL 0xA0000000 // GT_CNS_INT -- constant is a pinvoke calli handle
- #define GTF_ICON_TOKEN_HDL 0xB0000000 // GT_CNS_INT -- constant is a token handle
- #define GTF_ICON_TLS_HDL 0xC0000000 // GT_CNS_INT -- constant is a TLS ref with offset
- #define GTF_ICON_FTN_ADDR 0xD0000000 // GT_CNS_INT -- constant is a function address
- #define GTF_ICON_CIDMID_HDL 0xE0000000 // GT_CNS_INT -- constant is a class or module ID handle
- #define GTF_ICON_BBC_PTR 0xF0000000 // GT_CNS_INT -- constant is a basic block count pointer
-
- #define GTF_ICON_FIELD_OFF 0x08000000 // GT_CNS_INT -- constant is a field offset
-
- #define GTF_BLK_HASGCPTR 0x80000000 // GT_COPYBLK -- This struct copy will copy GC Pointers
- #define GTF_BLK_VOLATILE 0x40000000 // GT_INITBLK/GT_COPYBLK -- is a volatile block operation
- #define GTF_BLK_UNALIGNED 0x02000000 // GT_INITBLK/GT_COPYBLK -- is an unaligned block operation
-
- #define GTF_OVERFLOW 0x10000000 // GT_ADD, GT_SUB, GT_MUL, - Need overflow check
- // GT_ASG_ADD, GT_ASG_SUB,
- // GT_CAST
- // Use gtOverflow(Ex)() to check this flag
+#define GTF_IND_NONFAULTING 0x00000800 // An indir that cannot fault. GTF_SET_FLAGS is not used on indirs
+
+#define GTF_MAKE_CSE 0x00002000 // Hoisted Expression: try hard to make this into CSE (see optPerformHoistExpr)
+#define GTF_DONT_CSE 0x00004000 // don't bother CSE'ing this expr
+#define GTF_COLON_COND 0x00008000 // this node is conditionally executed (part of ? :)
+
+#define GTF_NODE_MASK (GTF_COLON_COND)
+
+#define GTF_BOOLEAN 0x00040000 // value is known to be 0/1
+
+#define GTF_SMALL_OK 0x00080000 // actual small int sufficient
+
+#define GTF_UNSIGNED 0x00100000 // with GT_CAST: the source operand is an unsigned type
+ // with operators: the specified node is an unsigned operator
+
+#define GTF_LATE_ARG \
+ 0x00200000 // the specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
+
+#define GTF_SPILL 0x00400000 // needs to be spilled here
+#define GTF_SPILL_HIGH 0x00040000 // shared with GTF_BOOLEAN
+
+#define GTF_COMMON_MASK 0x007FFFFF // mask of all the flags above
+
+#define GTF_REUSE_REG_VAL 0x00800000 // This is set by the register allocator on nodes whose value already exists in the
+ // register assigned to this node, so the code generator does not have to generate
+ // code to produce the value.
+ // It is currently used only on constant nodes.
+// It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
+// it is not needed for lclVars and is highly unlikely to be useful for indir nodes
+
+//---------------------------------------------------------------------
+// The following flags can be used only with a small set of nodes, and
+// thus their values need not be distinct (other than within the set
+// that goes with a particular node/nodes, of course). That is, one can
+// only test for one of these flags if the 'gtOper' value is tested as
+// well to make sure it's the right operator for the particular flag.
+//---------------------------------------------------------------------
+
+// NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags, because
+// GT_LCL_VAR nodes may be changed to GT_REG_VAR nodes without resetting
+// the flags. These are also used by GT_LCL_FLD.
+#define GTF_VAR_DEF 0x80000000 // GT_LCL_VAR -- this is a definition
+#define GTF_VAR_USEASG 0x40000000 // GT_LCL_VAR -- this is a use/def for a x<op>=y
+#define GTF_VAR_USEDEF 0x20000000 // GT_LCL_VAR -- this is a use/def as in x=x+y (only the lhs x is tagged)
+#define GTF_VAR_CAST 0x10000000 // GT_LCL_VAR -- has been explicitly cast (variable node may not be type of local)
+#define GTF_VAR_ITERATOR 0x08000000 // GT_LCL_VAR -- this is an iterator reference in the loop condition
+#define GTF_VAR_CLONED 0x01000000 // GT_LCL_VAR -- this node has been cloned or is a clone
+ // Relevant for inlining optimizations (see fgInlinePrependStatements)
+
+// TODO-Cleanup: Currently, GTF_REG_BIRTH is used only by stackfp
+// We should consider using it more generally for VAR_BIRTH, instead of
+// GTF_VAR_DEF && !GTF_VAR_USEASG
+#define GTF_REG_BIRTH 0x04000000 // GT_REG_VAR -- enregistered variable born here
+#define GTF_VAR_DEATH 0x02000000 // GT_LCL_VAR, GT_REG_VAR -- variable dies here (last use)
+
+#define GTF_VAR_ARR_INDEX 0x00000020 // The variable is part of (the index portion of) an array index expression.
+ // Shares a value with GTF_REVERSE_OPS, which is meaningless for local var.
+
+#define GTF_LIVENESS_MASK (GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_USEDEF | GTF_REG_BIRTH | GTF_VAR_DEATH)
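Because the per-operator flag values above are reused across unrelated node kinds, a flag test is only meaningful once gtOper has been checked. A minimal sketch of the pattern (hypothetical helper, assuming a GenTree* as declared in this header):

    // GTF_VAR_DEF shares its bit value with flags of other operators, so the
    // operator check must come first.
    bool IsLclVarDef(GenTree* tree)
    {
        return (tree->gtOper == GT_LCL_VAR) && ((tree->gtFlags & GTF_VAR_DEF) != 0);
    }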
+
+#define GTF_CALL_UNMANAGED 0x80000000 // GT_CALL -- direct call to unmanaged code
+#define GTF_CALL_INLINE_CANDIDATE 0x40000000 // GT_CALL -- this call has been marked as an inline candidate
+
+#define GTF_CALL_VIRT_KIND_MASK 0x30000000
+#define GTF_CALL_NONVIRT 0x00000000 // GT_CALL -- a non virtual call
+#define GTF_CALL_VIRT_STUB 0x10000000 // GT_CALL -- a stub-dispatch virtual call
+#define GTF_CALL_VIRT_VTABLE 0x20000000 // GT_CALL -- a vtable-based virtual call
+
+#define GTF_CALL_NULLCHECK 0x08000000 // GT_CALL -- must check instance pointer for null
+#define GTF_CALL_POP_ARGS 0x04000000 // GT_CALL -- caller pop arguments?
+#define GTF_CALL_HOISTABLE 0x02000000 // GT_CALL -- call is hoistable
+#define GTF_CALL_REG_SAVE 0x01000000 // GT_CALL -- This call preserves all integer regs
+ // For additional flags for GT_CALL node see GTF_CALL_M_
+
+#define GTF_NOP_DEATH 0x40000000 // GT_NOP -- operand dies here
+
+#define GTF_FLD_NULLCHECK 0x80000000 // GT_FIELD -- need to nullcheck the "this" pointer
+#define GTF_FLD_VOLATILE 0x40000000 // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
+
+#define GTF_INX_RNGCHK 0x80000000 // GT_INDEX -- the array reference should be range-checked.
+#define GTF_INX_REFARR_LAYOUT 0x20000000 // GT_INDEX -- same as GTF_IND_REFARR_LAYOUT
+#define GTF_INX_STRING_LAYOUT 0x40000000 // GT_INDEX -- this uses the special string array layout
+
+#define GTF_IND_VOLATILE 0x40000000 // GT_IND -- the load or store must use volatile semantics (this is a nop
+ // on X86)
+#define GTF_IND_REFARR_LAYOUT 0x20000000 // GT_IND -- the array holds object refs (only affects layout of Arrays)
+#define GTF_IND_TGTANYWHERE 0x10000000 // GT_IND -- the target could be anywhere
+#define GTF_IND_TLS_REF 0x08000000 // GT_IND -- the target is accessed via TLS
+#define GTF_IND_ASG_LHS 0x04000000 // GT_IND -- this GT_IND node is (the effective val) of the LHS of an
+ // assignment; don't evaluate it independently.
+#define GTF_IND_UNALIGNED 0x02000000 // GT_IND -- the load or store is unaligned (we assume worst case
+ // alignment of 1 byte)
+#define GTF_IND_INVARIANT 0x01000000 // GT_IND -- the target is invariant (a prejit indirection)
+#define GTF_IND_ARR_LEN 0x80000000 // GT_IND -- the indirection represents an array length (of the REF
+ // contribution to its argument).
+#define GTF_IND_ARR_INDEX 0x00800000 // GT_IND -- the indirection represents an (SZ) array index
+
+#define GTF_IND_FLAGS \
+ (GTF_IND_VOLATILE | GTF_IND_REFARR_LAYOUT | GTF_IND_TGTANYWHERE | GTF_IND_NONFAULTING | GTF_IND_TLS_REF | \
+ GTF_IND_UNALIGNED | GTF_IND_INVARIANT | GTF_IND_ARR_INDEX)
+
+#define GTF_CLS_VAR_ASG_LHS 0x04000000 // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS
+ // of an assignment; don't evaluate it independently.
+
+#define GTF_ADDR_ONSTACK 0x80000000 // GT_ADDR -- this expression is guaranteed to be on the stack
+
+#define GTF_ADDRMODE_NO_CSE 0x80000000 // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
+ // addressing mode
+
+#define GTF_MUL_64RSLT 0x40000000 // GT_MUL -- produce 64-bit result
+
+#define GTF_MOD_INT_RESULT 0x80000000 // GT_MOD, -- the real tree represented by this
+ // GT_UMOD node evaluates to an int even though
+ // its type is long. The result is
+ // placed in the low member of the
+ // reg pair
+
+#define GTF_RELOP_NAN_UN 0x80000000 // GT_<relop> -- Is branch taken if ops are NaN?
+#define GTF_RELOP_JMP_USED 0x40000000 // GT_<relop> -- result of compare used for jump or ?:
+#define GTF_RELOP_QMARK 0x20000000 // GT_<relop> -- the node is the condition for ?:
+#define GTF_RELOP_SMALL 0x10000000 // GT_<relop> -- We should use a byte or short sized compare (op1->gtType
+ // is the small type)
+#define GTF_RELOP_ZTT 0x08000000 // GT_<relop> -- Loop test cloned for converting while-loops into do-while
+ // with explicit "loop test" in the header block.
+
+#define GTF_QMARK_CAST_INSTOF 0x80000000 // GT_QMARK -- Is this a top (not nested) level qmark created for
+ // castclass or instanceof?
+
+#define GTF_BOX_VALUE 0x80000000 // GT_BOX -- "box" is on a value type
+
+#define GTF_ICON_HDL_MASK 0xF0000000 // Bits used by handle types below
+
+#define GTF_ICON_SCOPE_HDL 0x10000000 // GT_CNS_INT -- constant is a scope handle
+#define GTF_ICON_CLASS_HDL 0x20000000 // GT_CNS_INT -- constant is a class handle
+#define GTF_ICON_METHOD_HDL 0x30000000 // GT_CNS_INT -- constant is a method handle
+#define GTF_ICON_FIELD_HDL 0x40000000 // GT_CNS_INT -- constant is a field handle
+#define GTF_ICON_STATIC_HDL 0x50000000 // GT_CNS_INT -- constant is a handle to static data
+#define GTF_ICON_STR_HDL 0x60000000 // GT_CNS_INT -- constant is a string handle
+#define GTF_ICON_PSTR_HDL 0x70000000 // GT_CNS_INT -- constant is a ptr to a string handle
+#define GTF_ICON_PTR_HDL 0x80000000 // GT_CNS_INT -- constant is a ldptr handle
+#define GTF_ICON_VARG_HDL 0x90000000 // GT_CNS_INT -- constant is a var arg cookie handle
+#define GTF_ICON_PINVKI_HDL 0xA0000000 // GT_CNS_INT -- constant is a pinvoke calli handle
+#define GTF_ICON_TOKEN_HDL 0xB0000000 // GT_CNS_INT -- constant is a token handle
+#define GTF_ICON_TLS_HDL 0xC0000000 // GT_CNS_INT -- constant is a TLS ref with offset
+#define GTF_ICON_FTN_ADDR 0xD0000000 // GT_CNS_INT -- constant is a function address
+#define GTF_ICON_CIDMID_HDL 0xE0000000 // GT_CNS_INT -- constant is a class or module ID handle
+#define GTF_ICON_BBC_PTR 0xF0000000 // GT_CNS_INT -- constant is a basic block count pointer
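The handle kinds above are not independent bits: they form a 4-bit code stored in the field selected by GTF_ICON_HDL_MASK, so the whole field is extracted and compared. A short sketch (illustrative; 'con' stands for some GT_CNS_INT node):

    unsigned kind = con->gtFlags & GTF_ICON_HDL_MASK;  // zero means "not a handle"
    bool isClassHandle = (kind == GTF_ICON_CLASS_HDL); // the same comparison IsIconHandle(GTF_ICON_CLASS_HDL) performs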
+
+#define GTF_ICON_FIELD_OFF 0x08000000 // GT_CNS_INT -- constant is a field offset
+
+#define GTF_BLK_HASGCPTR 0x80000000 // GT_COPYBLK -- This struct copy will copy GC Pointers
+#define GTF_BLK_VOLATILE 0x40000000 // GT_INITBLK/GT_COPYBLK -- is a volatile block operation
+#define GTF_BLK_UNALIGNED 0x02000000 // GT_INITBLK/GT_COPYBLK -- is an unaligned block operation
- #define GTF_NO_OP_NO 0x80000000 // GT_NO_OP --Have the codegenerator generate a special nop
-
- #define GTF_ARR_BOUND_INBND 0x80000000 // GT_ARR_BOUNDS_CHECK -- have proved this check is always in-bounds
+#define GTF_OVERFLOW 0x10000000 // GT_ADD, GT_SUB, GT_MUL, - Need overflow check
+ // GT_ASG_ADD, GT_ASG_SUB,
+ // GT_CAST
+ // Use gtOverflow(Ex)() to check this flag
+
+#define GTF_NO_OP_NO 0x80000000 // GT_NO_OP -- have the code generator generate a special nop
+
+#define GTF_ARR_BOUND_INBND 0x80000000 // GT_ARR_BOUNDS_CHECK -- have proved this check is always in-bounds
+
+#define GTF_ARRLEN_ARR_IDX 0x80000000 // GT_ARR_LENGTH -- Length which feeds into an array index expression
+
+//----------------------------------------------------------------
+
+#define GTF_STMT_CMPADD 0x80000000 // GT_STMT -- added by compiler
+#define GTF_STMT_HAS_CSE 0x40000000 // GT_STMT -- CSE def or use was substituted
+#define GTF_STMT_TOP_LEVEL 0x20000000 // GT_STMT -- Top-level statement -
+ // true iff gtStmtList->gtPrev == nullptr
+ // True for all stmts when in FGOrderTree
+#define GTF_STMT_SKIP_LOWER 0x10000000 // GT_STMT -- Skip lowering if we already lowered an embedded stmt.
- #define GTF_ARRLEN_ARR_IDX 0x80000000 // GT_ARR_LENGTH -- Length which feeds into an array index expression
-
- //----------------------------------------------------------------
-
- #define GTF_STMT_CMPADD 0x80000000 // GT_STMT -- added by compiler
- #define GTF_STMT_HAS_CSE 0x40000000 // GT_STMT -- CSE def or use was subsituted
- #define GTF_STMT_TOP_LEVEL 0x20000000 // GT_STMT -- Top-level statement -
- // true iff gtStmtList->gtPrev == nullptr
- // True for all stmts when in FGOrderTree
- #define GTF_STMT_SKIP_LOWER 0x10000000 // GT_STMT -- Skip lowering if we already lowered an embedded stmt.
-
- //----------------------------------------------------------------
+//----------------------------------------------------------------
#if defined(DEBUG)
- #define GTF_DEBUG_NONE 0x00000000 // No debug flags.
+#define GTF_DEBUG_NONE 0x00000000 // No debug flags.
- #define GTF_DEBUG_NODE_MORPHED 0x00000001 // the node has been morphed (in the global morphing phase)
- #define GTF_DEBUG_NODE_SMALL 0x00000002
- #define GTF_DEBUG_NODE_LARGE 0x00000004
+#define GTF_DEBUG_NODE_MORPHED 0x00000001 // the node has been morphed (in the global morphing phase)
+#define GTF_DEBUG_NODE_SMALL 0x00000002
+#define GTF_DEBUG_NODE_LARGE 0x00000004
- #define GTF_DEBUG_NODE_MASK 0x00000007 // These flags are all node (rather than operation) properties.
+#define GTF_DEBUG_NODE_MASK 0x00000007 // These flags are all node (rather than operation) properties.
- #define GTF_DEBUG_VAR_CSE_REF 0x00800000 // GT_LCL_VAR -- This is a CSE LCL_VAR node
-#endif // defined(DEBUG)
+#define GTF_DEBUG_VAR_CSE_REF 0x00800000 // GT_LCL_VAR -- This is a CSE LCL_VAR node
+#endif // defined(DEBUG)
- GenTreePtr gtNext;
- GenTreePtr gtPrev;
+ GenTreePtr gtNext;
+ GenTreePtr gtPrev;
#ifdef DEBUG
- unsigned gtTreeID;
- unsigned gtSeqNum; // liveness traversal order within the current statement
+ unsigned gtTreeID;
+ unsigned gtSeqNum; // liveness traversal order within the current statement
#endif
- static
- const unsigned short gtOperKindTable[];
+ static const unsigned short gtOperKindTable[];
- static
- unsigned OperKind(unsigned gtOper)
+ static unsigned OperKind(unsigned gtOper)
{
assert(gtOper < GT_COUNT);
- return gtOperKindTable[gtOper];
+ return gtOperKindTable[gtOper];
}
- unsigned OperKind() const
+ unsigned OperKind() const
{
assert(gtOper < GT_COUNT);
- return gtOperKindTable[gtOper];
+ return gtOperKindTable[gtOper];
}
- static bool IsExOp(unsigned opKind)
+ static bool IsExOp(unsigned opKind)
{
return (opKind & GTK_EXOP) != 0;
}
@@ -906,430 +992,372 @@ public:
return opKind & ~GTK_EXOP;
}
- static
- bool OperIsConst(genTreeOps gtOper)
+ static bool OperIsConst(genTreeOps gtOper)
{
- return (OperKind(gtOper) & GTK_CONST ) != 0;
+ return (OperKind(gtOper) & GTK_CONST) != 0;
}
- bool OperIsConst() const
+ bool OperIsConst() const
{
- return (OperKind(gtOper) & GTK_CONST ) != 0;
+ return (OperKind(gtOper) & GTK_CONST) != 0;
}
- static
- bool OperIsLeaf(genTreeOps gtOper)
+ static bool OperIsLeaf(genTreeOps gtOper)
{
- return (OperKind(gtOper) & GTK_LEAF ) != 0;
+ return (OperKind(gtOper) & GTK_LEAF) != 0;
}
- bool OperIsLeaf() const
+ bool OperIsLeaf() const
{
- return (OperKind(gtOper) & GTK_LEAF ) != 0;
+ return (OperKind(gtOper) & GTK_LEAF) != 0;
}
- static
- bool OperIsCompare(genTreeOps gtOper)
+ static bool OperIsCompare(genTreeOps gtOper)
{
- return (OperKind(gtOper) & GTK_RELOP ) != 0;
+ return (OperKind(gtOper) & GTK_RELOP) != 0;
}
- static
- bool OperIsLocal(genTreeOps gtOper)
+ static bool OperIsLocal(genTreeOps gtOper)
{
bool result = (OperKind(gtOper) & GTK_LOCAL) != 0;
- assert(result ==
- (gtOper == GT_LCL_VAR ||
- gtOper == GT_PHI_ARG ||
- gtOper == GT_REG_VAR ||
- gtOper == GT_LCL_FLD ||
- gtOper == GT_STORE_LCL_VAR ||
- gtOper == GT_STORE_LCL_FLD));
+ assert(result == (gtOper == GT_LCL_VAR || gtOper == GT_PHI_ARG || gtOper == GT_REG_VAR ||
+ gtOper == GT_LCL_FLD || gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD));
return result;
}
- static
- bool OperIsBlkOp(genTreeOps gtOper)
+ static bool OperIsBlkOp(genTreeOps gtOper)
{
- return (gtOper == GT_INITBLK ||
- gtOper == GT_COPYBLK ||
- gtOper == GT_COPYOBJ);
+ return (gtOper == GT_INITBLK || gtOper == GT_COPYBLK || gtOper == GT_COPYOBJ);
}
- static
- bool OperIsCopyBlkOp(genTreeOps gtOper)
+ static bool OperIsCopyBlkOp(genTreeOps gtOper)
{
return (gtOper == GT_COPYOBJ || gtOper == GT_COPYBLK);
}
-
- static
- bool OperIsLocalAddr(genTreeOps gtOper)
+ static bool OperIsLocalAddr(genTreeOps gtOper)
{
- return (gtOper == GT_LCL_VAR_ADDR ||
- gtOper == GT_LCL_FLD_ADDR);
+ return (gtOper == GT_LCL_VAR_ADDR || gtOper == GT_LCL_FLD_ADDR);
}
- static
- bool OperIsLocalField(genTreeOps gtOper)
+ static bool OperIsLocalField(genTreeOps gtOper)
{
- return (gtOper == GT_LCL_FLD ||
- gtOper == GT_LCL_FLD_ADDR ||
- gtOper == GT_STORE_LCL_FLD );
+ return (gtOper == GT_LCL_FLD || gtOper == GT_LCL_FLD_ADDR || gtOper == GT_STORE_LCL_FLD);
}
- inline bool OperIsLocalField() const
+ inline bool OperIsLocalField() const
{
return OperIsLocalField(gtOper);
}
- static
- bool OperIsScalarLocal(genTreeOps gtOper)
+ static bool OperIsScalarLocal(genTreeOps gtOper)
{
- return (gtOper == GT_LCL_VAR ||
- gtOper == GT_REG_VAR ||
- gtOper == GT_STORE_LCL_VAR);
+ return (gtOper == GT_LCL_VAR || gtOper == GT_REG_VAR || gtOper == GT_STORE_LCL_VAR);
}
- static
- bool OperIsNonPhiLocal(genTreeOps gtOper)
+ static bool OperIsNonPhiLocal(genTreeOps gtOper)
{
return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
}
- static
- bool OperIsLocalRead(genTreeOps gtOper)
+ static bool OperIsLocalRead(genTreeOps gtOper)
{
return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
}
- static
- bool OperIsLocalStore(genTreeOps gtOper)
+ static bool OperIsLocalStore(genTreeOps gtOper)
{
- return (gtOper == GT_STORE_LCL_VAR ||
- gtOper == GT_STORE_LCL_FLD);
-
+ return (gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD);
}
- static
- bool OperIsAddrMode(genTreeOps gtOper)
+ static bool OperIsAddrMode(genTreeOps gtOper)
{
return (gtOper == GT_LEA);
}
- bool OperIsBlkOp() const;
- bool OperIsCopyBlkOp() const;
- bool OperIsInitBlkOp() const;
- bool OperIsDynBlkOp();
+ bool OperIsBlkOp() const;
+ bool OperIsCopyBlkOp() const;
+ bool OperIsInitBlkOp() const;
+ bool OperIsDynBlkOp();
- bool OperIsPutArgStk() const
+ bool OperIsPutArgStk() const
{
return gtOper == GT_PUTARG_STK;
}
- bool OperIsAddrMode() const
+ bool OperIsAddrMode() const
{
return OperIsAddrMode(OperGet());
}
- bool OperIsLocal() const
+ bool OperIsLocal() const
{
return OperIsLocal(OperGet());
}
- bool OperIsLocalAddr() const
+ bool OperIsLocalAddr() const
{
return OperIsLocalAddr(OperGet());
}
- bool OperIsScalarLocal() const
+ bool OperIsScalarLocal() const
{
return OperIsScalarLocal(OperGet());
}
- bool OperIsNonPhiLocal() const
+ bool OperIsNonPhiLocal() const
{
return OperIsNonPhiLocal(OperGet());
}
- bool OperIsLocalStore() const
+ bool OperIsLocalStore() const
{
return OperIsLocalStore(OperGet());
}
- bool OperIsLocalRead() const
+ bool OperIsLocalRead() const
{
return OperIsLocalRead(OperGet());
}
- bool OperIsCompare()
+ bool OperIsCompare()
{
- return (OperKind(gtOper) & GTK_RELOP ) != 0;
+ return (OperKind(gtOper) & GTK_RELOP) != 0;
}
- static
- bool OperIsLogical(genTreeOps gtOper)
+ static bool OperIsLogical(genTreeOps gtOper)
{
- return (OperKind(gtOper) & GTK_LOGOP ) != 0;
+ return (OperKind(gtOper) & GTK_LOGOP) != 0;
}
- bool OperIsLogical() const
+ bool OperIsLogical() const
{
- return (OperKind(gtOper) & GTK_LOGOP ) != 0;
+ return (OperKind(gtOper) & GTK_LOGOP) != 0;
}
- static
- bool OperIsShift(genTreeOps gtOper)
+ static bool OperIsShift(genTreeOps gtOper)
{
- return (gtOper == GT_LSH) ||
- (gtOper == GT_RSH) ||
- (gtOper == GT_RSZ);
+ return (gtOper == GT_LSH) || (gtOper == GT_RSH) || (gtOper == GT_RSZ);
}
- bool OperIsShift() const
+ bool OperIsShift() const
{
return OperIsShift(OperGet());
}
- static
- bool OperIsRotate(genTreeOps gtOper)
+ static bool OperIsRotate(genTreeOps gtOper)
{
- return (gtOper == GT_ROL) ||
- (gtOper == GT_ROR);
+ return (gtOper == GT_ROL) || (gtOper == GT_ROR);
}
- bool OperIsRotate() const
+ bool OperIsRotate() const
{
return OperIsRotate(OperGet());
}
- static
- bool OperIsShiftOrRotate(genTreeOps gtOper)
+ static bool OperIsShiftOrRotate(genTreeOps gtOper)
{
- return OperIsShift(gtOper) ||
- OperIsRotate(gtOper);
+ return OperIsShift(gtOper) || OperIsRotate(gtOper);
}
- bool OperIsShiftOrRotate() const
+ bool OperIsShiftOrRotate() const
{
return OperIsShiftOrRotate(OperGet());
}
- bool OperIsArithmetic() const
+ bool OperIsArithmetic() const
{
genTreeOps op = OperGet();
- return op==GT_ADD
- || op==GT_SUB
- || op==GT_MUL
- || op==GT_DIV
- || op==GT_MOD
+ return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD
- || op==GT_UDIV
- || op==GT_UMOD
+ || op == GT_UDIV || op == GT_UMOD
- || op==GT_OR
- || op==GT_XOR
- || op==GT_AND
+ || op == GT_OR || op == GT_XOR || op == GT_AND
- || OperIsShiftOrRotate(op);
+ || OperIsShiftOrRotate(op);
}
#if !defined(LEGACY_BACKEND) && !defined(_TARGET_64BIT_)
- static
- bool OperIsHigh(genTreeOps gtOper)
+ static bool OperIsHigh(genTreeOps gtOper)
{
switch (gtOper)
{
- case GT_ADD_HI:
- case GT_SUB_HI:
- case GT_MUL_HI:
- case GT_DIV_HI:
- case GT_MOD_HI:
- return true;
- default:
- return false;
+ case GT_ADD_HI:
+ case GT_SUB_HI:
+ case GT_MUL_HI:
+ case GT_DIV_HI:
+ case GT_MOD_HI:
+ return true;
+ default:
+ return false;
}
}
- bool OperIsHigh() const
+ bool OperIsHigh() const
{
return OperIsHigh(OperGet());
}
#endif // !defined(LEGACY_BACKEND) && !defined(_TARGET_64BIT_)
- static
- bool OperIsUnary(genTreeOps gtOper)
+ static bool OperIsUnary(genTreeOps gtOper)
{
- return (OperKind(gtOper) & GTK_UNOP ) != 0;
+ return (OperKind(gtOper) & GTK_UNOP) != 0;
}
- bool OperIsUnary() const
+ bool OperIsUnary() const
{
- return OperIsUnary(gtOper);
+ return OperIsUnary(gtOper);
}
- static
- bool OperIsBinary(genTreeOps gtOper)
+ static bool OperIsBinary(genTreeOps gtOper)
{
- return (OperKind(gtOper) & GTK_BINOP ) != 0;
+ return (OperKind(gtOper) & GTK_BINOP) != 0;
}
- bool OperIsBinary() const
+ bool OperIsBinary() const
{
- return OperIsBinary(gtOper);
+ return OperIsBinary(gtOper);
}
- static
- bool OperIsSimple(genTreeOps gtOper)
+ static bool OperIsSimple(genTreeOps gtOper)
{
- return (OperKind(gtOper) & GTK_SMPOP ) != 0;
+ return (OperKind(gtOper) & GTK_SMPOP) != 0;
}
- static
- bool OperIsSpecial(genTreeOps gtOper)
- {
- return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
+ static bool OperIsSpecial(genTreeOps gtOper)
+ {
+ return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
}
- bool OperIsSimple() const
+ bool OperIsSimple() const
{
- return OperIsSimple(gtOper);
+ return OperIsSimple(gtOper);
}
#ifdef FEATURE_SIMD
- bool isCommutativeSIMDIntrinsic();
-#else // !
- bool isCommutativeSIMDIntrinsic()
+ bool isCommutativeSIMDIntrinsic();
+#else // !
+ bool isCommutativeSIMDIntrinsic()
{
return false;
}
#endif // FEATURE_SIMD
- static
- bool OperIsCommutative(genTreeOps gtOper)
+ static bool OperIsCommutative(genTreeOps gtOper)
{
- return (OperKind(gtOper) & GTK_COMMUTE) != 0;
+ return (OperKind(gtOper) & GTK_COMMUTE) != 0;
}
- bool OperIsCommutative()
+ bool OperIsCommutative()
{
return OperIsCommutative(gtOper) || (OperIsSIMD(gtOper) && isCommutativeSIMDIntrinsic());
}
- static
- bool OperIsAssignment(genTreeOps gtOper)
+ static bool OperIsAssignment(genTreeOps gtOper)
{
- return (OperKind(gtOper) & GTK_ASGOP) != 0;
+ return (OperKind(gtOper) & GTK_ASGOP) != 0;
}
- bool OperIsAssignment() const
+ bool OperIsAssignment() const
{
- return OperIsAssignment(gtOper);
+ return OperIsAssignment(gtOper);
}
- static
- bool OperIsIndir(genTreeOps gtOper)
+ static bool OperIsIndir(genTreeOps gtOper)
{
- return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || gtOper == GT_OBJ;
+ return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || gtOper == GT_OBJ;
}
- bool OperIsIndir() const
+ bool OperIsIndir() const
{
- return OperIsIndir(gtOper);
+ return OperIsIndir(gtOper);
}
- static
- bool OperIsImplicitIndir(genTreeOps gtOper)
+ static bool OperIsImplicitIndir(genTreeOps gtOper)
{
switch (gtOper)
{
- case GT_LOCKADD:
- case GT_XADD:
- case GT_CMPXCHG:
- case GT_COPYBLK:
- case GT_COPYOBJ:
- case GT_INITBLK:
- case GT_OBJ:
- case GT_BOX:
- case GT_ARR_INDEX:
- case GT_ARR_ELEM:
- case GT_ARR_OFFSET:
- return true;
- default:
- return false;
+ case GT_LOCKADD:
+ case GT_XADD:
+ case GT_CMPXCHG:
+ case GT_COPYBLK:
+ case GT_COPYOBJ:
+ case GT_INITBLK:
+ case GT_OBJ:
+ case GT_BOX:
+ case GT_ARR_INDEX:
+ case GT_ARR_ELEM:
+ case GT_ARR_OFFSET:
+ return true;
+ default:
+ return false;
}
}
- bool OperIsImplicitIndir() const
+ bool OperIsImplicitIndir() const
{
- return OperIsImplicitIndir(gtOper);
+ return OperIsImplicitIndir(gtOper);
}
- bool OperIsStore() const
+ bool OperIsStore() const
{
return OperIsStore(gtOper);
}
- static
- bool OperIsStore(genTreeOps gtOper)
+ static bool OperIsStore(genTreeOps gtOper)
{
- return (gtOper == GT_STOREIND
- || gtOper == GT_STORE_LCL_VAR
- || gtOper == GT_STORE_LCL_FLD
- || gtOper == GT_STORE_CLS_VAR);
+ return (gtOper == GT_STOREIND || gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD ||
+ gtOper == GT_STORE_CLS_VAR);
}
- static
- bool OperIsAtomicOp(genTreeOps gtOper)
+ static bool OperIsAtomicOp(genTreeOps gtOper)
{
- return (gtOper == GT_XADD
- || gtOper == GT_XCHG
- || gtOper == GT_LOCKADD
- || gtOper == GT_CMPXCHG);
+ return (gtOper == GT_XADD || gtOper == GT_XCHG || gtOper == GT_LOCKADD || gtOper == GT_CMPXCHG);
}
- bool OperIsAtomicOp()
+ bool OperIsAtomicOp()
{
- return OperIsAtomicOp(gtOper);
+ return OperIsAtomicOp(gtOper);
}
// This is basically here for cleaner FEATURE_SIMD #ifdefs.
- static
- bool OperIsSIMD(genTreeOps gtOper)
+ static bool OperIsSIMD(genTreeOps gtOper)
{
#ifdef FEATURE_SIMD
return gtOper == GT_SIMD;
-#else // !FEATURE_SIMD
+#else // !FEATURE_SIMD
return false;
#endif // !FEATURE_SIMD
}
- bool OperIsSIMD()
+ bool OperIsSIMD()
{
return OperIsSIMD(gtOper);
}
// Requires that "op" is an op= operator. Returns
// the corresponding "op".
- static
- genTreeOps OpAsgToOper(genTreeOps op);
+ static genTreeOps OpAsgToOper(genTreeOps op);
#ifdef DEBUG
- bool NullOp1Legal() const
+ bool NullOp1Legal() const
{
assert(OperIsSimple(gtOper));
switch (gtOper)
{
- case GT_PHI: case GT_LEA: case GT_RETFILT: case GT_NOP:
- return true;
- case GT_RETURN:
- return gtType == TYP_VOID;
- default:
- return false;
+ case GT_PHI:
+ case GT_LEA:
+ case GT_RETFILT:
+ case GT_NOP:
+ return true;
+ case GT_RETURN:
+ return gtType == TYP_VOID;
+ default:
+ return false;
}
}
- bool NullOp2Legal() const
+ bool NullOp2Legal() const
{
assert(OperIsSimple(gtOper));
if (!OperIsBinary(gtOper))
@@ -1338,24 +1366,23 @@ public:
}
switch (gtOper)
{
- case GT_LIST:
- case GT_INTRINSIC:
- case GT_LEA:
- case GT_STOREIND:
- case GT_INITBLK:
- case GT_COPYBLK:
- case GT_COPYOBJ:
+ case GT_LIST:
+ case GT_INTRINSIC:
+ case GT_LEA:
+ case GT_STOREIND:
+ case GT_INITBLK:
+ case GT_COPYBLK:
+ case GT_COPYOBJ:
#ifdef FEATURE_SIMD
- case GT_SIMD:
+ case GT_SIMD:
#endif // !FEATURE_SIMD
- return true;
- default:
- return false;
+ return true;
+ default:
+ return false;
}
}
- static
- inline bool RequiresNonNullOp2(genTreeOps oper);
+ static inline bool RequiresNonNullOp2(genTreeOps oper);
bool IsListForMultiRegArg();
#endif // DEBUG
@@ -1373,7 +1400,7 @@ public:
inline GenTreePtr Current();
- inline GenTreePtr *pCurrent();
+ inline GenTreePtr* pCurrent();
inline GenTreePtr gtGetOp1();
@@ -1381,97 +1408,89 @@ public:
// Given a tree node, if this is a child of that node, return the pointer to the child node so that it
// can be modified; otherwise, return null.
- GenTreePtr* gtGetChildPointer(GenTreePtr parent);
+ GenTreePtr* gtGetChildPointer(GenTreePtr parent);
// Get the parent of this node, and optionally capture the pointer to the child so that it can be modified.
- GenTreePtr gtGetParent(GenTreePtr** parentChildPtrPtr);
+ GenTreePtr gtGetParent(GenTreePtr** parentChildPtrPtr);
inline GenTreePtr gtEffectiveVal(bool commaOnly = false);
// Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
- inline GenTree* gtSkipReloadOrCopy();
+ inline GenTree* gtSkipReloadOrCopy();
// Returns true if it is a call node returning its value in more than one register
- inline bool IsMultiRegCall() const;
+ inline bool IsMultiRegCall() const;
// Returns true if it is a GT_COPY or GT_RELOAD node
- inline bool IsCopyOrReload() const;
+ inline bool IsCopyOrReload() const;
// Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node
- inline bool IsCopyOrReloadOfMultiRegCall() const;
+ inline bool IsCopyOrReloadOfMultiRegCall() const;
- bool OperMayThrow();
+ bool OperMayThrow();
- unsigned GetScaleIndexMul();
- unsigned GetScaleIndexShf();
- unsigned GetScaledIndex();
+ unsigned GetScaleIndexMul();
+ unsigned GetScaleIndexShf();
+ unsigned GetScaledIndex();
// Returns true if "addr" is a GT_ADD node, at least one of whose arguments is an integer
// (<= 32 bit) constant. If it returns true, it sets "*offset" to (one of the) constant value(s), and
// "*addr" to the other argument.
- bool IsAddWithI32Const(GenTreePtr* addr, int* offset);
+ bool IsAddWithI32Const(GenTreePtr* addr, int* offset);
// Insert 'node' after this node in execution order.
- void InsertAfterSelf(GenTree* node, GenTreeStmt* stmt = nullptr);
+ void InsertAfterSelf(GenTree* node, GenTreeStmt* stmt = nullptr);
public:
-
#if SMALL_TREE_NODES
- static
- unsigned char s_gtNodeSizes[];
+ static unsigned char s_gtNodeSizes[];
#endif
- static
- void InitNodeSize();
+ static void InitNodeSize();
- size_t GetNodeSize() const;
+ size_t GetNodeSize() const;
- bool IsNodeProperlySized() const;
+ bool IsNodeProperlySized() const;
- void CopyFrom(const GenTree* src, Compiler* comp);
+ void CopyFrom(const GenTree* src, Compiler* comp);
- static
- genTreeOps ReverseRelop(genTreeOps relop);
+ static genTreeOps ReverseRelop(genTreeOps relop);
- static
- genTreeOps SwapRelop(genTreeOps relop);
+ static genTreeOps SwapRelop(genTreeOps relop);
//---------------------------------------------------------------------
- static
- bool Compare(GenTreePtr op1, GenTreePtr op2, bool swapOK = false);
+ static bool Compare(GenTreePtr op1, GenTreePtr op2, bool swapOK = false);
- //---------------------------------------------------------------------
- #ifdef DEBUG
+//---------------------------------------------------------------------
+#ifdef DEBUG
//---------------------------------------------------------------------
- static
- const char * NodeName(genTreeOps op);
+ static const char* NodeName(genTreeOps op);
- static
- const char * OpName(genTreeOps op);
+ static const char* OpName(genTreeOps op);
- //---------------------------------------------------------------------
- #endif
+//---------------------------------------------------------------------
+#endif
//---------------------------------------------------------------------
- bool IsNothingNode () const;
- void gtBashToNOP ();
+ bool IsNothingNode() const;
+ void gtBashToNOP();
// Value number update action enumeration
enum ValueNumberUpdate
{
- CLEAR_VN, // Clear value number
- PRESERVE_VN // Preserve value number
+ CLEAR_VN, // Clear value number
+ PRESERVE_VN // Preserve value number
};
- void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN); // set gtOper
- void SetOperResetFlags (genTreeOps oper); // set gtOper and reset flags
+ void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN); // set gtOper
+ void SetOperResetFlags(genTreeOps oper); // set gtOper and reset flags
- void ChangeOperConst (genTreeOps oper); // ChangeOper(constOper)
+ void ChangeOperConst(genTreeOps oper); // ChangeOper(constOper)
// set gtOper and only keep GTF_COMMON_MASK flags
- void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
- void ChangeOperUnchecked (genTreeOps oper);
+ void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
+ void ChangeOperUnchecked(genTreeOps oper);
bool IsLocal() const
{
@@ -1482,7 +1501,7 @@ public:
// is not the same size as the type of the GT_LCL_VAR.
bool IsPartialLclFld(Compiler* comp);
- // Returns "true" iff "this" defines a local variable. Requires "comp" to be the
+ // Returns "true" iff "this" defines a local variable. Requires "comp" to be the
// current compilation. If returns "true", sets "*pLclVarTree" to the
// tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to
// true or false, depending on whether the assignment writes to the entirety of the local
@@ -1494,7 +1513,7 @@ public:
// sets "*pFldSeq" to the field sequence representing that field, else null.
bool IsLocalAddrExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq);
- // Simpler variant of the above which just returns the local node if this is an expression that
+ // Simpler variant of the above which just returns the local node if this is an expression that
// yields an address into a local
GenTreeLclVarCommon* IsLocalAddrExpr();
@@ -1504,32 +1523,31 @@ public:
// Determine whether this is an assignment tree of the form X = X (op) Y,
// where Y is an arbitrary tree, and X is a lclVar.
- unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps *updateOper);
+ unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper);
// If returns "true", "this" may represent the address of a static or instance field
- // (or a field of such a field, in the case of an object field of type struct).
- // If returns "true", then either "*pObj" is set to the object reference,
- // or "*pStatic" is set to the baseAddr or offset to be added to the "*pFldSeq"
+ // (or a field of such a field, in the case of an object field of type struct).
+ // If returns "true", then either "*pObj" is set to the object reference,
+ // or "*pStatic" is set to the baseAddr or offset to be added to the "*pFldSeq"
// Only one of "*pObj" or "*pStatic" will be set, the other one will be null.
- // The boolean return value only indicates that "this" *may* be a field address
- // -- the field sequence must also be checked.
- // If it is a field address, the field sequence will be a sequence of length >= 1,
+ // The boolean return value only indicates that "this" *may* be a field address
+ // -- the field sequence must also be checked.
+ // If it is a field address, the field sequence will be a sequence of length >= 1,
// starting with an instance or static field, and optionally continuing with struct fields.
bool IsFieldAddr(Compiler* comp, GenTreePtr* pObj, GenTreePtr* pStatic, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of an array (the child of a GT_IND labeled with GTF_IND_ARR_INDEX).
- // Sets "pArr" to the node representing the array (either an array object pointer, or perhaps a byref to the some element).
+    // Sets "pArr" to the node representing the array (either an array object pointer, or perhaps a byref to some
+    // element).
// Sets "*pArrayType" to the class handle for the array type.
// Sets "*inxVN" to the value number inferred for the array index.
// Sets "*pFldSeq" to the sequence, if any, of struct fields used to index into the array element.
- void ParseArrayAddress(Compiler* comp,
- struct ArrayInfo* arrayInfo,
- GenTreePtr* pArr,
- ValueNum* pInxVN,
- FieldSeqNode** pFldSeq);
+ void ParseArrayAddress(
+ Compiler* comp, struct ArrayInfo* arrayInfo, GenTreePtr* pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq);
// Helper method for the above.
- void ParseArrayAddressWork(Compiler* comp, ssize_t inputMul, GenTreePtr* pArr, ValueNum* pInxVN, ssize_t* pOffset, FieldSeqNode** pFldSeq);
+ void ParseArrayAddressWork(
+ Compiler* comp, ssize_t inputMul, GenTreePtr* pArr, ValueNum* pInxVN, ssize_t* pOffset, FieldSeqNode** pFldSeq);
// Requires "this" to be a GT_IND. Requires the outermost caller to set "*pFldSeq" to nullptr.
// Returns true if it is an array index expression, or access to a (sequence of) struct field(s)
@@ -1557,58 +1575,58 @@ public:
// sets "*pIsEntire" to true if this assignment writes the full width of the local.
bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire);
- bool IsRegVar () const
+ bool IsRegVar() const
{
- return OperGet() == GT_REG_VAR?true:false;
+ return OperGet() == GT_REG_VAR ? true : false;
}
- bool InReg() const
+ bool InReg() const
{
- return (gtFlags & GTF_REG_VAL)?true:false;
+ return (gtFlags & GTF_REG_VAL) ? true : false;
}
- void SetInReg()
+ void SetInReg()
{
gtFlags |= GTF_REG_VAL;
}
- regNumber GetReg() const
+ regNumber GetReg() const
{
return InReg() ? gtRegNum : REG_NA;
}
- bool IsRegVarDeath () const
+ bool IsRegVarDeath() const
{
assert(OperGet() == GT_REG_VAR);
- return (gtFlags & GTF_VAR_DEATH)?true:false;
+ return (gtFlags & GTF_VAR_DEATH) ? true : false;
}
- bool IsRegVarBirth () const
+ bool IsRegVarBirth() const
{
assert(OperGet() == GT_REG_VAR);
- return (gtFlags & GTF_REG_BIRTH)?true:false;
+ return (gtFlags & GTF_REG_BIRTH) ? true : false;
}
- bool IsReverseOp() const
+ bool IsReverseOp() const
{
- return (gtFlags & GTF_REVERSE_OPS)?true:false;
+ return (gtFlags & GTF_REVERSE_OPS) ? true : false;
}
- inline bool IsCnsIntOrI () const;
+ inline bool IsCnsIntOrI() const;
- inline bool IsIntegralConst () const;
+ inline bool IsIntegralConst() const;
- inline bool IsIntCnsFitsInI32 ();
+ inline bool IsIntCnsFitsInI32();
- inline bool IsCnsFltOrDbl() const;
+ inline bool IsCnsFltOrDbl() const;
- inline bool IsCnsNonZeroFltOrDbl();
+ inline bool IsCnsNonZeroFltOrDbl();
- bool IsIconHandle () const
+ bool IsIconHandle() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK) ? true : false;
}
- bool IsIconHandle (unsigned handleType) const
+ bool IsIconHandle(unsigned handleType) const
{
assert(gtOper == GT_CNS_INT);
- assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
+ assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
return (gtFlags & GTF_ICON_HDL_MASK) == handleType;
}
@@ -1616,48 +1634,57 @@ public:
// Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag. For example,
// GTF_ICON_SCOPE_HDL. The tree node must be a const int, but it might not be a handle, in which
// case we'll return zero.
- unsigned GetIconHandleFlag () const
+ unsigned GetIconHandleFlag() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK);
}
// Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
- void ClearIconHandleMask()
+ void ClearIconHandleMask()
{
assert(gtOper == GT_CNS_INT);
gtFlags &= ~GTF_ICON_HDL_MASK;
}
// Return true if the two GT_CNS_INT trees have the same handle flag (GTF_ICON_*_HDL).
- static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
+ static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
{
return t1->GetIconHandleFlag() == t2->GetIconHandleFlag();
}
- bool IsArgPlaceHolderNode() const { return OperGet() == GT_ARGPLACE; }
- bool IsCall () const { return OperGet() == GT_CALL; }
- bool IsStatement () const { return OperGet() == GT_STMT; }
- inline bool IsHelperCall ();
+ bool IsArgPlaceHolderNode() const
+ {
+ return OperGet() == GT_ARGPLACE;
+ }
+ bool IsCall() const
+ {
+ return OperGet() == GT_CALL;
+ }
+ bool IsStatement() const
+ {
+ return OperGet() == GT_STMT;
+ }
+ inline bool IsHelperCall();
- bool IsVarAddr () const;
- bool gtOverflow () const;
- bool gtOverflowEx () const;
- bool gtSetFlags () const;
- bool gtRequestSetFlags ();
+ bool IsVarAddr() const;
+ bool gtOverflow() const;
+ bool gtOverflowEx() const;
+ bool gtSetFlags() const;
+ bool gtRequestSetFlags();
#ifdef DEBUG
- bool gtIsValid64RsltMul ();
- static int gtDispFlags (unsigned flags, unsigned debugFlags);
+ bool gtIsValid64RsltMul();
+ static int gtDispFlags(unsigned flags, unsigned debugFlags);
#endif
- // cast operations
- inline var_types CastFromType();
- inline var_types& CastToType();
+ // cast operations
+ inline var_types CastFromType();
+ inline var_types& CastToType();
// Returns true if this gentree node is marked by lowering to indicate
- // that codegen can still generate code even if it wasn't allocated a
+ // that codegen can still generate code even if it wasn't allocated a
// register.
- bool IsRegOptional() const;
+ bool IsRegOptional() const;
// Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...));
bool IsPhiDefn();
@@ -1679,7 +1706,9 @@ public:
// we can't synthesize an assignment operator.
// TODO-Cleanup: Could change this w/o liveset on tree nodes
// (This is also necessary for the VTable trick.)
- GenTree() {}
+ GenTree()
+ {
+ }
// Returns the number of children of the current node.
unsigned NumChildren();
@@ -1703,7 +1732,7 @@ public:
bool IsReuseRegVal() const
{
// This can be extended to non-constant nodes, but not to local or indir nodes.
- if(OperIsConst() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
+ if (OperIsConst() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
{
return true;
}
@@ -1721,8 +1750,9 @@ public:
}
#ifdef DEBUG
- private:
- GenTree& operator=(const GenTree& gt) {
+private:
+ GenTree& operator=(const GenTree& gt)
+ {
assert(!"Don't copy");
return *this;
}
@@ -1730,7 +1760,9 @@ public:
#if DEBUGGABLE_GENTREE
// In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
- virtual void DummyVirt() {}
+ virtual void DummyVirt()
+ {
+ }
typedef void* VtablePtr;
@@ -1741,11 +1773,10 @@ public:
static VtablePtr s_vtableForOp;
#endif // DEBUGGABLE_GENTREE
- public:
+public:
inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
- inline GenTree(genTreeOps oper, var_types type
- DEBUGARG(bool largeNode = false));
+ inline GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false));
};
//------------------------------------------------------------------------
@@ -1771,14 +1802,14 @@ class GenTreeOperandIterator
GenTree* m_operand;
GenTree* m_argList;
GenTree* m_multiRegArg;
- bool m_expandMultiRegArgs;
- int m_state;
+ bool m_expandMultiRegArgs;
+ int m_state;
GenTreeOperandIterator(GenTree* node, bool expandMultiRegArgs);
GenTree* GetNextOperand() const;
- void MoveToNextCallOperand();
- void MoveToNextPhiOperand();
+ void MoveToNextCallOperand();
+ void MoveToNextPhiOperand();
#ifdef FEATURE_SIMD
void MoveToNextSIMDOperand();
#endif
@@ -1803,7 +1834,8 @@ public:
return m_state == other.m_state;
}
- return (m_node == other.m_node) && (m_operand == other.m_operand) && (m_argList == other.m_argList) && (m_state == other.m_state);
+ return (m_node == other.m_node) && (m_operand == other.m_operand) && (m_argList == other.m_argList) &&
+ (m_state == other.m_state);
}
inline bool operator!=(const GenTreeOperandIterator& other) const
@@ -1814,152 +1846,145 @@ public:
GenTreeOperandIterator& operator++();
};
-
/*****************************************************************************/
// In the current design, we never instantiate GenTreeUnOp: it exists only to be
// used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
// argument. We check that this is true dynamically. We could tighten this and get static
// checking, but that would entail accessing the first child of a unary operator via something
// like gtUnOp.gtOp1 instead of gtOp.gtOp1.
-struct GenTreeUnOp: public GenTree
+struct GenTreeUnOp : public GenTree
{
- GenTreePtr gtOp1;
+ GenTreePtr gtOp1;
protected:
- GenTreeUnOp(genTreeOps oper, var_types type
- DEBUGARG(bool largeNode = false)) :
- GenTree(oper, type
- DEBUGARG(largeNode)),
- gtOp1(nullptr)
- {}
-
- GenTreeUnOp(genTreeOps oper, var_types type, GenTreePtr op1
- DEBUGARG(bool largeNode = false)) :
- GenTree(oper, type
- DEBUGARG(largeNode)),
- gtOp1(op1)
- {
- assert(op1 != nullptr || NullOp1Legal());
- if (op1 != nullptr) // Propagate effects flags from child.
- gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
+ GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
+ : GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr)
+ {
+ }
+
+ GenTreeUnOp(genTreeOps oper, var_types type, GenTreePtr op1 DEBUGARG(bool largeNode = false))
+ : GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1)
+ {
+ assert(op1 != nullptr || NullOp1Legal());
+ if (op1 != nullptr)
+ { // Propagate effects flags from child.
+ gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
}
+ }
#if DEBUGGABLE_GENTREE
- GenTreeUnOp() : GenTree(), gtOp1(nullptr) {}
+ GenTreeUnOp() : GenTree(), gtOp1(nullptr)
+ {
+ }
#endif
};
-struct GenTreeOp: public GenTreeUnOp
+struct GenTreeOp : public GenTreeUnOp
{
- GenTreePtr gtOp2;
-
- GenTreeOp(genTreeOps oper, var_types type, GenTreePtr op1, GenTreePtr op2
- DEBUGARG(bool largeNode = false)) :
- GenTreeUnOp(oper, type, op1
- DEBUGARG(largeNode)),
- gtOp2(op2)
+ GenTreePtr gtOp2;
+
+ GenTreeOp(genTreeOps oper, var_types type, GenTreePtr op1, GenTreePtr op2 DEBUGARG(bool largeNode = false))
+ : GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2)
+ {
+ // comparisons are always integral types
+ assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
+ // Binary operators, with a few exceptions, require a non-nullptr
+ // second argument.
+ assert(op2 != nullptr || NullOp2Legal());
+ // Unary operators, on the other hand, require a null second argument.
+ assert(!OperIsUnary(oper) || op2 == nullptr);
+ // Propagate effects flags from child. (UnOp handled this for first child.)
+ if (op2 != nullptr)
{
- // comparisons are always integral types
- assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
- // Binary operators, with a few exceptions, require a non-nullptr
- // second argument.
- assert(op2 != nullptr || NullOp2Legal());
- // Unary operators, on the other hand, require a null second argument.
- assert(!OperIsUnary(oper) || op2 == nullptr);
- // Propagate effects flags from child. (UnOp handled this for first child.)
- if (op2 != nullptr)
- {
- gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
- }
+ gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
}
+ }
// A small set of types are unary operators with optional arguments. We use
// this constructor to build those.
- GenTreeOp(genTreeOps oper, var_types type
- DEBUGARG(bool largeNode = false)) :
- GenTreeUnOp(oper, type
- DEBUGARG(largeNode)),
- gtOp2(nullptr)
- {
- // Unary operators with optional arguments:
- assert(oper == GT_NOP ||
- oper == GT_RETURN ||
- oper == GT_RETFILT ||
- OperIsBlkOp(oper));
- }
+ GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
+ : GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr)
+ {
+ // Unary operators with optional arguments:
+ assert(oper == GT_NOP || oper == GT_RETURN || oper == GT_RETFILT || OperIsBlkOp(oper));
+ }
#if DEBUGGABLE_GENTREE
- GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr) {}
+ GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr)
+ {
+ }
#endif
};
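Both constructors above OR the children's GTF_ALL_EFFECT bits into the parent, so effect flags (calls, assignments, possible exceptions) bubble up as a tree is built. An illustrative sketch, assuming 'comp' is the current Compiler* and op1/op2 are previously built TYP_INT nodes:

    GenTreeOp* add = new (comp, GT_ADD) GenTreeOp(GT_ADD, TYP_INT, op1, op2);
    // 'add' now carries every GTF_ALL_EFFECT bit that was set on either operand,
    // because the GenTreeUnOp/GenTreeOp constructors propagate them upward.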
-
-struct GenTreeVal: public GenTree
+struct GenTreeVal : public GenTree
{
- size_t gtVal1;
+ size_t gtVal1;
- GenTreeVal(genTreeOps oper, var_types type, ssize_t val) :
- GenTree(oper, type),
- gtVal1(val)
- {}
+ GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeVal() : GenTree() {}
+ GenTreeVal() : GenTree()
+ {
+ }
#endif
};
-struct GenTreeIntConCommon: public GenTree
+struct GenTreeIntConCommon : public GenTree
{
- inline INT64 LngValue();
- inline void SetLngValue(INT64 val);
+ inline INT64 LngValue();
+ inline void SetLngValue(INT64 val);
inline ssize_t IconValue();
inline void SetIconValue(ssize_t val);
-
- GenTreeIntConCommon(genTreeOps oper, var_types type
- DEBUGARG(bool largeNode = false)) :
- GenTree(oper, type
- DEBUGARG(largeNode))
- {}
-
- bool FitsInI32()
- {
- return FitsInI32(IconValue());
- }
- static bool FitsInI32(ssize_t val)
- {
+ GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
+ : GenTree(oper, type DEBUGARG(largeNode))
+ {
+ }
+
+ bool FitsInI32()
+ {
+ return FitsInI32(IconValue());
+ }
+
+ static bool FitsInI32(ssize_t val)
+ {
#ifdef _TARGET_64BIT_
- return (int)val == val;
+ return (int)val == val;
#else
- return true;
+ return true;
#endif
- }
+ }
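FitsInI32 decides whether a pointer-sized constant can be encoded as a 32-bit immediate; on 64-bit targets the truncate-and-compare above is the whole test. A standalone sketch of the same check, using int64_t in place of ssize_t:

    #include <cassert>
    #include <cstdint>

    static bool FitsInI32(int64_t val)
    {
        return (int32_t)val == val; // lossless round-trip through 32 bits
    }

    int main()
    {
        assert(FitsInI32(0x7FFFFFFF));            // largest positive 32-bit value
        assert(!FitsInI32(INT64_C(0x123456789))); // needs more than 32 bits
        return 0;
    }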
- bool ImmedValNeedsReloc(Compiler* comp);
- bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
+ bool ImmedValNeedsReloc(Compiler* comp);
+ bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
#ifdef _TARGET_XARCH_
- bool FitsInAddrBase(Compiler* comp);
- bool AddrNeedsReloc(Compiler* comp);
+ bool FitsInAddrBase(Compiler* comp);
+ bool AddrNeedsReloc(Compiler* comp);
#endif
#if DEBUGGABLE_GENTREE
- GenTreeIntConCommon() : GenTree() {}
+ GenTreeIntConCommon() : GenTree()
+ {
+ }
#endif
};
// node representing a read from a physical register
-struct GenTreePhysReg: public GenTree
+struct GenTreePhysReg : public GenTree
{
- // physregs need a field beyond gtRegNum because
+ // physregs need a field beyond gtRegNum because
// gtRegNum indicates the destination (and can be changed)
// whereas reg indicates the source
regNumber gtSrcReg;
- GenTreePhysReg(regNumber r, var_types type=TYP_I_IMPL) :
- GenTree(GT_PHYSREG, type), gtSrcReg(r)
+ GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
{
}
#if DEBUGGABLE_GENTREE
- GenTreePhysReg() : GenTree() {}
+ GenTreePhysReg() : GenTree()
+ {
+ }
#endif
};
@@ -1967,44 +1992,45 @@ struct GenTreePhysReg: public GenTree
// gtJumpTable - Switch Jump Table
//
// This node stores a DWORD constant that represents the
-// absolute address of a jump table for switches. The code
+// absolute address of a jump table for switches. The code
// generator uses this table to code the destination for every case
// in an array of addresses which starting position is stored in
// this constant.
struct GenTreeJumpTable : public GenTreeIntConCommon
{
- ssize_t gtJumpTableAddr;
+ ssize_t gtJumpTableAddr;
- GenTreeJumpTable(var_types type
- DEBUGARG(bool largeNode = false)) :
- GenTreeIntConCommon(GT_JMPTABLE, type
- DEBUGARG(largeNode))
- {}
+ GenTreeJumpTable(var_types type DEBUGARG(bool largeNode = false))
+ : GenTreeIntConCommon(GT_JMPTABLE, type DEBUGARG(largeNode))
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeJumpTable() : GenTreeIntConCommon() {}
+ GenTreeJumpTable() : GenTreeIntConCommon()
+ {
+ }
#endif // DEBUG
};
#endif // !LEGACY_BACKEND
/* gtIntCon -- integer constant (GT_CNS_INT) */
-struct GenTreeIntCon: public GenTreeIntConCommon
+struct GenTreeIntCon : public GenTreeIntConCommon
{
- /*
+ /*
* This is the GT_CNS_INT struct definition.
* It's used to hold both int constants and pointer handle constants.
* For the 64-bit targets we will only use GT_CNS_INT as it is used to represent all the possible sizes
* For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
* In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
*/
- ssize_t gtIconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeLngCon below.
-
+ ssize_t gtIconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeLngCon below.
+
/* The InitializeArray intrinsic needs to go back to the newarray statement
to find the class handle of the array so that we can get its size. However,
in ngen mode, the handle in that statement does not correspond to the compile
time handle (rather it lets you get a handle at run-time). In that case, we also
need to store a compile time handle, which goes in this gtCompileTimeHandle field.
*/
- ssize_t gtCompileTimeHandle;
+ ssize_t gtCompileTimeHandle;
// TODO-Cleanup: It's not clear what characterizes the cases where the field
// above is used. It may be that its uses and those of the "gtFieldSeq" field below
@@ -2013,64 +2039,58 @@ struct GenTreeIntCon: public GenTreeIntConCommon
// If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
// sequence of fields.
- FieldSeqNode* gtFieldSeq;
+ FieldSeqNode* gtFieldSeq;
-#if defined (LATE_DISASM)
+#if defined(LATE_DISASM)
/* If the constant was morphed from some other node,
these fields enable us to get back to what the node
originally represented. See use of gtNewIconHandleNode()
*/
- union
- {
+ union {
/* Template struct - The significant field of the other
* structs should overlap exactly with this struct
*/
struct
{
- unsigned gtIconHdl1;
- void * gtIconHdl2;
- }
- gtIconHdl;
+ unsigned gtIconHdl1;
+ void* gtIconHdl2;
+ } gtIconHdl;
/* GT_FIELD, etc */
struct
{
- unsigned gtIconCPX;
- CORINFO_CLASS_HANDLE gtIconCls;
- }
- gtIconFld;
+ unsigned gtIconCPX;
+ CORINFO_CLASS_HANDLE gtIconCls;
+ } gtIconFld;
};
#endif
- GenTreeIntCon(var_types type, ssize_t value
- DEBUGARG(bool largeNode = false)) :
- GenTreeIntConCommon(GT_CNS_INT, type
- DEBUGARG(largeNode)),
- gtIconVal(value),
- gtCompileTimeHandle(0),
- gtFieldSeq(FieldSeqStore::NotAField())
- {}
-
- GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields
- DEBUGARG(bool largeNode = false)) :
- GenTreeIntConCommon(GT_CNS_INT, type
- DEBUGARG(largeNode)),
- gtIconVal(value),
- gtCompileTimeHandle(0),
- gtFieldSeq(fields)
- {
- assert(fields != NULL);
- }
+ GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
+ : GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
+ , gtIconVal(value)
+ , gtCompileTimeHandle(0)
+ , gtFieldSeq(FieldSeqStore::NotAField())
+ {
+ }
+
+ GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields DEBUGARG(bool largeNode = false))
+ : GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
+ , gtIconVal(value)
+ , gtCompileTimeHandle(0)
+ , gtFieldSeq(fields)
+ {
+ assert(fields != nullptr);
+ }
void FixupInitBlkValue(var_types asgType);
#ifdef _TARGET_64BIT_
void TruncateOrSignExtend32()
- {
+ {
if (gtFlags & GTF_UNSIGNED)
{
gtIconVal = UINT32(gtIconVal);
@@ -2083,16 +2103,17 @@ struct GenTreeIntCon: public GenTreeIntConCommon
#endif // _TARGET_64BIT_
#if DEBUGGABLE_GENTREE
- GenTreeIntCon() : GenTreeIntConCommon() {}
+ GenTreeIntCon() : GenTreeIntConCommon()
+ {
+ }
#endif
};
-
/* gtLngCon -- long constant (GT_CNS_LNG) */
-struct GenTreeLngCon: public GenTreeIntConCommon
+struct GenTreeLngCon : public GenTreeIntConCommon
{
- INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
+ INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
INT32 LoVal()
{
return (INT32)(gtLconVal & 0xffffffff);
@@ -2100,18 +2121,21 @@ struct GenTreeLngCon: public GenTreeIntConCommon
INT32 HiVal()
{
- return (INT32)(gtLconVal >> 32);;
+        return (INT32)(gtLconVal >> 32);
}
- GenTreeLngCon(INT64 val) :
- GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
- { SetLngValue(val); }
+ GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
+ {
+ SetLngValue(val);
+ }
#if DEBUGGABLE_GENTREE
- GenTreeLngCon() : GenTreeIntConCommon() {}
+ GenTreeLngCon() : GenTreeIntConCommon()
+ {
+ }
#endif
};
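LoVal and HiVal above slice a 64-bit constant into the two 32-bit halves that a 32-bit target materializes separately. A quick worked example:

    int64_t lconVal = 0x0000000100000002;
    int32_t lo = (int32_t)(lconVal & 0xffffffff); // 0x00000002, what LoVal() returns
    int32_t hi = (int32_t)(lconVal >> 32);        // 0x00000001, what HiVal() returns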
-
inline INT64 GenTreeIntConCommon::LngValue()
{
#ifndef _TARGET_64BIT_
@@ -2138,72 +2162,70 @@ inline void GenTreeIntConCommon::SetLngValue(INT64 val)
inline ssize_t GenTreeIntConCommon::IconValue()
{
- assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
+ assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
return AsIntCon()->gtIconVal;
}
inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
{
- assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
+ assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
AsIntCon()->gtIconVal = val;
}
/* gtDblCon -- double constant (GT_CNS_DBL) */
-struct GenTreeDblCon: public GenTree
+struct GenTreeDblCon : public GenTree
{
- double gtDconVal;
+ double gtDconVal;
- bool isBitwiseEqual(GenTreeDblCon* other)
+ bool isBitwiseEqual(GenTreeDblCon* other)
{
- unsigned __int64 bits = *(unsigned __int64 *)(&gtDconVal);
- unsigned __int64 otherBits = *(unsigned __int64 *)(&(other->gtDconVal));
+ unsigned __int64 bits = *(unsigned __int64*)(&gtDconVal);
+ unsigned __int64 otherBits = *(unsigned __int64*)(&(other->gtDconVal));
return (bits == otherBits);
}
- GenTreeDblCon(double val) :
- GenTree(GT_CNS_DBL, TYP_DOUBLE),
- gtDconVal(val)
- {}
+ GenTreeDblCon(double val) : GenTree(GT_CNS_DBL, TYP_DOUBLE), gtDconVal(val)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeDblCon() : GenTree() {}
+ GenTreeDblCon() : GenTree()
+ {
+ }
#endif
};
-
/* gtStrCon -- string constant (GT_CNS_STR) */
-struct GenTreeStrCon: public GenTree
+struct GenTreeStrCon : public GenTree
{
unsigned gtSconCPX;
CORINFO_MODULE_HANDLE gtScpHnd;
// Because this node can come from an inlined method we need to
// have the scope handle, since it will become a helper call.
- GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod
- DEBUGARG(bool largeNode = false)) :
- GenTree(GT_CNS_STR, TYP_REF
- DEBUGARG(largeNode)),
- gtSconCPX(sconCPX), gtScpHnd(mod)
- {}
+ GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false))
+ : GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeStrCon() : GenTree() {}
+ GenTreeStrCon() : GenTree()
+ {
+ }
#endif
};
// Common supertype of LCL_VAR, LCL_FLD, REG_VAR, PHI_ARG
// This inherits from UnOp because lclvar stores are Unops
-struct GenTreeLclVarCommon: public GenTreeUnOp
+struct GenTreeLclVarCommon : public GenTreeUnOp
{
private:
- unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
- unsigned _gtSsaNum; // The SSA number.
+ unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
+ unsigned _gtSsaNum; // The SSA number.
public:
- GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum
- DEBUGARG(bool largeNode = false)) :
- GenTreeUnOp(oper, type
- DEBUGARG(largeNode))
+ GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum DEBUGARG(bool largeNode = false))
+ : GenTreeUnOp(oper, type DEBUGARG(largeNode))
{
SetLclNum(lclNum);
}
@@ -2212,7 +2234,7 @@ public:
{
return _gtLclNum;
}
- __declspec(property(get=GetLclNum)) unsigned gtLclNum;
+ __declspec(property(get = GetLclNum)) unsigned gtLclNum;
void SetLclNum(unsigned lclNum)
{
@@ -2224,7 +2246,7 @@ public:
{
return _gtSsaNum;
}
- __declspec(property(get=GetSsaNum)) unsigned gtSsaNum;
+ __declspec(property(get = GetSsaNum)) unsigned gtSsaNum;
void SetSsaNum(unsigned ssaNum)
{
@@ -2237,66 +2259,64 @@ public:
}
#if DEBUGGABLE_GENTREE
- GenTreeLclVarCommon() : GenTreeUnOp() {}
+ GenTreeLclVarCommon() : GenTreeUnOp()
+ {
+ }
#endif
};
-// gtLclVar -- load/store/addr of local variable
+// gtLclVar -- load/store/addr of local variable
-struct GenTreeLclVar: public GenTreeLclVarCommon
+struct GenTreeLclVar : public GenTreeLclVarCommon
{
- IL_OFFSET gtLclILoffs; // instr offset of ref (only for debug info)
-
- GenTreeLclVar(var_types type, unsigned lclNum, IL_OFFSET ilOffs
- DEBUGARG(bool largeNode = false)) :
- GenTreeLclVarCommon(GT_LCL_VAR, type, lclNum
- DEBUGARG(largeNode)),
- gtLclILoffs(ilOffs)
- {}
-
- GenTreeLclVar(genTreeOps oper, var_types type, unsigned lclNum, IL_OFFSET ilOffs
- DEBUGARG(bool largeNode = false)) :
- GenTreeLclVarCommon(oper, type, lclNum
- DEBUGARG(largeNode)),
- gtLclILoffs(ilOffs)
- {
- assert(OperIsLocal(oper) || OperIsLocalAddr(oper));
- }
-
+ IL_OFFSET gtLclILoffs; // instr offset of ref (only for debug info)
+
+ GenTreeLclVar(var_types type, unsigned lclNum, IL_OFFSET ilOffs DEBUGARG(bool largeNode = false))
+ : GenTreeLclVarCommon(GT_LCL_VAR, type, lclNum DEBUGARG(largeNode)), gtLclILoffs(ilOffs)
+ {
+ }
+
+ GenTreeLclVar(genTreeOps oper, var_types type, unsigned lclNum, IL_OFFSET ilOffs DEBUGARG(bool largeNode = false))
+ : GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)), gtLclILoffs(ilOffs)
+ {
+ assert(OperIsLocal(oper) || OperIsLocalAddr(oper));
+ }
+
#if DEBUGGABLE_GENTREE
- GenTreeLclVar() : GenTreeLclVarCommon() {}
+ GenTreeLclVar() : GenTreeLclVarCommon()
+ {
+ }
#endif
};
-// gtLclFld -- load/store/addr of local variable field
+// gtLclFld -- load/store/addr of local variable field
-struct GenTreeLclFld: public GenTreeLclVarCommon
+struct GenTreeLclFld : public GenTreeLclVarCommon
{
- unsigned gtLclOffs; // offset into the variable to access
+ unsigned gtLclOffs; // offset into the variable to access
- FieldSeqNode* gtFieldSeq; // This LclFld node represents some sequences of accesses.
+ FieldSeqNode* gtFieldSeq; // This LclFld node represents some sequences of accesses.
// old/FE style constructor where load/store/addr share same opcode
- GenTreeLclFld(var_types type, unsigned lclNum, unsigned lclOffs) :
- GenTreeLclVarCommon(GT_LCL_FLD, type, lclNum),
- gtLclOffs(lclOffs), gtFieldSeq(NULL)
- {
- assert(sizeof(*this) <= s_gtNodeSizes[GT_LCL_FLD]);
- }
-
+ GenTreeLclFld(var_types type, unsigned lclNum, unsigned lclOffs)
+ : GenTreeLclVarCommon(GT_LCL_FLD, type, lclNum), gtLclOffs(lclOffs), gtFieldSeq(nullptr)
+ {
+ assert(sizeof(*this) <= s_gtNodeSizes[GT_LCL_FLD]);
+ }
- GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs) :
- GenTreeLclVarCommon(oper, type, lclNum),
- gtLclOffs(lclOffs), gtFieldSeq(NULL)
- {
- assert(sizeof(*this) <= s_gtNodeSizes[GT_LCL_FLD]);
- }
+ GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs)
+ : GenTreeLclVarCommon(oper, type, lclNum), gtLclOffs(lclOffs), gtFieldSeq(nullptr)
+ {
+ assert(sizeof(*this) <= s_gtNodeSizes[GT_LCL_FLD]);
+ }
#if DEBUGGABLE_GENTREE
- GenTreeLclFld() : GenTreeLclVarCommon() {}
+ GenTreeLclFld() : GenTreeLclVarCommon()
+ {
+ }
#endif
};
-struct GenTreeRegVar: public GenTreeLclVarCommon
+struct GenTreeRegVar : public GenTreeLclVarCommon
{
// TODO-Cleanup: Note that the base class GenTree already has a gtRegNum field.
// It's not clear exactly why a GT_REG_VAR has a separate field. When
@@ -2325,102 +2345,104 @@ struct GenTreeRegVar: public GenTreeLclVarCommon
//
private:
-
- regNumberSmall _gtRegNum;
+ regNumberSmall _gtRegNum;
public:
-
- GenTreeRegVar(var_types type, unsigned lclNum, regNumber regNum) :
- GenTreeLclVarCommon(GT_REG_VAR, type, lclNum
- )
- {
- gtRegNum = regNum;
- }
+ GenTreeRegVar(var_types type, unsigned lclNum, regNumber regNum) : GenTreeLclVarCommon(GT_REG_VAR, type, lclNum)
+ {
+ gtRegNum = regNum;
+ }
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
- __declspec(property(get=GetRegNum,put=SetRegNum))
- regNumber gtRegNum;
+ __declspec(property(get = GetRegNum, put = SetRegNum)) regNumber gtRegNum;
regNumber GetRegNum() const
{
- return (regNumber) _gtRegNum;
+ return (regNumber)_gtRegNum;
}
void SetRegNum(regNumber reg)
{
- _gtRegNum = (regNumberSmall) reg;
+ _gtRegNum = (regNumberSmall)reg;
assert(_gtRegNum == reg);
}
#if DEBUGGABLE_GENTREE
- GenTreeRegVar() : GenTreeLclVarCommon() {}
+ GenTreeRegVar() : GenTreeLclVarCommon()
+ {
+ }
#endif
};
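The comment in GenTreeRegVar explains that the register number is kept in an 8-bit field while the accessors use the full-size type, localizing the casts. A self-contained sketch of that narrow-storage/wide-accessor pattern (names and types are illustrative, not the JIT's):

    // Illustrative sketch: store a register number in 8 bits to keep the node
    // compact, expose it full-size, and assert that the value round-trips.
    #include <cassert>
    #include <cstdint>

    struct RegHolder
    {
        unsigned GetRegNum() const
        {
            return _regNum;
        }
        void SetRegNum(unsigned reg)
        {
            _regNum = static_cast<std::uint8_t>(reg);
            assert(_regNum == reg); // catches values that do not fit in 8 bits
        }

    private:
        std::uint8_t _regNum = 0; // small storage
    };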
-
+
/* gtCast -- conversion to a different type (GT_CAST) */
-struct GenTreeCast: public GenTreeOp
+struct GenTreeCast : public GenTreeOp
{
- GenTreePtr& CastOp() { return gtOp1; }
- var_types gtCastType;
-
- GenTreeCast(var_types type, GenTreePtr op, var_types castType
- DEBUGARG(bool largeNode = false)) :
- GenTreeOp(GT_CAST, type, op, nullptr
- DEBUGARG(largeNode)),
- gtCastType(castType)
- {}
+ GenTreePtr& CastOp()
+ {
+ return gtOp1;
+ }
+ var_types gtCastType;
+
+ GenTreeCast(var_types type, GenTreePtr op, var_types castType DEBUGARG(bool largeNode = false))
+ : GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeCast() : GenTreeOp() {}
+ GenTreeCast() : GenTreeOp()
+ {
+ }
#endif
};
-
// GT_BOX nodes are place markers for boxed values. The "real" tree
// for most purposes is in gtBoxOp.
-struct GenTreeBox: public GenTreeUnOp
+struct GenTreeBox : public GenTreeUnOp
{
// An expanded helper call to implement the "box" if we don't get
// rid of it any other way. Must be in same position as op1.
- GenTreePtr& BoxOp() { return gtOp1; }
+ GenTreePtr& BoxOp()
+ {
+ return gtOp1;
+ }
// This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
- // type
- GenTreePtr gtAsgStmtWhenInlinedBoxValue;
+ // type
+ GenTreePtr gtAsgStmtWhenInlinedBoxValue;
- GenTreeBox(var_types type, GenTreePtr boxOp, GenTreePtr asgStmtWhenInlinedBoxValue) :
- GenTreeUnOp(GT_BOX, type, boxOp),
- gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
- {}
+ GenTreeBox(var_types type, GenTreePtr boxOp, GenTreePtr asgStmtWhenInlinedBoxValue)
+ : GenTreeUnOp(GT_BOX, type, boxOp), gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeBox() : GenTreeUnOp() {}
+ GenTreeBox() : GenTreeUnOp()
+ {
+ }
#endif
};
-
-
/* gtField -- data member ref (GT_FIELD) */
-struct GenTreeField: public GenTree
+struct GenTreeField : public GenTree
{
- GenTreePtr gtFldObj;
- CORINFO_FIELD_HANDLE gtFldHnd;
- DWORD gtFldOffset;
- bool gtFldMayOverlap;
+ GenTreePtr gtFldObj;
+ CORINFO_FIELD_HANDLE gtFldHnd;
+ DWORD gtFldOffset;
+ bool gtFldMayOverlap;
#ifdef FEATURE_READYTORUN_COMPILER
CORINFO_CONST_LOOKUP gtFieldLookup;
#endif
- GenTreeField(var_types type) :
- GenTree(GT_FIELD, type
- )
+ GenTreeField(var_types type) : GenTree(GT_FIELD, type)
{
gtFldMayOverlap = false;
}
#if DEBUGGABLE_GENTREE
- GenTreeField() : GenTree() {}
+ GenTreeField() : GenTree()
+ {
+ }
#endif
};
@@ -2434,27 +2456,36 @@ struct GenTreeField: public GenTree
// Note that while this extends GenTreeOp, it is *not* an EXOP. We don't add any new fields, and one
// is free to allocate a GenTreeOp of type GT_LIST. If you use this type, you get the convenient Current/Rest
// method names for the arguments.
-struct GenTreeArgList: public GenTreeOp
+struct GenTreeArgList : public GenTreeOp
{
- GenTreePtr& Current() { return gtOp1; }
- GenTreeArgList*& Rest() { assert(gtOp2 == NULL || gtOp2->OperGet() == GT_LIST); return *reinterpret_cast<GenTreeArgList**>(&gtOp2); }
+ GenTreePtr& Current()
+ {
+ return gtOp1;
+ }
+ GenTreeArgList*& Rest()
+ {
+ assert(gtOp2 == nullptr || gtOp2->OperGet() == GT_LIST);
+ return *reinterpret_cast<GenTreeArgList**>(&gtOp2);
+ }
#if DEBUGGABLE_GENTREE
- GenTreeArgList() : GenTreeOp() {}
+ GenTreeArgList() : GenTreeOp()
+ {
+ }
#endif
- GenTreeArgList(GenTreePtr arg) :
- GenTreeArgList(arg, nullptr) {}
-
- GenTreeArgList(GenTreePtr arg, GenTreeArgList* rest) :
- GenTreeOp(GT_LIST, TYP_VOID, arg, rest)
+ GenTreeArgList(GenTreePtr arg) : GenTreeArgList(arg, nullptr)
+ {
+ }
+
+ GenTreeArgList(GenTreePtr arg, GenTreeArgList* rest) : GenTreeOp(GT_LIST, TYP_VOID, arg, rest)
{
// With structs passed in multiple args we could have an arg
// GT_LIST containing a list of LCL_FLDs, see IsListForMultiRegArg()
//
assert((arg != nullptr) && ((!arg->IsList()) || (arg->IsListForMultiRegArg())));
- gtFlags |= arg->gtFlags & GTF_ALL_EFFECT;
- if (rest != NULL)
+ gtFlags |= arg->gtFlags & GTF_ALL_EFFECT;
+ if (rest != nullptr)
{
gtFlags |= rest->gtFlags & GTF_ALL_EFFECT;
}
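GenTreeArgList gives a GT_LIST node cons-list accessors: Current() is the head argument and Rest() is the (possibly null) tail list. A standalone sketch of the same head/tail shape over a plain singly linked list, with stand-in types only:

    // Illustrative cons-list with Current/Rest accessors, mirroring the GT_LIST
    // head/tail shape; the types are stand-ins, not GenTree nodes.
    struct ArgNode
    {
        int      value; // stands in for the head argument tree
        ArgNode* rest;  // tail of the list, or nullptr at the end

        ArgNode(int v, ArgNode* r) : value(v), rest(r)
        {
        }

        int& Current()
        {
            return value;
        }
        ArgNode*& Rest()
        {
            return rest;
        }
    };

    // Typical walk: for (ArgNode* a = list; a != nullptr; a = a->Rest()) { /* use a->Current() */ }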
@@ -2466,27 +2497,35 @@ struct GenTreeArgList: public GenTreeOp
// at least *allows* the programmer to be obviously correct.
// However, these conventions seem backward.
// TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
-struct GenTreeColon: public GenTreeOp
+struct GenTreeColon : public GenTreeOp
{
- GenTreePtr& ThenNode() { return gtOp2; }
- GenTreePtr& ElseNode() { return gtOp1; }
+ GenTreePtr& ThenNode()
+ {
+ return gtOp2;
+ }
+ GenTreePtr& ElseNode()
+ {
+ return gtOp1;
+ }
#if DEBUGGABLE_GENTREE
- GenTreeColon() : GenTreeOp() {}
+ GenTreeColon() : GenTreeOp()
+ {
+ }
#endif
- GenTreeColon(var_types typ, GenTreePtr thenNode, GenTreePtr elseNode) :
- GenTreeOp(GT_COLON, typ, elseNode, thenNode)
- {}
+ GenTreeColon(var_types typ, GenTreePtr thenNode, GenTreePtr elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode)
+ {
+ }
};
// gtCall -- method call (GT_CALL)
-typedef class fgArgInfo * fgArgInfoPtr;
+typedef class fgArgInfo* fgArgInfoPtr;
enum class InlineObservation;
// Return type descriptor of a GT_CALL node.
// x64 Unix, Arm64, Arm32 and x86 allow a value to be returned in multiple
-// registers. For such calls this struct provides the following info
+// registers. For such calls this struct provides the following info
// on their return type
// - type of value returned in each return register
// - ABI return register numbers in which the value is returned
@@ -2518,7 +2557,7 @@ public:
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd);
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
- // Only needed for X86
+ // Only needed for X86
void InitializeLongReturnType(Compiler* comp);
// Reset type descriptor to defaults
@@ -2559,20 +2598,20 @@ public:
#ifdef DEBUG
// Any remaining elements in m_regTypes[] should also be TYP_UNKNOWN
- for (unsigned i = regCount+1; i < MAX_RET_REG_COUNT; ++i)
+ for (unsigned i = regCount + 1; i < MAX_RET_REG_COUNT; ++i)
{
assert(m_regType[i] == TYP_UNKNOWN);
}
-#endif
+#endif
return regCount;
}
//-----------------------------------------------------------------------
- // IsMultiRegRetType: check whether the type is returned in multiple
+ // IsMultiRegRetType: check whether the type is returned in multiple
// return registers.
//
- // Arguments:
+ // Arguments:
// None
//
// Return Value:
@@ -2588,14 +2627,13 @@ public:
}
else
{
- return ((m_regType[0] != TYP_UNKNOWN) &&
- (m_regType[1] != TYP_UNKNOWN));
+ return ((m_regType[0] != TYP_UNKNOWN) && (m_regType[1] != TYP_UNKNOWN));
}
}
//--------------------------------------------------------------------------
// GetReturnRegType: Get var_type of the return register specified by index.
- //
+ //
// Arguments:
// index - Index of the return register.
// First return register will have an index 0 and so on.
@@ -2603,7 +2641,7 @@ public:
// Return Value:
// var_type of the return register specified by its index.
// asserts if the index does not have a valid register return type.
-
+
var_types GetReturnRegType(unsigned index)
{
var_types result = m_regType[index];
@@ -2612,51 +2650,51 @@ public:
return result;
}
- // Get ith ABI return register
+ // Get ith ABI return register
regNumber GetABIReturnReg(unsigned idx);
- // Get reg mask of ABI return registers
+ // Get reg mask of ABI return registers
regMaskTP GetABIReturnRegs();
};
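The accessors above rely on the invariant that trailing unused slots of m_regType[] are TYP_UNKNOWN, so the number of return registers is found by scanning until the first unknown slot, and a multi-reg return is simply a count greater than one. A simplified standalone model of that logic; the names and constants below are stand-ins, not the JIT's:

    // Simplified model of "count return registers until the first unknown slot".
    // All names here are stand-ins for illustration only.
    const unsigned kMaxRetRegCount = 4;

    enum ModelType
    {
        MT_UNKNOWN,
        MT_INT,
        MT_FLOAT
    };

    struct ModelReturnTypeDesc
    {
        ModelType m_regType[kMaxRetRegCount];

        unsigned GetReturnRegCount() const
        {
            unsigned count = 0;
            for (unsigned i = 0; i < kMaxRetRegCount; ++i)
            {
                if (m_regType[i] == MT_UNKNOWN)
                {
                    break; // unused trailing slots are all MT_UNKNOWN
                }
                count++;
            }
            return count;
        }

        bool IsMultiRegRetType() const
        {
            return GetReturnRegCount() > 1;
        }
    };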
struct GenTreeCall final : public GenTree
{
- GenTreePtr gtCallObjp; // The instance argument ('this' pointer)
- GenTreeArgList* gtCallArgs; // The list of arguments in original evaluation order
- GenTreeArgList* gtCallLateArgs; // On x86: The register arguments in an optimal order
- // On ARM/x64: - also includes any outgoing arg space arguments
- // - that were evaluated into a temp LclVar
- fgArgInfoPtr fgArgInfo;
+ GenTreePtr gtCallObjp; // The instance argument ('this' pointer)
+ GenTreeArgList* gtCallArgs; // The list of arguments in original evaluation order
+ GenTreeArgList* gtCallLateArgs; // On x86: The register arguments in an optimal order
+ // On ARM/x64: - also includes any outgoing arg space arguments
+ // - that were evaluated into a temp LclVar
+ fgArgInfoPtr fgArgInfo;
#if !FEATURE_FIXED_OUT_ARGS
- int regArgListCount;
- regList regArgList;
+ int regArgListCount;
+ regList regArgList;
#endif
-
+
// TODO-Throughput: Revisit this (this used to be only defined if
- // FEATURE_FIXED_OUT_ARGS was enabled, so this makes GenTreeCall 4 bytes bigger on x86).
- CORINFO_SIG_INFO* callSig; // Used by tail calls and to register callsites with the EE
+ // FEATURE_FIXED_OUT_ARGS was enabled, so this makes GenTreeCall 4 bytes bigger on x86).
+ CORINFO_SIG_INFO* callSig; // Used by tail calls and to register callsites with the EE
#ifdef LEGACY_BACKEND
- regMaskTP gtCallRegUsedMask; // mask of registers used to pass parameters
-#endif // LEGACY_BACKEND
+ regMaskTP gtCallRegUsedMask; // mask of registers used to pass parameters
+#endif // LEGACY_BACKEND
#if FEATURE_MULTIREG_RET
// State required to support multi-reg returning call nodes.
// For now it is enabled only for x64 unix.
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
- ReturnTypeDesc gtReturnTypeDesc;
+ ReturnTypeDesc gtReturnTypeDesc;
// gtRegNum would always be the first return reg.
// The following array holds the other reg numbers of multi-reg return.
- regNumber gtOtherRegs[MAX_RET_REG_COUNT - 1];
+ regNumber gtOtherRegs[MAX_RET_REG_COUNT - 1];
// GTF_SPILL or GTF_SPILLED flag on a multi-reg call node indicates that one or
// more of its result regs are in that state. The spill flag of each of the
// return register is stored in the below array.
- unsigned gtSpillFlags[MAX_RET_REG_COUNT];
-#endif
+ unsigned gtSpillFlags[MAX_RET_REG_COUNT];
+#endif
//-----------------------------------------------------------------------
// GetReturnTypeDesc: get the type descriptor of return value of the call
@@ -2668,11 +2706,11 @@ struct GenTreeCall final : public GenTree
// Type descriptor of the value returned by call
//
// Note:
- // Right now implemented only for x64 unix and yet to be
+ // Right now implemented only for x64 unix and yet to be
// implemented for other multi-reg target arch (Arm64/Arm32/x86).
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
- ReturnTypeDesc* GetReturnTypeDesc()
+ ReturnTypeDesc* GetReturnTypeDesc()
{
#if FEATURE_MULTIREG_RET
return &gtReturnTypeDesc;
@@ -2691,7 +2729,7 @@ struct GenTreeCall final : public GenTree
// Return regNumber of ith return register of call node.
// Returns REG_NA if there is no valid return register for the given index.
//
- regNumber GetRegNumByIdx(unsigned idx) const
+ regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
@@ -2701,7 +2739,7 @@ struct GenTreeCall final : public GenTree
}
#if FEATURE_MULTIREG_RET
- return gtOtherRegs[idx-1];
+ return gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
@@ -2717,7 +2755,7 @@ struct GenTreeCall final : public GenTree
// Return Value:
// None
//
- void SetRegNumByIdx(regNumber reg, unsigned idx)
+ void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
@@ -2745,7 +2783,7 @@ struct GenTreeCall final : public GenTree
// Return Value:
// None
//
- void ClearOtherRegs()
+ void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
@@ -2775,10 +2813,10 @@ struct GenTreeCall final : public GenTree
}
// Get reg mask of all the valid registers of gtOtherRegs array
- regMaskTP GetOtherRegMask() const;
+ regMaskTP GetOtherRegMask() const;
//----------------------------------------------------------------------
- // GetRegSpillFlagByIdx: get spill flag associated with the return register
+ // GetRegSpillFlagByIdx: get spill flag associated with the return register
// specified by its index.
//
// Arguments:
@@ -2799,7 +2837,7 @@ struct GenTreeCall final : public GenTree
}
//----------------------------------------------------------------------
- // SetRegSpillFlagByIdx: set spill flags for the return register
+ // SetRegSpillFlagByIdx: set spill flags for the return register
// specified by its index.
//
// Arguments:
@@ -2857,44 +2895,71 @@ struct GenTreeCall final : public GenTree
#endif
}
-#define GTF_CALL_M_EXPLICIT_TAILCALL 0x0001 // GT_CALL -- the call is "tail" prefixed and importer has performed tail call checks
-#define GTF_CALL_M_TAILCALL 0x0002 // GT_CALL -- the call is a tailcall
-#define GTF_CALL_M_VARARGS 0x0004 // GT_CALL -- the call uses varargs ABI
-#define GTF_CALL_M_RETBUFFARG 0x0008 // GT_CALL -- first parameter is the return buffer argument
-#define GTF_CALL_M_DELEGATE_INV 0x0010 // GT_CALL -- call to Delegate.Invoke
-#define GTF_CALL_M_NOGCCHECK 0x0020 // GT_CALL -- not a call for computing full interruptability
-#define GTF_CALL_M_SPECIAL_INTRINSIC 0x0040 // GT_CALL -- function that could be optimized as an intrinsic
- // in special cases. Used to optimize fast way out in morphing
-#define GTF_CALL_M_UNMGD_THISCALL 0x0080 // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
-#define GTF_CALL_M_VIRTSTUB_REL_INDIRECT 0x0080 // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
-#define GTF_CALL_M_NONVIRT_SAME_THIS 0x0080 // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
-#define GTF_CALL_M_FRAME_VAR_DEATH 0x0100 // GT_CALL -- the compLvFrameListRoot variable dies here (last use)
+#define GTF_CALL_M_EXPLICIT_TAILCALL \
+ 0x0001 // GT_CALL -- the call is "tail" prefixed and importer has performed tail call checks
+#define GTF_CALL_M_TAILCALL 0x0002 // GT_CALL -- the call is a tailcall
+#define GTF_CALL_M_VARARGS 0x0004 // GT_CALL -- the call uses varargs ABI
+#define GTF_CALL_M_RETBUFFARG 0x0008 // GT_CALL -- first parameter is the return buffer argument
+#define GTF_CALL_M_DELEGATE_INV 0x0010 // GT_CALL -- call to Delegate.Invoke
+#define GTF_CALL_M_NOGCCHECK 0x0020 // GT_CALL -- not a call for computing full interruptibility
+#define GTF_CALL_M_SPECIAL_INTRINSIC 0x0040 // GT_CALL -- function that could be optimized as an intrinsic
+ // in special cases. Used to optimize fast way out in morphing
+#define GTF_CALL_M_UNMGD_THISCALL \
+ 0x0080 // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
+#define GTF_CALL_M_VIRTSTUB_REL_INDIRECT \
+ 0x0080 // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
+#define GTF_CALL_M_NONVIRT_SAME_THIS \
+ 0x0080 // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
+#define GTF_CALL_M_FRAME_VAR_DEATH 0x0100 // GT_CALL -- the compLvFrameListRoot variable dies here (last use)
#ifndef LEGACY_BACKEND
-#define GTF_CALL_M_TAILCALL_VIA_HELPER 0x0200 // GT_CALL -- call is a tail call dispatched via tail call JIT helper.
-#endif // !LEGACY_BACKEND
+#define GTF_CALL_M_TAILCALL_VIA_HELPER 0x0200 // GT_CALL -- call is a tail call dispatched via tail call JIT helper.
+#endif // !LEGACY_BACKEND
#if FEATURE_TAILCALL_OPT
-#define GTF_CALL_M_IMPLICIT_TAILCALL 0x0400 // GT_CALL -- call is an opportunistic tail call and importer has performed tail call checks
-#define GTF_CALL_M_TAILCALL_TO_LOOP 0x0800 // GT_CALL -- call is a fast recursive tail call that can be converted into a loop
+#define GTF_CALL_M_IMPLICIT_TAILCALL \
+ 0x0400 // GT_CALL -- call is an opportunistic tail call and importer has performed tail call checks
+#define GTF_CALL_M_TAILCALL_TO_LOOP \
+ 0x0800 // GT_CALL -- call is a fast recursive tail call that can be converted into a loop
#endif
-#define GTF_CALL_M_PINVOKE 0x1000 // GT_CALL -- call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
- // A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For e.g.
- // an IL Stub dynamically generated for a PInvoke declaration is flagged as
- // a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
- // know when these flags are set.
+#define GTF_CALL_M_PINVOKE 0x1000 // GT_CALL -- call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
+ // A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For e.g.
+ // an IL Stub dynamically generated for a PInvoke declaration is flagged as
+ // a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
+ // know when these flags are set.
-#define GTF_CALL_M_R2R_REL_INDIRECT 0x2000 // GT_CALL -- ready to run call is indirected through a relative address
-#define GTF_CALL_M_DOES_NOT_RETURN 0x4000 // GT_CALL -- call does not return
+#define GTF_CALL_M_R2R_REL_INDIRECT 0x2000 // GT_CALL -- ready to run call is indirected through a relative address
+#define GTF_CALL_M_DOES_NOT_RETURN 0x4000 // GT_CALL -- call does not return
- bool IsUnmanaged() const { return (gtFlags & GTF_CALL_UNMANAGED) != 0; }
- bool NeedsNullCheck() const { return (gtFlags & GTF_CALL_NULLCHECK) != 0; }
- bool CallerPop() const { return (gtFlags & GTF_CALL_POP_ARGS) != 0; }
- bool IsVirtual() const { return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT; }
- bool IsVirtualStub() const { return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB; }
- bool IsVirtualVtable() const { return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE; }
- bool IsInlineCandidate() const { return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0; }
+ bool IsUnmanaged() const
+ {
+ return (gtFlags & GTF_CALL_UNMANAGED) != 0;
+ }
+ bool NeedsNullCheck() const
+ {
+ return (gtFlags & GTF_CALL_NULLCHECK) != 0;
+ }
+ bool CallerPop() const
+ {
+ return (gtFlags & GTF_CALL_POP_ARGS) != 0;
+ }
+ bool IsVirtual() const
+ {
+ return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT;
+ }
+ bool IsVirtualStub() const
+ {
+ return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB;
+ }
+ bool IsVirtualVtable() const
+ {
+ return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE;
+ }
+ bool IsInlineCandidate() const
+ {
+ return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0;
+ }
#ifndef LEGACY_BACKEND
bool HasNonStandardAddedArgs(Compiler* compiler) const;
@@ -2904,7 +2969,7 @@ struct GenTreeCall final : public GenTree
// Returns true if this call uses a retBuf argument and its calling convention
bool HasRetBufArg() const
{
- return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
+ return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
}
//-------------------------------------------------------------------------
@@ -2915,13 +2980,13 @@ struct GenTreeCall final : public GenTree
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
- // This method may actually have a retBuf argument
- // or it could be a JIT helper that we are still transforming during
+ // This method may actually have a retBuf argument
+ // or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
- // will make HasRetBufArg() return true, but will also force the
+ // will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
bool TreatAsHasRetBufArg(Compiler* compiler) const;
@@ -2939,7 +3004,7 @@ struct GenTreeCall final : public GenTree
// This is implemented only for x64 Unix and yet to be implemented for
// other multi-reg return target arch (arm64/arm32/x86).
//
- bool HasMultiRegRetVal() const
+ bool HasMultiRegRetVal() const
{
#if defined(_TARGET_X86_) && !defined(LEGACY_BACKEND)
// LEGACY_BACKEND does not use multi reg returns for calls with long return types
@@ -2952,49 +3017,97 @@ struct GenTreeCall final : public GenTree
}
// Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
- bool IsPInvoke() const { return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0; }
+ bool IsPInvoke() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0;
+ }
// Note that the distinction of whether tail prefixed or an implicit tail call
// is maintained on a call node till fgMorphCall() after which it will be
// either a tail call (i.e. IsTailCall() is true) or a non-tail call.
- bool IsTailPrefixedCall() const { return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0; }
+ bool IsTailPrefixedCall() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
+ }
- // This method returning "true" implies that tail call flowgraph morhphing has
+ // This method returning "true" implies that tail call flowgraph morphing has
// performed final checks and committed to making a tail call.
- bool IsTailCall() const { return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0; }
+ bool IsTailCall() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0;
+ }
// This method returning "true" implies that importer has performed tail call checks
// and providing a hint that this can be converted to a tail call.
- bool CanTailCall() const { return IsTailPrefixedCall() || IsImplicitTailCall(); }
+ bool CanTailCall() const
+ {
+ return IsTailPrefixedCall() || IsImplicitTailCall();
+ }
#ifndef LEGACY_BACKEND
- bool IsTailCallViaHelper() const { return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_HELPER); }
-#else // LEGACY_BACKEND
- bool IsTailCallViaHelper() const { return true; }
+ bool IsTailCallViaHelper() const
+ {
+ return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_HELPER);
+ }
+#else // LEGACY_BACKEND
+ bool IsTailCallViaHelper() const
+ {
+ return true;
+ }
#endif // LEGACY_BACKEND
-#if FEATURE_FASTTAILCALL
- bool IsFastTailCall() const { return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_HELPER); }
-#else // !FEATURE_FASTTAILCALL
- bool IsFastTailCall() const { return false; }
+#if FEATURE_FASTTAILCALL
+ bool IsFastTailCall() const
+ {
+ return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_HELPER);
+ }
+#else // !FEATURE_FASTTAILCALL
+ bool IsFastTailCall() const
+ {
+ return false;
+ }
#endif // !FEATURE_FASTTAILCALL
#if FEATURE_TAILCALL_OPT
// Returns true if this is marked for opportunistic tail calling.
// That is, can be tail called though not explicitly prefixed with "tail" prefix.
- bool IsImplicitTailCall() const { return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0; }
- bool IsTailCallConvertibleToLoop() const { return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0; }
-#else // !FEATURE_TAILCALL_OPT
- bool IsImplicitTailCall() const { return false; }
- bool IsTailCallConvertibleToLoop() const { return false; }
+ bool IsImplicitTailCall() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0;
+ }
+ bool IsTailCallConvertibleToLoop() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0;
+ }
+#else // !FEATURE_TAILCALL_OPT
+ bool IsImplicitTailCall() const
+ {
+ return false;
+ }
+ bool IsTailCallConvertibleToLoop() const
+ {
+ return false;
+ }
#endif // !FEATURE_TAILCALL_OPT
- bool IsSameThis() const { return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0; }
- bool IsDelegateInvoke() const { return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0; }
- bool IsVirtualStubRelativeIndir() const { return (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0; }
+ bool IsSameThis() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0;
+ }
+ bool IsDelegateInvoke() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0;
+ }
+ bool IsVirtualStubRelativeIndir() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0;
+ }
#ifdef FEATURE_READYTORUN_COMPILER
- bool IsR2RRelativeIndir() const { return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0; }
+ bool IsR2RRelativeIndir() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0;
+ }
void setEntryPoint(CORINFO_CONST_LOOKUP entryPoint)
{
gtEntryPoint = entryPoint;
@@ -3005,35 +3118,39 @@ struct GenTreeCall final : public GenTree
}
#endif // FEATURE_READYTORUN_COMPILER
- bool IsVarargs() const { return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0; }
+ bool IsVarargs() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0;
+ }
+
+ bool IsNoReturn() const
+ {
+ return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
+ }
- bool IsNoReturn() const { return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0; }
+ unsigned short gtCallMoreFlags; // in addition to gtFlags
- unsigned short gtCallMoreFlags; // in addition to gtFlags
-
- unsigned char gtCallType :3; // value from the gtCallTypes enumeration
- unsigned char gtReturnType :5; // exact return type
+ unsigned char gtCallType : 3; // value from the gtCallTypes enumeration
+ unsigned char gtReturnType : 5; // exact return type
- CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
+ CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
- union
- {
+ union {
// only used for CALLI unmanaged calls (CT_INDIRECT)
- GenTreePtr gtCallCookie;
- // gtInlineCandidateInfo is only used when inlining methods
- InlineCandidateInfo* gtInlineCandidateInfo;
- void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
+ GenTreePtr gtCallCookie;
+ // gtInlineCandidateInfo is only used when inlining methods
+ InlineCandidateInfo* gtInlineCandidateInfo;
+ void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
- void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
+ void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
};
// expression evaluated after args are placed which determines the control target
- GenTree * gtControlExpr;
+ GenTree* gtControlExpr;
- union
- {
- CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC
- GenTreePtr gtCallAddr; // CT_INDIRECT
+ union {
+ CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC
+ GenTreePtr gtCallAddr; // CT_INDIRECT
};
#ifdef FEATURE_READYTORUN_COMPILER
@@ -3049,21 +3166,20 @@ struct GenTreeCall final : public GenTree
// IL offset of the call wrt its parent method.
IL_OFFSET gtRawILOffset;
#endif // defined(DEBUG) || defined(INLINE_DATA)
-
+
bool IsHelperCall() const
{
return gtCallType == CT_HELPER;
}
-
+
bool IsHelperCall(CORINFO_METHOD_HANDLE callMethHnd) const
{
return IsHelperCall() && (callMethHnd == gtCallMethHnd);
}
-
+
bool IsHelperCall(Compiler* compiler, unsigned helper) const;
- GenTreeCall(var_types type) :
- GenTree(GT_CALL, type)
+ GenTreeCall(var_types type) : GenTree(GT_CALL, type)
{
}
#if DEBUGGABLE_GENTREE
@@ -3073,42 +3189,42 @@ struct GenTreeCall final : public GenTree
#endif
};
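The reformatted GenTreeCall accessors all follow one shape: test a single GTF_CALL_M_* bit in gtCallMoreFlags (or a GTF_* bit in gtFlags) and return the result. A compact self-contained sketch of that bit-flag accessor pattern; the struct and member names are stand-ins, while the bit values mirror GTF_CALL_M_TAILCALL and GTF_CALL_M_VARARGS above:

    // Sketch of the flag-test accessor pattern used throughout GenTreeCall.
    struct CallFlagsModel
    {
        static const unsigned short kTailCall = 0x0002;
        static const unsigned short kVarargs  = 0x0004;

        unsigned short moreFlags = 0;

        bool IsTailCall() const
        {
            return (moreFlags & kTailCall) != 0;
        }
        bool IsVarargs() const
        {
            return (moreFlags & kVarargs) != 0;
        }
    };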
-struct GenTreeCmpXchg: public GenTree
+struct GenTreeCmpXchg : public GenTree
{
- GenTreePtr gtOpLocation;
- GenTreePtr gtOpValue;
- GenTreePtr gtOpComparand;
+ GenTreePtr gtOpLocation;
+ GenTreePtr gtOpValue;
+ GenTreePtr gtOpComparand;
- GenTreeCmpXchg(var_types type, GenTreePtr loc, GenTreePtr val, GenTreePtr comparand) :
- GenTree(GT_CMPXCHG, type),
- gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
- {
- // There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
- // have global effects.
- gtFlags |= GTF_GLOB_EFFECT;
- }
+ GenTreeCmpXchg(var_types type, GenTreePtr loc, GenTreePtr val, GenTreePtr comparand)
+ : GenTree(GT_CMPXCHG, type), gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
+ {
+ // There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
+ // have global effects.
+ gtFlags |= GTF_GLOB_EFFECT;
+ }
#if DEBUGGABLE_GENTREE
- GenTreeCmpXchg() : GenTree() {}
+ GenTreeCmpXchg() : GenTree()
+ {
+ }
#endif
};
-
-
-struct GenTreeFptrVal: public GenTree
+struct GenTreeFptrVal : public GenTree
{
CORINFO_METHOD_HANDLE gtFptrMethod;
#ifdef FEATURE_READYTORUN_COMPILER
- CORINFO_CONST_LOOKUP gtEntryPoint;
+ CORINFO_CONST_LOOKUP gtEntryPoint;
CORINFO_RESOLVED_TOKEN* gtLdftnResolvedToken;
#endif
- GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth) :
- GenTree(GT_FTN_ADDR, type),
- gtFptrMethod(meth)
- {}
+ GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth) : GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeFptrVal() : GenTree() {}
+ GenTreeFptrVal() : GenTree()
+ {
+ }
#endif
};
@@ -3116,136 +3232,165 @@ struct GenTreeFptrVal: public GenTree
struct GenTreeQmark : public GenTreeOp
{
// Livesets on entry to then and else subtrees
- VARSET_TP gtThenLiveSet;
- VARSET_TP gtElseLiveSet;
-
+ VARSET_TP gtThenLiveSet;
+ VARSET_TP gtElseLiveSet;
+
// The "Compiler*" argument is not a DEBUGARG here because we use it to keep track of the set of
// (possible) QMark nodes.
GenTreeQmark(var_types type, GenTreePtr cond, GenTreePtr colonOp, class Compiler* comp);
#if DEBUGGABLE_GENTREE
- GenTreeQmark() : GenTreeOp(GT_QMARK, TYP_INT, NULL, NULL) {}
+ GenTreeQmark() : GenTreeOp(GT_QMARK, TYP_INT, nullptr, nullptr)
+ {
+ }
#endif
};
/* gtIntrinsic -- intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
-struct GenTreeIntrinsic: public GenTreeOp
+struct GenTreeIntrinsic : public GenTreeOp
{
CorInfoIntrinsics gtIntrinsicId;
- CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
+ CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
#ifdef FEATURE_READYTORUN_COMPILER
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
- GenTreeIntrinsic(var_types type, GenTreePtr op1, CorInfoIntrinsics intrinsicId, CORINFO_METHOD_HANDLE methodHandle) :
- GenTreeOp(GT_INTRINSIC, type, op1, NULL),
- gtIntrinsicId(intrinsicId),
- gtMethodHandle(methodHandle)
- {}
+ GenTreeIntrinsic(var_types type, GenTreePtr op1, CorInfoIntrinsics intrinsicId, CORINFO_METHOD_HANDLE methodHandle)
+ : GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicId(intrinsicId), gtMethodHandle(methodHandle)
+ {
+ }
- GenTreeIntrinsic(var_types type, GenTreePtr op1, GenTreePtr op2, CorInfoIntrinsics intrinsicId, CORINFO_METHOD_HANDLE methodHandle) :
- GenTreeOp(GT_INTRINSIC, type, op1, op2),
- gtIntrinsicId(intrinsicId),
- gtMethodHandle(methodHandle)
- {}
+ GenTreeIntrinsic(var_types type,
+ GenTreePtr op1,
+ GenTreePtr op2,
+ CorInfoIntrinsics intrinsicId,
+ CORINFO_METHOD_HANDLE methodHandle)
+ : GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicId(intrinsicId), gtMethodHandle(methodHandle)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeIntrinsic() : GenTreeOp() {}
+ GenTreeIntrinsic() : GenTreeOp()
+ {
+ }
#endif
};
#ifdef FEATURE_SIMD
/* gtSIMD -- SIMD intrinsic (possibly-binary op [NULL op2 is allowed] with additional fields) */
-struct GenTreeSIMD: public GenTreeOp
+struct GenTreeSIMD : public GenTreeOp
{
- SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
- var_types gtSIMDBaseType; // SIMD vector base type
- unsigned gtSIMDSize; // SIMD vector size in bytes
-
- GenTreeSIMD(var_types type, GenTreePtr op1, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size) :
- GenTreeOp(GT_SIMD, type, op1, nullptr),
- gtSIMDIntrinsicID(simdIntrinsicID),
- gtSIMDBaseType(baseType),
- gtSIMDSize(size)
- {}
-
- GenTreeSIMD(var_types type, GenTreePtr op1, GenTreePtr op2, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size) :
- GenTreeOp(GT_SIMD, type, op1, op2),
- gtSIMDIntrinsicID(simdIntrinsicID),
- gtSIMDBaseType(baseType),
- gtSIMDSize(size)
- {}
+ SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
+ var_types gtSIMDBaseType; // SIMD vector base type
+ unsigned gtSIMDSize; // SIMD vector size in bytes
+
+ GenTreeSIMD(var_types type, GenTreePtr op1, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size)
+ : GenTreeOp(GT_SIMD, type, op1, nullptr)
+ , gtSIMDIntrinsicID(simdIntrinsicID)
+ , gtSIMDBaseType(baseType)
+ , gtSIMDSize(size)
+ {
+ }
+
+ GenTreeSIMD(var_types type,
+ GenTreePtr op1,
+ GenTreePtr op2,
+ SIMDIntrinsicID simdIntrinsicID,
+ var_types baseType,
+ unsigned size)
+ : GenTreeOp(GT_SIMD, type, op1, op2)
+ , gtSIMDIntrinsicID(simdIntrinsicID)
+ , gtSIMDBaseType(baseType)
+ , gtSIMDSize(size)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeSIMD() : GenTreeOp() {}
+ GenTreeSIMD() : GenTreeOp()
+ {
+ }
#endif
};
#endif // FEATURE_SIMD
/* gtIndex -- array access */
-struct GenTreeIndex: public GenTreeOp
+struct GenTreeIndex : public GenTreeOp
{
- GenTreePtr& Arr() { return gtOp1; }
- GenTreePtr& Index() { return gtOp2; }
+ GenTreePtr& Arr()
+ {
+ return gtOp1;
+ }
+ GenTreePtr& Index()
+ {
+ return gtOp2;
+ }
- unsigned gtIndElemSize; // size of elements in the array
- CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
+ unsigned gtIndElemSize; // size of elements in the array
+ CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
- GenTreeIndex(var_types type, GenTreePtr arr, GenTreePtr ind, unsigned indElemSize) :
- GenTreeOp(GT_INDEX, type, arr, ind),
- gtIndElemSize(indElemSize),
- gtStructElemClass(nullptr) // We always initialize this after construction.
- {
+ GenTreeIndex(var_types type, GenTreePtr arr, GenTreePtr ind, unsigned indElemSize)
+ : GenTreeOp(GT_INDEX, type, arr, ind)
+ , gtIndElemSize(indElemSize)
+ , gtStructElemClass(nullptr) // We always initialize this after construction.
+ {
#ifdef DEBUG
- if (JitConfig.JitSkipArrayBoundCheck() == 1)
- {
- // Skip bounds check
- }
- else
+ if (JitConfig.JitSkipArrayBoundCheck() == 1)
+ {
+ // Skip bounds check
+ }
+ else
#endif
- {
- // Do bounds check
- gtFlags |= GTF_INX_RNGCHK;
- }
-
- if (type == TYP_REF)
- {
- gtFlags |= GTF_INX_REFARR_LAYOUT;
- }
+ {
+ // Do bounds check
+ gtFlags |= GTF_INX_RNGCHK;
+ }
- gtFlags |= GTF_EXCEPT|GTF_GLOB_REF;
+ if (type == TYP_REF)
+ {
+ gtFlags |= GTF_INX_REFARR_LAYOUT;
}
+
+ gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
+ }
#if DEBUGGABLE_GENTREE
- GenTreeIndex() : GenTreeOp() {}
+ GenTreeIndex() : GenTreeOp()
+ {
+ }
#endif
};
/* gtArrLen -- array length (GT_ARR_LENGTH)
GT_ARR_LENGTH is used for "arr.length" */
-struct GenTreeArrLen: public GenTreeUnOp
+struct GenTreeArrLen : public GenTreeUnOp
{
- GenTreePtr& ArrRef() { return gtOp1; } // the array address node
+ GenTreePtr& ArrRef()
+ {
+ return gtOp1;
+ } // the array address node
private:
- int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
+ int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
public:
- inline int ArrLenOffset() {
+ inline int ArrLenOffset()
+ {
return gtArrLenOffset;
}
- GenTreeArrLen(var_types type, GenTreePtr arrRef, int lenOffset) :
- GenTreeUnOp(GT_ARR_LENGTH, type, arrRef),
- gtArrLenOffset(lenOffset)
- {}
+ GenTreeArrLen(var_types type, GenTreePtr arrRef, int lenOffset)
+ : GenTreeUnOp(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeArrLen() : GenTreeUnOp() {}
+ GenTreeArrLen() : GenTreeUnOp()
+ {
+ }
#endif
};
@@ -3256,32 +3401,35 @@ public:
// - the "kind" of the throw block to branch to on failure
// It generates no result.
-struct GenTreeBoundsChk: public GenTree
+struct GenTreeBoundsChk : public GenTree
{
- GenTreePtr gtArrLen; // An expression for the length of the array being indexed.
- GenTreePtr gtIndex; // The index expression.
+ GenTreePtr gtArrLen; // An expression for the length of the array being indexed.
+ GenTreePtr gtIndex; // The index expression.
- GenTreePtr gtIndRngFailBB; // Label to jump to for array-index-out-of-range
- SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
+ GenTreePtr gtIndRngFailBB; // Label to jump to for array-index-out-of-range
+ SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
/* Only out-of-ranges at same stack depth can jump to the same label (finding return address is easier)
For delayed calling of fgSetRngChkTarget() so that the
optimizer has a chance of eliminating some of the rng checks */
- unsigned gtStkDepth;
-
- GenTreeBoundsChk(genTreeOps oper, var_types type, GenTreePtr arrLen, GenTreePtr index, SpecialCodeKind kind) :
- GenTree(oper, type),
- gtArrLen(arrLen), gtIndex(index),
- gtIndRngFailBB(NULL),
- gtThrowKind(kind),
- gtStkDepth(0)
- {
- // Effects flags propagate upwards.
- gtFlags |= (arrLen->gtFlags & GTF_ALL_EFFECT);
- gtFlags |= GTF_EXCEPT;
- }
+ unsigned gtStkDepth;
+
+ GenTreeBoundsChk(genTreeOps oper, var_types type, GenTreePtr arrLen, GenTreePtr index, SpecialCodeKind kind)
+ : GenTree(oper, type)
+ , gtArrLen(arrLen)
+ , gtIndex(index)
+ , gtIndRngFailBB(nullptr)
+ , gtThrowKind(kind)
+ , gtStkDepth(0)
+ {
+ // Effects flags propagate upwards.
+ gtFlags |= (arrLen->gtFlags & GTF_ALL_EFFECT);
+ gtFlags |= GTF_EXCEPT;
+ }
#if DEBUGGABLE_GENTREE
- GenTreeBoundsChk() : GenTree() {}
+ GenTreeBoundsChk() : GenTree()
+ {
+ }
#endif
// If the gtArrLen is really an array length, returns array reference, else "NULL".
@@ -3293,7 +3441,7 @@ struct GenTreeBoundsChk: public GenTree
}
else
{
- return NULL;
+ return nullptr;
}
}
};
@@ -3301,36 +3449,45 @@ struct GenTreeBoundsChk: public GenTree
// gtArrElem -- general array element (GT_ARR_ELEM), for non "SZ_ARRAYS"
// -- multidimensional arrays, or 1-d arrays with non-zero lower bounds.
-struct GenTreeArrElem: public GenTree
+struct GenTreeArrElem : public GenTree
{
- GenTreePtr gtArrObj;
+ GenTreePtr gtArrObj;
- #define GT_ARR_MAX_RANK 3
- GenTreePtr gtArrInds[GT_ARR_MAX_RANK]; // Indices
- unsigned char gtArrRank; // Rank of the array
+#define GT_ARR_MAX_RANK 3
+ GenTreePtr gtArrInds[GT_ARR_MAX_RANK]; // Indices
+ unsigned char gtArrRank; // Rank of the array
- unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
- // on the optimization path of array intrisics.
- // It stores the size of array elements WHEN it can fit
- // into an "unsigned char".
- // This has caused VSW 571394.
- var_types gtArrElemType; // The array element type
+ unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
+ // on the optimization path of array intrinsics.
+ // It stores the size of array elements WHEN it can fit
+ // into an "unsigned char".
+ // This has caused VSW 571394.
+ var_types gtArrElemType; // The array element type
// Requires that "inds" is a pointer to an array of "rank" GenTreePtrs for the indices.
- GenTreeArrElem(var_types type, GenTreePtr arr, unsigned char rank, unsigned char elemSize, var_types elemType, GenTreePtr* inds) :
- GenTree(GT_ARR_ELEM, type),
- gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
+ GenTreeArrElem(var_types type,
+ GenTreePtr arr,
+ unsigned char rank,
+ unsigned char elemSize,
+ var_types elemType,
+ GenTreePtr* inds)
+ : GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
+ {
+ for (unsigned char i = 0; i < rank; i++)
{
- for (unsigned char i = 0; i < rank; i++) gtArrInds[i] = inds[i];
- gtFlags |= GTF_EXCEPT;
+ gtArrInds[i] = inds[i];
}
+ gtFlags |= GTF_EXCEPT;
+ }
#if DEBUGGABLE_GENTREE
- GenTreeArrElem() : GenTree() {}
+ GenTreeArrElem() : GenTree()
+ {
+ }
#endif
};
//--------------------------------------------
-//
+//
// GenTreeArrIndex (gtArrIndex): Expression to bounds-check the index for one dimension of a
// multi-dimensional or non-zero-based array., and compute the effective index
// (i.e. subtracting the lower bound).
@@ -3356,20 +3513,32 @@ struct GenTreeArrElem: public GenTree
// +--* <index0>
// +--* - (GT_SUB)
//
-struct GenTreeArrIndex: public GenTreeOp
+struct GenTreeArrIndex : public GenTreeOp
{
// The array object - may be any expression producing an Array reference, but is likely to be a lclVar.
- GenTreePtr& ArrObj() { return gtOp1; }
+ GenTreePtr& ArrObj()
+ {
+ return gtOp1;
+ }
// The index expression - may be any integral expression.
- GenTreePtr& IndexExpr() { return gtOp2; }
- unsigned char gtCurrDim; // The current dimension
- unsigned char gtArrRank; // Rank of the array
- var_types gtArrElemType; // The array element type
+ GenTreePtr& IndexExpr()
+ {
+ return gtOp2;
+ }
+ unsigned char gtCurrDim; // The current dimension
+ unsigned char gtArrRank; // Rank of the array
+ var_types gtArrElemType; // The array element type
- GenTreeArrIndex(var_types type, GenTreePtr arrObj, GenTreePtr indexExpr,
- unsigned char currDim, unsigned char arrRank, var_types elemType) :
- GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr),
- gtCurrDim(currDim), gtArrRank(arrRank), gtArrElemType(elemType)
+ GenTreeArrIndex(var_types type,
+ GenTreePtr arrObj,
+ GenTreePtr indexExpr,
+ unsigned char currDim,
+ unsigned char arrRank,
+ var_types elemType)
+ : GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr)
+ , gtCurrDim(currDim)
+ , gtArrRank(arrRank)
+ , gtArrElemType(elemType)
{
gtFlags |= GTF_EXCEPT;
}
@@ -3377,29 +3546,38 @@ struct GenTreeArrIndex: public GenTreeOp
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
- GenTreeArrIndex() : GenTreeOp() {}
+ GenTreeArrIndex() : GenTreeOp()
+ {
+ }
#endif
};
-// Represents either an InitBlk, InitObj, CpBlk or CpObj
+// Represents either an InitBlk, InitObj, CpBlk or CpObj
// MSIL OpCode.
struct GenTreeBlkOp : public GenTreeOp
{
public:
// The destination for the CpBlk/CpObj/InitBlk/InitObj to copy bits to
- GenTreePtr Dest() {
- assert(gtOp1->gtOper == GT_LIST);
- return gtOp1->gtOp.gtOp1;
- }
+ GenTreePtr Dest()
+ {
+ assert(gtOp1->gtOper == GT_LIST);
+ return gtOp1->gtOp.gtOp1;
+ }
// Return true iff the object being copied contains one or more GC pointers.
- bool HasGCPtr();
+ bool HasGCPtr();
// True if this BlkOpNode is a volatile memory operation.
- bool IsVolatile() const { return (gtFlags & GTF_BLK_VOLATILE) != 0; }
+ bool IsVolatile() const
+ {
+ return (gtFlags & GTF_BLK_VOLATILE) != 0;
+ }
    // True if this BlkOpNode is an unaligned memory operation.
- bool IsUnaligned() const { return (gtFlags & GTF_BLK_UNALIGNED) != 0; }
+ bool IsUnaligned() const
+ {
+ return (gtFlags & GTF_BLK_UNALIGNED) != 0;
+ }
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
@@ -3411,12 +3589,10 @@ public:
BlkOpKindUnroll,
} gtBlkOpKind;
- bool gtBlkOpGcUnsafe;
+ bool gtBlkOpGcUnsafe;
- GenTreeBlkOp(genTreeOps oper) :
- GenTreeOp(oper, TYP_VOID DEBUGARG(true)),
- gtBlkOpKind(BlkOpKindInvalid),
- gtBlkOpGcUnsafe(false)
+ GenTreeBlkOp(genTreeOps oper)
+ : GenTreeOp(oper, TYP_VOID DEBUGARG(true)), gtBlkOpKind(BlkOpKindInvalid), gtBlkOpGcUnsafe(false)
{
assert(OperIsBlkOp(oper));
}
@@ -3424,7 +3600,9 @@ public:
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
- GenTreeBlkOp() : GenTreeOp(){}
+ GenTreeBlkOp() : GenTreeOp()
+ {
+ }
#endif // DEBUGGABLE_GENTREE
};
@@ -3433,38 +3611,40 @@ struct GenTreeCpObj : public GenTreeBlkOp
{
public:
// The source for the CpBlk/CpObj to copy bits from
- GenTreePtr Source() {
+ GenTreePtr Source()
+ {
assert(gtOper == GT_COPYOBJ && gtOp1->gtOper == GT_LIST);
return gtOp1->gtOp.gtOp2;
}
// In the case of CopyObj, this is the class token that represents the type that is being copied.
- GenTreePtr ClsTok() { return gtOp2; }
+ GenTreePtr ClsTok()
+ {
+ return gtOp2;
+ }
// If non-null, this array represents the gc-layout of the class that is being copied
// with CpObj.
- BYTE* gtGcPtrs;
+ BYTE* gtGcPtrs;
- // If non-zero, this is the number of slots in the class layout that
+ // If non-zero, this is the number of slots in the class layout that
// contain gc-pointers.
- unsigned gtGcPtrCount;
+ unsigned gtGcPtrCount;
// If non-zero, the number of pointer-sized slots that constitutes the class token in CpObj.
- unsigned gtSlots;
+ unsigned gtSlots;
- GenTreeCpObj(unsigned gcPtrCount, unsigned gtSlots, BYTE* gtGcPtrs) :
- GenTreeBlkOp(GT_COPYOBJ),
- gtGcPtrs(gtGcPtrs),
- gtGcPtrCount(gcPtrCount),
- gtSlots(gtSlots){ }
+ GenTreeCpObj(unsigned gcPtrCount, unsigned gtSlots, BYTE* gtGcPtrs)
+ : GenTreeBlkOp(GT_COPYOBJ), gtGcPtrs(gtGcPtrs), gtGcPtrCount(gcPtrCount), gtSlots(gtSlots)
+ {
+ }
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
- GenTreeCpObj() : GenTreeBlkOp(),
- gtGcPtrs(nullptr),
- gtGcPtrCount(0),
- gtSlots(0) {}
+ GenTreeCpObj() : GenTreeBlkOp(), gtGcPtrs(nullptr), gtGcPtrCount(0), gtSlots(0)
+ {
+ }
#endif // DEBUGGABLE_GENTREE
};
@@ -3472,15 +3652,22 @@ protected:
struct GenTreeInitBlk : public GenTreeBlkOp
{
public:
-
// The value used to fill the destination buffer.
- GenTreePtr InitVal() { assert(gtOp1->gtOper == GT_LIST);
- return gtOp1->gtOp.gtOp2; }
+ GenTreePtr InitVal()
+ {
+ assert(gtOp1->gtOper == GT_LIST);
+ return gtOp1->gtOp.gtOp2;
+ }
// The size of the buffer to be copied.
- GenTreePtr Size() { return gtOp2; }
+ GenTreePtr Size()
+ {
+ return gtOp2;
+ }
- GenTreeInitBlk() : GenTreeBlkOp(GT_INITBLK){}
+ GenTreeInitBlk() : GenTreeBlkOp(GT_INITBLK)
+ {
+ }
#if DEBUGGABLE_GENTREE
protected:
@@ -3492,16 +3679,23 @@ protected:
struct GenTreeCpBlk : public GenTreeBlkOp
{
public:
-
// The value used to fill the destination buffer.
// The source for the CpBlk/CpObj to copy bits from
- GenTreePtr Source() { assert(gtOp1->gtOper == GT_LIST);
- return gtOp1->gtOp.gtOp2; }
+ GenTreePtr Source()
+ {
+ assert(gtOp1->gtOper == GT_LIST);
+ return gtOp1->gtOp.gtOp2;
+ }
// The size of the buffer to be copied.
- GenTreePtr Size() { return gtOp2; }
+ GenTreePtr Size()
+ {
+ return gtOp2;
+ }
- GenTreeCpBlk() : GenTreeBlkOp(GT_COPYBLK){}
+ GenTreeCpBlk() : GenTreeBlkOp(GT_COPYBLK)
+ {
+ }
#if DEBUGGABLE_GENTREE
protected:
@@ -3510,7 +3704,7 @@ protected:
};
//--------------------------------------------
-//
+//
// GenTreeArrOffset (gtArrOffset): Expression to compute the accumulated offset for the address
// of an element of a multi-dimensional or non-zero-based array.
//
@@ -3537,35 +3731,47 @@ protected:
// is done, we will also want to replace the <arrObj> argument to arrOffs with the
// ArrLen as for GenTreeArrIndex.
//
-struct GenTreeArrOffs: public GenTree
+struct GenTreeArrOffs : public GenTree
{
- GenTreePtr gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
- // will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
- GenTreePtr gtIndex; // The effective index for the current dimension - must be non-negative
- // and can be any expression (though it is likely to be either a GenTreeArrIndex,
- // node, a lclVar, or a constant).
- GenTreePtr gtArrObj; // The array object - may be any expression producing an Array reference,
- // but is likely to be a lclVar.
- unsigned char gtCurrDim; // The current dimension
- unsigned char gtArrRank; // Rank of the array
- var_types gtArrElemType; // The array element type
-
- GenTreeArrOffs(var_types type, GenTreePtr offset, GenTreePtr index, GenTreePtr arrObj,
- unsigned char currDim, unsigned char rank, var_types elemType) :
- GenTree(GT_ARR_OFFSET, type), gtOffset(offset), gtIndex(index), gtArrObj(arrObj),
- gtCurrDim(currDim), gtArrRank(rank), gtArrElemType(elemType)
+ GenTreePtr gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
+ // will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
+ GenTreePtr gtIndex; // The effective index for the current dimension - must be non-negative
+ // and can be any expression (though it is likely to be either a GenTreeArrIndex
+ // node, a lclVar, or a constant).
+ GenTreePtr gtArrObj; // The array object - may be any expression producing an Array reference,
+ // but is likely to be a lclVar.
+ unsigned char gtCurrDim; // The current dimension
+ unsigned char gtArrRank; // Rank of the array
+ var_types gtArrElemType; // The array element type
+
+ GenTreeArrOffs(var_types type,
+ GenTreePtr offset,
+ GenTreePtr index,
+ GenTreePtr arrObj,
+ unsigned char currDim,
+ unsigned char rank,
+ var_types elemType)
+ : GenTree(GT_ARR_OFFSET, type)
+ , gtOffset(offset)
+ , gtIndex(index)
+ , gtArrObj(arrObj)
+ , gtCurrDim(currDim)
+ , gtArrRank(rank)
+ , gtArrElemType(elemType)
{
assert(index->gtFlags & GTF_EXCEPT);
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
- GenTreeArrOffs() : GenTree() {}
+ GenTreeArrOffs() : GenTree()
+ {
+ }
#endif
};
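GenTreeArrOffs carries the offset accumulated over the lower dimensions plus the effective index for the current dimension. As a rough illustration only (not the JIT's actual GT_ARR_OFFSET computation), a row-major accumulation step folds the running offset and the current index together like this:

    // Rough illustration of one per-dimension accumulation step for a row-major
    // multi-dimensional array; a sketch only, not the JIT's actual computation.
    unsigned AccumulateOffset(unsigned priorOffset, unsigned dimLength, unsigned effectiveIndex)
    {
        // Fold the current dimension into the offset accumulated for the lower dimensions.
        return priorOffset * dimLength + effectiveIndex;
    }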
/* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
-struct GenTreeAddrMode: public GenTreeOp
+struct GenTreeAddrMode : public GenTreeOp
{
// Address is Base + Index*Scale + Offset.
// These are the legal patterns:
@@ -3585,47 +3791,62 @@ struct GenTreeAddrMode: public GenTreeOp
// "Index*Scale + Offset".
// First operand is base address/pointer
- bool HasBase() const { return gtOp1 != nullptr; }
- GenTreePtr& Base() { return gtOp1; }
+ bool HasBase() const
+ {
+ return gtOp1 != nullptr;
+ }
+ GenTreePtr& Base()
+ {
+ return gtOp1;
+ }
// Second operand is scaled index value
- bool HasIndex() const { return gtOp2 != nullptr; }
- GenTreePtr& Index() { return gtOp2; }
+ bool HasIndex() const
+ {
+ return gtOp2 != nullptr;
+ }
+ GenTreePtr& Index()
+ {
+ return gtOp2;
+ }
- unsigned gtScale; // The scale factor
- unsigned gtOffset; // The offset to add
+ unsigned gtScale; // The scale factor
+ unsigned gtOffset; // The offset to add
- GenTreeAddrMode(var_types type, GenTreePtr base, GenTreePtr index,
- unsigned scale, unsigned offset) :
- GenTreeOp(GT_LEA, type, base, index )
+ GenTreeAddrMode(var_types type, GenTreePtr base, GenTreePtr index, unsigned scale, unsigned offset)
+ : GenTreeOp(GT_LEA, type, base, index)
{
- gtScale = scale;
+ gtScale = scale;
gtOffset = offset;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
- GenTreeAddrMode() : GenTreeOp() {}
+ GenTreeAddrMode() : GenTreeOp()
+ {
+ }
#endif
};
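GenTreeAddrMode documents the address form Base + Index*Scale + Offset, where either the base or the index may be absent. A small standalone helper that evaluates that form, with null pointers standing in for missing components (a sketch, not JIT code):

    // Sketch: evaluate the GT_LEA form Base + Index*Scale + Offset; null pointers
    // model a missing base or index component. Illustrative only.
    #include <cstdint>

    inline std::uintptr_t EffectiveAddress(const std::uintptr_t* base,
                                           const std::uintptr_t* index,
                                           unsigned              scale,
                                           unsigned              offset)
    {
        std::uintptr_t addr = (base != nullptr) ? *base : 0;
        if (index != nullptr)
        {
            addr += *index * scale; // scaled index contributes only when present
        }
        return addr + offset;
    }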
// Indir is just an op, no additional data, but some additional abstractions
-struct GenTreeIndir: public GenTreeOp
+struct GenTreeIndir : public GenTreeOp
{
// like an assign, op1 is the destination
- GenTreePtr& Addr() { return gtOp1; }
+ GenTreePtr& Addr()
+ {
+ return gtOp1;
+ }
- // these methods provide an interface to the indirection node which
- bool HasBase();
- bool HasIndex();
- GenTree* Base();
- GenTree* Index();
- unsigned Scale();
- size_t Offset();
+ // these methods provide an interface to the indirection node which
+ bool HasBase();
+ bool HasIndex();
+ GenTree* Base();
+ GenTree* Index();
+ unsigned Scale();
+ size_t Offset();
- GenTreeIndir(genTreeOps oper, var_types type, GenTree *addr, GenTree *data) :
- GenTreeOp(oper, type, addr, data)
+ GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data)
{
}
@@ -3633,37 +3854,41 @@ struct GenTreeIndir: public GenTreeOp
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
- GenTreeIndir() : GenTreeOp() {}
+ GenTreeIndir() : GenTreeOp()
+ {
+ }
#endif
};
// gtObj -- 'object' (GT_OBJ). */
-struct GenTreeObj: public GenTreeIndir
+struct GenTreeObj : public GenTreeIndir
{
- CORINFO_CLASS_HANDLE gtClass; // the class of the object
+ CORINFO_CLASS_HANDLE gtClass; // the class of the object
- GenTreeObj(var_types type, GenTreePtr addr, CORINFO_CLASS_HANDLE cls) :
- GenTreeIndir(GT_OBJ, type, addr, nullptr),
- gtClass(cls)
- {
- // By default, an OBJ is assumed to be a global reference.
- gtFlags |= GTF_GLOB_REF;
- }
+ GenTreeObj(var_types type, GenTreePtr addr, CORINFO_CLASS_HANDLE cls)
+ : GenTreeIndir(GT_OBJ, type, addr, nullptr), gtClass(cls)
+ {
+ // By default, an OBJ is assumed to be a global reference.
+ gtFlags |= GTF_GLOB_REF;
+ }
#if DEBUGGABLE_GENTREE
- GenTreeObj() : GenTreeIndir() {}
+ GenTreeObj() : GenTreeIndir()
+ {
+ }
#endif
};
-// Read-modify-write status of a RMW memory op rooted at a storeInd
-enum RMWStatus {
- STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
- // Default status unless modified by IsRMWMemOpRootedAtStoreInd()
+// Read-modify-write status of a RMW memory op rooted at a storeInd
+enum RMWStatus
+{
+ STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
+ // Default status unless modified by IsRMWMemOpRootedAtStoreInd()
// One of these denote storeind is a RMW memory operation.
- STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
- STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
+ STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
+ STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
// One of these denote the reason for storeind is marked as non-RMW operation
STOREIND_RMW_UNSUPPORTED_ADDR, // Addr mode is not yet supported for RMW memory
@@ -3673,33 +3898,45 @@ enum RMWStatus {
};
// StoreInd is just a BinOp, with additional RMW status
-struct GenTreeStoreInd: public GenTreeIndir
+struct GenTreeStoreInd : public GenTreeIndir
{
#if !CPU_LOAD_STORE_ARCH
// The below flag is set and used during lowering
- RMWStatus gtRMWStatus;
+ RMWStatus gtRMWStatus;
- bool IsRMWStatusUnknown() { return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN; }
- bool IsNonRMWMemoryOp() {
- return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR ||
- gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
- gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE ||
- gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
+ bool IsRMWStatusUnknown()
+ {
+ return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN;
+ }
+ bool IsNonRMWMemoryOp()
+ {
+ return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR || gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
+ gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE || gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
+ }
+ bool IsRMWMemoryOp()
+ {
+ return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
+ }
+ bool IsRMWDstOp1()
+ {
+ return gtRMWStatus == STOREIND_RMW_DST_IS_OP1;
}
- bool IsRMWMemoryOp() { return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2; }
- bool IsRMWDstOp1() { return gtRMWStatus == STOREIND_RMW_DST_IS_OP1; }
- bool IsRMWDstOp2() { return gtRMWStatus == STOREIND_RMW_DST_IS_OP2; }
-#endif //!CPU_LOAD_STORE_ARCH
+ bool IsRMWDstOp2()
+ {
+ return gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
+ }
+#endif //! CPU_LOAD_STORE_ARCH
- RMWStatus GetRMWStatus() {
+ RMWStatus GetRMWStatus()
+ {
#if !CPU_LOAD_STORE_ARCH
- return gtRMWStatus;
+ return gtRMWStatus;
#else
return STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
- void SetRMWStatusDefault()
+ void SetRMWStatusDefault()
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = STOREIND_RMW_STATUS_UNKNOWN;
@@ -3713,10 +3950,12 @@ struct GenTreeStoreInd: public GenTreeIndir
#endif
}
- GenTreePtr& Data() { return gtOp2; }
+ GenTreePtr& Data()
+ {
+ return gtOp2;
+ }
- GenTreeStoreInd(var_types type, GenTree *destPtr, GenTree *data) :
- GenTreeIndir(GT_STOREIND, type, destPtr, data)
+ GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data)
{
SetRMWStatusDefault();
}
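
The RMW bookkeeping above lets lowering fold a load/modify/store over the same address into a single memory-operand instruction on xarch. A minimal sketch of the source-level pattern involved (the function and names here are illustrative, not JIT code):

    // Hypothetical example of the pattern the RMW status tracks: the load
    // and the store go through the same indirection, so on x86/x64 the
    // sequence can be emitted as one "add dword ptr [p], 5" instead of a
    // separate load, add, and store.
    void bumpCounter(int* p)
    {
        *p = *p + 5; // conceptually STOREIND(p, ADD(IND(p), 5)); the dst candidate is op1 of the ADD
    }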
@@ -3725,58 +3964,61 @@ struct GenTreeStoreInd: public GenTreeIndir
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
- GenTreeStoreInd() : GenTreeIndir() { SetRMWStatusDefault(); }
+ GenTreeStoreInd() : GenTreeIndir()
+ {
+ SetRMWStatusDefault();
+ }
#endif
};
-
/* gtRetExp -- Place holder for the return expression from an inline candidate (GT_RET_EXPR) */
-struct GenTreeRetExpr: public GenTree
+struct GenTreeRetExpr : public GenTree
{
- GenTreePtr gtInlineCandidate;
+ GenTreePtr gtInlineCandidate;
CORINFO_CLASS_HANDLE gtRetClsHnd;
- GenTreeRetExpr(var_types type) :
- GenTree(GT_RET_EXPR, type)
- {}
+ GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeRetExpr() : GenTree() {}
+ GenTreeRetExpr() : GenTree()
+ {
+ }
#endif
};
-
/* gtStmt -- 'statement expr' (GT_STMT) */
class InlineContext;
-struct GenTreeStmt: public GenTree
+struct GenTreeStmt : public GenTree
{
- GenTreePtr gtStmtExpr; // root of the expression tree
- GenTreePtr gtStmtList; // first node (for forward walks)
- InlineContext* gtInlineContext; // The inline context for this statement.
-
+ GenTreePtr gtStmtExpr; // root of the expression tree
+ GenTreePtr gtStmtList; // first node (for forward walks)
+ InlineContext* gtInlineContext; // The inline context for this statement.
+
#if defined(DEBUGGING_SUPPORT) || defined(DEBUG)
- IL_OFFSETX gtStmtILoffsx; // instr offset (if available)
+ IL_OFFSETX gtStmtILoffsx; // instr offset (if available)
#endif
#ifdef DEBUG
- IL_OFFSET gtStmtLastILoffs;// instr offset at end of stmt
+ IL_OFFSET gtStmtLastILoffs; // instr offset at end of stmt
#endif
- bool gtStmtIsTopLevel()
+ bool gtStmtIsTopLevel()
{
return (gtFlags & GTF_STMT_TOP_LEVEL) != 0;
}
- bool gtStmtIsEmbedded()
+ bool gtStmtIsEmbedded()
{
return !gtStmtIsTopLevel();
}
// Return the next statement, if it is embedded, otherwise nullptr
- GenTreeStmt* gtStmtNextIfEmbedded()
+ GenTreeStmt* gtStmtNextIfEmbedded()
{
GenTree* nextStmt = gtNext;
if (nextStmt != nullptr && nextStmt->gtStmt.gtStmtIsEmbedded())
@@ -3789,7 +4031,7 @@ struct GenTreeStmt: public GenTree
}
}
- GenTree* gtStmtNextTopLevelStmt()
+ GenTree* gtStmtNextTopLevelStmt()
{
GenTree* nextStmt = gtNext;
while (nextStmt != nullptr && nextStmt->gtStmt.gtStmtIsEmbedded())
@@ -3799,26 +4041,32 @@ struct GenTreeStmt: public GenTree
return nextStmt;
}
- __declspec(property(get=getNextStmt))
- GenTreeStmt* gtNextStmt;
+ __declspec(property(get = getNextStmt)) GenTreeStmt* gtNextStmt;
- __declspec(property(get=getPrevStmt))
- GenTreeStmt* gtPrevStmt;
+ __declspec(property(get = getPrevStmt)) GenTreeStmt* gtPrevStmt;
- GenTreeStmt* getNextStmt()
+ GenTreeStmt* getNextStmt()
{
if (gtNext == nullptr)
+ {
return nullptr;
+ }
else
+ {
return gtNext->AsStmt();
+ }
}
- GenTreeStmt* getPrevStmt()
+ GenTreeStmt* getPrevStmt()
{
if (gtPrev == nullptr)
+ {
return nullptr;
+ }
else
+ {
return gtPrev->AsStmt();
+ }
}
GenTreeStmt(GenTreePtr expr, IL_OFFSETX offset)
@@ -3845,115 +4093,112 @@ struct GenTreeStmt: public GenTree
}
#if DEBUGGABLE_GENTREE
- GenTreeStmt() : GenTree(GT_STMT, TYP_VOID) {}
+ GenTreeStmt() : GenTree(GT_STMT, TYP_VOID)
+ {
+ }
#endif
};
-
-
-
/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
pointers) must be flagged as 'large' in GenTree::InitNodeSize().
*/
-
/* gtClsVar -- 'static data member' (GT_CLS_VAR) */
-struct GenTreeClsVar: public GenTree
+struct GenTreeClsVar : public GenTree
{
- CORINFO_FIELD_HANDLE gtClsVarHnd;
- FieldSeqNode* gtFieldSeq;
+ CORINFO_FIELD_HANDLE gtClsVarHnd;
+ FieldSeqNode* gtFieldSeq;
- GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq) :
- GenTree(GT_CLS_VAR, type),
- gtClsVarHnd(clsVarHnd),
- gtFieldSeq(fldSeq)
- {
- gtFlags |= GTF_GLOB_REF;
- }
+ GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
+ : GenTree(GT_CLS_VAR, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
+ {
+ gtFlags |= GTF_GLOB_REF;
+ }
#if DEBUGGABLE_GENTREE
- GenTreeClsVar() : GenTree() {}
+ GenTreeClsVar() : GenTree()
+ {
+ }
#endif
};
/* gtArgPlace -- 'register argument placeholder' (GT_ARGPLACE) */
-struct GenTreeArgPlace: public GenTree
+struct GenTreeArgPlace : public GenTree
{
- CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
+ CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
- GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) :
- GenTree(GT_ARGPLACE, type),
- gtArgPlaceClsHnd(clsHnd)
- {}
+ GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) : GenTree(GT_ARGPLACE, type), gtArgPlaceClsHnd(clsHnd)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeArgPlace() : GenTree() {}
+ GenTreeArgPlace() : GenTree()
+ {
+ }
#endif
};
/* gtLabel -- code label target (GT_LABEL) */
-struct GenTreeLabel: public GenTree
+struct GenTreeLabel : public GenTree
{
BasicBlock* gtLabBB;
- GenTreeLabel(BasicBlock* bb) :
- GenTree(GT_LABEL, TYP_VOID),
- gtLabBB(bb)
- {}
+ GenTreeLabel(BasicBlock* bb) : GenTree(GT_LABEL, TYP_VOID), gtLabBB(bb)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeLabel() : GenTree() {}
+ GenTreeLabel() : GenTree()
+ {
+ }
#endif
};
/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
-struct GenTreePhiArg: public GenTreeLclVarCommon
+struct GenTreePhiArg : public GenTreeLclVarCommon
{
- BasicBlock * gtPredBB;
+ BasicBlock* gtPredBB;
GenTreePhiArg(var_types type, unsigned lclNum, unsigned snum, BasicBlock* block)
- : GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum)
- , gtPredBB(block)
- {
+ : GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block)
+ {
SetSsaNum(snum);
}
#if DEBUGGABLE_GENTREE
- GenTreePhiArg() : GenTreeLclVarCommon() {}
+ GenTreePhiArg() : GenTreeLclVarCommon()
+ {
+ }
#endif
};
/* gtPutArgStk -- Argument passed on stack */
-struct GenTreePutArgStk: public GenTreeUnOp
+struct GenTreePutArgStk : public GenTreeUnOp
{
- unsigned gtSlotNum; // Slot number of the argument to be passed on stack
+ unsigned gtSlotNum; // Slot number of the argument to be passed on stack
#if FEATURE_FASTTAILCALL
- bool putInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
- // By default this is false and will be placed in out-going arg area.
- // Fast tail calls set this to true.
- // In future if we need to add more such bool fields consider bit fields.
-
- GenTreePutArgStk(
- genTreeOps oper,
- var_types type,
- unsigned slotNum
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(unsigned numSlots)
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(bool isStruct),
- bool _putInIncomingArgArea = false
- DEBUGARG(GenTreePtr callNode = NULL)
- DEBUGARG(bool largeNode = false))
- :
- GenTreeUnOp(oper, type DEBUGARG(largeNode)),
- gtSlotNum(slotNum),
- putInIncomingArgArea(_putInIncomingArgArea)
+ bool putInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
+ // By default this is false and will be placed in out-going arg area.
+ // Fast tail calls set this to true.
+ // In future if we need to add more such bool fields consider bit fields.
+
+ GenTreePutArgStk(genTreeOps oper,
+ var_types type,
+ unsigned slotNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(unsigned numSlots)
+ FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(bool isStruct),
+ bool _putInIncomingArgArea = false DEBUGARG(GenTreePtr callNode = nullptr)
+ DEBUGARG(bool largeNode = false))
+ : GenTreeUnOp(oper, type DEBUGARG(largeNode))
+ , gtSlotNum(slotNum)
+ , putInIncomingArgArea(_putInIncomingArgArea)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- , gtPutArgStkKind(PutArgStkKindInvalid),
- gtNumSlots(numSlots),
- gtIsStruct(isStruct),
- gtNumberReferenceSlots(0),
- gtGcPtrs(nullptr)
+ , gtPutArgStkKind(PutArgStkKindInvalid)
+ , gtNumSlots(numSlots)
+ , gtIsStruct(isStruct)
+ , gtNumberReferenceSlots(0)
+ , gtGcPtrs(nullptr)
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
{
#ifdef DEBUG
@@ -3961,27 +4206,22 @@ struct GenTreePutArgStk: public GenTreeUnOp
#endif
}
-
- GenTreePutArgStk(
- genTreeOps oper,
- var_types type,
- GenTreePtr op1,
- unsigned slotNum
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(unsigned numSlots)
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(bool isStruct),
- bool _putInIncomingArgArea = false
- DEBUGARG(GenTreePtr callNode = NULL)
- DEBUGARG(bool largeNode = false))
- :
- GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)),
- gtSlotNum(slotNum),
- putInIncomingArgArea(_putInIncomingArgArea)
+ GenTreePutArgStk(genTreeOps oper,
+ var_types type,
+ GenTreePtr op1,
+ unsigned slotNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(unsigned numSlots)
+ FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(bool isStruct),
+ bool _putInIncomingArgArea = false DEBUGARG(GenTreePtr callNode = nullptr)
+ DEBUGARG(bool largeNode = false))
+ : GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode))
+ , gtSlotNum(slotNum)
+ , putInIncomingArgArea(_putInIncomingArgArea)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- , gtPutArgStkKind(PutArgStkKindInvalid),
- gtNumSlots(numSlots),
- gtIsStruct(isStruct),
- gtNumberReferenceSlots(0),
- gtGcPtrs(nullptr)
+ , gtPutArgStkKind(PutArgStkKindInvalid)
+ , gtNumSlots(numSlots)
+ , gtIsStruct(isStruct)
+ , gtNumberReferenceSlots(0)
+ , gtGcPtrs(nullptr)
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
{
#ifdef DEBUG
@@ -3989,25 +4229,21 @@ struct GenTreePutArgStk: public GenTreeUnOp
#endif
}
-#else // !FEATURE_FASTTAILCALL
+#else // !FEATURE_FASTTAILCALL
- GenTreePutArgStk(
- genTreeOps oper,
- var_types type,
- unsigned slotNum
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(unsigned numSlots)
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(bool isStruct)
- DEBUGARG(GenTreePtr callNode = NULL)
- DEBUGARG(bool largeNode = false))
- :
- GenTreeUnOp(oper, type DEBUGARG(largeNode)),
- gtSlotNum(slotNum)
+ GenTreePutArgStk(genTreeOps oper,
+ var_types type,
+ unsigned slotNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(unsigned numSlots)
+ FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(bool isStruct) DEBUGARG(GenTreePtr callNode = NULL)
+ DEBUGARG(bool largeNode = false))
+ : GenTreeUnOp(oper, type DEBUGARG(largeNode))
+ , gtSlotNum(slotNum)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- , gtPutArgStkKind(PutArgStkKindInvalid),
- gtNumSlots(numSlots),
- gtIsStruct(isStruct),
- gtNumberReferenceSlots(0),
- gtGcPtrs(nullptr)
+ , gtPutArgStkKind(PutArgStkKindInvalid)
+ , gtNumSlots(numSlots)
+ , gtIsStruct(isStruct)
+ , gtNumberReferenceSlots(0)
+ , gtGcPtrs(nullptr)
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
{
#ifdef DEBUG
@@ -4015,25 +4251,20 @@ struct GenTreePutArgStk: public GenTreeUnOp
#endif
}
-
- GenTreePutArgStk(
- genTreeOps oper,
- var_types type,
- GenTreePtr op1,
- unsigned slotNum
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(unsigned numSlots)
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(bool isStruct)
- DEBUGARG(GenTreePtr callNode = NULL)
- DEBUGARG(bool largeNode = false))
- :
- GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)),
- gtSlotNum(slotNum)
+ GenTreePutArgStk(genTreeOps oper,
+ var_types type,
+ GenTreePtr op1,
+ unsigned slotNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(unsigned numSlots)
+ FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(bool isStruct) DEBUGARG(GenTreePtr callNode = NULL)
+ DEBUGARG(bool largeNode = false))
+ : GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode))
+ , gtSlotNum(slotNum)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- , gtPutArgStkKind(PutArgStkKindInvalid),
- gtNumSlots(numSlots),
- gtIsStruct(isStruct),
- gtNumberReferenceSlots(0),
- gtGcPtrs(nullptr)
+ , gtPutArgStkKind(PutArgStkKindInvalid)
+ , gtNumSlots(numSlots)
+ , gtIsStruct(isStruct)
+ , gtNumberReferenceSlots(0)
+ , gtGcPtrs(nullptr)
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
{
#ifdef DEBUG
@@ -4042,10 +4273,16 @@ struct GenTreePutArgStk: public GenTreeUnOp
}
#endif // FEATURE_FASTTAILCALL
- unsigned getArgOffset() { return gtSlotNum * TARGET_POINTER_SIZE; }
+ unsigned getArgOffset()
+ {
+ return gtSlotNum * TARGET_POINTER_SIZE;
+ }
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- unsigned getArgSize() { return gtNumSlots * TARGET_POINTER_SIZE; }
+ unsigned getArgSize()
+ {
+ return gtNumSlots * TARGET_POINTER_SIZE;
+ }
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
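
The two accessors above are plain slot arithmetic: a slot is one pointer-sized stack cell. A worked example, assuming a 64-bit target where TARGET_POINTER_SIZE is 8 (the particular values are made up for illustration):

    #include <cstdio>

    int main()
    {
        const unsigned pointerSize = 8; // stand-in for TARGET_POINTER_SIZE on a 64-bit target
        unsigned gtSlotNum         = 3; // argument starts at the fourth outgoing slot
        unsigned gtNumSlots        = 2; // a 16-byte struct occupies two slots

        printf("getArgOffset() -> %u bytes\n", gtSlotNum * pointerSize);  // 24
        printf("getArgSize()   -> %u bytes\n", gtNumSlots * pointerSize); // 16
        return 0;
    }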
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -4068,35 +4305,34 @@ struct GenTreePutArgStk: public GenTreeUnOp
void setGcPointers(unsigned numPointers, BYTE* pointers)
{
gtNumberReferenceSlots = numPointers;
- gtGcPtrs = pointers;
+ gtGcPtrs = pointers;
}
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
#ifdef DEBUG
- GenTreePtr gtCall; // the call node to which this argument belongs
+ GenTreePtr gtCall; // the call node to which this argument belongs
#endif
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
- enum PutArgStkKind : __int8
- {
- PutArgStkKindInvalid,
- PutArgStkKindRepInstr,
- PutArgStkKindUnroll,
+ enum PutArgStkKind : __int8{
+ PutArgStkKindInvalid, PutArgStkKindRepInstr, PutArgStkKindUnroll,
};
PutArgStkKind gtPutArgStkKind;
- unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
- bool gtIsStruct; // This stack arg is a struct.
- unsigned gtNumberReferenceSlots; // Number of reference slots.
- BYTE* gtGcPtrs; // gcPointers
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+ unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
+ bool gtIsStruct; // This stack arg is a struct.
+ unsigned gtNumberReferenceSlots; // Number of reference slots.
+ BYTE* gtGcPtrs; // gcPointers
+#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
#if DEBUGGABLE_GENTREE
- GenTreePutArgStk() : GenTreeUnOp() {}
+ GenTreePutArgStk() : GenTreeUnOp()
+ {
+ }
#endif
};
@@ -4158,7 +4394,7 @@ struct GenTreeCopyOrReload : public GenTreeUnOp
// SetRegNumByIdx: Set the regNumber for ith position.
//
// Arguments:
- // reg - reg number
+ // reg - reg number
// idx - register position.
//
// Return Value:
@@ -4210,16 +4446,16 @@ struct GenTreeCopyOrReload : public GenTreeUnOp
#endif
}
- GenTreeCopyOrReload(genTreeOps oper,
- var_types type,
- GenTree* op1) : GenTreeUnOp(oper, type, op1)
+ GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1)
{
gtRegNum = REG_NA;
ClearOtherRegs();
}
#if DEBUGGABLE_GENTREE
- GenTreeCopyOrReload() : GenTreeUnOp() {}
+ GenTreeCopyOrReload() : GenTreeUnOp()
+ {
+ }
#endif
};
@@ -4227,39 +4463,40 @@ struct GenTreeCopyOrReload : public GenTreeUnOp
struct GenTreeAllocObj final : public GenTreeUnOp
{
- unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
+ unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
CORINFO_CLASS_HANDLE gtAllocObjClsHnd;
- GenTreeAllocObj(var_types type, unsigned int helper, CORINFO_CLASS_HANDLE clsHnd, GenTreePtr op) :
- GenTreeUnOp(GT_ALLOCOBJ, type, op
- DEBUGARG(/*largeNode*/TRUE)),// This node in most cases will be changed to a call node
- gtNewHelper(helper),
- gtAllocObjClsHnd(clsHnd)
- {}
+ GenTreeAllocObj(var_types type, unsigned int helper, CORINFO_CLASS_HANDLE clsHnd, GenTreePtr op)
+ : GenTreeUnOp(GT_ALLOCOBJ, type, op DEBUGARG(/*largeNode*/ TRUE))
+ , // This node in most cases will be changed to
+ // a call node
+ gtNewHelper(helper)
+ , gtAllocObjClsHnd(clsHnd)
+ {
+ }
#if DEBUGGABLE_GENTREE
- GenTreeAllocObj() : GenTreeUnOp() {}
+ GenTreeAllocObj() : GenTreeUnOp()
+ {
+ }
#endif
};
-
//------------------------------------------------------------------------
// Deferred inline functions of GenTree -- these need the subtypes above to
// be defined already.
//------------------------------------------------------------------------
-inline bool GenTree::OperIsBlkOp() const
+inline bool GenTree::OperIsBlkOp() const
{
- return (gtOper == GT_INITBLK ||
- gtOper == GT_COPYBLK ||
- gtOper == GT_COPYOBJ);
+ return (gtOper == GT_INITBLK || gtOper == GT_COPYBLK || gtOper == GT_COPYOBJ);
}
-inline bool GenTree::OperIsDynBlkOp()
+inline bool GenTree::OperIsDynBlkOp()
{
return (OperIsBlkOp() && !gtGetOp2()->IsCnsIntOrI());
}
-inline bool GenTree::OperIsCopyBlkOp() const
+inline bool GenTree::OperIsCopyBlkOp() const
{
return (gtOper == GT_COPYOBJ || gtOper == GT_COPYBLK);
}
@@ -4278,7 +4515,9 @@ inline bool GenTree::OperIsInitBlkOp() const
inline bool GenTree::IsFPZero()
{
if ((gtOper == GT_CNS_DBL) && (gtDblCon.gtDconVal == 0.0))
+ {
return true;
+ }
return false;
}
@@ -4300,17 +4539,21 @@ inline bool GenTree::IsIntegralConst(ssize_t constVal)
{
if ((gtOper == GT_CNS_INT) && (gtIntConCommon.IconValue() == constVal))
+ {
return true;
+ }
if ((gtOper == GT_CNS_LNG) && (gtIntConCommon.LngValue() == constVal))
+ {
return true;
+ }
return false;
}
inline bool GenTree::IsBoxedValue()
{
- assert(gtOper != GT_BOX || gtBox.BoxOp() != NULL);
+ assert(gtOper != GT_BOX || gtBox.BoxOp() != nullptr);
return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
}
@@ -4328,15 +4571,15 @@ inline GenTreePtr GenTree::MoveNext()
// For LEGACY_BACKEND or architectures that don't support MultiReg args
// we don't allow a GT_LIST at all.
//
-// Currently for AMD64 UNIX we allow a limited case where a GT_LIST is
+// Currently for AMD64 UNIX we allow a limited case where a GT_LIST is
// allowed but every element must be a GT_LCL_FLD.
//
// For the future targets that allow for Multireg args (and this includes
// the current ARM64 target) we allow a GT_LIST of arbitrary nodes, these
-// would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
-// but could be changed into constants or GT_COMMA trees by the later
+// would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
+// but could be changed into constants or GT_COMMA trees by the later
// optimization phases.
-//
+//
// Arguments:
// instance method for a GenTree node
//
@@ -4351,21 +4594,21 @@ inline bool GenTree::IsListForMultiRegArg()
// We don't have a GT_LIST, so just return true.
return true;
}
- else // We do have a GT_LIST
+ else // We do have a GT_LIST
{
#if defined(LEGACY_BACKEND) || !FEATURE_MULTIREG_ARGS
- // Not allowed to have a GT_LIST for an argument
+ // Not allowed to have a GT_LIST for an argument
// unless we have a RyuJIT backend and FEATURE_MULTIREG_ARGS
return false;
-#else // we have RyuJIT backend and FEATURE_MULTIREG_ARGS
+#else // we have RyuJIT backend and FEATURE_MULTIREG_ARGS
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- // For UNIX ABI we currently only allow a GT_LIST of GT_LCL_FLDs nodes
+ // For UNIX ABI we currently only allow a GT_LIST of GT_LCL_FLDs nodes
GenTree* gtListPtr = this;
- while (gtListPtr != nullptr)
+ while (gtListPtr != nullptr)
{
// ToDo: fix UNIX_AMD64 so that we do not generate this kind of a List
// Note the list as currently created is malformed, as the last entry is a nullptr
@@ -4379,14 +4622,14 @@ inline bool GenTree::IsListForMultiRegArg()
}
gtListPtr = gtListPtr->MoveNext();
}
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
// Note that for non-UNIX ABI the GT_LIST may contain any node
//
- // We allow this GT_LIST as an argument
+ // We allow this GT_LIST as an argument
return true;
-#endif // RyuJIT backend and FEATURE_MULTIREG_ARGS
+#endif // RyuJIT backend and FEATURE_MULTIREG_ARGS
}
}
#endif // DEBUG
@@ -4397,13 +4640,13 @@ inline GenTreePtr GenTree::Current()
return gtOp.gtOp1;
}
-inline GenTreePtr *GenTree::pCurrent()
+inline GenTreePtr* GenTree::pCurrent()
{
assert(IsList());
return &(gtOp.gtOp1);
}
-inline GenTreePtr GenTree::gtGetOp1()
+inline GenTreePtr GenTree::gtGetOp1()
{
return gtOp.gtOp1;
}
@@ -4414,56 +4657,56 @@ inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
{
switch (oper)
{
- case GT_ADD:
- case GT_SUB:
- case GT_MUL:
- case GT_DIV:
- case GT_MOD:
- case GT_UDIV:
- case GT_UMOD:
- case GT_OR:
- case GT_XOR:
- case GT_AND:
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROL:
- case GT_ROR:
- case GT_INDEX:
- case GT_ASG:
- case GT_ASG_ADD:
- case GT_ASG_SUB:
- case GT_ASG_MUL:
- case GT_ASG_DIV:
- case GT_ASG_MOD:
- case GT_ASG_UDIV:
- case GT_ASG_UMOD:
- case GT_ASG_OR:
- case GT_ASG_XOR:
- case GT_ASG_AND:
- case GT_ASG_LSH:
- case GT_ASG_RSH:
- case GT_ASG_RSZ:
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
- case GT_COMMA:
- case GT_QMARK:
- case GT_COLON:
- case GT_MKREFANY:
- case GT_INITBLK:
- case GT_COPYBLK:
- return true;
- default:
- return false;
+ case GT_ADD:
+ case GT_SUB:
+ case GT_MUL:
+ case GT_DIV:
+ case GT_MOD:
+ case GT_UDIV:
+ case GT_UMOD:
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROL:
+ case GT_ROR:
+ case GT_INDEX:
+ case GT_ASG:
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ case GT_ASG_MUL:
+ case GT_ASG_DIV:
+ case GT_ASG_MOD:
+ case GT_ASG_UDIV:
+ case GT_ASG_UMOD:
+ case GT_ASG_OR:
+ case GT_ASG_XOR:
+ case GT_ASG_AND:
+ case GT_ASG_LSH:
+ case GT_ASG_RSH:
+ case GT_ASG_RSZ:
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
+ case GT_COMMA:
+ case GT_QMARK:
+ case GT_COLON:
+ case GT_MKREFANY:
+ case GT_INITBLK:
+ case GT_COPYBLK:
+ return true;
+ default:
+ return false;
}
}
#endif // DEBUG
-inline GenTreePtr GenTree::gtGetOp2()
+inline GenTreePtr GenTree::gtGetOp2()
{
/* gtOp.gtOp2 is only valid for GTK_BINOP nodes. */
@@ -4477,26 +4720,28 @@ inline GenTreePtr GenTree::gtGetOp2()
return op2;
}
-inline GenTreePtr GenTree::gtEffectiveVal(bool commaOnly)
+inline GenTreePtr GenTree::gtEffectiveVal(bool commaOnly)
{
switch (gtOper)
{
- case GT_COMMA:
- return gtOp.gtOp2->gtEffectiveVal(commaOnly);
+ case GT_COMMA:
+ return gtOp.gtOp2->gtEffectiveVal(commaOnly);
- case GT_NOP:
- if (!commaOnly && gtOp.gtOp1 != NULL)
- return gtOp.gtOp1->gtEffectiveVal();
- break;
+ case GT_NOP:
+ if (!commaOnly && gtOp.gtOp1 != nullptr)
+ {
+ return gtOp.gtOp1->gtEffectiveVal();
+ }
+ break;
- default:
- break;
- }
+ default:
+ break;
+ }
return this;
}
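
gtEffectiveVal simply unwraps GT_COMMA (and, unless commaOnly is set, empty GT_NOP) nodes to reach the value a tree actually produces: for COMMA(sideEffect, COMMA(sideEffect2, value)) it returns value. A toy stand-in showing the same recursion (ToyNode is invented for illustration, not the JIT's GenTree):

    struct ToyNode
    {
        bool     isComma;
        ToyNode* op1; // evaluated for side effects only
        ToyNode* op2; // the value the comma produces
    };

    ToyNode* effectiveVal(ToyNode* node)
    {
        return node->isComma ? effectiveVal(node->op2) : node;
    }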
-inline GenTree* GenTree::gtSkipReloadOrCopy()
+inline GenTree* GenTree::gtSkipReloadOrCopy()
{
// There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
if (gtOper == GT_RELOAD || gtOper == GT_COPY)
@@ -4514,13 +4759,13 @@ inline GenTree* GenTree::gtSkipReloadOrCopy()
// None
//
// Return Value:
-// Returns true if this GenTree is a multi register returning call
-inline bool GenTree::IsMultiRegCall() const
+// Returns true if this GenTree is a multi register returning call
+inline bool GenTree::IsMultiRegCall() const
{
if (this->IsCall())
{
// We cannot use AsCall() as it is not declared const
- const GenTreeCall* call = reinterpret_cast<const GenTreeCall *>(this);
+ const GenTreeCall* call = reinterpret_cast<const GenTreeCall*>(this);
return call->HasMultiRegRetVal();
}
@@ -4569,7 +4814,7 @@ inline bool GenTree::IsIntegralConst() const
{
#ifdef _TARGET_64BIT_
return IsCnsIntOrI();
-#else // !_TARGET_64BIT_
+#else // !_TARGET_64BIT_
return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
#endif // !_TARGET_64BIT_
}
@@ -4578,7 +4823,7 @@ inline bool GenTree::IsIntCnsFitsInI32()
{
#ifdef _TARGET_64BIT_
return IsCnsIntOrI() && ((int)gtIntConCommon.IconValue() == gtIntConCommon.IconValue());
-#else // !_TARGET_64BIT_
+#else // !_TARGET_64BIT_
return IsCnsIntOrI();
#endif // !_TARGET_64BIT_
}
@@ -4599,10 +4844,19 @@ inline bool GenTree::IsCnsNonZeroFltOrDbl()
return false;
}
-inline bool GenTree::IsHelperCall() { return OperGet() == GT_CALL && gtCall.gtCallType == CT_HELPER; }
+inline bool GenTree::IsHelperCall()
+{
+ return OperGet() == GT_CALL && gtCall.gtCallType == CT_HELPER;
+}
-inline var_types GenTree::CastFromType() { return this->gtCast.CastOp()->TypeGet(); }
-inline var_types& GenTree::CastToType() { return this->gtCast.gtCastType; }
+inline var_types GenTree::CastFromType()
+{
+ return this->gtCast.CastOp()->TypeGet();
+}
+inline var_types& GenTree::CastToType()
+{
+ return this->gtCast.gtCastType;
+}
//-----------------------------------------------------------------------------------
// HasGCPtr: determine whether this block op involves GC pointers
@@ -4615,9 +4869,8 @@ inline var_types& GenTree::CastToType() { return this->gtCast.gtCastType; }
//
// Notes:
// Of the block ops only GT_COPYOBJ is allowed to have GC pointers.
-//
-inline bool
-GenTreeBlkOp::HasGCPtr()
+//
+inline bool GenTreeBlkOp::HasGCPtr()
{
if (gtFlags & GTF_BLK_HASGCPTR)
{
@@ -4631,12 +4884,11 @@ inline bool GenTree::isContainedSpillTemp() const
{
#if !defined(LEGACY_BACKEND)
// If spilled and no reg at use, then it is treated as contained.
- if (((gtFlags & GTF_SPILLED) != 0) &&
- ((gtFlags & GTF_NOREG_AT_USE) != 0))
+ if (((gtFlags & GTF_SPILLED) != 0) && ((gtFlags & GTF_NOREG_AT_USE) != 0))
{
return true;
}
-#endif //!LEGACY_BACKEND
+#endif //! LEGACY_BACKEND
return false;
}
@@ -4649,16 +4901,14 @@ inline bool GenTree::isContainedSpillTemp() const
/*****************************************************************************/
-#if SMALL_TREE_NODES
+#if SMALL_TREE_NODES
// In debug, on some platforms (e.g., when LATE_DISASM is defined), GenTreeIntCon is bigger than GenTreeLclFld.
-const
-size_t TREE_NODE_SZ_SMALL = max(sizeof(GenTreeIntCon), sizeof(GenTreeLclFld));
+const size_t TREE_NODE_SZ_SMALL = max(sizeof(GenTreeIntCon), sizeof(GenTreeLclFld));
#endif // SMALL_TREE_NODES
-const
-size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
+const size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
/*****************************************************************************
* Types returned by GenTree::lvaLclVarRefs()
@@ -4666,16 +4916,15 @@ size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
enum varRefKinds
{
- VR_INVARIANT = 0x00, // an invariant value
- VR_NONE = 0x00,
- VR_IND_REF = 0x01, // an object reference
- VR_IND_SCL = 0x02, // a non-object reference
- VR_GLB_VAR = 0x04, // a global (clsVar)
+ VR_INVARIANT = 0x00, // an invariant value
+ VR_NONE = 0x00,
+ VR_IND_REF = 0x01, // an object reference
+ VR_IND_SCL = 0x02, // a non-object reference
+ VR_GLB_VAR = 0x04, // a global (clsVar)
};
// Add a temp define to avoid merge conflict.
#define VR_IND_PTR VR_IND_REF
/*****************************************************************************/
-#endif // !GENTREE_H
+#endif // !GENTREE_H
/*****************************************************************************/
-
diff --git a/src/jit/gschecks.cpp b/src/jit/gschecks.cpp
index d368b3b28a..f0a4ac46c0 100644
--- a/src/jit/gschecks.cpp
+++ b/src/jit/gschecks.cpp
@@ -30,8 +30,7 @@ void Compiler::gsGSChecksInitCookie()
lvaSetVarAddrExposed(lvaGSSecurityCookie);
lvaTable[lvaGSSecurityCookie].lvType = type;
- info.compCompHnd->getGSCookie(&gsGlobalSecurityCookieVal,
- &gsGlobalSecurityCookieAddr);
+ info.compCompHnd->getGSCookie(&gsGlobalSecurityCookieVal, &gsGlobalSecurityCookieAddr);
}
const unsigned NO_SHADOW_COPY = UINT_MAX;
@@ -41,9 +40,9 @@ const unsigned NO_SHADOW_COPY = UINT_MAX;
* The current function has an unsafe buffer on the stack. Search for vulnerable
* parameters which could be used to modify a code address and take over the process
* in the case of a buffer overrun. Create a safe local copy for each vulnerable parameter,
- * which will be allocated bellow the unsafe buffer. Change uses of the param to the
+ * which will be allocated below the unsafe buffer. Change uses of the param to the
* shadow copy.
- *
+ *
* A pointer under indirection is considered vulnerable. A malicious user could read from
* protected memory or write to it. If a parameter is assigned/computed into another variable,
* and is a pointer (i.e., under indirection), then we consider the variable to be part of the
@@ -59,7 +58,7 @@ void Compiler::gsCopyShadowParams()
// Allocate array for shadow param info
gsShadowVarInfo = new (this, CMK_Unknown) ShadowParamVarInfo[lvaCount]();
- // Find groups of variables assigned to each other, and also
+ // Find groups of variables assigned to each other, and also
// tracks variables which are dereferenced and marks them as ptrs.
// Look for assignments to *p, and ptrs passed to functions
if (gsFindVulnerableParams())
@@ -73,17 +72,18 @@ void Compiler::gsCopyShadowParams()
struct MarkPtrsInfo
{
- Compiler * comp;
- unsigned lvAssignDef; // Which local variable is the tree being assigned to?
- bool isAssignSrc; // Is this the source value for an assignment?
- bool isUnderIndir; // Is this a pointer value tree that is being dereferenced?
- bool skipNextNode; // Skip a single node during the tree-walk
+ Compiler* comp;
+ unsigned lvAssignDef; // Which local variable is the tree being assigned to?
+ bool isAssignSrc; // Is this the source value for an assignment?
+ bool isUnderIndir; // Is this a pointer value tree that is being dereferenced?
+ bool skipNextNode; // Skip a single node during the tree-walk
#ifdef DEBUG
void Print()
{
- printf("[MarkPtrsInfo] = {comp = %p, lvAssignDef = %d, isAssignSrc = %d, isUnderIndir = %d, skipNextNode = %d}\n",
- comp, lvAssignDef, isAssignSrc, isUnderIndir, skipNextNode);
+ printf(
+ "[MarkPtrsInfo] = {comp = %p, lvAssignDef = %d, isAssignSrc = %d, isUnderIndir = %d, skipNextNode = %d}\n",
+ comp, lvAssignDef, isAssignSrc, isUnderIndir, skipNextNode);
}
#endif
};
@@ -97,15 +97,15 @@ struct MarkPtrsInfo
* or indirection node. It starts a new tree walk for it's subtrees when the state
* changes.
*/
-Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr *pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr* pTree, fgWalkData* data)
{
- struct MarkPtrsInfo *pState= (MarkPtrsInfo *)data->pCallbackData;
- struct MarkPtrsInfo newState = *pState;
- Compiler *comp = data->compiler;
- GenTreePtr tree = *pTree;
- ShadowParamVarInfo *shadowVarInfo = pState->comp->gsShadowVarInfo;
+ struct MarkPtrsInfo* pState = (MarkPtrsInfo*)data->pCallbackData;
+ struct MarkPtrsInfo newState = *pState;
+ Compiler* comp = data->compiler;
+ GenTreePtr tree = *pTree;
+ ShadowParamVarInfo* shadowVarInfo = pState->comp->gsShadowVarInfo;
assert(shadowVarInfo);
- bool fIsBlk = false;
+ bool fIsBlk = false;
unsigned lclNum;
assert(!pState->isAssignSrc || pState->lvAssignDef != (unsigned)-1);
@@ -118,168 +118,164 @@ Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr *pTree, fg
switch (tree->OperGet())
{
- // Indirections - look for *p uses and defs
- case GT_INITBLK:
- case GT_COPYOBJ:
- case GT_COPYBLK:
- fIsBlk = true;
+ // Indirections - look for *p uses and defs
+ case GT_INITBLK:
+ case GT_COPYOBJ:
+ case GT_COPYBLK:
+ fIsBlk = true;
// fallthrough
- case GT_IND:
- case GT_OBJ:
- case GT_ARR_ELEM:
- case GT_ARR_INDEX:
- case GT_ARR_OFFSET:
- case GT_FIELD:
-
- newState.isUnderIndir = true;
- {
- if (fIsBlk)
+ case GT_IND:
+ case GT_OBJ:
+ case GT_ARR_ELEM:
+ case GT_ARR_INDEX:
+ case GT_ARR_OFFSET:
+ case GT_FIELD:
+
+ newState.isUnderIndir = true;
{
- // Blk nodes have implicit indirections.
- comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
-
- if (tree->OperGet() == GT_INITBLK)
+ if (fIsBlk)
{
- newState.isUnderIndir = false;
+ // Blk nodes have implicit indirections.
+ comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
+
+ if (tree->OperGet() == GT_INITBLK)
+ {
+ newState.isUnderIndir = false;
+ }
+ comp->fgWalkTreePre(&tree->gtOp.gtOp2, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
+ }
+ else
+ {
+ newState.skipNextNode = true; // Don't have to worry about which kind of node we're dealing with
+ comp->fgWalkTreePre(&tree, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
}
- comp->fgWalkTreePre(&tree->gtOp.gtOp2, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
- }
- else
- {
- newState.skipNextNode = true; // Don't have to worry about which kind of node we're dealing with
- comp->fgWalkTreePre(&tree, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
}
- }
- return WALK_SKIP_SUBTREES;
+ return WALK_SKIP_SUBTREES;
- // local vars and param uses
- case GT_LCL_VAR:
- case GT_LCL_FLD:
- lclNum = tree->gtLclVarCommon.gtLclNum;
+ // local vars and param uses
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
+ lclNum = tree->gtLclVarCommon.gtLclNum;
- if (pState->isUnderIndir)
- {
- // The variable is being dereferenced for a read or a write.
- comp->lvaTable[lclNum].lvIsPtr = 1;
- }
+ if (pState->isUnderIndir)
+ {
+ // The variable is being dereferenced for a read or a write.
+ comp->lvaTable[lclNum].lvIsPtr = 1;
+ }
- if (pState->isAssignSrc)
- {
- //
- // Add lvAssignDef and lclNum to a common assign group
- if (shadowVarInfo[pState->lvAssignDef].assignGroup)
+ if (pState->isAssignSrc)
{
- if (shadowVarInfo[lclNum].assignGroup)
+ //
+ // Add lvAssignDef and lclNum to a common assign group
+ if (shadowVarInfo[pState->lvAssignDef].assignGroup)
+ {
+ if (shadowVarInfo[lclNum].assignGroup)
+ {
+ // OR both bit vector
+ shadowVarInfo[pState->lvAssignDef].assignGroup->bitVectOr(shadowVarInfo[lclNum].assignGroup);
+ }
+ else
+ {
+ shadowVarInfo[pState->lvAssignDef].assignGroup->bitVectSet(lclNum);
+ }
+
+ // Point both to the same bit vector
+ shadowVarInfo[lclNum].assignGroup = shadowVarInfo[pState->lvAssignDef].assignGroup;
+ }
+ else if (shadowVarInfo[lclNum].assignGroup)
{
- // OR both bit vector
- shadowVarInfo[pState->lvAssignDef].assignGroup->bitVectOr(shadowVarInfo[lclNum].assignGroup);
+ shadowVarInfo[lclNum].assignGroup->bitVectSet(pState->lvAssignDef);
+
+ // Point both to the same bit vector
+ shadowVarInfo[pState->lvAssignDef].assignGroup = shadowVarInfo[lclNum].assignGroup;
}
else
{
- shadowVarInfo[pState->lvAssignDef].assignGroup->bitVectSet(lclNum);
+ FixedBitVect* bv = FixedBitVect::bitVectInit(pState->comp->lvaCount, pState->comp);
+
+ // (shadowVarInfo[pState->lvAssignDef] == NULL && shadowVarInfo[lclNew] == NULL);
+ // Neither of them has an assign group yet. Make a new one.
+ shadowVarInfo[pState->lvAssignDef].assignGroup = bv;
+ shadowVarInfo[lclNum].assignGroup = bv;
+ bv->bitVectSet(pState->lvAssignDef);
+ bv->bitVectSet(lclNum);
}
-
- // Point both to the same bit vector
- shadowVarInfo[lclNum].assignGroup = shadowVarInfo[pState->lvAssignDef].assignGroup;
- }
- else if (shadowVarInfo[lclNum].assignGroup)
- {
- shadowVarInfo[lclNum].assignGroup->bitVectSet(pState->lvAssignDef);
-
- // Point both to the same bit vector
- shadowVarInfo[pState->lvAssignDef].assignGroup = shadowVarInfo[lclNum].assignGroup;
- }
- else
- {
- FixedBitVect *bv = FixedBitVect::bitVectInit(pState->comp->lvaCount, pState->comp);
-
- // (shadowVarInfo[pState->lvAssignDef] == NULL && shadowVarInfo[lclNew] == NULL);
- // Neither of them has an assign group yet. Make a new one.
- shadowVarInfo[pState->lvAssignDef].assignGroup = bv;
- shadowVarInfo[lclNum].assignGroup = bv;
- bv->bitVectSet(pState->lvAssignDef);
- bv->bitVectSet(lclNum);
}
+ return WALK_CONTINUE;
- }
- return WALK_CONTINUE;
-
- // Calls - Mark arg variables
- case GT_CALL:
+ // Calls - Mark arg variables
+ case GT_CALL:
- newState.isUnderIndir = false;
- newState.isAssignSrc = false;
- {
- if (tree->gtCall.gtCallObjp)
+ newState.isUnderIndir = false;
+ newState.isAssignSrc = false;
{
- newState.isUnderIndir = true;
- comp->fgWalkTreePre(&tree->gtCall.gtCallObjp, gsMarkPtrsAndAssignGroups, (void *)&newState);
- }
+ if (tree->gtCall.gtCallObjp)
+ {
+ newState.isUnderIndir = true;
+ comp->fgWalkTreePre(&tree->gtCall.gtCallObjp, gsMarkPtrsAndAssignGroups, (void*)&newState);
+ }
- for (GenTreeArgList* args = tree->gtCall.gtCallArgs; args; args = args->Rest())
- {
- comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void *)&newState);
- }
- for (GenTreeArgList* args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
- {
- comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void *)&newState);
+ for (GenTreeArgList* args = tree->gtCall.gtCallArgs; args; args = args->Rest())
+ {
+ comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void*)&newState);
+ }
+ for (GenTreeArgList* args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
+ {
+ comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void*)&newState);
+ }
+
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
+ {
+ newState.isUnderIndir = true;
+
+ // A function pointer is treated like a write-through pointer since
+ // it controls what code gets executed, and so indirectly can cause
+ // a write to memory.
+ comp->fgWalkTreePre(&tree->gtCall.gtCallAddr, gsMarkPtrsAndAssignGroups, (void*)&newState);
+ }
}
+ return WALK_SKIP_SUBTREES;
- if (tree->gtCall.gtCallType == CT_INDIRECT)
+ case GT_ADDR:
+ newState.isUnderIndir = false;
+ // We'll assume p in "**p = " can be vulnerable because by changing 'p', someone
+ // could control where **p stores to.
{
- newState.isUnderIndir = true;
-
- // A function pointer is treated like a write-through pointer since
- // it controls what code gets executed, and so indirectly can cause
- // a write to memory.
- comp->fgWalkTreePre(&tree->gtCall.gtCallAddr, gsMarkPtrsAndAssignGroups, (void *)&newState);
+ comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
}
- }
- return WALK_SKIP_SUBTREES;
+ return WALK_SKIP_SUBTREES;
+ default:
+ // Assignments - track assign groups and *p defs.
+ if (tree->OperIsAssignment())
+ {
+ bool isLocVar;
+ bool isLocFld;
- case GT_ADDR:
- newState.isUnderIndir = false;
- // We'll assume p in "**p = " can be vulnerable because by changing 'p', someone
- // could control where **p stores to.
- {
- comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
- }
- return WALK_SKIP_SUBTREES;
+ // Walk dst side
+ comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
+ // Now handle src side
+ isLocVar = tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR;
+ isLocFld = tree->gtOp.gtOp1->OperGet() == GT_LCL_FLD;
- default:
- // Assignments - track assign groups and *p defs.
- if (tree->OperIsAssignment())
- {
- bool isLocVar;
- bool isLocFld;
+ if ((isLocVar || isLocFld) && tree->gtOp.gtOp2)
+ {
+ lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
+ newState.lvAssignDef = lclNum;
+ newState.isAssignSrc = true;
+ }
- // Walk dst side
- comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
-
- // Now handle src side
- isLocVar = tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR;
- isLocFld = tree->gtOp.gtOp1->OperGet() == GT_LCL_FLD;
+ comp->fgWalkTreePre(&tree->gtOp.gtOp2, comp->gsMarkPtrsAndAssignGroups, (void*)&newState);
- if ((isLocVar || isLocFld) && tree->gtOp.gtOp2)
- {
- lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
- newState.lvAssignDef = lclNum;
- newState.isAssignSrc = true;
+ return WALK_SKIP_SUBTREES;
}
-
- comp->fgWalkTreePre(&tree->gtOp.gtOp2, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
-
- return WALK_SKIP_SUBTREES;
- }
}
return WALK_CONTINUE;
}
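
The assignGroup bit vectors built above form equivalence classes of locals that are assigned to one another; gsFindVulnerableParams then treats a whole class as pointer-like if any member is dereferenced. A standalone sketch of the grouping step, with std::bitset standing in for FixedBitVect and the helper name invented for illustration:

    #include <bitset>
    #include <memory>
    #include <vector>

    constexpr size_t kMaxLocals = 64;
    using AssignGroup = std::shared_ptr<std::bitset<kMaxLocals>>;

    // Record "dst = src": both locals end up sharing one assign group.
    void recordAssignment(std::vector<AssignGroup>& groups, unsigned dst, unsigned src)
    {
        if (groups[dst] && groups[src])
        {
            *groups[dst] |= *groups[src]; // merge the two classes
            groups[src] = groups[dst];
        }
        else if (groups[dst])
        {
            groups[src] = groups[dst];
        }
        else if (groups[src])
        {
            groups[dst] = groups[src];
        }
        else
        {
            groups[dst] = groups[src] = std::make_shared<std::bitset<kMaxLocals>>();
        }
        groups[dst]->set(dst);
        groups[dst]->set(src);
    }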
-
/*****************************************************************************
* gsFindVulnerableParams
* Walk all the trees looking for ptrs, args, assign groups, *p stores, etc.
@@ -293,10 +289,10 @@ bool Compiler::gsFindVulnerableParams()
{
MarkPtrsInfo info;
- info.comp = this;
- info.lvAssignDef = (unsigned)-1;
+ info.comp = this;
+ info.lvAssignDef = (unsigned)-1;
info.isUnderIndir = false;
- info.isAssignSrc = false;
+ info.isAssignSrc = false;
info.skipNextNode = false;
// Walk all the trees setting lvIsWritePtr, lvIsOutgoingArg, lvIsPtr and assignGroup.
@@ -307,12 +303,12 @@ bool Compiler::gsFindVulnerableParams()
// Initialize propagated[v0...vn] = {0}^n, so we can skip the ones propagated through
// some assign group.
- FixedBitVect* propagated = (lvaCount > 0) ? FixedBitVect::bitVectInit(lvaCount, this) : NULL;
+ FixedBitVect* propagated = (lvaCount > 0) ? FixedBitVect::bitVectInit(lvaCount, this) : nullptr;
for (UINT lclNum = 0; lclNum < lvaCount; lclNum++)
{
- LclVarDsc *varDsc = &lvaTable[lclNum];
- ShadowParamVarInfo *shadowInfo = &gsShadowVarInfo[lclNum];
+ LclVarDsc* varDsc = &lvaTable[lclNum];
+ ShadowParamVarInfo* shadowInfo = &gsShadowVarInfo[lclNum];
// If there was an indirection or if unsafe buffer, then we'd call it vulnerable.
if (varDsc->lvIsPtr || varDsc->lvIsUnsafeBuffer)
@@ -321,7 +317,7 @@ bool Compiler::gsFindVulnerableParams()
}
// Now, propagate the info through the assign group (an equivalence class of vars transitively assigned.)
- if (shadowInfo->assignGroup == NULL || propagated->bitVectTest(lclNum))
+ if (shadowInfo->assignGroup == nullptr || propagated->bitVectTest(lclNum))
{
continue;
}
@@ -332,10 +328,9 @@ bool Compiler::gsFindVulnerableParams()
UINT isUnderIndir = varDsc->lvIsPtr;
// First pass -- find if any variable is vulnerable.
- FixedBitVect *assignGroup = shadowInfo->assignGroup;
- for (UINT lclNum = assignGroup->bitVectGetFirst();
- lclNum != (unsigned) -1 && !isUnderIndir;
- lclNum = assignGroup->bitVectGetNext(lclNum))
+ FixedBitVect* assignGroup = shadowInfo->assignGroup;
+ for (UINT lclNum = assignGroup->bitVectGetFirst(); lclNum != (unsigned)-1 && !isUnderIndir;
+ lclNum = assignGroup->bitVectGetNext(lclNum))
{
isUnderIndir |= lvaTable[lclNum].lvIsPtr;
}
@@ -353,9 +348,8 @@ bool Compiler::gsFindVulnerableParams()
// Second pass -- mark all are vulnerable.
assert(isUnderIndir);
- for (UINT lclNum = assignGroup->bitVectGetFirst();
- lclNum != (unsigned) -1;
- lclNum = assignGroup->bitVectGetNext(lclNum))
+ for (UINT lclNum = assignGroup->bitVectGetFirst(); lclNum != (unsigned)-1;
+ lclNum = assignGroup->bitVectGetNext(lclNum))
{
lvaTable[lclNum].lvIsPtr = TRUE;
propagated->bitVectSet(lclNum);
@@ -364,10 +358,9 @@ bool Compiler::gsFindVulnerableParams()
#ifdef DEBUG
if (verbose)
{
- printf("Equivalence assign group %s: ", isUnderIndir ? "isPtr " : "");
- for (UINT lclNum = assignGroup->bitVectGetFirst();
- lclNum != (unsigned) -1;
- lclNum = assignGroup->bitVectGetNext(lclNum))
+ printf("Equivalence assign group %s: ", isUnderIndir ? "isPtr " : "");
+ for (UINT lclNum = assignGroup->bitVectGetFirst(); lclNum != (unsigned)-1;
+ lclNum = assignGroup->bitVectGetNext(lclNum))
{
gtDispLclVar(lclNum, false);
printf(" ");
@@ -380,7 +373,6 @@ bool Compiler::gsFindVulnerableParams()
return hasOneVulnerable;
}
-
/*****************************************************************************
* gsParamsToShadows
* Copy each vulnerable param ptr or buffer to a local shadow copy and replace
@@ -388,14 +380,14 @@ bool Compiler::gsFindVulnerableParams()
*/
void Compiler::gsParamsToShadows()
{
- // Cache old count since we'll add new variables, and
+ // Cache old count since we'll add new variables, and
// gsShadowVarInfo will not grow to accomodate the new ones.
UINT lvaOldCount = lvaCount;
// Create shadow copy for each param candidate
for (UINT lclNum = 0; lclNum < lvaOldCount; lclNum++)
{
- LclVarDsc *varDsc = &lvaTable[lclNum];
+ LclVarDsc* varDsc = &lvaTable[lclNum];
gsShadowVarInfo[lclNum].shadowCopy = NO_SHADOW_COPY;
// Only care about params whose values are on the stack
@@ -409,15 +401,14 @@ void Compiler::gsParamsToShadows()
continue;
}
-
int shadowVar = lvaGrabTemp(false DEBUGARG("shadowVar"));
// Copy some info
- var_types type = varTypeIsSmall(varDsc->TypeGet()) ? TYP_INT : varDsc->TypeGet();
+ var_types type = varTypeIsSmall(varDsc->TypeGet()) ? TYP_INT : varDsc->TypeGet();
lvaTable[shadowVar].lvType = type;
#ifdef FEATURE_SIMD
- lvaTable[shadowVar].lvSIMDType = varDsc->lvSIMDType;
+ lvaTable[shadowVar].lvSIMDType = varDsc->lvSIMDType;
lvaTable[shadowVar].lvUsedInSIMDIntrinsic = varDsc->lvUsedInSIMDIntrinsic;
if (varDsc->lvSIMDType)
{
@@ -426,20 +417,20 @@ void Compiler::gsParamsToShadows()
#endif
lvaTable[shadowVar].lvRegStruct = varDsc->lvRegStruct;
- lvaTable[shadowVar].lvAddrExposed = varDsc->lvAddrExposed;
+ lvaTable[shadowVar].lvAddrExposed = varDsc->lvAddrExposed;
lvaTable[shadowVar].lvDoNotEnregister = varDsc->lvDoNotEnregister;
#ifdef DEBUG
lvaTable[shadowVar].lvVMNeedsStackAddr = varDsc->lvVMNeedsStackAddr;
lvaTable[shadowVar].lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr;
- lvaTable[shadowVar].lvLclFieldExpr = varDsc->lvLclFieldExpr;
- lvaTable[shadowVar].lvLiveAcrossUCall = varDsc->lvLiveAcrossUCall;
+ lvaTable[shadowVar].lvLclFieldExpr = varDsc->lvLclFieldExpr;
+ lvaTable[shadowVar].lvLiveAcrossUCall = varDsc->lvLiveAcrossUCall;
#endif
- lvaTable[shadowVar].lvVerTypeInfo = varDsc->lvVerTypeInfo;
- lvaTable[shadowVar].lvGcLayout = varDsc->lvGcLayout;
+ lvaTable[shadowVar].lvVerTypeInfo = varDsc->lvVerTypeInfo;
+ lvaTable[shadowVar].lvGcLayout = varDsc->lvGcLayout;
lvaTable[shadowVar].lvIsUnsafeBuffer = varDsc->lvIsUnsafeBuffer;
- lvaTable[shadowVar].lvIsPtr = varDsc->lvIsPtr;
+ lvaTable[shadowVar].lvIsPtr = varDsc->lvIsPtr;
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
printf("Var V%02u is shadow param candidate. Shadow copy is V%02u.\n", lclNum, shadowVar);
@@ -450,12 +441,12 @@ void Compiler::gsParamsToShadows()
}
// Replace param uses with shadow copy
- fgWalkAllTreesPre(gsReplaceShadowParams, (void *)this);
+ fgWalkAllTreesPre(gsReplaceShadowParams, (void*)this);
// Now insert code to copy the params to their shadow copy.
for (UINT lclNum = 0; lclNum < lvaOldCount; lclNum++)
{
- LclVarDsc *varDsc = &lvaTable[lclNum];
+ LclVarDsc* varDsc = &lvaTable[lclNum];
unsigned shadowVar = gsShadowVarInfo[lclNum].shadowCopy;
if (shadowVar == NO_SHADOW_COPY)
@@ -471,7 +462,7 @@ void Compiler::gsParamsToShadows()
src->gtFlags |= GTF_DONT_CSE;
dst->gtFlags |= GTF_DONT_CSE;
- GenTreePtr opAssign = NULL;
+ GenTreePtr opAssign = nullptr;
if (type == TYP_STRUCT)
{
CORINFO_CLASS_HANDLE clsHnd = varDsc->lvVerTypeInfo.GetClassHandle();
@@ -483,7 +474,7 @@ void Compiler::gsParamsToShadows()
src = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
dst = gtNewOperNode(GT_ADDR, TYP_BYREF, dst);
- opAssign = gtNewCpObjNode(dst, src, clsHnd, false);
+ opAssign = gtNewCpObjNode(dst, src, clsHnd, false);
lvaTable[shadowVar].lvIsMultiRegArg = lvaTable[lclNum].lvIsMultiRegArg;
lvaTable[shadowVar].lvIsMultiRegRet = lvaTable[lclNum].lvIsMultiRegRet;
}
@@ -492,7 +483,7 @@ void Compiler::gsParamsToShadows()
opAssign = gtNewAssignNode(dst, src);
}
fgEnsureFirstBBisScratch();
- (void) fgInsertStmtAtBeg(fgFirstBB, fgMorphTree(opAssign));
+ (void)fgInsertStmtAtBeg(fgFirstBB, fgMorphTree(opAssign));
}
// If the method has "Jmp CalleeMethod", then we need to copy shadow params back to original
@@ -501,21 +492,21 @@ void Compiler::gsParamsToShadows()
{
// There could be more than one basic block ending with a "Jmp" type tail call.
// We would have to insert assignments in all such blocks, just before GT_JMP stmnt.
- for (BasicBlock * block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
if (block->bbJumpKind != BBJ_RETURN)
{
continue;
}
- if ((block->bbFlags & BBF_HAS_JMP) == 0)
+ if ((block->bbFlags & BBF_HAS_JMP) == 0)
{
continue;
}
for (UINT lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
- LclVarDsc *varDsc = &lvaTable[lclNum];
+ LclVarDsc* varDsc = &lvaTable[lclNum];
unsigned shadowVar = gsShadowVarInfo[lclNum].shadowCopy;
if (shadowVar == NO_SHADOW_COPY)
@@ -525,7 +516,7 @@ void Compiler::gsParamsToShadows()
GenTreePtr src = gtNewLclvNode(shadowVar, lvaTable[shadowVar].TypeGet());
GenTreePtr dst = gtNewLclvNode(lclNum, varDsc->TypeGet());
-
+
src->gtFlags |= GTF_DONT_CSE;
dst->gtFlags |= GTF_DONT_CSE;
@@ -533,8 +524,8 @@ void Compiler::gsParamsToShadows()
if (varDsc->TypeGet() == TYP_STRUCT)
{
CORINFO_CLASS_HANDLE clsHnd = varDsc->lvVerTypeInfo.GetClassHandle();
- src = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
- dst = gtNewOperNode(GT_ADDR, TYP_BYREF, dst);
+ src = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
+ dst = gtNewOperNode(GT_ADDR, TYP_BYREF, dst);
opAssign = gtNewCpObjNode(dst, src, clsHnd, false);
}
@@ -542,32 +533,30 @@ void Compiler::gsParamsToShadows()
{
opAssign = gtNewAssignNode(dst, src);
}
-
- (void) fgInsertStmtNearEnd(block, fgMorphTree(opAssign));
- }
+ (void)fgInsertStmtNearEnd(block, fgMorphTree(opAssign));
+ }
}
}
}
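
At the source level, the transformation above amounts to copying each vulnerable parameter into a local allocated below the unsafe buffer and redirecting every use to that copy (and, for methods ending in a jmp-style tail call, copying the value back before the jump). A conceptual before/after, written as ordinary C++ purely for illustration; the JIT performs this on its IR, and the names are made up:

    void use(char* p);

    void beforeTransform(char* p)
    {
        char buf[64];
        // ... code that might overrun buf and corrupt p's stack slot ...
        use(p);
    }

    void afterTransform(char* p)
    {
        char* pShadow = p; // shadow copy inserted at method entry
        char buf[64];      // the unsafe buffer no longer sits next to the used copy
        // ... the same possibly-overrunning code ...
        use(pShadow);      // every use of p is rewritten to the shadow copy
    }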
-
/*****************************************************************************
* gsReplaceShadowParams (tree-walk call-back)
* Replace all vulnerable param uses by it's shadow copy.
*/
-Compiler::fgWalkResult Compiler::gsReplaceShadowParams(GenTreePtr * pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::gsReplaceShadowParams(GenTreePtr* pTree, fgWalkData* data)
{
- Compiler * comp = data->compiler;
- GenTreePtr tree = *pTree;
- GenTreePtr asg = NULL;
+ Compiler* comp = data->compiler;
+ GenTreePtr tree = *pTree;
+ GenTreePtr asg = nullptr;
if (tree->gtOper == GT_ASG)
{
- asg = tree; // "asg" is the assignment tree.
- tree = tree->gtOp.gtOp1; // "tree" is the local var tree at the left-hand size of the assignment.
- }
-
+ asg = tree; // "asg" is the assignment tree.
+ tree = tree->gtOp.gtOp1; // "tree" is the local var tree at the left-hand side of the assignment.
+ }
+
if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_FLD)
{
UINT paramNum = tree->gtLclVarCommon.gtLclNum;
@@ -585,7 +574,7 @@ Compiler::fgWalkResult Compiler::gsReplaceShadowParams(GenTreePtr * pTree,
if (varTypeIsSmall(comp->lvaTable[paramNum].TypeGet()))
{
tree->gtType = TYP_INT;
- if (asg)
+ if (asg)
{
// If this is an assignment tree, propagate the type to it as well.
asg->gtType = TYP_INT;
@@ -595,4 +584,3 @@ Compiler::fgWalkResult Compiler::gsReplaceShadowParams(GenTreePtr * pTree,
return WALK_CONTINUE;
}
-
diff --git a/src/jit/hashbv.cpp b/src/jit/hashbv.cpp
index 33822144d2..c745659c4a 100644
--- a/src/jit/hashbv.cpp
+++ b/src/jit/hashbv.cpp
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
@@ -26,11 +25,11 @@ void hashBvNode::Reconstruct(indexType base)
assert(!(baseIndex % BITS_PER_NODE));
- for (int i=0; i< this->numElements(); i++)
+ for (int i = 0; i < this->numElements(); i++)
{
elements[i] = 0;
}
- next = NULL;
+ next = nullptr;
}
hashBvNode::hashBvNode(indexType base)
@@ -38,35 +37,34 @@ hashBvNode::hashBvNode(indexType base)
this->Reconstruct(base);
}
-hashBvNode *hashBvNode::Create(indexType base, Compiler *compiler)
+hashBvNode* hashBvNode::Create(indexType base, Compiler* compiler)
{
- hashBvNode *result = NULL;
+ hashBvNode* result = nullptr;
if (compiler->hbvGlobalData.hbvNodeFreeList)
{
- result = compiler->hbvGlobalData.hbvNodeFreeList;
+ result = compiler->hbvGlobalData.hbvNodeFreeList;
compiler->hbvGlobalData.hbvNodeFreeList = result->next;
}
else
{
- result = new(compiler, CMK_hashBv) hashBvNode;
+ result = new (compiler, CMK_hashBv) hashBvNode;
}
result->Reconstruct(base);
return result;
}
-void hashBvNode::freeNode(hashBvGlobalData *glob)
+void hashBvNode::freeNode(hashBvGlobalData* glob)
{
- this->next = glob->hbvNodeFreeList;
+ this->next = glob->hbvNodeFreeList;
glob->hbvNodeFreeList = this;
}
-
void hashBvNode::setBit(indexType base)
{
assert(base >= baseIndex);
assert(base - baseIndex < BITS_PER_NODE);
-
+
base -= baseIndex;
indexType elem = base / BITS_PER_ELEMENT;
indexType posi = base % BITS_PER_ELEMENT;
@@ -87,8 +85,8 @@ void hashBvNode::setLowest(indexType numToSet)
}
if (numToSet)
{
- elemType allOnes = ~(elemType(0));
- int numToShift = (int)(BITS_PER_ELEMENT - numToSet);
+ elemType allOnes = ~(elemType(0));
+ int numToShift = (int)(BITS_PER_ELEMENT - numToSet);
elements[elemIndex] = allOnes >> numToShift;
}
}
@@ -97,7 +95,7 @@ void hashBvNode::clrBit(indexType base)
{
assert(base >= baseIndex);
assert(base - baseIndex < BITS_PER_NODE);
-
+
base -= baseIndex;
indexType elem = base / BITS_PER_ELEMENT;
indexType posi = base % BITS_PER_ELEMENT;
@@ -108,9 +106,13 @@ void hashBvNode::clrBit(indexType base)
bool hashBvNode::belongsIn(indexType index)
{
if (index < baseIndex)
+ {
return false;
+ }
if (index >= baseIndex + BITS_PER_NODE)
+ {
return false;
+ }
return true;
}
@@ -122,8 +124,8 @@ int countBitsInWord(unsigned int bits)
bits = ((bits >> 2) & 0x33333333) + (bits & 0x33333333);
bits = ((bits >> 4) & 0x0F0F0F0F) + (bits & 0x0F0F0F0F);
bits = ((bits >> 8) & 0x00FF00FF) + (bits & 0x00FF00FF);
- bits = ((bits >>16) & 0x0000FFFF) + (bits & 0x0000FFFF);
- return (int) bits;
+ bits = ((bits >> 16) & 0x0000FFFF) + (bits & 0x0000FFFF);
+ return (int)bits;
}
int countBitsInWord(unsigned __int64 bits)
@@ -132,16 +134,16 @@ int countBitsInWord(unsigned __int64 bits)
bits = ((bits >> 2) & 0x3333333333333333) + (bits & 0x3333333333333333);
bits = ((bits >> 4) & 0x0F0F0F0F0F0F0F0F) + (bits & 0x0F0F0F0F0F0F0F0F);
bits = ((bits >> 8) & 0x00FF00FF00FF00FF) + (bits & 0x00FF00FF00FF00FF);
- bits = ((bits >>16) & 0x0000FFFF0000FFFF) + (bits & 0x0000FFFF0000FFFF);
- bits = ((bits >>32) & 0x00000000FFFFFFFF) + (bits & 0x00000000FFFFFFFF);
- return (int) bits;
+ bits = ((bits >> 16) & 0x0000FFFF0000FFFF) + (bits & 0x0000FFFF0000FFFF);
+ bits = ((bits >> 32) & 0x00000000FFFFFFFF) + (bits & 0x00000000FFFFFFFF);
+ return (int)bits;
}
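
Both overloads above are the classic parallel (SWAR) population count: each statement adds adjacent bit fields, doubling the field width until a single field holds the total. A worked 8-bit version of the same idea (reduced from the 32/64-bit code purely for illustration):

    #include <cassert>

    int countBitsInByte(unsigned x)
    {
        x = ((x >> 1) & 0x55) + (x & 0x55); // four 2-bit sums
        x = ((x >> 2) & 0x33) + (x & 0x33); // two 4-bit sums
        x = ((x >> 4) & 0x0F) + (x & 0x0F); // one 8-bit sum
        return (int)x;
    }

    int main()
    {
        assert(countBitsInByte(0xD6) == 5); // 0b11010110 has five set bits
        return 0;
    }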
int hashBvNode::countBits()
{
int result = 0;
-
- for (int i=0; i< this->numElements(); i++)
+
+ for (int i = 0; i < this->numElements(); i++)
{
elemType bits = elements[i];
@@ -154,73 +156,80 @@ int hashBvNode::countBits()
bool hashBvNode::anyBits()
{
- for (int i=0; i< this->numElements(); i++)
+ for (int i = 0; i < this->numElements(); i++)
{
if (elements[i])
+ {
return true;
+ }
}
return false;
}
-
-
bool hashBvNode::getBit(indexType base)
{
assert(base >= baseIndex);
assert(base - baseIndex < BITS_PER_NODE);
base -= baseIndex;
-
+
indexType elem = base / BITS_PER_ELEMENT;
indexType posi = base % BITS_PER_ELEMENT;
if (elements[elem] & (indexType(1) << posi))
+ {
return true;
+ }
else
+ {
return false;
+ }
}
bool hashBvNode::anySet()
{
- for (int i=0; i< this->numElements(); i++)
+ for (int i = 0; i < this->numElements(); i++)
{
if (elements[i])
+ {
return true;
+ }
}
return false;
}
-void hashBvNode::copyFrom(hashBvNode *other)
+void hashBvNode::copyFrom(hashBvNode* other)
{
this->baseIndex = other->baseIndex;
- for (int i=0; i< this->numElements(); i++)
+ for (int i = 0; i < this->numElements(); i++)
{
this->elements[i] = other->elements[i];
}
}
-
void hashBvNode::foreachBit(bitAction a)
{
indexType base;
- for (int i=0; i< this->numElements(); i++)
+ for (int i = 0; i < this->numElements(); i++)
{
- base = baseIndex + i*BITS_PER_ELEMENT;
+ base = baseIndex + i * BITS_PER_ELEMENT;
elemType e = elements[i];
while (e)
{
- if (e&1)
+ if (e & 1)
+ {
a(base);
+ }
e >>= 1;
base++;
}
- }
+ }
}
-elemType hashBvNode::AndWithChange(hashBvNode *other)
+elemType hashBvNode::AndWithChange(hashBvNode* other)
{
elemType result = 0;
-
- for (int i=0; i< this->numElements(); i++)
+
+ for (int i = 0; i < this->numElements(); i++)
{
elemType src = this->elements[i];
elemType dst;
@@ -232,11 +241,11 @@ elemType hashBvNode::AndWithChange(hashBvNode *other)
return result;
}
-elemType hashBvNode::OrWithChange(hashBvNode *other)
+elemType hashBvNode::OrWithChange(hashBvNode* other)
{
elemType result = 0;
-
- for (int i=0; i< this->numElements(); i++)
+
+ for (int i = 0; i < this->numElements(); i++)
{
elemType src = this->elements[i];
elemType dst;
@@ -248,11 +257,11 @@ elemType hashBvNode::OrWithChange(hashBvNode *other)
return result;
}
-elemType hashBvNode::XorWithChange(hashBvNode *other)
+elemType hashBvNode::XorWithChange(hashBvNode* other)
{
elemType result = 0;
-
- for (int i=0; i< this->numElements(); i++)
+
+ for (int i = 0; i < this->numElements(); i++)
{
elemType src = this->elements[i];
elemType dst;
@@ -264,11 +273,11 @@ elemType hashBvNode::XorWithChange(hashBvNode *other)
return result;
}
-elemType hashBvNode::SubtractWithChange(hashBvNode *other)
+elemType hashBvNode::SubtractWithChange(hashBvNode* other)
{
elemType result = 0;
-
- for (int i=0; i< this->numElements(); i++)
+
+ for (int i = 0; i < this->numElements(); i++)
{
elemType src = this->elements[i];
elemType dst;
@@ -280,48 +289,51 @@ elemType hashBvNode::SubtractWithChange(hashBvNode *other)
return result;
}
-
-void hashBvNode::AndWith(hashBvNode *other)
+void hashBvNode::AndWith(hashBvNode* other)
{
- for (int i=0; i< this->numElements(); i++)
+ for (int i = 0; i < this->numElements(); i++)
{
this->elements[i] &= other->elements[i];
}
}
-void hashBvNode::OrWith(hashBvNode *other)
+void hashBvNode::OrWith(hashBvNode* other)
{
- for (int i=0; i< this->numElements(); i++)
+ for (int i = 0; i < this->numElements(); i++)
{
this->elements[i] |= other->elements[i];
}
}
-void hashBvNode::XorWith(hashBvNode *other)
+void hashBvNode::XorWith(hashBvNode* other)
{
- for (int i=0; i< this->numElements(); i++)
+ for (int i = 0; i < this->numElements(); i++)
{
this->elements[i] ^= other->elements[i];
}
}
-void hashBvNode::Subtract(hashBvNode *other)
+void hashBvNode::Subtract(hashBvNode* other)
{
- for (int i=0; i< this->numElements(); i++)
+ for (int i = 0; i < this->numElements(); i++)
{
this->elements[i] &= ~other->elements[i];
}
}
-bool hashBvNode::sameAs(hashBvNode *other)
+bool hashBvNode::sameAs(hashBvNode* other)
{
if (this->baseIndex != other->baseIndex)
+ {
return false;
+ }
- for (int i=0; i<this->numElements(); i++)
+ for (int i = 0; i < this->numElements(); i++)
{
if (this->elements[i] != other->elements[i])
+ {
return false;
+ }
}
return true;
@@ -330,124 +342,118 @@ bool hashBvNode::sameAs(hashBvNode *other)
// --------------------------------------------------------------------
// --------------------------------------------------------------------
-hashBv::hashBv(
- Compiler *comp
- )
+hashBv::hashBv(Compiler* comp)
{
- this->compiler = comp;
+ this->compiler = comp;
this->log2_hashSize = globalData()->hbvHashSizeLog2;
int hts = hashtable_size();
nodeArr = getNewVector(hts);
- for (int i=0; i<hts; i++)
+ for (int i = 0; i < hts; i++)
{
- nodeArr[i] = NULL;
+ nodeArr[i] = nullptr;
}
this->numNodes = 0;
}
-hashBv *hashBv::Create(
- Compiler *compiler
- )
+hashBv* hashBv::Create(Compiler* compiler)
{
- hashBv *result;
- hashBvGlobalData *gd = &compiler->hbvGlobalData;
+ hashBv* result;
+ hashBvGlobalData* gd = &compiler->hbvGlobalData;
if (hbvFreeList(gd))
{
- result = hbvFreeList(gd);
+ result = hbvFreeList(gd);
hbvFreeList(gd) = result->next;
assert(result->nodeArr);
}
else
{
- result = new(compiler, CMK_hashBv) hashBv(compiler);
+ result = new (compiler, CMK_hashBv) hashBv(compiler);
memset(result, 0, sizeof(hashBv));
result->nodeArr = result->initialVector;
}
- result->compiler = compiler;
+ result->compiler = compiler;
result->log2_hashSize = 0;
- result->numNodes = 0;
-
+ result->numNodes = 0;
+
return result;
}
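
hashBv::Create above prefers recycling a bitvector from the per-compiler free list over allocating a new one; freed objects are chained through their next field. The same pattern in a minimal, self-contained form (pool and type names are made up for illustration):

#include <cstddef>

struct PooledBv
{
    PooledBv* next;
};

struct Pool
{
    PooledBv* freeList = nullptr;

    PooledBv* Create()
    {
        if (freeList != nullptr)
        {
            PooledBv* result = freeList; // reuse the most recently freed object
            freeList         = result->next;
            return result;
        }
        return new PooledBv{}; // otherwise fall back to a fresh allocation
    }

    void Release(PooledBv* bv)
    {
        bv->next = freeList; // push onto the free list for later reuse
        freeList = bv;
    }
};

int main()
{
    Pool      pool;
    PooledBv* a = pool.Create(); // fresh allocation
    pool.Release(a);
    PooledBv* b = pool.Create(); // recycled: same object as 'a'
    return (a == b) ? 0 : 1;
}
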
-void hashBv::Init(Compiler *compiler)
+void hashBv::Init(Compiler* compiler)
{
memset(&compiler->hbvGlobalData, 0, sizeof(hashBvGlobalData));
}
-
-hashBvGlobalData *hashBv::globalData()
-{
- return &compiler->hbvGlobalData;
+hashBvGlobalData* hashBv::globalData()
+{
+ return &compiler->hbvGlobalData;
}
-
-hashBvNode ** hashBv::getNewVector(int vectorLength)
+hashBvNode** hashBv::getNewVector(int vectorLength)
{
assert(vectorLength > 0);
assert(isPow2(vectorLength));
- hashBvNode ** newVector = new(compiler, CMK_hashBv) hashBvNode*[vectorLength]();
+ hashBvNode** newVector = new (compiler, CMK_hashBv) hashBvNode*[vectorLength]();
return newVector;
}
-hashBvNode *&hashBv::nodeFreeList(hashBvGlobalData *data)
-{
- return data->hbvNodeFreeList;
+hashBvNode*& hashBv::nodeFreeList(hashBvGlobalData* data)
+{
+ return data->hbvNodeFreeList;
}
-hashBv *&hashBv::hbvFreeList(hashBvGlobalData *data)
-{
- return data->hbvFreeList;
+hashBv*& hashBv::hbvFreeList(hashBvGlobalData* data)
+{
+ return data->hbvFreeList;
}
-void hashBv::freeVector(hashBvNode *vect, int vectorLength)
+void hashBv::freeVector(hashBvNode* vect, int vectorLength)
{
// not enough space to do anything with it
if (vectorLength < 2)
+ {
return;
-
- hbvFreeListNode *f = (hbvFreeListNode *) vect;
- f->next = globalData()->hbvFreeVectorList;
+ }
+
+ hbvFreeListNode* f = (hbvFreeListNode*)vect;
+ f->next = globalData()->hbvFreeVectorList;
globalData()->hbvFreeVectorList = f;
- f->size = vectorLength;
+ f->size = vectorLength;
}
-
void hashBv::hbvFree()
{
- Compiler *comp = this->compiler;
+ Compiler* comp = this->compiler;
int hts = hashtable_size();
- for (int i=0; i<hts; i++)
+ for (int i = 0; i < hts; i++)
{
while (nodeArr[i])
{
- hashBvNode *curr = nodeArr[i];
- nodeArr[i] = curr->next;
+ hashBvNode* curr = nodeArr[i];
+ nodeArr[i] = curr->next;
curr->freeNode(globalData());
}
}
// keep the vector attached because the whole thing is freelisted
// plus you don't even know if it's freeable
- this->next = hbvFreeList(globalData());
+ this->next = hbvFreeList(globalData());
hbvFreeList(globalData()) = this;
}
-hashBv *hashBv::CreateFrom(hashBv *other, Compiler *comp)
+hashBv* hashBv::CreateFrom(hashBv* other, Compiler* comp)
{
- hashBv *result = hashBv::Create(comp);
+ hashBv* result = hashBv::Create(comp);
result->copyFrom(other, comp);
return result;
}
-
-void hashBv::MergeLists(hashBvNode **root1, hashBvNode **root2)
+void hashBv::MergeLists(hashBvNode** root1, hashBvNode** root2)
{
}
@@ -463,12 +469,12 @@ bool hashBv::TooBig()
int hashBv::getNodeCount()
{
- int size = hashtable_size();
+ int size = hashtable_size();
int result = 0;
- for (int i=0; i<size; i++)
+ for (int i = 0; i < size; i++)
{
- hashBvNode *last = nodeArr[i];
+ hashBvNode* last = nodeArr[i];
while (last)
{
@@ -483,24 +489,26 @@ bool hashBv::IsValid()
{
int size = hashtable_size();
// is power of 2
- assert(((size-1) & size) == 0);
+ assert(((size - 1) & size) == 0);
- for (int i=0; i<size; i++)
+ for (int i = 0; i < size; i++)
{
- hashBvNode *last = nodeArr[i];
- hashBvNode *curr;
- int lastIndex = -1;
+ hashBvNode* last = nodeArr[i];
+ hashBvNode* curr;
+ int lastIndex = -1;
while (last)
{
// the node has been hashed correctly
assert((int)last->baseIndex > lastIndex);
- lastIndex = (int) last->baseIndex;
+ lastIndex = (int)last->baseIndex;
assert(i == getHashForIndex(last->baseIndex, size));
curr = last->next;
// the order is monotonically increasing bases
if (curr)
+ {
assert(curr->baseIndex > last->baseIndex);
+ }
last = curr;
}
}
@@ -510,93 +518,95 @@ bool hashBv::IsValid()
void hashBv::Resize()
{
// resize to 'optimal' size
-
+
this->Resize(this->numNodes);
}
void hashBv::Resize(int newSize)
{
- assert(newSize>0);
+ assert(newSize > 0);
newSize = nearest_pow2(newSize);
-
+
int oldSize = hashtable_size();
if (newSize == oldSize)
+ {
return;
+ }
- int oldSizeLog2 = log2_hashSize;
+ int oldSizeLog2 = log2_hashSize;
int log2_newSize = genLog2((unsigned)newSize);
int size;
- hashBvNode ** newNodes = this->getNewVector(newSize);
+ hashBvNode** newNodes = this->getNewVector(newSize);
- hashBvNode *** insertionPoints = (hashBvNode ***) alloca(sizeof(hashBvNode *)* newSize);
- memset(insertionPoints, 0, sizeof(hashBvNode *)* newSize);
+ hashBvNode*** insertionPoints = (hashBvNode***)alloca(sizeof(hashBvNode*) * newSize);
+ memset(insertionPoints, 0, sizeof(hashBvNode*) * newSize);
- for (int i=0; i<newSize; i++)
+ for (int i = 0; i < newSize; i++)
{
insertionPoints[i] = &(newNodes[i]);
}
-
+
if (newSize > oldSize)
{
// for each src list, expand it into multiple dst lists
- for (int i=0; i<oldSize; i++)
+ for (int i = 0; i < oldSize; i++)
{
- hashBvNode *next = nodeArr[i];
+ hashBvNode* next = nodeArr[i];
while (next)
{
- hashBvNode *curr = next;
- next = curr->next;
- int destination = getHashForIndex(curr->baseIndex, newSize);
+ hashBvNode* curr = next;
+ next = curr->next;
+ int destination = getHashForIndex(curr->baseIndex, newSize);
// ...
// stick the current node on the end of the selected list
*(insertionPoints[destination]) = curr;
- insertionPoints[destination] = &(curr->next);
- curr->next = NULL;
+ insertionPoints[destination] = &(curr->next);
+ curr->next = nullptr;
}
}
- nodeArr = newNodes;
- log2_hashSize = (unsigned short) log2_newSize;
-
+ nodeArr = newNodes;
+ log2_hashSize = (unsigned short)log2_newSize;
}
else if (oldSize > newSize)
{
int shrinkFactor = oldSize / newSize;
-
+
// shrink multiple lists into one list
- // more efficient ways to do this but...
+ // more efficient ways to do this but...
// if the lists are long, you shouldn't be shrinking.
- for (int i=0; i<oldSize; i++)
+ for (int i = 0; i < oldSize; i++)
{
- hashBvNode *next = nodeArr[i];
+ hashBvNode* next = nodeArr[i];
if (next)
{
// all nodes in this list should have the same destination list
- int destination = getHashForIndex(next->baseIndex, newSize);
- hashBvNode ** insertionPoint = &newNodes[destination];
+ int destination = getHashForIndex(next->baseIndex, newSize);
+ hashBvNode** insertionPoint = &newNodes[destination];
do
{
- hashBvNode *curr = next;
+ hashBvNode* curr = next;
// figure out where to insert it
while (*insertionPoint && (*insertionPoint)->baseIndex < curr->baseIndex)
+ {
insertionPoint = &((*insertionPoint)->next);
+ }
next = curr->next;
- hashBvNode *temp = *insertionPoint;
- *insertionPoint = curr;
- curr->next = temp;
-
- }
- while (next);
+ hashBvNode* temp = *insertionPoint;
+ *insertionPoint = curr;
+ curr->next = temp;
+
+ } while (next);
}
}
- nodeArr = newNodes;
- log2_hashSize = (unsigned short) log2_newSize;
+ nodeArr = newNodes;
+ log2_hashSize = (unsigned short)log2_newSize;
}
else
{
@@ -606,22 +616,22 @@ void hashBv::Resize(int newSize)
assert(this->IsValid());
}
-
-
#ifdef DEBUG
void hashBv::dump()
{
- bool first = true;
+ bool first = true;
indexType index;
// uncomment to print internal implementation details
- //DBEXEC(TRUE, printf("[%d(%d)(nodes:%d)]{ ", hashtable_size(), countBits(), this->numNodes));
+ // DBEXEC(TRUE, printf("[%d(%d)(nodes:%d)]{ ", hashtable_size(), countBits(), this->numNodes));
printf("{");
FOREACH_HBV_BIT_SET(index, this)
{
if (!first)
+ {
printf(" ");
+ }
printf("%d", index);
first = false;
}
@@ -639,26 +649,26 @@ void hashBv::dumpFancy()
printf("count:%d", this->countBits());
FOREACH_HBV_BIT_SET(index, this)
{
- if (last_1 != index-1)
+ if (last_1 != index - 1)
{
- if (last_0+1 != last_1)
+ if (last_0 + 1 != last_1)
{
- printf(" %d-%d", last_0+1, last_1);
+ printf(" %d-%d", last_0 + 1, last_1);
}
else
{
printf(" %d", last_1);
}
- last_0 = index-1;
+ last_0 = index - 1;
}
last_1 = index;
}
NEXT_HBV_BIT_SET;
// Print the last one
- if (last_0+1 != last_1)
+ if (last_0 + 1 != last_1)
{
- printf(" %d-%d", last_0+1, last_1);
+ printf(" %d-%d", last_0 + 1, last_1);
}
else
{
@@ -671,10 +681,9 @@ void hashBv::dumpFancy()
void hashBv::removeNodeAtBase(indexType index)
{
- hashBvNode **insertionPoint =
- this->getInsertionPointForIndex(index);
+ hashBvNode** insertionPoint = this->getInsertionPointForIndex(index);
- hashBvNode *node = *insertionPoint;
+ hashBvNode* node = *insertionPoint;
// make sure that we were called to remove something
// that really was there
@@ -688,11 +697,11 @@ void hashBv::removeNodeAtBase(indexType index)
int hashBv::getHashForIndex(indexType index, int table_size)
{
indexType hashIndex;
-
+
hashIndex = index >> LOG2_BITS_PER_NODE;
hashIndex &= (table_size - 1);
- return (int) hashIndex;
+ return (int)hashIndex;
}
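
getHashForIndex maps a bit index to a hash chain by discarding the bits that address within a node and masking with the power-of-two table size. A small standalone check, assuming the 128-bit node geometry implied by the constants in hashbv.h later in this diff (LOG2_BITS_PER_ELEMENT == 5, LOG2_ELEMENTS_PER_NODE == 2, so LOG2_BITS_PER_NODE == 7):

#include <cassert>

const int LOG2_BITS_PER_NODE = 7; // assumed geometry: 128 bits per node

// Same computation as hashBv::getHashForIndex; tableSize must be a power of two.
static int hashForIndex(long long index, int tableSize)
{
    long long hashIndex = index >> LOG2_BITS_PER_NODE; // drop within-node bits
    return (int)(hashIndex & (tableSize - 1));          // mask by table size
}

int main()
{
    assert(hashForIndex(0, 4) == 0);
    assert(hashForIndex(127, 4) == 0);     // still inside node 0
    assert(hashForIndex(128, 4) == 1);     // next node, next chain
    assert(hashForIndex(128 * 6, 4) == 2); // node 6 wraps to chain 2
    // After the table doubles, the same node hashes to chain 6, i.e. its old
    // chain plus the old table size -- the split that Resize() relies on.
    assert(hashForIndex(128 * 6, 8) == 6);
    return 0;
}
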
int hashBv::getRehashForIndex(indexType thisIndex, int thisTableSize, int newTableSize)
@@ -701,25 +710,25 @@ int hashBv::getRehashForIndex(indexType thisIndex, int thisTableSize, int newTab
return 0;
}
-hashBvNode **hashBv::getInsertionPointForIndex(indexType index)
+hashBvNode** hashBv::getInsertionPointForIndex(indexType index)
{
indexType indexInNode;
indexType hashIndex;
indexType baseIndex;
-
- hashBvNode *result;
+
+ hashBvNode* result;
hashIndex = getHashForIndex(index, hashtable_size());
baseIndex = index & ~(BITS_PER_NODE - 1);
indexInNode = index & (BITS_PER_NODE - 1);
- //printf("(%x) : hsh=%x, base=%x, index=%x\n", index,
+ // printf("(%x) : hsh=%x, base=%x, index=%x\n", index,
// hashIndex, baseIndex, indexInNode);
-
+
// find the node
- hashBvNode **prev = &nodeArr[hashIndex];
- result = nodeArr[hashIndex];
+ hashBvNode** prev = &nodeArr[hashIndex];
+ result = nodeArr[hashIndex];
while (result)
{
@@ -731,23 +740,23 @@ hashBvNode **hashBv::getInsertionPointForIndex(indexType index)
{
return prev;
}
- else
+ else
{
- prev = &(result->next);
+ prev = &(result->next);
result = result->next;
}
}
return prev;
}
-hashBvNode *hashBv::getNodeForIndexHelper(indexType index, bool canAdd)
+hashBvNode* hashBv::getNodeForIndexHelper(indexType index, bool canAdd)
{
// determine the base index of the node containing this index
index = index & ~(BITS_PER_NODE - 1);
- hashBvNode **prev = getInsertionPointForIndex(index);
+ hashBvNode** prev = getInsertionPointForIndex(index);
- hashBvNode *node = *prev;
+ hashBvNode* node = *prev;
if (node && node->belongsIn(index))
{
@@ -756,41 +765,47 @@ hashBvNode *hashBv::getNodeForIndexHelper(indexType index, bool canAdd)
else if (canAdd)
{
// missing node, insert it before the current one
- hashBvNode *temp = hashBvNode::Create(index, this->compiler);
- temp->next = node;
- *prev = temp;
+ hashBvNode* temp = hashBvNode::Create(index, this->compiler);
+ temp->next = node;
+ *prev = temp;
this->numNodes++;
return temp;
}
else
- return NULL;
+ {
+ return nullptr;
+ }
}
-hashBvNode *hashBv::getNodeForIndex(indexType index)
+hashBvNode* hashBv::getNodeForIndex(indexType index)
{
// determine the base index of the node containing this index
index = index & ~(BITS_PER_NODE - 1);
- hashBvNode **prev = getInsertionPointForIndex(index);
+ hashBvNode** prev = getInsertionPointForIndex(index);
- hashBvNode *node = *prev;
+ hashBvNode* node = *prev;
if (node && node->belongsIn(index))
+ {
return node;
+ }
else
- return NULL;
+ {
+ return nullptr;
+ }
}
void hashBv::setBit(indexType index)
{
assert(index >= 0);
assert(this->numNodes == this->getNodeCount());
- hashBvNode *result = NULL;
+ hashBvNode* result = nullptr;
indexType baseIndex = index & ~(BITS_PER_NODE - 1);
- indexType base = index - baseIndex;
- indexType elem = base / BITS_PER_ELEMENT;
- indexType posi = base % BITS_PER_ELEMENT;
+ indexType base = index - baseIndex;
+ indexType elem = base / BITS_PER_ELEMENT;
+ indexType posi = base % BITS_PER_ELEMENT;
// this should be the 99% case : when there is only one node in the structure
if ((result = nodeArr[0]) && result->baseIndex == baseIndex)
@@ -798,7 +813,7 @@ void hashBv::setBit(indexType index)
result->elements[elem] |= indexType(1) << posi;
return;
}
-
+
result = getOrAddNodeForIndex(index);
result->setBit(index);
@@ -816,26 +831,25 @@ void hashBv::setBit(indexType index)
void hashBv::setAll(indexType numToSet)
{
// TODO-Throughput: this could be more efficient
- for (unsigned int i=0; i<numToSet; i+= BITS_PER_NODE)
+ for (unsigned int i = 0; i < numToSet; i += BITS_PER_NODE)
{
- hashBvNode *node = getOrAddNodeForIndex(i);
- indexType bits_to_set = min(BITS_PER_NODE, numToSet - i);
+ hashBvNode* node = getOrAddNodeForIndex(i);
+ indexType bits_to_set = min(BITS_PER_NODE, numToSet - i);
node->setLowest(bits_to_set);
}
}
-
void hashBv::clearBit(indexType index)
{
assert(index >= 0);
assert(this->numNodes == this->getNodeCount());
- hashBvNode *result = NULL;
+ hashBvNode* result = nullptr;
indexType baseIndex = index & ~(BITS_PER_NODE - 1);
indexType hashIndex = getHashForIndex(index, hashtable_size());
- hashBvNode **prev = &nodeArr[hashIndex];
- result = nodeArr[hashIndex];
+ hashBvNode** prev = &nodeArr[hashIndex];
+ result = nodeArr[hashIndex];
while (result)
{
@@ -855,9 +869,9 @@ void hashBv::clearBit(indexType index)
{
return;
}
- else
+ else
{
- prev = &(result->next);
+ prev = &(result->next);
result = result->next;
}
}
@@ -877,7 +891,7 @@ bool hashBv::testBit(indexType index)
indexType hashIndex = getHashForIndex(baseIndex, hashtable_size());
- hashBvNode *iter = nodeArr[hashIndex];
+ hashBvNode* iter = nodeArr[hashIndex];
while (iter)
{
@@ -896,10 +910,10 @@ bool hashBv::testBit(indexType index)
int hashBv::countBits()
{
int result = 0;
- int hts = this->hashtable_size();
- for (int hashNum =0 ; hashNum < hts; hashNum++)
+ int hts = this->hashtable_size();
+ for (int hashNum = 0; hashNum < hts; hashNum++)
{
- hashBvNode *node = nodeArr[hashNum];
+ hashBvNode* node = nodeArr[hashNum];
while (node)
{
result += node->countBits();
@@ -914,13 +928,15 @@ bool hashBv::anySet()
int result = 0;
int hts = this->hashtable_size();
- for (int hashNum =0 ; hashNum < hts; hashNum++)
+ for (int hashNum = 0; hashNum < hts; hashNum++)
{
- hashBvNode *node = nodeArr[hashNum];
+ hashBvNode* node = nodeArr[hashNum];
while (node)
{
if (node->anySet())
+ {
return true;
+ }
node = node->next;
}
}
@@ -930,10 +946,10 @@ bool hashBv::anySet()
class AndAction
{
public:
- static inline void PreAction(hashBv *lhs, hashBv *rhs)
+ static inline void PreAction(hashBv* lhs, hashBv* rhs)
{
}
- static inline void PostAction(hashBv *lhs, hashBv *rhs)
+ static inline void PostAction(hashBv* lhs, hashBv* rhs)
{
}
static inline bool DefaultResult()
@@ -941,28 +957,28 @@ public:
return false;
}
- static inline void LeftGap(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void LeftGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// it's in other, not this
// so skip it
r = r->next;
}
- static inline void RightGap(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void RightGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// it's in LHS, not RHS
// so have to remove it
- hashBvNode *old = *l;
- *l = (*l)->next;
+ hashBvNode* old = *l;
+ *l = (*l)->next;
// splice it out
old->freeNode(lhs->globalData());
lhs->numNodes--;
result = true;
}
- static inline void BothPresent(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void BothPresent(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
if ((*l)->AndWithChange(r))
{
- r = r->next;
+ r = r->next;
result = true;
if ((*l)->anySet())
@@ -971,8 +987,8 @@ public:
}
else
{
- hashBvNode *old = *l;
- *l = (*l)->next;
+ hashBvNode* old = *l;
+ *l = (*l)->next;
old->freeNode(lhs->globalData());
lhs->numNodes--;
}
@@ -983,7 +999,7 @@ public:
l = &((*l)->next);
}
}
- static inline void LeftEmpty(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void LeftEmpty(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
r = r->next;
}
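
AndAction above (and the Subtract/Xor/Or/Compare actions that follow) are policy classes: the MultiTraverse routines walk two sorted node chains and call LeftGap, RightGap, BothPresent, or LeftEmpty depending on how the chain heads compare, and each action supplies the set semantics. A stripped-down sketch of that pattern over plain sorted lists (simplified: no node freeing, no terminate flag, illustrative names):

#include <cassert>

struct Node
{
    int   key;
    Node* next;
};

// Intersection-like policy, roughly what AndAction does: a key survives only
// if it appears in both lists.
struct IntersectPolicy
{
    static void LeftGap(Node**& l, Node*& r, bool& changed)
    {
        r = r->next; // right-only key: ignore it
    }
    static void RightGap(Node**& l, Node*& r, bool& changed)
    {
        *l      = (*l)->next; // left-only key: unlink it (a real version would free it)
        changed = true;
    }
    static void BothPresent(Node**& l, Node*& r, bool& changed)
    {
        l = &((*l)->next); // key present in both: keep it, advance both sides
        r = r->next;
    }
};

template <class Policy>
bool Traverse(Node** left, Node* right)
{
    bool changed = false;
    while (*left && right)
    {
        if ((*left)->key < right->key)
        {
            Policy::RightGap(left, right, changed);
        }
        else if ((*left)->key == right->key)
        {
            Policy::BothPresent(left, right, changed);
        }
        else
        {
            Policy::LeftGap(left, right, changed);
        }
    }
    while (*left)
    {
        Policy::RightGap(left, right, changed); // trailing left-only keys
    }
    return changed;
}

int main()
{
    Node  c = {5, nullptr}, b = {3, &c}, a = {1, &b}; // left:  1 -> 3 -> 5
    Node  y = {3, nullptr}, x = {1, &y};              // right: 1 -> 3
    Node* head    = &a;
    bool  changed = Traverse<IntersectPolicy>(&head, &x);
    assert(changed);
    assert(head == &a && a.next == &b && b.next == nullptr); // 5 was removed
    return 0;
}
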
@@ -992,33 +1008,33 @@ public:
class SubtractAction
{
public:
- static inline void PreAction(hashBv *lhs, hashBv *rhs)
+ static inline void PreAction(hashBv* lhs, hashBv* rhs)
{
}
- static inline void PostAction(hashBv *lhs, hashBv *rhs)
+ static inline void PostAction(hashBv* lhs, hashBv* rhs)
{
}
static inline bool DefaultResult()
{
return false;
}
- static inline void LeftGap(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void LeftGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// it's in other, not this
// so skip it
r = r->next;
}
- static inline void RightGap(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void RightGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// in lhs, not rhs
// so skip lhs
l = &((*l)->next);
}
- static inline void BothPresent(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void BothPresent(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
if ((*l)->SubtractWithChange(r))
{
- r = r->next;
+ r = r->next;
result = true;
if ((*l)->anySet())
@@ -1027,8 +1043,8 @@ public:
}
else
{
- hashBvNode *old = *l;
- *l = (*l)->next;
+ hashBvNode* old = *l;
+ *l = (*l)->next;
old->freeNode(lhs->globalData());
lhs->numNodes--;
}
@@ -1039,7 +1055,7 @@ public:
l = &((*l)->next);
}
}
- static inline void LeftEmpty(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void LeftEmpty(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
r = r->next;
}
@@ -1048,55 +1064,60 @@ public:
class XorAction
{
public:
- static inline void PreAction(hashBv *lhs, hashBv *rhs)
+ static inline void PreAction(hashBv* lhs, hashBv* rhs)
{
}
- static inline void PostAction(hashBv *lhs, hashBv *rhs)
+ static inline void PostAction(hashBv* lhs, hashBv* rhs)
{
}
- static inline bool DefaultResult() { return false; }
+ static inline bool DefaultResult()
+ {
+ return false;
+ }
- static inline void LeftGap(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void LeftGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// it's in other, not this
// so put one in
- result = true;
- hashBvNode *temp = hashBvNode::Create(r->baseIndex, lhs->compiler);
+ result = true;
+ hashBvNode* temp = hashBvNode::Create(r->baseIndex, lhs->compiler);
lhs->numNodes++;
temp->XorWith(r);
temp->next = (*l)->next;
- *l = temp;
- l = &(temp->next);
+ *l = temp;
+ l = &(temp->next);
r = r->next;
}
- static inline void RightGap(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void RightGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// it's in LHS, not RHS
// so LHS remains the same
l = &((*l)->next);
}
- static inline void BothPresent(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void BothPresent(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
if ((*l)->XorWithChange(r))
+ {
result = true;
+ }
l = &((*l)->next);
r = r->next;
}
- static inline void LeftEmpty(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void LeftEmpty(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// it's in other, not this
// so put one in
- result = true;
- hashBvNode *temp = hashBvNode::Create(r->baseIndex, lhs->compiler);
+ result = true;
+ hashBvNode* temp = hashBvNode::Create(r->baseIndex, lhs->compiler);
lhs->numNodes++;
temp->XorWith(r);
- temp->next = NULL;
- *l = temp;
- l = &(temp->next);
+ temp->next = nullptr;
+ *l = temp;
+ l = &(temp->next);
r = r->next;
}
@@ -1105,9 +1126,9 @@ public:
class OrAction
{
public:
- static inline void PreAction(hashBv *lhs, hashBv *rhs)
+ static inline void PreAction(hashBv* lhs, hashBv* rhs)
{
- if (lhs->log2_hashSize+2 < rhs->log2_hashSize)
+ if (lhs->log2_hashSize + 2 < rhs->log2_hashSize)
{
lhs->Resize(rhs->numNodes);
}
@@ -1116,7 +1137,7 @@ public:
rhs->Resize(rhs->numNodes);
}
}
- static inline void PostAction(hashBv *lhs, hashBv *rhs)
+ static inline void PostAction(hashBv* lhs, hashBv* rhs)
{
}
static inline bool DefaultResult()
@@ -1124,45 +1145,47 @@ public:
return false;
}
- static inline void LeftGap(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void LeftGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// it's in other, not this
// so put one in
- result = true;
- hashBvNode *temp = hashBvNode::Create(r->baseIndex, lhs->compiler);
+ result = true;
+ hashBvNode* temp = hashBvNode::Create(r->baseIndex, lhs->compiler);
lhs->numNodes++;
temp->OrWith(r);
temp->next = *l;
- *l = temp;
- l = &(temp->next);
+ *l = temp;
+ l = &(temp->next);
r = r->next;
}
- static inline void RightGap(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void RightGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// in lhs, not rhs
// so skip lhs
l = &((*l)->next);
}
- static inline void BothPresent(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void BothPresent(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
if ((*l)->OrWithChange(r))
+ {
result = true;
+ }
l = &((*l)->next);
r = r->next;
}
- static inline void LeftEmpty(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void LeftEmpty(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// other contains something this does not
// copy it
- //LeftGap(lhs, l, r, result, terminate);
- result = true;
- hashBvNode *temp = hashBvNode::Create(r->baseIndex, lhs->compiler);
+ // LeftGap(lhs, l, r, result, terminate);
+ result = true;
+ hashBvNode* temp = hashBvNode::Create(r->baseIndex, lhs->compiler);
lhs->numNodes++;
temp->OrWith(r);
- temp->next = NULL;
- *l = temp;
- l = &(temp->next);
+ temp->next = nullptr;
+ *l = temp;
+ l = &(temp->next);
r = r->next;
}
@@ -1171,64 +1194,65 @@ public:
class CompareAction
{
public:
- static inline void PreAction(hashBv *lhs, hashBv *rhs)
+ static inline void PreAction(hashBv* lhs, hashBv* rhs)
{
}
- static inline void PostAction(hashBv *lhs, hashBv *rhs)
+ static inline void PostAction(hashBv* lhs, hashBv* rhs)
{
}
static inline bool DefaultResult()
{
return true;
}
-
- static inline void LeftGap(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+
+ static inline void LeftGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
terminate = true;
- result = false;
+ result = false;
}
- static inline void RightGap(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void RightGap(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
// in lhs, not rhs
// so skip lhs
terminate = true;
- result = false;
+ result = false;
}
- static inline void BothPresent(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void BothPresent(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
if (!(*l)->sameAs(r))
{
terminate = true;
- result = false;
+ result = false;
}
l = &((*l)->next);
r = r->next;
}
- static inline void LeftEmpty(hashBv *lhs, hashBvNode **&l, hashBvNode *&r, bool &result, bool &terminate)
+ static inline void LeftEmpty(hashBv* lhs, hashBvNode**& l, hashBvNode*& r, bool& result, bool& terminate)
{
terminate = true;
- result = false;
+ result = false;
}
};
-template <typename Action> bool hashBv::MultiTraverseLHSBigger(hashBv *other)
+template <typename Action>
+bool hashBv::MultiTraverseLHSBigger(hashBv* other)
{
int hts = this->hashtable_size();
int ots = other->hashtable_size();
- bool result = Action::DefaultResult();
+ bool result = Action::DefaultResult();
bool terminate = false;
// this is larger
- hashBvNode *** cursors;
- int shiftFactor = this->log2_hashSize - other->log2_hashSize;
- int expansionFactor = hts/ots;
- cursors = (hashBvNode ***) alloca(expansionFactor*sizeof(void*));
+ hashBvNode*** cursors;
+ int shiftFactor = this->log2_hashSize - other->log2_hashSize;
+ int expansionFactor = hts / ots;
+ cursors = (hashBvNode***)alloca(expansionFactor * sizeof(void*));
- for (int h=0; h<other->hashtable_size(); h++)
+ for (int h = 0; h < other->hashtable_size(); h++)
{
// set up cursors for the expansion of nodes
- for (int i=0; i<expansionFactor; i++)
+ for (int i = 0; i < expansionFactor; i++)
{
// ex: for [1024] &= [8]
// for rhs in bin 0
@@ -1236,71 +1260,87 @@ template <typename Action> bool hashBv::MultiTraverseLHSBigger(hashBv *other)
cursors[i] = &nodeArr[ots * i + h];
}
- hashBvNode *o = other->nodeArr[h];
+ hashBvNode* o = other->nodeArr[h];
while (o)
{
- hashBvNode *next = o->next;
+ hashBvNode* next = o->next;
// figure out what dst list this goes to
- int hash = getHashForIndex(o->baseIndex, hts);
- int dstIndex = (hash-h) >> other->log2_hashSize;
- hashBvNode ** cursor = cursors[dstIndex];
- hashBvNode *c = *cursor;
+ int hash = getHashForIndex(o->baseIndex, hts);
+ int dstIndex = (hash - h) >> other->log2_hashSize;
+ hashBvNode** cursor = cursors[dstIndex];
+ hashBvNode* c = *cursor;
// figure out where o fits in the cursor
if (!c)
{
Action::LeftEmpty(this, cursors[dstIndex], o, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
else if (c->baseIndex == o->baseIndex)
{
Action::BothPresent(this, cursors[dstIndex], o, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
else if (c->baseIndex > o->baseIndex)
{
Action::LeftGap(this, cursors[dstIndex], o, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
else if (c->baseIndex < o->baseIndex)
{
Action::RightGap(this, cursors[dstIndex], o, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
}
- for (int i=0; i<expansionFactor; i++)
+ for (int i = 0; i < expansionFactor; i++)
{
while (*(cursors[i]))
{
Action::RightGap(this, cursors[i], o, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
}
}
return result;
}
-template <typename Action> bool hashBv::MultiTraverseRHSBigger(hashBv *other)
+template <typename Action>
+bool hashBv::MultiTraverseRHSBigger(hashBv* other)
{
int hts = this->hashtable_size();
int ots = other->hashtable_size();
- bool result = Action::DefaultResult();
+ bool result = Action::DefaultResult();
bool terminate = false;
- for (int hashNum =0 ; hashNum < ots; hashNum++)
+ for (int hashNum = 0; hashNum < ots; hashNum++)
{
- int destination = getHashForIndex(BITS_PER_NODE * hashNum, this->hashtable_size());
+ int destination = getHashForIndex(BITS_PER_NODE * hashNum, this->hashtable_size());
assert(hashNum == getHashForIndex(BITS_PER_NODE * hashNum, other->hashtable_size()));
-
- hashBvNode **pa = &this->nodeArr[destination];
- hashBvNode **pb = &other->nodeArr[hashNum];
- hashBvNode *b = *pb;
+
+ hashBvNode** pa = &this->nodeArr[destination];
+ hashBvNode** pb = &other->nodeArr[hashNum];
+ hashBvNode* b = *pb;
while (*pa && b)
{
- hashBvNode *a = *pa;
+ hashBvNode* a = *pa;
if (a->baseIndex < b->baseIndex)
{
// in a but not in b
@@ -1310,7 +1350,10 @@ template <typename Action> bool hashBv::MultiTraverseRHSBigger(hashBv *other)
// this contains something other does not
// need to erase it
Action::RightGap(this, pa, b, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
else
{
@@ -1321,13 +1364,19 @@ template <typename Action> bool hashBv::MultiTraverseRHSBigger(hashBv *other)
else if (a->baseIndex == b->baseIndex)
{
Action::BothPresent(this, pa, b, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
else if (a->baseIndex > b->baseIndex)
{
// other contains something this does not
Action::LeftGap(this, pa, b, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
}
while (*pa)
@@ -1337,7 +1386,10 @@ template <typename Action> bool hashBv::MultiTraverseRHSBigger(hashBv *other)
if (getHashForIndex((*pa)->baseIndex, ots) == hashNum)
{
Action::RightGap(this, pa, b, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
else
{
@@ -1347,7 +1399,10 @@ template <typename Action> bool hashBv::MultiTraverseRHSBigger(hashBv *other)
while (b)
{
Action::LeftEmpty(this, pa, b, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
}
assert(this->numNodes == this->getNodeCount());
@@ -1357,60 +1412,77 @@ template <typename Action> bool hashBv::MultiTraverseRHSBigger(hashBv *other)
// LHSBigger and RHSBigger algorithms both work for equal
// this is a specialized version of RHSBigger which is simpler (and faster)
// because equal sizes are the 99% case
-template <typename Action> bool hashBv::MultiTraverseEqual(hashBv *other)
+template <typename Action>
+bool hashBv::MultiTraverseEqual(hashBv* other)
{
int hts = this->hashtable_size();
assert(other->hashtable_size() == hts);
- bool result = Action::DefaultResult();
+ bool result = Action::DefaultResult();
bool terminate = false;
- for (int hashNum =0 ; hashNum < hts; hashNum++)
+ for (int hashNum = 0; hashNum < hts; hashNum++)
{
int destination = getHashForIndex(BITS_PER_NODE * hashNum, this->hashtable_size());
-
- hashBvNode **pa = &this->nodeArr[hashNum];
- hashBvNode **pb = &other->nodeArr[hashNum];
- hashBvNode *b = *pb;
+
+ hashBvNode** pa = &this->nodeArr[hashNum];
+ hashBvNode** pb = &other->nodeArr[hashNum];
+ hashBvNode* b = *pb;
while (*pa && b)
{
- hashBvNode *a = *pa;
+ hashBvNode* a = *pa;
if (a->baseIndex < b->baseIndex)
{
// in a but not in b
Action::RightGap(this, pa, b, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
else if (a->baseIndex == b->baseIndex)
{
Action::BothPresent(this, pa, b, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
else if (a->baseIndex > b->baseIndex)
{
// other contains something this does not
Action::LeftGap(this, pa, b, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
}
while (*pa)
{
// if it's in the dest but not in src
Action::RightGap(this, pa, b, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
while (b)
{
Action::LeftEmpty(this, pa, b, result, terminate);
- if (terminate) return result;
+ if (terminate)
+ {
+ return result;
+ }
}
}
assert(this->numNodes == this->getNodeCount());
return result;
}
-template <class Action> bool hashBv::MultiTraverse(hashBv *other)
+template <class Action>
+bool hashBv::MultiTraverse(hashBv* other)
{
bool result = false;
@@ -1435,29 +1507,29 @@ template <class Action> bool hashBv::MultiTraverse(hashBv *other)
}
}
-bool hashBv::AndWithChange(hashBv *other)
+bool hashBv::AndWithChange(hashBv* other)
{
return MultiTraverse<AndAction>(other);
}
// same as AND ~x
-bool hashBv::SubtractWithChange(hashBv *other)
+bool hashBv::SubtractWithChange(hashBv* other)
{
return MultiTraverse<SubtractAction>(other);
}
-void hashBv::Subtract(hashBv *other)
+void hashBv::Subtract(hashBv* other)
{
this->SubtractWithChange(other);
}
-void hashBv::Subtract3(hashBv *o1, hashBv *o2)
+void hashBv::Subtract3(hashBv* o1, hashBv* o2)
{
this->copyFrom(o1, compiler);
this->Subtract(o2);
}
-void hashBv::UnionMinus(hashBv *src1, hashBv *src2, hashBv *src3)
+void hashBv::UnionMinus(hashBv* src1, hashBv* src2, hashBv* src3)
{
this->Subtract3(src1, src2);
this->OrWithChange(src3);
@@ -1466,12 +1538,12 @@ void hashBv::UnionMinus(hashBv *src1, hashBv *src2, hashBv *src3)
void hashBv::ZeroAll()
{
int hts = this->hashtable_size();
-
- for (int hashNum =0 ; hashNum < hts; hashNum++)
+
+ for (int hashNum = 0; hashNum < hts; hashNum++)
{
while (nodeArr[hashNum])
{
- hashBvNode *n = nodeArr[hashNum];
+ hashBvNode* n = nodeArr[hashNum];
nodeArr[hashNum] = n->next;
n->freeNode(globalData());
}
@@ -1479,69 +1551,67 @@ void hashBv::ZeroAll()
this->numNodes = 0;
}
-
-bool hashBv::OrWithChange(hashBv *other)
+bool hashBv::OrWithChange(hashBv* other)
{
return MultiTraverse<OrAction>(other);
}
-bool hashBv::XorWithChange(hashBv *other)
+bool hashBv::XorWithChange(hashBv* other)
{
return MultiTraverse<XorAction>(other);
}
-void hashBv::OrWith(hashBv *other)
+void hashBv::OrWith(hashBv* other)
{
this->OrWithChange(other);
}
-void hashBv::AndWith(hashBv *other)
+void hashBv::AndWith(hashBv* other)
{
this->AndWithChange(other);
}
-bool hashBv::CompareWith(hashBv *other)
+bool hashBv::CompareWith(hashBv* other)
{
return MultiTraverse<CompareAction>(other);
}
-
-void hashBv::copyFrom(hashBv *other, Compiler *comp)
+void hashBv::copyFrom(hashBv* other, Compiler* comp)
{
assert(this != other);
-
- hashBvNode *freeList = NULL;
+
+ hashBvNode* freeList = nullptr;
this->ZeroAll();
if (this->log2_hashSize != other->log2_hashSize)
{
- this->nodeArr = this->getNewVector(other->hashtable_size());
+ this->nodeArr = this->getNewVector(other->hashtable_size());
this->log2_hashSize = other->log2_hashSize;
assert(this->hashtable_size() == other->hashtable_size());
}
int hts = this->hashtable_size();
- //printf("in copyfrom\n");
- for (int h=0; h<hts; h++)
+ // printf("in copyfrom\n");
+ for (int h = 0; h < hts; h++)
{
// put the current list on the free list
- freeList = this->nodeArr[h];
- this->nodeArr[h] = NULL;
-
- hashBvNode **splicePoint = &(this->nodeArr[h]);
- hashBvNode *otherNode = other->nodeArr[h];
- hashBvNode *newNode = NULL;
+ freeList = this->nodeArr[h];
+ this->nodeArr[h] = nullptr;
+
+ hashBvNode** splicePoint = &(this->nodeArr[h]);
+ hashBvNode* otherNode = other->nodeArr[h];
+ hashBvNode* newNode = nullptr;
while (otherNode)
{
- //printf("otherNode is True...\n");
- hashBvNode *next = *splicePoint;
+ // printf("otherNode is True...\n");
+ hashBvNode* next = *splicePoint;
this->numNodes++;
-
+
if (freeList)
{
- newNode = freeList;
+ newNode = freeList;
freeList = freeList->next;
newNode->Reconstruct(otherNode->baseIndex);
}
@@ -1550,17 +1620,17 @@ void hashBv::copyFrom(hashBv *other, Compiler *comp)
newNode = hashBvNode::Create(otherNode->baseIndex, this->compiler);
}
newNode->copyFrom(otherNode);
-
+
newNode->next = *splicePoint;
- *splicePoint = newNode;
- splicePoint = &(newNode->next);
+ *splicePoint = newNode;
+ splicePoint = &(newNode->next);
otherNode = otherNode->next;
}
}
while (freeList)
{
- hashBvNode *next = freeList->next;
+ hashBvNode* next = freeList->next;
freeList->freeNode(globalData());
freeList = next;
}
@@ -1572,24 +1642,24 @@ void hashBv::copyFrom(hashBv *other, Compiler *comp)
#endif
}
-int nodeSort(const void *x, const void *y)
+int nodeSort(const void* x, const void* y)
{
- hashBvNode *a = (hashBvNode *) x;
- hashBvNode *b = (hashBvNode *) y;
- return (int) (b->baseIndex - a->baseIndex);
+ hashBvNode* a = (hashBvNode*)x;
+ hashBvNode* b = (hashBvNode*)y;
+ return (int)(b->baseIndex - a->baseIndex);
}
void hashBv::InorderTraverse(nodeAction n)
{
int hts = hashtable_size();
- hashBvNode **x = new(compiler, CMK_hashBv) hashBvNode*[hts];
+ hashBvNode** x = new (compiler, CMK_hashBv) hashBvNode*[hts];
{
// keep an array of the current pointers
// into each of the the bitvector lists
// in the hashtable
- for (int i=0; i<hts; i++)
+ for (int i = 0; i < hts; i++)
{
x[i] = nodeArr[i];
}
@@ -1597,14 +1667,14 @@ void hashBv::InorderTraverse(nodeAction n)
while (1)
{
// pick the lowest node in the hashtable
-
- indexType lowest = INT_MAX;
- int lowest_index = -1;
- for (int i=0; i<hts; i++)
+
+ indexType lowest = INT_MAX;
+ int lowest_index = -1;
+ for (int i = 0; i < hts; i++)
{
if (x[i] && x[i]->baseIndex < lowest)
{
- lowest = x[i]->baseIndex;
+ lowest = x[i]->baseIndex;
lowest_index = i;
}
}
@@ -1616,82 +1686,82 @@ void hashBv::InorderTraverse(nodeAction n)
x[lowest_index] = x[lowest_index]->next;
}
else
+ {
break;
+ }
}
-
}
delete[] x;
}
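
InorderTraverse visits nodes in ascending index order by repeatedly scanning the per-chain cursors for the lowest remaining baseIndex. The same idea over plain sorted arrays (illustrative only; the JIT walks hashBvNode chains instead):

#include <cassert>
#include <climits>
#include <vector>

std::vector<int> mergeAscending(const std::vector<std::vector<int>>& lists)
{
    std::vector<int>    out;
    std::vector<size_t> cursor(lists.size(), 0);
    while (true)
    {
        int lowest      = INT_MAX;
        int lowestIndex = -1;
        for (size_t i = 0; i < lists.size(); i++)
        {
            // pick the list whose current head is smallest
            if (cursor[i] < lists[i].size() && lists[i][cursor[i]] < lowest)
            {
                lowest      = lists[i][cursor[i]];
                lowestIndex = (int)i;
            }
        }
        if (lowestIndex == -1)
        {
            break; // every list is exhausted
        }
        out.push_back(lowest);
        cursor[lowestIndex]++;
    }
    return out;
}

int main()
{
    std::vector<std::vector<int>> lists = {{1, 4, 9}, {2, 3}, {7}};
    assert((mergeAscending(lists) == std::vector<int>{1, 2, 3, 4, 7, 9}));
    return 0;
}
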
-void hashBv::InorderTraverseTwo(hashBv *other, dualNodeAction a)
+void hashBv::InorderTraverseTwo(hashBv* other, dualNodeAction a)
{
- int sizeThis, sizeOther;
+ int sizeThis, sizeOther;
hashBvNode **nodesThis, **nodesOther;
- sizeThis = this->hashtable_size();
+ sizeThis = this->hashtable_size();
sizeOther = other->hashtable_size();
- nodesThis = new(compiler, CMK_hashBv) hashBvNode*[sizeThis];
- nodesOther = new(compiler, CMK_hashBv) hashBvNode*[sizeOther];
+ nodesThis = new (compiler, CMK_hashBv) hashBvNode*[sizeThis];
+ nodesOther = new (compiler, CMK_hashBv) hashBvNode*[sizeOther];
- //populate the arrays
- for (int i=0; i<sizeThis; i++)
+ // populate the arrays
+ for (int i = 0; i < sizeThis; i++)
{
nodesThis[i] = this->nodeArr[i];
}
- for (int i=0; i<sizeOther; i++)
+ for (int i = 0; i < sizeOther; i++)
{
nodesOther[i] = other->nodeArr[i];
}
while (1)
{
- indexType lowestThis = INT_MAX;
- indexType lowestOther = INT_MAX;
- int lowestHashIndexThis = -1;
- int lowestHashIndexOther = -1;
+ indexType lowestThis = INT_MAX;
+ indexType lowestOther = INT_MAX;
+ int lowestHashIndexThis = -1;
+ int lowestHashIndexOther = -1;
// find the lowest remaining node in each BV
- for (int i=0; i<sizeThis; i++)
+ for (int i = 0; i < sizeThis; i++)
{
- if (nodesThis[i] && nodesThis[i]->baseIndex
- < lowestThis)
+ if (nodesThis[i] && nodesThis[i]->baseIndex < lowestThis)
{
lowestHashIndexThis = i;
- lowestThis = nodesThis[i]->baseIndex;
+ lowestThis = nodesThis[i]->baseIndex;
}
}
- for (int i=0; i<sizeOther; i++)
+ for (int i = 0; i < sizeOther; i++)
{
- if (nodesOther[i] && nodesOther[i]->baseIndex
- < lowestOther)
+ if (nodesOther[i] && nodesOther[i]->baseIndex < lowestOther)
{
lowestHashIndexOther = i;
- lowestOther = nodesOther[i]->baseIndex;
+ lowestOther = nodesOther[i]->baseIndex;
}
}
hashBvNode *nodeThis, *nodeOther;
- nodeThis = lowestHashIndexThis == -1 ? NULL :
- nodesThis[lowestHashIndexThis];
- nodeOther = lowestHashIndexOther == -1 ? NULL :
- nodesOther[lowestHashIndexOther];
+ nodeThis = lowestHashIndexThis == -1 ? nullptr : nodesThis[lowestHashIndexThis];
+ nodeOther = lowestHashIndexOther == -1 ? nullptr : nodesOther[lowestHashIndexOther];
// no nodes left in either, so return
if ((!nodeThis) && (!nodeOther))
+ {
break;
-
- // there are only nodes left in one bitvector
+
+ // there are only nodes left in one bitvector
+ }
else if ((!nodeThis) || (!nodeOther))
{
a(this, other, nodeThis, nodeOther);
if (nodeThis)
- nodesThis[lowestHashIndexThis] =
- nodesThis[lowestHashIndexThis]->next;
+ {
+ nodesThis[lowestHashIndexThis] = nodesThis[lowestHashIndexThis]->next;
+ }
if (nodeOther)
- nodesOther[lowestHashIndexOther] =
- nodesOther[lowestHashIndexOther]->next;
-
+ {
+ nodesOther[lowestHashIndexOther] = nodesOther[lowestHashIndexOther]->next;
+ }
}
// nodes are left in both so determine if the lowest ones
// match. if so process them in a pair. if not then
@@ -1701,99 +1771,104 @@ void hashBv::InorderTraverseTwo(hashBv *other, dualNodeAction a)
if (nodeThis->baseIndex == nodeOther->baseIndex)
{
a(this, other, nodeThis, nodeOther);
- nodesThis[lowestHashIndexThis] =
- nodesThis[lowestHashIndexThis]->next;
- nodesOther[lowestHashIndexOther] =
- nodesOther[lowestHashIndexOther]->next;
+ nodesThis[lowestHashIndexThis] = nodesThis[lowestHashIndexThis]->next;
+ nodesOther[lowestHashIndexOther] = nodesOther[lowestHashIndexOther]->next;
}
else if (nodeThis->baseIndex < nodeOther->baseIndex)
{
- a(this, other, nodeThis, NULL);
- nodesThis[lowestHashIndexThis] =
- nodesThis[lowestHashIndexThis]->next;
+ a(this, other, nodeThis, nullptr);
+ nodesThis[lowestHashIndexThis] = nodesThis[lowestHashIndexThis]->next;
}
else if (nodeOther->baseIndex < nodeThis->baseIndex)
{
- a(this, other, NULL, nodeOther);
- nodesOther[lowestHashIndexOther] =
- nodesOther[lowestHashIndexOther]->next;
+ a(this, other, nullptr, nodeOther);
+ nodesOther[lowestHashIndexOther] = nodesOther[lowestHashIndexOther]->next;
}
}
-
-
}
delete[] nodesThis;
delete[] nodesOther;
}
-
// --------------------------------------------------------------------
// --------------------------------------------------------------------
-
#ifdef DEBUG
-void SimpleDumpNode(hashBvNode *n)
+void SimpleDumpNode(hashBvNode* n)
{
printf("base: %d\n", n->baseIndex);
}
-void DumpNode(hashBvNode *n)
+void DumpNode(hashBvNode* n)
{
n->dump();
}
-void SimpleDumpDualNode(hashBv *a, hashBv *b, hashBvNode *n, hashBvNode *m)
+void SimpleDumpDualNode(hashBv* a, hashBv* b, hashBvNode* n, hashBvNode* m)
{
printf("nodes: ");
if (n)
+ {
printf("%d,", n->baseIndex);
+ }
else
+ {
printf("----,");
+ }
if (m)
+ {
printf("%d\n", m->baseIndex);
+ }
else
+ {
printf("----\n");
-
+ }
}
#endif // DEBUG
hashBvIterator::hashBvIterator()
{
- this->bv = NULL;
+ this->bv = nullptr;
}
-
-hashBvIterator::hashBvIterator(hashBv *bv)
+
+hashBvIterator::hashBvIterator(hashBv* bv)
{
- this->bv = bv;
+ this->bv = bv;
this->hashtable_index = 0;
this->current_element = 0;
- this->current_base = 0;
- this->current_data = 0;
+ this->current_base = 0;
+ this->current_data = 0;
if (bv)
{
this->hashtable_size = bv->hashtable_size();
- this->currNode = bv->nodeArr[0];
-
+ this->currNode = bv->nodeArr[0];
+
if (!this->currNode)
+ {
this->nextNode();
+ }
}
}
-void hashBvIterator::initFrom(hashBv *bv)
+void hashBvIterator::initFrom(hashBv* bv)
{
- this->bv = bv;
- this->hashtable_size = bv->hashtable_size();
+ this->bv = bv;
+ this->hashtable_size = bv->hashtable_size();
this->hashtable_index = 0;
- this->currNode = bv->nodeArr[0];
+ this->currNode = bv->nodeArr[0];
this->current_element = 0;
- this->current_base = 0;
- this->current_data = 0;
+ this->current_base = 0;
+ this->current_data = 0;
if (!this->currNode)
+ {
this->nextNode();
+ }
if (this->currNode)
+ {
this->current_data = this->currNode->elements[0];
+ }
}
void hashBvIterator::nextNode()
@@ -1811,51 +1886,54 @@ void hashBvIterator::nextNode()
// no more
if (hashtable_index >= hashtable_size)
{
- //printf("nextnode bailed\n");
+ // printf("nextnode bailed\n");
return;
}
-
+
this->currNode = bv->nodeArr[hashtable_index];
}
// first element in the new node
this->current_element = 0;
- this->current_base = this->currNode->baseIndex;
- this->current_data = this->currNode->elements[0];
- //printf("nextnode returned base %d\n", this->current_base);
- //printf("hti = %d ", hashtable_index);
+ this->current_base = this->currNode->baseIndex;
+ this->current_data = this->currNode->elements[0];
+ // printf("nextnode returned base %d\n", this->current_base);
+ // printf("hti = %d ", hashtable_index);
}
indexType hashBvIterator::nextBit()
{
- //printf("in nextbit for bv:\n");
- //this->bv->dump();
-
+ // printf("in nextbit for bv:\n");
+ // this->bv->dump();
+
if (!this->currNode)
+ {
this->nextNode();
+ }
+
+top:
-
- top:
-
if (!this->currNode)
+ {
return NOMOREBITS;
+ }
- more_data:
+more_data:
if (!this->current_data)
{
current_element++;
- //printf("current element is %d\n", current_element);
+ // printf("current element is %d\n", current_element);
// reached the end of this node
- if (current_element == (indexType) this->currNode->numElements())
+ if (current_element == (indexType)this->currNode->numElements())
{
- //printf("going to next node\n");
+ // printf("going to next node\n");
this->nextNode();
goto top;
}
else
{
- assert(current_element < (indexType) this->currNode->numElements());
- //printf("getting more data\n");
+ assert(current_element < (indexType)this->currNode->numElements());
+ // printf("getting more data\n");
current_data = this->currNode->elements[current_element];
current_base = this->currNode->baseIndex + current_element * BITS_PER_ELEMENT;
goto more_data;
@@ -1880,10 +1958,9 @@ indexType hashBvIterator::nextBit()
}
goto more_data;
}
-
}
-indexType HbvNext(hashBv *bv, Compiler *comp)
+indexType HbvNext(hashBv* bv, Compiler* comp)
{
if (bv)
{
@@ -1891,4 +1968,3 @@ indexType HbvNext(hashBv *bv, Compiler *comp)
}
return bv->globalData()->hashBvNextIterator.nextBit();
}
-
diff --git a/src/jit/hashbv.h b/src/jit/hashbv.h
index d2d15559c2..55d3b27bdd 100644
--- a/src/jit/hashbv.h
+++ b/src/jit/hashbv.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#ifndef HASHBV_H
#define HASHBV_H
@@ -15,16 +14,15 @@
#include <memory.h>
#include <windows.h>
-
//#define TESTING 1
-#define LOG2_BITS_PER_ELEMENT 5
+#define LOG2_BITS_PER_ELEMENT 5
#define LOG2_ELEMENTS_PER_NODE 2
-#define LOG2_BITS_PER_NODE (LOG2_BITS_PER_ELEMENT + LOG2_ELEMENTS_PER_NODE)
+#define LOG2_BITS_PER_NODE (LOG2_BITS_PER_ELEMENT + LOG2_ELEMENTS_PER_NODE)
-#define BITS_PER_ELEMENT (1 << LOG2_BITS_PER_ELEMENT)
+#define BITS_PER_ELEMENT (1 << LOG2_BITS_PER_ELEMENT)
#define ELEMENTS_PER_NODE (1 << LOG2_ELEMENTS_PER_NODE)
-#define BITS_PER_NODE (1 << LOG2_BITS_PER_NODE)
+#define BITS_PER_NODE (1 << LOG2_BITS_PER_NODE)
#ifdef _TARGET_AMD64_
typedef unsigned __int64 elemType;
@@ -40,9 +38,8 @@ class hashBvIterator;
class hashBvGlobalData;
typedef void bitAction(indexType);
-typedef void nodeAction(hashBvNode *);
-typedef void dualNodeAction(hashBv *left, hashBv *right, hashBvNode *a, hashBvNode *b);
-
+typedef void nodeAction(hashBvNode*);
+typedef void dualNodeAction(hashBv* left, hashBv* right, hashBvNode* a, hashBvNode* b);
#define NOMOREBITS -1
@@ -80,7 +77,7 @@ inline int log2(int number)
while (number)
{
result++;
- number>>=1;
+ number >>= 1;
}
return result;
}
@@ -93,216 +90,222 @@ inline int nearest_pow2(unsigned number)
if (number > 0xffff)
{
- number >>= 16; result += 16;
+ number >>= 16;
+ result += 16;
}
if (number > 0xff)
{
- number >>= 8; result += 8;
+ number >>= 8;
+ result += 8;
}
if (number > 0xf)
{
- number >>= 4; result += 4;
+ number >>= 4;
+ result += 4;
}
if (number > 0x3)
{
- number >>= 2; result += 2;
+ number >>= 2;
+ result += 2;
}
if (number > 0x1)
{
- number >>= 1; result += 1;
+ number >>= 1;
+ result += 1;
}
return 1 << result;
}
class hashBvNode
{
- public:
- hashBvNode *next;
+public:
+ hashBvNode* next;
indexType baseIndex;
elemType elements[ELEMENTS_PER_NODE];
- public:
- hashBvNode (indexType base);
- hashBvNode() {}
- static hashBvNode *Create ( indexType base, Compiler *comp);
+public:
+ hashBvNode(indexType base);
+ hashBvNode()
+ {
+ }
+ static hashBvNode* Create(indexType base, Compiler* comp);
void Reconstruct(indexType base);
- int numElements() { return ELEMENTS_PER_NODE; }
- void setBit (indexType base);
- void setLowest (indexType numToSet);
- bool getBit (indexType base);
- void clrBit (indexType base);
- bool anySet ();
- bool belongsIn (indexType index);
+ int numElements()
+ {
+ return ELEMENTS_PER_NODE;
+ }
+ void setBit(indexType base);
+ void setLowest(indexType numToSet);
+ bool getBit(indexType base);
+ void clrBit(indexType base);
+ bool anySet();
+ bool belongsIn(indexType index);
int countBits();
bool anyBits();
void foreachBit(bitAction x);
- void freeNode (
- hashBvGlobalData *glob
- );
- bool sameAs(hashBvNode *other);
- void copyFrom(hashBvNode *other);
-
- void AndWith(hashBvNode *other);
- void OrWith(hashBvNode *other);
- void XorWith(hashBvNode *other);
- void Subtract(hashBvNode *other);
-
- elemType AndWithChange(hashBvNode *other);
- elemType OrWithChange(hashBvNode *other);
- elemType XorWithChange(hashBvNode *other);
- elemType SubtractWithChange(hashBvNode *other);
+ void freeNode(hashBvGlobalData* glob);
+ bool sameAs(hashBvNode* other);
+ void copyFrom(hashBvNode* other);
+
+ void AndWith(hashBvNode* other);
+ void OrWith(hashBvNode* other);
+ void XorWith(hashBvNode* other);
+ void Subtract(hashBvNode* other);
+
+ elemType AndWithChange(hashBvNode* other);
+ elemType OrWithChange(hashBvNode* other);
+ elemType XorWithChange(hashBvNode* other);
+ elemType SubtractWithChange(hashBvNode* other);
#ifdef DEBUG
- void dump ();
+ void dump();
#endif // DEBUG
};
-
class hashBv
{
- public:
+public:
// --------------------------------------
// data
// --------------------------------------
- hashBvNode **nodeArr;
- hashBvNode *initialVector[1];
+ hashBvNode** nodeArr;
+ hashBvNode* initialVector[1];
- union
- {
- Compiler *compiler;
+ union {
+ Compiler* compiler;
// for freelist
- hashBv *next;
+ hashBv* next;
};
unsigned short log2_hashSize;
// used for heuristic resizing... could be overflowed in rare circumstances
// but should not affect correctness
unsigned short numNodes;
-
-
- public:
- hashBv(Compiler *comp);
- hashBv(hashBv *other);
- //hashBv() {}
- static hashBv* Create(Compiler *comp);
- static void Init(Compiler *comp);
- static hashBv* CreateFrom(hashBv *other, Compiler *comp);
+
+public:
+ hashBv(Compiler* comp);
+ hashBv(hashBv* other);
+ // hashBv() {}
+ static hashBv* Create(Compiler* comp);
+ static void Init(Compiler* comp);
+ static hashBv* CreateFrom(hashBv* other, Compiler* comp);
void hbvFree();
#ifdef DEBUG
void dump();
void dumpFancy();
#endif // DEBUG
- __forceinline int hashtable_size() { return 1 << this->log2_hashSize; }
+ __forceinline int hashtable_size()
+ {
+ return 1 << this->log2_hashSize;
+ }
- hashBvGlobalData *globalData();
+ hashBvGlobalData* globalData();
- static hashBvNode *&nodeFreeList(hashBvGlobalData *globalData);
- static hashBv *&hbvFreeList(hashBvGlobalData *data);
+ static hashBvNode*& nodeFreeList(hashBvGlobalData* globalData);
+ static hashBv*& hbvFreeList(hashBvGlobalData* data);
- hashBvNode **getInsertionPointForIndex(indexType index);
+ hashBvNode** getInsertionPointForIndex(indexType index);
- private:
- hashBvNode *getNodeForIndexHelper(indexType index, bool canAdd);
- int getHashForIndex(indexType index, int table_size);
- int getRehashForIndex(indexType thisIndex, int thisTableSize, int newTableSize);
+private:
+ hashBvNode* getNodeForIndexHelper(indexType index, bool canAdd);
+ int getHashForIndex(indexType index, int table_size);
+ int getRehashForIndex(indexType thisIndex, int thisTableSize, int newTableSize);
// maintain free lists for vectors
- hashBvNode ** getNewVector(int vectorLength);
- void freeVector(hashBvNode *vect, int vectorLength);
- int getNodeCount();
+ hashBvNode** getNewVector(int vectorLength);
+ void freeVector(hashBvNode* vect, int vectorLength);
+ int getNodeCount();
hashBvNode* getFreeList();
- public:
-
- inline hashBvNode *getOrAddNodeForIndex(indexType index)
+public:
+ inline hashBvNode* getOrAddNodeForIndex(indexType index)
{
- hashBvNode *temp = getNodeForIndexHelper(index, true);
+ hashBvNode* temp = getNodeForIndexHelper(index, true);
return temp;
}
- hashBvNode *getNodeForIndex (indexType index);
- void removeNodeAtBase (indexType index);
-
-
- public:
- void setBit (indexType index);
- void setAll (indexType numToSet);
- bool testBit (indexType index);
+ hashBvNode* getNodeForIndex(indexType index);
+ void removeNodeAtBase(indexType index);
+
+public:
+ void setBit(indexType index);
+ void setAll(indexType numToSet);
+ bool testBit(indexType index);
void clearBit(indexType index);
int countBits();
bool anySet();
- void copyFrom(hashBv *other, Compiler *comp);
+ void copyFrom(hashBv* other, Compiler* comp);
void ZeroAll();
- bool CompareWith(hashBv *other);
-
- void AndWith (hashBv *other);
- void OrWith (hashBv *other);
- void XorWith (hashBv *other);
- void Subtract(hashBv *other);
- void Subtract3(hashBv *other, hashBv *other2);
-
- void UnionMinus(hashBv *a, hashBv *b, hashBv *c);
-
- bool AndWithChange (hashBv *other);
- bool OrWithChange (hashBv *other);
- bool OrWithChangeRight (hashBv *other);
- bool OrWithChangeLeft (hashBv *other);
- bool XorWithChange (hashBv *other);
- bool SubtractWithChange (hashBv *other);
-
- template <class Action> bool MultiTraverseLHSBigger(hashBv *other);
- template <class Action> bool MultiTraverseRHSBigger(hashBv *other);
- template <class Action> bool MultiTraverseEqual(hashBv *other);
- template <class Action> bool MultiTraverse(hashBv *other);
-
-
+ bool CompareWith(hashBv* other);
+
+ void AndWith(hashBv* other);
+ void OrWith(hashBv* other);
+ void XorWith(hashBv* other);
+ void Subtract(hashBv* other);
+ void Subtract3(hashBv* other, hashBv* other2);
+
+ void UnionMinus(hashBv* a, hashBv* b, hashBv* c);
+
+ bool AndWithChange(hashBv* other);
+ bool OrWithChange(hashBv* other);
+ bool OrWithChangeRight(hashBv* other);
+ bool OrWithChangeLeft(hashBv* other);
+ bool XorWithChange(hashBv* other);
+ bool SubtractWithChange(hashBv* other);
+
+ template <class Action>
+ bool MultiTraverseLHSBigger(hashBv* other);
+ template <class Action>
+ bool MultiTraverseRHSBigger(hashBv* other);
+ template <class Action>
+ bool MultiTraverseEqual(hashBv* other);
+ template <class Action>
+ bool MultiTraverse(hashBv* other);
void InorderTraverse(nodeAction a);
- void InorderTraverseTwo(hashBv *other, dualNodeAction a);
+ void InorderTraverseTwo(hashBv* other, dualNodeAction a);
void Resize(int newSize);
void Resize();
- void MergeLists(hashBvNode **a, hashBvNode **b);
+ void MergeLists(hashBvNode** a, hashBvNode** b);
bool TooSmall();
bool TooBig();
bool IsValid();
-
};
-
// --------------------------------------------------------------------
// --------------------------------------------------------------------
class hbvFreeListNode
{
public:
- hbvFreeListNode *next;
- int size;
+ hbvFreeListNode* next;
+ int size;
};
// --------------------------------------------------------------------
// --------------------------------------------------------------------
-
class hashBvIterator
{
- public:
- unsigned hashtable_size;
- unsigned hashtable_index;
- hashBv *bv;
- hashBvNode *currNode;
- indexType current_element;
+public:
+ unsigned hashtable_size;
+ unsigned hashtable_index;
+ hashBv* bv;
+ hashBvNode* currNode;
+ indexType current_element;
// base index of current node
indexType current_base;
// working data of current element
elemType current_data;
- hashBvIterator(hashBv *bv);
- void initFrom(hashBv *bv);
+ hashBvIterator(hashBv* bv);
+ void initFrom(hashBv* bv);
hashBvIterator();
indexType nextBit();
- private:
+
+private:
void nextNode();
};
@@ -311,16 +314,16 @@ class hashBvGlobalData
friend class hashBv;
friend class hashBvNode;
- hashBvNode *hbvNodeFreeList;
- hashBv *hbvFreeList;
- unsigned short hbvHashSizeLog2;
- hbvFreeListNode * hbvFreeVectorList;
+ hashBvNode* hbvNodeFreeList;
+ hashBv* hbvFreeList;
+ unsigned short hbvHashSizeLog2;
+ hbvFreeListNode* hbvFreeVectorList;
+
public:
- hashBvIterator hashBvNextIterator;
+ hashBvIterator hashBvNextIterator;
};
-
-indexType HbvNext(hashBv *bv, Compiler *comp);
+indexType HbvNext(hashBv* bv, Compiler* comp);
// clang-format off
#define FOREACH_HBV_BIT_SET(index, bv) \
diff --git a/src/jit/host.h b/src/jit/host.h
index d38d41bdc7..87e13d4180 100644
--- a/src/jit/host.h
+++ b/src/jit/host.h
@@ -18,10 +18,13 @@ class LogEnv
{
public:
LogEnv(ICorJitInfo* aCompHnd);
- void setCompiler(Compiler* val) { const_cast<Compiler*&>(compiler) = val; }
+ void setCompiler(Compiler* val)
+ {
+ const_cast<Compiler*&>(compiler) = val;
+ }
ICorJitInfo* const compHnd;
- Compiler* const compiler;
+ Compiler* const compiler;
};
BOOL vlogf(unsigned level, const char* fmt, va_list args);
@@ -33,16 +36,15 @@ void gcDump_logf(const char* fmt, ...);
void logf(unsigned level, const char* fmt, ...);
-extern "C"
-void __cdecl assertAbort(const char *why, const char *file, unsigned line);
+extern "C" void __cdecl assertAbort(const char* why, const char* file, unsigned line);
-#undef assert
-#define assert(p) (void)((p) || (assertAbort(#p, __FILE__, __LINE__),0))
+#undef assert
+#define assert(p) (void)((p) || (assertAbort(#p, __FILE__, __LINE__), 0))
#else // DEBUG
-#undef assert
-#define assert(p) (void) 0
+#undef assert
+#define assert(p) (void)0
#endif // DEBUG
/*****************************************************************************/
@@ -50,11 +52,14 @@ void __cdecl assertAbort(const char *why, const char *file, unsigned line
#define _HOST_H_
/*****************************************************************************/
-const size_t OS_page_size = (4*1024);
+const size_t OS_page_size = (4 * 1024);
extern FILE* jitstdout;
-inline FILE* procstdout() { return stdout; }
+inline FILE* procstdout()
+{
+ return stdout;
+}
#undef stdout
#define stdout use_jitstdout
diff --git a/src/jit/hostallocator.h b/src/jit/hostallocator.h
index c51eccc75e..c48ed45b8c 100644
--- a/src/jit/hostallocator.h
+++ b/src/jit/hostallocator.h
@@ -7,7 +7,9 @@ class HostAllocator : public IAllocator
private:
static HostAllocator s_hostAllocator;
- HostAllocator() {}
+ HostAllocator()
+ {
+ }
public:
void* Alloc(size_t size) override;
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 1f20d454c2..73026bd00f 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -20,49 +20,58 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "corexcep.h"
-#define Verify(cond, msg) \
- do { \
- if (!(cond)) { \
- verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
- } \
+#define Verify(cond, msg) \
+ do \
+ { \
+ if (!(cond)) \
+ { \
+ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
+ } \
} while (0)
-#define VerifyOrReturn(cond, msg) \
- do { \
- if (!(cond)) { \
- verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
- return; \
- } \
+#define VerifyOrReturn(cond, msg) \
+ do \
+ { \
+ if (!(cond)) \
+ { \
+ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
+ return; \
+ } \
} while (0)
-#define VerifyOrReturnSpeculative(cond, msg, speculative) \
- do { \
- if (speculative) { \
- if (!(cond)) { \
- return false; \
- } \
- } \
- else { \
- if (!(cond)) { \
- verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
- return false; \
- } \
- } \
+#define VerifyOrReturnSpeculative(cond, msg, speculative) \
+ do \
+ { \
+ if (speculative) \
+ { \
+ if (!(cond)) \
+ { \
+ return false; \
+ } \
+ } \
+ else \
+ { \
+ if (!(cond)) \
+ { \
+ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
+ return false; \
+ } \
+ } \
} while (0)
/*****************************************************************************/
-void Compiler::impInit()
+void Compiler::impInit()
{
#ifdef DEBUG
- impTreeList = impTreeLast = NULL;
+ impTreeList = impTreeLast = nullptr;
#endif
#if defined(DEBUG)
impInlinedCodeSize = 0;
#endif
- seenConditionalJump = false;
+ seenConditionalJump = false;
}
/*****************************************************************************
@@ -70,7 +79,7 @@ void Compiler::impInit()
* Pushes the given tree on the stack.
*/
-void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
+void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
{
/* Check for overflow. If inlining, we may be using a bigger stack */
@@ -89,26 +98,25 @@ void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
assert(clsHnd != NO_CLASS_HANDLE);
}
- if (tiVerificationNeeded && !ti.IsDead())
+ if (tiVerificationNeeded && !ti.IsDead())
{
assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
// The ti type is consistent with the tree type.
- //
-
+ //
+
// On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
// In the verification type system, we always transform "native int" to "TI_INT".
// Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
// attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
// when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
// method used in the last disjunct allows exactly this mismatch.
- assert(ti.IsDead() ||
- ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
+ assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
- ti.IsObjRef() && tree->TypeGet() == TYP_REF ||
- ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
+ ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
- typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti), NormaliseForStack(typeInfo(tree->TypeGet()))));
+ typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
+ NormaliseForStack(typeInfo(tree->TypeGet()))));
// If it is a struct type, make certain we normalized the primitive types
assert(!ti.IsType(TI_STRUCT) ||
@@ -127,32 +135,38 @@ void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
#endif // DEBUG
- verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
+ verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
+ {
compLongUsed = true;
+ }
else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
+ {
compFloatingPointUsed = true;
+ }
}
/******************************************************************************/
// used in the inliner, where we can assume typesafe code. please don't use in the importer!!
-inline
-void Compiler::impPushOnStackNoType(GenTreePtr tree)
+inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
{
assert(verCurrentState.esStackDepth < impStkSize);
INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
- verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
+ verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
+ {
compLongUsed = true;
+ }
else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
+ {
compFloatingPointUsed = true;
+ }
}
-inline
-void Compiler::impPushNullObjRefOnStack()
+inline void Compiler::impPushNullObjRefOnStack()
{
impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
}
@@ -160,58 +174,66 @@ void Compiler::impPushNullObjRefOnStack()
// This method gets called when we run into unverifiable code
// (and we are verifying the method)
-inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line))
+inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
+ DEBUGARG(unsigned line))
{
// Remember that the code is not verifiable
- // Note that the method may yet pass canSkipMethodVerification(),
+ // Note that the method may yet pass canSkipMethodVerification(),
// and so the presence of unverifiable code may not be an issue.
tiIsVerifiableCode = FALSE;
#ifdef DEBUG
const char* tail = strrchr(file, '\\');
- if (tail) file = tail+1;
+ if (tail)
+ {
+ file = tail + 1;
+ }
if (JitConfig.JitBreakOnUnsafeCode())
+ {
assert(!"Unsafe code detected");
+ }
#endif
- JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n",
- file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs));
-
+ JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
+ msg, info.compFullName, impCurOpcName, impCurOpcOffs));
+
if (verNeedsVerification() || compIsForImportOnly())
{
- JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n",
- file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs));
+ JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
+ msg, info.compFullName, impCurOpcName, impCurOpcOffs));
verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
}
}
-
-inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line))
+inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
+ DEBUGARG(unsigned line))
{
- JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n",
- file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs));
+ JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
+ msg, info.compFullName, impCurOpcName, impCurOpcOffs));
#ifdef DEBUG
// BreakIfDebuggerPresent();
if (getBreakOnBadCode())
+ {
assert(!"Typechecking error");
+ }
#endif
- RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, NULL);
+ RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
UNREACHABLE();
}
// helper function that will tell us if the IL instruction at the addr passed
// by param consumes an address at the top of the stack. We use it to save
// us lvAddrTaken
-bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
+bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
{
assert(!compIsForInlining());
-
- OPCODE opcode;
- opcode = (OPCODE) getU1LittleEndian(codeAddr);
+ OPCODE opcode;
+
+ opcode = (OPCODE)getU1LittleEndian(codeAddr);
switch (opcode)
{
@@ -225,29 +247,31 @@ bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HAN
// that's not marked as addrtaken, which is wrong. Also ldflda is usually used
// for structs that contain other structs, which isnt a case we handle very
// well now for other reasons.
-
- case CEE_LDFLD:
+
+ case CEE_LDFLD:
{
// We won't collapse small fields. This is probably not the right place to have this
// check, but we're only using the function for this purpose, and is easy to factor
// out if we need to do so.
-
+
CORINFO_RESOLVED_TOKEN resolvedToken;
impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
CORINFO_CLASS_HANDLE clsHnd;
var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
- // Preserve 'small' int types
- if (lclTyp > TYP_INT)
+ // Preserve 'small' int types
+ if (lclTyp > TYP_INT)
+ {
lclTyp = genActualType(lclTyp);
+ }
if (varTypeIsSmall(lclTyp))
{
return false;
}
- return true;
+ return true;
}
default:
break;
@@ -256,12 +280,12 @@ bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HAN
return false;
}
-void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN * pResolvedToken, CorInfoTokenKind kind)
+void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
{
pResolvedToken->tokenContext = impTokenLookupContextHandle;
- pResolvedToken->tokenScope = info.compScopeHnd;
- pResolvedToken->token = getU4LittleEndian(addr);
- pResolvedToken->tokenType = kind;
+ pResolvedToken->tokenScope = info.compScopeHnd;
+ pResolvedToken->token = getU4LittleEndian(addr);
+ pResolvedToken->tokenType = kind;
if (!tiVerificationNeeded)
{
@@ -278,18 +302,19 @@ void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN * pResol
* Pop one tree from the stack.
*/
-StackEntry Compiler::impPopStack()
+StackEntry Compiler::impPopStack()
{
if (verCurrentState.esStackDepth == 0)
{
BADCODE("stack underflow");
}
-
+
#ifdef DEBUG
#if VERBOSE_VERIFY
if (VERBOSE && tiVerificationNeeded)
{
- JITDUMP("\n"); printf(TI_DUMP_PADDING);
+ JITDUMP("\n");
+ printf(TI_DUMP_PADDING);
printf("About to pop from the stack: ");
const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
ti.Dump();
@@ -300,35 +325,33 @@ StackEntry Compiler::impPopStack()
return verCurrentState.esStack[--verCurrentState.esStackDepth];
}
-StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
+StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
{
StackEntry ret = impPopStack();
- structType = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
- return(ret);
+ structType = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
+ return (ret);
}
-GenTreePtr Compiler::impPopStack(typeInfo& ti)
+GenTreePtr Compiler::impPopStack(typeInfo& ti)
{
StackEntry ret = impPopStack();
- ti = ret.seTypeInfo;
- return(ret.val);
+ ti = ret.seTypeInfo;
+ return (ret.val);
}
-
-
/*****************************************************************************
*
* Peep at n'th (0-based) tree on the top of the stack.
*/
-StackEntry& Compiler::impStackTop(unsigned n)
+StackEntry& Compiler::impStackTop(unsigned n)
{
if (verCurrentState.esStackDepth <= n)
{
BADCODE("stack underflow");
}
- return verCurrentState.esStack[verCurrentState.esStackDepth-n-1];
+ return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
}
/*****************************************************************************
* Some of the trees are spilled specially. While unspilling them, or
@@ -337,14 +360,17 @@ StackEntry& Compiler::impStackTop(unsigned n)
*/
#ifdef DEBUG // only used in asserts
-static
-bool impValidSpilledStackEntry(GenTreePtr tree)
+static bool impValidSpilledStackEntry(GenTreePtr tree)
{
if (tree->gtOper == GT_LCL_VAR)
+ {
return true;
+ }
if (tree->OperIsConst())
+ {
return true;
+ }
return false;
}
@@ -357,42 +383,41 @@ bool impValidSpilledStackEntry(GenTreePtr tree)
* have to all be cloneable/spilled values.
*/
-void Compiler::impSaveStackState(SavedStack *savePtr,
- bool copy)
+void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
{
savePtr->ssDepth = verCurrentState.esStackDepth;
- if (verCurrentState.esStackDepth)
+ if (verCurrentState.esStackDepth)
{
savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
- size_t saveSize = verCurrentState.esStackDepth*sizeof(*savePtr->ssTrees);
+ size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
- if (copy)
+ if (copy)
{
- StackEntry *table = savePtr->ssTrees;
+ StackEntry* table = savePtr->ssTrees;
/* Make a fresh copy of all the stack entries */
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
{
table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTreePtr tree = verCurrentState.esStack[level].val;
assert(impValidSpilledStackEntry(tree));
switch (tree->gtOper)
{
- case GT_CNS_INT:
- case GT_CNS_LNG:
- case GT_CNS_DBL:
- case GT_CNS_STR:
- case GT_LCL_VAR:
- table->val = gtCloneExpr(tree);
- break;
+ case GT_CNS_INT:
+ case GT_CNS_LNG:
+ case GT_CNS_DBL:
+ case GT_CNS_STR:
+ case GT_LCL_VAR:
+ table->val = gtCloneExpr(tree);
+ break;
- default:
- assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
- break;
+ default:
+ assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
+ break;
}
}
}
@@ -403,29 +428,28 @@ void Compiler::impSaveStackState(SavedStack *savePtr,
}
}
-void Compiler::impRestoreStackState(SavedStack *savePtr)
+void Compiler::impRestoreStackState(SavedStack* savePtr)
{
verCurrentState.esStackDepth = savePtr->ssDepth;
if (verCurrentState.esStackDepth)
- memcpy(verCurrentState.esStack, savePtr->ssTrees, verCurrentState.esStackDepth*sizeof(*verCurrentState.esStack));
+ {
+ memcpy(verCurrentState.esStack, savePtr->ssTrees,
+ verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
+ }
}
-
/*****************************************************************************
*
* Get the tree list started for a new basic block.
*/
-inline
-void Compiler::impBeginTreeList()
+inline void Compiler::impBeginTreeList()
{
- assert(impTreeList == NULL && impTreeLast == NULL);
+ assert(impTreeList == nullptr && impTreeLast == nullptr);
- impTreeList =
- impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
+ impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
}
-
/*****************************************************************************
*
* Store the given start and end stmt in the given basic block. This is
@@ -433,17 +457,14 @@ void Compiler::impBeginTreeList()
* directly only for handling CEE_LEAVEs out of finally-protected try's.
*/
-inline
-void Compiler::impEndTreeList(BasicBlock * block,
- GenTreePtr firstStmt,
- GenTreePtr lastStmt)
+inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
{
assert(firstStmt->gtOper == GT_STMT);
- assert( lastStmt->gtOper == GT_STMT);
+ assert(lastStmt->gtOper == GT_STMT);
/* Make the list circular, so that we can easily walk it backwards */
- firstStmt->gtPrev = lastStmt;
+ firstStmt->gtPrev = lastStmt;
/* Store the tree list in the basic block */
@@ -460,14 +481,13 @@ void Compiler::impEndTreeList(BasicBlock * block,
* Store the current tree list in the given basic block.
*/
-inline
-void Compiler::impEndTreeList(BasicBlock *block)
+inline void Compiler::impEndTreeList(BasicBlock* block)
{
assert(impTreeList->gtOper == GT_BEG_STMTS);
- GenTreePtr firstTree = impTreeList->gtNext;
+ GenTreePtr firstTree = impTreeList->gtNext;
- if (!firstTree)
+ if (!firstTree)
{
/* The block should not already be marked as imported */
assert((block->bbFlags & BBF_IMPORTED) == 0);
@@ -484,13 +504,13 @@ void Compiler::impEndTreeList(BasicBlock *block)
}
#ifdef DEBUG
- if (impLastILoffsStmt != NULL)
+ if (impLastILoffsStmt != nullptr)
{
- impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining()?BAD_IL_OFFSET:impCurOpcOffs;
- impLastILoffsStmt = NULL;
+ impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
+ impLastILoffsStmt = nullptr;
}
- impTreeList = impTreeLast = NULL;
+ impTreeList = impTreeLast = nullptr;
#endif
}
@@ -500,8 +520,7 @@ void Compiler::impEndTreeList(BasicBlock *block)
* that this has only limited value as we can only check [0..chkLevel).
*/
-inline
-void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
+inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
{
#ifndef DEBUG
return;
@@ -509,10 +528,14 @@ void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLe
assert(stmt->gtOper == GT_STMT);
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
+ {
chkLevel = verCurrentState.esStackDepth;
+ }
if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
+ {
return;
+ }
GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
@@ -521,7 +544,9 @@ void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLe
if (tree->gtFlags & GTF_CALL)
{
for (unsigned level = 0; level < chkLevel; level++)
+ {
assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
+ }
}
if (tree->gtOper == GT_ASG)
@@ -557,12 +582,13 @@ void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLe
// If the access is to glob-memory, all side effects have to be spilled
if (false)
+ {
goto SPILL_GLOB_EFFECT;
+ }
}
#endif
}
-
/*****************************************************************************
*
* Append the given GT_STMT node to the current block's tree list.
@@ -570,8 +596,7 @@ void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLe
* interference with stmt and spill if needed.
*/
-inline
-void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
+inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
{
assert(stmt->gtOper == GT_STMT);
noway_assert(impTreeLast != nullptr);
@@ -579,17 +604,15 @@ void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
/* If the statement being appended has any side-effects, check the stack
to see if anything needs to be spilled to preserve correct ordering. */
- GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
- unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
+ GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
+ unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
/* Assignment to (unaliased) locals don't count as a side-effect as
we handle them specially using impSpillLclRefs(). Temp locals should
be fine too. */
- if ((expr->gtOper == GT_ASG) &&
- (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
- !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) &&
- !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
+ if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
+ !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
{
unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
assert(flags == (op2Flags | GTF_ASG));
@@ -597,9 +620,11 @@ void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
}
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
+ {
chkLevel = verCurrentState.esStackDepth;
+ }
- if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
+ if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
{
assert(chkLevel <= verCurrentState.esStackDepth);
@@ -612,15 +637,17 @@ void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
{
// If we are assigning to a global ref, we have to spill global refs on stack
if (expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
+ {
spillGlobEffects = true;
+ }
}
- else if ((expr->gtOper == GT_INITBLK) || (expr->gtOper == GT_COPYBLK))
+ else if ((expr->gtOper == GT_INITBLK) || (expr->gtOper == GT_COPYBLK))
{
// INITBLK and COPYBLK are other ways of performing an assignment
spillGlobEffects = true;
}
- impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt") );
+ impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
}
else
{
@@ -641,7 +668,7 @@ void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
#ifdef FEATURE_SIMD
impMarkContiguousSIMDFieldAssignments(stmt);
-#endif
+#endif
#ifdef DEBUGGING_SUPPORT
@@ -656,8 +683,10 @@ void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
#endif
#ifdef DEBUG
- if (impLastILoffsStmt == NULL)
+ if (impLastILoffsStmt == nullptr)
+ {
impLastILoffsStmt = stmt;
+ }
if (verbose)
{
@@ -672,39 +701,36 @@ void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
* Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
*/
-inline
-void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
+inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
{
assert(stmt->gtOper == GT_STMT);
assert(stmtBefore->gtOper == GT_STMT);
-
+
GenTreePtr stmtPrev = stmtBefore->gtPrev;
- stmt->gtPrev = stmtPrev;
- stmt->gtNext = stmtBefore;
- stmtPrev->gtNext = stmt;
- stmtBefore->gtPrev = stmt;
+ stmt->gtPrev = stmtPrev;
+ stmt->gtNext = stmtBefore;
+ stmtPrev->gtNext = stmt;
+ stmtBefore->gtPrev = stmt;
}
/*****************************************************************************
*
- * Append the given expression tree to the current block's tree list.
+ * Append the given expression tree to the current block's tree list.
* Return the newly created statement.
*/
-GenTreePtr Compiler::impAppendTree(GenTreePtr tree,
- unsigned chkLevel,
- IL_OFFSETX offset)
+GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
{
assert(tree);
/* Allocate an 'expression statement' node */
- GenTreePtr expr = gtNewStmt(tree, offset);
+ GenTreePtr expr = gtNewStmt(tree, offset);
/* Append the statement to the current block's stmt list */
impAppendStmt(expr, chkLevel);
-
+
return expr;
}
@@ -713,46 +739,45 @@ GenTreePtr Compiler::impAppendTree(GenTreePtr tree,
* Insert the given exression tree before GT_STMT "stmtBefore"
*/
-void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
+void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
{
assert(stmtBefore->gtOper == GT_STMT);
-
+
/* Allocate an 'expression statement' node */
- GenTreePtr expr = gtNewStmt(tree, offset);
+ GenTreePtr expr = gtNewStmt(tree, offset);
/* Append the statement to the current block's stmt list */
impInsertStmtBefore(expr, stmtBefore);
}
-
/*****************************************************************************
*
* Append an assignment of the given value to a temp to the current tree list.
* curLevel is the stack level for which the spill to the temp is being done.
*/
-void Compiler::impAssignTempGen(unsigned tmp,
- GenTreePtr val,
- unsigned curLevel,
- GenTreePtr * pAfterStmt, /* = NULL */
- IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
- BasicBlock * block /* = NULL */
- )
+void Compiler::impAssignTempGen(unsigned tmp,
+ GenTreePtr val,
+ unsigned curLevel,
+ GenTreePtr* pAfterStmt, /* = NULL */
+ IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
+ BasicBlock* block /* = NULL */
+ )
{
- GenTreePtr asg = gtNewTempAssign(tmp, val);
+ GenTreePtr asg = gtNewTempAssign(tmp, val);
if (!asg->IsNothingNode())
{
if (pAfterStmt)
{
GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
- * pAfterStmt = fgInsertStmtAfter(block, * pAfterStmt, asgStmt);
+ *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
}
else
{
- impAppendTree(asg, curLevel, impCurStmtOffs);
+ impAppendTree(asg, curLevel, impCurStmtOffs);
}
}
}
@@ -761,14 +786,14 @@ void Compiler::impAssignTempGen(unsigned tmp,
* same as above, but handle the valueclass case too
*/
-void Compiler::impAssignTempGen(unsigned tmpNum,
- GenTreePtr val,
- CORINFO_CLASS_HANDLE structType,
- unsigned curLevel,
- GenTreePtr * pAfterStmt, /* = NULL */
- IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
- BasicBlock * block /* = NULL */
- )
+void Compiler::impAssignTempGen(unsigned tmpNum,
+ GenTreePtr val,
+ CORINFO_CLASS_HANDLE structType,
+ unsigned curLevel,
+ GenTreePtr* pAfterStmt, /* = NULL */
+ IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
+ BasicBlock* block /* = NULL */
+ )
{
GenTreePtr asg;
@@ -781,9 +806,7 @@ void Compiler::impAssignTempGen(unsigned tmpNum,
// so at least ignore it in the case when verification is turned on
// since any block that tries to use the temp would have failed verification.
var_types varType = lvaTable[tmpNum].lvType;
- assert(tiVerificationNeeded ||
- varType == TYP_UNDEF ||
- varTypeIsStruct(varType));
+ assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
lvaSetStruct(tmpNum, structType, false);
// Now, set the type of the struct value. Note that lvaSetStruct may modify the type
@@ -795,27 +818,27 @@ void Compiler::impAssignTempGen(unsigned tmpNum,
// calls that may not actually be required - e.g. if we only access a field of a struct.
val->gtType = lvaTable[tmpNum].lvType;
-
- GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
- asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
+
+ GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
+ asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
}
- else
+ else
{
asg = gtNewTempAssign(tmpNum, val);
}
- if (!asg->IsNothingNode())
+ if (!asg->IsNothingNode())
{
if (pAfterStmt)
{
GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
- * pAfterStmt = fgInsertStmtAfter(block, * pAfterStmt, asgStmt);
+ *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
}
else
{
impAppendTree(asg, curLevel, impCurStmtOffs);
}
- }
+ }
}
/*****************************************************************************
@@ -836,34 +859,38 @@ void Compiler::impAssignTempGen(unsigned tmpNum,
* prefixTree at the head of the list.
*/
-GenTreeArgList* Compiler::impPopList(unsigned count,
- unsigned * flagsPtr,
- CORINFO_SIG_INFO* sig,
- GenTreeArgList* prefixTree)
+GenTreeArgList* Compiler::impPopList(unsigned count,
+ unsigned* flagsPtr,
+ CORINFO_SIG_INFO* sig,
+ GenTreeArgList* prefixTree)
{
- assert(sig == 0 || count == sig->numArgs);
+ assert(sig == nullptr || count == sig->numArgs);
- unsigned flags = 0;
- CORINFO_CLASS_HANDLE structType;
- GenTreeArgList* treeList;
+ unsigned flags = 0;
+ CORINFO_CLASS_HANDLE structType;
+ GenTreeArgList* treeList;
if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
+ {
treeList = nullptr;
- else // ARG_ORDER_L2R
+ }
+ else
+ { // ARG_ORDER_L2R
treeList = prefixTree;
-
+ }
+
while (count--)
{
- StackEntry se = impPopStack();
- typeInfo ti = se.seTypeInfo;
- GenTreePtr temp = se.val;
+ StackEntry se = impPopStack();
+ typeInfo ti = se.seTypeInfo;
+ GenTreePtr temp = se.val;
if (varTypeIsStruct(temp))
{
// Morph trees that aren't already OBJs or MKREFANY to be OBJs
assert(ti.IsType(TI_STRUCT));
structType = ti.GetClassHandleForValueClass();
- temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
+ temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
}
/* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
@@ -873,64 +900,62 @@ GenTreeArgList* Compiler::impPopList(unsigned count,
*flagsPtr = flags;
- if (sig != NULL)
+ if (sig != nullptr)
{
- if (sig->retTypeSigClass != 0 &&
- sig->retType != CORINFO_TYPE_CLASS &&
- sig->retType != CORINFO_TYPE_BYREF &&
- sig->retType != CORINFO_TYPE_PTR &&
- sig->retType != CORINFO_TYPE_VAR)
- {
- // Make sure that all valuetypes (including enums) that we push are loaded.
+ if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
+ sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
+ {
+ // Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggerred from the prestub of this methods,
- // all valuetypes in the method signature are already loaded.
+ // all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
}
- CORINFO_ARG_LIST_HANDLE argLst = sig->args;
- CORINFO_CLASS_HANDLE argClass;
- CORINFO_CLASS_HANDLE argRealClass;
- GenTreeArgList* args;
- unsigned sigSize;
+ CORINFO_ARG_LIST_HANDLE argLst = sig->args;
+ CORINFO_CLASS_HANDLE argClass;
+ CORINFO_CLASS_HANDLE argRealClass;
+ GenTreeArgList* args;
+ unsigned sigSize;
- for (args = treeList, count = sig->numArgs;
- count > 0;
- args = args->Rest(), count--)
+ for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
{
- PREFIX_ASSUME(args != NULL);
+ PREFIX_ASSUME(args != nullptr);
CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
// insert implied casts (from float to double or double to float)
if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
+ {
args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
+ }
else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
+ {
args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
+ }
// insert any widening or narrowing casts for backwards compatibility
args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
- if (corType != CORINFO_TYPE_CLASS
- && corType != CORINFO_TYPE_BYREF
- && corType != CORINFO_TYPE_PTR
- && corType != CORINFO_TYPE_VAR
- && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != NULL)
+ if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
+ corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
{
// Everett MC++ could generate IL with a mismatched valuetypes. It used to work with Everett JIT,
- // but it stopped working in Whidbey when we have started passing simple valuetypes as underlying primitive types.
- // We will try to adjust for this case here to avoid breaking customers code (see VSW 485789 for details).
+ // but it stopped working in Whidbey when we have started passing simple valuetypes as underlying
+ // primitive types.
+ // We will try to adjust for this case here to avoid breaking customers code (see VSW 485789 for
+ // details).
if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
{
args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
}
- // Make sure that all valuetypes (including enums) that we push are loaded.
+ // Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this methods,
- // all valuetypes in the method signature are already loaded.
+ // all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
@@ -943,15 +968,15 @@ GenTreeArgList* Compiler::impPopList(unsigned count,
if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
{
// Prepend the prefixTree
-
+
// Simple in-place reversal to place treeList
// at the end of a reversed prefixTree
while (prefixTree != nullptr)
{
GenTreeArgList* next = prefixTree->Rest();
- prefixTree->Rest() = treeList;
- treeList = prefixTree;
- prefixTree = next;
+ prefixTree->Rest() = treeList;
+ treeList = prefixTree;
+ prefixTree = next;
}
}
return treeList;
@@ -963,22 +988,24 @@ GenTreeArgList* Compiler::impPopList(unsigned count,
* The first "skipReverseCount" items are not reversed.
*/
-GenTreeArgList* Compiler::impPopRevList(unsigned count,
- unsigned * flagsPtr,
- CORINFO_SIG_INFO* sig,
- unsigned skipReverseCount)
+GenTreeArgList* Compiler::impPopRevList(unsigned count,
+ unsigned* flagsPtr,
+ CORINFO_SIG_INFO* sig,
+ unsigned skipReverseCount)
{
assert(skipReverseCount <= count);
-
+
GenTreeArgList* list = impPopList(count, flagsPtr, sig);
- // reverse the list
- if (list == NULL || skipReverseCount == count)
+ // reverse the list
+ if (list == nullptr || skipReverseCount == count)
+ {
return list;
+ }
- GenTreeArgList* ptr = NULL; // Initialized to the first node that needs to be reversed
- GenTreeArgList* lastSkipNode = NULL; // Will be set to the last node that does not need to be reversed
+ GenTreeArgList* ptr = nullptr; // Initialized to the first node that needs to be reversed
+ GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
if (skipReverseCount == 0)
{
@@ -992,20 +1019,20 @@ GenTreeArgList* Compiler::impPopRevList(unsigned count,
{
lastSkipNode = lastSkipNode->Rest();
}
-
- PREFIX_ASSUME(lastSkipNode != NULL);
+
+ PREFIX_ASSUME(lastSkipNode != nullptr);
ptr = lastSkipNode->Rest();
}
-
- GenTreeArgList* reversedList = NULL;
- do {
+ GenTreeArgList* reversedList = nullptr;
+
+ do
+ {
GenTreeArgList* tmp = ptr->Rest();
- ptr->Rest() = reversedList;
- reversedList = ptr;
- ptr = tmp;
- }
- while (ptr != NULL);
+ ptr->Rest() = reversedList;
+ reversedList = ptr;
+ ptr = tmp;
+ } while (ptr != nullptr);
if (skipReverseCount)
{
@@ -1026,37 +1053,36 @@ GenTreeArgList* Compiler::impPopRevList(unsigned count,
curLevel is the stack level for which a spill may be being done.
*/
-GenTreePtr Compiler::impAssignStruct(GenTreePtr dest,
- GenTreePtr src,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- GenTreePtr * pAfterStmt, /* = NULL */
- BasicBlock * block /* = NULL */
- )
+GenTreePtr Compiler::impAssignStruct(GenTreePtr dest,
+ GenTreePtr src,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ GenTreePtr* pAfterStmt, /* = NULL */
+ BasicBlock* block /* = NULL */
+ )
{
assert(varTypeIsStruct(dest));
while (dest->gtOper == GT_COMMA)
{
- assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
+ assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
// Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
if (pAfterStmt)
{
- * pAfterStmt = fgInsertStmtAfter(block, * pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
- }
+ *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
+ }
else
{
- impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
+ impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
}
// set dest to the second thing
dest = dest->gtOp.gtOp2;
}
- assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN ||
- dest->gtOper == GT_FIELD || dest->gtOper == GT_IND ||
- dest->gtOper == GT_OBJ);
+ assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
+ dest->gtOper == GT_IND || dest->gtOper == GT_OBJ);
GenTreePtr destAddr;
@@ -1066,41 +1092,38 @@ GenTreePtr Compiler::impAssignStruct(GenTreePtr dest,
}
else
{
- destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
+ destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
}
- return(impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
+ return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
}
/*****************************************************************************/
-GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
- GenTreePtr src,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- GenTreePtr * pAfterStmt, /* = NULL */
- BasicBlock * block /* = NULL */
- )
+GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
+ GenTreePtr src,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ GenTreePtr* pAfterStmt, /* = NULL */
+ BasicBlock* block /* = NULL */
+ )
{
GenTreePtr dest = nullptr;
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
+ assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
// TODO-ARM-BUG: Does ARM need this?
// TODO-ARM64-BUG: Does ARM64 need this?
- assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD ||
- src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
- src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY ||
- src->gtOper == GT_RET_EXPR || src->gtOper == GT_COMMA ||
- src->gtOper == GT_ADDR ||
+ assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
+ src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
+ src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
(src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
assert(varTypeIsStruct(src));
- assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD ||
- src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
- src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY ||
- src->gtOper == GT_RET_EXPR || src->gtOper == GT_COMMA ||
+ assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
+ src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
+ src->gtOper == GT_COMMA ||
(src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
@@ -1139,18 +1162,18 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
if (src->AsCall()->HasMultiRegRetVal())
{
// Mark the struct LclVar as used in a MultiReg return context
- // which currently makes it non promotable.
+ // which currently makes it non promotable.
lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
}
- else // The call result is not a multireg return
+ else // The call result is not a multireg return
{
// We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
lcl->ChangeOper(GT_LCL_FLD);
fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
- }
+ }
lcl->gtType = src->gtType;
- dest = lcl;
+ dest = lcl;
#if defined(_TARGET_ARM_)
impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
@@ -1162,7 +1185,7 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
#endif
}
- else // we don't have a GT_ADDR of a GT_LCL_VAR
+ else // we don't have a GT_ADDR of a GT_LCL_VAR
{
// We change this to "(returnType)*destAddr = call"
dest = gtNewOperNode(GT_IND, returnType, destAddr);
@@ -1200,7 +1223,7 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
var_types returnType = (var_types)call->gtCall.gtReturnType;
// We won't need a return buffer
- src->gtType = genActualType(returnType);
+ src->gtType = genActualType(returnType);
call->gtType = src->gtType;
dest = gtNewOperNode(GT_IND, returnType, destAddr);
@@ -1215,31 +1238,32 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
else if (src->gtOper == GT_OBJ)
{
assert(src->gtObj.gtClass == structHnd);
- src = src->gtOp.gtOp1;
+ src = src->gtOp.gtOp1;
}
else if (src->gtOper == GT_MKREFANY)
{
- // Since we are assigning the result of a GT_MKREFANY,
+ // Since we are assigning the result of a GT_MKREFANY,
// "destAddr" must point to a refany.
GenTreePtr destAddrClone;
- destAddr = impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment") );
+ destAddr =
+ impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
- GenTreePtr ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
+ GenTreePtr ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
- typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
- GenTreePtr typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL,
- gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
+ typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
+ GenTreePtr typeSlot =
+ gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
// append the assign of the pointer value
GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
if (pAfterStmt)
{
- * pAfterStmt = fgInsertStmtAfter(block, * pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
- }
+ *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
+ }
else
{
impAppendTree(asg, curLevel, impCurStmtOffs);
@@ -1254,11 +1278,11 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
if (pAfterStmt)
{
- * pAfterStmt = fgInsertStmtAfter(block, * pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
- }
+ *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
+ }
else
{
- impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
+ impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
}
// Evaluate the second thing using recursion.
@@ -1266,7 +1290,7 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
}
else if (src->gtOper == GT_ADDR)
{
- // In case of address already in src, use it to copy the struct.
+ // In case of address already in src, use it to copy the struct.
}
else
{
@@ -1284,33 +1308,33 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
willDeref - does the caller guarantee to dereference the pointer.
*/
-GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- bool willDeref)
+GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ bool willDeref)
{
assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
var_types type = structVal->TypeGet();
- genTreeOps oper = structVal->gtOper;
+ genTreeOps oper = structVal->gtOper;
if (oper == GT_OBJ && willDeref)
{
assert(structVal->gtObj.gtClass == structHnd);
- return(structVal->gtObj.Addr());
+ return (structVal->gtObj.Addr());
}
else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
{
- unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
+ unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
// The 'return value' is now the temp itself
- type = genActualType(lvaTable[tmpNum].TypeGet());
+ type = genActualType(lvaTable[tmpNum].TypeGet());
GenTreePtr temp = gtNewLclvNode(tmpNum, type);
- temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
+ temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
return temp;
}
else if (oper == GT_COMMA)
@@ -1318,8 +1342,8 @@ GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
GenTreePtr oldTreeLast = impTreeLast;
- structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
- structVal->gtType = TYP_BYREF;
+ structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
+ structVal->gtType = TYP_BYREF;
if (oldTreeLast != impTreeLast)
{
@@ -1331,10 +1355,10 @@ GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
structVal->gtOp.gtOp1 = gtNewNothingNode();
}
- return(structVal);
+ return (structVal);
}
- return(gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
+ return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
//------------------------------------------------------------------------
@@ -1355,32 +1379,30 @@ GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
//
// Assumptions:
// The caller must set gcLayout to nullptr OR ensure that it is large enough
-// (see ICorStaticInfo::getClassGClayout in corinfo.h).
+// (see ICorStaticInfo::getClassGClayout in corinfo.h).
//
// Notes:
// Normalizing the type involves examining the struct type to determine if it should
// be modified to one that is handled specially by the JIT, possibly being a candidate
// for full enregistration, e.g. TYP_SIMD16.
-var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
- BYTE* gcLayout,
- unsigned* pNumGCVars,
- var_types* pSimdBaseType)
+var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
+ BYTE* gcLayout,
+ unsigned* pNumGCVars,
+ var_types* pSimdBaseType)
{
assert(structHnd != NO_CLASS_HANDLE);
- unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
- unsigned numGCVars = 0;
- var_types structType = TYP_STRUCT;
- var_types simdBaseType = TYP_UNKNOWN;
- bool definitelyHasGCPtrs = false;
+ unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
+ unsigned numGCVars = 0;
+ var_types structType = TYP_STRUCT;
+ var_types simdBaseType = TYP_UNKNOWN;
+ bool definitelyHasGCPtrs = false;
#ifdef FEATURE_SIMD
// We don't want to consider this as a possible SIMD type if it has GC pointers.
// (Saves querying about the SIMD assembly.)
BYTE gcBytes[maxPossibleSIMDStructBytes / TARGET_POINTER_SIZE];
- if ((gcLayout == nullptr) &&
- (originalSize >= minSIMDStructBytes()) &&
- (originalSize <= maxSIMDStructBytes()))
+ if ((gcLayout == nullptr) && (originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
{
gcLayout = gcBytes;
}
@@ -1388,14 +1410,12 @@ var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
if (gcLayout != nullptr)
{
- numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
+ numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
definitelyHasGCPtrs = (numGCVars != 0);
}
#ifdef FEATURE_SIMD
// Check to see if this is a SIMD type.
- if (featureSIMD &&
- (originalSize <= getSIMDVectorRegisterByteLength()) &&
- (originalSize >= TARGET_POINTER_SIZE) &&
+ if (featureSIMD && (originalSize <= getSIMDVectorRegisterByteLength()) && (originalSize >= TARGET_POINTER_SIZE) &&
!definitelyHasGCPtrs)
{
unsigned int sizeBytes;
@@ -1414,7 +1434,7 @@ var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
#endif
}
}
-#endif //FEATURE_SIMD
+#endif // FEATURE_SIMD
if (pNumGCVars != nullptr)
{
*pNumGCVars = numGCVars;
@@ -1426,10 +1446,10 @@ var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
// Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
// it is either an OBJ or a MKREFANY node.
//
-GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- bool forceNormalization /*=false*/)
+GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ bool forceNormalization /*=false*/)
{
assert(forceNormalization || varTypeIsStruct(structVal));
assert(structHnd != NO_CLASS_HANDLE);
@@ -1438,86 +1458,89 @@ GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
{
structType = impNormStructType(structHnd);
}
-
+
genTreeOps oper = structVal->OperGet();
switch (oper)
{
- // GT_RETURN and GT_MKREFANY don't capture the handle.
- case GT_RETURN:
- case GT_MKREFANY:
- break;
+ // GT_RETURN and GT_MKREFANY don't capture the handle.
+ case GT_RETURN:
+ case GT_MKREFANY:
+ break;
- case GT_CALL:
- structVal->gtCall.gtRetClsHnd = structHnd;
- structVal->gtType = structType;
- break;
+ case GT_CALL:
+ structVal->gtCall.gtRetClsHnd = structHnd;
+ structVal->gtType = structType;
+ break;
- case GT_RET_EXPR:
- structVal->gtRetExpr.gtRetClsHnd = structHnd;
- structVal->gtType = structType;
- break;
+ case GT_RET_EXPR:
+ structVal->gtRetExpr.gtRetClsHnd = structHnd;
+ structVal->gtType = structType;
+ break;
- case GT_ARGPLACE:
- structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
- structVal->gtType = structType;
- break;
+ case GT_ARGPLACE:
+ structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
+ structVal->gtType = structType;
+ break;
- case GT_INDEX:
- structVal->gtIndex.gtStructElemClass = structHnd;
- structVal->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
- structVal->gtType = structType;
- break;
+ case GT_INDEX:
+ structVal->gtIndex.gtStructElemClass = structHnd;
+ structVal->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
+ structVal->gtType = structType;
+ break;
- case GT_FIELD:
- structVal->gtType = structType;
- break;
+ case GT_FIELD:
+ structVal->gtType = structType;
+ break;
- case GT_LCL_VAR:
- case GT_LCL_FLD:
- break;
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
+ break;
- case GT_OBJ:
- case GT_IND:
- // These should already have the appropriate type.
- assert(structVal->gtType == structType);
- break;
+ case GT_OBJ:
+ case GT_IND:
+ // These should already have the appropriate type.
+ assert(structVal->gtType == structType);
+ break;
#ifdef FEATURE_SIMD
- case GT_SIMD:
- assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
- break;
+ case GT_SIMD:
+ assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
+ break;
#endif // FEATURE_SIMD
- case GT_COMMA:
+ case GT_COMMA:
{
// The second thing is the struct node.
GenTree* op2 = structVal->gtOp.gtOp2;
assert(op2->gtType == structType);
impNormStructVal(op2, structHnd, curLevel, forceNormalization);
- structType = op2->TypeGet();
+ structType = op2->TypeGet();
structVal->gtType = structType;
}
break;
- default:
- assert(!"Unexpected node in impNormStructVal()");
- break;
+ default:
+ assert(!"Unexpected node in impNormStructVal()");
+ break;
}
// Is it already normalized?
if (!forceNormalization && (structVal->gtOper == GT_MKREFANY || structVal->gtOper == GT_OBJ))
- return(structVal);
+ {
+ return (structVal);
+ }
// Normalize it by wraping it in an OBJ
- GenTreePtr structAddr = impGetStructAddr(structVal, structHnd, curLevel, !forceNormalization); // get the addr of struct
+ GenTreePtr structAddr =
+ impGetStructAddr(structVal, structHnd, curLevel, !forceNormalization); // get the addr of struct
GenTreePtr structObj = new (this, GT_OBJ) GenTreeObj(structType, structAddr, structHnd);
if (structAddr->gtOper == GT_ADDR)
{
- // structVal can start off as a GT_RET_EXPR that
- // gets changed into a GT_LCL_VAR by impGetStructAddr
+ // structVal can start off as a GT_RET_EXPR that
+ // gets changed into a GT_LCL_VAR by impGetStructAddr
// when it calls impAssignTempGen()
structVal = structAddr->gtOp.gtOp1;
}
@@ -1530,87 +1553,90 @@ GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
structObj->gtFlags &= ~GTF_GLOB_REF;
}
}
- else
+ else
{
// In general a OBJ is an IND and could raise an exception
structObj->gtFlags |= GTF_EXCEPT;
}
- return(structObj);
+ return (structObj);
}
-
-
/******************************************************************************/
-// Given a type token, generate code that will evaluate to the correct
+// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
-//
-// Run-time lookup is required if the enclosing method is shared between instantiations
+//
+// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
-GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN * pResolvedToken,
- BOOL *pRuntimeLookup /* = NULL */,
- BOOL mustRestoreHandle /* = FALSE */,
- BOOL importParent /* = FALSE */)
+GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ BOOL* pRuntimeLookup /* = NULL */,
+ BOOL mustRestoreHandle /* = FALSE */,
+ BOOL importParent /* = FALSE */)
{
assert(!fgGlobalMorph);
CORINFO_GENERICHANDLE_RESULT embedInfo;
- info.compCompHnd->embedGenericHandle(pResolvedToken,
- importParent,
- &embedInfo);
+ info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
if (pRuntimeLookup)
+ {
*pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
+ }
if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
switch (embedInfo.handleType)
{
- case CORINFO_HANDLETYPE_CLASS:
- info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE) embedInfo.compileTimeHandle);
- break;
+ case CORINFO_HANDLETYPE_CLASS:
+ info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
+ break;
- case CORINFO_HANDLETYPE_METHOD:
- info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE) embedInfo.compileTimeHandle);
- break;
+ case CORINFO_HANDLETYPE_METHOD:
+ info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
+ break;
- case CORINFO_HANDLETYPE_FIELD:
- info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
- break;
+ case CORINFO_HANDLETYPE_FIELD:
+ info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
+ info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
+ break;
- default:
- break;
+ default:
+ break;
}
}
- return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle);
+ return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
+ embedInfo.compileTimeHandle);
}
-GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN *pResolvedToken,
- CORINFO_LOOKUP *pLookup,
- unsigned handleFlags,
- void *compileTimeHandle)
+GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_LOOKUP* pLookup,
+ unsigned handleFlags,
+ void* compileTimeHandle)
{
- if (!pLookup->lookupKind.needsRuntimeLookup)
+ if (!pLookup->lookupKind.needsRuntimeLookup)
{
// No runtime lookup is required.
// Access is direct or memory-indirect (of a fixed address) reference
- CORINFO_GENERIC_HANDLE handle = 0;
- void *pIndirection = 0;
+ CORINFO_GENERIC_HANDLE handle = nullptr;
+ void* pIndirection = nullptr;
assert(pLookup->constLookup.accessType != IAT_PPVALUE);
if (pLookup->constLookup.accessType == IAT_VALUE)
+ {
handle = pLookup->constLookup.handle;
+ }
else if (pLookup->constLookup.accessType == IAT_PVALUE)
+ {
pIndirection = pLookup->constLookup.addr;
- return gtNewIconEmbHndNode(handle, pIndirection, handleFlags,
- 0, 0, compileTimeHandle);
- }
+ }
+ return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
+ }
else if (compIsForInlining())
{
// Don't import runtime lookups when inlining
@@ -1622,34 +1648,33 @@ GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN *pResolvedT
{
// Need to use dictionary-based access which depends on the typeContext
// which is only available at runtime, not at compile-time.
-
+
return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
}
}
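// A minimal standalone sketch (illustrative names, not the JIT's own code) of the
// two access kinds handled above: with IAT_VALUE the embedded word is the handle
// itself, while with IAT_PVALUE it is the address of a cell that holds the handle
// and must be dereferenced at run time.
#include <cstdint>

static std::uintptr_t ResolveConstLookup(bool isIndirect, std::uintptr_t embedded)
{
    if (isIndirect)
    {
        // IAT_PVALUE: 'embedded' is the address of the cell holding the real handle.
        return *reinterpret_cast<std::uintptr_t*>(embedded);
    }
    // IAT_VALUE: 'embedded' already is the handle.
    return embedded;
}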
#ifdef FEATURE_READYTORUN_COMPILER
-GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP *pLookup,
- unsigned handleFlags,
- void *compileTimeHandle)
+GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
+ unsigned handleFlags,
+ void* compileTimeHandle)
{
- CORINFO_GENERIC_HANDLE handle = 0;
- void *pIndirection = 0;
+ CORINFO_GENERIC_HANDLE handle = 0;
+ void* pIndirection = 0;
assert(pLookup->accessType != IAT_PPVALUE);
if (pLookup->accessType == IAT_VALUE)
handle = pLookup->handle;
else if (pLookup->accessType == IAT_PVALUE)
pIndirection = pLookup->addr;
- return gtNewIconEmbHndNode(handle, pIndirection, handleFlags,
- 0, 0, compileTimeHandle);
+ return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, 0, compileTimeHandle);
}
-GenTreePtr Compiler::impReadyToRunHelperToTree(
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CorInfoHelpFunc helper,
- var_types type,
- GenTreeArgList* args /* =NULL*/,
- CORINFO_LOOKUP_KIND * pGenericLookupKind /* =NULL. Only used with generics */)
+GenTreePtr Compiler::impReadyToRunHelperToTree(
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CorInfoHelpFunc helper,
+ var_types type,
+ GenTreeArgList* args /* =NULL*/,
+ CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */)
{
CORINFO_CONST_LOOKUP lookup;
#if COR_JIT_EE_VERSION > 460
@@ -1667,48 +1692,47 @@ GenTreePtr Compiler::impReadyToRunHelperToTree(
}
#endif
-GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN * pResolvedToken, CORINFO_CALL_INFO * pCallInfo)
+GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
- GenTreePtr op1 = NULL;
+ GenTreePtr op1 = nullptr;
switch (pCallInfo->kind)
{
- case CORINFO_CALL:
- op1 = new(this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
+ case CORINFO_CALL:
+ op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
#ifdef FEATURE_READYTORUN_COMPILER
- if (opts.IsReadyToRun())
- {
- op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
- op1->gtFptrVal.gtLdftnResolvedToken = new(this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
- *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
- }
- else
- op1->gtFptrVal.gtEntryPoint.addr = nullptr;
+ if (opts.IsReadyToRun())
+ {
+ op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
+ op1->gtFptrVal.gtLdftnResolvedToken = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
+ *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
+ }
+ else
+ op1->gtFptrVal.gtEntryPoint.addr = nullptr;
#endif
- break;
+ break;
- case CORINFO_CALL_CODE_POINTER:
- if (compIsForInlining())
- {
- // Don't import runtime lookups when inlining
- // Inlining has to be aborted in such a case
- compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
- return nullptr;
- }
+ case CORINFO_CALL_CODE_POINTER:
+ if (compIsForInlining())
+ {
+ // Don't import runtime lookups when inlining
+ // Inlining has to be aborted in such a case
+ compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
+ return nullptr;
+ }
- op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
- break;
+ op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
+ break;
- default:
- noway_assert(!"unknown call kind");
- break;
+ default:
+ noway_assert(!"unknown call kind");
+ break;
}
return op1;
}
-
/*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
generic instantiations.
@@ -1717,9 +1741,9 @@ GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN * pResolvedToken, C
pLookup->token1 and pLookup->token2 specify the handle that is needed.
The cases are:
- 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
+ 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
instantiation-specific handle, and the tokens to lookup the handle.
- 2. pLookup->indirections != CORINFO_USEHELPER :
+ 2. pLookup->indirections != CORINFO_USEHELPER :
2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
to get the handle.
2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
@@ -1727,12 +1751,12 @@ GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN * pResolvedToken, C
to lookup the handle.
*/
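// A minimal standalone sketch (illustrative names, not the JIT's own code) of the
// dictionary walk described in case 2 above: start from the instantiation-specific
// context, then for each indirection level dereference the current slot and add the
// corresponding offset, mirroring the tree the importer builds below.
#include <cstddef>
#include <cstdint>

static void* WalkRuntimeDictionary(void* ctx, const size_t* offsets, unsigned indirections)
{
    std::uintptr_t slot = reinterpret_cast<std::uintptr_t>(ctx);
    for (unsigned i = 0; i < indirections; i++)
    {
        if (i != 0)
        {
            // Dereference the previous slot before applying the next offset.
            slot = *reinterpret_cast<std::uintptr_t*>(slot);
        }
        slot += offsets[i];
    }
    return reinterpret_cast<void*>(slot);
}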
-GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN *pResolvedToken,
- CORINFO_LOOKUP *pLookup,
- void* compileTimeHandle)
+GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_LOOKUP* pLookup,
+ void* compileTimeHandle)
{
- CORINFO_RUNTIME_LOOKUP_KIND kind = pLookup->lookupKind.runtimeLookupKind;
- CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
+ CORINFO_RUNTIME_LOOKUP_KIND kind = pLookup->lookupKind.runtimeLookupKind;
+ CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
// This method can only be called from the importer instance of the Compiler.
    // In other words, it cannot be called by the instance of the Compiler for the inlinee.
@@ -1765,17 +1789,17 @@ GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN *pRe
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
- return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE,
- TYP_I_IMPL, gtNewArgList(ctxTree), &pLookup->lookupKind);
+ return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
+ gtNewArgList(ctxTree), &pLookup->lookupKind);
}
#endif
-
// It's available only via the run-time helper function
if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
{
- GenTreeArgList* helperArgs = gtNewArgList(ctxTree, gtNewIconEmbHndNode(
- pRuntimeLookup->signature, NULL, GTF_ICON_TOKEN_HDL, 0, NULL, compileTimeHandle));
+ GenTreeArgList* helperArgs =
+ gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
+ nullptr, compileTimeHandle));
return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
}
@@ -1785,22 +1809,26 @@ GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN *pRe
if (pRuntimeLookup->testForNull)
{
- slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, NULL DEBUGARG("impRuntimeLookup slot") );
+ slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("impRuntimeLookup slot"));
}
    // Apply repeated indirections
for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
{
- if (i != 0)
+ if (i != 0)
{
slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
}
if (pRuntimeLookup->offsets[i] != 0)
- slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
+ {
+ slotPtrTree =
+ gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
+ }
}
-
+
// No null test required
if (!pRuntimeLookup->testForNull)
{
@@ -1819,18 +1847,20 @@ GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN *pRe
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
- GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, NULL DEBUGARG("impRuntimeLookup test") );
- op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
+ GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("impRuntimeLookup test"));
+ op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
// Use a GT_AND to check for the lowest bit and indirect if it is set
GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
- GenTreePtr relop = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
+ GenTreePtr relop = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
relop->gtFlags |= GTF_RELOP_QMARK;
- op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, NULL DEBUGARG("impRuntimeLookup indir") );
- op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
+ op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("impRuntimeLookup indir"));
+ op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
- GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
+ GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
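// A minimal standalone sketch (illustrative, not the JIT's own code) of the QMARK
// built above: the low bit of the slot pointer tags an entry that must be untagged
// (subtract 1) and dereferenced before use, while an untagged pointer is used as is.
#include <cstdint>

static std::uintptr_t ResolveTaggedSlot(std::uintptr_t slotPtr)
{
    if ((slotPtr & 1) == 0)
    {
        // Low bit clear: the slot value is already the final one.
        return slotPtr;
    }
    // Low bit set: clear the tag and load through the pointer.
    return *reinterpret_cast<std::uintptr_t*>(slotPtr - 1);
}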
@@ -1841,34 +1871,40 @@ GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN *pRe
assert(pRuntimeLookup->indirections != 0);
- impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
-
+ impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
+
// Extract the handle
GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
handle->gtFlags |= GTF_IND_NONFAULTING;
- GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, NULL DEBUGARG("impRuntimeLookup typehandle") );
+ GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("impRuntimeLookup typehandle"));
// Call to helper
- GenTreeArgList* helperArgs = gtNewArgList(ctxTree, gtNewIconEmbHndNode(
- pRuntimeLookup->signature, NULL, GTF_ICON_TOKEN_HDL, 0, NULL, compileTimeHandle));
+ GenTreeArgList* helperArgs =
+ gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
+ compileTimeHandle));
GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
-
- // Check for null and possibly call helper
+
+ // Check for null and possibly call helper
GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
relop->gtFlags |= GTF_RELOP_QMARK;
- GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
+ GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
gtNewNothingNode(), // do nothing if nonnull
helperCall);
-
+
GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
unsigned tmp;
if (handleCopy->IsLocal())
+ {
tmp = handleCopy->gtLclVarCommon.gtLclNum;
+ }
else
+ {
tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
+ }
impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
return gtNewLclvNode(tmp, TYP_I_IMPL);
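// A minimal standalone sketch (illustrative names, not the JIT's own code) of the
// null-test-and-helper pattern built above: use the cached handle when the slot is
// already populated, otherwise fall back to the run-time helper for the result.
static void* ResolveWithHelperFallback(void* cachedHandle, void* (*runtimeHelper)())
{
    return (cachedHandle != nullptr) ? cachedHandle : runtimeHelper();
}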
@@ -1885,10 +1921,9 @@ GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN *pRe
struct RecursiveGuard
{
public:
-
RecursiveGuard()
{
- m_pAddress = 0;
+ m_pAddress = nullptr;
}
~RecursiveGuard()
@@ -1902,7 +1937,7 @@ public:
void Init(bool* pAddress, bool bInitialize)
{
assert(pAddress && *pAddress == false && "Recursive guard violation");
- m_pAddress = pAddress;
+ m_pAddress = pAddress;
if (bInitialize)
{
@@ -1914,23 +1949,24 @@ protected:
bool* m_pAddress;
};
-bool Compiler::impSpillStackEntry(unsigned level,
- unsigned tnum
-#ifdef DEBUG
- , bool bAssertOnRecursion
- , const char * reason
+bool Compiler::impSpillStackEntry(unsigned level,
+ unsigned tnum
+#ifdef DEBUG
+ ,
+ bool bAssertOnRecursion,
+ const char* reason
#endif
- )
+ )
{
-#ifdef DEBUG
+#ifdef DEBUG
RecursiveGuard guard;
guard.Init(&impNestedStackSpill, bAssertOnRecursion);
#endif
assert(!fgGlobalMorph); // use impInlineSpillStackEntry() during inlining
-
- GenTreePtr tree = verCurrentState.esStack[level].val;
+
+ GenTreePtr tree = verCurrentState.esStack[level].val;
/* Allocate a temp if we haven't been asked to use a particular one */
@@ -1939,12 +1975,16 @@ bool Compiler::impSpillStackEntry(unsigned level,
// Ignore bad temp requests (they will happen with bad code and will be
        // caught when importing the dest block)
if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
+ {
return false;
- }
+ }
+ }
else
{
if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
+ {
return false;
+ }
}
if (tnum == BAD_VAR_NUM)
@@ -1956,7 +1996,7 @@ bool Compiler::impSpillStackEntry(unsigned level,
// if verification is needed and tnum's type is incompatible with
// type on that stack, we grab a new temp. This is safe since
// we will throw a verification exception in the dest block.
-
+
var_types valTyp = tree->TypeGet();
var_types dstTyp = lvaTable[tnum].TypeGet();
@@ -1965,14 +2005,14 @@ bool Compiler::impSpillStackEntry(unsigned level,
if ((genActualType(valTyp) != genActualType(dstTyp)) &&
!(
#ifndef _TARGET_64BIT_
- (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF ) ||
- (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
+ (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
#endif // !_TARGET_64BIT_
- (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp)))
- )
+ (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
{
if (verNeedsVerification())
+ {
return false;
+ }
}
}
@@ -1980,8 +2020,8 @@ bool Compiler::impSpillStackEntry(unsigned level,
impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
// The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
- var_types type = genActualType(lvaTable[tnum].TypeGet());
- GenTreePtr temp = gtNewLclvNode(tnum, type);
+ var_types type = genActualType(lvaTable[tnum].TypeGet());
+ GenTreePtr temp = gtNewLclvNode(tnum, type);
verCurrentState.esStack[level].val = temp;
return true;
@@ -1992,38 +2032,39 @@ bool Compiler::impSpillStackEntry(unsigned level,
* Ensure that the stack has only spilled values
*/
-void Compiler::impSpillStackEnsure(bool spillLeaves)
+void Compiler::impSpillStackEnsure(bool spillLeaves)
{
assert(!spillLeaves || opts.compDbgCode);
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTreePtr tree = verCurrentState.esStack[level].val;
if (!spillLeaves && tree->OperIsLeaf())
+ {
continue;
+ }
// Temps introduced by the importer itself don't need to be spilled
- bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) &&
- (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
+ bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
- if (isTempLcl)
+ if (isTempLcl)
+ {
continue;
+ }
- impSpillStackEntry(level, BAD_VAR_NUM
- DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
+ impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
}
}
-void Compiler::impSpillEvalStack()
+void Compiler::impSpillEvalStack()
{
assert(!fgGlobalMorph); // use impInlineSpillEvalStack() during inlining
-
+
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
- {
- impSpillStackEntry(level, BAD_VAR_NUM
- DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
+ {
+ impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
}
}
@@ -2034,10 +2075,9 @@ void Compiler::impSpillEvalStack()
* On return the stack is guaranteed to be empty.
*/
-inline
-void Compiler::impEvalSideEffects()
+inline void Compiler::impEvalSideEffects()
{
- impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects") );
+ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
verCurrentState.esStackDepth = 0;
}
@@ -2048,10 +2088,7 @@ void Compiler::impEvalSideEffects()
* [0..chkLevel) is the portion of the stack which will be checked and spilled.
*/
-inline
-void Compiler::impSpillSideEffects(bool spillGlobEffects,
- unsigned chkLevel
- DEBUGARG(const char * reason) )
+inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
{
assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
@@ -2061,7 +2098,9 @@ void Compiler::impSpillSideEffects(bool spillGlobEffects,
impSpillSpecialSideEff();
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
+ {
chkLevel = verCurrentState.esStackDepth;
+ }
assert(chkLevel <= verCurrentState.esStackDepth);
@@ -2072,14 +2111,14 @@ void Compiler::impSpillSideEffects(bool spillGlobEffects,
GenTreePtr tree = verCurrentState.esStack[i].val;
GenTreePtr lclVarTree;
-
- if ((tree->gtFlags & spillFlags) != 0 ||
- (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
- !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
- gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or lvAddrTaken flag.
+
+ if ((tree->gtFlags & spillFlags) != 0 ||
+ (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
+ !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
+ gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
+ // lvAddrTaken flag.
{
- impSpillStackEntry(i, BAD_VAR_NUM
- DEBUGARG(false) DEBUGARG(reason));
+ impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
}
}
}
@@ -2090,13 +2129,14 @@ void Compiler::impSpillSideEffects(bool spillGlobEffects,
* those trees to temps and replace them on the stack with refs to their temps.
*/
-inline
-void Compiler::impSpillSpecialSideEff()
+inline void Compiler::impSpillSpecialSideEff()
{
// Only exception objects need to be carefully handled
- if (!compCurBB->bbCatchTyp)
- return;
+ if (!compCurBB->bbCatchTyp)
+ {
+ return;
+ }
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
@@ -2104,8 +2144,7 @@ void Compiler::impSpillSpecialSideEff()
// Make sure if we have an exception object in the sub tree we spill ourselves.
if (gtHasCatchArg(tree))
{
- impSpillStackEntry(level, BAD_VAR_NUM
- DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
+ impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
}
}
}
@@ -2115,11 +2154,11 @@ void Compiler::impSpillSpecialSideEff()
* Spill all stack references to value classes (TYP_STRUCT nodes)
*/
-void Compiler::impSpillValueClasses()
+void Compiler::impSpillValueClasses()
{
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTreePtr tree = verCurrentState.esStack[level].val;
if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
{
@@ -2127,8 +2166,7 @@ void Compiler::impSpillValueClasses()
// value class on the stack. Need to spill that
// stack entry.
- impSpillStackEntry(level, BAD_VAR_NUM
- DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
+ impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
}
}
}
@@ -2138,11 +2176,10 @@ void Compiler::impSpillValueClasses()
* Callback that checks if a tree node is TYP_STRUCT
*/
-Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr * pTree,
- fgWalkData *data)
+Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
{
fgWalkResult walkResult = WALK_CONTINUE;
-
+
if ((*pTree)->gtType == TYP_STRUCT)
{
// Abort the walk and indicate that we found a value class
@@ -2160,10 +2197,10 @@ Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr * pTree,
* their temps.
*/
-void Compiler::impSpillLclRefs(ssize_t lclNum)
+void Compiler::impSpillLclRefs(ssize_t lclNum)
{
assert(!fgGlobalMorph); // use impInlineSpillLclRefs() during inlining
-
+
/* Before we make any appends to the tree list we must spill the
* "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
@@ -2171,23 +2208,21 @@ void Compiler::impSpillLclRefs(ssize_t lclNum)
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTreePtr tree = verCurrentState.esStack[level].val;
/* If the tree may throw an exception, and the block has a handler,
then we need to spill assignments to the local if the local is
live on entry to the handler.
Just spill 'em all without considering the liveness */
- bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) &&
- (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
+ bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
/* Skip the tree if it doesn't have an affected reference,
unless xcptnCaught */
- if (xcptnCaught || gtHasRef(tree, lclNum, false))
+ if (xcptnCaught || gtHasRef(tree, lclNum, false))
{
- impSpillStackEntry(level, BAD_VAR_NUM
- DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
+ impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
}
}
}
@@ -2195,21 +2230,20 @@ void Compiler::impSpillLclRefs(ssize_t lclNum)
/*****************************************************************************
*
* Push catch arg onto the stack.
- * If there are jumps to the beginning of the handler, insert basic block
+ * If there are jumps to the beginning of the handler, insert basic block
* and spill catch arg to a temp. Update the handler block if necessary.
*
* Returns the basic block of the actual handler.
*/
-BasicBlock * Compiler::impPushCatchArgOnStack(BasicBlock * hndBlk,
- CORINFO_CLASS_HANDLE clsHnd)
+BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
{
// Do not inject the basic block twice on reimport. This should be
// hit only under JIT stress. See if the block is the one we injected.
// Note that EH canonicalization can inject internal blocks here. We might
// be able to re-use such a block (but we don't, right now).
- if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
- == (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
+ if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
+ (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
{
GenTreePtr tree = hndBlk->bbTreeList;
@@ -2218,8 +2252,7 @@ BasicBlock * Compiler::impPushCatchArgOnStack(BasicBlock * hndB
tree = tree->gtStmt.gtStmtExpr;
assert(tree != nullptr);
- if ((tree->gtOper == GT_ASG) &&
- (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
+ if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
(tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
{
tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
@@ -2234,7 +2267,7 @@ BasicBlock * Compiler::impPushCatchArgOnStack(BasicBlock * hndB
// someone prepended something to our injected block, but that's unlikely.
}
- /* Push the exception address value on the stack */
+ /* Push the exception address value on the stack */
GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
/* Mark the node as having a side-effect - i.e. cannot be
@@ -2242,11 +2275,12 @@ BasicBlock * Compiler::impPushCatchArgOnStack(BasicBlock * hndB
arg->gtFlags |= GTF_ORDER_SIDEEFF;
/* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
- if (hndBlk->bbRefs > 1
- || compStressCompile(STRESS_CATCH_ARG, 5) )
+ if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
{
if (hndBlk->bbRefs == 1)
+ {
hndBlk->bbRefs++;
+ }
/* Create extra basic block for the spill */
BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
@@ -2258,9 +2292,9 @@ BasicBlock * Compiler::impPushCatchArgOnStack(BasicBlock * hndB
hndBlk->bbRefs++;
/* Spill into a temp */
- unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
+ unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
lvaTable[tempNum].lvType = TYP_REF;
- arg = gtNewTempAssign(tempNum, arg);
+ arg = gtNewTempAssign(tempNum, arg);
hndBlk->bbStkTempsIn = tempNum;
@@ -2269,7 +2303,7 @@ BasicBlock * Compiler::impPushCatchArgOnStack(BasicBlock * hndB
if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
{
impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
- arg = gtNewStmt(arg, impCurStmtOffs);
+ arg = gtNewStmt(arg, impCurStmtOffs);
}
fgInsertStmtAtEnd(newBlk, arg);
@@ -2290,16 +2324,15 @@ BasicBlock * Compiler::impPushCatchArgOnStack(BasicBlock * hndB
* If the tree has side-effects, it will be spilled to a temp.
*/
-GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
- GenTreePtr * pClone,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- GenTreePtr * pAfterStmt
- DEBUGARG(const char * reason) )
+GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
+ GenTreePtr* pClone,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
{
if (!(tree->gtFlags & GTF_GLOB_EFFECT))
{
- GenTreePtr clone = gtClone(tree, true);
+ GenTreePtr clone = gtClone(tree, true);
if (clone)
{
@@ -2310,7 +2343,7 @@ GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
/* Store the operand in a temp and return the temp */
- unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
+ unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
// impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
// return a struct type. It also may modify the struct type to a more
@@ -2318,10 +2351,10 @@ GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
// the lclVar AFTER calling impAssignTempGen().
impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
- var_types type = genActualType(lvaTable[temp].TypeGet());
+ var_types type = genActualType(lvaTable[temp].TypeGet());
*pClone = gtNewLclvNode(temp, type);
- return gtNewLclvNode(temp, type);
+ return gtNewLclvNode(temp, type);
}
/*****************************************************************************
@@ -2329,28 +2362,26 @@ GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
* generate now.
*/
-inline
-void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
+inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
{
if (compIsForInlining())
{
GenTreePtr callStmt = impInlineInfo->iciStmt;
assert(callStmt->gtOper == GT_STMT);
- impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
+ impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
}
else
{
assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
- impCurStmtOffs = offs | stkBit;
+ impCurStmtOffs = offs | stkBit;
}
}
/*****************************************************************************
* Returns current IL offset with stack-empty and call-instruction info incorporated
*/
-inline
-IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
+inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
{
if (compIsForInlining())
{
@@ -2359,7 +2390,7 @@ IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction
else
{
assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
- IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
+ IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
return offs | stkBit | callInstructionBit;
}
@@ -2381,9 +2412,9 @@ IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction
#ifdef DEBUG
-void Compiler::impNoteLastILoffs()
+void Compiler::impNoteLastILoffs()
{
- if (impLastILoffsStmt == NULL)
+ if (impLastILoffsStmt == nullptr)
{
// We should have added a statement for the current basic block
// Is this assert correct ?
@@ -2391,28 +2422,29 @@ void Compiler::impNoteLastILoffs()
assert(impTreeLast);
assert(impTreeLast->gtOper == GT_STMT);
- impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining()?BAD_IL_OFFSET:impCurOpcOffs;
+ impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
}
else
{
- impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining()?BAD_IL_OFFSET:impCurOpcOffs;
- impLastILoffsStmt = NULL;
+ impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
+ impLastILoffsStmt = nullptr;
}
}
#endif // DEBUG
-
/*****************************************************************************
* We don't create any GenTree (excluding spills) for a branch.
* For debugging info, we need a placeholder so that we can note
* the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
*/
-void Compiler::impNoteBranchOffs()
+void Compiler::impNoteBranchOffs()
{
if (opts.compDbgCode)
+ {
impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
+ }
}
/*****************************************************************************
@@ -2422,11 +2454,11 @@ void Compiler::impNoteBranchOffs()
* Returns the next stmt boundary (after the start of the block)
*/
-unsigned Compiler::impInitBlockLineInfo()
+unsigned Compiler::impInitBlockLineInfo()
{
/* Assume the block does not correspond with any IL offset. This prevents
us from reporting extra offsets. Extra mappings can cause confusing
- stepping, especially if the extra mapping is a jump-target, and the
+ stepping, especially if the extra mapping is a jump-target, and the
debugger does not ignore extra mappings, but instead rewinds to the
nearest known offset */
@@ -2434,14 +2466,12 @@ unsigned Compiler::impInitBlockLineInfo()
if (compIsForInlining())
{
- return ~0;
+ return ~0;
}
-
- IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
+ IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
- if ((verCurrentState.esStackDepth == 0) &&
- (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
+ if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
{
impCurStmtOffsSet(blockOffs);
}
@@ -2459,22 +2489,25 @@ unsigned Compiler::impInitBlockLineInfo()
impCurStmtOffsSet(blockOffs);
}
- if (!info.compStmtOffsetsCount)
+ if (!info.compStmtOffsetsCount)
+ {
return ~0;
+ }
/* Find the lowest explicit stmt boundary within the block */
/* Start looking at an entry that is based on our instr offset */
- unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
+ unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
- if (index >= info.compStmtOffsetsCount)
- index = info.compStmtOffsetsCount - 1;
+ if (index >= info.compStmtOffsetsCount)
+ {
+ index = info.compStmtOffsetsCount - 1;
+ }
/* If we've guessed too far, back up */
- while (index > 0 &&
- info.compStmtOffsets[index - 1] >= blockOffs)
+ while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
{
index--;
}
@@ -2486,7 +2519,9 @@ unsigned Compiler::impInitBlockLineInfo()
index++;
if (index == info.compStmtOffsetsCount)
+ {
return info.compStmtOffsetsCount;
+ }
}
assert(index < info.compStmtOffsetsCount);
@@ -2505,11 +2540,9 @@ unsigned Compiler::impInitBlockLineInfo()
return index;
}
-
/*****************************************************************************/
-static inline
-bool impOpcodeIsCallOpcode(OPCODE opcode)
+static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
{
switch (opcode)
{
@@ -2526,8 +2559,7 @@ bool impOpcodeIsCallOpcode(OPCODE opcode)
/*****************************************************************************/
#ifdef DEBUGGING_SUPPORT
-static inline
-bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
+static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
{
switch (opcode)
{
@@ -2548,42 +2580,42 @@ bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
/*****************************************************************************/
-// One might think it is worth caching these values, but results indicate
+// One might think it is worth caching these values, but results indicate
// that it isn't.
// In addition, caching them causes SuperPMI to be unable to completely
// encapsulate an individual method context.
-CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
+CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
{
CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
- assert(refAnyClass != (CORINFO_CLASS_HANDLE) 0);
+ assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
return refAnyClass;
}
-CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
+CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
{
CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
- assert(typeHandleClass != (CORINFO_CLASS_HANDLE) 0);
+ assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
return typeHandleClass;
}
-CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
+CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
{
CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
- assert(argIteratorClass != (CORINFO_CLASS_HANDLE) 0);
+ assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
return argIteratorClass;
}
-CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
+CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
{
CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
- assert(stringClass != (CORINFO_CLASS_HANDLE) 0);
+ assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
return stringClass;
}
-CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
+CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
{
CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
- assert(objectClass != (CORINFO_CLASS_HANDLE) 0);
+ assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
return objectClass;
}
@@ -2594,14 +2626,17 @@ CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
*/
/* static */
-void Compiler::impBashVarAddrsToI(GenTreePtr tree1,
- GenTreePtr tree2)
+void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
{
- if ( tree1->IsVarAddr())
+ if (tree1->IsVarAddr())
+ {
tree1->gtType = TYP_I_IMPL;
+ }
if (tree2 && tree2->IsVarAddr())
+ {
tree2->gtType = TYP_I_IMPL;
+ }
}
/*****************************************************************************
@@ -2612,74 +2647,75 @@ void Compiler::impBashVarAddrsToI(GenTreePtr tree1,
* We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
*/
-GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree,
- var_types dstTyp)
+GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
{
var_types currType = genActualType(tree->gtType);
var_types wantedType = genActualType(dstTyp);
- if (wantedType != currType)
- {
- // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
- if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
- {
- if (!varTypeIsI(tree->gtType) ||
- ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
- {
- tree->gtType = TYP_I_IMPL;
- }
- }
+ if (wantedType != currType)
+ {
+ // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
+ if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
+ {
+ if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
+ {
+ tree->gtType = TYP_I_IMPL;
+ }
+ }
#ifdef _TARGET_64BIT_
- else if (varTypeIsI(wantedType) && (currType == TYP_INT))
- {
- // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
- tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
- }
- else if ((wantedType == TYP_INT) && varTypeIsI(currType))
- {
+ else if (varTypeIsI(wantedType) && (currType == TYP_INT))
+ {
+ // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
+ tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
+ }
+ else if ((wantedType == TYP_INT) && varTypeIsI(currType))
+ {
// Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
- tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
- }
+ tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
+ }
#endif // _TARGET_64BIT_
- }
+ }
return tree;
}
/*****************************************************************************
- * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
+ * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
* but we want to make that an explicit cast in our trees, so any implicit casts
* that exist in the IL are turned into explicit casts here.
*/
-GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree,
- var_types dstTyp)
+GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
{
#ifndef LEGACY_BACKEND
- if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
- {
- tree = gtNewCastNode(dstTyp, tree, dstTyp);
- }
+ if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
+ {
+ tree = gtNewCastNode(dstTyp, tree, dstTyp);
+ }
#endif // !LEGACY_BACKEND
return tree;
}
/*****************************************************************************/
-BOOL Compiler::impLocAllocOnStack()
+BOOL Compiler::impLocAllocOnStack()
{
if (!compLocallocUsed)
- return(FALSE);
+ {
+ return (FALSE);
+ }
- // Returns true if a GT_LCLHEAP node is encountered in any of the trees
+ // Returns true if a GT_LCLHEAP node is encountered in any of the trees
    //  that have been pushed on the importer evaluation stack.
//
- for (unsigned i=0; i < verCurrentState.esStackDepth; i++)
+ for (unsigned i = 0; i < verCurrentState.esStackDepth; i++)
{
if (fgWalkTreePre(&verCurrentState.esStack[i].val, Compiler::fgChkLocAllocCB) == WALK_ABORT)
- return(TRUE);
+ {
+ return (TRUE);
+ }
}
- return(FALSE);
+ return (FALSE);
}
//------------------------------------------------------------------------
@@ -2701,10 +2737,10 @@ BOOL Compiler::impLocAllocOnStack()
// ldtoken <field handle>
// call InitializeArray
// The lower bounds need not be constant except when the array rank is 1.
-// The function recognizes all kinds of arrays thus enabling a small runtime
+// The function recognizes all kinds of arrays thus enabling a small runtime
// such as CoreRT to skip providing an implementation for InitializeArray.
-GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
+GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
{
assert(sig->numArgs == 2);
@@ -2712,55 +2748,52 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
GenTreePtr arrayLocalNode = impStackTop(1).val;
//
- // Verify that the field token is known and valid. Note that It's also
+ // Verify that the field token is known and valid. Note that It's also
// possible for the token to come from reflection, in which case we cannot do
// the optimization and must therefore revert to calling the helper. You can
- // see an example of this in bvt\DynIL\initarray2.exe (in Main).
+ // see an example of this in bvt\DynIL\initarray2.exe (in Main).
//
// Check to see if the ldtoken helper call is what we see here.
- if ( fieldTokenNode->gtOper != GT_CALL ||
- (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
- (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
+ if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
+ (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
{
- return NULL;
+ return nullptr;
}
// Strip helper call away
fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
- if (fieldTokenNode->gtOper == GT_IND)
+ if (fieldTokenNode->gtOper == GT_IND)
{
fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
}
- // Check for constant
+ // Check for constant
if (fieldTokenNode->gtOper != GT_CNS_INT)
{
- return NULL;
+ return nullptr;
}
- CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE) fieldTokenNode->gtIntCon.gtCompileTimeHandle;
- if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) ||
- (fieldToken == 0))
+ CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
+ if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
{
- return NULL;
+ return nullptr;
}
//
// We need to get the number of elements in the array and the size of each element.
// We verify that the newarr statement is exactly what we expect it to be.
// If it's not then we just return NULL and we don't optimize this call
- //
-
+ //
//
// It is possible the we don't have any statements in the block yet
- //
+ //
if (impTreeLast->gtOper != GT_STMT)
{
assert(impTreeLast->gtOper == GT_BEG_STMTS);
- return NULL;
+ return nullptr;
}
//
@@ -2768,12 +2801,11 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
// that the target of the assignment is the array passed to InitializeArray.
//
GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
- if ((arrayAssignment->gtOper != GT_ASG) ||
- (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
- (arrayLocalNode->gtOper != GT_LCL_VAR) ||
+ if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
+ (arrayLocalNode->gtOper != GT_LCL_VAR) ||
(arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
{
- return NULL;
+ return nullptr;
}
//
@@ -2781,10 +2813,9 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
//
GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
- if ((newArrayCall->gtOper != GT_CALL) ||
- (newArrayCall->gtCall.gtCallType != CT_HELPER))
+ if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
{
- return NULL;
+ return nullptr;
}
//
@@ -2794,30 +2825,34 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
bool isMDArray = false;
if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
- newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
- newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
+ newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
+ newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
#ifdef FEATURE_READYTORUN_COMPILER
&& newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
#endif
- )
+ )
{
#if COR_JIT_EE_VERSION > 460
if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
+ {
return nullptr;
+ }
isMDArray = true;
#endif
}
- CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE) newArrayCall->gtCall.compileTimeHelperArgumentHandle;
+ CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
//
// Make sure we found a compile time handle to the array
//
if (!arrayClsHnd)
+ {
return nullptr;
+ }
unsigned rank = 0;
S_UINT32 numElements;
@@ -2827,7 +2862,9 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
rank = info.compCompHnd->getArrayRank(arrayClsHnd);
if (rank == 0)
+ {
return nullptr;
+ }
GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
assert(tokenArg != nullptr);
@@ -2842,15 +2879,14 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
// be at most 64 arguments - 32 lengths and 32 lower bounds.
//
- if ((!numArgsArg->Current()->IsCnsIntOrI()) ||
- (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
+ if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
(numArgsArg->Current()->AsIntCon()->IconValue() > 64))
{
return nullptr;
}
unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
- bool lowerBoundsSpecified;
+ bool lowerBoundsSpecified;
if (numArgs == rank * 2)
{
@@ -2867,7 +2903,9 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
//
if (rank == 1)
+ {
isMDArray = false;
+ }
}
else
{
@@ -2885,30 +2923,26 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
{
static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
{
- return (tree->OperGet() == GT_ASG) &&
- IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
+ return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
}
static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
{
- return (tree->OperGet() == GT_IND) &&
- (tree->gtGetOp1()->OperGet() == GT_ADD) &&
+ return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
(tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
}
static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
{
- return (tree->OperGet() == GT_ADDR) &&
- (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
+ return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
(tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
}
static bool IsComma(GenTree* tree)
{
- return (tree != nullptr) &&
- (tree->OperGet() == GT_COMMA);
+ return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
}
};
@@ -2933,7 +2967,9 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
if (lowerBoundNode->IsIntegralConst(0))
+ {
isMDArray = false;
+ }
}
comma = comma->gtGetOp2();
@@ -2945,7 +2981,9 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
if (!lengthNode->IsCnsIntOrI())
+ {
return nullptr;
+ }
numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
argIndex++;
@@ -2954,7 +2992,9 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
if (argIndex != numArgs)
+ {
return nullptr;
+ }
}
else
{
@@ -2984,19 +3024,19 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
//
if (arrayLengthNode->gtOper != GT_CNS_INT)
{
- return NULL;
+ return nullptr;
}
numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
-
+
if (!info.compCompHnd->isSDArray(arrayClsHnd))
{
- return NULL;
+ return nullptr;
}
}
CORINFO_CLASS_HANDLE elemClsHnd;
- var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
+ var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
//
// Note that genTypeSize will return zero for non primitive types, which is exactly
@@ -3012,21 +3052,24 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
S_UINT32 size = elemSize * S_UINT32(numElements);
if (size.IsOverflow())
- return NULL;
+ {
+ return nullptr;
+ }
- if ((size.Value() == 0) ||
- (varTypeIsGC(elementType)))
+ if ((size.Value() == 0) || (varTypeIsGC(elementType)))
{
assert(verNeedsVerification());
- return NULL;
+ return nullptr;
}
- void *initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
- if (!initData)
- return NULL;
+ void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
+ if (!initData)
+ {
+ return nullptr;
+ }
//
- // At this point we are ready to commit to implementing the InitializeArray
+ // At this point we are ready to commit to implementing the InitializeArray
    // intrinsic using a struct assignment. Pop the arguments from the stack and
// return the struct assignment node.
//
@@ -3040,44 +3083,37 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO * sig)
{
unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
- dst = gtNewOperNode(GT_ADD,
- TYP_BYREF,
- arrayLocalNode,
- gtNewIconNode(dataOffset, TYP_I_IMPL));
+ dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
}
else
{
- dst = gtNewOperNode(GT_ADDR,
- TYP_BYREF,
- gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
+ dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
}
-
+
return gtNewBlkOpNode(GT_COPYBLK,
- dst, // dst
- gtNewIconHandleNode((size_t) initData, GTF_ICON_STATIC_HDL), // src
- gtNewIconNode(size.Value()), // size
+ dst, // dst
+ gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL), // src
+ gtNewIconNode(size.Value()), // size
false);
}
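// A minimal standalone sketch (illustrative names, not the JIT's own code) of what
// the GT_COPYBLK returned above amounts to: a direct copy of the static
// initialization blob into the array's data area, replacing the InitializeArray call.
#include <cstddef>
#include <cstring>

static void ExpandInitializeArray(void* arrayData, const void* initData, std::size_t size)
{
    // The three GT_COPYBLK operands map to dst, src, and size respectively.
    std::memcpy(arrayData, initData, size);
}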
-
-
/*****************************************************************************/
// Returns the GenTree that should be used to do the intrinsic instead of the call.
// Returns NULL if an intrinsic cannot be used
-GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
- CORINFO_METHOD_HANDLE method,
- CORINFO_SIG_INFO * sig,
- int memberRef,
- bool readonlyCall,
- bool tailCall,
- CorInfoIntrinsics * pIntrinsicID)
+GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_METHOD_HANDLE method,
+ CORINFO_SIG_INFO* sig,
+ int memberRef,
+ bool readonlyCall,
+ bool tailCall,
+ CorInfoIntrinsics* pIntrinsicID)
{
bool mustExpand = false;
#if COR_JIT_EE_VERSION > 460
CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
#else
- CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
+ CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
#endif
*pIntrinsicID = intrinsicID;
@@ -3090,14 +3126,14 @@ GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
// must be done regardless of DbgCode and MinOpts
return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
}
-#ifdef _TARGET_64BIT_
+#ifdef _TARGET_64BIT_
if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
{
// must be done regardless of DbgCode and MinOpts
return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
}
#else
- assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
+ assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
#endif
GenTreePtr retNode = nullptr;
@@ -3115,13 +3151,13 @@ GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
// seem to work properly for Infinity values, we don't do
// CORINFO_INTRINSIC_Pow because it needs a Helper which we currently don't have
- var_types callType = JITtype2varType(sig->retType);
+ var_types callType = JITtype2varType(sig->retType);
/* First do the intrinsics which are always smaller than a call */
switch (intrinsicID)
{
- GenTreePtr op1, op2;
+ GenTreePtr op1, op2;
case CORINFO_INTRINSIC_Sin:
case CORINFO_INTRINSIC_Sqrt:
@@ -3142,153 +3178,165 @@ GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
case CORINFO_INTRINSIC_Ceiling:
case CORINFO_INTRINSIC_Floor:
- // These are math intrinsics
+ // These are math intrinsics
- assert(callType != TYP_STRUCT);
+ assert(callType != TYP_STRUCT);
- op1 = nullptr;
+ op1 = nullptr;
#ifdef LEGACY_BACKEND
- if (IsTargetIntrinsic(intrinsicID))
+ if (IsTargetIntrinsic(intrinsicID))
#else
- // Intrinsics that are not implemented directly by target instructions will
- // be re-materialized as users calls in rationalizer. For prefixed tail calls,
- // don't do this optimization, because
- // a) For back compatibility reasons on desktop.Net 4.6 / 4.6.1
- // b) It will be non-trivial task or too late to re-materialize a surviving
- // tail prefixed GT_INTRINSIC as tail call in rationalizer.
- if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
+ // Intrinsics that are not implemented directly by target instructions will
+ // be re-materialized as users calls in rationalizer. For prefixed tail calls,
+ // don't do this optimization, because
+ // a) For back compatibility reasons on desktop.Net 4.6 / 4.6.1
+ // b) It will be non-trivial task or too late to re-materialize a surviving
+ // tail prefixed GT_INTRINSIC as tail call in rationalizer.
+ if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
#endif
- {
- switch (sig->numArgs)
{
- case 1:
- op1 = impPopStack().val;
+ switch (sig->numArgs)
+ {
+ case 1:
+ op1 = impPopStack().val;
-#if FEATURE_X87_DOUBLES
+#if FEATURE_X87_DOUBLES
- // X87 stack doesn't differentiate between float/double
- // so it doesn't need a cast, but everybody else does
- // Just double check it is at least a FP type
- noway_assert(varTypeIsFloating(op1));
+ // X87 stack doesn't differentiate between float/double
+ // so it doesn't need a cast, but everybody else does
+ // Just double check it is at least a FP type
+ noway_assert(varTypeIsFloating(op1));
#else // FEATURE_X87_DOUBLES
- if (op1->TypeGet() != callType)
- op1 = gtNewCastNode(callType, op1, callType);
+ if (op1->TypeGet() != callType)
+ {
+ op1 = gtNewCastNode(callType, op1, callType);
+ }
#endif // FEATURE_X87_DOUBLES
- op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
- break;
+ op1 = new (this, GT_INTRINSIC)
+ GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
+ break;
- case 2:
- op2 = impPopStack().val;
- op1 = impPopStack().val;
+ case 2:
+ op2 = impPopStack().val;
+ op1 = impPopStack().val;
+
+#if FEATURE_X87_DOUBLES
-#if FEATURE_X87_DOUBLES
+ // X87 stack doesn't differentiate between float/double
+ // so it doesn't need a cast, but everybody else does
+ // Just double check it is at least a FP type
+ noway_assert(varTypeIsFloating(op2));
+ noway_assert(varTypeIsFloating(op1));
- // X87 stack doesn't differentiate between float/double
- // so it doesn't need a cast, but everybody else does
- // Just double check it is at least a FP type
- noway_assert(varTypeIsFloating(op2));
- noway_assert(varTypeIsFloating(op1));
+#else // FEATURE_X87_DOUBLES
-#else // FEATURE_X87_DOUBLES
+ if (op2->TypeGet() != callType)
+ {
+ op2 = gtNewCastNode(callType, op2, callType);
+ }
+ if (op1->TypeGet() != callType)
+ {
+ op1 = gtNewCastNode(callType, op1, callType);
+ }
- if (op2->TypeGet() != callType)
- op2 = gtNewCastNode(callType, op2, callType);
- if (op1->TypeGet() != callType)
- op1 = gtNewCastNode(callType, op1, callType);
+#endif // FEATURE_X87_DOUBLES
-#endif // FEATURE_X87_DOUBLES
+ op1 = new (this, GT_INTRINSIC)
+ GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
+ break;
- op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
- break;
+ default:
+ NO_WAY("Unsupported number of args for Math Instrinsic");
+ }
- default:
- NO_WAY("Unsupported number of args for Math Instrinsic");
- }
-
#ifndef LEGACY_BACKEND
- if (IsIntrinsicImplementedByUserCall(intrinsicID))
- {
- op1->gtFlags |= GTF_CALL;
- }
+ if (IsIntrinsicImplementedByUserCall(intrinsicID))
+ {
+ op1->gtFlags |= GTF_CALL;
+ }
#endif
- }
-
- retNode = op1;
- break;
+ }
+ retNode = op1;
+ break;
#ifdef _TARGET_XARCH_
// TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
- case CORINFO_INTRINSIC_InterlockedAdd32:
- interlockedOperator = GT_LOCKADD; goto InterlockedBinOpCommon;
- case CORINFO_INTRINSIC_InterlockedXAdd32:
- interlockedOperator = GT_XADD; goto InterlockedBinOpCommon;
- case CORINFO_INTRINSIC_InterlockedXchg32:
- interlockedOperator = GT_XCHG; goto InterlockedBinOpCommon;
+ case CORINFO_INTRINSIC_InterlockedAdd32:
+ interlockedOperator = GT_LOCKADD;
+ goto InterlockedBinOpCommon;
+ case CORINFO_INTRINSIC_InterlockedXAdd32:
+ interlockedOperator = GT_XADD;
+ goto InterlockedBinOpCommon;
+ case CORINFO_INTRINSIC_InterlockedXchg32:
+ interlockedOperator = GT_XCHG;
+ goto InterlockedBinOpCommon;
#ifdef _TARGET_AMD64_
- case CORINFO_INTRINSIC_InterlockedAdd64:
- interlockedOperator = GT_LOCKADD; goto InterlockedBinOpCommon;
- case CORINFO_INTRINSIC_InterlockedXAdd64:
- interlockedOperator = GT_XADD; goto InterlockedBinOpCommon;
- case CORINFO_INTRINSIC_InterlockedXchg64:
- interlockedOperator = GT_XCHG; goto InterlockedBinOpCommon;
+ case CORINFO_INTRINSIC_InterlockedAdd64:
+ interlockedOperator = GT_LOCKADD;
+ goto InterlockedBinOpCommon;
+ case CORINFO_INTRINSIC_InterlockedXAdd64:
+ interlockedOperator = GT_XADD;
+ goto InterlockedBinOpCommon;
+ case CORINFO_INTRINSIC_InterlockedXchg64:
+ interlockedOperator = GT_XCHG;
+ goto InterlockedBinOpCommon;
#endif // _TARGET_AMD64_
-InterlockedBinOpCommon:
- assert(callType != TYP_STRUCT);
- assert(sig->numArgs == 2);
+ InterlockedBinOpCommon:
+ assert(callType != TYP_STRUCT);
+ assert(sig->numArgs == 2);
- op2 = impPopStack().val;
- op1 = impPopStack().val;
+ op2 = impPopStack().val;
+ op1 = impPopStack().val;
- // This creates:
- // val
- // XAdd
- // addr
- // field (for example)
- //
- // In the case where the first argument is the address of a local, we might
- // want to make this *not* make the var address-taken -- but atomic instructions
- // on a local are probably pretty useless anyway, so we probably don't care.
+ // This creates:
+ // val
+ // XAdd
+ // addr
+ // field (for example)
+ //
+ // In the case where the first argument is the address of a local, we might
+ // want to make this *not* make the var address-taken -- but atomic instructions
+ // on a local are probably pretty useless anyway, so we probably don't care.
- op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
- op1->gtFlags |= GTF_GLOB_EFFECT;
- retNode = op1;
- break;
+ op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
+ op1->gtFlags |= GTF_GLOB_EFFECT;
+ retNode = op1;
+ break;
#endif // _TARGET_XARCH_
- case CORINFO_INTRINSIC_MemoryBarrier:
+ case CORINFO_INTRINSIC_MemoryBarrier:
- assert(sig->numArgs == 0);
+ assert(sig->numArgs == 0);
- op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
- op1->gtFlags |= GTF_GLOB_EFFECT;
- retNode = op1;
- break;
+ op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
+ op1->gtFlags |= GTF_GLOB_EFFECT;
+ retNode = op1;
+ break;
#ifdef _TARGET_XARCH_
// TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
- case CORINFO_INTRINSIC_InterlockedCmpXchg32:
+ case CORINFO_INTRINSIC_InterlockedCmpXchg32:
#ifdef _TARGET_AMD64_
- case CORINFO_INTRINSIC_InterlockedCmpXchg64:
+ case CORINFO_INTRINSIC_InterlockedCmpXchg64:
#endif
{
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 3);
GenTreePtr op3;
- op3 = impPopStack().val; //comparand
- op2 = impPopStack().val; //value
- op1 = impPopStack().val; //location
+ op3 = impPopStack().val; // comparand
+ op2 = impPopStack().val; // value
+ op1 = impPopStack().val; // location
- GenTreePtr node = new(this, GT_CMPXCHG)
- GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
+ GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
retNode = node;
@@ -3296,97 +3344,98 @@ InterlockedBinOpCommon:
}
#endif
- case CORINFO_INTRINSIC_StringLength:
- op1 = impPopStack().val;
- if (!opts.MinOpts() && !opts.compDbgCode)
- {
- GenTreeArrLen* arrLen = new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen)
- );
- op1 = arrLen;
- }
- else
- {
- /* Create the expression "*(str_addr + stringLengthOffset)" */
- op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
- op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
- }
- retNode = op1;
- break;
-
- case CORINFO_INTRINSIC_StringGetChar:
- op2 = impPopStack().val;
- op1 = impPopStack().val;
- op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
- op1->gtFlags |= GTF_INX_STRING_LAYOUT;
- retNode = op1;
- break;
-
- case CORINFO_INTRINSIC_InitializeArray:
- retNode = impInitializeArrayIntrinsic(sig);
- break;
-
- case CORINFO_INTRINSIC_Array_Address:
- case CORINFO_INTRINSIC_Array_Get:
- case CORINFO_INTRINSIC_Array_Set:
- retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
- break;
+ case CORINFO_INTRINSIC_StringLength:
+ op1 = impPopStack().val;
+ if (!opts.MinOpts() && !opts.compDbgCode)
+ {
+ GenTreeArrLen* arrLen =
+ new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
+ op1 = arrLen;
+ }
+ else
+ {
+ /* Create the expression "*(str_addr + stringLengthOffset)" */
+ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
+ gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
+ op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
+ }
+ retNode = op1;
+ break;
- case CORINFO_INTRINSIC_GetTypeFromHandle:
- op1 = impStackTop(0).val;
- if ( op1->gtOper == GT_CALL &&
- (op1->gtCall.gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(op1) )
- {
+ case CORINFO_INTRINSIC_StringGetChar:
+ op2 = impPopStack().val;
op1 = impPopStack().val;
- // Change call to return RuntimeType directly.
- op1->gtType = TYP_REF;
+ op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
+ op1->gtFlags |= GTF_INX_STRING_LAYOUT;
retNode = op1;
- }
- // Call the regular function.
- break;
+ break;
- case CORINFO_INTRINSIC_RTH_GetValueInternal:
- op1 = impStackTop(0).val;
- if ( op1->gtOper == GT_CALL &&
- (op1->gtCall.gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(op1) )
- {
- // Old tree
- // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
- //
- // New tree
- // TreeToGetNativeTypeHandle
+ case CORINFO_INTRINSIC_InitializeArray:
+ retNode = impInitializeArrayIntrinsic(sig);
+ break;
- // Remove call to helper and return the native TypeHandle pointer that was the parameter
- // to that helper.
+ case CORINFO_INTRINSIC_Array_Address:
+ case CORINFO_INTRINSIC_Array_Get:
+ case CORINFO_INTRINSIC_Array_Set:
+ retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
+ break;
- op1 = impPopStack().val;
+ case CORINFO_INTRINSIC_GetTypeFromHandle:
+ op1 = impStackTop(0).val;
+ if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
+ gtIsTypeHandleToRuntimeTypeHelper(op1))
+ {
+ op1 = impPopStack().val;
+ // Change call to return RuntimeType directly.
+ op1->gtType = TYP_REF;
+ retNode = op1;
+ }
+ // Call the regular function.
+ break;
- // Get native TypeHandle argument to old helper
- op1 = op1->gtCall.gtCallArgs;
- assert(op1->IsList());
- assert(op1->gtOp.gtOp2 == nullptr);
- op1 = op1->gtOp.gtOp1;
- retNode = op1;
- }
- // Call the regular function.
- break;
+ case CORINFO_INTRINSIC_RTH_GetValueInternal:
+ op1 = impStackTop(0).val;
+ if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
+ gtIsTypeHandleToRuntimeTypeHelper(op1))
+ {
+ // Old tree
+ // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
+ //
+ // New tree
+ // TreeToGetNativeTypeHandle
+
+ // Remove call to helper and return the native TypeHandle pointer that was the parameter
+ // to that helper.
+
+ op1 = impPopStack().val;
+
+ // Get native TypeHandle argument to old helper
+ op1 = op1->gtCall.gtCallArgs;
+ assert(op1->IsList());
+ assert(op1->gtOp.gtOp2 == nullptr);
+ op1 = op1->gtOp.gtOp1;
+ retNode = op1;
+ }
+ // Call the regular function.
+ break;
#ifndef LEGACY_BACKEND
- case CORINFO_INTRINSIC_Object_GetType:
+ case CORINFO_INTRINSIC_Object_GetType:
- op1 = impPopStack().val;
- op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
+ op1 = impPopStack().val;
+ op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
- // Set the CALL flag to indicate that the operator is implemented by a call.
- // Set also the EXCEPTION flag because the native implementation of
- // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
- op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
- retNode = op1;
- break;
+ // Set the CALL flag to indicate that the operator is implemented by a call.
+ // Set also the EXCEPTION flag because the native implementation of
+ // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
+ op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
+ retNode = op1;
+ break;
#endif
- default:
- /* Unknown intrinsic */
- break;
+ default:
+ /* Unknown intrinsic */
+ break;
}
if (mustExpand)
@@ -3402,18 +3451,17 @@ InterlockedBinOpCommon:
/*****************************************************************************/
-GenTreePtr Compiler::impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
- CORINFO_SIG_INFO * sig,
- int memberRef,
- bool readonlyCall,
- CorInfoIntrinsics intrinsicID)
+GenTreePtr Compiler::impArrayAccessIntrinsic(
+ CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
{
/* If we are generating SMALL_CODE, we don't want to use intrinsics for
the following, as it generates fatter code.
*/
if (compCodeOpt() == SMALL_CODE)
- return NULL;
+ {
+ return nullptr;
+ }
/* These intrinsics generate fatter (but faster) code and are only
done if we don't need SMALL_CODE */
@@ -3423,10 +3471,12 @@ GenTreePtr Compiler::impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
// The rank 1 case is special because it has to handle two array formats
// we will simply not do that case
if (rank > GT_ARR_MAX_RANK || rank <= 1)
- return NULL;
+ {
+ return nullptr;
+ }
- CORINFO_CLASS_HANDLE arrElemClsHnd = 0;
- var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
+ CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
+ var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
// For the ref case, we will only be able to inline if the types match
// (verifier checks for this, we don't care for the nonverified case and the
@@ -3434,7 +3484,7 @@ GenTreePtr Compiler::impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
{
// Get the call site signature
- CORINFO_SIG_INFO LocalSig;
+ CORINFO_SIG_INFO LocalSig;
eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
assert(LocalSig.hasThis());
@@ -3443,9 +3493,11 @@ GenTreePtr Compiler::impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
{
// Fetch the last argument, the one that indicates the type we are setting.
- CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
+ CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
for (unsigned r = 0; r < rank; r++)
+ {
argType = info.compCompHnd->getArgNext(argType);
+ }
typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
actualElemClsHnd = argInfo.GetClassHandle();
@@ -3463,7 +3515,7 @@ GenTreePtr Compiler::impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
// if it's not final, we can't do the optimization
if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
{
- return NULL;
+ return nullptr;
}
}
@@ -3480,45 +3532,43 @@ GenTreePtr Compiler::impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
}
if ((unsigned char)arrayElemSize != arrayElemSize)
- {
+ {
// arrayElemSize would be truncated as an unsigned char.
// This means the array element is too large. Don't do the optimization.
- return NULL;
+ return nullptr;
}
-
- GenTreePtr val = NULL;
+
+ GenTreePtr val = nullptr;
if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
{
// Assignment of a struct is more work, and there are more gets than sets.
if (elemType == TYP_STRUCT)
- return NULL;
+ {
+ return nullptr;
+ }
val = impPopStack().val;
assert(genActualType(elemType) == genActualType(val->gtType) ||
- (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
- (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
- (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
+ (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
+ (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
+ (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
}
-
+
noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
GenTreePtr inds[GT_ARR_MAX_RANK];
for (unsigned k = rank; k > 0; k--)
{
- inds[k-1] = impPopStack().val;
+ inds[k - 1] = impPopStack().val;
}
GenTreePtr arr = impPopStack().val;
assert(arr->gtType == TYP_REF);
- GenTreePtr arrElem = new (this, GT_ARR_ELEM)
- GenTreeArrElem(TYP_BYREF, arr,
- static_cast<unsigned char>(rank),
- static_cast<unsigned char>(arrayElemSize),
- elemType, &inds[0]
- );
-
+ GenTreePtr arrElem =
+ new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
+ static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
{
@@ -3527,7 +3577,7 @@ GenTreePtr Compiler::impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
{
- assert(val != NULL);
+ assert(val != nullptr);
return gtNewAssignNode(arrElem, val);
}
else
@@ -3536,32 +3586,28 @@ GenTreePtr Compiler::impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
}
}
-BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
+BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
{
unsigned i;
// do some basic checks first
if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
+ {
return FALSE;
+ }
if (verCurrentState.esStackDepth > 0)
{
// merge stack types
StackEntry* parentStack = block->bbStackOnEntry();
StackEntry* childStack = verCurrentState.esStack;
-
- for (i = 0;
- i < verCurrentState.esStackDepth ;
- i++, parentStack++, childStack++)
- {
- if (tiMergeToCommonParent(&parentStack->seTypeInfo,
- &childStack->seTypeInfo,
- changed)
- == FALSE)
+
+ for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
+ {
+ if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
{
return FALSE;
}
-
}
}
@@ -3571,7 +3617,7 @@ BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
{
// If we're tracking the CtorInitState, then it must not be unknown in the current state.
assert(verCurrentState.thisInitialized != TIS_Bottom);
-
+
// If the successor block's thisInit state is unknown, copy it from the current state.
if (block->bbThisOnEntry() == TIS_Bottom)
{
@@ -3604,7 +3650,7 @@ BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
// recursive calls back into this code path (if successors of the current bad block are
// also bad blocks).
//
- ThisInitState origTIS = verCurrentState.thisInitialized;
+ ThisInitState origTIS = verCurrentState.thisInitialized;
verCurrentState.thisInitialized = TIS_Top;
impVerifyEHBlock(block, true);
verCurrentState.thisInitialized = origTIS;
@@ -3617,7 +3663,7 @@ BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
{
assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
}
-
+
return TRUE;
}
@@ -3628,8 +3674,7 @@ BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
* "call unauthorized by host" exception.
*/
-void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block
- DEBUGARG(bool logMsg) )
+void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
{
block->bbJumpKind = BBJ_THROW;
block->bbFlags |= BBF_FAILED_VERIFICATION;
@@ -3638,15 +3683,17 @@ void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block
#ifdef DEBUG
// we need this since BeginTreeList asserts otherwise
- impTreeList = impTreeLast = NULL;
+ impTreeList = impTreeLast = nullptr;
block->bbFlags &= ~BBF_IMPORTED;
if (logMsg)
{
- JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n",
- info.compFullName, block->bbCodeOffs, block->bbCodeOffsEnd));
+ JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
+ block->bbCodeOffs, block->bbCodeOffsEnd));
if (verbose)
+ {
printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
+ }
}
if (JitConfig.DebugBreakOnVerificationFailure())
@@ -3659,14 +3706,14 @@ void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block
// if the stack is non-empty evaluate all the side-effects
if (verCurrentState.esStackDepth > 0)
+ {
impEvalSideEffects();
+ }
assert(verCurrentState.esStackDepth == 0);
- GenTreePtr op1 = gtNewHelperCallNode( CORINFO_HELP_VERIFICATION,
- TYP_VOID,
- GTF_EXCEPT,
- gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
- //verCurrentState.esStackDepth = 0;
+ GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
+ gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
+ // verCurrentState.esStackDepth = 0;
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
// The inliner is not able to handle methods that require throw block, so
@@ -3674,26 +3721,25 @@ void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block
info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
}
-
/*****************************************************************************
- *
+ *
*/
-void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
+void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
{
- // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
- // slightly different mechanism in which it calls the JIT to perform IL verification:
+ // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
+ // slightly different mechanism in which it calls the JIT to perform IL verification:
// in the case of transparent methods the VM calls for a predicate IsVerifiable()
- // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
+ // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
// If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
// it bubble up until reported by the runtime. Currently in RyuJIT, this method doesn't bubble
// up the exception, instead it embeds a throw inside the offending basic block and lets this
- // to fail upon runtime of the jitted method.
+ // to fail upon runtime of the jitted method.
//
// For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
// with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
// just try to find out whether to fail this method before even actually jitting it. So, in case
- // we detect these two conditions, instead of generating a throw statement inside the offending
+ // we detect these two conditions, instead of generating a throw statement inside the offending
// basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
// to return false and make RyuJIT behave the same way JIT64 does.
//
@@ -3701,22 +3747,23 @@ void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool l
// RyuJIT for the time being until we completely replace JIT64.
// TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecesary two passes.
- // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
+ // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
// exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
// tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
- // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
+ // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
// be turned off during importation).
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef _TARGET_64BIT_
#ifdef DEBUG
- bool canSkipVerificationResult = info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
+ bool canSkipVerificationResult =
+ info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
assert(tiVerificationNeeded || canSkipVerificationResult);
#endif // DEBUG
// Add the non verifiable flag to the compiler
- if((opts.eeFlags & CORJIT_FLG_IMPORT_ONLY) != 0)
+ if ((opts.eeFlags & CORJIT_FLG_IMPORT_ONLY) != 0)
{
tiIsVerifiableCode = FALSE;
}
@@ -3725,82 +3772,93 @@ void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool l
verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
#ifdef DEBUG
- impNoteLastILoffs(); // Remember at which BC offset the tree was finished
-#endif // DEBUG
+ impNoteLastILoffs(); // Remember at which BC offset the tree was finished
+#endif // DEBUG
}
/******************************************************************************/
-typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
+typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
{
assert(ciType < CORINFO_TYPE_COUNT);
typeInfo tiResult;
switch (ciType)
{
- case CORINFO_TYPE_STRING:
- case CORINFO_TYPE_CLASS:
- tiResult = verMakeTypeInfo(clsHnd);
- if (!tiResult.IsType(TI_REF)) // type must be consistent with element type
- return typeInfo();
- break;
+ case CORINFO_TYPE_STRING:
+ case CORINFO_TYPE_CLASS:
+ tiResult = verMakeTypeInfo(clsHnd);
+ if (!tiResult.IsType(TI_REF))
+ { // type must be consistent with element type
+ return typeInfo();
+ }
+ break;
#ifdef _TARGET_64BIT_
- case CORINFO_TYPE_NATIVEINT:
- case CORINFO_TYPE_NATIVEUINT:
- if (clsHnd)
- {
- // If we have more precise information, use it
- return verMakeTypeInfo(clsHnd);
- }
- else
- {
- return typeInfo::nativeInt();
- }
- break;
+ case CORINFO_TYPE_NATIVEINT:
+ case CORINFO_TYPE_NATIVEUINT:
+ if (clsHnd)
+ {
+ // If we have more precise information, use it
+ return verMakeTypeInfo(clsHnd);
+ }
+ else
+ {
+ return typeInfo::nativeInt();
+ }
+ break;
#endif // _TARGET_64BIT_
- case CORINFO_TYPE_VALUECLASS:
- case CORINFO_TYPE_REFANY:
- tiResult = verMakeTypeInfo(clsHnd);
+ case CORINFO_TYPE_VALUECLASS:
+ case CORINFO_TYPE_REFANY:
+ tiResult = verMakeTypeInfo(clsHnd);
// type must be constant with element type;
- if (!tiResult.IsValueClass())
+ if (!tiResult.IsValueClass())
+ {
+ return typeInfo();
+ }
+ break;
+ case CORINFO_TYPE_VAR:
+ return verMakeTypeInfo(clsHnd);
+
+ case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
+ case CORINFO_TYPE_VOID:
return typeInfo();
- break;
- case CORINFO_TYPE_VAR:
- return verMakeTypeInfo(clsHnd);
+ break;
- case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
- case CORINFO_TYPE_VOID:
- return typeInfo();
+ case CORINFO_TYPE_BYREF:
+ {
+ CORINFO_CLASS_HANDLE childClassHandle;
+ CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
+ return ByRef(verMakeTypeInfo(childType, childClassHandle));
+ }
break;
- case CORINFO_TYPE_BYREF: {
- CORINFO_CLASS_HANDLE childClassHandle;
- CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
- return ByRef(verMakeTypeInfo(childType, childClassHandle));
- }
- break;
-
- default:
- if (clsHnd) // If we have more precise information, use it
- return typeInfo(TI_STRUCT, clsHnd);
- else
- return typeInfo(JITtype2tiType(ciType));
+ default:
+ if (clsHnd)
+ { // If we have more precise information, use it
+ return typeInfo(TI_STRUCT, clsHnd);
+ }
+ else
+ {
+ return typeInfo(JITtype2tiType(ciType));
+ }
}
return tiResult;
}
/******************************************************************************/
-typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
+typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
{
- if (clsHnd == NULL)
+ if (clsHnd == nullptr)
+ {
return typeInfo();
-
+ }
+
// Byrefs should only occur in method and local signatures, which are accessed
// using ICorClassInfo and ICorClassInfo.getChildType.
// So findClass() and getClassAttribs() should not be called for byrefs
-
+
if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
{
assert(!"Did findClass() return a Byref?");
@@ -3808,52 +3866,66 @@ typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bash
}
unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
-
+
if (attribs & CORINFO_FLG_VALUECLASS)
{
CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
-
+
// Meta-data validation should ensure that CORINF_TYPE_BYREF should
// not occur here, so we may want to change this to an assert instead.
if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
+ {
return typeInfo();
+ }
#ifdef _TARGET_64BIT_
- if(t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
+ if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
{
return typeInfo::nativeInt();
}
#endif // _TARGET_64BIT_
if (t != CORINFO_TYPE_UNDEF)
- return(typeInfo(JITtype2tiType(t)));
- else if (bashStructToRef)
- return(typeInfo(TI_REF, clsHnd));
- else
- return(typeInfo(TI_STRUCT, clsHnd));
+ {
+ return (typeInfo(JITtype2tiType(t)));
+ }
+ else if (bashStructToRef)
+ {
+ return (typeInfo(TI_REF, clsHnd));
+ }
+ else
+ {
+ return (typeInfo(TI_STRUCT, clsHnd));
+ }
}
else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
{
// See comment in _typeInfo.h for why we do it this way.
- return(typeInfo(TI_REF, clsHnd, true));
+ return (typeInfo(TI_REF, clsHnd, true));
}
else
{
- return(typeInfo(TI_REF, clsHnd));
+ return (typeInfo(TI_REF, clsHnd));
}
}
/******************************************************************************/
BOOL Compiler::verIsSDArray(typeInfo ti)
{
- if (ti.IsNullObjRef()) // nulls are SD arrays
+ if (ti.IsNullObjRef())
+ { // nulls are SD arrays
return TRUE;
+ }
if (!ti.IsType(TI_REF))
+ {
return FALSE;
+ }
if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
+ {
return FALSE;
+ }
return TRUE;
}
@@ -3863,12 +3935,14 @@ BOOL Compiler::verIsSDArray(typeInfo ti)
typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
{
- assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explictly since that is a success case
+    assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
if (!verIsSDArray(arrayObjectType))
+ {
return typeInfo();
+ }
- CORINFO_CLASS_HANDLE childClassHandle = NULL;
+ CORINFO_CLASS_HANDLE childClassHandle = nullptr;
CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
return verMakeTypeInfo(ciType, childClassHandle);
@@ -3876,59 +3950,64 @@ typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
/*****************************************************************************
*/
-typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig,
- CORINFO_ARG_LIST_HANDLE args)
+typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
{
CORINFO_CLASS_HANDLE classHandle;
- CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
+ CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
- var_types type = JITtype2varType(ciType);
+ var_types type = JITtype2varType(ciType);
if (varTypeIsGC(type))
{
// For efficiency, getArgType only returns something in classHandle for
- // value types. For other types that have addition type info, you
+        // value types. For other types that have additional type info, you
// have to call back explicitly
classHandle = info.compCompHnd->getArgClass(sig, args);
if (!classHandle)
+ {
NO_WAY("Could not figure out Class specified in argument or local signature");
+ }
}
-
+
return verMakeTypeInfo(ciType, classHandle);
-}
+}
/*****************************************************************************/
-// This does the expensive check to figure out whether the method
-// needs to be verified. It is called only when we fail verification,
+// This does the expensive check to figure out whether the method
+// needs to be verified. It is called only when we fail verification,
// just before throwing the verification exception.
BOOL Compiler::verNeedsVerification()
{
- // If we have previously determined that verification is NOT needed
+ // If we have previously determined that verification is NOT needed
// (for example in Compiler::compCompile), that means verification is really not needed.
// Return the same decision we made before.
// (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
if (!tiVerificationNeeded)
+ {
return tiVerificationNeeded;
+ }
assert(tiVerificationNeeded);
// Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
// obtain the answer.
- CorInfoCanSkipVerificationResult canSkipVerificationResult =
+ CorInfoCanSkipVerificationResult canSkipVerificationResult =
info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
// canSkipVerification will return one of the following three values:
// CORINFO_VERIFICATION_CANNOT_SKIP = 0, // Cannot skip verification during jit time.
// CORINFO_VERIFICATION_CAN_SKIP = 1, // Can skip verification during jit time.
// CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
- // but need to insert a callout to the VM to ask during runtime
- // whether to skip verification or not.
-
+ // but need to insert a callout to the VM to ask during runtime
+ // whether to skip verification or not.
+
// Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
+ {
tiRuntimeCalloutNeeded = true;
+ }
if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
{
@@ -3940,16 +4019,20 @@ BOOL Compiler::verNeedsVerification()
// When tiVerificationNeeded is true, JIT will do the verification during JIT time.
// The following line means we will NOT do jit time verification if canSkipVerification
// returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
- tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
+ tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
return tiVerificationNeeded;
}
BOOL Compiler::verIsByRefLike(const typeInfo& ti)
-{
+{
if (ti.IsByRef())
+ {
return TRUE;
+ }
if (!ti.IsType(TI_STRUCT))
+ {
return FALSE;
+ }
return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
}
@@ -3960,23 +4043,22 @@ BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
return TRUE;
}
else
- {
+ {
return FALSE;
}
}
BOOL Compiler::verIsBoxable(const typeInfo& ti)
-{
- return ( ti.IsPrimitiveType()
- || ti.IsObjRef() // includes boxed generic type variables
- || ti.IsUnboxedGenericTypeVar()
- || (ti.IsType(TI_STRUCT) &&
- //exclude byreflike structs
- !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
+{
+ return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
+ || ti.IsUnboxedGenericTypeVar() ||
+ (ti.IsType(TI_STRUCT) &&
+ // exclude byreflike structs
+ !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
}
// Is it a boxed value type?
-bool Compiler::verIsBoxedValueType(typeInfo ti)
+bool Compiler::verIsBoxedValueType(typeInfo ti)
{
if (ti.GetType() == TI_REF)
{
@@ -3994,28 +4076,31 @@ bool Compiler::verIsBoxedValueType(typeInfo ti)
* Check if a TailCall is legal.
*/
-bool Compiler::verCheckTailCallConstraint (OPCODE opcode,
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
- bool speculative // If true, won't throw if verificatoin fails. Instead it will
- // return false to the caller.
- // If false, it will throw.
- )
+bool Compiler::verCheckTailCallConstraint(
+ OPCODE opcode,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
+    bool                    speculative                // If true, won't throw if verification fails. Instead it will
+ // return false to the caller.
+ // If false, it will throw.
+ )
{
- DWORD mflags;
- CORINFO_SIG_INFO sig;
- unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
- // this counter is used to keep track of how many items have been
- // virtually popped
-
- CORINFO_METHOD_HANDLE methodHnd = 0;
- CORINFO_CLASS_HANDLE methodClassHnd = 0;
- unsigned methodClassFlgs = 0;
-
+ DWORD mflags;
+ CORINFO_SIG_INFO sig;
+ unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
+ // this counter is used to keep track of how many items have been
+ // virtually popped
+
+ CORINFO_METHOD_HANDLE methodHnd = nullptr;
+ CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
+ unsigned methodClassFlgs = 0;
+
assert(impOpcodeIsCallOpcode(opcode));
if (compIsForInlining())
+ {
return false;
+ }
// for calli, VerifyOrReturn that this is not a virtual method
if (opcode == CEE_CALLI)
@@ -4025,16 +4110,16 @@ bool Compiler::verCheckTailCallConstraint (OPCODE opc
// We don't know the target method, so we have to infer the flags, or
// assume the worst-case.
- mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
+ mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
}
- else
+ else
{
methodHnd = pResolvedToken->hMethod;
mflags = info.compCompHnd->getMethodAttribs(methodHnd);
- // When verifying generic code we pair the method handle with its
- // owning class to get the exact method signature.
+ // When verifying generic code we pair the method handle with its
+ // owning class to get the exact method signature.
methodClassHnd = pResolvedToken->hClass;
assert(methodClassHnd);
@@ -4045,26 +4130,30 @@ bool Compiler::verCheckTailCallConstraint (OPCODE opc
}
// We must have got the methodClassHnd if opcode is not CEE_CALLI
- assert( (methodHnd!=0 && methodClassHnd!=0) || opcode == CEE_CALLI);
+ assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
+ {
eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
+ }
// check compatibility of the arguments
- unsigned int argCount; argCount = sig.numArgs;
- CORINFO_ARG_LIST_HANDLE args; args = sig.args;
+ unsigned int argCount;
+ argCount = sig.numArgs;
+ CORINFO_ARG_LIST_HANDLE args;
+ args = sig.args;
while (argCount--)
{
typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
// check that the argument is not a byref for tailcalls
- VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
+ VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
// For unsafe code, we might have parameters containing pointer to the stack location.
// Disallow the tailcall for this kind.
CORINFO_CLASS_HANDLE classHandle;
- CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
- VerifyOrReturnSpeculative(ciType!=CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
+ CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
+ VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
args = info.compCompHnd->getArgNext(args);
}
@@ -4075,17 +4164,19 @@ bool Compiler::verCheckTailCallConstraint (OPCODE opc
// check for 'this' which is on non-static methods, not called via NEWOBJ
if (!(mflags & CORINFO_FLG_STATIC))
{
- // Always update the popCount.
+ // Always update the popCount.
// This is crucial for the stack calculation to be correct.
typeInfo tiThis = impStackTop(popCount).seTypeInfo;
popCount++;
-
+
if (opcode == CEE_CALLI)
{
// For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
// on the stack.
if (tiThis.IsValueClass())
+ {
tiThis.MakeByRef();
+ }
VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
}
else
@@ -4093,18 +4184,20 @@ bool Compiler::verCheckTailCallConstraint (OPCODE opc
// Check type compatibility of the this argument
typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
if (tiDeclaredThis.IsValueClass())
+ {
tiDeclaredThis.MakeByRef();
+ }
VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
}
}
// Tail calls on constrained calls should be illegal too:
- // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
+ // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
// Get the exact view of the signature for an array method
- if (sig.retType != CORINFO_TYPE_VOID)
+ if (sig.retType != CORINFO_TYPE_VOID)
{
if (methodClassFlgs & CORINFO_FLG_ARRAY)
{
@@ -4113,49 +4206,48 @@ bool Compiler::verCheckTailCallConstraint (OPCODE opc
}
}
- typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
- typeInfo tiCallerRetType = verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
-
- // void return type gets morphed into the error type, so we have to treat them specially here
+ typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
+ typeInfo tiCallerRetType =
+ verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
+
+ // void return type gets morphed into the error type, so we have to treat them specially here
if (sig.retType == CORINFO_TYPE_VOID)
- VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID,
- "tailcall return mismatch",
+ {
+ VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
speculative);
- else
+ }
+ else
+ {
VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
- NormaliseForStack(tiCallerRetType), true),
- "tailcall return mismatch",
- speculative);
+ NormaliseForStack(tiCallerRetType), true),
+ "tailcall return mismatch", speculative);
+ }
// for tailcall, stack must be empty
VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
- return true; // Yes, tailcall is legal
+ return true; // Yes, tailcall is legal
}
-
-
-
/*****************************************************************************
*
* Checks the IL verification rules for the call
*/
-void Compiler::verVerifyCall (OPCODE opcode,
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken,
- bool tailCall,
- bool readonlyCall,
- const BYTE* delegateCreateStart,
- const BYTE* codeAddr,
- CORINFO_CALL_INFO* callInfo
- DEBUGARG(const char * methodName))
+void Compiler::verVerifyCall(OPCODE opcode,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
+ bool tailCall,
+ bool readonlyCall,
+ const BYTE* delegateCreateStart,
+ const BYTE* codeAddr,
+ CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
{
- DWORD mflags;
- CORINFO_SIG_INFO* sig = NULL;
- unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
- // this counter is used to keep track of how many items have been
- // virtually popped
+ DWORD mflags;
+ CORINFO_SIG_INFO* sig = nullptr;
+ unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
+ // this counter is used to keep track of how many items have been
+ // virtually popped
// for calli, VerifyOrReturn that this is not a virtual method
if (opcode == CEE_CALLI)
@@ -4168,144 +4260,146 @@ void Compiler::verVerifyCall (OPCODE opcode,
mflags = callInfo->verMethodFlags;
sig = &callInfo->verSig;
-
+
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
+ {
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
+ }
// opcode specific check
unsigned methodClassFlgs = callInfo->classFlags;
switch (opcode)
{
- case CEE_CALLVIRT:
- // cannot do callvirt on valuetypes
- VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
- VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
- break;
+ case CEE_CALLVIRT:
+ // cannot do callvirt on valuetypes
+ VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
+ VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
+ break;
- case CEE_NEWOBJ: {
- assert(!tailCall); // Importer should not allow this
- VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
- "newobj must be on instance");
-
- if (methodClassFlgs & CORINFO_FLG_DELEGATE)
- {
- VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
- typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
- typeInfo tiDeclaredFtn = verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
- VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
-
- assert(popCount == 0);
- typeInfo tiActualObj = impStackTop(1).seTypeInfo;
- typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
-
- VerifyOrReturn(tiActualFtn.IsMethod(),
- "delegate needs method as first arg");
- VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true),
- "delegate object type mismatch");
- VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
- "delegate object type mismatch");
-
- CORINFO_CLASS_HANDLE objTypeHandle = tiActualObj.IsNullObjRef() ? NULL
- : tiActualObj.GetClassHandleForObjRef();
-
- // the method signature must be compatible with the delegate's invoke method
-
- // check that for virtual functions, the type of the object used to get the
- // ftn ptr is the same as the type of the object passed to the delegate ctor.
- // since this is a bit of work to determine in general, we pattern match stylized
- // code sequences
-
- // the delegate creation code check, which used to be done later, is now done here
- // so we can read delegateMethodRef directly from
- // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
- // we then use it in our call to isCompatibleDelegate().
-
- mdMemberRef delegateMethodRef = mdMemberRefNil;
- VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
- "must create delegates with certain IL");
-
- CORINFO_RESOLVED_TOKEN delegateResolvedToken;
- delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
- delegateResolvedToken.tokenScope = info.compScopeHnd;
- delegateResolvedToken.token = delegateMethodRef;
- delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
- info.compCompHnd->resolveToken(&delegateResolvedToken);
-
- CORINFO_CALL_INFO delegateCallInfo;
- eeGetCallInfo(&delegateResolvedToken, 0 /* constraint typeRef */,
- addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
-
- BOOL isOpenDelegate = FALSE;
- VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(
- objTypeHandle,
- delegateResolvedToken.hClass,
- tiActualFtn.GetMethod(),
- pResolvedToken->hClass,
- &isOpenDelegate),
- "function incompatible with delegate");
-
-
- // check the constraints on the target method
- VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
- "delegate target has unsatisfied class constraints");
- VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,tiActualFtn.GetMethod()),
- "delegate target has unsatisfied method constraints");
-
- // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
- // for additional verification rules for delegates
- CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
- DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
- if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
+ case CEE_NEWOBJ:
+ {
+ assert(!tailCall); // Importer should not allow this
+ VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
+ "newobj must be on instance");
+
+ if (methodClassFlgs & CORINFO_FLG_DELEGATE)
{
+ VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
+ typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
+ typeInfo tiDeclaredFtn =
+ verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
+ VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
- if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
-#ifdef DEBUG
- && StrictCheckForNonVirtualCallToVirtualMethod()
-#endif
- )
+ assert(popCount == 0);
+ typeInfo tiActualObj = impStackTop(1).seTypeInfo;
+ typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
+
+ VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
+ VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
+ VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
+ "delegate object type mismatch");
+
+ CORINFO_CLASS_HANDLE objTypeHandle =
+ tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
+
+ // the method signature must be compatible with the delegate's invoke method
+
+ // check that for virtual functions, the type of the object used to get the
+ // ftn ptr is the same as the type of the object passed to the delegate ctor.
+ // since this is a bit of work to determine in general, we pattern match stylized
+ // code sequences
+
+ // the delegate creation code check, which used to be done later, is now done here
+ // so we can read delegateMethodRef directly from
+ // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
+ // we then use it in our call to isCompatibleDelegate().
+
+ mdMemberRef delegateMethodRef = mdMemberRefNil;
+ VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
+ "must create delegates with certain IL");
+
+ CORINFO_RESOLVED_TOKEN delegateResolvedToken;
+ delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
+ delegateResolvedToken.tokenScope = info.compScopeHnd;
+ delegateResolvedToken.token = delegateMethodRef;
+ delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
+ info.compCompHnd->resolveToken(&delegateResolvedToken);
+
+ CORINFO_CALL_INFO delegateCallInfo;
+ eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
+ addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
+
+ BOOL isOpenDelegate = FALSE;
+ VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
+ tiActualFtn.GetMethod(), pResolvedToken->hClass,
+ &isOpenDelegate),
+ "function incompatible with delegate");
+
+ // check the constraints on the target method
+ VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
+ "delegate target has unsatisfied class constraints");
+ VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
+ tiActualFtn.GetMethod()),
+ "delegate target has unsatisfied method constraints");
+
+ // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
+ // for additional verification rules for delegates
+ CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
+ DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
+ if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
{
- if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
+
+ if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
+#ifdef DEBUG
+ && StrictCheckForNonVirtualCallToVirtualMethod()
+#endif
+ )
{
- VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
- verIsBoxedValueType(tiActualObj),
- "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
- "a boxed value type.");
+ if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
+ {
+ VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
+ verIsBoxedValueType(tiActualObj),
+ "The 'this' parameter to the call must be either the calling method's "
+ "'this' parameter or "
+ "a boxed value type.");
+ }
}
}
- }
- if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
- {
- BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
+ if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
+ {
+ BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
- Verify(targetIsStatic || !isOpenDelegate,
- "Unverifiable creation of an open instance delegate for a protected member.");
+ Verify(targetIsStatic || !isOpenDelegate,
+ "Unverifiable creation of an open instance delegate for a protected member.");
- CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
- ? info.compClassHnd
- : tiActualObj.GetClassHandleForObjRef();
+ CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
+ ? info.compClassHnd
+ : tiActualObj.GetClassHandleForObjRef();
- // In the case of protected methods, it is a requirement that the 'this'
- // pointer be a subclass of the current context. Perform this check.
- Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
- "Accessing protected method through wrong type.");
+ // In the case of protected methods, it is a requirement that the 'this'
+ // pointer be a subclass of the current context. Perform this check.
+ Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
+ "Accessing protected method through wrong type.");
+ }
+ goto DONE_ARGS;
}
- goto DONE_ARGS;
}
- }
// fall thru to default checks
- default:
- VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
+ default:
+ VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
}
VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
"can only newobj a delegate constructor");
// check compatibility of the arguments
- unsigned int argCount; argCount = sig->numArgs;
- CORINFO_ARG_LIST_HANDLE args; args = sig->args;
+ unsigned int argCount;
+ argCount = sig->numArgs;
+ CORINFO_ARG_LIST_HANDLE args;
+ args = sig->args;
while (argCount--)
{
- typeInfo tiActual = impStackTop(popCount+argCount).seTypeInfo;
+ typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
@@ -4313,7 +4407,6 @@ void Compiler::verVerifyCall (OPCODE opcode,
args = info.compCompHnd->getArgNext(args);
}
-
DONE_ARGS:
// update popCount
@@ -4325,27 +4418,33 @@ DONE_ARGS:
{
typeInfo tiThis = impStackTop(popCount).seTypeInfo;
popCount++;
-
+
// If it is null, we assume we can access it (since it will AV shortly)
// If it is anything but a reference class, there is no hierarchy, so
// again, we don't need the precise instance class to compute 'protected' access
if (tiThis.IsType(TI_REF))
+ {
instanceClassHnd = tiThis.GetClassHandleForObjRef();
-
+ }
+
// Check type compatibility of the this argument
typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
if (tiDeclaredThis.IsValueClass())
+ {
tiDeclaredThis.MakeByRef();
+ }
- // If this is a call to the base class .ctor, set thisPtr Init for
+ // If this is a call to the base class .ctor, set thisPtr Init for
// this block.
if (mflags & CORINFO_FLG_CONSTRUCTOR)
{
- if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
+ if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
{
- assert(verCurrentState.thisInitialized != TIS_Bottom); // This should never be the case just from the logic of the verifier.
- VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit, "Call to base class constructor when 'this' is possibly initialized");
+ assert(verCurrentState.thisInitialized !=
+ TIS_Bottom); // This should never be the case just from the logic of the verifier.
+ VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
+ "Call to base class constructor when 'this' is possibly initialized");
// Otherwise, 'this' is now initialized.
verCurrentState.thisInitialized = TIS_Init;
tiThis.SetInitialisedObjRef();
@@ -4355,21 +4454,24 @@ DONE_ARGS:
// We allow direct calls to value type constructors
// NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
// constrained callvirt to illegally re-enter a .ctor on a value of reference type.
- VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(), "Bad call to a constructor");
+ VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
+ "Bad call to a constructor");
}
}
- if (pConstrainedResolvedToken != NULL) {
- VerifyOrReturn(tiThis.IsByRef(),"non-byref this type in constrained call");
+ if (pConstrainedResolvedToken != nullptr)
+ {
+ VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
// We just dereference this and test for equality
tiThis.DereferenceByRef();
- VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),"this type mismatch with constrained type operand");
+ VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
+ "this type mismatch with constrained type operand");
// Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
- tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
+ tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
}
// To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
@@ -4379,7 +4481,7 @@ DONE_ARGS:
}
VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
-
+
if (tiThis.IsByRef())
{
// Find the actual type where the method exists (as opposed to what is declared
@@ -4387,50 +4489,49 @@ DONE_ARGS:
// while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
- VerifyOrReturn(eeIsValueClass(actualClassHnd),
+ VerifyOrReturn(eeIsValueClass(actualClassHnd),
"Call to base type of valuetype (which is never a valuetype)");
}
// Rules for non-virtual call to a non-final virtual method:
-
- // Define:
+
+ // Define:
// The "this" pointer is considered to be "possibly written" if
// 1. Its address have been taken (LDARGA 0) anywhere in the method.
// (or)
// 2. It has been stored to (STARG.0) anywhere in the method.
// A non-virtual call to a non-final virtual method is only allowed if
- // 1. The this pointer passed to the callee is an instance of a boxed value type.
+ // 1. The this pointer passed to the callee is an instance of a boxed value type.
// (or)
// 2. The this pointer passed to the callee is the current method's this pointer.
// (and) The current method's this pointer is not "possibly written".
- // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
- // virtual methods. (Luckily this does affect .ctors, since they are not virtual).
- // This is stronger that is strictly needed, but implementing a laxer rule is significantly
+ // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
+            // virtual methods. (Luckily this does not affect .ctors, since they are not virtual.)
+            // This is stronger than is strictly needed, but implementing a laxer rule is significantly
// hard and more error prone.
- if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
-#ifdef DEBUG
- && StrictCheckForNonVirtualCallToVirtualMethod()
-#endif
- )
+ if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
+#ifdef DEBUG
+ && StrictCheckForNonVirtualCallToVirtualMethod()
+#endif
+ )
{
if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
{
- VerifyOrReturn(tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
- verIsBoxedValueType(tiThis),
- "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
- "a boxed value type.");
+ VerifyOrReturn(
+ tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
+ "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
+ "a boxed value type.");
}
}
-
}
// check any constraints on the callee's class and type parameters
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
"method has unsatisfied class constraints");
- VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass,pResolvedToken->hMethod),
+ VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
"method has unsatisfied method constraints");
if (mflags & CORINFO_FLG_PROTECTED)
@@ -4440,7 +4541,7 @@ DONE_ARGS:
}
// Get the exact view of the signature for an array method
- if (sig->retType != CORINFO_TYPE_VOID)
+ if (sig->retType != CORINFO_TYPE_VOID)
{
eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
}
@@ -4450,22 +4551,18 @@ DONE_ARGS:
// so we can trust that only the Address operation returns a byref.
if (readonlyCall)
{
- typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
- VerifyOrReturn ((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(), "unexpected use of readonly prefix");
+ typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
+ VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
+ "unexpected use of readonly prefix");
}
// Verify the tailcall
- if (tailCall) {
- verCheckTailCallConstraint(opcode,
- pResolvedToken,
- pConstrainedResolvedToken,
- false
- );
+ if (tailCall)
+ {
+ verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
}
-
}
-
/*****************************************************************************
* Checks that a delegate creation is done using the following pattern:
* dup
@@ -4474,20 +4571,22 @@ DONE_ARGS:
* ldftn targetMemberRef
*
* 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
- * not in this basic block)
+ * not in this basic block)
*
* targetMemberRef is read from the code sequence.
* targetMemberRef is validated iff verificationNeeded.
*/
-BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef &targetMemberRef)
+BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
+ const BYTE* codeAddr,
+ mdMemberRef& targetMemberRef)
{
- if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
+ if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
{
targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
return TRUE;
}
- else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
+ else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
{
targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
return TRUE;
@@ -4496,11 +4595,10 @@ BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart, const B
return FALSE;
}
-
typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
{
Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
- typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
+ typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
if (!tiCompatibleWith(value, normPtrVal, true))
{
@@ -4513,9 +4611,9 @@ typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, c
typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
{
assert(!instrType.IsStruct());
-
+
typeInfo ptrVal;
- if (ptr.IsByRef())
+ if (ptr.IsByRef())
{
ptrVal = DereferenceByRef(ptr);
if (instrType.IsObjRef() && !ptrVal.IsObjRef())
@@ -4523,9 +4621,9 @@ typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType
Verify(false, "bad pointer");
compUnsafeCastUsed = true;
}
- else if(!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
+ else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
{
- Verify(false, "pointer not consistent with instr");
+ Verify(false, "pointer not consistent with instr");
compUnsafeCastUsed = true;
}
}
@@ -4534,41 +4632,39 @@ typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType
Verify(false, "pointer not byref");
compUnsafeCastUsed = true;
}
-
+
return ptrVal;
}
-
-// Verify that the field is used properly. 'tiThis' is NULL for statics,
-// 'fieldFlags' is the fields attributes, and mutator is TRUE if it is a
-// ld*flda or a st*fld.
+// Verify that the field is used properly. 'tiThis' is NULL for statics,
+// 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
+// ld*flda or a st*fld.
// 'enclosingClass' is given if we are accessing a field in some specific type.
-void Compiler::verVerifyField(
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- const CORINFO_FIELD_INFO& fieldInfo,
- const typeInfo* tiThis,
- BOOL mutator,
- BOOL allowPlainStructAsThis)
+void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ const CORINFO_FIELD_INFO& fieldInfo,
+ const typeInfo* tiThis,
+ BOOL mutator,
+ BOOL allowPlainStructAsThis)
{
CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
- unsigned fieldFlags = fieldInfo.fieldFlags;
- CORINFO_CLASS_HANDLE instanceClass = info.compClassHnd; // for statics, we imagine the instance is the current class.
+ unsigned fieldFlags = fieldInfo.fieldFlags;
+ CORINFO_CLASS_HANDLE instanceClass =
+ info.compClassHnd; // for statics, we imagine the instance is the current class.
bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
- if (mutator)
+ if (mutator)
{
Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA bases static");
if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
{
- Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) &&
- enclosingClass == info.compClassHnd && info.compIsStatic == isStaticField,
+ Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
+ info.compIsStatic == isStaticField,
"bad use of initonly field (set or address taken)");
}
-
}
- if (tiThis == 0)
+ if (tiThis == nullptr)
{
Verify(isStaticField, "used static opcode with non-static field");
}
@@ -4576,17 +4672,18 @@ void Compiler::verVerifyField(
{
typeInfo tThis = *tiThis;
- if (allowPlainStructAsThis &&
- tThis.IsValueClass())
+ if (allowPlainStructAsThis && tThis.IsValueClass())
{
tThis.MakeByRef();
}
-
+
// If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
// again, we don't need the precise instance class to compute 'protected' access
if (tiThis->IsType(TI_REF))
+ {
instanceClass = tiThis->GetClassHandleForObjRef();
+ }
// Note that even if the field is static, we require that the this pointer
        // satisfy the same constraints as a non-static field. This happens to
@@ -4598,16 +4695,16 @@ void Compiler::verVerifyField(
// we allow read-only tThis, on any field access (even stores!), because if the
// class implementor wants to prohibit stores he should make the field private.
- // we do this by setting the read-only bit on the type we compare tThis to.
+ // we do this by setting the read-only bit on the type we compare tThis to.
tiDeclaredThis.SetIsReadonlyByRef();
}
else if (verTrackObjCtorInitState && tThis.IsThisPtr())
{
- // Any field access is legal on "uninitialized" this pointers.
+ // Any field access is legal on "uninitialized" this pointers.
// The easiest way to implement this is to simply set the
// initialized bit for the duration of the type check on the
- // field access only. It does not change the state of the "this"
- // for the function as a whole. Note that the "tThis" is a copy
+ // field access only. It does not change the state of the "this"
+ // for the function as a whole. Note that the "tThis" is a copy
// of the original "this" type (*tiThis) passed in.
tThis.SetInitialisedObjRef();
}
@@ -4634,16 +4731,16 @@ void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsig
{
#ifdef _TARGET_64BIT_
Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
-#else // _TARGET_64BIT
- // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
- // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
+#else // _TARGET_64BIT
+ // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
+ // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
// but compatible, since we can coalesce native int with int32 (see section III.1.5).
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
#endif // !_TARGET_64BIT_
}
- else if (tiOp1.IsObjRef())
+ else if (tiOp1.IsObjRef())
{
- switch (opcode)
+ switch (opcode)
{
case CEE_BEQ_S:
case CEE_BEQ:
@@ -4658,31 +4755,34 @@ void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsig
Verify(tiOp2.IsObjRef(), "Cond type mismatch");
}
else if (tiOp1.IsByRef())
+ {
Verify(tiOp2.IsByRef(), "Cond type mismatch");
- else
+ }
+ else
+ {
Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
+ }
}
void Compiler::verVerifyThisPtrInitialised()
{
if (verTrackObjCtorInitState)
+ {
Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
+ }
}
-BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context,
- CORINFO_CLASS_HANDLE target)
+BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
{
// Either target == context, in this case calling an alternate .ctor
// Or target is the immediate parent of context
- return ((target == context) ||
- (target == info.compCompHnd->getParentType(context)));
+ return ((target == context) || (target == info.compCompHnd->getParentType(context)));
}
-
-GenTreePtr Compiler::impImportLdvirtftn (GenTreePtr thisPtr,
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_CALL_INFO* pCallInfo)
+GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr thisPtr,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_CALL_INFO* pCallInfo)
{
if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
{
@@ -4692,8 +4792,8 @@ GenTreePtr Compiler::impImportLdvirtftn (GenTreePtr thisPtr,
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun() && !pCallInfo->exactContextNeedsRuntimeLookup)
{
- GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR,
- TYP_I_IMPL, GTF_EXCEPT, gtNewArgList(thisPtr));
+ GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
+ gtNewArgList(thisPtr));
call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
@@ -4701,50 +4801,53 @@ GenTreePtr Compiler::impImportLdvirtftn (GenTreePtr thisPtr,
}
#endif
- // Get the exact descriptor for the static callsite
+ // Get the exact descriptor for the static callsite
GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
- if (exactTypeDesc == NULL) // compDonotInline()
- return NULL;
+ if (exactTypeDesc == nullptr)
+ { // compDonotInline()
+ return nullptr;
+ }
GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
- if (exactMethodDesc == NULL) // compDonotInline()
- return NULL;
-
+ if (exactMethodDesc == nullptr)
+ { // compDonotInline()
+ return nullptr;
+ }
+
GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
-
+
helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
-
+
helpArgs = gtNewListNode(thisPtr, helpArgs);
-
+
// Call helper function. This gets the target address of the final destination callsite.
-
- return gtNewHelperCallNode( CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
-}
+ return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
+}
/*****************************************************************************
*
* Build and import a box node
*/
-void Compiler::impImportAndPushBox (CORINFO_RESOLVED_TOKEN * pResolvedToken)
+void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
// Get the tree for the type handle for the boxed object. In the case
// of shared generic code or ngen'd code this might be an embedded
// computation.
    // Note we can only do this if the class constructor has been called.
    // We can always do it on primitive types.
-
+
GenTreePtr op1 = nullptr;
GenTreePtr op2 = nullptr;
- var_types lclTyp;
-
+ var_types lclTyp;
+
impSpillSpecialSideEff();
-
+
// Now get the expression to box from the stack.
CORINFO_CLASS_HANDLE operCls;
- GenTreePtr exprToBox = impPopStack(operCls).val;
-
+ GenTreePtr exprToBox = impPopStack(operCls).val;
+
CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
if (boxHelper == CORINFO_HELP_BOX)
{
@@ -4752,27 +4855,29 @@ void Compiler::impImportAndPushBox (CORINFO_RESOLVED_TOKEN * pResolved
// Box(expr) gets morphed into
// temp = new(clsHnd)
// cpobj(temp+4, expr, clsHnd)
- // push temp
+ // push temp
// The code paths differ slightly below for structs and primitives because
// "cpobj" differs in these cases. In one case you get
// impAssignStructPtr(temp+4, expr, clsHnd)
// and the other you get
// *(temp+4) = expr
-
- if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
+
+ if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
+ {
impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
-
- // needs to stay in use until this box expression is appended
+ }
+
+ // needs to stay in use until this box expression is appended
        // to some other node. We approximate this by keeping it alive until
// the opcode stack becomes empty
impBoxTempInUse = true;
-
+
#ifdef FEATURE_READYTORUN_COMPILER
bool usingReadyToRunHelper = false;
if (opts.IsReadyToRun())
{
- op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
+ op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
usingReadyToRunHelper = (op1 != NULL);
}
@@ -4787,39 +4892,44 @@ void Compiler::impImportAndPushBox (CORINFO_RESOLVED_TOKEN * pResolved
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
// Ensure that the value class is restored
- op2 = impTokenToHandle(pResolvedToken, NULL, TRUE /* mustRestoreHandle */);
- if (op2 == NULL) // compDonotInline()
+ op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
+ if (op2 == nullptr)
+ { // compDonotInline()
return;
+ }
- op1 = gtNewHelperCallNode( info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
- TYP_REF, 0,
- gtNewArgList(op2));
+ op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
+ gtNewArgList(op2));
}
/* Remember that this basic block contains 'new' of an array */
compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
-
+
GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
- GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
+ GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
-
+
if (varTypeIsStruct(exprToBox))
{
assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
- op1 = impAssignStructPtr(op1, exprToBox, operCls,(unsigned)CHECK_SPILL_ALL);
+ op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
}
else
{
lclTyp = exprToBox->TypeGet();
if (lclTyp == TYP_BYREF)
+ {
lclTyp = TYP_I_IMPL;
+ }
CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
if (impIsPrimitive(jitType))
+ {
lclTyp = JITtype2varType(jitType);
+ }
assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
var_types srcTyp = exprToBox->TypeGet();
@@ -4828,39 +4938,41 @@ void Compiler::impImportAndPushBox (CORINFO_RESOLVED_TOKEN * pResolved
if (srcTyp != dstTyp)
{
assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
- (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
+ (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
}
op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
}
op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
- op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
+ op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
// Record that this is a "box" node.
op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
// If it is a value class, mark the "box" node. We can use this information
- // to optimise several cases:
+ // to optimise several cases:
// "box(x) == null" --> false
// "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
// "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
op1->gtFlags |= GTF_BOX_VALUE;
assert(op1->IsBoxedValue());
- assert(asg->gtOper == GT_ASG);
+ assert(asg->gtOper == GT_ASG);
}
- else
+ else
{
// Don't optimize, just call the helper and be done with it
// Ensure that the value class is restored
op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
- if (op2 == nullptr) // compDonotInline()
+ if (op2 == nullptr)
+ { // compDonotInline()
return;
+ }
GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
- op1 = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
+ op1 = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
}
/* Push the result back on the stack, */
@@ -4883,23 +4995,24 @@ void Compiler::impImportAndPushBox (CORINFO_RESOLVED_TOKEN * pResolved
// pushed on the IL stack on entry to this method.
//
// Notes:
-// Multi-dimensional array constructors are imported as calls to a JIT
+// Multi-dimensional array constructors are imported as calls to a JIT
// helper, not as regular calls.
-void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken,
- CORINFO_CALL_INFO* pCallInfo)
+void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
- if (classHandle == nullptr) // compDonotInline()
+ if (classHandle == nullptr)
+ { // compDonotInline()
return;
+ }
assert(pCallInfo->sig.numArgs);
- GenTreePtr node;
+ GenTreePtr node;
GenTreeArgList* args;
//
- // There are two different JIT helpers that can be used to allocate
+ // There are two different JIT helpers that can be used to allocate
// multi-dimensional arrays:
//
// - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
@@ -4908,7 +5021,7 @@ void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken,
// - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
// pointer to block of int32s. This variant is more portable.
//
- // The non-varargs helper is enabled for CoreRT only for now. Enabling this
+ // The non-varargs helper is enabled for CoreRT only for now. Enabling this
// unconditionally would require ReadyToRun version bump.
//
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -4919,18 +5032,18 @@ void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken,
LclVarDsc* newObjArrayArgsVar;
// Reuse the temp used to pass the array dimensions to avoid bloating
- // the stack frame in case there are multiple calls to multi-dim array
+ // the stack frame in case there are multiple calls to multi-dim array
// constructors within a single method.
if (lvaNewObjArrayArgs == BAD_VAR_NUM)
{
- lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
- lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
+ lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
+ lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
}
// Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
// for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
- lvaTable[lvaNewObjArrayArgs].lvExactSize =
+ lvaTable[lvaNewObjArrayArgs].lvExactSize =
max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
// The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
@@ -4948,16 +5061,16 @@ void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken,
node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
- // Pop dimension arguments from the stack one at a time and store it
+ // Pop dimension arguments from the stack one at a time and store it
// into lvaNewObjArrayArgs temp.
for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
{
GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
- dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
- dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
- new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
+ dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
+ dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
+ new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
dest = gtNewOperNode(GT_IND, TYP_INT, dest);
node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
@@ -4987,7 +5100,7 @@ void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken,
args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
unsigned argFlags = 0;
- args = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
+ args = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
@@ -5013,99 +5126,95 @@ void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken,
impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
}
-GenTreePtr Compiler::impTransformThis (GenTreePtr thisPtr,
- CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken,
- CORINFO_THIS_TRANSFORM transform)
+GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
+ CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
+ CORINFO_THIS_TRANSFORM transform)
{
switch (transform)
{
- case CORINFO_DEREF_THIS:
+ case CORINFO_DEREF_THIS:
{
GenTreePtr obj = thisPtr;
-
+
            // This does a LDIND on the obj, which should be a byref pointing to a ref
impBashVarAddrsToI(obj);
- assert(genActualType(obj->gtType) == TYP_I_IMPL ||
- obj->gtType == TYP_BYREF);
+ assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
-
+
obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
// ldind could point anywhere, example a boxed class static int
obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
-
+
return obj;
}
-
- case CORINFO_BOX_THIS:
+
+ case CORINFO_BOX_THIS:
{
- // Constraint calls where there might be no
- // unboxed entry point require us to implement the call via helper.
- // These only occur when a possible target of the call
+ // Constraint calls where there might be no
+ // unboxed entry point require us to implement the call via helper.
+ // These only occur when a possible target of the call
// may have inherited an implementation of an interface
- // method from System.Object or System.ValueType. The EE does not provide us with
+ // method from System.Object or System.ValueType. The EE does not provide us with
// "unboxed" versions of these methods.
-
+
GenTreePtr obj = thisPtr;
-
+
assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
obj->gtFlags |= GTF_EXCEPT;
-
+
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
if (impIsPrimitive(jitTyp))
{
obj->ChangeOperUnchecked(GT_IND);
-
+
// Obj could point anywhere, example a boxed class static int
obj->gtFlags |= GTF_IND_TGTANYWHERE;
-
- obj->gtType = JITtype2varType(jitTyp);
- obj->gtOp.gtOp2 = 0; // must be zero for tree walkers
+
+ obj->gtType = JITtype2varType(jitTyp);
+ obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
assert(varTypeIsArithmetic(obj->gtType));
}
-
+
// This pushes on the dereferenced byref
// This is then used immediately to box.
impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
-
+
// This pops off the byref-to-a-value-type remaining on the stack and
// replaces it with a boxed object.
// This is then used as the object to the virtual call immediately below.
- impImportAndPushBox (pConstrainedResolvedToken);
+ impImportAndPushBox(pConstrainedResolvedToken);
if (compDonotInline())
- return NULL;
-
+ {
+ return nullptr;
+ }
+
obj = impPopStack().val;
return obj;
}
- case CORINFO_NO_THIS_TRANSFORM:
- default:
- return thisPtr;
+ case CORINFO_NO_THIS_TRANSFORM:
+ default:
+ return thisPtr;
}
}
-
-bool Compiler::impCanPInvokeInline(var_types callRetTyp)
+bool Compiler::impCanPInvokeInline(var_types callRetTyp)
{
- return
- impCanPInvokeInlineCallSite(callRetTyp) &&
- getInlinePInvokeEnabled() &&
- (!opts.compDbgCode) &&
- (compCodeOpt() != SMALL_CODE) &&
- (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
+ return impCanPInvokeInlineCallSite(callRetTyp) && getInlinePInvokeEnabled() && (!opts.compDbgCode) &&
+ (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
;
}
// Returns false only if the callsite really cannot be inlined. Ignores global variables
// like debugger, profiler etc.
-bool Compiler::impCanPInvokeInlineCallSite(var_types callRetTyp)
+bool Compiler::impCanPInvokeInlineCallSite(var_types callRetTyp)
{
return
// We have to disable pinvoke inlining inside of filters
// because in case the main execution (i.e. in the try block) is inside
// unmanaged code, we cannot reuse the inlined stub (we still need the
// original state until we are in the catch handler)
- (!bbInFilterILRange(compCurBB)) &&
+ (!bbInFilterILRange(compCurBB)) &&
// We disable pinvoke inlining inside handlers since the GSCookie is
// in the inlined Frame (see CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie),
// but this would not protect framelets/return-address of handlers.
@@ -5114,7 +5223,7 @@ bool Compiler::impCanPInvokeInlineCallSite(var_types callRetTyp)
// Turns out JIT64 doesn't perform PInvoke inlining inside try regions, here's an excerpt of
// the comment from JIT64 explaining why:
//
- //// [VSWhidbey: 611015] - because the jitted code links in the Frame (instead
+ //// [VSWhidbey: 611015] - because the jitted code links in the Frame (instead
//// of the stub) we rely on the Frame not being 'active' until inside the
//// stub. This normally happens by the stub setting the return address
//// pointer in the Frame object inside the stub. On a normal return, the
@@ -5128,25 +5237,22 @@ bool Compiler::impCanPInvokeInlineCallSite(var_types callRetTyp)
//// in a try body with a catch or filter/except where other non-handler code
//// in this method might run and try to re-use the dirty Frame object.
//
- // Now, because of this, the VM actually assumes that in 64 bit we never PInvoke
+ // Now, because of this, the VM actually assumes that in 64 bit we never PInvoke
// inline calls on any EH construct, you can verify that on VM\ExceptionHandling.cpp:203
// The method responsible for resuming execution is UpdateObjectRefInResumeContextCallback
- // you can see how it aligns with JIT64 policy of not inlining PInvoke calls almost right
+ // you can see how it aligns with JIT64 policy of not inlining PInvoke calls almost right
// at the beginning of the body of the method.
!compCurBB->hasTryIndex() &&
#endif
- (!impLocAllocOnStack()) &&
- (callRetTyp != TYP_STRUCT)
- ;
+ (!impLocAllocOnStack()) && (callRetTyp != TYP_STRUCT);
}
-void Compiler::impCheckForPInvokeCall(
- GenTreePtr call,
- CORINFO_METHOD_HANDLE methHnd,
- CORINFO_SIG_INFO * sig,
- unsigned mflags)
+void Compiler::impCheckForPInvokeCall(GenTreePtr call,
+ CORINFO_METHOD_HANDLE methHnd,
+ CORINFO_SIG_INFO* sig,
+ unsigned mflags)
{
- var_types callRetTyp = JITtype2varType(sig->retType);
+ var_types callRetTyp = JITtype2varType(sig->retType);
CorInfoUnmanagedCallConv unmanagedCallConv;
// If VM flagged it as Pinvoke, flag the call node accordingly
@@ -5154,12 +5260,13 @@ void Compiler::impCheckForPInvokeCall(
{
call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
}
-
+
if (methHnd)
{
- if ((mflags & CORINFO_FLG_PINVOKE) == 0 ||
- (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
+ if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
+ {
return;
+ }
unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
}
@@ -5170,7 +5277,7 @@ void Compiler::impCheckForPInvokeCall(
{
// Used by the IL Stubs.
callConv = CORINFO_CALLCONV_C;
- }
+ }
static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
@@ -5179,15 +5286,14 @@ void Compiler::impCheckForPInvokeCall(
assert(!call->gtCall.gtCallCookie);
}
- if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C &&
- unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
+ if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
{
return;
}
optNativeCallCount++;
- if (opts.compMustInlinePInvokeCalli && methHnd == NULL)
+ if (opts.compMustInlinePInvokeCalli && methHnd == nullptr)
{
#ifdef _TARGET_X86_
// CALLI in IL stubs must be inlined
@@ -5197,15 +5303,18 @@ void Compiler::impCheckForPInvokeCall(
}
else
{
- if (!impCanPInvokeInline(callRetTyp))
+ if (!impCanPInvokeInline(callRetTyp))
+ {
return;
+ }
if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
+ {
return;
+ }
}
- JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s",
- info.compFullName));
+ JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
call->gtFlags |= GTF_CALL_UNMANAGED;
info.compCallUnmanaged++;
@@ -5214,26 +5323,31 @@ void Compiler::impCheckForPInvokeCall(
// AMD64 convention is same for native and managed
if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
+ {
call->gtFlags |= GTF_CALL_POP_ARGS;
+ }
if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
+ {
call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
+ }
}
-
-GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO * sig, IL_OFFSETX ilOffset)
+GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
{
var_types callRetTyp = JITtype2varType(sig->retType);
-
+
/* The function pointer is on top of the stack - It may be a
* complex expression. As it is evaluated after the args,
* it may cause registered args to be spilled. Simply spill it.
*/
// Ignore this trivial case.
- if (impStackTop().val->gtOper != GT_LCL_VAR)
- impSpillStackEntry(verCurrentState.esStackDepth - 1, BAD_VAR_NUM
- DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
+ if (impStackTop().val->gtOper != GT_LCL_VAR)
+ {
+ impSpillStackEntry(verCurrentState.esStackDepth - 1,
+ BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
+ }
/* Get the function pointer */
@@ -5244,14 +5358,16 @@ GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO * sig, IL_O
// This temporary must never be converted to a double in stress mode,
// because that can introduce a call to the cast helper after the
// arguments have already been evaluated.
-
+
if (fptr->OperGet() == GT_LCL_VAR)
+ {
lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
+ }
#endif
/* Create the call node */
- GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, NULL, ilOffset);
+ GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
@@ -5260,12 +5376,10 @@ GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO * sig, IL_O
/*****************************************************************************/
-void Compiler::impPopArgsForUnmanagedCall(
- GenTreePtr call,
- CORINFO_SIG_INFO * sig)
+void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
{
assert(call->gtFlags & GTF_CALL_UNMANAGED);
-
+
/* Since we push the arguments in reverse order (i.e. right -> left)
* spill any side effects from the stack
*
@@ -5273,13 +5387,13 @@ void Compiler::impPopArgsForUnmanagedCall(
* thus we have to spill all side-effects except last one
*/
- unsigned lastLevelWithSideEffects = UINT_MAX;
+ unsigned lastLevelWithSideEffects = UINT_MAX;
unsigned argsToReverse = sig->numArgs;
-
+
// For "thiscall", the first argument goes in a register. Since its
// order does not need to be changed, we do not need to spill it
-
+
if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
assert(argsToReverse);
@@ -5291,24 +5405,22 @@ void Compiler::impPopArgsForUnmanagedCall(
argsToReverse = 0;
#endif
- for (unsigned level = verCurrentState.esStackDepth - argsToReverse;
- level < verCurrentState.esStackDepth;
- level++)
+ for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
{
- if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
+ if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
{
assert(lastLevelWithSideEffects == UINT_MAX);
- impSpillStackEntry(level, BAD_VAR_NUM
- DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
+ impSpillStackEntry(level,
+ BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
}
else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
{
- if (lastLevelWithSideEffects != UINT_MAX)
+ if (lastLevelWithSideEffects != UINT_MAX)
{
/* We had a previous side effect - must spill it */
- impSpillStackEntry(lastLevelWithSideEffects, BAD_VAR_NUM
- DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
+ impSpillStackEntry(lastLevelWithSideEffects,
+ BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
/* Record the level for the current side effect in case we will spill it */
lastLevelWithSideEffects = level;
@@ -5325,13 +5437,10 @@ void Compiler::impPopArgsForUnmanagedCall(
/* The argument list is now "clean" - no out-of-order side effects
* Pop the argument list in reverse order */
- unsigned argFlags = 0;
- GenTreePtr args = call->gtCall.gtCallArgs = impPopRevList(
- sig->numArgs,
- &argFlags,
- sig,
- sig->numArgs - argsToReverse);
-
+ unsigned argFlags = 0;
+ GenTreePtr args = call->gtCall.gtCallArgs =
+ impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
+
if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
GenTreePtr thisPtr = args->Current();
@@ -5340,10 +5449,11 @@ void Compiler::impPopArgsForUnmanagedCall(
}
if (args)
+ {
call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
+ }
}
-
//------------------------------------------------------------------------
// impInitClass: Build a node to initialize the class before accessing the
// field if necessary
@@ -5356,10 +5466,10 @@ void Compiler::impPopArgsForUnmanagedCall(
//    initialization. Otherwise, nullptr.
//
-GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN * pResolvedToken)
+GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
- CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd,
- impTokenLookupContextHandle);
+ CorInfoInitClassResult initClassResult =
+ info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
{
@@ -5377,9 +5487,7 @@ GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN * pResolvedToken)
if (runtimeLookup)
{
- node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS,
- TYP_VOID, 0,
- gtNewArgList(node));
+ node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
}
else
{
@@ -5390,82 +5498,83 @@ GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN * pResolvedToken)
return node;
}
-GenTreePtr Compiler::impImportStaticReadOnlyField(void * fldAddr, var_types lclTyp)
+GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
{
- GenTreePtr op1 = NULL;
-
- switch (lclTyp) {
- int ival;
- __int64 lval;
- double dval;
-
- case TYP_BOOL:
- ival = *((bool *) fldAddr);
- goto IVAL_COMMON;
-
- case TYP_BYTE:
- ival = *((signed char *) fldAddr);
- goto IVAL_COMMON;
-
- case TYP_UBYTE:
- ival = *((unsigned char *) fldAddr);
- goto IVAL_COMMON;
-
- case TYP_SHORT:
- ival = *((short *) fldAddr);
- goto IVAL_COMMON;
-
- case TYP_CHAR:
- case TYP_USHORT:
- ival = *((unsigned short *) fldAddr);
- goto IVAL_COMMON;
-
- case TYP_UINT:
- case TYP_INT:
- ival = *((int *) fldAddr);
-IVAL_COMMON:
- op1 = gtNewIconNode(ival);
- break;
+ GenTreePtr op1 = nullptr;
- case TYP_LONG:
- case TYP_ULONG:
- lval = *((__int64 *) fldAddr);
- op1 = gtNewLconNode(lval);
- break;
+ switch (lclTyp)
+ {
+ int ival;
+ __int64 lval;
+ double dval;
+
+ case TYP_BOOL:
+ ival = *((bool*)fldAddr);
+ goto IVAL_COMMON;
+
+ case TYP_BYTE:
+ ival = *((signed char*)fldAddr);
+ goto IVAL_COMMON;
+
+ case TYP_UBYTE:
+ ival = *((unsigned char*)fldAddr);
+ goto IVAL_COMMON;
+
+ case TYP_SHORT:
+ ival = *((short*)fldAddr);
+ goto IVAL_COMMON;
+
+ case TYP_CHAR:
+ case TYP_USHORT:
+ ival = *((unsigned short*)fldAddr);
+ goto IVAL_COMMON;
+
+ case TYP_UINT:
+ case TYP_INT:
+ ival = *((int*)fldAddr);
+ IVAL_COMMON:
+ op1 = gtNewIconNode(ival);
+ break;
- case TYP_FLOAT:
- dval = *((float *) fldAddr);
- op1 = gtNewDconNode(dval);
+ case TYP_LONG:
+ case TYP_ULONG:
+ lval = *((__int64*)fldAddr);
+ op1 = gtNewLconNode(lval);
+ break;
+
+ case TYP_FLOAT:
+ dval = *((float*)fldAddr);
+ op1 = gtNewDconNode(dval);
#if !FEATURE_X87_DOUBLES
- // X87 stack doesn't differentiate between float/double
- // so R4 is treated as R8, but everybody else does
- op1->gtType = TYP_FLOAT;
+ // X87 stack doesn't differentiate between float/double
+ // so R4 is treated as R8, but everybody else does
+ op1->gtType = TYP_FLOAT;
#endif // FEATURE_X87_DOUBLES
- break;
+ break;
- case TYP_DOUBLE:
- dval = *((double *) fldAddr);
- op1 = gtNewDconNode(dval);
- break;
+ case TYP_DOUBLE:
+ dval = *((double*)fldAddr);
+ op1 = gtNewDconNode(dval);
+ break;
- default:
- assert(!"Unexpected lclTyp");
- break;
+ default:
+ assert(!"Unexpected lclTyp");
+ break;
}
return op1;
}
-GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_ACCESS_FLAGS access,
- CORINFO_FIELD_INFO * pFieldInfo,
- var_types lclTyp)
+GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_ACCESS_FLAGS access,
+ CORINFO_FIELD_INFO* pFieldInfo,
+ var_types lclTyp)
{
GenTreePtr op1;
switch (pFieldInfo->fieldAccessor)
{
- case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
+ case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
{
assert(!compIsForInlining());
@@ -5473,118 +5582,117 @@ GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN * pResolv
op1 = impParentClassTokenToHandle(pResolvedToken);
            // compIsForInlining() is false so we should never get NULL here
- assert(op1 != NULL);
+ assert(op1 != nullptr);
var_types type = TYP_BYREF;
switch (pFieldInfo->helper)
{
- case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
- type = TYP_I_IMPL;
- break;
- case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
- case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
- case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
- break;
- default:
- assert(!"unknown generic statics helper");
- break;
+ case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
+ type = TYP_I_IMPL;
+ break;
+ case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
+ case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
+ case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
+ break;
+ default:
+ assert(!"unknown generic statics helper");
+ break;
}
op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
- op1 = gtNewOperNode(GT_ADD, type,
- op1,
+ op1 = gtNewOperNode(GT_ADD, type, op1,
new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
}
break;
- case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
+ case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
#ifdef FEATURE_READYTORUN_COMPILER
- if (opts.IsReadyToRun())
- {
- unsigned callFlags = 0;
-
- if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
+ if (opts.IsReadyToRun())
{
- callFlags |= GTF_CALL_HOISTABLE;
- }
+ unsigned callFlags = 0;
- op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
+ if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
+ {
+ callFlags |= GTF_CALL_HOISTABLE;
+ }
- op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
- }
- else
+ op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
+
+ op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
+ }
+ else
#endif
- {
- op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
- }
+ {
+ op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
+ }
- {
- FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
- op1 = gtNewOperNode(GT_ADD, op1->TypeGet(),
- op1,
- new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs
- ));
- }
- break;
+ {
+ FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
+ op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
+ new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
+ }
+ break;
- default:
- if (!(access & CORINFO_ACCESS_ADDRESS))
- {
- // In future, it may be better to just create the right tree here instead of folding it later.
- op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
+ default:
+ if (!(access & CORINFO_ACCESS_ADDRESS))
+ {
+ // In future, it may be better to just create the right tree here instead of folding it later.
+ op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
- if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
+ if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
+ {
+ op1->gtType = TYP_REF; // points at boxed object
+ FieldSeqNode* firstElemFldSeq =
+ GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
+ op1 =
+ gtNewOperNode(GT_ADD, TYP_BYREF, op1,
+ new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
+
+ if (varTypeIsStruct(lclTyp))
+ {
+ // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
+ op1 = gtNewObjNode(pFieldInfo->structType, op1);
+ }
+ else
+ {
+ op1 = gtNewOperNode(GT_IND, lclTyp, op1);
+ op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
+ }
+ }
+
+ return op1;
+ }
+ else
{
- op1->gtType = TYP_REF; // points at boxed object
- FieldSeqNode* firstElemFldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
- op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, new(this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
+ void** pFldAddr = nullptr;
+ void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
- if (varTypeIsStruct(lclTyp))
+ FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
+
+ /* Create the data member node */
+ if (pFldAddr == nullptr)
{
- // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
- op1 = gtNewObjNode(pFieldInfo->structType, op1);
+ op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
}
else
{
- op1 = gtNewOperNode(GT_IND, lclTyp, op1);
- op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
- }
- }
-
- return op1;
- }
- else
- {
- void ** pFldAddr = NULL;
- void * fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**) &pFldAddr);
+ op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
- FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
+ // There are two cases here, either the static is RVA based,
+ // in which case the type of the FIELD node is not a GC type
+ // and the handle to the RVA is a TYP_I_IMPL. Or the FIELD node is
+ // a GC type and the handle to it is a TYP_BYREF in the GC heap
+ // because handles to statics now go into the large object heap
- /* Create the data member node */
- if (pFldAddr == NULL)
- {
- op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
- }
- else
- {
- op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
-
- // There are two cases here, either the static is RVA based,
- // in which case the type of the FIELD node is not a GC type
- // and the handle to the RVA is a TYP_I_IMPL. Or the FIELD node is
- // a GC type and the handle to it is a TYP_BYREF in the GC heap
- // because handles to statics now go into the large object heap
-
- var_types handleTyp = (var_types) (varTypeIsGC(lclTyp) ? TYP_BYREF
- : TYP_I_IMPL);
- op1 = gtNewOperNode(GT_IND, handleTyp, op1);
- op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
+ var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
+ op1 = gtNewOperNode(GT_IND, handleTyp, op1);
+ op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
+ }
}
- }
- break;
+ break;
}
if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
@@ -5593,8 +5701,8 @@ GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN * pResolv
FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
- op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
- new(this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
+ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
+ new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
}
if (!(access & CORINFO_ACCESS_ADDRESS))
@@ -5606,71 +5714,75 @@ GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN * pResolv
return op1;
}
-//In general try to call this before most of the verification work. Most people expect the access
-//exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns
-//out if you can't access something we also think that you're unverifiable for other reasons.
-void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result,
- CORINFO_HELPER_DESC * helperCall)
+// In general try to call this before most of the verification work. Most people expect the access
+// exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns
+// out if you can't access something we also think that you're unverifiable for other reasons.
+void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
{
if (result != CORINFO_ACCESS_ALLOWED)
+ {
impHandleAccessAllowedInternal(result, helperCall);
+ }
}
-void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result,
- CORINFO_HELPER_DESC * helperCall)
+void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
{
switch (result)
{
- case CORINFO_ACCESS_ALLOWED:
- break;
- case CORINFO_ACCESS_ILLEGAL:
- // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
- // method is verifiable. Otherwise, delay the exception to runtime.
- if (compIsForImportOnly())
- {
- info.compCompHnd->ThrowExceptionForHelper(helperCall);
- }
- else
- {
+ case CORINFO_ACCESS_ALLOWED:
+ break;
+ case CORINFO_ACCESS_ILLEGAL:
+ // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
+ // method is verifiable. Otherwise, delay the exception to runtime.
+ if (compIsForImportOnly())
+ {
+ info.compCompHnd->ThrowExceptionForHelper(helperCall);
+ }
+ else
+ {
+ impInsertHelperCall(helperCall);
+ }
+ break;
+ case CORINFO_ACCESS_RUNTIME_CHECK:
impInsertHelperCall(helperCall);
- }
- break;
- case CORINFO_ACCESS_RUNTIME_CHECK:
- impInsertHelperCall(helperCall);
- break;
+ break;
}
}
-void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC * helperInfo)
+void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
{
- //Construct the argument list
- GenTreeArgList* args = NULL;
+ // Construct the argument list
+ GenTreeArgList* args = nullptr;
assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
for (unsigned i = helperInfo->numArgs; i > 0; --i)
{
- const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
- GenTreePtr currentArg = NULL;
+ const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
+ GenTreePtr currentArg = nullptr;
switch (helperArg.argType)
{
- case CORINFO_HELPER_ARG_TYPE_Field:
- info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(info.compCompHnd->getFieldClass(helperArg.fieldHandle));
- currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
- break;
- case CORINFO_HELPER_ARG_TYPE_Method:
- info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
- currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
- break;
- case CORINFO_HELPER_ARG_TYPE_Class:
- info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
- currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle); break;
- case CORINFO_HELPER_ARG_TYPE_Module:
- currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle); break;
- case CORINFO_HELPER_ARG_TYPE_Const:
- currentArg = gtNewIconNode(helperArg.constant); break;
- default:
- NO_WAY("Illegal helper arg type");
+ case CORINFO_HELPER_ARG_TYPE_Field:
+ info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
+ info.compCompHnd->getFieldClass(helperArg.fieldHandle));
+ currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
+ break;
+ case CORINFO_HELPER_ARG_TYPE_Method:
+ info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
+ currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
+ break;
+ case CORINFO_HELPER_ARG_TYPE_Class:
+ info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
+ currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
+ break;
+ case CORINFO_HELPER_ARG_TYPE_Module:
+ currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
+ break;
+ case CORINFO_HELPER_ARG_TYPE_Const:
+ currentArg = gtNewIconNode(helperArg.constant);
+ break;
+ default:
+ NO_WAY("Illegal helper arg type");
}
- args = (currentArg == NULL) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
+ args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
}
/* TODO-Review:
@@ -5681,20 +5793,19 @@ void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC * helperIn
impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
-void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
- CORINFO_METHOD_HANDLE calleeMethodHnd,
- CORINFO_CLASS_HANDLE delegateTypeHnd)
-{
+void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
+ CORINFO_METHOD_HANDLE calleeMethodHnd,
+ CORINFO_CLASS_HANDLE delegateTypeHnd)
+{
#ifdef FEATURE_CORECLR
if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
{
// Call the JIT_DelegateSecurityCheck helper before calling the actual function.
// This helper throws an exception if the CLR host disallows the call.
- GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK,
- TYP_VOID,
- GTF_EXCEPT,
- gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd), gtNewIconEmbMethHndNode(calleeMethodHnd)));
+ GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
+ gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
+ gtNewIconEmbMethHndNode(calleeMethodHnd)));
// Append the callout statement
impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
@@ -5705,10 +5816,10 @@ void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE
// so that callee can be tail called. Note that here we don't check
// compatibility in IL Verifier sense, but on the lines of return type
// sizes are equal and get returned in the same return register.
-bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
- CORINFO_CLASS_HANDLE callerRetTypeClass,
- var_types calleeRetType,
- CORINFO_CLASS_HANDLE calleeRetTypeClass)
+bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
+ CORINFO_CLASS_HANDLE callerRetTypeClass,
+ var_types calleeRetType,
+ CORINFO_CLASS_HANDLE calleeRetTypeClass)
{
// Note that we can not relax this condition with genActualType() as the
// calling convention dictates that the caller of a function with a small
@@ -5737,10 +5848,12 @@ bool Compiler::impTailCallRetTypeCompatible(var_types callerRetTy
// return value. Some of the tail calls permitted by below checks would have
// been rejected by IL Verifier before we reached here. Therefore, only full
// trust code can make those tail calls.
- unsigned callerRetTypeSize = 0;
- unsigned calleeRetTypeSize = 0;
- bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
- bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
+ unsigned callerRetTypeSize = 0;
+ unsigned calleeRetTypeSize = 0;
+ bool isCallerRetTypMBEnreg =
+ VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
+ bool isCalleeRetTypMBEnreg =
+ VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
{
@@ -5754,13 +5867,14 @@ bool Compiler::impTailCallRetTypeCompatible(var_types callerRetTy
// For prefixFlags
enum
{
- PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
- PREFIX_TAILCALL_IMPLICIT = 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
- PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
- PREFIX_VOLATILE = 0x00000100,
- PREFIX_UNALIGNED = 0x00001000,
- PREFIX_CONSTRAINED = 0x00010000,
- PREFIX_READONLY = 0x00100000
+ PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
+ PREFIX_TAILCALL_IMPLICIT =
+ 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
+ PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
+ PREFIX_VOLATILE = 0x00000100,
+ PREFIX_UNALIGNED = 0x00001000,
+ PREFIX_CONSTRAINED = 0x00010000,
+ PREFIX_READONLY = 0x00100000
};
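A minimal usage sketch (illustrative only, not part of this change) showing how these single-bit prefixes combine and are tested with masks; the enum values are the ones defined just above:

// Illustrative only: each prefix is an independent bit, so a call site can
// carry several at once and each is checked with a simple mask.
static bool SketchIsTailCall(int prefixFlags)
{
    return (prefixFlags & PREFIX_TAILCALL) != 0; // explicit "tail." prefix or implicit tail call
}

static bool SketchIsExplicitTailCall(int prefixFlags)
{
    return (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0; // only the "tail." IL prefix
}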
/********************************************************************************
@@ -5769,12 +5883,12 @@ enum
* to a supported tail call IL pattern.
*
*/
-bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
- OPCODE curOpcode,
- const BYTE *codeAddrOfNextOpcode,
- const BYTE *codeEnd,
- bool isRecursive,
- bool *isCallPopAndRet /* = nullptr */)
+bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
+ OPCODE curOpcode,
+ const BYTE* codeAddrOfNextOpcode,
+ const BYTE* codeEnd,
+ bool isRecursive,
+ bool* isCallPopAndRet /* = nullptr */)
{
// Bail out if the current opcode is not a call.
if (!impOpcodeIsCallOpcode(curOpcode))
@@ -5799,7 +5913,7 @@ bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
return false;
}
- // Scan the opcodes to look for the following IL patterns if either
+ // Scan the opcodes to look for the following IL patterns if either
// i) the call is not tail prefixed (i.e. implicit tail call) or
// ii) if tail prefixed, IL verification is not needed for the method.
//
@@ -5817,17 +5931,18 @@ bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
// pop
// nop*
// ret
- int cntPop = 0;
+ int cntPop = 0;
OPCODE nextOpcode;
#ifdef _TARGET_AMD64_
- do
+ do
{
nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
codeAddrOfNextOpcode += sizeof(__int8);
- } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
- (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
- ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly one pop seen so far.
+ } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
+ (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
+ ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
+ // one pop seen so far.
#else
nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
#endif
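
The AMD64 loop above skips trailing CEE_NOPs and tolerates at most one CEE_POP between the call and the ret. A self-contained sketch of the same scan over raw IL bytes, assuming standard ECMA-335 opcode values (helper name and signature are illustrative, not the JIT's):

// Sketch: accept the IL tail-call patterns "call ; nop* ; ret" and
// "call ; nop* ; pop ; nop* ; ret" (the pop form only for void-returning methods).
static bool LooksLikeCallPopRetTail(const unsigned char* ip, const unsigned char* end, bool methodReturnsVoid)
{
    int popCount = 0;
    while (ip < end)
    {
        unsigned char op = *ip++;
        if (op == 0x00) // CEE_NOP
        {
            continue;
        }
        if ((op == 0x26) && (++popCount == 1)) // CEE_POP; at most one is allowed
        {
            continue;
        }
        // The first opcode that is neither a nop nor the single allowed pop must be ret (0x2A),
        // and a pop is only legal when the method returns void.
        return (op == 0x2A) && ((popCount == 0) || ((popCount == 1) && methodReturnsVoid));
    }
    return false;
}
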
@@ -5842,35 +5957,38 @@ bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
// Jit64 Compat:
// Tail call IL pattern could be either of the following
// 1) call/callvirt/calli + ret
- // 2) call/callvirt/calli + pop + ret in a method returning void.
- return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
+ // 2) call/callvirt/calli + pop + ret in a method returning void.
+ return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
#else //!_TARGET_AMD64_
return (nextOpcode == CEE_RET) && (cntPop == 0);
#endif
}
/*****************************************************************************
- *
+ *
* Determine whether the call could be converted to an implicit tail call
*
*/
-bool Compiler::impIsImplicitTailCallCandidate(OPCODE opcode,
- const BYTE *codeAddrOfNextOpcode,
- const BYTE *codeEnd,
- int prefixFlags,
- bool isRecursive)
+bool Compiler::impIsImplicitTailCallCandidate(
+ OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
{
#if FEATURE_TAILCALL_OPT
if (!opts.compTailCallOpt)
+ {
return false;
-
+ }
+
if (opts.compDbgCode || opts.MinOpts())
+ {
return false;
+ }
// must not be tail prefixed
- if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
+ if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
+ {
return false;
+ }
#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
// the block containing call is marked as BBJ_RETURN
@@ -5880,14 +5998,16 @@ bool Compiler::impIsImplicitTailCallCandidate(OPCODE opcode,
return false;
#endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
- // must be call+ret or call+pop+ret
- if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
+ // must be call+ret or call+pop+ret
+ if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
+ {
return false;
+ }
return true;
#else
return false;
-#endif //FEATURE_TAILCALL_OPT
+#endif // FEATURE_TAILCALL_OPT
}
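
Condensed, the function above only answers yes when tail-call optimization is enabled, the method is not compiled for debugging or MinOpts, there is no explicit "tail." prefix, the containing block ends in a return (when shared-return support is off), and the IL matches the call(+pop)+ret pattern. A hypothetical one-predicate restatement, with each parameter standing in for one of those checks:

// Illustrative condensation of the checks above; not the JIT's actual code.
static bool IsImplicitTailCallCandidateSketch(
    bool tailCallOptEnabled, bool debugCodeOrMinOpts, int prefixFlags, bool blockIsReturn, bool matchesIlPattern)
{
    return tailCallOptEnabled && !debugCodeOrMinOpts && ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0) &&
           blockIsReturn && matchesIlPattern;
}
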
//------------------------------------------------------------------------
@@ -5913,59 +6033,58 @@ bool Compiler::impIsImplicitTailCallCandidate(OPCODE opcode,
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-var_types Compiler::impImportCall(OPCODE opcode,
- CORINFO_RESOLVED_TOKEN* pResolvedToken,
- CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
- GenTreePtr newobjThis,
- int prefixFlags,
- CORINFO_CALL_INFO* callInfo,
- IL_OFFSET rawILOffset)
+var_types Compiler::impImportCall(OPCODE opcode,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
+ GenTreePtr newobjThis,
+ int prefixFlags,
+ CORINFO_CALL_INFO* callInfo,
+ IL_OFFSET rawILOffset)
{
- assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT ||
- opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
-
- IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
- var_types callRetTyp = TYP_COUNT;
- CORINFO_SIG_INFO* sig = nullptr;
- CORINFO_METHOD_HANDLE methHnd = nullptr;
- CORINFO_CLASS_HANDLE clsHnd = nullptr;
- unsigned clsFlags = 0;
- unsigned mflags = 0;
- unsigned argFlags = 0;
- GenTreePtr call = nullptr;
- GenTreeArgList* args = nullptr;
- CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
- CORINFO_CONTEXT_HANDLE exactContextHnd = 0;
- BOOL exactContextNeedsRuntimeLookup = FALSE;
- bool canTailCall = true;
- const char* szCanTailCallFailReason = nullptr;
- int tailCall = prefixFlags & PREFIX_TAILCALL;
- bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
+ assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
+
+ IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
+ var_types callRetTyp = TYP_COUNT;
+ CORINFO_SIG_INFO* sig = nullptr;
+ CORINFO_METHOD_HANDLE methHnd = nullptr;
+ CORINFO_CLASS_HANDLE clsHnd = nullptr;
+ unsigned clsFlags = 0;
+ unsigned mflags = 0;
+ unsigned argFlags = 0;
+ GenTreePtr call = nullptr;
+ GenTreeArgList* args = nullptr;
+ CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
+ CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
+ BOOL exactContextNeedsRuntimeLookup = FALSE;
+ bool canTailCall = true;
+ const char* szCanTailCallFailReason = nullptr;
+ int tailCall = prefixFlags & PREFIX_TAILCALL;
+ bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
// Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
// do that before tailcalls, but that is probably not the intended
// semantic. So just disallow tailcalls from synchronized methods.
- // Also, popping arguments in a varargs function is more work and NYI
+ // Also, popping arguments in a varargs function is more work and NYI
// If we have a security object, we have to keep our frame around for callers
// to see any imperative security.
if (info.compFlags & CORINFO_FLG_SYNCH)
{
- canTailCall = false;
+ canTailCall = false;
szCanTailCallFailReason = "Caller is synchronized";
}
#if !FEATURE_FIXED_OUT_ARGS
else if (info.compIsVarArgs)
{
- canTailCall = false;
+ canTailCall = false;
szCanTailCallFailReason = "Caller is varargs";
}
#endif // FEATURE_FIXED_OUT_ARGS
else if (opts.compNeedSecurityCheck)
{
- canTailCall = false;
+ canTailCall = false;
szCanTailCallFailReason = "Caller requires a security check.";
}
@@ -5979,8 +6098,8 @@ var_types Compiler::impImportCall(OPCODE opcode,
// ReadyToRun code sticks with default calling convention that does not widen small return types.
- bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
- bool bIntrinsicImported = false;
+ bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
+ bool bIntrinsicImported = false;
CORINFO_SIG_INFO calliSig;
GenTreeArgList* extraArg = nullptr;
@@ -6000,19 +6119,19 @@ var_types Compiler::impImportCall(OPCODE opcode,
// We don't know the target method, so we have to infer the flags, or
// assume the worst-case.
- mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
+ mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
#ifdef DEBUG
if (verbose)
{
- unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
- printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
+ unsigned structSize =
+ (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
+ printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
}
#endif
- //This should be checked in impImportBlockCode.
- assert(!compIsForInlining()
- || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
+ // This should be checked in impImportBlockCode.
+ assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
sig = &calliSig;
@@ -6020,7 +6139,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
// We cannot lazily obtain the signature of a CALLI call because it has no method
// handle that we can use, so we need to save its full call signature here.
assert(call->gtCall.callSig == nullptr);
- call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
+ call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
*call->gtCall.callSig = calliSig;
#endif // DEBUG
}
@@ -6028,24 +6147,24 @@ var_types Compiler::impImportCall(OPCODE opcode,
{
CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
- // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
+ // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
// supply the instantiation parameters necessary to make direct calls to underlying
- // shared generic code, rather than calling through instantiating stubs. If the
+ // shared generic code, rather than calling through instantiating stubs. If the
// returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
// must indeed pass an instantiation parameter.
methHnd = callInfo->hMethod;
- sig = &(callInfo->sig);
+ sig = &(callInfo->sig);
callRetTyp = JITtype2varType(sig->retType);
- mflags = callInfo->methodFlags;
+ mflags = callInfo->methodFlags;
#ifdef DEBUG
if (verbose)
{
unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
- printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
+ printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
}
#endif
@@ -6090,7 +6209,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
return callRetTyp;
}
- if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
+ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
return callRetTyp;
@@ -6103,7 +6222,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
}
}
- clsHnd = pResolvedToken->hClass;
+ clsHnd = pResolvedToken->hClass;
clsFlags = callInfo->classFlags;
@@ -6115,10 +6234,9 @@ var_types Compiler::impImportCall(OPCODE opcode,
const char* modName;
const char* className;
const char* methodName;
- if ((className = eeGetClassName(clsHnd)) != NULL
- && strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0
- && (methodName = eeGetMethodName(methHnd, &modName)) != NULL
- && strcmp(methodName, "Mark") == 0)
+ if ((className = eeGetClassName(clsHnd)) != nullptr &&
+ strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
+ (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
{
return impImportJitTestLabelMark(sig->numArgs);
}
@@ -6127,12 +6245,12 @@ var_types Compiler::impImportCall(OPCODE opcode,
// <NICE> Factor this into getCallInfo </NICE>
if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
{
- call = impIntrinsic(clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall, (canTailCall && (tailCall != 0)), &intrinsicID);
+ call = impIntrinsic(clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
+ (canTailCall && (tailCall != 0)), &intrinsicID);
if (call != nullptr)
{
- assert(!(mflags & CORINFO_FLG_VIRTUAL) ||
- (mflags & CORINFO_FLG_FINAL) ||
+ assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
(clsFlags & CORINFO_FLG_FINAL));
#ifdef FEATURE_READYTORUN_COMPILER
@@ -6167,8 +6285,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
}
#endif // FEATURE_SIMD
- if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) &&
- (opcode == CEE_CALLVIRT))
+ if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
{
NO_WAY("Virtual call to a function added via EnC is not supported");
goto DONE_CALL;
@@ -6177,7 +6294,9 @@ var_types Compiler::impImportCall(OPCODE opcode,
if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
+ {
BADCODE("Bad calling convention");
+ }
//-------------------------------------------------------------------------
// Construct the call node
@@ -6187,9 +6306,9 @@ var_types Compiler::impImportCall(OPCODE opcode,
constraintCallThisTransform = callInfo->thisTransform;
- exactContextHnd = callInfo->contextHandle;
+ exactContextHnd = callInfo->contextHandle;
exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
-
+
    // A recursive call is treated as a loop back to the beginning of the method.
if (methHnd == info.compMethodHnd)
{
@@ -6197,21 +6316,21 @@ var_types Compiler::impImportCall(OPCODE opcode,
if (verbose)
{
JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
- fgFirstBB->bbNum, compCurBB->bbNum);
+ fgFirstBB->bbNum, compCurBB->bbNum);
}
#endif
fgMarkBackwardJump(fgFirstBB, compCurBB);
}
-
+
switch (callInfo->kind)
{
- case CORINFO_VIRTUALCALL_STUB:
+ case CORINFO_VIRTUALCALL_STUB:
{
- assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
+ assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
- {
+ {
if (compIsForInlining())
{
@@ -6228,52 +6347,52 @@ var_types Compiler::impImportCall(OPCODE opcode,
*/
compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
return callRetTyp;
- }
-
+ }
+
GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
assert(!compDonotInline());
// This is the rough code to set up an indirect stub call
- assert(stubAddr!= 0);
-
+ assert(stubAddr != nullptr);
+
// The stubAddr may be a
// complex expression. As it is evaluated after the args,
// it may cause registered args to be spilled. Simply spill it.
-
- unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
+
+ unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
- stubAddr= gtNewLclvNode(lclNum, TYP_I_IMPL);
-
+ stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
+
// Create the actual call node
-
+
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
-
- call = gtNewIndCallNode(stubAddr, callRetTyp, NULL);
-
+
+ call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
+
call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
call->gtFlags |= GTF_CALL_VIRT_STUB;
#ifdef _TARGET_X86_
// No tailcalls allowed for these yet...
- canTailCall = false;
+ canTailCall = false;
szCanTailCallFailReason = "VirtualCall with runtime lookup";
#endif
}
else
{
- // ok, the stub is available at compile type.
-
- call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, NULL, ilOffset);
+                // OK, the stub is available at compile time.
+
+ call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
call->gtFlags |= GTF_CALL_VIRT_STUB;
- assert (callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
+ assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
{
call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
}
}
-
+
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
@@ -6289,58 +6408,59 @@ var_types Compiler::impImportCall(OPCODE opcode,
break;
}
- case CORINFO_VIRTUALCALL_VTABLE:
+ case CORINFO_VIRTUALCALL_VTABLE:
{
- assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
+ assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
- call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, NULL, ilOffset);
+ call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
call->gtFlags |= GTF_CALL_VIRT_VTABLE;
break;
}
- case CORINFO_VIRTUALCALL_LDVIRTFTN:
+ case CORINFO_VIRTUALCALL_LDVIRTFTN:
{
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
return callRetTyp;
}
-
- assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
+
+ assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
// OK, We've been told to call via LDVIRTFTN, so just
// take the call now....
-
+
args = impPopList(sig->numArgs, &argFlags, sig);
-
+
GenTreePtr thisPtr = impPopStack().val;
- thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
- if (compDonotInline())
+ thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
+ if (compDonotInline())
{
return callRetTyp;
}
-
+
// Clone the (possibly transformed) "this" pointer
GenTreePtr thisPtrCopy;
- thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, NULL DEBUGARG("LDVIRTFTN this pointer") );
-
+ thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("LDVIRTFTN this pointer"));
+
GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
- if (compDonotInline())
+ if (compDonotInline())
{
return callRetTyp;
}
-
- thisPtr = 0; // can't reuse it
-
+
+ thisPtr = nullptr; // can't reuse it
+
// Now make an indirect call through the function pointer
-
- unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
+
+ unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
-
+
// Create the actual call node
-
- call = gtNewIndCallNode(fptr, callRetTyp,args, ilOffset);
+
+ call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
call->gtCall.gtCallObjp = thisPtrCopy;
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
@@ -6353,16 +6473,16 @@ var_types Compiler::impImportCall(OPCODE opcode,
}
#endif
- // Sine we are jumping over some code, check that its OK to skip that code
+            // Since we are jumping over some code, check that it's OK to skip that code
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
goto DONE;
}
- case CORINFO_CALL:
+ case CORINFO_CALL:
{
// This is for a non-virtual, non-interface etc. call
- call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, NULL, ilOffset);
+ call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
        // We remove the nullcheck for the GetType call intrinsic.
// TODO-CQ: JIT64 does not introduce the null check for many more helper calls
@@ -6382,9 +6502,9 @@ var_types Compiler::impImportCall(OPCODE opcode,
break;
}
- case CORINFO_CALL_CODE_POINTER:
+ case CORINFO_CALL_CODE_POINTER:
{
- // The EE has asked us to call by computing a code pointer and then doing an
+ // The EE has asked us to call by computing a code pointer and then doing an
// indirect call. This is because a runtime lookup is required to get the code entry point.
// These calls always follow a uniform calling convention, i.e. no extra hidden params
@@ -6393,11 +6513,8 @@ var_types Compiler::impImportCall(OPCODE opcode,
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
- GenTreePtr fptr = impLookupToTree(
- pResolvedToken,
- &callInfo->codePointerLookup,
- GTF_ICON_FTN_ADDR,
- callInfo->hMethod);
+ GenTreePtr fptr =
+ impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
if (compDonotInline())
{
@@ -6405,11 +6522,11 @@ var_types Compiler::impImportCall(OPCODE opcode,
}
// Now make an indirect call through the function pointer
-
- unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
+
+ unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
-
+
call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
if (callInfo->nullInstanceCheck)
@@ -6420,54 +6537,50 @@ var_types Compiler::impImportCall(OPCODE opcode,
break;
}
- default:
- assert(!"unknown call kind");
- break;
-
+ default:
+ assert(!"unknown call kind");
+ break;
}
//-------------------------------------------------------------------------
// Set more flags
- PREFIX_ASSUME(call != 0);
+ PREFIX_ASSUME(call != nullptr);
if (mflags & CORINFO_FLG_NOGCCHECK)
+ {
call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
-
+ }
+
// Mark call if it's one of the ones we will maybe treat as an intrinsic
- if (intrinsicID == CORINFO_INTRINSIC_Object_GetType ||
- intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
- intrinsicID == CORINFO_INTRINSIC_TypeNEQ ||
- intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
+ if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
+ intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
{
call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
}
}
assert(sig);
- assert(clsHnd || (opcode == CEE_CALLI)); //We're never verifying for CALLI, so this is not set.
+ assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
/* Some sanity checks */
// CALL_VIRT and NEWOBJ must have a THIS pointer
- assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) ||
- (sig->callConv & CORINFO_CALLCONV_HASTHIS));
+ assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
// static bit and hasThis are negations of one another
- assert(((mflags & CORINFO_FLG_STATIC) != 0) ==
- ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
- assert(call != 0);
+ assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
+ assert(call != nullptr);
/*-------------------------------------------------------------------------
* Check special-cases etc
*/
-
/* Special case - Check if it is a call to Delegate.Invoke(). */
if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
{
assert(!compIsForInlining());
- assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
+ assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(mflags & CORINFO_FLG_FINAL);
/* Set the delegate flag */
@@ -6487,7 +6600,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
actualMethodRetTypeSigClass = sig->retTypeSigClass;
if (varTypeIsStruct(callRetTyp))
{
- callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
+ callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
call->gtType = callRetTyp;
}
@@ -6500,8 +6613,8 @@ var_types Compiler::impImportCall(OPCODE opcode,
}
#endif // !FEATURE_VARARG
- if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
- (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
+ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
+ (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
{
assert(!compIsForInlining());
@@ -6520,7 +6633,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
#ifdef _TARGET_X86_
if (canTailCall)
{
- canTailCall = false;
+ canTailCall = false;
szCanTailCallFailReason = "Callee is varargs";
}
#endif
@@ -6528,10 +6641,10 @@ var_types Compiler::impImportCall(OPCODE opcode,
/* Get the total number of arguments - this is already correct
* for CALLI - for methods we have to get it from the call site */
- if (opcode != CEE_CALLI)
+ if (opcode != CEE_CALLI)
{
#ifdef DEBUG
- unsigned numArgsDef = sig->numArgs;
+ unsigned numArgsDef = sig->numArgs;
#endif
eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
@@ -6539,25 +6652,23 @@ var_types Compiler::impImportCall(OPCODE opcode,
// We cannot lazily obtain the signature of a vararg call because using its method
// handle will give us only the declared argument list, not the full argument list.
assert(call->gtCall.callSig == nullptr);
- call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
+ call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
*call->gtCall.callSig = *sig;
#endif
- // For vararg calls we must be sure to load the return type of the
- // method actually being called, as well as the return types of the
+ // For vararg calls we must be sure to load the return type of the
+            // method actually being called, as well as the return types
// specified in the vararg signature. With type equivalency, these types
// may not be the same.
if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
{
- if (actualMethodRetTypeSigClass != 0 &&
- sig->retType != CORINFO_TYPE_CLASS &&
- sig->retType != CORINFO_TYPE_BYREF &&
- sig->retType != CORINFO_TYPE_PTR &&
- sig->retType != CORINFO_TYPE_VAR)
- {
- // Make sure that all valuetypes (including enums) that we push are loaded.
+ if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
+ sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
+ sig->retType != CORINFO_TYPE_VAR)
+ {
+ // Make sure that all valuetypes (including enums) that we push are loaded.
                    // This is to guarantee that if a GC is triggered from the prestub of this method,
- // all valuetypes in the method signature are already loaded.
+ // all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
@@ -6571,11 +6682,11 @@ var_types Compiler::impImportCall(OPCODE opcode,
* it on the operand stack because we may overflow, so we append it
* to the arg list next after we pop them */
}
-
+
if (mflags & CORINFO_FLG_SECURITYCHECK)
- {
+ {
assert(!compIsForInlining());
-
+
// Need security prolog/epilog callouts when there is
// imperative security in the method. This is to give security a
// chance to do any setup in the prolog and cleanup in the epilog if needed.
@@ -6591,9 +6702,9 @@ var_types Compiler::impImportCall(OPCODE opcode,
else
{
tiSecurityCalloutNeeded = true;
-
- // If the current method calls a method which needs a security check,
- // (i.e. the method being compiled has imperative security)
+
+ // If the current method calls a method which needs a security check,
+ // (i.e. the method being compiled has imperative security)
// we need to reserve a slot for the security object in
// the current method's stack frame
opts.compNeedSecurityCheck = true;
@@ -6613,23 +6724,20 @@ var_types Compiler::impImportCall(OPCODE opcode,
// This needs to be cleaned up on return
if (canTailCall)
{
- canTailCall = false;
+ canTailCall = false;
szCanTailCallFailReason = "Callee is native";
- }
-
+ }
+
checkForSmallType = true;
-
+
impPopArgsForUnmanagedCall(call, sig);
-
+
goto DONE;
}
- else if ((opcode == CEE_CALLI) &&
- (
- ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
- ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
- ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
- ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)
- ))
+ else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
+ ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
+ ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
+ ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
{
if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
{
@@ -6664,30 +6772,30 @@ var_types Compiler::impImportCall(OPCODE opcode,
// Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
// we won't allow this tree to participate in any CSE logic
//
- cookie->gtFlags |= GTF_DONT_CSE;
+ cookie->gtFlags |= GTF_DONT_CSE;
cookieConst->gtFlags |= GTF_DONT_CSE;
call->gtCall.gtCallCookie = cookie;
if (canTailCall)
{
- canTailCall = false;
+ canTailCall = false;
szCanTailCallFailReason = "PInvoke calli";
}
}
-
+
/*-------------------------------------------------------------------------
* Create the argument list
*/
//-------------------------------------------------------------------------
- // Special case - for varargs we have an implicit last argument
+ // Special case - for varargs we have an implicit last argument
- if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
+ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
assert(!compIsForInlining());
- void * varCookie, *pVarCookie;
+ void *varCookie, *pVarCookie;
if (!info.compCompHnd->canGetVarArgsHandle(sig))
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
@@ -6696,9 +6804,9 @@ var_types Compiler::impImportCall(OPCODE opcode,
varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
assert((!varCookie) != (!pVarCookie));
- GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
-
- assert(extraArg == NULL);
+ GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
+
+ assert(extraArg == nullptr);
extraArg = gtNewArgList(cookie);
}
@@ -6714,33 +6822,37 @@ var_types Compiler::impImportCall(OPCODE opcode,
// (c) To shared-code per-instantiation non-generic static methods in generic
// classes and structs; the extra parameter is the type handle
// (d) To shared-code generic methods; the extra parameter is an
- // exact-instantiation MethodDesc
+ // exact-instantiation MethodDesc
//
// We also set the exact type context associated with the call so we can
// inline the call correctly later on.
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
{
- assert (call->gtCall.gtCallType == CT_USER_FUNC);
- if (clsHnd == 0)
+ assert(call->gtCall.gtCallType == CT_USER_FUNC);
+ if (clsHnd == nullptr)
+ {
NO_WAY("CALLI on parameterized type");
+ }
- assert (opcode != CEE_CALLI);
+ assert(opcode != CEE_CALLI);
GenTreePtr instParam;
- BOOL runtimeLookup;
+ BOOL runtimeLookup;
// Instantiated generic method
if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
{
- CORINFO_METHOD_HANDLE exactMethodHandle = (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
+ CORINFO_METHOD_HANDLE exactMethodHandle =
+ (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
if (!exactContextNeedsRuntimeLookup)
{
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
- instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
+ instParam =
+ impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
if (instParam == nullptr)
{
return callRetTyp;
@@ -6763,19 +6875,20 @@ var_types Compiler::impImportCall(OPCODE opcode,
}
}
- // otherwise must be an instance method in a generic struct,
+ // otherwise must be an instance method in a generic struct,
// a static method in a generic type, or a runtime-generated array method
else
- {
+ {
assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
- CORINFO_CLASS_HANDLE exactClassHandle = (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
+ CORINFO_CLASS_HANDLE exactClassHandle =
+ (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
return callRetTyp;
}
-
+
if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
{
// We indicate "readonly" to the Address operation by using a null
@@ -6788,7 +6901,8 @@ var_types Compiler::impImportCall(OPCODE opcode,
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
- instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
+ instParam =
+ impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
if (instParam == NULL)
{
return callRetTyp;
@@ -6804,34 +6918,37 @@ var_types Compiler::impImportCall(OPCODE opcode,
else
{
instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
- if (instParam == NULL)
+ if (instParam == nullptr)
{
return callRetTyp;
}
}
}
- assert(extraArg == NULL);
+ assert(extraArg == nullptr);
extraArg = gtNewArgList(instParam);
}
// Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
// to inline 'polytypic' operations such as static field accesses, type tests and method calls which
// rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
- // exactContextHnd is not currently required when inlining shared generic code into shared
+ // exactContextHnd is not currently required when inlining shared generic code into shared
// generic code, since the inliner aborts whenever shared code polytypic operations are encountered
// (e.g. anything marked needsRuntimeLookup)
if (exactContextNeedsRuntimeLookup)
- exactContextHnd = 0;
+ {
+ exactContextHnd = nullptr;
+ }
//-------------------------------------------------------------------------
// The main group of arguments
args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
-
if (args)
+ {
call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
+ }
//-------------------------------------------------------------------------
// The "this" pointer
@@ -6841,12 +6958,14 @@ var_types Compiler::impImportCall(OPCODE opcode,
GenTreePtr obj;
if (opcode == CEE_NEWOBJ)
+ {
obj = newobjThis;
- else
+ }
+ else
{
obj = impPopStack().val;
obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
- if (compDonotInline())
+ if (compDonotInline())
{
return callRetTyp;
}
@@ -6854,7 +6973,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
/* Is this a virtual or interface call? */
- if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
+ if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
{
/* only true object pointers can be virtual */
@@ -6863,15 +6982,17 @@ var_types Compiler::impImportCall(OPCODE opcode,
else
{
if (impIsThis(obj))
+ {
call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
+ }
}
/* Store the "this" value in the call */
- call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
+ call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
call->gtCall.gtCallObjp = obj;
}
-
+
//-------------------------------------------------------------------------
// The "this" pointer for "newobj"
@@ -6879,13 +7000,13 @@ var_types Compiler::impImportCall(OPCODE opcode,
{
if (clsFlags & CORINFO_FLG_VAROBJSIZE)
{
- assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
+ assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
            // This is a 'new' of a variable sized object, where
// the constructor is to return the object. In this case
// the constructor claims to return VOID but we know it
// actually returns the new object
assert(callRetTyp == TYP_VOID);
- callRetTyp = TYP_REF;
+ callRetTyp = TYP_REF;
call->gtType = TYP_REF;
impSpillSpecialSideEff();
@@ -6894,12 +7015,12 @@ var_types Compiler::impImportCall(OPCODE opcode,
else
{
if (clsFlags & CORINFO_FLG_DELEGATE)
- {
+ {
                // The new inliner morphs it in impImportCall.
// This will allow us to inline the call to the delegate constructor.
call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
- }
-
+ }
+
if (!bIntrinsicImported)
{
@@ -6926,8 +7047,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
if (clsFlags & CORINFO_FLG_VALUECLASS)
{
- assert(newobjThis->gtOper == GT_ADDR &&
- newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
+ assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
@@ -6938,11 +7058,11 @@ var_types Compiler::impImportCall(OPCODE opcode,
{
// In coreclr the callout can be inserted even if verification is disabled
// so we cannot rely on tiVerificationNeeded alone
-
+
// We must have inserted the callout. Get the real newobj.
newobjThis = newobjThis->gtOp.gtOp2;
}
-
+
assert(newobjThis->gtOper == GT_LCL_VAR);
impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
}
@@ -6951,11 +7071,11 @@ var_types Compiler::impImportCall(OPCODE opcode,
}
DONE:
-
+
if (tailCall)
{
// This check cannot be performed for implicit tail calls for the reason
- // that impIsImplicitTailCallCandidate() is not checking whether return
+ // that impIsImplicitTailCallCandidate() is not checking whether return
// types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
// As a result it is possible that in the following case, we find that
// the type stack is non-empty if Callee() is considered for implicit
@@ -6966,7 +7086,7 @@ DONE:
// as we don't have required info or need to duplicate some of the logic of
// ImpImportCall().
//
- // For implicit tail calls, we perform this check after return types are
+ // For implicit tail calls, we perform this check after return types are
// known to be compatible.
if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
{
@@ -6975,12 +7095,13 @@ DONE:
// Note that we can not relax this condition with genActualType() as
// the calling convention dictates that the caller of a function with
- // a small-typed return value is responsible for normalizing the return val
+ // a small-typed return value is responsible for normalizing the return val
- if (canTailCall && !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass,
- callRetTyp, callInfo->sig.retTypeClass))
+ if (canTailCall &&
+ !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
+ callInfo->sig.retTypeClass))
{
- canTailCall = false;
+ canTailCall = false;
szCanTailCallFailReason = "Return types are not tail call compatible";
}
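
Concretely, this is why a method declared to return int cannot tail-call a callee declared to return short: the caller of the tail-calling method would receive a value that nobody normalized to 32 bits. The rule reduces to a size comparison; a toy restatement under that assumption (hypothetical helper, not the JIT's code):

#include <cstddef>

// Toy version of the compatibility rule above: enregisterable return types are
// tail-call compatible only when both sides occupy the same number of bytes in
// the return register.
static bool RetSizesTailCallCompatible(size_t callerRetSize, size_t calleeRetSize)
{
    return (callerRetSize != 0) && (callerRetSize == calleeRetSize);
}
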
@@ -6991,7 +7112,7 @@ DONE:
// JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
// in JIT64, not an InvalidProgramException.
Verify(false, "Stack should be empty after tailcall");
-#else // _TARGET_64BIT_
+#else // _TARGET_64BIT_
BADCODE("Stack should be empty after tailcall");
#endif //!_TARGET_64BIT_
}
@@ -7000,24 +7121,27 @@ DONE:
// assert(compCurBB is not a try block protected by a finally block);
// Check for permission to tailcall
- bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
+ bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
if (canTailCall)
{
// True virtual or indirect calls, shouldn't pass in a callee handle.
- CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) || ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)) ? NULL : methHnd;
- GenTreePtr thisArg = call->gtCall.gtCallObjp;
+ CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
+ ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
+ ? nullptr
+ : methHnd;
+ GenTreePtr thisArg = call->gtCall.gtCallObjp;
if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
- {
+ {
canTailCall = true;
if (explicitTailCall)
{
// In case of explicit tail calls, mark it so that it is not considered
// for in-lining.
- call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
+ call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
#ifdef DEBUG
if (verbose)
{
@@ -7025,7 +7149,7 @@ DONE:
printTreeID(call);
printf("\n");
}
-#endif
+#endif
}
else
{
@@ -7047,20 +7171,20 @@ DONE:
printTreeID(call);
printf("\n");
}
-#endif
+#endif
-#else //!FEATURE_TAILCALL_OPT
+#else //! FEATURE_TAILCALL_OPT
NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
-#endif //FEATURE_TAILCALL_OPT
+#endif // FEATURE_TAILCALL_OPT
}
// we can't report success just yet...
}
- else
+ else
{
canTailCall = false;
- // canTailCall reported its reasons already
+// canTailCall reported its reasons already
#ifdef DEBUG
if (verbose)
{
@@ -7075,7 +7199,7 @@ DONE:
{
// If this assert fires it means that canTailCall was set to false without setting a reason!
assert(szCanTailCallFailReason != nullptr);
-
+
#ifdef DEBUG
if (verbose)
{
@@ -7083,16 +7207,17 @@ DONE:
printTreeID(call);
printf(": %s\n", szCanTailCallFailReason);
}
-#endif
- info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL, szCanTailCallFailReason);
+#endif
+ info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
+ szCanTailCallFailReason);
}
}
- // Note: we assume that small return types are already normalized by the managed callee
- // or by the pinvoke stub for calls to unmanaged code.
+// Note: we assume that small return types are already normalized by the managed callee
+// or by the pinvoke stub for calls to unmanaged code.
DONE_CALL:
-
+
if (!bIntrinsicImported)
{
//
@@ -7100,33 +7225,31 @@ DONE_CALL:
//
assert(call->gtOper == GT_CALL);
- assert(sig != NULL);
+ assert(sig != nullptr);
// Tail calls require us to save the call site's sig info so we can obtain an argument
// copying thunk from the EE later on.
if (call->gtCall.callSig == nullptr)
{
- call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
+ call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
*call->gtCall.callSig = *sig;
}
-
- if (compIsForInlining() && opcode == CEE_CALLVIRT)
- {
+
+ if (compIsForInlining() && opcode == CEE_CALLVIRT)
+ {
GenTreePtr callObj = call->gtCall.gtCallObjp;
- assert(callObj != NULL);
-
- unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
-
+ assert(callObj != nullptr);
+
+ unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
+
if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
- impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs,
- callObj,
- impInlineInfo->inlArgInfo))
+ impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
+ impInlineInfo->inlArgInfo))
{
impInlineInfo->thisDereferencedFirst = true;
}
}
-
#if defined(DEBUG) || defined(INLINE_DATA)
// Keep track of the raw IL offset of the call
@@ -7134,12 +7257,12 @@ DONE_CALL:
#endif // defined(DEBUG) || defined(INLINE_DATA)
- // Is it an inline candidate?
+ // Is it an inline candidate?
impMarkInlineCandidate(call, exactContextHnd, callInfo);
}
// Push or append the result of the call
- if (callRetTyp == TYP_VOID)
+ if (callRetTyp == TYP_VOID)
{
if (opcode == CEE_NEWOBJ)
{
@@ -7149,7 +7272,7 @@ DONE_CALL:
}
else
{
- impAppendTree(call, (unsigned) CHECK_SPILL_ALL, impCurStmtOffs);
+ impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
}
}
else
@@ -7163,9 +7286,9 @@ DONE_CALL:
// Find the return type used for verification by interpreting the method signature.
// NB: we are clobbering the already established sig.
- if (tiVerificationNeeded)
+ if (tiVerificationNeeded)
{
- //Actually, we never get the sig for the original method.
+ // Actually, we never get the sig for the original method.
sig = &(callInfo->verSig);
}
@@ -7185,7 +7308,7 @@ DONE_CALL:
// didn't they wouldn't be verifiable. This is also covering
// the Address() helper for multidimensional arrays.
if (tiRetVal.IsByRef())
- {
+ {
tiRetVal.SetIsPermanentHomeByRef();
}
}
@@ -7206,7 +7329,7 @@ DONE_CALL:
{
assert(opts.OptEnabled(CLFLG_INLINING));
- // Make the call its own tree (spill the stack if needed).
+ // Make the call its own tree (spill the stack if needed).
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
// TODO: Still using the widened type.
@@ -7217,16 +7340,14 @@ DONE_CALL:
if (!bIntrinsicImported)
{
//-------------------------------------------------------------------------
- //
+ //
/* If the call is of a small type and the callee is managed, the callee will normalize the result
before returning.
However, we need to normalize small type values returned by unmanaged
functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
if we use the shorter inlined pinvoke stub. */
- if (checkForSmallType &&
- varTypeIsIntegral(callRetTyp) &&
- genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
+ if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
{
call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
}
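
The inserted cast exists because an inlined pinvoke stub hands back the raw return register, whose upper bits are undefined for small return types; a managed callee would have widened the value itself. A standalone illustration of the same normalization, assuming a signed 8-bit result (names are hypothetical):

#include <cstdint>

// Sketch: re-extend a small unmanaged return value to 32 bits, which is what the
// widening cast inserted above accomplishes for the call's small return type.
static int32_t NormalizeSmallUnmanagedReturn(uint32_t rawReturnRegister)
{
    return (int32_t)(int8_t)(rawReturnRegister & 0xFF); // sign-extend the low byte
}
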
@@ -7235,8 +7356,8 @@ DONE_CALL:
impPushOnStack(call, tiRetVal);
}
- //VSD functions get a new call target each time we getCallInfo, so clear the cache.
- //Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
+ // VSD functions get a new call target each time we getCallInfo, so clear the cache.
+ // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
// if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
// callInfoCache.uncacheCallInfo();
@@ -7246,17 +7367,17 @@ DONE_CALL:
#pragma warning(pop)
#endif
-bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO * methInfo)
+bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
{
- CorInfoType corType = methInfo->args.retType;
-
- if ((corType == CORINFO_TYPE_VALUECLASS) ||(corType == CORINFO_TYPE_REFANY))
+ CorInfoType corType = methInfo->args.retType;
+
+ if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
{
// We have some kind of STRUCT being returned
structPassingKind howToReturnStruct = SPK_Unknown;
- var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
+ var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
if (howToReturnStruct == SPK_ByReference)
{
@@ -7274,7 +7395,7 @@ var_types Compiler::impImportJitTestLabelMark(int numArgs)
TestLabelAndNum tlAndN;
if (numArgs == 2)
{
- tlAndN.m_num = 0;
+ tlAndN.m_num = 0;
StackEntry se = impPopStack();
assert(se.seTypeInfo.GetType() == TI_INT);
GenTreePtr val = se.val;
@@ -7288,7 +7409,7 @@ var_types Compiler::impImportJitTestLabelMark(int numArgs)
GenTreePtr val = se.val;
assert(val->IsCnsIntOrI());
tlAndN.m_num = val->AsIntConCommon()->IconValue();
- se = impPopStack();
+ se = impPopStack();
assert(se.seTypeInfo.GetType() == TI_INT);
val = se.val;
assert(val->IsCnsIntOrI());
@@ -7300,7 +7421,7 @@ var_types Compiler::impImportJitTestLabelMark(int numArgs)
}
StackEntry expSe = impPopStack();
- GenTreePtr node = expSe.val;
+ GenTreePtr node = expSe.val;
// There are a small number of special cases, where we actually put the annotation on a subnode.
if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
@@ -7320,7 +7441,6 @@ var_types Compiler::impImportJitTestLabelMark(int numArgs)
GetNodeTestData()->Set(node, tlAndN);
}
-
impPushOnStack(node, expSe.seTypeInfo);
return node->TypeGet();
}
@@ -7338,8 +7458,7 @@ var_types Compiler::impImportJitTestLabelMark(int numArgs)
// Return Value:
// Returns new GenTree node after fixing struct return of call node
//
-GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call,
- CORINFO_CLASS_HANDLE retClsHnd)
+GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
{
assert(call->gtOper == GT_CALL);
@@ -7353,7 +7472,7 @@ GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call
GenTreeCall* callNode = call->AsCall();
#if FEATURE_MULTIREG_RET
- // Initialize Return type descriptor of call node
+ // Initialize Return type descriptor of call node
ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
retTypeDesc->InitializeStructReturnType(this, retClsHnd);
#endif // FEATURE_MULTIREG_RET
@@ -7372,7 +7491,7 @@ GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call
{
if (retRegCount == 1)
{
- // struct returned in a single register
+ // struct returned in a single register
callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
}
else
@@ -7386,7 +7505,7 @@ GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call
// tmp = call
//
// No need to assign a multi-reg struct to a local var if:
- // - It is a tail call or
+ // - It is a tail call or
// - The call is marked for in-lining later
return impAssignMultiRegTypeToVar(call, retClsHnd);
}
@@ -7432,11 +7551,11 @@ GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call
#endif // _TARGET_ARM_
// Check for TYP_STRUCT type that wraps a primitive type
- // Such structs are returned using a single register
+ // Such structs are returned using a single register
// and we change the return type on those calls here.
//
structPassingKind howToReturnStruct;
- var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
+ var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
if (howToReturnStruct == SPK_ByReference)
{
@@ -7450,9 +7569,13 @@ GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call
// ToDo: Refactor this common code sequence into its own method as it is used 4+ times
if ((returnType == TYP_LONG) && (compLongUsed == false))
+ {
compLongUsed = true;
+ }
else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
+ {
compFloatingPointUsed = true;
+ }
#if FEATURE_MULTIREG_RET
unsigned retRegCount = retTypeDesc->GetReturnRegCount();
@@ -7466,13 +7589,12 @@ GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call
// tmp = call
//
// No need to assign a multi-reg struct to a local var if:
- // - It is a tail call or
+ // - It is a tail call or
// - The call is marked for in-lining later
return impAssignMultiRegTypeToVar(call, retClsHnd);
}
}
#endif // FEATURE_MULTIREG_RET
-
}
#endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -7480,9 +7602,8 @@ GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call
return call;
}
-
//-------------------------------------------------------------------------------------
-// impInitCallLongReturn:
+// impInitCallLongReturn:
// Initialize the ReturnTypDesc for a call that returns a TYP_LONG
//
// Arguments:
@@ -7491,7 +7612,7 @@ GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call
// Return Value:
// Returns new GenTree node after initializing the ReturnTypeDesc of call node
//
-GenTreePtr Compiler::impInitCallLongReturn(GenTreePtr call)
+GenTreePtr Compiler::impInitCallLongReturn(GenTreePtr call)
{
assert(call->gtOper == GT_CALL);
@@ -7523,7 +7644,7 @@ GenTreePtr Compiler::impInitCallLongReturn(GenTreePtr call)
Note that this method is only call for !_TARGET_X86_
*/
-GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
+GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
{
assert(varTypeIsStruct(info.compRetType));
assert(info.compRetBuffArg == BAD_VAR_NUM);
@@ -7544,7 +7665,7 @@ GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CL
if (op->gtOper == GT_LCL_VAR)
{
// Make sure that this struct stays in memory and doesn't get promoted.
- unsigned lclNum = op->gtLclVarCommon.gtLclNum;
+ unsigned lclNum = op->gtLclVarCommon.gtLclNum;
lvaTable[lclNum].lvIsMultiRegRet = true;
return op;
@@ -7557,7 +7678,7 @@ GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CL
return impAssignMultiRegTypeToVar(op, retClsHnd);
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
assert(info.compRetNativeType != TYP_STRUCT);
#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -7573,7 +7694,7 @@ GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CL
lvaTable[lclNum].lvIsMultiRegRet = true;
return op;
}
-
+
if (op->gtOper == GT_CALL)
{
if (op->gtCall.IsVarargs())
@@ -7670,7 +7791,7 @@ REDO_RETURN_NODE:
// feeds the return, then the call must be returning the
// same structure/class/type.
//
- unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
+ unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
// No need to spill anything as we're about to return.
impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
@@ -7681,7 +7802,7 @@ REDO_RETURN_NODE:
op->ChangeOper(GT_LCL_FLD);
}
else
- {
+ {
assert(info.compRetNativeType == op->gtCall.gtReturnType);
// Don't change the gtType of the node just yet, it will get changed later.
@@ -7693,13 +7814,11 @@ REDO_RETURN_NODE:
op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
}
-
op->gtType = info.compRetNativeType;
return op;
}
-
/*****************************************************************************
CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
finally-protected try. We find the finally blocks protecting the current
@@ -7715,7 +7834,7 @@ REDO_RETURN_NODE:
#if !FEATURE_EH_FUNCLETS
-void Compiler::impImportLeave(BasicBlock * block)
+void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
@@ -7726,31 +7845,28 @@ void Compiler::impImportLeave(BasicBlock * block)
}
#endif // DEBUG
- bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
- unsigned blkAddr = block->bbCodeOffs;
- BasicBlock * leaveTarget = block->bbJumpDest;
- unsigned jmpAddr = leaveTarget->bbCodeOffs;
+ bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
+ unsigned blkAddr = block->bbCodeOffs;
+ BasicBlock* leaveTarget = block->bbJumpDest;
+ unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack, spill side effects, and set stack to 0
- impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave") );
- verCurrentState.esStackDepth = 0;
+ impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
+ verCurrentState.esStackDepth = 0;
assert(block->bbJumpKind == BBJ_LEAVE);
- assert(fgBBs == (BasicBlock**)0xCDCD ||
- fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
+ assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
- BasicBlock * step = DUMMY_INIT(NULL);
- unsigned encFinallies = 0; // Number of enclosing finallies.
- GenTreePtr endCatches = NULL;
- GenTreePtr endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
+ BasicBlock* step = DUMMY_INIT(NULL);
+ unsigned encFinallies = 0; // Number of enclosing finallies.
+ GenTreePtr endCatches = NULL;
+ GenTreePtr endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
- unsigned XTnum;
- EHblkDsc * HBtab;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
// Grab the handler offsets
@@ -7763,8 +7879,7 @@ void Compiler::impImportLeave(BasicBlock * block)
* If so, we need to call CORINFO_HELP_ENDCATCH.
*/
- if ( jitIsBetween(blkAddr, hndBeg, hndEnd) &&
- !jitIsBetween(jmpAddr, hndBeg, hndEnd))
+ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
{
// Can't CEE_LEAVE out of a finally/fault handler
if (HBtab->HasFinallyOrFaultHandler())
@@ -7775,21 +7890,20 @@ void Compiler::impImportLeave(BasicBlock * block)
// Make a list of all the currently pending endCatches
if (endCatches)
- endCatches = gtNewOperNode(GT_COMMA, TYP_VOID,
- endCatches, endCatch);
+ endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
else
endCatches = endCatch;
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to CORINFO_HELP_ENDCATCH\n",
- block->bbNum, XTnum);
+ printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
+ "CORINFO_HELP_ENDCATCH\n",
+ block->bbNum, XTnum);
}
#endif
}
- else if (HBtab->HasFinallyHandler() &&
- jitIsBetween(blkAddr, tryBeg, tryEnd) &&
+ else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
!jitIsBetween(jmpAddr, tryBeg, tryEnd))
{
/* This is a finally-protected try we are jumping out of */
@@ -7800,15 +7914,15 @@ void Compiler::impImportLeave(BasicBlock * block)
exceptions to work correctly.
Else, just use append to the original block */
- BasicBlock * callBlock;
+ BasicBlock* callBlock;
assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
if (encFinallies == 0)
{
assert(step == DUMMY_INIT(NULL));
- callBlock = block;
- callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
+ callBlock = block;
+ callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
if (endCatches)
impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
@@ -7816,8 +7930,9 @@ void Compiler::impImportLeave(BasicBlock * block)
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY block BB%02u [%08p]\n",
- callBlock->bbNum, dspPtr(callBlock));
+ printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
+ "block BB%02u [%08p]\n",
+ callBlock->bbNum, dspPtr(callBlock));
}
#endif
}
@@ -7826,13 +7941,10 @@ void Compiler::impImportLeave(BasicBlock * block)
assert(step != DUMMY_INIT(NULL));
/* Calling the finally block */
- callBlock = fgNewBBinRegion(BBJ_CALLFINALLY,
- XTnum+1,
- 0,
- step);
+ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
assert(step->bbJumpKind == BBJ_ALWAYS);
- step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
- // finally in the chain)
+ step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
+ // finally in the chain)
step->bbJumpDest->bbRefs++;
/* The new block will inherit this block's weight */
@@ -7842,8 +7954,9 @@ void Compiler::impImportLeave(BasicBlock * block)
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u [%08p]\n",
- callBlock->bbNum, dspPtr(callBlock));
+ printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
+ "[%08p]\n",
+ callBlock->bbNum, dspPtr(callBlock));
}
#endif
@@ -7851,20 +7964,20 @@ void Compiler::impImportLeave(BasicBlock * block)
if (endCatches)
{
- lastStmt = gtNewStmt(endCatches);
- endLFin->gtNext = lastStmt;
- lastStmt->gtPrev= endLFin;
+ lastStmt = gtNewStmt(endCatches);
+ endLFin->gtNext = lastStmt;
+ lastStmt->gtPrev = endLFin;
}
else
{
- lastStmt = endLFin;
+ lastStmt = endLFin;
}
// note that this sets BBF_IMPORTED on the block
impEndTreeList(callBlock, endLFin, lastStmt);
}
- step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
+ step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
/* The new block will inherit this block's weight */
step->setBBWeight(block->bbWeight);
step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
@@ -7872,18 +7985,19 @@ void Compiler::impImportLeave(BasicBlock * block)
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block BB%02u [%08p]\n",
- step->bbNum, dspPtr(step));
+ printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
+ "BB%02u [%08p]\n",
+ step->bbNum, dspPtr(step));
}
#endif
unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
assert(finallyNesting <= compHndBBtabCount);
- callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
- endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
- endLFin = gtNewStmt(endLFin);
- endCatches = NULL;
+ callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
+ endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
+ endLFin = gtNewStmt(endLFin);
+ endCatches = NULL;
encFinallies++;
@@ -7898,7 +8012,7 @@ void Compiler::impImportLeave(BasicBlock * block)
if (encFinallies == 0)
{
assert(step == DUMMY_INIT(NULL));
- block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
+ block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
if (endCatches)
impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
@@ -7906,8 +8020,9 @@ void Compiler::impImportLeave(BasicBlock * block)
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS block BB%02u [%08p]\n",
- block->bbNum, dspPtr(block));
+ printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
+ "block BB%02u [%08p]\n",
+ block->bbNum, dspPtr(block));
}
#endif
}
@@ -7916,17 +8031,14 @@ void Compiler::impImportLeave(BasicBlock * block)
// If leaveTarget is the start of another try block, we want to make sure that
// we do not insert finalStep into that try block. Hence, we find the enclosing
// try block.
- unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
+ unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
// Insert a new BB either in the try region indicated by tryIndex or
// the handler region indicated by leaveTarget->bbHndIndex,
// depending on which is the inner region.
- BasicBlock * finalStep = fgNewBBinRegion(BBJ_ALWAYS,
- tryIndex,
- leaveTarget->bbHndIndex,
- step);
+ BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
- step->bbJumpDest = finalStep;
+ step->bbJumpDest = finalStep;
/* The new block will inherit this block's weight */
finalStep->setBBWeight(block->bbWeight);
@@ -7936,17 +8048,17 @@ void Compiler::impImportLeave(BasicBlock * block)
if (verbose)
{
printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
- encFinallies, finalStep->bbNum, dspPtr(finalStep));
+ encFinallies, finalStep->bbNum, dspPtr(finalStep));
}
#endif
-
+
GenTreePtr lastStmt;
if (endCatches)
{
- lastStmt = gtNewStmt(endCatches);
- endLFin->gtNext = lastStmt;
- lastStmt->gtPrev = endLFin;
+ lastStmt = gtNewStmt(endCatches);
+ endLFin->gtNext = lastStmt;
+ lastStmt->gtPrev = endLFin;
}
else
{
@@ -7955,7 +8067,7 @@ void Compiler::impImportLeave(BasicBlock * block)
impEndTreeList(finalStep, endLFin, lastStmt);
- finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
+ finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
// Queue up the jump target for importing
@@ -7984,7 +8096,7 @@ void Compiler::impImportLeave(BasicBlock * block)
#else // FEATURE_EH_FUNCLETS
-void Compiler::impImportLeave(BasicBlock * block)
+void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
@@ -7995,21 +8107,20 @@ void Compiler::impImportLeave(BasicBlock * block)
}
#endif // DEBUG
- bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
- unsigned blkAddr = block->bbCodeOffs;
- BasicBlock * leaveTarget = block->bbJumpDest;
- unsigned jmpAddr = leaveTarget->bbCodeOffs;
+ bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
+ unsigned blkAddr = block->bbCodeOffs;
+ BasicBlock* leaveTarget = block->bbJumpDest;
+ unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill side effects and set the stack depth to 0
- impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave") );
- verCurrentState.esStackDepth = 0;
+ impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
+ verCurrentState.esStackDepth = 0;
assert(block->bbJumpKind == BBJ_LEAVE);
- assert(fgBBs == (BasicBlock**)0xCDCD ||
- fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
+ assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
- BasicBlock * step = NULL;
+ BasicBlock* step = nullptr;
enum StepType
{
@@ -8019,7 +8130,7 @@ void Compiler::impImportLeave(BasicBlock * block)
// Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
// That is, is step->bbJumpDest where a finally will return to?
ST_FinallyReturn,
-
+
// The step block is a catch return.
ST_Catch,
@@ -8028,12 +8139,10 @@ void Compiler::impImportLeave(BasicBlock * block)
};
StepType stepType = ST_None;
- unsigned XTnum;
- EHblkDsc * HBtab;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
// Grab the handler offsets
@@ -8045,37 +8154,42 @@ void Compiler::impImportLeave(BasicBlock * block)
/* Is this a catch-handler we are CEE_LEAVEing out of?
*/
- if ( jitIsBetween(blkAddr, hndBeg, hndEnd) &&
- !jitIsBetween(jmpAddr, hndBeg, hndEnd))
+ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
{
// Can't CEE_LEAVE out of a finally/fault handler
if (HBtab->HasFinallyOrFaultHandler())
+ {
BADCODE("leave out of fault/finally block");
+ }
/* We are jumping out of a catch */
- if (step == NULL)
+ if (step == nullptr)
{
- step = block;
- step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
- stepType = ST_Catch;
+ step = block;
+ step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
+ stepType = ST_Catch;
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET block\n", XTnum, step->bbNum);
+ printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
+ "block\n",
+ XTnum, step->bbNum);
}
#endif
}
else
{
- BasicBlock * exitBlock;
+ BasicBlock* exitBlock;
- /* Create a new catch exit block in the catch region for the existing step block to jump to in this scope */
+ /* Create a new catch exit block in the catch region for the existing step block to jump to in this
+ * scope */
exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
- step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch exit) returns to this block
+ step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
+ // exit) returns to this block
step->bbJumpDest->bbRefs++;
#if defined(_TARGET_ARM_)
@@ -8100,34 +8214,36 @@ void Compiler::impImportLeave(BasicBlock * block)
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum, exitBlock->bbNum);
+ printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
+ exitBlock->bbNum);
}
#endif
}
}
- else if (HBtab->HasFinallyHandler() &&
- jitIsBetween(blkAddr, tryBeg, tryEnd) &&
+ else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
!jitIsBetween(jmpAddr, tryBeg, tryEnd))
{
/* We are jumping out of a finally-protected try */
- BasicBlock * callBlock;
+ BasicBlock* callBlock;
- if (step == NULL)
+ if (step == nullptr)
{
#if FEATURE_EH_CALLFINALLY_THUNKS
// Put the call to the finally in the enclosing region.
- unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
- unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
+ unsigned callFinallyTryIndex =
+ (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
+ unsigned callFinallyHndIndex =
+ (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
// Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
// the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
// which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
// next block, and flow optimizations will remove it.
- block->bbJumpKind = BBJ_ALWAYS;
- block->bbJumpDest = callBlock;
+ block->bbJumpKind = BBJ_ALWAYS;
+ block->bbJumpDest = callBlock;
block->bbJumpDest->bbRefs++;
/* The new block will inherit this block's weight */
@@ -8137,21 +8253,23 @@ void Compiler::impImportLeave(BasicBlock * block)
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
- XTnum, block->bbNum, callBlock->bbNum);
+ printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
+ "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
+ XTnum, block->bbNum, callBlock->bbNum);
}
#endif
-
+
#else // !FEATURE_EH_CALLFINALLY_THUNKS
- callBlock = block;
- callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
+ callBlock = block;
+ callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to BBJ_CALLFINALLY block\n",
- XTnum, callBlock->bbNum);
+ printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
+ "BBJ_CALLFINALLY block\n",
+ XTnum, callBlock->bbNum);
}
#endif
@@ -8162,7 +8280,7 @@ void Compiler::impImportLeave(BasicBlock * block)
// Calling the finally block. We already have a step block that is either the call-to-finally from a
// more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
// a 'finally'), or the step block is the return from a catch.
- //
+ //
// Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
// directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
// automatically re-raise the exception, using the return address of the catch (that is, the target
@@ -8180,9 +8298,10 @@ void Compiler::impImportLeave(BasicBlock * block)
#if FEATURE_EH_CALLFINALLY_THUNKS
if (step->bbJumpKind == BBJ_EHCATCHRET)
{
- // Need to create another step block in the 'try' region that will actually branch to the call-to-finally thunk.
+ // Need to create another step block in the 'try' region that will actually branch to the
+ // call-to-finally thunk.
BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
- step->bbJumpDest = step2;
+ step->bbJumpDest = step2;
step->bbJumpDest->bbRefs++;
step2->setBBWeight(block->bbWeight);
step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
@@ -8190,8 +8309,9 @@ void Compiler::impImportLeave(BasicBlock * block)
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
- XTnum, step->bbNum, step2->bbNum);
+ printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
+ "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
+ XTnum, step->bbNum, step2->bbNum);
}
#endif
@@ -8200,18 +8320,19 @@ void Compiler::impImportLeave(BasicBlock * block)
}
#endif // FEATURE_EH_CALLFINALLY_THUNKS
-
#if FEATURE_EH_CALLFINALLY_THUNKS
- unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
- unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
-#else // !FEATURE_EH_CALLFINALLY_THUNKS
+ unsigned callFinallyTryIndex =
+ (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
+ unsigned callFinallyHndIndex =
+ (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
+#else // !FEATURE_EH_CALLFINALLY_THUNKS
unsigned callFinallyTryIndex = XTnum + 1;
unsigned callFinallyHndIndex = 0; // don't care
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
- callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
- step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
- // finally in the chain)
+ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
+ step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
+ // finally in the chain)
step->bbJumpDest->bbRefs++;
#if defined(_TARGET_ARM_)
@@ -8230,14 +8351,15 @@ void Compiler::impImportLeave(BasicBlock * block)
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block BB%02u\n",
- XTnum, callBlock->bbNum);
+ printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
+ "BB%02u\n",
+ XTnum, callBlock->bbNum);
}
#endif
}
- step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
- stepType = ST_FinallyReturn;
+ step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
+ stepType = ST_FinallyReturn;
/* The new block will inherit this block's weight */
step->setBBWeight(block->bbWeight);
@@ -8246,17 +8368,17 @@ void Compiler::impImportLeave(BasicBlock * block)
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) block BB%02u\n",
- XTnum, step->bbNum);
+ printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
+ "block BB%02u\n",
+ XTnum, step->bbNum);
}
#endif
- callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
+ callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
invalidatePreds = true;
}
- else if (HBtab->HasCatchHandler() &&
- jitIsBetween(blkAddr, tryBeg, tryEnd) &&
+ else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
!jitIsBetween(jmpAddr, tryBeg, tryEnd))
{
// We are jumping out of a catch-protected try.
@@ -8294,12 +8416,12 @@ void Compiler::impImportLeave(BasicBlock * block)
// }
// LABEL_1:
//
- // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# compiler.
+ // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
+ // compiler.
- if ((stepType == ST_FinallyReturn) ||
- (stepType == ST_Catch))
+ if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
{
- BasicBlock * catchStep;
+ BasicBlock* catchStep;
assert(step);
@@ -8314,7 +8436,7 @@ void Compiler::impImportLeave(BasicBlock * block)
}
/* Create a new exit block in the try region for the existing step block to jump to in this scope */
- catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
+ catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
step->bbJumpDest = catchStep;
step->bbJumpDest->bbRefs++;
@@ -8335,20 +8457,22 @@ void Compiler::impImportLeave(BasicBlock * block)
{
if (stepType == ST_FinallyReturn)
{
- printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new BBJ_ALWAYS block BB%02u\n",
- XTnum, catchStep->bbNum);
+ printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
+ "BBJ_ALWAYS block BB%02u\n",
+ XTnum, catchStep->bbNum);
}
else
{
assert(stepType == ST_Catch);
- printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new BBJ_ALWAYS block BB%02u\n",
- XTnum, catchStep->bbNum);
+ printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
+ "BBJ_ALWAYS block BB%02u\n",
+ XTnum, catchStep->bbNum);
}
}
#endif // DEBUG
/* This block is the new step */
- step = catchStep;
+ step = catchStep;
stepType = ST_Try;
invalidatePreds = true;
@@ -8356,20 +8480,22 @@ void Compiler::impImportLeave(BasicBlock * block)
}
}
- if (step == NULL)
+ if (step == nullptr)
{
- block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
+ block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
#ifdef DEBUG
if (verbose)
{
- printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE block BB%02u to BBJ_ALWAYS\n", block->bbNum);
+ printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
+ "block BB%02u to BBJ_ALWAYS\n",
+ block->bbNum);
}
#endif
}
else
{
- step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
+ step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
#if defined(_TARGET_ARM_)
if (stepType == ST_FinallyReturn)
@@ -8416,7 +8542,7 @@ void Compiler::impImportLeave(BasicBlock * block)
// This is called when reimporting a leave block. It resets the JumpKind,
// JumpDest, and bbNext to the original values
-void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
+void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
{
#if FEATURE_EH_FUNCLETS
// With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
@@ -8433,7 +8559,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmp
// leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
// } finally { }
// } finally { }
- // OUTSIDE:
+ // OUTSIDE:
//
// In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
// where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block.
@@ -8443,19 +8569,19 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmp
// will be treated as pair and handled correctly.
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
- BasicBlock *dupBlock = bbNewBasicBlock(block->bbJumpKind);
- dupBlock->bbFlags = block->bbFlags;
+ BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
+ dupBlock->bbFlags = block->bbFlags;
dupBlock->bbJumpDest = block->bbJumpDest;
dupBlock->copyEHRegion(block);
dupBlock->bbCatchTyp = block->bbCatchTyp;
- // Mark this block as
+ // Mark this block as
// a) not referenced by any other block to make sure that it gets deleted
// b) weight zero
// c) prevent from being imported
- // d) as internal
+ // d) as internal
// e) as rarely run
- dupBlock->bbRefs = 0;
+ dupBlock->bbRefs = 0;
dupBlock->bbWeight = 0;
dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
@@ -8465,10 +8591,12 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmp
#ifdef DEBUG
if (verbose)
+ {
printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
+ }
#endif
}
-#endif //FEATURE_EH_FUNCLETS
+#endif // FEATURE_EH_FUNCLETS
block->bbJumpKind = BBJ_LEAVE;
fgInitBBLookup();
@@ -8485,22 +8613,25 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmp
// Get the first non-prefix opcode. Used for verification of valid combinations
// of prefixes and actual opcodes.
-static
-OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
+static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
{
- while (codeAddr < codeEndp) {
- OPCODE opcode = (OPCODE) getU1LittleEndian(codeAddr);
+ while (codeAddr < codeEndp)
+ {
+ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
codeAddr += sizeof(__int8);
- if (opcode == CEE_PREFIX1) {
- if (codeAddr >= codeEndp) {
+ if (opcode == CEE_PREFIX1)
+ {
+ if (codeAddr >= codeEndp)
+ {
break;
}
- opcode = (OPCODE) (getU1LittleEndian(codeAddr) + 256);
+ opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
codeAddr += sizeof(__int8);
}
- switch (opcode) {
+ switch (opcode)
+ {
case CEE_UNALIGNED:
case CEE_VOLATILE:
case CEE_TAILCALL:
@@ -8520,20 +8651,17 @@ OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
/*****************************************************************************/
// Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
-static
-void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
+static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
{
OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
- if ( !(
- // Opcode of all ldind and stdind happen to be in continuous, except stind.i.
- ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
- (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) ||
- (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
- (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
- // volatile. prefix is allowed with the ldsfld and stsfld
- (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))
- ) )
+ if (!(
+ // Opcodes of all ldind and stind instructions happen to be contiguous, except stind.i.
+ ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
+ (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
+ (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
+ // volatile. prefix is allowed with the ldsfld and stsfld
+ (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
{
BADCODE("Invalid opcode for unaligned. or volatile. prefix");
}
@@ -8543,9 +8671,9 @@ void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE*
#ifdef DEBUG
-#undef RETURN // undef contracts RETURN macro
+#undef RETURN // undef contracts RETURN macro
-enum controlFlow_t
+enum controlFlow_t
{
NEXT,
CALL,
@@ -8558,12 +8686,10 @@ enum controlFlow_t
META,
};
-const static
-controlFlow_t controlFlow[] =
-{
- #define OPDEF(c,s,pop,push,args,type,l,s1,s2,flow) flow,
- #include "opcode.def"
- #undef OPDEF
+const static controlFlow_t controlFlow[] = {
+#define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
+#include "opcode.def"
+#undef OPDEF
};
#endif // DEBUG
@@ -8572,43 +8698,39 @@ controlFlow_t controlFlow[] =
* Determine the result type of an arithmetic operation
* On 64-bit inserts upcasts when native int is mixed with int32
*/
-var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr * pOp1, GenTreePtr *pOp2)
+var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
{
- var_types type = TYP_UNDEF;
+ var_types type = TYP_UNDEF;
GenTreePtr op1 = *pOp1, op2 = *pOp2;
-
+
// Arithmetic operations are generally only allowed with
// primitive types, but certain operations are allowed
// with byrefs
- if ((oper == GT_SUB) &&
- (genActualType(op1->TypeGet()) == TYP_BYREF ||
- genActualType(op2->TypeGet()) == TYP_BYREF))
+ if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
{
- if ((genActualType(op1->TypeGet()) == TYP_BYREF) &&
- (genActualType(op2->TypeGet()) == TYP_BYREF))
+ if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
{
// byref1-byref2 => gives a native int
type = TYP_I_IMPL;
}
- else if (genActualTypeIsIntOrI(op1->TypeGet()) &&
- (genActualType(op2->TypeGet()) == TYP_BYREF))
- {
+ else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
+ {
// [native] int - byref => gives a native int
-
- //
- // The reason is that it is possible, in managed C++,
+
+ //
+ // The reason is that it is possible, in managed C++,
// to have a tree like this:
- //
+ //
// -
// / \
// / \
// / \
// / \
- // const(h) int addr byref
- //
+ // const(h) int addr byref
+ //
// <BUGNUM> VSW 318822 </BUGNUM>
- //
+ //
// So here we decide to make the resulting type to be a native int.
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -8619,14 +8741,13 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr
op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
}
#endif // _TARGET_64BIT_
-
+
type = TYP_I_IMPL;
}
else
{
- // byref - [native] int => gives a byref
- assert(genActualType(op1->TypeGet()) == TYP_BYREF &&
- genActualTypeIsIntOrI(op2->TypeGet()));
+ // byref - [native] int => gives a byref
+ assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
#ifdef _TARGET_64BIT_
if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
@@ -8635,23 +8756,20 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr
op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
}
#endif // _TARGET_64BIT_
-
+
type = TYP_BYREF;
}
}
- else if ( (oper== GT_ADD) &&
- (genActualType(op1->TypeGet()) == TYP_BYREF ||
- genActualType(op2->TypeGet()) == TYP_BYREF))
+ else if ((oper == GT_ADD) &&
+ (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
{
// byref + [native] int => gives a byref
// (or)
// [native] int + byref => gives a byref
-
+
// only one can be a byref : byref op byref not allowed
- assert(genActualType(op1->TypeGet()) != TYP_BYREF ||
- genActualType(op2->TypeGet()) != TYP_BYREF);
- assert(genActualTypeIsIntOrI(op1->TypeGet()) ||
- genActualTypeIsIntOrI(op2->TypeGet()));
+ assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
+ assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
#ifdef _TARGET_64BIT_
if (genActualType(op2->TypeGet()) == TYP_BYREF)
@@ -8668,12 +8786,11 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr
op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
}
#endif // _TARGET_64BIT_
-
+
type = TYP_BYREF;
}
#ifdef _TARGET_64BIT_
- else if (genActualType(op1->TypeGet()) == TYP_I_IMPL ||
- genActualType(op2->TypeGet()) == TYP_I_IMPL)
+ else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
{
assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
@@ -8695,8 +8812,7 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr
type = TYP_I_IMPL;
}
#else // 32-bit TARGET
- else if (genActualType(op1->TypeGet()) == TYP_LONG ||
- genActualType(op2->TypeGet()) == TYP_LONG)
+ else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
{
assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
@@ -8709,19 +8825,18 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr
else
{
// int + int => gives an int
- assert(genActualType(op1->TypeGet()) != TYP_BYREF &&
- genActualType(op2->TypeGet()) != TYP_BYREF);
+ assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
type = genActualType(op1->gtType);
-#if FEATURE_X87_DOUBLES
+#if FEATURE_X87_DOUBLES
// For x87, since we only have 1 size of registers, prefer double
// For everybody else, be more precise
- if (type == TYP_FLOAT)
+ if (type == TYP_FLOAT)
type = TYP_DOUBLE;
#else // !FEATURE_X87_DOUBLES
@@ -8735,29 +8850,28 @@ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTr
}
#endif // FEATURE_X87_DOUBLES
-
}
#if FEATURE_X87_DOUBLES
- assert( type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG
- || type == TYP_INT );
+ assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
#else // FEATURE_X87_DOUBLES
- assert( type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT
- || type == TYP_LONG || type == TYP_INT );
+ assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
#endif // FEATURE_X87_DOUBLES
return type;
-
}
/*****************************************************************************
* Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
*
- * typeRef contains the token, op1 to contain the value being cast,
+ * typeRef contains the token, op1 contains the value being cast,
* and op2 contains code that creates the type handle corresponding to typeRef
* isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
*/
-GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1, GenTreePtr op2, CORINFO_RESOLVED_TOKEN * pResolvedToken, bool isCastClass)
+GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1,
+ GenTreePtr op2,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ bool isCastClass)
{
bool expandInline;
@@ -8768,34 +8882,38 @@ GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1, GenTreePtr op2,
if (isCastClass)
{
// We only want to expand inline the normal CHKCASTCLASS helper;
- expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
+ expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
}
else
{
- if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
+ if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
{
// Get the Class Handle and class attributes for the type we are casting to
- //
+ //
DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
//
// If the class handle is marked as final we can also expand the IsInst check inline
- //
+ //
expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
//
// But don't expand inline these two cases
- //
+ //
if (flags & CORINFO_FLG_MARSHAL_BYREF)
+ {
expandInline = false;
+ }
else if (flags & CORINFO_FLG_CONTEXTFUL)
+ {
expandInline = false;
+ }
}
else
{
//
// We can't expand inline any other helpers
- //
+ //
expandInline = false;
}
}
@@ -8803,11 +8921,15 @@ GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1, GenTreePtr op2,
if (expandInline)
{
if (compCurBB->isRunRarely())
- expandInline = false; // not worth the code expansion in a rarely run block
+ {
+ expandInline = false; // not worth the code expansion in a rarely run block
+ }
if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
- expandInline = false; // not worth creating an untracked local variable
- }
+ {
+ expandInline = false; // not worth creating an untracked local variable
+ }
+ }
if (!expandInline)
{
@@ -8816,7 +8938,7 @@ GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1, GenTreePtr op2,
//
op2->gtFlags |= GTF_DONT_CSE;
- return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
+ return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
}
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
@@ -8825,7 +8947,7 @@ GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1, GenTreePtr op2,
GenTreePtr condMT;
//
// expand the methodtable match:
- //
+ //
// condMT ==> GT_NE
// / \
// GT_IND op2 (typically CNS_INT)
@@ -8834,45 +8956,44 @@ GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1, GenTreePtr op2,
//
// This can replace op1 with a GT_COMMA that evaluates op1 into a local
- //
- op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, NULL DEBUGARG("CASTCLASS eval op1") );
+ //
+ op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
//
// op1 is now known to be a non-complex tree
// thus we can use gtClone(op1) from now on
- //
+ //
GenTreePtr op2Var = op2;
if (isCastClass)
{
- op2Var = fgInsertCommaFormTemp(&op2);
+ op2Var = fgInsertCommaFormTemp(&op2);
lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
}
- temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
+ temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
temp->gtFlags |= GTF_EXCEPT;
condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
-
GenTreePtr condNull;
//
// expand the null check:
- //
+ //
// condNull ==> GT_EQ
// / \
// op1Copy CNS_INT
// null
- //
+ //
condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
//
// expand the true and false trees for the condMT
- //
+ //
GenTreePtr condFalse = gtClone(op1);
GenTreePtr condTrue;
if (isCastClass)
{
//
// use the special helper that skips the cases checked by our inlined cast
- //
+ //
helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
@@ -8884,37 +9005,35 @@ GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1, GenTreePtr op2,
#define USE_QMARK_TREES
-#ifdef USE_QMARK_TREES
+#ifdef USE_QMARK_TREES
GenTreePtr qmarkMT;
//
// Generate first QMARK - COLON tree
- //
+ //
// qmarkMT ==> GT_QMARK
// / \
// condMT GT_COLON
// / \
// condFalse condTrue
- //
- temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse
- );
+ //
+ temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
condMT->gtFlags |= GTF_RELOP_QMARK;
GenTreePtr qmarkNull;
//
// Generate second QMARK - COLON tree
- //
+ //
// qmarkNull ==> GT_QMARK
// / \
// condNull GT_COLON
// / \
// qmarkMT op1Copy
//
- temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT
- );
+ temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
- condNull->gtFlags |= GTF_RELOP_QMARK;
+ condNull->gtFlags |= GTF_RELOP_QMARK;
// Make QMark node a top level node by spilling it.
unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
@@ -8924,51 +9043,55 @@ GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1, GenTreePtr op2,
}
#ifndef DEBUG
-#define assertImp(cond) ((void)0)
+#define assertImp(cond) ((void)0)
#else
-#define assertImp(cond) \
- do { if (!(cond)) { \
- const int cchAssertImpBuf = 600; \
- char *assertImpBuf = (char*)alloca(cchAssertImpBuf); \
- _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,\
- "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", \
- #cond, impCurOpcName, impCurOpcOffs, \
- op1?varTypeName(op1->TypeGet()):"NULL", \
- op2?varTypeName(op2->TypeGet()):"NULL", verCurrentState.esStackDepth); \
- assertAbort(assertImpBuf, __FILE__, __LINE__); \
- } } while (0)
+#define assertImp(cond) \
+ do \
+ { \
+ if (!(cond)) \
+ { \
+ const int cchAssertImpBuf = 600; \
+ char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
+ _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
+ "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
+ impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
+ op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
+ assertAbort(assertImpBuf, __FILE__, __LINE__); \
+ } \
+ } while (0)
#endif // DEBUG
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
/*****************************************************************************
* Import the instr for the given basic block
*/
-void Compiler::impImportBlockCode(BasicBlock * block)
+void Compiler::impImportBlockCode(BasicBlock* block)
{
#define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
- printf("\nImporting BB%02u (PC=%03u) of '%s'",
- block->bbNum, block->bbCodeOffs, info.compFullName);
+ {
+ printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
+ }
#endif
- unsigned nxtStmtIndex = impInitBlockLineInfo();
- IL_OFFSET nxtStmtOffs;
+ unsigned nxtStmtIndex = impInitBlockLineInfo();
+ IL_OFFSET nxtStmtOffs;
- GenTreePtr arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
- bool expandInline;
- CorInfoHelpFunc helper;
+ GenTreePtr arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
+ bool expandInline;
+ CorInfoHelpFunc helper;
CorInfoIsAccessAllowedResult accessAllowedResult;
- CORINFO_HELPER_DESC calloutHelper;
- const BYTE * lastLoadToken = NULL;
+ CORINFO_HELPER_DESC calloutHelper;
+ const BYTE* lastLoadToken = nullptr;
- // reject cyclic constraints
- if (tiVerificationNeeded)
+ // reject cyclic constraints
+ if (tiVerificationNeeded)
{
Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
@@ -8980,30 +9103,29 @@ void Compiler::impImportBlockCode(BasicBlock * block)
/* Walk the opcodes that comprise the basic block */
- const BYTE *codeAddr = info.compCode + block->bbCodeOffs;
- const BYTE *codeEndp = info.compCode + block->bbCodeOffsEnd;
+ const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
+ const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
- IL_OFFSET opcodeOffs = block->bbCodeOffs;
- IL_OFFSET lastSpillOffs = opcodeOffs;
-
- signed jmpDist;
+ IL_OFFSET opcodeOffs = block->bbCodeOffs;
+ IL_OFFSET lastSpillOffs = opcodeOffs;
- /* remember the start of the delegate creation sequence (used for verification) */
- const BYTE* delegateCreateStart = 0;
+ signed jmpDist;
+ /* remember the start of the delegate creation sequence (used for verification) */
+ const BYTE* delegateCreateStart = nullptr;
- int prefixFlags = 0;
+ int prefixFlags = 0;
bool explicitTailCall, constraintCall, readonlyCall;
- bool insertLdloc = false; // set by CEE_DUP and cleared by following store
- typeInfo tiRetVal;
+ bool insertLdloc = false; // set by CEE_DUP and cleared by following store
+ typeInfo tiRetVal;
- unsigned numArgs = info.compArgsCount;
+ unsigned numArgs = info.compArgsCount;
/* Now process all the opcodes in the block */
- var_types callTyp = TYP_COUNT;
- OPCODE prevOpcode = CEE_ILLEGAL;
+ var_types callTyp = TYP_COUNT;
+ OPCODE prevOpcode = CEE_ILLEGAL;
if (block->bbCatchTyp)
{
@@ -9014,18 +9136,18 @@ void Compiler::impImportBlockCode(BasicBlock * block)
// We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
// to a temp. This is a trade off for code simplicity
- impSpillSpecialSideEff();
+ impSpillSpecialSideEff();
}
-
+
while (codeAddr < codeEndp)
{
- bool usingReadyToRunHelper = false;
+ bool usingReadyToRunHelper = false;
CORINFO_RESOLVED_TOKEN resolvedToken;
CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
- CORINFO_CALL_INFO callInfo;
- CORINFO_FIELD_INFO fieldInfo;
+ CORINFO_CALL_INFO callInfo;
+ CORINFO_FIELD_INFO fieldInfo;
- tiRetVal = typeInfo(); // Default type info
+ tiRetVal = typeInfo(); // Default type info
//---------------------------------------------------------------------
@@ -9045,8 +9167,8 @@ void Compiler::impImportBlockCode(BasicBlock * block)
}
else
{
- lastSpillOffs = opcodeOffs;
- impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
+ lastSpillOffs = opcodeOffs;
+ impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
}
/* Compute the current instr offset */
@@ -9061,17 +9183,16 @@ void Compiler::impImportBlockCode(BasicBlock * block)
{
if (!compIsForInlining())
{
- nxtStmtOffs = (nxtStmtIndex < info.compStmtOffsetsCount)
- ? info.compStmtOffsets[nxtStmtIndex]
- : BAD_IL_OFFSET;
+ nxtStmtOffs =
+ (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
/* Have we reached the next stmt boundary ? */
- if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
+ if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
{
assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
- if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
+ if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
{
/* We need to provide accurate IP-mapping at this point.
So spill anything on the stack so that it will form
@@ -9095,8 +9216,8 @@ void Compiler::impImportBlockCode(BasicBlock * block)
/* Make sure that nxtStmtIndex is in sync with opcodeOffs.
If opcodeOffs has gone past nxtStmtIndex, catch up */
- while ((nxtStmtIndex+1) < info.compStmtOffsetsCount &&
- info.compStmtOffsets[nxtStmtIndex+1] <= opcodeOffs)
+ while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
+ info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
{
nxtStmtIndex++;
}
@@ -9112,7 +9233,7 @@ void Compiler::impImportBlockCode(BasicBlock * block)
/* Are there any more line# entries after this one? */
- if (nxtStmtIndex < info.compStmtOffsetsCount)
+ if (nxtStmtIndex < info.compStmtOffsetsCount)
{
/* Remember where the next line# starts */
@@ -9126,8 +9247,8 @@ void Compiler::impImportBlockCode(BasicBlock * block)
}
}
}
- else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
- (verCurrentState.esStackDepth == 0))
+ else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
+ (verCurrentState.esStackDepth == 0))
{
/* At stack-empty locations, we have already added the tree to
the stmt list with the last offset. We just need to update
@@ -9136,8 +9257,8 @@ void Compiler::impImportBlockCode(BasicBlock * block)
impCurStmtOffsSet(opcodeOffs);
}
- else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
- impOpcodeIsCallSiteBoundary(prevOpcode))
+ else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
+ impOpcodeIsCallSiteBoundary(prevOpcode))
{
/* Make sure we have a type cached */
assert(callTyp != TYP_COUNT);
@@ -9152,2849 +9273,3163 @@ void Compiler::impImportBlockCode(BasicBlock * block)
impCurStmtOffsSet(opcodeOffs);
}
}
- else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) &&
- (prevOpcode == CEE_NOP))
+ else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
{
if (opts.compDbgCode)
+ {
impSpillStackEnsure(true);
+ }
impCurStmtOffsSet(opcodeOffs);
}
- assert(impCurStmtOffs == BAD_IL_OFFSET ||
- nxtStmtOffs == BAD_IL_OFFSET ||
+ assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
}
}
#endif // defined(DEBUGGING_SUPPORT) || defined(DEBUG)
- CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
- CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
- CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
+ CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
+ CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
+ CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
var_types lclTyp, ovflType = TYP_UNKNOWN;
- GenTreePtr op1 = DUMMY_INIT(NULL);
- GenTreePtr op2 = DUMMY_INIT(NULL);
- GenTreeArgList* args = NULL; // What good do these "DUMMY_INIT"s do?
+ GenTreePtr op1 = DUMMY_INIT(NULL);
+ GenTreePtr op2 = DUMMY_INIT(NULL);
+ GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
GenTreePtr newObjThisPtr = DUMMY_INIT(NULL);
- bool uns = DUMMY_INIT(false);
+ bool uns = DUMMY_INIT(false);
/* Get the next opcode and the size of its parameters */
- OPCODE opcode = (OPCODE) getU1LittleEndian(codeAddr);
+ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
codeAddr += sizeof(__int8);
-#ifdef DEBUG
- impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
- JITDUMP("\n [%2u] %3u (0x%03x) ",
- verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
+#ifdef DEBUG
+ impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
+ JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
#endif
-DECODE_OPCODE:
+ DECODE_OPCODE:
// Return if any previous code has caused inline to fail.
if (compDonotInline())
- return;
+ {
+ return;
+ }
/* Get the size of additional parameters */
- signed int sz = opcodeSizes[opcode];
-
+ signed int sz = opcodeSizes[opcode];
#ifdef DEBUG
- clsHnd = NO_CLASS_HANDLE;
- lclTyp = TYP_COUNT;
- callTyp = TYP_COUNT;
+ clsHnd = NO_CLASS_HANDLE;
+ lclTyp = TYP_COUNT;
+ callTyp = TYP_COUNT;
- impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
- impCurOpcName = opcodeNames[opcode];
+ impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
+ impCurOpcName = opcodeNames[opcode];
if (verbose && (opcode != CEE_PREFIX1))
+ {
printf("%s", impCurOpcName);
+ }
/* Use assertImp() to display the opcode */
- op1 = op2 = NULL;
+ op1 = op2 = nullptr;
#endif
/* See what kind of an opcode we have, then */
- unsigned mflags = 0;
+ unsigned mflags = 0;
unsigned clsFlags = 0;
switch (opcode)
{
- unsigned lclNum;
- var_types type;
+ unsigned lclNum;
+ var_types type;
- GenTreePtr op3;
- genTreeOps oper;
- unsigned size;
+ GenTreePtr op3;
+ genTreeOps oper;
+ unsigned size;
- int val;
+ int val;
- CORINFO_SIG_INFO sig;
- unsigned flags;
- IL_OFFSET jmpAddr;
- bool ovfl, unordered, callNode;
- bool ldstruct;
+ CORINFO_SIG_INFO sig;
+ unsigned flags;
+ IL_OFFSET jmpAddr;
+ bool ovfl, unordered, callNode;
+ bool ldstruct;
CORINFO_CLASS_HANDLE tokenType;
- union
- {
- int intVal;
- float fltVal;
- __int64 lngVal;
- double dblVal;
- }
- cval;
-
- case CEE_PREFIX1:
- opcode = (OPCODE) (getU1LittleEndian(codeAddr) + 256);
- codeAddr += sizeof(__int8);
- opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
- goto DECODE_OPCODE;
+ union {
+ int intVal;
+ float fltVal;
+ __int64 lngVal;
+ double dblVal;
+ } cval;
+ case CEE_PREFIX1:
+ opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
+ codeAddr += sizeof(__int8);
+ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
+ goto DECODE_OPCODE;
-SPILL_APPEND:
+ SPILL_APPEND:
- /* Append 'op1' to the list of statements */
- impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
- goto DONE_APPEND;
+ /* Append 'op1' to the list of statements */
+ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
+ goto DONE_APPEND;
-APPEND:
+ APPEND:
- /* Append 'op1' to the list of statements */
+ /* Append 'op1' to the list of statements */
- impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
- goto DONE_APPEND;
+ impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
+ goto DONE_APPEND;
-DONE_APPEND:
+ DONE_APPEND:
#ifdef DEBUG
- // Remember at which BC offset the tree was finished
- impNoteLastILoffs();
+ // Remember at which BC offset the tree was finished
+ impNoteLastILoffs();
#endif
- break;
+ break;
- case CEE_LDNULL:
- impPushNullObjRefOnStack();
- break;
+ case CEE_LDNULL:
+ impPushNullObjRefOnStack();
+ break;
- case CEE_LDC_I4_M1 :
- case CEE_LDC_I4_0 :
- case CEE_LDC_I4_1 :
- case CEE_LDC_I4_2 :
- case CEE_LDC_I4_3 :
- case CEE_LDC_I4_4 :
- case CEE_LDC_I4_5 :
- case CEE_LDC_I4_6 :
- case CEE_LDC_I4_7 :
- case CEE_LDC_I4_8 :
- cval.intVal = (opcode - CEE_LDC_I4_0);
- assert(-1 <= cval.intVal && cval.intVal <= 8);
- goto PUSH_I4CON;
-
- case CEE_LDC_I4_S: cval.intVal = getI1LittleEndian(codeAddr); goto PUSH_I4CON;
- case CEE_LDC_I4: cval.intVal = getI4LittleEndian(codeAddr); goto PUSH_I4CON;
-PUSH_I4CON:
- JITDUMP(" %d", cval.intVal);
- impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
- break;
+ case CEE_LDC_I4_M1:
+ case CEE_LDC_I4_0:
+ case CEE_LDC_I4_1:
+ case CEE_LDC_I4_2:
+ case CEE_LDC_I4_3:
+ case CEE_LDC_I4_4:
+ case CEE_LDC_I4_5:
+ case CEE_LDC_I4_6:
+ case CEE_LDC_I4_7:
+ case CEE_LDC_I4_8:
+ cval.intVal = (opcode - CEE_LDC_I4_0);
+ assert(-1 <= cval.intVal && cval.intVal <= 8);
+ goto PUSH_I4CON;
+
+ case CEE_LDC_I4_S:
+ cval.intVal = getI1LittleEndian(codeAddr);
+ goto PUSH_I4CON;
+ case CEE_LDC_I4:
+ cval.intVal = getI4LittleEndian(codeAddr);
+ goto PUSH_I4CON;
+ PUSH_I4CON:
+ JITDUMP(" %d", cval.intVal);
+ impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
+ break;
- case CEE_LDC_I8: cval.lngVal = getI8LittleEndian(codeAddr);
- JITDUMP(" 0x%016llx", cval.lngVal);
- impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
- break;
+ case CEE_LDC_I8:
+ cval.lngVal = getI8LittleEndian(codeAddr);
+ JITDUMP(" 0x%016llx", cval.lngVal);
+ impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
+ break;
- case CEE_LDC_R8: cval.dblVal = getR8LittleEndian(codeAddr);
- JITDUMP(" %#.17g", cval.dblVal);
- impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
- break;
+ case CEE_LDC_R8:
+ cval.dblVal = getR8LittleEndian(codeAddr);
+ JITDUMP(" %#.17g", cval.dblVal);
+ impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
+ break;
- case CEE_LDC_R4:
- cval.dblVal = getR4LittleEndian(codeAddr);
- JITDUMP(" %#.17g", cval.dblVal);
- {
- GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
+ case CEE_LDC_R4:
+ cval.dblVal = getR4LittleEndian(codeAddr);
+ JITDUMP(" %#.17g", cval.dblVal);
+ {
+ GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
#if !FEATURE_X87_DOUBLES
- // X87 stack doesn't differentiate between float/double
- // so R4 is treated as R8, but everybody else does
- cnsOp->gtType = TYP_FLOAT;
+ // X87 stack doesn't differentiate between float/double
+ // so R4 is treated as R8, but everybody else does
+ cnsOp->gtType = TYP_FLOAT;
#endif // FEATURE_X87_DOUBLES
- impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
- }
- break;
+ impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
+ }
+ break;
- case CEE_LDSTR:
-
- if (compIsForInlining())
- {
- if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
- {
- compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
- return;
+ case CEE_LDSTR:
+
+ if (compIsForInlining())
+ {
+ if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
+ {
+ compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
+ return;
+ }
}
- }
-
- val = getU4LittleEndian(codeAddr);
- JITDUMP(" %08X", val);
- if (tiVerificationNeeded)
- {
- Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
- tiRetVal = typeInfo(TI_REF, impGetStringClass());
- }
- impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
- break;
+ val = getU4LittleEndian(codeAddr);
+ JITDUMP(" %08X", val);
+ if (tiVerificationNeeded)
+ {
+ Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
+ tiRetVal = typeInfo(TI_REF, impGetStringClass());
+ }
+ impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
- case CEE_LDARG:
- lclNum = getU2LittleEndian(codeAddr);
- JITDUMP(" %u", lclNum);
- impLoadArg(lclNum, opcodeOffs + sz + 1);
- break;
+ break;
- case CEE_LDARG_S:
- lclNum = getU1LittleEndian(codeAddr);
- JITDUMP(" %u", lclNum);
- impLoadArg(lclNum, opcodeOffs + sz + 1);
- break;
+ case CEE_LDARG:
+ lclNum = getU2LittleEndian(codeAddr);
+ JITDUMP(" %u", lclNum);
+ impLoadArg(lclNum, opcodeOffs + sz + 1);
+ break;
- case CEE_LDARG_0:
- case CEE_LDARG_1:
- case CEE_LDARG_2:
- case CEE_LDARG_3:
- lclNum = (opcode - CEE_LDARG_0);
- assert(lclNum >= 0 && lclNum < 4);
- impLoadArg(lclNum, opcodeOffs + sz + 1);
- break;
+ case CEE_LDARG_S:
+ lclNum = getU1LittleEndian(codeAddr);
+ JITDUMP(" %u", lclNum);
+ impLoadArg(lclNum, opcodeOffs + sz + 1);
+ break;
+ case CEE_LDARG_0:
+ case CEE_LDARG_1:
+ case CEE_LDARG_2:
+ case CEE_LDARG_3:
+ lclNum = (opcode - CEE_LDARG_0);
+ assert(lclNum >= 0 && lclNum < 4);
+ impLoadArg(lclNum, opcodeOffs + sz + 1);
+ break;
- case CEE_LDLOC:
- lclNum = getU2LittleEndian(codeAddr);
- JITDUMP(" %u", lclNum);
- impLoadLoc(lclNum, opcodeOffs + sz + 1);
- break;
+ case CEE_LDLOC:
+ lclNum = getU2LittleEndian(codeAddr);
+ JITDUMP(" %u", lclNum);
+ impLoadLoc(lclNum, opcodeOffs + sz + 1);
+ break;
- case CEE_LDLOC_S:
- lclNum = getU1LittleEndian(codeAddr);
- JITDUMP(" %u", lclNum);
- impLoadLoc(lclNum, opcodeOffs + sz + 1);
- break;
+ case CEE_LDLOC_S:
+ lclNum = getU1LittleEndian(codeAddr);
+ JITDUMP(" %u", lclNum);
+ impLoadLoc(lclNum, opcodeOffs + sz + 1);
+ break;
- case CEE_LDLOC_0:
- case CEE_LDLOC_1:
- case CEE_LDLOC_2:
- case CEE_LDLOC_3:
- lclNum = (opcode - CEE_LDLOC_0);
- assert(lclNum >= 0 && lclNum < 4);
- impLoadLoc(lclNum, opcodeOffs + sz + 1);
- break;
+ case CEE_LDLOC_0:
+ case CEE_LDLOC_1:
+ case CEE_LDLOC_2:
+ case CEE_LDLOC_3:
+ lclNum = (opcode - CEE_LDLOC_0);
+ assert(lclNum >= 0 && lclNum < 4);
+ impLoadLoc(lclNum, opcodeOffs + sz + 1);
+ break;
- case CEE_STARG:
- lclNum = getU2LittleEndian(codeAddr);
- goto STARG;
+ case CEE_STARG:
+ lclNum = getU2LittleEndian(codeAddr);
+ goto STARG;
- case CEE_STARG_S:
- lclNum = getU1LittleEndian(codeAddr);
- STARG:
- JITDUMP(" %u", lclNum);
+ case CEE_STARG_S:
+ lclNum = getU1LittleEndian(codeAddr);
+ STARG:
+ JITDUMP(" %u", lclNum);
- if (tiVerificationNeeded)
- {
- Verify(lclNum < info.compILargsCount, "bad arg num");
- }
+ if (tiVerificationNeeded)
+ {
+ Verify(lclNum < info.compILargsCount, "bad arg num");
+ }
- if (compIsForInlining())
- {
- op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
- noway_assert(op1->gtOper == GT_LCL_VAR);
- lclNum = op1->AsLclVar()->gtLclNum;
+ if (compIsForInlining())
+ {
+ op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
+ noway_assert(op1->gtOper == GT_LCL_VAR);
+ lclNum = op1->AsLclVar()->gtLclNum;
- goto VAR_ST_VALID;
- }
+ goto VAR_ST_VALID;
+ }
- lclNum = compMapILargNum(lclNum); // account for possible hidden param
- assertImp(lclNum < numArgs);
+ lclNum = compMapILargNum(lclNum); // account for possible hidden param
+ assertImp(lclNum < numArgs);
- if (lclNum == info.compThisArg)
- {
- lclNum = lvaArg0Var;
- }
- lvaTable[lclNum].lvArgWrite = 1;
+ if (lclNum == info.compThisArg)
+ {
+ lclNum = lvaArg0Var;
+ }
+ lvaTable[lclNum].lvArgWrite = 1;
- if (tiVerificationNeeded)
- {
- typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
- Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true), "type mismatch");
+ if (tiVerificationNeeded)
+ {
+ typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
+ Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
+ "type mismatch");
- if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
- Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
- }
+ if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
+ {
+ Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
+ }
+ }
- goto VAR_ST;
+ goto VAR_ST;
- case CEE_STLOC:
- lclNum = getU2LittleEndian(codeAddr);
- JITDUMP(" %u", lclNum);
- goto LOC_ST;
+ case CEE_STLOC:
+ lclNum = getU2LittleEndian(codeAddr);
+ JITDUMP(" %u", lclNum);
+ goto LOC_ST;
- case CEE_STLOC_S:
- lclNum = getU1LittleEndian(codeAddr);
- JITDUMP(" %u", lclNum);
- goto LOC_ST;
+ case CEE_STLOC_S:
+ lclNum = getU1LittleEndian(codeAddr);
+ JITDUMP(" %u", lclNum);
+ goto LOC_ST;
- case CEE_STLOC_0:
- case CEE_STLOC_1:
- case CEE_STLOC_2:
- case CEE_STLOC_3:
- lclNum = (opcode - CEE_STLOC_0);
- assert(lclNum >= 0 && lclNum < 4);
+ case CEE_STLOC_0:
+ case CEE_STLOC_1:
+ case CEE_STLOC_2:
+ case CEE_STLOC_3:
+ lclNum = (opcode - CEE_STLOC_0);
+ assert(lclNum >= 0 && lclNum < 4);
- LOC_ST:
- if (tiVerificationNeeded)
- {
- Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
- Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true), "type mismatch");
- }
+ LOC_ST:
+ if (tiVerificationNeeded)
+ {
+ Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
+ Verify(tiCompatibleWith(impStackTop().seTypeInfo,
+ NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
+ "type mismatch");
+ }
+ if (compIsForInlining())
+ {
+ lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
- if (compIsForInlining())
- {
- lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
-
- /* Have we allocated a temp for this local? */
-
- lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
-
- goto _PopValue;
- }
-
- lclNum += numArgs;
+ /* Have we allocated a temp for this local? */
- VAR_ST:
+ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
- if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
- {
- assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- BADCODE("Bad IL");
- }
+ goto _PopValue;
+ }
- VAR_ST_VALID:
+ lclNum += numArgs;
- /* if it is a struct assignment, make certain we don't overflow the buffer */
- assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
+ VAR_ST:
- if (lvaTable[lclNum].lvNormalizeOnLoad())
- lclTyp = lvaGetRealType (lclNum);
- else
- lclTyp = lvaGetActualType(lclNum);
+ if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
+ {
+ assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
+ BADCODE("Bad IL");
+ }
-_PopValue:
- /* Pop the value being assigned */
+ VAR_ST_VALID:
- {
- StackEntry se = impPopStack(clsHnd);
- op1 = se.val;
- tiRetVal = se.seTypeInfo;
- }
+ /* if it is a struct assignment, make certain we don't overflow the buffer */
+ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
+
+ if (lvaTable[lclNum].lvNormalizeOnLoad())
+ {
+ lclTyp = lvaGetRealType(lclNum);
+ }
+ else
+ {
+ lclTyp = lvaGetActualType(lclNum);
+ }
+
+ _PopValue:
+ /* Pop the value being assigned */
+
+ {
+ StackEntry se = impPopStack(clsHnd);
+ op1 = se.val;
+ tiRetVal = se.seTypeInfo;
+ }
#ifdef FEATURE_SIMD
- if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
- {
- assert(op1->TypeGet() == TYP_STRUCT);
- op1->gtType = lclTyp;
- }
+ if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
+ {
+ assert(op1->TypeGet() == TYP_STRUCT);
+ op1->gtType = lclTyp;
+ }
#endif // FEATURE_SIMD
- op1 = impImplicitIorI4Cast(op1, lclTyp);
+ op1 = impImplicitIorI4Cast(op1, lclTyp);
#ifdef _TARGET_64BIT_
- // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity
- if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
- {
- assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
- }
+                // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
+ if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
+ {
+ assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
+ op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
+ }
#endif // _TARGET_64BIT_
- // We had better assign it a value of the correct type
- assertImp(genActualType(lclTyp) == genActualType(op1->gtType) ||
- genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
- (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
- (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
- (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
- ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
+ // We had better assign it a value of the correct type
+ assertImp(
+ genActualType(lclTyp) == genActualType(op1->gtType) ||
+ genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
+ (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
+ (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
+ (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
+ ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
- /* If op1 is "&var" then its type is the transient "*" and it can
- be used either as TYP_BYREF or TYP_I_IMPL */
+ /* If op1 is "&var" then its type is the transient "*" and it can
+ be used either as TYP_BYREF or TYP_I_IMPL */
- if (op1->IsVarAddr())
- {
- assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
+ if (op1->IsVarAddr())
+ {
+ assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
- /* When "&var" is created, we assume it is a byref. If it is
- being assigned to a TYP_I_IMPL var, change the type to
- prevent unnecessary GC info */
+ /* When "&var" is created, we assume it is a byref. If it is
+ being assigned to a TYP_I_IMPL var, change the type to
+ prevent unnecessary GC info */
- if (genActualType(lclTyp) == TYP_I_IMPL)
- op1->gtType = TYP_I_IMPL;
- }
+ if (genActualType(lclTyp) == TYP_I_IMPL)
+ {
+ op1->gtType = TYP_I_IMPL;
+ }
+ }
- /* Filter out simple assignments to itself */
+ /* Filter out simple assignments to itself */
- if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
- {
- if (insertLdloc)
+ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
{
- // This is a sequence of (ldloc, dup, stloc). Can simplify
- // to (ldloc, stloc). Goto LDVAR to reconstruct the ldloc node.
- CLANG_FORMAT_COMMENT_ANCHOR;
+ if (insertLdloc)
+ {
+ // This is a sequence of (ldloc, dup, stloc). Can simplify
+ // to (ldloc, stloc). Goto LDVAR to reconstruct the ldloc node.
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- if (tiVerificationNeeded)
+ if (tiVerificationNeeded)
+ {
+ assert(
+ typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
+ }
+#endif
+
+ op1 = nullptr;
+ insertLdloc = false;
+
+ impLoadVar(lclNum, opcodeOffs + sz + 1);
+ break;
+ }
+ else if (opts.compDbgCode)
{
- assert(typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
+ op1 = gtNewNothingNode();
+ goto SPILL_APPEND;
}
-#endif
+ else
+ {
+ break;
+ }
+ }
- op1 = NULL;
- insertLdloc = false;
+ /* Create the assignment node */
- impLoadVar(lclNum, opcodeOffs + sz + 1);
- break;
+ op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
+
+ /* If the local is aliased, we need to spill calls and
+ indirections from the stack. */
+
+ if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
+ verCurrentState.esStackDepth > 0)
+ {
+ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
+ }
+
+ /* Spill any refs to the local from the stack */
+
+ impSpillLclRefs(lclNum);
+
+#if !FEATURE_X87_DOUBLES
+ // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
+ // We insert a cast to the dest 'op2' type
+ //
+ if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
+ varTypeIsFloating(op2->gtType))
+ {
+ op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
}
- else if (opts.compDbgCode)
+#endif // !FEATURE_X87_DOUBLES
+
+ if (varTypeIsStruct(lclTyp))
{
- op1 = gtNewNothingNode();
- goto SPILL_APPEND;
+ op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
}
else
{
+ // The code generator generates GC tracking information
+                // based on the RHS of the assignment. Later the LHS (which
+                // is a BYREF) gets used and the emitter checks that that variable
+ // is being tracked. It is not (since the RHS was an int and did
+ // not need tracking). To keep this assert happy, we change the RHS
+ if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
+ {
+ op1->gtType = TYP_BYREF;
+ }
+ op1 = gtNewAssignNode(op2, op1);
+ }
+
+ /* If insertLdloc is true, then we need to insert a ldloc following the
+ stloc. This is done when converting a (dup, stloc) sequence into
+ a (stloc, ldloc) sequence. */
+
+ if (insertLdloc)
+ {
+ // From SPILL_APPEND
+ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
+
+#ifdef DEBUG
+ // From DONE_APPEND
+ impNoteLastILoffs();
+#endif
+ op1 = nullptr;
+ insertLdloc = false;
+
+ impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
break;
}
- }
- /* Create the assignment node */
+ goto SPILL_APPEND;
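
One detail worth calling out in the store path above is the self-assignment filter: a stloc whose value is just a ldloc of the same local generates no assignment at all (a nop is appended under debuggable codegen, and a dup/stloc pair collapses back to a plain ldloc). A small sketch of that test, with mock types standing in for GenTree and GT_LCL_VAR:

#include <cstdio>

enum MockOper { kLclVar, kOther };
struct MockNode { MockOper oper; unsigned lclNum; };

// Mirrors the importer's `op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum`
// check: storing a plain load of local N back into local N is a no-op.
static bool IsSelfAssignment(const MockNode& value, unsigned dstLclNum)
{
    return (value.oper == kLclVar) && (value.lclNum == dstLclNum);
}

int main()
{
    MockNode load = {kLclVar, 3};
    printf("self-assign: %d\n", IsSelfAssignment(load, 3)); // prints 1
    return 0;
}
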
- op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
+ case CEE_LDLOCA:
+ lclNum = getU2LittleEndian(codeAddr);
+ goto LDLOCA;
- /* If the local is aliased, we need to spill calls and
- indirections from the stack. */
+ case CEE_LDLOCA_S:
+ lclNum = getU1LittleEndian(codeAddr);
+ LDLOCA:
+ JITDUMP(" %u", lclNum);
+ if (tiVerificationNeeded)
+ {
+ Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
+ Verify(info.compInitMem, "initLocals not set");
+ }
- if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) && verCurrentState.esStackDepth > 0)
- impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased") );
+ if (compIsForInlining())
+ {
+ // Get the local type
+ lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
- /* Spill any refs to the local from the stack */
+ /* Have we allocated a temp for this local? */
- impSpillLclRefs(lclNum);
+ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
-#if !FEATURE_X87_DOUBLES
- // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
- // We insert a cast to the dest 'op2' type
- //
- if ((op1->TypeGet() != op2->TypeGet()) &&
- varTypeIsFloating(op1->gtType) &&
- varTypeIsFloating(op2->gtType))
- {
- op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
- }
-#endif // !FEATURE_X87_DOUBLES
+ op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
- if (varTypeIsStruct(lclTyp))
- {
- op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
- }
- else
- {
- // The code generator generates GC tracking information
- // based on the RHS of the assignment. Later the LHS (which is
- // is a BYREF) gets used and the emitter checks that that variable
- // is being tracked. It is not (since the RHS was an int and did
- // not need tracking). To keep this assert happy, we change the RHS
- if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
- op1->gtType = TYP_BYREF;
- op1 = gtNewAssignNode(op2, op1);
- }
+ goto _PUSH_ADRVAR;
+ }
- /* If insertLdloc is true, then we need to insert a ldloc following the
- stloc. This is done when converting a (dup, stloc) sequence into
- a (stloc, ldloc) sequence. */
+ lclNum += numArgs;
+ assertImp(lclNum < info.compLocalsCount);
+ goto ADRVAR;
- if (insertLdloc)
- {
- // From SPILL_APPEND
- impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
-
-#ifdef DEBUG
- // From DONE_APPEND
- impNoteLastILoffs();
-#endif
- op1 = NULL;
- insertLdloc = false;
+ case CEE_LDARGA:
+ lclNum = getU2LittleEndian(codeAddr);
+ goto LDARGA;
- impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
- break;
- }
+ case CEE_LDARGA_S:
+ lclNum = getU1LittleEndian(codeAddr);
+ LDARGA:
+ JITDUMP(" %u", lclNum);
+ Verify(lclNum < info.compILargsCount, "bad arg num");
- goto SPILL_APPEND;
+ if (compIsForInlining())
+ {
+ // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument,
+ // followed by a ldfld to load the field.
+ op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
+ if (op1->gtOper != GT_LCL_VAR)
+ {
+ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
+ return;
+ }
- case CEE_LDLOCA:
- lclNum = getU2LittleEndian(codeAddr);
- goto LDLOCA;
+ assert(op1->gtOper == GT_LCL_VAR);
- case CEE_LDLOCA_S:
- lclNum = getU1LittleEndian(codeAddr);
- LDLOCA:
- JITDUMP(" %u", lclNum);
- if (tiVerificationNeeded)
- {
- Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
- Verify(info.compInitMem, "initLocals not set");
- }
+ goto _PUSH_ADRVAR;
+ }
- if (compIsForInlining())
- {
- // Get the local type
- lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
-
- /* Have we allocated a temp for this local? */
-
- lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
-
- op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
-
- goto _PUSH_ADRVAR;
- }
-
- lclNum += numArgs;
- assertImp(lclNum < info.compLocalsCount);
- goto ADRVAR;
-
-
- case CEE_LDARGA:
- lclNum = getU2LittleEndian(codeAddr);
- goto LDARGA;
-
- case CEE_LDARGA_S:
- lclNum = getU1LittleEndian(codeAddr);
- LDARGA:
- JITDUMP(" %u", lclNum);
- Verify(lclNum < info.compILargsCount, "bad arg num");
-
- if (compIsForInlining())
- {
- // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument,
- // followed by a ldfld to load the field.
-
- op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
- if (op1->gtOper != GT_LCL_VAR)
- {
- compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
- return;
+ lclNum = compMapILargNum(lclNum); // account for possible hidden param
+ assertImp(lclNum < numArgs);
+
+ if (lclNum == info.compThisArg)
+ {
+ lclNum = lvaArg0Var;
}
- assert(op1->gtOper == GT_LCL_VAR);
+ goto ADRVAR;
- goto _PUSH_ADRVAR;
- }
+ ADRVAR:
- lclNum = compMapILargNum(lclNum); // account for possible hidden param
- assertImp(lclNum < numArgs);
-
- if (lclNum == info.compThisArg)
- {
- lclNum = lvaArg0Var;
- }
+ op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
- goto ADRVAR;
+ _PUSH_ADRVAR:
+ assert(op1->gtOper == GT_LCL_VAR);
- ADRVAR:
+ /* Note that this is supposed to create the transient type "*"
+ which may be used as a TYP_I_IMPL. However we catch places
+ where it is used as a TYP_I_IMPL and change the node if needed.
+ Thus we are pessimistic and may report byrefs in the GC info
+ where it was not absolutely needed, but it is safer this way.
+ */
+ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
- op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
+                // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
+ assert((op1->gtFlags & GTF_GLOB_REF) == 0);
-_PUSH_ADRVAR:
- assert(op1->gtOper == GT_LCL_VAR);
+ tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
+ if (tiVerificationNeeded)
+ {
+ // Don't allow taking address of uninit this ptr.
+ if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
+ {
+ Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
+ }
+
+ if (!tiRetVal.IsByRef())
+ {
+ tiRetVal.MakeByRef();
+ }
+ else
+ {
+ Verify(false, "byref to byref");
+ }
+ }
- /* Note that this is supposed to create the transient type "*"
- which may be used as a TYP_I_IMPL. However we catch places
- where it is used as a TYP_I_IMPL and change the node if needed.
- Thus we are pessimistic and may report byrefs in the GC info
- where it was not absolutely needed, but it is safer this way.
- */
- op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
+ impPushOnStack(op1, tiRetVal);
+ break;
- // &aliasedVar doesnt need GTF_GLOB_REF, though alisasedVar does
- assert((op1->gtFlags & GTF_GLOB_REF) == 0);
+ case CEE_ARGLIST:
- tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
- if (tiVerificationNeeded)
- {
- // Don't allow taking address of uninit this ptr.
- if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
+ if (!info.compIsVarArgs)
{
- Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
+ BADCODE("arglist in non-vararg method");
}
- if (!tiRetVal.IsByRef())
- tiRetVal.MakeByRef();
- else
- Verify(false, "byref to byref");
- }
-
- impPushOnStack(op1, tiRetVal);
- break;
+ if (tiVerificationNeeded)
+ {
+ tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
+ }
+ assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
+
+                /* The ARGLIST cookie is a hidden 'last' parameter; we have already
+                   adjusted the arg count because this is like fetching the last param */
+ assertImp(0 < numArgs);
+ assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
+ lclNum = lvaVarargsHandleArg;
+ op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
+ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
+ impPushOnStack(op1, tiRetVal);
+ break;
- case CEE_ARGLIST:
+ case CEE_ENDFINALLY:
- if (!info.compIsVarArgs)
- BADCODE("arglist in non-vararg method");
+ if (compIsForInlining())
+ {
+ assert(!"Shouldn't have exception handlers in the inliner!");
+ compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
+ return;
+ }
- if (tiVerificationNeeded)
- {
- tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
- }
- assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
-
- /* The ARGLIST cookie is a hidden 'last' parameter, we have already
- adjusted the arg count cos this is like fetching the last param */
- assertImp(0 < numArgs);
- assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
- lclNum = lvaVarargsHandleArg;
- op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
- op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
- impPushOnStack(op1, tiRetVal);
- break;
+ if (verCurrentState.esStackDepth > 0)
+ {
+ impEvalSideEffects();
+ }
- case CEE_ENDFINALLY:
+ if (info.compXcptnsCount == 0)
+ {
+ BADCODE("endfinally outside finally");
+ }
- if (compIsForInlining())
- {
- assert(!"Shouldn't have exception handlers in the inliner!");
- compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
- return;
- }
-
- if (verCurrentState.esStackDepth > 0)
- impEvalSideEffects();
+ assert(verCurrentState.esStackDepth == 0);
- if (info.compXcptnsCount == 0)
- BADCODE("endfinally outside finally");
+ op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
+ goto APPEND;
- assert(verCurrentState.esStackDepth == 0);
+ case CEE_ENDFILTER:
- op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, NULL);
- goto APPEND;
+ if (compIsForInlining())
+ {
+ assert(!"Shouldn't have exception handlers in the inliner!");
+ compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
+ return;
+ }
- case CEE_ENDFILTER:
-
- if (compIsForInlining())
- {
- assert(!"Shouldn't have exception handlers in the inliner!");
- compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
- return;
- }
+ block->bbSetRunRarely(); // filters are rare
- block->bbSetRunRarely(); // filters are rare
+ if (info.compXcptnsCount == 0)
+ {
+ BADCODE("endfilter outside filter");
+ }
- if (info.compXcptnsCount == 0)
- BADCODE("endfilter outside filter");
+ if (tiVerificationNeeded)
+ {
+ Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
+ }
- if (tiVerificationNeeded)
- Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
+ op1 = impPopStack().val;
+ assertImp(op1->gtType == TYP_INT);
+ if (!bbInFilterILRange(block))
+ {
+ BADCODE("EndFilter outside a filter handler");
+ }
- op1 = impPopStack().val;
- assertImp(op1->gtType == TYP_INT);
- if (!bbInFilterILRange(block))
- BADCODE("EndFilter outside a filter handler");
+ /* Mark current bb as end of filter */
- /* Mark current bb as end of filter */
+ assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
+ assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
- assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
- assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
+ /* Mark catch handler as successor */
- /* Mark catch handler as successor */
+ op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
+ if (verCurrentState.esStackDepth != 0)
+ {
+ verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
+ DEBUGARG(__LINE__));
+ }
+ goto APPEND;
- op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
- if (verCurrentState.esStackDepth != 0)
- verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__) DEBUGARG(__LINE__));
- goto APPEND;
+ case CEE_RET:
+ prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
+ RET:
+ if (!impReturnInstruction(block, prefixFlags, opcode))
+ {
+ return; // abort
+ }
+ else
+ {
+ break;
+ }
- case CEE_RET:
- prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
-RET:
- if (!impReturnInstruction(block, prefixFlags, opcode))
- return; // abort
- else
- break;
+ case CEE_JMP:
- case CEE_JMP:
+ assert(!compIsForInlining());
- assert(!compIsForInlining());
+ if (tiVerificationNeeded)
+ {
+ Verify(false, "Invalid opcode: CEE_JMP");
+ }
- if (tiVerificationNeeded)
- Verify(false, "Invalid opcode: CEE_JMP");
-
- if ((info.compFlags & CORINFO_FLG_SYNCH) ||
- block->hasTryIndex() || block->hasHndIndex())
- {
- /* CEE_JMP does not make sense in some "protected" regions. */
-
- BADCODE("Jmp not allowed in protected region");
- }
+ if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
+ {
+ /* CEE_JMP does not make sense in some "protected" regions. */
- if (verCurrentState.esStackDepth != 0)
- {
- BADCODE("Stack must be empty after CEE_JMPs");
- }
+ BADCODE("Jmp not allowed in protected region");
+ }
- _impResolveToken(CORINFO_TOKENKIND_Method);
+ if (verCurrentState.esStackDepth != 0)
+ {
+ BADCODE("Stack must be empty after CEE_JMPs");
+ }
- JITDUMP(" %08X", resolvedToken.token);
+ _impResolveToken(CORINFO_TOKENKIND_Method);
- /* The signature of the target has to be identical to ours.
- At least check that argCnt and returnType match */
+ JITDUMP(" %08X", resolvedToken.token);
- eeGetMethodSig(resolvedToken.hMethod, &sig);
- if (sig.numArgs != info.compMethodInfo->args.numArgs ||
- sig.retType != info.compMethodInfo->args.retType ||
- sig.callConv != info.compMethodInfo->args.callConv)
- {
- BADCODE("Incompatible target for CEE_JMPs");
- }
+ /* The signature of the target has to be identical to ours.
+ At least check that argCnt and returnType match */
+
+ eeGetMethodSig(resolvedToken.hMethod, &sig);
+ if (sig.numArgs != info.compMethodInfo->args.numArgs ||
+ sig.retType != info.compMethodInfo->args.retType ||
+ sig.callConv != info.compMethodInfo->args.callConv)
+ {
+ BADCODE("Incompatible target for CEE_JMPs");
+ }
#if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
- op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t) resolvedToken.hMethod);
+ op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
+
+ /* Mark the basic block as being a JUMP instead of RETURN */
- /* Mark the basic block as being a JUMP instead of RETURN */
+ block->bbFlags |= BBF_HAS_JMP;
- block->bbFlags |= BBF_HAS_JMP;
+ /* Set this flag to make sure register arguments have a location assigned
+ * even if we don't use them inside the method */
- /* Set this flag to make sure register arguments have a location assigned
- * even if we don't use them inside the method */
+ compJmpOpUsed = true;
- compJmpOpUsed = true;
+ fgNoStructPromotion = true;
- fgNoStructPromotion = true;
-
- goto APPEND;
+ goto APPEND;
#else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
- // Import this just like a series of LDARGs + tail. + call + ret
+ // Import this just like a series of LDARGs + tail. + call + ret
- if (info.compIsVarArgs)
- {
- // For now we don't implement true tail calls, so this breaks varargs.
- // So warn the user instead of generating bad code.
- // This is a semi-temporary workaround for DevDiv 173860, until we can properly
- // implement true tail calls.
- IMPL_LIMITATION("varags + CEE_JMP doesn't work yet");
- }
+ if (info.compIsVarArgs)
+ {
+ // For now we don't implement true tail calls, so this breaks varargs.
+ // So warn the user instead of generating bad code.
+ // This is a semi-temporary workaround for DevDiv 173860, until we can properly
+ // implement true tail calls.
+                    IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
+ }
- // First load up the arguments (0 - N)
- for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
- {
- impLoadArg(argNum, opcodeOffs + sz + 1);
- }
+ // First load up the arguments (0 - N)
+ for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
+ {
+ impLoadArg(argNum, opcodeOffs + sz + 1);
+ }
- // Now generate the tail call
- noway_assert(prefixFlags == 0);
- prefixFlags = PREFIX_TAILCALL_EXPLICIT;
- opcode = CEE_CALL;
+ // Now generate the tail call
+ noway_assert(prefixFlags == 0);
+ prefixFlags = PREFIX_TAILCALL_EXPLICIT;
+ opcode = CEE_CALL;
- eeGetCallInfo(&resolvedToken,
- NULL,
- combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
- &callInfo);
+ eeGetCallInfo(&resolvedToken, NULL,
+ combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
- //All calls and delegates need a security callout.
- impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
+ // All calls and delegates need a security callout.
+ impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
- callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo, opcodeOffs);
+ callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
+ opcodeOffs);
- // And finish with the ret
- goto RET;
+ // And finish with the ret
+ goto RET;
#endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
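
The jmp handling above only sanity-checks that the target's signature matches the caller's (argument count, return type, calling convention) before either emitting a GT_JMP or lowering to a sequence of ldargs plus an explicit tail call. A minimal sketch of that compatibility test; MockSig and its fields are assumptions for the example, not the real CORINFO_SIG_INFO layout:

#include <cstdio>

struct MockSig
{
    unsigned numArgs;
    int      retType;
    int      callConv;
};

// The same three comparisons the importer makes before accepting a CEE_JMP target.
static bool JmpTargetCompatible(const MockSig& target, const MockSig& caller)
{
    return (target.numArgs == caller.numArgs) && (target.retType == caller.retType) &&
           (target.callConv == caller.callConv);
}

int main()
{
    MockSig caller = {2, 1, 0};
    MockSig target = {2, 1, 0};
    printf("compatible: %d\n", JmpTargetCompatible(target, caller)); // prints 1
    return 0;
}
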
- case CEE_LDELEMA :
- assertImp(sz == sizeof(unsigned));
+ case CEE_LDELEMA:
+ assertImp(sz == sizeof(unsigned));
- _impResolveToken(CORINFO_TOKENKIND_Class);
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- JITDUMP(" %08X", resolvedToken.token);
+ JITDUMP(" %08X", resolvedToken.token);
- ldelemClsHnd = resolvedToken.hClass;
+ ldelemClsHnd = resolvedToken.hClass;
- if (tiVerificationNeeded)
- {
- typeInfo tiArray = impStackTop(1).seTypeInfo;
- typeInfo tiIndex = impStackTop().seTypeInfo;
-
-
- // As per ECMA 'index' specified can be either int32 or native int.
- Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
-
- typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
- Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType), "bad array");
-
- tiRetVal = arrayElemType;
- tiRetVal.MakeByRef();
- if (prefixFlags & PREFIX_READONLY)
+ if (tiVerificationNeeded)
{
- tiRetVal.SetIsReadonlyByRef();
- }
+ typeInfo tiArray = impStackTop(1).seTypeInfo;
+ typeInfo tiIndex = impStackTop().seTypeInfo;
- // an array interior pointer is always in the heap
- tiRetVal.SetIsPermanentHomeByRef();
- }
-
- // If it's a value class array we just do a simple address-of
- if (eeIsValueClass(ldelemClsHnd))
- {
- CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
- if (cit == CORINFO_TYPE_UNDEF)
- lclTyp = TYP_STRUCT;
- else
- lclTyp = JITtype2varType(cit);
- goto ARR_LD_POST_VERIFY;
- }
-
- // Similarly, if its a readonly access, we can do a simple address-of
- // without doing a runtime type-check
- if (prefixFlags & PREFIX_READONLY)
- {
- lclTyp = TYP_REF;
- goto ARR_LD_POST_VERIFY;
- }
+                    // As per ECMA, the 'index' specified can be either int32 or native int.
+ Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
- // Otherwise we need the full helper function with run-time type check
- op1 = impTokenToHandle(&resolvedToken);
- if (op1 == NULL) // compDonotInline()
- return;
+ typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
+ Verify(tiArray.IsNullObjRef() ||
+ typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
+ "bad array");
- args = gtNewArgList(op1); // Type
- args = gtNewListNode(impPopStack().val, args); // index
- args = gtNewListNode(impPopStack().val, args); // array
- op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
+ tiRetVal = arrayElemType;
+ tiRetVal.MakeByRef();
+ if (prefixFlags & PREFIX_READONLY)
+ {
+ tiRetVal.SetIsReadonlyByRef();
+ }
- impPushOnStack(op1, tiRetVal);
- break;
+ // an array interior pointer is always in the heap
+ tiRetVal.SetIsPermanentHomeByRef();
+ }
- //ldelem for reference and value types
- case CEE_LDELEM :
- assertImp(sz == sizeof(unsigned));
+ // If it's a value class array we just do a simple address-of
+ if (eeIsValueClass(ldelemClsHnd))
+ {
+ CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
+ if (cit == CORINFO_TYPE_UNDEF)
+ {
+ lclTyp = TYP_STRUCT;
+ }
+ else
+ {
+ lclTyp = JITtype2varType(cit);
+ }
+ goto ARR_LD_POST_VERIFY;
+ }
- _impResolveToken(CORINFO_TOKENKIND_Class);
+                // Similarly, if it's a readonly access, we can do a simple address-of
+ // without doing a runtime type-check
+ if (prefixFlags & PREFIX_READONLY)
+ {
+ lclTyp = TYP_REF;
+ goto ARR_LD_POST_VERIFY;
+ }
- JITDUMP(" %08X", resolvedToken.token);
+ // Otherwise we need the full helper function with run-time type check
+ op1 = impTokenToHandle(&resolvedToken);
+ if (op1 == nullptr)
+ { // compDonotInline()
+ return;
+ }
- ldelemClsHnd = resolvedToken.hClass;
+ args = gtNewArgList(op1); // Type
+ args = gtNewListNode(impPopStack().val, args); // index
+ args = gtNewListNode(impPopStack().val, args); // array
+ op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
- if (tiVerificationNeeded)
- {
- typeInfo tiArray = impStackTop(1).seTypeInfo;
- typeInfo tiIndex = impStackTop().seTypeInfo;
+ impPushOnStack(op1, tiRetVal);
+ break;
- // As per ECMA 'index' specified can be either int32 or native int.
- Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
- tiRetVal = verMakeTypeInfo(ldelemClsHnd);
+ // ldelem for reference and value types
+ case CEE_LDELEM:
+ assertImp(sz == sizeof(unsigned));
- Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
- "type of array incompatible with type operand");
- tiRetVal.NormaliseForStack();
- }
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- // If it's a reference type or generic variable type
- // then just generate code as though it's a ldelem.ref instruction
- if (!eeIsValueClass(ldelemClsHnd))
- {
- lclTyp = TYP_REF;
- opcode = CEE_LDELEM_REF;
- }
- else
- {
- CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
- lclTyp = JITtype2varType(jitTyp);
- tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
- tiRetVal.NormaliseForStack();
- }
- goto ARR_LD_POST_VERIFY;
+ JITDUMP(" %08X", resolvedToken.token);
-
- case CEE_LDELEM_I1 : lclTyp = TYP_BYTE ; goto ARR_LD;
- case CEE_LDELEM_I2 : lclTyp = TYP_SHORT ; goto ARR_LD;
- case CEE_LDELEM_I : lclTyp = TYP_I_IMPL; goto ARR_LD;
+ ldelemClsHnd = resolvedToken.hClass;
- // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
- // and treating it as TYP_INT avoids other asserts.
- case CEE_LDELEM_U4 : lclTyp = TYP_INT ; goto ARR_LD;
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiArray = impStackTop(1).seTypeInfo;
+ typeInfo tiIndex = impStackTop().seTypeInfo;
- case CEE_LDELEM_I4 : lclTyp = TYP_INT ; goto ARR_LD;
- case CEE_LDELEM_I8 : lclTyp = TYP_LONG ; goto ARR_LD;
- case CEE_LDELEM_REF: lclTyp = TYP_REF ; goto ARR_LD;
- case CEE_LDELEM_R4 : lclTyp = TYP_FLOAT ; goto ARR_LD;
- case CEE_LDELEM_R8 : lclTyp = TYP_DOUBLE; goto ARR_LD;
- case CEE_LDELEM_U1 : lclTyp = TYP_UBYTE ; goto ARR_LD;
- case CEE_LDELEM_U2 : lclTyp = TYP_CHAR ; goto ARR_LD;
+                    // As per ECMA, the 'index' specified can be either int32 or native int.
+ Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
+ tiRetVal = verMakeTypeInfo(ldelemClsHnd);
- ARR_LD:
+ Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
+ "type of array incompatible with type operand");
+ tiRetVal.NormaliseForStack();
+ }
- if (tiVerificationNeeded)
- {
- typeInfo tiArray = impStackTop(1).seTypeInfo;
- typeInfo tiIndex = impStackTop().seTypeInfo;
-
- // As per ECMA 'index' specified can be either int32 or native int.
- Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
- if (tiArray.IsNullObjRef())
- {
- if (lclTyp == TYP_REF) // we will say a deref of a null array yields a null ref
- tiRetVal = typeInfo(TI_NULL);
- else
- tiRetVal = typeInfo(lclTyp);
+ // If it's a reference type or generic variable type
+ // then just generate code as though it's a ldelem.ref instruction
+ if (!eeIsValueClass(ldelemClsHnd))
+ {
+ lclTyp = TYP_REF;
+ opcode = CEE_LDELEM_REF;
}
else
{
- tiRetVal = verGetArrayElemType(tiArray);
- typeInfo arrayElemTi = typeInfo(lclTyp);
-#ifdef _TARGET_64BIT_
- if (opcode == CEE_LDELEM_I)
- {
- arrayElemTi = typeInfo::nativeInt();
- }
+ CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
+ lclTyp = JITtype2varType(jitTyp);
+ tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
+ tiRetVal.NormaliseForStack();
+ }
+ goto ARR_LD_POST_VERIFY;
+ case CEE_LDELEM_I1:
+ lclTyp = TYP_BYTE;
+ goto ARR_LD;
+ case CEE_LDELEM_I2:
+ lclTyp = TYP_SHORT;
+ goto ARR_LD;
+ case CEE_LDELEM_I:
+ lclTyp = TYP_I_IMPL;
+ goto ARR_LD;
+
+ // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
+ // and treating it as TYP_INT avoids other asserts.
+ case CEE_LDELEM_U4:
+ lclTyp = TYP_INT;
+ goto ARR_LD;
+
+ case CEE_LDELEM_I4:
+ lclTyp = TYP_INT;
+ goto ARR_LD;
+ case CEE_LDELEM_I8:
+ lclTyp = TYP_LONG;
+ goto ARR_LD;
+ case CEE_LDELEM_REF:
+ lclTyp = TYP_REF;
+ goto ARR_LD;
+ case CEE_LDELEM_R4:
+ lclTyp = TYP_FLOAT;
+ goto ARR_LD;
+ case CEE_LDELEM_R8:
+ lclTyp = TYP_DOUBLE;
+ goto ARR_LD;
+ case CEE_LDELEM_U1:
+ lclTyp = TYP_UBYTE;
+ goto ARR_LD;
+ case CEE_LDELEM_U2:
+ lclTyp = TYP_CHAR;
+ goto ARR_LD;
+
+ ARR_LD:
+
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiArray = impStackTop(1).seTypeInfo;
+ typeInfo tiIndex = impStackTop().seTypeInfo;
- if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
+                    // As per ECMA, the 'index' specified can be either int32 or native int.
+ Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
+ if (tiArray.IsNullObjRef())
{
- Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
+ if (lclTyp == TYP_REF)
+ { // we will say a deref of a null array yields a null ref
+ tiRetVal = typeInfo(TI_NULL);
+ }
+ else
+ {
+ tiRetVal = typeInfo(lclTyp);
+ }
}
else
-#endif // _TARGET_64BIT_
{
- Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
+ tiRetVal = verGetArrayElemType(tiArray);
+ typeInfo arrayElemTi = typeInfo(lclTyp);
+#ifdef _TARGET_64BIT_
+ if (opcode == CEE_LDELEM_I)
+ {
+ arrayElemTi = typeInfo::nativeInt();
+ }
+
+ if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
+ {
+ Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
+ }
+ else
+#endif // _TARGET_64BIT_
+ {
+ Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
+ }
}
+ tiRetVal.NormaliseForStack();
}
- tiRetVal.NormaliseForStack();
- }
-ARR_LD_POST_VERIFY:
-
- /* Pull the index value and array address */
+ ARR_LD_POST_VERIFY:
+
+ /* Pull the index value and array address */
op2 = impPopStack().val;
- op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF);
+ op1 = impPopStack().val;
+ assertImp(op1->gtType == TYP_REF);
- /* Check for null pointer - in the inliner case we simply abort */
+ /* Check for null pointer - in the inliner case we simply abort */
- if (compIsForInlining())
- {
- if (op1->gtOper == GT_CNS_INT)
+ if (compIsForInlining())
{
- compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
- return;
+ if (op1->gtOper == GT_CNS_INT)
+ {
+ compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
+ return;
+ }
}
- }
- op1 = impCheckForNullPointer(op1);
+ op1 = impCheckForNullPointer(op1);
- /* Mark the block as containing an index expression */
+ /* Mark the block as containing an index expression */
- if (op1->gtOper == GT_LCL_VAR)
- {
- if (op2->gtOper == GT_LCL_VAR ||
- op2->gtOper == GT_CNS_INT ||
- op2->gtOper == GT_ADD)
+ if (op1->gtOper == GT_LCL_VAR)
{
- block->bbFlags |= BBF_HAS_IDX_LEN;
- optMethodFlags |= OMF_HAS_ARRAYREF;
+ if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
+ {
+ block->bbFlags |= BBF_HAS_IDX_LEN;
+ optMethodFlags |= OMF_HAS_ARRAYREF;
+ }
}
- }
- /* Create the index node and push it on the stack */
+ /* Create the index node and push it on the stack */
- op1 = gtNewIndexRef(lclTyp, op1, op2);
+ op1 = gtNewIndexRef(lclTyp, op1, op2);
- ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
+ ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
- if ((opcode == CEE_LDELEMA) || ldstruct ||
- (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
- {
- assert(ldelemClsHnd != DUMMY_INIT(NULL));
-
- // remember the element size
- if (lclTyp == TYP_REF)
+ if ((opcode == CEE_LDELEMA) || ldstruct ||
+ (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
{
- op1->gtIndex.gtIndElemSize = sizeof(void*);
- }
- else
- {
- // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type.
- if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
+ assert(ldelemClsHnd != DUMMY_INIT(NULL));
+
+ // remember the element size
+ if (lclTyp == TYP_REF)
{
- op1->gtIndex.gtStructElemClass = ldelemClsHnd;
+ op1->gtIndex.gtIndElemSize = sizeof(void*);
}
- assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
- if (lclTyp == TYP_STRUCT)
+ else
{
- size = info.compCompHnd->getClassSize(ldelemClsHnd);
- op1->gtIndex.gtIndElemSize = size;
- op1->gtType = lclTyp;
+ // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type.
+ if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
+ {
+ op1->gtIndex.gtStructElemClass = ldelemClsHnd;
+ }
+ assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
+ if (lclTyp == TYP_STRUCT)
+ {
+ size = info.compCompHnd->getClassSize(ldelemClsHnd);
+ op1->gtIndex.gtIndElemSize = size;
+ op1->gtType = lclTyp;
+ }
}
- }
- if ((opcode == CEE_LDELEMA) || ldstruct)
- {
- // wrap it in a &
- lclTyp = TYP_BYREF;
+ if ((opcode == CEE_LDELEMA) || ldstruct)
+ {
+ // wrap it in a &
+ lclTyp = TYP_BYREF;
- op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
+ op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
+ }
+ else
+ {
+ assert(lclTyp != TYP_STRUCT);
+ }
}
- else
+
+ if (ldstruct)
{
- assert(lclTyp != TYP_STRUCT);
+ // Create an OBJ for the result
+ op1 = gtNewObjNode(ldelemClsHnd, op1);
+ op1->gtFlags |= GTF_EXCEPT;
}
- }
-
- if (ldstruct)
- {
- // Create an OBJ for the result
- op1 = gtNewObjNode(ldelemClsHnd, op1);
- op1->gtFlags |= GTF_EXCEPT;
- }
- impPushOnStack(op1, tiRetVal);
- break;
-
-
- //stelem for reference and value types
- case CEE_STELEM:
-
- assertImp(sz == sizeof(unsigned));
+ impPushOnStack(op1, tiRetVal);
+ break;
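
For ldelema/ldelem of value-class arrays the index node built above also records an element size: pointer-sized for reference elements, the class layout size for value classes. A standalone sketch of just that size selection (DemoElemSize and kRefElemSize are invented names for this example):

#include <cstdio>

const unsigned kRefElemSize = (unsigned)sizeof(void*);

// Reference-typed elements are pointer-sized; value-class elements use the
// size reported for the class (getClassSize in the importer).
static unsigned DemoElemSize(bool isRefElement, unsigned valueClassSize)
{
    return isRefElement ? kRefElemSize : valueClassSize;
}

int main()
{
    printf("ref elem: %u bytes, struct elem: %u bytes\n", DemoElemSize(true, 0), DemoElemSize(false, 24));
    return 0;
}
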
- _impResolveToken(CORINFO_TOKENKIND_Class);
+ // stelem for reference and value types
+ case CEE_STELEM:
- JITDUMP(" %08X", resolvedToken.token);
+ assertImp(sz == sizeof(unsigned));
- stelemClsHnd = resolvedToken.hClass;
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- if (tiVerificationNeeded)
- {
- typeInfo tiArray = impStackTop(2).seTypeInfo;
- typeInfo tiIndex = impStackTop(1).seTypeInfo;
- typeInfo tiValue = impStackTop().seTypeInfo;
+ JITDUMP(" %08X", resolvedToken.token);
- // As per ECMA 'index' specified can be either int32 or native int.
- Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
- typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
+ stelemClsHnd = resolvedToken.hClass;
- Verify(tiArray.IsNullObjRef() ||
- tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
- "type operand incompatible with array element type");
- arrayElem.NormaliseForStack();
- Verify(tiCompatibleWith(tiValue, arrayElem, true),
- "value incompatible with type operand");
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiArray = impStackTop(2).seTypeInfo;
+ typeInfo tiIndex = impStackTop(1).seTypeInfo;
+ typeInfo tiValue = impStackTop().seTypeInfo;
+
+                    // As per ECMA, the 'index' specified can be either int32 or native int.
+ Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
+ typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
+
+ Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
+ "type operand incompatible with array element type");
+ arrayElem.NormaliseForStack();
+ Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
+ }
- }
+ // If it's a reference type just behave as though it's a stelem.ref instruction
+ if (!eeIsValueClass(stelemClsHnd))
+ {
+ goto STELEM_REF_POST_VERIFY;
+ }
- // If it's a reference type just behave as though it's a stelem.ref instruction
- if (!eeIsValueClass(stelemClsHnd))
- goto STELEM_REF_POST_VERIFY;
+ // Otherwise extract the type
+ {
+ CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
+ lclTyp = JITtype2varType(jitTyp);
+ goto ARR_ST_POST_VERIFY;
+ }
- // Otherwise extract the type
- {
- CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
- lclTyp = JITtype2varType(jitTyp);
- goto ARR_ST_POST_VERIFY;
- }
+ case CEE_STELEM_REF:
-
- case CEE_STELEM_REF:
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiArray = impStackTop(2).seTypeInfo;
+ typeInfo tiIndex = impStackTop(1).seTypeInfo;
+ typeInfo tiValue = impStackTop().seTypeInfo;
- if (tiVerificationNeeded)
- {
- typeInfo tiArray = impStackTop(2).seTypeInfo;
- typeInfo tiIndex = impStackTop(1).seTypeInfo;
- typeInfo tiValue = impStackTop().seTypeInfo;
+                    // As per ECMA, the 'index' specified can be either int32 or native int.
+ Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
+ Verify(tiValue.IsObjRef(), "bad value");
- // As per ECMA 'index' specified can be either int32 or native int.
- Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
- Verify(tiValue.IsObjRef(), "bad value");
+                    // we only check that it is an object reference; the helper does additional checks
+ Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
+ }
- // we only check that it is an object referece, The helper does additional checks
- Verify(tiArray.IsNullObjRef() ||
- verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
- }
-
- arrayNodeTo = impStackTop(2).val;
- arrayNodeToIndex = impStackTop(1).val;
- arrayNodeFrom = impStackTop().val;
+ arrayNodeTo = impStackTop(2).val;
+ arrayNodeToIndex = impStackTop(1).val;
+ arrayNodeFrom = impStackTop().val;
- //
- // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
- // lot of cases because of covariance. ie. foo[] can be cast to object[].
- //
-
- // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j]
- // This does not need CORINFO_HELP_ARRADDR_ST
-
- if (arrayNodeFrom->OperGet() == GT_INDEX &&
- arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
- arrayNodeTo->gtOper == GT_LCL_VAR &&
- arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
- !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
- {
- lclTyp = TYP_REF;
- goto ARR_ST_POST_VERIFY;
- }
+ //
+ // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
+ // lot of cases because of covariance. ie. foo[] can be cast to object[].
+ //
- // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
-
- if (arrayNodeFrom->OperGet() == GT_CNS_INT)
- {
- assert(arrayNodeFrom->gtType == TYP_REF &&
- arrayNodeFrom->gtIntCon.gtIconVal == 0);
-
- lclTyp = TYP_REF;
- goto ARR_ST_POST_VERIFY;
- }
-
- STELEM_REF_POST_VERIFY:
-
- /* Call a helper function to do the assignment */
- op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST,
- TYP_VOID, 0,
- impPopList(3, &flags, 0));
-
- goto SPILL_APPEND;
-
- case CEE_STELEM_I1: lclTyp = TYP_BYTE ; goto ARR_ST;
- case CEE_STELEM_I2: lclTyp = TYP_SHORT ; goto ARR_ST;
- case CEE_STELEM_I: lclTyp = TYP_I_IMPL; goto ARR_ST;
- case CEE_STELEM_I4: lclTyp = TYP_INT ; goto ARR_ST;
- case CEE_STELEM_I8: lclTyp = TYP_LONG ; goto ARR_ST;
- case CEE_STELEM_R4: lclTyp = TYP_FLOAT ; goto ARR_ST;
- case CEE_STELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_ST;
-
- ARR_ST:
-
- if (tiVerificationNeeded)
- {
- typeInfo tiArray = impStackTop(2).seTypeInfo;
- typeInfo tiIndex = impStackTop(1).seTypeInfo;
- typeInfo tiValue = impStackTop().seTypeInfo;
+ // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j]
+ // This does not need CORINFO_HELP_ARRADDR_ST
- // As per ECMA 'index' specified can be either int32 or native int.
- Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
- typeInfo arrayElem = typeInfo(lclTyp);
-#ifdef _TARGET_64BIT_
- if (opcode == CEE_STELEM_I)
+ if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
+ arrayNodeTo->gtOper == GT_LCL_VAR &&
+ arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
+ !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
{
- arrayElem = typeInfo::nativeInt();
+ lclTyp = TYP_REF;
+ goto ARR_ST_POST_VERIFY;
}
-#endif // _TARGET_64BIT_
- Verify(tiArray.IsNullObjRef() ||
- typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem), "bad array");
-
- Verify(tiCompatibleWith(NormaliseForStack(tiValue),
- arrayElem.NormaliseForStack(), true),
- "bad value");
- }
+ // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
- ARR_ST_POST_VERIFY:
- /* The strict order of evaluation is LHS-operands, RHS-operands,
- range-check, and then assignment. However, codegen currently
- does the range-check before evaluation the RHS-operands. So to
- maintain strict ordering, we spill the stack. */
-
- if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
- {
- impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Strict ordering of exceptions for Array store") );
- }
+ if (arrayNodeFrom->OperGet() == GT_CNS_INT)
+ {
+ assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
- /* Pull the new value from the stack */
- op2 = impPopStack().val;
+ lclTyp = TYP_REF;
+ goto ARR_ST_POST_VERIFY;
+ }
- /* Pull the index value */
- op1 = impPopStack().val;
+ STELEM_REF_POST_VERIFY:
- /* Pull the array address */
- op3 = impPopStack().val;
-
- assertImp(op3->gtType == TYP_REF);
- if (op2->IsVarAddr())
- op2->gtType = TYP_I_IMPL;
+ /* Call a helper function to do the assignment */
+ op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
- op3 = impCheckForNullPointer(op3);
+ goto SPILL_APPEND;
- // Mark the block as containing an index expression
+ case CEE_STELEM_I1:
+ lclTyp = TYP_BYTE;
+ goto ARR_ST;
+ case CEE_STELEM_I2:
+ lclTyp = TYP_SHORT;
+ goto ARR_ST;
+ case CEE_STELEM_I:
+ lclTyp = TYP_I_IMPL;
+ goto ARR_ST;
+ case CEE_STELEM_I4:
+ lclTyp = TYP_INT;
+ goto ARR_ST;
+ case CEE_STELEM_I8:
+ lclTyp = TYP_LONG;
+ goto ARR_ST;
+ case CEE_STELEM_R4:
+ lclTyp = TYP_FLOAT;
+ goto ARR_ST;
+ case CEE_STELEM_R8:
+ lclTyp = TYP_DOUBLE;
+ goto ARR_ST;
+
+ ARR_ST:
- if (op3->gtOper == GT_LCL_VAR)
- {
- if (op1->gtOper == GT_LCL_VAR ||
- op1->gtOper == GT_CNS_INT ||
- op1->gtOper == GT_ADD)
+ if (tiVerificationNeeded)
{
- block->bbFlags |= BBF_HAS_IDX_LEN;
- optMethodFlags |= OMF_HAS_ARRAYREF;
- }
- }
+ typeInfo tiArray = impStackTop(2).seTypeInfo;
+ typeInfo tiIndex = impStackTop(1).seTypeInfo;
+ typeInfo tiValue = impStackTop().seTypeInfo;
- /* Create the index node */
+                    // As per ECMA, the 'index' specified can be either int32 or native int.
+ Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
+ typeInfo arrayElem = typeInfo(lclTyp);
+#ifdef _TARGET_64BIT_
+ if (opcode == CEE_STELEM_I)
+ {
+ arrayElem = typeInfo::nativeInt();
+ }
+#endif // _TARGET_64BIT_
+ Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
+ "bad array");
- op1 = gtNewIndexRef(lclTyp, op3, op1);
+ Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
+ "bad value");
+ }
- /* Create the assignment node and append it */
+ ARR_ST_POST_VERIFY:
+ /* The strict order of evaluation is LHS-operands, RHS-operands,
+ range-check, and then assignment. However, codegen currently
+                   does the range-check before evaluating the RHS-operands. So to
+ maintain strict ordering, we spill the stack. */
- if (lclTyp == TYP_STRUCT)
- {
- assert(stelemClsHnd != DUMMY_INIT(NULL));
+ if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
+ {
+ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
+ "Strict ordering of exceptions for Array store"));
+ }
- op1->gtIndex.gtStructElemClass = stelemClsHnd;
- op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
- }
- if (varTypeIsStruct(op1))
- {
- // wrap it in a &
- op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
- op1 = impAssignStructPtr(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
- }
- else
- {
- op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
- op1 = gtNewAssignNode(op1, op2);
- }
+ /* Pull the new value from the stack */
+ op2 = impPopStack().val;
- /* Mark the expression as containing an assignment */
+ /* Pull the index value */
+ op1 = impPopStack().val;
- op1->gtFlags |= GTF_ASG;
+ /* Pull the array address */
+ op3 = impPopStack().val;
- goto SPILL_APPEND;
+ assertImp(op3->gtType == TYP_REF);
+ if (op2->IsVarAddr())
+ {
+ op2->gtType = TYP_I_IMPL;
+ }
- case CEE_ADD: oper = GT_ADD; goto MATH_OP2;
+ op3 = impCheckForNullPointer(op3);
- case CEE_ADD_OVF: uns = false; goto ADD_OVF;
- case CEE_ADD_OVF_UN: uns = true; goto ADD_OVF;
+ // Mark the block as containing an index expression
-ADD_OVF: ovfl = true; callNode = false;
- oper = GT_ADD; goto MATH_OP2_FLAGS;
+ if (op3->gtOper == GT_LCL_VAR)
+ {
+ if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
+ {
+ block->bbFlags |= BBF_HAS_IDX_LEN;
+ optMethodFlags |= OMF_HAS_ARRAYREF;
+ }
+ }
- case CEE_SUB: oper = GT_SUB; goto MATH_OP2;
+ /* Create the index node */
- case CEE_SUB_OVF: uns = false; goto SUB_OVF;
- case CEE_SUB_OVF_UN: uns = true; goto SUB_OVF;
+ op1 = gtNewIndexRef(lclTyp, op3, op1);
-SUB_OVF: ovfl = true; callNode = false;
- oper = GT_SUB; goto MATH_OP2_FLAGS;
+ /* Create the assignment node and append it */
- case CEE_MUL: oper = GT_MUL; goto MATH_MAYBE_CALL_NO_OVF;
+ if (lclTyp == TYP_STRUCT)
+ {
+ assert(stelemClsHnd != DUMMY_INIT(NULL));
- case CEE_MUL_OVF: uns = false; goto MUL_OVF;
- case CEE_MUL_OVF_UN: uns = true; goto MUL_OVF;
+ op1->gtIndex.gtStructElemClass = stelemClsHnd;
+ op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
+ }
+ if (varTypeIsStruct(op1))
+ {
+ // wrap it in a &
+ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
+ op1 = impAssignStructPtr(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
+ }
+ else
+ {
+ op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
+ op1 = gtNewAssignNode(op1, op2);
+ }
-MUL_OVF: ovfl = true;
- oper = GT_MUL; goto MATH_MAYBE_CALL_OVF;
+ /* Mark the expression as containing an assignment */
- // Other binary math operations
+ op1->gtFlags |= GTF_ASG;
- case CEE_DIV: oper = GT_DIV; goto MATH_MAYBE_CALL_NO_OVF;
+ goto SPILL_APPEND;
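
The stelem.ref path above keeps the CORINFO_HELP_ARRADDR_ST covariance helper except in two provably safe cases: storing an element read from the very same, non-address-exposed array, or storing a null constant. A rough sketch of that decision, with invented mock types rather than the real GenTree shapes:

#include <cstdio>

struct MockValue
{
    bool     isNullConst;     // storing a literal null reference
    bool     isLoadFromArray; // value is arrLcl[j]
    unsigned srcArrayLcl;     // local number of the source array, if so
};

// A null always satisfies the element-type check; a load from the same
// (non-address-exposed) array cannot introduce a covariance violation either.
static bool CanSkipCovarianceHelper(const MockValue& v, unsigned dstArrayLcl, bool dstAddrExposed)
{
    if (v.isNullConst)
    {
        return true;
    }
    return v.isLoadFromArray && (v.srcArrayLcl == dstArrayLcl) && !dstAddrExposed;
}

int main()
{
    MockValue nullStore = {true, false, 0};
    MockValue sameArray = {false, true, 5};
    printf("%d %d\n", CanSkipCovarianceHelper(nullStore, 5, false),
           CanSkipCovarianceHelper(sameArray, 5, false)); // prints 1 1
    return 0;
}
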
- case CEE_DIV_UN: oper = GT_UDIV; goto MATH_MAYBE_CALL_NO_OVF;
+ case CEE_ADD:
+ oper = GT_ADD;
+ goto MATH_OP2;
+
+ case CEE_ADD_OVF:
+ uns = false;
+ goto ADD_OVF;
+ case CEE_ADD_OVF_UN:
+ uns = true;
+ goto ADD_OVF;
+
+ ADD_OVF:
+ ovfl = true;
+ callNode = false;
+ oper = GT_ADD;
+ goto MATH_OP2_FLAGS;
+
+ case CEE_SUB:
+ oper = GT_SUB;
+ goto MATH_OP2;
+
+ case CEE_SUB_OVF:
+ uns = false;
+ goto SUB_OVF;
+ case CEE_SUB_OVF_UN:
+ uns = true;
+ goto SUB_OVF;
+
+ SUB_OVF:
+ ovfl = true;
+ callNode = false;
+ oper = GT_SUB;
+ goto MATH_OP2_FLAGS;
+
+ case CEE_MUL:
+ oper = GT_MUL;
+ goto MATH_MAYBE_CALL_NO_OVF;
+
+ case CEE_MUL_OVF:
+ uns = false;
+ goto MUL_OVF;
+ case CEE_MUL_OVF_UN:
+ uns = true;
+ goto MUL_OVF;
+
+ MUL_OVF:
+ ovfl = true;
+ oper = GT_MUL;
+ goto MATH_MAYBE_CALL_OVF;
+
+ // Other binary math operations
+
+ case CEE_DIV:
+ oper = GT_DIV;
+ goto MATH_MAYBE_CALL_NO_OVF;
+
+ case CEE_DIV_UN:
+ oper = GT_UDIV;
+ goto MATH_MAYBE_CALL_NO_OVF;
+
+ case CEE_REM:
+ oper = GT_MOD;
+ goto MATH_MAYBE_CALL_NO_OVF;
+
+ case CEE_REM_UN:
+ oper = GT_UMOD;
+ goto MATH_MAYBE_CALL_NO_OVF;
+
+ MATH_MAYBE_CALL_NO_OVF:
+ ovfl = false;
+ MATH_MAYBE_CALL_OVF:
+ // Morpher has some complex logic about when to turn different
+ // typed nodes on different platforms into helper calls. We
+ // need to either duplicate that logic here, or just
+ // pessimistically make all the nodes large enough to become
+ // call nodes. Since call nodes aren't that much larger and
+ // these opcodes are infrequent enough I chose the latter.
+ callNode = true;
+ goto MATH_OP2_FLAGS;
- case CEE_REM: oper = GT_MOD; goto MATH_MAYBE_CALL_NO_OVF;
+ case CEE_AND:
+ oper = GT_AND;
+ goto MATH_OP2;
+ case CEE_OR:
+ oper = GT_OR;
+ goto MATH_OP2;
+ case CEE_XOR:
+ oper = GT_XOR;
+ goto MATH_OP2;
- case CEE_REM_UN: oper = GT_UMOD; goto MATH_MAYBE_CALL_NO_OVF;
+ MATH_OP2: // For default values of 'ovfl' and 'callNode'
-MATH_MAYBE_CALL_NO_OVF: ovfl = false;
-MATH_MAYBE_CALL_OVF:
- // Morpher has some complex logic about when to turn different
- // typed nodes on different platforms into helper calls. We
- // need to either duplicate that logic here, or just
- // pessimistically make all the nodes large enough to become
- // call nodes. Since call nodes aren't that much larger and
- // these opcodes are infrequent enough I chose the latter.
- callNode = true;
- goto MATH_OP2_FLAGS;
+ ovfl = false;
+ callNode = false;
- case CEE_AND: oper = GT_AND; goto MATH_OP2;
- case CEE_OR: oper = GT_OR ; goto MATH_OP2;
- case CEE_XOR: oper = GT_XOR; goto MATH_OP2;
+ MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
-MATH_OP2: // For default values of 'ovfl' and 'callNode'
+ /* Pull two values and push back the result */
- ovfl = false;
- callNode = false;
+ if (tiVerificationNeeded)
+ {
+ const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
+ const typeInfo& tiOp2 = impStackTop().seTypeInfo;
-MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
+ Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
+ if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
+ {
+ Verify(tiOp1.IsNumberType(), "not number");
+ }
+ else
+ {
+ Verify(tiOp1.IsIntegerType(), "not integer");
+ }
- /* Pull two values and push back the result */
+ Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
- if (tiVerificationNeeded)
- {
- const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
- const typeInfo& tiOp2 = impStackTop().seTypeInfo;
-
- Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
- if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
- Verify(tiOp1.IsNumberType(), "not number");
- else
- Verify(tiOp1.IsIntegerType(), "not integer");
-
- Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
-
- tiRetVal = tiOp1;
+ tiRetVal = tiOp1;
#ifdef _TARGET_64BIT_
- if (tiOp2.IsNativeIntType())
- {
- tiRetVal = tiOp2;
- }
+ if (tiOp2.IsNativeIntType())
+ {
+ tiRetVal = tiOp2;
+ }
#endif // _TARGET_64BIT_
- }
+ }
- op2 = impPopStack().val;
- op1 = impPopStack().val;
+ op2 = impPopStack().val;
+ op1 = impPopStack().val;
#if !CPU_HAS_FP_SUPPORT
- if (varTypeIsFloating(op1->gtType))
- {
- callNode = true;
- }
+ if (varTypeIsFloating(op1->gtType))
+ {
+ callNode = true;
+ }
#endif
- /* Can't do arithmetic with references */
- assertImp(genActualType(op1->TypeGet()) != TYP_REF &&
- genActualType(op2->TypeGet()) != TYP_REF);
-
- // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only
- // if it is in the stack)
- impBashVarAddrsToI(op1, op2);
+ /* Can't do arithmetic with references */
+ assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
- type = impGetByRefResultType(oper, uns, &op1, &op2);
+                // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
+ // if it is in the stack)
+ impBashVarAddrsToI(op1, op2);
- assert(!ovfl || !varTypeIsFloating(op1->gtType));
+ type = impGetByRefResultType(oper, uns, &op1, &op2);
- /* Special case: "int+0", "int-0", "int*1", "int/1" */
+ assert(!ovfl || !varTypeIsFloating(op1->gtType));
- if (op2->gtOper == GT_CNS_INT)
- {
- if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
- (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
+ /* Special case: "int+0", "int-0", "int*1", "int/1" */
+ if (op2->gtOper == GT_CNS_INT)
{
- impPushOnStack(op1, tiRetVal);
- break;
- }
- }
+ if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
+ (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
-#if !FEATURE_X87_DOUBLES
- // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
- //
- if (varTypeIsFloating(type) &&
- varTypeIsFloating(op1->gtType) &&
- varTypeIsFloating(op2->gtType))
- {
- if (op1->TypeGet() != type)
- {
- // We insert a cast of op1 to 'type'
- op1 = gtNewCastNode(type, op1, type);
+ {
+ impPushOnStack(op1, tiRetVal);
+ break;
+ }
}
- if (op2->TypeGet() != type)
+
+#if !FEATURE_X87_DOUBLES
+ // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
+ //
+ if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
{
- // We insert a cast of op2 to 'type'
- op2 = gtNewCastNode(type, op2, type);
+ if (op1->TypeGet() != type)
+ {
+ // We insert a cast of op1 to 'type'
+ op1 = gtNewCastNode(type, op1, type);
+ }
+ if (op2->TypeGet() != type)
+ {
+ // We insert a cast of op2 to 'type'
+ op2 = gtNewCastNode(type, op2, type);
+ }
}
- }
#endif // !FEATURE_X87_DOUBLES
#if SMALL_TREE_NODES
- if (callNode)
- {
- /* These operators can later be transformed into 'GT_CALL' */
+ if (callNode)
+ {
+ /* These operators can later be transformed into 'GT_CALL' */
- assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
+ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
#ifndef _TARGET_ARM_
- assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
- assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
- assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
- assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
+ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
+ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
+ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
+ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
#endif
- // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
- // that we'll need to transform into a general large node, but rather specifically
- // to a call: by doing it this way, things keep working if there are multiple sizes,
- // and a CALL is no longer the largest.
- // That said, as of now it *is* a large node, so we'll do this with an assert rather
- // than an "if".
- assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
- op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/true));
- }
- else
+ // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
+ // that we'll need to transform into a general large node, but rather specifically
+ // to a call: by doing it this way, things keep working if there are multiple sizes,
+ // and a CALL is no longer the largest.
+ // That said, as of now it *is* a large node, so we'll do this with an assert rather
+ // than an "if".
+ assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
+ op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
+ }
+ else
#endif // SMALL_TREE_NODES
- {
- op1 = gtNewOperNode(oper, type, op1, op2);
- }
-
- /* Special case: integer/long division may throw an exception */
-
- if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
- {
- op1->gtFlags |= GTF_EXCEPT;
- }
-
- if (ovfl)
- {
- assert(oper==GT_ADD || oper==GT_SUB || oper==GT_MUL);
- if (ovflType != TYP_UNKNOWN)
- op1->gtType = ovflType;
- op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
- if (uns)
- op1->gtFlags |= GTF_UNSIGNED;
- }
+ {
+ op1 = gtNewOperNode(oper, type, op1, op2);
+ }
- impPushOnStack(op1, tiRetVal);
- break;
+ /* Special case: integer/long division may throw an exception */
+ if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
+ {
+ op1->gtFlags |= GTF_EXCEPT;
+ }
- case CEE_SHL: oper = GT_LSH; goto CEE_SH_OP2;
+ if (ovfl)
+ {
+ assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
+ if (ovflType != TYP_UNKNOWN)
+ {
+ op1->gtType = ovflType;
+ }
+ op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
+ if (uns)
+ {
+ op1->gtFlags |= GTF_UNSIGNED;
+ }
+ }
- case CEE_SHR: oper = GT_RSH; goto CEE_SH_OP2;
- case CEE_SHR_UN: oper = GT_RSZ; goto CEE_SH_OP2;
+ impPushOnStack(op1, tiRetVal);
+ break;
-CEE_SH_OP2:
- if (tiVerificationNeeded)
- {
- const typeInfo& tiVal = impStackTop(1).seTypeInfo;
- const typeInfo& tiShift = impStackTop(0).seTypeInfo;
- Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
- tiRetVal = tiVal;
- }
- op2 = impPopStack().val;
- op1 = impPopStack().val; // operand to be shifted
- impBashVarAddrsToI(op1, op2);
+ case CEE_SHL:
+ oper = GT_LSH;
+ goto CEE_SH_OP2;
- type = genActualType(op1->TypeGet());
- op1 = gtNewOperNode(oper, type, op1, op2);
+ case CEE_SHR:
+ oper = GT_RSH;
+ goto CEE_SH_OP2;
+ case CEE_SHR_UN:
+ oper = GT_RSZ;
+ goto CEE_SH_OP2;
- impPushOnStack(op1, tiRetVal);
- break;
+ CEE_SH_OP2:
+ if (tiVerificationNeeded)
+ {
+ const typeInfo& tiVal = impStackTop(1).seTypeInfo;
+ const typeInfo& tiShift = impStackTop(0).seTypeInfo;
+ Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
+ tiRetVal = tiVal;
+ }
+ op2 = impPopStack().val;
+ op1 = impPopStack().val; // operand to be shifted
+ impBashVarAddrsToI(op1, op2);
- case CEE_NOT:
- if (tiVerificationNeeded)
- {
- tiRetVal = impStackTop().seTypeInfo;
- Verify(tiRetVal.IsIntegerType(), "bad int value");
- }
+ type = genActualType(op1->TypeGet());
+ op1 = gtNewOperNode(oper, type, op1, op2);
- op1 = impPopStack().val;
- impBashVarAddrsToI(op1, NULL);
- type = genActualType(op1->TypeGet());
- impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
- break;
+ impPushOnStack(op1, tiRetVal);
+ break;
- case CEE_CKFINITE:
- if (tiVerificationNeeded)
- {
- tiRetVal = impStackTop().seTypeInfo;
- Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
- }
- op1 = impPopStack().val;
- type = op1->TypeGet();
- op1 = gtNewOperNode(GT_CKFINITE, type, op1);
- op1->gtFlags |= GTF_EXCEPT;
+ case CEE_NOT:
+ if (tiVerificationNeeded)
+ {
+ tiRetVal = impStackTop().seTypeInfo;
+ Verify(tiRetVal.IsIntegerType(), "bad int value");
+ }
- impPushOnStack(op1, tiRetVal);
- break;
+ op1 = impPopStack().val;
+ impBashVarAddrsToI(op1, nullptr);
+ type = genActualType(op1->TypeGet());
+ impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
+ break;
- case CEE_LEAVE:
+ case CEE_CKFINITE:
+ if (tiVerificationNeeded)
+ {
+ tiRetVal = impStackTop().seTypeInfo;
+ Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
+ }
+ op1 = impPopStack().val;
+ type = op1->TypeGet();
+ op1 = gtNewOperNode(GT_CKFINITE, type, op1);
+ op1->gtFlags |= GTF_EXCEPT;
- val = getI4LittleEndian(codeAddr); // jump distance
- jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
- goto LEAVE;
+ impPushOnStack(op1, tiRetVal);
+ break;
- case CEE_LEAVE_S:
- val = getI1LittleEndian(codeAddr); // jump distance
- jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8 )) + val);
+ case CEE_LEAVE:
- LEAVE:
+ val = getI4LittleEndian(codeAddr); // jump distance
+ jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
+ goto LEAVE;
- if (compIsForInlining())
- {
- compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
- return;
- }
+ case CEE_LEAVE_S:
+ val = getI1LittleEndian(codeAddr); // jump distance
+ jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
- JITDUMP(" %04X", jmpAddr);
- if (block->bbJumpKind != BBJ_LEAVE)
- {
- impResetLeaveBlock(block, jmpAddr);
- }
+ LEAVE:
- assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
- impImportLeave(block);
- impNoteBranchOffs();
+ if (compIsForInlining())
+ {
+ compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
+ return;
+ }
- break;
+ JITDUMP(" %04X", jmpAddr);
+ if (block->bbJumpKind != BBJ_LEAVE)
+ {
+ impResetLeaveBlock(block, jmpAddr);
+ }
+ assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
+ impImportLeave(block);
+ impNoteBranchOffs();
- case CEE_BR:
- case CEE_BR_S:
- jmpDist = (sz==1) ? getI1LittleEndian(codeAddr)
- : getI4LittleEndian(codeAddr);
+ break;
- if (compIsForInlining() && jmpDist == 0)
- break; /* NOP */
+ case CEE_BR:
+ case CEE_BR_S:
+ jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
- impNoteBranchOffs();
- break;
+ if (compIsForInlining() && jmpDist == 0)
+ {
+ break; /* NOP */
+ }
+ impNoteBranchOffs();
+ break;
- case CEE_BRTRUE:
- case CEE_BRTRUE_S:
- case CEE_BRFALSE:
- case CEE_BRFALSE_S:
+ case CEE_BRTRUE:
+ case CEE_BRTRUE_S:
+ case CEE_BRFALSE:
+ case CEE_BRFALSE_S:
- /* Pop the comparand (now there's a neat term) from the stack */
- if (tiVerificationNeeded)
- {
- typeInfo& tiVal = impStackTop().seTypeInfo;
- Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(), "bad value");
- }
+ /* Pop the comparand (now there's a neat term) from the stack */
+ if (tiVerificationNeeded)
+ {
+ typeInfo& tiVal = impStackTop().seTypeInfo;
+ Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
+ "bad value");
+ }
- op1 = impPopStack().val;
- type = op1->TypeGet();
+ op1 = impPopStack().val;
+ type = op1->TypeGet();
- // brfalse and brtrue is only allowed on I4, refs, and byrefs.
- if (!opts.MinOpts() && !opts.compDbgCode &&
- block->bbJumpDest == block->bbNext)
- {
- block->bbJumpKind = BBJ_NONE;
-
- if (op1->gtFlags & GTF_GLOB_EFFECT)
+                // brfalse and brtrue are only allowed on I4, refs, and byrefs.
+ if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
{
- op1 = gtUnusedValNode(op1);
- goto SPILL_APPEND;
+ block->bbJumpKind = BBJ_NONE;
+
+ if (op1->gtFlags & GTF_GLOB_EFFECT)
+ {
+ op1 = gtUnusedValNode(op1);
+ goto SPILL_APPEND;
+ }
+ else
+ {
+ break;
+ }
}
- else break;
- }
- if (op1->OperIsCompare())
- {
- if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
+ if (op1->OperIsCompare())
{
- // Flip the sense of the compare
+ if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
+ {
+ // Flip the sense of the compare
- op1 = gtReverseCond(op1);
+ op1 = gtReverseCond(op1);
+ }
}
- }
- else
- {
- /* We'll compare against an equally-sized integer 0 */
- /* For small types, we always compare against int */
- op2 = gtNewZeroConNode(genActualType(op1->gtType));
+ else
+ {
+ /* We'll compare against an equally-sized integer 0 */
+ /* For small types, we always compare against int */
+ op2 = gtNewZeroConNode(genActualType(op1->gtType));
- /* Create the comparison operator and try to fold it */
+ /* Create the comparison operator and try to fold it */
- oper = (opcode==CEE_BRTRUE || opcode==CEE_BRTRUE_S) ? GT_NE : GT_EQ;
- op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
- }
+ oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
+ op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
+ }
// fall through
- COND_JUMP:
-
- seenConditionalJump = true;
-
- /* Fold comparison if we can */
+ COND_JUMP:
- op1 = gtFoldExpr(op1);
+ seenConditionalJump = true;
- /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
- /* Don't make any blocks unreachable in import only mode */
+ /* Fold comparison if we can */
- if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
- {
- /* gtFoldExpr() should prevent this as we don't want to make any blocks
- unreachable under compDbgCode */
- assert(!opts.compDbgCode);
+ op1 = gtFoldExpr(op1);
- BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS
- : BBJ_NONE);
- assertImp((block->bbJumpKind == BBJ_COND) // normal case
- || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the block for the second time
+ /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
+ /* Don't make any blocks unreachable in import only mode */
- block->bbJumpKind = foldedJumpKind;
-#ifdef DEBUG
- if (verbose)
+ if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
{
- if (op1->gtIntCon.gtIconVal)
- printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
- block->bbJumpDest->bbNum);
- else
- printf("\nThe block falls through into the next BB%02u\n",
- block->bbNext ->bbNum);
- }
-#endif
- break;
- }
+ /* gtFoldExpr() should prevent this as we don't want to make any blocks
+ unreachable under compDbgCode */
+ assert(!opts.compDbgCode);
- op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
+ BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
+ assertImp((block->bbJumpKind == BBJ_COND) // normal case
+ || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
+ // block for the second time
- /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
- in impImportBlock(block). For correct line numbers, spill stack. */
+ block->bbJumpKind = foldedJumpKind;
+#ifdef DEBUG
+ if (verbose)
+ {
+ if (op1->gtIntCon.gtIconVal)
+ {
+ printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
+ block->bbJumpDest->bbNum);
+ }
+ else
+ {
+ printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
+ }
+ }
+#endif
+ break;
+ }
- if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
- impSpillStackEnsure(true);
+ op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
- goto SPILL_APPEND;
+ /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
+ in impImportBlock(block). For correct line numbers, spill stack. */
+ if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
+ {
+ impSpillStackEnsure(true);
+ }
- case CEE_CEQ: oper = GT_EQ; uns = false; goto CMP_2_OPs;
- case CEE_CGT_UN: oper = GT_GT; uns = true; goto CMP_2_OPs;
- case CEE_CGT: oper = GT_GT; uns = false; goto CMP_2_OPs;
- case CEE_CLT_UN: oper = GT_LT; uns = true; goto CMP_2_OPs;
- case CEE_CLT: oper = GT_LT; uns = false; goto CMP_2_OPs;
+ goto SPILL_APPEND;
-CMP_2_OPs:
- if (tiVerificationNeeded)
- {
- verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
- tiRetVal = typeInfo(TI_INT);
- }
+ case CEE_CEQ:
+ oper = GT_EQ;
+ uns = false;
+ goto CMP_2_OPs;
+ case CEE_CGT_UN:
+ oper = GT_GT;
+ uns = true;
+ goto CMP_2_OPs;
+ case CEE_CGT:
+ oper = GT_GT;
+ uns = false;
+ goto CMP_2_OPs;
+ case CEE_CLT_UN:
+ oper = GT_LT;
+ uns = true;
+ goto CMP_2_OPs;
+ case CEE_CLT:
+ oper = GT_LT;
+ uns = false;
+ goto CMP_2_OPs;
+
+ CMP_2_OPs:
+ if (tiVerificationNeeded)
+ {
+ verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
+ tiRetVal = typeInfo(TI_INT);
+ }
- op2 = impPopStack().val;
- op1 = impPopStack().val;
+ op2 = impPopStack().val;
+ op1 = impPopStack().val;
#ifdef _TARGET_64BIT_
- if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
- {
- op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
- }
- else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
- {
- op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
- }
+ if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
+ {
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
+ }
+ else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
+ {
+ op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
+ }
#endif // _TARGET_64BIT_
- assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
- varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
- varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
+ assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
+ varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
+ varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
- /* Create the comparison node */
+ /* Create the comparison node */
- op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
+ op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
/* TODO: setting both flags when only one is appropriate */
- if (opcode==CEE_CGT_UN || opcode==CEE_CLT_UN)
- op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
-
- impPushOnStack(op1, tiRetVal);
- break;
-
- case CEE_BEQ_S:
- case CEE_BEQ: oper = GT_EQ; goto CMP_2_OPs_AND_BR;
-
- case CEE_BGE_S:
- case CEE_BGE: oper = GT_GE; goto CMP_2_OPs_AND_BR;
-
- case CEE_BGE_UN_S:
- case CEE_BGE_UN: oper = GT_GE; goto CMP_2_OPs_AND_BR_UN;
-
- case CEE_BGT_S:
- case CEE_BGT: oper = GT_GT; goto CMP_2_OPs_AND_BR;
-
- case CEE_BGT_UN_S:
- case CEE_BGT_UN: oper = GT_GT; goto CMP_2_OPs_AND_BR_UN;
-
- case CEE_BLE_S:
- case CEE_BLE: oper = GT_LE; goto CMP_2_OPs_AND_BR;
-
- case CEE_BLE_UN_S:
- case CEE_BLE_UN: oper = GT_LE; goto CMP_2_OPs_AND_BR_UN;
-
- case CEE_BLT_S:
- case CEE_BLT: oper = GT_LT; goto CMP_2_OPs_AND_BR;
-
- case CEE_BLT_UN_S:
- case CEE_BLT_UN: oper = GT_LT; goto CMP_2_OPs_AND_BR_UN;
+ if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
+ {
+ op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
+ }
- case CEE_BNE_UN_S:
- case CEE_BNE_UN: oper = GT_NE; goto CMP_2_OPs_AND_BR_UN;
+ impPushOnStack(op1, tiRetVal);
+ break;
- CMP_2_OPs_AND_BR_UN: uns = true; unordered = true; goto CMP_2_OPs_AND_BR_ALL;
- CMP_2_OPs_AND_BR: uns = false; unordered = false; goto CMP_2_OPs_AND_BR_ALL;
- CMP_2_OPs_AND_BR_ALL:
+ case CEE_BEQ_S:
+ case CEE_BEQ:
+ oper = GT_EQ;
+ goto CMP_2_OPs_AND_BR;
+
+ case CEE_BGE_S:
+ case CEE_BGE:
+ oper = GT_GE;
+ goto CMP_2_OPs_AND_BR;
+
+ case CEE_BGE_UN_S:
+ case CEE_BGE_UN:
+ oper = GT_GE;
+ goto CMP_2_OPs_AND_BR_UN;
+
+ case CEE_BGT_S:
+ case CEE_BGT:
+ oper = GT_GT;
+ goto CMP_2_OPs_AND_BR;
+
+ case CEE_BGT_UN_S:
+ case CEE_BGT_UN:
+ oper = GT_GT;
+ goto CMP_2_OPs_AND_BR_UN;
+
+ case CEE_BLE_S:
+ case CEE_BLE:
+ oper = GT_LE;
+ goto CMP_2_OPs_AND_BR;
+
+ case CEE_BLE_UN_S:
+ case CEE_BLE_UN:
+ oper = GT_LE;
+ goto CMP_2_OPs_AND_BR_UN;
+
+ case CEE_BLT_S:
+ case CEE_BLT:
+ oper = GT_LT;
+ goto CMP_2_OPs_AND_BR;
+
+ case CEE_BLT_UN_S:
+ case CEE_BLT_UN:
+ oper = GT_LT;
+ goto CMP_2_OPs_AND_BR_UN;
+ case CEE_BNE_UN_S:
+ case CEE_BNE_UN:
+ oper = GT_NE;
+ goto CMP_2_OPs_AND_BR_UN;
+
+ CMP_2_OPs_AND_BR_UN:
+ uns = true;
+ unordered = true;
+ goto CMP_2_OPs_AND_BR_ALL;
+ CMP_2_OPs_AND_BR:
+ uns = false;
+ unordered = false;
+ goto CMP_2_OPs_AND_BR_ALL;
+ CMP_2_OPs_AND_BR_ALL:
- if (tiVerificationNeeded)
- verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
+ if (tiVerificationNeeded)
+ {
+ verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
+ }
- /* Pull two values */
- op2 = impPopStack().val;
- op1 = impPopStack().val;
+ /* Pull two values */
+ op2 = impPopStack().val;
+ op1 = impPopStack().val;
#ifdef _TARGET_64BIT_
- if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
- {
- op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
- }
- else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
- {
- op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
- }
-#endif // _TARGET_64BIT_
-
- assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
- varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
- varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
-
- if (!opts.MinOpts() && !opts.compDbgCode &&
- block->bbJumpDest == block->bbNext)
- {
- block->bbJumpKind = BBJ_NONE;
-
- if (op1->gtFlags & GTF_GLOB_EFFECT)
+ if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
{
- impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Branch to next Optimization, op1 side effect") );
- impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
}
- if (op2->gtFlags & GTF_GLOB_EFFECT)
+ else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
{
- impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Branch to next Optimization, op2 side effect") );
- impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
+ op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
}
+#endif // _TARGET_64BIT_
-#ifdef DEBUG
- if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
- impNoteLastILoffs();
-#endif
- break;
- }
-#if !FEATURE_X87_DOUBLES
- // We can generate an compare of different sized floating point op1 and op2
- // We insert a cast
- //
- if (varTypeIsFloating(op1->TypeGet()))
- {
- if (op1->TypeGet() != op2->TypeGet())
+ assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
+ varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
+ varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
+
+ if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
{
- assert(varTypeIsFloating(op2->TypeGet()));
+ block->bbJumpKind = BBJ_NONE;
- // say op1=double, op2=float. To avoid loss of precision
- // while comparing, op2 is converted to double and double
- // comparison is done.
- if (op1->TypeGet() == TYP_DOUBLE)
+ if (op1->gtFlags & GTF_GLOB_EFFECT)
{
- // We insert a cast of op2 to TYP_DOUBLE
- op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
+ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
+ "Branch to next Optimization, op1 side effect"));
+ impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
- else if (op2->TypeGet() == TYP_DOUBLE)
+ if (op2->gtFlags & GTF_GLOB_EFFECT)
{
- // We insert a cast of op1 to TYP_DOUBLE
- op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
+ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
+ "Branch to next Optimization, op2 side effect"));
+ impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
- }
- }
-#endif // !FEATURE_X87_DOUBLES
- /* Create and append the operator */
+#ifdef DEBUG
+ if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
+ {
+ impNoteLastILoffs();
+ }
+#endif
+ break;
+ }
+#if !FEATURE_X87_DOUBLES
+                // We can generate a compare of different-sized floating point op1 and op2
+ // We insert a cast
+ //
+ if (varTypeIsFloating(op1->TypeGet()))
+ {
+ if (op1->TypeGet() != op2->TypeGet())
+ {
+ assert(varTypeIsFloating(op2->TypeGet()));
- op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
+ // say op1=double, op2=float. To avoid loss of precision
+ // while comparing, op2 is converted to double and double
+ // comparison is done.
+ if (op1->TypeGet() == TYP_DOUBLE)
+ {
+ // We insert a cast of op2 to TYP_DOUBLE
+ op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
+ }
+ else if (op2->TypeGet() == TYP_DOUBLE)
+ {
+ // We insert a cast of op1 to TYP_DOUBLE
+ op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
+ }
+ }
+ }
+#endif // !FEATURE_X87_DOUBLES
- if (uns)
- op1->gtFlags |= GTF_UNSIGNED;
+ /* Create and append the operator */
- if (unordered)
- op1->gtFlags |= GTF_RELOP_NAN_UN;
+ op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
- goto COND_JUMP;
+ if (uns)
+ {
+ op1->gtFlags |= GTF_UNSIGNED;
+ }
+ if (unordered)
+ {
+ op1->gtFlags |= GTF_RELOP_NAN_UN;
+ }
- case CEE_SWITCH:
- assert(!compIsForInlining());
+ goto COND_JUMP;
- if (tiVerificationNeeded)
- Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
- /* Pop the switch value off the stack */
- op1 = impPopStack().val;
- assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
+ case CEE_SWITCH:
+ assert(!compIsForInlining());
-#ifdef _TARGET_64BIT_
- // Widen 'op1' on 64-bit targets
- if (op1->TypeGet() != TYP_I_IMPL)
- {
- if (op1->OperGet() == GT_CNS_INT)
+ if (tiVerificationNeeded)
{
- op1->gtType = TYP_I_IMPL;
+ Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
}
- else
+ /* Pop the switch value off the stack */
+ op1 = impPopStack().val;
+ assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
+
+#ifdef _TARGET_64BIT_
+ // Widen 'op1' on 64-bit targets
+ if (op1->TypeGet() != TYP_I_IMPL)
{
- op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
+ if (op1->OperGet() == GT_CNS_INT)
+ {
+ op1->gtType = TYP_I_IMPL;
+ }
+ else
+ {
+ op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
+ }
}
- }
#endif // _TARGET_64BIT_
- assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
-
- /* We can create a switch node */
-
- op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
-
- val = (int)getU4LittleEndian(codeAddr);
- codeAddr += 4 + val*4; // skip over the switch-table
-
- goto SPILL_APPEND;
-
- /************************** Casting OPCODES ***************************/
-
- case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE ; goto CONV_OVF;
- case CEE_CONV_OVF_I2: lclTyp = TYP_SHORT ; goto CONV_OVF;
- case CEE_CONV_OVF_I : lclTyp = TYP_I_IMPL; goto CONV_OVF;
- case CEE_CONV_OVF_I4: lclTyp = TYP_INT ; goto CONV_OVF;
- case CEE_CONV_OVF_I8: lclTyp = TYP_LONG ; goto CONV_OVF;
+ assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
- case CEE_CONV_OVF_U1: lclTyp = TYP_UBYTE ; goto CONV_OVF;
- case CEE_CONV_OVF_U2: lclTyp = TYP_CHAR ; goto CONV_OVF;
- case CEE_CONV_OVF_U : lclTyp = TYP_U_IMPL; goto CONV_OVF;
- case CEE_CONV_OVF_U4: lclTyp = TYP_UINT ; goto CONV_OVF;
- case CEE_CONV_OVF_U8: lclTyp = TYP_ULONG ; goto CONV_OVF;
+ /* We can create a switch node */
- case CEE_CONV_OVF_I1_UN: lclTyp = TYP_BYTE ; goto CONV_OVF_UN;
- case CEE_CONV_OVF_I2_UN: lclTyp = TYP_SHORT ; goto CONV_OVF_UN;
- case CEE_CONV_OVF_I_UN : lclTyp = TYP_I_IMPL; goto CONV_OVF_UN;
- case CEE_CONV_OVF_I4_UN: lclTyp = TYP_INT ; goto CONV_OVF_UN;
- case CEE_CONV_OVF_I8_UN: lclTyp = TYP_LONG ; goto CONV_OVF_UN;
+ op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
- case CEE_CONV_OVF_U1_UN: lclTyp = TYP_UBYTE ; goto CONV_OVF_UN;
- case CEE_CONV_OVF_U2_UN: lclTyp = TYP_CHAR ; goto CONV_OVF_UN;
- case CEE_CONV_OVF_U_UN : lclTyp = TYP_U_IMPL; goto CONV_OVF_UN;
- case CEE_CONV_OVF_U4_UN: lclTyp = TYP_UINT ; goto CONV_OVF_UN;
- case CEE_CONV_OVF_U8_UN: lclTyp = TYP_ULONG ; goto CONV_OVF_UN;
+ val = (int)getU4LittleEndian(codeAddr);
+ codeAddr += 4 + val * 4; // skip over the switch-table
-CONV_OVF_UN:
- uns = true; goto CONV_OVF_COMMON;
-CONV_OVF:
- uns = false; goto CONV_OVF_COMMON;
-
-CONV_OVF_COMMON:
- ovfl = true;
- goto _CONV;
+ goto SPILL_APPEND;
- case CEE_CONV_I1: lclTyp = TYP_BYTE ; goto CONV;
- case CEE_CONV_I2: lclTyp = TYP_SHORT ; goto CONV;
- case CEE_CONV_I: lclTyp = TYP_I_IMPL; goto CONV;
- case CEE_CONV_I4: lclTyp = TYP_INT ; goto CONV;
- case CEE_CONV_I8: lclTyp = TYP_LONG ; goto CONV;
+ /************************** Casting OPCODES ***************************/
- case CEE_CONV_U1: lclTyp = TYP_UBYTE ; goto CONV;
- case CEE_CONV_U2: lclTyp = TYP_CHAR ; goto CONV;
+ case CEE_CONV_OVF_I1:
+ lclTyp = TYP_BYTE;
+ goto CONV_OVF;
+ case CEE_CONV_OVF_I2:
+ lclTyp = TYP_SHORT;
+ goto CONV_OVF;
+ case CEE_CONV_OVF_I:
+ lclTyp = TYP_I_IMPL;
+ goto CONV_OVF;
+ case CEE_CONV_OVF_I4:
+ lclTyp = TYP_INT;
+ goto CONV_OVF;
+ case CEE_CONV_OVF_I8:
+ lclTyp = TYP_LONG;
+ goto CONV_OVF;
+
+ case CEE_CONV_OVF_U1:
+ lclTyp = TYP_UBYTE;
+ goto CONV_OVF;
+ case CEE_CONV_OVF_U2:
+ lclTyp = TYP_CHAR;
+ goto CONV_OVF;
+ case CEE_CONV_OVF_U:
+ lclTyp = TYP_U_IMPL;
+ goto CONV_OVF;
+ case CEE_CONV_OVF_U4:
+ lclTyp = TYP_UINT;
+ goto CONV_OVF;
+ case CEE_CONV_OVF_U8:
+ lclTyp = TYP_ULONG;
+ goto CONV_OVF;
+
+ case CEE_CONV_OVF_I1_UN:
+ lclTyp = TYP_BYTE;
+ goto CONV_OVF_UN;
+ case CEE_CONV_OVF_I2_UN:
+ lclTyp = TYP_SHORT;
+ goto CONV_OVF_UN;
+ case CEE_CONV_OVF_I_UN:
+ lclTyp = TYP_I_IMPL;
+ goto CONV_OVF_UN;
+ case CEE_CONV_OVF_I4_UN:
+ lclTyp = TYP_INT;
+ goto CONV_OVF_UN;
+ case CEE_CONV_OVF_I8_UN:
+ lclTyp = TYP_LONG;
+ goto CONV_OVF_UN;
+
+ case CEE_CONV_OVF_U1_UN:
+ lclTyp = TYP_UBYTE;
+ goto CONV_OVF_UN;
+ case CEE_CONV_OVF_U2_UN:
+ lclTyp = TYP_CHAR;
+ goto CONV_OVF_UN;
+ case CEE_CONV_OVF_U_UN:
+ lclTyp = TYP_U_IMPL;
+ goto CONV_OVF_UN;
+ case CEE_CONV_OVF_U4_UN:
+ lclTyp = TYP_UINT;
+ goto CONV_OVF_UN;
+ case CEE_CONV_OVF_U8_UN:
+ lclTyp = TYP_ULONG;
+ goto CONV_OVF_UN;
+
+ CONV_OVF_UN:
+ uns = true;
+ goto CONV_OVF_COMMON;
+ CONV_OVF:
+ uns = false;
+ goto CONV_OVF_COMMON;
+
+ CONV_OVF_COMMON:
+ ovfl = true;
+ goto _CONV;
+
+ case CEE_CONV_I1:
+ lclTyp = TYP_BYTE;
+ goto CONV;
+ case CEE_CONV_I2:
+ lclTyp = TYP_SHORT;
+ goto CONV;
+ case CEE_CONV_I:
+ lclTyp = TYP_I_IMPL;
+ goto CONV;
+ case CEE_CONV_I4:
+ lclTyp = TYP_INT;
+ goto CONV;
+ case CEE_CONV_I8:
+ lclTyp = TYP_LONG;
+ goto CONV;
+
+ case CEE_CONV_U1:
+ lclTyp = TYP_UBYTE;
+ goto CONV;
+ case CEE_CONV_U2:
+ lclTyp = TYP_CHAR;
+ goto CONV;
#if (REGSIZE_BYTES == 8)
- case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV_UN;
+ case CEE_CONV_U:
+ lclTyp = TYP_U_IMPL;
+ goto CONV_UN;
#else
- case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV;
+ case CEE_CONV_U:
+ lclTyp = TYP_U_IMPL;
+ goto CONV;
#endif
- case CEE_CONV_U4: lclTyp = TYP_UINT ; goto CONV;
- case CEE_CONV_U8: lclTyp = TYP_ULONG ; goto CONV_UN;
-
- case CEE_CONV_R4: lclTyp = TYP_FLOAT; goto CONV;
- case CEE_CONV_R8: lclTyp = TYP_DOUBLE; goto CONV;
-
- case CEE_CONV_R_UN : lclTyp = TYP_DOUBLE; goto CONV_UN;
-
-CONV_UN:
- uns = true;
- ovfl = false;
- goto _CONV;
-
-CONV:
- uns = false;
- ovfl = false;
- goto _CONV;
-
-_CONV:
- // just check that we have a number on the stack
- if (tiVerificationNeeded)
- {
- const typeInfo& tiVal = impStackTop().seTypeInfo;
- Verify(tiVal.IsNumberType(), "bad arg");
+ case CEE_CONV_U4:
+ lclTyp = TYP_UINT;
+ goto CONV;
+ case CEE_CONV_U8:
+ lclTyp = TYP_ULONG;
+ goto CONV_UN;
+
+ case CEE_CONV_R4:
+ lclTyp = TYP_FLOAT;
+ goto CONV;
+ case CEE_CONV_R8:
+ lclTyp = TYP_DOUBLE;
+ goto CONV;
+
+ case CEE_CONV_R_UN:
+ lclTyp = TYP_DOUBLE;
+ goto CONV_UN;
+
+ CONV_UN:
+ uns = true;
+ ovfl = false;
+ goto _CONV;
+
+ CONV:
+ uns = false;
+ ovfl = false;
+ goto _CONV;
+
+ _CONV:
+ // just check that we have a number on the stack
+ if (tiVerificationNeeded)
+ {
+ const typeInfo& tiVal = impStackTop().seTypeInfo;
+ Verify(tiVal.IsNumberType(), "bad arg");
#ifdef _TARGET_64BIT_
- bool isNative = false;
+ bool isNative = false;
- switch (opcode)
- {
- case CEE_CONV_OVF_I:
- case CEE_CONV_OVF_I_UN:
- case CEE_CONV_I:
- case CEE_CONV_OVF_U:
- case CEE_CONV_OVF_U_UN:
- case CEE_CONV_U:
- isNative = true;
- default:
- // leave 'isNative' = false;
- break;
+ switch (opcode)
+ {
+ case CEE_CONV_OVF_I:
+ case CEE_CONV_OVF_I_UN:
+ case CEE_CONV_I:
+ case CEE_CONV_OVF_U:
+ case CEE_CONV_OVF_U_UN:
+ case CEE_CONV_U:
+ isNative = true;
+ default:
+ // leave 'isNative' = false;
+ break;
+ }
+ if (isNative)
+ {
+ tiRetVal = typeInfo::nativeInt();
+ }
+ else
+#endif // _TARGET_64BIT_
+ {
+ tiRetVal = typeInfo(lclTyp).NormaliseForStack();
+ }
}
- if (isNative)
+
+                // Only conversions from FLOAT or DOUBLE to an integer type,
+                // and from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
+
+ if (varTypeIsFloating(lclTyp))
{
- tiRetVal = typeInfo::nativeInt();
+ callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
+#ifdef _TARGET_64BIT_
+ // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
+ // TYP_BYREF could be used as TYP_I_IMPL which is long.
+ // TODO-CQ: remove this when we lower casts long/ulong --> float/double
+ // and generate SSE2 code instead of going through helper calls.
+ || (impStackTop().val->TypeGet() == TYP_BYREF)
+#endif
+ ;
}
else
-#endif // _TARGET_64BIT_
{
- tiRetVal = typeInfo(lclTyp).NormaliseForStack();
+ callNode = varTypeIsFloating(impStackTop().val->TypeGet());
}
- }
-
- // only converts from FLOAT or DOUBLE to an integer type
- // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls
- if (varTypeIsFloating(lclTyp))
- {
- callNode = varTypeIsLong(impStackTop().val)
- || uns // uint->dbl gets turned into uint->long->dbl
-#ifdef _TARGET_64BIT_
- // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
- // TYP_BYREF could be used as TYP_I_IMPL which is long.
- // TODO-CQ: remove this when we lower casts long/ulong --> float/double
- // and generate SSE2 code instead of going through helper calls.
- || (impStackTop().val->TypeGet() == TYP_BYREF)
-#endif
- ;
- }
- else
- {
- callNode = varTypeIsFloating(impStackTop().val->TypeGet());
- }
-
- // At this point uns, ovf, callNode all set
+ // At this point uns, ovf, callNode all set
- op1 = impPopStack().val;
- impBashVarAddrsToI(op1);
-
- if (varTypeIsSmall(lclTyp) && !ovfl &&
- op1->gtType == TYP_INT && op1->gtOper == GT_AND)
- {
- op2 = op1->gtOp.gtOp2;
+ op1 = impPopStack().val;
+ impBashVarAddrsToI(op1);
- if (op2->gtOper == GT_CNS_INT)
+ if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
{
- ssize_t ival = op2->gtIntCon.gtIconVal;
- ssize_t mask, umask;
+ op2 = op1->gtOp.gtOp2;
- switch (lclTyp)
+ if (op2->gtOper == GT_CNS_INT)
{
- case TYP_BYTE :
- case TYP_UBYTE: mask = 0x00FF; umask = 0x007F; break;
- case TYP_CHAR :
- case TYP_SHORT: mask = 0xFFFF; umask = 0x7FFF; break;
-
- default:
- assert(!"unexpected type"); return;
- }
+ ssize_t ival = op2->gtIntCon.gtIconVal;
+ ssize_t mask, umask;
- if (((ival & umask) == ival) ||
- ((ival & mask) == ival && uns))
- {
- /* Toss the cast, it's a waste of time */
+ switch (lclTyp)
+ {
+ case TYP_BYTE:
+ case TYP_UBYTE:
+ mask = 0x00FF;
+ umask = 0x007F;
+ break;
+ case TYP_CHAR:
+ case TYP_SHORT:
+ mask = 0xFFFF;
+ umask = 0x7FFF;
+ break;
+
+ default:
+ assert(!"unexpected type");
+ return;
+ }
- impPushOnStack(op1, tiRetVal);
- break;
- }
- else if (ival == mask)
- {
- /* Toss the masking, it's a waste of time, since
- we sign-extend from the small value anyways */
+ if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
+ {
+ /* Toss the cast, it's a waste of time */
- op1 = op1->gtOp.gtOp1;
+ impPushOnStack(op1, tiRetVal);
+ break;
+ }
+ else if (ival == mask)
+ {
+ /* Toss the masking, it's a waste of time, since
+ we sign-extend from the small value anyways */
+ op1 = op1->gtOp.gtOp1;
+ }
}
}
- }
- /* The 'op2' sub-operand of a cast is the 'real' type number,
- since the result of a cast to one of the 'small' integer
- types is an integer.
- */
+ /* The 'op2' sub-operand of a cast is the 'real' type number,
+ since the result of a cast to one of the 'small' integer
+ types is an integer.
+ */
- type = genActualType(lclTyp);
+ type = genActualType(lclTyp);
#if SMALL_TREE_NODES
- if (callNode)
- {
- op1 = gtNewCastNodeL(type, op1, lclTyp);
- }
- else
+ if (callNode)
+ {
+ op1 = gtNewCastNodeL(type, op1, lclTyp);
+ }
+ else
#endif // SMALL_TREE_NODES
- {
- op1 = gtNewCastNode (type, op1, lclTyp);
- }
-
- if (ovfl)
- op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
- if (uns)
- op1->gtFlags |= GTF_UNSIGNED;
- impPushOnStack(op1, tiRetVal);
- break;
+ {
+ op1 = gtNewCastNode(type, op1, lclTyp);
+ }
- case CEE_NEG:
- if (tiVerificationNeeded)
- {
- tiRetVal = impStackTop().seTypeInfo;
- Verify(tiRetVal.IsNumberType(), "Bad arg");
- }
+ if (ovfl)
+ {
+ op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
+ }
+ if (uns)
+ {
+ op1->gtFlags |= GTF_UNSIGNED;
+ }
+ impPushOnStack(op1, tiRetVal);
+ break;
- op1 = impPopStack().val;
- impBashVarAddrsToI(op1, NULL);
- impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
- break;
+ case CEE_NEG:
+ if (tiVerificationNeeded)
+ {
+ tiRetVal = impStackTop().seTypeInfo;
+ Verify(tiRetVal.IsNumberType(), "Bad arg");
+ }
- case CEE_POP:
- if (tiVerificationNeeded)
- impStackTop(0);
+ op1 = impPopStack().val;
+ impBashVarAddrsToI(op1, nullptr);
+ impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
+ break;
- /* Pull the top value from the stack */
+ case CEE_POP:
+ if (tiVerificationNeeded)
+ {
+ impStackTop(0);
+ }
- op1 = impPopStack(clsHnd).val;
+ /* Pull the top value from the stack */
- /* Get hold of the type of the value being duplicated */
+ op1 = impPopStack(clsHnd).val;
- lclTyp = genActualType(op1->gtType);
+ /* Get hold of the type of the value being duplicated */
- /* Does the value have any side effects? */
+ lclTyp = genActualType(op1->gtType);
- if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
- {
- // Since we are throwing away the value, just normalize
- // it to its address. This is more efficient.
+ /* Does the value have any side effects? */
+ if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
+ {
+ // Since we are throwing away the value, just normalize
+ // it to its address. This is more efficient.
- if (varTypeIsStruct(op1))
- {
+ if (varTypeIsStruct(op1))
+ {
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- // Non-calls, such as obj or ret_expr, have to go through this.
- // Calls with large struct return value have to go through this.
- // Helper calls with small struct return value also have to go
- // through this since they do not follow Unix calling convention.
- if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd)
- || op1->AsCall()->gtCallType == CT_HELPER)
+ // Non-calls, such as obj or ret_expr, have to go through this.
+ // Calls with large struct return value have to go through this.
+ // Helper calls with small struct return value also have to go
+ // through this since they do not follow Unix calling convention.
+ if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
+ op1->AsCall()->gtCallType == CT_HELPER)
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+ {
+ op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
+ }
+ }
+
+                    // If op1 is a non-overflow cast, throw it away since it is useless.
+ // Another reason for throwing away the useless cast is in the context of
+ // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
+ // The cast gets added as part of importing GT_CALL, which gets in the way
+ // of fgMorphCall() on the forms of tail call nodes that we assert.
+ if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
{
- op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
+ op1 = op1->gtOp.gtOp1;
}
- }
- // If op1 is non-overflow cast, throw it away since it is useless.
- // Another reason for throwing away the useless cast is in the context of
- // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
- // The cast gets added as part of importing GT_CALL, which gets in the way
- // of fgMorphCall() on the forms of tail call nodes that we assert.
- if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
- {
- op1 = op1->gtOp.gtOp1;
- }
+ // If 'op1' is an expression, create an assignment node.
+ // Helps analyses (like CSE) to work fine.
- // If 'op1' is an expression, create an assignment node.
- // Helps analyses (like CSE) to work fine.
+ if (op1->gtOper != GT_CALL)
+ {
+ op1 = gtUnusedValNode(op1);
+ }
- if (op1->gtOper != GT_CALL)
- {
- op1 = gtUnusedValNode(op1);
+ /* Append the value to the tree list */
+ goto SPILL_APPEND;
}
- /* Append the value to the tree list */
- goto SPILL_APPEND;
- }
-
- /* No side effects - just throw the <BEEP> thing away */
- break;
+ /* No side effects - just throw the <BEEP> thing away */
+ break;
+ case CEE_DUP:
- case CEE_DUP:
-
- if (tiVerificationNeeded)
- {
+ if (tiVerificationNeeded)
+ {
                // Dup could start the beginning of delegate creation sequence, remember that
- delegateCreateStart = codeAddr - 1;
- impStackTop(0);
- }
+ delegateCreateStart = codeAddr - 1;
+ impStackTop(0);
+ }
- // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
- // - If this is non-debug code - so that CSE will recognize the two as equal.
- // This helps eliminate a redundant bounds check in cases such as:
- // ariba[i+3] += some_value;
- // - If the top of the stack is a non-leaf that may be expensive to clone.
+ // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
+ // - If this is non-debug code - so that CSE will recognize the two as equal.
+ // This helps eliminate a redundant bounds check in cases such as:
+ // ariba[i+3] += some_value;
+ // - If the top of the stack is a non-leaf that may be expensive to clone.
- if (codeAddr < codeEndp)
- {
- OPCODE nextOpcode = (OPCODE) getU1LittleEndian(codeAddr);
- if (impIsAnySTLOC(nextOpcode))
+ if (codeAddr < codeEndp)
{
- if (!opts.compDbgCode)
+ OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
+ if (impIsAnySTLOC(nextOpcode))
{
- insertLdloc = true;
- break;
- }
- GenTree* stackTop = impStackTop().val;
- if (!stackTop->IsIntegralConst(0) &&
- !stackTop->IsFPZero() &&
- !stackTop->IsLocal())
- {
- insertLdloc = true;
- break;
+ if (!opts.compDbgCode)
+ {
+ insertLdloc = true;
+ break;
+ }
+ GenTree* stackTop = impStackTop().val;
+ if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
+ {
+ insertLdloc = true;
+ break;
+ }
}
}
- }
- /* Pull the top value from the stack */
- op1 = impPopStack(tiRetVal);
+ /* Pull the top value from the stack */
+ op1 = impPopStack(tiRetVal);
- /* Clone the value */
- op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, NULL DEBUGARG("DUP instruction") );
+ /* Clone the value */
+ op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("DUP instruction"));
- /* Either the tree started with no global effects, or impCloneExpr
- evaluated the tree to a temp and returned two copies of that
- temp. Either way, neither op1 nor op2 should have side effects.
- */
- assert (!(op1->gtFlags & GTF_GLOB_EFFECT) &&
- !(op2->gtFlags & GTF_GLOB_EFFECT));
+ /* Either the tree started with no global effects, or impCloneExpr
+ evaluated the tree to a temp and returned two copies of that
+ temp. Either way, neither op1 nor op2 should have side effects.
+ */
+ assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
- /* Push the tree/temp back on the stack */
- impPushOnStack(op1, tiRetVal);
+ /* Push the tree/temp back on the stack */
+ impPushOnStack(op1, tiRetVal);
- /* Push the copy on the stack */
- impPushOnStack(op2, tiRetVal);
+ /* Push the copy on the stack */
+ impPushOnStack(op2, tiRetVal);
- break;
+ break;
- case CEE_STIND_I1: lclTyp = TYP_BYTE; goto STIND;
- case CEE_STIND_I2: lclTyp = TYP_SHORT; goto STIND;
- case CEE_STIND_I4: lclTyp = TYP_INT; goto STIND;
- case CEE_STIND_I8: lclTyp = TYP_LONG; goto STIND;
- case CEE_STIND_I: lclTyp = TYP_I_IMPL; goto STIND;
- case CEE_STIND_REF: lclTyp = TYP_REF; goto STIND;
- case CEE_STIND_R4: lclTyp = TYP_FLOAT; goto STIND;
- case CEE_STIND_R8: lclTyp = TYP_DOUBLE; goto STIND;
-STIND:
-
- if (tiVerificationNeeded)
- {
- typeInfo instrType(lclTyp);
+ case CEE_STIND_I1:
+ lclTyp = TYP_BYTE;
+ goto STIND;
+ case CEE_STIND_I2:
+ lclTyp = TYP_SHORT;
+ goto STIND;
+ case CEE_STIND_I4:
+ lclTyp = TYP_INT;
+ goto STIND;
+ case CEE_STIND_I8:
+ lclTyp = TYP_LONG;
+ goto STIND;
+ case CEE_STIND_I:
+ lclTyp = TYP_I_IMPL;
+ goto STIND;
+ case CEE_STIND_REF:
+ lclTyp = TYP_REF;
+ goto STIND;
+ case CEE_STIND_R4:
+ lclTyp = TYP_FLOAT;
+ goto STIND;
+ case CEE_STIND_R8:
+ lclTyp = TYP_DOUBLE;
+ goto STIND;
+ STIND:
+
+ if (tiVerificationNeeded)
+ {
+ typeInfo instrType(lclTyp);
#ifdef _TARGET_64BIT_
- if (opcode == CEE_STIND_I)
+ if (opcode == CEE_STIND_I)
+ {
+ instrType = typeInfo::nativeInt();
+ }
+#endif // _TARGET_64BIT_
+ verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
+ }
+ else
{
- instrType = typeInfo::nativeInt();
+ compUnsafeCastUsed = true; // Have to go conservative
}
-#endif // _TARGET_64BIT_
- verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
- }
- else
- {
- compUnsafeCastUsed = true; // Have to go conservative
- }
-STIND_POST_VERIFY:
+ STIND_POST_VERIFY:
- op2 = impPopStack().val; // value to store
- op1 = impPopStack().val; // address to store to
-
- // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
- assertImp(genActualType(op1->gtType) == TYP_I_IMPL ||
- op1->gtType == TYP_BYREF);
+ op2 = impPopStack().val; // value to store
+ op1 = impPopStack().val; // address to store to
- impBashVarAddrsToI(op1, op2);
+ // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
+ assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
- op2 = impImplicitR4orR8Cast(op2, lclTyp);
+ impBashVarAddrsToI(op1, op2);
+
+ op2 = impImplicitR4orR8Cast(op2, lclTyp);
#ifdef _TARGET_64BIT_
- // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
- if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
- {
- op2->gtType = TYP_I_IMPL;
- }
- else
- {
- // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity
- //
- if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
+ // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
+ if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
{
- assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
+ op2->gtType = TYP_I_IMPL;
}
- // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity
- //
- if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
+ else
{
- assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
+                    // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
+ //
+ if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
+ {
+ assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
+ op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
+ }
+                    // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
+ //
+ if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
+ {
+ assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
+ }
}
- }
#endif // _TARGET_64BIT_
- if (opcode == CEE_STIND_REF)
- {
- // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
- assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
- lclTyp = genActualType(op2->TypeGet());
- }
+ if (opcode == CEE_STIND_REF)
+ {
+ // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
+ assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
+ lclTyp = genActualType(op2->TypeGet());
+ }
- // Check target type.
+// Check target type.
#ifdef DEBUG
- if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
- {
- if (op2->gtType == TYP_BYREF)
- assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
- else if (lclTyp == TYP_BYREF)
- assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
- }
- else
- {
- assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
- ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
- (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
- }
+ if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
+ {
+ if (op2->gtType == TYP_BYREF)
+ {
+ assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
+ }
+ else if (lclTyp == TYP_BYREF)
+ {
+ assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
+ }
+ }
+ else
+ {
+ assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
+ ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
+ (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
+ }
#endif
- op1 = gtNewOperNode(GT_IND, lclTyp, op1);
+ op1 = gtNewOperNode(GT_IND, lclTyp, op1);
- // stind could point anywhere, example a boxed class static int
- op1->gtFlags |= GTF_IND_TGTANYWHERE;
+                // stind could point anywhere, for example a boxed class static int
+ op1->gtFlags |= GTF_IND_TGTANYWHERE;
- if (prefixFlags & PREFIX_VOLATILE)
- {
- assert(op1->OperGet() == GT_IND);
- op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
- op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
- op1->gtFlags |= GTF_IND_VOLATILE;
- }
-
- if (prefixFlags & PREFIX_UNALIGNED)
- {
- assert(op1->OperGet() == GT_IND);
- op1->gtFlags |= GTF_IND_UNALIGNED;
- }
-
- op1 = gtNewAssignNode(op1, op2);
- op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
+ if (prefixFlags & PREFIX_VOLATILE)
+ {
+ assert(op1->OperGet() == GT_IND);
+ op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
+ op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
+ op1->gtFlags |= GTF_IND_VOLATILE;
+ }
- // Spill side-effects AND global-data-accesses
- if (verCurrentState.esStackDepth > 0)
- impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND") );
+ if (prefixFlags & PREFIX_UNALIGNED)
+ {
+ assert(op1->OperGet() == GT_IND);
+ op1->gtFlags |= GTF_IND_UNALIGNED;
+ }
- goto APPEND;
+ op1 = gtNewAssignNode(op1, op2);
+ op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
+ // Spill side-effects AND global-data-accesses
+ if (verCurrentState.esStackDepth > 0)
+ {
+ impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
+ }
- case CEE_LDIND_I1: lclTyp = TYP_BYTE; goto LDIND;
- case CEE_LDIND_I2: lclTyp = TYP_SHORT; goto LDIND;
- case CEE_LDIND_U4:
- case CEE_LDIND_I4: lclTyp = TYP_INT; goto LDIND;
- case CEE_LDIND_I8: lclTyp = TYP_LONG; goto LDIND;
- case CEE_LDIND_REF: lclTyp = TYP_REF; goto LDIND;
- case CEE_LDIND_I: lclTyp = TYP_I_IMPL; goto LDIND;
- case CEE_LDIND_R4: lclTyp = TYP_FLOAT; goto LDIND;
- case CEE_LDIND_R8: lclTyp = TYP_DOUBLE; goto LDIND;
- case CEE_LDIND_U1: lclTyp = TYP_UBYTE; goto LDIND;
- case CEE_LDIND_U2: lclTyp = TYP_CHAR; goto LDIND;
-LDIND:
+ goto APPEND;
+
+ case CEE_LDIND_I1:
+ lclTyp = TYP_BYTE;
+ goto LDIND;
+ case CEE_LDIND_I2:
+ lclTyp = TYP_SHORT;
+ goto LDIND;
+ case CEE_LDIND_U4:
+ case CEE_LDIND_I4:
+ lclTyp = TYP_INT;
+ goto LDIND;
+ case CEE_LDIND_I8:
+ lclTyp = TYP_LONG;
+ goto LDIND;
+ case CEE_LDIND_REF:
+ lclTyp = TYP_REF;
+ goto LDIND;
+ case CEE_LDIND_I:
+ lclTyp = TYP_I_IMPL;
+ goto LDIND;
+ case CEE_LDIND_R4:
+ lclTyp = TYP_FLOAT;
+ goto LDIND;
+ case CEE_LDIND_R8:
+ lclTyp = TYP_DOUBLE;
+ goto LDIND;
+ case CEE_LDIND_U1:
+ lclTyp = TYP_UBYTE;
+ goto LDIND;
+ case CEE_LDIND_U2:
+ lclTyp = TYP_CHAR;
+ goto LDIND;
+ LDIND:
- if (tiVerificationNeeded)
- {
- typeInfo lclTiType(lclTyp);
+ if (tiVerificationNeeded)
+ {
+ typeInfo lclTiType(lclTyp);
#ifdef _TARGET_64BIT_
- if (opcode == CEE_LDIND_I)
+ if (opcode == CEE_LDIND_I)
+ {
+ lclTiType = typeInfo::nativeInt();
+ }
+#endif // _TARGET_64BIT_
+ tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
+ tiRetVal.NormaliseForStack();
+ }
+ else
{
- lclTiType = typeInfo::nativeInt();
+ compUnsafeCastUsed = true; // Have to go conservative
}
-#endif // _TARGET_64BIT_
- tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
- tiRetVal.NormaliseForStack();
- }
- else
- {
- compUnsafeCastUsed = true; // Have to go conservative
- }
-LDIND_POST_VERIFY:
-
- op1 = impPopStack().val; // address to load from
- impBashVarAddrsToI(op1);
+ LDIND_POST_VERIFY:
+
+ op1 = impPopStack().val; // address to load from
+ impBashVarAddrsToI(op1);
#ifdef _TARGET_64BIT_
- // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity
- //
- if (genActualType(op1->gtType) == TYP_INT)
- {
- assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
- }
+                // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
+ //
+ if (genActualType(op1->gtType) == TYP_INT)
+ {
+ assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
+ op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
+ }
#endif
- assertImp(genActualType(op1->gtType) == TYP_I_IMPL ||
- op1->gtType == TYP_BYREF);
+ assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
- op1 = gtNewOperNode(GT_IND, lclTyp, op1);
+ op1 = gtNewOperNode(GT_IND, lclTyp, op1);
- // ldind could point anywhere, example a boxed class static int
- op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
+                // ldind could point anywhere, for example a boxed class static int
+ op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
- if (prefixFlags & PREFIX_VOLATILE)
- {
- assert(op1->OperGet() == GT_IND);
- op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
- op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
- op1->gtFlags |= GTF_IND_VOLATILE;
- }
+ if (prefixFlags & PREFIX_VOLATILE)
+ {
+ assert(op1->OperGet() == GT_IND);
+ op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
+ op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
+ op1->gtFlags |= GTF_IND_VOLATILE;
+ }
- if (prefixFlags & PREFIX_UNALIGNED)
- {
- assert(op1->OperGet() == GT_IND);
- op1->gtFlags |= GTF_IND_UNALIGNED;
- }
+ if (prefixFlags & PREFIX_UNALIGNED)
+ {
+ assert(op1->OperGet() == GT_IND);
+ op1->gtFlags |= GTF_IND_UNALIGNED;
+ }
- impPushOnStack(op1, tiRetVal);
+ impPushOnStack(op1, tiRetVal);
- break;
+ break;
+ case CEE_UNALIGNED:
- case CEE_UNALIGNED:
+ assert(sz == 1);
+ val = getU1LittleEndian(codeAddr);
+ ++codeAddr;
+ JITDUMP(" %u", val);
+ if ((val != 1) && (val != 2) && (val != 4))
+ {
+ BADCODE("Alignment unaligned. must be 1, 2, or 4");
+ }
- assert(sz == 1);
- val = getU1LittleEndian(codeAddr);
- ++codeAddr;
- JITDUMP(" %u", val);
- if ((val != 1) && (val != 2) && (val != 4)) {
- BADCODE("Alignment unaligned. must be 1, 2, or 4");
- }
+ Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
+ prefixFlags |= PREFIX_UNALIGNED;
- Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
- prefixFlags |= PREFIX_UNALIGNED;
+ impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
- impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
+ PREFIX:
+ opcode = (OPCODE)getU1LittleEndian(codeAddr);
+ codeAddr += sizeof(__int8);
+ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
+ goto DECODE_OPCODE;
-PREFIX:
- opcode = (OPCODE) getU1LittleEndian(codeAddr);
- codeAddr += sizeof(__int8);
- opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
- goto DECODE_OPCODE;
+ case CEE_VOLATILE:
- case CEE_VOLATILE:
+ Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
+ prefixFlags |= PREFIX_VOLATILE;
- Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
- prefixFlags |= PREFIX_VOLATILE;
+ impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
- impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
+ assert(sz == 0);
+ goto PREFIX;
- assert(sz == 0);
- goto PREFIX;
+ case CEE_LDFTN:
+ {
+ // Need to do a lookup here so that we perform an access check
+ // and do a NOWAY if protections are violated
+ _impResolveToken(CORINFO_TOKENKIND_Method);
- case CEE_LDFTN:
- {
- // Need to do a lookup here so that we perform an access check
- // and do a NOWAY if protections are violated
- _impResolveToken(CORINFO_TOKENKIND_Method);
+ JITDUMP(" %08X", resolvedToken.token);
- JITDUMP(" %08X", resolvedToken.token);
+ eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
+ addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
+ &callInfo);
- eeGetCallInfo(&resolvedToken, 0 /* constraint typeRef*/,
- addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS,CORINFO_CALLINFO_LDFTN)), &callInfo);
+ // This check really only applies to intrinsic Array.Address methods
+ if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
+ {
+ NO_WAY("Currently do not support LDFTN of Parameterized functions");
+ }
- // This check really only applies to intrinsic Array.Address methods
- if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
- NO_WAY("Currently do not support LDFTN of Parameterized functions");
-
- //Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
- impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
-
- if (tiVerificationNeeded)
- {
- // LDFTN could start the begining of delegate creation sequence, remember that
- delegateCreateStart = codeAddr - 2;
-
- // check any constraints on the callee's class and type parameters
- VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
- "method has unsatisfied class constraints");
- VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,resolvedToken.hMethod),
- "method has unsatisfied method constraints");
-
- mflags = callInfo.verMethodFlags;
- Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR),
- "LDFTN on a constructor");
- }
+ // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
+ impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
-DO_LDFTN:
- op1 = impMethodPointer(&resolvedToken, &callInfo);
- if (compDonotInline())
- return;
+ if (tiVerificationNeeded)
+ {
+ // LDFTN could start the beginning of a delegate creation sequence; remember where it starts
+ delegateCreateStart = codeAddr - 2;
+
+ // check any constraints on the callee's class and type parameters
+ VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
+ "method has unsatisfied class constraints");
+ VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
+ resolvedToken.hMethod),
+ "method has unsatisfied method constraints");
+
+ mflags = callInfo.verMethodFlags;
+ Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
+ }
- impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
+ DO_LDFTN:
+ op1 = impMethodPointer(&resolvedToken, &callInfo);
+ if (compDonotInline())
+ {
+ return;
+ }
- break;
- }
+ impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
- case CEE_LDVIRTFTN:
- {
- /* Get the method token */
+ break;
+ }
- _impResolveToken(CORINFO_TOKENKIND_Method);
+ case CEE_LDVIRTFTN:
+ {
+ /* Get the method token */
- JITDUMP(" %08X", resolvedToken.token);
+ _impResolveToken(CORINFO_TOKENKIND_Method);
- eeGetCallInfo(&resolvedToken, 0 /* constraint typeRef */,
- addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS,CORINFO_CALLINFO_LDFTN), CORINFO_CALLINFO_CALLVIRT)),
- &callInfo);
-
- // This check really only applies to intrinsic Array.Address methods
- if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
- NO_WAY("Currently do not support LDFTN of Parameterized functions");
+ JITDUMP(" %08X", resolvedToken.token);
- mflags = callInfo.methodFlags;
+ eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
+ addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
+ CORINFO_CALLINFO_CALLVIRT)),
+ &callInfo);
- impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
+ // This check really only applies to intrinsic Array.Address methods
+ if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
+ {
+ NO_WAY("Currently do not support LDFTN of Parameterized functions");
+ }
- if (compIsForInlining())
- {
- if (mflags & (CORINFO_FLG_FINAL|CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
+ mflags = callInfo.methodFlags;
+
+ impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
+
+ if (compIsForInlining())
{
- compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
- return;
+ if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
+ {
+ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
+ return;
+ }
}
- }
-
- CORINFO_SIG_INFO& ftnSig = callInfo.sig;
- if (tiVerificationNeeded)
- {
-
- Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
- Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
-
- // JIT32 verifier rejects verifiable ldvirtftn pattern
- typeInfo declType = verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
-
- typeInfo arg = impStackTop().seTypeInfo;
- Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) &&
- tiCompatibleWith(arg, declType, true), "bad ldvirtftn");
-
- CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
- if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
- instanceClassHnd = arg.GetClassHandleForObjRef();
-
-
- // check any constraints on the method's class and type parameters
- VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
- "method has unsatisfied class constraints");
- VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,resolvedToken.hMethod),
- "method has unsatisfied method constraints");
-
- if (mflags & CORINFO_FLG_PROTECTED)
+ CORINFO_SIG_INFO& ftnSig = callInfo.sig;
+
+ if (tiVerificationNeeded)
{
- Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
- "Accessing protected method through wrong type.");
+
+ Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
+ Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
+
+ // JIT32 verifier rejects verifiable ldvirtftn pattern
+ typeInfo declType =
+ verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
+
+ typeInfo arg = impStackTop().seTypeInfo;
+ Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
+ "bad ldvirtftn");
+
+ CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
+ if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
+ {
+ instanceClassHnd = arg.GetClassHandleForObjRef();
+ }
+
+ // check any constraints on the method's class and type parameters
+ VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
+ "method has unsatisfied class constraints");
+ VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
+ resolvedToken.hMethod),
+ "method has unsatisfied method constraints");
+
+ if (mflags & CORINFO_FLG_PROTECTED)
+ {
+ Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
+ "Accessing protected method through wrong type.");
+ }
}
- }
- /* Get the object-ref */
- op1 = impPopStack().val;
- assertImp(op1->gtType == TYP_REF);
+ /* Get the object-ref */
+ op1 = impPopStack().val;
+ assertImp(op1->gtType == TYP_REF);
- if (opts.IsReadyToRun())
- {
- if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
+ if (opts.IsReadyToRun())
+ {
+ if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
+ {
+ if (op1->gtFlags & GTF_SIDE_EFFECT)
+ {
+ op1 = gtUnusedValNode(op1);
+ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
+ }
+ goto DO_LDFTN;
+ }
+ }
+ else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
{
- if (op1->gtFlags & GTF_SIDE_EFFECT)
+ if (op1->gtFlags & GTF_SIDE_EFFECT)
{
op1 = gtUnusedValNode(op1);
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
}
goto DO_LDFTN;
}
- }
- else
- if (mflags & (CORINFO_FLG_FINAL|CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
- {
- if (op1->gtFlags & GTF_SIDE_EFFECT)
+
+ GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
+ if (compDonotInline())
{
- op1 = gtUnusedValNode(op1);
- impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
+ return;
}
- goto DO_LDFTN;
- }
- GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
- if (compDonotInline())
- return;
+ impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
- impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
-
- break;
- }
+ break;
+ }
- case CEE_CONSTRAINED:
+ case CEE_CONSTRAINED:
- assertImp(sz == sizeof(unsigned));
- impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
- codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
- JITDUMP(" (%08X) ", constrainedResolvedToken.token);
+ assertImp(sz == sizeof(unsigned));
+ impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
+ codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
+ JITDUMP(" (%08X) ", constrainedResolvedToken.token);
- Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
- prefixFlags |= PREFIX_CONSTRAINED;
+ Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
+ prefixFlags |= PREFIX_CONSTRAINED;
- {
- OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
- if (actualOpcode != CEE_CALLVIRT) {
- BADCODE("constrained. has to be followed by callvirt");
+ {
+ OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
+ if (actualOpcode != CEE_CALLVIRT)
+ {
+ BADCODE("constrained. has to be followed by callvirt");
+ }
}
- }
- goto PREFIX;
+ goto PREFIX;
- case CEE_READONLY:
- JITDUMP(" readonly.");
+ case CEE_READONLY:
+ JITDUMP(" readonly.");
- Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
- prefixFlags |= PREFIX_READONLY;
+ Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
+ prefixFlags |= PREFIX_READONLY;
- {
- OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
- if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode)) {
- BADCODE("readonly. has to be followed by ldelema or call");
+ {
+ OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
+ if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
+ {
+ BADCODE("readonly. has to be followed by ldelema or call");
+ }
}
- }
- assert(sz == 0);
- goto PREFIX;
+ assert(sz == 0);
+ goto PREFIX;
- case CEE_TAILCALL:
- JITDUMP(" tail.");
+ case CEE_TAILCALL:
+ JITDUMP(" tail.");
- Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
- prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
+ Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
+ prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
- {
- OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
- if (!impOpcodeIsCallOpcode(actualOpcode)) {
- BADCODE("tailcall. has to be followed by call, callvirt or calli");
+ {
+ OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
+ if (!impOpcodeIsCallOpcode(actualOpcode))
+ {
+ BADCODE("tailcall. has to be followed by call, callvirt or calli");
+ }
}
- }
- assert(sz == 0);
- goto PREFIX;
+ assert(sz == 0);
+ goto PREFIX;
- case CEE_NEWOBJ:
-
- /* Since we will implicitly insert newObjThisPtr at the start of the
- argument list, spill any GTF_ORDER_SIDEEFF */
- impSpillSpecialSideEff();
+ case CEE_NEWOBJ:
+
+ /* Since we will implicitly insert newObjThisPtr at the start of the
+ argument list, spill any GTF_ORDER_SIDEEFF */
+ impSpillSpecialSideEff();
- /* NEWOBJ does not respond to TAIL */
- prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
+ /* NEWOBJ does not respond to TAIL */
+ prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
- /* NEWOBJ does not respond to CONSTRAINED */
- prefixFlags &= ~PREFIX_CONSTRAINED;
+ /* NEWOBJ does not respond to CONSTRAINED */
+ prefixFlags &= ~PREFIX_CONSTRAINED;
#if COR_JIT_EE_VERSION > 460
- _impResolveToken(CORINFO_TOKENKIND_NewObj);
+ _impResolveToken(CORINFO_TOKENKIND_NewObj);
#else
- _impResolveToken(CORINFO_TOKENKIND_Method);
+ _impResolveToken(CORINFO_TOKENKIND_Method);
#endif
- eeGetCallInfo(&resolvedToken, 0 /* constraint typeRef*/,
- addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS,
- CORINFO_CALLINFO_ALLOWINSTPARAM)), &callInfo);
+ eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
+ addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
+ &callInfo);
- if (compIsForInlining())
- {
- if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
+ if (compIsForInlining())
{
- //Check to see if this call violates the boundary.
- compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
- return;
+ if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
+ {
+ // Check to see if this call violates the boundary.
+ compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
+ return;
+ }
}
- }
-
- mflags = callInfo.methodFlags;
- if ((mflags & (CORINFO_FLG_STATIC|CORINFO_FLG_ABSTRACT)) != 0)
- BADCODE("newobj on static or abstract method");
+ mflags = callInfo.methodFlags;
- // Insert the security callout before any actual code is generated
- impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
+ if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
+ {
+ BADCODE("newobj on static or abstract method");
+ }
- // There are three different cases for new
- // Object size is variable (depends on arguments)
- // 1) Object is an array (arrays treated specially by the EE)
- // 2) Object is some other variable sized object (e.g. String)
- // 3) Class Size can be determined beforehand (normal case)
- // In the first case, we need to call a NEWOBJ helper (multinewarray)
- // in the second case we call the constructor with a '0' this pointer
- // In the third case we alloc the memory, then call the constuctor
+ // Insert the security callout before any actual code is generated
+ impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
- clsFlags = callInfo.classFlags;
- if (clsFlags & CORINFO_FLG_ARRAY)
- {
- if (tiVerificationNeeded)
+ // There are three different cases for new
+ // Object size is variable (depends on arguments)
+ // 1) Object is an array (arrays treated specially by the EE)
+ // 2) Object is some other variable sized object (e.g. String)
+ // 3) Class Size can be determined beforehand (normal case)
+ // In the first case, we need to call a NEWOBJ helper (multinewarray)
+ // in the second case we call the constructor with a '0' this pointer
+ // In the third case we alloc the memory, then call the constructor
+
+ clsFlags = callInfo.classFlags;
+ if (clsFlags & CORINFO_FLG_ARRAY)
{
- CORINFO_CLASS_HANDLE elemTypeHnd;
- INDEBUG(CorInfoType corType =) info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
- assert(!(elemTypeHnd == 0 && corType == CORINFO_TYPE_VALUECLASS));
- Verify(elemTypeHnd == 0 || !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
- "newarr of byref-like objects");
- verVerifyCall(opcode,
- &resolvedToken,
- NULL,
- ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
- ((prefixFlags & PREFIX_READONLY) != 0),
- delegateCreateStart,
- codeAddr - 1,
- &callInfo
- DEBUGARG(info.compFullName));
-
- }
- // Arrays need to call the NEWOBJ helper.
- assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
-
- impImportNewObjArray(&resolvedToken, &callInfo);
- if (compDonotInline())
- return;
+ if (tiVerificationNeeded)
+ {
+ CORINFO_CLASS_HANDLE elemTypeHnd;
+ INDEBUG(CorInfoType corType =)
+ info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
+ assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
+ Verify(elemTypeHnd == nullptr ||
+ !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
+ "newarr of byref-like objects");
+ verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
+ ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
+ &callInfo DEBUGARG(info.compFullName));
+ }
+ // Arrays need to call the NEWOBJ helper.
+ assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
- callTyp = TYP_REF;
- break;
- }
- // At present this can only be String
- else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
- {
-#if COR_JIT_EE_VERSION > 460
- if (eeGetEEInfo()->targetAbi == CORINFO_CORERT_ABI)
+ impImportNewObjArray(&resolvedToken, &callInfo);
+ if (compDonotInline())
+ {
+ return;
+ }
+
+ callTyp = TYP_REF;
+ break;
+ }
+ // At present this can only be String
+ else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
{
- // The dummy argument does not exist in CoreRT
- newObjThisPtr = nullptr;
+#if COR_JIT_EE_VERSION > 460
+ if (eeGetEEInfo()->targetAbi == CORINFO_CORERT_ABI)
+ {
+ // The dummy argument does not exist in CoreRT
+ newObjThisPtr = nullptr;
+ }
+ else
+#endif
+ {
+ // This is the case for variable-sized objects that are not
+ // arrays. In this case, call the constructor with a null 'this'
+ // pointer
+ newObjThisPtr = gtNewIconNode(0, TYP_REF);
+ }
+
+ /* Remember that this basic block contains 'new' of an object */
+ block->bbFlags |= BBF_HAS_NEWOBJ;
+ optMethodFlags |= OMF_HAS_NEWOBJ;
}
else
-#endif
{
- // This is the case for variable-sized objects that are not
- // arrays. In this case, call the constructor with a null 'this'
- // pointer
- newObjThisPtr = gtNewIconNode(0, TYP_REF);
- }
+ // This is the normal case where the size of the object is
+ // fixed. Allocate the memory and call the constructor.
- /* Remember that this basic block contains 'new' of an object */
- block->bbFlags |= BBF_HAS_NEWOBJ;
- optMethodFlags |= OMF_HAS_NEWOBJ;
- }
- else
- {
- // This is the normal case where the size of the object is
- // fixed. Allocate the memory and call the constructor.
+ // Note: We cannot add a peep to avoid use of temp here
+ // because we don't have enough interference info to detect when
+ // sources and destination interfere, example: s = new S(ref);
- // Note: We cannot add a peep to avoid use of temp here
- // becase we don't have enough interference info to detect when
- // sources and destination interfere, example: s = new S(ref);
+ // TODO: We should find the correct place to introduce a general
+ // reverse copy prop for struct return values from newobj or
+ // any function returning structs.
- // TODO: We find the correct place to introduce a general
- // reverse copy prop for struct return values from newobj or
- // any function returning structs.
+ /* get a temporary for the new object */
+ lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
- /* get a temporary for the new object */
- lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
+ // In the value class case we only need clsHnd for size calcs.
+ //
+ // The lookup of the code pointer will be handled by CALL in this case
+ if (clsFlags & CORINFO_FLG_VALUECLASS)
+ {
+ CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
- // In the value class case we only need clsHnd for size calcs.
- //
- // The lookup of the code pointer will be handled by CALL in this case
- if (clsFlags & CORINFO_FLG_VALUECLASS)
- {
- CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
+ if (impIsPrimitive(jitTyp))
+ {
+ lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
+ }
+ else
+ {
+ // The local variable itself is the allocated space.
+ // Here we need the unsafe value cls check, since the address of the struct is taken for further use
+ // and is potentially exploitable.
+ lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
+ }
- if (impIsPrimitive(jitTyp))
- {
- lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
+ // Append a tree to zero-out the temp
+ newObjThisPtr =
+ gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
+
+ newObjThisPtr =
+ gtNewBlkOpNode(GT_INITBLK,
+ newObjThisPtr, // Dest
+ gtNewIconNode(0), // Value
+ gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)), // Size
+ false); // volatile
+ impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
+
+ // Obtain the address of the temp
+ newObjThisPtr =
+ gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
}
else
{
- // The local variable itself is the allocated space.
- // Here we need unsafe value cls check, since the address of struct is taken for further use
- // and potentially exploitable.
- lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
- }
-
- // Append a tree to zero-out the temp
- newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF,
- gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
-
- newObjThisPtr = gtNewBlkOpNode(GT_INITBLK,
- newObjThisPtr, // Dest
- gtNewIconNode(0), // Value
- gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)), // Size
- false); // volatil
- impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
-
- // Obtain the address of the temp
- newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF,
- gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
- }
- else
- {
#ifdef FEATURE_READYTORUN_COMPILER
- if (opts.IsReadyToRun())
- {
- op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
- usingReadyToRunHelper = (op1 != NULL);
- }
+ if (opts.IsReadyToRun())
+ {
+ op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
+ usingReadyToRunHelper = (op1 != NULL);
+ }
- if (!usingReadyToRunHelper)
+ if (!usingReadyToRunHelper)
#endif
- {
- op1 = impParentClassTokenToHandle(&resolvedToken, NULL, TRUE);
- if (op1 == NULL) // compDonotInline()
- return;
-
- // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
- // and the newfast call with a single call to a dynamic R2R cell that will:
- // 1) Load the context
- // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
- // 3) Allocate and return the new object
- // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
+ {
+ op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
+ if (op1 == nullptr)
+ { // compDonotInline()
+ return;
+ }
+
+ // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
+ // and the newfast call with a single call to a dynamic R2R cell that will:
+ // 1) Load the context
+ // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
+ // stub
+ // 3) Allocate and return the new object
+ // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
+
+ op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
+ resolvedToken.hClass, TYP_REF, op1);
+ }
- op1 = gtNewAllocObjNode( info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
- resolvedToken.hClass, TYP_REF, op1 );
- }
+ // Remember that this basic block contains 'new' of an object
+ block->bbFlags |= BBF_HAS_NEWOBJ;
+ optMethodFlags |= OMF_HAS_NEWOBJ;
- // Remember that this basic block contains 'new' of an object
- block->bbFlags |= BBF_HAS_NEWOBJ;
- optMethodFlags |= OMF_HAS_NEWOBJ;
-
- // Append the assignment to the temp/local. Dont need to spill
- // at all as we are just calling an EE-Jit helper which can only
- // cause an (async) OutOfMemoryException.
+ // Append the assignment to the temp/local. Don't need to spill
+ // at all as we are just calling an EE-Jit helper which can only
+ // cause an (async) OutOfMemoryException.
- // We assign the newly allocated object (by a GT_ALLOCOBJ node)
- // to a temp. Note that the pattern "temp = allocObj" is required
- // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
- // without exhaustive walk over all expressions.
+ // We assign the newly allocated object (by a GT_ALLOCOBJ node)
+ // to a temp. Note that the pattern "temp = allocObj" is required
+ // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
+ // without exhaustive walk over all expressions.
- impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
+ impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
- newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
+ newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
+ }
}
- }
- goto CALL;
+ goto CALL;
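A runtime-level sketch of what the value-class newobj path above produces: grab a temp (lvaGrabTemp), zero it with the GT_INITBLK tree, then call the constructor with the temp's address as 'this'. The struct S and S_ctor below are hypothetical stand-ins for a managed value type and its .ctor, invented for this example.

#include <cstdio>
#include <cstring>

struct S
{
    int a;
    int b;
};

// stand-in for the managed constructor S::.ctor(int32)
static void S_ctor(S* self, int a)
{
    self->a = a;
}

int main()
{
    S tmp;                               // the "NewObj constructor temp"
    std::memset(&tmp, 0, sizeof(S));     // the GT_INITBLK: zero the whole temp before the call
    S_ctor(&tmp, 42);                    // call the .ctor with &tmp as the 'this' pointer
    printf("a=%d b=%d\n", tmp.a, tmp.b); // b stays 0 because of the zero-init
    return 0;
}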
- case CEE_CALLI:
+ case CEE_CALLI:
- /* CALLI does not respond to CONSTRAINED */
- prefixFlags &= ~PREFIX_CONSTRAINED;
+ /* CALLI does not respond to CONSTRAINED */
+ prefixFlags &= ~PREFIX_CONSTRAINED;
- if (compIsForInlining())
- {
- //CALLI doesn't have a method handle, so assume the worst.
- if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
+ if (compIsForInlining())
{
- compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
- return;
+ // CALLI doesn't have a method handle, so assume the worst.
+ if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
+ {
+ compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
+ return;
+ }
}
- }
// fall through
- case CEE_CALLVIRT:
- case CEE_CALL:
+ case CEE_CALLVIRT:
+ case CEE_CALL:
- //We can't call getCallInfo on the token from a CALLI, but we need it in
- //many other places. We unfortunately embed that knowledge here.
- if (opcode != CEE_CALLI)
- {
- _impResolveToken(CORINFO_TOKENKIND_Method);
-
- eeGetCallInfo(&resolvedToken,
- (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : NULL,
- //this is how impImportCall invokes getCallInfo
- addVerifyFlag(combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM,
- CORINFO_CALLINFO_SECURITYCHECKS),
- (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
- : CORINFO_CALLINFO_NONE)),
- &callInfo);
- }
- else
- {
- //Suppress uninitialized use warning.
- memset(&resolvedToken, 0, sizeof(resolvedToken));
- memset(&callInfo, 0, sizeof(callInfo));
-
- resolvedToken.token = getU4LittleEndian(codeAddr);
- }
+ // We can't call getCallInfo on the token from a CALLI, but we need it in
+ // many other places. We unfortunately embed that knowledge here.
+ if (opcode != CEE_CALLI)
+ {
+ _impResolveToken(CORINFO_TOKENKIND_Method);
+
+ eeGetCallInfo(&resolvedToken,
+ (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
+ // this is how impImportCall invokes getCallInfo
+ addVerifyFlag(
+ combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
+ (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
+ : CORINFO_CALLINFO_NONE)),
+ &callInfo);
+ }
+ else
+ {
+ // Suppress uninitialized use warning.
+ memset(&resolvedToken, 0, sizeof(resolvedToken));
+ memset(&callInfo, 0, sizeof(callInfo));
+ resolvedToken.token = getU4LittleEndian(codeAddr);
+ }
- CALL: // memberRef should be set.
- // newObjThisPtr should be set for CEE_NEWOBJ
+ CALL: // memberRef should be set.
+ // newObjThisPtr should be set for CEE_NEWOBJ
- JITDUMP(" %08X", resolvedToken.token);
- constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
+ JITDUMP(" %08X", resolvedToken.token);
+ constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
- bool newBBcreatedForTailcallStress;
+ bool newBBcreatedForTailcallStress;
- newBBcreatedForTailcallStress = false;
+ newBBcreatedForTailcallStress = false;
- if (compIsForInlining())
- {
- if ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0)
+ if (compIsForInlining())
{
-#ifdef DEBUG
- if (verbose)
+ if ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0)
{
- printf("\n\nIgnoring the tail call prefix in the inlinee %s\n",
- info.compFullName);
- }
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\n\nIgnoring the tail call prefix in the inlinee %s\n", info.compFullName);
+ }
#endif
- prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
- }
- }
- else
- {
- if (compTailCallStress())
- {
- // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
- // Tail call stress only recognizes call+ret patterns and forces them to be
- // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
- // doesn't import 'ret' opcode following the call into the basic block containing
- // the call instead imports it to a new basic block. Note that fgMakeBasicBlocks()
- // is already checking that there is an opcode following call and hence it is
- // safe here to read next opcode without bounds check.
- newBBcreatedForTailcallStress =
- impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't make it jump to RET.
- (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
-
- if(newBBcreatedForTailcallStress &&
- !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
- verCheckTailCallConstraint(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, true) // Is it legal to do talcall?
- )
- {
- // Stress the tailcall.
- JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
- prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
+ prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
}
}
-
- // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
- // hence will not be considered for implicit tail calling.
- bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
- if (impIsImplicitTailCallCandidate(opcode,
- codeAddr + sz,
- codeEndp,
- prefixFlags,
- isRecursive))
+ else
{
- JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
- prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
+ if (compTailCallStress())
+ {
+ // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
+ // Tail call stress only recognizes call+ret patterns and forces them to be
+ // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
+ // doesn't import the 'ret' opcode following the call into the basic block containing
+ // the call; instead, it imports it into a new basic block. Note that fgMakeBasicBlocks()
+ // is already checking that there is an opcode following call and hence it is
+ // safe here to read next opcode without bounds check.
+ newBBcreatedForTailcallStress =
+ impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
+ // make it jump to RET.
+ (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
+
+ if (newBBcreatedForTailcallStress &&
+ !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
+ verCheckTailCallConstraint(opcode, &resolvedToken,
+ constraintCall ? &constrainedResolvedToken : nullptr,
+ true) // Is it legal to do a tail call?
+ )
+ {
+ // Stress the tailcall.
+ JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
+ prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
+ }
+ }
+
+ // Note that when running under tail call stress, a call will be marked as explicitly tail prefixed and
+ // hence will not be considered for implicit tail calling.
+ bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
+ if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
+ {
+ JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
+ prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
+ }
}
- }
- // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
- explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
- readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
+ // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
+ explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
+ readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
- if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
- {
- //All calls and delegates need a security callout.
- //For delegates, this is the call to the delegate constructor, not the access check on the
- //LD(virt)FTN.
- impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
+ if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
+ {
+ // All calls and delegates need a security callout.
+ // For delegates, this is the call to the delegate constructor, not the access check on the
+ // LD(virt)FTN.
+ impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
#if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
@@ -12020,1011 +12455,1045 @@ DO_LDFTN:
}
#endif // DevDiv 410397
-
- }
+ }
- if (tiVerificationNeeded)
- verVerifyCall(opcode,
- &resolvedToken,
- constraintCall ? &constrainedResolvedToken : NULL,
- explicitTailCall,
- readonlyCall,
- delegateCreateStart,
- codeAddr - 1,
- &callInfo
- DEBUGARG(info.compFullName));
-
-
- // Insert delegate callout here.
- if (opcode == CEE_NEWOBJ &&
- (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
- {
-#ifdef DEBUG
- // We should do this only if verification is enabled
- // If verification is disabled, delegateCreateStart will not be initialized correctly
if (tiVerificationNeeded)
{
- mdMemberRef delegateMethodRef = mdMemberRefNil;
- // We should get here only for well formed delegate creation.
- assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
+ verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
+ explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
+ &callInfo DEBUGARG(info.compFullName));
}
+
+ // Insert delegate callout here.
+ if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
+ {
+#ifdef DEBUG
+ // We should do this only if verification is enabled
+ // If verification is disabled, delegateCreateStart will not be initialized correctly
+ if (tiVerificationNeeded)
+ {
+ mdMemberRef delegateMethodRef = mdMemberRefNil;
+ // We should get here only for well formed delegate creation.
+ assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
+ }
#endif
#ifdef FEATURE_CORECLR
- // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
- typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
- CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
-
- impInsertCalloutForDelegate(info.compMethodHnd,
- delegateMethodHandle,
- resolvedToken.hClass);
-#endif // FEATURE_CORECLR
- }
+ // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
+ typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
+ CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
- callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
- if (compDonotInline())
- {
- return;
- }
+ impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
+#endif // FEATURE_CORECLR
+ }
- if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we have created a new BB after the "call"
- // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
- {
- assert(!compIsForInlining());
- goto RET;
- }
+ callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
+ newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
+ if (compDonotInline())
+ {
+ return;
+ }
- break;
+ if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
+ // have created a new BB after the "call"
+ // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
+ {
+ assert(!compIsForInlining());
+ goto RET;
+ }
- case CEE_LDFLD:
- case CEE_LDSFLD:
- case CEE_LDFLDA:
- case CEE_LDSFLDA: {
+ break;
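A small sketch of the shape test behind the tail-call stress path above: a call is promoted to an explicit tail call only when the very next IL opcode is ret. The callFollowedByRet helper and the byte values are simplified stand-ins invented for this example.

#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool callFollowedByRet(const uint8_t* opcodeAddr, size_t operandSize, const uint8_t* end)
{
    const uint8_t ilCall = 0x28;
    const uint8_t ilRet  = 0x2A;

    if (*opcodeAddr != ilCall)
    {
        return false; // only plain calls qualify; newobj never does
    }
    const uint8_t* next = opcodeAddr + 1 + operandSize; // skip the method token operand
    return (next < end) && (*next == ilRet);
}

int main()
{
    //                    call  <---method token--->  ret
    const uint8_t il[] = {0x28, 0x01, 0x00, 0x00, 0x06, 0x2A};
    printf("tail-call stress candidate: %d\n", callFollowedByRet(il, 4, il + sizeof(il)));
    return 0;
}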
- BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
- BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
+ case CEE_LDFLD:
+ case CEE_LDSFLD:
+ case CEE_LDFLDA:
+ case CEE_LDSFLDA:
+ {
- /* Get the CP_Fieldref index */
- assertImp(sz == sizeof(unsigned));
+ BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
+ BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
- _impResolveToken(CORINFO_TOKENKIND_Field);
+ /* Get the CP_Fieldref index */
+ assertImp(sz == sizeof(unsigned));
- JITDUMP(" %08X", resolvedToken.token);
+ _impResolveToken(CORINFO_TOKENKIND_Field);
- int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
+ JITDUMP(" %08X", resolvedToken.token);
- GenTreePtr obj = NULL;
- typeInfo* tiObj = NULL;
- CORINFO_CLASS_HANDLE objType = NULL; // used for fields
+ int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
- if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
- {
- tiObj = &impStackTop().seTypeInfo;
- obj = impPopStack(objType).val;
+ GenTreePtr obj = nullptr;
+ typeInfo* tiObj = nullptr;
+ CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
- if (impIsThis(obj))
+ if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
{
- aflags |= CORINFO_ACCESS_THIS;
+ tiObj = &impStackTop().seTypeInfo;
+ obj = impPopStack(objType).val;
- // An optimization for Contextful classes:
- // we unwrap the proxy when we have a 'this reference'
+ if (impIsThis(obj))
+ {
+ aflags |= CORINFO_ACCESS_THIS;
+
+ // An optimization for Contextful classes:
+ // we unwrap the proxy when we have a 'this reference'
- if (info.compUnwrapContextful)
- aflags |= CORINFO_ACCESS_UNWRAP;
+ if (info.compUnwrapContextful)
+ {
+ aflags |= CORINFO_ACCESS_UNWRAP;
+ }
+ }
}
- }
- eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
+ eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
- // Figure out the type of the member. We always call canAccessField, so you always need this
- // handle
- CorInfoType ciType = fieldInfo.fieldType;
- clsHnd = fieldInfo.structType;
+ // Figure out the type of the member. We always call canAccessField, so you always need this
+ // handle
+ CorInfoType ciType = fieldInfo.fieldType;
+ clsHnd = fieldInfo.structType;
- lclTyp = JITtype2varType(ciType);
+ lclTyp = JITtype2varType(ciType);
#ifdef _TARGET_AMD64
- noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
+ noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
#endif // _TARGET_AMD64
- if (compIsForInlining())
- {
- switch (fieldInfo.fieldAccessor)
+ if (compIsForInlining())
{
- case CORINFO_FIELD_INSTANCE_HELPER:
- case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
- case CORINFO_FIELD_STATIC_ADDR_HELPER:
- case CORINFO_FIELD_STATIC_TLS:
+ switch (fieldInfo.fieldAccessor)
+ {
+ case CORINFO_FIELD_INSTANCE_HELPER:
+ case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
+ case CORINFO_FIELD_STATIC_ADDR_HELPER:
+ case CORINFO_FIELD_STATIC_TLS:
- compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
- return;
+ compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
+ return;
- case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
+ case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
- /* We may be able to inline the field accessors in specific instantiations of generic methods */
- compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
- return;
+ /* We may be able to inline the field accessors in specific instantiations of generic
+ * methods */
+ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
+ return;
- default:
- break;
- }
+ default:
+ break;
+ }
- if (!isLoadAddress &&
- (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) &&
- lclTyp == TYP_STRUCT &&
- clsHnd)
- {
- if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF)
- && !(info.compFlags & CORINFO_FLG_FORCEINLINE))
+ if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
+ clsHnd)
{
- // Loading a static valuetype field usually will cause a JitHelper to be called
- // for the static base. This will bloat the code.
- compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
-
- if (compInlineResult->IsFailure())
+ if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
+ !(info.compFlags & CORINFO_FLG_FORCEINLINE))
{
- return;
+ // Loading a static valuetype field usually will cause a JitHelper to be called
+ // for the static base. This will bloat the code.
+ compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
+
+ if (compInlineResult->IsFailure())
+ {
+ return;
+ }
}
}
}
- }
-
- tiRetVal = verMakeTypeInfo(ciType, clsHnd);
- if (isLoadAddress)
- tiRetVal.MakeByRef();
- else
- tiRetVal.NormaliseForStack();
-
- //Perform this check always to ensure that we get field access exceptions even with
- //SkipVerification.
- impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
- if (tiVerificationNeeded)
- {
- // You can also pass the unboxed struct to LDFLD
- BOOL bAllowPlainValueTypeAsThis = FALSE;
- if (opcode == CEE_LDFLD && impIsValueType(tiObj))
+ tiRetVal = verMakeTypeInfo(ciType, clsHnd);
+ if (isLoadAddress)
{
- bAllowPlainValueTypeAsThis = TRUE;
+ tiRetVal.MakeByRef();
}
-
- verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
+ else
+ {
+ tiRetVal.NormaliseForStack();
+ }
+
+ // Perform this check always to ensure that we get field access exceptions even with
+ // SkipVerification.
+ impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
- // If we're doing this on a heap object or from a 'safe' byref
- // then the result is a safe byref too
- if (isLoadAddress) // load address
+ if (tiVerificationNeeded)
{
- if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
+ // You can also pass the unboxed struct to LDFLD
+ BOOL bAllowPlainValueTypeAsThis = FALSE;
+ if (opcode == CEE_LDFLD && impIsValueType(tiObj))
{
- if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
+ bAllowPlainValueTypeAsThis = TRUE;
+ }
+
+ verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
+
+ // If we're doing this on a heap object or from a 'safe' byref
+ // then the result is a safe byref too
+ if (isLoadAddress) // load address
+ {
+ if (fieldInfo.fieldFlags &
+ CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
{
+ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
+ {
+ tiRetVal.SetIsPermanentHomeByRef();
+ }
+ }
+ else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
+ {
+ // ldflda of byref is safe if done on a gc object or on a
+ // safe byref
tiRetVal.SetIsPermanentHomeByRef();
}
}
- else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
+ }
+ else
+ {
+ // tiVerificationNeeded is false.
+ // Raise InvalidProgramException if static load accesses non-static field
+ if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
{
- // ldflda of byref is safe if done on a gc object or on a
- // safe byref
- tiRetVal.SetIsPermanentHomeByRef();
+ BADCODE("static access on an instance field");
}
}
- }
- else
- {
- // tiVerificationNeeded is false.
- // Raise InvalidProgramException if static load accesses non-static field
- if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
+
+ // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
+ if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
{
- BADCODE("static access on an instance field");
+ if (obj->gtFlags & GTF_SIDE_EFFECT)
+ {
+ obj = gtUnusedValNode(obj);
+ impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
+ }
+ obj = nullptr;
}
- }
- // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
- if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != NULL)
- {
- if (obj->gtFlags & GTF_SIDE_EFFECT)
+ /* Preserve 'small' int types */
+ if (lclTyp > TYP_INT)
{
- obj = gtUnusedValNode(obj);
- impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
+ lclTyp = genActualType(lclTyp);
}
- obj = 0;
- }
- /* Preserve 'small' int types */
- if (lclTyp > TYP_INT)
- {
- lclTyp = genActualType(lclTyp);
- }
+ bool usesHelper = false;
- bool usesHelper = false;
-
- switch (fieldInfo.fieldAccessor)
- {
- case CORINFO_FIELD_INSTANCE:
+ switch (fieldInfo.fieldAccessor)
+ {
+ case CORINFO_FIELD_INSTANCE:
#ifdef FEATURE_READYTORUN_COMPILER
- case CORINFO_FIELD_INSTANCE_WITH_BASE:
+ case CORINFO_FIELD_INSTANCE_WITH_BASE:
#endif
- {
- bool nullcheckNeeded = false;
+ {
+ bool nullcheckNeeded = false;
- obj = impCheckForNullPointer(obj);
+ obj = impCheckForNullPointer(obj);
- if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
- {
- nullcheckNeeded = true;
- }
+ if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
+ {
+ nullcheckNeeded = true;
+ }
- // If the object is a struct, what we really want is
- // for the field to operate on the address of the struct.
- if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
- {
- assert(opcode == CEE_LDFLD && objType != NULL);
+ // If the object is a struct, what we really want is
+ // for the field to operate on the address of the struct.
+ if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
+ {
+ assert(opcode == CEE_LDFLD && objType != nullptr);
- obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
- }
+ obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
+ }
- /* Create the data member node */
- op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
+ /* Create the data member node */
+ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
#ifdef FEATURE_READYTORUN_COMPILER
- if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
- op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
+ if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
+ op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
#endif
- op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
+ op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
- if (fgAddrCouldBeNull(obj))
- {
- op1->gtFlags |= GTF_EXCEPT;
- }
+ if (fgAddrCouldBeNull(obj))
+ {
+ op1->gtFlags |= GTF_EXCEPT;
+ }
- // If gtFldObj is a BYREF then our target is a value class and
- // it could point anywhere, example a boxed class static int
- if (obj->gtType == TYP_BYREF)
- op1->gtFlags |= GTF_IND_TGTANYWHERE;
+ // If gtFldObj is a BYREF then our target is a value class and
+ // it could point anywhere, example a boxed class static int
+ // it could point anywhere, e.g. a boxed class static int
+ {
+ op1->gtFlags |= GTF_IND_TGTANYWHERE;
+ }
- DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
- if (StructHasOverlappingFields(typeFlags))
- {
- op1->gtField.gtFldMayOverlap = true;
- }
+ DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
+ if (StructHasOverlappingFields(typeFlags))
+ {
+ op1->gtField.gtFldMayOverlap = true;
+ }
- // wrap it in a address of operator if necessary
- if (isLoadAddress)
- {
- op1 = gtNewOperNode(GT_ADDR, (var_types)(varTypeIsGC(obj->TypeGet()) ?
- TYP_BYREF : TYP_I_IMPL), op1);
- }
- else
- {
- if (compIsForInlining() &&
- impInlineIsGuaranteedThisDerefBeforeAnySideEffects( NULL,
- obj,
- impInlineInfo->inlArgInfo))
+ // wrap it in a address of operator if necessary
+ if (isLoadAddress)
{
- impInlineInfo->thisDereferencedFirst = true;
+ op1 = gtNewOperNode(GT_ADDR,
+ (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
+ }
+ else
+ {
+ if (compIsForInlining() &&
+ impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
+ impInlineInfo->inlArgInfo))
+ {
+ impInlineInfo->thisDereferencedFirst = true;
+ }
}
}
- }
- break;
+ break;
- case CORINFO_FIELD_STATIC_TLS:
+ case CORINFO_FIELD_STATIC_TLS:
#ifdef _TARGET_X86_
- // Legacy TLS access is implemented as intrinsic on x86 only
+ // Legacy TLS access is implemented as intrinsic on x86 only
- /* Create the data member node */
- op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
- op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
+ /* Create the data member node */
+ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
+ op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
- if (isLoadAddress)
- {
- op1 = gtNewOperNode(GT_ADDR,
- (var_types)TYP_I_IMPL,
- op1);
- }
- break;
+ if (isLoadAddress)
+ {
+ op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
+ }
+ break;
#else
- fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
+ fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
- __fallthrough;
+ __fallthrough;
#endif
- case CORINFO_FIELD_STATIC_ADDR_HELPER:
- case CORINFO_FIELD_INSTANCE_HELPER:
- case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
- op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, 0);
- usesHelper = true;
- break;
+ case CORINFO_FIELD_STATIC_ADDR_HELPER:
+ case CORINFO_FIELD_INSTANCE_HELPER:
+ case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
+ op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
+ clsHnd, nullptr);
+ usesHelper = true;
+ break;
- case CORINFO_FIELD_STATIC_ADDRESS:
- // Replace static read-only fields with constant if possible
- if ((aflags & CORINFO_ACCESS_GET) &&
- (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
- !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
- (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
- {
- CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
- impTokenLookupContextHandle);
+ case CORINFO_FIELD_STATIC_ADDRESS:
+ // Replace static read-only fields with constant if possible
+ if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
+ !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
+ (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
+ {
+ CorInfoInitClassResult initClassResult =
+ info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
+ impTokenLookupContextHandle);
+
+ if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
+ {
+ void** pFldAddr = nullptr;
+ void* fldAddr =
+ info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
+
+ // We should always be able to access this static's address directly
+ assert(pFldAddr == nullptr);
+
+ op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
+ goto FIELD_DONE;
+ }
+ }
- if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
- {
- void ** pFldAddr = NULL;
- void * fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**) &pFldAddr);
+ __fallthrough;
- // We should always be able to access this static's address directly
- assert(pFldAddr == NULL);
+ case CORINFO_FIELD_STATIC_RVA_ADDRESS:
+ case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
+ case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
+ op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
+ lclTyp);
+ break;
- op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
+ case CORINFO_FIELD_INTRINSIC_ZERO:
+ {
+ assert(aflags & CORINFO_ACCESS_GET);
+ op1 = gtNewIconNode(0, lclTyp);
goto FIELD_DONE;
}
- }
+ break;
- __fallthrough;
+ case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
+ {
+ assert(aflags & CORINFO_ACCESS_GET);
- case CORINFO_FIELD_STATIC_RVA_ADDRESS:
- case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
- case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
- op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp);
- break;
+ LPVOID pValue;
+ InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
+ op1 = gtNewStringLiteralNode(iat, pValue);
+ goto FIELD_DONE;
+ }
+ break;
- case CORINFO_FIELD_INTRINSIC_ZERO:
- {
- assert(aflags & CORINFO_ACCESS_GET);
- op1 = gtNewIconNode(0, lclTyp);
- goto FIELD_DONE;
+ default:
+ assert(!"Unexpected fieldAccessor");
}
- break;
- case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
+ if (!isLoadAddress)
{
- assert(aflags & CORINFO_ACCESS_GET);
- LPVOID pValue;
- InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
- op1 = gtNewStringLiteralNode(iat, pValue);
- goto FIELD_DONE;
- }
- break;
-
- default:
- assert(!"Unexpected fieldAccessor");
- }
-
- if (!isLoadAddress)
- {
+ if (prefixFlags & PREFIX_VOLATILE)
+ {
+ op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
+ op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
- if (prefixFlags & PREFIX_VOLATILE)
- {
- op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
- op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
+ if (!usesHelper)
+ {
+ assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
+ (op1->OperGet() == GT_OBJ));
+ op1->gtFlags |= GTF_IND_VOLATILE;
+ }
+ }
- if (!usesHelper)
+ if (prefixFlags & PREFIX_UNALIGNED)
{
- assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ));
- op1->gtFlags |= GTF_IND_VOLATILE;
+ if (!usesHelper)
+ {
+ assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
+ (op1->OperGet() == GT_OBJ));
+ op1->gtFlags |= GTF_IND_UNALIGNED;
+ }
}
}
- if (prefixFlags & PREFIX_UNALIGNED)
+ /* Check if the class needs explicit initialization */
+
+ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
- if (!usesHelper)
+ GenTreePtr helperNode = impInitClass(&resolvedToken);
+ if (compDonotInline())
{
- assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ));
- op1->gtFlags |= GTF_IND_UNALIGNED;
+ return;
+ }
+ if (helperNode != nullptr)
+ {
+ op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
}
}
- }
-
- /* Check if the class needs explicit initialization */
-
- if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
- {
- GenTreePtr helperNode = impInitClass(&resolvedToken);
- if (compDonotInline())
- return;
- if (helperNode != NULL)
- op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
- }
-
-FIELD_DONE:
- impPushOnStack(op1, tiRetVal);
+ FIELD_DONE:
+ impPushOnStack(op1, tiRetVal);
}
break;
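A sketch of the optimization in the CORINFO_FIELD_STATIC_ADDRESS path above: once the class initializer has run, a load of a final (read-only) static of primitive type can be folded to the constant currently stored at the field's address instead of emitting a load. The ImportedNode type and importStaticIntLoad helper below are invented for this illustration and are not JIT code.

#include <cstdio>

struct ImportedNode
{
    bool       isConstant;
    int        constValue;
    const int* fieldAddr; // the non-folded path would load through this at run time
};

static ImportedNode importStaticIntLoad(const int* fieldAddr, bool classInitialized, bool fieldIsFinal)
{
    ImportedNode node{};
    if (classInitialized && fieldIsFinal)
    {
        node.isConstant = true; // mirrors impImportStaticReadOnlyField: bake the value in
        node.constValue = *fieldAddr;
    }
    else
    {
        node.fieldAddr = fieldAddr; // otherwise keep an indirection off the static's address
    }
    return node;
}

int main()
{
    static const int readonlyStatic = 7;
    ImportedNode     n = importStaticIntLoad(&readonlyStatic, /*classInitialized*/ true, /*fieldIsFinal*/ true);
    printf("folded=%d value=%d\n", (int)n.isConstant, n.constValue);
    return 0;
}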
- case CEE_STFLD:
- case CEE_STSFLD: {
+ case CEE_STFLD:
+ case CEE_STSFLD:
+ {
- BOOL isStoreStatic = (opcode == CEE_STSFLD);
+ BOOL isStoreStatic = (opcode == CEE_STSFLD);
- CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
+ CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
- /* Get the CP_Fieldref index */
+ /* Get the CP_Fieldref index */
- assertImp(sz == sizeof(unsigned));
+ assertImp(sz == sizeof(unsigned));
- _impResolveToken(CORINFO_TOKENKIND_Field);
+ _impResolveToken(CORINFO_TOKENKIND_Field);
- JITDUMP(" %08X", resolvedToken.token);
+ JITDUMP(" %08X", resolvedToken.token);
- int aflags = CORINFO_ACCESS_SET;
- GenTreePtr obj = NULL;
- typeInfo* tiObj = NULL;
- typeInfo tiVal;
+ int aflags = CORINFO_ACCESS_SET;
+ GenTreePtr obj = nullptr;
+ typeInfo* tiObj = nullptr;
+ typeInfo tiVal;
/* Pull the value from the stack */
- op2 = impPopStack(tiVal);
- clsHnd = tiVal.GetClassHandle();
-
- if (opcode == CEE_STFLD)
- {
- tiObj = &impStackTop().seTypeInfo;
- obj = impPopStack().val;
+ op2 = impPopStack(tiVal);
+ clsHnd = tiVal.GetClassHandle();
- if (impIsThis(obj))
+ if (opcode == CEE_STFLD)
{
- aflags |= CORINFO_ACCESS_THIS;
+ tiObj = &impStackTop().seTypeInfo;
+ obj = impPopStack().val;
- // An optimization for Contextful classes:
- // we unwrap the proxy when we have a 'this reference'
+ if (impIsThis(obj))
+ {
+ aflags |= CORINFO_ACCESS_THIS;
- if (info.compUnwrapContextful)
- aflags |= CORINFO_ACCESS_UNWRAP;
- }
- }
+ // An optimization for Contextful classes:
+ // we unwrap the proxy when we have a 'this reference'
- eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
+ if (info.compUnwrapContextful)
+ {
+ aflags |= CORINFO_ACCESS_UNWRAP;
+ }
+ }
+ }
- // Figure out the type of the member. We always call canAccessField, so you always need this
- // handle
- CorInfoType ciType = fieldInfo.fieldType;
- fieldClsHnd = fieldInfo.structType;
+ eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
- lclTyp = JITtype2varType(ciType);
+ // Figure out the type of the member. We always call canAccessField, so you always need this
+ // handle
+ CorInfoType ciType = fieldInfo.fieldType;
+ fieldClsHnd = fieldInfo.structType;
- if (compIsForInlining())
- {
- /* Is this a 'special' (COM) field? or a TLS ref static field?, field stored int GC heap? or per-inst static? */
+ lclTyp = JITtype2varType(ciType);
- switch (fieldInfo.fieldAccessor)
+ if (compIsForInlining())
{
- case CORINFO_FIELD_INSTANCE_HELPER:
- case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
- case CORINFO_FIELD_STATIC_ADDR_HELPER:
- case CORINFO_FIELD_STATIC_TLS:
+ /* Is this a 'special' (COM) field? a TLS ref static field? a field stored in the GC heap? or a
+ * per-inst static? */
- compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
- return;
+ switch (fieldInfo.fieldAccessor)
+ {
+ case CORINFO_FIELD_INSTANCE_HELPER:
+ case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
+ case CORINFO_FIELD_STATIC_ADDR_HELPER:
+ case CORINFO_FIELD_STATIC_TLS:
- case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
+ compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
+ return;
- /* We may be able to inline the field accessors in specific instantiations of generic methods */
- compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
- return;
+ case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
- default:
- break;
+ /* We may be able to inline the field accessors in specific instantiations of generic
+ * methods */
+ compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
+ return;
+
+ default:
+ break;
+ }
}
- }
- impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
+ impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
- if (tiVerificationNeeded)
- {
- verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
- typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
- Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
- }
- else
- {
- // tiVerificationNeed is false.
- // Raise InvalidProgramException if static store accesses non-static field
- if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
+ if (tiVerificationNeeded)
{
- BADCODE("static access on an instance field");
+ verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
+ typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
+ Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
+ }
+ else
+ {
+ // tiVerificationNeeded is false.
+ // Raise InvalidProgramException if static store accesses non-static field
+ if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
+ {
+ BADCODE("static access on an instance field");
+ }
}
- }
- // We are using stfld on a static field.
- // We allow it, but need to eval any side-effects for obj
- if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != NULL)
- {
- if (obj->gtFlags & GTF_SIDE_EFFECT)
+ // We are using stfld on a static field.
+ // We allow it, but need to eval any side-effects for obj
+ if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
{
- obj = gtUnusedValNode(obj);
- impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
+ if (obj->gtFlags & GTF_SIDE_EFFECT)
+ {
+ obj = gtUnusedValNode(obj);
+ impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
+ }
+ obj = nullptr;
}
- obj = 0;
- }
- /* Preserve 'small' int types */
- if (lclTyp > TYP_INT)
- {
- lclTyp = genActualType(lclTyp);
- }
+ /* Preserve 'small' int types */
+ if (lclTyp > TYP_INT)
+ {
+ lclTyp = genActualType(lclTyp);
+ }
- switch (fieldInfo.fieldAccessor)
- {
- case CORINFO_FIELD_INSTANCE:
+ switch (fieldInfo.fieldAccessor)
+ {
+ case CORINFO_FIELD_INSTANCE:
#ifdef FEATURE_READYTORUN_COMPILER
- case CORINFO_FIELD_INSTANCE_WITH_BASE:
+ case CORINFO_FIELD_INSTANCE_WITH_BASE:
#endif
- {
- obj = impCheckForNullPointer(obj);
-
- /* Create the data member node */
- op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
- DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
- if (StructHasOverlappingFields(typeFlags))
{
- op1->gtField.gtFldMayOverlap = true;
- }
+ obj = impCheckForNullPointer(obj);
+
+ /* Create the data member node */
+ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
+ DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
+ if (StructHasOverlappingFields(typeFlags))
+ {
+ op1->gtField.gtFldMayOverlap = true;
+ }
#ifdef FEATURE_READYTORUN_COMPILER
- if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
- op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
+ if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
+ op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
#endif
- op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
+ op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
- if (fgAddrCouldBeNull(obj))
- {
- op1->gtFlags |= GTF_EXCEPT;
- }
+ if (fgAddrCouldBeNull(obj))
+ {
+ op1->gtFlags |= GTF_EXCEPT;
+ }
- // If gtFldObj is a BYREF then our target is a value class and
- // it could point anywhere, example a boxed class static int
- if (obj->gtType == TYP_BYREF)
- op1->gtFlags |= GTF_IND_TGTANYWHERE;
+ // If gtFldObj is a BYREF then our target is a value class and
+ // it could point anywhere, for example a boxed class static int
+ if (obj->gtType == TYP_BYREF)
+ {
+ op1->gtFlags |= GTF_IND_TGTANYWHERE;
+ }
- if (compIsForInlining() &&
- impInlineIsGuaranteedThisDerefBeforeAnySideEffects( op2,
- obj,
- impInlineInfo->inlArgInfo))
- {
- impInlineInfo->thisDereferencedFirst = true;
+ if (compIsForInlining() &&
+ impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
+ {
+ impInlineInfo->thisDereferencedFirst = true;
+ }
}
- }
- break;
+ break;
- case CORINFO_FIELD_STATIC_TLS:
+ case CORINFO_FIELD_STATIC_TLS:
#ifdef _TARGET_X86_
- // Legacy TLS access is implemented as intrinsic on x86 only
+ // Legacy TLS access is implemented as an intrinsic on x86 only
- /* Create the data member node */
- op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
- op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
+ /* Create the data member node */
+ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
+ op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
- break;
+ break;
#else
- fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
+ fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
- __fallthrough;
+ __fallthrough;
#endif
- case CORINFO_FIELD_STATIC_ADDR_HELPER:
- case CORINFO_FIELD_INSTANCE_HELPER:
- case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
- op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, op2);
- goto SPILL_APPEND;
-
- case CORINFO_FIELD_STATIC_ADDRESS:
- case CORINFO_FIELD_STATIC_RVA_ADDRESS:
- case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
- case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
- op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp);
- break;
+ case CORINFO_FIELD_STATIC_ADDR_HELPER:
+ case CORINFO_FIELD_INSTANCE_HELPER:
+ case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
+ op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
+ clsHnd, op2);
+ goto SPILL_APPEND;
+
+ case CORINFO_FIELD_STATIC_ADDRESS:
+ case CORINFO_FIELD_STATIC_RVA_ADDRESS:
+ case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
+ case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
+ op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
+ lclTyp);
+ break;
- default:
- assert(!"Unexpected fieldAccessor");
- }
+ default:
+ assert(!"Unexpected fieldAccessor");
+ }
- /* Create the member assignment, unless we have a struct */
- bool deferStructAssign = varTypeIsStruct(lclTyp);
+ /* Create the member assignment, unless we have a struct */
+ bool deferStructAssign = varTypeIsStruct(lclTyp);
- if (!deferStructAssign)
- {
- if (prefixFlags & PREFIX_VOLATILE)
- {
- assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
- op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
- op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
- op1->gtFlags |= GTF_IND_VOLATILE;
- }
- if (prefixFlags & PREFIX_UNALIGNED)
+ if (!deferStructAssign)
{
- assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
- op1->gtFlags |= GTF_IND_UNALIGNED;
- }
-
- /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full trust
- apps). The reason this works is that JIT stores an i4 constant in Gentree union during importation
- and reads from the union as if it were a long during code generation. Though this can potentially
- read garbage, one can get lucky to have this working correctly.
-
- This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with /O2
- switch (default when compiling retail configs in Dev10) and a customer app has taken a dependency on
- it. To be backward compatible, we will explicitly add an upward cast here so that it works correctly
- always.
+ if (prefixFlags & PREFIX_VOLATILE)
+ {
+ assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
+ op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
+ op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
+ op1->gtFlags |= GTF_IND_VOLATILE;
+ }
+ if (prefixFlags & PREFIX_UNALIGNED)
+ {
+ assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
+ op1->gtFlags |= GTF_IND_UNALIGNED;
+ }
- Note that this is limited to x86 alone as thereis no back compat to be addressed for Arm JIT for
- V4.0.
- */
- CLANG_FORMAT_COMMENT_ANCHOR;
+ /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed
+ (full trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree
+ union during importation and reads from the union as if it were a long during code generation.
+ Though this can potentially read garbage, one can get lucky to have this working correctly.
+
+ This code pattern is generated by the Dev10 MC++ compiler while storing to fields when compiled
+ with the /O2 switch (the default when compiling retail configs in Dev10), and a customer app has
+ taken a dependency on it. To be backward compatible, we will explicitly add an upward cast here
+ so that it always works correctly.
+
+ Note that this is limited to x86 alone as there is no back compat to be addressed for the Arm JIT
+ for V4.0.
+ */
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef _TARGET_X86_
- if (op1->TypeGet() != op2->TypeGet() &&
- op2->OperIsConst() &&
- varTypeIsIntOrI(op2->TypeGet()) &&
- varTypeIsLong(op1->TypeGet()))
- {
- op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
- }
+ if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
+ varTypeIsLong(op1->TypeGet()))
+ {
+ op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
+ }
#endif
#ifdef _TARGET_64BIT_
- // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
- if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
- {
- op2->gtType = TYP_I_IMPL;
- }
- else
- {
- // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity
- //
- if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
- {
- op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
- }
- // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity
- //
- if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
- {
- op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
- }
- }
+ // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
+ if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
+ {
+ op2->gtType = TYP_I_IMPL;
+ }
+ else
+ {
+ // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
+ //
+ if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
+ {
+ op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
+ }
+ // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
+ //
+ if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
+ {
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
+ }
+ }
#endif
-#if !FEATURE_X87_DOUBLES
- // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
- // We insert a cast to the dest 'op1' type
- //
- if ((op1->TypeGet() != op2->TypeGet()) &&
- varTypeIsFloating(op1->gtType) &&
- varTypeIsFloating(op2->gtType))
- {
- op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
- }
-#endif // !FEATURE_X87_DOUBLES
-
- op1 = gtNewAssignNode(op1, op2);
-
- /* Mark the expression as containing an assignment */
-
- op1->gtFlags |= GTF_ASG;
- }
+#if !FEATURE_X87_DOUBLES
+ // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
+ // We insert a cast to the dest 'op1' type
+ //
+ if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
+ varTypeIsFloating(op2->gtType))
+ {
+ op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
+ }
+#endif // !FEATURE_X87_DOUBLES
- /* Check if the class needs explicit initialization */
+ op1 = gtNewAssignNode(op1, op2);
- if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
- {
- GenTreePtr helperNode = impInitClass(&resolvedToken);
- if (compDonotInline())
- return;
- if (helperNode != NULL)
- op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
- }
+ /* Mark the expression as containing an assignment */
- /* stfld can interfere with value classes (consider the sequence
- ldloc, ldloca, ..., stfld, stloc). We will be conservative and
- spill all value class references from the stack. */
+ op1->gtFlags |= GTF_ASG;
+ }
- if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
- {
- assert(tiObj);
+ /* Check if the class needs explicit initialization */
- if (impIsValueType(tiObj))
+ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
- impSpillEvalStack();
+ GenTreePtr helperNode = impInitClass(&resolvedToken);
+ if (compDonotInline())
+ {
+ return;
+ }
+ if (helperNode != nullptr)
+ {
+ op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
+ }
}
- else
+
+ /* stfld can interfere with value classes (consider the sequence
+ ldloc, ldloca, ..., stfld, stloc). We will be conservative and
+ spill all value class references from the stack. */
+
+ if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
{
- impSpillValueClasses();
+ assert(tiObj);
+
+ if (impIsValueType(tiObj))
+ {
+ impSpillEvalStack();
+ }
+ else
+ {
+ impSpillValueClasses();
+ }
}
- }
- /* Spill any refs to the same member from the stack */
+ /* Spill any refs to the same member from the stack */
- impSpillLclRefs((ssize_t)resolvedToken.hField);
+ impSpillLclRefs((ssize_t)resolvedToken.hField);
- /* stsfld also interferes with indirect accesses (for aliased
- statics) and calls. But don't need to spill other statics
- as we have explicitly spilled this particular static field. */
+ /* stsfld also interferes with indirect accesses (for aliased
+ statics) and calls. But don't need to spill other statics
+ as we have explicitly spilled this particular static field. */
- impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD") );
+ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
- if (deferStructAssign)
- {
- op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
+ if (deferStructAssign)
+ {
+ op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
+ }
}
- }
- goto APPEND;
+ goto APPEND;
- case CEE_NEWARR: {
+ case CEE_NEWARR:
+ {
- /* Get the class type index operand */
+ /* Get the class type index operand */
- _impResolveToken(CORINFO_TOKENKIND_Newarr);
+ _impResolveToken(CORINFO_TOKENKIND_Newarr);
- JITDUMP(" %08X", resolvedToken.token);
+ JITDUMP(" %08X", resolvedToken.token);
- if (!opts.IsReadyToRun())
- {
- // Need to restore array classes before creating array objects on the heap
- op1 = impTokenToHandle(&resolvedToken, NULL, TRUE /*mustRestoreHandle*/);
- if (op1 == NULL) // compDonotInline()
- return;
- }
+ if (!opts.IsReadyToRun())
+ {
+ // Need to restore array classes before creating array objects on the heap
+ op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
+ if (op1 == nullptr)
+ { // compDonotInline()
+ return;
+ }
+ }
- if (tiVerificationNeeded)
- {
- // As per ECMA 'numElems' specified can be either int32 or native int.
- Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
+ if (tiVerificationNeeded)
+ {
+ // As per ECMA, the specified 'numElems' can be either int32 or native int.
+ Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
- CORINFO_CLASS_HANDLE elemTypeHnd;
- info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
- Verify(elemTypeHnd == 0 || !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR), "array of byref-like type");
- tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
- }
+ CORINFO_CLASS_HANDLE elemTypeHnd;
+ info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
+ Verify(elemTypeHnd == nullptr ||
+ !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
+ "array of byref-like type");
+ tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
+ }
- accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
- impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
+ accessAllowedResult =
+ info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
+ impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
- /* Form the arglist: array class handle, size */
- op2 = impPopStack().val;
- assertImp(genActualTypeIsIntOrI(op2->gtType));
+ /* Form the arglist: array class handle, size */
+ op2 = impPopStack().val;
+ assertImp(genActualTypeIsIntOrI(op2->gtType));
#ifdef FEATURE_READYTORUN_COMPILER
- if (opts.IsReadyToRun())
- {
- op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF, gtNewArgList(op2));
- usingReadyToRunHelper = (op1 != NULL);
-
- if (!usingReadyToRunHelper)
+ if (opts.IsReadyToRun())
{
- // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
- // and the newarr call with a single call to a dynamic R2R cell that will:
- // 1) Load the context
- // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
- // 3) Allocate the new array
- // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
+ op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
+ gtNewArgList(op2));
+ usingReadyToRunHelper = (op1 != NULL);
- // Need to restore array classes before creating array objects on the heap
- op1 = impTokenToHandle(&resolvedToken, NULL, TRUE /*mustRestoreHandle*/);
- if (op1 == NULL) // compDonotInline()
- return;
+ if (!usingReadyToRunHelper)
+ {
+ // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
+ // and the newarr call with a single call to a dynamic R2R cell that will:
+ // 1) Load the context
+ // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
+ // 3) Allocate the new array
+ // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
+
+ // Need to restore array classes before creating array objects on the heap
+ op1 = impTokenToHandle(&resolvedToken, NULL, TRUE /*mustRestoreHandle*/);
+ if (op1 == NULL) // compDonotInline()
+ return;
+ }
}
- }
- if (!usingReadyToRunHelper)
+ if (!usingReadyToRunHelper)
#endif
- {
- args = gtNewArgList(op1, op2);
+ {
+ args = gtNewArgList(op1, op2);
- /* Create a call to 'new' */
+ /* Create a call to 'new' */
- // Note that this only works for shared generic code because the same helper is used for all reference array types
- op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass),
- TYP_REF, 0, args);
- }
+ // Note that this only works for shared generic code because the same helper is used for all
+ // reference array types
+ op1 =
+ gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
+ }
- op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
+ op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
- /* Remember that this basic block contains 'new' of an sd array */
+ /* Remember that this basic block contains 'new' of an sd array */
- block->bbFlags |= BBF_HAS_NEWARRAY;
- optMethodFlags |= OMF_HAS_NEWARRAY;
-
- /* Push the result of the call on the stack */
+ block->bbFlags |= BBF_HAS_NEWARRAY;
+ optMethodFlags |= OMF_HAS_NEWARRAY;
- impPushOnStack(op1, tiRetVal);
+ /* Push the result of the call on the stack */
- callTyp = TYP_REF;
+ impPushOnStack(op1, tiRetVal);
- } break;
+ callTyp = TYP_REF;
+ }
+ break;
- case CEE_LOCALLOC:
- assert(!compIsForInlining());
-
- if (tiVerificationNeeded)
- Verify(false, "bad opcode");
+ case CEE_LOCALLOC:
+ assert(!compIsForInlining());
- // We don't allow locallocs inside handlers
- if (block->hasHndIndex())
- {
- BADCODE("Localloc can't be inside handler");
- }
+ if (tiVerificationNeeded)
+ {
+ Verify(false, "bad opcode");
+ }
- /* The FP register may not be back to the original value at the end
- of the method, even if the frame size is 0, as localloc may
- have modified it. So we will HAVE to reset it */
+ // We don't allow locallocs inside handlers
+ if (block->hasHndIndex())
+ {
+ BADCODE("Localloc can't be inside handler");
+ }
- compLocallocUsed = true;
- setNeedsGSSecurityCookie();
+ /* The FP register may not be back to the original value at the end
+ of the method, even if the frame size is 0, as localloc may
+ have modified it. So we will HAVE to reset it */
- // Get the size to allocate
+ compLocallocUsed = true;
+ setNeedsGSSecurityCookie();
- op2 = impPopStack().val;
- assertImp(genActualTypeIsIntOrI(op2->gtType));
+ // Get the size to allocate
- if (verCurrentState.esStackDepth != 0)
- {
- BADCODE("Localloc can only be used when the stack is empty");
- }
-
+ op2 = impPopStack().val;
+ assertImp(genActualTypeIsIntOrI(op2->gtType));
- op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
+ if (verCurrentState.esStackDepth != 0)
+ {
+ BADCODE("Localloc can only be used when the stack is empty");
+ }
- // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
+ op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
- op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
+ // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
- impPushOnStack(op1, tiRetVal);
- break;
+ op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
+ impPushOnStack(op1, tiRetVal);
+ break;
- case CEE_ISINST:
+ case CEE_ISINST:
- /* Get the type token */
- assertImp(sz == sizeof(unsigned));
+ /* Get the type token */
+ assertImp(sz == sizeof(unsigned));
- _impResolveToken(CORINFO_TOKENKIND_Casting);
+ _impResolveToken(CORINFO_TOKENKIND_Casting);
- JITDUMP(" %08X", resolvedToken.token);
+ JITDUMP(" %08X", resolvedToken.token);
- if (!opts.IsReadyToRun())
- {
- op2 = impTokenToHandle(&resolvedToken, NULL, FALSE);
- if (op2 == NULL) // compDonotInline()
- return;
- }
+ if (!opts.IsReadyToRun())
+ {
+ op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
+ if (op2 == nullptr)
+ { // compDonotInline()
+ return;
+ }
+ }
- if (tiVerificationNeeded)
- {
- Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
- // Even if this is a value class, we know it is boxed.
- tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
- }
- accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
- impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
+ if (tiVerificationNeeded)
+ {
+ Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
+ // Even if this is a value class, we know it is boxed.
+ tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
+ }
+ accessAllowedResult =
+ info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
+ impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
- op1 = impPopStack().val;
+ op1 = impPopStack().val;
#ifdef FEATURE_READYTORUN_COMPILER
- if (opts.IsReadyToRun())
- {
- GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken,
- CORINFO_HELP_READYTORUN_ISINSTANCEOF,
- TYP_REF,
- gtNewArgList(op1));
- usingReadyToRunHelper = (opLookup != NULL);
- op1 = (usingReadyToRunHelper ? opLookup : op1);
-
- if (!usingReadyToRunHelper)
+ if (opts.IsReadyToRun())
{
- // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
- // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
- // 1) Load the context
- // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
- // 3) Perform the 'is instance' check on the input object
- // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
+ GenTreePtr opLookup =
+ impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
+ gtNewArgList(op1));
+ usingReadyToRunHelper = (opLookup != NULL);
+ op1 = (usingReadyToRunHelper ? opLookup : op1);
- op2 = impTokenToHandle(&resolvedToken, NULL, FALSE);
- if (op2 == NULL) // compDonotInline()
- return;
+ if (!usingReadyToRunHelper)
+ {
+ // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
+ // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
+ // 1) Load the context
+ // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
+ // 3) Perform the 'is instance' check on the input object
+ // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
+
+ op2 = impTokenToHandle(&resolvedToken, NULL, FALSE);
+ if (op2 == NULL) // compDonotInline()
+ return;
+ }
}
- }
- if (!usingReadyToRunHelper)
+ if (!usingReadyToRunHelper)
#endif
- {
- op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
- }
- if (compDonotInline())
- {
- return;
- }
-
- impPushOnStack(op1, tiRetVal);
+ {
+ op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
+ }
+ if (compDonotInline())
+ {
+ return;
+ }
- break;
+ impPushOnStack(op1, tiRetVal);
- case CEE_REFANYVAL:
+ break;
- // get the class handle and make a ICON node out of it
+ case CEE_REFANYVAL:
- _impResolveToken(CORINFO_TOKENKIND_Class);
+ // get the class handle and make a ICON node out of it
- JITDUMP(" %08X", resolvedToken.token);
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- op2 = impTokenToHandle(&resolvedToken);
- if (op2 == NULL) // compDonotInline()
- return;
+ JITDUMP(" %08X", resolvedToken.token);
- if (tiVerificationNeeded)
- {
- Verify(typeInfo::AreEquivalent(
- impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())), "need refany");
- tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
- }
-
- op1 = impPopStack().val;
- // make certain it is normalized;
- op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
+ op2 = impTokenToHandle(&resolvedToken);
+ if (op2 == nullptr)
+ { // compDonotInline()
+ return;
+ }
- // Call helper GETREFANY(classHandle, op1);
- args = gtNewArgList(op2, op1);
- op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
+ if (tiVerificationNeeded)
+ {
+ Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
+ "need refany");
+ tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
+ }
- impPushOnStack(op1, tiRetVal);
- break;
+ op1 = impPopStack().val;
+ // make certain it is normalized;
+ op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
- case CEE_REFANYTYPE:
+ // Call helper GETREFANY(classHandle, op1);
+ args = gtNewArgList(op2, op1);
+ op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
- if (tiVerificationNeeded)
- {
- Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo,
- verMakeTypeInfo(impGetRefAnyClass())), "need refany");
- }
-
- op1 = impPopStack().val;
+ impPushOnStack(op1, tiRetVal);
+ break;
- // make certain it is normalized;
- op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
+ case CEE_REFANYTYPE:
- if (op1->gtOper == GT_OBJ)
- {
- // Get the address of the refany
- op1 = op1->gtOp.gtOp1;
+ if (tiVerificationNeeded)
+ {
+ Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
+ "need refany");
+ }
- // Fetch the type from the correct slot
- op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
- op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
- }
- else
- {
- assertImp(op1->gtOper == GT_MKREFANY);
+ op1 = impPopStack().val;
+
+ // make certain it is normalized;
+ op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
+
+ if (op1->gtOper == GT_OBJ)
+ {
+ // Get the address of the refany
+ op1 = op1->gtOp.gtOp1;
- // The pointer may have side-effects
- if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
+ // Fetch the type from the correct slot
+ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
+ gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
+ op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
+ }
+ else
{
- impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
+ assertImp(op1->gtOper == GT_MKREFANY);
+
+ // The pointer may have side-effects
+ if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
+ {
+ impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
#ifdef DEBUG
- impNoteLastILoffs();
+ impNoteLastILoffs();
#endif
- }
+ }
- // We already have the class handle
- op1 = op1->gtOp.gtOp2;
- }
+ // We already have the class handle
+ op1 = op1->gtOp.gtOp2;
+ }
- // convert native TypeHandle to RuntimeTypeHandle
- {
- GenTreeArgList* helperArgs = gtNewArgList(op1);
+ // convert native TypeHandle to RuntimeTypeHandle
+ {
+ GenTreeArgList* helperArgs = gtNewArgList(op1);
- op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT, helperArgs);
+ op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
+ helperArgs);
- // The handle struct is returned in register
- op1->gtCall.gtReturnType = TYP_REF;
+ // The handle struct is returned in register
+ op1->gtCall.gtReturnType = TYP_REF;
- tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
- }
+ tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
+ }
- impPushOnStack(op1, tiRetVal);
- break;
+ impPushOnStack(op1, tiRetVal);
+ break;
- case CEE_LDTOKEN:
- {
+ case CEE_LDTOKEN:
+ {
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
lastLoadToken = codeAddr;
@@ -13032,19 +13501,20 @@ FIELD_DONE:
tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
- op1 = impTokenToHandle(&resolvedToken, NULL, TRUE);
- if (op1 == NULL) // compDonotInline()
+ op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
+ if (op1 == nullptr)
+ { // compDonotInline()
return;
+ }
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
- assert(resolvedToken.hClass != NULL);
+ assert(resolvedToken.hClass != nullptr);
- if (resolvedToken.hMethod != NULL)
+ if (resolvedToken.hMethod != nullptr)
{
helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
}
- else
- if (resolvedToken.hField != NULL)
+ else if (resolvedToken.hField != nullptr)
{
helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
}
@@ -13061,838 +13531,850 @@ FIELD_DONE:
}
break;
- case CEE_UNBOX:
- case CEE_UNBOX_ANY:
- {
- /* Get the Class index */
- assertImp(sz == sizeof(unsigned));
+ case CEE_UNBOX:
+ case CEE_UNBOX_ANY:
+ {
+ /* Get the Class index */
+ assertImp(sz == sizeof(unsigned));
- _impResolveToken(CORINFO_TOKENKIND_Class);
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- JITDUMP(" %08X", resolvedToken.token);
+ JITDUMP(" %08X", resolvedToken.token);
- BOOL runtimeLookup;
- op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
- if (op2 == NULL) // compDonotInline()
- return;
+ BOOL runtimeLookup;
+ op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
+ if (op2 == nullptr)
+ { // compDonotInline()
+ return;
+ }
- //Run this always so we can get access exceptions even with SkipVerification.
- accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
- impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
+ // Run this always so we can get access exceptions even with SkipVerification.
+ accessAllowedResult =
+ info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
+ impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
- if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
- {
+ if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
+ {
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiUnbox = impStackTop().seTypeInfo;
+ Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
+ tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
+ tiRetVal.NormaliseForStack();
+ }
+ op1 = impPopStack().val;
+ goto CASTCLASS;
+ }
+
+ /* Pop the object and create the unbox helper call */
+ /* You might think that for UNBOX_ANY we need to push a different */
+ /* (non-byref) type, but here we're making the tiRetVal that is used */
+ /* for the intermediate pointer which we then transfer onto the OBJ */
+ /* instruction. OBJ then creates the appropriate tiRetVal. */
if (tiVerificationNeeded)
{
typeInfo tiUnbox = impStackTop().seTypeInfo;
- Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
+ Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
+
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
- tiRetVal.NormaliseForStack();
+ Verify(tiRetVal.IsValueClass(), "not value class");
+ tiRetVal.MakeByRef();
+
+ // We always come from an objref, so this is safe byref
+ tiRetVal.SetIsPermanentHomeByRef();
+ tiRetVal.SetIsReadonlyByRef();
}
+
op1 = impPopStack().val;
- goto CASTCLASS;
- }
+ assertImp(op1->gtType == TYP_REF);
- /* Pop the object and create the unbox helper call */
- /* You might think that for UNBOX_ANY we need to push a different */
- /* (non-byref) type, but here we're making the tiRetVal that is used */
- /* for the intermediate pointer which we then transfer onto the OBJ */
- /* instruction. OBJ then creates the appropriate tiRetVal. */
- if (tiVerificationNeeded)
- {
- typeInfo tiUnbox = impStackTop().seTypeInfo;
- Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
-
- tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
- Verify(tiRetVal.IsValueClass(), "not value class");
- tiRetVal.MakeByRef();
+ helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
+ assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
- // We always come from an objref, so this is safe byref
- tiRetVal.SetIsPermanentHomeByRef();
- tiRetVal.SetIsReadonlyByRef();
- }
+ // We only want to expand inline the normal UNBOX helper;
+ expandInline = (helper == CORINFO_HELP_UNBOX);
- op1 = impPopStack().val;
- assertImp(op1->gtType == TYP_REF);
+ if (expandInline)
+ {
+ if (compCurBB->isRunRarely())
+ {
+ expandInline = false; // not worth the code expansion
+ }
+ }
- helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
- assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
+ if (expandInline)
+ {
+ // we are doing normal unboxing
+ // inline the common case of the unbox helper
+ // UNBOX(exp) morphs into
+ // clone = pop(exp);
+ // ((*clone == typeToken) ? nop : helper(clone, typeToken));
+ // push(clone + sizeof(void*))
+ //
+ GenTreePtr cloneOperand;
+ op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("inline UNBOX clone1"));
+ op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
+
+ GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
+
+ op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("inline UNBOX clone2"));
+ op2 = impTokenToHandle(&resolvedToken);
+ if (op2 == nullptr)
+ { // compDonotInline()
+ return;
+ }
+ args = gtNewArgList(op2, op1);
+ op1 = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
- // We only want to expand inline the normal UNBOX helper;
- expandInline = (helper == CORINFO_HELP_UNBOX);
+ op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
+ op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
+ condBox->gtFlags |= GTF_RELOP_QMARK;
- if (expandInline)
- {
- if (compCurBB->isRunRarely())
- expandInline = false; // not worth the code expansion
- }
+ // QMARK nodes cannot reside on the evaluation stack. Because there
+ // may be other trees on the evaluation stack that side-effect the
+ // sources of the UNBOX operation we must spill the stack.
- if (expandInline)
- {
- // we are doing normal unboxing
- // inline the common case of the unbox helper
- // UNBOX(exp) morphs into
- // clone = pop(exp);
- // ((*clone == typeToken) ? nop : helper(clone, typeToken));
- // push(clone + sizeof(void*))
- //
- GenTreePtr cloneOperand;
- op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, NULL DEBUGARG("inline UNBOX clone1") );
- op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
+ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
- GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
+ // Create the address-expression to reference past the object header
+ // to the beginning of the value-type. Today this means adjusting
+ // past the base of the object's vtable field, which is pointer sized.
- op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, NULL DEBUGARG("inline UNBOX clone2") );
- op2 = impTokenToHandle(&resolvedToken);
- if (op2 == NULL) // compDonotInline()
- return;
- args = gtNewArgList(op2, op1);
- op1 = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
+ op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
+ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
+ }
+ else
+ {
+ unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
- op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1
- );
- op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
- condBox->gtFlags |= GTF_RELOP_QMARK;
+ // Don't optimize, just call the helper and be done with it
+ args = gtNewArgList(op2, op1);
+ op1 = gtNewHelperCallNode(helper,
+ (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
+ callFlags, args);
+ }
- // QMARK nodes cannot reside on the evaluation stack. Because there
- // may be other trees on the evaluation stack that side-effect the
- // sources of the UNBOX operation we must spill the stack.
+ assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
+ helper == CORINFO_HELP_UNBOX_NULLABLE &&
+ varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
+ );
+
+ /*
+ ----------------------------------------------------------------------
+ | \ helper | | |
+ | \ | | |
+ | \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE |
+ | \ | (which returns a BYREF) | (which returns a STRUCT) | |
+ | opcode \ | | |
+ |---------------------------------------------------------------------
+ | UNBOX | push the BYREF | spill the STRUCT to a local, |
+ | | | push the BYREF to this local |
+ |---------------------------------------------------------------------
+ | UNBOX_ANY | push a GT_OBJ of | push the STRUCT |
+ | | the BYREF | For Linux when the |
+ | | | struct is returned in two |
+ | | | registers create a temp |
+ | | | which address is passed to |
+ | | | the unbox_nullable helper. |
+ |---------------------------------------------------------------------
+ */
- impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
+ if (opcode == CEE_UNBOX)
+ {
+ if (helper == CORINFO_HELP_UNBOX_NULLABLE)
+ {
+ // Unbox nullable helper returns a struct type.
+ // We need to spill it to a temp so that we can take the address of it.
+ // Here we need an unsafe value cls check, since the address of the struct is taken to be used
+ // further along and could potentially be exploitable.
- // Create the address-expression to reference past the object header
- // to the beginning of the value-type. Today this means adjusting
- // past the base of the objects vtable field which is pointer sized.
+ unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
+ lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
- op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
- op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
- }
- else
- {
- unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
+ op2 = gtNewLclvNode(tmp, TYP_STRUCT);
+ op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
+ assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
- // Don't optimize, just call the helper and be done with it
- args = gtNewArgList(op2, op1);
- op1 = gtNewHelperCallNode(helper, (var_types)((helper == CORINFO_HELP_UNBOX)?TYP_BYREF:TYP_STRUCT), callFlags, args);
- }
+ op2 = gtNewLclvNode(tmp, TYP_STRUCT);
+ op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
+ op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
+ }
- assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
- helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
- );
-
- /*
- ----------------------------------------------------------------------
- | \ helper | | |
- | \ | | |
- | \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE |
- | \ | (which returns a BYREF) | (which returns a STRUCT) | |
- | opcode \ | | |
- |---------------------------------------------------------------------
- | UNBOX | push the BYREF | spill the STRUCT to a local, |
- | | | push the BYREF to this local |
- |---------------------------------------------------------------------
- | UNBOX_ANY | push a GT_OBJ of | push the STRUCT |
- | | the BYREF | For Linux when the |
- | | | struct is returned in two |
- | | | registers create a temp |
- | | | which address is passed to |
- | | | the unbox_nullable helper. |
- |---------------------------------------------------------------------
- */
-
- if (opcode == CEE_UNBOX)
- {
- if (helper == CORINFO_HELP_UNBOX_NULLABLE)
- {
- // Unbox nullable helper returns a struct type.
- // We need to spill it to a temp so than can take the address of it.
- // Here we need unsafe value cls check, since the address of struct is taken to be used
- // further along and potetially be exploitable.
-
- unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
- lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
-
- op2 = gtNewLclvNode(tmp, TYP_STRUCT);
- op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
- assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
-
- op2 = gtNewLclvNode(tmp, TYP_STRUCT);
- op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
- op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
- }
-
- assert(op1->gtType == TYP_BYREF);
- assert(!tiVerificationNeeded || tiRetVal.IsByRef());
- }
- else
- {
- assert(opcode == CEE_UNBOX_ANY);
-
- if (helper == CORINFO_HELP_UNBOX)
- {
- // Normal unbox helper returns a TYP_BYREF.
- impPushOnStack(op1, tiRetVal);
- oper = GT_OBJ;
- goto OBJ;
- }
-
- assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
+ assert(op1->gtType == TYP_BYREF);
+ assert(!tiVerificationNeeded || tiRetVal.IsByRef());
+ }
+ else
+ {
+ assert(opcode == CEE_UNBOX_ANY);
+
+ if (helper == CORINFO_HELP_UNBOX)
+ {
+ // Normal unbox helper returns a TYP_BYREF.
+ impPushOnStack(op1, tiRetVal);
+ oper = GT_OBJ;
+ goto OBJ;
+ }
+
+ assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
#if FEATURE_MULTIREG_RET
- if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
- {
- // Unbox nullable helper returns a TYP_STRUCT.
- // For the multi-reg case we need to spill it to a temp so that
- // we can pass the address to the unbox_nullable jit helper.
+ if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
+ {
+ // Unbox nullable helper returns a TYP_STRUCT.
+ // For the multi-reg case we need to spill it to a temp so that
+ // we can pass the address to the unbox_nullable jit helper.
- unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
- lvaTable[tmp].lvIsMultiRegArg = true;
- lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
+ unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
+ lvaTable[tmp].lvIsMultiRegArg = true;
+ lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
- op2 = gtNewLclvNode(tmp, TYP_STRUCT);
- op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
- assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
+ op2 = gtNewLclvNode(tmp, TYP_STRUCT);
+ op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
+ assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
- op2 = gtNewLclvNode(tmp, TYP_STRUCT);
- op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
- op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
+ op2 = gtNewLclvNode(tmp, TYP_STRUCT);
+ op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
+ op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
- // In this case the return value of the unbox helper is TYP_BYREF.
- // Make sure the right type is placed on the operand type stack.
- impPushOnStack(op1, tiRetVal);
+ // In this case the return value of the unbox helper is TYP_BYREF.
+ // Make sure the right type is placed on the operand type stack.
+ impPushOnStack(op1, tiRetVal);
- // Load the struct.
- oper = GT_OBJ;
+ // Load the struct.
+ oper = GT_OBJ;
- assert(op1->gtType == TYP_BYREF);
- assert(!tiVerificationNeeded || tiRetVal.IsByRef());
+ assert(op1->gtType == TYP_BYREF);
+ assert(!tiVerificationNeeded || tiRetVal.IsByRef());
+
+ goto OBJ;
+ }
+ else
- goto OBJ;
- }
- else
-
#endif // !FEATURE_MULTIREG_RET
- {
- // If non register passable struct we have it materialized in the RetBuf.
- assert(op1->gtType == TYP_STRUCT);
- tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
- assert(tiRetVal.IsValueClass());
+ {
+ // If non register passable struct we have it materialized in the RetBuf.
+ assert(op1->gtType == TYP_STRUCT);
+ tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
+ assert(tiRetVal.IsValueClass());
+ }
}
+
+ impPushOnStack(op1, tiRetVal);
}
+ break;
- impPushOnStack(op1, tiRetVal);
- }
- break;
+ case CEE_BOX:
+ {
+ /* Get the Class index */
+ assertImp(sz == sizeof(unsigned));
- case CEE_BOX: {
- /* Get the Class index */
- assertImp(sz == sizeof(unsigned));
+ _impResolveToken(CORINFO_TOKENKIND_Box);
- _impResolveToken(CORINFO_TOKENKIND_Box);
+ JITDUMP(" %08X", resolvedToken.token);
- JITDUMP(" %08X", resolvedToken.token);
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiActual = impStackTop().seTypeInfo;
+ typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
- if (tiVerificationNeeded)
- {
- typeInfo tiActual = impStackTop().seTypeInfo;
- typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
-
- Verify(verIsBoxable(tiBox), "boxable type expected");
+ Verify(verIsBoxable(tiBox), "boxable type expected");
- //check the class constraints of the boxed type in case we are boxing an uninitialized value
- Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
- "boxed type has unsatisfied class constraints");
+ // check the class constraints of the boxed type in case we are boxing an uninitialized value
+ Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
+ "boxed type has unsatisfied class constraints");
- Verify(tiCompatibleWith(tiActual,tiBox.NormaliseForStack(), true), "type mismatch");
+ Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
- //Observation: the following code introduces a boxed value class on the stack, but,
- //according to the ECMA spec, one would simply expect: tiRetVal = typeInfo(TI_REF,impGetObjectClass());
+ // Observation: the following code introduces a boxed value class on the stack, but,
+ // according to the ECMA spec, one would simply expect: tiRetVal =
+ // typeInfo(TI_REF,impGetObjectClass());
- // Push the result back on the stack,
- // even if clsHnd is a value class we want the TI_REF
- // we call back to the EE to get find out what hte type we should push (for nullable<T> we push T)
- tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
- }
+ // Push the result back on the stack,
+ // even if clsHnd is a value class we want the TI_REF
+ // we call back to the EE to find out what type we should push (for nullable<T> we push T)
+ tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
+ }
- accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
- impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
+ accessAllowedResult =
+ info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
+ impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
- // Note BOX can be used on things that are not value classes, in which
- // case we get a NOP. However the verifier's view of the type on the
- // stack changes (in generic code a 'T' becomes a 'boxed T')
- if (!eeIsValueClass(resolvedToken.hClass))
- {
- verCurrentState.esStack[verCurrentState.esStackDepth-1].seTypeInfo = tiRetVal;
- break;
- }
+ // Note BOX can be used on things that are not value classes, in which
+ // case we get a NOP. However the verifier's view of the type on the
+ // stack changes (in generic code a 'T' becomes a 'boxed T')
+ if (!eeIsValueClass(resolvedToken.hClass))
+ {
+ verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
+ break;
+ }
- // Look ahead for unbox.any
- if (codeAddr+(sz+1+sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
- {
- DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
- if (!(classAttribs & CORINFO_FLG_SHAREDINST))
+ // Look ahead for unbox.any
+ if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
{
- CORINFO_RESOLVED_TOKEN unboxResolvedToken;
+ DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
+ if (!(classAttribs & CORINFO_FLG_SHAREDINST))
+ {
+ CORINFO_RESOLVED_TOKEN unboxResolvedToken;
- impResolveToken(codeAddr+(sz+1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
+ impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
- if (unboxResolvedToken.hClass == resolvedToken.hClass)
- {
- // Skip the next unbox.any instruction
- sz += sizeof(mdToken) + 1;
- break;
+ if (unboxResolvedToken.hClass == resolvedToken.hClass)
+ {
+ // Skip the next unbox.any instruction
+ sz += sizeof(mdToken) + 1;
+ break;
+ }
}
}
- }
- impImportAndPushBox(&resolvedToken);
- if (compDonotInline())
- return;
- }
- break;
+ impImportAndPushBox(&resolvedToken);
+ if (compDonotInline())
+ {
+ return;
+ }
+ }
+ break;
- case CEE_SIZEOF:
+ case CEE_SIZEOF:
- /* Get the Class index */
- assertImp(sz == sizeof(unsigned));
+ /* Get the Class index */
+ assertImp(sz == sizeof(unsigned));
- _impResolveToken(CORINFO_TOKENKIND_Class);
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- JITDUMP(" %08X", resolvedToken.token);
+ JITDUMP(" %08X", resolvedToken.token);
- if (tiVerificationNeeded)
- {
- tiRetVal = typeInfo(TI_INT);
- }
+ if (tiVerificationNeeded)
+ {
+ tiRetVal = typeInfo(TI_INT);
+ }
- op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
- impPushOnStack(op1, tiRetVal);
- break;
+ op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
+ impPushOnStack(op1, tiRetVal);
+ break;
- case CEE_CASTCLASS:
+ case CEE_CASTCLASS:
- /* Get the Class index */
+ /* Get the Class index */
- assertImp(sz == sizeof(unsigned));
+ assertImp(sz == sizeof(unsigned));
- _impResolveToken(CORINFO_TOKENKIND_Casting);
+ _impResolveToken(CORINFO_TOKENKIND_Casting);
- JITDUMP(" %08X", resolvedToken.token);
+ JITDUMP(" %08X", resolvedToken.token);
- if (!opts.IsReadyToRun())
- {
- op2 = impTokenToHandle(&resolvedToken, NULL, FALSE);
- if (op2 == NULL) // compDonotInline()
- return;
- }
+ if (!opts.IsReadyToRun())
+ {
+ op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
+ if (op2 == nullptr)
+ { // compDonotInline()
+ return;
+ }
+ }
- if (tiVerificationNeeded)
- {
- Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
- // box it
- tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
+ if (tiVerificationNeeded)
+ {
+ Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
+ // box it
+ tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
+ }
- }
+ accessAllowedResult =
+ info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
+ impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
- accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
- impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
-
- op1 = impPopStack().val;
+ op1 = impPopStack().val;
/* Pop the address and create the 'checked cast' helper call */
- // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
+ // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
// and op2 to contain code that creates the type handle corresponding to typeRef
- CASTCLASS:
+ CASTCLASS:
#ifdef FEATURE_READYTORUN_COMPILER
- if (opts.IsReadyToRun())
- {
- GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken,
- CORINFO_HELP_READYTORUN_CHKCAST,
- TYP_REF,
- gtNewArgList(op1));
- usingReadyToRunHelper = (opLookup != NULL);
- op1 = (usingReadyToRunHelper ? opLookup : op1);
-
- if (!usingReadyToRunHelper)
+ if (opts.IsReadyToRun())
{
- // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
- // and the chkcastany call with a single call to a dynamic R2R cell that will:
- // 1) Load the context
- // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
- // 3) Check the object on the stack for the type-cast
- // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
+ GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
+ TYP_REF, gtNewArgList(op1));
+ usingReadyToRunHelper = (opLookup != NULL);
+ op1 = (usingReadyToRunHelper ? opLookup : op1);
- op2 = impTokenToHandle(&resolvedToken, NULL, FALSE);
- if (op2 == NULL) // compDonotInline()
- return;
+ if (!usingReadyToRunHelper)
+ {
+ // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
+ // and the chkcastany call with a single call to a dynamic R2R cell that will:
+ // 1) Load the context
+ // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
+ // 3) Check the object on the stack for the type-cast
+ // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
+
+ op2 = impTokenToHandle(&resolvedToken, NULL, FALSE);
+ if (op2 == NULL) // compDonotInline()
+ return;
+ }
}
- }
- if (!usingReadyToRunHelper)
+ if (!usingReadyToRunHelper)
#endif
- {
- op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
- }
- if (compDonotInline())
- {
- return;
- }
-
- /* Push the result back on the stack */
- impPushOnStack(op1, tiRetVal);
- break;
+ {
+ op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
+ }
+ if (compDonotInline())
+ {
+ return;
+ }
- case CEE_THROW:
+ /* Push the result back on the stack */
+ impPushOnStack(op1, tiRetVal);
+ break;
- if (compIsForInlining())
- {
- // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- // TODO: Will this be too strict, given that we will inline many basic blocks?
- // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- /* Do we have just the exception on the stack ?*/
+ case CEE_THROW:
- if (verCurrentState.esStackDepth != 1)
+ if (compIsForInlining())
{
- /* if not, just don't inline the method */
+ // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ // TODO: Will this be too strict, given that we will inline many basic blocks?
+ // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
- return;
+ /* Do we have just the exception on the stack ?*/
+
+ if (verCurrentState.esStackDepth != 1)
+ {
+ /* if not, just don't inline the method */
+
+ compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
+ return;
+ }
+
+ /* Don't inline non-void conditionals that have a throw in one of the branches */
+
+ /* NOTE: If we do allow this, note that we can't simply do a
+ checkLiveness() to match the liveness at the end of the "then"
+ and "else" branches of the GT_COLON. The branch with the throw
+ will keep nothing live, so we should use the liveness at the
+ end of the non-throw branch. */
+
+ if (seenConditionalJump && (impInlineInfo->inlineCandidateInfo->fncRetType != TYP_VOID))
+ {
+ compInlineResult->NoteFatal(InlineObservation::CALLSITE_CONDITIONAL_THROW);
+ return;
+ }
}
-
- /* Don't inline non-void conditionals that have a throw in one of the branches */
-
- /* NOTE: If we do allow this, note that we can't simply do a
- checkLiveness() to match the liveness at the end of the "then"
- and "else" branches of the GT_COLON. The branch with the throw
- will keep nothing live, so we should use the liveness at the
- end of the non-throw branch. */
-
- if (seenConditionalJump && (impInlineInfo->inlineCandidateInfo->fncRetType != TYP_VOID))
- {
- compInlineResult->NoteFatal(InlineObservation::CALLSITE_CONDITIONAL_THROW);
- return;
- }
-
- }
- if (tiVerificationNeeded)
- {
- tiRetVal = impStackTop().seTypeInfo;
- Verify(tiRetVal.IsObjRef(), "object ref expected");
- if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
+ if (tiVerificationNeeded)
{
- Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
+ tiRetVal = impStackTop().seTypeInfo;
+ Verify(tiRetVal.IsObjRef(), "object ref expected");
+ if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
+ {
+ Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
+ }
}
- }
+ block->bbSetRunRarely(); // any block with a throw is rare
+ /* Pop the exception object and create the 'throw' helper call */
- block->bbSetRunRarely(); // any block with a throw is rare
- /* Pop the exception object and create the 'throw' helper call */
+ op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
- op1 = gtNewHelperCallNode(CORINFO_HELP_THROW,
- TYP_VOID,
- GTF_EXCEPT,
- gtNewArgList(impPopStack().val));
+ EVAL_APPEND:
+ if (verCurrentState.esStackDepth > 0)
+ {
+ impEvalSideEffects();
+ }
+ assert(verCurrentState.esStackDepth == 0);
-EVAL_APPEND:
- if (verCurrentState.esStackDepth > 0)
- impEvalSideEffects();
+ goto APPEND;
- assert(verCurrentState.esStackDepth == 0);
+ case CEE_RETHROW:
- goto APPEND;
-
- case CEE_RETHROW:
+ assert(!compIsForInlining());
- assert(!compIsForInlining());
-
- if (info.compXcptnsCount == 0)
- BADCODE("rethrow outside catch");
+ if (info.compXcptnsCount == 0)
+ {
+ BADCODE("rethrow outside catch");
+ }
- if (tiVerificationNeeded)
- {
- Verify(block->hasHndIndex(), "rethrow outside catch");
- if (block->hasHndIndex())
+ if (tiVerificationNeeded)
{
- EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
- Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
- if (HBtab->HasFilter())
+ Verify(block->hasHndIndex(), "rethrow outside catch");
+ if (block->hasHndIndex())
{
- // we better be in the handler clause part, not the filter part
- Verify (jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
- "rethrow in filter");
+ EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
+ Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
+ if (HBtab->HasFilter())
+ {
+ // we better be in the handler clause part, not the filter part
+ Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
+ "rethrow in filter");
+ }
}
}
- }
- /* Create the 'rethrow' helper call */
+ /* Create the 'rethrow' helper call */
- op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW,
- TYP_VOID,
- GTF_EXCEPT);
+ op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
- goto EVAL_APPEND;
+ goto EVAL_APPEND;
- case CEE_INITOBJ:
+ case CEE_INITOBJ:
- assertImp(sz == sizeof(unsigned));
-
- _impResolveToken(CORINFO_TOKENKIND_Class);
+ assertImp(sz == sizeof(unsigned));
- JITDUMP(" %08X", resolvedToken.token);
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- if (tiVerificationNeeded)
- {
- typeInfo tiTo = impStackTop().seTypeInfo;
- typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
+ JITDUMP(" %08X", resolvedToken.token);
- Verify(tiTo.IsByRef(), "byref expected");
- Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiTo = impStackTop().seTypeInfo;
+ typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
- Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
- "type operand incompatible with type of address");
+ Verify(tiTo.IsByRef(), "byref expected");
+ Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
- }
+ Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
+ "type operand incompatible with type of address");
+ }
- op3 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); // Size
- op2 = gtNewIconNode(0); // Value
- goto INITBLK_OR_INITOBJ;
+ op3 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); // Size
+ op2 = gtNewIconNode(0); // Value
+ goto INITBLK_OR_INITOBJ;
- case CEE_INITBLK:
+ case CEE_INITBLK:
- if (tiVerificationNeeded)
- Verify(false, "bad opcode");
+ if (tiVerificationNeeded)
+ {
+ Verify(false, "bad opcode");
+ }
- op3 = impPopStack().val; // Size
- op2 = impPopStack().val; // Value
+ op3 = impPopStack().val; // Size
+ op2 = impPopStack().val; // Value
-INITBLK_OR_INITOBJ:
- op1 = impPopStack().val; // Dest
- op1 = gtNewBlkOpNode(GT_INITBLK, op1, op2, op3, (prefixFlags & PREFIX_VOLATILE) != 0);
+ INITBLK_OR_INITOBJ:
+ op1 = impPopStack().val; // Dest
+ op1 = gtNewBlkOpNode(GT_INITBLK, op1, op2, op3, (prefixFlags & PREFIX_VOLATILE) != 0);
- goto SPILL_APPEND;
+ goto SPILL_APPEND;
+ case CEE_CPBLK:
- case CEE_CPBLK:
+ if (tiVerificationNeeded)
+ {
+ Verify(false, "bad opcode");
+ }
+ op3 = impPopStack().val; // Size
+ op2 = impPopStack().val; // Src
+ op1 = impPopStack().val; // Dest
+ op1 = gtNewBlkOpNode(GT_COPYBLK, op1, op2, op3, (prefixFlags & PREFIX_VOLATILE) != 0);
+ goto SPILL_APPEND;
- if (tiVerificationNeeded)
- Verify(false, "bad opcode");
- op3 = impPopStack().val; // Size
- op2 = impPopStack().val; // Src
- op1 = impPopStack().val; // Dest
- op1 = gtNewBlkOpNode(GT_COPYBLK, op1, op2, op3, (prefixFlags & PREFIX_VOLATILE) != 0);
- goto SPILL_APPEND;
+ case CEE_CPOBJ:
- case CEE_CPOBJ:
+ assertImp(sz == sizeof(unsigned));
- assertImp(sz == sizeof(unsigned));
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- _impResolveToken(CORINFO_TOKENKIND_Class);
+ JITDUMP(" %08X", resolvedToken.token);
- JITDUMP(" %08X", resolvedToken.token);
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiFrom = impStackTop().seTypeInfo;
+ typeInfo tiTo = impStackTop(1).seTypeInfo;
+ typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
+
+ Verify(tiFrom.IsByRef(), "expected byref source");
+ Verify(tiTo.IsByRef(), "expected byref destination");
+
+ Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
+ "type of source address incompatible with type operand");
+ Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
+ Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
+ "type operand incompatible with type of destination address");
+ }
- if (tiVerificationNeeded)
- {
- typeInfo tiFrom = impStackTop().seTypeInfo;
- typeInfo tiTo = impStackTop(1).seTypeInfo;
- typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
-
- Verify(tiFrom.IsByRef(), "expected byref source");
- Verify(tiTo.IsByRef(), "expected byref destination");
-
- Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
- "type of source address incompatible with type operand");
- Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
- Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
- "type operand incompatible with type of destination address");
- }
-
- if (!eeIsValueClass(resolvedToken.hClass))
- {
- op1 = impPopStack().val; // address to load from
-
- impBashVarAddrsToI(op1);
-
- assertImp(genActualType(op1->gtType) == TYP_I_IMPL ||
- op1->gtType == TYP_BYREF);
-
- op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
- op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
-
- impPushOnStackNoType(op1);
- opcode = CEE_STIND_REF;
- lclTyp = TYP_REF;
- goto STIND_POST_VERIFY;
- }
+ if (!eeIsValueClass(resolvedToken.hClass))
+ {
+ op1 = impPopStack().val; // address to load from
+ impBashVarAddrsToI(op1);
- op2 = impPopStack().val; // Src
- op1 = impPopStack().val; // Dest
- op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass,
- ((prefixFlags & PREFIX_VOLATILE) != 0));
- goto SPILL_APPEND;
+ assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
- case CEE_STOBJ: {
- assertImp(sz == sizeof(unsigned));
+ op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
+ op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
- _impResolveToken(CORINFO_TOKENKIND_Class);
+ impPushOnStackNoType(op1);
+ opcode = CEE_STIND_REF;
+ lclTyp = TYP_REF;
+ goto STIND_POST_VERIFY;
+ }
- JITDUMP(" %08X", resolvedToken.token);
+ op2 = impPopStack().val; // Src
+ op1 = impPopStack().val; // Dest
+ op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
+ goto SPILL_APPEND;
- if (eeIsValueClass(resolvedToken.hClass))
- {
- lclTyp = TYP_STRUCT;
- }
- else
+ case CEE_STOBJ:
{
- lclTyp = TYP_REF;
- }
+ assertImp(sz == sizeof(unsigned));
- if (tiVerificationNeeded)
- {
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- typeInfo tiPtr = impStackTop(1).seTypeInfo;
+ JITDUMP(" %08X", resolvedToken.token);
- // Make sure we have a good looking byref
- Verify(tiPtr.IsByRef(), "pointer not byref");
- Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
- if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
+ if (eeIsValueClass(resolvedToken.hClass))
{
- compUnsafeCastUsed = true;
+ lclTyp = TYP_STRUCT;
+ }
+ else
+ {
+ lclTyp = TYP_REF;
}
- typeInfo ptrVal = DereferenceByRef(tiPtr);
- typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
+ if (tiVerificationNeeded)
+ {
+
+ typeInfo tiPtr = impStackTop(1).seTypeInfo;
- if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
+ // Make sure we have a good looking byref
+ Verify(tiPtr.IsByRef(), "pointer not byref");
+ Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
+ if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
+ {
+ compUnsafeCastUsed = true;
+ }
+
+ typeInfo ptrVal = DereferenceByRef(tiPtr);
+ typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
+
+ if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
+ {
+ Verify(false, "type of value incompatible with type operand");
+ compUnsafeCastUsed = true;
+ }
+
+ if (!tiCompatibleWith(argVal, ptrVal, false))
+ {
+ Verify(false, "type operand incompatible with type of address");
+ compUnsafeCastUsed = true;
+ }
+ }
+ else
{
- Verify(false,
- "type of value incompatible with type operand");
compUnsafeCastUsed = true;
}
-
- if (!tiCompatibleWith(argVal, ptrVal, false))
+
+ if (lclTyp == TYP_REF)
{
- Verify(false,
- "type operand incompatible with type of address");
- compUnsafeCastUsed = true;
+ opcode = CEE_STIND_REF;
+ goto STIND_POST_VERIFY;
}
- }
- else
- {
- compUnsafeCastUsed = true;
- }
- if (lclTyp == TYP_REF)
- {
- opcode = CEE_STIND_REF;
- goto STIND_POST_VERIFY;
- }
+ CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
+ if (impIsPrimitive(jitTyp))
+ {
+ lclTyp = JITtype2varType(jitTyp);
+ goto STIND_POST_VERIFY;
+ }
- CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
- if (impIsPrimitive(jitTyp))
- {
- lclTyp = JITtype2varType(jitTyp);
- goto STIND_POST_VERIFY;
- }
-
- op2 = impPopStack().val; // Value
- op1 = impPopStack().val; // Ptr
+ op2 = impPopStack().val; // Value
+ op1 = impPopStack().val; // Ptr
- assertImp(varTypeIsStruct(op2));
+ assertImp(varTypeIsStruct(op2));
- op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
- goto SPILL_APPEND;
+ op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
+ goto SPILL_APPEND;
}
- case CEE_MKREFANY:
+ case CEE_MKREFANY:
- assert(!compIsForInlining());
+ assert(!compIsForInlining());
- // Being lazy here. Refanys are tricky in terms of gc tracking.
- // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
+ // Being lazy here. Refanys are tricky in terms of gc tracking.
+ // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
- JITDUMP("disabling struct promotion because of mkrefany\n");
- fgNoStructPromotion = true;
+ JITDUMP("disabling struct promotion because of mkrefany\n");
+ fgNoStructPromotion = true;
- oper = GT_MKREFANY;
- assertImp(sz == sizeof(unsigned));
+ oper = GT_MKREFANY;
+ assertImp(sz == sizeof(unsigned));
- _impResolveToken(CORINFO_TOKENKIND_Class);
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- JITDUMP(" %08X", resolvedToken.token);
+ JITDUMP(" %08X", resolvedToken.token);
- op2 = impTokenToHandle(&resolvedToken, NULL, TRUE);
- if (op2 == NULL) // compDonotInline()
- return;
+ op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
+ if (op2 == nullptr)
+ { // compDonotInline()
+ return;
+ }
- if (tiVerificationNeeded)
- {
- typeInfo tiPtr = impStackTop().seTypeInfo;
- typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiPtr = impStackTop().seTypeInfo;
+ typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
- Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
- Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
- Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
- }
+ Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
+ Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
+ Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
+ }
- accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
- impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
+ accessAllowedResult =
+ info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
+ impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
- op1 = impPopStack().val;
+ op1 = impPopStack().val;
- // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
- // But JIT32 allowed it, so we continue to allow it.
- assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
+ // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
+ // But JIT32 allowed it, so we continue to allow it.
+ assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
- // MKREFANY returns a struct. op2 is the class token.
- op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
+ // MKREFANY returns a struct. op2 is the class token.
+ op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
- impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
- break;
+ impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
+ break;
+ case CEE_LDOBJ:
+ {
+ oper = GT_OBJ;
+ assertImp(sz == sizeof(unsigned));
- case CEE_LDOBJ: {
- oper = GT_OBJ;
- assertImp(sz == sizeof(unsigned));
+ _impResolveToken(CORINFO_TOKENKIND_Class);
- _impResolveToken(CORINFO_TOKENKIND_Class);
+ JITDUMP(" %08X", resolvedToken.token);
- JITDUMP(" %08X", resolvedToken.token);
+ OBJ:
-OBJ:
+ tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
- tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiPtr = impStackTop().seTypeInfo;
- if (tiVerificationNeeded)
- {
- typeInfo tiPtr = impStackTop().seTypeInfo;
+ // Make sure we have a byref
+ if (!tiPtr.IsByRef())
+ {
+ Verify(false, "pointer not byref");
+ compUnsafeCastUsed = true;
+ }
+ typeInfo tiPtrVal = DereferenceByRef(tiPtr);
- // Make sure we have a byref
- if (!tiPtr.IsByRef())
+ if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
+ {
+ Verify(false, "type of address incompatible with type operand");
+ compUnsafeCastUsed = true;
+ }
+ tiRetVal.NormaliseForStack();
+ }
+ else
{
- Verify(false, "pointer not byref");
compUnsafeCastUsed = true;
}
- typeInfo tiPtrVal = DereferenceByRef(tiPtr);
- if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
+ if (eeIsValueClass(resolvedToken.hClass))
{
- Verify(false,
- "type of address incompatible with type operand");
- compUnsafeCastUsed = true;
+ lclTyp = TYP_STRUCT;
+ }
+ else
+ {
+ lclTyp = TYP_REF;
+ opcode = CEE_LDIND_REF;
+ goto LDIND_POST_VERIFY;
}
- tiRetVal.NormaliseForStack();
- }
- else
- {
- compUnsafeCastUsed = true;
- }
- if (eeIsValueClass(resolvedToken.hClass))
- {
- lclTyp = TYP_STRUCT;
- }
- else
- {
- lclTyp = TYP_REF;
- opcode = CEE_LDIND_REF;
- goto LDIND_POST_VERIFY;
- }
+ op1 = impPopStack().val;
- op1 = impPopStack().val;
+ assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
- assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
-
- CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
- if (impIsPrimitive(jitTyp))
- {
- op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
+ CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
+ if (impIsPrimitive(jitTyp))
+ {
+ op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
- // Could point anywhere, example a boxed class static int
- op1->gtFlags |= GTF_IND_TGTANYWHERE|GTF_GLOB_REF;
- assertImp(varTypeIsArithmetic(op1->gtType));
- }
- else
- {
- // OBJ returns a struct
- // and an inline argument which is the class token of the loaded obj
- op1 = gtNewObjNode(resolvedToken.hClass, op1);
- }
- op1->gtFlags |= GTF_EXCEPT;
-
- impPushOnStack(op1, tiRetVal);
- break;
- }
+ // Could point anywhere, example a boxed class static int
+ op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
+ assertImp(varTypeIsArithmetic(op1->gtType));
+ }
+ else
+ {
+ // OBJ returns a struct
+ // and an inline argument which is the class token of the loaded obj
+ op1 = gtNewObjNode(resolvedToken.hClass, op1);
+ }
+ op1->gtFlags |= GTF_EXCEPT;
- case CEE_LDLEN:
- if (tiVerificationNeeded)
- {
- typeInfo tiArray = impStackTop().seTypeInfo;
- Verify(verIsSDArray(tiArray), "bad array");
- tiRetVal = typeInfo(TI_INT);
+ impPushOnStack(op1, tiRetVal);
+ break;
}
+ case CEE_LDLEN:
+ if (tiVerificationNeeded)
+ {
+ typeInfo tiArray = impStackTop().seTypeInfo;
+ Verify(verIsSDArray(tiArray), "bad array");
+ tiRetVal = typeInfo(TI_INT);
+ }
+
op1 = impPopStack().val;
- if (!opts.MinOpts() && !opts.compDbgCode)
- {
- /* Use GT_ARR_LENGTH operator so rng check opts see this */
- GenTreeArrLen* arrLen = new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length)
- );
+ if (!opts.MinOpts() && !opts.compDbgCode)
+ {
+ /* Use GT_ARR_LENGTH operator so rng check opts see this */
+ GenTreeArrLen* arrLen =
+ new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
- /* Mark the block as containing a length expression */
+ /* Mark the block as containing a length expression */
- if (op1->gtOper == GT_LCL_VAR)
+ if (op1->gtOper == GT_LCL_VAR)
+ {
+ block->bbFlags |= BBF_HAS_IDX_LEN;
+ }
+
+ op1 = arrLen;
+ }
+ else
{
- block->bbFlags |= BBF_HAS_IDX_LEN;
+ /* Create the expression "*(array_addr + ArrLenOffs)" */
+ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
+ gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
+ op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
+ op1->gtFlags |= GTF_IND_ARR_LEN;
}
- op1 = arrLen;
- }
- else
- {
- /* Create the expression "*(array_addr + ArrLenOffs)" */
- op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
- gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
- op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
- op1->gtFlags |= GTF_IND_ARR_LEN;
- }
-
- /* An indirection will cause a GPF if the address is null */
- op1->gtFlags |= GTF_EXCEPT;
-
- /* Push the result back on the stack */
- impPushOnStack(op1, tiRetVal);
- break;
+ /* An indirection will cause a GPF if the address is null */
+ op1->gtFlags |= GTF_EXCEPT;
- case CEE_BREAK:
- op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
- goto SPILL_APPEND;
+ /* Push the result back on the stack */
+ impPushOnStack(op1, tiRetVal);
+ break;
- case CEE_NOP:
- if (opts.compDbgCode)
- {
- op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
+ case CEE_BREAK:
+ op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
goto SPILL_APPEND;
- }
- break;
- /******************************** NYI *******************************/
+ case CEE_NOP:
+ if (opts.compDbgCode)
+ {
+ op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
+ goto SPILL_APPEND;
+ }
+ break;
- case 0xCC:
- OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
+ /******************************** NYI *******************************/
- case CEE_ILLEGAL:
- case CEE_MACRO_END:
+ case 0xCC:
+ OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
- default:
- BADCODE3("unknown opcode", ": %02X", (int) opcode);
+ case CEE_ILLEGAL:
+ case CEE_MACRO_END:
+
+ default:
+ BADCODE3("unknown opcode", ": %02X", (int)opcode);
}
codeAddr += sz;
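
Taken together, the hunks above boil down to a few mechanical conventions: braces around every single-statement body, case bodies indented one level inside the switch, long argument lists wrapped to the column limit, NULL replaced by nullptr, and pointer declarators attached to the type. A small standalone before/after-style sketch of those conventions (all names invented for illustration; this is not code from this tree):

// Illustrative only: shows the post-reformat style (mandatory braces,
// case bodies indented inside the switch, nullptr, 'Type*' pointer style).
#include <cstdio>

struct Node
{
    int   kind;
    Node* next;
};

int CountKind(Node* head, int kind)
{
    int count = 0;
    for (Node* cur = head; cur != nullptr; cur = cur->next) // was: cur != NULL
    {
        switch (cur->kind)
        {
            case 0: // case bodies sit one level inside the switch
                break;

            default:
                if (cur->kind == kind) // single-statement bodies still get braces
                {
                    count++;
                }
                break;
        }
    }
    return count;
}

int main()
{
    Node b{1, nullptr};
    Node a{1, &b};
    printf("%d\n", CountKind(&a, 1)); // prints 2
    return 0;
}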
@@ -13912,13 +14394,11 @@ OBJ:
#endif
// Push a local/argument tree on the operand stack
-void Compiler::impPushVar(GenTree * op, typeInfo tiRetVal)
+void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
{
tiRetVal.NormaliseForStack();
-
- if (verTrackObjCtorInitState &&
- (verCurrentState.thisInitialized != TIS_Init) &&
- tiRetVal.IsThisPtr())
+
+ if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
{
tiRetVal.SetUninitialisedObjRef();
}
@@ -13933,9 +14413,13 @@ void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
var_types lclTyp;
if (lvaTable[lclNum].lvNormalizeOnLoad())
- lclTyp = lvaGetRealType (lclNum);
+ {
+ lclTyp = lvaGetRealType(lclNum);
+ }
else
+ {
lclTyp = lvaGetActualType(lclNum);
+ }
impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
}
@@ -13966,7 +14450,7 @@ void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
BADCODE("Bad IL");
}
- unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
+ unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
if (lclNum == info.compThisArg)
{
@@ -13977,14 +14461,13 @@ void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
}
}
-
// Load a local on the operand stack
// Shared by the various CEE_LDLOC opcodes
// ilLclNum is the local index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
{
- if (tiVerificationNeeded)
+ if (tiVerificationNeeded)
{
Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
Verify(info.compInitMem, "initLocals not set");
@@ -13999,14 +14482,14 @@ void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
}
// Get the local type
- var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
-
- typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
-
+ var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
+
+ typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
+
/* Have we allocated a temp for this local? */
unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
-
+
// All vars of inlined methods should be !lvNormalizeOnLoad()
assert(!lvaTable[lclNum].lvNormalizeOnLoad());
@@ -14015,7 +14498,7 @@ void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
}
else
- {
+ {
if (ilLclNum >= info.compMethodInfo->locals.numArgs)
{
BADCODE("Bad IL");
@@ -14024,7 +14507,7 @@ void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
unsigned lclNum = info.compArgsCount + ilLclNum;
impLoadVar(lclNum, offset);
- }
+ }
}
#ifdef _TARGET_ARM_
@@ -14049,8 +14532,8 @@ void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORIN
{
if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
{
- int hfaSlots = GetHfaCount(hClass);
- var_types hfaType = GetHfaType(hClass);
+ int hfaSlots = GetHfaCount(hClass);
+ var_types hfaType = GetHfaType(hClass);
// If we have varargs we morph the method's return type to be "int" irrespective of its original
// type: struct/float at importer because the ABI calls out return in integer registers.
@@ -14080,45 +14563,43 @@ GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HAN
return ret;
}
-#endif // FEATURE_MULTIREG_RET
+#endif // FEATURE_MULTIREG_RET
// do import for a return
// returns false if inlining was aborted
// opcode can be ret or call in the case of a tail.call
-bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &opcode)
+bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
{
if (tiVerificationNeeded)
{
verVerifyThisPtrInitialised();
unsigned expectedStack = 0;
- if (info.compRetType != TYP_VOID)
+ if (info.compRetType != TYP_VOID)
{
typeInfo tiVal = impStackTop().seTypeInfo;
- typeInfo tiDeclared = verMakeTypeInfo(info.compMethodInfo->args.retType,
- info.compMethodInfo->args.retTypeClass);
+ typeInfo tiDeclared =
+ verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
+
+ Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
- Verify(!verIsByRefLike(tiDeclared) ||
- verIsSafeToReturnByRef(tiVal)
- , "byref return");
-
Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
- expectedStack=1;
+ expectedStack = 1;
}
Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
}
-
- GenTree *op2 = 0;
- GenTree *op1 = 0;
- CORINFO_CLASS_HANDLE retClsHnd = NULL;
+
+ GenTree* op2 = nullptr;
+ GenTree* op1 = nullptr;
+ CORINFO_CLASS_HANDLE retClsHnd = nullptr;
if (info.compRetType != TYP_VOID)
{
StackEntry se = impPopStack(retClsHnd);
- op2 = se.val;
-
+ op2 = se.val;
+
if (!compIsForInlining())
- {
+ {
impBashVarAddrsToI(op2);
op2 = impImplicitIorI4Cast(op2, info.compRetType);
op2 = impImplicitR4orR8Cast(op2, info.compRetType);
@@ -14126,25 +14607,26 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
(varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
- (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
+ (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
#ifdef DEBUG
if (opts.compGcChecks && info.compRetType == TYP_REF)
{
// DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
- // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with one-return BB.
-
+ // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
+ // one-return BB.
+
assert(op2->gtType == TYP_REF);
-
- // confirm that the argument is a GC pointer (for debugging (GC stress))
+
+ // confirm that the argument is a GC pointer (for debugging (GC stress))
GenTreeArgList* args = gtNewArgList(op2);
- op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
+ op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
if (verbose)
{
printf("\ncompGcChecks tree:\n");
- gtDispTree(op2);
- }
+ gtDispTree(op2);
+ }
}
#endif
}
@@ -14163,13 +14645,13 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
// Make sure the type matches the original call.
- var_types returnType = genActualType(op2->gtType);
+ var_types returnType = genActualType(op2->gtType);
var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
{
originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
}
-
+
if (returnType != originalCallType)
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
@@ -14188,7 +14670,7 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
if (info.compRetNativeType != TYP_STRUCT)
{
// compRetNativeType is not TYP_STRUCT.
- // This implies it could be either a scalar type or SIMD vector type or
+ // This implies it could be either a scalar type or SIMD vector type or
// a struct type that can be normalized to a scalar type.
if (varTypeIsStruct(info.compRetType))
@@ -14201,7 +14683,7 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
else
{
// Do we have to normalize?
- var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
+ var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
fgCastNeeded(op2, fncRealRetType))
{
@@ -14213,7 +14695,7 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
{
assert(info.compRetNativeType != TYP_VOID && fgMoreThanOneReturnBlock());
-
+
// This is a bit of a workaround...
// If we are inlining a call that returns a struct, where the actual "native" return type is
// not a struct (for example, the struct is composed of exactly one int, and the native
@@ -14224,14 +14706,16 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
//
// inliner:
// // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
- // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
+ // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
+ // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
//
// inlinee:
// ...
// ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
// ret
// ...
- // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&, object&, class System.Func`1<!!0>)
+ // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
+ // object&, class System.Func`1<!!0>)
// ret
//
// In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
@@ -14252,9 +14736,7 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
restoreType = true;
}
- impAssignTempGen(lvaInlineeReturnSpillTemp,
- op2,
- se.seTypeInfo.GetClassHandle(),
+ impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
(unsigned)CHECK_SPILL_ALL);
GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
@@ -14273,8 +14755,8 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
// Better they spilled to the same temp.
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
- }
-#endif
+ }
+#endif
}
#ifdef DEBUG
@@ -14286,7 +14768,7 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
#endif
// Report the return expression
- impInlineInfo->retExpr = op2;
+ impInlineInfo->retExpr = op2;
}
else
{
@@ -14305,9 +14787,7 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
assert(info.compRetNativeType != TYP_VOID);
assert(fgMoreThanOneReturnBlock());
- impAssignTempGen(lvaInlineeReturnSpillTemp,
- op2,
- se.seTypeInfo.GetClassHandle(),
+ impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
(unsigned)CHECK_SPILL_ALL);
}
@@ -14318,8 +14798,8 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
// next ifdefs could be refactored in a single method with the ifdef inside.
if (IsHfa(retClsHnd))
{
- // Same as !IsHfa but just don't bother with impAssignStructPtr.
-#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+// Same as !IsHfa but just don't bother with impAssignStructPtr.
+#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
@@ -14327,7 +14807,8 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
if (retRegCount != 0)
{
// If single eightbyte, the return type would have been normalized and there won't be a temp var.
- // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - max allowed.)
+ // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
+ // max allowed.)
assert(retRegCount == MAX_RET_REG_COUNT);
// Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -14339,9 +14820,10 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
{
#if defined(_TARGET_ARM_)
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
-#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// The inlinee compiler has figured out the type of the temp already. Use it here.
- impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
+ impInlineInfo->retExpr =
+ gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
}
}
@@ -14365,7 +14847,8 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
if (!impInlineInfo->retExpr)
{
// The inlinee compiler has figured out the type of the temp already. Use it here.
- impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
+ impInlineInfo->retExpr =
+ gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
}
}
else
@@ -14377,23 +14860,21 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
#endif // defined(_TARGET_ARM64_)
{
assert(iciCall->AsCall()->HasRetBufArg());
- GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
+ GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
// spill temp only exists if there are multiple return points
if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
{
// if this is the first return we have seen set the retExpr
if (!impInlineInfo->retExpr)
{
- impInlineInfo->retExpr = impAssignStructPtr(
- dest,
- gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
- retClsHnd,
- (unsigned) CHECK_SPILL_ALL);
+ impInlineInfo->retExpr =
+ impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
+ retClsHnd, (unsigned)CHECK_SPILL_ALL);
}
}
else
{
- impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned) CHECK_SPILL_ALL);
+ impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
}
}
}
@@ -14401,7 +14882,7 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
}
if (compIsForInlining())
- {
+ {
return true;
}
@@ -14418,29 +14899,29 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
- // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
+ // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(_TARGET_AMD64_)
- // x64 (System V and Win64) calling convention requires to
+ // x64 (System V and Win64) calling convention requires to
// return the implicit return buffer explicitly (in RAX).
- // Change the return type to be BYREF.
+ // Change the return type to be BYREF.
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
-#else // !defined(_TARGET_AMD64_)
- // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX).
- // In such case the return value of the function is changed to BYREF.
- // If profiler hook is not needed the return type of the function is TYP_VOID.
+#else // !defined(_TARGET_AMD64_)
+ // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX).
+ // In such case the return value of the function is changed to BYREF.
+ // If profiler hook is not needed the return type of the function is TYP_VOID.
if (compIsProfilerHookNeeded())
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
else
{
- // return void
+ // return void
op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
}
-#endif // !defined(_TARGET_AMD64_)
+#endif // !defined(_TARGET_AMD64_)
}
else if (varTypeIsStruct(info.compRetType))
{
@@ -14458,7 +14939,7 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
// return op2
op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
}
-
+
// We must have imported a tailcall and jumped to RET
if (prefixFlags & PREFIX_TAILCALL)
{
@@ -14475,7 +14956,9 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
// impImportCall() would have already appended TYP_VOID calls
if (info.compRetType == TYP_VOID)
+ {
return true;
+ }
}
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
@@ -14492,12 +14975,13 @@ bool Compiler::impReturnInstruction(BasicBlock *block, int prefixFlags, OPCODE &
* with the appropriate stack-state
*/
-inline
-void Compiler::impReimportMarkBlock(BasicBlock * block)
+inline void Compiler::impReimportMarkBlock(BasicBlock* block)
{
#ifdef DEBUG
if (verbose && (block->bbFlags & BBF_IMPORTED))
+ {
printf("\nBB%02u will be reimported\n", block->bbNum);
+ }
#endif
block->bbFlags &= ~BBF_IMPORTED;
@@ -14509,7 +14993,7 @@ void Compiler::impReimportMarkBlock(BasicBlock * block)
* for all the successors, with the appropriate stack-state.
*/
-void Compiler::impReimportMarkSuccessors(BasicBlock * block)
+void Compiler::impReimportMarkSuccessors(BasicBlock* block)
{
for (unsigned i = 0; i < block->NumSucc(); i++)
{
@@ -14533,14 +15017,13 @@ LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID
return EXCEPTION_CONTINUE_SEARCH;
}
-void Compiler::impVerifyEHBlock(BasicBlock * block,
- bool isTryStart)
+void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
{
assert(block->hasTryIndex());
assert(!compIsForInlining());
-
- unsigned tryIndex = block->getTryIndex();
- EHblkDsc * HBtab = ehGetDsc(tryIndex);
+
+ unsigned tryIndex = block->getTryIndex();
+ EHblkDsc* HBtab = ehGetDsc(tryIndex);
if (isTryStart)
{
@@ -14556,10 +15039,10 @@ void Compiler::impVerifyEHBlock(BasicBlock * block,
// Save the stack contents, we'll need to restore it later
//
- SavedStack blockState;
+ SavedStack blockState;
impSaveStackState(&blockState, false);
- while (HBtab != NULL)
+ while (HBtab != nullptr)
{
if (isTryStart)
{
@@ -14572,7 +15055,8 @@ void Compiler::impVerifyEHBlock(BasicBlock * block,
//
if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
{
- BADCODE("The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
+ BADCODE(
+ "The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
}
else
{
@@ -14582,10 +15066,10 @@ void Compiler::impVerifyEHBlock(BasicBlock * block,
}
/* Recursively process the handler block */
- BasicBlock * hndBegBB = HBtab->ebdHndBeg;
+ BasicBlock* hndBegBB = HBtab->ebdHndBeg;
// Construct the proper verification stack state
- // either empty or one that contains just
+ // either empty or one that contains just
// the Exception Object that we are dealing with
//
verCurrentState.esStackDepth = 0;
@@ -14595,15 +15079,17 @@ void Compiler::impVerifyEHBlock(BasicBlock * block,
CORINFO_CLASS_HANDLE clsHnd;
if (HBtab->HasFilter())
+ {
clsHnd = impGetObjectClass();
+ }
else
{
CORINFO_RESOLVED_TOKEN resolvedToken;
resolvedToken.tokenContext = impTokenLookupContextHandle;
- resolvedToken.tokenScope = info.compScopeHnd;
- resolvedToken.token = HBtab->ebdTyp;
- resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
+ resolvedToken.tokenScope = info.compScopeHnd;
+ resolvedToken.token = HBtab->ebdTyp;
+ resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
info.compCompHnd->resolveToken(&resolvedToken);
clsHnd = resolvedToken.hClass;
@@ -14628,7 +15114,7 @@ void Compiler::impVerifyEHBlock(BasicBlock * block,
verCurrentState.esStackDepth = 0;
- BasicBlock * filterBB = HBtab->ebdFilter;
+ BasicBlock* filterBB = HBtab->ebdFilter;
// push catch arg the stack, spill to a temp if necessary
// Note: can update HBtab->ebdFilter!
@@ -14653,7 +15139,7 @@ void Compiler::impVerifyEHBlock(BasicBlock * block,
tryIndex = HBtab->ebdEnclosingTryIndex;
if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
{
- HBtab = NULL;
+ HBtab = nullptr;
}
else
{
@@ -14672,9 +15158,9 @@ void Compiler::impVerifyEHBlock(BasicBlock * block,
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-void Compiler::impImportBlock(BasicBlock *block)
+void Compiler::impImportBlock(BasicBlock* block)
{
// BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
// handle them specially. In particular, there is no IL to import for them, but we do need
@@ -14692,7 +15178,7 @@ void Compiler::impImportBlock(BasicBlock *block)
return;
}
- bool markImport;
+ bool markImport;
assert(block);
@@ -14709,19 +15195,19 @@ void Compiler::impImportBlock(BasicBlock *block)
/* Set the current stack state to the merged result */
verResetCurrentState(block, &verCurrentState);
- /* Now walk the code and import the IL into GenTrees */
+ /* Now walk the code and import the IL into GenTrees */
struct FilterVerificationExceptionsParam
{
- Compiler *pThis;
- BasicBlock *block;
+ Compiler* pThis;
+ BasicBlock* block;
};
FilterVerificationExceptionsParam param;
param.pThis = this;
param.block = block;
- PAL_TRY(FilterVerificationExceptionsParam *, pParam, &param)
+ PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
{
/* @VERIFICATION : For now, the only state propagation from try
           to its handler is "thisInit" state (stack is empty at start of try).
@@ -14729,27 +15215,27 @@ void Compiler::impImportBlock(BasicBlock *block)
model the possibility that an exception might happen at any IL
instruction, so we really need to merge all states that obtain
between IL instructions in a try block into the start states of
- all handlers.
-
- However we do not allow the 'this' pointer to be uninitialized when
- entering most kinds try regions (only try/fault are allowed to have
+ all handlers.
+
+ However we do not allow the 'this' pointer to be uninitialized when
+ entering most kinds try regions (only try/fault are allowed to have
an uninitialized this pointer on entry to the try)
Fortunately, the stack is thrown away when an exception
- leads to a handler, so we don't have to worry about that.
- We DO, however, have to worry about the "thisInit" state.
+ leads to a handler, so we don't have to worry about that.
+ We DO, however, have to worry about the "thisInit" state.
But only for the try/fault case.
-
- The only allowed transition is from TIS_Uninit to TIS_Init.
- So for a try/fault region for the fault handler block
- we will merge the start state of the try begin
+ The only allowed transition is from TIS_Uninit to TIS_Init.
+
+ So for a try/fault region for the fault handler block
+ we will merge the start state of the try begin
and the post-state of each block that is part of this try region
*/
- // merge the start state of the try begin
+ // merge the start state of the try begin
//
- if (pParam->block->bbFlags & BBF_TRY_BEG)
+ if (pParam->block->bbFlags & BBF_TRY_BEG)
{
pParam->pThis->impVerifyEHBlock(pParam->block, true);
}
@@ -14757,21 +15243,23 @@ void Compiler::impImportBlock(BasicBlock *block)
pParam->pThis->impImportBlockCode(pParam->block);
// As discussed above:
- // merge the post-state of each block that is part of this try region
- //
- if (pParam->block->hasTryIndex())
+ // merge the post-state of each block that is part of this try region
+ //
+ if (pParam->block->hasTryIndex())
{
pParam->pThis->impVerifyEHBlock(pParam->block, false);
}
}
- PAL_EXCEPT_FILTER(FilterVerificationExceptions)
+ PAL_EXCEPT_FILTER(FilterVerificationExceptions)
{
verHandleVerificationFailure(block DEBUGARG(false));
}
PAL_ENDTRY
if (compDonotInline())
+ {
return;
+ }
assert(!compDonotInline());
@@ -14779,119 +15267,118 @@ void Compiler::impImportBlock(BasicBlock *block)
SPILLSTACK:
- unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
+ unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
bool reimportSpillClique = false;
- BasicBlock* tgtBlock = nullptr;
+ BasicBlock* tgtBlock = nullptr;
/* If the stack is non-empty, we might have to spill its contents */
- if (verCurrentState.esStackDepth != 0)
+ if (verCurrentState.esStackDepth != 0)
{
- impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
- // on the stack, its lifetime is hard to determine, simply
- // don't reuse such temps.
+ impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
+ // on the stack, its lifetime is hard to determine, simply
+ // don't reuse such temps.
- GenTreePtr addStmt = 0;
+ GenTreePtr addStmt = nullptr;
/* Do the successors of 'block' have any other predecessors ?
We do not want to do some of the optimizations related to multiRef
if we can reimport blocks */
- unsigned multRef = impCanReimport ? unsigned(~0) : 0;
+ unsigned multRef = impCanReimport ? unsigned(~0) : 0;
switch (block->bbJumpKind)
{
- case BBJ_COND:
+ case BBJ_COND:
- /* Temporarily remove the 'jtrue' from the end of the tree list */
+ /* Temporarily remove the 'jtrue' from the end of the tree list */
- assert(impTreeLast);
- assert(impTreeLast ->gtOper == GT_STMT );
- assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
+ assert(impTreeLast);
+ assert(impTreeLast->gtOper == GT_STMT);
+ assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
- addStmt = impTreeLast;
- impTreeLast = impTreeLast->gtPrev;
+ addStmt = impTreeLast;
+ impTreeLast = impTreeLast->gtPrev;
- /* Note if the next block has more than one ancestor */
+ /* Note if the next block has more than one ancestor */
- multRef |= block->bbNext->bbRefs;
+ multRef |= block->bbNext->bbRefs;
- /* Does the next block have temps assigned? */
+ /* Does the next block have temps assigned? */
- baseTmp = block->bbNext->bbStkTempsIn;
- tgtBlock = block->bbNext;
+ baseTmp = block->bbNext->bbStkTempsIn;
+ tgtBlock = block->bbNext;
- if (baseTmp != NO_BASE_TMP)
- {
- break;
- }
+ if (baseTmp != NO_BASE_TMP)
+ {
+ break;
+ }
- /* Try the target of the jump then */
+ /* Try the target of the jump then */
- multRef |= block->bbJumpDest->bbRefs;
- baseTmp = block->bbJumpDest->bbStkTempsIn;
- tgtBlock = block->bbJumpDest;
- break;
+ multRef |= block->bbJumpDest->bbRefs;
+ baseTmp = block->bbJumpDest->bbStkTempsIn;
+ tgtBlock = block->bbJumpDest;
+ break;
- case BBJ_ALWAYS:
- multRef |= block->bbJumpDest->bbRefs;
- baseTmp = block->bbJumpDest->bbStkTempsIn;
- tgtBlock = block->bbJumpDest;
- break;
+ case BBJ_ALWAYS:
+ multRef |= block->bbJumpDest->bbRefs;
+ baseTmp = block->bbJumpDest->bbStkTempsIn;
+ tgtBlock = block->bbJumpDest;
+ break;
- case BBJ_NONE:
- multRef |= block->bbNext->bbRefs;
- baseTmp = block->bbNext->bbStkTempsIn;
- tgtBlock = block->bbNext;
- break;
+ case BBJ_NONE:
+ multRef |= block->bbNext->bbRefs;
+ baseTmp = block->bbNext->bbStkTempsIn;
+ tgtBlock = block->bbNext;
+ break;
- case BBJ_SWITCH:
+ case BBJ_SWITCH:
- BasicBlock * * jmpTab;
- unsigned jmpCnt;
+ BasicBlock** jmpTab;
+ unsigned jmpCnt;
- /* Temporarily remove the GT_SWITCH from the end of the tree list */
+ /* Temporarily remove the GT_SWITCH from the end of the tree list */
- assert(impTreeLast);
- assert(impTreeLast ->gtOper == GT_STMT );
- assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
+ assert(impTreeLast);
+ assert(impTreeLast->gtOper == GT_STMT);
+ assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
- addStmt = impTreeLast;
- impTreeLast = impTreeLast->gtPrev;
+ addStmt = impTreeLast;
+ impTreeLast = impTreeLast->gtPrev;
- jmpCnt = block->bbJumpSwt->bbsCount;
- jmpTab = block->bbJumpSwt->bbsDstTab;
+ jmpCnt = block->bbJumpSwt->bbsCount;
+ jmpTab = block->bbJumpSwt->bbsDstTab;
- do
- {
- tgtBlock = (*jmpTab);
+ do
+ {
+ tgtBlock = (*jmpTab);
- multRef |= tgtBlock->bbRefs;
+ multRef |= tgtBlock->bbRefs;
- // Thanks to spill cliques, we should have assigned all or none
- assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
- baseTmp = tgtBlock->bbStkTempsIn;
- if (multRef > 1)
- {
- break;
- }
- }
- while (++jmpTab, --jmpCnt);
+ // Thanks to spill cliques, we should have assigned all or none
+ assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
+ baseTmp = tgtBlock->bbStkTempsIn;
+ if (multRef > 1)
+ {
+ break;
+ }
+ } while (++jmpTab, --jmpCnt);
- break;
+ break;
- case BBJ_CALLFINALLY:
- case BBJ_EHCATCHRET:
- case BBJ_RETURN:
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- case BBJ_THROW:
- NO_WAY("can't have 'unreached' end of BB with non-empty stack");
- break;
+ case BBJ_CALLFINALLY:
+ case BBJ_EHCATCHRET:
+ case BBJ_RETURN:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ case BBJ_THROW:
+ NO_WAY("can't have 'unreached' end of BB with non-empty stack");
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
assert(multRef >= 1);
@@ -14900,13 +15387,13 @@ SPILLSTACK:
bool newTemps = (baseTmp == NO_BASE_TMP);
- if (newTemps)
+ if (newTemps)
{
/* Grab enough temps for the whole stack */
baseTmp = impGetSpillTmpBase(block);
}
-
- /* Spill all stack entries into temps */
+
+ /* Spill all stack entries into temps */
unsigned level, tempNum;
JITDUMP("\nSpilling stack entries into temps\n");
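
The spill logic above handles a block that ends while the IL evaluation stack is still non-empty: each stack entry is stored into a numbered temp shared with the successor blocks (baseTmp plus the stack level), so the successors can reload the same values. A rough standalone sketch of that idea, with invented names rather than the JIT's real data structures:

// Hypothetical sketch: "store to temp" stands in for impSpillStackEntry.
#include <cstdio>
#include <vector>

struct Temp
{
    int value;
};

void SpillStack(std::vector<int>& evalStack, std::vector<Temp>& temps, int baseTmp)
{
    for (size_t level = 0; level < evalStack.size(); level++)
    {
        temps[baseTmp + level].value = evalStack[level]; // successor reloads temps[baseTmp + level]
    }
    evalStack.clear(); // the block now ends with an empty stack
}

int main()
{
    std::vector<int>  stack = {42, 7};
    std::vector<Temp> temps(4);
    SpillStack(stack, temps, 2); // successors will read temps[2] and temps[3]
    printf("%d %d\n", temps[2].value, temps[3].value);
    return 0;
}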
@@ -14915,15 +15402,14 @@ SPILLSTACK:
GenTreePtr tree = verCurrentState.esStack[level].val;
/* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
- the other. This should merge to a byref in unverifiable code.
+ the other. This should merge to a byref in unverifiable code.
However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
successor would be imported assuming there was a TYP_I_IMPL on
the stack. Thus the value would not get GC-tracked. Hence,
change the temp to TYP_BYREF and reimport the successors.
Note: We should only allow this in unverifiable code.
*/
- if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL &&
- !verNeedsVerification())
+ if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
{
lvaTable[tempNum].lvType = TYP_BYREF;
impReimportMarkSuccessors(block);
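
The comment above describes the core of the spill-clique typing rule, and the hunks that follow apply the same pattern to native int and double: if a later predecessor spills a wider or GC-tracked type into a temp that was first typed from a narrower value, the temp's type is widened and the blocks that already consumed the old type are re-imported. A minimal model of that rule, using invented names rather than the JIT's types:

// Sketch only: models "widen the temp and re-import dependents".
#include <cstdio>

enum VarType
{
    TYP_INT,
    TYP_I_IMPL, // native int
    TYP_BYREF
};

bool WidensTo(VarType from, VarType to)
{
    return (from == TYP_INT) && (to == TYP_I_IMPL || to == TYP_BYREF);
}

// Returns true if blocks that used the old temp type must be re-imported.
bool MergeSpillType(VarType& tempType, VarType treeType)
{
    if (WidensTo(tempType, treeType))
    {
        tempType = treeType; // e.g. int -> byref, so the value stays GC-tracked
        return true;         // re-import the spill clique
    }
    return false;
}

int main()
{
    VarType temp      = TYP_INT;
    bool    reimport  = MergeSpillType(temp, TYP_BYREF);
    printf("reimport=%d type=%d\n", reimport ? 1 : 0, (int)temp);
    return 0;
}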
@@ -14931,15 +15417,13 @@ SPILLSTACK:
}
#ifdef _TARGET_64BIT_
- if (genActualType(tree->gtType) == TYP_I_IMPL &&
- lvaTable[tempNum].lvType == TYP_INT)
+ if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
{
- if(tiVerificationNeeded &&
- tgtBlock->bbEntryState != nullptr &&
- (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
+ if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
+ (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
{
- // Merge the current state into the entry state of block;
- // the call to verMergeEntryStates must have changed
+ // Merge the current state into the entry state of block;
+ // the call to verMergeEntryStates must have changed
// the entry state of the block by merging the int local var
// and the native-int stack entry.
bool changed = false;
@@ -14959,7 +15443,7 @@ SPILLSTACK:
// Some other block in the spill clique set this to "int", but now we have "native int".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_I_IMPL;
- reimportSpillClique = true;
+ reimportSpillClique = true;
}
else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
{
@@ -14978,13 +15462,12 @@ SPILLSTACK:
// the 'byref' size.
if (!tiVerificationNeeded)
{
- if (genActualType(tree->gtType) == TYP_BYREF &&
- lvaTable[tempNum].lvType == TYP_INT)
+ if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
{
// Some other block in the spill clique set this to "int", but now we have "byref".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_BYREF;
- reimportSpillClique = true;
+ reimportSpillClique = true;
}
else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
{
@@ -15013,7 +15496,7 @@ SPILLSTACK:
// Some other block in the spill clique set this to "float", but now we have "double".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_DOUBLE;
- reimportSpillClique = true;
+ reimportSpillClique = true;
}
else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
{
@@ -15028,8 +15511,7 @@ SPILLSTACK:
are spilling to the temps already used by a previous block),
we need to spill addStmt */
- if (addStmt && !newTemps &&
- gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
+ if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
{
GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
@@ -15044,7 +15526,7 @@ SPILLSTACK:
{
unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
- type = genActualType(lvaTable[temp].TypeGet());
+ type = genActualType(lvaTable[temp].TypeGet());
relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
}
@@ -15052,14 +15534,13 @@ SPILLSTACK:
{
unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
- type = genActualType(lvaTable[temp].TypeGet());
+ type = genActualType(lvaTable[temp].TypeGet());
relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
}
}
else
{
- assert(addTree->gtOper == GT_SWITCH &&
- genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
+ assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
@@ -15069,28 +15550,31 @@ SPILLSTACK:
/* Spill the stack entry, and replace with the temp */
-
if (!impSpillStackEntry(level, tempNum
-#ifdef DEBUG
- ,true
- , "Spill Stack Entry"
+#ifdef DEBUG
+ ,
+ true, "Spill Stack Entry"
#endif
- ))
+ ))
{
if (markImport)
+ {
BADCODE("bad stack state");
+ }
// Oops. Something went wrong when spilling. Bad code.
verHandleVerificationFailure(block DEBUGARG(true));
- goto SPILLSTACK;
+ goto SPILLSTACK;
}
}
/* Put back the 'jtrue'/'switch' if we removed it earlier */
- if (addStmt)
+ if (addStmt)
+ {
impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
+ }
}
// Some of the append/spill logic works on compCurBB
@@ -15122,7 +15606,7 @@ SPILLSTACK:
}
}
}
- else // the normal case
+ else // the normal case
{
// otherwise just import the successors of block
@@ -15144,11 +15628,13 @@ SPILLSTACK:
// impPendingBlockMembers). Merges the current verification state into the verification state of "block"
// (its "pre-state").
-void Compiler::impImportBlockPending(BasicBlock * block)
+void Compiler::impImportBlockPending(BasicBlock* block)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
+ }
#endif
// We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
@@ -15161,8 +15647,7 @@ void Compiler::impImportBlockPending(BasicBlock * block)
// Initialize bbEntryState just the first time we try to add this block to the pending list
// Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set
// We use NULL to indicate the 'common' state to avoid memory allocation
- if ((block->bbEntryState == NULL) &&
- ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
+ if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
(impGetPendingBlockMember(block) == 0))
{
verInitBBEntryState(block, &verCurrentState);
@@ -15178,11 +15663,12 @@ void Compiler::impImportBlockPending(BasicBlock * block)
{
#ifdef DEBUG
char buffer[400];
- sprintf_s(buffer, sizeof(buffer), "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
- "Previous depth was %d, current depth is %d",
- block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName,
- block->bbStkDepth, verCurrentState.esStackDepth);
- buffer[400-1] = 0;
+ sprintf_s(buffer, sizeof(buffer),
+ "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
+ "Previous depth was %d, current depth is %d",
+ block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
+ verCurrentState.esStackDepth);
+ buffer[400 - 1] = 0;
NO_WAY(buffer);
#else
NO_WAY("Block entered with different stack depths");
@@ -15203,13 +15689,15 @@ void Compiler::impImportBlockPending(BasicBlock * block)
else if (changed)
{
addToPending = true;
-
+
JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
}
}
if (!addToPending)
+ {
return;
+ }
if (block->bbStkDepth > 0)
{
@@ -15220,17 +15708,19 @@ void Compiler::impImportBlockPending(BasicBlock * block)
// OK, we must add to the pending list, if it's not already in it.
if (impGetPendingBlockMember(block) != 0)
+ {
return;
+ }
}
// Get an entry to add to the pending list
- PendingDsc * dsc;
+ PendingDsc* dsc;
if (impPendingFree)
{
// We can reuse one of the freed up dscs.
- dsc = impPendingFree;
+ dsc = impPendingFree;
impPendingFree = dsc->pdNext;
}
else
@@ -15239,19 +15729,21 @@ void Compiler::impImportBlockPending(BasicBlock * block)
dsc = new (this, CMK_Unknown) PendingDsc;
}
- dsc->pdBB = block;
+ dsc->pdBB = block;
dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
- dsc->pdThisPtrInit = verCurrentState.thisInitialized;
+ dsc->pdThisPtrInit = verCurrentState.thisInitialized;
// Save the stack trees for later
if (verCurrentState.esStackDepth)
+ {
impSaveStackState(&dsc->pdSavedStack, false);
+ }
// Add the entry to the pending list
- dsc->pdNext = impPendingList;
- impPendingList = dsc;
+ dsc->pdNext = impPendingList;
+ impPendingList = dsc;
impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
// Various assertions require us to now to consider the block as not imported (at least for
@@ -15259,8 +15751,10 @@ void Compiler::impImportBlockPending(BasicBlock * block)
block->bbFlags &= ~BBF_IMPORTED;
#ifdef DEBUG
- if (verbose&&0) printf("Added PendingDsc - %08p for BB%02u\n",
- dspPtr(dsc), block->bbNum);
+ if (verbose && 0)
+ {
+ printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
+ }
#endif
}
@@ -15270,23 +15764,26 @@ void Compiler::impImportBlockPending(BasicBlock * block)
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
-void Compiler::impReimportBlockPending(BasicBlock * block)
+void Compiler::impReimportBlockPending(BasicBlock* block)
{
JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
assert(block->bbFlags & BBF_IMPORTED);
// OK, we must add to the pending list, if it's not already in it.
- if (impGetPendingBlockMember(block) != 0) return;
+ if (impGetPendingBlockMember(block) != 0)
+ {
+ return;
+ }
// Get an entry to add to the pending list
- PendingDsc * dsc;
+ PendingDsc* dsc;
if (impPendingFree)
{
// We can reuse one of the freed up dscs.
- dsc = impPendingFree;
+ dsc = impPendingFree;
impPendingFree = dsc->pdNext;
}
else
@@ -15295,7 +15792,7 @@ void Compiler::impReimportBlockPending(BasicBlock * block)
dsc = new (this, CMK_ImpStack) PendingDsc;
}
- dsc->pdBB = block;
+ dsc->pdBB = block;
if (block->bbEntryState)
{
@@ -15307,13 +15804,13 @@ void Compiler::impReimportBlockPending(BasicBlock * block)
{
dsc->pdThisPtrInit = TIS_Bottom;
dsc->pdSavedStack.ssDepth = 0;
- dsc->pdSavedStack.ssTrees = NULL;
+ dsc->pdSavedStack.ssTrees = nullptr;
}
// Add the entry to the pending list
- dsc->pdNext = impPendingList;
- impPendingList = dsc;
+ dsc->pdNext = impPendingList;
+ impPendingList = dsc;
impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
// Various assertions require us to now to consider the block as not imported (at least for
@@ -15321,21 +15818,22 @@ void Compiler::impReimportBlockPending(BasicBlock * block)
block->bbFlags &= ~BBF_IMPORTED;
#ifdef DEBUG
- if (verbose&&0) printf("Added PendingDsc - %08p for BB%02u\n",
- dspPtr(dsc), block->bbNum);
+ if (verbose && 0)
+ {
+ printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
+ }
#endif
}
-
void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
{
- if (comp->impBlockListNodeFreeList == NULL)
+ if (comp->impBlockListNodeFreeList == nullptr)
{
return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
}
else
{
- BlockListNode* res = comp->impBlockListNodeFreeList;
+ BlockListNode* res = comp->impBlockListNodeFreeList;
comp->impBlockListNodeFreeList = res->m_next;
return res;
}
@@ -15343,11 +15841,11 @@ void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
{
- node->m_next = impBlockListNodeFreeList;
+ node->m_next = impBlockListNodeFreeList;
impBlockListNodeFreeList = node;
}
-void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker * callback)
+void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
{
bool toDo = true;
@@ -15357,17 +15855,17 @@ void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker *
fgComputeCheapPreds();
}
- BlockListNode* succCliqueToDo = NULL;
+ BlockListNode* succCliqueToDo = nullptr;
BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
while (toDo)
{
toDo = false;
// Look at the successors of every member of the predecessor to-do list.
- while (predCliqueToDo != NULL)
+ while (predCliqueToDo != nullptr)
{
BlockListNode* node = predCliqueToDo;
- predCliqueToDo = node->m_next;
- BasicBlock* blk = node->m_blk;
+ predCliqueToDo = node->m_next;
+ BasicBlock* blk = node->m_blk;
FreeBlockListNode(node);
for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
@@ -15380,16 +15878,16 @@ void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker *
callback->Visit(SpillCliqueSucc, succ);
impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
- toDo = true;
+ toDo = true;
}
}
}
// Look at the predecessors of every member of the successor to-do list.
- while (succCliqueToDo != NULL)
+ while (succCliqueToDo != nullptr)
{
BlockListNode* node = succCliqueToDo;
- succCliqueToDo = node->m_next;
- BasicBlock* blk = node->m_blk;
+ succCliqueToDo = node->m_next;
+ BasicBlock* blk = node->m_blk;
FreeBlockListNode(node);
for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
@@ -15402,7 +15900,7 @@ void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker *
callback->Visit(SpillCliquePred, predBlock);
impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
- toDo = true;
+ toDo = true;
}
}
}
@@ -15414,7 +15912,6 @@ void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker *
assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
}
-
void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
if (predOrSucc == SpillCliqueSucc)
@@ -15436,15 +15933,14 @@ void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock*
// and re-type it/add a cast, but that is complicated and hopefully very rare, so
// just re-import the whole block (just like we do for successors)
- if (((blk->bbFlags & BBF_IMPORTED) == 0) &&
- (m_pComp->impGetPendingBlockMember(blk) == 0))
+ if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
{
// If we haven't imported this block and we're not going to (because it isn't on
// the pending list) then just ignore it for now.
// This block has either never been imported (EntryState == NULL) or it failed
// verification. Neither state requires us to force it to be imported now.
- assert((blk->bbEntryState == NULL) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
+ assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
return;
}
@@ -15484,9 +15980,9 @@ void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock*
// Re-type the incoming lclVar nodes to match the varDsc.
void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
{
- if (blk->bbEntryState != NULL)
+ if (blk->bbEntryState != nullptr)
{
- EntryState * es = blk->bbEntryState;
+ EntryState* es = blk->bbEntryState;
for (unsigned level = 0; level < es->esStackDepth; level++)
{
GenTreePtr tree = es->esStack[level].val;
@@ -15494,7 +15990,7 @@ void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
{
unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
noway_assert(lclNum < lvaCount);
- LclVarDsc * varDsc = lvaTable + lclNum;
+ LclVarDsc* varDsc = lvaTable + lclNum;
es->esStack[level].val->gtType = varDsc->TypeGet();
}
}
@@ -15504,10 +16000,12 @@ void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
{
if (block->bbStkTempsOut != NO_BASE_TMP)
+ {
return block->bbStkTempsOut;
+ }
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
}
@@ -15528,7 +16026,7 @@ unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
void Compiler::impReimportSpillClique(BasicBlock* block)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
}
@@ -15547,37 +16045,32 @@ void Compiler::impReimportSpillClique(BasicBlock* block)
impWalkSpillCliqueFromPred(block, &callback);
}
-
// Set the pre-state of "block" (which should not have a pre-state allocated) to
// a copy of "srcState", cloning tree pointers as required.
-void Compiler::verInitBBEntryState(BasicBlock* block,
- EntryState* srcState)
+void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
{
- if (srcState->esStackDepth == 0 &&
- srcState->thisInitialized == TIS_Bottom)
+ if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
{
- block->bbEntryState = NULL;
+ block->bbEntryState = nullptr;
return;
}
- block->bbEntryState = (EntryState*) compGetMemA(sizeof(EntryState));
+ block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
- //block->bbEntryState.esRefcount = 1;
+ // block->bbEntryState.esRefcount = 1;
block->bbEntryState->esStackDepth = srcState->esStackDepth;
block->bbEntryState->thisInitialized = TIS_Bottom;
-
+
if (srcState->esStackDepth > 0)
{
block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
- memcpy(block->bbEntryState->esStack,
- srcState->esStack,
- stackSize);
+ memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
for (unsigned level = 0; level < srcState->esStackDepth; level++)
{
- GenTreePtr tree = srcState->esStack[level].val;
+ GenTreePtr tree = srcState->esStack[level].val;
block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
}
}
@@ -15593,38 +16086,34 @@ void Compiler::verInitBBEntryState(BasicBlock* block,
void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
{
assert(tis != TIS_Bottom); // Precondition.
- if (block->bbEntryState == NULL)
+ if (block->bbEntryState == nullptr)
{
block->bbEntryState = new (this, CMK_Unknown) EntryState();
}
-
+
block->bbEntryState->thisInitialized = tis;
}
/*
- * Resets the current state to the state at the start of the basic block
+ * Resets the current state to the state at the start of the basic block
*/
-void Compiler::verResetCurrentState(BasicBlock* block,
- EntryState* destState)
+void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
{
- if (block->bbEntryState == NULL)
+ if (block->bbEntryState == nullptr)
{
- destState->esStackDepth = 0;
- destState->thisInitialized = TIS_Bottom;
+ destState->esStackDepth = 0;
+ destState->thisInitialized = TIS_Bottom;
return;
}
-
- destState->esStackDepth = block->bbEntryState->esStackDepth;
+
+ destState->esStackDepth = block->bbEntryState->esStackDepth;
if (destState->esStackDepth > 0)
{
unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
-
- memcpy(destState->esStack,
- block->bbStackOnEntry(),
- stackSize);
+ memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
}
destState->thisInitialized = block->bbThisOnEntry();
@@ -15632,45 +16121,40 @@ void Compiler::verResetCurrentState(BasicBlock* block,
return;
}
-
-
-ThisInitState BasicBlock::bbThisOnEntry()
+ThisInitState BasicBlock::bbThisOnEntry()
{
return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
}
-unsigned BasicBlock::bbStackDepthOnEntry()
+unsigned BasicBlock::bbStackDepthOnEntry()
{
return (bbEntryState ? bbEntryState->esStackDepth : 0);
-
}
-void BasicBlock::bbSetStack(void* stackBuffer)
+void BasicBlock::bbSetStack(void* stackBuffer)
{
assert(bbEntryState);
assert(stackBuffer);
- bbEntryState->esStack = (StackEntry*) stackBuffer;
+ bbEntryState->esStack = (StackEntry*)stackBuffer;
}
-StackEntry* BasicBlock::bbStackOnEntry()
+StackEntry* BasicBlock::bbStackOnEntry()
{
assert(bbEntryState);
return bbEntryState->esStack;
}
-void Compiler::verInitCurrentState()
+void Compiler::verInitCurrentState()
{
- verTrackObjCtorInitState = FALSE;
+ verTrackObjCtorInitState = FALSE;
verCurrentState.thisInitialized = TIS_Bottom;
if (tiVerificationNeeded)
{
// Track this ptr initialization
- if (!info.compIsStatic &&
- (info.compFlags & CORINFO_FLG_CONSTRUCTOR) &&
- lvaTable[0].lvVerTypeInfo.IsObjRef())
+ if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
{
- verTrackObjCtorInitState = TRUE;
+ verTrackObjCtorInitState = TRUE;
verCurrentState.thisInitialized = TIS_Uninit;
}
}
@@ -15678,15 +16162,15 @@ void Compiler::verInitCurrentState()
// initialize stack info
verCurrentState.esStackDepth = 0;
- assert(verCurrentState.esStack != NULL);
+ assert(verCurrentState.esStack != nullptr);
// copy current state to entry state of first BB
- verInitBBEntryState(fgFirstBB,&verCurrentState);
+ verInitBBEntryState(fgFirstBB, &verCurrentState);
}
Compiler* Compiler::impInlineRoot()
{
- if (impInlineInfo == NULL)
+ if (impInlineInfo == nullptr)
{
return this;
}
@@ -15728,26 +16212,28 @@ void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* bl
* basic flowgraph has already been constructed and is passed in.
*/
-void Compiler::impImport(BasicBlock *method)
-{
+void Compiler::impImport(BasicBlock* method)
+{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In impImport() for %s\n", info.compFullName);
+ }
#endif
/* Allocate the stack contents */
- if (info.compMaxStack <= sizeof(impSmallStack)/sizeof(impSmallStack[0]))
+ if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
{
/* Use local variable, don't waste time allocating on the heap */
- impStkSize = sizeof(impSmallStack)/sizeof(impSmallStack[0]);
- verCurrentState.esStack = impSmallStack;
+ impStkSize = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
+ verCurrentState.esStack = impSmallStack;
}
else
{
- impStkSize = info.compMaxStack;
- verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
+ impStkSize = info.compMaxStack;
+ verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
}
// initialize the entry state at start of method
@@ -15755,7 +16241,7 @@ void Compiler::impImport(BasicBlock *method)
// Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
Compiler* inlineRoot = impInlineRoot();
- if (this == inlineRoot) // These are only used on the root of the inlining tree.
+ if (this == inlineRoot) // These are only used on the root of the inlining tree.
{
// We have initialized these previously, but to size 0. Make them larger.
impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
@@ -15765,21 +16251,22 @@ void Compiler::impImport(BasicBlock *method)
inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
- impBlockListNodeFreeList = NULL;
+ impBlockListNodeFreeList = nullptr;
-#ifdef DEBUG
- impLastILoffsStmt = NULL;
+#ifdef DEBUG
+ impLastILoffsStmt = nullptr;
impNestedStackSpill = false;
#endif
impBoxTemp = BAD_VAR_NUM;
- impPendingList = impPendingFree = NULL;
+ impPendingList = impPendingFree = nullptr;
/* Add the entry-point to the worker-list */
// Skip leading internal blocks. There can be one as a leading scratch BB, and more
// from EH normalization.
- // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall out.
+ // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
+ // out.
for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
{
// Treat these as imported.
@@ -15796,37 +16283,39 @@ void Compiler::impImport(BasicBlock *method)
{
/* Remove the entry at the front of the list */
- PendingDsc * dsc = impPendingList;
- impPendingList = impPendingList->pdNext;
+ PendingDsc* dsc = impPendingList;
+ impPendingList = impPendingList->pdNext;
impSetPendingBlockMember(dsc->pdBB, 0);
/* Restore the stack state */
- verCurrentState.thisInitialized = dsc->pdThisPtrInit;
- verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
+ verCurrentState.thisInitialized = dsc->pdThisPtrInit;
+ verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
if (verCurrentState.esStackDepth)
+ {
impRestoreStackState(&dsc->pdSavedStack);
+ }
/* Add the entry to the free list for reuse */
- dsc->pdNext = impPendingFree;
+ dsc->pdNext = impPendingFree;
impPendingFree = dsc;
/* Now import the block */
if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
{
-
+
#ifdef _TARGET_64BIT_
// On AMD64, during verification we have to match JIT64 behavior since the VM is very tighly
// coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
// method for further explanation on why we raise this exception instead of making the jitted
// code throw the verification exception during execution.
- if(tiVerificationNeeded && (opts.eeFlags & CORJIT_FLG_IMPORT_ONLY) != 0)
+ if (tiVerificationNeeded && (opts.eeFlags & CORJIT_FLG_IMPORT_ONLY) != 0)
{
BADCODE("Basic block marked as not verifiable");
}
- else
+ else
#endif // _TARGET_64BIT_
{
verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
@@ -15838,9 +16327,13 @@ void Compiler::impImport(BasicBlock *method)
impImportBlock(dsc->pdBB);
if (compDonotInline())
+ {
return;
+ }
if (compIsForImportOnly() && !tiVerificationNeeded)
+ {
return;
+ }
}
}
@@ -15853,7 +16346,7 @@ void Compiler::impImport(BasicBlock *method)
}
// Used in impImportBlockPending() for STRESS_CHK_REIMPORT
- for (BasicBlock * block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
block->bbFlags &= ~BBF_VISITED;
}
@@ -15878,22 +16371,24 @@ bool Compiler::impIsValueType(typeInfo* pTypeInfo)
}
/*****************************************************************************
- * Check to see if the tree is the address of a local or
+ * Check to see if the tree is the address of a local or
the address of a field in a local.
-
+
*lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
-
+
*/
-BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr * lclVarTreeOut)
-{
+BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
+{
if (tree->gtOper != GT_ADDR)
+ {
return FALSE;
-
+ }
+
GenTreePtr op = tree->gtOp.gtOp1;
while (op->gtOper == GT_FIELD)
{
- op = op->gtField.gtFldObj;
+ op = op->gtField.gtFldObj;
if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
{
op = op->gtOp.gtOp1;
@@ -15928,12 +16423,11 @@ BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr * lclVarTreeO
// various observations about the method that factor into inline
// decisions. It sets `compNativeSizeEstimate` as a side effect.
-void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo,
- InlineResult* inlineResult)
+void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
{
- assert(pInlineInfo != NULL && compIsForInlining() || // Perform the actual inlining.
- pInlineInfo == NULL && !compIsForInlining() // Calculate the static inlining hint for ngen.
- );
+ assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
+ pInlineInfo == nullptr && !compIsForInlining() // Calculate the static inlining hint for ngen.
+ );
// If we're really inlining, we should just have one result in play.
assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
@@ -15956,7 +16450,7 @@ void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo,
inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
}
- bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
+ bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
if (isSpecialMethod)
@@ -15985,15 +16479,15 @@ void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo,
}
// Note if the callee's class is a promotable struct
- if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
- {
+ if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
+ {
lvaStructPromotionInfo structPromotionInfo;
lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
if (structPromotionInfo.canPromote)
{
inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
}
- }
+ }
#ifdef FEATURE_SIMD
@@ -16013,23 +16507,21 @@ void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo,
{
frequency = InlineCallsiteFrequency::HOT;
}
- //No training data. Look for loop-like things.
- //We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
- //However, give it to things nearby.
+ // No training data. Look for loop-like things.
+ // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
+ // However, give it to things nearby.
else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
(pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
{
frequency = InlineCallsiteFrequency::LOOP;
}
- else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT)
- && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
+ else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
{
frequency = InlineCallsiteFrequency::WARM;
}
- //Now modify the multiplier based on where we're called from.
- else if (pInlineInfo->iciBlock->isRunRarely() ||
- ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
- {
+ // Now modify the multiplier based on where we're called from.
+ else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
+ {
frequency = InlineCallsiteFrequency::RARE;
}
else
@@ -16055,16 +16547,16 @@ void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo,
}
/*****************************************************************************
- This method makes STATIC inlining decision based on the IL code.
+ This method makes STATIC inlining decision based on the IL code.
It should not make any inlining decision based on the context.
If forceInline is true, then the inlining decision should not depend on
performance heuristics (code size, etc.).
*/
-void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
- CORINFO_METHOD_INFO * methInfo,
- bool forceInline,
- InlineResult* inlineResult)
+void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
+ CORINFO_METHOD_INFO* methInfo,
+ bool forceInline,
+ InlineResult* inlineResult)
{
unsigned codeSize = methInfo->ILCodeSize;
@@ -16077,7 +16569,7 @@ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
return;
}
- if ((methInfo->ILCode == 0) || (codeSize == 0))
+ if ((methInfo->ILCode == nullptr) || (codeSize == 0))
{
inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
return;
@@ -16102,13 +16594,13 @@ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
return;
}
-
+
// Make sure there aren't too many arguments.
// This is currently an implementation limit due to fixed-size arrays in the
// inline info, rather than a performance heuristic.
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
-
+
if (methInfo->args.numArgs > MAX_INL_ARGS)
{
inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
@@ -16141,184 +16633,183 @@ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
/*****************************************************************************
*/
-void Compiler::impCheckCanInline(GenTreePtr call,
- CORINFO_METHOD_HANDLE fncHandle,
- unsigned methAttr,
- CORINFO_CONTEXT_HANDLE exactContextHnd,
- InlineCandidateInfo** ppInlineCandidateInfo,
- InlineResult* inlineResult)
+void Compiler::impCheckCanInline(GenTreePtr call,
+ CORINFO_METHOD_HANDLE fncHandle,
+ unsigned methAttr,
+ CORINFO_CONTEXT_HANDLE exactContextHnd,
+ InlineCandidateInfo** ppInlineCandidateInfo,
+ InlineResult* inlineResult)
{
// Either EE or JIT might throw exceptions below.
// If that happens, just don't inline the method.
-
+
struct Param
{
- Compiler * pThis;
- GenTreePtr call;
- CORINFO_METHOD_HANDLE fncHandle;
- unsigned methAttr;
+ Compiler* pThis;
+ GenTreePtr call;
+ CORINFO_METHOD_HANDLE fncHandle;
+ unsigned methAttr;
CORINFO_CONTEXT_HANDLE exactContextHnd;
- InlineResult* result;
- InlineCandidateInfo ** ppInlineCandidateInfo;
- } param = {0};
-
- param.pThis = this;
- param.call = call;
- param.fncHandle = fncHandle;
- param.methAttr = methAttr;
- param.exactContextHnd = (exactContextHnd != NULL) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
- param.result = inlineResult;
+ InlineResult* result;
+ InlineCandidateInfo** ppInlineCandidateInfo;
+ } param = {nullptr};
+
+ param.pThis = this;
+ param.call = call;
+ param.fncHandle = fncHandle;
+ param.methAttr = methAttr;
+ param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
+ param.result = inlineResult;
param.ppInlineCandidateInfo = ppInlineCandidateInfo;
- bool success = eeRunWithErrorTrap<Param>([](Param* pParam)
- {
- DWORD dwRestrictions = 0;
- CorInfoInitClassResult initClassResult;
+ bool success = eeRunWithErrorTrap<Param>(
+ [](Param* pParam) {
+ DWORD dwRestrictions = 0;
+ CorInfoInitClassResult initClassResult;
#ifdef DEBUG
- const char * methodName;
- const char * className;
- methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
+ const char* methodName;
+ const char* className;
+ methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
- if (JitConfig.JitNoInline())
- {
- pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
- goto _exit;
- }
+ if (JitConfig.JitNoInline())
+ {
+ pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
+ goto _exit;
+ }
#endif
- /* Try to get the code address/size for the method */
+ /* Try to get the code address/size for the method */
- CORINFO_METHOD_INFO methInfo;
- if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
- {
- pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
- goto _exit;
- }
+ CORINFO_METHOD_INFO methInfo;
+ if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
+ {
+ pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
+ goto _exit;
+ }
- bool forceInline;
- forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
+ bool forceInline;
+ forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
- pParam->pThis->impCanInlineIL(pParam->fncHandle,
- &methInfo,
- forceInline,
- pParam->result);
+ pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
- if (pParam->result->IsFailure())
- {
- assert(pParam->result->IsNever());
- goto _exit;
- }
+ if (pParam->result->IsFailure())
+ {
+ assert(pParam->result->IsNever());
+ goto _exit;
+ }
- // Speculatively check if initClass() can be done.
- // If it can be done, we will try to inline the method. If inlining
- // succeeds, then we will do the non-speculative initClass() and commit it.
- // If this speculative call to initClass() fails, there is no point
- // trying to inline this method.
- initClassResult = pParam->pThis->info.compCompHnd->initClass(NULL /* field */, pParam->fncHandle /* method */,
- pParam->exactContextHnd /* context */, TRUE /* speculative */);
+ // Speculatively check if initClass() can be done.
+ // If it can be done, we will try to inline the method. If inlining
+ // succeeds, then we will do the non-speculative initClass() and commit it.
+ // If this speculative call to initClass() fails, there is no point
+ // trying to inline this method.
+ initClassResult =
+ pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
+ pParam->exactContextHnd /* context */,
+ TRUE /* speculative */);
- if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
- {
- pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
- goto _exit;
- }
+ if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
+ {
+ pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
+ goto _exit;
+ }
- // Given the EE the final say in whether to inline or not.
- // This should be last since for verifiable code, this can be expensive
+ // Given the EE the final say in whether to inline or not.
+ // This should be last since for verifiable code, this can be expensive
- /* VM Inline check also ensures that the method is verifiable if needed */
- CorInfoInline vmResult;
- vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle, &dwRestrictions);
+ /* VM Inline check also ensures that the method is verifiable if needed */
+ CorInfoInline vmResult;
+ vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
+ &dwRestrictions);
- if (vmResult == INLINE_FAIL)
- {
- pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
- }
- else if (vmResult == INLINE_NEVER)
- {
- pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
- }
-
- if (pParam->result->IsFailure())
- {
- // Make sure not to report this one. It was already reported by the VM.
- pParam->result->SetReported();
- goto _exit;
- }
-
- // check for unsupported inlining restrictions
- assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY|INLINE_NO_CALLEE_LDSTR|INLINE_SAME_THIS)) == 0);
-
- if (dwRestrictions & INLINE_SAME_THIS)
- {
- GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
- assert(thisArg);
-
- if (!pParam->pThis->impIsThis(thisArg))
+ if (vmResult == INLINE_FAIL)
{
- pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
+ pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
+ }
+ else if (vmResult == INLINE_NEVER)
+ {
+ pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
+ }
+
+ if (pParam->result->IsFailure())
+ {
+ // Make sure not to report this one. It was already reported by the VM.
+ pParam->result->SetReported();
goto _exit;
}
- }
- /* Get the method properties */
+ // check for unsupported inlining restrictions
+ assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
+
+ if (dwRestrictions & INLINE_SAME_THIS)
+ {
+ GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
+ assert(thisArg);
+
+ if (!pParam->pThis->impIsThis(thisArg))
+ {
+ pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
+ goto _exit;
+ }
+ }
+
+ /* Get the method properties */
+
+ CORINFO_CLASS_HANDLE clsHandle;
+ clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
+ unsigned clsAttr;
+ clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
- CORINFO_CLASS_HANDLE clsHandle;
- clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
- unsigned clsAttr;
- clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
+ /* Get the return type */
- /* Get the return type */
-
- var_types fncRetType;
- fncRetType = pParam->call->TypeGet();
+ var_types fncRetType;
+ fncRetType = pParam->call->TypeGet();
#ifdef DEBUG
- var_types fncRealRetType;
- fncRealRetType = JITtype2varType(methInfo.args.retType);
-
- assert( (genActualType(fncRealRetType) == genActualType(fncRetType)) ||
- // <BUGNUM> VSW 288602 </BUGNUM>
- // In case of IJW, we allow to assign a native pointer to a BYREF.
- (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
- (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))
- );
+ var_types fncRealRetType;
+ fncRealRetType = JITtype2varType(methInfo.args.retType);
+
+ assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
+ // <BUGNUM> VSW 288602 </BUGNUM>
+ // In case of IJW, we allow to assign a native pointer to a BYREF.
+ (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
+ (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
#endif
- //
- // Allocate an InlineCandidateInfo structure
- //
- InlineCandidateInfo * pInfo;
- pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
-
- pInfo->dwRestrictions = dwRestrictions;
- pInfo->methInfo = methInfo;
- pInfo->methAttr = pParam->methAttr;
- pInfo->clsHandle = clsHandle;
- pInfo->clsAttr = clsAttr;
- pInfo->fncRetType = fncRetType;
- pInfo->exactContextHnd = pParam->exactContextHnd;
- pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
- pInfo->initClassResult = initClassResult;
-
- *(pParam->ppInlineCandidateInfo) = pInfo;
-
-_exit:
- ;
- }, &param);
+ //
+ // Allocate an InlineCandidateInfo structure
+ //
+ InlineCandidateInfo* pInfo;
+ pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
+
+ pInfo->dwRestrictions = dwRestrictions;
+ pInfo->methInfo = methInfo;
+ pInfo->methAttr = pParam->methAttr;
+ pInfo->clsHandle = clsHandle;
+ pInfo->clsAttr = clsAttr;
+ pInfo->fncRetType = fncRetType;
+ pInfo->exactContextHnd = pParam->exactContextHnd;
+ pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
+ pInfo->initClassResult = initClassResult;
+
+ *(pParam->ppInlineCandidateInfo) = pInfo;
+
+ _exit:;
+ },
+ &param);
if (!success)
{
param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
}
}
-
-void Compiler::impInlineRecordArgInfo(InlineInfo * pInlineInfo,
+
+void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
GenTreePtr curArgVal,
unsigned argNum,
InlineResult* inlineResult)
{
- InlArgInfo * inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
+ InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
if (curArgVal->gtOper == GT_MKREFANY)
{
@@ -16329,9 +16820,8 @@ void Compiler::impInlineRecordArgInfo(InlineInfo * pInlineInfo,
inlCurArgInfo->argNode = curArgVal;
GenTreePtr lclVarTree;
- if (impIsAddressInLocal(curArgVal, &lclVarTree) &&
- varTypeIsStruct(lclVarTree))
- {
+ if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
+ {
inlCurArgInfo->argIsByRefToStructLocal = true;
#ifdef FEATURE_SIMD
if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
@@ -16351,25 +16841,23 @@ void Compiler::impInlineRecordArgInfo(InlineInfo * pInlineInfo,
if (curArgVal->gtFlags & GTF_GLOB_EFFECT)
{
- inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF ) != 0;
+ inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & GTF_SIDE_EFFECT) != 0;
}
if (curArgVal->gtOper == GT_LCL_VAR)
{
inlCurArgInfo->argIsLclVar = true;
-
+
/* Remember the "original" argument number */
curArgVal->gtLclVar.gtLclILoffs = argNum;
}
- if ( (curArgVal->OperKind() & GTK_CONST) ||
- ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
+ if ((curArgVal->OperKind() & GTK_CONST) ||
+ ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
{
inlCurArgInfo->argIsInvariant = true;
- if (inlCurArgInfo->argIsThis &&
- (curArgVal->gtOper == GT_CNS_INT) &&
- (curArgVal->gtIntCon.gtIconVal == 0) )
+ if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
{
/* Abort, but do not mark as not inlinable */
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
@@ -16386,23 +16874,41 @@ void Compiler::impInlineRecordArgInfo(InlineInfo * pInlineInfo,
if (verbose)
{
if (inlCurArgInfo->argIsThis)
+ {
printf("thisArg:");
+ }
else
+ {
printf("\nArgument #%u:", argNum);
- if (inlCurArgInfo->argIsLclVar)
+ }
+ if (inlCurArgInfo->argIsLclVar)
+ {
printf(" is a local var");
- if (inlCurArgInfo->argIsInvariant)
+ }
+ if (inlCurArgInfo->argIsInvariant)
+ {
printf(" is a constant");
- if (inlCurArgInfo->argHasGlobRef)
+ }
+ if (inlCurArgInfo->argHasGlobRef)
+ {
printf(" has global refs");
- if (inlCurArgInfo->argHasSideEff)
- printf(" has side effects");
- if (inlCurArgInfo->argHasLdargaOp)
+ }
+ if (inlCurArgInfo->argHasSideEff)
+ {
+ printf(" has side effects");
+ }
+ if (inlCurArgInfo->argHasLdargaOp)
+ {
printf(" has ldarga effect");
+ }
if (inlCurArgInfo->argHasStargOp)
+ {
printf(" has starg effect");
- if (inlCurArgInfo->argIsByRefToStructLocal)
+ }
+ if (inlCurArgInfo->argIsByRefToStructLocal)
+ {
printf(" is byref to a struct local");
+ }
printf("\n");
gtDispTree(curArgVal);
@@ -16415,17 +16921,17 @@ void Compiler::impInlineRecordArgInfo(InlineInfo * pInlineInfo,
*
*/
-void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
+void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
{
- assert(!compIsForInlining());
-
- GenTreePtr call = pInlineInfo->iciCall;
- CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
- unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
- InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
- InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
- InlineResult* inlineResult = pInlineInfo->inlineResult;
-
+ assert(!compIsForInlining());
+
+ GenTreePtr call = pInlineInfo->iciCall;
+ CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
+ unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
+ InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
+ InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
+ InlineResult* inlineResult = pInlineInfo->inlineResult;
+
const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
/* init the argument stuct */
@@ -16434,42 +16940,44 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
/* Get hold of the 'this' pointer and the argument list proper */
- GenTreePtr thisArg = call->gtCall.gtCallObjp;
- GenTreePtr argList = call->gtCall.gtCallArgs;
- unsigned argCnt = 0; // Count of the arguments
+ GenTreePtr thisArg = call->gtCall.gtCallObjp;
+ GenTreePtr argList = call->gtCall.gtCallArgs;
+ unsigned argCnt = 0; // Count of the arguments
- assert( (methInfo->args.hasThis()) == (thisArg != NULL) );
+ assert((methInfo->args.hasThis()) == (thisArg != nullptr));
- if (thisArg)
+ if (thisArg)
{
- inlArgInfo[0].argIsThis = true;
-
+ inlArgInfo[0].argIsThis = true;
+
impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
-
+
if (inlineResult->IsFailure())
{
- return;
+ return;
}
-
+
/* Increment the argument count */
argCnt++;
}
/* Record some information about each of the arguments */
- bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
+ bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
#if USER_ARGS_COME_LAST
unsigned typeCtxtArg = thisArg ? 1 : 0;
-#else // USER_ARGS_COME_LAST
+#else // USER_ARGS_COME_LAST
unsigned typeCtxtArg = methInfo->args.totalILArgs();
#endif // USER_ARGS_COME_LAST
for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
{
if (argTmp == argList && hasRetBuffArg)
+ {
continue;
+ }
- // Ignore the type context argument
+ // Ignore the type context argument
if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
{
typeCtxtArg = 0xFFFFFFFF;
@@ -16477,9 +16985,9 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
}
assert(argTmp->gtOper == GT_LIST);
- GenTreePtr argVal = argTmp->gtOp.gtOp1;
+ GenTreePtr argVal = argTmp->gtOp.gtOp1;
- impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
+ impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
if (inlineResult->IsFailure())
{
@@ -16489,7 +16997,7 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
/* Increment the argument count */
argCnt++;
}
-
+
/* Make sure we got the arg number right */
assert(argCnt == methInfo->args.totalILArgs());
@@ -16501,14 +17009,18 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
if (thisArg)
{
- var_types sigType;
+ var_types sigType;
if (clsAttr & CORINFO_FLG_VALUECLASS)
+ {
sigType = TYP_BYREF;
+ }
else
+ {
sigType = TYP_REF;
+ }
- lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
+ lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
lclVarInfo[0].lclHasLdlocaOp = false;
#ifdef FEATURE_SIMD
@@ -16516,8 +17028,7 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
// the inlining multiplier) for anything in that assembly.
// But we only need to normalize it if it is a TYP_STRUCT
// (which we need to do even if we have already set foundSIMDType).
- if ((!foundSIMDType || (sigType == TYP_STRUCT)) &&
- isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
+ if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
{
if (sigType == TYP_STRUCT)
{
@@ -16526,10 +17037,10 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
foundSIMDType = true;
}
#endif // FEATURE_SIMD
- lclVarInfo[0].lclTypeInfo = sigType;
+ lclVarInfo[0].lclTypeInfo = sigType;
- assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
- (thisArg->gtType == TYP_I_IMPL && // "this" is unmgd but the method's class doesnt care
+ assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
+ (thisArg->gtType == TYP_I_IMPL && // "this" is unmgd but the method's class doesnt care
(clsAttr & CORINFO_FLG_VALUECLASS)));
if (genActualType(thisArg->gtType) != genActualType(sigType))
@@ -16544,8 +17055,7 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
/* This can only happen with byrefs <-> ints/shorts */
assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
- assert(genActualType(thisArg->gtType) == TYP_I_IMPL ||
- thisArg->gtType == TYP_BYREF );
+ assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
if (sigType == TYP_BYREF)
{
@@ -16558,7 +17068,7 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
/* If possible change the BYREF to an int */
if (thisArg->IsVarAddr())
{
- thisArg->gtType = TYP_I_IMPL;
+ thisArg->gtType = TYP_I_IMPL;
lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
}
else
@@ -16574,18 +17084,17 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
/* Init the types of the arguments and make sure the types
* from the trees match the types in the signature */
- CORINFO_ARG_LIST_HANDLE argLst;
+ CORINFO_ARG_LIST_HANDLE argLst;
argLst = methInfo->args.args;
unsigned i;
for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
{
- var_types sigType = (var_types) eeGetArgType(argLst, &methInfo->args);
+ var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
#ifdef FEATURE_SIMD
- if ((!foundSIMDType || (sigType == TYP_STRUCT)) &&
- isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
+ if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
{
// If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
// found a SIMD type, even if this may not be a type we recognize (the assumption is that
@@ -16594,7 +17103,7 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
if (sigType == TYP_STRUCT)
{
var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
- sigType = structType;
+ sigType = structType;
}
}
#endif // FEATURE_SIMD
@@ -16609,14 +17118,13 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
if (sigType != inlArgNode->gtType)
{
/* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
- but in bad IL cases with caller-callee signature mismatches we can see other types.
+ but in bad IL cases with caller-callee signature mismatches we can see other types.
Intentionally reject cases with mismatches so the jit is more flexible when
encountering bad IL. */
- bool isPlausibleTypeMatch =
- (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
- (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
- (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
+ bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
+ (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
+ (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
if (!isPlausibleTypeMatch)
{
@@ -16641,7 +17149,7 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
/* If possible bash the BYREF to an int */
if (inlArgNode->IsVarAddr())
{
- inlArgNode->gtType = TYP_I_IMPL;
+ inlArgNode->gtType = TYP_I_IMPL;
lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
}
else
@@ -16665,10 +17173,7 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
continue;
}
- inlArgNode =
- inlArgInfo[i].argNode = gtNewCastNode (TYP_INT,
- inlArgNode,
- sigType);
+ inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
inlArgInfo[i].argIsLclVar = false;
@@ -16676,7 +17181,7 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
if (inlArgInfo[i].argIsInvariant)
{
- inlArgNode = gtFoldExprConst(inlArgNode);
+ inlArgNode = gtFoldExprConst(inlArgNode);
inlArgInfo[i].argNode = inlArgNode;
assert(inlArgNode->OperIsConst());
}
@@ -16685,10 +17190,7 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
{
// This should only happen for int -> native int widening
- inlArgNode =
- inlArgInfo[i].argNode = gtNewCastNode (genActualType(sigType),
- inlArgNode,
- sigType);
+ inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
inlArgInfo[i].argIsLclVar = false;
@@ -16696,7 +17198,7 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
if (inlArgInfo[i].argIsInvariant)
{
- inlArgNode = gtFoldExprConst(inlArgNode);
+ inlArgNode = gtFoldExprConst(inlArgNode);
inlArgInfo[i].argNode = inlArgNode;
assert(inlArgNode->OperIsConst());
}
@@ -16708,16 +17210,16 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
/* Init the types of the local variables */
- CORINFO_ARG_LIST_HANDLE localsSig;
+ CORINFO_ARG_LIST_HANDLE localsSig;
localsSig = methInfo->locals.args;
for (i = 0; i < methInfo->locals.numArgs; i++)
{
- bool isPinned;
- var_types type = (var_types) eeGetArgType(localsSig, &methInfo->locals, &isPinned);
+ bool isPinned;
+ var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
- lclVarInfo[i + argCnt].lclTypeInfo = type;
+ lclVarInfo[i + argCnt].lclTypeInfo = type;
if (isPinned)
{
@@ -16726,12 +17228,11 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
}
lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
-
+
localsSig = info.compCompHnd->getArgNext(localsSig);
#ifdef FEATURE_SIMD
- if ((!foundSIMDType || (type == TYP_STRUCT)) &&
- isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
+ if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
{
foundSIMDType = true;
if (featureSIMD && type == TYP_STRUCT)
@@ -16744,48 +17245,45 @@ void Compiler::impInlineInitVars(InlineInfo * pInlineInfo)
}
#ifdef FEATURE_SIMD
- if (!foundSIMDType &&
- (call->AsCall()->gtRetClsHnd != nullptr) &&
- isSIMDClass(call->AsCall()->gtRetClsHnd))
+ if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
{
foundSIMDType = true;
}
pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
#endif // FEATURE_SIMD
-
}
-
-unsigned Compiler::impInlineFetchLocal(unsigned lclNum
- DEBUGARG(const char * reason) )
+unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
{
assert(compIsForInlining());
- unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
+ unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
- if (tmpNum == BAD_VAR_NUM)
+ if (tmpNum == BAD_VAR_NUM)
{
var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
-
+
// The lifetime of this local might span multiple BBs.
// So it is a long lifetime local.
- impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
-
+ impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
+
lvaTable[tmpNum].lvType = lclTyp;
if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
{
- lvaTable[tmpNum].lvHasLdAddrOp = 1;
+ lvaTable[tmpNum].lvHasLdAddrOp = 1;
}
if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
{
if (varTypeIsStruct(lclTyp))
- {
- lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
- }
+ {
+ lvaSetStruct(tmpNum,
+ impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
+ true /* unsafe value cls check */);
+ }
else
{
- //This is a wrapped primitive. Make sure the verstate knows that
+ // This is a wrapped primitive. Make sure the verstate knows that
lvaTable[tmpNum].lvVerTypeInfo =
impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
}
@@ -16795,19 +17293,18 @@ unsigned Compiler::impInlineFetchLocal(unsigned lclNum
return tmpNum;
}
-
// A method used to return the GenTree (usually a GT_LCL_VAR) representing the arguments of the inlined method.
// Only use this method for the arguments of the inlinee method.
// !!! Do not use it for the locals of the inlinee method. !!!!
-GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo *inlArgInfo, InlLclVarInfo* lclVarInfo)
+GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
{
/* Get the argument type */
- var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
-
- GenTreePtr op1 = NULL;
+ var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
+
+ GenTreePtr op1 = nullptr;
- // constant or address of local
+ // constant or address of local
if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
{
/* Clone the constant. Note that we cannot directly use argNode
@@ -16815,36 +17312,38 @@ GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo *inlArgInfo,
would introduce aliasing between inlArgInfo[].argNode and
impInlineExpr. Then gtFoldExpr() could change it, causing further
references to the argument working off of the bashed copy. */
-
+
op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
- PREFIX_ASSUME(op1 != NULL);
- inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
+ PREFIX_ASSUME(op1 != nullptr);
+ inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
}
else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
{
/* Argument is a local variable (of the caller)
* Can we re-use the passed argument node? */
-
- op1 = inlArgInfo[lclNum].argNode;
+
+ op1 = inlArgInfo[lclNum].argNode;
inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
-
+
if (inlArgInfo[lclNum].argIsUsed)
{
assert(op1->gtOper == GT_LCL_VAR);
assert(lclNum == op1->gtLclVar.gtLclILoffs);
-
+
if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
+ {
lclTyp = genActualType(lclTyp);
-
+ }
+
/* Create a new lcl var node - remember the argument lclNum */
op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
}
}
else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
{
- /* Argument is a by-ref address to a struct, a normed struct, or its field.
+ /* Argument is a by-ref address to a struct, a normed struct, or its field.
In these cases, don't spill the byref to a local, simply clone the tree and use it.
- This way we will increase the chance for this byref to be optimized away by
+ This way we will increase the chance for this byref to be optimized away by
a subsequent "dereference" operation.
From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
@@ -16863,56 +17362,56 @@ GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo *inlArgInfo,
inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
}
- else
+ else
{
/* Argument is a complex expression - it must be evaluated into a temp */
-
+
if (inlArgInfo[lclNum].argHasTmp)
{
assert(inlArgInfo[lclNum].argIsUsed);
assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
-
+
/* Create a new lcl var node - remember the argument lclNum */
op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
-
+
/* This is the second or later use of the this argument,
so we have to use the temp (instead of the actual arg) */
- inlArgInfo[lclNum].argBashTmpNode = NULL;
+ inlArgInfo[lclNum].argBashTmpNode = nullptr;
}
else
{
/* First time use */
assert(inlArgInfo[lclNum].argIsUsed == false);
-
+
/* Reserve a temp for the expression.
* Use a large size node as we may change it later */
-
+
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
-
+
lvaTable[tmpNum].lvType = lclTyp;
assert(lvaTable[tmpNum].lvAddrExposed == 0);
if (inlArgInfo[lclNum].argHasLdargaOp)
{
- lvaTable[tmpNum].lvHasLdAddrOp = 1;
+ lvaTable[tmpNum].lvHasLdAddrOp = 1;
}
if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
- {
- if (varTypeIsStruct(lclTyp))
- {
- lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
- }
+ {
+ if (varTypeIsStruct(lclTyp))
+ {
+ lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
+ true /* unsafe value cls check */);
+ }
else
{
- //This is a wrapped primitive. Make sure the verstate knows that
- lvaTable[tmpNum].lvVerTypeInfo =
- impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
+ // This is a wrapped primitive. Make sure the verstate knows that
+ lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
}
}
-
+
inlArgInfo[lclNum].argHasTmp = true;
inlArgInfo[lclNum].argTmpNum = tmpNum;
-
+
// If we require strict exception order, then arguments must
// be evaluated in sequence before the body of the inlined method.
// So we need to evaluate them to a temp.
@@ -16921,14 +17420,12 @@ GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo *inlArgInfo,
// inlined body may be modifying the global ref.
// TODO-1stClassStructs: We currently do not reuse an existing lclVar
// if it is a struct, because it requires some additional handling.
-
- if (!varTypeIsStruct(lclTyp) &&
- (!inlArgInfo[lclNum].argHasSideEff) &&
- (!inlArgInfo[lclNum].argHasGlobRef))
+
+ if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
{
/* Get a *LARGE* LCL_VAR node */
op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
-
+
/* Record op1 as the very first use of this argument.
If there are no further uses of the arg, we may be
able to use the actual arg node instead of the temp.
@@ -16940,13 +17437,13 @@ GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo *inlArgInfo,
/* Get a small LCL_VAR node */
op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
/* No bashing of this argument */
- inlArgInfo[lclNum].argBashTmpNode = NULL;
+ inlArgInfo[lclNum].argBashTmpNode = nullptr;
}
}
}
-
+
/* Mark the argument as used */
-
+
inlArgInfo[lclNum].argIsUsed = true;
return op1;
@@ -16954,16 +17451,15 @@ GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo *inlArgInfo,
/******************************************************************************
Is this the original "this" argument to the call being inlined?
-
+
Note that we do not inline methods with "starg 0", and so we do not need to
worry about it.
*/
-BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo * inlArgInfo)
-{
+BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
+{
assert(compIsForInlining());
- return (tree->gtOper == GT_LCL_VAR &&
- tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
+ return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
}
//-----------------------------------------------------------------------------
@@ -16977,44 +17473,51 @@ BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo * inlA
// is the set of pending trees that have not yet been added to the statement list,
// and which have been removed from verCurrentState.esStack[]
-BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(
- GenTreePtr additionalTreesToBeEvaluatedBefore,
- GenTreePtr variableBeingDereferenced,
- InlArgInfo * inlArgInfo)
+BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr additionalTreesToBeEvaluatedBefore,
+ GenTreePtr variableBeingDereferenced,
+ InlArgInfo* inlArgInfo)
{
assert(compIsForInlining());
assert(opts.OptEnabled(CLFLG_INLINING));
- BasicBlock * block = compCurBB;
+ BasicBlock* block = compCurBB;
GenTreePtr stmt;
GenTreePtr expr;
if (block != fgFirstBB)
+ {
return FALSE;
+ }
if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
+ {
return FALSE;
+ }
if (additionalTreesToBeEvaluatedBefore &&
GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
+ {
return FALSE;
+ }
- for (stmt = impTreeList->gtNext;
- stmt;
- stmt = stmt->gtNext)
+ for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
{
expr = stmt->gtStmt.gtStmtExpr;
-
+
if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
+ {
return FALSE;
+ }
}
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
- if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
+ if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
+ {
return FALSE;
+ }
}
return TRUE;
@@ -17022,21 +17525,21 @@ BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects
/******************************************************************************/
// Check the inlining eligibility of this GT_CALL node.
-// Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
+// Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
// Todo: find a way to record the failure reasons in the IR (or
// otherwise build tree context) so when we do the inlining pass we
// can capture these reasons
-void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
- CORINFO_CONTEXT_HANDLE exactContextHnd,
- CORINFO_CALL_INFO* callInfo)
+void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
+ CORINFO_CONTEXT_HANDLE exactContextHnd,
+ CORINFO_CALL_INFO* callInfo)
{
// Let the strategy know there's another call
impInlineRoot()->m_inlineStrategy->NoteCall();
if (!opts.OptEnabled(CLFLG_INLINING))
- {
+ {
/* XXX Mon 8/18/2008
* This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
* calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
@@ -17054,10 +17557,10 @@ void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
// that leads to the creation of multiple instances of Compiler.
return;
}
-
+
GenTreeCall* call = callNode->AsCall();
InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
-
+
// Don't inline if not optimizing root method
if (opts.compDbgCode)
{
@@ -17098,14 +17601,14 @@ void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
/* Ignore helper calls */
- if (call->gtCallType == CT_HELPER)
+ if (call->gtCallType == CT_HELPER)
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
return;
}
/* Ignore indirect calls */
- if (call->gtCallType == CT_INDIRECT)
+ if (call->gtCallType == CT_INDIRECT)
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
return;
@@ -17116,7 +17619,7 @@ void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
* inlining in throw blocks. I should consider the same thing for catch and filter regions. */
CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
- unsigned methAttr;
+ unsigned methAttr;
// Reuse method flags from the original callInfo if possible
if (fncHandle == callInfo->hMethod)
@@ -17151,7 +17654,7 @@ void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
{
printf("\nWill not inline blocks that are in the catch handler region\n");
}
-
+
#endif
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
@@ -17190,7 +17693,7 @@ void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
/* Cannot inline synchronized methods */
- if (methAttr & CORINFO_FLG_SYNCH)
+ if (methAttr & CORINFO_FLG_SYNCH)
{
inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
return;
@@ -17204,7 +17707,7 @@ void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
return;
}
- InlineCandidateInfo * inlineCandidateInfo = nullptr;
+ InlineCandidateInfo* inlineCandidateInfo = nullptr;
impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
if (inlineResult.IsFailure())
@@ -17229,7 +17732,7 @@ void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
}
/******************************************************************************/
-// Returns true if the given intrinsic will be implemented by target-specific
+// Returns true if the given intrinsic will be implemented by target-specific
// instructions
bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
@@ -17237,7 +17740,7 @@ bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
#if defined(_TARGET_AMD64_)
switch (intrinsicId)
{
- // Amd64 only has SSE2 instruction to directly compute sqrt/abs.
+        // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
case CORINFO_INTRINSIC_Sqrt:
case CORINFO_INTRINSIC_Abs:
return true;
@@ -17281,21 +17784,21 @@ bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
return false;
}
#else
- // TODO: This portion of logic is not implemented for other arch.
- // The reason for returning true is that on all other arch the only intrinsic
+ // TODO: This portion of logic is not implemented for other arch.
+    // The reason for returning true is that on all other arch the only intrinsics
// enabled are target intrinsics.
return true;
-#endif //_TARGET_AMD64_
+#endif //_TARGET_AMD64_
}
/******************************************************************************/
-// Returns true if the given intrinsic will be implemented by calling System.Math
+// Returns true if the given intrinsic will be implemented by calling System.Math
// methods.
bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
{
- // Currently, if an math intrisic is not implemented by target-specific
- // intructions, it will be implemented by a System.Math call. In the
+    // Currently, if a math intrinsic is not implemented by target-specific
+    // instructions, it will be implemented by a System.Math call. In the
// future, if we turn to implementing some of them with helper callers,
// this predicate needs to be revisited.
return !IsTargetIntrinsic(intrinsicId);
@@ -17305,27 +17808,27 @@ bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
{
switch (intrinsicId)
{
- case CORINFO_INTRINSIC_Sin:
- case CORINFO_INTRINSIC_Sqrt:
- case CORINFO_INTRINSIC_Abs:
- case CORINFO_INTRINSIC_Cos:
- case CORINFO_INTRINSIC_Round:
- case CORINFO_INTRINSIC_Cosh:
- case CORINFO_INTRINSIC_Sinh:
- case CORINFO_INTRINSIC_Tan:
- case CORINFO_INTRINSIC_Tanh:
- case CORINFO_INTRINSIC_Asin:
- case CORINFO_INTRINSIC_Acos:
- case CORINFO_INTRINSIC_Atan:
- case CORINFO_INTRINSIC_Atan2:
- case CORINFO_INTRINSIC_Log10:
- case CORINFO_INTRINSIC_Pow:
- case CORINFO_INTRINSIC_Exp:
- case CORINFO_INTRINSIC_Ceiling:
- case CORINFO_INTRINSIC_Floor:
- return true;
- default:
- return false;
+ case CORINFO_INTRINSIC_Sin:
+ case CORINFO_INTRINSIC_Sqrt:
+ case CORINFO_INTRINSIC_Abs:
+ case CORINFO_INTRINSIC_Cos:
+ case CORINFO_INTRINSIC_Round:
+ case CORINFO_INTRINSIC_Cosh:
+ case CORINFO_INTRINSIC_Sinh:
+ case CORINFO_INTRINSIC_Tan:
+ case CORINFO_INTRINSIC_Tanh:
+ case CORINFO_INTRINSIC_Asin:
+ case CORINFO_INTRINSIC_Acos:
+ case CORINFO_INTRINSIC_Atan:
+ case CORINFO_INTRINSIC_Atan2:
+ case CORINFO_INTRINSIC_Log10:
+ case CORINFO_INTRINSIC_Pow:
+ case CORINFO_INTRINSIC_Exp:
+ case CORINFO_INTRINSIC_Ceiling:
+ case CORINFO_INTRINSIC_Floor:
+ return true;
+ default:
+ return false;
}
}
diff --git a/src/jit/inline.cpp b/src/jit/inline.cpp
index f813427220..deccc0e84b 100644
--- a/src/jit/inline.cpp
+++ b/src/jit/inline.cpp
@@ -11,8 +11,7 @@
// Lookup table for inline description strings
-static const char* InlineDescriptions[] =
-{
+static const char* InlineDescriptions[] = {
#define INLINE_OBSERVATION(name, type, description, impact, target) description,
#include "inline.def"
#undef INLINE_OBSERVATION
@@ -20,8 +19,7 @@ static const char* InlineDescriptions[] =
// Lookup table for inline targets
-static const InlineTarget InlineTargets[] =
-{
+static const InlineTarget InlineTargets[] = {
#define INLINE_OBSERVATION(name, type, description, impact, target) InlineTarget::target,
#include "inline.def"
#undef INLINE_OBSERVATION
@@ -29,8 +27,7 @@ static const InlineTarget InlineTargets[] =
// Lookup table for inline impacts
-static const InlineImpact InlineImpacts[] =
-{
+static const InlineImpact InlineImpacts[] = {
#define INLINE_OBSERVATION(name, type, description, impact, target) InlineImpact::impact,
#include "inline.def"
#undef INLINE_OBSERVATION
@@ -49,8 +46,7 @@ static const InlineImpact InlineImpacts[] =
bool InlIsValidObservation(InlineObservation obs)
{
- return((obs > InlineObservation::CALLEE_UNUSED_INITIAL) &&
- (obs < InlineObservation::CALLEE_UNUSED_FINAL));
+ return ((obs > InlineObservation::CALLEE_UNUSED_INITIAL) && (obs < InlineObservation::CALLEE_UNUSED_FINAL));
}
#endif // DEBUG
@@ -99,14 +95,14 @@ const char* InlGetTargetString(InlineObservation obs)
InlineTarget t = InlGetTarget(obs);
switch (t)
{
- case InlineTarget::CALLER:
- return "caller";
- case InlineTarget::CALLEE:
- return "callee";
- case InlineTarget::CALLSITE:
- return "call site";
- default:
- return "unexpected target";
+ case InlineTarget::CALLER:
+ return "caller";
+ case InlineTarget::CALLEE:
+ return "callee";
+ case InlineTarget::CALLSITE:
+ return "call site";
+ default:
+ return "unexpected target";
}
}
@@ -139,18 +135,18 @@ const char* InlGetImpactString(InlineObservation obs)
InlineImpact i = InlGetImpact(obs);
switch (i)
{
- case InlineImpact::FATAL:
- return "correctness -- fatal";
- case InlineImpact::FUNDAMENTAL:
- return "correctness -- fundamental limitation";
- case InlineImpact::LIMITATION:
- return "correctness -- jit limitation";
- case InlineImpact::PERFORMANCE:
- return "performance";
- case InlineImpact::INFORMATION:
- return "information";
- default:
- return "unexpected impact";
+ case InlineImpact::FATAL:
+ return "correctness -- fatal";
+ case InlineImpact::FUNDAMENTAL:
+ return "correctness -- fundamental limitation";
+ case InlineImpact::LIMITATION:
+ return "correctness -- jit limitation";
+ case InlineImpact::PERFORMANCE:
+ return "performance";
+ case InlineImpact::INFORMATION:
+ return "information";
+ default:
+ return "unexpected impact";
}
}
@@ -165,16 +161,17 @@ const char* InlGetImpactString(InlineObservation obs)
CorInfoInline InlGetCorInfoInlineDecision(InlineDecision d)
{
- switch (d) {
- case InlineDecision::SUCCESS:
- return INLINE_PASS;
- case InlineDecision::FAILURE:
- return INLINE_FAIL;
- case InlineDecision::NEVER:
- return INLINE_NEVER;
- default:
- assert(!"Unexpected InlineDecision");
- unreached();
+ switch (d)
+ {
+ case InlineDecision::SUCCESS:
+ return INLINE_PASS;
+ case InlineDecision::FAILURE:
+ return INLINE_FAIL;
+ case InlineDecision::NEVER:
+ return INLINE_NEVER;
+ default:
+ assert(!"Unexpected InlineDecision");
+ unreached();
}
}
@@ -189,20 +186,21 @@ CorInfoInline InlGetCorInfoInlineDecision(InlineDecision d)
const char* InlGetDecisionString(InlineDecision d)
{
- switch (d) {
- case InlineDecision::SUCCESS:
- return "success";
- case InlineDecision::FAILURE:
- return "failed this call site";
- case InlineDecision::NEVER:
- return "failed this callee";
- case InlineDecision::CANDIDATE:
- return "candidate";
- case InlineDecision::UNDECIDED:
- return "undecided";
- default:
- assert(!"Unexpected InlineDecision");
- unreached();
+ switch (d)
+ {
+ case InlineDecision::SUCCESS:
+ return "success";
+ case InlineDecision::FAILURE:
+ return "failed this call site";
+ case InlineDecision::NEVER:
+ return "failed this callee";
+ case InlineDecision::CANDIDATE:
+ return "candidate";
+ case InlineDecision::UNDECIDED:
+ return "undecided";
+ default:
+ assert(!"Unexpected InlineDecision");
+ unreached();
}
}
@@ -217,17 +215,18 @@ const char* InlGetDecisionString(InlineDecision d)
bool InlDecisionIsFailure(InlineDecision d)
{
- switch (d) {
- case InlineDecision::SUCCESS:
- case InlineDecision::UNDECIDED:
- case InlineDecision::CANDIDATE:
- return false;
- case InlineDecision::FAILURE:
- case InlineDecision::NEVER:
- return true;
- default:
- assert(!"Unexpected InlineDecision");
- unreached();
+ switch (d)
+ {
+ case InlineDecision::SUCCESS:
+ case InlineDecision::UNDECIDED:
+ case InlineDecision::CANDIDATE:
+ return false;
+ case InlineDecision::FAILURE:
+ case InlineDecision::NEVER:
+ return true;
+ default:
+ assert(!"Unexpected InlineDecision");
+ unreached();
}
}
@@ -242,17 +241,18 @@ bool InlDecisionIsFailure(InlineDecision d)
bool InlDecisionIsSuccess(InlineDecision d)
{
- switch (d) {
- case InlineDecision::SUCCESS:
- return true;
- case InlineDecision::FAILURE:
- case InlineDecision::NEVER:
- case InlineDecision::UNDECIDED:
- case InlineDecision::CANDIDATE:
- return false;
- default:
- assert(!"Unexpected InlineDecision");
- unreached();
+ switch (d)
+ {
+ case InlineDecision::SUCCESS:
+ return true;
+ case InlineDecision::FAILURE:
+ case InlineDecision::NEVER:
+ case InlineDecision::UNDECIDED:
+ case InlineDecision::CANDIDATE:
+ return false;
+ default:
+ assert(!"Unexpected InlineDecision");
+ unreached();
}
}
@@ -267,17 +267,18 @@ bool InlDecisionIsSuccess(InlineDecision d)
bool InlDecisionIsNever(InlineDecision d)
{
- switch (d) {
- case InlineDecision::NEVER:
- return true;
- case InlineDecision::FAILURE:
- case InlineDecision::SUCCESS:
- case InlineDecision::UNDECIDED:
- case InlineDecision::CANDIDATE:
- return false;
- default:
- assert(!"Unexpected InlineDecision");
- unreached();
+ switch (d)
+ {
+ case InlineDecision::NEVER:
+ return true;
+ case InlineDecision::FAILURE:
+ case InlineDecision::SUCCESS:
+ case InlineDecision::UNDECIDED:
+ case InlineDecision::CANDIDATE:
+ return false;
+ default:
+ assert(!"Unexpected InlineDecision");
+ unreached();
}
}
@@ -306,17 +307,18 @@ bool InlDecisionIsCandidate(InlineDecision d)
bool InlDecisionIsDecided(InlineDecision d)
{
- switch (d) {
- case InlineDecision::NEVER:
- case InlineDecision::FAILURE:
- case InlineDecision::SUCCESS:
- return true;
- case InlineDecision::UNDECIDED:
- case InlineDecision::CANDIDATE:
- return false;
- default:
- assert(!"Unexpected InlineDecision");
- unreached();
+ switch (d)
+ {
+ case InlineDecision::NEVER:
+ case InlineDecision::FAILURE:
+ case InlineDecision::SUCCESS:
+ return true;
+ case InlineDecision::UNDECIDED:
+ case InlineDecision::CANDIDATE:
+ return false;
+ default:
+ assert(!"Unexpected InlineDecision");
+ unreached();
}
}
@@ -375,13 +377,11 @@ void InlineContext::Dump(unsigned indent)
#if defined(DEBUG)
calleeName = compiler->eeGetMethodFullName(m_Callee);
#else
- calleeName = "callee";
+ calleeName = "callee";
#endif // defined(DEBUG)
-
}
- mdMethodDef calleeToken =
- compiler->info.compCompHnd->getMethodDefFromMethod(m_Callee);
+ mdMethodDef calleeToken = compiler->info.compCompHnd->getMethodDefFromMethod(m_Callee);
// Dump this node
if (m_Parent == nullptr)
@@ -397,15 +397,13 @@ void InlineContext::Dump(unsigned indent)
if (m_Offset == BAD_IL_OFFSET)
{
- printf("%*s[%u IL=???? TR=%06u %08X] [%s%s] %s\n", indent, "",
- m_Ordinal, m_TreeID, calleeToken,
+ printf("%*s[%u IL=???? TR=%06u %08X] [%s%s] %s\n", indent, "", m_Ordinal, m_TreeID, calleeToken,
inlineResult, inlineReason, calleeName);
}
else
{
IL_OFFSET offset = jitGetILoffs(m_Offset);
- printf("%*s[%u IL=%04d TR=%06u %08X] [%s%s] %s\n", indent, "",
- m_Ordinal, offset, m_TreeID, calleeToken,
+ printf("%*s[%u IL=%04d TR=%06u %08X] [%s%s] %s\n", indent, "", m_Ordinal, offset, m_TreeID, calleeToken,
inlineResult, inlineReason, calleeName);
}
}
@@ -444,10 +442,7 @@ void InlineContext::DumpData(unsigned indent)
{
// Root method... cons up a policy so we can display the name
InlinePolicy* policy = InlinePolicy::GetPolicy(compiler, true);
- printf("\nInlines [%u] into \"%s\" [%s]\n",
- m_InlineStrategy->GetInlineCount(),
- calleeName,
- policy->GetName());
+ printf("\nInlines [%u] into \"%s\" [%s]\n", m_InlineStrategy->GetInlineCount(), calleeName, policy->GetName());
}
else if (m_Success)
{
@@ -479,26 +474,24 @@ void InlineContext::DumpXml(FILE* file, unsigned indent)
m_Sibling->DumpXml(file, indent);
}
- const bool isRoot = m_Parent == nullptr;
- const bool hasChild = m_Child != nullptr;
+ const bool isRoot = m_Parent == nullptr;
+ const bool hasChild = m_Child != nullptr;
const char* inlineType = m_Success ? "Inline" : "FailedInline";
- unsigned newIndent = indent;
+ unsigned newIndent = indent;
if (!isRoot)
{
Compiler* compiler = m_InlineStrategy->GetCompiler();
- mdMethodDef calleeToken =
- compiler->info.compCompHnd->getMethodDefFromMethod(m_Callee);
- unsigned calleeHash =
- compiler->info.compCompHnd->getMethodHash(m_Callee);
+ mdMethodDef calleeToken = compiler->info.compCompHnd->getMethodDefFromMethod(m_Callee);
+ unsigned calleeHash = compiler->info.compCompHnd->getMethodHash(m_Callee);
const char* inlineReason = InlGetObservationString(m_Observation);
int offset = -1;
if (m_Offset != BAD_IL_OFFSET)
{
- offset = (int) jitGetILoffs(m_Offset);
+ offset = (int)jitGetILoffs(m_Offset);
}
fprintf(file, "%*s<%s>\n", indent, "", inlineType);
@@ -508,8 +501,7 @@ void InlineContext::DumpXml(FILE* file, unsigned indent)
fprintf(file, "%*s<Reason>%s</Reason>\n", indent + 2, "", inlineReason);
// Optionally, dump data about the last inline
- if ((JitConfig.JitInlineDumpData() != 0)
- && (this == m_InlineStrategy->GetLastContext()))
+ if ((JitConfig.JitInlineDumpData() != 0) && (this == m_InlineStrategy->GetLastContext()))
{
fprintf(file, "%*s<Data>", indent + 2, "");
m_InlineStrategy->DumpDataContents(file);
@@ -552,10 +544,7 @@ void InlineContext::DumpXml(FILE* file, unsigned indent)
// stmt - statement containing the call (if known)
// description - string describing the context of the decision
-InlineResult::InlineResult(Compiler* compiler,
- GenTreeCall* call,
- GenTreeStmt* stmt,
- const char* description)
+InlineResult::InlineResult(Compiler* compiler, GenTreeCall* call, GenTreeStmt* stmt, const char* description)
: m_RootCompiler(nullptr)
, m_Policy(nullptr)
, m_Call(call)
@@ -570,7 +559,7 @@ InlineResult::InlineResult(Compiler* compiler,
// Set the policy
const bool isPrejitRoot = false;
- m_Policy = InlinePolicy::GetPolicy(m_RootCompiler, isPrejitRoot);
+ m_Policy = InlinePolicy::GetPolicy(m_RootCompiler, isPrejitRoot);
// Pass along some optional information to the policy.
if (stmt != nullptr)
@@ -612,9 +601,7 @@ InlineResult::InlineResult(Compiler* compiler,
// We use the inlCallee member to track the method since logically
// it is the callee here.
-InlineResult::InlineResult(Compiler* compiler,
- CORINFO_METHOD_HANDLE method,
- const char* description)
+InlineResult::InlineResult(Compiler* compiler, CORINFO_METHOD_HANDLE method, const char* description)
: m_RootCompiler(nullptr)
, m_Policy(nullptr)
, m_Call(nullptr)
@@ -629,7 +616,7 @@ InlineResult::InlineResult(Compiler* compiler,
// Set the policy
const bool isPrejitRoot = true;
- m_Policy = InlinePolicy::GetPolicy(m_RootCompiler, isPrejitRoot);
+ m_Policy = InlinePolicy::GetPolicy(m_RootCompiler, isPrejitRoot);
}
//------------------------------------------------------------------------
@@ -708,7 +695,7 @@ void InlineResult::Report()
JITDUMP("\nINLINER: Marking %s as NOINLINE because of %s\n", callee, obsString);
}
-#endif // DEBUG
+#endif // DEBUG
COMP_HANDLE comp = m_RootCompiler->info.compCompHnd;
comp->setMethodAttribs(m_Callee, CORINFO_FLG_BAD_INLINEE);
@@ -796,7 +783,6 @@ InlineStrategy::InlineStrategy(Compiler* compiler)
}
#endif // DEBUG
-
}
//------------------------------------------------------------------------
@@ -907,13 +893,10 @@ void InlineStrategy::DumpCsvData(FILE* fp)
//
// So they are "cheaper" that late failures.
- unsigned profitableCandidateCount =
- m_DiscretionaryCandidateCount - m_UnprofitableCandidateCount;
+ unsigned profitableCandidateCount = m_DiscretionaryCandidateCount - m_UnprofitableCandidateCount;
- unsigned earlyFailCount = m_CandidateCount
- - m_AlwaysCandidateCount
- - m_ForceCandidateCount
- - profitableCandidateCount;
+ unsigned earlyFailCount =
+ m_CandidateCount - m_AlwaysCandidateCount - m_ForceCandidateCount - profitableCandidateCount;
fprintf(fp, "%u,", earlyFailCount);
@@ -1005,8 +988,8 @@ int InlineStrategy::EstimateSize(InlineContext* context)
// native code size is fairly well predicted by IL size.
//
// Model below is for x64 on windows.
- unsigned ilSize = context->GetILSize();
- int estimate = (1312 + 228 * ilSize) / 10;
+ unsigned ilSize = context->GetILSize();
+ int estimate = (1312 + 228 * ilSize) / 10;
return estimate;
}
@@ -1036,13 +1019,11 @@ void InlineStrategy::NoteOutcome(InlineContext* context)
// Keep track of the inline targeted for data collection or,
// if we don't have one (yet), the last successful inline.
- bool updateLast =
- (m_LastSuccessfulPolicy == nullptr) ||
- !m_LastSuccessfulPolicy->IsDataCollectionTarget();
+ bool updateLast = (m_LastSuccessfulPolicy == nullptr) || !m_LastSuccessfulPolicy->IsDataCollectionTarget();
if (updateLast)
{
- m_LastContext = context;
+ m_LastContext = context;
m_LastSuccessfulPolicy = context->m_Policy;
}
else
@@ -1063,7 +1044,7 @@ void InlineStrategy::NoteOutcome(InlineContext* context)
// increase expense.
InlineContext* currentContext = context;
- bool isForceInline = false;
+ bool isForceInline = false;
while (currentContext != m_RootContext)
{
@@ -1082,7 +1063,7 @@ void InlineStrategy::NoteOutcome(InlineContext* context)
break;
}
- isForceInline = true;
+ isForceInline = true;
currentContext = currentContext->GetParent();
}
@@ -1181,25 +1162,25 @@ InlineContext* InlineStrategy::NewSuccess(InlineInfo* inlineInfo)
noway_assert(parentContext != nullptr);
- calleeContext->m_Code = calleeIL;
+ calleeContext->m_Code = calleeIL;
calleeContext->m_ILSize = calleeILSize;
calleeContext->m_Parent = parentContext;
// Push on front here will put siblings in reverse lexical
// order which we undo in the dumper
- calleeContext->m_Sibling = parentContext->m_Child;
- parentContext->m_Child = calleeContext;
- calleeContext->m_Child = nullptr;
- calleeContext->m_Offset = stmt->AsStmt()->gtStmtILoffsx;
+ calleeContext->m_Sibling = parentContext->m_Child;
+ parentContext->m_Child = calleeContext;
+ calleeContext->m_Child = nullptr;
+ calleeContext->m_Offset = stmt->AsStmt()->gtStmtILoffsx;
calleeContext->m_Observation = inlineInfo->inlineResult->GetObservation();
- calleeContext->m_Success = true;
+ calleeContext->m_Success = true;
#if defined(DEBUG) || defined(INLINE_DATA)
InlinePolicy* policy = inlineInfo->inlineResult->GetPolicy();
- calleeContext->m_Policy = policy;
+ calleeContext->m_Policy = policy;
calleeContext->m_CodeSizeEstimate = policy->CodeSizeEstimate();
- calleeContext->m_Callee = inlineInfo->fncHandle;
+ calleeContext->m_Callee = inlineInfo->fncHandle;
// +1 here since we set this before calling NoteOutcome.
calleeContext->m_Ordinal = m_InlineCount + 1;
// Update offset with more accurate info
@@ -1232,8 +1213,7 @@ InlineContext* InlineStrategy::NewSuccess(InlineInfo* inlineInfo)
// A new InlineContext for diagnostic purposes, or nullptr if
// the desired context could not be created.
-InlineContext* InlineStrategy::NewFailure(GenTree* stmt,
- InlineResult* inlineResult)
+InlineContext* InlineStrategy::NewFailure(GenTree* stmt, InlineResult* inlineResult)
{
// Check for a parent context first. We may insert new statements
// between the caller and callee that do not pick up either's
@@ -1258,13 +1238,13 @@ InlineContext* InlineStrategy::NewFailure(GenTree* stmt,
failedContext->m_Parent = parentContext;
// Push on front here will put siblings in reverse lexical
// order which we undo in the dumper
- failedContext->m_Sibling = parentContext->m_Child;
- parentContext->m_Child = failedContext;
- failedContext->m_Child = nullptr;
- failedContext->m_Offset = stmt->AsStmt()->gtStmtILoffsx;
+ failedContext->m_Sibling = parentContext->m_Child;
+ parentContext->m_Child = failedContext;
+ failedContext->m_Child = nullptr;
+ failedContext->m_Offset = stmt->AsStmt()->gtStmtILoffsx;
failedContext->m_Observation = inlineResult->GetObservation();
- failedContext->m_Callee = inlineResult->GetCallee();
- failedContext->m_Success = false;
+ failedContext->m_Callee = inlineResult->GetCallee();
+ failedContext->m_Success = false;
#if defined(DEBUG) || defined(INLINE_DATA)
@@ -1291,22 +1271,17 @@ void InlineStrategy::Dump()
{
m_RootContext->Dump();
- printf("Budget: initialTime=%d, finalTime=%d, initialBudget=%d, currentBudget=%d\n",
- m_InitialTimeEstimate,
- m_CurrentTimeEstimate,
- m_InitialTimeBudget,
- m_CurrentTimeBudget);
+ printf("Budget: initialTime=%d, finalTime=%d, initialBudget=%d, currentBudget=%d\n", m_InitialTimeEstimate,
+ m_CurrentTimeEstimate, m_InitialTimeBudget, m_CurrentTimeBudget);
if (m_CurrentTimeBudget > m_InitialTimeBudget)
{
- printf("Budget: increased by %d because of force inlines\n",
- m_CurrentTimeBudget - m_InitialTimeBudget);
+ printf("Budget: increased by %d because of force inlines\n", m_CurrentTimeBudget - m_InitialTimeBudget);
}
if (m_CurrentTimeEstimate > m_CurrentTimeBudget)
{
- printf("Budget: went over budget by %d\n",
- m_CurrentTimeEstimate - m_CurrentTimeBudget);
+ printf("Budget: went over budget by %d\n", m_CurrentTimeEstimate - m_CurrentTimeBudget);
}
if (m_HasForceViaDiscretionary)
@@ -1314,9 +1289,7 @@ void InlineStrategy::Dump()
printf("Budget: discretionary inline caused a force inline\n");
}
- printf("Budget: initialSize=%d, finalSize=%d\n",
- m_InitialSizeEstimate,
- m_CurrentSizeEstimate);
+ printf("Budget: initialSize=%d, finalSize=%d\n", m_InitialSizeEstimate, m_CurrentSizeEstimate);
}
// Static to track emission of the inline data header
@@ -1374,7 +1347,7 @@ void InlineStrategy::DumpData()
void InlineStrategy::DumpDataEnsurePolicyIsSet()
{
// Cache references to compiler substructures.
- const Compiler::Info& info = m_Compiler->info;
+ const Compiler::Info& info = m_Compiler->info;
const Compiler::Options& opts = m_Compiler->opts;
// If there weren't any successful inlines, we won't have a
@@ -1382,7 +1355,7 @@ void InlineStrategy::DumpDataEnsurePolicyIsSet()
if (m_LastSuccessfulPolicy == nullptr)
{
const bool isPrejitRoot = (opts.eeFlags & CORJIT_FLG_PREJIT) != 0;
- m_LastSuccessfulPolicy = InlinePolicy::GetPolicy(m_Compiler, isPrejitRoot);
+ m_LastSuccessfulPolicy = InlinePolicy::GetPolicy(m_Compiler, isPrejitRoot);
// Add in a bit of data....
const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0;
@@ -1401,10 +1374,7 @@ void InlineStrategy::DumpDataHeader(FILE* file)
{
DumpDataEnsurePolicyIsSet();
const int limit = JitConfig.JitInlineLimit();
- fprintf(file,
- "*** Inline Data: Policy=%s JitInlineLimit=%d ***\n",
- m_LastSuccessfulPolicy->GetName(),
- limit);
+ fprintf(file, "*** Inline Data: Policy=%s JitInlineLimit=%d ***\n", m_LastSuccessfulPolicy->GetName(), limit);
DumpDataSchema(file);
fprintf(file, "\n");
}
@@ -1433,7 +1403,7 @@ void InlineStrategy::DumpDataContents(FILE* file)
DumpDataEnsurePolicyIsSet();
// Cache references to compiler substructures.
- const Compiler::Info& info = m_Compiler->info;
+ const Compiler::Info& info = m_Compiler->info;
const Compiler::Options& opts = m_Compiler->opts;
// We'd really like the method identifier to be unique and
@@ -1442,28 +1412,20 @@ void InlineStrategy::DumpDataContents(FILE* file)
//
// Post processing will have to filter out all data from
// methods where the root entry appears multiple times.
- mdMethodDef currentMethodToken =
- info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd);
+ mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd);
// Convert time spent jitting into microseconds
- unsigned microsecondsSpentJitting = 0;
- unsigned __int64 compCycles = m_Compiler->getInlineCycleCount();
+ unsigned microsecondsSpentJitting = 0;
+ unsigned __int64 compCycles = m_Compiler->getInlineCycleCount();
if (compCycles > 0)
{
- double countsPerSec = CycleTimer::CyclesPerSecond();
- double counts = (double) compCycles;
- microsecondsSpentJitting = (unsigned) ((counts / countsPerSec) * 1000 * 1000);
+ double countsPerSec = CycleTimer::CyclesPerSecond();
+ double counts = (double)compCycles;
+ microsecondsSpentJitting = (unsigned)((counts / countsPerSec) * 1000 * 1000);
}
- fprintf(file,
- "%08X,%u,%u,%u,%u,%d,%d",
- currentMethodToken,
- m_InlineCount,
- info.compTotalHotCodeSize,
- info.compTotalColdCodeSize,
- microsecondsSpentJitting,
- m_CurrentSizeEstimate / 10,
- m_CurrentTimeEstimate);
+ fprintf(file, "%08X,%u,%u,%u,%u,%d,%d", currentMethodToken, m_InlineCount, info.compTotalHotCodeSize,
+ info.compTotalColdCodeSize, microsecondsSpentJitting, m_CurrentSizeEstimate / 10, m_CurrentTimeEstimate);
m_LastSuccessfulPolicy->DumpData(file);
}
@@ -1519,10 +1481,10 @@ void InlineStrategy::DumpXml(FILE* file, unsigned indent)
}
// Cache references to compiler substructures.
- const Compiler::Info& info = m_Compiler->info;
+ const Compiler::Info& info = m_Compiler->info;
const Compiler::Options& opts = m_Compiler->opts;
- const bool isPrejitRoot = (opts.eeFlags & CORJIT_FLG_PREJIT) != 0;
+ const bool isPrejitRoot = (opts.eeFlags & CORJIT_FLG_PREJIT) != 0;
const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0;
// We'd really like the method identifier to be unique and
@@ -1531,19 +1493,18 @@ void InlineStrategy::DumpXml(FILE* file, unsigned indent)
//
// Post processing will have to filter out all data from
// methods where the root entry appears multiple times.
- mdMethodDef currentMethodToken =
- info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd);
+ mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd);
unsigned hash = info.compMethodHash();
// Convert time spent jitting into microseconds
- unsigned microsecondsSpentJitting = 0;
- unsigned __int64 compCycles = m_Compiler->getInlineCycleCount();
+ unsigned microsecondsSpentJitting = 0;
+ unsigned __int64 compCycles = m_Compiler->getInlineCycleCount();
if (compCycles > 0)
{
- double countsPerSec = CycleTimer::CyclesPerSecond();
- double counts = (double) compCycles;
- microsecondsSpentJitting = (unsigned) ((counts / countsPerSec) * 1000 * 1000);
+ double countsPerSec = CycleTimer::CyclesPerSecond();
+ double counts = (double)compCycles;
+ microsecondsSpentJitting = (unsigned)((counts / countsPerSec) * 1000 * 1000);
}
// Get method name just for root method, to make it a bit easier
@@ -1556,23 +1517,23 @@ void InlineStrategy::DumpXml(FILE* file, unsigned indent)
// Ok to truncate name, just ensure it's null terminated.
char buf[64];
strncpy(buf, methodName, sizeof(buf));
- buf[sizeof(buf)-1] = 0;
+ buf[sizeof(buf) - 1] = 0;
for (int i = 0; i < sizeof(buf); i++)
{
switch (buf[i])
{
- case '<':
- buf[i] = '[';
- break;
- case '>':
- buf[i] = ']';
- break;
- case '&':
- buf[i] = '#';
- break;
- default:
- break;
+ case '<':
+ buf[i] = '[';
+ break;
+ case '>':
+ buf[i] = ']';
+ break;
+ case '&':
+ buf[i] = '#';
+ break;
+ default:
+ break;
}
}
@@ -1585,7 +1546,7 @@ void InlineStrategy::DumpXml(FILE* file, unsigned indent)
fprintf(file, "%*s<ColdSize>%u</ColdSize>\n", indent + 2, "", info.compTotalColdCodeSize);
fprintf(file, "%*s<JitTime>%u</JitTime>\n", indent + 2, "", microsecondsSpentJitting);
fprintf(file, "%*s<SizeEstimate>%u</SizeEstimate>\n", indent + 2, "", m_CurrentSizeEstimate / 10);
- fprintf(file, "%*s<TimeEstimate>%u</TimeEstimate>\n", indent + 2, "", m_CurrentTimeEstimate);
+ fprintf(file, "%*s<TimeEstimate>%u</TimeEstimate>\n", indent + 2, "", m_CurrentTimeEstimate);
// Root context will be null if we're not optimizing the method.
//
@@ -1648,7 +1609,7 @@ bool InlineStrategy::IsNoInline(ICorJitInfo* info, CORINFO_METHOD_HANDLE method)
#if defined(DEBUG) || defined(INLINE_DATA)
static ConfigMethodRange range;
- const wchar_t* noInlineRange = JitConfig.JitNoInlineRange();
+ const wchar_t* noInlineRange = JitConfig.JitNoInlineRange();
if (noInlineRange == nullptr)
{
@@ -1676,5 +1637,4 @@ bool InlineStrategy::IsNoInline(ICorJitInfo* info, CORINFO_METHOD_HANDLE method)
return false;
#endif // defined(DEBUG) || defined(INLINE_DATA)
-
}
diff --git a/src/jit/inline.h b/src/jit/inline.h
index 8e75c4a807..d568854a69 100644
--- a/src/jit/inline.h
+++ b/src/jit/inline.h
@@ -78,21 +78,17 @@
// Implementation limits
#ifndef LEGACY_BACKEND
-const unsigned int MAX_INL_ARGS = 32; // does not include obj pointer
-const unsigned int MAX_INL_LCLS = 32;
-#else // LEGACY_BACKEND
-const unsigned int MAX_INL_ARGS = 10; // does not include obj pointer
-const unsigned int MAX_INL_LCLS = 8;
+const unsigned int MAX_INL_ARGS = 32; // does not include obj pointer
+const unsigned int MAX_INL_LCLS = 32;
+#else // LEGACY_BACKEND
+const unsigned int MAX_INL_ARGS = 10; // does not include obj pointer
+const unsigned int MAX_INL_LCLS = 8;
#endif // LEGACY_BACKEND
// Flags lost during inlining.
-#define CORJIT_FLG_LOST_WHEN_INLINING (CORJIT_FLG_BBOPT | \
- CORJIT_FLG_BBINSTR | \
- CORJIT_FLG_PROF_ENTERLEAVE | \
- CORJIT_FLG_DEBUG_EnC | \
- CORJIT_FLG_DEBUG_INFO \
- )
+#define CORJIT_FLG_LOST_WHEN_INLINING \
+ (CORJIT_FLG_BBOPT | CORJIT_FLG_BBINSTR | CORJIT_FLG_PROF_ENTERLEAVE | CORJIT_FLG_DEBUG_EnC | CORJIT_FLG_DEBUG_INFO)
// Forward declarations
@@ -103,12 +99,12 @@ class InlineStrategy;
enum class InlineCallsiteFrequency
{
- UNUSED, // n/a
- RARE, // once in a blue moon
- BORING, // normal call site
- WARM, // seen during profiling
- LOOP, // in a loop
- HOT // very frequent
+ UNUSED, // n/a
+ RARE, // once in a blue moon
+ BORING, // normal call site
+ WARM, // seen during profiling
+ LOOP, // in a loop
+ HOT // very frequent
};
// InlineDecision describes the various states the jit goes through when
@@ -157,27 +153,27 @@ bool InlDecisionIsDecided(InlineDecision d);
enum class InlineTarget
{
- CALLEE, // observation applies to all calls to this callee
- CALLER, // observation applies to all calls made by this caller
- CALLSITE // observation applies to a specific call site
+ CALLEE, // observation applies to all calls to this callee
+ CALLER, // observation applies to all calls made by this caller
+ CALLSITE // observation applies to a specific call site
};
// InlineImpact describe the possible impact of an inline observation.
enum class InlineImpact
{
- FATAL, // inlining impossible, unsafe to evaluate further
- FUNDAMENTAL, // inlining impossible for fundamental reasons, deeper exploration safe
- LIMITATION, // inlining impossible because of jit limitations, deeper exploration safe
- PERFORMANCE, // inlining inadvisable because of performance concerns
- INFORMATION // policy-free observation to provide data for later decision making
+ FATAL, // inlining impossible, unsafe to evaluate further
+ FUNDAMENTAL, // inlining impossible for fundamental reasons, deeper exploration safe
+ LIMITATION, // inlining impossible because of jit limitations, deeper exploration safe
+ PERFORMANCE, // inlining inadvisable because of performance concerns
+ INFORMATION // policy-free observation to provide data for later decision making
};
// InlineObservation describes the set of possible inline observations.
enum class InlineObservation
{
-#define INLINE_OBSERVATION(name, type, description, impact, scope) scope ## _ ## name,
+#define INLINE_OBSERVATION(name, type, description, impact, scope) scope##_##name,
#include "inline.def"
#undef INLINE_OBSERVATION
};
@@ -216,18 +212,25 @@ InlineImpact InlGetImpact(InlineObservation obs);
class InlinePolicy
{
public:
-
// Factory method for getting policies
static InlinePolicy* GetPolicy(Compiler* compiler, bool isPrejitRoot);
// Obligatory virtual dtor
- virtual ~InlinePolicy() {}
+ virtual ~InlinePolicy()
+ {
+ }
// Get the current decision
- InlineDecision GetDecision() const { return m_Decision; }
+ InlineDecision GetDecision() const
+ {
+ return m_Decision;
+ }
// Get the observation responsible for the result
- InlineObservation GetObservation() const { return m_Observation; }
+ InlineObservation GetObservation() const
+ {
+ return m_Observation;
+ }
// Policy observations
virtual void NoteSuccess() = 0;
@@ -236,15 +239,21 @@ public:
virtual void NoteInt(InlineObservation obs, int value) = 0;
// Optional observations. Most policies ignore these.
- virtual void NoteContext(InlineContext* context) { (void) context; }
- virtual void NoteOffset(IL_OFFSETX offset) { (void) offset; }
+ virtual void NoteContext(InlineContext* context)
+ {
+ (void)context;
+ }
+ virtual void NoteOffset(IL_OFFSETX offset)
+ {
+ (void)offset;
+ }
// Policy determinations
virtual void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) = 0;
// Policy policies
virtual bool PropagateNeverToRuntime() const = 0;
- virtual bool IsLegacyPolicy() const = 0;
+ virtual bool IsLegacyPolicy() const = 0;
// Policy estimates
virtual int CodeSizeEstimate() = 0;
@@ -254,22 +263,28 @@ public:
// Name of the policy
virtual const char* GetName() const = 0;
// Detailed data value dump
- virtual void DumpData(FILE* file) const { }
+ virtual void DumpData(FILE* file) const
+ {
+ }
// Detailed data name dump
- virtual void DumpSchema(FILE* file) const { }
+ virtual void DumpSchema(FILE* file) const
+ {
+ }
// True if this is the inline targeted by data collection
- bool IsDataCollectionTarget() { return m_IsDataCollectionTarget; }
+ bool IsDataCollectionTarget()
+ {
+ return m_IsDataCollectionTarget;
+ }
#endif // defined(DEBUG) || defined(INLINE_DATA)
protected:
-
InlinePolicy(bool isPrejitRoot)
- : m_Decision(InlineDecision::UNDECIDED)
- , m_Observation(InlineObservation::CALLEE_UNUSED_INITIAL)
- , m_IsPrejitRoot(isPrejitRoot)
+ : m_Decision(InlineDecision::UNDECIDED), m_Observation(InlineObservation::CALLEE_UNUSED_INITIAL),
+ m_IsPrejitRoot(isPrejitRoot)
#if defined(DEBUG) || defined(INLINE_DATA)
- , m_IsDataCollectionTarget(false)
+ ,
+ m_IsDataCollectionTarget(false)
#endif // defined(DEBUG) || defined(INLINE_DATA)
{
@@ -277,20 +292,18 @@ protected:
}
private:
-
// No copying or assignment supported
InlinePolicy(const InlinePolicy&) = delete;
InlinePolicy& operator=(const InlinePolicy&) = delete;
protected:
-
InlineDecision m_Decision;
InlineObservation m_Observation;
bool m_IsPrejitRoot;
#if defined(DEBUG) || defined(INLINE_DATA)
- bool m_IsDataCollectionTarget;
+ bool m_IsDataCollectionTarget;
#endif // defined(DEBUG) || defined(INLINE_DATA)
};
@@ -301,19 +314,13 @@ protected:
class InlineResult
{
public:
-
// Construct a new InlineResult to help evaluate a
// particular call for inlining.
- InlineResult(Compiler* compiler,
- GenTreeCall* call,
- GenTreeStmt* stmt,
- const char* description);
+ InlineResult(Compiler* compiler, GenTreeCall* call, GenTreeStmt* stmt, const char* description);
// Construct a new InlineResult to evaluate a particular
// method to see if it is inlineable.
- InlineResult(Compiler* compiler,
- CORINFO_METHOD_HANDLE method,
- const char* description);
+ InlineResult(Compiler* compiler, CORINFO_METHOD_HANDLE method, const char* description);
// Has the policy determined this inline should fail?
bool IsFailure() const
@@ -345,7 +352,7 @@ public:
bool IsDiscretionaryCandidate() const
{
bool result = InlDecisionIsCandidate(m_Policy->GetDecision()) &&
- (m_Policy->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
+ (m_Policy->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
return result;
}
@@ -474,7 +481,6 @@ public:
}
private:
-
// No copying or assignment allowed.
InlineResult(const InlineResult&) = delete;
InlineResult& operator=(const InlineResult&) = delete;
@@ -482,14 +488,14 @@ private:
// Report/log/dump decision as appropriate
void Report();
- Compiler* m_RootCompiler;
- InlinePolicy* m_Policy;
- GenTreeCall* m_Call;
- InlineContext* m_InlineContext;
- CORINFO_METHOD_HANDLE m_Caller; // immediate caller's handle
- CORINFO_METHOD_HANDLE m_Callee;
- const char* m_Description;
- bool m_Reported;
+ Compiler* m_RootCompiler;
+ InlinePolicy* m_Policy;
+ GenTreeCall* m_Call;
+ InlineContext* m_InlineContext;
+ CORINFO_METHOD_HANDLE m_Caller; // immediate caller's handle
+ CORINFO_METHOD_HANDLE m_Callee;
+ const char* m_Description;
+ bool m_Reported;
};
// InlineCandidateInfo provides basic information about a particular
@@ -497,13 +503,13 @@ private:
struct InlineCandidateInfo
{
- DWORD dwRestrictions;
- CORINFO_METHOD_INFO methInfo;
- unsigned methAttr;
- CORINFO_CLASS_HANDLE clsHandle;
- unsigned clsAttr;
- var_types fncRetType;
- CORINFO_METHOD_HANDLE ilCallerHandle; //the logical IL caller of this inlinee.
+ DWORD dwRestrictions;
+ CORINFO_METHOD_INFO methInfo;
+ unsigned methAttr;
+ CORINFO_CLASS_HANDLE clsHandle;
+ unsigned clsAttr;
+ var_types fncRetType;
+ CORINFO_METHOD_HANDLE ilCallerHandle; // the logical IL caller of this inlinee.
CORINFO_CONTEXT_HANDLE exactContextHnd;
CorInfoInitClassResult initClassResult;
};
@@ -512,61 +518,63 @@ struct InlineCandidateInfo
struct InlArgInfo
{
- unsigned argIsUsed :1; // is this arg used at all?
- unsigned argIsInvariant:1; // the argument is a constant or a local variable address
- unsigned argIsLclVar :1; // the argument is a local variable
- unsigned argIsThis :1; // the argument is the 'this' pointer
- unsigned argHasSideEff :1; // the argument has side effects
- unsigned argHasGlobRef :1; // the argument has a global ref
- unsigned argHasTmp :1; // the argument will be evaluated to a temp
- unsigned argIsByRefToStructLocal:1; // Is this arg an address of a struct local or a normed struct local or a field in them?
- unsigned argHasLdargaOp:1; // Is there LDARGA(s) operation on this argument?
- unsigned argHasStargOp :1; // Is there STARG(s) operation on this argument?
-
- unsigned argTmpNum; // the argument tmp number
- GenTreePtr argNode;
- GenTreePtr argBashTmpNode; // tmp node created, if it may be replaced with actual arg
+ unsigned argIsUsed : 1; // is this arg used at all?
+ unsigned argIsInvariant : 1; // the argument is a constant or a local variable address
+ unsigned argIsLclVar : 1; // the argument is a local variable
+ unsigned argIsThis : 1; // the argument is the 'this' pointer
+ unsigned argHasSideEff : 1; // the argument has side effects
+ unsigned argHasGlobRef : 1; // the argument has a global ref
+ unsigned argHasTmp : 1; // the argument will be evaluated to a temp
+ unsigned argIsByRefToStructLocal : 1; // Is this arg an address of a struct local or a normed struct local or a
+ // field in them?
+ unsigned argHasLdargaOp : 1; // Is there LDARGA(s) operation on this argument?
+ unsigned argHasStargOp : 1; // Is there STARG(s) operation on this argument?
+
+ unsigned argTmpNum; // the argument tmp number
+ GenTreePtr argNode;
+ GenTreePtr argBashTmpNode; // tmp node created, if it may be replaced with actual arg
};
// InlArgInfo describes inline candidate local variable properties.
struct InlLclVarInfo
{
- var_types lclTypeInfo;
- typeInfo lclVerTypeInfo;
- bool lclHasLdlocaOp; // Is there LDLOCA(s) operation on this argument?
+ var_types lclTypeInfo;
+ typeInfo lclVerTypeInfo;
+ bool lclHasLdlocaOp; // Is there LDLOCA(s) operation on this argument?
};
// InlineInfo provides detailed information about a particular inline candidate.
struct InlineInfo
{
- Compiler * InlinerCompiler; // The Compiler instance for the caller (i.e. the inliner)
- Compiler * InlineRoot; // The Compiler instance that is the root of the inlining tree of which the owner of "this" is a member.
+ Compiler* InlinerCompiler; // The Compiler instance for the caller (i.e. the inliner)
+ Compiler* InlineRoot; // The Compiler instance that is the root of the inlining tree of which the owner of "this" is
+ // a member.
CORINFO_METHOD_HANDLE fncHandle;
- InlineCandidateInfo * inlineCandidateInfo;
+ InlineCandidateInfo* inlineCandidateInfo;
- InlineResult* inlineResult;
+ InlineResult* inlineResult;
- GenTreePtr retExpr; // The return expression of the inlined candidate.
+ GenTreePtr retExpr; // The return expression of the inlined candidate.
CORINFO_CONTEXT_HANDLE tokenLookupContextHandle; // The context handle that will be passed to
// impTokenLookupContextHandle in Inlinee's Compiler.
- unsigned argCnt;
- InlArgInfo inlArgInfo[MAX_INL_ARGS + 1];
- int lclTmpNum[MAX_INL_LCLS]; // map local# -> temp# (-1 if unused)
- InlLclVarInfo lclVarInfo[MAX_INL_LCLS + MAX_INL_ARGS + 1]; // type information from local sig
+ unsigned argCnt;
+ InlArgInfo inlArgInfo[MAX_INL_ARGS + 1];
+ int lclTmpNum[MAX_INL_LCLS]; // map local# -> temp# (-1 if unused)
+ InlLclVarInfo lclVarInfo[MAX_INL_LCLS + MAX_INL_ARGS + 1]; // type information from local sig
- bool thisDereferencedFirst;
+ bool thisDereferencedFirst;
#ifdef FEATURE_SIMD
- bool hasSIMDTypeArgLocalOrReturn;
+ bool hasSIMDTypeArgLocalOrReturn;
#endif // FEATURE_SIMD
- GenTreeCall * iciCall; // The GT_CALL node to be inlined.
- GenTree * iciStmt; // The statement iciCall is in.
- BasicBlock * iciBlock; // The basic block iciStmt is in.
+ GenTreeCall* iciCall; // The GT_CALL node to be inlined.
+ GenTree* iciStmt; // The statement iciCall is in.
+ BasicBlock* iciBlock; // The basic block iciStmt is in.
};
// InlineContext tracks the inline history in a method.
@@ -591,7 +599,6 @@ class InlineContext
friend class InlineStrategy;
public:
-
#if defined(DEBUG) || defined(INLINE_DATA)
// Dump the full subtree, including failures
@@ -660,31 +667,28 @@ public:
}
private:
-
InlineContext(InlineStrategy* strategy);
private:
-
- InlineStrategy* m_InlineStrategy; // overall strategy
- InlineContext* m_Parent; // logical caller (parent)
- InlineContext* m_Child; // first child
- InlineContext* m_Sibling; // next child of the parent
- BYTE* m_Code; // address of IL buffer for the method
- unsigned m_ILSize; // size of IL buffer for the method
- IL_OFFSETX m_Offset; // call site location within parent
- InlineObservation m_Observation; // what lead to this inline
- int m_CodeSizeEstimate; // in bytes * 10
- bool m_Success; // true if this was a successful inline
+ InlineStrategy* m_InlineStrategy; // overall strategy
+ InlineContext* m_Parent; // logical caller (parent)
+ InlineContext* m_Child; // first child
+ InlineContext* m_Sibling; // next child of the parent
+ BYTE* m_Code; // address of IL buffer for the method
+ unsigned m_ILSize; // size of IL buffer for the method
+ IL_OFFSETX m_Offset; // call site location within parent
+    InlineObservation m_Observation; // what led to this inline
+ int m_CodeSizeEstimate; // in bytes * 10
+ bool m_Success; // true if this was a successful inline
#if defined(DEBUG) || defined(INLINE_DATA)
- InlinePolicy* m_Policy; // policy that evaluated this inline
- CORINFO_METHOD_HANDLE m_Callee; // handle to the method
- unsigned m_TreeID; // ID of the GenTreeCall
- unsigned m_Ordinal; // Ordinal number of this inline
+ InlinePolicy* m_Policy; // policy that evaluated this inline
+ CORINFO_METHOD_HANDLE m_Callee; // handle to the method
+ unsigned m_TreeID; // ID of the GenTreeCall
+ unsigned m_Ordinal; // Ordinal number of this inline
#endif // defined(DEBUG) || defined(INLINE_DATA)
-
};
// The InlineStrategy holds the per-method persistent inline state.
@@ -695,16 +699,14 @@ class InlineStrategy
{
public:
-
// Construct a new inline strategy.
InlineStrategy(Compiler* compiler);
// Create context for a successful inline.
- InlineContext* NewSuccess(InlineInfo* inlineInfo);
+ InlineContext* NewSuccess(InlineInfo* inlineInfo);
// Create context for a failing inline.
- InlineContext* NewFailure(GenTree* stmt,
- InlineResult* inlineResult);
+ InlineContext* NewFailure(GenTree* stmt, InlineResult* inlineResult);
// Compiler associated with this strategy
Compiler* GetCompiler() const
@@ -826,13 +828,12 @@ public:
// Some inline limit values
enum
{
- ALWAYS_INLINE_SIZE = 16,
- IMPLEMENTATION_MAX_INLINE_SIZE = _UI16_MAX,
+ ALWAYS_INLINE_SIZE = 16,
+ IMPLEMENTATION_MAX_INLINE_SIZE = _UI16_MAX,
IMPLEMENTATION_MAX_INLINE_DEPTH = 1000
};
private:
-
// Create a context for the root method.
InlineContext* NewRoot();
@@ -886,9 +887,8 @@ private:
bool m_HasForceViaDiscretionary;
#if defined(DEBUG) || defined(INLINE_DATA)
- long m_MethodXmlFilePosition;
+ long m_MethodXmlFilePosition;
#endif // defined(DEBUG) || defined(INLINE_DATA)
-
};
#endif // _INLINE_H_
diff --git a/src/jit/inlinepolicy.cpp b/src/jit/inlinepolicy.cpp
index 9731f3156f..f80f3a5ec0 100644
--- a/src/jit/inlinepolicy.cpp
+++ b/src/jit/inlinepolicy.cpp
@@ -147,23 +147,22 @@ void LegalPolicy::SetFailure(InlineObservation obs)
switch (m_Decision)
{
- case InlineDecision::FAILURE:
- // Repeated failure only ok if evaluating a prejit root
- // (since we can't fail fast because we're not inlining)
- // or if inlining and the observation is CALLSITE_TOO_MANY_LOCALS
- // (since we can't fail fast from lvaGrabTemp).
- assert(m_IsPrejitRoot ||
- (obs == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
- break;
- case InlineDecision::UNDECIDED:
- case InlineDecision::CANDIDATE:
- m_Decision = InlineDecision::FAILURE;
- m_Observation = obs;
- break;
- default:
- // SUCCESS, NEVER, or ??
- assert(!"Unexpected m_Decision");
- unreached();
+ case InlineDecision::FAILURE:
+ // Repeated failure only ok if evaluating a prejit root
+ // (since we can't fail fast because we're not inlining)
+ // or if inlining and the observation is CALLSITE_TOO_MANY_LOCALS
+ // (since we can't fail fast from lvaGrabTemp).
+ assert(m_IsPrejitRoot || (obs == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
+ break;
+ case InlineDecision::UNDECIDED:
+ case InlineDecision::CANDIDATE:
+ m_Decision = InlineDecision::FAILURE;
+ m_Observation = obs;
+ break;
+ default:
+ // SUCCESS, NEVER, or ??
+ assert(!"Unexpected m_Decision");
+ unreached();
}
}
@@ -180,19 +179,19 @@ void LegalPolicy::SetNever(InlineObservation obs)
switch (m_Decision)
{
- case InlineDecision::NEVER:
- // Repeated never only ok if evaluating a prejit root
- assert(m_IsPrejitRoot);
- break;
- case InlineDecision::UNDECIDED:
- case InlineDecision::CANDIDATE:
- m_Decision = InlineDecision::NEVER;
- m_Observation = obs;
- break;
- default:
- // SUCCESS, FAILURE or ??
- assert(!"Unexpected m_Decision");
- unreached();
+ case InlineDecision::NEVER:
+ // Repeated never only ok if evaluating a prejit root
+ assert(m_IsPrejitRoot);
+ break;
+ case InlineDecision::UNDECIDED:
+ case InlineDecision::CANDIDATE:
+ m_Decision = InlineDecision::NEVER;
+ m_Observation = obs;
+ break;
+ default:
+ // SUCCESS, FAILURE or ??
+ assert(!"Unexpected m_Decision");
+ unreached();
}
}
@@ -219,7 +218,7 @@ void LegalPolicy::SetCandidate(InlineObservation obs)
assert(!InlDecisionIsSuccess(m_Decision));
// Update, overriding any previous candidacy.
- m_Decision = InlineDecision::CANDIDATE;
+ m_Decision = InlineDecision::CANDIDATE;
m_Observation = obs;
}
@@ -249,79 +248,79 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
// Handle most information here
bool isInformation = (impact == InlineImpact::INFORMATION);
- bool propagate = !isInformation;
+ bool propagate = !isInformation;
if (isInformation)
{
switch (obs)
{
- case InlineObservation::CALLEE_IS_FORCE_INLINE:
- // We may make the force-inline observation more than
- // once. All observations should agree.
- assert(!m_IsForceInlineKnown || (m_IsForceInline == value));
- m_IsForceInline = value;
- m_IsForceInlineKnown = true;
- break;
+ case InlineObservation::CALLEE_IS_FORCE_INLINE:
+ // We may make the force-inline observation more than
+ // once. All observations should agree.
+ assert(!m_IsForceInlineKnown || (m_IsForceInline == value));
+ m_IsForceInline = value;
+ m_IsForceInlineKnown = true;
+ break;
- case InlineObservation::CALLEE_IS_INSTANCE_CTOR:
- m_IsInstanceCtor = value;
- break;
+ case InlineObservation::CALLEE_IS_INSTANCE_CTOR:
+ m_IsInstanceCtor = value;
+ break;
- case InlineObservation::CALLEE_CLASS_PROMOTABLE:
- m_IsFromPromotableValueClass = value;
- break;
+ case InlineObservation::CALLEE_CLASS_PROMOTABLE:
+ m_IsFromPromotableValueClass = value;
+ break;
- case InlineObservation::CALLEE_HAS_SIMD:
- m_HasSimd = value;
- break;
+ case InlineObservation::CALLEE_HAS_SIMD:
+ m_HasSimd = value;
+ break;
- case InlineObservation::CALLEE_LOOKS_LIKE_WRAPPER:
- // LegacyPolicy ignores this for prejit roots.
- if (!m_IsPrejitRoot)
- {
- m_LooksLikeWrapperMethod = value;
- }
- break;
+ case InlineObservation::CALLEE_LOOKS_LIKE_WRAPPER:
+ // LegacyPolicy ignores this for prejit roots.
+ if (!m_IsPrejitRoot)
+ {
+ m_LooksLikeWrapperMethod = value;
+ }
+ break;
- case InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST:
- // LegacyPolicy ignores this for prejit roots.
- if (!m_IsPrejitRoot)
- {
- m_ArgFeedsConstantTest++;
- }
- break;
+ case InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST:
+ // LegacyPolicy ignores this for prejit roots.
+ if (!m_IsPrejitRoot)
+ {
+ m_ArgFeedsConstantTest++;
+ }
+ break;
- case InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK:
- // LegacyPolicy ignores this for prejit roots.
- if (!m_IsPrejitRoot)
- {
- m_ArgFeedsRangeCheck++;
- }
- break;
+ case InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK:
+ // LegacyPolicy ignores this for prejit roots.
+ if (!m_IsPrejitRoot)
+ {
+ m_ArgFeedsRangeCheck++;
+ }
+ break;
- case InlineObservation::CALLEE_HAS_SWITCH:
- case InlineObservation::CALLEE_UNSUPPORTED_OPCODE:
- // LegacyPolicy ignores these for prejit roots.
- if (!m_IsPrejitRoot)
- {
- // Pass these on, they should cause inlining to fail.
- propagate = true;
- }
- break;
+ case InlineObservation::CALLEE_HAS_SWITCH:
+ case InlineObservation::CALLEE_UNSUPPORTED_OPCODE:
+ // LegacyPolicy ignores these for prejit roots.
+ if (!m_IsPrejitRoot)
+ {
+ // Pass these on, they should cause inlining to fail.
+ propagate = true;
+ }
+ break;
- case InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST:
- // We shouldn't see this for a prejit root since
- // we don't know anything about callers.
- assert(!m_IsPrejitRoot);
- m_ConstantArgFeedsConstantTest++;
- break;
+ case InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST:
+ // We shouldn't see this for a prejit root since
+ // we don't know anything about callers.
+ assert(!m_IsPrejitRoot);
+ m_ConstantArgFeedsConstantTest++;
+ break;
- case InlineObservation::CALLEE_BEGIN_OPCODE_SCAN:
+ case InlineObservation::CALLEE_BEGIN_OPCODE_SCAN:
{
// Set up the state machine, if this inline is
// discretionary and is still a candidate.
- if (InlDecisionIsCandidate(m_Decision)
- && (m_Observation == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE))
+ if (InlDecisionIsCandidate(m_Decision) &&
+ (m_Observation == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE))
{
// Better not have a state machine already.
assert(m_StateMachine == nullptr);
@@ -331,7 +330,7 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
break;
}
- case InlineObservation::CALLEE_END_OPCODE_SCAN:
+ case InlineObservation::CALLEE_END_OPCODE_SCAN:
{
if (m_StateMachine != nullptr)
{
@@ -345,7 +344,7 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
// This allows for CALL, RET, and one more non-ld/st
// instruction.
if (((m_InstructionCount - m_LoadStoreCount) < 4) ||
- (((double)m_LoadStoreCount/(double)m_InstructionCount) > .90))
+ (((double)m_LoadStoreCount / (double)m_InstructionCount) > .90))
{
m_MethodIsMostlyLoadStore = true;
}
@@ -373,8 +372,8 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
if (!m_IsPrejitRoot)
{
- InlineStrategy* strategy = m_RootCompiler->m_inlineStrategy;
- bool overBudget = strategy->BudgetCheck(m_CodeSize);
+ InlineStrategy* strategy = m_RootCompiler->m_inlineStrategy;
+ bool overBudget = strategy->BudgetCheck(m_CodeSize);
if (overBudget)
{
SetFailure(InlineObservation::CALLSITE_OVER_BUDGET);
@@ -384,9 +383,9 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
break;
}
- default:
- // Ignore the remainder for now
- break;
+ default:
+ // Ignore the remainder for now
+ break;
}
}
@@ -407,7 +406,7 @@ void LegacyPolicy::NoteInt(InlineObservation obs, int value)
{
switch (obs)
{
- case InlineObservation::CALLEE_MAXSTACK:
+ case InlineObservation::CALLEE_MAXSTACK:
{
assert(m_IsForceInlineKnown);
@@ -421,7 +420,7 @@ void LegacyPolicy::NoteInt(InlineObservation obs, int value)
break;
}
- case InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS:
+ case InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS:
{
assert(m_IsForceInlineKnown);
assert(value != 0);
@@ -436,7 +435,7 @@ void LegacyPolicy::NoteInt(InlineObservation obs, int value)
break;
}
- case InlineObservation::CALLEE_IL_CODE_SIZE:
+ case InlineObservation::CALLEE_IL_CODE_SIZE:
{
assert(m_IsForceInlineKnown);
assert(value != 0);
@@ -468,7 +467,7 @@ void LegacyPolicy::NoteInt(InlineObservation obs, int value)
break;
}
- case InlineObservation::CALLSITE_DEPTH:
+ case InlineObservation::CALLSITE_DEPTH:
{
unsigned depth = static_cast<unsigned>(value);
@@ -480,8 +479,8 @@ void LegacyPolicy::NoteInt(InlineObservation obs, int value)
break;
}
- case InlineObservation::CALLEE_OPCODE_NORMED:
- case InlineObservation::CALLEE_OPCODE:
+ case InlineObservation::CALLEE_OPCODE_NORMED:
+ case InlineObservation::CALLEE_OPCODE:
{
m_InstructionCount++;
OPCODE opcode = static_cast<OPCODE>(value);
@@ -508,12 +507,9 @@ void LegacyPolicy::NoteInt(InlineObservation obs, int value)
// Look for opcodes that imply loads and stores.
// Logic here is as it is to match legacy behavior.
- if ((opcode >= CEE_LDARG_0 && opcode <= CEE_STLOC_S) ||
- (opcode >= CEE_LDARG && opcode <= CEE_STLOC) ||
- (opcode >= CEE_LDNULL && opcode <= CEE_LDC_R8) ||
- (opcode >= CEE_LDIND_I1 && opcode <= CEE_STIND_R8) ||
- (opcode >= CEE_LDFLD && opcode <= CEE_STOBJ) ||
- (opcode >= CEE_LDELEMA && opcode <= CEE_STELEM) ||
+ if ((opcode >= CEE_LDARG_0 && opcode <= CEE_STLOC_S) || (opcode >= CEE_LDARG && opcode <= CEE_STLOC) ||
+ (opcode >= CEE_LDNULL && opcode <= CEE_LDC_R8) || (opcode >= CEE_LDIND_I1 && opcode <= CEE_STIND_R8) ||
+ (opcode >= CEE_LDFLD && opcode <= CEE_STOBJ) || (opcode >= CEE_LDELEMA && opcode <= CEE_STELEM) ||
(opcode == CEE_POP))
{
m_LoadStoreCount++;
@@ -522,19 +518,18 @@ void LegacyPolicy::NoteInt(InlineObservation obs, int value)
break;
}
- case InlineObservation::CALLSITE_FREQUENCY:
- assert(m_CallsiteFrequency == InlineCallsiteFrequency::UNUSED);
- m_CallsiteFrequency = static_cast<InlineCallsiteFrequency>(value);
- assert(m_CallsiteFrequency != InlineCallsiteFrequency::UNUSED);
- break;
+ case InlineObservation::CALLSITE_FREQUENCY:
+ assert(m_CallsiteFrequency == InlineCallsiteFrequency::UNUSED);
+ m_CallsiteFrequency = static_cast<InlineCallsiteFrequency>(value);
+ assert(m_CallsiteFrequency != InlineCallsiteFrequency::UNUSED);
+ break;
- default:
- // Ignore all other information
- break;
+ default:
+ // Ignore all other information
+ break;
}
}
-
//------------------------------------------------------------------------
// DetermineMultiplier: determine benefit multiplier for this inline
//
@@ -566,7 +561,8 @@ double LegacyPolicy::DetermineMultiplier()
if (m_HasSimd)
{
multiplier += JitConfig.JitInlineSIMDMultiplier();
- JITDUMP("\nInline candidate has SIMD type args, locals or return value. Multiplier increased to %g.", multiplier);
+ JITDUMP("\nInline candidate has SIMD type args, locals or return value. Multiplier increased to %g.",
+ multiplier);
}
#endif // FEATURE_SIMD
@@ -603,30 +599,30 @@ double LegacyPolicy::DetermineMultiplier()
switch (m_CallsiteFrequency)
{
- case InlineCallsiteFrequency::RARE:
- // Note this one is not additive, it uses '=' instead of '+='
- multiplier = 1.3;
- JITDUMP("\nInline candidate callsite is rare. Multiplier limited to %g.", multiplier);
- break;
- case InlineCallsiteFrequency::BORING:
- multiplier += 1.3;
- JITDUMP("\nInline candidate callsite is boring. Multiplier increased to %g.", multiplier);
- break;
- case InlineCallsiteFrequency::WARM:
- multiplier += 2.0;
- JITDUMP("\nInline candidate callsite is warm. Multiplier increased to %g.", multiplier);
- break;
- case InlineCallsiteFrequency::LOOP:
- multiplier += 3.0;
- JITDUMP("\nInline candidate callsite is in a loop. Multiplier increased to %g.", multiplier);
- break;
- case InlineCallsiteFrequency::HOT:
- multiplier += 3.0;
- JITDUMP("\nInline candidate callsite is hot. Multiplier increased to %g.", multiplier);
- break;
- default:
- assert(!"Unexpected callsite frequency");
- break;
+ case InlineCallsiteFrequency::RARE:
+ // Note this one is not additive, it uses '=' instead of '+='
+ multiplier = 1.3;
+ JITDUMP("\nInline candidate callsite is rare. Multiplier limited to %g.", multiplier);
+ break;
+ case InlineCallsiteFrequency::BORING:
+ multiplier += 1.3;
+ JITDUMP("\nInline candidate callsite is boring. Multiplier increased to %g.", multiplier);
+ break;
+ case InlineCallsiteFrequency::WARM:
+ multiplier += 2.0;
+ JITDUMP("\nInline candidate callsite is warm. Multiplier increased to %g.", multiplier);
+ break;
+ case InlineCallsiteFrequency::LOOP:
+ multiplier += 3.0;
+ JITDUMP("\nInline candidate callsite is in a loop. Multiplier increased to %g.", multiplier);
+ break;
+ case InlineCallsiteFrequency::HOT:
+ multiplier += 3.0;
+ JITDUMP("\nInline candidate callsite is hot. Multiplier increased to %g.", multiplier);
+ break;
+ default:
+ assert(!"Unexpected callsite frequency");
+ break;
}
#ifdef DEBUG
@@ -684,27 +680,25 @@ int LegacyPolicy::DetermineNativeSizeEstimate()
int LegacyPolicy::DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methInfo)
{
- int callsiteSize = 55; // Direct call take 5 native bytes; indirect call takes 6 native bytes.
+ int callsiteSize = 55; // Direct call take 5 native bytes; indirect call takes 6 native bytes.
bool hasThis = methInfo->args.hasThis();
if (hasThis)
{
- callsiteSize += 30; // "mov" or "lea"
+ callsiteSize += 30; // "mov" or "lea"
}
CORINFO_ARG_LIST_HANDLE argLst = methInfo->args.args;
- COMP_HANDLE comp = m_RootCompiler->info.compCompHnd;
+ COMP_HANDLE comp = m_RootCompiler->info.compCompHnd;
- for (unsigned i = (hasThis ? 1 : 0);
- i < methInfo->args.totalILArgs();
- i++, argLst = comp->getArgNext(argLst))
+ for (unsigned i = (hasThis ? 1 : 0); i < methInfo->args.totalILArgs(); i++, argLst = comp->getArgNext(argLst))
{
- var_types sigType = (var_types) m_RootCompiler->eeGetArgType(argLst, &methInfo->args);
+ var_types sigType = (var_types)m_RootCompiler->eeGetArgType(argLst, &methInfo->args);
if (sigType == TYP_STRUCT)
{
- typeInfo verType = m_RootCompiler->verParseArgSigToTypeInfo(&methInfo->args, argLst);
+ typeInfo verType = m_RootCompiler->verParseArgSigToTypeInfo(&methInfo->args, argLst);
/*
@@ -719,7 +713,7 @@ int LegacyPolicy::DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methI
// NB sizeof (void*) fails to convey intent when cross-jitting.
- unsigned opsz = (unsigned)(roundUp(comp->getClassSize(verType.GetClassHandle()), sizeof(void*)));
+ unsigned opsz = (unsigned)(roundUp(comp->getClassSize(verType.GetClassHandle()), sizeof(void*)));
unsigned slots = opsz / sizeof(void*);
callsiteSize += slots * 20; // "push gword ptr [EAX+offs] "
@@ -753,12 +747,10 @@ void LegacyPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
#if defined(DEBUG)
// Punt if we're inlining and we've reached the acceptance limit.
- int limit = JitConfig.JitInlineLimit();
+ int limit = JitConfig.JitInlineLimit();
unsigned current = m_RootCompiler->m_inlineStrategy->GetInlineCount();
- if (!m_IsPrejitRoot &&
- (limit >= 0) &&
- (current >= static_cast<unsigned>(limit)))
+ if (!m_IsPrejitRoot && (limit >= 0) && (current >= static_cast<unsigned>(limit)))
{
SetFailure(InlineObservation::CALLSITE_OVER_INLINE_LIMIT);
return;
@@ -769,10 +761,10 @@ void LegacyPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
assert(InlDecisionIsCandidate(m_Decision));
assert(m_Observation == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
- m_CalleeNativeSizeEstimate = DetermineNativeSizeEstimate();
+ m_CalleeNativeSizeEstimate = DetermineNativeSizeEstimate();
m_CallsiteNativeSizeEstimate = DetermineCallsiteNativeSizeEstimate(methodInfo);
- m_Multiplier = DetermineMultiplier();
- const int threshold = (int)(m_CallsiteNativeSizeEstimate * m_Multiplier);
+ m_Multiplier = DetermineMultiplier();
+ const int threshold = (int)(m_CallsiteNativeSizeEstimate * m_Multiplier);
// Note the LegacyPolicy estimates are scaled up by SIZE_SCALE
JITDUMP("\ncalleeNativeSizeEstimate=%d\n", m_CalleeNativeSizeEstimate)
@@ -785,12 +777,9 @@ void LegacyPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
{
// Inline appears to be unprofitable
JITLOG_THIS(m_RootCompiler,
- (LL_INFO100000,
- "Native estimate for function size exceeds threshold"
- " for inlining %g > %g (multiplier = %g)\n",
- (double) m_CalleeNativeSizeEstimate / SIZE_SCALE,
- (double) threshold / SIZE_SCALE,
- m_Multiplier));
+ (LL_INFO100000, "Native estimate for function size exceeds threshold"
+ " for inlining %g > %g (multiplier = %g)\n",
+ (double)m_CalleeNativeSizeEstimate / SIZE_SCALE, (double)threshold / SIZE_SCALE, m_Multiplier));
// Fail the inline
if (m_IsPrejitRoot)
@@ -806,12 +795,9 @@ void LegacyPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
{
// Inline appears to be profitable
JITLOG_THIS(m_RootCompiler,
- (LL_INFO100000,
- "Native estimate for function size is within threshold"
- " for inlining %g <= %g (multiplier = %g)\n",
- (double) m_CalleeNativeSizeEstimate / SIZE_SCALE,
- (double) threshold / SIZE_SCALE,
- m_Multiplier));
+ (LL_INFO100000, "Native estimate for function size is within threshold"
+ " for inlining %g <= %g (multiplier = %g)\n",
+ (double)m_CalleeNativeSizeEstimate / SIZE_SCALE, (double)threshold / SIZE_SCALE, m_Multiplier));
// Update candidacy
if (m_IsPrejitRoot)
@@ -863,15 +849,15 @@ void EnhancedLegacyPolicy::NoteBool(InlineObservation obs, bool value)
{
switch (obs)
{
- case InlineObservation::CALLEE_DOES_NOT_RETURN:
- m_IsNoReturn = value;
- m_IsNoReturnKnown = true;
- break;
+ case InlineObservation::CALLEE_DOES_NOT_RETURN:
+ m_IsNoReturn = value;
+ m_IsNoReturnKnown = true;
+ break;
- default:
- // Pass all other information to the legacy policy
- LegacyPolicy::NoteBool(obs, value);
- break;
+ default:
+ // Pass all other information to the legacy policy
+ LegacyPolicy::NoteBool(obs, value);
+ break;
}
}
@@ -886,16 +872,16 @@ void EnhancedLegacyPolicy::NoteInt(InlineObservation obs, int value)
{
switch (obs)
{
- case InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS:
+ case InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS:
{
assert(value != 0);
assert(m_IsNoReturnKnown);
//
- // Let's be conservative for now and reject inlining of "no return" methods only
- // if the callee contains a single basic block. This covers most of the use cases
- // (typical throw helpers simply do "throw new X();" and so they have a single block)
- // without affecting more exotic cases (loops that do actual work for example) where
+ // Let's be conservative for now and reject inlining of "no return" methods only
+ // if the callee contains a single basic block. This covers most of the use cases
+ // (typical throw helpers simply do "throw new X();" and so they have a single block)
+ // without affecting more exotic cases (loops that do actual work for example) where
// failure to inline could negatively impact code quality.
//
@@ -913,10 +899,10 @@ void EnhancedLegacyPolicy::NoteInt(InlineObservation obs, int value)
break;
}
- default:
- // Pass all other information to the legacy policy
- LegacyPolicy::NoteInt(obs, value);
- break;
+ default:
+ // Pass all other information to the legacy policy
+ LegacyPolicy::NoteInt(obs, value);
+ break;
}
}
@@ -927,13 +913,13 @@ void EnhancedLegacyPolicy::NoteInt(InlineObservation obs, int value)
bool EnhancedLegacyPolicy::PropagateNeverToRuntime() const
{
//
- // Do not propagate the "no return" observation. If we do this then future inlining
- // attempts will fail immediately without marking the call node as "no return".
+ // Do not propagate the "no return" observation. If we do this then future inlining
+ // attempts will fail immediately without marking the call node as "no return".
// This can have an adverse impact on caller's code quality as it may have to preserve
// registers across the call.
- // TODO-Throughput: We should persist the "no return" information in the runtime
+ // TODO-Throughput: We should persist the "no return" information in the runtime
// so we don't need to re-analyze the inlinee all the time.
- //
+ //
bool propagate = (m_Observation != InlineObservation::CALLEE_DOES_NOT_RETURN);
@@ -1001,31 +987,31 @@ void RandomPolicy::NoteBool(InlineObservation obs, bool value)
// Handle most information here
bool isInformation = (impact == InlineImpact::INFORMATION);
- bool propagate = !isInformation;
+ bool propagate = !isInformation;
if (isInformation)
{
switch (obs)
{
- case InlineObservation::CALLEE_IS_FORCE_INLINE:
- // The RandomPolicy still honors force inlines.
- //
- // We may make the force-inline observation more than
- // once. All observations should agree.
- assert(!m_IsForceInlineKnown || (m_IsForceInline == value));
- m_IsForceInline = value;
- m_IsForceInlineKnown = true;
- break;
+ case InlineObservation::CALLEE_IS_FORCE_INLINE:
+ // The RandomPolicy still honors force inlines.
+ //
+ // We may make the force-inline observation more than
+ // once. All observations should agree.
+ assert(!m_IsForceInlineKnown || (m_IsForceInline == value));
+ m_IsForceInline = value;
+ m_IsForceInlineKnown = true;
+ break;
- case InlineObservation::CALLEE_HAS_SWITCH:
- case InlineObservation::CALLEE_UNSUPPORTED_OPCODE:
- // Pass these on, they should cause inlining to fail.
- propagate = true;
- break;
+ case InlineObservation::CALLEE_HAS_SWITCH:
+ case InlineObservation::CALLEE_UNSUPPORTED_OPCODE:
+ // Pass these on, they should cause inlining to fail.
+ propagate = true;
+ break;
- default:
- // Ignore the remainder for now
- break;
+ default:
+ // Ignore the remainder for now
+ break;
}
}
@@ -1047,7 +1033,7 @@ void RandomPolicy::NoteInt(InlineObservation obs, int value)
switch (obs)
{
- case InlineObservation::CALLEE_IL_CODE_SIZE:
+ case InlineObservation::CALLEE_IL_CODE_SIZE:
{
assert(m_IsForceInlineKnown);
assert(value != 0);
@@ -1067,9 +1053,9 @@ void RandomPolicy::NoteInt(InlineObservation obs, int value)
break;
}
- default:
- // Ignore all other information
- break;
+ default:
+ // Ignore all other information
+ break;
}
}
@@ -1092,8 +1078,8 @@ void RandomPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
// Budget check.
if (!m_IsPrejitRoot)
{
- InlineStrategy* strategy = m_RootCompiler->m_inlineStrategy;
- bool overBudget = strategy->BudgetCheck(m_CodeSize);
+ InlineStrategy* strategy = m_RootCompiler->m_inlineStrategy;
+ bool overBudget = strategy->BudgetCheck(m_CodeSize);
if (overBudget)
{
SetFailure(InlineObservation::CALLSITE_OVER_BUDGET);
@@ -1183,7 +1169,7 @@ void RandomPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
#ifdef _MSC_VER
// Disable warning about new array member initialization behavior
-#pragma warning( disable : 4351 )
+#pragma warning(disable : 4351)
#endif
//------------------------------------------------------------------------
@@ -1255,46 +1241,46 @@ DiscretionaryPolicy::DiscretionaryPolicy(Compiler* compiler, bool isPrejitRoot)
void DiscretionaryPolicy::NoteBool(InlineObservation obs, bool value)
{
- switch(obs)
+ switch (obs)
{
- case InlineObservation::CALLEE_LOOKS_LIKE_WRAPPER:
- m_LooksLikeWrapperMethod = value;
- break;
+ case InlineObservation::CALLEE_LOOKS_LIKE_WRAPPER:
+ m_LooksLikeWrapperMethod = value;
+ break;
- case InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST:
- assert(value);
- m_ArgFeedsConstantTest++;
- break;
+ case InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST:
+ assert(value);
+ m_ArgFeedsConstantTest++;
+ break;
- case InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK:
- assert(value);
- m_ArgFeedsRangeCheck++;
- break;
+ case InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK:
+ assert(value);
+ m_ArgFeedsRangeCheck++;
+ break;
- case InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST:
- assert(value);
- m_ConstantArgFeedsConstantTest++;
- break;
+ case InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST:
+ assert(value);
+ m_ConstantArgFeedsConstantTest++;
+ break;
- case InlineObservation::CALLEE_IS_CLASS_CTOR:
- m_IsClassCtor = value;
- break;
+ case InlineObservation::CALLEE_IS_CLASS_CTOR:
+ m_IsClassCtor = value;
+ break;
- case InlineObservation::CALLSITE_IS_SAME_THIS:
- m_IsSameThis = value;
- break;
+ case InlineObservation::CALLSITE_IS_SAME_THIS:
+ m_IsSameThis = value;
+ break;
- case InlineObservation::CALLER_HAS_NEWARRAY:
- m_CallerHasNewArray = value;
- break;
+ case InlineObservation::CALLER_HAS_NEWARRAY:
+ m_CallerHasNewArray = value;
+ break;
- case InlineObservation::CALLER_HAS_NEWOBJ:
- m_CallerHasNewObj = value;
- break;
+ case InlineObservation::CALLER_HAS_NEWOBJ:
+ m_CallerHasNewObj = value;
+ break;
- default:
- LegacyPolicy::NoteBool(obs, value);
- break;
+ default:
+ LegacyPolicy::NoteBool(obs, value);
+ break;
}
}
@@ -1310,28 +1296,28 @@ void DiscretionaryPolicy::NoteInt(InlineObservation obs, int value)
switch (obs)
{
- case InlineObservation::CALLEE_IL_CODE_SIZE:
- // Override how code size is handled
- {
- assert(m_IsForceInlineKnown);
- assert(value != 0);
- m_CodeSize = static_cast<unsigned>(value);
-
- if (m_IsForceInline)
- {
- // Candidate based on force inline
- SetCandidate(InlineObservation::CALLEE_IS_FORCE_INLINE);
- }
- else
+ case InlineObservation::CALLEE_IL_CODE_SIZE:
+ // Override how code size is handled
{
- // Candidate, pending profitability evaluation
- SetCandidate(InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
- }
+ assert(m_IsForceInlineKnown);
+ assert(value != 0);
+ m_CodeSize = static_cast<unsigned>(value);
- break;
- }
+ if (m_IsForceInline)
+ {
+ // Candidate based on force inline
+ SetCandidate(InlineObservation::CALLEE_IS_FORCE_INLINE);
+ }
+ else
+ {
+ // Candidate, pending profitability evaluation
+ SetCandidate(InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
+ }
- case InlineObservation::CALLEE_OPCODE:
+ break;
+ }
+
+ case InlineObservation::CALLEE_OPCODE:
{
// This tries to do a rough binning of opcodes based
// on similarity of impact on codegen.
@@ -1341,26 +1327,26 @@ void DiscretionaryPolicy::NoteInt(InlineObservation obs, int value)
break;
}
- case InlineObservation::CALLEE_MAXSTACK:
- m_Maxstack = value;
- break;
+ case InlineObservation::CALLEE_MAXSTACK:
+ m_Maxstack = value;
+ break;
- case InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS:
- m_BlockCount = value;
- break;
+ case InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS:
+ m_BlockCount = value;
+ break;
- case InlineObservation::CALLSITE_DEPTH:
- m_Depth = value;
- break;
+ case InlineObservation::CALLSITE_DEPTH:
+ m_Depth = value;
+ break;
- case InlineObservation::CALLSITE_WEIGHT:
- m_CallSiteWeight = static_cast<unsigned>(value);
- break;
+ case InlineObservation::CALLSITE_WEIGHT:
+ m_CallSiteWeight = static_cast<unsigned>(value);
+ break;
- default:
- // Delegate remainder to the LegacyPolicy.
- LegacyPolicy::NoteInt(obs, value);
- break;
+ default:
+ // Delegate remainder to the LegacyPolicy.
+ LegacyPolicy::NoteInt(obs, value);
+ break;
}
}
@@ -1650,12 +1636,10 @@ void DiscretionaryPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo
#if defined(DEBUG)
// Punt if we're inlining and we've reached the acceptance limit.
- int limit = JitConfig.JitInlineLimit();
+ int limit = JitConfig.JitInlineLimit();
unsigned current = m_RootCompiler->m_inlineStrategy->GetInlineCount();
- if (!m_IsPrejitRoot &&
- (limit >= 0) &&
- (current >= static_cast<unsigned>(limit)))
+ if (!m_IsPrejitRoot && (limit >= 0) && (current >= static_cast<unsigned>(limit)))
{
SetFailure(InlineObservation::CALLSITE_OVER_INLINE_LIMIT);
return;
@@ -1690,14 +1674,14 @@ void DiscretionaryPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo
void DiscretionaryPolicy::MethodInfoObservations(CORINFO_METHOD_INFO* methodInfo)
{
CORINFO_SIG_INFO& locals = methodInfo->locals;
- m_LocalCount = locals.numArgs;
+ m_LocalCount = locals.numArgs;
- CORINFO_SIG_INFO& args = methodInfo->args;
- const unsigned argCount = args.numArgs;
- m_ArgCount = argCount;
+ CORINFO_SIG_INFO& args = methodInfo->args;
+ const unsigned argCount = args.numArgs;
+ m_ArgCount = argCount;
const unsigned pointerSize = sizeof(void*);
- unsigned i = 0;
+ unsigned i = 0;
// Implicit arguments
@@ -1723,14 +1707,14 @@ void DiscretionaryPolicy::MethodInfoObservations(CORINFO_METHOD_INFO* methodInfo
// Explicit arguments
- unsigned j = 0;
+ unsigned j = 0;
CORINFO_ARG_LIST_HANDLE argListHandle = args.args;
- COMP_HANDLE comp = m_RootCompiler->info.compCompHnd;
+ COMP_HANDLE comp = m_RootCompiler->info.compCompHnd;
while ((i < MAX_ARGS) && (j < argCount))
{
CORINFO_CLASS_HANDLE classHandle;
- CorInfoType type = strip(comp->getArgType(&args, argListHandle, &classHandle));
+ CorInfoType type = strip(comp->getArgType(&args, argListHandle, &classHandle));
m_ArgType[i] = type;
@@ -1831,7 +1815,7 @@ void DiscretionaryPolicy::EstimateCodeSize()
// clang-format on
// Scaled up and reported as an integer value.
- m_ModelCodeSizeEstimate = (int) (SIZE_SCALE * sizeEstimate);
+ m_ModelCodeSizeEstimate = (int)(SIZE_SCALE * sizeEstimate);
}
//------------------------------------------------------------------------
@@ -1860,7 +1844,7 @@ void DiscretionaryPolicy::EstimatePerformanceImpact()
// clang-format on
// Scaled up and reported as an integer value.
- m_PerCallInstructionEstimate = (int) (SIZE_SCALE * perCallSavingsEstimate);
+ m_PerCallInstructionEstimate = (int)(SIZE_SCALE * perCallSavingsEstimate);
}
//------------------------------------------------------------------------
@@ -1981,12 +1965,12 @@ void DiscretionaryPolicy::DumpData(FILE* file) const
for (unsigned i = 0; i < MAX_ARGS; i++)
{
- fprintf(file, ",%u", (unsigned) m_ArgSize[i]);
+ fprintf(file, ",%u", (unsigned)m_ArgSize[i]);
}
fprintf(file, ",%u", m_LocalCount);
fprintf(file, ",%u", m_ReturnType);
- fprintf(file, ",%u", (unsigned) m_ReturnSize);
+ fprintf(file, ",%u", (unsigned)m_ReturnSize);
fprintf(file, ",%u", m_ArgAccessCount);
fprintf(file, ",%u", m_LocalAccessCount);
fprintf(file, ",%u", m_IntConstantCount);
@@ -2045,8 +2029,7 @@ void DiscretionaryPolicy::DumpData(FILE* file) const
// compiler -- compiler instance doing the inlining (root compiler)
// isPrejitRoot -- true if this compiler is prejitting the root method
-ModelPolicy::ModelPolicy(Compiler* compiler, bool isPrejitRoot)
- : DiscretionaryPolicy(compiler, isPrejitRoot)
+ModelPolicy::ModelPolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot)
{
// Empty
}
@@ -2096,9 +2079,7 @@ void ModelPolicy::NoteInt(InlineObservation obs, int value)
// Fail fast for inlinees that are too large to ever inline.
// The value of 120 is model-dependent; see notes above.
- if (!m_IsForceInline &&
- (obs == InlineObservation::CALLEE_IL_CODE_SIZE) &&
- (value >= 120))
+ if (!m_IsForceInline && (obs == InlineObservation::CALLEE_IL_CODE_SIZE) && (value >= 120))
{
// Callee too big, not a candidate
SetNever(InlineObservation::CALLEE_TOO_MUCH_IL);
@@ -2147,10 +2128,8 @@ void ModelPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
if (m_ModelCodeSizeEstimate <= 0)
{
// Inline will likely decrease code size
- JITLOG_THIS(m_RootCompiler,
- (LL_INFO100000,
- "Inline profitable, will decrease code size by %g bytes\n",
- (double) -m_ModelCodeSizeEstimate / SIZE_SCALE));
+ JITLOG_THIS(m_RootCompiler, (LL_INFO100000, "Inline profitable, will decrease code size by %g bytes\n",
+ (double)-m_ModelCodeSizeEstimate / SIZE_SCALE));
if (m_IsPrejitRoot)
{
@@ -2174,7 +2153,7 @@ void ModelPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
// The per call instruction estimate is negative if the inline
// will reduce instruction count. Flip the sign here to make
// positive be better and negative worse.
- double perCallBenefit = -((double) m_PerCallInstructionEstimate / (double) m_ModelCodeSizeEstimate);
+ double perCallBenefit = -((double)m_PerCallInstructionEstimate / (double)m_ModelCodeSizeEstimate);
// Now estimate the local call frequency.
//
@@ -2187,22 +2166,22 @@ void ModelPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
switch (m_CallsiteFrequency)
{
- case InlineCallsiteFrequency::RARE:
- callSiteWeight = 0.1;
- break;
- case InlineCallsiteFrequency::BORING:
- callSiteWeight = 1.0;
- break;
- case InlineCallsiteFrequency::WARM:
- callSiteWeight = 1.5;
- break;
- case InlineCallsiteFrequency::LOOP:
- case InlineCallsiteFrequency::HOT:
- callSiteWeight = 3.0;
- break;
- default:
- assert(false);
- break;
+ case InlineCallsiteFrequency::RARE:
+ callSiteWeight = 0.1;
+ break;
+ case InlineCallsiteFrequency::BORING:
+ callSiteWeight = 1.0;
+ break;
+ case InlineCallsiteFrequency::WARM:
+ callSiteWeight = 1.5;
+ break;
+ case InlineCallsiteFrequency::LOOP:
+ case InlineCallsiteFrequency::HOT:
+ callSiteWeight = 3.0;
+ break;
+ default:
+ assert(false);
+ break;
}
// Determine the estimated number of instructions saved per
@@ -2216,16 +2195,13 @@ void ModelPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
// the value of 0.2 below indicates we'll allow inlines that
// grow code by as many as 5 bytes to save 1 instruction
// execution (per call to the root method).
- double threshold = 0.20;
- bool shouldInline = (benefit > threshold);
+ double threshold = 0.20;
+ bool shouldInline = (benefit > threshold);
JITLOG_THIS(m_RootCompiler,
- (LL_INFO100000,
- "Inline %s profitable: benefit=%g (weight=%g, percall=%g, size=%g)\n",
- shouldInline ? "is" : "is not",
- benefit, callSiteWeight,
- (double) m_PerCallInstructionEstimate / SIZE_SCALE,
- (double) m_ModelCodeSizeEstimate / SIZE_SCALE));
+ (LL_INFO100000, "Inline %s profitable: benefit=%g (weight=%g, percall=%g, size=%g)\n",
+ shouldInline ? "is" : "is not", benefit, callSiteWeight,
+ (double)m_PerCallInstructionEstimate / SIZE_SCALE, (double)m_ModelCodeSizeEstimate / SIZE_SCALE));
if (!shouldInline)
{
@@ -2263,8 +2239,7 @@ void ModelPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
// compiler -- compiler instance doing the inlining (root compiler)
// isPrejitRoot -- true if this compiler is prejitting the root method
-FullPolicy::FullPolicy(Compiler* compiler, bool isPrejitRoot)
- : DiscretionaryPolicy(compiler, isPrejitRoot)
+FullPolicy::FullPolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot)
{
// Empty
}
@@ -2318,8 +2293,7 @@ void FullPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
// compiler -- compiler instance doing the inlining (root compiler)
// isPrejitRoot -- true if this compiler is prejitting the root method
-SizePolicy::SizePolicy(Compiler* compiler, bool isPrejitRoot)
- : DiscretionaryPolicy(compiler, isPrejitRoot)
+SizePolicy::SizePolicy(Compiler* compiler, bool isPrejitRoot) : DiscretionaryPolicy(compiler, isPrejitRoot)
{
// Empty
}
@@ -2338,17 +2312,16 @@ void SizePolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
// Does this inline increase the estimated size beyond
// the original size estimate?
- const InlineStrategy* strategy = m_RootCompiler->m_inlineStrategy;
- const int initialSize = strategy->GetInitialSizeEstimate();
- const int currentSize = strategy->GetCurrentSizeEstimate();
- const int newSize = currentSize + m_ModelCodeSizeEstimate;
+ const InlineStrategy* strategy = m_RootCompiler->m_inlineStrategy;
+ const int initialSize = strategy->GetInitialSizeEstimate();
+ const int currentSize = strategy->GetCurrentSizeEstimate();
+ const int newSize = currentSize + m_ModelCodeSizeEstimate;
if (newSize <= initialSize)
{
// Estimated size impact is acceptable, so inline here.
JITLOG_THIS(m_RootCompiler,
- (LL_INFO100000,
- "Inline profitable, root size estimate %d is less than initial size %d\n",
+ (LL_INFO100000, "Inline profitable, root size estimate %d is less than initial size %d\n",
newSize / SIZE_SCALE, initialSize / SIZE_SCALE));
if (m_IsPrejitRoot)
@@ -2384,7 +2357,7 @@ void SizePolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
// and provide file access to the inline xml
bool ReplayPolicy::s_WroteReplayBanner = false;
-FILE* ReplayPolicy::s_ReplayFile = nullptr;
+FILE* ReplayPolicy::s_ReplayFile = nullptr;
CritSecObject ReplayPolicy::s_XmlReaderLock;
//------------------------------------------------------------------------/
@@ -2408,14 +2381,13 @@ ReplayPolicy::ReplayPolicy(Compiler* compiler, bool isPrejitRoot)
{
// Nope, open it up.
const wchar_t* replayFileName = JitConfig.JitInlineReplayFile();
- s_ReplayFile = _wfopen(replayFileName, W("r"));
+ s_ReplayFile = _wfopen(replayFileName, W("r"));
// Display banner to stderr, unless we're dumping inline Xml,
// in which case the policy name is captured in the Xml.
if (JitConfig.JitInlineDumpXml() == 0)
{
- fprintf(stderr, "*** %s inlines from %ws\n",
- s_ReplayFile == nullptr ? "Unable to replay" : "Replaying",
+ fprintf(stderr, "*** %s inlines from %ws\n", s_ReplayFile == nullptr ? "Unable to replay" : "Replaying",
replayFileName);
}
@@ -2455,7 +2427,7 @@ bool ReplayPolicy::FindMethod()
// See if we've already found this method.
InlineStrategy* inlineStrategy = m_RootCompiler->m_inlineStrategy;
- long filePosition = inlineStrategy->GetMethodXmlFilePosition();
+ long filePosition = inlineStrategy->GetMethodXmlFilePosition();
if (filePosition == -1)
{
@@ -2472,10 +2444,8 @@ bool ReplayPolicy::FindMethod()
// Else, scan the file. Might be nice to build an index
// or something, someday.
const mdMethodDef methodToken =
- m_RootCompiler->info.compCompHnd->getMethodDefFromMethod(
- m_RootCompiler->info.compMethodHnd);
- const unsigned methodHash =
- m_RootCompiler->info.compMethodHash();
+ m_RootCompiler->info.compCompHnd->getMethodDefFromMethod(m_RootCompiler->info.compMethodHnd);
+ const unsigned methodHash = m_RootCompiler->info.compMethodHash();
bool foundMethod = false;
char buffer[256];
@@ -2503,7 +2473,7 @@ bool ReplayPolicy::FindMethod()
// See if token matches
unsigned token = 0;
- int count = sscanf(buffer, " <Token>%u</Token> ", &token);
+ int count = sscanf(buffer, " <Token>%u</Token> ", &token);
if ((count != 1) || (token != methodToken))
{
continue;
@@ -2517,7 +2487,7 @@ bool ReplayPolicy::FindMethod()
// See if hash matches
unsigned hash = 0;
- count = sscanf(buffer, " <Hash>%u</Hash> ", &hash);
+ count = sscanf(buffer, " <Hash>%u</Hash> ", &hash);
if ((count != 1) || (hash != methodHash))
{
continue;
@@ -2575,13 +2545,9 @@ bool ReplayPolicy::FindContext(InlineContext* context)
// See if we see an inline entry for this context.
//
// Token and Hash we're looking for.
- mdMethodDef contextToken =
- m_RootCompiler->info.compCompHnd->getMethodDefFromMethod(
- context->GetCallee());
- unsigned contextHash =
- m_RootCompiler->info.compCompHnd->getMethodHash(
- context->GetCallee());
- unsigned contextOffset = (unsigned) context->GetOffset();
+ mdMethodDef contextToken = m_RootCompiler->info.compCompHnd->getMethodDefFromMethod(context->GetCallee());
+ unsigned contextHash = m_RootCompiler->info.compCompHnd->getMethodHash(context->GetCallee());
+ unsigned contextOffset = (unsigned)context->GetOffset();
return FindInline(contextToken, contextHash, contextOffset);
}
@@ -2609,7 +2575,7 @@ bool ReplayPolicy::FindInline(unsigned token, unsigned hash, unsigned offset)
{
char buffer[256];
bool foundInline = false;
- int depth = 0;
+ int depth = 0;
while (!foundInline)
{
@@ -2680,7 +2646,7 @@ bool ReplayPolicy::FindInline(unsigned token, unsigned hash, unsigned offset)
// Match token
unsigned inlineToken = 0;
- int count = sscanf(buffer, " <Token>%u</Token> ", &inlineToken);
+ int count = sscanf(buffer, " <Token>%u</Token> ", &inlineToken);
if ((count != 1) || (inlineToken != token))
{
@@ -2695,7 +2661,7 @@ bool ReplayPolicy::FindInline(unsigned token, unsigned hash, unsigned offset)
// Match hash
unsigned inlineHash = 0;
- count = sscanf(buffer, " <Hash>%u</Hash> ", &inlineHash);
+ count = sscanf(buffer, " <Hash>%u</Hash> ", &inlineHash);
if ((count != 1) || (inlineHash != hash))
{
@@ -2710,7 +2676,7 @@ bool ReplayPolicy::FindInline(unsigned token, unsigned hash, unsigned offset)
// Match offset
unsigned inlineOffset = 0;
- count = sscanf(buffer, " <Offset>%u</Offset> ", &inlineOffset);
+ count = sscanf(buffer, " <Offset>%u</Offset> ", &inlineOffset);
if ((count != 1) || (inlineOffset != offset))
{
continue;
@@ -2729,7 +2695,7 @@ bool ReplayPolicy::FindInline(unsigned token, unsigned hash, unsigned offset)
if (fgets(buffer, sizeof(buffer), s_ReplayFile) != nullptr)
{
unsigned collectData = 0;
- count = sscanf(buffer, " <CollectData>%u</CollectData> ", &collectData);
+ count = sscanf(buffer, " <CollectData>%u</CollectData> ", &collectData);
if (count == 1)
{
@@ -2763,20 +2729,18 @@ bool ReplayPolicy::FindInline(unsigned token, unsigned hash, unsigned offset)
bool ReplayPolicy::FindInline(CORINFO_METHOD_HANDLE callee)
{
// Token and Hash we're looking for
- mdMethodDef calleeToken =
- m_RootCompiler->info.compCompHnd->getMethodDefFromMethod(callee);
- unsigned calleeHash =
- m_RootCompiler->info.compCompHnd->getMethodHash(callee);
+ mdMethodDef calleeToken = m_RootCompiler->info.compCompHnd->getMethodDefFromMethod(callee);
+ unsigned calleeHash = m_RootCompiler->info.compCompHnd->getMethodHash(callee);
// Abstract this or just pass through raw bits
// See matching code in xml writer
int offset = -1;
if (m_Offset != BAD_IL_OFFSET)
{
- offset = (int) jitGetILoffs(m_Offset);
+ offset = (int)jitGetILoffs(m_Offset);
}
- unsigned calleeOffset = (unsigned) offset;
+ unsigned calleeOffset = (unsigned)offset;
bool foundInline = FindInline(calleeToken, calleeHash, calleeOffset);
@@ -2800,7 +2764,7 @@ void ReplayPolicy::NoteBool(InlineObservation obs, bool value)
if (!m_IsPrejitRoot && (obs == InlineObservation::CALLEE_IS_FORCE_INLINE))
{
m_WasForceInline = value;
- value = false;
+ value = false;
}
DiscretionaryPolicy::NoteBool(obs, value);
@@ -2855,7 +2819,7 @@ void ReplayPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
{
// Finally, find this candidate within its context
CORINFO_METHOD_HANDLE calleeHandle = methodInfo->ftn;
- accept = FindInline(calleeHandle);
+ accept = FindInline(calleeHandle);
}
}
}
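
Note on the inlinepolicy.cpp hunks above: the dominant changes are purely stylistic — case labels indented one level inside the switch, consecutive assignments aligned on '=', wrapped boolean operators kept at the end of the preceding line, 120-column rewrapping of long calls, and pointer declarators bound to the type (for example FILE* rather than FILE *). As a rough illustration only, the following is a hypothetical, self-contained snippet (not taken from the JIT sources) formatted in the post-change style; the names and values are invented for the example.

// Hypothetical sample showing the post-format conventions seen throughout this diff:
// indented case labels, braces on their own lines, and aligned assignments.
#include <cstdio>

static const char* Describe(int frequency)
{
    const char* name   = "unknown";
    double      weight = 0.0;

    switch (frequency)
    {
        case 0:
            name   = "rare";
            weight = 0.1;
            break;
        case 1:
            name   = "boring";
            weight = 1.0;
            break;
        default:
            // Unhandled values keep the defaults.
            break;
    }

    std::printf("%s (%g)\n", name, weight);
    return name;
}

int main()
{
    Describe(0);
    Describe(1);
    Describe(7);
    return 0;
}
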
diff --git a/src/jit/inlinepolicy.h b/src/jit/inlinepolicy.h
index f7569cbc24..62031c86a0 100644
--- a/src/jit/inlinepolicy.h
+++ b/src/jit/inlinepolicy.h
@@ -48,10 +48,8 @@ class LegalPolicy : public InlinePolicy
{
public:
-
// Constructor
- LegalPolicy(bool isPrejitRoot)
- : InlinePolicy(isPrejitRoot)
+ LegalPolicy(bool isPrejitRoot) : InlinePolicy(isPrejitRoot)
{
// empty
}
@@ -60,7 +58,6 @@ public:
void NoteFatal(InlineObservation obs) override;
protected:
-
// Helper methods
void NoteInternal(InlineObservation obs);
void SetCandidate(InlineObservation obs);
@@ -79,7 +76,6 @@ class CodeSeqSM;
class LegacyPolicy : public LegalPolicy
{
public:
-
// Construct a LegacyPolicy
LegacyPolicy(Compiler* compiler, bool isPrejitRoot)
: LegalPolicy(isPrejitRoot)
@@ -115,30 +111,42 @@ public:
void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override;
// Policy policies
- bool PropagateNeverToRuntime() const override { return true; }
- bool IsLegacyPolicy() const override { return true; }
+ bool PropagateNeverToRuntime() const override
+ {
+ return true;
+ }
+ bool IsLegacyPolicy() const override
+ {
+ return true;
+ }
// Policy estimates
int CodeSizeEstimate() override;
#if defined(DEBUG) || defined(INLINE_DATA)
- const char* GetName() const override { return "LegacyPolicy"; }
+ const char* GetName() const override
+ {
+ return "LegacyPolicy";
+ }
#endif // (DEBUG) || defined(INLINE_DATA)
protected:
-
// Constants
- enum { MAX_BASIC_BLOCKS = 5, SIZE_SCALE = 10 };
+ enum
+ {
+ MAX_BASIC_BLOCKS = 5,
+ SIZE_SCALE = 10
+ };
// Helper methods
double DetermineMultiplier();
- int DetermineNativeSizeEstimate();
+ int DetermineNativeSizeEstimate();
int DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methodInfo);
// Data members
- Compiler* m_RootCompiler; // root compiler instance
+ Compiler* m_RootCompiler; // root compiler instance
CodeSeqSM* m_StateMachine;
double m_Multiplier;
unsigned m_CodeSize;
@@ -150,13 +158,13 @@ protected:
unsigned m_ConstantArgFeedsConstantTest;
int m_CalleeNativeSizeEstimate;
int m_CallsiteNativeSizeEstimate;
- bool m_IsForceInline :1;
- bool m_IsForceInlineKnown :1;
- bool m_IsInstanceCtor :1;
- bool m_IsFromPromotableValueClass :1;
- bool m_HasSimd :1;
- bool m_LooksLikeWrapperMethod :1;
- bool m_MethodIsMostlyLoadStore :1;
+ bool m_IsForceInline : 1;
+ bool m_IsForceInlineKnown : 1;
+ bool m_IsInstanceCtor : 1;
+ bool m_IsFromPromotableValueClass : 1;
+ bool m_HasSimd : 1;
+ bool m_LooksLikeWrapperMethod : 1;
+ bool m_MethodIsMostlyLoadStore : 1;
};
// EnhancedLegacyPolicy extends the legacy policy by rejecting
@@ -166,9 +174,7 @@ class EnhancedLegacyPolicy : public LegacyPolicy
{
public:
EnhancedLegacyPolicy(Compiler* compiler, bool isPrejitRoot)
- : LegacyPolicy(compiler, isPrejitRoot)
- , m_IsNoReturn(false)
- , m_IsNoReturnKnown(false)
+ : LegacyPolicy(compiler, isPrejitRoot), m_IsNoReturn(false), m_IsNoReturnKnown(false)
{
// empty
}
@@ -179,13 +185,15 @@ public:
// Policy policies
bool PropagateNeverToRuntime() const override;
- bool IsLegacyPolicy() const override { return false; }
+ bool IsLegacyPolicy() const override
+ {
+ return false;
+ }
protected:
-
// Data members
- bool m_IsNoReturn :1;
- bool m_IsNoReturnKnown :1;
+ bool m_IsNoReturn : 1;
+ bool m_IsNoReturnKnown : 1;
};
#ifdef DEBUG
@@ -196,7 +204,6 @@ protected:
class RandomPolicy : public LegalPolicy
{
public:
-
// Construct a RandomPolicy
RandomPolicy(Compiler* compiler, bool isPrejitRoot, unsigned seed);
@@ -209,8 +216,14 @@ public:
void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override;
// Policy policies
- bool PropagateNeverToRuntime() const override { return true; }
- bool IsLegacyPolicy() const override { return false; }
+ bool PropagateNeverToRuntime() const override
+ {
+ return true;
+ }
+ bool IsLegacyPolicy() const override
+ {
+ return false;
+ }
// Policy estimates
int CodeSizeEstimate() override
@@ -218,16 +231,18 @@ public:
return 0;
}
- const char* GetName() const override { return "RandomPolicy"; }
+ const char* GetName() const override
+ {
+ return "RandomPolicy";
+ }
private:
-
// Data members
- Compiler* m_RootCompiler;
- CLRRandom* m_Random;
- unsigned m_CodeSize;
- bool m_IsForceInline :1;
- bool m_IsForceInlineKnown :1;
+ Compiler* m_RootCompiler;
+ CLRRandom* m_Random;
+ unsigned m_CodeSize;
+ bool m_IsForceInline : 1;
+ bool m_IsForceInlineKnown : 1;
};
#endif // DEBUG
@@ -242,7 +257,6 @@ private:
class DiscretionaryPolicy : public LegacyPolicy
{
public:
-
// Construct a DiscretionaryPolicy
DiscretionaryPolicy(Compiler* compiler, bool isPrejitRoot);
@@ -252,7 +266,10 @@ public:
// Policy policies
bool PropagateNeverToRuntime() const override;
- bool IsLegacyPolicy() const override { return false; }
+ bool IsLegacyPolicy() const override
+ {
+ return false;
+ }
// Policy determinations
void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override;
@@ -267,18 +284,22 @@ public:
void DumpSchema(FILE* file) const override;
// Miscellaneous
- const char* GetName() const override { return "DiscretionaryPolicy"; }
+ const char* GetName() const override
+ {
+ return "DiscretionaryPolicy";
+ }
#endif // defined(DEBUG) || defined(INLINE_DATA)
-
protected:
-
void ComputeOpcodeBin(OPCODE opcode);
void EstimateCodeSize();
void EstimatePerformanceImpact();
void MethodInfoObservations(CORINFO_METHOD_INFO* methodInfo);
- enum { MAX_ARGS = 6 };
+ enum
+ {
+ MAX_ARGS = 6
+ };
unsigned m_Depth;
unsigned m_BlockCount;
@@ -333,7 +354,6 @@ protected:
class ModelPolicy : public DiscretionaryPolicy
{
public:
-
// Construct a ModelPolicy
ModelPolicy(Compiler* compiler, bool isPrejitRoot);
@@ -344,15 +364,20 @@ public:
void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override;
// Policy policies
- bool PropagateNeverToRuntime() const override { return true; }
+ bool PropagateNeverToRuntime() const override
+ {
+ return true;
+ }
#if defined(DEBUG) || defined(INLINE_DATA)
// Miscellaneous
- const char* GetName() const override { return "ModelPolicy"; }
+ const char* GetName() const override
+ {
+ return "ModelPolicy";
+ }
#endif // defined(DEBUG) || defined(INLINE_DATA)
-
};
#if defined(DEBUG) || defined(INLINE_DATA)
@@ -366,7 +391,6 @@ public:
class FullPolicy : public DiscretionaryPolicy
{
public:
-
// Construct a FullPolicy
FullPolicy(Compiler* compiler, bool isPrejitRoot);
@@ -374,7 +398,10 @@ public:
void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override;
// Miscellaneous
- const char* GetName() const override { return "FullPolicy"; }
+ const char* GetName() const override
+ {
+ return "FullPolicy";
+ }
};
// SizePolicy is an experimental policy that will inline as much
@@ -386,7 +413,6 @@ public:
class SizePolicy : public DiscretionaryPolicy
{
public:
-
// Construct a SizePolicy
SizePolicy(Compiler* compiler, bool isPrejitRoot);
@@ -394,7 +420,10 @@ public:
void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override;
// Miscellaneous
- const char* GetName() const override { return "SizePolicy"; }
+ const char* GetName() const override
+ {
+ return "SizePolicy";
+ }
};
// The ReplayPolicy performs only inlines specified by an external
@@ -403,7 +432,6 @@ public:
class ReplayPolicy : public DiscretionaryPolicy
{
public:
-
// Construct a ReplayPolicy
ReplayPolicy(Compiler* compiler, bool isPrejitRoot);
@@ -425,12 +453,14 @@ public:
void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override;
// Miscellaneous
- const char* GetName() const override { return "ReplayPolicy"; }
+ const char* GetName() const override
+ {
+ return "ReplayPolicy";
+ }
static void FinalizeXml();
private:
-
bool FindMethod();
bool FindContext(InlineContext* context);
bool FindInline(CORINFO_METHOD_HANDLE callee);
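
The inlinepolicy.h hunks above follow the same mechanical pattern: single-statement accessors that were written on one line gain braced, multi-line bodies, and one-bit flag members get a space on both sides of the bit-field colon. The sketch below is a hypothetical class written only to show that shape; it reuses member names visible in the diff but is not the actual policy hierarchy.

// Hypothetical sketch of the accessor and bit-field style the header hunks converge on.
class ExamplePolicy
{
public:
    // Single-statement accessors now get braced, multi-line bodies.
    bool PropagateNeverToRuntime() const
    {
        return true;
    }

    const char* GetName() const
    {
        return "ExamplePolicy";
    }

private:
    // Bit-field flags are written with a space on both sides of the ':'.
    bool m_IsForceInline : 1;
    bool m_IsForceInlineKnown : 1;
};

int main()
{
    ExamplePolicy p;
    return p.PropagateNeverToRuntime() ? 0 : 1;
}
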
diff --git a/src/jit/instr.cpp b/src/jit/instr.cpp
index c1f3527b85..d516e0dea4 100644
--- a/src/jit/instr.cpp
+++ b/src/jit/instr.cpp
@@ -23,16 +23,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "emit.h"
/*****************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* Returns the string representation of the given CPU instruction.
*/
-const char * CodeGen::genInsName(instruction ins)
+const char* CodeGen::genInsName(instruction ins)
{
-// clang-format off
+ // clang-format off
static
const char * const insNames[] =
{
@@ -70,49 +70,51 @@ const char * CodeGen::genInsName(instruction ins)
#error "Unknown _TARGET_"
#endif
};
-// clang-format on
+ // clang-format on
- assert((unsigned)ins < sizeof(insNames)/sizeof(insNames[0]));
- assert(insNames[ins] != NULL);
+ assert((unsigned)ins < sizeof(insNames) / sizeof(insNames[0]));
+ assert(insNames[ins] != nullptr);
return insNames[ins];
}
-void __cdecl CodeGen::instDisp(instruction ins, bool noNL, const char *fmt, ...)
+void __cdecl CodeGen::instDisp(instruction ins, bool noNL, const char* fmt, ...)
{
- if (compiler->opts.dspCode)
+ if (compiler->opts.dspCode)
{
/* Display the instruction offset within the emit block */
-// printf("[%08X:%04X]", getEmitter().emitCodeCurBlock(), getEmitter().emitCodeOffsInBlock());
+ // printf("[%08X:%04X]", getEmitter().emitCodeCurBlock(), getEmitter().emitCodeOffsInBlock());
/* Display the FP stack depth (before the instruction is executed) */
-// printf("[FP=%02u] ", genGetFPstkLevel());
+ // printf("[FP=%02u] ", genGetFPstkLevel());
/* Display the instruction mnemonic */
printf(" ");
printf(" %-8s", genInsName(ins));
- if (fmt)
+ if (fmt)
{
- va_list args;
+ va_list args;
va_start(args, fmt);
vprintf(fmt, args);
- va_end (args);
+ va_end(args);
}
- if (!noNL)
+ if (!noNL)
+ {
printf("\n");
+ }
}
}
/*****************************************************************************/
-#endif//DEBUG
+#endif // DEBUG
/*****************************************************************************/
-void CodeGen::instInit()
+void CodeGen::instInit()
{
}
@@ -121,51 +123,59 @@ void CodeGen::instInit()
* Return the size string (e.g. "word ptr") appropriate for the given size.
*/
-#ifdef DEBUG
+#ifdef DEBUG
-const char * CodeGen::genSizeStr(emitAttr attr)
+const char* CodeGen::genSizeStr(emitAttr attr)
{
-// clang-format off
+ // clang-format off
static
const char * const sizes[] =
{
"",
"byte ptr ",
"word ptr ",
- 0,
+ nullptr,
"dword ptr ",
- 0,
- 0,
- 0,
+ nullptr,
+ nullptr,
+ nullptr,
"qword ptr ",
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
"xmmword ptr ",
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0,
+ nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
"ymmword ptr"
};
-// clang-format on
+ // clang-format on
unsigned size = EA_SIZE(attr);
assert(size == 0 || size == 1 || size == 2 || size == 4 || size == 8 || size == 16 || size == 32);
if (EA_ATTR(size) == attr)
+ {
return sizes[size];
+ }
else if (attr == EA_GCREF)
+ {
return "gword ptr ";
+ }
else if (attr == EA_BYREF)
+ {
return "bword ptr ";
+ }
else if (EA_IS_DSP_RELOC(attr))
+ {
return "rword ptr ";
+ }
else
{
assert(!"Unexpected");
@@ -180,7 +190,7 @@ const char * CodeGen::genSizeStr(emitAttr attr)
* Generate an instruction.
*/
-void CodeGen::instGen(instruction ins)
+void CodeGen::instGen(instruction ins)
{
getEmitter()->emitIns(ins);
@@ -190,8 +200,7 @@ void CodeGen::instGen(instruction ins)
// if we are scheduled to insert a nop here, we have to delay it
// hopefully we have not missed any other prefix instructions or places
// they could be inserted
- if (ins == INS_lock
- && getEmitter()->emitNextNop == 0)
+ if (ins == INS_lock && getEmitter()->emitNextNop == 0)
{
getEmitter()->emitNextNop = 1;
}
@@ -204,11 +213,11 @@ void CodeGen::instGen(instruction ins)
*/
// static inline
-bool CodeGenInterface::instIsFP(instruction ins)
+bool CodeGenInterface::instIsFP(instruction ins)
{
- assert((unsigned)ins < sizeof(instInfo)/sizeof(instInfo[0]));
+ assert((unsigned)ins < sizeof(instInfo) / sizeof(instInfo[0]));
- return (instInfo[ins] & INST_FP) != 0;
+ return (instInfo[ins] & INST_FP) != 0;
}
#ifdef _TARGET_XARCH_
@@ -217,7 +226,7 @@ bool CodeGenInterface::instIsFP(instruction ins)
* Generate a multi-byte NOP instruction.
*/
-void CodeGen::instNop(unsigned size)
+void CodeGen::instNop(unsigned size)
{
assert(size <= 15);
getEmitter()->emitIns_Nop(size);
@@ -229,16 +238,15 @@ void CodeGen::instNop(unsigned size)
* Generate a jump instruction.
*/
-void CodeGen::inst_JMP(emitJumpKind jmp,
- BasicBlock * tgtBlock)
+void CodeGen::inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock)
{
#if !FEATURE_FIXED_OUT_ARGS
- // On the x86 we are pushing (and changing the stack level), but on x64 and other archs we have
+ // On the x86 we are pushing (and changing the stack level), but on x64 and other archs we have
// a fixed outgoing args area that we store into and we never change the stack level when calling methods.
//
// Thus only on x86 do we need to assert that the stack level at the target block matches the current stack level.
//
- assert(tgtBlock->bbTgtStkDepth*sizeof(int) == genStackLevel || compiler->rpFrameType != FT_ESP_FRAME);
+ assert(tgtBlock->bbTgtStkDepth * sizeof(int) == genStackLevel || compiler->rpFrameType != FT_ESP_FRAME);
#endif
getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jmp), tgtBlock);
@@ -249,35 +257,64 @@ void CodeGen::inst_JMP(emitJumpKind jmp,
* Generate a set instruction.
*/
-void CodeGen::inst_SET(emitJumpKind condition,
- regNumber reg)
+void CodeGen::inst_SET(emitJumpKind condition, regNumber reg)
{
#ifdef _TARGET_XARCH_
- instruction ins;
+ instruction ins;
/* Convert the condition to an instruction opcode */
switch (condition)
{
- case EJ_js : ins = INS_sets ; break;
- case EJ_jns : ins = INS_setns ; break;
- case EJ_je : ins = INS_sete ; break;
- case EJ_jne : ins = INS_setne ; break;
+ case EJ_js:
+ ins = INS_sets;
+ break;
+ case EJ_jns:
+ ins = INS_setns;
+ break;
+ case EJ_je:
+ ins = INS_sete;
+ break;
+ case EJ_jne:
+ ins = INS_setne;
+ break;
- case EJ_jl : ins = INS_setl ; break;
- case EJ_jle : ins = INS_setle ; break;
- case EJ_jge : ins = INS_setge ; break;
- case EJ_jg : ins = INS_setg ; break;
+ case EJ_jl:
+ ins = INS_setl;
+ break;
+ case EJ_jle:
+ ins = INS_setle;
+ break;
+ case EJ_jge:
+ ins = INS_setge;
+ break;
+ case EJ_jg:
+ ins = INS_setg;
+ break;
- case EJ_jb : ins = INS_setb ; break;
- case EJ_jbe : ins = INS_setbe ; break;
- case EJ_jae : ins = INS_setae ; break;
- case EJ_ja : ins = INS_seta ; break;
+ case EJ_jb:
+ ins = INS_setb;
+ break;
+ case EJ_jbe:
+ ins = INS_setbe;
+ break;
+ case EJ_jae:
+ ins = INS_setae;
+ break;
+ case EJ_ja:
+ ins = INS_seta;
+ break;
- case EJ_jpe : ins = INS_setpe ; break;
- case EJ_jpo : ins = INS_setpo ; break;
+ case EJ_jpe:
+ ins = INS_setpe;
+ break;
+ case EJ_jpo:
+ ins = INS_setpo;
+ break;
- default: NO_WAY("unexpected condition type"); return;
+ default:
+ NO_WAY("unexpected condition type");
+ return;
}
assert(genRegMask(reg) & RBM_BYTE_REGS);
@@ -289,25 +326,55 @@ void CodeGen::inst_SET(emitJumpKind condition,
/* Convert the condition to an insCond value */
switch (condition)
{
- case EJ_eq : cond = INS_COND_EQ; break;
- case EJ_ne : cond = INS_COND_NE; break;
- case EJ_hs : cond = INS_COND_HS; break;
- case EJ_lo : cond = INS_COND_LO; break;
-
- case EJ_mi : cond = INS_COND_MI; break;
- case EJ_pl : cond = INS_COND_PL; break;
- case EJ_vs : cond = INS_COND_VS; break;
- case EJ_vc : cond = INS_COND_VC; break;
-
- case EJ_hi : cond = INS_COND_HI; break;
- case EJ_ls : cond = INS_COND_LS; break;
- case EJ_ge : cond = INS_COND_GE; break;
- case EJ_lt : cond = INS_COND_LT; break;
-
- case EJ_gt : cond = INS_COND_GT; break;
- case EJ_le : cond = INS_COND_LE; break;
-
- default: NO_WAY("unexpected condition type"); return;
+ case EJ_eq:
+ cond = INS_COND_EQ;
+ break;
+ case EJ_ne:
+ cond = INS_COND_NE;
+ break;
+ case EJ_hs:
+ cond = INS_COND_HS;
+ break;
+ case EJ_lo:
+ cond = INS_COND_LO;
+ break;
+
+ case EJ_mi:
+ cond = INS_COND_MI;
+ break;
+ case EJ_pl:
+ cond = INS_COND_PL;
+ break;
+ case EJ_vs:
+ cond = INS_COND_VS;
+ break;
+ case EJ_vc:
+ cond = INS_COND_VC;
+ break;
+
+ case EJ_hi:
+ cond = INS_COND_HI;
+ break;
+ case EJ_ls:
+ cond = INS_COND_LS;
+ break;
+ case EJ_ge:
+ cond = INS_COND_GE;
+ break;
+ case EJ_lt:
+ cond = INS_COND_LT;
+ break;
+
+ case EJ_gt:
+ cond = INS_COND_GT;
+ break;
+ case EJ_le:
+ cond = INS_COND_LE;
+ break;
+
+ default:
+ NO_WAY("unexpected condition type");
+ return;
}
getEmitter()->emitIns_R_COND(INS_cset, EA_8BYTE, reg, cond);
#else
@@ -320,10 +387,12 @@ void CodeGen::inst_SET(emitJumpKind condition,
* Generate a "op reg" instruction.
*/
-void CodeGen::inst_RV(instruction ins, regNumber reg, var_types type, emitAttr size)
+void CodeGen::inst_RV(instruction ins, regNumber reg, var_types type, emitAttr size)
{
if (size == EA_UNKNOWN)
+ {
size = emitActualTypeSize(type);
+ }
getEmitter()->emitIns_R(ins, size, reg);
}
@@ -333,15 +402,17 @@ void CodeGen::inst_RV(instruction ins, regNumber reg, var_types type, emi
* Generate a "op reg1, reg2" instruction.
*/
-void CodeGen::inst_RV_RV(instruction ins,
- regNumber reg1,
- regNumber reg2,
- var_types type,
- emitAttr size,
- insFlags flags /* = INS_FLAGS_DONT_CARE */)
+void CodeGen::inst_RV_RV(instruction ins,
+ regNumber reg1,
+ regNumber reg2,
+ var_types type,
+ emitAttr size,
+ insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
if (size == EA_UNKNOWN)
+ {
size = emitActualTypeSize(type);
+ }
#ifdef _TARGET_ARM_
getEmitter()->emitIns_R_R(ins, size, reg1, reg2, flags);
@@ -350,18 +421,17 @@ void CodeGen::inst_RV_RV(instruction ins,
#endif
}
-
/*****************************************************************************
*
* Generate a "op reg1, reg2, reg3" instruction.
*/
-void CodeGen::inst_RV_RV_RV(instruction ins,
- regNumber reg1,
- regNumber reg2,
- regNumber reg3,
- emitAttr size,
- insFlags flags /* = INS_FLAGS_DONT_CARE */)
+void CodeGen::inst_RV_RV_RV(instruction ins,
+ regNumber reg1,
+ regNumber reg2,
+ regNumber reg3,
+ emitAttr size,
+ insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
#ifdef _TARGET_ARM_
getEmitter()->emitIns_R_R_R(ins, size, reg1, reg2, reg3, flags);
@@ -376,7 +446,7 @@ void CodeGen::inst_RV_RV_RV(instruction ins,
* Generate a "op icon" instruction.
*/
-void CodeGen::inst_IV(instruction ins, int val)
+void CodeGen::inst_IV(instruction ins, int val)
{
getEmitter()->emitIns_I(ins, EA_PTRSIZE, val);
}
@@ -384,11 +454,10 @@ void CodeGen::inst_IV(instruction ins, int val)
/*****************************************************************************
*
* Generate a "op icon" instruction where icon is a handle of type specified
- * by 'flags'
+ * by 'flags'
*/
-void CodeGen::inst_IV_handle(instruction ins,
- int val)
+void CodeGen::inst_IV_handle(instruction ins, int val)
{
getEmitter()->emitIns_I(ins, EA_HANDLE_CNS_RELOC, val);
}
@@ -399,22 +468,23 @@ void CodeGen::inst_IV_handle(instruction ins,
* Generate a "op ST(n), ST(0)" instruction.
*/
-void CodeGen::inst_FS(instruction ins, unsigned stk)
+void CodeGen::inst_FS(instruction ins, unsigned stk)
{
assert(stk < 8);
-#ifdef DEBUG
+#ifdef DEBUG
switch (ins)
{
- case INS_fcompp:
- assert(stk == 1); break; // Implicit operand of compp is ST(1)
- case INS_fld:
- case INS_fxch:
- assert(!"don't do this. Do you want to use inst_FN() instead?");
- break;
- default:
- break;
+ case INS_fcompp:
+ assert(stk == 1);
+ break; // Implicit operand of compp is ST(1)
+ case INS_fld:
+ case INS_fxch:
+ assert(!"don't do this. Do you want to use inst_FN() instead?");
+ break;
+ default:
+ break;
}
#endif
@@ -427,27 +497,27 @@ void CodeGen::inst_FS(instruction ins, unsigned stk)
* Generate a "op ST(0), ST(n)" instruction
*/
-void CodeGenInterface::inst_FN(instruction ins, unsigned stk)
+void CodeGenInterface::inst_FN(instruction ins, unsigned stk)
{
assert(stk < 8);
-#ifdef DEBUG
+#ifdef DEBUG
switch (ins)
{
- case INS_fst:
- case INS_fstp:
- case INS_faddp:
- case INS_fsubp:
- case INS_fsubrp:
- case INS_fmulp:
- case INS_fdivp:
- case INS_fdivrp:
- case INS_fcompp:
- assert(!"don't do this. Do you want to use inst_FS() instead?");
- break;
- default:
- break;
+ case INS_fst:
+ case INS_fstp:
+ case INS_faddp:
+ case INS_fsubp:
+ case INS_fsubrp:
+ case INS_fmulp:
+ case INS_fdivp:
+ case INS_fdivrp:
+ case INS_fcompp:
+ assert(!"don't do this. Do you want to use inst_FS() instead?");
+ break;
+ default:
+ break;
}
#endif // DEBUG
@@ -456,34 +526,29 @@ void CodeGenInterface::inst_FN(instruction ins, unsigned stk)
}
#endif // FEATURE_STACK_FP_X87
-
/*****************************************************************************
*
* Display a stack frame reference.
*/
-void CodeGen::inst_set_SV_var(GenTreePtr tree)
+void CodeGen::inst_set_SV_var(GenTreePtr tree)
{
-#ifdef DEBUG
+#ifdef DEBUG
assert(tree && (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_VAR_ADDR || tree->gtOper == GT_STORE_LCL_VAR));
assert(tree->gtLclVarCommon.gtLclNum < compiler->lvaCount);
getEmitter()->emitVarRefOffs = tree->gtLclVar.gtLclILoffs;
-#endif//DEBUG
+#endif // DEBUG
}
-
/*****************************************************************************
*
* Generate a "op reg, icon" instruction.
*/
-void CodeGen::inst_RV_IV(instruction ins,
- regNumber reg,
- ssize_t val,
- emitAttr size,
- insFlags flags /* = INS_FLAGS_DONT_CARE */)
+void CodeGen::inst_RV_IV(
+ instruction ins, regNumber reg, ssize_t val, emitAttr size, insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
#if !defined(_TARGET_64BIT_)
assert(size != EA_8BYTE);
@@ -493,7 +558,7 @@ void CodeGen::inst_RV_IV(instruction ins,
if (arm_Valid_Imm_For_Instr(ins, val, flags))
{
getEmitter()->emitIns_R_I(ins, size, reg, val, flags);
- }
+ }
else if (ins == INS_mov)
{
instGen_Set_Reg_To_Imm(size, reg, val);
@@ -503,7 +568,7 @@ void CodeGen::inst_RV_IV(instruction ins,
#ifndef LEGACY_BACKEND
// TODO-Cleanup: Add a comment about why this is unreached() for RyuJIT backend.
unreached();
-#else //LEGACY_BACKEND
+#else // LEGACY_BACKEND
regNumber tmpReg = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(reg));
instGen_Set_Reg_To_Imm(size, tmpReg, val);
getEmitter()->emitIns_R_R(ins, size, reg, tmpReg, flags);
@@ -529,7 +594,7 @@ void CodeGen::inst_RV_IV(instruction ins,
{
#ifndef LEGACY_BACKEND
assert(!"Invalid immediate for inst_RV_IV");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
// We can't fit the immediate into this instruction, so move it into
// a register first
regNumber tmpReg = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(reg));
@@ -552,7 +617,6 @@ void CodeGen::inst_RV_IV(instruction ins,
#endif // !_TARGET_ARM_
}
-
#if defined(LEGACY_BACKEND)
/*****************************************************************************
* Figure out the operands to address the tree.
@@ -563,11 +627,7 @@ void CodeGen::inst_RV_IV(instruction ins,
* On return, *baseReg, *indScale, *indReg, and *cns are set.
*/
-void CodeGen::instGetAddrMode(GenTreePtr addr,
- regNumber* baseReg,
- unsigned* indScale,
- regNumber* indReg,
- unsigned* cns)
+void CodeGen::instGetAddrMode(GenTreePtr addr, regNumber* baseReg, unsigned* indScale, regNumber* indReg, unsigned* cns)
{
if (addr->gtOper == GT_ARR_ELEM)
{
@@ -589,8 +649,8 @@ void CodeGen::instGetAddrMode(GenTreePtr addr,
}
else if (addr->gtOper == GT_LEA)
{
- GenTreeAddrMode * lea = addr->AsAddrMode();
- GenTreePtr base = lea->Base();
+ GenTreeAddrMode* lea = addr->AsAddrMode();
+ GenTreePtr base = lea->Base();
assert(!base || (base->gtFlags & GTF_REG_VAL));
GenTreePtr index = lea->Index();
assert(!index || (index->gtFlags & GTF_REG_VAL));
@@ -605,22 +665,16 @@ void CodeGen::instGetAddrMode(GenTreePtr addr,
{
/* Figure out what complex address mode to use */
- GenTreePtr rv1 = NULL;
- GenTreePtr rv2 = NULL;
- bool rev = false;
+ GenTreePtr rv1 = NULL;
+ GenTreePtr rv2 = NULL;
+ bool rev = false;
INDEBUG(bool yes =)
- genCreateAddrMode(addr,
- -1,
- true,
- RBM_NONE,
- &rev,
- &rv1,
- &rv2,
+ genCreateAddrMode(addr, -1, true, RBM_NONE, &rev, &rv1, &rv2,
#if SCALED_ADDR_MODES
- indScale,
+ indScale,
#endif
- cns);
+ cns);
assert(yes); // since we have called genMakeAddressable() on addr
// Ensure that the base and index, if used, are in registers.
@@ -637,7 +691,7 @@ void CodeGen::instGetAddrMode(GenTreePtr addr,
}
assert(rv1->gtFlags & GTF_REG_VAL);
}
- if (rv2 && ((rv2->gtFlags & GTF_REG_VAL) == 0))
+ if (rv2 && ((rv2->gtFlags & GTF_REG_VAL) == 0))
{
if (rv2->gtFlags & GTF_SPILLED)
{
@@ -659,11 +713,10 @@ void CodeGen::instGetAddrMode(GenTreePtr addr,
}
*baseReg = rv1 ? rv1->gtRegNum : REG_NA;
- * indReg = rv2 ? rv2->gtRegNum : REG_NA;
+ *indReg = rv2 ? rv2->gtRegNum : REG_NA;
}
}
-
#if CPU_LOAD_STORE_ARCH
/*****************************************************************************
*
@@ -671,7 +724,7 @@ void CodeGen::instGetAddrMode(GenTreePtr addr,
* For a Load/Store arch we generate the 1-8 instructions necessary to
* implement the single addressing mode instruction used on x86.
* We currently don't have an instruction scheduler enabled on any target.
- *
+ *
* [Schedule] an "ins reg, [r/m]" (rdst=true), or "ins [r/m], reg" (rdst=false)
* instruction (the r/m operand given by a tree). We also allow instructions
* of the form "ins [r/m], icon", these are signaled by setting 'cons' to
@@ -680,9 +733,9 @@ void CodeGen::instGetAddrMode(GenTreePtr addr,
* The longest instruction sequence emitted on the ARM is as follows:
*
* - the "addr" represents an array addressing mode,
- * with a baseReg, indReg with a shift and a large offset
+ * with a baseReg, indReg with a shift and a large offset
* (Note that typically array addressing modes do NOT have a large offset)
- * - "ins" is an ALU instruction,
+ * - "ins" is an ALU instruction,
* - cons=true, and imm is a large constant that can not be directly encoded with "ins"
 * - We may need to grab up to four additional registers: regT, regVal, regOffs and regImm
*
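A minimal, self-contained C++ sketch of that worst case (the AddrMode struct, worstCaseInstrCount helper, and register names below are illustrative only and are not part of the JIT sources): a single x86-style "ins [baseReg + indReg*scale + offs], imm" expands to up to eight load/store-architecture instructions when both the offset and the immediate have to be materialized into scratch registers.

#include <cstdio>

struct AddrMode
{
    bool hasIndex;       // indReg with a shift is present
    bool largeOffset;    // offs cannot be encoded directly in the load/store
    bool largeImmediate; // imm cannot be encoded directly in "ins"
};

// Count the instructions the worst-case expansion would emit:
// add regT; movw/movt regOffs; ldr regVal; movw/movt regImm; "ins" regVal; str regVal.
static int worstCaseInstrCount(const AddrMode& am)
{
    int count = 0;
    count += am.hasIndex ? 1 : 0;       // add  regT, baseReg, indReg, LSL #shift
    count += am.largeOffset ? 2 : 0;    // movw/movt regOffs, #offs
    count += 1;                         // ldr  regVal, [regT + regOffs]
    count += am.largeImmediate ? 2 : 0; // movw/movt regImm, #imm
    count += 1;                         // "ins" regVal, regImm
    count += 1;                         // str  regVal, [regT + regOffs]
    return count;
}

int main()
{
    AddrMode worst = {true, true, true};
    printf("worst case: %d instructions\n", worstCaseInstrCount(worst)); // prints 8
    return 0;
}

Simpler cases (no index register, an encodable offset or immediate, or a register destination) fall out to fewer instructions, which is why the function header above says 1-8 instructions.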
@@ -697,15 +750,15 @@ void CodeGen::instGetAddrMode(GenTreePtr addr,
*
*/
-void CodeGen::sched_AM(instruction ins,
- emitAttr size,
- regNumber ireg,
- bool rdst,
- GenTreePtr addr,
- unsigned offs,
- bool cons,
- int imm,
- insFlags flags)
+void CodeGen::sched_AM(instruction ins,
+ emitAttr size,
+ regNumber ireg,
+ bool rdst,
+ GenTreePtr addr,
+ unsigned offs,
+ bool cons,
+ int imm,
+ insFlags flags)
{
assert(addr);
assert(size != EA_UNKNOWN);
@@ -722,7 +775,7 @@ void CodeGen::sched_AM(instruction ins,
if (ins == INS_lea)
{
insType = eIT_Lea;
- ins = INS_add;
+ ins = INS_add;
}
else if (getEmitter()->emitInsIsLoad(ins))
{
@@ -730,15 +783,14 @@ void CodeGen::sched_AM(instruction ins,
}
else if (getEmitter()->emitInsIsStore(ins))
{
- insType = eIT_Store;
+ insType = eIT_Store;
}
-
- regNumber baseReg = REG_NA;
- regNumber indReg = REG_NA;
- unsigned indScale = 0;
- regMaskTP avoidMask = RBM_NONE;
-
+ regNumber baseReg = REG_NA;
+ regNumber indReg = REG_NA;
+ unsigned indScale = 0;
+
+ regMaskTP avoidMask = RBM_NONE;
if (addr->gtFlags & GTF_REG_VAL)
{
@@ -802,28 +854,26 @@ void CodeGen::sched_AM(instruction ins,
}
}
- unsigned shift = (indScale > 0) ? genLog2((unsigned) indScale) : 0;
-
- regNumber regT = REG_NA; // the register where the address is computed into
- regNumber regOffs = REG_NA; // a temporary register to use for the offs when it can't be directly encoded
- regNumber regImm = REG_NA; // a temporary register to use for the imm when it can't be directly encoded
- regNumber regVal = REG_NA; // a temporary register to use when we have to do a load/modify/store operation
+ unsigned shift = (indScale > 0) ? genLog2((unsigned)indScale) : 0;
+
+ regNumber regT = REG_NA; // the register where the address is computed into
+ regNumber regOffs = REG_NA; // a temporary register to use for the offs when it can't be directly encoded
+ regNumber regImm = REG_NA; // a temporary register to use for the imm when it can't be directly encoded
+ regNumber regVal = REG_NA; // a temporary register to use when we have to do a load/modify/store operation
// Setup regT
if (indReg == REG_NA)
{
- regT = baseReg; // We can use the baseReg, regT is read-only
+ regT = baseReg; // We can use the baseReg, regT is read-only
}
- else // We have an index register (indReg != REG_NA)
+ else // We have an index register (indReg != REG_NA)
{
// Check for special case that we can encode using one instruction
- if ((offs == 0) && (insType != eIT_Other) && !instIsFP(ins)
- && baseReg != REG_NA)
+ if ((offs == 0) && (insType != eIT_Other) && !instIsFP(ins) && baseReg != REG_NA)
{
// ins ireg, [baseReg + indReg << shift]
- getEmitter()->emitIns_R_R_R_I(ins, size, ireg, baseReg, indReg, shift,
- flags, INS_OPTS_LSL);
- return;
+ getEmitter()->emitIns_R_R_R_I(ins, size, ireg, baseReg, indReg, shift, flags, INS_OPTS_LSL);
+ return;
}
// Otherwise setup regT, regT is written once here
@@ -845,30 +895,26 @@ void CodeGen::sched_AM(instruction ins,
if (baseReg == REG_NA)
{
assert(shift > 0);
- // LSL regT, indReg, shift.
- getEmitter()->emitIns_R_R_I(
- INS_lsl,
- EA_PTRSIZE,
- regT, indReg, shift & ((TARGET_POINTER_SIZE * 8) - 1));
+ // LSL regT, indReg, shift.
+ getEmitter()->emitIns_R_R_I(INS_lsl, EA_PTRSIZE, regT, indReg, shift & ((TARGET_POINTER_SIZE * 8) - 1));
}
else
#endif // SCALED_ADDR_MODES
{
assert(baseReg != REG_NA);
- // add regT, baseReg, indReg<<shift.
- getEmitter()->emitIns_R_R_R_I(
- INS_add,
- // The "add" operation will yield either a pointer or byref, depending on the type of "addr."
- varTypeIsGC(addr->TypeGet()) ? EA_BYREF : EA_PTRSIZE,
- regT, baseReg, indReg, shift,
- INS_FLAGS_NOT_SET, INS_OPTS_LSL);
+ // add regT, baseReg, indReg<<shift.
+ getEmitter()->emitIns_R_R_R_I(INS_add,
+ // The "add" operation will yield either a pointer or byref, depending on the
+ // type of "addr."
+ varTypeIsGC(addr->TypeGet()) ? EA_BYREF : EA_PTRSIZE, regT, baseReg, indReg,
+ shift, INS_FLAGS_NOT_SET, INS_OPTS_LSL);
}
}
// regT is the base register for a load/store or an operand for add when insType is eIT_Lea
//
- assert (regT != REG_NA);
+ assert(regT != REG_NA);
avoidMask |= genRegMask(regT);
if (insType != eIT_Other)
@@ -876,7 +922,7 @@ void CodeGen::sched_AM(instruction ins,
assert((flags != INS_FLAGS_SET) || (insType == eIT_Lea));
if ((insType == eIT_Lea) && (offs == 0))
{
- // If we have the same register as src and dst and we do not need to set the flags
+ // If we have the same register as src and dst and we do not need to set the flags
// then we can skip emitting the instruction
if ((ireg != regT) || (flags == INS_FLAGS_SET))
{
@@ -887,7 +933,7 @@ void CodeGen::sched_AM(instruction ins,
else if (arm_Valid_Imm_For_Instr(ins, offs, flags))
{
// ins ireg, [regT + offs]
- getEmitter()->emitIns_R_R_I(ins, size, ireg, regT, offs, flags);
+ getEmitter()->emitIns_R_R_I(ins, size, ireg, regT, offs, flags);
}
else
{
@@ -931,32 +977,32 @@ void CodeGen::sched_AM(instruction ins,
regVal = regSet.rsPickReg(RBM_ALLINT & ~avoidMask);
regTracker.rsTrackRegTrash(regVal);
avoidMask |= genRegMask(regVal);
- var_types load_store_type;
+ var_types load_store_type;
switch (size)
{
- case EA_4BYTE:
- load_store_type = TYP_INT;
- break;
+ case EA_4BYTE:
+ load_store_type = TYP_INT;
+ break;
- case EA_2BYTE:
- load_store_type = TYP_SHORT;
- break;
+ case EA_2BYTE:
+ load_store_type = TYP_SHORT;
+ break;
- case EA_1BYTE:
- load_store_type = TYP_BYTE;
- break;
+ case EA_1BYTE:
+ load_store_type = TYP_BYTE;
+ break;
- default:
- assert(!"Unexpected size in sched_AM, eIT_Other");
- load_store_type = TYP_INT;
- break;
+ default:
+ assert(!"Unexpected size in sched_AM, eIT_Other");
+ load_store_type = TYP_INT;
+ break;
}
// Load the content at addr into regVal using regT + offs
if (arm_Valid_Disp_For_LdSt(offs, load_store_type))
{
// ldrX regVal, [regT + offs]
- getEmitter()->emitIns_R_R_I(ins_Load(load_store_type), size, regVal, regT, offs);
+ getEmitter()->emitIns_R_R_I(ins_Load(load_store_type), size, regVal, regT, offs);
}
else
{
@@ -967,16 +1013,16 @@ void CodeGen::sched_AM(instruction ins,
instGen_Set_Reg_To_Imm(EA_4BYTE, regOffs, offs);
getEmitter()->emitIns_R_R_R(ins_Load(load_store_type), size, regVal, regT, regOffs);
}
-
+
if (cons)
{
if (arm_Valid_Imm_For_Instr(ins, imm, flags))
{
- getEmitter()->emitIns_R_I(ins, size, regVal, imm, flags);
+ getEmitter()->emitIns_R_I(ins, size, regVal, imm, flags);
}
else
{
- assert (regOffs == REG_NA);
+ assert(regOffs == REG_NA);
regImm = regSet.rsPickFreeReg(RBM_ALLINT & ~avoidMask);
avoidMask |= genRegMask(regImm);
instGen_Set_Reg_To_Imm(size, regImm, imm);
@@ -991,7 +1037,7 @@ void CodeGen::sched_AM(instruction ins,
{
getEmitter()->emitIns_R_R(ins, size, regVal, ireg, flags);
}
-
+
// If we do not have a register destination we must perform the write-back store instruction
// (unless we have an instruction like INS_cmp that does not write a destination)
//
@@ -1001,7 +1047,7 @@ void CodeGen::sched_AM(instruction ins,
if (regOffs == REG_NA)
{
// strX regVal, [regT + offs]
- getEmitter()->emitIns_R_R_I(ins_Store(load_store_type), size, regVal, regT, offs);
+ getEmitter()->emitIns_R_R_I(ins_Store(load_store_type), size, regVal, regT, offs);
}
else
{
@@ -1018,22 +1064,22 @@ void CodeGen::sched_AM(instruction ins,
*
 * This is somewhat specific to the x86 instruction format.
* We currently don't have an instruction scheduler enabled on any target.
- *
+ *
* [Schedule] an "ins reg, [r/m]" (rdst=true), or "ins [r/m], reg" (rdst=false)
* instruction (the r/m operand given by a tree). We also allow instructions
* of the form "ins [r/m], icon", these are signalled by setting 'cons' to
* true.
*/
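As a minimal illustration (the operandShape helper below is a hypothetical sketch, not JIT code), the 'cons' and 'rdst' flags described above select among three x86 operand shapes, tested in that order:

#include <cstdio>

// Return the x86 operand shape sched_AM is asked to schedule: an immediate store
// first, then a register destination, otherwise a register source with a memory
// destination.
static const char* operandShape(bool cons, bool rdst)
{
    if (cons)
    {
        return "ins [r/m], icon"; // immediate into memory
    }
    if (rdst)
    {
        return "ins reg, [r/m]"; // memory source, register destination
    }
    return "ins [r/m], reg"; // register source, memory destination
}

int main()
{
    printf("%s\n", operandShape(true, false));  // ins [r/m], icon
    printf("%s\n", operandShape(false, true));  // ins reg, [r/m]
    printf("%s\n", operandShape(false, false)); // ins [r/m], reg
    return 0;
}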
-void CodeGen::sched_AM(instruction ins,
- emitAttr size,
- regNumber ireg,
- bool rdst,
- GenTreePtr addr,
- unsigned offs,
- bool cons,
- int imm,
- insFlags flags)
+void CodeGen::sched_AM(instruction ins,
+ emitAttr size,
+ regNumber ireg,
+ bool rdst,
+ GenTreePtr addr,
+ unsigned offs,
+ bool cons,
+ int imm,
+ insFlags flags)
{
#ifdef _TARGET_XARCH_
/* Don't use this method for issuing calls. Use instEmit_xxxCall() */
@@ -1043,13 +1089,12 @@ void CodeGen::sched_AM(instruction ins,
assert(addr);
assert(size != EA_UNKNOWN);
- regNumber reg;
+ regNumber reg;
/* Has the address been conveniently loaded into a register,
or is it an absolute value ? */
- if ((addr->gtFlags & GTF_REG_VAL) ||
- (addr->IsCnsIntOrI()))
+ if ((addr->gtFlags & GTF_REG_VAL) || (addr->IsCnsIntOrI()))
{
if (addr->gtFlags & GTF_REG_VAL)
{
@@ -1057,12 +1102,12 @@ void CodeGen::sched_AM(instruction ins,
reg = addr->gtRegNum;
- if (cons)
- getEmitter()->emitIns_I_AR (ins, size, imm, reg, offs);
+ if (cons)
+ getEmitter()->emitIns_I_AR(ins, size, imm, reg, offs);
else if (rdst)
- getEmitter()->emitIns_R_AR (ins, size, ireg, reg, offs);
+ getEmitter()->emitIns_R_AR(ins, size, ireg, reg, offs);
else
- getEmitter()->emitIns_AR_R (ins, size, ireg, reg, offs);
+ getEmitter()->emitIns_AR_R(ins, size, ireg, reg, offs);
}
else
{
@@ -1080,19 +1125,18 @@ void CodeGen::sched_AM(instruction ins,
assert(offs <= 4);
}
#endif
- reg = REG_NA;
+ reg = REG_NA;
ssize_t disp = addr->gtIntCon.gtIconVal + offs;
// Cross our fingers and hope the codegenerator did the right
// thing and the constant address can be RIP-relative
- if (cons)
- getEmitter()->emitIns_I_AI (ins, size, imm, disp);
+ if (cons)
+ getEmitter()->emitIns_I_AI(ins, size, imm, disp);
else if (rdst)
- getEmitter()->emitIns_R_AI (ins, size, ireg, disp);
+ getEmitter()->emitIns_R_AI(ins, size, ireg, disp);
else
- getEmitter()->emitIns_AI_R (ins, size, ireg, disp);
-
+ getEmitter()->emitIns_AI_R(ins, size, ireg, disp);
}
return;
@@ -1100,8 +1144,8 @@ void CodeGen::sched_AM(instruction ins,
/* Figure out what complex address mode to use */
- regNumber baseReg, indReg;
- unsigned indScale = 0, cns = 0;
+ regNumber baseReg, indReg;
+ unsigned indScale = 0, cns = 0;
instGetAddrMode(addr, &baseReg, &indScale, &indReg, &cns);
@@ -1111,22 +1155,22 @@ void CodeGen::sched_AM(instruction ins,
/* Is there an index reg operand? */
- if (indReg != REG_NA)
+ if (indReg != REG_NA)
{
/* Is the index reg operand scaled? */
- if (indScale)
+ if (indScale)
{
/* Is there a base address operand? */
- if (baseReg != REG_NA)
+ if (baseReg != REG_NA)
{
reg = baseReg;
/* The address is "[reg + {2/4/8} * indReg + offs]" */
- if (cons)
- getEmitter()->emitIns_I_ARX(ins, size, imm, reg, indReg, indScale, offs);
+ if (cons)
+ getEmitter()->emitIns_I_ARX(ins, size, imm, reg, indReg, indScale, offs);
else if (rdst)
getEmitter()->emitIns_R_ARX(ins, size, ireg, reg, indReg, indScale, offs);
else
@@ -1136,12 +1180,12 @@ void CodeGen::sched_AM(instruction ins,
{
/* The address is "[{2/4/8} * indReg + offs]" */
- if (cons)
- getEmitter()->emitIns_I_AX (ins, size, imm, indReg, indScale, offs);
+ if (cons)
+ getEmitter()->emitIns_I_AX(ins, size, imm, indReg, indScale, offs);
else if (rdst)
- getEmitter()->emitIns_R_AX (ins, size, ireg, indReg, indScale, offs);
+ getEmitter()->emitIns_R_AX(ins, size, ireg, indReg, indScale, offs);
else
- getEmitter()->emitIns_AX_R (ins, size, ireg, indReg, indScale, offs);
+ getEmitter()->emitIns_AX_R(ins, size, ireg, indReg, indScale, offs);
}
}
else
@@ -1150,8 +1194,8 @@ void CodeGen::sched_AM(instruction ins,
reg = baseReg;
/* The address is "[reg + indReg + offs]" */
- if (cons)
- getEmitter()->emitIns_I_ARR(ins, size, imm, reg, indReg, offs);
+ if (cons)
+ getEmitter()->emitIns_I_ARR(ins, size, imm, reg, indReg, offs);
else if (rdst)
getEmitter()->emitIns_R_ARR(ins, size, ireg, reg, indReg, offs);
else
@@ -1160,14 +1204,15 @@ void CodeGen::sched_AM(instruction ins,
}
else
{
- unsigned cpx = 0;
- CORINFO_CLASS_HANDLE cls = 0;
+ unsigned cpx = 0;
+ CORINFO_CLASS_HANDLE cls = 0;
/* No second operand: the address is "[reg + icon]" */
- assert(baseReg != REG_NA); reg = baseReg;
+ assert(baseReg != REG_NA);
+ reg = baseReg;
-#ifdef LATE_DISASM
+#ifdef LATE_DISASM
/*
Keep in mind that non-static data members (GT_FIELD nodes) were
transformed into GT_IND nodes - we keep the CLS/CPX information
@@ -1175,9 +1220,8 @@ void CodeGen::sched_AM(instruction ins,
class member
*/
- if (addr->gtOper != GT_LEA &&
- (addr->gtOp.gtOp2->gtOper == GT_CNS_INT) &&
- addr->gtOp.gtOp2->IsIconHandle(GTF_ICON_FIELD_HDL))
+ if (addr->gtOper != GT_LEA && (addr->gtOp.gtOp2->gtOper == GT_CNS_INT) &&
+ addr->gtOp.gtOp2->IsIconHandle(GTF_ICON_FIELD_HDL))
{
/* This is a field offset - set the CPX/CLS values to emit a fixup */
@@ -1186,9 +1230,9 @@ void CodeGen::sched_AM(instruction ins,
}
#endif
- if (cons)
+ if (cons)
{
- getEmitter()->emitIns_I_AR(ins, size, imm, reg, offs, cpx, cls);
+ getEmitter()->emitIns_I_AR(ins, size, imm, reg, offs, cpx, cls);
}
else if (rdst)
{
@@ -1197,7 +1241,7 @@ void CodeGen::sched_AM(instruction ins,
else
{
getEmitter()->emitIns_AR_R(ins, size, ireg, reg, offs, cpx, cls);
- }
+ }
}
}
@@ -1209,21 +1253,20 @@ void CodeGen::sched_AM(instruction ins,
* Emit a "call [r/m]" instruction (the r/m operand given by a tree).
*/
-void CodeGen::instEmit_indCall(GenTreePtr call,
- size_t argSize,
- emitAttr retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize))
+void CodeGen::instEmit_indCall(GenTreePtr call,
+ size_t argSize,
+ emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize))
{
- GenTreePtr addr;
+ GenTreePtr addr;
- emitter::EmitCallType emitCallType;
+ emitter::EmitCallType emitCallType;
- regNumber brg = REG_NA;
- regNumber xrg = REG_NA;
- unsigned mul = 0;
- unsigned cns = 0;
+ regNumber brg = REG_NA;
+ regNumber xrg = REG_NA;
+ unsigned mul = 0;
+ unsigned cns = 0;
- CORINFO_SIG_INFO* sigInfo = nullptr;
+ CORINFO_SIG_INFO* sigInfo = nullptr;
assert(call->gtOper == GT_CALL);
@@ -1243,22 +1286,17 @@ void CodeGen::instEmit_indCall(GenTreePtr call,
emitCallType = emitter::EC_INDIR_R;
- if (!addr->OperIsIndir())
+ if (!addr->OperIsIndir())
{
if (!(addr->gtFlags & GTF_REG_VAL) && (addr->OperGet() == GT_CNS_INT))
{
- ssize_t funcPtr = addr->gtIntCon.gtIconVal;
+ ssize_t funcPtr = addr->gtIntCon.gtIconVal;
getEmitter()->emitIns_Call(emitter::EC_FUNC_ADDR,
- NULL, // methHnd
- INDEBUG_LDISASM_COMMA(sigInfo)
- (void*) funcPtr,
- argSize,
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur);
+ NULL, // methHnd
+ INDEBUG_LDISASM_COMMA(sigInfo)(void*) funcPtr, argSize,
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
+ gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
return;
}
}
@@ -1267,10 +1305,9 @@ void CodeGen::instEmit_indCall(GenTreePtr call,
/* Get hold of the address of the function pointer */
addr = addr->gtOp.gtOp1;
-
}
- if (addr->gtFlags & GTF_REG_VAL)
+ if (addr->gtFlags & GTF_REG_VAL)
{
/* The address is "reg" */
@@ -1288,17 +1325,16 @@ void CodeGen::instEmit_indCall(GenTreePtr call,
brg = addr->gtRegNum;
}
-
#else // CPU_LOAD_STORE_ARCH
/* Is there an indirection? */
- if (!addr->OperIsIndir())
+ if (!addr->OperIsIndir())
{
if (addr->gtFlags & GTF_REG_VAL)
{
emitCallType = emitter::EC_INDIR_R;
- brg = addr->gtRegNum;
+ brg = addr->gtRegNum;
}
else
{
@@ -1307,22 +1343,17 @@ void CodeGen::instEmit_indCall(GenTreePtr call,
assert(addr->OperGet() == GT_LCL_VAR);
emitCallType = emitter::EC_INDIR_SR;
- cns = addr->gtLclVarCommon.gtLclNum;
+ cns = addr->gtLclVarCommon.gtLclNum;
}
else
{
- ssize_t funcPtr = addr->gtIntCon.gtIconVal;
+ ssize_t funcPtr = addr->gtIntCon.gtIconVal;
getEmitter()->emitIns_Call(emitter::EC_FUNC_ADDR,
- NULL, // methHnd
- INDEBUG_LDISASM_COMMA(sigInfo)
- (void*) funcPtr,
- argSize,
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur);
+ nullptr, // methHnd
+ INDEBUG_LDISASM_COMMA(sigInfo)(void*) funcPtr, argSize,
+ retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
+ gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur);
return;
}
}
@@ -1339,7 +1370,7 @@ void CodeGen::instEmit_indCall(GenTreePtr call,
/* Has the address been conveniently loaded into a register? */
- if (addr->gtFlags & GTF_REG_VAL)
+ if (addr->gtFlags & GTF_REG_VAL)
{
/* The address is "reg" */
@@ -1347,28 +1378,30 @@ void CodeGen::instEmit_indCall(GenTreePtr call,
}
else
{
- bool rev = false;
+ bool rev = false;
- GenTreePtr rv1 = NULL;
- GenTreePtr rv2 = NULL;
+ GenTreePtr rv1 = nullptr;
+ GenTreePtr rv2 = nullptr;
/* Figure out what complex address mode to use */
INDEBUG(bool yes =)
- genCreateAddrMode(addr, -1, true, RBM_NONE, &rev, &rv1, &rv2, &mul, &cns);
-
+ genCreateAddrMode(addr, -1, true, RBM_NONE, &rev, &rv1, &rv2, &mul, &cns);
+
INDEBUG(PREFIX_ASSUME(yes)); // since we have called genMakeAddressable() on call->gtCall.gtCallAddr
/* Get the additional operands if any */
- if (rv1)
+ if (rv1)
{
- assert(rv1->gtFlags & GTF_REG_VAL); brg = rv1->gtRegNum;
+ assert(rv1->gtFlags & GTF_REG_VAL);
+ brg = rv1->gtRegNum;
}
- if (rv2)
+ if (rv2)
{
- assert(rv2->gtFlags & GTF_REG_VAL); xrg = rv2->gtRegNum;
+ assert(rv2->gtFlags & GTF_REG_VAL);
+ xrg = rv2->gtRegNum;
}
}
}
@@ -1379,20 +1412,13 @@ void CodeGen::instEmit_indCall(GenTreePtr call,
#endif // CPU_LOAD_STORE_ARCH
getEmitter()->emitIns_Call(emitCallType,
- NULL, // methHnd
- INDEBUG_LDISASM_COMMA(sigInfo)
- NULL, // addr
- argSize,
- retSize
- MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
- gcInfo.gcVarPtrSetCur,
- gcInfo.gcRegGCrefSetCur,
- gcInfo.gcRegByrefSetCur,
- BAD_IL_OFFSET, // ilOffset
- brg,
- xrg,
- mul,
- cns); // addressing mode values
+ nullptr, // methHnd
+ INDEBUG_LDISASM_COMMA(sigInfo) nullptr, // addr
+ argSize, retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
+ gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur, gcInfo.gcRegByrefSetCur,
+ BAD_IL_OFFSET, // ilOffset
+ brg, xrg, mul,
+ cns); // addressing mode values
}
#ifdef LEGACY_BACKEND
@@ -1401,12 +1427,9 @@ void CodeGen::instEmit_indCall(GenTreePtr call,
* Emit an "op [r/m]" instruction (the r/m operand given by a tree).
*/
-void CodeGen::instEmit_RM(instruction ins,
- GenTreePtr tree,
- GenTreePtr addr,
- unsigned offs)
+void CodeGen::instEmit_RM(instruction ins, GenTreePtr tree, GenTreePtr addr, unsigned offs)
{
- emitAttr size;
+ emitAttr size;
if (!instIsFP(ins))
size = emitTypeSize(tree->TypeGet());
@@ -1421,11 +1444,7 @@ void CodeGen::instEmit_RM(instruction ins,
* Emit an "op [r/m], reg" instruction (the r/m operand given by a tree).
*/
-void CodeGen::instEmit_RM_RV(instruction ins,
- emitAttr size,
- GenTreePtr tree,
- regNumber reg,
- unsigned offs)
+void CodeGen::instEmit_RM_RV(instruction ins, emitAttr size, GenTreePtr tree, regNumber reg, unsigned offs)
{
#ifdef _TARGET_XARCH_
assert(instIsFP(ins) == 0);
@@ -1440,11 +1459,7 @@ void CodeGen::instEmit_RM_RV(instruction ins,
* been made addressable).
*/
-void CodeGen::inst_TT(instruction ins,
- GenTreePtr tree,
- unsigned offs,
- int shfv,
- emitAttr size)
+void CodeGen::inst_TT(instruction ins, GenTreePtr tree, unsigned offs, int shfv, emitAttr size)
{
bool sizeInferred = false;
@@ -1452,18 +1467,22 @@ void CodeGen::inst_TT(instruction ins,
{
sizeInferred = true;
if (instIsFP(ins))
+ {
size = EA_ATTR(genTypeSize(tree->TypeGet()));
+ }
else
+ {
size = emitTypeSize(tree->TypeGet());
+ }
}
AGAIN:
/* Is the value sitting in a register? */
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
- regNumber reg;
+ regNumber reg;
#ifndef _TARGET_64BIT_
#ifdef LEGACY_BACKEND
@@ -1475,7 +1494,7 @@ AGAIN:
/* Is this a floating-point instruction? */
- if (isFloatRegType(tree->gtType))
+ if (isFloatRegType(tree->gtType))
{
reg = tree->gtRegNum;
@@ -1490,9 +1509,9 @@ AGAIN:
assert(!instIsFP(ins));
#if CPU_LONG_USES_REGPAIR
- if (tree->gtType == TYP_LONG)
+ if (tree->gtType == TYP_LONG)
{
- if (offs)
+ if (offs)
{
assert(offs == sizeof(int));
reg = genRegPairHi(tree->gtRegPair);
@@ -1510,7 +1529,7 @@ AGAIN:
/* Make sure it is not the "stack-half" of an enregistered long */
- if (reg != REG_STK)
+ if (reg != REG_STK)
{
// For short types, indicate that the value is promoted to 4 bytes.
// For longs, we are only emitting half of it so again set it to 4 bytes.
@@ -1520,10 +1539,14 @@ AGAIN:
size = EA_SET_SIZE(size, 4);
}
- if (shfv)
+ if (shfv)
+ {
getEmitter()->emitIns_R_I(ins, size, reg, shfv);
+ }
else
+ {
inst_RV(ins, reg, tree->TypeGet(), size);
+ }
return;
}
@@ -1531,98 +1554,105 @@ AGAIN:
/* Is this a spilled value? */
- if (tree->gtFlags & GTF_SPILLED)
+ if (tree->gtFlags & GTF_SPILLED)
{
assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill]'");
}
switch (tree->gtOper)
{
- unsigned varNum;
+ unsigned varNum;
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
#ifdef LEGACY_BACKEND
- /* Is this an enregistered long ? */
+ /* Is this an enregistered long ? */
- if (tree->gtType == TYP_LONG && !(tree->gtFlags & GTF_REG_VAL))
- {
- /* Avoid infinite loop */
+ if (tree->gtType == TYP_LONG && !(tree->gtFlags & GTF_REG_VAL))
+ {
+ /* Avoid infinite loop */
- if (genMarkLclVar(tree))
- goto LONGREG_TT;
- }
+ if (genMarkLclVar(tree))
+ goto LONGREG_TT;
+ }
#endif // LEGACY_BACKEND
- inst_set_SV_var(tree);
- goto LCL;
+ inst_set_SV_var(tree);
+ goto LCL;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
- offs += tree->gtLclFld.gtLclOffs;
- goto LCL;
+ offs += tree->gtLclFld.gtLclOffs;
+ goto LCL;
- LCL:
- varNum = tree->gtLclVarCommon.gtLclNum; assert(varNum < compiler->lvaCount);
+ LCL:
+ varNum = tree->gtLclVarCommon.gtLclNum;
+ assert(varNum < compiler->lvaCount);
- if (shfv)
- getEmitter()->emitIns_S_I(ins, size, varNum, offs, shfv);
- else
- getEmitter()->emitIns_S (ins, size, varNum, offs);
+ if (shfv)
+ {
+ getEmitter()->emitIns_S_I(ins, size, varNum, offs, shfv);
+ }
+ else
+ {
+ getEmitter()->emitIns_S(ins, size, varNum, offs);
+ }
- return;
+ return;
- case GT_CLS_VAR:
- // Make sure FP instruction size matches the operand size
- // (We optimized constant doubles to floats when we can, just want to
- // make sure that we don't mistakenly use 8 bytes when the
- // constant.
- assert(!isFloatRegType(tree->gtType) ||
- genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
+ case GT_CLS_VAR:
+ // Make sure FP instruction size matches the operand size
+            // (We optimized constant doubles to floats when we can; we just want to make
+            // sure that we don't mistakenly use 8 bytes when the constant is smaller.)
+ assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
- if (shfv)
- getEmitter()->emitIns_C_I(ins, size, tree->gtClsVar.gtClsVarHnd, offs, shfv);
- else
- getEmitter()->emitIns_C (ins, size, tree->gtClsVar.gtClsVarHnd, offs);
- return;
+ if (shfv)
+ {
+ getEmitter()->emitIns_C_I(ins, size, tree->gtClsVar.gtClsVarHnd, offs, shfv);
+ }
+ else
+ {
+ getEmitter()->emitIns_C(ins, size, tree->gtClsVar.gtClsVarHnd, offs);
+ }
+ return;
- case GT_IND:
- case GT_NULLCHECK:
- case GT_ARR_ELEM:
+ case GT_IND:
+ case GT_NULLCHECK:
+ case GT_ARR_ELEM:
{
#ifndef LEGACY_BACKEND
assert(!"inst_TT not supported for GT_IND, GT_NULLCHECK or GT_ARR_ELEM in !LEGACY_BACKEND");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
GenTreePtr addr = tree->OperIsIndir() ? tree->gtOp.gtOp1 : tree;
- if (shfv)
+ if (shfv)
sched_AM(ins, size, REG_NA, false, addr, offs, true, shfv);
else
- instEmit_RM(ins, tree, addr, offs);
+ instEmit_RM(ins, tree, addr, offs);
#endif // LEGACY_BACKEND
}
break;
#ifdef _TARGET_X86_
- case GT_CNS_INT:
- // We will get here for GT_MKREFANY from CodeGen::genPushArgList
- assert(offs == 0);
- assert(!shfv);
- if (tree->IsIconHandle())
- inst_IV_handle(ins, tree->gtIntCon.gtIconVal);
- else
- inst_IV(ins, tree->gtIntCon.gtIconVal);
- break;
+ case GT_CNS_INT:
+ // We will get here for GT_MKREFANY from CodeGen::genPushArgList
+ assert(offs == 0);
+ assert(!shfv);
+ if (tree->IsIconHandle())
+ inst_IV_handle(ins, tree->gtIntCon.gtIconVal);
+ else
+ inst_IV(ins, tree->gtIntCon.gtIconVal);
+ break;
#endif
- case GT_COMMA:
- // tree->gtOp.gtOp1 - already processed by genCreateAddrMode()
- tree = tree->gtOp.gtOp2;
- goto AGAIN;
+ case GT_COMMA:
+ // tree->gtOp.gtOp1 - already processed by genCreateAddrMode()
+ tree = tree->gtOp.gtOp2;
+ goto AGAIN;
- default:
- assert(!"invalid address");
+ default:
+ assert(!"invalid address");
}
-
}
/*****************************************************************************
@@ -1631,12 +1661,7 @@ AGAIN:
* been made addressable) and another that is a register.
*/
-void CodeGen::inst_TT_RV(instruction ins,
- GenTreePtr tree,
- regNumber reg,
- unsigned offs,
- emitAttr size,
- insFlags flags)
+void CodeGen::inst_TT_RV(instruction ins, GenTreePtr tree, regNumber reg, unsigned offs, emitAttr size, insFlags flags)
{
assert(reg != REG_STK);
@@ -1644,9 +1669,9 @@ AGAIN:
/* Is the value sitting in a register? */
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
- regNumber rg2;
+ regNumber rg2;
#ifdef _TARGET_64BIT_
assert(!instIsFP(ins));
@@ -1657,7 +1682,9 @@ AGAIN:
assert(rg2 != REG_STK);
if (ins != INS_mov || rg2 != reg)
+ {
inst_RV_RV(ins, rg2, reg, tree->TypeGet());
+ }
return;
#else // !_TARGET_64BIT_
@@ -1671,9 +1698,9 @@ AGAIN:
#endif
#if CPU_LONG_USES_REGPAIR
- if (tree->gtType == TYP_LONG)
+ if (tree->gtType == TYP_LONG)
{
- if (offs)
+ if (offs)
{
assert(offs == sizeof(int));
rg2 = genRegPairHi(tree->gtRegPair);
@@ -1689,19 +1716,19 @@ AGAIN:
rg2 = tree->gtRegNum;
}
- if (rg2 != REG_STK)
+ if (rg2 != REG_STK)
{
if (ins != INS_mov || rg2 != reg)
inst_RV_RV(ins, rg2, reg, tree->TypeGet(), size, flags);
return;
}
-
+
#endif // _TARGET_64BIT_
}
/* Is this a spilled value? */
- if (tree->gtFlags & GTF_SPILLED)
+ if (tree->gtFlags & GTF_SPILLED)
{
assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill]'");
}
@@ -1709,123 +1736,127 @@ AGAIN:
if (size == EA_UNKNOWN)
{
if (instIsFP(ins))
+ {
size = EA_ATTR(genTypeSize(tree->TypeGet()));
+ }
else
+ {
size = emitTypeSize(tree->TypeGet());
+ }
}
switch (tree->gtOper)
{
- unsigned varNum;
+ unsigned varNum;
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
#ifdef LEGACY_BACKEND
- if (tree->gtType == TYP_LONG && !(tree->gtFlags & GTF_REG_VAL))
- {
- /* Avoid infinite loop */
+ if (tree->gtType == TYP_LONG && !(tree->gtFlags & GTF_REG_VAL))
+ {
+ /* Avoid infinite loop */
- if (genMarkLclVar(tree))
- goto LONGREG_TT_RV;
- }
+ if (genMarkLclVar(tree))
+ goto LONGREG_TT_RV;
+ }
#endif // LEGACY_BACKEND
- inst_set_SV_var(tree);
- goto LCL;
+ inst_set_SV_var(tree);
+ goto LCL;
- case GT_LCL_FLD:
- case GT_STORE_LCL_FLD:
- offs += tree->gtLclFld.gtLclOffs;
- goto LCL;
+ case GT_LCL_FLD:
+ case GT_STORE_LCL_FLD:
+ offs += tree->gtLclFld.gtLclOffs;
+ goto LCL;
- LCL:
+ LCL:
- varNum = tree->gtLclVarCommon.gtLclNum; assert(varNum < compiler->lvaCount);
+ varNum = tree->gtLclVarCommon.gtLclNum;
+ assert(varNum < compiler->lvaCount);
#if CPU_LOAD_STORE_ARCH
- if (!getEmitter()->emitInsIsStore(ins))
- {
+ if (!getEmitter()->emitInsIsStore(ins))
+ {
#ifndef LEGACY_BACKEND
- // TODO-LdStArch-Bug: Should regTmp be a dst on the node or an internal reg?
- // Either way, it is not currently being handled by Lowering.
- regNumber regTmp = tree->gtRegNum;
- assert(regTmp != REG_NA);
-#else // LEGACY_BACKEND
- regNumber regTmp = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg));
+ // TODO-LdStArch-Bug: Should regTmp be a dst on the node or an internal reg?
+ // Either way, it is not currently being handled by Lowering.
+ regNumber regTmp = tree->gtRegNum;
+ assert(regTmp != REG_NA);
+#else // LEGACY_BACKEND
+ regNumber regTmp = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg));
#endif // LEGACY_BACKEND
- getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
- getEmitter()->emitIns_R_R(ins, size, regTmp, reg, flags);
- getEmitter()->emitIns_S_R(ins_Store(tree->TypeGet()), size, regTmp, varNum, offs);
+ getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
+ getEmitter()->emitIns_R_R(ins, size, regTmp, reg, flags);
+ getEmitter()->emitIns_S_R(ins_Store(tree->TypeGet()), size, regTmp, varNum, offs);
- regTracker.rsTrackRegTrash(regTmp);
- }
- else
+ regTracker.rsTrackRegTrash(regTmp);
+ }
+ else
#endif
- {
- // ins is a Store instruction
- //
- getEmitter()->emitIns_S_R(ins, size, reg, varNum, offs);
+ {
+ // ins is a Store instruction
+ //
+ getEmitter()->emitIns_S_R(ins, size, reg, varNum, offs);
#ifdef _TARGET_ARM_
- // If we need to set the flags then add an extra movs reg,reg instruction
- if (flags == INS_FLAGS_SET)
- getEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
+ // If we need to set the flags then add an extra movs reg,reg instruction
+ if (flags == INS_FLAGS_SET)
+ getEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
#endif
- }
- return;
+ }
+ return;
- case GT_CLS_VAR:
- // Make sure FP instruction size matches the operand size
- // (We optimized constant doubles to floats when we can, just want to
- // make sure that we don't mistakenly use 8 bytes when the
- // constant).
- assert(!isFloatRegType(tree->gtType) ||
- genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
+ case GT_CLS_VAR:
+ // Make sure FP instruction size matches the operand size
+            // (We optimized constant doubles to floats when we can; we just want to make
+            // sure that we don't mistakenly use 8 bytes when the constant is smaller.)
+ assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
#if CPU_LOAD_STORE_ARCH
- if (!getEmitter()->emitInsIsStore(ins))
- {
+ if (!getEmitter()->emitInsIsStore(ins))
+ {
#ifndef LEGACY_BACKEND
- NYI("Store of GT_CLS_VAR not supported for ARM RyuJIT Backend");
-#else // LEGACY_BACKEND
- regNumber regTmpAddr = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg));
- regNumber regTmpArith = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg) & ~genRegMask(regTmpAddr));
-
- getEmitter()->emitIns_R_C(INS_lea, EA_PTRSIZE, regTmpAddr, tree->gtClsVar.gtClsVarHnd, offs);
- getEmitter()->emitIns_R_R(ins_Load(tree->TypeGet()), size, regTmpArith, regTmpAddr);
- getEmitter()->emitIns_R_R(ins, size, regTmpArith, reg, flags);
- getEmitter()->emitIns_R_R(ins_Store(tree->TypeGet()), size, regTmpArith, regTmpAddr);
-
- regTracker.rsTrackRegTrash(regTmpAddr);
- regTracker.rsTrackRegTrash(regTmpArith);
+ NYI("Store of GT_CLS_VAR not supported for ARM RyuJIT Backend");
+#else // LEGACY_BACKEND
+ regNumber regTmpAddr = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg));
+ regNumber regTmpArith = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg) & ~genRegMask(regTmpAddr));
+
+ getEmitter()->emitIns_R_C(INS_lea, EA_PTRSIZE, regTmpAddr, tree->gtClsVar.gtClsVarHnd, offs);
+ getEmitter()->emitIns_R_R(ins_Load(tree->TypeGet()), size, regTmpArith, regTmpAddr);
+ getEmitter()->emitIns_R_R(ins, size, regTmpArith, reg, flags);
+ getEmitter()->emitIns_R_R(ins_Store(tree->TypeGet()), size, regTmpArith, regTmpAddr);
+
+ regTracker.rsTrackRegTrash(regTmpAddr);
+ regTracker.rsTrackRegTrash(regTmpArith);
#endif // LEGACY_BACKEND
- }
- else
+ }
+ else
#endif // CPU_LOAD_STORE_ARCH
- {
- getEmitter()->emitIns_C_R(ins, size, tree->gtClsVar.gtClsVarHnd, reg, offs);
- }
- return;
+ {
+ getEmitter()->emitIns_C_R(ins, size, tree->gtClsVar.gtClsVarHnd, reg, offs);
+ }
+ return;
- case GT_IND:
- case GT_NULLCHECK:
- case GT_ARR_ELEM:
+ case GT_IND:
+ case GT_NULLCHECK:
+ case GT_ARR_ELEM:
{
#ifndef LEGACY_BACKEND
assert(!"inst_TT_RV not supported for GT_IND, GT_NULLCHECK or GT_ARR_ELEM in RyuJIT Backend");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
GenTreePtr addr = tree->OperIsIndir() ? tree->gtOp.gtOp1 : tree;
sched_AM(ins, size, reg, false, addr, offs, false, 0, flags);
#endif // LEGACY_BACKEND
}
break;
- case GT_COMMA:
- // tree->gtOp.gtOp1 - already processed by genCreateAddrMode()
- tree = tree->gtOp.gtOp2;
- goto AGAIN;
+ case GT_COMMA:
+ // tree->gtOp.gtOp1 - already processed by genCreateAddrMode()
+ tree = tree->gtOp.gtOp2;
+ goto AGAIN;
- default:
- assert(!"invalid address");
+ default:
+ assert(!"invalid address");
}
}
@@ -1835,13 +1866,13 @@ regNumber CodeGen::genGetZeroRegister()
#if REDUNDANT_LOAD
- // Is the constant already in some register?
+ // Is the constant already in some register?
zeroReg = regTracker.rsIconIsInReg(0);
#endif
#ifdef LEGACY_BACKEND
- if (zeroReg == REG_NA)
+ if (zeroReg == REG_NA)
{
regMaskTP freeMask = regSet.rsRegMaskFree();
@@ -1860,8 +1891,8 @@ regNumber CodeGen::genGetZeroRegister()
//
if ((freeMask & RBM_CALLEE_TRASH) != RBM_TMP_0)
freeMask &= ~RBM_TMP_0;
- zeroReg = regSet.rsGrabReg(freeMask); // PickReg in stress will pick 'random' registers
- // We want one in the freeMask set, so just use GrabReg
+ zeroReg = regSet.rsGrabReg(freeMask); // PickReg in stress will pick 'random' registers
+ // We want one in the freeMask set, so just use GrabReg
genSetRegToIcon(zeroReg, 0, TYP_INT);
}
}
@@ -1870,19 +1901,13 @@ regNumber CodeGen::genGetZeroRegister()
return zeroReg;
}
-
/*****************************************************************************
*
* Generate an instruction that has one operand given by a tree (which has
* been made addressable) and another that is an integer constant.
*/
#ifdef LEGACY_BACKEND
-void CodeGen::inst_TT_IV(instruction ins,
- GenTreePtr tree,
- ssize_t val,
- unsigned offs,
- emitAttr size,
- insFlags flags)
+void CodeGen::inst_TT_IV(instruction ins, GenTreePtr tree, ssize_t val, unsigned offs, emitAttr size, insFlags flags)
{
bool sizeInferred = false;
@@ -1899,23 +1924,23 @@ AGAIN:
/* Is the value sitting in a register? */
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
#ifndef _TARGET_64BIT_
-LONGREG_TT_IV:
+ LONGREG_TT_IV:
#endif
- regNumber reg;
+ regNumber reg;
assert(instIsFP(ins) == 0);
#if CPU_LONG_USES_REGPAIR
- if (tree->gtType == TYP_LONG)
+ if (tree->gtType == TYP_LONG)
{
- if (offs == 0)
+ if (offs == 0)
{
reg = genRegPairLo(tree->gtRegPair);
}
- else // offs == 4
+ else // offs == 4
{
assert(offs == sizeof(int));
reg = genRegPairHi(tree->gtRegPair);
@@ -1935,7 +1960,7 @@ LONGREG_TT_IV:
reg = tree->gtRegNum;
}
- if (reg != REG_STK)
+ if (reg != REG_STK)
{
// We always widen as part of enregistering,
// so a smaller tree in a register can be
@@ -1945,7 +1970,7 @@ LONGREG_TT_IV:
size = EA_SET_SIZE(size, EA_4BYTE);
}
- if ((ins == INS_mov) && !EA_IS_CNS_RELOC(size))
+ if ((ins == INS_mov) && !EA_IS_CNS_RELOC(size))
{
genSetRegToIcon(reg, val, tree->TypeGet(), flags);
}
@@ -1953,16 +1978,16 @@ LONGREG_TT_IV:
{
#if defined(_TARGET_XARCH_)
inst_RV_IV(ins, reg, val, size);
-#elif defined (_TARGET_ARM_)
+#elif defined(_TARGET_ARM_)
if (!EA_IS_CNS_RELOC(size) && arm_Valid_Imm_For_Instr(ins, val, flags))
{
getEmitter()->emitIns_R_I(ins, size, reg, val, flags);
}
- else // We need a scratch register
+ else // We need a scratch register
{
// Load imm into a register
regMaskTP usedMask;
- if (tree->gtType == TYP_LONG)
+ if (tree->gtType == TYP_LONG)
{
usedMask = genRegPairMask(tree->gtRegPair);
#if CPU_LOAD_STORE_ARCH
@@ -2001,8 +2026,8 @@ LONGREG_TT_IV:
regNumber zeroReg;
zeroReg = genGetZeroRegister();
-
- if (zeroReg != REG_NA)
+
+ if (zeroReg != REG_NA)
{
inst_TT_RV(INS_mov, tree, zeroReg, offs);
return;
@@ -2032,7 +2057,7 @@ LONGREG_TT_IV:
/* Is this a spilled value? */
- if (tree->gtFlags & GTF_SPILLED)
+ if (tree->gtFlags & GTF_SPILLED)
{
assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill], icon'");
}
@@ -2052,157 +2077,160 @@ LONGREG_TT_IV:
switch (tree->gtOper)
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
- varNum = tree->gtLclVarCommon.gtLclNum; assert(varNum < compiler->lvaCount);
- offs += tree->gtLclFld.gtLclOffs;
+ varNum = tree->gtLclVarCommon.gtLclNum;
+ assert(varNum < compiler->lvaCount);
+ offs += tree->gtLclFld.gtLclOffs;
- goto LCL;
+ goto LCL;
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
#ifndef _TARGET_64BIT_
- /* Is this an enregistered long ? */
+ /* Is this an enregistered long ? */
- if (tree->gtType == TYP_LONG && !(tree->gtFlags & GTF_REG_VAL))
- {
- /* Avoid infinite loop */
+ if (tree->gtType == TYP_LONG && !(tree->gtFlags & GTF_REG_VAL))
+ {
+ /* Avoid infinite loop */
- if (genMarkLclVar(tree))
- goto LONGREG_TT_IV;
- }
+ if (genMarkLclVar(tree))
+ goto LONGREG_TT_IV;
+ }
#endif // !_TARGET_64BIT_
- inst_set_SV_var(tree);
+ inst_set_SV_var(tree);
- varNum = tree->gtLclVarCommon.gtLclNum; assert(varNum < compiler->lvaCount);
- varDsc = &compiler->lvaTable[varNum];
+ varNum = tree->gtLclVarCommon.gtLclNum;
+ assert(varNum < compiler->lvaCount);
+ varDsc = &compiler->lvaTable[varNum];
- // Fix the immediate by sign extending if needed
- if (size < EA_4BYTE && !varTypeIsUnsigned(varDsc->TypeGet()))
- {
- if (size == EA_1BYTE)
+ // Fix the immediate by sign extending if needed
+ if (size < EA_4BYTE && !varTypeIsUnsigned(varDsc->TypeGet()))
{
- if ((ival & 0x7f) != ival)
- ival = ival | 0xffffff00;
+ if (size == EA_1BYTE)
+ {
+ if ((ival & 0x7f) != ival)
+ ival = ival | 0xffffff00;
+ }
+ else
+ {
+ assert(size == EA_2BYTE);
+ if ((ival & 0x7fff) != ival)
+ ival = ival | 0xffff0000;
+ }
}
- else
+
+            // A local stack slot is at least 4 bytes in size, regardless of
+ // what the local var is typed as, so auto-promote it here
+ // unless the codegenerator told us a size, or it is a field
+ // of a promoted struct
+ if (sizeInferred && (size < EA_4BYTE) && !varDsc->lvIsStructField)
{
- assert(size == EA_2BYTE);
- if ((ival & 0x7fff) != ival)
- ival = ival | 0xffff0000;
+ size = EA_SET_SIZE(size, EA_4BYTE);
}
- }
- // A local stack slot is at least 4 bytes in size, regardles of
- // what the local var is typed as, so auto-promote it here
- // unless the codegenerator told us a size, or it is a field
- // of a promoted struct
- if (sizeInferred && (size < EA_4BYTE) && !varDsc->lvIsStructField)
- {
- size = EA_SET_SIZE(size, EA_4BYTE);
- }
-
- LCL:
+ LCL:
- /* Integer instructions never operate on more than EA_PTRSIZE */
+ /* Integer instructions never operate on more than EA_PTRSIZE */
- assert(instIsFP(ins) == false);
+ assert(instIsFP(ins) == false);
#if CPU_LOAD_STORE_ARCH
- if (!getEmitter()->emitInsIsStore(ins))
- {
- regNumber regTmp = regSet.rsPickFreeReg(RBM_ALLINT);
- getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
- regTracker.rsTrackRegTrash(regTmp);
-
- if (arm_Valid_Imm_For_Instr(ins, val, flags))
- {
- getEmitter()->emitIns_R_I(ins, size, regTmp, ival, flags);
- }
- else // We need a scratch register
+ if (!getEmitter()->emitInsIsStore(ins))
{
- // Load imm into a register
- regNumber regImm = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(regTmp));
+ regNumber regTmp = regSet.rsPickFreeReg(RBM_ALLINT);
+ getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
+ regTracker.rsTrackRegTrash(regTmp);
+
+ if (arm_Valid_Imm_For_Instr(ins, val, flags))
+ {
+ getEmitter()->emitIns_R_I(ins, size, regTmp, ival, flags);
+ }
+ else // We need a scratch register
+ {
+ // Load imm into a register
+ regNumber regImm = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(regTmp));
- instGen_Set_Reg_To_Imm(size, regImm, val);
- getEmitter()->emitIns_R_R(ins, size, regTmp, regImm, flags);
+ instGen_Set_Reg_To_Imm(size, regImm, val);
+ getEmitter()->emitIns_R_R(ins, size, regTmp, regImm, flags);
+ }
+ getEmitter()->emitIns_S_R(ins_Store(tree->TypeGet()), size, regTmp, varNum, offs);
}
- getEmitter()->emitIns_S_R(ins_Store(tree->TypeGet()), size, regTmp, varNum, offs);
- }
- else
+ else
#endif
- {
- getEmitter()->emitIns_S_I(ins, size, varNum, offs, ival);
- }
- return;
+ {
+ getEmitter()->emitIns_S_I(ins, size, varNum, offs, ival);
+ }
+ return;
- case GT_CLS_VAR:
- // Make sure FP instruction size matches the operand size
- // (We optimize constant doubles to floats when we can)
- // We just want to make sure that we don't mistakenly
- // use 8 bytes when the constant is smaller.
- //
- assert(!isFloatRegType(tree->gtType) ||
- genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
+ case GT_CLS_VAR:
+ // Make sure FP instruction size matches the operand size
+ // (We optimize constant doubles to floats when we can)
+ // We just want to make sure that we don't mistakenly
+ // use 8 bytes when the constant is smaller.
+ //
+ assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
#if CPU_LOAD_STORE_ARCH
- regNumber regTmpAddr; regTmpAddr = regSet.rsPickFreeReg(RBM_ALLINT);
+ regNumber regTmpAddr;
+ regTmpAddr = regSet.rsPickFreeReg(RBM_ALLINT);
- getEmitter()->emitIns_R_C(INS_lea, EA_PTRSIZE, regTmpAddr, tree->gtClsVar.gtClsVarHnd, offs);
- regTracker.rsTrackRegTrash(regTmpAddr);
+ getEmitter()->emitIns_R_C(INS_lea, EA_PTRSIZE, regTmpAddr, tree->gtClsVar.gtClsVarHnd, offs);
+ regTracker.rsTrackRegTrash(regTmpAddr);
- if (!getEmitter()->emitInsIsStore(ins))
- {
- regNumber regTmpArith = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(regTmpAddr));
+ if (!getEmitter()->emitInsIsStore(ins))
+ {
+ regNumber regTmpArith = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(regTmpAddr));
- getEmitter()->emitIns_R_R(ins_Load(tree->TypeGet()), size, regTmpArith, regTmpAddr);
+ getEmitter()->emitIns_R_R(ins_Load(tree->TypeGet()), size, regTmpArith, regTmpAddr);
- if (arm_Valid_Imm_For_Instr(ins, ival, flags))
- {
- getEmitter()->emitIns_R_R_I(ins, size, regTmpArith, regTmpArith, ival, flags);
+ if (arm_Valid_Imm_For_Instr(ins, ival, flags))
+ {
+ getEmitter()->emitIns_R_R_I(ins, size, regTmpArith, regTmpArith, ival, flags);
+ }
+ else
+ {
+ regNumber regTmpImm =
+ regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(regTmpAddr) & ~genRegMask(regTmpArith));
+ instGen_Set_Reg_To_Imm(EA_4BYTE, regTmpImm, (ssize_t)ival);
+ getEmitter()->emitIns_R_R(ins, size, regTmpArith, regTmpImm, flags);
+ }
+ regTracker.rsTrackRegTrash(regTmpArith);
+
+ getEmitter()->emitIns_R_R(ins_Store(tree->TypeGet()), size, regTmpArith, regTmpAddr);
}
else
{
- regNumber regTmpImm = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(regTmpAddr) & ~genRegMask(regTmpArith));
- instGen_Set_Reg_To_Imm(EA_4BYTE, regTmpImm, (ssize_t)ival);
- getEmitter()->emitIns_R_R(ins, size, regTmpArith, regTmpImm, flags);
- }
- regTracker.rsTrackRegTrash(regTmpArith);
-
- getEmitter()->emitIns_R_R(ins_Store(tree->TypeGet()), size, regTmpArith, regTmpAddr);
- }
- else
- {
- regNumber regTmpImm = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(regTmpAddr));
+ regNumber regTmpImm = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(regTmpAddr));
- instGen_Set_Reg_To_Imm(EA_4BYTE, regTmpImm, (ssize_t)ival, flags);
- getEmitter()->emitIns_R_R(ins_Store(tree->TypeGet()), size, regTmpImm, regTmpAddr);
- }
+ instGen_Set_Reg_To_Imm(EA_4BYTE, regTmpImm, (ssize_t)ival, flags);
+ getEmitter()->emitIns_R_R(ins_Store(tree->TypeGet()), size, regTmpImm, regTmpAddr);
+ }
#else // !CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_C_I(ins, size, tree->gtClsVar.gtClsVarHnd, offs, ival);
+ getEmitter()->emitIns_C_I(ins, size, tree->gtClsVar.gtClsVarHnd, offs, ival);
#endif
- return;
+ return;
- case GT_IND:
- case GT_NULLCHECK:
- case GT_ARR_ELEM:
+ case GT_IND:
+ case GT_NULLCHECK:
+ case GT_ARR_ELEM:
{
GenTreePtr addr = tree->OperIsIndir() ? tree->gtOp.gtOp1 : tree;
sched_AM(ins, size, REG_NA, false, addr, offs, true, ival, flags);
}
- return;
+ return;
- case GT_COMMA:
- // tree->gtOp.gtOp1 - already processed by genCreateAddrMode()
- tree = tree->gtOp.gtOp2;
- goto AGAIN;
+ case GT_COMMA:
+ // tree->gtOp.gtOp1 - already processed by genCreateAddrMode()
+ tree = tree->gtOp.gtOp2;
+ goto AGAIN;
- default:
- assert(!"invalid address");
+ default:
+ assert(!"invalid address");
}
}
#endif // LEGACY_BACKEND
@@ -2214,13 +2242,8 @@ LONGREG_TT_IV:
* other one by an indirection tree (which has been made addressable).
*/
-void CodeGen::inst_RV_AT(instruction ins,
- emitAttr size,
- var_types type,
- regNumber reg,
- GenTreePtr tree,
- unsigned offs,
- insFlags flags)
+void CodeGen::inst_RV_AT(
+ instruction ins, emitAttr size, var_types type, regNumber reg, GenTreePtr tree, unsigned offs, insFlags flags)
{
#ifdef _TARGET_XARCH_
#ifdef DEBUG
@@ -2229,12 +2252,11 @@ void CodeGen::inst_RV_AT(instruction ins,
// 2) optOptimizeBools() optimized if (ref != 0 && ref != 0) to if (ref & ref)
// 3) optOptimizeBools() optimized if (ref == 0 || ref == 0) to if (ref | ref)
// 4) byref - byref = int
- if (type == TYP_REF && !EA_IS_GCREF(size))
- assert((EA_IS_BYREF(size) && ins == INS_add) ||
- (ins == INS_lea || ins == INS_and || ins == INS_or));
- if (type == TYP_BYREF && !EA_IS_BYREF(size))
+ if (type == TYP_REF && !EA_IS_GCREF(size))
+ assert((EA_IS_BYREF(size) && ins == INS_add) || (ins == INS_lea || ins == INS_and || ins == INS_or));
+ if (type == TYP_BYREF && !EA_IS_BYREF(size))
assert(ins == INS_lea || ins == INS_and || ins == INS_or || ins == INS_sub);
- assert(!instIsFP(ins));
+ assert(!instIsFP(ins));
#endif
#endif
@@ -2252,11 +2274,7 @@ void CodeGen::inst_RV_AT(instruction ins,
* (which has been made addressable) and an integer constant.
*/
-void CodeGen::inst_AT_IV(instruction ins,
- emitAttr size,
- GenTreePtr baseTree,
- int icon,
- unsigned offs)
+void CodeGen::inst_AT_IV(instruction ins, emitAttr size, GenTreePtr baseTree, int icon, unsigned offs)
{
sched_AM(ins, size, REG_NA, false, baseTree, offs, true, icon);
}
@@ -2268,21 +2286,25 @@ void CodeGen::inst_AT_IV(instruction ins,
* other one by a tree (which has been made addressable).
*/
-void CodeGen::inst_RV_TT(instruction ins,
- regNumber reg,
- GenTreePtr tree,
- unsigned offs,
- emitAttr size,
- insFlags flags /* = INS_FLAGS_DONT_CARE */)
+void CodeGen::inst_RV_TT(instruction ins,
+ regNumber reg,
+ GenTreePtr tree,
+ unsigned offs,
+ emitAttr size,
+ insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
assert(reg != REG_STK);
if (size == EA_UNKNOWN)
{
if (!instIsFP(ins))
+ {
size = emitTypeSize(tree->TypeGet());
+ }
else
+ {
size = EA_ATTR(genTypeSize(tree->TypeGet()));
+ }
}
#ifdef _TARGET_XARCH_
@@ -2292,32 +2314,37 @@ void CodeGen::inst_RV_TT(instruction ins,
// 2) optOptimizeBools() optimized if (ref != 0 && ref != 0) to if (ref & ref)
// 3) optOptimizeBools() optimized if (ref == 0 || ref == 0) to if (ref | ref)
// 4) byref - byref = int
- if (tree->gtType == TYP_REF && !EA_IS_GCREF(size))
- assert((EA_IS_BYREF(size) && ins == INS_add) ||
- (ins == INS_lea || ins == INS_and || ins == INS_or));
- if (tree->gtType == TYP_BYREF && !EA_IS_BYREF(size))
+ if (tree->gtType == TYP_REF && !EA_IS_GCREF(size))
+ {
+ assert((EA_IS_BYREF(size) && ins == INS_add) || (ins == INS_lea || ins == INS_and || ins == INS_or));
+ }
+ if (tree->gtType == TYP_BYREF && !EA_IS_BYREF(size))
+ {
assert(ins == INS_lea || ins == INS_and || ins == INS_or || ins == INS_sub);
+ }
#endif
#endif
#if CPU_LOAD_STORE_ARCH
if (ins == INS_mov)
{
-#if defined (_TARGET_ARM_)
+#if defined(_TARGET_ARM_)
if (tree->TypeGet() != TYP_LONG)
{
- ins = ins_Move_Extend(tree->TypeGet(), (tree->gtFlags & GTF_REG_VAL)!=0);
+ ins = ins_Move_Extend(tree->TypeGet(), (tree->gtFlags & GTF_REG_VAL) != 0);
}
else if (offs == 0)
{
- ins = ins_Move_Extend(TYP_INT, (tree->gtFlags & GTF_REG_VAL)!=0 && genRegPairLo(tree->gtRegPair) != REG_STK);
+ ins = ins_Move_Extend(TYP_INT,
+ (tree->gtFlags & GTF_REG_VAL) != 0 && genRegPairLo(tree->gtRegPair) != REG_STK);
}
else
{
- ins = ins_Move_Extend(TYP_INT, (tree->gtFlags & GTF_REG_VAL)!=0 && genRegPairHi(tree->gtRegPair) != REG_STK);
+ ins = ins_Move_Extend(TYP_INT,
+ (tree->gtFlags & GTF_REG_VAL) != 0 && genRegPairHi(tree->gtRegPair) != REG_STK);
}
#elif defined(_TARGET_ARM64_)
- ins = ins_Move_Extend(tree->TypeGet(), (tree->gtFlags & GTF_REG_VAL)!=0);
+ ins = ins_Move_Extend(tree->TypeGet(), (tree->gtFlags & GTF_REG_VAL) != 0);
#else
NYI("CodeGen::inst_RV_TT with INS_mov");
#endif
@@ -2328,7 +2355,7 @@ AGAIN:
/* Is the value sitting in a register? */
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
{
#ifdef _TARGET_64BIT_
assert(instIsFP(ins) == 0);
@@ -2347,7 +2374,7 @@ AGAIN:
#else // !_TARGET_64BIT_
#ifdef LEGACY_BACKEND
-LONGREG_RVTT:
+ LONGREG_RVTT:
#endif // LEGACY_BACKEND
#ifdef _TARGET_XARCH_
@@ -2357,9 +2384,9 @@ LONGREG_RVTT:
regNumber rg2;
#if CPU_LONG_USES_REGPAIR
- if (tree->gtType == TYP_LONG)
+ if (tree->gtType == TYP_LONG)
{
- if (offs)
+ if (offs)
{
assert(offs == sizeof(int));
@@ -2376,7 +2403,7 @@ LONGREG_RVTT:
rg2 = tree->gtRegNum;
}
- if (rg2 != REG_STK)
+ if (rg2 != REG_STK)
{
#ifdef _TARGET_ARM_
if (getEmitter()->emitInsIsLoad(ins) || (ins == INS_lea))
@@ -2398,199 +2425,199 @@ LONGREG_RVTT:
}
#endif // _TARGET_64BIT_
-
}
/* Is this a spilled value? */
- if (tree->gtFlags & GTF_SPILLED)
+ if (tree->gtFlags & GTF_SPILLED)
{
assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill]'");
}
switch (tree->gtOper)
{
- unsigned varNum;
+ unsigned varNum;
- case GT_LCL_VAR:
- case GT_LCL_VAR_ADDR:
+ case GT_LCL_VAR:
+ case GT_LCL_VAR_ADDR:
#ifdef LEGACY_BACKEND
- /* Is this an enregistered long ? */
+ /* Is this an enregistered long ? */
- if (tree->gtType == TYP_LONG && !(tree->gtFlags & GTF_REG_VAL))
- {
+ if (tree->gtType == TYP_LONG && !(tree->gtFlags & GTF_REG_VAL))
+ {
- /* Avoid infinite loop */
+ /* Avoid infinite loop */
- if (genMarkLclVar(tree))
- goto LONGREG_RVTT;
- }
+ if (genMarkLclVar(tree))
+ goto LONGREG_RVTT;
+ }
#endif // LEGACY_BACKEND
- inst_set_SV_var(tree);
- goto LCL;
+ inst_set_SV_var(tree);
+ goto LCL;
- case GT_LCL_FLD_ADDR:
- case GT_LCL_FLD:
- offs += tree->gtLclFld.gtLclOffs;
- goto LCL;
+ case GT_LCL_FLD_ADDR:
+ case GT_LCL_FLD:
+ offs += tree->gtLclFld.gtLclOffs;
+ goto LCL;
- LCL:
- varNum = tree->gtLclVarCommon.gtLclNum; assert(varNum < compiler->lvaCount);
+ LCL:
+ varNum = tree->gtLclVarCommon.gtLclNum;
+ assert(varNum < compiler->lvaCount);
#ifdef _TARGET_ARM_
- switch (ins)
- {
- case INS_mov:
- ins = ins_Load(tree->TypeGet());
- __fallthrough;
-
- case INS_lea:
- case INS_ldr:
- case INS_ldrh:
- case INS_ldrb:
- case INS_ldrsh:
- case INS_ldrsb:
- case INS_vldr:
- assert(flags != INS_FLAGS_SET);
- getEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
- return;
-
- default:
- regNumber regTmp;
-#ifndef LEGACY_BACKEND
- if (tree->TypeGet() == TYP_LONG)
- regTmp = (offs == 0) ? genRegPairLo(tree->gtRegPair) : genRegPairHi(tree->gtRegPair);
- else
- regTmp = tree->gtRegNum;
-#else // LEGACY_BACKEND
- if (varTypeIsFloating(tree))
- {
- regTmp = regSet.PickRegFloat(tree->TypeGet());
- }
- else
+ switch (ins)
{
- regTmp = regSet.rsPickReg(RBM_ALLINT & ~genRegMask(reg));
- }
+ case INS_mov:
+ ins = ins_Load(tree->TypeGet());
+ __fallthrough;
+
+ case INS_lea:
+ case INS_ldr:
+ case INS_ldrh:
+ case INS_ldrb:
+ case INS_ldrsh:
+ case INS_ldrsb:
+ case INS_vldr:
+ assert(flags != INS_FLAGS_SET);
+ getEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
+ return;
+
+ default:
+ regNumber regTmp;
+#ifndef LEGACY_BACKEND
+ if (tree->TypeGet() == TYP_LONG)
+ regTmp = (offs == 0) ? genRegPairLo(tree->gtRegPair) : genRegPairHi(tree->gtRegPair);
+ else
+ regTmp = tree->gtRegNum;
+#else // LEGACY_BACKEND
+ if (varTypeIsFloating(tree))
+ {
+ regTmp = regSet.PickRegFloat(tree->TypeGet());
+ }
+ else
+ {
+ regTmp = regSet.rsPickReg(RBM_ALLINT & ~genRegMask(reg));
+ }
#endif // LEGACY_BACKEND
- getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
- getEmitter()->emitIns_R_R(ins, size, reg, regTmp, flags);
+ getEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
+ getEmitter()->emitIns_R_R(ins, size, reg, regTmp, flags);
- regTracker.rsTrackRegTrash(regTmp);
+ regTracker.rsTrackRegTrash(regTmp);
+ return;
+ }
+#else // !_TARGET_ARM_
+ getEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
return;
- }
-#else // !_TARGET_ARM_
- getEmitter()->emitIns_R_S(ins, size, reg, varNum, offs);
- return;
#endif // !_TARGET_ARM_
- case GT_CLS_VAR:
- // Make sure FP instruction size matches the operand size
- // (We optimized constant doubles to floats when we can, just want to
- // make sure that we don't mistakenly use 8 bytes when the
- // constant.
- assert(!isFloatRegType(tree->gtType) ||
- genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
+ case GT_CLS_VAR:
+ // Make sure FP instruction size matches the operand size
+            // (We optimized constant doubles to floats when we can; we just want to make
+            // sure that we don't mistakenly use 8 bytes when the constant is smaller.)
+ assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
#if CPU_LOAD_STORE_ARCH
#ifndef LEGACY_BACKEND
- assert(!"GT_CLS_VAR not supported in ARM RyuJIT backend");
-#else // LEGACY_BACKEND
- switch (ins)
- {
- case INS_mov:
- ins = ins_Load(tree->TypeGet());
-
- __fallthrough;
-
- case INS_lea:
- case INS_ldr:
- case INS_ldrh:
- case INS_ldrb:
- case INS_ldrsh:
- case INS_ldrsb:
- case INS_vldr:
- assert(flags != INS_FLAGS_SET);
- getEmitter()->emitIns_R_C(ins, size, reg, tree->gtClsVar.gtClsVarHnd, offs);
- return;
-
- default:
- regNumber regTmp = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg));
- getEmitter()->emitIns_R_C(ins_Load(tree->TypeGet()), size, regTmp, tree->gtClsVar.gtClsVarHnd, offs);
- getEmitter()->emitIns_R_R(ins, size, reg, regTmp, flags);
- regTracker.rsTrackRegTrash(regTmp);
- return;
- }
+ assert(!"GT_CLS_VAR not supported in ARM RyuJIT backend");
+#else // LEGACY_BACKEND
+ switch (ins)
+ {
+ case INS_mov:
+ ins = ins_Load(tree->TypeGet());
+
+ __fallthrough;
+
+ case INS_lea:
+ case INS_ldr:
+ case INS_ldrh:
+ case INS_ldrb:
+ case INS_ldrsh:
+ case INS_ldrsb:
+ case INS_vldr:
+ assert(flags != INS_FLAGS_SET);
+ getEmitter()->emitIns_R_C(ins, size, reg, tree->gtClsVar.gtClsVarHnd, offs);
+ return;
+
+ default:
+ regNumber regTmp = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg));
+ getEmitter()->emitIns_R_C(ins_Load(tree->TypeGet()), size, regTmp, tree->gtClsVar.gtClsVarHnd,
+ offs);
+ getEmitter()->emitIns_R_R(ins, size, reg, regTmp, flags);
+ regTracker.rsTrackRegTrash(regTmp);
+ return;
+ }
#endif // LEGACY_BACKEND
-#else // CPU_LOAD_STORE_ARCH
- getEmitter()->emitIns_R_C(ins, size, reg, tree->gtClsVar.gtClsVarHnd, offs);
+#else // CPU_LOAD_STORE_ARCH
+ getEmitter()->emitIns_R_C(ins, size, reg, tree->gtClsVar.gtClsVarHnd, offs);
#endif // CPU_LOAD_STORE_ARCH
- return;
+ return;
- case GT_IND:
- case GT_NULLCHECK:
- case GT_ARR_ELEM:
- case GT_LEA:
+ case GT_IND:
+ case GT_NULLCHECK:
+ case GT_ARR_ELEM:
+ case GT_LEA:
{
#ifndef LEGACY_BACKEND
assert(!"inst_RV_TT not supported for GT_IND, GT_NULLCHECK, GT_ARR_ELEM or GT_LEA in !LEGACY_BACKEND");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
GenTreePtr addr = tree->OperIsIndir() ? tree->gtOp.gtOp1 : tree;
inst_RV_AT(ins, size, tree->TypeGet(), reg, addr, offs, flags);
#endif // LEGACY_BACKEND
}
break;
- case GT_CNS_INT:
+ case GT_CNS_INT:
- assert(offs == 0);
+ assert(offs == 0);
- inst_RV_IV(ins, reg, tree->gtIntCon.gtIconVal, emitActualTypeSize(tree->TypeGet()), flags);
- break;
+ inst_RV_IV(ins, reg, tree->gtIntCon.gtIconVal, emitActualTypeSize(tree->TypeGet()), flags);
+ break;
- case GT_CNS_LNG:
+ case GT_CNS_LNG:
- assert(size == EA_4BYTE || size == EA_8BYTE);
+ assert(size == EA_4BYTE || size == EA_8BYTE);
#ifdef _TARGET_AMD64_
- assert(offs == 0);
+ assert(offs == 0);
#endif // _TARGET_AMD64_
- ssize_t constVal;
- emitAttr size;
- if (offs == 0)
- {
- constVal = (ssize_t)(tree->gtLngCon.gtLconVal);
- size = EA_PTRSIZE;
- }
- else
- {
- constVal = (ssize_t)(tree->gtLngCon.gtLconVal >> 32);
- size = EA_4BYTE;
- }
+ ssize_t constVal;
+ emitAttr size;
+ if (offs == 0)
+ {
+ constVal = (ssize_t)(tree->gtLngCon.gtLconVal);
+ size = EA_PTRSIZE;
+ }
+ else
+ {
+ constVal = (ssize_t)(tree->gtLngCon.gtLconVal >> 32);
+ size = EA_4BYTE;
+ }
#ifndef LEGACY_BACKEND
#ifdef _TARGET_ARM_
- if ((ins != INS_mov) && !arm_Valid_Imm_For_Instr(ins, constVal, flags))
- {
- regNumber constReg = (offs == 0) ? genRegPairLo(tree->gtRegPair) : genRegPairHi(tree->gtRegPair);
- instGen_Set_Reg_To_Imm(size, constReg, constVal);
- getEmitter()->emitIns_R_R(ins, size, reg, constReg, flags);
- break;
- }
+ if ((ins != INS_mov) && !arm_Valid_Imm_For_Instr(ins, constVal, flags))
+ {
+ regNumber constReg = (offs == 0) ? genRegPairLo(tree->gtRegPair) : genRegPairHi(tree->gtRegPair);
+ instGen_Set_Reg_To_Imm(size, constReg, constVal);
+ getEmitter()->emitIns_R_R(ins, size, reg, constReg, flags);
+ break;
+ }
#endif // _TARGET_ARM_
#endif // !LEGACY_BACKEND
- inst_RV_IV(ins, reg, constVal, size, flags);
- break;
+ inst_RV_IV(ins, reg, constVal, size, flags);
+ break;
- case GT_COMMA:
- tree = tree->gtOp.gtOp2;
- goto AGAIN;
+ case GT_COMMA:
+ tree = tree->gtOp.gtOp2;
+ goto AGAIN;
- default:
- assert(!"invalid address");
+ default:
+ assert(!"invalid address");
}
}
@@ -2600,10 +2627,7 @@ LONGREG_RVTT:
* which is reg=[tree]*icon
*/
#ifdef LEGACY_BACKEND
-void CodeGen::inst_RV_TT_IV(instruction ins,
- regNumber reg,
- GenTreePtr tree,
- int val)
+void CodeGen::inst_RV_TT_IV(instruction ins, regNumber reg, GenTreePtr tree, int val)
{
assert(tree->gtType <= TYP_I_IMPL);
@@ -2628,11 +2652,8 @@ void CodeGen::inst_RV_TT_IV(instruction ins,
* Generate a "shift reg, icon" instruction.
*/
-void CodeGen::inst_RV_SH(instruction ins,
- emitAttr size,
- regNumber reg,
- unsigned val,
- insFlags flags /* = INS_FLAGS_DONT_CARE */)
+void CodeGen::inst_RV_SH(
+ instruction ins, emitAttr size, regNumber reg, unsigned val, insFlags flags /* = INS_FLAGS_DONT_CARE */)
{
#if defined(_TARGET_ARM_)
@@ -2645,7 +2666,7 @@ void CodeGen::inst_RV_SH(instruction ins,
#ifdef _TARGET_AMD64_
// X64 JB BE ensures only encodable values make it here.
- // x86 can encode 8 bits, though it masks down to 5 or 6
+ // x86 can encode 8 bits, though it masks down to 5 or 6
// depending on whether 32-bit or 64-bit registers are used.
// Here we will allow anything that is encodable.
assert(val < 256);
@@ -2653,7 +2674,7 @@ void CodeGen::inst_RV_SH(instruction ins,
ins = genMapShiftInsToShiftByConstantIns(ins, val);
- if (val == 1)
+ if (val == 1)
{
getEmitter()->emitIns_R(ins, size, reg);
}
@@ -2672,10 +2693,7 @@ void CodeGen::inst_RV_SH(instruction ins,
* Generate a "shift [r/m], icon" instruction.
*/
-void CodeGen::inst_TT_SH(instruction ins,
- GenTreePtr tree,
- unsigned val,
- unsigned offs)
+void CodeGen::inst_TT_SH(instruction ins, GenTreePtr tree, unsigned val, unsigned offs)
{
#ifdef _TARGET_XARCH_
if (val == 0)
@@ -2705,9 +2723,7 @@ void CodeGen::inst_TT_SH(instruction ins,
* Generate a "shift [addr], cl" instruction.
*/
-void CodeGen::inst_TT_CL(instruction ins,
- GenTreePtr tree,
- unsigned offs)
+void CodeGen::inst_TT_CL(instruction ins, GenTreePtr tree, unsigned offs)
{
inst_TT(ins, tree, offs, 0, emitTypeSize(tree->TypeGet()));
}
@@ -2718,18 +2734,12 @@ void CodeGen::inst_TT_CL(instruction ins,
*/
#if defined(_TARGET_XARCH_)
-void CodeGen::inst_RV_RV_IV(instruction ins,
- emitAttr size,
- regNumber reg1,
- regNumber reg2,
- unsigned ival)
+void CodeGen::inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival)
{
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
- assert(ins == INS_shld || ins == INS_shrd ||
- ins == INS_shufps || ins == INS_shufpd || ins == INS_pshufd ||
- ins == INS_cmpps || ins == INS_cmppd ||
- ins == INS_dppd || ins == INS_dpps || ins == INS_insertps);
-#else // !_TARGET_XARCH_
+ assert(ins == INS_shld || ins == INS_shrd || ins == INS_shufps || ins == INS_shufpd || ins == INS_pshufd ||
+ ins == INS_cmpps || ins == INS_cmppd || ins == INS_dppd || ins == INS_dpps || ins == INS_insertps);
+#else // !_TARGET_XARCH_
assert(ins == INS_shld || ins == INS_shrd);
#endif // !_TARGET_XARCH_
@@ -2743,10 +2753,7 @@ void CodeGen::inst_RV_RV_IV(instruction ins,
* or word register (i.e. this is something like "movzx eax, cl").
*/
-void CodeGen::inst_RV_RR(instruction ins,
- emitAttr size,
- regNumber reg1,
- regNumber reg2)
+void CodeGen::inst_RV_RR(instruction ins, emitAttr size, regNumber reg1, regNumber reg2)
{
assert(size == EA_1BYTE || size == EA_2BYTE);
#ifdef _TARGET_XARCH_
@@ -2762,30 +2769,14 @@ void CodeGen::inst_RV_RR(instruction ins,
* The following should all end up inline in compiler.hpp at some point.
*/
-void CodeGen::inst_ST_RV(instruction ins,
- TempDsc * tmp,
- unsigned ofs,
- regNumber reg,
- var_types type)
+void CodeGen::inst_ST_RV(instruction ins, TempDsc* tmp, unsigned ofs, regNumber reg, var_types type)
{
- getEmitter()->emitIns_S_R(ins,
- emitActualTypeSize(type),
- reg,
- tmp->tdTempNum(),
- ofs);
+ getEmitter()->emitIns_S_R(ins, emitActualTypeSize(type), reg, tmp->tdTempNum(), ofs);
}
-void CodeGen::inst_ST_IV(instruction ins,
- TempDsc * tmp,
- unsigned ofs,
- int val,
- var_types type)
+void CodeGen::inst_ST_IV(instruction ins, TempDsc* tmp, unsigned ofs, int val, var_types type)
{
- getEmitter()->emitIns_S_I(ins,
- emitActualTypeSize(type),
- tmp->tdTempNum(),
- ofs,
- val);
+ getEmitter()->emitIns_S_I(ins, emitActualTypeSize(type), tmp->tdTempNum(), ofs, val);
}
#if FEATURE_FIXED_OUT_ARGS
@@ -2795,32 +2786,18 @@ void CodeGen::inst_ST_IV(instruction ins,
* like "str r3, [sp+0x04]"
*/
-void CodeGen::inst_SA_RV(instruction ins,
- unsigned ofs,
- regNumber reg,
- var_types type)
+void CodeGen::inst_SA_RV(instruction ins, unsigned ofs, regNumber reg, var_types type)
{
assert(ofs < compiler->lvaOutgoingArgSpaceSize);
- getEmitter()->emitIns_S_R(ins,
- emitActualTypeSize(type),
- reg,
- compiler->lvaOutgoingArgSpaceVar,
- ofs);
+ getEmitter()->emitIns_S_R(ins, emitActualTypeSize(type), reg, compiler->lvaOutgoingArgSpaceVar, ofs);
}
-void CodeGen::inst_SA_IV(instruction ins,
- unsigned ofs,
- int val,
- var_types type)
+void CodeGen::inst_SA_IV(instruction ins, unsigned ofs, int val, var_types type)
{
assert(ofs < compiler->lvaOutgoingArgSpaceSize);
- getEmitter()->emitIns_S_I(ins,
- emitActualTypeSize(type),
- compiler->lvaOutgoingArgSpaceVar,
- ofs,
- val);
+ getEmitter()->emitIns_S_I(ins, emitActualTypeSize(type), compiler->lvaOutgoingArgSpaceVar, ofs, val);
}
#endif // FEATURE_FIXED_OUT_ARGS
@@ -2830,100 +2807,85 @@ void CodeGen::inst_SA_IV(instruction ins,
* or short (e.g. something like "movzx eax, byte ptr [edx]").
*/
-void CodeGen::inst_RV_ST(instruction ins,
- emitAttr size,
- regNumber reg,
- GenTreePtr tree)
+void CodeGen::inst_RV_ST(instruction ins, emitAttr size, regNumber reg, GenTreePtr tree)
{
assert(size == EA_1BYTE || size == EA_2BYTE);
/* "movsx erx, rl" must be handled as a special case */
- if (tree->gtFlags & GTF_REG_VAL)
+ if (tree->gtFlags & GTF_REG_VAL)
+ {
inst_RV_RR(ins, size, reg, tree->gtRegNum);
+ }
else
+ {
inst_RV_TT(ins, reg, tree, 0, size);
+ }
}
-void CodeGen::inst_RV_ST(instruction ins,
- regNumber reg,
- TempDsc * tmp,
- unsigned ofs,
- var_types type,
- emitAttr size)
+void CodeGen::inst_RV_ST(instruction ins, regNumber reg, TempDsc* tmp, unsigned ofs, var_types type, emitAttr size)
{
if (size == EA_UNKNOWN)
+ {
size = emitActualTypeSize(type);
+ }
#ifdef _TARGET_ARM_
switch (ins)
{
- case INS_mov:
- assert(!"Please call ins_Load(type) to get the load instruction");
- break;
+ case INS_mov:
+ assert(!"Please call ins_Load(type) to get the load instruction");
+ break;
- case INS_add:
- case INS_ldr:
- case INS_ldrh:
- case INS_ldrb:
- case INS_ldrsh:
- case INS_ldrsb:
- case INS_lea:
- case INS_vldr:
- getEmitter()->emitIns_R_S(ins,
- size,
- reg,
- tmp->tdTempNum(),
- ofs);
- break;
+ case INS_add:
+ case INS_ldr:
+ case INS_ldrh:
+ case INS_ldrb:
+ case INS_ldrsh:
+ case INS_ldrsb:
+ case INS_lea:
+ case INS_vldr:
+ getEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs);
+ break;
- default:
+ default:
#ifndef LEGACY_BACKEND
- assert(!"Default inst_RV_ST case not supported for Arm !LEGACY_BACKEND");
-#else // LEGACY_BACKEND
- regNumber regTmp;
- if (varTypeIsFloating(type))
- {
- regTmp = regSet.PickRegFloat(type);
- }
- else
- {
- regTmp = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg));
- }
- getEmitter()->emitIns_R_S(ins_Load(type),
- size,
- regTmp,
- tmp->tdTempNum(),
- ofs);
- regTracker.rsTrackRegTrash(regTmp);
- getEmitter()->emitIns_R_R(ins, size, reg, regTmp);
+ assert(!"Default inst_RV_ST case not supported for Arm !LEGACY_BACKEND");
+#else // LEGACY_BACKEND
+ regNumber regTmp;
+ if (varTypeIsFloating(type))
+ {
+ regTmp = regSet.PickRegFloat(type);
+ }
+ else
+ {
+ regTmp = regSet.rsPickFreeReg(RBM_ALLINT & ~genRegMask(reg));
+ }
+ getEmitter()->emitIns_R_S(ins_Load(type), size, regTmp, tmp->tdTempNum(), ofs);
+ regTracker.rsTrackRegTrash(regTmp);
+ getEmitter()->emitIns_R_R(ins, size, reg, regTmp);
#endif // LEGACY_BACKEND
- break;
+ break;
}
-#else // !_TARGET_ARM_
- getEmitter()->emitIns_R_S(ins,
- size,
- reg,
- tmp->tdTempNum(),
- ofs);
+#else // !_TARGET_ARM_
+ getEmitter()->emitIns_R_S(ins, size, reg, tmp->tdTempNum(), ofs);
#endif // !_TARGET_ARM_
}
-void CodeGen::inst_mov_RV_ST(regNumber reg,
- GenTreePtr tree)
+void CodeGen::inst_mov_RV_ST(regNumber reg, GenTreePtr tree)
{
/* Figure out the size of the value being loaded */
- emitAttr size = EA_ATTR(genTypeSize(tree->gtType));
- instruction loadIns = ins_Move_Extend(tree->TypeGet(), (tree->gtFlags & GTF_REG_VAL)!=0);
+ emitAttr size = EA_ATTR(genTypeSize(tree->gtType));
+ instruction loadIns = ins_Move_Extend(tree->TypeGet(), (tree->gtFlags & GTF_REG_VAL) != 0);
- if (size < EA_4BYTE)
+ if (size < EA_4BYTE)
{
- if ((tree->gtFlags & GTF_SMALL_OK) && (size == EA_1BYTE)
-#if CPU_HAS_BYTE_REGS
- && (genRegMask(reg) & RBM_BYTE_REGS)
+ if ((tree->gtFlags & GTF_SMALL_OK) && (size == EA_1BYTE)
+#if CPU_HAS_BYTE_REGS
+ && (genRegMask(reg) & RBM_BYTE_REGS)
#endif
- )
+ )
{
/* We only need to load the actual size */
@@ -2944,107 +2906,97 @@ void CodeGen::inst_mov_RV_ST(regNumber reg,
}
}
#ifdef _TARGET_XARCH_
-void CodeGen::inst_FS_ST(instruction ins,
- emitAttr size,
- TempDsc * tmp,
- unsigned ofs)
+void CodeGen::inst_FS_ST(instruction ins, emitAttr size, TempDsc* tmp, unsigned ofs)
{
- getEmitter()->emitIns_S(ins,
- size,
- tmp->tdTempNum(),
- ofs);
+ getEmitter()->emitIns_S(ins, size, tmp->tdTempNum(), ofs);
}
#endif
#ifdef _TARGET_ARM_
-bool CodeGenInterface::validImmForInstr(instruction ins,
- ssize_t imm,
- insFlags flags)
+bool CodeGenInterface::validImmForInstr(instruction ins, ssize_t imm, insFlags flags)
{
if (getEmitter()->emitInsIsLoadOrStore(ins) && !instIsFP(ins))
{
return validDispForLdSt(imm, TYP_INT);
}
-
+
bool result = false;
switch (ins)
{
- case INS_cmp:
- case INS_cmn:
- if (validImmForAlu(imm) || validImmForAlu(-imm))
- result = true;
- break;
+ case INS_cmp:
+ case INS_cmn:
+ if (validImmForAlu(imm) || validImmForAlu(-imm))
+ result = true;
+ break;
- case INS_and:
- case INS_bic:
- case INS_orr:
- case INS_orn:
- case INS_mvn:
- if (validImmForAlu(imm) || validImmForAlu(~imm))
- result = true;
- break;
+ case INS_and:
+ case INS_bic:
+ case INS_orr:
+ case INS_orn:
+ case INS_mvn:
+ if (validImmForAlu(imm) || validImmForAlu(~imm))
+ result = true;
+ break;
- case INS_mov:
- if (validImmForMov(imm))
- result = true;
- break;
+ case INS_mov:
+ if (validImmForMov(imm))
+ result = true;
+ break;
- case INS_addw:
- case INS_subw:
- if ((unsigned_abs(imm) <= 0x00000fff) && (flags != INS_FLAGS_SET)) // 12-bit immediate
- result = true;
- break;
+ case INS_addw:
+ case INS_subw:
+ if ((unsigned_abs(imm) <= 0x00000fff) && (flags != INS_FLAGS_SET)) // 12-bit immediate
+ result = true;
+ break;
- case INS_add:
- case INS_sub:
- if (validImmForAdd(imm, flags))
- result = true;
- break;
+ case INS_add:
+ case INS_sub:
+ if (validImmForAdd(imm, flags))
+ result = true;
+ break;
- case INS_tst:
- case INS_eor:
- case INS_teq:
- case INS_adc:
- case INS_sbc:
- case INS_rsb:
- if (validImmForAlu(imm))
- result = true;
- break;
+ case INS_tst:
+ case INS_eor:
+ case INS_teq:
+ case INS_adc:
+ case INS_sbc:
+ case INS_rsb:
+ if (validImmForAlu(imm))
+ result = true;
+ break;
- case INS_asr:
- case INS_lsl:
- case INS_lsr:
- case INS_ror:
- if (imm > 0 && imm <= 32)
- result = true;
- break;
-
- case INS_vstr:
- case INS_vldr:
- if ((imm & 0x3FC) == imm)
- result = true;
- break;
+ case INS_asr:
+ case INS_lsl:
+ case INS_lsr:
+ case INS_ror:
+ if (imm > 0 && imm <= 32)
+ result = true;
+ break;
- default:
- break;
+ case INS_vstr:
+ case INS_vldr:
+ if ((imm & 0x3FC) == imm)
+ result = true;
+ break;
+
+ default:
+ break;
}
return result;
}
-bool CodeGen::arm_Valid_Imm_For_Instr(instruction ins,
- ssize_t imm,
- insFlags flags)
+bool CodeGen::arm_Valid_Imm_For_Instr(instruction ins, ssize_t imm, insFlags flags)
{
return validImmForInstr(ins, imm, flags);
}
-bool CodeGenInterface::validDispForLdSt(ssize_t disp, var_types type)
+bool CodeGenInterface::validDispForLdSt(ssize_t disp, var_types type)
{
if (varTypeIsFloating(type))
{
- if ((disp & 0x3FC) == disp)
- return true;
- else
- return false;
+ if ((disp & 0x3FC) == disp)
+ return true;
+ else
+ return false;
}
else
{
@@ -3054,78 +3006,77 @@ bool CodeGenInterface::validDispForLdSt(ssize_t disp, var_type
return false;
}
}
-bool CodeGen::arm_Valid_Disp_For_LdSt(ssize_t disp, var_types type)
+bool CodeGen::arm_Valid_Disp_For_LdSt(ssize_t disp, var_types type)
{
return validDispForLdSt(disp, type);
}
-bool CodeGenInterface::validImmForAlu(ssize_t imm)
+bool CodeGenInterface::validImmForAlu(ssize_t imm)
{
return emitter::emitIns_valid_imm_for_alu(imm);
}
-bool CodeGen::arm_Valid_Imm_For_Alu (ssize_t imm)
+bool CodeGen::arm_Valid_Imm_For_Alu(ssize_t imm)
{
return validImmForAlu(imm);
}
-bool CodeGenInterface::validImmForMov (ssize_t imm)
+bool CodeGenInterface::validImmForMov(ssize_t imm)
{
return emitter::emitIns_valid_imm_for_mov(imm);
}
-bool CodeGen::arm_Valid_Imm_For_Mov (ssize_t imm)
+bool CodeGen::arm_Valid_Imm_For_Mov(ssize_t imm)
{
return validImmForMov(imm);
}
-bool CodeGen::arm_Valid_Imm_For_Small_Mov(regNumber reg, ssize_t imm, insFlags flags)
+bool CodeGen::arm_Valid_Imm_For_Small_Mov(regNumber reg, ssize_t imm, insFlags flags)
{
return emitter::emitIns_valid_imm_for_small_mov(reg, imm, flags);
}
-bool CodeGenInterface::validImmForAdd(ssize_t imm, insFlags flags)
+bool CodeGenInterface::validImmForAdd(ssize_t imm, insFlags flags)
{
return emitter::emitIns_valid_imm_for_add(imm, flags);
}
-bool CodeGen::arm_Valid_Imm_For_Add (ssize_t imm, insFlags flags)
+bool CodeGen::arm_Valid_Imm_For_Add(ssize_t imm, insFlags flags)
{
return emitter::emitIns_valid_imm_for_add(imm, flags);
}
// Check "add Rd,SP,i10"
-bool CodeGen::arm_Valid_Imm_For_Add_SP(ssize_t imm)
+bool CodeGen::arm_Valid_Imm_For_Add_SP(ssize_t imm)
{
return emitter::emitIns_valid_imm_for_add_sp(imm);
}
-bool CodeGenInterface::validImmForBL (ssize_t addr)
+bool CodeGenInterface::validImmForBL(ssize_t addr)
{
- return
+ return
// If we are running the altjit for NGEN, then assume we can use the "BL" instruction.
// This matches the usual behavior for NGEN, since we normally do generate "BL".
- (!compiler->info.compMatchedVM && (compiler->opts.eeFlags & CORJIT_FLG_PREJIT))
- ||
+ (!compiler->info.compMatchedVM && (compiler->opts.eeFlags & CORJIT_FLG_PREJIT)) ||
(compiler->eeGetRelocTypeHint((void*)addr) == IMAGE_REL_BASED_THUMB_BRANCH24);
}
-bool CodeGen::arm_Valid_Imm_For_BL (ssize_t addr)
+bool CodeGen::arm_Valid_Imm_For_BL(ssize_t addr)
{
return validImmForBL(addr);
}
// Returns true if this instruction writes to a destination register
//
-bool CodeGen::ins_Writes_Dest (instruction ins)
+bool CodeGen::ins_Writes_Dest(instruction ins)
{
switch (ins)
{
- case INS_cmp:
- case INS_cmn:
- case INS_tst:
- case INS_teq:
- return false;
+ case INS_cmp:
+ case INS_cmn:
+ case INS_tst:
+ case INS_teq:
+ return false;
- default:
- return true;
+ default:
+ return true;
}
}
#endif // _TARGET_ARM_
@@ -3138,8 +3089,7 @@ bool CodeGen::ins_Writes_Dest (instruction ins)
* srcType - source type
* srcInReg - whether source is in a register
*/
-instruction CodeGen::ins_Move_Extend(var_types srcType,
- bool srcInReg)
+instruction CodeGen::ins_Move_Extend(var_types srcType, bool srcInReg)
{
instruction ins = INS_invalid;
@@ -3148,7 +3098,7 @@ instruction CodeGen::ins_Move_Extend(var_types srcType,
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
// SSE2/AVX requires destination to be a reg always.
// If the src is in a register, it is a reg-reg move.
- //
+ //
// SSE2 Note: always prefer movaps/movups over movapd/movupd since the
// former doesn't require the 66h prefix and is one byte smaller than the
// latter.
@@ -3156,7 +3106,7 @@ instruction CodeGen::ins_Move_Extend(var_types srcType,
// TODO-CQ: use movaps instead when the src type is known to be aligned
return (srcInReg) ? INS_movaps : INS_movups;
-#else // !defined(_TARGET_XARCH_) || defined(LEGACY_BACKEND)
+#else // !defined(_TARGET_XARCH_) || defined(LEGACY_BACKEND)
assert(!"unhandled SIMD type");
#endif // !defined(_TARGET_XARCH_) || defined(LEGACY_BACKEND)
}
@@ -3186,11 +3136,17 @@ instruction CodeGen::ins_Move_Extend(var_types srcType,
#if defined(_TARGET_XARCH_)
if (!varTypeIsSmall(srcType))
+ {
ins = INS_mov;
+ }
else if (varTypeIsUnsigned(srcType))
+ {
ins = INS_movzx;
+ }
else
+ {
ins = INS_movsx;
+ }
#elif defined(_TARGET_ARM_)
//
// Register to Register zero/sign extend operation
@@ -3238,13 +3194,13 @@ instruction CodeGen::ins_Move_Extend(var_types srcType,
}
else
{
- // A mov Rd, Rm instruction performs the zero extend
+ // A mov Rd, Rm instruction performs the zero extend
// for the upper 32 bits when the size is EA_4BYTE
ins = INS_mov;
}
}
- else
+ else
{
if (varTypeIsByte(srcType))
{
@@ -3286,8 +3242,7 @@ instruction CodeGen::ins_Move_Extend(var_types srcType,
* srcType - source type
* aligned - whether source is 16-byte aligned if srcType is a SIMD type
*/
-instruction CodeGenInterface::ins_Load(var_types srcType,
- bool aligned /*=false*/)
+instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false*/)
{
instruction ins = INS_invalid;
@@ -3301,7 +3256,7 @@ instruction CodeGenInterface::ins_Load(var_types srcType,
}
else
#endif // FEATURE_SIMD
- if (compiler->canUseAVX())
+ if (compiler->canUseAVX())
{
// TODO-CQ: consider alignment of AVX vectors.
return INS_movupd;
@@ -3340,15 +3295,21 @@ instruction CodeGenInterface::ins_Load(var_types srcType,
#else
assert(!varTypeIsFloating(srcType));
#endif
- }
+ }
#if defined(_TARGET_XARCH_)
if (!varTypeIsSmall(srcType))
+ {
ins = INS_mov;
+ }
else if (varTypeIsUnsigned(srcType))
+ {
ins = INS_movzx;
- else
+ }
+ else
+ {
ins = INS_movsx;
+ }
#elif defined(_TARGET_ARMARCH_)
if (!varTypeIsSmall(srcType))
@@ -3393,23 +3354,23 @@ instruction CodeGenInterface::ins_Load(var_types srcType,
* Parameters
* dstType - destination type
*/
-instruction CodeGen::ins_Copy(var_types dstType)
+instruction CodeGen::ins_Copy(var_types dstType)
{
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
if (varTypeIsSIMD(dstType))
- {
+ {
return INS_movaps;
}
else if (varTypeIsFloating(dstType))
{
- // Both float and double copy can use movaps
- return INS_movaps;
+ // Both float and double copy can use movaps
+ return INS_movaps;
}
else
{
return INS_mov;
}
-#elif defined (_TARGET_ARM64_)
+#elif defined(_TARGET_ARM64_)
if (varTypeIsFloating(dstType))
{
return INS_fmov;
@@ -3445,7 +3406,7 @@ instruction CodeGen::ins_Copy(var_types dstType)
* dstType - destination type
* aligned - whether destination is 16-byte aligned if dstType is a SIMD type
*/
-instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false*/)
+instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false*/)
{
instruction ins = INS_invalid;
@@ -3459,7 +3420,7 @@ instruction CodeGenInterface::ins_Store(var_types dstType, bool aligne
}
else
#endif // FEATURE_SIMD
- if (compiler->canUseAVX())
+ if (compiler->canUseAVX())
{
// TODO-CQ: consider alignment of AVX vectors.
return INS_movupd;
@@ -3487,13 +3448,13 @@ instruction CodeGenInterface::ins_Store(var_types dstType, bool aligne
assert(!"unhandled floating type");
}
}
-#elif defined (_TARGET_ARM64_)
+#elif defined(_TARGET_ARM64_)
if (varTypeIsSIMD(dstType) || varTypeIsFloating(dstType))
{
// All sizes of SIMD and FP instructions use INS_str
return INS_str;
}
-#elif defined (_TARGET_ARM_)
+#elif defined(_TARGET_ARM_)
assert(!varTypeIsSIMD(dstType));
if (varTypeIsFloating(dstType))
{
@@ -3523,86 +3484,86 @@ instruction CodeGenInterface::ins_Store(var_types dstType, bool aligne
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
-bool CodeGen::isMoveIns(instruction ins)
+bool CodeGen::isMoveIns(instruction ins)
{
return (ins == INS_mov);
}
-instruction CodeGenInterface::ins_FloatLoad(var_types type)
-{
+instruction CodeGenInterface::ins_FloatLoad(var_types type)
+{
// Do Not use this routine in RyuJIT backend. Instead use ins_Load()/ins_Store()
unreached();
}
// everything is just an addressing mode variation on x64
-instruction CodeGen::ins_FloatStore(var_types type)
+instruction CodeGen::ins_FloatStore(var_types type)
{
// Do Not use this routine in RyuJIT backend. Instead use ins_Store()
unreached();
}
-instruction CodeGen::ins_FloatCopy(var_types type)
+instruction CodeGen::ins_FloatCopy(var_types type)
{
// Do Not use this routine in RyuJIT backend. Instead use ins_Load().
unreached();
}
-instruction CodeGen::ins_FloatCompare(var_types type)
+instruction CodeGen::ins_FloatCompare(var_types type)
{
return (type == TYP_FLOAT) ? INS_ucomiss : INS_ucomisd;
}
-instruction CodeGen::ins_CopyIntToFloat(var_types srcType, var_types dstType)
+instruction CodeGen::ins_CopyIntToFloat(var_types srcType, var_types dstType)
{
// On SSE2/AVX - the same instruction is used for moving double/quad word to XMM/YMM register.
assert((srcType == TYP_INT) || (srcType == TYP_UINT) || (srcType == TYP_LONG) || (srcType == TYP_ULONG));
return INS_mov_i2xmm;
}
-instruction CodeGen::ins_CopyFloatToInt(var_types srcType, var_types dstType)
+instruction CodeGen::ins_CopyFloatToInt(var_types srcType, var_types dstType)
{
// On SSE2/AVX - the same instruction is used for moving double/quad word of XMM/YMM to an integer register.
assert((dstType == TYP_INT) || (dstType == TYP_UINT) || (dstType == TYP_LONG) || (dstType == TYP_ULONG));
return INS_mov_xmm2i;
}
-instruction CodeGen::ins_MathOp(genTreeOps oper, var_types type)
+instruction CodeGen::ins_MathOp(genTreeOps oper, var_types type)
{
switch (oper)
{
- case GT_ADD:
- case GT_ASG_ADD:
- return type==TYP_DOUBLE ? INS_addsd : INS_addss;
- break;
- case GT_SUB:
- case GT_ASG_SUB:
- return type==TYP_DOUBLE ? INS_subsd : INS_subss;
- break;
- case GT_MUL:
- case GT_ASG_MUL:
- return type==TYP_DOUBLE ? INS_mulsd : INS_mulss;
- break;
- case GT_DIV:
- case GT_ASG_DIV:
- return type==TYP_DOUBLE ? INS_divsd : INS_divss;
- case GT_AND:
- return type==TYP_DOUBLE ? INS_andpd : INS_andps;
- case GT_OR:
- return type==TYP_DOUBLE ? INS_orpd : INS_orps;
- case GT_XOR:
- return type==TYP_DOUBLE ? INS_xorpd : INS_xorps;
- default:
- unreached();
+ case GT_ADD:
+ case GT_ASG_ADD:
+ return type == TYP_DOUBLE ? INS_addsd : INS_addss;
+ break;
+ case GT_SUB:
+ case GT_ASG_SUB:
+ return type == TYP_DOUBLE ? INS_subsd : INS_subss;
+ break;
+ case GT_MUL:
+ case GT_ASG_MUL:
+ return type == TYP_DOUBLE ? INS_mulsd : INS_mulss;
+ break;
+ case GT_DIV:
+ case GT_ASG_DIV:
+ return type == TYP_DOUBLE ? INS_divsd : INS_divss;
+ case GT_AND:
+ return type == TYP_DOUBLE ? INS_andpd : INS_andps;
+ case GT_OR:
+ return type == TYP_DOUBLE ? INS_orpd : INS_orps;
+ case GT_XOR:
+ return type == TYP_DOUBLE ? INS_xorpd : INS_xorps;
+ default:
+ unreached();
}
}
-instruction CodeGen::ins_FloatSqrt(var_types type)
+instruction CodeGen::ins_FloatSqrt(var_types type)
{
instruction ins = INS_invalid;
if (type == TYP_DOUBLE)
{
- ins = INS_sqrtsd;
+ ins = INS_sqrtsd;
}
else
{
@@ -3614,173 +3575,209 @@ instruction CodeGen::ins_FloatSqrt(var_types type)
}
// Conversions to or from floating point values
-instruction CodeGen::ins_FloatConv(var_types to, var_types from)
+instruction CodeGen::ins_FloatConv(var_types to, var_types from)
{
// AVX: For now we support only conversion from Int/Long -> float
switch (from)
{
- // int/long -> float/double use the same instruction but type size would be different.
- case TYP_INT:
- case TYP_LONG:
- switch (to)
- {
- case TYP_FLOAT: return INS_cvtsi2ss;
- case TYP_DOUBLE: return INS_cvtsi2sd;
- default: unreached();
- }
- break;
-
- case TYP_FLOAT:
- switch (to)
- {
- case TYP_INT: return INS_cvttss2si;
- case TYP_LONG: return INS_cvttss2si;
- case TYP_FLOAT: return ins_Move_Extend(TYP_FLOAT, false);
- case TYP_DOUBLE: return INS_cvtss2sd;
- default: unreached();
- }
- break;
+ // int/long -> float/double conversions use the same instruction, but the type size differs.
+ case TYP_INT:
+ case TYP_LONG:
+ switch (to)
+ {
+ case TYP_FLOAT:
+ return INS_cvtsi2ss;
+ case TYP_DOUBLE:
+ return INS_cvtsi2sd;
+ default:
+ unreached();
+ }
+ break;
- case TYP_DOUBLE:
- switch (to)
- {
- case TYP_INT: return INS_cvttsd2si;
- case TYP_LONG: return INS_cvttsd2si;
- case TYP_FLOAT: return INS_cvtsd2ss;
- case TYP_DOUBLE: return ins_Move_Extend(TYP_DOUBLE, false);
- default: unreached();
- }
- break;
+ case TYP_FLOAT:
+ switch (to)
+ {
+ case TYP_INT:
+ return INS_cvttss2si;
+ case TYP_LONG:
+ return INS_cvttss2si;
+ case TYP_FLOAT:
+ return ins_Move_Extend(TYP_FLOAT, false);
+ case TYP_DOUBLE:
+ return INS_cvtss2sd;
+ default:
+ unreached();
+ }
+ break;
- default: unreached();
+ case TYP_DOUBLE:
+ switch (to)
+ {
+ case TYP_INT:
+ return INS_cvttsd2si;
+ case TYP_LONG:
+ return INS_cvttsd2si;
+ case TYP_FLOAT:
+ return INS_cvtsd2ss;
+ case TYP_DOUBLE:
+ return ins_Move_Extend(TYP_DOUBLE, false);
+ default:
+ unreached();
+ }
+ break;
+
+ default:
+ unreached();
}
}
#elif defined(_TARGET_ARM_)
-bool CodeGen::isMoveIns(instruction ins)
+bool CodeGen::isMoveIns(instruction ins)
{
return (ins == INS_vmov) || (ins == INS_mov);
}
-instruction CodeGenInterface::ins_FloatLoad(var_types type)
+instruction CodeGenInterface::ins_FloatLoad(var_types type)
{
assert(type == TYP_DOUBLE || type == TYP_FLOAT);
return INS_vldr;
}
-instruction CodeGen::ins_FloatStore(var_types type)
+instruction CodeGen::ins_FloatStore(var_types type)
{
assert(type == TYP_DOUBLE || type == TYP_FLOAT);
return INS_vstr;
}
-instruction CodeGen::ins_FloatCopy(var_types type)
+instruction CodeGen::ins_FloatCopy(var_types type)
{
assert(type == TYP_DOUBLE || type == TYP_FLOAT);
return INS_vmov;
}
-instruction CodeGen::ins_CopyIntToFloat(var_types srcType, var_types dstType)
+instruction CodeGen::ins_CopyIntToFloat(var_types srcType, var_types dstType)
{
// Not used and not implemented
unreached();
}
-instruction CodeGen::ins_CopyFloatToInt(var_types srcType, var_types dstType)
+instruction CodeGen::ins_CopyFloatToInt(var_types srcType, var_types dstType)
{
// Not used and not implemented
unreached();
}
-instruction CodeGen::ins_FloatCompare(var_types type)
+instruction CodeGen::ins_FloatCompare(var_types type)
{
// Not used and not implemented
unreached();
}
-instruction CodeGen::ins_FloatSqrt(var_types type)
+instruction CodeGen::ins_FloatSqrt(var_types type)
{
// Not used and not implemented
unreached();
}
-instruction CodeGen::ins_MathOp(genTreeOps oper, var_types type)
+instruction CodeGen::ins_MathOp(genTreeOps oper, var_types type)
{
switch (oper)
{
- case GT_ADD:
- case GT_ASG_ADD:
- return INS_vadd;
- break;
- case GT_SUB:
- case GT_ASG_SUB:
- return INS_vsub;
- break;
- case GT_MUL:
- case GT_ASG_MUL:
- return INS_vmul;
- break;
- case GT_DIV:
- case GT_ASG_DIV:
- return INS_vdiv;
- case GT_NEG:
- return INS_vneg;
- default:
- unreached();
+ case GT_ADD:
+ case GT_ASG_ADD:
+ return INS_vadd;
+ break;
+ case GT_SUB:
+ case GT_ASG_SUB:
+ return INS_vsub;
+ break;
+ case GT_MUL:
+ case GT_ASG_MUL:
+ return INS_vmul;
+ break;
+ case GT_DIV:
+ case GT_ASG_DIV:
+ return INS_vdiv;
+ case GT_NEG:
+ return INS_vneg;
+ default:
+ unreached();
}
}
-instruction CodeGen::ins_FloatConv(var_types to, var_types from)
+instruction CodeGen::ins_FloatConv(var_types to, var_types from)
{
switch (from)
{
- case TYP_INT:
- switch (to)
- {
- case TYP_FLOAT: return INS_vcvt_i2f;
- case TYP_DOUBLE: return INS_vcvt_i2d;
- default: unreached();
- }
- break;
- case TYP_UINT:
- switch (to)
- {
- case TYP_FLOAT: return INS_vcvt_u2f;
- case TYP_DOUBLE: return INS_vcvt_u2d;
- default: unreached();
- }
- break;
- case TYP_LONG:
- switch (to)
- {
- case TYP_FLOAT: NYI("long to float");
- case TYP_DOUBLE: NYI("long to double");
- default: unreached();
- }
- break;
- case TYP_FLOAT:
- switch (to)
- {
- case TYP_INT: return INS_vcvt_f2i;
- case TYP_UINT: return INS_vcvt_f2u;
- case TYP_LONG: NYI("float to long");
- case TYP_DOUBLE: return INS_vcvt_f2d;
- case TYP_FLOAT: return INS_vmov;
- default: unreached();
- }
- break;
- case TYP_DOUBLE:
- switch (to)
- {
- case TYP_INT: return INS_vcvt_d2i;
- case TYP_UINT: return INS_vcvt_d2u;
- case TYP_LONG: NYI("double to long");
- case TYP_FLOAT: return INS_vcvt_d2f;
- case TYP_DOUBLE: return INS_vmov;
- default: unreached();
- }
- break;
- default: unreached();
+ case TYP_INT:
+ switch (to)
+ {
+ case TYP_FLOAT:
+ return INS_vcvt_i2f;
+ case TYP_DOUBLE:
+ return INS_vcvt_i2d;
+ default:
+ unreached();
+ }
+ break;
+ case TYP_UINT:
+ switch (to)
+ {
+ case TYP_FLOAT:
+ return INS_vcvt_u2f;
+ case TYP_DOUBLE:
+ return INS_vcvt_u2d;
+ default:
+ unreached();
+ }
+ break;
+ case TYP_LONG:
+ switch (to)
+ {
+ case TYP_FLOAT:
+ NYI("long to float");
+ case TYP_DOUBLE:
+ NYI("long to double");
+ default:
+ unreached();
+ }
+ break;
+ case TYP_FLOAT:
+ switch (to)
+ {
+ case TYP_INT:
+ return INS_vcvt_f2i;
+ case TYP_UINT:
+ return INS_vcvt_f2u;
+ case TYP_LONG:
+ NYI("float to long");
+ case TYP_DOUBLE:
+ return INS_vcvt_f2d;
+ case TYP_FLOAT:
+ return INS_vmov;
+ default:
+ unreached();
+ }
+ break;
+ case TYP_DOUBLE:
+ switch (to)
+ {
+ case TYP_INT:
+ return INS_vcvt_d2i;
+ case TYP_UINT:
+ return INS_vcvt_d2u;
+ case TYP_LONG:
+ NYI("double to long");
+ case TYP_FLOAT:
+ return INS_vcvt_d2f;
+ case TYP_DOUBLE:
+ return INS_vmov;
+ default:
+ unreached();
+ }
+ break;
+ default:
+ unreached();
}
}
@@ -3788,23 +3785,27 @@ instruction CodeGen::ins_FloatConv(var_types to, var_types from)
/*****************************************************************************
*
- * Machine independent way to return
+ * Machine independent way to return
*/
-void CodeGen::instGen_Return(unsigned stkArgSize)
+void CodeGen::instGen_Return(unsigned stkArgSize)
{
#if defined(_TARGET_XARCH_)
if (stkArgSize == 0)
+ {
instGen(INS_ret);
+ }
else
+ {
inst_IV(INS_ret, stkArgSize);
-#elif defined (_TARGET_ARM_)
- //
- // The return on ARM is folded into the pop multiple instruction
- // and as we do not know the exact set of registers that we will
- // need to restore (pop) when we first call instGen_Return we will
- // instead just not emit anything for this method on the ARM
- // The return will be part of the pop multiple and that will be
- // part of the epilog that is generated by genFnEpilog()
+ }
+#elif defined(_TARGET_ARM_)
+//
+// The return on ARM is folded into the pop multiple instruction,
+// and since we do not know the exact set of registers that we will
+// need to restore (pop) when instGen_Return is first called, we
+// simply emit nothing for this method on ARM. The return will be
+// part of the pop multiple, and that will be part of the epilog
+// that is generated by genFnEpilog()
#elif defined(_TARGET_ARM64_)
// This function shouldn't be used on ARM64.
unreached();
@@ -3820,33 +3821,32 @@ void CodeGen::instGen_Return(unsigned stkArgSize)
* Note: all MemoryBarriers instructions can be removed by
* SET COMPlus_JitNoMemoryBarriers=1
*/
-void CodeGen::instGen_MemoryBarrier()
+void CodeGen::instGen_MemoryBarrier()
{
#ifdef DEBUG
if (JitConfig.JitNoMemoryBarriers() == 1)
+ {
return;
+ }
#endif // DEBUG
#if defined(_TARGET_XARCH_)
instGen(INS_lock);
getEmitter()->emitIns_I_AR(INS_or, EA_4BYTE, 0, REG_SPBASE, 0);
-#elif defined (_TARGET_ARM_)
+#elif defined(_TARGET_ARM_)
getEmitter()->emitIns_I(INS_dmb, EA_4BYTE, 0xf);
-#elif defined (_TARGET_ARM64_)
+#elif defined(_TARGET_ARM64_)
getEmitter()->emitIns_BARR(INS_dmb, INS_BARRIER_SY);
#else
#error "Unknown _TARGET_"
#endif
}
-
/*****************************************************************************
*
* Machine independent way to move a Zero value into a register
*/
-void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size,
- regNumber reg,
- insFlags flags)
+void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags)
{
#if defined(_TARGET_XARCH_)
getEmitter()->emitIns_R_R(INS_xor, size, reg, reg);
@@ -3858,22 +3858,18 @@ void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size,
regTracker.rsTrackRegIntCns(reg, 0);
}
-
#ifdef LEGACY_BACKEND
/*****************************************************************************
*
* Machine independent way to move an immediate value into a register
*/
-void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
- regNumber reg,
- ssize_t imm,
- insFlags flags)
+void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm, insFlags flags)
{
#if RELOC_SUPPORT
if (!compiler->opts.compReloc)
#endif // RELOC_SUPPORT
{
- size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
+ size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
}
if ((imm == 0) && !EA_IS_RELOC(size))
@@ -3884,7 +3880,7 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
{
#if defined(_TARGET_XARCH_)
getEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
-#elif defined (_TARGET_ARM_)
+#elif defined(_TARGET_ARM_)
if (EA_IS_RELOC(size))
{
@@ -3895,7 +3891,7 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
{
getEmitter()->emitIns_R_I(INS_mov, size, reg, imm, flags);
}
- else // We have to use a movw/movt pair of instructions
+ else // We have to use a movw/movt pair of instructions
{
ssize_t imm_lo16 = (imm & 0xffff);
ssize_t imm_hi16 = (imm >> 16) & 0xffff;
@@ -3922,8 +3918,8 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
if (flags == INS_FLAGS_SET)
getEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
}
-#elif defined (_TARGET_ARM64_)
- NYI_ARM64("instGen_Set_Reg_To_Imm");
+#elif defined(_TARGET_ARM64_)
+ NYI_ARM64("instGen_Set_Reg_To_Imm");
#else
#error "Unknown _TARGET_"
#endif
@@ -3934,11 +3930,10 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
/*****************************************************************************
*
- * Machine independent way to set the flags based on
+ * Machine independent way to set the flags based on
* comparing a register with zero
*/
-void CodeGen::instGen_Compare_Reg_To_Zero(emitAttr size,
- regNumber reg)
+void CodeGen::instGen_Compare_Reg_To_Zero(emitAttr size, regNumber reg)
{
#if defined(_TARGET_XARCH_)
getEmitter()->emitIns_R_R(INS_test, size, reg, reg);
@@ -3949,15 +3944,12 @@ void CodeGen::instGen_Compare_Reg_To_Zero(emitAttr size,
#endif
}
-
/*****************************************************************************
*
- * Machine independent way to set the flags based upon
+ * Machine independent way to set the flags based upon
* comparing a register with another register
*/
-void CodeGen::instGen_Compare_Reg_To_Reg(emitAttr size,
- regNumber reg1,
- regNumber reg2)
+void CodeGen::instGen_Compare_Reg_To_Reg(emitAttr size, regNumber reg1, regNumber reg2)
{
#if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
getEmitter()->emitIns_R_R(INS_cmp, size, reg1, reg2);
@@ -3971,9 +3963,7 @@ void CodeGen::instGen_Compare_Reg_To_Reg(emitAttr size,
* Machine independent way to set the flags based upon
* comparing a register with an immediate
*/
-void CodeGen::instGen_Compare_Reg_To_Imm(emitAttr size,
- regNumber reg,
- ssize_t imm)
+void CodeGen::instGen_Compare_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm)
{
if (imm == 0)
{
@@ -3987,7 +3977,7 @@ void CodeGen::instGen_Compare_Reg_To_Imm(emitAttr size,
{
#ifndef LEGACY_BACKEND
assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
// Load imm into a register
regNumber immReg = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(reg));
instGen_Set_Reg_To_Imm(size, immReg, (ssize_t)imm);
@@ -3999,31 +3989,31 @@ void CodeGen::instGen_Compare_Reg_To_Imm(emitAttr size,
{
getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
}
-#elif defined (_TARGET_ARM_)
+#elif defined(_TARGET_ARM_)
if (arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm))
{
getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
}
- else // We need a scratch register
+ else // We need a scratch register
{
#ifndef LEGACY_BACKEND
assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
// Load imm into a register
regNumber immReg = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(reg));
instGen_Set_Reg_To_Imm(size, immReg, (ssize_t)imm);
getEmitter()->emitIns_R_R(INS_cmp, size, reg, immReg);
#endif // !LEGACY_BACKEND
}
-#elif defined (_TARGET_ARM64_)
- if (true) // TODO-ARM64-NYI: arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm))
- {
- getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
- }
- else // We need a scratch register
- {
- assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
- }
+#elif defined(_TARGET_ARM64_)
+ if (true) // TODO-ARM64-NYI: arm_Valid_Imm_For_Alu(imm) || arm_Valid_Imm_For_Alu(-imm))
+ {
+ getEmitter()->emitIns_R_I(INS_cmp, size, reg, imm);
+ }
+ else // We need a scratch register
+ {
+ assert(!"Invalid immediate for instGen_Compare_Reg_To_Imm");
+ }
#else
#error "Unknown _TARGET_"
#endif
@@ -4034,13 +4024,10 @@ void CodeGen::instGen_Compare_Reg_To_Imm(emitAttr size,
*
* Machine independent way to move a stack based local variable into a register
*/
-void CodeGen::instGen_Load_Reg_From_Lcl(var_types srcType,
- regNumber dstReg,
- int varNum,
- int offs)
+void CodeGen::instGen_Load_Reg_From_Lcl(var_types srcType, regNumber dstReg, int varNum, int offs)
{
emitAttr size = emitTypeSize(srcType);
-
+
getEmitter()->emitIns_R_S(ins_Load(srcType), size, dstReg, varNum, offs);
}
@@ -4048,10 +4035,7 @@ void CodeGen::instGen_Load_Reg_From_Lcl(var_types srcType,
*
* Machine independent way to move a register into a stack based local variable
*/
-void CodeGen::instGen_Store_Reg_Into_Lcl(var_types dstType,
- regNumber srcReg,
- int varNum,
- int offs)
+void CodeGen::instGen_Store_Reg_Into_Lcl(var_types dstType, regNumber srcReg, int varNum, int offs)
{
emitAttr size = emitTypeSize(dstType);
@@ -4062,17 +4046,12 @@ void CodeGen::instGen_Store_Reg_Into_Lcl(var_types dstType,
*
* Machine independent way to move an immediate into a stack based local variable
*/
-void CodeGen::instGen_Store_Imm_Into_Lcl(var_types dstType,
- emitAttr sizeAttr,
- ssize_t imm,
- int varNum,
- int offs,
- regNumber regToUse)
-{
+void CodeGen::instGen_Store_Imm_Into_Lcl(
+ var_types dstType, emitAttr sizeAttr, ssize_t imm, int varNum, int offs, regNumber regToUse)
+{
#ifdef _TARGET_XARCH_
#ifdef _TARGET_AMD64_
- if ((EA_SIZE(sizeAttr) == EA_8BYTE) &&
- (((int)imm != (ssize_t)imm) || EA_IS_CNS_RELOC(sizeAttr)))
+ if ((EA_SIZE(sizeAttr) == EA_8BYTE) && (((int)imm != (ssize_t)imm) || EA_IS_CNS_RELOC(sizeAttr)))
{
assert(!"Invalid immediate for instGen_Store_Imm_Into_Lcl");
}
@@ -4088,8 +4067,8 @@ void CodeGen::instGen_Store_Imm_Into_Lcl(var_types dstType,
#ifndef LEGACY_BACKEND
regNumber immReg = regToUse;
assert(regToUse != REG_NA);
-#else // LEGACY_BACKEND
- regNumber immReg = (regToUse == REG_NA)? regSet.rsGrabReg(RBM_ALLINT) : regToUse;
+#else // LEGACY_BACKEND
+ regNumber immReg = (regToUse == REG_NA) ? regSet.rsGrabReg(RBM_ALLINT) : regToUse;
#endif // LEGACY_BACKEND
instGen_Set_Reg_To_Imm(sizeAttr, immReg, (ssize_t)imm);
instGen_Store_Reg_Into_Lcl(dstType, immReg, varNum, offs);
@@ -4097,7 +4076,7 @@ void CodeGen::instGen_Store_Imm_Into_Lcl(var_types dstType,
{
regTracker.rsTrackRegTrash(immReg);
}
-#else // _TARGET_*
+#else // _TARGET_*
#error "Unknown _TARGET_"
#endif // _TARGET_*
}
diff --git a/src/jit/instr.h b/src/jit/instr.h
index 26245a0a69..c38f8d2073 100644
--- a/src/jit/instr.h
+++ b/src/jit/instr.h
@@ -7,7 +7,7 @@
#define _INSTR_H_
/*****************************************************************************/
-#define BAD_CODE 0x0BADC0DE // better not match a real encoding!
+#define BAD_CODE 0x0BADC0DE // better not match a real encoding!
/*****************************************************************************/
@@ -297,5 +297,5 @@ enum InstructionSet
// clang-format on
/*****************************************************************************/
-#endif//_INSTR_H_
+#endif //_INSTR_H_
/*****************************************************************************/
diff --git a/src/jit/instrs.h b/src/jit/instrs.h
index 8063f3b6a4..2f5c14fc6f 100644
--- a/src/jit/instrs.h
+++ b/src/jit/instrs.h
@@ -9,5 +9,5 @@
#elif defined(_TARGET_ARM64_)
#include "instrsarm64.h"
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif // target type
diff --git a/src/jit/instrsarm.h b/src/jit/instrsarm.h
index 324c281761..d1a77f8ebb 100644
--- a/src/jit/instrsarm.h
+++ b/src/jit/instrsarm.h
@@ -23,36 +23,36 @@
******************************************************************************/
#if !defined(_TARGET_ARM_)
- #error Unexpected target type
+#error Unexpected target type
#endif
#ifndef INST1
-#error INST1 must be defined before including this file.
+#error INST1 must be defined before including this file.
#endif
#ifndef INST2
-#error INST2 must be defined before including this file.
+#error INST2 must be defined before including this file.
#endif
#ifndef INST3
-#error INST3 must be defined before including this file.
+#error INST3 must be defined before including this file.
#endif
#ifndef INST4
-#error INST4 must be defined before including this file.
+#error INST4 must be defined before including this file.
#endif
#ifndef INST5
-#error INST5 must be defined before including this file.
+#error INST5 must be defined before including this file.
#endif
#ifndef INST6
-#error INST6 must be defined before including this file.
+#error INST6 must be defined before including this file.
#endif
// No INST7
// #ifndef INST7
// #error INST7 must be defined before including this file.
// #endif
#ifndef INST8
-#error INST8 must be defined before including this file.
+#error INST8 must be defined before including this file.
#endif
#ifndef INST9
-#error INST9 must be defined before including this file.
+#error INST9 must be defined before including this file.
#endif
/*****************************************************************************/
@@ -545,13 +545,13 @@ INST1(vmov_f2i, "vmov.f2i", 1, 0, IF_T2_VMOVS, 0xEE100A10) // A8.6.330 VMOV
// clang-format on
/*****************************************************************************/
-#undef INST1
-#undef INST2
-#undef INST3
-#undef INST4
-#undef INST5
-#undef INST6
-#undef INST7
-#undef INST8
-#undef INST9
+#undef INST1
+#undef INST2
+#undef INST3
+#undef INST4
+#undef INST5
+#undef INST6
+#undef INST7
+#undef INST8
+#undef INST9
/*****************************************************************************/
diff --git a/src/jit/instrsarm64.h b/src/jit/instrsarm64.h
index 3e1d00417c..e91aaa6836 100644
--- a/src/jit/instrsarm64.h
+++ b/src/jit/instrsarm64.h
@@ -19,29 +19,29 @@
******************************************************************************/
#if !defined(_TARGET_ARM64_)
- #error Unexpected target type
+#error Unexpected target type
#endif
#ifndef INST1
-#error INST1 must be defined before including this file.
+#error INST1 must be defined before including this file.
#endif
#ifndef INST2
-#error INST2 must be defined before including this file.
+#error INST2 must be defined before including this file.
#endif
#ifndef INST3
-#error INST3 must be defined before including this file.
+#error INST3 must be defined before including this file.
#endif
#ifndef INST4
-#error INST4 must be defined before including this file.
+#error INST4 must be defined before including this file.
#endif
#ifndef INST5
-#error INST5 must be defined before including this file.
+#error INST5 must be defined before including this file.
#endif
#ifndef INST6
-#error INST6 must be defined before including this file.
+#error INST6 must be defined before including this file.
#endif
#ifndef INST9
-#error INST9 must be defined before including this file.
+#error INST9 must be defined before including this file.
#endif
/*****************************************************************************/
@@ -944,11 +944,11 @@ INST1(uxtl2, "uxtl2", 0, 0, IF_DV_2O, 0x6F00A400)
// clang-format on
/*****************************************************************************/
-#undef INST1
-#undef INST2
-#undef INST3
-#undef INST4
-#undef INST5
-#undef INST6
-#undef INST9
+#undef INST1
+#undef INST2
+#undef INST3
+#undef INST4
+#undef INST5
+#undef INST6
+#undef INST9
/*****************************************************************************/
diff --git a/src/jit/jit.h b/src/jit/jit.h
index 36cf690c9d..205ccf03a6 100644
--- a/src/jit/jit.h
+++ b/src/jit/jit.h
@@ -11,10 +11,10 @@
// clr.sln only defines _DEBUG
// The jit uses DEBUG rather than _DEBUG
// So we make sure that _DEBUG implies DEBUG
-//
+//
#ifdef _DEBUG
-#ifndef DEBUG
-#define DEBUG 1
+#ifndef DEBUG
+#define DEBUG 1
#endif
#endif
@@ -28,162 +28,161 @@
#ifdef _MSC_VER
// These don't seem useful, so turning them off is no big deal
-#pragma warning(disable:4510) // can't generate default constructor
-#pragma warning(disable:4511) // can't generate copy constructor
-#pragma warning(disable:4512) // can't generate assignment constructor
-#pragma warning(disable:4610) // user defined constructor required
-#pragma warning(disable:4211) // nonstandard extention used (char name[0] in structs)
-#pragma warning(disable:4127) // conditional expression constant
-#pragma warning(disable:4201) // "nonstandard extension used : nameless struct/union"
+#pragma warning(disable : 4510) // can't generate default constructor
+#pragma warning(disable : 4511) // can't generate copy constructor
+#pragma warning(disable : 4512) // can't generate assignment constructor
+#pragma warning(disable : 4610) // user defined constructor required
+#pragma warning(disable : 4211) // nonstandard extension used (char name[0] in structs)
+#pragma warning(disable : 4127) // conditional expression constant
+#pragma warning(disable : 4201) // "nonstandard extension used : nameless struct/union"
// Depending on the code base, you may want to not disable these
-#pragma warning(disable:4245) // assigning signed / unsigned
-#pragma warning(disable:4146) // unary minus applied to unsigned
+#pragma warning(disable : 4245) // assigning signed / unsigned
+#pragma warning(disable : 4146) // unary minus applied to unsigned
-#pragma warning(disable:4100) // unreferenced formal parameter
-#pragma warning(disable:4291) // new operator without delete (only in emitX86.cpp)
+#pragma warning(disable : 4100) // unreferenced formal parameter
+#pragma warning(disable : 4291) // new operator without delete (only in emitX86.cpp)
#endif
#ifdef _MSC_VER
-#define CHECK_STRUCT_PADDING 0 // Set this to '1' to enable warning C4820 "'bytes' bytes padding added after
- // construct 'member_name'" on interesting structs/classes
+#define CHECK_STRUCT_PADDING 0 // Set this to '1' to enable warning C4820 "'bytes' bytes padding added after
+ // construct 'member_name'" on interesting structs/classes
#else
-#define CHECK_STRUCT_PADDING 0 // Never enable it for non-MSFT compilers
+#define CHECK_STRUCT_PADDING 0 // Never enable it for non-MSFT compilers
#endif
#if defined(_X86_)
- #if defined(_ARM_)
- #error Cannot define both _X86_ and _ARM_
- #endif
- #if defined(_AMD64_)
- #error Cannot define both _X86_ and _AMD64_
- #endif
- #if defined(_ARM64_)
- #error Cannot define both _X86_ and _ARM64_
- #endif
- #define _HOST_X86_
+#if defined(_ARM_)
+#error Cannot define both _X86_ and _ARM_
+#endif
+#if defined(_AMD64_)
+#error Cannot define both _X86_ and _AMD64_
+#endif
+#if defined(_ARM64_)
+#error Cannot define both _X86_ and _ARM64_
+#endif
+#define _HOST_X86_
#elif defined(_AMD64_)
- #if defined(_X86_)
- #error Cannot define both _AMD64_ and _X86_
- #endif
- #if defined(_ARM_)
- #error Cannot define both _AMD64_ and _ARM_
- #endif
- #if defined(_ARM64_)
- #error Cannot define both _AMD64_ and _ARM64_
- #endif
- #define _HOST_AMD64_
+#if defined(_X86_)
+#error Cannot define both _AMD64_ and _X86_
+#endif
+#if defined(_ARM_)
+#error Cannot define both _AMD64_ and _ARM_
+#endif
+#if defined(_ARM64_)
+#error Cannot define both _AMD64_ and _ARM64_
+#endif
+#define _HOST_AMD64_
#elif defined(_ARM_)
- #if defined(_X86_)
- #error Cannot define both _ARM_ and _X86_
- #endif
- #if defined(_AMD64_)
- #error Cannot define both _ARM_ and _AMD64_
- #endif
- #if defined(_ARM64_)
- #error Cannot define both _ARM_ and _ARM64_
- #endif
- #define _HOST_ARM_
+#if defined(_X86_)
+#error Cannot define both _ARM_ and _X86_
+#endif
+#if defined(_AMD64_)
+#error Cannot define both _ARM_ and _AMD64_
+#endif
+#if defined(_ARM64_)
+#error Cannot define both _ARM_ and _ARM64_
+#endif
+#define _HOST_ARM_
#elif defined(_ARM64_)
- #if defined(_X86_)
- #error Cannot define both _ARM64_ and _X86_
- #endif
- #if defined(_AMD64_)
- #error Cannot define both _ARM64_ and _AMD64_
- #endif
- #if defined(_ARM_)
- #error Cannot define both _ARM64_ and _ARM_
- #endif
- #define _HOST_ARM64_
+#if defined(_X86_)
+#error Cannot define both _ARM64_ and _X86_
+#endif
+#if defined(_AMD64_)
+#error Cannot define both _ARM64_ and _AMD64_
+#endif
+#if defined(_ARM_)
+#error Cannot define both _ARM64_ and _ARM_
+#endif
+#define _HOST_ARM64_
#else
- #error Unsupported or unset host architecture
+#error Unsupported or unset host architecture
#endif
#if defined(_HOST_AMD64_) || defined(_HOST_ARM64_)
- #define _HOST_64BIT_
+#define _HOST_64BIT_
#endif
#if defined(_TARGET_X86_)
- #if defined(_TARGET_ARM_)
- #error Cannot define both _TARGET_X86_ and _TARGET_ARM_
- #endif
- #if defined(_TARGET_AMD64_)
- #error Cannot define both _TARGET_X86_ and _TARGET_AMD64_
- #endif
- #if defined(_TARGET_ARM64_)
- #error Cannot define both _TARGET_X86_ and _TARGET_ARM64_
- #endif
- #if !defined(_HOST_X86_)
- #define _CROSS_COMPILER_
- #endif
+#if defined(_TARGET_ARM_)
+#error Cannot define both _TARGET_X86_ and _TARGET_ARM_
+#endif
+#if defined(_TARGET_AMD64_)
+#error Cannot define both _TARGET_X86_ and _TARGET_AMD64_
+#endif
+#if defined(_TARGET_ARM64_)
+#error Cannot define both _TARGET_X86_ and _TARGET_ARM64_
+#endif
+#if !defined(_HOST_X86_)
+#define _CROSS_COMPILER_
+#endif
#elif defined(_TARGET_AMD64_)
- #if defined(_TARGET_X86_)
- #error Cannot define both _TARGET_AMD64_ and _TARGET_X86_
- #endif
- #if defined(_TARGET_ARM_)
- #error Cannot define both _TARGET_AMD64_ and _TARGET_ARM_
- #endif
- #if defined(_TARGET_ARM64_)
- #error Cannot define both _TARGET_AMD64_ and _TARGET_ARM64_
- #endif
- #if !defined(_HOST_AMD64_)
- #define _CROSS_COMPILER_
- #endif
+#if defined(_TARGET_X86_)
+#error Cannot define both _TARGET_AMD64_ and _TARGET_X86_
+#endif
+#if defined(_TARGET_ARM_)
+#error Cannot define both _TARGET_AMD64_ and _TARGET_ARM_
+#endif
+#if defined(_TARGET_ARM64_)
+#error Cannot define both _TARGET_AMD64_ and _TARGET_ARM64_
+#endif
+#if !defined(_HOST_AMD64_)
+#define _CROSS_COMPILER_
+#endif
#elif defined(_TARGET_ARM_)
- #if defined(_TARGET_X86_)
- #error Cannot define both _TARGET_ARM_ and _TARGET_X86_
- #endif
- #if defined(_TARGET_AMD64_)
- #error Cannot define both _TARGET_ARM_ and _TARGET_AMD64_
- #endif
- #if defined(_TARGET_ARM64_)
- #error Cannot define both _TARGET_ARM_ and _TARGET_ARM64_
- #endif
- #if !defined(_HOST_ARM_)
- #define _CROSS_COMPILER_
- #endif
+#if defined(_TARGET_X86_)
+#error Cannot define both _TARGET_ARM_ and _TARGET_X86_
+#endif
+#if defined(_TARGET_AMD64_)
+#error Cannot define both _TARGET_ARM_ and _TARGET_AMD64_
+#endif
+#if defined(_TARGET_ARM64_)
+#error Cannot define both _TARGET_ARM_ and _TARGET_ARM64_
+#endif
+#if !defined(_HOST_ARM_)
+#define _CROSS_COMPILER_
+#endif
#elif defined(_TARGET_ARM64_)
- #if defined(_TARGET_X86_)
- #error Cannot define both _TARGET_ARM64_ and _TARGET_X86_
- #endif
- #if defined(_TARGET_AMD64_)
- #error Cannot define both _TARGET_ARM64_ and _TARGET_AMD64_
- #endif
- #if defined(_TARGET_ARM_)
- #error Cannot define both _TARGET_ARM64_ and _TARGET_ARM_
- #endif
- #if !defined(_HOST_ARM64_)
- #define _CROSS_COMPILER_
- #endif
+#if defined(_TARGET_X86_)
+#error Cannot define both _TARGET_ARM64_ and _TARGET_X86_
+#endif
+#if defined(_TARGET_AMD64_)
+#error Cannot define both _TARGET_ARM64_ and _TARGET_AMD64_
+#endif
+#if defined(_TARGET_ARM_)
+#error Cannot define both _TARGET_ARM64_ and _TARGET_ARM_
+#endif
+#if !defined(_HOST_ARM64_)
+#define _CROSS_COMPILER_
+#endif
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
- #define _TARGET_64BIT_
+#define _TARGET_64BIT_
#endif
#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
- #define _TARGET_XARCH_
+#define _TARGET_XARCH_
#endif
#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
- #define _TARGET_ARMARCH_
+#define _TARGET_ARMARCH_
#endif
-
// --------------------------------------------------------------------------------
// IMAGE_FILE_MACHINE_TARGET
// --------------------------------------------------------------------------------
#if defined(_TARGET_X86_)
-#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_I386
+#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_I386
#elif defined(_TARGET_AMD64_)
-#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_AMD64
+#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_AMD64
#elif defined(_TARGET_ARM_)
-#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_ARMNT
+#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_ARMNT
#elif defined(_TARGET_ARM64_)
-#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_ARM64 // 0xAA64
+#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_ARM64 // 0xAA64
#else
#error Unsupported or unset target architecture
#endif
@@ -204,84 +203,85 @@
// VC++ understands the syntax to declare these directly, e.g., "enum FooEnum : BYTE",
// but GCC does not, so we use typedefs.
-#define DECLARE_TYPED_ENUM(tag,baseType) \
- enum tag : baseType
+#define DECLARE_TYPED_ENUM(tag, baseType) enum tag : baseType
-#define END_DECLARE_TYPED_ENUM(tag,baseType) \
- ;
+#define END_DECLARE_TYPED_ENUM(tag, baseType) ;
#include "corhdr.h"
#include "corjit.h"
-#define __OPERATOR_NEW_INLINE 1 // indicate that I will define these
-#define __PLACEMENT_NEW_INLINE // don't bring in the global placement new, it is easy to make a mistake
- // with our new(compiler*) pattern.
+#define __OPERATOR_NEW_INLINE 1 // indicate that I will define these
+#define __PLACEMENT_NEW_INLINE // don't bring in the global placement new, it is easy to make a mistake
+ // with our new(compiler*) pattern.
#if COR_JIT_EE_VER > 460
-#define NO_CLRCONFIG // Don't bring in the usual CLRConfig infrastructure, since the JIT uses the JIT/EE
- // interface to retrieve config values.
+#define NO_CLRCONFIG // Don't bring in the usual CLRConfig infrastructure, since the JIT uses the JIT/EE
+ // interface to retrieve config values.
// This is needed for contract.inl when FEATURE_STACK_PROBE is enabled.
struct CLRConfig
{
- static struct ConfigKey { } EXTERNAL_NO_SO_NOT_MAINLINE;
- static DWORD GetConfigValue(const ConfigKey& key) { return 0; }
+ static struct ConfigKey
+ {
+ } EXTERNAL_NO_SO_NOT_MAINLINE;
+ static DWORD GetConfigValue(const ConfigKey& key)
+ {
+ return 0;
+ }
};
#endif
-#include "utilcode.h" // this defines assert as _ASSERTE
-#include "host.h" // this redefines assert for the JIT to use assertAbort
+#include "utilcode.h" // this defines assert as _ASSERTE
+#include "host.h" // this redefines assert for the JIT to use assertAbort
#include "utils.h"
#ifdef DEBUG
-#define INDEBUG(x) x
-#define INDEBUG_COMMA(x) x,
-#define DEBUGARG(x) , x
-#else
+#define INDEBUG(x) x
+#define INDEBUG_COMMA(x) x,
+#define DEBUGARG(x) , x
+#else
#define INDEBUG(x)
#define INDEBUG_COMMA(x)
#define DEBUGARG(x)
#endif
#if defined(DEBUG) || defined(LATE_DISASM)
-#define INDEBUG_LDISASM_COMMA(x) x,
-#else
+#define INDEBUG_LDISASM_COMMA(x) x,
+#else
#define INDEBUG_LDISASM_COMMA(x)
#endif
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(x) , x
-#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(x) x
+#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(x) , x
+#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(x) x
#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(x)
#define FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(x)
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
#if defined(UNIX_AMD64_ABI)
-#define UNIX_AMD64_ABI_ONLY_ARG(x) , x
-#define UNIX_AMD64_ABI_ONLY(x) x
+#define UNIX_AMD64_ABI_ONLY_ARG(x) , x
+#define UNIX_AMD64_ABI_ONLY(x) x
#else // !defined(UNIX_AMD64_ABI)
#define UNIX_AMD64_ABI_ONLY_ARG(x)
#define UNIX_AMD64_ABI_ONLY(x)
#endif // defined(UNIX_AMD64_ABI)
#if defined(UNIX_AMD64_ABI) || defined(_TARGET_ARM64_)
-#define MULTIREG_HAS_SECOND_GC_RET 1
-#define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x) , x
-#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x) x
+#define MULTIREG_HAS_SECOND_GC_RET 1
+#define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x) , x
+#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x) x
#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#define MULTIREG_HAS_SECOND_GC_RET 0
+#define MULTIREG_HAS_SECOND_GC_RET 0
#define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x)
#define MULTIREG_HAS_SECOND_GC_RET_ONLY(x)
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-
-
// To get rid of warning 4701 : local variable may be used without being initialized
-#define DUMMY_INIT(x) (x)
+#define DUMMY_INIT(x) (x)
#define REGEN_SHORTCUTS 0
-#define REGEN_CALLPAT 0
+#define REGEN_CALLPAT 0
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -298,21 +298,24 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#if defined(DEBUG)
#include "log.h"
-#define INFO6 LL_INFO10000 // Did Jit or Inline succeeded?
-#define INFO7 LL_INFO100000 // NYI stuff
-#define INFO8 LL_INFO1000000 // Weird failures
-#define INFO9 LL_EVERYTHING // Info about incoming settings
-#define INFO10 LL_EVERYTHING // Totally verbose
+#define INFO6 LL_INFO10000       // Did Jit or Inline succeed?
+#define INFO7 LL_INFO100000 // NYI stuff
+#define INFO8 LL_INFO1000000 // Weird failures
+#define INFO9 LL_EVERYTHING // Info about incoming settings
+#define INFO10 LL_EVERYTHING // Totally verbose
#endif // DEBUG
-typedef class ICorJitInfo* COMP_HANDLE;
+typedef class ICorJitInfo* COMP_HANDLE;
-const CORINFO_CLASS_HANDLE NO_CLASS_HANDLE = (CORINFO_CLASS_HANDLE) 0;
+const CORINFO_CLASS_HANDLE NO_CLASS_HANDLE = (CORINFO_CLASS_HANDLE) nullptr;
/*****************************************************************************/
-inline bool False() { return false; } // Use to disable code while keeping prefast happy
+inline bool False()
+{
+ return false;
+} // Used to disable code while keeping prefast happy
// We define two IL offset types, as follows:
//
@@ -335,38 +338,38 @@ inline bool False() { return false; } // Use to disable code while keeping prefa
// Blocks and statements never store one of the ICorDebugInfo values, even for IL_OFFSETX types. These are
// only stored in the IPmappingDsc struct, ipmdILoffsx field.
-typedef unsigned IL_OFFSET;
+typedef unsigned IL_OFFSET;
-const IL_OFFSET BAD_IL_OFFSET = 0x80000000;
-const IL_OFFSET MAX_IL_OFFSET = 0x3fffffff;
+const IL_OFFSET BAD_IL_OFFSET = 0x80000000;
+const IL_OFFSET MAX_IL_OFFSET = 0x3fffffff;
-typedef unsigned IL_OFFSETX; // IL_OFFSET with stack-empty or call-instruction bit
-const IL_OFFSETX IL_OFFSETX_STKBIT = 0x80000000; // Note: this bit is set when the stack is NOT empty!
-const IL_OFFSETX IL_OFFSETX_CALLINSTRUCTIONBIT = 0x40000000; // Set when the IL offset is for a call instruction.
-const IL_OFFSETX IL_OFFSETX_BITS = IL_OFFSETX_STKBIT | IL_OFFSETX_CALLINSTRUCTIONBIT;
+typedef unsigned IL_OFFSETX; // IL_OFFSET with stack-empty or call-instruction bit
+const IL_OFFSETX IL_OFFSETX_STKBIT = 0x80000000; // Note: this bit is set when the stack is NOT empty!
+const IL_OFFSETX IL_OFFSETX_CALLINSTRUCTIONBIT = 0x40000000; // Set when the IL offset is for a call instruction.
+const IL_OFFSETX IL_OFFSETX_BITS = IL_OFFSETX_STKBIT | IL_OFFSETX_CALLINSTRUCTIONBIT;
-IL_OFFSET jitGetILoffs (IL_OFFSETX offsx);
-IL_OFFSET jitGetILoffsAny (IL_OFFSETX offsx);
-bool jitIsStackEmpty (IL_OFFSETX offsx);
-bool jitIsCallInstruction(IL_OFFSETX offsx);
+IL_OFFSET jitGetILoffs(IL_OFFSETX offsx);
+IL_OFFSET jitGetILoffsAny(IL_OFFSETX offsx);
+bool jitIsStackEmpty(IL_OFFSETX offsx);
+bool jitIsCallInstruction(IL_OFFSETX offsx);
-const unsigned BAD_VAR_NUM = UINT_MAX;
+const unsigned BAD_VAR_NUM = UINT_MAX;
// Code can't be more than 2^31 in any direction. This is signed, so it should be used for anything that is
// relative to something else.
-typedef int NATIVE_OFFSET;
+typedef int NATIVE_OFFSET;
// This is the same as the above, but it's used in absolute contexts (i.e. offset from the start). Also,
// this is used for native code sizes.
-typedef unsigned UNATIVE_OFFSET;
+typedef unsigned UNATIVE_OFFSET;
-typedef ptrdiff_t ssize_t;
+typedef ptrdiff_t ssize_t;
// For the following specially handled FIELD_HANDLES we need
// values that are negative and have the low two bits zero
-// See eeFindJitDataOffs and eeGetJitDataOffs in Compiler.hpp
-#define FLD_GLOBAL_DS ((CORINFO_FIELD_HANDLE) -4 )
-#define FLD_GLOBAL_FS ((CORINFO_FIELD_HANDLE) -8 )
+// See eeFindJitDataOffs and eeGetJitDataOffs in Compiler.hpp
+#define FLD_GLOBAL_DS ((CORINFO_FIELD_HANDLE)-4)
+#define FLD_GLOBAL_FS ((CORINFO_FIELD_HANDLE)-8)
/*****************************************************************************/
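A minimal sketch of how the IL_OFFSETX helpers declared in the hunk above could strip or test the flag bits. This is illustrative only: the helpers' definitions are not part of this diff, the function names here are hypothetical, and the sketch assumes a plain masking scheme over IL_OFFSETX_STKBIT and IL_OFFSETX_CALLINSTRUCTIONBIT.

// Illustrative sketch, not from the commit; assumes straightforward masking.
inline IL_OFFSET exampleGetILoffs(IL_OFFSETX offsx)
{
    // Drop both flag bits to recover the raw IL offset.
    return IL_OFFSET(offsx & ~IL_OFFSETX_BITS);
}

inline bool exampleIsStackEmpty(IL_OFFSETX offsx)
{
    // IL_OFFSETX_STKBIT is set when the stack is NOT empty.
    return (offsx & IL_OFFSETX_STKBIT) == 0;
}

inline bool exampleIsCallInstruction(IL_OFFSETX offsx)
{
    return (offsx & IL_OFFSETX_CALLINSTRUCTIONBIT) != 0;
}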
@@ -377,10 +380,10 @@ typedef ptrdiff_t ssize_t;
// Debugging support is ON by default. Can be turned OFF by
// adding /DDEBUGGING_SUPPORT=0 on the command line.
-#ifndef DEBUGGING_SUPPORT
-# define DEBUGGING_SUPPORT
-#elif !DEBUGGING_SUPPORT
-# undef DEBUGGING_SUPPORT
+#ifndef DEBUGGING_SUPPORT
+#define DEBUGGING_SUPPORT
+#elif !DEBUGGING_SUPPORT
+#undef DEBUGGING_SUPPORT
#endif
/*****************************************************************************/
@@ -390,89 +393,88 @@ typedef ptrdiff_t ssize_t;
// Always OFF in the non-debug version
#if defined(LATE_DISASM) && (LATE_DISASM == 0)
-#undef LATE_DISASM
+#undef LATE_DISASM
#endif
/*****************************************************************************/
-
/*****************************************************************************/
-#define FEATURE_VALNUM_CSE 1 // enable the Value Number CSE optimization logic
+#define FEATURE_VALNUM_CSE 1 // enable the Value Number CSE optimization logic
// true if Value Number CSE is enabled
-#define FEATURE_ANYCSE FEATURE_VALNUM_CSE
+#define FEATURE_ANYCSE FEATURE_VALNUM_CSE
-#define CSE_INTO_HANDLERS 0
+#define CSE_INTO_HANDLERS 0
-#define CAN_DISABLE_DFA 1 // disable data flow for minopts
+#define CAN_DISABLE_DFA 1 // disable data flow for minopts
-#define LARGE_EXPSET 1 // Track 64 or 32 assertions/copies/consts/rangechecks
-#define ASSERTION_PROP 1 // Enable value/assertion propagation
+#define LARGE_EXPSET 1 // Track 64 or 32 assertions/copies/consts/rangechecks
+#define ASSERTION_PROP 1 // Enable value/assertion propagation
-#define LOCAL_ASSERTION_PROP ASSERTION_PROP // Enable local assertion propagation
+#define LOCAL_ASSERTION_PROP ASSERTION_PROP // Enable local assertion propagation
//=============================================================================
-#define FANCY_ARRAY_OPT 0 // optimize more complex index checks
+#define FANCY_ARRAY_OPT 0 // optimize more complex index checks
//=============================================================================
-#define LONG_ASG_OPS 0 // implementation isn't complete yet
+#define LONG_ASG_OPS 0 // implementation isn't complete yet
//=============================================================================
-#define OPT_MULT_ADDSUB 1 // optimize consecutive "lclVar += or -= icon"
-#define OPT_BOOL_OPS 1 // optimize boolean operations
+#define OPT_MULT_ADDSUB 1 // optimize consecutive "lclVar += or -= icon"
+#define OPT_BOOL_OPS 1 // optimize boolean operations
//=============================================================================
-#define REDUNDANT_LOAD 1 // track locals in regs, suppress loads
-#define STACK_PROBES 0 // Support for stack probes
-#define DUMP_FLOWGRAPHS DEBUG // Support for creating Xml Flowgraph reports in *.fgx files
+#define REDUNDANT_LOAD 1 // track locals in regs, suppress loads
+#define STACK_PROBES 0 // Support for stack probes
+#define DUMP_FLOWGRAPHS DEBUG // Support for creating Xml Flowgraph reports in *.fgx files
-#define HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION 1 // if 1 we must have all handler entry points in the Hot code section
+#define HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION 1 // if 1 we must have all handler entry points in the Hot code section
/*****************************************************************************/
-#define VPTR_OFFS 0 // offset of vtable pointer from obj ptr
+#define VPTR_OFFS 0 // offset of vtable pointer from obj ptr
/*****************************************************************************/
-#define DUMP_GC_TABLES DEBUG
-#define VERIFY_GC_TABLES 0
-#define REARRANGE_ADDS 1
+#define DUMP_GC_TABLES DEBUG
+#define VERIFY_GC_TABLES 0
+#define REARRANGE_ADDS 1
-#define FUNC_INFO_LOGGING 1 // Support dumping function info to a file. In retail, only NYIs, with no function name,
- // are dumped.
+#define FUNC_INFO_LOGGING 1 // Support dumping function info to a file. In retail, only NYIs, with no function name,
+ // are dumped.
/*****************************************************************************/
/*****************************************************************************/
/* Set these to 1 to collect and output various statistics about the JIT */
-#define CALL_ARG_STATS 0 // Collect stats about calls and call arguments.
-#define COUNT_BASIC_BLOCKS 0 // Create a histogram of basic block sizes, and a histogram of IL sizes in the simple
- // case of single block methods.
-#define COUNT_LOOPS 0 // Collect stats about loops, such as the total number of natural loops, a histogram of
- // the number of loop exits, etc.
-#define COUNT_RANGECHECKS 0 // Count range checks removed (in lexical CSE?).
-#define DATAFLOW_ITER 0 // Count iterations in lexical CSE and constant folding dataflow.
-#define DISPLAY_SIZES 0 // Display generated code, data, and GC information sizes.
-#define MEASURE_BLOCK_SIZE 0 // Collect stats about basic block and flowList node sizes and memory allocations.
-#define MEASURE_FATAL 0 // Count the number of calls to fatal(), including NYIs and noway_asserts.
-#define MEASURE_NODE_SIZE 0 // Collect stats about GenTree node allocations.
-#define MEASURE_PTRTAB_SIZE 0 // Collect stats about GC pointer table allocations.
-#define EMITTER_STATS 0 // Collect stats on the emitter.
-
-#define VERBOSE_SIZES 0 // Always display GC info sizes. If set, DISPLAY_SIZES must also be set.
-#define VERBOSE_VERIFY 0 // Dump additional information when verifying code. Useful to debug verification bugs.
+#define CALL_ARG_STATS 0 // Collect stats about calls and call arguments.
+#define COUNT_BASIC_BLOCKS 0 // Create a histogram of basic block sizes, and a histogram of IL sizes in the simple
+ // case of single block methods.
+#define COUNT_LOOPS 0 // Collect stats about loops, such as the total number of natural loops, a histogram of
+ // the number of loop exits, etc.
+#define COUNT_RANGECHECKS 0 // Count range checks removed (in lexical CSE?).
+#define DATAFLOW_ITER 0 // Count iterations in lexical CSE and constant folding dataflow.
+#define DISPLAY_SIZES 0 // Display generated code, data, and GC information sizes.
+#define MEASURE_BLOCK_SIZE 0 // Collect stats about basic block and flowList node sizes and memory allocations.
+#define MEASURE_FATAL 0 // Count the number of calls to fatal(), including NYIs and noway_asserts.
+#define MEASURE_NODE_SIZE 0 // Collect stats about GenTree node allocations.
+#define MEASURE_PTRTAB_SIZE 0 // Collect stats about GC pointer table allocations.
+#define EMITTER_STATS 0 // Collect stats on the emitter.
+
+#define VERBOSE_SIZES 0 // Always display GC info sizes. If set, DISPLAY_SIZES must also be set.
+#define VERBOSE_VERIFY 0 // Dump additional information when verifying code. Useful to debug verification bugs.
#ifdef DEBUG
-#define MEASURE_MEM_ALLOC 1 // Collect memory allocation stats.
-#define LOOP_HOIST_STATS 1 // Collect loop hoisting stats.
+#define MEASURE_MEM_ALLOC 1 // Collect memory allocation stats.
+#define LOOP_HOIST_STATS 1 // Collect loop hoisting stats.
#else
-#define MEASURE_MEM_ALLOC 0 // You can set this to 1 to get memory stats in retail, as well
-#define LOOP_HOIST_STATS 0 // You can set this to 1 to get loop hoist stats in retail, as well
+#define MEASURE_MEM_ALLOC 0 // You can set this to 1 to get memory stats in retail, as well
+#define LOOP_HOIST_STATS 0 // You can set this to 1 to get loop hoist stats in retail, as well
#endif
/*****************************************************************************/
@@ -483,16 +485,16 @@ typedef ptrdiff_t ssize_t;
#endif
/*****************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************/
#define DUMPER
#else // !DEBUG
-#if DUMP_GC_TABLES
+#if DUMP_GC_TABLES
#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
-const bool dspGCtbls = true;
+const bool dspGCtbls = true;
#endif
/*****************************************************************************/
@@ -500,12 +502,30 @@ const bool dspGCtbls = true;
#ifdef DEBUG
void JitDump(const char* pcFormat, ...);
-#define JITDUMP(...) { if (JitTls::GetCompiler()->verbose) JitDump(__VA_ARGS__); }
-#define JITLOG(x) { JitLogEE x; }
-#define JITLOG_THIS(t, x) { (t)->JitLogEE x; }
-#define DBEXEC(flg, expr) if (flg) {expr;}
-#define DISPNODE(t) if (JitTls::GetCompiler()->verbose) JitTls::GetCompiler()->gtDispTree(t, nullptr, nullptr, true);
-#define DISPTREE(x) if (JitTls::GetCompiler()->verbose) JitTls::GetCompiler()->gtDispTree(x)
+#define JITDUMP(...) \
+ { \
+ if (JitTls::GetCompiler()->verbose) \
+ JitDump(__VA_ARGS__); \
+ }
+#define JITLOG(x) \
+ { \
+ JitLogEE x; \
+ }
+#define JITLOG_THIS(t, x) \
+ { \
+ (t)->JitLogEE x; \
+ }
+#define DBEXEC(flg, expr) \
+ if (flg) \
+ { \
+ expr; \
+ }
+#define DISPNODE(t) \
+ if (JitTls::GetCompiler()->verbose) \
+ JitTls::GetCompiler()->gtDispTree(t, nullptr, nullptr, true);
+#define DISPTREE(x) \
+ if (JitTls::GetCompiler()->verbose) \
+ JitTls::GetCompiler()->gtDispTree(x)
#define VERBOSE JitTls::GetCompiler()->verbose
#else // !DEBUG
#define JITDUMP(...)
@@ -526,14 +546,14 @@ void JitDump(const char* pcFormat, ...);
*/
#ifdef _TARGET_X86_
-#define DOUBLE_ALIGN 1 // permit the double alignment of ESP in prolog,
- // and permit the double alignment of local offsets
+#define DOUBLE_ALIGN 1 // permit the double alignment of ESP in prolog,
+ // and permit the double alignment of local offsets
#else
-#define DOUBLE_ALIGN 0 // no special handling for double alignment
+#define DOUBLE_ALIGN 0 // no special handling for double alignment
#endif
/*****************************************************************************/
-#ifdef DEBUG
-extern void _cdecl debugStop(const char *why, ...);
+#ifdef DEBUG
+extern void _cdecl debugStop(const char* why, ...);
#endif
/*****************************************************************************/
@@ -541,23 +561,23 @@ extern void _cdecl debugStop(const char *why, ...);
struct JitOptions
{
- const char* methodName; // Method to display output for
- const char* className; // Class to display output for
+ const char* methodName; // Method to display output for
+ const char* className; // Class to display output for
- double CGknob; // Tweakable knob for testing
- unsigned testMask; // Tweakable mask for testing
+ double CGknob; // Tweakable knob for testing
+ unsigned testMask; // Tweakable mask for testing
- JitOptions * lastDummyField; // Ensures instantiation uses right order of arguments
+ JitOptions* lastDummyField; // Ensures instantiation uses right order of arguments
};
-extern JitOptions jitOpts;
+extern JitOptions jitOpts;
/*****************************************************************************
*
* Returns a word filled with the JITs allocator CHK fill value.
*
*/
-template<typename T>
+template <typename T>
inline T UninitializedWord()
{
__int64 word = 0x0101010101010101LL * (JitConfig.JitDefaultFill() & 0xFF);
@@ -570,7 +590,7 @@ inline T UninitializedWord()
*
*/
-template<typename T>
+template <typename T>
inline bool IsUninitialized(T data)
{
return data == UninitializedWord<T>();
@@ -590,81 +610,68 @@ enum accessLevel
/*****************************************************************************/
-#define castto(var,typ) (*(typ *)&var)
+#define castto(var, typ) (*(typ*)&var)
-#define sizeto(typ,mem) (offsetof(typ, mem) + sizeof(((typ*)0)->mem))
+#define sizeto(typ, mem) (offsetof(typ, mem) + sizeof(((typ*)0)->mem))
/*****************************************************************************/
-#ifdef NO_MISALIGNED_ACCESS
+#ifdef NO_MISALIGNED_ACCESS
-#define MISALIGNED_RD_I2(src) \
- (*castto(src , char *) | \
- *castto(src+1, char *) << 8)
+#define MISALIGNED_RD_I2(src) (*castto(src, char*) | *castto(src + 1, char*) << 8)
-#define MISALIGNED_RD_U2(src) \
- (*castto(src , char *) | \
- *castto(src+1, char *) << 8)
+#define MISALIGNED_RD_U2(src) (*castto(src, char*) | *castto(src + 1, char*) << 8)
-#define MISALIGNED_WR_I2(dst, val) \
- *castto(dst , char *) = val; \
- *castto(dst+1, char *) = val >> 8;
+#define MISALIGNED_WR_I2(dst, val) \
+ *castto(dst, char*) = val; \
+ *castto(dst + 1, char*) = val >> 8;
-#define MISALIGNED_WR_I4(dst, val) \
- *castto(dst , char *) = val; \
- *castto(dst+1, char *) = val >> 8; \
- *castto(dst+2, char *) = val >> 16; \
- *castto(dst+3, char *) = val >> 24;
+#define MISALIGNED_WR_I4(dst, val) \
+ *castto(dst, char*) = val; \
+ *castto(dst + 1, char*) = val >> 8; \
+ *castto(dst + 2, char*) = val >> 16; \
+ *castto(dst + 3, char*) = val >> 24;
#else
-#define MISALIGNED_RD_I2(src) \
- (*castto(src , short *))
-#define MISALIGNED_RD_U2(src) \
- (*castto(src , unsigned short *))
+#define MISALIGNED_RD_I2(src) (*castto(src, short*))
+#define MISALIGNED_RD_U2(src) (*castto(src, unsigned short*))
-#define MISALIGNED_WR_I2(dst, val) \
- *castto(dst , short *) = val;
-#define MISALIGNED_WR_I4(dst, val) \
- *castto(dst , int *) = val;
+#define MISALIGNED_WR_I2(dst, val) *castto(dst, short*) = val;
+#define MISALIGNED_WR_I4(dst, val) *castto(dst, int*) = val;
-#define MISALIGNED_WR_ST(dst, val) \
- *castto(dst , ssize_t *) = val;
+#define MISALIGNED_WR_ST(dst, val) *castto(dst, ssize_t*) = val;
#endif
/*****************************************************************************/
-inline
-size_t roundUp(size_t size, size_t mult = sizeof(size_t))
+inline size_t roundUp(size_t size, size_t mult = sizeof(size_t))
{
- assert(mult && ((mult & (mult-1)) == 0)); // power of two test
+ assert(mult && ((mult & (mult - 1)) == 0)); // power of two test
- return (size + (mult - 1)) & ~(mult - 1);
+ return (size + (mult - 1)) & ~(mult - 1);
}
-inline
-size_t roundDn(size_t size, size_t mult = sizeof(size_t))
+inline size_t roundDn(size_t size, size_t mult = sizeof(size_t))
{
- assert(mult && ((mult & (mult-1)) == 0)); // power of two test
+ assert(mult && ((mult & (mult - 1)) == 0)); // power of two test
- return (size ) & ~(mult - 1);
+ return (size) & ~(mult - 1);
}
-inline
-unsigned int unsigned_abs(int x)
+inline unsigned int unsigned_abs(int x)
{
- return ((unsigned int) abs(x));
+ return ((unsigned int)abs(x));
}
#ifdef _TARGET_64BIT_
-inline
-size_t unsigned_abs(ssize_t x)
+inline size_t unsigned_abs(ssize_t x)
{
#ifndef FEATURE_PAL
- return ((size_t) abs(x));
-#else // !FEATURE_PAL
- return ((size_t) labs(x));
+ return ((size_t)abs(x));
+#else // !FEATURE_PAL
+ return ((size_t)labs(x));
#endif // !FEATURE_PAL
}
#endif // _TARGET_64BIT_
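A quick worked example of the rounding helpers above (illustrative, not part of the commit). Both rely on mult being a power of two, which the asserts enforce, so that ~(mult - 1) is a valid alignment mask.

// roundUp aligns up to the next multiple of 'mult'; roundDn truncates down.
size_t up   = roundUp(13, 8); // (13 + 7) & ~7 == 16
size_t down = roundDn(13, 8); // 13 & ~7       == 8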
@@ -685,29 +692,29 @@ public:
private:
void ensureAllocated();
- IAllocator* m_allocator;
- unsigned m_sizeCount;
+ IAllocator* m_allocator;
+ unsigned m_sizeCount;
const unsigned* const m_sizeTable;
- unsigned* m_counts;
+ unsigned* m_counts;
};
#endif // CALL_ARG_STATS || COUNT_BASIC_BLOCKS || COUNT_LOOPS || EMITTER_STATS || MEASURE_NODE_SIZE
/*****************************************************************************/
-#ifdef ICECAP
+#ifdef ICECAP
#include "icapexp.h"
#include "icapctrl.h"
#endif
/*****************************************************************************/
-#define SECURITY_CHECK 1
-#define VERIFY_IMPORTER 1
+#define SECURITY_CHECK 1
+#define VERIFY_IMPORTER 1
/*****************************************************************************/
#if !defined(RELOC_SUPPORT)
-#define RELOC_SUPPORT 1
+#define RELOC_SUPPORT 1
#endif
/*****************************************************************************/
@@ -718,8 +725,8 @@ private:
#if CHECK_STRUCT_PADDING
#pragma warning(push)
-#pragma warning(default:4820) // 'bytes' bytes padding added after construct 'member_name'
-#endif // CHECK_STRUCT_PADDING
+#pragma warning(default : 4820) // 'bytes' bytes padding added after construct 'member_name'
+#endif // CHECK_STRUCT_PADDING
#include "alloc.h"
#include "target.h"
@@ -728,91 +735,72 @@ private:
#ifdef FEATURE_CORECLR
// CoreCLR - enable tail call opt for the following IL pattern
-//
+//
// call someFunc
// jmp/jcc RetBlock
// ...
// RetBlock:
// ret
-#define FEATURE_TAILCALL_OPT_SHARED_RETURN 1
+#define FEATURE_TAILCALL_OPT_SHARED_RETURN 1
#else
-// Desktop: Keep this to zero as one of app-compat apps that is using GetCallingAssembly()
+// Desktop: Keep this to zero as one of app-compat apps that is using GetCallingAssembly()
// has an issue turning this ON.
//
// Refer to TF: Bug: 824625 and its associated regression TF Bug: 1113265
#define FEATURE_TAILCALL_OPT_SHARED_RETURN 0
-#endif //FEATURE_CORECLR
+#endif // FEATURE_CORECLR
#else // !FEATURE_TAILCALL_OPT
#define FEATURE_TAILCALL_OPT_SHARED_RETURN 0
#endif // !FEATURE_TAILCALL_OPT
-#define CLFLG_CODESIZE 0x00001
-#define CLFLG_CODESPEED 0x00002
-#define CLFLG_CSE 0x00004
-#define CLFLG_REGVAR 0x00008
-#define CLFLG_RNGCHKOPT 0x00010
-#define CLFLG_DEADASGN 0x00020
-#define CLFLG_CODEMOTION 0x00040
-#define CLFLG_QMARK 0x00080
-#define CLFLG_TREETRANS 0x00100
-#define CLFLG_INLINING 0x00200
-#define CLFLG_CONSTANTFOLD 0x00800
+#define CLFLG_CODESIZE 0x00001
+#define CLFLG_CODESPEED 0x00002
+#define CLFLG_CSE 0x00004
+#define CLFLG_REGVAR 0x00008
+#define CLFLG_RNGCHKOPT 0x00010
+#define CLFLG_DEADASGN 0x00020
+#define CLFLG_CODEMOTION 0x00040
+#define CLFLG_QMARK 0x00080
+#define CLFLG_TREETRANS 0x00100
+#define CLFLG_INLINING 0x00200
+#define CLFLG_CONSTANTFOLD 0x00800
#if FEATURE_STRUCTPROMOTE
-#define CLFLG_STRUCTPROMOTE 0x00400
+#define CLFLG_STRUCTPROMOTE 0x00400
#else
-#define CLFLG_STRUCTPROMOTE 0x00000
+#define CLFLG_STRUCTPROMOTE 0x00000
#endif
-#define CLFLG_MAXOPT (CLFLG_CSE | \
- CLFLG_REGVAR | \
- CLFLG_RNGCHKOPT | \
- CLFLG_DEADASGN | \
- CLFLG_CODEMOTION | \
- CLFLG_QMARK | \
- CLFLG_TREETRANS | \
- CLFLG_INLINING | \
- CLFLG_STRUCTPROMOTE | \
- CLFLG_CONSTANTFOLD)
+#define CLFLG_MAXOPT \
+ (CLFLG_CSE | CLFLG_REGVAR | CLFLG_RNGCHKOPT | CLFLG_DEADASGN | CLFLG_CODEMOTION | CLFLG_QMARK | CLFLG_TREETRANS | \
+ CLFLG_INLINING | CLFLG_STRUCTPROMOTE | CLFLG_CONSTANTFOLD)
-#define CLFLG_MINOPT (CLFLG_TREETRANS )
+#define CLFLG_MINOPT (CLFLG_TREETRANS)
-
-
-
-#define JIT_RESERVED_STACK 64 // Reserved for arguments of calls and hidden
- // pushes for finallys so that we don't
- // probe on every call site. See comment in
- // for CORINFO_STACKPROBE_DEPTH in corjit.h
+#define JIT_RESERVED_STACK 64 // Reserved for arguments of calls and hidden
+ // pushes for finallys so that we don't
+ // probe on every call site. See comment in
+ // for CORINFO_STACKPROBE_DEPTH in corjit.h
/*****************************************************************************/
-extern void dumpILBytes(const BYTE* const codeAddr,
- unsigned codeSize,
- unsigned alignSize);
+extern void dumpILBytes(const BYTE* const codeAddr, unsigned codeSize, unsigned alignSize);
-extern unsigned dumpSingleInstr(const BYTE* const codeAddr,
- IL_OFFSET offs,
- const char* prefix = nullptr);
+extern unsigned dumpSingleInstr(const BYTE* const codeAddr, IL_OFFSET offs, const char* prefix = nullptr);
-extern void dumpILRange(const BYTE* const codeAddr,
- unsigned codeSize); // in bytes
+extern void dumpILRange(const BYTE* const codeAddr, unsigned codeSize); // in bytes
/*****************************************************************************/
-
-
-
-extern int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd,
- CORINFO_MODULE_HANDLE classHnd,
- COMP_HANDLE compHnd,
- CORINFO_METHOD_INFO * methodInfo,
- void * * methodCodePtr,
- ULONG * methodCodeSize,
- CORJIT_FLAGS * compileFlags,
- void * inlineInfoPtr
- );
+extern int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd,
+ CORINFO_MODULE_HANDLE classHnd,
+ COMP_HANDLE compHnd,
+ CORINFO_METHOD_INFO* methodInfo,
+ void** methodCodePtr,
+ ULONG* methodCodeSize,
+ CORJIT_FLAGS* compileFlags,
+ void* inlineInfoPtr);
#ifdef _HOST_64BIT_
const size_t INVALID_POINTER_VALUE = 0xFEEDFACEABADF00D;
@@ -821,7 +809,7 @@ const size_t INVALID_POINTER_VALUE = 0xFEEDFACE;
#endif
// Constants for making sure size_t fit into smaller types.
-const size_t MAX_USHORT_SIZE_T = static_cast<size_t>(static_cast<unsigned short>(-1));
+const size_t MAX_USHORT_SIZE_T = static_cast<size_t>(static_cast<unsigned short>(-1));
const size_t MAX_UNSIGNED_SIZE_T = static_cast<size_t>(static_cast<unsigned>(-1));
// These assume 2's complement...
@@ -842,8 +830,8 @@ class JitTls
{
#ifdef DEBUG
Compiler* m_compiler;
- LogEnv m_logEnv;
- JitTls* m_next;
+ LogEnv m_logEnv;
+ JitTls* m_next;
#endif
public:
@@ -862,13 +850,13 @@ public:
#include "compiler.h"
-template<typename T>
+template <typename T>
T dspPtr(T p)
{
return (p == ZERO) ? ZERO : (JitTls::GetCompiler()->opts.dspDiffable ? T(0xD1FFAB1E) : p);
}
-template<typename T>
+template <typename T>
T dspOffset(T o)
{
return (o == ZERO) ? ZERO : (JitTls::GetCompiler()->opts.dspDiffable ? T(0xD1FFAB1E) : o);
@@ -876,13 +864,13 @@ T dspOffset(T o)
#else // !defined(DEBUG)
-template<typename T>
+template <typename T>
T dspPtr(T p)
{
return p;
}
-template<typename T>
+template <typename T>
T dspOffset(T o)
{
return o;
diff --git a/src/jit/jitconfig.cpp b/src/jit/jitconfig.cpp
index 7d946da3ac..9f0e226e3a 100644
--- a/src/jit/jitconfig.cpp
+++ b/src/jit/jitconfig.cpp
@@ -15,27 +15,33 @@ void JitConfigValues::MethodSet::initialize(const wchar_t* list, ICorJitHost* ho
{
assert(m_list == nullptr);
- enum State { NO_NAME, CLS_NAME, FUNC_NAME, ARG_LIST }; // parsing state machine
+ enum State
+ {
+ NO_NAME,
+ CLS_NAME,
+ FUNC_NAME,
+ ARG_LIST
+ }; // parsing state machine
- const char SEP_CHAR = ' '; // current character use to separate each entry
+    const char SEP_CHAR = ' '; // current character used to separate each entry
- wchar_t lastChar = '?'; // dummy
- int nameStart = -1; // Index of the start of the current class or method name
- MethodName currentName; // Buffer used while parsing the current entry
+ wchar_t lastChar = '?'; // dummy
+ int nameStart = -1; // Index of the start of the current class or method name
+ MethodName currentName; // Buffer used while parsing the current entry
MethodName** lastName = &m_names; // Last entry inserted into the list
- bool isQuoted = false;
+ bool isQuoted = false;
- currentName.m_next = nullptr;
+ currentName.m_next = nullptr;
currentName.m_methodNameStart = -1;
- currentName.m_methodNameLen = -1;
- currentName.m_classNameStart = -1;
- currentName.m_classNameLen = -1;
- currentName.m_numArgs = -1;
+ currentName.m_methodNameLen = -1;
+ currentName.m_classNameStart = -1;
+ currentName.m_classNameLen = -1;
+ currentName.m_numArgs = -1;
// Convert the input list to UTF-8
- int utf8ListLen = WszWideCharToMultiByte(CP_UTF8, 0, list, -1, NULL, 0, NULL, NULL);
- m_list = (char*)host->allocateMemory(utf8ListLen);
- if (WszWideCharToMultiByte(CP_UTF8, 0, list, -1, const_cast<LPSTR>(m_list), utf8ListLen, NULL, NULL) == 0)
+ int utf8ListLen = WszWideCharToMultiByte(CP_UTF8, 0, list, -1, nullptr, 0, nullptr, nullptr);
+ m_list = (char*)host->allocateMemory(utf8ListLen);
+ if (WszWideCharToMultiByte(CP_UTF8, 0, list, -1, const_cast<LPSTR>(m_list), utf8ListLen, nullptr, nullptr) == 0)
{
// Failed to convert the list. Free the memory and ignore the list.
host->freeMemory(reinterpret_cast<void*>(const_cast<char*>(m_list)));
@@ -48,164 +54,170 @@ void JitConfigValues::MethodSet::initialize(const wchar_t* list, ICorJitHost* ho
{
lastChar = m_list[i];
- switch(state)
+ switch (state)
{
- case NO_NAME:
- if (m_list[i] != SEP_CHAR)
- {
- nameStart = i;
- state = CLS_NAME; // we have found the start of the next entry
- }
- break;
-
- case CLS_NAME:
- if (m_list[nameStart] == '"')
- {
- for (; m_list[i] != '\0' && m_list[i] != '"'; i++)
- ;
-
- nameStart++;
- isQuoted = true;
- }
-
- if (m_list[i] == ':')
- {
- if (m_list[nameStart] == '*' && !isQuoted)
+ case NO_NAME:
+ if (m_list[i] != SEP_CHAR)
{
- // The class name is a wildcard; mark it invalid.
- currentName.m_classNameStart = -1;
- currentName.m_classNameLen = -1;
+ nameStart = i;
+ state = CLS_NAME; // we have found the start of the next entry
}
- else
- {
- currentName.m_classNameStart = nameStart;
- currentName.m_classNameLen = i - nameStart;
+ break;
- // Remove the trailing quote, if any
- if (isQuoted)
+ case CLS_NAME:
+ if (m_list[nameStart] == '"')
+ {
+ for (; m_list[i] != '\0' && m_list[i] != '"'; i++)
{
- currentName.m_classNameLen--;
- isQuoted = false;
+ ;
}
- }
- // Accept class::name syntax as well
- if (m_list[i + 1] == ':')
- {
- i++;
+ nameStart++;
+ isQuoted = true;
}
- nameStart = i + 1;
- state = FUNC_NAME;
- }
- else if (m_list[i] == '\0' || m_list[i] == SEP_CHAR || m_list[i] == '(')
- {
- // Treat this as a method name without a class name.
- currentName.m_classNameStart = -1;
- currentName.m_classNameLen = -1;
- goto DONE_FUNC_NAME;
- }
- break;
-
- case FUNC_NAME:
- if (m_list[nameStart] == '"')
- {
- // The first half of the outer contdition handles the case where the
- // class name is valid.
- for (; nameStart == i || (m_list[i] != '\0' && m_list[i] != '"'); i++)
- ;
-
- nameStart++;
- isQuoted = true;
- }
+ if (m_list[i] == ':')
+ {
+ if (m_list[nameStart] == '*' && !isQuoted)
+ {
+ // The class name is a wildcard; mark it invalid.
+ currentName.m_classNameStart = -1;
+ currentName.m_classNameLen = -1;
+ }
+ else
+ {
+ currentName.m_classNameStart = nameStart;
+ currentName.m_classNameLen = i - nameStart;
+
+ // Remove the trailing quote, if any
+ if (isQuoted)
+ {
+ currentName.m_classNameLen--;
+ isQuoted = false;
+ }
+ }
- if (m_list[i] == '\0' || m_list[i] == SEP_CHAR || m_list[i] == '(')
- {
- DONE_FUNC_NAME:
- assert(m_list[i] == '\0' || m_list[i] == SEP_CHAR || m_list[i] == '(');
+ // Accept class::name syntax as well
+ if (m_list[i + 1] == ':')
+ {
+ i++;
+ }
- if (m_list[nameStart] == '*' && !isQuoted)
- {
- // The method name is a wildcard; mark it invalid.
- currentName.m_methodNameStart = -1;
- currentName.m_methodNameLen = -1;
+ nameStart = i + 1;
+ state = FUNC_NAME;
}
- else
+ else if (m_list[i] == '\0' || m_list[i] == SEP_CHAR || m_list[i] == '(')
{
- currentName.m_methodNameStart = nameStart;
- currentName.m_methodNameLen = i - nameStart;
+ // Treat this as a method name without a class name.
+ currentName.m_classNameStart = -1;
+ currentName.m_classNameLen = -1;
+ goto DONE_FUNC_NAME;
+ }
+ break;
- // Remove the trailing quote, if any
- if (isQuoted)
+ case FUNC_NAME:
+ if (m_list[nameStart] == '"')
+ {
+                    // The first half of the outer condition handles the case where the
+ // class name is valid.
+ for (; nameStart == i || (m_list[i] != '\0' && m_list[i] != '"'); i++)
{
- currentName.m_classNameLen--;
- isQuoted = false;
+ ;
}
- }
- if (m_list[i] == '\0' || m_list[i] == SEP_CHAR)
- {
- currentName.m_numArgs = -1;
- goto DONE_ARG_LIST;
+ nameStart++;
+ isQuoted = true;
}
- else
+
+ if (m_list[i] == '\0' || m_list[i] == SEP_CHAR || m_list[i] == '(')
{
- assert(m_list[i] == '(');
- currentName.m_numArgs = -1;
- state = ARG_LIST;
+ DONE_FUNC_NAME:
+ assert(m_list[i] == '\0' || m_list[i] == SEP_CHAR || m_list[i] == '(');
+
+ if (m_list[nameStart] == '*' && !isQuoted)
+ {
+ // The method name is a wildcard; mark it invalid.
+ currentName.m_methodNameStart = -1;
+ currentName.m_methodNameLen = -1;
+ }
+ else
+ {
+ currentName.m_methodNameStart = nameStart;
+ currentName.m_methodNameLen = i - nameStart;
+
+ // Remove the trailing quote, if any
+ if (isQuoted)
+ {
+ currentName.m_classNameLen--;
+ isQuoted = false;
+ }
+ }
+
+ if (m_list[i] == '\0' || m_list[i] == SEP_CHAR)
+ {
+ currentName.m_numArgs = -1;
+ goto DONE_ARG_LIST;
+ }
+ else
+ {
+ assert(m_list[i] == '(');
+ currentName.m_numArgs = -1;
+ state = ARG_LIST;
+ }
}
- }
- break;
+ break;
- case ARG_LIST:
- if (m_list[i] == '\0' || m_list[i] == ')')
- {
- if (currentName.m_numArgs == -1)
+ case ARG_LIST:
+ if (m_list[i] == '\0' || m_list[i] == ')')
{
- currentName.m_numArgs = 0;
- }
+ if (currentName.m_numArgs == -1)
+ {
+ currentName.m_numArgs = 0;
+ }
- DONE_ARG_LIST:
- assert(m_list[i] == '\0' || m_list[i] == SEP_CHAR || m_list[i] == ')');
+ DONE_ARG_LIST:
+ assert(m_list[i] == '\0' || m_list[i] == SEP_CHAR || m_list[i] == ')');
- // We have parsed an entire method name; create a new entry in the list for it.
- MethodName* name = (MethodName*)host->allocateMemory(sizeof(MethodName));
- *name = currentName;
+ // We have parsed an entire method name; create a new entry in the list for it.
+ MethodName* name = (MethodName*)host->allocateMemory(sizeof(MethodName));
+ *name = currentName;
- assert(name->m_next == nullptr);
- *lastName = name;
- lastName = &name->m_next;
+ assert(name->m_next == nullptr);
+ *lastName = name;
+ lastName = &name->m_next;
- state = NO_NAME;
+ state = NO_NAME;
- // Skip anything after the argument list until we find the next
- // separator character. Otherwise if we see "func(a,b):foo" we
- // create entries for "func(a,b)" as well as ":foo".
- if (m_list[i] == ')')
- {
- for (; m_list[i] && m_list[i] != SEP_CHAR; i++)
- ;
+ // Skip anything after the argument list until we find the next
+ // separator character. Otherwise if we see "func(a,b):foo" we
+ // create entries for "func(a,b)" as well as ":foo".
+ if (m_list[i] == ')')
+ {
+ for (; m_list[i] && m_list[i] != SEP_CHAR; i++)
+ {
+ ;
+ }
- lastChar = m_list[i];
+ lastChar = m_list[i];
+ }
}
- }
- else
- {
- if (m_list[i] != SEP_CHAR && currentName.m_numArgs == -1)
+ else
{
- currentName.m_numArgs = 1;
- }
+ if (m_list[i] != SEP_CHAR && currentName.m_numArgs == -1)
+ {
+ currentName.m_numArgs = 1;
+ }
- if (m_list[i] == ',')
- {
- currentName.m_numArgs++;
+ if (m_list[i] == ',')
+ {
+ currentName.m_numArgs++;
+ }
}
- }
- break;
+ break;
- default:
- assert(!"Bad state");
- break;
+ default:
+ assert(!"Bad state");
+ break;
}
}
}
@@ -213,7 +225,7 @@ void JitConfigValues::MethodSet::initialize(const wchar_t* list, ICorJitHost* ho
void JitConfigValues::MethodSet::destroy(ICorJitHost* host)
{
// Free method names, free the list string, and reset our state
- for (MethodName* name = m_names, *next = nullptr; name != nullptr; name = next)
+ for (MethodName *name = m_names, *next = nullptr; name != nullptr; name = next)
{
next = name->m_next;
host->freeMemory(reinterpret_cast<void*>(const_cast<MethodName*>(name)));
@@ -222,7 +234,7 @@ void JitConfigValues::MethodSet::destroy(ICorJitHost* host)
host->freeMemory(reinterpret_cast<void*>(const_cast<char*>(m_list)));
m_names = nullptr;
- m_list = nullptr;
+ m_list = nullptr;
}
static bool matchesName(const char* const name, int nameLen, const char* const s2)
@@ -230,7 +242,9 @@ static bool matchesName(const char* const name, int nameLen, const char* const s
return strncmp(name, s2, nameLen) == 0 && s2[nameLen] == '\0';
}
-bool JitConfigValues::MethodSet::contains(const char* methodName, const char* className, CORINFO_SIG_INFO* sigInfo) const
+bool JitConfigValues::MethodSet::contains(const char* methodName,
+ const char* className,
+ CORINFO_SIG_INFO* sigInfo) const
{
int numArgs = sigInfo != nullptr ? sigInfo->numArgs : -1;
@@ -251,12 +265,13 @@ bool JitConfigValues::MethodSet::contains(const char* methodName, const char* cl
{
// C++ embeds the class name into the method name; deal with that here.
const char* colon = strchr(methodName, ':');
- if (colon != nullptr && colon[1] == ':' && matchesName(expectedMethodName, name->m_methodNameLen, methodName))
+ if (colon != nullptr && colon[1] == ':' &&
+ matchesName(expectedMethodName, name->m_methodNameLen, methodName))
{
int classLen = (int)(colon - methodName);
if (name->m_classNameStart == -1 ||
(classLen == name->m_classNameLen &&
- strncmp(&m_list[name->m_classNameStart], methodName, classLen) == 0))
+ strncmp(&m_list[name->m_classNameStart], methodName, classLen) == 0))
{
return true;
}
@@ -266,14 +281,14 @@ bool JitConfigValues::MethodSet::contains(const char* methodName, const char* cl
}
// If m_classNameStart is valid, check for a mismatch
- if (className == nullptr || name->m_classNameStart == -1 || matchesName(&m_list[name->m_classNameStart], name->m_classNameLen, className))
+ if (className == nullptr || name->m_classNameStart == -1 ||
+ matchesName(&m_list[name->m_classNameStart], name->m_classNameLen, className))
{
return true;
}
// Check for suffix wildcard like System.*
- if (name->m_classNameLen > 0 &&
- m_list[name->m_classNameStart + name->m_classNameLen - 1] == '*' &&
+ if (name->m_classNameLen > 0 && m_list[name->m_classNameStart + name->m_classNameLen - 1] == '*' &&
strncmp(&m_list[name->m_classNameStart], className, name->m_classNameLen - 1) == 0)
{
return true;
@@ -302,9 +317,9 @@ void JitConfigValues::initialize(ICorJitHost* host)
#define CONFIG_INTEGER(name, key, defaultValue) m_##name = host->getIntConfigValue(key, defaultValue);
#define CONFIG_STRING(name, key) m_##name = host->getStringConfigValue(key);
-#define CONFIG_METHODSET(name, key) \
- const wchar_t* name##value = host->getStringConfigValue(key); \
- m_##name.initialize(name##value, host); \
+#define CONFIG_METHODSET(name, key) \
+ const wchar_t* name##value = host->getStringConfigValue(key); \
+ m_##name.initialize(name##value, host); \
host->freeStringConfigValue(name##value);
#include "jitconfigvalues.h"
diff --git a/src/jit/jitconfig.h b/src/jit/jitconfig.h
index 076d920192..d5b4e30796 100644
--- a/src/jit/jitconfig.h
+++ b/src/jit/jitconfig.h
@@ -17,11 +17,11 @@ public:
struct MethodName
{
MethodName* m_next;
- int m_methodNameStart;
- int m_methodNameLen;
- int m_classNameStart;
- int m_classNameLen;
- int m_numArgs;
+ int m_methodNameStart;
+ int m_methodNameLen;
+ int m_classNameStart;
+ int m_classNameLen;
+ int m_numArgs;
};
const char* m_list;
@@ -31,26 +31,46 @@ public:
MethodSet& operator=(const MethodSet& other) = delete;
public:
- MethodSet() { }
- inline const char* list() const { return m_list; }
+ MethodSet()
+ {
+ }
+ inline const char* list() const
+ {
+ return m_list;
+ }
void initialize(const wchar_t* list, ICorJitHost* host);
void destroy(ICorJitHost* host);
- inline bool isEmpty() const { return m_names == nullptr; }
+ inline bool isEmpty() const
+ {
+ return m_names == nullptr;
+ }
bool contains(const char* methodName, const char* className, CORINFO_SIG_INFO* sigInfo) const;
};
private:
#define CONFIG_INTEGER(name, key, defaultValue) int m_##name;
#define CONFIG_STRING(name, key) const wchar_t* m_##name;
-#define CONFIG_METHODSET(name, key) MethodSet m_##name;
+#define CONFIG_METHODSET(name, key) MethodSet m_##name;
#include "jitconfigvalues.h"
public:
-#define CONFIG_INTEGER(name, key, defaultValue) inline int name() const { return m_##name; }
-#define CONFIG_STRING(name, key) inline const wchar_t* name() const { return m_##name; }
-#define CONFIG_METHODSET(name, key) inline const MethodSet& name() const { return m_##name; }
+#define CONFIG_INTEGER(name, key, defaultValue) \
+ inline int name() const \
+ { \
+ return m_##name; \
+ }
+#define CONFIG_STRING(name, key) \
+ inline const wchar_t* name() const \
+ { \
+ return m_##name; \
+ }
+#define CONFIG_METHODSET(name, key) \
+ inline const MethodSet& name() const \
+ { \
+ return m_##name; \
+ }
#include "jitconfigvalues.h"
private:
@@ -60,9 +80,14 @@ private:
JitConfigValues& operator=(const JitConfigValues& other) = delete;
public:
- JitConfigValues() {}
+ JitConfigValues()
+ {
+ }
- inline bool isInitialized() const { return m_isInitialized != 0; }
+ inline bool isInitialized() const
+ {
+ return m_isInitialized != 0;
+ }
void initialize(ICorJitHost* host);
void destroy(ICorJitHost* host);
};
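The accessor block above is an X-macro pattern: jitconfigvalues.h is included several times, and whichever CONFIG_INTEGER/CONFIG_STRING/CONFIG_METHODSET definitions are in effect decide what each entry becomes (a private member, an inline accessor, or an initialization statement in jitconfig.cpp). For illustration, the entry CONFIG_INTEGER(JitMinOpts, W("JITMinOpts"), 0) from jitconfigvalues.h expands roughly as follows under the definitions shown in this diff.

// Private-member pass (jitconfig.h):
int m_JitMinOpts;

// Accessor pass (jitconfig.h, after reformatting):
inline int JitMinOpts() const
{
    return m_JitMinOpts;
}

// initialize() pass (jitconfig.cpp):
m_JitMinOpts = host->getIntConfigValue(W("JITMinOpts"), 0);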
diff --git a/src/jit/jitconfigvalues.h b/src/jit/jitconfigvalues.h
index 44c3676ed7..6579817249 100644
--- a/src/jit/jitconfigvalues.h
+++ b/src/jit/jitconfigvalues.h
@@ -7,16 +7,22 @@
#endif // !defined(CONFIG_INTEGER) || !defined(CONFIG_STRING) || !defined(CONFIG_METHODSET)
#if defined(DEBUG)
-CONFIG_INTEGER(AltJitLimit, W("AltJitLimit"), 0) // Max number of functions to use altjit for (decimal)
-CONFIG_INTEGER(AltJitSkipOnAssert, W("AltJitSkipOnAssert"), 0) // If AltJit hits an assert, fall back to the fallback JIT. Useful in conjunction with COMPlus_ContinueOnAssert=1
-CONFIG_INTEGER(BreakOnDumpToken, W("BreakOnDumpToken"), 0xffffffff) // Breaks when using internal logging on a particular token value.
-CONFIG_INTEGER(DebugBreakOnVerificationFailure, W("DebugBreakOnVerificationFailure"), 0) // Halts the jit on verification failure
-CONFIG_INTEGER(DiffableDasm, W("JitDiffableDasm"), 0) // Make the disassembly diff-able
+CONFIG_INTEGER(AltJitLimit, W("AltJitLimit"), 0) // Max number of functions to use altjit for (decimal)
+CONFIG_INTEGER(AltJitSkipOnAssert, W("AltJitSkipOnAssert"), 0) // If AltJit hits an assert, fall back to the fallback
+ // JIT. Useful in conjunction with
+ // COMPlus_ContinueOnAssert=1
+CONFIG_INTEGER(BreakOnDumpToken, W("BreakOnDumpToken"), 0xffffffff) // Breaks when using internal logging on a
+ // particular token value.
+CONFIG_INTEGER(DebugBreakOnVerificationFailure, W("DebugBreakOnVerificationFailure"), 0) // Halts the jit on
+ // verification failure
+CONFIG_INTEGER(DiffableDasm, W("JitDiffableDasm"), 0) // Make the disassembly diff-able
CONFIG_INTEGER(DisplayLoopHoistStats, W("JitLoopHoistStats"), 0) // Display JIT loop hoisting statistics
-CONFIG_INTEGER(DisplayMemStats, W("JitMemStats"), 0) // Display JIT memory usage statistics
-CONFIG_INTEGER(DumpJittedMethods, W("DumpJittedMethods"), 0) // Prints all jitted methods to the console
-CONFIG_INTEGER(EnablePCRelAddr, W("JitEnablePCRelAddr"), 1) // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible
-CONFIG_INTEGER(InterpreterFallback, W("InterpreterFallback"), 0) // Fallback to the interpreter when the JIT compiler fails
+CONFIG_INTEGER(DisplayMemStats, W("JitMemStats"), 0) // Display JIT memory usage statistics
+CONFIG_INTEGER(DumpJittedMethods, W("DumpJittedMethods"), 0) // Prints all jitted methods to the console
+CONFIG_INTEGER(EnablePCRelAddr, W("JitEnablePCRelAddr"), 1) // Whether absolute addr be encoded as PC-rel offset by
+ // RyuJIT where possible
+CONFIG_INTEGER(InterpreterFallback, W("InterpreterFallback"), 0) // Fallback to the interpreter when the JIT compiler
+ // fails
CONFIG_INTEGER(JitAssertOnMaxRAPasses, W("JitAssertOnMaxRAPasses"), 0)
CONFIG_INTEGER(JitBreakEmitOutputInstr, W("JitBreakEmitOutputInstr"), -1)
CONFIG_INTEGER(JitBreakMorphTree, W("JitBreakMorphTree"), 0xffffffff)
@@ -25,35 +31,39 @@ CONFIG_INTEGER(JitBreakOnMinOpts, W("JITBreakOnMinOpts"), 0) // Halt if jit swit
CONFIG_INTEGER(JitBreakOnUnsafeCode, W("JitBreakOnUnsafeCode"), 0)
CONFIG_INTEGER(JitCanUseSSE2, W("JitCanUseSSE2"), -1)
CONFIG_INTEGER(JitCloneLoops, W("JitCloneLoops"), 1) // If 0, don't clone. Otherwise clone loops for optimizations.
-CONFIG_INTEGER(JitDebugLogLoopCloning, W("JitDebugLogLoopCloning"), 0) // In debug builds log places where loop cloning optimizations are performed on the fast path.
-CONFIG_INTEGER(JitDefaultFill, W("JitDefaultFill"), 0xff) // In debug builds, initialize the memory allocated by the nra with this byte.
+CONFIG_INTEGER(JitDebugLogLoopCloning, W("JitDebugLogLoopCloning"), 0) // In debug builds log places where loop cloning
+ // optimizations are performed on the fast path.
+CONFIG_INTEGER(JitDefaultFill, W("JitDefaultFill"), 0xff) // In debug builds, initialize the memory allocated by the nra
+ // with this byte.
CONFIG_INTEGER(JitDirectAlloc, W("JitDirectAlloc"), 0)
CONFIG_INTEGER(JitDoAssertionProp, W("JitDoAssertionProp"), 1) // Perform assertion propagation optimization
-CONFIG_INTEGER(JitDoCopyProp, W("JitDoCopyProp"), 1) // Perform copy propagation on variables that appear redundant
+CONFIG_INTEGER(JitDoCopyProp, W("JitDoCopyProp"), 1) // Perform copy propagation on variables that appear redundant
CONFIG_INTEGER(JitDoEarlyProp, W("JitDoEarlyProp"), 1) // Perform Early Value Propagataion
-CONFIG_INTEGER(JitDoLoopHoisting, W("JitDoLoopHoisting"), 1) // Perform loop hoisting on loop invariant values
+CONFIG_INTEGER(JitDoLoopHoisting, W("JitDoLoopHoisting"), 1) // Perform loop hoisting on loop invariant values
CONFIG_INTEGER(JitDoRangeAnalysis, W("JitDoRangeAnalysis"), 1) // Perform range check analysis
CONFIG_INTEGER(JitDoSsa, W("JitDoSsa"), 1) // Perform Static Single Assignment (SSA) numbering on the variables
CONFIG_INTEGER(JitDoValueNumber, W("JitDoValueNumber"), 1) // Perform value numbering on method expressions
CONFIG_INTEGER(JitDoubleAlign, W("JitDoubleAlign"), 1)
-CONFIG_INTEGER(JitDumpASCII, W("JitDumpASCII"), 1) // Uses only ASCII characters in tree dumps
-CONFIG_INTEGER(JitDumpFgDot, W("JitDumpFgDot"), 0) // Set to non-zero to emit Dot instead of Xml Flowgraph dump
+CONFIG_INTEGER(JitDumpASCII, W("JitDumpASCII"), 1) // Uses only ASCII characters in tree dumps
+CONFIG_INTEGER(JitDumpFgDot, W("JitDumpFgDot"), 0) // Set to non-zero to emit Dot instead of Xml Flowgraph dump
CONFIG_INTEGER(JitDumpTerseLsra, W("JitDumpTerseLsra"), 1) // Produce terse dump output for LSRA
-CONFIG_INTEGER(JitDumpToDebugger, W("JitDumpToDebugger"), 0) // Output JitDump output to the debugger
-CONFIG_INTEGER(JitDumpVerboseSsa, W("JitDumpVerboseSsa"), 0) // Produce especially verbose dump output for SSA
+CONFIG_INTEGER(JitDumpToDebugger, W("JitDumpToDebugger"), 0) // Output JitDump output to the debugger
+CONFIG_INTEGER(JitDumpVerboseSsa, W("JitDumpVerboseSsa"), 0) // Produce especially verbose dump output for SSA
CONFIG_INTEGER(JitDumpVerboseTrees, W("JitDumpVerboseTrees"), 0) // Enable more verbose tree dumps
CONFIG_INTEGER(JitEmitPrintRefRegs, W("JitEmitPrintRefRegs"), 0)
-CONFIG_INTEGER(JitExpensiveDebugCheckLevel, W("JitExpensiveDebugCheckLevel"), 0) // Level indicates how much checking beyond the default to do in debug builds (currently 1-2)
+CONFIG_INTEGER(JitExpensiveDebugCheckLevel, W("JitExpensiveDebugCheckLevel"), 0) // Level indicates how much checking
+ // beyond the default to do in debug
+ // builds (currently 1-2)
CONFIG_INTEGER(JitForceFallback, W("JitForceFallback"), 0) // Set to non-zero to test NOWAY assert by forcing a retry
CONFIG_INTEGER(JitForceVer, W("JitForceVer"), 0)
-CONFIG_INTEGER(JitFullyInt, W("JitFullyInt"), 0) // Forces Fully interruptable code
+CONFIG_INTEGER(JitFullyInt, W("JitFullyInt"), 0) // Forces Fully interruptable code
CONFIG_INTEGER(JitFunctionTrace, W("JitFunctionTrace"), 0) // If non-zero, print JIT start/end logging
CONFIG_INTEGER(JitGCChecks, W("JitGCChecks"), 0)
CONFIG_INTEGER(JitGCInfoLogging, W("JitGCInfoLogging"), 0) // If true, prints GCInfo-related output to standard output.
-CONFIG_INTEGER(JitHashBreak, W("JitHashBreak"), -1) // Same as JitBreak, but for a method hash
-CONFIG_INTEGER(JitHashDump, W("JitHashDump"), -1) // Same as JitDump, but for a method hash
-CONFIG_INTEGER(JitHashDumpIR, W("JitHashDumpIR"), -1) // Same as JitDumpIR, but for a method hash
-CONFIG_INTEGER(JitHashHalt, W("JitHashHalt"), -1) // Same as JitHalt, but for a method hash
+CONFIG_INTEGER(JitHashBreak, W("JitHashBreak"), -1) // Same as JitBreak, but for a method hash
+CONFIG_INTEGER(JitHashDump, W("JitHashDump"), -1) // Same as JitDump, but for a method hash
+CONFIG_INTEGER(JitHashDumpIR, W("JitHashDumpIR"), -1) // Same as JitDumpIR, but for a method hash
+CONFIG_INTEGER(JitHashHalt, W("JitHashHalt"), -1) // Same as JitHalt, but for a method hash
CONFIG_INTEGER(JitInlineAdditionalMultiplier, W("JitInlineAdditionalMultiplier"), 0)
CONFIG_INTEGER(JitInlinePrintStats, W("JitInlinePrintStats"), 0)
CONFIG_INTEGER(JitInlineSize, W("JITInlineSize"), DEFAULT_MAX_INLINE_SIZE)
@@ -61,18 +71,24 @@ CONFIG_INTEGER(JitInlineDepth, W("JITInlineDepth"), DEFAULT_MAX_INLINE_DEPTH)
CONFIG_INTEGER(JitLongAddress, W("JitLongAddress"), 0) // Force using the large pseudo instruction form for long address
CONFIG_INTEGER(JitMaxTempAssert, W("JITMaxTempAssert"), 1)
CONFIG_INTEGER(JitMaxUncheckedOffset, W("JitMaxUncheckedOffset"), 8)
-CONFIG_INTEGER(JitMinOpts, W("JITMinOpts"), 0) // Forces MinOpts
+CONFIG_INTEGER(JitMinOpts, W("JITMinOpts"), 0) // Forces MinOpts
CONFIG_INTEGER(JitMinOptsBbCount, W("JITMinOptsBbCount"), DEFAULT_MIN_OPTS_BB_COUNT) // Internal jit control of MinOpts
-CONFIG_INTEGER(JitMinOptsCodeSize, W("JITMinOptsCodeSize"), DEFAULT_MIN_OPTS_CODE_SIZE) // Internal jit control of MinOpts
-CONFIG_INTEGER(JitMinOptsInstrCount, W("JITMinOptsInstrCount"), DEFAULT_MIN_OPTS_INSTR_COUNT) // Internal jit control of MinOpts
-CONFIG_INTEGER(JitMinOptsLvNumCount, W("JITMinOptsLvNumcount"), DEFAULT_MIN_OPTS_LV_NUM_COUNT) // Internal jit control of MinOpts
-CONFIG_INTEGER(JitMinOptsLvRefCount, W("JITMinOptsLvRefcount"), DEFAULT_MIN_OPTS_LV_REF_COUNT) // Internal jit control of MinOpts
+CONFIG_INTEGER(JitMinOptsCodeSize, W("JITMinOptsCodeSize"), DEFAULT_MIN_OPTS_CODE_SIZE) // Internal jit control of
+ // MinOpts
+CONFIG_INTEGER(JitMinOptsInstrCount, W("JITMinOptsInstrCount"), DEFAULT_MIN_OPTS_INSTR_COUNT) // Internal jit control of
+ // MinOpts
+CONFIG_INTEGER(JitMinOptsLvNumCount, W("JITMinOptsLvNumcount"), DEFAULT_MIN_OPTS_LV_NUM_COUNT) // Internal jit control
+ // of MinOpts
+CONFIG_INTEGER(JitMinOptsLvRefCount, W("JITMinOptsLvRefcount"), DEFAULT_MIN_OPTS_LV_REF_COUNT) // Internal jit control
+ // of MinOpts
CONFIG_INTEGER(JitNoCMOV, W("JitNoCMOV"), 0)
CONFIG_INTEGER(JitNoCSE, W("JitNoCSE"), 0)
CONFIG_INTEGER(JitNoCSE2, W("JitNoCSE2"), 0)
-CONFIG_INTEGER(JitNoForceFallback, W("JitNoForceFallback"), 0) // Set to non-zero to prevent NOWAY assert testing. Overrides COMPlus_JitForceFallback and JIT stress flags.
+CONFIG_INTEGER(JitNoForceFallback, W("JitNoForceFallback"), 0) // Set to non-zero to prevent NOWAY assert testing.
+ // Overrides COMPlus_JitForceFallback and JIT stress
+ // flags.
CONFIG_INTEGER(JitNoHoist, W("JitNoHoist"), 0)
-CONFIG_INTEGER(JitNoInline, W("JitNoInline"), 0) // Disables inlining of all methods
+CONFIG_INTEGER(JitNoInline, W("JitNoInline"), 0) // Disables inlining of all methods
CONFIG_INTEGER(JitNoMemoryBarriers, W("JitNoMemoryBarriers"), 0) // If 1, don't generate memory barriers
CONFIG_INTEGER(JitNoRegLoc, W("JitNoRegLoc"), 0)
CONFIG_INTEGER(JitNoStructPromotion, W("JitNoStructPromotion"), 0) // Disables struct promotion in Jit32
@@ -85,21 +101,32 @@ CONFIG_INTEGER(JitRequired, W("JITRequired"), -1)
CONFIG_INTEGER(JitRoundFloat, W("JITRoundFloat"), DEFAULT_ROUND_LEVEL)
CONFIG_INTEGER(JitSkipArrayBoundCheck, W("JitSkipArrayBoundCheck"), 0)
CONFIG_INTEGER(JitSlowDebugChecksEnabled, W("JitSlowDebugChecksEnabled"), 1) // Turn on slow debug checks
-CONFIG_INTEGER(JitSplitFunctionSize, W("JitSplitFunctionSize"), 0) // On ARM, use this as the maximum function/funclet size for creating function fragments (and creating multiple RUNTIME_FUNCTION entries)
-CONFIG_INTEGER(JitSsaStress, W("JitSsaStress"), 0) // Perturb order of processing of blocks in SSA; 0 = no stress; 1 = use method hash; * = supplied value as random hash
+CONFIG_INTEGER(JitSplitFunctionSize, W("JitSplitFunctionSize"), 0) // On ARM, use this as the maximum function/funclet
+ // size for creating function fragments (and creating
+ // multiple RUNTIME_FUNCTION entries)
+CONFIG_INTEGER(JitSsaStress, W("JitSsaStress"), 0) // Perturb order of processing of blocks in SSA; 0 = no stress; 1 =
+ // use method hash; * = supplied value as random hash
CONFIG_INTEGER(JitStackChecks, W("JitStackChecks"), 0)
-CONFIG_INTEGER(JitStress, W("JitStress"), 0) // Internal Jit stress mode: 0 = no stress, 2 = all stress, other = vary stress based on a hash of the method and this value
-CONFIG_INTEGER(JitStressBBProf, W("JitStressBBProf"), 0) // Internal Jit stress mode
-CONFIG_INTEGER(JitStressBiasedCSE, W("JitStressBiasedCSE"), 0x101) // Internal Jit stress mode: decimal bias value between (0,100) to perform CSE on a candidate. 100% = All CSEs. 0% = 0 CSE. (> 100) means no stress.
-CONFIG_INTEGER(JitStressFP, W("JitStressFP"), 0) // Internal Jit stress mode
-CONFIG_INTEGER(JitStressModeNamesOnly, W("JitStressModeNamesOnly"), 0) // Internal Jit stress: if nonzero, only enable stress modes listed in JitStressModeNames
+CONFIG_INTEGER(JitStress, W("JitStress"), 0) // Internal Jit stress mode: 0 = no stress, 2 = all stress, other = vary
+ // stress based on a hash of the method and this value
+CONFIG_INTEGER(JitStressBBProf, W("JitStressBBProf"), 0) // Internal Jit stress mode
+CONFIG_INTEGER(JitStressBiasedCSE, W("JitStressBiasedCSE"), 0x101) // Internal Jit stress mode: decimal bias value
+ // between (0,100) to perform CSE on a candidate.
+ // 100% = All CSEs. 0% = 0 CSE. (> 100) means no
+ // stress.
+CONFIG_INTEGER(JitStressFP, W("JitStressFP"), 0) // Internal Jit stress mode
+CONFIG_INTEGER(JitStressModeNamesOnly, W("JitStressModeNamesOnly"), 0) // Internal Jit stress: if nonzero, only enable
+ // stress modes listed in JitStressModeNames
CONFIG_INTEGER(JitStressRegs, W("JitStressRegs"), 0)
CONFIG_INTEGER(JitStrictCheckForNonVirtualCallToVirtualMethod, W("JitStrictCheckForNonVirtualCallToVirtualMethod"), 1)
-CONFIG_INTEGER(JitVNMapSelLimit, W("JitVNMapSelLimit"), 0) // If non-zero, assert if # of VNF_MapSelect applications considered reaches this
-CONFIG_INTEGER(NgenHashDump, W("NgenHashDump"), -1) // same as JitHashDump, but for ngen
-CONFIG_INTEGER(NgenHashDumpIR, W("NgenHashDumpIR"), -1) // same as JitHashDumpIR, but for ngen
+CONFIG_INTEGER(JitVNMapSelLimit, W("JitVNMapSelLimit"), 0) // If non-zero, assert if # of VNF_MapSelect applications
+ // considered reaches this
+CONFIG_INTEGER(NgenHashDump, W("NgenHashDump"), -1) // same as JitHashDump, but for ngen
+CONFIG_INTEGER(NgenHashDumpIR, W("NgenHashDumpIR"), -1) // same as JitHashDumpIR, but for ngen
CONFIG_INTEGER(NgenOrder, W("NgenOrder"), 0)
-CONFIG_INTEGER(RunAltJitCode, W("RunAltJitCode"), 1) // If non-zero, and the compilation succeeds for an AltJit, then use the code. If zero, then we always throw away the generated code and fall back to the default compiler.
+CONFIG_INTEGER(RunAltJitCode, W("RunAltJitCode"), 1) // If non-zero, and the compilation succeeds for an AltJit, then
+ // use the code. If zero, then we always throw away the generated
+ // code and fall back to the default compiler.
CONFIG_INTEGER(RunComponentUnitTests, W("JitComponentUnitTests"), 0) // Run JIT component unit tests
CONFIG_INTEGER(ShouldInjectFault, W("InjectFault"), 0)
CONFIG_INTEGER(StackProbesOverride, W("JitStackProbes"), 0)
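
For orientation, a minimal sketch of how the JitStress semantics documented above (0 = no stress, 2 = all stress, any other value = vary stress by a hash of the method and the value) might be consumed. The JitConfig.JitStress() accessor shape is an assumption based on the CONFIG_INTEGER macro (see jitconfig.h), not something taken from this diff.

    int  stressLevel      = JitConfig.JitStress(); // assumed accessor generated by CONFIG_INTEGER
    bool stressEverything = (stressLevel == 2);
    bool stressNothing    = (stressLevel == 0);
    // any other value: stress is applied selectively, keyed off a hash of the
    // method combined with stressLevel (per the knob's comment above)
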
@@ -109,7 +136,7 @@ CONFIG_INTEGER(TreesBeforeAfterMorph, W("JitDumpBeforeAfterMorph"), 0) // If 1,
CONFIG_METHODSET(JitBreak, W("JitBreak")) // Stops in the importer when compiling a specified method
CONFIG_METHODSET(JitDebugBreak, W("JitDebugBreak"))
CONFIG_METHODSET(JitDisasm, W("JitDisasm")) // Dumps disassembly for specified method
-CONFIG_METHODSET(JitDump, W("JitDump")) // Dumps trees for specified method
+CONFIG_METHODSET(JitDump, W("JitDump")) // Dumps trees for specified method
CONFIG_METHODSET(JitDumpIR, W("JitDumpIR")) // Dumps trees (in linear IR form) for specified method
CONFIG_METHODSET(JitEHDump, W("JitEHDump")) // Dump the EH table for the method, as reported to the VM
CONFIG_METHODSET(JitExclude, W("JitExclude"))
@@ -119,81 +146,98 @@ CONFIG_METHODSET(JitHalt, W("JitHalt")) // Emits break instruction into jitted c
CONFIG_METHODSET(JitImportBreak, W("JitImportBreak"))
CONFIG_METHODSET(JitInclude, W("JitInclude"))
CONFIG_METHODSET(JitLateDisasm, W("JitLateDisasm"))
-CONFIG_METHODSET(JitMinOptsName, W("JITMinOptsName")) // Forces MinOpts for a named function
+CONFIG_METHODSET(JitMinOptsName, W("JITMinOptsName")) // Forces MinOpts for a named function
CONFIG_METHODSET(JitNoProcedureSplitting, W("JitNoProcedureSplitting")) // Disallow procedure splitting for specified
// methods
-CONFIG_METHODSET(JitNoProcedureSplittingEH, W("JitNoProcedureSplittingEH")) // Disallow procedure splitting for specified methods if they contain exception handling
+CONFIG_METHODSET(JitNoProcedureSplittingEH, W("JitNoProcedureSplittingEH")) // Disallow procedure splitting for
+ // specified methods if they contain
+ // exception handling
CONFIG_METHODSET(JitStressOnly, W("JitStressOnly")) // Internal Jit stress mode: stress only the specified method(s)
CONFIG_METHODSET(JitUnwindDump, W("JitUnwindDump")) // Dump the unwind codes for the method
-CONFIG_METHODSET(NgenDisasm, W("NgenDisasm")) // Same as JitDisasm, but for ngen
-CONFIG_METHODSET(NgenDump, W("NgenDump")) // Same as JitDump, but for ngen
-CONFIG_METHODSET(NgenDumpIR, W("NgenDumpIR")) // Same as JitDumpIR, but for ngen
-CONFIG_METHODSET(NgenEHDump, W("NgenEHDump")) // Dump the EH table for the method, as reported to the VM
+CONFIG_METHODSET(NgenDisasm, W("NgenDisasm")) // Same as JitDisasm, but for ngen
+CONFIG_METHODSET(NgenDump, W("NgenDump")) // Same as JitDump, but for ngen
+CONFIG_METHODSET(NgenDumpIR, W("NgenDumpIR")) // Same as JitDumpIR, but for ngen
+CONFIG_METHODSET(NgenEHDump, W("NgenEHDump")) // Dump the EH table for the method, as reported to the VM
CONFIG_METHODSET(NgenGCDump, W("NgenGCDump"))
CONFIG_METHODSET(NgenUnwindDump, W("NgenUnwindDump")) // Dump the unwind codes for the method
-CONFIG_STRING(JitDumpFg, W("JitDumpFg")) // Dumps Xml/Dot Flowgraph for specified method
-CONFIG_STRING(JitDumpFgDir, W("JitDumpFgDir")) // Directory for Xml/Dot flowgraph dump(s)
-CONFIG_STRING(JitDumpFgFile, W("JitDumpFgFile")) // Filename for Xml/Dot flowgraph dump(s)
-CONFIG_STRING(JitDumpFgPhase, W("JitDumpFgPhase")) // Phase-based Xml/Dot flowgraph support. Set to the short name of a phase to see the flowgraph after that phase. Leave unset to dump after COLD-BLK (determine first cold block) or set to * for all phases
-CONFIG_STRING(JitDumpIRFormat, W("JitDumpIRFormat")) // Comma separated format control for JitDumpIR, values = {types | locals | ssa | valnums | kinds | flags | nodes | nolists | nostmts | noleafs | trees | dataflow}
-CONFIG_STRING(JitDumpIRPhase, W("JitDumpIRPhase")) // Phase control for JitDumpIR, values = {* | phasename}
+CONFIG_STRING(JitDumpFg, W("JitDumpFg")) // Dumps Xml/Dot Flowgraph for specified method
+CONFIG_STRING(JitDumpFgDir, W("JitDumpFgDir")) // Directory for Xml/Dot flowgraph dump(s)
+CONFIG_STRING(JitDumpFgFile, W("JitDumpFgFile")) // Filename for Xml/Dot flowgraph dump(s)
+CONFIG_STRING(JitDumpFgPhase, W("JitDumpFgPhase")) // Phase-based Xml/Dot flowgraph support. Set to the short name of a
+ // phase to see the flowgraph after that phase. Leave unset to dump
+ // after COLD-BLK (determine first cold block) or set to * for all
+ // phases
+CONFIG_STRING(JitDumpIRFormat, W("JitDumpIRFormat")) // Comma separated format control for JitDumpIR, values = {types |
+ // locals | ssa | valnums | kinds | flags | nodes | nolists |
+ // nostmts | noleafs | trees | dataflow}
+CONFIG_STRING(JitDumpIRPhase, W("JitDumpIRPhase")) // Phase control for JitDumpIR, values = {* | phasename}
CONFIG_STRING(JitLateDisasmTo, W("JITLateDisasmTo"))
CONFIG_STRING(JitRange, W("JitRange"))
-CONFIG_STRING(JitStressModeNames, W("JitStressModeNames")) // Internal Jit stress mode: stress using the given set of stress mode names, e.g. STRESS_REGS, STRESS_TAILCALL
-CONFIG_STRING(JitStressModeNamesNot, W("JitStressModeNamesNot")) // Internal Jit stress mode: do NOT stress using the given set of stress mode names, e.g. STRESS_REGS, STRESS_TAILCALL
-CONFIG_STRING(JitStressRange, W("JitStressRange")) // Internal Jit stress mode
-CONFIG_STRING(NgenDumpFg, W("NgenDumpFg")) // Ngen Xml Flowgraph support
-CONFIG_STRING(NgenDumpFgDir, W("NgenDumpFgDir")) // Ngen Xml Flowgraph support
-CONFIG_STRING(NgenDumpFgFile, W("NgenDumpFgFile")) // Ngen Xml Flowgraph support
-CONFIG_STRING(NgenDumpIRFormat, W("NgenDumpIRFormat")) // Same as JitDumpIRFormat, but for ngen
-CONFIG_STRING(NgenDumpIRPhase, W("NgenDumpIRPhase")) // Same as JitDumpIRPhase, but for ngen
-#endif // defined(DEBUG)
+CONFIG_STRING(JitStressModeNames, W("JitStressModeNames")) // Internal Jit stress mode: stress using the given set of
+ // stress mode names, e.g. STRESS_REGS, STRESS_TAILCALL
+CONFIG_STRING(JitStressModeNamesNot, W("JitStressModeNamesNot")) // Internal Jit stress mode: do NOT stress using the
+ // given set of stress mode names, e.g. STRESS_REGS,
+ // STRESS_TAILCALL
+CONFIG_STRING(JitStressRange, W("JitStressRange")) // Internal Jit stress mode
+CONFIG_STRING(NgenDumpFg, W("NgenDumpFg")) // Ngen Xml Flowgraph support
+CONFIG_STRING(NgenDumpFgDir, W("NgenDumpFgDir")) // Ngen Xml Flowgraph support
+CONFIG_STRING(NgenDumpFgFile, W("NgenDumpFgFile")) // Ngen Xml Flowgraph support
+CONFIG_STRING(NgenDumpIRFormat, W("NgenDumpIRFormat")) // Same as JitDumpIRFormat, but for ngen
+CONFIG_STRING(NgenDumpIRPhase, W("NgenDumpIRPhase")) // Same as JitDumpIRPhase, but for ngen
+#endif // defined(DEBUG)
-// AltJitAssertOnNYI should be 0 on targets where JIT is under developement or bring up stage, so as to facilitate fallback to main JIT on hitting a NYI.
+// AltJitAssertOnNYI should be 0 on targets where JIT is under development or bring-up stage, so as to facilitate
+// fallback to main JIT on hitting a NYI.
#if defined(_TARGET_ARM64_) || defined(_TARGET_X86_)
CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 0) // Controls the AltJit behavior of NYI stuff
-#else // !defined(_TARGET_ARM64_) && !defined(_TARGET_X86_)
+#else // !defined(_TARGET_ARM64_) && !defined(_TARGET_X86_)
CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 1) // Controls the AltJit behavior of NYI stuff
-#endif // defined(_TARGET_ARM64_) || defined(_TARGET_X86_)
+#endif // defined(_TARGET_ARM64_) || defined(_TARGET_X86_)
#if defined(_TARGET_AMD64_)
CONFIG_INTEGER(EnableAVX, W("EnableAVX"), 1) // Enable AVX instruction set for wide operations as default
-#else // !defined(_TARGET_AMD64_)
-CONFIG_INTEGER(EnableAVX, W("EnableAVX"), 0) // Enable AVX instruction set for wide operations as default
-#endif // defined(_TARGET_AMD64_)
+#else // !defined(_TARGET_AMD64_)
+CONFIG_INTEGER(EnableAVX, W("EnableAVX"), 0) // Enable AVX instruction set for wide operations as default
+#endif // defined(_TARGET_AMD64_)
#if !defined(DEBUG) && !defined(_DEBUG)
CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 0)
-#else // defined(DEBUG) || defined(_DEBUG)
+#else // defined(DEBUG) || defined(_DEBUG)
CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 1)
#endif // !defined(DEBUG) && !defined(_DEBUG)
CONFIG_INTEGER(JitAggressiveInlining, W("JitAggressiveInlining"), 0) // Aggressive inlining of all methods
-CONFIG_INTEGER(JitELTHookEnabled, W("JitELTHookEnabled"), 0) // On ARM, setting this will emit Enter/Leave/TailCall callbacks
+CONFIG_INTEGER(JitELTHookEnabled, W("JitELTHookEnabled"), 0) // On ARM, setting this will emit Enter/Leave/TailCall
+ // callbacks
CONFIG_INTEGER(JitInlineSIMDMultiplier, W("JitInlineSIMDMultiplier"), 3)
#if defined(FEATURE_ENABLE_NO_RANGE_CHECKS)
CONFIG_INTEGER(JitNoRngChks, W("JitNoRngChks"), 0) // If 1, don't generate range checks
-#endif // defined(FEATURE_ENABLE_NO_RANGE_CHECKS)
+#endif // defined(FEATURE_ENABLE_NO_RANGE_CHECKS)
-CONFIG_INTEGER(JitRegisterFP, W("JitRegisterFP"), 3) // Control FP enregistration
-CONFIG_INTEGER(JitTelemetry, W("JitTelemetry"), 1) // If non-zero, gather JIT telemetry data
-CONFIG_INTEGER(JitVNMapSelBudget, W("JitVNMapSelBudget"), 100) // Max # of MapSelect's considered for a particular top-level invocation.
-CONFIG_INTEGER(TailCallLoopOpt, W("TailCallLoopOpt"), 1) // Convert recursive tail calls to loops
+CONFIG_INTEGER(JitRegisterFP, W("JitRegisterFP"), 3) // Control FP enregistration
+CONFIG_INTEGER(JitTelemetry, W("JitTelemetry"), 1) // If non-zero, gather JIT telemetry data
+CONFIG_INTEGER(JitVNMapSelBudget, W("JitVNMapSelBudget"), 100) // Max # of MapSelect's considered for a particular
+ // top-level invocation.
+CONFIG_INTEGER(TailCallLoopOpt, W("TailCallLoopOpt"), 1) // Convert recursive tail calls to loops
CONFIG_METHODSET(AltJit, W("AltJit")) // Enables AltJit and selectively limits it to the specified methods.
-CONFIG_METHODSET(AltJitNgen, W("AltJitNgen")) // Enables AltJit for NGEN and selectively limits it to the specified methods.
+CONFIG_METHODSET(AltJitNgen,
+ W("AltJitNgen")) // Enables AltJit for NGEN and selectively limits it to the specified methods.
#if defined(ALT_JIT)
-CONFIG_STRING(AltJitExcludeAssemblies, W("AltJitExcludeAssemblies")) // Do not use AltJit on this semicolon-delimited list of assemblies.
-#endif // defined(ALT_JIT)
+CONFIG_STRING(AltJitExcludeAssemblies,
+ W("AltJitExcludeAssemblies")) // Do not use AltJit on this semicolon-delimited list of assemblies.
+#endif // defined(ALT_JIT)
CONFIG_STRING(JitFuncInfoFile, W("JitFuncInfoLogFile")) // If set, gather JIT function info and write to this file.
-CONFIG_STRING(JitTimeLogCsv, W("JitTimeLogCsv")) // If set, gather JIT throughput data and write to a CSV file. This mode must be used in internal retail builds.
+CONFIG_STRING(JitTimeLogCsv, W("JitTimeLogCsv")) // If set, gather JIT throughput data and write to a CSV file. This
+ // mode must be used in internal retail builds.
CONFIG_STRING(TailCallOpt, W("TailCallOpt"))
#if defined(DEBUG) || defined(INLINE_DATA)
CONFIG_INTEGER(JitInlineDumpData, W("JitInlineDumpData"), 0)
-CONFIG_INTEGER(JitInlineDumpXml, W("JitInlineDumpXml"), 0) // 1 = full xml (all methods), 2 = minimal xml (only method with inlines)
+CONFIG_INTEGER(JitInlineDumpXml, W("JitInlineDumpXml"), 0) // 1 = full xml (all methods), 2 = minimal xml (only method
+ // with inlines)
CONFIG_INTEGER(JitInlineLimit, W("JitInlineLimit"), -1)
CONFIG_INTEGER(JitInlinePolicyDiscretionary, W("JitInlinePolicyDiscretionary"), 0)
CONFIG_INTEGER(JitInlinePolicyFull, W("JitInlinePolicyFull"), 0)
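
A hedged sketch of what the three macro families used above are assumed to expand to (the real definitions live in src/jit/jitconfig.h and may differ). The point is that the first argument becomes the in-JIT accessor name while the W("...") string is the external lookup key, and the two do not always match (e.g. RunComponentUnitTests is keyed by JitComponentUnitTests).

    // Assumed shape, for illustration only:
    //   CONFIG_INTEGER(name, key, defaultValue) -> int            JitConfig.name()
    //   CONFIG_STRING(name, key)                -> const wchar_t* JitConfig.name()
    //   CONFIG_METHODSET(name, key)             -> a method-name matcher queried by the JIT
    // so JitConfig.JitTelemetry() would return whatever value is bound to W("JitTelemetry").
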
diff --git a/src/jit/jiteh.cpp b/src/jit/jiteh.cpp
index 8098992356..f6ce7b839e 100644
--- a/src/jit/jiteh.cpp
+++ b/src/jit/jiteh.cpp
@@ -50,7 +50,7 @@ BasicBlock* EHblkDsc::ExFlowBlock()
bool EHblkDsc::InTryRegionILRange(BasicBlock* pBlk)
{
- // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function
+ // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function
// should only be used before any BBF_INTERNAL blocks have been added.
assert(!(pBlk->bbFlags & BBF_INTERNAL));
@@ -59,17 +59,16 @@ bool EHblkDsc::InTryRegionILRange(BasicBlock* pBlk)
bool EHblkDsc::InFilterRegionILRange(BasicBlock* pBlk)
{
- // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function
+ // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function
// should only be used before any BBF_INTERNAL blocks have been added.
assert(!(pBlk->bbFlags & BBF_INTERNAL));
- return HasFilter() &&
- Compiler::jitIsBetween(pBlk->bbCodeOffs, ebdFilterBegOffs(), ebdFilterEndOffs());
+ return HasFilter() && Compiler::jitIsBetween(pBlk->bbCodeOffs, ebdFilterBegOffs(), ebdFilterEndOffs());
}
bool EHblkDsc::InHndRegionILRange(BasicBlock* pBlk)
{
- // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function
+ // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function
// should only be used before any BBF_INTERNAL blocks have been added.
assert(!(pBlk->bbFlags & BBF_INTERNAL));
@@ -79,8 +78,7 @@ bool EHblkDsc::InHndRegionILRange(BasicBlock* pBlk)
// HasCatchHandler: returns 'true' for either try/catch, or try/filter/filter-handler.
bool EHblkDsc::HasCatchHandler()
{
- return (ebdHandlerType == EH_HANDLER_CATCH) ||
- (ebdHandlerType == EH_HANDLER_FILTER);
+ return (ebdHandlerType == EH_HANDLER_CATCH) || (ebdHandlerType == EH_HANDLER_FILTER);
}
bool EHblkDsc::HasFilter()
@@ -113,7 +111,9 @@ bool EHblkDsc::InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd)
for (BasicBlock* pWalk = pStart; pWalk != pEnd; pWalk = pWalk->bbNext)
{
if (pWalk == pBlk)
+ {
return true;
+ }
}
return false;
}
@@ -179,71 +179,68 @@ unsigned EHblkDsc::ebdGetEnclosingRegionIndex(bool* inTryRegion)
// That being said, the IL offsets in the EH table should only be examined early,
// during importing. After importing, use block info instead.
-IL_OFFSET EHblkDsc::ebdTryBegOffs()
+IL_OFFSET EHblkDsc::ebdTryBegOffs()
{
return ebdTryBegOffset;
}
-IL_OFFSET EHblkDsc::ebdTryEndOffs()
+IL_OFFSET EHblkDsc::ebdTryEndOffs()
{
return ebdTryEndOffset;
}
-IL_OFFSET EHblkDsc::ebdHndBegOffs()
+IL_OFFSET EHblkDsc::ebdHndBegOffs()
{
return ebdHndBegOffset;
}
-IL_OFFSET EHblkDsc::ebdHndEndOffs()
+IL_OFFSET EHblkDsc::ebdHndEndOffs()
{
return ebdHndEndOffset;
}
-IL_OFFSET EHblkDsc::ebdFilterBegOffs()
+IL_OFFSET EHblkDsc::ebdFilterBegOffs()
{
assert(HasFilter());
return ebdFilterBegOffset;
}
-IL_OFFSET EHblkDsc::ebdFilterEndOffs()
+IL_OFFSET EHblkDsc::ebdFilterEndOffs()
{
assert(HasFilter());
return ebdHndBegOffs(); // end of filter is beginning of handler
}
/* static */
-bool EHblkDsc::ebdIsSameILTry(EHblkDsc* h1, EHblkDsc* h2)
+bool EHblkDsc::ebdIsSameILTry(EHblkDsc* h1, EHblkDsc* h2)
{
- return ((h1->ebdTryBegOffset == h2->ebdTryBegOffset) &&
- (h1->ebdTryEndOffset == h2->ebdTryEndOffset));
+ return ((h1->ebdTryBegOffset == h2->ebdTryBegOffset) && (h1->ebdTryEndOffset == h2->ebdTryEndOffset));
}
/*****************************************************************************/
/* static */
-bool EHblkDsc::ebdIsSameTry(EHblkDsc* h1, EHblkDsc* h2)
+bool EHblkDsc::ebdIsSameTry(EHblkDsc* h1, EHblkDsc* h2)
{
- return ((h1->ebdTryBeg == h2->ebdTryBeg) &&
- (h1->ebdTryLast == h2->ebdTryLast));
+ return ((h1->ebdTryBeg == h2->ebdTryBeg) && (h1->ebdTryLast == h2->ebdTryLast));
}
-bool EHblkDsc::ebdIsSameTry(Compiler* comp, unsigned t2)
+bool EHblkDsc::ebdIsSameTry(Compiler* comp, unsigned t2)
{
EHblkDsc* h2 = comp->ehGetDsc(t2);
return ebdIsSameTry(this, h2);
}
-bool EHblkDsc::ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast)
+bool EHblkDsc::ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast)
{
- return ((this->ebdTryBeg == ebdTryBeg) &&
- (this->ebdTryLast == ebdTryLast));
+ return ((this->ebdTryBeg == ebdTryBeg) && (this->ebdTryLast == ebdTryLast));
}
/*****************************************************************************/
#ifdef DEBUG
/*****************************************************************************/
-void EHblkDsc::DispEntry(unsigned XTnum)
+void EHblkDsc::DispEntry(unsigned XTnum)
{
printf(" %2u ::", XTnum);
@@ -273,13 +270,10 @@ void EHblkDsc::DispEntry(unsigned XTnum)
////////////// Protected (try) region
//////////////
- printf("- Try at BB%02u..BB%02u",
- ebdTryBeg->bbNum,
- ebdTryLast->bbNum);
+ printf("- Try at BB%02u..BB%02u", ebdTryBeg->bbNum, ebdTryLast->bbNum);
/* ( brace matching editor workaround to compensate for the following line */
printf(" [%03X..%03X), ", ebdTryBegOffset, ebdTryEndOffset);
-
//////////////
////////////// Filter region
@@ -288,10 +282,7 @@ void EHblkDsc::DispEntry(unsigned XTnum)
if (HasFilter())
{
/* ( brace matching editor workaround to compensate for the following line */
- printf("Filter at BB%02u..BB%02u [%03X..%03X), ",
- ebdFilter->bbNum,
- BBFilterLast()->bbNum,
- ebdFilterBegOffset,
+ printf("Filter at BB%02u..BB%02u [%03X..%03X), ", ebdFilter->bbNum, BBFilterLast()->bbNum, ebdFilterBegOffset,
ebdHndBegOffset);
}
@@ -312,9 +303,7 @@ void EHblkDsc::DispEntry(unsigned XTnum)
printf("Handler");
}
- printf(" at BB%02u..BB%02u",
- ebdHndBeg->bbNum,
- ebdHndLast->bbNum);
+ printf(" at BB%02u..BB%02u", ebdHndBeg->bbNum, ebdHndLast->bbNum);
/* ( brace matching editor workaround to compensate for the following line */
printf(" [%03X..%03X)", ebdHndBegOffset, ebdHndEndOffset);
@@ -326,7 +315,6 @@ void EHblkDsc::DispEntry(unsigned XTnum)
#endif // DEBUG
/*****************************************************************************/
-
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
@@ -336,29 +324,33 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-bool Compiler::bbInCatchHandlerILRange(BasicBlock* blk)
+bool Compiler::bbInCatchHandlerILRange(BasicBlock* blk)
{
EHblkDsc* HBtab = ehGetBlockHndDsc(blk);
if (HBtab == nullptr)
+ {
return false;
+ }
return HBtab->HasCatchHandler() && HBtab->InHndRegionILRange(blk);
}
-bool Compiler::bbInFilterILRange(BasicBlock* blk)
+bool Compiler::bbInFilterILRange(BasicBlock* blk)
{
EHblkDsc* HBtab = ehGetBlockHndDsc(blk);
if (HBtab == nullptr)
+ {
return false;
+ }
return HBtab->InFilterRegionILRange(blk);
}
// Given a handler region, find the innermost try region that contains it.
// NOTE: handlerIndex is 1-based (0 means no handler).
-unsigned short Compiler::bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex)
+unsigned short Compiler::bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex)
{
if (handlerIndex > 0)
{
@@ -368,24 +360,22 @@ unsigned short Compiler::bbFindInnermostTryRegionContainingHandlerRegion(un
// handlerIndex is 1 based, therefore our interesting clauses start from clause compHndBBtab[handlerIndex]
EHblkDsc* ehDscEnd = compHndBBtab + compHndBBtabCount;
- for (ehDsc = compHndBBtab + handlerIndex, XTnum = handlerIndex;
- ehDsc < ehDscEnd;
- ehDsc++, XTnum++)
+ for (ehDsc = compHndBBtab + handlerIndex, XTnum = handlerIndex; ehDsc < ehDscEnd; ehDsc++, XTnum++)
{
if (bbInTryRegions(XTnum, blk))
{
noway_assert(XTnum < MAX_XCPTN_INDEX);
- return (unsigned short)(XTnum + 1); // Return the tryIndex
+ return (unsigned short)(XTnum + 1); // Return the tryIndex
}
}
}
-
+
return 0;
}
// Given a try region, find the innermost handler region that contains it.
// NOTE: tryIndex is 1-based (0 means no handler).
-unsigned short Compiler::bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex)
+unsigned short Compiler::bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex)
{
if (tryIndex > 0)
{
@@ -395,18 +385,16 @@ unsigned short Compiler::bbFindInnermostHandlerRegionContainingTryRegion(un
// tryIndex is 1 based, our interesting clauses start from clause compHndBBtab[tryIndex]
EHblkDsc* ehDscEnd = compHndBBtab + compHndBBtabCount;
- for (ehDsc = compHndBBtab + tryIndex, XTnum = tryIndex;
- ehDsc < ehDscEnd;
- ehDsc++, XTnum++)
+ for (ehDsc = compHndBBtab + tryIndex, XTnum = tryIndex; ehDsc < ehDscEnd; ehDsc++, XTnum++)
{
if (bbInHandlerRegions(XTnum, blk))
{
noway_assert(XTnum < MAX_XCPTN_INDEX);
- return (unsigned short)(XTnum + 1); // Return the handlerIndex
+ return (unsigned short)(XTnum + 1); // Return the handlerIndex
}
}
}
-
+
return 0;
}
@@ -415,7 +403,7 @@ unsigned short Compiler::bbFindInnermostHandlerRegionContainingTryRegion(un
the try body. For this check, a funclet is considered to be in the region
it was extracted from.
*/
-bool Compiler::bbInTryRegions(unsigned regionIndex, BasicBlock * blk)
+bool Compiler::bbInTryRegions(unsigned regionIndex, BasicBlock* blk)
{
assert(regionIndex < EHblkDsc::NO_ENCLOSING_INDEX);
unsigned tryIndex = blk->hasTryIndex() ? blk->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX;
@@ -443,16 +431,16 @@ bool Compiler::bbInTryRegions(unsigned regionIndex, BasicBlock *
// Return Value:
// true - The region with index 'regionIndex' can handle exceptions from 'blk'
// false - The region with index 'regionIndex' can't handle exceptions from 'blk'
-//
+//
// Notes:
// For this check, a funclet is considered to be in the region it was
// extracted from.
-bool Compiler::bbInExnFlowRegions(unsigned regionIndex, BasicBlock * blk)
+bool Compiler::bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk)
{
assert(regionIndex < EHblkDsc::NO_ENCLOSING_INDEX);
EHblkDsc* ExnFlowRegion = ehGetBlockExnFlowDsc(blk);
- unsigned tryIndex = (ExnFlowRegion == nullptr ? EHblkDsc::NO_ENCLOSING_INDEX : ehGetIndex(ExnFlowRegion));
+ unsigned tryIndex = (ExnFlowRegion == nullptr ? EHblkDsc::NO_ENCLOSING_INDEX : ehGetIndex(ExnFlowRegion));
// Loop outward until we find an enclosing try that is the same as the one
// we are looking for or an outer/later one
@@ -469,7 +457,7 @@ bool Compiler::bbInExnFlowRegions(unsigned regionIndex, BasicBloc
Given a block, check to see if it is in the handler block of the EH descriptor.
For this check, a funclet is considered to be in the region it was extracted from.
*/
-bool Compiler::bbInHandlerRegions(unsigned regionIndex, BasicBlock * blk)
+bool Compiler::bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk)
{
assert(regionIndex < EHblkDsc::NO_ENCLOSING_INDEX);
unsigned hndIndex = blk->hasHndIndex() ? blk->getHndIndex() : EHblkDsc::NO_ENCLOSING_INDEX;
@@ -492,26 +480,28 @@ bool Compiler::bbInHandlerRegions(unsigned regionIndex, BasicBloc
Since we create one EHblkDsc for each "catch" of a "try", we might end up
with multiple EHblkDsc's that have the same ebdTryBeg and ebdTryLast, but different
ebdHndBeg and ebdHndLast. Unfortunately getTryIndex() only returns the index of the first EHblkDsc.
-
+
E.g. The following example shows that BB02 has a catch in BB03 and another catch in BB04.
-
+
index nest, enclosing
0 :: 0, 1 - Try at BB01..BB02 [000..008], Handler at BB03 [009..016]
1 :: 0, - Try at BB01..BB02 [000..008], Handler at BB04 [017..022]
This function will return true for
bbInCatchHandlerRegions(BB02, BB03) and bbInCatchHandlerRegions(BB02, BB04)
-
+
*/
-bool Compiler::bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk)
+bool Compiler::bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk)
{
assert(tryBlk->hasTryIndex());
if (!hndBlk->hasHndIndex())
+ {
return false;
-
- unsigned XTnum = tryBlk->getTryIndex();
+ }
+
+ unsigned XTnum = tryBlk->getTryIndex();
EHblkDsc* firstEHblkDsc = ehGetDsc(XTnum);
- EHblkDsc* ehDsc = firstEHblkDsc;
+ EHblkDsc* ehDsc = firstEHblkDsc;
// Rather than searching the whole list, take advantage of our sorting.
// We will only match against blocks with the same try body (mutually
@@ -542,46 +532,39 @@ bool Compiler::bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicB
do
{
- if (ehDsc->HasCatchHandler() &&
- bbInHandlerRegions(XTnum, hndBlk))
+ if (ehDsc->HasCatchHandler() && bbInHandlerRegions(XTnum, hndBlk))
{
return true;
}
XTnum++;
ehDsc++;
- }
- while (XTnum < compHndBBtabCount && EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc));
+ } while (XTnum < compHndBBtabCount && EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc));
return false;
}
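
Building on the mutually-protect example in the comment above, a minimal sketch of visiting every clause that shares a 'try' body with clause XTnum. It relies on the sorting invariant noted above (mutually-protect clauses are adjacent, starting at the index returned by getTryIndex()); comp is a placeholder Compiler*, and direct access to compHndBBtabCount from the caller is an assumption.

    EHblkDsc* firstClause = comp->ehGetDsc(XTnum);
    for (unsigned i = XTnum; i < comp->compHndBBtabCount; i++)
    {
        EHblkDsc* clause = comp->ehGetDsc(i);
        if (!EHblkDsc::ebdIsSameTry(firstClause, clause))
        {
            break; // left the mutually-protect group
        }
        // inspect clause->HasCatchHandler(), clause->ebdHndBeg, etc.
    }
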
/******************************************************************************************
 * Given two blocks, return the inner-most enclosing try region that contains both of them.
- * Return 0 if it does not find any try region (which means the inner-most region
+ * Return 0 if it does not find any try region (which means the inner-most region
* is the method itself).
*/
-unsigned short Compiler::bbFindInnermostCommonTryRegion(BasicBlock* bbOne,
- BasicBlock* bbTwo)
+unsigned short Compiler::bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo)
{
- unsigned XTnum;
+ unsigned XTnum;
- for (XTnum = 0;
- XTnum < compHndBBtabCount;
- XTnum++)
+ for (XTnum = 0; XTnum < compHndBBtabCount; XTnum++)
{
- if (bbInTryRegions(XTnum, bbOne) &&
- bbInTryRegions(XTnum, bbTwo))
+ if (bbInTryRegions(XTnum, bbOne) && bbInTryRegions(XTnum, bbTwo))
{
noway_assert(XTnum < MAX_XCPTN_INDEX);
- return (unsigned short)(XTnum + 1); // Return the tryIndex
+ return (unsigned short)(XTnum + 1); // Return the tryIndex
}
- }
+ }
- return 0;
+ return 0;
}
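
A hedged usage sketch for the helper above; comp, b1 and b2 are placeholders. The return value is 1-based, so 0 means the innermost common region is the method itself, and anything else must be decremented before indexing the EH table.

    unsigned short common = comp->bbFindInnermostCommonTryRegion(b1, b2);
    if (common != 0)
    {
        EHblkDsc* ehDsc = comp->ehGetDsc(common - 1); // convert the 1-based result to a table index
    }
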
-
// bbIsTryBeg() returns true if this block is the start of any try region.
// This is computed by examining the current values in the
// EH table rather than just looking at the block->bbFlags.
@@ -590,28 +573,24 @@ unsigned short Compiler::bbFindInnermostCommonTryRegion(BasicBlock* bbOne,
// most nested try region it is a member of. Thus, we only need to check the EH
// table entry related to the try index stored on the block.
//
-bool Compiler::bbIsTryBeg(BasicBlock* block)
+bool Compiler::bbIsTryBeg(BasicBlock* block)
{
EHblkDsc* ehDsc = ehGetBlockTryDsc(block);
return (ehDsc != nullptr) && (block == ehDsc->ebdTryBeg);
}
-
 // bbIsHandlerBeg() returns true if "block" is the start of any handler or filter.
// Note that if a block is the beginning of a handler or filter, it must be the beginning
// of the most nested handler or filter region it is in. Thus, we only need to look at the EH
// descriptor corresponding to the handler index on the block.
//
-bool Compiler::bbIsHandlerBeg(BasicBlock* block)
+bool Compiler::bbIsHandlerBeg(BasicBlock* block)
{
EHblkDsc* ehDsc = ehGetBlockHndDsc(block);
- return (ehDsc != nullptr) &&
- ((block == ehDsc->ebdHndBeg) ||
- (ehDsc->HasFilter() && (block == ehDsc->ebdFilter)));
+ return (ehDsc != nullptr) && ((block == ehDsc->ebdHndBeg) || (ehDsc->HasFilter() && (block == ehDsc->ebdFilter)));
}
-
-bool Compiler::bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex)
+bool Compiler::bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex)
{
if (block->hasHndIndex())
{
@@ -624,8 +603,7 @@ bool Compiler::bbIsExFlowBlock(BasicBlock* block, unsigned* regionInd
}
}
-
-bool Compiler::ehHasCallableHandlers()
+bool Compiler::ehHasCallableHandlers()
{
#if FEATURE_EH_FUNCLETS
@@ -640,12 +618,11 @@ bool Compiler::ehHasCallableHandlers()
#endif // FEATURE_EH_FUNCLETS
}
-
/******************************************************************************************
* Determine if 'block' is the last block of an EH 'try' or handler (ignoring filters). If so,
* return the EH descriptor pointer for that EH region. Otherwise, return nullptr.
*/
-EHblkDsc* Compiler::ehIsBlockTryLast(BasicBlock* block)
+EHblkDsc* Compiler::ehIsBlockTryLast(BasicBlock* block)
{
EHblkDsc* HBtab = ehGetBlockTryDsc(block);
if ((HBtab != nullptr) && (HBtab->ebdTryLast == block))
@@ -655,7 +632,7 @@ EHblkDsc* Compiler::ehIsBlockTryLast(BasicBlock* block)
return nullptr;
}
-EHblkDsc* Compiler::ehIsBlockHndLast(BasicBlock* block)
+EHblkDsc* Compiler::ehIsBlockHndLast(BasicBlock* block)
{
EHblkDsc* HBtab = ehGetBlockHndDsc(block);
if ((HBtab != nullptr) && (HBtab->ebdHndLast == block))
@@ -665,10 +642,9 @@ EHblkDsc* Compiler::ehIsBlockHndLast(BasicBlock* block)
return nullptr;
}
-bool Compiler::ehIsBlockEHLast(BasicBlock* block)
+bool Compiler::ehIsBlockEHLast(BasicBlock* block)
{
- return (ehIsBlockTryLast(block) != nullptr) ||
- (ehIsBlockHndLast(block) != nullptr);
+ return (ehIsBlockTryLast(block) != nullptr) || (ehIsBlockHndLast(block) != nullptr);
}
//------------------------------------------------------------------------
@@ -684,7 +660,7 @@ bool Compiler::ehIsBlockEHLast(BasicBlock* block)
// non-null - This region is the innermost handler for exceptions raised in
// the given block
-EHblkDsc* Compiler::ehGetBlockExnFlowDsc(BasicBlock* block)
+EHblkDsc* Compiler::ehGetBlockExnFlowDsc(BasicBlock* block)
{
EHblkDsc* hndDesc = ehGetBlockHndDsc(block);
@@ -721,7 +697,7 @@ EHblkDsc* Compiler::ehGetBlockExnFlowDsc(BasicBlock* block)
return ehGetBlockTryDsc(block);
}
-bool Compiler::ehBlockHasExnFlowDsc(BasicBlock* block)
+bool Compiler::ehBlockHasExnFlowDsc(BasicBlock* block)
{
if (block->hasTryIndex())
{
@@ -730,9 +706,8 @@ bool Compiler::ehBlockHasExnFlowDsc(BasicBlock* block)
EHblkDsc* hndDesc = ehGetBlockHndDsc(block);
- return ((hndDesc != nullptr)
- && hndDesc->InFilterRegionBBRange(block)
- && (hndDesc->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX));
+ return ((hndDesc != nullptr) && hndDesc->InFilterRegionBBRange(block) &&
+ (hndDesc->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX));
}
//------------------------------------------------------------------------
@@ -749,7 +724,7 @@ bool Compiler::ehBlockHasExnFlowDsc(BasicBlock* block)
// Return Value:
// As described above.
//
-unsigned Compiler::ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion)
+unsigned Compiler::ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion)
{
assert(block != nullptr);
assert(inTryRegion != nullptr);
@@ -758,26 +733,26 @@ unsigned Compiler::ehGetMostNestedRegionIndex(BasicBlock* block, bool* in
if (block->bbHndIndex == 0)
{
mostNestedRegion = block->bbTryIndex;
- *inTryRegion = true;
+ *inTryRegion = true;
}
else if (block->bbTryIndex == 0)
{
mostNestedRegion = block->bbHndIndex;
- *inTryRegion = false;
+ *inTryRegion = false;
}
else
{
if (block->bbTryIndex < block->bbHndIndex)
{
mostNestedRegion = block->bbTryIndex;
- *inTryRegion = true;
+ *inTryRegion = true;
}
else
{
assert(block->bbTryIndex != block->bbHndIndex); // A block can't be both in the 'try' and 'handler' region
// of the same EH region
mostNestedRegion = block->bbHndIndex;
- *inTryRegion = false;
+ *inTryRegion = false;
}
}
@@ -785,35 +760,38 @@ unsigned Compiler::ehGetMostNestedRegionIndex(BasicBlock* block, bool* in
return mostNestedRegion;
}
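
    // Worked example (hedged): with bbTryIndex == 2 and bbHndIndex == 5, the branch
    // above picks the 'try' region, returning 2 with *inTryRegion set to true;
    // with bbTryIndex == 0, the handler index is returned and *inTryRegion is false.
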
-
/*****************************************************************************
* Returns the try index of the enclosing try, skipping all EH regions with the
* same try region (that is, all 'mutual protect' regions). If there is no such
* enclosing try, returns EHblkDsc::NO_ENCLOSING_INDEX.
*/
-unsigned Compiler::ehTrueEnclosingTryIndexIL(unsigned regionIndex)
+unsigned Compiler::ehTrueEnclosingTryIndexIL(unsigned regionIndex)
{
assert(regionIndex != EHblkDsc::NO_ENCLOSING_INDEX);
EHblkDsc* ehDscRoot = ehGetDsc(regionIndex);
- EHblkDsc* HBtab = ehDscRoot;
+ EHblkDsc* HBtab = ehDscRoot;
for (;;)
{
regionIndex = HBtab->ebdEnclosingTryIndex;
if (regionIndex == EHblkDsc::NO_ENCLOSING_INDEX)
- break; // No enclosing 'try'; we're done
+ {
+ break; // No enclosing 'try'; we're done
+ }
HBtab = ehGetDsc(regionIndex);
if (!EHblkDsc::ebdIsSameILTry(ehDscRoot, HBtab))
- break; // Found an enclosing 'try' that has a different 'try' region (is not mutually-protect with the original region). Return it.
+ {
+ break; // Found an enclosing 'try' that has a different 'try' region (is not mutually-protect with the
+ // original region). Return it.
+ }
}
return regionIndex;
}
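
A minimal sketch of walking outward through the distinct (non-mutual-protect) enclosing 'try' regions using the helper above; comp and startIndex are placeholders, startIndex must be a valid region index (the helper asserts against NO_ENCLOSING_INDEX), and, per the IL-offset caveat earlier in this file, this is only meaningful early in compilation.

    for (unsigned idx = startIndex; idx != EHblkDsc::NO_ENCLOSING_INDEX;
         idx = comp->ehTrueEnclosingTryIndexIL(idx))
    {
        EHblkDsc* ehDsc = comp->ehGetDsc(idx); // each iteration sees a genuinely different 'try' body
    }
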
-
-unsigned Compiler::ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion)
+unsigned Compiler::ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion)
{
assert(regionIndex != EHblkDsc::NO_ENCLOSING_INDEX);
@@ -825,7 +803,7 @@ unsigned Compiler::ehGetEnclosingRegionIndex(unsigned regionIndex, bool*
* The argument 'block' has been deleted. Update the EH table so 'block' is no longer listed
* as a 'last' block. You can't delete a 'begin' block this way.
*/
-void Compiler::ehUpdateForDeletedBlock(BasicBlock* block)
+void Compiler::ehUpdateForDeletedBlock(BasicBlock* block)
{
assert(block->bbFlags & BBF_REMOVED);
@@ -841,7 +819,6 @@ void Compiler::ehUpdateForDeletedBlock(BasicBlock* block)
ehUpdateLastBlocks(block, bPrev);
}
-
/*****************************************************************************
* Determine if an empty block can be deleted, and still preserve the EH normalization
* rules on blocks.
@@ -851,13 +828,13 @@ void Compiler::ehUpdateForDeletedBlock(BasicBlock* block)
* 'last' block. If this previous block is already a 'last' block, then we can't do the
* delete, as that would cause a single block to be the 'last' block of multiple regions.
*/
-bool Compiler::ehCanDeleteEmptyBlock(BasicBlock* block)
+bool Compiler::ehCanDeleteEmptyBlock(BasicBlock* block)
{
assert(block->isEmpty());
return true;
-#if 0 // This is disabled while the "multiple last block" normalization is disabled
+#if 0 // This is disabled while the "multiple last block" normalization is disabled
if (!fgNormalizeEHDone)
{
return true;
@@ -890,14 +867,12 @@ bool Compiler::ehCanDeleteEmptyBlock(BasicBlock* block)
* oldLast -- Search for this block as the 'last' block of one or more EH regions.
* newLast -- If 'oldLast' is found to be the 'last' block of an EH region, replace it by 'newLast'.
*/
-void Compiler::ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast)
+void Compiler::ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast)
{
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
+ EHblkDsc* HBtab;
+ EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount;
- HBtab < HBtabEnd;
- HBtab++)
+ for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
{
if (HBtab->ebdTryLast == oldLast)
{
@@ -910,7 +885,7 @@ void Compiler::ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* ne
}
}
-unsigned Compiler::ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion)
+unsigned Compiler::ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion)
{
assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX);
assert(ehGetDsc(finallyIndex)->HasFinallyHandler());
@@ -923,7 +898,7 @@ unsigned Compiler::ehGetCallFinallyRegionIndex(unsigned finallyIndex, boo
#endif
}
-void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk)
+void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk)
{
assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX);
assert(ehGetDsc(finallyIndex)->HasFinallyHandler());
@@ -933,7 +908,7 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, Basi
EHblkDsc* ehDsc = ehGetDsc(finallyIndex);
#if FEATURE_EH_CALLFINALLY_THUNKS
- bool inTryRegion;
+ bool inTryRegion;
unsigned callFinallyRegionIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion);
if (callFinallyRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX)
@@ -956,7 +931,7 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, Basi
*endBlk = ehDsc->ebdHndLast->bbNext;
}
}
-#else // !FEATURE_EH_CALLFINALLY_THUNKS
+#else // !FEATURE_EH_CALLFINALLY_THUNKS
*begBlk = ehDsc->ebdTryBeg;
*endBlk = ehDsc->ebdTryLast->bbNext;
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
@@ -964,14 +939,14 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, Basi
#ifdef DEBUG
-bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex)
+bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex)
{
assert(blockCallFinally->bbJumpKind == BBJ_CALLFINALLY);
assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX);
assert(finallyIndex < compHndBBtabCount);
assert(ehGetDsc(finallyIndex)->HasFinallyHandler());
- bool inTryRegion;
+ bool inTryRegion;
unsigned callFinallyIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion);
if (callFinallyIndex == EHblkDsc::NO_ENCLOSING_INDEX)
{
@@ -990,12 +965,16 @@ bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFina
if (inTryRegion)
{
if (bbInTryRegions(callFinallyIndex, blockCallFinally))
+ {
return true;
+ }
}
else
{
if (bbInHandlerRegions(callFinallyIndex, blockCallFinally))
+ {
return true;
+ }
}
}
@@ -1011,9 +990,9 @@ bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFina
* Are there (or will there be) any funclets in the function?
*/
-bool Compiler::ehAnyFunclets()
+bool Compiler::ehAnyFunclets()
{
- return compHndBBtabCount > 0; // if there is any EH, there will be funclets
+ return compHndBBtabCount > 0; // if there is any EH, there will be funclets
}
/*****************************************************************************
@@ -1023,18 +1002,18 @@ bool Compiler::ehAnyFunclets()
* EH table, it is accurate at any time.
*/
-unsigned Compiler::ehFuncletCount()
+unsigned Compiler::ehFuncletCount()
{
- unsigned funcletCnt = 0;
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
+ unsigned funcletCnt = 0;
+ EHblkDsc* HBtab;
+ EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount;
- HBtab < HBtabEnd;
- HBtab++)
+ for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
{
if (HBtab->HasFilter())
+ {
++funcletCnt;
+ }
++funcletCnt;
}
return funcletCnt;
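
A worked example of the count above: a method with one try/catch clause and one try/filter/filter-handler clause has 1 + 2 = 3 funclets, since a filter clause contributes both its filter and its handler (the extra increment under HasFilter()).
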
@@ -1052,7 +1031,7 @@ unsigned Compiler::ehFuncletCount()
* 2 = filter)
*
*/
-unsigned Compiler::bbThrowIndex(BasicBlock* blk)
+unsigned Compiler::bbThrowIndex(BasicBlock* blk)
{
if (!blk->hasTryIndex() && !blk->hasHndIndex())
{
@@ -1084,12 +1063,11 @@ unsigned Compiler::bbThrowIndex(BasicBlock* blk)
#endif // FEATURE_EH_FUNCLETS
-
/*****************************************************************************
* Determine the emitter code cookie for a block, for unwind purposes.
*/
-void* Compiler::ehEmitCookie(BasicBlock* block)
+void* Compiler::ehEmitCookie(BasicBlock* block)
{
noway_assert(block);
@@ -1114,23 +1092,19 @@ void* Compiler::ehEmitCookie(BasicBlock* block)
return cookie;
}
-
/*****************************************************************************
* Determine the emitter code offset for a block. If the block is a finally
* target, choose the offset of the NOP padding that precedes the block.
*/
-UNATIVE_OFFSET Compiler::ehCodeOffset(BasicBlock* block)
+UNATIVE_OFFSET Compiler::ehCodeOffset(BasicBlock* block)
{
return genEmitter->emitCodeOffset(ehEmitCookie(block), 0);
}
/****************************************************************************/
-EHblkDsc* Compiler::ehInitHndRange(BasicBlock* blk,
- IL_OFFSET* hndBeg,
- IL_OFFSET* hndEnd,
- bool* inFilter)
+EHblkDsc* Compiler::ehInitHndRange(BasicBlock* blk, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter)
{
EHblkDsc* hndTab = ehGetBlockHndDsc(blk);
if (hndTab != nullptr)
@@ -1159,9 +1133,7 @@ EHblkDsc* Compiler::ehInitHndRange(BasicBlock* blk,
/****************************************************************************/
-EHblkDsc* Compiler::ehInitTryRange(BasicBlock* blk,
- IL_OFFSET* tryBeg,
- IL_OFFSET* tryEnd)
+EHblkDsc* Compiler::ehInitTryRange(BasicBlock* blk, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd)
{
EHblkDsc* tryTab = ehGetBlockTryDsc(blk);
if (tryTab != nullptr)
@@ -1179,17 +1151,14 @@ EHblkDsc* Compiler::ehInitTryRange(BasicBlock* blk,
/****************************************************************************/
-EHblkDsc* Compiler::ehInitHndBlockRange(BasicBlock* blk,
- BasicBlock** hndBeg,
- BasicBlock** hndLast,
- bool* inFilter)
+EHblkDsc* Compiler::ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter)
{
EHblkDsc* hndTab = ehGetBlockHndDsc(blk);
if (hndTab != nullptr)
{
if (hndTab->InFilterRegionBBRange(blk))
{
- *hndBeg = hndTab->ebdFilter;
+ *hndBeg = hndTab->ebdFilter;
if (hndLast != nullptr)
{
*hndLast = hndTab->BBFilterLast();
@@ -1198,20 +1167,20 @@ EHblkDsc* Compiler::ehInitHndBlockRange(BasicBlock* blk,
}
else
{
- *hndBeg = hndTab->ebdHndBeg;
+ *hndBeg = hndTab->ebdHndBeg;
if (hndLast != nullptr)
{
- *hndLast = hndTab->ebdHndLast;
+ *hndLast = hndTab->ebdHndLast;
}
*inFilter = false;
}
}
else
{
- *hndBeg = nullptr;
+ *hndBeg = nullptr;
if (hndLast != nullptr)
{
- *hndLast = nullptr;
+ *hndLast = nullptr;
}
*inFilter = false;
}
@@ -1220,9 +1189,7 @@ EHblkDsc* Compiler::ehInitHndBlockRange(BasicBlock* blk,
/****************************************************************************/
-EHblkDsc* Compiler::ehInitTryBlockRange(BasicBlock* blk,
- BasicBlock** tryBeg,
- BasicBlock** tryLast)
+EHblkDsc* Compiler::ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast)
{
EHblkDsc* tryTab = ehGetBlockTryDsc(blk);
if (tryTab != nullptr)
@@ -1248,8 +1215,7 @@ EHblkDsc* Compiler::ehInitTryBlockRange(BasicBlock* blk,
* This method updates the value of ebdTryLast.
*/
-void Compiler::fgSetTryEnd(EHblkDsc* handlerTab,
- BasicBlock* newTryLast)
+void Compiler::fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast)
{
assert(newTryLast != nullptr);
@@ -1275,8 +1241,7 @@ void Compiler::fgSetTryEnd(EHblkDsc* handlerTab,
* This method updates the value of ebdHndLast.
*/
-void Compiler::fgSetHndEnd(EHblkDsc* handlerTab,
- BasicBlock* newHndLast)
+void Compiler::fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast)
{
assert(newHndLast != nullptr);
@@ -1297,7 +1262,6 @@ void Compiler::fgSetHndEnd(EHblkDsc* handlerTab,
}
}
-
/*****************************************************************************
*
* Given a EH handler table entry update the ebdTryLast and ebdHndLast pointers
@@ -1312,9 +1276,9 @@ void Compiler::fgSetHndEnd(EHblkDsc* handlerTab,
void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
{
- BasicBlock* block;
- BasicBlock* bEnd;
- BasicBlock* bLast;
+ BasicBlock* block;
+ BasicBlock* bEnd;
+ BasicBlock* bLast;
// Update ebdTryLast
bLast = nullptr;
@@ -1338,7 +1302,9 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
block = block->bbNext;
if (block == bEnd)
+ {
break;
+ }
}
fgSetTryEnd(handlerTab, bLast);
@@ -1364,18 +1330,19 @@ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab)
block = block->bbNext;
if (block == bEnd)
+ {
break;
+ }
}
fgSetHndEnd(handlerTab, bLast);
}
-
/*****************************************************************************
*
* Allocate the EH table
*/
-void Compiler::fgAllocEHTable()
+void Compiler::fgAllocEHTable()
{
#if FEATURE_EH_FUNCLETS
@@ -1390,9 +1357,9 @@ void Compiler::fgAllocEHTable()
#ifdef DEBUG
compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG
-#else // DEBUG
+#else // DEBUG
compHndBBtabAllocCount = info.compXcptnsCount * 2;
-#endif // DEBUG
+#endif // DEBUG
#else // FEATURE_EH_FUNCLETS
@@ -1400,7 +1367,7 @@ void Compiler::fgAllocEHTable()
#endif // FEATURE_EH_FUNCLETS
- compHndBBtab = new(this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount];
+ compHndBBtab = new (this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount];
compHndBBtabCount = info.compXcptnsCount;
}
@@ -1411,12 +1378,12 @@ void Compiler::fgAllocEHTable()
* the exception table. If calling this within a loop over the exception table
* be careful to iterate again on the current entry (if XTnum) to not skip any.
*/
-void Compiler::fgRemoveEHTableEntry(unsigned XTnum)
+void Compiler::fgRemoveEHTableEntry(unsigned XTnum)
{
assert(compHndBBtabCount > 0);
assert(XTnum < compHndBBtabCount);
- EHblkDsc* HBtab;
+ EHblkDsc* HBtab;
/* Reduce the number of entries in the EH table by one */
compHndBBtabCount--;
@@ -1424,7 +1391,7 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum)
if (compHndBBtabCount == 0)
{
// No more entries remaining.
- INDEBUG(compHndBBtab = (EHblkDsc *)INVALID_POINTER_VALUE;)
+ INDEBUG(compHndBBtab = (EHblkDsc*)INVALID_POINTER_VALUE;)
}
else
{
@@ -1436,12 +1403,9 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum)
EHblkDsc* xtabEnd;
EHblkDsc* xtab;
- for (xtab = compHndBBtab, xtabEnd = compHndBBtab + compHndBBtabCount;
- xtab < xtabEnd;
- xtab++)
+ for (xtab = compHndBBtab, xtabEnd = compHndBBtab + compHndBBtabCount; xtab < xtabEnd; xtab++)
{
- if ((xtab != HBtab) &&
- (xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) &&
+ if ((xtab != HBtab) && (xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) &&
(xtab->ebdEnclosingTryIndex >= XTnum))
{
// Update the enclosing scope link
@@ -1456,8 +1420,7 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum)
}
}
- if ((xtab != HBtab) &&
- (xtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) &&
+ if ((xtab != HBtab) && (xtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) &&
(xtab->ebdEnclosingHndIndex >= XTnum))
{
// Update the enclosing scope link
@@ -1519,7 +1482,6 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum)
}
}
-
#if FEATURE_EH_FUNCLETS
/*****************************************************************************
@@ -1531,7 +1493,7 @@ void Compiler::fgRemoveEHTableEntry(unsigned XTnum)
* The table entry itself is not filled in.
* Returns a pointer to the new entry.
*/
-EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum)
+EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum)
{
if (XTnum != compHndBBtabCount)
{
@@ -1539,18 +1501,14 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum)
EHblkDsc* xtabEnd;
EHblkDsc* xtab;
- for (xtab = compHndBBtab, xtabEnd = compHndBBtab + compHndBBtabCount;
- xtab < xtabEnd;
- xtab++)
+ for (xtab = compHndBBtab, xtabEnd = compHndBBtab + compHndBBtabCount; xtab < xtabEnd; xtab++)
{
- if ((xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) &&
- (xtab->ebdEnclosingTryIndex >= XTnum))
+ if ((xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingTryIndex >= XTnum))
{
// Update the enclosing scope link
xtab->ebdEnclosingTryIndex++;
}
- if ((xtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) &&
- (xtab->ebdEnclosingHndIndex >= XTnum))
+ if ((xtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingHndIndex >= XTnum))
{
// Update the enclosing scope link
xtab->ebdEnclosingHndIndex++;
@@ -1579,41 +1537,38 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum)
{
// We need to reallocate the table
- if (compHndBBtabAllocCount == MAX_XCPTN_INDEX) // We're already at the max size for indices to be unsigned short
+ if (compHndBBtabAllocCount == MAX_XCPTN_INDEX)
+ { // We're already at the max size for indices to be unsigned short
IMPL_LIMITATION("too many exception clauses");
+ }
// Double the table size. For stress, we could use +1. Note that if the table isn't allocated
// yet, such as when we add an EH region for synchronized methods that don't already have one,
// we start at zero, so we need to make sure the new table has at least one entry.
- unsigned newHndBBtabAllocCount = max(1,compHndBBtabAllocCount * 2);
- noway_assert(compHndBBtabAllocCount < newHndBBtabAllocCount); // check for overflow
+ unsigned newHndBBtabAllocCount = max(1, compHndBBtabAllocCount * 2);
+ noway_assert(compHndBBtabAllocCount < newHndBBtabAllocCount); // check for overflow
if (newHndBBtabAllocCount > MAX_XCPTN_INDEX)
{
- newHndBBtabAllocCount = MAX_XCPTN_INDEX; // increase to the maximum size we allow
+ newHndBBtabAllocCount = MAX_XCPTN_INDEX; // increase to the maximum size we allow
}
- JITDUMP("*********** fgAddEHTableEntry: increasing EH table size from %d to %d\n",
- compHndBBtabAllocCount, newHndBBtabAllocCount);
+ JITDUMP("*********** fgAddEHTableEntry: increasing EH table size from %d to %d\n", compHndBBtabAllocCount,
+ newHndBBtabAllocCount);
compHndBBtabAllocCount = newHndBBtabAllocCount;
- EHblkDsc* newTable = new(this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount];
+ EHblkDsc* newTable = new (this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount];
// Move over the stuff before the new entry
- memcpy_s(newTable,
- compHndBBtabAllocCount * sizeof(*compHndBBtab),
- compHndBBtab,
- XTnum * sizeof(*compHndBBtab));
+ memcpy_s(newTable, compHndBBtabAllocCount * sizeof(*compHndBBtab), compHndBBtab, XTnum * sizeof(*compHndBBtab));
if (XTnum != compHndBBtabCount)
{
// Move over the stuff after the new entry
- memcpy_s(newTable + XTnum + 1,
- (compHndBBtabAllocCount - XTnum - 1) * sizeof(*compHndBBtab),
- compHndBBtab + XTnum,
- (compHndBBtabCount - XTnum) * sizeof(*compHndBBtab));
+ memcpy_s(newTable + XTnum + 1, (compHndBBtabAllocCount - XTnum - 1) * sizeof(*compHndBBtab),
+ compHndBBtab + XTnum, (compHndBBtabCount - XTnum) * sizeof(*compHndBBtab));
}
// Now set the new table as the table to use. The old one gets lost, but we can't
@@ -1627,9 +1582,7 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum)
EHblkDsc* HBtab = compHndBBtab + XTnum;
- memmove_s(HBtab + 1,
- (compHndBBtabAllocCount - XTnum - 1) * sizeof(*compHndBBtab),
- HBtab,
+ memmove_s(HBtab + 1, (compHndBBtabAllocCount - XTnum - 1) * sizeof(*compHndBBtab), HBtab,
(compHndBBtabCount - XTnum) * sizeof(*compHndBBtab));
}
@@ -1641,7 +1594,6 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum)
#endif // FEATURE_EH_FUNCLETS
-
#if !FEATURE_EH
/*****************************************************************************
@@ -1668,10 +1620,10 @@ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum)
*
* This function assumes FEATURE_EH_FUNCLETS is defined.
*/
-void Compiler::fgRemoveEH()
+void Compiler::fgRemoveEH()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
printf("\n*************** In fgRemoveEH()\n");
#endif // DEBUG
@@ -1682,7 +1634,7 @@ void Compiler::fgRemoveEH()
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\n*************** Before fgRemoveEH()\n");
fgDispBasicBlocks();
@@ -1698,12 +1650,10 @@ void Compiler::fgRemoveEH()
assert(fgFirstFuncletBB == nullptr); // this should follow from "!fgFuncletsCreated"
assert(!optLoopsMarked);
- unsigned XTnum;
- EHblkDsc* HBtab;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
if (HBtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX)
{
@@ -1714,9 +1664,7 @@ void Compiler::fgRemoveEH()
continue;
}
- if (HBtab->HasCatchHandler() ||
- HBtab->HasFilter() ||
- HBtab->HasFaultHandler())
+ if (HBtab->HasCatchHandler() || HBtab->HasFilter() || HBtab->HasFaultHandler())
{
// Remove all the blocks associated with the handler. Note that there is no
// fall-through into the handler, or fall-through out of the handler, so
@@ -1757,11 +1705,9 @@ void Compiler::fgRemoveEH()
// fix up the EH table. We only care about less nested
// EH table entries, since we've already deleted everything up to XTnum.
- unsigned XTnum2;
- EHblkDsc* HBtab2;
- for (XTnum2 = XTnum + 1, HBtab2 = compHndBBtab + XTnum2;
- XTnum2 < compHndBBtabCount;
- XTnum2++ , HBtab2++)
+ unsigned XTnum2;
+ EHblkDsc* HBtab2;
+ for (XTnum2 = XTnum + 1, HBtab2 = compHndBBtab + XTnum2; XTnum2 < compHndBBtabCount; XTnum2++, HBtab2++)
{
// Handle case where deleted range is at the end of a 'try'.
if (HBtab2->ebdTryLast == blkLast)
@@ -1811,7 +1757,7 @@ void Compiler::fgRemoveEH()
// Delete the EH table
- compHndBBtab = nullptr;
+ compHndBBtab = nullptr;
compHndBBtabCount = 0;
// Leave compHndBBtabAllocCount alone.
@@ -1820,7 +1766,7 @@ void Compiler::fgRemoveEH()
fgRenumberBlocks();
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\n*************** After fgRemoveEH()\n");
fgDispBasicBlocks();
@@ -1832,16 +1778,17 @@ void Compiler::fgRemoveEH()
#endif // !FEATURE_EH
-
/*****************************************************************************
*
* Sort the EH table if necessary.
*/
-void Compiler::fgSortEHTable()
+void Compiler::fgSortEHTable()
{
if (!fgNeedToSortEHTable)
+ {
return;
+ }
// Now, all fields of the EH table are set except for those that are related
// to nesting. We need to first sort the table to ensure that an EH clause
@@ -1919,18 +1866,13 @@ void Compiler::fgSortEHTable()
}
#endif // DEBUG
+ EHblkDsc* xtab1;
+ EHblkDsc* xtab2;
+ unsigned xtabnum1, xtabnum2;
- EHblkDsc * xtab1;
- EHblkDsc * xtab2;
- unsigned xtabnum1, xtabnum2;
-
- for (xtabnum1 = 0, xtab1 = compHndBBtab;
- xtabnum1 < compHndBBtabCount;
- xtabnum1++ , xtab1++)
+ for (xtabnum1 = 0, xtab1 = compHndBBtab; xtabnum1 < compHndBBtabCount; xtabnum1++, xtab1++)
{
- for (xtabnum2 = xtabnum1 + 1, xtab2 = xtab1 + 1;
- xtabnum2 < compHndBBtabCount;
- xtabnum2++ , xtab2++)
+ for (xtabnum2 = xtabnum1 + 1, xtab2 = xtab1 + 1; xtabnum2 < compHndBBtabCount; xtabnum2++, xtab2++)
{
// If the nesting is wrong, swap them. The nesting is wrong if
// EH region 2 is nested in the try, handler, or filter of EH region 1.
@@ -1947,21 +1889,16 @@ void Compiler::fgSortEHTable()
IL_OFFSET hndEndOff = xtab2->ebdHndEndOffset;
assert(hndEndOff > hndBegOff);
- if (
- (hndBegOff >= xtab1->ebdTryBegOffset && hndEndOff <= xtab1->ebdTryEndOffset)
- ||
- (hndBegOff >= xtab1->ebdHndBegOffset && hndEndOff <= xtab1->ebdHndEndOffset)
- ||
- (xtab1->HasFilter() &&
- (hndBegOff >= xtab1->ebdFilterBegOffset && hndEndOff <= xtab1->ebdHndBegOffset))
- // Note that end of filter is beginning of handler
- )
+ if ((hndBegOff >= xtab1->ebdTryBegOffset && hndEndOff <= xtab1->ebdTryEndOffset) ||
+ (hndBegOff >= xtab1->ebdHndBegOffset && hndEndOff <= xtab1->ebdHndEndOffset) ||
+ (xtab1->HasFilter() && (hndBegOff >= xtab1->ebdFilterBegOffset && hndEndOff <= xtab1->ebdHndBegOffset))
+ // Note that end of filter is beginning of handler
+ )
{
#ifdef DEBUG
if (verbose)
{
- printf("fgSortEHTable: Swapping out-of-order EH#%u and EH#%u\n",
- xtabnum1, xtabnum2);
+ printf("fgSortEHTable: Swapping out-of-order EH#%u and EH#%u\n", xtabnum1, xtabnum2);
}
// Assert that the 'try' region is also nested in the same place as the handler
@@ -1971,17 +1908,24 @@ void Compiler::fgSortEHTable()
assert(tryEndOff > tryBegOff);
if (hndBegOff >= xtab1->ebdTryBegOffset && hndEndOff <= xtab1->ebdTryEndOffset)
+ {
assert(tryBegOff >= xtab1->ebdTryBegOffset && tryEndOff <= xtab1->ebdTryEndOffset);
+ }
if (hndBegOff >= xtab1->ebdHndBegOffset && hndEndOff <= xtab1->ebdHndEndOffset)
+ {
assert(tryBegOff >= xtab1->ebdHndBegOffset && tryEndOff <= xtab1->ebdHndEndOffset);
- if (xtab1->HasFilter() && (hndBegOff >= xtab1->ebdFilterBegOffset && hndEndOff <= xtab1->ebdHndBegOffset))
+ }
+ if (xtab1->HasFilter() &&
+ (hndBegOff >= xtab1->ebdFilterBegOffset && hndEndOff <= xtab1->ebdHndBegOffset))
+ {
assert(tryBegOff >= xtab1->ebdFilterBegOffset && tryEndOff <= xtab1->ebdHndBegOffset);
+ }
#endif // DEBUG
// Swap them!
EHblkDsc tmp = *xtab1;
- *xtab1 = *xtab2;
- *xtab2 = tmp;
+ *xtab1 = *xtab2;
+ *xtab2 = tmp;
}
}
}
@@ -2132,7 +2076,7 @@ void Compiler::fgNormalizeEH()
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** In fgNormalizeEH()\n");
fgDispBasicBlocks();
@@ -2148,7 +2092,8 @@ void Compiler::fgNormalizeEH()
modified = true;
}
- // Case #2: Prevent any two EH regions from starting with the same block (after case #3, we only need to worry about 'try' blocks).
+ // Case #2: Prevent any two EH regions from starting with the same block (after case #3, we only need to worry about
+ // 'try' blocks).
if (fgNormalizeEHCase2())
{
modified = true;
@@ -2215,10 +2160,10 @@ bool Compiler::fgNormalizeEHCase1()
{
EHblkDsc* eh = ehGetDsc(XTnum);
- BasicBlock* handlerStart = eh->ebdHndBeg;
- EHblkDsc* handlerStartContainingTry = ehGetBlockTryDsc(handlerStart);
+ BasicBlock* handlerStart = eh->ebdHndBeg;
+ EHblkDsc* handlerStartContainingTry = ehGetBlockTryDsc(handlerStart);
// If the handler start block is in a try, and is in fact the first block of that try...
- if (handlerStartContainingTry != NULL && handlerStartContainingTry->ebdTryBeg == handlerStart)
+ if (handlerStartContainingTry != nullptr && handlerStartContainingTry->ebdTryBeg == handlerStart)
{
// ...then we want to insert an empty, non-removable block outside the try to be the new first block of the
// handler.
@@ -2228,8 +2173,9 @@ bool Compiler::fgNormalizeEHCase1()
#ifdef DEBUG
if (verbose)
{
- printf("Handler begin for EH#%02u and 'try' begin for EH%02u are the same block; inserted new BB%02u before BB%02u as new handler begin for EH#%u.\n",
- XTnum, ehGetIndex(handlerStartContainingTry), newHndStart->bbNum, eh->ebdHndBeg->bbNum, XTnum);
+ printf("Handler begin for EH#%02u and 'try' begin for EH%02u are the same block; inserted new BB%02u "
+ "before BB%02u as new handler begin for EH#%u.\n",
+ XTnum, ehGetIndex(handlerStartContainingTry), newHndStart->bbNum, eh->ebdHndBeg->bbNum, XTnum);
}
#endif // DEBUG
@@ -2246,10 +2192,10 @@ bool Compiler::fgNormalizeEHCase1()
newHndStart->setTryIndex(eh->ebdEnclosingTryIndex);
}
newHndStart->setHndIndex(XTnum);
- newHndStart->bbCatchTyp = handlerStart->bbCatchTyp;
- handlerStart->bbCatchTyp = BBCT_NONE; // Now handlerStart is no longer the start of a handler...
- newHndStart->bbCodeOffs = handlerStart->bbCodeOffs;
- newHndStart->bbCodeOffsEnd = newHndStart->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead?
+ newHndStart->bbCatchTyp = handlerStart->bbCatchTyp;
+ handlerStart->bbCatchTyp = BBCT_NONE; // Now handlerStart is no longer the start of a handler...
+ newHndStart->bbCodeOffs = handlerStart->bbCodeOffs;
+ newHndStart->bbCodeOffsEnd = newHndStart->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead?
newHndStart->inheritWeight(handlerStart);
#if FEATURE_STACK_FP_X87
newHndStart->bbFPStateX87 = codeGen->FlatFPAllocFPState(handlerStart->bbFPStateX87);
@@ -2258,7 +2204,7 @@ bool Compiler::fgNormalizeEHCase1()
modified = true;
#ifdef DEBUG
- if (0&&verbose) // Normally this is way too verbose, but it is useful for debugging
+ if (0 && verbose) // Normally this is way too verbose, but it is useful for debugging
{
printf("*************** fgNormalizeEH() made a change\n");
fgDispBasicBlocks();
@@ -2287,22 +2233,22 @@ bool Compiler::fgNormalizeEHCase2()
if (eh->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)
{
- BasicBlock* tryStart = eh->ebdTryBeg;
+ BasicBlock* tryStart = eh->ebdTryBeg;
BasicBlock* insertBeforeBlk = tryStart; // If we need to insert new blocks, we insert before this block.
// We need to keep track of the last "mutually protect" region so we can properly not add additional header
// blocks to the second and subsequent mutually protect try blocks. We can't just keep track of the EH
// region pointer, because we're updating the 'try' begin blocks as we go. So, we need to keep track of the
// pre-update 'try' begin/last blocks themselves.
- BasicBlock* mutualTryBeg = eh->ebdTryBeg;
- BasicBlock* mutualTryLast = eh->ebdTryLast;
- unsigned mutualProtectIndex = XTnum;
+ BasicBlock* mutualTryBeg = eh->ebdTryBeg;
+ BasicBlock* mutualTryLast = eh->ebdTryLast;
+ unsigned mutualProtectIndex = XTnum;
EHblkDsc* ehOuter = eh;
do
{
- unsigned ehOuterTryIndex = ehOuter->ebdEnclosingTryIndex;
- ehOuter = ehGetDsc(ehOuterTryIndex);
+ unsigned ehOuterTryIndex = ehOuter->ebdEnclosingTryIndex;
+ ehOuter = ehGetDsc(ehOuterTryIndex);
BasicBlock* outerTryStart = ehOuter->ebdTryBeg;
if (outerTryStart == tryStart)
{
@@ -2310,7 +2256,7 @@ bool Compiler::fgNormalizeEHCase2()
if (ehOuter->ebdIsSameTry(mutualTryBeg, mutualTryLast))
{
- // clang-format off
+// clang-format off
// Don't touch mutually-protect regions: their 'try' regions must remain identical!
// We want to continue the looping outwards, in case we have something like this:
//
@@ -2359,13 +2305,13 @@ bool Compiler::fgNormalizeEHCase2()
//
// In this case, all the 'try' start at the same block! Note that there are two sets of mutually-protect regions,
// separated by some nesting.
- // clang-format on
+// clang-format on
#ifdef DEBUG
if (verbose)
{
printf("Mutually protect regions EH#%u and EH#%u; leaving identical 'try' begin blocks.\n",
- mutualProtectIndex, ehGetIndex(ehOuter));
+ mutualProtectIndex, ehGetIndex(ehOuter));
}
#endif // DEBUG
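The "mutually protect" check above is just an identity test on the 'try' extent: two clauses protect the same code when they share the same 'try' begin and last blocks, as a single try with several catch clauses does. A small illustrative version, assuming the TryDesc stand-in below rather than the real EHblkDsc::ebdIsSameTry:

    #include <cassert>

    // Illustrative stand-in; the real comparison is on BasicBlock pointers in EHblkDsc.
    struct TryDesc
    {
        unsigned tryBegNum;  // block number of the first 'try' block
        unsigned tryLastNum; // block number of the last 'try' block
    };

    // Two clauses are "mutually protect" when they guard exactly the same 'try' blocks,
    // as happens for try { ... } catch (A) { ... } catch (B) { ... }.
    bool IsSameTry(const TryDesc& a, const TryDesc& b)
    {
        return (a.tryBegNum == b.tryBegNum) && (a.tryLastNum == b.tryLastNum);
    }

    int main()
    {
        TryDesc catchA{5, 9};
        TryDesc catchB{5, 9};
        TryDesc nested{6, 7};
        assert(IsSameTry(catchA, catchB));  // mutually protect: leave their 'try' begins alone
        assert(!IsSameTry(catchA, nested)); // genuinely nested: normalization may insert a block
        return 0;
    }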
@@ -2375,8 +2321,8 @@ bool Compiler::fgNormalizeEHCase2()
else
{
// We're in a new set of mutual protect regions, so don't compare against the original.
- mutualTryBeg = ehOuter->ebdTryBeg;
- mutualTryLast = ehOuter->ebdTryLast;
+ mutualTryBeg = ehOuter->ebdTryBeg;
+ mutualTryLast = ehOuter->ebdTryLast;
mutualProtectIndex = ehOuterTryIndex;
// We're going to need the preds. We compute them here, before inserting the new block,
@@ -2396,25 +2342,28 @@ bool Compiler::fgNormalizeEHCase2()
#ifdef DEBUG
if (verbose)
{
- printf("'try' begin for EH#%u and EH#%u are same block; inserted new BB%02u before BB%02u as new 'try' begin for EH#%u.\n",
- ehOuterTryIndex, XTnum, newTryStart->bbNum, insertBeforeBlk->bbNum, ehOuterTryIndex);
+ printf("'try' begin for EH#%u and EH#%u are same block; inserted new BB%02u before BB%02u "
+ "as new 'try' begin for EH#%u.\n",
+ ehOuterTryIndex, XTnum, newTryStart->bbNum, insertBeforeBlk->bbNum, ehOuterTryIndex);
}
#endif // DEBUG
// The new block is the new 'try' begin.
ehOuter->ebdTryBeg = newTryStart;
- newTryStart->copyEHRegion(tryStart); // Copy the EH region info
- newTryStart->setTryIndex(ehOuterTryIndex); // ... but overwrite the 'try' index
+ newTryStart->copyEHRegion(tryStart); // Copy the EH region info
+ newTryStart->setTryIndex(ehOuterTryIndex); // ... but overwrite the 'try' index
newTryStart->bbCatchTyp = BBCT_NONE;
newTryStart->bbCodeOffs = tryStart->bbCodeOffs;
- newTryStart->bbCodeOffsEnd = newTryStart->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead?
+ newTryStart->bbCodeOffsEnd =
+ newTryStart->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead?
newTryStart->inheritWeight(tryStart);
#if FEATURE_STACK_FP_X87
newTryStart->bbFPStateX87 = codeGen->FlatFPAllocFPState(tryStart->bbFPStateX87);
#endif // FEATURE_STACK_FP_X87
- // Note that we don't need to clear any flags on the old try start, since it is still a 'try' start.
+ // Note that we don't need to clear any flags on the old try start, since it is still a 'try'
+ // start.
newTryStart->bbFlags |= (BBF_TRY_BEG | BBF_DONT_REMOVE | BBF_INTERNAL | BBF_HAS_LABEL);
                    // Now we need to split any flow edges targeting the old try begin block between the old
@@ -2478,8 +2427,8 @@ bool Compiler::fgNormalizeEHCase2()
#ifdef DEBUG
if (verbose)
{
- printf("Redirect BB%02u target from BB%02u to BB%02u.\n",
- predBlock->bbNum, insertBeforeBlk->bbNum, newTryStart->bbNum);
+ printf("Redirect BB%02u target from BB%02u to BB%02u.\n", predBlock->bbNum,
+ insertBeforeBlk->bbNum, newTryStart->bbNum);
}
#endif // DEBUG
}
@@ -2498,7 +2447,7 @@ bool Compiler::fgNormalizeEHCase2()
modified = true;
#ifdef DEBUG
- if (0&&verbose) // Normally this is way too verbose, but it is useful for debugging
+ if (0 && verbose) // Normally this is way too verbose, but it is useful for debugging
{
printf("*************** fgNormalizeEH() made a change\n");
fgDispBasicBlocks();
@@ -2529,8 +2478,7 @@ bool Compiler::fgNormalizeEHCase2()
break;
}
- }
- while (ehOuter->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX);
+ } while (ehOuter->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX);
}
}
@@ -2566,14 +2514,14 @@ bool Compiler::fgNormalizeEHCase3()
EHblkDsc* eh = ehGetDsc(XTnum);
// Find the EH region 'eh' is most nested within, either 'try' or handler or none.
- bool outerIsTryRegion;
+ bool outerIsTryRegion;
unsigned ehOuterIndex = eh->ebdGetEnclosingRegionIndex(&outerIsTryRegion);
if (ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX)
{
- EHblkDsc* ehInner = eh; // This gets updated as we loop outwards in the EH nesting
- unsigned ehInnerIndex = XTnum; // This gets updated as we loop outwards in the EH nesting
- bool innerIsTryRegion;
+ EHblkDsc* ehInner = eh; // This gets updated as we loop outwards in the EH nesting
+ unsigned ehInnerIndex = XTnum; // This gets updated as we loop outwards in the EH nesting
+ bool innerIsTryRegion;
EHblkDsc* ehOuter = ehGetDsc(ehOuterIndex);
@@ -2607,7 +2555,7 @@ bool Compiler::fgNormalizeEHCase3()
// } } } } } // all the 'last' blocks are the same
//
// after normalization:
- //
+ //
// try { // EH#5
// ...
// catch { // EH#4
@@ -2670,7 +2618,7 @@ bool Compiler::fgNormalizeEHCase3()
// Case (1) try nested in try.
foundMatchingLastBlock = true;
INDEBUG(innerType = "try"; outerType = "try";)
- insertAfterBlk = ehOuter->ebdTryLast;
+ insertAfterBlk = ehOuter->ebdTryLast;
lastBlockPtrToCompare = insertAfterBlk;
if (EHblkDsc::ebdIsSameTry(ehOuter, ehInner))
@@ -2681,7 +2629,7 @@ bool Compiler::fgNormalizeEHCase3()
if (verbose)
{
printf("Mutual protect regions EH#%u and EH#%u; leaving identical 'try' last blocks.\n",
- ehOuterIndex, ehInnerIndex);
+ ehOuterIndex, ehInnerIndex);
}
#endif // DEBUG
@@ -2689,7 +2637,8 @@ bool Compiler::fgNormalizeEHCase3()
}
else
{
- nextHndIndex = ehInner->ebdTryLast->hasHndIndex() ? ehInner->ebdTryLast->getHndIndex() : EHblkDsc::NO_ENCLOSING_INDEX;
+ nextHndIndex = ehInner->ebdTryLast->hasHndIndex() ? ehInner->ebdTryLast->getHndIndex()
+ : EHblkDsc::NO_ENCLOSING_INDEX;
}
}
else if (ehOuter->ebdTryLast == ehInner->ebdHndLast)
@@ -2697,7 +2646,7 @@ bool Compiler::fgNormalizeEHCase3()
// Case (2) handler nested in try.
foundMatchingLastBlock = true;
INDEBUG(innerType = "handler"; outerType = "try";)
- insertAfterBlk = ehOuter->ebdTryLast;
+ insertAfterBlk = ehOuter->ebdTryLast;
lastBlockPtrToCompare = insertAfterBlk;
assert(ehInner->ebdHndLast->getHndIndex() == ehInnerIndex);
@@ -2719,14 +2668,14 @@ bool Compiler::fgNormalizeEHCase3()
{
nextHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a handler region.
- // The outer (enclosing) region is a handler (note that it can't be a filter; there is no nesting
+ // The outer (enclosing) region is a handler (note that it can't be a filter; there is no nesting
// within a filter).
if (ehOuter->ebdHndLast == ehInner->ebdTryLast)
{
// Case (3) try nested in handler.
foundMatchingLastBlock = true;
INDEBUG(innerType = "try"; outerType = "handler";)
- insertAfterBlk = ehOuter->ebdHndLast;
+ insertAfterBlk = ehOuter->ebdHndLast;
lastBlockPtrToCompare = insertAfterBlk;
assert(ehInner->ebdTryLast->getTryIndex() == ehInnerIndex);
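Cases (1) through (4) in this function reduce to comparing the outer region's last block against the inner region's last 'try' and last handler blocks, with the outer side being either a 'try' or a handler. An illustrative classification of the four cases, assuming the hypothetical Classify helper and plain block numbers below:

    #include <cstdio>

    // Illustrative only: decide which shared-'last' case applies, given the
    // inner/outer regions' last 'try' and handler block numbers.
    enum SharedLastCase { NoMatch, TryInTry, HndInTry, TryInHnd, HndInHnd };

    SharedLastCase Classify(bool outerIsTry, unsigned outerLast,
                            unsigned innerTryLast, unsigned innerHndLast)
    {
        if (outerIsTry)
        {
            if (outerLast == innerTryLast) return TryInTry; // case (1) try nested in try
            if (outerLast == innerHndLast) return HndInTry; // case (2) handler nested in try
        }
        else
        {
            if (outerLast == innerTryLast) return TryInHnd; // case (3) try nested in handler
            if (outerLast == innerHndLast) return HndInHnd; // case (4) handler nested in handler
        }
        return NoMatch;
    }

    int main()
    {
        // Outer is a 'try' whose last block (BB12) is also the inner handler's last block: case (2).
        printf("case = %d\n", Classify(true, 12, 9, 12));
        return 0;
    }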
@@ -2737,10 +2686,11 @@ bool Compiler::fgNormalizeEHCase3()
// Case (4) handler nested in handler.
foundMatchingLastBlock = true;
INDEBUG(innerType = "handler"; outerType = "handler";)
- insertAfterBlk = ehOuter->ebdHndLast;
+ insertAfterBlk = ehOuter->ebdHndLast;
lastBlockPtrToCompare = insertAfterBlk;
- nextTryIndex = ehInner->ebdTryLast->hasTryIndex() ? ehInner->ebdTryLast->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX;
+ nextTryIndex = ehInner->ebdTryLast->hasTryIndex() ? ehInner->ebdTryLast->getTryIndex()
+ : EHblkDsc::NO_ENCLOSING_INDEX;
}
else
{
@@ -2759,7 +2709,8 @@ bool Compiler::fgNormalizeEHCase3()
if (insertNormalizationBlock)
{
- // Add a new last block for 'ehOuter' that will be outside the EH region with which it encloses and shares a 'last' pointer
+ // Add a new last block for 'ehOuter' that will be outside the EH region with which it encloses and
+ // shares a 'last' pointer
BasicBlock* newLast = bbNewBasicBlock(BBJ_NONE);
assert(insertAfterBlk != nullptr);
@@ -2768,8 +2719,10 @@ bool Compiler::fgNormalizeEHCase3()
#ifdef DEBUG
if (verbose)
{
- printf("last %s block for EH#%u and last %s block for EH#%u are same block; inserted new BB%02u after BB%02u as new last %s block for EH#%u.\n",
- outerType, ehOuterIndex, innerType, ehInnerIndex, newLast->bbNum, insertAfterBlk->bbNum, outerType, ehOuterIndex);
+ printf("last %s block for EH#%u and last %s block for EH#%u are same block; inserted new "
+ "BB%02u after BB%02u as new last %s block for EH#%u.\n",
+ outerType, ehOuterIndex, innerType, ehInnerIndex, newLast->bbNum, insertAfterBlk->bbNum,
+ outerType, ehOuterIndex);
}
#endif // DEBUG
@@ -2800,8 +2753,9 @@ bool Compiler::fgNormalizeEHCase3()
newLast->setHndIndex(ehOuterIndex);
}
- newLast->bbCatchTyp = BBCT_NONE; // bbCatchTyp is only set on the first block of a handler, which is this not
- newLast->bbCodeOffs = insertAfterBlk->bbCodeOffsEnd;
+ newLast->bbCatchTyp =
+                    BBCT_NONE; // bbCatchTyp is only set on the first block of a handler, which this is not
+ newLast->bbCodeOffs = insertAfterBlk->bbCodeOffsEnd;
newLast->bbCodeOffsEnd = newLast->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead?
newLast->inheritWeight(insertAfterBlk);
#if FEATURE_STACK_FP_X87
@@ -2822,7 +2776,7 @@ bool Compiler::fgNormalizeEHCase3()
modified = true;
#ifdef DEBUG
- if (verbose) // Normally this is way too verbose, but it is useful for debugging
+ if (verbose) // Normally this is way too verbose, but it is useful for debugging
{
printf("*************** fgNormalizeEH() made a change\n");
fgDispBasicBlocks();
@@ -2833,11 +2787,12 @@ bool Compiler::fgNormalizeEHCase3()
// Now find the next outer enclosing EH region and see if it also shares the last block.
foundMatchingLastBlock = false; // assume nothing will match
- ehInner = ehOuter;
- ehInnerIndex = ehOuterIndex;
- innerIsTryRegion = outerIsTryRegion;
+ ehInner = ehOuter;
+ ehInnerIndex = ehOuterIndex;
+ innerIsTryRegion = outerIsTryRegion;
- ehOuterIndex = ehOuter->ebdGetEnclosingRegionIndex(&outerIsTryRegion); // Loop outwards in the EH nesting.
+ ehOuterIndex =
+ ehOuter->ebdGetEnclosingRegionIndex(&outerIsTryRegion); // Loop outwards in the EH nesting.
if (ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX)
{
// There are more enclosing regions; check for equivalent 'last' pointers.
@@ -2867,8 +2822,9 @@ bool Compiler::fgNormalizeEHCase3()
#ifdef DEBUG
if (verbose)
{
- printf("Mutual protect regions EH#%u and EH#%u; leaving identical 'try' last blocks.\n",
- ehOuterIndex, ehInnerIndex);
+ printf("Mutual protect regions EH#%u and EH#%u; leaving identical 'try' last "
+ "blocks.\n",
+ ehOuterIndex, ehInnerIndex);
}
#endif // DEBUG
@@ -2883,7 +2839,9 @@ bool Compiler::fgNormalizeEHCase3()
if (innerIsTryRegion)
{
// Case (1) try nested in try.
- nextHndIndex = ehInner->ebdTryLast->hasHndIndex() ? ehInner->ebdTryLast->getHndIndex() : EHblkDsc::NO_ENCLOSING_INDEX;
+ nextHndIndex = ehInner->ebdTryLast->hasHndIndex()
+ ? ehInner->ebdTryLast->getHndIndex()
+ : EHblkDsc::NO_ENCLOSING_INDEX;
}
else
{
@@ -2893,16 +2851,19 @@ bool Compiler::fgNormalizeEHCase3()
}
}
- // The outer might be part of a new set of mutual protect regions (if it isn't part of one already).
+ // The outer might be part of a new set of mutual protect regions (if it isn't part of one
+ // already).
mutualTryBeg = ehOuter->ebdTryBeg;
mutualTryLast = ehOuter->ebdTryLast;
}
}
else
{
- nextHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a handler region.
+ nextHndIndex =
+ EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a handler region.
- // The outer (enclosing) region is a handler (note that it can't be a filter; there is no nesting within a filter).
+ // The outer (enclosing) region is a handler (note that it can't be a filter; there is no
+ // nesting within a filter).
if (ehOuter->ebdHndLast == lastBlockPtrToCompare)
{
// Case (3) and (4): try nested in try or handler.
@@ -2918,7 +2879,8 @@ bool Compiler::fgNormalizeEHCase3()
else
{
// Case (4) handler nested in handler.
- nextTryIndex = ehInner->ebdTryLast->hasTryIndex() ? ehInner->ebdTryLast->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX;
+ nextTryIndex = ehInner->ebdTryLast->hasTryIndex() ? ehInner->ebdTryLast->getTryIndex()
+ : EHblkDsc::NO_ENCLOSING_INDEX;
}
}
}
@@ -2940,17 +2902,16 @@ bool Compiler::fgNormalizeEHCase3()
// we'll get to try3 and process it and try4.
} // end while (foundMatchingLastBlock)
- } // if (ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX)
- } // EH table iteration
+ } // if (ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX)
+ } // EH table iteration
return modified;
}
-
/*****************************************************************************/
#ifdef DEBUG
-void Compiler::dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause)
+void Compiler::dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause)
{
printf("EH clause #%u:\n", num);
printf(" Flags: 0x%x", clause.Flags);
@@ -2961,21 +2922,21 @@ void Compiler::dispIncomingEHClause(unsigned num, const CORINFO_E
const DWORD CORINFO_EH_CLAUSE_TYPE_MASK = 0x7;
switch (clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK)
{
- case CORINFO_EH_CLAUSE_NONE:
- printf(" (catch)");
- break;
- case CORINFO_EH_CLAUSE_FILTER:
- printf(" (filter)");
- break;
- case CORINFO_EH_CLAUSE_FINALLY:
- printf(" (finally)");
- break;
- case CORINFO_EH_CLAUSE_FAULT:
- printf(" (fault)");
- break;
- default:
- printf(" (UNKNOWN type %u!)", clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK);
- break;
+ case CORINFO_EH_CLAUSE_NONE:
+ printf(" (catch)");
+ break;
+ case CORINFO_EH_CLAUSE_FILTER:
+ printf(" (filter)");
+ break;
+ case CORINFO_EH_CLAUSE_FINALLY:
+ printf(" (finally)");
+ break;
+ case CORINFO_EH_CLAUSE_FAULT:
+ printf(" (fault)");
+ break;
+ default:
+ printf(" (UNKNOWN type %u!)", clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK);
+ break;
}
if (clause.Flags & ~CORINFO_EH_CLAUSE_TYPE_MASK)
{
@@ -2997,27 +2958,20 @@ void Compiler::dispIncomingEHClause(unsigned num, const CORINFO_E
}
}
-void Compiler::dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause)
+void Compiler::dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause)
{
if (opts.dspDiffable)
{
/* (( brace matching editor workaround to compensate for the following line */
- printf("EH#%u: try [%s..%s) handled by [%s..%s) ",
- num,
- genEmitter->emitOffsetToLabel(clause.TryOffset),
- genEmitter->emitOffsetToLabel(clause.TryLength),
- genEmitter->emitOffsetToLabel(clause.HandlerOffset),
- genEmitter->emitOffsetToLabel(clause.HandlerLength));
+ printf("EH#%u: try [%s..%s) handled by [%s..%s) ", num, genEmitter->emitOffsetToLabel(clause.TryOffset),
+ genEmitter->emitOffsetToLabel(clause.TryLength), genEmitter->emitOffsetToLabel(clause.HandlerOffset),
+ genEmitter->emitOffsetToLabel(clause.HandlerLength));
}
else
{
/* (( brace matching editor workaround to compensate for the following line */
- printf("EH#%u: try [%04X..%04X) handled by [%04X..%04X) ",
- num,
- dspOffset(clause.TryOffset),
- dspOffset(clause.TryLength),
- dspOffset(clause.HandlerOffset),
- dspOffset(clause.HandlerLength));
+ printf("EH#%u: try [%04X..%04X) handled by [%04X..%04X) ", num, dspOffset(clause.TryOffset),
+ dspOffset(clause.TryLength), dspOffset(clause.HandlerOffset), dspOffset(clause.HandlerLength));
}
// Note: the flags field is kind of weird. It should be compared for equality
@@ -3028,38 +2982,37 @@ void Compiler::dispOutgoingEHClause(unsigned num, const CORINFO_E
const DWORD CORINFO_EH_CLAUSE_TYPE_MASK = 0x7;
switch (clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK)
{
- case CORINFO_EH_CLAUSE_NONE:
- printf("(class: %04X)", clause.ClassToken);
- break;
- case CORINFO_EH_CLAUSE_FILTER:
- if (opts.dspDiffable)
- {
- /* ( brace matching editor workaround to compensate for the following line */
- printf("filter at [%s..%s)",
- genEmitter->emitOffsetToLabel(clause.ClassToken),
- genEmitter->emitOffsetToLabel(clause.HandlerOffset));
- }
- else
- {
- /* ( brace matching editor workaround to compensate for the following line */
- printf("filter at [%04X..%04X)", dspOffset(clause.ClassToken), dspOffset(clause.HandlerOffset));
- }
- break;
- case CORINFO_EH_CLAUSE_FINALLY:
- printf("(finally)");
- break;
- case CORINFO_EH_CLAUSE_FAULT:
- printf("(fault)");
- break;
- default:
- printf("(UNKNOWN type %u!)", clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK);
- assert(!"unknown type");
- break;
+ case CORINFO_EH_CLAUSE_NONE:
+ printf("(class: %04X)", clause.ClassToken);
+ break;
+ case CORINFO_EH_CLAUSE_FILTER:
+ if (opts.dspDiffable)
+ {
+ /* ( brace matching editor workaround to compensate for the following line */
+ printf("filter at [%s..%s)", genEmitter->emitOffsetToLabel(clause.ClassToken),
+ genEmitter->emitOffsetToLabel(clause.HandlerOffset));
+ }
+ else
+ {
+ /* ( brace matching editor workaround to compensate for the following line */
+ printf("filter at [%04X..%04X)", dspOffset(clause.ClassToken), dspOffset(clause.HandlerOffset));
+ }
+ break;
+ case CORINFO_EH_CLAUSE_FINALLY:
+ printf("(finally)");
+ break;
+ case CORINFO_EH_CLAUSE_FAULT:
+ printf("(fault)");
+ break;
+ default:
+ printf("(UNKNOWN type %u!)", clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK);
+ assert(!"unknown type");
+ break;
}
- if ((clause.TryOffset == clause.TryLength) &&
- (clause.TryOffset == clause.HandlerOffset) &&
- ((clause.Flags & (COR_ILEXCEPTION_CLAUSE_DUPLICATED | COR_ILEXCEPTION_CLAUSE_FINALLY)) == (COR_ILEXCEPTION_CLAUSE_DUPLICATED | COR_ILEXCEPTION_CLAUSE_FINALLY)))
+ if ((clause.TryOffset == clause.TryLength) && (clause.TryOffset == clause.HandlerOffset) &&
+ ((clause.Flags & (COR_ILEXCEPTION_CLAUSE_DUPLICATED | COR_ILEXCEPTION_CLAUSE_FINALLY)) ==
+ (COR_ILEXCEPTION_CLAUSE_DUPLICATED | COR_ILEXCEPTION_CLAUSE_FINALLY)))
{
printf(" cloned finally");
}
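The low three bits of the flags field select the clause kind, and everything above the mask is modifier flags such as the duplicated-clause bit tested above. A small sketch of that decoding, assuming the illustrative constants below rather than the real corinfo.h values:

    #include <cstdio>

    // Illustrative constants mirroring the 3-bit clause-type encoding used above;
    // the real values and names come from corinfo.h.
    const unsigned CLAUSE_TYPE_MASK = 0x7;
    const unsigned CLAUSE_NONE = 0, CLAUSE_FILTER = 1, CLAUSE_FINALLY = 2, CLAUSE_FAULT = 4;
    const unsigned CLAUSE_DUPLICATED = 0x80; // hypothetical extra flag bit for illustration

    int main()
    {
        unsigned flags = CLAUSE_FINALLY | CLAUSE_DUPLICATED;
        // Only the low three bits identify the clause kind; the rest are modifier flags.
        switch (flags & CLAUSE_TYPE_MASK)
        {
            case CLAUSE_NONE:    printf("catch\n");   break;
            case CLAUSE_FILTER:  printf("filter\n");  break;
            case CLAUSE_FINALLY: printf("finally\n"); break;
            case CLAUSE_FAULT:   printf("fault\n");   break;
            default:             printf("unknown\n"); break;
        }
        printf("extra flag bits: 0x%x\n", flags & ~CLAUSE_TYPE_MASK);
        return 0;
    }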
@@ -3072,7 +3025,7 @@ void Compiler::dispOutgoingEHClause(unsigned num, const CORINFO_E
/*****************************************************************************/
-void Compiler::fgVerifyHandlerTab()
+void Compiler::fgVerifyHandlerTab()
{
if (compIsForInlining())
{
@@ -3088,24 +3041,24 @@ void Compiler::fgVerifyHandlerTab()
// Did we do the normalization that prevents the first block of a handler from being a 'try' block (case 1)?
bool handlerBegIsTryBegNormalizationDone = fgNormalizeEHDone;
- // Did we do the normalization that prevents multiple EH regions (namely, 'try' blocks) from starting on the same block (case 2)?
+ // Did we do the normalization that prevents multiple EH regions (namely, 'try' blocks) from starting on the same
+ // block (case 2)?
bool multipleBegBlockNormalizationDone = fgNormalizeEHDone;
- // Did we do the normalization that prevents multiple EH regions ('try' or handler blocks) from ending on the same block (case 3)?
+ // Did we do the normalization that prevents multiple EH regions ('try' or handler blocks) from ending on the same
+ // block (case 3)?
bool multipleLastBlockNormalizationDone = false; // Currently disabled
assert(compHndBBtabCount <= compHndBBtabAllocCount);
- unsigned XTnum;
- EHblkDsc* HBtab;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++, HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
- assert(HBtab->ebdTryBeg != nullptr);
+ assert(HBtab->ebdTryBeg != nullptr);
assert(HBtab->ebdTryLast != nullptr);
- assert(HBtab->ebdHndBeg != nullptr);
+ assert(HBtab->ebdHndBeg != nullptr);
assert(HBtab->ebdHndLast != nullptr);
assert(HBtab->ebdTryBeg->bbFlags & BBF_TRY_BEG);
@@ -3115,9 +3068,9 @@ void Compiler::fgVerifyHandlerTab()
assert(HBtab->ebdHndBeg->bbFlags & BBF_DONT_REMOVE);
assert(HBtab->ebdHndBeg->bbFlags & BBF_HAS_LABEL);
- assert((HBtab->ebdTryBeg->bbFlags & BBF_REMOVED) == 0);
+ assert((HBtab->ebdTryBeg->bbFlags & BBF_REMOVED) == 0);
assert((HBtab->ebdTryLast->bbFlags & BBF_REMOVED) == 0);
- assert((HBtab->ebdHndBeg->bbFlags & BBF_REMOVED) == 0);
+ assert((HBtab->ebdHndBeg->bbFlags & BBF_REMOVED) == 0);
assert((HBtab->ebdHndLast->bbFlags & BBF_REMOVED) == 0);
if (HBtab->HasFilter())
@@ -3138,7 +3091,6 @@ void Compiler::fgVerifyHandlerTab()
}
}
#endif // FEATURE_EH_FUNCLETS
-
}
// I want to assert things about the relative ordering of blocks in the block list using
@@ -3149,21 +3101,21 @@ void Compiler::fgVerifyHandlerTab()
unsigned bbNumMax = compIsForInlining() ? impInlineInfo->InlinerCompiler->fgBBNumMax : fgBBNumMax;
// blockNumMap[old block number] => new block number
- size_t blockNumBytes = (bbNumMax + 1) * sizeof(unsigned);
- unsigned* blockNumMap = (unsigned*)_alloca(blockNumBytes);
+ size_t blockNumBytes = (bbNumMax + 1) * sizeof(unsigned);
+ unsigned* blockNumMap = (unsigned*)_alloca(blockNumBytes);
memset(blockNumMap, 0, blockNumBytes);
BasicBlock* block;
- unsigned newBBnum = 1;
+ unsigned newBBnum = 1;
for (block = fgFirstBB; block != nullptr; block = block->bbNext)
{
assert((block->bbFlags & BBF_REMOVED) == 0);
assert(1 <= block->bbNum && block->bbNum <= bbNumMax);
- assert(blockNumMap[block->bbNum] == 0); // If this fails, we have two blocks with the same block number.
+ assert(blockNumMap[block->bbNum] == 0); // If this fails, we have two blocks with the same block number.
blockNumMap[block->bbNum] = newBBnum++;
}
- // Note that there may be some blockNumMap[x] == 0, for a block number 'x' that has been deleted, if the blocks
- // haven't been renumbered since the deletion.
+// Note that there may be some blockNumMap[x] == 0, for a block number 'x' that has been deleted, if the blocks
+// haven't been renumbered since the deletion.
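The map built above gives every live block a compact, strictly increasing number, so later asserts can compare relative block order even though raw bbNum values may have gaps from deleted blocks. A standalone sketch of the same idea, assuming the toy Blk list below:

    #include <cstdio>
    #include <cstring>

    // Illustrative sketch only; the real code allocates the map with _alloca over fgBBNumMax entries.
    struct Blk
    {
        unsigned num; // possibly sparse "bbNum"
        Blk*     next;
    };

    int main()
    {
        Blk b7{7, nullptr}, b4{4, &b7}, b2{2, &b4};
        Blk*     first    = &b2;
        unsigned bbNumMax = 7;

        unsigned blockNumMap[8]; // bbNumMax + 1 entries; 0 means "not in the block list"
        memset(blockNumMap, 0, sizeof(blockNumMap));

        unsigned newNum = 1;
        for (Blk* b = first; b != nullptr; b = b->next)
        {
            blockNumMap[b->num] = newNum++; // assign compact numbers in list order
        }

        for (unsigned i = 0; i <= bbNumMax; i++)
        {
            printf("old BB%02u -> new %u\n", i, blockNumMap[i]);
        }
        return 0;
    }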
#if 0 // Useful for debugging, but don't want to put this in the dump all the time
if (verbose)
@@ -3179,16 +3131,20 @@ void Compiler::fgVerifyHandlerTab()
}
#endif
- // To verify that bbCatchTyp is set properly on all blocks, and that some BBF_* flags are only set on the first block
- // of 'try' or handlers, create two bool arrays indexed by block number: one for the set of blocks that are the beginning
- // blocks of 'try' regions, and one for blocks that are the beginning of handlers (including filters). Note that since
- // this checking function runs before EH normalization, we have to handle the case where blocks can be both the beginning
+ // To verify that bbCatchTyp is set properly on all blocks, and that some BBF_* flags are only set on the first
+ // block
+ // of 'try' or handlers, create two bool arrays indexed by block number: one for the set of blocks that are the
+ // beginning
+ // blocks of 'try' regions, and one for blocks that are the beginning of handlers (including filters). Note that
+ // since
+ // this checking function runs before EH normalization, we have to handle the case where blocks can be both the
+ // beginning
// of a 'try' as well as the beginning of a handler. After we've iterated over the EH table, loop
    // over all blocks and verify that only handler begin blocks have bbCatchTyp != BBCT_NONE, and some other things.
size_t blockBoolSetBytes = (bbNumMax + 1) * sizeof(bool);
- bool* blockTryBegSet = (bool*)_alloca(blockBoolSetBytes);
- bool* blockHndBegSet = (bool*)_alloca(blockBoolSetBytes);
+ bool* blockTryBegSet = (bool*)_alloca(blockBoolSetBytes);
+ bool* blockHndBegSet = (bool*)_alloca(blockBoolSetBytes);
for (unsigned i = 0; i <= bbNumMax; i++)
{
blockTryBegSet[i] = false;
@@ -3196,8 +3152,8 @@ void Compiler::fgVerifyHandlerTab()
}
#if FEATURE_EH_FUNCLETS
- bool isLegalFirstFunclet = false;
- unsigned bbNumFirstFunclet = 0;
+ bool isLegalFirstFunclet = false;
+ unsigned bbNumFirstFunclet = 0;
if (fgFuncletsCreated)
{
@@ -3213,24 +3169,22 @@ void Compiler::fgVerifyHandlerTab()
}
#endif // FEATURE_EH_FUNCLETS
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++, HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
unsigned bbNumTryBeg = blockNumMap[HBtab->ebdTryBeg->bbNum];
unsigned bbNumTryLast = blockNumMap[HBtab->ebdTryLast->bbNum];
unsigned bbNumHndBeg = blockNumMap[HBtab->ebdHndBeg->bbNum];
unsigned bbNumHndLast = blockNumMap[HBtab->ebdHndLast->bbNum];
- unsigned bbNumFilter = 0; // This should never get used except under "if (HBtab->HasFilter())"
+ unsigned bbNumFilter = 0; // This should never get used except under "if (HBtab->HasFilter())"
if (HBtab->HasFilter())
{
bbNumFilter = blockNumMap[HBtab->ebdFilter->bbNum];
}
// Assert that the EH blocks are in the main block list
- assert(bbNumTryBeg != 0);
+ assert(bbNumTryBeg != 0);
assert(bbNumTryLast != 0);
- assert(bbNumHndBeg != 0);
+ assert(bbNumHndBeg != 0);
assert(bbNumHndLast != 0);
if (HBtab->HasFilter())
{
@@ -3251,7 +3205,8 @@ void Compiler::fgVerifyHandlerTab()
assert(bbNumFilter < bbNumHndBeg);
}
- // The EH regions are disjoint: the handler (including the filter, if applicable) is strictly before or after the 'try'.
+ // The EH regions are disjoint: the handler (including the filter, if applicable) is strictly before or after
+ // the 'try'.
if (HBtab->HasFilter())
{
assert((bbNumHndLast < bbNumTryBeg) || (bbNumTryLast < bbNumFilter));
@@ -3279,7 +3234,8 @@ void Compiler::fgVerifyHandlerTab()
// The last block of the 'try' is in the funclet region; make sure the whole thing is.
if (multipleBegBlockNormalizationDone)
{
- assert(bbNumTryBeg > bbNumFirstFunclet); // ">" because a 'try' can't be the first block of a handler (by EH normalization).
+ assert(bbNumTryBeg > bbNumFirstFunclet); // ">" because a 'try' can't be the first block of a
+ // handler (by EH normalization).
}
else
{
@@ -3312,17 +3268,18 @@ void Compiler::fgVerifyHandlerTab()
#endif // FEATURE_EH_FUNCLETS
// Check the 'try' region nesting, using ebdEnclosingTryIndex.
- // Only check one level of nesting, since we'll check the outer EH region (and its nesting) when we get to it later.
+ // Only check one level of nesting, since we'll check the outer EH region (and its nesting) when we get to it
+ // later.
if (HBtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)
{
assert(HBtab->ebdEnclosingTryIndex > XTnum); // The enclosing region must come after this one in the table
- EHblkDsc* HBtabOuter = ehGetDsc(HBtab->ebdEnclosingTryIndex);
- unsigned bbNumOuterTryBeg = blockNumMap[HBtabOuter->ebdTryBeg->bbNum];
- unsigned bbNumOuterTryLast = blockNumMap[HBtabOuter->ebdTryLast->bbNum];
+ EHblkDsc* HBtabOuter = ehGetDsc(HBtab->ebdEnclosingTryIndex);
+ unsigned bbNumOuterTryBeg = blockNumMap[HBtabOuter->ebdTryBeg->bbNum];
+ unsigned bbNumOuterTryLast = blockNumMap[HBtabOuter->ebdTryLast->bbNum];
// A few basic asserts (that will also get covered later, when this outer region gets handled).
- assert(bbNumOuterTryBeg != 0);
+ assert(bbNumOuterTryBeg != 0);
assert(bbNumOuterTryLast != 0);
assert(bbNumOuterTryBeg <= bbNumOuterTryLast);
@@ -3340,13 +3297,12 @@ void Compiler::fgVerifyHandlerTab()
// If both the 'try' region and the outer 'try' region are in the main function area, then we can
// do the normal nesting check. Otherwise, it's harder to find a useful assert to make about their
// relationship.
- if ((bbNumTryLast < bbNumFirstFunclet) &&
- (bbNumOuterTryLast < bbNumFirstFunclet))
+ if ((bbNumTryLast < bbNumFirstFunclet) && (bbNumOuterTryLast < bbNumFirstFunclet))
{
if (multipleBegBlockNormalizationDone)
{
- assert(bbNumOuterTryBeg < bbNumTryBeg); // Two 'try' regions can't start at the same
- // block (by EH normalization).
+ assert(bbNumOuterTryBeg < bbNumTryBeg); // Two 'try' regions can't start at the same
+ // block (by EH normalization).
}
else
{
@@ -3354,8 +3310,8 @@ void Compiler::fgVerifyHandlerTab()
}
if (multipleLastBlockNormalizationDone)
{
- assert(bbNumTryLast < bbNumOuterTryLast); // Two 'try' regions can't end at the same block
- //(by EH normalization).
+ assert(bbNumTryLast < bbNumOuterTryLast); // Two 'try' regions can't end at the same block
+ //(by EH normalization).
}
else
{
@@ -3372,15 +3328,15 @@ void Compiler::fgVerifyHandlerTab()
{
if (multipleBegBlockNormalizationDone)
{
- assert(bbNumOuterTryBeg < bbNumTryBeg); // Two 'try' regions can't start at the same block
- // (by EH normalization).
+ assert(bbNumOuterTryBeg < bbNumTryBeg); // Two 'try' regions can't start at the same block
+ // (by EH normalization).
}
else
{
assert(bbNumOuterTryBeg <= bbNumTryBeg);
}
- assert(bbNumOuterTryBeg < bbNumHndBeg); // An inner handler can never start at the same
- // block as an outer 'try' (by IL rules).
+ assert(bbNumOuterTryBeg < bbNumHndBeg); // An inner handler can never start at the same
+ // block as an outer 'try' (by IL rules).
if (multipleLastBlockNormalizationDone)
{
// An inner EH region can't share a 'last' block with the outer 'try' (by EH normalization).
@@ -3403,26 +3359,26 @@ void Compiler::fgVerifyHandlerTab()
if (HBtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX)
{
assert(HBtab->ebdEnclosingHndIndex > XTnum); // The enclosing region must come after this one in the table
- EHblkDsc* HBtabOuter = ehGetDsc(HBtab->ebdEnclosingHndIndex);
- unsigned bbNumOuterHndBeg = blockNumMap[HBtabOuter->ebdHndBeg->bbNum];
- unsigned bbNumOuterHndLast = blockNumMap[HBtabOuter->ebdHndLast->bbNum];
+ EHblkDsc* HBtabOuter = ehGetDsc(HBtab->ebdEnclosingHndIndex);
+ unsigned bbNumOuterHndBeg = blockNumMap[HBtabOuter->ebdHndBeg->bbNum];
+ unsigned bbNumOuterHndLast = blockNumMap[HBtabOuter->ebdHndLast->bbNum];
            // A few basic asserts (that will also get covered later, when this outer region gets handled).
- assert(bbNumOuterHndBeg != 0);
+ assert(bbNumOuterHndBeg != 0);
assert(bbNumOuterHndLast != 0);
assert(bbNumOuterHndBeg <= bbNumOuterHndLast);
- // The outer handler must completely contain all the blocks in the EH region nested within it. However, if
- // funclets have been created, it's harder to make any relationship asserts about the order of nested
- // handlers, which also have been made into funclets.
+// The outer handler must completely contain all the blocks in the EH region nested within it. However, if
+// funclets have been created, it's harder to make any relationship asserts about the order of nested
+// handlers, which also have been made into funclets.
#if FEATURE_EH_FUNCLETS
if (fgFuncletsCreated)
{
if (handlerBegIsTryBegNormalizationDone)
{
- assert(bbNumOuterHndBeg < bbNumTryBeg); // An inner 'try' can't start at the same block as an
- // outer handler (by EH normalization).
+ assert(bbNumOuterHndBeg < bbNumTryBeg); // An inner 'try' can't start at the same block as an
+ // outer handler (by EH normalization).
}
else
{
@@ -3430,8 +3386,8 @@ void Compiler::fgVerifyHandlerTab()
}
if (multipleLastBlockNormalizationDone)
{
- assert(bbNumTryLast < bbNumOuterHndLast); // An inner 'try' can't end at the same block as an
- // outer handler (by EH normalization).
+ assert(bbNumTryLast < bbNumOuterHndLast); // An inner 'try' can't end at the same block as an
+ // outer handler (by EH normalization).
}
else
{
@@ -3447,15 +3403,15 @@ void Compiler::fgVerifyHandlerTab()
{
if (handlerBegIsTryBegNormalizationDone)
{
- assert(bbNumOuterHndBeg < bbNumTryBeg); // An inner 'try' can't start at the same block as an
- // outer handler (by EH normalization).
+ assert(bbNumOuterHndBeg < bbNumTryBeg); // An inner 'try' can't start at the same block as an
+ // outer handler (by EH normalization).
}
else
{
assert(bbNumOuterHndBeg <= bbNumTryBeg);
}
- assert(bbNumOuterHndBeg < bbNumHndBeg); // An inner handler can never start at the same block
- // as an outer handler (by IL rules).
+ assert(bbNumOuterHndBeg < bbNumHndBeg); // An inner handler can never start at the same block
+ // as an outer handler (by IL rules).
if (multipleLastBlockNormalizationDone)
{
// An inner EH region can't share a 'last' block with the outer handler (by EH normalization).
@@ -3494,10 +3450,8 @@ void Compiler::fgVerifyHandlerTab()
}
else if (HBtab->HasCatchHandler())
{
- assert((HBtab->ebdHndBeg->bbCatchTyp != BBCT_NONE) &&
- (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FAULT) &&
- (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FINALLY) &&
- (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FILTER) &&
+ assert((HBtab->ebdHndBeg->bbCatchTyp != BBCT_NONE) && (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FAULT) &&
+ (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FINALLY) && (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FILTER) &&
(HBtab->ebdHndBeg->bbCatchTyp != BBCT_FILTER_HANDLER));
}
else if (HBtab->HasFaultHandler())
@@ -3522,15 +3476,13 @@ void Compiler::fgVerifyHandlerTab()
// otherwise set. The duplicate clause handler is truly a duplicate of
// a previously processed handler, so we ignore it.
- size_t blockIndexBytes = (bbNumMax + 1) * sizeof(unsigned short);
- unsigned short* blockTryIndex = (unsigned short*)_alloca(blockIndexBytes);
- unsigned short* blockHndIndex = (unsigned short*)_alloca(blockIndexBytes);
+ size_t blockIndexBytes = (bbNumMax + 1) * sizeof(unsigned short);
+ unsigned short* blockTryIndex = (unsigned short*)_alloca(blockIndexBytes);
+ unsigned short* blockHndIndex = (unsigned short*)_alloca(blockIndexBytes);
memset(blockTryIndex, 0, blockIndexBytes);
memset(blockHndIndex, 0, blockIndexBytes);
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++, HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
BasicBlock* blockEnd;
@@ -3542,7 +3494,8 @@ void Compiler::fgVerifyHandlerTab()
}
}
- for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->bbNext; block != blockEnd; block = block->bbNext)
+ for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->bbNext;
+ block != blockEnd; block = block->bbNext)
{
if (blockHndIndex[block->bbNum] == 0)
{
@@ -3558,9 +3511,7 @@ void Compiler::fgVerifyHandlerTab()
// we looped over above. This is similar to duplicate clause logic, but we only need to look at the most
// nested enclosing try index, not the entire set of enclosing try indices, since that is what we store
// on the block.
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++, HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
unsigned enclosingTryIndex = ehTrueEnclosingTryIndexIL(XTnum); // find the true enclosing try index,
// ignoring 'mutual protect' trys
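Because the EH table is ordered innermost-first (see the sort above), a block's true try/handler index is simply the first one written: the loops here only fill an entry if it is still zero. A small sketch of that first-writer-wins marking, assuming the illustrative table and arrays below:

    #include <cstdio>
    #include <cstring>

    int main()
    {
        const unsigned bbNumMax = 6;
        unsigned short blockTryIndex[bbNumMax + 1];
        memset(blockTryIndex, 0, sizeof(blockTryIndex));

        // Illustrative table, innermost region first: {index + 1, first block, last block}.
        struct { unsigned short index; unsigned beg, last; } ehTable[] = {
            {1, 3, 4}, // inner try covers BB03..BB04
            {2, 2, 6}, // outer try covers BB02..BB06
        };

        for (const auto& eh : ehTable)
        {
            for (unsigned bb = eh.beg; bb <= eh.last; bb++)
            {
                if (blockTryIndex[bb] == 0) // first (innermost) writer wins
                {
                    blockTryIndex[bb] = eh.index;
                }
            }
        }

        for (unsigned bb = 1; bb <= bbNumMax; bb++)
        {
            printf("BB%02u -> try index %u\n", bb, (unsigned)blockTryIndex[bb]);
        }
        return 0;
    }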
@@ -3571,7 +3522,9 @@ void Compiler::fgVerifyHandlerTab()
// handler).
BasicBlock* blockEnd;
- for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->bbNext; block != blockEnd; block = block->bbNext)
+ for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg),
+ blockEnd = HBtab->ebdHndLast->bbNext;
+ block != blockEnd; block = block->bbNext)
{
if (blockTryIndex[block->bbNum] == 0)
{
@@ -3612,7 +3565,7 @@ void Compiler::fgVerifyHandlerTab()
}
}
-void Compiler::fgDispHandlerTab()
+void Compiler::fgDispHandlerTab()
{
printf("\n*************** Exception Handling table");
@@ -3628,12 +3581,10 @@ void Compiler::fgDispHandlerTab()
#endif // !FEATURE_EH_FUNCLETS
printf("eTry, eHnd\n");
- unsigned XTnum;
- EHblkDsc* HBtab;
+ unsigned XTnum;
+ EHblkDsc* HBtab;
- for (XTnum = 0, HBtab = compHndBBtab;
- XTnum < compHndBBtabCount;
- XTnum++ , HBtab++)
+ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
HBtab->DispEntry(XTnum);
}
@@ -3662,45 +3613,50 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*
*/
-void Compiler::verInitEHTree(unsigned numEHClauses)
+void Compiler::verInitEHTree(unsigned numEHClauses)
{
ehnNext = new (this, CMK_BasicBlock) EHNodeDsc[numEHClauses * 3];
- ehnTree = NULL;
+ ehnTree = nullptr;
}
-
/* Inserts the try, handler and filter (optional) clause information in a tree structure
* in order to catch incorrect eh formatting (e.g. illegal overlaps, incorrect order)
*/
-void Compiler::verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab)
+void Compiler::verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab)
{
- EHNodeDsc* tryNode = ehnNext++;
+ EHNodeDsc* tryNode = ehnNext++;
EHNodeDsc* handlerNode = ehnNext++;
- EHNodeDsc* filterNode = NULL; // optional
+ EHNodeDsc* filterNode = nullptr; // optional
tryNode->ehnSetTryNodeType();
tryNode->ehnStartOffset = clause->TryOffset;
- tryNode->ehnEndOffset = clause->TryOffset+clause->TryLength - 1;
+ tryNode->ehnEndOffset = clause->TryOffset + clause->TryLength - 1;
tryNode->ehnHandlerNode = handlerNode;
if (clause->Flags & CORINFO_EH_CLAUSE_FINALLY)
+ {
handlerNode->ehnSetFinallyNodeType();
+ }
else if (clause->Flags & CORINFO_EH_CLAUSE_FAULT)
+ {
handlerNode->ehnSetFaultNodeType();
+ }
else
+ {
handlerNode->ehnSetHandlerNodeType();
+ }
handlerNode->ehnStartOffset = clause->HandlerOffset;
- handlerNode->ehnEndOffset = clause->HandlerOffset + clause->HandlerLength - 1;
- handlerNode->ehnTryNode = tryNode;
+ handlerNode->ehnEndOffset = clause->HandlerOffset + clause->HandlerLength - 1;
+ handlerNode->ehnTryNode = tryNode;
if (clause->Flags & CORINFO_EH_CLAUSE_FILTER)
{
- filterNode = ehnNext++;
+ filterNode = ehnNext++;
filterNode->ehnStartOffset = clause->FilterOffset;
- BasicBlock * blk = handlerTab->BBFilterLast();
- filterNode->ehnEndOffset = blk->bbCodeOffsEnd - 1;
+ BasicBlock* blk = handlerTab->BBFilterLast();
+ filterNode->ehnEndOffset = blk->bbCodeOffsEnd - 1;
noway_assert(filterNode->ehnEndOffset != 0);
filterNode->ehnSetFilterNodeType();
@@ -3711,7 +3667,9 @@ void Compiler::verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDs
verInsertEhNodeInTree(&ehnTree, tryNode);
verInsertEhNodeInTree(&ehnTree, handlerNode);
if (filterNode)
- verInsertEhNodeInTree(&ehnTree,filterNode);
+ {
+ verInsertEhNodeInTree(&ehnTree, filterNode);
+ }
}
/*
@@ -3771,8 +3729,7 @@ void Compiler::verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDs
*/
-void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot,
- EHNodeDsc* node)
+void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node)
{
unsigned nStart = node->ehnStartOffset;
unsigned nEnd = node->ehnEndOffset;
@@ -3781,13 +3738,13 @@ void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot,
{
BADCODE("start offset greater or equal to end offset");
}
- node->ehnNext = NULL;
- node->ehnChild = NULL;
- node->ehnEquivalent = NULL;
+ node->ehnNext = nullptr;
+ node->ehnChild = nullptr;
+ node->ehnEquivalent = nullptr;
while (TRUE)
{
- if (*ppRoot == NULL)
+ if (*ppRoot == nullptr)
{
*ppRoot = node;
break;
@@ -3801,33 +3758,32 @@ void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot,
if (nEnd < rStart)
{
// Left sibling
- node->ehnNext = *ppRoot;
- *ppRoot = node;
+ node->ehnNext = *ppRoot;
+ *ppRoot = node;
return;
}
// Case 2, 3
if (nEnd < rEnd)
{
-//[Error]
+ //[Error]
BADCODE("Overlapping try regions");
}
// Case 4, 5
-//[Parent]
+ //[Parent]
verInsertEhNodeParent(ppRoot, node);
return;
}
-
// Cases 6 - 13 (nStart >= rStart)
if (nEnd > rEnd)
- { // Case 6, 7, 8, 9
+ { // Case 6, 7, 8, 9
// Case 9
if (nStart > rEnd)
{
-//[RightSibling]
+ //[RightSibling]
// Recurse with Root.Sibling as the new root
ppRoot = &((*ppRoot)->ehnNext);
@@ -3837,12 +3793,12 @@ void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot,
// Case 6
if (nStart == rStart)
{
-//[Parent]
+ //[Parent]
if (node->ehnIsTryBlock() || (*ppRoot)->ehnIsTryBlock())
- {
- verInsertEhNodeParent(ppRoot, node);
- return;
- }
+ {
+ verInsertEhNodeParent(ppRoot, node);
+ return;
+ }
// non try blocks are not allowed to start at the same offset
BADCODE("Handlers start at the same offset");
@@ -3854,8 +3810,8 @@ void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot,
// Case 10-13 (nStart >= rStart && nEnd <= rEnd)
if ((nStart != rStart) || (nEnd != rEnd))
- { // Cases 10,11,12
-//[Child]
+ { // Cases 10,11,12
+ //[Child]
if ((*ppRoot)->ehnIsTryBlock())
{
@@ -3883,15 +3839,13 @@ void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot,
}
// Case 13
-//[Equivalent]
- if (!node->ehnIsTryBlock() &&
- !(*ppRoot)->ehnIsTryBlock())
+ //[Equivalent]
+ if (!node->ehnIsTryBlock() && !(*ppRoot)->ehnIsTryBlock())
{
BADCODE("Handlers cannot be shared");
}
- if (!node->ehnIsTryBlock() ||
- !(*ppRoot)->ehnIsTryBlock())
+ if (!node->ehnIsTryBlock() || !(*ppRoot)->ehnIsTryBlock())
{
// Equivalent is only allowed for try bodies
// If one is a handler, this means the nesting is wrong
@@ -3902,15 +3856,12 @@ void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot,
// check that the corresponding handler is either a catch handler
// or a filter
- if (node->ehnHandlerNode->ehnIsFaultBlock() ||
- node->ehnHandlerNode->ehnIsFinallyBlock() ||
- (*ppRoot)->ehnHandlerNode->ehnIsFaultBlock() ||
- (*ppRoot)->ehnHandlerNode->ehnIsFinallyBlock() )
+ if (node->ehnHandlerNode->ehnIsFaultBlock() || node->ehnHandlerNode->ehnIsFinallyBlock() ||
+ (*ppRoot)->ehnHandlerNode->ehnIsFaultBlock() || (*ppRoot)->ehnHandlerNode->ehnIsFinallyBlock())
{
BADCODE("Try block with multiple non-filter/non-handler blocks");
}
-
break;
}
}
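Stripped of the tree surgery, the case analysis in this function is a classification of how the new node's offset range relates to the root's: disjoint on either side, illegal partial overlap, enclosing, nested, or identical. An illustrative version of just that classification, assuming the hypothetical Classify helper below (the numbered cases match the comments above):

    #include <cstdio>

    // Illustrative classification of the new node's [nStart, nEnd] against the root's
    // [rStart, rEnd]; the real code also edits the tree and calls BADCODE on overlap.
    enum Relation { LeftSibling, Overlap, Parent, RightSibling, Child, Equivalent };

    Relation Classify(unsigned nStart, unsigned nEnd, unsigned rStart, unsigned rEnd)
    {
        if (nStart < rStart)
        {
            if (nEnd < rStart) return LeftSibling; // case 1: fully before the root
            if (nEnd < rEnd)   return Overlap;     // cases 2,3: illegal partial overlap
            return Parent;                         // cases 4,5: node encloses the root
        }
        // nStart >= rStart
        if (nEnd > rEnd)
        {
            if (nStart > rEnd)    return RightSibling; // case 9: fully after the root
            if (nStart == rStart) return Parent;       // case 6: legal only when one side is a 'try'
            return Overlap;                            // cases 7,8: illegal partial overlap
        }
        if ((nStart != rStart) || (nEnd != rEnd))
        {
            return Child;    // cases 10-12: node nested inside the root
        }
        return Equivalent;   // case 13: identical range (only legal for 'try' bodies)
    }

    int main()
    {
        printf("%d\n", Classify(0, 5, 10, 20));   // LeftSibling
        printf("%d\n", Classify(8, 15, 10, 20));  // Overlap -> would be BADCODE
        printf("%d\n", Classify(12, 15, 10, 20)); // Child
        printf("%d\n", Classify(10, 20, 10, 20)); // Equivalent
        return 0;
    }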
@@ -3920,27 +3871,25 @@ void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot,
* fully or partially nested in node remain siblings of *ppRoot
*/
-void Compiler::verInsertEhNodeParent(EHNodeDsc** ppRoot,
- EHNodeDsc* node)
+void Compiler::verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node)
{
- noway_assert(node->ehnNext == NULL);
- noway_assert(node->ehnChild == NULL);
+ noway_assert(node->ehnNext == nullptr);
+ noway_assert(node->ehnChild == nullptr);
// Root is nested in Node
noway_assert(node->ehnStartOffset <= (*ppRoot)->ehnStartOffset);
- noway_assert(node->ehnEndOffset >= (*ppRoot)->ehnEndOffset);
+ noway_assert(node->ehnEndOffset >= (*ppRoot)->ehnEndOffset);
// Root is not the same as Node
- noway_assert(node->ehnStartOffset != (*ppRoot)->ehnStartOffset ||
- node->ehnEndOffset != (*ppRoot)->ehnEndOffset);
+ noway_assert(node->ehnStartOffset != (*ppRoot)->ehnStartOffset || node->ehnEndOffset != (*ppRoot)->ehnEndOffset);
if (node->ehnIsFilterBlock())
{
BADCODE("Protected block appearing within filter block");
}
- EHNodeDsc *lastChild = NULL;
- EHNodeDsc *sibling = (*ppRoot)->ehnNext;
+ EHNodeDsc* lastChild = nullptr;
+ EHNodeDsc* sibling = (*ppRoot)->ehnNext;
while (sibling)
{
@@ -3948,21 +3897,23 @@ void Compiler::verInsertEhNodeParent(EHNodeDsc** ppRoot,
// nodes have a width of at least one.
// Hence sibling start will always be after Node start.
- noway_assert(sibling->ehnStartOffset > node->ehnStartOffset); // (1)
+ noway_assert(sibling->ehnStartOffset > node->ehnStartOffset); // (1)
// disjoint
if (sibling->ehnStartOffset > node->ehnEndOffset)
+ {
break;
+ }
// partial containment.
- if (sibling->ehnEndOffset > node->ehnEndOffset) // (2)
+ if (sibling->ehnEndOffset > node->ehnEndOffset) // (2)
{
BADCODE("Overlapping try regions");
}
- //else full containment (follows from (1) and (2))
+ // else full containment (follows from (1) and (2))
lastChild = sibling;
- sibling = sibling->ehnNext;
+ sibling = sibling->ehnNext;
}
// All siblings of Root up to and including lastChild will continue to be
@@ -3975,18 +3926,17 @@ void Compiler::verInsertEhNodeParent(EHNodeDsc** ppRoot,
// Node has more than one child including Root
node->ehnNext = lastChild->ehnNext;
- lastChild->ehnNext = NULL;
+ lastChild->ehnNext = nullptr;
}
else
{
// Root is the only child of Node
node->ehnNext = (*ppRoot)->ehnNext;
- (*ppRoot)->ehnNext = NULL;
+ (*ppRoot)->ehnNext = nullptr;
}
node->ehnChild = *ppRoot;
- *ppRoot = node;
-
+ *ppRoot = node;
}
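The sibling walk above partitions the root's siblings by containment in the new node: everything up to lastChild is absorbed as a child of the node, a partially overlapping sibling is malformed EH, and the first disjoint sibling ends the scan. A standalone sketch of that partition, assuming the hypothetical EhNode type and helper below:

    #include <cstdio>

    // Hypothetical node type; the real code works on EHNodeDsc and raises BADCODE on overlap.
    struct EhNode
    {
        unsigned start, end;
        EhNode*  next;
    };

    // Walk 'first' and its siblings; return the last sibling fully contained in
    // [start, end], or nullptr if only 'first' itself is contained.
    EhNode* LastContainedSibling(EhNode* first, unsigned start, unsigned end)
    {
        EhNode* lastChild = nullptr;
        for (EhNode* sib = first->next; sib != nullptr; sib = sib->next)
        {
            if (sib->start > end)
            {
                break; // disjoint: this and later siblings stay outside the new parent
            }
            if (sib->end > end)
            {
                printf("overlap -> BADCODE\n"); // partial containment is malformed EH
                return lastChild;
            }
            lastChild = sib; // fully contained: becomes a child of the new parent
        }
        return lastChild;
    }

    int main()
    {
        EhNode c{40, 50, nullptr}, b{20, 30, &c}, a{5, 15, &b};
        // New parent covers [0, 35]: it absorbs a and b; c stays a sibling of the parent.
        EhNode* last = LastContainedSibling(&a, 0, 35);
        printf("last absorbed sibling starts at %u\n", last->start);
        return 0;
    }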
/*****************************************************************************
@@ -3999,11 +3949,16 @@ void Compiler::verInsertEhNodeParent(EHNodeDsc** ppRoot,
* search through the next links for its corresponding try/handler/filter as the
* case may be. If not found, then fail.
*/
-void Compiler::verCheckNestingLevel(EHNodeDsc* root)
+void Compiler::verCheckNestingLevel(EHNodeDsc* root)
{
EHNodeDsc* ehnNode = root;
- #define exchange(a,b) { temp = a; a = b; b = temp;}
+#define exchange(a, b) \
+ { \
+ temp = a; \
+ a = b; \
+ b = temp; \
+ }
for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++)
{
@@ -4018,26 +3973,33 @@ void Compiler::verCheckNestingLevel(EHNodeDsc* root)
// arrange p1 and p2 in sequential order
if (p1->ehnStartOffset == p2->ehnStartOffset)
+ {
BADCODE("shared exception handler");
+ }
if (p1->ehnStartOffset > p2->ehnStartOffset)
- exchange(p1,p2);
+ exchange(p1, p2);
- temp = p1->ehnNext;
+ temp = p1->ehnNext;
unsigned numSiblings = 0;
search = p2;
if (search->ehnEquivalent)
+ {
search = search->ehnEquivalent;
+ }
- do {
+ do
+ {
if (temp == search)
{
numSiblings++;
break;
}
if (temp)
+ {
temp = temp->ehnNext;
+ }
} while (temp);
CORINFO_EH_CLAUSE clause;
@@ -4052,19 +4014,25 @@ void Compiler::verCheckNestingLevel(EHNodeDsc* root)
if (p3->ehnStartOffset < p1->ehnStartOffset)
{
- temp = p3; search = p1;
+ temp = p3;
+ search = p1;
}
else if (p3->ehnStartOffset < p2->ehnStartOffset)
{
- temp = p1; search = p3;
+ temp = p1;
+ search = p3;
}
else
{
- temp = p2; search = p3;
+ temp = p2;
+ search = p3;
}
if (search->ehnEquivalent)
+ {
search = search->ehnEquivalent;
- do {
+ }
+ do
+ {
if (temp == search)
{
numSiblings++;
@@ -4079,8 +4047,8 @@ void Compiler::verCheckNestingLevel(EHNodeDsc* root)
}
if (numSiblings != 2)
+ {
BADCODE("Outer block does not contain all code in inner handler");
+ }
}
-
}
-
diff --git a/src/jit/jiteh.h b/src/jit/jiteh.h
index 050efdc0c1..573116282c 100644
--- a/src/jit/jiteh.h
+++ b/src/jit/jiteh.h
@@ -24,7 +24,7 @@ class Compiler;
enum EHHandlerType
{
- EH_HANDLER_CATCH = 0x1, // Don't use zero (to aid debugging uninitialized memory)
+ EH_HANDLER_CATCH = 0x1, // Don't use zero (to aid debugging uninitialized memory)
EH_HANDLER_FILTER,
EH_HANDLER_FAULT,
EH_HANDLER_FINALLY
@@ -36,12 +36,16 @@ inline CORINFO_EH_CLAUSE_FLAGS ToCORINFO_EH_CLAUSE_FLAGS(EHHandlerType type)
{
switch (type)
{
- case EH_HANDLER_CATCH: return CORINFO_EH_CLAUSE_NONE;
- case EH_HANDLER_FILTER: return CORINFO_EH_CLAUSE_FILTER;
- case EH_HANDLER_FAULT: return CORINFO_EH_CLAUSE_FAULT;
- case EH_HANDLER_FINALLY: return CORINFO_EH_CLAUSE_FINALLY;
- default:
- unreached();
+ case EH_HANDLER_CATCH:
+ return CORINFO_EH_CLAUSE_NONE;
+ case EH_HANDLER_FILTER:
+ return CORINFO_EH_CLAUSE_FILTER;
+ case EH_HANDLER_FAULT:
+ return CORINFO_EH_CLAUSE_FAULT;
+ case EH_HANDLER_FINALLY:
+ return CORINFO_EH_CLAUSE_FINALLY;
+ default:
+ unreached();
}
}
@@ -72,23 +76,23 @@ inline EHHandlerType ToEHHandlerType(CORINFO_EH_CLAUSE_FLAGS flags)
}
}
-struct EHblkDsc
+struct EHblkDsc
{
- BasicBlock* ebdTryBeg; // First block of the try
- BasicBlock* ebdTryLast; // Last block of the try
- BasicBlock* ebdHndBeg; // First block of the handler
- BasicBlock* ebdHndLast; // Last block of the handler
- union
- {
- BasicBlock* ebdFilter; // First block of filter, if HasFilter()
- unsigned ebdTyp; // Exception type (a class token), otherwise
+ BasicBlock* ebdTryBeg; // First block of the try
+ BasicBlock* ebdTryLast; // Last block of the try
+ BasicBlock* ebdHndBeg; // First block of the handler
+ BasicBlock* ebdHndLast; // Last block of the handler
+ union {
+ BasicBlock* ebdFilter; // First block of filter, if HasFilter()
+ unsigned ebdTyp; // Exception type (a class token), otherwise
};
- EHHandlerType ebdHandlerType;
+ EHHandlerType ebdHandlerType;
#if !FEATURE_EH_FUNCLETS
- // How nested is the try/handler within other *handlers* - 0 for outermost clauses, 1 for nesting with a handler, etc.
- unsigned short ebdHandlerNestingLevel;
+ // How nested is the try/handler within other *handlers* - 0 for outermost clauses, 1 for nesting with a handler,
+ // etc.
+ unsigned short ebdHandlerNestingLevel;
#endif // !FEATURE_EH_FUNCLETS
static const unsigned short NO_ENCLOSING_INDEX = USHRT_MAX;
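ebdFilter and ebdTyp overlay the same storage, so which member is meaningful depends on the clause kind (the real code asks HasFilter()). A small illustrative model of that union, with all names below being stand-ins:

    #include <cassert>
    #include <cstdio>

    // Illustrative model only; the real types are BasicBlock and EHblkDsc.
    struct Block
    {
        unsigned num;
    };

    enum HandlerKind { HK_Catch, HK_Filter, HK_Fault, HK_Finally };

    struct ClauseSketch
    {
        HandlerKind kind;
        union {
            Block*   filterBeg; // valid only when kind == HK_Filter
            unsigned classTok;  // exception class token otherwise (catch)
        };
    };

    int main()
    {
        Block filterEntry{42};

        ClauseSketch filtered;
        filtered.kind      = HK_Filter;
        filtered.filterBeg = &filterEntry;

        ClauseSketch typedCatch;
        typedCatch.kind     = HK_Catch;
        typedCatch.classTok = 0x0100001A;

        assert(filtered.kind == HK_Filter);
        printf("filter starts at BB%02u, catch token = 0x%08X\n", filtered.filterBeg->num,
               typedCatch.classTok);
        return 0;
    }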
@@ -99,10 +103,10 @@ struct EHblkDsc
// ebdEnclosingTryIndex, but the inner catch is *NOT* nested within the outer catch!
// That is, if the "inner catch" throws an exception, it won't be caught by
// the "outer catch" for mutually protect handlers.
- unsigned short ebdEnclosingTryIndex;
+ unsigned short ebdEnclosingTryIndex;
// The index of the enclosing outer handler region, NO_ENCLOSING_INDEX if none.
- unsigned short ebdEnclosingHndIndex;
+ unsigned short ebdEnclosingHndIndex;
#if FEATURE_EH_FUNCLETS
@@ -111,67 +115,66 @@ struct EHblkDsc
    // Like in IL, the filter funclet immediately precedes the filter-handler funclet.
// So this index points to the filter-handler funclet. If you want the filter
// funclet index, just subtract 1.
- unsigned short ebdFuncIndex;
+ unsigned short ebdFuncIndex;
#endif // FEATURE_EH_FUNCLETS
- IL_OFFSET ebdTryBegOffset; // IL offsets of EH try/end regions as they are imported
- IL_OFFSET ebdTryEndOffset;
- IL_OFFSET ebdFilterBegOffset; // only set if HasFilter()
- IL_OFFSET ebdHndBegOffset;
- IL_OFFSET ebdHndEndOffset;
+ IL_OFFSET ebdTryBegOffset; // IL offsets of EH try/end regions as they are imported
+ IL_OFFSET ebdTryEndOffset;
+ IL_OFFSET ebdFilterBegOffset; // only set if HasFilter()
+ IL_OFFSET ebdHndBegOffset;
+ IL_OFFSET ebdHndEndOffset;
// Returns the last block of the filter. Assumes the EH clause is a try/filter/filter-handler type.
- BasicBlock* BBFilterLast();
+ BasicBlock* BBFilterLast();
- bool HasCatchHandler();
- bool HasFilter();
- bool HasFinallyHandler();
- bool HasFaultHandler();
- bool HasFinallyOrFaultHandler();
+ bool HasCatchHandler();
+ bool HasFilter();
+ bool HasFinallyHandler();
+ bool HasFaultHandler();
+ bool HasFinallyOrFaultHandler();
// Returns the block to which control will flow if an (otherwise-uncaught) exception is raised
// in the try. This is normally "ebdHndBeg", unless the try region has a filter, in which case that is returned.
// (This is, in some sense, the "true handler," at least in the sense of control flow. Note
// that we model the transition from a filter to its handler as normal, non-exceptional control flow.)
- BasicBlock* ExFlowBlock();
+ BasicBlock* ExFlowBlock();
- bool InTryRegionILRange (BasicBlock* pBlk);
- bool InFilterRegionILRange (BasicBlock* pBlk);
- bool InHndRegionILRange (BasicBlock* pBlk);
+ bool InTryRegionILRange(BasicBlock* pBlk);
+ bool InFilterRegionILRange(BasicBlock* pBlk);
+ bool InHndRegionILRange(BasicBlock* pBlk);
- bool InTryRegionBBRange (BasicBlock* pBlk);
- bool InFilterRegionBBRange (BasicBlock* pBlk);
- bool InHndRegionBBRange (BasicBlock* pBlk);
+ bool InTryRegionBBRange(BasicBlock* pBlk);
+ bool InFilterRegionBBRange(BasicBlock* pBlk);
+ bool InHndRegionBBRange(BasicBlock* pBlk);
- IL_OFFSET ebdTryBegOffs();
- IL_OFFSET ebdTryEndOffs();
- IL_OFFSET ebdFilterBegOffs();
- IL_OFFSET ebdFilterEndOffs();
- IL_OFFSET ebdHndBegOffs();
- IL_OFFSET ebdHndEndOffs();
+ IL_OFFSET ebdTryBegOffs();
+ IL_OFFSET ebdTryEndOffs();
+ IL_OFFSET ebdFilterBegOffs();
+ IL_OFFSET ebdFilterEndOffs();
+ IL_OFFSET ebdHndBegOffs();
+ IL_OFFSET ebdHndEndOffs();
- static bool ebdIsSameILTry (EHblkDsc* h1, EHblkDsc* h2); // Same 'try' region? Compare IL range.
+ static bool ebdIsSameILTry(EHblkDsc* h1, EHblkDsc* h2); // Same 'try' region? Compare IL range.
// Return the region index of the most nested EH region that encloses this region, or NO_ENCLOSING_INDEX
// if this region is directly in the main function body. Set '*inTryRegion' to 'true' if this region is
// most nested within a 'try' region, or 'false' if this region is most nested within a handler. (Note
// that filters cannot contain nested EH regions.)
- unsigned ebdGetEnclosingRegionIndex(bool* inTryRegion);
+ unsigned ebdGetEnclosingRegionIndex(bool* inTryRegion);
- static bool ebdIsSameTry (EHblkDsc* h1, EHblkDsc* h2); // Same 'try' region? Compare begin/last blocks.
- bool ebdIsSameTry (Compiler* comp, unsigned t2);
- bool ebdIsSameTry (BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast);
+ static bool ebdIsSameTry(EHblkDsc* h1, EHblkDsc* h2); // Same 'try' region? Compare begin/last blocks.
+ bool ebdIsSameTry(Compiler* comp, unsigned t2);
+ bool ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast);
#ifdef DEBUG
- void DispEntry(unsigned num); // Display this table entry
-#endif // DEBUG
+ void DispEntry(unsigned num); // Display this table entry
+#endif // DEBUG
private:
- static bool InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd);
+ static bool InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd);
};
-
/*****************************************************************************/
#endif // _EH_H_
/*****************************************************************************/
diff --git a/src/jit/jitgcinfo.h b/src/jit/jitgcinfo.h
index f18346adf1..b93ac3376c 100644
--- a/src/jit/jitgcinfo.h
+++ b/src/jit/jitgcinfo.h
@@ -24,13 +24,17 @@ struct RegSlotIdKey
unsigned short m_regNum;
unsigned short m_flags;
- RegSlotIdKey() {}
+ RegSlotIdKey()
+ {
+ }
- RegSlotIdKey(unsigned short regNum, unsigned short flags) : m_regNum(regNum), m_flags(flags) {}
+ RegSlotIdKey(unsigned short regNum, unsigned short flags) : m_regNum(regNum), m_flags(flags)
+ {
+ }
static unsigned GetHashCode(RegSlotIdKey rsk)
{
- return (rsk.m_flags << (8*sizeof(unsigned short))) + rsk.m_regNum;
+ return (rsk.m_flags << (8 * sizeof(unsigned short))) + rsk.m_regNum;
}
static bool Equals(RegSlotIdKey rsk1, RegSlotIdKey rsk2)
@@ -41,17 +45,21 @@ struct RegSlotIdKey
struct StackSlotIdKey
{
- int m_offset;
- bool m_fpRel;
+ int m_offset;
+ bool m_fpRel;
unsigned short m_flags;
- StackSlotIdKey() {}
+ StackSlotIdKey()
+ {
+ }
- StackSlotIdKey(int offset, bool fpRel, unsigned short flags) : m_offset(offset), m_fpRel(fpRel), m_flags(flags) {}
+ StackSlotIdKey(int offset, bool fpRel, unsigned short flags) : m_offset(offset), m_fpRel(fpRel), m_flags(flags)
+ {
+ }
static unsigned GetHashCode(StackSlotIdKey ssk)
{
- return (ssk.m_flags << (8*sizeof(unsigned short))) ^ (unsigned)ssk.m_offset ^ (ssk.m_fpRel ? 0x1000000 : 0);
+ return (ssk.m_flags << (8 * sizeof(unsigned short))) ^ (unsigned)ssk.m_offset ^ (ssk.m_fpRel ? 0x1000000 : 0);
}
static bool Equals(StackSlotIdKey ssk1, StackSlotIdKey ssk2)
@@ -71,28 +79,26 @@ class GCInfo
friend class CodeGen;
private:
- Compiler* compiler;
- RegSet* regSet;
-
-public :
+ Compiler* compiler;
+ RegSet* regSet;
+public:
GCInfo(Compiler* theCompiler);
- void gcResetForBB ();
-
- void gcMarkRegSetGCref (regMaskTP regMask DEBUGARG(bool forceOutput = false));
- void gcMarkRegSetByref (regMaskTP regMask DEBUGARG(bool forceOutput = false));
- void gcMarkRegSetNpt (regMaskTP regMask DEBUGARG(bool forceOutput = false));
- void gcMarkRegPtrVal (regNumber reg, var_types type);
- void gcMarkRegPtrVal (GenTreePtr tree);
+ void gcResetForBB();
-#ifdef DEBUG
- void gcDspGCrefSetChanges(regMaskTP gcRegGCrefSetNew DEBUGARG(bool forceOutput = false));
- void gcDspByrefSetChanges(regMaskTP gcRegByrefSetNew DEBUGARG(bool forceOutput = false));
-#endif // DEBUG
+ void gcMarkRegSetGCref(regMaskTP regMask DEBUGARG(bool forceOutput = false));
+ void gcMarkRegSetByref(regMaskTP regMask DEBUGARG(bool forceOutput = false));
+ void gcMarkRegSetNpt(regMaskTP regMask DEBUGARG(bool forceOutput = false));
+ void gcMarkRegPtrVal(regNumber reg, var_types type);
+ void gcMarkRegPtrVal(GenTreePtr tree);
-/*****************************************************************************/
+#ifdef DEBUG
+ void gcDspGCrefSetChanges(regMaskTP gcRegGCrefSetNew DEBUGARG(bool forceOutput = false));
+ void gcDspByrefSetChanges(regMaskTP gcRegByrefSetNew DEBUGARG(bool forceOutput = false));
+#endif // DEBUG
+ /*****************************************************************************/
//-------------------------------------------------------------------------
//
@@ -100,11 +106,11 @@ public :
// values.
//
- regMaskTP gcRegGCrefSetCur; // current regs holding GCrefs
- regMaskTP gcRegByrefSetCur; // current regs holding Byrefs
+ regMaskTP gcRegGCrefSetCur; // current regs holding GCrefs
+ regMaskTP gcRegByrefSetCur; // current regs holding Byrefs
- VARSET_TP gcTrkStkPtrLcls; // set of tracked stack ptr lcls (GCref and Byref) - no args
- VARSET_TP gcVarPtrSetCur; // currently live part of "gcTrkStkPtrLcls"
+ VARSET_TP gcTrkStkPtrLcls; // set of tracked stack ptr lcls (GCref and Byref) - no args
+ VARSET_TP gcVarPtrSetCur; // currently live part of "gcTrkStkPtrLcls"
//-------------------------------------------------------------------------
//
@@ -114,70 +120,80 @@ public :
struct varPtrDsc
{
- varPtrDsc * vpdNext;
+ varPtrDsc* vpdNext;
- unsigned vpdVarNum; // which variable is this about?
+ unsigned vpdVarNum; // which variable is this about?
- unsigned vpdBegOfs ; // the offset where life starts
- unsigned vpdEndOfs; // the offset where life starts
+ unsigned vpdBegOfs; // the offset where life starts
+ unsigned vpdEndOfs; // the offset where life ends
};
- varPtrDsc * gcVarPtrList;
- varPtrDsc * gcVarPtrLast;
+ varPtrDsc* gcVarPtrList;
+ varPtrDsc* gcVarPtrLast;
- void gcVarPtrSetInit();
+ void gcVarPtrSetInit();
-/*****************************************************************************/
+ /*****************************************************************************/
// 'pointer value' register tracking and argument pushes/pops tracking.
- enum rpdArgType_t { rpdARG_POP, rpdARG_PUSH, rpdARG_KILL };
+ enum rpdArgType_t
+ {
+ rpdARG_POP,
+ rpdARG_PUSH,
+ rpdARG_KILL
+ };
- struct regPtrDsc
+ struct regPtrDsc
{
- regPtrDsc * rpdNext; // next entry in the list
- unsigned rpdOffs; // the offset of the instruction
+ regPtrDsc* rpdNext; // next entry in the list
+ unsigned rpdOffs; // the offset of the instruction
- union // 2-16 byte union (depending on architecture)
+ union // 2-16 byte union (depending on architecture)
{
- struct // 2-16 byte structure (depending on architecture)
+ struct // 2-16 byte structure (depending on architecture)
{
- regMaskSmall rpdAdd; // regptr bitset being added
- regMaskSmall rpdDel; // regptr bitset being removed
- }
- rpdCompiler;
+ regMaskSmall rpdAdd; // regptr bitset being added
+ regMaskSmall rpdDel; // regptr bitset being removed
+ } rpdCompiler;
- unsigned short rpdPtrArg; // arg offset or popped arg count
+ unsigned short rpdPtrArg; // arg offset or popped arg count
};
#ifndef JIT32_GCENCODER
- unsigned char rpdCallInstrSize; // Length of the call instruction.
+ unsigned char rpdCallInstrSize; // Length of the call instruction.
#endif
- unsigned short rpdArg :1; // is this an argument descriptor?
- unsigned short rpdArgType :2; // is this an argument push,pop, or kill?
- rpdArgType_t rpdArgTypeGet() { return (rpdArgType_t) rpdArgType; }
+ unsigned short rpdArg : 1; // is this an argument descriptor?
+ unsigned short rpdArgType : 2; // is this an argument push, pop, or kill?
+ rpdArgType_t rpdArgTypeGet()
+ {
+ return (rpdArgType_t)rpdArgType;
+ }
- unsigned short rpdGCtype :2; // is this a pointer, after all?
- GCtype rpdGCtypeGet() { return (GCtype) rpdGCtype; }
+ unsigned short rpdGCtype : 2; // is this a pointer, after all?
+ GCtype rpdGCtypeGet()
+ {
+ return (GCtype)rpdGCtype;
+ }
- unsigned short rpdIsThis :1; // is it the 'this' pointer
- unsigned short rpdCall :1; // is this a true call site?
- unsigned short :1; // Padding bit, so next two start on a byte boundary
- unsigned short rpdCallGCrefRegs:CNT_CALLEE_SAVED; // Callee-saved registers containing GC pointers.
- unsigned short rpdCallByrefRegs:CNT_CALLEE_SAVED; // Callee-saved registers containing byrefs.
+ unsigned short rpdIsThis : 1; // is it the 'this' pointer
+ unsigned short rpdCall : 1; // is this a true call site?
+ unsigned short : 1; // Padding bit, so next two start on a byte boundary
+ unsigned short rpdCallGCrefRegs : CNT_CALLEE_SAVED; // Callee-saved registers containing GC pointers.
+ unsigned short rpdCallByrefRegs : CNT_CALLEE_SAVED; // Callee-saved registers containing byrefs.
#ifndef JIT32_GCENCODER
- bool rpdIsCallInstr()
+ bool rpdIsCallInstr()
{
return rpdCall && rpdCallInstrSize != 0;
}
#endif
};
- regPtrDsc * gcRegPtrList;
- regPtrDsc * gcRegPtrLast;
- unsigned gcPtrArgCnt;
+ regPtrDsc* gcRegPtrList;
+ regPtrDsc* gcRegPtrLast;
+ unsigned gcPtrArgCnt;
#ifndef JIT32_GCENCODER
enum MakeRegPtrMode
@@ -190,13 +206,11 @@ public :
// used to contain GC references, and whether those locations contain byrefs or pinning references,
// building up mappings from tuples of <offset X byref/pinning> to the corresponding slot id.
// In the "do work" mode, we use these slot ids to actually declare live ranges to the encoder.
- void gcMakeVarPtrTable (GcInfoEncoder* gcInfoEncoder,
- MakeRegPtrMode mode);
+ void gcMakeVarPtrTable(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode);
// This method expands the tracked stack variables lifetimes so that any lifetimes within filters
// are reported as pinned.
- void gcMarkFilterVarsPinned();
-
+ void gcMarkFilterVarsPinned();
// At instruction offset "instrOffset," the set of registers indicated by "regMask" is becoming live or dead,
// depending on whether "newState" is "GC_SLOT_DEAD" or "GC_SLOT_LIVE". The subset of registers whose corresponding
@@ -206,35 +220,32 @@ public :
// using the previously assigned slot ids, and updates "*pPtrRegs" appropriately.
void gcInfoRecordGCRegStateChange(GcInfoEncoder* gcInfoEncoder,
MakeRegPtrMode mode,
- unsigned instrOffset,
- regMaskSmall regMask,
- GcSlotState newState,
- regMaskSmall byRefMask,
- regMaskSmall* pPtrRegs);
+ unsigned instrOffset,
+ regMaskSmall regMask,
+ GcSlotState newState,
+ regMaskSmall byRefMask,
+ regMaskSmall* pPtrRegs);
// regPtrDsc is also used to encode writes to the outgoing argument space (as if they were pushes)
- void gcInfoRecordGCStackArgLive (GcInfoEncoder* gcInfoEncoder,
- MakeRegPtrMode mode,
- regPtrDsc* genStackPtr);
+ void gcInfoRecordGCStackArgLive(GcInfoEncoder* gcInfoEncoder, MakeRegPtrMode mode, regPtrDsc* genStackPtr);
// Walk all the pushes between genStackPtrFirst (inclusive) and genStackPtrLast (exclusive)
// and mark them as going dead at instrOffset
- void gcInfoRecordGCStackArgsDead (GcInfoEncoder* gcInfoEncoder,
- unsigned instrOffset,
- regPtrDsc* genStackPtrFirst,
- regPtrDsc* genStackPtrLast);
+ void gcInfoRecordGCStackArgsDead(GcInfoEncoder* gcInfoEncoder,
+ unsigned instrOffset,
+ regPtrDsc* genStackPtrFirst,
+ regPtrDsc* genStackPtrLast);
#endif
#if MEASURE_PTRTAB_SIZE
- static size_t s_gcRegPtrDscSize;
- static size_t s_gcTotalPtrTabSize;
+ static size_t s_gcRegPtrDscSize;
+ static size_t s_gcTotalPtrTabSize;
#endif
- regPtrDsc * gcRegPtrAllocDsc ();
-
-/*****************************************************************************/
+ regPtrDsc* gcRegPtrAllocDsc();
+ /*****************************************************************************/
//-------------------------------------------------------------------------
//
@@ -242,93 +253,79 @@ public :
// linked list of call descriptors.
//
- struct CallDsc
+ struct CallDsc
{
- CallDsc * cdNext;
- void * cdBlock; // the code block of the call
- unsigned cdOffs; // the offset of the call
+ CallDsc* cdNext;
+ void* cdBlock; // the code block of the call
+ unsigned cdOffs; // the offset of the call
#ifndef JIT32_GCENCODER
- unsigned short cdCallInstrSize;// the size of the call instruction.
+ unsigned short cdCallInstrSize; // the size of the call instruction.
#endif
- unsigned short cdArgCnt;
+ unsigned short cdArgCnt;
- union
- {
- struct // used if cdArgCnt == 0
+ union {
+ struct // used if cdArgCnt == 0
{
- unsigned cdArgMask; // ptr arg bitfield
- unsigned cdByrefArgMask; // byref qualifier for cdArgMask
+ unsigned cdArgMask; // ptr arg bitfield
+ unsigned cdByrefArgMask; // byref qualifier for cdArgMask
} u1;
- unsigned * cdArgTable; // used if cdArgCnt != 0
+ unsigned* cdArgTable; // used if cdArgCnt != 0
};
- regMaskSmall cdGCrefRegs;
- regMaskSmall cdByrefRegs;
+ regMaskSmall cdGCrefRegs;
+ regMaskSmall cdByrefRegs;
};
- CallDsc * gcCallDescList;
- CallDsc * gcCallDescLast;
+ CallDsc* gcCallDescList;
+ CallDsc* gcCallDescLast;
//-------------------------------------------------------------------------
- void gcCountForHeader (UNALIGNED unsigned int * untrackedCount,
- UNALIGNED unsigned int * varPtrTableSize);
+ void gcCountForHeader(UNALIGNED unsigned int* untrackedCount, UNALIGNED unsigned int* varPtrTableSize);
#ifdef JIT32_GCENCODER
- size_t gcMakeRegPtrTable (BYTE * dest,
- int mask,
- const InfoHdr& header,
- unsigned codeSize,
- size_t* pArgTabOffset);
+ size_t gcMakeRegPtrTable(BYTE* dest, int mask, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset);
#else
RegSlotMap* m_regSlotMap;
StackSlotMap* m_stackSlotMap;
// This method has two modes. In the "assign slots" mode, it figures out what registers and stack
// locations are used to contain GC references, and whether those locations contain byrefs or pinning
// references, building up mappings from tuples of <reg/offset X byref/pinning> to the corresponding
- // slot id (in the two member fields declared above). In the "do work" mode, we use these slot ids to
+ // slot id (in the two member fields declared above). In the "do work" mode, we use these slot ids to
// actually declare live ranges to the encoder.
- void gcMakeRegPtrTable (GcInfoEncoder* gcInfoEncoder,
- unsigned codeSize,
- unsigned prologSize,
- MakeRegPtrMode mode);
+ void gcMakeRegPtrTable(GcInfoEncoder* gcInfoEncoder, unsigned codeSize, unsigned prologSize, MakeRegPtrMode mode);
#endif
#ifdef JIT32_GCENCODER
- size_t gcPtrTableSize (const InfoHdr& header,
- unsigned codeSize,
- size_t* pArgTabOffset);
- BYTE * gcPtrTableSave (BYTE * destPtr,
- const InfoHdr& header,
- unsigned codeSize,
- size_t* pArgTabOffset);
+ size_t gcPtrTableSize(const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset);
+ BYTE* gcPtrTableSave(BYTE* destPtr, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset);
#endif
- void gcRegPtrSetInit ();
-/*****************************************************************************/
+ void gcRegPtrSetInit();
+ /*****************************************************************************/
// This enumeration yields the result of the analysis below, whether a store
// requires a write barrier:
enum WriteBarrierForm
{
- WBF_NoBarrier, // No barrier is required
- WBF_BarrierUnknown, // A barrier is required, no information on checked/unchecked.
- WBF_BarrierChecked, // A checked barrier is required.
- WBF_BarrierUnchecked, // An unchecked barrier is required.
- WBF_NoBarrier_CheckNotHeapInDebug, // We believe that no barrier is required because the
- // target is not in the heap -- but in debug build use a
- // barrier call that verifies this property. (Because the
- // target not being in the heap relies on a convention that
- // might accidentally be violated in the future.)
+ WBF_NoBarrier, // No barrier is required
+ WBF_BarrierUnknown, // A barrier is required, no information on checked/unchecked.
+ WBF_BarrierChecked, // A checked barrier is required.
+ WBF_BarrierUnchecked, // An unchecked barrier is required.
+ WBF_NoBarrier_CheckNotHeapInDebug, // We believe that no barrier is required because the
+ // target is not in the heap -- but in debug build use a
+ // barrier call that verifies this property. (Because the
+ // target not being in the heap relies on a convention that
+ // might accidentally be violated in the future.)
};
- WriteBarrierForm gcIsWriteBarrierCandidate(GenTreePtr tgt, GenTreePtr assignVal);
- bool gcIsWriteBarrierAsgNode (GenTreePtr op);
+ WriteBarrierForm gcIsWriteBarrierCandidate(GenTreePtr tgt, GenTreePtr assignVal);
+ bool gcIsWriteBarrierAsgNode(GenTreePtr op);
// Returns a WriteBarrierForm decision based on the form of "tgtAddr", which is assumed to be the
// argument of a GT_IND LHS.
- WriteBarrierForm gcWriteBarrierFormFromTargetAddress(GenTreePtr tgtAddr);
+ WriteBarrierForm gcWriteBarrierFormFromTargetAddress(GenTreePtr tgtAddr);
//-------------------------------------------------------------------------
//
@@ -338,48 +335,42 @@ public :
#ifdef JIT32_GCENCODER
private:
- BYTE * gcEpilogTable;
+ BYTE* gcEpilogTable;
+
+ unsigned gcEpilogPrevOffset;
- unsigned gcEpilogPrevOffset;
+ size_t gcInfoBlockHdrSave(BYTE* dest,
+ int mask,
+ unsigned methodSize,
+ unsigned prologSize,
+ unsigned epilogSize,
+ InfoHdr* header,
+ int* s_cached);
- size_t gcInfoBlockHdrSave(BYTE * dest,
- int mask,
- unsigned methodSize,
- unsigned prologSize,
- unsigned epilogSize,
- InfoHdr * header,
- int * s_cached);
public:
- static void gcInitEncoderLookupTable ();
+ static void gcInitEncoderLookupTable();
+
private:
- static size_t gcRecordEpilog (void * pCallBackData,
- unsigned offset);
+ static size_t gcRecordEpilog(void* pCallBackData, unsigned offset);
#else // JIT32_GCENCODER
- void gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder,
- unsigned methodSize,
- unsigned prologSize);
+ void gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSize, unsigned prologSize);
#ifdef DEBUG
- void gcDumpVarPtrDsc (varPtrDsc* desc);
+ void gcDumpVarPtrDsc(varPtrDsc* desc);
#endif // DEBUG
#endif // JIT32_GCENCODER
-
#if DUMP_GC_TABLES
- void gcFindPtrsInFrame (const void * infoBlock,
- const void * codeBlock,
- unsigned offs);
+ void gcFindPtrsInFrame(const void* infoBlock, const void* codeBlock, unsigned offs);
#ifdef JIT32_GCENCODER
- unsigned gcInfoBlockHdrDump(const BYTE * table,
- InfoHdr * header, /* OUT */
- unsigned * methodSize); /* OUT */
+ unsigned gcInfoBlockHdrDump(const BYTE* table,
+ InfoHdr* header, /* OUT */
+ unsigned* methodSize); /* OUT */
- unsigned gcDumpPtrTable (const BYTE * table,
- const InfoHdr& header,
- unsigned methodSize);
+ unsigned gcDumpPtrTable(const BYTE* table, const InfoHdr& header, unsigned methodSize);
#endif // JIT32_GCENCODER
#endif // DUMP_GC_TABLES
@@ -387,66 +378,70 @@ private:
#ifndef LEGACY_BACKEND
// This method updates the appropriate reg masks when a variable is moved.
public:
- void gcUpdateForRegVarMove(regMaskTP srcMask, regMaskTP dstMask, LclVarDsc *varDsc);
+ void gcUpdateForRegVarMove(regMaskTP srcMask, regMaskTP dstMask, LclVarDsc* varDsc);
#endif // !LEGACY_BACKEND
};
-inline
-unsigned char encodeUnsigned(BYTE *dest, unsigned value)
+inline unsigned char encodeUnsigned(BYTE* dest, unsigned value)
{
unsigned char size = 1;
- unsigned tmp = value;
- while (tmp > 0x7F) {
+ unsigned tmp = value;
+ while (tmp > 0x7F)
+ {
tmp >>= 7;
- assert(size < 6); // Invariant.
+ assert(size < 6); // Invariant.
size++;
}
- if (dest) {
+ if (dest)
+ {
// write the bytes starting at the end of dest in LSB to MSB order
BYTE* p = dest + size;
BYTE cont = 0; // The last byte has no continuation flag
- while (value > 0x7F) {
+ while (value > 0x7F)
+ {
*--p = cont | (value & 0x7f);
value >>= 7;
- cont = 0x80; // Non last bytes have a continuation flag
+ cont = 0x80; // Non last bytes have a continuation flag
}
- *--p = cont | (BYTE)value; // Now write the first byte
+ *--p = cont | (BYTE)value; // Now write the first byte
assert(p == dest);
}
return size;
}
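For reference, encodeUnsigned above emits the value in 7-bit groups, most significant group first, setting the 0x80 continuation flag on every byte except the last; passing dest == nullptr only measures the length. A hand-traced example, assuming the function as defined above:

    BYTE buf[6];
    unsigned char len = encodeUnsigned(buf, 300); // 300 == 0x12C
    // len == 2: buf[0] == 0x82 (high group 0x02 with continuation bit), buf[1] == 0x2C (low group)
    assert(encodeUnsigned(nullptr, 300) == 2);    // size-only query, nothing is written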
-inline
-unsigned char encodeUDelta(BYTE *dest, unsigned value, unsigned lastValue)
+inline unsigned char encodeUDelta(BYTE* dest, unsigned value, unsigned lastValue)
{
assert(value >= lastValue);
return encodeUnsigned(dest, value - lastValue);
}
-inline
-unsigned char encodeSigned(BYTE *dest, int val)
+inline unsigned char encodeSigned(BYTE* dest, int val)
{
unsigned char size = 1;
- unsigned value = val;
- BYTE neg = 0;
- if ( val < 0) {
+ unsigned value = val;
+ BYTE neg = 0;
+ if (val < 0)
+ {
value = -val;
- neg = 0x40;
+ neg = 0x40;
}
- unsigned tmp = value;
- while (tmp > 0x3F) {
+ unsigned tmp = value;
+ while (tmp > 0x3F)
+ {
tmp >>= 7;
assert(size < 16); // Definitely sufficient for unsigned. Fits in an unsigned char, certainly.
size++;
}
- if (dest) {
+ if (dest)
+ {
// write the bytes starting at the end of dest in LSB to MSB order
BYTE* p = dest + size;
BYTE cont = 0; // The last byte has no continuation flag
- while (value > 0x3F) {
+ while (value > 0x3F)
+ {
*--p = cont | (value & 0x7f);
value >>= 7;
- cont = 0x80; // Non last bytes have a continuation flag
+ cont = 0x80; // Non last bytes have a continuation flag
}
*--p = neg | cont | (BYTE)value; // Now write the first byte
assert(p == dest);
@@ -454,6 +449,4 @@ unsigned char encodeSigned(BYTE *dest, int val)
return size;
}
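encodeSigned follows the same scheme but reserves bit 0x40 of the first (most significant) byte for the sign, which is why its loop threshold is 0x3F rather than 0x7F. Hand-traced with the definition above:

    BYTE sbuf[16];
    assert(encodeSigned(sbuf, 5) == 1 && sbuf[0] == 0x05);  // positive value, fits in 6 bits
    assert(encodeSigned(sbuf, -5) == 1 && sbuf[0] == 0x45); // same magnitude, 0x40 sign bit set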
-
-
#endif // _JITGCINFO_H_
diff --git a/src/jit/jitpch.h b/src/jit/jitpch.h
index e7966de92e..2e69e79208 100644
--- a/src/jit/jitpch.h
+++ b/src/jit/jitpch.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#include <stdint.h>
#include <windows.h>
#include <wchar.h>
diff --git a/src/jit/jitstd.h b/src/jit/jitstd.h
index 59995fd89f..6b428679f0 100644
--- a/src/jit/jitstd.h
+++ b/src/jit/jitstd.h
@@ -2,8 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
-
#include "allocator.h"
#include "type_traits.h"
#include "pair.h"
diff --git a/src/jit/jitstd/allocator.h b/src/jit/jitstd/allocator.h
index c09c3ab07c..2bd33daa98 100644
--- a/src/jit/jitstd/allocator.h
+++ b/src/jit/jitstd/allocator.h
@@ -116,7 +116,7 @@ public:
pointer address(reference val);
const_pointer address(const_reference val) const;
- pointer allocate(size_type count, allocator<void>::const_pointer hint = 0);
+ pointer allocate(size_type count, allocator<void>::const_pointer hint = nullptr);
void construct(pointer ptr, const_reference val);
void deallocate(pointer ptr, size_type size);
void destroy(pointer ptr);
diff --git a/src/jit/jitstd/list.h b/src/jit/jitstd/list.h
index 01043e09b5..85545f741e 100644
--- a/src/jit/jitstd/list.h
+++ b/src/jit/jitstd/list.h
@@ -293,8 +293,8 @@ namespace jitstd
{
template <typename T, typename Allocator>
list<T, Allocator>::list(const Allocator& allocator)
- : m_pHead(NULL)
- , m_pTail(NULL)
+ : m_pHead(nullptr)
+ , m_pTail(nullptr)
, m_nSize(0)
, m_allocator(allocator)
, m_nodeAllocator(allocator)
@@ -393,7 +393,7 @@ bool list<T, Allocator>::empty() const
template <typename T, typename Allocator>
typename list<T, Allocator>::iterator list<T, Allocator>::end()
{
- return iterator(NULL);
+ return iterator(nullptr);
}
template <typename T, typename Allocator>
@@ -406,7 +406,7 @@ template <typename T, typename Allocator>
typename list<T, Allocator>::iterator list<T, Allocator>::erase(iterator position)
{
// Nothing to erase.
- assert(position.m_pNode != NULL);
+ assert(position.m_pNode != nullptr);
--m_nSize;
@@ -414,7 +414,7 @@ typename list<T, Allocator>::iterator list<T, Allocator>::erase(iterator positio
Node* pPrev = pNode->m_pPrev;
Node* pNext = pNode->m_pNext;
- if (pPrev != NULL)
+ if (pPrev != nullptr)
{
pPrev->m_pNext = pNext;
}
@@ -423,7 +423,7 @@ typename list<T, Allocator>::iterator list<T, Allocator>::erase(iterator positio
m_pHead = pNext;
}
- if (pNext != NULL)
+ if (pNext != nullptr)
{
pNext->m_pPrev = pPrev;
}
@@ -563,12 +563,12 @@ void list<T, Allocator>::pop_back()
if (m_pHead != m_pTail)
{
m_pTail = m_pTail->m_pPrev;
- m_pTail->m_pNext = NULL;
+ m_pTail->m_pNext = nullptr;
}
else
{
- m_pHead = NULL;
- m_pTail = NULL;
+ m_pHead = nullptr;
+ m_pTail = nullptr;
}
pDelete->~Node();
m_nodeAllocator.deallocate(pDelete, 1);
@@ -664,7 +664,7 @@ void list<T, Allocator>::remove_if(Predicate pred)
template <typename T, typename Allocator>
typename list<T, Allocator>::reverse_iterator list<T, Allocator>::rend()
{
- return reverse_iterator(NULL);
+ return reverse_iterator(nullptr);
}
template <typename T, typename Allocator>
@@ -768,14 +768,14 @@ void list<T, Allocator>::unique(const BinaryPredicate& binary_pred)
template <typename T, typename Allocator>
void list<T, Allocator>::destroy_helper()
{
- while (m_pTail != NULL)
+ while (m_pTail != nullptr)
{
Node* prev = m_pTail->m_pPrev;
m_pTail->~Node();
m_nodeAllocator.deallocate(m_pTail, 1);
m_pTail = prev;
}
- m_pHead = NULL;
+ m_pHead = nullptr;
m_nSize = 0;
}
diff --git a/src/jit/jitstd/vector.h b/src/jit/jitstd/vector.h
index 6cfbfd6ca0..d252e18253 100644
--- a/src/jit/jitstd/vector.h
+++ b/src/jit/jitstd/vector.h
@@ -281,7 +281,7 @@ size_t iterator_difference(InputIterator first, const InputIterator& last)
template <typename T, typename Allocator>
vector<T, Allocator>::vector(const Allocator& allocator)
: m_allocator(allocator)
- , m_pArray(NULL)
+ , m_pArray(nullptr)
, m_nSize(0)
, m_nCapacity(0)
{
diff --git a/src/jit/jittelemetry.cpp b/src/jit/jittelemetry.cpp
index f480759231..2d5a2102d1 100644
--- a/src/jit/jittelemetry.cpp
+++ b/src/jit/jittelemetry.cpp
@@ -54,12 +54,12 @@
// in different binaries. Do not share the same provider handle across DLLs.
// As long as you do not pass an hProvider from one DLL to another, TraceLogging
// will properly keep track of the events."
-//
+//
// 2) CoreCLR is linked into the CLR. CLR already creates an instance, so where do we create the JIT's instance?
// Answer:
// "Ideally you would have one provider per DLL, but if you're folding distinct sets
// of functionality into one DLL (like shell32.dll or similar sort of catch-all things)
-// you can have perhaps a few more providers per binary."
+// you can have perhaps a few more providers per binary."
//
// B. Determining where to register and unregister the provider instance?
// 1) For CLRJIT.dll we can register the provider instance during jitDllOnProcessAttach.
@@ -67,7 +67,7 @@
// referencing environment variables during the DLL load and unload path.
// Referencing environment variables through ConfigDWORD uses UtilCode.
// This roughly translates to InitUtilcode() being called before jitDllOnProcessAttach.
-//
+//
// For CLRJIT.dll, compStartup is called on jitOnDllProcessAttach().
// This can be called twice through sxsJitStartup -- so prevent double initialization.
// UtilCode is init-ed by this time. The same is true for CoreCLR.
@@ -75,10 +75,10 @@
// 2) For CLRJIT.dll and CoreCLR, compShutdown will be called during jitOnDllProcessDetach().
//
// C. Determining the data to collect:
-//
+//
// IMPORTANT: Since telemetry data can be collected at any time after DLL load,
// make sure you initialize the compiler state variables you access in telemetry
-// data collection. For example, if you are transmitting method names, then
+// data collection. For example, if you are transmitting method names, then
// make sure info.compMethodHnd is initialized at that point.
//
// 1) Tracking noway assert count:
@@ -112,19 +112,22 @@
#define BUILD_MACHINE BUILD_STR2(__BUILDMACHINE__)
// A DLL local instance of the DotNet provider
-TRACELOGGING_DEFINE_PROVIDER(g_hClrJitProvider, CLRJIT_PROVIDER_NAME, CLRJIT_PROVIDER_ID, TraceLoggingOptionMicrosoftTelemetry());
+TRACELOGGING_DEFINE_PROVIDER(g_hClrJitProvider,
+ CLRJIT_PROVIDER_NAME,
+ CLRJIT_PROVIDER_ID,
+ TraceLoggingOptionMicrosoftTelemetry());
// Threshold to detect if we are hitting too many bad (noway) methods
// over good methods per process to prevent logging too much data.
-static const double NOWAY_NOISE_RATIO = 0.6; // Threshold of (bad / total) beyond which we'd stop
- // logging. We'd restart if the pass rate improves.
-static const unsigned NOWAY_SUFFICIENCY_THRESHOLD = 25; // Count of methods beyond which we'd apply percent
- // threshold
+static const double NOWAY_NOISE_RATIO = 0.6; // Threshold of (bad / total) beyond which we'd stop
+ // logging. We'd restart if the pass rate improves.
+static const unsigned NOWAY_SUFFICIENCY_THRESHOLD = 25; // Count of methods beyond which we'd apply percent
+ // threshold
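Combined with the check in NotifyNowayAssert below, these defaults mean event logging is suppressed only once more than 25 methods in the process have hit a noway assert and those hits exceed 60% of compilation attempts; for example, 30 noway hits against 15 clean compilations gives 30 / (15 + 30) ≈ 0.67, which stops further events until the pass rate improves.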
// Initialize Telemetry State
-volatile bool JitTelemetry::s_fProviderRegistered = false;
-volatile UINT32 JitTelemetry::s_uMethodsCompiled = 0;
-volatile UINT32 JitTelemetry::s_uMethodsHitNowayAssert = 0;
+volatile bool JitTelemetry::s_fProviderRegistered = false;
+volatile UINT32 JitTelemetry::s_uMethodsCompiled = 0;
+volatile UINT32 JitTelemetry::s_uMethodsHitNowayAssert = 0;
// Constructor for telemetry state per compiler instance
JitTelemetry::JitTelemetry()
@@ -142,11 +145,11 @@ JitTelemetry::JitTelemetry()
//
void JitTelemetry::Initialize(Compiler* c)
{
- comp = c;
- m_pszAssemblyName = "";
- m_pszScopeName = "";
- m_pszMethodName = "";
- m_uMethodHash = 0;
+ comp = c;
+ m_pszAssemblyName = "";
+ m_pszScopeName = "";
+ m_pszMethodName = "";
+ m_uMethodHash = 0;
m_fMethodInfoCached = false;
}
@@ -254,9 +257,9 @@ void JitTelemetry::NotifyNowayAssert(const char* filename, unsigned line)
// Check if our assumption that noways are rare is invalid for this
// process. If so, return early rather than log too much data.
- unsigned noways = s_uMethodsHitNowayAssert;
+ unsigned noways = s_uMethodsHitNowayAssert;
unsigned attempts = max(1, s_uMethodsCompiled + noways);
- double ratio = (noways / ((double) attempts));
+ double ratio = (noways / ((double)attempts));
if (noways > NOWAY_SUFFICIENCY_THRESHOLD && ratio > NOWAY_NOISE_RATIO)
{
return;
@@ -264,41 +267,37 @@ void JitTelemetry::NotifyNowayAssert(const char* filename, unsigned line)
assert(comp);
- UINT32 nowayIndex = s_uMethodsHitNowayAssert;
- UINT32 codeSize = 0;
- INT32 minOpts = -1;
- const char* lastPhase = "";
+ UINT32 nowayIndex = s_uMethodsHitNowayAssert;
+ UINT32 codeSize = 0;
+ INT32 minOpts = -1;
+ const char* lastPhase = "";
if (comp != nullptr)
{
- codeSize = comp->info.compILCodeSize;
- minOpts = comp->opts.IsMinOptsSet() ? comp->opts.MinOpts() : -1;
+ codeSize = comp->info.compILCodeSize;
+ minOpts = comp->opts.IsMinOptsSet() ? comp->opts.MinOpts() : -1;
lastPhase = PhaseNames[comp->previousCompletedPhase];
}
-
+
CacheCurrentMethodInfo();
- TraceLoggingWrite(g_hClrJitProvider,
- "CLRJIT.NowayAssert",
-
- TraceLoggingUInt32(codeSize, "IL_CODE_SIZE"),
- TraceLoggingInt32(minOpts, "MINOPTS_MODE"),
- TraceLoggingString(lastPhase, "PREVIOUS_COMPLETED_PHASE"),
-
- TraceLoggingString(m_pszAssemblyName, "ASSEMBLY_NAME"),
- TraceLoggingString(m_pszMethodName, "METHOD_NAME"),
- TraceLoggingString(m_pszScopeName, "METHOD_SCOPE"),
- TraceLoggingUInt32(m_uMethodHash, "METHOD_HASH"),
-
- TraceLoggingString(filename, "FILENAME"),
- TraceLoggingUInt32(line, "LINE"),
- TraceLoggingUInt32(nowayIndex, "NOWAY_INDEX"),
-
- TraceLoggingString(TARGET_READABLE_NAME, "ARCH"),
- TraceLoggingString(VER_FILEVERSION_STR, "VERSION"),
- TraceLoggingString(BUILD_MACHINE, "BUILD"),
- TraceLoggingString(VER_COMMENTS_STR, "FLAVOR"),
-
- TraceLoggingKeyword(MICROSOFT_KEYWORD_TELEMETRY));
+ TraceLoggingWrite(g_hClrJitProvider, "CLRJIT.NowayAssert",
+
+ TraceLoggingUInt32(codeSize, "IL_CODE_SIZE"), TraceLoggingInt32(minOpts, "MINOPTS_MODE"),
+ TraceLoggingString(lastPhase, "PREVIOUS_COMPLETED_PHASE"),
+
+ TraceLoggingString(m_pszAssemblyName, "ASSEMBLY_NAME"),
+ TraceLoggingString(m_pszMethodName, "METHOD_NAME"),
+ TraceLoggingString(m_pszScopeName, "METHOD_SCOPE"),
+ TraceLoggingUInt32(m_uMethodHash, "METHOD_HASH"),
+
+ TraceLoggingString(filename, "FILENAME"), TraceLoggingUInt32(line, "LINE"),
+ TraceLoggingUInt32(nowayIndex, "NOWAY_INDEX"),
+
+ TraceLoggingString(TARGET_READABLE_NAME, "ARCH"),
+ TraceLoggingString(VER_FILEVERSION_STR, "VERSION"), TraceLoggingString(BUILD_MACHINE, "BUILD"),
+ TraceLoggingString(VER_COMMENTS_STR, "FLAVOR"),
+
+ TraceLoggingKeyword(MICROSOFT_KEYWORD_TELEMETRY));
}
//------------------------------------------------------------------------
@@ -339,7 +338,7 @@ void JitTelemetry::CacheCurrentMethodInfo()
// scopeName - Pointer to hold scope name upon return
// methodName - Pointer to hold method name upon return
// methodHash - Pointer to hold method hash upon return
-//
+//
// Description:
// Obtains from the JIT EE interface the information for the
// current method under compilation.
@@ -349,7 +348,10 @@ void JitTelemetry::CacheCurrentMethodInfo()
// methods, so call this method only when there is less impact
// to throughput.
//
-void Compiler::compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash)
+void Compiler::compGetTelemetryDefaults(const char** assemblyName,
+ const char** scopeName,
+ const char** methodName,
+ unsigned* methodHash)
{
if (info.compMethodHnd != nullptr)
{
@@ -362,8 +364,7 @@ void Compiler::compGetTelemetryDefaults(const char** assemblyName, const char**
// SuperPMI needs to implement record/replay of these method calls.
*assemblyName = info.compCompHnd->getAssemblyName(
- info.compCompHnd->getModuleAssembly(
- info.compCompHnd->getClassModule(info.compClassHnd)));
+ info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd)));
}
__except (EXCEPTION_EXECUTE_HANDLER)
{
diff --git a/src/jit/jittelemetry.h b/src/jit/jittelemetry.h
index 92f7acbec6..24a0ce7b5d 100644
--- a/src/jit/jittelemetry.h
+++ b/src/jit/jittelemetry.h
@@ -13,7 +13,6 @@ class Compiler;
class JitTelemetry
{
public:
-
// Notify DLL load.
static void NotifyDllProcessAttach();
@@ -36,7 +35,6 @@ public:
static bool IsTelemetryEnabled();
private:
-
// Obtain current method information from VM and cache for
// future uses.
void CacheCurrentMethodInfo();
@@ -50,7 +48,7 @@ private:
// Methods compiled per DLL unload
static volatile UINT32 s_uMethodsCompiled;
-
+
// Methods compiled per DLL unload that hit noway assert (per process)
static volatile UINT32 s_uMethodsHitNowayAssert;
//--------------------------------------------------------------------------------
diff --git a/src/jit/lclvars.cpp b/src/jit/lclvars.cpp
index 918e7aefd3..62f621e7c1 100644
--- a/src/jit/lclvars.cpp
+++ b/src/jit/lclvars.cpp
@@ -26,20 +26,20 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#ifdef DEBUG
#if DOUBLE_ALIGN
/* static */
-unsigned Compiler::s_lvaDoubleAlignedProcsCount = 0;
+unsigned Compiler::s_lvaDoubleAlignedProcsCount = 0;
#endif
#endif
/*****************************************************************************/
-void Compiler::lvaInit()
+void Compiler::lvaInit()
{
/* We haven't allocated stack variables yet */
lvaRefCountingStarted = false;
lvaLocalVarRefCounted = false;
- lvaSortAgain = false; // false: We don't need to call lvaSortOnly()
- lvaTrackedFixed = false; // false: We can still add new tracked variables
+ lvaSortAgain = false; // false: We don't need to call lvaSortOnly()
+ lvaTrackedFixed = false; // false: We can still add new tracked variables
lvaDoneFrameLayout = NO_FRAME_LAYOUT;
#if !FEATURE_EH_FUNCLETS
@@ -49,26 +49,26 @@ void Compiler::lvaInit()
lvaReversePInvokeFrameVar = BAD_VAR_NUM;
#if FEATURE_FIXED_OUT_ARGS
lvaPInvokeFrameRegSaveVar = BAD_VAR_NUM;
- lvaOutgoingArgSpaceVar = BAD_VAR_NUM;
+ lvaOutgoingArgSpaceVar = BAD_VAR_NUM;
#endif // FEATURE_FIXED_OUT_ARGS
#ifdef _TARGET_ARM_
lvaPromotedStructAssemblyScratchVar = BAD_VAR_NUM;
#endif // _TARGET_ARM_
- lvaLocAllocSPvar = BAD_VAR_NUM;
- lvaNewObjArrayArgs = BAD_VAR_NUM;
- lvaGSSecurityCookie = BAD_VAR_NUM;
+ lvaLocAllocSPvar = BAD_VAR_NUM;
+ lvaNewObjArrayArgs = BAD_VAR_NUM;
+ lvaGSSecurityCookie = BAD_VAR_NUM;
#ifdef _TARGET_X86_
lvaVarargsBaseOfStkArgs = BAD_VAR_NUM;
#endif // _TARGET_X86_
lvaVarargsHandleArg = BAD_VAR_NUM;
- lvaSecurityObject = BAD_VAR_NUM;
- lvaStubArgumentVar = BAD_VAR_NUM;
- lvaArg0Var = BAD_VAR_NUM;
- lvaMonAcquired = BAD_VAR_NUM;
-
+ lvaSecurityObject = BAD_VAR_NUM;
+ lvaStubArgumentVar = BAD_VAR_NUM;
+ lvaArg0Var = BAD_VAR_NUM;
+ lvaMonAcquired = BAD_VAR_NUM;
+
lvaInlineeReturnSpillTemp = BAD_VAR_NUM;
- gsShadowVarInfo = NULL;
+ gsShadowVarInfo = nullptr;
#if FEATURE_EH_FUNCLETS
lvaPSPSym = BAD_VAR_NUM;
#endif
@@ -83,7 +83,7 @@ void Compiler::lvaInit()
/*****************************************************************************/
-void Compiler::lvaInitTypeRef()
+void Compiler::lvaInitTypeRef()
{
/* x86 args look something like this:
@@ -104,9 +104,9 @@ void Compiler::lvaInitTypeRef()
/* Set compArgsCount and compLocalsCount */
- info.compArgsCount = info.compMethodInfo->args.numArgs;
-
- // Is there a 'this' pointer
+ info.compArgsCount = info.compMethodInfo->args.numArgs;
+
+ // Is there a 'this' pointer
if (!info.compIsStatic)
{
@@ -117,13 +117,13 @@ void Compiler::lvaInitTypeRef()
info.compThisArg = BAD_VAR_NUM;
}
- info.compILargsCount = info.compArgsCount;
+ info.compILargsCount = info.compArgsCount;
#ifdef FEATURE_SIMD
if (featureSIMD && (info.compRetNativeType == TYP_STRUCT))
{
var_types structType = impNormStructType(info.compMethodInfo->args.retTypeClass);
- info.compRetType = structType;
+ info.compRetType = structType;
}
#endif // FEATURE_SIMD
@@ -138,8 +138,8 @@ void Compiler::lvaInitTypeRef()
{
CORINFO_CLASS_HANDLE retClsHnd = info.compMethodInfo->args.retTypeClass;
- Compiler::structPassingKind howToReturnStruct;
- var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
+ Compiler::structPassingKind howToReturnStruct;
+ var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
if (howToReturnStruct == SPK_PrimitiveType)
{
@@ -150,9 +150,13 @@ void Compiler::lvaInitTypeRef()
// ToDo: Refactor this common code sequence into its own method as it is used 4+ times
if ((returnType == TYP_LONG) && (compLongUsed == false))
+ {
compLongUsed = true;
+ }
else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
+ {
compFloatingPointUsed = true;
+ }
}
}
@@ -181,36 +185,35 @@ void Compiler::lvaInitTypeRef()
{
info.compArgsCount++;
}
- else
+ else
{
- info.compTypeCtxtArg = BAD_VAR_NUM;
+ info.compTypeCtxtArg = BAD_VAR_NUM;
}
- lvaCount =
- info.compLocalsCount = info.compArgsCount +
- info.compMethodInfo->locals.numArgs;
+ lvaCount = info.compLocalsCount = info.compArgsCount + info.compMethodInfo->locals.numArgs;
- info.compILlocalsCount = info.compILargsCount +
- info.compMethodInfo->locals.numArgs;
+ info.compILlocalsCount = info.compILargsCount + info.compMethodInfo->locals.numArgs;
/* Now allocate the variable descriptor table */
if (compIsForInlining())
{
- lvaTable = impInlineInfo->InlinerCompiler->lvaTable;
+ lvaTable = impInlineInfo->InlinerCompiler->lvaTable;
lvaCount = impInlineInfo->InlinerCompiler->lvaCount;
lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt;
// No more stuff needs to be done.
return;
- }
+ }
lvaTableCnt = lvaCount * 2;
if (lvaTableCnt < 16)
+ {
lvaTableCnt = 16;
+ }
- lvaTable = (LclVarDsc*)compGetMemArray(lvaTableCnt, sizeof(*lvaTable), CMK_LvaTable);
+ lvaTable = (LclVarDsc*)compGetMemArray(lvaTableCnt, sizeof(*lvaTable), CMK_LvaTable);
size_t tableSize = lvaTableCnt * sizeof(*lvaTable);
memset(lvaTable, 0, tableSize);
for (unsigned i = 0; i < lvaTableCnt; i++)
@@ -225,46 +228,37 @@ void Compiler::lvaInitTypeRef()
//-------------------------------------------------------------------------
InitVarDscInfo varDscInfo;
- varDscInfo.Init(lvaTable, hasRetBuffArg);
+ varDscInfo.Init(lvaTable, hasRetBuffArg);
lvaInitArgs(&varDscInfo);
//-------------------------------------------------------------------------
// Finally the local variables
//-------------------------------------------------------------------------
-
- unsigned varNum = varDscInfo.varNum;
- LclVarDsc * varDsc = varDscInfo.varDsc;
- CORINFO_ARG_LIST_HANDLE localsSig = info.compMethodInfo->locals.args;
- for (unsigned i = 0;
- i < info.compMethodInfo->locals.numArgs;
- i++, varNum++, varDsc++, localsSig = info.compCompHnd->getArgNext(localsSig))
+ unsigned varNum = varDscInfo.varNum;
+ LclVarDsc* varDsc = varDscInfo.varDsc;
+ CORINFO_ARG_LIST_HANDLE localsSig = info.compMethodInfo->locals.args;
+
+ for (unsigned i = 0; i < info.compMethodInfo->locals.numArgs;
+ i++, varNum++, varDsc++, localsSig = info.compCompHnd->getArgNext(localsSig))
{
CORINFO_CLASS_HANDLE typeHnd;
- CorInfoTypeWithMod corInfoType = info.compCompHnd->getArgType(
- &info.compMethodInfo->locals,
- localsSig,
- &typeHnd);
- lvaInitVarDsc(varDsc,
- varNum,
- strip(corInfoType),
- typeHnd,
- localsSig,
- &info.compMethodInfo->locals);
-
- varDsc->lvPinned = ((corInfoType & CORINFO_TYPE_MOD_PINNED) != 0);
- varDsc->lvOnFrame = true; // The final home for this local variable might be our local stack frame
- }
-
- if (// If there already exist unsafe buffers, don't mark more structs as unsafe
- // as that will cause them to be placed along with the real unsafe buffers,
- // unnecessarily exposing them to overruns. This can affect GS tests which
+ CorInfoTypeWithMod corInfoType =
+ info.compCompHnd->getArgType(&info.compMethodInfo->locals, localsSig, &typeHnd);
+ lvaInitVarDsc(varDsc, varNum, strip(corInfoType), typeHnd, localsSig, &info.compMethodInfo->locals);
+
+ varDsc->lvPinned = ((corInfoType & CORINFO_TYPE_MOD_PINNED) != 0);
+ varDsc->lvOnFrame = true; // The final home for this local variable might be our local stack frame
+ }
+
+ if ( // If there already exist unsafe buffers, don't mark more structs as unsafe
+ // as that will cause them to be placed along with the real unsafe buffers,
+ // unnecessarily exposing them to overruns. This can affect GS tests which
// intentionally do buffer-overruns.
!getNeedsGSSecurityCookie() &&
// GS checks require the stack to be re-ordered, which can't be done with EnC
- !opts.compDbgEnC &&
- compStressCompile(STRESS_UNSAFE_BUFFER_CHECKS, 25))
+ !opts.compDbgEnC && compStressCompile(STRESS_UNSAFE_BUFFER_CHECKS, 25))
{
setNeedsGSSecurityCookie();
compGSReorderStackLayout = true;
@@ -272,26 +266,30 @@ void Compiler::lvaInitTypeRef()
for (unsigned i = 0; i < lvaCount; i++)
{
if ((lvaTable[i].lvType == TYP_STRUCT) && compStressCompile(STRESS_GENERIC_VARN, 60))
+ {
lvaTable[i].lvIsUnsafeBuffer = true;
- }
+ }
+ }
}
if (getNeedsGSSecurityCookie())
{
// Ensure that there will be at least one stack variable since
// we require that the GSCookie does not have a 0 stack offset.
- unsigned dummy = lvaGrabTempWithImplicitUse(false DEBUGARG("GSCookie dummy"));
+ unsigned dummy = lvaGrabTempWithImplicitUse(false DEBUGARG("GSCookie dummy"));
lvaTable[dummy].lvType = TYP_INT;
}
#ifdef DEBUG
if (verbose)
+ {
lvaTableDump(INITIAL_FRAME_LAYOUT);
+ }
#endif
}
/*****************************************************************************/
-void Compiler::lvaInitArgs(InitVarDscInfo * varDscInfo)
+void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo)
{
compArgSize = 0;
@@ -300,8 +298,8 @@ void Compiler::lvaInitArgs(InitVarDscInfo * varDscInfo)
if (compIsProfilerHookNeeded())
{
codeGen->regSet.rsMaskPreSpillRegArg |= RBM_ARG_REGS;
- }
-#endif
+ }
+#endif
//----------------------------------------------------------------------
@@ -311,7 +309,7 @@ void Compiler::lvaInitArgs(InitVarDscInfo * varDscInfo)
/* If we have a hidden return-buffer parameter, that comes here */
lvaInitRetBuffArg(varDscInfo);
- //======================================================================
+//======================================================================
#if USER_ARGS_COME_LAST
//@GENERICS: final instantiation-info argument for shared generic methods
@@ -340,7 +338,7 @@ void Compiler::lvaInitArgs(InitVarDscInfo * varDscInfo)
// We have set info.compArgsCount in compCompile()
noway_assert(varDscInfo->varNum == info.compArgsCount);
- assert (varDscInfo->intRegArgNum <= MAX_REG_ARG);
+ assert(varDscInfo->intRegArgNum <= MAX_REG_ARG);
codeGen->intRegState.rsCalleeRegArgCount = varDscInfo->intRegArgNum;
#if !FEATURE_STACK_FP_X87
@@ -361,12 +359,12 @@ void Compiler::lvaInitArgs(InitVarDscInfo * varDscInfo)
}
/*****************************************************************************/
-void Compiler::lvaInitThisPtr(InitVarDscInfo * varDscInfo)
+void Compiler::lvaInitThisPtr(InitVarDscInfo* varDscInfo)
{
- LclVarDsc * varDsc = varDscInfo->varDsc;
- if (!info.compIsStatic)
+ LclVarDsc* varDsc = varDscInfo->varDsc;
+ if (!info.compIsStatic)
{
- varDsc->lvIsParam = 1;
+ varDsc->lvIsParam = 1;
#if ASSERTION_PROP
varDsc->lvSingleDef = 1;
#endif
@@ -383,7 +381,7 @@ void Compiler::lvaInitThisPtr(InitVarDscInfo * varDscInfo)
if (featureSIMD)
{
var_types simdBaseType = TYP_UNKNOWN;
- var_types type = impNormStructType(info.compClassHnd, nullptr, nullptr, &simdBaseType);
+ var_types type = impNormStructType(info.compClassHnd, nullptr, nullptr, &simdBaseType);
if (simdBaseType != TYP_UNKNOWN)
{
assert(varTypeIsSIMD(type));
@@ -398,12 +396,14 @@ void Compiler::lvaInitThisPtr(InitVarDscInfo * varDscInfo)
varDsc->lvType = TYP_REF;
}
- if (tiVerificationNeeded)
+ if (tiVerificationNeeded)
{
- varDsc->lvVerTypeInfo = verMakeTypeInfo(info.compClassHnd);
+ varDsc->lvVerTypeInfo = verMakeTypeInfo(info.compClassHnd);
if (varDsc->lvVerTypeInfo.IsValueClass())
+ {
varDsc->lvVerTypeInfo.MakeByRef();
+ }
}
else
{
@@ -416,20 +416,20 @@ void Compiler::lvaInitThisPtr(InitVarDscInfo * varDscInfo)
varDsc->lvIsRegArg = 1;
noway_assert(varDscInfo->intRegArgNum == 0);
- varDsc->lvArgReg = genMapRegArgNumToRegNum(varDscInfo->allocRegArg(TYP_INT), varDsc->TypeGet());
+ varDsc->lvArgReg = genMapRegArgNumToRegNum(varDscInfo->allocRegArg(TYP_INT), varDsc->TypeGet());
#if FEATURE_MULTIREG_ARGS
varDsc->lvOtherArgReg = REG_NA;
#endif
varDsc->setPrefReg(varDsc->lvArgReg, this);
varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("'this' passed in register %s\n", getRegName(varDsc->lvArgReg));
}
#endif
- compArgSize += TARGET_POINTER_SIZE;
+ compArgSize += TARGET_POINTER_SIZE;
varDscInfo->varNum++;
varDscInfo->varDsc++;
@@ -437,10 +437,10 @@ void Compiler::lvaInitThisPtr(InitVarDscInfo * varDscInfo)
}
/*****************************************************************************/
-void Compiler::lvaInitRetBuffArg(InitVarDscInfo * varDscInfo)
+void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo)
{
- LclVarDsc * varDsc = varDscInfo->varDsc;
- bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo);
+ LclVarDsc* varDsc = varDscInfo->varDsc;
+ bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo);
// These two should always match
noway_assert(hasRetBuffArg == varDscInfo->hasRetBufArg);
@@ -461,7 +461,7 @@ void Compiler::lvaInitRetBuffArg(InitVarDscInfo * varDscInfo)
else
{
unsigned retBuffArgNum = varDscInfo->allocRegArg(TYP_INT);
- varDsc->lvArgReg = genMapIntRegArgNumToRegNum(retBuffArgNum);
+ varDsc->lvArgReg = genMapIntRegArgNumToRegNum(retBuffArgNum);
}
#if FEATURE_MULTIREG__ARGS
@@ -475,21 +475,23 @@ void Compiler::lvaInitRetBuffArg(InitVarDscInfo * varDscInfo)
{
CORINFO_SIG_INFO sigInfo;
info.compCompHnd->getMethodSig(info.compMethodHnd, &sigInfo);
- assert(JITtype2varType(sigInfo.retType) == info.compRetType); // Else shouldn't have a ret buff.
+ assert(JITtype2varType(sigInfo.retType) == info.compRetType); // Else shouldn't have a ret buff.
- info.compRetBuffDefStack = (info.compCompHnd->isStructRequiringStackAllocRetBuf(sigInfo.retTypeClass) == TRUE);
+ info.compRetBuffDefStack =
+ (info.compCompHnd->isStructRequiringStackAllocRetBuf(sigInfo.retTypeClass) == TRUE);
if (info.compRetBuffDefStack)
{
- // If we're assured that the ret buff argument points into a callers stack, we will type it as "TYP_I_IMPL"
+ // If we're assured that the ret buff argument points into a caller's stack, we will type it as
+ // "TYP_I_IMPL"
// (native int/unmanaged pointer) so that it's not tracked as a GC ref.
varDsc->lvType = TYP_I_IMPL;
}
}
assert(isValidIntArgReg(varDsc->lvArgReg));
-#ifdef DEBUG
- if (verbose)
- {
+#ifdef DEBUG
+ if (verbose)
+ {
printf("'__retBuf' passed in register %s\n", getRegName(varDsc->lvArgReg));
}
#endif
@@ -503,11 +505,11 @@ void Compiler::lvaInitRetBuffArg(InitVarDscInfo * varDscInfo)
}
/*****************************************************************************/
-void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
+void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
{
- //-------------------------------------------------------------------------
- // Walk the function signature for the explicit arguments
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
+// Walk the function signature for the explicit arguments
+//-------------------------------------------------------------------------
#if defined(_TARGET_X86_)
// Only (some of) the implicit args are enregistered for varargs
@@ -517,44 +519,36 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
varDscInfo->floatRegArgNum = varDscInfo->intRegArgNum;
#endif // _TARGET_*
- CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args;
+ CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args;
- const unsigned argSigLen = info.compMethodInfo->args.numArgs;
+ const unsigned argSigLen = info.compMethodInfo->args.numArgs;
regMaskTP doubleAlignMask = RBM_NONE;
- for (unsigned i = 0;
- i < argSigLen;
- i++, varDscInfo->varNum++, varDscInfo->varDsc++, argLst = info.compCompHnd->getArgNext(argLst))
+ for (unsigned i = 0; i < argSigLen;
+ i++, varDscInfo->varNum++, varDscInfo->varDsc++, argLst = info.compCompHnd->getArgNext(argLst))
{
- LclVarDsc * varDsc = varDscInfo->varDsc;
- CORINFO_CLASS_HANDLE typeHnd = NULL;
+ LclVarDsc* varDsc = varDscInfo->varDsc;
+ CORINFO_CLASS_HANDLE typeHnd = nullptr;
- CorInfoTypeWithMod corInfoType = info.compCompHnd->getArgType(&info.compMethodInfo->args,
- argLst,
- &typeHnd);
- varDsc->lvIsParam = 1;
+ CorInfoTypeWithMod corInfoType = info.compCompHnd->getArgType(&info.compMethodInfo->args, argLst, &typeHnd);
+ varDsc->lvIsParam = 1;
#if ASSERTION_PROP
varDsc->lvSingleDef = 1;
#endif
- lvaInitVarDsc(varDsc,
- varDscInfo->varNum,
- strip(corInfoType),
- typeHnd,
- argLst,
- &info.compMethodInfo->args);
+ lvaInitVarDsc(varDsc, varDscInfo->varNum, strip(corInfoType), typeHnd, argLst, &info.compMethodInfo->args);
// For ARM, ARM64, and AMD64 varargs, all arguments go in integer registers
- var_types argType = mangleVarArgsType(varDsc->TypeGet());
+ var_types argType = mangleVarArgsType(varDsc->TypeGet());
var_types origArgType = argType;
// ARM softfp calling convention should affect only the floating point arguments.
// Otherwise there appear too many surplus pre-spills and other memory operations
// with the associated locations.
- bool isSoftFPPreSpill = opts.compUseSoftFP && varTypeIsFloating(varDsc->TypeGet());
- unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args);
- unsigned cSlots = argSize / TARGET_POINTER_SIZE; // the total number of slots of this argument
- bool isHfaArg = false;
- var_types hfaType = TYP_UNDEF;
+ bool isSoftFPPreSpill = opts.compUseSoftFP && varTypeIsFloating(varDsc->TypeGet());
+ unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args);
+ unsigned cSlots = argSize / TARGET_POINTER_SIZE; // the total number of slots of this argument
+ bool isHfaArg = false;
+ var_types hfaType = TYP_UNDEF;
// Methods that use VarArg or SoftFP cannot have HFA arguments
if (!info.compIsVarArgs && !opts.compUseSoftFP)
@@ -562,7 +556,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
// If the argType is a struct, then check if it is an HFA
if (varTypeIsStruct(argType))
{
- hfaType = GetHfaType(typeHnd); // set to float or double if it is an HFA, otherwise TYP_UNDEF
+ hfaType = GetHfaType(typeHnd); // set to float or double if it is an HFA, otherwise TYP_UNDEF
isHfaArg = varTypeIsFloating(hfaType);
}
}
@@ -572,44 +566,44 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
// The original struct type is available by using origArgType
// We also update the cSlots to be the number of float/double fields in the HFA
argType = hfaType;
- cSlots = varDsc->lvHfaSlots();
+ cSlots = varDsc->lvHfaSlots();
}
// The number of slots that must be enregistered if we are to consider this argument enregistered.
// This is normally the same as cSlots, since we normally either enregister the entire object,
// or none of it. For structs on ARM, however, we only need to enregister a single slot to consider
// it enregistered, as long as we can split the rest onto the stack.
- unsigned cSlotsToEnregister = cSlots;
+ unsigned cSlotsToEnregister = cSlots;
#ifdef _TARGET_ARM_
// On ARM we pass the first 4 words of integer arguments and non-HFA structs in registers.
// But we pre-spill user arguments in varargs methods and structs.
//
unsigned cAlign;
- bool preSpill = info.compIsVarArgs || isSoftFPPreSpill;
+ bool preSpill = info.compIsVarArgs || isSoftFPPreSpill;
switch (origArgType)
{
- case TYP_STRUCT:
- assert(varDsc->lvSize() == argSize);
- cAlign = varDsc->lvStructDoubleAlign ? 2 : 1;
+ case TYP_STRUCT:
+ assert(varDsc->lvSize() == argSize);
+ cAlign = varDsc->lvStructDoubleAlign ? 2 : 1;
- // HFA arguments go on the stack frame. They don't get spilled in the prolog like struct
- // arguments passed in the integer registers but get homed immediately after the prolog.
- if (!isHfaArg)
- {
- cSlotsToEnregister = 1; // HFAs must be totally enregistered or not, but other structs can be split.
- preSpill = true;
- }
- break;
+ // HFA arguments go on the stack frame. They don't get spilled in the prolog like struct
+ // arguments passed in the integer registers but get homed immediately after the prolog.
+ if (!isHfaArg)
+ {
+ cSlotsToEnregister = 1; // HFAs must be totally enregistered or not, but other structs can be split.
+ preSpill = true;
+ }
+ break;
- case TYP_DOUBLE:
- case TYP_LONG:
- cAlign = 2;
- break;
+ case TYP_DOUBLE:
+ case TYP_LONG:
+ cAlign = 2;
+ break;
- default:
- cAlign = 1;
- break;
+ default:
+ cAlign = 1;
+ break;
}
if (isRegParamType(argType))
@@ -630,12 +624,12 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
// Anything that follows will also be on the stack. However, if something from
// floating point regs has been spilled to the stack, we can still use r0-r3 until they are full.
- if (varDscInfo->canEnreg(TYP_INT, 1) && // The beginning of the struct can go in a register
- !varDscInfo->canEnreg(TYP_INT, cSlots) && // The end of the struct can't fit in a register
- varDscInfo->existAnyFloatStackArgs()) // There's at least one stack-based FP arg already
+ if (varDscInfo->canEnreg(TYP_INT, 1) && // The beginning of the struct can go in a register
+ !varDscInfo->canEnreg(TYP_INT, cSlots) && // The end of the struct can't fit in a register
+ varDscInfo->existAnyFloatStackArgs()) // There's at least one stack-based FP arg already
{
- varDscInfo->setAllRegArgUsed(TYP_INT); // Prevent all future use of integer registers
- preSpill = false; // This struct won't be prespilled, since it will go on the stack
+ varDscInfo->setAllRegArgUsed(TYP_INT); // Prevent all future use of integer registers
+ preSpill = false; // This struct won't be prespilled, since it will go on the stack
}
}
@@ -669,7 +663,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
if (structDesc.passedInRegisters)
{
- unsigned intRegCount = 0;
+ unsigned intRegCount = 0;
unsigned floatRegCount = 0;
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
@@ -735,15 +729,15 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
#endif // FEATURE_MULTIREG_ARGS
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- unsigned secondAllocatedRegArgNum = 0;
- var_types firstEightByteType = TYP_UNDEF;
- var_types secondEightByteType = TYP_UNDEF;
+ unsigned secondAllocatedRegArgNum = 0;
+ var_types firstEightByteType = TYP_UNDEF;
+ var_types secondEightByteType = TYP_UNDEF;
if (varTypeIsStruct(argType))
{
if (structDesc.eightByteCount >= 1)
{
- firstEightByteType = GetEightByteType(structDesc, 0);
+ firstEightByteType = GetEightByteType(structDesc, 0);
firstAllocatedRegArgNum = varDscInfo->allocRegArg(firstEightByteType, 1);
}
}
@@ -773,7 +767,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
// If there is a second eightbyte, get a register for it too and map the arg to the reg number.
if (structDesc.eightByteCount >= 2)
{
- secondEightByteType = GetEightByteType(structDesc, 1);
+ secondEightByteType = GetEightByteType(structDesc, 1);
secondAllocatedRegArgNum = varDscInfo->allocRegArg(secondEightByteType, 1);
}
@@ -787,7 +781,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
#ifdef _TARGET_ARM64_
if (cSlots == 2)
{
- varDsc->lvOtherArgReg = genMapRegArgNumToRegNum(firstAllocatedRegArgNum+1, TYP_I_IMPL);
+ varDsc->lvOtherArgReg = genMapRegArgNumToRegNum(firstAllocatedRegArgNum + 1, TYP_I_IMPL);
varDsc->addPrefReg(genRegMask(varDsc->lvOtherArgReg), this);
}
#endif // _TARGET_ARM64_
@@ -809,8 +803,8 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
}
#endif // _TARGET_ARM_
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("Arg #%u passed in register(s) ", varDscInfo->varNum);
bool isFloat = false;
@@ -822,14 +816,14 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
isFloat = varTypeIsFloating(firstEightByteType);
}
else
-#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
{
isFloat = varTypeIsFloating(argType);
}
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- if (varTypeIsStruct(argType))
+ if (varTypeIsStruct(argType))
{
// Print both registers, just to be clear
if (firstEightByteType == TYP_UNDEF)
@@ -838,7 +832,9 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
}
else
{
- printf("firstEightByte: %s", getRegName(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType), isFloat));
+ printf("firstEightByte: %s",
+ getRegName(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType),
+ isFloat));
}
if (secondEightByteType == TYP_UNDEF)
@@ -847,7 +843,9 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
}
else
{
- printf(", secondEightByte: %s", getRegName(genMapRegArgNumToRegNum(secondAllocatedRegArgNum, secondEightByteType), varTypeIsFloating(secondEightByteType)));
+ printf(", secondEightByte: %s",
+ getRegName(genMapRegArgNumToRegNum(secondAllocatedRegArgNum, secondEightByteType),
+ varTypeIsFloating(secondEightByteType)));
}
}
else
@@ -858,9 +856,12 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
for (unsigned ix = 0; ix < cSlots; ix++, regArgNum++)
{
if (ix > 0)
+ {
printf(",");
+ }
- if (!isFloat && (regArgNum >= varDscInfo->maxIntRegArgNum)) // a struct has been split between registers and stack
+ if (!isFloat && (regArgNum >= varDscInfo->maxIntRegArgNum)) // a struct has been split between
+ // registers and stack
{
printf(" stack slots:%d", cSlots - ix);
break;
@@ -873,8 +874,8 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
if (argType == TYP_DOUBLE)
{
// Print both registers, just to be clear
- printf("%s/%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType), isFloat),
- getRegName(genMapRegArgNumToRegNum(regArgNum + 1, argType), isFloat));
+ printf("%s/%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType), isFloat),
+ getRegName(genMapRegArgNumToRegNum(regArgNum + 1, argType), isFloat));
// doubles take 2 slots
assert(ix + 1 < cSlots);
@@ -895,8 +896,8 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
}
printf("\n");
}
-#endif // DEBUG
- } // end if (canPassArgInRegisters)
+#endif // DEBUG
+ } // end if (canPassArgInRegisters)
else
{
#if defined(_TARGET_ARM_)
@@ -923,14 +924,14 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
// multiple of TARGET_POINTER_SIZE. The stack allocated space should always be multiple of TARGET_POINTER_SIZE,
// so round it up.
compArgSize += (unsigned)roundUp(argSize, TARGET_POINTER_SIZE);
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
compArgSize += argSize;
#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
if (info.compIsVarArgs || isHfaArg || isSoftFPPreSpill)
{
#if defined(_TARGET_X86_)
- varDsc->lvStkOffs = compArgSize;
-#else // !_TARGET_X86_
+ varDsc->lvStkOffs = compArgSize;
+#else // !_TARGET_X86_
// TODO-CQ: We shouldn't have to go as far as to declare these
// address-exposed -- DoNotEnregister should suffice.
lvaSetVarAddrExposed(varDscInfo->varNum);
@@ -960,7 +961,8 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
// ; callee saved regs
if (doubleAlignMask == 0x3 && doubleAlignMask != codeGen->regSet.rsMaskPreSpillRegArg)
{
- codeGen->regSet.rsMaskPreSpillAlign = (~codeGen->regSet.rsMaskPreSpillRegArg & ~doubleAlignMask) & RBM_ARG_REGS;
+ codeGen->regSet.rsMaskPreSpillAlign =
+ (~codeGen->regSet.rsMaskPreSpillRegArg & ~doubleAlignMask) & RBM_ARG_REGS;
}
}
}
@@ -968,7 +970,7 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo * varDscInfo)
}
/*****************************************************************************/
-void Compiler::lvaInitGenericsCtxt(InitVarDscInfo * varDscInfo)
+void Compiler::lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo)
{
//@GENERICS: final instantiation-info argument for shared generic methods
// and shared generic struct instance methods
@@ -976,13 +978,13 @@ void Compiler::lvaInitGenericsCtxt(InitVarDscInfo * varDscInfo)
{
info.compTypeCtxtArg = varDscInfo->varNum;
- LclVarDsc * varDsc = varDscInfo->varDsc;
- varDsc->lvIsParam = 1;
+ LclVarDsc* varDsc = varDscInfo->varDsc;
+ varDsc->lvIsParam = 1;
#if ASSERTION_PROP
varDsc->lvSingleDef = 1;
#endif
- varDsc->lvType = TYP_I_IMPL;
+ varDsc->lvType = TYP_I_IMPL;
if (varDscInfo->canEnreg(TYP_I_IMPL))
{
@@ -998,8 +1000,8 @@ void Compiler::lvaInitGenericsCtxt(InitVarDscInfo * varDscInfo)
varDscInfo->intRegArgNum++;
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("'GenCtxt' passed in register %s\n", getRegName(varDsc->lvArgReg));
}
@@ -1018,7 +1020,7 @@ void Compiler::lvaInitGenericsCtxt(InitVarDscInfo * varDscInfo)
#if defined(_TARGET_X86_)
if (info.compIsVarArgs)
- varDsc->lvStkOffs = compArgSize;
+ varDsc->lvStkOffs = compArgSize;
#endif // _TARGET_X86_
varDscInfo->varNum++;
@@ -1027,15 +1029,15 @@ void Compiler::lvaInitGenericsCtxt(InitVarDscInfo * varDscInfo)
}
/*****************************************************************************/
-void Compiler::lvaInitVarArgsHandle(InitVarDscInfo * varDscInfo)
+void Compiler::lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo)
{
if (info.compIsVarArgs)
{
lvaVarargsHandleArg = varDscInfo->varNum;
- LclVarDsc * varDsc = varDscInfo->varDsc;
- varDsc->lvType = TYP_I_IMPL;
- varDsc->lvIsParam = 1;
+ LclVarDsc* varDsc = varDscInfo->varDsc;
+ varDsc->lvType = TYP_I_IMPL;
+ varDsc->lvIsParam = 1;
// Make sure this lives in the stack -- address may be reported to the VM.
// TODO-CQ: This should probably be:
// lvaSetVarDoNotEnregister(varDscInfo->varNum DEBUGARG(DNER_VMNeedsStackAddr));
@@ -1043,7 +1045,7 @@ void Compiler::lvaInitVarArgsHandle(InitVarDscInfo * varDscInfo)
// hammer. But I think it should be possible to switch; it may just work now
// that other problems are fixed.
lvaSetVarAddrExposed(varDscInfo->varNum);
-
+
#if ASSERTION_PROP
varDsc->lvSingleDef = 1;
#endif
@@ -1071,8 +1073,8 @@ void Compiler::lvaInitVarArgsHandle(InitVarDscInfo * varDscInfo)
}
#endif // _TARGET_ARM_
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("'VarArgHnd' passed in register %s\n", getRegName(varDsc->lvArgReg));
}
@@ -1095,11 +1097,11 @@ void Compiler::lvaInitVarArgsHandle(InitVarDscInfo * varDscInfo)
varDscInfo->varDsc++;
#if defined(_TARGET_X86_)
- varDsc->lvStkOffs = compArgSize;
+ varDsc->lvStkOffs = compArgSize;
// Allocate a temp to point at the beginning of the args
- lvaVarargsBaseOfStkArgs = lvaGrabTemp(false DEBUGARG("Varargs BaseOfStkArgs"));
+ lvaVarargsBaseOfStkArgs = lvaGrabTemp(false DEBUGARG("Varargs BaseOfStkArgs"));
lvaTable[lvaVarargsBaseOfStkArgs].lvType = TYP_I_IMPL;
#endif // _TARGET_X86_
@@ -1107,40 +1109,40 @@ void Compiler::lvaInitVarArgsHandle(InitVarDscInfo * varDscInfo)
}
/*****************************************************************************/
-void Compiler::lvaInitVarDsc(LclVarDsc * varDsc,
- unsigned varNum,
- CorInfoType corInfoType,
- CORINFO_CLASS_HANDLE typeHnd,
- CORINFO_ARG_LIST_HANDLE varList,
- CORINFO_SIG_INFO * varSig)
+void Compiler::lvaInitVarDsc(LclVarDsc* varDsc,
+ unsigned varNum,
+ CorInfoType corInfoType,
+ CORINFO_CLASS_HANDLE typeHnd,
+ CORINFO_ARG_LIST_HANDLE varList,
+ CORINFO_SIG_INFO* varSig)
{
noway_assert(varDsc == &lvaTable[varNum]);
-
+
switch (corInfoType)
{
- // Mark types that looks like a pointer for doing shadow-copying of
- // parameters if we have an unsafe buffer.
- // Note that this does not handle structs with pointer fields. Instead,
- // we rely on using the assign-groups/equivalence-groups in
- // gsFindVulnerableParams() to determine if a buffer-struct contains a
- // pointer. We could do better by having the EE determine this for us.
- // Note that we want to keep buffers without pointers at lower memory
- // addresses than buffers with pointers.
- case CORINFO_TYPE_PTR:
- case CORINFO_TYPE_BYREF:
- case CORINFO_TYPE_CLASS:
- case CORINFO_TYPE_STRING:
- case CORINFO_TYPE_VAR:
- case CORINFO_TYPE_REFANY:
- varDsc->lvIsPtr = 1;
- break;
- default:
- break;
+ // Mark types that looks like a pointer for doing shadow-copying of
+ // parameters if we have an unsafe buffer.
+ // Note that this does not handle structs with pointer fields. Instead,
+ // we rely on using the assign-groups/equivalence-groups in
+ // gsFindVulnerableParams() to determine if a buffer-struct contains a
+ // pointer. We could do better by having the EE determine this for us.
+ // Note that we want to keep buffers without pointers at lower memory
+ // addresses than buffers with pointers.
+ case CORINFO_TYPE_PTR:
+ case CORINFO_TYPE_BYREF:
+ case CORINFO_TYPE_CLASS:
+ case CORINFO_TYPE_STRING:
+ case CORINFO_TYPE_VAR:
+ case CORINFO_TYPE_REFANY:
+ varDsc->lvIsPtr = 1;
+ break;
+ default:
+ break;
}
var_types type = JITtype2varType(corInfoType);
if (varTypeIsFloating(type))
- {
+ {
compFloatingPointUsed = true;
}
@@ -1149,8 +1151,8 @@ void Compiler::lvaInitVarDsc(LclVarDsc * varDsc,
varDsc->lvVerTypeInfo = verParseArgSigToTypeInfo(varSig, varList);
}
- if (tiVerificationNeeded)
- {
+ if (tiVerificationNeeded)
+ {
if (varDsc->lvIsParam)
{
// For an incoming ValueType we better be able to have the full type information
@@ -1164,8 +1166,8 @@ void Compiler::lvaInitVarDsc(LclVarDsc * varDsc,
// For an incoming reference type we need to verify that the actual type is
// a reference type and not a valuetype.
- if (type == TYP_REF && !(varDsc->lvVerTypeInfo.IsType(TI_REF) ||
- varDsc->lvVerTypeInfo.IsUnboxedGenericTypeVar()))
+ if (type == TYP_REF &&
+ !(varDsc->lvVerTypeInfo.IsType(TI_REF) || varDsc->lvVerTypeInfo.IsUnboxedGenericTypeVar()))
{
BADCODE("parameter type mismatch");
}
@@ -1174,22 +1176,25 @@ void Compiler::lvaInitVarDsc(LclVarDsc * varDsc,
// Disallow byrefs to byref like objects (ArgTypeHandle)
// techncally we could get away with just not setting them
if (varDsc->lvVerTypeInfo.IsByRef() && verIsByRefLike(DereferenceByRef(varDsc->lvVerTypeInfo)))
+ {
varDsc->lvVerTypeInfo = typeInfo();
-
+ }
+
// we don't want the EE to assert in lvaSetStruct on bad sigs, so change
// the JIT type to avoid even trying to call back
if (varTypeIsStruct(type) && varDsc->lvVerTypeInfo.IsDead())
+ {
type = TYP_VOID;
+ }
}
if (typeHnd)
- {
- unsigned cFlags = info.compCompHnd->getClassAttribs(typeHnd);
+ {
+ unsigned cFlags = info.compCompHnd->getClassAttribs(typeHnd);
// We can get typeHnds for primitive types, these are value types which only contain
// a primitive. We will need the typeHnd to distinguish them, so we store it here.
- if ((cFlags & CORINFO_FLG_VALUECLASS) &&
- !varTypeIsStruct(type))
+ if ((cFlags & CORINFO_FLG_VALUECLASS) && !varTypeIsStruct(type))
{
if (tiVerificationNeeded == false)
{
@@ -1201,22 +1206,26 @@ void Compiler::lvaInitVarDsc(LclVarDsc * varDsc,
varDsc->lvOverlappingFields = StructHasOverlappingFields(cFlags);
}
- if (varTypeIsGC(type))
+ if (varTypeIsGC(type))
+ {
varDsc->lvStructGcCount = 1;
+ }
// Set the lvType (before this point it is TYP_UNDEF).
if ((varTypeIsStruct(type)))
{
- lvaSetStruct(varNum, typeHnd, typeHnd!=NULL, !tiVerificationNeeded);
+ lvaSetStruct(varNum, typeHnd, typeHnd != nullptr, !tiVerificationNeeded);
}
else
{
varDsc->lvType = type;
}
-
+
#if OPT_BOOL_OPS
- if (type == TYP_BOOL)
+ if (type == TYP_BOOL)
+ {
varDsc->lvIsBoolean = true;
+ }
#endif
#ifdef DEBUG
@@ -1229,10 +1238,9 @@ void Compiler::lvaInitVarDsc(LclVarDsc * varDsc,
* Asserts assume it is called after lvaTable[] has been set up.
*/
-unsigned Compiler::compMapILvarNum(unsigned ILvarNum)
+unsigned Compiler::compMapILvarNum(unsigned ILvarNum)
{
- noway_assert(ILvarNum < info.compILlocalsCount ||
- ILvarNum > unsigned(ICorDebugInfo::UNKNOWN_ILNUM));
+ noway_assert(ILvarNum < info.compILlocalsCount || ILvarNum > unsigned(ICorDebugInfo::UNKNOWN_ILNUM));
unsigned varNum;
@@ -1251,8 +1259,8 @@ unsigned Compiler::compMapILvarNum(unsigned ILvarNum)
}
else if (ILvarNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM)
{
- noway_assert(info.compTypeCtxtArg >= 0);
- varNum = unsigned(info.compTypeCtxtArg);
+ noway_assert(info.compTypeCtxtArg >= 0);
+ varNum = unsigned(info.compTypeCtxtArg);
}
else if (ILvarNum < info.compILargsCount)
{
@@ -1264,7 +1272,7 @@ unsigned Compiler::compMapILvarNum(unsigned ILvarNum)
{
// Local variable
unsigned lclNum = ILvarNum - info.compILargsCount;
- varNum = info.compArgsCount + lclNum;
+ varNum = info.compArgsCount + lclNum;
noway_assert(!lvaTable[varNum].lvIsParam);
}
else
@@ -1276,7 +1284,6 @@ unsigned Compiler::compMapILvarNum(unsigned ILvarNum)
return varNum;
}
-
/*****************************************************************************
* Returns the IL variable number given our internal varNum.
* Special return values are VARG_ILNUM, RETBUF_ILNUM, TYPECTXT_ILNUM.
@@ -1284,57 +1291,68 @@ unsigned Compiler::compMapILvarNum(unsigned ILvarNum)
* Returns UNKNOWN_ILNUM if it can't be mapped.
*/
-unsigned Compiler::compMap2ILvarNum(unsigned varNum)
+unsigned Compiler::compMap2ILvarNum(unsigned varNum)
{
if (compIsForInlining())
{
return impInlineInfo->InlinerCompiler->compMap2ILvarNum(varNum);
}
-
+
noway_assert(varNum < lvaCount);
if (varNum == info.compRetBuffArg)
+ {
return (unsigned)ICorDebugInfo::RETBUF_ILNUM;
+ }
// Is this a varargs function?
if (info.compIsVarArgs && varNum == lvaVarargsHandleArg)
+ {
return (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM;
+ }
// We create an extra argument for the type context parameter
// needed for shared generic code.
- if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) &&
- varNum == (unsigned)info.compTypeCtxtArg)
+ if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) && varNum == (unsigned)info.compTypeCtxtArg)
+ {
return (unsigned)ICorDebugInfo::TYPECTXT_ILNUM;
+ }
// Now mutate varNum to remove extra parameters from the count.
- if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) &&
- varNum > (unsigned)info.compTypeCtxtArg)
+ if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) && varNum > (unsigned)info.compTypeCtxtArg)
+ {
varNum--;
+ }
if (info.compIsVarArgs && varNum > lvaVarargsHandleArg)
+ {
varNum--;
+ }
/* Is there a hidden argument for the return buffer.
- Note that this code works because if the RetBuffArg is not present,
+ Note that this code works because if the RetBuffArg is not present,
compRetBuffArg will be BAD_VAR_NUM */
if (info.compRetBuffArg != BAD_VAR_NUM && varNum > info.compRetBuffArg)
+ {
varNum--;
+ }
if (varNum >= info.compLocalsCount)
- return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; // Cannot be mapped
+ {
+ return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; // Cannot be mapped
+ }
return varNum;
}
-
/*****************************************************************************
* Returns true if variable "varNum" may be address-exposed.
*/
-bool Compiler::lvaVarAddrExposed(unsigned varNum)
+bool Compiler::lvaVarAddrExposed(unsigned varNum)
{
noway_assert(varNum < lvaCount);
- LclVarDsc * varDsc = &lvaTable[varNum];
+ LclVarDsc* varDsc = &lvaTable[varNum];
return varDsc->lvAddrExposed;
}
@@ -1343,24 +1361,22 @@ bool Compiler::lvaVarAddrExposed(unsigned varNum)
* Returns true iff variable "varNum" should not be enregistered (or one of several reasons).
*/
-bool Compiler::lvaVarDoNotEnregister(unsigned varNum)
+bool Compiler::lvaVarDoNotEnregister(unsigned varNum)
{
noway_assert(varNum < lvaCount);
- LclVarDsc * varDsc = &lvaTable[varNum];
+ LclVarDsc* varDsc = &lvaTable[varNum];
return varDsc->lvDoNotEnregister;
}
-
-
/*****************************************************************************
* Returns the handle to the class of the local variable varNum
*/
-CORINFO_CLASS_HANDLE Compiler::lvaGetStruct(unsigned varNum)
+CORINFO_CLASS_HANDLE Compiler::lvaGetStruct(unsigned varNum)
{
noway_assert(varNum < lvaCount);
- LclVarDsc * varDsc = &lvaTable[varNum];
+ LclVarDsc* varDsc = &lvaTable[varNum];
return varDsc->lvVerTypeInfo.GetClassHandleForValueClass();
}
@@ -1371,10 +1387,10 @@ CORINFO_CLASS_HANDLE Compiler::lvaGetStruct(unsigned varNum)
*/
/* static */
-int __cdecl Compiler::lvaFieldOffsetCmp(const void * field1, const void * field2)
+int __cdecl Compiler::lvaFieldOffsetCmp(const void* field1, const void* field2)
{
- lvaStructFieldInfo * pFieldInfo1 = (lvaStructFieldInfo *)field1;
- lvaStructFieldInfo * pFieldInfo2 = (lvaStructFieldInfo *)field2;
+ lvaStructFieldInfo* pFieldInfo1 = (lvaStructFieldInfo*)field1;
+ lvaStructFieldInfo* pFieldInfo2 = (lvaStructFieldInfo*)field2;
if (pFieldInfo1->fldOffset == pFieldInfo2->fldOffset)
{
@@ -1383,61 +1399,61 @@ int __cdecl Compiler::lvaFieldOffsetCmp(const void * field1, const void
else
{
return (pFieldInfo1->fldOffset > pFieldInfo2->fldOffset) ? +1 : -1;
- }
+ }
}
/*****************************************************************************
* Is this type promotable? */
-void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
- lvaStructPromotionInfo * StructPromotionInfo,
- bool sortFields)
-{
+void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
+ lvaStructPromotionInfo* StructPromotionInfo,
+ bool sortFields)
+{
assert(eeIsValueClass(typeHnd));
-
+
if (typeHnd != StructPromotionInfo->typeHnd)
{
// sizeof(double) represents the size of the largest primitive type that we can struct promote
- // In the future this may be changing to XMM_REGSIZE_BYTES
- const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * sizeof(double); // must be a compile time constant
-
- assert((BYTE)MaxOffset == MaxOffset); // because lvaStructFieldInfo.fldOffset is byte-sized
- assert((BYTE)MAX_NumOfFieldsInPromotableStruct == MAX_NumOfFieldsInPromotableStruct); // because lvaStructFieldInfo.fieldCnt is byte-sized
-
- bool requiresScratchVar = false;
- bool containsHoles = false;
- bool customLayout = false;
- bool containsGCpointers = false;
-
- StructPromotionInfo->typeHnd = typeHnd;
+ // In the future this may be changing to XMM_REGSIZE_BYTES
+ const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * sizeof(double); // must be a compile time constant
+
+ assert((BYTE)MaxOffset == MaxOffset); // because lvaStructFieldInfo.fldOffset is byte-sized
+ assert((BYTE)MAX_NumOfFieldsInPromotableStruct ==
+ MAX_NumOfFieldsInPromotableStruct); // because lvaStructFieldInfo.fieldCnt is byte-sized
+
+ bool requiresScratchVar = false;
+ bool containsHoles = false;
+ bool customLayout = false;
+ bool containsGCpointers = false;
+
+ StructPromotionInfo->typeHnd = typeHnd;
StructPromotionInfo->canPromote = false;
unsigned structSize = info.compCompHnd->getClassSize(typeHnd);
if (structSize >= MaxOffset)
{
- return; // struct is too large
+ return; // struct is too large
}
- unsigned fieldCnt = info.compCompHnd->getClassNumInstanceFields(typeHnd);
- if (fieldCnt == 0 ||
- fieldCnt > MAX_NumOfFieldsInPromotableStruct)
+ unsigned fieldCnt = info.compCompHnd->getClassNumInstanceFields(typeHnd);
+ if (fieldCnt == 0 || fieldCnt > MAX_NumOfFieldsInPromotableStruct)
{
- return; // struct must have between 1 and MAX_NumOfFieldsInPromotableStruct fields
+ return; // struct must have between 1 and MAX_NumOfFieldsInPromotableStruct fields
}
StructPromotionInfo->fieldCnt = (BYTE)fieldCnt;
- DWORD typeFlags = info.compCompHnd->getClassAttribs(typeHnd);
+ DWORD typeFlags = info.compCompHnd->getClassAttribs(typeHnd);
bool treatAsOverlapping = StructHasOverlappingFields(typeFlags);
-#if 1 // TODO-Cleanup: Consider removing this entire #if block in the future
+#if 1 // TODO-Cleanup: Consider removing this entire #if block in the future
// This method has two callers. The one in Importer.cpp passes sortFields == false
// and the other passes sortFields == true.
// This is a workaround that leave the inlining behavior the same and before while still
// performing extra struct promotions when compiling the method.
- //
- if (!sortFields) // the condition "!sortFields" really means "we are inlining"
+ //
+ if (!sortFields) // the condition "!sortFields" really means "we are inlining"
{
treatAsOverlapping = StructHasCustomLayout(typeFlags);
}
@@ -1448,7 +1464,7 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
return;
}
- // Don't struct promote if we have an CUSTOMLAYOUT flag on an HFA type
+ // Don't struct promote if we have an CUSTOMLAYOUT flag on an HFA type
if (StructHasCustomLayout(typeFlags) && IsHfa(typeHnd))
{
return;
@@ -1456,23 +1472,23 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
#ifdef _TARGET_ARM_
// On ARM, we have a requirement on the struct alignment; see below.
- unsigned structAlignment = roundUp(info.compCompHnd->getClassAlignmentRequirement(typeHnd), TARGET_POINTER_SIZE);
+ unsigned structAlignment =
+ roundUp(info.compCompHnd->getClassAlignmentRequirement(typeHnd), TARGET_POINTER_SIZE);
#endif // _TARGET_ARM
- bool isHole[MaxOffset]; // isHole[] is initialized to true for every valid offset in the struct and false for the rest
- unsigned i; // then as we process the fields we clear the isHole[] values that the field spans.
- for (i=0; i < MaxOffset; i++)
+ bool isHole[MaxOffset]; // isHole[] is initialized to true for every valid offset in the struct and false for
+ // the rest
+ unsigned i; // then as we process the fields we clear the isHole[] values that the field spans.
+ for (i = 0; i < MaxOffset; i++)
{
isHole[i] = (i < structSize) ? true : false;
}
- for (BYTE ordinal=0;
- ordinal < fieldCnt;
- ++ordinal)
- {
- lvaStructFieldInfo * pFieldInfo = &StructPromotionInfo->fields[ordinal];
- pFieldInfo->fldHnd = info.compCompHnd->getFieldInClass(typeHnd, ordinal);
- unsigned fldOffset = info.compCompHnd->getFieldOffset(pFieldInfo->fldHnd);
+ for (BYTE ordinal = 0; ordinal < fieldCnt; ++ordinal)
+ {
+ lvaStructFieldInfo* pFieldInfo = &StructPromotionInfo->fields[ordinal];
+ pFieldInfo->fldHnd = info.compCompHnd->getFieldInClass(typeHnd, ordinal);
+ unsigned fldOffset = info.compCompHnd->getFieldOffset(pFieldInfo->fldHnd);
// The fldOffset value should never be larger than our structSize.
if (fldOffset >= structSize)
@@ -1481,12 +1497,12 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
return;
}
- pFieldInfo->fldOffset = (BYTE)fldOffset;
- pFieldInfo->fldOrdinal = ordinal;
- CorInfoType corType = info.compCompHnd->getFieldType(pFieldInfo->fldHnd, &pFieldInfo->fldTypeHnd);
- var_types varType = JITtype2varType(corType);
- pFieldInfo->fldType = varType;
- pFieldInfo->fldSize = genTypeSize(varType);
+ pFieldInfo->fldOffset = (BYTE)fldOffset;
+ pFieldInfo->fldOrdinal = ordinal;
+ CorInfoType corType = info.compCompHnd->getFieldType(pFieldInfo->fldHnd, &pFieldInfo->fldTypeHnd);
+ var_types varType = JITtype2varType(corType);
+ pFieldInfo->fldType = varType;
+ pFieldInfo->fldSize = genTypeSize(varType);
if (varTypeIsGC(varType))
{
@@ -1496,8 +1512,8 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
if (pFieldInfo->fldSize == 0)
{
// Non-primitive struct field. Don't promote.
- return;
- }
+ return;
+ }
if ((pFieldInfo->fldOffset % pFieldInfo->fldSize) != 0)
{
@@ -1510,11 +1526,11 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
// The end offset for this field should never be larger than our structSize.
noway_assert(fldOffset + pFieldInfo->fldSize <= structSize);
- for (i=0; i < pFieldInfo->fldSize; i++)
+ for (i = 0; i < pFieldInfo->fldSize; i++)
{
- isHole[fldOffset+i] = false;
+ isHole[fldOffset + i] = false;
}
-
+
#ifdef _TARGET_ARM_
// On ARM, for struct types that don't use explicit layout, the alignment of the struct is
// at least the max alignment of its fields. We take advantage of this invariant in struct promotion,
@@ -1525,8 +1541,9 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
return;
}
// If we have any small fields we will allocate a single PromotedStructScratch local var for the method.
- // This is a stack area that we use to assemble the small fields in order to place them in a register argument.
- //
+ // This is a stack area that we use to assemble the small fields in order to place them in a register
+ // argument.
+ //
if (pFieldInfo->fldSize < TARGET_POINTER_SIZE)
{
requiresScratchVar = true;
@@ -1541,21 +1558,20 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
// Managed C++ uses this for its structs, such C++ types will not contain GC pointers.
//
// The current VM implementation also incorrectly sets the CORINFO_FLG_CUSTOMLAYOUT
- // whenever a managed value class contains any GC pointers.
+ // whenever a managed value class contains any GC pointers.
// (See the comment for VMFLAG_NOT_TIGHTLY_PACKED in class.h)
//
- // It is important to struct promote managed value classes that have GC pointers
+ // It is important to struct promote managed value classes that have GC pointers
// So we compute the correct value for "CustomLayout" here
//
- if (StructHasCustomLayout(typeFlags) &&
- ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) == 0) )
+ if (StructHasCustomLayout(typeFlags) && ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) == 0))
{
customLayout = true;
}
// Check if this promoted struct contains any holes
//
- for (i=0; i < structSize; i++)
+ for (i = 0; i < structSize; i++)
{
if (isHole[i])
{
@@ -1563,7 +1579,7 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
break;
}
}
-
+
// Cool, this struct is promotable.
StructPromotionInfo->canPromote = true;
StructPromotionInfo->requiresScratchVar = requiresScratchVar;
@@ -1575,9 +1591,7 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
// Sort the fields according to the increasing order of the field offset.
// This is needed because the fields need to be pushed on stack (when referenced
// as a struct) in order.
- qsort(StructPromotionInfo->fields,
- StructPromotionInfo->fieldCnt,
- sizeof(*StructPromotionInfo->fields),
+ qsort(StructPromotionInfo->fields, StructPromotionInfo->fieldCnt, sizeof(*StructPromotionInfo->fields),
lvaFieldOffsetCmp);
}
}
@@ -1589,18 +1603,17 @@ void Compiler::lvaCanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd,
}
}
-
/*****************************************************************************
* Is this struct type local variable promotable? */
-void Compiler::lvaCanPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo * StructPromotionInfo)
-{
+void Compiler::lvaCanPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo* StructPromotionInfo)
+{
noway_assert(lclNum < lvaCount);
-
- LclVarDsc * varDsc = &lvaTable[lclNum];
-
+
+ LclVarDsc* varDsc = &lvaTable[lclNum];
+
noway_assert(varTypeIsStruct(varDsc));
- noway_assert(!varDsc->lvPromoted); // Don't ask again :)
+ noway_assert(!varDsc->lvPromoted); // Don't ask again :)
#ifdef FEATURE_SIMD
// If this lclVar is used in a SIMD intrinsic, then we don't want to struct promote it.
@@ -1611,17 +1624,17 @@ void Compiler::lvaCanPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo
StructPromotionInfo->canPromote = false;
return;
}
-
+
#endif
// TODO-PERF - Allow struct promotion for HFA register arguments
// Explicitly check for HFA reg args and reject them for promotion here.
- // Promoting HFA args will fire an assert in lvaAssignFrameOffsets
+ // Promoting HFA args will fire an assert in lvaAssignFrameOffsets
// when the HFA reg arg is struct promoted.
//
if (varDsc->lvIsHfaRegArg())
- {
+ {
StructPromotionInfo->canPromote = false;
return;
}
@@ -1630,94 +1643,88 @@ void Compiler::lvaCanPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo
lvaCanPromoteStructType(typeHnd, StructPromotionInfo, true);
}
-
/*****************************************************************************
* Promote a struct type local */
-void Compiler::lvaPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo * StructPromotionInfo)
-{
- LclVarDsc * varDsc = &lvaTable[lclNum];
+void Compiler::lvaPromoteStructVar(unsigned lclNum, lvaStructPromotionInfo* StructPromotionInfo)
+{
+ LclVarDsc* varDsc = &lvaTable[lclNum];
// We should never see a reg-sized non-field-addressed struct here.
noway_assert(!varDsc->lvRegStruct);
- noway_assert(StructPromotionInfo->canPromote);
+ noway_assert(StructPromotionInfo->canPromote);
noway_assert(StructPromotionInfo->typeHnd == varDsc->lvVerTypeInfo.GetClassHandle());
-
+
varDsc->lvFieldCnt = StructPromotionInfo->fieldCnt;
- varDsc->lvFieldLclStart = lvaCount;
+ varDsc->lvFieldLclStart = lvaCount;
varDsc->lvPromoted = true;
varDsc->lvContainsHoles = StructPromotionInfo->containsHoles;
varDsc->lvCustomLayout = StructPromotionInfo->customLayout;
#ifdef DEBUG
- //Don't change the source to a TYP_BLK either.
+ // Don't change the source to a TYP_BLK either.
varDsc->lvKeepType = 1;
#endif
#ifdef DEBUG
if (verbose)
{
- printf("\nPromoting struct local V%02u (%s):",
- lclNum, eeGetClassName(StructPromotionInfo->typeHnd));
+ printf("\nPromoting struct local V%02u (%s):", lclNum, eeGetClassName(StructPromotionInfo->typeHnd));
}
-#endif
-
- for (unsigned index=0;
- index<StructPromotionInfo->fieldCnt;
- ++index)
- {
- lvaStructFieldInfo * pFieldInfo = &StructPromotionInfo->fields[index];
+#endif
+
+ for (unsigned index = 0; index < StructPromotionInfo->fieldCnt; ++index)
+ {
+ lvaStructFieldInfo* pFieldInfo = &StructPromotionInfo->fields[index];
if (varTypeIsFloating(pFieldInfo->fldType))
- {
- lvaTable[lclNum].lvContainsFloatingFields = 1;
+ {
+ lvaTable[lclNum].lvContainsFloatingFields = 1;
// Whenever we promote a struct that contains a floating point field
// it's possible we transition from a method that originally only had integer
// local vars to start having FP. We have to communicate this through this flag
// since LSRA later on will use this flag to determine whether or not to track FP register sets.
compFloatingPointUsed = true;
}
-
- // Now grab the temp for the field local.
+
+// Now grab the temp for the field local.
#ifdef DEBUG
- char buf[200];
- char * bufp = &buf[0];
+ char buf[200];
+ char* bufp = &buf[0];
- sprintf_s(bufp, sizeof(buf), "%s V%02u.%s (fldOffset=0x%x)",
- "field",
- lclNum,
- eeGetFieldName(pFieldInfo->fldHnd),
- pFieldInfo->fldOffset);
+ sprintf_s(bufp, sizeof(buf), "%s V%02u.%s (fldOffset=0x%x)", "field", lclNum,
+ eeGetFieldName(pFieldInfo->fldHnd), pFieldInfo->fldOffset);
- if (index>0)
+ if (index > 0)
{
- noway_assert(pFieldInfo->fldOffset > (pFieldInfo-1)->fldOffset);
+ noway_assert(pFieldInfo->fldOffset > (pFieldInfo - 1)->fldOffset);
}
#endif
-
- unsigned varNum = lvaGrabTemp(false DEBUGARG(bufp)); // Lifetime of field locals might span multiple BBs, so they are long lifetime temps.
-
- LclVarDsc * fieldVarDsc = &lvaTable[varNum];
- fieldVarDsc->lvType = pFieldInfo->fldType;
- fieldVarDsc->lvExactSize = pFieldInfo->fldSize;
- fieldVarDsc->lvIsStructField = true;
- fieldVarDsc->lvFldOffset = pFieldInfo->fldOffset;
- fieldVarDsc->lvFldOrdinal = pFieldInfo->fldOrdinal;
- fieldVarDsc->lvParentLcl = lclNum;
- fieldVarDsc->lvIsParam = varDsc->lvIsParam;
+
+ unsigned varNum = lvaGrabTemp(false DEBUGARG(bufp)); // Lifetime of field locals might span multiple BBs, so
+ // they are long lifetime temps.
+
+ LclVarDsc* fieldVarDsc = &lvaTable[varNum];
+ fieldVarDsc->lvType = pFieldInfo->fldType;
+ fieldVarDsc->lvExactSize = pFieldInfo->fldSize;
+ fieldVarDsc->lvIsStructField = true;
+ fieldVarDsc->lvFldOffset = pFieldInfo->fldOffset;
+ fieldVarDsc->lvFldOrdinal = pFieldInfo->fldOrdinal;
+ fieldVarDsc->lvParentLcl = lclNum;
+ fieldVarDsc->lvIsParam = varDsc->lvIsParam;
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
// Do we have a parameter that can be enregistered?
//
if (varDsc->lvIsRegArg)
{
fieldVarDsc->lvIsRegArg = true;
- fieldVarDsc->lvArgReg = varDsc->lvArgReg;
- fieldVarDsc->setPrefReg(varDsc->lvArgReg, this); // Set the preferred register
+ fieldVarDsc->lvArgReg = varDsc->lvArgReg;
+ fieldVarDsc->setPrefReg(varDsc->lvArgReg, this); // Set the preferred register
- lvaMarkRefsWeight = BB_UNITY_WEIGHT; // incRefCnts can use this compiler global variable
- fieldVarDsc->incRefCnts(BB_UNITY_WEIGHT, this); // increment the ref count for prolog initialization
+ lvaMarkRefsWeight = BB_UNITY_WEIGHT; // incRefCnts can use this compiler global variable
+ fieldVarDsc->incRefCnts(BB_UNITY_WEIGHT, this); // increment the ref count for prolog initialization
}
#endif
@@ -1739,20 +1746,19 @@ void Compiler::lvaPromoteStructVar(unsigned lclNum, lvaStructPromotionInf
// Return Value:
// None.
//
-void Compiler::lvaPromoteLongVars()
+void Compiler::lvaPromoteLongVars()
{
if ((opts.compFlags & CLFLG_REGVAR) == 0)
{
return;
}
// The lvaTable might grow as we grab temps. Make a local copy here.
- unsigned startLvaCount = lvaCount;
- for (unsigned lclNum = 0;
- lclNum < startLvaCount;
- lclNum++)
+ unsigned startLvaCount = lvaCount;
+ for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++)
{
- LclVarDsc * varDsc = &lvaTable[lclNum];
- if(!varTypeIsLong(varDsc) || varDsc->lvDoNotEnregister || varDsc->lvIsMultiRegArgOrRet() || (varDsc->lvRefCnt == 0))
+ LclVarDsc* varDsc = &lvaTable[lclNum];
+ if (!varTypeIsLong(varDsc) || varDsc->lvDoNotEnregister || varDsc->lvIsMultiRegArgOrRet() ||
+ (varDsc->lvRefCnt == 0))
{
continue;
}
@@ -1766,11 +1772,11 @@ void Compiler::lvaPromoteLongVars()
continue;
}
varDsc->lvIsStructField = false;
- varDsc->lvTracked = false;
+ varDsc->lvTracked = false;
}
varDsc->lvFieldCnt = 2;
- varDsc->lvFieldLclStart = lvaCount;
+ varDsc->lvFieldLclStart = lvaCount;
varDsc->lvPromoted = true;
varDsc->lvContainsHoles = false;
@@ -1782,32 +1788,30 @@ void Compiler::lvaPromoteLongVars()
#endif
bool isParam = varDsc->lvIsParam;
-
- for (unsigned index=0; index < 2; ++index)
+
+ for (unsigned index = 0; index < 2; ++index)
{
// Grab the temp for the field local.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- char buf[200];
- char * bufp = &buf[0];
+ char buf[200];
+ char* bufp = &buf[0];
- sprintf_s(bufp, sizeof(buf), "%s V%02u.%s (fldOffset=0x%x)",
- "field",
- lclNum,
- index == 0 ? "lo" : "hi",
+ sprintf_s(bufp, sizeof(buf), "%s V%02u.%s (fldOffset=0x%x)", "field", lclNum, index == 0 ? "lo" : "hi",
index * 4);
#endif
- unsigned varNum = lvaGrabTemp(false DEBUGARG(bufp)); // Lifetime of field locals might span multiple BBs, so they are long lifetime temps.
+ unsigned varNum = lvaGrabTemp(false DEBUGARG(bufp)); // Lifetime of field locals might span multiple BBs, so
+ // they are long lifetime temps.
- LclVarDsc * fieldVarDsc = &lvaTable[varNum];
- fieldVarDsc->lvType = TYP_INT;
- fieldVarDsc->lvExactSize = genTypeSize(TYP_INT);
- fieldVarDsc->lvIsStructField = true;
- fieldVarDsc->lvFldOffset = (unsigned char)(index * genTypeSize(TYP_INT));
- fieldVarDsc->lvFldOrdinal = (unsigned char)index;
- fieldVarDsc->lvParentLcl = lclNum;
- fieldVarDsc->lvIsParam = isParam;
+ LclVarDsc* fieldVarDsc = &lvaTable[varNum];
+ fieldVarDsc->lvType = TYP_INT;
+ fieldVarDsc->lvExactSize = genTypeSize(TYP_INT);
+ fieldVarDsc->lvIsStructField = true;
+ fieldVarDsc->lvFldOffset = (unsigned char)(index * genTypeSize(TYP_INT));
+ fieldVarDsc->lvFldOrdinal = (unsigned char)index;
+ fieldVarDsc->lvParentLcl = lclNum;
+ fieldVarDsc->lvIsParam = isParam;
}
}
@@ -1826,17 +1830,15 @@ void Compiler::lvaPromoteLongVars()
that represents this field.
*/
-unsigned Compiler::lvaGetFieldLocal(LclVarDsc * varDsc, unsigned int fldOffset)
-{
+unsigned Compiler::lvaGetFieldLocal(LclVarDsc* varDsc, unsigned int fldOffset)
+{
noway_assert(varTypeIsStruct(varDsc));
noway_assert(varDsc->lvPromoted);
- for (unsigned i = varDsc->lvFieldLclStart;
- i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
- ++i)
- {
+ for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
+ {
noway_assert(lvaTable[i].lvIsStructField);
- noway_assert(lvaTable[i].lvParentLcl == (unsigned) (varDsc-lvaTable));
+ noway_assert(lvaTable[i].lvParentLcl == (unsigned)(varDsc - lvaTable));
if (lvaTable[i].lvFldOffset == fldOffset)
{
return i;
@@ -1853,24 +1855,22 @@ unsigned Compiler::lvaGetFieldLocal(LclVarDsc * varDsc, unsigned int fldOffse
* If this is a promoted struct, label it's fields the same way.
*/
-void Compiler::lvaSetVarAddrExposed(unsigned varNum)
-{
+void Compiler::lvaSetVarAddrExposed(unsigned varNum)
+{
noway_assert(varNum < lvaCount);
-
- LclVarDsc * varDsc = &lvaTable[varNum];
-
+
+ LclVarDsc* varDsc = &lvaTable[varNum];
+
varDsc->lvAddrExposed = 1;
-
+
if (varDsc->lvPromoted)
{
noway_assert(varTypeIsStruct(varDsc));
-
- for (unsigned i = varDsc->lvFieldLclStart;
- i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
- ++i)
- {
- noway_assert(lvaTable[i].lvIsStructField);
- lvaTable[i].lvAddrExposed = 1; // Make field local as address-exposed.
+
+ for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
+ {
+ noway_assert(lvaTable[i].lvIsStructField);
+ lvaTable[i].lvAddrExposed = 1; // Make field local as address-exposed.
lvaSetVarDoNotEnregister(i DEBUGARG(DNER_AddrExposed));
}
}
@@ -1878,16 +1878,15 @@ void Compiler::lvaSetVarAddrExposed(unsigned varNum)
lvaSetVarDoNotEnregister(varNum DEBUGARG(DNER_AddrExposed));
}
-
/*****************************************************************************
*
* Record that the local var "varNum" should not be enregistered (for one of several reasons.)
*/
-void Compiler::lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason))
-{
+void Compiler::lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason))
+{
noway_assert(varNum < lvaCount);
- LclVarDsc * varDsc = &lvaTable[varNum];
+ LclVarDsc* varDsc = &lvaTable[varNum];
varDsc->lvDoNotEnregister = 1;
#ifdef DEBUG
@@ -1897,58 +1896,56 @@ void Compiler::lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(D
}
switch (reason)
{
- case DNER_AddrExposed:
- JITDUMP("it is address exposed\n");
- assert(varDsc->lvAddrExposed);
- break;
- case DNER_IsStruct:
- JITDUMP("it is a struct\n");
- assert(varTypeIsStruct(varDsc));
- break;
- case DNER_BlockOp:
- JITDUMP("written in a block op\n");
- varDsc->lvLclBlockOpAddr = 1;
- break;
- case DNER_LocalField:
- JITDUMP("was accessed as a local field\n");
- varDsc->lvLclFieldExpr = 1;
- break;
- case DNER_VMNeedsStackAddr:
- JITDUMP("needs stack addr\n");
- varDsc->lvVMNeedsStackAddr = 1;
- break;
- case DNER_LiveInOutOfHandler:
- JITDUMP("live in/out of a handler\n");
- varDsc->lvLiveInOutOfHndlr = 1;
- break;
- case DNER_LiveAcrossUnmanagedCall:
- JITDUMP("live across unmanaged call\n");
- varDsc->lvLiveAcrossUCall = 1;
- break;
+ case DNER_AddrExposed:
+ JITDUMP("it is address exposed\n");
+ assert(varDsc->lvAddrExposed);
+ break;
+ case DNER_IsStruct:
+ JITDUMP("it is a struct\n");
+ assert(varTypeIsStruct(varDsc));
+ break;
+ case DNER_BlockOp:
+ JITDUMP("written in a block op\n");
+ varDsc->lvLclBlockOpAddr = 1;
+ break;
+ case DNER_LocalField:
+ JITDUMP("was accessed as a local field\n");
+ varDsc->lvLclFieldExpr = 1;
+ break;
+ case DNER_VMNeedsStackAddr:
+ JITDUMP("needs stack addr\n");
+ varDsc->lvVMNeedsStackAddr = 1;
+ break;
+ case DNER_LiveInOutOfHandler:
+ JITDUMP("live in/out of a handler\n");
+ varDsc->lvLiveInOutOfHndlr = 1;
+ break;
+ case DNER_LiveAcrossUnmanagedCall:
+ JITDUMP("live across unmanaged call\n");
+ varDsc->lvLiveAcrossUCall = 1;
+ break;
#ifdef JIT32_GCENCODER
- case DNER_PinningRef:
- JITDUMP("pinning ref\n");
- assert(varDsc->lvPinned);
- break;
+ case DNER_PinningRef:
+ JITDUMP("pinning ref\n");
+ assert(varDsc->lvPinned);
+ break;
#endif
- default:
- unreached();
- break;
+ default:
+ unreached();
+ break;
}
#endif
}
// Returns true if this local var is a multireg struct
-bool Compiler::lvaIsMultiregStruct(LclVarDsc* varDsc)
+bool Compiler::lvaIsMultiregStruct(LclVarDsc* varDsc)
{
if (varDsc->TypeGet() == TYP_STRUCT)
- {
+ {
CORINFO_CLASS_HANDLE clsHnd = varDsc->lvVerTypeInfo.GetClassHandleForValueClass();
- structPassingKind howToPassStruct;
-
- var_types type = getArgTypeForStruct(clsHnd,
- &howToPassStruct,
- varDsc->lvExactSize);
+ structPassingKind howToPassStruct;
+
+ var_types type = getArgTypeForStruct(clsHnd, &howToPassStruct, varDsc->lvExactSize);
if (howToPassStruct == SPK_ByValueAsHfa)
{
@@ -1963,22 +1960,22 @@ bool Compiler::lvaIsMultiregStruct(LclVarDsc* varDsc)
return true;
}
#endif
-
}
return false;
}
-
/*****************************************************************************
* Set the lvClass for a local variable of a struct type */
-void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo)
+void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo)
{
noway_assert(varNum < lvaCount);
- LclVarDsc * varDsc = &lvaTable[varNum];
+ LclVarDsc* varDsc = &lvaTable[varNum];
if (setTypeInfo)
+ {
varDsc->lvVerTypeInfo = typeInfo(TI_STRUCT, typeHnd);
+ }
// Set the type and associated info if we haven't already set it.
var_types structType = varDsc->lvType;
@@ -1991,15 +1988,18 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, boo
varDsc->lvExactSize = info.compCompHnd->getClassSize(typeHnd);
size_t lvSize = varDsc->lvSize();
- assert((lvSize % sizeof(void*)) == 0); // The struct needs to be a multiple of sizeof(void*) bytes for getClassGClayout() to be valid.
+ assert((lvSize % sizeof(void*)) ==
+ 0); // The struct needs to be a multiple of sizeof(void*) bytes for getClassGClayout() to be valid.
varDsc->lvGcLayout = (BYTE*)compGetMemA((lvSize / sizeof(void*)) * sizeof(BYTE), CMK_LvaTable);
- unsigned numGCVars;
+ unsigned numGCVars;
var_types simdBaseType = TYP_UNKNOWN;
- varDsc->lvType = impNormStructType(typeHnd, varDsc->lvGcLayout, &numGCVars, &simdBaseType);
+ varDsc->lvType = impNormStructType(typeHnd, varDsc->lvGcLayout, &numGCVars, &simdBaseType);
// We only save the count of GC vars in a struct up to 7.
if (numGCVars >= 8)
+ {
numGCVars = 7;
+ }
varDsc->lvStructGcCount = numGCVars;
#if FEATURE_SIMD
if (simdBaseType != TYP_UNKNOWN)
@@ -2013,7 +2013,7 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, boo
// for structs that are small enough, we check and set lvIsHfa and lvHfaTypeIsFloat
if (varDsc->lvExactSize <= MAX_PASS_MULTIREG_BYTES)
{
- var_types hfaType = GetHfaType(typeHnd); // set to float or double if it is an HFA, otherwise TYP_UNDEF
+ var_types hfaType = GetHfaType(typeHnd); // set to float or double if it is an HFA, otherwise TYP_UNDEF
if (varTypeIsFloating(hfaType))
{
varDsc->_lvIsHfa = true;
@@ -2039,14 +2039,14 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, boo
#ifndef _TARGET_64BIT_
bool fDoubleAlignHint = FALSE;
-# ifdef _TARGET_X86_
+#ifdef _TARGET_X86_
fDoubleAlignHint = TRUE;
-# endif
+#endif
if (info.compCompHnd->getClassAlignmentRequirement(typeHnd, fDoubleAlignHint) == 8)
{
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("Marking struct in V%02i with double align flag\n", varNum);
}
@@ -2061,10 +2061,8 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, boo
// Check whether this local is an unsafe value type and requires GS cookie protection.
// GS checks require the stack to be re-ordered, which can't be done with EnC.
- if (unsafeValueClsCheck &&
- (classAttribs & CORINFO_FLG_UNSAFE_VALUECLASS) &&
- !opts.compDbgEnC)
- {
+ if (unsafeValueClsCheck && (classAttribs & CORINFO_FLG_UNSAFE_VALUECLASS) && !opts.compDbgEnC)
+ {
setNeedsGSSecurityCookie();
compGSReorderStackLayout = true;
varDsc->lvIsUnsafeBuffer = true;
@@ -2075,7 +2073,7 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, boo
* Returns the array of BYTEs containing the GC layout information
*/
-BYTE * Compiler::lvaGetGcLayout(unsigned varNum)
+BYTE* Compiler::lvaGetGcLayout(unsigned varNum)
{
noway_assert(varTypeIsStruct(lvaTable[varNum].lvType) && (lvaTable[varNum].lvExactSize >= TARGET_POINTER_SIZE));
@@ -2086,90 +2084,92 @@ BYTE * Compiler::lvaGetGcLayout(unsigned varNum)
* Return the number of bytes needed for a local variable
*/
-unsigned Compiler::lvaLclSize(unsigned varNum)
+unsigned Compiler::lvaLclSize(unsigned varNum)
{
noway_assert(varNum < lvaCount);
-
- var_types varType = lvaTable[varNum].TypeGet();
+
+ var_types varType = lvaTable[varNum].TypeGet();
switch (varType)
{
- case TYP_STRUCT:
- case TYP_BLK:
- return lvaTable[varNum].lvSize();
+ case TYP_STRUCT:
+ case TYP_BLK:
+ return lvaTable[varNum].lvSize();
- case TYP_LCLBLK:
+ case TYP_LCLBLK:
#if FEATURE_FIXED_OUT_ARGS
- noway_assert(lvaOutgoingArgSpaceSize >= 0);
- noway_assert(varNum == lvaOutgoingArgSpaceVar);
- return lvaOutgoingArgSpaceSize;
+ noway_assert(lvaOutgoingArgSpaceSize >= 0);
+ noway_assert(varNum == lvaOutgoingArgSpaceVar);
+ return lvaOutgoingArgSpaceSize;
#else // FEATURE_FIXED_OUT_ARGS
- assert(!"Unknown size");
- NO_WAY("Target doesn't support TYP_LCLBLK");
+ assert(!"Unknown size");
+ NO_WAY("Target doesn't support TYP_LCLBLK");
- // Keep prefast happy
- __fallthrough;
+ // Keep prefast happy
+ __fallthrough;
#endif // FEATURE_FIXED_OUT_ARGS
- default: // This must be a primitive var. Fall out of switch statement
- break;
+ default: // This must be a primitive var. Fall out of switch statement
+ break;
}
#ifdef _TARGET_64BIT_
// We only need this Quirk for _TARGET_64BIT_
if (lvaTable[varNum].lvQuirkToLong)
{
noway_assert(lvaTable[varNum].lvAddrExposed);
- return genTypeStSz(TYP_LONG)*sizeof(int); // return 8 (2 * 4)
+ return genTypeStSz(TYP_LONG) * sizeof(int); // return 8 (2 * 4)
}
#endif
- return genTypeStSz(varType)*sizeof(int);
+ return genTypeStSz(varType) * sizeof(int);
}
//
// Return the exact width of local variable "varNum" -- the number of bytes
// you'd need to copy in order to overwrite the value.
-//
-unsigned Compiler::lvaLclExactSize(unsigned varNum)
+//
+unsigned Compiler::lvaLclExactSize(unsigned varNum)
{
noway_assert(varNum < lvaCount);
-
- var_types varType = lvaTable[varNum].TypeGet();
+
+ var_types varType = lvaTable[varNum].TypeGet();
switch (varType)
{
- case TYP_STRUCT:
- case TYP_BLK:
- return lvaTable[varNum].lvExactSize;
+ case TYP_STRUCT:
+ case TYP_BLK:
+ return lvaTable[varNum].lvExactSize;
- case TYP_LCLBLK:
+ case TYP_LCLBLK:
#if FEATURE_FIXED_OUT_ARGS
- noway_assert(lvaOutgoingArgSpaceSize >= 0);
- noway_assert(varNum == lvaOutgoingArgSpaceVar);
- return lvaOutgoingArgSpaceSize;
+ noway_assert(lvaOutgoingArgSpaceSize >= 0);
+ noway_assert(varNum == lvaOutgoingArgSpaceVar);
+ return lvaOutgoingArgSpaceSize;
#else // FEATURE_FIXED_OUT_ARGS
- assert(!"Unknown size");
- NO_WAY("Target doesn't support TYP_LCLBLK");
+ assert(!"Unknown size");
+ NO_WAY("Target doesn't support TYP_LCLBLK");
- // Keep prefast happy
- __fallthrough;
+ // Keep prefast happy
+ __fallthrough;
#endif // FEATURE_FIXED_OUT_ARGS
- default: // This must be a primitive var. Fall out of switch statement
- break;
+ default: // This must be a primitive var. Fall out of switch statement
+ break;
}
return genTypeSize(varType);
}
-//getBBWeight -- get the normalized weight of this block
-unsigned BasicBlock::getBBWeight(Compiler * comp)
+// getBBWeight -- get the normalized weight of this block
+unsigned BasicBlock::getBBWeight(Compiler* comp)
{
if (this->bbWeight == 0)
+ {
return 0;
+ }
else
{
unsigned calledWeight = comp->fgCalledWeight;
@@ -2177,33 +2177,37 @@ unsigned BasicBlock::getBBWeight(Compiler * comp)
{
calledWeight = comp->fgFirstBB->bbWeight;
if (calledWeight == 0)
+ {
calledWeight = BB_UNITY_WEIGHT;
+ }
+ }
+ if (this->bbWeight < (BB_MAX_WEIGHT / BB_UNITY_WEIGHT))
+ {
+ return max(1, (((this->bbWeight * BB_UNITY_WEIGHT) + (calledWeight / 2)) / calledWeight));
}
- if (this->bbWeight < (BB_MAX_WEIGHT / BB_UNITY_WEIGHT))
- return max(1, (((this->bbWeight * BB_UNITY_WEIGHT) + (calledWeight/2)) / calledWeight));
else
- return (unsigned) ((((double)this->bbWeight * (double)BB_UNITY_WEIGHT) / (double)calledWeight) + 0.5);
+ {
+ return (unsigned)((((double)this->bbWeight * (double)BB_UNITY_WEIGHT) / (double)calledWeight) + 0.5);
+ }
}
}
-
/*****************************************************************************
*
* Callback used by the tree walker to call lvaDecRefCnts
*/
-Compiler::fgWalkResult Compiler::lvaDecRefCntsCB(GenTreePtr *pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::lvaDecRefCntsCB(GenTreePtr* pTree, fgWalkData* data)
{
data->compiler->lvaDecRefCnts(*pTree);
return WALK_CONTINUE;
}
-
// Decrement the ref counts for all locals contained in the tree and its children.
void Compiler::lvaRecursiveDecRefCounts(GenTreePtr tree)
{
assert(lvaLocalVarRefCounted);
- // We could just use the recursive walker for all cases but that is a
+ // We could just use the recursive walker for all cases but that is a
// fairly heavyweight thing to spin up when we're usually just handling a leaf.
if (tree->OperIsLeaf())
{
@@ -2214,7 +2218,7 @@ void Compiler::lvaRecursiveDecRefCounts(GenTreePtr tree)
}
else
{
- fgWalkTreePre(&tree, Compiler::lvaDecRefCntsCB, (void *)this, true);
+ fgWalkTreePre(&tree, Compiler::lvaDecRefCntsCB, (void*)this, true);
}
}
@@ -2223,7 +2227,7 @@ void Compiler::lvaRecursiveIncRefCounts(GenTreePtr tree)
{
assert(lvaLocalVarRefCounted);
- // We could just use the recursive walker for all cases but that is a
+ // We could just use the recursive walker for all cases but that is a
// fairly heavyweight thing to spin up when we're usually just handling a leaf.
if (tree->OperIsLeaf())
{
@@ -2234,7 +2238,7 @@ void Compiler::lvaRecursiveIncRefCounts(GenTreePtr tree)
}
else
{
- fgWalkTreePre(&tree, Compiler::lvaIncRefCntsCB, (void *)this, true);
+ fgWalkTreePre(&tree, Compiler::lvaIncRefCntsCB, (void*)this, true);
}
}
@@ -2243,10 +2247,10 @@ void Compiler::lvaRecursiveIncRefCounts(GenTreePtr tree)
* Helper passed to the tree walker to decrement the refCnts for
* all local variables in an expression
*/
-void Compiler::lvaDecRefCnts(GenTreePtr tree)
+void Compiler::lvaDecRefCnts(GenTreePtr tree)
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
noway_assert(lvaRefCountingStarted || lvaLocalVarRefCounted);
@@ -2264,7 +2268,7 @@ void Compiler::lvaDecRefCnts(GenTreePtr tree)
/* Decrement the reference counts twice */
- varDsc->decRefCnts(compCurBB->getBBWeight(this), this);
+ varDsc->decRefCnts(compCurBB->getBBWeight(this), this);
varDsc->decRefCnts(compCurBB->getBBWeight(this), this);
}
}
@@ -2291,7 +2295,7 @@ void Compiler::lvaDecRefCnts(GenTreePtr tree)
*
* Callback used by the tree walker to call lvaIncRefCnts
*/
-Compiler::fgWalkResult Compiler::lvaIncRefCntsCB(GenTreePtr *pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::lvaIncRefCntsCB(GenTreePtr* pTree, fgWalkData* data)
{
data->compiler->lvaIncRefCnts(*pTree);
return WALK_CONTINUE;
@@ -2302,10 +2306,10 @@ Compiler::fgWalkResult Compiler::lvaIncRefCntsCB(GenTreePtr *pTree, fgWalkD
* Helper passed to the tree walker to increment the refCnts for
* all local variables in an expression
*/
-void Compiler::lvaIncRefCnts(GenTreePtr tree)
+void Compiler::lvaIncRefCnts(GenTreePtr tree)
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
noway_assert(lvaRefCountingStarted || lvaLocalVarRefCounted);
@@ -2323,7 +2327,7 @@ void Compiler::lvaIncRefCnts(GenTreePtr tree)
/* Increment the reference counts twice */
- varDsc->incRefCnts(compCurBB->getBBWeight(this), this);
+ varDsc->incRefCnts(compCurBB->getBBWeight(this), this);
varDsc->incRefCnts(compCurBB->getBBWeight(this), this);
}
}
@@ -2331,7 +2335,8 @@ void Compiler::lvaIncRefCnts(GenTreePtr tree)
{
/* This must be a local variable */
- noway_assert(tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_FLD || tree->gtOper == GT_STORE_LCL_VAR || tree->gtOper == GT_STORE_LCL_FLD);
+ noway_assert(tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_FLD || tree->gtOper == GT_STORE_LCL_VAR ||
+ tree->gtOper == GT_STORE_LCL_FLD);
/* Get the variable descriptor */
@@ -2346,7 +2351,6 @@ void Compiler::lvaIncRefCnts(GenTreePtr tree)
}
}
-
/*****************************************************************************
*
* Compare function passed to qsort() by Compiler::lclVars.lvaSortByRefCount().
@@ -2358,48 +2362,55 @@ void Compiler::lvaIncRefCnts(GenTreePtr tree)
*/
/* static */
-int __cdecl Compiler::RefCntCmp(const void *op1, const void *op2)
+int __cdecl Compiler::RefCntCmp(const void* op1, const void* op2)
{
- LclVarDsc * dsc1 = *(LclVarDsc * *)op1;
- LclVarDsc * dsc2 = *(LclVarDsc * *)op2;
+ LclVarDsc* dsc1 = *(LclVarDsc**)op1;
+ LclVarDsc* dsc2 = *(LclVarDsc**)op2;
/* Make sure we preference tracked variables over untracked variables */
- if (dsc1->lvTracked != dsc2->lvTracked)
+ if (dsc1->lvTracked != dsc2->lvTracked)
{
return (dsc2->lvTracked) ? +1 : -1;
}
-
unsigned weight1 = dsc1->lvRefCnt;
unsigned weight2 = dsc2->lvRefCnt;
#if !FEATURE_FP_REGALLOC
/* Force integer candidates to sort above float candidates */
- bool isFloat1 = isFloatRegType(dsc1->lvType);
- bool isFloat2 = isFloatRegType(dsc2->lvType);
+ bool isFloat1 = isFloatRegType(dsc1->lvType);
+ bool isFloat2 = isFloatRegType(dsc2->lvType);
- if (isFloat1 != isFloat2)
+ if (isFloat1 != isFloat2)
{
if (weight2 && isFloat1)
+ {
return +1;
+ }
if (weight1 && isFloat2)
+ {
return -1;
+ }
}
#endif
int diff = weight2 - weight1;
- if (diff != 0)
- return diff;
+ if (diff != 0)
+ {
+ return diff;
+ }
/* The unweighted ref counts were the same */
/* If the weighted ref counts are different then use their difference */
diff = dsc2->lvRefCntWtd - dsc1->lvRefCntWtd;
- if (diff != 0)
- return diff;
+ if (diff != 0)
+ {
+ return diff;
+ }
/* We have equal ref counts and weighted ref counts */
@@ -2413,45 +2424,67 @@ int __cdecl Compiler::RefCntCmp(const void *op1, const void *op2)
{
if (dsc1->lvPrefReg)
{
- if ( (dsc1->lvPrefReg & ~RBM_BYTE_REG_FLAG) && genMaxOneBit((unsigned)dsc1->lvPrefReg))
+ if ((dsc1->lvPrefReg & ~RBM_BYTE_REG_FLAG) && genMaxOneBit((unsigned)dsc1->lvPrefReg))
+ {
weight1 += 2 * BB_UNITY_WEIGHT;
+ }
else
+ {
weight1 += 1 * BB_UNITY_WEIGHT;
+ }
}
if (varTypeIsGC(dsc1->TypeGet()))
+ {
weight1 += BB_UNITY_WEIGHT / 2;
+ }
if (dsc1->lvRegister)
+ {
weight1 += BB_UNITY_WEIGHT / 2;
+ }
}
if (weight2)
{
if (dsc2->lvPrefReg)
{
- if ( (dsc2->lvPrefReg & ~RBM_BYTE_REG_FLAG) && genMaxOneBit((unsigned)dsc2->lvPrefReg))
+ if ((dsc2->lvPrefReg & ~RBM_BYTE_REG_FLAG) && genMaxOneBit((unsigned)dsc2->lvPrefReg))
+ {
weight2 += 2 * BB_UNITY_WEIGHT;
+ }
else
+ {
weight2 += 1 * BB_UNITY_WEIGHT;
+ }
}
if (varTypeIsGC(dsc2->TypeGet()))
+ {
weight1 += BB_UNITY_WEIGHT / 2;
+ }
if (dsc2->lvRegister)
+ {
weight2 += BB_UNITY_WEIGHT / 2;
+ }
}
diff = weight2 - weight1;
if (diff != 0)
+ {
return diff;
+ }
/* To achieve a Stable Sort we use the LclNum (by way of the pointer address) */
if (dsc1 < dsc2)
+ {
return -1;
+ }
if (dsc1 > dsc2)
+ {
return +1;
+ }
return 0;
}
@@ -2466,14 +2499,14 @@ int __cdecl Compiler::RefCntCmp(const void *op1, const void *op2)
*/
/* static */
-int __cdecl Compiler::WtdRefCntCmp(const void *op1, const void *op2)
+int __cdecl Compiler::WtdRefCntCmp(const void* op1, const void* op2)
{
- LclVarDsc * dsc1 = *(LclVarDsc * *)op1;
- LclVarDsc * dsc2 = *(LclVarDsc * *)op2;
+ LclVarDsc* dsc1 = *(LclVarDsc**)op1;
+ LclVarDsc* dsc2 = *(LclVarDsc**)op2;
/* Make sure we preference tracked variables over untracked variables */
- if (dsc1->lvTracked != dsc2->lvTracked)
+ if (dsc1->lvTracked != dsc2->lvTracked)
{
return (dsc2->lvTracked) ? +1 : -1;
}
@@ -2484,15 +2517,19 @@ int __cdecl Compiler::WtdRefCntCmp(const void *op1, const void *op2)
#if !FEATURE_FP_REGALLOC
/* Force integer candidates to sort above float candidates */
- bool isFloat1 = isFloatRegType(dsc1->lvType);
- bool isFloat2 = isFloatRegType(dsc2->lvType);
+ bool isFloat1 = isFloatRegType(dsc1->lvType);
+ bool isFloat2 = isFloatRegType(dsc2->lvType);
- if (isFloat1 != isFloat2)
+ if (isFloat1 != isFloat2)
{
if (weight2 && isFloat1)
+ {
return +1;
+ }
if (weight1 && isFloat2)
+ {
return -1;
+ }
}
#endif
@@ -2501,18 +2538,26 @@ int __cdecl Compiler::WtdRefCntCmp(const void *op1, const void *op2)
if (weight1 && dsc1->lvPrefReg)
{
- if ( (dsc1->lvPrefReg & ~RBM_BYTE_REG_FLAG) && genMaxOneBit((unsigned)dsc1->lvPrefReg))
+ if ((dsc1->lvPrefReg & ~RBM_BYTE_REG_FLAG) && genMaxOneBit((unsigned)dsc1->lvPrefReg))
+ {
weight1 += 2 * BB_UNITY_WEIGHT;
+ }
else
+ {
weight1 += 1 * BB_UNITY_WEIGHT;
+ }
}
if (weight2 && dsc2->lvPrefReg)
{
- if ( (dsc2->lvPrefReg & ~RBM_BYTE_REG_FLAG) && genMaxOneBit((unsigned)dsc2->lvPrefReg))
+ if ((dsc2->lvPrefReg & ~RBM_BYTE_REG_FLAG) && genMaxOneBit((unsigned)dsc2->lvPrefReg))
+ {
weight2 += 2 * BB_UNITY_WEIGHT;
+ }
else
+ {
weight2 += 1 * BB_UNITY_WEIGHT;
+ }
}
if (weight2 > weight1)
@@ -2529,84 +2574,93 @@ int __cdecl Compiler::WtdRefCntCmp(const void *op1, const void *op2)
/* If the unweighted ref counts are different then use their difference */
int diff = (int)dsc2->lvRefCnt - (int)dsc1->lvRefCnt;
- if (diff != 0)
- return diff;
+ if (diff != 0)
+ {
+ return diff;
+ }
/* If one is a GC type and the other is not the GC type wins */
if (varTypeIsGC(dsc1->TypeGet()) != varTypeIsGC(dsc2->TypeGet()))
{
if (varTypeIsGC(dsc1->TypeGet()))
+ {
diff = -1;
+ }
else
+ {
diff = +1;
+ }
return diff;
}
-
+
/* If one was enregistered in the previous pass then it wins */
if (dsc1->lvRegister != dsc2->lvRegister)
{
if (dsc1->lvRegister)
+ {
diff = -1;
+ }
else
+ {
diff = +1;
+ }
return diff;
- }
+ }
/* We have a tie! */
/* To achieve a Stable Sort we use the LclNum (by way of the pointer address) */
if (dsc1 < dsc2)
+ {
return -1;
+ }
if (dsc1 > dsc2)
+ {
return +1;
+ }
return 0;
}
-
/*****************************************************************************
*
* Sort the local variable table by refcount and assign tracking indices.
*/
-void Compiler::lvaSortOnly()
+void Compiler::lvaSortOnly()
{
/* Now sort the variable table by ref-count */
- qsort(lvaRefSorted, lvaCount, sizeof(*lvaRefSorted),
- (compCodeOpt() == SMALL_CODE) ? RefCntCmp
- : WtdRefCntCmp);
+ qsort(lvaRefSorted, lvaCount, sizeof(*lvaRefSorted), (compCodeOpt() == SMALL_CODE) ? RefCntCmp : WtdRefCntCmp);
lvaSortAgain = false;
lvaDumpRefCounts();
-
}
-void
-Compiler::lvaDumpRefCounts()
+void Compiler::lvaDumpRefCounts()
{
-#ifdef DEBUG
+#ifdef DEBUG
- if (verbose && lvaCount)
+ if (verbose && lvaCount)
{
printf("refCnt table for '%s':\n", info.compMethodName);
for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++)
{
unsigned refCnt = lvaRefSorted[lclNum]->lvRefCnt;
- if (refCnt == 0)
+ if (refCnt == 0)
+ {
break;
+ }
unsigned refCntWtd = lvaRefSorted[lclNum]->lvRefCntWtd;
printf(" ");
gtDispLclVar((unsigned)(lvaRefSorted[lclNum] - lvaTable));
- printf(" [%6s]: refCnt = %4u, refCntWtd = %6s",
- varTypeName(lvaRefSorted[lclNum]->TypeGet()),
- refCnt,
+ printf(" [%6s]: refCnt = %4u, refCntWtd = %6s", varTypeName(lvaRefSorted[lclNum]->TypeGet()), refCnt,
refCntWtd2str(refCntWtd));
regMaskSmall pref = lvaRefSorted[lclNum]->lvPrefReg;
@@ -2629,18 +2683,20 @@ Compiler::lvaDumpRefCounts()
* Sort the local variable table by refcount and assign tracking indices.
*/
-void Compiler::lvaSortByRefCount()
+void Compiler::lvaSortByRefCount()
{
- lvaTrackedCount = 0;
+ lvaTrackedCount = 0;
lvaTrackedCountInSizeTUnits = 0;
if (lvaCount == 0)
+ {
return;
+ }
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- LclVarDsc * * refTab;
+ LclVarDsc** refTab;
/* We'll sort the variables by ref count - allocate the sorted table */
@@ -2648,9 +2704,7 @@ void Compiler::lvaSortByRefCount()
/* Fill in the table used for sorting */
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
/* Append this variable to the table for sorting */
@@ -2659,7 +2713,7 @@ void Compiler::lvaSortByRefCount()
/* If we have JMP, all arguments must have a location
* even if we don't use them inside the method */
- if (compJmpOpUsed && varDsc->lvIsParam)
+ if (compJmpOpUsed && varDsc->lvIsParam)
{
/* ...except when we have varargs and the argument is
passed on the stack. In that case, it's important
@@ -2679,7 +2733,7 @@ void Compiler::lvaSortByRefCount()
varDsc->lvTracked = 1;
/* If the ref count is zero */
- if (varDsc->lvRefCnt == 0)
+ if (varDsc->lvRefCnt == 0)
{
/* Zero ref count, make this untracked */
varDsc->lvTracked = 0;
@@ -2699,10 +2753,11 @@ void Compiler::lvaSortByRefCount()
// Pinned variables may not be tracked (a condition of the GCInfo representation)
// or enregistered, on x86 -- it is believed that we can enregister pinned (more properly, "pinning")
// references when using the general GC encoding.
- if (varDsc->lvAddrExposed)
+ if (varDsc->lvAddrExposed)
{
- varDsc->lvTracked = 0;
- assert(varDsc->lvType != TYP_STRUCT || varDsc->lvDoNotEnregister); // For structs, should have set this when we set lvAddrExposed.
+ varDsc->lvTracked = 0;
+ assert(varDsc->lvType != TYP_STRUCT ||
+ varDsc->lvDoNotEnregister); // For structs, should have set this when we set lvAddrExposed.
}
else if (varTypeIsStruct(varDsc))
{
@@ -2718,18 +2773,17 @@ void Compiler::lvaSortByRefCount()
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DNER_IsStruct));
}
}
- else if (varDsc->lvIsStructField &&
- (lvaGetParentPromotionType(lclNum) != PROMOTION_TYPE_INDEPENDENT))
+ else if (varDsc->lvIsStructField && (lvaGetParentPromotionType(lclNum) != PROMOTION_TYPE_INDEPENDENT))
{
// SSA must exclude struct fields that are not independently promoted
- // as dependent fields could be assigned using a CopyBlock
+ // as dependent fields could be assigned using a CopyBlock
// resulting in a single node causing multiple SSA definitions
// which isn't currently supported by SSA
//
- // TODO-CQ: Consider using lvLclBlockOpAddr and only marking these LclVars
+ // TODO-CQ: Consider using lvLclBlockOpAddr and only marking these LclVars
// untracked when a blockOp is used to assign the struct.
//
- varDsc->lvTracked = 0; // so, don't mark as tracked
+ varDsc->lvTracked = 0; // so, don't mark as tracked
}
else if (varDsc->lvPinned)
{
@@ -2742,7 +2796,7 @@ void Compiler::lvaSortByRefCount()
// Are we not optimizing and we have exception handlers?
// if so mark all args and locals "do not enregister".
//
- if (opts.MinOpts() && compHndBBtabCount > 0)
+ if (opts.MinOpts() && compHndBBtabCount > 0)
{
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DNER_LiveInOutOfHandler));
continue;
@@ -2753,31 +2807,31 @@ void Compiler::lvaSortByRefCount()
switch (type)
{
#if CPU_HAS_FP_SUPPORT
- case TYP_FLOAT:
- case TYP_DOUBLE:
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
#endif
- case TYP_INT:
- case TYP_LONG:
- case TYP_REF:
- case TYP_BYREF:
+ case TYP_INT:
+ case TYP_LONG:
+ case TYP_REF:
+ case TYP_BYREF:
#ifdef FEATURE_SIMD
- case TYP_SIMD8:
- case TYP_SIMD12:
- case TYP_SIMD16:
- case TYP_SIMD32:
+ case TYP_SIMD8:
+ case TYP_SIMD12:
+ case TYP_SIMD16:
+ case TYP_SIMD32:
#endif // FEATURE_SIMD
- case TYP_STRUCT:
- break;
+ case TYP_STRUCT:
+ break;
- case TYP_UNDEF:
- case TYP_UNKNOWN:
- noway_assert(!"lvType not set correctly");
- varDsc->lvType = TYP_INT;
+ case TYP_UNDEF:
+ case TYP_UNKNOWN:
+ noway_assert(!"lvType not set correctly");
+ varDsc->lvType = TYP_INT;
- __fallthrough;
+ __fallthrough;
- default:
- varDsc->lvTracked = 0;
+ default:
+ varDsc->lvTracked = 0;
}
}
@@ -2787,7 +2841,7 @@ void Compiler::lvaSortByRefCount()
/* Decide which variables will be worth tracking */
- if (lvaCount > lclMAX_TRACKED)
+ if (lvaCount > lclMAX_TRACKED)
{
/* Mark all variables past the first 'lclMAX_TRACKED' as untracked */
@@ -2804,25 +2858,25 @@ void Compiler::lvaSortByRefCount()
/* Assign indices to all the variables we've decided to track */
- for (lclNum = 0; lclNum < min(lvaCount,lclMAX_TRACKED); lclNum++)
+ for (lclNum = 0; lclNum < min(lvaCount, lclMAX_TRACKED); lclNum++)
{
varDsc = lvaRefSorted[lclNum];
- if (varDsc->lvTracked)
+ if (varDsc->lvTracked)
{
noway_assert(varDsc->lvRefCnt > 0);
/* This variable will be tracked - assign it an index */
- lvaTrackedToVarNum[lvaTrackedCount] = (unsigned)(varDsc - lvaTable); // The type of varDsc and lvaTable
- // is LclVarDsc. Subtraction will give us
- // the index.
+ lvaTrackedToVarNum[lvaTrackedCount] = (unsigned)(varDsc - lvaTable); // The type of varDsc and lvaTable
+ // is LclVarDsc. Subtraction will give us
+ // the index.
varDsc->lvVarIndex = lvaTrackedCount++;
}
}
// We have a new epoch, and also cache the tracked var count in terms of size_t's sufficient to hold that many bits.
lvaCurEpoch++;
- lvaTrackedCountInSizeTUnits = unsigned(roundUp(lvaTrackedCount, sizeof(size_t)*8))/unsigned(sizeof(size_t)*8);
+ lvaTrackedCountInSizeTUnits = unsigned(roundUp(lvaTrackedCount, sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);
#ifdef DEBUG
VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeFull(this));
@@ -2832,14 +2886,14 @@ void Compiler::lvaSortByRefCount()
#if ASSERTION_PROP
/*****************************************************************************
*
- * This is called by lvaMarkLclRefs to disqualify a variable from being
- * considered by optAddCopies()
+ * This is called by lvaMarkLclRefs to disqualify a variable from being
+ * considered by optAddCopies()
*/
-void LclVarDsc::lvaDisqualifyVar()
+void LclVarDsc::lvaDisqualifyVar()
{
- this->lvDisqualify = true;
- this->lvSingleDef = false;
- this->lvDefStmt = NULL;
+ this->lvDisqualify = true;
+ this->lvSingleDef = false;
+ this->lvDefStmt = nullptr;
}
#endif // ASSERTION_PROP
@@ -2847,7 +2901,7 @@ void LclVarDsc::lvaDisqualifyVar()
/**********************************************************************************
* Get type of a variable when passed as an argument.
*/
-var_types LclVarDsc::lvaArgType()
+var_types LclVarDsc::lvaArgType()
{
var_types type = TypeGet();
@@ -2856,55 +2910,59 @@ var_types LclVarDsc::lvaArgType()
{
switch (lvExactSize)
{
- case 1: type = TYP_BYTE; break;
- case 2: type = TYP_SHORT; break;
- case 4: type = TYP_INT; break;
- case 8:
- switch (*lvGcLayout)
- {
- case TYPE_GC_NONE:
- type = TYP_I_IMPL;
+ case 1:
+ type = TYP_BYTE;
break;
-
- case TYPE_GC_REF:
- type = TYP_REF;
+ case 2:
+ type = TYP_SHORT;
break;
-
- case TYPE_GC_BYREF:
- type = TYP_BYREF;
+ case 4:
+ type = TYP_INT;
break;
+ case 8:
+ switch (*lvGcLayout)
+ {
+ case TYPE_GC_NONE:
+ type = TYP_I_IMPL;
+ break;
- default:
- unreached();
- }
- break;
+ case TYPE_GC_REF:
+ type = TYP_REF;
+ break;
- default:
- type = TYP_BYREF;
- break;
+ case TYPE_GC_BYREF:
+ type = TYP_BYREF;
+ break;
+
+ default:
+ unreached();
+ }
+ break;
+ default:
+ type = TYP_BYREF;
+ break;
}
}
#elif defined(_TARGET_X86_)
- // Nothing to do; use the type as is.
+// Nothing to do; use the type as is.
#else
NYI("lvaArgType");
#endif //_TARGET_AMD64_
return type;
}
-#endif // !LEGACY_BACKEND
-
+#endif // !LEGACY_BACKEND
/*****************************************************************************
*
* This is called by lvaMarkLclRefsCallback() to do variable ref marking
*/
-void Compiler::lvaMarkLclRefs(GenTreePtr tree)
+void Compiler::lvaMarkLclRefs(GenTreePtr tree)
{
/* Is this a call to unmanaged code ? */
- if (tree->gtOper == GT_CALL && tree->gtFlags & GTF_CALL_UNMANAGED)
+ if (tree->gtOper == GT_CALL && tree->gtFlags & GTF_CALL_UNMANAGED)
{
assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
if (!opts.ShouldUsePInvokeHelpers())
@@ -2914,42 +2972,41 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
unsigned lclNum = info.compLvFrameListRoot;
noway_assert(lclNum <= lvaCount);
- LclVarDsc * varDsc = lvaTable + lclNum;
+ LclVarDsc* varDsc = lvaTable + lclNum;
/* Increment the ref counts twice */
varDsc->incRefCnts(lvaMarkRefsWeight, this);
varDsc->incRefCnts(lvaMarkRefsWeight, this);
}
}
-
+
/* Is this an assignment? */
if (tree->OperKind() & GTK_ASGOP)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
-
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
/* Set target register for RHS local if assignment is of a "small" type */
if (varTypeIsByte(tree->gtType))
{
- unsigned lclNum;
- LclVarDsc * varDsc = NULL;
+ unsigned lclNum;
+ LclVarDsc* varDsc = nullptr;
/* GT_CHS is special it doesn't have a valid op2 */
- if (tree->gtOper == GT_CHS)
+ if (tree->gtOper == GT_CHS)
{
- if (op1->gtOper == GT_LCL_VAR)
- {
+ if (op1->gtOper == GT_LCL_VAR)
+ {
lclNum = op1->gtLclVarCommon.gtLclNum;
noway_assert(lclNum < lvaCount);
varDsc = &lvaTable[lclNum];
}
}
- else
+ else
{
- if (op2->gtOper == GT_LCL_VAR)
+ if (op2->gtOper == GT_LCL_VAR)
{
lclNum = op2->gtLclVarCommon.gtLclNum;
noway_assert(lclNum < lvaCount);
@@ -2966,41 +3023,49 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
/* Is this an assignment to a local variable? */
- if (op1->gtOper == GT_LCL_VAR && op2->gtType != TYP_BOOL)
+ if (op1->gtOper == GT_LCL_VAR && op2->gtType != TYP_BOOL)
{
/* Only simple assignments allowed for booleans */
- if (tree->gtOper != GT_ASG)
+ if (tree->gtOper != GT_ASG)
+ {
goto NOT_BOOL;
+ }
/* Is the RHS clearly a boolean value? */
switch (op2->gtOper)
{
- unsigned lclNum;
+ unsigned lclNum;
- case GT_CNS_INT:
+ case GT_CNS_INT:
- if (op2->gtIntCon.gtIconVal == 0)
- break;
- if (op2->gtIntCon.gtIconVal == 1)
- break;
+ if (op2->gtIntCon.gtIconVal == 0)
+ {
+ break;
+ }
+ if (op2->gtIntCon.gtIconVal == 1)
+ {
+ break;
+ }
- // Not 0 or 1, fall through ....
- __fallthrough;
+ // Not 0 or 1, fall through ....
+ __fallthrough;
- default:
+ default:
- if (op2->OperIsCompare())
- break;
+ if (op2->OperIsCompare())
+ {
+ break;
+ }
- NOT_BOOL:
+ NOT_BOOL:
- lclNum = op1->gtLclVarCommon.gtLclNum;
- noway_assert(lclNum < lvaCount);
+ lclNum = op1->gtLclVarCommon.gtLclNum;
+ noway_assert(lclNum < lvaCount);
- lvaTable[lclNum].lvIsBoolean = false;
- break;
+ lvaTable[lclNum].lvIsBoolean = false;
+ break;
}
}
#endif
@@ -3010,23 +3075,23 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
/* Special case: assignment node */
- if (tree->gtOper == GT_ASG)
+ if (tree->gtOper == GT_ASG)
{
- if (tree->gtType == TYP_INT)
+ if (tree->gtType == TYP_INT)
{
- unsigned lclNum1;
- LclVarDsc * varDsc1;
+ unsigned lclNum1;
+ LclVarDsc* varDsc1;
- GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
- if (op1->gtOper != GT_LCL_VAR)
+ if (op1->gtOper != GT_LCL_VAR)
return;
lclNum1 = op1->gtLclVarCommon.gtLclNum;
noway_assert(lclNum1 < lvaCount);
varDsc1 = lvaTable + lclNum1;
- if (varDsc1->lvAssignOne)
+ if (varDsc1->lvAssignOne)
varDsc1->lvAssignTwo = true;
else
varDsc1->lvAssignOne = true;
@@ -3040,13 +3105,13 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
#ifdef _TARGET_XARCH_
/* Special case: integer shift node by a variable amount */
- if (tree->OperIsShiftOrRotate())
+ if (tree->OperIsShiftOrRotate())
{
- if (tree->gtType == TYP_INT)
+ if (tree->gtType == TYP_INT)
{
- GenTreePtr op2 = tree->gtOp.gtOp2;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
- if (op2->gtOper == GT_LCL_VAR)
+ if (op2->gtOper == GT_LCL_VAR)
{
unsigned lclNum = op2->gtLclVarCommon.gtLclNum;
noway_assert(lclNum < lvaCount);
@@ -3058,8 +3123,10 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
}
#endif
- if ((tree->gtOper != GT_LCL_VAR) && (tree->gtOper != GT_LCL_FLD))
+ if ((tree->gtOper != GT_LCL_VAR) && (tree->gtOper != GT_LCL_FLD))
+ {
return;
+ }
/* This must be a local variable reference */
@@ -3067,19 +3134,21 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
noway_assert(lclNum < lvaCount);
- LclVarDsc * varDsc = lvaTable + lclNum;
+ LclVarDsc* varDsc = lvaTable + lclNum;
/* Increment the reference counts */
varDsc->incRefCnts(lvaMarkRefsWeight, this);
-
+
if (lvaVarAddrExposed(lclNum))
+ {
varDsc->lvIsBoolean = false;
+ }
- if (tree->gtOper == GT_LCL_FLD)
+ if (tree->gtOper == GT_LCL_FLD)
{
#if ASSERTION_PROP
- // variables that have uses inside a GT_LCL_FLD
+ // variables that have uses inside a GT_LCL_FLD
// cause problems, so we will disqualify them here
varDsc->lvaDisqualifyVar();
#endif // ASSERTION_PROP
@@ -3088,11 +3157,10 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
#if ASSERTION_PROP
/* Exclude the normal entry block */
- if (fgDomsComputed &&
- (lvaMarkRefsCurBlock->bbNum != 1) &&
- lvaMarkRefsCurBlock->bbIDom != NULL)
+ if (fgDomsComputed && (lvaMarkRefsCurBlock->bbNum != 1) && lvaMarkRefsCurBlock->bbIDom != nullptr)
{
- // If any entry block except the normal entry block dominates the block, then mark the local with the lvVolatileHint flag.
+ // If any entry block except the normal entry block dominates the block, then mark the local with the
+ // lvVolatileHint flag.
if (BlockSetOps::MayBeUninit(lvaMarkRefsCurBlock->bbDoms))
{
@@ -3109,11 +3177,11 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
/* Record if the variable has a single def or not */
- if (!varDsc->lvDisqualify) // If this variable is already disqualified we can skip this
+ if (!varDsc->lvDisqualify) // If this variable is already disqualified we can skip this
{
- if (tree->gtFlags & GTF_VAR_DEF) // Is this a def of our variable
+ if (tree->gtFlags & GTF_VAR_DEF) // Is this a def of our variable
{
- /*
+ /*
If we have one of these cases:
1. We have already seen a definition (i.e lvSingleDef is true)
2. or info.CompInitMem is true (thus this would be the second definition)
@@ -3123,20 +3191,18 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
Note that all parameters start out with lvSingleDef set to true
*/
- if ((varDsc->lvSingleDef == true) ||
- (info.compInitMem == true) ||
- (tree->gtFlags & GTF_COLON_COND) ||
- (tree->gtFlags & GTF_VAR_USEASG) )
+ if ((varDsc->lvSingleDef == true) || (info.compInitMem == true) || (tree->gtFlags & GTF_COLON_COND) ||
+ (tree->gtFlags & GTF_VAR_USEASG))
{
varDsc->lvaDisqualifyVar();
}
- else
+ else
{
- varDsc->lvSingleDef = true;
- varDsc->lvDefStmt = lvaMarkRefsCurStmt;
+ varDsc->lvSingleDef = true;
+ varDsc->lvDefStmt = lvaMarkRefsCurStmt;
}
}
- else // otherwise this is a ref of our variable
+ else // otherwise this is a ref of our variable
{
if (BlockSetOps::MayBeUninit(varDsc->lvRefBlks))
{
@@ -3155,14 +3221,11 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
/* Variables must be used as the same type throughout the method */
- noway_assert(tiVerificationNeeded ||
- varDsc->lvType == TYP_UNDEF || tree->gtType == TYP_UNKNOWN ||
- allowStructs ||
- genActualType(varDsc->TypeGet()) == genActualType(tree->gtType) ||
- (tree->gtType == TYP_BYREF && varDsc->TypeGet() == TYP_I_IMPL) ||
- (tree->gtType == TYP_I_IMPL && varDsc->TypeGet() == TYP_BYREF) ||
- (tree->gtFlags & GTF_VAR_CAST) ||
- varTypeIsFloating(varDsc->TypeGet()) && varTypeIsFloating(tree->gtType));
+ noway_assert(tiVerificationNeeded || varDsc->lvType == TYP_UNDEF || tree->gtType == TYP_UNKNOWN || allowStructs ||
+ genActualType(varDsc->TypeGet()) == genActualType(tree->gtType) ||
+ (tree->gtType == TYP_BYREF && varDsc->TypeGet() == TYP_I_IMPL) ||
+ (tree->gtType == TYP_I_IMPL && varDsc->TypeGet() == TYP_BYREF) || (tree->gtFlags & GTF_VAR_CAST) ||
+ varTypeIsFloating(varDsc->TypeGet()) && varTypeIsFloating(tree->gtType));
/* Remember the type of the reference */
@@ -3173,7 +3236,7 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
}
#ifdef DEBUG
- if (tree->gtFlags & GTF_VAR_CAST)
+ if (tree->gtFlags & GTF_VAR_CAST)
{
// it should never be bigger than the variable slot
@@ -3184,7 +3247,9 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
unsigned treeSize = genTypeSize(tree->TypeGet());
unsigned varSize = genTypeSize(varDsc->TypeGet());
if (varDsc->TypeGet() == TYP_STRUCT)
+ {
varSize = varDsc->lvSize();
+ }
assert(treeSize <= varSize);
}
@@ -3192,14 +3257,13 @@ void Compiler::lvaMarkLclRefs(GenTreePtr tree)
#endif
}
-
/*****************************************************************************
*
* Helper passed to Compiler::fgWalkTreePre() to do variable ref marking.
*/
/* static */
-Compiler::fgWalkResult Compiler::lvaMarkLclRefsCallback(GenTreePtr *pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::lvaMarkLclRefsCallback(GenTreePtr* pTree, fgWalkData* data)
{
data->compiler->lvaMarkLclRefs(*pTree);
@@ -3211,36 +3275,37 @@ Compiler::fgWalkResult Compiler::lvaMarkLclRefsCallback(GenTreePtr *pTree, fgWa
* Update the local variable reference counts for one basic block
*/
-void Compiler::lvaMarkLocalVars(BasicBlock * block)
+void Compiler::lvaMarkLocalVars(BasicBlock* block)
{
#if ASSERTION_PROP
lvaMarkRefsCurBlock = block;
#endif
- lvaMarkRefsWeight = block->getBBWeight(this);
+ lvaMarkRefsWeight = block->getBBWeight(this);
#ifdef DEBUG
if (verbose)
- printf("\n*** marking local variables in block BB%02u (weight=%s)\n",
- block->bbNum, refCntWtd2str(lvaMarkRefsWeight));
+ {
+ printf("\n*** marking local variables in block BB%02u (weight=%s)\n", block->bbNum,
+ refCntWtd2str(lvaMarkRefsWeight));
+ }
#endif
for (GenTreePtr tree = block->FirstNonPhiDef(); tree; tree = tree->gtNext)
{
noway_assert(tree->gtOper == GT_STMT);
-
+
#if ASSERTION_PROP
lvaMarkRefsCurStmt = tree;
#endif
#ifdef DEBUG
if (verbose)
+ {
gtDispTree(tree);
+ }
#endif
- fgWalkTreePre(&tree->gtStmt.gtStmtExpr,
- Compiler::lvaMarkLclRefsCallback,
- (void *) this,
- false);
+ fgWalkTreePre(&tree->gtStmt.gtStmtExpr, Compiler::lvaMarkLclRefsCallback, (void*)this, false);
}
}
@@ -3250,12 +3315,14 @@ void Compiler::lvaMarkLocalVars(BasicBlock * block)
* counts.
*/
-void Compiler::lvaMarkLocalVars()
+void Compiler::lvaMarkLocalVars()
{
#ifdef DEBUG
if (verbose)
+ {
printf("\n*************** In lvaMarkLocalVars()");
+ }
#endif
/* If there is a call to an unmanaged target, we already grabbed a
@@ -3267,15 +3334,14 @@ void Compiler::lvaMarkLocalVars()
assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
if (!opts.ShouldUsePInvokeHelpers())
{
- noway_assert(info.compLvFrameListRoot >= info.compLocalsCount &&
- info.compLvFrameListRoot < lvaCount);
+ noway_assert(info.compLvFrameListRoot >= info.compLocalsCount && info.compLvFrameListRoot < lvaCount);
- lvaTable[info.compLvFrameListRoot].lvType = TYP_I_IMPL;
+ lvaTable[info.compLvFrameListRoot].lvType = TYP_I_IMPL;
/* Set the refCnt, it is used in the prolog and return block(s) */
- lvaTable[info.compLvFrameListRoot].lvRefCnt = 2;
- lvaTable[info.compLvFrameListRoot].lvRefCntWtd = 2 * BB_UNITY_WEIGHT;
+ lvaTable[info.compLvFrameListRoot].lvRefCnt = 2;
+ lvaTable[info.compLvFrameListRoot].lvRefCntWtd = 2 * BB_UNITY_WEIGHT;
}
}
@@ -3284,7 +3350,7 @@ void Compiler::lvaMarkLocalVars()
#if !FEATURE_EH_FUNCLETS
// Grab space for exception handling
-
+
if (ehNeedsShadowSPslots())
{
// The first slot is reserved for ICodeManager::FixContext(ppEndRegion)
@@ -3292,7 +3358,7 @@ void Compiler::lvaMarkLocalVars()
unsigned slotsNeeded = 1;
unsigned handlerNestingLevel = ehMaxHndNestingCount;
-
+
if (opts.compDbgEnC && (handlerNestingLevel < (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL))
handlerNestingLevel = (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL;
@@ -3303,9 +3369,9 @@ void Compiler::lvaMarkLocalVars()
// For zero-termination of the shadow-Stack-pointer chain
slotsNeeded++;
- lvaShadowSPslotsVar = lvaGrabTempWithImplicitUse(false DEBUGARG("lvaShadowSPslotsVar"));
- LclVarDsc * shadowSPslotsVar = &lvaTable[lvaShadowSPslotsVar];
- shadowSPslotsVar->lvType = TYP_BLK;
+ lvaShadowSPslotsVar = lvaGrabTempWithImplicitUse(false DEBUGARG("lvaShadowSPslotsVar"));
+ LclVarDsc* shadowSPslotsVar = &lvaTable[lvaShadowSPslotsVar];
+ shadowSPslotsVar->lvType = TYP_BLK;
shadowSPslotsVar->lvExactSize = (slotsNeeded * TARGET_POINTER_SIZE);
}
@@ -3314,20 +3380,20 @@ void Compiler::lvaMarkLocalVars()
#if FEATURE_EH_FUNCLETS
if (ehNeedsPSPSym())
{
- lvaPSPSym = lvaGrabTempWithImplicitUse(false DEBUGARG("PSPSym"));
- LclVarDsc * lclPSPSym = &lvaTable[lvaPSPSym];
- lclPSPSym->lvType = TYP_I_IMPL;
+ lvaPSPSym = lvaGrabTempWithImplicitUse(false DEBUGARG("PSPSym"));
+ LclVarDsc* lclPSPSym = &lvaTable[lvaPSPSym];
+ lclPSPSym->lvType = TYP_I_IMPL;
}
#endif // FEATURE_EH_FUNCLETS
if (compLocallocUsed)
{
- lvaLocAllocSPvar = lvaGrabTempWithImplicitUse(false DEBUGARG("LocAllocSPvar"));
- LclVarDsc * locAllocSPvar = &lvaTable[lvaLocAllocSPvar];
- locAllocSPvar->lvType = TYP_I_IMPL;
+ lvaLocAllocSPvar = lvaGrabTempWithImplicitUse(false DEBUGARG("LocAllocSPvar"));
+ LclVarDsc* locAllocSPvar = &lvaTable[lvaLocAllocSPvar];
+ locAllocSPvar->lvType = TYP_I_IMPL;
}
-
- BasicBlock * block;
+
+ BasicBlock* block;
#if defined(DEBUGGING_SUPPORT) || defined(DEBUG)
@@ -3344,12 +3410,10 @@ void Compiler::lvaMarkLocalVars()
if (opts.compScopeInfo && (info.compVarScopesCount > 0))
#endif
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
varDsc->lvSlotNum = lclNum;
}
@@ -3358,11 +3422,9 @@ void Compiler::lvaMarkLocalVars()
#endif // defined(DEBUGGING_SUPPORT) || defined(DEBUG)
/* Mark all local variable references */
-
+
lvaRefCountingStarted = true;
- for (block = fgFirstBB;
- block;
- block = block->bbNext)
+ for (block = fgFirstBB; block; block = block->bbNext)
{
lvaMarkLocalVars(block);
}
@@ -3372,26 +3434,26 @@ void Compiler::lvaMarkLocalVars()
* This counts as an extra reference with a weight of 2
*/
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
if (lclNum >= info.compArgsCount)
- break; // early exit for loop
+ {
+ break; // early exit for loop
+ }
if ((varDsc->lvIsRegArg) && (varDsc->lvRefCnt > 0))
{
// Fix 388376 ARM JitStress WP7
- varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
- varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
+ varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
+ varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
}
}
#if ASSERTION_PROP
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (!opts.MinOpts() && !opts.compDbgCode)
{
// Note: optAddCopies() depends on lvaRefBlks, which is set in lvaMarkLocalVars(BasicBlock*), called above.
optAddCopies();
@@ -3399,16 +3461,20 @@ void Compiler::lvaMarkLocalVars()
#endif
if (lvaKeepAliveAndReportThis() && lvaTable[0].lvRefCnt == 0)
+ {
lvaTable[0].lvRefCnt = 1;
- // This isn't strictly needed as we will make a copy of the param-type-arg
- // in the prolog. However, this ensures that the LclVarDsc corresponding to
- // info.compTypeCtxtArg is valid.
+ // This isn't strictly needed as we will make a copy of the param-type-arg
+ // in the prolog. However, this ensures that the LclVarDsc corresponding to
+ // info.compTypeCtxtArg is valid.
+ }
else if (lvaReportParamTypeArg() && lvaTable[info.compTypeCtxtArg].lvRefCnt == 0)
+ {
lvaTable[info.compTypeCtxtArg].lvRefCnt = 1;
+ }
lvaLocalVarRefCounted = true;
lvaRefCountingStarted = false;
-
+
lvaSortByRefCount();
}
@@ -3426,8 +3492,8 @@ void Compiler::lvaAllocOutgoingArgSpace()
/* Set the refCnts */
- lvaTable[lvaOutgoingArgSpaceVar].lvRefCnt = 1;
- lvaTable[lvaOutgoingArgSpaceVar].lvRefCntWtd = BB_UNITY_WEIGHT;
+ lvaTable[lvaOutgoingArgSpaceVar].lvRefCnt = 1;
+ lvaTable[lvaOutgoingArgSpaceVar].lvRefCntWtd = BB_UNITY_WEIGHT;
if (lvaOutgoingArgSpaceSize == 0)
{
@@ -3438,23 +3504,24 @@ void Compiler::lvaAllocOutgoingArgSpace()
// 2. we are generating profiling Enter/Leave/TailCall hooks. This will ensure
// that even methods without any calls will have outgoing arg area space allocated.
//
- // An example of these two cases is Windows Amd64, where the ABI requires 4 slots for
+ // An example of these two cases is Windows Amd64, where the ABI requires 4 slots for
// the outgoing arg space if the method makes any calls.
lvaOutgoingArgSpaceSize = MIN_ARG_AREA_FOR_CALL;
}
}
}
- noway_assert(lvaOutgoingArgSpaceVar >= info.compLocalsCount &&
- lvaOutgoingArgSpaceVar < lvaCount);
+ noway_assert(lvaOutgoingArgSpaceVar >= info.compLocalsCount && lvaOutgoingArgSpaceVar < lvaCount);
#endif // FEATURE_FIXED_OUT_ARGS
}
-inline void Compiler::lvaIncrementFrameSize(unsigned size)
+inline void Compiler::lvaIncrementFrameSize(unsigned size)
{
if (size > MAX_FrameSize || compLclFrameSize + size > MAX_FrameSize)
+ {
BADCODE("Frame size overflow");
+ }
compLclFrameSize += size;
}
@@ -3510,10 +3577,9 @@ unsigned Compiler::lvaGetMaxSpillTempSize()
if (lvaDoneFrameLayout >= REGALLOC_FRAME_LAYOUT)
{
unsigned maxTmpSize = sizeof(double) + sizeof(int);
-
- maxTmpSize += (tmpDoubleSpillMax * sizeof(double)) +
- (tmpIntSpillMax * sizeof(int));
-
+
+ maxTmpSize += (tmpDoubleSpillMax * sizeof(double)) + (tmpIntSpillMax * sizeof(int));
+
result = maxTmpSize;
}
else
@@ -3882,27 +3948,37 @@ unsigned Compiler::lvaGetMaxSpillTempSize()
*/
// clang-format on
-void Compiler::lvaAssignFrameOffsets(FrameLayoutState curState)
+void Compiler::lvaAssignFrameOffsets(FrameLayoutState curState)
{
noway_assert(lvaDoneFrameLayout < curState);
lvaDoneFrameLayout = curState;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** In lvaAssignFrameOffsets");
if (curState == INITIAL_FRAME_LAYOUT)
+ {
printf("(INITIAL_FRAME_LAYOUT)");
+ }
else if (curState == PRE_REGALLOC_FRAME_LAYOUT)
+ {
printf("(PRE_REGALLOC_FRAME_LAYOUT)");
+ }
else if (curState == REGALLOC_FRAME_LAYOUT)
+ {
printf("(REGALLOC_FRAME_LAYOUT)");
+ }
else if (curState == TENTATIVE_FRAME_LAYOUT)
+ {
printf("(TENTATIVE_FRAME_LAYOUT)");
+ }
else if (curState == FINAL_FRAME_LAYOUT)
+ {
printf("(FINAL_FRAME_LAYOUT)");
+ }
else
{
printf("(UNKNOWN)");
@@ -3925,7 +4001,6 @@ void Compiler::lvaAssignFrameOffsets(FrameLayoutState curState)
lvaAssignVirtualFrameOffsetsToArgs();
-
/*-------------------------------------------------------------------------
*
* Now compute stack offsets for any variables that don't live in registers
@@ -3946,10 +4021,9 @@ void Compiler::lvaAssignFrameOffsets(FrameLayoutState curState)
lvaFixVirtualFrameOffsets();
- // Modify the stack offset for fields of promoted structs.
+ // Modify the stack offset for fields of promoted structs.
lvaAssignFrameOffsetsToPromotedStructs();
-
/*-------------------------------------------------------------------------
*
* Finalize
@@ -3976,7 +4050,7 @@ void Compiler::lvaAssignFrameOffsets(FrameLayoutState curState)
*/
void Compiler::lvaFixVirtualFrameOffsets()
{
- LclVarDsc * varDsc;
+ LclVarDsc* varDsc;
#if FEATURE_EH_FUNCLETS && defined(_TARGET_AMD64_)
if (ehNeedsPSPSym())
@@ -3986,8 +4060,8 @@ void Compiler::lvaFixVirtualFrameOffsets()
// the PSPSym and the outgoing argument space.
assert(lvaPSPSym != BAD_VAR_NUM);
varDsc = &lvaTable[lvaPSPSym];
- assert(varDsc->lvFramePointerBased); // We always access it RBP-relative.
- assert(!varDsc->lvMustInit); // It is never "must init".
+ assert(varDsc->lvFramePointerBased); // We always access it RBP-relative.
+ assert(!varDsc->lvMustInit); // It is never "must init".
varDsc->lvStkOffs = codeGen->genCallerSPtoInitialSPdelta() + lvaLclSize(lvaOutgoingArgSpaceVar);
}
#endif
@@ -3996,13 +4070,15 @@ void Compiler::lvaFixVirtualFrameOffsets()
int delta = 0;
#ifdef _TARGET_XARCH_
- delta += REGSIZE_BYTES; // pushed PC (return address) for x86/x64
+ delta += REGSIZE_BYTES; // pushed PC (return address) for x86/x64
if (codeGen->doubleAlignOrFramePointerUsed())
- delta += REGSIZE_BYTES; // pushed EBP (frame pointer)
+ {
+ delta += REGSIZE_BYTES; // pushed EBP (frame pointer)
+ }
#endif
- if (!codeGen->isFramePointerUsed())
+ if (!codeGen->isFramePointerUsed())
{
// pushed registers, return address, and padding
delta += codeGen->genTotalFrameSize();
@@ -4014,17 +4090,15 @@ void Compiler::lvaFixVirtualFrameOffsets()
delta += 2 * REGSIZE_BYTES;
}
#elif defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
- else
+ else
{
- // FP is used.
+ // FP is used.
delta += codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta();
}
#endif //_TARGET_AMD64_
unsigned lclNum;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
bool doAssignStkOffs = true;
@@ -4036,34 +4110,36 @@ void Compiler::lvaFixVirtualFrameOffsets()
//
if (varDsc->lvIsStructField && !varDsc->lvIsParam)
{
- LclVarDsc * parentvarDsc = &lvaTable[varDsc->lvParentLcl];
+ LclVarDsc* parentvarDsc = &lvaTable[varDsc->lvParentLcl];
lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc);
-
+
if (promotionType == PROMOTION_TYPE_DEPENDENT)
{
- doAssignStkOffs = false; // Assigned later in lvaAssignFrameOffsetsToPromotedStructs()
+ doAssignStkOffs = false; // Assigned later in lvaAssignFrameOffsetsToPromotedStructs()
}
}
if (!varDsc->lvOnFrame)
{
- if (!varDsc->lvIsParam
+ if (!varDsc->lvIsParam
#if !defined(_TARGET_AMD64_)
|| (varDsc->lvIsRegArg
#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED)
- && compIsProfilerHookNeeded() && !lvaIsPreSpilled(lclNum, codeGen->regSet.rsMaskPreSpillRegs(false)) // We need to assign stack offsets for prespilled arguments
+ && compIsProfilerHookNeeded() &&
+ !lvaIsPreSpilled(lclNum, codeGen->regSet.rsMaskPreSpillRegs(false)) // We need to assign stack offsets
+ // for prespilled arguments
#endif
- )
+ )
#endif // !defined(_TARGET_AMD64_)
- )
+ )
{
- doAssignStkOffs = false; // Not on frame or an incoming stack arg
+ doAssignStkOffs = false; // Not on frame or an incoming stack arg
}
}
-
+
if (doAssignStkOffs)
{
- varDsc->lvStkOffs += delta;
+ varDsc->lvStkOffs += delta;
#if DOUBLE_ALIGN
if (genDoubleAlign() && !codeGen->isFramePointerUsed())
@@ -4075,21 +4151,20 @@ void Compiler::lvaFixVirtualFrameOffsets()
// We need to re-adjust the offsets of the parameters so they are EBP
// relative rather than stack/frame pointer relative
- varDsc->lvStkOffs += (2 * sizeof(void *)); // return address and pushed EBP
+ varDsc->lvStkOffs += (2 * sizeof(void*)); // return address and pushed EBP
noway_assert(varDsc->lvStkOffs >= FIRST_ARG_STACK_OFFS);
}
}
#endif
// On System V environments the stkOffs could be 0 for params passed in registers.
- assert(codeGen->isFramePointerUsed() || varDsc->lvStkOffs >= 0); // Only EBP relative references can have negative offsets
+ assert(codeGen->isFramePointerUsed() ||
+ varDsc->lvStkOffs >= 0); // Only EBP relative references can have negative offsets
}
}
assert(tmpAllFree());
- for (TempDsc* temp = tmpListBeg();
- temp != nullptr;
- temp = tmpListNxt(temp))
+ for (TempDsc* temp = tmpListBeg(); temp != nullptr; temp = tmpListNxt(temp))
{
temp->tdAdjustTempOffs(delta);
}
@@ -4098,16 +4173,15 @@ void Compiler::lvaFixVirtualFrameOffsets()
#if FEATURE_FIXED_OUT_ARGS
- if (lvaOutgoingArgSpaceVar != BAD_VAR_NUM)
+ if (lvaOutgoingArgSpaceVar != BAD_VAR_NUM)
{
- varDsc = &lvaTable[lvaOutgoingArgSpaceVar];
- varDsc->lvStkOffs = 0;
+ varDsc = &lvaTable[lvaOutgoingArgSpaceVar];
+ varDsc->lvStkOffs = 0;
varDsc->lvFramePointerBased = false;
- varDsc->lvMustInit = false;
+ varDsc->lvMustInit = false;
}
#endif // FEATURE_FIXED_OUT_ARGS
-
}
#ifdef _TARGET_ARM_
@@ -4126,7 +4200,9 @@ bool Compiler::lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask)
void Compiler::lvaUpdateArgsWithInitialReg()
{
if (!compLSRADone)
+ {
return;
+ }
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
@@ -4134,10 +4210,10 @@ void Compiler::lvaUpdateArgsWithInitialReg()
if (varDsc->lvPromotedStruct())
{
- noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
- varDsc = lvaTable + fieldVarNum;
+ varDsc = lvaTable + fieldVarNum;
}
noway_assert(varDsc->lvIsParam);
@@ -4147,8 +4223,8 @@ void Compiler::lvaUpdateArgsWithInitialReg()
if (varTypeIsMultiReg(varDsc))
{
regPairNo initialRegPair = varDsc->lvArgInitRegPair;
- varDsc->lvRegNum = genRegPairLo(initialRegPair);
- varDsc->lvOtherReg = genRegPairHi(initialRegPair);
+ varDsc->lvRegNum = genRegPairLo(initialRegPair);
+ varDsc->lvOtherReg = genRegPairHi(initialRegPair);
}
else
{
@@ -4166,8 +4242,8 @@ void Compiler::lvaUpdateArgsWithInitialReg()
*/
void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
{
- unsigned lclNum = 0;
- int argOffs = 0;
+ unsigned lclNum = 0;
+ int argOffs = 0;
#ifdef UNIX_AMD64_ABI
int callerArgOffset = 0;
#endif // UNIX_AMD64_ABI
@@ -4183,15 +4259,17 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
*/
if (Target::g_tgtArgOrder == Target::ARG_ORDER_L2R)
- argOffs = compArgSize;
+ {
+ argOffs = compArgSize;
+ }
/* Update the argOffs to reflect arguments that are passed in registers */
- noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG);
- noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * sizeof(void *));
+ noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG);
+ noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*));
#ifdef _TARGET_X86_
- argOffs -= codeGen->intRegState.rsCalleeRegArgCount * sizeof(void *);
+ argOffs -= codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*);
#endif
#ifndef LEGACY_BACKEND
@@ -4201,11 +4279,12 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
/* Is there a "this" argument? */
- if (!info.compIsStatic)
+ if (!info.compIsStatic)
{
noway_assert(lclNum == info.compThisArg);
#ifndef _TARGET_X86_
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
+ argOffs =
+ lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
#endif // _TARGET_X86_
lclNum++;
}
@@ -4217,29 +4296,32 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
noway_assert(lclNum == info.compRetBuffArg);
noway_assert(lvaTable[lclNum].lvIsRegArg);
#ifndef _TARGET_X86_
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
+ argOffs =
+ lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
#endif // _TARGET_X86_
lclNum++;
}
#if USER_ARGS_COME_LAST
- //@GENERICS: extra argument for instantiation info
+ //@GENERICS: extra argument for instantiation info
if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void *), argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
if (info.compIsVarArgs)
{
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void *), argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
#endif // USER_ARGS_COME_LAST
- CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args;
- unsigned argSigLen = info.compMethodInfo->args.numArgs;
+ CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args;
+ unsigned argSigLen = info.compMethodInfo->args.numArgs;
#ifdef _TARGET_ARM_
//
@@ -4251,7 +4333,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
// float i,double j,float k,double l,struct_3 m) { }
//
// Basically the signature is: (all float regs full, 1 double, struct_3);
- //
+ //
// The double argument occurs before pre spill in the argument iteration and
// computes an argOffset of 0. struct_3 offset becomes 8. This is wrong.
// Because struct_3 is prespilled and double occurs after prespill.
@@ -4273,16 +4355,13 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
// Take care of pre spill registers first.
regMaskTP preSpillMask = codeGen->regSet.rsMaskPreSpillRegs(false);
- regMaskTP tempMask = RBM_NONE;
+ regMaskTP tempMask = RBM_NONE;
for (unsigned i = 0, preSpillLclNum = lclNum; i < argSigLen; ++i, ++preSpillLclNum)
{
if (lvaIsPreSpilled(preSpillLclNum, preSpillMask))
{
unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args);
- argOffs = lvaAssignVirtualFrameOffsetToArg(
- preSpillLclNum,
- argSize,
- argOffs);
+ argOffs = lvaAssignVirtualFrameOffsetToArg(preSpillLclNum, argSize, argOffs);
argLcls++;
// Early out if we can. If size is 8 and base reg is 2, then the mask is 0x1100
@@ -4303,10 +4382,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
{
if (!lvaIsPreSpilled(stkLclNum, preSpillMask))
{
- argOffs = lvaAssignVirtualFrameOffsetToArg(
- stkLclNum,
- eeGetArgSize(argLst, &info.compMethodInfo->args),
- argOffs);
+ argOffs =
+ lvaAssignVirtualFrameOffsetToArg(stkLclNum, eeGetArgSize(argLst, &info.compMethodInfo->args), argOffs);
argLcls++;
}
argLst = info.compCompHnd->getArgNext(argLst);
@@ -4324,43 +4401,44 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
argumentSize = (unsigned)roundUp(argumentSize, TARGET_POINTER_SIZE);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++,
- argumentSize,
- argOffs
- UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
+ argOffs =
+ lvaAssignVirtualFrameOffsetToArg(lclNum++, argumentSize, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
argLst = info.compCompHnd->getArgNext(argLst);
}
#endif // !_TARGET_ARM_
#if !USER_ARGS_COME_LAST
- //@GENERICS: extra argument for instantiation info
+ //@GENERICS: extra argument for instantiation info
if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void *), argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
if (info.compIsVarArgs)
{
- argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void *), argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, sizeof(void*),
+ argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
}
#endif // USER_ARGS_COME_LAST
-
}
#ifdef UNIX_AMD64_ABI
//
// lvaAssignVirtualFrameOffsetToArg() : Assign virtual stack offsets to an
// individual argument, and return the offset for the next argument.
-// Note: This method only calculates the initial offset of the stack passed/spilled arguments
+// Note: This method only calculates the initial offset of the stack passed/spilled arguments
// (if any - the RA might decide to spill(home on the stack) register passed arguments, if rarely used.)
-// The final offset is calculated in the lvaFixVirtualFrameOffsets method. It accounts for FP existence,
-// ret address slot, stack frame padding, alloca instructions, etc.
+// The final offset is calculated in the lvaFixVirtualFrameOffsets method. It accounts for FP existence,
+// ret address slot, stack frame padding, alloca instructions, etc.
// Note: This is the implementation for UNIX_AMD64 System V platforms.
//
-int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs UNIX_AMD64_ABI_ONLY_ARG(int * callerArgOffset))
+int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
+ unsigned argSize,
+ int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset))
{
noway_assert(lclNum < info.compArgsCount);
noway_assert(argSize);
@@ -4371,11 +4449,11 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
unsigned fieldVarNum = BAD_VAR_NUM;
noway_assert(lclNum < lvaCount);
- LclVarDsc * varDsc = lvaTable + lclNum;
+ LclVarDsc* varDsc = lvaTable + lclNum;
if (varDsc->lvPromotedStruct())
{
- noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
fieldVarNum = varDsc->lvFieldLclStart;
lvaPromotionType promotionType = lvaGetPromotionType(varDsc);
@@ -4434,15 +4512,15 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
}
// For struct promoted parameters we need to set the offsets for both LclVars.
- //
- // For a dependent promoted struct we also assign the struct fields stack offset
+ //
+ // For a dependent promoted struct we also assign the struct fields stack offset
if (varDsc->lvPromotedStruct())
{
lvaPromotionType promotionType = lvaGetPromotionType(varDsc);
if (promotionType == PROMOTION_TYPE_DEPENDENT)
{
- noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
assert(fieldVarNum == varDsc->lvFieldLclStart);
lvaTable[fieldVarNum].lvStkOffs = varDsc->lvStkOffs;
@@ -4466,27 +4544,31 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
//
// lvaAssignVirtualFrameOffsetToArg() : Assign virtual stack offsets to an
// individual argument, and return the offset for the next argument.
-// Note: This method only calculates the initial offset of the stack passed/spilled arguments
+// Note: This method only calculates the initial offset of the stack passed/spilled arguments
// (if any - the RA might decide to spill(home on the stack) register passed arguments, if rarely used.)
-// The final offset is calculated in the lvaFixVirtualFrameOffsets method. It accounts for FP existence,
-// ret address slot, stack frame padding, alloca instructions, etc.
+// The final offset is calculated in the lvaFixVirtualFrameOffsets method. It accounts for FP existence,
+// ret address slot, stack frame padding, alloca instructions, etc.
// Note: This implementation for all the platforms but UNIX_AMD64 OSs (System V 64 bit.)
-int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs UNIX_AMD64_ABI_ONLY_ARG(int * callerArgOffset))
+int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
+ unsigned argSize,
+ int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset))
{
noway_assert(lclNum < info.compArgsCount);
noway_assert(argSize);
if (Target::g_tgtArgOrder == Target::ARG_ORDER_L2R)
+ {
argOffs -= argSize;
+ }
unsigned fieldVarNum = BAD_VAR_NUM;
noway_assert(lclNum < lvaCount);
- LclVarDsc * varDsc = lvaTable + lclNum;
+ LclVarDsc* varDsc = lvaTable + lclNum;
if (varDsc->lvPromotedStruct())
{
- noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
fieldVarNum = varDsc->lvFieldLclStart;
lvaPromotionType promotionType = lvaGetPromotionType(varDsc);
@@ -4512,19 +4594,19 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
#if DEBUG
// TODO: Remove this noway_assert and replace occurrences of sizeof(void *) with argSize
// Also investigate why we are incrementing argOffs for X86 as this seems incorrect
- //
- noway_assert(argSize == sizeof(void *));
+ //
+ noway_assert(argSize == sizeof(void*));
#endif // DEBUG
#endif
#if defined(_TARGET_X86_)
- argOffs += sizeof(void *);
+ argOffs += sizeof(void*);
#elif defined(_TARGET_AMD64_)
// Register arguments on AMD64 also takes stack space. (in the backing store)
varDsc->lvStkOffs = argOffs;
- argOffs += sizeof(void *);
+ argOffs += sizeof(void*);
#elif defined(_TARGET_ARM64_)
- // Register arguments on ARM64 only take stack space when they have a frame home.
+// Register arguments on ARM64 only take stack space when they have a frame home.
#elif defined(_TARGET_ARM_)
// On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg
// in the prolog, so we have to fill in lvStkOffs here
@@ -4548,7 +4630,8 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
{
// Increment argOffs just once for the _first_ register after alignment pos
// in the prespill mask.
- if (!BitsBetween(codeGen->regSet.rsMaskPreSpillRegArg, regMask, codeGen->regSet.rsMaskPreSpillAlign))
+ if (!BitsBetween(codeGen->regSet.rsMaskPreSpillRegArg, regMask,
+ codeGen->regSet.rsMaskPreSpillAlign))
{
argOffs += TARGET_POINTER_SIZE;
}
@@ -4557,46 +4640,47 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
switch (varDsc->lvType)
{
- case TYP_STRUCT:
- if (!varDsc->lvStructDoubleAlign)
- {
- break;
- }
- __fallthrough;
+ case TYP_STRUCT:
+ if (!varDsc->lvStructDoubleAlign)
+ {
+ break;
+ }
+ __fallthrough;
- case TYP_DOUBLE:
- case TYP_LONG:
- {
- //
- // Let's assign offsets to arg1, a double in r2. argOffs has to be 4 not 8.
- //
- // ------- CALLER SP -------
- // r3
- // r2 double -- argOffs = 4, but it doesn't need to be skipped, because there is no skipping.
- // r1 VACookie -- argOffs = 0
- // -------------------------
- //
- // Consider argOffs as if it accounts for number of prespilled registers before the current
- // register. In the above example, for r2, it is r1 that is prespilled, but since r1 is
- // accounted for by argOffs being 4, there should have been no skipping. Instead, if we didn't
- // assign r1 to any variable, then argOffs would still be 0 which implies it is not accounting
- // for r1, equivalently r1 is skipped.
- //
- // If prevRegsSize is unaccounted for by a corresponding argOffs, we must have skipped a register.
- int prevRegsSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegArg & (regMask - 1)) * TARGET_POINTER_SIZE;
- if (argOffs < prevRegsSize)
+ case TYP_DOUBLE:
+ case TYP_LONG:
{
- // We must align up the argOffset to a multiple of 8 to account for skipped registers.
- argOffs = roundUp(argOffs, 2 * TARGET_POINTER_SIZE);
+ //
+ // Let's assign offsets to arg1, a double in r2. argOffs has to be 4 not 8.
+ //
+ // ------- CALLER SP -------
+ // r3
+ // r2 double -- argOffs = 4, but it doesn't need to be skipped, because there is no skipping.
+ // r1 VACookie -- argOffs = 0
+ // -------------------------
+ //
+ // Consider argOffs as if it accounts for number of prespilled registers before the current
+ // register. In the above example, for r2, it is r1 that is prespilled, but since r1 is
+ // accounted for by argOffs being 4, there should have been no skipping. Instead, if we didn't
+ // assign r1 to any variable, then argOffs would still be 0 which implies it is not accounting
+ // for r1, equivalently r1 is skipped.
+ //
+ // If prevRegsSize is unaccounted for by a corresponding argOffs, we must have skipped a register.
+ int prevRegsSize =
+ genCountBits(codeGen->regSet.rsMaskPreSpillRegArg & (regMask - 1)) * TARGET_POINTER_SIZE;
+ if (argOffs < prevRegsSize)
+ {
+ // We must align up the argOffset to a multiple of 8 to account for skipped registers.
+ argOffs = roundUp(argOffs, 2 * TARGET_POINTER_SIZE);
+ }
+ // We should've skipped only a single register.
+ assert(argOffs == prevRegsSize);
}
- // We should've skipped only a single register.
- assert(argOffs == prevRegsSize);
- }
- break;
-
- default:
- // No alignment of argOffs required
break;
+
+ default:
+ // No alignment of argOffs required
+ break;
}
varDsc->lvStkOffs = argOffs;
argOffs += argSize;
@@ -4623,10 +4707,10 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
// struct we take the lvIsRegArg path above with "codeGen->regSet.rsMaskPreSpillRegArg &" matching.
// Next when we calculate the argOffs for the second 16-byte struct we have an argOffs
// of 16, which appears to be aligned properly so we don't skip a stack slot.
- //
+ //
// To fix this we must recover the actual OutArg offset by subtracting off the
// sizeof of the PreSpill register args.
- // Then we align this offset to a multiple of 8 and add back the sizeof
+ // Then we align this offset to a multiple of 8 and add back the sizeof
// of the PreSpill register args.
//
// Dev11 Bug 71767: failure of assert(sizeofPreSpillRegArgs <= argOffs)
@@ -4639,7 +4723,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
// signature type information for the variadic arguments. However, due to alignment,
// we have skipped a register that doesn't have a corresponding symbol. Make up
// for that by increasing argOffs here.
- //
+ //
int sizeofPreSpillRegArgs = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
@@ -4648,7 +4732,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
// This can only happen if we skipped the last register spot because current stk arg
// is a struct requiring alignment or a pre-spill alignment was required because the
// first reg arg needed alignment.
- //
+ //
// Example 1: First Stk Argument requiring alignment in vararg case (same as above comment.)
// Signature (int a0, int a1, int a2, struct {long} a3, ...)
//
@@ -4680,16 +4764,15 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
#endif
{
bool cond = ((info.compIsVarArgs || opts.compUseSoftFP) &&
- // Does cur stk arg require double alignment?
- ((varDsc->lvType == TYP_STRUCT && varDsc->lvStructDoubleAlign) ||
- (varDsc->lvType == TYP_DOUBLE) ||
- (varDsc->lvType == TYP_LONG))
- ) ||
- // Did first reg arg require alignment?
- (codeGen->regSet.rsMaskPreSpillAlign & genRegMask(REG_ARG_LAST));
+ // Does cur stk arg require double alignment?
+ ((varDsc->lvType == TYP_STRUCT && varDsc->lvStructDoubleAlign) ||
+ (varDsc->lvType == TYP_DOUBLE) || (varDsc->lvType == TYP_LONG))) ||
+ // Did first reg arg require alignment?
+ (codeGen->regSet.rsMaskPreSpillAlign & genRegMask(REG_ARG_LAST));
noway_assert(cond);
- noway_assert(sizeofPreSpillRegArgs <= argOffs + TARGET_POINTER_SIZE); // at most one register of alignment
+ noway_assert(sizeofPreSpillRegArgs <=
+ argOffs + TARGET_POINTER_SIZE); // at most one register of alignment
}
argOffs = sizeofPreSpillRegArgs;
}
@@ -4699,21 +4782,21 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
switch (varDsc->lvType)
{
- case TYP_STRUCT:
- if (!varDsc->lvStructDoubleAlign)
- break;
+ case TYP_STRUCT:
+ if (!varDsc->lvStructDoubleAlign)
+ break;
- __fallthrough;
+ __fallthrough;
- case TYP_DOUBLE:
- case TYP_LONG:
- // We must align up the argOffset to a multiple of 8
- argOffs = roundUp(argOffsWithoutPreSpillRegArgs, 2 * TARGET_POINTER_SIZE) + sizeofPreSpillRegArgs;
- break;
+ case TYP_DOUBLE:
+ case TYP_LONG:
+ // We must align up the argOffset to a multiple of 8
+ argOffs = roundUp(argOffsWithoutPreSpillRegArgs, 2 * TARGET_POINTER_SIZE) + sizeofPreSpillRegArgs;
+ break;
- default:
- // No alignment of argOffs required
- break;
+ default:
+ // No alignment of argOffs required
+ break;
}
#endif // _TARGET_ARM_
@@ -4721,41 +4804,43 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize
}
// For struct promoted parameters we need to set the offsets for both LclVars.
- //
- // For a dependent promoted struct we also assign the struct fields stack offset
+ //
+ // For a dependent promoted struct we also assign the struct fields stack offset
CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(_TARGET_64BIT_)
if ((varDsc->TypeGet() == TYP_LONG) && varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 2);
- fieldVarNum = varDsc->lvFieldLclStart;
- lvaTable[fieldVarNum].lvStkOffs = varDsc->lvStkOffs;
+ fieldVarNum = varDsc->lvFieldLclStart;
+ lvaTable[fieldVarNum].lvStkOffs = varDsc->lvStkOffs;
lvaTable[fieldVarNum + 1].lvStkOffs = varDsc->lvStkOffs + genTypeSize(TYP_INT);
}
else
#endif // !defined(_TARGET_64BIT_)
if (varDsc->lvPromotedStruct())
- {
- lvaPromotionType promotionType = lvaGetPromotionType(varDsc);
-
- if (promotionType == PROMOTION_TYPE_DEPENDENT)
- {
- noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+ {
+ lvaPromotionType promotionType = lvaGetPromotionType(varDsc);
- assert(fieldVarNum == varDsc->lvFieldLclStart);
- lvaTable[fieldVarNum].lvStkOffs = varDsc->lvStkOffs;
- }
- }
- // For an independent promoted struct field we also assign the parent struct stack offset
- else if (varDsc->lvIsStructField)
+ if (promotionType == PROMOTION_TYPE_DEPENDENT)
{
- noway_assert(varDsc->lvParentLcl < lvaCount);
- lvaTable[varDsc->lvParentLcl].lvStkOffs = varDsc->lvStkOffs;
+ noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+
+ assert(fieldVarNum == varDsc->lvFieldLclStart);
+ lvaTable[fieldVarNum].lvStkOffs = varDsc->lvStkOffs;
}
+ }
+ // For an independent promoted struct field we also assign the parent struct stack offset
+ else if (varDsc->lvIsStructField)
+ {
+ noway_assert(varDsc->lvParentLcl < lvaCount);
+ lvaTable[varDsc->lvParentLcl].lvStkOffs = varDsc->lvStkOffs;
+ }
if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg)
+ {
argOffs += argSize;
+ }
return argOffs;
}
@@ -4770,12 +4855,14 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
{
int stkOffs = 0;
// codeGen->isFramePointerUsed is set in regalloc phase. Initialize it to a guess for pre-regalloc layout.
- if (lvaDoneFrameLayout <= PRE_REGALLOC_FRAME_LAYOUT)
+ if (lvaDoneFrameLayout <= PRE_REGALLOC_FRAME_LAYOUT)
+ {
codeGen->setFramePointerUsed(codeGen->isFramePointerRequired());
+ }
#ifdef _TARGET_XARCH_
// On x86/amd64, the return address has already been pushed by the call instruction in the caller.
- stkOffs -= sizeof(void *); // return address;
+ stkOffs -= sizeof(void*); // return address;
// TODO-AMD64-CQ: for X64 eventually this should be pushed with all the other
// calleeregs. When you fix this, you'll also need to fix
@@ -4786,19 +4873,19 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
}
#endif //_TARGET_XARCH_
- int preSpillSize = 0;
+ int preSpillSize = 0;
bool mustDoubleAlign = false;
#ifdef _TARGET_ARM_
- mustDoubleAlign = true;
- preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
+ mustDoubleAlign = true;
+ preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
#else // !_TARGET_ARM_
- #if DOUBLE_ALIGN
+#if DOUBLE_ALIGN
if (genDoubleAlign())
{
- mustDoubleAlign = true; // X86 only
+ mustDoubleAlign = true; // X86 only
}
- #endif
+#endif
#endif // !_TARGET_ARM_
#ifdef _TARGET_ARM64_
@@ -4826,7 +4913,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES;
}
-#else // !_TARGET_ARM64_
+#else // !_TARGET_ARM64_
stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES;
#endif // !_TARGET_ARM64_
@@ -4834,7 +4921,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
#ifdef _TARGET_AMD64_
// In case of Amd64 compCalleeRegsPushed includes float regs (Xmm6-xmm15) that
- // need to be pushed. But Amd64 doesn't support push/pop of xmm registers.
+ // need to be pushed. But Amd64 doesn't support push/pop of xmm registers.
// Instead we need to allocate space for them on the stack and save them in prolog.
// Therefore, we consider xmm registers being saved while computing stack offsets
// but space for xmm registers is considered part of compLclFrameSize.
@@ -4842,7 +4929,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
// 1) We need to save the entire 128-bits of xmm register to stack, since amd64
// prolog unwind codes allow encoding of an instruction that stores the entire xmm reg
// at an offset relative to SP
- // 2) We adjust frame size so that SP is aligned at 16-bytes after pushing integer registers.
+ // 2) We adjust frame size so that SP is aligned at 16-bytes after pushing integer registers.
// This means while saving the first xmm register to its allocated stack location we might
// have to skip 8-bytes. The reason for padding is to use efficient "movaps" to save/restore
// xmm registers to/from stack to match Jit64 codegen. Without the aligning on 16-byte
@@ -4866,7 +4953,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
#ifdef DEBUG
if (verbose)
{
- printf("\nAdding VS quirk stack padding of %d bytes between save-reg area and locals\n", compVSQuirkStackPaddingNeeded);
+ printf("\nAdding VS quirk stack padding of %d bytes between save-reg area and locals\n",
+ compVSQuirkStackPaddingNeeded);
}
#endif // DEBUG
@@ -4881,8 +4969,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
// On ARM/ARM64, if we need a PSPSym, allocate it first, before anything else, including
// padding (so we can avoid computing the same padding in the funclet
// frame). Note that there is no special padding requirement for the PSPSym.
- noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer
- assert(lvaPSPSym != BAD_VAR_NUM); // We should have created the PSPSym variable
+ noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer
+ assert(lvaPSPSym != BAD_VAR_NUM); // We should have created the PSPSym variable
stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs);
}
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARMARCH_)
@@ -4891,43 +4979,43 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
{
if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)
{
- // Allocate a pointer sized stack slot, since we may need to double align here
+ // Allocate a pointer sized stack slot, since we may need to double align here
// when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT
//
lvaIncrementFrameSize(TARGET_POINTER_SIZE);
stkOffs -= TARGET_POINTER_SIZE;
// If we have any TYP_LONG, TYP_DOUBLE or double aligned structs
- // then we need to allocate a second pointer sized stack slot,
+ // then we need to allocate a second pointer sized stack slot,
// since we may need to double align that LclVar when we see it
// in the loop below. We will just always do this so that the
// offsets that we calculate for the stack frame will always
- // be greater (or equal) to what they can be in the final layout.
+ // be greater (or equal) to what they can be in the final layout.
//
lvaIncrementFrameSize(TARGET_POINTER_SIZE);
stkOffs -= TARGET_POINTER_SIZE;
}
else // FINAL_FRAME_LAYOUT
{
- if (((stkOffs+preSpillSize) % (2*TARGET_POINTER_SIZE)) != 0)
+ if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0)
{
lvaIncrementFrameSize(TARGET_POINTER_SIZE);
stkOffs -= TARGET_POINTER_SIZE;
}
- // We should now have a double-aligned (stkOffs+preSpillSize)
- noway_assert(((stkOffs+preSpillSize) % (2*TARGET_POINTER_SIZE)) == 0);
+ // We should now have a double-aligned (stkOffs+preSpillSize)
+ noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0);
}
}
if (lvaMonAcquired != BAD_VAR_NUM)
{
- // This var must go first, in what is called the 'frame header' for EnC so that it is
+ // This var must go first, in what is called the 'frame header' for EnC so that it is
// preserved when remapping occurs. See vm\eetwain.cpp for detailed comment specifying frame
// layout requirements for EnC to work.
stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaMonAcquired, lvaLclSize(lvaMonAcquired), stkOffs);
- }
+ }
- if (opts.compNeedSecurityCheck)
+ if (opts.compNeedSecurityCheck)
{
#ifdef JIT32_GCENCODER
/* This can't work without an explicit frame, so make sure */
@@ -5001,14 +5089,15 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
non-pointer temps
*/
- enum Allocation{
- ALLOC_NON_PTRS = 0x1, // assign offsets to non-ptr
- ALLOC_PTRS = 0x2, // Second pass, assign offsets to tracked ptrs
- ALLOC_UNSAFE_BUFFERS = 0x4,
- ALLOC_UNSAFE_BUFFERS_WITH_PTRS = 0x8
+ enum Allocation
+ {
+ ALLOC_NON_PTRS = 0x1, // assign offsets to non-ptr
+ ALLOC_PTRS = 0x2, // Second pass, assign offsets to tracked ptrs
+ ALLOC_UNSAFE_BUFFERS = 0x4,
+ ALLOC_UNSAFE_BUFFERS_WITH_PTRS = 0x8
};
- UINT alloc_order[5];
-
+ UINT alloc_order[5];
+
unsigned int cur = 0;
if (compGSReorderStackLayout)
@@ -5022,7 +5111,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
}
}
- bool tempsAllocated = false;
+ bool tempsAllocated = false;
#ifdef _TARGET_ARM_
// On ARM, SP based offsets use smaller encoding. Since temps are relatively
@@ -5037,16 +5126,16 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
// above the vars. Otherwise we place them after the vars (at the
// bottom of the frame).
noway_assert(!tempsAllocated);
- stkOffs = lvaAllocateTemps(stkOffs, mustDoubleAlign);
+ stkOffs = lvaAllocateTemps(stkOffs, mustDoubleAlign);
tempsAllocated = true;
}
alloc_order[cur++] = ALLOC_NON_PTRS;
- if (opts.compDbgEnC)
+ if (opts.compDbgEnC)
{
/* We will use just one pass, and assign offsets to all variables */
- alloc_order[cur-1] |= ALLOC_PTRS;
+ alloc_order[cur - 1] |= ALLOC_PTRS;
noway_assert(compGSReorderStackLayout == false);
}
else
@@ -5059,35 +5148,35 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS_WITH_PTRS;
alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS;
}
-
+
alloc_order[cur] = 0;
- noway_assert(cur < sizeof(alloc_order)/sizeof(alloc_order[0]));
-
+ noway_assert(cur < sizeof(alloc_order) / sizeof(alloc_order[0]));
+
// Force first pass to happen
- UINT assignMore = 0xFFFFFFFF;
+ UINT assignMore = 0xFFFFFFFF;
bool have_LclVarDoubleAlign = false;
for (cur = 0; alloc_order[cur]; cur++)
{
if ((assignMore & alloc_order[cur]) == 0)
+ {
continue;
-
+ }
+
assignMore = 0;
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
- {
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
+ {
/* Ignore field locals of the promotion type PROMOTION_TYPE_FIELD_DEPENDENT.
In other words, we will not calculate the "base" address of the struct local if
- the promotion type is PROMOTION_TYPE_FIELD_DEPENDENT.
- */
+ the promotion type is PROMOTION_TYPE_FIELD_DEPENDENT.
+ */
if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
- {
+ {
continue;
}
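A minimal standalone sketch of the allocation-order scheme visible in the hunk above: each alloc_order entry is a bitmask of Allocation flags, a zero entry terminates the list, and the loop walks the passes in order (the enum values and the comment wording come from the diff; the wrapper function and placeholder body are illustrative only, e.g. the simple non-ptrs-then-tracked-ptrs order suggested by the enum comments).

    #include <cstdio>

    enum Allocation
    {
        ALLOC_NON_PTRS                 = 0x1, // assign offsets to non-ptr locals
        ALLOC_PTRS                     = 0x2, // second pass: tracked ptr locals
        ALLOC_UNSAFE_BUFFERS           = 0x4,
        ALLOC_UNSAFE_BUFFERS_WITH_PTRS = 0x8
    };

    int main()
    {
        // Example order when EnC and GS stack reordering are off: non-ptrs, then ptrs.
        unsigned alloc_order[5] = {ALLOC_NON_PTRS, ALLOC_PTRS, 0, 0, 0};

        for (unsigned cur = 0; alloc_order[cur]; cur++)
        {
            // In the real code this is where locals matching alloc_order[cur]
            // receive their virtual frame offsets.
            printf("pass %u: mask 0x%x\n", cur, alloc_order[cur]);
        }
        return 0;
    }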
@@ -5101,8 +5190,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
bool allocateOnFrame = varDsc->lvOnFrame;
- if (varDsc->lvRegister &&
- (lvaDoneFrameLayout == REGALLOC_FRAME_LAYOUT) &&
+ if (varDsc->lvRegister && (lvaDoneFrameLayout == REGALLOC_FRAME_LAYOUT) &&
((varDsc->TypeGet() != TYP_LONG) || (varDsc->lvOtherReg != REG_STK)))
{
allocateOnFrame = false;
@@ -5110,7 +5198,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
/* Ignore variables that are not on the stack frame */
- if (!allocateOnFrame)
+ if (!allocateOnFrame)
{
/* For EnC, all variables have to be allocated space on the
stack, even though they may actually be enregistered. This
@@ -5119,13 +5207,17 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
*/
if (!opts.compDbgEnC)
+ {
continue;
- else if (lclNum >= info.compLocalsCount) // ignore temps for EnC
+ }
+ else if (lclNum >= info.compLocalsCount)
+ { // ignore temps for EnC
continue;
- }
+ }
+ }
else if (lvaGSSecurityCookie == lclNum && getNeedsGSSecurityCookie())
{
- continue; // This is allocated outside of this loop.
+ continue; // This is allocated outside of this loop.
}
// These need to be located as the very first variables (highest memory address)
@@ -5136,15 +5228,16 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
#else
lclNum == lvaShadowSPslotsVar ||
#endif // FEATURE_EH_FUNCLETS
- lclNum == lvaLocAllocSPvar ||
- lclNum == lvaSecurityObject)
+ lclNum == lvaLocAllocSPvar || lclNum == lvaSecurityObject)
{
assert(varDsc->lvStkOffs != BAD_STK_OFFS);
continue;
}
if (lclNum == lvaMonAcquired)
+ {
continue;
+ }
// This should be low on the stack. Hence, it will be assigned later.
if (lclNum == lvaStubArgumentVar)
@@ -5154,7 +5247,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
#endif
continue;
}
-
+
// This should be low on the stack. Hence, it will be assigned later.
if (lclNum == lvaInlinedPInvokeFrameVar)
{
@@ -5162,11 +5255,11 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
continue;
}
- if (varDsc->lvIsParam)
+ if (varDsc->lvIsParam)
{
#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
- // On Windows AMD64 we can use the caller-reserved stack area that is already setup
+ // On Windows AMD64 we can use the caller-reserved stack area that is already setup
assert(varDsc->lvStkOffs != BAD_STK_OFFS);
continue;
@@ -5175,14 +5268,15 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
// A register argument that is not enregistered ends up as
// a local variable which will need stack frame space.
//
- if (!varDsc->lvIsRegArg)
+ if (!varDsc->lvIsRegArg)
continue;
#ifdef _TARGET_ARM64_
if (info.compIsVarArgs)
{
// Stack offset to varargs (parameters) should point to home area which will be preallocated.
- varDsc->lvStkOffs = -initialStkOffs + genMapIntRegNumToRegArgNum(varDsc->GetArgReg()) * REGSIZE_BYTES;
+ varDsc->lvStkOffs =
+ -initialStkOffs + genMapIntRegNumToRegArgNum(varDsc->GetArgReg()) * REGSIZE_BYTES;
continue;
}
#endif
@@ -5230,7 +5324,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
continue;
}
}
- else
+ else
{
if ((alloc_order[cur] & ALLOC_NON_PTRS) == 0)
{
@@ -5241,38 +5335,37 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
/* Need to align the offset? */
- if (mustDoubleAlign && (
- varDsc->lvType == TYP_DOUBLE // Align doubles for ARM and x86
+ if (mustDoubleAlign && (varDsc->lvType == TYP_DOUBLE // Align doubles for ARM and x86
#ifdef _TARGET_ARM_
- || varDsc->lvType == TYP_LONG // Align longs for ARM
+ || varDsc->lvType == TYP_LONG // Align longs for ARM
#endif
#ifndef _TARGET_64BIT_
- || varDsc->lvStructDoubleAlign // Align when lvStructDoubleAlign is true
-#endif // !_TARGET_64BIT_
- ))
+ || varDsc->lvStructDoubleAlign // Align when lvStructDoubleAlign is true
+#endif // !_TARGET_64BIT_
+ ))
{
noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0);
if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) && !have_LclVarDoubleAlign)
{
// If this is the first TYP_LONG, TYP_DOUBLE or double aligned struct
- // then we have seen in this loop then we allocate a pointer sized
- // stack slot since we may need to double align this LclVar
- // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT
+ // then we have seen in this loop then we allocate a pointer sized
+ // stack slot since we may need to double align this LclVar
+ // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT
//
lvaIncrementFrameSize(TARGET_POINTER_SIZE);
stkOffs -= TARGET_POINTER_SIZE;
}
else
{
- if (((stkOffs+preSpillSize) % (2*TARGET_POINTER_SIZE)) != 0)
+ if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0)
{
lvaIncrementFrameSize(TARGET_POINTER_SIZE);
stkOffs -= TARGET_POINTER_SIZE;
}
- // We should now have a double-aligned (stkOffs+preSpillSize)
- noway_assert(((stkOffs+preSpillSize) % (2*TARGET_POINTER_SIZE)) == 0);
+ // We should now have a double-aligned (stkOffs+preSpillSize)
+ noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0);
}
// Remember that we had to double align a LclVar
@@ -5282,14 +5375,14 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
// Reserve the stack space for this variable
stkOffs = lvaAllocLocalAndSetVirtualOffset(lclNum, lvaLclSize(lclNum), stkOffs);
#ifdef _TARGET_ARM64_
- // If we have an incoming register argument that has a struct promoted field
+ // If we have an incoming register argument that has a struct promoted field
// then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar
//
if (varDsc->lvIsRegArg && varDsc->lvPromotedStruct())
{
- noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
- unsigned fieldVarNum = varDsc->lvFieldLclStart;
+ unsigned fieldVarNum = varDsc->lvFieldLclStart;
lvaTable[fieldVarNum].lvStkOffs = varDsc->lvStkOffs;
}
#endif
@@ -5301,7 +5394,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
// LOCALLOC used, but we have no unsafe buffer. Allocated cookie last, close to localloc buffer.
stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs);
}
-
+
if (tempsAllocated == false)
{
/*-------------------------------------------------------------------------
@@ -5323,7 +5416,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
// lvaInlinedPInvokeFrameVar and lvaStubArgumentVar need to be assigned last
// Important: The stack walker depends on lvaStubArgumentVar immediately
// following lvaInlinedPInvokeFrameVar in the frame.
-
+
if (lvaStubArgumentVar != BAD_VAR_NUM)
{
#ifdef JIT32_GCENCODER
@@ -5335,14 +5428,15 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
if (lvaInlinedPInvokeFrameVar != BAD_VAR_NUM)
{
noway_assert(codeGen->isFramePointerUsed());
- stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaInlinedPInvokeFrameVar, lvaLclSize(lvaInlinedPInvokeFrameVar), stkOffs);
+ stkOffs =
+ lvaAllocLocalAndSetVirtualOffset(lvaInlinedPInvokeFrameVar, lvaLclSize(lvaInlinedPInvokeFrameVar), stkOffs);
}
if (mustDoubleAlign)
{
if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)
{
- // Allocate a pointer sized stack slot, since we may need to double align here
+ // Allocate a pointer sized stack slot, since we may need to double align here
// when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT
//
lvaIncrementFrameSize(TARGET_POINTER_SIZE);
@@ -5351,11 +5445,11 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
if (have_LclVarDoubleAlign)
{
// If we have any TYP_LONG, TYP_DOUBLE or double aligned structs
- // the we need to allocate a second pointer sized stack slot,
- // since we may need to double align the last LclVar that we saw
+ // the we need to allocate a second pointer sized stack slot,
+ // since we may need to double align the last LclVar that we saw
// in the loop above. We do this so that the offsets that we
// calculate for the stack frame are always greater than they will
- // be in the final layout.
+ // be in the final layout.
//
lvaIncrementFrameSize(TARGET_POINTER_SIZE);
stkOffs -= TARGET_POINTER_SIZE;
@@ -5363,13 +5457,13 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
}
else // FINAL_FRAME_LAYOUT
{
- if (((stkOffs+preSpillSize) % (2*TARGET_POINTER_SIZE)) != 0)
+ if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0)
{
lvaIncrementFrameSize(TARGET_POINTER_SIZE);
stkOffs -= TARGET_POINTER_SIZE;
}
- // We should now have a double-aligned (stkOffs+preSpillSize)
- noway_assert(((stkOffs+preSpillSize) % (2*TARGET_POINTER_SIZE)) == 0);
+ // We should now have a double-aligned (stkOffs+preSpillSize)
+ noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0);
}
}
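The same double-align bump appears several times in these hunks; the following self-contained sketch works through the arithmetic once, assuming ARM-sized values (a 4-byte TARGET_POINTER_SIZE and an 8-byte pre-spill area). The helper name and hard-coded sizes are stand-ins for this example, not JIT code.

    #include <cassert>

    static const int kPtrSize = 4; // stands in for TARGET_POINTER_SIZE on ARM/x86

    // Bump a (negative) virtual offset by one pointer size whenever
    // (stkOffs + preSpillSize) is not a multiple of 2 * kPtrSize.
    int padForDoubleAlign(int stkOffs, int preSpillSize)
    {
        if (((stkOffs + preSpillSize) % (2 * kPtrSize)) != 0)
        {
            stkOffs -= kPtrSize; // the real code also calls lvaIncrementFrameSize
        }
        return stkOffs;
    }

    int main()
    {
        // preSpillSize = 8 (two pre-spilled argument registers), stkOffs = -20:
        // (-20 + 8) % 8 == -4, so one 4-byte pad is added and -24 satisfies the assert.
        int stkOffs = padForDoubleAlign(-20, 8);
        assert(((stkOffs + 8) % (2 * kPtrSize)) == 0);
        return 0;
    }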
@@ -5379,8 +5473,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
// On AMD64, if we need a PSPSym, allocate it last, immediately above the outgoing argument
// space. Any padding will be higher on the stack than this
// (including the padding added by lvaAlignFrame()).
- noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer
- assert(lvaPSPSym != BAD_VAR_NUM); // We should have created the PSPSym variable
+ noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer
+ assert(lvaPSPSym != BAD_VAR_NUM); // We should have created the PSPSym variable
stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs);
}
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_AMD64_)
@@ -5394,7 +5488,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
#endif // _TARGET_ARM64_
#if FEATURE_FIXED_OUT_ARGS
- if (lvaOutgoingArgSpaceSize > 0)
+ if (lvaOutgoingArgSpaceSize > 0)
{
#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI) // No 4 slots for outgoing params on System V.
noway_assert(lvaOutgoingArgSpaceSize >= (4 * sizeof(void*)));
@@ -5402,7 +5496,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
noway_assert((lvaOutgoingArgSpaceSize % sizeof(void*)) == 0);
// Give it a value so we can avoid asserts in CHK builds.
- // Since this will always use an SP relative offset of zero
+ // Since this will always use an SP relative offset of zero
// at the end of lvaFixVirtualFrameOffsets, it will be set to absolute '0'
stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaOutgoingArgSpaceVar, lvaLclSize(lvaOutgoingArgSpaceVar), stkOffs);
@@ -5422,11 +5516,13 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
#ifdef _TARGET_XARCH_
if (codeGen->doubleAlignOrFramePointerUsed())
- pushedCount += 1; // pushed EBP (frame pointer)
- pushedCount += 1; // pushed PC (return address)
+ {
+ pushedCount += 1; // pushed EBP (frame pointer)
+ }
+ pushedCount += 1; // pushed PC (return address)
#endif
- noway_assert(compLclFrameSize == (unsigned)-(stkOffs + (pushedCount * (int) sizeof(void *))));
+ noway_assert(compLclFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)sizeof(void*))));
}
int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs)
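For readers checking the final noway_assert above, here is one worked instance of the invariant under x86-style assumptions (4-byte pointers, EBP-based frame, so pushedCount is 2 for the pushed EBP plus the return address); the concrete numbers are made up for illustration.

    #include <cassert>

    int main()
    {
        const int kPtrSize    = 4;   // sizeof(void*) on x86
        const int pushedCount = 2;   // pushed EBP + pushed return address
        const int stkOffs     = -32; // final virtual offset after laying out locals/temps

        // The locals portion is what remains once the pushed slots are excluded:
        // -(stkOffs + pushedCount * kPtrSize) == -(-32 + 8) == 24 bytes.
        const unsigned compLclFrameSize = 24;
        assert(compLclFrameSize == (unsigned)-(stkOffs + pushedCount * kPtrSize));
        return 0;
    }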
@@ -5449,13 +5545,11 @@ int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, i
//
// TYP_SIMD structs locals have alignment preference given by getSIMDTypeAlignment() for
// better performance.
- if ((size >= 8) &&
- ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) ||
- ((stkOffs % 8) != 0)
+ if ((size >= 8) && ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) || ((stkOffs % 8) != 0)
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
- || lclVarIsSIMDType(lclNum)
+ || lclVarIsSIMDType(lclNum)
#endif
- ))
+ ))
{
// Note that stack offsets are negative
assert(stkOffs < 0);
@@ -5471,7 +5565,7 @@ int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, i
{
if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)
{
- pad = alignment-1;
+ pad = alignment - 1;
// Note that all the objects will probably be misaligned, but we'll fix that in final layout.
}
else
@@ -5502,16 +5596,13 @@ int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, i
{
printf("Pad ");
gtDispLclVar(lclNum, /*pad*/ false);
- printf(", size=%d, stkOffs=%c0x%x, pad=%d\n",
- size,
- stkOffs < 0 ? '-' : '+',
- stkOffs < 0 ? -stkOffs : stkOffs,
- pad);
+ printf(", size=%d, stkOffs=%c0x%x, pad=%d\n", size, stkOffs < 0 ? '-' : '+',
+ stkOffs < 0 ? -stkOffs : stkOffs, pad);
}
#endif
}
#endif // _TARGET_64BIT_
-
+
/* Reserve space on the stack by bumping the frame size */
lvaIncrementFrameSize(size);
@@ -5523,10 +5614,7 @@ int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, i
{
printf("Assign ");
gtDispLclVar(lclNum, /*pad*/ false);
- printf(", size=%d, stkOffs=%c0x%x\n",
- size,
- stkOffs < 0 ? '-' : '+',
- stkOffs < 0 ? -stkOffs : stkOffs);
+ printf(", size=%d, stkOffs=%c0x%x\n", size, stkOffs < 0 ? '-' : '+', stkOffs < 0 ? -stkOffs : stkOffs);
}
#endif
@@ -5545,7 +5633,7 @@ int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, i
bool Compiler::lvaIsCalleeSavedIntRegCountEven()
{
unsigned regsPushed = compCalleeRegsPushed + (codeGen->isFramePointerUsed() ? 1 : 0);
- return (regsPushed % (16/REGSIZE_BYTES)) == 0;
+ return (regsPushed % (16 / REGSIZE_BYTES)) == 0;
}
#endif //_TARGET_AMD64_
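A small self-contained illustration of the evenness test above, assuming AMD64's REGSIZE_BYTES of 8: 16 / 8 == 2, so the test is simply whether the number of 8-byte pushes (callee-saved registers plus the frame pointer when it is used) is even, which is the property the later frame-alignment logic keys off. The free function wrapping it is an assumption made for the example.

    #include <cstdio>

    static const unsigned kRegSizeBytes = 8; // stands in for REGSIZE_BYTES on AMD64

    bool isCalleeSavedIntRegCountEven(unsigned calleeRegsPushed, bool framePointerUsed)
    {
        unsigned regsPushed = calleeRegsPushed + (framePointerUsed ? 1 : 0);
        return (regsPushed % (16 / kRegSizeBytes)) == 0; // i.e. regsPushed % 2 == 0
    }

    int main()
    {
        // 3 callee-saved pushes plus RBP -> 4 pushes of 8 bytes -> count is even.
        printf("%d\n", isCalleeSavedIntRegCountEven(3, true)); // prints 1
        return 0;
    }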
@@ -5566,12 +5654,12 @@ void Compiler::lvaAlignFrame()
else if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)
{
// If we are not doing final layout, we don't know the exact value of compLclFrameSize
- // and thus do not know how much we will need to add in order to be aligned.
+ // and thus do not know how much we will need to add in order to be aligned.
// We add 8 so compLclFrameSize is still a multiple of 8.
lvaIncrementFrameSize(8);
}
assert((compLclFrameSize % 8) == 0);
-
+
// Ensure that the stack is always 16-byte aligned by grabbing an unused QWORD
// if needed, but off by 8 because of the return value.
// And don't forget that compCalleeRegsPused does *not* include RBP if we are
@@ -5592,7 +5680,7 @@ void Compiler::lvaAlignFrame()
// The frame alignment logic won't kick in. This flags takes care of the AMD64-Unix case by remembering that there
// are calls and making sure the frame alignment logic is executed.
bool stackNeedsAlignment = (compLclFrameSize != 0 || opts.compNeedToAlignFrame);
-#else // !UNIX_AMD64_ABI
+#else // !UNIX_AMD64_ABI
bool stackNeedsAlignment = compLclFrameSize != 0;
#endif // !UNIX_AMD64_ABI
if ((!codeGen->isFramePointerUsed() && (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)) ||
@@ -5613,21 +5701,20 @@ void Compiler::lvaAlignFrame()
else if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)
{
// If we are not doing final layout, we don't know the exact value of compLclFrameSize
- // and thus do not know how much we will need to add in order to be aligned.
+ // and thus do not know how much we will need to add in order to be aligned.
// We add 8 so compLclFrameSize is still a multiple of 8.
lvaIncrementFrameSize(8);
}
assert((compLclFrameSize % 8) == 0);
-
+
// Ensure that the stack is always 16-byte aligned by grabbing an unused QWORD
// if needed.
- bool regPushedCountAligned = (compCalleeRegsPushed % (16/REGSIZE_BYTES)) == 0;
+ bool regPushedCountAligned = (compCalleeRegsPushed % (16 / REGSIZE_BYTES)) == 0;
bool lclFrameSizeAligned = (compLclFrameSize % 16) == 0;
// If this isn't the final frame layout, assume we have to push an extra QWORD
// Just so the offsets are true upper limits.
- if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) ||
- (regPushedCountAligned != lclFrameSizeAligned))
+ if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) || (regPushedCountAligned != lclFrameSizeAligned))
{
lvaIncrementFrameSize(REGSIZE_BYTES);
}
@@ -5637,13 +5724,12 @@ void Compiler::lvaAlignFrame()
// Ensure that stack offsets will be double-aligned by grabbing an unused DWORD if needed.
//
bool lclFrameSizeAligned = (compLclFrameSize % sizeof(double)) == 0;
- bool regPushedCountAligned = ((compCalleeRegsPushed + genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)))
- % (sizeof(double) / sizeof(void *))) == 0;
-
+ bool regPushedCountAligned = ((compCalleeRegsPushed + genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true))) %
+ (sizeof(double) / sizeof(void*))) == 0;
if (regPushedCountAligned != lclFrameSizeAligned)
{
- lvaIncrementFrameSize(sizeof(void *));
+ lvaIncrementFrameSize(sizeof(void*));
}
#elif defined(_TARGET_X86_)
@@ -5670,27 +5756,27 @@ void Compiler::lvaAlignFrame()
*/
void Compiler::lvaAssignFrameOffsetsToPromotedStructs()
{
- LclVarDsc * varDsc = lvaTable;
+ LclVarDsc* varDsc = lvaTable;
for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++, varDsc++)
- {
+ {
// For promoted struct fields that are params, we will
// assign their offsets in lvaAssignVirtualFrameOffsetToArg().
- // This is not true for the System V systems since there is no
+ // This is not true for the System V systems since there is no
// outgoing args space. Assign the dependently promoted fields properly.
//
- if (varDsc->lvIsStructField
+ if (varDsc->lvIsStructField
#ifndef UNIX_AMD64_ABI
- // For System V platforms there is no outgoing args space.
- // A register passed struct arg is homed on the stack in a separate local var.
- // The offset of these structs is already calculated in lvaAssignVirtualFrameOffsetToArg methos.
- // Make sure the code below is not executed for these structs and the offset is not changed.
+ // For System V platforms there is no outgoing args space.
+ // A register passed struct arg is homed on the stack in a separate local var.
+ // The offset of these structs is already calculated in lvaAssignVirtualFrameOffsetToArg methos.
+ // Make sure the code below is not executed for these structs and the offset is not changed.
&& !varDsc->lvIsParam
#endif // UNIX_AMD64_ABI
)
{
- LclVarDsc * parentvarDsc = &lvaTable[varDsc->lvParentLcl];
+ LclVarDsc* parentvarDsc = &lvaTable[varDsc->lvParentLcl];
lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc);
-
+
if (promotionType == PROMOTION_TYPE_INDEPENDENT)
{
// The stack offset for these field locals must have been calculated
@@ -5707,29 +5793,28 @@ void Compiler::lvaAssignFrameOffsetsToPromotedStructs()
}
}
-
/*****************************************************************************
* lvaAllocateTemps() : Assign virtual offsets to temps (always negative).
*/
int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign)
-{
+{
unsigned spillTempSize = 0;
if (lvaDoneFrameLayout == FINAL_FRAME_LAYOUT)
{
- int preSpillSize = 0;
+ int preSpillSize = 0;
#ifdef _TARGET_ARM_
preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * TARGET_POINTER_SIZE;
#endif
- bool assignDone;
- bool assignNptr;
- bool assignPtrs = true;
+ bool assignDone;
+ bool assignNptr;
+ bool assignPtrs = true;
/* Allocate temps */
- if (TRACK_GC_TEMP_LIFETIMES)
+ if (TRACK_GC_TEMP_LIFETIMES)
{
- /* first pointers, then non-pointers in second pass */
+ /* first pointers, then non-pointers in second pass */
assignNptr = false;
assignDone = false;
}
@@ -5742,21 +5827,23 @@ int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign)
assert(tmpAllFree());
-AGAIN2:
+ AGAIN2:
- for (TempDsc* temp = tmpListBeg();
- temp != nullptr;
- temp = tmpListNxt(temp))
+ for (TempDsc* temp = tmpListBeg(); temp != nullptr; temp = tmpListNxt(temp))
{
- var_types tempType = temp->tdTempType();
- unsigned size;
+ var_types tempType = temp->tdTempType();
+ unsigned size;
/* Make sure the type is appropriate */
- if (!assignPtrs && varTypeIsGC(tempType))
+ if (!assignPtrs && varTypeIsGC(tempType))
+ {
continue;
- if (!assignNptr && !varTypeIsGC(tempType))
+ }
+ if (!assignNptr && !varTypeIsGC(tempType))
+ {
continue;
+ }
size = temp->tdTempSize();
@@ -5765,7 +5852,7 @@ AGAIN2:
/* Need to align the offset? */
CLANG_FORMAT_COMMENT_ANCHOR;
-#ifdef _TARGET_64BIT_
+#ifdef _TARGET_64BIT_
if (varTypeIsGC(tempType) && ((stkOffs % TARGET_POINTER_SIZE) != 0))
{
// Calculate 'pad' as the number of bytes to align up 'stkOffs' to be a multiple of TARGET_POINTER_SIZE
@@ -5783,18 +5870,18 @@ AGAIN2:
}
#endif
- if (mustDoubleAlign && (tempType == TYP_DOUBLE)) // Align doubles for x86 and ARM
+ if (mustDoubleAlign && (tempType == TYP_DOUBLE)) // Align doubles for x86 and ARM
{
noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0);
- if (((stkOffs+preSpillSize) % (2*TARGET_POINTER_SIZE)) != 0)
+ if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0)
{
spillTempSize += TARGET_POINTER_SIZE;
lvaIncrementFrameSize(TARGET_POINTER_SIZE);
stkOffs -= TARGET_POINTER_SIZE;
}
- // We should now have a double-aligned (stkOffs+preSpillSize)
- noway_assert(((stkOffs+preSpillSize) % (2*TARGET_POINTER_SIZE)) == 0);
+ // We should now have a double-aligned (stkOffs+preSpillSize)
+ noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0);
}
spillTempSize += size;
@@ -5809,7 +5896,7 @@ AGAIN2:
/* If we've only assigned some temps, go back and do the rest now */
- if (!assignDone)
+ if (!assignDone)
{
assignNptr = !assignNptr;
assignPtrs = !assignPtrs;
@@ -5818,9 +5905,9 @@ AGAIN2:
goto AGAIN2;
}
}
- else // We haven't run codegen, so there are no Spill temps yet!
+ else // We haven't run codegen, so there are no Spill temps yet!
{
- unsigned size = lvaGetMaxSpillTempSize();
+ unsigned size = lvaGetMaxSpillTempSize();
lvaIncrementFrameSize(size);
stkOffs -= size;
@@ -5839,45 +5926,45 @@ AGAIN2:
* LSRA register assignments.
*/
-void Compiler::lvaDumpRegLocation(unsigned lclNum)
+void Compiler::lvaDumpRegLocation(unsigned lclNum)
{
- LclVarDsc * varDsc = lvaTable + lclNum;
- var_types type = varDsc->TypeGet();
+ LclVarDsc* varDsc = lvaTable + lclNum;
+ var_types type = varDsc->TypeGet();
-#if FEATURE_STACK_FP_X87
+#if FEATURE_STACK_FP_X87
if (varTypeIsFloating(type))
{
printf("fpu stack ");
}
- else
+ else
#endif
- if (isRegPairType(type))
+ if (isRegPairType(type))
{
- if (!doLSRA()) noway_assert(varDsc->lvRegNum != REG_STK);
+ if (!doLSRA())
+ {
+ noway_assert(varDsc->lvRegNum != REG_STK);
+ }
if (doLSRA() && varDsc->lvRegNum == REG_STK)
{
/* Hi-only enregistered long */
- int offset = varDsc->lvStkOffs;
+ int offset = varDsc->lvStkOffs;
printf("%-3s:[%1s0x%02X]",
- getRegName(varDsc->lvOtherReg), // hi32
- (offset < 0 ? "-" : "+"),
- (offset < 0 ? -offset : offset));
+ getRegName(varDsc->lvOtherReg), // hi32
+ (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset));
}
else if (varDsc->lvOtherReg != REG_STK)
{
/* Fully enregistered long */
printf("%3s:%-3s ",
- getRegName(varDsc->lvOtherReg), // hi32
- getRegName(varDsc->lvRegNum)); // lo32
+ getRegName(varDsc->lvOtherReg), // hi32
+ getRegName(varDsc->lvRegNum)); // lo32
}
else
{
/* Partially enregistered long */
- int offset = varDsc->lvStkOffs+4;
- printf("[%1s0x%02X]:%-3s",
- (offset < 0 ? "-" : "+"),
- (offset < 0 ? -offset : offset),
- getRegName(varDsc->lvRegNum)); // lo32
+ int offset = varDsc->lvStkOffs + 4;
+ printf("[%1s0x%02X]:%-3s", (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset),
+ getRegName(varDsc->lvRegNum)); // lo32
}
}
#ifdef _TARGET_ARM_
@@ -5900,23 +5987,20 @@ void Compiler::lvaDumpRegLocation(unsigned lclNum)
* in its home location.
*/
-void Compiler::lvaDumpFrameLocation(unsigned lclNum)
+void Compiler::lvaDumpFrameLocation(unsigned lclNum)
{
int offset;
regNumber baseReg;
#ifdef _TARGET_ARM_
- offset = lvaFrameAddress(lclNum, compLocallocUsed, &baseReg, 0);
+ offset = lvaFrameAddress(lclNum, compLocallocUsed, &baseReg, 0);
#else
bool EBPbased;
offset = lvaFrameAddress(lclNum, &EBPbased);
baseReg = EBPbased ? REG_FPBASE : REG_SPBASE;
#endif
- printf("[%2s%1s0x%02X] ",
- getRegName(baseReg),
- (offset < 0 ? "-" : "+"),
- (offset < 0 ? -offset : offset));
+ printf("[%2s%1s0x%02X] ", getRegName(baseReg), (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset));
}
/*****************************************************************************
@@ -5924,10 +6008,10 @@ void Compiler::lvaDumpFrameLocation(unsigned lclNum)
* dump a single lvaTable entry
*/
-void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth)
+void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth)
{
- LclVarDsc * varDsc = lvaTable + lclNum;
- var_types type = varDsc->TypeGet();
+ LclVarDsc* varDsc = lvaTable + lclNum;
+ var_types type = varDsc->TypeGet();
if (curState == INITIAL_FRAME_LAYOUT)
{
@@ -5936,7 +6020,9 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t
printf(" %7s ", varTypeName(type));
if (genTypeSize(type) == 0)
+ {
printf("(%2d) ", lvaLclSize(lclNum));
+ }
}
else
{
@@ -5950,7 +6036,7 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t
}
else
#if FEATURE_FIXED_OUT_ARGS
- if ((lclNum == lvaOutgoingArgSpaceVar) && (lvaLclSize(lclNum) == 0))
+ if ((lclNum == lvaOutgoingArgSpaceVar) && (lvaLclSize(lclNum) == 0))
{
// Similar to above; print this anyway.
printf(";# ");
@@ -5964,19 +6050,26 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t
gtDispLclVar(lclNum);
printf("[V%02u", lclNum);
- if (varDsc->lvTracked) printf(",T%02u]", varDsc->lvVarIndex);
- else printf(" ]");
+ if (varDsc->lvTracked)
+ {
+ printf(",T%02u]", varDsc->lvVarIndex);
+ }
+ else
+ {
+ printf(" ]");
+ }
- printf(" (%3u,%*s)",
- varDsc->lvRefCnt,
- (int)refCntWtdWidth,
- refCntWtd2str(varDsc->lvRefCntWtd));
+ printf(" (%3u,%*s)", varDsc->lvRefCnt, (int)refCntWtdWidth, refCntWtd2str(varDsc->lvRefCntWtd));
- printf(" %7s ", varTypeName(type));
+ printf(" %7s ", varTypeName(type));
if (genTypeSize(type) == 0)
+ {
printf("(%2d) ", lvaLclSize(lclNum));
+ }
else
+ {
printf(" -> ");
+ }
// The register or stack location field is 11 characters wide.
if (varDsc->lvRefCnt == 0)
@@ -5997,7 +6090,9 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t
// For RyuJIT backend, it might be in a register part of the time, but it will definitely have a stack home
// location. Otherwise, it's always on the stack.
if (lvaDoneFrameLayout != NO_FRAME_LAYOUT)
+ {
lvaDumpFrameLocation(lclNum);
+ }
}
}
@@ -6013,76 +6108,139 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t
}
}
- if (varDsc->lvDoNotEnregister)
+ if (varDsc->lvDoNotEnregister)
{
printf(" do-not-enreg[");
- if (varDsc->lvAddrExposed) printf("X");
- if (varTypeIsStruct(varDsc)) printf("S");
- if (varDsc->lvVMNeedsStackAddr) printf("V");
- if (varDsc->lvLiveInOutOfHndlr) printf("H");
- if (varDsc->lvLclFieldExpr) printf("F");
- if (varDsc->lvLclBlockOpAddr) printf("B");
- if (varDsc->lvLiveAcrossUCall) printf("U");
- if (varDsc->lvIsMultiRegArg) printf("A");
- if (varDsc->lvIsMultiRegRet) printf("R");
+ if (varDsc->lvAddrExposed)
+ {
+ printf("X");
+ }
+ if (varTypeIsStruct(varDsc))
+ {
+ printf("S");
+ }
+ if (varDsc->lvVMNeedsStackAddr)
+ {
+ printf("V");
+ }
+ if (varDsc->lvLiveInOutOfHndlr)
+ {
+ printf("H");
+ }
+ if (varDsc->lvLclFieldExpr)
+ {
+ printf("F");
+ }
+ if (varDsc->lvLclBlockOpAddr)
+ {
+ printf("B");
+ }
+ if (varDsc->lvLiveAcrossUCall)
+ {
+ printf("U");
+ }
+ if (varDsc->lvIsMultiRegArg)
+ {
+ printf("A");
+ }
+ if (varDsc->lvIsMultiRegRet)
+ {
+ printf("R");
+ }
#ifdef JIT32_GCENCODER
- if (varDsc->lvPinned) printf("P");
+ if (varDsc->lvPinned)
+ printf("P");
#endif // JIT32_GCENCODER
printf("]");
}
- if (varDsc->lvIsMultiRegArg) printf(" multireg-arg");
- if (varDsc->lvIsMultiRegRet) printf(" multireg-ret");
- if (varDsc->lvMustInit) printf(" must-init");
- if (varDsc->lvAddrExposed) printf(" addr-exposed");
- if (varDsc->lvHasLdAddrOp) printf(" ld-addr-op");
- if (varDsc->lvVerTypeInfo.IsThisPtr()) printf(" this");
- if (varDsc->lvPinned) printf(" pinned");
- if (varDsc->lvRefAssign) printf(" ref-asgn");
- if (varDsc->lvStackByref) printf(" stack-byref");
+ if (varDsc->lvIsMultiRegArg)
+ {
+ printf(" multireg-arg");
+ }
+ if (varDsc->lvIsMultiRegRet)
+ {
+ printf(" multireg-ret");
+ }
+ if (varDsc->lvMustInit)
+ {
+ printf(" must-init");
+ }
+ if (varDsc->lvAddrExposed)
+ {
+ printf(" addr-exposed");
+ }
+ if (varDsc->lvHasLdAddrOp)
+ {
+ printf(" ld-addr-op");
+ }
+ if (varDsc->lvVerTypeInfo.IsThisPtr())
+ {
+ printf(" this");
+ }
+ if (varDsc->lvPinned)
+ {
+ printf(" pinned");
+ }
+ if (varDsc->lvRefAssign)
+ {
+ printf(" ref-asgn");
+ }
+ if (varDsc->lvStackByref)
+ {
+ printf(" stack-byref");
+ }
#ifndef _TARGET_64BIT_
- if (varDsc->lvStructDoubleAlign) printf(" double-align");
+ if (varDsc->lvStructDoubleAlign)
+ printf(" double-align");
#endif // !_TARGET_64BIT_
- if (varDsc->lvOverlappingFields) printf(" overlapping-fields");
+ if (varDsc->lvOverlappingFields)
+ {
+ printf(" overlapping-fields");
+ }
if (compGSReorderStackLayout && !varDsc->lvRegister)
{
- if (varDsc->lvIsPtr) printf(" ptr");
- if (varDsc->lvIsUnsafeBuffer) printf(" unsafe-buffer");
+ if (varDsc->lvIsPtr)
+ {
+ printf(" ptr");
+ }
+ if (varDsc->lvIsUnsafeBuffer)
+ {
+ printf(" unsafe-buffer");
+ }
}
if (varDsc->lvIsStructField)
{
- LclVarDsc * parentvarDsc = &lvaTable[varDsc->lvParentLcl];
+ LclVarDsc* parentvarDsc = &lvaTable[varDsc->lvParentLcl];
#if !defined(_TARGET_64BIT_)
if (varTypeIsLong(parentvarDsc))
{
bool isLo = (lclNum == parentvarDsc->lvFieldLclStart);
- printf(" V%02u.%s(offs=0x%02x)",
- varDsc->lvParentLcl,
- isLo ? "lo" : "hi",
- isLo ? 0 : genTypeSize(TYP_INT)
- );
+ printf(" V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, isLo ? "lo" : "hi", isLo ? 0 : genTypeSize(TYP_INT));
}
else
#endif // !defined(_TARGET_64BIT_)
{
- CORINFO_CLASS_HANDLE typeHnd = parentvarDsc->lvVerTypeInfo.GetClassHandle();
- CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(typeHnd, varDsc->lvFldOrdinal);
+ CORINFO_CLASS_HANDLE typeHnd = parentvarDsc->lvVerTypeInfo.GetClassHandle();
+ CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(typeHnd, varDsc->lvFldOrdinal);
- printf(" V%02u.%s(offs=0x%02x)",
- varDsc->lvParentLcl,
- eeGetFieldName(fldHnd),
- varDsc->lvFldOffset
- );
+ printf(" V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, eeGetFieldName(fldHnd), varDsc->lvFldOffset);
lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc);
// We should never have lvIsStructField set if it is a reg-sized non-field-addressed struct.
assert(!varDsc->lvRegStruct);
switch (promotionType)
{
- case PROMOTION_TYPE_NONE: printf(" P-NONE"); break;
- case PROMOTION_TYPE_DEPENDENT: printf(" P-DEP"); break;
- case PROMOTION_TYPE_INDEPENDENT: printf(" P-INDEP"); break;
+ case PROMOTION_TYPE_NONE:
+ printf(" P-NONE");
+ break;
+ case PROMOTION_TYPE_DEPENDENT:
+ printf(" P-DEP");
+ break;
+ case PROMOTION_TYPE_INDEPENDENT:
+ printf(" P-INDEP");
+ break;
}
}
}
@@ -6095,7 +6253,7 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t
* dump the lvaTable
*/
-void Compiler::lvaTableDump(FrameLayoutState curState)
+void Compiler::lvaTableDump(FrameLayoutState curState)
{
if (curState == NO_FRAME_LAYOUT)
{
@@ -6108,15 +6266,25 @@ void Compiler::lvaTableDump(FrameLayoutState curState)
}
if (curState == INITIAL_FRAME_LAYOUT)
+ {
printf("; Initial");
+ }
else if (curState == PRE_REGALLOC_FRAME_LAYOUT)
+ {
printf("; Pre-RegAlloc");
+ }
else if (curState == REGALLOC_FRAME_LAYOUT)
+ {
printf("; RegAlloc");
+ }
else if (curState == TENTATIVE_FRAME_LAYOUT)
+ {
printf("; Tentative");
+ }
else if (curState == FINAL_FRAME_LAYOUT)
+ {
printf("; Final");
+ }
else
{
printf("UNKNOWN FrameLayoutState!");
@@ -6126,30 +6294,28 @@ void Compiler::lvaTableDump(FrameLayoutState curState)
printf(" local variable assignments\n");
printf(";\n");
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
// Figure out some sizes, to help line things up
- size_t refCntWtdWidth = 6; // Use 6 as the minimum width
+ size_t refCntWtdWidth = 6; // Use 6 as the minimum width
- if (curState != INITIAL_FRAME_LAYOUT) // don't need this info for INITIAL_FRAME_LAYOUT
+ if (curState != INITIAL_FRAME_LAYOUT) // don't need this info for INITIAL_FRAME_LAYOUT
{
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
size_t width = strlen(refCntWtd2str(varDsc->lvRefCntWtd));
if (width > refCntWtdWidth)
+ {
refCntWtdWidth = width;
+ }
}
}
// Do the actual output
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
lvaDumpEntry(lclNum, curState, refCntWtdWidth);
}
@@ -6158,21 +6324,13 @@ void Compiler::lvaTableDump(FrameLayoutState curState)
// Display the code-gen temps
assert(tmpAllFree());
- for (TempDsc* temp = tmpListBeg();
- temp != nullptr;
- temp = tmpListNxt(temp))
- {
- printf("; TEMP_%02u %26s%*s%7s -> ",
- -temp->tdTempNum(),
- " ",
- refCntWtdWidth,
- " ",
- varTypeName(temp->tdTempType()));
- int offset = temp->tdTempOffs();
- printf(" [%2s%1s0x%02X]\n",
- isFramePointerUsed() ? STR_FPBASE : STR_SPBASE,
- (offset < 0 ? "-" : "+"),
- (offset < 0 ? -offset : offset));
+ for (TempDsc* temp = tmpListBeg(); temp != nullptr; temp = tmpListNxt(temp))
+ {
+ printf("; TEMP_%02u %26s%*s%7s -> ", -temp->tdTempNum(), " ", refCntWtdWidth, " ",
+ varTypeName(temp->tdTempType()));
+ int offset = temp->tdTempOffs();
+ printf(" [%2s%1s0x%02X]\n", isFramePointerUsed() ? STR_FPBASE : STR_SPBASE, (offset < 0 ? "-" : "+"),
+ (offset < 0 ? -offset : offset));
}
if (curState >= TENTATIVE_FRAME_LAYOUT)
@@ -6198,7 +6356,7 @@ void Compiler::lvaTableDump(FrameLayoutState curState)
* and only if temps have a larger offset than variables.
*/
-unsigned Compiler::lvaFrameSize(FrameLayoutState curState)
+unsigned Compiler::lvaFrameSize(FrameLayoutState curState)
{
assert(curState < FINAL_FRAME_LAYOUT);
@@ -6213,12 +6371,16 @@ unsigned Compiler::lvaFrameSize(FrameLayoutState curState)
if (compFloatingPointUsed)
compCalleeRegsPushed += CNT_CALLEE_SAVED_FLOAT;
- compCalleeRegsPushed++; // we always push LR. See genPushCalleeSavedRegisters
+ compCalleeRegsPushed++; // we always push LR. See genPushCalleeSavedRegisters
#elif defined(_TARGET_AMD64_)
if (compFloatingPointUsed)
+ {
compCalleeFPRegsSavedMask = RBM_FLT_CALLEE_SAVED;
+ }
else
+ {
compCalleeFPRegsSavedMask = RBM_NONE;
+ }
#endif
#if DOUBLE_ALIGN
@@ -6233,7 +6395,9 @@ unsigned Compiler::lvaFrameSize(FrameLayoutState curState)
// Since FP/EBP is included in the SAVED_REG_MAXSZ we need to
// subtract 1 register if codeGen->isFramePointerUsed() is true.
if (codeGen->isFramePointerUsed())
+ {
compCalleeRegsPushed--;
+ }
#endif
lvaAssignFrameOffsets(curState);
@@ -6251,7 +6415,6 @@ unsigned Compiler::lvaFrameSize(FrameLayoutState curState)
return result;
}
-
//------------------------------------------------------------------------
// lvaGetSPRelativeOffset: Given a variable, return the offset of that
// variable in the frame from the stack pointer. This number will be positive,
@@ -6267,7 +6430,7 @@ unsigned Compiler::lvaFrameSize(FrameLayoutState curState)
// Return Value:
// The offset.
-int Compiler::lvaGetSPRelativeOffset(unsigned varNum)
+int Compiler::lvaGetSPRelativeOffset(unsigned varNum)
{
assert(!compLocallocUsed);
assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
@@ -6297,17 +6460,17 @@ int Compiler::lvaGetSPRelativeOffset(unsigned varNum)
* Requires the local to be on the stack and frame layout to be complete.
*/
-int Compiler::lvaGetCallerSPRelativeOffset(unsigned varNum)
+int Compiler::lvaGetCallerSPRelativeOffset(unsigned varNum)
{
assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
assert(varNum < lvaCount);
- LclVarDsc * varDsc = lvaTable + varNum;
+ LclVarDsc* varDsc = lvaTable + varNum;
assert(varDsc->lvOnFrame);
return lvaToCallerSPRelativeOffset(varDsc->lvStkOffs, varDsc->lvFramePointerBased);
}
-int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased)
+int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased)
{
assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
@@ -6323,18 +6486,17 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBa
return offset;
}
-
/*****************************************************************************
*
* Return the Initial-SP-relative stack offset of a local/parameter.
* Requires the local to be on the stack and frame layout to be complete.
*/
-int Compiler::lvaGetInitialSPRelativeOffset(unsigned varNum)
+int Compiler::lvaGetInitialSPRelativeOffset(unsigned varNum)
{
assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
assert(varNum < lvaCount);
- LclVarDsc * varDsc = lvaTable + varNum;
+ LclVarDsc* varDsc = lvaTable + varNum;
assert(varDsc->lvOnFrame);
return lvaToInitialSPRelativeOffset(varDsc->lvStkOffs, varDsc->lvFramePointerBased);
@@ -6342,7 +6504,7 @@ int Compiler::lvaGetInitialSPRelativeOffset(unsigned varNum)
// Given a local variable offset, and whether that offset is frame-pointer based, return its offset from Initial-SP.
// This is used, for example, to figure out the offset of the frame pointer from Initial-SP.
-int Compiler::lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased)
+int Compiler::lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased)
{
assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
#ifdef _TARGET_AMD64_
@@ -6359,7 +6521,7 @@ int Compiler::lvaToInitialSPRelativeOffset(unsigned offset, bool
{
// The offset is correct already!
}
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
NYI("lvaToInitialSPRelativeOffset");
#endif // !_TARGET_AMD64_
@@ -6374,20 +6536,20 @@ int Compiler::lvaToInitialSPRelativeOffset(unsigned offset, bool
* 0 means that it should not be converted to a GT_LCL_FLD
*/
-static
-unsigned LCL_FLD_PADDING(unsigned lclNum)
+static unsigned LCL_FLD_PADDING(unsigned lclNum)
{
// Convert every 2nd variable
if (lclNum % 2)
+ {
return 0;
+ }
// Pick a padding size at "random"
- unsigned size = lclNum % 7;
+ unsigned size = lclNum % 7;
return size;
}
-
/*****************************************************************************
*
* Callback for fgWalkAllTreesPre()
@@ -6401,34 +6563,36 @@ unsigned LCL_FLD_PADDING(unsigned lclNum)
In the first pass we will mark the locals where we CAN't apply the stress mode.
In the second pass we will do the appropiate morphing wherever we've not determined we can't do it.
*/
-Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTreePtr *pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTreePtr* pTree, fgWalkData* data)
{
- GenTreePtr tree = *pTree;
- genTreeOps oper = tree->OperGet();
- GenTreePtr lcl;
+ GenTreePtr tree = *pTree;
+ genTreeOps oper = tree->OperGet();
+ GenTreePtr lcl;
switch (oper)
{
- case GT_LCL_VAR:
- lcl = tree;
- break;
+ case GT_LCL_VAR:
+ lcl = tree;
+ break;
- case GT_ADDR:
- if (tree->gtOp.gtOp1->gtOper != GT_LCL_VAR)
- return WALK_CONTINUE;
- lcl = tree->gtOp.gtOp1;
- break;
+ case GT_ADDR:
+ if (tree->gtOp.gtOp1->gtOper != GT_LCL_VAR)
+ {
+ return WALK_CONTINUE;
+ }
+ lcl = tree->gtOp.gtOp1;
+ break;
- default:
- return WALK_CONTINUE;
+ default:
+ return WALK_CONTINUE;
}
- Compiler * pComp = ((lvaStressLclFldArgs*)data->pCallbackData)->m_pCompiler;
- bool bFirstPass = ((lvaStressLclFldArgs*)data->pCallbackData)->m_bFirstPass;
+ Compiler* pComp = ((lvaStressLclFldArgs*)data->pCallbackData)->m_pCompiler;
+ bool bFirstPass = ((lvaStressLclFldArgs*)data->pCallbackData)->m_bFirstPass;
noway_assert(lcl->gtOper == GT_LCL_VAR);
- unsigned lclNum = lcl->gtLclVarCommon.gtLclNum;
- var_types type = lcl->TypeGet();
- LclVarDsc * varDsc = &pComp->lvaTable[lclNum];
+ unsigned lclNum = lcl->gtLclVarCommon.gtLclNum;
+ var_types type = lcl->TypeGet();
+ LclVarDsc* varDsc = &pComp->lvaTable[lclNum];
if (varDsc->lvNoLclFldStress)
{
@@ -6437,7 +6601,7 @@ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTreePtr *pTree, fgWal
}
if (bFirstPass)
- {
+ {
// Ignore arguments and temps
if (varDsc->lvIsParam || lclNum >= pComp->info.compLocalsCount)
{
@@ -6448,14 +6612,14 @@ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTreePtr *pTree, fgWal
// Fix for lcl_fld stress mode
if (varDsc->lvKeepType)
{
- varDsc->lvNoLclFldStress = true;
+ varDsc->lvNoLclFldStress = true;
return WALK_SKIP_SUBTREES;
}
- // Can't have GC ptrs in TYP_BLK.
+ // Can't have GC ptrs in TYP_BLK.
if (!varTypeIsArithmetic(type))
{
- varDsc->lvNoLclFldStress = true;
+ varDsc->lvNoLclFldStress = true;
return WALK_SKIP_SUBTREES;
}
@@ -6463,10 +6627,9 @@ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTreePtr *pTree, fgWal
// node with the accurate small type. If we bash lvaTable[].lvType,
// then there will be no indication that it was ever a small type.
var_types varType = varDsc->TypeGet();
- if (varType != TYP_BLK &&
- genTypeSize(varType) != genTypeSize(genActualType(varType)))
+ if (varType != TYP_BLK && genTypeSize(varType) != genTypeSize(genActualType(varType)))
{
- varDsc->lvNoLclFldStress = true;
+ varDsc->lvNoLclFldStress = true;
return WALK_SKIP_SUBTREES;
}
@@ -6474,19 +6637,19 @@ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTreePtr *pTree, fgWal
unsigned padding = LCL_FLD_PADDING(lclNum);
if (padding == 0)
{
- varDsc->lvNoLclFldStress = true;
+ varDsc->lvNoLclFldStress = true;
return WALK_SKIP_SUBTREES;
- }
+ }
}
else
{
// Do the morphing
noway_assert(varDsc->lvType == lcl->gtType || varDsc->lvType == TYP_BLK);
- var_types varType = varDsc->TypeGet();
+ var_types varType = varDsc->TypeGet();
// Calculate padding
unsigned padding = LCL_FLD_PADDING(lclNum);
-
+
// Change the variable to a TYP_BLK
if (varType != TYP_BLK)
{
@@ -6510,7 +6673,7 @@ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTreePtr *pTree, fgWal
/* Change addr(lclVar) to addr(lclVar)+padding */
noway_assert(oper == GT_ADDR);
- GenTreePtr newAddr = new(pComp, GT_NONE) GenTreeOp(*tree->AsOp());
+ GenTreePtr newAddr = new (pComp, GT_NONE) GenTreeOp(*tree->AsOp());
tree->ChangeOper(GT_ADD);
tree->gtOp.gtOp1 = newAddr;
@@ -6519,16 +6682,18 @@ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTreePtr *pTree, fgWal
lcl->gtType = TYP_BLK;
}
}
-
+
return WALK_SKIP_SUBTREES;
}
/*****************************************************************************/
-void Compiler::lvaStressLclFld()
+void Compiler::lvaStressLclFld()
{
if (!compStressCompile(STRESS_LCL_FLDS, 5))
+ {
return;
+ }
lvaStressLclFldArgs Args;
Args.m_pCompiler = this;
@@ -6552,15 +6717,14 @@ void Compiler::lvaStressLclFld()
* inserted if its corresponding bit is not in 'set').
*/
-#ifdef DEBUG
-void Compiler::lvaDispVarSet(VARSET_VALARG_TP set)
+#ifdef DEBUG
+void Compiler::lvaDispVarSet(VARSET_VALARG_TP set)
{
VARSET_TP VARSET_INIT_NOCOPY(allVars, VarSetOps::MakeEmpty(this));
lvaDispVarSet(set, allVars);
}
-
-void Compiler::lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars)
+void Compiler::lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars)
{
printf("{");
@@ -6570,32 +6734,40 @@ void Compiler::lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_
{
if (VarSetOps::IsMember(this, set, index))
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
/* Look for the matching variable */
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
- if ((varDsc->lvVarIndex == index) && varDsc->lvTracked)
+ if ((varDsc->lvVarIndex == index) && varDsc->lvTracked)
+ {
break;
+ }
}
if (needSpace)
+ {
printf(" ");
+ }
else
+ {
needSpace = true;
+ }
printf("V%02u", lclNum);
}
else if (VarSetOps::IsMember(this, allVars, index))
{
if (needSpace)
+ {
printf(" ");
+ }
else
+ {
needSpace = true;
+ }
printf(" ");
}
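The lclvars.cpp hunks above are dominated by a handful of purely mechanical rules; as a quick orientation before the liveness.cpp diff, here is a tiny runnable sketch written in the post-format style, with comments calling out what changed (braces around single-statement bodies, the '*' bound to the type, aligned initializers and trailing comments). The struct and names only echo the sources for flavor; this is standalone example code, not an excerpt.

    #include <cstdio>

    struct LclVarDsc
    {
        bool lvTracked;
    };

    int main()
    {
        LclVarDsc  vars[2] = {{true}, {false}};
        unsigned   count   = 2;    // aligned initializers, as in the reformatted sources
        LclVarDsc* varDsc  = vars; // '*' binds to the type after the reformat

        for (unsigned lclNum = 0; lclNum < count; lclNum++, varDsc++)
        {
            if (!varDsc->lvTracked)
            {
                continue; // single-statement bodies now always get braces
            }
            printf("V%02u is tracked\n", lclNum);
        }
        return 0;
    }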
diff --git a/src/jit/liveness.cpp b/src/jit/liveness.cpp
index f8c7227be6..9b07f8cd94 100644
--- a/src/jit/liveness.cpp
+++ b/src/jit/liveness.cpp
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
// =================================================================================
// Code that works with liveness and related concepts (interference, debug scope)
// =================================================================================
@@ -12,7 +11,6 @@
#pragma hdrstop
#endif
-
/*****************************************************************************
*
* Helper for Compiler::fgPerBlockLocalVarLiveness().
@@ -22,18 +20,15 @@
* 'asgdLclVar' is set when 'tree' is part of an expression with no side-effects
* which is assigned to asgdLclVar, ie. asgdLclVar = (... tree ...)
*/
-void Compiler::fgMarkUseDef(GenTreeLclVarCommon *tree, GenTree *asgdLclVar)
+void Compiler::fgMarkUseDef(GenTreeLclVarCommon* tree, GenTree* asgdLclVar)
{
- bool rhsUSEDEF = false;
- unsigned lclNum;
- unsigned lhsLclNum;
- LclVarDsc* varDsc;
-
- noway_assert(tree->gtOper == GT_LCL_VAR ||
- tree->gtOper == GT_LCL_VAR_ADDR ||
- tree->gtOper == GT_LCL_FLD ||
- tree->gtOper == GT_LCL_FLD_ADDR ||
- tree->gtOper == GT_STORE_LCL_VAR ||
+ bool rhsUSEDEF = false;
+ unsigned lclNum;
+ unsigned lhsLclNum;
+ LclVarDsc* varDsc;
+
+ noway_assert(tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_VAR_ADDR || tree->gtOper == GT_LCL_FLD ||
+ tree->gtOper == GT_LCL_FLD_ADDR || tree->gtOper == GT_STORE_LCL_VAR ||
tree->gtOper == GT_STORE_LCL_FLD);
if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_VAR_ADDR || tree->gtOper == GT_STORE_LCL_VAR)
@@ -50,7 +45,7 @@ void Compiler::fgMarkUseDef(GenTreeLclVarCommon *tree, GenTree *
varDsc = lvaTable + lclNum;
// We should never encounter a reference to a lclVar that has a zero refCnt.
- if(varDsc->lvRefCnt == 0 && (!varTypeIsPromotable(varDsc) || !varDsc->lvPromoted))
+ if (varDsc->lvRefCnt == 0 && (!varTypeIsPromotable(varDsc) || !varDsc->lvPromoted))
{
JITDUMP("Found reference to V%02u with zero refCnt.\n", lclNum);
assert(!"We should never encounter a reference to a lclVar that has a zero refCnt.");
@@ -67,9 +62,7 @@ void Compiler::fgMarkUseDef(GenTreeLclVarCommon *tree, GenTree *
lhsLclNum = asgdLclVar->gtLclVarCommon.gtLclNum;
- if ((lhsLclNum == lclNum) &&
- ((tree->gtFlags & GTF_VAR_DEF) == 0) &&
- (tree != asgdLclVar) )
+ if ((lhsLclNum == lclNum) && ((tree->gtFlags & GTF_VAR_DEF) == 0) && (tree != asgdLclVar))
{
/* bingo - we have an x = f(x) case */
noway_assert(lvaTable[lhsLclNum].lvType != TYP_STRUCT);
@@ -80,12 +73,11 @@ void Compiler::fgMarkUseDef(GenTreeLclVarCommon *tree, GenTree *
/* Is this a tracked variable? */
- if (varDsc->lvTracked)
+ if (varDsc->lvTracked)
{
noway_assert(varDsc->lvVarIndex < lvaTrackedCount);
- if ((tree->gtFlags & GTF_VAR_DEF) != 0 &&
- (tree->gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF)) == 0)
+ if ((tree->gtFlags & GTF_VAR_DEF) != 0 && (tree->gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF)) == 0)
{
// if (!(fgCurUseSet & bitMask)) printf("V%02u,T%02u def at %08p\n", lclNum, varDsc->lvVarIndex, tree);
VarSetOps::AddElemD(this, fgCurDefSet, varDsc->lvVarIndex);
@@ -120,7 +112,9 @@ void Compiler::fgMarkUseDef(GenTreeLclVarCommon *tree, GenTree *
{
/* assign to itself - do not include it in the USE set */
if (!opts.MinOpts() && !opts.compDbgCode)
+ {
return;
+ }
}
}
@@ -148,9 +142,7 @@ void Compiler::fgMarkUseDef(GenTreeLclVarCommon *tree, GenTree *
{
VARSET_TP VARSET_INIT_NOCOPY(bitMask, VarSetOps::MakeEmpty(this));
- for (unsigned i = varDsc->lvFieldLclStart;
- i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
- ++i)
+ for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
noway_assert(lvaTable[i].lvIsStructField);
if (lvaTable[i].lvTracked)
@@ -161,8 +153,7 @@ void Compiler::fgMarkUseDef(GenTreeLclVarCommon *tree, GenTree *
}
// For pure defs (i.e. not an "update" def which is also a use), add to the (all) def set.
- if ((tree->gtFlags & GTF_VAR_DEF) != 0 &&
- (tree->gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF)) == 0)
+ if ((tree->gtFlags & GTF_VAR_DEF) != 0 && (tree->gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF)) == 0)
{
VarSetOps::UnionD(this, fgCurDefSet, bitMask);
}
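The condition in this hunk is the liveness code's notion of a "pure" definition, per its own comment: the node defines the local (GTF_VAR_DEF) and is not also a use of it (neither GTF_VAR_USEASG nor GTF_VAR_USEDEF is set). A minimal restatement with made-up flag values, since only the expression itself comes from the source (the real GTF_* constants are defined elsewhere in the JIT headers):

    #include <cassert>

    // Illustrative flag values for this sketch only.
    enum : unsigned
    {
        GTF_VAR_DEF    = 0x1,
        GTF_VAR_USEASG = 0x2,
        GTF_VAR_USEDEF = 0x4,
    };

    bool isPureDef(unsigned gtFlags)
    {
        return ((gtFlags & GTF_VAR_DEF) != 0) && ((gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF)) == 0);
    }

    int main()
    {
        assert(isPureDef(GTF_VAR_DEF));                   // plain store: def only
        assert(!isPureDef(GTF_VAR_DEF | GTF_VAR_USEASG)); // update-style def that also reads the local
        assert(!isPureDef(0));                            // plain read: not a def at all
        return 0;
    }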
@@ -176,10 +167,10 @@ void Compiler::fgMarkUseDef(GenTreeLclVarCommon *tree, GenTree *
}
/*****************************************************************************/
-void Compiler::fgLocalVarLiveness()
+void Compiler::fgLocalVarLiveness()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** In fgLocalVarLiveness()\n");
@@ -194,7 +185,7 @@ void Compiler::fgLocalVarLiveness()
// Init liveness data structures.
fgLocalVarLivenessInit();
- assert(lvaSortAgain == false); // Set to false by lvaSortOnly()
+ assert(lvaSortAgain == false); // Set to false by lvaSortOnly()
EndPhase(PHASE_LCLVARLIVENESS_INIT);
@@ -215,28 +206,27 @@ void Compiler::fgLocalVarLiveness()
fgStmtRemoved = false;
fgInterBlockLocalVarLiveness();
- }
- while (fgStmtRemoved && fgLocalVarLivenessChanged);
+ } while (fgStmtRemoved && fgLocalVarLivenessChanged);
// If we removed any dead code we will have set 'lvaSortAgain' via decRefCnts
if (lvaSortAgain)
{
JITDUMP("In fgLocalVarLiveness, setting lvaSortAgain back to false (set during dead-code removal)\n");
- lvaSortAgain = false; // We don't re-Sort because we just performed LclVar liveness.
+ lvaSortAgain = false; // We don't re-Sort because we just performed LclVar liveness.
}
EndPhase(PHASE_LCLVARLIVENESS_INTERBLOCK);
}
/*****************************************************************************/
-void Compiler::fgLocalVarLivenessInit()
+void Compiler::fgLocalVarLivenessInit()
{
// If necessary, re-sort the variable table by ref-count...before creating any varsets using this sorting.
if (lvaSortAgain)
{
JITDUMP("In fgLocalVarLivenessInit, sorting locals\n");
lvaSortByRefCount();
- assert(lvaSortAgain == false); // Set to false by lvaSortOnly()
+ assert(lvaSortAgain == false); // Set to false by lvaSortOnly()
}
#ifdef LEGACY_BACKEND // RyuJIT backend does not use interference info
@@ -247,7 +237,7 @@ void Compiler::fgLocalVarLivenessInit()
}
/* If we're not optimizing at all, things are simple */
- if (opts.MinOpts())
+ if (opts.MinOpts())
{
VARSET_TP VARSET_INIT_NOCOPY(allOnes, VarSetOps::MakeFull(this));
for (unsigned i = 0; i < lvaTrackedCount; i++)
@@ -284,9 +274,9 @@ void Compiler::fgLocalVarLivenessInit()
//
#ifndef LEGACY_BACKEND
//------------------------------------------------------------------------
-// fgPerStatementLocalVarLiveness:
+// fgPerStatementLocalVarLiveness:
// Set fgCurHeapUse and fgCurHeapDef when the global heap is read or updated
-// Call fgMarkUseDef for any Local variables encountered
+// Call fgMarkUseDef for any Local variables encountered
//
// Arguments:
// startNode must be the first node in the statement
@@ -306,112 +296,111 @@ void Compiler::fgPerStatementLocalVarLiveness(GenTreePtr startNode, GenTreePtr a
{
switch (tree->gtOper)
{
- case GT_QMARK:
- case GT_COLON:
- // We never should encounter a GT_QMARK or GT_COLON node
- noway_assert(!"unexpected GT_QMARK/GT_COLON");
- break;
-
- case GT_LCL_VAR:
- case GT_LCL_FLD:
- case GT_LCL_VAR_ADDR:
- case GT_LCL_FLD_ADDR:
- case GT_STORE_LCL_VAR:
- case GT_STORE_LCL_FLD:
- fgMarkUseDef(tree->AsLclVarCommon(), asgdLclVar);
- break;
+ case GT_QMARK:
+ case GT_COLON:
+ // We never should encounter a GT_QMARK or GT_COLON node
+ noway_assert(!"unexpected GT_QMARK/GT_COLON");
+ break;
- case GT_CLS_VAR:
- // For Volatile indirection, first mutate the global heap
- // see comments in ValueNum.cpp (under case GT_CLS_VAR)
- // This models Volatile reads as def-then-use of the heap.
- // and allows for a CSE of a subsequent non-volatile read
- if ((tree->gtFlags & GTF_FLD_VOLATILE) != 0)
- {
- // For any Volatile indirection, we must handle it as a
- // definition of the global heap
- fgCurHeapDef = true;
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
+ case GT_LCL_VAR_ADDR:
+ case GT_LCL_FLD_ADDR:
+ case GT_STORE_LCL_VAR:
+ case GT_STORE_LCL_FLD:
+ fgMarkUseDef(tree->AsLclVarCommon(), asgdLclVar);
+ break;
- }
- // If the GT_CLS_VAR is the lhs of an assignment, we'll handle it as a heap def, when we get to assignment.
- // Otherwise, we treat it as a use here.
- if (!fgCurHeapDef && (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) == 0)
- {
- fgCurHeapUse = true;
- }
- break;
+ case GT_CLS_VAR:
+ // For Volatile indirection, first mutate the global heap
+ // see comments in ValueNum.cpp (under case GT_CLS_VAR)
+ // This models Volatile reads as def-then-use of the heap.
+ // and allows for a CSE of a subsequent non-volatile read
+ if ((tree->gtFlags & GTF_FLD_VOLATILE) != 0)
+ {
+ // For any Volatile indirection, we must handle it as a
+ // definition of the global heap
+ fgCurHeapDef = true;
+ }
+ // If the GT_CLS_VAR is the lhs of an assignment, we'll handle it as a heap def, when we get to
+ // assignment.
+ // Otherwise, we treat it as a use here.
+ if (!fgCurHeapDef && (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) == 0)
+ {
+ fgCurHeapUse = true;
+ }
+ break;
- case GT_IND:
- // For Volatile indirection, first mutate the global heap
- // see comments in ValueNum.cpp (under case GT_CLS_VAR)
- // This models Volatile reads as def-then-use of the heap.
- // and allows for a CSE of a subsequent non-volatile read
- if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
- {
- // For any Volatile indirection, we must handle it as a
- // definition of the global heap
- fgCurHeapDef = true;
- }
+ case GT_IND:
+ // For Volatile indirection, first mutate the global heap
+ // see comments in ValueNum.cpp (under case GT_CLS_VAR)
+ // This models Volatile reads as def-then-use of the heap.
+ // and allows for a CSE of a subsequent non-volatile read
+ if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
+ {
+ // For any Volatile indirection, we must handle it as a
+ // definition of the global heap
+ fgCurHeapDef = true;
+ }
- // If the GT_IND is the lhs of an assignment, we'll handle it
- // as a heap def, when we get to assignment.
- // Otherwise, we treat it as a use here.
- if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0)
- {
- GenTreeLclVarCommon* dummyLclVarTree = NULL;
- bool dummyIsEntire = false;
- GenTreePtr addrArg = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/true);
- if (!addrArg->DefinesLocalAddr(this, /*width doesn't matter*/0, &dummyLclVarTree, &dummyIsEntire))
+ // If the GT_IND is the lhs of an assignment, we'll handle it
+ // as a heap def, when we get to assignment.
+ // Otherwise, we treat it as a use here.
+ if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0)
{
- if (!fgCurHeapDef)
+ GenTreeLclVarCommon* dummyLclVarTree = nullptr;
+ bool dummyIsEntire = false;
+ GenTreePtr addrArg = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/ true);
+ if (!addrArg->DefinesLocalAddr(this, /*width doesn't matter*/ 0, &dummyLclVarTree, &dummyIsEntire))
{
- fgCurHeapUse = true;
+ if (!fgCurHeapDef)
+ {
+ fgCurHeapUse = true;
+ }
+ }
+ else
+ {
+ // Defines a local addr
+ assert(dummyLclVarTree != nullptr);
+ fgMarkUseDef(dummyLclVarTree->AsLclVarCommon(), asgdLclVar);
}
}
- else
- {
- // Defines a local addr
- assert(dummyLclVarTree != nullptr);
- fgMarkUseDef(dummyLclVarTree->AsLclVarCommon(), asgdLclVar);
- }
- }
- break;
+ break;
// These should have been morphed away to become GT_INDs:
- case GT_FIELD:
- case GT_INDEX:
- unreached();
- break;
+ case GT_FIELD:
+ case GT_INDEX:
+ unreached();
+ break;
// We'll assume these are use-then-defs of the heap.
- case GT_LOCKADD:
- case GT_XADD:
- case GT_XCHG:
- case GT_CMPXCHG:
- if (!fgCurHeapDef)
- {
- fgCurHeapUse = true;
- }
- fgCurHeapDef = true;
- fgCurHeapHavoc = true;
- break;
+ case GT_LOCKADD:
+ case GT_XADD:
+ case GT_XCHG:
+ case GT_CMPXCHG:
+ if (!fgCurHeapDef)
+ {
+ fgCurHeapUse = true;
+ }
+ fgCurHeapDef = true;
+ fgCurHeapHavoc = true;
+ break;
- case GT_MEMORYBARRIER:
- // Similar to any Volatile indirection, we must handle this as a definition of the global heap
- fgCurHeapDef = true;
- break;
+ case GT_MEMORYBARRIER:
+ // Similar to any Volatile indirection, we must handle this as a definition of the global heap
+ fgCurHeapDef = true;
+ break;
// For now, all calls read/write the heap, the latter in its entirety. Might tighten this case later.
- case GT_CALL:
+ case GT_CALL:
{
- GenTreeCall* call = tree->AsCall();
- bool modHeap = true;
+ GenTreeCall* call = tree->AsCall();
+ bool modHeap = true;
if (call->gtCallType == CT_HELPER)
{
CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd);
- if ( !s_helperCallProperties.MutatesHeap(helpFunc)
- && !s_helperCallProperties.MayRunCctor(helpFunc))
+ if (!s_helperCallProperties.MutatesHeap(helpFunc) && !s_helperCallProperties.MayRunCctor(helpFunc))
{
modHeap = false;
}
@@ -422,90 +411,87 @@ void Compiler::fgPerStatementLocalVarLiveness(GenTreePtr startNode, GenTreePtr a
{
fgCurHeapUse = true;
}
- fgCurHeapDef = true;
+ fgCurHeapDef = true;
fgCurHeapHavoc = true;
}
}
- // If this is a p/invoke unmanaged call or if this is a tail-call
- // and we have an unmanaged p/invoke call in the method,
- // then we're going to run the p/invoke epilog.
- // So we mark the FrameRoot as used by this instruction.
- // This ensures that the block->bbVarUse will contain
- // the FrameRoot local var if it is a tracked variable.
+ // If this is a p/invoke unmanaged call or if this is a tail-call
+ // and we have an unmanaged p/invoke call in the method,
+ // then we're going to run the p/invoke epilog.
+ // So we mark the FrameRoot as used by this instruction.
+ // This ensures that the block->bbVarUse will contain
+ // the FrameRoot local var if it is a tracked variable.
- if ((tree->gtCall.IsUnmanaged() || (tree->gtCall.IsTailCall() && info.compCallUnmanaged)))
- {
- assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
- if (!opts.ShouldUsePInvokeHelpers())
+ if ((tree->gtCall.IsUnmanaged() || (tree->gtCall.IsTailCall() && info.compCallUnmanaged)))
{
- /* Get the TCB local and mark it as used */
+ assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
+ if (!opts.ShouldUsePInvokeHelpers())
+ {
+ /* Get the TCB local and mark it as used */
- noway_assert(info.compLvFrameListRoot < lvaCount);
+ noway_assert(info.compLvFrameListRoot < lvaCount);
- LclVarDsc* varDsc = &lvaTable[info.compLvFrameListRoot];
+ LclVarDsc* varDsc = &lvaTable[info.compLvFrameListRoot];
- if (varDsc->lvTracked)
- {
- if (!VarSetOps::IsMember(this, fgCurDefSet, varDsc->lvVarIndex))
+ if (varDsc->lvTracked)
{
- VarSetOps::AddElemD(this, fgCurUseSet, varDsc->lvVarIndex);
+ if (!VarSetOps::IsMember(this, fgCurDefSet, varDsc->lvVarIndex))
+ {
+ VarSetOps::AddElemD(this, fgCurUseSet, varDsc->lvVarIndex);
+ }
}
}
}
- }
- break;
+ break;
- default:
+ default:
- // Determine whether it defines a heap location.
- if (tree->OperIsAssignment() || tree->OperIsBlkOp())
- {
- GenTreeLclVarCommon* dummyLclVarTree = NULL;
- if (!tree->DefinesLocal(this, &dummyLclVarTree))
+ // Determine whether it defines a heap location.
+ if (tree->OperIsAssignment() || tree->OperIsBlkOp())
{
- // If it doesn't define a local, then it might update the heap.
- fgCurHeapDef = true;
+ GenTreeLclVarCommon* dummyLclVarTree = nullptr;
+ if (!tree->DefinesLocal(this, &dummyLclVarTree))
+ {
+ // If it doesn't define a local, then it might update the heap.
+ fgCurHeapDef = true;
+ }
}
- }
- break;
+ break;
}
}
}
#endif // LEGACY_BACKEND
-
/*****************************************************************************/
-void Compiler::fgPerBlockLocalVarLiveness()
+void Compiler::fgPerBlockLocalVarLiveness()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** In fgPerBlockLocalVarLiveness()\n");
}
#endif // DEBUG
- BasicBlock* block;
+ BasicBlock* block;
#if CAN_DISABLE_DFA
/* If we're not optimizing at all, things are simple */
- if (opts.MinOpts())
+ if (opts.MinOpts())
{
- unsigned lclNum;
- LclVarDsc* varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- VARSET_TP VARSET_INIT_NOCOPY(liveAll, VarSetOps::MakeEmpty(this));
+ VARSET_TP VARSET_INIT_NOCOPY(liveAll, VarSetOps::MakeEmpty(this));
/* We simply make everything live everywhere */
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
- if (varDsc->lvTracked)
+ if (varDsc->lvTracked)
{
VarSetOps::AddElemD(this, liveAll, varDsc->lvVarIndex);
}
@@ -516,9 +502,9 @@ void Compiler::fgPerBlockLocalVarLiveness()
// Strictly speaking, the assignments for the "Def" cases aren't necessary here.
// The empty set would do as well. Use means "use-before-def", so as long as that's
// "all", this has the right effect.
- VarSetOps::Assign(this, block->bbVarUse, liveAll);
- VarSetOps::Assign(this, block->bbVarDef, liveAll);
- VarSetOps::Assign(this, block->bbLiveIn, liveAll);
+ VarSetOps::Assign(this, block->bbVarUse, liveAll);
+ VarSetOps::Assign(this, block->bbVarDef, liveAll);
+ VarSetOps::Assign(this, block->bbLiveIn, liveAll);
VarSetOps::Assign(this, block->bbLiveOut, liveAll);
block->bbHeapUse = true;
block->bbHeapDef = true;
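The per-block pass above builds bbVarUse (locals read before any write in the block) and bbVarDef (locals written), the "use means use-before-def" convention noted in the comment. A minimal, self-contained sketch of that gen/kill classification, using std::set and hypothetical Ref/Stmt records in place of the JIT's GenTree and VARSET_TP types:

#include <set>
#include <vector>

struct Ref { unsigned varIndex; bool isDef; };           // one reference to a tracked local
using Stmt = std::vector<Ref>;                            // a statement, as an ordered list of refs

struct BlockSets { std::set<unsigned> varUse, varDef; };  // analogue of bbVarUse / bbVarDef

// Walk a block's statements in order and classify each tracked-local reference,
// the way fgMarkUseDef populates fgCurUseSet / fgCurDefSet.
BlockSets computeUseDef(const std::vector<Stmt>& block)
{
    BlockSets s;
    for (const Stmt& stmt : block)
    {
        for (const Ref& r : stmt)
        {
            if (r.isDef)
            {
                s.varDef.insert(r.varIndex);              // later reads are no longer upward-exposed
            }
            else if (s.varDef.count(r.varIndex) == 0)
            {
                s.varUse.insert(r.varIndex);              // read before any def in this block
            }
        }
    }
    return s;
}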
@@ -527,13 +513,13 @@ void Compiler::fgPerBlockLocalVarLiveness()
switch (block->bbJumpKind)
{
- case BBJ_EHFINALLYRET:
- case BBJ_THROW:
- case BBJ_RETURN:
- VarSetOps::AssignNoCopy(this, block->bbLiveOut, VarSetOps::MakeEmpty(this));
- break;
- default:
- break;
+ case BBJ_EHFINALLYRET:
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ VarSetOps::AssignNoCopy(this, block->bbLiveOut, VarSetOps::MakeEmpty(this));
+ break;
+ default:
+ break;
}
}
return;
@@ -547,9 +533,9 @@ void Compiler::fgPerBlockLocalVarLiveness()
for (block = fgFirstBB; block; block = block->bbNext)
{
- GenTreePtr stmt;
- GenTreePtr tree;
- GenTreePtr asgdLclVar;
+ GenTreePtr stmt;
+ GenTreePtr tree;
+ GenTreePtr asgdLclVar;
VarSetOps::ClearD(this, fgCurUseSet);
VarSetOps::ClearD(this, fgCurDefSet);
@@ -558,19 +544,21 @@ void Compiler::fgPerBlockLocalVarLiveness()
fgCurHeapDef = false;
fgCurHeapHavoc = false;
- compCurBB = block;
+ compCurBB = block;
for (stmt = block->FirstNonPhiDef(); stmt; stmt = stmt->gtNext)
{
noway_assert(stmt->gtOper == GT_STMT);
if (!stmt->gtStmt.gtStmtIsTopLevel())
+ {
continue;
+ }
compCurStmt = stmt;
asgdLclVar = nullptr;
- tree = stmt->gtStmt.gtStmtExpr;
+ tree = stmt->gtStmt.gtStmtExpr;
noway_assert(tree);
// The following code checks if we have an assignment expression
@@ -585,15 +573,14 @@ void Compiler::fgPerBlockLocalVarLiveness()
{
noway_assert(tree->gtOp.gtOp2);
asgdLclVar = tree->gtOp.gtOp1;
- rhsNode = tree->gtOp.gtOp2;
+ rhsNode = tree->gtOp.gtOp2;
}
else
{
asgdLclVar = tree;
- rhsNode = tree->gtOp.gtOp1;
+ rhsNode = tree->gtOp.gtOp1;
}
-
// If this is an assignment to local var with no SIDE EFFECTS,
// set asgdLclVar so that genMarkUseDef will flag potential
// x=f(x) expressions as GTF_VAR_USEDEF.
@@ -629,7 +616,7 @@ void Compiler::fgPerBlockLocalVarLiveness()
{
noway_assert(info.compLvFrameListRoot < lvaCount);
- LclVarDsc * varDsc = &lvaTable[info.compLvFrameListRoot];
+ LclVarDsc* varDsc = &lvaTable[info.compLvFrameListRoot];
if (varDsc->lvTracked)
{
@@ -642,20 +629,26 @@ void Compiler::fgPerBlockLocalVarLiveness()
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
VARSET_TP VARSET_INIT_NOCOPY(allVars, VarSetOps::Union(this, fgCurUseSet, fgCurDefSet));
printf("BB%02u", block->bbNum);
- printf( " USE(%d)=", VarSetOps::Count(this, fgCurUseSet));
- lvaDispVarSet(fgCurUseSet, allVars);
+ printf(" USE(%d)=", VarSetOps::Count(this, fgCurUseSet));
+ lvaDispVarSet(fgCurUseSet, allVars);
if (fgCurHeapUse)
- printf( " + HEAP");
+ {
+ printf(" + HEAP");
+ }
printf("\n DEF(%d)=", VarSetOps::Count(this, fgCurDefSet));
- lvaDispVarSet(fgCurDefSet, allVars);
+ lvaDispVarSet(fgCurDefSet, allVars);
if (fgCurHeapDef)
- printf( " + HEAP");
+ {
+ printf(" + HEAP");
+ }
if (fgCurHeapHavoc)
+ {
printf("*");
+ }
printf("\n\n");
}
#endif // DEBUG
@@ -673,18 +666,17 @@ void Compiler::fgPerBlockLocalVarLiveness()
}
}
-
/*****************************************************************************/
#ifdef DEBUGGING_SUPPORT
/*****************************************************************************/
// Helper functions to mark variables live over their entire scope
-void Compiler::fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var)
+void Compiler::fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var)
{
assert(var);
- LclVarDsc * lclVarDsc1 = &lvaTable[var->vsdVarNum];
+ LclVarDsc* lclVarDsc1 = &lvaTable[var->vsdVarNum];
if (lclVarDsc1->lvTracked)
{
@@ -692,11 +684,11 @@ void Compiler::fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc*
}
}
-void Compiler::fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var)
+void Compiler::fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var)
{
assert(var);
- LclVarDsc * lclVarDsc1 = &lvaTable[var->vsdVarNum];
+ LclVarDsc* lclVarDsc1 = &lvaTable[var->vsdVarNum];
if (lclVarDsc1->lvTracked)
{
@@ -706,10 +698,10 @@ void Compiler::fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* va
/*****************************************************************************/
-void Compiler::fgMarkInScope(BasicBlock * block, VARSET_VALARG_TP inScope)
+void Compiler::fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Scope info: block BB%02u marking in scope: ", block->bbNum);
dumpConvertedVarSet(this, inScope);
@@ -732,11 +724,10 @@ void Compiler::fgMarkInScope(BasicBlock * block, VARSET_VALARG_TP
VarSetOps::UnionD(this, block->bbLiveOut, inScope);
}
-
-void Compiler::fgUnmarkInScope(BasicBlock * block, VARSET_VALARG_TP unmarkScope)
+void Compiler::fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope)
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Scope info: block BB%02u UNmarking in scope: ", block->bbNum);
dumpConvertedVarSet(this, unmarkScope);
@@ -746,15 +737,15 @@ void Compiler::fgUnmarkInScope(BasicBlock * block, VARSET_VALARG_
assert(VarSetOps::IsSubset(this, unmarkScope, block->bbScope));
- VarSetOps::DiffD(this, block->bbScope, unmarkScope);
- VarSetOps::DiffD(this, block->bbVarUse, unmarkScope);
- VarSetOps::DiffD(this, block->bbLiveIn, unmarkScope);
+ VarSetOps::DiffD(this, block->bbScope, unmarkScope);
+ VarSetOps::DiffD(this, block->bbVarUse, unmarkScope);
+ VarSetOps::DiffD(this, block->bbLiveIn, unmarkScope);
VarSetOps::DiffD(this, block->bbLiveOut, unmarkScope);
}
#ifdef DEBUG
-void Compiler::fgDispDebugScopes()
+void Compiler::fgDispDebugScopes()
{
printf("\nDebug scopes:\n");
@@ -776,16 +767,20 @@ void Compiler::fgDispDebugScopes()
#if FEATURE_EH_FUNCLETS
-void Compiler::fgExtendDbgScopes()
+void Compiler::fgExtendDbgScopes()
{
compResetScopeLists();
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("\nMarking vars alive over their entire scope :\n\n");
+ }
- if (verbose)
+ if (verbose)
+ {
compDispScopeLists();
+ }
#endif // DEBUG
VARSET_TP VARSET_INIT_NOCOPY(inScope, VarSetOps::MakeEmpty(this));
@@ -826,22 +821,22 @@ void Compiler::fgExtendDbgScopes()
#else // !FEATURE_EH_FUNCLETS
-void Compiler::fgExtendDbgScopes()
+void Compiler::fgExtendDbgScopes()
{
compResetScopeLists();
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nMarking vars alive over their entire scope :\n\n");
compDispScopeLists();
}
#endif // DEBUG
- VARSET_TP VARSET_INIT_NOCOPY(inScope, VarSetOps::MakeEmpty(this));
+ VARSET_TP VARSET_INIT_NOCOPY(inScope, VarSetOps::MakeEmpty(this));
compProcessScopesUntil(0, &inScope, &Compiler::fgBeginScopeLife, &Compiler::fgEndScopeLife);
- IL_OFFSET lastEndOffs = 0;
+ IL_OFFSET lastEndOffs = 0;
// Mark all tracked LocalVars live over their scope - walk the blocks
// keeping track of the current life, and assign it to the blocks.
@@ -858,7 +853,8 @@ void Compiler::fgExtendDbgScopes()
{
noway_assert(lastEndOffs < block->bbCodeOffs);
- compProcessScopesUntil(block->bbCodeOffs, &inScope, &Compiler::fgBeginScopeLife, &Compiler::fgEndScopeLife);
+ compProcessScopesUntil(block->bbCodeOffs, &inScope, &Compiler::fgBeginScopeLife,
+ &Compiler::fgEndScopeLife);
}
else
{
@@ -901,11 +897,13 @@ void Compiler::fgExtendDbgScopes()
* by marking them live over their entire scope.
*/
-void Compiler::fgExtendDbgLifetimes()
+void Compiler::fgExtendDbgLifetimes()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In fgExtendDbgLifetimes()\n");
+ }
#endif // DEBUG
noway_assert(opts.compDbgCode && (info.compVarScopesCount > 0));
@@ -916,10 +914,10 @@ void Compiler::fgExtendDbgLifetimes()
fgExtendDbgScopes();
- /*-------------------------------------------------------------------------
- * Partly update liveness info so that we handle any funky BBF_INTERNAL
- * blocks inserted out of sequence.
- */
+/*-------------------------------------------------------------------------
+ * Partly update liveness info so that we handle any funky BBF_INTERNAL
+ * blocks inserted out of sequence.
+ */
#ifdef DEBUG
if (verbose && 0)
@@ -938,27 +936,28 @@ void Compiler::fgExtendDbgLifetimes()
assert(fgFirstBBisScratch());
- VARSET_TP VARSET_INIT_NOCOPY(trackedArgs, VarSetOps::MakeEmpty(this));
+ VARSET_TP VARSET_INIT_NOCOPY(trackedArgs, VarSetOps::MakeEmpty(this));
for (unsigned argNum = 0; argNum < info.compArgsCount; argNum++)
{
- LclVarDsc* argDsc = lvaTable + argNum;
+ LclVarDsc* argDsc = lvaTable + argNum;
if (argDsc->lvPromoted)
{
lvaPromotionType promotionType = lvaGetPromotionType(argDsc);
if (promotionType == PROMOTION_TYPE_INDEPENDENT)
{
- noway_assert(argDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(argDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = argDsc->lvFieldLclStart;
- argDsc = lvaTable + fieldVarNum;
+ argDsc = lvaTable + fieldVarNum;
}
}
noway_assert(argDsc->lvIsParam);
if (argDsc->lvTracked)
{
- noway_assert(!VarSetOps::IsMember(this, trackedArgs, argDsc->lvVarIndex)); // Each arg should define a different bit.
+ noway_assert(!VarSetOps::IsMember(this, trackedArgs, argDsc->lvVarIndex)); // Each arg should define a
+ // different bit.
VarSetOps::AddElemD(this, trackedArgs, argDsc->lvVarIndex);
}
}
@@ -991,37 +990,37 @@ void Compiler::fgExtendDbgLifetimes()
switch (block->bbJumpKind)
{
- case BBJ_NONE:
- PREFIX_ASSUME(block->bbNext != NULL);
- VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
- break;
+ case BBJ_NONE:
+ PREFIX_ASSUME(block->bbNext != nullptr);
+ VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
+ break;
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- case BBJ_EHFILTERRET:
- VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
- break;
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ case BBJ_EHFILTERRET:
+ VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
+ break;
- case BBJ_CALLFINALLY:
- if (!(block->bbFlags & BBF_RETLESS_CALL))
- {
- assert(block->isBBCallAlwaysPair());
- PREFIX_ASSUME(block->bbNext != NULL);
- VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
- }
- VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
- break;
+ case BBJ_CALLFINALLY:
+ if (!(block->bbFlags & BBF_RETLESS_CALL))
+ {
+ assert(block->isBBCallAlwaysPair());
+ PREFIX_ASSUME(block->bbNext != nullptr);
+ VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
+ }
+ VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
+ break;
- case BBJ_COND:
- PREFIX_ASSUME(block->bbNext != NULL);
- VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
- VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
- break;
+ case BBJ_COND:
+ PREFIX_ASSUME(block->bbNext != nullptr);
+ VarSetOps::UnionD(this, initVars, block->bbNext->bbScope);
+ VarSetOps::UnionD(this, initVars, block->bbJumpDest->bbScope);
+ break;
- case BBJ_SWITCH:
+ case BBJ_SWITCH:
{
- BasicBlock** jmpTab;
- unsigned jmpCnt;
+ BasicBlock** jmpTab;
+ unsigned jmpCnt;
jmpCnt = block->bbJumpSwt->bbsCount;
jmpTab = block->bbJumpSwt->bbsDstTab;
@@ -1029,25 +1028,24 @@ void Compiler::fgExtendDbgLifetimes()
do
{
VarSetOps::UnionD(this, initVars, (*jmpTab)->bbScope);
- }
- while (++jmpTab, --jmpCnt);
+ } while (++jmpTab, --jmpCnt);
}
break;
- case BBJ_EHFINALLYRET:
- case BBJ_RETURN:
- break;
+ case BBJ_EHFINALLYRET:
+ case BBJ_RETURN:
+ break;
- case BBJ_THROW:
- /* We don't have to do anything as we mark
- * all vars live on entry to a catch handler as
- * volatile anyway
- */
- break;
+ case BBJ_THROW:
+ /* We don't have to do anything as we mark
+ * all vars live on entry to a catch handler as
+ * volatile anyway
+ */
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
/* If the var is already live on entry to the current BB,
@@ -1061,28 +1059,30 @@ void Compiler::fgExtendDbgLifetimes()
{
/* Create initialization tree */
- unsigned varNum = lvaTrackedToVarNum[varIndex];
- LclVarDsc * varDsc = &lvaTable[varNum];
- var_types type = varDsc->TypeGet();
+ unsigned varNum = lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = &lvaTable[varNum];
+ var_types type = varDsc->TypeGet();
// Don't extend struct lifetimes -- they aren't enregistered, anyway.
if (type == TYP_STRUCT)
+ {
continue;
+ }
// If we haven't already done this ...
if (!fgLocalVarLivenessDone)
{
// Create a "zero" node
- GenTreePtr zero = gtNewZeroConNode(genActualType(type));
+ GenTreePtr zero = gtNewZeroConNode(genActualType(type));
// Create initialization node
- GenTreePtr varNode = gtNewLclvNode(varNum, type);
- GenTreePtr initNode = gtNewAssignNode(varNode, zero);
- GenTreePtr initStmt = gtNewStmt(initNode);
+ GenTreePtr varNode = gtNewLclvNode(varNum, type);
+ GenTreePtr initNode = gtNewAssignNode(varNode, zero);
+ GenTreePtr initStmt = gtNewStmt(initNode);
- gtSetStmtInfo (initStmt);
+ gtSetStmtInfo(initStmt);
/* Assign numbers and next/prev links for this tree */
@@ -1107,7 +1107,7 @@ void Compiler::fgExtendDbgLifetimes()
VarSetOps::AddElemD(this, block->bbVarDef, varIndex);
VarSetOps::AddElemD(this, block->bbLiveOut, varIndex);
- block->bbFlags |= BBF_CHANGED; // indicates that the liveness info has changed
+ block->bbFlags |= BBF_CHANGED; // indicates that the liveness info has changed
}
}
@@ -1116,7 +1116,7 @@ void Compiler::fgExtendDbgLifetimes()
// So just ensure that they don't have a 0 ref cnt
unsigned lclNum = 0;
- for (LclVarDsc* varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
+ for (LclVarDsc *varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
if (varDsc->lvRefCnt == 0 && varDsc->lvIsRegArg)
{
@@ -1138,8 +1138,7 @@ void Compiler::fgExtendDbgLifetimes()
#endif // DEBUGGING_SUPPORT
/*****************************************************************************/
-
-VARSET_VALRET_TP Compiler::fgGetHandlerLiveVars(BasicBlock *block)
+VARSET_VALRET_TP Compiler::fgGetHandlerLiveVars(BasicBlock* block)
{
noway_assert(block);
noway_assert(ehBlockHasExnFlowDsc(block));
@@ -1171,7 +1170,7 @@ VARSET_VALRET_TP Compiler::fgGetHandlerLiveVars(BasicBlock *block)
/* If we have nested try's edbEnclosing will provide them */
noway_assert((HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ||
- (HBtab->ebdEnclosingTryIndex > ehGetIndex(HBtab)));
+ (HBtab->ebdEnclosingTryIndex > ehGetIndex(HBtab)));
unsigned outerIndex = HBtab->ebdEnclosingTryIndex;
if (outerIndex == EHblkDsc::NO_ENCLOSING_INDEX)
@@ -1185,19 +1184,18 @@ VARSET_VALRET_TP Compiler::fgGetHandlerLiveVars(BasicBlock *block)
return liveVars;
}
-
/*****************************************************************************
*
* This is the classic algorithm for Live Variable Analysis.
* If updateInternalOnly==true, only update BBF_INTERNAL blocks.
*/
-void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
+void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
{
- BasicBlock* block;
- bool change;
+ BasicBlock* block;
+ bool change;
#ifdef DEBUG
- VARSET_TP VARSET_INIT_NOCOPY(extraLiveOutFromFinally, VarSetOps::MakeEmpty(this));
+ VARSET_TP VARSET_INIT_NOCOPY(extraLiveOutFromFinally, VarSetOps::MakeEmpty(this));
#endif // DEBUG
bool keepAliveThis = lvaKeepAliveAndReportThis() && lvaTable[info.compThisArg].lvTracked;
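fgLiveVarAnalysis solves the classic backward dataflow: liveOut(B) is the union of liveIn over B's successors, and liveIn(B) = use(B) ∪ (liveOut(B) \ def(B)), iterated until no set changes. A minimal sketch of that fixed point, assuming simplified Block and std::set types rather than the JIT's BasicBlock and VARSET_TP:

#include <set>
#include <utility>
#include <vector>

struct Block
{
    std::set<unsigned> use, def;        // from the per-block pass (bbVarUse / bbVarDef)
    std::set<unsigned> liveIn, liveOut; // results (bbLiveIn / bbLiveOut)
    std::vector<int>   succs;           // indices of successor blocks
};

void liveVarAnalysis(std::vector<Block>& blocks)
{
    bool change;
    do
    {
        change = false;
        // Walk blocks in reverse, like the fgLastBB -> bbPrev loop above.
        for (int i = (int)blocks.size() - 1; i >= 0; i--)
        {
            Block& b = blocks[i];

            std::set<unsigned> liveOut;
            for (int s : b.succs)
            {
                liveOut.insert(blocks[s].liveIn.begin(), blocks[s].liveIn.end());
            }

            // liveIn = use U (liveOut \ def)
            std::set<unsigned> liveIn = b.use;
            for (unsigned v : liveOut)
            {
                if (b.def.count(v) == 0)
                {
                    liveIn.insert(v);
                }
            }

            if (liveIn != b.liveIn || liveOut != b.liveOut)
            {
                b.liveIn  = std::move(liveIn);
                b.liveOut = std::move(liveOut);
                change    = true;
            }
        }
    } while (change);
}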
@@ -1211,18 +1209,20 @@ void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
/* Visit all blocks and compute new data flow values */
- VARSET_TP VARSET_INIT_NOCOPY(liveIn, VarSetOps::MakeEmpty(this));
- VARSET_TP VARSET_INIT_NOCOPY(liveOut, VarSetOps::MakeEmpty(this));
+ VARSET_TP VARSET_INIT_NOCOPY(liveIn, VarSetOps::MakeEmpty(this));
+ VARSET_TP VARSET_INIT_NOCOPY(liveOut, VarSetOps::MakeEmpty(this));
bool heapLiveIn = false;
- bool heapLiveOut = false;
+ bool heapLiveOut = false;
for (block = fgLastBB; block; block = block->bbPrev)
{
- // sometimes block numbers are not monotonically increasing which
+ // sometimes block numbers are not monotonically increasing which
// would cause us not to identify backedges
if (block->bbNext && block->bbNext->bbNum <= block->bbNum)
+ {
hasPossibleBackEdge = true;
+ }
if (updateInternalOnly)
{
@@ -1232,7 +1232,9 @@ void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
noway_assert(opts.compDbgCode && (info.compVarScopesCount > 0));
if (!(block->bbFlags & BBF_INTERNAL))
+ {
continue;
+ }
}
/* Compute the 'liveOut' set */
@@ -1240,7 +1242,7 @@ void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
VarSetOps::ClearD(this, liveOut);
heapLiveOut = false;
if (block->endsWithJmpMethod(this))
- {
+ {
// A JMP uses all the arguments, so mark them all
// as live at the JMP instruction
//
@@ -1263,7 +1265,9 @@ void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
VarSetOps::UnionD(this, liveOut, succ->bbLiveIn);
heapLiveOut = heapLiveOut || (*succs)->bbHeapLiveIn;
if (succ->bbNum <= block->bbNum)
+ {
hasPossibleBackEdge = true;
+ }
}
/* For lvaKeepAliveAndReportThis methods, "this" has to be kept alive everywhere
@@ -1285,17 +1289,17 @@ void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
/* Can exceptions from this block be handled (in this function)? */
- if (ehBlockHasExnFlowDsc(block))
+ if (ehBlockHasExnFlowDsc(block))
{
VARSET_TP VARSET_INIT_NOCOPY(liveVars, fgGetHandlerLiveVars(block));
- VarSetOps::UnionD(this, liveIn, liveVars);
+ VarSetOps::UnionD(this, liveIn, liveVars);
VarSetOps::UnionD(this, liveOut, liveVars);
}
/* Has there been any change in either live set? */
- if (!VarSetOps::Equal(this, block->bbLiveIn, liveIn) || !VarSetOps::Equal(this, block->bbLiveOut, liveOut))
+ if (!VarSetOps::Equal(this, block->bbLiveIn, liveIn) || !VarSetOps::Equal(this, block->bbLiveOut, liveOut))
{
if (updateInternalOnly)
{
@@ -1307,7 +1311,7 @@ void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
!VarSetOps::Equal(this, VarSetOps::Intersection(this, block->bbLiveOut, liveOut), liveOut))
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Scope info: block BB%02u LiveIn+ ", block->bbNum);
dumpConvertedVarSet(this, VarSetOps::Diff(this, liveIn, block->bbLiveIn));
@@ -1330,26 +1334,26 @@ void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
}
}
- if ((block->bbHeapLiveIn == 1) != heapLiveIn ||
- (block->bbHeapLiveOut == 1) != heapLiveOut)
+ if ((block->bbHeapLiveIn == 1) != heapLiveIn || (block->bbHeapLiveOut == 1) != heapLiveOut)
{
block->bbHeapLiveIn = heapLiveIn;
block->bbHeapLiveOut = heapLiveOut;
- change = true;
+ change = true;
}
}
// if there is no way we could have processed a block without seeing all of its predecessors
// then there is no need to iterate
if (!hasPossibleBackEdge)
+ {
break;
- }
- while (change);
+ }
+ } while (change);
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
#ifdef DEBUG
- if (verbose && !updateInternalOnly)
+ if (verbose && !updateInternalOnly)
{
printf("\nBB liveness after fgLiveVarAnalysis():\n\n");
fgDispBBLiveness();
@@ -1358,7 +1362,6 @@ void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
#endif // DEBUG
}
-
/*****************************************************************************
*
* Mark any variables in varSet1 as interfering with any variables
@@ -1369,19 +1372,16 @@ void Compiler::fgLiveVarAnalysis(bool updateInternalOnly)
* This function returns true if any new interferences were added
* and returns false if no new interference were added
*/
-bool Compiler::fgMarkIntf(VARSET_VALARG_TP varSet1,
- VARSET_VALARG_TP varSet2)
+bool Compiler::fgMarkIntf(VARSET_VALARG_TP varSet1, VARSET_VALARG_TP varSet2)
{
#ifdef LEGACY_BACKEND
/* If either set has no bits set (or we are not optimizing), take an early out */
- if (VarSetOps::IsEmpty(this, varSet2) ||
- VarSetOps::IsEmpty(this, varSet1) ||
- opts.MinOpts())
+ if (VarSetOps::IsEmpty(this, varSet2) || VarSetOps::IsEmpty(this, varSet1) || opts.MinOpts())
{
return false;
}
- bool addedIntf = false; // This is set to true if we add any new interferences
+ bool addedIntf = false; // This is set to true if we add any new interferences
VarSetOps::Assign(this, fgMarkIntfUnionVS, varSet1);
VarSetOps::UnionD(this, fgMarkIntfUnionVS, varSet2);
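Under LEGACY_BACKEND, fgMarkIntf records that every variable in varSet1 interferes with every variable in varSet2 by OR-ing newly discovered bits into per-variable interference sets (lvaVarIntf), and reports whether anything new was added. A rough stand-alone equivalent over std::bitset, assuming a fixed tracked-variable limit:

#include <bitset>
#include <vector>

constexpr size_t kTracked = 128;                 // assumed tracked-variable limit
using VarSet = std::bitset<kTracked>;

// intf[i] holds the set of variables known to interfere with variable i,
// playing the role of lvaVarIntf[] in the legacy backend.
bool markIntf(std::vector<VarSet>& intf, const VarSet& set1, const VarSet& set2)
{
    bool added = false;
    for (size_t i = 0; i < kTracked; i++)
    {
        if (set1.test(i))
        {
            VarSet newIntf = set2 & ~intf[i];    // interferences not yet recorded for i
            if (newIntf.any())
            {
                intf[i] |= newIntf;
                added = true;
            }
        }
        if (set2.test(i))
        {
            VarSet newIntf = set1 & ~intf[i];
            if (newIntf.any())
            {
                intf[i] |= newIntf;
                added = true;
            }
        }
    }
    return added;
}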
@@ -1393,8 +1393,7 @@ bool Compiler::fgMarkIntf(VARSET_VALARG_TP varSet1,
if (VarSetOps::IsMember(this, varSet1, refIndex))
{
// Calculate the set of new interference to add
- VARSET_TP VARSET_INIT_NOCOPY(newIntf,
- VarSetOps::Diff(this, varSet2, lvaVarIntf[refIndex]));
+ VARSET_TP VARSET_INIT_NOCOPY(newIntf, VarSetOps::Diff(this, varSet2, lvaVarIntf[refIndex]));
if (!VarSetOps::IsEmpty(this, newIntf))
{
addedIntf = true;
@@ -1406,8 +1405,7 @@ bool Compiler::fgMarkIntf(VARSET_VALARG_TP varSet1,
if (VarSetOps::IsMember(this, varSet2, refIndex))
{
// Calculate the set of new interference to add
- VARSET_TP VARSET_INIT_NOCOPY(newIntf,
- VarSetOps::Diff(this, varSet1, lvaVarIntf[refIndex]));
+ VARSET_TP VARSET_INIT_NOCOPY(newIntf, VarSetOps::Diff(this, varSet1, lvaVarIntf[refIndex]));
if (!VarSetOps::IsEmpty(this, newIntf))
{
addedIntf = true;
@@ -1432,14 +1430,14 @@ bool Compiler::fgMarkIntf(VARSET_VALARG_TP varSet1,
* and returns false if no new interference were added
*/
-bool Compiler::fgMarkIntf(VARSET_VALARG_TP varSet)
+bool Compiler::fgMarkIntf(VARSET_VALARG_TP varSet)
{
#ifdef LEGACY_BACKEND
/* No bits set or we are not optimizing, take an early out */
if (VarSetOps::IsEmpty(this, varSet) || opts.MinOpts())
return false;
- bool addedIntf = false; // This is set to true if we add any new interferences
+ bool addedIntf = false; // This is set to true if we add any new interferences
VARSET_ITER_INIT(this, iter, varSet, refIndex);
while (iter.NextElem(this, &refIndex))
@@ -1454,7 +1452,7 @@ bool Compiler::fgMarkIntf(VARSET_VALARG_TP varSet)
}
return addedIntf;
-#else // !LEGACY_BACKEND
+#else // !LEGACY_BACKEND
return false;
#endif // !LEGACY_BACKEND
}
@@ -1463,16 +1461,13 @@ bool Compiler::fgMarkIntf(VARSET_VALARG_TP varSet)
* For updating liveset during traversal AFTER fgComputeLife has completed
*/
-VARSET_VALRET_TP Compiler::fgUpdateLiveSet(VARSET_VALARG_TP liveSet,
- GenTreePtr tree)
+VARSET_VALRET_TP Compiler::fgUpdateLiveSet(VARSET_VALARG_TP liveSet, GenTreePtr tree)
{
VARSET_TP VARSET_INIT(this, newLiveSet, liveSet);
assert(fgLocalVarLivenessDone == true);
GenTreePtr lclVarTree = tree; // After the tests below, "lclVarTree" will be the local variable.
- if (tree->gtOper == GT_LCL_VAR ||
- tree->gtOper == GT_LCL_FLD ||
- tree->gtOper == GT_REG_VAR ||
- (lclVarTree = fgIsIndirOfAddrOfLocal(tree)) != NULL)
+ if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_FLD || tree->gtOper == GT_REG_VAR ||
+ (lclVarTree = fgIsIndirOfAddrOfLocal(tree)) != nullptr)
{
VARSET_TP VARSET_INIT_NOCOPY(varBits, fgGetVarBits(lclVarTree));
@@ -1486,9 +1481,8 @@ VARSET_VALRET_TP Compiler::fgUpdateLiveSet(VARSET_VALARG_TP liveSet,
// We maintain the invariant that if the lclVarTree is a promoted struct, but the
// the lookup fails, then all the field vars (i.e., "varBits") are dying.
- VARSET_TP* deadVarBits = NULL;
- if (varTypeIsStruct(lclVarTree) &&
- GetPromotedStructDeathVars()->Lookup(lclVarTree, &deadVarBits))
+ VARSET_TP* deadVarBits = nullptr;
+ if (varTypeIsStruct(lclVarTree) && GetPromotedStructDeathVars()->Lookup(lclVarTree, &deadVarBits))
{
VarSetOps::DiffD(this, newLiveSet, *deadVarBits);
}
@@ -1497,8 +1491,7 @@ VARSET_VALRET_TP Compiler::fgUpdateLiveSet(VARSET_VALARG_TP liveSet,
VarSetOps::DiffD(this, newLiveSet, varBits);
}
}
- else if ((tree->gtFlags & GTF_VAR_DEF) != 0 &&
- (tree->gtFlags & GTF_VAR_USEASG) == 0)
+ else if ((tree->gtFlags & GTF_VAR_DEF) != 0 && (tree->gtFlags & GTF_VAR_USEASG) == 0)
{
assert(tree == lclVarTree); // LDOBJ case should only be a use.
@@ -1508,9 +1501,9 @@ VARSET_VALRET_TP Compiler::fgUpdateLiveSet(VARSET_VALARG_TP liveSet,
// Could add a check that, if it's in the newLiveSet, that it's also in
// fgGetHandlerLiveVars(compCurBB), but seems excessive
//
- assert(VarSetOps::IsEmptyIntersection(this, newLiveSet, varBits) ||
- opts.compDbgCode || lvaTable[tree->gtLclVarCommon.gtLclNum].lvAddrExposed ||
- (compCurBB != NULL && ehBlockHasExnFlowDsc(compCurBB)));
+ assert(VarSetOps::IsEmptyIntersection(this, newLiveSet, varBits) || opts.compDbgCode ||
+ lvaTable[tree->gtLclVarCommon.gtLclNum].lvAddrExposed ||
+ (compCurBB != nullptr && ehBlockHasExnFlowDsc(compCurBB)));
VarSetOps::UnionD(this, newLiveSet, varBits);
}
}
@@ -1576,21 +1569,21 @@ void Compiler::fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call)
if (frameVarDsc->lvTracked)
{
- unsigned varIndex = frameVarDsc->lvVarIndex;
+ unsigned varIndex = frameVarDsc->lvVarIndex;
noway_assert(varIndex < lvaTrackedCount);
// Is the variable already known to be alive?
//
- if (VarSetOps::IsMember(this, life, varIndex))
+ if (VarSetOps::IsMember(this, life, varIndex))
{
// Since we may call this multiple times, clear the GTF_CALL_M_FRAME_VAR_DEATH if set.
//
- call->gtCallMoreFlags &= ~GTF_CALL_M_FRAME_VAR_DEATH;
+ call->gtCallMoreFlags &= ~GTF_CALL_M_FRAME_VAR_DEATH;
}
else
{
// The variable is just coming to life
- // Since this is a backwards walk of the trees
+ // Since this is a backwards walk of the trees
// that makes this change in liveness a 'last-use'
//
VarSetOps::AddElemD(this, life, varIndex);
@@ -1612,23 +1605,25 @@ void Compiler::fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call)
// mark it volatile to prevent if from being enregistered
// across the unmanaged call.
- unsigned lclNum;
+ unsigned lclNum;
LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
/* Ignore the variable if it's not tracked */
- if (!varDsc->lvTracked)
+ if (!varDsc->lvTracked)
+ {
continue;
+ }
- unsigned varNum = varDsc->lvVarIndex;
+ unsigned varNum = varDsc->lvVarIndex;
/* Ignore the variable if it's not live here */
- if (!VarSetOps::IsMember(this, life, varDsc->lvVarIndex))
+ if (!VarSetOps::IsMember(this, life, varDsc->lvVarIndex))
+ {
continue;
+ }
// If it is a GC-ref type then mark it DoNotEnregister.
if (varTypeIsGC(varDsc->TypeGet()))
@@ -1666,18 +1661,18 @@ bool Compiler::fgComputeLifeLocal(VARSET_TP& life, VARSET_TP& keepAliveVars, Gen
noway_assert(lclNum < lvaCount);
LclVarDsc* varDsc = &lvaTable[lclNum];
- unsigned varIndex;
- VARSET_TP varBit;
+ unsigned varIndex;
+ VARSET_TP varBit;
// Is this a tracked variable?
- if (varDsc->lvTracked)
+ if (varDsc->lvTracked)
{
varIndex = varDsc->lvVarIndex;
noway_assert(varIndex < lvaTrackedCount);
/* Is this a definition or use? */
- if (lclVarNode->gtFlags & GTF_VAR_DEF)
+ if (lclVarNode->gtFlags & GTF_VAR_DEF)
{
/*
The variable is being defined here. The variable
@@ -1692,7 +1687,7 @@ bool Compiler::fgComputeLifeLocal(VARSET_TP& life, VARSET_TP& keepAliveVars, Gen
"used" in such a construct) -> see below the case when x is live
*/
- if (VarSetOps::IsMember(this, life, varIndex))
+ if (VarSetOps::IsMember(this, life, varIndex))
{
/* The variable is live */
@@ -1705,12 +1700,13 @@ bool Compiler::fgComputeLifeLocal(VARSET_TP& life, VARSET_TP& keepAliveVars, Gen
VarSetOps::RemoveElemD(this, life, varIndex);
}
#ifdef DEBUG
- if (verbose&&0)
+ if (verbose && 0)
{
printf("Def V%02u,T%02u at ", lclNum, varIndex);
printTreeID(lclVarNode);
printf(" life %s -> %s\n",
- VarSetOps::ToString(this, VarSetOps::Union(this, life, VarSetOps::MakeSingleton(this, varIndex))),
+ VarSetOps::ToString(this, VarSetOps::Union(this, life,
+ VarSetOps::MakeSingleton(this, varIndex))),
VarSetOps::ToString(this, life));
}
#endif // DEBUG
@@ -1751,13 +1747,11 @@ bool Compiler::fgComputeLifeLocal(VARSET_TP& life, VARSET_TP& keepAliveVars, Gen
{
printf("Ref V%02u,T%02u] at ", lclNum, varIndex);
printTreeID(node);
- printf(" life %s -> %s\n",
- VarSetOps::ToString(this, life),
+ printf(" life %s -> %s\n", VarSetOps::ToString(this, life),
VarSetOps::ToString(this, VarSetOps::Union(this, life, varBit)));
}
#endif // DEBUG
-
// The variable is being used, and it is not currently live.
// So the variable is just coming to life
lclVarNode->gtFlags |= GTF_VAR_DEATH;
@@ -1778,9 +1772,7 @@ bool Compiler::fgComputeLifeLocal(VARSET_TP& life, VARSET_TP& keepAliveVars, Gen
{
VarSetOps::AssignNoCopy(this, varBit, VarSetOps::MakeEmpty(this));
- for (unsigned i = varDsc->lvFieldLclStart;
- i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
- ++i)
+ for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
#if !defined(_TARGET_64BIT_) && !defined(LEGACY_BACKEND)
if (!varTypeIsLong(lvaTable[i].lvType) || !lvaTable[i].lvPromoted)
@@ -1788,14 +1780,14 @@ bool Compiler::fgComputeLifeLocal(VARSET_TP& life, VARSET_TP& keepAliveVars, Gen
{
noway_assert(lvaTable[i].lvIsStructField);
}
- if (lvaTable[i].lvTracked)
+ if (lvaTable[i].lvTracked)
{
varIndex = lvaTable[i].lvVarIndex;
noway_assert(varIndex < lvaTrackedCount);
VarSetOps::AddElemD(this, varBit, varIndex);
}
}
- if (node->gtFlags & GTF_VAR_DEF)
+ if (node->gtFlags & GTF_VAR_DEF)
{
VarSetOps::DiffD(this, varBit, keepAliveVars);
VarSetOps::DiffD(this, life, varBit);
@@ -1806,7 +1798,7 @@ bool Compiler::fgComputeLifeLocal(VARSET_TP& life, VARSET_TP& keepAliveVars, Gen
// Are the variables already known to be alive?
if (VarSetOps::IsSubset(this, varBit, life))
{
- node->gtFlags &= ~GTF_VAR_DEATH; // Since we may now call this multiple times, reset if live.
+ node->gtFlags &= ~GTF_VAR_DEATH; // Since we may now call this multiple times, reset if live.
return false;
}
@@ -1845,19 +1837,18 @@ bool Compiler::fgComputeLifeLocal(VARSET_TP& life, VARSET_TP& keepAliveVars, Gen
*/
#ifndef LEGACY_BACKEND
-VARSET_VALRET_TP Compiler::fgComputeLife(VARSET_VALARG_TP lifeArg,
- GenTreePtr startNode,
- GenTreePtr endNode,
- VARSET_VALARG_TP volatileVars,
- bool* pStmtInfoDirty
- DEBUGARG(bool* treeModf))
+VARSET_VALRET_TP Compiler::fgComputeLife(VARSET_VALARG_TP lifeArg,
+ GenTreePtr startNode,
+ GenTreePtr endNode,
+ VARSET_VALARG_TP volatileVars,
+ bool* pStmtInfoDirty DEBUGARG(bool* treeModf))
{
- GenTreePtr tree;
- unsigned lclNum;
+ GenTreePtr tree;
+ unsigned lclNum;
VARSET_TP VARSET_INIT(this, life, lifeArg); // lifeArg is const ref; copy to allow modification.
- VARSET_TP VARSET_INIT(this, keepAliveVars, volatileVars);
+ VARSET_TP VARSET_INIT(this, keepAliveVars, volatileVars);
#ifdef DEBUGGING_SUPPORT
VarSetOps::UnionD(this, keepAliveVars, compCurBB->bbScope); // Don't kill vars in scope
#endif
@@ -1870,7 +1861,7 @@ VARSET_VALRET_TP Compiler::fgComputeLife(VARSET_VALARG_TP lifeArg,
// to use the result of an assignment node directly!
for (tree = startNode; tree != endNode; tree = tree->gtPrev)
{
-AGAIN:
+ AGAIN:
assert(tree->OperGet() != GT_QMARK);
if (tree->gtOper == GT_CALL)
@@ -1907,31 +1898,30 @@ AGAIN:
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-VARSET_VALRET_TP Compiler::fgComputeLife(VARSET_VALARG_TP lifeArg,
- GenTreePtr startNode,
- GenTreePtr endNode,
- VARSET_VALARG_TP volatileVars,
- bool* pStmtInfoDirty
- DEBUGARG(bool* treeModf))
+VARSET_VALRET_TP Compiler::fgComputeLife(VARSET_VALARG_TP lifeArg,
+ GenTreePtr startNode,
+ GenTreePtr endNode,
+ VARSET_VALARG_TP volatileVars,
+ bool* pStmtInfoDirty DEBUGARG(bool* treeModf))
{
- GenTreePtr tree;
- unsigned lclNum;
+ GenTreePtr tree;
+ unsigned lclNum;
- GenTreePtr gtQMark = NULL; // current GT_QMARK node (walking the trees backwards)
- GenTreePtr nextColonExit = 0; // gtQMark->gtOp.gtOp2 while walking the 'else' branch.
- // gtQMark->gtOp.gtOp1 while walking the 'then' branch
+ GenTreePtr gtQMark = NULL; // current GT_QMARK node (walking the trees backwards)
+ GenTreePtr nextColonExit = 0; // gtQMark->gtOp.gtOp2 while walking the 'else' branch.
+ // gtQMark->gtOp.gtOp1 while walking the 'then' branch
VARSET_TP VARSET_INIT(this, life, lifeArg); // lifeArg is const ref; copy to allow modification.
// TBD: This used to be an initialization to VARSET_NOT_ACCEPTABLE. Try to figure out what's going on here.
- VARSET_TP VARSET_INIT_NOCOPY(entryLiveSet, VarSetOps::MakeFull(this)); // liveness when we see gtQMark
- VARSET_TP VARSET_INIT_NOCOPY(gtColonLiveSet, VarSetOps::MakeFull(this)); // liveness when we see gtColon
- GenTreePtr gtColon = NULL;
+ VARSET_TP VARSET_INIT_NOCOPY(entryLiveSet, VarSetOps::MakeFull(this)); // liveness when we see gtQMark
+ VARSET_TP VARSET_INIT_NOCOPY(gtColonLiveSet, VarSetOps::MakeFull(this)); // liveness when we see gtColon
+ GenTreePtr gtColon = NULL;
- VARSET_TP VARSET_INIT(this, keepAliveVars, volatileVars);
+ VARSET_TP VARSET_INIT(this, keepAliveVars, volatileVars);
#ifdef DEBUGGING_SUPPORT
VarSetOps::UnionD(this, keepAliveVars, compCurBB->bbScope); /* Dont kill vars in scope */
#endif
@@ -1944,7 +1934,7 @@ VARSET_VALRET_TP Compiler::fgComputeLife(VARSET_VALARG_TP lifeArg,
for (tree = startNode; tree != endNode; tree = tree->gtPrev)
{
-AGAIN:
+ AGAIN:
/* For ?: nodes if we're done with the then branch, remember
* the liveness */
if (gtQMark && (tree == gtColon))
@@ -1961,8 +1951,8 @@ AGAIN:
noway_assert(tree->gtFlags & GTF_RELOP_QMARK);
noway_assert(gtQMark->gtOp.gtOp2->gtOper == GT_COLON);
- GenTreePtr thenNode = gtColon->AsColon()->ThenNode();
- GenTreePtr elseNode = gtColon->AsColon()->ElseNode();
+ GenTreePtr thenNode = gtColon->AsColon()->ThenNode();
+ GenTreePtr elseNode = gtColon->AsColon()->ElseNode();
noway_assert(thenNode && elseNode);
@@ -1978,10 +1968,11 @@ AGAIN:
noway_assert(gtColon->gtType == TYP_VOID);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("BB%02u - Removing dead QMark - Colon ...\n", compCurBB->bbNum);
- gtDispTree(gtQMark); printf("\n");
+ gtDispTree(gtQMark);
+ printf("\n");
}
#endif // DEBUG
@@ -2000,7 +1991,7 @@ AGAIN:
if (tree->gtFlags & GTF_SIDE_EFFECT)
{
- GenTreePtr sideEffList = NULL;
+ GenTreePtr sideEffList = NULL;
gtExtractSideEffList(tree, &sideEffList);
@@ -2008,10 +1999,11 @@ AGAIN:
{
noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Extracted side effects list from condition...\n");
- gtDispTree(sideEffList); printf("\n");
+ gtDispTree(sideEffList);
+ printf("\n");
}
#endif // DEBUG
fgUpdateRefCntForExtract(tree, sideEffList);
@@ -2103,7 +2095,7 @@ AGAIN:
VarSetOps::UnionD(this, life, gtColonLiveSet);
-SKIP_QMARK:
+ SKIP_QMARK:
/* We are out of the parallel branches, the rest is sequential */
@@ -2127,12 +2119,11 @@ SKIP_QMARK:
GenTreePtr lclVarTree = nullptr;
if (tree->gtOper == GT_OBJ)
{
- // fgIsIndirOfAddrOfLocal returns nullptr if the tree is
+ // fgIsIndirOfAddrOfLocal returns nullptr if the tree is
// not an indir(addr(local)), in which case we will set lclVarTree
// back to the original tree, and not handle it as a use/def.
lclVarTree = fgIsIndirOfAddrOfLocal(tree);
- if ((lclVarTree != nullptr) &&
- lvaTable[lclVarTree->gtLclVarCommon.gtLclNum].lvTracked)
+ if ((lclVarTree != nullptr) && lvaTable[lclVarTree->gtLclVarCommon.gtLclNum].lvTracked)
{
lclVarTree = nullptr;
}
@@ -2211,10 +2202,10 @@ SKIP_QMARK:
* when the COLON branch of the enclosing QMARK ends */
noway_assert(nextColonExit &&
- (nextColonExit == gtQMark->gtOp.gtOp1 ||
- nextColonExit == gtQMark->gtOp.gtOp2));
+ (nextColonExit == gtQMark->gtOp.gtOp1 || nextColonExit == gtQMark->gtOp.gtOp2));
- VarSetOps::AssignNoCopy(this, life, fgComputeLife(life, tree, nextColonExit, volatileVars, pStmtInfoDirty DEBUGARG(treeModf)));
+ VarSetOps::AssignNoCopy(this, life, fgComputeLife(life, tree, nextColonExit, volatileVars,
+ pStmtInfoDirty DEBUGARG(treeModf)));
/* Continue with exit node (the last node in the enclosing colon branch) */
@@ -2223,7 +2214,7 @@ SKIP_QMARK:
}
else
{
- gtQMark = tree;
+ gtQMark = tree;
VarSetOps::Assign(this, entryLiveSet, life);
gtColon = gtQMark->gtOp.gtOp2;
nextColonExit = gtColon;
@@ -2264,12 +2255,13 @@ SKIP_QMARK:
//
// Returns: true if we should skip the rest of the statement, false if we should continue
-bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP life, bool *doAgain, bool* pStmtInfoDirty DEBUGARG(bool* treeModf))
+bool Compiler::fgRemoveDeadStore(
+ GenTree** pTree, LclVarDsc* varDsc, VARSET_TP life, bool* doAgain, bool* pStmtInfoDirty DEBUGARG(bool* treeModf))
{
- GenTree* asgNode = nullptr;
- GenTree* rhsNode = nullptr;
- GenTree* addrNode = nullptr;
- GenTree* const tree = *pTree;
+ GenTree* asgNode = nullptr;
+ GenTree* rhsNode = nullptr;
+ GenTree* addrNode = nullptr;
+ GenTree* const tree = *pTree;
GenTree* nextNode = tree->gtNext;
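The test fgRemoveDeadStore applies boils down to: a store to a tracked local that is not live immediately after the store, and is not kept alive by scope or handler liveness, is dead; it can be removed outright when the RHS has no side effects, otherwise only its extracted side effects are kept. A compressed sketch of that decision, with hypothetical stand-ins for the GenTree shapes:

#include <set>

struct Store
{
    unsigned varIndex;          // tracked index of the local being assigned
    bool     rhsHasSideEffects; // GTF_SIDE_EFFECT on the value being stored
};

enum class DeadStoreAction { Keep, ExtractSideEffectsThenRemove, RemoveWholeStatement };

// 'life' is liveness immediately after the store; 'keepAlive' models keepAliveVars
// (vars in scope or live into handlers) that must not be killed.
DeadStoreAction classifyStore(const Store& st, const std::set<unsigned>& life,
                              const std::set<unsigned>& keepAlive)
{
    if (life.count(st.varIndex) != 0 || keepAlive.count(st.varIndex) != 0)
    {
        return DeadStoreAction::Keep; // the stored value is (or must stay) live
    }
    return st.rhsHasSideEffects ? DeadStoreAction::ExtractSideEffectsThenRemove
                                : DeadStoreAction::RemoveWholeStatement;
}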
@@ -2279,7 +2271,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
rhsNode = tree->gtOp.gtOp1;
asgNode = tree;
}
- else if(tree->OperIsLocal())
+ else if (tree->OperIsLocal())
{
if (nextNode == nullptr)
{
@@ -2294,7 +2286,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
else
{
assert(tree->OperIsLocalAddr());
- addrNode = tree;
+ addrNode = tree;
}
// Next, find the assignment.
@@ -2311,17 +2303,17 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
{
return false;
}
- switch(nextNode->OperGet())
+ switch (nextNode->OperGet())
{
- default:
- break;
- case GT_IND:
- asgNode = nextNode->gtNext;
- break;
- case GT_STOREIND:
- asgNode = nextNode;
- break;
- case GT_LIST:
+ default:
+ break;
+ case GT_IND:
+ asgNode = nextNode->gtNext;
+ break;
+ case GT_STOREIND:
+ asgNode = nextNode;
+ break;
+ case GT_LIST:
{
GenTree* sizeNode = nextNode->gtNext;
if ((sizeNode == nullptr) || (sizeNode->OperGet() != GT_CNS_INT))
@@ -2367,25 +2359,24 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nChanging dead <asgop> ovf to <op> ovf...\n");
}
#endif // DEBUG
-
switch (asgNode->gtOper)
{
- case GT_ASG_ADD:
- asgNode->gtOper = GT_ADD;
- break;
- case GT_ASG_SUB:
- asgNode->gtOper = GT_SUB;
- break;
- default:
- // Only add and sub allowed, we don't have ASG_MUL and ASG_DIV for ints, and
- // floats don't allow OVF forms.
- noway_assert(!"Unexpected ASG_OP");
+ case GT_ASG_ADD:
+ asgNode->gtOper = GT_ADD;
+ break;
+ case GT_ASG_SUB:
+ asgNode->gtOper = GT_SUB;
+ break;
+ default:
+ // Only add and sub allowed, we don't have ASG_MUL and ASG_DIV for ints, and
+ // floats don't allow OVF forms.
+ noway_assert(!"Unexpected ASG_OP");
}
asgNode->gtFlags &= ~GTF_REVERSE_OPS;
@@ -2439,7 +2430,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
/* Test for interior statement */
- if (asgNode->gtNext == 0)
+ if (asgNode->gtNext == nullptr)
{
/* This is a "NORMAL" statement with the
* assignment node hanging from the GT_STMT node */
@@ -2452,17 +2443,20 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
if (rhsNode->gtFlags & GTF_SIDE_EFFECT)
{
if (compRationalIRForm)
+ {
return false;
+ }
EXTRACT_SIDE_EFFECTS:
/* Extract the side effects */
- GenTreePtr sideEffList = NULL;
+ GenTreePtr sideEffList = nullptr;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("BB%02u - Dead assignment has side effects...\n", compCurBB->bbNum);
- gtDispTree(asgNode); printf("\n");
+ gtDispTree(asgNode);
+ printf("\n");
}
#endif // DEBUG
gtExtractSideEffList(rhsNode, &sideEffList);
@@ -2471,10 +2465,11 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
{
noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Extracted side effects list...\n");
- gtDispTree(sideEffList); printf("\n");
+ gtDispTree(sideEffList);
+ printf("\n");
}
#endif // DEBUG
fgUpdateRefCntForExtract(asgNode, sideEffList);
@@ -2513,20 +2508,23 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
/* If this is GT_CATCH_ARG saved to a local var don't bother */
JITDUMP("removing stmt with no side effects\n");
-
+
if (asgNode->gtFlags & GTF_ORDER_SIDEEFF)
{
if (rhsNode->gtOper == GT_CATCH_ARG)
+ {
goto EXTRACT_SIDE_EFFECTS;
+ }
}
- // If there is an embedded statement this could be tricky because we need to
+ // If there is an embedded statement this could be tricky because we need to
// walk them next, and we have already skipped over them because they were
// not top level (but will be if we delete the top level statement)
- if (compCurStmt->gtStmt.gtNextStmt &&
- !compCurStmt->gtStmt.gtNextStmt->gtStmtIsTopLevel())
+ if (compCurStmt->gtStmt.gtNextStmt && !compCurStmt->gtStmt.gtNextStmt->gtStmtIsTopLevel())
+ {
return false;
-
+ }
+
/* No side effects - remove the whole statement from the block->bbTreeList */
fgRemoveStmt(compCurBB, compCurStmt);
@@ -2544,7 +2542,9 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
// don't want to deal with this
if (compRationalIRForm)
+ {
return false;
+ }
noway_assert(!VarSetOps::IsMember(this, life, varDsc->lvVarIndex));
@@ -2556,25 +2556,29 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
{
/* :-( we have side effects */
- GenTreePtr sideEffList = NULL;
+ GenTreePtr sideEffList = nullptr;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("BB%02u - INTERIOR dead assignment has side effects...\n", compCurBB->bbNum);
- gtDispTree(asgNode); printf("\n");
+ gtDispTree(asgNode);
+ printf("\n");
}
#endif // DEBUG
gtExtractSideEffList(rhsNode, &sideEffList);
if (!sideEffList)
+ {
goto NO_SIDE_EFFECTS;
+ }
noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Extracted side effects list from condition...\n");
- gtDispTree(sideEffList); printf("\n");
+ gtDispTree(sideEffList);
+ printf("\n");
}
#endif // DEBUG
if (sideEffList->gtOper == asgNode->gtOper)
@@ -2625,7 +2629,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
}
#endif // DEBUG
/* No side effects - Remove the interior statement */
- fgUpdateRefCntForExtract(asgNode, NULL);
+ fgUpdateRefCntForExtract(asgNode, nullptr);
/* Change the assignment to a GT_NOP node */
@@ -2635,7 +2639,9 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
DISPTREE(rhsNode);
fgDeleteTreeFromList(compCurStmt->AsStmt(), rhsNode);
if (tree->gtOper == GT_STOREIND)
+ {
fgDeleteTreeFromList(compCurStmt->AsStmt(), asgNode->gtOp.gtOp1);
+ }
}
asgNode->gtBashToNOP();
@@ -2650,7 +2656,7 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
if (!compRationalIRForm)
{
// Do not update costs by calling gtSetStmtInfo. fgSetStmtSeq modifies
- // the tree threading based on the new costs. Removing nodes could
+ // the tree threading based on the new costs. Removing nodes could
// cause a subtree to get evaluated first (earlier second) during the
// liveness walk. Instead just set a flag that costs are dirty and
// caller has to call gtSetStmtInfo.
@@ -2674,11 +2680,13 @@ bool Compiler::fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_TP l
* Iterative data flow for live variable info and availability of range
* check index expressions.
*/
-void Compiler::fgInterBlockLocalVarLiveness()
+void Compiler::fgInterBlockLocalVarLiveness()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In fgInterBlockLocalVarLiveness()\n");
+ }
#endif
/* This global flag is set whenever we remove a statement */
@@ -2692,7 +2700,7 @@ void Compiler::fgInterBlockLocalVarLiveness()
fgLiveVarAnalysis();
- //-------------------------------------------------------------------------
+//-------------------------------------------------------------------------
#ifdef DEBUGGING_SUPPORT
@@ -2711,22 +2719,22 @@ void Compiler::fgInterBlockLocalVarLiveness()
* Variables involved in exception-handlers and finally blocks need
* to be specially marked
*/
- BasicBlock* block;
+ BasicBlock* block;
- VARSET_TP VARSET_INIT_NOCOPY(exceptVars, VarSetOps::MakeEmpty(this)); // vars live on entry to a handler
- VARSET_TP VARSET_INIT_NOCOPY(finallyVars, VarSetOps::MakeEmpty(this)); // vars live on exit of a 'finally' block
- VARSET_TP VARSET_INIT_NOCOPY(filterVars, VarSetOps::MakeEmpty(this)); // vars live on exit from a 'filter'
+ VARSET_TP VARSET_INIT_NOCOPY(exceptVars, VarSetOps::MakeEmpty(this)); // vars live on entry to a handler
+ VARSET_TP VARSET_INIT_NOCOPY(finallyVars, VarSetOps::MakeEmpty(this)); // vars live on exit of a 'finally' block
+ VARSET_TP VARSET_INIT_NOCOPY(filterVars, VarSetOps::MakeEmpty(this)); // vars live on exit from a 'filter'
for (block = fgFirstBB; block; block = block->bbNext)
{
- if (block->bbCatchTyp != BBCT_NONE)
+ if (block->bbCatchTyp != BBCT_NONE)
{
/* Note the set of variables live on entry to exception handler */
VarSetOps::UnionD(this, exceptVars, block->bbLiveIn);
}
- if (block->bbJumpKind == BBJ_EHFILTERRET)
+ if (block->bbJumpKind == BBJ_EHFILTERRET)
{
/* Get the set of live variables on exit from a 'filter' */
VarSetOps::UnionD(this, filterVars, block->bbLiveOut);
@@ -2745,36 +2753,36 @@ void Compiler::fgInterBlockLocalVarLiveness()
{
VarSetOps::UnionD(this, exceptVars, block->bbLiveIn);
}
- if ((block->bbJumpKind == BBJ_EHFINALLYRET) ||
- (block->bbJumpKind == BBJ_EHFILTERRET) ||
- (block->bbJumpKind == BBJ_EHCATCHRET) )
+ if ((block->bbJumpKind == BBJ_EHFINALLYRET) || (block->bbJumpKind == BBJ_EHFILTERRET) ||
+ (block->bbJumpKind == BBJ_EHCATCHRET))
{
VarSetOps::UnionD(this, exceptVars, block->bbLiveOut);
}
#endif // FEATURE_EH_FUNCLETS
}
- LclVarDsc* varDsc;
- unsigned varNum;
+ LclVarDsc* varDsc;
+ unsigned varNum;
- for (varNum = 0, varDsc = lvaTable;
- varNum < lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++)
{
/* Ignore the variable if it's not tracked */
- if (!varDsc->lvTracked)
+ if (!varDsc->lvTracked)
+ {
continue;
+ }
if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
+ {
continue;
+ }
/* Un-init locals may need auto-initialization. Note that the
liveness of such locals will bubble to the top (fgFirstBB)
in fgInterBlockLocalVarLiveness() */
- if (!varDsc->lvIsParam &&
- VarSetOps::IsMember(this, fgFirstBB->bbLiveIn, varDsc->lvVarIndex) &&
+ if (!varDsc->lvIsParam && VarSetOps::IsMember(this, fgFirstBB->bbLiveIn, varDsc->lvVarIndex) &&
(info.compInitMem || varTypeIsGC(varDsc->TypeGet())))
{
varDsc->lvMustInit = true;
@@ -2783,8 +2791,8 @@ void Compiler::fgInterBlockLocalVarLiveness()
// Mark all variables that are live on entry to an exception handler
// or on exit from a filter handler or finally as DoNotEnregister */
- if (VarSetOps::IsMember(this, exceptVars, varDsc->lvVarIndex) ||
- VarSetOps::IsMember(this, filterVars, varDsc->lvVarIndex))
+ if (VarSetOps::IsMember(this, exceptVars, varDsc->lvVarIndex) ||
+ VarSetOps::IsMember(this, filterVars, varDsc->lvVarIndex))
{
/* Mark the variable appropriately */
lvaSetVarDoNotEnregister(varNum DEBUGARG(DNER_LiveInOutOfHandler));
@@ -2794,17 +2802,21 @@ void Compiler::fgInterBlockLocalVarLiveness()
block as either volatile for non-GC ref types or as
'explicitly initialized' (volatile and must-init) for GC-ref types */
- if (VarSetOps::IsMember(this, finallyVars, varDsc->lvVarIndex))
+ if (VarSetOps::IsMember(this, finallyVars, varDsc->lvVarIndex))
{
lvaSetVarDoNotEnregister(varNum DEBUGARG(DNER_LiveInOutOfHandler));
/* Don't set lvMustInit unless we have a non-arg, GC pointer */
- if (varDsc->lvIsParam)
+ if (varDsc->lvIsParam)
+ {
continue;
+ }
- if (!varTypeIsGC(varDsc->TypeGet()))
+ if (!varTypeIsGC(varDsc->TypeGet()))
+ {
continue;
+ }
/* Mark it */
varDsc->lvMustInit = true;
@@ -2822,14 +2834,14 @@ void Compiler::fgInterBlockLocalVarLiveness()
{
/* Tell everyone what block we're working on */
- compCurBB = block;
+ compCurBB = block;
/* Remember those vars live on entry to exception handlers */
/* if we are part of a try block */
VARSET_TP VARSET_INIT_NOCOPY(volatileVars, VarSetOps::MakeEmpty(this));
- if (ehBlockHasExnFlowDsc(block))
+ if (ehBlockHasExnFlowDsc(block))
{
VarSetOps::Assign(this, volatileVars, fgGetHandlerLiveVars(block));
@@ -2847,14 +2859,16 @@ void Compiler::fgInterBlockLocalVarLiveness()
/* Get the first statement in the block */
- GenTreePtr firstStmt = block->FirstNonPhiDef();
+ GenTreePtr firstStmt = block->FirstNonPhiDef();
if (!firstStmt)
+ {
continue;
+ }
/* Walk all the statements of the block backwards - Get the LAST stmt */
- GenTreePtr nextStmt = block->bbTreeList->gtPrev;
+ GenTreePtr nextStmt = block->bbTreeList->gtPrev;
do
{
@@ -2865,16 +2879,18 @@ void Compiler::fgInterBlockLocalVarLiveness()
noway_assert(nextStmt->gtOper == GT_STMT);
compCurStmt = nextStmt;
- nextStmt = nextStmt->gtPrev;
-
+ nextStmt = nextStmt->gtPrev;
if (!compCurStmt->gtStmt.gtStmtIsTopLevel())
+ {
continue;
+ }
/* Compute the liveness for each tree node in the statement */
bool stmtInfoDirty = false;
- VarSetOps::AssignNoCopy(this, life, fgComputeLife(life, compCurStmt->gtStmt.gtStmtExpr, NULL, volatileVars, &stmtInfoDirty DEBUGARG(&treeModf)));
+ VarSetOps::AssignNoCopy(this, life, fgComputeLife(life, compCurStmt->gtStmt.gtStmtExpr, nullptr,
+ volatileVars, &stmtInfoDirty DEBUGARG(&treeModf)));
if (stmtInfoDirty)
{
@@ -2890,8 +2906,7 @@ void Compiler::fgInterBlockLocalVarLiveness()
printf("\n");
}
#endif // DEBUG
- }
- while (compCurStmt != firstStmt);
+ } while (compCurStmt != firstStmt);
/* Done with the current block - if we removed any statements, some
* variables may have become dead at the beginning of the block
@@ -2913,12 +2928,11 @@ void Compiler::fgInterBlockLocalVarLiveness()
VarSetOps::Assign(this, block->bbLiveIn, life);
/* compute the new bbLiveOut for all the predecessors of this block */
-
}
noway_assert(compCurBB == block);
#ifdef DEBUG
- compCurBB = 0;
+ compCurBB = nullptr;
#endif
}
@@ -2929,26 +2943,26 @@ void Compiler::fgInterBlockLocalVarLiveness()
/*****************************************************************************/
-void Compiler::fgDispBBLiveness(BasicBlock* block)
+void Compiler::fgDispBBLiveness(BasicBlock* block)
{
VARSET_TP VARSET_INIT_NOCOPY(allVars, VarSetOps::Union(this, block->bbLiveIn, block->bbLiveOut));
- printf("BB%02u", block->bbNum);
- printf( " IN (%d)=", VarSetOps::Count(this, block->bbLiveIn));
- lvaDispVarSet(block->bbLiveIn, allVars);
+ printf("BB%02u", block->bbNum);
+ printf(" IN (%d)=", VarSetOps::Count(this, block->bbLiveIn));
+ lvaDispVarSet(block->bbLiveIn, allVars);
if (block->bbHeapLiveIn)
{
- printf( " + HEAP");
+ printf(" + HEAP");
}
printf("\n OUT(%d)=", VarSetOps::Count(this, block->bbLiveOut));
- lvaDispVarSet(block->bbLiveOut, allVars);
+ lvaDispVarSet(block->bbLiveOut, allVars);
if (block->bbHeapLiveOut)
{
- printf( " + HEAP");
+ printf(" + HEAP");
}
printf("\n\n");
}
-void Compiler::fgDispBBLiveness()
+void Compiler::fgDispBBLiveness()
{
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
diff --git a/src/jit/loopcloning.cpp b/src/jit/loopcloning.cpp
index f39c3e6658..8ce015e607 100644
--- a/src/jit/loopcloning.cpp
+++ b/src/jit/loopcloning.cpp
@@ -33,11 +33,12 @@ GenTreePtr LC_Array::ToGenTree(Compiler* comp)
if (type == Jagged)
{
// Create a a[i][j][k].length type node.
- GenTreePtr arr = comp->gtNewLclvNode(arrIndex->arrLcl, comp->lvaTable[arrIndex->arrLcl].lvType);
- int rank = GetDimRank();
+ GenTreePtr arr = comp->gtNewLclvNode(arrIndex->arrLcl, comp->lvaTable[arrIndex->arrLcl].lvType);
+ int rank = GetDimRank();
for (int i = 0; i < rank; ++i)
{
- arr = comp->gtNewIndexRef(TYP_REF, arr, comp->gtNewLclvNode(arrIndex->indLcls[i], comp->lvaTable[arrIndex->indLcls[i]].lvType));
+ arr = comp->gtNewIndexRef(TYP_REF, arr, comp->gtNewLclvNode(arrIndex->indLcls[i],
+ comp->lvaTable[arrIndex->indLcls[i]].lvType));
}
// If asked for arrlen invoke arr length operator.
if (oper == ArrLen)
@@ -74,22 +75,22 @@ GenTreePtr LC_Ident::ToGenTree(Compiler* comp)
// Convert to GenTree nodes.
switch (type)
{
- case Const:
+ case Const:
#ifdef _TARGET_64BIT_
- return comp->gtNewLconNode(constant);
+ return comp->gtNewLconNode(constant);
#else
- return comp->gtNewIconNode((ssize_t) constant);
+ return comp->gtNewIconNode((ssize_t)constant);
#endif
- case Var:
- return comp->gtNewLclvNode((unsigned) constant, comp->lvaTable[constant].lvType);
- case ArrLen:
- return arrLen.ToGenTree(comp);
- case Null:
- return comp->gtNewIconNode(0, TYP_REF);
- default:
- assert(!"Could not convert LC_Ident to GenTree");
- unreached();
- break;
+ case Var:
+ return comp->gtNewLclvNode((unsigned)constant, comp->lvaTable[constant].lvType);
+ case ArrLen:
+ return arrLen.ToGenTree(comp);
+ case Null:
+ return comp->gtNewIconNode(0, TYP_REF);
+ default:
+ assert(!"Could not convert LC_Ident to GenTree");
+ unreached();
+ break;
}
}
@@ -108,22 +109,21 @@ GenTreePtr LC_Expr::ToGenTree(Compiler* comp)
// Convert to GenTree nodes.
switch (type)
{
- case Ident:
- return ident.ToGenTree(comp);
- case IdentPlusConst:
+ case Ident:
+ return ident.ToGenTree(comp);
+ case IdentPlusConst:
#ifdef _TARGET_64BIT_
- return comp->gtNewOperNode(GT_ADD, TYP_LONG, ident.ToGenTree(comp), comp->gtNewLconNode(constant));
+ return comp->gtNewOperNode(GT_ADD, TYP_LONG, ident.ToGenTree(comp), comp->gtNewLconNode(constant));
#else
- return comp->gtNewOperNode(GT_ADD, TYP_INT, ident.ToGenTree(comp), comp->gtNewIconNode((ssize_t) constant));
+ return comp->gtNewOperNode(GT_ADD, TYP_INT, ident.ToGenTree(comp), comp->gtNewIconNode((ssize_t)constant));
#endif
- default:
- assert(!"Could not convert LC_Expr to GenTree");
- unreached();
- break;
+ default:
+ assert(!"Could not convert LC_Expr to GenTree");
+ unreached();
+ break;
}
}
-
//--------------------------------------------------------------------------------------------------
// ToGenTree - Convert a "condition" into a gentree node.
//
@@ -138,7 +138,6 @@ GenTreePtr LC_Condition::ToGenTree(Compiler* comp)
return comp->gtNewOperNode(oper, TYP_INT, op1.ToGenTree(comp), op2.ToGenTree(comp));
}
-
//--------------------------------------------------------------------------------------------------
// Evaluates - Evaluate a given loop cloning condition if it can be statically evaluated.
//
@@ -153,31 +152,31 @@ bool LC_Condition::Evaluates(bool* pResult)
{
switch (oper)
{
- case GT_EQ:
- case GT_GE:
- case GT_LE:
- // If op1 == op2 then equality should result in true.
- if (op1 == op2)
- {
- *pResult = true;
- return true;
- }
- break;
+ case GT_EQ:
+ case GT_GE:
+ case GT_LE:
+ // If op1 == op2 then equality should result in true.
+ if (op1 == op2)
+ {
+ *pResult = true;
+ return true;
+ }
+ break;
- case GT_GT:
- case GT_LT:
- case GT_NE:
- // If op1 == op2 then inequality should result in false.
- if (op1 == op2)
- {
- *pResult = false;
- return true;
- }
- break;
+ case GT_GT:
+ case GT_LT:
+ case GT_NE:
+ // If op1 == op2 then inequality should result in false.
+ if (op1 == op2)
+ {
+ *pResult = false;
+ return true;
+ }
+ break;
- default:
- // for all other 'oper' kinds, we will return false
- break;
+ default:
+ // for all other 'oper' kinds, we will return false
+ break;
}
return false;
}
@@ -211,7 +210,7 @@ bool LC_Condition::Combines(const LC_Condition& cond, LC_Condition* newCond)
return true;
}
else if ((oper == GT_LT || oper == GT_LE || oper == GT_GT || oper == GT_GE) &&
- GenTree::ReverseRelop(oper) == cond.oper && op1 == cond.op2 && op2 == cond.op1)
+ GenTree::ReverseRelop(oper) == cond.oper && op1 == cond.op2 && op2 == cond.op1)
{
*newCond = *this;
return true;
@@ -291,7 +290,6 @@ ExpandArrayStack<LC_Condition>* LoopCloneContext::EnsureConditions(unsigned loop
return conditions[loopNum];
}
-
//--------------------------------------------------------------------------------------------------
// GetConditions - Get the cloning conditions array for the loop, no allocation.
//
@@ -378,7 +376,8 @@ ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* LoopCloneContext::GetBlockCon
// Return Values:
// Return block conditions.
//
-ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* LoopCloneContext::EnsureBlockConditions(unsigned loopNum, unsigned condBlocks)
+ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* LoopCloneContext::EnsureBlockConditions(unsigned loopNum,
+ unsigned condBlocks)
{
if (blockConditions[loopNum] == nullptr)
{
@@ -407,7 +406,10 @@ void LoopCloneContext::PrintBlockConditions(unsigned loopNum)
JITDUMP("%d = {", i);
for (unsigned j = 0; j < ((*levelCond)[i])->Size(); ++j)
{
- if (j != 0) { JITDUMP(" & "); }
+ if (j != 0)
+ {
+ JITDUMP(" & ");
+ }
(*((*levelCond)[i]))[j].Print();
}
JITDUMP("}\n");
@@ -440,7 +442,7 @@ void LoopCloneContext::PrintBlockConditions(unsigned loopNum)
// "pAnyFalse" could be false if no other condition statically evaluates to "false".
void LoopCloneContext::EvaluateConditions(unsigned loopNum, bool* pAllTrue, bool* pAnyFalse DEBUGARG(bool verbose))
{
- bool allTrue = true;
+ bool allTrue = true;
bool anyFalse = false;
ExpandArrayStack<LC_Condition>& conds = *conditions[loopNum];
@@ -477,11 +479,10 @@ void LoopCloneContext::EvaluateConditions(unsigned loopNum, bool* pAllTrue, bool
}
JITDUMP("Evaluation result allTrue = %d, anyFalse = %d\n", allTrue, anyFalse);
- *pAllTrue = allTrue;
+ *pAllTrue = allTrue;
*pAnyFalse = anyFalse;
}
-
//--------------------------------------------------------------------------------------------------
// OptimizeConditions - Evaluate the loop cloning conditions statically, if they can be evaluated
// then optimize the "conditions" array accordingly.
@@ -534,7 +535,7 @@ void LoopCloneContext::OptimizeConditions(ExpandArrayStack<LC_Condition>& conds)
{
conds.Remove(j);
conds[i] = newCond;
- i = -1;
+ i = -1;
break;
}
}
@@ -647,7 +648,10 @@ void LoopCloneContext::PrintConditions(unsigned loopNum)
}
for (unsigned i = 0; i < conditions[loopNum]->Size(); ++i)
{
- if (i != 0) { JITDUMP(" & "); }
+ if (i != 0)
+ {
+ JITDUMP(" & ");
+ }
(*conditions[loopNum])[i].Print();
}
}
@@ -669,7 +673,10 @@ void LoopCloneContext::PrintConditions(unsigned loopNum)
// Return Values:
// None.
//
-void LoopCloneContext::CondToStmtInBlock(Compiler* comp, ExpandArrayStack<LC_Condition>& conds, BasicBlock* block, bool reverse)
+void LoopCloneContext::CondToStmtInBlock(Compiler* comp,
+ ExpandArrayStack<LC_Condition>& conds,
+ BasicBlock* block,
+ bool reverse)
{
noway_assert(conds.Size() > 0);
@@ -686,7 +693,7 @@ void LoopCloneContext::CondToStmtInBlock(Compiler* comp, ExpandArrayStack<LC_Con
// Add jmpTrue "cond == 0" to slow path.
GenTreePtr stmt = comp->fgNewStmtFromTree(comp->gtNewOperNode(GT_JTRUE, TYP_VOID, cond));
-
+
// Add stmt to the block.
comp->fgInsertStmtAtEnd(block, stmt);
@@ -699,7 +706,7 @@ void LoopCloneContext::CondToStmtInBlock(Compiler* comp, ExpandArrayStack<LC_Con
//
// Arguments:
// None.
-//
+//
// Operation:
// If level is 0, then just return the array base. Else return the index variable on dim 'level'
//
@@ -709,7 +716,10 @@ void LoopCloneContext::CondToStmtInBlock(Compiler* comp, ExpandArrayStack<LC_Con
unsigned LC_Deref::Lcl()
{
unsigned lvl = level;
- if (lvl == 0) return array.arrIndex->arrLcl;
+ if (lvl == 0)
+ {
+ return array.arrIndex->arrLcl;
+ }
lvl--;
return array.arrIndex->indLcls[lvl];
}
@@ -719,7 +729,7 @@ unsigned LC_Deref::Lcl()
//
// Arguments:
// None.
-//
+//
// Return Values:
// Return true if children are present.
//
@@ -734,7 +744,7 @@ bool LC_Deref::HasChildren()
// Arguments:
// conds An array of conditions for each level i.e., (level x conditions). This array will
// contain the conditions for the tree at the end of the method.
-//
+//
// Operation:
// level0 yields only (a != null) condition. All other levels yield two conditions:
// (level < a[...].length && a[...][level] != null)
@@ -747,23 +757,22 @@ void LC_Deref::DeriveLevelConditions(ExpandArrayStack<ExpandArrayStack<LC_Condit
if (level == 0)
{
// For level 0, just push (a != null).
- (*conds)[level]->Push(LC_Condition(GT_NE,
- LC_Expr(LC_Ident(Lcl(), LC_Ident::Var)), LC_Expr(LC_Ident(LC_Ident::Null))));
+ (*conds)[level]->Push(
+ LC_Condition(GT_NE, LC_Expr(LC_Ident(Lcl(), LC_Ident::Var)), LC_Expr(LC_Ident(LC_Ident::Null))));
}
else
{
// Adjust for level0 having just 1 condition and push condition (i < a.len).
LC_Array arrLen = array;
- arrLen.oper = LC_Array::ArrLen;
- arrLen.dim = level - 1;
- (*conds)[level * 2 - 1]->Push(LC_Condition(GT_LT,
- LC_Expr(LC_Ident(Lcl(), LC_Ident::Var)), LC_Expr(LC_Ident(arrLen))));
-
+ arrLen.oper = LC_Array::ArrLen;
+ arrLen.dim = level - 1;
+ (*conds)[level * 2 - 1]->Push(
+ LC_Condition(GT_LT, LC_Expr(LC_Ident(Lcl(), LC_Ident::Var)), LC_Expr(LC_Ident(arrLen))));
+
// Push condition (a[i] != null)
LC_Array arrTmp = array;
- arrTmp.dim = level;
- (*conds)[level * 2]->Push(LC_Condition(GT_NE,
- LC_Expr(LC_Ident(arrTmp)), LC_Expr(LC_Ident(LC_Ident::Null))));
+ arrTmp.dim = level;
+ (*conds)[level * 2]->Push(LC_Condition(GT_NE, LC_Expr(LC_Ident(arrTmp)), LC_Expr(LC_Ident(LC_Ident::Null))));
}
// Invoke on the children recursively.
@@ -781,7 +790,7 @@ void LC_Deref::DeriveLevelConditions(ExpandArrayStack<ExpandArrayStack<LC_Condit
//
// Arguments:
// alloc IAllocator instance
-//
+//
// Return Values:
// None
//
@@ -798,7 +807,7 @@ void LC_Deref::EnsureChildren(IAllocator* alloc)
//
// Arguments:
// lcl the local to find in the children array
-//
+//
// Return Values:
// The child node if found or nullptr.
//
@@ -813,7 +822,7 @@ LC_Deref* LC_Deref::Find(unsigned lcl)
// Arguments:
// lcl the local to find.
// children the list of nodes to find the node representing the lcl.
-//
+//
// Return Values:
// The node if found or nullptr.
//
@@ -834,4 +843,3 @@ LC_Deref* LC_Deref::Find(ExpandArrayStack<LC_Deref*>* children, unsigned lcl)
}
return nullptr;
}
-
diff --git a/src/jit/loopcloning.h b/src/jit/loopcloning.h
index faa6a0a1c2..40793afcf1 100644
--- a/src/jit/loopcloning.h
+++ b/src/jit/loopcloning.h
@@ -24,7 +24,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
i) The array index is stored in the "context" variable with
additional block, tree, stmt info.
- Once the optimization candidates are identified, we derive cloning conditions
- For ex: to clone a simple "for (i=0; i<n; ++i) { a[i] }" loop, we need the
+ For ex: to clone a simple "for (i=0; i<n; ++i) { a[i] }" loop, we need the
following conditions:
(a != null) && ((n >= 0) & (n <= a.length) & (stride > 0))
a) Note the short circuit AND for (a != null). These are called block
@@ -55,7 +55,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
GenTree and added to the loop cloning choice block.
Preconditions
- - Loop detection should have completed and the loop table should be
+ - Loop detection should have completed and the loop table should be
populated with the loop dscs.
- The loops that will be considered are the ones with the LPFLG_ITER
marked on them.
@@ -80,7 +80,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
bitwise AND operations.
- Perform short circuit AND for (array != null) side effect check
before hoisting (limit <= a.length) check.
- For ex: to clone a simple "for (i=0; i<n; ++i) { a[i] }" loop, we need the
+ For ex: to clone a simple "for (i=0; i<n; ++i) { a[i] }" loop, we need the
following conditions:
(a != null) && ((n >= 0) & (n <= a.length) & (stride > 0))
@@ -99,19 +99,15 @@ class Compiler;
*/
struct ArrIndex
{
- unsigned arrLcl; // The array base local num
- ExpandArrayStack<unsigned> indLcls; // The indices local nums
- ExpandArrayStack<GenTree*> bndsChks; // The bounds checks nodes along each dimension.
- unsigned rank; // Rank of the array
- BasicBlock* useBlock; // Block where the [] occurs
-
- ArrIndex(IAllocator* alloc)
- : arrLcl(BAD_VAR_NUM)
- , indLcls(alloc)
- , bndsChks(alloc)
- , rank(0)
- , useBlock(nullptr)
- {}
+ unsigned arrLcl; // The array base local num
+ ExpandArrayStack<unsigned> indLcls; // The indices local nums
+ ExpandArrayStack<GenTree*> bndsChks; // The bounds checks nodes along each dimension.
+ unsigned rank; // Rank of the array
+ BasicBlock* useBlock; // Block where the [] occurs
+
+ ArrIndex(IAllocator* alloc) : arrLcl(BAD_VAR_NUM), indLcls(alloc), bndsChks(alloc), rank(0), useBlock(nullptr)
+ {
+ }
#ifdef DEBUG
void Print(unsigned dim = -1)
@@ -125,16 +121,15 @@ struct ArrIndex
#endif
};
-
// Forward declarations
#define LC_OPT(en) struct en##OptInfo;
#include "loopcloningopts.h"
/**
*
- * LcOptInfo represents the optimization information for loop cloning,
+ * LcOptInfo represents the optimization information for loop cloning,
* other classes are supposed to derive from this base class.
- *
+ *
* Example usage:
* LcMdArrayOptInfo is multi-dimensional array optimization for which the
* loop can be cloned.
@@ -154,19 +149,22 @@ struct LcOptInfo
#include "loopcloningopts.h"
};
- void* optInfo;
+ void* optInfo;
OptType optType;
- LcOptInfo(void* optInfo, OptType optType)
- : optInfo(optInfo)
- , optType(optType) {}
+ LcOptInfo(void* optInfo, OptType optType) : optInfo(optInfo), optType(optType)
+ {
+ }
- OptType GetOptType() { return optType; }
+ OptType GetOptType()
+ {
+ return optType;
+ }
#undef LC_OPT
-#define LC_OPT(en) \
- en##OptInfo* As##en##OptInfo() \
- { \
- assert(optType == en); \
- return reinterpret_cast<en##OptInfo*>(this); \
+#define LC_OPT(en) \
+ en##OptInfo* As##en##OptInfo() \
+ { \
+ assert(optType == en); \
+ return reinterpret_cast<en##OptInfo*>(this); \
}
#include "loopcloningopts.h"
};
@@ -177,23 +175,22 @@ struct LcOptInfo
*/
struct LcMdArrayOptInfo : public LcOptInfo
{
- GenTreeArrElem* arrElem; // "arrElem" node of an MD array.
- unsigned dim; // "dim" represents upto what level of the rank this optimization applies to.
- // For example, a[i,j,k] could be the MD array "arrElem" but if "dim" is 2,
- // then this node is treated as though it were a[i,j]
- ArrIndex* index; // "index" cached computation in the form of an ArrIndex representation.
+ GenTreeArrElem* arrElem; // "arrElem" node of an MD array.
+ unsigned dim; // "dim" represents upto what level of the rank this optimization applies to.
+ // For example, a[i,j,k] could be the MD array "arrElem" but if "dim" is 2,
+ // then this node is treated as though it were a[i,j]
+ ArrIndex* index; // "index" cached computation in the form of an ArrIndex representation.
LcMdArrayOptInfo(GenTreeArrElem* arrElem, unsigned dim)
- : LcOptInfo(this, LcMdArray)
- , arrElem(arrElem)
- , dim(dim)
- , index(nullptr) {}
+ : LcOptInfo(this, LcMdArray), arrElem(arrElem), dim(dim), index(nullptr)
+ {
+ }
ArrIndex* GetArrIndexForDim(IAllocator* alloc)
{
if (index == nullptr)
{
- index = new (alloc) ArrIndex(alloc);
+ index = new (alloc) ArrIndex(alloc);
index->rank = arrElem->gtArrRank;
for (unsigned i = 0; i < dim; ++i)
{
@@ -211,17 +208,16 @@ struct LcMdArrayOptInfo : public LcOptInfo
*/
struct LcJaggedArrayOptInfo : public LcOptInfo
{
- unsigned dim; // "dim" represents upto what level of the rank this optimization applies to.
- // For example, a[i][j][k] could be the jagged array but if "dim" is 2,
- // then this node is treated as though it were a[i][j]
- ArrIndex arrIndex; // ArrIndex representation of the array.
- GenTreePtr stmt; // "stmt" where the optimization opportunity occurs.
+ unsigned dim; // "dim" represents upto what level of the rank this optimization applies to.
+ // For example, a[i][j][k] could be the jagged array but if "dim" is 2,
+ // then this node is treated as though it were a[i][j]
+ ArrIndex arrIndex; // ArrIndex representation of the array.
+ GenTreePtr stmt; // "stmt" where the optimization opportunity occurs.
LcJaggedArrayOptInfo(ArrIndex& arrIndex, unsigned dim, GenTreePtr stmt)
- : LcOptInfo(this, LcJaggedArray)
- , dim(dim)
- , arrIndex(arrIndex)
- , stmt(stmt) {}
+ : LcOptInfo(this, LcJaggedArray), dim(dim), arrIndex(arrIndex), stmt(stmt)
+ {
+ }
};
/**
@@ -244,8 +240,8 @@ struct LC_Array
ArrLen,
};
- ArrType type; // The type of the array on which to invoke length operator.
- ArrIndex* arrIndex; // ArrIndex representation of this array.
+ ArrType type; // The type of the array on which to invoke length operator.
+ ArrIndex* arrIndex; // ArrIndex representation of this array.
OperType oper;
@@ -260,13 +256,20 @@ struct LC_Array
}
#endif
- int dim; // "dim" = which index to invoke arrLen on, if -1 invoke on the whole array
- // Example 1: a[0][1][2] and dim = 2 implies a[0][1].length
- // Example 2: a[0][1][2] and dim = -1 implies a[0][1][2].length
- LC_Array() : type(Invalid), dim(-1) {}
- LC_Array(ArrType type, ArrIndex* arrIndex, int dim, OperType oper) : type(type), arrIndex(arrIndex), oper(oper), dim(dim) {}
+ int dim; // "dim" = which index to invoke arrLen on, if -1 invoke on the whole array
+ // Example 1: a[0][1][2] and dim = 2 implies a[0][1].length
+ // Example 2: a[0][1][2] and dim = -1 implies a[0][1][2].length
+ LC_Array() : type(Invalid), dim(-1)
+ {
+ }
+ LC_Array(ArrType type, ArrIndex* arrIndex, int dim, OperType oper)
+ : type(type), arrIndex(arrIndex), oper(oper), dim(dim)
+ {
+ }
- LC_Array(ArrType type, ArrIndex* arrIndex, OperType oper) : type(type), arrIndex(arrIndex), oper(oper), dim(-1) {}
+ LC_Array(ArrType type, ArrIndex* arrIndex, OperType oper) : type(type), arrIndex(arrIndex), oper(oper), dim(-1)
+ {
+ }
// Equality operator
bool operator==(const LC_Array& that) const
@@ -274,9 +277,7 @@ struct LC_Array
assert(type != Invalid && that.type != Invalid);
// Types match and the array base matches.
- if (type != that.type ||
- arrIndex->arrLcl != that.arrIndex->arrLcl ||
- oper != that.oper)
+ if (type != that.type || arrIndex->arrLcl != that.arrIndex->arrLcl || oper != that.oper)
{
return false;
}
@@ -303,7 +304,7 @@ struct LC_Array
// The max dim on which length is invoked.
int GetDimRank() const
{
- return (dim < 0) ? (int) arrIndex->rank : dim;
+ return (dim < 0) ? (int)arrIndex->rank : dim;
}
// Get a tree representation for this symbolic a.length
@@ -312,7 +313,8 @@ struct LC_Array
/**
*
- * Symbolic representation of either a constant like 1, 2 or a variable V02, V03 etc. or an "LC_Array" or the null constant.
+ * Symbolic representation of either a constant like 1, 2 or a variable V02, V03 etc. or an "LC_Array" or the null
+ * constant.
*/
struct LC_Ident
{
@@ -325,25 +327,25 @@ struct LC_Ident
Null,
};
- INT64 constant; // The constant value if this node is of type "Const", or the lcl num if "Var"
- LC_Array arrLen; // The LC_Array if the type is "ArrLen"
- IdentType type; // The type of this object
+ INT64 constant; // The constant value if this node is of type "Const", or the lcl num if "Var"
+ LC_Array arrLen; // The LC_Array if the type is "ArrLen"
+ IdentType type; // The type of this object
// Equality operator
bool operator==(const LC_Ident& that) const
{
switch (type)
{
- case Const:
- case Var:
- return (type == that.type) && constant == that.constant;
- case ArrLen:
- return (type == that.type) && (arrLen == that.arrLen);
- case Null:
- return (type == that.type);
- default:
- assert(!"Unknown LC_Ident type");
- unreached();
+ case Const:
+ case Var:
+ return (type == that.type) && constant == that.constant;
+ case ArrLen:
+ return (type == that.type) && (arrLen == that.arrLen);
+ case Null:
+ return (type == that.type);
+ default:
+ assert(!"Unknown LC_Ident type");
+ unreached();
}
}
@@ -352,29 +354,37 @@ struct LC_Ident
{
switch (type)
{
- case Const:
- printf("%I64d", constant);
- break;
- case Var:
- printf("V%02d", constant);
- break;
- case ArrLen:
- arrLen.Print();
- break;
- case Null:
- printf("null");
- break;
- default:
- assert(false);
- break;
+ case Const:
+ printf("%I64d", constant);
+ break;
+ case Var:
+ printf("V%02d", constant);
+ break;
+ case ArrLen:
+ arrLen.Print();
+ break;
+ case Null:
+ printf("null");
+ break;
+ default:
+ assert(false);
+ break;
}
}
#endif
- LC_Ident() : type(Invalid) {}
- LC_Ident(INT64 constant, IdentType type) : constant(constant), type(type) {}
- explicit LC_Ident(IdentType type) : type(type) {}
- explicit LC_Ident(const LC_Array& arrLen) : arrLen(arrLen), type(ArrLen) {}
+ LC_Ident() : type(Invalid)
+ {
+ }
+ LC_Ident(INT64 constant, IdentType type) : constant(constant), type(type)
+ {
+ }
+ explicit LC_Ident(IdentType type) : type(type)
+ {
+ }
+ explicit LC_Ident(const LC_Array& arrLen) : arrLen(arrLen), type(ArrLen)
+ {
+ }
// Convert this symbolic representation into a tree node.
GenTreePtr ToGenTree(Compiler* comp);
@@ -394,7 +404,7 @@ struct LC_Expr
};
LC_Ident ident;
- INT64 constant;
+ INT64 constant;
ExprType type;
// Equality operator
@@ -434,9 +444,15 @@ struct LC_Expr
}
#endif
- LC_Expr() : type(Invalid) {}
- explicit LC_Expr(const LC_Ident& ident) : ident(ident), type(Ident) {}
- LC_Expr(const LC_Ident& ident, INT64 constant) : ident(ident), constant(constant), type(IdentPlusConst) {}
+ LC_Expr() : type(Invalid)
+ {
+ }
+ explicit LC_Expr(const LC_Ident& ident) : ident(ident), type(Ident)
+ {
+ }
+ LC_Expr(const LC_Ident& ident, INT64 constant) : ident(ident), constant(constant), type(IdentPlusConst)
+ {
+ }
// Convert LC_Expr into a tree node.
GenTreePtr ToGenTree(Compiler* comp);
@@ -449,8 +465,8 @@ struct LC_Expr
*/
struct LC_Condition
{
- LC_Expr op1;
- LC_Expr op2;
+ LC_Expr op1;
+ LC_Expr op2;
genTreeOps oper;
#ifdef DEBUG
@@ -470,8 +486,12 @@ struct LC_Condition
// Check if two conditions can be combined to yield one condition.
bool Combines(const LC_Condition& cond, LC_Condition* newCond);
- LC_Condition() {}
- LC_Condition(genTreeOps oper, const LC_Expr& op1, const LC_Expr& op2) : op1(op1), op2(op2), oper(oper) {}
+ LC_Condition()
+ {
+ }
+ LC_Condition(genTreeOps oper, const LC_Expr& op1, const LC_Expr& op2) : op1(op1), op2(op2), oper(oper)
+ {
+ }
// Convert this conditional operation into a GenTree.
GenTreePtr ToGenTree(Compiler* comp);
@@ -496,16 +516,14 @@ struct LC_Condition
*/
struct LC_Deref
{
- const LC_Array array;
+ const LC_Array array;
ExpandArrayStack<LC_Deref*>* children;
unsigned level;
- LC_Deref(const LC_Array& array, unsigned level)
- : array(array)
- , children(nullptr)
- , level(level)
- { }
+ LC_Deref(const LC_Array& array, unsigned level) : array(array), children(nullptr), level(level)
+ {
+ }
LC_Deref* Find(unsigned lcl);
@@ -525,11 +543,14 @@ struct LC_Deref
{
for (unsigned i = 0; i < children->Size(); ++i)
{
- if (i > 0) { printf(","); }
+ if (i > 0)
+ {
+ printf(",");
+ }
printf("\n");
#ifdef _MSC_VER
(*children)[i]->Print(indent + 1);
-#else // _MSC_VER
+#else // _MSC_VER
(*((ExpandArray<LC_Deref*>*)children))[i]->Print(indent + 1);
#endif // _MSC_VER
}
@@ -552,28 +573,31 @@ struct LC_Deref
* LC_Ident : Constant | Var | LC_Array
* LC_Array : .
* genTreeOps : GT_GE | GT_LE | GT_GT | GT_LT
- *
+ *
*/
struct LoopCloneContext
{
- IAllocator* alloc; // The allocator
- ExpandArrayStack<LcOptInfo*>** optInfo; // The array of optimization opportunities found in each loop. (loop x optimization-opportunities)
- ExpandArrayStack<LC_Condition>** conditions; // The array of conditions that influence which path to take for each loop. (loop x cloning-conditions)
- ExpandArrayStack<LC_Array>** derefs; // The array of dereference conditions found in each loop. (loop x deref-conditions)
- ExpandArrayStack<ExpandArrayStack<LC_Condition>*>** blockConditions; // The array of block levels of conditions for each loop. (loop x level x conditions)
-
- LoopCloneContext(unsigned loopCount, IAllocator* alloc)
- : alloc(alloc)
- {
- optInfo = new (alloc) ExpandArrayStack<LcOptInfo*>*[loopCount];
- conditions = new (alloc) ExpandArrayStack<LC_Condition>*[loopCount];
- derefs = new (alloc) ExpandArrayStack<LC_Array>*[loopCount];
+ IAllocator* alloc; // The allocator
+ ExpandArrayStack<LcOptInfo*>** optInfo; // The array of optimization opportunities found in each loop. (loop x
+ // optimization-opportunities)
+ ExpandArrayStack<LC_Condition>** conditions; // The array of conditions that influence which path to take for each
+ // loop. (loop x cloning-conditions)
+ ExpandArrayStack<LC_Array>** derefs; // The array of dereference conditions found in each loop. (loop x
+ // deref-conditions)
+ ExpandArrayStack<ExpandArrayStack<LC_Condition>*>** blockConditions; // The array of block levels of conditions for
+ // each loop. (loop x level x conditions)
+
+ LoopCloneContext(unsigned loopCount, IAllocator* alloc) : alloc(alloc)
+ {
+ optInfo = new (alloc) ExpandArrayStack<LcOptInfo*>*[loopCount];
+ conditions = new (alloc) ExpandArrayStack<LC_Condition>*[loopCount];
+ derefs = new (alloc) ExpandArrayStack<LC_Array>*[loopCount];
blockConditions = new (alloc) ExpandArrayStack<ExpandArrayStack<LC_Condition>*>*[loopCount];
for (unsigned i = 0; i < loopCount; ++i)
{
- optInfo[i] = nullptr;
- conditions[i] = nullptr;
- derefs[i] = nullptr;
+ optInfo[i] = nullptr;
+ conditions[i] = nullptr;
+ derefs[i] = nullptr;
blockConditions[i] = nullptr;
}
}
@@ -634,11 +658,10 @@ private:
public:
// Optimize conditions to remove redundant conditions.
void OptimizeConditions(unsigned loopNum DEBUGARG(bool verbose));
-
+
void OptimizeBlockConditions(unsigned loopNum DEBUGARG(bool verbose));
#ifdef DEBUG
void PrintConditions(unsigned loopNum);
#endif
};
-
diff --git a/src/jit/loopcloningopts.h b/src/jit/loopcloningopts.h
index 29e5433798..9048a41a14 100644
--- a/src/jit/loopcloningopts.h
+++ b/src/jit/loopcloningopts.h
@@ -6,7 +6,7 @@
/*****************************************************************************/
#ifndef LC_OPT
-#error Define LC_OPT before including this file.
+#error Define LC_OPT before including this file.
#endif
// Types of Loop Cloning based optimizations.
diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp
index b5129ca9f0..c4ae0c842e 100644
--- a/src/jit/lower.cpp
+++ b/src/jit/lower.cpp
@@ -33,7 +33,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// MakeSrcContained: Make "tree" a contained node
//
// Arguments:
-// 'parentNode' is a non-leaf node that can contain its 'childNode'
+// 'parentNode' is a non-leaf node that can contain its 'childNode'
// 'childNode' is an op that will now be contained by its parent.
//
// Notes:
@@ -52,13 +52,13 @@ void Lowering::MakeSrcContained(GenTreePtr parentNode, GenTreePtr childNode)
//------------------------------------------------------------------------
// CheckImmedAndMakeContained: Check and make 'childNode' contained
// Arguments:
-// 'parentNode' is any non-leaf node
+// 'parentNode' is any non-leaf node
// 'childNode' is an child op of 'parentNode'
// Return value:
// returns true if we are able to make childNode contained immediate
//
// Notes:
-// Checks if the 'childNode' is a containable immediate
+// Checks if the 'childNode' is a containable immediate
// and then makes it contained
//
bool Lowering::CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode)
@@ -94,15 +94,14 @@ bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode)
assert(childNode->isMemoryOp());
// Check conflicts against nodes between 'childNode' and 'parentNode'
- GenTree* node;
+ GenTree* node;
unsigned int childFlags = (childNode->gtFlags & GTF_ALL_EFFECT);
- for (node = childNode->gtNext;
- (node != parentNode) && (node != nullptr);
- node = node->gtNext)
+ for (node = childNode->gtNext; (node != parentNode) && (node != nullptr); node = node->gtNext)
{
if ((childFlags != 0) && node->IsCall())
{
- bool isPureHelper = (node->gtCall.gtCallType == CT_HELPER) && comp->s_helperCallProperties.IsPure(comp->eeGetHelperNum(node->gtCall.gtCallMethHnd));
+ bool isPureHelper = (node->gtCall.gtCallType == CT_HELPER) &&
+ comp->s_helperCallProperties.IsPure(comp->eeGetHelperNum(node->gtCall.gtCallMethHnd));
if (!isPureHelper && ((node->gtFlags & childFlags & GTF_ALL_EFFECT) != 0))
{
return false;
@@ -123,7 +122,7 @@ bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode)
//------------------------------------------------------------------------
-//static
+// static
Compiler::fgWalkResult Lowering::LowerNodeHelper(GenTreePtr* pTree, Compiler::fgWalkData* data)
{
Lowering* lower = (Lowering*)data->pCallbackData;
@@ -131,16 +130,13 @@ Compiler::fgWalkResult Lowering::LowerNodeHelper(GenTreePtr* pTree, Compiler::fg
return Compiler::WALK_CONTINUE;
}
-
/** Creates an assignment of an existing tree to a new temporary local variable
* and the specified reference count for the new variable.
*/
-GenTreePtr Lowering::CreateLocalTempAsg(GenTreePtr rhs,
- unsigned refCount,
- GenTreePtr* ppLclVar) //out legacy arg
+GenTreePtr Lowering::CreateLocalTempAsg(GenTreePtr rhs, unsigned refCount, GenTreePtr* ppLclVar) // out legacy arg
{
- unsigned lclNum = comp->lvaGrabTemp(true DEBUGARG("Lowering is creating a new local variable"));
- comp->lvaSortAgain = true;
+ unsigned lclNum = comp->lvaGrabTemp(true DEBUGARG("Lowering is creating a new local variable"));
+ comp->lvaSortAgain = true;
comp->lvaTable[lclNum].lvType = rhs->TypeGet();
// Make sure we don't lose precision when downgrading to short
@@ -148,8 +144,9 @@ GenTreePtr Lowering::CreateLocalTempAsg(GenTreePtr rhs,
comp->lvaTable[lclNum].lvRefCnt = (short)(refCount);
JITDUMP("Lowering has requested a new temporary local variable: V%02u with refCount %u \n", lclNum, refCount);
- GenTreeLclVar* store = new(comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, rhs->TypeGet(), lclNum, BAD_IL_OFFSET);
- store->gtOp1 = rhs;
+ GenTreeLclVar* store =
+ new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, rhs->TypeGet(), lclNum, BAD_IL_OFFSET);
+ store->gtOp1 = rhs;
store->gtFlags = (rhs->gtFlags & GTF_COMMON_MASK);
store->gtFlags |= GTF_VAR_DEF;
return store;
@@ -173,37 +170,38 @@ GenTreePtr Lowering::CreateLocalTempAsg(GenTreePtr rhs,
// The newly created statement is usually an embedded statement but it can also be a top-level
// statement if the tree to be replaced extends to the begining of the current statement. If
// a top-level statement is created any embedded statements contained in the tree move to the
-// the new top-level statement, before the current statement. Such embedded statements need to
+// the new top-level statement, before the current statement. Such embedded statements need to
// be lowered here because the normal lowering code path won't reach them anymore.
//
-// TODO-Cleanup:
-// Some uses of fgInsertEmbeddedFormTemp in lowering could be replaced with this to avoid
+// TODO-Cleanup:
+// Some uses of fgInsertEmbeddedFormTemp in lowering could be replaced with this to avoid
// duplication, see LowerArrElem for example.
GenTreeStmt* Lowering::CreateTemporary(GenTree** ppTree)
{
GenTreeStmt* newStmt = comp->fgInsertEmbeddedFormTemp(ppTree);
- // The tree is assumed to be already lowered so the newly created statement
+ // The tree is assumed to be already lowered so the newly created statement
// should not be lowered again.
newStmt->gtFlags |= GTF_STMT_SKIP_LOWER;
assert(newStmt->gtStmtExpr->OperIsLocalStore());
- // If the newly created statement is top-level then we need to manually lower its embedded
+ // If the newly created statement is top-level then we need to manually lower its embedded
// statements, the tree is lowered but some of its embedded statements are yet to be lowered.
if (newStmt->gtStmtIsTopLevel())
{
GenTree* curStmt = comp->compCurStmt;
- for (GenTree* nextEmbeddedStmt = newStmt->gtStmtNextIfEmbedded();
- nextEmbeddedStmt != nullptr;
- nextEmbeddedStmt = nextEmbeddedStmt->gtStmt.gtStmtNextIfEmbedded())
+ for (GenTree* nextEmbeddedStmt = newStmt->gtStmtNextIfEmbedded(); nextEmbeddedStmt != nullptr;
+ nextEmbeddedStmt = nextEmbeddedStmt->gtStmt.gtStmtNextIfEmbedded())
{
// A previous call to CreateTemporary could have created embedded statements
// from the tree and those are already lowered.
if ((nextEmbeddedStmt->gtFlags & GTF_STMT_SKIP_LOWER) != 0)
+ {
continue;
+ }
#ifdef DEBUG
if (comp->verbose)
@@ -216,7 +214,7 @@ GenTreeStmt* Lowering::CreateTemporary(GenTree** ppTree)
nextEmbeddedStmt->gtFlags |= GTF_STMT_SKIP_LOWER;
// Lowering can remove the statement and set compCurStmt to another suitable statement.
- // Currently only switch lowering does this and since embedded statements can't contain
+ // Currently only switch lowering does this and since embedded statements can't contain
// a GT_SWITCH this case should never be hit here.
assert(comp->compCurStmt == nextEmbeddedStmt);
}
@@ -227,7 +225,7 @@ GenTreeStmt* Lowering::CreateTemporary(GenTree** ppTree)
return newStmt;
}
-// This is the main entry point for Lowering.
+// This is the main entry point for Lowering.
// In addition to that, LowerNode is also responsible for initializing the
// treeNodeMap data structure consumed by LSRA. This map is a 1:1 mapping between
@@ -251,46 +249,46 @@ void Lowering::LowerNode(GenTreePtr* ppTree, Compiler::fgWalkData* data)
assert(*ppTree);
switch ((*ppTree)->gtOper)
{
- case GT_IND:
- case GT_STOREIND:
- LowerInd(ppTree);
- break;
+ case GT_IND:
+ case GT_STOREIND:
+ LowerInd(ppTree);
+ break;
- case GT_ADD:
- LowerAdd(ppTree, data);
- break;
-
- case GT_UDIV:
- case GT_UMOD:
- LowerUnsignedDivOrMod(*ppTree);
- break;
+ case GT_ADD:
+ LowerAdd(ppTree, data);
+ break;
- case GT_DIV:
- case GT_MOD:
- LowerSignedDivOrMod(ppTree, data);
- break;
+ case GT_UDIV:
+ case GT_UMOD:
+ LowerUnsignedDivOrMod(*ppTree);
+ break;
- case GT_SWITCH:
- LowerSwitch(ppTree);
- break;
+ case GT_DIV:
+ case GT_MOD:
+ LowerSignedDivOrMod(ppTree, data);
+ break;
- case GT_CALL:
- LowerCall(*ppTree);
- break;
+ case GT_SWITCH:
+ LowerSwitch(ppTree);
+ break;
- case GT_JMP:
- LowerJmpMethod(*ppTree);
- break;
+ case GT_CALL:
+ LowerCall(*ppTree);
+ break;
- case GT_RETURN:
- LowerRet(*ppTree);
- break;
+ case GT_JMP:
+ LowerJmpMethod(*ppTree);
+ break;
- case GT_CAST:
- LowerCast(ppTree);
- break;
+ case GT_RETURN:
+ LowerRet(*ppTree);
+ break;
- case GT_ARR_ELEM:
+ case GT_CAST:
+ LowerCast(ppTree);
+ break;
+
+ case GT_ARR_ELEM:
{
GenTree* oldTree = *ppTree;
LowerArrElem(ppTree, data);
@@ -298,57 +296,57 @@ void Lowering::LowerNode(GenTreePtr* ppTree, Compiler::fgWalkData* data)
}
break;
- case GT_ROL:
- case GT_ROR:
- LowerRotate(*ppTree);
- break;
+ case GT_ROL:
+ case GT_ROR:
+ LowerRotate(*ppTree);
+ break;
#ifdef FEATURE_SIMD
- case GT_SIMD:
- if ((*ppTree)->TypeGet() == TYP_SIMD12)
- {
- // GT_SIMD node requiring to produce TYP_SIMD12 in fact
- // produces a TYP_SIMD16 result
- (*ppTree)->gtType = TYP_SIMD16;
- }
- break;
+ case GT_SIMD:
+ if ((*ppTree)->TypeGet() == TYP_SIMD12)
+ {
+ // GT_SIMD node requiring to produce TYP_SIMD12 in fact
+ // produces a TYP_SIMD16 result
+ (*ppTree)->gtType = TYP_SIMD16;
+ }
+ break;
- case GT_LCL_VAR:
- case GT_STORE_LCL_VAR:
- if ((*ppTree)->TypeGet() == TYP_SIMD12)
- {
+ case GT_LCL_VAR:
+ case GT_STORE_LCL_VAR:
+ if ((*ppTree)->TypeGet() == TYP_SIMD12)
+ {
#ifdef _TARGET_64BIT_
- // Assumption 1:
- // RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off
- // to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for
- // reading and writing purposes.
- //
- // Assumption 2:
- // RyuJit backend is making another implicit assumption that Vector3 type args when passed in
- // registers or on stack, the upper most 4-bytes will be zero.
- //
- // For P/Invoke return and Reverse P/Invoke argument passing, native compiler doesn't guarantee
- // that upper 4-bytes of a Vector3 type struct is zero initialized and hence assumption 2 is
- // invalid.
- //
- // RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12
- // bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and
- // passes it retBuf arg and Callee method writes only 12 bytes to retBuf. For this reason,
- // there is no need to clear upper 4-bytes of Vector3 type args.
- //
- // RyuJIT x64 Unix: arguments are treated as passed by value and read/writen as if TYP_SIMD16.
- // Vector3 return values are returned two return registers and Caller assembles them into a
- // single xmm reg. Hence RyuJIT explicitly generates code to clears upper 4-bytes of Vector3
- // type args in prolog and Vector3 type return value of a call
- (*ppTree)->gtType = TYP_SIMD16;
+ // Assumption 1:
+ // RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off
+ // to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for
+ // reading and writing purposes.
+ //
+ // Assumption 2:
+ // RyuJit backend is making another implicit assumption that Vector3 type args when passed in
+ // registers or on stack, the upper most 4-bytes will be zero.
+ //
+ // For P/Invoke return and Reverse P/Invoke argument passing, native compiler doesn't guarantee
+ // that upper 4-bytes of a Vector3 type struct is zero initialized and hence assumption 2 is
+ // invalid.
+ //
+ // RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12
+ // bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and
+ // passes it retBuf arg and Callee method writes only 12 bytes to retBuf. For this reason,
+ // there is no need to clear upper 4-bytes of Vector3 type args.
+ //
+ // RyuJIT x64 Unix: arguments are treated as passed by value and read/writen as if TYP_SIMD16.
+ // Vector3 return values are returned two return registers and Caller assembles them into a
+ // single xmm reg. Hence RyuJIT explicitly generates code to clears upper 4-bytes of Vector3
+ // type args in prolog and Vector3 type return value of a call
+ (*ppTree)->gtType = TYP_SIMD16;
#else
- NYI("Lowering of TYP_SIMD12 locals");
+ NYI("Lowering of TYP_SIMD12 locals");
#endif // _TARGET_64BIT_
- }
-#endif //FEATURE_SIMD
+ }
+#endif // FEATURE_SIMD
- default:
- return;
+ default:
+ return;
}
}
@@ -435,8 +433,8 @@ void Lowering::LowerSwitch(GenTreePtr* pTree)
// jumpCnt is the number of elements in the jump table array.
// jumpTab is the actual pointer to the jump table array.
// targetCnt is the number of unique targets in the jump table array.
- jumpCnt = originalSwitchBB->bbJumpSwt->bbsCount;
- jumpTab = originalSwitchBB->bbJumpSwt->bbsDstTab;
+ jumpCnt = originalSwitchBB->bbJumpSwt->bbsCount;
+ jumpTab = originalSwitchBB->bbJumpSwt->bbsDstTab;
targetCnt = originalSwitchBB->NumSucc(comp);
JITDUMP("Lowering switch BB%02u, %d cases\n", originalSwitchBB->bbNum, jumpCnt);
@@ -461,15 +459,15 @@ void Lowering::LowerSwitch(GenTreePtr* pTree)
// Remove extra predecessor links if there was more than one case.
for (unsigned i = 1; i < jumpCnt; ++i)
{
- (void) comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB);
+ (void)comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB);
}
- // We have to get rid of the GT_SWITCH node but a child might have side effects so just assign
+ // We have to get rid of the GT_SWITCH node but a child might have side effects so just assign
// the result of the child subtree to a temp.
GenTree* store = CreateLocalTempAsg(tree->gtOp.gtOp1, 1);
tree->InsertAfterSelf(store, comp->compCurStmt->AsStmt());
Compiler::fgSnipNode(comp->compCurStmt->AsStmt(), tree);
*pTree = store;
-
+
return;
}
@@ -481,13 +479,13 @@ void Lowering::LowerSwitch(GenTreePtr* pTree)
GenTreeStmt* asgStmt = comp->fgInsertEmbeddedFormTemp(&(tree->gtOp.gtOp1));
// GT_SWITCH(indexExpression) is now two statements:
- // 1. a statement containing 'asg' (for temp = indexExpression)
+ // 1. a statement containing 'asg' (for temp = indexExpression)
// 2. and a statement with GT_SWITCH(temp)
- // The return value of fgInsertEmbeddedFormTemp is stmt 1
+ // The return value of fgInsertEmbeddedFormTemp is stmt 1
// The 'asg' can either be a GT_ASG or a GT_STORE_LCL_VAR
// 'tree' is still a GT_SWITCH but tree->gtOp.gtOp1 is modified to be 'temp'
-
+
// The asgStmt needs to pickup the IL offsets from the current statement
//
asgStmt->gtStmtILoffsx = comp->compCurStmt->gtStmt.gtStmtILoffsx;
@@ -498,11 +496,11 @@ void Lowering::LowerSwitch(GenTreePtr* pTree)
assert(tree->gtOper == GT_SWITCH);
GenTreePtr temp = tree->gtOp.gtOp1;
assert(temp->gtOper == GT_LCL_VAR);
- unsigned tempLclNum = temp->gtLclVarCommon.gtLclNum;
- LclVarDsc * tempVarDsc = comp->lvaTable + tempLclNum;
- var_types tempLclType = tempVarDsc->TypeGet();
+ unsigned tempLclNum = temp->gtLclVarCommon.gtLclNum;
+ LclVarDsc* tempVarDsc = comp->lvaTable + tempLclNum;
+ var_types tempLclType = tempVarDsc->TypeGet();
- BasicBlock* defaultBB = jumpTab[jumpCnt - 1];
+ BasicBlock* defaultBB = jumpTab[jumpCnt - 1];
BasicBlock* followingBB = originalSwitchBB->bbNext;
/* Is the number of cases right for a test and jump switch? */
@@ -513,7 +511,9 @@ void Lowering::LowerSwitch(GenTreePtr* pTree)
// This means really just a single cmp/jcc (aka a simple if/else)
if (fFirstCaseFollows || fDefaultFollows)
+ {
minSwitchTabJumpCnt++;
+ }
#if defined(_TARGET_ARM_)
// On ARM for small switch tables we will
@@ -526,23 +526,21 @@ void Lowering::LowerSwitch(GenTreePtr* pTree)
// the default case. As stated above, this conditional is being shared between
// both GT_SWITCH lowering code paths.
// This condition is of the form: if (temp > jumpTableLength - 2){ goto jumpTable[jumpTableLength - 1]; }
- GenTreePtr gtDefaultCaseCond = comp->gtNewOperNode(GT_GT, TYP_INT,
- comp->gtNewLclvNode(tempLclNum, tempLclType),
+ GenTreePtr gtDefaultCaseCond = comp->gtNewOperNode(GT_GT, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
comp->gtNewIconNode(jumpCnt - 2, TYP_INT));
//
- // Make sure we perform an unsigned comparison, just in case the switch index in 'temp'
+ // Make sure we perform an unsigned comparison, just in case the switch index in 'temp'
// is now less than zero 0 (that would also hit the default case).
gtDefaultCaseCond->gtFlags |= GTF_UNSIGNED;
/* Increment the lvRefCnt and lvRefCntWtd for temp */
tempVarDsc->incRefCnts(originalSwitchBB->getBBWeight(comp), comp);
- GenTreePtr gtDefaultCaseJump = comp->gtNewOperNode(GT_JTRUE,
- TYP_VOID,
- gtDefaultCaseCond);
- gtDefaultCaseJump->gtFlags = tree->gtFlags;
+ GenTreePtr gtDefaultCaseJump = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtDefaultCaseCond);
+ gtDefaultCaseJump->gtFlags = tree->gtFlags;
- GenTreePtr condStmt = comp->fgNewStmtFromTree(gtDefaultCaseJump, originalSwitchBB, comp->compCurStmt->gtStmt.gtStmtILoffsx);
+ GenTreePtr condStmt =
+ comp->fgNewStmtFromTree(gtDefaultCaseJump, originalSwitchBB, comp->compCurStmt->gtStmt.gtStmtILoffsx);
#ifdef DEBUG
condStmt->gtStmt.gtStmtLastILoffs = comp->compCurStmt->gtStmt.gtStmtLastILoffs;
@@ -598,7 +596,7 @@ void Lowering::LowerSwitch(GenTreePtr* pTree)
for (unsigned i = 1; i < jumpCnt - 1; ++i)
{
assert(jumpTab[i] == uniqueSucc);
- (void) comp->fgRemoveRefPred(uniqueSucc, afterDefCondBlock);
+ (void)comp->fgRemoveRefPred(uniqueSucc, afterDefCondBlock);
}
if (afterDefCondBlock->bbNext == uniqueSucc)
{
@@ -629,8 +627,8 @@ void Lowering::LowerSwitch(GenTreePtr* pTree)
// We'll use 'afterDefCondBlock' for the first conditional. After that, we'll add new
// blocks. If we end up not needing it at all (say, if all the non-default cases just fall through),
// we'll delete it.
- bool fUsedAfterDefCondBlock = false;
- BasicBlock* currentBlock = afterDefCondBlock;
+ bool fUsedAfterDefCondBlock = false;
+ BasicBlock* currentBlock = afterDefCondBlock;
// Walk to entries 0 to jumpCnt - 1. If a case target follows, ignore it and let it fall through.
// If no case target follows, the last one doesn't need to be a compare/branch: it can be an
@@ -694,14 +692,14 @@ void Lowering::LowerSwitch(GenTreePtr* pTree)
// |____GT_EQ
// |____ (switchIndex) (The temp variable)
// |____ (ICon) (The actual case constant)
- GenTreePtr gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT,
- comp->gtNewLclvNode(tempLclNum, tempLclType),
- comp->gtNewIconNode(i, TYP_INT));
+ GenTreePtr gtCaseCond =
+ comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
+ comp->gtNewIconNode(i, TYP_INT));
/* Increment the lvRefCnt and lvRefCntWtd for temp */
tempVarDsc->incRefCnts(originalSwitchBB->getBBWeight(comp), comp);
GenTreePtr gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond);
- GenTreePtr gtCaseStmt = comp->fgNewStmtFromTree(gtCaseBranch, currentBlock);
+ GenTreePtr gtCaseStmt = comp->fgNewStmtFromTree(gtCaseBranch, currentBlock);
comp->fgInsertStmtAtEnd(currentBlock, gtCaseStmt);
}
}
@@ -742,10 +740,9 @@ void Lowering::LowerSwitch(GenTreePtr* pTree)
JITDUMP("Lowering switch BB%02u: using jump table expansion\n", originalSwitchBB->bbNum);
- GenTreePtr gtTableSwitch = comp->gtNewOperNode(GT_SWITCH_TABLE,
- TYP_VOID,
- comp->gtNewLclvNode(tempLclNum, tempLclType),
- comp->gtNewJmpTableNode());
+ GenTreePtr gtTableSwitch =
+ comp->gtNewOperNode(GT_SWITCH_TABLE, TYP_VOID, comp->gtNewLclvNode(tempLclNum, tempLclType),
+ comp->gtNewJmpTableNode());
/* Increment the lvRefCnt and lvRefCntWtd for temp */
tempVarDsc->incRefCnts(originalSwitchBB->getBBWeight(comp), comp);
@@ -787,7 +784,7 @@ void Lowering::SpliceInUnary(GenTreePtr parent, GenTreePtr* ppChild, GenTreePtr
GenTreePtr oldChild = *ppChild;
// Replace tree in the parent node
- *ppChild = newNode;
+ *ppChild = newNode;
newNode->gtOp.gtOp1 = oldChild;
oldChild->InsertAfterSelf(newNode);
@@ -814,8 +811,8 @@ void Lowering::SpliceInUnary(GenTreePtr parent, GenTreePtr* ppChild, GenTreePtr
// this method allocates a single GT_PUTARG_REG for 1 eightbyte structs and a GT_LIST of two GT_PUTARG_REGs
// for two eightbyte structs.
//
-// For STK passed structs the method generates GT_PUTARG_STK tree. For System V systems with native struct passing
-// (i.e. FEATURE_UNIX_AMD64_STRUCT_PASSING defined) this method also sets the GP pointers count and the pointers
+// For STK passed structs the method generates GT_PUTARG_STK tree. For System V systems with native struct passing
+// (i.e. FEATURE_UNIX_AMD64_STRUCT_PASSING defined) this method also sets the GP pointers count and the pointers
// layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value.
// (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.)
//
@@ -825,8 +822,8 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
assert(arg != nullptr);
assert(info != nullptr);
- GenTreePtr putArg = nullptr;
- bool updateArgTable = true;
+ GenTreePtr putArg = nullptr;
+ bool updateArgTable = true;
#if !defined(_TARGET_64BIT_)
if (varTypeIsLong(type))
@@ -834,11 +831,11 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
// For TYP_LONG, we leave the GT_LONG as the arg, and put the putArg below it.
// Therefore, we don't update the arg table entry.
updateArgTable = false;
- type = TYP_INT;
+ type = TYP_INT;
}
#endif // !defined(_TARGET_64BIT_)
- bool isOnStack = true;
+ bool isOnStack = true;
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
if (varTypeIsStruct(type))
{
@@ -848,8 +845,8 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
{
isOnStack = info->regNum == REG_STK;
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
- isOnStack = info->regNum == REG_STK;
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+ isOnStack = info->regNum == REG_STK;
#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
if (!isOnStack)
@@ -860,7 +857,7 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
{
type = TYP_LONG;
}
-#endif //FEATURE_SIMD
+#endif // FEATURE_SIMD
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
if (info->isStruct)
@@ -953,7 +950,8 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
// Create a new GT_PUTARG_REG node with op1 the original GT_LCL_FLD.
GenTreePtr newOper = comp->gtNewOperNode(
GT_PUTARG_REG,
- comp->GetTypeFromClassificationAndSizes(info->structDesc.eightByteClassifications[ctr], info->structDesc.eightByteSizes[ctr]),
+ comp->GetTypeFromClassificationAndSizes(info->structDesc.eightByteClassifications[ctr],
+ info->structDesc.eightByteSizes[ctr]),
argListPtr->gtOp.gtOp1);
// CopyCosts
@@ -969,13 +967,13 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
}
else
{
- assert(false && "Illegal count of eightbytes for the CLR type system"); // No more than 2 eightbytes for the CLR.
-
+ assert(false &&
+ "Illegal count of eightbytes for the CLR type system"); // No more than 2 eightbytes for the CLR.
}
}
else
#else // not defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#if FEATURE_MULTIREG_ARGS
+#if FEATURE_MULTIREG_ARGS
if ((info->numRegs > 1) && (arg->OperGet() == GT_LIST))
{
assert(arg->OperGet() == GT_LIST);
@@ -986,7 +984,7 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
GenTreePtr curOp = argListPtr->gtOp.gtOp1;
var_types curTyp = curOp->TypeGet();
- // Create a new GT_PUTARG_REG node with op1
+ // Create a new GT_PUTARG_REG node with op1
GenTreePtr newOper = comp->gtNewOperNode(GT_PUTARG_REG, curTyp, curOp);
// CopyCosts
@@ -1001,7 +999,7 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
return arg;
}
else
-#endif // FEATURE_MULTIREG_ARGS
+#endif // FEATURE_MULTIREG_ARGS
#endif // not defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
{
putArg = comp->gtNewOperNode(GT_PUTARG_REG, type, arg);
@@ -1010,42 +1008,37 @@ GenTreePtr Lowering::NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryP
else
{
// Mark this one as tail call arg if it is a fast tail call.
- // This provides the info to put this argument in in-coming arg area slot
+ // This provides the info to put this argument in in-coming arg area slot
// instead of in out-going arg area slot.
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(assert(info->isStruct == varTypeIsStruct(type))); // Make sure state is correct
+ FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(assert(info->isStruct == varTypeIsStruct(type))); // Make sure state is
+ // correct
#if FEATURE_FASTTAILCALL
- putArg = new (comp, GT_PUTARG_STK) GenTreePutArgStk(GT_PUTARG_STK,
- type,
- arg,
- info->slotNum
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(info->numSlots)
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(info->isStruct),
- call->IsFastTailCall()
- DEBUGARG(call));
+ putArg = new (comp, GT_PUTARG_STK)
+ GenTreePutArgStk(GT_PUTARG_STK, type, arg,
+ info->slotNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(info->numSlots)
+ FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(info->isStruct),
+ call->IsFastTailCall() DEBUGARG(call));
#else
- putArg = new (comp, GT_PUTARG_STK) GenTreePutArgStk(GT_PUTARG_STK,
- type,
- arg,
- info->slotNum
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(info->numSlots)
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(info->isStruct)
- DEBUGARG(call));
+ putArg = new (comp, GT_PUTARG_STK)
+ GenTreePutArgStk(GT_PUTARG_STK, type, arg,
+ info->slotNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(info->numSlots)
+ FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(info->isStruct) DEBUGARG(call));
#endif
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
// If the ArgTabEntry indicates that this arg is a struct
// get and store the number of slots that are references.
// This is later used in the codegen for PUT_ARG_STK implementation
- // for struct to decide whether and how many single eight-byte copies
+ // for struct to decide whether and how many single eight-byte copies
// to be done (only for reference slots), so gcinfo is emitted.
- // For non-reference slots faster/smaller size instructions are used -
+ // For non-reference slots faster/smaller size instructions are used -
// pair copying using XMM registers or rep mov instructions.
if (info->isStruct)
{
- unsigned numRefs = 0;
- BYTE* gcLayout = new (comp, CMK_Codegen) BYTE[info->numSlots];
+ unsigned numRefs = 0;
+ BYTE* gcLayout = new (comp, CMK_Codegen) BYTE[info->numSlots];
// We use GT_OBJ for non-SIMD struct arguments. However, for
// SIMD arguments the GT_OBJ has already been transformed.
if (arg->gtOper != GT_OBJ)
@@ -1111,20 +1104,16 @@ void Lowering::LowerArg(GenTreeCall* call, GenTreePtr* ppArg)
// assignments/stores at this level are not really placing an arg
// they are setting up temporary locals that will later be placed into
// outgoing regs or stack
- if (
- !arg->OperIsAssignment() &&
- !arg->OperIsStore() &&
- !arg->IsArgPlaceHolderNode() &&
- !arg->IsNothingNode() &&
+ if (!arg->OperIsAssignment() && !arg->OperIsStore() && !arg->IsArgPlaceHolderNode() && !arg->IsNothingNode() &&
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- !arg->OperIsPutArgStk() &&
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+ !arg->OperIsPutArgStk() &&
+#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
!arg->OperIsCopyBlkOp()) // these are de facto placeholders (apparently)
{
fgArgTabEntryPtr info = comp->gtArgEntryByNode(call, arg);
assert(info->node == arg);
- bool isReg = (info->regNum != REG_STK);
- var_types type = arg->TypeGet();
+ bool isReg = (info->regNum != REG_STK);
+ var_types type = arg->TypeGet();
if (varTypeIsSmall(type))
{
@@ -1152,11 +1141,9 @@ void Lowering::LowerArg(GenTreeCall* call, GenTreePtr* ppArg)
GenTreePtr argLo = arg->gtGetOp1();
GenTreePtr argHi = arg->gtGetOp2();
- NYI_IF((argHi->OperGet() == GT_ADD_HI) ||
- (argHi->OperGet() == GT_SUB_HI) ||
- (argHi->OperGet() == GT_NEG),
+ NYI_IF((argHi->OperGet() == GT_ADD_HI) || (argHi->OperGet() == GT_SUB_HI) || (argHi->OperGet() == GT_NEG),
"Hi and Lo cannot be reordered");
-
+
GenTreePtr putArgLo = NewPutArg(call, argLo, info, type);
GenTreePtr putArgHi = NewPutArg(call, argHi, info, type);
@@ -1167,7 +1154,7 @@ void Lowering::LowerArg(GenTreeCall* call, GenTreePtr* ppArg)
GenTreePtr argLoFirst = comp->fgGetFirstNode(argLo);
GenTreePtr argHiFirst = comp->fgGetFirstNode(argHi);
- GenTreePtr argLoPrev = argLoFirst->gtPrev;
+ GenTreePtr argLoPrev = argLoFirst->gtPrev;
noway_assert(argHiFirst->gtPrev == argLo);
noway_assert(arg->gtPrev == argHi);
@@ -1181,14 +1168,14 @@ void Lowering::LowerArg(GenTreeCall* call, GenTreePtr* ppArg)
assert(comp->compCurStmt->gtStmt.gtStmtList == argLoFirst);
comp->compCurStmt->gtStmt.gtStmtList = argHiFirst;
}
- argHi->gtNext = putArgHi;
- putArgHi->gtPrev = argHi;
- putArgHi->gtNext = argLoFirst;
+ argHi->gtNext = putArgHi;
+ putArgHi->gtPrev = argHi;
+ putArgHi->gtNext = argLoFirst;
argLoFirst->gtPrev = putArgHi;
- argLo->gtNext = putArgLo;
- putArgLo->gtPrev = argLo;
- putArgLo->gtNext = arg;
- arg->gtPrev = putArgLo;
+ argLo->gtNext = putArgLo;
+ putArgLo->gtPrev = argLo;
+ putArgLo->gtNext = arg;
+ arg->gtPrev = putArgLo;
assert((arg->gtFlags & GTF_REVERSE_OPS) == 0);
arg->gtFlags |= GTF_REVERSE_OPS;
@@ -1202,15 +1189,15 @@ void Lowering::LowerArg(GenTreeCall* call, GenTreePtr* ppArg)
// Insert a copy to move float value to integer register.
if (call->IsVarargs() && varTypeIsFloating(type))
{
- var_types intType = (type == TYP_DOUBLE) ? TYP_LONG : TYP_INT;
- GenTreePtr intArg = comp->gtNewOperNode(GT_COPY, intType, arg);
+ var_types intType = (type == TYP_DOUBLE) ? TYP_LONG : TYP_INT;
+ GenTreePtr intArg = comp->gtNewOperNode(GT_COPY, intType, arg);
intArg->CopyCosts(arg);
info->node = intArg;
SpliceInUnary(call, ppArg, intArg);
// Update arg/type with new ones.
- arg = intArg;
+ arg = intArg;
type = intType;
}
#endif
@@ -1257,7 +1244,7 @@ void Lowering::LowerArgsForCall(GenTreeCall* call)
// (optionally specifying the register to place it in)
GenTree* Lowering::AddrGen(ssize_t addr, regNumber reg)
{
- //this should end up in codegen as : instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, addr)
+ // this should end up in codegen as : instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, addr)
GenTree* result = comp->gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
result->gtRegNum = reg;
@@ -1274,7 +1261,7 @@ GenTree* Lowering::AddrGen(void* addr, regNumber reg)
// do some common operations on trees before they are inserted as top level statements
GenTreeStmt* Lowering::LowerMorphAndSeqTree(GenTree* tree)
{
- tree = comp->fgMorphTree(tree);
+ tree = comp->fgMorphTree(tree);
GenTreeStmt* stmt = comp->fgNewStmtFromTree(tree);
return stmt;
}
@@ -1288,14 +1275,14 @@ GenTreeStmt* Lowering::LowerMorphAndSeqTree(GenTree* tree)
//
void Lowering::LowerCall(GenTree* node)
{
- GenTreeCall* call = node->AsCall();
+ GenTreeCall* call = node->AsCall();
GenTreeStmt* callStmt = comp->compCurStmt->AsStmt();
assert(comp->fgTreeIsInStmt(call, callStmt));
JITDUMP("lowering call (before):\n");
DISPTREE(call);
JITDUMP("\n");
-
+
LowerArgsForCall(call);
// RyuJIT arm is not set up for lowered call control
@@ -1318,33 +1305,33 @@ void Lowering::LowerCall(GenTree* node)
// Virtual and interface calls
switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK)
{
- case GTF_CALL_VIRT_STUB:
- result = LowerVirtualStubCall(call);
- break;
+ case GTF_CALL_VIRT_STUB:
+ result = LowerVirtualStubCall(call);
+ break;
- case GTF_CALL_VIRT_VTABLE:
- // stub dispatching is off or this is not a virtual call (could be a tailcall)
- result = LowerVirtualVtableCall(call);
- break;
+ case GTF_CALL_VIRT_VTABLE:
+ // stub dispatching is off or this is not a virtual call (could be a tailcall)
+ result = LowerVirtualVtableCall(call);
+ break;
- case GTF_CALL_NONVIRT:
- if (call->IsUnmanaged())
- {
- result = LowerNonvirtPinvokeCall(call);
- }
- else if (call->gtCallType == CT_INDIRECT)
- {
- result = LowerIndirectNonvirtCall(call);
- }
- else
- {
- result = LowerDirectCall(call);
- }
- break;
+ case GTF_CALL_NONVIRT:
+ if (call->IsUnmanaged())
+ {
+ result = LowerNonvirtPinvokeCall(call);
+ }
+ else if (call->gtCallType == CT_INDIRECT)
+ {
+ result = LowerIndirectNonvirtCall(call);
+ }
+ else
+ {
+ result = LowerDirectCall(call);
+ }
+ break;
- default:
- noway_assert(!"strange call type");
- break;
+ default:
+ noway_assert(!"strange call type");
+ break;
}
}
@@ -1361,7 +1348,7 @@ void Lowering::LowerCall(GenTree* node)
JITDUMP("results of lowering call:\n");
DISPTREE(result);
}
-
+
if (call->IsTailCallViaHelper())
{
// Either controlExpr or gtCallAddr must contain real call target.
@@ -1379,20 +1366,20 @@ void Lowering::LowerCall(GenTree* node)
comp->gtSetEvalOrder(result);
comp->fgSetTreeSeq(result, nullptr);
JITDUMP("results of lowering tail call via helper:\n");
- DISPTREE(result);
+ DISPTREE(result);
}
}
else if (call->IsFastTailCall())
{
LowerFastTailCall(call);
}
-
+
if (result)
- {
+ {
GenTree* insertionPoint = call;
if (!call->IsTailCallViaHelper())
- {
- // The controlExpr should go before the gtCallCookie and the gtCallAddr, if they exist
+ {
+ // The controlExpr should go before the gtCallCookie and the gtCallAddr, if they exist
if (call->gtCallType == CT_INDIRECT)
{
if (call->gtCallCookie != nullptr)
@@ -1425,18 +1412,18 @@ void Lowering::LowerCall(GenTree* node)
JITDUMP("\n");
}
-// Though the below described issue gets fixed in intellitrace dll of VS2015 (a.k.a Dev14),
+// Though the below described issue gets fixed in intellitrace dll of VS2015 (a.k.a Dev14),
// we still need this quirk for desktop so that older version of VS (e.g. VS2010/2012)
// continues to work.
// This quirk is excluded from other targets that have no back compat burden.
//
-// Quirk for VS debug-launch scenario to work:
+// Quirk for VS debug-launch scenario to work:
// See if this is a PInvoke call with exactly one param that is the address of a struct local.
// In such a case indicate to frame-layout logic to add 16-bytes of padding
// between save-reg area and locals. This is to protect against the buffer
-// overrun bug in microsoft.intellitrace.11.0.0.dll!ProfilerInterop.InitInterop().
+// overrun bug in microsoft.intellitrace.11.0.0.dll!ProfilerInterop.InitInterop().
//
-// A work-around to this bug is to disable IntelliTrace debugging
+// A work-around to this bug is to disable IntelliTrace debugging
// (VS->Tools->Options->IntelliTrace->Enable IntelliTrace - uncheck this option).
// The reason why this works on Jit64 is that at the point of AV the call stack is
//
@@ -1460,14 +1447,14 @@ void Lowering::LowerCall(GenTree* node)
//
// Due to buffer overrun, rbx doesn't get impacted. Whereas RyuJIT jitted code of
// the same method is pushing regs in the following order
-//
+//
// rbp
// rdi
// rsi
// rbx
// struct local
//
-// Therefore as a fix, we add padding between save-reg area and locals to
+// Therefore as a fix, we add padding between save-reg area and locals to
// make this scenario work against JB.
//
// Note: If this quirk gets broken due to other JIT optimizations, we should consider
@@ -1480,7 +1467,7 @@ void Lowering::CheckVSQuirkStackPaddingNeeded(GenTreeCall* call)
// Confine this to IL stub calls which aren't marked as unmanaged.
if (call->IsPInvoke() && !call->IsUnmanaged())
{
- bool paddingNeeded = false;
+ bool paddingNeeded = false;
GenTreePtr firstPutArgReg = nullptr;
for (GenTreeArgList* args = call->gtCallLateArgs; args; args = args->Rest())
{
@@ -1488,7 +1475,7 @@ void Lowering::CheckVSQuirkStackPaddingNeeded(GenTreeCall* call)
if (tmp->OperGet() == GT_PUTARG_REG)
{
if (firstPutArgReg == nullptr)
- {
+ {
firstPutArgReg = tmp;
GenTreePtr op1 = firstPutArgReg->gtOp.gtOp1;
@@ -1504,7 +1491,7 @@ void Lowering::CheckVSQuirkStackPaddingNeeded(GenTreeCall* call)
// First arg is addr of a struct local.
paddingNeeded = true;
}
- else
+ else
{
// Not a struct local.
assert(paddingNeeded == false);
@@ -1540,7 +1527,7 @@ void Lowering::CheckVSQuirkStackPaddingNeeded(GenTreeCall* call)
// We need to insert this after all nested calls, but before all the arguments to this call have been set up.
// To do this, we look for the first GT_PUTARG_STK or GT_PUTARG_REG, and insert the hook immediately before
// that. If there are no args, then it should be inserted before the call node.
-//
+//
// For example:
// * stmtExpr void (top level) (IL 0x000...0x010)
// arg0 SETUP | /--* argPlace ref REG NA $c5
@@ -1557,7 +1544,7 @@ void Lowering::CheckVSQuirkStackPaddingNeeded(GenTreeCall* call)
// arg0 in rcx | +--* putarg_reg ref REG NA
// control expr | +--* const(h) long 0x7ffe8e910e98 ftn REG NA
// \--* call void System.Runtime.Remoting.Identity.RemoveAppNameOrAppGuidIfNecessary $VN.Void
-//
+//
// In this case, the GT_PUTARG_REG src is a nested call. We need to put the embedded statement after that call
// (as shown). We assume that of all the GT_PUTARG_*, only the first one can have a nested call.
//
@@ -1566,7 +1553,7 @@ void Lowering::CheckVSQuirkStackPaddingNeeded(GenTreeCall* call)
// insertionPoint - if caller has an insertion point; If null
// profiler hook is inserted before args are setup
// but after all arg side effects are computed.
-void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree *insertionPoint)
+void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint)
{
assert(call->IsTailCall());
assert(comp->compIsProfilerHookNeeded());
@@ -1608,24 +1595,24 @@ void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree *insertionPoin
}
assert(insertionPoint != nullptr);
- GenTreeStmt* callStmt = comp->compCurStmt->AsStmt();
- GenTreePtr profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID);
+ GenTreeStmt* callStmt = comp->compCurStmt->AsStmt();
+ GenTreePtr profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID);
comp->fgInsertTreeBeforeAsEmbedded(profHookNode, insertionPoint, callStmt, comp->compCurBB);
}
// Lower fast tail call implemented as epilog+jmp.
// Also inserts PInvoke method epilog if required.
-void Lowering::LowerFastTailCall(GenTreeCall *call)
+void Lowering::LowerFastTailCall(GenTreeCall* call)
{
#if FEATURE_FASTTAILCALL
// Tail call restrictions i.e. conditions under which tail prefix is ignored.
// Most of these checks are already done by importer or fgMorphTailCall().
// This serves as a double sanity check.
- assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
- assert(!comp->opts.compNeedSecurityCheck); // tail call from methods that need security check
- assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
- assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
- assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
+ assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
+ assert(!comp->opts.compNeedSecurityCheck); // tail call from methods that need security check
+ assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
+ assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
+ assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
// We expect to see a call that meets the following conditions
assert(call->IsFastTailCall());
@@ -1634,14 +1621,14 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
// other in mutual recursion. Therefore, this block is reachable through
// a GC-safe point or the whole method is marked as fully interruptible.
//
- // TODO-Cleanup:
+ // TODO-Cleanup:
// optReachWithoutCall() depends on the fact that loop headers blocks
// will have a block number > fgLastBB. These loop headers gets added
// after dominator computation and get skipped by OptReachWithoutCall().
- // The below condition cannot be asserted in lower because fgSimpleLowering()
+ // The below condition cannot be asserted in lower because fgSimpleLowering()
// can add a new basic block for range check failure which becomes
// fgLastBB with block number > loop header block number.
- // assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
+ // assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
// !comp->optReachWithoutCall(comp->fgFirstBB, comp->compCurBB) || comp->genInterruptible);
// If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
@@ -1653,37 +1640,43 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
// Args for tail call are setup in incoming arg area. The gc-ness of args of
// caller and callee (which is being tail called) may not match. Therefore, everything
- // from arg setup until the epilog needs to be non-interruptible by GC. This is
+ // from arg setup until the epilog needs to be non-interruptible by GC. This is
// achieved by inserting GT_START_NONGC before the very first GT_PUTARG_STK node
// of call is setup. Note that once a stack arg is setup, it cannot have nested
// calls subsequently in execution order to setup other args, because the nested
- // call could over-write the stack arg that is setup earlier.
- GenTreePtr firstPutArgStk = nullptr;
- GenTreeArgList* args;
+ // call could over-write the stack arg that is setup earlier.
+ GenTreePtr firstPutArgStk = nullptr;
+ GenTreeArgList* args;
ArrayStack<GenTree*> putargs(comp);
for (args = call->gtCallArgs; args; args = args->Rest())
{
GenTreePtr tmp = args->Current();
if (tmp->OperGet() == GT_PUTARG_STK)
+ {
putargs.Push(tmp);
+ }
}
for (args = call->gtCallLateArgs; args; args = args->Rest())
{
GenTreePtr tmp = args->Current();
if (tmp->OperGet() == GT_PUTARG_STK)
+ {
putargs.Push(tmp);
+ }
}
if (putargs.Height() > 0)
+ {
firstPutArgStk = putargs.Bottom();
+ }
// If we have a putarg_stk node, also count the number of non-standard args the
// call node has. Note that while determining whether a tail call can be fast
// tail called, we don't count non-standard args (passed in R10 or R11) since they
// don't contribute to outgoing arg space. These non-standard args are not
- // accounted in caller's arg count but accounted in callee's arg count after
+ // accounted in caller's arg count but accounted in callee's arg count after
// fgMorphArgs(). Therefore, exclude callee's non-standard args while mapping
// callee's stack arg num to corresponding caller's stack arg num.
unsigned calleeNonStandardArgCount = call->GetNonStandardAddedArgCount(comp);
@@ -1691,9 +1684,9 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
// Say Caller(a, b, c, d, e) fast tail calls Callee(e, d, c, b, a)
// i.e. passes its arguments in reverse to Callee. During call site
// setup, after computing argument side effects, stack args are setup
- // first and reg args next. In the above example, both Callers and
+ // first and reg args next. In the above example, both Callers and
// Callee stack args (e and a respectively) share the same stack slot
- // and are alive at the same time. The act of setting up Callee's
+ // and are alive at the same time. The act of setting up Callee's
// stack arg will over-write the stack arg of Caller and if there are
// further uses of Caller stack arg we have to make sure that we move
// it to a temp before over-writing its slot and use temp in place of
@@ -1713,15 +1706,15 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
{
GenTreePtr putArgStkNode = putargs.Bottom(i);
- assert(putArgStkNode->OperGet() == GT_PUTARG_STK);
-
+ assert(putArgStkNode->OperGet() == GT_PUTARG_STK);
+
// Get the caller arg num corresponding to this callee arg.
// Note that these two args share the same stack slot. Therefore,
// if there are further uses of corresponding caller arg, we need
// to move it to a temp and use the temp in this call tree.
//
// Note that Caller is guaranteed to have a param corresponding to
- // this Callee's arg since fast tail call mechanism counts the
+ // this Callee's arg since fast tail call mechanism counts the
// stack slots required for both Caller and Callee for passing params
// and allow fast tail call only if stack slots required by Caller >=
// Callee.
@@ -1730,27 +1723,28 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
unsigned callerArgNum = argTabEntry->argNum - calleeNonStandardArgCount;
noway_assert(callerArgNum < comp->info.compArgsCount);
- unsigned callerArgLclNum = callerArgNum;
- LclVarDsc* callerArgDsc = comp->lvaTable + callerArgLclNum;
+ unsigned callerArgLclNum = callerArgNum;
+ LclVarDsc* callerArgDsc = comp->lvaTable + callerArgLclNum;
if (callerArgDsc->lvPromoted)
{
- callerArgLclNum = callerArgDsc->lvFieldLclStart; // update the callerArgNum to the promoted struct field's lclNum
+ callerArgLclNum =
+ callerArgDsc->lvFieldLclStart; // update the callerArgNum to the promoted struct field's lclNum
callerArgDsc = comp->lvaTable + callerArgLclNum;
}
noway_assert(callerArgDsc->lvIsParam);
// Start searching in execution order list till we encounter call node
- unsigned tmpLclNum = BAD_VAR_NUM;
- var_types tmpType = TYP_UNDEF;
+ unsigned tmpLclNum = BAD_VAR_NUM;
+ var_types tmpType = TYP_UNDEF;
for (GenTreePtr treeNode = putArgStkNode->gtNext; treeNode != call; treeNode = treeNode->gtNext)
- {
+ {
if (treeNode->OperIsLocal() || treeNode->OperIsLocalAddr())
- {
+ {
// This should neither be a GT_REG_VAR nor GT_PHI_ARG.
assert((treeNode->OperGet() != GT_REG_VAR) && (treeNode->OperGet() != GT_PHI_ARG));
- GenTreeLclVarCommon *lcl = treeNode->AsLclVarCommon();
- LclVarDsc* lclVar = &comp->lvaTable[lcl->gtLclNum];
+ GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon();
+ LclVarDsc* lclVar = &comp->lvaTable[lcl->gtLclNum];
// Fast tail calling criteria permits passing of structs of size 1, 2, 4 and 8 as args.
// It is possible that the callerArgLclNum corresponds to such a struct whose stack slot
@@ -1762,17 +1756,17 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
// Create tmp and use it in place of callerArgDsc
if (tmpLclNum == BAD_VAR_NUM)
{
- tmpLclNum = comp->lvaGrabTemp(true DEBUGARG("Fast tail call lowering is creating a new local variable"));
- comp->lvaSortAgain = true;
- tmpType = genActualType(callerArgDsc->lvaArgType());
- comp->lvaTable[tmpLclNum].lvType = tmpType;
+ tmpLclNum = comp->lvaGrabTemp(
+ true DEBUGARG("Fast tail call lowering is creating a new local variable"));
+ comp->lvaSortAgain = true;
+ tmpType = genActualType(callerArgDsc->lvaArgType());
+ comp->lvaTable[tmpLclNum].lvType = tmpType;
comp->lvaTable[tmpLclNum].lvRefCnt = 1;
}
lcl->SetLclNum(tmpLclNum);
lcl->SetOper(GT_LCL_VAR);
-
- }
+ }
}
}
@@ -1782,23 +1776,24 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
if (tmpLclNum != BAD_VAR_NUM)
{
assert(tmpType != TYP_UNDEF);
- GenTreeLclVar* local = new(comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, tmpType, callerArgLclNum, BAD_IL_OFFSET);
+ GenTreeLclVar* local =
+ new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, tmpType, callerArgLclNum, BAD_IL_OFFSET);
GenTree* assignExpr = comp->gtNewTempAssign(tmpLclNum, local);
comp->fgInsertTreeBeforeAsEmbedded(assignExpr, firstPutArgStk, callStmt, comp->compCurBB);
}
}
// Insert GT_START_NONGC node before the first GT_PUTARG_STK node.
- // Note that if there are no args to be setup on stack, no need to
- // insert GT_START_NONGC node.
+ // Note that if there are no args to be setup on stack, no need to
+ // insert GT_START_NONGC node.
GenTreePtr startNonGCNode = nullptr;
if (firstPutArgStk != nullptr)
- {
- startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID);
+ {
+ startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID);
comp->fgInsertTreeBeforeAsEmbedded(startNonGCNode, firstPutArgStk, callStmt, comp->compCurBB);
// GC interruptibility in the following case:
- // foo(a, b, c, d, e) { bar(a, b, c, d, e); }
+ // foo(a, b, c, d, e) { bar(a, b, c, d, e); }
// bar(a, b, c, d, e) { foo(a, b, d, d, e); }
//
// Since the instruction group starting from the instruction that sets up first
@@ -1824,7 +1819,7 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
InsertProfTailCallHook(call, startNonGCNode);
}
-#else // !FEATURE_FASTTAILCALL
+#else // !FEATURE_FASTTAILCALL
// Platform choose not to implement fast tail call mechanism.
// In such a case we should never be reaching this method as
@@ -1834,7 +1829,6 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
#endif
}
-
//------------------------------------------------------------------------
// LowerTailCallViaHelper: lower a call via the tailcall helper. Morph
// has already inserted tailcall helper special arguments. This function
@@ -1848,7 +1842,8 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
// For x86, lower
// tail.call(<function args>, int numberOfOldStackArgs, int dummyNumberOfNewStackArgs, int flags, void* dummyArg)
// as
-// JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* callTarget)
+// JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void*
+// callTarget)
// Note that the special arguments are on the stack, whereas the function arguments follow the normal convention.
//
// Also inserts PInvoke method epilog if required.
@@ -1860,22 +1855,22 @@ void Lowering::LowerFastTailCall(GenTreeCall *call)
// Return Value:
// Returns control expression tree for making a call to helper Jit_TailCall.
//
-GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree *callTarget)
-{
+GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree* callTarget)
+{
// Tail call restrictions i.e. conditions under which tail prefix is ignored.
// Most of these checks are already done by importer or fgMorphTailCall().
// This serves as a double sanity check.
- assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
- assert(!comp->opts.compNeedSecurityCheck); // tail call from methods that need security check
- assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
- assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
- assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
-
+ assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
+ assert(!comp->opts.compNeedSecurityCheck); // tail call from methods that need security check
+ assert(!call->IsUnmanaged()); // tail calls to unamanaged methods
+ assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
+ assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
+
// We expect to see a call that meets the following conditions
assert(call->IsTailCallViaHelper());
assert(callTarget != nullptr);
-
- // The TailCall helper call never returns to the caller and is not GC interruptible.
+
+ // The TailCall helper call never returns to the caller and is not GC interruptible.
// Therefore the block containing the tail call should be a GC safe point to avoid
// GC starvation.
assert(comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT);
@@ -1888,7 +1883,7 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree *callTarget
}
// Remove gtCallAddr from execution order if one present.
- GenTreeStmt* callStmt = comp->compCurStmt->AsStmt();
+ GenTreeStmt* callStmt = comp->compCurStmt->AsStmt();
if (call->gtCallType == CT_INDIRECT)
{
assert(call->gtCallAddr != nullptr);
@@ -1899,13 +1894,13 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree *callTarget
#if defined(_TARGET_AMD64_)
- // For AMD64, first argument is CopyRoutine and second argument is a place holder node.
+// For AMD64, first argument is CopyRoutine and second argument is a place holder node.
#ifdef DEBUG
argEntry = comp->gtArgEntryByArgNum(call, 0);
assert(argEntry != nullptr);
assert(argEntry->node->gtOper == GT_PUTARG_REG);
- GenTree *firstArg = argEntry->node->gtOp.gtOp1;
+ GenTree* firstArg = argEntry->node->gtOp.gtOp1;
assert(firstArg->gtOper == GT_CNS_INT);
#endif
@@ -1913,9 +1908,9 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree *callTarget
argEntry = comp->gtArgEntryByArgNum(call, 1);
assert(argEntry != nullptr);
assert(argEntry->node->gtOper == GT_PUTARG_REG);
- GenTree *secondArg = argEntry->node->gtOp.gtOp1;
+ GenTree* secondArg = argEntry->node->gtOp.gtOp1;
- comp->fgInsertTreeInListAfter(callTarget, secondArg, callStmt);
+ comp->fgInsertTreeInListAfter(callTarget, secondArg, callStmt);
comp->fgDeleteTreeFromList(callStmt, secondArg);
argEntry->node->gtOp.gtOp1 = callTarget;
@@ -1937,7 +1932,7 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree *callTarget
assert(argEntry->node->gtOper == GT_PUTARG_STK);
GenTree* arg0 = argEntry->node->gtOp.gtOp1;
- comp->fgInsertTreeInListAfter(callTarget, arg0, callStmt);
+ comp->fgInsertTreeInListAfter(callTarget, arg0, callStmt);
comp->fgDeleteTreeFromList(callStmt, arg0);
argEntry->node->gtOp.gtOp1 = callTarget;
@@ -1948,9 +1943,8 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree *callTarget
GenTree* arg1 = argEntry->node->gtOp.gtOp1;
assert(arg1->gtOper == GT_CNS_INT);
- ssize_t tailCallHelperFlags =
- 1 | // always restore EDI,ESI,EBX
- (call->IsVirtualStub() ? 0x2 : 0x0); // Stub dispatch flag
+ ssize_t tailCallHelperFlags = 1 | // always restore EDI,ESI,EBX
+ (call->IsVirtualStub() ? 0x2 : 0x0); // Stub dispatch flag
arg1->gtIntCon.gtIconVal = tailCallHelperFlags;
// arg 2 == numberOfNewStackArgsWords
@@ -1976,13 +1970,13 @@ GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree *callTarget
#endif // _TARGET_*
// Transform this call node into a call to Jit tail call helper.
- call->gtCallType = CT_HELPER;
+ call->gtCallType = CT_HELPER;
call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_TAILCALL);
call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
// Lower this as if it were a pure helper call.
call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_HELPER);
- GenTree *result = LowerDirectCall(call);
+ GenTree* result = LowerDirectCall(call);
// Now add back tail call flags for identifying this node as tail call dispatched via helper.
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_HELPER;
@@ -2033,7 +2027,7 @@ void Lowering::LowerRet(GenTree* ret)
GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
{
noway_assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_HELPER);
-
+
// Don't support tail calling helper methods.
// But we might encounter tail calls dispatched via JIT helper appear as a tail call to helper.
noway_assert(!call->IsTailCall() || call->IsTailCallViaHelper() || call->gtCallType == CT_USER_FUNC);
@@ -2042,19 +2036,19 @@ GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
// call is known at JIT time. If not it is either an indirect call
// or the address must be accessed via an single/double indirection.
- void* addr;
- InfoAccessType accessType;
+ void* addr;
+ InfoAccessType accessType;
CorInfoHelpFunc helperNum = comp->eeGetHelperNum(call->gtCallMethHnd);
#ifdef FEATURE_READYTORUN_COMPILER
if (call->gtEntryPoint.addr != nullptr)
{
accessType = call->gtEntryPoint.accessType;
- addr = call->gtEntryPoint.addr;
+ addr = call->gtEntryPoint.addr;
}
else
#endif
- if (call->gtCallType == CT_HELPER)
+ if (call->gtCallType == CT_HELPER)
{
noway_assert(helperNum != CORINFO_HELP_UNDEF);
@@ -2071,106 +2065,110 @@ GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
else
{
accessType = IAT_PVALUE;
- addr = pAddr;
+ addr = pAddr;
}
}
else
{
noway_assert(helperNum == CORINFO_HELP_UNDEF);
- CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
+ CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
if (call->IsSameThis())
+ {
aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
+ }
if (!call->NeedsNullCheck())
+ {
aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
+ }
CORINFO_CONST_LOOKUP addrInfo;
comp->info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo, aflags);
accessType = addrInfo.accessType;
- addr = addrInfo.addr;
+ addr = addrInfo.addr;
}
GenTree* result = nullptr;
switch (accessType)
{
- case IAT_VALUE:
- // Non-virtual direct call to known address
- if (!IsCallTargetInRange(addr) || call->IsTailCall())
- {
- result = AddrGen(addr);
- }
- else
- {
- // a direct call within range of hardware relative call instruction
- // stash the address for codegen
- call->gtDirectCallAddress = addr;
- }
- break;
-
- case IAT_PVALUE:
- {
- // Non-virtual direct calls to addresses accessed by
- // a single indirection.
- GenTree* cellAddr = AddrGen(addr);
- GenTree* indir = Ind(cellAddr);
+ case IAT_VALUE:
+ // Non-virtual direct call to known address
+ if (!IsCallTargetInRange(addr) || call->IsTailCall())
+ {
+ result = AddrGen(addr);
+ }
+ else
+ {
+ // a direct call within range of hardware relative call instruction
+ // stash the address for codegen
+ call->gtDirectCallAddress = addr;
+ }
+ break;
+
+ case IAT_PVALUE:
+ {
+ // Non-virtual direct calls to addresses accessed by
+ // a single indirection.
+ GenTree* cellAddr = AddrGen(addr);
+ GenTree* indir = Ind(cellAddr);
#ifdef FEATURE_READYTORUN_COMPILER
#ifdef _TARGET_ARM64_
- // For arm64, we dispatch code same as VSD using X11 for indirection cell address,
- // which ZapIndirectHelperThunk expects.
- if (call->IsR2RRelativeIndir())
- {
- cellAddr->gtRegNum = REG_R2R_INDIRECT_PARAM;
- indir->gtRegNum = REG_JUMP_THUNK_PARAM;
- }
+ // For arm64, we dispatch code same as VSD using X11 for indirection cell address,
+ // which ZapIndirectHelperThunk expects.
+ if (call->IsR2RRelativeIndir())
+ {
+ cellAddr->gtRegNum = REG_R2R_INDIRECT_PARAM;
+ indir->gtRegNum = REG_JUMP_THUNK_PARAM;
+ }
#endif
#endif
- result = indir;
- break;
- }
-
- case IAT_PPVALUE:
- // Non-virtual direct calls to addresses accessed by
- // a double indirection.
- //
- // Double-indirection. Load the address into a register
- // and call indirectly through the register
- noway_assert(helperNum == CORINFO_HELP_UNDEF);
- result = AddrGen(addr);
- result = Ind(Ind(result));
- break;
-
- default:
- noway_assert(!"Bad accessType");
- break;
+ result = indir;
+ break;
+ }
+
+ case IAT_PPVALUE:
+ // Non-virtual direct calls to addresses accessed by
+ // a double indirection.
+ //
+ // Double-indirection. Load the address into a register
+ // and call indirectly through the register
+ noway_assert(helperNum == CORINFO_HELP_UNDEF);
+ result = AddrGen(addr);
+ result = Ind(Ind(result));
+ break;
+
+ default:
+ noway_assert(!"Bad accessType");
+ break;
}
return result;
}
-
GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call)
{
noway_assert(call->gtCallType == CT_USER_FUNC);
- assert((comp->info.compCompHnd->getMethodAttribs(call->gtCallMethHnd) & (CORINFO_FLG_DELEGATE_INVOKE|CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE|CORINFO_FLG_FINAL));
+ assert((comp->info.compCompHnd->getMethodAttribs(call->gtCallMethHnd) &
+ (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL));
GenTree* thisArgNode;
if (call->IsTailCallViaHelper())
{
#ifdef _TARGET_X86_ // x86 tailcall via helper follows normal calling convention, but with extra stack args.
const unsigned argNum = 0;
-#else // !_TARGET_X86_
+#else // !_TARGET_X86_
// In case of helper dispatched tail calls, "thisptr" will be the third arg.
// The first two args are: real call target and addr of args copy routine.
- const unsigned argNum = 2;
+ const unsigned argNum = 2;
#endif // !_TARGET_X86_
fgArgTabEntryPtr thisArgTabEntry = comp->gtArgEntryByArgNum(call, argNum);
- thisArgNode = thisArgTabEntry->node;
+ thisArgNode = thisArgTabEntry->node;
}
else
{
@@ -2198,9 +2196,9 @@ GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call)
else
#endif // _TARGET_X86_
{
- unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call"));
- GenTreeStmt* newStmt = comp->fgInsertEmbeddedFormTemp(&thisArgNode->gtOp.gtOp1, delegateInvokeTmp);
- originalThisExpr = thisArgNode->gtOp.gtOp1; // it's changed; reload it.
+ unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call"));
+ GenTreeStmt* newStmt = comp->fgInsertEmbeddedFormTemp(&thisArgNode->gtOp.gtOp1, delegateInvokeTmp);
+ originalThisExpr = thisArgNode->gtOp.gtOp1; // it's changed; reload it.
newStmt->gtFlags |= GTF_STMT_SKIP_LOWER; // we're in postorder so we have already processed this subtree
GenTree* stLclVar = newStmt->gtStmtExpr;
assert(stLclVar->OperIsLocalStore());
@@ -2210,11 +2208,8 @@ GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call)
// replace original expression feeding into thisPtr with
// [originalThis + offsetOfDelegateInstance]
- GenTree* newThisAddr = new(comp, GT_LEA) GenTreeAddrMode(TYP_REF,
- originalThisExpr,
- nullptr,
- 0,
- comp->eeGetEEInfo()->offsetOfDelegateInstance);
+ GenTree* newThisAddr = new (comp, GT_LEA)
+ GenTreeAddrMode(TYP_REF, originalThisExpr, nullptr, 0, comp->eeGetEEInfo()->offsetOfDelegateInstance);
originalThisExpr->InsertAfterSelf(newThisAddr);
GenTree* newThis = comp->gtNewOperNode(GT_IND, TYP_REF, newThisAddr);
@@ -2228,7 +2223,7 @@ GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call)
GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(originalThisExpr->TypeGet(), lclNum, BAD_IL_OFFSET);
unsigned targetOffs = comp->eeGetEEInfo()->offsetOfDelegateFirstTarget;
- GenTree* result = new(comp, GT_LEA) GenTreeAddrMode(TYP_REF, base, nullptr, 0, targetOffs);
+ GenTree* result = new (comp, GT_LEA) GenTreeAddrMode(TYP_REF, base, nullptr, 0, targetOffs);
GenTree* callTarget = Ind(result);
// don't need to sequence and insert this tree, caller will do it
@@ -2253,7 +2248,6 @@ GenTree* Lowering::LowerIndirectNonvirtCall(GenTreeCall* call)
return nullptr;
}
-
//------------------------------------------------------------------------
// CreateReturnTrapSeq: Create a tree to perform a "return trap", used in PInvoke
// epilogs to invoke a GC under a condition. The return trap checks some global
@@ -2292,7 +2286,6 @@ GenTree* Lowering::CreateReturnTrapSeq()
return comp->gtNewOperNode(GT_RETURNTRAP, TYP_INT, testTree);
}
-
//------------------------------------------------------------------------
// SetGCState: Create a tree that stores the given constant (0 or 1) into the
// thread's GC state field.
@@ -2313,17 +2306,16 @@ GenTree* Lowering::SetGCState(int state)
const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
- GenTree* base = new(comp, GT_LCL_VAR) GenTreeLclVar(TYP_I_IMPL, comp->info.compLvFrameListRoot, -1);
+ GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(TYP_I_IMPL, comp->info.compLvFrameListRoot, -1);
- GenTree* storeGcState = new(comp, GT_STOREIND)
+ GenTree* storeGcState = new (comp, GT_STOREIND)
GenTreeStoreInd(TYP_BYTE,
- new(comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState),
- new(comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state));
+ new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState),
+ new (comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state));
return storeGcState;
}
-
//------------------------------------------------------------------------
// CreateFrameLinkUpdate: Create a tree that either links or unlinks the
// locally-allocated InlinedCallFrame from the Frame list.
@@ -2338,22 +2330,21 @@ GenTree* Lowering::SetGCState(int state)
//
GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action)
{
- const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
+ const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;
- GenTree* TCB = new(comp, GT_LCL_VAR)
- GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot, (IL_OFFSET)-1); // cast to resolve ambiguity.
+ GenTree* TCB = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot,
+ (IL_OFFSET)-1); // cast to resolve ambiguity.
// Thread->m_pFrame
- GenTree* addr = new(comp, GT_LEA)
- GenTreeAddrMode(TYP_I_IMPL, TCB, nullptr, 1, pInfo->offsetOfThreadFrame);
+ GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, TCB, nullptr, 1, pInfo->offsetOfThreadFrame);
GenTree* data = nullptr;
if (action == PushFrame)
{
// Thread->m_pFrame = &inlinedCallFrame;
- data = new(comp, GT_LCL_FLD_ADDR)
+ data = new (comp, GT_LCL_FLD_ADDR)
GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
}
else
@@ -2361,14 +2352,13 @@ GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action)
assert(action == PopFrame);
// Thread->m_pFrame = inlinedCallFrame.m_pNext;
- data = new(comp, GT_LCL_FLD)
- GenTreeLclFld(GT_LCL_FLD, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, pInfo->inlinedCallFrameInfo.offsetOfFrameLink);
+ data = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar,
+ pInfo->inlinedCallFrameInfo.offsetOfFrameLink);
}
- GenTree* storeInd = new(comp, GT_STOREIND) GenTreeStoreInd(TYP_I_IMPL, addr, data);
+ GenTree* storeInd = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_I_IMPL, addr, data);
return storeInd;
}
-
//------------------------------------------------------------------------
// InsertPInvokeMethodProlog: Create the code that runs at the start of
// every method that has PInvoke calls.
@@ -2380,7 +2370,7 @@ GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action)
//
// The (current) layout is as follows:
//
-// 64-bit 32-bit CORINFO_EE_INFO
+// 64-bit 32-bit CORINFO_EE_INFO
// offset offset field name offset when set
// -----------------------------------------------------------------------------------------
// +00h +00h GS cookie offsetOfGSCookie
@@ -2419,12 +2409,12 @@ void Lowering::InsertPInvokeMethodProlog()
JITDUMP("======= Inserting PInvoke method prolog\n");
- const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
+ const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;
// First arg: &compiler->lvaInlinedPInvokeFrameVar + callFrameInfo.offsetOfFrameVptr
- GenTree* frameAddr = new(comp, GT_LCL_FLD_ADDR)
+ GenTree* frameAddr = new (comp, GT_LCL_FLD_ADDR)
GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
// Call runtime helper to fill in our InlinedCallFrame and push it on the Frame list:
@@ -2434,8 +2424,8 @@ void Lowering::InsertPInvokeMethodProlog()
#ifdef _TARGET_X86_
GenTreeArgList* argList = comp->gtNewArgList(frameAddr);
-#else // !_TARGET_X86_
- GenTreeArgList* argList = comp->gtNewArgList(frameAddr, PhysReg(REG_SECRET_STUB_PARAM));
+#else // !_TARGET_X86_
+ GenTreeArgList* argList = comp->gtNewArgList(frameAddr, PhysReg(REG_SECRET_STUB_PARAM));
#endif // !_TARGET_X86_
GenTree* call = comp->gtNewHelperCallNode(CORINFO_HELP_INIT_PINVOKE_FRAME, TYP_I_IMPL, 0, argList);
@@ -2445,8 +2435,9 @@ void Lowering::InsertPInvokeMethodProlog()
noway_assert(!varDsc->lvIsParam);
noway_assert(varDsc->lvType == TYP_I_IMPL);
- GenTree* store = new(comp, GT_STORE_LCL_VAR)
- GenTreeLclVar(GT_STORE_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot, (IL_OFFSET)-1); // cast to resolve ambiguity.
+ GenTree* store =
+ new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot,
+ (IL_OFFSET)-1); // cast to resolve ambiguity.
store->gtOp.gtOp1 = call;
store->gtFlags |= GTF_VAR_DEF;
@@ -2455,14 +2446,14 @@ void Lowering::InsertPInvokeMethodProlog()
GenTree* lastStmt = stmt;
DISPTREE(lastStmt);
-#ifndef _TARGET_X86_ // For x86, this step is done at the call site (due to stack pointer not being static in the function).
+#ifndef _TARGET_X86_ // For x86, this step is done at the call site (due to stack pointer not being static in the
+ // function).
// --------------------------------------------------------
// InlinedCallFrame.m_pCallSiteSP = @RSP;
- GenTreeLclFld* storeSP = new(comp, GT_STORE_LCL_FLD)
- GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
- callFrameInfo.offsetOfCallSiteSP);
+ GenTreeLclFld* storeSP = new (comp, GT_STORE_LCL_FLD)
+ GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
storeSP->gtOp1 = PhysReg(REG_SPBASE);
GenTreeStmt* storeSPStmt = LowerMorphAndSeqTree(storeSP);
@@ -2475,9 +2466,9 @@ void Lowering::InsertPInvokeMethodProlog()
// --------------------------------------------------------
// InlinedCallFrame.m_pCalleeSavedEBP = @RBP;
- GenTreeLclFld* storeFP = new(comp, GT_STORE_LCL_FLD)
- GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
- callFrameInfo.offsetOfCalleeSavedFP);
+ GenTreeLclFld* storeFP =
+ new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
+ callFrameInfo.offsetOfCalleeSavedFP);
storeFP->gtOp1 = PhysReg(REG_FPBASE);
GenTreeStmt* storeFPStmt = LowerMorphAndSeqTree(storeFP);
@@ -2499,7 +2490,6 @@ void Lowering::InsertPInvokeMethodProlog()
}
}
-
//------------------------------------------------------------------------
// InsertPInvokeMethodEpilog: Code that needs to be run when exiting any method
// that has PInvoke inlines. This needs to be inserted any place you can exit the
@@ -2512,8 +2502,7 @@ void Lowering::InsertPInvokeMethodProlog()
// Return Value:
// Code tree to perform the action.
//
-void Lowering::InsertPInvokeMethodEpilog(BasicBlock *returnBB
- DEBUGARG(GenTreePtr lastExpr) )
+void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTreePtr lastExpr))
{
assert(returnBB != nullptr);
assert(comp->info.compCallUnmanaged);
@@ -2526,13 +2515,14 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock *returnBB
JITDUMP("======= Inserting PInvoke method epilog\n");
// Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls.
- assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) || returnBB->endsWithTailCallOrJmp(comp));
+ assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) ||
+ returnBB->endsWithTailCallOrJmp(comp));
- GenTreeStmt* lastTopLevelStmt = comp->fgGetLastTopLevelStmt(returnBB)->AsStmt();
- GenTreePtr lastTopLevelStmtExpr = lastTopLevelStmt->gtStmtExpr;
+ GenTreeStmt* lastTopLevelStmt = comp->fgGetLastTopLevelStmt(returnBB)->AsStmt();
+ GenTreePtr lastTopLevelStmtExpr = lastTopLevelStmt->gtStmtExpr;
// GenTree of the last top-level stmt should match.
- assert(lastTopLevelStmtExpr == lastExpr);
+ assert(lastTopLevelStmtExpr == lastExpr);
// Note: PInvoke Method Epilog (PME) needs to be inserted just before GT_RETURN, GT_JMP or GT_CALL node in execution
// order so that it is guaranteed that there will be no further PInvokes after that point in the method.
@@ -2556,7 +2546,7 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock *returnBB
// will be live-in to a BBJ_RETURN block without any uses. Long term we need to fix liveness for x64 case to
// properly extend the life of compLvFrameListRoot var.
//
- // Thread.offsetOfGcState = 0/1
+ // Thread.offsetOfGcState = 0/1
// That is [tcb + offsetOfGcState] = 1
GenTree* storeGCState = SetGCState(1);
comp->fgInsertTreeBeforeAsEmbedded(storeGCState, lastTopLevelStmtExpr, lastTopLevelStmt, returnBB);
@@ -2570,7 +2560,6 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock *returnBB
}
}
-
//------------------------------------------------------------------------
// InsertPInvokeCallProlog: Emit the call-site prolog for direct calls to unmanaged code.
// It does all the necessary call-site setup of the InlinedCallFrame.
@@ -2601,11 +2590,12 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
if (comp->opts.ShouldUsePInvokeHelpers())
{
// First argument is the address of the frame variable.
- GenTree* frameAddr = new(comp, GT_LCL_VAR_ADDR)
+ GenTree* frameAddr = new (comp, GT_LCL_VAR_ADDR)
GenTreeLclVar(GT_LCL_VAR_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, BAD_IL_OFFSET);
// Insert call to CORINFO_HELP_JIT_PINVOKE_BEGIN
- GenTree* helperCall = comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_BEGIN, TYP_VOID, 0, comp->gtNewArgList(frameAddr));
+ GenTree* helperCall =
+ comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_BEGIN, TYP_VOID, 0, comp->gtNewArgList(frameAddr));
comp->fgMorphTree(helperCall);
comp->fgInsertTreeBeforeAsEmbedded(helperCall, insertBefore, comp->compCurStmt->AsStmt(), currBlock);
@@ -2639,10 +2629,9 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
{
assert(callType == CT_USER_FUNC);
- void* pEmbedMethodHandle = nullptr;
- CORINFO_METHOD_HANDLE embedMethodHandle = comp->info.compCompHnd->embedMethodHandle(
- call->gtCallMethHnd,
- &pEmbedMethodHandle);
+ void* pEmbedMethodHandle = nullptr;
+ CORINFO_METHOD_HANDLE embedMethodHandle =
+ comp->info.compCompHnd->embedMethodHandle(call->gtCallMethHnd, &pEmbedMethodHandle);
noway_assert((!embedMethodHandle) != (!pEmbedMethodHandle));
@@ -2661,11 +2650,9 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
if (src != nullptr)
{
// Store into InlinedCallFrame.m_Datum, the offset of which is given by offsetOfCallTarget.
- GenTreeLclFld* store = new(comp, GT_STORE_LCL_FLD)
- GenTreeLclFld(GT_STORE_LCL_FLD,
- TYP_I_IMPL,
- comp->lvaInlinedPInvokeFrameVar,
- callFrameInfo.offsetOfCallTarget);
+ GenTreeLclFld* store =
+ new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
+ callFrameInfo.offsetOfCallTarget);
store->gtOp1 = src;
comp->fgInsertTreeBeforeAsEmbedded(store, insertBefore, comp->compCurStmt->AsStmt(), currBlock);
DISPTREE(comp->compCurStmt);
@@ -2676,11 +2663,8 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
// ----------------------------------------------------------------------------------
// InlinedCallFrame.m_pCallSiteSP = SP
- GenTreeLclFld* storeCallSiteSP = new(comp, GT_STORE_LCL_FLD)
- GenTreeLclFld(GT_STORE_LCL_FLD,
- TYP_I_IMPL,
- comp->lvaInlinedPInvokeFrameVar,
- callFrameInfo.offsetOfCallSiteSP);
+ GenTreeLclFld* storeCallSiteSP = new (comp, GT_STORE_LCL_FLD)
+ GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
storeCallSiteSP->gtOp1 = PhysReg(REG_SPBASE);
@@ -2692,17 +2676,15 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
// ----------------------------------------------------------------------------------
// InlinedCallFrame.m_pCallerReturnAddress = &label (the address of the instruction immediately following the call)
- GenTreeLclFld* storeLab = new(comp, GT_STORE_LCL_FLD)
- GenTreeLclFld(GT_STORE_LCL_FLD,
- TYP_I_IMPL,
- comp->lvaInlinedPInvokeFrameVar,
- callFrameInfo.offsetOfReturnAddress);
+ GenTreeLclFld* storeLab =
+ new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
+ callFrameInfo.offsetOfReturnAddress);
// We don't have a real label, and inserting one is hard (even if we made a special node),
// so for now we will just 'know' what this means in codegen.
- GenTreeLabel* labelRef = new(comp, GT_LABEL) GenTreeLabel(nullptr);
- labelRef->gtType = TYP_I_IMPL;
- storeLab->gtOp1 = labelRef;
+ GenTreeLabel* labelRef = new (comp, GT_LABEL) GenTreeLabel(nullptr);
+ labelRef->gtType = TYP_I_IMPL;
+ storeLab->gtOp1 = labelRef;
comp->fgInsertTreeBeforeAsEmbedded(storeLab, insertBefore, comp->compCurStmt->AsStmt(), currBlock);
DISPTREE(comp->compCurStmt);
@@ -2710,7 +2692,7 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
if (!(comp->opts.eeFlags & CORJIT_FLG_IL_STUB))
{
// Set the TCB's frame to be the one we just created.
- // Note the init routine for the InlinedCallFrame (CORINFO_HELP_INIT_PINVOKE_FRAME)
+ // Note the init routine for the InlinedCallFrame (CORINFO_HELP_INIT_PINVOKE_FRAME)
// has prepended it to the linked list to maintain the stack of Frames.
//
// Stubs do this once per stub, not once per call.
@@ -2729,7 +2711,6 @@ void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
DISPTREE(comp->compCurStmt);
}
-
//------------------------------------------------------------------------
// InsertPInvokeCallEpilog: Insert the code that goes after every inlined pinvoke call.
//
@@ -2749,12 +2730,13 @@ void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call)
noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
// First argument is the address of the frame variable.
- GenTree* frameAddr = new(comp, GT_LCL_VAR)
- GenTreeLclVar(GT_LCL_VAR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, BAD_IL_OFFSET);
+ GenTree* frameAddr =
+ new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, BAD_IL_OFFSET);
frameAddr->gtOper = GT_LCL_VAR_ADDR;
// Insert call to CORINFO_HELP_JIT_PINVOKE_END
- GenTree* helperCall = comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_END, TYP_VOID, 0, comp->gtNewArgList(frameAddr));
+ GenTree* helperCall =
+ comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_END, TYP_VOID, 0, comp->gtNewArgList(frameAddr));
comp->fgMorphTree(helperCall);
comp->fgInsertTreeAfterAsEmbedded(helperCall, call, comp->compCurStmt->AsStmt(), currBlock);
@@ -2768,8 +2750,8 @@ void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call)
// gcstate = 1
GenTree* latest = call;
- GenTree* tree = SetGCState(1);
- newStmt = comp->fgInsertTreeAfterAsEmbedded(tree, latest, topStmt, currBlock);
+ GenTree* tree = SetGCState(1);
+ newStmt = comp->fgInsertTreeAfterAsEmbedded(tree, latest, topStmt, currBlock);
DISPTREE(newStmt);
latest = tree;
if (newStmt->gtStmtIsTopLevel())
@@ -2777,7 +2759,7 @@ void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call)
topStmt = newStmt;
}
- tree = CreateReturnTrapSeq();
+ tree = CreateReturnTrapSeq();
newStmt = comp->fgInsertTreeAfterAsEmbedded(tree, latest, topStmt, currBlock);
DISPTREE(newStmt);
latest = tree;
@@ -2801,7 +2783,6 @@ void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call)
}
}
-
//------------------------------------------------------------------------
// LowerNonvirtPinvokeCall: Lower a non-virtual / indirect PInvoke call
//
@@ -2828,7 +2809,8 @@ GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
// // Set up frame information
// inlinedCallFrame.callTarget = methodHandle; // stored in m_Datum
// inlinedCallFrame.m_pCallSiteSP = SP; // x86 only
- // inlinedCallFrame.m_pCallerReturnAddress = &label; (the address of the instruction immediately following the call)
+ // inlinedCallFrame.m_pCallerReturnAddress = &label; (the address of the instruction immediately following the
+ // call)
// Thread.m_pFrame = &inlinedCallFrame; (non-IL-stub only)
//
// // Switch the thread's GC mode to preemptive mode
@@ -2863,7 +2845,7 @@ GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
// platform. They may be changed in the future such that they preserve all register values.
GenTree* result = nullptr;
- void* addr = nullptr;
+ void* addr = nullptr;
// assert we have seen one of these
noway_assert(comp->info.compCallUnmanaged != 0);
@@ -2880,19 +2862,19 @@ GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
if (call->gtCallType != CT_INDIRECT)
{
noway_assert(call->gtCallType == CT_USER_FUNC);
- CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd;
+ CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd;
CORINFO_CONST_LOOKUP lookup;
#if COR_JIT_EE_VERSION > 460
comp->info.compCompHnd->getAddressOfPInvokeTarget(methHnd, &lookup);
#else
- void* pIndirection;
+ void* pIndirection;
lookup.accessType = IAT_PVALUE;
- lookup.addr = comp->info.compCompHnd->getAddressOfPInvokeFixup(methHnd, &pIndirection);
+ lookup.addr = comp->info.compCompHnd->getAddressOfPInvokeFixup(methHnd, &pIndirection);
if (lookup.addr == nullptr)
{
lookup.accessType = IAT_PPVALUE;
- lookup.addr = pIndirection;
+ lookup.addr = pIndirection;
}
#endif
@@ -2906,8 +2888,8 @@ GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
}
else
{
- // a direct call within range of hardware relative call instruction
- // stash the address for codegen
+ // a direct call within range of hardware relative call instruction
+ // stash the address for codegen
call->gtDirectCallAddress = addr;
#ifdef FEATURE_READYTORUN_COMPILER
call->gtEntryPoint.addr = nullptr;
@@ -2930,15 +2912,15 @@ GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
return result;
}
-// Expand the code necessary to calculate the control target.
+// Expand the code necessary to calculate the control target.
// Returns: the expression needed to calculate the control target
// May insert embedded statements
GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
{
- noway_assert(call->gtCallType == CT_USER_FUNC);
+ noway_assert(call->gtCallType == CT_USER_FUNC);
// If this is a tail call via helper, thisPtr will be the third argument.
- int thisPtrArgNum;
+ int thisPtrArgNum;
regNumber thisPtrArgReg;
#ifndef _TARGET_X86_ // x86 tailcall via helper follows normal calling convention, but with extra stack args.
@@ -2958,7 +2940,7 @@ GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
fgArgTabEntry* argEntry = comp->gtArgEntryByArgNum(call, thisPtrArgNum);
assert(argEntry->regNum == thisPtrArgReg);
assert(argEntry->node->gtOper == GT_PUTARG_REG);
- GenTree *thisPtr = argEntry->node->gtOp.gtOp1;
+ GenTree* thisPtr = argEntry->node->gtOp.gtOp1;
// If what we are passing as the thisptr is not already a local, make a new local to place it in
// because we will be creating expressions based on it.
@@ -2988,11 +2970,12 @@ GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
GenTree* local;
if (thisPtr->isLclField())
{
- local = new(comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, thisPtr->TypeGet(), lclNum, thisPtr->AsLclFld()->gtLclOffs);
+ local = new (comp, GT_LCL_FLD)
+ GenTreeLclFld(GT_LCL_FLD, thisPtr->TypeGet(), lclNum, thisPtr->AsLclFld()->gtLclOffs);
}
else
{
- local = new(comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, thisPtr->TypeGet(), lclNum, BAD_IL_OFFSET);
+ local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, thisPtr->TypeGet(), lclNum, BAD_IL_OFFSET);
}
// pointer to virtual table = [REG_CALL_THIS + offs]
@@ -3000,8 +2983,9 @@ GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
// Get hold of the vtable offset (note: this might be expensive)
unsigned vtabOffsOfIndirection;
- unsigned vtabOffsAfterIndirection;
- comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection);
+ unsigned vtabOffsAfterIndirection;
+ comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
+ &vtabOffsAfterIndirection);
// Get the appropriate vtable chunk
// result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
@@ -3056,11 +3040,11 @@ GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
// TODO-Cleanup: Disable emitting random NOPs
// This is code to set up an indirect call to a stub address computed
- // via dictionary lookup.
+ // via dictionary lookup.
if (call->gtCallType == CT_INDIRECT)
{
NYI_X86("Virtual Stub dispatched call lowering via dictionary lookup");
-
+
// The importer decided we needed a stub call via a computed
// stub dispatch address, i.e. an address which came from a dictionary lookup.
// - The dictionary lookup produces an indirected address, suitable for call
@@ -3088,7 +3072,7 @@ GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
noway_assert(call->IsVirtualStubRelativeIndir());
// Direct stub calls, though the stubAddr itself may still need to be
- // accessed via an indirection.
+ // accessed via an indirection.
GenTree* addr = AddrGen(stubAddr);
#ifdef _TARGET_X86_
@@ -3105,12 +3089,12 @@ GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
{
GenTree* indir = Ind(addr);
- // On x86 we generate this:
- // call dword ptr [rel32] ; FF 15 ---rel32----
- // So we don't use a register.
+// On x86 we generate this:
+// call dword ptr [rel32] ; FF 15 ---rel32----
+// So we don't use a register.
#ifndef _TARGET_X86_
// on x64 we must materialize the target using specific registers.
- addr->gtRegNum = REG_VIRTUAL_STUB_PARAM;
+ addr->gtRegNum = REG_VIRTUAL_STUB_PARAM;
indir->gtRegNum = REG_JUMP_THUNK_PARAM;
#endif
result = indir;
@@ -3121,7 +3105,6 @@ GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
return result;
}
-
//------------------------------------------------------------------------
// LowerIndCleanupHelper: Remove the nodes that are no longer used after an
// addressing mode is constructed
@@ -3138,7 +3121,10 @@ GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
void Lowering::LowerIndCleanupHelper(GenTreeAddrMode* addrMode, GenTreePtr tree)
{
- if (tree == addrMode->Base() || tree == addrMode->Index()) return;
+ if (tree == addrMode->Base() || tree == addrMode->Index())
+ {
+ return;
+ }
unsigned childCount = tree->NumChildren();
for (unsigned i = 0; i < childCount; i++)
{
@@ -3154,7 +3140,7 @@ void Lowering::LowerIndCleanupHelper(GenTreeAddrMode* addrMode, GenTreePtr tree)
// returns: true if the sources given may be modified before they are used
bool Lowering::AreSourcesPossiblyModified(GenTree* use, GenTree* src1, GenTree* src2)
{
- GenTree* cursor = use;
+ GenTree* cursor = use;
GenTree* firstTree = comp->compCurStmt->AsStmt()->gtStmtList;
while (cursor && cursor != firstTree)
@@ -3162,11 +3148,17 @@ bool Lowering::AreSourcesPossiblyModified(GenTree* use, GenTree* src1, GenTree*
cursor = cursor->gtPrev;
if (cursor == src1)
+ {
src1 = nullptr;
+ }
if (cursor == src2)
+ {
src2 = nullptr;
+ }
if (src2 == nullptr && src1 == nullptr)
+ {
return false;
+ }
if (src1 && comp->fgNodesMayInterfere(src1, cursor))
{
@@ -3177,7 +3169,6 @@ bool Lowering::AreSourcesPossiblyModified(GenTree* use, GenTree* src1, GenTree*
{
return true;
}
-
}
assert(!"ran off beginning of stmt\n");
return true;
@@ -3195,12 +3186,12 @@ bool Lowering::AreSourcesPossiblyModified(GenTree* use, GenTree* src1, GenTree*
//
void Lowering::LowerAddrMode(GenTreePtr* pTree, GenTree* before, Compiler::fgWalkData* data, bool isIndir)
{
- GenTree* addr = *pTree;
- GenTreePtr base = nullptr;
- GenTreePtr index = nullptr;
- unsigned scale = 0;
- unsigned offset = 0;
- bool rev = false;
+ GenTree* addr = *pTree;
+ GenTreePtr base = nullptr;
+ GenTreePtr index = nullptr;
+ unsigned scale = 0;
+ unsigned offset = 0;
+ bool rev = false;
// If it's not an indir, we need the fgWalkData to get info about the parent.
assert(isIndir || data);
@@ -3210,7 +3201,9 @@ void Lowering::LowerAddrMode(GenTreePtr* pTree, GenTree* before, Compiler::fgWal
comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &scale, &offset, true /*nogen*/);
if (scale == 0)
+ {
scale = 1;
+ }
if (!isIndir)
{
@@ -3230,7 +3223,7 @@ void Lowering::LowerAddrMode(GenTreePtr* pTree, GenTree* before, Compiler::fgWal
// make sure there are not any side effects between def of leaves and use
if (doAddrMode && !AreSourcesPossiblyModified(addr, base, index))
{
- GenTreePtr arrLength = NULL;
+ GenTreePtr arrLength = nullptr;
JITDUMP("Addressing mode:\n");
JITDUMP(" Base\n");
@@ -3246,15 +3239,16 @@ void Lowering::LowerAddrMode(GenTreePtr* pTree, GenTree* before, Compiler::fgWal
}
var_types addrModeType = addr->TypeGet();
- if (addrModeType == TYP_REF)
+ if (addrModeType == TYP_REF)
+ {
addrModeType = TYP_BYREF;
+ }
+
+ GenTreeAddrMode* addrMode = new (comp, GT_LEA) GenTreeAddrMode(addrModeType, base, index, scale, offset);
- GenTreeAddrMode* addrMode =
- new(comp, GT_LEA) GenTreeAddrMode(addrModeType, base, index, scale, offset);
-
addrMode->CopyCosts(addr);
addrMode->gtRsvdRegs = addr->gtRsvdRegs;
- addrMode->gtFlags |= (addr->gtFlags & (GTF_ALL_EFFECT | GTF_IND_FLAGS));
+ addrMode->gtFlags |= (addr->gtFlags & (GTF_ALL_EFFECT | GTF_IND_FLAGS));
JITDUMP("New addressing mode node:\n");
DISPNODE(addrMode);
@@ -3288,7 +3282,7 @@ void Lowering::LowerAddrMode(GenTreePtr* pTree, GenTree* before, Compiler::fgWal
LowerIndCleanupHelper(addrMode, addr);
GenTree* old = *pTree;
- *pTree = addrMode;
+ *pTree = addrMode;
if (!isIndir)
{
@@ -3311,7 +3305,7 @@ void Lowering::LowerAddrMode(GenTreePtr* pTree, GenTree* before, Compiler::fgWal
void Lowering::LowerAdd(GenTreePtr* pTree, Compiler::fgWalkData* data)
{
- GenTreePtr newNode = NULL;
+ GenTreePtr newNode = nullptr;
GenTreePtr addr = *pTree;
@@ -3319,20 +3313,28 @@ void Lowering::LowerAdd(GenTreePtr* pTree, Compiler::fgWalkData* data)
// For ARM architectures we don't have the LEA instruction
// therefore we won't get much benefit from doing this.
return;
-#else // _TARGET_ARMARCH_
+#else // _TARGET_ARMARCH_
if (data->parentStack->Height() < 2)
+ {
return;
-
+ }
+
// if this is a child of an indir, let the parent handle it
if (data->parentStack->Index(1)->OperIsIndir())
+ {
return;
+ }
// if there is a chain of adds, only look at the topmost one
if (data->parentStack->Index(1)->gtOper == GT_ADD)
+ {
return;
+ }
if (!varTypeIsIntegralOrI(addr))
+ {
return;
+ }
LowerAddrMode(pTree, addr, data, false);
#endif // !_TARGET_ARMARCH_
@@ -3361,7 +3363,7 @@ void Lowering::LowerUnsignedDivOrMod(GenTree* tree)
if (tree->OperGet() == GT_UDIV)
{
- newOper = GT_RSZ;
+ newOper = GT_RSZ;
divisorValue = genLog2(divisorValue);
}
else
@@ -3377,7 +3379,7 @@ void Lowering::LowerUnsignedDivOrMod(GenTree* tree)
}
//------------------------------------------------------------------------
-// LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a power of 2
+// LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a power of 2
// const divisor into equivalent but faster sequences.
//
// Arguments:
@@ -3399,8 +3401,8 @@ void Lowering::LowerSignedDivOrMod(GenTreePtr* ppTree, Compiler::fgWalkData* dat
if (dividend->IsCnsIntOrI())
{
- // We shouldn't see a divmod with constant operands here but if we do then it's likely
- // because optimizations are disabled or it's a case that's supposed to throw an exception.
+ // We shouldn't see a divmod with constant operands here but if we do then it's likely
+ // because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return;
}
@@ -3410,7 +3412,7 @@ void Lowering::LowerSignedDivOrMod(GenTreePtr* ppTree, Compiler::fgWalkData* dat
if (divisorValue == -1)
{
// x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
-
+
// x % -1 is always 0 and the IL spec says that the rem instruction "can" throw an exception if x is
// the minimum representable integer. However, the C# spec says that an exception "is" thrown in this
// case so optimizing this case would break C# code.
@@ -3418,22 +3420,22 @@ void Lowering::LowerSignedDivOrMod(GenTreePtr* ppTree, Compiler::fgWalkData* dat
// A runtime check could be used to handle this case but it's probably too rare to matter.
return;
}
-
+
bool isDiv = divMod->OperGet() == GT_DIV;
if (isDiv)
{
- if ((type == TYP_INT && divisorValue == INT_MIN) ||
- (type == TYP_LONG && divisorValue == INT64_MIN))
+ if ((type == TYP_INT && divisorValue == INT_MIN) || (type == TYP_LONG && divisorValue == INT64_MIN))
{
- // If the divisor is the minimum representable integer value then we can use a compare,
+ // If the divisor is the minimum representable integer value then we can use a compare,
// the result is 1 iff the dividend equals divisor.
divMod->SetOper(GT_EQ);
return;
}
}
- size_t absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue));
+ size_t absDivisorValue =
+ (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue));
if (isPow2(absDivisorValue))
{
@@ -3442,14 +3444,12 @@ void Lowering::LowerSignedDivOrMod(GenTreePtr* ppTree, Compiler::fgWalkData* dat
CreateTemporary(&(divMod->gtOp.gtOp1));
dividend = divMod->gtGetOp1();
- GenTreeStmt* curStmt = comp->compCurStmt->AsStmt();
- unsigned curBBWeight = currBlock->getBBWeight(comp);
- unsigned dividendLclNum = dividend->gtLclVar.gtLclNum;
+ GenTreeStmt* curStmt = comp->compCurStmt->AsStmt();
+ unsigned curBBWeight = currBlock->getBBWeight(comp);
+ unsigned dividendLclNum = dividend->gtLclVar.gtLclNum;
- GenTree* adjustment = comp->gtNewOperNode(
- GT_RSH, type,
- dividend,
- comp->gtNewIconNode(type == TYP_INT ? 31 : 63));
+ GenTree* adjustment =
+ comp->gtNewOperNode(GT_RSH, type, dividend, comp->gtNewIconNode(type == TYP_INT ? 31 : 63));
if (absDivisorValue == 2)
{
@@ -3459,16 +3459,12 @@ void Lowering::LowerSignedDivOrMod(GenTreePtr* ppTree, Compiler::fgWalkData* dat
}
else
{
- adjustment = comp->gtNewOperNode(
- GT_AND, type,
- adjustment,
- comp->gtNewIconNode(absDivisorValue - 1, type));
+ adjustment =
+ comp->gtNewOperNode(GT_AND, type, adjustment, comp->gtNewIconNode(absDivisorValue - 1, type));
}
- GenTree* adjustedDividend = comp->gtNewOperNode(
- GT_ADD, type,
- adjustment,
- comp->gtNewLclvNode(dividendLclNum, type));
+ GenTree* adjustedDividend =
+ comp->gtNewOperNode(GT_ADD, type, adjustment, comp->gtNewLclvNode(dividendLclNum, type));
comp->lvaTable[dividendLclNum].incRefCnts(curBBWeight, comp);
@@ -3479,17 +3475,12 @@ void Lowering::LowerSignedDivOrMod(GenTreePtr* ppTree, Compiler::fgWalkData* dat
// perform the division by right shifting the adjusted dividend
divisor->gtIntCon.SetIconValue(genLog2(absDivisorValue));
- newDivMod = comp->gtNewOperNode(
- GT_RSH, type,
- adjustedDividend,
- divisor);
+ newDivMod = comp->gtNewOperNode(GT_RSH, type, adjustedDividend, divisor);
if (divisorValue < 0)
{
// negate the result if the divisor is negative
- newDivMod = comp->gtNewOperNode(
- GT_NEG, type,
- newDivMod);
+ newDivMod = comp->gtNewOperNode(GT_NEG, type, newDivMod);
}
}
else
@@ -3499,18 +3490,13 @@ void Lowering::LowerSignedDivOrMod(GenTreePtr* ppTree, Compiler::fgWalkData* dat
// which simply discards the low log2(divisor) bits, that's just dividend & ~(divisor - 1)
divisor->gtIntCon.SetIconValue(~(absDivisorValue - 1));
- newDivMod = comp->gtNewOperNode(
- GT_SUB, type,
- comp->gtNewLclvNode(dividendLclNum, type),
- comp->gtNewOperNode(
- GT_AND, type,
- adjustedDividend,
- divisor));
+ newDivMod = comp->gtNewOperNode(GT_SUB, type, comp->gtNewLclvNode(dividendLclNum, type),
+ comp->gtNewOperNode(GT_AND, type, adjustedDividend, divisor));
comp->lvaTable[dividendLclNum].incRefCnts(curBBWeight, comp);
}
- // Remove the divisor and dividend nodes from the linear order,
+ // Remove the divisor and dividend nodes from the linear order,
// since we have reused them and will resequence the tree
comp->fgSnipNode(curStmt, divisor);
comp->fgSnipNode(curStmt, dividend);
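(Aside, not part of the patch: the adjust-then-shift tree assembled above computes truncated signed division and remainder by a power-of-two divisor. A self-contained C++ sketch of the same scalar arithmetic — assuming arithmetic right shift on signed ints; the INT_MIN divisor case is handled separately above via GT_EQ — is:)

#include <cassert>
#include <cstdint>

int32_t Pow2SignedDiv(int32_t n, int32_t divisor)
{
    uint32_t absD = divisor < 0 ? 0u - uint32_t(divisor) : uint32_t(divisor);
    assert(absD != 0 && (absD & (absD - 1)) == 0);    // magnitude is a power of two
    int32_t adjust = (n >> 31) & int32_t(absD - 1);   // absD-1 when n < 0, else 0
    int32_t k = 0;
    while ((1u << k) != absD) k++;                    // log2(absD)
    int32_t q = (n + adjust) >> k;                    // shift the adjusted dividend
    return divisor < 0 ? -q : q;                      // negate for a negative divisor
}

int32_t Pow2SignedMod(int32_t n, uint32_t absD)
{
    int32_t adjust = (n >> 31) & int32_t(absD - 1);
    return n - ((n + adjust) & ~int32_t(absD - 1));   // discard the low log2(absD) bits
}

int main()
{
    assert(Pow2SignedDiv(-7, 4) == -1 && Pow2SignedMod(-7, 4) == -3);
    assert(Pow2SignedDiv(7, -4) == -1 && Pow2SignedDiv(-8, -4) == 2);
}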
@@ -3540,8 +3526,8 @@ void Lowering::LowerSignedDivOrMod(GenTreePtr* ppTree, Compiler::fgWalkData* dat
void Lowering::LowerInd(GenTreePtr* pTree)
{
- GenTreePtr newNode = NULL;
- GenTreePtr cTree = *pTree;
+ GenTreePtr newNode = nullptr;
+ GenTreePtr cTree = *pTree;
JITDUMP("\n");
DISPNODE(cTree);
@@ -3550,12 +3536,14 @@ void Lowering::LowerInd(GenTreePtr* pTree)
GenTreePtr before = cTree;
if (cTree->OperGet() == GT_STOREIND && !cTree->IsReverseOp())
+ {
before = comp->fgGetFirstNode(cTree->gtGetOp2());
+ }
LowerAddrMode(&cTree->gtOp.gtOp1, before, nullptr, true);
// Mark all GT_STOREIND nodes to indicate that it is not known
- // whether it represents a RMW memory op.
+ // whether it represents a RMW memory op.
if (cTree->OperGet() == GT_STOREIND)
{
cTree->AsStoreInd()->SetRMWStatusDefault();
@@ -3605,15 +3593,14 @@ void Lowering::LowerInd(GenTreePtr* pTree)
// reference to NewTemp), because that provides more accurate lifetimes.
// There may be 1, 2 or 3 dimensions, with 1, 2 or 3 arrMDIdx nodes, respectively.
-void
-Lowering::LowerArrElem(GenTree **ppTree, Compiler::fgWalkData* data)
+void Lowering::LowerArrElem(GenTree** ppTree, Compiler::fgWalkData* data)
{
- GenTreePtr tree = *ppTree;
+ GenTreePtr tree = *ppTree;
// This will assert if we don't have an ArrElem node
GenTreeArrElem* arrElem = tree->AsArrElem();
- Compiler* comp = data->compiler;
+ Compiler* comp = data->compiler;
GenTreePtr curStmt = comp->compCurStmt;
- unsigned char rank = arrElem->gtArrElem.gtArrRank;
+ unsigned char rank = arrElem->gtArrElem.gtArrRank;
JITDUMP("Lowering ArrElem\n");
JITDUMP("============\n");
@@ -3628,16 +3615,15 @@ Lowering::LowerArrElem(GenTree **ppTree, Compiler::fgWalkData* data)
// Split off the array object and store to a temporary variable.
GenTreeStmt* newStmt = comp->fgInsertEmbeddedFormTemp(&(arrElem->gtArrObj));
newStmt->gtFlags |= GTF_STMT_SKIP_LOWER;
- GenTreePtr stLclVar = newStmt->gtStmtExpr;
+ GenTreePtr stLclVar = newStmt->gtStmtExpr;
assert(stLclVar->OperIsLocalStore());
// If we have made a new top-level statement, and it has inherited any
// embedded statements from curStmt, they have not yet been lowered.
if (newStmt->gtStmtIsTopLevel())
{
- for (GenTreePtr nextEmbeddedStmt = newStmt->gtStmtNextIfEmbedded();
- nextEmbeddedStmt != nullptr;
- nextEmbeddedStmt = nextEmbeddedStmt->gtStmt.gtStmtNextIfEmbedded())
+ for (GenTreePtr nextEmbeddedStmt = newStmt->gtStmtNextIfEmbedded(); nextEmbeddedStmt != nullptr;
+ nextEmbeddedStmt = nextEmbeddedStmt->gtStmt.gtStmtNextIfEmbedded())
{
comp->compCurStmt = nextEmbeddedStmt;
comp->fgWalkTreePost(&nextEmbeddedStmt->gtStmt.gtStmtExpr, &Lowering::LowerNodeHelper, this, true);
@@ -3660,8 +3646,8 @@ Lowering::LowerArrElem(GenTree **ppTree, Compiler::fgWalkData* data)
if ((currIndexNode->gtFlags & GTF_SIDE_EFFECT) != 0)
{
// Split off this index computation and store to a temporary variable.
- GenTreeStmt* newStmt = comp->fgInsertEmbeddedFormTemp(&(arrElem->gtArrElem.gtArrInds[dim]));
- GenTreePtr stLclVar = newStmt->gtStmtExpr;
+ GenTreeStmt* newStmt = comp->fgInsertEmbeddedFormTemp(&(arrElem->gtArrElem.gtArrInds[dim]));
+ GenTreePtr stLclVar = newStmt->gtStmtExpr;
assert(stLclVar->OperIsLocalStore());
// We can't have made a new top-level statement, because we know we've got an ArrObj
// prior to the index nodes.
@@ -3673,12 +3659,12 @@ Lowering::LowerArrElem(GenTree **ppTree, Compiler::fgWalkData* data)
}
// The first ArrOffs node will have 0 for the offset of the previous dimension.
- GenTree* prevArrOffs = new(comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
+ GenTree* prevArrOffs = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
comp->fgInsertLinearNodeBefore(prevArrOffs, arrObjNode);
for (unsigned char dim = 0; dim < rank; dim++)
{
- GenTree* currIndexTree = arrElem->gtArrElem.gtArrInds[dim];
+ GenTree* currIndexTree = arrElem->gtArrElem.gtArrInds[dim];
GenTree* insertBeforeNode = nextNode;
// Use the original arrObjNode on the 0th ArrIndex node, and clone it for subsequent ones.
@@ -3698,9 +3684,9 @@ Lowering::LowerArrElem(GenTree **ppTree, Compiler::fgWalkData* data)
if (currIndexTree->gtNext != insertBeforeNode)
{
GenTree* firstIndexNode = comp->fgGetFirstNode(currIndexTree);
- GenTree* oldPrevNode = firstIndexNode->gtPrev;
- GenTree* oldNextNode = currIndexTree->gtNext;
- GenTree* newPrevNode = insertBeforeNode->gtPrev;
+ GenTree* oldPrevNode = firstIndexNode->gtPrev;
+ GenTree* oldNextNode = currIndexTree->gtNext;
+ GenTree* newPrevNode = insertBeforeNode->gtPrev;
// All these are inner nodes, so they cannot be null.
assert(oldPrevNode != nullptr && oldNextNode != nullptr && newPrevNode != nullptr);
@@ -3708,46 +3694,49 @@ Lowering::LowerArrElem(GenTree **ppTree, Compiler::fgWalkData* data)
oldNextNode->gtPrev = oldPrevNode;
firstIndexNode->gtPrev = newPrevNode;
- newPrevNode->gtNext = firstIndexNode;
+ newPrevNode->gtNext = firstIndexNode;
- currIndexTree->gtNext = insertBeforeNode;
+ currIndexTree->gtNext = insertBeforeNode;
insertBeforeNode->gtPrev = currIndexTree;
}
// Next comes the GT_ARR_INDEX node.
- GenTreeArrIndex* arrMDIdx = new(comp, GT_ARR_INDEX)
+ GenTreeArrIndex* arrMDIdx = new (comp, GT_ARR_INDEX)
GenTreeArrIndex(TYP_INT, idxArrObjNode, currIndexTree, dim, rank, arrElem->gtArrElem.gtArrElemType);
- arrMDIdx->gtFlags |= ((idxArrObjNode->gtFlags|currIndexTree->gtFlags) & GTF_ALL_EFFECT);
+ arrMDIdx->gtFlags |= ((idxArrObjNode->gtFlags | currIndexTree->gtFlags) & GTF_ALL_EFFECT);
comp->fgInsertLinearNodeBefore(arrMDIdx, insertBeforeNode);
GenTree* offsArrObjNode = comp->gtClone(arrObjNode);
comp->fgInsertLinearNodeBefore(offsArrObjNode, insertBeforeNode);
- GenTreeArrOffs* arrOffs = new(comp, GT_ARR_OFFSET)
- GenTreeArrOffs(TYP_I_IMPL, prevArrOffs, arrMDIdx, offsArrObjNode, dim, rank, arrElem->gtArrElem.gtArrElemType);
+ GenTreeArrOffs* arrOffs =
+ new (comp, GT_ARR_OFFSET) GenTreeArrOffs(TYP_I_IMPL, prevArrOffs, arrMDIdx, offsArrObjNode, dim, rank,
+ arrElem->gtArrElem.gtArrElemType);
comp->fgInsertLinearNodeBefore(arrOffs, insertBeforeNode);
- arrOffs->gtFlags |= ((prevArrOffs->gtFlags|arrMDIdx->gtFlags|offsArrObjNode->gtFlags) & GTF_ALL_EFFECT);
+ arrOffs->gtFlags |= ((prevArrOffs->gtFlags | arrMDIdx->gtFlags | offsArrObjNode->gtFlags) & GTF_ALL_EFFECT);
prevArrOffs = arrOffs;
}
- // Generate the LEA and make it reverse evaluation, because we want to evaluate the index expression before the base.
+ // Generate the LEA and make it reverse evaluation, because we want to evaluate the index expression before the
+ // base.
GenTreePtr leaBase = comp->gtClone(arrObjNode);
- unsigned scale = arrElem->gtArrElem.gtArrElemSize;
- unsigned offset = comp->eeGetMDArrayDataOffset(arrElem->gtArrElem.gtArrElemType, arrElem->gtArrElem.gtArrRank);
+ unsigned scale = arrElem->gtArrElem.gtArrElemSize;
+ unsigned offset = comp->eeGetMDArrayDataOffset(arrElem->gtArrElem.gtArrElemType, arrElem->gtArrElem.gtArrRank);
GenTreePtr leaIndexNode = prevArrOffs;
if (!jitIsScaleIndexMul(scale))
{
- // We do the address arithmetic in TYP_I_IMPL, though note that the lower bounds and lengths in memory are TYP_INT
- GenTreePtr scaleNode = new(comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, scale);
- GenTreePtr mulNode = new(comp, GT_MUL) GenTreeOp(GT_MUL, TYP_I_IMPL, leaIndexNode, scaleNode);
+ // We do the address arithmetic in TYP_I_IMPL, though note that the lower bounds and lengths in memory are
+ // TYP_INT
+ GenTreePtr scaleNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, scale);
+ GenTreePtr mulNode = new (comp, GT_MUL) GenTreeOp(GT_MUL, TYP_I_IMPL, leaIndexNode, scaleNode);
comp->fgInsertLinearNodeBefore(scaleNode, nextNode);
comp->fgInsertLinearNodeBefore(mulNode, nextNode);
leaIndexNode = mulNode;
- scale = 1;
+ scale = 1;
}
comp->fgInsertLinearNodeBefore(leaBase, nextNode);
- GenTreePtr leaNode = new(comp, GT_LEA) GenTreeAddrMode(arrElem->TypeGet(), leaBase, leaIndexNode, scale, offset);
+ GenTreePtr leaNode = new (comp, GT_LEA) GenTreeAddrMode(arrElem->TypeGet(), leaBase, leaIndexNode, scale, offset);
leaNode->gtFlags |= GTF_REVERSE_OPS;
comp->fgInsertLinearNodeBefore(leaNode, nextNode);
@@ -3762,7 +3751,7 @@ Lowering::LowerArrElem(GenTree **ppTree, Compiler::fgWalkData* data)
// We can have a top-level GT_ARR_ELEM. For example, a function call
// with a parameter of GT_ARR_ELEM can end up being simplified by the
// inliner to single GT_ARR_ELEM node if the function has an empty body.
- arrElem->gtPrev->gtNext = nullptr;
+ arrElem->gtPrev->gtNext = nullptr;
curStmt->gtStmt.gtStmtExpr = *ppTree;
}
@@ -3807,23 +3796,23 @@ void Lowering::DoPhase()
#endif
#if !defined(_TARGET_64BIT_)
- DecomposeLongs decomp(comp); // Initialize the long decomposition class.
+ DecomposeLongs decomp(comp); // Initialize the long decomposition class.
decomp.PrepareForDecomposition();
#endif // !defined(_TARGET_64BIT_)
for (BasicBlock* block = comp->fgFirstBB; block; block = block->bbNext)
{
- GenTreePtr stmt;
+ GenTreePtr stmt;
/* Make the block publicly available */
- currBlock = block;
+ currBlock = block;
comp->compCurBB = block;
#if !defined(_TARGET_64BIT_)
decomp.DecomposeBlock(block);
#endif //!_TARGET_64BIT_
- // Walk the statement trees in this basic block
+ // Walk the statement trees in this basic block
for (stmt = block->bbTreeList; stmt; stmt = stmt->gtNext)
{
if (stmt->gtFlags & GTF_STMT_SKIP_LOWER)
@@ -3873,10 +3862,10 @@ void Lowering::DoPhase()
comp->fgLocalVarLiveness();
// local var liveness can delete code, which may create empty blocks
- if (!comp->opts.MinOpts() && !comp->opts.compDbgCode)
+ if (!comp->opts.MinOpts() && !comp->opts.compDbgCode)
{
comp->optLoopsMarked = false;
- bool modified = comp->fgUpdateFlowGraph();
+ bool modified = comp->fgUpdateFlowGraph();
if (modified || comp->lvaSortAgain)
{
JITDUMP("had to run another liveness pass:\n");
@@ -3911,9 +3900,7 @@ void Lowering::DoPhase()
// It's also desirable to avoid initializing this code using a non-execution order traversal.
//
LsraLocation currentLoc = 1;
- for( BasicBlock* block = m_lsra->startBlockSequence();
- block != nullptr;
- block = m_lsra->moveToNextBlock())
+ for (BasicBlock* block = m_lsra->startBlockSequence(); block != nullptr; block = m_lsra->moveToNextBlock())
{
GenTreePtr stmt;
@@ -3925,7 +3912,9 @@ void Lowering::DoPhase()
for (stmt = block->FirstNonPhiDef(); stmt; stmt = stmt->gtNext)
{
if (stmt->gtStmt.gtStmtIsEmbedded())
+ {
continue;
+ }
/* We increment the number position of each tree node by 2 to
* simplify the logic when there's the case of a tree that implicitly
@@ -3950,7 +3939,9 @@ void Lowering::DoPhase()
for (stmt = block->FirstNonPhiDef(); stmt; stmt = stmt->gtNext)
{
if (stmt->gtStmt.gtStmtIsEmbedded())
+ {
continue;
+ }
comp->compCurStmt = stmt;
@@ -3970,7 +3961,7 @@ void Lowering::DoPhase()
else if (comp->optIsTrackedLocal(tree))
{
tree->gtLsraInfo.isLocalDefUse = true;
- tree->gtLsraInfo.dstCount = 0;
+ tree->gtLsraInfo.dstCount = 0;
}
#if 0
// TODO-CQ: Enable this code after fixing the isContained() logic to not abort for these
@@ -4009,53 +4000,56 @@ void Lowering::DoPhase()
bool Lowering::IndirsAreEquivalent(GenTreePtr candidate, GenTreePtr storeInd)
{
assert(candidate->OperGet() == GT_IND);
- assert(storeInd->OperGet() == GT_STOREIND);
-
+ assert(storeInd->OperGet() == GT_STOREIND);
+
// We should check the size of the indirections. If they are
// different, say because of a cast, then we can't call them equivalent. Doing so could cause us
// to drop a cast.
- // Signed-ness difference is okay and expected since a store indirection must always
+ // Signed-ness difference is okay and expected since a store indirection must always
// be signed based on the CIL spec, but a load could be unsigned.
if (genTypeSize(candidate->gtType) != genTypeSize(storeInd->gtType))
+ {
return false;
-
+ }
+
GenTreePtr pTreeA = candidate->gtGetOp1();
GenTreePtr pTreeB = storeInd->gtGetOp1();
-
+
// This method will be called by codegen (as well as during lowering).
// After register allocation, the sources may have been spilled and reloaded
// to a different register, indicated by an inserted GT_RELOAD node.
pTreeA = pTreeA->gtSkipReloadOrCopy();
pTreeB = pTreeB->gtSkipReloadOrCopy();
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
if (pTreeA->OperGet() != pTreeB->OperGet())
+ {
return false;
+ }
oper = pTreeA->OperGet();
switch (oper)
{
- case GT_LCL_VAR:
- case GT_LCL_VAR_ADDR:
- case GT_CLS_VAR_ADDR:
- case GT_CNS_INT:
- return NodesAreEquivalentLeaves(pTreeA, pTreeB);
+ case GT_LCL_VAR:
+ case GT_LCL_VAR_ADDR:
+ case GT_CLS_VAR_ADDR:
+ case GT_CNS_INT:
+ return NodesAreEquivalentLeaves(pTreeA, pTreeB);
- case GT_LEA:
+ case GT_LEA:
{
GenTreeAddrMode* gtAddr1 = pTreeA->AsAddrMode();
GenTreeAddrMode* gtAddr2 = pTreeB->AsAddrMode();
return NodesAreEquivalentLeaves(gtAddr1->Base(), gtAddr2->Base()) &&
- NodesAreEquivalentLeaves(gtAddr1->Index(), gtAddr2->Index()) &&
- gtAddr1->gtScale == gtAddr2->gtScale &&
- gtAddr1->gtOffset == gtAddr2->gtOffset;
+ NodesAreEquivalentLeaves(gtAddr1->Index(), gtAddr2->Index()) &&
+ gtAddr1->gtScale == gtAddr2->gtScale && gtAddr1->gtOffset == gtAddr2->gtOffset;
}
- default:
- // We don't handle anything that is not either a constant,
- // a local var or LEA.
- return false;
+ default:
+ // We don't handle anything that is not either a constant,
+ // a local var or LEA.
+ return false;
}
}
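(Aside, not part of the patch: the size check above rejects cases like the following hypothetical snippet, where the load and the store address the same location but with different widths; treating the two indirections as equivalent would drop the narrowing cast.)

#include <cstdint>

// Hypothetical source pattern, for illustration only.
void Bump(int32_t* p)
{
    *(int8_t*)p = (int8_t)(*p + 1);   // the GT_IND is 4 bytes, the GT_STOREIND is 1 byte
}
// Folding this into a single 4-byte read-modify-write would skip the truncation
// to int8_t, so IndirsAreEquivalent returns false when the sizes differ.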
@@ -4065,36 +4059,46 @@ bool Lowering::IndirsAreEquivalent(GenTreePtr candidate, GenTreePtr storeInd)
bool Lowering::NodesAreEquivalentLeaves(GenTreePtr tree1, GenTreePtr tree2)
{
if (tree1 == nullptr && tree2 == nullptr)
+ {
return true;
+ }
// both null, they are equivalent, otherwise if either is null not equivalent
if (tree1 == nullptr || tree2 == nullptr)
+ {
return false;
+ }
tree1 = tree1->gtSkipReloadOrCopy();
tree2 = tree2->gtSkipReloadOrCopy();
if (tree1->TypeGet() != tree2->TypeGet())
+ {
return false;
+ }
if (tree1->OperGet() != tree2->OperGet())
+ {
return false;
+ }
if (!tree1->OperIsLeaf() || !tree2->OperIsLeaf())
+ {
return false;
+ }
switch (tree1->OperGet())
{
- case GT_CNS_INT:
- return tree1->gtIntCon.gtIconVal == tree2->gtIntCon.gtIconVal &&
- tree1->IsIconHandle() == tree2->IsIconHandle();
- case GT_LCL_VAR:
- case GT_LCL_VAR_ADDR:
- return tree1->gtLclVarCommon.gtLclNum == tree2->gtLclVarCommon.gtLclNum;
- case GT_CLS_VAR_ADDR:
- return tree1->gtClsVar.gtClsVarHnd == tree2->gtClsVar.gtClsVarHnd;
- default:
- return false;
+ case GT_CNS_INT:
+ return tree1->gtIntCon.gtIconVal == tree2->gtIntCon.gtIconVal &&
+ tree1->IsIconHandle() == tree2->IsIconHandle();
+ case GT_LCL_VAR:
+ case GT_LCL_VAR_ADDR:
+ return tree1->gtLclVarCommon.gtLclNum == tree2->gtLclVarCommon.gtLclNum;
+ case GT_CLS_VAR_ADDR:
+ return tree1->gtClsVar.gtClsVarHnd == tree2->gtClsVar.gtClsVarHnd;
+ default:
+ return false;
}
}
@@ -4119,7 +4123,7 @@ void Lowering::ReplaceNode(GenTree** ppTreeLocation, GenTree* replacementNode, G
JITDUMP("The node that replaces it is:\n");
DISPTREE(replacementNode);
- assert(comp->fgStmtContainsNode((GenTreeStmt*) stmt, treeLocation));
+ assert(comp->fgStmtContainsNode((GenTreeStmt*)stmt, treeLocation));
GenTreePtr first = comp->fgGetFirstNode(treeLocation);
comp->fgRemoveContainedEmbeddedStatements(treeLocation, stmt->AsStmt(), block);
@@ -4157,7 +4161,7 @@ void Lowering::ReplaceNode(GenTree** ppTreeLocation, GenTree* replacementNode, G
}
replacementNode->gtNext = gtNext;
- treeLocation = replacementNode;
+ treeLocation = replacementNode;
#ifdef DEBUG
comp->fgDebugCheckLinks();
#endif
@@ -4175,7 +4179,6 @@ void Lowering::UnlinkNode(GenTree** ppParentLink, GenTree* stmt, BasicBlock* blo
ReplaceNode(ppParentLink, comp->gtNewNothingNode(), stmt, block);
}
-
#ifdef _TARGET_64BIT_
/**
* Get common information required to handle a cast instruction
@@ -4194,87 +4197,88 @@ void Lowering::getCastDescription(GenTreePtr treeNode, CastInfo* castInfo)
var_types dstType = treeNode->CastToType();
var_types srcType = castOp->TypeGet();
- castInfo->unsignedDest = varTypeIsUnsigned(dstType);
+ castInfo->unsignedDest = varTypeIsUnsigned(dstType);
castInfo->unsignedSource = varTypeIsUnsigned(srcType);
// If necessary, force the srcType to unsigned when the GT_UNSIGNED flag is set.
if (!castInfo->unsignedSource && (treeNode->gtFlags & GTF_UNSIGNED) != 0)
{
- srcType = genUnsignedType(srcType);
+ srcType = genUnsignedType(srcType);
castInfo->unsignedSource = true;
}
- if (treeNode->gtOverflow() && (genTypeSize(srcType) >= genTypeSize(dstType) || (srcType == TYP_INT && dstType == TYP_ULONG)))
+ if (treeNode->gtOverflow() &&
+ (genTypeSize(srcType) >= genTypeSize(dstType) || (srcType == TYP_INT && dstType == TYP_ULONG)))
{
castInfo->requiresOverflowCheck = true;
}
if (castInfo->requiresOverflowCheck)
{
- ssize_t typeMin = 0;
- ssize_t typeMax = 0;
- ssize_t typeMask = 0;
- bool signCheckOnly = false;
+ ssize_t typeMin = 0;
+ ssize_t typeMax = 0;
+ ssize_t typeMask = 0;
+ bool signCheckOnly = false;
// Do we need to compare the value, or just check masks
switch (dstType)
{
- default:
- assert(!"unreachable: getCastDescription");
- break;
+ default:
+ assert(!"unreachable: getCastDescription");
+ break;
- case TYP_BYTE:
- typeMask = ssize_t((int)0xFFFFFF80);
- typeMin = SCHAR_MIN;
- typeMax = SCHAR_MAX;
- break;
+ case TYP_BYTE:
+ typeMask = ssize_t((int)0xFFFFFF80);
+ typeMin = SCHAR_MIN;
+ typeMax = SCHAR_MAX;
+ break;
- case TYP_UBYTE:
- typeMask = ssize_t((int)0xFFFFFF00L);
- break;
+ case TYP_UBYTE:
+ typeMask = ssize_t((int)0xFFFFFF00L);
+ break;
- case TYP_SHORT:
- typeMask = ssize_t((int)0xFFFF8000);
- typeMin = SHRT_MIN;
- typeMax = SHRT_MAX;
- break;
+ case TYP_SHORT:
+ typeMask = ssize_t((int)0xFFFF8000);
+ typeMin = SHRT_MIN;
+ typeMax = SHRT_MAX;
+ break;
- case TYP_CHAR:
- typeMask = ssize_t((int)0xFFFF0000L);
- break;
+ case TYP_CHAR:
+ typeMask = ssize_t((int)0xFFFF0000L);
+ break;
- case TYP_INT:
- if (srcType == TYP_UINT)
- {
- signCheckOnly = true;
- }
- else
- {
- typeMask = 0xFFFFFFFF80000000LL;
- typeMin = INT_MIN;
- typeMax = INT_MAX;
- }
- break;
+ case TYP_INT:
+ if (srcType == TYP_UINT)
+ {
+ signCheckOnly = true;
+ }
+ else
+ {
+ typeMask = 0xFFFFFFFF80000000LL;
+ typeMin = INT_MIN;
+ typeMax = INT_MAX;
+ }
+ break;
- case TYP_UINT:
- if (srcType == TYP_INT)
- {
- signCheckOnly = true;
- }
- else
- {
- typeMask = 0xFFFFFFFF00000000LL;
- }
- break;
+ case TYP_UINT:
+ if (srcType == TYP_INT)
+ {
+ signCheckOnly = true;
+ }
+ else
+ {
+ typeMask = 0xFFFFFFFF00000000LL;
+ }
+ break;
- case TYP_LONG:
- signCheckOnly = true;
- break;
+ case TYP_LONG:
+ signCheckOnly = true;
+ break;
- case TYP_ULONG:
- signCheckOnly = true;
- break;
+ case TYP_ULONG:
+ signCheckOnly = true;
+ break;
}
if (signCheckOnly)
@@ -4282,8 +4286,8 @@ void Lowering::getCastDescription(GenTreePtr treeNode, CastInfo* castInfo)
castInfo->signCheckOnly = true;
}
- castInfo->typeMax = typeMax;
- castInfo->typeMin = typeMin;
+ castInfo->typeMax = typeMax;
+ castInfo->typeMin = typeMin;
castInfo->typeMask = typeMask;
}
}
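(Aside, not part of the patch: one way to read the masks chosen above — whether a 32-bit value narrows to the destination without overflow can be decided from the bits the mask selects; exactly how codegen combines typeMask with typeMin/typeMax is outside this hunk. A small hedged sketch:)

#include <cassert>
#include <cstdint>

bool FitsInUByte(int32_t v)           // TYP_UBYTE: upper bits must all be clear
{
    return (v & 0xFFFFFF00) == 0;
}

bool FitsInSByte(int32_t v)           // TYP_BYTE: upper bits must be a pure sign extension
{
    int32_t hi = v & (int32_t)0xFFFFFF80;
    return hi == 0 || hi == (int32_t)0xFFFFFF80;
}

int main()
{
    assert(FitsInUByte(255) && !FitsInUByte(256) && !FitsInUByte(-1));
    assert(FitsInSByte(127) && FitsInSByte(-128) && !FitsInSByte(128) && !FitsInSByte(-129));
}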
@@ -4300,14 +4304,17 @@ void Lowering::DumpNodeInfoMap()
for (BasicBlock* block = comp->fgFirstBB; block; block = block->bbNext)
{
- GenTreePtr stmt;
- GenTreePtr tree;
+ GenTreePtr stmt;
+ GenTreePtr tree;
for (stmt = block->FirstNonPhiDef(); stmt; stmt = stmt->gtNext)
{
GenTreePtr node;
foreach_treenode_execution_order(node, stmt)
{
- if (stmt->gtStmt.gtStmtIsEmbedded()) continue;
+ if (stmt->gtStmt.gtStmtIsEmbedded())
+ {
+ continue;
+ }
comp->gtDispTree(node, nullptr, nullptr, true);
printf(" +");
node->gtLsraInfo.dump(m_lsra);
diff --git a/src/jit/lower.h b/src/jit/lower.h
index 9f62978a62..98e43fff7e 100644
--- a/src/jit/lower.h
+++ b/src/jit/lower.h
@@ -15,34 +15,33 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#define _LOWER_H_
#include "compiler.h"
-#include "phase.h"
+#include "phase.h"
#include "lsra.h"
class Lowering : public Phase
{
public:
inline Lowering(Compiler* compiler, LinearScanInterface* lsra)
- : Phase(compiler, "Lowering", PHASE_LOWERING),
- vtableCallTemp(BAD_VAR_NUM)
+ : Phase(compiler, "Lowering", PHASE_LOWERING), vtableCallTemp(BAD_VAR_NUM)
{
- m_lsra = (LinearScan *)lsra;
+ m_lsra = (LinearScan*)lsra;
assert(m_lsra);
}
virtual void DoPhase();
-
+
// If requiresOverflowCheck is false, all other values will be unset
struct CastInfo
{
- bool requiresOverflowCheck; // Will the cast require an overflow check
- bool unsignedSource; // Is the source unsigned
- bool unsignedDest; // is the dest unsigned
+ bool requiresOverflowCheck; // Will the cast require an overflow check
+ bool unsignedSource; // Is the source unsigned
+ bool unsignedDest; // is the dest unsigned
// All other fields are only meaningful if requiresOverflowCheck is set.
- ssize_t typeMin; // Lowest storable value of the dest type
- ssize_t typeMax; // Highest storable value of the dest type
- ssize_t typeMask; // For converting from/to unsigned
- bool signCheckOnly; // For converting between unsigned/signed int
+ ssize_t typeMin; // Lowest storable value of the dest type
+ ssize_t typeMax; // Highest storable value of the dest type
+ ssize_t typeMask; // For converting from/to unsigned
+ bool signCheckOnly; // For converting between unsigned/signed int
};
#ifdef _TARGET_64BIT_
@@ -51,47 +50,51 @@ public:
private:
// Friends
- static Compiler::fgWalkResult LowerNodeHelper (GenTreePtr* ppTree, Compiler::fgWalkData* data);
+ static Compiler::fgWalkResult LowerNodeHelper(GenTreePtr* ppTree, Compiler::fgWalkData* data);
static Compiler::fgWalkResult TreeInfoInitHelper(GenTreePtr* ppTree, Compiler::fgWalkData* data);
-
+
// Member Functions
void LowerNode(GenTreePtr* tree, Compiler::fgWalkData* data);
- GenTreeStmt* LowerMorphAndSeqTree(GenTree *tree);
+ GenTreeStmt* LowerMorphAndSeqTree(GenTree* tree);
void CheckVSQuirkStackPaddingNeeded(GenTreeCall* call);
// ------------------------------
// Call Lowering
// ------------------------------
- void LowerCall (GenTree* call);
- void LowerJmpMethod (GenTree* jmp);
- void LowerRet (GenTree* ret);
- GenTree* LowerDelegateInvoke (GenTreeCall* call);
- GenTree* LowerIndirectNonvirtCall (GenTreeCall* call);
- GenTree* LowerDirectCall (GenTreeCall* call);
- GenTree* LowerNonvirtPinvokeCall (GenTreeCall* call);
- GenTree* LowerTailCallViaHelper (GenTreeCall* callNode, GenTree *callTarget);
- void LowerFastTailCall (GenTreeCall* callNode);
- void InsertProfTailCallHook (GenTreeCall* callNode, GenTree *insertionPoint);
- GenTree* LowerVirtualVtableCall (GenTreeCall* call);
- GenTree* LowerVirtualStubCall (GenTreeCall* call);
- void LowerArgsForCall (GenTreeCall* call);
- GenTree* NewPutArg (GenTreeCall* call, GenTreePtr arg, fgArgTabEntryPtr info, var_types type);
- void LowerArg (GenTreeCall* call, GenTreePtr *ppTree);
- void InsertPInvokeCallProlog (GenTreeCall* call);
- void InsertPInvokeCallEpilog (GenTreeCall* call);
- void InsertPInvokeMethodProlog();
- void InsertPInvokeMethodEpilog(BasicBlock *returnBB DEBUGARG(GenTreePtr lastExpr));
- GenTree *SetGCState(int cns);
- GenTree *CreateReturnTrapSeq();
- enum FrameLinkAction { PushFrame, PopFrame };
- GenTree *CreateFrameLinkUpdate(FrameLinkAction);
- GenTree *AddrGen(ssize_t addr, regNumber reg = REG_NA);
- GenTree *AddrGen(void *addr, regNumber reg = REG_NA);
+ void LowerCall(GenTree* call);
+ void LowerJmpMethod(GenTree* jmp);
+ void LowerRet(GenTree* ret);
+ GenTree* LowerDelegateInvoke(GenTreeCall* call);
+ GenTree* LowerIndirectNonvirtCall(GenTreeCall* call);
+ GenTree* LowerDirectCall(GenTreeCall* call);
+ GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call);
+ GenTree* LowerTailCallViaHelper(GenTreeCall* callNode, GenTree* callTarget);
+ void LowerFastTailCall(GenTreeCall* callNode);
+ void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint);
+ GenTree* LowerVirtualVtableCall(GenTreeCall* call);
+ GenTree* LowerVirtualStubCall(GenTreeCall* call);
+ void LowerArgsForCall(GenTreeCall* call);
+ GenTree* NewPutArg(GenTreeCall* call, GenTreePtr arg, fgArgTabEntryPtr info, var_types type);
+ void LowerArg(GenTreeCall* call, GenTreePtr* ppTree);
+ void InsertPInvokeCallProlog(GenTreeCall* call);
+ void InsertPInvokeCallEpilog(GenTreeCall* call);
+ void InsertPInvokeMethodProlog();
+ void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTreePtr lastExpr));
+ GenTree* SetGCState(int cns);
+ GenTree* CreateReturnTrapSeq();
+ enum FrameLinkAction
+ {
+ PushFrame,
+ PopFrame
+ };
+ GenTree* CreateFrameLinkUpdate(FrameLinkAction);
+ GenTree* AddrGen(ssize_t addr, regNumber reg = REG_NA);
+ GenTree* AddrGen(void* addr, regNumber reg = REG_NA);
// return concatenation of two trees, which currently uses a comma and really should not
// because we're not supposed to have commas in codegen
- GenTree *Concat(GenTree *first, GenTree *second)
- {
+ GenTree* Concat(GenTree* first, GenTree* second)
+ {
// if any is null, it must be the first
if (first == nullptr)
{
@@ -103,7 +106,7 @@ private:
}
else
{
- return comp->gtNewOperNode(GT_COMMA, TYP_I_IMPL, first, second);
+ return comp->gtNewOperNode(GT_COMMA, TYP_I_IMPL, first, second);
}
}
@@ -130,14 +133,14 @@ private:
GenTree* Offset(GenTree* base, unsigned offset)
{
var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
- return new(comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset);
+ return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset);
}
// returns true if the tree can use the read-modify-write memory instruction form
bool isRMWRegOper(GenTreePtr tree);
-
+
// return true if this call target is within range of a pc-rel call on the machine
- bool IsCallTargetInRange(void *addr);
+ bool IsCallTargetInRange(void* addr);
void TreeNodeInfoInit(GenTree* stmt);
void TreeNodeInfoInit(GenTreePtr* tree, GenTree* parent);
@@ -159,7 +162,7 @@ private:
{
tree->gtLsraInfo.regOptional = true;
}
-
+
GenTree* PreferredRegOptionalOperand(GenTree* tree);
// ------------------------------------------------------------------
@@ -175,9 +178,9 @@ private:
// Arguments:
// tree - Gentree of a bininary operation.
//
- // Returns
+ // Returns
// None.
- //
+ //
// Note: On xarch at most only one of the operands will be marked as
// reg optional, even when both operands could be considered register
// optional.
@@ -188,8 +191,7 @@ private:
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
- if (tree->OperIsCommutative() &&
- tree->TypeGet() == op1->TypeGet())
+ if (tree->OperIsCommutative() && tree->TypeGet() == op1->TypeGet())
{
GenTree* preferredOp = PreferredRegOptionalOperand(tree);
SetRegOptional(preferredOp);
@@ -240,33 +242,32 @@ private:
#if !CPU_LOAD_STORE_ARCH
bool IsBinOpInRMWStoreInd(GenTreePtr tree);
- bool IsRMWMemOpRootedAtStoreInd(GenTreePtr storeIndTree, GenTreePtr *indirCandidate, GenTreePtr *indirOpSource);
+ bool IsRMWMemOpRootedAtStoreInd(GenTreePtr storeIndTree, GenTreePtr* indirCandidate, GenTreePtr* indirOpSource);
bool SetStoreIndOpCountsIfRMWMemOp(GenTreePtr storeInd);
#endif
void LowerStoreLoc(GenTreeLclVarCommon* tree);
- void SetIndirAddrOpCounts(GenTree *indirTree);
- void LowerGCWriteBarrier(GenTree *tree);
- void LowerArrElem(GenTree **ppTree, Compiler::fgWalkData* data);
- void LowerRotate(GenTree *tree);
+ void SetIndirAddrOpCounts(GenTree* indirTree);
+ void LowerGCWriteBarrier(GenTree* tree);
+ void LowerArrElem(GenTree** ppTree, Compiler::fgWalkData* data);
+ void LowerRotate(GenTree* tree);
// Utility functions
- void MorphBlkIntoHelperCall (GenTreePtr pTree, GenTreePtr treeStmt);
+ void MorphBlkIntoHelperCall(GenTreePtr pTree, GenTreePtr treeStmt);
+
public:
- static bool IndirsAreEquivalent (GenTreePtr pTreeA, GenTreePtr pTreeB);
+ static bool IndirsAreEquivalent(GenTreePtr pTreeA, GenTreePtr pTreeB);
+
private:
- static bool NodesAreEquivalentLeaves (GenTreePtr candidate, GenTreePtr storeInd);
+ static bool NodesAreEquivalentLeaves(GenTreePtr candidate, GenTreePtr storeInd);
- GenTreePtr CreateLocalTempAsg (GenTreePtr rhs, unsigned refCount, GenTreePtr *ppLclVar = nullptr);
- GenTreeStmt* CreateTemporary (GenTree** ppTree);
- bool AreSourcesPossiblyModified (GenTree* use, GenTree* src1, GenTree *src2);
- void ReplaceNode (GenTree** ppTreeLocation,
- GenTree* replacementNode,
- GenTree* stmt,
- BasicBlock* block);
+ GenTreePtr CreateLocalTempAsg(GenTreePtr rhs, unsigned refCount, GenTreePtr* ppLclVar = nullptr);
+ GenTreeStmt* CreateTemporary(GenTree** ppTree);
+ bool AreSourcesPossiblyModified(GenTree* use, GenTree* src1, GenTree* src2);
+ void ReplaceNode(GenTree** ppTreeLocation, GenTree* replacementNode, GenTree* stmt, BasicBlock* block);
- void UnlinkNode (GenTree** ppParentLink, GenTree* stmt, BasicBlock* block);
+ void UnlinkNode(GenTree** ppParentLink, GenTree* stmt, BasicBlock* block);
- // return true if 'childNode' is an immediate that can be contained
+ // return true if 'childNode' is an immediate that can be contained
// by the 'parentNode' (i.e. folded into an instruction)
// for example small enough and non-relocatable
bool IsContainableImmed(GenTree* parentNode, GenTree* childNode);
@@ -276,13 +277,14 @@ private:
// Checks and makes 'childNode' contained in the 'parentNode'
bool CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode);
-
- // Checks for memory conflicts in the instructions between childNode and parentNode, and returns true if childNode can be contained.
+
+ // Checks for memory conflicts in the instructions between childNode and parentNode, and returns true if childNode
+ // can be contained.
bool IsSafeToContainMem(GenTree* parentNode, GenTree* childNode);
- LinearScan *m_lsra;
- BasicBlock *currBlock;
- unsigned vtableCallTemp; // local variable we use as a temp for vtable calls
+ LinearScan* m_lsra;
+ BasicBlock* currBlock;
+ unsigned vtableCallTemp; // local variable we use as a temp for vtable calls
};
#endif // _LOWER_H_
diff --git a/src/jit/lowerarm.cpp b/src/jit/lowerarm.cpp
index 8af915ecf2..2acb7498a2 100644
--- a/src/jit/lowerarm.cpp
+++ b/src/jit/lowerarm.cpp
@@ -32,10 +32,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "lsra.h"
/* Lowering of GT_CAST nodes */
-void Lowering::LowerCast(GenTreePtr *ppTree) { }
-
+void Lowering::LowerCast(GenTreePtr* ppTree)
+{
+}
-void Lowering::LowerCntBlockOp(GenTreePtr *ppTree)
+void Lowering::LowerCntBlockOp(GenTreePtr* ppTree)
{
NYI_ARM("ARM Lowering for BlockOp");
}
@@ -57,7 +58,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
comp->fgWalkTreePost(&stmt->gtStmt.gtStmtExpr, &Lowering::TreeInfoInitHelper, this);
}
-void Lowering::TreeNodeInfoInit(GenTreePtr *pTree, GenTree* parent)
+void Lowering::TreeNodeInfoInit(GenTreePtr* pTree, GenTree* parent)
{
NYI("ARM TreeNodInfoInit");
}
@@ -68,13 +69,13 @@ bool Lowering::isRMWRegOper(GenTreePtr tree)
return false;
}
-bool Lowering::IsCallTargetInRange(void *addr)
+bool Lowering::IsCallTargetInRange(void* addr)
{
- return comp->codeGen->validImmForBL ((ssize_t)addr);
+ return comp->codeGen->validImmForBL((ssize_t)addr);
}
// return true if the immediate can be folded into an instruction, for example small enough and non-relocatable
-bool Lowering:: IsContainableImmed(GenTree* parentNode, GenTree* childNode)
+bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode)
{
NYI_ARM("ARM IsContainableImmed");
return false;
diff --git a/src/jit/lowerarm64.cpp b/src/jit/lowerarm64.cpp
index 9915872654..a9c5709209 100644
--- a/src/jit/lowerarm64.cpp
+++ b/src/jit/lowerarm64.cpp
@@ -28,7 +28,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "jit.h"
#include "lower.h"
-// there is not much lowering to do with storing a local but
+// there is not much lowering to do with storing a local but
// we do some handling of contained immediates and widening operations of unsigneds
void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
{
@@ -43,9 +43,9 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
assert(storeLoc->OperGet() == GT_STORE_LCL_VAR);
// srcCount = number of registers in which the value is returned by call
- GenTreeCall* call = op1->AsCall();
+ GenTreeCall* call = op1->AsCall();
ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- info->srcCount = retTypeDesc->GetReturnRegCount();
+ info->srcCount = retTypeDesc->GetReturnRegCount();
// Call node srcCandidates = Bitwise-OR(allregs(GetReturnRegType(i))) for all i=0..RetRegCount-1
regMaskTP srcCandidates = m_lsra->allMultiRegCallNodeRegs(call);
@@ -58,10 +58,10 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
// Try to widen the ops if they are going into a local var.
if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (op1->gtOper == GT_CNS_INT))
{
- GenTreeIntCon* con = op1->AsIntCon();
- ssize_t ival = con->gtIconVal;
- unsigned varNum = storeLoc->gtLclNum;
- LclVarDsc* varDsc = comp->lvaTable + varNum;
+ GenTreeIntCon* con = op1->AsIntCon();
+ ssize_t ival = con->gtIconVal;
+ unsigned varNum = storeLoc->gtLclNum;
+ LclVarDsc* varDsc = comp->lvaTable + varNum;
if (varDsc->lvIsSIMDType())
{
@@ -69,7 +69,7 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
}
unsigned size = genTypeSize(storeLoc);
// If we are storing a constant into a local variable
- // we extend the size of the store here
+ // we extend the size of the store here
if ((size < 4) && !varTypeIsStruct(varDsc))
{
if (!varTypeIsUnsigned(varDsc))
@@ -105,80 +105,80 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
}
/**
- * Takes care of annotating the register requirements
+ * Takes care of annotating the register requirements
* for every TreeNodeInfo struct that maps to each tree node.
* Preconditions:
* LSRA has been initialized and there is a TreeNodeInfo node
* already allocated and initialized for every tree in the IR.
* Postconditions:
* Every TreeNodeInfo instance has the right annotations on register
- * requirements needed by LSRA to build the Interval Table (source,
+ * requirements needed by LSRA to build the Interval Table (source,
* destination and internal [temp] register counts).
* This code is refactored originally from LSRA.
*/
void Lowering::TreeNodeInfoInit(GenTree* stmt)
{
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
assert(stmt->gtStmt.gtStmtIsTopLevel());
GenTree* tree = stmt->gtStmt.gtStmtList;
-
+
while (tree)
{
- unsigned kind = tree->OperKind();
- TreeNodeInfo* info = &(tree->gtLsraInfo);
- RegisterType registerType = TypeGet(tree);
- GenTree* next = tree->gtNext;
+ unsigned kind = tree->OperKind();
+ TreeNodeInfo* info = &(tree->gtLsraInfo);
+ RegisterType registerType = TypeGet(tree);
+ GenTree* next = tree->gtNext;
switch (tree->OperGet())
{
GenTree* op1;
GenTree* op2;
- default:
- info->dstCount = (tree->TypeGet() == TYP_VOID) ? 0 : 1;
- if (kind & (GTK_CONST|GTK_LEAF))
- {
- info->srcCount = 0;
- }
- else if (kind & (GTK_SMPOP))
- {
- if (tree->gtGetOp2() != nullptr)
+ default:
+ info->dstCount = (tree->TypeGet() == TYP_VOID) ? 0 : 1;
+ if (kind & (GTK_CONST | GTK_LEAF))
{
- info->srcCount = 2;
+ info->srcCount = 0;
+ }
+ else if (kind & (GTK_SMPOP))
+ {
+ if (tree->gtGetOp2() != nullptr)
+ {
+ info->srcCount = 2;
+ }
+ else
+ {
+ info->srcCount = 1;
+ }
}
else
{
- info->srcCount = 1;
+ unreached();
}
- }
- else
- {
- unreached();
- }
- break;
+ break;
- case GT_STORE_LCL_FLD:
- case GT_STORE_LCL_VAR:
- info->srcCount = 1;
- info->dstCount = 0;
- LowerStoreLoc(tree->AsLclVarCommon());
- break;
+ case GT_STORE_LCL_FLD:
+ case GT_STORE_LCL_VAR:
+ info->srcCount = 1;
+ info->dstCount = 0;
+ LowerStoreLoc(tree->AsLclVarCommon());
+ break;
- case GT_BOX:
- noway_assert(!"box should not exist here");
- // The result of 'op1' is also the final result
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ case GT_BOX:
+ noway_assert(!"box should not exist here");
+ // The result of 'op1' is also the final result
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
- case GT_PHYSREGDST:
- info->srcCount = 1;
- info->dstCount = 0;
- break;
+ case GT_PHYSREGDST:
+ info->srcCount = 1;
+ info->dstCount = 0;
+ break;
- case GT_COMMA:
+ case GT_COMMA:
{
GenTreePtr firstOperand;
GenTreePtr secondOperand;
@@ -195,192 +195,192 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
if (firstOperand->TypeGet() != TYP_VOID)
{
firstOperand->gtLsraInfo.isLocalDefUse = true;
- firstOperand->gtLsraInfo.dstCount = 0;
+ firstOperand->gtLsraInfo.dstCount = 0;
}
if (tree->TypeGet() == TYP_VOID && secondOperand->TypeGet() != TYP_VOID)
{
secondOperand->gtLsraInfo.isLocalDefUse = true;
- secondOperand->gtLsraInfo.dstCount = 0;
+ secondOperand->gtLsraInfo.dstCount = 0;
}
}
- __fallthrough;
+ __fallthrough;
- case GT_LIST:
- case GT_ARGPLACE:
- case GT_NO_OP:
- case GT_START_NONGC:
- case GT_PROF_HOOK:
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ case GT_LIST:
+ case GT_ARGPLACE:
+ case GT_NO_OP:
+ case GT_START_NONGC:
+ case GT_PROF_HOOK:
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
- case GT_CNS_DBL:
- info->srcCount = 0;
- info->dstCount = 1;
- {
- GenTreeDblCon *dblConst = tree->AsDblCon();
- double constValue = dblConst->gtDblCon.gtDconVal;
+ case GT_CNS_DBL:
+ info->srcCount = 0;
+ info->dstCount = 1;
+ {
+ GenTreeDblCon* dblConst = tree->AsDblCon();
+ double constValue = dblConst->gtDblCon.gtDconVal;
- if (emitter::emitIns_valid_imm_for_fmov(constValue))
+ if (emitter::emitIns_valid_imm_for_fmov(constValue))
+ {
+ // Directly encode constant to instructions.
+ }
+ else
+ {
+ // Reserve int to load constant from memory (IF_LARGELDC)
+ info->internalIntCount = 1;
+ }
+ }
+ break;
+
+ case GT_QMARK:
+ case GT_COLON:
+ info->srcCount = 0;
+ info->dstCount = 0;
+ unreached();
+ break;
+
+ case GT_RETURN:
+ TreeNodeInfoInitReturn(tree);
+ break;
+
+ case GT_RETFILT:
+ if (tree->TypeGet() == TYP_VOID)
{
- // Directly encode constant to instructions.
+ info->srcCount = 0;
+ info->dstCount = 0;
}
else
{
- // Reserve int to load constant from memory (IF_LARGELDC)
- info->internalIntCount = 1;
- }
- }
- break;
+ assert(tree->TypeGet() == TYP_INT);
- case GT_QMARK:
- case GT_COLON:
- info->srcCount = 0;
- info->dstCount = 0;
- unreached();
- break;
+ info->srcCount = 1;
+ info->dstCount = 1;
- case GT_RETURN:
- TreeNodeInfoInitReturn(tree);
- break;
+ info->setSrcCandidates(l, RBM_INTRET);
+ tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, RBM_INTRET);
+ }
+ break;
- case GT_RETFILT:
- if (tree->TypeGet() == TYP_VOID)
- {
+ case GT_NOP:
+ // A GT_NOP is either a passthrough (if it is void, or if it has
+ // a child), but must be considered to produce a dummy value if it
+ // has a type but no child
+ info->srcCount = 0;
+ if (tree->TypeGet() != TYP_VOID && tree->gtOp.gtOp1 == nullptr)
+ {
+ info->dstCount = 1;
+ }
+ else
+ {
+ info->dstCount = 0;
+ }
+ break;
+
+ case GT_JTRUE:
info->srcCount = 0;
info->dstCount = 0;
- }
- else
- {
- assert(tree->TypeGet() == TYP_INT);
+ l->clearDstCount(tree->gtOp.gtOp1);
+ break;
- info->srcCount = 1;
- info->dstCount = 1;
+ case GT_JMP:
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
- info->setSrcCandidates(l, RBM_INTRET);
- tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, RBM_INTRET);
- }
- break;
+ case GT_SWITCH:
+ // This should never occur since switch nodes must not be visible at this
+ // point in the JIT.
+ info->srcCount = 0;
+ info->dstCount = 0; // To avoid getting uninit errors.
+ noway_assert(!"Switch must be lowered at this point");
+ break;
- case GT_NOP:
- // A GT_NOP is either a passthrough (if it is void, or if it has
- // a child), but must be considered to produce a dummy value if it
- // has a type but no child
- info->srcCount = 0;
- if (tree->TypeGet() != TYP_VOID && tree->gtOp.gtOp1 == nullptr)
- {
+ case GT_JMPTABLE:
+ info->srcCount = 0;
info->dstCount = 1;
- }
- else
- {
- info->dstCount = 0;
- }
- break;
-
- case GT_JTRUE:
- info->srcCount = 0;
- info->dstCount = 0;
- l->clearDstCount(tree->gtOp.gtOp1);
- break;
+ break;
- case GT_JMP:
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ case GT_SWITCH_TABLE:
+ info->srcCount = 2;
+ info->internalIntCount = 1;
+ info->dstCount = 0;
+ break;
- case GT_SWITCH:
- // This should never occur since switch nodes must not be visible at this
- // point in the JIT.
- info->srcCount = 0;
- info->dstCount = 0; // To avoid getting uninit errors.
- noway_assert(!"Switch must be lowered at this point");
- break;
+ case GT_ASG:
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ noway_assert(!"We should never hit any assignment operator in lowering");
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
- case GT_JMPTABLE:
- info->srcCount = 0;
- info->dstCount = 1;
- break;
+ case GT_ADD:
+ case GT_SUB:
+ if (varTypeIsFloating(tree->TypeGet()))
+ {
+ // overflow operations aren't supported on float/double types.
+ assert(!tree->gtOverflow());
- case GT_SWITCH_TABLE:
- info->srcCount = 2;
- info->internalIntCount = 1;
- info->dstCount = 0;
- break;
+ // No implicit conversions at this stage as the expectation is that
+ // everything is made explicit by adding casts.
+ assert(tree->gtOp.gtOp1->TypeGet() == tree->gtOp.gtOp2->TypeGet());
- case GT_ASG:
- case GT_ASG_ADD:
- case GT_ASG_SUB:
- noway_assert(!"We should never hit any assignment operator in lowering");
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ info->srcCount = 2;
+ info->dstCount = 1;
- case GT_ADD:
- case GT_SUB:
- if (varTypeIsFloating(tree->TypeGet()))
- {
- // overflow operations aren't supported on float/double types.
- assert(!tree->gtOverflow());
+ break;
+ }
- // No implicit conversions at this stage as the expectation is that
- // everything is made explicit by adding casts.
- assert(tree->gtOp.gtOp1->TypeGet() == tree->gtOp.gtOp2->TypeGet());
+ __fallthrough;
+ case GT_AND:
+ case GT_OR:
+ case GT_XOR:
info->srcCount = 2;
- info->dstCount = 1;
-
+ info->dstCount = 1;
+ // Check and make op2 contained (if it is a containable immediate)
+ CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
break;
- }
- __fallthrough;
+ case GT_RETURNTRAP:
+ // this just turns into a compare of its child with an int
+ // + a conditional call
+ info->srcCount = 1;
+ info->dstCount = 1;
+ break;
- case GT_AND:
- case GT_OR:
- case GT_XOR:
- info->srcCount = 2;
- info->dstCount = 1;
- // Check and make op2 contained (if it is a containable immediate)
- CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
- break;
-
- case GT_RETURNTRAP:
- // this just turns into a compare of its child with an int
- // + a conditional call
- info->srcCount = 1;
- info->dstCount = 1;
- break;
+ case GT_MOD:
+ case GT_UMOD:
+ NYI_IF(varTypeIsFloating(tree->TypeGet()), "FP Remainder in ARM64");
+ assert(!"Shouldn't see an integer typed GT_MOD node in ARM64");
+ break;
- case GT_MOD:
- case GT_UMOD:
- NYI_IF(varTypeIsFloating(tree->TypeGet()), "FP Remainder in ARM64");
- assert(!"Shouldn't see an integer typed GT_MOD node in ARM64");
- break;
-
- case GT_MUL:
- if (tree->gtOverflow())
- {
- // Need a register different from target reg to check for overflow.
- info->internalIntCount = 2;
- }
- __fallthrough;
+ case GT_MUL:
+ if (tree->gtOverflow())
+ {
+ // Need a register different from target reg to check for overflow.
+ info->internalIntCount = 2;
+ }
+ __fallthrough;
- case GT_DIV:
- case GT_MULHI:
- case GT_UDIV:
+ case GT_DIV:
+ case GT_MULHI:
+ case GT_UDIV:
{
info->srcCount = 2;
info->dstCount = 1;
}
break;
-
- case GT_INTRINSIC:
+
+ case GT_INTRINSIC:
{
// TODO-ARM64-NYI
// Right now only Abs/Round/Sqrt are treated as math intrinsics
- noway_assert((tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Abs) ||
- (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round) ||
- (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Sqrt) );
+ noway_assert((tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Abs) ||
+ (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round) ||
+ (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Sqrt));
// Both operand and its result must be of the same floating point type.
op1 = tree->gtOp.gtOp1;
@@ -393,27 +393,27 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
break;
#ifdef FEATURE_SIMD
- case GT_SIMD:
- TreeNodeInfoInitSIMD(tree);
- break;
+ case GT_SIMD:
+ TreeNodeInfoInitSIMD(tree);
+ break;
#endif // FEATURE_SIMD
- case GT_CAST:
+ case GT_CAST:
{
// TODO-ARM64-CQ: Int-To-Int conversions - castOp cannot be a memory op and must have an assigned
// register.
- // see CodeGen::genIntToIntCast()
+ // see CodeGen::genIntToIntCast()
info->srcCount = 1;
info->dstCount = 1;
// Non-overflow casts to/from float/double are done using SSE2 instructions
// and that allow the source operand to be either a reg or memop. Given the
- // fact that casts from small int to float/double are done as two-level casts,
+ // fact that casts from small int to float/double are done as two-level casts,
// the source operand is always guaranteed to be of size 4 or 8 bytes.
- var_types castToType = tree->CastToType();
- GenTreePtr castOp = tree->gtCast.CastOp();
- var_types castOpType = castOp->TypeGet();
+ var_types castToType = tree->CastToType();
+ GenTreePtr castOp = tree->gtCast.CastOp();
+ var_types castOpType = castOp->TypeGet();
if (tree->gtFlags & GTF_UNSIGNED)
{
castOpType = genUnsignedType(castOpType);
@@ -428,7 +428,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
assert(opSize == 4 || opSize == 8);
}
}
-#endif //DEBUG
+#endif // DEBUG
// Some overflow checks need a temp reg
CastInfo castInfo;
@@ -439,7 +439,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
if (castInfo.requiresOverflowCheck)
{
var_types srcType = castOp->TypeGet();
- emitAttr cmpSize = EA_ATTR(genTypeSize(srcType));
+ emitAttr cmpSize = EA_ATTR(genTypeSize(srcType));
// If we cannot store the comparisons in an immediate for either
// comparing against the max or min value, then we will need to
@@ -456,26 +456,26 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
}
break;
- case GT_NEG:
- info->srcCount = 1;
- info->dstCount = 1;
- break;
-
- case GT_NOT:
- info->srcCount = 1;
- info->dstCount = 1;
- break;
+ case GT_NEG:
+ info->srcCount = 1;
+ info->dstCount = 1;
+ break;
+
+ case GT_NOT:
+ info->srcCount = 1;
+ info->dstCount = 1;
+ break;
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROR:
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROR:
{
info->srcCount = 2;
info->dstCount = 1;
GenTreePtr shiftBy = tree->gtOp.gtOp2;
- GenTreePtr source = tree->gtOp.gtOp1;
+ GenTreePtr source = tree->gtOp.gtOp1;
if (shiftBy->IsCnsIntOrI())
{
l->clearDstCount(shiftBy);
@@ -484,40 +484,40 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
}
break;
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
- LowerCmp(tree);
- break;
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
+ LowerCmp(tree);
+ break;
- case GT_CKFINITE:
- info->srcCount = 1;
- info->dstCount = 1;
- info->internalIntCount = 1;
- break;
+ case GT_CKFINITE:
+ info->srcCount = 1;
+ info->dstCount = 1;
+ info->internalIntCount = 1;
+ break;
- case GT_CMPXCHG:
- info->srcCount = 3;
- info->dstCount = 1;
+ case GT_CMPXCHG:
+ info->srcCount = 3;
+ info->dstCount = 1;
- // TODO-ARM64-NYI
- NYI("CMPXCHG");
- break;
+ // TODO-ARM64-NYI
+ NYI("CMPXCHG");
+ break;
- case GT_LOCKADD:
- info->srcCount = 2;
- info->dstCount = 0;
- CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
- break;
+ case GT_LOCKADD:
+ info->srcCount = 2;
+ info->dstCount = 0;
+ CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
+ break;
- case GT_CALL:
- TreeNodeInfoInitCall(tree->AsCall());
- break;
+ case GT_CALL:
+ TreeNodeInfoInitCall(tree->AsCall());
+ break;
- case GT_ADDR:
+ case GT_ADDR:
{
// For a GT_ADDR, the child node should not be evaluated into a register
GenTreePtr child = tree->gtOp.gtOp1;
@@ -528,13 +528,13 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
}
break;
- case GT_INITBLK:
- case GT_COPYBLK:
- case GT_COPYOBJ:
- TreeNodeInfoInitBlockStore(tree->AsBlkOp());
- break;
+ case GT_INITBLK:
+ case GT_COPYBLK:
+ case GT_COPYOBJ:
+ TreeNodeInfoInitBlockStore(tree->AsBlkOp());
+ break;
- case GT_LCLHEAP:
+ case GT_LCLHEAP:
{
info->srcCount = 1;
info->dstCount = 1;
@@ -548,11 +548,11 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
// const and <PageSize No 0
// >6 ptr words Yes hasPspSym ? 1 : 0
// Non-const Yes hasPspSym ? 1 : 0
- // Non-const No 2
+ // Non-const No 2
//
// PSPSym - If the method has PSPSym increment internalIntCount by 1.
//
- bool hasPspSym;
+ bool hasPspSym;
#if FEATURE_EH_FUNCLETS
hasPspSym = (compiler->lvaPSPSym != BAD_VAR_NUM);
#else
@@ -570,12 +570,13 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
{
info->internalIntCount = 0;
}
- else
+ else
{
// Compute the amount of memory to properly STACK_ALIGN.
// Note: The Gentree node is not updated here as it is cheap to recompute stack aligned size.
- // This should also help in debugging as we can examine the original size specified with localloc.
- sizeVal = AlignUp(sizeVal, STACK_ALIGN);
+ // This should also help in debugging as we can examine the original size specified with
+ // localloc.
+ sizeVal = AlignUp(sizeVal, STACK_ALIGN);
size_t cntStackAlignedWidthItems = (sizeVal >> STACK_ALIGN_SHIFT);
// For small allocations up to 4 'stp' instructions (i.e. 64 bytes of localloc)
@@ -624,7 +625,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
// If the method has PSPSym, we would need an additional register to relocate it on the stack.
if (hasPspSym)
- {
+ {
// Exclude const size 0
if (!size->IsCnsIntOrI() || (size->gtIntCon.gtIconVal > 0))
info->internalIntCount++;
@@ -632,9 +633,9 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
}
break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
{
GenTreeBoundsChk* node = tree->AsBoundsChk();
@@ -643,62 +644,62 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
info->dstCount = 0;
GenTree* intCns = nullptr;
- GenTree* other = nullptr;
+ GenTree* other = nullptr;
if (CheckImmedAndMakeContained(tree, node->gtIndex))
{
intCns = node->gtIndex;
- other = node->gtArrLen;
+ other = node->gtArrLen;
}
else if (CheckImmedAndMakeContained(tree, node->gtArrLen))
{
intCns = node->gtArrLen;
- other = node->gtIndex;
+ other = node->gtIndex;
}
- else
+ else
{
other = node->gtIndex;
}
}
break;
- case GT_ARR_ELEM:
- // These must have been lowered to GT_ARR_INDEX
- noway_assert(!"We should never see a GT_ARR_ELEM in lowering");
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ case GT_ARR_ELEM:
+ // These must have been lowered to GT_ARR_INDEX
+ noway_assert(!"We should never see a GT_ARR_ELEM in lowering");
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
- case GT_ARR_INDEX:
- info->srcCount = 2;
- info->dstCount = 1;
+ case GT_ARR_INDEX:
+ info->srcCount = 2;
+ info->dstCount = 1;
- // We need one internal register when generating code for GT_ARR_INDEX, however the
- // register allocator always may just give us the same one as it gives us for the 'dst'
- // as a workaround we will just ask for two internal registers.
- //
- info->internalIntCount = 2;
+ // We need one internal register when generating code for GT_ARR_INDEX; however, the
+ // register allocator may just give us the same one as it gives us for the 'dst', so
+ // as a workaround we will just ask for two internal registers.
+ //
+ info->internalIntCount = 2;
- // For GT_ARR_INDEX, the lifetime of the arrObj must be extended because it is actually used multiple
- // times while the result is being computed.
- tree->AsArrIndex()->ArrObj()->gtLsraInfo.isDelayFree = true;
- info->hasDelayFreeSrc = true;
- break;
+ // For GT_ARR_INDEX, the lifetime of the arrObj must be extended because it is actually used multiple
+ // times while the result is being computed.
+ tree->AsArrIndex()->ArrObj()->gtLsraInfo.isDelayFree = true;
+ info->hasDelayFreeSrc = true;
+ break;
- case GT_ARR_OFFSET:
- // This consumes the offset, if any, the arrObj and the effective index,
- // and produces the flattened offset for this dimension.
- info->srcCount = 3;
- info->dstCount = 1;
- info->internalIntCount = 1;
+ case GT_ARR_OFFSET:
+ // This consumes the offset, if any, the arrObj and the effective index,
+ // and produces the flattened offset for this dimension.
+ info->srcCount = 3;
+ info->dstCount = 1;
+ info->internalIntCount = 1;
- // we don't want to generate code for this
- if (tree->gtArrOffs.gtOffset->IsIntegralConst(0))
- {
- MakeSrcContained(tree, tree->gtArrOffs.gtOffset);
- }
- break;
+ // we don't want to generate code for this
+ if (tree->gtArrOffs.gtOffset->IsIntegralConst(0))
+ {
+ MakeSrcContained(tree, tree->gtArrOffs.gtOffset);
+ }
+ break;
- case GT_LEA:
+ case GT_LEA:
{
GenTreeAddrMode* lea = tree->AsAddrMode();
@@ -721,12 +722,12 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
// On ARM64 we may need a single internal register
// (when both conditions are true then we still only need a single internal register)
- if ((index != nullptr) && (cns != 0))
+ if ((index != nullptr) && (cns != 0))
{
// ARM64 does not support both Index and offset so we need an internal register
info->internalIntCount = 1;
}
- else if (!emitter::emitIns_valid_imm_for_add(cns, EA_8BYTE))
+ else if (!emitter::emitIns_valid_imm_for_add(cns, EA_8BYTE))
{
// This offset can't be contained in the add instruction, so we need an internal register
info->internalIntCount = 1;
@@ -734,11 +735,11 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
}
break;
- case GT_STOREIND:
+ case GT_STOREIND:
{
info->srcCount = 2;
info->dstCount = 0;
- GenTree* src = tree->gtOp.gtOp2;
+ GenTree* src = tree->gtOp.gtOp2;
if (compiler->codeGen->gcInfo.gcIsWriteBarrierAsgNode(tree))
{
@@ -754,38 +755,38 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
SetIndirAddrOpCounts(tree);
}
break;
-
- case GT_NULLCHECK:
- info->dstCount = 0;
- info->srcCount = 1;
- info->isLocalDefUse = true;
- // null check is an indirection on an addr
- SetIndirAddrOpCounts(tree);
- break;
- case GT_IND:
- info->dstCount = 1;
- info->srcCount = 1;
- SetIndirAddrOpCounts(tree);
- break;
+ case GT_NULLCHECK:
+ info->dstCount = 0;
+ info->srcCount = 1;
+ info->isLocalDefUse = true;
+ // null check is an indirection on an addr
+ SetIndirAddrOpCounts(tree);
+ break;
- case GT_CATCH_ARG:
- info->srcCount = 0;
- info->dstCount = 1;
- info->setDstCandidates(l, RBM_EXCEPTION_OBJECT);
- break;
+ case GT_IND:
+ info->dstCount = 1;
+ info->srcCount = 1;
+ SetIndirAddrOpCounts(tree);
+ break;
+
+ case GT_CATCH_ARG:
+ info->srcCount = 0;
+ info->dstCount = 1;
+ info->setDstCandidates(l, RBM_EXCEPTION_OBJECT);
+ break;
- case GT_CLS_VAR:
- info->srcCount = 0;
- // GT_CLS_VAR, by the time we reach the backend, must always
- // be a pure use.
- // It will produce a result of the type of the
- // node, and use an internal register for the address.
+ case GT_CLS_VAR:
+ info->srcCount = 0;
+ // GT_CLS_VAR, by the time we reach the backend, must always
+ // be a pure use.
+ // It will produce a result of the type of the
+ // node, and use an internal register for the address.
- info->dstCount = 1;
- assert((tree->gtFlags & (GTF_VAR_DEF|GTF_VAR_USEASG|GTF_VAR_USEDEF)) == 0);
- info->internalIntCount = 1;
- break;
+ info->dstCount = 1;
+ assert((tree->gtFlags & (GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_USEDEF)) == 0);
+ info->internalIntCount = 1;
+ break;
} // end switch (tree->OperGet())
// We need to be sure that we've set info->srcCount and info->dstCount appropriately
@@ -803,14 +804,13 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitReturn(GenTree* tree)
+void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
{
- TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
+ TreeNodeInfo* info = &(tree->gtLsraInfo);
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
- GenTree* op1 = tree->gtGetOp1();
+ GenTree* op1 = tree->gtGetOp1();
regMaskTP useCandidates = RBM_NONE;
info->srcCount = (tree->TypeGet() == TYP_VOID) ? 0 : 1;
@@ -822,7 +822,7 @@ Lowering::TreeNodeInfoInitReturn(GenTree* tree)
if ((op1->OperGet() == GT_LCL_VAR) || (op1->OperGet() == GT_LCL_FLD))
{
GenTreeLclVarCommon* lclVarCommon = op1->AsLclVarCommon();
- LclVarDsc* varDsc = &(compiler->lvaTable[lclVarCommon->gtLclNum]);
+ LclVarDsc* varDsc = &(compiler->lvaTable[lclVarCommon->gtLclNum]);
assert(varDsc->lvIsMultiRegRet);
// Mark var as contained if not enregistrable.
@@ -836,23 +836,33 @@ Lowering::TreeNodeInfoInitReturn(GenTree* tree)
noway_assert(op1->IsMultiRegCall());
ReturnTypeDesc* retTypeDesc = op1->AsCall()->GetReturnTypeDesc();
- info->srcCount = retTypeDesc->GetReturnRegCount();
- useCandidates = retTypeDesc->GetABIReturnRegs();
+ info->srcCount = retTypeDesc->GetReturnRegCount();
+ useCandidates = retTypeDesc->GetABIReturnRegs();
}
}
else
{
- // Non-struct type return - determine useCandidates
+ // Non-struct type return - determine useCandidates
switch (tree->TypeGet())
{
- case TYP_VOID: useCandidates = RBM_NONE; break;
- case TYP_FLOAT: useCandidates = RBM_FLOATRET; break;
- case TYP_DOUBLE: useCandidates = RBM_DOUBLERET; break;
- case TYP_LONG: useCandidates = RBM_LNGRET; break;
- default: useCandidates = RBM_INTRET; break;
+ case TYP_VOID:
+ useCandidates = RBM_NONE;
+ break;
+ case TYP_FLOAT:
+ useCandidates = RBM_FLOATRET;
+ break;
+ case TYP_DOUBLE:
+ useCandidates = RBM_DOUBLERET;
+ break;
+ case TYP_LONG:
+ useCandidates = RBM_LNGRET;
+ break;
+ default:
+ useCandidates = RBM_INTRET;
+ break;
}
- }
-
+ }
+
if (useCandidates != RBM_NONE)
{
tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, useCandidates);
@@ -868,14 +878,13 @@ Lowering::TreeNodeInfoInitReturn(GenTree* tree)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
+void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
{
- TreeNodeInfo* info = &(call->gtLsraInfo);
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
- bool hasMultiRegRetVal = false;
- ReturnTypeDesc* retTypeDesc = nullptr;
+ TreeNodeInfo* info = &(call->gtLsraInfo);
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
+ bool hasMultiRegRetVal = false;
+ ReturnTypeDesc* retTypeDesc = nullptr;
info->srcCount = 0;
if (call->TypeGet() != TYP_VOID)
@@ -884,7 +893,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
if (hasMultiRegRetVal)
{
// dst count = number of registers in which the value is returned by call
- retTypeDesc = call->GetReturnTypeDesc();
+ retTypeDesc = call->GetReturnTypeDesc();
info->dstCount = retTypeDesc->GetReturnRegCount();
}
else
@@ -984,7 +993,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
continue;
}
- var_types argType = argNode->TypeGet();
+ var_types argType = argNode->TypeGet();
bool argIsFloat = varTypeIsFloating(argType);
callHasFloatRegArgs |= argIsFloat;
@@ -998,7 +1007,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
if (varTypeIsStruct(argNode) || (argNode->gtOper == GT_LIST))
{
GenTreePtr actualArgNode = argNode;
- unsigned originalSize = 0;
+ unsigned originalSize = 0;
if (argNode->gtOper == GT_LIST)
{
@@ -1006,10 +1015,10 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
GenTreeArgList* argListPtr = argNode->AsArgList();
// Initialize the first register and the first regmask in our list
- regNumber targetReg = argReg;
- regMaskTP targetMask = genRegMask(targetReg);
- unsigned iterationNum = 0;
- originalSize = 0;
+ regNumber targetReg = argReg;
+ regMaskTP targetMask = genRegMask(targetReg);
+ unsigned iterationNum = 0;
+ originalSize = 0;
for (; argListPtr; argListPtr = argListPtr->Rest())
{
@@ -1017,13 +1026,13 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
assert(putArgRegNode->gtOper == GT_PUTARG_REG);
GenTreePtr putArgChild = putArgRegNode->gtOp.gtOp1;
- originalSize += REGSIZE_BYTES; // 8 bytes
+ originalSize += REGSIZE_BYTES; // 8 bytes
// Record the register requirements for the GT_PUTARG_REG node
putArgRegNode->gtLsraInfo.setDstCandidates(l, targetMask);
putArgRegNode->gtLsraInfo.setSrcCandidates(l, targetMask);
- // To avoid redundant moves, request that the argument child tree be
+ // To avoid redundant moves, request that the argument child tree be
// computed in the register in which the argument is passed to the call.
putArgChild->gtLsraInfo.setSrcCandidates(l, targetMask);
@@ -1044,10 +1053,10 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
noway_assert(!"Unsupported TYP_STRUCT arg kind");
}
- unsigned slots = ((unsigned)(roundUp(originalSize, REGSIZE_BYTES))) / REGSIZE_BYTES;
- regNumber curReg = argReg;
- regNumber lastReg = argIsFloat ? REG_ARG_FP_LAST : REG_ARG_LAST;
- unsigned remainingSlots = slots;
+ unsigned slots = ((unsigned)(roundUp(originalSize, REGSIZE_BYTES))) / REGSIZE_BYTES;
+ regNumber curReg = argReg;
+ regNumber lastReg = argIsFloat ? REG_ARG_FP_LAST : REG_ARG_LAST;
+ unsigned remainingSlots = slots;
while (remainingSlots > 0)
{
@@ -1064,7 +1073,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
noway_assert(remainingSlots == 0);
argNode->gtLsraInfo.internalIntCount = 0;
}
- else // A scalar argument (not a struct)
+ else // A scalar argument (not a struct)
{
// We consume one source
info->srcCount++;
@@ -1077,7 +1086,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
{
GenTreePtr putArgChild = argNode->gtOp.gtOp1;
- // To avoid redundant moves, request that the argument child tree be
+ // To avoid redundant moves, request that the argument child tree be
// computed in the register in which the argument is passed to the call.
putArgChild->gtLsraInfo.setSrcCandidates(l, argMask);
}
@@ -1124,10 +1133,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
// If it is a fast tail call, it is already preferenced to use IP0.
// Therefore, no need set src candidates on call tgt again.
- if (call->IsVarargs() &&
- callHasFloatRegArgs &&
- !call->IsFastTailCall() &&
- (ctrlExpr != nullptr))
+ if (call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall() && (ctrlExpr != nullptr))
{
// Don't assign the call target to any of the argument registers because
// we will use them to also pass floating point arguments as required
@@ -1154,7 +1160,7 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTree* argNode, fgArgTabEntryPtr info
GenTreePtr putArgChild = argNode->gtOp.gtOp1;
- // Initialize 'argNode' as not contained, as this is both the default case
+ // Initialize 'argNode' as not contained, as this is both the default case
// and how MakeSrcContained expects to find things setup.
//
argNode->gtLsraInfo.srcCount = 1;
@@ -1182,20 +1188,20 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTree* argNode, fgArgTabEntryPtr info
{
// We will generate all of the code for the GT_PUTARG_STK, the GT_OBJ and the GT_LCL_VAR_ADDR
// as one contained operation
- //
+ //
MakeSrcContained(putArgChild, objChild);
}
}
- // We will generate all of the code for the GT_PUTARG_STK and it's child node
+ // We will generate all of the code for the GT_PUTARG_STK and its child node
// as one contained operation
- //
+ //
MakeSrcContained(argNode, putArgChild);
}
}
else
{
- // We must not have a multi-reg struct
+ // We must not have a multi-reg struct
assert(info->numSlots == 1);
}
}
@@ -1211,13 +1217,12 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTree* argNode, fgArgTabEntryPtr info
//
// Notes:
-void
-Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
+void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
{
- GenTree* dstAddr = blkNode->Dest();
- unsigned size;
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
+ GenTree* dstAddr = blkNode->Dest();
+ unsigned size;
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
// Sources are dest address, initVal or source, and size
blkNode->gtLsraInfo.srcCount = 3;
@@ -1228,7 +1233,7 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
GenTreeInitBlk* initBlkNode = blkNode->AsInitBlk();
GenTreePtr blockSize = initBlkNode->Size();
- GenTreePtr initVal = initBlkNode->InitVal();
+ GenTreePtr initVal = initBlkNode->InitVal();
#if 0
// TODO-ARM64-CQ: Currently we generate a helper call for every
@@ -1289,7 +1294,7 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
{
GenTreeCpObj* cpObjNode = blkNode->AsCpObj();
- GenTreePtr clsTok = cpObjNode->ClsTok();
+ GenTreePtr clsTok = cpObjNode->ClsTok();
GenTreePtr srcAddr = cpObjNode->Source();
unsigned slots = cpObjNode->gtSlots;
@@ -1301,12 +1306,12 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
assert(dstAddr->gtType == TYP_BYREF || dstAddr->gtType == TYP_I_IMPL);
assert(clsTok->IsIconHandle());
- CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)clsTok->gtIntCon.gtIconVal;
- size_t classSize = compiler->info.compCompHnd->getClassSize(clsHnd);
- size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE);
+ CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)clsTok->gtIntCon.gtIconVal;
+ size_t classSize = compiler->info.compCompHnd->getClassSize(clsHnd);
+ size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE);
- // Currently, the EE always round up a class data structure so
- // we are not handling the case where we have a non multiple of pointer sized
+ // Currently, the EE always rounds up a class data structure so
+ // we are not handling the case where we have a non-multiple-of-pointer-sized
// struct. This behavior may change in the future, so in order to keep things correct
// let's assert it just to be safe. Going forward we should simply
// handle this case.
@@ -1329,7 +1334,7 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
GenTreeCpBlk* cpBlkNode = blkNode->AsCpBlk();
GenTreePtr blockSize = cpBlkNode->Size();
- GenTreePtr srcAddr = cpBlkNode->Source();
+ GenTreePtr srcAddr = cpBlkNode->Source();
#if 0
// In case of a CpBlk with a constant size and less than CPBLK_UNROLL_LIMIT size
@@ -1403,26 +1408,25 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
// Return Value:
// None.
-void
-Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
+void Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
{
NYI("TreeNodeInfoInitSIMD");
- GenTreeSIMD* simdTree = tree->AsSIMD();
- TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* lsra = m_lsra;
- info->dstCount = 1;
- switch(simdTree->gtSIMDIntrinsicID)
+ GenTreeSIMD* simdTree = tree->AsSIMD();
+ TreeNodeInfo* info = &(tree->gtLsraInfo);
+ LinearScan* lsra = m_lsra;
+ info->dstCount = 1;
+ switch (simdTree->gtSIMDIntrinsicID)
{
- case SIMDIntrinsicInit:
+ case SIMDIntrinsicInit:
{
// This sets all fields of a SIMD struct to the given value.
// Mark op1 as contained if it is either zero or int constant of all 1's.
info->srcCount = 1;
- GenTree* op1 = tree->gtOp.gtOp1;
- if (op1->IsIntegralConst(0) ||
- (simdTree->gtSIMDBaseType == TYP_INT && op1->IsCnsIntOrI() && op1->AsIntConCommon()->IconValue() == 0xffffffff) ||
- (simdTree->gtSIMDBaseType == TYP_LONG && op1->IsCnsIntOrI() && op1->AsIntConCommon()->IconValue() == 0xffffffffffffffffLL)
- )
+ GenTree* op1 = tree->gtOp.gtOp1;
+ if (op1->IsIntegralConst(0) || (simdTree->gtSIMDBaseType == TYP_INT && op1->IsCnsIntOrI() &&
+ op1->AsIntConCommon()->IconValue() == 0xffffffff) ||
+ (simdTree->gtSIMDBaseType == TYP_LONG && op1->IsCnsIntOrI() &&
+ op1->AsIntConCommon()->IconValue() == 0xffffffffffffffffLL))
{
MakeSrcContained(tree, tree->gtOp.gtOp1);
info->srcCount = 0;
@@ -1430,167 +1434,166 @@ Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
}
break;
- case SIMDIntrinsicInitN:
- info->srcCount = (int)(simdTree->gtSIMDSize / genTypeSize(simdTree->gtSIMDBaseType));
- // Need an internal register to stitch together all the values into a single vector in an XMM reg
- info->internalFloatCount = 1;
- info->setInternalCandidates(lsra, lsra->allSIMDRegs());
- break;
+ case SIMDIntrinsicInitN:
+ info->srcCount = (int)(simdTree->gtSIMDSize / genTypeSize(simdTree->gtSIMDBaseType));
+ // Need an internal register to stitch together all the values into a single vector in an XMM reg
+ info->internalFloatCount = 1;
+ info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+ break;
- case SIMDIntrinsicInitArray:
- // We have an array and an index, which may be contained.
- info->srcCount = 2;
- CheckImmedAndMakeContained(tree, tree->gtGetOp2());
- break;
+ case SIMDIntrinsicInitArray:
+ // We have an array and an index, which may be contained.
+ info->srcCount = 2;
+ CheckImmedAndMakeContained(tree, tree->gtGetOp2());
+ break;
- case SIMDIntrinsicDiv:
- // SSE2 has no instruction support for division on integer vectors
- noway_assert(varTypeIsFloating(simdTree->gtSIMDBaseType));
- info->srcCount = 2;
- break;
+ case SIMDIntrinsicDiv:
+ // SSE2 has no instruction support for division on integer vectors
+ noway_assert(varTypeIsFloating(simdTree->gtSIMDBaseType));
+ info->srcCount = 2;
+ break;
- case SIMDIntrinsicAbs:
- // This gets implemented as bitwise-And operation with a mask
- // and hence should never see it here.
- unreached();
- break;
+ case SIMDIntrinsicAbs:
+ // This gets implemented as a bitwise-AND operation with a mask
+ // and hence we should never see it here.
+ unreached();
+ break;
- case SIMDIntrinsicSqrt:
- // SSE2 has no instruction support for sqrt on integer vectors.
- noway_assert(varTypeIsFloating(simdTree->gtSIMDBaseType));
- info->srcCount = 1;
- break;
+ case SIMDIntrinsicSqrt:
+ // SSE2 has no instruction support for sqrt on integer vectors.
+ noway_assert(varTypeIsFloating(simdTree->gtSIMDBaseType));
+ info->srcCount = 1;
+ break;
- case SIMDIntrinsicAdd:
- case SIMDIntrinsicSub:
- case SIMDIntrinsicMul:
- case SIMDIntrinsicBitwiseAnd:
- case SIMDIntrinsicBitwiseAndNot:
- case SIMDIntrinsicBitwiseOr:
- case SIMDIntrinsicBitwiseXor:
- case SIMDIntrinsicMin:
- case SIMDIntrinsicMax:
- info->srcCount = 2;
-
- // SSE2 32-bit integer multiplication requires two temp regs
- if (simdTree->gtSIMDIntrinsicID == SIMDIntrinsicMul &&
- simdTree->gtSIMDBaseType == TYP_INT)
- {
- info->internalFloatCount = 2;
- info->setInternalCandidates(lsra, lsra->allSIMDRegs());
- }
- break;
+ case SIMDIntrinsicAdd:
+ case SIMDIntrinsicSub:
+ case SIMDIntrinsicMul:
+ case SIMDIntrinsicBitwiseAnd:
+ case SIMDIntrinsicBitwiseAndNot:
+ case SIMDIntrinsicBitwiseOr:
+ case SIMDIntrinsicBitwiseXor:
+ case SIMDIntrinsicMin:
+ case SIMDIntrinsicMax:
+ info->srcCount = 2;
- case SIMDIntrinsicEqual:
- info->srcCount = 2;
- break;
+ // SSE2 32-bit integer multiplication requires two temp regs
+ if (simdTree->gtSIMDIntrinsicID == SIMDIntrinsicMul && simdTree->gtSIMDBaseType == TYP_INT)
+ {
+ info->internalFloatCount = 2;
+ info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+ }
+ break;
- // SSE2 doesn't support < and <= directly on int vectors.
- // Instead we need to use > and >= with swapped operands.
- case SIMDIntrinsicLessThan:
- case SIMDIntrinsicLessThanOrEqual:
- info->srcCount = 2;
- noway_assert(!varTypeIsIntegral(simdTree->gtSIMDBaseType));
- break;
+ case SIMDIntrinsicEqual:
+ info->srcCount = 2;
+ break;
- // SIMDIntrinsicEqual is supported only on non-floating point base type vectors.
- // SSE2 cmpps/pd doesn't support > and >= directly on float/double vectors.
- // Instead we need to use < and <= with swapped operands.
- case SIMDIntrinsicGreaterThan:
- noway_assert(!varTypeIsFloating(simdTree->gtSIMDBaseType));
- info->srcCount = 2;
- break;
+ // SSE2 doesn't support < and <= directly on int vectors.
+ // Instead we need to use > and >= with swapped operands.
+ case SIMDIntrinsicLessThan:
+ case SIMDIntrinsicLessThanOrEqual:
+ info->srcCount = 2;
+ noway_assert(!varTypeIsIntegral(simdTree->gtSIMDBaseType));
+ break;
- case SIMDIntrinsicGreaterThanOrEqual:
- noway_assert(!varTypeIsFloating(simdTree->gtSIMDBaseType));
- info->srcCount = 2;
-
- // a >= b = (a==b) | (a>b)
- // To hold intermediate result of a==b and a>b we need two distinct
- // registers. We can use targetReg and one internal reg provided
- // they are distinct which is not guaranteed. Therefore, we request
- // two internal registers so that one of the internal registers has
- // to be different from targetReg.
- info->internalFloatCount = 2;
- info->setInternalCandidates(lsra, lsra->allSIMDRegs());
- break;
+ // SIMDIntrinsicEqual is supported only on non-floating point base type vectors.
+ // SSE2 cmpps/pd doesn't support > and >= directly on float/double vectors.
+ // Instead we need to use < and <= with swapped operands.
+ case SIMDIntrinsicGreaterThan:
+ noway_assert(!varTypeIsFloating(simdTree->gtSIMDBaseType));
+ info->srcCount = 2;
+ break;
- case SIMDIntrinsicOpEquality:
- case SIMDIntrinsicOpInEquality:
- // Need two SIMD registers as scratch.
- // See genSIMDIntrinsicRelOp() for details on code sequence generate and
- // the need for two scratch registers.
- info->srcCount = 2;
- info->internalFloatCount = 2;
- info->setInternalCandidates(lsra, lsra->allSIMDRegs());
- break;
+ case SIMDIntrinsicGreaterThanOrEqual:
+ noway_assert(!varTypeIsFloating(simdTree->gtSIMDBaseType));
+ info->srcCount = 2;
- case SIMDIntrinsicDotProduct:
- // Also need an internal register as scratch. Further we need that targetReg and internal reg
- // are two distinct regs. It is achieved by requesting two internal registers and one of them
- // has to be different from targetReg.
- //
- // See genSIMDIntrinsicDotProduct() for details on code sequence generated and
- // the need for scratch registers.
- info->srcCount = 2;
- info->internalFloatCount = 2;
- info->setInternalCandidates(lsra, lsra->allSIMDRegs());
- break;
+ // a >= b = (a==b) | (a>b)
+ // To hold intermediate result of a==b and a>b we need two distinct
+ // registers. We can use targetReg and one internal reg provided
+ // they are distinct which is not guaranteed. Therefore, we request
+ // two internal registers so that one of the internal registers has
+ // to be different from targetReg.
+ info->internalFloatCount = 2;
+ info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+ break;
+
+ case SIMDIntrinsicOpEquality:
+ case SIMDIntrinsicOpInEquality:
+ // Need two SIMD registers as scratch.
+ // See genSIMDIntrinsicRelOp() for details on the code sequence generated and
+ // the need for two scratch registers.
+ info->srcCount = 2;
+ info->internalFloatCount = 2;
+ info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+ break;
+
+ case SIMDIntrinsicDotProduct:
+ // Also need an internal register as scratch. Further, targetReg and the internal reg
+ // must be two distinct regs. This is achieved by requesting two internal registers, one
+ // of which has to be different from targetReg.
+ //
+ // See genSIMDIntrinsicDotProduct() for details on code sequence generated and
+ // the need for scratch registers.
+ info->srcCount = 2;
+ info->internalFloatCount = 2;
+ info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+ break;
- case SIMDIntrinsicGetItem:
- // This implements get_Item method. The sources are:
- // - the source SIMD struct
- // - index (which element to get)
- // The result is baseType of SIMD struct.
- info->srcCount = 2;
+ case SIMDIntrinsicGetItem:
+ // This implements get_Item method. The sources are:
+ // - the source SIMD struct
+ // - index (which element to get)
+ // The result is baseType of SIMD struct.
+ info->srcCount = 2;
- op2 = tree->gtGetOp2()
- // If the index is a constant, mark it as contained.
- if (CheckImmedAndMakeContained(tree, op2))
- {
+ op2 = tree->gtGetOp2();
+ // If the index is a constant, mark it as contained.
+ if (CheckImmedAndMakeContained(tree, op2))
+ {
+ info->srcCount = 1;
+ }
+
+ // If the index is not a constant, we will use the SIMD temp location to store the vector.
+ // Otherwise, if the baseType is floating point, the targetReg will be an xmm reg and we
+ // can use that in the process of extracting the element.
+ // In all other cases with constant index, we need a temp xmm register to extract the
+ // element if index is other than zero.
+ if (!op2->IsCnsIntOrI())
+ {
+ (void)comp->getSIMDInitTempVarNum();
+ }
+ else if (!varTypeIsFloating(simdTree->gtSIMDBaseType) && !op2->IsIntegralConst(0))
+ {
+ info->internalFloatCount = 1;
+ info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+ }
+ break;
+
+ case SIMDIntrinsicCast:
info->srcCount = 1;
- }
+ break;
- // If the index is not a constant, we will use the SIMD temp location to store the vector.
- // Otherwise, if the baseType is floating point, the targetReg will be a xmm reg and we
- // can use that in the process of extracting the element.
- // In all other cases with constant index, we need a temp xmm register to extract the
- // element if index is other than zero.
- if (!op2->IsCnsIntOrI())
- {
- (void) comp->getSIMDInitTempVarNum();
- }
- else if (!varTypeIsFloating(simdTree->gtSIMDBaseType) && !op2->IsIntegralConst(0))
- {
- info->internalFloatCount = 1;
- info->setInternalCandidates(lsra, lsra->allSIMDRegs());
- }
- break;
+ // These should have been transformed in terms of other intrinsics
+ case SIMDIntrinsicOpEquality:
+ case SIMDIntrinsicOpInEquality:
+ assert("OpEquality/OpInEquality intrinsics should not be seen during Lowering.");
+ unreached();
- case SIMDIntrinsicCast:
- info->srcCount = 1;
- break;
+ case SIMDIntrinsicGetX:
+ case SIMDIntrinsicGetY:
+ case SIMDIntrinsicGetZ:
+ case SIMDIntrinsicGetW:
+ case SIMDIntrinsicGetOne:
+ case SIMDIntrinsicGetZero:
+ case SIMDIntrinsicGetLength:
+ case SIMDIntrinsicGetAllOnes:
+ assert(!"Get intrinsics should not be seen during Lowering.");
+ unreached();
- // These should have been transformed in terms of other intrinsics
- case SIMDIntrinsicOpEquality:
- case SIMDIntrinsicOpInEquality:
- assert("OpEquality/OpInEquality intrinsics should not be seen during Lowering.");
- unreached();
-
- case SIMDIntrinsicGetX:
- case SIMDIntrinsicGetY:
- case SIMDIntrinsicGetZ:
- case SIMDIntrinsicGetW:
- case SIMDIntrinsicGetOne:
- case SIMDIntrinsicGetZero:
- case SIMDIntrinsicGetLength:
- case SIMDIntrinsicGetAllOnes:
- assert(!"Get intrinsics should not be seen during Lowering.");
- unreached();
-
- default:
- noway_assert(!"Unimplemented SIMD node type.");
- unreached();
+ default:
+ noway_assert(!"Unimplemented SIMD node type.");
+ unreached();
}
}
#endif // FEATURE_SIMD
@@ -1628,7 +1631,7 @@ void Lowering::LowerGCWriteBarrier(GenTree* tree)
// the 'src' goes into x15 (REG_WRITE_BARRIER)
//
addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER_DST_BYREF);
- src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER);
+ src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER);
#else
// For the standard JIT Helper calls
// op1 goes into REG_ARG_0 and
@@ -1655,30 +1658,30 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
assert(indirTree->OperIsIndir());
assert(indirTree->TypeGet() != TYP_STRUCT);
- GenTreePtr addr = indirTree->gtGetOp1();
+ GenTreePtr addr = indirTree->gtGetOp1();
TreeNodeInfo* info = &(indirTree->gtLsraInfo);
- GenTreePtr base = nullptr;
+ GenTreePtr base = nullptr;
GenTreePtr index = nullptr;
- unsigned cns = 0;
- unsigned mul;
- bool rev;
- bool modifiedSources = false;
+ unsigned cns = 0;
+ unsigned mul;
+ bool rev;
+ bool modifiedSources = false;
if (addr->OperGet() == GT_LEA)
{
GenTreeAddrMode* lea = addr->AsAddrMode();
- base = lea->Base();
- index = lea->Index();
- cns = lea->gtOffset;
+ base = lea->Base();
+ index = lea->Index();
+ cns = lea->gtOffset;
m_lsra->clearOperandCounts(addr);
- // The srcCount is decremented because addr is now "contained",
- // then we account for the base and index below, if they are non-null.
+ // The srcCount is decremented because addr is now "contained",
+ // then we account for the base and index below, if they are non-null.
info->srcCount--;
}
- else if (comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &mul, &cns, true /*nogen*/)
- && !(modifiedSources = AreSourcesPossiblyModified(indirTree, base, index)))
+ else if (comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &mul, &cns, true /*nogen*/) &&
+ !(modifiedSources = AreSourcesPossiblyModified(indirTree, base, index)))
{
// An addressing mode will be constructed that may cause some
// nodes to not need a register, and cause others' lifetimes to be extended
@@ -1697,14 +1700,12 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
// up of simple arithmetic operators, and the code generator
// only traverses one leg of each node.
- bool foundBase = (base == nullptr);
- bool foundIndex = (index == nullptr);
- GenTreePtr nextChild = nullptr;
- for (GenTreePtr child = addr;
- child != nullptr && !child->OperIsLeaf();
- child = nextChild)
+ bool foundBase = (base == nullptr);
+ bool foundIndex = (index == nullptr);
+ GenTreePtr nextChild = nullptr;
+ for (GenTreePtr child = addr; child != nullptr && !child->OperIsLeaf(); child = nextChild)
{
- nextChild = nullptr;
+ nextChild = nullptr;
GenTreePtr op1 = child->gtOp.gtOp1;
GenTreePtr op2 = (child->OperIsBinary()) ? child->gtOp.gtOp2 : nullptr;
@@ -1763,7 +1764,7 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
else
{
// it is nothing but a plain indir
- info->srcCount--; //base gets added in below
+ info->srcCount--; // base gets added in below
base = addr;
}
@@ -1779,7 +1780,7 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
// On ARM64 we may need a single internal register
// (when both conditions are true then we still only need a single internal register)
- if ((index != nullptr) && (cns != 0))
+ if ((index != nullptr) && (cns != 0))
{
// ARM64 does not support both Index and offset so we need an internal register
info->internalIntCount = 1;
@@ -1790,18 +1791,17 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
info->internalIntCount = 1;
}
}
-
void Lowering::LowerCmp(GenTreePtr tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
-
+
info->srcCount = 2;
info->dstCount = 1;
CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
}
-/* Lower GT_CAST(srcType, DstType) nodes.
+/* Lower GT_CAST(srcType, DstType) nodes.
*
* Casts from small int type to float/double are transformed as follows:
* GT_CAST(byte, float/double) = GT_CAST(GT_CAST(byte, int32), float/double)
@@ -1809,7 +1809,7 @@ void Lowering::LowerCmp(GenTreePtr tree)
* GT_CAST(int16, float/double) = GT_CAST(GT_CAST(int16, int32), float/double)
* GT_CAST(uint16, float/double) = GT_CAST(GT_CAST(uint16, int32), float/double)
*
- * SSE2 conversion instructions operate on signed integers. casts from Uint32/Uint64
+ * SSE2 conversion instructions operate on signed integers. Casts from Uint32/Uint64
* are morphed as follows by front-end and hence should not be seen here.
* GT_CAST(uint32, float/double) = GT_CAST(GT_CAST(uint32, long), float/double)
* GT_CAST(uint64, float) = GT_CAST(GT_CAST(uint64, double), float)
@@ -1823,23 +1823,23 @@ void Lowering::LowerCmp(GenTreePtr tree)
*
* SSE2 has instructions to convert a float/double value into a signed 32/64-bit
* integer. The above transformations help us to leverage those instructions.
- *
+ *
* Note that for the overflow conversions we still depend on helper calls and
- * don't expect to see them here.
- * i) GT_CAST(float/double, int type with overflow detection)
+ * don't expect to see them here.
+ * i) GT_CAST(float/double, int type with overflow detection)
*
*/
-void Lowering::LowerCast( GenTreePtr* ppTree)
+void Lowering::LowerCast(GenTreePtr* ppTree)
{
- GenTreePtr tree = *ppTree;
+ GenTreePtr tree = *ppTree;
assert(tree->OperGet() == GT_CAST);
- GenTreePtr op1 = tree->gtOp.gtOp1;
- var_types dstType = tree->CastToType();
- var_types srcType = op1->TypeGet();
- var_types tmpType = TYP_UNDEF;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ var_types dstType = tree->CastToType();
+ var_types srcType = op1->TypeGet();
+ var_types tmpType = TYP_UNDEF;
- // We should never see the following casts as they are expected to be lowered
+ // We should never see the following casts as they are expected to be lowered
// appropriately or converted into helper calls by the front-end.
// srcType = float/double dstType = * and overflow detecting cast
// Reason: must be converted to a helper call
@@ -1865,7 +1865,7 @@ void Lowering::LowerCast( GenTreePtr* ppTree)
if (tmpType != TYP_UNDEF)
{
GenTreePtr tmp = comp->gtNewCastNode(tmpType, op1, tmpType);
- tmp->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED|GTF_OVERFLOW|GTF_EXCEPT));
+ tmp->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED | GTF_OVERFLOW | GTF_EXCEPT));
tree->gtFlags &= ~GTF_UNSIGNED;
tree->gtOp.gtOp1 = tmp;
@@ -1878,20 +1878,20 @@ void Lowering::LowerRotate(GenTreePtr tree)
if (tree->OperGet() == GT_ROL)
{
// There is no ROL instruction on ARM. Convert ROL into ROR.
- GenTreePtr rotatedValue = tree->gtOp.gtOp1;
- unsigned rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
+ GenTreePtr rotatedValue = tree->gtOp.gtOp1;
+ unsigned rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
GenTreePtr rotateLeftIndexNode = tree->gtOp.gtOp2;
if (rotateLeftIndexNode->IsCnsIntOrI())
{
- ssize_t rotateLeftIndex = rotateLeftIndexNode->gtIntCon.gtIconVal;
- ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
+ ssize_t rotateLeftIndex = rotateLeftIndexNode->gtIntCon.gtIconVal;
+ ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
rotateLeftIndexNode->gtIntCon.gtIconVal = rotateRightIndex;
}
else
{
- GenTreePtr tmp = comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType),
- rotateLeftIndexNode);
+ GenTreePtr tmp =
+ comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
rotateLeftIndexNode->InsertAfterSelf(tmp);
tree->gtOp.gtOp2 = tmp;
}
@@ -1925,66 +1925,66 @@ bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode)
if (varTypeIsFloating(parentNode->TypeGet()))
{
// We can contain a floating point 0.0 constant in a compare instruction
- switch (parentNode->OperGet())
+ switch (parentNode->OperGet())
{
- default:
- return false;
-
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
- if (childNode->IsIntegralConst(0))
- return true;
- break;
+ default:
+ return false;
+
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
+ if (childNode->IsIntegralConst(0))
+ return true;
+ break;
}
}
else
{
- // Make sure we have an actual immediate
+ // Make sure we have an actual immediate
if (!childNode->IsCnsIntOrI())
return false;
if (childNode->IsIconHandle() && comp->opts.compReloc)
return false;
- ssize_t immVal = childNode->gtIntCon.gtIconVal;
- emitAttr attr = emitActualTypeSize(childNode->TypeGet());
- emitAttr size = EA_SIZE(attr);
+ ssize_t immVal = childNode->gtIntCon.gtIconVal;
+ emitAttr attr = emitActualTypeSize(childNode->TypeGet());
+ emitAttr size = EA_SIZE(attr);
switch (parentNode->OperGet())
{
- default:
- return false;
+ default:
+ return false;
- case GT_ADD:
- case GT_SUB:
- if (emitter::emitIns_valid_imm_for_add(immVal, size))
- return true;
- break;
+ case GT_ADD:
+ case GT_SUB:
+ if (emitter::emitIns_valid_imm_for_add(immVal, size))
+ return true;
+ break;
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
- if (emitter::emitIns_valid_imm_for_cmp(immVal, size))
- return true;
- break;
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
+ if (emitter::emitIns_valid_imm_for_cmp(immVal, size))
+ return true;
+ break;
- case GT_AND:
- case GT_OR:
- case GT_XOR:
- if (emitter::emitIns_valid_imm_for_alu(immVal, size))
- return true;
- break;
+ case GT_AND:
+ case GT_OR:
+ case GT_XOR:
+ if (emitter::emitIns_valid_imm_for_alu(immVal, size))
+ return true;
+ break;
- case GT_STORE_LCL_VAR:
- if (immVal == 0)
- return true;
- break;
+ case GT_STORE_LCL_VAR:
+ if (immVal == 0)
+ return true;
+ break;
}
}
diff --git a/src/jit/lowerxarch.cpp b/src/jit/lowerxarch.cpp
index 26443f08ed..318ef2c5e1 100644
--- a/src/jit/lowerxarch.cpp
+++ b/src/jit/lowerxarch.cpp
@@ -33,7 +33,7 @@ void Lowering::LowerRotate(GenTreePtr tree)
{
}
-// there is not much lowering to do with storing a local but
+// there is not much lowering to do with storing a local but
// we do some handling of contained immediates and widening operations of unsigneds
void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
{
@@ -48,9 +48,9 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
assert(storeLoc->OperGet() == GT_STORE_LCL_VAR);
// srcCount = number of registers in which the value is returned by call
- GenTreeCall* call = op1->AsCall();
+ GenTreeCall* call = op1->AsCall();
ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- info->srcCount = retTypeDesc->GetReturnRegCount();
+ info->srcCount = retTypeDesc->GetReturnRegCount();
// Call node srcCandidates = Bitwise-OR(allregs(GetReturnRegType(i))) for all i=0..RetRegCount-1
regMaskTP srcCandidates = m_lsra->allMultiRegCallNodeRegs(call);
@@ -58,7 +58,7 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
return;
}
-#ifdef FEATURE_SIMD
+#ifdef FEATURE_SIMD
if (storeLoc->TypeGet() == TYP_SIMD12)
{
// Need an additional register to extract upper 4 bytes of Vector3.
@@ -80,14 +80,13 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
}
// Try to widen the ops if they are going into a local var.
- if ((storeLoc->gtOper == GT_STORE_LCL_VAR) &&
- (storeLoc->gtOp1->gtOper == GT_CNS_INT))
+ if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (storeLoc->gtOp1->gtOper == GT_CNS_INT))
{
- GenTreeIntCon* con = storeLoc->gtOp1->AsIntCon();
- ssize_t ival = con->gtIconVal;
+ GenTreeIntCon* con = storeLoc->gtOp1->AsIntCon();
+ ssize_t ival = con->gtIconVal;
- unsigned varNum = storeLoc->gtLclNum;
- LclVarDsc* varDsc = comp->lvaTable + varNum;
+ unsigned varNum = storeLoc->gtLclNum;
+ LclVarDsc* varDsc = comp->lvaTable + varNum;
if (varDsc->lvIsSIMDType())
{
@@ -95,7 +94,7 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
}
unsigned size = genTypeSize(storeLoc);
// If we are storing a constant into a local variable
- // we extend the size of the store here
+ // we extend the size of the store here
if ((size < 4) && !varTypeIsStruct(varDsc))
{
if (!varTypeIsUnsigned(varDsc))
@@ -130,78 +129,77 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
}
}
-
/**
- * Takes care of annotating the register requirements
+ * Takes care of annotating the register requirements
* for every TreeNodeInfo struct that maps to each tree node.
* Preconditions:
* LSRA Has been initialized and there is a TreeNodeInfo node
* already allocated and initialized for every tree in the IR.
* Postconditions:
* Every TreeNodeInfo instance has the right annotations on register
- * requirements needed by LSRA to build the Interval Table (source,
+ * requirements needed by LSRA to build the Interval Table (source,
* destination and internal [temp] register counts).
* This code is refactored originally from LSRA.
*/
void Lowering::TreeNodeInfoInit(GenTree* stmt)
{
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
assert(stmt->gtStmt.gtStmtIsTopLevel());
GenTree* tree = stmt->gtStmt.gtStmtList;
-
+
while (tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
- GenTree* next = tree->gtNext;
+ GenTree* next = tree->gtNext;
switch (tree->OperGet())
{
GenTree* op1;
GenTree* op2;
- default:
- TreeNodeInfoInitSimple(tree);
- break;
+ default:
+ TreeNodeInfoInitSimple(tree);
+ break;
- case GT_LCL_FLD:
- info->srcCount = 0;
- info->dstCount = 1;
+ case GT_LCL_FLD:
+ info->srcCount = 0;
+ info->dstCount = 1;
#ifdef FEATURE_SIMD
- // Need an additional register to read upper 4 bytes of Vector3.
- if (tree->TypeGet() == TYP_SIMD12)
- {
- // We need an internal register different from targetReg in which 'tree' produces its result
- // because both targetReg and internal reg will be in use at the same time. This is achieved
- // by asking for two internal registers.
- info->internalFloatCount = 2;
- info->setInternalCandidates(m_lsra, m_lsra->allSIMDRegs());
- }
+ // Need an additional register to read upper 4 bytes of Vector3.
+ if (tree->TypeGet() == TYP_SIMD12)
+ {
+ // We need an internal register different from targetReg in which 'tree' produces its result
+ // because both targetReg and internal reg will be in use at the same time. This is achieved
+ // by asking for two internal registers.
+ info->internalFloatCount = 2;
+ info->setInternalCandidates(m_lsra, m_lsra->allSIMDRegs());
+ }
#endif
- break;
-
- case GT_STORE_LCL_FLD:
- case GT_STORE_LCL_VAR:
- info->srcCount = 1;
- info->dstCount = 0;
- LowerStoreLoc(tree->AsLclVarCommon());
- break;
+ break;
- case GT_BOX:
- noway_assert(!"box should not exist here");
- // The result of 'op1' is also the final result
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ case GT_STORE_LCL_FLD:
+ case GT_STORE_LCL_VAR:
+ info->srcCount = 1;
+ info->dstCount = 0;
+ LowerStoreLoc(tree->AsLclVarCommon());
+ break;
- case GT_PHYSREGDST:
- info->srcCount = 1;
- info->dstCount = 0;
- break;
+ case GT_BOX:
+ noway_assert(!"box should not exist here");
+ // The result of 'op1' is also the final result
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
- case GT_COMMA:
+ case GT_PHYSREGDST:
+ info->srcCount = 1;
+ info->dstCount = 0;
+ break;
+
+ case GT_COMMA:
{
GenTreePtr firstOperand;
GenTreePtr secondOperand;
@@ -218,308 +216,308 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
if (firstOperand->TypeGet() != TYP_VOID)
{
firstOperand->gtLsraInfo.isLocalDefUse = true;
- firstOperand->gtLsraInfo.dstCount = 0;
+ firstOperand->gtLsraInfo.dstCount = 0;
}
if (tree->TypeGet() == TYP_VOID && secondOperand->TypeGet() != TYP_VOID)
{
secondOperand->gtLsraInfo.isLocalDefUse = true;
- secondOperand->gtLsraInfo.dstCount = 0;
+ secondOperand->gtLsraInfo.dstCount = 0;
}
}
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
- case GT_LIST:
- case GT_ARGPLACE:
- case GT_NO_OP:
- case GT_START_NONGC:
- case GT_PROF_HOOK:
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ case GT_LIST:
+ case GT_ARGPLACE:
+ case GT_NO_OP:
+ case GT_START_NONGC:
+ case GT_PROF_HOOK:
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
- case GT_CNS_DBL:
- info->srcCount = 0;
- info->dstCount = 1;
- break;
+ case GT_CNS_DBL:
+ info->srcCount = 0;
+ info->dstCount = 1;
+ break;
#if !defined(_TARGET_64BIT_)
- case GT_LONG:
- if (tree->gtNext == nullptr)
- {
- // An uncontained GT_LONG node needs to consume its source operands
- info->srcCount = 2;
- }
- else
- {
- // Passthrough
- info->srcCount = 0;
- }
- info->dstCount = 0;
- break;
+ case GT_LONG:
+ if (tree->gtNext == nullptr)
+ {
+ // An uncontained GT_LONG node needs to consume its source operands
+ info->srcCount = 2;
+ }
+ else
+ {
+ // Passthrough
+ info->srcCount = 0;
+ }
+ info->dstCount = 0;
+ break;
#endif // !defined(_TARGET_64BIT_)
- case GT_QMARK:
- case GT_COLON:
- info->srcCount = 0;
- info->dstCount = 0;
- unreached();
- break;
-
- case GT_RETURN:
- TreeNodeInfoInitReturn(tree);
- break;
-
- case GT_RETFILT:
- if (tree->TypeGet() == TYP_VOID)
- {
+ case GT_QMARK:
+ case GT_COLON:
info->srcCount = 0;
info->dstCount = 0;
- }
- else
- {
- assert(tree->TypeGet() == TYP_INT);
+ unreached();
+ break;
- info->srcCount = 1;
- info->dstCount = 1;
+ case GT_RETURN:
+ TreeNodeInfoInitReturn(tree);
+ break;
- info->setSrcCandidates(l, RBM_INTRET);
- tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, RBM_INTRET);
- }
- break;
+ case GT_RETFILT:
+ if (tree->TypeGet() == TYP_VOID)
+ {
+ info->srcCount = 0;
+ info->dstCount = 0;
+ }
+ else
+ {
+ assert(tree->TypeGet() == TYP_INT);
+
+ info->srcCount = 1;
+ info->dstCount = 1;
+
+ info->setSrcCandidates(l, RBM_INTRET);
+ tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, RBM_INTRET);
+ }
+ break;
// A GT_NOP is a passthrough (if it is void, or if it has
// a child), but must be considered to produce a dummy value if it
// has a type but no child
- case GT_NOP:
- info->srcCount = 0;
- if (tree->TypeGet() != TYP_VOID && tree->gtOp.gtOp1 == nullptr)
- {
- info->dstCount = 1;
- }
- else
- {
- info->dstCount = 0;
- }
- break;
+ case GT_NOP:
+ info->srcCount = 0;
+ if (tree->TypeGet() != TYP_VOID && tree->gtOp.gtOp1 == nullptr)
+ {
+ info->dstCount = 1;
+ }
+ else
+ {
+ info->dstCount = 0;
+ }
+ break;
- case GT_JTRUE:
- info->srcCount = 0;
- info->dstCount = 0;
- l->clearDstCount(tree->gtOp.gtOp1);
- break;
+ case GT_JTRUE:
+ info->srcCount = 0;
+ info->dstCount = 0;
+ l->clearDstCount(tree->gtOp.gtOp1);
+ break;
- case GT_JMP:
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ case GT_JMP:
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
- case GT_SWITCH:
- // This should never occur since switch nodes must not be visible at this
- // point in the JIT.
- info->srcCount = 0;
- info->dstCount = 0; // To avoid getting uninit errors.
- noway_assert(!"Switch must be lowered at this point");
- break;
+ case GT_SWITCH:
+ // This should never occur since switch nodes must not be visible at this
+ // point in the JIT.
+ info->srcCount = 0;
+ info->dstCount = 0; // To avoid getting uninit errors.
+ noway_assert(!"Switch must be lowered at this point");
+ break;
- case GT_JMPTABLE:
- info->srcCount = 0;
- info->dstCount = 1;
- break;
+ case GT_JMPTABLE:
+ info->srcCount = 0;
+ info->dstCount = 1;
+ break;
- case GT_SWITCH_TABLE:
- info->srcCount = 2;
- info->internalIntCount = 1;
- info->dstCount = 0;
- break;
+ case GT_SWITCH_TABLE:
+ info->srcCount = 2;
+ info->internalIntCount = 1;
+ info->dstCount = 0;
+ break;
- case GT_ASG:
- case GT_ASG_ADD:
- case GT_ASG_SUB:
- noway_assert(!"We should never hit any assignment operator in lowering");
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ case GT_ASG:
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ noway_assert(!"We should never hit any assignment operator in lowering");
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
#if !defined(_TARGET_64BIT_)
- case GT_ADD_LO:
- case GT_ADD_HI:
- case GT_SUB_LO:
- case GT_SUB_HI:
+ case GT_ADD_LO:
+ case GT_ADD_HI:
+ case GT_SUB_LO:
+ case GT_SUB_HI:
#endif
- case GT_ADD:
- case GT_SUB:
- // SSE2 arithmetic instructions doesn't support the form "op mem, xmm".
- // Rather they only support "op xmm, mem/xmm" form.
- if (varTypeIsFloating(tree->TypeGet()))
- {
- // overflow operations aren't supported on float/double types.
- assert(!tree->gtOverflow());
+ case GT_ADD:
+ case GT_SUB:
+ // SSE2 arithmetic instructions don't support the form "op mem, xmm".
+ // Rather, they only support the "op xmm, mem/xmm" form.
+ if (varTypeIsFloating(tree->TypeGet()))
+ {
+ // overflow operations aren't supported on float/double types.
+ assert(!tree->gtOverflow());
- op1 = tree->gtGetOp1();
- op2 = tree->gtGetOp2();
+ op1 = tree->gtGetOp1();
+ op2 = tree->gtGetOp2();
- // No implicit conversions at this stage as the expectation is that
- // everything is made explicit by adding casts.
- assert(op1->TypeGet() == op2->TypeGet());
+ // No implicit conversions at this stage as the expectation is that
+ // everything is made explicit by adding casts.
+ assert(op1->TypeGet() == op2->TypeGet());
- info->srcCount = 2;
- info->dstCount = 1;
-
- if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
- {
- MakeSrcContained(tree, op2);
- }
- else if (tree->OperIsCommutative() &&
- (op1->IsCnsNonZeroFltOrDbl() || (op1->isMemoryOp() && IsSafeToContainMem(tree, op1))))
- {
- // Though we have GT_ADD(op1=memOp, op2=non-memOp, we try to reorder the operands
- // as long as it is safe so that the following efficient code sequence is generated:
- // addss/sd targetReg, memOp (if op1Reg == targetReg) OR
- // movaps targetReg, op2Reg; addss/sd targetReg, [memOp]
- //
- // Instead of
- // movss op1Reg, [memOp]; addss/sd targetReg, Op2Reg (if op1Reg == targetReg) OR
- // movss op1Reg, [memOp]; movaps targetReg, op1Reg, addss/sd targetReg, Op2Reg
- MakeSrcContained(tree, op1);
+ info->srcCount = 2;
+ info->dstCount = 1;
+
+ if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+ {
+ MakeSrcContained(tree, op2);
+ }
+ else if (tree->OperIsCommutative() &&
+ (op1->IsCnsNonZeroFltOrDbl() || (op1->isMemoryOp() && IsSafeToContainMem(tree, op1))))
+ {
+ // Though we have GT_ADD(op1=memOp, op2=non-memOp), we try to reorder the operands
+ // as long as it is safe so that the following efficient code sequence is generated:
+ // addss/sd targetReg, memOp (if op1Reg == targetReg) OR
+ // movaps targetReg, op2Reg; addss/sd targetReg, [memOp]
+ //
+ // Instead of
+ // movss op1Reg, [memOp]; addss/sd targetReg, Op2Reg (if op1Reg == targetReg) OR
+ // movss op1Reg, [memOp]; movaps targetReg, op1Reg, addss/sd targetReg, Op2Reg
+ MakeSrcContained(tree, op1);
+ }
+ else
+ {
+ // If there are no containable operands, we can make an operand reg optional.
+ SetRegOptionalForBinOp(tree);
+ }
+ break;
}
- else
+
+ __fallthrough;
+
+ case GT_AND:
+ case GT_OR:
+ case GT_XOR:
+ TreeNodeInfoInitLogicalOp(tree);
+ break;
+
+ case GT_RETURNTRAP:
+ // this just turns into a compare of its child with an int
+ // + a conditional call
+ info->srcCount = 1;
+ info->dstCount = 0;
+ if (tree->gtOp.gtOp1->isIndir())
{
- // If there are no containable operands, we can make an operand reg optional.
- SetRegOptionalForBinOp(tree);
+ MakeSrcContained(tree, tree->gtOp.gtOp1);
}
+ info->internalIntCount = 1;
+ info->setInternalCandidates(l, l->allRegs(TYP_INT));
break;
- }
-
- __fallthrough;
- case GT_AND:
- case GT_OR:
- case GT_XOR:
- TreeNodeInfoInitLogicalOp(tree);
- break;
-
- case GT_RETURNTRAP:
- // this just turns into a compare of its child with an int
- // + a conditional call
- info->srcCount = 1;
- info->dstCount = 0;
- if (tree->gtOp.gtOp1->isIndir())
- {
- MakeSrcContained(tree, tree->gtOp.gtOp1);
- }
- info->internalIntCount = 1;
- info->setInternalCandidates(l, l->allRegs(TYP_INT));
- break;
+ case GT_MOD:
+ case GT_DIV:
+ case GT_UMOD:
+ case GT_UDIV:
+ TreeNodeInfoInitModDiv(tree);
+ break;
- case GT_MOD:
- case GT_DIV:
- case GT_UMOD:
- case GT_UDIV:
- TreeNodeInfoInitModDiv(tree);
- break;
+ case GT_MUL:
+ case GT_MULHI:
+ SetMulOpCounts(tree);
+ break;
- case GT_MUL:
- case GT_MULHI:
- SetMulOpCounts(tree);
- break;
-
- case GT_INTRINSIC:
- TreeNodeInfoInitIntrinsic(tree);
- break;
+ case GT_INTRINSIC:
+ TreeNodeInfoInitIntrinsic(tree);
+ break;
#ifdef FEATURE_SIMD
- case GT_SIMD:
- TreeNodeInfoInitSIMD(tree);
- break;
+ case GT_SIMD:
+ TreeNodeInfoInitSIMD(tree);
+ break;
#endif // FEATURE_SIMD
- case GT_CAST:
- TreeNodeInfoInitCast(tree);
- break;
+ case GT_CAST:
+ TreeNodeInfoInitCast(tree);
+ break;
- case GT_NEG:
- info->srcCount = 1;
- info->dstCount = 1;
+ case GT_NEG:
+ info->srcCount = 1;
+ info->dstCount = 1;
- // TODO-XArch-CQ:
- // SSE instruction set doesn't have an instruction to negate a number.
- // The recommended way is to xor the float/double number with a bitmask.
- // The only way to xor is using xorps or xorpd both of which operate on
- // 128-bit operands. To hold the bit-mask we would need another xmm
- // register or a 16-byte aligned 128-bit data constant. Right now emitter
- // lacks the support for emitting such constants or instruction with mem
- // addressing mode referring to a 128-bit operand. For now we use an
- // internal xmm register to load 32/64-bit bitmask from data section.
- // Note that by trading additional data section memory (128-bit) we can
- // save on the need for an internal register and also a memory-to-reg
- // move.
- //
- // Note: another option to avoid internal register requirement is by
- // lowering as GT_SUB(0, src). This will generate code different from
- // Jit64 and could possibly result in compat issues (?).
- if (varTypeIsFloating(tree))
- {
- info->internalFloatCount = 1;
- info->setInternalCandidates(l, l->internalFloatRegCandidates());
- }
- break;
-
- case GT_NOT:
- info->srcCount = 1;
- info->dstCount = 1;
- break;
+ // TODO-XArch-CQ:
+ // SSE instruction set doesn't have an instruction to negate a number.
+ // The recommended way is to xor the float/double number with a bitmask.
+ // The only way to xor is using xorps or xorpd both of which operate on
+ // 128-bit operands. To hold the bit-mask we would need another xmm
+ // register or a 16-byte aligned 128-bit data constant. Right now emitter
+ // lacks the support for emitting such constants or instruction with mem
+ // addressing mode referring to a 128-bit operand. For now we use an
+ // internal xmm register to load 32/64-bit bitmask from data section.
+ // Note that by trading additional data section memory (128-bit) we can
+ // save on the need for an internal register and also a memory-to-reg
+ // move.
+ //
+ // Note: another option to avoid internal register requirement is by
+ // lowering as GT_SUB(0, src). This will generate code different from
+ // Jit64 and could possibly result in compat issues (?).
+ if (varTypeIsFloating(tree))
+ {
+ info->internalFloatCount = 1;
+ info->setInternalCandidates(l, l->internalFloatRegCandidates());
+ }
+ break;
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROL:
- case GT_ROR:
- TreeNodeInfoInitShiftRotate(tree);
- break;
+ case GT_NOT:
+ info->srcCount = 1;
+ info->dstCount = 1;
+ break;
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
- LowerCmp(tree);
- break;
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROL:
+ case GT_ROR:
+ TreeNodeInfoInitShiftRotate(tree);
+ break;
- case GT_CKFINITE:
- info->srcCount = 1;
- info->dstCount = 1;
- info->internalIntCount = 1;
- break;
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
+ LowerCmp(tree);
+ break;
- case GT_CMPXCHG:
- info->srcCount = 3;
- info->dstCount = 1;
+ case GT_CKFINITE:
+ info->srcCount = 1;
+ info->dstCount = 1;
+ info->internalIntCount = 1;
+ break;
- // comparand is preferenced to RAX.
- // Remaining two operands can be in any reg other than RAX.
- tree->gtCmpXchg.gtOpComparand->gtLsraInfo.setSrcCandidates(l, RBM_RAX);
- tree->gtCmpXchg.gtOpLocation->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~RBM_RAX);
- tree->gtCmpXchg.gtOpValue->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~RBM_RAX);
- tree->gtLsraInfo.setDstCandidates(l, RBM_RAX);
- break;
+ case GT_CMPXCHG:
+ info->srcCount = 3;
+ info->dstCount = 1;
- case GT_LOCKADD:
- info->srcCount = 2;
- info->dstCount = 0;
+ // comparand is preferenced to RAX.
+ // Remaining two operands can be in any reg other than RAX.
+ tree->gtCmpXchg.gtOpComparand->gtLsraInfo.setSrcCandidates(l, RBM_RAX);
+ tree->gtCmpXchg.gtOpLocation->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~RBM_RAX);
+ tree->gtCmpXchg.gtOpValue->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~RBM_RAX);
+ tree->gtLsraInfo.setDstCandidates(l, RBM_RAX);
+ break;
- CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
- break;
+ case GT_LOCKADD:
+ info->srcCount = 2;
+ info->dstCount = 0;
- case GT_CALL:
- TreeNodeInfoInitCall(tree->AsCall());
- break;
+ CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
+ break;
- case GT_ADDR:
+ case GT_CALL:
+ TreeNodeInfoInitCall(tree->AsCall());
+ break;
+
+ case GT_ADDR:
{
// For a GT_ADDR, the child node should not be evaluated into a register
GenTreePtr child = tree->gtOp.gtOp1;
@@ -531,36 +529,36 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
break;
#ifdef _TARGET_X86_
- case GT_OBJ:
- NYI_X86("GT_OBJ");
+ case GT_OBJ:
+ NYI_X86("GT_OBJ");
#endif //_TARGET_X86_
- case GT_INITBLK:
- case GT_COPYBLK:
- case GT_COPYOBJ:
- TreeNodeInfoInitBlockStore(tree->AsBlkOp());
- break;
+ case GT_INITBLK:
+ case GT_COPYBLK:
+ case GT_COPYOBJ:
+ TreeNodeInfoInitBlockStore(tree->AsBlkOp());
+ break;
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- case GT_PUTARG_STK:
- TreeNodeInfoInitPutArgStk(tree);
- break;
+ case GT_PUTARG_STK:
+ TreeNodeInfoInitPutArgStk(tree);
+ break;
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- case GT_LCLHEAP:
- TreeNodeInfoInitLclHeap(tree);
- break;
+ case GT_LCLHEAP:
+ TreeNodeInfoInitLclHeap(tree);
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
{
GenTreeBoundsChk* node = tree->AsBoundsChk();
// Consumes arrLen & index - has no result
info->srcCount = 2;
info->dstCount = 0;
-
+
GenTreePtr other;
if (CheckImmedAndMakeContained(tree, node->gtIndex))
{
@@ -585,7 +583,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
{
MakeSrcContained(tree, other);
}
- else
+ else
{
// We can mark 'other' as reg optional, since it is not contained.
SetRegOptional(other);
@@ -594,56 +592,56 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
}
break;
- case GT_ARR_ELEM:
- // These must have been lowered to GT_ARR_INDEX
- noway_assert(!"We should never see a GT_ARR_ELEM in lowering");
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ case GT_ARR_ELEM:
+ // These must have been lowered to GT_ARR_INDEX
+ noway_assert(!"We should never see a GT_ARR_ELEM in lowering");
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
- case GT_ARR_INDEX:
- info->srcCount = 2;
- info->dstCount = 1;
- // For GT_ARR_INDEX, the lifetime of the arrObj must be extended because it is actually used multiple
- // times while the result is being computed.
- tree->AsArrIndex()->ArrObj()->gtLsraInfo.isDelayFree = true;
- info->hasDelayFreeSrc = true;
- break;
+ case GT_ARR_INDEX:
+ info->srcCount = 2;
+ info->dstCount = 1;
+ // For GT_ARR_INDEX, the lifetime of the arrObj must be extended because it is actually used multiple
+ // times while the result is being computed.
+ tree->AsArrIndex()->ArrObj()->gtLsraInfo.isDelayFree = true;
+ info->hasDelayFreeSrc = true;
+ break;
- case GT_ARR_OFFSET:
- // This consumes the offset, if any, the arrObj and the effective index,
- // and produces the flattened offset for this dimension.
- info->srcCount = 3;
- info->dstCount = 1;
- info->internalIntCount = 1;
- // we don't want to generate code for this
- if (tree->gtArrOffs.gtOffset->IsIntegralConst(0))
- {
- MakeSrcContained(tree, tree->gtArrOffs.gtOffset);
- }
- break;
+ case GT_ARR_OFFSET:
+ // This consumes the offset, if any, the arrObj and the effective index,
+ // and produces the flattened offset for this dimension.
+ info->srcCount = 3;
+ info->dstCount = 1;
+ info->internalIntCount = 1;
+ // we don't want to generate code for this
+ if (tree->gtArrOffs.gtOffset->IsIntegralConst(0))
+ {
+ MakeSrcContained(tree, tree->gtArrOffs.gtOffset);
+ }
+ break;
- case GT_LEA:
- // The LEA usually passes its operands through to the GT_IND, in which case we'll
- // clear the info->srcCount and info->dstCount later, but we may be instantiating an address,
- // so we set them here.
- info->srcCount = 0;
- if (tree->AsAddrMode()->HasBase())
- {
- info->srcCount++;
- }
- if (tree->AsAddrMode()->HasIndex())
- {
- info->srcCount++;
- }
- info->dstCount = 1;
- break;
+ case GT_LEA:
+ // The LEA usually passes its operands through to the GT_IND, in which case we'll
+ // clear the info->srcCount and info->dstCount later, but we may be instantiating an address,
+ // so we set them here.
+ info->srcCount = 0;
+ if (tree->AsAddrMode()->HasBase())
+ {
+ info->srcCount++;
+ }
+ if (tree->AsAddrMode()->HasIndex())
+ {
+ info->srcCount++;
+ }
+ info->dstCount = 1;
+ break;
- case GT_STOREIND:
+ case GT_STOREIND:
{
info->srcCount = 2;
info->dstCount = 0;
- GenTree* src = tree->gtOp.gtOp2;
+ GenTree* src = tree->gtOp.gtOp2;
if (compiler->codeGen->gcInfo.gcIsWriteBarrierAsgNode(tree))
{
@@ -654,8 +652,8 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
// If the source is a containable immediate, make it contained, unless it is
// an int-size or larger store of zero to memory, because we can generate smaller code
// by zeroing a register and then storing it.
- if (IsContainableImmed(tree, src) &&
- (!src->IsIntegralConst(0) || varTypeIsSmall(tree) || tree->gtGetOp1()->OperGet() == GT_CLS_VAR_ADDR))
+ if (IsContainableImmed(tree, src) && (!src->IsIntegralConst(0) || varTypeIsSmall(tree) ||
+ tree->gtGetOp1()->OperGet() == GT_CLS_VAR_ADDR))
{
MakeSrcContained(tree, src);
}
@@ -677,43 +675,43 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
SetIndirAddrOpCounts(tree);
}
break;
-
- case GT_NULLCHECK:
- info->dstCount = 0;
- info->srcCount = 1;
- info->isLocalDefUse = true;
- break;
- case GT_IND:
- info->dstCount = 1;
- info->srcCount = 1;
- SetIndirAddrOpCounts(tree);
- break;
+ case GT_NULLCHECK:
+ info->dstCount = 0;
+ info->srcCount = 1;
+ info->isLocalDefUse = true;
+ break;
- case GT_CATCH_ARG:
- info->srcCount = 0;
- info->dstCount = 1;
- info->setDstCandidates(l, RBM_EXCEPTION_OBJECT);
- break;
+ case GT_IND:
+ info->dstCount = 1;
+ info->srcCount = 1;
+ SetIndirAddrOpCounts(tree);
+ break;
+
+ case GT_CATCH_ARG:
+ info->srcCount = 0;
+ info->dstCount = 1;
+ info->setDstCandidates(l, RBM_EXCEPTION_OBJECT);
+ break;
#if !FEATURE_EH_FUNCLETS
- case GT_END_LFIN:
- info->srcCount = 0;
- info->dstCount = 0;
- break;
+ case GT_END_LFIN:
+ info->srcCount = 0;
+ info->dstCount = 0;
+ break;
#endif
- case GT_CLS_VAR:
- info->srcCount = 0;
- // GT_CLS_VAR, by the time we reach the backend, must always
- // be a pure use.
- // It will produce a result of the type of the
- // node, and use an internal register for the address.
+ case GT_CLS_VAR:
+ info->srcCount = 0;
+ // GT_CLS_VAR, by the time we reach the backend, must always
+ // be a pure use.
+ // It will produce a result of the type of the
+ // node, and use an internal register for the address.
- info->dstCount = 1;
- assert((tree->gtFlags & (GTF_VAR_DEF|GTF_VAR_USEASG|GTF_VAR_USEDEF)) == 0);
- info->internalIntCount = 1;
- break;
+ info->dstCount = 1;
+ assert((tree->gtFlags & (GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_USEDEF)) == 0);
+ info->internalIntCount = 1;
+ break;
} // end switch (tree->OperGet())
// If op2 of a binary-op gets marked as contained, then binary-op srcCount will be 1.
@@ -724,7 +722,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
{
GenTree* op1 = tree->gtOp.gtOp1;
GenTree* op2 = tree->gtOp.gtOp2;
-
+
// Commutative opers like add/mul/and/or/xor could reverse the order of
// operands if it is safe to do so. In such a case we would like op2 to be
// target preferenced instead of op1.
@@ -772,7 +770,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
// Otherwise, we set it on delayUseSrc itself.
if (delayUseSrc->isIndir() && (delayUseSrc->gtLsraInfo.dstCount == 0))
{
- GenTree* base = delayUseSrc->AsIndir()->Base();
+ GenTree* base = delayUseSrc->AsIndir()->Base();
GenTree* index = delayUseSrc->AsIndir()->Index();
if (base != nullptr)
{
@@ -795,11 +793,11 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
#ifdef _TARGET_X86_
// Exclude RBM_NON_BYTE_REGS from dst candidates of tree node and src candidates of operands
// if the tree node is a byte type.
- //
+ //
// Example1: GT_STOREIND(byte, addr, op2) - storeind of byte sized value from op2 into mem 'addr'
// Storeind itself will not produce any value and hence dstCount=0. But op2 could be TYP_INT
// value. In this case we need to exclude esi/edi from the src candidates of op2.
- //
+ //
// Example2: GT_CAST(int <- bool <- int) - here type of GT_CAST node is int and castToType is bool.
//
// Example3: GT_EQ(int, op1 of type ubyte, op2 of type ubyte) - in this case codegen uses
@@ -811,10 +809,8 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
// Though this looks conservative in theory, in practice we could not think of a case where
// the below logic leads to conservative register specification. In future when or if we find
// one such case, this logic needs to be fine tuned for that case(s).
- if (varTypeIsByte(tree) ||
- ((tree->OperGet() == GT_CAST) && varTypeIsByte(tree->CastToType())) ||
- (tree->OperIsCompare() && varTypeIsByte(tree->gtGetOp1()) && varTypeIsByte(tree->gtGetOp2()))
- )
+ if (varTypeIsByte(tree) || ((tree->OperGet() == GT_CAST) && varTypeIsByte(tree->CastToType())) ||
+ (tree->OperIsCompare() && varTypeIsByte(tree->gtGetOp1()) && varTypeIsByte(tree->gtGetOp2())))
{
regMaskTP regMask;
if (info->dstCount > 0)
@@ -827,7 +823,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
if (tree->OperIsSimple() && (info->srcCount > 0))
{
// No need to set src candidates on a contained child operand.
- GenTree *op = tree->gtOp.gtOp1;
+ GenTree* op = tree->gtOp.gtOp1;
assert(op != nullptr);
bool containedNode = (op->gtLsraInfo.srcCount == 0) && (op->gtLsraInfo.dstCount == 0);
if (!containedNode)
@@ -839,8 +835,8 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
if (tree->OperIsBinary() && (tree->gtOp.gtOp2 != nullptr))
{
- op = tree->gtOp.gtOp2;
- containedNode = (op->gtLsraInfo.srcCount == 0) && (op->gtLsraInfo.dstCount == 0);
+ op = tree->gtOp.gtOp2;
+ containedNode = (op->gtLsraInfo.srcCount == 0) && (op->gtLsraInfo.dstCount == 0);
if (!containedNode)
{
regMask = op->gtLsraInfo.getSrcCandidates(l);
@@ -853,14 +849,12 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
#endif //_TARGET_X86_
// We need to be sure that we've set info->srcCount and info->dstCount appropriately
- assert((info->dstCount < 2) ||
- (tree->IsMultiRegCall() && info->dstCount == MAX_RET_REG_COUNT));
+ assert((info->dstCount < 2) || (tree->IsMultiRegCall() && info->dstCount == MAX_RET_REG_COUNT));
tree = next;
}
}
-
//------------------------------------------------------------------------
// TreeNodeInfoInitSimple: Sets the srcCount and dstCount for all the trees
// without special handling based on the tree node type.
@@ -874,8 +868,8 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
void Lowering::TreeNodeInfoInitSimple(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
- unsigned kind = tree->OperKind();
- info->dstCount = (tree->TypeGet() == TYP_VOID) ? 0 : 1;
+ unsigned kind = tree->OperKind();
+ info->dstCount = (tree->TypeGet() == TYP_VOID) ? 0 : 1;
if (kind & (GTK_CONST | GTK_LEAF))
{
info->srcCount = 0;
@@ -897,7 +891,6 @@ void Lowering::TreeNodeInfoInitSimple(GenTree* tree)
}
}
-
//------------------------------------------------------------------------
// TreeNodeInfoInitReturn: Set the NodeInfo for a GT_RETURN.
//
@@ -907,12 +900,11 @@ void Lowering::TreeNodeInfoInitSimple(GenTree* tree)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitReturn(GenTree* tree)
+void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
{
- TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
+ TreeNodeInfo* info = &(tree->gtLsraInfo);
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
#if !defined(_TARGET_64BIT_)
if (tree->TypeGet() == TYP_LONG)
@@ -929,7 +921,7 @@ Lowering::TreeNodeInfoInitReturn(GenTree* tree)
else
#endif // !defined(_TARGET_64BIT_)
{
- GenTree* op1 = tree->gtGetOp1();
+ GenTree* op1 = tree->gtGetOp1();
regMaskTP useCandidates = RBM_NONE;
info->srcCount = (tree->TypeGet() == TYP_VOID) ? 0 : 1;
@@ -937,12 +929,12 @@ Lowering::TreeNodeInfoInitReturn(GenTree* tree)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
if (varTypeIsStruct(tree))
- {
+ {
// op1 has to be either an lclvar or a multi-reg returning call
if (op1->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* lclVarCommon = op1->AsLclVarCommon();
- LclVarDsc* varDsc = &(compiler->lvaTable[lclVarCommon->gtLclNum]);
+ LclVarDsc* varDsc = &(compiler->lvaTable[lclVarCommon->gtLclNum]);
assert(varDsc->lvIsMultiRegRet);
// Mark var as contained if not enregistrable.
@@ -956,23 +948,33 @@ Lowering::TreeNodeInfoInitReturn(GenTree* tree)
noway_assert(op1->IsMultiRegCall());
ReturnTypeDesc* retTypeDesc = op1->AsCall()->GetReturnTypeDesc();
- info->srcCount = retTypeDesc->GetReturnRegCount();
- useCandidates = retTypeDesc->GetABIReturnRegs();
+ info->srcCount = retTypeDesc->GetReturnRegCount();
+ useCandidates = retTypeDesc->GetABIReturnRegs();
}
}
else
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
{
- // Non-struct type return - determine useCandidates
+ // Non-struct type return - determine useCandidates
switch (tree->TypeGet())
{
- case TYP_VOID: useCandidates = RBM_NONE; break;
- case TYP_FLOAT: useCandidates = RBM_FLOATRET; break;
- case TYP_DOUBLE: useCandidates = RBM_DOUBLERET; break;
+ case TYP_VOID:
+ useCandidates = RBM_NONE;
+ break;
+ case TYP_FLOAT:
+ useCandidates = RBM_FLOATRET;
+ break;
+ case TYP_DOUBLE:
+ useCandidates = RBM_DOUBLERET;
+ break;
#if defined(_TARGET_64BIT_)
- case TYP_LONG: useCandidates = RBM_LNGRET; break;
+ case TYP_LONG:
+ useCandidates = RBM_LNGRET;
+ break;
#endif // defined(_TARGET_64BIT_)
- default: useCandidates = RBM_INTRET; break;
+ default:
+ useCandidates = RBM_INTRET;
+ break;
}
}
@@ -983,7 +985,6 @@ Lowering::TreeNodeInfoInitReturn(GenTree* tree)
}
}
-
//------------------------------------------------------------------------
// TreeNodeInfoInitShiftRotate: Set the NodeInfo for a shift or rotate.
//
@@ -993,25 +994,23 @@ Lowering::TreeNodeInfoInitReturn(GenTree* tree)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
+void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* l = m_lsra;
+ LinearScan* l = m_lsra;
info->srcCount = 2;
info->dstCount = 1;
// For shift operations, we need that the number
- // of bits moved gets stored in CL in case
+ // of bits moved gets stored in CL in case
// the number of bits to shift is not a constant.
GenTreePtr shiftBy = tree->gtOp.gtOp2;
- GenTreePtr source = tree->gtOp.gtOp1;
+ GenTreePtr source = tree->gtOp.gtOp1;
// x64 can encode 8 bits of shift and it will use 5 or 6. (the others are masked off)
// We will allow whatever can be encoded - hope you know what you are doing.
- if (!IsContainableImmed(tree, shiftBy) ||
- (shiftBy->gtIntConCommon.IconValue() > 255) ||
+ if (!IsContainableImmed(tree, shiftBy) || (shiftBy->gtIntConCommon.IconValue() > 255) ||
(shiftBy->gtIntConCommon.IconValue() < 0))
{
source->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~RBM_RCX);
@@ -1024,7 +1023,6 @@ Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
}
}
-
//------------------------------------------------------------------------
// TreeNodeInfoInitCall: Set the NodeInfo for a call.
//
@@ -1034,14 +1032,13 @@ Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
+void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
{
- TreeNodeInfo* info = &(call->gtLsraInfo);
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
- bool hasMultiRegRetVal = false;
- ReturnTypeDesc* retTypeDesc = nullptr;
+ TreeNodeInfo* info = &(call->gtLsraInfo);
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
+ bool hasMultiRegRetVal = false;
+ ReturnTypeDesc* retTypeDesc = nullptr;
info->srcCount = 0;
if (call->TypeGet() != TYP_VOID)
@@ -1050,7 +1047,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
if (hasMultiRegRetVal)
{
// dst count = number of registers in which the value is returned by call
- retTypeDesc = call->GetReturnTypeDesc();
+ retTypeDesc = call->GetReturnTypeDesc();
info->dstCount = retTypeDesc->GetReturnRegCount();
}
else
@@ -1062,7 +1059,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
{
info->dstCount = 0;
}
-
+
GenTree* ctrlExpr = call->gtControlExpr;
if (call->gtCallType == CT_INDIRECT)
{
@@ -1077,10 +1074,10 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
if (ctrlExpr != nullptr)
{
// we should never see a gtControlExpr whose type is void.
- assert(ctrlExpr->TypeGet() != TYP_VOID);
+ assert(ctrlExpr->TypeGet() != TYP_VOID);
// call can take a Rm op on x64
- info->srcCount++;
+ info->srcCount++;
// In case of fast tail implemented as jmp, make sure that gtControlExpr is
// computed into a register.
@@ -1123,7 +1120,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
}
else
#endif // _TARGET_X86_
- if (hasMultiRegRetVal)
+ if (hasMultiRegRetVal)
{
assert(retTypeDesc != nullptr);
info->setDstCandidates(l, retTypeDesc->GetABIReturnRegs());
@@ -1133,7 +1130,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
#ifdef _TARGET_X86_
// The return value will be on the X87 stack, and we will need to move it.
info->setDstCandidates(l, l->allRegs(registerType));
-#else // !_TARGET_X86_
+#else // !_TARGET_X86_
info->setDstCandidates(l, RBM_FLOATRET);
#endif // !_TARGET_X86_
}
@@ -1146,9 +1143,9 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
info->setDstCandidates(l, RBM_INTRET);
}
- // number of args to a call =
+ // number of args to a call =
// callRegArgs + (callargs - placeholders, setup, etc)
- // there is an explicit thisPtr but it is redundant
+ // there is an explicit thisPtr but it is redundant
// If there is an explicit this pointer, we don't want that node to produce anything
// as it is redundant
@@ -1170,7 +1167,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
#if FEATURE_VARARG
bool callHasFloatRegArgs = false;
#endif // !FEATURE_VARARG
-
+
// First, count reg args
for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
{
@@ -1192,33 +1189,33 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
// If the node is TYP_STRUCT and it is put on stack with
// putarg_stk operation, we consume and produce no registers.
- // In this case the embedded Obj node should not produce
+ // In this case the embedded Obj node should not produce
// registers too since it is contained.
// Note that if it is a SIMD type the argument will be in a register.
if (argNode->TypeGet() == TYP_STRUCT)
{
assert(argNode->gtOp.gtOp1 != nullptr && argNode->gtOp.gtOp1->OperGet() == GT_OBJ);
argNode->gtOp.gtOp1->gtLsraInfo.dstCount = 0;
- argNode->gtLsraInfo.srcCount = 0;
+ argNode->gtLsraInfo.srcCount = 0;
}
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
continue;
}
- regNumber argReg = REG_NA;
- regMaskTP argMask = RBM_NONE;
- short regCount = 0;
- bool isOnStack = true;
+ regNumber argReg = REG_NA;
+ regMaskTP argMask = RBM_NONE;
+ short regCount = 0;
+ bool isOnStack = true;
if (curArgTabEntry->regNum != REG_STK)
{
- isOnStack = false;
+ isOnStack = false;
var_types argType = argNode->TypeGet();
#if FEATURE_VARARG
callHasFloatRegArgs |= varTypeIsFloating(argType);
#endif // !FEATURE_VARARG
- argReg = curArgTabEntry->regNum;
+ argReg = curArgTabEntry->regNum;
regCount = 1;
// Default case is that we consume one source; modify this later (e.g. for
@@ -1231,14 +1228,13 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
// If the struct arg is wrapped in CPYBLK the type of the param will be TYP_VOID.
// Use the curArgTabEntry's isStruct to get whether the param is a struct.
- if (varTypeIsStruct(argNode)
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(|| curArgTabEntry->isStruct))
+ if (varTypeIsStruct(argNode) FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(|| curArgTabEntry->isStruct))
{
- unsigned originalSize = 0;
- LclVarDsc* varDsc = nullptr;
+ unsigned originalSize = 0;
+ LclVarDsc* varDsc = nullptr;
if (argNode->gtOper == GT_LCL_VAR)
{
- varDsc = compiler->lvaTable + argNode->gtLclVarCommon.gtLclNum;
+ varDsc = compiler->lvaTable + argNode->gtLclVarCommon.gtLclNum;
originalSize = varDsc->lvSize();
}
else if (argNode->gtOper == GT_MKREFANY)
@@ -1259,8 +1255,8 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
originalSize = 0;
// There could be up to 2 PUTARG_REGs in the list
- GenTreeArgList* argListPtr = argNode->AsArgList();
- unsigned iterationNum = 0;
+ GenTreeArgList* argListPtr = argNode->AsArgList();
+ unsigned iterationNum = 0;
for (; argListPtr; argListPtr = argListPtr->Rest())
{
GenTreePtr putArgRegNode = argListPtr->gtOp.gtOp1;
@@ -1268,7 +1264,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
if (iterationNum == 0)
{
- varDsc = compiler->lvaTable + putArgRegNode->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
+ varDsc = compiler->lvaTable + putArgRegNode->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
originalSize = varDsc->lvSize();
assert(originalSize != 0);
}
@@ -1298,7 +1294,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
noway_assert(!"Can't predict unsupported TYP_STRUCT arg kind");
}
- unsigned slots = ((unsigned)(roundUp(originalSize, TARGET_POINTER_SIZE))) / REGSIZE_BYTES;
+ unsigned slots = ((unsigned)(roundUp(originalSize, TARGET_POINTER_SIZE))) / REGSIZE_BYTES;
unsigned remainingSlots = slots;
if (!isOnStack)
@@ -1322,9 +1318,9 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
// This TYP_STRUCT argument is also passed in the outgoing argument area
// We need a register to address the TYP_STRUCT
internalIntCount = 1;
-#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
// And we may need 2
- internalIntCount = 2;
+ internalIntCount = 2;
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
}
argNode->gtLsraInfo.internalIntCount = internalIntCount;
@@ -1377,7 +1373,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
{
GenTreePtr arg = args->gtOp.gtOp1;
if (!(args->gtFlags & GTF_LATE_ARG))
- {
+ {
TreeNodeInfo* argInfo = &(arg->gtLsraInfo);
#if !defined(_TARGET_64BIT_)
if (arg->TypeGet() == TYP_LONG)
@@ -1398,7 +1394,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
argInfo->isLocalDefUse = true;
}
- // If the child of GT_PUTARG_STK is a constant, we don't need a register to
+ // If the child of GT_PUTARG_STK is a constant, we don't need a register to
// move it to memory (stack location).
//
// On AMD64, we don't want to make 0 contained, because we can generate smaller code
@@ -1415,14 +1411,14 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
// push rdx
argInfo->dstCount = 0;
- if (arg->gtOper == GT_PUTARG_STK)
+ if (arg->gtOper == GT_PUTARG_STK)
{
GenTree* op1 = arg->gtOp.gtOp1;
if (IsContainableImmed(arg, op1)
#if defined(_TARGET_AMD64_)
&& !op1->IsIntegralConst(0)
#endif // _TARGET_AMD64_
- )
+ )
{
MakeSrcContained(arg, op1);
}
@@ -1435,10 +1431,7 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
#if FEATURE_VARARG
// If it is a fast tail call, it is already preferenced to use RAX.
// Therefore, no need set src candidates on call tgt again.
- if (call->IsVarargs() &&
- callHasFloatRegArgs &&
- !call->IsFastTailCall() &&
- (ctrlExpr != nullptr))
+ if (call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall() && (ctrlExpr != nullptr))
{
// Don't assign the call target to any of the argument registers because
// we will use them to also pass floating point arguments as required
@@ -1448,7 +1441,6 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
#endif // !FEATURE_VARARG
}
-
//------------------------------------------------------------------------
// TreeNodeInfoInitBlockStore: Set the NodeInfo for a block store.
//
@@ -1458,13 +1450,12 @@ Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
+void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
{
- GenTree* dstAddr = blkNode->Dest();
- unsigned size;
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
+ GenTree* dstAddr = blkNode->Dest();
+ unsigned size;
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
// Sources are dest address, initVal or source, and size
blkNode->gtLsraInfo.srcCount = 3;
@@ -1475,15 +1466,15 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
GenTreeInitBlk* initBlkNode = blkNode->AsInitBlk();
GenTreePtr blockSize = initBlkNode->Size();
- GenTreePtr initVal = initBlkNode->InitVal();
+ GenTreePtr initVal = initBlkNode->InitVal();
// If we have an InitBlk with constant block size we can optimize several ways:
- // a) If the size is smaller than a small memory page but larger than INITBLK_UNROLL_LIMIT bytes
+ // a) If the size is smaller than a small memory page but larger than INITBLK_UNROLL_LIMIT bytes
// we use rep stosb since this reduces the register pressure in LSRA and we have
// roughly the same performance as calling the helper.
- // b) If the size is <= INITBLK_UNROLL_LIMIT bytes and the fill byte is a constant,
+ // b) If the size is <= INITBLK_UNROLL_LIMIT bytes and the fill byte is a constant,
// we can speed this up by unrolling the loop using SSE2 stores. The reason for
- // this threshold is because our last investigation (Fall 2013), more than 95% of initblks
+ // this threshold is because our last investigation (Fall 2013), more than 95% of initblks
// in our framework assemblies are actually <= INITBLK_UNROLL_LIMIT bytes size, so this is the
// preferred code sequence for the vast majority of cases.
@@ -1515,9 +1506,9 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
else
{
initVal->gtIntCon.gtIconVal = 0x0101010101010101LL * fill;
- initVal->gtType = TYP_LONG;
+ initVal->gtType = TYP_LONG;
}
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
initVal->gtIntCon.gtIconVal = 0x01010101 * fill;
#endif // !_TARGET_AMD64_
@@ -1528,7 +1519,7 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
// instruction.
if (size >= XMM_REGSIZE_BYTES)
{
- // Reserve an XMM register to fill it with
+ // Reserve an XMM register to fill it with
// a pack of 16 init value constants.
blkNode->gtLsraInfo.internalFloatCount = 1;
blkNode->gtLsraInfo.setInternalCandidates(l, l->internalFloatRegCandidates());
@@ -1555,7 +1546,7 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
initVal->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
blockSize->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
initBlkNode->gtBlkOpKind = GenTreeBlkOp::BlkOpKindHelper;
-#else // !_TARGET_AMD64_
+#else // !_TARGET_AMD64_
dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_RDI);
initVal->gtLsraInfo.setSrcCandidates(l, RBM_RAX);
blockSize->gtLsraInfo.setSrcCandidates(l, RBM_RCX);
@@ -1566,25 +1557,25 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
else if (blkNode->OperGet() == GT_COPYOBJ)
{
GenTreeCpObj* cpObjNode = blkNode->AsCpObj();
-
- GenTreePtr clsTok = cpObjNode->ClsTok();
+
+ GenTreePtr clsTok = cpObjNode->ClsTok();
GenTreePtr srcAddr = cpObjNode->Source();
-
+
unsigned slots = cpObjNode->gtSlots;
#ifdef DEBUG
// CpObj must always have at least one GC-Pointer as a member.
assert(cpObjNode->gtGcPtrCount > 0);
-
+
assert(dstAddr->gtType == TYP_BYREF || dstAddr->gtType == TYP_I_IMPL);
assert(clsTok->IsIconHandle());
-
- CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)clsTok->gtIntCon.gtIconVal;
- size_t classSize = compiler->info.compCompHnd->getClassSize(clsHnd);
- size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE);
-
- // Currently, the EE always round up a class data structure so
- // we are not handling the case where we have a non multiple of pointer sized
+
+ CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)clsTok->gtIntCon.gtIconVal;
+ size_t classSize = compiler->info.compCompHnd->getClassSize(clsHnd);
+ size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE);
+
+ // Currently, the EE always round up a class data structure so
+ // we are not handling the case where we have a non multiple of pointer sized
// struct. This behavior may change in the future so in order to keeps things correct
// let's assert it just to be safe. Going forward we should simply
// handle this case.
@@ -1594,19 +1585,20 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
#endif
bool IsRepMovsProfitable = false;
-
+
// If the destination is not on the stack, let's find out if we
// can improve code size by using rep movsq instead of generating
// sequences of movsq instructions.
if (!dstAddr->OperIsLocalAddr())
{
// Let's inspect the struct/class layout and determine if it's profitable
- // to use rep movsq for copying non-gc memory instead of using single movsq
+ // to use rep movsq for copying non-gc memory instead of using single movsq
// instructions for each memory slot.
- unsigned i = 0;
- BYTE* gcPtrs = cpObjNode->gtGcPtrs;
-
- do {
+ unsigned i = 0;
+ BYTE* gcPtrs = cpObjNode->gtGcPtrs;
+
+ do
+ {
unsigned nonGCSlots = 0;
// Measure a contiguous non-gc area inside the struct and note the maximum.
while (i < slots && gcPtrs[i] == TYPE_GC_NONE)
@@ -1614,7 +1606,7 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
nonGCSlots++;
i++;
}
-
+
while (i < slots && gcPtrs[i] != TYPE_GC_NONE)
{
i++;
@@ -1632,9 +1624,9 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
IsRepMovsProfitable = true;
}
- // There are two cases in which we need to materialize the
+ // There are two cases in which we need to materialize the
// struct size:
- // a) When the destination is on the stack we don't need to use the
+ // a) When the destination is on the stack we don't need to use the
// write barrier, we can just simply call rep movsq and get a win in codesize.
// b) If we determine we have contiguous non-gc regions in the struct where it's profitable
// to use rep movsq instead of a sequence of single movsq instructions. According to the
@@ -1663,7 +1655,7 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
GenTreeCpBlk* cpBlkNode = blkNode->AsCpBlk();
GenTreePtr blockSize = cpBlkNode->Size();
- GenTreePtr srcAddr = cpBlkNode->Source();
+ GenTreePtr srcAddr = cpBlkNode->Source();
// In case of a CpBlk with a constant size and less than CPBLK_MOVS_LIMIT size
// we can use rep movs to generate code instead of the helper call.
@@ -1678,15 +1670,15 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
assert(!blockSize->IsIconHandle());
ssize_t size = blockSize->gtIntCon.gtIconVal;
- // If we have a buffer between XMM_REGSIZE_BYTES and CPBLK_UNROLL_LIMIT bytes, we'll use SSE2.
+ // If we have a buffer between XMM_REGSIZE_BYTES and CPBLK_UNROLL_LIMIT bytes, we'll use SSE2.
// Structs and buffer with sizes <= CPBLK_UNROLL_LIMIT bytes are occurring in more than 95% of
// our framework assemblies, so this is the main code generation scheme we'll use.
if (size <= CPBLK_UNROLL_LIMIT)
{
MakeSrcContained(blkNode, blockSize);
-
+
// If we have a remainder smaller than XMM_REGSIZE_BYTES, we need an integer temp reg.
- //
+ //
// x86 specific note: if the size is odd, the last copy operation would be of size 1 byte.
// But on x86 only RBM_BYTE_REGS could be used as byte registers. Therefore, exclude
// RBM_NON_BYTE_REGS from internal candidates.
@@ -1706,8 +1698,8 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
if (size >= XMM_REGSIZE_BYTES)
{
- // If we have a buffer larger than XMM_REGSIZE_BYTES,
- // reserve an XMM register to use it for a
+ // If we have a buffer larger than XMM_REGSIZE_BYTES,
+ // reserve an XMM register to use it for a
// series of 16-byte loads and stores.
blkNode->gtLsraInfo.internalFloatCount = 1;
blkNode->gtLsraInfo.addInternalCandidates(l, l->internalFloatRegCandidates());
@@ -1724,7 +1716,7 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
{
MakeSrcContained(blkNode, dstAddr);
}
-
+
cpBlkNode->gtBlkOpKind = GenTreeBlkOp::BlkOpKindUnroll;
}
else
@@ -1763,7 +1755,6 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
}
}
-
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
//------------------------------------------------------------------------
// TreeNodeInfoInitPutArgStk: Set the NodeInfo for a GT_PUTARG_STK.
@@ -1774,11 +1765,10 @@ Lowering::TreeNodeInfoInitBlockStore(GenTreeBlkOp* blkNode)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
+void Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* l = m_lsra;
+ LinearScan* l = m_lsra;
if (tree->TypeGet() != TYP_STRUCT)
{
@@ -1787,10 +1777,10 @@ Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
}
GenTreePutArgStk* putArgStkTree = tree->AsPutArgStk();
-
- GenTreePtr dst = tree;
- GenTreePtr src = tree->gtOp.gtOp1;
- GenTreePtr srcAddr = nullptr;
+
+ GenTreePtr dst = tree;
+ GenTreePtr src = tree->gtOp.gtOp1;
+ GenTreePtr srcAddr = nullptr;
if ((src->OperGet() == GT_OBJ) || (src->OperGet() == GT_IND))
{
@@ -1803,7 +1793,7 @@ Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
info->srcCount = src->gtLsraInfo.dstCount;
// If this is a stack variable address,
- // make the op1 contained, so this way
+ // make the op1 contained, so this way
// there is no unnecessary copying between registers.
// To avoid assertion, increment the parent's source.
// It is recovered below.
@@ -1812,30 +1802,30 @@ Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
{
info->srcCount += 1;
}
-
+
info->dstCount = 0;
-
- // In case of a CpBlk we could use a helper call. In case of putarg_stk we
+
+ // In case of a CpBlk we could use a helper call. In case of putarg_stk we
// can't do that since the helper call could kill some already set up outgoing args.
// TODO-Amd64-Unix: converge the code for putarg_stk with cpyblk/cpyobj.
// The cpyXXXX code is rather complex and this could cause it to be more complex, but
// it might be the right thing to do.
-
+
// This threshold will decide from using the helper or let the JIT decide to inline
// a code sequence of its choice.
ssize_t helperThreshold = max(CPBLK_MOVS_LIMIT, CPBLK_UNROLL_LIMIT);
- ssize_t size = putArgStkTree->gtNumSlots * TARGET_POINTER_SIZE;
-
+ ssize_t size = putArgStkTree->gtNumSlots * TARGET_POINTER_SIZE;
+
// TODO-X86-CQ: The helper call either is not supported on x86 or required more work
// (I don't know which).
-
- // If we have a buffer between XMM_REGSIZE_BYTES and CPBLK_UNROLL_LIMIT bytes, we'll use SSE2.
+
+ // If we have a buffer between XMM_REGSIZE_BYTES and CPBLK_UNROLL_LIMIT bytes, we'll use SSE2.
// Structs and buffer with sizes <= CPBLK_UNROLL_LIMIT bytes are occurring in more than 95% of
// our framework assemblies, so this is the main code generation scheme we'll use.
if (size <= CPBLK_UNROLL_LIMIT && putArgStkTree->gtNumberReferenceSlots == 0)
{
// If we have a remainder smaller than XMM_REGSIZE_BYTES, we need an integer temp reg.
- //
+ //
// x86 specific note: if the size is odd, the last copy operation would be of size 1 byte.
// But on x86 only RBM_BYTE_REGS could be used as byte registers. Therefore, exclude
// RBM_NON_BYTE_REGS from internal candidates.
@@ -1843,7 +1833,7 @@ Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
{
info->internalIntCount++;
regMaskTP regMask = l->allRegs(TYP_INT);
-
+
#ifdef _TARGET_X86_
if ((size % 2) != 0)
{
@@ -1852,21 +1842,21 @@ Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
#endif
info->setInternalCandidates(l, regMask);
}
-
+
if (size >= XMM_REGSIZE_BYTES)
{
- // If we have a buffer larger than XMM_REGSIZE_BYTES,
- // reserve an XMM register to use it for a
+ // If we have a buffer larger than XMM_REGSIZE_BYTES,
+ // reserve an XMM register to use it for a
// series of 16-byte loads and stores.
info->internalFloatCount = 1;
info->addInternalCandidates(l, l->internalFloatRegCandidates());
}
-
+
if (haveLocalAddr)
{
MakeSrcContained(putArgStkTree, srcAddr);
}
-
+
// If src or dst are on stack, we don't have to generate the address into a register
// because it's just some constant+SP
putArgStkTree->gtPutArgStkKind = GenTreePutArgStk::PutArgStkKindUnroll;
@@ -1879,13 +1869,13 @@ Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
{
MakeSrcContained(putArgStkTree, srcAddr);
}
-
+
putArgStkTree->gtPutArgStkKind = GenTreePutArgStk::PutArgStkKindRepInstr;
}
-
+
// Always mark the OBJ and ADDR as contained trees by the putarg_stk. The codegen will deal with this tree.
MakeSrcContained(putArgStkTree, src);
-
+
// Balance up the inc above.
if (haveLocalAddr)
{
@@ -1894,7 +1884,6 @@ Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
}
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
-
//------------------------------------------------------------------------
// TreeNodeInfoInitLclHeap: Set the NodeInfo for a GT_LCLHEAP.
//
@@ -1904,12 +1893,11 @@ Lowering::TreeNodeInfoInitPutArgStk(GenTree* tree)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
+void Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
{
- TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* l = m_lsra;
- Compiler* compiler = comp;
+ TreeNodeInfo* info = &(tree->gtLsraInfo);
+ LinearScan* l = m_lsra;
+ Compiler* compiler = comp;
info->srcCount = 1;
info->dstCount = 1;
@@ -1924,7 +1912,7 @@ Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
// const and <PageSize No 0 (amd64) 1 (x86)
// const and >=PageSize No 2
// Non-const Yes 0
- // Non-const No 2
+ // Non-const No 2
GenTreePtr size = tree->gtOp.gtOp1;
if (size->IsCnsIntOrI())
@@ -1937,7 +1925,7 @@ Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
{
info->internalIntCount = 0;
}
- else
+ else
{
// Compute the amount of memory to properly STACK_ALIGN.
// Note: The Gentree node is not updated here as it is cheap to recompute stack aligned size.
@@ -1958,10 +1946,10 @@ Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
if (sizeVal < compiler->eeGetPageSize())
{
#ifdef _TARGET_X86_
- info->internalIntCount = 1; // x86 needs a register here to avoid generating "sub" on ESP.
-#else // !_TARGET_X86_
+ info->internalIntCount = 1; // x86 needs a register here to avoid generating "sub" on ESP.
+#else // !_TARGET_X86_
info->internalIntCount = 0;
-#endif // !_TARGET_X86_
+#endif // !_TARGET_X86_
}
else
{
@@ -1989,7 +1977,6 @@ Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
}
}
-
//------------------------------------------------------------------------
// TreeNodeInfoInitLogicalOp: Set the NodeInfo for GT_AND/GT_OR/GT_XOR,
// as well as GT_ADD/GT_SUB.
@@ -2000,34 +1987,33 @@ Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitLogicalOp(GenTree* tree)
+void Lowering::TreeNodeInfoInitLogicalOp(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* l = m_lsra;
+ LinearScan* l = m_lsra;
// We're not marking a constant hanging on the left of the add
// as containable so we assign it to a register having CQ impact.
- // TODO-XArch-CQ: Detect this case and support both generating a single instruction
+ // TODO-XArch-CQ: Detect this case and support both generating a single instruction
// for GT_ADD(Constant, SomeTree)
info->srcCount = 2;
info->dstCount = 1;
GenTree* op1 = tree->gtGetOp1();
- GenTree* op2 = tree->gtGetOp2();
+ GenTree* op2 = tree->gtGetOp2();
// We can directly encode the second operand if it is either a containable constant or a memory-op.
// In case of memory-op, we can encode it directly provided its type matches with 'tree' type.
// This is because during codegen, type of 'tree' is used to determine emit Type size. If the types
// do not match, they get normalized (i.e. sign/zero extended) on load into a register.
- bool directlyEncodable = false;
- bool binOpInRMW = false;
- GenTreePtr operand = nullptr;
+ bool directlyEncodable = false;
+ bool binOpInRMW = false;
+ GenTreePtr operand = nullptr;
if (IsContainableImmed(tree, op2))
{
directlyEncodable = true;
- operand = op2;
+ operand = op2;
}
else
{
@@ -2037,16 +2023,17 @@ Lowering::TreeNodeInfoInitLogicalOp(GenTree* tree)
if (op2->isMemoryOp() && tree->TypeGet() == op2->TypeGet())
{
directlyEncodable = true;
- operand = op2;
+ operand = op2;
}
else if (tree->OperIsCommutative())
{
if (IsContainableImmed(tree, op1) ||
(op1->isMemoryOp() && tree->TypeGet() == op1->TypeGet() && IsSafeToContainMem(tree, op1)))
{
- // If it is safe, we can reverse the order of operands of commutative operations for efficient codegen
+ // If it is safe, we can reverse the order of operands of commutative operations for efficient
+ // codegen
directlyEncodable = true;
- operand = op1;
+ operand = op1;
}
}
}
@@ -2059,14 +2046,13 @@ Lowering::TreeNodeInfoInitLogicalOp(GenTree* tree)
}
else if (!binOpInRMW)
{
- // If this binary op neither has contained operands, nor is a
+ // If this binary op neither has contained operands, nor is a
// Read-Modify-Write (RMW) operation, we can mark its operands
// as reg optional.
SetRegOptionalForBinOp(tree);
}
}
-
//------------------------------------------------------------------------
// TreeNodeInfoInitModDiv: Set the NodeInfo for GT_MOD/GT_DIV/GT_UMOD/GT_UDIV.
//
@@ -2076,11 +2062,10 @@ Lowering::TreeNodeInfoInitLogicalOp(GenTree* tree)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
+void Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* l = m_lsra;
+ LinearScan* l = m_lsra;
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
@@ -2090,34 +2075,34 @@ Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
switch (tree->OperGet())
{
- case GT_MOD:
- case GT_DIV:
- if (varTypeIsFloating(tree->TypeGet()))
- {
- // No implicit conversions at this stage as the expectation is that
- // everything is made explicit by adding casts.
- assert(op1->TypeGet() == op2->TypeGet());
-
- if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
- {
- MakeSrcContained(tree, op2);
- }
- else
+ case GT_MOD:
+ case GT_DIV:
+ if (varTypeIsFloating(tree->TypeGet()))
{
- // If there are no containable operands, we can make an operand reg optional.
- // SSE2 allows only op2 to be a memory-op.
- SetRegOptional(op2);
- }
+ // No implicit conversions at this stage as the expectation is that
+ // everything is made explicit by adding casts.
+ assert(op1->TypeGet() == op2->TypeGet());
- return;
- }
- break;
+ if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+ {
+ MakeSrcContained(tree, op2);
+ }
+ else
+ {
+ // If there are no containable operands, we can make an operand reg optional.
+ // SSE2 allows only op2 to be a memory-op.
+ SetRegOptional(op2);
+ }
- default:
- break;
+ return;
+ }
+ break;
+
+ default:
+ break;
}
- // Amd64 Div/Idiv instruction:
+ // Amd64 Div/Idiv instruction:
// Dividend in RAX:RDX and computes
// Quotient in RAX, Remainder in RDX
@@ -2127,11 +2112,11 @@ Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
// RAX is used as a trashable register during computation of remainder.
info->setDstCandidates(l, RBM_RDX);
}
- else
+ else
{
// We are interested in just the quotient.
// RDX gets used as trashable register during computation of quotient
- info->setDstCandidates(l, RBM_RAX);
+ info->setDstCandidates(l, RBM_RAX);
}
// If possible would like to have op1 in RAX to avoid a register move
@@ -2152,7 +2137,6 @@ Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
}
}
-
//------------------------------------------------------------------------
// TreeNodeInfoInitIntrinsic: Set the NodeInfo for a GT_INTRINSIC.
//
@@ -2162,11 +2146,10 @@ Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitIntrinsic(GenTree* tree)
+void Lowering::TreeNodeInfoInitIntrinsic(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* l = m_lsra;
+ LinearScan* l = m_lsra;
// Both operand and its result must be of floating point type.
GenTree* op1 = tree->gtGetOp1();
@@ -2176,54 +2159,54 @@ Lowering::TreeNodeInfoInitIntrinsic(GenTree* tree)
info->srcCount = 1;
info->dstCount = 1;
- switch(tree->gtIntrinsic.gtIntrinsicId)
+ switch (tree->gtIntrinsic.gtIntrinsicId)
{
- case CORINFO_INTRINSIC_Sqrt:
- if (op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl())
- {
- MakeSrcContained(tree, op1);
- }
- else
- {
- // Mark the operand as reg optional since codegen can still
- // generate code if op1 is on stack.
- SetRegOptional(op1);
- }
- break;
+ case CORINFO_INTRINSIC_Sqrt:
+ if (op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl())
+ {
+ MakeSrcContained(tree, op1);
+ }
+ else
+ {
+ // Mark the operand as reg optional since codegen can still
+ // generate code if op1 is on stack.
+ SetRegOptional(op1);
+ }
+ break;
- case CORINFO_INTRINSIC_Abs:
- // Abs(float x) = x & 0x7fffffff
- // Abs(double x) = x & 0x7ffffff ffffffff
-
- // In case of Abs we need an internal register to hold mask.
-
- // TODO-XArch-CQ: avoid using an internal register for the mask.
- // Andps or andpd both will operate on 128-bit operands.
- // The data section constant to hold the mask is a 64-bit size.
- // Therefore, we need both the operand and mask to be in
- // xmm register. When we add support in emitter to emit 128-bit
- // data constants and instructions that operate on 128-bit
- // memory operands we can avoid the need for an internal register.
- if (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Abs)
- {
- info->internalFloatCount = 1;
- info->setInternalCandidates(l, l->internalFloatRegCandidates());
- }
- break;
+ case CORINFO_INTRINSIC_Abs:
+ // Abs(float x) = x & 0x7fffffff
+ // Abs(double x) = x & 0x7ffffff ffffffff
+
+ // In case of Abs we need an internal register to hold mask.
+
+ // TODO-XArch-CQ: avoid using an internal register for the mask.
+ // Andps or andpd both will operate on 128-bit operands.
+ // The data section constant to hold the mask is a 64-bit size.
+ // Therefore, we need both the operand and mask to be in
+ // xmm register. When we add support in emitter to emit 128-bit
+ // data constants and instructions that operate on 128-bit
+ // memory operands we can avoid the need for an internal register.
+ if (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Abs)
+ {
+ info->internalFloatCount = 1;
+ info->setInternalCandidates(l, l->internalFloatRegCandidates());
+ }
+ break;
#ifdef _TARGET_X86_
- case CORINFO_INTRINSIC_Cos:
- case CORINFO_INTRINSIC_Sin:
- case CORINFO_INTRINSIC_Round:
- NYI_X86("Math intrinsics Cos, Sin and Round");
- break;
+ case CORINFO_INTRINSIC_Cos:
+ case CORINFO_INTRINSIC_Sin:
+ case CORINFO_INTRINSIC_Round:
+ NYI_X86("Math intrinsics Cos, Sin and Round");
+ break;
#endif // _TARGET_X86_
- default:
- // Right now only Sqrt/Abs are treated as math intrinsics
- noway_assert(!"Unsupported math intrinsic");
- unreached();
- break;
+ default:
+ // Right now only Sqrt/Abs are treated as math intrinsics
+ noway_assert(!"Unsupported math intrinsic");
+ unreached();
+ break;
}
}
@@ -2237,21 +2220,20 @@ Lowering::TreeNodeInfoInitIntrinsic(GenTree* tree)
// Return Value:
// None.
-void
-Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
+void Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
{
- GenTreeSIMD* simdTree = tree->AsSIMD();
- TreeNodeInfo* info = &(tree->gtLsraInfo);
- LinearScan* lsra = m_lsra;
- info->dstCount = 1;
- switch(simdTree->gtSIMDIntrinsicID)
+ GenTreeSIMD* simdTree = tree->AsSIMD();
+ TreeNodeInfo* info = &(tree->gtLsraInfo);
+ LinearScan* lsra = m_lsra;
+ info->dstCount = 1;
+ switch (simdTree->gtSIMDIntrinsicID)
{
GenTree* op2;
- case SIMDIntrinsicInit:
+ case SIMDIntrinsicInit:
{
info->srcCount = 1;
- GenTree* op1 = tree->gtOp.gtOp1;
+ GenTree* op1 = tree->gtOp.gtOp1;
// This sets all fields of a SIMD struct to the given value.
// Mark op1 as contained if it is either zero or int constant of all 1's,
@@ -2260,13 +2242,12 @@ Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
// Should never see small int base type vectors except for zero initialization.
assert(!varTypeIsSmallInt(simdTree->gtSIMDBaseType) || op1->IsIntegralConst(0));
- if (op1->IsFPZero() ||
- op1->IsIntegralConst(0) ||
+ if (op1->IsFPZero() || op1->IsIntegralConst(0) ||
(varTypeIsIntegral(simdTree->gtSIMDBaseType) && op1->IsIntegralConst(-1)))
{
MakeSrcContained(tree, tree->gtOp.gtOp1);
info->srcCount = 0;
- }
+ }
else if ((comp->getSIMDInstructionSet() == InstructionSet_AVX) &&
((simdTree->gtSIMDSize == 16) || (simdTree->gtSIMDSize == 32)))
{
@@ -2280,7 +2261,7 @@ Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
}
break;
- case SIMDIntrinsicInitN:
+ case SIMDIntrinsicInitN:
{
info->srcCount = (short)(simdTree->gtSIMDSize / genTypeSize(simdTree->gtSIMDBaseType));
@@ -2290,185 +2271,185 @@ Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
}
break;
- case SIMDIntrinsicInitArray:
- // We have an array and an index, which may be contained.
- info->srcCount = 2;
- CheckImmedAndMakeContained(tree, tree->gtGetOp2());
- break;
-
- case SIMDIntrinsicDiv:
- // SSE2 has no instruction support for division on integer vectors
- noway_assert(varTypeIsFloating(simdTree->gtSIMDBaseType));
- info->srcCount = 2;
- break;
+ case SIMDIntrinsicInitArray:
+ // We have an array and an index, which may be contained.
+ info->srcCount = 2;
+ CheckImmedAndMakeContained(tree, tree->gtGetOp2());
+ break;
- case SIMDIntrinsicAbs:
- // This gets implemented as bitwise-And operation with a mask
- // and hence should never see it here.
- unreached();
- break;
+ case SIMDIntrinsicDiv:
+ // SSE2 has no instruction support for division on integer vectors
+ noway_assert(varTypeIsFloating(simdTree->gtSIMDBaseType));
+ info->srcCount = 2;
+ break;
- case SIMDIntrinsicSqrt:
- // SSE2 has no instruction support for sqrt on integer vectors.
- noway_assert(varTypeIsFloating(simdTree->gtSIMDBaseType));
- info->srcCount = 1;
- break;
+ case SIMDIntrinsicAbs:
+ // This gets implemented as bitwise-And operation with a mask
+ // and hence should never see it here.
+ unreached();
+ break;
- case SIMDIntrinsicAdd:
- case SIMDIntrinsicSub:
- case SIMDIntrinsicMul:
- case SIMDIntrinsicBitwiseAnd:
- case SIMDIntrinsicBitwiseAndNot:
- case SIMDIntrinsicBitwiseOr:
- case SIMDIntrinsicBitwiseXor:
- case SIMDIntrinsicMin:
- case SIMDIntrinsicMax:
- info->srcCount = 2;
+ case SIMDIntrinsicSqrt:
+ // SSE2 has no instruction support for sqrt on integer vectors.
+ noway_assert(varTypeIsFloating(simdTree->gtSIMDBaseType));
+ info->srcCount = 1;
+ break;
- // SSE2 32-bit integer multiplication requires two temp regs
- if (simdTree->gtSIMDIntrinsicID == SIMDIntrinsicMul &&
- simdTree->gtSIMDBaseType == TYP_INT)
- {
- info->internalFloatCount = 2;
- info->setInternalCandidates(lsra, lsra->allSIMDRegs());
- }
- break;
+ case SIMDIntrinsicAdd:
+ case SIMDIntrinsicSub:
+ case SIMDIntrinsicMul:
+ case SIMDIntrinsicBitwiseAnd:
+ case SIMDIntrinsicBitwiseAndNot:
+ case SIMDIntrinsicBitwiseOr:
+ case SIMDIntrinsicBitwiseXor:
+ case SIMDIntrinsicMin:
+ case SIMDIntrinsicMax:
+ info->srcCount = 2;
- case SIMDIntrinsicEqual:
- info->srcCount = 2;
- break;
+ // SSE2 32-bit integer multiplication requires two temp regs
+ if (simdTree->gtSIMDIntrinsicID == SIMDIntrinsicMul && simdTree->gtSIMDBaseType == TYP_INT)
+ {
+ info->internalFloatCount = 2;
+ info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+ }
+ break;
- // SSE2 doesn't support < and <= directly on int vectors.
- // Instead we need to use > and >= with swapped operands.
- case SIMDIntrinsicLessThan:
- case SIMDIntrinsicLessThanOrEqual:
- info->srcCount = 2;
- noway_assert(!varTypeIsIntegral(simdTree->gtSIMDBaseType));
- break;
+ case SIMDIntrinsicEqual:
+ info->srcCount = 2;
+ break;
- // SIMDIntrinsicEqual is supported only on non-floating point base type vectors.
- // SSE2 cmpps/pd doesn't support > and >= directly on float/double vectors.
- // Instead we need to use < and <= with swapped operands.
- case SIMDIntrinsicGreaterThan:
- noway_assert(!varTypeIsFloating(simdTree->gtSIMDBaseType));
- info->srcCount = 2;
- break;
+ // SSE2 doesn't support < and <= directly on int vectors.
+ // Instead we need to use > and >= with swapped operands.
+ case SIMDIntrinsicLessThan:
+ case SIMDIntrinsicLessThanOrEqual:
+ info->srcCount = 2;
+ noway_assert(!varTypeIsIntegral(simdTree->gtSIMDBaseType));
+ break;
- case SIMDIntrinsicOpEquality:
- case SIMDIntrinsicOpInEquality:
- // Need two SIMD registers as scratch.
- // See genSIMDIntrinsicRelOp() for details on code sequence generate and
- // the need for two scratch registers.
- info->srcCount = 2;
- info->internalFloatCount = 2;
- info->setInternalCandidates(lsra, lsra->allSIMDRegs());
- break;
+ // SIMDIntrinsicEqual is supported only on non-floating point base type vectors.
+ // SSE2 cmpps/pd doesn't support > and >= directly on float/double vectors.
+ // Instead we need to use < and <= with swapped operands.
+ case SIMDIntrinsicGreaterThan:
+ noway_assert(!varTypeIsFloating(simdTree->gtSIMDBaseType));
+ info->srcCount = 2;
+ break;
- case SIMDIntrinsicDotProduct:
- if ((comp->getSIMDInstructionSet() == InstructionSet_SSE2) || (simdTree->gtOp.gtOp1->TypeGet() == TYP_SIMD32))
- {
- // For SSE, or AVX with 32-byte vectors, we also need an internal register as scratch.
- // Further we need the targetReg and internal reg to be distinct registers.
- // This is achieved by requesting two internal registers; thus one of them
- // will be different from targetReg.
- // Note that if this is a TYP_SIMD16 or smaller on AVX, then we don't need a tmpReg.
- //
- // See genSIMDIntrinsicDotProduct() for details on code sequence generated and
- // the need for scratch registers.
+ case SIMDIntrinsicOpEquality:
+ case SIMDIntrinsicOpInEquality:
+ // Need two SIMD registers as scratch.
+ // See genSIMDIntrinsicRelOp() for details on code sequence generate and
+ // the need for two scratch registers.
+ info->srcCount = 2;
info->internalFloatCount = 2;
info->setInternalCandidates(lsra, lsra->allSIMDRegs());
- }
- info->srcCount = 2;
- break;
-
- case SIMDIntrinsicGetItem:
- // This implements get_Item method. The sources are:
- // - the source SIMD struct
- // - index (which element to get)
- // The result is baseType of SIMD struct.
- info->srcCount = 2;
- op2 = tree->gtOp.gtOp2;
+ break;
- // If the index is a constant, mark it as contained.
- if (CheckImmedAndMakeContained(tree, op2))
- {
- info->srcCount = 1;
- }
+ case SIMDIntrinsicDotProduct:
+ if ((comp->getSIMDInstructionSet() == InstructionSet_SSE2) ||
+ (simdTree->gtOp.gtOp1->TypeGet() == TYP_SIMD32))
+ {
+ // For SSE, or AVX with 32-byte vectors, we also need an internal register as scratch.
+ // Further we need the targetReg and internal reg to be distinct registers.
+ // This is achieved by requesting two internal registers; thus one of them
+ // will be different from targetReg.
+ // Note that if this is a TYP_SIMD16 or smaller on AVX, then we don't need a tmpReg.
+ //
+ // See genSIMDIntrinsicDotProduct() for details on code sequence generated and
+ // the need for scratch registers.
+ info->internalFloatCount = 2;
+ info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+ }
+ info->srcCount = 2;
+ break;
- // If the index is not a constant, we will use the SIMD temp location to store the vector.
- // Otherwise, if the baseType is floating point, the targetReg will be a xmm reg and we
- // can use that in the process of extracting the element.
- //
- // If the index is a constant and base type is a small int we can use pextrw, but on AVX
- // we will need a temp if we are indexing into the upper half of the AVX register.
- // In all other cases with constant index, we need a temp xmm register to extract the
- // element if index is other than zero.
+ case SIMDIntrinsicGetItem:
+ // This implements get_Item method. The sources are:
+ // - the source SIMD struct
+ // - index (which element to get)
+ // The result is baseType of SIMD struct.
+ info->srcCount = 2;
+ op2 = tree->gtOp.gtOp2;
- if (!op2->IsCnsIntOrI())
- {
- (void) comp->getSIMDInitTempVarNum();
- }
- else if (!varTypeIsFloating(simdTree->gtSIMDBaseType))
- {
- bool needFloatTemp;
- if (varTypeIsSmallInt(simdTree->gtSIMDBaseType) && (comp->getSIMDInstructionSet() == InstructionSet_AVX))
+ // If the index is a constant, mark it as contained.
+ if (CheckImmedAndMakeContained(tree, op2))
{
- int byteShiftCnt = (int) op2->AsIntCon()->gtIconVal * genTypeSize(simdTree->gtSIMDBaseType);
- needFloatTemp = (byteShiftCnt >= 16);
+ info->srcCount = 1;
}
- else
+
+ // If the index is not a constant, we will use the SIMD temp location to store the vector.
+ // Otherwise, if the baseType is floating point, the targetReg will be a xmm reg and we
+ // can use that in the process of extracting the element.
+ //
+ // If the index is a constant and base type is a small int we can use pextrw, but on AVX
+ // we will need a temp if we are indexing into the upper half of the AVX register.
+ // In all other cases with constant index, we need a temp xmm register to extract the
+ // element if index is other than zero.
+
+ if (!op2->IsCnsIntOrI())
{
- needFloatTemp = !op2->IsIntegralConst(0);
+ (void)comp->getSIMDInitTempVarNum();
}
- if (needFloatTemp)
+ else if (!varTypeIsFloating(simdTree->gtSIMDBaseType))
{
- info->internalFloatCount = 1;
- info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+ bool needFloatTemp;
+ if (varTypeIsSmallInt(simdTree->gtSIMDBaseType) &&
+ (comp->getSIMDInstructionSet() == InstructionSet_AVX))
+ {
+ int byteShiftCnt = (int)op2->AsIntCon()->gtIconVal * genTypeSize(simdTree->gtSIMDBaseType);
+ needFloatTemp = (byteShiftCnt >= 16);
+ }
+ else
+ {
+ needFloatTemp = !op2->IsIntegralConst(0);
+ }
+ if (needFloatTemp)
+ {
+ info->internalFloatCount = 1;
+ info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+ }
}
- }
- break;
+ break;
- case SIMDIntrinsicSetX:
- case SIMDIntrinsicSetY:
- case SIMDIntrinsicSetZ:
- case SIMDIntrinsicSetW:
- // We need an internal integer register
- info->srcCount = 2;
- info->internalIntCount = 1;
- info->setInternalCandidates(lsra, lsra->allRegs(TYP_INT));
- break;
+ case SIMDIntrinsicSetX:
+ case SIMDIntrinsicSetY:
+ case SIMDIntrinsicSetZ:
+ case SIMDIntrinsicSetW:
+ // We need an internal integer register
+ info->srcCount = 2;
+ info->internalIntCount = 1;
+ info->setInternalCandidates(lsra, lsra->allRegs(TYP_INT));
+ break;
- case SIMDIntrinsicCast:
- info->srcCount = 1;
- break;
+ case SIMDIntrinsicCast:
+ info->srcCount = 1;
+ break;
- case SIMDIntrinsicShuffleSSE2:
- info->srcCount = 2;
- // Second operand is an integer constant and marked as contained.
- op2 = tree->gtOp.gtOp2;
- noway_assert(op2->IsCnsIntOrI());
- MakeSrcContained(tree, op2);
- break;
+ case SIMDIntrinsicShuffleSSE2:
+ info->srcCount = 2;
+ // Second operand is an integer constant and marked as contained.
+ op2 = tree->gtOp.gtOp2;
+ noway_assert(op2->IsCnsIntOrI());
+ MakeSrcContained(tree, op2);
+ break;
- case SIMDIntrinsicGetX:
- case SIMDIntrinsicGetY:
- case SIMDIntrinsicGetZ:
- case SIMDIntrinsicGetW:
- case SIMDIntrinsicGetOne:
- case SIMDIntrinsicGetZero:
- case SIMDIntrinsicGetCount:
- case SIMDIntrinsicGetAllOnes:
- assert(!"Get intrinsics should not be seen during Lowering.");
- unreached();
+ case SIMDIntrinsicGetX:
+ case SIMDIntrinsicGetY:
+ case SIMDIntrinsicGetZ:
+ case SIMDIntrinsicGetW:
+ case SIMDIntrinsicGetOne:
+ case SIMDIntrinsicGetZero:
+ case SIMDIntrinsicGetCount:
+ case SIMDIntrinsicGetAllOnes:
+ assert(!"Get intrinsics should not be seen during Lowering.");
+ unreached();
- default:
- noway_assert(!"Unimplemented SIMD node type.");
- unreached();
+ default:
+ noway_assert(!"Unimplemented SIMD node type.");
+ unreached();
}
}
#endif // FEATURE_SIMD
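
For readers following the SIMDIntrinsicGetItem register requirements above: the constant-index case reduces to the small decision sketched below. This is a standalone illustration with made-up parameter names, not JIT code, and it only covers constant indices on non-floating-point base types; a non-constant index instead spills the vector to the SIMD temp slot reserved via getSIMDInitTempVarNum().

static bool GetItemNeedsFloatTemp(int index, int elemSizeInBytes, bool isSmallIntElem, bool isAVX)
{
    if (isSmallIntElem && isAVX)
    {
        // pextrw only reaches the low 16 bytes, so indexing into the upper half of a
        // 32-byte YMM register needs a scratch XMM register to reposition the vector first.
        return (index * elemSizeInBytes) >= 16;
    }
    // Element 0 can be extracted directly; any other constant index needs a temp to shuffle with.
    return index != 0;
}
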
-
//------------------------------------------------------------------------
// TreeNodeInfoInitCast: Set the NodeInfo for a GT_CAST.
//
@@ -2478,24 +2459,23 @@ Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
// Return Value:
// None.
//
-void
-Lowering::TreeNodeInfoInitCast(GenTree* tree)
+void Lowering::TreeNodeInfoInitCast(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
// TODO-XArch-CQ: Int-To-Int conversions - castOp cannot be a memory op and must have an assigned register.
- // see CodeGen::genIntToIntCast()
+ // see CodeGen::genIntToIntCast()
info->srcCount = 1;
info->dstCount = 1;
// Non-overflow casts to/from float/double are done using SSE2 instructions
// and that allow the source operand to be either a reg or memop. Given the
- // fact that casts from small int to float/double are done as two-level casts,
+ // fact that casts from small int to float/double are done as two-level casts,
// the source operand is always guaranteed to be of size 4 or 8 bytes.
- var_types castToType = tree->CastToType();
- GenTreePtr castOp = tree->gtCast.CastOp();
- var_types castOpType = castOp->TypeGet();
+ var_types castToType = tree->CastToType();
+ GenTreePtr castOp = tree->gtCast.CastOp();
+ var_types castOpType = castOp->TypeGet();
if (tree->gtFlags & GTF_UNSIGNED)
{
castOpType = genUnsignedType(castOpType);
@@ -2510,7 +2490,7 @@ Lowering::TreeNodeInfoInitCast(GenTree* tree)
unsigned opSize = genTypeSize(castOpType);
assert(opSize == 4 || opSize == 8);
}
-#endif //DEBUG
+#endif // DEBUG
// U8 -> R8 conversion requires that the operand be in a register.
if (castOpType != TYP_ULONG)
@@ -2547,14 +2527,13 @@ Lowering::TreeNodeInfoInitCast(GenTree* tree)
}
}
-
void Lowering::LowerGCWriteBarrier(GenTree* tree)
{
assert(tree->OperGet() == GT_STOREIND);
- GenTreeStoreInd* dst = tree->AsStoreInd();
- GenTreePtr addr = dst->Addr();
- GenTreePtr src = dst->Data();
+ GenTreeStoreInd* dst = tree->AsStoreInd();
+ GenTreePtr addr = dst->Addr();
+ GenTreePtr src = dst->Data();
if (addr->OperGet() == GT_LEA)
{
@@ -2582,7 +2561,7 @@ void Lowering::LowerGCWriteBarrier(GenTree* tree)
#if defined(_TARGET_X86_)
- useOptimizedWriteBarrierHelper = true; // On x86, use the optimized write barriers by default.
+ useOptimizedWriteBarrierHelper = true; // On x86, use the optimized write barriers by default.
#ifdef DEBUG
GCInfo::WriteBarrierForm wbf = comp->codeGen->gcInfo.gcIsWriteBarrierCandidate(tree, src);
if (wbf == GCInfo::WBF_NoBarrier_CheckNotHeapInDebug) // This one is always a call to a C++ method.
@@ -2631,21 +2610,21 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
{
assert(indirTree->isIndir());
- GenTreePtr addr = indirTree->gtGetOp1();
+ GenTreePtr addr = indirTree->gtGetOp1();
TreeNodeInfo* info = &(indirTree->gtLsraInfo);
- GenTreePtr base = nullptr;
+ GenTreePtr base = nullptr;
GenTreePtr index = nullptr;
- unsigned mul, cns;
- bool rev;
- bool modifiedSources = false;
+ unsigned mul, cns;
+ bool rev;
+ bool modifiedSources = false;
#ifdef FEATURE_SIMD
// If indirTree is of TYP_SIMD12, don't mark addr as contained
// so that it always get computed to a register. This would
// mean codegen side logic doesn't need to handle all possible
// addr expressions that could be contained.
- //
+ //
// TODO-XArch-CQ: handle other addr mode expressions that could be marked
// as contained.
if (indirTree->TypeGet() == TYP_SIMD12)
@@ -2665,9 +2644,9 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
info->setInternalCandidates(m_lsra, m_lsra->allSIMDRegs());
- return ;
+ return;
}
-#endif //FEATURE_SIMD
+#endif // FEATURE_SIMD
// These nodes go into an addr mode:
// - GT_CLS_VAR_ADDR turns into a constant.
@@ -2677,8 +2656,7 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
// make this contained, it turns into a constant that goes into an addr mode
MakeSrcContained(indirTree, addr);
}
- else if (addr->IsCnsIntOrI() &&
- addr->AsIntConCommon()->FitsInAddrBase(comp) &&
+ else if (addr->IsCnsIntOrI() && addr->AsIntConCommon()->FitsInAddrBase(comp) &&
addr->gtLsraInfo.getDstCandidates(m_lsra) != RBM_VIRTUAL_STUB_PARAM)
{
// Amd64:
@@ -2702,16 +2680,16 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
else if (addr->OperGet() == GT_LEA)
{
GenTreeAddrMode* lea = addr->AsAddrMode();
- base = lea->Base();
- index = lea->Index();
+ base = lea->Base();
+ index = lea->Index();
m_lsra->clearOperandCounts(addr);
- // The srcCount is decremented because addr is now "contained",
- // then we account for the base and index below, if they are non-null.
+ // The srcCount is decremented because addr is now "contained",
+ // then we account for the base and index below, if they are non-null.
info->srcCount--;
}
- else if (comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &mul, &cns, true /*nogen*/)
- && !(modifiedSources = AreSourcesPossiblyModified(indirTree, base, index)))
+ else if (comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &mul, &cns, true /*nogen*/) &&
+ !(modifiedSources = AreSourcesPossiblyModified(indirTree, base, index)))
{
// An addressing mode will be constructed that may cause some
// nodes to not need a register, and cause others' lifetimes to be extended
@@ -2730,14 +2708,12 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
// up of simple arithmetic operators, and the code generator
// only traverses one leg of each node.
- bool foundBase = (base == nullptr);
- bool foundIndex = (index == nullptr);
- GenTreePtr nextChild = nullptr;
- for (GenTreePtr child = addr;
- child != nullptr && !child->OperIsLeaf();
- child = nextChild)
+ bool foundBase = (base == nullptr);
+ bool foundIndex = (index == nullptr);
+ GenTreePtr nextChild = nullptr;
+ for (GenTreePtr child = addr; child != nullptr && !child->OperIsLeaf(); child = nextChild)
{
- nextChild = nullptr;
+ nextChild = nullptr;
GenTreePtr op1 = child->gtOp.gtOp1;
GenTreePtr op2 = (child->OperIsBinary()) ? child->gtOp.gtOp2 : nullptr;
@@ -2796,7 +2772,7 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
else
{
// it is nothing but a plain indir
- info->srcCount--; //base gets added in below
+ info->srcCount--; // base gets added in below
base = addr;
}
@@ -2810,12 +2786,11 @@ void Lowering::SetIndirAddrOpCounts(GenTreePtr indirTree)
info->srcCount++;
}
}
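
SetIndirAddrOpCounts above is folding the address computation into an x86 addressing mode. As a rough standalone sketch (hypothetical function, not the JIT's genCreateAddrMode), the base/index/mul/cns pieces it recovers are simply the components of a single memory operand, which is why the nodes that built the address no longer need registers of their own:

static char* EffectiveAddress(char* base, long long index, unsigned scale, int displacement)
{
    // The x86 memory operand [base + index*scale + disp] computes exactly this value in one
    // instruction (e.g. mov rax, [rcx + rdx*8 + 16]); scale is limited to 1, 2, 4 or 8.
    return base + (index * scale) + displacement;
}
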
-
void Lowering::LowerCmp(GenTreePtr tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
-
+
info->srcCount = 2;
info->dstCount = 1;
@@ -2823,10 +2798,10 @@ void Lowering::LowerCmp(GenTreePtr tree)
info->setDstCandidates(m_lsra, RBM_BYTE_REGS);
#endif // _TARGET_X86_
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtOp.gtOp2;
- var_types op1Type = op1->TypeGet();
- var_types op2Type = op2->TypeGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtOp.gtOp2;
+ var_types op1Type = op1->TypeGet();
+ var_types op2Type = op2->TypeGet();
#if !defined(_TARGET_64BIT_)
// Long compares will consume GT_LONG nodes, each of which produces two results.
@@ -2845,13 +2820,13 @@ void Lowering::LowerCmp(GenTreePtr tree)
// If either of op1 or op2 is floating point values, then we need to use
// ucomiss or ucomisd to compare, both of which support the following form
// ucomis[s|d] xmm, xmm/mem. That is, only the second operand can be a memory
- // op.
+ // op.
//
// Second operand is a memory Op: Note that depending on comparison operator,
- // the operands of ucomis[s|d] need to be reversed. Therefore, either op1 or
+ // the operands of ucomis[s|d] need to be reversed. Therefore, either op1 or
// op2 can be a memory op depending on the comparison operator.
if (varTypeIsFloating(op1Type))
- {
+ {
// The type of the operands has to be the same and no implicit conversions at this stage.
assert(op1Type == op2Type);
@@ -2881,8 +2856,7 @@ void Lowering::LowerCmp(GenTreePtr tree)
{
MakeSrcContained(tree, otherOp);
}
- else if (otherOp->isMemoryOp() &&
- ((otherOp == op2) || IsSafeToContainMem(tree, otherOp)))
+ else if (otherOp->isMemoryOp() && ((otherOp == op2) || IsSafeToContainMem(tree, otherOp)))
{
MakeSrcContained(tree, otherOp);
}
@@ -2898,14 +2872,14 @@ void Lowering::LowerCmp(GenTreePtr tree)
// TODO-XArch-CQ: factor out cmp optimization in 'genCondSetFlags' to be used here
// or in other backend.
-
+
bool hasShortCast = false;
if (CheckImmedAndMakeContained(tree, op2))
{
- bool op1CanBeContained = (op1Type == op2Type);
+ bool op1CanBeContained = (op1Type == op2Type);
if (!op1CanBeContained)
{
- if (genTypeSize(op1Type) == genTypeSize(op2Type))
+ if (genTypeSize(op1Type) == genTypeSize(op2Type))
{
// The constant is of the correct size, but we don't have an exact type match
// We can treat the isMemoryOp as "contained"
@@ -2920,39 +2894,40 @@ void Lowering::LowerCmp(GenTreePtr tree)
GenTreeIntCon* con = op2->AsIntCon();
ssize_t ival = con->gtIconVal;
- bool isEqualityCompare = (tree->gtOper == GT_EQ || tree->gtOper == GT_NE);
- bool useTest = isEqualityCompare && (ival == 0);
+ bool isEqualityCompare = (tree->gtOper == GT_EQ || tree->gtOper == GT_NE);
+ bool useTest = isEqualityCompare && (ival == 0);
if (!useTest)
{
- ssize_t lo = 0; // minimum imm value allowed for cmp reg,imm
- ssize_t hi = 0; // maximum imm value allowed for cmp reg,imm
+ ssize_t lo = 0; // minimum imm value allowed for cmp reg,imm
+ ssize_t hi = 0; // maximum imm value allowed for cmp reg,imm
bool isUnsigned = false;
- switch (op1Type) {
- case TYP_BOOL:
- op1Type = TYP_UBYTE;
- __fallthrough;
- case TYP_UBYTE:
- lo = 0;
- hi = 0x7f;
- isUnsigned = true;
- break;
- case TYP_BYTE:
- lo = -0x80;
- hi = 0x7f;
- break;
- case TYP_CHAR:
- lo = 0;
- hi = 0x7fff;
- isUnsigned = true;
- break;
- case TYP_SHORT:
- lo = -0x8000;
- hi = 0x7fff;
- break;
- default:
- unreached();
+ switch (op1Type)
+ {
+ case TYP_BOOL:
+ op1Type = TYP_UBYTE;
+ __fallthrough;
+ case TYP_UBYTE:
+ lo = 0;
+ hi = 0x7f;
+ isUnsigned = true;
+ break;
+ case TYP_BYTE:
+ lo = -0x80;
+ hi = 0x7f;
+ break;
+ case TYP_CHAR:
+ lo = 0;
+ hi = 0x7fff;
+ isUnsigned = true;
+ break;
+ case TYP_SHORT:
+ lo = -0x8000;
+ hi = 0x7fff;
+ break;
+ default:
+ unreached();
}
if ((ival >= lo) && (ival <= hi))
@@ -2975,8 +2950,8 @@ void Lowering::LowerCmp(GenTreePtr tree)
{
MakeSrcContained(tree, op1);
}
- else
- {
+ else
+ {
bool op1IsMadeContained = false;
// When op1 is a GT_AND we can often generate a single "test" instruction
@@ -2985,7 +2960,7 @@ void Lowering::LowerCmp(GenTreePtr tree)
// This instruction can only be used for equality or inequality comparisons.
// and we must have a compare against zero.
//
- // If we have a positive test for a single bit we can reverse the condition and
+ // If we have a positive test for a single bit we can reverse the condition and
// make the compare be against zero
//
// Example:
@@ -3011,7 +2986,7 @@ void Lowering::LowerCmp(GenTreePtr tree)
if ((relOp2CnsVal == andOp2CnsVal) && isPow2(andOp2CnsVal))
{
// We have a single bit test, so now we can change the
- // tree into the alternative form,
+ // tree into the alternative form,
// so that we can generate a test instruction.
// Reverse the equality comparison
@@ -3046,47 +3021,46 @@ void Lowering::LowerCmp(GenTreePtr tree)
switch (andOp1->TypeGet())
{
- default:
- break;
- case TYP_BYTE:
- newIconVal = (signed char)andOp2CnsVal;
- containable = FitsIn<signed char>(andOp2CnsVal);
- break;
- case TYP_BOOL:
- case TYP_UBYTE:
- newIconVal = andOp2CnsVal & 0xFF;
- containable = true;
- break;
- case TYP_SHORT:
- newIconVal = (signed short)andOp2CnsVal;
- containable = FitsIn<signed short>(andOp2CnsVal);
- break;
- case TYP_CHAR:
- newIconVal = andOp2CnsVal & 0xFFFF;
- containable = true;
- break;
- case TYP_INT:
- newIconVal = (INT32)andOp2CnsVal;
- containable = FitsIn<INT32>(andOp2CnsVal);
- break;
- case TYP_UINT:
- newIconVal = andOp2CnsVal & 0xFFFFFFFF;
- containable = true;
- break;
+ default:
+ break;
+ case TYP_BYTE:
+ newIconVal = (signed char)andOp2CnsVal;
+ containable = FitsIn<signed char>(andOp2CnsVal);
+ break;
+ case TYP_BOOL:
+ case TYP_UBYTE:
+ newIconVal = andOp2CnsVal & 0xFF;
+ containable = true;
+ break;
+ case TYP_SHORT:
+ newIconVal = (signed short)andOp2CnsVal;
+ containable = FitsIn<signed short>(andOp2CnsVal);
+ break;
+ case TYP_CHAR:
+ newIconVal = andOp2CnsVal & 0xFFFF;
+ containable = true;
+ break;
+ case TYP_INT:
+ newIconVal = (INT32)andOp2CnsVal;
+ containable = FitsIn<INT32>(andOp2CnsVal);
+ break;
+ case TYP_UINT:
+ newIconVal = andOp2CnsVal & 0xFFFFFFFF;
+ containable = true;
+ break;
#ifdef _TARGET_64BIT_
- case TYP_LONG:
- newIconVal = (INT64)andOp2CnsVal;
- containable = true;
- break;
- case TYP_ULONG:
- newIconVal = (UINT64)andOp2CnsVal;
- containable = true;
- break;
+ case TYP_LONG:
+ newIconVal = (INT64)andOp2CnsVal;
+ containable = true;
+ break;
+ case TYP_ULONG:
+ newIconVal = (UINT64)andOp2CnsVal;
+ containable = true;
+ break;
#endif //_TARGET_64BIT_
}
-
if (containable)
{
andOp2->gtType = andOp1->TypeGet();
@@ -3112,50 +3086,48 @@ void Lowering::LowerCmp(GenTreePtr tree)
}
else if (op1->OperGet() == GT_CAST)
{
- //If the op1 is a cast operation, and cast type is one byte sized unsigned type,
- //we can directly use the number in register, instead of doing an extra cast step.
- var_types dstType = op1->CastToType();
- bool isUnsignedDst = varTypeIsUnsigned(dstType);
- emitAttr castSize = EA_ATTR(genTypeSize(dstType));
- GenTreePtr castOp1 = op1->gtOp.gtOp1;
- genTreeOps castOp1Oper = castOp1->OperGet();
- bool safeOper = false;
+ // If the op1 is a cast operation, and cast type is one byte sized unsigned type,
+ // we can directly use the number in register, instead of doing an extra cast step.
+ var_types dstType = op1->CastToType();
+ bool isUnsignedDst = varTypeIsUnsigned(dstType);
+ emitAttr castSize = EA_ATTR(genTypeSize(dstType));
+ GenTreePtr castOp1 = op1->gtOp.gtOp1;
+ genTreeOps castOp1Oper = castOp1->OperGet();
+ bool safeOper = false;
// It is not always safe to change the gtType of 'castOp1' to TYP_UBYTE
// For example when 'castOp1Oper' is a GT_RSZ or GT_RSH then we are shifting
// bits from the left into the lower bits. If we change the type to a TYP_UBYTE
// we will instead generate a byte sized shift operation: shr al, 24
// For the following ALU operations it is safe to change the gtType to the
- // smaller type:
+ // smaller type:
//
- if ((castOp1Oper == GT_CNS_INT) ||
- (castOp1Oper == GT_CALL) || // the return value from a Call
- (castOp1Oper == GT_LCL_VAR) ||
- castOp1->OperIsLogical() || // GT_AND, GT_OR, GT_XOR
- castOp1->isMemoryOp() ) // isIndir() || isLclField();
+ if ((castOp1Oper == GT_CNS_INT) || (castOp1Oper == GT_CALL) || // the return value from a Call
+ (castOp1Oper == GT_LCL_VAR) || castOp1->OperIsLogical() || // GT_AND, GT_OR, GT_XOR
+ castOp1->isMemoryOp()) // isIndir() || isLclField();
{
safeOper = true;
}
- if ((castSize == EA_1BYTE) && isUnsignedDst && // Unsigned cast to TYP_UBYTE
- safeOper && // Must be a safe operation
- !op1->gtOverflow() ) // Must not be an overflow checking cast
+ if ((castSize == EA_1BYTE) && isUnsignedDst && // Unsigned cast to TYP_UBYTE
+ safeOper && // Must be a safe operation
+ !op1->gtOverflow()) // Must not be an overflow checking cast
{
- // Currently all of the Oper accepted as 'safeOper' are
- // non-overflow checking operations. If we were to add
- // an overflow checking operation then this assert needs
+ // Currently all of the Oper accepted as 'safeOper' are
+ // non-overflow checking operations. If we were to add
+ // an overflow checking operation then this assert needs
// to be moved above to guard entry to this block.
- //
- assert(!castOp1->gtOverflowEx()); // Must not be an overflow checking operation
-
+ //
+ assert(!castOp1->gtOverflowEx()); // Must not be an overflow checking operation
+
GenTreePtr removeTreeNode = op1;
- tree->gtOp.gtOp1 = castOp1;
- op1 = castOp1;
- castOp1->gtType = TYP_UBYTE;
+ tree->gtOp.gtOp1 = castOp1;
+ op1 = castOp1;
+ castOp1->gtType = TYP_UBYTE;
// trim down the value if castOp1 is an int constant since its type changed to UBYTE.
if (castOp1Oper == GT_CNS_INT)
- {
+ {
castOp1->gtIntCon.gtIconVal = (UINT8)castOp1->gtIntCon.gtIconVal;
}
@@ -3164,11 +3136,11 @@ void Lowering::LowerCmp(GenTreePtr tree)
ssize_t val = (ssize_t)op2->AsIntConCommon()->IconValue();
if (val >= 0 && val <= 255)
{
- op2->gtType = TYP_UBYTE;
+ op2->gtType = TYP_UBYTE;
tree->gtFlags |= GTF_UNSIGNED;
-
- //right now the op1's type is the same as op2's type.
- //if op1 is MemoryOp, we should mark op1 as a contained node.
+
+ // right now the op1's type is the same as op2's type.
+ // if op1 is MemoryOp, we should mark op1 as a contained node.
if (castOp1->isMemoryOp())
{
MakeSrcContained(tree, op1);
@@ -3180,7 +3152,8 @@ void Lowering::LowerCmp(GenTreePtr tree)
#ifdef DEBUG
if (comp->verbose)
{
- printf("LowerCmp: Removing a GT_CAST to TYP_UBYTE and changing castOp1->gtType to TYP_UBYTE\n");
+ printf("LowerCmp: Removing a GT_CAST to TYP_UBYTE and changing castOp1->gtType to "
+ "TYP_UBYTE\n");
comp->gtDispTree(tree);
}
#endif
@@ -3195,37 +3168,37 @@ void Lowering::LowerCmp(GenTreePtr tree)
}
}
}
- else if (op1Type == op2Type)
- {
- if (op2->isMemoryOp())
- {
- MakeSrcContained(tree, op2);
- }
- else if (op1->isMemoryOp() && IsSafeToContainMem(tree, op1))
- {
- MakeSrcContained(tree, op1);
- }
+ else if (op1Type == op2Type)
+ {
+ if (op2->isMemoryOp())
+ {
+ MakeSrcContained(tree, op2);
+ }
+ else if (op1->isMemoryOp() && IsSafeToContainMem(tree, op1))
+ {
+ MakeSrcContained(tree, op1);
+ }
else
{
// One of op1 or op2 could be marked as reg optional
- // to indicate that codegen can still generate code
+ // to indicate that codegen can still generate code
// if one of them is on stack.
SetRegOptional(PreferredRegOptionalOperand(tree));
}
- if (varTypeIsSmall(op1Type) && varTypeIsUnsigned(op1Type))
- {
- // Mark the tree as doing unsigned comparison if
- // both the operands are small and unsigned types.
- // Otherwise we will end up performing a signed comparison
- // of two small unsigned values without zero extending them to
- // TYP_INT size and which is incorrect.
- tree->gtFlags |= GTF_UNSIGNED;
- }
- }
+ if (varTypeIsSmall(op1Type) && varTypeIsUnsigned(op1Type))
+ {
+ // Mark the tree as doing unsigned comparison if
+ // both the operands are small and unsigned types.
+ // Otherwise we will end up performing a signed comparison
+ // of two small unsigned values without zero extending them to
+ // TYP_INT size and which is incorrect.
+ tree->gtFlags |= GTF_UNSIGNED;
+ }
+ }
}
-/* Lower GT_CAST(srcType, DstType) nodes.
+/* Lower GT_CAST(srcType, DstType) nodes.
*
* Casts from small int type to float/double are transformed as follows:
* GT_CAST(byte, float/double) = GT_CAST(GT_CAST(byte, int32), float/double)
@@ -3233,7 +3206,7 @@ void Lowering::LowerCmp(GenTreePtr tree)
* GT_CAST(int16, float/double) = GT_CAST(GT_CAST(int16, int32), float/double)
* GT_CAST(uint16, float/double) = GT_CAST(GT_CAST(uint16, int32), float/double)
*
- * SSE2 conversion instructions operate on signed integers. casts from Uint32/Uint64
+ * SSE2 conversion instructions operate on signed integers. casts from Uint32/Uint64
* are morphed as follows by front-end and hence should not be seen here.
* GT_CAST(uint32, float/double) = GT_CAST(GT_CAST(uint32, long), float/double)
* GT_CAST(uint64, float) = GT_CAST(GT_CAST(uint64, double), float)
@@ -3247,11 +3220,11 @@ void Lowering::LowerCmp(GenTreePtr tree)
*
 * SSE2 has instructions to convert a float/double value into a signed 32/64-bit
* integer. The above transformations help us to leverage those instructions.
- *
+ *
* Note that for the following conversions we still depend on helper calls and
- * don't expect to see them here.
+ * don't expect to see them here.
* i) GT_CAST(float/double, uint64)
- * ii) GT_CAST(float/double, int type with overflow detection)
+ * ii) GT_CAST(float/double, int type with overflow detection)
*
* TODO-XArch-CQ: (Low-pri): Jit64 generates in-line code of 8 instructions for (i) above.
* There are hardly any occurrences of this conversion operation in platform
@@ -3260,16 +3233,16 @@ void Lowering::LowerCmp(GenTreePtr tree)
* system.windows.forms, scimark, fractals, bio mums). If we ever find evidence that
 * doing this optimization is a win, we should consider generating in-lined code.
*/
-void Lowering::LowerCast( GenTreePtr* ppTree)
+void Lowering::LowerCast(GenTreePtr* ppTree)
{
- GenTreePtr tree = *ppTree;
+ GenTreePtr tree = *ppTree;
assert(tree->OperGet() == GT_CAST);
- GenTreePtr op1 = tree->gtOp.gtOp1;
- var_types dstType = tree->CastToType();
- var_types srcType = op1->TypeGet();
- var_types tmpType = TYP_UNDEF;
- bool srcUns = false;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ var_types dstType = tree->CastToType();
+ var_types srcType = op1->TypeGet();
+ var_types tmpType = TYP_UNDEF;
+ bool srcUns = false;
// force the srcType to unsigned if GT_UNSIGNED flag is set
if (tree->gtFlags & GTF_UNSIGNED)
@@ -3277,7 +3250,7 @@ void Lowering::LowerCast( GenTreePtr* ppTree)
srcType = genUnsignedType(srcType);
}
- // We should never see the following casts as they are expected to be lowered
+ // We should never see the following casts as they are expected to be lowered
// appropriately or converted into helper calls by front-end.
// srcType = float/double dstType = * and overflow detecting cast
// Reason: must be converted to a helper call
@@ -3317,7 +3290,7 @@ void Lowering::LowerCast( GenTreePtr* ppTree)
if (tmpType != TYP_UNDEF)
{
GenTreePtr tmp = comp->gtNewCastNode(tmpType, op1, tmpType);
- tmp->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED|GTF_OVERFLOW|GTF_EXCEPT));
+ tmp->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED | GTF_OVERFLOW | GTF_EXCEPT));
tree->gtFlags &= ~GTF_UNSIGNED;
tree->gtOp.gtOp1 = tmp;
@@ -3325,19 +3298,19 @@ void Lowering::LowerCast( GenTreePtr* ppTree)
}
}
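
Read as ordinary C++ rather than GenTree nodes, the two-level casts described in the comment block above look like the sketch below (hypothetical helper names). The intermediate cast exists because SSE2's cvtsi2ss/cvtsi2sd only convert signed integers, so small and unsigned sources are widened to a signed type first:

static double ByteToDouble(unsigned char b)
{
    int widened = (int)b;   // GT_CAST(ubyte, int32): widen to a 4-byte signed value
    return (double)widened; // GT_CAST(int32, double): a single cvtsi2sd
}

static double UInt32ToDouble(unsigned int u)
{
    long long widened = (long long)u; // GT_CAST(uint32, long): inserted by the front-end morph
    return (double)widened;           // GT_CAST(long, double)
}
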
- //----------------------------------------------------------------------------------------------
- // Returns true if this tree is bin-op of a GT_STOREIND of the following form
- // storeInd(subTreeA, binOp(gtInd(subTreeA), subtreeB)) or
- // storeInd(subTreeA, binOp(subtreeB, gtInd(subTreeA)) in case of commutative bin-ops
- //
- // The above form for storeInd represents a read-modify-write memory binary operation.
- //
- // Parameters
- // tree - GentreePtr of binOp
- //
- // Return Value
- // True if 'tree' is part of a RMW memory operation pattern
- //
+//----------------------------------------------------------------------------------------------
+// Returns true if this tree is bin-op of a GT_STOREIND of the following form
+// storeInd(subTreeA, binOp(gtInd(subTreeA), subtreeB)) or
+// storeInd(subTreeA, binOp(subtreeB, gtInd(subTreeA)) in case of commutative bin-ops
+//
+// The above form for storeInd represents a read-modify-write memory binary operation.
+//
+// Parameters
+// tree - GentreePtr of binOp
+//
+// Return Value
+// True if 'tree' is part of a RMW memory operation pattern
+//
bool Lowering::IsBinOpInRMWStoreInd(GenTreePtr tree)
{
// Must be a non floating-point type binary operator since SSE2 doesn't support RMW memory ops
@@ -3361,64 +3334,64 @@ bool Lowering::IsBinOpInRMWStoreInd(GenTreePtr tree)
// cache the result in GT_STOREIND node so that while lowering GT_STOREIND
// we can use the result.
GenTreePtr indirCandidate = nullptr;
- GenTreePtr indirOpSource = nullptr;
+ GenTreePtr indirOpSource = nullptr;
return IsRMWMemOpRootedAtStoreInd(parent, &indirCandidate, &indirOpSource);
}
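
The read-modify-write shape that IsBinOpInRMWStoreInd (and IsRMWMemOpRootedAtStoreInd, documented below) is looking for corresponds to a very ordinary statement; a minimal illustration with hypothetical names:

static void RmwAdd(int* cell, int value)
{
    // storeInd(cell, ADD(ind(cell), value)): the load and the store address the same location,
    // so codegen can emit a single instruction of the form "add dword ptr [cell], value"
    // instead of a separate load, add and store.
    *cell = *cell + value;
}
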
-
- //----------------------------------------------------------------------------------------------
- // This method recognizes the case where we have a treeNode with the following structure:
- // storeInd(IndirDst, binOp(gtInd(IndirDst), indirOpSource)) OR
- // storeInd(IndirDst, binOp(indirOpSource, gtInd(IndirDst)) in case of commutative operations OR
- // storeInd(IndirDst, unaryOp(gtInd(IndirDst)) in case of unary operations
- //
- // Terminology:
- // indirDst = memory write of an addr mode (i.e. storeind destination)
- // indirSrc = value being written to memory (i.e. storeind source which could either be a binary or unary op)
- // indirCandidate = memory read i.e. a gtInd of an addr mode
- // indirOpSource = source operand used in binary/unary op (i.e. source operand of indirSrc node)
- //
- // In x86/x64 this storeInd pattern can be effectively encoded in a single instruction of the
- // following form in case of integer operations:
- // binOp [addressing mode], RegIndirOpSource
- // binOp [addressing mode], immediateVal
- // where RegIndirOpSource is the register where indirOpSource was computed.
- //
- //   Right now, we recognize a few cases:
- // a) The gtInd child is a lea/lclVar/lclVarAddr/clsVarAddr/constant
- // b) BinOp is either add, sub, xor, or, and, shl, rsh, rsz.
- // c) unaryOp is either not/neg
- //
- // Implementation Note: The following routines need to be in sync for RMW memory op optimization
- // to be correct and functional.
- // IndirsAreEquivalent()
- // NodesAreEquivalentLeaves()
- // Codegen of GT_STOREIND and genCodeForShiftRMW()
- // emitInsRMW()
- //
- // TODO-CQ: Enable support for more complex indirections (if needed) or use the value numbering
- // package to perform more complex tree recognition.
- //
- // TODO-XArch-CQ: Add support for RMW of lcl fields (e.g. lclfield binop= source)
- //
- // Parameters:
- // tree - GT_STOREIND node
- // outIndirCandidate - out param set to indirCandidate as described above
- //   outIndirOpSource - out param set to indirOpSource as described above
- //
- // Return value
- // True if there is a RMW memory operation rooted at a GT_STOREIND tree
- // and out params indirCandidate and indirOpSource are set to non-null values.
- // Otherwise, returns false with indirCandidate and indirOpSource set to null.
- // Also updates flags of GT_STOREIND tree with its RMW status.
- //
-bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr *outIndirCandidate, GenTreePtr *outIndirOpSource)
+
+//----------------------------------------------------------------------------------------------
+// This method recognizes the case where we have a treeNode with the following structure:
+// storeInd(IndirDst, binOp(gtInd(IndirDst), indirOpSource)) OR
+// storeInd(IndirDst, binOp(indirOpSource, gtInd(IndirDst)) in case of commutative operations OR
+// storeInd(IndirDst, unaryOp(gtInd(IndirDst)) in case of unary operations
+//
+// Terminology:
+// indirDst = memory write of an addr mode (i.e. storeind destination)
+// indirSrc = value being written to memory (i.e. storeind source which could either be a binary or unary op)
+// indirCandidate = memory read i.e. a gtInd of an addr mode
+// indirOpSource = source operand used in binary/unary op (i.e. source operand of indirSrc node)
+//
+// In x86/x64 this storeInd pattern can be effectively encoded in a single instruction of the
+// following form in case of integer operations:
+// binOp [addressing mode], RegIndirOpSource
+// binOp [addressing mode], immediateVal
+// where RegIndirOpSource is the register where indirOpSource was computed.
+//
+//   Right now, we recognize a few cases:
+// a) The gtInd child is a lea/lclVar/lclVarAddr/clsVarAddr/constant
+// b) BinOp is either add, sub, xor, or, and, shl, rsh, rsz.
+// c) unaryOp is either not/neg
+//
+// Implementation Note: The following routines need to be in sync for RMW memory op optimization
+// to be correct and functional.
+// IndirsAreEquivalent()
+// NodesAreEquivalentLeaves()
+// Codegen of GT_STOREIND and genCodeForShiftRMW()
+// emitInsRMW()
+//
+// TODO-CQ: Enable support for more complex indirections (if needed) or use the value numbering
+// package to perform more complex tree recognition.
+//
+// TODO-XArch-CQ: Add support for RMW of lcl fields (e.g. lclfield binop= source)
+//
+// Parameters:
+// tree - GT_STOREIND node
+// outIndirCandidate - out param set to indirCandidate as described above
+//   outIndirOpSource - out param set to indirOpSource as described above
+//
+// Return value
+// True if there is a RMW memory operation rooted at a GT_STOREIND tree
+// and out params indirCandidate and indirOpSource are set to non-null values.
+// Otherwise, returns false with indirCandidate and indirOpSource set to null.
+// Also updates flags of GT_STOREIND tree with its RMW status.
+//
+bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr* outIndirCandidate, GenTreePtr* outIndirOpSource)
{
assert(!varTypeIsFloating(tree));
assert(outIndirCandidate != nullptr);
assert(outIndirOpSource != nullptr);
-
+
*outIndirCandidate = nullptr;
- *outIndirOpSource = nullptr;
+ *outIndirOpSource = nullptr;
// Early out if storeInd is already known to be a non-RMW memory op
GenTreeStoreInd* storeInd = tree->AsStoreInd();
@@ -3429,7 +3402,7 @@ bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr *outIndirC
GenTreePtr indirDst = storeInd->gtGetOp1();
GenTreePtr indirSrc = storeInd->gtGetOp2();
- genTreeOps oper = indirSrc->OperGet();
+ genTreeOps oper = indirSrc->OperGet();
// Early out if it is already known to be a RMW memory op
if (storeInd->IsRMWMemoryOp())
@@ -3439,13 +3412,13 @@ bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr *outIndirC
if (storeInd->IsRMWDstOp1())
{
*outIndirCandidate = indirSrc->gtGetOp1();
- *outIndirOpSource = indirSrc->gtGetOp2();
+ *outIndirOpSource = indirSrc->gtGetOp2();
}
else
{
assert(storeInd->IsRMWDstOp2());
*outIndirCandidate = indirSrc->gtGetOp2();
- *outIndirOpSource = indirSrc->gtGetOp1();
+ *outIndirOpSource = indirSrc->gtGetOp1();
}
assert(IndirsAreEquivalent(*outIndirCandidate, storeInd));
}
@@ -3454,7 +3427,7 @@ bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr *outIndirC
assert(GenTree::OperIsUnary(oper));
assert(IndirsAreEquivalent(indirSrc->gtGetOp1(), storeInd));
*outIndirCandidate = indirSrc->gtGetOp1();
- *outIndirOpSource = indirSrc->gtGetOp1();
+ *outIndirOpSource = indirSrc->gtGetOp1();
}
return true;
@@ -3464,18 +3437,15 @@ bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr *outIndirC
assert(storeInd->IsRMWStatusUnknown());
// Early out if indirDst is not one of the supported memory operands.
- if (indirDst->OperGet() != GT_LEA &&
- indirDst->OperGet() != GT_LCL_VAR &&
- indirDst->OperGet() != GT_LCL_VAR_ADDR &&
- indirDst->OperGet() != GT_CLS_VAR_ADDR &&
- indirDst->OperGet() != GT_CNS_INT)
+ if (indirDst->OperGet() != GT_LEA && indirDst->OperGet() != GT_LCL_VAR && indirDst->OperGet() != GT_LCL_VAR_ADDR &&
+ indirDst->OperGet() != GT_CLS_VAR_ADDR && indirDst->OperGet() != GT_CNS_INT)
{
storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_ADDR);
return false;
}
-
+
// We can not use Read-Modify-Write instruction forms with overflow checking instructions
- // because we are not allowed to modify the target until after the overflow check.
+ // because we are not allowed to modify the target until after the overflow check.
if (indirSrc->gtOverflowEx())
{
storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER);
@@ -3485,19 +3455,14 @@ bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr *outIndirC
if (GenTree::OperIsBinary(oper))
{
// Return if binary op is not one of the supported operations for RMW of memory.
- if (oper != GT_ADD &&
- oper != GT_SUB &&
- oper != GT_AND &&
- oper != GT_OR &&
- oper != GT_XOR &&
+ if (oper != GT_ADD && oper != GT_SUB && oper != GT_AND && oper != GT_OR && oper != GT_XOR &&
!GenTree::OperIsShiftOrRotate(oper))
{
storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER);
return false;
}
- if (GenTree::OperIsShiftOrRotate(oper) &&
- varTypeIsSmall(storeInd))
+ if (GenTree::OperIsShiftOrRotate(oper) && varTypeIsSmall(storeInd))
{
// In ldind, integer values smaller than 4 bytes, a boolean, or a character are converted to 4 bytes
// by sign or zero-extension as appropriate. If we directly shift the short type data using sar, we
@@ -3506,27 +3471,23 @@ bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr *outIndirC
return false;
}
- GenTreePtr rhsLeft = indirSrc->gtGetOp1();
+ GenTreePtr rhsLeft = indirSrc->gtGetOp1();
GenTreePtr rhsRight = indirSrc->gtGetOp2();
// The most common case is rhsRight is GT_IND
- if (GenTree::OperIsCommutative(oper) &&
- rhsRight->OperGet() == GT_IND &&
- rhsRight->gtGetOp1()->OperGet() == indirDst->OperGet() &&
- IndirsAreEquivalent(rhsRight, storeInd))
+ if (GenTree::OperIsCommutative(oper) && rhsRight->OperGet() == GT_IND &&
+ rhsRight->gtGetOp1()->OperGet() == indirDst->OperGet() && IndirsAreEquivalent(rhsRight, storeInd))
{
*outIndirCandidate = rhsRight;
- *outIndirOpSource = rhsLeft;
+ *outIndirOpSource = rhsLeft;
storeInd->SetRMWStatus(STOREIND_RMW_DST_IS_OP2);
return true;
}
- else if (rhsLeft->OperGet() == GT_IND &&
- rhsLeft->gtGetOp1()->OperGet() == indirDst->OperGet() &&
- IsSafeToContainMem(indirSrc, rhsLeft) &&
- IndirsAreEquivalent(rhsLeft, storeInd))
+ else if (rhsLeft->OperGet() == GT_IND && rhsLeft->gtGetOp1()->OperGet() == indirDst->OperGet() &&
+ IsSafeToContainMem(indirSrc, rhsLeft) && IndirsAreEquivalent(rhsLeft, storeInd))
{
*outIndirCandidate = rhsLeft;
- *outIndirOpSource = rhsRight;
+ *outIndirOpSource = rhsRight;
storeInd->SetRMWStatus(STOREIND_RMW_DST_IS_OP1);
return true;
}
@@ -3536,7 +3497,7 @@ bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr *outIndirC
}
else if (GenTree::OperIsUnary(oper))
{
- // Nodes other than GT_NOT and GT_NEG are not yet supported.
+ // Nodes other than GT_NOT and GT_NEG are not yet supported.
if (oper != GT_NOT && oper != GT_NEG)
{
storeInd->SetRMWStatus(STOREIND_RMW_UNSUPPORTED_OPER);
@@ -3555,7 +3516,7 @@ bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr *outIndirC
{
// src and dest are the same in case of unary ops
*outIndirCandidate = indirCandidate;
- *outIndirOpSource = indirCandidate;
+ *outIndirOpSource = indirCandidate;
storeInd->SetRMWStatus(STOREIND_RMW_DST_IS_OP1);
return true;
}
@@ -3567,20 +3528,20 @@ bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr *outIndirC
return false;
}
- //--------------------------------------------------------------------------------------------
- // SetStoreIndOpCountsIfRMWMemOp checks to see if there is a RMW memory operation rooted at
- // GT_STOREIND node and if so will mark register requirements for nodes under storeInd so
- // that CodeGen will generate a single instruction of the form:
- //
- // binOp [addressing mode], reg
- //
- // Parameters
- // storeInd - GT_STOREIND node
- //
- // Return value
- // True, if RMW memory op tree pattern is recognized and op counts are set.
- // False otherwise.
- //
+//--------------------------------------------------------------------------------------------
+// SetStoreIndOpCountsIfRMWMemOp checks to see if there is a RMW memory operation rooted at
+// GT_STOREIND node and if so will mark register requirements for nodes under storeInd so
+// that CodeGen will generate a single instruction of the form:
+//
+// binOp [addressing mode], reg
+//
+// Parameters
+// storeInd - GT_STOREIND node
+//
+// Return value
+// True, if RMW memory op tree pattern is recognized and op counts are set.
+// False otherwise.
+//
bool Lowering::SetStoreIndOpCountsIfRMWMemOp(GenTreePtr storeInd)
{
assert(storeInd->OperGet() == GT_STOREIND);
@@ -3595,18 +3556,19 @@ bool Lowering::SetStoreIndOpCountsIfRMWMemOp(GenTreePtr storeInd)
// indirOpSource = source operand used in binary/unary op (i.e. source operand of indirSrc node)
GenTreePtr indirCandidate = nullptr;
- GenTreePtr indirOpSource = nullptr;
+ GenTreePtr indirOpSource = nullptr;
if (!IsRMWMemOpRootedAtStoreInd(storeInd, &indirCandidate, &indirOpSource))
{
- JITDUMP("Lower of StoreInd didn't mark the node as self contained for reason: %d\n", storeInd->AsStoreInd()->GetRMWStatus());
+ JITDUMP("Lower of StoreInd didn't mark the node as self contained for reason: %d\n",
+ storeInd->AsStoreInd()->GetRMWStatus());
DISPTREE(storeInd);
return false;
}
GenTreePtr indirDst = storeInd->gtGetOp1();
GenTreePtr indirSrc = storeInd->gtGetOp2();
- genTreeOps oper = indirSrc->OperGet();
+ genTreeOps oper = indirSrc->OperGet();
// At this point we have successfully detected a RMW memory op of one of the following forms
// storeInd(indirDst, indirSrc(indirCandidate, indirOpSource)) OR
@@ -3621,12 +3583,13 @@ bool Lowering::SetStoreIndOpCountsIfRMWMemOp(GenTreePtr storeInd)
// set storeInd src count to that of the dst count of indirOpSource
// clear operand counts on indirSrc (i.e. marked as contained and storeInd will generate code for it)
// clear operand counts on indirCandidate
- // clear operand counts on indirDst except when it is a GT_LCL_VAR or GT_CNS_INT that doesn't fit within addr base
+ // clear operand counts on indirDst except when it is a GT_LCL_VAR or GT_CNS_INT that doesn't fit within addr
+ // base
// Increment src count of storeInd to account for the registers required to form indirDst addr mode
// clear operand counts on indirCandidateChild
TreeNodeInfo* info = &(storeInd->gtLsraInfo);
- info->dstCount = 0;
+ info->dstCount = 0;
if (GenTree::OperIsBinary(oper))
{
@@ -3634,15 +3597,15 @@ bool Lowering::SetStoreIndOpCountsIfRMWMemOp(GenTreePtr storeInd)
assert(!indirOpSource->isMemoryOp() || indirOpSource->gtLsraInfo.dstCount == 1);
JITDUMP("Lower succesfully detected an assignment of the form: *addrMode BinOp= source\n");
info->srcCount = indirOpSource->gtLsraInfo.dstCount;
- }
- else
+ }
+ else
{
assert(GenTree::OperIsUnary(oper));
JITDUMP("Lower succesfully detected an assignment of the form: *addrMode = UnaryOp(*addrMode)\n");
info->srcCount = 0;
}
DISPTREE(storeInd);
-
+
m_lsra->clearOperandCounts(indirSrc);
m_lsra->clearOperandCounts(indirCandidate);
@@ -3669,12 +3632,10 @@ bool Lowering::SetStoreIndOpCountsIfRMWMemOp(GenTreePtr storeInd)
}
else
{
- assert(indirCandidateChild->OperGet() == GT_LCL_VAR ||
- indirCandidateChild->OperGet() == GT_LCL_VAR_ADDR ||
- indirCandidateChild->OperGet() == GT_CLS_VAR_ADDR ||
- indirCandidateChild->OperGet() == GT_CNS_INT);
+ assert(indirCandidateChild->OperGet() == GT_LCL_VAR || indirCandidateChild->OperGet() == GT_LCL_VAR_ADDR ||
+ indirCandidateChild->OperGet() == GT_CLS_VAR_ADDR || indirCandidateChild->OperGet() == GT_CNS_INT);
- // If it is a GT_LCL_VAR, it still needs the reg to hold the address.
+ // If it is a GT_LCL_VAR, it still needs the reg to hold the address.
// We would still need a reg for GT_CNS_INT if it doesn't fit within addressing mode base.
// For GT_CLS_VAR_ADDR, we don't need a reg to hold the address, because the field address
// value is known at jit time.
@@ -3682,8 +3643,7 @@ bool Lowering::SetStoreIndOpCountsIfRMWMemOp(GenTreePtr storeInd)
{
m_lsra->clearOperandCounts(indirDst);
}
- else if (indirCandidateChild->IsCnsIntOrI() &&
- indirCandidateChild->AsIntConCommon()->FitsInAddrBase(comp))
+ else if (indirCandidateChild->IsCnsIntOrI() && indirCandidateChild->AsIntConCommon()->FitsInAddrBase(comp))
{
m_lsra->clearOperandCounts(indirDst);
}
@@ -3699,7 +3659,7 @@ bool Lowering::SetStoreIndOpCountsIfRMWMemOp(GenTreePtr storeInd)
}
/**
- * Takes care of annotating the src and dst register
+ * Takes care of annotating the src and dst register
* requirements for a GT_MUL treenode.
*/
void Lowering::SetMulOpCounts(GenTreePtr tree)
@@ -3736,15 +3696,15 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
}
return;
}
-
- bool isUnsignedMultiply = ((tree->gtFlags & GTF_UNSIGNED) != 0);
- bool requiresOverflowCheck = tree->gtOverflowEx();
- bool useLeaEncoding = false;
- GenTreePtr memOp = nullptr;
-
- bool hasImpliedFirstOperand = false;
- GenTreeIntConCommon* imm = nullptr;
- GenTreePtr other = nullptr;
+
+ bool isUnsignedMultiply = ((tree->gtFlags & GTF_UNSIGNED) != 0);
+ bool requiresOverflowCheck = tree->gtOverflowEx();
+ bool useLeaEncoding = false;
+ GenTreePtr memOp = nullptr;
+
+ bool hasImpliedFirstOperand = false;
+ GenTreeIntConCommon* imm = nullptr;
+ GenTreePtr other = nullptr;
// There are three forms of x86 multiply:
// one-op form: RDX:RAX = RAX * r/m
@@ -3757,17 +3717,17 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
// Multiply should never be using small types
assert(!varTypeIsSmall(tree->TypeGet()));
- // We do use the widening multiply to implement
+ // We do use the widening multiply to implement
// the overflow checking for unsigned multiply
- //
+ //
if (isUnsignedMultiply && requiresOverflowCheck)
{
// The only encoding provided is RDX:RAX = RAX * rm
- //
- // Here we set RAX as the only destination candidate
+ //
+ // Here we set RAX as the only destination candidate
// In LSRA we set the kill set for this operation to RBM_RAX|RBM_RDX
//
- info->setDstCandidates(m_lsra,RBM_RAX);
+ info->setDstCandidates(m_lsra, RBM_RAX);
hasImpliedFirstOperand = true;
}
else if (tree->gtOper == GT_MULHI)
@@ -3779,13 +3739,13 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
else if (IsContainableImmed(tree, op2) || IsContainableImmed(tree, op1))
{
if (IsContainableImmed(tree, op2))
- {
- imm = op2->AsIntConCommon();
+ {
+ imm = op2->AsIntConCommon();
other = op1;
}
else
- {
- imm = op1->AsIntConCommon();
+ {
+ imm = op1->AsIntConCommon();
other = op2;
}
@@ -3796,10 +3756,10 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
useLeaEncoding = true;
}
- MakeSrcContained(tree, imm); // The imm is always contained
+ MakeSrcContained(tree, imm); // The imm is always contained
if (other->isMemoryOp())
{
- memOp = other; // memOp may be contained below
+ memOp = other; // memOp may be contained below
}
}
@@ -3818,9 +3778,7 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
//
if (!useLeaEncoding)
{
- if ((memOp != nullptr) &&
- (memOp->TypeGet() == tree->TypeGet()) &&
- IsSafeToContainMem(tree, memOp))
+ if ((memOp != nullptr) && (memOp->TypeGet() == tree->TypeGet()) && IsSafeToContainMem(tree, memOp))
{
MakeSrcContained(tree, memOp);
}
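
For orientation, the encodings SetMulOpCounts is choosing between look roughly like the cases below (register names and constants are illustrative only). The unsigned overflow-checked case is the one that forces RAX as the destination candidate, since the widening one-operand form implicitly uses RDX:RAX:

static long long MulEncodings(long long a, long long b)
{
    long long viaLea = a * 9;  // can become lea rax, [rcx + rcx*8] (no multiply instruction at all)
    long long viaRmi = a * 13; // three-operand form with a contained immediate: imul rax, rcx, 13
    return viaLea + viaRmi + (a * b); // the plain variable case uses two-operand imul rax, rdx
}
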
@@ -3868,14 +3826,15 @@ bool Lowering::isRMWRegOper(GenTreePtr tree)
return false;
}
- // These Opers either support a three op form (i.e. GT_LEA), or do not read/write their first operand
+ // These Opers either support a three op form (i.e. GT_LEA), or do not read/write their first operand
if ((tree->OperGet() == GT_LEA) || (tree->OperGet() == GT_STOREIND) || (tree->OperGet() == GT_ARR_INDEX))
+ {
return false;
+ }
// x86/x64 does support a three op multiply when op2|op1 is a contained immediate
if ((tree->OperGet() == GT_MUL) &&
- (Lowering::IsContainableImmed(tree, tree->gtOp.gtOp2) ||
- Lowering::IsContainableImmed(tree, tree->gtOp.gtOp1)))
+ (Lowering::IsContainableImmed(tree, tree->gtOp.gtOp2) || Lowering::IsContainableImmed(tree, tree->gtOp.gtOp1)))
{
return false;
}
@@ -3891,7 +3850,7 @@ bool Lowering::IsCallTargetInRange(void* addr)
}
// return true if the immediate can be folded into an instruction, for example small enough and non-relocatable
-bool Lowering:: IsContainableImmed(GenTree* parentNode, GenTree* childNode)
+bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode)
{
if (!childNode->IsIntCnsFitsInI32())
{
@@ -3917,8 +3876,8 @@ bool Lowering:: IsContainableImmed(GenTree* parentNode, GenTree* childNode)
// know apriori which of op1 or op2 is not likely to get a register, it
// has to make a guess. This routine encapsulates heuristics that
// guess whether it is likely to be beneficial to mark op1 or op2 as
-// reg optional.
-//
+// reg optional.
+//
//
// Arguments:
// tree - a binary-op tree node that is either commutative
@@ -3936,8 +3895,8 @@ GenTree* Lowering::PreferredRegOptionalOperand(GenTree* tree)
assert(GenTree::OperIsBinary(tree->OperGet()));
assert(tree->OperIsCommutative() || tree->OperIsCompare());
- GenTree* op1 = tree->gtGetOp1();
- GenTree* op2 = tree->gtGetOp2();
+ GenTree* op1 = tree->gtGetOp1();
+ GenTree* op2 = tree->gtGetOp2();
GenTree* preferredOp = nullptr;
// This routine uses the following heuristics:
@@ -3948,15 +3907,15 @@ GenTree* Lowering::PreferredRegOptionalOperand(GenTree* tree)
//
// b) op1 = tracked local and op2 = untracked local: LSRA creates two
// ref positions for op2: a def and use position. op2's def position
- // requires a reg and it is allocated a reg by spilling another
+ // requires a reg and it is allocated a reg by spilling another
// interval (if required) and that could even be op1. For this reason
// it is beneficial to mark op1 as reg optional.
//
// TODO: It is not always mandatory for a def position of an untracked
- // local to be allocated a register if it is on rhs of an assignment
+ // local to be allocated a register if it is on rhs of an assignment
// and its use position is reg-optional and has not been assigned a
// register. Reg optional def positions is currently not yet supported.
- //
+ //
// c) op1 = untracked local and op2 = tracked local: marking op1 as
// reg optional is beneficial, since its use position is less likely
// to get a register.
@@ -3968,14 +3927,13 @@ GenTree* Lowering::PreferredRegOptionalOperand(GenTree* tree)
// spilling op1's def and in which case op1 is treated as contained
// memory operand rather than requiring to reload.
//
- // e) If only one of them is a local var, prefer to mark it as
+ // e) If only one of them is a local var, prefer to mark it as
// reg-optional. This heuristic is based on the results
// obtained against CQ perf benchmarks.
//
// f) If neither of them are local vars (i.e. tree temps), prefer to
// mark op1 as reg optional for the same reason as mentioned in (d) above.
- if (op1->OperGet() == GT_LCL_VAR &&
- op2->OperGet() == GT_LCL_VAR)
+ if (op1->OperGet() == GT_LCL_VAR && op2->OperGet() == GT_LCL_VAR)
{
LclVarDsc* v1 = comp->lvaTable + op1->AsLclVarCommon()->GetLclNum();
LclVarDsc* v2 = comp->lvaTable + op2->AsLclVarCommon()->GetLclNum();
@@ -3996,7 +3954,7 @@ GenTree* Lowering::PreferredRegOptionalOperand(GenTree* tree)
}
else if (v2->lvTracked)
{
- // v1 is an untracked lcl and its use position is less likely to
+ // v1 is an untracked lcl and its use position is less likely to
// get a register.
preferredOp = op1;
}
@@ -4009,7 +3967,8 @@ GenTree* Lowering::PreferredRegOptionalOperand(GenTree* tree)
}
else
{
- preferredOp = op1;;
+ preferredOp = op1;
+ ;
}
}
else if (op1->OperGet() == GT_LCL_VAR)
@@ -4026,7 +3985,7 @@ GenTree* Lowering::PreferredRegOptionalOperand(GenTree* tree)
// operand that is evaluated first as reg optional
// since its use position is less likely to get a register.
bool reverseOps = ((tree->gtFlags & GTF_REVERSE_OPS) != 0);
- preferredOp = reverseOps ? op2 : op1;
+ preferredOp = reverseOps ? op2 : op1;
}
return preferredOp;
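
Heuristics (e) and (f) above can be restated as a tiny standalone sketch (stand-in booleans instead of GenTree queries); the both-local-var cases (a)-(d) additionally consult liveness tracking and weighted ref counts and are deliberately not modeled here:

static int RegOptionalOperandIndex(bool op1IsLclVar, bool op2IsLclVar, bool reverseOps)
{
    if (op1IsLclVar != op2IsLclVar)
    {
        return op1IsLclVar ? 1 : 2; // (e): prefer the lone local var
    }
    // (f) two tree temps: prefer the operand that is evaluated first, since its use
    // position is less likely to get a register.
    return reverseOps ? 2 : 1;
}
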
diff --git a/src/jit/lsra.cpp b/src/jit/lsra.cpp
index 1174593e74..e2c1930e2a 100644
--- a/src/jit/lsra.cpp
+++ b/src/jit/lsra.cpp
@@ -105,7 +105,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "lsra.h"
#ifdef DEBUG
-const char* LinearScan::resolveTypeName[] = { "Split", "Join", "Critical", "SharedCritical" };
+const char* LinearScan::resolveTypeName[] = {"Split", "Join", "Critical", "SharedCritical"};
#endif // DEBUG
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -147,27 +147,27 @@ void lsraAssignRegToTree(GenTreePtr tree, regNumber reg, unsigned regIdx)
//-------------------------------------------------------------
// getWeight: Returns the weight of the RefPosition.
//
-// Arguments:
+// Arguments:
// refPos - ref position
//
// Returns:
// Weight of ref position.
unsigned LinearScan::getWeight(RefPosition* refPos)
{
- unsigned weight;
+ unsigned weight;
GenTreePtr treeNode = refPos->treeNode;
- if (treeNode != nullptr)
+ if (treeNode != nullptr)
{
if (isCandidateLocalRef(treeNode))
{
// Tracked locals: use weighted ref cnt as the weight of the
// ref position.
GenTreeLclVarCommon* lclCommon = treeNode->AsLclVarCommon();
- LclVarDsc* varDsc = &(compiler->lvaTable[lclCommon->gtLclNum]);
- weight = varDsc->lvRefCntWtd;
+ LclVarDsc* varDsc = &(compiler->lvaTable[lclCommon->gtLclNum]);
+ weight = varDsc->lvRefCntWtd;
}
- else
+ else
{
// Non-candidate local ref or non-lcl tree node.
// These are considered to have two references in the basic block:
@@ -180,7 +180,7 @@ unsigned LinearScan::getWeight(RefPosition* refPos)
{
// Non-tree node ref positions. These will have a single
// reference in the basic block and hence their weighted
- // refcount is equal to the block weight in which they
+ // refcount is equal to the block weight in which they
// appear.
weight = this->blockInfo[refPos->bbNum].weight;
}
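
The weighting rules described in the comments above amount to the following sketch (parameter names are stand-ins; the factor of two for non-candidate tree nodes follows from the "two references in the basic block" wording):

static unsigned RefPositionWeight(bool hasTreeNode, bool isTrackedLocal,
                                  unsigned lvRefCntWtd, unsigned blockWeight)
{
    if (!hasTreeNode)
    {
        return blockWeight; // non-tree ref positions: a single reference in the block
    }
    if (isTrackedLocal)
    {
        return lvRefCntWtd; // tracked locals: use the weighted ref count directly
    }
    return 2 * blockWeight; // other tree nodes: counted as a def plus a use in the block
}
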
@@ -194,16 +194,24 @@ unsigned LinearScan::getWeight(RefPosition* refPos)
regMaskTP LinearScan::allRegs(RegisterType rt)
{
if (rt == TYP_FLOAT)
+ {
return availableFloatRegs;
+ }
else if (rt == TYP_DOUBLE)
+ {
return availableDoubleRegs;
#ifdef FEATURE_SIMD
- // TODO-Cleanup: Add an RBM_ALLSIMD
+ // TODO-Cleanup: Add an RBM_ALLSIMD
+ }
else if (varTypeIsSIMD(rt))
+ {
return availableDoubleRegs;
#endif // FEATURE_SIMD
- else
+ }
+ else
+ {
return availableIntRegs;
+ }
}
//--------------------------------------------------------------------------
@@ -214,7 +222,7 @@ regMaskTP LinearScan::allRegs(RegisterType rt)
// call - Multi-reg call node
//
// Return Value:
-// Mask representing the set of available registers for multi-reg call
+// Mask representing the set of available registers for multi-reg call
// node.
//
// Note:
@@ -225,7 +233,7 @@ regMaskTP LinearScan::allMultiRegCallNodeRegs(GenTreeCall* call)
assert(call->HasMultiRegRetVal());
ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- regMaskTP resultMask = allRegs(retTypeDesc->GetReturnRegType(0));
+ regMaskTP resultMask = allRegs(retTypeDesc->GetReturnRegType(0));
unsigned count = retTypeDesc->GetReturnRegCount();
for (unsigned i = 1; i < count; ++i)
@@ -247,7 +255,7 @@ regMaskTP LinearScan::allMultiRegCallNodeRegs(GenTreeCall* call)
// Mask representing the set of available registers for given tree
//
// Note: In case of multi-reg call node, the full set of registers must be
-// determined by looking at types of individual return register types.
+// determined by looking at types of individual return register types.
// In this case, the registers may include registers from different register
// sets and will not be limited to the actual ABI return registers.
regMaskTP LinearScan::allRegs(GenTree* tree)
@@ -287,8 +295,7 @@ regMaskTP LinearScan::allSIMDRegs()
// that it will select a callee-save register. But to be safe, we restrict
// the set of candidates if compFloatingPointUsed is not already set.
-regMaskTP
-LinearScan::internalFloatRegCandidates()
+regMaskTP LinearScan::internalFloatRegCandidates()
{
if (compiler->compFloatingPointUsed)
{
@@ -307,17 +314,19 @@ template <class T>
RegisterType regType(T type)
{
#ifdef FEATURE_SIMD
- if (varTypeIsSIMD(type)) return FloatRegisterType;
+ if (varTypeIsSIMD(type))
+ {
+ return FloatRegisterType;
+ }
#endif // FEATURE_SIMD
return varTypeIsFloating(TypeGet(type)) ? FloatRegisterType : IntRegisterType;
}
-bool
-useFloatReg(var_types type)
+bool useFloatReg(var_types type)
{
return (regType(type) == FloatRegisterType);
}
-
+
bool registerTypesEquivalent(RegisterType a, RegisterType b)
{
return varTypeIsIntegralOrI(a) == varTypeIsIntegralOrI(b);
@@ -332,18 +341,15 @@ bool isSingleRegister(regMaskTP regMask)
* Inline functions for RegRecord
*****************************************************************************/
-bool
-RegRecord::isFree()
+bool RegRecord::isFree()
{
- return ((assignedInterval == nullptr || !assignedInterval->isActive) &&
- !isBusyUntilNextKill);
+ return ((assignedInterval == nullptr || !assignedInterval->isActive) && !isBusyUntilNextKill);
}
/*****************************************************************************
* Inline functions for LinearScan
*****************************************************************************/
-RegRecord *
-LinearScan::getRegisterRecord(regNumber regNum)
+RegRecord* LinearScan::getRegisterRecord(regNumber regNum)
{
return &physRegs[regNum];
}
@@ -363,33 +369,36 @@ LinearScan::getRegisterRecord(regNumber regNum)
// This is the method used to implement the stress options that limit
// the set of registers considered for allocation.
-regMaskTP
-LinearScan::stressLimitRegs(RefPosition* refPosition, regMaskTP mask)
+regMaskTP LinearScan::stressLimitRegs(RefPosition* refPosition, regMaskTP mask)
{
if (getStressLimitRegs() != LSRA_LIMIT_NONE)
{
- switch(getStressLimitRegs())
+ switch (getStressLimitRegs())
{
- case LSRA_LIMIT_CALLEE:
- if (!compiler->opts.compDbgEnC && (mask & RBM_CALLEE_SAVED) != RBM_NONE)
- mask &= RBM_CALLEE_SAVED;
- break;
- case LSRA_LIMIT_CALLER:
- if ((mask & RBM_CALLEE_TRASH) != RBM_NONE)
- mask &= RBM_CALLEE_TRASH;
- break;
- case LSRA_LIMIT_SMALL_SET:
- if ((mask & LsraLimitSmallIntSet) != RBM_NONE)
- {
- mask &= LsraLimitSmallIntSet;
- }
- else if ((mask & LsraLimitSmallFPSet) != RBM_NONE)
- {
- mask &= LsraLimitSmallFPSet;
- }
- break;
- default:
- unreached();
+ case LSRA_LIMIT_CALLEE:
+ if (!compiler->opts.compDbgEnC && (mask & RBM_CALLEE_SAVED) != RBM_NONE)
+ {
+ mask &= RBM_CALLEE_SAVED;
+ }
+ break;
+ case LSRA_LIMIT_CALLER:
+ if ((mask & RBM_CALLEE_TRASH) != RBM_NONE)
+ {
+ mask &= RBM_CALLEE_TRASH;
+ }
+ break;
+ case LSRA_LIMIT_SMALL_SET:
+ if ((mask & LsraLimitSmallIntSet) != RBM_NONE)
+ {
+ mask &= LsraLimitSmallIntSet;
+ }
+ else if ((mask & LsraLimitSmallFPSet) != RBM_NONE)
+ {
+ mask &= LsraLimitSmallFPSet;
+ }
+ break;
+ default:
+ unreached();
}
if (refPosition != nullptr && refPosition->isFixedRegRef)
{
@@ -400,15 +409,13 @@ LinearScan::stressLimitRegs(RefPosition* refPosition, regMaskTP mask)
}
#endif // DEBUG
-
// TODO-Cleanup: Consider adding an overload that takes a varDsc, and can appropriately
// set such fields as isStructField
-Interval *
-LinearScan::newInterval(RegisterType theRegisterType)
+Interval* LinearScan::newInterval(RegisterType theRegisterType)
{
intervals.emplace_back(theRegisterType, allRegs(theRegisterType));
- Interval *newInt = &intervals.back();
+ Interval* newInt = &intervals.back();
#ifdef DEBUG
newInt->intervalIndex = static_cast<unsigned>(intervals.size() - 1);
@@ -418,11 +425,10 @@ LinearScan::newInterval(RegisterType theRegisterType)
return newInt;
}
-RefPosition *
-LinearScan::newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType)
+RefPosition* LinearScan::newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType)
{
refPositions.emplace_back(curBBNum, nodeLocation, treeNode, refType);
- RefPosition *newRP = &refPositions.back();
+ RefPosition* newRP = &refPositions.back();
#ifdef DEBUG
newRP->rpNum = static_cast<unsigned>(refPositions.size() - 1);
#endif // DEBUG
@@ -480,20 +486,19 @@ LinearScan::newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefT
// we can use the fixed-reg on the def.
//
-void
-LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition)
+void LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition)
{
assert(!interval->isLocalVar);
- RefPosition* useRefPosition = defRefPosition->nextRefPosition;
- regMaskTP defRegAssignment = defRefPosition->registerAssignment;
- regMaskTP useRegAssignment = useRefPosition->registerAssignment;
- RegRecord* defRegRecord = nullptr;
- RegRecord* useRegRecord = nullptr;
- regNumber defReg = REG_NA;
- regNumber useReg = REG_NA;
- bool defRegConflict = false;
- bool useRegConflict = false;
+ RefPosition* useRefPosition = defRefPosition->nextRefPosition;
+ regMaskTP defRegAssignment = defRefPosition->registerAssignment;
+ regMaskTP useRegAssignment = useRefPosition->registerAssignment;
+ RegRecord* defRegRecord = nullptr;
+ RegRecord* useRegRecord = nullptr;
+ regNumber defReg = REG_NA;
+ regNumber useReg = REG_NA;
+ bool defRegConflict = false;
+ bool useRegConflict = false;
// If the useRefPosition is a "delayRegFree", we can't change the registerAssignment
// on it, or we will fail to ensure that the fixedReg is busy at the time the target
@@ -507,7 +512,7 @@ LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefP
}
if (defRefPosition->isFixedRegRef)
{
- defReg = defRefPosition->assignedReg();
+ defReg = defRefPosition->assignedReg();
defRegRecord = getRegisterRecord(defReg);
if (canChangeUseAssignment)
{
@@ -531,13 +536,14 @@ LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefP
}
if (useRefPosition->isFixedRegRef)
{
- useReg = useRefPosition->assignedReg();
- useRegRecord = getRegisterRecord(useReg);
+ useReg = useRefPosition->assignedReg();
+ useRegRecord = getRegisterRecord(useReg);
RefPosition* currFixedRegRefPosition = useRegRecord->recentRefPosition;
// We know that useRefPosition is a fixed use, so the nextRefPosition must not be null.
RefPosition* nextFixedRegRefPosition = useRegRecord->getNextRefPosition();
- assert(nextFixedRegRefPosition != nullptr && nextFixedRegRefPosition->nodeLocation <= useRefPosition->nodeLocation);
+ assert(nextFixedRegRefPosition != nullptr &&
+ nextFixedRegRefPosition->nodeLocation <= useRefPosition->nodeLocation);
// First, check to see if there are any conflicting FixedReg references between the def and use.
if (nextFixedRegRefPosition->nodeLocation == useRefPosition->nodeLocation)
@@ -546,7 +552,7 @@ LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefP
// Now, check to see whether it is currently in use.
if (useRegRecord->assignedInterval != nullptr)
{
- RefPosition* possiblyConflictingRef = useRegRecord->assignedInterval->recentRefPosition;
+ RefPosition* possiblyConflictingRef = useRegRecord->assignedInterval->recentRefPosition;
LsraLocation possiblyConflictingRefLocation = possiblyConflictingRef->getRefEndLocation();
if (possiblyConflictingRefLocation >= defRefPosition->nodeLocation)
{
@@ -568,7 +574,7 @@ LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefP
}
if (defRegRecord != nullptr && !useRegConflict)
{
- // This is case #3.
+ // This is case #3.
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE3));
defRefPosition->registerAssignment = useRegAssignment;
return;
@@ -585,8 +591,9 @@ LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefP
// This is case #5.
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_DEFUSE_CASE5));
RegisterType regType = interval->registerType;
- assert((getRegisterType(interval, defRefPosition) == regType) && (getRegisterType(interval, useRefPosition) == regType));
- regMaskTP candidates = allRegs(regType);
+ assert((getRegisterType(interval, defRefPosition) == regType) &&
+ (getRegisterType(interval, useRefPosition) == regType));
+ regMaskTP candidates = allRegs(regType);
defRefPosition->registerAssignment = candidates;
return;
}
@@ -611,8 +618,7 @@ LinearScan::resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefP
// Assumptions:
// 'refPosition' is non-null.
-bool
-RegRecord::conflictingFixedRegReference(RefPosition* refPosition)
+bool RegRecord::conflictingFixedRegReference(RefPosition* refPosition)
{
// Is this a fixed reference of this register? If so, there is no conflict.
if (refPosition->isFixedRefOfRegMask(genRegMask(regNum)))
@@ -627,24 +633,21 @@ RegRecord::conflictingFixedRegReference(RefPosition* refPosition)
// if refPosition is a delayed use (i.e. must be kept live through the next/def location).
LsraLocation refLocation = refPosition->nodeLocation;
- if (recentRefPosition != nullptr &&
- recentRefPosition->refType != RefTypeKill &&
+ if (recentRefPosition != nullptr && recentRefPosition->refType != RefTypeKill &&
recentRefPosition->nodeLocation == refLocation &&
(!isBusyUntilNextKill || assignedInterval != refPosition->getInterval()))
{
return true;
}
LsraLocation nextPhysRefLocation = getNextRefLocation();
- if ( nextPhysRefLocation == refLocation ||
- (refPosition->delayRegFree && nextPhysRefLocation == (refLocation + 1)))
+ if (nextPhysRefLocation == refLocation || (refPosition->delayRegFree && nextPhysRefLocation == (refLocation + 1)))
{
return true;
}
return false;
}
-void
-LinearScan::applyCalleeSaveHeuristics(RefPosition* rp)
+void LinearScan::applyCalleeSaveHeuristics(RefPosition* rp)
{
#ifdef _TARGET_AMD64_
if (compiler->opts.compDbgEnC)
@@ -654,9 +657,9 @@ LinearScan::applyCalleeSaveHeuristics(RefPosition* rp)
}
#endif // _TARGET_AMD64_
- Interval * theInterval = rp->getInterval();
+ Interval* theInterval = rp->getInterval();
#ifdef DEBUG
- regMaskTP calleeSaveMask = calleeSaveRegs(getRegisterType(theInterval, rp));
+ regMaskTP calleeSaveMask = calleeSaveRegs(getRegisterType(theInterval, rp));
if (doReverseCallerCallee())
{
regMaskTP newAssignment = rp->registerAssignment;
@@ -664,18 +667,19 @@ LinearScan::applyCalleeSaveHeuristics(RefPosition* rp)
if (newAssignment != RBM_NONE)
{
rp->registerAssignment = newAssignment;
- }
+ }
}
else
#endif // DEBUG
- // Set preferences so that this register set will be preferred for earlier refs
- theInterval->updateRegisterPreferences(rp->registerAssignment);
+ {
+ // Set preferences so that this register set will be preferred for earlier refs
+ theInterval->updateRegisterPreferences(rp->registerAssignment);
+ }
}
-void
-LinearScan::associateRefPosWithInterval(RefPosition *rp)
+void LinearScan::associateRefPosWithInterval(RefPosition* rp)
{
- Referenceable *theReferent = rp->referent;
+ Referenceable* theReferent = rp->referent;
if (theReferent != nullptr)
{
@@ -683,7 +687,7 @@ LinearScan::associateRefPosWithInterval(RefPosition *rp)
if (rp->isIntervalRef())
{
- Interval * theInterval = rp->getInterval();
+ Interval* theInterval = rp->getInterval();
applyCalleeSaveHeuristics(rp);
@@ -693,14 +697,16 @@ LinearScan::associateRefPosWithInterval(RefPosition *rp)
if (RefTypeIsUse(rp->refType) && !theInterval->isLocalVar)
{
- RefPosition * prevRefPosition = theInterval->recentRefPosition;
+ RefPosition* prevRefPosition = theInterval->recentRefPosition;
assert(prevRefPosition != nullptr && theInterval->firstRefPosition == prevRefPosition);
regMaskTP prevAssignment = prevRefPosition->registerAssignment;
- regMaskTP newAssignment = (prevAssignment & rp->registerAssignment);
+ regMaskTP newAssignment = (prevAssignment & rp->registerAssignment);
if (newAssignment != RBM_NONE)
{
if (!theInterval->hasNonCommutativeRMWDef || !isSingleRegister(newAssignment))
+ {
prevRefPosition->registerAssignment = newAssignment;
+ }
}
else
{
@@ -709,7 +715,7 @@ LinearScan::associateRefPosWithInterval(RefPosition *rp)
}
}
- RefPosition * prevRP = theReferent->recentRefPosition;
+ RefPosition* prevRP = theReferent->recentRefPosition;
if (prevRP != nullptr)
{
prevRP->nextRefPosition = rp;
@@ -719,8 +725,8 @@ LinearScan::associateRefPosWithInterval(RefPosition *rp)
theReferent->firstRefPosition = rp;
}
theReferent->recentRefPosition = rp;
- theReferent->lastRefPosition = rp;
- }
+ theReferent->lastRefPosition = rp;
+ }
else
{
assert((rp->refType == RefTypeBB) || (rp->refType == RefTypeKillGCRefs));
@@ -731,7 +737,7 @@ LinearScan::associateRefPosWithInterval(RefPosition *rp)
// newRefPosition: allocate and initialize a new RefPosition.
//
// Arguments:
-// reg - reg number that identifies RegRecord to be associated
+// reg - reg number that identifies RegRecord to be associated
// with this RefPosition
// theLocation - LSRA location of RefPosition
// theRefType - RefPosition type
@@ -742,13 +748,9 @@ LinearScan::associateRefPosWithInterval(RefPosition *rp)
//
// Return Value:
// a new RefPosition
-//
-RefPosition*
-LinearScan::newRefPosition(regNumber reg,
- LsraLocation theLocation,
- RefType theRefType,
- GenTree* theTreeNode,
- regMaskTP mask)
+//
+RefPosition* LinearScan::newRefPosition(
+ regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask)
{
RefPosition* newRP = newRefPositionRaw(theLocation, theTreeNode, theRefType);
@@ -778,14 +780,13 @@ LinearScan::newRefPosition(regNumber reg,
//
// Return Value:
// a new RefPosition
-//
-RefPosition*
-LinearScan::newRefPosition(Interval* theInterval,
- LsraLocation theLocation,
- RefType theRefType,
- GenTree* theTreeNode,
- regMaskTP mask,
- unsigned multiRegIdx /* = 0 */)
+//
+RefPosition* LinearScan::newRefPosition(Interval* theInterval,
+ LsraLocation theLocation,
+ RefType theRefType,
+ GenTree* theTreeNode,
+ regMaskTP mask,
+ unsigned multiRegIdx /* = 0 */)
{
#ifdef DEBUG
if (theInterval != nullptr && regType(theInterval->registerType) == FloatRegisterType)
@@ -793,8 +794,7 @@ LinearScan::newRefPosition(Interval* theInterval,
// In the case we're using floating point registers we must make sure
// this flag was set previously in the compiler since this will mandate
// whether LSRA will take into consideration FP reg killsets.
- assert(compiler->compFloatingPointUsed ||
- ((mask & RBM_FLT_CALLEE_SAVED) == 0));
+ assert(compiler->compFloatingPointUsed || ((mask & RBM_FLT_CALLEE_SAVED) == 0));
}
#endif // DEBUG
@@ -803,17 +803,20 @@ LinearScan::newRefPosition(Interval* theInterval,
// availability can be more accurately determined
bool isFixedRegister = isSingleRegister(mask);
- bool insertFixedRef = false;
+ bool insertFixedRef = false;
if (isFixedRegister)
{
// Insert a RefTypeFixedReg for any normal def or use (not ParamDef or BB)
- if (theRefType == RefTypeUse || theRefType == RefTypeDef) insertFixedRef = true;
+ if (theRefType == RefTypeUse || theRefType == RefTypeDef)
+ {
+ insertFixedRef = true;
+ }
}
if (insertFixedRef)
{
- regNumber physicalReg = genRegNumFromMask(mask);
- RefPosition* pos = newRefPosition (physicalReg, theLocation, RefTypeFixedReg, nullptr, mask);
+ regNumber physicalReg = genRegNumFromMask(mask);
+ RefPosition* pos = newRefPosition(physicalReg, theLocation, RefTypeFixedReg, nullptr, mask);
assert(theInterval != nullptr);
assert((allRegs(theInterval->registerType) & mask) != 0);
}
@@ -828,9 +831,7 @@ LinearScan::newRefPosition(Interval* theInterval,
#ifndef _TARGET_AMD64_
// We don't need this for AMD because the PInvoke method epilog code is explicit
// at register allocation time.
- if (theInterval != nullptr &&
- theInterval->isLocalVar &&
- compiler->info.compCallUnmanaged &&
+ if (theInterval != nullptr && theInterval->isLocalVar && compiler->info.compCallUnmanaged &&
theInterval->varNum == compiler->genReturnLocal)
{
mask &= ~(RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME);
@@ -851,8 +852,7 @@ LinearScan::newRefPosition(Interval* theInterval,
/*****************************************************************************
* Inline functions for Interval
*****************************************************************************/
-RefPosition *
-Referenceable::getNextRefPosition()
+RefPosition* Referenceable::getNextRefPosition()
{
if (recentRefPosition == nullptr)
{
@@ -864,10 +864,9 @@ Referenceable::getNextRefPosition()
}
}
-LsraLocation
-Referenceable::getNextRefLocation()
+LsraLocation Referenceable::getNextRefLocation()
{
- RefPosition * nextRefPosition = getNextRefPosition();
+ RefPosition* nextRefPosition = getNextRefPosition();
if (nextRefPosition == nullptr)
{
return MaxLocation;
@@ -887,10 +886,15 @@ public:
RegisterIterator(RegisterType type) : regType(type)
{
if (useFloatReg(regType))
+ {
currentRegNum = REG_FP_FIRST;
+ }
else
+ {
currentRegNum = REG_INT_FIRST;
+ }
}
+
protected:
static RegisterIterator Begin(RegisterType regType)
{
@@ -903,30 +907,43 @@ protected:
// if we target a processor with additional register types,
// this would have to change
if (useFloatReg(regType))
+ {
// This just happens to work for both double & float
endIter.currentRegNum = REG_NEXT(REG_FP_LAST);
+ }
else
+ {
endIter.currentRegNum = REG_NEXT(REG_INT_LAST);
+ }
return endIter;
}
+
public:
- void operator++(int dummy) //int dummy is c++ for "this is postfix ++"
+ void operator++(int dummy) // int dummy is c++ for "this is postfix ++"
{
currentRegNum = REG_NEXT(currentRegNum);
#ifdef _TARGET_ARM_
- if (regType == TYP_DOUBLE) currentRegNum = REG_NEXT(currentRegNum);
+ if (regType == TYP_DOUBLE)
+ currentRegNum = REG_NEXT(currentRegNum);
#endif
}
- void operator++() // prefix operator++
+ void operator++() // prefix operator++
{
currentRegNum = REG_NEXT(currentRegNum);
#ifdef _TARGET_ARM_
- if (regType == TYP_DOUBLE) currentRegNum = REG_NEXT(currentRegNum);
+ if (regType == TYP_DOUBLE)
+ currentRegNum = REG_NEXT(currentRegNum);
#endif
}
- regNumber operator*() { return currentRegNum; }
- bool operator!=(const RegisterIterator &other) { return other.currentRegNum != currentRegNum; }
-
+ regNumber operator*()
+ {
+ return currentRegNum;
+ }
+ bool operator!=(const RegisterIterator& other)
+ {
+ return other.currentRegNum != currentRegNum;
+ }
+
private:
regNumber currentRegNum;
RegisterType regType;
@@ -937,14 +954,22 @@ class Registers
public:
friend class RegisterIterator;
RegisterType type;
- Registers(RegisterType t){ type = t; }
- RegisterIterator begin() { return RegisterIterator::Begin(type); }
- RegisterIterator end() { return RegisterIterator::End(type); }
+ Registers(RegisterType t)
+ {
+ type = t;
+ }
+ RegisterIterator begin()
+ {
+ return RegisterIterator::Begin(type);
+ }
+ RegisterIterator end()
+ {
+ return RegisterIterator::End(type);
+ }
};
#ifdef DEBUG
-void
-LinearScan::dumpVarToRegMap(VarToRegMap map)
+void LinearScan::dumpVarToRegMap(VarToRegMap map)
{
bool anyPrinted = false;
for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
@@ -963,16 +988,14 @@ LinearScan::dumpVarToRegMap(VarToRegMap map)
printf("\n");
}
-void
-LinearScan::dumpInVarToRegMap(BasicBlock * block)
+void LinearScan::dumpInVarToRegMap(BasicBlock* block)
{
printf("Var=Reg beg of BB%02u: ", block->bbNum);
VarToRegMap map = getInVarToRegMap(block->bbNum);
dumpVarToRegMap(map);
}
-void
-LinearScan::dumpOutVarToRegMap(BasicBlock * block)
+void LinearScan::dumpOutVarToRegMap(BasicBlock* block)
{
printf("Var=Reg end of BB%02u: ", block->bbNum);
VarToRegMap map = getOutVarToRegMap(block->bbNum);
@@ -981,13 +1004,11 @@ LinearScan::dumpOutVarToRegMap(BasicBlock * block)
#endif // DEBUG
-
-LinearScanInterface *getLinearScanAllocator(Compiler *comp)
+LinearScanInterface* getLinearScanAllocator(Compiler* comp)
{
return new (comp, CMK_LSRA) LinearScan(comp);
}
-
//------------------------------------------------------------------------
// LSRA constructor
//
@@ -999,9 +1020,8 @@ LinearScanInterface *getLinearScanAllocator(Compiler *comp)
// during Lowering, including (in DEBUG) getting the stress environment variables,
// as they may affect the block ordering.
-LinearScan::LinearScan(Compiler * theCompiler)
- :
- compiler(theCompiler)
+LinearScan::LinearScan(Compiler* theCompiler)
+ : compiler(theCompiler)
#if MEASURE_MEM_ALLOC
, lsraIAllocator(nullptr)
#endif // MEASURE_MEM_ALLOC
@@ -1009,7 +1029,7 @@ LinearScan::LinearScan(Compiler * theCompiler)
, refPositions(LinearScanMemoryAllocatorRefPosition(theCompiler))
{
#ifdef DEBUG
- maxNodeLocation = 0;
+ maxNodeLocation = 0;
activeRefPosition = nullptr;
// Get the value of the environment variable that controls stress for register allocation
@@ -1057,7 +1077,7 @@ LinearScan::LinearScan(Compiler * theCompiler)
#if ETW_EBP_FRAMED
availableIntRegs &= ~RBM_FPBASE;
#endif // ETW_EBP_FRAMED
- availableFloatRegs = RBM_ALLFLOAT;
+ availableFloatRegs = RBM_ALLFLOAT;
availableDoubleRegs = RBM_ALLDOUBLE;
#ifdef _TARGET_AMD64_
@@ -1066,12 +1086,12 @@ LinearScan::LinearScan(Compiler * theCompiler)
// On x64 when the EnC option is set, we always save exactly RBP, RSI and RDI.
// RBP is not available to the register allocator, so RSI and RDI are the only
// callee-save registers available.
- availableIntRegs &= ~RBM_CALLEE_SAVED | RBM_RSI | RBM_RDI;
- availableFloatRegs &= ~RBM_CALLEE_SAVED;
+ availableIntRegs &= ~RBM_CALLEE_SAVED | RBM_RSI | RBM_RDI;
+ availableFloatRegs &= ~RBM_CALLEE_SAVED;
availableDoubleRegs &= ~RBM_CALLEE_SAVED;
}
#endif // _TARGET_AMD64_
- compiler->rpFrameType = FT_NOT_SET;
+ compiler->rpFrameType = FT_NOT_SET;
compiler->rpMustCreateEBPCalled = false;
compiler->codeGen->intRegState.rsIsFloat = false;
@@ -1082,11 +1102,11 @@ LinearScan::LinearScan(Compiler * theCompiler)
// (currently during Lowering's second phase, where it sets the TreeNodeInfo).
// This is so that any blocks that are added during the first phase of Lowering
// are accounted for (and we don't have BasicBlockEpoch issues).
- blockSequencingDone = false;
- blockSequence = nullptr;
+ blockSequencingDone = false;
+ blockSequence = nullptr;
blockSequenceWorkList = nullptr;
- curBBSeqNum = 0;
- bbSeqCount = 0;
+ curBBSeqNum = 0;
+ bbSeqCount = 0;
// Information about each block, including predecessor blocks used for variable locations at block entry.
blockInfo = nullptr;
@@ -1095,7 +1115,7 @@ LinearScan::LinearScan(Compiler * theCompiler)
// The first two masks in the table are allint/allfloat
// The next N are the masks for each single register.
// After that are the dynamically added ones.
- regMaskTable = new (compiler, CMK_LSRA) regMaskTP[numMasks];
+ regMaskTable = new (compiler, CMK_LSRA) regMaskTP[numMasks];
regMaskTable[ALLINT_IDX] = allRegs(TYP_INT);
regMaskTable[ALLFLOAT_IDX] = allRegs(TYP_DOUBLE);
@@ -1140,15 +1160,17 @@ LinearScan::RegMaskIndex LinearScan::GetIndexForRegMask(regMaskTP mask)
for (int i = FIRST_SINGLE_REG_IDX + REG_COUNT; i < nextFreeMask; i++)
{
if (regMaskTable[i] == mask)
+ {
return i;
- }
+ }
+ }
// We only allocate a fixed number of masks. Since we don't reallocate, we will throw a
// noway_assert if we exceed this limit.
noway_assert(nextFreeMask < numMasks);
regMaskTable[nextFreeMask] = mask;
- result = nextFreeMask;
+ result = nextFreeMask;
nextFreeMask++;
}
assert(mask == regMaskTable[result]);
@@ -1201,15 +1223,12 @@ void LinearScan::dspRegisterMaskTable()
// encountered as both a flow and layout successor of the most recently selected
// block.
-BasicBlock*
-LinearScan::getNextCandidateFromWorkList()
+BasicBlock* LinearScan::getNextCandidateFromWorkList()
{
BasicBlockList* nextWorkList = nullptr;
- for (BasicBlockList* workList = blockSequenceWorkList;
- workList != nullptr;
- workList = nextWorkList)
+ for (BasicBlockList* workList = blockSequenceWorkList; workList != nullptr; workList = nextWorkList)
{
- nextWorkList = workList->next;
+ nextWorkList = workList->next;
BasicBlock* candBlock = workList->block;
removeFromBlockSequenceWorkList(workList, nullptr);
if (!isBlockVisited(candBlock))
@@ -1238,26 +1257,23 @@ LinearScan::getNextCandidateFromWorkList()
// combined with the first traversal (currently the one in Lowering that sets the
// TreeNodeInfo).
-void
-LinearScan::setBlockSequence()
+void LinearScan::setBlockSequence()
{
// Reset the "visited" flag on each block.
compiler->EnsureBasicBlockEpoch();
bbVisitedSet = BlockSetOps::MakeEmpty(compiler);
BlockSet BLOCKSET_INIT_NOCOPY(readySet, BlockSetOps::MakeEmpty(compiler));
assert(blockSequence == nullptr && bbSeqCount == 0);
- blockSequence = new (compiler, CMK_LSRA) BasicBlock*[compiler->fgBBcount];
- bbNumMaxBeforeResolution = compiler->fgBBNumMax;
- blockInfo = new(compiler, CMK_LSRA) LsraBlockInfo[bbNumMaxBeforeResolution + 1];
+ blockSequence = new (compiler, CMK_LSRA) BasicBlock*[compiler->fgBBcount];
+ bbNumMaxBeforeResolution = compiler->fgBBNumMax;
+ blockInfo = new (compiler, CMK_LSRA) LsraBlockInfo[bbNumMaxBeforeResolution + 1];
assert(blockSequenceWorkList == nullptr);
bool addedInternalBlocks = false;
- verifiedAllBBs = false;
+ verifiedAllBBs = false;
BasicBlock* nextBlock;
- for (BasicBlock* block = compiler->fgFirstBB;
- block != nullptr;
- block = nextBlock)
+ for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = nextBlock)
{
blockSequence[bbSeqCount] = block;
markBlockVisited(block);
@@ -1268,17 +1284,15 @@ LinearScan::setBlockSequence()
// predBBNum will be set later. 0 is never used as a bbNum.
blockInfo[block->bbNum].predBBNum = 0;
// We check for critical edges below, but initialize to false.
- blockInfo[block->bbNum].hasCriticalInEdge = false;
+ blockInfo[block->bbNum].hasCriticalInEdge = false;
blockInfo[block->bbNum].hasCriticalOutEdge = false;
- blockInfo[block->bbNum].weight = block->bbWeight;
+ blockInfo[block->bbNum].weight = block->bbWeight;
if (block->GetUniquePred(compiler) == nullptr)
{
- for (flowList* pred = block->bbPreds;
- pred != nullptr;
- pred = pred->flNext)
+ for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
{
- BasicBlock * predBlock = pred->flBlock;
+ BasicBlock* predBlock = pred->flBlock;
if (predBlock->NumSucc(compiler) > 1)
{
blockInfo[block->bbNum].hasCriticalInEdge = true;
@@ -1379,18 +1393,14 @@ LinearScan::setBlockSequence()
#ifdef DEBUG
// Make sure that we've visited all the blocks.
- for( BasicBlock* block = compiler->fgFirstBB;
- block != nullptr;
- block = block->bbNext)
+ for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
{
assert(isBlockVisited(block));
}
JITDUMP("LSRA Block Sequence: ");
int i = 1;
- for (BasicBlock* block = startBlockSequence();
- block != nullptr;
- ++i, block = moveToNextBlock())
+ for (BasicBlock *block = startBlockSequence(); block != nullptr; ++i, block = moveToNextBlock())
{
JITDUMP("BB%02u", block->bbNum);
@@ -1427,9 +1437,8 @@ LinearScan::setBlockSequence()
//
// Notes:
// See addToBlockSequenceWorkList.
-int
-LinearScan::compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights)
-{
+int LinearScan::compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights)
+{
if (useBlockWeights)
{
unsigned weight1 = block1->getBBWeight(compiler);
@@ -1481,20 +1490,19 @@ LinearScan::compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, b
// A block at the time of insertion may not have all its predecessors sequenced, in
// which case it will be sequenced based on its block number. Once a block is inserted,
// its priority\order will not be changed later once its remaining predecessors are
-// sequenced. This would mean that work list may not be sorted entirely based on
+// sequenced. This would mean that work list may not be sorted entirely based on
// block weights alone.
//
// Note also that, when random traversal order is implemented, this method
// should insert the blocks into the list in random order, so that we can always
// simply select the first block in the list.
-void
-LinearScan::addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block)
+void LinearScan::addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block)
{
// The block that is being added is not already sequenced
assert(!BlockSetOps::IsMember(compiler, sequencedBlockSet, block->bbNum));
// Get predSet of block
- BlockSet BLOCKSET_INIT_NOCOPY(predSet, BlockSetOps::MakeEmpty(compiler));
+ BlockSet BLOCKSET_INIT_NOCOPY(predSet, BlockSetOps::MakeEmpty(compiler));
flowList* pred;
for (pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
{
@@ -1546,8 +1554,7 @@ LinearScan::addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* b
}
}
-void
-LinearScan::removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode)
+void LinearScan::removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode)
{
if (listNode == blockSequenceWorkList)
{
@@ -1564,16 +1571,15 @@ LinearScan::removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlock
}
// Initialize the block order for allocation (called each time a new traversal begins).
-BasicBlock*
-LinearScan::startBlockSequence()
+BasicBlock* LinearScan::startBlockSequence()
{
if (!blockSequencingDone)
{
setBlockSequence();
}
BasicBlock* curBB = compiler->fgFirstBB;
- curBBSeqNum = 0;
- curBBNum = curBB->bbNum;
+ curBBSeqNum = 0;
+ curBBNum = curBB->bbNum;
clearVisitedBlocks();
assert(blockSequence[0] == compiler->fgFirstBB);
markBlockVisited(curBB);
@@ -1593,8 +1599,7 @@ LinearScan::startBlockSequence()
// This method is used when the next block is actually going to be handled.
// It changes curBBNum.
-BasicBlock*
-LinearScan::moveToNextBlock()
+BasicBlock* LinearScan::moveToNextBlock()
{
BasicBlock* nextBlock = getNextBlock();
curBBSeqNum++;
@@ -1618,8 +1623,7 @@ LinearScan::moveToNextBlock()
// This method does not actually change the current block - it is used simply
// to determine which block will be next.
-BasicBlock*
-LinearScan::getNextBlock()
+BasicBlock* LinearScan::getNextBlock()
{
assert(blockSequencingDone);
unsigned int nextBBSeqNum = curBBSeqNum + 1;
@@ -1643,8 +1647,7 @@ LinearScan::getNextBlock()
// Lowering must have set the NodeInfo (gtLsraInfo) on each node to communicate
// the register requirements.
-void
-LinearScan::doLinearScan()
+void LinearScan::doLinearScan()
{
#ifdef DEBUG
if (VERBOSE)
@@ -1702,26 +1705,26 @@ LinearScan::doLinearScan()
// after calling this method.
// This is because we need to kill off the dead registers before setting the newly live ones.
-void
-LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb)
+void LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb)
{
JITDUMP("Recording Var Locations at start of BB%02u\n", bb->bbNum);
- VarToRegMap map = getInVarToRegMap(bb->bbNum);
- unsigned count = 0;
+ VarToRegMap map = getInVarToRegMap(bb->bbNum);
+ unsigned count = 0;
VARSET_ITER_INIT(compiler, iter, bb->bbLiveIn, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
- regNumber regNum = getVarReg(map, varNum);
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = &(compiler->lvaTable[varNum]);
+ regNumber regNum = getVarReg(map, varNum);
regNumber oldRegNum = varDsc->lvRegNum;
regNumber newRegNum = regNum;
if (oldRegNum != newRegNum)
{
- JITDUMP(" V%02u(%s->%s)", varNum, compiler->compRegVarName(oldRegNum), compiler->compRegVarName(newRegNum));
+ JITDUMP(" V%02u(%s->%s)", varNum, compiler->compRegVarName(oldRegNum),
+ compiler->compRegVarName(newRegNum));
varDsc->lvRegNum = newRegNum;
count++;
}
@@ -1740,36 +1743,34 @@ LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb)
JITDUMP("\n");
}
-void
-Interval::setLocalNumber(unsigned lclNum, LinearScan *linScan)
+void Interval::setLocalNumber(unsigned lclNum, LinearScan* linScan)
{
linScan->localVarIntervals[lclNum] = this;
assert(linScan->getIntervalForLocalVar(lclNum) == this);
this->isLocalVar = true;
- this->varNum = lclNum;
+ this->varNum = lclNum;
}
-// identify the candidates which we are not going to enregister due to
+// identify the candidates which we are not going to enregister due to
// being used in EH in a way we don't want to deal with
// this logic cloned from fgInterBlockLocalVarLiveness
-void
-LinearScan::identifyCandidatesExceptionDataflow()
+void LinearScan::identifyCandidatesExceptionDataflow()
{
- VARSET_TP VARSET_INIT_NOCOPY(exceptVars, VarSetOps::MakeEmpty(compiler));
- VARSET_TP VARSET_INIT_NOCOPY(filterVars, VarSetOps::MakeEmpty(compiler));
- VARSET_TP VARSET_INIT_NOCOPY(finallyVars, VarSetOps::MakeEmpty(compiler));
- BasicBlock *block;
+ VARSET_TP VARSET_INIT_NOCOPY(exceptVars, VarSetOps::MakeEmpty(compiler));
+ VARSET_TP VARSET_INIT_NOCOPY(filterVars, VarSetOps::MakeEmpty(compiler));
+ VARSET_TP VARSET_INIT_NOCOPY(finallyVars, VarSetOps::MakeEmpty(compiler));
+ BasicBlock* block;
foreach_block(compiler, block)
{
- if (block->bbCatchTyp != BBCT_NONE)
+ if (block->bbCatchTyp != BBCT_NONE)
{
// live on entry to handler
VarSetOps::UnionD(compiler, exceptVars, block->bbLiveIn);
}
- if (block->bbJumpKind == BBJ_EHFILTERRET)
+ if (block->bbJumpKind == BBJ_EHFILTERRET)
{
// live on exit from filter
VarSetOps::UnionD(compiler, filterVars, block->bbLiveOut);
@@ -1787,9 +1788,8 @@ LinearScan::identifyCandidatesExceptionDataflow()
{
VarSetOps::UnionD(compiler, exceptVars, block->bbLiveIn);
}
- if ((block->bbJumpKind == BBJ_EHFINALLYRET) ||
- (block->bbJumpKind == BBJ_EHFILTERRET) ||
- (block->bbJumpKind == BBJ_EHCATCHRET) )
+ if ((block->bbJumpKind == BBJ_EHFINALLYRET) || (block->bbJumpKind == BBJ_EHFILTERRET) ||
+ (block->bbJumpKind == BBJ_EHCATCHRET))
{
VarSetOps::UnionD(compiler, exceptVars, block->bbLiveOut);
}
@@ -1807,7 +1807,7 @@ LinearScan::identifyCandidatesExceptionDataflow()
VARSET_ITER_INIT(compiler, iter, exceptVars, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
LclVarDsc* varDsc = compiler->lvaTable + varNum;
compiler->lvaSetVarDoNotEnregister(varNum DEBUGARG(Compiler::DNER_LiveInOutOfHandler));
@@ -1815,7 +1815,9 @@ LinearScan::identifyCandidatesExceptionDataflow()
if (varTypeIsGC(varDsc))
{
if (VarSetOps::IsMember(compiler, finallyVars, varIndex) && !varDsc->lvIsParam)
+ {
varDsc->lvMustInit = true;
+ }
}
}
}
@@ -1830,7 +1832,7 @@ bool LinearScan::isRegCandidate(LclVarDsc* varDsc)
// If we have JMP, reg args must be put on the stack
- if (compiler->compJmpOpUsed && varDsc->lvIsRegArg)
+ if (compiler->compJmpOpUsed && varDsc->lvIsRegArg)
{
return false;
}
@@ -1855,16 +1857,20 @@ bool LinearScan::isRegCandidate(LclVarDsc* varDsc)
void LinearScan::identifyCandidates()
{
if (compiler->lvaCount == 0)
+ {
return;
+ }
if (compiler->compHndBBtabCount > 0)
+ {
identifyCandidatesExceptionDataflow();
+ }
// initialize mapping from local to interval
- localVarIntervals = new(compiler, CMK_LSRA) Interval*[compiler->lvaCount];
+ localVarIntervals = new (compiler, CMK_LSRA) Interval*[compiler->lvaCount];
- unsigned lclNum;
- LclVarDsc *varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
// While we build intervals for the candidate lclVars, we will determine the floating point
// lclVars, if any, to consider for callee-save register preferencing.
@@ -1886,25 +1892,23 @@ void LinearScan::identifyCandidates()
// for vectors on Arm64, though the actual value may differ.
VarSetOps::AssignNoCopy(compiler, fpCalleeSaveCandidateVars, VarSetOps::MakeEmpty(compiler));
- VARSET_TP VARSET_INIT_NOCOPY(fpMaybeCandidateVars, VarSetOps::MakeEmpty(compiler));
- unsigned int floatVarCount = 0;
+ VARSET_TP VARSET_INIT_NOCOPY(fpMaybeCandidateVars, VarSetOps::MakeEmpty(compiler));
+ unsigned int floatVarCount = 0;
unsigned int thresholdFPRefCntWtd = 4 * BB_UNITY_WEIGHT;
- unsigned int maybeFPRefCntWtd = 2 * BB_UNITY_WEIGHT;
+ unsigned int maybeFPRefCntWtd = 2 * BB_UNITY_WEIGHT;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
VarSetOps::AssignNoCopy(compiler, largeVectorVars, VarSetOps::MakeEmpty(compiler));
VarSetOps::AssignNoCopy(compiler, largeVectorCalleeSaveCandidateVars, VarSetOps::MakeEmpty(compiler));
- unsigned int largeVectorVarCount = 0;
+ unsigned int largeVectorVarCount = 0;
unsigned int thresholdLargeVectorRefCntWtd = 4 * BB_UNITY_WEIGHT;
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
- for (lclNum = 0, varDsc = compiler->lvaTable;
- lclNum < compiler->lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++)
{
// Assign intervals to all the variables - this makes it easier to map
// them back
- var_types intervalType = (var_types) varDsc->lvType;
- Interval *newInt = newInterval(intervalType);
+ var_types intervalType = (var_types)varDsc->lvType;
+ Interval* newInt = newInterval(intervalType);
newInt->setLocalNumber(lclNum, this);
if (varDsc->lvIsStructField)
@@ -1913,13 +1917,13 @@ void LinearScan::identifyCandidates()
}
// Initialize all variables to REG_STK
- varDsc->lvRegNum = REG_STK;
+ varDsc->lvRegNum = REG_STK;
#ifndef _TARGET_64BIT_
varDsc->lvOtherReg = REG_STK;
#endif // _TARGET_64BIT_
#if !defined(_TARGET_64BIT_)
- if(intervalType == TYP_LONG)
+ if (intervalType == TYP_LONG)
{
// Long variables should not be register candidates.
// Lowering will have split any candidate lclVars into lo/hi vars.
@@ -1943,7 +1947,7 @@ void LinearScan::identifyCandidates()
varDsc->lvRegister = false;
/* If the ref count is zero */
- if (varDsc->lvRefCnt == 0)
+ if (varDsc->lvRefCnt == 0)
{
/* Zero ref count, make this untracked */
varDsc->lvRefCntWtd = 0;
@@ -1956,9 +1960,9 @@ void LinearScan::identifyCandidates()
// or enregistered, on x86 -- it is believed that we can enregister pinned (more properly, "pinning")
// references when using the general GC encoding.
- if (varDsc->lvAddrExposed || !varTypeIsEnregisterableStruct(varDsc))
+ if (varDsc->lvAddrExposed || !varTypeIsEnregisterableStruct(varDsc))
{
- varDsc->lvLRACandidate = 0;
+ varDsc->lvLRACandidate = 0;
#ifdef DEBUG
Compiler::DoNotEnregisterReason dner = Compiler::DNER_AddrExposed;
if (!varDsc->lvAddrExposed)
@@ -1970,7 +1974,7 @@ void LinearScan::identifyCandidates()
}
else if (varDsc->lvPinned)
{
- varDsc->lvTracked = 0;
+ varDsc->lvTracked = 0;
#ifdef JIT32_GCENCODER
compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(Compiler::DNER_PinningRef));
#endif // JIT32_GCENCODER
@@ -1980,7 +1984,7 @@ void LinearScan::identifyCandidates()
// if so mark all args and locals as volatile, so that they
// won't ever get enregistered.
//
- if (compiler->opts.MinOpts() && compiler->compHndBBtabCount > 0)
+ if (compiler->opts.MinOpts() && compiler->compHndBBtabCount > 0)
{
compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(Compiler::DNER_LiveInOutOfHandler));
varDsc->lvLRACandidate = 0;
@@ -1998,52 +2002,57 @@ void LinearScan::identifyCandidates()
switch (type)
{
#if CPU_HAS_FP_SUPPORT
- case TYP_FLOAT:
- case TYP_DOUBLE:
- if (compiler->opts.compDbgCode) varDsc->lvLRACandidate = 0;
- break;
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ if (compiler->opts.compDbgCode)
+ {
+ varDsc->lvLRACandidate = 0;
+ }
+ break;
#endif // CPU_HAS_FP_SUPPORT
- case TYP_INT:
- case TYP_LONG:
- case TYP_REF:
- case TYP_BYREF:
- break;
+ case TYP_INT:
+ case TYP_LONG:
+ case TYP_REF:
+ case TYP_BYREF:
+ break;
#ifdef FEATURE_SIMD
- case TYP_SIMD12:
- case TYP_SIMD16:
- case TYP_SIMD32:
- if (varDsc->lvPromoted)
- {
- varDsc->lvLRACandidate = 0;
- }
- break;
- // TODO-1stClassStructs: Move TYP_SIMD8 up with the other SIMD types, after handling the param issue
- // (passing & returning as TYP_LONG).
- case TYP_SIMD8:
+ case TYP_SIMD12:
+ case TYP_SIMD16:
+ case TYP_SIMD32:
+ if (varDsc->lvPromoted)
+ {
+ varDsc->lvLRACandidate = 0;
+ }
+ break;
+ // TODO-1stClassStructs: Move TYP_SIMD8 up with the other SIMD types, after handling the param issue
+ // (passing & returning as TYP_LONG).
+ case TYP_SIMD8:
#endif // FEATURE_SIMD
- case TYP_STRUCT:
+ case TYP_STRUCT:
{
varDsc->lvLRACandidate = 0;
}
break;
- case TYP_UNDEF:
- case TYP_UNKNOWN:
- noway_assert(!"lvType not set correctly");
- varDsc->lvType = TYP_INT;
+ case TYP_UNDEF:
+ case TYP_UNKNOWN:
+ noway_assert(!"lvType not set correctly");
+ varDsc->lvType = TYP_INT;
- __fallthrough;
+ __fallthrough;
- default:
- varDsc->lvLRACandidate = 0;
+ default:
+ varDsc->lvLRACandidate = 0;
}
// we will set this later when we have determined liveness
if (varDsc->lvLRACandidate)
+ {
varDsc->lvMustInit = false;
+ }
// We maintain two sets of FP vars - those that meet the first threshold of weighted ref Count,
// and those that meet the second (see the definitions of thresholdFPRefCntWtd and maybeFPRefCntWtd
@@ -2051,7 +2060,8 @@ void LinearScan::identifyCandidates()
CLANG_FORMAT_COMMENT_ANCHOR;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
- // Additionally, when we are generating AVX on non-UNIX amd64, we keep a separate set of the LargeVectorType vars.
+ // Additionally, when we are generating AVX on non-UNIX amd64, we keep a separate set of the LargeVectorType
+ // vars.
if (varDsc->lvType == LargeVectorType)
{
largeVectorVarCount++;
@@ -2064,7 +2074,7 @@ void LinearScan::identifyCandidates()
}
else
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
- if (regType(newInt->registerType) == FloatRegisterType)
+ if (regType(newInt->registerType) == FloatRegisterType)
{
floatVarCount++;
unsigned refCntWtd = varDsc->lvRefCntWtd;
@@ -2107,16 +2117,12 @@ void LinearScan::identifyCandidates()
}
#endif
- JITDUMP("floatVarCount = %d; hasLoops = %d, singleExit = %d\n",
- floatVarCount,
- compiler->fgHasLoops,
+ JITDUMP("floatVarCount = %d; hasLoops = %d, singleExit = %d\n", floatVarCount, compiler->fgHasLoops,
(compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr));
// Determine whether to use the 2nd, more aggressive, threshold for fp callee saves.
- if (floatVarCount > 6
- && compiler->fgHasLoops
- && (compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr)
- )
+ if (floatVarCount > 6 && compiler->fgHasLoops &&
+ (compiler->fgReturnBlocks == nullptr || compiler->fgReturnBlocks->next == nullptr))
{
#ifdef DEBUG
if (VERBOSE)
@@ -2148,25 +2154,23 @@ void LinearScan::identifyCandidates()
#endif // _TARGET_ARM_
}
-
// TODO-Throughput: This mapping can surely be more efficiently done
-void
-LinearScan::initVarRegMaps()
+void LinearScan::initVarRegMaps()
{
- assert(compiler->lvaTrackedFixed); // We should have already set this to prevent us from adding any new tracked
- // variables.
+ assert(compiler->lvaTrackedFixed); // We should have already set this to prevent us from adding any new tracked
+ // variables.
// The compiler memory allocator requires that the allocation be an
// even multiple of int-sized objects
unsigned int varCount = compiler->lvaTrackedCount;
- regMapCount = (unsigned int) roundUp(varCount, sizeof(int));
+ regMapCount = (unsigned int)roundUp(varCount, sizeof(int));
// Not sure why blocks aren't numbered from zero, but they don't appear to be.
// So, if we want to index by bbNum we have to know the maximum value.
unsigned int bbCount = compiler->fgBBNumMax + 1;
-
- inVarToRegMaps = new (compiler, CMK_LSRA) regNumber *[bbCount];
- outVarToRegMaps = new (compiler, CMK_LSRA) regNumber *[bbCount];
+
+ inVarToRegMaps = new (compiler, CMK_LSRA) regNumber*[bbCount];
+ outVarToRegMaps = new (compiler, CMK_LSRA) regNumber*[bbCount];
if (varCount > 0)
{
@@ -2175,10 +2179,10 @@ LinearScan::initVarRegMaps()
for (unsigned int i = 0; i < bbCount; i++)
{
- regNumber * inVarToRegMap = new (compiler, CMK_LSRA) regNumber[regMapCount];
- regNumber * outVarToRegMap = new (compiler, CMK_LSRA) regNumber[regMapCount];
+ regNumber* inVarToRegMap = new (compiler, CMK_LSRA) regNumber[regMapCount];
+ regNumber* outVarToRegMap = new (compiler, CMK_LSRA) regNumber[regMapCount];
- for(unsigned int j = 0; j < regMapCount; j++)
+ for (unsigned int j = 0; j < regMapCount; j++)
{
inVarToRegMap[j] = REG_STK;
outVarToRegMap[j] = REG_STK;
@@ -2192,28 +2196,25 @@ LinearScan::initVarRegMaps()
sharedCriticalVarToRegMap = nullptr;
for (unsigned int i = 0; i < bbCount; i++)
{
- inVarToRegMaps[i] = nullptr;
+ inVarToRegMaps[i] = nullptr;
outVarToRegMaps[i] = nullptr;
}
}
}
-void
-LinearScan::setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg)
+void LinearScan::setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg)
{
assert(reg < UCHAR_MAX && varNum < compiler->lvaCount);
inVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = reg;
}
-void
-LinearScan::setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg)
+void LinearScan::setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg)
{
assert(reg < UCHAR_MAX && varNum < compiler->lvaCount);
- outVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = reg;
+ outVarToRegMaps[bbNum][compiler->lvaTable[varNum].lvVarIndex] = reg;
}
-LinearScan::SplitEdgeInfo
-LinearScan::getSplitEdgeInfo(unsigned int bbNum)
+LinearScan::SplitEdgeInfo LinearScan::getSplitEdgeInfo(unsigned int bbNum)
{
SplitEdgeInfo splitEdgeInfo;
assert(bbNum <= compiler->fgBBNumMax);
@@ -2225,8 +2226,7 @@ LinearScan::getSplitEdgeInfo(unsigned int bbNum)
return splitEdgeInfo;
}
-VarToRegMap
-LinearScan::getInVarToRegMap(unsigned int bbNum)
+VarToRegMap LinearScan::getInVarToRegMap(unsigned int bbNum)
{
assert(bbNum <= compiler->fgBBNumMax);
// For the blocks inserted to split critical edges, the inVarToRegMap is
@@ -2234,7 +2234,7 @@ LinearScan::getInVarToRegMap(unsigned int bbNum)
if (bbNum > bbNumMaxBeforeResolution)
{
SplitEdgeInfo splitEdgeInfo = getSplitEdgeInfo(bbNum);
- unsigned fromBBNum = splitEdgeInfo.fromBBNum;
+ unsigned fromBBNum = splitEdgeInfo.fromBBNum;
if (fromBBNum == 0)
{
assert(splitEdgeInfo.toBBNum != 0);
@@ -2249,8 +2249,7 @@ LinearScan::getInVarToRegMap(unsigned int bbNum)
return inVarToRegMaps[bbNum];
}
-VarToRegMap
-LinearScan::getOutVarToRegMap(unsigned int bbNum)
+VarToRegMap LinearScan::getOutVarToRegMap(unsigned int bbNum)
{
assert(bbNum <= compiler->fgBBNumMax);
// For the blocks inserted to split critical edges, the outVarToRegMap is
@@ -2260,7 +2259,7 @@ LinearScan::getOutVarToRegMap(unsigned int bbNum)
// If this is an empty block, its in and out maps are both the same.
// We identify this case by setting fromBBNum or toBBNum to 0, and using only the other.
SplitEdgeInfo splitEdgeInfo = getSplitEdgeInfo(bbNum);
- unsigned toBBNum = splitEdgeInfo.toBBNum;
+ unsigned toBBNum = splitEdgeInfo.toBBNum;
if (toBBNum == 0)
{
assert(splitEdgeInfo.fromBBNum != 0);
@@ -2274,8 +2273,7 @@ LinearScan::getOutVarToRegMap(unsigned int bbNum)
return outVarToRegMaps[bbNum];
}
-regNumber
-LinearScan::getVarReg(VarToRegMap bbVarToRegMap, unsigned int varNum)
+regNumber LinearScan::getVarReg(VarToRegMap bbVarToRegMap, unsigned int varNum)
{
assert(compiler->lvaTable[varNum].lvTracked);
return bbVarToRegMap[compiler->lvaTable[varNum].lvVarIndex];
@@ -2283,27 +2281,25 @@ LinearScan::getVarReg(VarToRegMap bbVarToRegMap, unsigned int varNum)
// Initialize the incoming VarToRegMap to the given map values (generally a predecessor of
// the block)
-VarToRegMap
-LinearScan::setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap)
+VarToRegMap LinearScan::setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap)
{
VarToRegMap inVarToRegMap = inVarToRegMaps[bbNum];
memcpy(inVarToRegMap, srcVarToRegMap, (regMapCount * sizeof(regNumber)));
return inVarToRegMap;
}
-
// find the last node in the tree in execution order
// TODO-Throughput: this is inefficient!
-GenTree *lastNodeInTree(GenTree *tree)
+GenTree* lastNodeInTree(GenTree* tree)
{
- // There is no gtprev on the top level tree node so
- // apparently the way to walk a tree backwards is to walk
+ // There is no gtprev on the top level tree node so
+ // apparently the way to walk a tree backwards is to walk
// it forward, find the last node, and walk back from there.
- GenTree *last = nullptr;
+ GenTree* last = nullptr;
if (tree->OperGet() == GT_STMT)
{
- GenTree *statement = tree;
+ GenTree* statement = tree;
foreach_treenode_execution_order(tree, statement)
{
@@ -2322,19 +2318,22 @@ GenTree *lastNodeInTree(GenTree *tree)
}
}
-
// given a tree node
-RefType refTypeForLocalRefNode(GenTree *node)
+RefType refTypeForLocalRefNode(GenTree* node)
{
assert(node->IsLocal());
-
+
// We don't support updates
assert((node->gtFlags & GTF_VAR_USEASG) == 0);
if (node->gtFlags & GTF_VAR_DEF)
+ {
return RefTypeDef;
+ }
else
+ {
return RefTypeUse;
+ }
}
// This function sets RefPosition last uses by walking the RefPositions, instead of walking the
@@ -2346,8 +2345,7 @@ RefType refTypeForLocalRefNode(GenTree *node)
// being set by dataflow analysis. It is necessary to do it this way only because the execution
// order wasn't strictly correct.
-void
-LinearScan::setLastUses(BasicBlock * block)
+void LinearScan::setLastUses(BasicBlock* block)
{
#ifdef DEBUG
if (VERBOSE)
@@ -2364,7 +2362,7 @@ LinearScan::setLastUses(BasicBlock * block)
keepAliveVarNum = compiler->info.compThisArg;
assert(compiler->info.compIsStatic == false);
}
-
+
// find which uses are lastUses
// Work backwards starting with live out.
@@ -2379,15 +2377,15 @@ LinearScan::setLastUses(BasicBlock * block)
while (currentRefPosition->refType != RefTypeBB)
{
// We should never see ParamDefs or ZeroInits within a basic block.
- assert (currentRefPosition->refType != RefTypeParamDef && currentRefPosition->refType != RefTypeZeroInit);
- if (currentRefPosition->isIntervalRef() &&
- currentRefPosition->getInterval()->isLocalVar)
+ assert(currentRefPosition->refType != RefTypeParamDef && currentRefPosition->refType != RefTypeZeroInit);
+ if (currentRefPosition->isIntervalRef() && currentRefPosition->getInterval()->isLocalVar)
{
- unsigned varNum = currentRefPosition->getInterval()->varNum;
+ unsigned varNum = currentRefPosition->getInterval()->varNum;
unsigned varIndex = currentRefPosition->getInterval()->getVarIndex(compiler);
// We should always have a tree node for a localVar, except for the "special" RefPositions.
GenTreePtr tree = currentRefPosition->treeNode;
- assert(tree != nullptr || currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef);
+ assert(tree != nullptr || currentRefPosition->refType == RefTypeExpUse ||
+ currentRefPosition->refType == RefTypeDummyDef);
if (!VarSetOps::IsMember(compiler, temp, varIndex) && varNum != keepAliveVarNum)
{
// There was no exposed use, so this is a
@@ -2401,7 +2399,9 @@ LinearScan::setLastUses(BasicBlock * block)
#ifdef DEBUG
if (getLsraExtendLifeTimes())
{
- JITDUMP("last use of V%02u @%u (not marked as last use for LSRA due to extendLifetimes stress option)\n", compiler->lvaTrackedToVarNum[varIndex], loc);
+ JITDUMP("last use of V%02u @%u (not marked as last use for LSRA due to extendLifetimes stress "
+ "option)\n",
+ compiler->lvaTrackedToVarNum[varIndex], loc);
}
else
#endif // DEBUG
@@ -2465,11 +2465,7 @@ LinearScan::setLastUses(BasicBlock * block)
#endif // DEBUG
}
-
-void LinearScan::addRefsForPhysRegMask(regMaskTP mask,
- LsraLocation currentLoc,
- RefType refType,
- bool isLastUse)
+void LinearScan::addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse)
{
for (regNumber reg = REG_FIRST; mask; reg = REG_NEXT(reg), mask >>= 1)
{
@@ -2477,12 +2473,9 @@ void LinearScan::addRefsForPhysRegMask(regMaskTP mask,
{
// This assumes that these are all "special" RefTypes that
// don't need to be recorded on the tree (hence treeNode is nullptr)
- RefPosition *pos = newRefPosition(reg,
- currentLoc,
- refType,
- nullptr,
+ RefPosition* pos = newRefPosition(reg, currentLoc, refType, nullptr,
genRegMask(reg)); // This MUST occupy the physical register (obviously)
-
+
if (isLastUse)
{
pos->lastUse = true;
@@ -2491,215 +2484,213 @@ void LinearScan::addRefsForPhysRegMask(regMaskTP mask,
}
}
-//------------------------------------------------------------------------
+//------------------------------------------------------------------------
// getKillSetForNode: Return the registers killed by the given tree node.
//
-// Arguments:
+// Arguments:
// tree - the tree for which the kill set is needed.
//
// Return Value: a register mask of the registers killed
//
-regMaskTP
-LinearScan::getKillSetForNode(GenTree* tree)
+regMaskTP LinearScan::getKillSetForNode(GenTree* tree)
{
regMaskTP killMask = RBM_NONE;
switch (tree->OperGet())
{
#ifdef _TARGET_XARCH_
- case GT_MUL:
- // We use the 128-bit multiply when performing an overflow checking unsigned multiply
- //
- if (((tree->gtFlags & GTF_UNSIGNED) != 0) && tree->gtOverflowEx())
- {
- // Both RAX and RDX are killed by the operation
- killMask = RBM_RAX|RBM_RDX;
- }
- break;
-
- case GT_MULHI:
- killMask = RBM_RAX|RBM_RDX;
- break;
+ case GT_MUL:
+ // We use the 128-bit multiply when performing an overflow checking unsigned multiply
+ //
+ if (((tree->gtFlags & GTF_UNSIGNED) != 0) && tree->gtOverflowEx())
+ {
+ // Both RAX and RDX are killed by the operation
+ killMask = RBM_RAX | RBM_RDX;
+ }
+ break;
- case GT_MOD:
- case GT_DIV:
- case GT_UMOD:
- case GT_UDIV:
- if (!varTypeIsFloating(tree->TypeGet()))
- {
- // RDX needs to be killed early, because it must not be used as a source register
- // (unlike most cases, where the kill happens AFTER the uses). So for this kill,
- // we add the RefPosition at the tree loc (where the uses are located) instead of the
- // usual kill location which is the same as the defs at tree loc+1.
- // Note that we don't have to add interference for the live vars, because that
- // will be done below, and is not sensitive to the precise location.
- LsraLocation currentLoc = tree->gtLsraInfo.loc;
- assert(currentLoc != 0);
- addRefsForPhysRegMask(RBM_RDX, currentLoc, RefTypeKill, true);
- // Both RAX and RDX are killed by the operation
- killMask = RBM_RAX|RBM_RDX;
- }
- break;
+ case GT_MULHI:
+ killMask = RBM_RAX | RBM_RDX;
+ break;
+
+ case GT_MOD:
+ case GT_DIV:
+ case GT_UMOD:
+ case GT_UDIV:
+ if (!varTypeIsFloating(tree->TypeGet()))
+ {
+ // RDX needs to be killed early, because it must not be used as a source register
+ // (unlike most cases, where the kill happens AFTER the uses). So for this kill,
+ // we add the RefPosition at the tree loc (where the uses are located) instead of the
+ // usual kill location which is the same as the defs at tree loc+1.
+ // Note that we don't have to add interference for the live vars, because that
+ // will be done below, and is not sensitive to the precise location.
+ LsraLocation currentLoc = tree->gtLsraInfo.loc;
+ assert(currentLoc != 0);
+ addRefsForPhysRegMask(RBM_RDX, currentLoc, RefTypeKill, true);
+ // Both RAX and RDX are killed by the operation
+ killMask = RBM_RAX | RBM_RDX;
+ }
+ break;
#endif // _TARGET_XARCH_
- case GT_COPYOBJ:
- killMask = compiler->compHelperCallKillSet(CORINFO_HELP_ASSIGN_BYREF);
- break;
+ case GT_COPYOBJ:
+ killMask = compiler->compHelperCallKillSet(CORINFO_HELP_ASSIGN_BYREF);
+ break;
- case GT_COPYBLK:
+ case GT_COPYBLK:
{
GenTreeCpBlk* cpBlkNode = tree->AsCpBlk();
switch (cpBlkNode->gtBlkOpKind)
{
- case GenTreeBlkOp::BlkOpKindHelper:
- killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMCPY);
- break;
+ case GenTreeBlkOp::BlkOpKindHelper:
+ killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMCPY);
+ break;
#ifdef _TARGET_XARCH_
- case GenTreeBlkOp::BlkOpKindRepInstr:
- // rep movs kills RCX, RDI and RSI
- killMask = RBM_RCX | RBM_RDI | RBM_RSI;
- break;
+ case GenTreeBlkOp::BlkOpKindRepInstr:
+ // rep movs kills RCX, RDI and RSI
+ killMask = RBM_RCX | RBM_RDI | RBM_RSI;
+ break;
#else
- case GenTreeBlkOp::BlkOpKindRepInstr:
+ case GenTreeBlkOp::BlkOpKindRepInstr:
#endif
- case GenTreeBlkOp::BlkOpKindUnroll:
- case GenTreeBlkOp::BlkOpKindInvalid:
- // for these 'cpBlkNode->gtBlkOpKind' kinds, we leave 'killMask' = RBM_NONE
- break;
+ case GenTreeBlkOp::BlkOpKindUnroll:
+ case GenTreeBlkOp::BlkOpKindInvalid:
+ // for these 'cpBlkNode->gtBlkOpKind' kinds, we leave 'killMask' = RBM_NONE
+ break;
}
}
break;
- case GT_INITBLK:
+ case GT_INITBLK:
{
GenTreeInitBlk* initBlkNode = tree->AsInitBlk();
switch (initBlkNode->gtBlkOpKind)
{
- case GenTreeBlkOp::BlkOpKindHelper:
- killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET);
- break;
+ case GenTreeBlkOp::BlkOpKindHelper:
+ killMask = compiler->compHelperCallKillSet(CORINFO_HELP_MEMSET);
+ break;
#ifdef _TARGET_XARCH_
- case GenTreeBlkOp::BlkOpKindRepInstr:
- // rep stos kills RCX and RDI
- killMask = RBM_RCX | RBM_RDI;
- break;
+ case GenTreeBlkOp::BlkOpKindRepInstr:
+ // rep stos kills RCX and RDI
+ killMask = RBM_RCX | RBM_RDI;
+ break;
#else
- case GenTreeBlkOp::BlkOpKindRepInstr:
+ case GenTreeBlkOp::BlkOpKindRepInstr:
#endif
- case GenTreeBlkOp::BlkOpKindUnroll:
- case GenTreeBlkOp::BlkOpKindInvalid:
- // for these 'cpBlkNode->gtBlkOpKind' kinds, we leave 'killMask' = RBM_NONE
- break;
+ case GenTreeBlkOp::BlkOpKindUnroll:
+ case GenTreeBlkOp::BlkOpKindInvalid:
+ // for these 'cpBlkNode->gtBlkOpKind' kinds, we leave 'killMask' = RBM_NONE
+ break;
}
}
break;
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROL:
- case GT_ROR:
- if (tree->gtLsraInfo.isHelperCallWithKills)
- {
- killMask = RBM_CALLEE_TRASH;
- }
- break;
- case GT_RETURNTRAP:
- killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC);
- break;
- case GT_CALL:
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROL:
+ case GT_ROR:
+ if (tree->gtLsraInfo.isHelperCallWithKills)
+ {
+ killMask = RBM_CALLEE_TRASH;
+ }
+ break;
+ case GT_RETURNTRAP:
+ killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC);
+ break;
+ case GT_CALL:
#ifdef _TARGET_X86_
- if (compiler->compFloatingPointUsed)
- {
- if (tree->TypeGet() == TYP_DOUBLE)
+ if (compiler->compFloatingPointUsed)
{
- needDoubleTmpForFPCall = true;
+ if (tree->TypeGet() == TYP_DOUBLE)
+ {
+ needDoubleTmpForFPCall = true;
+ }
+ else if (tree->TypeGet() == TYP_FLOAT)
+ {
+ needFloatTmpForFPCall = true;
+ }
}
- else if (tree->TypeGet() == TYP_FLOAT)
+ if (tree->IsHelperCall())
{
- needFloatTmpForFPCall = true;
+ GenTreeCall* call = tree->AsCall();
+ CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
+ killMask = compiler->compHelperCallKillSet(helpFunc);
}
- }
- if (tree->IsHelperCall())
- {
- GenTreeCall* call = tree->AsCall();
- CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
- killMask = compiler->compHelperCallKillSet(helpFunc);
- }
- else
+ else
#endif // _TARGET_X86_
- {
- // if there is no FP used, we can ignore the FP kills
- if (compiler->compFloatingPointUsed)
{
- killMask = RBM_CALLEE_TRASH;
+ // if there is no FP used, we can ignore the FP kills
+ if (compiler->compFloatingPointUsed)
+ {
+ killMask = RBM_CALLEE_TRASH;
+ }
+ else
+ {
+ killMask = RBM_INT_CALLEE_TRASH;
+ }
}
- else
+ break;
+ case GT_STOREIND:
+ if (compiler->codeGen->gcInfo.gcIsWriteBarrierAsgNode(tree))
{
- killMask = RBM_INT_CALLEE_TRASH;
+ killMask = RBM_CALLEE_TRASH_NOGC;
+#if !NOGC_WRITE_BARRIERS && (defined(_TARGET_ARM_) || defined(_TARGET_AMD64_))
+ killMask |= (RBM_ARG_0 | RBM_ARG_1);
+#endif // !NOGC_WRITE_BARRIERS && (defined(_TARGET_ARM_) || defined(_TARGET_AMD64_))
}
- }
- break;
- case GT_STOREIND:
- if (compiler->codeGen->gcInfo.gcIsWriteBarrierAsgNode(tree))
- {
- killMask = RBM_CALLEE_TRASH_NOGC;
- #if !NOGC_WRITE_BARRIERS && (defined(_TARGET_ARM_) || defined(_TARGET_AMD64_))
- killMask |= (RBM_ARG_0 | RBM_ARG_1);
- #endif // !NOGC_WRITE_BARRIERS && (defined(_TARGET_ARM_) || defined(_TARGET_AMD64_))
- }
- break;
+ break;
#if defined(PROFILING_SUPPORTED) && defined(_TARGET_AMD64_)
- // If this method requires profiler ELT hook then mark these nodes as killing
- // callee trash registers (excluding RAX and XMM0). The reason for this is that
- // profiler callback would trash these registers. See vm\amd64\asmhelpers.asm for
- // more details.
- case GT_RETURN:
- if (compiler->compIsProfilerHookNeeded())
- {
- killMask = compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_LEAVE);
- }
- break;
-
- case GT_PROF_HOOK:
- if (compiler->compIsProfilerHookNeeded())
- {
- killMask = compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_TAILCALL);;
- }
- break;
+ // If this method requires profiler ELT hook then mark these nodes as killing
+ // callee trash registers (excluding RAX and XMM0). The reason for this is that
+ // profiler callback would trash these registers. See vm\amd64\asmhelpers.asm for
+ // more details.
+ case GT_RETURN:
+ if (compiler->compIsProfilerHookNeeded())
+ {
+ killMask = compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_LEAVE);
+ }
+ break;
+
+ case GT_PROF_HOOK:
+ if (compiler->compIsProfilerHookNeeded())
+ {
+ killMask = compiler->compHelperCallKillSet(CORINFO_HELP_PROF_FCN_TAILCALL);
+ }
+ break;
#endif // PROFILING_SUPPORTED && _TARGET_AMD64_
- default:
- // for all other 'tree->OperGet()' kinds, leave 'killMask' = RBM_NONE
- break;
+ default:
+ // for all other 'tree->OperGet()' kinds, leave 'killMask' = RBM_NONE
+ break;
}
return killMask;
}
-//------------------------------------------------------------------------
+//------------------------------------------------------------------------
// buildKillPositionsForNode:
// Given some tree node add refpositions for all the registers this node kills
//
-// Arguments:
+// Arguments:
// tree - the tree for which kill positions should be generated
// currentLoc - the location at which the kills should be added
//
-// Return Value:
+// Return Value:
// true - kills were inserted
// false - no kills were inserted
//
-// Notes:
+// Notes:
// The return value is needed because if we have any kills, we need to make sure that
// all defs are located AFTER the kills. On the other hand, if there aren't kills,
// the multiple defs for a regPair are in different locations.
// If we generate any kills, we will mark all currentLiveVars as being preferenced
// to avoid the killed registers. This is somewhat conservative.
-bool
-LinearScan::buildKillPositionsForNode(GenTree* tree,
- LsraLocation currentLoc)
+bool LinearScan::buildKillPositionsForNode(GenTree* tree, LsraLocation currentLoc)
{
- regMaskTP killMask = getKillSetForNode(tree);
- bool isCallKill = ((killMask == RBM_INT_CALLEE_TRASH) || (killMask == RBM_CALLEE_TRASH));
+ regMaskTP killMask = getKillSetForNode(tree);
+ bool isCallKill = ((killMask == RBM_INT_CALLEE_TRASH) || (killMask == RBM_CALLEE_TRASH));
if (killMask != RBM_NONE)
{
// The killMask identifies a set of registers that will be used during codegen.
@@ -2725,8 +2716,8 @@ LinearScan::buildKillPositionsForNode(GenTree* tree,
VARSET_ITER_INIT(compiler, iter, currentLiveVars, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- LclVarDsc *varDsc = compiler->lvaTable + varNum;
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (varDsc->lvType == LargeVectorType)
{
@@ -2737,11 +2728,12 @@ LinearScan::buildKillPositionsForNode(GenTree* tree,
}
else
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
- if (varTypeIsFloating(varDsc) && !VarSetOps::IsMember(compiler, fpCalleeSaveCandidateVars, varIndex))
+ if (varTypeIsFloating(varDsc) &&
+ !VarSetOps::IsMember(compiler, fpCalleeSaveCandidateVars, varIndex))
{
continue;
}
- Interval * interval = getIntervalForLocalVar(varNum);
+ Interval* interval = getIntervalForLocalVar(varNum);
if (isCallKill)
{
interval->preferCalleeSave = true;
@@ -2754,8 +2746,9 @@ LinearScan::buildKillPositionsForNode(GenTree* tree,
}
else
{
- // If there are no callee-saved registers, the call could kill all the registers.
- // This is a valid state, so in that case assert should not trigger. The RA will spill in order to free a register later.
+ // If there are no callee-saved registers, the call could kill all the registers.
+ // This is a valid state, so in that case the assert should not trigger. The RA will spill in order to
+ // free a register later.
assert(compiler->opts.compDbgEnC || (calleeSaveRegs(varDsc->lvType)) == RBM_NONE);
}
}
@@ -2763,7 +2756,8 @@ LinearScan::buildKillPositionsForNode(GenTree* tree,
if (tree->IsCall() && (tree->gtFlags & GTF_CALL_UNMANAGED) != 0)
{
- RefPosition * pos = newRefPosition((Interval *)nullptr, currentLoc, RefTypeKillGCRefs, tree, (allRegs(TYP_REF) & ~RBM_ARG_REGS));
+ RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeKillGCRefs, tree,
+ (allRegs(TYP_REF) & ~RBM_ARG_REGS));
}
return true;
}
@@ -2771,27 +2765,28 @@ LinearScan::buildKillPositionsForNode(GenTree* tree,
return false;
}
-RefPosition *
-LinearScan::defineNewInternalTemp(GenTree *tree, RegisterType regType, LsraLocation currentLoc, regMaskTP regMask)
+RefPosition* LinearScan::defineNewInternalTemp(GenTree* tree,
+ RegisterType regType,
+ LsraLocation currentLoc,
+ regMaskTP regMask)
{
- Interval * current = newInterval(regType);
+ Interval* current = newInterval(regType);
current->isInternal = true;
return newRefPosition(current, currentLoc, RefTypeDef, tree, regMask);
}
-int
-LinearScan::buildInternalRegisterDefsForNode(GenTree *tree,
- LsraLocation currentLoc,
- RefPosition* temps[]) //populates
+int LinearScan::buildInternalRegisterDefsForNode(GenTree* tree,
+ LsraLocation currentLoc,
+ RefPosition* temps[]) // populates
{
- int count;
- int internalIntCount = tree->gtLsraInfo.internalIntCount;
- regMaskTP internalCands = tree->gtLsraInfo.getInternalCandidates(this);
+ int count;
+ int internalIntCount = tree->gtLsraInfo.internalIntCount;
+ regMaskTP internalCands = tree->gtLsraInfo.getInternalCandidates(this);
// If the number of internal integer registers required is the same as the number of candidate integer registers in
// the candidate set, then they must be handled as fixed registers.
// (E.g. for the integer registers that floating point arguments must be copied into for a varargs call.)
- bool fixedRegs = false;
+ bool fixedRegs = false;
regMaskTP internalIntCandidates = (internalCands & allRegs(TYP_INT));
if (((int)genCountBits(internalIntCandidates)) == internalIntCount)
{
@@ -2813,7 +2808,7 @@ LinearScan::buildInternalRegisterDefsForNode(GenTree *tree,
for (int i = 0; i < internalFloatCount; i++)
{
regMaskTP internalFPCands = (internalCands & internalFloatRegCandidates());
- temps[count++] = defineNewInternalTemp(tree, FloatRegisterType, currentLoc, internalFPCands);
+ temps[count++] = defineNewInternalTemp(tree, FloatRegisterType, currentLoc, internalFPCands);
}
noway_assert(count < MaxInternalRegisters);
@@ -2821,18 +2816,18 @@ LinearScan::buildInternalRegisterDefsForNode(GenTree *tree,
return count;
}
-void LinearScan::buildInternalRegisterUsesForNode(GenTree *tree,
+void LinearScan::buildInternalRegisterUsesForNode(GenTree* tree,
LsraLocation currentLoc,
RefPosition* defs[],
- int total)
+ int total)
{
assert(total < MaxInternalRegisters);
-
+
// defs[] has been populated by buildInternalRegisterDefsForNode
// now just add uses to the defs previously added.
- for (int i=0; i<total; i++)
+ for (int i = 0; i < total; i++)
{
- RefPosition * prevRefPosition = defs[i];
+ RefPosition* prevRefPosition = defs[i];
assert(prevRefPosition != nullptr);
regMaskTP mask = prevRefPosition->registerAssignment;
if (prevRefPosition->isPhysRegRef)
@@ -2841,34 +2836,30 @@ void LinearScan::buildInternalRegisterUsesForNode(GenTree *tree,
}
else
{
- RefPosition *newest = newRefPosition(defs[i]->getInterval(), currentLoc, RefTypeUse, tree, mask);
- newest->lastUse = true;
+ RefPosition* newest = newRefPosition(defs[i]->getInterval(), currentLoc, RefTypeUse, tree, mask);
+ newest->lastUse = true;
}
}
}
-regMaskTP
-LinearScan::getUseCandidates(GenTree *useNode)
+regMaskTP LinearScan::getUseCandidates(GenTree* useNode)
{
TreeNodeInfo info = useNode->gtLsraInfo;
return info.getSrcCandidates(this);
}
-regMaskTP
-LinearScan::getDefCandidates(GenTree *tree)
+regMaskTP LinearScan::getDefCandidates(GenTree* tree)
{
TreeNodeInfo info = tree->gtLsraInfo;
return info.getDstCandidates(this);
}
-RegisterType
-LinearScan::getDefType(GenTree *tree)
+RegisterType LinearScan::getDefType(GenTree* tree)
{
return tree->TypeGet();
}
-regMaskTP
-fixedCandidateMask(var_types type, regMaskTP candidates)
+regMaskTP fixedCandidateMask(var_types type, regMaskTP candidates)
{
if (genMaxOneBit(candidates))
{
@@ -2891,8 +2882,7 @@ class LocationInfoListNode final : public LocationInfo
LocationInfoListNode* m_next; // The next node in the list
public:
- LocationInfoListNode(LsraLocation l, Interval* i, GenTree* t, unsigned regIdx = 0)
- : LocationInfo(l, i, t, regIdx)
+ LocationInfoListNode(LsraLocation l, Interval* i, GenTree* t, unsigned regIdx = 0) : LocationInfo(l, i, t, regIdx)
{
}
@@ -2921,15 +2911,11 @@ class LocationInfoList final
LocationInfoListNode* m_tail; // The tail of the list
public:
- LocationInfoList()
- : m_head(nullptr)
- , m_tail(nullptr)
+ LocationInfoList() : m_head(nullptr), m_tail(nullptr)
{
}
- LocationInfoList(LocationInfoListNode* node)
- : m_head(node)
- , m_tail(node)
+ LocationInfoList(LocationInfoListNode* node) : m_head(node), m_tail(node)
{
assert(m_head->m_next == nullptr);
}
@@ -3020,7 +3006,7 @@ public:
class LocationInfoListNodePool final
{
LocationInfoListNode* m_freeList;
- Compiler* m_compiler;
+ Compiler* m_compiler;
public:
//------------------------------------------------------------------------
@@ -3031,22 +3017,21 @@ public:
// compiler - The compiler context.
// preallocate - The number of nodes to preallocate.
//
- LocationInfoListNodePool(Compiler* compiler, unsigned preallocate = 0)
- : m_compiler(compiler)
+ LocationInfoListNodePool(Compiler* compiler, unsigned preallocate = 0) : m_compiler(compiler)
{
if (preallocate > 0)
{
- size_t preallocateSize = sizeof(LocationInfoListNode) * preallocate;
- auto* preallocatedNodes = reinterpret_cast<LocationInfoListNode*>(compiler->compGetMem(preallocateSize));
+ size_t preallocateSize = sizeof(LocationInfoListNode) * preallocate;
+ auto* preallocatedNodes = reinterpret_cast<LocationInfoListNode*>(compiler->compGetMem(preallocateSize));
LocationInfoListNode* head = preallocatedNodes;
- head->m_next = nullptr;
+ head->m_next = nullptr;
for (unsigned i = 1; i < preallocate; i++)
{
LocationInfoListNode* node = &preallocatedNodes[i];
- node->m_next = head;
- head = node;
+ node->m_next = head;
+ head = node;
}
m_freeList = head;
@@ -3078,11 +3063,11 @@ public:
m_freeList = head->m_next;
}
- head->loc = l;
- head->interval = i;
- head->treeNode = t;
+ head->loc = l;
+ head->interval = i;
+ head->treeNode = t;
head->multiRegIdx = regIdx;
- head->m_next = nullptr;
+ head->m_next = nullptr;
return head;
}
@@ -3100,15 +3085,14 @@ public:
assert(list.m_tail != nullptr);
LocationInfoListNode* head = m_freeList;
- list.m_tail->m_next = head;
- m_freeList = list.m_head;
+ list.m_tail->m_next = head;
+ m_freeList = list.m_head;
}
};
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
VARSET_VALRET_TP
-LinearScan::buildUpperVectorSaveRefPositions(GenTree *tree,
- LsraLocation currentLoc)
+LinearScan::buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation currentLoc)
{
VARSET_TP VARSET_INIT_NOCOPY(liveLargeVectors, VarSetOps::MakeEmpty(compiler));
regMaskTP fpCalleeKillSet = RBM_NONE;
@@ -3120,45 +3104,47 @@ LinearScan::buildUpperVectorSaveRefPositions(GenTree *tree,
fpCalleeKillSet = getKillSetForNode(tree);
if ((fpCalleeKillSet & RBM_FLT_CALLEE_TRASH) != RBM_NONE)
{
- VarSetOps::AssignNoCopy(compiler, liveLargeVectors, VarSetOps::Intersection(compiler, currentLiveVars, largeVectorVars));
+ VarSetOps::AssignNoCopy(compiler, liveLargeVectors,
+ VarSetOps::Intersection(compiler, currentLiveVars, largeVectorVars));
VARSET_ITER_INIT(compiler, iter, liveLargeVectors, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- Interval *varInterval = getIntervalForLocalVar(varNum);
- Interval *tempInterval = newInterval(LargeVectorType);
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ Interval* varInterval = getIntervalForLocalVar(varNum);
+ Interval* tempInterval = newInterval(LargeVectorType);
tempInterval->isInternal = true;
- RefPosition *pos = newRefPosition(tempInterval, currentLoc, RefTypeUpperVectorSaveDef, tree, RBM_FLT_CALLEE_SAVED);
+ RefPosition* pos =
+ newRefPosition(tempInterval, currentLoc, RefTypeUpperVectorSaveDef, tree, RBM_FLT_CALLEE_SAVED);
// We are going to save the existing relatedInterval of varInterval on tempInterval, so that we can set
// the tempInterval as the relatedInterval of varInterval, so that we can build the corresponding
// RefTypeUpperVectorSaveUse RefPosition. We will then restore the relatedInterval onto varInterval,
// and set varInterval as the relatedInterval of tempInterval.
tempInterval->relatedInterval = varInterval->relatedInterval;
- varInterval->relatedInterval = tempInterval;
+ varInterval->relatedInterval = tempInterval;
}
}
}
return liveLargeVectors;
}
-void
-LinearScan::buildUpperVectorRestoreRefPositions(GenTree *tree,
- LsraLocation currentLoc,
- VARSET_VALARG_TP liveLargeVectors)
+void LinearScan::buildUpperVectorRestoreRefPositions(GenTree* tree,
+ LsraLocation currentLoc,
+ VARSET_VALARG_TP liveLargeVectors)
{
if (!VarSetOps::IsEmpty(compiler, liveLargeVectors))
{
VARSET_ITER_INIT(compiler, iter, liveLargeVectors, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- Interval *varInterval = getIntervalForLocalVar(varNum);
- Interval *tempInterval = varInterval->relatedInterval;
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ Interval* varInterval = getIntervalForLocalVar(varNum);
+ Interval* tempInterval = varInterval->relatedInterval;
assert(tempInterval->isInternal == true);
- RefPosition *pos = newRefPosition(tempInterval, currentLoc, RefTypeUpperVectorSaveUse, tree, RBM_FLT_CALLEE_SAVED);
+ RefPosition* pos =
+ newRefPosition(tempInterval, currentLoc, RefTypeUpperVectorSaveUse, tree, RBM_FLT_CALLEE_SAVED);
// Restore the relatedInterval onto varInterval, and set varInterval as the relatedInterval
// of tempInterval.
- varInterval->relatedInterval = tempInterval->relatedInterval;
+ varInterval->relatedInterval = tempInterval->relatedInterval;
tempInterval->relatedInterval = varInterval;
}
}
@@ -3252,12 +3238,11 @@ static int ComputeAvailableSrcCount(GenTree* node)
}
#endif
-void
-LinearScan::buildRefPositionsForNode(GenTree *tree,
- BasicBlock *block,
- LocationInfoListNodePool& listNodePool,
- HashTableBase<GenTree*, LocationInfoList>& operandToLocationInfoMap,
- LsraLocation currentLoc)
+void LinearScan::buildRefPositionsForNode(GenTree* tree,
+ BasicBlock* block,
+ LocationInfoListNodePool& listNodePool,
+ HashTableBase<GenTree*, LocationInfoList>& operandToLocationInfoMap,
+ LsraLocation currentLoc)
{
#ifdef _TARGET_ARM_
assert(!isRegPairType(tree->TypeGet()));
@@ -3287,11 +3272,11 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
bool first = true;
for (auto kvp : operandToLocationInfoMap)
{
- GenTree* node = kvp.Key();
+ GenTree* node = kvp.Key();
LocationInfoList defList = kvp.Value();
JITDUMP("%sN%03u. %s -> (", first ? "" : "; ", node->gtSeqNum, GenTree::NodeName(node->OperGet()));
- for (LocationInfoListNode* def = defList.Begin(), *end = defList.End(); def != end; def = def->Next())
+ for (LocationInfoListNode *def = defList.Begin(), *end = defList.End(); def != end; def = def->Next())
{
JITDUMP("%s%d.N%03u", def == defList.Begin() ? "" : ", ", def->loc, def->treeNode->gtSeqNum);
}
@@ -3322,8 +3307,8 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
// is processed, unless this is marked "isLocalDefUse" because it is a stack-based argument
// to a call
- Interval * interval = getIntervalForLocalVar(tree->gtLclVarCommon.gtLclNum);
- regMaskTP candidates = getUseCandidates(tree);
+ Interval* interval = getIntervalForLocalVar(tree->gtLclVarCommon.gtLclNum);
+ regMaskTP candidates = getUseCandidates(tree);
regMaskTP fixedAssignment = fixedCandidateMask(tree->TypeGet(), candidates);
// We have only approximate last-use information at this point. This is because the
@@ -3338,7 +3323,8 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
// we can update currentLiveVars at the same place that we create the RefPosition.
if ((tree->gtFlags & GTF_VAR_DEATH) != 0)
{
- VarSetOps::RemoveElemD(compiler, currentLiveVars, compiler->lvaTable[tree->gtLclVarCommon.gtLclNum].lvVarIndex);
+ VarSetOps::RemoveElemD(compiler, currentLiveVars,
+ compiler->lvaTable[tree->gtLclVarCommon.gtLclNum].lvVarIndex);
}
JITDUMP("t%u (i:%u)\n", currentLoc, interval->intervalIndex);
@@ -3348,7 +3334,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
if (produce != 0)
{
LocationInfoList list(listNodePool.GetNode(currentLoc, interval, tree));
- bool added = operandToLocationInfoMap.AddOrUpdate(tree, list);
+ bool added = operandToLocationInfoMap.AddOrUpdate(tree, list);
assert(added);
tree->gtLsraInfo.definesAnyRegisters = true;
@@ -3365,10 +3351,10 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
{
candidates = fixedAssignment;
}
- RefPosition *pos = newRefPosition(interval, currentLoc, RefTypeUse, tree, candidates);
+ RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeUse, tree, candidates);
pos->isLocalDefUse = true;
- bool isLastUse = ((tree->gtFlags & GTF_VAR_DEATH) != 0);
- pos->lastUse = isLastUse;
+ bool isLastUse = ((tree->gtFlags & GTF_VAR_DEATH) != 0);
+ pos->lastUse = isLastUse;
pos->setAllocateIfProfitable(tree->IsRegOptional());
DBEXEC(VERBOSE, pos->dump());
return;
@@ -3385,17 +3371,17 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
#endif // DEBUG
// Handle the case of local variable assignment
- Interval * varDefInterval = nullptr;
- RefType defRefType = RefTypeDef;
+ Interval* varDefInterval = nullptr;
+ RefType defRefType = RefTypeDef;
- GenTree * defNode = tree;
+ GenTree* defNode = tree;
// noAdd means the node creates a def but for purposes of map
// management do not add it because data is not flowing up the
// tree but over (as in ASG nodes)
- bool noAdd = info.isLocalDefUse;
- RefPosition * prevPos = nullptr;
+ bool noAdd = info.isLocalDefUse;
+ RefPosition* prevPos = nullptr;
bool isSpecialPutArg = false;
@@ -3406,12 +3392,12 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
{
// We always push the tracked lclVar intervals
varDefInterval = getIntervalForLocalVar(tree->gtLclVarCommon.gtLclNum);
- defRefType = refTypeForLocalRefNode(tree);
- defNode = tree;
+ defRefType = refTypeForLocalRefNode(tree);
+ defNode = tree;
if (produce == 0)
{
produce = 1;
- noAdd = true;
+ noAdd = true;
}
assert(consume <= MAX_RET_REG_COUNT);
@@ -3428,7 +3414,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
LocationInfo& operandInfo = *static_cast<LocationInfo*>(operandDefs.Begin());
- Interval * srcInterval = operandInfo.interval;
+ Interval* srcInterval = operandInfo.interval;
if (srcInterval->relatedInterval == nullptr)
{
// Preference the source to the dest, unless this is a non-last-use localVar.
@@ -3460,7 +3446,8 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
if ((tree->gtFlags & GTF_VAR_DEATH) == 0)
{
- VarSetOps::AddElemD(compiler, currentLiveVars, compiler->lvaTable[tree->gtLclVarCommon.gtLclNum].lvVarIndex);
+ VarSetOps::AddElemD(compiler, currentLiveVars,
+ compiler->lvaTable[tree->gtLclVarCommon.gtLclNum].lvVarIndex);
}
}
}
@@ -3488,14 +3475,16 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
if (varDefInterval != nullptr)
{
printf("t%u (i:%u) = op ", currentLoc, varDefInterval->intervalIndex);
- }
+ }
else
{
for (int i = 0; i < produce; i++)
+ {
printf("t%u ", currentLoc);
+ }
printf("= op ");
}
- }
+ }
else
{
printf(" op ");
@@ -3504,13 +3493,14 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
}
#endif // DEBUG
- Interval *prefSrcInterval = nullptr;
+ Interval* prefSrcInterval = nullptr;
// If this is a binary operator that will be encoded with 2 operand fields
// (i.e. the target is read-modify-write), preference the dst to op1.
bool hasDelayFreeSrc = tree->gtLsraInfo.hasDelayFreeSrc;
- if (tree->OperGet() == GT_PUTARG_REG && isCandidateLocalRef(tree->gtGetOp1()) && (tree->gtGetOp1()->gtFlags & GTF_VAR_DEATH) == 0)
+ if (tree->OperGet() == GT_PUTARG_REG && isCandidateLocalRef(tree->gtGetOp1()) &&
+ (tree->gtGetOp1()->gtFlags & GTF_VAR_DEATH) == 0)
{
// This is the case for a "pass-through" copy of a lclVar. In the case where it is a non-last-use,
// we don't want the def of the copy to kill the lclVar register, if it is assigned the same register
@@ -3519,11 +3509,11 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
// Get the register information for the first operand of the node.
LocationInfoList operandDefs;
- bool found = operandToLocationInfoMap.TryGetValue(*(tree->OperandsBegin(true)), &operandDefs);
+ bool found = operandToLocationInfoMap.TryGetValue(*(tree->OperandsBegin(true)), &operandDefs);
assert(found);
// Preference the destination to the interval of the first register defined by the first operand.
- Interval * srcInterval = operandDefs.Begin()->interval;
+ Interval* srcInterval = operandDefs.Begin()->interval;
assert(srcInterval->isLocalVar);
prefSrcInterval = srcInterval;
isSpecialPutArg = true;
@@ -3540,7 +3530,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
// `operandDefs` holds the list of `LocationInfo` values for the registers defined by the current
// operand. `operandDefsIterator` points to the current `LocationInfo` value in `operandDefs`.
- LocationInfoList operandDefs;
+ LocationInfoList operandDefs;
LocationInfoListNode* operandDefsIterator = operandDefs.End();
for (int useIndex = 0; useIndex < consume; useIndex++)
{
@@ -3580,7 +3570,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
}
LocationInfo& locInfo = *static_cast<LocationInfo*>(operandDefsIterator);
- operandDefsIterator = operandDefsIterator->Next();
+ operandDefsIterator = operandDefsIterator->Next();
JITDUMP("t%u ", locInfo.loc);
@@ -3588,10 +3578,10 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
// this is set by default in newRefPosition
GenTree* useNode = locInfo.treeNode;
assert(useNode != nullptr);
- var_types type = useNode->TypeGet();
- regMaskTP candidates = getUseCandidates(useNode);
- Interval* i = locInfo.interval;
- unsigned multiRegIdx = locInfo.multiRegIdx;
+ var_types type = useNode->TypeGet();
+ regMaskTP candidates = getUseCandidates(useNode);
+ Interval* i = locInfo.interval;
+ unsigned multiRegIdx = locInfo.multiRegIdx;
#ifdef FEATURE_SIMD
// In case of multi-reg call store to a local, there won't be any mismatch of
@@ -3606,7 +3596,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
noway_assert((candidates & allRegs(useNode->gtType)) != RBM_NONE);
// Currently, the only case where this should happen is for a TYP_LONG
// source and a TYP_SIMD8 target.
- assert((useNode->gtType == TYP_LONG && tree->gtType == TYP_SIMD8) ||
+ assert((useNode->gtType == TYP_LONG && tree->gtType == TYP_SIMD8) ||
(useNode->gtType == TYP_SIMD8 && tree->gtType == TYP_LONG));
tree->gtType = useNode->gtType;
}
@@ -3620,11 +3610,11 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
}
bool regOptionalAtUse = useNode->IsRegOptional();
- bool isLastUse = true;
+ bool isLastUse = true;
if (isCandidateLocalRef(useNode))
{
isLastUse = ((useNode->gtFlags & GTF_VAR_DEATH) != 0);
- }
+ }
else
{
// For non-localVar uses we record nothing,
@@ -3648,8 +3638,8 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
{
// Explicitly insert a FixedRefPosition and fake the candidates, because otherwise newRefPosition
// will complain about the types not matching.
- regNumber physicalReg = genRegNumFromMask(fixedAssignment);
- RefPosition *pos = newRefPosition (physicalReg, currentLoc, RefTypeFixedReg, nullptr, fixedAssignment);
+ regNumber physicalReg = genRegNumFromMask(fixedAssignment);
+ RefPosition* pos = newRefPosition(physicalReg, currentLoc, RefTypeFixedReg, nullptr, fixedAssignment);
}
pos = newRefPosition(i, currentLoc, RefTypeUse, useNode, allRegs(i->registerType), multiRegIdx);
pos->registerAssignment = candidates;
@@ -3660,7 +3650,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
}
if (delayRegFree)
{
- hasDelayFreeSrc = true;
+ hasDelayFreeSrc = true;
pos->delayRegFree = true;
}
@@ -3683,9 +3673,9 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
buildInternalRegisterUsesForNode(tree, currentLoc, internalRefs, internalCount);
- RegisterType registerType = getDefType(tree);
- regMaskTP candidates = getDefCandidates(tree);
- regMaskTP useCandidates = getUseCandidates(tree);
+ RegisterType registerType = getDefType(tree);
+ regMaskTP candidates = getDefCandidates(tree);
+ regMaskTP useCandidates = getUseCandidates(tree);
#ifdef DEBUG
if (VERBOSE)
@@ -3709,7 +3699,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
buildKillPositionsForNode(tree, currentLoc + 1);
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
- VARSET_TP VARSET_INIT_NOCOPY(liveLargeVectors, VarSetOps::UninitVal());
+ VARSET_TP VARSET_INIT_NOCOPY(liveLargeVectors, VarSetOps::UninitVal());
if (RBM_FLT_CALLEE_SAVED != RBM_NONE)
{
// Build RefPositions for saving any live large vectors.
@@ -3717,9 +3707,9 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
VarSetOps::AssignNoCopy(compiler, liveLargeVectors, buildUpperVectorSaveRefPositions(tree, currentLoc));
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
-
- ReturnTypeDesc* retTypeDesc = nullptr;
- bool isMultiRegCall = tree->IsMultiRegCall();
+
+ ReturnTypeDesc* retTypeDesc = nullptr;
+ bool isMultiRegCall = tree->IsMultiRegCall();
if (isMultiRegCall)
{
retTypeDesc = tree->AsCall()->GetReturnTypeDesc();
@@ -3729,26 +3719,26 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
// push defs
LocationInfoList locationInfoList;
- LsraLocation defLocation = currentLoc + 1;
- for (int i=0; i < produce; i++)
- {
+ LsraLocation defLocation = currentLoc + 1;
+ for (int i = 0; i < produce; i++)
+ {
regMaskTP currCandidates = candidates;
- Interval *interval = varDefInterval;
+ Interval* interval = varDefInterval;
// In case of multi-reg call node, registerType is given by
// the type of ith position return register.
if (isMultiRegCall)
{
- registerType = retTypeDesc->GetReturnRegType((unsigned)i);
+ registerType = retTypeDesc->GetReturnRegType((unsigned)i);
currCandidates = genRegMask(retTypeDesc->GetABIReturnReg(i));
- useCandidates = allRegs(registerType);
+ useCandidates = allRegs(registerType);
}
if (interval == nullptr)
{
// Make a new interval
interval = newInterval(registerType);
- if (hasDelayFreeSrc)
+ if (hasDelayFreeSrc)
{
interval->hasNonCommutativeRMWDef = true;
}
@@ -3777,19 +3767,19 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
{
interval->assignRelatedIntervalIfUnassigned(prefSrcInterval);
}
-
+
// for assignments, we want to create a refposition for the def
// but not push it
if (!noAdd)
{
- locationInfoList.Append(listNodePool.GetNode(defLocation, interval, tree, (unsigned) i));
+ locationInfoList.Append(listNodePool.GetNode(defLocation, interval, tree, (unsigned)i));
}
RefPosition* pos = newRefPosition(interval, defLocation, defRefType, defNode, currCandidates, (unsigned)i);
if (info.isLocalDefUse)
{
pos->isLocalDefUse = true;
- pos->lastUse = true;
+ pos->lastUse = true;
}
DBEXEC(VERBOSE, pos->dump());
interval->updateRegisterPreferences(currCandidates);
@@ -3800,7 +3790,8 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
buildUpperVectorRestoreRefPositions(tree, currentLoc, liveLargeVectors);
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
- bool isContainedNode = !noAdd && consume == 0 && produce == 0 && tree->TypeGet() != TYP_VOID && !tree->OperIsStore();
+ bool isContainedNode =
+ !noAdd && consume == 0 && produce == 0 && tree->TypeGet() != TYP_VOID && !tree->OperIsStore();
if (isContainedNode)
{
// Contained nodes map to the concatenated lists of their operands.
@@ -3813,7 +3804,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
}
LocationInfoList operandList;
- bool removed = operandToLocationInfoMap.TryRemove(op, &operandList);
+ bool removed = operandToLocationInfoMap.TryRemove(op, &operandList);
assert(removed);
locationInfoList.Append(operandList);
@@ -3829,24 +3820,21 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
}
// make an interval for each physical register
-void
-LinearScan::buildPhysRegRecords()
+void LinearScan::buildPhysRegRecords()
{
RegisterType regType = IntRegisterType;
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
- RegRecord *curr = &physRegs[reg];
+ RegRecord* curr = &physRegs[reg];
curr->init(reg);
}
}
-
-BasicBlock *
-getNonEmptyBlock(BasicBlock * block)
+BasicBlock* getNonEmptyBlock(BasicBlock* block)
{
while (block != nullptr && block->bbTreeList == nullptr)
{
- BasicBlock * nextBlock = block->bbNext;
+ BasicBlock* nextBlock = block->bbNext;
// Note that here we use the version of NumSucc that does not take a compiler.
// That way this doesn't have to take a compiler, or be an instance method, e.g. of LinearScan.
// If we have an empty block, it must have jump type BBJ_NONE or BBJ_ALWAYS, in which
@@ -3860,27 +3848,24 @@ getNonEmptyBlock(BasicBlock * block)
return block;
}
-
-void
-LinearScan::insertZeroInitRefPositions()
+void LinearScan::insertZeroInitRefPositions()
{
// insert defs for this, then a block boundary
VARSET_ITER_INIT(compiler, iter, compiler->fgFirstBB->bbLiveIn, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- LclVarDsc *varDsc = compiler->lvaTable + varNum;
- if (!varDsc->lvIsParam
- && isCandidateVar(varDsc)
- && (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet())))
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
+ if (!varDsc->lvIsParam && isCandidateVar(varDsc) &&
+ (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet())))
{
- GenTree * firstStmt = getNonEmptyBlock(compiler->fgFirstBB)->bbTreeList;
+ GenTree* firstStmt = getNonEmptyBlock(compiler->fgFirstBB)->bbTreeList;
JITDUMP("V%02u was live in\n", varNum);
DISPTREE(firstStmt);
- Interval * interval = getIntervalForLocalVar(varNum);
- RefPosition * pos = newRefPosition(interval, MinLocation, RefTypeZeroInit, firstStmt,
- allRegs(interval->registerType));
+ Interval* interval = getIntervalForLocalVar(varNum);
+ RefPosition* pos =
+ newRefPosition(interval, MinLocation, RefTypeZeroInit, firstStmt, allRegs(interval->registerType));
varDsc->lvMustInit = true;
}
}
@@ -3891,12 +3876,11 @@ LinearScan::insertZeroInitRefPositions()
// Sets the register state for an argument of type STRUCT for System V systems.
// See Compiler::raUpdateRegStateForArg(RegState *regState, LclVarDsc *argDsc) in regalloc.cpp
// for how state for argument is updated for unix non-structs and Windows AMD64 structs.
-void
-LinearScan::unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc)
+void LinearScan::unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc)
{
assert(varTypeIsStruct(argDsc));
- RegState * intRegState = &compiler->codeGen->intRegState;
- RegState * floatRegState = &compiler->codeGen->floatRegState;
+ RegState* intRegState = &compiler->codeGen->intRegState;
+ RegState* floatRegState = &compiler->codeGen->floatRegState;
if ((argDsc->lvArgReg != REG_STK) && (argDsc->lvArgReg != REG_NA))
{
@@ -3912,7 +3896,6 @@ LinearScan::unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc)
}
}
-
if ((argDsc->lvOtherArgReg != REG_STK) && (argDsc->lvOtherArgReg != REG_NA))
{
if (genRegMask(argDsc->lvOtherArgReg) & (RBM_ALLFLOAT))
@@ -3949,8 +3932,7 @@ LinearScan::unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc)
// and regAlloc. It is further abstracted here because regState is updated
// separately for tracked and untracked variables in LSRA.
//
-void
-LinearScan::updateRegStateForArg(LclVarDsc* argDsc)
+void LinearScan::updateRegStateForArg(LclVarDsc* argDsc)
{
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// For System V AMD64 calls the argDsc can have 2 registers (for structs.)
@@ -3962,19 +3944,19 @@ LinearScan::updateRegStateForArg(LclVarDsc* argDsc)
else
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
{
- RegState* intRegState = &compiler->codeGen->intRegState;
- RegState* floatRegState = &compiler->codeGen->floatRegState;
+ RegState* intRegState = &compiler->codeGen->intRegState;
+ RegState* floatRegState = &compiler->codeGen->floatRegState;
// In the case of AMD64 we'll still use the floating point registers
// to model the register usage for argument on vararg calls, so
- // we will ignore the varargs condition to determine whether we use
+ // we will ignore the varargs condition to determine whether we use
// XMM registers or not for setting up the call.
bool isFloat = (isFloatRegType(argDsc->lvType)
#ifndef _TARGET_AMD64_
- && !compiler->info.compIsVarArgs
+ && !compiler->info.compIsVarArgs
#endif
- );
+ );
- if (argDsc->lvIsHfaRegArg())
+ if (argDsc->lvIsHfaRegArg())
{
isFloat = true;
}
@@ -4025,8 +4007,8 @@ LinearScan::updateRegStateForArg(LclVarDsc* argDsc)
// the register locations will be "rotated" to stress the resolution and allocation
// code.
-BasicBlock*
-LinearScan::findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated))
+BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block,
+ BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated))
{
BasicBlock* predBlock = nullptr;
#ifdef DEBUG
@@ -4040,7 +4022,7 @@ LinearScan::findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBU
}
else
#endif // DEBUG
- if (block != compiler->fgFirstBB)
+ if (block != compiler->fgFirstBB)
{
predBlock = block->GetUniquePred(compiler);
if (predBlock != nullptr)
@@ -4071,7 +4053,7 @@ LinearScan::findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBU
// |
// block
//
- for (flowList* pred = otherBlock->bbPreds; pred != NULL; pred = pred->flNext)
+ for (flowList* pred = otherBlock->bbPreds; pred != nullptr; pred = pred->flNext)
{
BasicBlock* otherPred = pred->flBlock;
if (otherPred->bbNum == blockInfo[otherBlock->bbNum].predBBNum)
@@ -4090,9 +4072,7 @@ LinearScan::findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBU
}
else
{
- for (flowList* pred = block->bbPreds;
- pred != NULL;
- pred = pred->flNext)
+ for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
{
BasicBlock* candidatePredBlock = pred->flBlock;
if (isBlockVisited(candidatePredBlock))
@@ -4115,10 +4095,9 @@ LinearScan::findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBU
return predBlock;
}
-void
-LinearScan::buildIntervals()
+void LinearScan::buildIntervals()
{
- BasicBlock *block;
+ BasicBlock* block;
// start numbering at 1; 0 is the entry
LsraLocation currentLoc = 1;
@@ -4132,15 +4111,19 @@ LinearScan::buildIntervals()
if (VERBOSE)
{
printf("\n-----------------\n");
- printf( "LIVENESS:\n");
- printf( "-----------------\n");
+ printf("LIVENESS:\n");
+ printf("-----------------\n");
foreach_block(compiler, block)
{
printf("BB%02u use def in out\n", block->bbNum);
- dumpConvertedVarSet(compiler, block->bbVarUse); printf("\n");
- dumpConvertedVarSet(compiler, block->bbVarDef); printf("\n");
- dumpConvertedVarSet(compiler, block->bbLiveIn); printf("\n");
- dumpConvertedVarSet(compiler, block->bbLiveOut); printf("\n");
+ dumpConvertedVarSet(compiler, block->bbVarUse);
+ printf("\n");
+ dumpConvertedVarSet(compiler, block->bbVarDef);
+ printf("\n");
+ dumpConvertedVarSet(compiler, block->bbLiveIn);
+ printf("\n");
+ dumpConvertedVarSet(compiler, block->bbLiveOut);
+ printf("\n");
}
}
#endif // DEBUG
@@ -4149,30 +4132,30 @@ LinearScan::buildIntervals()
DBEXEC(VERBOSE, TupleStyleDump(LSRA_DUMP_PRE));
- // second part:
+ // second part:
JITDUMP("\nbuildIntervals second part ========\n");
currentLoc = 0;
// Next, create ParamDef RefPositions for all the tracked parameters,
// in order of their varIndex
- LclVarDsc * argDsc;
- unsigned int lclNum;
+ LclVarDsc* argDsc;
+ unsigned int lclNum;
- RegState * intRegState = &compiler->codeGen->intRegState;
- RegState * floatRegState = &compiler->codeGen->floatRegState;
+ RegState* intRegState = &compiler->codeGen->intRegState;
+ RegState* floatRegState = &compiler->codeGen->floatRegState;
intRegState->rsCalleeRegArgMaskLiveIn = RBM_NONE;
floatRegState->rsCalleeRegArgMaskLiveIn = RBM_NONE;
- for (unsigned int varIndex = 0;
- varIndex < compiler->lvaTrackedCount;
- varIndex++)
+ for (unsigned int varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
{
lclNum = compiler->lvaTrackedToVarNum[varIndex];
argDsc = &(compiler->lvaTable[lclNum]);
if (!argDsc->lvIsParam)
+ {
continue;
+ }
// Only reserve a register if the argument is actually used.
// Is it dead on entry? If compJmpOpUsed is true, then the arguments
@@ -4192,8 +4175,8 @@ LinearScan::buildIntervals()
if (isCandidateVar(argDsc))
{
- Interval * interval = getIntervalForLocalVar(lclNum);
- regMaskTP mask = allRegs(TypeGet(argDsc));
+ Interval* interval = getIntervalForLocalVar(lclNum);
+ regMaskTP mask = allRegs(TypeGet(argDsc));
if (argDsc->lvIsRegArg)
{
// Set this interval as currently assigned to that register
@@ -4202,31 +4185,27 @@ LinearScan::buildIntervals()
mask = genRegMask(inArgReg);
assignPhysReg(inArgReg, interval);
}
- RefPosition * pos = newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, mask);
+ RefPosition* pos = newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, mask);
}
else if (varTypeIsStruct(argDsc->lvType))
{
for (unsigned fieldVarNum = argDsc->lvFieldLclStart;
- fieldVarNum < argDsc->lvFieldLclStart + argDsc->lvFieldCnt;
- ++fieldVarNum)
+ fieldVarNum < argDsc->lvFieldLclStart + argDsc->lvFieldCnt; ++fieldVarNum)
{
- LclVarDsc * fieldVarDsc = &(compiler->lvaTable[fieldVarNum]);
+ LclVarDsc* fieldVarDsc = &(compiler->lvaTable[fieldVarNum]);
if (fieldVarDsc->lvLRACandidate)
{
- Interval * interval = getIntervalForLocalVar(fieldVarNum);
- RefPosition * pos = newRefPosition (interval, MinLocation, RefTypeParamDef, nullptr,
- allRegs(TypeGet(fieldVarDsc)));
+ Interval* interval = getIntervalForLocalVar(fieldVarNum);
+ RefPosition* pos =
+ newRefPosition(interval, MinLocation, RefTypeParamDef, nullptr, allRegs(TypeGet(fieldVarDsc)));
}
}
}
else
{
// We can overwrite the register (i.e. codegen saves it on entry)
- assert(argDsc->lvRefCnt == 0 ||
- !argDsc->lvIsRegArg ||
- argDsc->lvDoNotEnregister ||
- !argDsc->lvLRACandidate ||
- (varTypeIsFloating(argDsc->TypeGet()) && compiler->opts.compDbgCode));
+ assert(argDsc->lvRefCnt == 0 || !argDsc->lvIsRegArg || argDsc->lvDoNotEnregister ||
+ !argDsc->lvLRACandidate || (varTypeIsFloating(argDsc->TypeGet()) && compiler->opts.compDbgCode));
}
}
@@ -4240,10 +4219,10 @@ LinearScan::buildIntervals()
if (argDsc->lvPromotedStruct())
{
- noway_assert(argDsc->lvFieldCnt == 1); // We only handle one field here
+ noway_assert(argDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = argDsc->lvFieldLclStart;
- argDsc = &(compiler->lvaTable[fieldVarNum]);
+ argDsc = &(compiler->lvaTable[fieldVarNum]);
}
noway_assert(argDsc->lvIsParam);
if (!argDsc->lvTracked && argDsc->lvIsRegArg)
@@ -4251,7 +4230,7 @@ LinearScan::buildIntervals()
updateRegStateForArg(argDsc);
}
}
-
+
// If there is a secret stub param, it is also live in
if (compiler->info.compPublishStubParam)
{
@@ -4269,14 +4248,12 @@ LinearScan::buildIntervals()
// the first block).
VarSetOps::AssignNoCopy(compiler, currentLiveVars, VarSetOps::MakeEmpty(compiler));
- for( block = startBlockSequence();
- block != nullptr;
- block = moveToNextBlock())
+ for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
{
JITDUMP("\nNEW BLOCK BB%02u\n", block->bbNum);
bool predBlockIsAllocated = false;
- predBlock = findPredBlockForLiveIn(block, prevBlock DEBUGARG(&predBlockIsAllocated));
+ predBlock = findPredBlockForLiveIn(block, prevBlock DEBUGARG(&predBlockIsAllocated));
if (block == compiler->fgFirstBB)
{
@@ -4294,7 +4271,8 @@ LinearScan::buildIntervals()
VARSET_TP VARSET_INIT(compiler, newLiveIn, block->bbLiveIn);
if (predBlock)
{
- JITDUMP("\n\nSetting incoming variable registers of BB%02u to outVarToRegMap of BB%02u\n", block->bbNum, predBlock->bbNum);
+ JITDUMP("\n\nSetting incoming variable registers of BB%02u to outVarToRegMap of BB%02u\n", block->bbNum,
+ predBlock->bbNum);
assert(predBlock->bbNum <= bbNumMaxBeforeResolution);
blockInfo[block->bbNum].predBBNum = predBlock->bbNum;
// Compute set difference: newLiveIn = block->bbLiveIn - predBlock->bbLiveOut
@@ -4313,15 +4291,15 @@ LinearScan::buildIntervals()
VARSET_ITER_INIT(compiler, iter, newLiveIn, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- LclVarDsc *varDsc = compiler->lvaTable + varNum;
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
// Add a dummyDef for any candidate vars that are in the "newLiveIn" set.
// If this is the entry block, don't add any incoming parameters (they're handled with ParamDefs).
if (isCandidateVar(varDsc) && (predBlock != nullptr || !varDsc->lvIsParam))
{
- Interval * interval = getIntervalForLocalVar(varNum);
- RefPosition * pos = newRefPosition(interval, currentLoc, RefTypeDummyDef, nullptr,
- allRegs(interval->registerType));
+ Interval* interval = getIntervalForLocalVar(varNum);
+ RefPosition* pos =
+ newRefPosition(interval, currentLoc, RefTypeDummyDef, nullptr, allRegs(interval->registerType));
}
}
JITDUMP("Finished creating dummy definitions\n\n");
@@ -4332,21 +4310,21 @@ LinearScan::buildIntervals()
// register positions for those exposed uses need to be recorded at
// this point.
- RefPosition * pos = newRefPosition((Interval *)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
+ RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
VarSetOps::Assign(compiler, currentLiveVars, block->bbLiveIn);
- for (GenTree *statement = block->FirstNonPhiDef();
- statement;
- statement = statement->gtNext)
+ for (GenTree* statement = block->FirstNonPhiDef(); statement; statement = statement->gtNext)
{
- if (statement->gtStmt.gtStmtIsEmbedded())
+ if (statement->gtStmt.gtStmtIsEmbedded())
+ {
continue;
+ }
+
+ GenTree* treeNode;
+ int dstCount = 0;
- GenTree *treeNode;
- int dstCount = 0;
-
- GenTree * stmtExpr = statement->gtStmt.gtStmtExpr;
+ GenTree* stmtExpr = statement->gtStmt.gtStmtExpr;
// If we have a dead lclVar use, we have to generate a RefPosition for it,
// otherwise the dataflow won't match the allocations.
@@ -4360,23 +4338,22 @@ LinearScan::buildIntervals()
GenTree* nextStmt = statement;
do
{
- GenTree * nextStmtExpr = nextStmt->gtStmt.gtStmtExpr;
+ GenTree* nextStmtExpr = nextStmt->gtStmt.gtStmtExpr;
if (nextStmtExpr->gtLsraInfo.dstCount > 0)
{
nextStmtExpr->gtLsraInfo.isLocalDefUse = true;
- nextStmtExpr->gtLsraInfo.dstCount = 0;
+ nextStmtExpr->gtLsraInfo.dstCount = 0;
}
nextStmt = nextStmt->gtNext;
- }
- while (nextStmt && nextStmt->gtStmt.gtStmtIsEmbedded());
+ } while (nextStmt && nextStmt->gtStmt.gtStmtIsEmbedded());
// Go through the statement nodes in execution order, and build RefPositions
foreach_treenode_execution_order(treeNode, statement)
{
- assert (treeNode->gtLsraInfo.loc >= currentLoc);
+ assert(treeNode->gtLsraInfo.loc >= currentLoc);
currentLoc = treeNode->gtLsraInfo.loc;
- dstCount = treeNode->gtLsraInfo.dstCount;
+ dstCount = treeNode->gtLsraInfo.dstCount;
buildRefPositionsForNode(treeNode, block, listNodePool, operandToLocationInfoMap, currentLoc);
#ifdef DEBUG
if (currentLoc > maxNodeLocation)
@@ -4396,7 +4373,7 @@ LinearScan::buildIntervals()
for (auto kvp : operandToLocationInfoMap)
{
LocationInfoList defList = kvp.Value();
- for (LocationInfoListNode* def = defList.Begin(), *end = defList.End(); def != end; def = def->Next())
+ for (LocationInfoListNode *def = defList.Begin(), *end = defList.End(); def != end; def = def->Next())
{
locCount++;
}
@@ -4422,7 +4399,7 @@ LinearScan::buildIntervals()
// Blocks ending with "jmp method" are marked as BBJ_HAS_JMP,
// and jmp call is represented using GT_JMP node which is a leaf node.
// Liveness phase keeps all the arguments of the method live till the end of
- // block by adding them to liveout set of the block containing GT_JMP.
+ // block by adding them to liveout set of the block containing GT_JMP.
//
// The target of a GT_JMP implicitly uses all the current method arguments, however
// there are no actual references to them. This can cause LSRA to assert, because
@@ -4433,7 +4410,7 @@ LinearScan::buildIntervals()
// Note that a block ending with GT_JMP has no successors and hence the variables
// for which dummy use ref positions are added are arguments of the method.
- VARSET_TP VARSET_INIT(compiler, expUseSet, block->bbLiveOut);
+ VARSET_TP VARSET_INIT(compiler, expUseSet, block->bbLiveOut);
BasicBlock* nextBlock = getNextBlock();
if (nextBlock != nullptr)
{
@@ -4441,8 +4418,7 @@ LinearScan::buildIntervals()
}
AllSuccessorIter succsEnd = block->GetAllSuccs(compiler).end();
for (AllSuccessorIter succs = block->GetAllSuccs(compiler).begin();
- succs != succsEnd && !VarSetOps::IsEmpty(compiler, expUseSet);
- ++succs)
+ succs != succsEnd && !VarSetOps::IsEmpty(compiler, expUseSet); ++succs)
{
BasicBlock* succ = (*succs);
if (isBlockVisited(succ))
@@ -4458,13 +4434,13 @@ LinearScan::buildIntervals()
VARSET_ITER_INIT(compiler, iter, expUseSet, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- LclVarDsc *varDsc = compiler->lvaTable + varNum;
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ LclVarDsc* varDsc = compiler->lvaTable + varNum;
if (isCandidateVar(varDsc))
{
- Interval * interval = getIntervalForLocalVar(varNum);
- RefPosition * pos = newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr,
- allRegs(interval->registerType));
+ Interval* interval = getIntervalForLocalVar(varNum);
+ RefPosition* pos =
+ newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
JITDUMP(" V%02u", varNum);
}
}
@@ -4474,7 +4450,7 @@ LinearScan::buildIntervals()
// Identify the last uses of each variable, except in the case of MinOpts, where all vars
// are kept live everywhere.
- if (!compiler->opts.MinOpts())
+ if (!compiler->opts.MinOpts())
{
setLastUses(block);
}
@@ -4501,8 +4477,9 @@ LinearScan::buildIntervals()
if (isCandidateVar(&compiler->lvaTable[keepAliveVarNum]))
{
JITDUMP("Adding exposed use of this, for lvaKeepAliveAndReportThis\n");
- Interval * interval = getIntervalForLocalVar(keepAliveVarNum);
- RefPosition * pos = newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
+ Interval* interval = getIntervalForLocalVar(keepAliveVarNum);
+ RefPosition* pos =
+ newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
}
}
@@ -4510,15 +4487,14 @@ LinearScan::buildIntervals()
if (getLsraExtendLifeTimes())
{
LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = compiler->lvaTable;
- lclNum < compiler->lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++)
{
if (varDsc->lvLRACandidate)
{
JITDUMP("Adding exposed use of V%02u for LsraExtendLifetimes\n", lclNum);
- Interval * interval = getIntervalForLocalVar(lclNum);
- RefPosition * pos = newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
+ Interval* interval = getIntervalForLocalVar(lclNum);
+ RefPosition* pos =
+ newRefPosition(interval, currentLoc, RefTypeExpUse, nullptr, allRegs(interval->registerType));
}
}
}
@@ -4529,7 +4505,7 @@ LinearScan::buildIntervals()
if (prevBlock->NumSucc(compiler) > 0)
{
- RefPosition * pos = newRefPosition((Interval *)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
+ RefPosition* pos = newRefPosition((Interval*)nullptr, currentLoc, RefTypeBB, nullptr, RBM_NONE);
}
#ifdef DEBUG
@@ -4549,17 +4525,16 @@ LinearScan::buildIntervals()
}
#ifdef DEBUG
-void
-LinearScan::dumpVarRefPositions(const char *title)
+void LinearScan::dumpVarRefPositions(const char* title)
{
printf("\nVAR REFPOSITIONS %s\n", title);
for (unsigned i = 0; i < compiler->lvaCount; i++)
{
- Interval * interval = getIntervalForLocalVar(i);
+ Interval* interval = getIntervalForLocalVar(i);
printf("--- V%02u\n", i);
- for (RefPosition * ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
+ for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
{
ref->dump();
}
@@ -4568,29 +4543,36 @@ LinearScan::dumpVarRefPositions(const char *title)
printf("\n");
}
-void
-LinearScan::validateIntervals()
+void LinearScan::validateIntervals()
{
for (unsigned i = 0; i < compiler->lvaCount; i++)
{
- Interval * interval = getIntervalForLocalVar(i);
+ Interval* interval = getIntervalForLocalVar(i);
bool defined = false;
printf("-----------------\n");
- for (RefPosition * ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
+ for (RefPosition* ref = interval->firstRefPosition; ref != nullptr; ref = ref->nextRefPosition)
{
ref->dump();
RefType refType = ref->refType;
if (!defined && RefTypeIsUse(refType))
{
if (compiler->info.compMethodName != nullptr)
- printf("%s: ", compiler->info.compMethodName);
+ {
+ printf("%s: ", compiler->info.compMethodName);
+ }
printf("LocalVar V%02u: undefined use at %u\n", i, ref->nodeLocation);
}
// Note that there can be multiple last uses if they are on disjoint paths,
// so we can't really check the lastUse flag
- if (ref->lastUse) defined = false;
- if (RefTypeIsDef(refType)) defined = true;
+ if (ref->lastUse)
+ {
+ defined = false;
+ }
+ if (RefTypeIsDef(refType))
+ {
+ defined = true;
+ }
}
}
}
@@ -4599,8 +4581,7 @@ LinearScan::validateIntervals()
// Set the default rpFrameType based upon codeGen->isFramePointerRequired()
// This was lifted from the register predictor
//
-void
-LinearScan::setFrameType()
+void LinearScan::setFrameType()
{
FrameType frameType = FT_NOT_SET;
if (compiler->codeGen->isFramePointerRequired())
@@ -4609,10 +4590,10 @@ LinearScan::setFrameType()
}
else
{
- if (compiler->rpMustCreateEBPCalled == false)
+ if (compiler->rpMustCreateEBPCalled == false)
{
#ifdef DEBUG
- const char * reason;
+ const char* reason;
#endif // DEBUG
compiler->rpMustCreateEBPCalled = true;
if (compiler->rpMustCreateEBPFrame(INDEBUG(&reason)))
@@ -4642,9 +4623,7 @@ LinearScan::setFrameType()
// determine whether to double-align). Note, though that there is at least one test
// (jit\opt\Perf\DoubleAlign\Locals.exe) that depends on double-alignment being set
// in certain situations.
- if (!compiler->opts.MinOpts() &&
- !compiler->codeGen->isFramePointerRequired() &&
- compiler->compFloatingPointUsed)
+ if (!compiler->opts.MinOpts() && !compiler->codeGen->isFramePointerRequired() && compiler->compFloatingPointUsed)
{
frameType = FT_DOUBLE_ALIGN_FRAME;
}
@@ -4652,24 +4631,24 @@ LinearScan::setFrameType()
switch (frameType)
{
- case FT_ESP_FRAME:
- noway_assert(!compiler->codeGen->isFramePointerRequired());
- noway_assert(!compiler->codeGen->isFrameRequired());
- compiler->codeGen->setFramePointerUsed(false);
- break;
- case FT_EBP_FRAME:
- compiler->codeGen->setFramePointerUsed(true);
- break;
+ case FT_ESP_FRAME:
+ noway_assert(!compiler->codeGen->isFramePointerRequired());
+ noway_assert(!compiler->codeGen->isFrameRequired());
+ compiler->codeGen->setFramePointerUsed(false);
+ break;
+ case FT_EBP_FRAME:
+ compiler->codeGen->setFramePointerUsed(true);
+ break;
#if DOUBLE_ALIGN
- case FT_DOUBLE_ALIGN_FRAME:
- noway_assert(!compiler->codeGen->isFramePointerRequired());
- compiler->codeGen->setFramePointerUsed(false);
- compiler->codeGen->setDoubleAlign(true);
- break;
+ case FT_DOUBLE_ALIGN_FRAME:
+ noway_assert(!compiler->codeGen->isFramePointerRequired());
+ compiler->codeGen->setFramePointerUsed(false);
+ compiler->codeGen->setDoubleAlign(true);
+ break;
#endif // DOUBLE_ALIGN
- default:
- noway_assert(!"rpFrameType not set correctly!");
- break;
+ default:
+ noway_assert(!"rpFrameType not set correctly!");
+ break;
}
// If we are using FPBASE as the frame register, we cannot also use it for
@@ -4698,19 +4677,16 @@ LinearScan::setFrameType()
// Is the copyReg given by this RefPosition still busy at the
// given location?
-bool
-copyRegInUse(RefPosition * ref, LsraLocation loc)
+bool copyRegInUse(RefPosition* ref, LsraLocation loc)
{
assert(ref->copyReg);
if (ref->getRefEndLocation() >= loc)
{
return true;
}
- Interval * interval = ref->getInterval();
- RefPosition * nextRef = interval->getNextRefPosition();
- if (nextRef != nullptr &&
- nextRef->treeNode == ref->treeNode &&
- nextRef->getRefEndLocation() >= loc)
+ Interval* interval = ref->getInterval();
+ RefPosition* nextRef = interval->getNextRefPosition();
+ if (nextRef != nullptr && nextRef->treeNode == ref->treeNode && nextRef->getRefEndLocation() >= loc)
{
return true;
}
@@ -4721,18 +4697,20 @@ copyRegInUse(RefPosition * ref, LsraLocation loc)
// at the "currentLoc", and if so, return the next location at which it is in use in
// "nextRefLocationPtr"
//
-bool
-LinearScan::registerIsAvailable(RegRecord *physRegRecord, LsraLocation currentLoc, LsraLocation * nextRefLocationPtr, RegisterType regType)
+bool LinearScan::registerIsAvailable(RegRecord* physRegRecord,
+ LsraLocation currentLoc,
+ LsraLocation* nextRefLocationPtr,
+ RegisterType regType)
{
- *nextRefLocationPtr = MaxLocation;
+ *nextRefLocationPtr = MaxLocation;
LsraLocation nextRefLocation = MaxLocation;
- regMaskTP regMask = genRegMask(physRegRecord->regNum);
+ regMaskTP regMask = genRegMask(physRegRecord->regNum);
if (physRegRecord->isBusyUntilNextKill)
{
return false;
}
- RefPosition * nextPhysReference = physRegRecord->getNextRefPosition();
+ RefPosition* nextPhysReference = physRegRecord->getNextRefPosition();
if (nextPhysReference != nullptr)
{
nextRefLocation = nextPhysReference->nodeLocation;
@@ -4743,11 +4721,11 @@ LinearScan::registerIsAvailable(RegRecord *physRegRecord, LsraLocation currentLo
nextRefLocation = MaxLocation - 1;
}
- Interval * assignedInterval = physRegRecord->assignedInterval;
+ Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr)
{
- RefPosition * recentReference = assignedInterval->recentRefPosition;
+ RefPosition* recentReference = assignedInterval->recentRefPosition;
// The only case where we have an assignedInterval, but recentReference is null
// is where this interval is live at procedure entry (i.e. an arg register), in which
@@ -4771,7 +4749,7 @@ LinearScan::registerIsAvailable(RegRecord *physRegRecord, LsraLocation currentLo
if (recentReference->copyReg && copyRegInUse(recentReference, currentLoc))
{
return false;
- }
+ }
}
else if (!assignedInterval->isActive && assignedInterval->isConstant)
{
@@ -4782,13 +4760,12 @@ LinearScan::registerIsAvailable(RegRecord *physRegRecord, LsraLocation currentLo
// If this interval isn't active, it's available if it isn't referenced
// at this location (or the previous location, if the recent RefPosition
// is a delayRegFree).
- else if (!assignedInterval->isActive &&
- (recentReference->refType == RefTypeExpUse ||
- recentReference->getRefEndLocation() < currentLoc))
+ else if (!assignedInterval->isActive &&
+ (recentReference->refType == RefTypeExpUse || recentReference->getRefEndLocation() < currentLoc))
{
// This interval must have a next reference (otherwise it wouldn't be assigned to this register)
- RefPosition * nextReference = recentReference->nextRefPosition;
- if ( nextReference != nullptr )
+ RefPosition* nextReference = recentReference->nextRefPosition;
+ if (nextReference != nullptr)
{
if (nextReference->nodeLocation < nextRefLocation)
{
@@ -4799,7 +4776,7 @@ LinearScan::registerIsAvailable(RegRecord *physRegRecord, LsraLocation currentLo
{
assert(recentReference->copyReg && recentReference->registerAssignment != regMask);
}
- }
+ }
else
{
return false;
@@ -4814,7 +4791,8 @@ LinearScan::registerIsAvailable(RegRecord *physRegRecord, LsraLocation currentLo
if (regType == TYP_DOUBLE)
{
// Recurse, but check the other half this time (TYP_FLOAT)
- if (!registerIsAvailable(getRegisterRecord(REG_NEXT(physRegRecord->regNum)), currentLoc, nextRefLocationPtr, TYP_FLOAT))
+ if (!registerIsAvailable(getRegisterRecord(REG_NEXT(physRegRecord->regNum)), currentLoc, nextRefLocationPtr,
+ TYP_FLOAT))
return false;
nextRefLocation = *nextRefLocationPtr;
}
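// A minimal standalone sketch of the availability test above (SimpleRegRecord
// and SimpleRefPosition are simplified assumptions, not the JIT's real
// structures): a register is available at a location only if it is not
// busy-until-next-kill and its next fixed reference, if any, comes strictly
// after that location; the next-reference location is reported to the caller.
#include <cstdint>
#include <limits>

using Location = uint32_t;
static const Location kMaxLocation = std::numeric_limits<Location>::max();

struct SimpleRefPosition
{
    Location nodeLocation;
};

struct SimpleRegRecord
{
    bool               isBusyUntilNextKill;
    SimpleRefPosition* nextPhysReference; // next fixed use of this physical register, or nullptr
};

bool simpleRegisterIsAvailable(const SimpleRegRecord& reg, Location currentLoc, Location* nextRefLocation)
{
    *nextRefLocation = kMaxLocation;
    if (reg.isBusyUntilNextKill)
    {
        return false;
    }
    if (reg.nextPhysReference != nullptr)
    {
        if (reg.nextPhysReference->nodeLocation <= currentLoc)
        {
            return false; // already needed at (or before) this point
        }
        *nextRefLocation = reg.nextPhysReference->nodeLocation;
    }
    return true;
}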
@@ -4842,21 +4820,19 @@ LinearScan::registerIsAvailable(RegRecord *physRegRecord, LsraLocation currentLo
// we are interested in the "defining" type of the interval). This is because the situation of interest
// only happens at the use (where it must be copied to an integer register).
-RegisterType
-LinearScan::getRegisterType(Interval *currentInterval, RefPosition* refPosition)
+RegisterType LinearScan::getRegisterType(Interval* currentInterval, RefPosition* refPosition)
{
assert(refPosition->getInterval() == currentInterval);
- RegisterType regType = currentInterval->registerType;
- regMaskTP candidates = refPosition->registerAssignment;
+ RegisterType regType = currentInterval->registerType;
+ regMaskTP candidates = refPosition->registerAssignment;
#if defined(FEATURE_SIMD) && defined(_TARGET_AMD64_)
if ((candidates & allRegs(regType)) == RBM_NONE)
{
- assert((regType == TYP_SIMD8) &&
- (refPosition->refType == RefTypeUse) &&
+ assert((regType == TYP_SIMD8) && (refPosition->refType == RefTypeUse) &&
((candidates & allRegs(TYP_INT)) != RBM_NONE));
regType = TYP_INT;
}
-#else // !(defined(FEATURE_SIMD) && defined(_TARGET_AMD64_))
+#else // !(defined(FEATURE_SIMD) && defined(_TARGET_AMD64_))
assert((candidates & allRegs(regType)) != RBM_NONE);
#endif // !(defined(FEATURE_SIMD) && defined(_TARGET_AMD64_))
return regType;
@@ -4877,19 +4853,18 @@ LinearScan::getRegisterType(Interval *currentInterval, RefPosition* refPosition)
// TODO-CQ: Consider whether we need to use a different order for tree temps than for vars, as
// reg predict does
-static const regNumber lsraRegOrder[] = { REG_VAR_ORDER };
-const unsigned lsraRegOrderSize = ArrLen(lsraRegOrder);
-static const regNumber lsraRegOrderFlt[] = { REG_VAR_ORDER_FLT };
-const unsigned lsraRegOrderFltSize = ArrLen(lsraRegOrderFlt);
+static const regNumber lsraRegOrder[] = {REG_VAR_ORDER};
+const unsigned lsraRegOrderSize = ArrLen(lsraRegOrder);
+static const regNumber lsraRegOrderFlt[] = {REG_VAR_ORDER_FLT};
+const unsigned lsraRegOrderFltSize = ArrLen(lsraRegOrderFlt);
-regNumber
-LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPosition)
+regNumber LinearScan::tryAllocateFreeReg(Interval* currentInterval, RefPosition* refPosition)
{
regNumber foundReg = REG_NA;
- RegisterType regType = getRegisterType(currentInterval, refPosition);
- const regNumber * regOrder;
- unsigned regOrderSize;
+ RegisterType regType = getRegisterType(currentInterval, refPosition);
+ const regNumber* regOrder;
+ unsigned regOrderSize;
if (useFloatReg(regType))
{
regOrder = lsraRegOrderFlt;
@@ -4902,30 +4877,27 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
}
LsraLocation currentLocation = refPosition->nodeLocation;
- RefPosition* nextRefPos = refPosition->nextRefPosition;
- LsraLocation nextLocation = (nextRefPos == nullptr) ? currentLocation : nextRefPos->nodeLocation;
- regMaskTP candidates = refPosition->registerAssignment;
- regMaskTP preferences = currentInterval->registerPreferences;
+ RefPosition* nextRefPos = refPosition->nextRefPosition;
+ LsraLocation nextLocation = (nextRefPos == nullptr) ? currentLocation : nextRefPos->nodeLocation;
+ regMaskTP candidates = refPosition->registerAssignment;
+ regMaskTP preferences = currentInterval->registerPreferences;
if (RefTypeIsDef(refPosition->refType))
{
if (currentInterval->hasConflictingDefUse)
{
resolveConflictingDefAndUse(currentInterval, refPosition);
- candidates = refPosition->registerAssignment;
+ candidates = refPosition->registerAssignment;
}
// Otherwise, check for the case of a fixed-reg def of a reg that will be killed before the
// use, or interferes at the point of use (which shouldn't happen, but Lower doesn't mark
// the contained nodes as interfering).
// Note that we may have a ParamDef RefPosition that is marked isFixedRegRef, but which
// has had its registerAssignment changed to no longer be a single register.
- else if (refPosition->isFixedRegRef &&
- nextRefPos != nullptr &&
- RefTypeIsUse(nextRefPos->refType) &&
- !nextRefPos->isFixedRegRef &&
- genMaxOneBit(refPosition->registerAssignment))
+ else if (refPosition->isFixedRegRef && nextRefPos != nullptr && RefTypeIsUse(nextRefPos->refType) &&
+ !nextRefPos->isFixedRegRef && genMaxOneBit(refPosition->registerAssignment))
{
- regNumber defReg = refPosition->assignedReg();
+ regNumber defReg = refPosition->assignedReg();
RegRecord* defRegRecord = getRegisterRecord(defReg);
RefPosition* currFixedRegRefPosition = defRegRecord->recentRefPosition;
@@ -4947,7 +4919,10 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
}
preferences &= candidates;
- if (preferences == RBM_NONE) preferences = candidates;
+ if (preferences == RBM_NONE)
+ {
+ preferences = candidates;
+ }
regMaskTP relatedPreferences = RBM_NONE;
#ifdef DEBUG
@@ -4959,7 +4934,7 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
// If the related interval has no further references, it is possible that it is a source of the
// node that produces this interval. However, we don't want to use the relatedInterval for preferencing
// if its next reference is not a new definition (as it either is or will become live).
- Interval * relatedInterval = currentInterval->relatedInterval;
+ Interval* relatedInterval = currentInterval->relatedInterval;
if (relatedInterval != nullptr)
{
RefPosition* nextRelatedRefPosition = relatedInterval->getNextRefPosition();
@@ -4974,7 +4949,8 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
else if ((relatedInterval->relatedInterval != nullptr) &&
(nextRelatedRefPosition->nextRefPosition != nullptr) &&
(nextRelatedRefPosition->nextRefPosition->nextRefPosition == nullptr) &&
- (nextRelatedRefPosition->nextRefPosition->nodeLocation < relatedInterval->relatedInterval->getNextRefLocation()))
+ (nextRelatedRefPosition->nextRefPosition->nodeLocation <
+ relatedInterval->relatedInterval->getNextRefLocation()))
{
// The current relatedInterval has only two remaining RefPositions, both of which
// occur prior to the next RefPosition for its relatedInterval.
@@ -4991,9 +4967,13 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
// interval preferences into account in the loop over all the registers.
if (relatedInterval->assignedReg != nullptr)
+ {
relatedPreferences = genRegMask(relatedInterval->assignedReg->regNum);
+ }
else
+ {
relatedPreferences = relatedInterval->registerPreferences;
+ }
}
bool preferCalleeSave = currentInterval->preferCalleeSave;
@@ -5012,11 +4992,10 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
// If we have a relatedInterval that is not currently occupying a register,
// and whose lifetime begins after this one ends,
// we want to try to select a register that will cover its lifetime.
- if ((relatedInterval != nullptr) &&
- (relatedInterval->assignedReg == nullptr) &&
+ if ((relatedInterval != nullptr) && (relatedInterval->assignedReg == nullptr) &&
(relatedInterval->getNextRefLocation() >= rangeEndRefPosition->nodeLocation))
{
- lastRefPosition = relatedInterval->lastRefPosition;
+ lastRefPosition = relatedInterval->lastRefPosition;
preferCalleeSave = relatedInterval->preferCalleeSave;
}
}
@@ -5025,8 +5004,8 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
// non-commutative operator), its endLocation is delayed until the "def"
// position, which is one location past the use (getRefEndLocation() takes care of this).
LsraLocation rangeEndLocation = rangeEndRefPosition->getRefEndLocation();
- LsraLocation lastLocation = lastRefPosition->getRefEndLocation();
- regNumber prevReg = REG_NA;
+ LsraLocation lastLocation = lastRefPosition->getRefEndLocation();
+ regNumber prevReg = REG_NA;
if (currentInterval->assignedReg)
{
@@ -5037,8 +5016,8 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
// Use it preemptively (i.e. before checking other available regs)
// only if it is preferred and available.
- RegRecord *regRec = currentInterval->assignedReg;
- prevReg = regRec->regNum;
+ RegRecord* regRec = currentInterval->assignedReg;
+ prevReg = regRec->regNum;
regMaskTP prevRegBit = genRegMask(prevReg);
// Is it in the preferred set of regs?
@@ -5065,16 +5044,15 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
refPosition->registerAssignment = genRegMask(foundReg);
return foundReg;
}
- else
+ else
{
// Don't keep trying to allocate to this register
currentInterval->assignedReg = nullptr;
}
}
-
- RegRecord * availablePhysRegInterval = nullptr;
- Interval * intervalToUnassign = nullptr;
+ RegRecord* availablePhysRegInterval = nullptr;
+ Interval* intervalToUnassign = nullptr;
// Each register will receive a score which is the sum of the scoring criteria below.
// These were selected on the assumption that they will have an impact on the "goodness"
@@ -5086,14 +5064,15 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
// If the scores are equal, but one covers more of the current interval's range,
// then it wins. Otherwise, the one encountered earlier in the regOrder wins.
- enum RegisterScore {
- VALUE_AVAILABLE = 0x40, // It is a constant value that is already in an acceptable register.
- COVERS = 0x20, // It is in the interval's preference set and it covers the entire lifetime.
- OWN_PREFERENCE = 0x10, // It is in the preference set of this interval.
- COVERS_RELATED = 0x08, // It is in the preference set of the related interval and covers the entire lifetime.
- RELATED_PREFERENCE = 0x04, // It is in the preference set of the related interval.
- CALLER_CALLEE = 0x02, // It is in the right "set" for the interval (caller or callee-save).
- UNASSIGNED = 0x01, // It is not currently assigned to an inactive interval.
+ enum RegisterScore
+ {
+ VALUE_AVAILABLE = 0x40, // It is a constant value that is already in an acceptable register.
+ COVERS = 0x20, // It is in the interval's preference set and it covers the entire lifetime.
+ OWN_PREFERENCE = 0x10, // It is in the preference set of this interval.
+ COVERS_RELATED = 0x08, // It is in the preference set of the related interval and covers the entire lifetime.
+ RELATED_PREFERENCE = 0x04, // It is in the preference set of the related interval.
+ CALLER_CALLEE = 0x02, // It is in the right "set" for the interval (caller or callee-save).
+ UNASSIGNED = 0x01, // It is not currently assigned to an inactive interval.
};
int bestScore = 0;
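// A small self-contained illustration of why the score flags above can be
// compared as plain integers: each criterion is a distinct power of two and
// higher-priority criteria use larger bits, so a single higher-priority hit
// outweighs every combination of lower ones (0x20 > 0x10 + 0x08 + 0x04 + 0x02 + 0x01).
// The values mirror the enum; the two example scores are assumptions for illustration only.
#include <cassert>

enum SketchScore
{
    SKETCH_COVERS         = 0x20,
    SKETCH_OWN_PREFERENCE = 0x10,
    SKETCH_CALLER_CALLEE  = 0x02,
    SKETCH_UNASSIGNED     = 0x01,
};

int main()
{
    int coveringReg   = SKETCH_COVERS;                                                    // covers the whole lifetime
    int convenientReg = SKETCH_OWN_PREFERENCE | SKETCH_CALLER_CALLEE | SKETCH_UNASSIGNED; // wins only lesser criteria
    assert(coveringReg > convenientReg); // the single higher-priority bit still dominates
    return 0;
}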
@@ -5120,28 +5099,30 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
// An optimization for the common case where there is only one candidate -
// avoid looping over all the other registers
- regNumber singleReg = REG_NA;
+ regNumber singleReg = REG_NA;
if (genMaxOneBit(candidates))
{
regOrderSize = 1;
- singleReg = genRegNumFromMask(candidates);
- regOrder = &singleReg;
+ singleReg = genRegNumFromMask(candidates);
+ regOrder = &singleReg;
}
-
+
for (unsigned i = 0; i < regOrderSize && (candidates != RBM_NONE); i++)
{
- regNumber regNum = regOrder[i];
+ regNumber regNum = regOrder[i];
regMaskTP candidateBit = genRegMask(regNum);
- if (!(candidates & candidateBit))
+ if (!(candidates & candidateBit))
+ {
continue;
+ }
candidates &= ~candidateBit;
- RegRecord * physRegRecord = getRegisterRecord(regNum);
+ RegRecord* physRegRecord = getRegisterRecord(regNum);
- int score = 0;
+ int score = 0;
LsraLocation nextPhysRefLocation = MaxLocation;
// By chance, is this register already holding this interval, as a copyReg or having
@@ -5149,7 +5130,7 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
if (physRegRecord->assignedInterval == currentInterval)
{
availablePhysRegInterval = physRegRecord;
- intervalToUnassign = nullptr;
+ intervalToUnassign = nullptr;
break;
}
@@ -5158,7 +5139,7 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
{
continue;
}
-
+
// If the register is next referenced at this location, only use it if
// this has a fixed reg requirement (i.e. this is the reference that caused
// the FixedReg ref to be created)
@@ -5169,10 +5150,8 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
}
// If this is a definition of a constant interval, check to see if its value is already in this register.
- if (currentInterval->isConstant &&
- RefTypeIsDef(refPosition->refType) &&
- (physRegRecord->assignedInterval != nullptr) &&
- physRegRecord->assignedInterval->isConstant)
+ if (currentInterval->isConstant && RefTypeIsDef(refPosition->refType) &&
+ (physRegRecord->assignedInterval != nullptr) && physRegRecord->assignedInterval->isConstant)
{
noway_assert(refPosition->treeNode != nullptr);
GenTree* otherTreeNode = physRegRecord->assignedInterval->firstRefPosition->treeNode;
@@ -5182,26 +5161,27 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
{
switch (otherTreeNode->OperGet())
{
- case GT_CNS_INT:
- if ((refPosition->treeNode->AsIntCon()->IconValue() == otherTreeNode->AsIntCon()->IconValue()) &&
- (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode)))
- {
+ case GT_CNS_INT:
+ if ((refPosition->treeNode->AsIntCon()->IconValue() ==
+ otherTreeNode->AsIntCon()->IconValue()) &&
+ (varTypeGCtype(refPosition->treeNode) == varTypeGCtype(otherTreeNode)))
+ {
#ifdef _TARGET_64BIT_
- // If the constant is negative, only reuse registers of the same type.
- // This is because, on a 64-bit system, we do not sign-extend immediates in registers to
- // 64-bits unless they are actually longs, as this requires a longer instruction.
- // This doesn't apply to a 32-bit system, on which long values occupy multiple registers.
- // (We could sign-extend, but we would have to always sign-extend, because if we reuse more
- // than once, we won't have access to the instruction that originally defines the constant).
- if ((refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet()) ||
- (refPosition->treeNode->AsIntCon()->IconValue() >= 0))
+ // If the constant is negative, only reuse registers of the same type.
+ // This is because, on a 64-bit system, we do not sign-extend immediates in registers to
+ // 64-bits unless they are actually longs, as this requires a longer instruction.
+ // This doesn't apply to a 32-bit system, on which long values occupy multiple registers.
+ // (We could sign-extend, but we would have to always sign-extend, because if we reuse more
+ // than once, we won't have access to the instruction that originally defines the constant).
+ if ((refPosition->treeNode->TypeGet() == otherTreeNode->TypeGet()) ||
+ (refPosition->treeNode->AsIntCon()->IconValue() >= 0))
#endif // _TARGET_64BIT_
- {
- score |= VALUE_AVAILABLE;
+ {
+ score |= VALUE_AVAILABLE;
+ }
}
- }
- break;
- case GT_CNS_DBL:
+ break;
+ case GT_CNS_DBL:
{
// For floating point constants, the values must be identical, not simply compare
// equal. So we compare the bits.
@@ -5212,9 +5192,9 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
}
break;
}
- default:
- // for all other 'otherTreeNode->OperGet()' kinds, we leave 'score' unchanged
- break;
+ default:
+ // for all other 'otherTreeNode->OperGet()' kinds, we leave 'score' unchanged
+ break;
}
}
}
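// A hedged sketch of the sign-extension caveat in the constant-reuse check
// above (ConstDesc is an assumed stand-in for the constant tree node): two
// integer constants may share a register only when their values match, and on
// a 64-bit target a negative value is reused only when the types also match,
// because a negative 32-bit immediate in a register is not sign-extended to 64 bits.
#include <cstdint>

struct ConstDesc
{
    int64_t value;
    int     typeSize; // 4 for int, 8 for long - illustrative only
};

bool canReuseConstReg(const ConstDesc& wanted, const ConstDesc& inReg, bool is64BitTarget)
{
    if (wanted.value != inReg.value)
    {
        return false;
    }
    if (is64BitTarget && (wanted.value < 0))
    {
        // Negative values: require identical widths, otherwise the upper bits would differ.
        return wanted.typeSize == inReg.typeSize;
    }
    return true;
}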
@@ -5238,8 +5218,7 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
score |= COVERS;
}
}
- if (relatedInterval != nullptr &&
- (candidateBit & relatedPreferences) != RBM_NONE)
+ if (relatedInterval != nullptr && (candidateBit & relatedPreferences) != RBM_NONE)
{
score |= RELATED_PREFERENCE;
if (nextPhysRefLocation > relatedInterval->lastRefPosition->nodeLocation)
@@ -5256,8 +5235,7 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
score |= RELATED_PREFERENCE;
}
- if ((preferCalleeSave && physRegRecord->isCalleeSave) ||
- (!preferCalleeSave && !physRegRecord->isCalleeSave))
+ if ((preferCalleeSave && physRegRecord->isCalleeSave) || (!preferCalleeSave && !physRegRecord->isCalleeSave))
{
score |= CALLER_CALLEE;
}
@@ -5285,7 +5263,7 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
{
foundBetterCandidate = true;
}
- }
+ }
// If both cover the range, prefer a register that is killed sooner (leaving the longer range register
// available). If both cover the range and are also killed at the same location, prefer the one that
// is the same as the previous assignment.
@@ -5311,10 +5289,10 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
if (foundBetterCandidate)
{
- bestLocation = nextPhysRefLocation;
+ bestLocation = nextPhysRefLocation;
availablePhysRegInterval = physRegRecord;
- intervalToUnassign = physRegRecord->assignedInterval;
- bestScore = score;
+ intervalToUnassign = physRegRecord->assignedInterval;
+ bestScore = score;
}
// there is no way we can get a better score so break out
@@ -5324,7 +5302,8 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
}
}
- if (availablePhysRegInterval != nullptr) {
+ if (availablePhysRegInterval != nullptr)
+ {
if (intervalToUnassign != nullptr)
{
unassignPhysReg(availablePhysRegInterval, intervalToUnassign->recentRefPosition);
@@ -5346,8 +5325,8 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
assert((bestScore & VALUE_AVAILABLE) == 0);
}
assignPhysReg(availablePhysRegInterval, currentInterval);
- foundReg = availablePhysRegInterval->regNum;
- regMaskTP foundRegMask = genRegMask(foundReg);
+ foundReg = availablePhysRegInterval->regNum;
+ regMaskTP foundRegMask = genRegMask(foundReg);
refPosition->registerAssignment = foundRegMask;
if (relatedInterval != nullptr)
{
@@ -5372,28 +5351,28 @@ LinearScan::tryAllocateFreeReg(Interval *currentInterval, RefPosition *refPositi
// The regNumber allocated to the RefPositon. Returns REG_NA if no free register is found.
//
// Note: Currently this routine uses weight and farthest distance of next reference
-// to select a ref position for spilling.
+// to select a ref position for spilling.
// a) if allocateIfProfitable = false
// The ref position chosen for spilling will be the lowest weight
// of all and if there is more than one ref position with the
// same lowest weight, among them chooses the one with farthest
// distance to its next reference.
-//
+//
// b) if allocateIfProfitable = true
// The ref position chosen for spilling will not only be the lowest weight
// of all, but will also have a weight lower than 'refPosition'. If there is
// no such ref position, a reg will not be allocated.
-regNumber
-LinearScan::allocateBusyReg(Interval* current,
- RefPosition* refPosition,
- bool allocateIfProfitable)
+regNumber LinearScan::allocateBusyReg(Interval* current, RefPosition* refPosition, bool allocateIfProfitable)
{
regNumber foundReg = REG_NA;
- RegisterType regType = getRegisterType(current, refPosition);
- regMaskTP candidates = refPosition->registerAssignment;
- regMaskTP preferences = (current->registerPreferences & candidates);
- if (preferences == RBM_NONE) preferences = candidates;
+ RegisterType regType = getRegisterType(current, refPosition);
+ regMaskTP candidates = refPosition->registerAssignment;
+ regMaskTP preferences = (current->registerPreferences & candidates);
+ if (preferences == RBM_NONE)
+ {
+ preferences = candidates;
+ }
if (candidates == RBM_NONE)
{
// This assumes only integer and floating point register types
@@ -5409,22 +5388,22 @@ LinearScan::allocateBusyReg(Interval* current,
// TODO-CQ: Determine whether/how to take preferences into account in addition to
// preferring the one with the furthest ref position when considering
// a candidate to spill
- RegRecord* farthestRefPhysRegRecord = nullptr;
- LsraLocation farthestLocation = MinLocation;
- LsraLocation refLocation = refPosition->nodeLocation;
- unsigned farthestRefPosWeight;
+ RegRecord* farthestRefPhysRegRecord = nullptr;
+ LsraLocation farthestLocation = MinLocation;
+ LsraLocation refLocation = refPosition->nodeLocation;
+ unsigned farthestRefPosWeight;
if (allocateIfProfitable)
{
// If allocating a reg is optional, we will consider those ref positions
- // whose weight is less than 'refPosition' for spilling.
+ // whose weight is less than 'refPosition' for spilling.
farthestRefPosWeight = getWeight(refPosition);
}
else
{
// If allocating a reg is a must, we start off with max weight so
- // that the first spill candidate will be selected based on
+ // that the first spill candidate will be selected based on
// farthest distance alone. Since we start off with farthestLocation
- // initialized to MinLocation, the first available ref position
+ // initialized to MinLocation, the first available ref position
// will be selected as spill candidate and its weight as the
// farthestRefPosWeight.
farthestRefPosWeight = BB_MAX_WEIGHT;
@@ -5433,8 +5412,11 @@ LinearScan::allocateBusyReg(Interval* current,
for (regNumber regNum : Registers(regType))
{
regMaskTP candidateBit = genRegMask(regNum);
- if (!(candidates & candidateBit)) continue;
- RegRecord * physRegRecord = getRegisterRecord(regNum);
+ if (!(candidates & candidateBit))
+ {
+ continue;
+ }
+ RegRecord* physRegRecord = getRegisterRecord(regNum);
if (physRegRecord->isBusyUntilNextKill)
{
@@ -5461,7 +5443,7 @@ LinearScan::allocateBusyReg(Interval* current,
// to remain live until the use, we should set the candidates to allRegs(regType)
// to avoid a spill - codegen can then insert the copy.
assert(candidates == candidateBit);
- physRegNextLocation = MaxLocation;
+ physRegNextLocation = MaxLocation;
farthestRefPosWeight = BB_MAX_WEIGHT;
}
else
@@ -5518,7 +5500,7 @@ LinearScan::allocateBusyReg(Interval* current,
RefPosition* nextAssignedRef = recentAssignedRef->nextRefPosition;
assert(nextAssignedRef != nullptr);
assert(nextAssignedRef->nodeLocation == refLocation ||
- (nextAssignedRef->nodeLocation + 1 == refLocation && nextAssignedRef->delayRegFree));
+ (nextAssignedRef->nodeLocation + 1 == refLocation && nextAssignedRef->delayRegFree));
}
}
continue;
@@ -5537,11 +5519,10 @@ LinearScan::allocateBusyReg(Interval* current,
continue;
}
- // If the current position has the candidate register marked to be delayed,
+ // If the current position has the candidate register marked to be delayed,
// check if the previous location is using this register; if that's the case we have to skip
// since we can't spill this register.
- if (recentAssignedRef->delayRegFree &&
- (refLocation == recentAssignedRef->nodeLocation + 1))
+ if (recentAssignedRef->delayRegFree && (refLocation == recentAssignedRef->nodeLocation + 1))
{
continue;
}
@@ -5581,8 +5562,8 @@ LinearScan::allocateBusyReg(Interval* current,
}
else
#endif
- // This if-stmt is associated with the above else
- if (recentAssignedRefWeight < farthestRefPosWeight)
+ // This if-stmt is associated with the above else
+ if (recentAssignedRefWeight < farthestRefPosWeight)
{
isBetterLocation = true;
}
@@ -5596,7 +5577,7 @@ LinearScan::allocateBusyReg(Interval* current,
// If allocateIfProfitable=true, the first spill candidate selected
// will be based on weight alone. After we have found a spill
- // candidate whose weight is less than the 'refPosition', we will
+ // candidate whose weight is less than the 'refPosition', we will
// consider farthest distance when there is a tie in weights.
// This is to ensure that we don't spill a ref position whose
// weight is equal to weight of 'refPosition'.
@@ -5617,10 +5598,9 @@ LinearScan::allocateBusyReg(Interval* current,
// Both weight and distance are equal.
// Prefer that ref position which is marked both reload and
// allocate if profitable. These ref positions don't
- // need to be spilled as they are already in memory and
+ // need to be spilled as they are already in memory and
// codegen considers them as contained memory operands.
- isBetterLocation = (recentAssignedRef != nullptr) &&
- recentAssignedRef->reload &&
+ isBetterLocation = (recentAssignedRef != nullptr) && recentAssignedRef->reload &&
recentAssignedRef->AllocateIfProfitable();
}
else
@@ -5632,9 +5612,9 @@ LinearScan::allocateBusyReg(Interval* current,
if (isBetterLocation)
{
- farthestLocation = nextLocation;
+ farthestLocation = nextLocation;
farthestRefPhysRegRecord = physRegRecord;
- farthestRefPosWeight = recentAssignedRefWeight;
+ farthestRefPosWeight = recentAssignedRefWeight;
}
}
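// A compact sketch of the spill-candidate ordering used in the loop above
// (SpillCandidate is a simplified assumption, not the JIT's data): lower
// spill weight always wins; when weights tie, the candidate whose next
// reference is farthest away wins, so the spilled value stays out of a
// register for as long as possible.
#include <cstdint>

struct SpillCandidate
{
    uint32_t weight;       // block-weighted cost of spilling this interval
    uint32_t nextLocation; // location of its next reference
};

bool isBetterSpillCandidate(const SpillCandidate& cand, const SpillCandidate& best)
{
    if (cand.weight != best.weight)
    {
        return cand.weight < best.weight; // lower weight is always preferred
    }
    return cand.nextLocation > best.nextLocation; // tie-break on farthest next reference
}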
@@ -5643,14 +5623,12 @@ LinearScan::allocateBusyReg(Interval* current,
{
// There may not be a spill candidate or if one is found
// its weight must be less than the weight of 'refPosition'
- assert((farthestRefPhysRegRecord == nullptr) ||
- (farthestRefPosWeight < getWeight(refPosition)));
+ assert((farthestRefPhysRegRecord == nullptr) || (farthestRefPosWeight < getWeight(refPosition)));
}
- else
+ else
{
// Must have found a spill candidate.
- assert((farthestRefPhysRegRecord != nullptr) &&
- (farthestLocation > refLocation || refPosition->isFixedRegRef));
+ assert((farthestRefPhysRegRecord != nullptr) && (farthestLocation > refLocation || refPosition->isFixedRegRef));
}
#endif
@@ -5663,10 +5641,10 @@ LinearScan::allocateBusyReg(Interval* current,
}
else
{
- foundReg = REG_NA;
+ foundReg = REG_NA;
refPosition->registerAssignment = RBM_NONE;
}
-
+
return foundReg;
}
@@ -5678,26 +5656,25 @@ LinearScan::allocateBusyReg(Interval* current,
// Prefer a free register that's got the earliest next use.
// Otherwise, spill something with the farthest next use
//
-regNumber
-LinearScan::assignCopyReg(RefPosition * refPosition)
+regNumber LinearScan::assignCopyReg(RefPosition* refPosition)
{
- Interval * currentInterval = refPosition->getInterval();
+ Interval* currentInterval = refPosition->getInterval();
assert(currentInterval != nullptr);
assert(currentInterval->isActive);
- bool foundFreeReg = false;
- RegRecord * bestPhysReg = nullptr;
+ bool foundFreeReg = false;
+ RegRecord* bestPhysReg = nullptr;
LsraLocation bestLocation = MinLocation;
- regMaskTP candidates = refPosition->registerAssignment;
+ regMaskTP candidates = refPosition->registerAssignment;
// Save the relatedInterval, if any, so that it doesn't get modified during allocation.
- Interval* savedRelatedInterval = currentInterval->relatedInterval;
+ Interval* savedRelatedInterval = currentInterval->relatedInterval;
currentInterval->relatedInterval = nullptr;
// We don't really want to change the default assignment,
// so 1) pretend this isn't active, and 2) remember the old reg
- regNumber oldPhysReg = currentInterval->physReg;
- RegRecord * oldRegRecord = currentInterval->assignedReg;
+ regNumber oldPhysReg = currentInterval->physReg;
+ RegRecord* oldRegRecord = currentInterval->assignedReg;
assert(oldRegRecord->regNum == oldPhysReg);
currentInterval->isActive = false;
@@ -5709,19 +5686,18 @@ LinearScan::assignCopyReg(RefPosition * refPosition)
// Now restore the old info
currentInterval->relatedInterval = savedRelatedInterval;
- currentInterval->physReg = oldPhysReg;
- currentInterval->assignedReg = oldRegRecord;
- currentInterval->isActive = true;
+ currentInterval->physReg = oldPhysReg;
+ currentInterval->assignedReg = oldRegRecord;
+ currentInterval->isActive = true;
refPosition->copyReg = true;
return allocatedReg;
}
-
-// Check if the interval is already assigned and if it is then unassign the physical record
+// Check if the interval is already assigned and if it is then unassign the physical record
// then set the assignedInterval to 'interval'
//
-void LinearScan::checkAndAssignInterval( RegRecord * regRec, Interval * interval)
+void LinearScan::checkAndAssignInterval(RegRecord* regRec, Interval* interval)
{
if (regRec->assignedInterval != nullptr && regRec->assignedInterval != interval)
{
@@ -5745,7 +5721,7 @@ void LinearScan::checkAndAssignInterval( RegRecord * regRec, Interval * interval
}
// Assign the given physical register interval to the given interval
-void LinearScan::assignPhysReg( RegRecord * regRec, Interval * interval)
+void LinearScan::assignPhysReg(RegRecord* regRec, Interval* interval)
{
regMaskTP assignedRegMask = genRegMask(regRec->regNum);
compiler->codeGen->regSet.rsSetRegsModified(assignedRegMask DEBUGARG(dumpTerse));
@@ -5756,14 +5732,14 @@ void LinearScan::assignPhysReg( RegRecord * regRec, Interval * interval)
#ifdef _TARGET_ARM_
if ((interval->registerType == TYP_DOUBLE) && isFloatRegType(regRec->registerType))
{
- regNumber nextRegNum = REG_NEXT(regRec->regNum);
- RegRecord * nextRegRec = getRegisterRecord(nextRegNum);
+ regNumber nextRegNum = REG_NEXT(regRec->regNum);
+ RegRecord* nextRegRec = getRegisterRecord(nextRegNum);
checkAndAssignInterval(nextRegRec, interval);
}
#endif // _TARGET_ARM_
- interval->physReg = regRec->regNum;
+ interval->physReg = regRec->regNum;
interval->isActive = true;
if (interval->isLocalVar)
{
@@ -5785,8 +5761,7 @@ void LinearScan::assignPhysReg( RegRecord * regRec, Interval * interval)
// Assumptions:
// fromRefPosition and toRefPosition must not be null
//
-void
-LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition, RefPosition* toRefPosition)
+void LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition, RefPosition* toRefPosition)
{
assert(fromRefPosition != nullptr && toRefPosition != nullptr);
assert(fromRefPosition->getInterval() == interval && toRefPosition->getInterval() == interval);
@@ -5796,8 +5771,7 @@ LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition, RefP
{
// If a register was not allocated, lcl var def/use ref positions, even if reg-optional,
// should be marked as spillAfter.
- if (!fromRefPosition->RequiresRegister() &&
- !(interval->isLocalVar && fromRefPosition->IsActualRef()))
+ if (!fromRefPosition->RequiresRegister() && !(interval->isLocalVar && fromRefPosition->IsActualRef()))
{
fromRefPosition->registerAssignment = RBM_NONE;
}
@@ -5815,7 +5789,7 @@ LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition, RefP
}
#endif // DEBUG
- interval->isActive = false;
+ interval->isActive = false;
interval->isSpilled = true;
// If fromRefPosition occurs before the beginning of this block, mark this as living in the stack
@@ -5828,7 +5802,6 @@ LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition, RefP
}
}
-
//------------------------------------------------------------------------
// unassignPhysRegNoSpill: Unassign the given physical register record from
// an active interval, without spilling.
@@ -5848,7 +5821,7 @@ LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition, RefP
void LinearScan::unassignPhysRegNoSpill(RegRecord* regRec)
{
- Interval * assignedInterval = regRec->assignedInterval;
+ Interval* assignedInterval = regRec->assignedInterval;
assert(assignedInterval != nullptr && assignedInterval->isActive);
assignedInterval->isActive = false;
unassignPhysReg(regRec, nullptr);
@@ -5856,7 +5829,7 @@ void LinearScan::unassignPhysRegNoSpill(RegRecord* regRec)
}
//------------------------------------------------------------------------
-// checkAndClearInterval: Clear the assignedInterval for the given
+// checkAndClearInterval: Clear the assignedInterval for the given
// physical register record
//
// Arguments:
@@ -5870,9 +5843,9 @@ void LinearScan::unassignPhysRegNoSpill(RegRecord* regRec)
// Assumptions:
// see unassignPhysReg
//
-void LinearScan::checkAndClearInterval( RegRecord * regRec, RefPosition* spillRefPosition)
+void LinearScan::checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefPosition)
{
- Interval * assignedInterval = regRec->assignedInterval;
+ Interval* assignedInterval = regRec->assignedInterval;
assert(assignedInterval != nullptr);
regNumber thisRegNum = regRec->regNum;
@@ -5910,9 +5883,9 @@ void LinearScan::checkAndClearInterval( RegRecord * regRec, RefPosition* spillRe
// assigned to this register (e.g. this is a copyReg for that Interval).
// Otherwise, spillRefPosition must be associated with the assignedInterval.
//
-void LinearScan::unassignPhysReg( RegRecord * regRec, RefPosition* spillRefPosition)
+void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPosition)
{
- Interval * assignedInterval = regRec->assignedInterval;
+ Interval* assignedInterval = regRec->assignedInterval;
assert(assignedInterval != nullptr);
checkAndClearInterval(regRec, spillRefPosition);
regNumber thisRegNum = regRec->regNum;
@@ -5920,8 +5893,8 @@ void LinearScan::unassignPhysReg( RegRecord * regRec, RefPosition* spillRefPosit
#ifdef _TARGET_ARM_
if ((assignedInterval->registerType == TYP_DOUBLE) && isFloatRegType(regRec->registerType))
{
- regNumber nextRegNum = REG_NEXT(regRec->regNum);
- RegRecord * nextRegRec = getRegisterRecord(nextRegNum);
+ regNumber nextRegNum = REG_NEXT(regRec->regNum);
+ RegRecord* nextRegRec = getRegisterRecord(nextRegNum);
checkAndClearInterval(nextRegRec, spillRefPosition);
}
#endif // _TARGET_ARM_
@@ -5935,11 +5908,13 @@ void LinearScan::unassignPhysReg( RegRecord * regRec, RefPosition* spillRefPosit
}
#endif // DEBUG
- RefPosition * nextRefPosition = nullptr;
- if (spillRefPosition != nullptr) nextRefPosition = spillRefPosition->nextRefPosition;
+ RefPosition* nextRefPosition = nullptr;
+ if (spillRefPosition != nullptr)
+ {
+ nextRefPosition = spillRefPosition->nextRefPosition;
+ }
- if (assignedInterval->physReg != REG_NA &&
- assignedInterval->physReg != thisRegNum)
+ if (assignedInterval->physReg != REG_NA && assignedInterval->physReg != thisRegNum)
{
// This must have been a temporary copy reg, but we can't assert that because there
// may have been intervening RefPositions that were not copyRegs.
@@ -5948,7 +5923,7 @@ void LinearScan::unassignPhysReg( RegRecord * regRec, RefPosition* spillRefPosit
}
regNumber victimAssignedReg = assignedInterval->physReg;
- assignedInterval->physReg = REG_NA;
+ assignedInterval->physReg = REG_NA;
bool spill = assignedInterval->isActive && nextRefPosition != nullptr;
if (spill)
@@ -5984,15 +5959,12 @@ void LinearScan::unassignPhysReg( RegRecord * regRec, RefPosition* spillRefPosit
// after use. While we could conceivably add special handling for this case in codegen,
// it would be messy and undesirably cause the "bleeding" of LSRA stress modes outside
// of LSRA.
- if (extendLifetimes() &&
- assignedInterval->isLocalVar &&
- RefTypeIsUse(spillRefPosition->refType) &&
- spillRefPosition->treeNode != nullptr &&
- (spillRefPosition->treeNode->gtFlags & GTF_VAR_DEATH) != 0)
+ if (extendLifetimes() && assignedInterval->isLocalVar && RefTypeIsUse(spillRefPosition->refType) &&
+ spillRefPosition->treeNode != nullptr && (spillRefPosition->treeNode->gtFlags & GTF_VAR_DEATH) != 0)
{
dumpLsraAllocationEvent(LSRA_EVENT_SPILL_EXTENDED_LIFETIME, assignedInterval);
assignedInterval->isActive = false;
- spill = false;
+ spill = false;
// If the spillRefPosition occurs before the beginning of this block, it will have
// been marked as living in this register on entry to this block, but we now need
// to mark this as living on the stack.
@@ -6016,15 +5988,14 @@ void LinearScan::unassignPhysReg( RegRecord * regRec, RefPosition* spillRefPosit
{
spillInterval(assignedInterval, spillRefPosition, nextRefPosition);
}
- }
+ }
// Maintain the association with the interval, if it has more references.
// Or, if we "remembered" an interval assigned to this register, restore it.
if (nextRefPosition != nullptr)
{
assignedInterval->assignedReg = regRec;
}
- else if (regRec->previousInterval != nullptr &&
- regRec->previousInterval->assignedReg == regRec &&
+ else if (regRec->previousInterval != nullptr && regRec->previousInterval->assignedReg == regRec &&
regRec->previousInterval->getNextRefPosition() != nullptr)
{
regRec->assignedInterval = regRec->previousInterval;
@@ -6032,7 +6003,8 @@ void LinearScan::unassignPhysReg( RegRecord * regRec, RefPosition* spillRefPosit
#ifdef DEBUG
if (spill)
{
- dumpLsraAllocationEvent(LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, regRec->assignedInterval, thisRegNum);
+ dumpLsraAllocationEvent(LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, regRec->assignedInterval,
+ thisRegNum);
}
else
{
@@ -6056,8 +6028,7 @@ void LinearScan::unassignPhysReg( RegRecord * regRec, RefPosition* spillRefPosit
// Return Value:
// None.
//
-void
-LinearScan::spillGCRefs(RefPosition* killRefPosition)
+void LinearScan::spillGCRefs(RefPosition* killRefPosition)
{
// For each physical register that can hold a GC type,
// if it is occupied by an interval of a GC type, spill that interval.
@@ -6066,11 +6037,10 @@ LinearScan::spillGCRefs(RefPosition* killRefPosition)
{
regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
candidateRegs &= ~nextRegBit;
- regNumber nextReg = genRegNumFromMask(nextRegBit);
- RegRecord* regRecord = getRegisterRecord(nextReg);
- Interval* assignedInterval = regRecord->assignedInterval;
- if (assignedInterval == nullptr ||
- (assignedInterval->isActive == false) ||
+ regNumber nextReg = genRegNumFromMask(nextRegBit);
+ RegRecord* regRecord = getRegisterRecord(nextReg);
+ Interval* assignedInterval = regRecord->assignedInterval;
+ if (assignedInterval == nullptr || (assignedInterval->isActive == false) ||
!varTypeIsGC(assignedInterval->registerType))
{
continue;
@@ -6093,8 +6063,7 @@ LinearScan::spillGCRefs(RefPosition* killRefPosition)
// Calls processBlockEndLocation() to set the outVarToRegMap, then gets the next block,
// and sets the inVarToRegMap appropriately.
-void
-LinearScan::processBlockEndAllocation(BasicBlock * currentBlock)
+void LinearScan::processBlockEndAllocation(BasicBlock* currentBlock)
{
assert(currentBlock != nullptr);
processBlockEndLocations(currentBlock);
@@ -6125,16 +6094,15 @@ LinearScan::processBlockEndAllocation(BasicBlock * currentBlock)
// The new register to use.
#ifdef DEBUG
-regNumber
-LinearScan::rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs)
+regNumber LinearScan::rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs)
{
if (targetReg != REG_STK && getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_ROTATE)
{
// If we're rotating the register locations at block boundaries, try to use
// the next higher register number of the appropriate register type.
regMaskTP candidateRegs = allRegs(interval->registerType) & availableRegs;
- regNumber firstReg = REG_NA;
- regNumber newReg = REG_NA;
+ regNumber firstReg = REG_NA;
+ regNumber newReg = REG_NA;
while (candidateRegs != RBM_NONE)
{
regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
@@ -6176,13 +6144,12 @@ LinearScan::rotateBlockStartLocation(Interval* interval, regNumber targetReg, re
// determine the lclVar locations for the inVarToRegMap.
// During the resolution (write-back) pass, we only modify the inVarToRegMap in cases where
// a lclVar was spilled after the block had been completed.
-void
-LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocationPass)
+void LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocationPass)
{
- unsigned predBBNum = blockInfo[currentBlock->bbNum].predBBNum;
- VarToRegMap predVarToRegMap = getOutVarToRegMap(predBBNum);
- VarToRegMap inVarToRegMap = getInVarToRegMap(currentBlock->bbNum);
- bool hasCriticalInEdge = blockInfo[currentBlock->bbNum].hasCriticalInEdge;
+ unsigned predBBNum = blockInfo[currentBlock->bbNum].predBBNum;
+ VarToRegMap predVarToRegMap = getOutVarToRegMap(predBBNum);
+ VarToRegMap inVarToRegMap = getInVarToRegMap(currentBlock->bbNum);
+ bool hasCriticalInEdge = blockInfo[currentBlock->bbNum].hasCriticalInEdge;
VARSET_TP VARSET_INIT_NOCOPY(liveIn, currentBlock->bbLiveIn);
#ifdef DEBUG
@@ -6203,15 +6170,15 @@ LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocation
{
continue;
}
- regNumber targetReg;
- Interval* interval = getIntervalForLocalVar(varNum);
+ regNumber targetReg;
+ Interval* interval = getIntervalForLocalVar(varNum);
RefPosition* nextRefPosition = interval->getNextRefPosition();
assert(nextRefPosition != nullptr);
if (allocationPass)
{
targetReg = predVarToRegMap[varIndex];
- INDEBUG(targetReg = rotateBlockStartLocation(interval, targetReg, (~liveRegs | inactiveRegs)));
+ INDEBUG(targetReg = rotateBlockStartLocation(interval, targetReg, (~liveRegs | inactiveRegs)));
inVarToRegMap[varIndex] = targetReg;
}
else // !allocationPass (i.e. resolution/write-back pass)
@@ -6246,7 +6213,7 @@ LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocation
{
// case #2 above.
inVarToRegMap[varIndex] = REG_STK;
- targetReg = REG_STK;
+ targetReg = REG_STK;
}
// Else case 2a. - retain targetReg.
}
@@ -6257,8 +6224,7 @@ LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocation
if (interval->isActive)
{
assert(targetReg != REG_STK);
- assert(interval->assignedReg != nullptr &&
- interval->assignedReg->regNum == targetReg &&
+ assert(interval->assignedReg != nullptr && interval->assignedReg->regNum == targetReg &&
interval->assignedReg->assignedInterval == interval);
liveRegs |= genRegMask(targetReg);
continue;
@@ -6268,10 +6234,10 @@ LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocation
{
// This can happen if we are using the locations from a basic block other than the
// immediately preceding one - where the variable was in a different location.
- if(targetReg != REG_STK)
+ if (targetReg != REG_STK)
{
// Unassign it from the register (it will get a new register below).
- if(interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval)
+ if (interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval)
{
interval->isActive = false;
unassignPhysReg(getRegisterRecord(interval->physReg), nullptr);
@@ -6304,8 +6270,8 @@ LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocation
liveRegs |= genRegMask(targetReg);
if (!interval->isActive)
{
- interval->isActive = true;
- interval->physReg = targetReg;
+ interval->isActive = true;
+ interval->physReg = targetReg;
interval->assignedReg = targetRegRecord;
}
Interval* assignedInterval = targetRegRecord->assignedInterval;
@@ -6321,7 +6287,8 @@ LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocation
// assigned to this register).
assignedInterval->isActive = false;
unassignPhysReg(targetRegRecord, nullptr);
- if (allocationPass && assignedInterval->isLocalVar && inVarToRegMap[assignedInterval->getVarIndex(compiler)] == targetReg)
+ if (allocationPass && assignedInterval->isLocalVar &&
+ inVarToRegMap[assignedInterval->getVarIndex(compiler)] == targetReg)
{
inVarToRegMap[assignedInterval->getVarIndex(compiler)] = REG_STK;
}
@@ -6334,8 +6301,7 @@ LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocation
}
assignPhysReg(targetRegRecord, interval);
}
- if (interval->recentRefPosition != nullptr &&
- !interval->recentRefPosition->copyReg &&
+ if (interval->recentRefPosition != nullptr && !interval->recentRefPosition->copyReg &&
interval->recentRefPosition->registerAssignment != genRegMask(targetReg))
{
interval->getNextRefPosition()->outOfOrder = true;
@@ -6348,8 +6314,8 @@ LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocation
{
if ((liveRegs & genRegMask(reg)) == 0)
{
- RegRecord * physRegRecord = getRegisterRecord(reg);
- Interval * assignedInterval = physRegRecord->assignedInterval;
+ RegRecord* physRegRecord = getRegisterRecord(reg);
+ Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr)
{
@@ -6390,8 +6356,7 @@ LinearScan::processBlockStartLocations(BasicBlock* currentBlock, bool allocation
// at successor blocks during allocation time, but if lclVars are spilled after a block has been
// completed, we need to record the REG_STK location for those variables at resolution time.
-void
-LinearScan::processBlockEndLocations(BasicBlock * currentBlock)
+void LinearScan::processBlockEndLocations(BasicBlock* currentBlock)
{
assert(currentBlock != nullptr && currentBlock->bbNum == curBBNum);
VarToRegMap outVarToRegMap = getOutVarToRegMap(curBBNum);
@@ -6407,7 +6372,7 @@ LinearScan::processBlockEndLocations(BasicBlock * currentBlock)
VARSET_ITER_INIT(compiler, iter, liveOut, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
Interval* interval = getIntervalForLocalVar(varNum);
if (interval->isActive)
{
@@ -6423,8 +6388,7 @@ LinearScan::processBlockEndLocations(BasicBlock * currentBlock)
}
#ifdef DEBUG
-void
-LinearScan::dumpRefPositions(const char *str)
+void LinearScan::dumpRefPositions(const char* str)
{
printf("------------\n");
printf("REFPOSITIONS %s: \n", str);
@@ -6436,10 +6400,9 @@ LinearScan::dumpRefPositions(const char *str)
}
#endif // DEBUG
-bool
-LinearScan::registerIsFree(regNumber regNum, RegisterType regType)
+bool LinearScan::registerIsFree(regNumber regNum, RegisterType regType)
{
- RegRecord * physRegRecord = getRegisterRecord(regNum);
+ RegRecord* physRegRecord = getRegisterRecord(regNum);
bool isFree = physRegRecord->isFree();
@@ -6473,10 +6436,9 @@ LinearScan::registerIsFree(regNumber regNum, RegisterType regType)
// defs remain), it will remain assigned to the physRegRecord. However, since
// it is marked inactive, the register will be available, albeit less desirable
// to allocate.
-void
-LinearScan::freeRegister(RegRecord* physRegRecord)
+void LinearScan::freeRegister(RegRecord* physRegRecord)
{
- Interval * assignedInterval = physRegRecord->assignedInterval;
+ Interval* assignedInterval = physRegRecord->assignedInterval;
// It may have already been freed by a "Kill"
if (assignedInterval != nullptr)
{
@@ -6485,7 +6447,7 @@ LinearScan::freeRegister(RegRecord* physRegRecord)
// don't unassign it until we need the register.
if (!assignedInterval->isConstant)
{
- RefPosition *nextRefPosition = assignedInterval->getNextRefPosition();
+ RefPosition* nextRefPosition = assignedInterval->getNextRefPosition();
// Unassign the register only if there are no more RefPositions, or the next
// one is a def. Note that the latter condition doesn't actually ensure that
// there aren't subsequent uses that could be reached by a def in the assigned
@@ -6500,12 +6462,13 @@ LinearScan::freeRegister(RegRecord* physRegRecord)
}
}
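// A minimal sketch of the "keep constants around" policy implemented above
// (the Sketch* types are illustrative assumptions): when a register is freed,
// a constant interval stays loosely associated with it - inactive, but still
// recorded - so a later def of the same constant can find the value already
// in place instead of re-materializing it.
struct SketchInterval
{
    bool isActive;
    bool isConstant;
    bool nextRefIsDefOrNone; // no further refs, or the next one is a def
};

struct SketchRegRecord
{
    SketchInterval* assignedInterval;
};

void sketchFreeRegister(SketchRegRecord& reg)
{
    SketchInterval* interval = reg.assignedInterval;
    if (interval == nullptr)
    {
        return; // may already have been freed by a kill
    }
    interval->isActive = false;
    if (!interval->isConstant && interval->nextRefIsDefOrNone)
    {
        reg.assignedInterval = nullptr; // non-constants drop the association
    }
    // Constants keep the association; the register stays allocatable but the value can be reused.
}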
-void
-LinearScan::freeRegisters(regMaskTP regsToFree)
+void LinearScan::freeRegisters(regMaskTP regsToFree)
{
if (regsToFree == RBM_NONE)
+ {
return;
-
+ }
+
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_FREE_REGS));
while (regsToFree != RBM_NONE)
{
@@ -6520,22 +6483,20 @@ LinearScan::freeRegisters(regMaskTP regsToFree)
// constructed Intervals
// Loosely based on raAssignVars()
//
-void
-LinearScan::allocateRegisters()
+void LinearScan::allocateRegisters()
{
JITDUMP("*************** In LinearScan::allocateRegisters()\n");
DBEXEC(VERBOSE, lsraDumpIntervals("before allocateRegisters"));
-
// at start, nothing is active except for register args
for (auto& interval : intervals)
{
- Interval* currentInterval = &interval;
+ Interval* currentInterval = &interval;
currentInterval->recentRefPosition = nullptr;
- currentInterval->isActive = false;
+ currentInterval->isActive = false;
if (currentInterval->isLocalVar)
{
- LclVarDsc * varDsc = currentInterval->getLocalVar(compiler);
+ LclVarDsc* varDsc = currentInterval->getLocalVar(compiler);
if (varDsc->lvIsRegArg && currentInterval->firstRefPosition != nullptr)
{
currentInterval->isActive = true;
@@ -6546,7 +6507,7 @@ LinearScan::allocateRegisters()
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
getRegisterRecord(reg)->recentRefPosition = nullptr;
- getRegisterRecord(reg)->isActive = false;
+ getRegisterRecord(reg)->isActive = false;
}
#ifdef DEBUG
@@ -6567,11 +6528,11 @@ LinearScan::allocateRegisters()
}
#endif // DEBUG
- BasicBlock * currentBlock = nullptr;
+ BasicBlock* currentBlock = nullptr;
- LsraLocation prevLocation = MinLocation;
- regMaskTP regsToFree = RBM_NONE;
- regMaskTP delayRegsToFree = RBM_NONE;
+ LsraLocation prevLocation = MinLocation;
+ regMaskTP regsToFree = RBM_NONE;
+ regMaskTP delayRegsToFree = RBM_NONE;
// This is the most recent RefPosition for which a register was allocated
// - currently only used for DEBUG but maintained in non-debug, for clarity of code
@@ -6603,21 +6564,18 @@ LinearScan::allocateRegisters()
#endif // DEBUG
// This is the previousRefPosition of the current Referent, if any
- RefPosition *previousRefPosition = nullptr;
+ RefPosition* previousRefPosition = nullptr;
+
+ Interval* currentInterval = nullptr;
+ Referenceable* currentReferent = nullptr;
+ bool isInternalRef = false;
+ RefType refType = currentRefPosition->refType;
- Interval *currentInterval = nullptr;
- Referenceable *currentReferent = nullptr;
- bool isInternalRef = false;
- RefType refType = currentRefPosition->refType;
-
currentReferent = currentRefPosition->referent;
- if (spillAlways() &&
- lastAllocatedRefPosition != nullptr &&
- !lastAllocatedRefPosition->isPhysRegRef &&
+ if (spillAlways() && lastAllocatedRefPosition != nullptr && !lastAllocatedRefPosition->isPhysRegRef &&
!lastAllocatedRefPosition->getInterval()->isInternal &&
- (RefTypeIsDef(lastAllocatedRefPosition->refType) ||
- lastAllocatedRefPosition->getInterval()->isLocalVar))
+ (RefTypeIsDef(lastAllocatedRefPosition->refType) || lastAllocatedRefPosition->getInterval()->isLocalVar))
{
assert(lastAllocatedRefPosition->registerAssignment != RBM_NONE);
RegRecord* regRecord = lastAllocatedRefPosition->getInterval()->assignedReg;
@@ -6626,7 +6584,7 @@ LinearScan::allocateRegisters()
lastAllocatedRefPosition = nullptr;
}
- // We wait to free any registers until we've completed all the
+ // We wait to free any registers until we've completed all the
// uses for the current node.
// This avoids reusing registers too soon.
// We free before the last true def (after all the uses & internal
@@ -6642,12 +6600,14 @@ LinearScan::allocateRegisters()
bool doFreeRegs = false;
// Free at a new location, or at a basic block boundary
if (currentLocation > prevLocation || refType == RefTypeBB)
+ {
doFreeRegs = true;
+ }
if (doFreeRegs)
{
freeRegisters(regsToFree);
- regsToFree = delayRegsToFree;
+ regsToFree = delayRegsToFree;
delayRegsToFree = RBM_NONE;
}
}
@@ -6656,10 +6616,10 @@ LinearScan::allocateRegisters()
// get previous refposition, then current refpos is the new previous
if (currentReferent != nullptr)
{
- previousRefPosition = currentReferent->recentRefPosition;
+ previousRefPosition = currentReferent->recentRefPosition;
currentReferent->recentRefPosition = currentRefPosition;
- }
- else
+ }
+ else
{
assert((refType == RefTypeBB) || (refType == RefTypeKillGCRefs));
}
@@ -6675,14 +6635,14 @@ LinearScan::allocateRegisters()
{
// Free any delayed regs (now in regsToFree) before processing the block boundary
freeRegisters(regsToFree);
- regsToFree = RBM_NONE;
- handledBlockEnd = true;
+ regsToFree = RBM_NONE;
+ handledBlockEnd = true;
curBBStartLocation = currentRefPosition->nodeLocation;
if (currentBlock == nullptr)
{
currentBlock = startBlockSequence();
- }
- else
+ }
+ else
{
processBlockEndAllocation(currentBlock);
currentBlock = moveToNextBlock();
@@ -6728,7 +6688,8 @@ LinearScan::allocateRegisters()
if (refType == RefTypeFixedReg)
{
RegRecord* regRecord = currentRefPosition->getReg();
- if (regRecord->assignedInterval != nullptr && !regRecord->assignedInterval->isActive && regRecord->assignedInterval->isConstant)
+ if (regRecord->assignedInterval != nullptr && !regRecord->assignedInterval->isActive &&
+ regRecord->assignedInterval->isConstant)
{
regRecord->assignedInterval = nullptr;
}
@@ -6747,10 +6708,10 @@ LinearScan::allocateRegisters()
}
regNumber assignedRegister = REG_NA;
-
+
if (currentRefPosition->isIntervalRef())
{
- currentInterval = currentRefPosition->getInterval();
+ currentInterval = currentRefPosition->getInterval();
assignedRegister = currentInterval->physReg;
#if DEBUG
if (VERBOSE && !dumpTerse)
@@ -6761,19 +6722,19 @@ LinearScan::allocateRegisters()
// Identify the special cases where we decide up-front not to allocate
bool allocate = true;
- bool didDump = false;
+ bool didDump = false;
if (refType == RefTypeParamDef || refType == RefTypeZeroInit)
{
// For a ParamDef with a weighted refCount less than unity, don't enregister it at entry.
// TODO-CQ: Consider doing this only for stack parameters, since otherwise we may be needlessly
// inserting a store.
- LclVarDsc * varDsc = currentInterval->getLocalVar(compiler);
+ LclVarDsc* varDsc = currentInterval->getLocalVar(compiler);
assert(varDsc != nullptr);
if (refType == RefTypeParamDef && varDsc->lvRefCntWtd <= BB_UNITY_WEIGHT)
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_ENTRY_REG_ALLOCATED, currentInterval));
- didDump = true;
+ didDump = true;
allocate = false;
}
// If it has no actual references, mark it as "lastUse"; since they're not actually part
@@ -6826,10 +6787,10 @@ LinearScan::allocateRegisters()
// is after the kill of fixed reg but before putarg_reg's next use, fixed reg's
// kill would lead to spill of source but not the putarg_reg if it were treated
// as special.
- if (srcInterval->isActive &&
+ if (srcInterval->isActive &&
genRegMask(srcInterval->physReg) == currentRefPosition->registerAssignment &&
currentInterval->getNextRefLocation() == physRegRecord->getNextRefLocation())
- {
+ {
assert(physRegRecord->regNum == srcInterval->physReg);
// Special putarg_reg acts as a pass-thru since both source lcl var
@@ -6840,10 +6801,10 @@ LinearScan::allocateRegisters()
// tree node, before its use at call node it will lead to spill of
// lcl var instead of putarg_reg since physical reg record is pointing
// to lcl var's interval. As a result, arg reg would get trashed leading
- // to bad codegen. The assumption here is that source lcl var of a
+ // to bad codegen. The assumption here is that source lcl var of a
// special putarg_reg doesn't get spilled and re-allocated prior to
// its use at the call node. This is ensured by marking physical reg
- // record as busy until next kill.
+ // record as busy until next kill.
physRegRecord->isBusyUntilNextKill = true;
}
else
@@ -6854,11 +6815,12 @@ LinearScan::allocateRegisters()
// If this is still a SpecialPutArg, continue;
if (currentInterval->isSpecialPutArg)
{
- INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, currentInterval, currentRefPosition->assignedReg()));
+ INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, currentInterval,
+ currentRefPosition->assignedReg()));
continue;
}
}
-
+
if (assignedRegister == REG_NA && RefTypeIsUse(refType))
{
currentRefPosition->reload = true;
@@ -6867,10 +6829,10 @@ LinearScan::allocateRegisters()
}
regMaskTP assignedRegBit = RBM_NONE;
- bool isInRegister = false;
+ bool isInRegister = false;
if (assignedRegister != REG_NA)
{
- isInRegister = true;
+ isInRegister = true;
assignedRegBit = genRegMask(assignedRegister);
if (!currentInterval->isActive)
{
@@ -6878,7 +6840,8 @@ LinearScan::allocateRegisters()
// was available for use so we kept the association.
if (RefTypeIsUse(refType))
{
- assert(inVarToRegMaps[curBBNum][currentInterval->getVarIndex(compiler)] == REG_STK && previousRefPosition->nodeLocation <= curBBStartLocation);
+ assert(inVarToRegMaps[curBBNum][currentInterval->getVarIndex(compiler)] == REG_STK &&
+ previousRefPosition->nodeLocation <= curBBStartLocation);
isInRegister = false;
}
else
@@ -6894,32 +6857,29 @@ LinearScan::allocateRegisters()
// If this is a physical register, we unconditionally assign it to itself!
if (currentRefPosition->isPhysRegRef)
{
- RegRecord *currentReg = currentRefPosition->getReg();
- Interval * assignedInterval = currentReg->assignedInterval;
+ RegRecord* currentReg = currentRefPosition->getReg();
+ Interval* assignedInterval = currentReg->assignedInterval;
if (assignedInterval != nullptr)
{
unassignPhysReg(currentReg, assignedInterval->recentRefPosition);
}
currentReg->isActive = true;
- assignedRegister = currentReg->regNum;
- assignedRegBit = genRegMask(assignedRegister);
+ assignedRegister = currentReg->regNum;
+ assignedRegBit = genRegMask(assignedRegister);
if (refType == RefTypeKill)
{
currentReg->isBusyUntilNextKill = false;
}
- }
+ }
else if (previousRefPosition != nullptr)
{
assert(previousRefPosition->nextRefPosition == currentRefPosition);
- assert(assignedRegister == REG_NA ||
- assignedRegBit == previousRefPosition->registerAssignment ||
- currentRefPosition->outOfOrder ||
- previousRefPosition->copyReg ||
- previousRefPosition->refType == RefTypeExpUse ||
- currentRefPosition->refType == RefTypeDummyDef);
- }
- else if (assignedRegister != REG_NA)
+ assert(assignedRegister == REG_NA || assignedRegBit == previousRefPosition->registerAssignment ||
+ currentRefPosition->outOfOrder || previousRefPosition->copyReg ||
+ previousRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef);
+ }
+ else if (assignedRegister != REG_NA)
{
// Handle the case where this is a preassigned register (i.e. parameter).
// We don't want to actually use the preassigned register if it's not
@@ -6929,24 +6889,24 @@ LinearScan::allocateRegisters()
// it might be beneficial to keep it in this reg for PART of the lifetime
if (currentInterval->isLocalVar)
{
- regMaskTP preferences = currentInterval->registerPreferences;
- bool keepAssignment = true;
- bool matchesPreferences = (preferences & genRegMask(assignedRegister)) != RBM_NONE;
+ regMaskTP preferences = currentInterval->registerPreferences;
+ bool keepAssignment = true;
+ bool matchesPreferences = (preferences & genRegMask(assignedRegister)) != RBM_NONE;
// Will the assigned register cover the lifetime? If not, does it at least
// meet the preferences for the next RefPosition?
- RegRecord *physRegRecord = getRegisterRecord(currentInterval->physReg);
- RefPosition * nextPhysRegRefPos = physRegRecord->getNextRefPosition();
+ RegRecord* physRegRecord = getRegisterRecord(currentInterval->physReg);
+ RefPosition* nextPhysRegRefPos = physRegRecord->getNextRefPosition();
if (nextPhysRegRefPos != nullptr &&
nextPhysRegRefPos->nodeLocation <= currentInterval->lastRefPosition->nodeLocation)
{
// Check to see if the existing assignment matches the preferences (e.g. callee save registers)
// and ensure that the next use of this localVar does not occur after the nextPhysRegRefPos
- // There must be a next RefPosition, because we know that the Interval extends beyond the nextPhysRegRefPos.
- RefPosition * nextLclVarRefPos = currentRefPosition->nextRefPosition;
+ // There must be a next RefPosition, because we know that the Interval extends beyond the
+ // nextPhysRegRefPos.
+ RefPosition* nextLclVarRefPos = currentRefPosition->nextRefPosition;
assert(nextLclVarRefPos != nullptr);
- if (!matchesPreferences ||
- nextPhysRegRefPos->nodeLocation < nextLclVarRefPos->nodeLocation ||
+ if (!matchesPreferences || nextPhysRegRefPos->nodeLocation < nextLclVarRefPos->nodeLocation ||
physRegRecord->conflictingFixedRegReference(nextLclVarRefPos))
{
keepAssignment = false;
@@ -6983,7 +6943,7 @@ LinearScan::allocateRegisters()
}
assignedRegister = REG_NA;
- assignedRegBit = RBM_NONE;
+ assignedRegBit = RBM_NONE;
}
}
}
@@ -7001,7 +6961,7 @@ LinearScan::allocateRegisters()
unassignPhysRegNoSpill(physRegRecord);
}
currentRefPosition->moveReg = true;
- assignedRegister = REG_NA;
+ assignedRegister = REG_NA;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_MOVE_REG, currentInterval, assignedRegister));
}
else if ((genRegMask(assignedRegister) & currentRefPosition->registerAssignment) != 0)
@@ -7016,22 +6976,20 @@ LinearScan::allocateRegisters()
{
currentReferent->isActive = true;
assert(getRegisterRecord(assignedRegister)->assignedInterval == currentInterval);
- }
- else
+ }
+ else
{
currentRefPosition->reload = true;
}
}
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, currentInterval, assignedRegister));
- }
+ }
else
{
// This must be a localVar or a single-reg fixed use or a tree temp with conflicting def & use.
- assert(currentInterval &&
- (currentInterval->isLocalVar ||
- currentRefPosition->isFixedRegRef ||
- currentInterval->hasConflictingDefUse));
+ assert(currentInterval && (currentInterval->isLocalVar || currentRefPosition->isFixedRegRef ||
+ currentInterval->hasConflictingDefUse));
// It's already in a register, but not one we need.
// If it is a fixed use that is not marked "delayRegFree", there is already a FixedReg to ensure that
@@ -7052,12 +7010,15 @@ LinearScan::allocateRegisters()
{
if (currentRefPosition->delayRegFree)
{
- INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED, currentInterval, assignedRegister));
- delayRegsToFree |= (genRegMask(assignedRegister) | currentRefPosition->registerAssignment);
+ INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE_DELAYED, currentInterval,
+ assignedRegister));
+ delayRegsToFree |=
+ (genRegMask(assignedRegister) | currentRefPosition->registerAssignment);
}
else
{
- INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE, currentInterval, assignedRegister));
+ INDEBUG(
+ dumpLsraAllocationEvent(LSRA_EVENT_LAST_USE, currentInterval, assignedRegister));
regsToFree |= (genRegMask(assignedRegister) | currentRefPosition->registerAssignment);
}
}
@@ -7068,7 +7029,7 @@ LinearScan::allocateRegisters()
currentRefPosition->copyReg = false;
}
continue;
- }
+ }
else
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NEEDS_NEW_REG, nullptr, assignedRegister));
@@ -7086,7 +7047,7 @@ LinearScan::allocateRegisters()
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, assignedRegister));
}
}
- }
+ }
if (assignedRegister == REG_NA)
{
@@ -7095,8 +7056,7 @@ LinearScan::allocateRegisters()
if (currentRefPosition->AllocateIfProfitable())
{
                // We can avoid allocating a register if it is the last use requiring a reload.
- if (currentRefPosition->lastUse &&
- currentRefPosition->reload)
+ if (currentRefPosition->lastUse && currentRefPosition->reload)
{
allocateReg = false;
}
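
Editorial note: the hunk above boils down to a small predicate. The following standalone sketch (hypothetical names, not the JIT's API) shows the decision it encodes: when a use only gets a register "if profitable", skip the allocation for a last use that would have to be reloaded anyway.

#include <cstdio>

// Hypothetical, simplified stand-ins for the RefPosition flags used above.
struct RefUse
{
    bool allocateIfProfitable; // a register is optional for this use
    bool lastUse;              // no later reference to this interval
    bool reload;               // the value would have to be reloaded from the stack
};

// An optional use that is a last use needing a reload is cheaper to leave in memory.
static bool shouldAllocateReg(const RefUse& use)
{
    if (use.allocateIfProfitable && use.lastUse && use.reload)
    {
        return false;
    }
    return true;
}

int main()
{
    RefUse u{true, true, true};
    printf("allocate? %s\n", shouldAllocateReg(u) ? "yes" : "no"); // prints "no"
    return 0;
}
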
@@ -7126,7 +7086,7 @@ LinearScan::allocateRegisters()
{
// TODO-CQ: Determine whether copying to two integer callee-save registers would be profitable.
currentRefPosition->registerAssignment = (allRegs(TYP_FLOAT) & RBM_FLT_CALLEE_TRASH);
- assignedRegister = tryAllocateFreeReg(currentInterval, currentRefPosition);
+ assignedRegister = tryAllocateFreeReg(currentInterval, currentRefPosition);
// There MUST be caller-save registers available, because they have all just been killed.
assert(assignedRegister != REG_NA);
// Now, spill it.
@@ -7138,19 +7098,18 @@ LinearScan::allocateRegisters()
}
else
#endif // FEATURE_SIMD
- if (currentRefPosition->RequiresRegister() ||
- currentRefPosition->AllocateIfProfitable())
+ if (currentRefPosition->RequiresRegister() || currentRefPosition->AllocateIfProfitable())
{
if (allocateReg)
{
- assignedRegister = allocateBusyReg(currentInterval,
- currentRefPosition,
+ assignedRegister = allocateBusyReg(currentInterval, currentRefPosition,
currentRefPosition->AllocateIfProfitable());
}
if (assignedRegister != REG_NA)
{
- INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_SPILLED_REG, currentInterval, assignedRegister));
+ INDEBUG(
+ dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_SPILLED_REG, currentInterval, assignedRegister));
}
else
{
@@ -7159,7 +7118,7 @@ LinearScan::allocateRegisters()
noway_assert(currentRefPosition->AllocateIfProfitable());
currentRefPosition->registerAssignment = RBM_NONE;
- currentRefPosition->reload = false;
+ currentRefPosition->reload = false;
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
}
@@ -7168,7 +7127,7 @@ LinearScan::allocateRegisters()
{
INDEBUG(dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, currentInterval));
currentRefPosition->registerAssignment = RBM_NONE;
- currentInterval->isActive = false;
+ currentInterval->isActive = false;
}
}
#ifdef DEBUG
@@ -7176,8 +7135,7 @@ LinearScan::allocateRegisters()
{
if (VERBOSE)
{
- if (currentInterval->isConstant &&
- (currentRefPosition->treeNode != nullptr) &&
+ if (currentInterval->isConstant && (currentRefPosition->treeNode != nullptr) &&
currentRefPosition->treeNode->IsReuseRegVal())
{
dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, nullptr, assignedRegister, currentBlock);
@@ -7204,12 +7162,11 @@ LinearScan::allocateRegisters()
}
// If we allocated a register, record it
- if (currentInterval != nullptr &&
- assignedRegister != REG_NA)
+ if (currentInterval != nullptr && assignedRegister != REG_NA)
{
- assignedRegBit = genRegMask(assignedRegister);
+ assignedRegBit = genRegMask(assignedRegister);
currentRefPosition->registerAssignment = assignedRegBit;
- currentInterval->physReg = assignedRegister;
+ currentInterval->physReg = assignedRegister;
regsToFree &= ~assignedRegBit; // we'll set it again later if it's dead
// If this interval is dead, free the register.
@@ -7220,8 +7177,7 @@ LinearScan::allocateRegisters()
{
assert(currentRefPosition->isIntervalRef());
- if (refType != RefTypeExpUse &&
- currentRefPosition->nextRefPosition == nullptr)
+ if (refType != RefTypeExpUse && currentRefPosition->nextRefPosition == nullptr)
{
if (currentRefPosition->delayRegFree)
{
@@ -7251,21 +7207,21 @@ LinearScan::allocateRegisters()
if (getLsraExtendLifeTimes())
{
// If we have extended lifetimes, we need to make sure all the registers are freed.
- for( int regNumIndex = 0; regNumIndex <= REG_FP_LAST; regNumIndex++)
+ for (int regNumIndex = 0; regNumIndex <= REG_FP_LAST; regNumIndex++)
{
RegRecord& regRecord = physRegs[regNumIndex];
- Interval* interval = regRecord.assignedInterval;
+ Interval* interval = regRecord.assignedInterval;
if (interval != nullptr)
{
interval->isActive = false;
- unassignPhysReg( &regRecord, nullptr);
+ unassignPhysReg(&regRecord, nullptr);
}
}
}
else
#endif // DEBUG
{
- freeRegisters(regsToFree|delayRegsToFree);
+ freeRegisters(regsToFree | delayRegsToFree);
}
#ifdef DEBUG
@@ -7287,7 +7243,7 @@ LinearScan::allocateRegisters()
// We COULD just reuse the intervalIter from above, but ArrayListIterator doesn't
// provide a Reset function (!) - we'll probably replace this so don't bother
// adding it
-
+
for (auto& interval : intervals)
{
if (interval.isActive)
@@ -7309,7 +7265,7 @@ LinearScan::allocateRegisters()
// Arguments:
// treeNode: The lclVar that's being resolved
// currentRefPosition: the RefPosition associated with the treeNode
-//
+//
// Details:
// This method is called for each local reference, during the resolveRegisters
// phase of LSRA. It is responsible for keeping the following in sync:
@@ -7341,28 +7297,30 @@ LinearScan::allocateRegisters()
// NICE: Consider tracking whether an Interval is always in the same location (register/stack)
// in which case it will require no resolution.
//
-void
-LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPosition)
+void LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition* currentRefPosition)
{
// Is this a tracked local? Or just a register allocated for loading
// a non-tracked one?
- Interval * interval = currentRefPosition->getInterval();
- if (!interval->isLocalVar) return;
+ Interval* interval = currentRefPosition->getInterval();
+ if (!interval->isLocalVar)
+ {
+ return;
+ }
interval->recentRefPosition = currentRefPosition;
- LclVarDsc * varDsc = interval->getLocalVar(compiler);
+ LclVarDsc* varDsc = interval->getLocalVar(compiler);
if (currentRefPosition->registerAssignment == RBM_NONE)
{
assert(!currentRefPosition->RequiresRegister());
interval->isSpilled = true;
- varDsc->lvRegNum = REG_STK;
+ varDsc->lvRegNum = REG_STK;
if (interval->assignedReg != nullptr && interval->assignedReg->assignedInterval == interval)
{
interval->assignedReg->assignedInterval = nullptr;
}
interval->assignedReg = nullptr;
- interval->physReg = REG_NA;
+ interval->physReg = REG_NA;
return;
}
@@ -7371,7 +7329,7 @@ LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPositio
// The exception is the copyReg case, where we've assigned a register
// for a specific purpose, but will be keeping the register assignment
regNumber assignedReg = currentRefPosition->assignedReg();
- regNumber homeReg = assignedReg;
+ regNumber homeReg = assignedReg;
// Undo any previous association with a physical register, UNLESS this
// is a copyReg
@@ -7380,7 +7338,7 @@ LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPositio
regNumber oldAssignedReg = interval->physReg;
if (oldAssignedReg != REG_NA && assignedReg != oldAssignedReg)
{
- RegRecord * oldRegRecord = getRegisterRecord(oldAssignedReg);
+ RegRecord* oldRegRecord = getRegisterRecord(oldAssignedReg);
if (oldRegRecord->assignedInterval == interval)
{
oldRegRecord->assignedInterval = nullptr;
@@ -7398,7 +7356,7 @@ LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPositio
}
}
- bool reload = currentRefPosition->reload;
+ bool reload = currentRefPosition->reload;
bool spillAfter = currentRefPosition->spillAfter;
// In the reload case we simply do not set GTF_REG_VAL, and it gets
@@ -7407,7 +7365,10 @@ LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPositio
if (reload && currentRefPosition->refType != RefTypeDef)
{
varDsc->lvRegNum = REG_STK;
- if (!spillAfter) interval->physReg = assignedReg;
+ if (!spillAfter)
+ {
+ interval->physReg = assignedReg;
+ }
// If there is no treeNode, this must be a RefTypeExpUse, in
// which case we did the reload already
@@ -7426,7 +7387,7 @@ LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPositio
// it as a contained memory operand.
//
            // Note that varDsc->lvRegNum is already set to REG_STK above.
- interval->physReg = REG_NA;
+ interval->physReg = REG_NA;
treeNode->gtRegNum = REG_NA;
treeNode->gtFlags &= ~GTF_SPILLED;
}
@@ -7463,12 +7424,14 @@ LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPositio
// stack. However, we need to remember that it was spilled.
interval->isSpilled = true;
- varDsc->lvRegNum = REG_STK;
- interval->physReg = REG_NA;
+ varDsc->lvRegNum = REG_STK;
+ interval->physReg = REG_NA;
if (treeNode != nullptr)
+ {
treeNode->gtRegNum = REG_NA;
- }
- else
+ }
+ }
+ else
{
// Not reload and Not pure-def that's spillAfter
@@ -7499,13 +7462,13 @@ LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPositio
if (!currentRefPosition->isFixedRegRef || currentRefPosition->moveReg)
{
// This is the second case, where we need to generate a copy
- insertCopyOrReload(treeNode, currentRefPosition->getMultiRegIdx(), currentRefPosition);
+ insertCopyOrReload(treeNode, currentRefPosition->getMultiRegIdx(), currentRefPosition);
}
}
else
{
interval->physReg = assignedReg;
-
+
if (!interval->isSpilled && !interval->isSplit)
{
if (varDsc->lvRegNum != REG_STK)
@@ -7516,7 +7479,7 @@ LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPositio
if (varDsc->lvRegNum != assignedReg)
{
interval->isSplit = TRUE;
- varDsc->lvRegNum = REG_STK;
+ varDsc->lvRegNum = REG_STK;
}
}
else
@@ -7527,50 +7490,53 @@ LinearScan::resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPositio
}
if (spillAfter)
{
- if (treeNode != nullptr) treeNode->gtFlags |= GTF_SPILL;
+ if (treeNode != nullptr)
+ {
+ treeNode->gtFlags |= GTF_SPILL;
+ }
interval->isSpilled = true;
- interval->physReg = REG_NA;
- varDsc->lvRegNum = REG_STK;
+ interval->physReg = REG_NA;
+ varDsc->lvRegNum = REG_STK;
}
// This value is in a register, UNLESS we already saw this treeNode
// and marked it for reload
if (treeNode != nullptr && !(treeNode->gtFlags & GTF_SPILLED))
+ {
treeNode->gtFlags |= GTF_REG_VAL;
+ }
}
// Update the physRegRecord for the register, so that we know what vars are in
// regs at the block boundaries
- RegRecord * physRegRecord = getRegisterRecord(homeReg);
+ RegRecord* physRegRecord = getRegisterRecord(homeReg);
if (spillAfter || currentRefPosition->lastUse)
{
physRegRecord->assignedInterval = nullptr;
- interval->assignedReg = nullptr;
- interval->physReg = REG_NA;
- interval->isActive = false;
+ interval->assignedReg = nullptr;
+ interval->physReg = REG_NA;
+ interval->isActive = false;
}
else
{
- interval->isActive = true;
+ interval->isActive = true;
physRegRecord->assignedInterval = interval;
- interval->assignedReg = physRegRecord;
+ interval->assignedReg = physRegRecord;
}
}
-
-void
-LinearScan::writeRegisters(RefPosition *currentRefPosition, GenTree *tree)
+void LinearScan::writeRegisters(RefPosition* currentRefPosition, GenTree* tree)
{
lsraAssignRegToTree(tree, currentRefPosition->assignedReg(), currentRefPosition->getMultiRegIdx());
}
-//------------------------------------------------------------------------
+//------------------------------------------------------------------------
// insertCopyOrReload: Insert a copy in the case where a tree node value must be moved
// to a different register at the point of use (GT_COPY), or it is reloaded to a different register
// than the one it was spilled from (GT_RELOAD).
//
-// Arguments:
-// tree - This is the node to copy or reload.
+// Arguments:
+// tree - This is the node to copy or reload.
// Insert copy or reload node between this node and its parent.
// multiRegIdx - register position of tree node for which copy or reload is needed.
// refPosition - The RefPosition at which copy or reload will take place.
@@ -7579,7 +7545,7 @@ LinearScan::writeRegisters(RefPosition *currentRefPosition, GenTree *tree)
// The GT_COPY or GT_RELOAD will be inserted in the proper spot in execution order where the reload is to occur.
//
// For example, for this tree (numbers are execution order, lower is earlier and higher is later):
-//
+//
// +---------+----------+
// | GT_ADD (3) |
// +---------+----------+
@@ -7590,9 +7556,9 @@ LinearScan::writeRegisters(RefPosition *currentRefPosition, GenTree *tree)
// +-------------------+ +----------------------+
// | x (1) | "tree" | y (2) |
// +-------------------+ +----------------------+
-//
+//
// generate this tree:
-//
+//
// +---------+----------+
// | GT_ADD (4) |
// +---------+----------+
@@ -7607,7 +7573,7 @@ LinearScan::writeRegisters(RefPosition *currentRefPosition, GenTree *tree)
// +-------------------+
// | x (1) | "tree"
// +-------------------+
-//
+//
// Note in particular that the GT_RELOAD node gets inserted in execution order immediately before the parent of "tree",
// which seems a bit weird since normally a node's parent (in this case, the parent of "x", GT_RELOAD in the "after"
// picture) immediately follows all of its children (that is, normally the execution ordering is postorder).
@@ -7620,14 +7586,13 @@ LinearScan::writeRegisters(RefPosition *currentRefPosition, GenTree *tree)
// and the unspilling code automatically reuses the same register, and does the reload when it notices that flag
// when considering a node's operands.
//
-void
-LinearScan::insertCopyOrReload(GenTreePtr tree, unsigned multiRegIdx, RefPosition* refPosition)
-{
+void LinearScan::insertCopyOrReload(GenTreePtr tree, unsigned multiRegIdx, RefPosition* refPosition)
+{
GenTreePtr* parentChildPointer = nullptr;
- GenTreePtr parent = tree->gtGetParent(&parentChildPointer);
+ GenTreePtr parent = tree->gtGetParent(&parentChildPointer);
noway_assert(parent != nullptr && parentChildPointer != nullptr);
- genTreeOps oper;
+ genTreeOps oper;
if (refPosition->reload)
{
oper = GT_RELOAD;
@@ -7645,21 +7610,21 @@ LinearScan::insertCopyOrReload(GenTreePtr tree, unsigned multiRegIdx, RefPositio
//
// In this case set the ith position reg of reload/copy node to the reg allocated
// for copy/reload refPosition. Essentially a copy/reload node will have a reg
- // for each multi-reg position of its child. If there is a valid reg in ith
+ // for each multi-reg position of its child. If there is a valid reg in ith
// position of GT_COPY or GT_RELOAD node then the corresponding result of its
// child needs to be copied or reloaded to that reg.
if (parent->IsCopyOrReload())
{
noway_assert(parent->OperGet() == oper);
noway_assert(tree->IsMultiRegCall());
- GenTreeCall* call = tree->AsCall();
+ GenTreeCall* call = tree->AsCall();
GenTreeCopyOrReload* copyOrReload = parent->AsCopyOrReload();
noway_assert(copyOrReload->GetRegNumByIdx(multiRegIdx) == REG_NA);
copyOrReload->SetRegNumByIdx(refPosition->assignedReg(), multiRegIdx);
}
else
{
- // Create the new node, with "tree" as its only child.
+ // Create the new node, with "tree" as its only child.
var_types treeType = tree->TypeGet();
#ifdef FEATURE_SIMD
@@ -7676,11 +7641,11 @@ LinearScan::insertCopyOrReload(GenTreePtr tree, unsigned multiRegIdx, RefPositio
}
#endif // FEATURE_SIMD
- GenTreeCopyOrReload* newNode = new(compiler, oper) GenTreeCopyOrReload(oper, treeType, tree);
+ GenTreeCopyOrReload* newNode = new (compiler, oper) GenTreeCopyOrReload(oper, treeType, tree);
assert(refPosition->registerAssignment != RBM_NONE);
newNode->CopyCosts(tree);
newNode->SetRegNumByIdx(refPosition->assignedReg(), multiRegIdx);
- newNode->gtLsraInfo.isLsraAdded = true;
+ newNode->gtLsraInfo.isLsraAdded = true;
newNode->gtLsraInfo.isLocalDefUse = false;
if (refPosition->copyReg)
{
@@ -7699,33 +7664,32 @@ LinearScan::insertCopyOrReload(GenTreePtr tree, unsigned multiRegIdx, RefPositio
}
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
-//------------------------------------------------------------------------
+//------------------------------------------------------------------------
// insertUpperVectorSaveAndReload: Insert code to save and restore the upper half of a vector that lives
// in a callee-save register at the point of a kill (the upper half is
// not preserved).
//
-// Arguments:
+// Arguments:
// tree - This is the node around which we will insert the Save & Reload.
// It will be a call or some node that turns into a call.
// refPosition - The RefTypeUpperVectorSaveDef RefPosition.
//
-void
-LinearScan::insertUpperVectorSaveAndReload(GenTreePtr tree, RefPosition* refPosition, BasicBlock* block)
+void LinearScan::insertUpperVectorSaveAndReload(GenTreePtr tree, RefPosition* refPosition, BasicBlock* block)
{
Interval* lclVarInterval = refPosition->getInterval()->relatedInterval;
assert(lclVarInterval->isLocalVar == true);
- LclVarDsc * varDsc = compiler->lvaTable + lclVarInterval->varNum;
+ LclVarDsc* varDsc = compiler->lvaTable + lclVarInterval->varNum;
assert(varDsc->lvType == LargeVectorType);
regNumber lclVarReg = lclVarInterval->physReg;
if (lclVarReg == REG_NA)
{
return;
}
-
+
assert((genRegMask(lclVarReg) & RBM_FLT_CALLEE_SAVED) != RBM_NONE);
- regNumber spillReg = refPosition->assignedReg();
- bool spillToMem = refPosition->spillAfter;
+ regNumber spillReg = refPosition->assignedReg();
+ bool spillToMem = refPosition->spillAfter;
// We will insert the save before the statement containing 'tree', and the restore after it.
// They will each be inserted as embedded statements.
@@ -7745,15 +7709,17 @@ LinearScan::insertUpperVectorSaveAndReload(GenTreePtr tree, RefPosition* refPosi
// First, insert the save as an embedded statement before the call.
- GenTreePtr saveLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, LargeVectorType);
+ GenTreePtr saveLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, LargeVectorType);
saveLcl->gtLsraInfo.isLsraAdded = true;
- saveLcl->gtRegNum = lclVarReg;
+ saveLcl->gtRegNum = lclVarReg;
saveLcl->gtFlags |= GTF_REG_VAL;
saveLcl->gtLsraInfo.isLocalDefUse = false;
- GenTreeSIMD* simdNode = new (compiler, GT_SIMD) GenTreeSIMD(LargeVectorSaveType, saveLcl, nullptr, SIMDIntrinsicUpperSave, varDsc->lvBaseType, genTypeSize(LargeVectorType));
+ GenTreeSIMD* simdNode =
+ new (compiler, GT_SIMD) GenTreeSIMD(LargeVectorSaveType, saveLcl, nullptr, SIMDIntrinsicUpperSave,
+ varDsc->lvBaseType, genTypeSize(LargeVectorType));
simdNode->gtLsraInfo.isLsraAdded = true;
- simdNode->gtRegNum = spillReg;
+ simdNode->gtRegNum = spillReg;
if (spillToMem)
{
simdNode->gtFlags |= GTF_SPILL;
@@ -7762,22 +7728,22 @@ LinearScan::insertUpperVectorSaveAndReload(GenTreePtr tree, RefPosition* refPosi
// Now insert the restore after the call.
- GenTreePtr restoreLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, LargeVectorType);
+ GenTreePtr restoreLcl = compiler->gtNewLclvNode(lclVarInterval->varNum, LargeVectorType);
restoreLcl->gtLsraInfo.isLsraAdded = true;
- restoreLcl->gtRegNum = lclVarReg;
+ restoreLcl->gtRegNum = lclVarReg;
restoreLcl->gtFlags |= GTF_REG_VAL;
restoreLcl->gtLsraInfo.isLocalDefUse = false;
- simdNode = new (compiler, GT_SIMD) GenTreeSIMD(LargeVectorType, restoreLcl, nullptr, SIMDIntrinsicUpperRestore, varDsc->lvBaseType, 32);
+ simdNode = new (compiler, GT_SIMD)
+ GenTreeSIMD(LargeVectorType, restoreLcl, nullptr, SIMDIntrinsicUpperRestore, varDsc->lvBaseType, 32);
simdNode->gtLsraInfo.isLsraAdded = true;
- simdNode->gtRegNum = spillReg;
+ simdNode->gtRegNum = spillReg;
if (spillToMem)
{
simdNode->gtFlags |= GTF_SPILLED;
}
compiler->fgInsertTreeAfterAsEmbedded(simdNode, tree, stmt->AsStmt(), block);
-
}
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
@@ -7796,14 +7762,13 @@ LinearScan::insertUpperVectorSaveAndReload(GenTreePtr tree, RefPosition* refPosi
// Assumptions:
// This is called before any calls to updateMaxSpill().
-void
-LinearScan::initMaxSpill()
+void LinearScan::initMaxSpill()
{
needDoubleTmpForFPCall = false;
- needFloatTmpForFPCall = false;
- for (int i=0; i < TYP_COUNT; i++)
+ needFloatTmpForFPCall = false;
+ for (int i = 0; i < TYP_COUNT; i++)
{
- maxSpill[i] = 0;
+ maxSpill[i] = 0;
currentSpill[i] = 0;
}
}
@@ -7822,8 +7787,7 @@ LinearScan::initMaxSpill()
// This is called after updateMaxSpill() has been called for all "real"
// RefPositions.
-void
-LinearScan::recordMaxSpill()
+void LinearScan::recordMaxSpill()
{
// Note: due to the temp normalization process (see tmpNormalizeType)
// only a few types should actually be seen here.
@@ -7876,21 +7840,19 @@ LinearScan::recordMaxSpill()
// phase of LSRA. It keeps track of how many concurrently-live
// spills there are, and the largest number seen so far.
-void
-LinearScan::updateMaxSpill(RefPosition* refPosition)
+void LinearScan::updateMaxSpill(RefPosition* refPosition)
{
RefType refType = refPosition->refType;
- if (refPosition->spillAfter ||
- refPosition->reload ||
+ if (refPosition->spillAfter || refPosition->reload ||
(refPosition->AllocateIfProfitable() && refPosition->assignedReg() == REG_NA))
{
Interval* interval = refPosition->getInterval();
if (!interval->isLocalVar)
{
// The tmp allocation logic 'normalizes' types to a small number of
- // types that need distinct stack locations from each other.
- // Those types are currently gc refs, byrefs, <= 4 byte non-GC items,
+ // types that need distinct stack locations from each other.
+ // Those types are currently gc refs, byrefs, <= 4 byte non-GC items,
// 8-byte non-GC items, and 16-byte or 32-byte SIMD vectors.
// LSRA is agnostic to those choices but needs
// to know what they are here.
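
Editorial note: as the comments above say, updateMaxSpill keeps a running count of concurrently-live spilled values per normalized temp type and remembers the high-water mark. The counting idea, stripped of the JIT types (hypothetical sketch, invented names):

#include <algorithm>
#include <cstdio>

// Hypothetical normalized temp classes; the real normalization is described above.
enum SpillType { SPILL_INT, SPILL_REF, SPILL_DOUBLE, SPILL_TYPE_COUNT };

struct SpillCounter
{
    int current[SPILL_TYPE_COUNT] = {};
    int maxSeen[SPILL_TYPE_COUNT] = {};

    // A def that will be spilled raises the live-spill count for its type.
    void onSpillDef(SpillType t)
    {
        current[t]++;
        maxSeen[t] = std::max(maxSeen[t], current[t]);
    }

    // A reload (or a use serviced directly from the spill location) releases it.
    void onReloadOrLastUse(SpillType t)
    {
        current[t]--;
    }
};

int main()
{
    SpillCounter c;
    c.onSpillDef(SPILL_INT);
    c.onSpillDef(SPILL_INT); // two int spills live at once
    c.onReloadOrLastUse(SPILL_INT);
    c.onSpillDef(SPILL_DOUBLE);
    printf("max int spills: %d, max double spills: %d\n",
           c.maxSeen[SPILL_INT], c.maxSeen[SPILL_DOUBLE]); // prints 2 and 1
    return 0;
}
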
@@ -7917,7 +7879,7 @@ LinearScan::updateMaxSpill(RefPosition* refPosition)
if (treeNode->IsMultiRegCall())
{
ReturnTypeDesc* retTypeDesc = treeNode->AsCall()->GetReturnTypeDesc();
- typ = retTypeDesc->GetReturnRegType(refPosition->getMultiRegIdx());
+ typ = retTypeDesc->GetReturnRegType(refPosition->getMultiRegIdx());
}
else
{
@@ -7939,8 +7901,7 @@ LinearScan::updateMaxSpill(RefPosition* refPosition)
assert(currentSpill[typ] > 0);
currentSpill[typ]--;
}
- else if (refPosition->AllocateIfProfitable() &&
- refPosition->assignedReg() == REG_NA)
+ else if (refPosition->AllocateIfProfitable() && refPosition->assignedReg() == REG_NA)
{
// A spill temp not getting reloaded into a reg because it is
// marked as allocate if profitable and getting used from its
@@ -7958,8 +7919,7 @@ LinearScan::updateMaxSpill(RefPosition* refPosition)
// This is the final phase of register allocation. It writes the register assignments to
// the tree, and performs resolution across joins and backedges.
//
-void
-LinearScan::resolveRegisters()
+void LinearScan::resolveRegisters()
{
// Iterate over the tree and the RefPositions in lockstep
// - annotate the tree with register assignments by setting gtRegNum or gtRegPair (for longs)
@@ -7981,31 +7941,29 @@ LinearScan::resolveRegisters()
// At each branch, we identify the location of each liveOut interval, and check
// against the RefPositions at the target.
- BasicBlock * block;
+ BasicBlock* block;
LsraLocation currentLocation = MinLocation;
// Clear register assignments - these will be reestablished as lclVar defs (including RefTypeParamDefs)
// are encountered.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
- RegRecord * physRegRecord = getRegisterRecord(reg);
- Interval * assignedInterval = physRegRecord->assignedInterval;
+ RegRecord* physRegRecord = getRegisterRecord(reg);
+ Interval* assignedInterval = physRegRecord->assignedInterval;
if (assignedInterval != nullptr)
{
assignedInterval->assignedReg = nullptr;
- assignedInterval->physReg = REG_NA;
+ assignedInterval->physReg = REG_NA;
}
- physRegRecord->assignedInterval = nullptr;
+ physRegRecord->assignedInterval = nullptr;
physRegRecord->recentRefPosition = nullptr;
}
// Clear "recentRefPosition" for lclVar intervals
- for (unsigned lclNum = 0;
- lclNum < compiler->lvaCount;
- lclNum++)
+ for (unsigned lclNum = 0; lclNum < compiler->lvaCount; lclNum++)
{
localVarIntervals[lclNum]->recentRefPosition = nullptr;
- localVarIntervals[lclNum]->isActive = false;
+ localVarIntervals[lclNum]->isActive = false;
}
// handle incoming arguments and special temps
@@ -8015,11 +7973,11 @@ LinearScan::resolveRegisters()
while (currentRefPosition != refPositions.end() &&
(currentRefPosition->refType == RefTypeParamDef || currentRefPosition->refType == RefTypeZeroInit))
{
- Interval * interval = currentRefPosition->getInterval();
+ Interval* interval = currentRefPosition->getInterval();
assert(interval != nullptr && interval->isLocalVar);
resolveLocalRef(nullptr, currentRefPosition);
- regNumber reg = REG_STK;
- int varIndex = interval->getVarIndex(compiler);
+ regNumber reg = REG_STK;
+ int varIndex = interval->getVarIndex(compiler);
if (!currentRefPosition->spillAfter && currentRefPosition->registerAssignment != RBM_NONE)
{
@@ -8027,7 +7985,7 @@ LinearScan::resolveRegisters()
}
else
{
- reg = REG_STK;
+ reg = REG_STK;
interval->isActive = false;
}
entryVarToRegMap[varIndex] = reg;
@@ -8038,13 +7996,11 @@ LinearScan::resolveRegisters()
JITDUMP("WRITING BACK ASSIGNMENTS\n");
JITDUMP("------------------------\n");
- BasicBlock * insertionBlock = compiler->fgFirstBB;
- GenTreePtr insertionPoint = insertionBlock->FirstNonPhiDef();
+ BasicBlock* insertionBlock = compiler->fgFirstBB;
+ GenTreePtr insertionPoint = insertionBlock->FirstNonPhiDef();
// write back assignments
- for( block = startBlockSequence();
- block != nullptr;
- block = moveToNextBlock())
+ for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
{
assert(curBBNum == block->bbNum);
@@ -8066,14 +8022,13 @@ LinearScan::resolveRegisters()
}
// Handle the DummyDefs, updating the incoming var location.
- for ( ;
- currentRefPosition != refPositions.end() && currentRefPosition->refType == RefTypeDummyDef;
- ++currentRefPosition)
+ for (; currentRefPosition != refPositions.end() && currentRefPosition->refType == RefTypeDummyDef;
+ ++currentRefPosition)
{
assert(currentRefPosition->isIntervalRef());
// Don't mark dummy defs as reload
currentRefPosition->reload = false;
- resolveLocalRef(NULL, currentRefPosition);
+ resolveLocalRef(nullptr, currentRefPosition);
regNumber reg;
if (currentRefPosition->registerAssignment != RBM_NONE)
{
@@ -8081,7 +8036,7 @@ LinearScan::resolveRegisters()
}
else
{
- reg = REG_STK;
+ reg = REG_STK;
currentRefPosition->getInterval()->isActive = false;
}
setInVarRegForBB(curBBNum, currentRefPosition->getInterval()->varNum, reg);
@@ -8093,9 +8048,9 @@ LinearScan::resolveRegisters()
++currentRefPosition;
// Handle the RefPositions for the block
- for ( ;
- currentRefPosition != refPositions.end() && currentRefPosition->refType != RefTypeBB && currentRefPosition->refType != RefTypeDummyDef;
- ++currentRefPosition)
+ for (; currentRefPosition != refPositions.end() && currentRefPosition->refType != RefTypeBB &&
+ currentRefPosition->refType != RefTypeDummyDef;
+ ++currentRefPosition)
{
currentLocation = currentRefPosition->nodeLocation;
JITDUMP("current : ");
@@ -8110,41 +8065,42 @@ LinearScan::resolveRegisters()
switch (currentRefPosition->refType)
{
#ifdef FEATURE_SIMD
- case RefTypeUpperVectorSaveUse:
- case RefTypeUpperVectorSaveDef:
+ case RefTypeUpperVectorSaveUse:
+ case RefTypeUpperVectorSaveDef:
#endif // FEATURE_SIMD
- case RefTypeUse:
- case RefTypeDef:
- // These are the ones we're interested in
- break;
- case RefTypeKill:
- case RefTypeFixedReg:
- // These require no handling at resolution time
- assert(currentRefPosition->referent != nullptr);
- currentRefPosition->referent->recentRefPosition = currentRefPosition;
- continue;
- case RefTypeExpUse:
- // Ignore the ExpUse cases - a RefTypeExpUse would only exist if the
- // variable is dead at the entry to the next block. So we'll mark
- // it as in its current location and resolution will take care of any
- // mismatch.
- assert(getNextBlock() == nullptr ||
- !VarSetOps::IsMember(compiler, getNextBlock()->bbLiveIn, currentRefPosition->getInterval()->getVarIndex(compiler)));
- currentRefPosition->referent->recentRefPosition = currentRefPosition;
- continue;
- case RefTypeKillGCRefs:
- // No action to take at resolution time, and no interval to update recentRefPosition for.
- continue;
- case RefTypeDummyDef:
- case RefTypeParamDef:
- case RefTypeZeroInit:
+ case RefTypeUse:
+ case RefTypeDef:
+ // These are the ones we're interested in
+ break;
+ case RefTypeKill:
+ case RefTypeFixedReg:
+ // These require no handling at resolution time
+ assert(currentRefPosition->referent != nullptr);
+ currentRefPosition->referent->recentRefPosition = currentRefPosition;
+ continue;
+ case RefTypeExpUse:
+ // Ignore the ExpUse cases - a RefTypeExpUse would only exist if the
+ // variable is dead at the entry to the next block. So we'll mark
+ // it as in its current location and resolution will take care of any
+ // mismatch.
+ assert(getNextBlock() == nullptr ||
+ !VarSetOps::IsMember(compiler, getNextBlock()->bbLiveIn,
+ currentRefPosition->getInterval()->getVarIndex(compiler)));
+ currentRefPosition->referent->recentRefPosition = currentRefPosition;
+ continue;
+ case RefTypeKillGCRefs:
+ // No action to take at resolution time, and no interval to update recentRefPosition for.
+ continue;
+ case RefTypeDummyDef:
+ case RefTypeParamDef:
+ case RefTypeZeroInit:
// Should have handled all of these already
- default:
- unreached();
- break;
+ default:
+ unreached();
+ break;
}
updateMaxSpill(currentRefPosition);
- GenTree *treeNode = currentRefPosition->treeNode;
+ GenTree* treeNode = currentRefPosition->treeNode;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if (currentRefPosition->refType == RefTypeUpperVectorSaveDef)
@@ -8170,14 +8126,12 @@ LinearScan::resolveRegisters()
// This is either a use, a dead def, or a field of a struct
Interval* interval = currentRefPosition->getInterval();
assert(currentRefPosition->refType == RefTypeUse ||
- currentRefPosition->registerAssignment == RBM_NONE ||
- interval->isStructField);
+ currentRefPosition->registerAssignment == RBM_NONE || interval->isStructField);
// TODO-Review: Need to handle the case where any of the struct fields
// are reloaded/spilled at this use
assert(!interval->isStructField ||
- (currentRefPosition->reload == false &&
- currentRefPosition->spillAfter == false));
+ (currentRefPosition->reload == false && currentRefPosition->spillAfter == false));
if (interval->isLocalVar && !interval->isStructField)
{
@@ -8199,10 +8153,9 @@ LinearScan::resolveRegisters()
LsraLocation loc = treeNode->gtLsraInfo.loc;
JITDUMP("curr = %u mapped = %u", currentLocation, loc);
- assert(treeNode->IsLocal() || currentLocation == loc || currentLocation == loc+1);
+ assert(treeNode->IsLocal() || currentLocation == loc || currentLocation == loc + 1);
- if (currentRefPosition->isIntervalRef()
- && currentRefPosition->getInterval()->isInternal)
+ if (currentRefPosition->isIntervalRef() && currentRefPosition->getInterval()->isInternal)
{
JITDUMP(" internal");
GenTreePtr indNode = nullptr;
@@ -8233,7 +8186,7 @@ LinearScan::resolveRegisters()
if (currentRefPosition->refType == RefTypeDef)
{
JITDUMP(" allocated at GT_ARR_ELEM, recorded on firstIndex V%02u");
- firstIndexTree->gtRsvdRegs = (regMaskSmall) currentRefPosition->registerAssignment;
+ firstIndexTree->gtRsvdRegs = (regMaskSmall)currentRefPosition->registerAssignment;
}
}
}
@@ -8243,7 +8196,7 @@ LinearScan::resolveRegisters()
{
writeRegisters(currentRefPosition, treeNode);
- if (treeNode->IsLocal() && currentRefPosition->getInterval()->isLocalVar)
+ if (treeNode->IsLocal() && currentRefPosition->getInterval()->isLocalVar)
{
resolveLocalRef(treeNode, currentRefPosition);
}
@@ -8252,10 +8205,10 @@ LinearScan::resolveRegisters()
// (local vars are handled in resolveLocalRef, above)
// Note that the tree node will be changed from GTF_SPILL to GTF_SPILLED
// in codegen, taking care of the "reload" case for temps
- else if (currentRefPosition->spillAfter ||
- (currentRefPosition->nextRefPosition != nullptr && currentRefPosition->nextRefPosition->moveReg))
+ else if (currentRefPosition->spillAfter || (currentRefPosition->nextRefPosition != nullptr &&
+ currentRefPosition->nextRefPosition->moveReg))
{
- if (treeNode != nullptr && currentRefPosition->isIntervalRef())
+ if (treeNode != nullptr && currentRefPosition->isIntervalRef())
{
if (currentRefPosition->spillAfter)
{
@@ -8268,7 +8221,7 @@ LinearScan::resolveRegisters()
treeNode->ResetReuseRegVal();
}
- // In case of multi-reg call node, also set spill flag on the
+ // In case of multi-reg call node, also set spill flag on the
// register specified by multi-reg index of current RefPosition.
// Note that the spill flag on treeNode indicates that one or
// more its allocated registers are in that state.
@@ -8284,7 +8237,7 @@ LinearScan::resolveRegisters()
RefPosition* nextRefPosition = currentRefPosition->nextRefPosition;
assert(nextRefPosition != nullptr);
if (INDEBUG(alwaysInsertReload() ||)
- nextRefPosition->assignedReg() != currentRefPosition->assignedReg())
+ nextRefPosition->assignedReg() != currentRefPosition->assignedReg())
{
if (nextRefPosition->assignedReg() != REG_NA)
{
@@ -8297,8 +8250,7 @@ LinearScan::resolveRegisters()
// In case of tree temps, if def is spilled and use didn't
// get a register, set a flag on tree node to be treated as
// contained at the point of its use.
- if (currentRefPosition->spillAfter &&
- currentRefPosition->refType == RefTypeDef &&
+ if (currentRefPosition->spillAfter && currentRefPosition->refType == RefTypeDef &&
nextRefPosition->refType == RefTypeUse)
{
assert(nextRefPosition->treeNode == nullptr);
@@ -8310,7 +8262,10 @@ LinearScan::resolveRegisters()
// We should never have to "spill after" a temp use, since
// they're single use
- else unreached();
+ else
+ {
+ unreached();
+ }
}
}
JITDUMP("\n");
@@ -8330,10 +8285,14 @@ LinearScan::resolveRegisters()
foreach_block(compiler, block)
{
printf("\nBB%02u use def in out\n", block->bbNum);
- dumpConvertedVarSet(compiler, block->bbVarUse); printf("\n");
- dumpConvertedVarSet(compiler, block->bbVarDef); printf("\n");
- dumpConvertedVarSet(compiler, block->bbLiveIn); printf("\n");
- dumpConvertedVarSet(compiler, block->bbLiveOut); printf("\n");
+ dumpConvertedVarSet(compiler, block->bbVarUse);
+ printf("\n");
+ dumpConvertedVarSet(compiler, block->bbVarDef);
+ printf("\n");
+ dumpConvertedVarSet(compiler, block->bbLiveIn);
+ printf("\n");
+ dumpConvertedVarSet(compiler, block->bbLiveOut);
+ printf("\n");
dumpInVarToRegMap(block);
dumpOutVarToRegMap(block);
@@ -8346,11 +8305,9 @@ LinearScan::resolveRegisters()
resolveEdges();
// Verify register assignments on variables
- unsigned lclNum;
- LclVarDsc * varDsc;
- for (lclNum = 0, varDsc = compiler->lvaTable;
- lclNum < compiler->lvaCount;
- lclNum++ , varDsc++)
+ unsigned lclNum;
+ LclVarDsc* varDsc;
+ for (lclNum = 0, varDsc = compiler->lvaTable; lclNum < compiler->lvaCount; lclNum++, varDsc++)
{
if (!isCandidateVar(varDsc))
{
@@ -8358,21 +8315,23 @@ LinearScan::resolveRegisters()
}
else
{
- Interval * interval = getIntervalForLocalVar(lclNum);
+ Interval* interval = getIntervalForLocalVar(lclNum);
// Determine initial position for parameters
if (varDsc->lvIsParam)
{
regMaskTP initialRegMask = interval->firstRefPosition->registerAssignment;
- regNumber initialReg = (initialRegMask == RBM_NONE || interval->firstRefPosition->spillAfter) ?
- REG_STK : genRegNumFromMask(initialRegMask);
+ regNumber initialReg = (initialRegMask == RBM_NONE || interval->firstRefPosition->spillAfter)
+ ? REG_STK
+ : genRegNumFromMask(initialRegMask);
regNumber sourceReg = (varDsc->lvIsRegArg) ? varDsc->lvArgReg : REG_STK;
#ifdef _TARGET_ARM_
if (varTypeIsMultiReg(varDsc))
{
- // TODO-ARM-NYI: Map the hi/lo intervals back to lvRegNum and lvOtherReg (these should NYI before this)
+ // TODO-ARM-NYI: Map the hi/lo intervals back to lvRegNum and lvOtherReg (these should NYI before
+ // this)
assert(!"Multi-reg types not yet supported");
}
else
@@ -8392,15 +8351,14 @@ LinearScan::resolveRegisters()
// fields, so we have to do that if it's not already
// where it belongs.
assert(interval->isStructField);
- JITDUMP(" Move struct field param V%02u from %s to %s\n", lclNum,
- getRegName(sourceReg), getRegName(initialReg));
- insertMove (insertionBlock, insertionPoint, lclNum,
- sourceReg, initialReg);
+ JITDUMP(" Move struct field param V%02u from %s to %s\n", lclNum, getRegName(sourceReg),
+ getRegName(initialReg));
+ insertMove(insertionBlock, insertionPoint, lclNum, sourceReg, initialReg);
}
}
}
}
-
+
// If lvRegNum is REG_STK, that means that either no register
// was assigned, or (more likely) that the same register was not
// used for all references. In that case, codegen gets the register
@@ -8415,7 +8373,7 @@ LinearScan::resolveRegisters()
// Skip any dead defs or exposed uses
// (first use exposed will only occur when there is no explicit initialization)
- RefPosition * firstRefPosition = interval->firstRefPosition;
+ RefPosition* firstRefPosition = interval->firstRefPosition;
while ((firstRefPosition != nullptr) && (firstRefPosition->refType == RefTypeExpUse))
{
firstRefPosition = firstRefPosition->nextRefPosition;
@@ -8462,21 +8420,19 @@ LinearScan::resolveRegisters()
{
{
varDsc->lvRegister = true;
- varDsc->lvOnFrame = false;
+ varDsc->lvOnFrame = false;
}
#ifdef DEBUG
regMaskTP registerAssignment = genRegMask(varDsc->lvRegNum);
assert(!interval->isSpilled && !interval->isSplit);
- RefPosition * refPosition = interval->firstRefPosition;
+ RefPosition* refPosition = interval->firstRefPosition;
assert(refPosition != nullptr);
while (refPosition != nullptr)
{
// All RefPositions must match, except for dead definitions,
// copyReg/moveReg and RefTypeExpUse positions
- if (refPosition->registerAssignment != RBM_NONE &&
- !refPosition->copyReg &&
- !refPosition->moveReg &&
+ if (refPosition->registerAssignment != RBM_NONE && !refPosition->copyReg && !refPosition->moveReg &&
refPosition->refType != RefTypeExpUse)
{
assert(refPosition->registerAssignment == registerAssignment);
@@ -8501,7 +8457,7 @@ LinearScan::resolveRegisters()
recordMaxSpill();
// TODO-CQ: Review this comment and address as needed.
- // Change all unused promoted non-argument struct locals to a non-GC type (in this case TYP_INT)
+ // Change all unused promoted non-argument struct locals to a non-GC type (in this case TYP_INT)
// so that the gc tracking logic and lvMustInit logic will ignore them.
// Extract the code that does this from raAssignVars, and call it here.
// PRECONDITIONS: Ensure that lvPromoted is set on promoted structs, if and
@@ -8510,8 +8466,7 @@ LinearScan::resolveRegisters()
// compiler->BashUnusedStructLocals();
}
-
-//
+//
//------------------------------------------------------------------------
// insertMove: Insert a move of a lclVar with the given lclNum into the given block.
//
@@ -8530,14 +8485,10 @@ LinearScan::resolveRegisters()
// otherwise, insert "near" the end (prior to the branch, if any).
// If fromReg or toReg is REG_STK, then move from/to memory, respectively.
-void
-LinearScan::insertMove(BasicBlock * block,
- GenTreePtr insertionPoint,
- unsigned lclNum,
- regNumber fromReg,
- regNumber toReg)
+void LinearScan::insertMove(
+ BasicBlock* block, GenTreePtr insertionPoint, unsigned lclNum, regNumber fromReg, regNumber toReg)
{
- LclVarDsc * varDsc = compiler->lvaTable + lclNum;
+ LclVarDsc* varDsc = compiler->lvaTable + lclNum;
// One or both MUST be a register
assert(fromReg != REG_STK || toReg != REG_STK);
// They must not be the same register.
@@ -8548,8 +8499,10 @@ LinearScan::insertMove(BasicBlock * block,
var_types lclTyp = varDsc->TypeGet();
if (varDsc->lvNormalizeOnStore())
+ {
lclTyp = genActualType(lclTyp);
- GenTreePtr src = compiler->gtNewLclvNode(lclNum, lclTyp);
+ }
+ GenTreePtr src = compiler->gtNewLclvNode(lclNum, lclTyp);
src->gtLsraInfo.isLsraAdded = true;
GenTreePtr top;
@@ -8571,19 +8524,20 @@ LinearScan::insertMove(BasicBlock * block,
}
else
{
- top = new(compiler, GT_COPY) GenTreeCopyOrReload(GT_COPY, varDsc->TypeGet(), src);
+ top = new (compiler, GT_COPY) GenTreeCopyOrReload(GT_COPY, varDsc->TypeGet(), src);
// This is the new home of the lclVar - indicate that by clearing the GTF_VAR_DEATH flag.
// Note that if src is itself a lastUse, this will have no effect.
top->gtFlags &= ~(GTF_VAR_DEATH);
src->gtRegNum = fromReg;
src->SetInReg();
- top->gtRegNum = toReg;
- src->gtNext = top; top->gtPrev = src;
+ top->gtRegNum = toReg;
+ src->gtNext = top;
+ top->gtPrev = src;
src->gtLsraInfo.isLocalDefUse = false;
- top->gtLsraInfo.isLsraAdded = true;
+ top->gtLsraInfo.isLsraAdded = true;
}
top->gtLsraInfo.isLocalDefUse = true;
- GenTreePtr stmt = compiler->gtNewStmt(top);
+ GenTreePtr stmt = compiler->gtNewStmt(top);
compiler->gtSetStmtInfo(stmt);
// The top-level node has no gtNext, and src has no gtPrev - they are set that way
@@ -8600,25 +8554,25 @@ LinearScan::insertMove(BasicBlock * block,
{
// Put the copy at the bottom
// If there's a branch, make an embedded statement that executes just prior to the branch
- if (block->bbJumpKind == BBJ_COND ||
- block->bbJumpKind == BBJ_SWITCH)
+ if (block->bbJumpKind == BBJ_COND || block->bbJumpKind == BBJ_SWITCH)
{
stmt->gtFlags &= ~GTF_STMT_TOP_LEVEL;
noway_assert(block->bbTreeList != nullptr);
- GenTreePtr lastStmt = block->lastStmt();
+ GenTreePtr lastStmt = block->lastStmt();
GenTreePtr branchStmt = block->lastTopLevelStmt();
- GenTreePtr branch = branchStmt->gtStmt.gtStmtExpr;
- assert(branch->OperGet() == GT_JTRUE || branch->OperGet() == GT_SWITCH_TABLE || branch->OperGet() == GT_SWITCH);
+ GenTreePtr branch = branchStmt->gtStmt.gtStmtExpr;
+ assert(branch->OperGet() == GT_JTRUE || branch->OperGet() == GT_SWITCH_TABLE ||
+ branch->OperGet() == GT_SWITCH);
GenTreePtr prev = branch->gtPrev;
- prev->gtNext = src;
- src->gtPrev = prev;
- branch->gtPrev = top;
- top->gtNext = branch;
-
- stmt->gtNext = nullptr;
- stmt->gtPrev = lastStmt;
- lastStmt->gtNext = stmt;
+ prev->gtNext = src;
+ src->gtPrev = prev;
+ branch->gtPrev = top;
+ top->gtNext = branch;
+
+ stmt->gtNext = nullptr;
+ stmt->gtPrev = lastStmt;
+ lastStmt->gtNext = stmt;
block->bbTreeList->gtPrev = stmt;
}
else
@@ -8629,47 +8583,42 @@ LinearScan::insertMove(BasicBlock * block,
}
}
-void
-LinearScan::insertSwap(BasicBlock* block,
- GenTreePtr insertionPoint,
- unsigned lclNum1,
- regNumber reg1,
- unsigned lclNum2,
- regNumber reg2)
+void LinearScan::insertSwap(
+ BasicBlock* block, GenTreePtr insertionPoint, unsigned lclNum1, regNumber reg1, unsigned lclNum2, regNumber reg2)
{
#ifdef DEBUG
if (VERBOSE)
{
- const char * insertionPointString = "top";
+ const char* insertionPointString = "top";
if (insertionPoint == nullptr)
{
insertionPointString = "bottom";
}
- printf(" BB%02u %s: swap V%02u in %s with V%02u in %s\n",
- block->bbNum, insertionPointString, lclNum1, getRegName(reg1), lclNum2, getRegName(reg2));
+ printf(" BB%02u %s: swap V%02u in %s with V%02u in %s\n", block->bbNum, insertionPointString, lclNum1,
+ getRegName(reg1), lclNum2, getRegName(reg2));
}
#endif // DEBUG
- LclVarDsc * varDsc1 = compiler->lvaTable + lclNum1;
- LclVarDsc * varDsc2 = compiler->lvaTable + lclNum2;
+ LclVarDsc* varDsc1 = compiler->lvaTable + lclNum1;
+ LclVarDsc* varDsc2 = compiler->lvaTable + lclNum2;
assert(reg1 != REG_STK && reg1 != REG_NA && reg2 != REG_STK && reg2 != REG_NA);
- GenTreePtr lcl1 = compiler->gtNewLclvNode(lclNum1, varDsc1->TypeGet());
- lcl1->gtLsraInfo.isLsraAdded = true;
+ GenTreePtr lcl1 = compiler->gtNewLclvNode(lclNum1, varDsc1->TypeGet());
+ lcl1->gtLsraInfo.isLsraAdded = true;
lcl1->gtLsraInfo.isLocalDefUse = false;
lcl1->SetInReg();
lcl1->gtRegNum = reg1;
- GenTreePtr lcl2 = compiler->gtNewLclvNode(lclNum2, varDsc2->TypeGet());
- lcl2->gtLsraInfo.isLsraAdded = true;
+ GenTreePtr lcl2 = compiler->gtNewLclvNode(lclNum2, varDsc2->TypeGet());
+ lcl2->gtLsraInfo.isLsraAdded = true;
lcl2->gtLsraInfo.isLocalDefUse = false;
lcl2->SetInReg();
lcl2->gtRegNum = reg2;
- GenTreePtr swap = compiler->gtNewOperNode(GT_SWAP, TYP_VOID, lcl1, lcl2);
- swap->gtLsraInfo.isLsraAdded = true;
+ GenTreePtr swap = compiler->gtNewOperNode(GT_SWAP, TYP_VOID, lcl1, lcl2);
+ swap->gtLsraInfo.isLsraAdded = true;
swap->gtLsraInfo.isLocalDefUse = false;
- swap->gtRegNum = REG_NA;
+ swap->gtRegNum = REG_NA;
lcl1->gtNext = lcl2;
lcl2->gtPrev = lcl1;
@@ -8693,25 +8642,24 @@ LinearScan::insertSwap(BasicBlock* block,
{
// Put the copy at the bottom
// If there's a branch, make an embedded statement that executes just prior to the branch
- if (block->bbJumpKind == BBJ_COND ||
- block->bbJumpKind == BBJ_SWITCH)
+ if (block->bbJumpKind == BBJ_COND || block->bbJumpKind == BBJ_SWITCH)
{
stmt->gtFlags &= ~GTF_STMT_TOP_LEVEL;
noway_assert(block->bbTreeList != nullptr);
- GenTreePtr lastStmt = block->lastStmt();
+ GenTreePtr lastStmt = block->lastStmt();
GenTreePtr branchStmt = block->lastTopLevelStmt();
- GenTreePtr branch = branchStmt->gtStmt.gtStmtExpr;
+ GenTreePtr branch = branchStmt->gtStmt.gtStmtExpr;
assert(branch->OperGet() == GT_JTRUE || branch->OperGet() == GT_SWITCH);
GenTreePtr prev = branch->gtPrev;
- prev->gtNext = lcl1;
- lcl1->gtPrev = prev;
- branch->gtPrev = swap;
- swap->gtNext = branch;
-
- stmt->gtNext = nullptr;
- stmt->gtPrev = lastStmt;
- lastStmt->gtNext = stmt;
+ prev->gtNext = lcl1;
+ lcl1->gtPrev = prev;
+ branch->gtPrev = swap;
+ swap->gtNext = branch;
+
+ stmt->gtNext = nullptr;
+ stmt->gtPrev = lastStmt;
+ lastStmt->gtNext = stmt;
block->bbTreeList->gtPrev = stmt;
}
else
@@ -8738,8 +8686,7 @@ LinearScan::insertSwap(BasicBlock* block,
// available, and to handle that case appropriately.
// It is also up to the caller to cache the return value, as this is not cheap to compute.
-regNumber
-LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type)
+regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type)
{
// TODO-Throughput: This would be much more efficient if we add RegToVarMaps instead of VarToRegMaps
// and they would be more space-efficient as well.
@@ -8760,12 +8707,16 @@ LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock,
while (iter.NextElem(compiler, &varIndex) && freeRegs != RBM_NONE)
{
regNumber fromReg = fromVarToRegMap[varIndex];
- regNumber toReg = toVarToRegMap[varIndex];
+ regNumber toReg = toVarToRegMap[varIndex];
assert(fromReg != REG_NA && toReg != REG_NA);
if (fromReg != REG_STK)
+ {
freeRegs &= ~genRegMask(fromReg);
+ }
if (toReg != REG_STK)
+ {
freeRegs &= ~genRegMask(toReg);
+ }
}
if (freeRegs == RBM_NONE)
{
@@ -8778,7 +8729,6 @@ LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock,
}
}
-
//------------------------------------------------------------------------
// addResolution: Add a resolution move of the given interval
//
@@ -8802,15 +8752,11 @@ LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock,
// The next time, we want to move from the stack to the destination (toReg),
// in which case fromReg will be REG_STK, and we insert at the top.
-void
-LinearScan::addResolution(BasicBlock* block,
- GenTreePtr insertionPoint,
- Interval* interval,
- regNumber toReg,
- regNumber fromReg)
+void LinearScan::addResolution(
+ BasicBlock* block, GenTreePtr insertionPoint, Interval* interval, regNumber toReg, regNumber fromReg)
{
#ifdef DEBUG
- const char * insertionPointString = "top";
+ const char* insertionPointString = "top";
#endif // DEBUG
if (insertionPoint == nullptr)
{
@@ -8823,8 +8769,14 @@ LinearScan::addResolution(BasicBlock* block,
JITDUMP("%s to %s", getRegName(fromReg), getRegName(toReg));
insertMove(block, insertionPoint, interval->varNum, fromReg, toReg);
- if (fromReg == REG_STK || toReg == REG_STK) interval->isSpilled = true;
- else interval->isSplit = true;
+ if (fromReg == REG_STK || toReg == REG_STK)
+ {
+ interval->isSpilled = true;
+ }
+ else
+ {
+ interval->isSplit = true;
+ }
}
//------------------------------------------------------------------------
@@ -8841,8 +8793,7 @@ LinearScan::addResolution(BasicBlock* block,
// a join edge), if there are any conflicts, split the edge by adding a new block,
// and generate the resolution code into that block.
-void
-LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
+void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
{
VARSET_TP VARSET_INIT_NOCOPY(sameResolutionSet, VarSetOps::MakeEmpty(compiler));
VARSET_TP VARSET_INIT_NOCOPY(sameLivePathsSet, VarSetOps::MakeEmpty(compiler));
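
Editorial note: handleOutgoingCriticalEdges, as its comment above notes, deals with blocks whose outgoing edges are critical (the target also has other predecessors); when the resolution moves cannot be shared, the edge is split by inserting a new block to hold them. A toy sketch of the edge classification only, with an invented block type rather than the JIT's BasicBlock:

#include <cstdio>
#include <vector>

// Minimal stand-in for a flow-graph block.
struct Block
{
    std::vector<Block*> preds;
    std::vector<Block*> succs;
};

// An edge is critical when the source has several successors and the target has
// several predecessors; resolution code for that edge can then only be placed on
// a new block inserted along the edge.
static bool isCriticalEdge(const Block* from, const Block* to)
{
    return from->succs.size() > 1 && to->preds.size() > 1;
}

int main()
{
    Block a, b, c;
    a.succs = {&b, &c};
    b.preds = {&a};
    c.preds = {&a, &b};
    printf("a->b critical: %d\n", isCriticalEdge(&a, &b)); // 0
    printf("a->c critical: %d\n", isCriticalEdge(&a, &c)); // 1
    return 0;
}
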
@@ -8851,10 +8802,10 @@ LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
// Get the outVarToRegMap for this block
VarToRegMap outVarToRegMap = getOutVarToRegMap(block->bbNum);
- unsigned succCount = block->NumSucc(compiler);
+ unsigned succCount = block->NumSucc(compiler);
assert(succCount > 1);
VarToRegMap firstSuccInVarToRegMap = nullptr;
- BasicBlock* firstSucc = nullptr;
+ BasicBlock* firstSucc = nullptr;
// First, determine the live regs at the end of this block so that we know what regs are
// available to copy into.
@@ -8862,8 +8813,8 @@ LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
VARSET_ITER_INIT(compiler, iter1, block->bbLiveOut, varIndex1);
while (iter1.NextElem(compiler, &varIndex1))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex1];
- regNumber fromReg = getVarReg(outVarToRegMap, varNum);
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex1];
+ regNumber fromReg = getVarReg(outVarToRegMap, varNum);
if (fromReg != REG_STK)
{
liveOutRegs |= genRegMask(fromReg);
@@ -8880,9 +8831,9 @@ LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
GenTree* lastStmt = block->lastStmt();
assert(lastStmt != nullptr && lastStmt->gtStmt.gtStmtExpr->gtOper == GT_SWITCH_TABLE);
GenTree* switchTable = lastStmt->gtStmt.gtStmtExpr;
- switchRegs = switchTable->gtRsvdRegs;
- GenTree* op1 = switchTable->gtGetOp1();
- GenTree* op2 = switchTable->gtGetOp2();
+ switchRegs = switchTable->gtRsvdRegs;
+ GenTree* op1 = switchTable->gtGetOp1();
+ GenTree* op2 = switchTable->gtGetOp2();
noway_assert(op1 != nullptr && op2 != nullptr);
assert(op1->gtRegNum != REG_NA && op2->gtRegNum != REG_NA);
switchRegs |= genRegMask(op1->gtRegNum);
@@ -8905,14 +8856,14 @@ LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
VARSET_ITER_INIT(compiler, iter, block->bbLiveOut, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- regNumber fromReg = getVarReg(outVarToRegMap, varNum);
- bool isMatch = true;
- bool isSame = false;
- bool maybeSingleTarget = false;
- bool maybeSameLivePaths = false;
- bool liveOnlyAtSplitEdge = true;
- regNumber sameToReg = REG_NA;
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ regNumber fromReg = getVarReg(outVarToRegMap, varNum);
+ bool isMatch = true;
+ bool isSame = false;
+ bool maybeSingleTarget = false;
+ bool maybeSameLivePaths = false;
+ bool liveOnlyAtSplitEdge = true;
+ regNumber sameToReg = REG_NA;
for (unsigned succIndex = 0; succIndex < succCount; succIndex++)
{
BasicBlock* succBlock = block->GetSucc(succIndex, compiler);
@@ -9026,16 +8977,17 @@ LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
// Now collect the resolution set for just this edge, if any.
// Check only the vars in diffResolutionSet that are live-in to this successor.
- bool needsResolution = false;
+ bool needsResolution = false;
VarToRegMap succInVarToRegMap = getInVarToRegMap(succBlock->bbNum);
- VARSET_TP VARSET_INIT_NOCOPY(edgeResolutionSet, VarSetOps::Intersection(compiler, diffResolutionSet, succBlock->bbLiveIn));
+ VARSET_TP VARSET_INIT_NOCOPY(edgeResolutionSet,
+ VarSetOps::Intersection(compiler, diffResolutionSet, succBlock->bbLiveIn));
VARSET_ITER_INIT(compiler, iter, edgeResolutionSet, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- Interval * interval = getIntervalForLocalVar(varNum);
- regNumber fromReg = getVarReg(outVarToRegMap, varNum);
- regNumber toReg = getVarReg(succInVarToRegMap, varNum);
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ Interval* interval = getIntervalForLocalVar(varNum);
+ regNumber fromReg = getVarReg(outVarToRegMap, varNum);
+ regNumber toReg = getVarReg(succInVarToRegMap, varNum);
if (fromReg == toReg)
{
@@ -9069,19 +9021,18 @@ LinearScan::handleOutgoingCriticalEdges(BasicBlock* block)
// Note that a block may have both 'split' or 'critical' incoming edge(s) and 'join' outgoing
// edges.
-void
-LinearScan::resolveEdges()
+void LinearScan::resolveEdges()
{
JITDUMP("RESOLVING EDGES\n");
- BasicBlock * block, *prevBlock = nullptr;
+ BasicBlock *block, *prevBlock = nullptr;
// Handle all the critical edges first.
// We will try to avoid resolution across critical edges in cases where all the critical-edge
// targets of a block have the same home. We will then split the edges only for the
// remaining mismatches. We visit the out-edges, as that allows us to share the moves that are
// common among all the targets.
-
+
foreach_block(compiler, block)
{
if (block->bbNum > bbNumMaxBeforeResolution)
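The comments above describe the strategy for critical out-edges: when every critical successor of a block expects a live variable in the same register, one shared move at the end of the predecessor suffices, and only the mismatching edges need to be split. A minimal, self-contained sketch of that decision, using hypothetical simplified types (Reg, Edge) rather than the JIT's own:

    #include <cstdio>
    #include <vector>

    using Reg = int;
    struct Edge
    {
        int succ;  // successor block number
        Reg toReg; // register the successor expects the variable in
    };

    // Returns true when every critical successor wants the variable in the same
    // register, so any needed resolution can be a single move shared by all of them.
    static bool AllSuccessorsAgree(const std::vector<Edge>& edges, Reg* sharedTo)
    {
        Reg to = edges.empty() ? -1 : edges[0].toReg;
        for (const Edge& e : edges)
        {
            if (e.toReg != to)
            {
                return false; // mismatch: this edge must be split and resolved on its own
            }
        }
        *sharedTo = to;
        return true;
    }

    int main()
    {
        std::vector<Edge> edges = {{2, 5}, {3, 5}};
        Reg fromReg = 3;
        Reg sharedTo;
        if (AllSuccessorsAgree(edges, &sharedTo))
        {
            if (sharedTo != fromReg)
            {
                std::printf("shared move r%d -> r%d at end of predecessor\n", fromReg, sharedTo);
            }
        }
        else
        {
            std::printf("split each mismatching critical edge\n");
        }
        return 0;
    }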
@@ -9104,10 +9055,10 @@ LinearScan::resolveEdges()
// This is a new block added during resolution - we don't need to visit these now.
continue;
}
-
- unsigned succCount = block->NumSucc(compiler);
- flowList * preds = block->bbPreds;
- BasicBlock * uniquePredBlock = block->GetUniquePred(compiler);
+
+ unsigned succCount = block->NumSucc(compiler);
+ flowList* preds = block->bbPreds;
+ BasicBlock* uniquePredBlock = block->GetUniquePred(compiler);
// First, if this block has a single predecessor,
// we may need resolution at the beginning of this block.
@@ -9163,13 +9114,15 @@ LinearScan::resolveEdges()
// two non-resolution blocks. This happens when an edge is split that requires it.
BasicBlock* succBlock = block;
- do {
+ do
+ {
succBlock = succBlock->GetUniqueSucc();
noway_assert(succBlock != nullptr);
} while ((succBlock->bbNum > bbNumMaxBeforeResolution) && succBlock->isEmpty());
BasicBlock* predBlock = block;
- do {
+ do
+ {
predBlock = predBlock->GetUniquePred(compiler);
noway_assert(predBlock != nullptr);
} while ((predBlock->bbNum > bbNumMaxBeforeResolution) && predBlock->isEmpty());
@@ -9209,21 +9162,19 @@ LinearScan::resolveEdges()
continue;
}
VarToRegMap toVarToRegMap = getInVarToRegMap(block->bbNum);
- for (flowList* pred = block->bbPreds;
- pred != nullptr;
- pred = pred->flNext)
+ for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
{
- BasicBlock* predBlock = pred->flBlock;
+ BasicBlock* predBlock = pred->flBlock;
VarToRegMap fromVarToRegMap = getOutVarToRegMap(predBlock->bbNum);
VARSET_ITER_INIT(compiler, iter, block->bbLiveIn, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- regNumber fromReg = getVarReg(fromVarToRegMap, varNum);
- regNumber toReg = getVarReg(toVarToRegMap, varNum);
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ regNumber fromReg = getVarReg(fromVarToRegMap, varNum);
+ regNumber toReg = getVarReg(toVarToRegMap, varNum);
if (fromReg != toReg)
{
- Interval * interval = getIntervalForLocalVar(varNum);
+ Interval* interval = getIntervalForLocalVar(varNum);
if (!foundMismatch)
{
foundMismatch = true;
@@ -9263,11 +9214,10 @@ LinearScan::resolveEdges()
// registers), then the register to register moves, ensuring that the target register
// is free before the move, and then finally the stack to register moves.
-void
-LinearScan::resolveEdge(BasicBlock* fromBlock,
- BasicBlock* toBlock,
- ResolveType resolveType,
- VARSET_VALARG_TP liveSet)
+void LinearScan::resolveEdge(BasicBlock* fromBlock,
+ BasicBlock* toBlock,
+ ResolveType resolveType,
+ VARSET_VALARG_TP liveSet)
{
VarToRegMap fromVarToRegMap = getOutVarToRegMap(fromBlock->bbNum);
VarToRegMap toVarToRegMap;
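The comment above resolveEdge spells out the move ordering: register-to-stack moves are emitted first (which frees registers), then the register-to-register moves (the real code additionally orders these through a ready set so each target register is free before it is written), and finally the stack-to-register loads. A small sketch of that three-phase ordering, with hypothetical Move/EmitMove helpers rather than the JIT's API:

    #include <cstdio>
    #include <vector>

    constexpr int STK = -1; // pseudo-register standing in for a stack location

    struct Move
    {
        int from;
        int to;
    };

    static void EmitMove(const Move& m)
    {
        std::printf("move %d -> %d\n", m.from, m.to);
    }

    // Spills first (reg -> stack), then the register shuffle, then the reloads.
    static void ResolveMoves(const std::vector<Move>& moves)
    {
        for (const Move& m : moves)
        {
            if (m.to == STK) EmitMove(m);
        }
        for (const Move& m : moves)
        {
            if (m.from != STK && m.to != STK) EmitMove(m);
        }
        for (const Move& m : moves)
        {
            if (m.from == STK) EmitMove(m);
        }
    }

    int main()
    {
        ResolveMoves({{2, STK}, {STK, 4}, {1, 3}});
        return 0;
    }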
@@ -9279,31 +9229,31 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
{
toVarToRegMap = getInVarToRegMap(toBlock->bbNum);
}
-
+
// The block to which we add the resolution moves depends on the resolveType
BasicBlock* block;
- switch(resolveType)
+ switch (resolveType)
{
- case ResolveJoin:
- case ResolveSharedCritical:
- block = fromBlock;
- break;
- case ResolveSplit:
- block = toBlock;
- break;
- case ResolveCritical:
- // fgSplitEdge may add one or two BasicBlocks. It returns the block that splits
- // the edge from 'fromBlock' and 'toBlock', but if it inserts that block right after
- // a block with a fall-through it will have to create another block to handle that edge.
- // These new blocks can be mapped to existing blocks in order to correctly handle
- // the calls to recordVarLocationsAtStartOfBB() from codegen. That mapping is handled
- // in resolveEdges(), after all the edge resolution has been done (by calling this
- // method for each edge).
- block = compiler->fgSplitEdge(fromBlock, toBlock);
- break;
- default:
- unreached();
- break;
+ case ResolveJoin:
+ case ResolveSharedCritical:
+ block = fromBlock;
+ break;
+ case ResolveSplit:
+ block = toBlock;
+ break;
+ case ResolveCritical:
+ // fgSplitEdge may add one or two BasicBlocks. It returns the block that splits
+ // the edge from 'fromBlock' and 'toBlock', but if it inserts that block right after
+ // a block with a fall-through it will have to create another block to handle that edge.
+ // These new blocks can be mapped to existing blocks in order to correctly handle
+ // the calls to recordVarLocationsAtStartOfBB() from codegen. That mapping is handled
+ // in resolveEdges(), after all the edge resolution has been done (by calling this
+ // method for each edge).
+ block = compiler->fgSplitEdge(fromBlock, toBlock);
+ break;
+ default:
+ unreached();
+ break;
}
#ifndef _TARGET_XARCH_
@@ -9312,7 +9262,8 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
// TODO-Throughput: It would be better to determine the tempRegs on demand, but the code below
// modifies the varToRegMaps so we don't have all the correct registers at the time
// we need to get the tempReg.
- regNumber tempRegInt = (resolveType == ResolveSharedCritical) ? REG_NA : getTempRegForResolution(fromBlock, toBlock, TYP_INT);
+ regNumber tempRegInt =
+ (resolveType == ResolveSharedCritical) ? REG_NA : getTempRegForResolution(fromBlock, toBlock, TYP_INT);
#endif // !_TARGET_XARCH_
regNumber tempRegFlt = REG_NA;
if ((compiler->compFloatingPointUsed) && (resolveType != ResolveSharedCritical))
@@ -9320,8 +9271,8 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
tempRegFlt = getTempRegForResolution(fromBlock, toBlock, TYP_FLOAT);
}
- regMaskTP targetRegsToDo = RBM_NONE;
- regMaskTP targetRegsReady = RBM_NONE;
+ regMaskTP targetRegsToDo = RBM_NONE;
+ regMaskTP targetRegsReady = RBM_NONE;
regMaskTP targetRegsFromStack = RBM_NONE;
// The following arrays capture the location of the registers as they are moved:
@@ -9345,10 +9296,10 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
// What interval is this register associated with?
// (associated with incoming reg)
- Interval * sourceIntervals[REG_COUNT] = {nullptr};
+ Interval* sourceIntervals[REG_COUNT] = {nullptr};
// Intervals for vars that need to be loaded from the stack
- Interval * stackToRegIntervals[REG_COUNT] = {nullptr};
+ Interval* stackToRegIntervals[REG_COUNT] = {nullptr};
// Get the starting insertion point for the "to" resolution
GenTreePtr insertionPoint = nullptr;
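The source[] and location[] arrays set up in this function implement the usual parallel-move bookkeeping: source[toReg] names the register whose value must end up in toReg, and location[reg] tracks where the value originally held in reg currently lives as moves are emitted. A toy sketch of that bookkeeping, using hypothetical fixed-size int arrays rather than the JIT's regNumberSmall maps:

    #include <cstdio>

    constexpr int REG_COUNT = 8;
    constexpr int REG_NA    = -1;

    int source[REG_COUNT];   // source[toReg] = register whose value must end up in toReg
    int location[REG_COUNT]; // location[reg] = where the value originally in 'reg' lives now

    int main()
    {
        for (int r = 0; r < REG_COUNT; r++)
        {
            source[r]   = REG_NA;
            location[r] = REG_NA;
        }

        // Record a pending move: the value currently in r1 must end up in r2.
        int fromReg       = 1;
        int toReg         = 2;
        location[fromReg] = fromReg;
        source[toReg]     = fromReg;

        // When the move is emitted, location[] says where the value lives *now*,
        // which may differ from its original register once earlier moves have run.
        std::printf("move r%d -> r%d\n", location[source[toReg]], toReg);
        return 0;
    }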
@@ -9369,12 +9320,15 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
VARSET_ITER_INIT(compiler, iter, liveSet, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- bool isSpilled = false;
- Interval * interval = getIntervalForLocalVar(varNum);
- regNumber fromReg = getVarReg(fromVarToRegMap, varNum);
- regNumber toReg = getVarReg(toVarToRegMap, varNum);
- if (fromReg == toReg) continue;
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ bool isSpilled = false;
+ Interval* interval = getIntervalForLocalVar(varNum);
+ regNumber fromReg = getVarReg(fromVarToRegMap, varNum);
+ regNumber toReg = getVarReg(toVarToRegMap, varNum);
+ if (fromReg == toReg)
+ {
+ continue;
+ }
// For Critical edges, the location will not change on either side of the edge,
// since we'll add a new block to do the move.
@@ -9391,7 +9345,7 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
bool done = false;
- if (fromReg != toReg)
+ if (fromReg != toReg)
{
if (fromReg == REG_STK)
{
@@ -9406,8 +9360,8 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
}
else
{
- location[fromReg] = (regNumberSmall) fromReg;
- source[toReg] = (regNumberSmall) fromReg;
+ location[fromReg] = (regNumberSmall)fromReg;
+ source[toReg] = (regNumberSmall)fromReg;
sourceIntervals[fromReg] = interval;
targetRegsToDo |= genRegMask(toReg);
}
@@ -9439,15 +9393,15 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
targetRegsReady &= ~targetRegMask;
regNumber targetReg = genRegNumFromMask(targetRegMask);
assert(location[targetReg] != targetReg);
- regNumber sourceReg = (regNumber) source[targetReg];
- regNumber fromReg = (regNumber) location[sourceReg];
+ regNumber sourceReg = (regNumber)source[targetReg];
+ regNumber fromReg = (regNumber)location[sourceReg];
assert(fromReg < UCHAR_MAX && sourceReg < UCHAR_MAX);
- Interval * interval = sourceIntervals[sourceReg];
+ Interval* interval = sourceIntervals[sourceReg];
assert(interval != nullptr);
addResolution(block, insertionPoint, interval, targetReg, fromReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
sourceIntervals[sourceReg] = nullptr;
- location[sourceReg] = REG_NA;
+ location[sourceReg] = REG_NA;
// Do we have a free targetReg?
if (fromReg == sourceReg && source[fromReg] != REG_NA)
@@ -9459,12 +9413,12 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
if (targetRegsToDo != RBM_NONE)
{
regMaskTP targetRegMask = genFindLowestBit(targetRegsToDo);
- regNumber targetReg = genRegNumFromMask(targetRegMask);
+ regNumber targetReg = genRegNumFromMask(targetRegMask);
// Is it already there due to other moves?
// If not, move it to the temp reg, OR swap it with another register
- regNumber sourceReg = (regNumber) source[targetReg];
- regNumber fromReg = (regNumber) location[sourceReg];
+ regNumber sourceReg = (regNumber)source[targetReg];
+ regNumber fromReg = (regNumber)location[sourceReg];
if (targetReg == fromReg)
{
targetRegsToDo &= ~targetRegMask;
@@ -9472,7 +9426,7 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
else
{
regNumber tempReg = REG_NA;
- bool useSwap = false;
+ bool useSwap = false;
if (emitter::isFloatReg(targetReg))
{
tempReg = tempRegFlt;
@@ -9482,7 +9436,7 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
{
useSwap = true;
}
-#else // !_TARGET_XARCH_
+#else // !_TARGET_XARCH_
else
{
tempReg = tempRegInt;
@@ -9510,11 +9464,11 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
{
// Look at the remaining registers from targetRegsToDo (which we expect to be relatively
// small at this point) to find out what's currently in targetReg.
- regMaskTP mask = targetRegsToDo;
+ regMaskTP mask = targetRegsToDo;
while (mask != RBM_NONE && otherTargetReg == REG_NA)
{
regMaskTP nextRegMask = genFindLowestBit(mask);
- regNumber nextReg = genRegNumFromMask(nextRegMask);
+ regNumber nextReg = genRegNumFromMask(nextRegMask);
mask &= ~nextRegMask;
if (location[source[nextReg]] == targetReg)
{
@@ -9527,9 +9481,10 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
if (useSwap)
{
// Generate a "swap" of fromReg and targetReg
- insertSwap(block, insertionPoint, sourceIntervals[source[otherTargetReg]]->varNum, targetReg, sourceIntervals[sourceReg]->varNum, fromReg);
- location[sourceReg] = REG_NA;
- location[source[otherTargetReg]] = (regNumberSmall) fromReg;
+ insertSwap(block, insertionPoint, sourceIntervals[source[otherTargetReg]]->varNum, targetReg,
+ sourceIntervals[sourceReg]->varNum, fromReg);
+ location[sourceReg] = REG_NA;
+ location[source[otherTargetReg]] = (regNumberSmall)fromReg;
}
else
{
@@ -9559,7 +9514,7 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
assert(sourceIntervals[targetReg] != nullptr);
addResolution(block, insertionPoint, sourceIntervals[targetReg], tempReg, targetReg);
JITDUMP(" (%s)\n", resolveTypeName[resolveType]);
- location[targetReg] = (regNumberSmall) tempReg;
+ location[targetReg] = (regNumberSmall)tempReg;
targetRegsReady |= targetRegMask;
}
}
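The swap/temp-register handling above breaks cycles among the remaining register-to-register moves: two registers whose values must exchange cannot be resolved by plain moves alone, so either a swap instruction is used (as on xarch) or one value is parked in a spare register. A hypothetical, greatly simplified illustration of the two options:

    #include <cstdio>
    #include <utility>

    int main()
    {
        int regs[3] = {10, 20, 0}; // values currently in r0, r1, r2 (r2 is free)

        bool haveSwap = true;      // e.g. targeting xarch, which can exchange directly
        if (haveSwap)
        {
            std::swap(regs[0], regs[1]); // one exchange, no temporary needed
        }
        else
        {
            regs[2] = regs[0];           // r0 -> temp
            regs[0] = regs[1];           // r1 -> r0
            regs[1] = regs[2];           // temp -> r1
        }
        std::printf("r0=%d r1=%d\n", regs[0], regs[1]);
        return 0;
    }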
@@ -9574,7 +9529,7 @@ LinearScan::resolveEdge(BasicBlock* fromBlock,
targetRegsFromStack &= ~targetRegMask;
regNumber targetReg = genRegNumFromMask(targetRegMask);
- Interval * interval = stackToRegIntervals[targetReg];
+ Interval* interval = stackToRegIntervals[targetReg];
assert(interval != nullptr);
addResolution(block, insertionPoint, interval, targetReg, REG_STK);
@@ -9598,12 +9553,12 @@ void TreeNodeInfo::Initialize(LinearScan* lsra, GenTree* node, LsraLocation loca
dstCandidates = genRegMask(node->gtRegNum);
}
- internalIntCount = 0;
- internalFloatCount = 0;
- isLocalDefUse = false;
+ internalIntCount = 0;
+ internalFloatCount = 0;
+ isLocalDefUse = false;
isHelperCallWithKills = false;
- isLsraAdded = false;
- definesAnyRegisters = false;
+ isLsraAdded = false;
+ definesAnyRegisters = false;
setDstCandidates(lsra, dstCandidates);
srcCandsIndex = dstCandsIndex;
@@ -9618,47 +9573,47 @@ void TreeNodeInfo::Initialize(LinearScan* lsra, GenTree* node, LsraLocation loca
assert(IsValid(lsra));
}
-regMaskTP TreeNodeInfo::getSrcCandidates(LinearScan *lsra)
+regMaskTP TreeNodeInfo::getSrcCandidates(LinearScan* lsra)
{
return lsra->GetRegMaskForIndex(srcCandsIndex);
}
-void TreeNodeInfo::setSrcCandidates(LinearScan *lsra, regMaskTP mask)
+void TreeNodeInfo::setSrcCandidates(LinearScan* lsra, regMaskTP mask)
{
LinearScan::RegMaskIndex i = lsra->GetIndexForRegMask(mask);
assert(FitsIn<unsigned char>(i));
- srcCandsIndex = (unsigned char) i;
+ srcCandsIndex = (unsigned char)i;
}
-regMaskTP TreeNodeInfo::getDstCandidates(LinearScan *lsra)
+regMaskTP TreeNodeInfo::getDstCandidates(LinearScan* lsra)
{
return lsra->GetRegMaskForIndex(dstCandsIndex);
}
-void TreeNodeInfo::setDstCandidates(LinearScan *lsra, regMaskTP mask)
+void TreeNodeInfo::setDstCandidates(LinearScan* lsra, regMaskTP mask)
{
LinearScan::RegMaskIndex i = lsra->GetIndexForRegMask(mask);
assert(FitsIn<unsigned char>(i));
- dstCandsIndex = (unsigned char) i;
+ dstCandsIndex = (unsigned char)i;
}
-regMaskTP TreeNodeInfo::getInternalCandidates(LinearScan *lsra)
+regMaskTP TreeNodeInfo::getInternalCandidates(LinearScan* lsra)
{
return lsra->GetRegMaskForIndex(internalCandsIndex);
}
-void TreeNodeInfo::setInternalCandidates(LinearScan *lsra, regMaskTP mask)
+void TreeNodeInfo::setInternalCandidates(LinearScan* lsra, regMaskTP mask)
{
LinearScan::RegMaskIndex i = lsra->GetIndexForRegMask(mask);
assert(FitsIn<unsigned char>(i));
- internalCandsIndex = (unsigned char) i;
+ internalCandsIndex = (unsigned char)i;
}
-void TreeNodeInfo::addInternalCandidates(LinearScan *lsra, regMaskTP mask)
+void TreeNodeInfo::addInternalCandidates(LinearScan* lsra, regMaskTP mask)
{
LinearScan::RegMaskIndex i = lsra->GetIndexForRegMask(lsra->GetRegMaskForIndex(internalCandsIndex) | mask);
assert(FitsIn<unsigned char>(i));
- internalCandsIndex = (unsigned char) i;
+ internalCandsIndex = (unsigned char)i;
}
#ifdef DEBUG
@@ -9690,10 +9645,13 @@ static const char* getRefTypeName(RefType refType)
{
switch (refType)
{
-#define DEF_REFTYPE(memberName, memberValue, shortName) case memberName: return #memberName;
+#define DEF_REFTYPE(memberName, memberValue, shortName) \
+ case memberName: \
+ return #memberName;
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
- default: return nullptr;
+ default:
+ return nullptr;
}
}
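The reformatted DEF_REFTYPE macro above is an X-macro: lsra_reftypes.h lists every ref type once, and each consumer redefines DEF_REFTYPE before including it to generate the enum, names, and short names. A self-contained sketch of the idiom with an inline, two-argument list (the real macro takes three arguments and lives in lsra_reftypes.h):

    #include <cstdio>

    #define REFTYPE_LIST(X)                                                        \
        X(RefTypeDef, "Def")                                                       \
        X(RefTypeUse, "Use")                                                       \
        X(RefTypeKill, "Kill")

    enum RefType
    {
    #define DEF_REFTYPE(name, shortName) name,
        REFTYPE_LIST(DEF_REFTYPE)
    #undef DEF_REFTYPE
    };

    static const char* getShortName(RefType refType)
    {
        switch (refType)
        {
    #define DEF_REFTYPE(name, shortName)                                           \
        case name:                                                                 \
            return shortName;
            REFTYPE_LIST(DEF_REFTYPE)
    #undef DEF_REFTYPE
            default:
                return nullptr;
        }
    }

    int main()
    {
        std::printf("%s\n", getShortName(RefTypeUse));
        return 0;
    }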
@@ -9701,85 +9659,136 @@ static const char* getRefTypeShortName(RefType refType)
{
switch (refType)
{
-#define DEF_REFTYPE(memberName, memberValue, shortName) case memberName: return shortName;
+#define DEF_REFTYPE(memberName, memberValue, shortName) \
+ case memberName: \
+ return shortName;
#include "lsra_reftypes.h"
#undef DEF_REFTYPE
- default: return nullptr;
+ default:
+ return nullptr;
}
}
void RefPosition::dump()
{
printf("<RefPosition #%-3u @%-3u", rpNum, nodeLocation);
-
+
if (nextRefPosition)
+ {
printf(" ->#%-3u", nextRefPosition->rpNum);
+ }
printf(" %s ", getRefTypeName(refType));
if (this->isPhysRegRef)
+ {
this->getReg()->tinyDump();
+ }
else if (getInterval())
+ {
this->getInterval()->tinyDump();
+ }
if (this->treeNode)
+ {
printf("%s ", treeNode->OpName(treeNode->OperGet()));
+ }
printf("BB%02u ", this->bbNum);
printf("regmask=");
dumpRegMask(registerAssignment);
if (this->lastUse)
+ {
printf(" last");
+ }
if (this->reload)
+ {
printf(" reload");
+ }
if (this->spillAfter)
+ {
printf(" spillAfter");
+ }
if (this->moveReg)
+ {
printf(" move");
+ }
if (this->copyReg)
+ {
printf(" copy");
+ }
if (this->isFixedRegRef)
+ {
printf(" fixed");
+ }
if (this->isLocalDefUse)
+ {
printf(" local");
+ }
if (this->delayRegFree)
+ {
printf(" delay");
+ }
if (this->outOfOrder)
+ {
printf(" outOfOrder");
+ }
printf(">\n");
}
void RegRecord::dump()
{
tinyDump();
-}
+}
-void
-Interval::dump()
+void Interval::dump()
{
printf("Interval %2u:", intervalIndex);
- if (isLocalVar) printf(" (V%02u)", varNum);
- if (isInternal) printf(" (INTERNAL)");
- if (isSpilled) printf(" (SPILLED)");
- if (isSplit) printf(" (SPLIT)");
- if (isStructField) printf(" (struct)");
- if (isSpecialPutArg) printf(" (specialPutArg)");
- if (isConstant) printf(" (constant)");
+ if (isLocalVar)
+ {
+ printf(" (V%02u)", varNum);
+ }
+ if (isInternal)
+ {
+ printf(" (INTERNAL)");
+ }
+ if (isSpilled)
+ {
+ printf(" (SPILLED)");
+ }
+ if (isSplit)
+ {
+ printf(" (SPLIT)");
+ }
+ if (isStructField)
+ {
+ printf(" (struct)");
+ }
+ if (isSpecialPutArg)
+ {
+ printf(" (specialPutArg)");
+ }
+ if (isConstant)
+ {
+ printf(" (constant)");
+ }
printf(" RefPositions {");
- for (RefPosition * refPosition = this->firstRefPosition;
- refPosition != nullptr;
- refPosition = refPosition->nextRefPosition)
+ for (RefPosition* refPosition = this->firstRefPosition; refPosition != nullptr;
+ refPosition = refPosition->nextRefPosition)
{
printf("#%u@%u", refPosition->rpNum, refPosition->nodeLocation);
- if (refPosition->nextRefPosition) printf(" ");
+ if (refPosition->nextRefPosition)
+ {
+ printf(" ");
+ }
}
printf("}");
// this is not used (yet?)
- //printf(" SpillOffset %d", this->spillOffset);
+ // printf(" SpillOffset %d", this->spillOffset);
printf(" physReg:%s", getRegName(physReg));
@@ -9796,10 +9805,8 @@ Interval::dump()
printf("\n");
}
-
// print out very concise representation
-void
-Interval::tinyDump()
+void Interval::tinyDump()
{
printf("<Ivl:%u", intervalIndex);
if (isLocalVar)
@@ -9813,10 +9820,8 @@ Interval::tinyDump()
printf("> ");
}
-
// print out extremely concise representation
-void
-Interval::microDump()
+void Interval::microDump()
{
char intervalTypeChar = 'I';
if (isInternal)
@@ -9831,39 +9836,57 @@ Interval::microDump()
printf("<%c%u>", intervalTypeChar, intervalIndex);
}
-
void RegRecord::tinyDump()
{
printf("<Reg:%-3s> ", getRegName(regNum));
}
-
-void TreeNodeInfo::dump(LinearScan *lsra)
+void TreeNodeInfo::dump(LinearScan* lsra)
{
printf("<TreeNodeInfo @ %2u %d=%d %di %df", loc, dstCount, srcCount, internalIntCount, internalFloatCount);
- printf(" src="); dumpRegMask(getSrcCandidates(lsra));
- printf(" int="); dumpRegMask(getInternalCandidates(lsra));
- printf(" dst="); dumpRegMask(getDstCandidates(lsra));
- if (isLocalDefUse) printf(" L");
- if (isInitialized) printf(" I");
- if (isHelperCallWithKills) printf(" H");
- if (isLsraAdded) printf(" A");
- if (isDelayFree) printf(" D");
- if (isTgtPref) printf(" P");
+ printf(" src=");
+ dumpRegMask(getSrcCandidates(lsra));
+ printf(" int=");
+ dumpRegMask(getInternalCandidates(lsra));
+ printf(" dst=");
+ dumpRegMask(getDstCandidates(lsra));
+ if (isLocalDefUse)
+ {
+ printf(" L");
+ }
+ if (isInitialized)
+ {
+ printf(" I");
+ }
+ if (isHelperCallWithKills)
+ {
+ printf(" H");
+ }
+ if (isLsraAdded)
+ {
+ printf(" A");
+ }
+ if (isDelayFree)
+ {
+ printf(" D");
+ }
+ if (isTgtPref)
+ {
+ printf(" P");
+ }
printf(">\n");
}
-void
-LinearScan::lsraDumpIntervals(const char* msg)
+void LinearScan::lsraDumpIntervals(const char* msg)
{
- Interval * interval;
+ Interval* interval;
printf("\nLinear scan intervals %s:\n", msg);
for (auto& interval : intervals)
{
// only dump something if it has references
- //if (interval->firstRefPosition)
- interval.dump();
+ // if (interval->firstRefPosition)
+ interval.dump();
}
printf("\n");
@@ -9871,22 +9894,27 @@ LinearScan::lsraDumpIntervals(const char* msg)
// Dumps a tree node as a destination or source operand, with the style
// of dump dependent on the mode
-void
-LinearScan::lsraGetOperandString(GenTreePtr tree, LsraTupleDumpMode mode, char *operandString, unsigned operandStringLength)
+void LinearScan::lsraGetOperandString(GenTreePtr tree,
+ LsraTupleDumpMode mode,
+ char* operandString,
+ unsigned operandStringLength)
{
const char* lastUseChar = "";
- if ((tree->gtFlags & GTF_VAR_DEATH) != 0) lastUseChar = "*";
+ if ((tree->gtFlags & GTF_VAR_DEATH) != 0)
+ {
+ lastUseChar = "*";
+ }
switch (mode)
{
- case LinearScan::LSRA_DUMP_PRE:
- _snprintf_s(operandString, operandStringLength, operandStringLength, "t%d%s", tree->gtSeqNum, lastUseChar);
- break;
- case LinearScan::LSRA_DUMP_REFPOS:
- _snprintf_s(operandString, operandStringLength, operandStringLength, "t%d%s", tree->gtSeqNum, lastUseChar);
- break;
- case LinearScan::LSRA_DUMP_POST:
+ case LinearScan::LSRA_DUMP_PRE:
+ _snprintf_s(operandString, operandStringLength, operandStringLength, "t%d%s", tree->gtSeqNum, lastUseChar);
+ break;
+ case LinearScan::LSRA_DUMP_REFPOS:
+ _snprintf_s(operandString, operandStringLength, operandStringLength, "t%d%s", tree->gtSeqNum, lastUseChar);
+ break;
+ case LinearScan::LSRA_DUMP_POST:
{
- Compiler *compiler = JitTls::GetCompiler();
+ Compiler* compiler = JitTls::GetCompiler();
if (!tree->gtHasReg())
{
@@ -9894,49 +9922,58 @@ LinearScan::lsraGetOperandString(GenTreePtr tree, LsraTupleDumpMode mode, char *
}
else
{
- _snprintf_s(operandString, operandStringLength, operandStringLength, "%s%s", getRegName(tree->gtRegNum, useFloatReg(tree->TypeGet())), lastUseChar);
+ _snprintf_s(operandString, operandStringLength, operandStringLength, "%s%s",
+ getRegName(tree->gtRegNum, useFloatReg(tree->TypeGet())), lastUseChar);
}
}
break;
- default:
- printf ("ERROR: INVALID TUPLE DUMP MODE\n");
- break;
+ default:
+ printf("ERROR: INVALID TUPLE DUMP MODE\n");
+ break;
}
}
-void
-LinearScan::lsraDispNode(GenTreePtr tree, LsraTupleDumpMode mode, bool hasDest)
+void LinearScan::lsraDispNode(GenTreePtr tree, LsraTupleDumpMode mode, bool hasDest)
{
- Compiler* compiler = JitTls::GetCompiler();
- const unsigned operandStringLength = 16;
- char operandString[operandStringLength];
- const char* emptyDestOperand = " ";
- char spillChar = ' ';
+ Compiler* compiler = JitTls::GetCompiler();
+ const unsigned operandStringLength = 16;
+ char operandString[operandStringLength];
+ const char* emptyDestOperand = " ";
+ char spillChar = ' ';
if (mode == LinearScan::LSRA_DUMP_POST)
{
- if ((tree->gtFlags & GTF_SPILL) != 0) spillChar = 'S';
+ if ((tree->gtFlags & GTF_SPILL) != 0)
+ {
+ spillChar = 'S';
+ }
if (!hasDest && tree->gtHasReg())
{
// This can be true for the "localDefUse" case - defining a reg, but
// pushing it on the stack
assert(spillChar == ' ');
spillChar = '*';
- hasDest = true;
+ hasDest = true;
}
}
printf("%c N%03u. ", spillChar, tree->gtSeqNum);
LclVarDsc* varDsc = nullptr;
- unsigned varNum = UINT_MAX;
+ unsigned varNum = UINT_MAX;
if (tree->IsLocal())
{
varNum = tree->gtLclVarCommon.gtLclNum;
varDsc = &(compiler->lvaTable[varNum]);
- if (varDsc->lvLRACandidate) hasDest = false;
+ if (varDsc->lvLRACandidate)
+ {
+ hasDest = false;
+ }
}
if (hasDest)
{
- if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED) assert(tree->gtHasReg());
+ if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED)
+ {
+ assert(tree->gtHasReg());
+ }
lsraGetOperandString(tree, mode, operandString, operandStringLength);
printf("%-15s =", operandString);
}
@@ -9956,7 +9993,10 @@ LinearScan::lsraDispNode(GenTreePtr tree, LsraTupleDumpMode mode, bool hasDest)
{
lsraGetOperandString(tree, mode, operandString, operandStringLength);
printf(" V%02u(%s)", varNum, operandString);
- if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED) printf("R");
+ if (mode == LinearScan::LSRA_DUMP_POST && tree->gtFlags & GTF_SPILLED)
+ {
+ printf("R");
+ }
}
}
else
@@ -9967,38 +10007,52 @@ LinearScan::lsraDispNode(GenTreePtr tree, LsraTupleDumpMode mode, bool hasDest)
else if (tree->OperIsAssignment())
{
assert(!tree->gtHasReg());
- const char * isRev = "";
- if ((tree->gtFlags & GTF_REVERSE_OPS) != 0) isRev = "(Rev)";
+ const char* isRev = "";
+ if ((tree->gtFlags & GTF_REVERSE_OPS) != 0)
+ {
+ isRev = "(Rev)";
+ }
printf(" asg%s%s ", GenTree::NodeName(tree->OperGet()), isRev);
}
else
{
compiler->gtDispNodeName(tree);
- if ((tree->gtFlags & GTF_REVERSE_OPS) != 0) printf("(Rev)");
- if (tree->OperKind() & GTK_LEAF) compiler->gtDispLeaf(tree, 0);
+ if ((tree->gtFlags & GTF_REVERSE_OPS) != 0)
+ {
+ printf("(Rev)");
+ }
+ if (tree->OperKind() & GTK_LEAF)
+ {
+ compiler->gtDispLeaf(tree, nullptr);
+ }
}
}
-GenTreePtr
-popAndPrintLclVarUses(ArrayStack<GenTreePtr>* stack, int* remainingUses)
+GenTreePtr popAndPrintLclVarUses(ArrayStack<GenTreePtr>* stack, int* remainingUses)
{
while (*remainingUses != 0)
{
GenTreePtr nextUseNode = stack->Pop();
(*remainingUses)--;
- if (nextUseNode->IsLocal()) printf(" V%02u");
- else return nextUseNode;
+ if (nextUseNode->IsLocal())
+ {
+            printf(" V%02u", nextUseNode->gtLclVarCommon.gtLclNum);
+ }
+ else
+ {
+ return nextUseNode;
+ }
}
return nullptr;
}
void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
{
- BasicBlock* block;
- ArrayStack<GenTreePtr> stack(compiler, CMK_LSRA);
- LsraLocation currentLoc = 1; // 0 is the entry
- const unsigned operandStringLength = 16;
- char operandString[operandStringLength];
+ BasicBlock* block;
+ ArrayStack<GenTreePtr> stack(compiler, CMK_LSRA);
+ LsraLocation currentLoc = 1; // 0 is the entry
+ const unsigned operandStringLength = 16;
+ char operandString[operandStringLength];
// currentRefPosition is not used for LSRA_DUMP_PRE
// We keep separate iterators for defs, so that we can print them
@@ -10007,39 +10061,44 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
switch (mode)
{
- case LSRA_DUMP_PRE:
- printf("TUPLE STYLE DUMP BEFORE LSRA\n");
- break;
- case LSRA_DUMP_REFPOS:
- printf("TUPLE STYLE DUMP WITH REF POSITIONS\n");
- break;
- case LSRA_DUMP_POST:
- printf("TUPLE STYLE DUMP WITH REGISTER ASSIGNMENTS\n");
- break;
- default:
- printf ("ERROR: INVALID TUPLE DUMP MODE\n");
- return;
+ case LSRA_DUMP_PRE:
+ printf("TUPLE STYLE DUMP BEFORE LSRA\n");
+ break;
+ case LSRA_DUMP_REFPOS:
+ printf("TUPLE STYLE DUMP WITH REF POSITIONS\n");
+ break;
+ case LSRA_DUMP_POST:
+ printf("TUPLE STYLE DUMP WITH REGISTER ASSIGNMENTS\n");
+ break;
+ default:
+ printf("ERROR: INVALID TUPLE DUMP MODE\n");
+ return;
}
if (mode != LSRA_DUMP_PRE)
{
- printf ("Incoming Parameters: ");
- for ( ;
- currentRefPosition != refPositions.end() && currentRefPosition->refType != RefTypeBB;
- ++currentRefPosition)
+ printf("Incoming Parameters: ");
+ for (; currentRefPosition != refPositions.end() && currentRefPosition->refType != RefTypeBB;
+ ++currentRefPosition)
{
Interval* interval = currentRefPosition->getInterval();
assert(interval != nullptr && interval->isLocalVar);
printf(" V%02d", interval->varNum);
- if (mode == LSRA_DUMP_POST)
+ if (mode == LSRA_DUMP_POST)
{
regNumber reg;
- if (currentRefPosition->registerAssignment == RBM_NONE) reg = REG_STK;
- else reg = currentRefPosition->assignedReg();
+ if (currentRefPosition->registerAssignment == RBM_NONE)
+ {
+ reg = REG_STK;
+ }
+ else
+ {
+ reg = currentRefPosition->assignedReg();
+ }
LclVarDsc* varDsc = &(compiler->lvaTable[interval->varNum]);
printf("(");
regNumber assignedReg = varDsc->lvRegNum;
- regNumber argReg = (varDsc->lvIsRegArg) ? varDsc->lvArgReg : REG_STK;
+ regNumber argReg = (varDsc->lvIsRegArg) ? varDsc->lvArgReg : REG_STK;
assert(reg == assignedReg || varDsc->lvRegister == false);
if (reg != argReg)
@@ -10053,9 +10112,7 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
printf("\n");
}
- for( block = startBlockSequence();
- block != nullptr;
- block = moveToNextBlock())
+ for (block = startBlockSequence(); block != nullptr; block = moveToNextBlock())
{
currentLoc += 2;
@@ -10063,35 +10120,36 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
{
bool printedBlockHeader = false;
// We should find the boundary RefPositions in the order of exposed uses, dummy defs, and the blocks
- for ( ;
- currentRefPosition != refPositions.end() &&
- (currentRefPosition->refType == RefTypeExpUse ||
- currentRefPosition->refType == RefTypeDummyDef ||
- (currentRefPosition->refType == RefTypeBB && !printedBlockHeader));
- ++currentRefPosition)
- {
- Interval * interval = nullptr;
- if (currentRefPosition->isIntervalRef()) interval = currentRefPosition->getInterval();
+ for (; currentRefPosition != refPositions.end() &&
+ (currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef ||
+ (currentRefPosition->refType == RefTypeBB && !printedBlockHeader));
+ ++currentRefPosition)
+ {
+ Interval* interval = nullptr;
+ if (currentRefPosition->isIntervalRef())
+ {
+ interval = currentRefPosition->getInterval();
+ }
switch (currentRefPosition->refType)
{
- case RefTypeExpUse:
- assert(interval != nullptr);
- assert(interval->isLocalVar);
- printf(" Exposed use of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum);
- break;
- case RefTypeDummyDef:
- assert(interval != nullptr);
- assert(interval->isLocalVar);
- printf(" Dummy def of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum);
- break;
- case RefTypeBB:
- block->dspBlockHeader(compiler);
- printedBlockHeader = true;
- printf("=====\n");
- break;
- default:
- printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum);
- break;
+ case RefTypeExpUse:
+ assert(interval != nullptr);
+ assert(interval->isLocalVar);
+ printf(" Exposed use of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum);
+ break;
+ case RefTypeDummyDef:
+ assert(interval != nullptr);
+ assert(interval->isLocalVar);
+ printf(" Dummy def of V%02u at #%d\n", interval->varNum, currentRefPosition->rpNum);
+ break;
+ case RefTypeBB:
+ block->dspBlockHeader(compiler);
+ printedBlockHeader = true;
+ printf("=====\n");
+ break;
+ default:
+ printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum);
+ break;
}
}
}
@@ -10111,22 +10169,25 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
splitBBNumToTargetBBNumMap->Lookup(block->bbNum, &splitEdgeInfo);
assert(splitEdgeInfo.toBBNum <= bbNumMaxBeforeResolution);
assert(splitEdgeInfo.fromBBNum <= bbNumMaxBeforeResolution);
- printf("New block introduced for resolution from BB%02u to BB%02u\n", splitEdgeInfo.fromBBNum, splitEdgeInfo.toBBNum);
+ printf("New block introduced for resolution from BB%02u to BB%02u\n", splitEdgeInfo.fromBBNum,
+ splitEdgeInfo.toBBNum);
}
- for (GenTree *statement = block->FirstNonPhiDef();
- statement;
- statement = statement->gtNext)
+ for (GenTree* statement = block->FirstNonPhiDef(); statement; statement = statement->gtNext)
{
- if ((statement->gtFlags & GTF_STMT_TOP_LEVEL) == 0) continue;
+ if ((statement->gtFlags & GTF_STMT_TOP_LEVEL) == 0)
+ {
+ continue;
+ }
- for (GenTree *tree = statement->gtStmt.gtStmtList;
- tree;
- tree = tree->gtNext, currentLoc += 2)
+            for (GenTree* tree = statement->gtStmt.gtStmtList; tree; tree = tree->gtNext, currentLoc += 2)
{
genTreeOps oper = tree->OperGet();
- if (oper == GT_ARGPLACE) continue;
- TreeNodeInfo &info = tree->gtLsraInfo;
+ if (oper == GT_ARGPLACE)
+ {
+ continue;
+ }
+ TreeNodeInfo& info = tree->gtLsraInfo;
if (tree->gtLsraInfo.isLsraAdded)
{
// This must be one of the nodes that we add during LSRA
@@ -10144,7 +10205,7 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
#ifdef FEATURE_SIMD
else if (oper == GT_SIMD)
{
- if(tree->gtSIMD.gtSIMDIntrinsicID == SIMDIntrinsicUpperSave)
+ if (tree->gtSIMD.gtSIMDIntrinsicID == SIMDIntrinsicUpperSave)
{
info.srcCount = 1;
info.dstCount = 1;
@@ -10163,17 +10224,19 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
info.srcCount = 2;
info.dstCount = 0;
}
- info.internalIntCount = 0;
+ info.internalIntCount = 0;
info.internalFloatCount = 0;
}
- int consume = info.srcCount;
- int produce = info.dstCount;
- regMaskTP killMask = RBM_NONE;
+ int consume = info.srcCount;
+ int produce = info.dstCount;
+ regMaskTP killMask = RBM_NONE;
regMaskTP fixedMask = RBM_NONE;
if (tree->OperGet() == GT_LIST)
+ {
continue;
+ }
lsraDispNode(tree, mode, produce != 0 && mode != LSRA_DUMP_REFPOS);
@@ -10189,12 +10252,17 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
stack.ReverseTop(consume);
}
if (consume > 0)
+ {
printf("; ");
+ }
while (consume--)
{
lsraGetOperandString(stack.Pop(), mode, operandString, operandStringLength);
printf("%s", operandString);
- if (consume) printf(",");
+ if (consume)
+ {
+ printf(",");
+ }
}
while (produce--)
{
@@ -10206,51 +10274,55 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
// Print each RefPosition on a new line, but
// printing all the kills for each node on a single line
// and combining the fixed regs with their associated def or use
- bool killPrinted = false;
- RefPosition * lastFixedRegRefPos = nullptr;
- for ( ;
- currentRefPosition != refPositions.end() &&
- (currentRefPosition->refType == RefTypeUse ||
- currentRefPosition->refType == RefTypeFixedReg ||
- currentRefPosition->refType == RefTypeKill ||
- currentRefPosition->refType == RefTypeDef) &&
- (currentRefPosition->nodeLocation == tree->gtSeqNum ||
- currentRefPosition->nodeLocation == tree->gtSeqNum+1);
- ++currentRefPosition)
+ bool killPrinted = false;
+ RefPosition* lastFixedRegRefPos = nullptr;
+ for (;
+ currentRefPosition != refPositions.end() &&
+ (currentRefPosition->refType == RefTypeUse || currentRefPosition->refType == RefTypeFixedReg ||
+ currentRefPosition->refType == RefTypeKill || currentRefPosition->refType == RefTypeDef) &&
+ (currentRefPosition->nodeLocation == tree->gtSeqNum ||
+ currentRefPosition->nodeLocation == tree->gtSeqNum + 1);
+ ++currentRefPosition)
{
- Interval * interval = nullptr;
+ Interval* interval = nullptr;
if (currentRefPosition->isIntervalRef())
{
interval = currentRefPosition->getInterval();
}
switch (currentRefPosition->refType)
{
- case RefTypeUse:
- if (currentRefPosition->isPhysRegRef)
- {
- printf("\n Use:R%d(#%d)", currentRefPosition->getReg()->regNum, currentRefPosition->rpNum);
- }
- else
- {
- assert(interval != nullptr);
- printf("\n Use:");
- interval->microDump();
- printf("(#%d)", currentRefPosition->rpNum);
- if (currentRefPosition->isFixedRegRef)
+ case RefTypeUse:
+ if (currentRefPosition->isPhysRegRef)
{
- assert(genMaxOneBit(currentRefPosition->registerAssignment));
- assert(lastFixedRegRefPos != nullptr);
- printf(" Fixed:%s(#%d)", getRegName(currentRefPosition->assignedReg(), isFloatRegType(interval->registerType)), lastFixedRegRefPos->rpNum);
- lastFixedRegRefPos = nullptr;
+ printf("\n Use:R%d(#%d)",
+ currentRefPosition->getReg()->regNum, currentRefPosition->rpNum);
}
- if (currentRefPosition->isLocalDefUse)
+ else
{
- printf(" LocalDefUse");
+ assert(interval != nullptr);
+ printf("\n Use:");
+ interval->microDump();
+ printf("(#%d)", currentRefPosition->rpNum);
+ if (currentRefPosition->isFixedRegRef)
+ {
+ assert(genMaxOneBit(currentRefPosition->registerAssignment));
+ assert(lastFixedRegRefPos != nullptr);
+ printf(" Fixed:%s(#%d)", getRegName(currentRefPosition->assignedReg(),
+ isFloatRegType(interval->registerType)),
+ lastFixedRegRefPos->rpNum);
+ lastFixedRegRefPos = nullptr;
+ }
+ if (currentRefPosition->isLocalDefUse)
+ {
+ printf(" LocalDefUse");
+ }
+ if (currentRefPosition->lastUse)
+ {
+ printf(" *");
+ }
}
- if (currentRefPosition->lastUse) printf(" *");
- }
- break;
- case RefTypeDef:
+ break;
+ case RefTypeDef:
{
// Print each def on a new line
assert(interval != nullptr);
@@ -10260,7 +10332,8 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
if (currentRefPosition->isFixedRegRef)
{
assert(genMaxOneBit(currentRefPosition->registerAssignment));
- printf(" %s", getRegName(currentRefPosition->assignedReg(), isFloatRegType(interval->registerType)));
+ printf(" %s", getRegName(currentRefPosition->assignedReg(),
+ isFloatRegType(interval->registerType)));
}
if (currentRefPosition->isLocalDefUse)
{
@@ -10277,21 +10350,22 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
}
}
break;
- case RefTypeKill:
- if (!killPrinted)
- {
- printf ("\n Kill: ");
- killPrinted = true;
- }
- printf (getRegName(currentRefPosition->assignedReg(), isFloatRegType(currentRefPosition->getReg()->registerType)));
- printf (" ");
- break;
- case RefTypeFixedReg:
- lastFixedRegRefPos = currentRefPosition;
- break;
- default:
- printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum);
- break;
+ case RefTypeKill:
+ if (!killPrinted)
+ {
+ printf("\n Kill: ");
+ killPrinted = true;
+ }
+ printf(getRegName(currentRefPosition->assignedReg(),
+ isFloatRegType(currentRefPosition->getReg()->registerType)));
+ printf(" ");
+ break;
+ case RefTypeFixedReg:
+ lastFixedRegRefPos = currentRefPosition;
+ break;
+ default:
+ printf("Unexpected RefPosition type at #%d\n", currentRefPosition->rpNum);
+ break;
}
}
}
@@ -10306,8 +10380,8 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
else if ((info.getInternalCandidates(this) & allRegs(TYP_INT)) != allRegs(TYP_INT))
{
dumpRegMask(info.getInternalCandidates(this) & allRegs(TYP_INT));
- }
- printf("\n");
+ }
+ printf("\n");
}
if (info.internalFloatCount != 0 && mode != LSRA_DUMP_REFPOS)
{
@@ -10325,366 +10399,376 @@ void LinearScan::TupleStyleDump(LsraTupleDumpMode mode)
}
printf("\n");
}
- if (mode == LSRA_DUMP_POST) dumpOutVarToRegMap(block);
+ if (mode == LSRA_DUMP_POST)
+ {
+ dumpOutVarToRegMap(block);
+ }
printf("\n");
}
printf("\n\n");
}
-void
-LinearScan::dumpLsraAllocationEvent(LsraDumpEvent event, Interval* interval, regNumber reg, BasicBlock* currentBlock)
+void LinearScan::dumpLsraAllocationEvent(LsraDumpEvent event,
+ Interval* interval,
+ regNumber reg,
+ BasicBlock* currentBlock)
{
if (!(VERBOSE))
{
return;
}
- switch(event)
+ switch (event)
{
- // Conflicting def/use
- case LSRA_EVENT_DEFUSE_CONFLICT:
- if (!dumpTerse)
- {
- printf(" Def and Use have conflicting register requirements:");
- }
- else
- {
- printf("DUconflict ");
- dumpRegRecords();
- }
- break;
- case LSRA_EVENT_DEFUSE_FIXED_DELAY_USE:
- if (!dumpTerse)
- {
- printf(" Can't change useAssignment ");
- }
- break;
- case LSRA_EVENT_DEFUSE_CASE1:
- if (!dumpTerse)
- {
- printf(" case #1, use the defRegAssignment\n");
- }
- else
- {
- printf(indentFormat, " case #1 use defRegAssignment");
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
- case LSRA_EVENT_DEFUSE_CASE2:
- if (!dumpTerse)
- {
- printf(" case #2, use the useRegAssignment\n");
- }
- else
- {
- printf(indentFormat, " case #2 use useRegAssignment");
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
- case LSRA_EVENT_DEFUSE_CASE3:
- if (!dumpTerse)
- {
- printf(" case #3, change the defRegAssignment to the use regs\n");
- }
- else
- {
- printf(indentFormat, " case #3 use useRegAssignment");
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
- case LSRA_EVENT_DEFUSE_CASE4:
- if (!dumpTerse)
- {
- printf(" case #4, change the useRegAssignment to the def regs\n");
- }
- else
- {
- printf(indentFormat, " case #4 use defRegAssignment");
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
- case LSRA_EVENT_DEFUSE_CASE5:
- if (!dumpTerse)
- {
- printf(" case #5, Conflicting Def and Use single-register requirements require copies - set def to all regs of the appropriate type\n");
- }
- else
- {
- printf(indentFormat, " case #5 set def to all regs");
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
- case LSRA_EVENT_DEFUSE_CASE6:
- if (!dumpTerse)
- {
- printf(" case #6, Conflicting Def and Use register requirements require a copy\n");
- }
- else
- {
- printf(indentFormat, " case #6 need a copy");
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
+ // Conflicting def/use
+ case LSRA_EVENT_DEFUSE_CONFLICT:
+ if (!dumpTerse)
+ {
+ printf(" Def and Use have conflicting register requirements:");
+ }
+ else
+ {
+ printf("DUconflict ");
+ dumpRegRecords();
+ }
+ break;
+ case LSRA_EVENT_DEFUSE_FIXED_DELAY_USE:
+ if (!dumpTerse)
+ {
+ printf(" Can't change useAssignment ");
+ }
+ break;
+ case LSRA_EVENT_DEFUSE_CASE1:
+ if (!dumpTerse)
+ {
+ printf(" case #1, use the defRegAssignment\n");
+ }
+ else
+ {
+ printf(indentFormat, " case #1 use defRegAssignment");
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
+ case LSRA_EVENT_DEFUSE_CASE2:
+ if (!dumpTerse)
+ {
+ printf(" case #2, use the useRegAssignment\n");
+ }
+ else
+ {
+ printf(indentFormat, " case #2 use useRegAssignment");
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
+ case LSRA_EVENT_DEFUSE_CASE3:
+ if (!dumpTerse)
+ {
+ printf(" case #3, change the defRegAssignment to the use regs\n");
+ }
+ else
+ {
+ printf(indentFormat, " case #3 use useRegAssignment");
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
+ case LSRA_EVENT_DEFUSE_CASE4:
+ if (!dumpTerse)
+ {
+ printf(" case #4, change the useRegAssignment to the def regs\n");
+ }
+ else
+ {
+ printf(indentFormat, " case #4 use defRegAssignment");
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
+ case LSRA_EVENT_DEFUSE_CASE5:
+ if (!dumpTerse)
+ {
+ printf(" case #5, Conflicting Def and Use single-register requirements require copies - set def to all "
+ "regs of the appropriate type\n");
+ }
+ else
+ {
+ printf(indentFormat, " case #5 set def to all regs");
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
+ case LSRA_EVENT_DEFUSE_CASE6:
+ if (!dumpTerse)
+ {
+ printf(" case #6, Conflicting Def and Use register requirements require a copy\n");
+ }
+ else
+ {
+ printf(indentFormat, " case #6 need a copy");
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
- case LSRA_EVENT_SPILL:
- if (!dumpTerse)
- {
- printf("Spilled:\n");
- interval->dump();
- }
- else
- {
- assert(interval != nullptr && interval->assignedReg != nullptr);
- printf("Spill %-4s ", getRegName(interval->assignedReg->regNum));
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
- case LSRA_EVENT_SPILL_EXTENDED_LIFETIME:
- if (!dumpTerse)
- {
- printf(" Spilled extended lifetime var V%02u at last use; not marked for actual spill.", interval->intervalIndex);
- }
- break;
+ case LSRA_EVENT_SPILL:
+ if (!dumpTerse)
+ {
+ printf("Spilled:\n");
+ interval->dump();
+ }
+ else
+ {
+ assert(interval != nullptr && interval->assignedReg != nullptr);
+ printf("Spill %-4s ", getRegName(interval->assignedReg->regNum));
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
+ case LSRA_EVENT_SPILL_EXTENDED_LIFETIME:
+ if (!dumpTerse)
+ {
+ printf(" Spilled extended lifetime var V%02u at last use; not marked for actual spill.",
+ interval->intervalIndex);
+ }
+ break;
- // Restoring the previous register
- case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL:
- assert(interval != nullptr);
- if (!dumpTerse)
- {
- printf(" Assign register %s to previous interval Ivl:%d after spill\n", getRegName(reg), interval->intervalIndex);
- }
- else
- {
- // If we spilled, then the dump is already pre-indented, but we need to pre-indent for the subsequent allocation
- // with a dumpEmptyRefPosition().
- printf("SRstr %-4s ", getRegName(reg));
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
- case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL:
- assert(interval != nullptr);
- if (!dumpTerse)
- {
- printf(" Assign register %s to previous interval Ivl:%d\n", getRegName(reg), interval->intervalIndex);
- }
- else
- {
- if (activeRefPosition == nullptr)
+ // Restoring the previous register
+ case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL:
+ assert(interval != nullptr);
+ if (!dumpTerse)
{
- printf(emptyRefPositionFormat, "");
+ printf(" Assign register %s to previous interval Ivl:%d after spill\n", getRegName(reg),
+ interval->intervalIndex);
}
- printf("Restr %-4s ", getRegName(reg));
- dumpRegRecords();
- if (activeRefPosition != nullptr)
+ else
{
- printf(emptyRefPositionFormat, "");
+ // If we spilled, then the dump is already pre-indented, but we need to pre-indent for the subsequent
+ // allocation
+ // with a dumpEmptyRefPosition().
+ printf("SRstr %-4s ", getRegName(reg));
+ dumpRegRecords();
+ dumpEmptyRefPosition();
}
- }
- break;
+ break;
+ case LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL:
+ assert(interval != nullptr);
+ if (!dumpTerse)
+ {
+ printf(" Assign register %s to previous interval Ivl:%d\n", getRegName(reg), interval->intervalIndex);
+ }
+ else
+ {
+ if (activeRefPosition == nullptr)
+ {
+ printf(emptyRefPositionFormat, "");
+ }
+ printf("Restr %-4s ", getRegName(reg));
+ dumpRegRecords();
+ if (activeRefPosition != nullptr)
+ {
+ printf(emptyRefPositionFormat, "");
+ }
+ }
+ break;
- // Done with GC Kills
- case LSRA_EVENT_DONE_KILL_GC_REFS:
- printf("DoneKillGC ");
- break;
+ // Done with GC Kills
+ case LSRA_EVENT_DONE_KILL_GC_REFS:
+ printf("DoneKillGC ");
+ break;
- // Block boundaries
- case LSRA_EVENT_START_BB:
- assert(currentBlock != nullptr);
- if (!dumpTerse)
- {
- printf("\n\n Live Vars(Regs) at start of BB%02u (from pred BB%02u):", currentBlock->bbNum, blockInfo[currentBlock->bbNum].predBBNum);
- dumpVarToRegMap(inVarToRegMaps[currentBlock->bbNum]);
- }
- break;
- case LSRA_EVENT_END_BB:
- if (!dumpTerse)
- {
- printf("\n\n Live Vars(Regs) after BB%02u:", currentBlock->bbNum);
- dumpVarToRegMap(outVarToRegMaps[currentBlock->bbNum]);
- }
- break;
+ // Block boundaries
+ case LSRA_EVENT_START_BB:
+ assert(currentBlock != nullptr);
+ if (!dumpTerse)
+ {
+ printf("\n\n Live Vars(Regs) at start of BB%02u (from pred BB%02u):", currentBlock->bbNum,
+ blockInfo[currentBlock->bbNum].predBBNum);
+ dumpVarToRegMap(inVarToRegMaps[currentBlock->bbNum]);
+ }
+ break;
+ case LSRA_EVENT_END_BB:
+ if (!dumpTerse)
+ {
+ printf("\n\n Live Vars(Regs) after BB%02u:", currentBlock->bbNum);
+ dumpVarToRegMap(outVarToRegMaps[currentBlock->bbNum]);
+ }
+ break;
- case LSRA_EVENT_FREE_REGS:
- if (!dumpTerse)
- {
- printf("Freeing registers:\n");
- }
- break;
+ case LSRA_EVENT_FREE_REGS:
+ if (!dumpTerse)
+ {
+ printf("Freeing registers:\n");
+ }
+ break;
- // Characteristics of the current RefPosition
- case LSRA_EVENT_INCREMENT_RANGE_END:
- if (!dumpTerse)
- {
- printf(" Incrementing nextPhysRegLocation for %s\n", getRegName(reg));
- }
- // else ???
- break;
- case LSRA_EVENT_LAST_USE:
- if (!dumpTerse)
- {
- printf(" Last use, marked to be freed\n");
- }
- break;
- case LSRA_EVENT_LAST_USE_DELAYED:
- if (!dumpTerse)
- {
- printf(" Last use, marked to be freed (delayed)\n");
- }
- break;
- case LSRA_EVENT_NEEDS_NEW_REG:
- if (!dumpTerse)
- {
- printf(" Needs new register; mark %s to be freed\n", getRegName(reg));
- }
- else
- {
- printf("Free %-4s ", getRegName(reg));
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
+ // Characteristics of the current RefPosition
+ case LSRA_EVENT_INCREMENT_RANGE_END:
+ if (!dumpTerse)
+ {
+ printf(" Incrementing nextPhysRegLocation for %s\n", getRegName(reg));
+ }
+ // else ???
+ break;
+ case LSRA_EVENT_LAST_USE:
+ if (!dumpTerse)
+ {
+ printf(" Last use, marked to be freed\n");
+ }
+ break;
+ case LSRA_EVENT_LAST_USE_DELAYED:
+ if (!dumpTerse)
+ {
+ printf(" Last use, marked to be freed (delayed)\n");
+ }
+ break;
+ case LSRA_EVENT_NEEDS_NEW_REG:
+ if (!dumpTerse)
+ {
+ printf(" Needs new register; mark %s to be freed\n", getRegName(reg));
+ }
+ else
+ {
+ printf("Free %-4s ", getRegName(reg));
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
- // Allocation decisions
- case LSRA_EVENT_FIXED_REG:
- case LSRA_EVENT_EXP_USE:
- if (!dumpTerse)
- {
- printf("No allocation\n");
- }
- else
- {
- printf("Keep %-4s ", getRegName(reg));
- }
- break;
- case LSRA_EVENT_ZERO_REF:
- assert(interval != nullptr && interval->isLocalVar);
- if (!dumpTerse)
- {
- printf("Marking V%02u as last use there are no actual references\n", interval->varNum);
- }
- else
- {
- printf("NoRef ");
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
- case LSRA_EVENT_KEPT_ALLOCATION:
- if (!dumpTerse)
- {
- printf("already allocated %4s\n", getRegName(reg));
- }
- else
- {
- printf("Keep %-4s ", getRegName(reg));
- }
- break;
- case LSRA_EVENT_COPY_REG:
- assert(interval != nullptr && interval->recentRefPosition != nullptr);
- if (!dumpTerse)
- {
- printf("allocated %s as copyReg\n\n", getRegName(reg));
- }
- else
- {
- printf("Copy %-4s ", getRegName(reg));
- }
- break;
- case LSRA_EVENT_MOVE_REG:
- assert(interval != nullptr && interval->recentRefPosition != nullptr);
- if (!dumpTerse)
- {
- printf(" needs a new register; marked as moveReg\n");
- }
- else
- {
- printf("Move %-4s ", getRegName(reg));
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
- case LSRA_EVENT_ALLOC_REG:
- if (!dumpTerse)
- {
- printf("allocated %s\n", getRegName(reg));
- }
- else
- {
- printf("Alloc %-4s ", getRegName(reg));
- }
- break;
- case LSRA_EVENT_REUSE_REG:
- if (!dumpTerse)
- {
- printf("reused constant in %s\n", getRegName(reg));
- }
- else
- {
- printf("Reuse %-4s ", getRegName(reg));
- }
- break;
- case LSRA_EVENT_ALLOC_SPILLED_REG:
- if (!dumpTerse)
- {
- printf("allocated spilled register %s\n", getRegName(reg));
- }
- else
- {
- printf("Steal %-4s ", getRegName(reg));
- }
- break;
- case LSRA_EVENT_NO_ENTRY_REG_ALLOCATED:
- assert(interval != nullptr && interval->isLocalVar);
- if (!dumpTerse)
- {
- printf("Not allocating an entry register for V%02u due to low ref count\n", interval->varNum);
- }
- else
- {
- printf("LoRef ");
- }
- break;
- case LSRA_EVENT_NO_REG_ALLOCATED:
- if (!dumpTerse)
- {
- printf("no register allocated\n");
- }
- else
- {
- printf("NoReg ");
- }
- break;
- case LSRA_EVENT_RELOAD:
- if (!dumpTerse)
- {
- printf(" Marked for reload\n");
- }
- else
- {
- printf("ReLod %-4s ", getRegName(reg));
- dumpRegRecords();
- dumpEmptyRefPosition();
- }
- break;
- case LSRA_EVENT_SPECIAL_PUTARG:
- if (!dumpTerse)
- {
- printf(" Special case of putArg - using lclVar that's in the expected reg\n");
- }
- else
- {
- printf("PtArg %-4s ", getRegName(reg));
- }
- break;
- default:
- break;
+ // Allocation decisions
+ case LSRA_EVENT_FIXED_REG:
+ case LSRA_EVENT_EXP_USE:
+ if (!dumpTerse)
+ {
+ printf("No allocation\n");
+ }
+ else
+ {
+ printf("Keep %-4s ", getRegName(reg));
+ }
+ break;
+ case LSRA_EVENT_ZERO_REF:
+ assert(interval != nullptr && interval->isLocalVar);
+ if (!dumpTerse)
+ {
+                printf("Marking V%02u as last use; there are no actual references\n", interval->varNum);
+ }
+ else
+ {
+ printf("NoRef ");
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
+ case LSRA_EVENT_KEPT_ALLOCATION:
+ if (!dumpTerse)
+ {
+ printf("already allocated %4s\n", getRegName(reg));
+ }
+ else
+ {
+ printf("Keep %-4s ", getRegName(reg));
+ }
+ break;
+ case LSRA_EVENT_COPY_REG:
+ assert(interval != nullptr && interval->recentRefPosition != nullptr);
+ if (!dumpTerse)
+ {
+ printf("allocated %s as copyReg\n\n", getRegName(reg));
+ }
+ else
+ {
+ printf("Copy %-4s ", getRegName(reg));
+ }
+ break;
+ case LSRA_EVENT_MOVE_REG:
+ assert(interval != nullptr && interval->recentRefPosition != nullptr);
+ if (!dumpTerse)
+ {
+ printf(" needs a new register; marked as moveReg\n");
+ }
+ else
+ {
+ printf("Move %-4s ", getRegName(reg));
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
+ case LSRA_EVENT_ALLOC_REG:
+ if (!dumpTerse)
+ {
+ printf("allocated %s\n", getRegName(reg));
+ }
+ else
+ {
+ printf("Alloc %-4s ", getRegName(reg));
+ }
+ break;
+ case LSRA_EVENT_REUSE_REG:
+ if (!dumpTerse)
+ {
+ printf("reused constant in %s\n", getRegName(reg));
+ }
+ else
+ {
+ printf("Reuse %-4s ", getRegName(reg));
+ }
+ break;
+ case LSRA_EVENT_ALLOC_SPILLED_REG:
+ if (!dumpTerse)
+ {
+ printf("allocated spilled register %s\n", getRegName(reg));
+ }
+ else
+ {
+ printf("Steal %-4s ", getRegName(reg));
+ }
+ break;
+ case LSRA_EVENT_NO_ENTRY_REG_ALLOCATED:
+ assert(interval != nullptr && interval->isLocalVar);
+ if (!dumpTerse)
+ {
+ printf("Not allocating an entry register for V%02u due to low ref count\n", interval->varNum);
+ }
+ else
+ {
+ printf("LoRef ");
+ }
+ break;
+ case LSRA_EVENT_NO_REG_ALLOCATED:
+ if (!dumpTerse)
+ {
+ printf("no register allocated\n");
+ }
+ else
+ {
+ printf("NoReg ");
+ }
+ break;
+ case LSRA_EVENT_RELOAD:
+ if (!dumpTerse)
+ {
+ printf(" Marked for reload\n");
+ }
+ else
+ {
+ printf("ReLod %-4s ", getRegName(reg));
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ }
+ break;
+ case LSRA_EVENT_SPECIAL_PUTARG:
+ if (!dumpTerse)
+ {
+ printf(" Special case of putArg - using lclVar that's in the expected reg\n");
+ }
+ else
+ {
+ printf("PtArg %-4s ", getRegName(reg));
+ }
+ break;
+ default:
+ break;
}
}
@@ -10707,9 +10791,8 @@ LinearScan::dumpLsraAllocationEvent(LsraDumpEvent event, Interval* interval, reg
// intervals, as Vnn for lclVar intervals, or as I<num> for other intervals.
// The table is indented by the amount needed for dumpRefPositionShort, which is
// captured in shortRefPositionDumpWidth.
-//
-void
-LinearScan::dumpRegRecordHeader()
+//
+void LinearScan::dumpRegRecordHeader()
{
printf("The following table has one or more rows for each RefPosition that is handled during allocation.\n"
"The first column provides the basic information about the RefPosition, with its type (e.g. Def,\n"
@@ -10743,15 +10826,16 @@ LinearScan::dumpRegRecordHeader()
// l is either '*' (if a last use) or ' ' (otherwise)
// d is either 'D' (if a delayed use) or ' ' (otherwise)
- maxNodeLocation = (maxNodeLocation == 0) ? 1: maxNodeLocation; // corner case of a method with an infinite loop without any gentree nodes
+ maxNodeLocation = (maxNodeLocation == 0)
+ ? 1
+ : maxNodeLocation; // corner case of a method with an infinite loop without any gentree nodes
assert(maxNodeLocation >= 1);
assert(refPositions.size() >= 1);
- int nodeLocationWidth = (int)log10((double)maxNodeLocation) + 1;
- int refPositionWidth = (int)log10((double)refPositions.size()) + 1;
- int refTypeInfoWidth = 4 /*TYPE*/ + 2 /* last-use and delayed */ + 1 /* space */;
- int locationAndRPNumWidth = nodeLocationWidth + 2 /* .# */ + refPositionWidth + 1 /* space */;
- int shortRefPositionDumpWidth = locationAndRPNumWidth +
- regColumnWidth + 1 /* space */ + refTypeInfoWidth;
+ int nodeLocationWidth = (int)log10((double)maxNodeLocation) + 1;
+ int refPositionWidth = (int)log10((double)refPositions.size()) + 1;
+ int refTypeInfoWidth = 4 /*TYPE*/ + 2 /* last-use and delayed */ + 1 /* space */;
+ int locationAndRPNumWidth = nodeLocationWidth + 2 /* .# */ + refPositionWidth + 1 /* space */;
+ int shortRefPositionDumpWidth = locationAndRPNumWidth + regColumnWidth + 1 /* space */ + refTypeInfoWidth;
sprintf_s(shortRefPositionFormat, MAX_FORMAT_CHARS, "%%%dd.#%%-%dd ", nodeLocationWidth, refPositionWidth);
sprintf_s(emptyRefPositionFormat, MAX_FORMAT_CHARS, "%%-%ds", shortRefPositionDumpWidth);
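The width calculations above size the dump columns from the largest node location and the RefPosition count; a positive integer n needs (int)log10(n) + 1 characters, and the infinite-loop corner case is clamped to 1. A small sketch of that computation with a hypothetical digitWidth helper:

    #include <cmath>
    #include <cstdio>

    // Number of characters needed to print n in decimal.
    static int digitWidth(unsigned n)
    {
        return (n == 0) ? 1 : (int)std::log10((double)n) + 1;
    }

    int main()
    {
        unsigned maxNodeLocation = 0; // e.g. a method with no gentree nodes
        maxNodeLocation = (maxNodeLocation == 0) ? 1 : maxNodeLocation;

        std::printf("%d %d %d\n", digitWidth(maxNodeLocation), digitWidth(42), digitWidth(1234));
        // prints: 1 2 4
        return 0;
    }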
@@ -10770,7 +10854,7 @@ LinearScan::dumpRegRecordHeader()
// BBnn printed left-justified in the NAME Typeld and allocationInfo space.
int bbDumpWidth = regColumnWidth + 1 + refTypeInfoWidth + allocationInfoWidth;
- int bbNumWidth = (int)log10((double)compiler->fgBBNumMax) + 1;
+ int bbNumWidth = (int)log10((double)compiler->fgBBNumMax) + 1;
// In the unlikely event that BB numbers overflow the space, we'll simply omit the predBB
int predBBNumDumpSpace = regTableIndent - locationAndRPNumWidth - bbNumWidth - 9; // 'BB' + ' PredBB'
if (predBBNumDumpSpace < bbNumWidth)
@@ -10782,39 +10866,38 @@ LinearScan::dumpRegRecordHeader()
sprintf_s(bbRefPosFormat, MAX_LEGEND_FORMAT_CHARS, "BB%%-%dd PredBB%%-%dd", bbNumWidth, predBBNumDumpSpace);
}
-
if (compiler->shouldDumpASCIITrees())
{
columnSeparator = "|";
- line = "-";
- leftBox = "+";
- middleBox = "+";
- rightBox = "+";
+ line = "-";
+ leftBox = "+";
+ middleBox = "+";
+ rightBox = "+";
}
else
{
columnSeparator = "\xe2\x94\x82";
- line = "\xe2\x94\x80";
- leftBox = "\xe2\x94\x9c";
- middleBox = "\xe2\x94\xbc";
- rightBox = "\xe2\x94\xa4";
+ line = "\xe2\x94\x80";
+ leftBox = "\xe2\x94\x9c";
+ middleBox = "\xe2\x94\xbc";
+ rightBox = "\xe2\x94\xa4";
}
sprintf_s(indentFormat, MAX_FORMAT_CHARS, "%%-%ds", regTableIndent);
// Now, set up the legend format for the RefPosition info
- sprintf_s(legendFormat, MAX_LEGEND_FORMAT_CHARS, "%%-%d.%ds%%-%d.%ds%%-%ds%%s", nodeLocationWidth+1, nodeLocationWidth+1, refPositionWidth+2, refPositionWidth+2, regColumnWidth+1);
+ sprintf_s(legendFormat, MAX_LEGEND_FORMAT_CHARS, "%%-%d.%ds%%-%d.%ds%%-%ds%%s", nodeLocationWidth + 1,
+ nodeLocationWidth + 1, refPositionWidth + 2, refPositionWidth + 2, regColumnWidth + 1);
// Finally, print a "title row" including the legend and the reg names
dumpRegRecordTitle();
}
-int
-LinearScan::getLastUsedRegNumIndex()
+int LinearScan::getLastUsedRegNumIndex()
{
- int lastUsedRegNumIndex = 0;
- regMaskTP usedRegsMask = compiler->codeGen->regSet.rsGetModifiedRegsMask();
- int lastRegNumIndex = compiler->compFloatingPointUsed ? REG_FP_LAST : REG_INT_LAST;
- for( int regNumIndex = 0; regNumIndex <= lastRegNumIndex; regNumIndex++)
+ int lastUsedRegNumIndex = 0;
+ regMaskTP usedRegsMask = compiler->codeGen->regSet.rsGetModifiedRegsMask();
+ int lastRegNumIndex = compiler->compFloatingPointUsed ? REG_FP_LAST : REG_INT_LAST;
+ for (int regNumIndex = 0; regNumIndex <= lastRegNumIndex; regNumIndex++)
{
if ((usedRegsMask & genRegMask((regNumber)regNumIndex)) != 0)
{
@@ -10824,15 +10907,14 @@ LinearScan::getLastUsedRegNumIndex()
return lastUsedRegNumIndex;
}
-void
-LinearScan::dumpRegRecordTitleLines()
+void LinearScan::dumpRegRecordTitleLines()
{
- for( int i = 0; i < regTableIndent; i++)
+ for (int i = 0; i < regTableIndent; i++)
{
printf("%s", line);
}
int lastUsedRegNumIndex = getLastUsedRegNumIndex();
- for( int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++)
+ for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++)
{
printf("%s", middleBox);
for (int i = 0; i < regColumnWidth; i++)
@@ -10842,8 +10924,7 @@ LinearScan::dumpRegRecordTitleLines()
}
printf("%s\n", rightBox);
}
-void
-LinearScan::dumpRegRecordTitle()
+void LinearScan::dumpRegRecordTitle()
{
dumpRegRecordTitleLines();
@@ -10854,9 +10935,9 @@ LinearScan::dumpRegRecordTitle()
char columnFormatArray[MAX_FORMAT_CHARS];
sprintf_s(columnFormatArray, MAX_FORMAT_CHARS, "%s%%-%d.%ds", columnSeparator, regColumnWidth, regColumnWidth);
int lastUsedRegNumIndex = getLastUsedRegNumIndex();
- for( int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++)
+ for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++)
{
- regNumber regNum = (regNumber)regNumIndex;
+ regNumber regNum = (regNumber)regNumIndex;
const char* regName = getRegName(regNum);
printf(columnFormatArray, regName);
}
@@ -10867,18 +10948,17 @@ LinearScan::dumpRegRecordTitle()
dumpRegRecordTitleLines();
}
-void
-LinearScan::dumpRegRecords()
+void LinearScan::dumpRegRecords()
{
static char columnFormatArray[18];
- int lastUsedRegNumIndex = getLastUsedRegNumIndex();
- regMaskTP usedRegsMask = compiler->codeGen->regSet.rsGetModifiedRegsMask();
-
- for( int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++)
+ int lastUsedRegNumIndex = getLastUsedRegNumIndex();
+ regMaskTP usedRegsMask = compiler->codeGen->regSet.rsGetModifiedRegsMask();
+
+ for (int regNumIndex = 0; regNumIndex <= lastUsedRegNumIndex; regNumIndex++)
{
printf("%s", columnSeparator);
RegRecord& regRecord = physRegs[regNumIndex];
- Interval* interval = regRecord.assignedInterval;
+ Interval* interval = regRecord.assignedInterval;
if (interval != nullptr)
{
dumpIntervalName(interval);
@@ -10909,8 +10989,7 @@ LinearScan::dumpRegRecords()
rowCountSinceLastTitle++;
}
-void
-LinearScan::dumpIntervalName(Interval* interval)
+void LinearScan::dumpIntervalName(Interval* interval)
{
char intervalChar;
if (interval->isLocalVar)
@@ -10928,16 +11007,14 @@ LinearScan::dumpIntervalName(Interval* interval)
printf(intervalNameFormat, intervalChar, interval->intervalIndex);
}
-void
-LinearScan::dumpEmptyRefPosition()
+void LinearScan::dumpEmptyRefPosition()
{
printf(emptyRefPositionFormat, "");
}
// Note that the size of this dump is computed in dumpRegRecordHeader().
//
-void
-LinearScan::dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBlock)
+void LinearScan::dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBlock)
{
BasicBlock* block = currentBlock;
if (refPosition->refType == RefTypeBB)
@@ -11005,8 +11082,7 @@ LinearScan::dumpRefPositionShort(RefPosition* refPosition, BasicBlock* currentBl
//
// Notes:
// If verbose is set, this will also dump a table of the final allocations.
-void
-LinearScan::verifyFinalAllocation()
+void LinearScan::verifyFinalAllocation()
{
if (VERBOSE)
{
@@ -11016,34 +11092,35 @@ LinearScan::verifyFinalAllocation()
// Clear register assignments.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
- RegRecord * physRegRecord = getRegisterRecord(reg);
+ RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
for (auto& interval : intervals)
{
interval.assignedReg = nullptr;
- interval.physReg = REG_NA;
+ interval.physReg = REG_NA;
}
DBEXEC(VERBOSE, dumpRegRecordTitle());
- BasicBlock* currentBlock = nullptr;
+ BasicBlock* currentBlock = nullptr;
GenTreeStmt* firstBlockEndResolutionStmt = nullptr;
- regMaskTP regsToFree = RBM_NONE;
- regMaskTP delayRegsToFree = RBM_NONE;
- LsraLocation currentLocation = MinLocation;
+ regMaskTP regsToFree = RBM_NONE;
+ regMaskTP delayRegsToFree = RBM_NONE;
+ LsraLocation currentLocation = MinLocation;
for (auto& refPosition : refPositions)
{
RefPosition* currentRefPosition = &refPosition;
- Interval* interval = nullptr;
- RegRecord* regRecord = nullptr;
- regNumber regNum = REG_NA;
+ Interval* interval = nullptr;
+ RegRecord* regRecord = nullptr;
+ regNumber regNum = REG_NA;
if (currentRefPosition->refType == RefTypeBB)
{
regsToFree |= delayRegsToFree;
delayRegsToFree = RBM_NONE;
- // For BB RefPositions, wait until we dump the "end of block" info before dumping the basic RefPosition info.
+ // For BB RefPositions, wait until we dump the "end of block" info before dumping the basic RefPosition
+ // info.
}
else
{
@@ -11052,23 +11129,24 @@ LinearScan::verifyFinalAllocation()
if (currentRefPosition->isPhysRegRef)
{
- regRecord = currentRefPosition->getReg();
+ regRecord = currentRefPosition->getReg();
regRecord->recentRefPosition = currentRefPosition;
- regNum = regRecord->regNum;
+ regNum = regRecord->regNum;
}
else if (currentRefPosition->isIntervalRef())
{
- interval = currentRefPosition->getInterval();
+ interval = currentRefPosition->getInterval();
interval->recentRefPosition = currentRefPosition;
if (currentRefPosition->registerAssignment != RBM_NONE)
{
if (!genMaxOneBit(currentRefPosition->registerAssignment))
{
- assert(currentRefPosition->refType == RefTypeExpUse || currentRefPosition->refType == RefTypeDummyDef);
+ assert(currentRefPosition->refType == RefTypeExpUse ||
+ currentRefPosition->refType == RefTypeDummyDef);
}
else
{
- regNum = currentRefPosition->assignedReg();
+ regNum = currentRefPosition->assignedReg();
regRecord = getRegisterRecord(regNum);
}
}
@@ -11086,18 +11164,18 @@ LinearScan::verifyFinalAllocation()
regMaskTP regMask = genRegMask(reg);
if ((regsToFree & regMask) != RBM_NONE)
{
- RegRecord * physRegRecord = getRegisterRecord(reg);
+ RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
}
- regsToFree= delayRegsToFree;
+ regsToFree = delayRegsToFree;
regsToFree = RBM_NONE;
}
currentLocation = newLocation;
- switch(currentRefPosition->refType)
+ switch (currentRefPosition->refType)
{
- case RefTypeBB:
+ case RefTypeBB:
{
if (currentBlock == nullptr)
{
@@ -11116,19 +11194,19 @@ LinearScan::verifyFinalAllocation()
VARSET_ITER_INIT(compiler, iter, currentBlock->bbLiveOut, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- regNumber regNum = getVarReg(outVarToRegMap, varNum);
- interval = getIntervalForLocalVar(varNum);
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ regNumber regNum = getVarReg(outVarToRegMap, varNum);
+ interval = getIntervalForLocalVar(varNum);
assert(interval->physReg == regNum || (interval->physReg == REG_NA && regNum == REG_STK));
- interval->physReg = REG_NA;
+ interval->physReg = REG_NA;
interval->assignedReg = nullptr;
- interval->isActive = false;
+ interval->isActive = false;
}
// Clear register assignments.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
- RegRecord * physRegRecord = getRegisterRecord(reg);
+ RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
@@ -11142,12 +11220,12 @@ LinearScan::verifyFinalAllocation()
VARSET_ITER_INIT(compiler, iter, currentBlock->bbLiveIn, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- regNumber regNum = getVarReg(inVarToRegMap, varNum);
- interval = getIntervalForLocalVar(varNum);
- interval->physReg = regNum;
- interval->assignedReg = &(physRegs[regNum]);
- interval->isActive = true;
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ regNumber regNum = getVarReg(inVarToRegMap, varNum);
+ interval = getIntervalForLocalVar(varNum);
+ interval->physReg = regNum;
+ interval->assignedReg = &(physRegs[regNum]);
+ interval->isActive = true;
physRegs[regNum].assignedInterval = interval;
}
@@ -11163,8 +11241,7 @@ LinearScan::verifyFinalAllocation()
if (currentBlock != nullptr)
{
for (GenTreeStmt* stmt = currentBlock->FirstNonPhiDef();
- stmt != nullptr && firstBlockEndResolutionStmt == nullptr;
- stmt = stmt->getNextStmt())
+ stmt != nullptr && firstBlockEndResolutionStmt == nullptr; stmt = stmt->getNextStmt())
{
if (stmt->gtStmtExpr->gtLsraInfo.isLsraAdded
#ifdef FEATURE_SIMD
@@ -11192,134 +11269,132 @@ LinearScan::verifyFinalAllocation()
break;
- case RefTypeKill:
- assert(regRecord != nullptr);
- assert(regRecord->assignedInterval == nullptr);
- dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
- break;
- case RefTypeFixedReg:
- assert(regRecord != nullptr);
- dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
- break;
+ case RefTypeKill:
+ assert(regRecord != nullptr);
+ assert(regRecord->assignedInterval == nullptr);
+ dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
+ break;
+ case RefTypeFixedReg:
+ assert(regRecord != nullptr);
+ dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
+ break;
- case RefTypeUpperVectorSaveDef:
- case RefTypeUpperVectorSaveUse:
- case RefTypeDef:
- case RefTypeUse:
- case RefTypeParamDef:
- case RefTypeZeroInit:
- assert(interval != nullptr);
+ case RefTypeUpperVectorSaveDef:
+ case RefTypeUpperVectorSaveUse:
+ case RefTypeDef:
+ case RefTypeUse:
+ case RefTypeParamDef:
+ case RefTypeZeroInit:
+ assert(interval != nullptr);
- if (interval->isSpecialPutArg)
- {
- dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, interval, regNum);
- break;
- }
- if (currentRefPosition->reload)
- {
- interval->isActive = true;
- assert(regNum != REG_NA);
- interval->physReg = regNum;
- interval->assignedReg = regRecord;
- regRecord->assignedInterval = interval;
- dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, nullptr, regRecord->regNum, currentBlock);
- }
- if (regNum == REG_NA)
- {
- dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, interval);
- }
- else if (RefTypeIsDef(currentRefPosition->refType))
- {
- interval->isActive = true;
- if (VERBOSE)
+ if (interval->isSpecialPutArg)
{
- if (interval->isConstant &&
- (currentRefPosition->treeNode != nullptr) &&
- currentRefPosition->treeNode->IsReuseRegVal())
- {
- dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, nullptr, regRecord->regNum, currentBlock);
- }
- else
- {
- dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_REG, nullptr, regRecord->regNum, currentBlock);
- }
+ dumpLsraAllocationEvent(LSRA_EVENT_SPECIAL_PUTARG, interval, regNum);
+ break;
}
- }
- else if (currentRefPosition->copyReg)
- {
- dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, interval, regRecord->regNum, currentBlock);
- }
- else if (currentRefPosition->moveReg)
- {
- assert(interval->assignedReg != nullptr);
- interval->assignedReg->assignedInterval = nullptr;
- interval->physReg = regNum;
- interval->assignedReg = regRecord;
- regRecord->assignedInterval = interval;
- if (VERBOSE)
+ if (currentRefPosition->reload)
{
- printf("Move %-4s ", getRegName(regRecord->regNum));
+ interval->isActive = true;
+ assert(regNum != REG_NA);
+ interval->physReg = regNum;
+ interval->assignedReg = regRecord;
+ regRecord->assignedInterval = interval;
+ dumpLsraAllocationEvent(LSRA_EVENT_RELOAD, nullptr, regRecord->regNum, currentBlock);
}
- }
- else
- {
- dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
- }
- if (currentRefPosition->lastUse || currentRefPosition->spillAfter)
- {
- interval->isActive = false;
- }
- if (regNum != REG_NA)
- {
- if (currentRefPosition->spillAfter)
+ if (regNum == REG_NA)
+ {
+ dumpLsraAllocationEvent(LSRA_EVENT_NO_REG_ALLOCATED, interval);
+ }
+ else if (RefTypeIsDef(currentRefPosition->refType))
{
+ interval->isActive = true;
if (VERBOSE)
{
- dumpRegRecords();
- dumpEmptyRefPosition();
- printf("Spill %-4s ", getRegName(regNum));
+ if (interval->isConstant && (currentRefPosition->treeNode != nullptr) &&
+ currentRefPosition->treeNode->IsReuseRegVal())
+ {
+ dumpLsraAllocationEvent(LSRA_EVENT_REUSE_REG, nullptr, regRecord->regNum, currentBlock);
+ }
+ else
+ {
+ dumpLsraAllocationEvent(LSRA_EVENT_ALLOC_REG, nullptr, regRecord->regNum, currentBlock);
+ }
}
}
else if (currentRefPosition->copyReg)
{
- regRecord->assignedInterval = interval;
+ dumpLsraAllocationEvent(LSRA_EVENT_COPY_REG, interval, regRecord->regNum, currentBlock);
+ }
+ else if (currentRefPosition->moveReg)
+ {
+ assert(interval->assignedReg != nullptr);
+ interval->assignedReg->assignedInterval = nullptr;
+ interval->physReg = regNum;
+ interval->assignedReg = regRecord;
+ regRecord->assignedInterval = interval;
+ if (VERBOSE)
+ {
+ printf("Move %-4s ", getRegName(regRecord->regNum));
+ }
}
else
{
- interval->physReg = regNum;
- interval->assignedReg = regRecord;
- regRecord->assignedInterval = interval;
+ dumpLsraAllocationEvent(LSRA_EVENT_KEPT_ALLOCATION, nullptr, regRecord->regNum, currentBlock);
}
- }
- break;
- case RefTypeKillGCRefs:
- // No action to take.
- // However, we will assert that, at resolution time, no registers contain GC refs.
- {
- DBEXEC(VERBOSE, printf(" "));
- regMaskTP candidateRegs = currentRefPosition->registerAssignment;
- while (candidateRegs != RBM_NONE)
+ if (currentRefPosition->lastUse || currentRefPosition->spillAfter)
{
- regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
- candidateRegs &= ~nextRegBit;
- regNumber nextReg = genRegNumFromMask(nextRegBit);
- RegRecord* regRecord = getRegisterRecord(nextReg);
- Interval* assignedInterval = regRecord->assignedInterval;
- assert (assignedInterval == nullptr ||
- !varTypeIsGC(assignedInterval->registerType));
+ interval->isActive = false;
}
- }
- break;
+ if (regNum != REG_NA)
+ {
+ if (currentRefPosition->spillAfter)
+ {
+ if (VERBOSE)
+ {
+ dumpRegRecords();
+ dumpEmptyRefPosition();
+ printf("Spill %-4s ", getRegName(regNum));
+ }
+ }
+ else if (currentRefPosition->copyReg)
+ {
+ regRecord->assignedInterval = interval;
+ }
+ else
+ {
+ interval->physReg = regNum;
+ interval->assignedReg = regRecord;
+ regRecord->assignedInterval = interval;
+ }
+ }
+ break;
+ case RefTypeKillGCRefs:
+ // No action to take.
+ // However, we will assert that, at resolution time, no registers contain GC refs.
+ {
+ DBEXEC(VERBOSE, printf(" "));
+ regMaskTP candidateRegs = currentRefPosition->registerAssignment;
+ while (candidateRegs != RBM_NONE)
+ {
+ regMaskTP nextRegBit = genFindLowestBit(candidateRegs);
+ candidateRegs &= ~nextRegBit;
+ regNumber nextReg = genRegNumFromMask(nextRegBit);
+ RegRecord* regRecord = getRegisterRecord(nextReg);
+ Interval* assignedInterval = regRecord->assignedInterval;
+ assert(assignedInterval == nullptr || !varTypeIsGC(assignedInterval->registerType));
+ }
+ }
+ break;
- case RefTypeExpUse:
- case RefTypeDummyDef:
- // Do nothing; these will be handled by the RefTypeBB.
- DBEXEC(VERBOSE, printf(" "));
- break;
+ case RefTypeExpUse:
+ case RefTypeDummyDef:
+ // Do nothing; these will be handled by the RefTypeBB.
+ DBEXEC(VERBOSE, printf(" "));
+ break;
- case RefTypeInvalid:
- // for these 'currentRefPosition->refType' values, No action to take
- break;
+ case RefTypeInvalid:
+ // for these 'currentRefPosition->refType' values, No action to take
+ break;
}
if (currentRefPosition->refType != RefTypeBB)
@@ -11336,7 +11411,7 @@ LinearScan::verifyFinalAllocation()
}
if (currentRefPosition->spillAfter || currentRefPosition->lastUse)
{
- interval->physReg = REG_NA;
+ interval->physReg = REG_NA;
interval->assignedReg = nullptr;
            // regRecord could be null if RefPosition is to be allocated a
@@ -11373,7 +11448,7 @@ LinearScan::verifyFinalAllocation()
// Clear register assignments.
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
- RegRecord * physRegRecord = getRegisterRecord(reg);
+ RegRecord* physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
@@ -11382,19 +11457,17 @@ LinearScan::verifyFinalAllocation()
VARSET_ITER_INIT(compiler, iter, currentBlock->bbLiveIn, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- regNumber regNum = getVarReg(inVarToRegMap, varNum);
- Interval* interval = getIntervalForLocalVar(varNum);
- interval->physReg = regNum;
- interval->assignedReg = &(physRegs[regNum]);
- interval->isActive = true;
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ regNumber regNum = getVarReg(inVarToRegMap, varNum);
+ Interval* interval = getIntervalForLocalVar(varNum);
+ interval->physReg = regNum;
+ interval->assignedReg = &(physRegs[regNum]);
+ interval->isActive = true;
physRegs[regNum].assignedInterval = interval;
}
// Verify the moves in this block
- for (GenTreeStmt* stmt = currentBlock->FirstNonPhiDef();
- stmt != nullptr;
- stmt = stmt->getNextStmt())
+ for (GenTreeStmt* stmt = currentBlock->FirstNonPhiDef(); stmt != nullptr; stmt = stmt->getNextStmt())
{
assert(stmt->gtStmtExpr->gtLsraInfo.isLsraAdded);
verifyResolutionMove(stmt, currentLocation);
@@ -11406,13 +11479,13 @@ LinearScan::verifyFinalAllocation()
VARSET_ITER_INIT(compiler, iter, currentBlock->bbLiveOut, varIndex);
while (iter.NextElem(compiler, &varIndex))
{
- unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
- regNumber regNum = getVarReg(outVarToRegMap, varNum);
- Interval* interval = getIntervalForLocalVar(varNum);
+ unsigned varNum = compiler->lvaTrackedToVarNum[varIndex];
+ regNumber regNum = getVarReg(outVarToRegMap, varNum);
+ Interval* interval = getIntervalForLocalVar(varNum);
assert(interval->physReg == regNum || (interval->physReg == REG_NA && regNum == REG_STK));
- interval->physReg = REG_NA;
+ interval->physReg = REG_NA;
interval->assignedReg = nullptr;
- interval->isActive = false;
+ interval->isActive = false;
}
}
}
@@ -11433,23 +11506,22 @@ LinearScan::verifyFinalAllocation()
//
// Notes:
// If verbose is set, this will also dump the moves into the table of final allocations.
-void
-LinearScan::verifyResolutionMove(GenTreeStmt* resolutionStmt, LsraLocation currentLocation)
+void LinearScan::verifyResolutionMove(GenTreeStmt* resolutionStmt, LsraLocation currentLocation)
{
GenTree* dst = resolutionStmt->gtStmtExpr;
if (dst->OperGet() == GT_SWAP)
{
- GenTreeLclVarCommon* left = dst->gtGetOp1()->AsLclVarCommon();
- GenTreeLclVarCommon* right = dst->gtGetOp2()->AsLclVarCommon();
- regNumber leftRegNum = left->gtRegNum;
- regNumber rightRegNum = right->gtRegNum;
- Interval* leftInterval = getIntervalForLocalVar(left->gtLclNum);
- Interval* rightInterval = getIntervalForLocalVar(right->gtLclNum);
+ GenTreeLclVarCommon* left = dst->gtGetOp1()->AsLclVarCommon();
+ GenTreeLclVarCommon* right = dst->gtGetOp2()->AsLclVarCommon();
+ regNumber leftRegNum = left->gtRegNum;
+ regNumber rightRegNum = right->gtRegNum;
+ Interval* leftInterval = getIntervalForLocalVar(left->gtLclNum);
+ Interval* rightInterval = getIntervalForLocalVar(right->gtLclNum);
assert(leftInterval->physReg == leftRegNum && rightInterval->physReg == rightRegNum);
- leftInterval->physReg = rightRegNum;
- rightInterval->physReg = leftRegNum;
+ leftInterval->physReg = rightRegNum;
+ rightInterval->physReg = leftRegNum;
physRegs[rightRegNum].assignedInterval = leftInterval;
- physRegs[leftRegNum].assignedInterval = rightInterval;
+ physRegs[leftRegNum].assignedInterval = rightInterval;
if (VERBOSE)
{
printf(shortRefPositionFormat, currentLocation, 0);
@@ -11465,12 +11537,12 @@ LinearScan::verifyResolutionMove(GenTreeStmt* resolutionStmt, LsraLocation curre
}
return;
}
- regNumber dstRegNum = dst->gtRegNum;
- regNumber srcRegNum;
+ regNumber dstRegNum = dst->gtRegNum;
+ regNumber srcRegNum;
GenTreeLclVarCommon* lcl;
if (dst->OperGet() == GT_COPY)
{
- lcl = dst->gtGetOp1()->AsLclVarCommon();
+ lcl = dst->gtGetOp1()->AsLclVarCommon();
srcRegNum = lcl->gtRegNum;
}
else
@@ -11495,16 +11567,16 @@ LinearScan::verifyResolutionMove(GenTreeStmt* resolutionStmt, LsraLocation curre
}
if (dstRegNum != REG_STK)
{
- interval->physReg = dstRegNum;
- interval->assignedReg = &(physRegs[dstRegNum]);
+ interval->physReg = dstRegNum;
+ interval->assignedReg = &(physRegs[dstRegNum]);
physRegs[dstRegNum].assignedInterval = interval;
- interval->isActive = true;
+ interval->isActive = true;
}
else
{
- interval->physReg = REG_NA;
+ interval->physReg = REG_NA;
interval->assignedReg = nullptr;
- interval->isActive = false;
+ interval->isActive = false;
}
if (VERBOSE)
{
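
// ----------------------------------------------------------------------------------------
// Illustrative sketch (not part of the diff above; a simplified, self-contained C++ program
// with hypothetical values for maxNodeLocation, refPositions.size() and regColumnWidth):
// how dumpRegRecordHeader-style column widths and printf formats can be derived from log10
// of the largest location and RefPosition numbers, mirroring the width computation shown in
// the lsra.cpp hunk above.
// ----------------------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

int main()
{
    const unsigned maxNodeLocation  = 137; // hypothetical largest node location
    const unsigned refPositionCount = 412; // hypothetical refPositions.size()
    const int      regColumnWidth   = 4;   // wide enough for a register name such as "xmm7"

    // One column per decimal digit of the largest value: (int)log10(x) + 1.
    int nodeLocationWidth         = (int)log10((double)maxNodeLocation) + 1;
    int refPositionWidth          = (int)log10((double)refPositionCount) + 1;
    int refTypeInfoWidth          = 4 /*TYPE*/ + 2 /* last-use and delayed */ + 1 /* space */;
    int locationAndRPNumWidth     = nodeLocationWidth + 2 /* .# */ + refPositionWidth + 1 /* space */;
    int shortRefPositionDumpWidth = locationAndRPNumWidth + regColumnWidth + 1 /* space */ + refTypeInfoWidth;

    // Build a format such as "%3d.#%-3d " whose field widths match the computed columns.
    char shortRefPositionFormat[32];
    snprintf(shortRefPositionFormat, sizeof(shortRefPositionFormat), "%%%dd.#%%-%dd ", nodeLocationWidth,
             refPositionWidth);

    printf("short dump width: %d\n", shortRefPositionDumpWidth);
    printf(shortRefPositionFormat, 12, 7); // location 12, RefPosition #7, padded to the derived widths
    printf("\n");
    return 0;
}
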
diff --git a/src/jit/lsra.h b/src/jit/lsra.h
index 7832f5ae45..05d6ecf16d 100644
--- a/src/jit/lsra.h
+++ b/src/jit/lsra.h
@@ -16,7 +16,7 @@ class RefPosition;
class LinearScan;
class RegRecord;
-template<class T>
+template <class T>
class ArrayStack;
// LsraLocation tracks the linearized order of the nodes.
@@ -24,54 +24,55 @@ class ArrayStack;
// def, and a second location for the last def (if any)
typedef unsigned int LsraLocation;
-const unsigned int MinLocation = 0;
-const unsigned int MaxLocation = UINT_MAX;
+const unsigned int MinLocation = 0;
+const unsigned int MaxLocation = UINT_MAX;
// max number of registers an operation could require internally (in addition to uses and defs)
const unsigned int MaxInternalRegisters = 8;
-const unsigned int RegisterTypeCount = 2;
+const unsigned int RegisterTypeCount = 2;
typedef var_types RegisterType;
#define IntRegisterType TYP_INT
#define FloatRegisterType TYP_FLOAT
-inline
-regMaskTP calleeSaveRegs(RegisterType rt)
+inline regMaskTP calleeSaveRegs(RegisterType rt)
{
return varTypeIsIntegralOrI(rt) ? RBM_INT_CALLEE_SAVED : RBM_FLT_CALLEE_SAVED;
}
struct LocationInfo
{
- LsraLocation loc;
-
+ LsraLocation loc;
+
// Reg Index in case of multi-reg result producing call node.
// Indicates the position of the register that this location refers to.
    // The max bits needed is based on the max value of MAX_RET_REG_COUNT
    // across all targets, which happens to be 4 on Arm. Hence the index value
- // would be 0..MAX_RET_REG_COUNT-1.
- unsigned multiRegIdx : 2;
+ // would be 0..MAX_RET_REG_COUNT-1.
+ unsigned multiRegIdx : 2;
- Interval* interval;
- GenTree* treeNode;
+ Interval* interval;
+ GenTree* treeNode;
LocationInfo(LsraLocation l, Interval* i, GenTree* t, unsigned regIdx = 0)
- : loc(l), multiRegIdx(regIdx), interval(i), treeNode(t)
+ : loc(l), multiRegIdx(regIdx), interval(i), treeNode(t)
{
assert(multiRegIdx == regIdx);
}
// default constructor for data structures
- LocationInfo() {}
+ LocationInfo()
+ {
+ }
};
struct LsraBlockInfo
{
// bbNum of the predecessor to use for the register location of live-in variables.
// 0 for fgFirstBB.
- BasicBlock::weight_t weight;
- unsigned int predBBNum;
- bool hasCriticalInEdge;
- bool hasCriticalOutEdge;
+ BasicBlock::weight_t weight;
+ unsigned int predBBNum;
+ bool hasCriticalInEdge;
+ bool hasCriticalOutEdge;
};
// This is sort of a bit mask
@@ -86,17 +87,22 @@ enum RefType : unsigned char
// position in a block (for resolution)
enum BlockStartOrEnd
{
- BlockPositionStart=0, BlockPositionEnd=1, PositionCount=2
+ BlockPositionStart = 0,
+ BlockPositionEnd = 1,
+ PositionCount = 2
};
+inline bool RefTypeIsUse(RefType refType)
+{
+ return ((refType & RefTypeUse) == RefTypeUse);
+}
-inline bool
-RefTypeIsUse(RefType refType) { return ((refType & RefTypeUse) == RefTypeUse); }
-
-inline bool
-RefTypeIsDef(RefType refType) { return ((refType & RefTypeDef) == RefTypeDef); }
+inline bool RefTypeIsDef(RefType refType)
+{
+ return ((refType & RefTypeDef) == RefTypeDef);
+}
-typedef regNumber * VarToRegMap;
+typedef regNumber* VarToRegMap;
template <typename ElementType, CompMemKind MemKind>
class ListElementAllocator
@@ -108,14 +114,12 @@ private:
Compiler* m_compiler;
public:
- ListElementAllocator(Compiler* compiler)
- : m_compiler(compiler)
+ ListElementAllocator(Compiler* compiler) : m_compiler(compiler)
{
}
template <typename U>
- ListElementAllocator(const ListElementAllocator<U, MemKind>& other)
- : m_compiler(other.m_compiler)
+ ListElementAllocator(const ListElementAllocator<U, MemKind>& other) : m_compiler(other.m_compiler)
{
}
@@ -135,10 +139,10 @@ public:
};
};
-typedef ListElementAllocator<Interval, CMK_LSRA_Interval> LinearScanMemoryAllocatorInterval;
+typedef ListElementAllocator<Interval, CMK_LSRA_Interval> LinearScanMemoryAllocatorInterval;
typedef ListElementAllocator<RefPosition, CMK_LSRA_RefPosition> LinearScanMemoryAllocatorRefPosition;
-typedef jitstd::list<Interval, LinearScanMemoryAllocatorInterval> IntervalList;
+typedef jitstd::list<Interval, LinearScanMemoryAllocatorInterval> IntervalList;
typedef jitstd::list<RefPosition, LinearScanMemoryAllocatorRefPosition> RefPositionList;
class Referenceable
@@ -156,18 +160,17 @@ public:
// direction, and are not moved, so they don't need to be doubly linked
// (see RefPosition).
- RefPosition * firstRefPosition;
- RefPosition * recentRefPosition;
- RefPosition * lastRefPosition;
+ RefPosition* firstRefPosition;
+ RefPosition* recentRefPosition;
+ RefPosition* lastRefPosition;
- bool isActive;
+ bool isActive;
// Get the position of the next reference which is at or greater than
    // the current location (relies upon recentRefPosition being updated
// during traversal).
- RefPosition * getNextRefPosition();
- LsraLocation getNextRefLocation();
-
+ RefPosition* getNextRefPosition();
+ LsraLocation getNextRefLocation();
};
class RegRecord : public Referenceable
@@ -175,40 +178,38 @@ class RegRecord : public Referenceable
public:
RegRecord()
{
- assignedInterval = nullptr;
- previousInterval = nullptr;
- regNum = REG_NA;
- isCalleeSave = false;
- registerType = IntRegisterType;
+ assignedInterval = nullptr;
+ previousInterval = nullptr;
+ regNum = REG_NA;
+ isCalleeSave = false;
+ registerType = IntRegisterType;
isBusyUntilNextKill = false;
}
-void
-init(regNumber reg)
-{
-#ifdef _TARGET_ARM64_
- // The Zero register, or the SP
- if ((reg == REG_ZR) || (reg == REG_SP))
+ void init(regNumber reg)
{
- // IsGeneralRegister returns false for REG_ZR and REG_SP
- regNum = reg;
- registerType = IntRegisterType;
- }
- else
+#ifdef _TARGET_ARM64_
+ // The Zero register, or the SP
+ if ((reg == REG_ZR) || (reg == REG_SP))
+ {
+ // IsGeneralRegister returns false for REG_ZR and REG_SP
+ regNum = reg;
+ registerType = IntRegisterType;
+ }
+ else
#endif
- if (emitter::isFloatReg(reg))
- {
- registerType = FloatRegisterType;
- }
- else
- {
- // The constructor defaults to IntRegisterType
- assert(emitter::isGeneralRegister(reg) && registerType == IntRegisterType);
+ if (emitter::isFloatReg(reg))
+ {
+ registerType = FloatRegisterType;
+ }
+ else
+ {
+ // The constructor defaults to IntRegisterType
+ assert(emitter::isGeneralRegister(reg) && registerType == IntRegisterType);
+ }
+ regNum = reg;
+ isCalleeSave = ((RBM_CALLEE_SAVED & genRegMask(reg)) != 0);
}
- regNum = reg;
- isCalleeSave = ((RBM_CALLEE_SAVED & genRegMask(reg)) != 0);
-}
-
#ifdef DEBUG
// print out representation
@@ -219,8 +220,8 @@ init(regNumber reg)
bool isFree();
- //RefPosition * getNextRefPosition();
- //LsraLocation getNextRefLocation();
+ // RefPosition * getNextRefPosition();
+ // LsraLocation getNextRefLocation();
// DATA
@@ -228,54 +229,65 @@ init(regNumber reg)
// If the interval is inactive (isActive == false) then it is not currently live,
    // and the register can be unassigned (i.e. setting assignedInterval to nullptr)
// without spilling the register.
- Interval* assignedInterval;
+ Interval* assignedInterval;
// Interval to which this register was previously allocated, and which was unassigned
// because it was inactive. This register will be reassigned to this Interval when
// assignedInterval becomes inactive.
- Interval* previousInterval;
+ Interval* previousInterval;
- regNumber regNum;
- bool isCalleeSave;
- RegisterType registerType;
+ regNumber regNum;
+ bool isCalleeSave;
+ RegisterType registerType;
// This register must be considered busy until the next time it is explicitly killed.
// This is used so that putarg_reg can avoid killing its lclVar source, while avoiding
// the problem with the reg becoming free if the last-use is encountered before the call.
- bool isBusyUntilNextKill;
+ bool isBusyUntilNextKill;
- bool conflictingFixedRegReference(RefPosition* refPosition);
+ bool conflictingFixedRegReference(RefPosition* refPosition);
};
-
-inline bool leafInRange(GenTree *leaf, int lower, int upper)
+inline bool leafInRange(GenTree* leaf, int lower, int upper)
{
if (!leaf->IsIntCnsFitsInI32())
+ {
return false;
+ }
if (leaf->gtIntCon.gtIconVal < lower)
+ {
return false;
+ }
if (leaf->gtIntCon.gtIconVal > upper)
+ {
return false;
+ }
return true;
}
-inline bool leafInRange(GenTree *leaf, int lower, int upper, int multiple)
+inline bool leafInRange(GenTree* leaf, int lower, int upper, int multiple)
{
if (!leafInRange(leaf, lower, upper))
+ {
return false;
+ }
if (leaf->gtIntCon.gtIconVal % multiple)
+ {
return false;
+ }
return true;
}
-inline bool leafAddInRange(GenTree *leaf, int lower, int upper, int multiple=1)
+inline bool leafAddInRange(GenTree* leaf, int lower, int upper, int multiple = 1)
{
if (leaf->OperGet() != GT_ADD)
+ {
return false;
+ }
return leafInRange(leaf->gtOp.gtOp2, lower, upper, multiple);
}
-inline bool isCandidateVar(LclVarDsc * varDsc)
+inline bool isCandidateVar(LclVarDsc* varDsc)
{
return varDsc->lvLRACandidate;
}
@@ -291,7 +303,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// OPTION 1: The algorithm as described in "Optimized Interval Splitting in a
-// Linear Scan Register Allocator". It is driven by iterating over the Interval
+// Linear Scan Register Allocator". It is driven by iterating over the Interval
// lists. In this case, we need multiple IntervalLists, and Intervals will be
// moved between them so they must be easily updated.
@@ -312,14 +324,12 @@ class LinearScan : public LinearScanInterface
friend class TreeNodeInfo;
public:
-
// This could use further abstraction. From Compiler we need the tree,
// the flowgraph and the allocator.
- LinearScan(Compiler * theCompiler);
+ LinearScan(Compiler* theCompiler);
// This is the main driver
- virtual
- void doLinearScan();
+ virtual void doLinearScan();
    // TreeNodeInfo contains three register masks: src candidates, dst candidates, and internal candidates.
// Instead of storing actual register masks, however, which are large, we store a small index into a table
@@ -341,12 +351,17 @@ public:
#endif
regMaskTP* regMaskTable;
- int nextFreeMask;
+ int nextFreeMask;
typedef int RegMaskIndex;
// allint is 0, allfloat is 1, all the single-bit masks start at 2
- enum KnownRegIndex { ALLINT_IDX = 0, ALLFLOAT_IDX = 1, FIRST_SINGLE_REG_IDX = 2 };
+ enum KnownRegIndex
+ {
+ ALLINT_IDX = 0,
+ ALLFLOAT_IDX = 1,
+ FIRST_SINGLE_REG_IDX = 2
+ };
RegMaskIndex GetIndexForRegMask(regMaskTP mask);
regMaskTP GetRegMaskForIndex(RegMaskIndex index);
@@ -359,73 +374,78 @@ public:
// Initialize the block traversal for LSRA.
// This resets the bbVisitedSet, and on the first invocation sets the blockSequence array,
// which determines the order in which blocks will be allocated (currently called during Lowering).
- BasicBlock* startBlockSequence();
+ BasicBlock* startBlockSequence();
// Move to the next block in sequence, updating the current block information.
- BasicBlock* moveToNextBlock();
+ BasicBlock* moveToNextBlock();
// Get the next block to be scheduled without changing the current block,
// but updating the blockSequence during the first iteration if it is not fully computed.
- BasicBlock* getNextBlock();
+ BasicBlock* getNextBlock();
// This is called during code generation to update the location of variables
- virtual void recordVarLocationsAtStartOfBB(BasicBlock *bb);
+ virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb);
// This does the dataflow analysis and builds the intervals
- void buildIntervals();
+ void buildIntervals();
// This is where the actual assignment is done
- void allocateRegisters();
+ void allocateRegisters();
// This is the resolution phase, where cross-block mismatches are fixed up
- void resolveRegisters();
+ void resolveRegisters();
- void writeRegisters(RefPosition *currentRefPosition, GenTree *tree);
+ void writeRegisters(RefPosition* currentRefPosition, GenTree* tree);
// Insert a copy in the case where a tree node value must be moved to a different
// register at the point of use, or it is reloaded to a different register
// than the one it was spilled from
- void insertCopyOrReload(GenTreePtr tree, unsigned multiRegIdx, RefPosition* refPosition);
+ void insertCopyOrReload(GenTreePtr tree, unsigned multiRegIdx, RefPosition* refPosition);
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// Insert code to save and restore the upper half of a vector that lives
// in a callee-save register at the point of a call (the upper half is
// not preserved).
- void insertUpperVectorSaveAndReload(GenTreePtr tree, RefPosition* refPosition, BasicBlock* block);
+ void insertUpperVectorSaveAndReload(GenTreePtr tree, RefPosition* refPosition, BasicBlock* block);
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
// resolve along one block-block edge
- enum ResolveType { ResolveSplit, ResolveJoin, ResolveCritical, ResolveSharedCritical, ResolveTypeCount };
+ enum ResolveType
+ {
+ ResolveSplit,
+ ResolveJoin,
+ ResolveCritical,
+ ResolveSharedCritical,
+ ResolveTypeCount
+ };
#ifdef DEBUG
- static const char* resolveTypeName[ResolveTypeCount];
+ static const char* resolveTypeName[ResolveTypeCount];
#endif
- enum WhereToInsert { InsertAtTop, InsertAtBottom };
+ enum WhereToInsert
+ {
+ InsertAtTop,
+ InsertAtBottom
+ };
- void addResolution(BasicBlock* block,
- GenTreePtr insertionPoint,
- Interval* interval,
- regNumber outReg,
- regNumber inReg);
+ void addResolution(
+ BasicBlock* block, GenTreePtr insertionPoint, Interval* interval, regNumber outReg, regNumber inReg);
- void handleOutgoingCriticalEdges(BasicBlock* block);
+ void handleOutgoingCriticalEdges(BasicBlock* block);
- void resolveEdge (BasicBlock* fromBlock,
- BasicBlock* toBlock,
- ResolveType resolveType,
- VARSET_VALARG_TP liveSet);
+ void resolveEdge(BasicBlock* fromBlock, BasicBlock* toBlock, ResolveType resolveType, VARSET_VALARG_TP liveSet);
- void resolveEdges();
+ void resolveEdges();
// Finally, the register assignments are written back to the tree nodes.
- void recordRegisterAssignments();
+ void recordRegisterAssignments();
// Keep track of how many temp locations we'll need for spill
- void initMaxSpill();
- void updateMaxSpill(RefPosition* refPosition);
- void recordMaxSpill();
+ void initMaxSpill();
+ void updateMaxSpill(RefPosition* refPosition);
+ void recordMaxSpill();
// max simultaneous spill locations used of every type
- unsigned int maxSpill [TYP_COUNT];
- unsigned int currentSpill [TYP_COUNT];
+ unsigned int maxSpill[TYP_COUNT];
+ unsigned int currentSpill[TYP_COUNT];
bool needFloatTmpForFPCall;
bool needDoubleTmpForFPCall;
@@ -443,14 +463,11 @@ private:
// Note that the field values are declared in a public enum, but the actual bits are
// only accessed via accessors.
- unsigned lsraStressMask;
+ unsigned lsraStressMask;
// This controls the registers available for allocation
- enum LsraStressLimitRegs { LSRA_LIMIT_NONE = 0,
- LSRA_LIMIT_CALLEE = 0x1,
- LSRA_LIMIT_CALLER = 0x2,
- LSRA_LIMIT_SMALL_SET = 0x3,
- LSRA_LIMIT_MASK = 0x3 };
+ enum LsraStressLimitRegs{LSRA_LIMIT_NONE = 0, LSRA_LIMIT_CALLEE = 0x1, LSRA_LIMIT_CALLER = 0x2,
+ LSRA_LIMIT_SMALL_SET = 0x3, LSRA_LIMIT_MASK = 0x3};
// When LSRA_LIMIT_SMALL_SET is specified, it is desirable to select a "mixed" set of caller- and callee-save
// registers, so as to get different coverage than limiting to callee or caller.
@@ -462,180 +479,229 @@ private:
#if defined(_TARGET_AMD64_)
#ifdef UNIX_AMD64_ABI
    // On System V the RDI and RSI are not callee saved. Use R12 and R13 as callee saved registers.
- static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_R12 | RBM_R13);
-#else // !UNIX_AMD64_ABI
+ static const regMaskTP LsraLimitSmallIntSet =
+ (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_R12 | RBM_R13);
+#else // !UNIX_AMD64_ABI
// On Windows Amd64 use the RDI and RSI as callee saved registers.
- static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_ESI | RBM_EDI);
+ static const regMaskTP LsraLimitSmallIntSet =
+ (RBM_EAX | RBM_ECX | RBM_EBX | RBM_ETW_FRAMED_EBP | RBM_ESI | RBM_EDI);
#endif // !UNIX_AMD64_ABI
- static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7 );
+ static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7);
#elif defined(_TARGET_ARM_)
- static const regMaskTP LsraLimitSmallIntSet = (RBM_R0|RBM_R1|RBM_R2|RBM_R3|RBM_R4);
- static const regMaskTP LsraLimitSmallFPSet = (RBM_F0|RBM_F1|RBM_F2|RBM_F16|RBM_F17);
+ static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4);
+ static const regMaskTP LsraLimitSmallFPSet = (RBM_F0 | RBM_F1 | RBM_F2 | RBM_F16 | RBM_F17);
#elif defined(_TARGET_ARM64_)
- static const regMaskTP LsraLimitSmallIntSet = (RBM_R0|RBM_R1|RBM_R2|RBM_R19|RBM_R20);
- static const regMaskTP LsraLimitSmallFPSet = (RBM_V0|RBM_V1|RBM_V2|RBM_V8|RBM_V9);
+ static const regMaskTP LsraLimitSmallIntSet = (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R19 | RBM_R20);
+ static const regMaskTP LsraLimitSmallFPSet = (RBM_V0 | RBM_V1 | RBM_V2 | RBM_V8 | RBM_V9);
#elif defined(_TARGET_X86_)
- static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EDI);
- static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7 );
+ static const regMaskTP LsraLimitSmallIntSet = (RBM_EAX | RBM_ECX | RBM_EDI);
+ static const regMaskTP LsraLimitSmallFPSet = (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM6 | RBM_XMM7);
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif // target
- LsraStressLimitRegs getStressLimitRegs() { return (LsraStressLimitRegs) (lsraStressMask & LSRA_LIMIT_MASK); }
- regMaskTP stressLimitRegs(RefPosition* refPosition, regMaskTP mask);
+ LsraStressLimitRegs getStressLimitRegs()
+ {
+ return (LsraStressLimitRegs)(lsraStressMask & LSRA_LIMIT_MASK);
+ }
+ regMaskTP stressLimitRegs(RefPosition* refPosition, regMaskTP mask);
// This controls the heuristics used to select registers
// These can be combined.
- enum LsraSelect { LSRA_SELECT_DEFAULT = 0,
- LSRA_SELECT_REVERSE_HEURISTICS = 0x04,
- LSRA_SELECT_REVERSE_CALLER_CALLEE = 0x08,
- LSRA_SELECT_NEAREST = 0x10,
- LSRA_SELECT_MASK = 0x1c };
- LsraSelect getSelectionHeuristics() { return (LsraSelect) (lsraStressMask & LSRA_SELECT_MASK); }
- bool doReverseSelect() { return ((lsraStressMask & LSRA_SELECT_REVERSE_HEURISTICS) != 0); }
- bool doReverseCallerCallee() { return ((lsraStressMask & LSRA_SELECT_REVERSE_CALLER_CALLEE) != 0); }
- bool doSelectNearest() { return ((lsraStressMask & LSRA_SELECT_NEAREST) != 0); }
+ enum LsraSelect{LSRA_SELECT_DEFAULT = 0, LSRA_SELECT_REVERSE_HEURISTICS = 0x04,
+ LSRA_SELECT_REVERSE_CALLER_CALLEE = 0x08, LSRA_SELECT_NEAREST = 0x10, LSRA_SELECT_MASK = 0x1c};
+ LsraSelect getSelectionHeuristics()
+ {
+ return (LsraSelect)(lsraStressMask & LSRA_SELECT_MASK);
+ }
+ bool doReverseSelect()
+ {
+ return ((lsraStressMask & LSRA_SELECT_REVERSE_HEURISTICS) != 0);
+ }
+ bool doReverseCallerCallee()
+ {
+ return ((lsraStressMask & LSRA_SELECT_REVERSE_CALLER_CALLEE) != 0);
+ }
+ bool doSelectNearest()
+ {
+ return ((lsraStressMask & LSRA_SELECT_NEAREST) != 0);
+ }
// This controls the order in which basic blocks are visited during allocation
- enum LsraTraversalOrder { LSRA_TRAVERSE_LAYOUT = 0x20,
- LSRA_TRAVERSE_PRED_FIRST = 0x40,
- LSRA_TRAVERSE_RANDOM = 0x60, // NYI
- LSRA_TRAVERSE_DEFAULT = LSRA_TRAVERSE_PRED_FIRST,
- LSRA_TRAVERSE_MASK = 0x60 };
- LsraTraversalOrder getLsraTraversalOrder()
+ enum LsraTraversalOrder{LSRA_TRAVERSE_LAYOUT = 0x20, LSRA_TRAVERSE_PRED_FIRST = 0x40,
+ LSRA_TRAVERSE_RANDOM = 0x60, // NYI
+ LSRA_TRAVERSE_DEFAULT = LSRA_TRAVERSE_PRED_FIRST, LSRA_TRAVERSE_MASK = 0x60};
+ LsraTraversalOrder getLsraTraversalOrder()
{
if ((lsraStressMask & LSRA_TRAVERSE_MASK) == 0)
{
return LSRA_TRAVERSE_DEFAULT;
}
- return (LsraTraversalOrder) (lsraStressMask & LSRA_TRAVERSE_MASK);
+ return (LsraTraversalOrder)(lsraStressMask & LSRA_TRAVERSE_MASK);
+ }
+ bool isTraversalLayoutOrder()
+ {
+ return getLsraTraversalOrder() == LSRA_TRAVERSE_LAYOUT;
+ }
+ bool isTraversalPredFirstOrder()
+ {
+ return getLsraTraversalOrder() == LSRA_TRAVERSE_PRED_FIRST;
}
- bool isTraversalLayoutOrder () { return getLsraTraversalOrder() == LSRA_TRAVERSE_LAYOUT; }
- bool isTraversalPredFirstOrder () { return getLsraTraversalOrder() == LSRA_TRAVERSE_PRED_FIRST; }
// This controls whether lifetimes should be extended to the entire method.
// Note that this has no effect under MinOpts
- enum LsraExtendLifetimes { LSRA_DONT_EXTEND = 0,
- LSRA_EXTEND_LIFETIMES = 0x80,
- LSRA_EXTEND_LIFETIMES_MASK = 0x80 };
- LsraExtendLifetimes getLsraExtendLifeTimes() { return (LsraExtendLifetimes) (lsraStressMask & LSRA_EXTEND_LIFETIMES_MASK); }
- bool extendLifetimes() { return getLsraExtendLifeTimes() == LSRA_EXTEND_LIFETIMES; }
+ enum LsraExtendLifetimes{LSRA_DONT_EXTEND = 0, LSRA_EXTEND_LIFETIMES = 0x80, LSRA_EXTEND_LIFETIMES_MASK = 0x80};
+ LsraExtendLifetimes getLsraExtendLifeTimes()
+ {
+ return (LsraExtendLifetimes)(lsraStressMask & LSRA_EXTEND_LIFETIMES_MASK);
+ }
+ bool extendLifetimes()
+ {
+ return getLsraExtendLifeTimes() == LSRA_EXTEND_LIFETIMES;
+ }
// This controls whether variables locations should be set to the previous block in layout order
// (LSRA_BLOCK_BOUNDARY_LAYOUT), or to that of the highest-weight predecessor (LSRA_BLOCK_BOUNDARY_PRED -
// the default), or rotated (LSRA_BLOCK_BOUNDARY_ROTATE).
- enum LsraBlockBoundaryLocations { LSRA_BLOCK_BOUNDARY_PRED = 0,
- LSRA_BLOCK_BOUNDARY_LAYOUT = 0x100,
- LSRA_BLOCK_BOUNDARY_ROTATE = 0x200,
- LSRA_BLOCK_BOUNDARY_MASK = 0x300 };
- LsraBlockBoundaryLocations getLsraBlockBoundaryLocations() { return (LsraBlockBoundaryLocations) (lsraStressMask & LSRA_BLOCK_BOUNDARY_MASK); }
- regNumber rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs);
+ enum LsraBlockBoundaryLocations{LSRA_BLOCK_BOUNDARY_PRED = 0, LSRA_BLOCK_BOUNDARY_LAYOUT = 0x100,
+ LSRA_BLOCK_BOUNDARY_ROTATE = 0x200, LSRA_BLOCK_BOUNDARY_MASK = 0x300};
+ LsraBlockBoundaryLocations getLsraBlockBoundaryLocations()
+ {
+ return (LsraBlockBoundaryLocations)(lsraStressMask & LSRA_BLOCK_BOUNDARY_MASK);
+ }
+ regNumber rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs);
// This controls whether we always insert a GT_RELOAD instruction after a spill
// Note that this can be combined with LsraSpillAlways (or not)
- enum LsraReload { LSRA_NO_RELOAD_IF_SAME = 0,
- LSRA_ALWAYS_INSERT_RELOAD = 0x400,
- LSRA_RELOAD_MASK = 0x400 };
- LsraReload getLsraReload() { return (LsraReload) (lsraStressMask & LSRA_RELOAD_MASK); }
- bool alwaysInsertReload() { return getLsraReload() == LSRA_ALWAYS_INSERT_RELOAD; }
+ enum LsraReload{LSRA_NO_RELOAD_IF_SAME = 0, LSRA_ALWAYS_INSERT_RELOAD = 0x400, LSRA_RELOAD_MASK = 0x400};
+ LsraReload getLsraReload()
+ {
+ return (LsraReload)(lsraStressMask & LSRA_RELOAD_MASK);
+ }
+ bool alwaysInsertReload()
+ {
+ return getLsraReload() == LSRA_ALWAYS_INSERT_RELOAD;
+ }
// This controls whether we spill everywhere
- enum LsraSpill { LSRA_DONT_SPILL_ALWAYS = 0,
- LSRA_SPILL_ALWAYS = 0x800,
- LSRA_SPILL_MASK = 0x800 };
- LsraSpill getLsraSpill() { return (LsraSpill) (lsraStressMask & LSRA_SPILL_MASK); }
- bool spillAlways() { return getLsraSpill() == LSRA_SPILL_ALWAYS; }
+ enum LsraSpill{LSRA_DONT_SPILL_ALWAYS = 0, LSRA_SPILL_ALWAYS = 0x800, LSRA_SPILL_MASK = 0x800};
+ LsraSpill getLsraSpill()
+ {
+ return (LsraSpill)(lsraStressMask & LSRA_SPILL_MASK);
+ }
+ bool spillAlways()
+ {
+ return getLsraSpill() == LSRA_SPILL_ALWAYS;
+ }
// This controls whether RefPositions that lower/codegen indicated as reg optional be
// allocated a reg at all.
- enum LsraRegOptionalControl { LSRA_REG_OPTIONAL_DEFAULT = 0,
- LSRA_REG_OPTIONAL_NO_ALLOC = 0x1000,
- LSRA_REG_OPTIONAL_MASK = 0x1000 };
+ enum LsraRegOptionalControl{LSRA_REG_OPTIONAL_DEFAULT = 0, LSRA_REG_OPTIONAL_NO_ALLOC = 0x1000,
+ LSRA_REG_OPTIONAL_MASK = 0x1000};
- LsraRegOptionalControl getLsraRegOptionalControl()
- {
- return (LsraRegOptionalControl) (lsraStressMask & LSRA_REG_OPTIONAL_MASK);
+ LsraRegOptionalControl getLsraRegOptionalControl()
+ {
+ return (LsraRegOptionalControl)(lsraStressMask & LSRA_REG_OPTIONAL_MASK);
}
- bool regOptionalNoAlloc()
- {
+ bool regOptionalNoAlloc()
+ {
return getLsraRegOptionalControl() == LSRA_REG_OPTIONAL_NO_ALLOC;
}
// Dump support
- void lsraDumpIntervals(const char* msg);
- void dumpRefPositions(const char *msg);
- void dumpVarRefPositions(const char *msg);
- void verifyFinalAllocation();
- void verifyResolutionMove(GenTreeStmt* resolutionStmt, LsraLocation currentLocation);
-#else // !DEBUG
- bool doSelectNearest() { return false; }
- bool extendLifetimes() { return false; }
- bool spillAlways() { return false; }
+ void lsraDumpIntervals(const char* msg);
+ void dumpRefPositions(const char* msg);
+ void dumpVarRefPositions(const char* msg);
+ void verifyFinalAllocation();
+ void verifyResolutionMove(GenTreeStmt* resolutionStmt, LsraLocation currentLocation);
+#else // !DEBUG
+ bool doSelectNearest()
+ {
+ return false;
+ }
+ bool extendLifetimes()
+ {
+ return false;
+ }
+ bool spillAlways()
+ {
+ return false;
+ }
// In a retail build we support only the default traversal order
- bool isTraversalLayoutOrder () { return false; }
- bool isTraversalPredFirstOrder () { return true; }
- bool getLsraExtendLifeTimes() { return false; }
+ bool isTraversalLayoutOrder()
+ {
+ return false;
+ }
+ bool isTraversalPredFirstOrder()
+ {
+ return true;
+ }
+ bool getLsraExtendLifeTimes()
+ {
+ return false;
+ }
#endif // !DEBUG
-
public:
// Used by Lowering when considering whether to split Longs, as well as by identifyCandidates().
- bool isRegCandidate(LclVarDsc* varDsc);
+ bool isRegCandidate(LclVarDsc* varDsc);
+
private:
// Determine which locals are candidates for allocation
- void identifyCandidates();
+ void identifyCandidates();
// determine which locals are used in EH constructs we don't want to deal with
- void identifyCandidatesExceptionDataflow();
+ void identifyCandidatesExceptionDataflow();
- void buildPhysRegRecords();
+ void buildPhysRegRecords();
- void setLastUses(BasicBlock * block);
+ void setLastUses(BasicBlock* block);
- void setFrameType();
+ void setFrameType();
// Update allocations at start/end of block
- void processBlockEndAllocation(BasicBlock * current);
+ void processBlockEndAllocation(BasicBlock* current);
// Record variable locations at start/end of block
- void processBlockStartLocations(BasicBlock* current, bool allocationPass);
- void processBlockEndLocations(BasicBlock* current);
+ void processBlockStartLocations(BasicBlock* current, bool allocationPass);
+ void processBlockEndLocations(BasicBlock* current);
- RefType CheckBlockType(BasicBlock * block, BasicBlock * prevBlock);
+ RefType CheckBlockType(BasicBlock* block, BasicBlock* prevBlock);
// insert refpositions representing prolog zero-inits which will be added later
- void insertZeroInitRefPositions();
+ void insertZeroInitRefPositions();
- void AddMapping(GenTree *node, LsraLocation loc);
+ void AddMapping(GenTree* node, LsraLocation loc);
// add physreg refpositions for a tree node, based on calling convention and instruction selection predictions
- void addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse);
+ void addRefsForPhysRegMask(regMaskTP mask, LsraLocation currentLoc, RefType refType, bool isLastUse);
- void resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition);
+ void resolveConflictingDefAndUse(Interval* interval, RefPosition* defRefPosition);
- void buildRefPositionsForNode(GenTree *tree, BasicBlock *block,
- LocationInfoListNodePool& listNodePool,
- HashTableBase<GenTree*, LocationInfoList>& operandToLocationInfoMap,
- LsraLocation loc);
+ void buildRefPositionsForNode(GenTree* tree,
+ BasicBlock* block,
+ LocationInfoListNodePool& listNodePool,
+ HashTableBase<GenTree*, LocationInfoList>& operandToLocationInfoMap,
+ LsraLocation loc);
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
- VARSET_VALRET_TP buildUpperVectorSaveRefPositions(GenTree *tree, LsraLocation currentLoc);
- void buildUpperVectorRestoreRefPositions(GenTree *tree, LsraLocation currentLoc, VARSET_VALARG_TP liveLargeVectors);
-#endif //FEATURE_PARTIAL_SIMD_CALLEE_SAVE
+ VARSET_VALRET_TP buildUpperVectorSaveRefPositions(GenTree* tree, LsraLocation currentLoc);
+ void buildUpperVectorRestoreRefPositions(GenTree* tree, LsraLocation currentLoc, VARSET_VALARG_TP liveLargeVectors);
+#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- // For AMD64 on SystemV machines. This method
- // is called as replacement for raUpdateRegStateForArg
+ // For AMD64 on SystemV machines. This method
+ // is called as replacement for raUpdateRegStateForArg
// that is used on Windows. On System V systems a struct can be passed
// partially using registers from the 2 register files.
void unixAmd64UpdateRegStateForArg(LclVarDsc* argDsc);
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// Update reg state for an incoming register argument
- void updateRegStateForArg(LclVarDsc* argDsc);
+ void updateRegStateForArg(LclVarDsc* argDsc);
- inline void setTreeNodeInfo(GenTree *tree, TreeNodeInfo info)
+ inline void setTreeNodeInfo(GenTree* tree, TreeNodeInfo info)
{
tree->gtLsraInfo = info;
tree->gtClearReg(compiler);
@@ -643,151 +709,155 @@ private:
DBEXEC(VERBOSE, info.dump(this));
}
- inline void clearDstCount(GenTree *tree)
+ inline void clearDstCount(GenTree* tree)
{
tree->gtLsraInfo.dstCount = 0;
}
- inline void clearOperandCounts(GenTree *tree)
+ inline void clearOperandCounts(GenTree* tree)
{
- TreeNodeInfo &info = tree->gtLsraInfo;
- info.srcCount = 0;
- info.dstCount = 0;
+ TreeNodeInfo& info = tree->gtLsraInfo;
+ info.srcCount = 0;
+ info.dstCount = 0;
}
- inline bool isLocalDefUse(GenTree *tree)
+ inline bool isLocalDefUse(GenTree* tree)
{
return tree->gtLsraInfo.isLocalDefUse;
}
- inline bool isCandidateLocalRef(GenTree *tree)
+ inline bool isCandidateLocalRef(GenTree* tree)
{
if (tree->IsLocal())
{
unsigned int lclNum = tree->gtLclVarCommon.gtLclNum;
assert(lclNum < compiler->lvaCount);
- LclVarDsc * varDsc = compiler->lvaTable + tree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = compiler->lvaTable + tree->gtLclVarCommon.gtLclNum;
return isCandidateVar(varDsc);
}
return false;
}
- static Compiler::fgWalkResult
- markAddrModeOperandsHelperMD(GenTreePtr tree, void *p);
+ static Compiler::fgWalkResult markAddrModeOperandsHelperMD(GenTreePtr tree, void* p);
// Return the registers killed by the given tree node.
- regMaskTP getKillSetForNode(GenTree* tree);
+ regMaskTP getKillSetForNode(GenTree* tree);
// Given some tree node add refpositions for all the registers this node kills
- bool buildKillPositionsForNode(GenTree* tree,
- LsraLocation currentLoc);
+ bool buildKillPositionsForNode(GenTree* tree, LsraLocation currentLoc);
- regMaskTP allRegs(RegisterType rt);
- regMaskTP allRegs(GenTree* tree);
- regMaskTP allMultiRegCallNodeRegs(GenTreeCall* tree);
- regMaskTP allSIMDRegs();
- regMaskTP internalFloatRegCandidates();
+ regMaskTP allRegs(RegisterType rt);
+ regMaskTP allRegs(GenTree* tree);
+ regMaskTP allMultiRegCallNodeRegs(GenTreeCall* tree);
+ regMaskTP allSIMDRegs();
+ regMaskTP internalFloatRegCandidates();
- bool registerIsFree(regNumber regNum, RegisterType regType);
- bool registerIsAvailable(RegRecord *physRegRecord, LsraLocation currentLoc, LsraLocation * nextRefLocationPtr, RegisterType regType);
- void freeRegister(RegRecord *physRegRecord);
- void freeRegisters(regMaskTP regsToFree);
+ bool registerIsFree(regNumber regNum, RegisterType regType);
+ bool registerIsAvailable(RegRecord* physRegRecord,
+ LsraLocation currentLoc,
+ LsraLocation* nextRefLocationPtr,
+ RegisterType regType);
+ void freeRegister(RegRecord* physRegRecord);
+ void freeRegisters(regMaskTP regsToFree);
- regMaskTP getUseCandidates(GenTree *useNode);
- regMaskTP getDefCandidates(GenTree *tree);
- var_types getDefType(GenTree *tree);
+ regMaskTP getUseCandidates(GenTree* useNode);
+ regMaskTP getDefCandidates(GenTree* tree);
+ var_types getDefType(GenTree* tree);
- RefPosition * defineNewInternalTemp(GenTree *tree, RegisterType regType, LsraLocation currentLoc, regMaskTP regMask);
+ RefPosition* defineNewInternalTemp(GenTree* tree, RegisterType regType, LsraLocation currentLoc, regMaskTP regMask);
- int buildInternalRegisterDefsForNode(GenTree *tree, LsraLocation currentLoc,
- RefPosition* defs[]);
+ int buildInternalRegisterDefsForNode(GenTree* tree, LsraLocation currentLoc, RefPosition* defs[]);
- void buildInternalRegisterUsesForNode(GenTree *tree, LsraLocation currentLoc,
- RefPosition* defs[], int total);
+ void buildInternalRegisterUsesForNode(GenTree* tree, LsraLocation currentLoc, RefPosition* defs[], int total);
- void resolveLocalRef(GenTreePtr treeNode, RefPosition * currentRefPosition);
+ void resolveLocalRef(GenTreePtr treeNode, RefPosition* currentRefPosition);
- void insertMove(BasicBlock * block, GenTreePtr insertionPoint, unsigned lclNum,
- regNumber inReg, regNumber outReg);
+ void insertMove(BasicBlock* block, GenTreePtr insertionPoint, unsigned lclNum, regNumber inReg, regNumber outReg);
- void insertSwap(BasicBlock* block,
- GenTreePtr insertionPoint,
- unsigned lclNum1,
- regNumber reg1,
- unsigned lclNum2,
- regNumber reg2);
-public:
+ void insertSwap(BasicBlock* block,
+ GenTreePtr insertionPoint,
+ unsigned lclNum1,
+ regNumber reg1,
+ unsigned lclNum2,
+ regNumber reg2);
+public:
// TODO-Cleanup: unused?
class PhysRegIntervalIterator
{
public:
- PhysRegIntervalIterator(LinearScan * theLinearScan)
+ PhysRegIntervalIterator(LinearScan* theLinearScan)
{
- nextRegNumber = (regNumber) 0;
- linearScan = theLinearScan;
+ nextRegNumber = (regNumber)0;
+ linearScan = theLinearScan;
}
- RegRecord * GetNext()
+ RegRecord* GetNext()
{
return &linearScan->physRegs[nextRegNumber];
}
+
private:
// This assumes that the physical registers are contiguous, starting
// with a register number of 0
regNumber nextRegNumber;
- LinearScan * linearScan;
+ LinearScan* linearScan;
};
private:
- Interval * newInterval(RegisterType regType);
+ Interval* newInterval(RegisterType regType);
- Interval * getIntervalForLocalVar(unsigned varNum)
+ Interval* getIntervalForLocalVar(unsigned varNum)
{
return localVarIntervals[varNum];
}
- RegRecord * getRegisterRecord(regNumber regNum);
+ RegRecord* getRegisterRecord(regNumber regNum);
- RefPosition * newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType);
+ RefPosition* newRefPositionRaw(LsraLocation nodeLocation, GenTree* treeNode, RefType refType);
- RefPosition* newRefPosition(Interval* theInterval,
- LsraLocation theLocation,
- RefType theRefType,
- GenTree* theTreeNode,
- regMaskTP mask,
- unsigned multiRegIdx = 0);
+ RefPosition* newRefPosition(Interval* theInterval,
+ LsraLocation theLocation,
+ RefType theRefType,
+ GenTree* theTreeNode,
+ regMaskTP mask,
+ unsigned multiRegIdx = 0);
- RefPosition* newRefPosition(regNumber reg,
- LsraLocation theLocation,
- RefType theRefType,
- GenTree* theTreeNode,
- regMaskTP mask);
+ RefPosition* newRefPosition(
+ regNumber reg, LsraLocation theLocation, RefType theRefType, GenTree* theTreeNode, regMaskTP mask);
void applyCalleeSaveHeuristics(RefPosition* rp);
- void associateRefPosWithInterval(RefPosition *rp);
+ void associateRefPosWithInterval(RefPosition* rp);
- void associateRefPosWithRegister(RefPosition *rp);
+ void associateRefPosWithRegister(RefPosition* rp);
unsigned getWeight(RefPosition* refPos);
/*****************************************************************************
* Register management
****************************************************************************/
- RegisterType getRegisterType(Interval *currentInterval, RefPosition* refPosition);
- regNumber tryAllocateFreeReg(Interval *current, RefPosition *refPosition);
- RegRecord* findBestPhysicalReg(RegisterType regType, LsraLocation endLocation,
- regMaskTP candidates, regMaskTP preferences);
+ RegisterType getRegisterType(Interval* currentInterval, RefPosition* refPosition);
+ regNumber tryAllocateFreeReg(Interval* current, RefPosition* refPosition);
+ RegRecord* findBestPhysicalReg(RegisterType regType,
+ LsraLocation endLocation,
+ regMaskTP candidates,
+ regMaskTP preferences);
regNumber allocateBusyReg(Interval* current, RefPosition* refPosition, bool allocateIfProfitable);
- regNumber assignCopyReg(RefPosition * refPosition);
+ regNumber assignCopyReg(RefPosition* refPosition);
- void checkAndAssignInterval(RegRecord * regRec, Interval * interval);
- void assignPhysReg(RegRecord * regRec, Interval * interval);
- void assignPhysReg( regNumber reg, Interval * interval) { assignPhysReg(getRegisterRecord(reg), interval); }
+ void checkAndAssignInterval(RegRecord* regRec, Interval* interval);
+ void assignPhysReg(RegRecord* regRec, Interval* interval);
+ void assignPhysReg(regNumber reg, Interval* interval)
+ {
+ assignPhysReg(getRegisterRecord(reg), interval);
+ }
- void checkAndClearInterval(RegRecord * regRec, RefPosition* spillRefPosition);
- void unassignPhysReg(RegRecord * regRec, RefPosition* spillRefPosition);
- void unassignPhysRegNoSpill( RegRecord* reg);
- void unassignPhysReg( regNumber reg) { unassignPhysReg(getRegisterRecord(reg), nullptr); }
+ void checkAndClearInterval(RegRecord* regRec, RefPosition* spillRefPosition);
+ void unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPosition);
+ void unassignPhysRegNoSpill(RegRecord* reg);
+ void unassignPhysReg(regNumber reg)
+ {
+ unassignPhysReg(getRegisterRecord(reg), nullptr);
+ }
void spillInterval(Interval* interval, RefPosition* fromRefPosition, RefPosition* toRefPosition);
@@ -797,7 +867,7 @@ private:
* For Resolution phase
****************************************************************************/
// TODO-Throughput: Consider refactoring this so that we keep a map from regs to vars for better scaling
- unsigned int regMapCount;
+ unsigned int regMapCount;
// When we split edges, we create new blocks, and instead of expanding the VarToRegMaps, we
// rely on the property that the "in" map is the same as the "from" block of the edge, and the
@@ -807,11 +877,11 @@ private:
// TODO-Throughput: We may want to look into the cost/benefit tradeoff of doing this vs. expanding
// the arrays.
- unsigned bbNumMaxBeforeResolution;
+ unsigned bbNumMaxBeforeResolution;
struct SplitEdgeInfo
{
- unsigned fromBBNum;
- unsigned toBBNum;
+ unsigned fromBBNum;
+ unsigned toBBNum;
};
typedef SimplerHashTable<unsigned, SmallPrimitiveKeyFuncs<unsigned>, SplitEdgeInfo, JitSimplerHashBehavior>
SplitBBNumToTargetBBNumMap;
@@ -820,28 +890,29 @@ private:
{
if (splitBBNumToTargetBBNumMap == nullptr)
{
- splitBBNumToTargetBBNumMap = new (getAllocator(compiler)) SplitBBNumToTargetBBNumMap(getAllocator(compiler));
+ splitBBNumToTargetBBNumMap =
+ new (getAllocator(compiler)) SplitBBNumToTargetBBNumMap(getAllocator(compiler));
}
return splitBBNumToTargetBBNumMap;
}
- SplitEdgeInfo getSplitEdgeInfo(unsigned int bbNum);
+ SplitEdgeInfo getSplitEdgeInfo(unsigned int bbNum);
- void initVarRegMaps();
- void setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg);
- void setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg);
+ void initVarRegMaps();
+ void setInVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg);
+ void setOutVarRegForBB(unsigned int bbNum, unsigned int varNum, regNumber reg);
VarToRegMap getInVarToRegMap(unsigned int bbNum);
VarToRegMap getOutVarToRegMap(unsigned int bbNum);
- regNumber getVarReg(VarToRegMap map, unsigned int varNum);
+ regNumber getVarReg(VarToRegMap map, unsigned int varNum);
// Initialize the incoming VarToRegMap to the given map values (generally a predecessor of
// the block)
VarToRegMap setInVarToRegMap(unsigned int bbNum, VarToRegMap srcVarToRegMap);
- regNumber getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type);
+ regNumber getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type);
#ifdef DEBUG
void dumpVarToRegMap(VarToRegMap map);
- void dumpInVarToRegMap(BasicBlock * block);
- void dumpOutVarToRegMap(BasicBlock * block);
+ void dumpInVarToRegMap(BasicBlock* block);
+ void dumpOutVarToRegMap(BasicBlock* block);
// There are three points at which a tuple-style dump is produced, and each
// differs slightly:
@@ -854,12 +925,15 @@ private:
// - In LSRA_DUMP_POST, which is after register allocation, the registers are
// shown.
- enum LsraTupleDumpMode { LSRA_DUMP_PRE, LSRA_DUMP_REFPOS, LSRA_DUMP_POST };
- void lsraGetOperandString(GenTreePtr tree, LsraTupleDumpMode mode, char *operandString, unsigned operandStringLength);
+ enum LsraTupleDumpMode{LSRA_DUMP_PRE, LSRA_DUMP_REFPOS, LSRA_DUMP_POST};
+ void lsraGetOperandString(GenTreePtr tree,
+ LsraTupleDumpMode mode,
+ char* operandString,
+ unsigned operandStringLength);
void lsraDispNode(GenTreePtr tree, LsraTupleDumpMode mode, bool hasDest);
void TupleStyleDump(LsraTupleDumpMode mode);
- bool dumpTerse;
+ bool dumpTerse;
LsraLocation maxNodeLocation;
// Width of various fields - used to create a streamlined dump during allocation that shows the
@@ -874,18 +948,18 @@ private:
const char* rightBox;
static const int MAX_FORMAT_CHARS = 12;
- char intervalNameFormat[MAX_FORMAT_CHARS];
- char regNameFormat[MAX_FORMAT_CHARS];
- char shortRefPositionFormat[MAX_FORMAT_CHARS];
- char emptyRefPositionFormat[MAX_FORMAT_CHARS];
- char indentFormat[MAX_FORMAT_CHARS];
+ char intervalNameFormat[MAX_FORMAT_CHARS];
+ char regNameFormat[MAX_FORMAT_CHARS];
+ char shortRefPositionFormat[MAX_FORMAT_CHARS];
+ char emptyRefPositionFormat[MAX_FORMAT_CHARS];
+ char indentFormat[MAX_FORMAT_CHARS];
static const int MAX_LEGEND_FORMAT_CHARS = 25;
- char bbRefPosFormat[MAX_LEGEND_FORMAT_CHARS];
- char legendFormat[MAX_LEGEND_FORMAT_CHARS];
+ char bbRefPosFormat[MAX_LEGEND_FORMAT_CHARS];
+ char legendFormat[MAX_LEGEND_FORMAT_CHARS];
// How many rows have we printed since last printing a "title row"?
static const int MAX_ROWS_BETWEEN_TITLES = 50;
- int rowCountSinceLastTitle;
+ int rowCountSinceLastTitle;
void dumpRegRecordHeader();
void dumpRegRecordTitle();
@@ -901,68 +975,49 @@ private:
// Events during the allocation phase that cause some dump output, which differs depending
// upon whether dumpTerse is set:
- enum LsraDumpEvent {
- // Conflicting def/use
- LSRA_EVENT_DEFUSE_CONFLICT,
- LSRA_EVENT_DEFUSE_FIXED_DELAY_USE,
- LSRA_EVENT_DEFUSE_CASE1,
- LSRA_EVENT_DEFUSE_CASE2,
- LSRA_EVENT_DEFUSE_CASE3,
- LSRA_EVENT_DEFUSE_CASE4,
- LSRA_EVENT_DEFUSE_CASE5,
- LSRA_EVENT_DEFUSE_CASE6,
-
- // Spilling
- LSRA_EVENT_SPILL,
- LSRA_EVENT_SPILL_EXTENDED_LIFETIME,
- LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL,
- LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL,
- LSRA_EVENT_DONE_KILL_GC_REFS,
-
- // Block boundaries
- LSRA_EVENT_START_BB,
- LSRA_EVENT_END_BB,
-
- //Miscellaneous
- LSRA_EVENT_FREE_REGS,
-
- // Characteristics of the current RefPosition
- LSRA_EVENT_INCREMENT_RANGE_END, // ???
- LSRA_EVENT_LAST_USE,
- LSRA_EVENT_LAST_USE_DELAYED,
- LSRA_EVENT_NEEDS_NEW_REG,
-
- // Allocation decisions
- LSRA_EVENT_FIXED_REG,
- LSRA_EVENT_EXP_USE,
- LSRA_EVENT_ZERO_REF,
- LSRA_EVENT_NO_ENTRY_REG_ALLOCATED,
- LSRA_EVENT_KEPT_ALLOCATION,
- LSRA_EVENT_COPY_REG,
- LSRA_EVENT_MOVE_REG,
- LSRA_EVENT_ALLOC_REG,
- LSRA_EVENT_ALLOC_SPILLED_REG,
- LSRA_EVENT_NO_REG_ALLOCATED,
- LSRA_EVENT_RELOAD,
- LSRA_EVENT_SPECIAL_PUTARG,
- LSRA_EVENT_REUSE_REG,
+ enum LsraDumpEvent{
+ // Conflicting def/use
+ LSRA_EVENT_DEFUSE_CONFLICT, LSRA_EVENT_DEFUSE_FIXED_DELAY_USE, LSRA_EVENT_DEFUSE_CASE1, LSRA_EVENT_DEFUSE_CASE2,
+ LSRA_EVENT_DEFUSE_CASE3, LSRA_EVENT_DEFUSE_CASE4, LSRA_EVENT_DEFUSE_CASE5, LSRA_EVENT_DEFUSE_CASE6,
+
+ // Spilling
+ LSRA_EVENT_SPILL, LSRA_EVENT_SPILL_EXTENDED_LIFETIME, LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL,
+ LSRA_EVENT_RESTORE_PREVIOUS_INTERVAL_AFTER_SPILL, LSRA_EVENT_DONE_KILL_GC_REFS,
+
+ // Block boundaries
+ LSRA_EVENT_START_BB, LSRA_EVENT_END_BB,
+
+ // Miscellaneous
+ LSRA_EVENT_FREE_REGS,
+
+ // Characteristics of the current RefPosition
+ LSRA_EVENT_INCREMENT_RANGE_END, // ???
+ LSRA_EVENT_LAST_USE, LSRA_EVENT_LAST_USE_DELAYED, LSRA_EVENT_NEEDS_NEW_REG,
+
+ // Allocation decisions
+ LSRA_EVENT_FIXED_REG, LSRA_EVENT_EXP_USE, LSRA_EVENT_ZERO_REF, LSRA_EVENT_NO_ENTRY_REG_ALLOCATED,
+ LSRA_EVENT_KEPT_ALLOCATION, LSRA_EVENT_COPY_REG, LSRA_EVENT_MOVE_REG, LSRA_EVENT_ALLOC_REG,
+ LSRA_EVENT_ALLOC_SPILLED_REG, LSRA_EVENT_NO_REG_ALLOCATED, LSRA_EVENT_RELOAD, LSRA_EVENT_SPECIAL_PUTARG,
+ LSRA_EVENT_REUSE_REG,
};
- void dumpLsraAllocationEvent(LsraDumpEvent event, Interval* interval = nullptr, regNumber reg = REG_NA, BasicBlock* currentBlock = nullptr);
+ void dumpLsraAllocationEvent(LsraDumpEvent event,
+ Interval* interval = nullptr,
+ regNumber reg = REG_NA,
+ BasicBlock* currentBlock = nullptr);
void dumpBlockHeader(BasicBlock* block);
void validateIntervals();
#endif // DEBUG
- Compiler * compiler;
+ Compiler* compiler;
private:
-
#if MEASURE_MEM_ALLOC
- IAllocator* lsraIAllocator;
+ IAllocator* lsraIAllocator;
#endif
- IAllocator* getAllocator(Compiler* comp)
+ IAllocator* getAllocator(Compiler* comp)
{
#if MEASURE_MEM_ALLOC
if (lsraIAllocator == nullptr)
@@ -977,92 +1032,92 @@ private:
#ifdef DEBUG
// This is used for dumping
- RefPosition* activeRefPosition;
+ RefPosition* activeRefPosition;
#endif // DEBUG
- IntervalList intervals;
+ IntervalList intervals;
- RegRecord physRegs[REG_COUNT];
+ RegRecord physRegs[REG_COUNT];
- Interval ** localVarIntervals;
+ Interval** localVarIntervals;
// Set of blocks that have been visited.
- BlockSet bbVisitedSet;
- void markBlockVisited(BasicBlock* block)
+ BlockSet bbVisitedSet;
+ void markBlockVisited(BasicBlock* block)
{
BlockSetOps::AddElemD(compiler, bbVisitedSet, block->bbNum);
}
- void clearVisitedBlocks()
+ void clearVisitedBlocks()
{
BlockSetOps::ClearD(compiler, bbVisitedSet);
}
- bool isBlockVisited(BasicBlock* block)
+ bool isBlockVisited(BasicBlock* block)
{
return BlockSetOps::IsMember(compiler, bbVisitedSet, block->bbNum);
}
// A map from bbNum to the block information used during register allocation.
- LsraBlockInfo* blockInfo;
- BasicBlock* findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated));
+ LsraBlockInfo* blockInfo;
+ BasicBlock* findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated));
// The order in which the blocks will be allocated.
    // This is an array of BasicBlock*, in the order in which they should be traversed.
- BasicBlock** blockSequence;
+ BasicBlock** blockSequence;
// The verifiedAllBBs flag indicates whether we have verified that all BBs have been
    // included in the blockSequence above, during setBlockSequence().
- bool verifiedAllBBs;
- void setBlockSequence();
- int compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights);
- BasicBlockList* blockSequenceWorkList;
- bool blockSequencingDone;
- void addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block);
- void removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode);
- BasicBlock* getNextCandidateFromWorkList();
+ bool verifiedAllBBs;
+ void setBlockSequence();
+ int compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights);
+ BasicBlockList* blockSequenceWorkList;
+ bool blockSequencingDone;
+ void addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block);
+ void removeFromBlockSequenceWorkList(BasicBlockList* listNode, BasicBlockList* prevNode);
+ BasicBlock* getNextCandidateFromWorkList();
// The bbNum of the block being currently allocated or resolved.
- unsigned int curBBNum;
+ unsigned int curBBNum;
// The ordinal of the block we're on (i.e. this is the curBBSeqNum-th block we've allocated).
- unsigned int curBBSeqNum;
+ unsigned int curBBSeqNum;
// The number of blocks that we've sequenced.
- unsigned int bbSeqCount;
+ unsigned int bbSeqCount;
// The Location of the start of the current block.
- LsraLocation curBBStartLocation;
+ LsraLocation curBBStartLocation;
// Ordered list of RefPositions
- RefPositionList refPositions;
+ RefPositionList refPositions;
// Per-block variable location mappings: an array indexed by block number that yields a
// pointer to an array of regNumber, one per variable.
- VarToRegMap * inVarToRegMaps;
- VarToRegMap * outVarToRegMaps;
+ VarToRegMap* inVarToRegMaps;
+ VarToRegMap* outVarToRegMaps;
// A temporary VarToRegMap used during the resolution of critical edges.
- VarToRegMap sharedCriticalVarToRegMap;
+ VarToRegMap sharedCriticalVarToRegMap;
- PhasedVar<regMaskTP> availableIntRegs;
- PhasedVar<regMaskTP> availableFloatRegs;
- PhasedVar<regMaskTP> availableDoubleRegs;
+ PhasedVar<regMaskTP> availableIntRegs;
+ PhasedVar<regMaskTP> availableFloatRegs;
+ PhasedVar<regMaskTP> availableDoubleRegs;
// Current set of live tracked vars, used during building of RefPositions to determine whether
// to preference to callee-save
- VARSET_TP currentLiveVars;
+ VARSET_TP currentLiveVars;
// Set of floating point variables to consider for callee-save registers.
- VARSET_TP fpCalleeSaveCandidateVars;
+ VARSET_TP fpCalleeSaveCandidateVars;
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#if defined(_TARGET_AMD64_)
- static const var_types LargeVectorType = TYP_SIMD32;
- static const var_types LargeVectorSaveType = TYP_SIMD16;
+ static const var_types LargeVectorType = TYP_SIMD32;
+ static const var_types LargeVectorSaveType = TYP_SIMD16;
#elif defined(_TARGET_ARM64_)
- static const var_types LargeVectorType = TYP_SIMD16;
- static const var_types LargeVectorSaveType = TYP_DOUBLE;
+ static const var_types LargeVectorType = TYP_SIMD16;
+ static const var_types LargeVectorSaveType = TYP_DOUBLE;
#else // !defined(_TARGET_AMD64_) && !defined(_TARGET_ARM64_)
#error("Unknown target architecture for FEATURE_SIMD")
#endif // !defined(_TARGET_AMD64_) && !defined(_TARGET_ARM64_)
// Set of large vector (TYP_SIMD32 on AVX) variables.
- VARSET_TP largeVectorVars;
+ VARSET_TP largeVectorVars;
// Set of large vector (TYP_SIMD32 on AVX) variables to consider for callee-save registers.
- VARSET_TP largeVectorCalleeSaveCandidateVars;
+ VARSET_TP largeVectorCalleeSaveCandidateVars;
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
};
@@ -1114,90 +1169,90 @@ public:
void microDump();
#endif // DEBUG
- void setLocalNumber(unsigned localNum, LinearScan *l);
+ void setLocalNumber(unsigned localNum, LinearScan* l);
// Fixed registers for which this Interval has a preference
- regMaskTP registerPreferences;
+ regMaskTP registerPreferences;
// The relatedInterval is:
// - for any other interval, it is the interval to which this interval
// is currently preferenced (e.g. because they are related by a copy)
- Interval * relatedInterval;
+ Interval* relatedInterval;
    // The assignedReg is the RegRecord for the register to which this interval
// has been assigned at some point - if the interval is active, this is the
// register it currently occupies.
- RegRecord * assignedReg;
+ RegRecord* assignedReg;
// DECIDE : put this in a union or do something w/ inheritance?
    // this is an interval for a physical register, not an allocatable entity
- RegisterType registerType;
- bool isLocalVar : 1;
+ RegisterType registerType;
+ bool isLocalVar : 1;
// Indicates whether this interval has been assigned to different registers
- bool isSplit : 1;
+ bool isSplit : 1;
// Indicates whether this interval is ever spilled
- bool isSpilled : 1;
+ bool isSpilled : 1;
// indicates an interval representing the internal requirements for
// generating code for a node (temp registers internal to the node)
// Note that this interval may live beyond a node in the GT_ARR_LENREF/GT_IND
// case (though never lives beyond a stmt)
- bool isInternal : 1;
+ bool isInternal : 1;
// true if this is a LocalVar for a struct field
- bool isStructField : 1;
+ bool isStructField : 1;
// true iff this is a GT_LDOBJ for a fully promoted (PROMOTION_TYPE_INDEPENDENT) struct
- bool isPromotedStruct : 1;
+ bool isPromotedStruct : 1;
// true if this is an SDSU interval for which the def and use have conflicting register
// requirements
- bool hasConflictingDefUse : 1;
+ bool hasConflictingDefUse : 1;
// true if this interval is defined by a non-commutative 2-operand instruction
- bool hasNonCommutativeRMWDef : 1;
+ bool hasNonCommutativeRMWDef : 1;
// True if this interval is defined by a putArg, whose source is a non-last-use lclVar.
// During allocation, this flag will be cleared if the source is not already in the required register.
    // Otherwise, we will leave the register allocated to the lclVar, but mark the RegRecord as
// isBusyUntilNextKill, so that it won't be reused if the lclVar goes dead before the call.
- bool isSpecialPutArg : 1;
+ bool isSpecialPutArg : 1;
// True if this interval interferes with a call.
- bool preferCalleeSave : 1;
+ bool preferCalleeSave : 1;
// True if this interval is defined by a constant node that may be reused and/or may be
// able to reuse a constant that's already in a register.
- bool isConstant : 1;
+ bool isConstant : 1;
// The register to which it is currently assigned.
- regNumber physReg;
+ regNumber physReg;
#ifdef DEBUG
- unsigned int intervalIndex;
+ unsigned int intervalIndex;
#endif // DEBUG
- unsigned int varNum; // This is the "variable number": the index into the lvaTable array
+ unsigned int varNum; // This is the "variable number": the index into the lvaTable array
- LclVarDsc *getLocalVar(Compiler *comp)
+ LclVarDsc* getLocalVar(Compiler* comp)
{
assert(isLocalVar);
return &(comp->lvaTable[this->varNum]);
}
// Get the local tracked variable "index" (lvVarIndex), used in bitmasks.
- unsigned getVarIndex(Compiler* comp)
+ unsigned getVarIndex(Compiler* comp)
{
- LclVarDsc * varDsc = getLocalVar(comp);
+ LclVarDsc* varDsc = getLocalVar(comp);
assert(varDsc->lvTracked); // If this isn't true, we shouldn't be calling this function!
return varDsc->lvVarIndex;
}
- bool isAssignedTo(regNumber regNum)
+ bool isAssignedTo(regNumber regNum)
{
// This uses regMasks to handle the case where a double actually occupies two registers
// TODO-Throughput: This could/should be done more cheaply.
- return (physReg != REG_NA && (genRegMask(physReg,registerType) & genRegMask(regNum)) != RBM_NONE);
+ return (physReg != REG_NA && (genRegMask(physReg, registerType) & genRegMask(regNum)) != RBM_NONE);
}
// Assign the related interval.
- void assignRelatedInterval(Interval* newRelatedInterval)
+ void assignRelatedInterval(Interval* newRelatedInterval)
{
#ifdef DEBUG
if (VERBOSE)
@@ -1213,7 +1268,7 @@ public:
}
// Assign the related interval, but only if it isn't already assigned.
- void assignRelatedIntervalIfUnassigned(Interval* newRelatedInterval)
+ void assignRelatedIntervalIfUnassigned(Interval* newRelatedInterval)
{
if (relatedInterval == nullptr)
{
@@ -1238,12 +1293,12 @@ public:
    // An exception is made in the case where either the existing or the new
    // preferences are all callee-save, in which case we "prefer" the callee-save
- void updateRegisterPreferences(regMaskTP preferences)
+ void updateRegisterPreferences(regMaskTP preferences)
{
// We require registerPreferences to have been initialized.
assert(registerPreferences != RBM_NONE);
// It is invalid to update with empty preferences
- assert (preferences != RBM_NONE);
+ assert(preferences != RBM_NONE);
regMaskTP commonPreferences = (registerPreferences & preferences);
if (commonPreferences != RBM_NONE)
@@ -1280,11 +1335,11 @@ public:
// Keep only the callee-save preferences, if not empty.
// Otherwise, take the union of the preferences.
- regMaskTP newPreferences = registerPreferences|preferences;
+ regMaskTP newPreferences = registerPreferences | preferences;
if (preferCalleeSave)
{
- regMaskTP calleeSaveMask = (calleeSaveRegs(this->registerType) & (newPreferences));
+ regMaskTP calleeSaveMask = (calleeSaveRegs(this->registerType) & (newPreferences));
if (calleeSaveMask != RBM_NONE)
{
newPreferences = calleeSaveMask;
@@ -1328,57 +1383,74 @@ public:
//
// Q: can 'referent' be NULL?
- Referenceable * referent;
+ Referenceable* referent;
- Interval *getInterval() { assert (!isPhysRegRef); return (Interval *) referent; }
- void setInterval(Interval *i) { referent = i; isPhysRegRef = false; }
+ Interval* getInterval()
+ {
+ assert(!isPhysRegRef);
+ return (Interval*)referent;
+ }
+ void setInterval(Interval* i)
+ {
+ referent = i;
+ isPhysRegRef = false;
+ }
- RegRecord *getReg() { assert (isPhysRegRef); return (RegRecord *) referent; }
- void setReg(RegRecord *r) { referent = r; isPhysRegRef = true; registerAssignment = genRegMask(r->regNum); }
+ RegRecord* getReg()
+ {
+ assert(isPhysRegRef);
+ return (RegRecord*)referent;
+ }
+ void setReg(RegRecord* r)
+ {
+ referent = r;
+ isPhysRegRef = true;
+ registerAssignment = genRegMask(r->regNum);
+ }
// nextRefPosition is the next in code order.
// Note that in either case there is no need for these to be doubly linked, as they
// are only traversed in the forward direction, and are not moved.
- RefPosition * nextRefPosition;
+ RefPosition* nextRefPosition;
// The remaining fields are common to both options
- GenTree* treeNode;
- unsigned int bbNum;
+ GenTree* treeNode;
+ unsigned int bbNum;
// Prior to the allocation pass, registerAssignment captures the valid registers
// for this RefPosition. An empty set means that any register is valid. A non-empty
// set means that it must be one of the given registers (may be the full set if the
// only constraint is that it must reside in SOME register)
// After the allocation pass, this contains the actual assignment
- LsraLocation nodeLocation;
- regMaskTP registerAssignment;
+ LsraLocation nodeLocation;
+ regMaskTP registerAssignment;
- regNumber assignedReg() {
+ regNumber assignedReg()
+ {
if (registerAssignment == RBM_NONE)
{
return REG_NA;
}
- return genRegNumFromMask(registerAssignment);
+ return genRegNumFromMask(registerAssignment);
}
- RefType refType;
+ RefType refType;
// Returns true if it is a reference on a gentree node.
- bool IsActualRef()
+ bool IsActualRef()
{
- return (refType == RefTypeDef ||
- refType == RefTypeUse);
+ return (refType == RefTypeDef || refType == RefTypeUse);
}
- bool RequiresRegister()
+ bool RequiresRegister()
{
return (IsActualRef()
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
- || refType == RefTypeUpperVectorSaveDef
- || refType == RefTypeUpperVectorSaveUse
+ || refType == RefTypeUpperVectorSaveDef || refType == RefTypeUpperVectorSaveUse
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
- ) && !AllocateIfProfitable();
+ ) &&
+ !AllocateIfProfitable();
}
// Indicates whether this ref position is to be allocated
@@ -1386,44 +1458,44 @@ public:
// ref positions that lower/codegen has indicated as reg
// optional and is considered a contained memory operand if
// no reg is allocated.
- unsigned allocRegIfProfitable : 1;
+ unsigned allocRegIfProfitable : 1;
- void setAllocateIfProfitable(unsigned val)
+ void setAllocateIfProfitable(unsigned val)
{
allocRegIfProfitable = val;
}
    // Returns true if this ref position is to be allocated
    // a reg only when it is profitable.
- bool AllocateIfProfitable()
+ bool AllocateIfProfitable()
{
// TODO-CQ: Right now if a ref position is marked as
// copyreg or movereg, then it is not treated as
// 'allocate if profitable'. This is an implementation
// limitation that needs to be addressed.
- return allocRegIfProfitable &&
- !copyReg &&
- !moveReg;
+ return allocRegIfProfitable && !copyReg && !moveReg;
}
// Used by RefTypeDef/Use positions of a multi-reg call node.
// Indicates the position of the register that this ref position refers to.
    // The max bits needed is based on the max value of MAX_RET_REG_COUNT
    // across all targets, and that happens to be 4 on Arm. Hence the index value
- // would be 0..MAX_RET_REG_COUNT-1.
- unsigned multiRegIdx : 2;
+ // would be 0..MAX_RET_REG_COUNT-1.
+ unsigned multiRegIdx : 2;
- void setMultiRegIdx(unsigned idx)
+ void setMultiRegIdx(unsigned idx)
{
multiRegIdx = idx;
assert(multiRegIdx == idx);
}
- unsigned getMultiRegIdx() { return multiRegIdx; }
-
+ unsigned getMultiRegIdx()
+ {
+ return multiRegIdx;
+ }
// Last Use - this may be true for multiple RefPositions in the same Interval
- bool lastUse : 1;
+ bool lastUse : 1;
// Spill and Copy info
// reload indicates that the value was spilled, and must be reloaded here.
@@ -1444,14 +1516,14 @@ public:
// we need an explicit move.
// - copyReg and moveReg must not exist with each other.
- bool reload : 1;
- bool spillAfter : 1;
- bool copyReg : 1;
- bool moveReg : 1; // true if this var is moved to a new register
+ bool reload : 1;
+ bool spillAfter : 1;
+ bool copyReg : 1;
+ bool moveReg : 1; // true if this var is moved to a new register
- bool isPhysRegRef : 1; // true if 'referent' points of a RegRecord, false if it points to an Interval
- bool isFixedRegRef : 1;
- bool isLocalDefUse : 1;
+ bool isPhysRegRef : 1; // true if 'referent' points of a RegRecord, false if it points to an Interval
+ bool isFixedRegRef : 1;
+ bool isLocalDefUse : 1;
// delayRegFree indicates that the register should not be freed right away, but instead wait
// until the next Location after it would normally be freed. This is used for the case of
@@ -1460,50 +1532,52 @@ public:
// Another option would be to actually change the Location of the op2 use until the same
// Location as the def, but then it could potentially reuse a register that has been freed
// from the other source(s), e.g. if it's a lastUse or spilled.
- bool delayRegFree : 1;
+ bool delayRegFree : 1;
// outOfOrder is marked on a (non-def) RefPosition that doesn't follow a definition of the
// register currently assigned to the Interval. This happens when we use the assigned
// register from a predecessor that is not the most recently allocated BasicBlock.
- bool outOfOrder : 1;
+ bool outOfOrder : 1;
- LsraLocation getRefEndLocation()
+ LsraLocation getRefEndLocation()
{
- return delayRegFree ? nodeLocation+1 : nodeLocation;
+ return delayRegFree ? nodeLocation + 1 : nodeLocation;
}
#ifdef DEBUG
- unsigned rpNum; // The unique RefPosition number, equal to its index in the refPositions list. Only used for debugging dumps.
-#endif // DEBUG
+ unsigned rpNum; // The unique RefPosition number, equal to its index in the refPositions list. Only used for
+ // debugging dumps.
+#endif // DEBUG
- bool isIntervalRef() { return (!isPhysRegRef && (referent != nullptr)); }
+ bool isIntervalRef()
+ {
+ return (!isPhysRegRef && (referent != nullptr));
+ }
// isTrueDef indicates that the RefPosition is a non-update def of a non-internal
// interval
- bool isTrueDef()
+ bool isTrueDef()
{
- return (refType == RefTypeDef &&
- isIntervalRef() &&
- !getInterval()->isInternal);
+ return (refType == RefTypeDef && isIntervalRef() && !getInterval()->isInternal);
}
// isFixedRefOfRegMask indicates that the RefPosition has a fixed assignment to the register
// specified by the given mask
- bool isFixedRefOfRegMask(regMaskTP regMask)
+ bool isFixedRefOfRegMask(regMaskTP regMask)
{
assert(genMaxOneBit(regMask));
return (registerAssignment == regMask);
}
// isFixedRefOfReg indicates that the RefPosition has a fixed assignment to the given register
- bool isFixedRefOfReg(regNumber regNum)
+ bool isFixedRefOfReg(regNumber regNum)
{
return (isFixedRefOfRegMask(genRegMask(regNum)));
}
#ifdef DEBUG
// operator= copies everything except 'rpNum', which must remain unique
- RefPosition & operator=(const RefPosition & rp)
+ RefPosition& operator=(const RefPosition& rp)
{
unsigned rpNumSave = rpNum;
memcpy(this, &rp, sizeof(rp));
@@ -1513,7 +1587,6 @@ public:
void dump();
#endif // DEBUG
-
};
#ifdef DEBUG
@@ -1521,5 +1594,5 @@ void dumpRegMask(regMaskTP regs);
#endif // DEBUG
/*****************************************************************************/
-#endif//_LSRA_H_
+#endif //_LSRA_H_
/*****************************************************************************/
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index 33dfb6dcfc..277c3755ff 100755
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -16,30 +16,32 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#pragma hdrstop
#endif
-#include "allocacheck.h" // for alloca
+#include "allocacheck.h" // for alloca
// Convert the given node into a call to the specified helper passing
// the given argument list.
//
// Tries to fold constants and also adds an edge for overflow exception
// returns the morphed tree
-GenTreePtr Compiler::fgMorphCastIntoHelper(GenTreePtr tree,
- int helper,
- GenTreePtr oper)
+GenTreePtr Compiler::fgMorphCastIntoHelper(GenTreePtr tree, int helper, GenTreePtr oper)
{
- GenTree *result;
+ GenTree* result;
/* If the operand is a constant, we'll try to fold it */
- if (oper->OperIsConst())
+ if (oper->OperIsConst())
{
GenTreePtr oldTree = tree;
- tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...)
+ tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...)
if (tree != oldTree)
+ {
return fgMorphTree(tree);
+ }
else if (tree->OperKind() & GTK_CONST)
+ {
return fgMorphConst(tree);
+ }
// assert that oper is unchanged and that it is still a GT_CAST node
noway_assert(tree->gtCast.CastOp() == oper);
@@ -50,32 +52,31 @@ GenTreePtr Compiler::fgMorphCastIntoHelper(GenTreePtr tree,
return result;
}
-
/*****************************************************************************
*
* Convert the given node into a call to the specified helper passing
* the given argument list.
*/
-GenTreePtr Compiler::fgMorphIntoHelperCall(GenTreePtr tree,
- int helper,
- GenTreeArgList* args)
+GenTreePtr Compiler::fgMorphIntoHelperCall(GenTreePtr tree, int helper, GenTreeArgList* args)
{
tree->ChangeOper(GT_CALL);
- tree->gtFlags |= GTF_CALL;
+ tree->gtFlags |= GTF_CALL;
if (args)
- tree->gtFlags |= (args->gtFlags & GTF_ALL_EFFECT);
- tree->gtCall.gtCallType = CT_HELPER;
- tree->gtCall.gtCallMethHnd = eeFindHelper(helper);
- tree->gtCall.gtCallArgs = args;
- tree->gtCall.gtCallObjp = NULL;
- tree->gtCall.gtCallLateArgs = NULL;
- tree->gtCall.fgArgInfo = NULL;
- tree->gtCall.gtRetClsHnd = NULL;
- tree->gtCall.gtCallMoreFlags = 0;
- tree->gtCall.gtInlineCandidateInfo = NULL;
- tree->gtCall.gtControlExpr = NULL;
+ {
+ tree->gtFlags |= (args->gtFlags & GTF_ALL_EFFECT);
+ }
+ tree->gtCall.gtCallType = CT_HELPER;
+ tree->gtCall.gtCallMethHnd = eeFindHelper(helper);
+ tree->gtCall.gtCallArgs = args;
+ tree->gtCall.gtCallObjp = nullptr;
+ tree->gtCall.gtCallLateArgs = nullptr;
+ tree->gtCall.fgArgInfo = nullptr;
+ tree->gtCall.gtRetClsHnd = nullptr;
+ tree->gtCall.gtCallMoreFlags = 0;
+ tree->gtCall.gtInlineCandidateInfo = nullptr;
+ tree->gtCall.gtControlExpr = nullptr;
#ifdef LEGACY_BACKEND
tree->gtCall.gtCallRegUsedMask = RBM_NONE;
@@ -94,12 +95,12 @@ GenTreePtr Compiler::fgMorphIntoHelperCall(GenTreePtr tree,
#if defined(_TARGET_X86_) && !defined(LEGACY_BACKEND)
if (varTypeIsLong(tree))
{
- GenTreeCall* callNode = tree->AsCall();
+ GenTreeCall* callNode = tree->AsCall();
ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
retTypeDesc->Reset();
retTypeDesc->InitializeLongReturnType(this);
callNode->ClearOtherRegs();
-
+
NYI("Helper with TYP_LONG return type");
}
#endif
@@ -116,45 +117,40 @@ GenTreePtr Compiler::fgMorphIntoHelperCall(GenTreePtr tree,
* to catch extra references
*/
-inline
-void DEBUG_DESTROY_NODE(GenTreePtr tree)
+inline void DEBUG_DESTROY_NODE(GenTreePtr tree)
{
#ifdef DEBUG
// printf("DEBUG_DESTROY_NODE for [0x%08x]\n", tree);
// Save gtOper in case we want to find out what this node was
- tree->gtOperSave = tree->gtOper;
+ tree->gtOperSave = tree->gtOper;
- tree->gtType = TYP_UNDEF;
- tree->gtFlags |= 0xFFFFFFFF & ~GTF_NODE_MASK;
+ tree->gtType = TYP_UNDEF;
+ tree->gtFlags |= 0xFFFFFFFF & ~GTF_NODE_MASK;
if (tree->OperIsSimple())
{
- tree->gtOp.gtOp1 =
- tree->gtOp.gtOp2 = NULL;
+ tree->gtOp.gtOp1 = tree->gtOp.gtOp2 = nullptr;
}
// Must do this last, because the "gtOp" check above will fail otherwise.
// Don't call SetOper, because GT_COUNT is not a valid value
- tree->gtOper = GT_COUNT;
+ tree->gtOper = GT_COUNT;
#endif
}
-
/*****************************************************************************
*
* Determine if a relop must be morphed to a qmark to manifest a boolean value.
* This is done when code generation can't create straight-line code to do it.
*/
-bool Compiler::fgMorphRelopToQmark(GenTreePtr tree)
+bool Compiler::fgMorphRelopToQmark(GenTreePtr tree)
{
#ifndef LEGACY_BACKEND
return false;
-#else // LEGACY_BACKEND
- return (genActualType(tree->TypeGet()) == TYP_LONG) ||
- varTypeIsFloating(tree->TypeGet());
+#else // LEGACY_BACKEND
+ return (genActualType(tree->TypeGet()) == TYP_LONG) || varTypeIsFloating(tree->TypeGet());
#endif // LEGACY_BACKEND
}
-
/*****************************************************************************
*
* Morph a cast node (we perform some very simple transformations here).
@@ -162,21 +158,21 @@ bool Compiler::fgMorphRelopToQmark(GenTreePtr tree)
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
+GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
{
noway_assert(tree->gtOper == GT_CAST);
noway_assert(genTypeSize(TYP_I_IMPL) == sizeof(void*));
/* The first sub-operand is the thing being cast */
- GenTreePtr oper = tree->gtCast.CastOp();
- var_types srcType = genActualType(oper->TypeGet());
- unsigned srcSize;
+ GenTreePtr oper = tree->gtCast.CastOp();
+ var_types srcType = genActualType(oper->TypeGet());
+ unsigned srcSize;
- var_types dstType = tree->CastToType();
- unsigned dstSize = genTypeSize(dstType);
+ var_types dstType = tree->CastToType();
+ unsigned dstSize = genTypeSize(dstType);
// See if the cast has to be done in two steps. R -> I
if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType))
@@ -190,18 +186,18 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
#if defined(_TARGET_ARM64_)
        // Arm64: src = float, dst is overflow conversion.
// This goes through helper and hence src needs to be converted to double.
- && tree->gtOverflow()
+ && tree->gtOverflow()
#elif defined(_TARGET_AMD64_)
// Amd64: src = float, dst = uint64 or overflow conversion.
// This goes through helper and hence src needs to be converted to double.
&& (tree->gtOverflow() || (dstType == TYP_ULONG))
-#elif defined(_TARGET_ARM_)
+#elif defined(_TARGET_ARM_)
// Arm: src = float, dst = int64/uint64 or overflow conversion.
&& (tree->gtOverflow() || varTypeIsLong(dstType))
#endif
#endif // FEATURE_STACK_FP_X87
- )
+ )
{
oper = gtNewCastNode(TYP_DOUBLE, oper, TYP_DOUBLE);
}
@@ -213,14 +209,14 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
if (dstSize < genTypeSize(TYP_INT))
{
oper = gtNewCastNodeL(TYP_INT, oper, TYP_INT);
- oper->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED|GTF_OVERFLOW|GTF_EXCEPT));
+ oper->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED | GTF_OVERFLOW | GTF_EXCEPT));
tree->gtFlags &= ~GTF_UNSIGNED;
}
#else
if (dstSize < sizeof(void*))
{
oper = gtNewCastNodeL(TYP_I_IMPL, oper, TYP_I_IMPL);
- oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW|GTF_EXCEPT));
+ oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
}
#endif
else
@@ -228,44 +224,50 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
/* Note that if we need to use a helper call then we can not morph oper */
if (!tree->gtOverflow())
{
-#ifdef _TARGET_ARM64_ // On ARM64 All non-overflow checking conversions can be optimized
+#ifdef _TARGET_ARM64_ // On ARM64 All non-overflow checking conversions can be optimized
goto OPTIMIZECAST;
#else
switch (dstType)
{
- case TYP_INT:
-#ifdef _TARGET_X86_ // there is no rounding convert to integer instruction on ARM or x64 so skip this
- if ((oper->gtOper == GT_INTRINSIC) &&
- (oper->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round))
- {
- /* optimization: conv.i4(round.d(d)) -> round.i(d) */
- oper->gtType = dstType;
- return fgMorphTree(oper);
- }
- // if SSE2 is not enabled, we need the helper
- else if (!opts.compCanUseSSE2)
- {
- return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT, oper);
- }
- else
+ case TYP_INT:
+#ifdef _TARGET_X86_ // there is no rounding convert to integer instruction on ARM or x64 so skip this
+ if ((oper->gtOper == GT_INTRINSIC) &&
+ (oper->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round))
+ {
+ /* optimization: conv.i4(round.d(d)) -> round.i(d) */
+ oper->gtType = dstType;
+ return fgMorphTree(oper);
+ }
+ // if SSE2 is not enabled, we need the helper
+ else if (!opts.compCanUseSSE2)
+ {
+ return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT, oper);
+ }
+ else
#endif // _TARGET_X86_
- {
- goto OPTIMIZECAST;
- }
+ {
+ goto OPTIMIZECAST;
+ }
#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
- case TYP_UINT: goto OPTIMIZECAST;
-#else // _TARGET_ARM_
- case TYP_UINT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper);
+ case TYP_UINT:
+ goto OPTIMIZECAST;
+#else // _TARGET_ARM_
+ case TYP_UINT:
+ return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper);
#endif // _TARGET_ARM_
#ifdef _TARGET_AMD64_
- // SSE2 has instructions to convert a float/double directly to a long
- case TYP_LONG: goto OPTIMIZECAST;
+ // SSE2 has instructions to convert a float/double directly to a long
+ case TYP_LONG:
+ goto OPTIMIZECAST;
#else
- case TYP_LONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper);
+ case TYP_LONG:
+ return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper);
#endif //_TARGET_AMD64_
- case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper);
- default: break;
+ case TYP_ULONG:
+ return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper);
+ default:
+ break;
}
#endif // _TARGET_ARM64_
}
@@ -273,11 +275,16 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
{
switch (dstType)
{
- case TYP_INT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper);
- case TYP_UINT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper);
- case TYP_LONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper);
- case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper);
- default: break;
+ case TYP_INT:
+ return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper);
+ case TYP_UINT:
+ return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper);
+ case TYP_LONG:
+ return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper);
+ case TYP_ULONG:
+ return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper);
+ default:
+ break;
}
}
noway_assert(!"Unexpected dstType");
@@ -290,18 +297,19 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType))
{
oper = gtNewCastNode(TYP_I_IMPL, oper, TYP_I_IMPL);
- oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW|GTF_EXCEPT|GTF_UNSIGNED));
+ oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT | GTF_UNSIGNED));
tree->gtFlags &= ~GTF_UNSIGNED;
}
#endif //!_TARGET_64BIT_
#ifdef _TARGET_ARM_
- else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && (oper->gtOper == GT_CAST) && !varTypeIsLong(oper->gtCast.CastOp()))
+ else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && (oper->gtOper == GT_CAST) &&
+ !varTypeIsLong(oper->gtCast.CastOp()))
{
// optimization: conv.r4(conv.r8(?)) -> conv.r4(d)
        // except when the ultimate source is a long because there is no long-to-float helper, so it must be done in 2 steps.
// This happens semi-frequently because there is no IL 'conv.r4.un'
- oper->gtType = TYP_FLOAT;
+ oper->gtType = TYP_FLOAT;
oper->CastToType() = TYP_FLOAT;
return fgMorphTree(oper);
}
@@ -315,7 +323,7 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
// - insert a cast from double to float
// - recurse into the resulting tree
tree->CastToType() = TYP_DOUBLE;
- tree->gtType = TYP_DOUBLE;
+ tree->gtType = TYP_DOUBLE;
tree = gtNewCastNode(TYP_FLOAT, tree, TYP_FLOAT);
@@ -350,15 +358,15 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
// - insert a cast from double to float
// - recurse into the resulting tree
tree->CastToType() = TYP_DOUBLE;
- tree->gtType = TYP_DOUBLE;
- tree = gtNewCastNode(TYP_FLOAT, tree, TYP_FLOAT);
+ tree->gtType = TYP_DOUBLE;
+ tree = gtNewCastNode(TYP_FLOAT, tree, TYP_FLOAT);
return fgMorphTree(tree);
}
}
else if (srcType == TYP_UINT)
{
oper = gtNewCastNode(TYP_LONG, oper, TYP_LONG);
- oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW|GTF_EXCEPT|GTF_UNSIGNED));
+ oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT | GTF_UNSIGNED));
tree->gtFlags &= ~GTF_UNSIGNED;
}
}
@@ -377,7 +385,7 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
else if (srcType == TYP_UINT)
{
oper = gtNewCastNode(TYP_LONG, oper, TYP_LONG);
- oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW|GTF_EXCEPT|GTF_UNSIGNED));
+ oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT | GTF_UNSIGNED));
tree->gtFlags &= ~GTF_UNSIGNED;
}
}
@@ -394,9 +402,9 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
// We generate an assignment to an int and then do the cast from an int. With this we avoid
// the gc problem and we allow casts to bytes, longs, etc...
unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC"));
- oper->gtType = TYP_I_IMPL;
- GenTreePtr asg = gtNewTempAssign(lclNum, oper);
- oper->gtType = srcType;
+ oper->gtType = TYP_I_IMPL;
+ GenTreePtr asg = gtNewTempAssign(lclNum, oper);
+ oper->gtType = srcType;
// do the real cast
GenTreePtr cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), dstType);
@@ -415,8 +423,7 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
// overflow-insensitive narrowing casts, which always silently truncate.
//
// Note that casts from [u]long to small integer types are handled above.
- if ((srcType == TYP_LONG) &&
- ((dstType == TYP_INT) || (dstType == TYP_UINT)))
+ if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT)))
{
// As a special case, look for overflow-sensitive casts of an AND
// expression, and see if the second operand is a small constant. Since
@@ -428,8 +435,7 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
GenTreePtr andOp2 = oper->gtOp.gtOp2;
// Special case to the special case: AND with a casted int.
- if ((andOp2->OperGet() == GT_CAST) &&
- (andOp2->gtCast.CastOp()->OperGet() == GT_CNS_INT))
+ if ((andOp2->OperGet() == GT_CAST) && (andOp2->gtCast.CastOp()->OperGet() == GT_CNS_INT))
{
// gtFoldExprConst will deal with whether the cast is signed or
// unsigned, or overflow-sensitive.
@@ -440,8 +446,7 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
// than 2^{31} for a cast to int.
int maxWidth = (dstType == TYP_UINT) ? 32 : 31;
- if ((andOp2->OperGet() == GT_CNS_NATIVELONG) &&
- ((andOp2->gtIntConCommon.LngValue() >> maxWidth) == 0))
+ if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->gtIntConCommon.LngValue() >> maxWidth) == 0))
{
// This cast can't overflow.
tree->gtFlags &= ~(GTF_OVERFLOW | GTF_EXCEPT);
@@ -452,18 +457,13 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
// when neither the cast node nor the oper node may throw an exception
// based on the upper 32 bits.
//
- if (fgGlobalMorph &&
- !tree->gtOverflow() &&
- !oper->gtOverflowEx())
+ if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx())
{
- // For these operations the lower 32 bits of the result only depends
+ // For these operations the lower 32 bits of the result only depends
// upon the lower 32 bits of the operands
//
- if ( (oper->OperGet() == GT_ADD) ||
- (oper->OperGet() == GT_MUL) ||
- (oper->OperGet() == GT_AND) ||
- (oper->OperGet() == GT_OR) ||
- (oper->OperGet() == GT_XOR) )
+ if ((oper->OperGet() == GT_ADD) || (oper->OperGet() == GT_MUL) || (oper->OperGet() == GT_AND) ||
+ (oper->OperGet() == GT_OR) || (oper->OperGet() == GT_XOR))
{
DEBUG_DESTROY_NODE(tree);
@@ -472,8 +472,10 @@ GenTreePtr Compiler::fgMorphCast(GenTreePtr tree)
oper->gtOp.gtOp2 = gtNewCastNode(TYP_INT, oper->gtOp.gtOp2, dstType);
// Clear the GT_MUL_64RSLT if it is set
- if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT))
+ if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT))
+ {
oper->gtFlags &= ~GTF_MUL_64RSLT;
+ }
// The operation now produces a 32-bit result.
oper->gtType = TYP_INT;
@@ -495,7 +497,9 @@ OPTIMIZECAST:
/* unless we have an overflow cast, reset the except flag */
if (!tree->gtOverflow())
+ {
tree->gtFlags &= ~GTF_EXCEPT;
+ }
/* Just in case new side effects were introduced */
tree->gtFlags |= (oper->gtFlags & GTF_ALL_EFFECT);
@@ -504,29 +508,35 @@ OPTIMIZECAST:
/* if GTF_UNSIGNED is set then force srcType to an unsigned type */
if (tree->gtFlags & GTF_UNSIGNED)
+ {
srcType = genUnsignedType(srcType);
+ }
srcSize = genTypeSize(srcType);
- if (!gtIsActiveCSE_Candidate(tree)) // tree cannot be a CSE candidate
+ if (!gtIsActiveCSE_Candidate(tree)) // tree cannot be a CSE candidate
{
/* See if we can discard the cast */
if (varTypeIsIntegral(srcType) && varTypeIsIntegral(dstType))
{
- if (srcType == dstType) // Certainly if they are identical it is pointless
+ if (srcType == dstType)
+ { // Certainly if they are identical it is pointless
goto REMOVE_CAST;
+ }
if (oper->OperGet() == GT_LCL_VAR && varTypeIsSmall(dstType))
{
- unsigned varNum = oper->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &lvaTable[varNum];
+ unsigned varNum = oper->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[varNum];
if (varDsc->TypeGet() == dstType && varDsc->lvNormalizeOnStore())
+ {
goto REMOVE_CAST;
+ }
}
- bool unsignedSrc = varTypeIsUnsigned(srcType);
- bool unsignedDst = varTypeIsUnsigned(dstType);
- bool signsDiffer = (unsignedSrc != unsignedDst);
+ bool unsignedSrc = varTypeIsUnsigned(srcType);
+ bool unsignedDst = varTypeIsUnsigned(dstType);
+ bool signsDiffer = (unsignedSrc != unsignedDst);
            // For same-sized casts with the same signs,
            // or non-overflow casts, we discard them as well
@@ -536,7 +546,9 @@ OPTIMIZECAST:
noway_assert(varTypeIsGC(srcType) == varTypeIsGC(dstType));
if (!signsDiffer)
+ {
goto REMOVE_CAST;
+ }
if (!tree->gtOverflow())
{
@@ -547,34 +559,41 @@ OPTIMIZECAST:
{
switch (oper->gtOper)
{
- case GT_IND:
- case GT_CLS_VAR:
- case GT_LCL_FLD:
- case GT_ARR_ELEM:
- oper->gtType = dstType;
- goto REMOVE_CAST;
- default: break;
+ case GT_IND:
+ case GT_CLS_VAR:
+ case GT_LCL_FLD:
+ case GT_ARR_ELEM:
+ oper->gtType = dstType;
+ goto REMOVE_CAST;
+ default:
+ break;
}
}
else
+ {
goto REMOVE_CAST;
+ }
}
}
- if (srcSize < dstSize) // widening cast
+ if (srcSize < dstSize) // widening cast
{
// Keep any long casts
if (dstSize == sizeof(int))
{
// Only keep signed to unsigned widening cast with overflow check
if (!tree->gtOverflow() || !unsignedDst || unsignedSrc)
+ {
goto REMOVE_CAST;
+ }
}
// Casts from signed->unsigned can never overflow while widening
if (unsignedSrc || !unsignedDst)
+ {
tree->gtFlags &= ~GTF_OVERFLOW;
+ }
}
else
{
@@ -582,13 +601,11 @@ OPTIMIZECAST:
// Note: Do not narrow a cast that is marked as a CSE
// And do not narrow if the oper is marked as a CSE either
//
- if (!tree->gtOverflow() &&
- !gtIsActiveCSE_Candidate(oper) &&
- (opts.compFlags & CLFLG_TREETRANS) &&
- optNarrowTree(oper, srcType, dstType, tree->gtVNPair, false))
+ if (!tree->gtOverflow() && !gtIsActiveCSE_Candidate(oper) && (opts.compFlags & CLFLG_TREETRANS) &&
+ optNarrowTree(oper, srcType, dstType, tree->gtVNPair, false))
{
- optNarrowTree(oper, srcType, dstType, tree->gtVNPair, true);
-
+ optNarrowTree(oper, srcType, dstType, tree->gtVNPair, true);
+
/* If oper is changed into a cast to TYP_INT, or to a GT_NOP, we may need to discard it */
if (oper->gtOper == GT_CAST && oper->CastToType() == genActualType(oper->CastFromType()))
{
@@ -598,18 +615,18 @@ OPTIMIZECAST:
}
}
}
-
+
switch (oper->gtOper)
{
/* If the operand is a constant, we'll fold it */
- case GT_CNS_INT:
- case GT_CNS_LNG:
- case GT_CNS_DBL:
- case GT_CNS_STR:
+ case GT_CNS_INT:
+ case GT_CNS_LNG:
+ case GT_CNS_DBL:
+ case GT_CNS_STR:
{
GenTreePtr oldTree = tree;
- tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...)
+ tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...)
// Did we get a comma throw as a result of gtFoldExprConst?
if ((oldTree != tree) && (oldTree->gtOper != GT_COMMA))
@@ -620,102 +637,111 @@ OPTIMIZECAST:
return tree;
}
else if (tree->gtOper != GT_CAST)
+ {
return tree;
+ }
noway_assert(tree->gtCast.CastOp() == oper); // unchanged
}
break;
- case GT_CAST:
- /* Check for two consecutive casts into the same dstType */
- if (!tree->gtOverflow())
- {
- var_types dstType2 = oper->CastToType();
- if (dstType == dstType2)
- goto REMOVE_CAST;
- }
- break;
+ case GT_CAST:
+ /* Check for two consecutive casts into the same dstType */
+ if (!tree->gtOverflow())
+ {
+ var_types dstType2 = oper->CastToType();
+ if (dstType == dstType2)
+ {
+ goto REMOVE_CAST;
+ }
+ }
+ break;
/* If op1 is a mod node, mark it with the GTF_MOD_INT_RESULT flag
so that the code generator will know not to convert the result
of the idiv to a regpair */
- case GT_MOD:
- if (dstType == TYP_INT)
- tree->gtOp.gtOp1->gtFlags |= GTF_MOD_INT_RESULT;
+ case GT_MOD:
+ if (dstType == TYP_INT)
+ {
+ tree->gtOp.gtOp1->gtFlags |= GTF_MOD_INT_RESULT;
+ }
break;
- case GT_UMOD:
- if (dstType == TYP_UINT)
- tree->gtOp.gtOp1->gtFlags |= GTF_MOD_INT_RESULT;
+ case GT_UMOD:
+ if (dstType == TYP_UINT)
+ {
+ tree->gtOp.gtOp1->gtFlags |= GTF_MOD_INT_RESULT;
+ }
break;
- case GT_COMMA:
- // Check for cast of a GT_COMMA with a throw overflow
- // Bug 110829: Since this optimization will bash the types
- // neither oper or commaOp2 can be CSE candidates
- if (fgIsCommaThrow(oper) &&
- !gtIsActiveCSE_Candidate(oper)) // oper can not be a CSE candidate
- {
- GenTreePtr commaOp2 = oper->gtOp.gtOp2;
-
- if (!gtIsActiveCSE_Candidate(commaOp2)) // commaOp2 can not be a CSE candidate
- {
- // need type of oper to be same as tree
- if (tree->gtType == TYP_LONG)
- {
- commaOp2->ChangeOperConst(GT_CNS_NATIVELONG);
- commaOp2->gtIntConCommon.SetLngValue(0);
- /* Change the types of oper and commaOp2 to TYP_LONG */
- oper->gtType = commaOp2->gtType = TYP_LONG;
- }
- else if (varTypeIsFloating(tree->gtType))
- {
- commaOp2->ChangeOperConst(GT_CNS_DBL);
- commaOp2->gtDblCon.gtDconVal = 0.0;
- // Change the types of oper and commaOp2
- // X87 promotes everything to TYP_DOUBLE
- // But other's are a little more precise
- const var_types newTyp
+ case GT_COMMA:
+ // Check for cast of a GT_COMMA with a throw overflow
+ // Bug 110829: Since this optimization will bash the types
+ // neither oper or commaOp2 can be CSE candidates
+ if (fgIsCommaThrow(oper) && !gtIsActiveCSE_Candidate(oper)) // oper can not be a CSE candidate
+ {
+ GenTreePtr commaOp2 = oper->gtOp.gtOp2;
+
+ if (!gtIsActiveCSE_Candidate(commaOp2)) // commaOp2 can not be a CSE candidate
+ {
+ // need type of oper to be same as tree
+ if (tree->gtType == TYP_LONG)
+ {
+ commaOp2->ChangeOperConst(GT_CNS_NATIVELONG);
+ commaOp2->gtIntConCommon.SetLngValue(0);
+ /* Change the types of oper and commaOp2 to TYP_LONG */
+ oper->gtType = commaOp2->gtType = TYP_LONG;
+ }
+ else if (varTypeIsFloating(tree->gtType))
+ {
+ commaOp2->ChangeOperConst(GT_CNS_DBL);
+ commaOp2->gtDblCon.gtDconVal = 0.0;
+ // Change the types of oper and commaOp2
+ // X87 promotes everything to TYP_DOUBLE
+ // But other's are a little more precise
+ const var_types newTyp
#if FEATURE_X87_DOUBLES
- = TYP_DOUBLE;
-#else // FEATURE_X87_DOUBLES
- = tree->gtType;
+ = TYP_DOUBLE;
+#else // FEATURE_X87_DOUBLES
+ = tree->gtType;
#endif // FEATURE_X87_DOUBLES
- oper->gtType = commaOp2->gtType = newTyp;
- }
- else
- {
- commaOp2->ChangeOperConst(GT_CNS_INT);
- commaOp2->gtIntCon.gtIconVal = 0;
- /* Change the types of oper and commaOp2 to TYP_INT */
- oper->gtType = commaOp2->gtType = TYP_INT;
- }
- }
+ oper->gtType = commaOp2->gtType = newTyp;
+ }
+ else
+ {
+ commaOp2->ChangeOperConst(GT_CNS_INT);
+ commaOp2->gtIntCon.gtIconVal = 0;
+ /* Change the types of oper and commaOp2 to TYP_INT */
+ oper->gtType = commaOp2->gtType = TYP_INT;
+ }
+ }
- if (vnStore != nullptr)
- {
- fgValueNumberTreeConst(commaOp2);
- }
+ if (vnStore != nullptr)
+ {
+ fgValueNumberTreeConst(commaOp2);
+ }
- /* Return the GT_COMMA node as the new tree */
- return oper;
- }
- break;
+ /* Return the GT_COMMA node as the new tree */
+ return oper;
+ }
+ break;
- default:
- break;
+ default:
+ break;
} /* end switch (oper->gtOper) */
}
if (tree->gtOverflow())
+ {
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW, fgPtrArgCntCur);
+ }
return tree;
REMOVE_CAST:
    /* Here we've eliminated the cast, so just return its operand */
- assert(!gtIsActiveCSE_Candidate(tree)); // tree cannot be a CSE candidate
+ assert(!gtIsActiveCSE_Candidate(tree)); // tree cannot be a CSE candidate
DEBUG_DESTROY_NODE(tree);
return oper;
@@ -729,14 +755,12 @@ REMOVE_CAST:
* Perform an unwrap operation on a Proxy object
*/
-GenTreePtr Compiler::fgUnwrapProxy(GenTreePtr objRef)
+GenTreePtr Compiler::fgUnwrapProxy(GenTreePtr objRef)
{
- assert(info.compIsContextful &&
- info.compUnwrapContextful &&
- impIsThis(objRef));
+ assert(info.compIsContextful && info.compUnwrapContextful && impIsThis(objRef));
- CORINFO_EE_INFO * pInfo = eeGetEEInfo();
- GenTreePtr addTree;
+ CORINFO_EE_INFO* pInfo = eeGetEEInfo();
+ GenTreePtr addTree;
// Perform the unwrap:
//
@@ -746,18 +770,14 @@ GenTreePtr Compiler::fgUnwrapProxy(GenTreePtr objRef)
//
// Note that each dereference is a GC pointer
- addTree = gtNewOperNode(GT_ADD, TYP_I_IMPL,
- objRef,
- gtNewIconNode(pInfo->offsetOfTransparentProxyRP, TYP_I_IMPL));
+ addTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, objRef, gtNewIconNode(pInfo->offsetOfTransparentProxyRP, TYP_I_IMPL));
- objRef = gtNewOperNode(GT_IND, TYP_REF, addTree);
+ objRef = gtNewOperNode(GT_IND, TYP_REF, addTree);
objRef->gtFlags |= GTF_IND_INVARIANT;
- addTree = gtNewOperNode(GT_ADD, TYP_I_IMPL,
- objRef,
- gtNewIconNode(pInfo->offsetOfRealProxyServer, TYP_I_IMPL));
+ addTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, objRef, gtNewIconNode(pInfo->offsetOfRealProxyServer, TYP_I_IMPL));
- objRef = gtNewOperNode(GT_IND, TYP_REF, addTree);
+ objRef = gtNewOperNode(GT_IND, TYP_REF, addTree);
objRef->gtFlags |= GTF_IND_INVARIANT;
    // objRef now holds the 'real this' reference (i.e. the unwrapped proxy)
@@ -803,7 +823,7 @@ void fgArgTabEntry::Dump()
printf(", numSlots=%u, slotNum=%u", numSlots, slotNum);
}
printf(", align=%u", alignment);
- if (lateArgInx != (unsigned) -1)
+ if (lateArgInx != (unsigned)-1)
{
printf(", lateArgInx=%u", lateArgInx);
}
@@ -843,11 +863,12 @@ void fgArgTabEntry::Dump()
}
#endif
-fgArgInfo::fgArgInfo(Compiler * comp, GenTreePtr call, unsigned numArgs)
+fgArgInfo::fgArgInfo(Compiler* comp, GenTreePtr call, unsigned numArgs)
{
- compiler = comp;
- callTree = call; assert(call->IsCall());
- argCount = 0; // filled in arg count, starts at zero
+ compiler = comp;
+ callTree = call;
+ assert(call->IsCall());
+ argCount = 0; // filled in arg count, starts at zero
nextSlotNum = INIT_ARG_STACK_SLOT;
stkLevel = 0;
argTableSize = numArgs; // the allocated table size
@@ -858,10 +879,14 @@ fgArgInfo::fgArgInfo(Compiler * comp, GenTreePtr call, unsigned numArgs)
argsSorted = false;
if (argTableSize == 0)
- argTable = NULL;
+ {
+ argTable = nullptr;
+ }
else
- argTable = new(compiler, CMK_fgArgInfoPtrArr) fgArgTabEntryPtr[argTableSize];
- }
+ {
+ argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntryPtr[argTableSize];
+ }
+}
/*****************************************************************************
*
@@ -874,27 +899,29 @@ fgArgInfo::fgArgInfo(Compiler * comp, GenTreePtr call, unsigned numArgs)
* in the argTable contains pointers that must point to the
* new arguments and not the old arguments.
*/
-fgArgInfo::fgArgInfo(GenTreePtr newCall, GenTreePtr oldCall)
+fgArgInfo::fgArgInfo(GenTreePtr newCall, GenTreePtr oldCall)
{
assert(oldCall->IsCall());
assert(newCall->IsCall());
fgArgInfoPtr oldArgInfo = oldCall->gtCall.fgArgInfo;
- compiler = oldArgInfo->compiler;;
- callTree = newCall; assert(newCall->IsCall());
- argCount = 0; // filled in arg count, starts at zero
+ compiler = oldArgInfo->compiler;
+ ;
+ callTree = newCall;
+ assert(newCall->IsCall());
+ argCount = 0; // filled in arg count, starts at zero
nextSlotNum = INIT_ARG_STACK_SLOT;
stkLevel = oldArgInfo->stkLevel;
argTableSize = oldArgInfo->argTableSize;
argsComplete = false;
- argTable = NULL;
+ argTable = nullptr;
if (argTableSize > 0)
{
- argTable = new(compiler, CMK_fgArgInfoPtrArr) fgArgTabEntryPtr[argTableSize];
- for (unsigned inx=0; inx<argTableSize; inx++)
+ argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntryPtr[argTableSize];
+ for (unsigned inx = 0; inx < argTableSize; inx++)
{
- argTable[inx] = NULL;
+ argTable[inx] = nullptr;
}
}
@@ -904,31 +931,31 @@ fgArgInfo::fgArgInfo(GenTreePtr newCall, GenTreePtr oldCall)
// so we can iterate over these argument lists more uniformly.
// Need to provide a temporary non-null first argument to these constructors: if we use them, we'll replace them
GenTreeArgList* newArgs;
- GenTreeArgList newArgObjp(newCall, newCall->gtCall.gtCallArgs);
+ GenTreeArgList newArgObjp(newCall, newCall->gtCall.gtCallArgs);
GenTreeArgList* oldArgs;
- GenTreeArgList oldArgObjp(oldCall, oldCall->gtCall.gtCallArgs);
+ GenTreeArgList oldArgObjp(oldCall, oldCall->gtCall.gtCallArgs);
- if (newCall->gtCall.gtCallObjp == NULL)
+ if (newCall->gtCall.gtCallObjp == nullptr)
{
- assert(oldCall->gtCall.gtCallObjp == NULL);
+ assert(oldCall->gtCall.gtCallObjp == nullptr);
newArgs = newCall->gtCall.gtCallArgs;
oldArgs = oldCall->gtCall.gtCallArgs;
}
else
{
- assert(oldCall->gtCall.gtCallObjp != NULL);
+ assert(oldCall->gtCall.gtCallObjp != nullptr);
newArgObjp.Current() = newCall->gtCall.gtCallArgs;
- newArgs = &newArgObjp;
+ newArgs = &newArgObjp;
oldArgObjp.Current() = oldCall->gtCall.gtCallObjp;
- oldArgs = &oldArgObjp;
+ oldArgs = &oldArgObjp;
}
- GenTreePtr newCurr;
- GenTreePtr oldCurr;
- GenTreeArgList* newParent = NULL;
- GenTreeArgList* oldParent = NULL;
- fgArgTabEntryPtr * oldArgTable = oldArgInfo->argTable;
- bool scanRegArgs = false;
+ GenTreePtr newCurr;
+ GenTreePtr oldCurr;
+ GenTreeArgList* newParent = nullptr;
+ GenTreeArgList* oldParent = nullptr;
+ fgArgTabEntryPtr* oldArgTable = oldArgInfo->argTable;
+ bool scanRegArgs = false;
while (newArgs)
{
@@ -943,21 +970,21 @@ fgArgInfo::fgArgInfo(GenTreePtr newCall, GenTreePtr oldCall)
}
else
{
- assert(newParent == NULL && oldParent == NULL);
+ assert(newParent == nullptr && oldParent == nullptr);
}
newArgs = newArgs->Rest();
oldArgs = oldArgs->Rest();
- fgArgTabEntryPtr oldArgTabEntry = NULL;
- fgArgTabEntryPtr newArgTabEntry = NULL;
+ fgArgTabEntryPtr oldArgTabEntry = nullptr;
+ fgArgTabEntryPtr newArgTabEntry = nullptr;
- for (unsigned inx=0; inx<argTableSize; inx++)
+ for (unsigned inx = 0; inx < argTableSize; inx++)
{
oldArgTabEntry = oldArgTable[inx];
if (oldArgTabEntry->parent == oldParent)
{
- assert((oldParent == NULL) == (newParent == NULL));
+ assert((oldParent == nullptr) == (newParent == nullptr));
// We have found the matching "parent" field in oldArgTabEntry
@@ -985,7 +1012,7 @@ fgArgInfo::fgArgInfo(GenTreePtr newCall, GenTreePtr oldCall)
//
// We will fix this pointer up in the next loop
//
- newArgTabEntry->node = NULL; // For now we assign a NULL to this field
+ newArgTabEntry->node = nullptr; // For now we assign a NULL to this field
scanRegArgs = true;
}
@@ -998,7 +1025,7 @@ fgArgInfo::fgArgInfo(GenTreePtr newCall, GenTreePtr oldCall)
}
// We should have found the matching oldArgTabEntry and created the newArgTabEntry
//
- assert(newArgTabEntry != NULL);
+ assert(newArgTabEntry != nullptr);
}
if (scanRegArgs)
@@ -1020,10 +1047,10 @@ fgArgInfo::fgArgInfo(GenTreePtr newCall, GenTreePtr oldCall)
oldCurr = oldArgs->Current();
oldArgs = oldArgs->Rest();
- fgArgTabEntryPtr oldArgTabEntry = NULL;
- fgArgTabEntryPtr newArgTabEntry = NULL;
+ fgArgTabEntryPtr oldArgTabEntry = nullptr;
+ fgArgTabEntryPtr newArgTabEntry = nullptr;
- for (unsigned inx=0; inx<argTableSize; inx++)
+ for (unsigned inx = 0; inx < argTableSize; inx++)
{
oldArgTabEntry = oldArgTable[inx];
@@ -1032,11 +1059,11 @@ fgArgInfo::fgArgInfo(GenTreePtr newCall, GenTreePtr oldCall)
// We have found the matching "node" field in oldArgTabEntry
newArgTabEntry = argTable[inx];
- assert(newArgTabEntry != NULL);
+ assert(newArgTabEntry != nullptr);
// update the "node" GenTreePtr fields in the newArgTabEntry
//
- assert(newArgTabEntry->node == NULL); // We previously assigned NULL to this field
+ assert(newArgTabEntry->node == nullptr); // We previously assigned NULL to this field
newArgTabEntry->node = newCurr;
break;
@@ -1058,33 +1085,29 @@ void fgArgInfo::AddArg(fgArgTabEntryPtr curArgTabEntry)
argCount++;
}
-fgArgTabEntryPtr fgArgInfo::AddRegArg(unsigned argNum,
- GenTreePtr node,
- GenTreePtr parent,
- regNumber regNum,
- unsigned numRegs,
- unsigned alignment)
+fgArgTabEntryPtr fgArgInfo::AddRegArg(
+ unsigned argNum, GenTreePtr node, GenTreePtr parent, regNumber regNum, unsigned numRegs, unsigned alignment)
{
- fgArgTabEntryPtr curArgTabEntry = new(compiler, CMK_fgArgInfo) fgArgTabEntry;
-
- curArgTabEntry->argNum = argNum;
- curArgTabEntry->node = node;
- curArgTabEntry->parent = parent;
- curArgTabEntry->regNum = regNum;
- curArgTabEntry->slotNum = 0;
- curArgTabEntry->numRegs = numRegs;
- curArgTabEntry->numSlots = 0;
- curArgTabEntry->alignment = alignment;
- curArgTabEntry->lateArgInx = (unsigned)-1;
- curArgTabEntry->tmpNum = (unsigned)-1;
- curArgTabEntry->isSplit = false;
- curArgTabEntry->isTmp = false;
- curArgTabEntry->needTmp = false;
- curArgTabEntry->needPlace = false;
- curArgTabEntry->processed = false;
- curArgTabEntry->isHfaRegArg = false;
- curArgTabEntry->isBackFilled = false;
- curArgTabEntry->isNonStandard = false;
+ fgArgTabEntryPtr curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
+
+ curArgTabEntry->argNum = argNum;
+ curArgTabEntry->node = node;
+ curArgTabEntry->parent = parent;
+ curArgTabEntry->regNum = regNum;
+ curArgTabEntry->slotNum = 0;
+ curArgTabEntry->numRegs = numRegs;
+ curArgTabEntry->numSlots = 0;
+ curArgTabEntry->alignment = alignment;
+ curArgTabEntry->lateArgInx = (unsigned)-1;
+ curArgTabEntry->tmpNum = (unsigned)-1;
+ curArgTabEntry->isSplit = false;
+ curArgTabEntry->isTmp = false;
+ curArgTabEntry->needTmp = false;
+ curArgTabEntry->needPlace = false;
+ curArgTabEntry->processed = false;
+ curArgTabEntry->isHfaRegArg = false;
+ curArgTabEntry->isBackFilled = false;
+ curArgTabEntry->isNonStandard = false;
hasRegArgs = true;
AddArg(curArgTabEntry);
@@ -1092,14 +1115,14 @@ fgArgTabEntryPtr fgArgInfo::AddRegArg(unsigned argNum,
}
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-fgArgTabEntryPtr fgArgInfo::AddRegArg(unsigned argNum,
- GenTreePtr node,
- GenTreePtr parent,
- regNumber regNum,
- unsigned numRegs,
- unsigned alignment,
- const bool isStruct,
- const regNumber otherRegNum,
+fgArgTabEntryPtr fgArgInfo::AddRegArg(unsigned argNum,
+ GenTreePtr node,
+ GenTreePtr parent,
+ regNumber regNum,
+ unsigned numRegs,
+ unsigned alignment,
+ const bool isStruct,
+ const regNumber otherRegNum,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr)
{
fgArgTabEntryPtr curArgTabEntry = AddRegArg(argNum, node, parent, regNum, numRegs, alignment);
@@ -1110,8 +1133,8 @@ fgArgTabEntryPtr fgArgInfo::AddRegArg(unsigned argNum,
// This requires the use of an extra flag. At creation time the state is right,
// so this assert enforces that.
assert((varTypeIsStruct(node) && isStruct) || (!varTypeIsStruct(node) && !isStruct));
- curArgTabEntry->otherRegNum = otherRegNum; // Second reg for the struct
- curArgTabEntry->isStruct = isStruct; // is this a struct arg
+ curArgTabEntry->otherRegNum = otherRegNum; // Second reg for the struct
+ curArgTabEntry->isStruct = isStruct; // is this a struct arg
if (isStruct && structDescPtr != nullptr)
{
@@ -1122,14 +1145,14 @@ fgArgTabEntryPtr fgArgInfo::AddRegArg(unsigned argNum,
}
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-fgArgTabEntryPtr fgArgInfo::AddStkArg(unsigned argNum,
- GenTreePtr node,
- GenTreePtr parent,
- unsigned numSlots,
- unsigned alignment
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool isStruct))
+fgArgTabEntryPtr fgArgInfo::AddStkArg(unsigned argNum,
+ GenTreePtr node,
+ GenTreePtr parent,
+ unsigned numSlots,
+ unsigned alignment
+ FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool isStruct))
{
- fgArgTabEntryPtr curArgTabEntry = new(compiler, CMK_fgArgInfo) fgArgTabEntry;
+ fgArgTabEntryPtr curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
nextSlotNum = (unsigned)roundUp(nextSlotNum, alignment);
@@ -1139,8 +1162,8 @@ fgArgTabEntryPtr fgArgInfo::AddStkArg(unsigned argNum,
// This requires the use of an extra flag. At creation time the state is right,
// so this assert enforces that.
assert((varTypeIsStruct(node) && isStruct) || (!varTypeIsStruct(node) && !isStruct));
- curArgTabEntry->isStruct = isStruct; // is this a struct arg
-#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+ curArgTabEntry->isStruct = isStruct; // is this a struct arg
+#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
curArgTabEntry->argNum = argNum;
curArgTabEntry->node = node;
@@ -1150,8 +1173,8 @@ fgArgTabEntryPtr fgArgInfo::AddStkArg(unsigned argNum,
curArgTabEntry->numRegs = 0;
curArgTabEntry->numSlots = numSlots;
curArgTabEntry->alignment = alignment;
- curArgTabEntry->lateArgInx = (unsigned) -1;
- curArgTabEntry->tmpNum = (unsigned) -1;
+ curArgTabEntry->lateArgInx = (unsigned)-1;
+ curArgTabEntry->tmpNum = (unsigned)-1;
curArgTabEntry->isSplit = false;
curArgTabEntry->isTmp = false;
curArgTabEntry->needTmp = false;
@@ -1173,26 +1196,24 @@ void fgArgInfo::RemorphReset()
nextSlotNum = INIT_ARG_STACK_SLOT;
}
-fgArgTabEntry* fgArgInfo::RemorphRegArg(unsigned argNum,
- GenTreePtr node,
- GenTreePtr parent,
- regNumber regNum,
- unsigned numRegs,
- unsigned alignment)
+fgArgTabEntry* fgArgInfo::RemorphRegArg(
+ unsigned argNum, GenTreePtr node, GenTreePtr parent, regNumber regNum, unsigned numRegs, unsigned alignment)
{
- fgArgTabEntryPtr curArgTabEntry = NULL;
- unsigned regArgInx = 0;
- unsigned inx;
+ fgArgTabEntryPtr curArgTabEntry = nullptr;
+ unsigned regArgInx = 0;
+ unsigned inx;
- for (inx=0; inx < argCount; inx++)
+ for (inx = 0; inx < argCount; inx++)
{
curArgTabEntry = argTable[inx];
if (curArgTabEntry->argNum == argNum)
+ {
break;
+ }
- bool isRegArg;
+ bool isRegArg;
GenTreePtr argx;
- if (curArgTabEntry->parent != NULL)
+ if (curArgTabEntry->parent != nullptr)
{
assert(curArgTabEntry->parent->IsList());
argx = curArgTabEntry->parent->Current();
@@ -1211,25 +1232,29 @@ fgArgTabEntry* fgArgInfo::RemorphRegArg(unsigned argNum,
}
// if this was a nonstandard arg the table is definitive
if (curArgTabEntry->isNonStandard)
+ {
regNum = curArgTabEntry->regNum;
+ }
- assert(curArgTabEntry->argNum == argNum);
- assert(curArgTabEntry->regNum == regNum);
+ assert(curArgTabEntry->argNum == argNum);
+ assert(curArgTabEntry->regNum == regNum);
assert(curArgTabEntry->alignment == alignment);
- assert(curArgTabEntry->parent == parent);
+ assert(curArgTabEntry->parent == parent);
if (curArgTabEntry->node != node)
{
- GenTreePtr argx = NULL;
- unsigned regIndex = 0;
+ GenTreePtr argx = nullptr;
+ unsigned regIndex = 0;
/* process the register argument list */
for (GenTreeArgList* list = callTree->gtCall.gtCallLateArgs; list; (regIndex++, list = list->Rest()))
{
argx = list->Current();
- assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
+ assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
if (regIndex == regArgInx)
+ {
break;
+ }
}
assert(regIndex == regArgInx);
assert(regArgInx == curArgTabEntry->lateArgInx);
@@ -1242,23 +1267,20 @@ fgArgTabEntry* fgArgInfo::RemorphRegArg(unsigned argNum,
return curArgTabEntry;
}
-void fgArgInfo::RemorphStkArg(unsigned argNum,
- GenTreePtr node,
- GenTreePtr parent,
- unsigned numSlots,
- unsigned alignment)
+void fgArgInfo::RemorphStkArg(
+ unsigned argNum, GenTreePtr node, GenTreePtr parent, unsigned numSlots, unsigned alignment)
{
- fgArgTabEntryPtr curArgTabEntry = NULL;
- bool isRegArg = false;
- unsigned regArgInx = 0;
- GenTreePtr argx;
- unsigned inx;
+ fgArgTabEntryPtr curArgTabEntry = nullptr;
+ bool isRegArg = false;
+ unsigned regArgInx = 0;
+ GenTreePtr argx;
+ unsigned inx;
- for (inx=0; inx < argCount; inx++)
+ for (inx = 0; inx < argCount; inx++)
{
curArgTabEntry = argTable[inx];
- if (curArgTabEntry->parent != NULL)
+ if (curArgTabEntry->parent != nullptr)
{
assert(curArgTabEntry->parent->IsList());
argx = curArgTabEntry->parent->Current();
@@ -1271,19 +1293,23 @@ void fgArgInfo::RemorphStkArg(unsigned argNum,
}
if (curArgTabEntry->argNum == argNum)
+ {
break;
+ }
if (isRegArg)
+ {
regArgInx++;
+ }
}
- nextSlotNum = (unsigned) roundUp(nextSlotNum, alignment);
+ nextSlotNum = (unsigned)roundUp(nextSlotNum, alignment);
- assert(curArgTabEntry->argNum == argNum);
- assert(curArgTabEntry->slotNum == nextSlotNum);
- assert(curArgTabEntry->numSlots == numSlots);
+ assert(curArgTabEntry->argNum == argNum);
+ assert(curArgTabEntry->slotNum == nextSlotNum);
+ assert(curArgTabEntry->numSlots == numSlots);
assert(curArgTabEntry->alignment == alignment);
- assert(curArgTabEntry->parent == parent);
+ assert(curArgTabEntry->parent == parent);
assert(parent->IsList());
#if FEATURE_FIXED_OUT_ARGS
@@ -1291,16 +1317,18 @@ void fgArgInfo::RemorphStkArg(unsigned argNum,
{
if (isRegArg)
{
- GenTreePtr argx = NULL;
- unsigned regIndex = 0;
+ GenTreePtr argx = nullptr;
+ unsigned regIndex = 0;
/* process the register argument list */
- for (GenTreeArgList * list = callTree->gtCall.gtCallLateArgs; list; list = list->Rest(), regIndex++)
+ for (GenTreeArgList *list = callTree->gtCall.gtCallLateArgs; list; list = list->Rest(), regIndex++)
{
argx = list->Current();
- assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
+ assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
if (regIndex == regArgInx)
+ {
break;
+ }
}
assert(regIndex == regArgInx);
assert(regArgInx == curArgTabEntry->lateArgInx);
@@ -1323,20 +1351,20 @@ void fgArgInfo::RemorphStkArg(unsigned argNum,
nextSlotNum += numSlots;
}
-void fgArgInfo::SplitArg(unsigned argNum,
- unsigned numRegs,
- unsigned numSlots)
+void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots)
{
- fgArgTabEntryPtr curArgTabEntry = NULL;
+ fgArgTabEntryPtr curArgTabEntry = nullptr;
assert(argNum < argCount);
- for (unsigned inx=0; inx < argCount; inx++)
+ for (unsigned inx = 0; inx < argCount; inx++)
{
curArgTabEntry = argTable[inx];
if (curArgTabEntry->argNum == argNum)
+ {
break;
+ }
}
- assert(numRegs > 0);
+ assert(numRegs > 0);
assert(numSlots > 0);
curArgTabEntry->isSplit = true;
@@ -1346,23 +1374,23 @@ void fgArgInfo::SplitArg(unsigned argNum,
nextSlotNum += numSlots;
}
-void fgArgInfo::EvalToTmp(unsigned argNum,
- unsigned tmpNum,
- GenTreePtr newNode)
+void fgArgInfo::EvalToTmp(unsigned argNum, unsigned tmpNum, GenTreePtr newNode)
{
- fgArgTabEntryPtr curArgTabEntry = NULL;
+ fgArgTabEntryPtr curArgTabEntry = nullptr;
assert(argNum < argCount);
- for (unsigned inx=0; inx < argCount; inx++)
+ for (unsigned inx = 0; inx < argCount; inx++)
{
curArgTabEntry = argTable[inx];
if (curArgTabEntry->argNum == argNum)
+ {
break;
+ }
}
assert(curArgTabEntry->parent->Current() == newNode);
- curArgTabEntry->node = newNode;
- curArgTabEntry->tmpNum = tmpNum;
- curArgTabEntry->isTmp = true;
+ curArgTabEntry->node = newNode;
+ curArgTabEntry->tmpNum = tmpNum;
+ curArgTabEntry->isTmp = true;
}
void fgArgInfo::ArgsComplete()
@@ -1373,8 +1401,8 @@ void fgArgInfo::ArgsComplete()
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntryPtr curArgTabEntry = argTable[curInx];
- assert(curArgTabEntry != NULL);
- GenTreePtr argx = curArgTabEntry->node;
+ assert(curArgTabEntry != nullptr);
+ GenTreePtr argx = curArgTabEntry->node;
if (curArgTabEntry->regNum == REG_STK)
{
@@ -1389,8 +1417,7 @@ void fgArgInfo::ArgsComplete()
}
else // we have a register argument, next we look for a struct type.
{
- if (varTypeIsStruct(argx)
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY( || curArgTabEntry->isStruct))
+ if (varTypeIsStruct(argx) FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY(|| curArgTabEntry->isStruct))
{
hasStructRegArg = true;
}
@@ -1415,9 +1442,9 @@ void fgArgInfo::ArgsComplete()
// a tmp, then we need a temp in the late arg list.
if ((argCount > 1) || argx->OperIsCopyBlkOp()
#ifdef FEATURE_FIXED_OUT_ARGS
- || curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property
- // that we only have late non-register args when that feature is on.
-#endif // FEATURE_FIXED_OUT_ARGS
+ || curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property
+ // that we only have late non-register args when that feature is on.
+#endif // FEATURE_FIXED_OUT_ARGS
)
{
curArgTabEntry->needTmp = true;
@@ -1427,7 +1454,7 @@ void fgArgInfo::ArgsComplete()
// we require that they be evaluated into temps
for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
{
- fgArgTabEntryPtr prevArgTabEntry = argTable[prevInx];
+ fgArgTabEntryPtr prevArgTabEntry = argTable[prevInx];
assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);
assert(prevArgTabEntry->node);
@@ -1445,16 +1472,16 @@ void fgArgInfo::ArgsComplete()
// This means unnesting, sorting, etc. Technically this is overly
// conservative, but I want to avoid as much special-case debug-only code
// as possible, so leveraging the GTF_CALL flag is the easiest.
- if (!(argx->gtFlags & GTF_CALL) &&
- (argx->gtFlags & GTF_EXCEPT) &&
- (argCount > 1) &&
+ if (!(argx->gtFlags & GTF_CALL) && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) &&
compiler->opts.compDbgCode &&
(compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT))
{
for (unsigned otherInx = 0; otherInx < argCount; otherInx++)
{
if (otherInx == curInx)
+ {
continue;
+ }
if (argTable[otherInx]->regNum == REG_STK)
{
@@ -1475,7 +1502,7 @@ void fgArgInfo::ArgsComplete()
if (argx->gtFlags & GTF_CALL)
{
- if (argCount > 1) // If this is not the only argument
+ if (argCount > 1) // If this is not the only argument
{
curArgTabEntry->needTmp = true;
}
@@ -1488,7 +1515,7 @@ void fgArgInfo::ArgsComplete()
// All previous arguments may need to be evaluated into temps
for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
{
- fgArgTabEntryPtr prevArgTabEntry = argTable[prevInx];
+ fgArgTabEntryPtr prevArgTabEntry = argTable[prevInx];
assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);
assert(prevArgTabEntry->node);
@@ -1512,7 +1539,7 @@ void fgArgInfo::ArgsComplete()
#ifndef LEGACY_BACKEND
#if FEATURE_MULTIREG_ARGS
- // For RyuJIT backend we will expand a Multireg arg into a GT_LIST
+ // For RyuJIT backend we will expand a Multireg arg into a GT_LIST
// with multiple indirections, so here we consider spilling it into a tmp LclVar.
//
// Note that Arm32 is a LEGACY_BACKEND and it defines FEATURE_MULTIREG_ARGS
@@ -1522,7 +1549,7 @@ void fgArgInfo::ArgsComplete()
bool isMultiRegArg = (curArgTabEntry->numRegs > 1);
if ((argx->TypeGet() == TYP_STRUCT) && (curArgTabEntry->needTmp == false))
- {
+ {
if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0))
{
// Spill multireg struct arguments that have Assignments or Calls embedded in them
@@ -1540,44 +1567,44 @@ void fgArgInfo::ArgsComplete()
}
else if (argx->OperGet() == GT_OBJ)
{
- GenTreeObj* argObj = argx->AsObj();
- CORINFO_CLASS_HANDLE objClass = argObj->gtClass;
- unsigned structSize = compiler->info.compCompHnd->getClassSize(objClass);
+ GenTreeObj* argObj = argx->AsObj();
+ CORINFO_CLASS_HANDLE objClass = argObj->gtClass;
+ unsigned structSize = compiler->info.compCompHnd->getClassSize(objClass);
switch (structSize)
{
- case 3:
- case 5:
- case 6:
- case 7:
- // If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes
- //
- if (argObj->gtObj.gtOp1->IsVarAddr() == false) // Is the source not a LclVar?
- {
- // If we don't have a LclVar we need to read exactly 3,5,6 or 7 bytes
- // For now we use a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp.
+ case 3:
+ case 5:
+ case 6:
+ case 7:
+ // If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes
//
- curArgTabEntry->needTmp = true;
- }
- break;
+ if (argObj->gtObj.gtOp1->IsVarAddr() == false) // Is the source not a LclVar?
+ {
+ // If we don't have a LclVar we need to read exactly 3,5,6 or 7 bytes
+ // For now we use a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp.
+ //
+ curArgTabEntry->needTmp = true;
+ }
+ break;
- case 11:
- case 13:
- case 14:
- case 15:
- // Spill any GT_OBJ multireg structs that are difficult to extract
- //
- // When we have a GT_OBJ of a struct with the above sizes we would need
- // to use 3 or 4 load instructions to load the exact size of this struct.
- // Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence
- // will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp.
- // Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing
- // the argument.
- //
- curArgTabEntry->needTmp = true;
- break;
+ case 11:
+ case 13:
+ case 14:
+ case 15:
+ // Spill any GT_OBJ multireg structs that are difficult to extract
+ //
+ // When we have a GT_OBJ of a struct with the above sizes we would need
+ // to use 3 or 4 load instructions to load the exact size of this struct.
+ // Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence
+ // will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp.
+ // Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing
+ // the argument.
+ //
+ curArgTabEntry->needTmp = true;
+ break;
- default:
- break;
+ default:
+ break;
}
}
}
@@ -1586,7 +1613,6 @@ void fgArgInfo::ArgsComplete()
#endif // LEGACY_BACKEND
}
-
// We only care because we can't spill structs and qmarks involve a lot of spilling, but
// if we don't have qmarks, then it doesn't matter.
// So check for Qmark's globally once here, instead of inside the loop.
@@ -1618,8 +1644,9 @@ void fgArgInfo::ArgsComplete()
{
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
- fgArgTabEntryPtr curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != NULL);
- GenTreePtr argx = curArgTabEntry->node;
+ fgArgTabEntryPtr curArgTabEntry = argTable[curInx];
+ assert(curArgTabEntry != nullptr);
+ GenTreePtr argx = curArgTabEntry->node;
// Examine the register args that are currently not marked needTmp
//
@@ -1714,19 +1741,22 @@ void fgArgInfo::SortArgs()
// [We use a backward iterator pattern]
//
curInx = argCount;
- do {
+ do
+ {
curInx--;
fgArgTabEntryPtr curArgTabEntry = argTable[curInx];
if (curArgTabEntry->regNum != REG_STK)
+ {
regCount++;
+ }
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
- GenTreePtr argx = curArgTabEntry->node;
+ GenTreePtr argx = curArgTabEntry->node;
// put constants at the end of the table
//
@@ -1748,7 +1778,7 @@ void fgArgInfo::SortArgs()
argsRemaining--;
}
}
- } while (curInx > 0);
+ } while (curInx > 0);
if (argsRemaining > 0)
{
@@ -1763,7 +1793,7 @@ void fgArgInfo::SortArgs()
//
if (!curArgTabEntry->processed)
{
- GenTreePtr argx = curArgTabEntry->node;
+ GenTreePtr argx = curArgTabEntry->node;
// put calls at the beginning of the table
//
@@ -1829,7 +1859,8 @@ void fgArgInfo::SortArgs()
// [We use a backward iterator pattern]
//
curInx = endTab + 1;
- do {
+ do
+ {
curInx--;
fgArgTabEntryPtr curArgTabEntry = argTable[curInx];
@@ -1838,7 +1869,7 @@ void fgArgInfo::SortArgs()
//
if (!curArgTabEntry->processed)
{
- GenTreePtr argx = curArgTabEntry->node;
+ GenTreePtr argx = curArgTabEntry->node;
if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD))
{
@@ -1868,7 +1899,7 @@ void fgArgInfo::SortArgs()
{
/* Find the most expensive arg remaining and evaluate it next */
- fgArgTabEntryPtr expensiveArgTabEntry = NULL;
+ fgArgTabEntryPtr expensiveArgTabEntry = nullptr;
unsigned expensiveArg = UINT_MAX;
unsigned expensiveArgCost = 0;
@@ -1882,12 +1913,12 @@ void fgArgInfo::SortArgs()
//
if (!curArgTabEntry->processed)
{
- GenTreePtr argx = curArgTabEntry->node;
+ GenTreePtr argx = curArgTabEntry->node;
// We should have already handled these kinds of args
- assert (argx->gtOper != GT_LCL_VAR);
- assert (argx->gtOper != GT_LCL_FLD);
- assert (argx->gtOper != GT_CNS_INT);
+ assert(argx->gtOper != GT_LCL_VAR);
+ assert(argx->gtOper != GT_LCL_FLD);
+ assert(argx->gtOper != GT_CNS_INT);
// This arg should either have no persistent side effects or be the last one in our table
// assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1)));
@@ -1947,7 +1978,7 @@ void fgArgInfo::SortArgs()
#if !FEATURE_FIXED_OUT_ARGS
// Finally build the regArgList
//
- callTree->gtCall.regArgList = NULL;
+ callTree->gtCall.regArgList = NULL;
callTree->gtCall.regArgListCount = regCount;
unsigned regInx = 0;
@@ -1977,17 +2008,17 @@ void fgArgInfo::SortArgs()
// tmpVarNum - the var num which we clone into the newly created temp var.
//
// Return Value:
-// the newly created temp var tree.
+// the newly created temp var tree.
-GenTreePtr Compiler::fgMakeTmpArgNode(unsigned tmpVarNum
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool passedInRegisters))
+GenTreePtr Compiler::fgMakeTmpArgNode(
+ unsigned tmpVarNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const bool passedInRegisters))
{
- LclVarDsc * varDsc = &lvaTable[tmpVarNum];
+ LclVarDsc* varDsc = &lvaTable[tmpVarNum];
assert(varDsc->lvIsTemp);
- var_types type = varDsc->TypeGet();
+ var_types type = varDsc->TypeGet();
// Create a copy of the temp to go into the late argument list
- GenTreePtr arg = gtNewLclvNode(tmpVarNum, type);
+ GenTreePtr arg = gtNewLclvNode(tmpVarNum, type);
GenTreePtr addrNode = nullptr;
if (varTypeIsStruct(type))
@@ -2008,45 +2039,55 @@ GenTreePtr Compiler::fgMakeTmpArgNode(unsigned tmpVarNum
}
else
{
- arg = gtNewOperNode(GT_ADDR, type, arg);
+ arg = gtNewOperNode(GT_ADDR, type, arg);
addrNode = arg;
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
unsigned structSize = lvaLclExactSize(tmpVarNum);
switch (structSize)
{
- case 1: type = TYP_BYTE; break;
- case 2: type = TYP_SHORT; break;
-#if defined (_TARGET_AMD64_)
- case 4: type = TYP_INT; break;
+ case 1:
+ type = TYP_BYTE;
+ break;
+ case 2:
+ type = TYP_SHORT;
+ break;
+#if defined(_TARGET_AMD64_)
+ case 4:
+ type = TYP_INT;
+ break;
#elif defined(_TARGET_ARM64_)
- case 3:
- case 4: type = TYP_INT; break;
- case 5:
- case 6:
- case 7: type = TYP_I_IMPL; break;
-#endif // defined (_TARGET_ARM64_)
- case 8:
- switch (*lvaGetGcLayout(tmpVarNum))
- {
- case TYPE_GC_NONE:
- type = TYP_I_IMPL;
+ case 3:
+ case 4:
+ type = TYP_INT;
break;
- case TYPE_GC_REF:
- type = TYP_REF;
+ case 5:
+ case 6:
+ case 7:
+ type = TYP_I_IMPL;
break;
- case TYPE_GC_BYREF:
- type = TYP_BYREF;
+#endif // defined (_TARGET_ARM64_)
+ case 8:
+ switch (*lvaGetGcLayout(tmpVarNum))
+ {
+ case TYPE_GC_NONE:
+ type = TYP_I_IMPL;
+ break;
+ case TYPE_GC_REF:
+ type = TYP_REF;
+ break;
+ case TYPE_GC_BYREF:
+ type = TYP_BYREF;
+ break;
+ default:
+ unreached();
+ }
break;
default:
- unreached();
- }
- break;
- default:
- break;
+ break;
}
// If we didn't change the type of the struct, it means
@@ -2062,9 +2103,9 @@ GenTreePtr Compiler::fgMakeTmpArgNode(unsigned tmpVarNum
{
// ToDo-ARM64: Consider using: arg->ChangeOper(GT_LCL_FLD);
// as that is how FEATURE_UNIX_AMD64_STRUCT_PASSING works.
- // Create a GT_OBJ for the argument
+ // Create a GT_OBJ for the argument
// This will be passed by value in two registers
- arg = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
+ arg = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
addrNode = arg;
// Create an Obj of the temp to use it as a call argument.
@@ -2074,7 +2115,7 @@ GenTreePtr Compiler::fgMakeTmpArgNode(unsigned tmpVarNum
#endif // _TARGET_ARM64_
#endif // FEATURE_MULTIREG_ARGS
{
- arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg);
+ arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg);
addrNode = arg;
}
}
@@ -2087,17 +2128,17 @@ GenTreePtr Compiler::fgMakeTmpArgNode(unsigned tmpVarNum
#else // not (_TARGET_AMD64_ or _TARGET_ARM64_)
- // other targets, we pass the struct by value
+ // other targets, we pass the struct by value
assert(varTypeIsStruct(type));
- arg = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
+ arg = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
addrNode = arg;
// Get a new Obj node temp to use it as a call argument.
// gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
-#endif // not (_TARGET_AMD64_ or _TARGET_ARM64_)
+#endif // not (_TARGET_AMD64_ or _TARGET_ARM64_)
} // (varTypeIsStruct(type))
@@ -2106,7 +2147,7 @@ GenTreePtr Compiler::fgMakeTmpArgNode(unsigned tmpVarNum
assert(addrNode->gtOper == GT_ADDR);
// This will prevent this LclVar from being optimized away
- lvaSetVarAddrExposed(tmpVarNum);
+ lvaSetVarAddrExposed(tmpVarNum);
// the child of a GT_ADDR is required to have this flag set
addrNode->gtOp.gtOp1->gtFlags |= GTF_DONT_CSE;
@@ -2121,14 +2162,14 @@ void fgArgInfo::EvalArgsToTemps()
unsigned regArgInx = 0;
// Now go through the argument table and perform the necessary evaluation into temps
- GenTreeArgList* tmpRegArgNext = NULL;
+ GenTreeArgList* tmpRegArgNext = nullptr;
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntryPtr curArgTabEntry = argTable[curInx];
- GenTreePtr argx = curArgTabEntry->node;
- GenTreePtr setupArg = NULL;
- GenTreePtr defArg;
+ GenTreePtr argx = curArgTabEntry->node;
+ GenTreePtr setupArg = nullptr;
+ GenTreePtr defArg;
#if !FEATURE_FIXED_OUT_ARGS
// Only ever set for FEATURE_FIXED_OUT_ARGS
@@ -2138,7 +2179,7 @@ void fgArgInfo::EvalArgsToTemps()
// Only the register arguments need to be replaced with placeholder nodes.
// Stacked arguments are evaluated and pushed (or stored into the stack) in order.
//
- if (curArgTabEntry->regNum == REG_STK)
+ if (curArgTabEntry->regNum == REG_STK)
continue;
#endif
@@ -2150,9 +2191,8 @@ void fgArgInfo::EvalArgsToTemps()
{
// Create a copy of the temp to go into the late argument list
tmpVarNum = curArgTabEntry->tmpNum;
- defArg = compiler->fgMakeTmpArgNode(
- tmpVarNum
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(argTable[curInx]->structDesc.passedInRegisters));
+ defArg = compiler->fgMakeTmpArgNode(tmpVarNum FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(
+ argTable[curInx]->structDesc.passedInRegisters));
// mark the original node as a late argument
argx->gtFlags |= GTF_LATE_ARG;
@@ -2207,7 +2247,7 @@ void fgArgInfo::EvalArgsToTemps()
}
}
- if (setupArg != NULL)
+ if (setupArg != nullptr)
{
// Now keep the mkrefany for the late argument list
defArg = argx;
@@ -2231,15 +2271,15 @@ void fgArgInfo::EvalArgsToTemps()
}
#endif // !LEGACY_BACKEND
- var_types lclVarType = genActualType(argx->gtType);
- var_types scalarType = TYP_UNKNOWN;
+ var_types lclVarType = genActualType(argx->gtType);
+ var_types scalarType = TYP_UNKNOWN;
if (setupArg->OperIsCopyBlkOp())
{
setupArg = compiler->fgMorphCopyBlock(setupArg);
#ifdef _TARGET_ARM64_
- // This scalar LclVar widening step is only performed for ARM64
- //
+ // This scalar LclVar widening step is only performed for ARM64
+ //
CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum);
unsigned structSize = varDsc->lvExactSize;
@@ -2259,8 +2299,8 @@ void fgArgInfo::EvalArgsToTemps()
defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType);
}
- curArgTabEntry->isTmp = true;
- curArgTabEntry->tmpNum = tmpVarNum;
+ curArgTabEntry->isTmp = true;
+ curArgTabEntry->tmpNum = tmpVarNum;
#ifdef _TARGET_ARM_
// Previously we might have thought the local was promoted, and thus the 'COPYBLK'
@@ -2269,7 +2309,7 @@ void fgArgInfo::EvalArgsToTemps()
// Too bad we're not that smart for these intermediate temps...
if (isValidIntArgReg(curArgTabEntry->regNum) && (curArgTabEntry->numRegs > 1))
{
- regNumber argReg = curArgTabEntry->regNum;
+ regNumber argReg = curArgTabEntry->regNum;
regMaskTP allUsedRegs = genRegMask(curArgTabEntry->regNum);
for (unsigned i = 1; i < curArgTabEntry->numRegs; i++)
{
@@ -2308,7 +2348,9 @@ void fgArgInfo::EvalArgsToTemps()
// (the initial argument evaluation list) with a placeholder.
//
if ((curArgTabEntry->regNum == REG_STK) && (curArgTabEntry->needPlace == false))
+ {
continue;
+ }
/* No temp needed - move the whole node to the gtCallLateArgs list */
@@ -2332,7 +2374,7 @@ void fgArgInfo::EvalArgsToTemps()
if (varTypeIsStruct(defArg))
{
// Need a temp to walk any GT_COMMA nodes when searching for the clsHnd
- GenTreePtr defArgTmp = defArg;
+ GenTreePtr defArgTmp = defArg;
// The GT_OBJ may be a child of a GT_COMMA.
while (defArgTmp->gtOper == GT_COMMA)
@@ -2382,11 +2424,11 @@ void fgArgInfo::EvalArgsToTemps()
#endif
}
- if (setupArg != NULL)
+ if (setupArg != nullptr)
{
if (curArgTabEntry->parent)
{
- GenTreePtr parent = curArgTabEntry->parent;
+ GenTreePtr parent = curArgTabEntry->parent;
/* a normal argument from the list */
noway_assert(parent->IsList());
noway_assert(parent->gtOp.gtOp1 == argx);
@@ -2404,9 +2446,9 @@ void fgArgInfo::EvalArgsToTemps()
/* deferred arg goes into the late argument list */
- if (tmpRegArgNext == NULL)
+ if (tmpRegArgNext == nullptr)
{
- tmpRegArgNext = compiler->gtNewArgList(defArg);
+ tmpRegArgNext = compiler->gtNewArgList(defArg);
callTree->gtCall.gtCallLateArgs = tmpRegArgNext;
}
else
@@ -2414,7 +2456,7 @@ void fgArgInfo::EvalArgsToTemps()
noway_assert(tmpRegArgNext->IsList());
noway_assert(tmpRegArgNext->Current());
tmpRegArgNext->gtOp.gtOp2 = compiler->gtNewArgList(defArg);
- tmpRegArgNext = tmpRegArgNext->Rest();
+ tmpRegArgNext = tmpRegArgNext->Rest();
}
curArgTabEntry->node = defArg;
@@ -2431,7 +2473,7 @@ void fgArgInfo::EvalArgsToTemps()
if (curArgTabEntry->regNum != REG_STK)
{
- printf("%s ", getRegName( curArgTabEntry->regNum ));
+ printf("%s ", getRegName(curArgTabEntry->regNum));
}
}
printf("\n");
@@ -2464,9 +2506,13 @@ int Compiler::fgEstimateCallStackSize(GenTreeCall* call)
int numStkArgs;
if (numArgs > MAX_REG_ARG)
+ {
numStkArgs = numArgs - MAX_REG_ARG;
+ }
else
+ {
numStkArgs = 0;
+ }
return numStkArgs * REGSIZE_BYTES;
}
@@ -2480,7 +2526,7 @@ int Compiler::fgEstimateCallStackSize(GenTreeCall* call)
// evaluates ppTree to a temp and returns the result
//
// Return Value:
-// A fresh GT_LCL_VAR node referencing the temp which has not been used
+// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
// Assumption:
// The result tree MUST be added to the tree structure since the ref counts are
@@ -2500,7 +2546,7 @@ GenTree* Compiler::fgMakeMultiUse(GenTree** pOp)
}
else
{
- GenTree* result = fgInsertCommaFormTemp(pOp);
+ GenTree* result = fgInsertCommaFormTemp(pOp);
// At this point, *pOp is GT_COMMA(GT_ASG(V01, *pOp), V01) and result = V01
// Therefore, the ref count has to be incremented 3 times for *pOp and result, if result will
@@ -2516,7 +2562,6 @@ GenTree* Compiler::fgMakeMultiUse(GenTree** pOp)
}
}
-
//------------------------------------------------------------------------------
// fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree,
// and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl)
@@ -2528,15 +2573,15 @@ GenTree* Compiler::fgMakeMultiUse(GenTree** pOp)
// structType - value type handle if the temp created is of TYP_STRUCT.
//
// Return Value:
-// A fresh GT_LCL_VAR node referencing the temp which has not been used
+// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
-GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/)
+GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/)
{
GenTree* subTree = *ppTree;
unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable"));
-
+
if (varTypeIsStruct(subTree))
{
assert(structType != nullptr);
@@ -2549,7 +2594,7 @@ GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDL
GenTree* asg = gtNewTempAssign(lclNum, subTree);
GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(subTree->TypeGet(), lclNum, BAD_IL_OFFSET);
-
+
GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load);
*ppTree = comma;
@@ -2557,7 +2602,6 @@ GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDL
return new (this, GT_LCL_VAR) GenTreeLclVar(subTree->TypeGet(), lclNum, BAD_IL_OFFSET);
}
-
//------------------------------------------------------------------------
// fgMorphArgs: Walk and transform (morph) the arguments of a call
//
@@ -2595,50 +2639,50 @@ GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDL
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
GenTreeCall* call = callNode->AsCall();
- GenTreePtr args;
- GenTreePtr argx;
+ GenTreePtr args;
+ GenTreePtr argx;
- unsigned flagsSummary = 0;
- unsigned genPtrArgCntSav = fgPtrArgCntCur;
+ unsigned flagsSummary = 0;
+ unsigned genPtrArgCntSav = fgPtrArgCntCur;
- unsigned argIndex = 0;
+ unsigned argIndex = 0;
- unsigned intArgRegNum = 0;
- unsigned fltArgRegNum = 0;
+ unsigned intArgRegNum = 0;
+ unsigned fltArgRegNum = 0;
#ifdef _TARGET_ARM_
- regMaskTP argSkippedRegMask = RBM_NONE;
- regMaskTP fltArgSkippedRegMask = RBM_NONE;
+ regMaskTP argSkippedRegMask = RBM_NONE;
+ regMaskTP fltArgSkippedRegMask = RBM_NONE;
#endif // _TARGET_ARM_
#if defined(_TARGET_X86_)
- unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated
+ unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated
#else
- const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number
-#endif
+ const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number
+#endif
- unsigned argSlots = 0;
- unsigned nonRegPassedStructSlots = 0;
- bool lateArgsComputed = (call->gtCallLateArgs != nullptr);
- bool callHasRetBuffArg = call->HasRetBufArg();
+ unsigned argSlots = 0;
+ unsigned nonRegPassedStructSlots = 0;
+ bool lateArgsComputed = (call->gtCallLateArgs != nullptr);
+ bool callHasRetBuffArg = call->HasRetBufArg();
-#ifndef _TARGET_X86_ // i.e. _TARGET_AMD64_ or _TARGET_ARM_
- bool callIsVararg = call->IsVarargs();
+#ifndef _TARGET_X86_ // i.e. _TARGET_AMD64_ or _TARGET_ARM_
+ bool callIsVararg = call->IsVarargs();
#endif
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- // If fgMakeOutgoingStructArgCopy is called and copies are generated, hasStackArgCopy is set
+ // If fgMakeOutgoingStructArgCopy is called and copies are generated, hasStackArgCopy is set
// to make sure to call EvalArgsToTemp. fgMakeOutgoingStructArgCopy just marks the argument
// to need a temp variable, and EvalArgsToTemp actually creates the temp variable node.
- bool hasStackArgCopy = false;
-#endif
-
+ bool hasStackArgCopy = false;
+#endif
+
#ifndef LEGACY_BACKEND
// Data structure for keeping track of non-standard args. Non-standard args are those that are not passed
// following the normal calling convention or in the normal argument registers. We either mark existing
@@ -2648,16 +2692,15 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
struct NonStandardArg
{
- regNumber reg; // The register to be assigned to this non-standard argument.
- GenTree* node; // The tree node representing this non-standard argument.
- // Note that this must be updated if the tree node changes due to morphing!
+ regNumber reg; // The register to be assigned to this non-standard argument.
+ GenTree* node; // The tree node representing this non-standard argument.
+ // Note that this must be updated if the tree node changes due to morphing!
};
ArrayStack<NonStandardArg> args;
public:
- NonStandardArgs(Compiler* compiler)
- : args(compiler, 3) // We will have at most 3 non-standard arguments
+ NonStandardArgs(Compiler* compiler) : args(compiler, 3) // We will have at most 3 non-standard arguments
{
}
@@ -2673,7 +2716,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
//
void Add(GenTree* node, regNumber reg)
{
- NonStandardArg nsa = { reg, node };
+ NonStandardArg nsa = {reg, node};
args.Push(nsa);
}
@@ -2754,7 +2797,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// Process the late arguments (which were determined by a previous caller).
// Do this before resetting fgPtrArgCntCur as fgMorphTree(call->gtCallLateArgs)
// may need to refer to it.
- if (lateArgsComputed)
+ if (lateArgsComputed)
{
// We need to reMorph the gtCallLateArgs early since that is what triggers
// the expression folding and we need to have the final folded gtCallLateArgs
@@ -2785,7 +2828,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
// First we need to count the args
if (call->gtCallObjp)
+ {
numArgs++;
+ }
for (args = call->gtCallArgs; (args != nullptr); args = args->gtOp.gtOp2)
{
numArgs++;
@@ -2807,7 +2852,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
GenTreeArgList* args = call->gtCallArgs;
- GenTree* arg1 = args->Current();
+ GenTree* arg1 = args->Current();
assert(arg1 != nullptr);
nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME);
}
@@ -2822,7 +2867,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
//
if (hasFixedRetBuffReg() && call->HasRetBufArg())
{
- args = call->gtCallArgs;
+ args = call->gtCallArgs;
assert(args != nullptr);
assert(args->IsList());
@@ -2842,26 +2887,24 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// Add a conservative estimate of the stack size in a special parameter (r11) at the call site.
// It will be used only on the intercepted-for-host code path to copy the arguments.
- GenTree* cns = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, fgEstimateCallStackSize(call));
+ GenTree* cns = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, fgEstimateCallStackSize(call));
call->gtCallArgs = gtNewListNode(cns, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(cns, REG_PINVOKE_COOKIE_PARAM);
}
- else if (call->IsVirtualStub() &&
- (call->gtCallType == CT_INDIRECT) &&
- !call->IsTailCallViaHelper())
+ else if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT) && !call->IsTailCallViaHelper())
{
- // indirect VSD stubs need the base of the indirection cell to be
+ // indirect VSD stubs need the base of the indirection cell to be
// passed in addition. At this point that is the value in gtCallAddr.
- // The actual call target will be derived from gtCallAddr in call
+ // The actual call target will be derived from gtCallAddr in call
// lowering.
// If it is a VSD call getting dispatched via tail call helper,
// fgMorphTailCall() would materialize stub addr as an additional
- // parameter added to the original arg list and hence no need to
+ // parameter added to the original arg list and hence no need to
// add as a non-standard arg.
-
+
GenTree* arg = call->gtCallAddr;
if (arg->OperIsLocal())
{
@@ -2873,7 +2916,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
call->gtFlags |= GTF_ASG;
}
noway_assert(arg != nullptr);
-
+
// And push the stub address onto the list of arguments
call->gtCallArgs = gtNewListNode(arg, call->gtCallArgs);
numArgs++;
@@ -2895,15 +2938,15 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM);
// put destination into R10
- arg = gtClone(call->gtCallAddr, true);
+ arg = gtClone(call->gtCallAddr, true);
call->gtCallArgs = gtNewListNode(arg, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM);
// finally change this call to a helper call
- call->gtCallType = CT_HELPER;
- call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI);
+ call->gtCallType = CT_HELPER;
+ call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI);
}
#endif // !defined(LEGACY_BACKEND) && !defined(_TARGET_X86_)
@@ -2926,14 +2969,13 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
argx = call->gtCallObjp;
- if (argx)
+ if (argx)
{
- argx = fgMorphTree(argx);
+ argx = fgMorphTree(argx);
call->gtCallObjp = argx;
flagsSummary |= argx->gtFlags;
- assert(call->gtCallType == CT_USER_FUNC ||
- call->gtCallType == CT_INDIRECT);
+ assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT);
assert(argIndex == 0);
@@ -2942,19 +2984,19 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
if (lateArgsComputed)
{
/* this is a register argument - possibly update it in the table */
- call->fgArgInfo->RemorphRegArg(argIndex, argx, NULL, genMapIntRegArgNumToRegNum(intArgRegNum), 1, 1);
+ call->fgArgInfo->RemorphRegArg(argIndex, argx, nullptr, genMapIntRegArgNumToRegNum(intArgRegNum), 1, 1);
}
else
{
- assert(varTypeIsGC(call->gtCallObjp->gtType) ||
- (call->gtCallObjp->gtType == TYP_I_IMPL));
+ assert(varTypeIsGC(call->gtCallObjp->gtType) || (call->gtCallObjp->gtType == TYP_I_IMPL));
/* this is a register argument - put it in the table */
- call->fgArgInfo->AddRegArg(argIndex, argx, NULL, genMapIntRegArgNumToRegNum(intArgRegNum), 1, 1
+ call->fgArgInfo->AddRegArg(argIndex, argx, nullptr, genMapIntRegArgNumToRegNum(intArgRegNum), 1, 1
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
- , false, REG_STK, nullptr
+ ,
+ false, REG_STK, nullptr
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- );
+ );
}
// this can't be a struct.
assert(argx->gtType != TYP_STRUCT);
@@ -2965,7 +3007,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
intArgRegNum++;
#ifdef WINDOWS_AMD64_ABI
// Whenever we pass an integer register argument
- // we skip the corresponding floating point register argument
+ // we skip the corresponding floating point register argument
fltArgRegNum++;
#endif // WINDOWS_AMD64_ABI
}
@@ -2981,7 +3023,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// Compute the maximum number of arguments that can be passed in registers.
// For X86 we handle the varargs and unmanaged calling conventions
- if (call->gtFlags & GTF_CALL_POP_ARGS)
+ if (call->gtFlags & GTF_CALL_POP_ARGS)
{
noway_assert(intArgRegNum < MAX_REG_ARG);
// No more register arguments for varargs (CALL_POP_ARGS)
@@ -3000,7 +3042,8 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
noway_assert(call->gtCallArgs->gtOp.gtOp1->TypeGet() == TYP_I_IMPL ||
call->gtCallArgs->gtOp.gtOp1->TypeGet() == TYP_BYREF ||
- call->gtCallArgs->gtOp.gtOp1->gtOper == GT_NOP); // the arg was already morphed to a register (fgMorph called twice)
+ call->gtCallArgs->gtOp.gtOp1->gtOper ==
+ GT_NOP); // the arg was already morphed to a register (fgMorph called twice)
maxRegArgs = 1;
}
else
@@ -3060,11 +3103,11 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
- bool hasStructArgument = false; // @TODO-ARM64-UNIX: Remove this bool during a future refactoring
+ bool hasStructArgument = false; // @TODO-ARM64-UNIX: Remove this bool during a future refactoring
bool hasMultiregStructArgs = false;
for (args = call->gtCallArgs; args; args = args->gtOp.gtOp2, argIndex++)
{
- GenTreePtr * parentArgx = &args->gtOp.gtOp1;
+ GenTreePtr* parentArgx = &args->gtOp.gtOp1;
#if FEATURE_MULTIREG_ARGS
if (!hasStructArgument)
@@ -3076,11 +3119,11 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#ifndef LEGACY_BACKEND
// Record the index of any nonStandard arg that we may be processing here, as we are
// about to call fgMorphTree on it and fgMorphTree may replace it with a new tree.
- GenTreePtr orig_argx = *parentArgx;
- int nonStandard_index = nonStandardArgs.Find(orig_argx);
+ GenTreePtr orig_argx = *parentArgx;
+ int nonStandard_index = nonStandardArgs.Find(orig_argx);
#endif // !LEGACY_BACKEND
- argx = fgMorphTree(*parentArgx);
+ argx = fgMorphTree(*parentArgx);
*parentArgx = argx;
flagsSummary |= argx->gtFlags;
@@ -3100,10 +3143,12 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
* NOTE: We deferred this from the importer because of the inliner */
if (argx->IsVarAddr())
+ {
argx->gtType = TYP_I_IMPL;
+ }
- bool passUsingFloatRegs;
- unsigned argAlign = 1;
+ bool passUsingFloatRegs;
+ unsigned argAlign = 1;
// Setup any HFA information about 'argx'
var_types hfaType = GetHfaType(argx);
bool isHfaArg = varTypeIsFloating(hfaType);
@@ -3113,19 +3158,19 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
hfaSlots = GetHfaCount(argx);
- // If we have a HFA struct it's possible we transition from a method that originally
+ // If we have a HFA struct it's possible we transition from a method that originally
// only had integer types to now start having FP types. We have to communicate this
- // through this flag since LSRA later on will use this flag to determine whether
+ // through this flag since LSRA later on will use this flag to determine whether
// or not to track the FP register set.
//
compFloatingPointUsed = true;
}
unsigned size = 0;
- CORINFO_CLASS_HANDLE copyBlkClass = NULL;
+ CORINFO_CLASS_HANDLE copyBlkClass = nullptr;
bool isRegArg = false;
- fgArgTabEntryPtr argEntry = NULL;
+ fgArgTabEntryPtr argEntry = nullptr;
if (lateArgsComputed)
{
@@ -3143,7 +3188,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
else
{
passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeIsFloating(argx)) && !opts.compUseSoftFP;
- passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG);
+ passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG);
}
GenTreePtr curArg = argx;
@@ -3166,7 +3211,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
if (fltArgRegNum % 2 == 1)
{
fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
- fltArgRegNum ++;
+ fltArgRegNum++;
}
}
else if (passUsingIntRegs)
@@ -3174,13 +3219,13 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
if (intArgRegNum % 2 == 1)
{
argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
- intArgRegNum ++;
+ intArgRegNum++;
}
}
if (argSlots % 2 == 1)
{
- argSlots ++;
+ argSlots++;
}
}
@@ -3196,7 +3241,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
#elif defined(_TARGET_AMD64_)
-#if defined(UNIX_AMD64_ABI)
+#if defined(UNIX_AMD64_ABI)
if (lateArgsComputed)
{
passUsingFloatRegs = isValidFloatArgReg(argEntry->regNum);
@@ -3205,7 +3250,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
passUsingFloatRegs = varTypeIsFloating(argx);
}
-#else // WINDOWS_AMD64_ABI
+#else // WINDOWS_AMD64_ABI
passUsingFloatRegs = varTypeIsFloating(argx);
#endif // !UNIX_AMD64_ABI
#elif defined(_TARGET_X86_)
@@ -3213,13 +3258,13 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
passUsingFloatRegs = false;
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif // _TARGET_*
- bool isBackFilled = false;
- unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use
- var_types structBaseType = TYP_STRUCT;
- unsigned structSize = 0;
+ bool isBackFilled = false;
+ unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use
+ var_types structBaseType = TYP_STRUCT;
+ unsigned structSize = 0;
bool isStructArg = varTypeIsStruct(argx);
@@ -3230,10 +3275,10 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
fgArgTabEntryPtr fgEntryPtr = gtArgEntryByNode(call, argx);
assert(fgEntryPtr != nullptr);
- // As described in a few other places, this can happen when the argx was morphed
// into an arg setup node - COPYBLK. The COPYBLK always has a type of void.
// In such a case the fgArgTabEntry keeps track of whether the original node (before morphing)
// In such case the fgArgTabEntry keeps track of whether the original node (before morphing)
- // was a struct and the struct classification.
+ // was a struct and the struct classification.
isStructArg = fgEntryPtr->isStruct;
if (isStructArg)
@@ -3242,11 +3287,11 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- assert(argEntry != NULL);
+ assert(argEntry != nullptr);
if (argEntry->IsBackFilled())
{
- isRegArg = true;
- size = argEntry->numRegs;
+ isRegArg = true;
+ size = argEntry->numRegs;
nextFltArgRegNum = genMapFloatRegNumToRegArgNum(argEntry->regNum);
assert(size == 1);
isBackFilled = true;
@@ -3267,7 +3312,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// This size has now been computed
assert(size != 0);
}
- else // !lateArgsComputed
+ else // !lateArgsComputed
{
//
// Figure out the size of the argument. This is either in number of registers, or number of
@@ -3276,7 +3321,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
//
if (argx->IsArgPlaceHolderNode() || (!isStructArg))
{
-#if defined(_TARGET_AMD64_)
+#if defined(_TARGET_AMD64_)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
if (!isStructArg)
{
@@ -3284,17 +3329,19 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
else
{
- size = (unsigned)(roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd), TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
+ size = (unsigned)(roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd),
+ TARGET_POINTER_SIZE)) /
+ TARGET_POINTER_SIZE;
eeGetSystemVAmd64PassStructInRegisterDescriptor(argx->gtArgPlace.gtArgPlaceClsHnd, &structDesc);
if (size > 1)
{
hasMultiregStructArgs = true;
}
}
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
-#elif defined(_TARGET_ARM64_)
+#elif defined(_TARGET_ARM64_)
if (isStructArg)
{
if (isHfaArg)
@@ -3306,7 +3353,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
else
{
// Structs are either passed in 1 or 2 (64-bit) slots
- size = (unsigned)(roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd), TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
+ size = (unsigned)(roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd),
+ TARGET_POINTER_SIZE)) /
+ TARGET_POINTER_SIZE;
if (size == 2)
{
@@ -3315,7 +3364,8 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
else if (size > 2)
{
- size = 1; // Structs that are larger than 2 pointers (except for HFAs) are passed by reference (to a copy)
+ size = 1; // Structs that are larger than 2 pointers (except for HFAs) are passed by
+ // reference (to a copy)
}
}
// Note that there are some additional rules for multireg structs.
@@ -3328,7 +3378,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#elif defined(_TARGET_ARM_)
if (isStructArg)
{
- size = (unsigned)(roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd), TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
+ size = (unsigned)(roundUp(info.compCompHnd->getClassSize(argx->gtArgPlace.gtArgPlaceClsHnd),
+ TARGET_POINTER_SIZE)) /
+ TARGET_POINTER_SIZE;
}
else
{
@@ -3336,8 +3388,8 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
size = genTypeStSz(argx->gtType);
}
#elif defined(_TARGET_X86_)
- size = genTypeStSz(argx->gtType);
-#else
+ size = genTypeStSz(argx->gtType);
+#else
#error Unsupported or unset target architecture
#endif // _TARGET_XXX_
}
@@ -3346,7 +3398,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
size = GetHfaCount(argx);
}
-#endif // _TARGET_ARM_
+#endif // _TARGET_ARM_
else // struct type
{
// We handle two opcodes: GT_MKREFANY and GT_OBJ
@@ -3360,9 +3412,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
if (varTypeIsStruct(argx))
{
- size = info.compCompHnd->getClassSize(impGetRefAnyClass());
+ size = info.compCompHnd->getClassSize(impGetRefAnyClass());
unsigned roundupSize = (unsigned)roundUp(size, TARGET_POINTER_SIZE);
- size = roundupSize / TARGET_POINTER_SIZE;
+ size = roundupSize / TARGET_POINTER_SIZE;
eeGetSystemVAmd64PassStructInRegisterDescriptor(impGetRefAnyClass(), &structDesc);
}
else
@@ -3371,13 +3423,13 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
size = 1;
}
#else
- size = 2;
+ size = 2;
#endif
}
else // We must have a GT_OBJ with a struct type, but the GT_OBJ may be a child of a GT_COMMA
{
- GenTreePtr argObj = argx;
- GenTreePtr* parentOfArgObj = parentArgx;
+ GenTreePtr argObj = argx;
+ GenTreePtr* parentOfArgObj = parentArgx;
assert(args->IsList());
assert(argx == args->Current());
@@ -3386,11 +3438,13 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
while (argObj->gtOper == GT_COMMA)
{
parentOfArgObj = &argObj->gtOp.gtOp2;
- argObj = argObj->gtOp.gtOp2;
+ argObj = argObj->gtOp.gtOp2;
}
if (argObj->gtOper != GT_OBJ)
+ {
BADCODE("illegal argument tree in fgMorphArgs");
+ }
CORINFO_CLASS_HANDLE objClass = argObj->gtObj.gtClass;
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
@@ -3398,8 +3452,8 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
unsigned originalSize = info.compCompHnd->getClassSize(objClass);
- originalSize = (originalSize == 0 ? TARGET_POINTER_SIZE : originalSize);
- unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE);
+ originalSize = (originalSize == 0 ? TARGET_POINTER_SIZE : originalSize);
+ unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE);
structSize = originalSize;
@@ -3407,12 +3461,12 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
structBaseType = getArgTypeForStruct(objClass, &howToPassStruct, originalSize);
#ifdef _TARGET_ARM64_
- if ((howToPassStruct == SPK_PrimitiveType) && // Passed in a single register
- !isPow2(originalSize)) // size is 3,5,6 or 7 bytes
+ if ((howToPassStruct == SPK_PrimitiveType) && // Passed in a single register
+ !isPow2(originalSize)) // size is 3,5,6 or 7 bytes
{
- if (argObj->gtObj.gtOp1->IsVarAddr()) // Is the source a LclVar?
+ if (argObj->gtObj.gtOp1->IsVarAddr()) // Is the source a LclVar?
{
- // For ARM64 we pass structs that are 3,5,6,7 bytes in size
+ // For ARM64 we pass structs that are 3,5,6,7 bytes in size
// we can read 4 or 8 bytes from the LclVar to pass this arg
originalSize = genTypeSize(structBaseType);
}
@@ -3422,9 +3476,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
// On System V OS-es a struct is never passed by reference.
// It is either passed by value on the stack or in registers.
- bool passStructInRegisters = false;
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
- bool passStructByRef = false;
+ bool passStructInRegisters = false;
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+ bool passStructByRef = false;
#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
// The following if-then-else needs to be carefully refactored.
@@ -3433,7 +3487,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// It can do this with structs sizes that are 1, 2, 4, or 8 bytes.
// It can't do this when FEATURE_UNIX_AMD64_STRUCT_PASSING is defined (Why?)
// TODO-Cleanup: Remove the #ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING below.
- // It also can't do this if we have a HFA arg,
+ // It also can't do this if we have a HFA arg,
// unless we have a 1-elem HFA in which case we want to do the optimization.
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -3444,41 +3498,41 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
//
// Check for cases that we cannot optimize:
//
-                        if ((originalSize > TARGET_POINTER_SIZE) ||  // it is a struct that is larger than a pointer
-                            !isPow2(originalSize) ||                 // it is not a power of two (1, 2, 4 or 8)
-                            (isHfaArg && (hfaSlots != 1)))           // it is an HFA struct with more than one element
-#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+                        if ((originalSize > TARGET_POINTER_SIZE) || // it is a struct that is larger than a pointer
+                            !isPow2(originalSize) ||                // it is not a power of two (1, 2, 4 or 8)
+                            (isHfaArg && (hfaSlots != 1)))          // it is an HFA struct with more than one element
+#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
{
// Normalize 'size' to the number of pointer sized items
// 'size' is the number of register slots that we will use to pass the argument
size = roundupSize / TARGET_POINTER_SIZE;
#if defined(_TARGET_AMD64_)
#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
- size = 1; // This must be copied to a temp and passed by address
+ size = 1; // This must be copied to a temp and passed by address
passStructByRef = true;
- copyBlkClass = objClass;
+ copyBlkClass = objClass;
#else // FEATURE_UNIX_AMD64_STRUCT_PASSING
if (!structDesc.passedInRegisters)
{
- GenTreePtr lclVar = fgIsIndirOfAddrOfLocal(argObj);
- bool needCpyBlk = false;
+ GenTreePtr lclVar = fgIsIndirOfAddrOfLocal(argObj);
+ bool needCpyBlk = false;
if (lclVar != nullptr)
{
// If the struct is promoted to registers, it has to be materialized
// on stack. We may want to support promoted structures in
                                    // codegening putarg_stk instead of creating a copy here.
- LclVarDsc* varDsc = &lvaTable[lclVar->gtLclVarCommon.gtLclNum];
- needCpyBlk = varDsc->lvPromoted;
+ LclVarDsc* varDsc = &lvaTable[lclVar->gtLclVarCommon.gtLclNum];
+ needCpyBlk = varDsc->lvPromoted;
}
- else
+ else
{
// If simd16 comes from vector<t>, eeGetSystemVAmd64PassStructInRegisterDescriptor
// sets structDesc.passedInRegisters to be false.
//
- // GT_ADDR(GT_SIMD) is not a rationalized IR form and is not handled
- // by rationalizer. For now we will let SIMD struct arg to be copied to
- // a local. As part of cpblk rewrite, rationalizer will handle GT_ADDR(GT_SIMD)
- //
+ // GT_ADDR(GT_SIMD) is not a rationalized IR form and is not handled
+ // by rationalizer. For now we will let SIMD struct arg to be copied to
+ // a local. As part of cpblk rewrite, rationalizer will handle GT_ADDR(GT_SIMD)
+ //
// +--* obj simd16
// | \--* addr byref
// | | /--* lclVar simd16 V05 loc4
@@ -3488,13 +3542,13 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// TODO-Amd64-Unix: The rationalizer can be updated to handle this pattern,
// so that we don't need to generate a copy here.
GenTree* addr = argObj->gtOp.gtOp1;
- if (addr->OperGet() == GT_ADDR)
+ if (addr->OperGet() == GT_ADDR)
{
GenTree* addrChild = addr->gtOp.gtOp1;
- if (addrChild->OperGet() == GT_SIMD)
+ if (addrChild->OperGet() == GT_SIMD)
{
needCpyBlk = true;
- }
+ }
}
}
passStructInRegisters = false;
@@ -3503,38 +3557,38 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
copyBlkClass = objClass;
}
else
- {
+ {
copyBlkClass = NO_CLASS_HANDLE;
}
}
else
{
// The objClass is used to materialize the struct on stack.
- // For SystemV, the code below generates copies for struct arguments classified
- // as register argument.
+ // For SystemV, the code below generates copies for struct arguments classified
+ // as register argument.
// TODO-Amd64-Unix: We don't always need copies for this case. Struct arguments
- // can be passed on registers or can be copied directly to outgoing area.
+ // can be passed on registers or can be copied directly to outgoing area.
passStructInRegisters = true;
- copyBlkClass = objClass;
+ copyBlkClass = objClass;
}
-
+
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
#elif defined(_TARGET_ARM64_)
if ((size > 2) && !isHfaArg)
{
- size = 1; // This must be copied to a temp and passed by address
+ size = 1; // This must be copied to a temp and passed by address
passStructByRef = true;
- copyBlkClass = objClass;
+ copyBlkClass = objClass;
}
#endif
#ifdef _TARGET_ARM_
- // If we're passing a promoted struct local var,
+ // If we're passing a promoted struct local var,
// we may need to skip some registers due to alignment; record those.
GenTreePtr lclVar = fgIsIndirOfAddrOfLocal(argObj);
if (lclVar != NULL)
{
- LclVarDsc* varDsc = &lvaTable[lclVar->gtLclVarCommon.gtLclNum];
+ LclVarDsc* varDsc = &lvaTable[lclVar->gtLclVarCommon.gtLclNum];
if (varDsc->lvPromoted)
{
assert(argObj->OperGet() == GT_OBJ);
@@ -3548,20 +3602,22 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
#ifndef FEATURE_UNIX_AMD64_STRUCT_PASSING
// TODO-Amd64-Unix: Since the else part below is disabled for UNIX_AMD64, copies are always
- // generated for struct 1, 2, 4, or 8.
- else // We have a struct argument with size 1, 2, 4 or 8 bytes
+ // generated for struct 1, 2, 4, or 8.
+ else // We have a struct argument with size 1, 2, 4 or 8 bytes
{
// change our GT_OBJ into a GT_IND of the correct type.
- // We've already ensured above that size is a power of 2, and less than or equal to pointer size.
+ // We've already ensured above that size is a power of 2, and less than or equal to pointer
+ // size.
assert(howToPassStruct == SPK_PrimitiveType);
- // ToDo: remove this block as getArgTypeForStruct properly handles turning one element HFAs into primitives
+ // ToDo: remove this block as getArgTypeForStruct properly handles turning one element HFAs into
+ // primitives
if (isHfaArg)
{
// If we reach here with an HFA arg it has to be a one element HFA
assert(hfaSlots == 1);
- structBaseType = hfaType; // change the indirection type to a floating point type
+ structBaseType = hfaType; // change the indirection type to a floating point type
}
noway_assert(structBaseType != TYP_UNKNOWN);
@@ -3577,38 +3633,41 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// (as the addr always marks it for its op1)
temp->gtFlags &= ~GTF_DONT_CSE;
temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE);
- DEBUG_DESTROY_NODE(argObj->gtOp.gtOp1); // GT_ADDR
- DEBUG_DESTROY_NODE(argObj); // GT_IND
+ DEBUG_DESTROY_NODE(argObj->gtOp.gtOp1); // GT_ADDR
+ DEBUG_DESTROY_NODE(argObj); // GT_IND
- argObj = temp;
+ argObj = temp;
*parentOfArgObj = temp;
// If the OBJ had been the top level node, we've now changed argx.
if (parentOfArgObj == parentArgx)
+ {
argx = temp;
+ }
}
if (argObj->gtOper == GT_LCL_VAR)
{
- unsigned lclNum = argObj->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &lvaTable[lclNum];
+ unsigned lclNum = argObj->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[lclNum];
if (varDsc->lvPromoted)
{
- if (varDsc->lvFieldCnt == 1)
+ if (varDsc->lvFieldCnt == 1)
{
// get the first and only promoted field
- LclVarDsc * fieldVarDsc = &lvaTable[varDsc->lvFieldLclStart];
+ LclVarDsc* fieldVarDsc = &lvaTable[varDsc->lvFieldLclStart];
if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize)
{
// we will use the first and only promoted field
argObj->gtLclVarCommon.SetLclNum(varDsc->lvFieldLclStart);
- if (varTypeCanReg(fieldVarDsc->TypeGet()) && (genTypeSize(fieldVarDsc->TypeGet()) == originalSize))
+ if (varTypeCanReg(fieldVarDsc->TypeGet()) &&
+ (genTypeSize(fieldVarDsc->TypeGet()) == originalSize))
{
// Just use the existing field's type
argObj->gtType = fieldVarDsc->TypeGet();
}
- else
+ else
{
// Can't use the existing field's type, so use GT_LCL_FLD to swizzle
// to a new type
@@ -3662,14 +3721,14 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
if ((structBaseType == TYP_STRUCT) &&
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
!passStructInRegisters
-#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+#else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
!passStructByRef
#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
)
{
if (isHfaArg && passUsingFloatRegs)
{
- size = GetHfaCount(argx); // GetHfaCount returns number of elements in the HFA
+ size = GetHfaCount(argx); // GetHfaCount returns number of elements in the HFA
}
else
{
@@ -3681,8 +3740,8 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#if defined(_TARGET_X86_) && !defined(LEGACY_BACKEND)
// TODO-X86-CQ: [1091733] Revisit for small structs, we should use push instruction
copyBlkClass = objClass;
- size = roundupSize / TARGET_POINTER_SIZE; // Normalize size to number of pointer sized items
-#else // !defined(_TARGET_X86_) || defined(LEGACY_BACKEND)
+ size = roundupSize / TARGET_POINTER_SIZE; // Normalize size to number of pointer sized items
+#else // !defined(_TARGET_X86_) || defined(LEGACY_BACKEND)
if (roundupSize > originalSize)
{
copyBlkClass = objClass;
@@ -3690,20 +3749,21 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// There are a few special cases where we can omit using a CopyBlk
// where we normally would need to use one.
- if (argObj->gtObj.gtOp1->IsVarAddr()) // Is the source a LclVar?
+ if (argObj->gtObj.gtOp1->IsVarAddr()) // Is the source a LclVar?
{
copyBlkClass = NO_CLASS_HANDLE;
}
}
- size = roundupSize / TARGET_POINTER_SIZE; // Normalize size to number of pointer sized items
+ size = roundupSize / TARGET_POINTER_SIZE; // Normalize size to number of pointer sized items
#endif // !defined(_TARGET_X86_) || defined(LEGACY_BACKEND)
}
}
}
#ifndef _TARGET_X86_
- // TODO-Arm: Does this apply for _TARGET_ARM_, where structs passed by value can be split between registers and stack?
+ // TODO-Arm: Does this apply for _TARGET_ARM_, where structs passed by value can be split between
+ // registers and stack?
if (size > 1)
{
hasMultiregStructArgs = true;
@@ -3722,20 +3782,21 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
&& (!isStructArg || structDesc.passedInRegisters)
#endif
- )
+ )
{
#ifdef _TARGET_ARM_
if (passUsingFloatRegs)
{
// First, see if it can be back-filled
- if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
- (fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot?
- (size == 1)) // The size to back-fill is one float register
+ if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
+ (fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot?
+ (size == 1)) // The size to back-fill is one float register
{
// Back-fill the register.
- isBackFilled = true;
+ isBackFilled = true;
regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask);
- fltArgSkippedRegMask &= ~backFillBitMask; // Remove the back-filled register(s) from the skipped mask
+ fltArgSkippedRegMask &=
+ ~backFillBitMask; // Remove the back-filled register(s) from the skipped mask
nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask));
assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG);
}
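The back-fill path above hands out a previously skipped float register by isolating the lowest set bit of fltArgSkippedRegMask and clearing it from the mask. A minimal sketch of that mask arithmetic, assuming genFindLowestBit behaves like isolating the lowest set bit and using a plain 64-bit integer in place of the JIT's regMaskTP:

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t regMaskTP; // assumed stand-in for the JIT's register mask type

    static regMaskTP genFindLowestBit(regMaskTP mask)
    {
        return mask & (~mask + 1); // isolates the lowest set bit
    }

    int main()
    {
        regMaskTP fltArgSkippedRegMask = 0x6; // say registers 1 and 2 were skipped for alignment
        while (fltArgSkippedRegMask != 0)
        {
            regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask);
            fltArgSkippedRegMask &= ~backFillBitMask; // that slot is no longer available for back-filling
            printf("back-fill register mask 0x%llx, remaining skipped 0x%llx\n",
                   (unsigned long long)backFillBitMask, (unsigned long long)fltArgSkippedRegMask);
        }
        return 0;
    }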
@@ -3762,14 +3823,15 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers?
if (isHfaArg && !isRegArg)
{
-            // recompute the 'size' so that it represents the number of stack slots rather than the number of registers
+            // recompute the 'size' so that it represents the number of stack slots rather than the number of
+            // registers
//
unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE);
- size = roundupSize / TARGET_POINTER_SIZE;
+ size = roundupSize / TARGET_POINTER_SIZE;
- // We also must update fltArgRegNum so that we no longer try to
+ // We also must update fltArgRegNum so that we no longer try to
// allocate any new floating point registers for args
- // This prevents us from backfilling a subsequent arg into d7
+ // This prevents us from backfilling a subsequent arg into d7
//
fltArgRegNum = MAX_FLOAT_REG_ARG;
}
@@ -3785,7 +3847,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
//
if (!isRegArg && (size > 1))
{
- // We also must update intArgRegNum so that we no longer try to
+ // We also must update intArgRegNum so that we no longer try to
// allocate any new general purpose registers for args
//
intArgRegNum = maxRegArgs;
@@ -3801,7 +3863,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
if (isStructArg)
{
unsigned int structFloatRegs = 0;
- unsigned int structIntRegs = 0;
+ unsigned int structIntRegs = 0;
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
{
if (structDesc.IsIntegralSlot(i))
@@ -3829,8 +3891,8 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
isRegArg = intArgRegNum < MAX_REG_ARG;
}
}
-#else // !defined(UNIX_AMD64_ABI)
- isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
+#else // !defined(UNIX_AMD64_ABI)
+ isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
#endif // !defined(UNIX_AMD64_ABI)
#endif // _TARGET_ARM_
}
@@ -3874,12 +3936,12 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// If we think we're going to split a struct between integer registers and the stack, check to
// see if we've already assigned a floating-point arg to the stack.
- if (isRegArg && // We decided above to use a register for the argument
- !passUsingFloatRegs && // We're using integer registers
- (intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack
- anyFloatStackArgs) // We've already used the stack for a floating-point argument
+ if (isRegArg && // We decided above to use a register for the argument
+ !passUsingFloatRegs && // We're using integer registers
+ (intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack
+ anyFloatStackArgs) // We've already used the stack for a floating-point argument
{
- isRegArg = false; // Change our mind; don't pass this struct partially in registers
+ isRegArg = false; // Change our mind; don't pass this struct partially in registers
// Skip the rest of the integer argument registers
for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum)
@@ -3894,15 +3956,15 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
regNumber nextRegNum = REG_STK;
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- regNumber nextOtherRegNum = REG_STK;
+ regNumber nextOtherRegNum = REG_STK;
unsigned int structFloatRegs = 0;
- unsigned int structIntRegs = 0;
+ unsigned int structIntRegs = 0;
if (isStructArg && structDesc.passedInRegisters)
{
// It is a struct passed in registers. Assign the next available register.
assert((structDesc.eightByteCount <= 2) && "Too many eightbytes.");
- regNumber* nextRegNumPtrs[2] = { &nextRegNum, &nextOtherRegNum };
+ regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum};
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
{
if (structDesc.IsIntegralSlot(i))
@@ -3920,8 +3982,9 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
else
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
{
- // fill in or update the argInfo table
- nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum) : genMapIntRegArgNumToRegNum(intArgRegNum);
+ // fill in or update the argInfo table
+ nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum)
+ : genMapIntRegArgNumToRegNum(intArgRegNum);
}
#ifdef _TARGET_AMD64_
@@ -3943,7 +4006,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#ifndef LEGACY_BACKEND
// If there are nonstandard args (outside the calling convention) they were inserted above
// and noted them in a table so we can recognize them here and build their argInfo.
- //
+ //
// They should not affect the placement of any other args or stack space required.
// Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls.
isNonStandard = nonStandardArgs.FindReg(argx, &nextRegNum);
@@ -3952,11 +4015,13 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// This is a register argument - put it in the table
newArgEntry = call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, argAlign
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- , isStructArg, nextOtherRegNum, &structDesc
+ ,
+ isStructArg, nextOtherRegNum, &structDesc
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- );
+ );
- newArgEntry->SetIsHfaRegArg(passUsingFloatRegs && isHfaArg); // Note on Arm32 a HFA is passed in int regs for varargs
+ newArgEntry->SetIsHfaRegArg(passUsingFloatRegs &&
+ isHfaArg); // Note on Arm32 a HFA is passed in int regs for varargs
newArgEntry->SetIsBackFilled(isBackFilled);
newArgEntry->isNonStandard = isNonStandard;
}
@@ -3984,7 +4049,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
#ifdef WINDOWS_AMD64_ABI
// Whenever we pass an integer register argument
- // we skip the corresponding floating point register argument
+ // we skip the corresponding floating point register argument
intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG);
#endif // WINDOWS_AMD64_ABI
#ifdef _TARGET_ARM_
@@ -4021,7 +4086,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
// This indicates a partial enregistration of a struct type
assert((isStructArg) || argx->OperIsCopyBlkOp() ||
- (argx->gtOper == GT_COMMA && (args->gtFlags & GTF_ASG)));
+ (argx->gtOper == GT_COMMA && (args->gtFlags & GTF_ASG)));
unsigned numRegsPartial = size - (intArgRegNum - MAX_REG_ARG);
assert((unsigned char)numRegsPartial == numRegsPartial);
call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial);
@@ -4033,13 +4098,13 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
}
}
- else // We have an argument that is not passed in a register
+ else // We have an argument that is not passed in a register
{
fgPtrArgCntCur += size;
// If the register arguments have not been determined then we must fill in the argInfo
- if (lateArgsComputed)
+ if (lateArgsComputed)
{
// This is a stack argument - possibly update it in the table
call->fgArgInfo->RemorphStkArg(argIndex, argx, args, size, argAlign);
@@ -4047,14 +4112,16 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
else
{
// This is a stack argument - put it in the table
- call->fgArgInfo->AddStkArg(argIndex, argx, args, size, argAlign FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(isStructArg));
+ call->fgArgInfo->AddStkArg(argIndex, argx, args, size,
+ argAlign FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(isStructArg));
}
}
if (copyBlkClass != NO_CLASS_HANDLE)
{
noway_assert(!lateArgsComputed);
- fgMakeOutgoingStructArgCopy(call, args, argIndex, copyBlkClass FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(&structDesc));
+ fgMakeOutgoingStructArgCopy(call, args, argIndex,
+ copyBlkClass FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(&structDesc));
// This can cause a GTF_EXCEPT flag to be set.
// TODO-CQ: Fix the cases where this happens. We shouldn't be adding any new flags.
@@ -4063,8 +4130,8 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// any struct arguments.
// i.e. assert(((call->gtFlags & GTF_EXCEPT) != 0) || ((args->Current()->gtFlags & GTF_EXCEPT) == 0)
flagsSummary |= (args->Current()->gtFlags & GTF_EXCEPT);
-
-#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+
+#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
hasStackArgCopy = true;
#endif
}
@@ -4079,21 +4146,21 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// Get a new temp
// Here we don't need unsafe value cls check since the addr of temp is used only in mkrefany
- unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument"));
+ unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument"));
lvaSetStruct(tmp, impGetRefAnyClass(), false);
// Build the mkrefany as a comma node:
// (tmp.ptr=argx),(tmp.type=handle)
GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, offsetof(CORINFO_RefAny, dataPtr));
GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, offsetof(CORINFO_RefAny, type));
- destPtrSlot->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyDataField());
+ destPtrSlot->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyDataField());
destPtrSlot->gtFlags |= GTF_VAR_DEF;
destTypeSlot->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
destTypeSlot->gtFlags |= GTF_VAR_DEF;
- GenTreePtr asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->gtOp.gtOp1);
- GenTreePtr asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->gtOp.gtOp2);
- GenTreePtr asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot);
+ GenTreePtr asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->gtOp.gtOp1);
+ GenTreePtr asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->gtOp.gtOp2);
+ GenTreePtr asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot);
// Change the expression to "(tmp=val)"
args->gtOp.gtOp1 = asg;
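The comma tree built above splits a by-value mkrefany argument into two separate field stores on a temp, after which the temp stands in for the original value. Spelled out as plain C++, where RefAnyLayout is only an illustrative stand-in for CORINFO_RefAny and the type handle is a placeholder:

    #include <cstdio>

    // Illustrative stand-in: a data pointer paired with a type handle, like CORINFO_RefAny.
    struct RefAnyLayout
    {
        void* dataPtr;
        void* type;
    };

    int main()
    {
        int   value      = 42;
        void* typeHandle = nullptr; // placeholder; the JIT stores the real runtime type handle here

        RefAnyLayout tmp;
        tmp.dataPtr = &value;     // corresponds to "(tmp.ptr = argx)"
        tmp.type    = typeHandle; // corresponds to "(tmp.type = handle)"

        printf("temp holds dataPtr=%p type=%p\n", tmp.dataPtr, tmp.type);
        return 0;
    }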
@@ -4116,7 +4183,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
}
} // end foreach argument loop
- if (!lateArgsComputed)
+ if (!lateArgsComputed)
{
call->fgArgInfo->ArgsComplete();
#ifdef LEGACY_BACKEND
@@ -4141,17 +4208,23 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
/* Process the function address, if indirect call */
if (call->gtCallType == CT_INDIRECT)
+ {
call->gtCallAddr = fgMorphTree(call->gtCallAddr);
+ }
call->fgArgInfo->RecordStkLevel(fgPtrArgCntCur);
- if ((call->gtCallType == CT_INDIRECT) && (call->gtCallCookie != NULL))
+ if ((call->gtCallType == CT_INDIRECT) && (call->gtCallCookie != nullptr))
+ {
fgPtrArgCntCur++;
+ }
/* Remember the maximum value we ever see */
- if (fgPtrArgCntMax < fgPtrArgCntCur)
- fgPtrArgCntMax = fgPtrArgCntCur;
+ if (fgPtrArgCntMax < fgPtrArgCntCur)
+ {
+ fgPtrArgCntMax = fgPtrArgCntCur;
+ }
/* The call will pop all the arguments we pushed */
@@ -4168,7 +4241,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
unsigned preallocatedArgCount = call->fgArgInfo->GetNextSlotNum();
#if defined(UNIX_AMD64_ABI)
- opts.compNeedToAlignFrame = true; // this is currently required for the UNIX ABI to work correctly
+ opts.compNeedToAlignFrame = true; // this is currently required for the UNIX ABI to work correctly
// ToDo: Remove this re-calculation preallocatedArgCount and use the value assigned above.
@@ -4180,7 +4253,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
{
preallocatedArgCount += argSlots - MAX_REG_ARG;
}
-#endif // UNIX_AMD64_ABI
+#endif // UNIX_AMD64_ABI
// Check if we need to increase the size of our Outgoing Arg Space
if (preallocatedArgCount * REGSIZE_BYTES > lvaOutgoingArgSpaceSize)
@@ -4194,14 +4267,14 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// stack alignment boundary.
if (compLocallocUsed)
{
- lvaOutgoingArgSpaceSize = (unsigned) roundUp(lvaOutgoingArgSpaceSize, STACK_ALIGN);
+ lvaOutgoingArgSpaceSize = (unsigned)roundUp(lvaOutgoingArgSpaceSize, STACK_ALIGN);
}
- }
+ }
#ifdef DEBUG
if (verbose)
{
- printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, lvaOutgoingArgSpaceSize=%d\n",
- argSlots, preallocatedArgCount, call->fgArgInfo->GetNextSlotNum(), lvaOutgoingArgSpaceSize);
+ printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, lvaOutgoingArgSpaceSize=%d\n", argSlots,
+ preallocatedArgCount, call->fgArgInfo->GetNextSlotNum(), lvaOutgoingArgSpaceSize);
}
#endif
}
@@ -4212,16 +4285,16 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
// If the register arguments have already been determined
- // or we have no register arguments then we don't need to
+ // or we have no register arguments then we don't need to
// call SortArgs() and EvalArgsToTemps()
//
// For UNIX_AMD64, the condition without hasStackArgCopy cannot catch
// all cases of fgMakeOutgoingStructArgCopy() being called. hasStackArgCopy
// is added to make sure to call EvalArgsToTemp.
if (!lateArgsComputed && (call->fgArgInfo->HasRegArgs()
- #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
+#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
|| hasStackArgCopy
- #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
))
{
// This is the first time that we morph this call AND it has register arguments.
@@ -4283,12 +4356,12 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* callNode)
// args:
// call: The call whose arguments need to be morphed.
// hasStructArgument: Whether this call has struct arguments.
-//
+//
void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgument)
{
- unsigned flagsSummary = 0;
- GenTreePtr args;
- GenTreePtr argx;
+ unsigned flagsSummary = 0;
+ GenTreePtr args;
+ GenTreePtr argx;
if (hasStructArgument)
{
@@ -4296,15 +4369,15 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
for (args = call->gtCallArgs; args != nullptr; args = args->gtOp.gtOp2)
{
- // For late arguments the arg tree that is overridden is in the gtCallLateArgs list.
- // For such late args the gtCallArgList contains the setup arg node (evaluating the arg.)
+ // For late arguments the arg tree that is overridden is in the gtCallLateArgs list.
+ // For such late args the gtCallArgList contains the setup arg node (evaluating the arg.)
// The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping
// between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself,
// otherwise points to the list in the late args list.
- bool isLateArg = (args->gtOp.gtOp1->gtFlags & GTF_LATE_ARG) != 0;
+ bool isLateArg = (args->gtOp.gtOp1->gtFlags & GTF_LATE_ARG) != 0;
fgArgTabEntryPtr fgEntryPtr = gtArgEntryByNode(call, args->gtOp.gtOp1);
assert(fgEntryPtr != nullptr);
- GenTreePtr argx = fgEntryPtr->node;
+ GenTreePtr argx = fgEntryPtr->node;
GenTreePtr lateList = nullptr;
GenTreePtr lateNode = nullptr;
@@ -4324,10 +4397,10 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
}
assert(lateList != nullptr && lateNode != nullptr);
}
- GenTreePtr arg = argx;
- bool argListCreated = false;
+ GenTreePtr arg = argx;
+ bool argListCreated = false;
- var_types type = arg->TypeGet();
+ var_types type = arg->TypeGet();
if (varTypeIsStruct(type))
{
@@ -4345,21 +4418,19 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
continue;
}
- assert(
- arg->OperGet() == GT_LCL_VAR ||
- arg->OperGet() == GT_LCL_FLD ||
- (arg->OperGet() == GT_ADDR &&
- (arg->gtOp.gtOp1->OperGet() == GT_LCL_FLD ||
- arg->gtOp.gtOp1->OperGet() == GT_LCL_VAR)));
+ assert(arg->OperGet() == GT_LCL_VAR || arg->OperGet() == GT_LCL_FLD ||
+ (arg->OperGet() == GT_ADDR &&
+ (arg->gtOp.gtOp1->OperGet() == GT_LCL_FLD || arg->gtOp.gtOp1->OperGet() == GT_LCL_VAR)));
- GenTreeLclVarCommon* lclCommon = arg->OperGet() == GT_ADDR ?
- arg->gtOp.gtOp1->AsLclVarCommon() : arg->AsLclVarCommon();
+ GenTreeLclVarCommon* lclCommon =
+ arg->OperGet() == GT_ADDR ? arg->gtOp.gtOp1->AsLclVarCommon() : arg->AsLclVarCommon();
if (fgEntryPtr->structDesc.passedInRegisters)
{
if (fgEntryPtr->structDesc.eightByteCount == 1)
{
// Change the type and below the code will change the LclVar to a LCL_FLD
- type = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[0], fgEntryPtr->structDesc.eightByteSizes[0]);
+ type = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[0],
+ fgEntryPtr->structDesc.eightByteSizes[0]);
}
else if (fgEntryPtr->structDesc.eightByteCount == 2)
{
@@ -4367,28 +4438,28 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
argListCreated = true;
// Second eightbyte.
- GenTreeLclFld* newLclField = new(this, GT_LCL_FLD) GenTreeLclFld(
- GetTypeFromClassificationAndSizes(
- fgEntryPtr->structDesc.eightByteClassifications[1],
- fgEntryPtr->structDesc.eightByteSizes[1]),
- lclCommon->gtLclNum,
- fgEntryPtr->structDesc.eightByteOffsets[1]);
+ GenTreeLclFld* newLclField = new (this, GT_LCL_FLD)
+ GenTreeLclFld(GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc
+ .eightByteClassifications[1],
+ fgEntryPtr->structDesc.eightByteSizes[1]),
+ lclCommon->gtLclNum, fgEntryPtr->structDesc.eightByteOffsets[1]);
// Note this should actually be: secondNode = gtNewArgList(newLclField)
GenTreeArgList* secondNode = gtNewListNode(newLclField, nullptr);
- secondNode->gtType = originalType; // Preserve the type. It is a special case.
- newLclField->gtFieldSeq = FieldSeqStore::NotAField();
+ secondNode->gtType = originalType; // Preserve the type. It is a special case.
+ newLclField->gtFieldSeq = FieldSeqStore::NotAField();
// First field
arg->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();
- arg->gtType = GetTypeFromClassificationAndSizes(
- fgEntryPtr->structDesc.eightByteClassifications[0],
- fgEntryPtr->structDesc.eightByteSizes[0]);
- arg = gtNewListNode(arg, secondNode);
+ arg->gtType =
+ GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[0],
+ fgEntryPtr->structDesc.eightByteSizes[0]);
+ arg = gtNewListNode(arg, secondNode);
arg->gtType = type; // Preserve the type. It is a special case.
}
else
{
- assert(false && "More than two eightbytes detected for CLR."); // No more than two eightbytes for the CLR.
+ assert(false && "More than two eightbytes detected for CLR."); // No more than two eightbytes
+ // for the CLR.
}
}
@@ -4420,10 +4491,10 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
if (argx != arg)
{
- bool isLateArg = (args->gtOp.gtOp1->gtFlags & GTF_LATE_ARG) != 0;
+ bool isLateArg = (args->gtOp.gtOp1->gtFlags & GTF_LATE_ARG) != 0;
fgArgTabEntryPtr fgEntryPtr = gtArgEntryByNode(call, args->gtOp.gtOp1);
assert(fgEntryPtr != nullptr);
- GenTreePtr argx = fgEntryPtr->node;
+ GenTreePtr argx = fgEntryPtr->node;
GenTreePtr lateList = nullptr;
GenTreePtr lateNode = nullptr;
if (isLateArg)
@@ -4462,7 +4533,7 @@ void Compiler::fgMorphSystemVStructArgs(GenTreeCall* call, bool hasStructArgumen
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
//-----------------------------------------------------------------------------
-// fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and
+// fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and
// call fgMorphMultiregStructArg on each of them.
//
// Arguments:
@@ -4479,9 +4550,9 @@ void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call)
GenTreePtr args;
GenTreePtr argx;
bool foundStructArg = false;
- unsigned initialFlags = call->gtFlags;
- unsigned flagsSummary = 0;
- fgArgInfoPtr allArgInfo = call->fgArgInfo;
+ unsigned initialFlags = call->gtFlags;
+ unsigned flagsSummary = 0;
+ fgArgInfoPtr allArgInfo = call->fgArgInfo;
// Currently only ARM64 is using this method to morph the MultiReg struct args
// in the future AMD64_UNIX and for HFAs ARM32, will also use this method
@@ -4497,22 +4568,22 @@ void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call)
#ifdef _TARGET_AMD64_
#if defined(UNIX_AMD64_ABI)
NYI_AMD64("fgMorphMultiregStructArgs (UNIX ABI)");
-#else // WINDOWS_AMD64_ABI
+#else // WINDOWS_AMD64_ABI
assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI");
#endif // !UNIX_AMD64_ABI
#endif
for (args = call->gtCallArgs; args != nullptr; args = args->gtOp.gtOp2)
{
- // For late arguments the arg tree that is overridden is in the gtCallLateArgs list.
- // For such late args the gtCallArgList contains the setup arg node (evaluating the arg.)
+ // For late arguments the arg tree that is overridden is in the gtCallLateArgs list.
+ // For such late args the gtCallArgList contains the setup arg node (evaluating the arg.)
// The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping
// between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself,
// otherwise points to the list in the late args list.
- bool isLateArg = (args->gtOp.gtOp1->gtFlags & GTF_LATE_ARG) != 0;
+ bool isLateArg = (args->gtOp.gtOp1->gtFlags & GTF_LATE_ARG) != 0;
fgArgTabEntryPtr fgEntryPtr = gtArgEntryByNode(call, args->gtOp.gtOp1);
assert(fgEntryPtr != nullptr);
- GenTreePtr argx = fgEntryPtr->node;
+ GenTreePtr argx = fgEntryPtr->node;
GenTreePtr lateList = nullptr;
GenTreePtr lateNode = nullptr;
@@ -4544,7 +4615,7 @@ void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call)
// Did we replace 'argx' with a new tree?
if (arg != argx)
{
- fgEntryPtr->node = arg; // Record the new value for the arg in the fgEntryPtr->node
+ fgEntryPtr->node = arg; // Record the new value for the arg in the fgEntryPtr->node
// link the new arg node into either the late arg list or the gtCallArgs list
if (isLateArg)
@@ -4566,13 +4637,12 @@ void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call)
call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
}
-
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArg: Given a multireg TYP_STRUCT arg from a call argument list
// Morph the argument into a set of GT_LIST nodes.
//
// Arguments:
-// arg - A GenTree node containing a TYP_STRUCT arg that
+// arg - A GenTree node containing a TYP_STRUCT arg that
// is to be passed in multiple registers
// fgEntryPtr - the fgArgTabEntry information for the current 'arg'
//
@@ -4589,7 +4659,7 @@ void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call)
// indirections.
// Currently the implementation only handles ARM64 and will NYI for other architectures.
//
-GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPtr fgEntryPtr)
+GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPtr fgEntryPtr)
{
assert(arg->TypeGet() == TYP_STRUCT);
@@ -4601,14 +4671,14 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
// Examine 'arg' and setup argValue objClass and structSize
//
CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE;
- GenTreePtr argValue = arg; // normally argValue will be arg, but see right below
+ GenTreePtr argValue = arg; // normally argValue will be arg, but see right below
unsigned structSize = 0;
if (arg->OperGet() == GT_OBJ)
{
- GenTreeObj* argObj = arg->AsObj();
- objClass = argObj->gtClass;
- structSize = info.compCompHnd->getClassSize(objClass);
+ GenTreeObj* argObj = arg->AsObj();
+ objClass = argObj->gtClass;
+ structSize = info.compCompHnd->getClassSize(objClass);
// If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR
//
@@ -4620,7 +4690,7 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
else if (arg->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* varNode = arg->AsLclVarCommon();
- unsigned varNum = varNode->gtLclNum;
+ unsigned varNum = varNode->gtLclNum;
assert(varNum < lvaCount);
LclVarDsc* varDsc = &lvaTable[varNum];
@@ -4629,20 +4699,20 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
}
noway_assert(objClass != nullptr);
- var_types hfaType = TYP_UNDEF;
- var_types elemType = TYP_UNDEF;
- unsigned elemCount = 0;
- unsigned elemSize = 0;
- var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0
+ var_types hfaType = TYP_UNDEF;
+ var_types elemType = TYP_UNDEF;
+ unsigned elemCount = 0;
+ unsigned elemSize = 0;
+ var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0
- hfaType = GetHfaType(objClass); // set to float or double if it is an HFA, otherwise TYP_UNDEF
+ hfaType = GetHfaType(objClass); // set to float or double if it is an HFA, otherwise TYP_UNDEF
if (varTypeIsFloating(hfaType))
{
- elemType = hfaType;
- elemSize = genTypeSize(elemType);
+ elemType = hfaType;
+ elemSize = genTypeSize(elemType);
elemCount = structSize / elemSize;
- assert(elemSize*elemCount == structSize);
- for (unsigned inx = 0; inx<elemCount; inx++)
+ assert(elemSize * elemCount == structSize);
+ for (unsigned inx = 0; inx < elemCount; inx++)
{
type[inx] = elemType;
}
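For the HFA case above, elemCount is just structSize divided by the element size, and the assert checks that the division was exact. A quick numeric check of that bookkeeping, using a made-up three-float struct as the example HFA:

    #include <cassert>
    #include <cstdio>

    int main()
    {
        struct Hfa3 { float a, b, c; }; // an HFA of three floats, used only as an example

        const unsigned structSize = sizeof(Hfa3);
        const unsigned elemSize   = sizeof(float);
        const unsigned elemCount  = structSize / elemSize;

        assert(elemSize * elemCount == structSize); // same exactness check as the assert above
        printf("HFA: %u elements of %u bytes = %u bytes total\n", elemCount, elemSize, structSize);
        return 0;
    }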
@@ -4650,20 +4720,19 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
else
{
assert(structSize <= 2 * TARGET_POINTER_SIZE);
- BYTE gcPtrs[2] = { TYPE_GC_NONE, TYPE_GC_NONE };
+ BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);
elemCount = 2;
- type[0] = getJitGCType(gcPtrs[0]);
- type[1] = getJitGCType(gcPtrs[1]);
+ type[0] = getJitGCType(gcPtrs[0]);
+ type[1] = getJitGCType(gcPtrs[1]);
- if ((argValue->OperGet() == GT_LCL_FLD) ||
- (argValue->OperGet() == GT_LCL_VAR))
+ if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR))
{
- // We can safely widen this to 16 bytes since we are loading from
- // a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and
+ // We can safely widen this to 16 bytes since we are loading from
+ // a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and
// lives in the stack frame or will be a promoted field.
//
- elemSize = TARGET_POINTER_SIZE;
+ elemSize = TARGET_POINTER_SIZE;
structSize = 2 * TARGET_POINTER_SIZE;
}
else // we must have a GT_OBJ
@@ -4673,44 +4742,45 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
// We need to load the struct from an arbitrary address
// and we can't read past the end of the structSize
// We adjust the second load type here
- //
+ //
if (structSize < 2 * TARGET_POINTER_SIZE)
{
- switch (structSize - TARGET_POINTER_SIZE) {
- case 1:
- type[1] = TYP_BYTE;
- break;
- case 2:
- type[1] = TYP_SHORT;
- break;
- case 4:
- type[1] = TYP_INT;
- break;
- default:
- noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg");
- break;
+ switch (structSize - TARGET_POINTER_SIZE)
+ {
+ case 1:
+ type[1] = TYP_BYTE;
+ break;
+ case 2:
+ type[1] = TYP_SHORT;
+ break;
+ case 4:
+ type[1] = TYP_INT;
+ break;
+ default:
+ noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg");
+ break;
}
}
- }
+ }
}
// We should still have a TYP_STRUCT
assert(argValue->TypeGet() == TYP_STRUCT);
- GenTreeArgList* newArg = nullptr;
+ GenTreeArgList* newArg = nullptr;
// Are we passing a struct LclVar?
//
if (argValue->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
- unsigned varNum = varNode->gtLclNum;
+ unsigned varNum = varNode->gtLclNum;
assert(varNum < lvaCount);
LclVarDsc* varDsc = &lvaTable[varNum];
// At this point any TYP_STRUCT LclVar must be a 16-byte struct
// or an HFA struct, both which are passed by value.
//
- assert((varDsc->lvSize() == 2*TARGET_POINTER_SIZE) || varDsc->lvIsHfa());
+ assert((varDsc->lvSize() == 2 * TARGET_POINTER_SIZE) || varDsc->lvIsHfa());
varDsc->lvIsMultiRegArg = true;
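The switch above narrows the second load so that a 9..15-byte struct read through a GT_OBJ never touches bytes past its end: after the first pointer-sized load, the tail is loaded as a byte, short, or int. A small table of that mapping, assuming 8-byte pointers; sizes whose tail is 3, 5, 6, or 7 bytes fall into the NYI assert:

    #include <cstdio>

    // Maps the leftover bytes after the first 8-byte load to the type used for the second load.
    static const char* secondLoadType(unsigned structSize)
    {
        switch (structSize - 8)
        {
            case 1:  return "TYP_BYTE";
            case 2:  return "TYP_SHORT";
            case 4:  return "TYP_INT";
            default: return "odd sized struct (asserts above)";
        }
    }

    int main()
    {
        const unsigned sizes[] = {9, 10, 12, 15};
        for (unsigned size : sizes)
        {
            printf("%2u-byte struct: second load uses %s\n", size, secondLoadType(size));
        }
        return 0;
    }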
@@ -4729,7 +4799,7 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
noway_assert(elemType == (varDsc->lvHfaTypeIsFloat() ? TYP_FLOAT : TYP_DOUBLE));
noway_assert(elemSize == genTypeSize(elemType));
noway_assert(elemCount == (varDsc->lvExactSize / elemSize));
- noway_assert(elemSize*elemCount == varDsc->lvExactSize);
+ noway_assert(elemSize * elemCount == varDsc->lvExactSize);
for (unsigned inx = 0; (inx < elemCount); inx++)
{
@@ -4747,15 +4817,15 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
// We setup the type[inx] value above using the GC info from 'objClass'
// This GT_LCL_VAR must have the same GC layout info
- //
+ //
if (currentGcLayoutType != TYPE_GC_NONE)
{
noway_assert(type[inx] == getJitGCType((BYTE)currentGcLayoutType));
}
else
{
-                        // We may have used a small type when we set up the type[inx] values above
-                        // We can safely widen this to TYP_I_IMPL
+                        // We may have used a small type when we set up the type[inx] values above
+                        // We can safely widen this to TYP_I_IMPL
type[inx] = TYP_I_IMPL;
}
}
@@ -4775,15 +4845,16 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
LclVarDsc* loVarDsc = &lvaTable[loVarNum];
LclVarDsc* hiVarDsc = &lvaTable[hiVarNum];
- var_types loType = loVarDsc->lvType;
- var_types hiType = hiVarDsc->lvType;
+ var_types loType = loVarDsc->lvType;
+ var_types hiType = hiVarDsc->lvType;
if (varTypeIsFloating(loType) || varTypeIsFloating(hiType))
{
// TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer
// registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered)
//
- JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum);
+ JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n",
+ varNum);
//
// we call lvaSetVarDoNotEnregister and do the proper transformation below.
//
@@ -4796,7 +4867,7 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
GenTreePtr hiLclVar = gtNewLclvNode(hiVarNum, hiType, hiVarNum);
// Create a new tree for 'arg'
- // replace the existing LDOBJ(ADDR(LCLVAR))
+ // replace the existing LDOBJ(ADDR(LCLVAR))
// with a LIST(LCLVAR-LO, LIST(LCLVAR-HI, nullptr))
//
newArg = gtNewListNode(loLclVar, gtNewArgList(hiLclVar));
@@ -4828,7 +4899,7 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR))
{
GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
- unsigned varNum = varNode->gtLclNum;
+ unsigned varNum = varNode->gtLclNum;
assert(varNum < lvaCount);
LclVarDsc* varDsc = &lvaTable[varNum];
@@ -4843,8 +4914,8 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
// alignment of the baseOffset is required
noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0);
noway_assert(elemSize == TARGET_POINTER_SIZE);
- unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE;
- const BYTE * gcPtrs = varDsc->lvGcLayout; // Get the GC layout for the local variable
+ unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE;
+ const BYTE* gcPtrs = varDsc->lvGcLayout; // Get the GC layout for the local variable
for (unsigned inx = 0; (inx < elemCount); inx++)
{
// The GC information must match what we setup using 'objClass'
@@ -4853,9 +4924,9 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
}
else // this varDsc contains no GC pointers
{
- for (unsigned inx = 0; inx<elemCount; inx++)
+ for (unsigned inx = 0; inx < elemCount; inx++)
{
- // The GC information must match what we setup using 'objClass'
+ // The GC information must match what we setup using 'objClass'
noway_assert(!varTypeIsGC(type[inx]));
}
}
@@ -4867,10 +4938,10 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
// Start building our list from the last element
unsigned offset = lastOffset;
- unsigned inx = elemCount;
+ unsigned inx = elemCount;
// Create a new tree for 'arg'
- // replace the existing LDOBJ(ADDR(LCLVAR))
+ // replace the existing LDOBJ(ADDR(LCLVAR))
// with a LIST(LCLFLD-LO, LIST(LCLFLD-HI, nullptr) ...)
//
while (inx > 0)
@@ -4892,27 +4963,27 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
//
else if (argValue->OperGet() == GT_OBJ)
{
- GenTreeObj* argObj = argValue->AsObj();
- GenTreePtr baseAddr = argObj->gtOp1;
- var_types addrType = baseAddr->TypeGet();
+ GenTreeObj* argObj = argValue->AsObj();
+ GenTreePtr baseAddr = argObj->gtOp1;
+ var_types addrType = baseAddr->TypeGet();
// Create a new tree for 'arg'
- // replace the existing LDOBJ(EXPR)
+ // replace the existing LDOBJ(EXPR)
// with a LIST(IND(EXPR), LIST(IND(EXPR+8), nullptr) ...)
//
// Start building our list from the last element
unsigned offset = structSize;
- unsigned inx = elemCount;
+ unsigned inx = elemCount;
while (inx > 0)
{
inx--;
elemSize = genTypeSize(type[inx]);
offset -= elemSize;
- GenTreePtr curAddr = baseAddr;
+ GenTreePtr curAddr = baseAddr;
if (offset != 0)
{
- GenTreePtr baseAddrDup = gtCloneExpr(baseAddr);
+ GenTreePtr baseAddrDup = gtCloneExpr(baseAddr);
noway_assert(baseAddrDup != nullptr);
curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL));
}
@@ -4933,7 +5004,7 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
}
}
- // If we reach here we should have set newArg to something
+ // If we reach here we should have set newArg to something
if (newArg == nullptr)
{
#ifdef DEBUG
@@ -4950,22 +5021,21 @@ GenTreePtr Compiler::fgMorphMultiregStructArg(GenTreePtr arg, fgArgTabEntryPt
}
#endif
- arg = newArg; // consider calling fgMorphTree(newArg);
+ arg = newArg; // consider calling fgMorphTree(newArg);
#endif // FEATURE_MULTIREG_ARGS
return arg;
}
-
// Make a copy of a struct variable if necessary, to pass to a callee.
// returns: tree that computes address of the outgoing arg
-void
-Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
- GenTree* args,
- unsigned argIndex,
- CORINFO_CLASS_HANDLE copyBlkClass
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr))
+void Compiler::fgMakeOutgoingStructArgCopy(
+ GenTreeCall* call,
+ GenTree* args,
+ unsigned argIndex,
+ CORINFO_CLASS_HANDLE copyBlkClass FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(
+ const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr))
{
GenTree* argx = args->Current();
noway_assert(argx->gtOper != GT_MKREFANY);
@@ -4989,10 +5059,10 @@ Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
// struct parameters if they are passed as arguments to a tail call.
if (!call->IsTailCallViaHelper() && (varDsc->lvRefCnt == 1) && !fgMightHaveLoop())
{
- varDsc->lvRefCnt = 0;
- args->gtOp.gtOp1 = lcl;
+ varDsc->lvRefCnt = 0;
+ args->gtOp.gtOp1 = lcl;
fgArgTabEntryPtr fp = Compiler::gtArgEntryByNode(call, argx);
- fp->node = lcl;
+ fp->node = lcl;
JITDUMP("did not have to make outgoing copy for V%2d", varNum);
return;
@@ -5001,11 +5071,13 @@ Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
}
if (fgOutgoingArgTemps == nullptr)
+ {
fgOutgoingArgTemps = hashBv::Create(this);
+ }
+
+ unsigned tmp = 0;
+ bool found = false;
- unsigned tmp = 0;
- bool found = false;
-
// Attempt to find a local we have already used for an outgoing struct and reuse it.
// We do not reuse within a statement.
if (!opts.MinOpts())
@@ -5014,10 +5086,10 @@ Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps)
{
LclVarDsc* varDsc = &lvaTable[lclNum];
- if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass))
- && !fgCurrentlyInUseArgTemps->testBit(lclNum))
+ if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) &&
+ !fgCurrentlyInUseArgTemps->testBit(lclNum))
{
- tmp = (unsigned) lclNum;
+ tmp = (unsigned)lclNum;
found = true;
JITDUMP("reusing outgoing struct arg");
break;
@@ -5046,12 +5118,12 @@ Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
}
// Create a reference to the temp
- GenTreePtr dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType);
- dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction.
+ GenTreePtr dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType);
+ dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction.
dest = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
lvaTable[tmp].incRefCnts(compCurBB->getBBWeight(this), this);
- GenTreePtr src;
+ GenTreePtr src;
if (argx->gtOper == GT_OBJ)
{
src = argx->gtOp.gtOp1;
@@ -5064,7 +5136,7 @@ Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
// Copy the valuetype to the temp
GenTreePtr copyBlk = gtNewCpObjNode(dest, src, copyBlkClass, false);
- copyBlk = fgMorphCopyBlock(copyBlk);
+ copyBlk = fgMorphCopyBlock(copyBlk);
#if FEATURE_FIXED_OUT_ARGS
@@ -5076,9 +5148,7 @@ Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
// Structs are always on the stack, and thus never need temps
// so we have to put the copy and temp all into one expression
- GenTreePtr arg = fgMakeTmpArgNode(
- tmp
- FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(structDescPtr->passedInRegisters));
+ GenTreePtr arg = fgMakeTmpArgNode(tmp FEATURE_UNIX_AMD64_STRUCT_PASSING_ONLY_ARG(structDescPtr->passedInRegisters));
// Change the expression to "(tmp=val),tmp"
arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg);
@@ -5093,9 +5163,9 @@ Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
#ifdef _TARGET_ARM_
// See declaration for specification comment.
-void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc,
- unsigned firstArgRegNum,
- regMaskTP* pArgSkippedRegMask)
+void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc,
+ unsigned firstArgRegNum,
+ regMaskTP* pArgSkippedRegMask)
{
assert(varDsc->lvPromoted);
// There's no way to do these calculations without breaking abstraction and assuming that
@@ -5103,24 +5173,28 @@ void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* var
// To start, figure out what register contains the last byte of the first argument.
LclVarDsc* firstFldVarDsc = &lvaTable[varDsc->lvFieldLclStart];
- unsigned lastFldRegOfLastByte = (firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;;
+ unsigned lastFldRegOfLastByte =
+ (firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
+ ;
// Now we're keeping track of the register that the last field ended in; see what registers
// subsequent fields start in, and whether any are skipped.
// (We assume here the invariant that the fields are sorted in offset order.)
for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++)
{
- unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset;
- LclVarDsc* fldVarDsc = &lvaTable[fldVarNum];
- unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE;
+ unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset;
+ LclVarDsc* fldVarDsc = &lvaTable[fldVarNum];
+ unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE;
assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields.
// This loop should enumerate the offsets of any registers skipped.
// Find what reg contains the last byte:
// And start at the first register after that. If that isn't the first reg of the current
- for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset; skippedRegOffsets++)
+ for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset;
+ skippedRegOffsets++)
{
// If the register number would not be an arg reg, we're done.
- if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG) return;
+ if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG)
+ return;
*pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets));
}
lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
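The arithmetic above locates a field's last occupied argument register as (offset + size - 1) / TARGET_POINTER_SIZE and then flags any registers lying between consecutive fields as skipped. A worked example with 4-byte registers (the ARM case) and made-up field offsets and sizes:

    #include <cstdio>

    int main()
    {
        const unsigned regSize = 4; // TARGET_POINTER_SIZE on 32-bit ARM

        // Two promoted fields with a hole between them; offsets and sizes are illustrative only.
        const struct { unsigned offset, size; } fields[] = {{0, 4}, {12, 4}};
        const unsigned fieldCount = sizeof(fields) / sizeof(fields[0]);

        unsigned lastFldRegOfLastByte = (fields[0].offset + fields[0].size - 1) / regSize;
        for (unsigned i = 1; i < fieldCount; i++)
        {
            unsigned fldRegOffset = fields[i].offset / regSize;
            for (unsigned skipped = lastFldRegOfLastByte + 1; skipped < fldRegOffset; skipped++)
            {
                printf("register slot %u holds no field and is skipped\n", skipped);
            }
            lastFldRegOfLastByte = (fields[i].offset + fields[i].size - 1) / regSize;
        }
        return 0;
    }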
@@ -5129,26 +5203,25 @@ void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* var
#endif // _TARGET_ARM_
-
//****************************************************************************
// fgFixupStructReturn:
// The companion to impFixupCallStructReturn. Now that the importer is done
-// change the gtType to the precomputed native return type
+// change the gtType to the precomputed native return type
// requires that callNode currently has a struct type
//
-void Compiler::fgFixupStructReturn(GenTreePtr callNode)
+void Compiler::fgFixupStructReturn(GenTreePtr callNode)
{
assert(varTypeIsStruct(callNode));
- GenTreeCall* call = callNode->AsCall();
- bool callHasRetBuffArg = call->HasRetBufArg();
- bool isHelperCall = call->IsHelperCall();
+ GenTreeCall* call = callNode->AsCall();
+ bool callHasRetBuffArg = call->HasRetBufArg();
+ bool isHelperCall = call->IsHelperCall();
// Decide on the proper return type for this call that currently returns a struct
//
- CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
- Compiler::structPassingKind howToReturnStruct;
- var_types returnType;
+ CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
+ Compiler::structPassingKind howToReturnStruct;
+ var_types returnType;
// There are a couple of Helper Calls that say they return a TYP_STRUCT but they
// expect this method to re-type this to a TYP_REF (what is in call->gtReturnType)
@@ -5162,9 +5235,9 @@ void Compiler::fgFixupStructReturn(GenTreePtr callNode)
assert(!callHasRetBuffArg);
assert(retClsHnd == NO_CLASS_HANDLE);
- // Now that we are past the importer, re-type this node
+ // Now that we are past the importer, re-type this node
howToReturnStruct = SPK_PrimitiveType;
- returnType = (var_types)call->gtReturnType;
+ returnType = (var_types)call->gtReturnType;
}
else
{
@@ -5201,10 +5274,9 @@ void Compiler::fgFixupStructReturn(GenTreePtr callNode)
// with a return buffer (that returns TYP_VOID) or into a return
// of a primitive/enregisterable type
assert(!callHasRetBuffArg || (call->TypeGet() == TYP_VOID));
-#endif
+#endif
}
-
/*****************************************************************************
*
* A little helper used to rearrange nested commutative operations. The
@@ -5216,7 +5288,7 @@ void Compiler::fgFixupStructReturn(GenTreePtr callNode)
#if REARRANGE_ADDS
-void Compiler::fgMoveOpsLeft(GenTreePtr tree)
+void Compiler::fgMoveOpsLeft(GenTreePtr tree)
{
GenTreePtr op1;
GenTreePtr op2;
@@ -5229,15 +5301,16 @@ void Compiler::fgMoveOpsLeft(GenTreePtr tree)
oper = tree->OperGet();
noway_assert(GenTree::OperIsCommutative(oper));
- noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR ||
- oper == GT_AND || oper == GT_MUL);
+ noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL);
noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder);
noway_assert(oper == op2->gtOper);
// Commutativity doesn't hold if overflow checks are needed
if (tree->gtOverflowEx() || op2->gtOverflowEx())
+ {
return;
+ }
if (gtIsActiveCSE_Candidate(op2))
{
@@ -5250,16 +5323,17 @@ void Compiler::fgMoveOpsLeft(GenTreePtr tree)
}
if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT))
+ {
return;
+ }
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators
- if ( ((oper == GT_ADD) || (oper == GT_MUL))
- && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0) )
+ if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0))
{
return;
}
- if ( (tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN )
+ if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN)
{
// We could deal with this, but we were always broken and just hit the assert
// below regarding flags, which means it's not frequent, so will just bail out.
@@ -5269,42 +5343,45 @@ void Compiler::fgMoveOpsLeft(GenTreePtr tree)
noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx());
- GenTreePtr ad1 = op2->gtOp.gtOp1;
- GenTreePtr ad2 = op2->gtOp.gtOp2;
+ GenTreePtr ad1 = op2->gtOp.gtOp1;
+ GenTreePtr ad2 = op2->gtOp.gtOp2;
// Compiler::optOptimizeBools() can create GT_OR of two GC pointers yielding a GT_INT
// We cannot reorder such GT_OR trees
//
if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet()))
+ {
break;
+ }
/* Change "(x op (y op z))" to "(x op y) op z" */
/* ie. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */
GenTreePtr new_op1 = op2;
- new_op1->gtOp.gtOp1 = op1;
- new_op1->gtOp.gtOp2 = ad1;
-
+ new_op1->gtOp.gtOp1 = op1;
+ new_op1->gtOp.gtOp2 = ad1;
+
/* Change the flags. */
// Make sure we aren't throwing away any flags
- noway_assert((new_op1->gtFlags & ~(
- GTF_MAKE_CSE |
- GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag.
- GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated
- GTF_NODE_MASK|GTF_ALL_EFFECT|GTF_UNSIGNED)) == 0);
+ noway_assert((new_op1->gtFlags &
+ ~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag.
+ GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated
+ GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0);
- new_op1->gtFlags = (new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag.
- (op1->gtFlags & GTF_ALL_EFFECT) |
- (ad1->gtFlags & GTF_ALL_EFFECT);
+ new_op1->gtFlags =
+ (new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag.
+ (op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT);
/* Retype new_op1 if it has now become a GC ptr. */
- if (varTypeIsGC(op1->TypeGet()))
+ if (varTypeIsGC(op1->TypeGet()))
{
- noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_ADD) || // byref(ref + (int+int))
- (varTypeIsI (tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_OR)); // int(gcref | int(gcref|intval))
+ noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
+ oper == GT_ADD) || // byref(ref + (int+int))
+ (varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
+ oper == GT_OR)); // int(gcref | int(gcref|intval))
new_op1->gtType = tree->gtType;
}
@@ -5319,8 +5396,8 @@ void Compiler::fgMoveOpsLeft(GenTreePtr tree)
// vnStore is null before the ValueNumber phase has run
if (vnStore != nullptr)
{
- // We can only keep the old value number on new_op1 if both op1 and ad2
- // have the same non-NoVN value numbers. Since op is commutative, comparing
+ // We can only keep the old value number on new_op1 if both op1 and ad2
+ // have the same non-NoVN value numbers. Since op is commutative, comparing
// only ad2 and op1 is enough.
if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
(ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
@@ -5335,8 +5412,10 @@ void Compiler::fgMoveOpsLeft(GenTreePtr tree)
/* If 'new_op1' is now the same nested op, process it recursively */
- if ((ad1->gtOper == oper) && !ad1->gtOverflowEx())
+ if ((ad1->gtOper == oper) && !ad1->gtOverflowEx())
+ {
fgMoveOpsLeft(new_op1);
+ }
/* If 'ad2' is now the same nested op, process it
* Instead of recursion, we set up op1 and op2 for the next loop.
@@ -5344,8 +5423,7 @@ void Compiler::fgMoveOpsLeft(GenTreePtr tree)
op1 = new_op1;
op2 = ad2;
- }
- while ((op2->gtOper == oper) && !op2->gtOverflowEx());
+ } while ((op2->gtOper == oper) && !op2->gtOverflowEx());
return;
}
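// For illustration, the overall effect of the loop above on a right-nested chain of a
// commutative operator is a left rotation at each step:
//
//     ADD(a, ADD(b, ADD(c, d)))  ->  ADD(ADD(a, b), ADD(c, d))  ->  ADD(ADD(ADD(a, b), c), d)
//
// Each rotation reuses the inner node (op2 becomes new_op1) rather than allocating a new one,
// and the flags and value numbers are patched up as shown so no side-effect info is dropped.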
@@ -5354,20 +5432,19 @@ void Compiler::fgMoveOpsLeft(GenTreePtr tree)
/*****************************************************************************/
-void Compiler::fgSetRngChkTarget(GenTreePtr tree,
- bool delay)
+void Compiler::fgSetRngChkTarget(GenTreePtr tree, bool delay)
{
GenTreeBoundsChk* bndsChk = nullptr;
- SpecialCodeKind kind = SCK_RNGCHK_FAIL;
+ SpecialCodeKind kind = SCK_RNGCHK_FAIL;
#ifdef FEATURE_SIMD
if ((tree->gtOper == GT_ARR_BOUNDS_CHECK) || (tree->gtOper == GT_SIMD_CHK))
-#else // FEATURE_SIMD
+#else // FEATURE_SIMD
if (tree->gtOper == GT_ARR_BOUNDS_CHECK)
#endif // FEATURE_SIMD
{
bndsChk = tree->AsBoundsChk();
- kind = tree->gtBoundsChk.gtThrowKind;
+ kind = tree->gtBoundsChk.gtThrowKind;
}
else
{
@@ -5381,7 +5458,7 @@ void Compiler::fgSetRngChkTarget(GenTreePtr tree,
const unsigned callStkDepth = 0;
#endif
- if (opts.MinOpts())
+ if (opts.MinOpts())
{
delay = false;
@@ -5413,10 +5490,9 @@ void Compiler::fgSetRngChkTarget(GenTreePtr tree,
// fgPtrArgCntCur is only valid for global morph or if we walk full stmt.
noway_assert((bndsChk != nullptr) || fgGlobalMorph);
- unsigned stkDepth = (bndsChk != nullptr) ? bndsChk->gtStkDepth
- : callStkDepth;
+ unsigned stkDepth = (bndsChk != nullptr) ? bndsChk->gtStkDepth : callStkDepth;
- BasicBlock * rngErrBlk = fgRngChkTarget(compCurBB, stkDepth, kind);
+ BasicBlock* rngErrBlk = fgRngChkTarget(compCurBB, stkDepth, kind);
/* Add the label to the indirection node */
@@ -5447,16 +5523,16 @@ void Compiler::fgSetRngChkTarget(GenTreePtr tree,
* and label the constants and variables that occur in the tree.
*/
-const int MAX_ARR_COMPLEXITY = 4;
+const int MAX_ARR_COMPLEXITY = 4;
const int MAX_INDEX_COMPLEXITY = 4;
-GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
+GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
{
noway_assert(tree->gtOper == GT_INDEX);
GenTreeIndex* asIndex = tree->AsIndex();
- var_types elemTyp = tree->TypeGet();
- unsigned elemSize = tree->gtIndex.gtIndElemSize;
+ var_types elemTyp = tree->TypeGet();
+ unsigned elemSize = tree->gtIndex.gtIndElemSize;
CORINFO_CLASS_HANDLE elemStructType = tree->gtIndex.gtStructElemClass;
noway_assert(elemTyp != TYP_STRUCT || elemStructType != nullptr);
@@ -5480,18 +5556,18 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
}
#endif // FEATURE_SIMD
- GenTreePtr arrRef = asIndex->Arr();
- GenTreePtr index = asIndex->Index();
+ GenTreePtr arrRef = asIndex->Arr();
+ GenTreePtr index = asIndex->Index();
// Set up the array length's offset into lenOffs
// And the first element's offset into elemOffs
- ssize_t lenOffs;
- ssize_t elemOffs;
+ ssize_t lenOffs;
+ ssize_t elemOffs;
if (tree->gtFlags & GTF_INX_STRING_LAYOUT)
{
lenOffs = offsetof(CORINFO_String, stringLen);
elemOffs = offsetof(CORINFO_String, chars);
- tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE
+ tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE
}
else if (tree->gtFlags & GTF_INX_REFARR_LAYOUT)
{
@@ -5504,17 +5580,17 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
elemOffs = offsetof(CORINFO_Array, u1Elems);
}
- bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled
- bool nCSE = ((tree->gtFlags & GTF_DONT_CSE ) != 0);
+ bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled
+ bool nCSE = ((tree->gtFlags & GTF_DONT_CSE) != 0);
- GenTreePtr arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression
- GenTreePtr indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression
+ GenTreePtr arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression
+ GenTreePtr indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression
GenTreePtr bndsChk = nullptr;
// If we're doing range checking, introduce a GT_ARR_BOUNDS_CHECK node for the address.
if (chkd)
{
- GenTreePtr arrRef2 = nullptr; // The second copy will be used in array address expression
+ GenTreePtr arrRef2 = nullptr; // The second copy will be used in array address expression
GenTreePtr index2 = nullptr;
// If the arrRef expression involves an assignment, a call or reads from global memory,
@@ -5523,12 +5599,12 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
// dereference.
// Also we allocate the temporary when the arrRef is sufficiently complex/expensive.
//
- if ((arrRef->gtFlags & (GTF_ASG|GTF_CALL|GTF_GLOB_REF)) || gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY))
+ if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY))
{
unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr"));
- arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef);
- arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
- arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
+ arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef);
+ arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
+ arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
}
else
{
@@ -5542,12 +5618,12 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
// dereference.
// Also we allocate the temporary when the index is sufficiently complex/expensive.
//
- if ((index->gtFlags & (GTF_ASG|GTF_CALL|GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY))
+ if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY))
{
unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("arr expr"));
- indexDefn = gtNewTempAssign(indexTmpNum, index);
- index = gtNewLclvNode(indexTmpNum, index->TypeGet());
- index2 = gtNewLclvNode(indexTmpNum, index->TypeGet());
+ indexDefn = gtNewTempAssign(indexTmpNum, index);
+ index = gtNewLclvNode(indexTmpNum, index->TypeGet());
+ index2 = gtNewLclvNode(indexTmpNum, index->TypeGet());
}
else
{
@@ -5556,11 +5632,11 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
}
// Next introduce a GT_ARR_BOUNDS_CHECK node
- var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check.
+ var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check.
#ifdef _TARGET_64BIT_
// The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case
- // of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case,
+ // of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case,
+ // the comparison will have to be widened to 64 bits.
if (index->TypeGet() == TYP_I_IMPL)
{
@@ -5575,7 +5651,8 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
arrLen = gtNewCastNode(bndsChkType, arrLen, bndsChkType);
}
- GenTreeBoundsChk* arrBndsChk = new (this, GT_ARR_BOUNDS_CHECK) GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, arrLen, index, SCK_RNGCHK_FAIL);
+ GenTreeBoundsChk* arrBndsChk = new (this, GT_ARR_BOUNDS_CHECK)
+ GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, arrLen, index, SCK_RNGCHK_FAIL);
bndsChk = arrBndsChk;
@@ -5613,7 +5690,7 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
#endif // _TARGET_64BIT_
/* Scale the index value if necessary */
- if (elemSize > 1)
+ if (elemSize > 1)
{
GenTreePtr size = gtNewIconNode(elemSize, TYP_I_IMPL);
@@ -5669,18 +5746,20 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
/* An indirection will cause a GPF if the address is null */
tree->gtFlags |= GTF_EXCEPT;
- if (nCSE)
+ if (nCSE)
+ {
tree->gtFlags |= GTF_DONT_CSE;
+ }
// Store information about it.
- GetArrayInfoMap()->Set(tree, ArrayInfo(elemTyp, elemSize, (int) elemOffs, elemStructType));
+ GetArrayInfoMap()->Set(tree, ArrayInfo(elemTyp, elemSize, (int)elemOffs, elemStructType));
// Remember this 'indTree' that we just created, as we still need to attach the fieldSeq information to it.
GenTreePtr indTree = tree;
// Did we create a bndsChk tree?
- if (bndsChk)
+ if (bndsChk)
{
// Use a GT_COMMA node to prepend the array bound check
//
@@ -5703,27 +5782,27 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree);
}
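// Sketch of the result for a range-checked "arr[i]" where both temps were needed: the bounds
// check and the temp definitions are prepended via GT_COMMA, so the morphed tree reads roughly
//
//     COMMA(arrRefDefn,
//           COMMA(indexDefn,
//                 COMMA(ARR_BOUNDS_CHECK(arrLen, index),
//                       IND(arrRef2 + index2 * elemSize + elemOffs))))
//
// with the address part built from the second copies (arrRef2/index2) created earlier.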
- // Currently we morph the tree to perform some folding operations prior
+ // Currently we morph the tree to perform some folding operations prior
// to attaching fieldSeq info and labeling constant array index contributions
- //
+ //
fgMorphTree(tree);
- // Ideally we just want to proceed to attaching fieldSeq info and labeling the
- // constant array index contributions, but the morphing operation may have changed
+ // Ideally we just want to proceed to attaching fieldSeq info and labeling the
+ // constant array index contributions, but the morphing operation may have changed
// the 'tree' into something that now unconditionally throws an exception.
//
// In such a case the gtEffectiveVal could be a new tree or its gtOper could be modified
- // or it could be left unchanged. If it is unchanged then we should not return,
+ // or it could be left unchanged. If it is unchanged then we should not return,
// instead we should proceed to attaching fieldSeq info, etc...
- //
+ //
GenTreePtr arrElem = tree->gtEffectiveVal();
if (fgIsCommaThrow(tree))
{
- if ((arrElem != indTree) || // A new tree node may have been created
- (indTree->OperGet() != GT_IND)) // The GT_IND may have been changed to a GT_CNS_INT
+ if ((arrElem != indTree) || // A new tree node may have been created
+ (indTree->OperGet() != GT_IND)) // The GT_IND may have been changed to a GT_CNS_INT
{
- return tree; // Just return the Comma-Throw, don't try to attach the fieldSeq info, etc..
+ return tree; // Just return the Comma-Throw, don't try to attach the fieldSeq info, etc..
}
}
@@ -5750,13 +5829,13 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
// Label any constant array index contributions with #ConstantIndex and any LclVars with GTF_VAR_ARR_INDEX
index->LabelIndex(this);
- addr = addr->gtOp.gtOp1;
+ addr = addr->gtOp.gtOp1;
}
assert(addr->TypeGet() == TYP_REF);
}
else if (addr->OperGet() == GT_CNS_INT)
{
- cnsOff = addr;
+ cnsOff = addr;
}
FieldSeqNode* firstElemFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
@@ -5767,14 +5846,14 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
//
cnsOff->gtIntCon.gtFieldSeq = firstElemFseq;
}
- else // We have folded the first element's offset with the index expression
+ else // We have folded the first element's offset with the index expression
{
// Build the [#ConstantIndex, #FirstElem] field sequence
//
FieldSeqNode* constantIndexFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
FieldSeqNode* fieldSeq = GetFieldSeqStore()->Append(constantIndexFseq, firstElemFseq);
- if (cnsOff == nullptr) // It must have folded into a zero offset
+ if (cnsOff == nullptr) // It must have folded into a zero offset
{
// Record in the general zero-offset map.
GetZeroOffsetFieldMap()->Set(addr, fieldSeq);
@@ -5798,23 +5877,21 @@ GenTreePtr Compiler::fgMorphArrayIndex(GenTreePtr tree)
* so we don't need this code.
*
*/
-GenTreePtr Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs)
+GenTreePtr Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs)
{
/* For the fixed stack arguments of a varargs function, we need to go
through the varargs cookies to access them, except for the
cookie itself */
- LclVarDsc * varDsc = &lvaTable[lclNum];
+ LclVarDsc* varDsc = &lvaTable[lclNum];
- if (varDsc->lvIsParam && !varDsc->lvIsRegArg &&
- lclNum != lvaVarargsHandleArg)
+ if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg)
{
// Create a node representing the local pointing to the base of the args
- GenTreePtr ptrArg = gtNewOperNode(GT_SUB, TYP_I_IMPL,
- gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL),
- gtNewIconNode(varDsc->lvStkOffs
- - codeGen->intRegState.rsCalleeRegArgCount*sizeof(void*)
- + lclOffs));
+ GenTreePtr ptrArg =
+ gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL),
+ gtNewIconNode(varDsc->lvStkOffs - codeGen->intRegState.rsCalleeRegArgCount * sizeof(void*) +
+ lclOffs));
// Access the argument through the local
GenTreePtr tree = gtNewOperNode(GT_IND, varType, ptrArg);
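// In effect the fixed stack argument is reached through the varargs cookie base rather than
// directly, roughly as
//
//     *(lvaVarargsBaseOfStkArgs - (lvStkOffs - calleeRegArgBytes + lclOffs))
//
// where calleeRegArgBytes stands for rsCalleeRegArgCount * sizeof(void*) in the GT_SUB built
// just above.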
@@ -5837,13 +5914,13 @@ GenTreePtr Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_typ
* Transform the given GT_LCL_VAR tree for code generation.
*/
-GenTreePtr Compiler::fgMorphLocalVar(GenTreePtr tree)
+GenTreePtr Compiler::fgMorphLocalVar(GenTreePtr tree)
{
noway_assert(tree->gtOper == GT_LCL_VAR);
- unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
- var_types varType = lvaGetRealType(lclNum);
- LclVarDsc * varDsc = &lvaTable[lclNum];
+ unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
+ var_types varType = lvaGetRealType(lclNum);
+ LclVarDsc* varDsc = &lvaTable[lclNum];
if (varDsc->lvAddrExposed)
{
@@ -5862,20 +5939,19 @@ GenTreePtr Compiler::fgMorphLocalVar(GenTreePtr tree)
/* If not during the global morphing phase bail */
if (!fgGlobalMorph)
+ {
return tree;
+ }
bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0;
noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr
- if (!varAddr &&
- varTypeIsSmall(varDsc->TypeGet()) &&
- varDsc->lvNormalizeOnLoad())
+ if (!varAddr && varTypeIsSmall(varDsc->TypeGet()) && varDsc->lvNormalizeOnLoad())
{
#if LOCAL_ASSERTION_PROP
/* Assertion prop can tell us to omit adding a cast here */
- if (optLocalAssertionProp &&
- optAssertionIsSubrange(tree, varType, apFull) != NO_ASSERTION_INDEX)
+ if (optLocalAssertionProp && optAssertionIsSubrange(tree, varType, apFull) != NO_ASSERTION_INDEX)
{
return tree;
}
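// Otherwise a normalize-on-load small-typed local is typically read back through a cast to its
// declared small type; the assertion check above lets us skip that cast when local assertion
// prop can prove the value is already within the type's range.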
@@ -5896,22 +5972,23 @@ GenTreePtr Compiler::fgMorphLocalVar(GenTreePtr tree)
return tree;
}
-
/*****************************************************************************
Grab a temp for big offset morphing.
This method will grab a new temp if no temp of this "type" has been created.
Or it will return the same cached one if it has been created.
*/
-unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type)
+unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type)
{
unsigned lclNum = fgBigOffsetMorphingTemps[type];
- if (lclNum == BAD_VAR_NUM) {
+ if (lclNum == BAD_VAR_NUM)
+ {
// We haven't created a temp for this kind of type. Create one now.
- lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing"));
+ lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing"));
fgBigOffsetMorphingTemps[type] = lclNum;
}
- else {
+ else
+ {
// We better get the right type.
noway_assert(lvaTable[lclNum].TypeGet() == type);
}
@@ -5920,68 +5997,66 @@ unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type)
return lclNum;
}
-
/*****************************************************************************
*
* Transform the given GT_FIELD tree for code generation.
*/
-GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* mac)
- {
- assert(tree->gtOper == GT_FIELD);
+GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* mac)
+{
+ assert(tree->gtOper == GT_FIELD);
- noway_assert(tree->gtFlags & GTF_GLOB_REF);
+ noway_assert(tree->gtFlags & GTF_GLOB_REF);
- CORINFO_FIELD_HANDLE symHnd = tree->gtField.gtFldHnd;
- unsigned fldOffset = tree->gtField.gtFldOffset;
- GenTreePtr objRef = tree->gtField.gtFldObj;
- bool fieldMayOverlap = false;
- if (tree->gtField.gtFldMayOverlap)
- {
- fieldMayOverlap = true;
- // Reset the flag because we may reuse the node.
- tree->gtField.gtFldMayOverlap = false;
- }
+ CORINFO_FIELD_HANDLE symHnd = tree->gtField.gtFldHnd;
+ unsigned fldOffset = tree->gtField.gtFldOffset;
+ GenTreePtr objRef = tree->gtField.gtFldObj;
+ bool fieldMayOverlap = false;
+ if (tree->gtField.gtFldMayOverlap)
+ {
+ fieldMayOverlap = true;
+ // Reset the flag because we may reuse the node.
+ tree->gtField.gtFldMayOverlap = false;
+ }
#ifdef FEATURE_SIMD
- // if this field belongs to a simd struct, translate it to a simd intrinsic.
- if (mac == nullptr || mac->m_kind != MACK_Addr)
- {
- GenTreePtr newTree = fgMorphFieldToSIMDIntrinsicGet(tree);
- if (newTree != tree)
- {
- newTree = fgMorphSmpOp(newTree);
- return newTree;
- }
- }
- else if (objRef != nullptr && objRef->OperGet() == GT_ADDR && objRef->OperIsSIMD())
- {
- // We have a field of an SIMD intrinsic in an address-taken context.
- // We need to copy the SIMD result to a temp, and take the field of that.
- GenTree* copy = fgCopySIMDNode(objRef->gtOp.gtOp1->AsSIMD());
- objRef->gtOp.gtOp1 = copy;
- }
+ // if this field belongs to a simd struct, translate it to a simd intrinsic.
+ if (mac == nullptr || mac->m_kind != MACK_Addr)
+ {
+ GenTreePtr newTree = fgMorphFieldToSIMDIntrinsicGet(tree);
+ if (newTree != tree)
+ {
+ newTree = fgMorphSmpOp(newTree);
+ return newTree;
+ }
+ }
+ else if (objRef != nullptr && objRef->OperGet() == GT_ADDR && objRef->OperIsSIMD())
+ {
+ // We have a field of an SIMD intrinsic in an address-taken context.
+ // We need to copy the SIMD result to a temp, and take the field of that.
+ GenTree* copy = fgCopySIMDNode(objRef->gtOp.gtOp1->AsSIMD());
+ objRef->gtOp.gtOp1 = copy;
+ }
#endif
-
- /* Is this an instance data member? */
- if (objRef)
- {
- GenTreePtr addr;
+ /* Is this an instance data member? */
+
+ if (objRef)
+ {
+ GenTreePtr addr;
if (tree->gtFlags & GTF_IND_TLS_REF)
+ {
NO_WAY("instance field can not be a TLS ref.");
+ }
/* We'll create the expression "*(objRef + mem_offs)" */
- noway_assert(varTypeIsGC(objRef->TypeGet()) ||
- objRef->TypeGet() == TYP_I_IMPL);
+ noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL);
// An optimization for Contextful classes:
// we unwrap the proxy when we have a 'this reference'
- if (info.compIsContextful &&
- info.compUnwrapContextful &&
- impIsThis(objRef))
+ if (info.compIsContextful && info.compUnwrapContextful && impIsThis(objRef))
{
objRef = fgUnwrapProxy(objRef);
}
@@ -6054,16 +6129,19 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
*/
- var_types objRefType = objRef->TypeGet();
+ var_types objRefType = objRef->TypeGet();
- GenTreePtr comma = NULL;
+ GenTreePtr comma = nullptr;
bool addedExplicitNullCheck = false;
// NULL mac means we encounter the GT_FIELD first. This denotes a dereference of the field,
// and thus is equivalent to a MACK_Ind with zero offset.
MorphAddrContext defMAC(MACK_Ind);
- if (mac == NULL) mac = &defMAC;
+ if (mac == nullptr)
+ {
+ mac = &defMAC;
+ }
// This flag is set to enable the "conservative" style of explicit null-check insertion.
// This means that we insert an explicit null check whenever we create byref by adding a
@@ -6083,16 +6161,15 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
// If the objRef is a GT_ADDR node, it, itself, never requires null checking. The expression
// whose address is being taken is either a local or static variable, whose address is necessarily
// non-null, or else it is a field dereference, which will do its own bounds checking if necessary.
- if (objRef->gtOper != GT_ADDR
- && ((mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind)
- && (!mac->m_allConstantOffsets
- || fgIsBigOffset(mac->m_totalOffset + fldOffset)
+ if (objRef->gtOper != GT_ADDR && ((mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind) &&
+ (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset)
#if CONSERVATIVE_NULL_CHECK_BYREF_CREATION
- || (mac->m_kind == MACK_Addr && (mac->m_totalOffset + fldOffset > 0))
+ || (mac->m_kind == MACK_Addr && (mac->m_totalOffset + fldOffset > 0))
#else
- || (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr && (mac->m_totalOffset + fldOffset > 0))
+ || (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr &&
+ (mac->m_totalOffset + fldOffset > 0))
#endif
- )))
+ )))
{
#ifdef DEBUG
if (verbose)
@@ -6105,8 +6182,8 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
//
// Create the "comma" subtree
//
- GenTreePtr asg = NULL;
- GenTreePtr nullchk;
+ GenTreePtr asg = nullptr;
+ GenTreePtr nullchk;
unsigned lclNum;
@@ -6115,7 +6192,7 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet()));
// Create the "asg" node
- asg = gtNewTempAssign(lclNum, objRef);
+ asg = gtNewTempAssign(lclNum, objRef);
}
else
{
@@ -6125,9 +6202,9 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
// Create the "nullchk" node.
// Make it TYP_BYTE so we only dereference it for 1 byte.
GenTreePtr lclVar = gtNewLclvNode(lclNum, objRefType);
- nullchk = new(this, GT_NULLCHECK) GenTreeIndir(GT_NULLCHECK, TYP_BYTE, lclVar, nullptr);
-
- nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections
+ nullchk = new (this, GT_NULLCHECK) GenTreeIndir(GT_NULLCHECK, TYP_BYTE, lclVar, nullptr);
+
+ nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections
// An indirection will cause a GPF if the address is null.
nullchk->gtFlags |= GTF_EXCEPT;
@@ -6138,27 +6215,27 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
if (asg)
{
// Create the "comma" node.
- comma = gtNewOperNode(GT_COMMA,
- TYP_VOID, // We don't want to return anything from this "comma" node.
- // Set the type to TYP_VOID, so we can select "cmp" instruction
- // instead of "mov" instruction later on.
- asg,
- nullchk);
+ comma = gtNewOperNode(GT_COMMA,
+ TYP_VOID, // We don't want to return anything from this "comma" node.
+ // Set the type to TYP_VOID, so we can select "cmp" instruction
+ // instead of "mov" instruction later on.
+ asg, nullchk);
}
else
{
comma = nullchk;
}
- addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node.
+ addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node.
addedExplicitNullCheck = true;
}
- else if (fldOffset == 0)
+ else if (fldOffset == 0)
{
// Generate the "addr" node.
addr = objRef;
- FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
+ FieldSeqNode* fieldSeq =
+ fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
GetZeroOffsetFieldMap()->Set(addr, fieldSeq);
}
else
@@ -6174,26 +6251,18 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
if (tree->gtField.gtFieldLookup.accessType == IAT_PVALUE)
baseOffset = gtNewOperNode(GT_IND, TYP_I_IMPL, baseOffset);
- addr = gtNewOperNode(GT_ADD,
- (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL
- : TYP_BYREF),
- addr,
- baseOffset
- );
+ addr =
+ gtNewOperNode(GT_ADD, (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL : TYP_BYREF), addr, baseOffset);
}
#endif
- if (fldOffset != 0)
+ if (fldOffset != 0)
{
// Generate the "addr" node.
/* Add the member offset to the object's address */
- FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
- addr = gtNewOperNode(GT_ADD,
- (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL
- : TYP_BYREF),
- addr,
- gtNewIconHandleNode(fldOffset,
- GTF_ICON_FIELD_OFF,
- fieldSeq));
+ FieldSeqNode* fieldSeq =
+ fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
+ addr = gtNewOperNode(GT_ADD, (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL : TYP_BYREF), addr,
+ gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq));
}
// Now let's set the "tree" as a GT_IND tree.
@@ -6215,23 +6284,22 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
GenTreePtr comma2;
comma2 = gtNewOperNode(GT_COMMA,
addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node.
- comma,
- addr);
+ comma, addr);
tree->gtOp.gtOp1 = comma2;
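// With the explicit null check added, the instance field access now has roughly the shape
//
//     IND( COMMA( COMMA(tmp = objRef, NULLCHECK(tmp)),   // TYP_VOID comma
//                 tmp [+ fldOffset] ) )
//
// and when objRef is already a local the assignment is omitted, so the null check simply
// dereferences that local.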
}
#ifdef DEBUG
if (verbose)
{
- if (addedExplicitNullCheck) {
+ if (addedExplicitNullCheck)
+ {
printf("After adding explicit null check:\n");
gtDispTree(tree);
}
}
#endif
-
}
- else /* This is a static data member */
+ else /* This is a static data member */
{
if (tree->gtFlags & GTF_IND_TLS_REF)
{
@@ -6261,19 +6329,21 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
//
// # Denotes the original node
//
- void ** pIdAddr = NULL;
- unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**) &pIdAddr);
+ void** pIdAddr = nullptr;
+ unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr);
//
// If we can access the TLS DLL index ID value directly
// then pIdAddr will be NULL and
// IdValue will be the actual TLS DLL index ID
//
- GenTreePtr dllRef = NULL;
- if (pIdAddr == NULL)
+ GenTreePtr dllRef = nullptr;
+ if (pIdAddr == nullptr)
{
if (IdValue != 0)
- dllRef = gtNewIconNode(IdValue*4, TYP_I_IMPL);
+ {
+ dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL);
+ }
}
else
{
@@ -6286,7 +6356,7 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL));
}
- #define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides
+#define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides
// Mark this ICON as a TLS_HDL, codegen will use FS:[cns]
@@ -6294,7 +6364,7 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);
- if (dllRef != NULL)
+ if (dllRef != nullptr)
{
/* Add the dllRef */
tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef);
@@ -6305,8 +6375,9 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
if (fldOffset != 0)
{
- FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
- GenTreePtr fldOffsetNode = new(this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq);
+ FieldSeqNode* fieldSeq =
+ fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
+ GenTreePtr fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq);
/* Add the TLS static field offset to the address */
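// Taken together, the visible steps form the usual x86 TLS walk: load the slot-array pointer
// from FS:[WIN32_TLS_SLOTS], add the per-module offset (IdValue * 4, or the value loaded via
// pIdAddr scaled by 4) when present, and finally add the field's offset constant, which carries
// the field sequence created above.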
@@ -6329,10 +6400,10 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
// then pFldAddr will be NULL and
// fldAddr will be the actual address of the static field
//
- void ** pFldAddr = NULL;
- void * fldAddr = info.compCompHnd->getFieldAddress(symHnd, (void**) &pFldAddr);
+ void** pFldAddr = nullptr;
+ void* fldAddr = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr);
- if (pFldAddr == NULL)
+ if (pFldAddr == nullptr)
{
#ifdef _TARGET_64BIT_
if (IMAGE_REL_BASED_REL32 != eeGetRelocTypeHint(fldAddr))
@@ -6341,12 +6412,13 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
// constant, so we handle it properly
GenTreePtr addr = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL);
- addr->gtType = TYP_I_IMPL;
- FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
+ addr->gtType = TYP_I_IMPL;
+ FieldSeqNode* fieldSeq =
+ fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
addr->gtIntCon.gtFieldSeq = fieldSeq;
tree->SetOper(GT_IND);
- tree->gtOp.gtOp1 = addr;
+ tree->gtOp.gtOp1 = addr;
return fgMorphSmpOp(tree);
}
@@ -6358,7 +6430,8 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
noway_assert(GTF_FLD_VOLATILE == GTF_IND_VOLATILE);
tree->SetOper(GT_CLS_VAR);
tree->gtClsVar.gtClsVarHnd = symHnd;
- FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
+ FieldSeqNode* fieldSeq =
+ fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
tree->gtClsVar.gtFieldSeq = fieldSeq;
}
@@ -6366,7 +6439,7 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
}
else
{
- GenTreePtr addr = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL);
+ GenTreePtr addr = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL);
// There are two cases here, either the static is RVA based,
// in which case the type of the FIELD node is not a GC type
@@ -6374,10 +6447,9 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
// a GC type and the handle to it is a TYP_BYREF in the GC heap
// because handles to statics now go into the large object heap
- var_types handleTyp = (var_types) (varTypeIsGC(tree->TypeGet()) ? TYP_BYREF
- : TYP_I_IMPL);
+ var_types handleTyp = (var_types)(varTypeIsGC(tree->TypeGet()) ? TYP_BYREF : TYP_I_IMPL);
GenTreePtr op1 = gtNewOperNode(GT_IND, handleTyp, addr);
- op1->gtFlags |= GTF_IND_INVARIANT;
+ op1->gtFlags |= GTF_IND_INVARIANT;
tree->SetOper(GT_IND);
tree->gtOp.gtOp1 = op1;
@@ -6392,15 +6464,14 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
{
GenTreePtr addr = res->gtOp.gtOp1;
// Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node.
- FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
+ FieldSeqNode* fieldSeq =
+ fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd);
fgAddFieldSeqForZeroOffset(addr, fieldSeq);
}
return res;
-
}
-
//------------------------------------------------------------------------------
// fgMorphCallInline: attempt to inline a call
//
@@ -6418,7 +6489,7 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
// possible inline are undone, and the candidate flag on the call
// is cleared.
-void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult)
+void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult)
{
// The call must be a candidate for inlining.
assert((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0);
@@ -6470,7 +6541,7 @@ void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result)
{
// Don't expect any surprises here.
assert(result->IsCandidate());
-
+
if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING)
{
// For now, attributing this to call site, though it's really
@@ -6503,22 +6574,22 @@ void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result)
result->NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
return;
}
-
+
//
// Calling inlinee's compiler to inline the method.
//
- unsigned startVars = lvaCount;
+ unsigned startVars = lvaCount;
#ifdef DEBUG
if (verbose)
{
- printf("Expanding INLINE_CANDIDATE in statement ");
- printTreeID(fgMorphStmt);
- printf(" in BB%02u:\n", compCurBB->bbNum);
- gtDispTree(fgMorphStmt);
-
- // printf("startVars=%d.\n", startVars);
+ printf("Expanding INLINE_CANDIDATE in statement ");
+ printTreeID(fgMorphStmt);
+ printf(" in BB%02u:\n", compCurBB->bbNum);
+ gtDispTree(fgMorphStmt);
+
+ // printf("startVars=%d.\n", startVars);
}
#endif
@@ -6527,36 +6598,36 @@ void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result)
//
// Invoke the compiler to inline the call.
//
-
+
fgInvokeInlineeCompiler(call, result);
- if (result->IsFailure())
+ if (result->IsFailure())
{
- // Undo some changes made in anticipation of inlining...
+ // Undo some changes made in anticipation of inlining...
- // Zero out the used locals
- memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable));
- for (unsigned i = startVars; i < lvaCount; i++)
- {
- new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(this); // call the constructor.
- }
+ // Zero out the used locals
+ memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable));
+ for (unsigned i = startVars; i < lvaCount; i++)
+ {
+ new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(this); // call the constructor.
+ }
- lvaCount = startVars;
+ lvaCount = startVars;
#ifdef DEBUG
- if (verbose)
- {
- // printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount);
- }
+ if (verbose)
+ {
+ // printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount);
+ }
#endif
- return;
+ return;
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- // printf("After inlining lvaCount=%d.\n", lvaCount);
+ // printf("After inlining lvaCount=%d.\n", lvaCount);
}
#endif
}
@@ -6565,7 +6636,7 @@ void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result)
*
* Performs checks to see if this tail call can be optimized as epilog+jmp.
*/
-bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
+bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
{
#if FEATURE_FASTTAILCALL
// Reaching here means that the return types of caller and callee are tail call compatible.
@@ -6581,17 +6652,17 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
#ifdef DEBUG
if (callee->IsTailPrefixedCall())
{
- assert(impTailCallRetTypeCompatible(info.compRetNativeType, info.compMethodInfo->args.retTypeClass,
+ assert(impTailCallRetTypeCompatible(info.compRetNativeType, info.compMethodInfo->args.retTypeClass,
(var_types)callee->gtReturnType, callee->callSig->retTypeClass));
}
#endif
// Note on vararg methods:
// If the caller is a vararg method, we don't know the number of arguments passed by the caller's caller.
- // But we can be sure that in-coming arg area of vararg caller would be sufficient to hold its
+ // But we can be sure that in-coming arg area of vararg caller would be sufficient to hold its
// fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as
// out-going area required for callee is bounded by caller's fixed argument space.
- //
+ //
// Note that the callee being a vararg method is not a problem since we can account for the params being passed.
// Count of caller args including implicit and hidden (i.e. thisPtr, RetBuf, GenericContext, VarargCookie)
@@ -6600,13 +6671,13 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
// Count the callee args including implicit and hidden.
// Note that GenericContext and VarargCookie are added by importer while
// importing the call to gtCallArgs list along with explicit user args.
- unsigned nCalleeArgs = 0;
- if (callee->gtCallObjp) // thisPtr
+ unsigned nCalleeArgs = 0;
+ if (callee->gtCallObjp) // thisPtr
{
nCalleeArgs++;
}
- if (callee->HasRetBufArg()) // RetBuf
+ if (callee->HasRetBufArg()) // RetBuf
{
nCalleeArgs++;
@@ -6647,7 +6718,7 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
unsigned typeSize = 0;
- hasMultiByteArgs = !VarTypeIsMultiByteAndCanEnreg(argx->TypeGet(), objClass, &typeSize, false);
+ hasMultiByteArgs = !VarTypeIsMultiByteAndCanEnreg(argx->TypeGet(), objClass, &typeSize, false);
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARM64_)
// On System V/arm64 the args could be a 2 eightbyte struct that is passed in two registers.
@@ -6669,7 +6740,6 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
assert(!"Target platform ABI rules regarding passing struct type args in registers");
unreached();
#endif //_TARGET_AMD64_ || _TARGET_ARM64_
-
}
else
{
@@ -6702,12 +6772,11 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
#endif
}
-
/*****************************************************************************
*
* Transform the given GT_CALL tree for tail call code generation.
*/
-void Compiler::fgMorphTailCall(GenTreeCall* call)
+void Compiler::fgMorphTailCall(GenTreeCall* call)
{
JITDUMP("fgMorphTailCall (before):\n");
DISPTREE(call);
@@ -6718,42 +6787,39 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
// Check for PInvoke call types that we don't handle in codegen yet.
assert(!call->IsUnmanaged());
- assert(call->IsVirtual() ||
- (call->gtCallType != CT_INDIRECT) ||
- (call->gtCallCookie == NULL));
+ assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == NULL));
// First move the this pointer (if any) onto the regular arg list
GenTreePtr thisPtr = NULL;
if (call->gtCallObjp)
{
- GenTreePtr objp = call->gtCallObjp;
+ GenTreePtr objp = call->gtCallObjp;
call->gtCallObjp = NULL;
- if ((call->gtFlags & GTF_CALL_NULLCHECK) ||
- call->IsVirtualVtable())
+ if ((call->gtFlags & GTF_CALL_NULLCHECK) || call->IsVirtualVtable())
{
- thisPtr = gtClone(objp, true);
- var_types vt = objp->TypeGet();
+ thisPtr = gtClone(objp, true);
+ var_types vt = objp->TypeGet();
if (thisPtr == NULL)
{
// Too complex, so use a temp
- unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
- GenTreePtr asg = gtNewTempAssign(lclNum, objp);
+ unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
+ GenTreePtr asg = gtNewTempAssign(lclNum, objp);
if (!call->IsVirtualVtable())
{
// Add an indirection to get the nullcheck
GenTreePtr tmp = gtNewLclvNode(lclNum, vt);
GenTreePtr ind = gtNewOperNode(GT_IND, TYP_INT, tmp);
- asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, ind);
+ asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, ind);
}
- objp = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt));
+ objp = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt));
thisPtr = gtNewLclvNode(lclNum, vt);
}
else if (!call->IsVirtualVtable())
{
GenTreePtr ind = gtNewOperNode(GT_IND, TYP_INT, thisPtr);
- objp = gtNewOperNode(GT_COMMA, vt, ind, objp);
- thisPtr = gtClone(thisPtr, true);
+ objp = gtNewOperNode(GT_COMMA, vt, ind, objp);
+ thisPtr = gtClone(thisPtr, true);
}
call->gtFlags &= ~GTF_CALL_NULLCHECK;
@@ -6769,19 +6835,21 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
flags = CORINFO_TAILCALL_STUB_DISPATCH_ARG;
GenTreePtr arg;
- if (call->gtCallType == CT_INDIRECT) {
+ if (call->gtCallType == CT_INDIRECT)
+ {
arg = gtClone(call->gtCallAddr, true);
noway_assert(arg != NULL);
}
- else {
+ else
+ {
noway_assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT);
ssize_t addr = ssize_t(call->gtStubCallStubAddr);
- arg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
+ arg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
// Change the call type, so we can add the extra indirection here, rather than in codegen
- call->gtCallAddr = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
+ call->gtCallAddr = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
call->gtStubCallStubAddr = NULL;
- call->gtCallType = CT_INDIRECT;
+ call->gtCallType = CT_INDIRECT;
}
// Add the extra indirection to generate the real target
call->gtCallAddr = gtNewOperNode(GT_IND, TYP_I_IMPL, call->gtCallAddr);
@@ -6800,8 +6868,8 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
GenTreePtr vtbl = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
vtbl->gtFlags |= GTF_EXCEPT;
- unsigned vtabOffsOfIndirection;
- unsigned vtabOffsAfterIndirection;
+ unsigned vtabOffsOfIndirection;
+ unsigned vtabOffsAfterIndirection;
info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection);
/* Get the appropriate vtable chunk */
@@ -6819,7 +6887,7 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
assert(!call->IsVirtual());
call->gtCallType = CT_INDIRECT;
- call->gtCallAddr = vtbl;
+ call->gtCallAddr = vtbl;
call->gtCallCookie = NULL;
call->gtFlags |= GTF_EXCEPT;
}
@@ -6832,9 +6900,9 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
// Lastly inject the pointer for the copy routine
noway_assert(call->callSig != NULL);
- void * pfnCopyArgs = info.compCompHnd->getTailCallCopyArgsThunk(call->callSig, flags);
- arg = gtNewIconHandleNode(ssize_t(pfnCopyArgs), GTF_ICON_FTN_ADDR);
- call->gtCallArgs = gtNewListNode(arg, call->gtCallArgs);
+ void* pfnCopyArgs = info.compCompHnd->getTailCallCopyArgsThunk(call->callSig, flags);
+ arg = gtNewIconHandleNode(ssize_t(pfnCopyArgs), GTF_ICON_FTN_ADDR);
+ call->gtCallArgs = gtNewListNode(arg, call->gtCallArgs);
// It is now a varargs tail call
call->gtCallMoreFlags = GTF_CALL_M_VARARGS | GTF_CALL_M_TAILCALL;
@@ -6857,7 +6925,8 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
//
// For x86, the tailcall helper is defined as:
//
- // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* callTarget)
+ // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void*
+ // callTarget)
//
// Note that the special arguments are on the stack, whereas the function arguments follow
// the normal convention: there might be register arguments in ECX and EDX. The stack will
@@ -6884,10 +6953,8 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
// Check for PInvoke call types that we don't handle in codegen yet.
assert(!call->IsUnmanaged());
- assert(call->IsVirtual() ||
- (call->gtCallType != CT_INDIRECT) ||
- (call->gtCallCookie == NULL));
-
+ assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr));
+
// Don't support tail calling helper methods
assert(call->gtCallType != CT_HELPER);
@@ -6923,20 +6990,20 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
if (call->gtCallObjp)
{
GenTreePtr thisPtr = nullptr;
- GenTreePtr objp = call->gtCallObjp;
- call->gtCallObjp = nullptr;
+ GenTreePtr objp = call->gtCallObjp;
+ call->gtCallObjp = nullptr;
#ifdef _TARGET_X86_
if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->IsLocal())
{
- // tmp = "this"
- unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
- GenTreePtr asg = gtNewTempAssign(lclNum, objp);
-
+ // tmp = "this"
+ unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
+ GenTreePtr asg = gtNewTempAssign(lclNum, objp);
+
// COMMA(tmp = "this", tmp)
- var_types vt = objp->TypeGet();
+ var_types vt = objp->TypeGet();
GenTreePtr tmp = gtNewLclvNode(lclNum, vt);
- thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp);
+ thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp);
objp = thisPtr;
}
@@ -6946,32 +7013,32 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
{
// clone "this" if "this" has no side effects.
if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT))
- {
+ {
thisPtr = gtClone(objp, true);
}
-
+
var_types vt = objp->TypeGet();
if (thisPtr == nullptr)
{
// create a temp if either "this" has side effects or "this" is too complex to clone.
-
- // tmp = "this"
- unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
- GenTreePtr asg = gtNewTempAssign(lclNum, objp);
-
+
+ // tmp = "this"
+ unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
+ GenTreePtr asg = gtNewTempAssign(lclNum, objp);
+
// COMMA(tmp = "this", deref(tmp))
GenTreePtr tmp = gtNewLclvNode(lclNum, vt);
GenTreePtr ind = gtNewOperNode(GT_IND, TYP_INT, tmp);
- asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, ind);
+ asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, ind);
- // COMMA(COMMA(tmp = "this", deref(tmp)), tmp)
+ // COMMA(COMMA(tmp = "this", deref(tmp)), tmp)
thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt));
- }
+ }
else
{
// thisPtr = COMMA(deref("this"), "this")
GenTreePtr ind = gtNewOperNode(GT_IND, TYP_INT, thisPtr);
- thisPtr = gtNewOperNode(GT_COMMA, vt, ind, gtClone(objp, true));
+ thisPtr = gtNewOperNode(GT_COMMA, vt, ind, gtClone(objp, true));
}
call->gtFlags &= ~GTF_CALL_NULLCHECK;
@@ -7010,7 +7077,7 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
noway_assert((call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0);
ssize_t addr = ssize_t(call->gtStubCallStubAddr);
- stubAddrArg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
+ stubAddrArg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
}
// Push the stub address onto the list of arguments
@@ -7018,14 +7085,14 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
}
// Now inject a placeholder for the real call target that Lower phase will generate.
- GenTreePtr arg = gtNewIconNode(0, TYP_I_IMPL);
+ GenTreePtr arg = gtNewIconNode(0, TYP_I_IMPL);
call->gtCallArgs = gtNewListNode(arg, call->gtCallArgs);
// Inject the pointer for the copy routine to be used for struct copying
noway_assert(call->callSig != nullptr);
- void * pfnCopyArgs = info.compCompHnd->getTailCallCopyArgsThunk(call->callSig, flags);
- arg = gtNewIconHandleNode(ssize_t(pfnCopyArgs), GTF_ICON_FTN_ADDR);
- call->gtCallArgs = gtNewListNode(arg, call->gtCallArgs);
+ void* pfnCopyArgs = info.compCompHnd->getTailCallCopyArgsThunk(call->callSig, flags);
+ arg = gtNewIconHandleNode(ssize_t(pfnCopyArgs), GTF_ICON_FTN_ADDR);
+ call->gtCallArgs = gtNewListNode(arg, call->gtCallArgs);
#else // !_TARGET_AMD64_
@@ -7039,27 +7106,28 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
assert(ppArg != nullptr);
assert(*ppArg == nullptr);
- unsigned nOldStkArgsWords = (compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
+ unsigned nOldStkArgsWords =
+ (compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL);
- *ppArg = gtNewListNode(arg3, nullptr); // numberOfOldStackArgs
- ppArg = (GenTreeArgList**)&((*ppArg)->gtOp2);
+ *ppArg = gtNewListNode(arg3, nullptr); // numberOfOldStackArgs
+ ppArg = (GenTreeArgList**)&((*ppArg)->gtOp2);
// Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate.
// The constant will be replaced.
GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL);
- *ppArg = gtNewListNode(arg2, nullptr); // numberOfNewStackArgs
- ppArg = (GenTreeArgList**)&((*ppArg)->gtOp2);
+ *ppArg = gtNewListNode(arg2, nullptr); // numberOfNewStackArgs
+ ppArg = (GenTreeArgList**)&((*ppArg)->gtOp2);
// Inject a placeholder for the flags.
// The constant will be replaced.
GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL);
- *ppArg = gtNewListNode(arg1, nullptr);
- ppArg = (GenTreeArgList**)&((*ppArg)->gtOp2);
+ *ppArg = gtNewListNode(arg1, nullptr);
+ ppArg = (GenTreeArgList**)&((*ppArg)->gtOp2);
// Inject a placeholder for the real call target that the Lowering phase will generate.
// The constant will be replaced.
GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL);
- *ppArg = gtNewListNode(arg0, nullptr);
+ *ppArg = gtNewListNode(arg0, nullptr);
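// The tail of the argument list now mirrors the JIT_TailCall signature quoted earlier:
// numberOfOldStackArgsWords (computed here), then numberOfNewStackArgsWords, the flags and the
// real call target; the constants 9, 8 and 7 are only placeholders that the Lowering phase
// replaces.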
#endif // !_TARGET_AMD64_
@@ -7093,7 +7161,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
// Transform recursive tail call into a loop.
GenTreePtr earlyArgInsertionPoint = last;
- IL_OFFSETX callILOffset = last->gtStmt.gtStmtILoffsx;
+ IL_OFFSETX callILOffset = last->gtStmt.gtStmtILoffsx;
// Hoist arg setup statement for the 'this' argument.
GenTreePtr thisArg = recursiveTailCall->gtCallObjp;
@@ -7101,7 +7169,6 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
{
GenTreePtr thisArgStmt = gtNewStmt(thisArg, callILOffset);
fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt);
-
}
// All arguments whose trees may involve caller parameter local variables need to be assigned to temps first;
@@ -7143,15 +7210,14 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
// [000057] - A---------- \--* = int
// [000056] D------N---- \--* lclVar int V01 arg1
- GenTreePtr tmpAssignmentInsertionPoint = last;
+ GenTreePtr tmpAssignmentInsertionPoint = last;
GenTreePtr paramAssignmentInsertionPoint = last;
// Process early args. They may contain both setup statements for late args and actual args.
// Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum
// below has the correct second argument.
int earlyArgIndex = (thisArg == nullptr) ? 0 : 1;
- for (GenTreeArgList* earlyArgs = recursiveTailCall->gtCallArgs;
- earlyArgs != nullptr;
+ for (GenTreeArgList* earlyArgs = recursiveTailCall->gtCallArgs; earlyArgs != nullptr;
(earlyArgIndex++, earlyArgs = earlyArgs->Rest()))
{
GenTreePtr earlyArg = earlyArgs->Current();
@@ -7167,8 +7233,9 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
{
// This is an actual argument that needs to be assigned to the corresponding caller parameter.
fgArgTabEntryPtr curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex);
- GenTreePtr paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry, block, callILOffset,
- tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint);
+ GenTreePtr paramAssignStmt =
+ fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry, block, callILOffset,
+ tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint);
if ((tmpAssignmentInsertionPoint == last) && (paramAssignStmt != nullptr))
{
// All temp assignments will happen before the first param assignment.
@@ -7180,15 +7247,15 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
// Process late args.
int lateArgIndex = 0;
- for (GenTreeArgList* lateArgs = recursiveTailCall->gtCallLateArgs;
- lateArgs != nullptr;
+ for (GenTreeArgList* lateArgs = recursiveTailCall->gtCallLateArgs; lateArgs != nullptr;
(lateArgIndex++, lateArgs = lateArgs->Rest()))
{
// A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter.
- GenTreePtr lateArg = lateArgs->Current();
+ GenTreePtr lateArg = lateArgs->Current();
fgArgTabEntryPtr curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex);
- GenTreePtr paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry, block, callILOffset,
- tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint);
+ GenTreePtr paramAssignStmt =
+ fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry, block, callILOffset,
+ tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint);
if ((tmpAssignmentInsertionPoint == last) && (paramAssignStmt != nullptr))
{
@@ -7202,9 +7269,9 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
// block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here.
if (!info.compIsStatic && (lvaArg0Var != info.compThisArg))
{
- var_types thisType = lvaTable[info.compThisArg].TypeGet();
- GenTreePtr arg0 = gtNewLclvNode(lvaArg0Var, thisType);
- GenTreePtr arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType));
+ var_types thisType = lvaTable[info.compThisArg].TypeGet();
+ GenTreePtr arg0 = gtNewLclvNode(lvaArg0Var, thisType);
+ GenTreePtr arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType));
GenTreePtr arg0AssignmentStmt = gtNewStmt(arg0Assignment, callILOffset);
fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt);
}
@@ -7234,19 +7301,19 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
// Return Value:
// parameter assignment statement if one was inserted; nullptr otherwise.
-GenTreePtr Compiler::fgAssignRecursiveCallArgToCallerParam(GenTreePtr arg,
+GenTreePtr Compiler::fgAssignRecursiveCallArgToCallerParam(GenTreePtr arg,
fgArgTabEntryPtr argTabEntry,
- BasicBlock *block,
- IL_OFFSETX callILOffset,
- GenTreePtr tmpAssignmentInsertionPoint,
- GenTreePtr paramAssignmentInsertionPoint)
+ BasicBlock* block,
+ IL_OFFSETX callILOffset,
+ GenTreePtr tmpAssignmentInsertionPoint,
+ GenTreePtr paramAssignmentInsertionPoint)
{
// Call arguments should be assigned to temps first and then the temps should be assigned to parameters because
// some argument trees may reference parameters directly.
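// For example, rewriting a recursive tail call "return Foo(y, x);" inside Foo(x, y) as direct
// parameter assignments "x = y; y = x;" would let the second assignment see the already
// overwritten 'x'; routing each argument through a temp first ("t0 = y; t1 = x; x = t0; y = t1;")
// preserves the original values.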
- GenTreePtr argInTemp = nullptr;
- unsigned originalArgNum = argTabEntry->argNum;
- bool needToAssignParameter = true;
+ GenTreePtr argInTemp = nullptr;
+ unsigned originalArgNum = argTabEntry->argNum;
+ bool needToAssignParameter = true;
// TODO-CQ: enable calls with struct arguments passed in registers.
noway_assert(!varTypeIsStruct(arg->TypeGet()));
@@ -7258,8 +7325,8 @@ GenTreePtr Compiler::fgAssignRecursiveCallArgToCallerParam(GenTreePtr arg,
}
else if (arg->OperGet() == GT_LCL_VAR)
{
- unsigned lclNum = arg->AsLclVar()->gtLclNum;
- LclVarDsc * varDsc = &lvaTable[lclNum];
+ unsigned lclNum = arg->AsLclVar()->gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[lclNum];
if (!varDsc->lvIsParam)
{
// The argument is a non-parameter local so it doesn't need to be assigned to a temp.
@@ -7284,10 +7351,10 @@ GenTreePtr Compiler::fgAssignRecursiveCallArgToCallerParam(GenTreePtr arg,
{
// The argument is not assigned to a temp. We need to create a new temp and insert an assignment.
// TODO: we can avoid a temp assignment if we can prove that the argument tree
- // doesn't involve any caller parameters.
- unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp"));
- GenTreePtr tempSrc = arg;
- GenTreePtr tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType);
+ // doesn't involve any caller parameters.
+ unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp"));
+ GenTreePtr tempSrc = arg;
+ GenTreePtr tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType);
GenTreePtr tmpAssignNode = gtNewAssignNode(tempDest, tempSrc);
GenTreePtr tmpAssignStmt = gtNewStmt(tmpAssignNode, callILOffset);
fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt);
@@ -7295,11 +7362,11 @@ GenTreePtr Compiler::fgAssignRecursiveCallArgToCallerParam(GenTreePtr arg,
}
// Now assign the temp to the parameter.
- LclVarDsc *paramDsc = lvaTable + originalArgNum;
+ LclVarDsc* paramDsc = lvaTable + originalArgNum;
assert(paramDsc->lvIsParam);
- GenTreePtr paramDest = gtNewLclvNode(originalArgNum, paramDsc->lvType);
+ GenTreePtr paramDest = gtNewLclvNode(originalArgNum, paramDsc->lvType);
GenTreePtr paramAssignNode = gtNewAssignNode(paramDest, argInTemp);
- paramAssignStmt = gtNewStmt(paramAssignNode, callILOffset);
+ paramAssignStmt = gtNewStmt(paramAssignNode, callILOffset);
fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt);
}
@@ -7311,7 +7378,7 @@ GenTreePtr Compiler::fgAssignRecursiveCallArgToCallerParam(GenTreePtr arg,
* Transform the given GT_CALL tree for code generation.
*/
-GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
+GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
{
if (call->CanTailCall())
{
@@ -7321,8 +7388,8 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// It cannot be an inline candidate
assert(!call->IsInlineCandidate());
- const char * szFailReason = nullptr;
- bool hasStructParam = false;
+ const char* szFailReason = nullptr;
+ bool hasStructParam = false;
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
szFailReason = "Might turn into an intrinsic";
@@ -7337,7 +7404,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
szFailReason = "Localloc used";
}
#ifdef _TARGET_AMD64_
- // Needed for Jit64 compat.
+ // Needed for Jit64 compat.
// In future, enabling tail calls from methods that need GS cookie check
// would require codegen side work to emit GS cookie check before a tail
// call.
@@ -7362,7 +7429,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// has been leaked and the current stack frame must live until after the final
// call.
- // Verify that none of vars has lvHasLdAddrOp or lvAddrExposed bit set. Note
+ // Verify that none of vars has lvHasLdAddrOp or lvAddrExposed bit set. Note
// that lvHasLdAddrOp is much more conservative. We cannot just base it on
// lvAddrExposed alone since it is not guaranteed to be set on all VarDscs
// during morph stage. The reason for also checking lvAddrExposed is that in case
@@ -7372,14 +7439,14 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
//
// TODO-Throughput: have a compiler level flag to indicate whether method has vars whose
// address is taken. Such a flag could be set whenever lvHasLdAddrOp or LvAddrExposed
- // is set. This avoids the need for iterating through all lcl vars of the current
- // method. Right now throughout the code base we are not consistently using 'set'
+ // is set. This avoids the need for iterating through all lcl vars of the current
+ // method. Right now throughout the code base we are not consistently using 'set'
// method to set lvHasLdAddrOp and lvAddrExposed flags.
- unsigned varNum;
- LclVarDsc *varDsc;
- bool hasAddrExposedVars = false;
- bool hasStructPromotedParam = false;
- bool hasPinnedVars = false;
+ unsigned varNum;
+ LclVarDsc* varDsc;
+ bool hasAddrExposedVars = false;
+ bool hasStructPromotedParam = false;
+ bool hasPinnedVars = false;
for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++)
{
@@ -7387,7 +7454,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// following three hazard checks.
// We still must check for any struct parameters and set 'hasStructParam'
// so that we won't transform the recursive tail call into a loop.
- //
+ //
if (call->IsImplicitTailCall())
{
if (varDsc->lvHasLdAddrOp || varDsc->lvAddrExposed)
@@ -7438,18 +7505,17 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
fgFixupStructReturn(call);
}
- var_types callType = call->TypeGet();
+ var_types callType = call->TypeGet();
// We have to ensure to pass the incoming retValBuf as the
// outgoing one. Using a temp will not do as this function will
- // not regain control to do the copy.
+ // not regain control to do the copy.
if (info.compRetBuffArg != BAD_VAR_NUM)
{
noway_assert(callType == TYP_VOID);
GenTreePtr retValBuf = call->gtCallArgs->gtOp.gtOp1;
- if (retValBuf->gtOper != GT_LCL_VAR ||
- retValBuf->gtLclVarCommon.gtLclNum != info.compRetBuffArg)
+ if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->gtLclVarCommon.gtLclNum != info.compRetBuffArg)
{
szFailReason = "Need to copy return buffer";
}
@@ -7486,11 +7552,12 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// ignored.
//
// Exception to the above rule: although Virtual Stub Dispatch (VSD) calls require
- // extra stub param (e.g. in R11 on Amd64), they can still be called via tail call helper.
- // This is done by by adding stubAddr as an additional arg before the original list of
+ // extra stub param (e.g. in R11 on Amd64), they can still be called via tail call helper.
+ // This is done by by adding stubAddr as an additional arg before the original list of
// args. For more details see fgMorphTailCall() and CreateTailCallCopyArgsThunk()
// in Stublinkerx86.cpp.
- szFailReason = "Method with non-standard args passed in callee trash register cannot be tail called via helper";
+ szFailReason = "Method with non-standard args passed in callee trash register cannot be tail "
+ "called via helper";
}
#ifdef _TARGET_ARM64_
else
@@ -7500,7 +7567,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
szFailReason = "Non-qualified fast tail call";
}
#endif
-#endif //LEGACY_BACKEND
+#endif // LEGACY_BACKEND
}
}
@@ -7511,7 +7578,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
#if FEATURE_TAILCALL_OPT
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
#endif
-
+
#ifdef FEATURE_PAL
if (!canFastTailCall && szFailReason == nullptr)
{
@@ -7522,7 +7589,8 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
if (szFailReason != nullptr)
{
#ifdef DEBUG
- if (verbose) {
+ if (verbose)
+ {
printf("\nRejecting tail call late for call ");
printTreeID(call);
printf(": %s\n", szFailReason);
@@ -7531,8 +7599,8 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// for non user funcs, we have no handles to report
info.compCompHnd->reportTailCallDecision(nullptr,
- (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
- isTailPrefixed, TAILCALL_FAIL, szFailReason);
+ (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
+ isTailPrefixed, TAILCALL_FAIL, szFailReason);
goto NO_TAIL_CALL;
}
@@ -7546,8 +7614,9 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// Many tailcalls will have call and ret in the same block, and thus be BBJ_RETURN,
// but if the call falls through to a ret, and we are doing a tailcall, change it here.
if (compCurBB->bbJumpKind != BBJ_RETURN)
+ {
compCurBB->bbJumpKind = BBJ_RETURN;
-
+ }
}
// Set this flag before calling fgMorphCall() to prevent inlining this call.
@@ -7564,14 +7633,8 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// fgMorphRecursiveFastTailCallIntoLoop() is not handling update of generic context while transforming
// a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the
// generic type parameters of both caller and callee generic method are the same.
- if (opts.compTailCallLoopOpt &&
- canFastTailCall &&
- gtIsRecursiveCall(call) &&
- !lvaReportParamTypeArg() &&
- !lvaKeepAliveAndReportThis() &&
- !call->IsVirtual() &&
- !hasStructParam &&
- !varTypeIsStruct(call->TypeGet()))
+ if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() &&
+ !lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet()))
{
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP;
fastTailCallToLoop = true;
@@ -7613,9 +7676,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// for non user funcs, we have no handles to report
info.compCompHnd->reportTailCallDecision(nullptr,
(call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
- isTailPrefixed,
- tailCallResult,
- nullptr);
+ isTailPrefixed, tailCallResult, nullptr);
// As we will actually call CORINFO_HELP_TAILCALL, set the callTyp to TYP_VOID.
// to avoid doing any extra work for the return value.
@@ -7636,17 +7697,17 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
}
#endif
-
- GenTreePtr stmtExpr = fgMorphStmt->gtStmt.gtStmtExpr;
-
+ GenTreePtr stmtExpr = fgMorphStmt->gtStmt.gtStmtExpr;
+
#ifdef DEBUG
// Tail call needs to be in one of the following IR forms
// Either a call stmt or
- // GT_RETURN(GT_CALL(..)) or
+ // GT_RETURN(GT_CALL(..)) or
// var = call
- noway_assert((stmtExpr->gtOper == GT_CALL && stmtExpr == call) ||
- (stmtExpr->gtOper == GT_RETURN && (stmtExpr->gtOp.gtOp1 == call || stmtExpr->gtOp.gtOp1->gtOp.gtOp1 == call)) ||
- (stmtExpr->gtOper == GT_ASG && stmtExpr->gtOp.gtOp2 == call));
+ noway_assert((stmtExpr->gtOper == GT_CALL && stmtExpr == call) ||
+ (stmtExpr->gtOper == GT_RETURN &&
+ (stmtExpr->gtOp.gtOp1 == call || stmtExpr->gtOp.gtOp1->gtOp.gtOp1 == call)) ||
+ (stmtExpr->gtOper == GT_ASG && stmtExpr->gtOp.gtOp2 == call));
#endif
// For void calls, we would have created a GT_CALL in the stmt list.
@@ -7661,9 +7722,9 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// Legacy Jit64 Compat:
// There could be any number of GT_NOPs between tail call and GT_RETURN.
// That is tail call pattern could be one of the following:
- // 1) tail.call, nop*, ret
- // 2) tail.call, nop*, pop, nop*, ret
- // 3) var=tail.call, nop*, ret(var)
+ // 1) tail.call, nop*, ret
+ // 2) tail.call, nop*, pop, nop*, ret
+ // 3) var=tail.call, nop*, ret(var)
// 4) var=tail.call, nop*, pop, ret
//
// See impIsTailCallILPattern() for details on tail call IL patterns
@@ -7681,20 +7742,20 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
}
morphStmtToRemove = nextMorphStmt;
- nextMorphStmt = morphStmtToRemove->gtNext;
+ nextMorphStmt = morphStmtToRemove->gtNext;
fgRemoveStmt(compCurBB, morphStmtToRemove);
}
// Check to see if there is a pop.
// Since tail call is honored, we can get rid of the stmt corresponding to pop.
if (nextMorphStmt != nullptr && nextMorphStmt->gtStmt.gtStmtExpr->gtOper != GT_RETURN)
- {
+ {
// Note that pop opcode may or may not result in a new stmt (for details see
// impImportBlockCode()). Hence, it is not possible to assert about the IR
// form generated by pop but pop tree must be side-effect free so that we can
// delete it safely.
GenTreePtr popStmt = nextMorphStmt;
- nextMorphStmt = nextMorphStmt->gtNext;
+ nextMorphStmt = nextMorphStmt->gtNext;
noway_assert((popStmt->gtStmt.gtStmtExpr->gtFlags & GTF_ALL_EFFECT) == 0);
fgRemoveStmt(compCurBB, popStmt);
@@ -7710,7 +7771,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
}
morphStmtToRemove = nextMorphStmt;
- nextMorphStmt = morphStmtToRemove->gtNext;
+ nextMorphStmt = morphStmtToRemove->gtNext;
fgRemoveStmt(compCurBB, morphStmtToRemove);
}
}
@@ -7725,9 +7786,10 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// If var=call, then the next stmt must be a GT_RETURN(TYP_VOID) or GT_RETURN(var).
// This can occur if impSpillStackEnsure() has introduced an assignment to a temp.
if (stmtExpr->gtOper == GT_ASG && info.compRetType != TYP_VOID)
- {
+ {
noway_assert(stmtExpr->gtGetOp1()->OperIsLocal());
- noway_assert(stmtExpr->gtGetOp1()->AsLclVarCommon()->gtLclNum == retExpr->gtGetOp1()->AsLclVarCommon()->gtLclNum);
+ noway_assert(stmtExpr->gtGetOp1()->AsLclVarCommon()->gtLclNum ==
+ retExpr->gtGetOp1()->AsLclVarCommon()->gtLclNum);
}
fgRemoveStmt(compCurBB, nextMorphStmt);
@@ -7746,16 +7808,14 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
// dominated by a Gc-SafePoint block. But we don't have dominator info at this
// point. One option is to just add a place holder node for GC-poll (e.g. GT_GCPOLL)
// here and remove it in lowering if the block is dominated by a GC-SafePoint. For
- // now it not clear whether optimizing slow tail calls is worth the effort. As a
- // low cost check, we check whether the first and current basic blocks are
+ // now it not clear whether optimizing slow tail calls is worth the effort. As a
+ // low cost check, we check whether the first and current basic blocks are
// GC-SafePoints.
//
// Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead, fgSetBlockOrder()
// is going to mark the method as fully interruptible if the block containing this tail
// call is reachable without executing any call.
- if (canFastTailCall ||
- (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) ||
- (compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
+ if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
!fgCreateGCPoll(GCPOLL_INLINE, compCurBB))
{
// We didn't insert a poll block, so we need to morph the call now
@@ -7781,7 +7841,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
}
// For non-void calls, we return a place holder which will be
- // used by the parent GT_RETURN node of this call.
+ // used by the parent GT_RETURN node of this call.
GenTree* result = call;
if (callType != TYP_VOID && info.compRetType != TYP_VOID)
@@ -7818,12 +7878,12 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
NO_TAIL_CALL:
- if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 &&
+ if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 &&
(call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR)
#ifdef FEATURE_READYTORUN_COMPILER
- || call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR)
+ || call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR)
#endif
- ) &&
+ ) &&
(call == fgMorphStmt->gtStmt.gtStmtExpr))
{
// This is call to CORINFO_HELP_VIRTUAL_FUNC_PTR with ignored result.
@@ -7853,7 +7913,9 @@ NO_TAIL_CALL:
{
optCallCount++;
if (call->IsVirtual())
+ {
optIndirectCallCount++;
+ }
}
}
@@ -7877,7 +7939,9 @@ NO_TAIL_CALL:
else if (call->gtCallType == CT_USER_FUNC)
{
if ((call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) == 0)
+ {
compCurBB->bbFlags |= BBF_GC_SAFE_POINT;
+ }
}
// otherwise we have a CT_HELPER
}
@@ -7890,9 +7954,13 @@ NO_TAIL_CALL:
genTreeOps simpleOp = GT_CALL;
if (methodID == CORINFO_INTRINSIC_TypeEQ)
+ {
simpleOp = GT_EQ;
+ }
else if (methodID == CORINFO_INTRINSIC_TypeNEQ)
+ {
simpleOp = GT_NE;
+ }
if (simpleOp == GT_EQ || simpleOp == GT_NE)
{
@@ -7923,10 +7991,10 @@ NO_TAIL_CALL:
// Make sure that return buffers containing GC pointers that aren't too large are pointers into the stack.
GenTreePtr origDest = nullptr; // Will only become non-null if we do the transformation (and thus require
// copy-back).
- unsigned retValTmpNum = BAD_VAR_NUM;
- CORINFO_CLASS_HANDLE structHnd = nullptr;
+ unsigned retValTmpNum = BAD_VAR_NUM;
+ CORINFO_CLASS_HANDLE structHnd = nullptr;
if (call->HasRetBufArg() &&
- call->gtCallLateArgs == nullptr) // Don't do this if we're re-morphing (which will make late args non-null).
+ call->gtCallLateArgs == nullptr) // Don't do this if we're re-morphing (which will make late args non-null).
{
// We're enforcing the invariant that return buffers pointers (at least for
// struct return types containing GC pointers) are never pointers into the heap.
@@ -7938,17 +8006,16 @@ NO_TAIL_CALL:
GenTreePtr dest = call->gtCallArgs->gtOp.gtOp1;
assert(dest->OperGet() != GT_ARGPLACE); // If it was, we'd be in a remorph, which we've already excluded above.
- if (dest->gtType == TYP_BYREF
- && !(dest->OperGet() == GT_ADDR && dest->gtOp.gtOp1->OperGet() == GT_LCL_VAR))
+ if (dest->gtType == TYP_BYREF && !(dest->OperGet() == GT_ADDR && dest->gtOp.gtOp1->OperGet() == GT_LCL_VAR))
{
// We'll exempt helper calls from this, assuming that the helper implementation
// follows the old convention, and does whatever barrier is required.
if (call->gtCallType != CT_HELPER)
{
structHnd = call->gtRetClsHnd;
- if (info.compCompHnd->isStructRequiringStackAllocRetBuf(structHnd)
- && !((dest->OperGet() == GT_LCL_VAR || dest->OperGet() == GT_REG_VAR)
- && dest->gtLclVar.gtLclNum == info.compRetBuffArg))
+ if (info.compCompHnd->isStructRequiringStackAllocRetBuf(structHnd) &&
+ !((dest->OperGet() == GT_LCL_VAR || dest->OperGet() == GT_REG_VAR) &&
+ dest->gtLclVar.gtLclNum == info.compRetBuffArg))
{
origDest = dest;
@@ -7969,13 +8036,13 @@ NO_TAIL_CALL:
// Morph stelem.ref helper call to store a null value, into a store into an array without the helper.
// This needs to be done after the arguments are morphed to ensure constant propagation has already taken place.
if ((call->gtCallType == CT_HELPER) && (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST)))
- {
+ {
GenTreePtr value = gtArgEntryByArgNum(call, 2)->node;
if (value->IsIntegralConst(0))
{
assert(value->OperGet() == GT_CNS_INT);
- GenTreePtr arr = gtArgEntryByArgNum(call, 0)->node;
+ GenTreePtr arr = gtArgEntryByArgNum(call, 0)->node;
GenTreePtr index = gtArgEntryByArgNum(call, 1)->node;
arr = gtClone(arr, true);
@@ -7988,28 +8055,28 @@ NO_TAIL_CALL:
noway_assert(value != nullptr);
GenTreePtr nullCheckedArr = impCheckForNullPointer(arr);
- GenTreePtr arrIndexNode = gtNewIndexRef(TYP_REF, nullCheckedArr, index);
- GenTreePtr arrStore = gtNewAssignNode(arrIndexNode, value);
+ GenTreePtr arrIndexNode = gtNewIndexRef(TYP_REF, nullCheckedArr, index);
+ GenTreePtr arrStore = gtNewAssignNode(arrIndexNode, value);
arrStore->gtFlags |= GTF_ASG;
return fgMorphTree(arrStore);
- }
+ }
}
}
}
// Optimize get_ManagedThreadId(get_CurrentThread)
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) &&
- info.compCompHnd->getIntrinsicID(call->gtCallMethHnd) == CORINFO_INTRINSIC_GetManagedThreadId)
+ info.compCompHnd->getIntrinsicID(call->gtCallMethHnd) == CORINFO_INTRINSIC_GetManagedThreadId)
{
- noway_assert(origDest == NULL);
- noway_assert(call->gtCallLateArgs->gtOp.gtOp1 != NULL);
+ noway_assert(origDest == nullptr);
+ noway_assert(call->gtCallLateArgs->gtOp.gtOp1 != nullptr);
GenTreePtr innerCall = call->gtCallLateArgs->gtOp.gtOp1;
- if (innerCall->gtOper == GT_CALL &&
- (innerCall->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) &&
- info.compCompHnd->getIntrinsicID(innerCall->gtCall.gtCallMethHnd) == CORINFO_INTRINSIC_GetCurrentManagedThread)
+ if (innerCall->gtOper == GT_CALL && (innerCall->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) &&
+ info.compCompHnd->getIntrinsicID(innerCall->gtCall.gtCallMethHnd) ==
+ CORINFO_INTRINSIC_GetCurrentManagedThread)
{
// substitute expression with call to helper
GenTreePtr newCall = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT, 0);
@@ -8018,7 +8085,7 @@ NO_TAIL_CALL:
}
}
- if (origDest != NULL)
+ if (origDest != nullptr)
{
GenTreePtr retValVarAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(retValTmpNum, TYP_STRUCT));
// If the origDest expression was an assignment to a variable, it might be to an otherwise-unused
@@ -8030,13 +8097,13 @@ NO_TAIL_CALL:
if (origDest->gtOp.gtOp1->OperGet() == GT_LCL_VAR)
{
GenTreePtr var = origDest->gtOp.gtOp1;
- origDest = gtNewOperNode(GT_COMMA, var->TypeGet(), origDest,
+ origDest = gtNewOperNode(GT_COMMA, var->TypeGet(), origDest,
gtNewLclvNode(var->gtLclVar.gtLclNum, var->TypeGet()));
}
}
GenTreePtr copyBlk = gtNewCpObjNode(origDest, retValVarAddr, structHnd, false);
- copyBlk = fgMorphTree(copyBlk);
- GenTree* result = gtNewOperNode(GT_COMMA, TYP_VOID, call, copyBlk);
+ copyBlk = fgMorphTree(copyBlk);
+ GenTree* result = gtNewOperNode(GT_COMMA, TYP_VOID, call, copyBlk);
#ifdef DEBUG
result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
@@ -8051,11 +8118,11 @@ NO_TAIL_CALL:
// As a result the compiler won't need to preserve live registers across the call.
//
// This isn't need for tail calls as there shouldn't be any code after the call anyway.
- // Besides, the tail call code is part of the epilog and converting the block to
+ // Besides, the tail call code is part of the epilog and converting the block to
// BBJ_THROW would result in the tail call being dropped as the epilog is generated
// only for BBJ_RETURN blocks.
//
- // Currently this doesn't work for non-void callees. Some of the code that handles
+ // Currently this doesn't work for non-void callees. Some of the code that handles
// fgRemoveRestOfBlock expects the tree to have GTF_EXCEPT flag set but call nodes
// do not have this flag by default. We could add the flag here but the proper solution
// would be to replace the return expression with a local var node during inlining
@@ -8077,7 +8144,7 @@ NO_TAIL_CALL:
* Transform the given GTK_CONST tree for code generation.
*/
-GenTreePtr Compiler::fgMorphConst(GenTreePtr tree)
+GenTreePtr Compiler::fgMorphConst(GenTreePtr tree)
{
noway_assert(tree->OperKind() & GTK_CONST);
@@ -8086,8 +8153,10 @@ GenTreePtr Compiler::fgMorphConst(GenTreePtr tree)
tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS);
- if (tree->OperGet() != GT_CNS_STR)
+ if (tree->OperGet() != GT_CNS_STR)
+ {
return tree;
+ }
// TODO-CQ: Do this for compCurBB->isRunRarely(). Doing that currently will
// guarantee slow performance for that block. Instead cache the return value
@@ -8100,7 +8169,7 @@ GenTreePtr Compiler::fgMorphConst(GenTreePtr tree)
{
// For un-important blocks, we want to construct the string lazily
- GenTreeArgList *args;
+ GenTreeArgList* args;
if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE)
{
args = gtNewArgList(gtNewIconNode(RidFromToken(tree->gtStrCon.gtSconCPX), TYP_INT));
@@ -8108,10 +8177,9 @@ GenTreePtr Compiler::fgMorphConst(GenTreePtr tree)
else
{
args = gtNewArgList(gtNewIconNode(RidFromToken(tree->gtStrCon.gtSconCPX), TYP_INT),
- gtNewIconEmbScpHndNode(tree->gtStrCon.gtScpHnd));
+ gtNewIconEmbScpHndNode(tree->gtStrCon.gtScpHnd));
}
-
tree = gtNewHelperCallNode(helper, TYP_REF, 0, args);
return fgMorphTree(tree);
}
@@ -8119,10 +8187,9 @@ GenTreePtr Compiler::fgMorphConst(GenTreePtr tree)
assert(tree->gtStrCon.gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->gtStrCon.gtScpHnd));
- LPVOID pValue;
- InfoAccessType iat = info.compCompHnd->constructStringLiteral(tree->gtStrCon.gtScpHnd,
- tree->gtStrCon.gtSconCPX,
- &pValue);
+ LPVOID pValue;
+ InfoAccessType iat =
+ info.compCompHnd->constructStringLiteral(tree->gtStrCon.gtScpHnd, tree->gtStrCon.gtSconCPX, &pValue);
tree = gtNewStringLiteralNode(iat, pValue);
@@ -8134,7 +8201,7 @@ GenTreePtr Compiler::fgMorphConst(GenTreePtr tree)
* Transform the given GTK_LEAF tree for code generation.
*/
-GenTreePtr Compiler::fgMorphLeaf(GenTreePtr tree)
+GenTreePtr Compiler::fgMorphLeaf(GenTreePtr tree)
{
noway_assert(tree->OperKind() & GTK_LEAF);
@@ -8147,7 +8214,8 @@ GenTreePtr Compiler::fgMorphLeaf(GenTreePtr tree)
{
if (info.compIsVarArgs)
{
- GenTreePtr newTree = fgMorphStackArgForVarArgs(tree->gtLclFld.gtLclNum, tree->gtType, tree->gtLclFld.gtLclOffs);
+ GenTreePtr newTree =
+ fgMorphStackArgForVarArgs(tree->gtLclFld.gtLclNum, tree->gtType, tree->gtLclFld.gtLclOffs);
if (newTree != NULL)
return newTree;
}
@@ -8176,22 +8244,22 @@ GenTreePtr Compiler::fgMorphLeaf(GenTreePtr tree)
switch (addrInfo.accessType)
{
- case IAT_PPVALUE:
- tree = gtNewOperNode(GT_IND, TYP_I_IMPL, tree);
- tree->gtFlags |= GTF_IND_INVARIANT;
+ case IAT_PPVALUE:
+ tree = gtNewOperNode(GT_IND, TYP_I_IMPL, tree);
+ tree->gtFlags |= GTF_IND_INVARIANT;
- __fallthrough;
+ __fallthrough;
- case IAT_PVALUE:
- tree = gtNewOperNode(GT_IND, TYP_I_IMPL, tree);
- break;
+ case IAT_PVALUE:
+ tree = gtNewOperNode(GT_IND, TYP_I_IMPL, tree);
+ break;
- case IAT_VALUE:
- tree = gtNewOperNode(GT_NOP, tree->TypeGet(), tree); // prevents constant folding
- break;
+ case IAT_VALUE:
+ tree = gtNewOperNode(GT_NOP, tree->TypeGet(), tree); // prevents constant folding
+ break;
- default:
- noway_assert(!"Unknown addrInfo.accessType");
+ default:
+ noway_assert(!"Unknown addrInfo.accessType");
}
return fgMorphTree(tree);
@@ -8200,11 +8268,10 @@ GenTreePtr Compiler::fgMorphLeaf(GenTreePtr tree)
return tree;
}
-
void Compiler::fgAssignSetVarDef(GenTreePtr tree)
{
GenTreeLclVarCommon* lclVarCmnTree;
- bool isEntire = false;
+ bool isEntire = false;
if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire))
{
if (isEntire)
@@ -8223,70 +8290,74 @@ void Compiler::fgAssignSetVarDef(GenTreePtr tree)
GenTreePtr Compiler::fgMorphOneAsgBlockOp(GenTreePtr tree)
{
- genTreeOps oper = tree->gtOper;
+ genTreeOps oper = tree->gtOper;
// Only xxBlk opcodes are possible
noway_assert(tree->OperIsBlkOp());
- GenTreePtr dest = tree->gtOp.gtOp1->gtOp.gtOp1; // Dest address
- GenTreePtr src = tree->gtOp.gtOp1->gtOp.gtOp2; // Src
- GenTreePtr blkShape = tree->gtOp.gtOp2; // [size/clsHnd]
- bool volatil = tree->AsBlkOp()->IsVolatile();
- GenTreePtr result;
- GenTreePtr lclVarTree;
+ GenTreePtr dest = tree->gtOp.gtOp1->gtOp.gtOp1; // Dest address
+ GenTreePtr src = tree->gtOp.gtOp1->gtOp.gtOp2; // Src
+ GenTreePtr blkShape = tree->gtOp.gtOp2; // [size/clsHnd]
+ bool volatil = tree->AsBlkOp()->IsVolatile();
+ GenTreePtr result;
+ GenTreePtr lclVarTree;
// The dest must be an address
- noway_assert(genActualType(dest->gtType) == TYP_I_IMPL ||
- dest->gtType == TYP_BYREF);
+ noway_assert(genActualType(dest->gtType) == TYP_I_IMPL || dest->gtType == TYP_BYREF);
// For COPYBLK the src must be an address
- noway_assert(!tree->OperIsCopyBlkOp() ||
- (genActualType( src->gtType) == TYP_I_IMPL ||
- src->gtType == TYP_BYREF));
+ noway_assert(!tree->OperIsCopyBlkOp() || (genActualType(src->gtType) == TYP_I_IMPL || src->gtType == TYP_BYREF));
// For INITBLK the src must be a TYP_INT
- noway_assert(oper != GT_INITBLK ||
- (genActualType( src->gtType) == TYP_INT));
+ noway_assert(oper != GT_INITBLK || (genActualType(src->gtType) == TYP_INT));
// The size must be an integer type
noway_assert(varTypeIsIntegral(blkShape->gtType));
- CORINFO_CLASS_HANDLE clsHnd;
- size_t size;
- var_types type = TYP_UNDEF;
+ CORINFO_CLASS_HANDLE clsHnd;
+ size_t size;
+ var_types type = TYP_UNDEF;
if (blkShape->gtOper != GT_CNS_INT)
+ {
goto GENERAL_BLKOP;
+ }
#ifdef FEATURE_SIMD
- // importer introduces cpblk nodes with src = GT_ADDR(GT_SIMD)
+ // importer introduces cpblk nodes with src = GT_ADDR(GT_SIMD)
// The SIMD type in question could be Vector2f which is 8-bytes in size.
// The below check is to make sure that we don't turn that copyblk
// into a assignment, since rationalizer logic will transform the
- // copyblk apropriately. Otherwise, the transormation made in this
- // routine will prevent rationalizer logic and we might end up with
+ // copyblk apropriately. Otherwise, the transormation made in this
+ // routine will prevent rationalizer logic and we might end up with
// GT_ADDR(GT_SIMD) node post rationalization, leading to a noway assert
// in codegen.
if (src->OperGet() == GT_ADDR && src->gtGetOp1()->OperGet() == GT_SIMD)
+ {
goto GENERAL_BLKOP;
-#endif
+ }
+#endif
if (!blkShape->IsIconHandle())
{
- clsHnd = 0;
+ clsHnd = nullptr;
size = blkShape->gtIntCon.gtIconVal;
/* A four byte BLK_COPY can be treated as an integer asignment */
if (size == 4)
+ {
type = TYP_INT;
+ }
#ifdef _TARGET_64BIT_
if (size == 8)
+ {
type = TYP_LONG;
+ }
#endif
}
else
{
- clsHnd = (CORINFO_CLASS_HANDLE) blkShape->gtIntCon.gtIconVal;
+ clsHnd = (CORINFO_CLASS_HANDLE)blkShape->gtIntCon.gtIconVal;
size = roundUp(info.compCompHnd->getClassSize(clsHnd), sizeof(void*));
// Since we round up, we are not handling the case where we have a
@@ -8315,202 +8386,204 @@ GenTreePtr Compiler::fgMorphOneAsgBlockOp(GenTreePtr tree)
switch (size)
{
- case 1:
- type = TYP_BYTE;
- goto ONE_SIMPLE_ASG;
- case 2:
- type = TYP_SHORT;
- goto ONE_SIMPLE_ASG;
+ case 1:
+ type = TYP_BYTE;
+ goto ONE_SIMPLE_ASG;
+ case 2:
+ type = TYP_SHORT;
+ goto ONE_SIMPLE_ASG;
#ifdef _TARGET_64BIT_
- case 4:
- type = TYP_INT;
- goto ONE_SIMPLE_ASG;
+ case 4:
+ type = TYP_INT;
+ goto ONE_SIMPLE_ASG;
#endif // _TARGET_64BIT_
- case REGSIZE_BYTES:
- noway_assert(type != TYP_UNDEF);
+ case REGSIZE_BYTES:
+ noway_assert(type != TYP_UNDEF);
-ONE_SIMPLE_ASG:
+ ONE_SIMPLE_ASG:
- noway_assert(size <= REGSIZE_BYTES);
+ noway_assert(size <= REGSIZE_BYTES);
- // For INITBLK, a non constant source is not going to allow us to fiddle
- // with the bits to create a single assigment.
+ // For INITBLK, a non constant source is not going to allow us to fiddle
+ // with the bits to create a single assigment.
- if ((oper == GT_INITBLK) && (src->gtOper != GT_CNS_INT))
- {
- goto GENERAL_BLKOP;
- }
+ if ((oper == GT_INITBLK) && (src->gtOper != GT_CNS_INT))
+ {
+ goto GENERAL_BLKOP;
+ }
- if (impIsAddressInLocal(dest, &lclVarTree))
- {
-#if LOCAL_ASSERTION_PROP
- // Kill everything about dest
- if (optLocalAssertionProp)
+ if (impIsAddressInLocal(dest, &lclVarTree))
{
- if (optAssertionCount > 0)
+#if LOCAL_ASSERTION_PROP
+ // Kill everything about dest
+ if (optLocalAssertionProp)
{
- fgKillDependentAssertions(lclVarTree->gtLclVarCommon.gtLclNum DEBUGARG(tree));
+ if (optAssertionCount > 0)
+ {
+ fgKillDependentAssertions(lclVarTree->gtLclVarCommon.gtLclNum DEBUGARG(tree));
+ }
}
- }
#endif // LOCAL_ASSERTION_PROP
- unsigned lclNum = lclVarTree->gtLclVarCommon.gtLclNum;
- // A previous incarnation of this code also required the local not to be
- // address-exposed(=taken). That seems orthogonal to the decision of whether
- // to do field-wise assignments: being address-exposed will cause it to be
- // "dependently" promoted, so it will be in the right memory location. One possible
- // further reason for avoiding field-wise stores is that the struct might have alignment-induced
- // holes, whose contents could be meaningful in unsafe code. If we decide that's a valid
- // concern, then we could compromise, and say that address-exposed + fields do not completely cover the
- // memory of the struct prevent field-wise assignments. Same situation exists for the "src" decision.
- if (varTypeIsStruct(lclVarTree) &&
- (lvaTable[lclNum].lvPromoted || lclVarIsSIMDType(lclNum)))
- {
+ unsigned lclNum = lclVarTree->gtLclVarCommon.gtLclNum;
+ // A previous incarnation of this code also required the local not to be
+ // address-exposed(=taken). That seems orthogonal to the decision of whether
+ // to do field-wise assignments: being address-exposed will cause it to be
+ // "dependently" promoted, so it will be in the right memory location. One possible
+ // further reason for avoiding field-wise stores is that the struct might have alignment-induced
+ // holes, whose contents could be meaningful in unsafe code. If we decide that's a valid
+ // concern, then we could compromise, and say that address-exposed + fields do not completely cover the
+ // memory of the struct prevent field-wise assignments. Same situation exists for the "src" decision.
+ if (varTypeIsStruct(lclVarTree) && (lvaTable[lclNum].lvPromoted || lclVarIsSIMDType(lclNum)))
+ {
- // Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.)
- goto GENERAL_BLKOP;
- }
- else
- if (!varTypeIsFloating(lclVarTree->TypeGet()) &&
- size == genTypeSize(var_types(lvaTable[lclNum].lvType)))
- {
- // Use the dest local var directly.
- dest = lclVarTree;
- type = lvaTable[lclNum].lvType; // Make the type used in the GT_IND node match
+ // Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.)
+ goto GENERAL_BLKOP;
+ }
+ else if (!varTypeIsFloating(lclVarTree->TypeGet()) &&
+ size == genTypeSize(var_types(lvaTable[lclNum].lvType)))
+ {
+ // Use the dest local var directly.
+ dest = lclVarTree;
+ type = lvaTable[lclNum].lvType; // Make the type used in the GT_IND node match
+
+ // If the block operation had been a write to a local var of a small int type,
+ // of the exact size of the small int type, and the var is NormalizeOnStore,
+ // we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't
+ // have done that normalization. If we're now making it into an assignment,
+ // the NormalizeOnStore will work, and it can be a full def.
+ if (lvaTable[lclNum].lvNormalizeOnStore())
+ {
+ dest->gtFlags &= (~GTF_VAR_USEASG);
+ }
- // If the block operation had been a write to a local var of a small int type,
- // of the exact size of the small int type, and the var is NormalizeOnStore,
- // we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't
- // have done that normalization. If we're now making it into an assignment,
- // the NormalizeOnStore will work, and it can be a full def.
- if (lvaTable[lclNum].lvNormalizeOnStore())
+ goto _DoneDest;
+ }
+ else
{
- dest->gtFlags &= (~GTF_VAR_USEASG);
+ // Could be a non-promoted struct, or a floating point type local, or
+ // an int subject to a partial write. Don't enregister.
+ lvaSetVarDoNotEnregister(lclNum DEBUGARG(DNER_LocalField));
+ // Fall through to indirect the dest node.
+ }
+ // Mark the local var tree as a definition point of the local.
+ lclVarTree->gtFlags |= GTF_VAR_DEF;
+ if (size < lvaTable[lclNum].lvExactSize)
+ { // If it's not a full-width assignment....
+ lclVarTree->gtFlags |= GTF_VAR_USEASG;
}
-
- goto _DoneDest;
- }
- else
- {
- // Could be a non-promoted struct, or a floating point type local, or
- // an int subject to a partial write. Don't enregister.
- lvaSetVarDoNotEnregister(lclNum DEBUGARG(DNER_LocalField));
- // Fall through to indirect the dest node.
}
- // Mark the local var tree as a definition point of the local.
- lclVarTree->gtFlags |= GTF_VAR_DEF;
- if (size < lvaTable[lclNum].lvExactSize) // If it's not a full-width assignment....
- lclVarTree->gtFlags |= GTF_VAR_USEASG;
- }
- // Check to ensure we are not creating a reducible *(& ... )
- if (dest->gtOper == GT_ADDR)
- {
- GenTreePtr addrOp = dest->gtOp.gtOp1;
- // Ignore reinterpret casts between int/gc
- if ((addrOp->TypeGet() == type) ||
- (varTypeIsIntegralOrI(addrOp) && (genTypeSize(addrOp->TypeGet()) == size)))
+ // Check to ensure we are not creating a reducible *(& ... )
+ if (dest->gtOper == GT_ADDR)
{
- dest = addrOp;
- type = addrOp->TypeGet();
- goto _DoneDest;
+ GenTreePtr addrOp = dest->gtOp.gtOp1;
+ // Ignore reinterpret casts between int/gc
+ if ((addrOp->TypeGet() == type) ||
+ (varTypeIsIntegralOrI(addrOp) && (genTypeSize(addrOp->TypeGet()) == size)))
+ {
+ dest = addrOp;
+ type = addrOp->TypeGet();
+ goto _DoneDest;
+ }
}
- }
- // Indirect the dest node.
+ // Indirect the dest node.
- dest = gtNewOperNode(GT_IND, type, dest);
+ dest = gtNewOperNode(GT_IND, type, dest);
- // If we have no information about the destination, we have to assume it could
- // live anywhere (not just in the GC heap).
- // Mark the GT_IND node so that we use the correct write barrier helper in case
- // the field is a GC ref.
+ // If we have no information about the destination, we have to assume it could
+ // live anywhere (not just in the GC heap).
+ // Mark the GT_IND node so that we use the correct write barrier helper in case
+ // the field is a GC ref.
- if (!fgIsIndirOfAddrOfLocal(dest))
- {
- dest->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
- }
+ if (!fgIsIndirOfAddrOfLocal(dest))
+ {
+ dest->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
+ }
-_DoneDest:;
+ _DoneDest:;
- if (volatil)
- dest->gtFlags |= GTF_DONT_CSE;
+ if (volatil)
+ {
+ dest->gtFlags |= GTF_DONT_CSE;
+ }
- if (tree->OperIsCopyBlkOp())
- {
- if (impIsAddressInLocal(src, &lclVarTree))
+ if (tree->OperIsCopyBlkOp())
{
- unsigned lclNum = lclVarTree->gtLclVarCommon.gtLclNum;
- if (varTypeIsStruct(lclVarTree) &&
- (lvaTable[lclNum].lvPromoted || lclVarIsSIMDType(lclNum)))
- {
- // Let fgMorphCopyBlock handle it.
- goto GENERAL_BLKOP;
- }
- else
- if (!varTypeIsFloating(lclVarTree->TypeGet()) &&
- size == genTypeSize(genActualType(lclVarTree->TypeGet())))
- {
- /* Use the src local var directly */
- src = lclVarTree;
- goto _DoneSrc;
- }
- else
+ if (impIsAddressInLocal(src, &lclVarTree))
{
+ unsigned lclNum = lclVarTree->gtLclVarCommon.gtLclNum;
+ if (varTypeIsStruct(lclVarTree) && (lvaTable[lclNum].lvPromoted || lclVarIsSIMDType(lclNum)))
+ {
+ // Let fgMorphCopyBlock handle it.
+ goto GENERAL_BLKOP;
+ }
+ else if (!varTypeIsFloating(lclVarTree->TypeGet()) &&
+ size == genTypeSize(genActualType(lclVarTree->TypeGet())))
+ {
+ /* Use the src local var directly */
+ src = lclVarTree;
+ goto _DoneSrc;
+ }
+ else
+ {
#ifndef LEGACY_BACKEND
- // The source argument of the copyblk can potentially
- // be accessed only through indir(addr(lclVar))
- // or indir(lclVarAddr) in rational form and liveness
- // won't account for these uses. That said,
- // we have to mark this local as address exposed so
- // we don't delete it as a dead store later on.
- unsigned lclVarNum = lclVarTree->gtLclVarCommon.gtLclNum;
- lvaTable[lclVarNum].lvAddrExposed = true;
- lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DNER_AddrExposed));
-
-#else // LEGACY_BACKEND
- lvaSetVarDoNotEnregister(lclVarTree->gtLclVarCommon.gtLclNum DEBUGARG(DNER_LocalField));
+ // The source argument of the copyblk can potentially
+ // be accessed only through indir(addr(lclVar))
+ // or indir(lclVarAddr) in rational form and liveness
+ // won't account for these uses. That said,
+ // we have to mark this local as address exposed so
+ // we don't delete it as a dead store later on.
+ unsigned lclVarNum = lclVarTree->gtLclVarCommon.gtLclNum;
+ lvaTable[lclVarNum].lvAddrExposed = true;
+ lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DNER_AddrExposed));
+
+#else // LEGACY_BACKEND
+ lvaSetVarDoNotEnregister(lclVarTree->gtLclVarCommon.gtLclNum DEBUGARG(DNER_LocalField));
#endif // LEGACY_BACKEND
- // Fall through to indirect the src node.
+ // Fall through to indirect the src node.
+ }
}
- }
- // Indirect the src node.
+ // Indirect the src node.
- src = gtNewOperNode(GT_IND, type, src);
+ src = gtNewOperNode(GT_IND, type, src);
- // If we have no information about the src, we have to assume it could
- // live anywhere (not just in the GC heap).
- // Mark the GT_IND node so that we use the correct write barrier helper in case
- // the field is a GC ref.
+ // If we have no information about the src, we have to assume it could
+ // live anywhere (not just in the GC heap).
+ // Mark the GT_IND node so that we use the correct write barrier helper in case
+ // the field is a GC ref.
- if (!fgIsIndirOfAddrOfLocal(src))
- {
- src->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
- }
+ if (!fgIsIndirOfAddrOfLocal(src))
+ {
+ src->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
+ }
-_DoneSrc:;
+ _DoneSrc:;
- if (volatil)
- src->gtFlags |= GTF_DONT_CSE;
- }
- else // (oper == GT_INITBLK)
- {
- // This will mutate the integer constant, in place, to be the correct
- // value for the type were are using in the assignment.
- src->AsIntCon()->FixupInitBlkValue(type);
- }
+ if (volatil)
+ {
+ src->gtFlags |= GTF_DONT_CSE;
+ }
+ }
+ else // (oper == GT_INITBLK)
+ {
+ // This will mutate the integer constant, in place, to be the correct
+ // value for the type were are using in the assignment.
+ src->AsIntCon()->FixupInitBlkValue(type);
+ }
- /* Create the assignment node */
+ /* Create the assignment node */
- result = gtNewAssignNode(dest, src);
- result->gtType = type;
+ result = gtNewAssignNode(dest, src);
+ result->gtType = type;
- return result;
+ return result;
}
GENERAL_BLKOP:
@@ -8533,10 +8606,10 @@ GENERAL_BLKOP:
//
// Notes:
// If we leave it as a GT_INITBLK we will call lvaSetVarDoNotEnregister() with a reason of DNER_BlockOp
-// if the Dest() is a a struct that has a "CustomLayout" and "ConstainsHoles" then we
+// if the Dest() is a a struct that has a "CustomLayout" and "ConstainsHoles" then we
// can not use a field by field assignment and must the orginal GT_INITBLK unmodified.
-GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
+GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
{
noway_assert(tree->gtOper == GT_INITBLK);
@@ -8552,32 +8625,31 @@ GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
{
GenTreeInitBlk* initBlkOp = tree->AsInitBlk();
- GenTreePtr destAddr = initBlkOp->Dest();
- GenTreePtr initVal = initBlkOp->InitVal();
- GenTreePtr blockSize = initBlkOp->Size();
+ GenTreePtr destAddr = initBlkOp->Dest();
+ GenTreePtr initVal = initBlkOp->InitVal();
+ GenTreePtr blockSize = initBlkOp->Size();
// The dest must be an address
- noway_assert(genActualType(destAddr->gtType) == TYP_I_IMPL ||
- destAddr->gtType == TYP_BYREF);
+ noway_assert(genActualType(destAddr->gtType) == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
// The size must be an integer type
assert(varTypeIsIntegral(blockSize->gtType));
- unsigned blockWidth = 0;
- bool blockWidthIsConst = false;
+ unsigned blockWidth = 0;
+ bool blockWidthIsConst = false;
if (blockSize->IsCnsIntOrI())
{
blockWidthIsConst = true;
- blockWidth = unsigned(blockSize->gtIntConCommon.IconValue());
+ blockWidth = unsigned(blockSize->gtIntConCommon.IconValue());
}
GenTreeLclVarCommon* lclVarTree = nullptr;
- FieldSeqNode* destFldSeq = nullptr;
- unsigned destLclNum = BAD_VAR_NUM;
- LclVarDsc * destLclVar = nullptr;
- bool destDoFldAsg = false;
+ FieldSeqNode* destFldSeq = nullptr;
+ unsigned destLclNum = BAD_VAR_NUM;
+ LclVarDsc* destLclVar = nullptr;
+ bool destDoFldAsg = false;
if (destAddr->IsLocalAddrExpr(this, &lclVarTree, &destFldSeq))
{
@@ -8601,7 +8673,7 @@ GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
noway_assert(!opts.MinOpts());
if (destLclVar->lvAddrExposed & destLclVar->lvContainsHoles)
{
- JITDUMP(" dest is address exposed");
+ JITDUMP(" dest is address exposed");
}
else
{
@@ -8611,7 +8683,7 @@ GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
// We may decide later that a copyblk is required when this struct has holes
destDoFldAsg = true;
}
- else
+ else
{
JITDUMP(" with mismatched size");
}
@@ -8626,8 +8698,7 @@ GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
destDoFldAsg = false;
}
- JITDUMP(destDoFldAsg ? " using field by field initialization.\n"
- : " this requires an InitBlock.\n");
+ JITDUMP(destDoFldAsg ? " using field by field initialization.\n" : " this requires an InitBlock.\n");
if (!destDoFldAsg && (destLclVar != nullptr))
{
@@ -8639,9 +8710,9 @@ GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
}
}
- // Mark the dest struct as DoNotEnreg
- // when they are LclVar structs and we are using a CopyBlock
- // or the struct is not promoted
+ // Mark the dest struct as DoNotEnreg
+ // when they are LclVar structs and we are using a CopyBlock
+ // or the struct is not promoted
//
if (!destDoFldAsg)
{
@@ -8670,10 +8741,10 @@ GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
unsigned fieldLclNum;
unsigned fieldCnt = destLclVar->lvFieldCnt;
- for (unsigned i=0; i<fieldCnt; ++i)
+ for (unsigned i = 0; i < fieldCnt; ++i)
{
fieldLclNum = destLclVar->lvFieldLclStart + i;
- dest = gtNewLclvNode(fieldLclNum, lvaTable[fieldLclNum].TypeGet());
+ dest = gtNewLclvNode(fieldLclNum, lvaTable[fieldLclNum].TypeGet());
noway_assert(destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR);
// If it had been labeled a "USEASG", assignments to the the individual promoted fields are not.
@@ -8695,7 +8766,8 @@ GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
{
srcCopy->ChangeOperConst(GT_CNS_DBL);
// setup the bit pattern
- memset(&srcCopy->gtDblCon.gtDconVal, (int)initVal->gtIntCon.gtIconVal, sizeof(srcCopy->gtDblCon.gtDconVal));
+ memset(&srcCopy->gtDblCon.gtDconVal, (int)initVal->gtIntCon.gtIconVal,
+ sizeof(srcCopy->gtDblCon.gtDconVal));
/* Change the types of srcCopy to TYP_DOUBLE */
srcCopy->gtType = TYP_DOUBLE;
}
@@ -8704,7 +8776,8 @@ GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
noway_assert(srcCopy->gtOper == GT_CNS_INT);
noway_assert(srcCopy->TypeGet() == TYP_INT);
// setup the bit pattern
- memset(&srcCopy->gtIntCon.gtIconVal, (int)initVal->gtIntCon.gtIconVal, sizeof(srcCopy->gtIntCon.gtIconVal));
+ memset(&srcCopy->gtIntCon.gtIconVal, (int)initVal->gtIntCon.gtIconVal,
+ sizeof(srcCopy->gtIntCon.gtIconVal));
}
srcCopy->gtType = dest->TypeGet();
@@ -8720,10 +8793,7 @@ GenTreePtr Compiler::fgMorphInitBlock(GenTreePtr tree)
if (tree)
{
- tree = gtNewOperNode(GT_COMMA,
- TYP_VOID,
- tree,
- asg);
+ tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg);
}
else
{
@@ -8763,10 +8833,10 @@ _Done:
// If we leave it as a GT_COPYBLK or GT_COPYOBJ we will call lvaSetVarDoNotEnregister() on both Source() and Dest()
// When performing a field by field assignment we can have one of Source() or Dest treated as a blob of bytes
// and in such cases we will call lvaSetVarDoNotEnregister() on the one treated as a blob of bytes.
-// if the Source() or Dest() is a a struct that has a "CustomLayout" and "ConstainsHoles" then we
+// if the Source() or Dest() is a a struct that has a "CustomLayout" and "ConstainsHoles" then we
// can not use a field by field assignment and must the orginal GT_COPYBLK unmodified.
-GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
+GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
{
noway_assert(tree->OperIsCopyBlkOp());
@@ -8783,10 +8853,10 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
}
else
{
- GenTreePtr destAddr;
- GenTreePtr srcAddr;
- GenTreePtr blockSize;
- bool isCopyObj;
+ GenTreePtr destAddr;
+ GenTreePtr srcAddr;
+ GenTreePtr blockSize;
+ bool isCopyObj;
if (tree->OperGet() == GT_COPYBLK)
{
@@ -8808,38 +8878,38 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
}
noway_assert(destAddr->TypeGet() == TYP_BYREF || destAddr->TypeGet() == TYP_I_IMPL);
- noway_assert(srcAddr->TypeGet() == TYP_BYREF || srcAddr->TypeGet() == TYP_I_IMPL);
+ noway_assert(srcAddr->TypeGet() == TYP_BYREF || srcAddr->TypeGet() == TYP_I_IMPL);
- unsigned blockWidth = 0;
- bool blockWidthIsConst = false;
+ unsigned blockWidth = 0;
+ bool blockWidthIsConst = false;
if (blockSize->IsCnsIntOrI())
{
blockWidthIsConst = true;
if (blockSize->IsIconHandle(GTF_ICON_CLASS_HDL))
{
- CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE) blockSize->gtIntConCommon.IconValue();
- blockWidth = info.compCompHnd->getClassSize(clsHnd);
+ CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)blockSize->gtIntConCommon.IconValue();
+ blockWidth = info.compCompHnd->getClassSize(clsHnd);
}
else
{
blockWidth = unsigned(blockSize->gtIntConCommon.IconValue());
}
}
-
+
GenTreeLclVarCommon* lclVarTree = nullptr;
- FieldSeqNode* destFldSeq = nullptr;
- unsigned destLclNum = BAD_VAR_NUM;
- LclVarDsc* destLclVar = nullptr;
- bool destDoFldAsg = false;
- bool destOnStack = false;
+ FieldSeqNode* destFldSeq = nullptr;
+ unsigned destLclNum = BAD_VAR_NUM;
+ LclVarDsc* destLclVar = nullptr;
+ bool destDoFldAsg = false;
+ bool destOnStack = false;
if (destAddr->IsLocalAddrExpr(this, &lclVarTree, &destFldSeq))
{
destOnStack = true;
- destLclNum = lclVarTree->gtLclNum;
- destLclVar = &lvaTable[destLclNum];
+ destLclNum = lclVarTree->gtLclNum;
+ destLclVar = &lvaTable[destLclNum];
#if LOCAL_ASSERTION_PROP
// Kill everything about destLclNum (and its field locals)
@@ -8863,17 +8933,17 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// We may decide later that a copyblk is required when this struct has holes
destDoFldAsg = true;
}
- else
+ else
{
JITDUMP(" with mismatched dest size");
}
}
}
- FieldSeqNode* srcFldSeq = nullptr;
- unsigned srcLclNum = BAD_VAR_NUM;
- LclVarDsc* srcLclVar = nullptr;
- bool srcDoFldAsg = false;
+ FieldSeqNode* srcFldSeq = nullptr;
+ unsigned srcLclNum = BAD_VAR_NUM;
+ LclVarDsc* srcLclVar = nullptr;
+ bool srcDoFldAsg = false;
if (srcAddr->IsLocalAddrExpr(this, &lclVarTree, &srcFldSeq))
{
@@ -8891,7 +8961,7 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// We may decide later that a copyblk is required when this struct has holes
srcDoFldAsg = true;
}
- else
+ else
{
JITDUMP(" with mismatched src size");
}
@@ -8901,12 +8971,11 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// Check to see if we are required to do a copy block because the struct contains holes
// and either the src or dest is externally visible
//
- bool requiresCopyBlock = false;
- bool srcSingleLclVarAsg = false;
+ bool requiresCopyBlock = false;
+ bool srcSingleLclVarAsg = false;
// If either src or dest is a reg-sized non-field-addressed struct, keep the copyBlock.
- if ((destLclVar != nullptr && destLclVar->lvRegStruct) ||
- (srcLclVar != nullptr && srcLclVar->lvRegStruct))
+ if ((destLclVar != nullptr && destLclVar->lvRegStruct) || (srcLclVar != nullptr && srcLclVar->lvRegStruct))
{
requiresCopyBlock = true;
}
@@ -8935,20 +9004,21 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// If we passed the above checks, then we will check these two
if (!requiresCopyBlock)
{
- // Are both dest and src promoted structs?
+ // Are both dest and src promoted structs?
if (destDoFldAsg && srcDoFldAsg)
{
// Both structs should be of the same type, if not we will use a copy block
- if (lvaTable[destLclNum].lvVerTypeInfo.GetClassHandle() != lvaTable[srcLclNum].lvVerTypeInfo.GetClassHandle())
+ if (lvaTable[destLclNum].lvVerTypeInfo.GetClassHandle() !=
+ lvaTable[srcLclNum].lvVerTypeInfo.GetClassHandle())
{
- requiresCopyBlock = true; // Mismatched types, leave as a CopyBlock
+ requiresCopyBlock = true; // Mismatched types, leave as a CopyBlock
JITDUMP(" with mismatched types");
}
}
- // Are neither dest or src promoted structs?
- else if (!destDoFldAsg && !srcDoFldAsg)
+ // Are neither dest or src promoted structs?
+ else if (!destDoFldAsg && !srcDoFldAsg)
{
- requiresCopyBlock = true; // Leave as a CopyBlock
+ requiresCopyBlock = true; // Leave as a CopyBlock
JITDUMP(" with no promoted structs");
}
else if (destDoFldAsg)
@@ -8956,22 +9026,20 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// Match the following kinds of trees:
// fgMorphTree BB01, stmt 9 (before)
// [000052] ------------ const int 8
- // [000053] -A--G------- copyBlk void
- // [000051] ------------ addr byref
- // [000050] ------------ lclVar long V07 loc5
- // [000054] --------R--- <list> void
- // [000049] ------------ addr byref
- // [000048] ------------ lclVar struct(P) V06 loc4
+ // [000053] -A--G------- copyBlk void
+ // [000051] ------------ addr byref
+ // [000050] ------------ lclVar long V07 loc5
+ // [000054] --------R--- <list> void
+ // [000049] ------------ addr byref
+ // [000048] ------------ lclVar struct(P) V06 loc4
// long V06.h (offs=0x00) -> V17 tmp9
// Yields this transformation
// fgMorphCopyBlock (after):
- // [000050] ------------ lclVar long V07 loc5
- // [000085] -A---------- = long
- // [000083] D------N---- lclVar long V17 tmp9
+ // [000050] ------------ lclVar long V07 loc5
+ // [000085] -A---------- = long
+ // [000083] D------N---- lclVar long V17 tmp9
//
- if (blockWidthIsConst &&
- (destLclVar->lvFieldCnt == 1) &&
- (srcLclVar != nullptr) &&
+ if (blockWidthIsConst && (destLclVar->lvFieldCnt == 1) && (srcLclVar != nullptr) &&
(blockWidth == genTypeSize(srcLclVar->TypeGet())))
{
// Reject the following tree:
@@ -8979,18 +9047,18 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
//
// fgMorphTree BB01, stmt 6 (before)
// [000038] ------------- const int 4
- // [000039] -A--G-------- copyBlk void
- // [000037] ------------- addr byref
- // [000036] ------------- lclVar int V05 loc3
- // [000040] --------R---- <list> void
- // [000035] ------------- addr byref
- // [000034] ------------- lclVar struct(P) V04 loc2
- // float V04.f1 (offs=0x00) -> V13 tmp6
- // As this would framsform into
+ // [000039] -A--G-------- copyBlk void
+ // [000037] ------------- addr byref
+ // [000036] ------------- lclVar int V05 loc3
+ // [000040] --------R---- <list> void
+ // [000035] ------------- addr byref
+ // [000034] ------------- lclVar struct(P) V04 loc2
+ // float V04.f1 (offs=0x00) -> V13 tmp6
+ // As this would framsform into
// float V13 = int V05
//
- unsigned fieldLclNum = lvaTable[destLclNum].lvFieldLclStart;
- var_types destType = lvaTable[fieldLclNum].TypeGet();
+ unsigned fieldLclNum = lvaTable[destLclNum].lvFieldLclStart;
+ var_types destType = lvaTable[fieldLclNum].TypeGet();
if (srcLclVar->TypeGet() == destType)
{
srcSingleLclVarAsg = true;
@@ -9004,16 +9072,15 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
{
// If a copy block is required then we won't do field by field assignments
destDoFldAsg = false;
- srcDoFldAsg = false;
+ srcDoFldAsg = false;
}
- JITDUMP(requiresCopyBlock ? " this requires a CopyBlock.\n"
- : " using field by field assignments.\n");
+ JITDUMP(requiresCopyBlock ? " this requires a CopyBlock.\n" : " using field by field assignments.\n");
- // Mark the dest/src structs as DoNotEnreg
- // when they are not reg-sized non-field-addressed structs and we are using a CopyBlock
- // or the struct is not promoted
- //
+ // Mark the dest/src structs as DoNotEnreg
+ // when they are not reg-sized non-field-addressed structs and we are using a CopyBlock
+ // or the struct is not promoted
+ //
if (!destDoFldAsg && (destLclVar != nullptr))
{
if (!destLclVar->lvRegStruct)
@@ -9044,13 +9111,13 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// we will convert it into an GC Unsafe CopyBlk that is non-interruptible
// when its size is small enouch to be completely unrolled (i.e. between [16..64] bytes)
//
- if (isCopyObj && destOnStack && blockWidthIsConst &&
- (blockWidth >= (2*TARGET_POINTER_SIZE)) && (blockWidth <= CPBLK_UNROLL_LIMIT))
+ if (isCopyObj && destOnStack && blockWidthIsConst && (blockWidth >= (2 * TARGET_POINTER_SIZE)) &&
+ (blockWidth <= CPBLK_UNROLL_LIMIT))
{
tree->SetOper(GT_COPYBLK);
- tree->AsCpBlk()->gtBlkOpGcUnsafe = true; // Mark as a GC unsage copy block
+ tree->AsCpBlk()->gtBlkOpGcUnsafe = true; // Mark as a GC unsage copy block
blockSize->gtIntConCommon.SetIconValue(ssize_t(blockWidth));
- blockSize->gtFlags &= ~GTF_ICON_HDL_MASK; // Clear the GTF_ICON_CLASS_HDL flags
+ blockSize->gtFlags &= ~GTF_ICON_HDL_MASK; // Clear the GTF_ICON_CLASS_HDL flags
}
#endif
// Liveness doesn't consider copyblk arguments of simple types as being
@@ -9078,22 +9145,24 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
GenTreePtr asg;
GenTreePtr dest;
GenTreePtr src;
- GenTreePtr addrSpill = nullptr;
- unsigned addrSpillTemp = BAD_VAR_NUM;
- bool addrSpillIsStackDest = false; // true if 'addrSpill' represents the address in our local stack frame
+ GenTreePtr addrSpill = nullptr;
+ unsigned addrSpillTemp = BAD_VAR_NUM;
+ bool addrSpillIsStackDest = false; // true if 'addrSpill' represents the address in our local stack frame
- unsigned fieldCnt = DUMMY_INIT(0);
+ unsigned fieldCnt = DUMMY_INIT(0);
if (destDoFldAsg && srcDoFldAsg)
{
// To do fieldwise assignments for both sides, they'd better be the same struct type!
// All of these conditions were checked above...
assert(destLclNum != BAD_VAR_NUM && srcLclNum != BAD_VAR_NUM);
- assert(lvaTable[destLclNum].lvVerTypeInfo.GetClassHandle() == lvaTable[srcLclNum].lvVerTypeInfo.GetClassHandle());
+ assert(lvaTable[destLclNum].lvVerTypeInfo.GetClassHandle() ==
+ lvaTable[srcLclNum].lvVerTypeInfo.GetClassHandle());
assert(destLclVar != nullptr && srcLclVar != nullptr && destLclVar->lvFieldCnt == srcLclVar->lvFieldCnt);
fieldCnt = destLclVar->lvFieldCnt;
- goto _AssignFields; // No need to spill the address to the temp. Go ahead to morph it into field assignments.
+ goto _AssignFields; // No need to spill the address to the temp. Go ahead to morph it into field
+ // assignments.
}
else if (destDoFldAsg)
{
@@ -9120,7 +9189,7 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// address value once...)
if (destLclVar->lvFieldCnt > 1)
{
- addrSpill = gtCloneExpr(srcAddr); // addrSpill represents the 'srcAddr'
+ addrSpill = gtCloneExpr(srcAddr); // addrSpill represents the 'srcAddr'
noway_assert(addrSpill != nullptr);
}
}
@@ -9153,7 +9222,7 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// use the address value once...)
if (srcLclVar->lvFieldCnt > 1)
{
- addrSpill = gtCloneExpr(destAddr); // addrSpill represents the 'destAddr'
+ addrSpill = gtCloneExpr(destAddr); // addrSpill represents the 'destAddr'
noway_assert(addrSpill != nullptr);
}
@@ -9167,9 +9236,9 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// We will *not* consider this to define the local, but rather have each individual field assign
// be a definition.
addrSpill->gtOp.gtOp1->gtFlags &= ~(GTF_LIVENESS_MASK);
- assert(lvaTable[addrSpill->gtOp.gtOp1->gtLclVarCommon.gtLclNum].lvLclBlockOpAddr == 1);
- addrSpillIsStackDest = true; // addrSpill represents the address of LclVar[varNum] in our
- // local stack frame
+ assert(lvaTable[addrSpill->gtOp.gtOp1->gtLclVarCommon.gtLclNum].lvLclBlockOpAddr == 1);
+ addrSpillIsStackDest = true; // addrSpill represents the address of LclVar[varNum] in our
+ // local stack frame
}
}
}
@@ -9188,15 +9257,14 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
lvaTable[addrSpillTemp].lvStackByref = true;
}
- tree = gtNewAssignNode(gtNewLclvNode(addrSpillTemp, TYP_BYREF),
- addrSpill);
+ tree = gtNewAssignNode(gtNewLclvNode(addrSpillTemp, TYP_BYREF), addrSpill);
#ifndef LEGACY_BACKEND
- // If we are assigning the address of a LclVar here
- // liveness does not account for this kind of address taken use.
- //
+ // If we are assigning the address of a LclVar here
+ // liveness does not account for this kind of address taken use.
+ //
// We have to mark this local as address exposed so
- // that we don't delete the definition for this LclVar
+ // that we don't delete the definition for this LclVar
// as a dead store later on.
//
if (addrSpill->OperGet() == GT_ADDR)
@@ -9204,7 +9272,7 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
GenTreePtr addrOp = addrSpill->gtOp.gtOp1;
if (addrOp->IsLocal())
{
- unsigned lclVarNum = addrOp->gtLclVarCommon.gtLclNum;
+ unsigned lclVarNum = addrOp->gtLclVarCommon.gtLclNum;
lvaTable[lclVarNum].lvAddrExposed = true;
lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DNER_AddrExposed));
}
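The address-exposed marking above goes hand in hand with the spill itself: the source (or destination) address is evaluated once into the BYREF temp addrSpillTemp, and every per-field assignment then reuses that temp, so its definition must not be removed as a dead store. A minimal sketch of the same evaluate-once shape at the C++ level, with hypothetical names standing in for the generated field assignments:

// Sketch only: mirror of the addrSpill idea -- compute the address once,
// then let each per-field copy reuse it at its own offset.
struct ThreeInts
{
    int f0, f1, f2; // offsets 0, 4, 8
};

static void copyFields(ThreeInts* dst, char* srcAddr)
{
    char* base = srcAddr; // plays the role of addrSpillTemp
    dst->f0 = *reinterpret_cast<int*>(base + 0);
    dst->f1 = *reinterpret_cast<int*>(base + 4);
    dst->f2 = *reinterpret_cast<int*>(base + 8);
}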
@@ -9214,14 +9282,14 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
_AssignFields:
- for (unsigned i=0; i<fieldCnt; ++i)
+ for (unsigned i = 0; i < fieldCnt; ++i)
{
FieldSeqNode* curFieldSeq = nullptr;
if (destDoFldAsg)
{
noway_assert(destLclNum != BAD_VAR_NUM);
unsigned fieldLclNum = lvaTable[destLclNum].lvFieldLclStart + i;
- dest = gtNewLclvNode(fieldLclNum, lvaTable[fieldLclNum].TypeGet());
+ dest = gtNewLclvNode(fieldLclNum, lvaTable[fieldLclNum].TypeGet());
noway_assert(destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR);
// If it had been labeled a "USEASG", assignments to the the individual promoted fields are not.
@@ -9245,26 +9313,27 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// Is the address of a local?
GenTreeLclVarCommon* lclVarTree = nullptr;
- bool isEntire = false;
- bool* pIsEntire = (blockWidthIsConst ? &isEntire : nullptr);
+ bool isEntire = false;
+ bool* pIsEntire = (blockWidthIsConst ? &isEntire : nullptr);
if (dest->DefinesLocalAddr(this, blockWidth, &lclVarTree, pIsEntire))
{
lclVarTree->gtFlags |= GTF_VAR_DEF;
if (!isEntire)
+ {
lclVarTree->gtFlags |= GTF_VAR_USEASG;
+ }
}
}
GenTreePtr fieldOffsetNode = gtNewIconNode(lvaTable[fieldLclNum].lvFldOffset, TYP_I_IMPL);
// Have to set the field sequence -- which means we need the field handle.
CORINFO_CLASS_HANDLE classHnd = lvaTable[srcLclNum].lvVerTypeInfo.GetClassHandle();
- CORINFO_FIELD_HANDLE fieldHnd = info.compCompHnd->getFieldInClass(classHnd, lvaTable[fieldLclNum].lvFldOrdinal);
- curFieldSeq = GetFieldSeqStore()->CreateSingleton(fieldHnd);
+ CORINFO_FIELD_HANDLE fieldHnd =
+ info.compCompHnd->getFieldInClass(classHnd, lvaTable[fieldLclNum].lvFldOrdinal);
+ curFieldSeq = GetFieldSeqStore()->CreateSingleton(fieldHnd);
fieldOffsetNode->gtIntCon.gtFieldSeq = curFieldSeq;
- dest = gtNewOperNode(GT_ADD, TYP_BYREF,
- dest,
- fieldOffsetNode);
+ dest = gtNewOperNode(GT_ADD, TYP_BYREF, dest, fieldOffsetNode);
dest = gtNewOperNode(GT_IND, lvaTable[fieldLclNum].TypeGet(), dest);
@@ -9273,12 +9342,11 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
dest->gtFlags |= GTF_IND_TGTANYWHERE;
}
-
if (srcDoFldAsg)
{
noway_assert(srcLclNum != BAD_VAR_NUM);
unsigned fieldLclNum = lvaTable[srcLclNum].lvFieldLclStart + i;
- src = gtNewLclvNode(fieldLclNum, lvaTable[fieldLclNum].TypeGet());
+ src = gtNewLclvNode(fieldLclNum, lvaTable[fieldLclNum].TypeGet());
noway_assert(srcAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR);
src->gtFlags |= srcAddr->gtOp.gtOp1->gtFlags & ~GTF_NODE_MASK;
@@ -9292,7 +9360,7 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
if (srcSingleLclVarAsg)
{
noway_assert(fieldCnt == 1);
- noway_assert(srcLclVar != nullptr);
+ noway_assert(srcLclVar != nullptr);
noway_assert(addrSpill == nullptr);
src = gtNewLclvNode(srcLclNum, srcLclVar->TypeGet());
@@ -9311,14 +9379,13 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
}
CORINFO_CLASS_HANDLE classHnd = lvaTable[destLclNum].lvVerTypeInfo.GetClassHandle();
- CORINFO_FIELD_HANDLE fieldHnd = info.compCompHnd->getFieldInClass(classHnd, lvaTable[fieldLclNum].lvFldOrdinal);
+ CORINFO_FIELD_HANDLE fieldHnd =
+ info.compCompHnd->getFieldInClass(classHnd, lvaTable[fieldLclNum].lvFldOrdinal);
curFieldSeq = GetFieldSeqStore()->CreateSingleton(fieldHnd);
- src = gtNewOperNode(GT_ADD, TYP_BYREF,
- src,
- new(this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL,
- lvaTable[fieldLclNum].lvFldOffset,
- curFieldSeq));
+ src = gtNewOperNode(GT_ADD, TYP_BYREF, src,
+ new (this, GT_CNS_INT)
+ GenTreeIntCon(TYP_I_IMPL, lvaTable[fieldLclNum].lvFldOffset, curFieldSeq));
src = gtNewOperNode(GT_IND, lvaTable[fieldLclNum].TypeGet(), src);
}
@@ -9332,9 +9399,8 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
// and it was of a local, record the assignment as an indirect update of a local.
if (addrSpill && !destDoFldAsg && destLclNum != BAD_VAR_NUM)
{
- curFieldSeq = GetFieldSeqStore()->Append(destFldSeq, curFieldSeq);
- bool isEntire = (genTypeSize(var_types(lvaTable[destLclNum].lvType))
- == genTypeSize(dest->TypeGet()));
+ curFieldSeq = GetFieldSeqStore()->Append(destFldSeq, curFieldSeq);
+ bool isEntire = (genTypeSize(var_types(lvaTable[destLclNum].lvType)) == genTypeSize(dest->TypeGet()));
IndirectAssignmentAnnotation* pIndirAnnot =
new (this, CMK_Unknown) IndirectAssignmentAnnotation(destLclNum, curFieldSeq, isEntire);
GetIndirAssignMap()->Set(asg, pIndirAnnot);
@@ -9349,10 +9415,7 @@ GenTreePtr Compiler::fgMorphCopyBlock(GenTreePtr tree)
if (tree)
{
- tree = gtNewOperNode(GT_COMMA,
- TYP_VOID,
- tree,
- asg);
+ tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg);
}
else
{
@@ -9382,13 +9445,12 @@ _Done:
// insert conversions and normalize to make tree amenable to register
// FP architectures
-GenTree* Compiler::fgMorphForRegisterFP(GenTree *tree)
+GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
- if (tree->OperIsArithmetic()
- && varTypeIsFloating(tree))
+ if (tree->OperIsArithmetic() && varTypeIsFloating(tree))
{
if (op1->TypeGet() != tree->TypeGet())
{
@@ -9399,12 +9461,10 @@ GenTree* Compiler::fgMorphForRegisterFP(GenTree *tree)
tree->gtOp.gtOp2 = gtNewCastNode(tree->TypeGet(), tree->gtOp.gtOp2, tree->TypeGet());
}
}
- else if (tree->OperIsCompare()
- && varTypeIsFloating(op1)
- && op1->TypeGet() != op2->TypeGet())
+ else if (tree->OperIsCompare() && varTypeIsFloating(op1) && op1->TypeGet() != op2->TypeGet())
{
// both had better be floating, just one bigger than other
- assert (varTypeIsFloating(op2));
+ assert(varTypeIsFloating(op2));
if (op1->TypeGet() == TYP_FLOAT)
{
tree->gtOp.gtOp1 = gtNewCastNode(TYP_DOUBLE, tree->gtOp.gtOp1, TYP_DOUBLE);
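fgMorphForRegisterFP only makes the usual floating-point promotions explicit in the IR: arithmetic operands are cast up to the node's type, and a mixed float/double comparison is performed entirely in double. A minimal sketch of the compare case at the source level:

// Sketch only: the mixed compare is rewritten so both operands are double,
// i.e. the equivalent of the explicit cast below.
static bool lessThan(float f, double d)
{
    return static_cast<double>(f) < d;
}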
@@ -9420,9 +9480,9 @@ GenTree* Compiler::fgMorphForRegisterFP(GenTree *tree)
GenTree* Compiler::fgMorphRecognizeBoxNullable(GenTree* compare)
{
- GenTree* op1 = compare->gtOp.gtOp1;
- GenTree* op2 = compare->gtOp.gtOp2;
- GenTree* opCns;
+ GenTree* op1 = compare->gtOp.gtOp1;
+ GenTree* op2 = compare->gtOp.gtOp2;
+ GenTree* opCns;
GenTreeCall* opCall;
// recognize this pattern:
@@ -9447,12 +9507,12 @@ GenTree* Compiler::fgMorphRecognizeBoxNullable(GenTree* compare)
if (op1->IsCnsIntOrI() && op2->IsHelperCall())
{
- opCns = op1;
+ opCns = op1;
opCall = op2->AsCall();
}
else if (op1->IsHelperCall() && op2->IsCnsIntOrI())
{
- opCns = op2;
+ opCns = op2;
opCall = op1->AsCall();
}
else
@@ -9461,18 +9521,26 @@ GenTree* Compiler::fgMorphRecognizeBoxNullable(GenTree* compare)
}
if (!opCns->IsIntegralConst(0))
+ {
return compare;
+ }
if (eeGetHelperNum(opCall->gtCallMethHnd) != CORINFO_HELP_BOX_NULLABLE)
+ {
return compare;
+ }
// replace the box with an access of the nullable 'hasValue' field which is at the zero offset
GenTree* newOp = gtNewOperNode(GT_IND, TYP_BOOL, opCall->gtCall.gtCallArgs->gtOp.gtOp2->gtOp.gtOp1);
if (opCall == op1)
+ {
compare->gtOp.gtOp1 = newOp;
+ }
else
+ {
compare->gtOp.gtOp2 = newOp;
+ }
return compare;
}
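The rewrite works because 'hasValue' is the first field of Nullable&lt;T&gt;: whether the boxed result would be null is exactly the value of that byte, so the helper call can be dropped and the byte read directly from the unboxed operand. A minimal sketch of the layout assumption, with hypothetical names:

// Sketch only: hasValue lives at offset zero, which is what the
// GT_IND(TYP_BOOL, valueAddr) replacing the box helper reads.
template <typename T>
struct NullableLayout
{
    bool hasValue; // offset 0
    T    value;
};

template <typename T>
static bool boxedWouldBeNonNull(const NullableLayout<T>* valueAddr)
{
    return valueAddr->hasValue; // stands in for CORINFO_HELP_BOX_NULLABLE(...) != null
}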
@@ -9488,8 +9556,7 @@ GenTree* Compiler::fgMorphRecognizeBoxNullable(GenTree* compare)
// Return Value:
// A comma node where op1 is the assignment of the simd node to a temp, and op2 is the temp lclVar.
//
-GenTree*
-Compiler::fgCopySIMDNode(GenTreeSIMD* simdNode)
+GenTree* Compiler::fgCopySIMDNode(GenTreeSIMD* simdNode)
{
// Copy the result of the SIMD intrinsic into a temp.
unsigned lclNum = lvaGrabTemp(true DEBUGARG("Copy of SIMD intrinsic with field access"));
@@ -9497,56 +9564,68 @@ Compiler::fgCopySIMDNode(GenTreeSIMD* simdNode)
CORINFO_CLASS_HANDLE simdHandle = NO_CLASS_HANDLE;
// We only have fields of the fixed float vectors.
noway_assert(simdNode->gtSIMDBaseType == TYP_FLOAT);
- switch(simdNode->gtSIMDSize)
+ switch (simdNode->gtSIMDSize)
{
- case 8: simdHandle = SIMDVector2Handle; break;
- case 12: simdHandle = SIMDVector3Handle; break;
- case 16: simdHandle = SIMDVector4Handle; break;
- default: noway_assert(!"field of unexpected SIMD type"); break;
+ case 8:
+ simdHandle = SIMDVector2Handle;
+ break;
+ case 12:
+ simdHandle = SIMDVector3Handle;
+ break;
+ case 16:
+ simdHandle = SIMDVector4Handle;
+ break;
+ default:
+ noway_assert(!"field of unexpected SIMD type");
+ break;
}
assert(simdHandle != NO_CLASS_HANDLE);
lvaSetStruct(lclNum, simdHandle, false, true);
lvaTable[lclNum].lvFieldAccessed = true;
- GenTree* asg = gtNewTempAssign(lclNum, simdNode);
+ GenTree* asg = gtNewTempAssign(lclNum, simdNode);
GenTree* newLclVarNode = new (this, GT_LCL_VAR) GenTreeLclVar(simdNode->TypeGet(), lclNum, BAD_IL_OFFSET);
-
+
GenTree* comma = gtNewOperNode(GT_COMMA, simdNode->TypeGet(), asg, newLclVarNode);
return comma;
}
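The comma shape built here is the usual evaluate-once pattern: the intrinsic result is stored to a fresh temp, and the temp, not the intrinsic node itself, becomes the base of the field access. A minimal sketch with hypothetical names:

// Sketch only: copy the SIMD result into a temp, then read the field off the temp.
struct Vec4
{
    float x, y, z, w;
};

static Vec4 simdOp(Vec4 a, Vec4 b) // placeholder for the SIMD intrinsic
{
    return Vec4{a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w};
}

static float fieldOfIntrinsic(Vec4 a, Vec4 b)
{
    Vec4 tmp = simdOp(a, b); // the assignment half of the GT_COMMA
    return tmp.x;            // the lclVar half, now a legal field-access base
}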
//--------------------------------------------------------------------------------------------------------------
-// getSIMDStructFromField:
-// Checking whether the field belongs to a simd struct or not. If it is, return the GenTreePtr for
+// getSIMDStructFromField:
+// Checking whether the field belongs to a simd struct or not. If it is, return the GenTreePtr for
// the struct node, also base type, field index and simd size. If it is not, just return nullptr.
-// Usually if the tree node is from a simd lclvar which is not used in any SIMD intrinsic, then we
-// should return nullptr, since in this case we should treat SIMD struct as a regular struct.
+// Usually if the tree node is from a simd lclvar which is not used in any SIMD intrinsic, then we
+// should return nullptr, since in this case we should treat SIMD struct as a regular struct.
// However if no matter what, you just want get simd struct node, you can set the ignoreUsedInSIMDIntrinsic
// as true. Then there will be no IsUsedInSIMDIntrinsic checking, and it will return SIMD struct node
// if the struct is a SIMD struct.
//
// Arguments:
-// tree - GentreePtr. This node will be checked to see this is a field which belongs to a simd
+// tree - GentreePtr. This node will be checked to see this is a field which belongs to a simd
// struct used for simd intrinsic or not.
-// pBaseTypeOut - var_types pointer, if the tree node is the tree we want, we set *pBaseTypeOut
+// pBaseTypeOut - var_types pointer, if the tree node is the tree we want, we set *pBaseTypeOut
// to simd lclvar's base type.
-// indexOut - unsigned pointer, if the tree is used for simd intrinsic, we will set *indexOut
+// indexOut - unsigned pointer, if the tree is used for simd intrinsic, we will set *indexOut
// equals to the index number of this field.
-// simdSizeOut - unsigned pointer, if the tree is used for simd intrinsic, set the *simdSizeOut
-// equals to the simd struct size which this tree belongs to.
-// ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore
+// simdSizeOut - unsigned pointer, if the tree is used for simd intrinsic, set the *simdSizeOut
+// equals to the simd struct size which this tree belongs to.
+// ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore
// the UsedInSIMDIntrinsic check.
//
// return value:
-// A GenTreePtr which points the simd lclvar tree belongs to. If the tree is not the simd
-// instrinic related field, return nullptr.
+// A GenTreePtr which points the simd lclvar tree belongs to. If the tree is not the simd
+// instrinic related field, return nullptr.
//
-GenTreePtr Compiler::getSIMDStructFromField(GenTreePtr tree, var_types* pBaseTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic/*false*/)
+GenTreePtr Compiler::getSIMDStructFromField(GenTreePtr tree,
+ var_types* pBaseTypeOut,
+ unsigned* indexOut,
+ unsigned* simdSizeOut,
+ bool ignoreUsedInSIMDIntrinsic /*false*/)
{
GenTreePtr ret = nullptr;
- if(tree->OperGet() == GT_FIELD)
+ if (tree->OperGet() == GT_FIELD)
{
GenTreePtr objRef = tree->gtField.gtFldObj;
if (objRef != nullptr)
@@ -9556,7 +9635,7 @@ GenTreePtr Compiler::getSIMDStructFromField(GenTreePtr tree, var_types* pBaseTyp
{
obj = objRef->gtOp.gtOp1;
}
- else if(ignoreUsedInSIMDIntrinsic)
+ else if (ignoreUsedInSIMDIntrinsic)
{
obj = objRef;
}
@@ -9564,127 +9643,123 @@ GenTreePtr Compiler::getSIMDStructFromField(GenTreePtr tree, var_types* pBaseTyp
{
return nullptr;
}
-
+
if (isSIMDTypeLocal(obj))
{
- unsigned lclNum = obj->gtLclVarCommon.gtLclNum;
- LclVarDsc* varDsc = &lvaTable[lclNum];
- if(varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic)
+ unsigned lclNum = obj->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[lclNum];
+ if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic)
{
- *simdSizeOut = varDsc->lvExactSize;
+ *simdSizeOut = varDsc->lvExactSize;
*pBaseTypeOut = getBaseTypeOfSIMDLocal(obj);
- ret = obj;
+ ret = obj;
}
}
else if (obj->OperGet() == GT_SIMD)
{
- ret = obj;
+ ret = obj;
GenTreeSIMD* simdNode = obj->AsSIMD();
- *simdSizeOut = simdNode->gtSIMDSize;
- *pBaseTypeOut = simdNode->gtSIMDBaseType;
+ *simdSizeOut = simdNode->gtSIMDSize;
+ *pBaseTypeOut = simdNode->gtSIMDBaseType;
}
}
}
if (ret != nullptr)
{
unsigned BaseTypeSize = genTypeSize(*pBaseTypeOut);
- *indexOut = tree->gtField.gtFldOffset / BaseTypeSize;
+ *indexOut = tree->gtField.gtFldOffset / BaseTypeSize;
}
return ret;
-}
-
+}
+
/*****************************************************************************
* If a read operation tries to access simd struct field, then transform the this
-* operation to to the SIMD intrinsic SIMDIntrinsicGetItem, and return the new tree.
-* Otherwise, return the old tree.
+* operation to to the SIMD intrinsic SIMDIntrinsicGetItem, and return the new tree.
+* Otherwise, return the old tree.
* Argument:
-* tree - GenTreePtr. If this pointer points to simd struct which is used for simd
-* intrinsic. We will morph it as simd intrinsic SIMDIntrinsicGetItem.
+* tree - GenTreePtr. If this pointer points to simd struct which is used for simd
+* intrinsic. We will morph it as simd intrinsic SIMDIntrinsicGetItem.
* Return:
* A GenTreePtr which points to the new tree. If the tree is not for simd intrinsic,
-* return nullptr.
+* return nullptr.
*/
GenTreePtr Compiler::fgMorphFieldToSIMDIntrinsicGet(GenTreePtr tree)
{
- unsigned index = 0;
- var_types baseType = TYP_UNKNOWN;
- unsigned simdSize = 0;
+ unsigned index = 0;
+ var_types baseType = TYP_UNKNOWN;
+ unsigned simdSize = 0;
GenTreePtr simdStructNode = getSIMDStructFromField(tree, &baseType, &index, &simdSize);
- if(simdStructNode != nullptr)
+ if (simdStructNode != nullptr)
{
-
+
assert(simdSize >= ((index + 1) * genTypeSize(baseType)));
- GenTree* op2 = gtNewIconNode(index);
- tree = gtNewSIMDNode(baseType, simdStructNode, op2, SIMDIntrinsicGetItem, baseType, simdSize);
+ GenTree* op2 = gtNewIconNode(index);
+ tree = gtNewSIMDNode(baseType, simdStructNode, op2, SIMDIntrinsicGetItem, baseType, simdSize);
#ifdef DEBUG
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
return tree;
-}
+}
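The index arithmetic is just offset divided by element size: with a float base type, the field at offset 8 of a Vector3 maps to index 8 / 4 = 2, so reading v.Z becomes SIMDIntrinsicGetItem(v, 2). A minimal sketch of the mapping and the resulting access, assuming a plain float-vector layout:

// Sketch only: getSIMDStructFromField's offset-to-index mapping plus the
// element read that SIMDIntrinsicGetItem stands for.
static unsigned fieldOffsetToIndex(unsigned fieldOffset, unsigned baseTypeSize)
{
    return fieldOffset / baseTypeSize; // e.g. 8 / 4 == 2 for Vector3.Z
}

struct Vector3Layout
{
    float element[3]; // X, Y, Z at offsets 0, 4, 8
};

static float getItem(const Vector3Layout& v, unsigned index)
{
    return v.element[index]; // stand-in for SIMDIntrinsicGetItem(v, index)
}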
/*****************************************************************************
-* Transform an assignment of a SIMD struct field to SIMD intrinsic
+* Transform an assignment of a SIMD struct field to SIMD intrinsic
* SIMDIntrinsicGetItem, and return a new tree. If If it is not such an assignment,
-* then return the old tree.
+* then return the old tree.
* Argument:
-* tree - GenTreePtr. If this pointer points to simd struct which is used for simd
-* intrinsic. We will morph it as simd intrinsic set.
+* tree - GenTreePtr. If this pointer points to simd struct which is used for simd
+* intrinsic. We will morph it as simd intrinsic set.
* Return:
* A GenTreePtr which points to the new tree. If the tree is not for simd intrinsic,
-* return nullptr.
+* return nullptr.
*/
-GenTreePtr Compiler::fgMorphFieldAssignToSIMDIntrinsicSet(GenTreePtr tree)
+GenTreePtr Compiler::fgMorphFieldAssignToSIMDIntrinsicSet(GenTreePtr tree)
{
assert(tree->OperGet() == GT_ASG);
GenTreePtr op1 = tree->gtGetOp1();
GenTreePtr op2 = tree->gtGetOp2();
-
- unsigned index = 0;
- var_types baseType = TYP_UNKNOWN;
- unsigned simdSize = 0;
+
+ unsigned index = 0;
+ var_types baseType = TYP_UNKNOWN;
+ unsigned simdSize = 0;
GenTreePtr simdOp1Struct = getSIMDStructFromField(op1, &baseType, &index, &simdSize);
if (simdOp1Struct != nullptr)
{
- //Generate the simd set intrinsic
+ // Generate the simd set intrinsic
assert(simdSize >= ((index + 1) * genTypeSize(baseType)));
-
- SIMDIntrinsicID simdIntrinsicID = SIMDIntrinsicInvalid;
+
+ SIMDIntrinsicID simdIntrinsicID = SIMDIntrinsicInvalid;
switch (index)
{
- case 0:
- simdIntrinsicID = SIMDIntrinsicSetX;
- break;
- case 1:
- simdIntrinsicID = SIMDIntrinsicSetY;
- break;
- case 2:
- simdIntrinsicID = SIMDIntrinsicSetZ;
- break;
- case 3:
- simdIntrinsicID = SIMDIntrinsicSetW;
- break;
- default:
- noway_assert(!"There is no set intrinsic for index bigger than 3");
+ case 0:
+ simdIntrinsicID = SIMDIntrinsicSetX;
+ break;
+ case 1:
+ simdIntrinsicID = SIMDIntrinsicSetY;
+ break;
+ case 2:
+ simdIntrinsicID = SIMDIntrinsicSetZ;
+ break;
+ case 3:
+ simdIntrinsicID = SIMDIntrinsicSetW;
+ break;
+ default:
+ noway_assert(!"There is no set intrinsic for index bigger than 3");
}
-
GenTreePtr newStruct = gtClone(simdOp1Struct);
assert((newStruct != nullptr) && (varTypeIsSIMD(newStruct)));
GenTreePtr simdTree = gtNewSIMDNode(newStruct->gtType, simdOp1Struct, op2, simdIntrinsicID, baseType, simdSize);
GenTreePtr copyBlkDst = gtNewOperNode(GT_ADDR, TYP_BYREF, newStruct);
- tree = gtNewBlkOpNode(GT_COPYBLK,
- copyBlkDst,
- gtNewOperNode(GT_ADDR, TYP_BYREF, simdTree),
- gtNewIconNode(simdSize),
- false);
+ tree = gtNewBlkOpNode(GT_COPYBLK, copyBlkDst, gtNewOperNode(GT_ADDR, TYP_BYREF, simdTree),
+ gtNewIconNode(simdSize), false);
#ifdef DEBUG
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
-
+
return tree;
}
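So an assignment such as v.Y = f is rebuilt as "compute a whole vector equal to v with lane 1 replaced (SIMDIntrinsicSetY), then block-copy that vector back over v". A minimal sketch of that set-lane-then-copy-back shape, with hypothetical names:

// Sketch only: the structure produced above -- a set-lane intrinsic feeding a block copy.
#include <cstring>

struct Vector4Layout
{
    float element[4];
};

static Vector4Layout setItem(Vector4Layout v, unsigned index, float newValue)
{
    v.element[index] = newValue; // stand-in for SIMDIntrinsicSetX/Y/Z/W
    return v;
}

static void assignField(Vector4Layout* dst, unsigned index, float newValue)
{
    Vector4Layout tmp = setItem(*dst, index, newValue);
    std::memcpy(dst, &tmp, sizeof(tmp)); // stand-in for the GT_COPYBLK back over the struct
}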
@@ -9696,2760 +9771,2811 @@ GenTreePtr Compiler::fgMorphFieldAssignToSIMDIntrinsicSet(GenTreePtr tree)
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-GenTreePtr Compiler::fgMorphSmpOp(GenTreePtr tree, MorphAddrContext* mac)
+GenTreePtr Compiler::fgMorphSmpOp(GenTreePtr tree, MorphAddrContext* mac)
{
// this extra scope is a workaround for a gcc bug
// the inline destructor for ALLOCA_CHECK confuses the control
// flow and gcc thinks that the function never returns
{
- ALLOCA_CHECK();
- noway_assert(tree->OperKind() & GTK_SMPOP);
+ ALLOCA_CHECK();
+ noway_assert(tree->OperKind() & GTK_SMPOP);
- /* The steps in this function are :
- o Perform required preorder processing
- o Process the first, then second operand, if any
- o Perform required postorder morphing
- o Perform optional postorder morphing if optimizing
- */
+ /* The steps in this function are :
+ o Perform required preorder processing
+ o Process the first, then second operand, if any
+ o Perform required postorder morphing
+ o Perform optional postorder morphing if optimizing
+ */
- bool isQmarkColon = false;
+ bool isQmarkColon = false;
#if LOCAL_ASSERTION_PROP
- AssertionIndex origAssertionCount = DUMMY_INIT(0);
- AssertionDsc * origAssertionTab = DUMMY_INIT(NULL);
+ AssertionIndex origAssertionCount = DUMMY_INIT(0);
+ AssertionDsc* origAssertionTab = DUMMY_INIT(NULL);
- AssertionIndex thenAssertionCount = DUMMY_INIT(0);
- AssertionDsc * thenAssertionTab = DUMMY_INIT(NULL);
+ AssertionIndex thenAssertionCount = DUMMY_INIT(0);
+ AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL);
#endif
- if (fgGlobalMorph)
- {
+ if (fgGlobalMorph)
+ {
#if !FEATURE_STACK_FP_X87
- tree = fgMorphForRegisterFP(tree);
+ tree = fgMorphForRegisterFP(tree);
#endif
- }
+ }
- genTreeOps oper = tree->OperGet();
- var_types typ = tree->TypeGet();
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ genTreeOps oper = tree->OperGet();
+ var_types typ = tree->TypeGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
- /*-------------------------------------------------------------------------
- * First do any PRE-ORDER processing
- */
+ /*-------------------------------------------------------------------------
+ * First do any PRE-ORDER processing
+ */
- switch (oper)
- {
- // Some arithmetic operators need to use a helper call to the EE
- int helper;
+ switch (oper)
+ {
+ // Some arithmetic operators need to use a helper call to the EE
+ int helper;
- case GT_ASG:
- tree = fgDoNormalizeOnStore(tree);
- /* fgDoNormalizeOnStore can change op2 */
- noway_assert(op1 == tree->gtOp.gtOp1);
- op2 = tree->gtOp.gtOp2;
+ case GT_ASG:
+ tree = fgDoNormalizeOnStore(tree);
+ /* fgDoNormalizeOnStore can change op2 */
+ noway_assert(op1 == tree->gtOp.gtOp1);
+ op2 = tree->gtOp.gtOp2;
#ifdef FEATURE_SIMD
- {
- // We should check whether op2 should be assigned to a SIMD field or not.
- // if it is, we should tranlate the tree to simd intrinsic
- GenTreePtr newTree = fgMorphFieldAssignToSIMDIntrinsicSet(tree);
- if (newTree != tree)
- {
- tree = newTree;
- oper = tree->OperGet();
- typ = tree->TypeGet();
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtGetOp2();
- }
- }
+ {
+ // We should check whether op2 should be assigned to a SIMD field or not.
+ // if it is, we should tranlate the tree to simd intrinsic
+ GenTreePtr newTree = fgMorphFieldAssignToSIMDIntrinsicSet(tree);
+ if (newTree != tree)
+ {
+ tree = newTree;
+ oper = tree->OperGet();
+ typ = tree->TypeGet();
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtGetOp2();
+ }
+ }
#endif
- __fallthrough;
-
- case GT_ASG_ADD:
- case GT_ASG_SUB:
- case GT_ASG_MUL:
- case GT_ASG_DIV:
- case GT_ASG_MOD:
- case GT_ASG_UDIV:
- case GT_ASG_UMOD:
- case GT_ASG_OR:
- case GT_ASG_XOR:
- case GT_ASG_AND:
- case GT_ASG_LSH:
- case GT_ASG_RSH:
- case GT_ASG_RSZ:
- case GT_CHS:
-
- /* We can't CSE the LHS of an assignment. Only r-values can be CSEed */
- op1->gtFlags |= GTF_DONT_CSE;
- break;
+ __fallthrough;
+
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ case GT_ASG_MUL:
+ case GT_ASG_DIV:
+ case GT_ASG_MOD:
+ case GT_ASG_UDIV:
+ case GT_ASG_UMOD:
+ case GT_ASG_OR:
+ case GT_ASG_XOR:
+ case GT_ASG_AND:
+ case GT_ASG_LSH:
+ case GT_ASG_RSH:
+ case GT_ASG_RSZ:
+ case GT_CHS:
+
+ /* We can't CSE the LHS of an assignment. Only r-values can be CSEed */
+ op1->gtFlags |= GTF_DONT_CSE;
+ break;
- case GT_ADDR:
+ case GT_ADDR:
- /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
- op1->gtFlags |= GTF_DONT_CSE;
- break;
+ /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
+ op1->gtFlags |= GTF_DONT_CSE;
+ break;
- case GT_QMARK:
- case GT_JTRUE:
+ case GT_QMARK:
+ case GT_JTRUE:
- noway_assert(op1);
+ noway_assert(op1);
- if (op1->OperKind() & GTK_RELOP)
- {
- noway_assert((oper == GT_JTRUE) || (op1->gtFlags & GTF_RELOP_QMARK));
- /* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does
- not need to materialize the result as a 0 or 1. */
+ if (op1->OperKind() & GTK_RELOP)
+ {
+ noway_assert((oper == GT_JTRUE) || (op1->gtFlags & GTF_RELOP_QMARK));
+ /* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does
+ not need to materialize the result as a 0 or 1. */
- /* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */
- op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
+ /* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */
+ op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
- // Request that the codegen for op1 sets the condition flags
- // when it generates the code for op1.
- //
- // Codegen for op1 must set the condition flags if
- // this method returns true.
- //
- op1->gtRequestSetFlags();
- }
- else
- {
- GenTreePtr effOp1 = op1->gtEffectiveVal();
- noway_assert((effOp1->gtOper == GT_CNS_INT) &&
- (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
- }
- break;
+ // Request that the codegen for op1 sets the condition flags
+ // when it generates the code for op1.
+ //
+ // Codegen for op1 must set the condition flags if
+ // this method returns true.
+ //
+ op1->gtRequestSetFlags();
+ }
+ else
+ {
+ GenTreePtr effOp1 = op1->gtEffectiveVal();
+ noway_assert((effOp1->gtOper == GT_CNS_INT) &&
+ (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
+ }
+ break;
- case GT_COLON:
+ case GT_COLON:
#if LOCAL_ASSERTION_PROP
- if (optLocalAssertionProp)
+ if (optLocalAssertionProp)
+ {
#endif
- isQmarkColon = true;
- break;
+ isQmarkColon = true;
+ }
+ break;
- case GT_INDEX:
- return fgMorphArrayIndex(tree);
+ case GT_INDEX:
+ return fgMorphArrayIndex(tree);
- case GT_CAST:
- return fgMorphCast(tree);
+ case GT_CAST:
+ return fgMorphCast(tree);
- case GT_MUL:
+ case GT_MUL:
#ifndef _TARGET_64BIT_
- if (typ == TYP_LONG)
- {
- /* For (long)int1 * (long)int2, we dont actually do the
- casts, and just multiply the 32 bit values, which will
- give us the 64 bit result in edx:eax */
-
- noway_assert(op2);
- if ((op1->gtOper == GT_CAST &&
- op2->gtOper == GT_CAST &&
- genActualType(op1->CastFromType()) == TYP_INT &&
- genActualType(op2->CastFromType()) == TYP_INT)&&
- !op1->gtOverflow() && !op2->gtOverflow())
- {
- // The casts have to be of the same signedness.
- if ((op1->gtFlags & GTF_UNSIGNED) != (op2->gtFlags & GTF_UNSIGNED))
- {
- //We see if we can force an int constant to change its signedness
- GenTreePtr constOp;
- if (op1->gtCast.CastOp()->gtOper == GT_CNS_INT)
- constOp = op1;
- else if (op2->gtCast.CastOp()->gtOper == GT_CNS_INT)
- constOp = op2;
- else
- goto NO_MUL_64RSLT;
+ if (typ == TYP_LONG)
+ {
+ /* For (long)int1 * (long)int2, we dont actually do the
+ casts, and just multiply the 32 bit values, which will
+ give us the 64 bit result in edx:eax */
+
+ noway_assert(op2);
+ if ((op1->gtOper == GT_CAST && op2->gtOper == GT_CAST &&
+ genActualType(op1->CastFromType()) == TYP_INT &&
+ genActualType(op2->CastFromType()) == TYP_INT) &&
+ !op1->gtOverflow() && !op2->gtOverflow())
+ {
+ // The casts have to be of the same signedness.
+ if ((op1->gtFlags & GTF_UNSIGNED) != (op2->gtFlags & GTF_UNSIGNED))
+ {
+ // We see if we can force an int constant to change its signedness
+ GenTreePtr constOp;
+ if (op1->gtCast.CastOp()->gtOper == GT_CNS_INT)
+ constOp = op1;
+ else if (op2->gtCast.CastOp()->gtOper == GT_CNS_INT)
+ constOp = op2;
+ else
+ goto NO_MUL_64RSLT;
- if ( ((unsigned)(constOp->gtCast.CastOp()->gtIntCon.gtIconVal) < (unsigned)(0x80000000)) )
- constOp->gtFlags ^= GTF_UNSIGNED;
- else
- goto NO_MUL_64RSLT;
- }
+ if (((unsigned)(constOp->gtCast.CastOp()->gtIntCon.gtIconVal) < (unsigned)(0x80000000)))
+ constOp->gtFlags ^= GTF_UNSIGNED;
+ else
+ goto NO_MUL_64RSLT;
+ }
- // The only combination that can overflow
- if (tree->gtOverflow() && (tree->gtFlags & GTF_UNSIGNED) &&
- !( op1->gtFlags & GTF_UNSIGNED))
- goto NO_MUL_64RSLT;
+ // The only combination that can overflow
+ if (tree->gtOverflow() && (tree->gtFlags & GTF_UNSIGNED) && !(op1->gtFlags & GTF_UNSIGNED))
+ goto NO_MUL_64RSLT;
- /* Remaining combinations can never overflow during long mul. */
+ /* Remaining combinations can never overflow during long mul. */
- tree->gtFlags &= ~GTF_OVERFLOW;
+ tree->gtFlags &= ~GTF_OVERFLOW;
- /* Do unsigned mul only if the casts were unsigned */
+ /* Do unsigned mul only if the casts were unsigned */
- tree->gtFlags &= ~GTF_UNSIGNED;
- tree->gtFlags |= op1->gtFlags & GTF_UNSIGNED;
+ tree->gtFlags &= ~GTF_UNSIGNED;
+ tree->gtFlags |= op1->gtFlags & GTF_UNSIGNED;
- /* Since we are committing to GTF_MUL_64RSLT, we don't want
- the casts to be folded away. So morph the castees directly */
+ /* Since we are committing to GTF_MUL_64RSLT, we don't want
+ the casts to be folded away. So morph the castees directly */
- op1->gtOp.gtOp1 = fgMorphTree(op1->gtOp.gtOp1);
- op2->gtOp.gtOp1 = fgMorphTree(op2->gtOp.gtOp1);
+ op1->gtOp.gtOp1 = fgMorphTree(op1->gtOp.gtOp1);
+ op2->gtOp.gtOp1 = fgMorphTree(op2->gtOp.gtOp1);
- // Propagate side effect flags up the tree
- op1->gtFlags &= ~GTF_ALL_EFFECT;
- op1->gtFlags |= (op1->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
- op2->gtFlags &= ~GTF_ALL_EFFECT;
- op2->gtFlags |= (op2->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
+ // Propagate side effect flags up the tree
+ op1->gtFlags &= ~GTF_ALL_EFFECT;
+ op1->gtFlags |= (op1->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
+ op2->gtFlags &= ~GTF_ALL_EFFECT;
+ op2->gtFlags |= (op2->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
- // If the GT_MUL can be altogether folded away, we should do that.
+ // If the GT_MUL can be altogether folded away, we should do that.
- if ((op1->gtCast.CastOp()->OperKind() &
- op2->gtCast.CastOp()->OperKind() & GTK_CONST) && opts.OptEnabled(CLFLG_CONSTANTFOLD))
- {
- tree->gtOp.gtOp1 = op1 = gtFoldExprConst(op1);
- tree->gtOp.gtOp2 = op2 = gtFoldExprConst(op2);
- noway_assert(op1->OperKind() & op2->OperKind() & GTK_CONST);
- tree = gtFoldExprConst(tree);
- noway_assert(tree->OperIsConst());
- return tree;
- }
+ if ((op1->gtCast.CastOp()->OperKind() & op2->gtCast.CastOp()->OperKind() & GTK_CONST) &&
+ opts.OptEnabled(CLFLG_CONSTANTFOLD))
+ {
+ tree->gtOp.gtOp1 = op1 = gtFoldExprConst(op1);
+ tree->gtOp.gtOp2 = op2 = gtFoldExprConst(op2);
+ noway_assert(op1->OperKind() & op2->OperKind() & GTK_CONST);
+ tree = gtFoldExprConst(tree);
+ noway_assert(tree->OperIsConst());
+ return tree;
+ }
- tree->gtFlags |= GTF_MUL_64RSLT;
+ tree->gtFlags |= GTF_MUL_64RSLT;
- // If op1 and op2 are unsigned casts, we need to do an unsigned mult
- tree->gtFlags |= (op1->gtFlags & GTF_UNSIGNED);
+ // If op1 and op2 are unsigned casts, we need to do an unsigned mult
+ tree->gtFlags |= (op1->gtFlags & GTF_UNSIGNED);
- // Insert GT_NOP nodes for the cast operands so that they do not get folded
- // And propagate the new flags. We don't want to CSE the casts because
- // codegen expects GTF_MUL_64RSLT muls to have a certain layout.
+ // Insert GT_NOP nodes for the cast operands so that they do not get folded
+ // And propagate the new flags. We don't want to CSE the casts because
+ // codegen expects GTF_MUL_64RSLT muls to have a certain layout.
- if (op1->gtCast.CastOp()->OperGet() != GT_NOP)
- {
- op1->gtOp.gtOp1 = gtNewOperNode(GT_NOP, TYP_INT, op1->gtCast.CastOp());
- op1->gtFlags &= ~GTF_ALL_EFFECT;
- op1->gtFlags |= (op1->gtCast.CastOp()->gtFlags & GTF_ALL_EFFECT);
- op1->gtFlags |= GTF_DONT_CSE;
- }
+ if (op1->gtCast.CastOp()->OperGet() != GT_NOP)
+ {
+ op1->gtOp.gtOp1 = gtNewOperNode(GT_NOP, TYP_INT, op1->gtCast.CastOp());
+ op1->gtFlags &= ~GTF_ALL_EFFECT;
+ op1->gtFlags |= (op1->gtCast.CastOp()->gtFlags & GTF_ALL_EFFECT);
+ op1->gtFlags |= GTF_DONT_CSE;
+ }
- if (op2->gtCast.CastOp()->OperGet() != GT_NOP)
- {
- op2->gtOp.gtOp1 = gtNewOperNode(GT_NOP, TYP_INT, op2->gtCast.CastOp());
- op2->gtFlags &= ~GTF_ALL_EFFECT;
- op2->gtFlags |= (op2->gtCast.CastOp()->gtFlags & GTF_ALL_EFFECT);
- op2->gtFlags |= GTF_DONT_CSE;
- }
+ if (op2->gtCast.CastOp()->OperGet() != GT_NOP)
+ {
+ op2->gtOp.gtOp1 = gtNewOperNode(GT_NOP, TYP_INT, op2->gtCast.CastOp());
+ op2->gtFlags &= ~GTF_ALL_EFFECT;
+ op2->gtFlags |= (op2->gtCast.CastOp()->gtFlags & GTF_ALL_EFFECT);
+ op2->gtFlags |= GTF_DONT_CSE;
+ }
- tree->gtFlags &= ~GTF_ALL_EFFECT;
- tree->gtFlags |= ((op1->gtFlags | op2->gtFlags) & GTF_ALL_EFFECT);
+ tree->gtFlags &= ~GTF_ALL_EFFECT;
+ tree->gtFlags |= ((op1->gtFlags | op2->gtFlags) & GTF_ALL_EFFECT);
- goto DONE_MORPHING_CHILDREN;
- }
- else if ((tree->gtFlags & GTF_MUL_64RSLT) == 0)
- {
-NO_MUL_64RSLT:
- if (tree->gtOverflow())
- helper = (tree->gtFlags & GTF_UNSIGNED) ? CORINFO_HELP_ULMUL_OVF
- : CORINFO_HELP_LMUL_OVF;
- else
- helper = CORINFO_HELP_LMUL;
+ goto DONE_MORPHING_CHILDREN;
+ }
+ else if ((tree->gtFlags & GTF_MUL_64RSLT) == 0)
+ {
+ NO_MUL_64RSLT:
+ if (tree->gtOverflow())
+ helper = (tree->gtFlags & GTF_UNSIGNED) ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF;
+ else
+ helper = CORINFO_HELP_LMUL;
- goto USE_HELPER_FOR_ARITH;
- }
- else
- {
- /* We are seeing this node again. We have decided to use
- GTF_MUL_64RSLT, so leave it alone. */
+ goto USE_HELPER_FOR_ARITH;
+ }
+ else
+ {
+ /* We are seeing this node again. We have decided to use
+ GTF_MUL_64RSLT, so leave it alone. */
- assert(tree->gtIsValid64RsltMul());
- }
- }
+ assert(tree->gtIsValid64RsltMul());
+ }
+ }
#endif // !_TARGET_64BIT_
- break;
-
+ break;
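The shape being recognized here is the classic widening multiply: when both operands are 32-bit values cast to long (with matching signedness), the full 64-bit product comes from a single 32x32->64 multiply -- edx:eax on x86 -- instead of a call to the long-multiply helper. A minimal sketch of the qualifying source shape:

// Sketch only: the pattern GTF_MUL_64RSLT covers -- one hardware multiply
// produces the full 64-bit result.
#include <cstdint>

static int64_t widenedMul(int32_t a, int32_t b)
{
    return static_cast<int64_t>(a) * static_cast<int64_t>(b);
}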
- case GT_DIV:
+ case GT_DIV:
#ifndef _TARGET_64BIT_
- if (typ == TYP_LONG)
- {
- helper = CORINFO_HELP_LDIV;
- goto USE_HELPER_FOR_ARITH;
- }
+ if (typ == TYP_LONG)
+ {
+ helper = CORINFO_HELP_LDIV;
+ goto USE_HELPER_FOR_ARITH;
+ }
-#if USE_HELPERS_FOR_INT_DIV
- if (typ == TYP_INT && !fgIsSignedDivOptimizable(op2))
- {
- helper = CORINFO_HELP_DIV;
- goto USE_HELPER_FOR_ARITH;
- }
+#if USE_HELPERS_FOR_INT_DIV
+ if (typ == TYP_INT && !fgIsSignedDivOptimizable(op2))
+ {
+ helper = CORINFO_HELP_DIV;
+ goto USE_HELPER_FOR_ARITH;
+ }
#endif
#endif // !_TARGET_64BIT_
#ifndef LEGACY_BACKEND
- if (op2->gtOper == GT_CAST && op2->gtOp.gtOp1->IsCnsIntOrI())
- {
- op2 = gtFoldExprConst(op2);
- }
-
- if (fgShouldUseMagicNumberDivide(tree->AsOp()))
- {
- tree = fgMorphDivByConst(tree->AsOp());
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtOp.gtOp2;
- }
-#endif // !LEGACY_BACKEND
- break;
+ if (op2->gtOper == GT_CAST && op2->gtOp.gtOp1->IsCnsIntOrI())
+ {
+ op2 = gtFoldExprConst(op2);
+ }
+ if (fgShouldUseMagicNumberDivide(tree->AsOp()))
+ {
+ tree = fgMorphDivByConst(tree->AsOp());
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtOp.gtOp2;
+ }
+#endif // !LEGACY_BACKEND
+ break;
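fgShouldUseMagicNumberDivide / fgMorphDivByConst replace division by a constant with a multiply by a precomputed reciprocal plus shifts, avoiding the much slower divide instruction. The exact constants and fix-ups are per-divisor and live in fgMorphDivByConst; the snippet below is only a standalone illustration of the technique for one well-known case, unsigned division by 3:

// Sketch only: magic-number division for u / 3.
// 0xAAAAAAAB == ceil(2^33 / 3), and (u * 0xAAAAAAAB) >> 33 == u / 3 for every uint32_t u.
#include <cstdint>

static uint32_t divideBy3(uint32_t u)
{
    return static_cast<uint32_t>((static_cast<uint64_t>(u) * 0xAAAAAAABu) >> 33);
}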
- case GT_UDIV:
+ case GT_UDIV:
#ifndef _TARGET_64BIT_
- if (typ == TYP_LONG)
- {
- helper = CORINFO_HELP_ULDIV;
- goto USE_HELPER_FOR_ARITH;
- }
-#if USE_HELPERS_FOR_INT_DIV
- if (typ == TYP_INT && !fgIsUnsignedDivOptimizable(op2))
- {
- helper = CORINFO_HELP_UDIV;
- goto USE_HELPER_FOR_ARITH;
- }
+ if (typ == TYP_LONG)
+ {
+ helper = CORINFO_HELP_ULDIV;
+ goto USE_HELPER_FOR_ARITH;
+ }
+#if USE_HELPERS_FOR_INT_DIV
+ if (typ == TYP_INT && !fgIsUnsignedDivOptimizable(op2))
+ {
+ helper = CORINFO_HELP_UDIV;
+ goto USE_HELPER_FOR_ARITH;
+ }
#endif
#endif // _TARGET_64BIT_
- break;
-
+ break;
- case GT_MOD:
+ case GT_MOD:
- if (varTypeIsFloating(typ))
- {
- helper = CORINFO_HELP_DBLREM;
- noway_assert(op2);
- if (op1->TypeGet() == TYP_FLOAT)
- if (op2->TypeGet() == TYP_FLOAT)
- helper = CORINFO_HELP_FLTREM;
- else
- tree->gtOp.gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
- else
- if (op2->TypeGet() == TYP_FLOAT)
- tree->gtOp.gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
- goto USE_HELPER_FOR_ARITH;
- }
+ if (varTypeIsFloating(typ))
+ {
+ helper = CORINFO_HELP_DBLREM;
+ noway_assert(op2);
+ if (op1->TypeGet() == TYP_FLOAT)
+ {
+ if (op2->TypeGet() == TYP_FLOAT)
+ {
+ helper = CORINFO_HELP_FLTREM;
+ }
+ else
+ {
+ tree->gtOp.gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
+ }
+ }
+ else if (op2->TypeGet() == TYP_FLOAT)
+ {
+ tree->gtOp.gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
+ }
+ goto USE_HELPER_FOR_ARITH;
+ }
- // Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
- // A similar optimization for signed mod will not work for a negative perfectly divisible
- // HI-word. To make it correct, we would need to divide without the sign and then flip the
- // result sign after mod. This requires 18 opcodes + flow making it not worthy to inline.
- goto ASSIGN_HELPER_FOR_MOD;
+ // Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
+ // A similar optimization for signed mod will not work for a negative perfectly divisible
+ // HI-word. To make it correct, we would need to divide without the sign and then flip the
+ // result sign after mod. This requires 18 opcodes + flow making it not worthy to inline.
+ goto ASSIGN_HELPER_FOR_MOD;
- case GT_UMOD:
+ case GT_UMOD:
#ifdef _TARGET_ARMARCH_
- //
- // Note for _TARGET_ARMARCH_ we don't have a remainder instruction, so we don't do this optimization
- //
-#else // _TARGET_XARCH
- /* If this is an unsigned long mod with op2 which is a cast to long from a
- constant int, then don't morph to a call to the helper. This can be done
- faster inline using idiv.
- */
+//
+// Note for _TARGET_ARMARCH_ we don't have a remainder instruction, so we don't do this optimization
+//
+#else // _TARGET_XARCH
+ /* If this is an unsigned long mod with op2 which is a cast to long from a
+ constant int, then don't morph to a call to the helper. This can be done
+ faster inline using idiv.
+ */
- noway_assert(op2);
- if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD) &&
- ((tree->gtFlags & GTF_UNSIGNED) == (op1->gtFlags & GTF_UNSIGNED)) &&
- ((tree->gtFlags & GTF_UNSIGNED) == (op2->gtFlags & GTF_UNSIGNED)))
- {
- if (op2->gtOper == GT_CAST &&
- op2->gtCast.CastOp()->gtOper == GT_CNS_INT &&
- op2->gtCast.CastOp()->gtIntCon.gtIconVal >= 2 &&
- op2->gtCast.CastOp()->gtIntCon.gtIconVal <= 0x3fffffff &&
- (tree->gtFlags & GTF_UNSIGNED) == (op2->gtCast.CastOp()->gtFlags & GTF_UNSIGNED))
- {
- tree->gtOp.gtOp2 = op2 = fgMorphCast(op2);
- noway_assert(op2->gtOper == GT_CNS_NATIVELONG);
- }
+ noway_assert(op2);
+ if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD) &&
+ ((tree->gtFlags & GTF_UNSIGNED) == (op1->gtFlags & GTF_UNSIGNED)) &&
+ ((tree->gtFlags & GTF_UNSIGNED) == (op2->gtFlags & GTF_UNSIGNED)))
+ {
+ if (op2->gtOper == GT_CAST && op2->gtCast.CastOp()->gtOper == GT_CNS_INT &&
+ op2->gtCast.CastOp()->gtIntCon.gtIconVal >= 2 &&
+ op2->gtCast.CastOp()->gtIntCon.gtIconVal <= 0x3fffffff &&
+ (tree->gtFlags & GTF_UNSIGNED) == (op2->gtCast.CastOp()->gtFlags & GTF_UNSIGNED))
+ {
+ tree->gtOp.gtOp2 = op2 = fgMorphCast(op2);
+ noway_assert(op2->gtOper == GT_CNS_NATIVELONG);
+ }
- if (op2->gtOper == GT_CNS_NATIVELONG &&
- op2->gtIntConCommon.LngValue() >= 2 &&
- op2->gtIntConCommon.LngValue() <= 0x3fffffff)
- {
- tree->gtOp.gtOp1 = op1 = fgMorphTree(op1);
- noway_assert(op1->TypeGet() == TYP_LONG);
+ if (op2->gtOper == GT_CNS_NATIVELONG && op2->gtIntConCommon.LngValue() >= 2 &&
+ op2->gtIntConCommon.LngValue() <= 0x3fffffff)
+ {
+ tree->gtOp.gtOp1 = op1 = fgMorphTree(op1);
+ noway_assert(op1->TypeGet() == TYP_LONG);
- // Update flags for op1 morph
- tree->gtFlags &= ~GTF_ALL_EFFECT;
+ // Update flags for op1 morph
+ tree->gtFlags &= ~GTF_ALL_EFFECT;
- tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // Only update with op1 as op2 is a constant
+ tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // Only update with op1 as op2 is a constant
- // If op1 is a constant, then do constant folding of the division operator
- if (op1->gtOper == GT_CNS_NATIVELONG)
- {
- tree = gtFoldExpr(tree);
+ // If op1 is a constant, then do constant folding of the division operator
+ if (op1->gtOper == GT_CNS_NATIVELONG)
+ {
+ tree = gtFoldExpr(tree);
+ }
+ return tree;
}
- return tree;
}
- }
#endif // _TARGET_XARCH
- ASSIGN_HELPER_FOR_MOD:
+ ASSIGN_HELPER_FOR_MOD:
- // For "val % 1", return 0 if op1 doesn't have any side effects
- // and we are not in the CSE phase, we cannot discard 'tree'
- // because it may contain CSE expressions that we haven't yet examined.
- //
- if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
- {
- if (op2->IsIntegralConst(1))
- {
- GenTreePtr zeroNode = gtNewZeroConNode(typ);
+ // For "val % 1", return 0 if op1 doesn't have any side effects
+ // and we are not in the CSE phase, we cannot discard 'tree'
+ // because it may contain CSE expressions that we haven't yet examined.
+ //
+ if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
+ {
+ if (op2->IsIntegralConst(1))
+ {
+ GenTreePtr zeroNode = gtNewZeroConNode(typ);
#ifdef DEBUG
- zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
- DEBUG_DESTROY_NODE(tree);
- return zeroNode;
- }
- }
+ DEBUG_DESTROY_NODE(tree);
+ return zeroNode;
+ }
+ }
-#ifndef _TARGET_64BIT_
- if (typ == TYP_LONG)
- {
- helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
- goto USE_HELPER_FOR_ARITH;
- }
+#ifndef _TARGET_64BIT_
+ if (typ == TYP_LONG)
+ {
+ helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
+ goto USE_HELPER_FOR_ARITH;
+ }
-#if USE_HELPERS_FOR_INT_DIV
- if (typ == TYP_INT)
- {
- if (oper == GT_UMOD && !fgIsUnsignedModOptimizable(op2))
- {
- helper = CORINFO_HELP_UMOD;
- goto USE_HELPER_FOR_ARITH;
- }
- else if (oper == GT_MOD && !fgIsSignedModOptimizable(op2))
- {
- helper = CORINFO_HELP_MOD;
- goto USE_HELPER_FOR_ARITH;
- }
- }
+#if USE_HELPERS_FOR_INT_DIV
+ if (typ == TYP_INT)
+ {
+ if (oper == GT_UMOD && !fgIsUnsignedModOptimizable(op2))
+ {
+ helper = CORINFO_HELP_UMOD;
+ goto USE_HELPER_FOR_ARITH;
+ }
+ else if (oper == GT_MOD && !fgIsSignedModOptimizable(op2))
+ {
+ helper = CORINFO_HELP_MOD;
+ goto USE_HELPER_FOR_ARITH;
+ }
+ }
#endif
#endif // !_TARGET_64BIT_
#ifndef LEGACY_BACKEND
- if (op2->gtOper == GT_CAST && op2->gtOp.gtOp1->IsCnsIntOrI())
- {
- op2 = gtFoldExprConst(op2);
- }
+ if (op2->gtOper == GT_CAST && op2->gtOp.gtOp1->IsCnsIntOrI())
+ {
+ op2 = gtFoldExprConst(op2);
+ }
#ifdef _TARGET_ARM64_
- // For ARM64 we don't have a remainder instruction,
- // The architecture manual suggests the following transformation to
- // generate code for such operator:
- //
- // a % b = a - (a / b) * b;
- //
- tree = fgMorphModToSubMulDiv(tree->AsOp());
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtOp.gtOp2;
+ // For ARM64 we don't have a remainder instruction,
+ // The architecture manual suggests the following transformation to
+ // generate code for such operator:
+ //
+ // a % b = a - (a / b) * b;
+ //
+ tree = fgMorphModToSubMulDiv(tree->AsOp());
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtOp.gtOp2;
-#else // !_TARGET_ARM64_
+#else // !_TARGET_ARM64_
- if (oper != GT_UMOD && fgShouldUseMagicNumberDivide(tree->AsOp()))
- {
- tree = fgMorphModByConst(tree->AsOp());
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtOp.gtOp2;
- }
+ if (oper != GT_UMOD && fgShouldUseMagicNumberDivide(tree->AsOp()))
+ {
+ tree = fgMorphModByConst(tree->AsOp());
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtOp.gtOp2;
+ }
#endif //_TARGET_ARM64_
#endif // !LEGACY_BACKEND
- break;
+ break;
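The ARM64 comment above is the whole trick: for truncating integer division, a % b == a - (a / b) * b, so a remainder can be synthesized from a divide, a multiply and a subtract on a target with no remainder instruction. A minimal sketch of the identity, assuming the division itself is defined (b != 0 and not INT64_MIN / -1):

// Sketch only: the identity fgMorphModToSubMulDiv relies on.
#include <cstdint>

static int64_t remainderViaSubMulDiv(int64_t a, int64_t b)
{
    return a - (a / b) * b; // equals a % b whenever a / b is defined
}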
- USE_HELPER_FOR_ARITH:
- {
- /* We have to morph these arithmetic operations into helper calls
- before morphing the arguments (preorder), else the arguments
- won't get correct values of fgPtrArgCntCur.
- However, try to fold the tree first in case we end up with a
- simple node which won't need a helper call at all */
+ USE_HELPER_FOR_ARITH:
+ {
+ /* We have to morph these arithmetic operations into helper calls
+ before morphing the arguments (preorder), else the arguments
+ won't get correct values of fgPtrArgCntCur.
+ However, try to fold the tree first in case we end up with a
+ simple node which won't need a helper call at all */
- noway_assert(tree->OperIsBinary());
+ noway_assert(tree->OperIsBinary());
- GenTreePtr oldTree = tree;
+ GenTreePtr oldTree = tree;
- tree = gtFoldExpr(tree);
+ tree = gtFoldExpr(tree);
- // Were we able to fold it ?
- // Note that gtFoldExpr may return a non-leaf even if successful
- // e.g. for something like "expr / 1" - see also bug #290853
- if (tree->OperIsLeaf() || (oldTree != tree))
+ // Were we able to fold it ?
+ // Note that gtFoldExpr may return a non-leaf even if successful
+ // e.g. for something like "expr / 1" - see also bug #290853
+ if (tree->OperIsLeaf() || (oldTree != tree))
- {
- return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree);
- }
+ {
+ return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree);
+ }
- // Did we fold it into a comma node with throw?
- if (tree->gtOper == GT_COMMA)
- {
- noway_assert(fgIsCommaThrow(tree));
- return fgMorphTree(tree);
+ // Did we fold it into a comma node with throw?
+ if (tree->gtOper == GT_COMMA)
+ {
+ noway_assert(fgIsCommaThrow(tree));
+ return fgMorphTree(tree);
+ }
}
- }
- return fgMorphIntoHelperCall(tree, helper, gtNewArgList(op1, op2));
+ return fgMorphIntoHelperCall(tree, helper, gtNewArgList(op1, op2));
- case GT_RETURN:
- // normalize small integer return values
- if (fgGlobalMorph && varTypeIsSmall(info.compRetType) &&
- (op1 != NULL) && (op1->TypeGet() != TYP_VOID) &&
- fgCastNeeded(op1, info.compRetType))
- {
- // Small-typed return values are normalized by the callee
- op1 = gtNewCastNode(TYP_INT, op1, info.compRetType);
+ case GT_RETURN:
+ // normalize small integer return values
+ if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) &&
+ (op1->TypeGet() != TYP_VOID) && fgCastNeeded(op1, info.compRetType))
+ {
+ // Small-typed return values are normalized by the callee
+ op1 = gtNewCastNode(TYP_INT, op1, info.compRetType);
- // Propagate GTF_COLON_COND
- op1->gtFlags|=(tree->gtFlags & GTF_COLON_COND);
+ // Propagate GTF_COLON_COND
+ op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
- tree->gtOp.gtOp1 = fgMorphCast(op1);
+ tree->gtOp.gtOp1 = fgMorphCast(op1);
- // Propagate side effect flags
- tree->gtFlags &= ~GTF_ALL_EFFECT;
- tree->gtFlags |= (tree->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
+ // Propagate side effect flags
+ tree->gtFlags &= ~GTF_ALL_EFFECT;
+ tree->gtFlags |= (tree->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT);
- return tree;
- }
- break;
+ return tree;
+ }
+ break;
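Normalization here means the callee itself truncates and re-extends the value to the declared small return type, so callers may assume the upper bits of the return register are well defined. A minimal sketch of the equivalent source-level cast for a signed-byte return type:

// Sketch only: the narrowing cast GT_RETURN acquires for a small return type.
#include <cstdint>

static int8_t returnByte(int wide)
{
    return static_cast<int8_t>(wide); // value is truncated and sign-extended by the callee
}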
- case GT_EQ:
- case GT_NE:
+ case GT_EQ:
+ case GT_NE:
- // Check for typeof(...) == obj.GetType()
- // Also check for typeof(...) == typeof(...)
- // IMPORTANT NOTE: this optimization relies on a one-to-one mapping between
- // type handles and instances of System.Type
- // If this invariant is ever broken, the optimization will need updating
- CLANG_FORMAT_COMMENT_ANCHOR;
+ // Check for typeof(...) == obj.GetType()
+ // Also check for typeof(...) == typeof(...)
+ // IMPORTANT NOTE: this optimization relies on a one-to-one mapping between
+ // type handles and instances of System.Type
+ // If this invariant is ever broken, the optimization will need updating
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef LEGACY_BACKEND
- if ( op1->gtOper == GT_CALL &&
- op2->gtOper == GT_CALL &&
- ((op1->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) || (op1->gtCall.gtCallType == CT_HELPER)) &&
- ((op2->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) || (op2->gtCall.gtCallType == CT_HELPER)))
+ if (op1->gtOper == GT_CALL && op2->gtOper == GT_CALL &&
+ ((op1->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) ||
+ (op1->gtCall.gtCallType == CT_HELPER)) &&
+ ((op2->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) ||
+ (op2->gtCall.gtCallType == CT_HELPER)))
#else
- if ((((op1->gtOper == GT_INTRINSIC) && (op1->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType)) ||
- ((op1->gtOper == GT_CALL) && (op1->gtCall.gtCallType == CT_HELPER))) &&
- (((op2->gtOper == GT_INTRINSIC) && (op2->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType)) ||
- ((op2->gtOper == GT_CALL) && (op2->gtCall.gtCallType == CT_HELPER))))
+ if ((((op1->gtOper == GT_INTRINSIC) &&
+ (op1->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType)) ||
+ ((op1->gtOper == GT_CALL) && (op1->gtCall.gtCallType == CT_HELPER))) &&
+ (((op2->gtOper == GT_INTRINSIC) &&
+ (op2->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType)) ||
+ ((op2->gtOper == GT_CALL) && (op2->gtCall.gtCallType == CT_HELPER))))
#endif
- {
- GenTreePtr pGetClassFromHandle;
- GenTreePtr pGetType;
+ {
+ GenTreePtr pGetClassFromHandle;
+ GenTreePtr pGetType;
#ifdef LEGACY_BACKEND
- bool bOp1ClassFromHandle = gtIsTypeHandleToRuntimeTypeHelper(op1);
- bool bOp2ClassFromHandle = gtIsTypeHandleToRuntimeTypeHelper(op2);
+ bool bOp1ClassFromHandle = gtIsTypeHandleToRuntimeTypeHelper(op1);
+ bool bOp2ClassFromHandle = gtIsTypeHandleToRuntimeTypeHelper(op2);
#else
- bool bOp1ClassFromHandle = op1->gtOper == GT_CALL ? gtIsTypeHandleToRuntimeTypeHelper(op1) : false;
- bool bOp2ClassFromHandle = op2->gtOper == GT_CALL ? gtIsTypeHandleToRuntimeTypeHelper(op2) : false;
+ bool bOp1ClassFromHandle = op1->gtOper == GT_CALL ? gtIsTypeHandleToRuntimeTypeHelper(op1) : false;
+ bool bOp2ClassFromHandle = op2->gtOper == GT_CALL ? gtIsTypeHandleToRuntimeTypeHelper(op2) : false;
#endif
- // Optimize typeof(...) == typeof(...)
- // Typically this occurs in generic code that attempts a type switch
- // e.g. typeof(T) == typeof(int)
+ // Optimize typeof(...) == typeof(...)
+ // Typically this occurs in generic code that attempts a type switch
+ // e.g. typeof(T) == typeof(int)
- if (bOp1ClassFromHandle && bOp2ClassFromHandle)
- {
- GenTreePtr classFromHandleArg1 = tree->gtOp.gtOp1->gtCall.gtCallArgs->gtOp.gtOp1;
- GenTreePtr classFromHandleArg2 = tree->gtOp.gtOp2->gtCall.gtCallArgs->gtOp.gtOp1;
+ if (bOp1ClassFromHandle && bOp2ClassFromHandle)
+ {
+ GenTreePtr classFromHandleArg1 = tree->gtOp.gtOp1->gtCall.gtCallArgs->gtOp.gtOp1;
+ GenTreePtr classFromHandleArg2 = tree->gtOp.gtOp2->gtCall.gtCallArgs->gtOp.gtOp1;
- GenTreePtr compare = gtNewOperNode(oper, TYP_INT,
- classFromHandleArg1,
- classFromHandleArg2);
+ GenTreePtr compare = gtNewOperNode(oper, TYP_INT, classFromHandleArg1, classFromHandleArg2);
- compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
+ compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
- // Morph and return
- return fgMorphTree(compare);
- }
- else if (bOp1ClassFromHandle || bOp2ClassFromHandle)
- {
- //
- // Now check for GetClassFromHandle(handle) == obj.GetType()
- //
+ // Morph and return
+ return fgMorphTree(compare);
+ }
+ else if (bOp1ClassFromHandle || bOp2ClassFromHandle)
+ {
+ //
+ // Now check for GetClassFromHandle(handle) == obj.GetType()
+ //
- if (bOp1ClassFromHandle)
- {
- pGetClassFromHandle = tree->gtOp.gtOp1;
- pGetType = op2;
- }
- else
- {
- pGetClassFromHandle = tree->gtOp.gtOp2;
- pGetType = op1;
- }
+ if (bOp1ClassFromHandle)
+ {
+ pGetClassFromHandle = tree->gtOp.gtOp1;
+ pGetType = op2;
+ }
+ else
+ {
+ pGetClassFromHandle = tree->gtOp.gtOp2;
+ pGetType = op1;
+ }
- GenTreePtr pGetClassFromHandleArgument = pGetClassFromHandle->gtCall.gtCallArgs->gtOp.gtOp1;
- GenTreePtr pConstLiteral = pGetClassFromHandleArgument;
+ GenTreePtr pGetClassFromHandleArgument = pGetClassFromHandle->gtCall.gtCallArgs->gtOp.gtOp1;
+ GenTreePtr pConstLiteral = pGetClassFromHandleArgument;
- // Unwrap GT_NOP node used to prevent constant folding
- if (pConstLiteral->gtOper == GT_NOP && pConstLiteral->gtType == TYP_I_IMPL)
- {
- pConstLiteral = pConstLiteral->gtOp.gtOp1;
- }
+ // Unwrap GT_NOP node used to prevent constant folding
+ if (pConstLiteral->gtOper == GT_NOP && pConstLiteral->gtType == TYP_I_IMPL)
+ {
+ pConstLiteral = pConstLiteral->gtOp.gtOp1;
+ }
- // In the ngen case, we have to go thru an indirection to get the right handle.
- if (pConstLiteral->gtOper == GT_IND)
- {
- pConstLiteral = pConstLiteral->gtOp.gtOp1;
- }
+ // In the ngen case, we have to go thru an indirection to get the right handle.
+ if (pConstLiteral->gtOper == GT_IND)
+ {
+ pConstLiteral = pConstLiteral->gtOp.gtOp1;
+ }
#ifdef LEGACY_BACKEND
- if (pGetType->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC &&
- info.compCompHnd->getIntrinsicID(pGetType->gtCall.gtCallMethHnd) == CORINFO_INTRINSIC_Object_GetType &&
+ if (pGetType->gtCall.gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC &&
+ info.compCompHnd->getIntrinsicID(pGetType->gtCall.gtCallMethHnd) ==
+ CORINFO_INTRINSIC_Object_GetType &&
#else
- if ((pGetType->gtOper == GT_INTRINSIC) && (pGetType->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType) &&
+ if ((pGetType->gtOper == GT_INTRINSIC) &&
+ (pGetType->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Object_GetType) &&
#endif
- pConstLiteral->gtOper == GT_CNS_INT &&
- pConstLiteral->gtType == TYP_I_IMPL)
- {
- CORINFO_CLASS_HANDLE clsHnd = CORINFO_CLASS_HANDLE(pConstLiteral->gtIntCon.gtCompileTimeHandle);
+ pConstLiteral->gtOper == GT_CNS_INT && pConstLiteral->gtType == TYP_I_IMPL)
+ {
+ CORINFO_CLASS_HANDLE clsHnd =
+ CORINFO_CLASS_HANDLE(pConstLiteral->gtIntCon.gtCompileTimeHandle);
- if (info.compCompHnd->canInlineTypeCheckWithObjectVTable(clsHnd))
- {
- // Method Table tree
- CLANG_FORMAT_COMMENT_ANCHOR;
+ if (info.compCompHnd->canInlineTypeCheckWithObjectVTable(clsHnd))
+ {
+ // Method Table tree
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef LEGACY_BACKEND
- GenTreePtr objMT = gtNewOperNode(GT_IND, TYP_I_IMPL, pGetType->gtCall.gtCallObjp);
+ GenTreePtr objMT = gtNewOperNode(GT_IND, TYP_I_IMPL, pGetType->gtCall.gtCallObjp);
#else
- GenTreePtr objMT = gtNewOperNode(GT_IND, TYP_I_IMPL, pGetType->gtUnOp.gtOp1);
+ GenTreePtr objMT = gtNewOperNode(GT_IND, TYP_I_IMPL, pGetType->gtUnOp.gtOp1);
#endif
- objMT->gtFlags |= GTF_EXCEPT; // Null ref exception if object is null
- compCurBB->bbFlags |= BBF_HAS_VTABREF;
- optMethodFlags |= OMF_HAS_VTABLEREF;
+ objMT->gtFlags |= GTF_EXCEPT; // Null ref exception if object is null
+ compCurBB->bbFlags |= BBF_HAS_VTABREF;
+ optMethodFlags |= OMF_HAS_VTABLEREF;
- // Method table constant
- GenTreePtr cnsMT = pGetClassFromHandleArgument;
+ // Method table constant
+ GenTreePtr cnsMT = pGetClassFromHandleArgument;
- GenTreePtr compare = gtNewOperNode(oper, TYP_INT,
- objMT,
- cnsMT);
+ GenTreePtr compare = gtNewOperNode(oper, TYP_INT, objMT, cnsMT);
- compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
+ compare->gtFlags |=
+ tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
- // Morph and return
- return fgMorphTree(compare);
+ // Morph and return
+ return fgMorphTree(compare);
+ }
+ }
}
}
- }
- }
- fgMorphRecognizeBoxNullable(tree);
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtGetOp2();
+ fgMorphRecognizeBoxNullable(tree);
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtGetOp2();
- break;
+ break;
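Both rewrites avoid materializing System.Type objects: typeof(X) == typeof(Y) compares the two class-handle arguments directly, and typeof(X) == obj.GetType() loads the object's method-table pointer through an indirection and compares it against the compile-time handle (when canInlineTypeCheckWithObjectVTable permits). A minimal sketch of the second shape, with hypothetical names, assuming the method table is the first pointer-sized field of the object:

// Sketch only: the inlined exact-type check -- compare method tables, not Type instances.
static bool isExactType(void* obj, void* expectedMethodTable)
{
    void* objMethodTable = *reinterpret_cast<void**>(obj); // the GT_IND of the object
    return objMethodTable == expectedMethodTable;          // compared against cnsMT
}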
#ifdef _TARGET_ARM_
- case GT_INTRINSIC:
- if (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round)
- {
- switch (tree->TypeGet())
- {
- case TYP_DOUBLE:
- return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewArgList(op1));
- case TYP_FLOAT:
- return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewArgList(op1));
- default:
- unreached();
- }
- }
- break;
+ case GT_INTRINSIC:
+ if (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round)
+ {
+ switch (tree->TypeGet())
+ {
+ case TYP_DOUBLE:
+ return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewArgList(op1));
+ case TYP_FLOAT:
+ return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewArgList(op1));
+ default:
+ unreached();
+ }
+ }
+ break;
#endif
- default:
- break;
- }
+ default:
+ break;
+ }
#if !CPU_HAS_FP_SUPPORT
- tree = fgMorphToEmulatedFP(tree);
+ tree = fgMorphToEmulatedFP(tree);
#endif
- /* Could this operator throw an exception? */
- if (fgGlobalMorph && tree->OperMayThrow())
- {
- if ((tree->OperGet() != GT_IND) || fgAddrCouldBeNull(tree->gtOp.gtOp1))
+ /* Could this operator throw an exception? */
+ if (fgGlobalMorph && tree->OperMayThrow())
{
- /* Mark the tree node as potentially throwing an exception */
- tree->gtFlags |= GTF_EXCEPT;
+ if ((tree->OperGet() != GT_IND) || fgAddrCouldBeNull(tree->gtOp.gtOp1))
+ {
+ /* Mark the tree node as potentially throwing an exception */
+ tree->gtFlags |= GTF_EXCEPT;
+ }
}
- }
- /*-------------------------------------------------------------------------
- * Process the first operand, if any
- */
+ /*-------------------------------------------------------------------------
+ * Process the first operand, if any
+ */
- if (op1)
- {
+ if (op1)
+ {
#if LOCAL_ASSERTION_PROP
- // If we are entering the "then" part of a Qmark-Colon we must
- // save the state of the current copy assignment table
- // so that we can restore this state when entering the "else" part
- if (isQmarkColon)
- {
- noway_assert(optLocalAssertionProp);
- if (optAssertionCount)
+ // If we are entering the "then" part of a Qmark-Colon we must
+ // save the state of the current copy assignment table
+ // so that we can restore this state when entering the "else" part
+ if (isQmarkColon)
{
- noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
- unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
- origAssertionTab = (AssertionDsc*) ALLOCA(tabSize);
- origAssertionCount = optAssertionCount;
- memcpy(origAssertionTab, optAssertionTabPrivate, tabSize);
- }
- else
- {
- origAssertionCount = 0;
- origAssertionTab = NULL;
+ noway_assert(optLocalAssertionProp);
+ if (optAssertionCount)
+ {
+ noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
+ unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
+ origAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
+ origAssertionCount = optAssertionCount;
+ memcpy(origAssertionTab, optAssertionTabPrivate, tabSize);
+ }
+ else
+ {
+ origAssertionCount = 0;
+ origAssertionTab = nullptr;
+ }
}
- }
#endif // LOCAL_ASSERTION_PROP
- // We might need a new MorphAddressContext context. (These are used to convey
- // parent context about how addresses being calculated will be used; see the
- // specification comment for MorphAddrContext for full details.)
- // Assume it's an Ind context to start.
- MorphAddrContext subIndMac1(MACK_Ind);
- MorphAddrContext* subMac1 = mac;
- if (subMac1 == NULL || subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_CopyBlock)
- {
- switch (tree->gtOper)
+ // We might need a new MorphAddressContext context. (These are used to convey
+ // parent context about how addresses being calculated will be used; see the
+ // specification comment for MorphAddrContext for full details.)
+ // Assume it's an Ind context to start.
+ MorphAddrContext subIndMac1(MACK_Ind);
+ MorphAddrContext* subMac1 = mac;
+ if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_CopyBlock)
{
- case GT_ADDR:
- if (subMac1 == NULL)
+ switch (tree->gtOper)
{
- subMac1 = &subIndMac1;
- subMac1->m_kind = MACK_Addr;
- }
- break;
- case GT_COMMA:
- // In a comma, the incoming context only applies to the rightmost arg of the
- // comma list. The left arg (op1) gets a fresh context.
- subMac1 = NULL;
- break;
- case GT_COPYBLK:
- case GT_COPYOBJ:
- assert(subMac1 == NULL); // Should only occur at top level, since value is void.
- subMac1 = &s_CopyBlockMAC;
- break;
- case GT_LIST:
- // If the list is the first arg of a copy block, its two args should be evaluated as
- // IND-context addresses, separately.
- if (subMac1 != NULL && subMac1->m_kind == MACK_CopyBlock)
- {
- subMac1 = &subIndMac1;
+ case GT_ADDR:
+ if (subMac1 == nullptr)
+ {
+ subMac1 = &subIndMac1;
+ subMac1->m_kind = MACK_Addr;
+ }
+ break;
+ case GT_COMMA:
+ // In a comma, the incoming context only applies to the rightmost arg of the
+ // comma list. The left arg (op1) gets a fresh context.
+ subMac1 = nullptr;
+ break;
+ case GT_COPYBLK:
+ case GT_COPYOBJ:
+ assert(subMac1 == nullptr); // Should only occur at top level, since value is void.
+ subMac1 = &s_CopyBlockMAC;
+ break;
+ case GT_LIST:
+ // If the list is the first arg of a copy block, its two args should be evaluated as
+ // IND-context addresses, separately.
+ if (subMac1 != nullptr && subMac1->m_kind == MACK_CopyBlock)
+ {
+ subMac1 = &subIndMac1;
+ }
+ break;
+ case GT_IND:
+ case GT_INITBLK:
+ case GT_OBJ:
+ subMac1 = &subIndMac1;
+ break;
+ default:
+ break;
}
- break;
- case GT_IND:
- case GT_INITBLK:
- case GT_OBJ:
- subMac1 = &subIndMac1;
- break;
- default:
- break;
}
- }
- // For additions, if we're in an IND context keep track of whether
- // all offsets added to the address are constant, and their sum.
- if (tree->gtOper == GT_ADD && subMac1 != NULL)
- {
- assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock.
- GenTreePtr otherOp = tree->gtOp.gtOp2;
- // Is the other operator a constant?
- if (otherOp->IsCnsIntOrI())
+ // For additions, if we're in an IND context keep track of whether
+ // all offsets added to the address are constant, and their sum.
+ if (tree->gtOper == GT_ADD && subMac1 != nullptr)
{
- ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset);
- totalOffset += otherOp->gtIntConCommon.IconValue();
- if (totalOffset.IsOverflow())
+ assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock.
+ GenTreePtr otherOp = tree->gtOp.gtOp2;
+ // Is the other operator a constant?
+ if (otherOp->IsCnsIntOrI())
{
- // We will consider an offset so large as to overflow as "not a constant" --
- // we will do a null check.
- subMac1->m_allConstantOffsets = false;
+ ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset);
+ totalOffset += otherOp->gtIntConCommon.IconValue();
+ if (totalOffset.IsOverflow())
+ {
+ // We will consider an offset so large as to overflow as "not a constant" --
+ // we will do a null check.
+ subMac1->m_allConstantOffsets = false;
+ }
+ else
+ {
+ subMac1->m_totalOffset += otherOp->gtIntConCommon.IconValue();
+ }
}
else
{
- subMac1->m_totalOffset += otherOp->gtIntConCommon.IconValue();
+ subMac1->m_allConstantOffsets = false;
}
}
- else
- {
- subMac1->m_allConstantOffsets = false;
- }
- }
- tree->gtOp.gtOp1 = op1 = fgMorphTree(op1, subMac1);
+ tree->gtOp.gtOp1 = op1 = fgMorphTree(op1, subMac1);
#if LOCAL_ASSERTION_PROP
- // If we are exiting the "then" part of a Qmark-Colon we must
- // save the state of the current copy assignment table
- // so that we can merge this state with the "else" part exit
- if (isQmarkColon)
- {
- noway_assert(optLocalAssertionProp);
- if (optAssertionCount)
- {
- noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
- unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
- thenAssertionTab = (AssertionDsc*) ALLOCA(tabSize);
- thenAssertionCount = optAssertionCount;
- memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize);
- }
- else
+ // If we are exiting the "then" part of a Qmark-Colon we must
+ // save the state of the current copy assignment table
+ // so that we can merge this state with the "else" part exit
+ if (isQmarkColon)
{
- thenAssertionCount = 0;
- thenAssertionTab = NULL;
+ noway_assert(optLocalAssertionProp);
+ if (optAssertionCount)
+ {
+ noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
+ unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
+ thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
+ thenAssertionCount = optAssertionCount;
+ memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize);
+ }
+ else
+ {
+ thenAssertionCount = 0;
+ thenAssertionTab = nullptr;
+ }
}
- }
#endif // LOCAL_ASSERTION_PROP
- /* Morphing along with folding and inlining may have changed the
- * side effect flags, so we have to reset them
- *
- * NOTE: Don't reset the exception flags on nodes that may throw */
-
- noway_assert(tree->gtOper != GT_CALL);
-
- if ((tree->gtOper != GT_INTRINSIC) || !IsIntrinsicImplementedByUserCall(tree->gtIntrinsic.gtIntrinsicId))
- {
- tree->gtFlags &= ~GTF_CALL;
- }
-
- if (!tree->OperMayThrow())
- tree->gtFlags &= ~GTF_EXCEPT;
+ /* Morphing along with folding and inlining may have changed the
+ * side effect flags, so we have to reset them
+ *
+ * NOTE: Don't reset the exception flags on nodes that may throw */
- /* Propagate the new flags */
- tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
+ noway_assert(tree->gtOper != GT_CALL);
-        // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
- // Similarly for clsVar
- if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR))
- tree->gtFlags &= ~GTF_GLOB_REF;
- } // if (op1)
+ if ((tree->gtOper != GT_INTRINSIC) || !IsIntrinsicImplementedByUserCall(tree->gtIntrinsic.gtIntrinsicId))
+ {
+ tree->gtFlags &= ~GTF_CALL;
+ }
- /*-------------------------------------------------------------------------
- * Process the second operand, if any
- */
+ if (!tree->OperMayThrow())
+ {
+ tree->gtFlags &= ~GTF_EXCEPT;
+ }
- if (op2)
- {
+ /* Propagate the new flags */
+ tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
-#if LOCAL_ASSERTION_PROP
- // If we are entering the "else" part of a Qmark-Colon we must
- // reset the state of the current copy assignment table
- if (isQmarkColon)
- {
- noway_assert(optLocalAssertionProp);
- optAssertionReset(0);
- if (origAssertionCount)
+            // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
+ // Similarly for clsVar
+ if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR))
{
- size_t tabSize = origAssertionCount * sizeof(AssertionDsc);
- memcpy(optAssertionTabPrivate, origAssertionTab, tabSize);
- optAssertionReset(origAssertionCount);
+ tree->gtFlags &= ~GTF_GLOB_REF;
}
- }
-#endif // LOCAL_ASSERTION_PROP
+ } // if (op1)
- // We might need a new MorphAddressContext context to use in evaluating op2.
- // (These are used to convey parent context about how addresses being calculated
- // will be used; see the specification comment for MorphAddrContext for full details.)
- // Assume it's an Ind context to start.
- MorphAddrContext subIndMac2(MACK_Ind);
- switch (tree->gtOper)
+ /*-------------------------------------------------------------------------
+ * Process the second operand, if any
+ */
+
+ if (op2)
{
- case GT_ADD:
- if (mac != NULL && mac->m_kind == MACK_Ind)
+
+#if LOCAL_ASSERTION_PROP
+ // If we are entering the "else" part of a Qmark-Colon we must
+ // reset the state of the current copy assignment table
+ if (isQmarkColon)
{
- GenTreePtr otherOp = tree->gtOp.gtOp1;
- // Is the other operator a constant?
- if (otherOp->IsCnsIntOrI())
- {
- mac->m_totalOffset += otherOp->gtIntConCommon.IconValue();
- }
- else
+ noway_assert(optLocalAssertionProp);
+ optAssertionReset(0);
+ if (origAssertionCount)
{
- mac->m_allConstantOffsets = false;
+ size_t tabSize = origAssertionCount * sizeof(AssertionDsc);
+ memcpy(optAssertionTabPrivate, origAssertionTab, tabSize);
+ optAssertionReset(origAssertionCount);
}
}
- break;
- case GT_LIST:
- if (mac != NULL && mac->m_kind == MACK_CopyBlock)
+#endif // LOCAL_ASSERTION_PROP
+
+ // We might need a new MorphAddressContext context to use in evaluating op2.
+ // (These are used to convey parent context about how addresses being calculated
+ // will be used; see the specification comment for MorphAddrContext for full details.)
+ // Assume it's an Ind context to start.
+ MorphAddrContext subIndMac2(MACK_Ind);
+ switch (tree->gtOper)
{
- mac = &subIndMac2;
+ case GT_ADD:
+ if (mac != nullptr && mac->m_kind == MACK_Ind)
+ {
+ GenTreePtr otherOp = tree->gtOp.gtOp1;
+ // Is the other operator a constant?
+ if (otherOp->IsCnsIntOrI())
+ {
+ mac->m_totalOffset += otherOp->gtIntConCommon.IconValue();
+ }
+ else
+ {
+ mac->m_allConstantOffsets = false;
+ }
+ }
+ break;
+ case GT_LIST:
+ if (mac != nullptr && mac->m_kind == MACK_CopyBlock)
+ {
+ mac = &subIndMac2;
+ }
+ break;
+ default:
+ break;
}
- break;
- default:
- break;
- }
- tree->gtOp.gtOp2 = op2 = fgMorphTree(op2, mac);
+ tree->gtOp.gtOp2 = op2 = fgMorphTree(op2, mac);
- /* Propagate the side effect flags from op2 */
+ /* Propagate the side effect flags from op2 */
- tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT);
+ tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT);
#if LOCAL_ASSERTION_PROP
- // If we are exiting the "else" part of a Qmark-Colon we must
- // merge the state of the current copy assignment table with
- // that of the exit of the "then" part.
- if (isQmarkColon)
- {
- noway_assert(optLocalAssertionProp);
- // If either exit table has zero entries then
- // the merged table also has zero entries
- if (optAssertionCount == 0 || thenAssertionCount == 0)
- {
- optAssertionReset(0);
- }
- else
- {
- size_t tabSize = optAssertionCount * sizeof(AssertionDsc);
- if ( (optAssertionCount != thenAssertionCount) ||
- (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0) )
+ // If we are exiting the "else" part of a Qmark-Colon we must
+ // merge the state of the current copy assignment table with
+ // that of the exit of the "then" part.
+ if (isQmarkColon)
+ {
+ noway_assert(optLocalAssertionProp);
+ // If either exit table has zero entries then
+ // the merged table also has zero entries
+ if (optAssertionCount == 0 || thenAssertionCount == 0)
{
- // Yes they are different so we have to find the merged set
- // Iterate over the copy asgn table removing any entries
- // that do not have an exact match in the thenAssertionTab
- AssertionIndex index = 1;
- while (index <= optAssertionCount)
+ optAssertionReset(0);
+ }
+ else
+ {
+ size_t tabSize = optAssertionCount * sizeof(AssertionDsc);
+ if ((optAssertionCount != thenAssertionCount) ||
+ (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0))
{
- AssertionDsc* curAssertion = optGetAssertion(index);
-
- for (unsigned j=0; j < thenAssertionCount; j++)
+ // Yes they are different so we have to find the merged set
+ // Iterate over the copy asgn table removing any entries
+ // that do not have an exact match in the thenAssertionTab
+ AssertionIndex index = 1;
+ while (index <= optAssertionCount)
{
- AssertionDsc* thenAssertion = &thenAssertionTab[j];
+ AssertionDsc* curAssertion = optGetAssertion(index);
- // Do the left sides match?
- if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) &&
- (curAssertion->assertionKind == thenAssertion->assertionKind))
+ for (unsigned j = 0; j < thenAssertionCount; j++)
{
- // Do the right sides match?
- if ((curAssertion->op2.kind == thenAssertion->op2.kind) &&
- (curAssertion->op2.lconVal == thenAssertion->op2.lconVal))
- {
- goto KEEP;
- }
- else
+ AssertionDsc* thenAssertion = &thenAssertionTab[j];
+
+ // Do the left sides match?
+ if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) &&
+ (curAssertion->assertionKind == thenAssertion->assertionKind))
{
- goto REMOVE;
+ // Do the right sides match?
+ if ((curAssertion->op2.kind == thenAssertion->op2.kind) &&
+ (curAssertion->op2.lconVal == thenAssertion->op2.lconVal))
+ {
+ goto KEEP;
+ }
+ else
+ {
+ goto REMOVE;
+ }
}
}
- }
//
// If we fall out of the loop above then we didn't find
// any matching entry in the thenAssertionTab so it must
// have been killed on that path so we remove it here
//
- REMOVE:
- // The data at optAssertionTabPrivate[i] is to be removed
- CLANG_FORMAT_COMMENT_ANCHOR;
+ REMOVE:
+ // The data at optAssertionTabPrivate[i] is to be removed
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- if (verbose)
- {
- printf("The QMARK-COLON ");
- printTreeID(tree);
- printf(" removes assertion candidate #%d\n", index);
- }
+ if (verbose)
+ {
+ printf("The QMARK-COLON ");
+ printTreeID(tree);
+ printf(" removes assertion candidate #%d\n", index);
+ }
#endif
- optAssertionRemove(index);
- continue;
- KEEP:
- // The data at optAssertionTabPrivate[i] is to be kept
- index++;
+ optAssertionRemove(index);
+ continue;
+ KEEP:
+ // The data at optAssertionTabPrivate[i] is to be kept
+ index++;
+ }
}
}
}
- }
-#endif // LOCAL_ASSERTION_PROP
- } // if (op2)
+#endif // LOCAL_ASSERTION_PROP
+ } // if (op2)
-DONE_MORPHING_CHILDREN:
+ DONE_MORPHING_CHILDREN:
- /*-------------------------------------------------------------------------
- * Now do POST-ORDER processing
- */
+/*-------------------------------------------------------------------------
+ * Now do POST-ORDER processing
+ */
#if FEATURE_FIXED_OUT_ARGS && !defined(_TARGET_64BIT_)
- // Variable shifts of a long end up being helper calls, so mark the tree as such. This
- // is potentially too conservative, since they'll get treated as having side effects.
- // It is important to mark them as calls so if they are part of an argument list,
- // they will get sorted and processed properly (for example, it is important to handle
- // all nested calls before putting struct arguments in the argument registers). We
- // could mark the trees just before argument processing, but it would require a full
- // tree walk of the argument tree, so we just do it here, instead, even though we'll
- // mark non-argument trees (that will still get converted to calls, anyway).
- if (GenTree::OperIsShift(oper) &&
- (tree->TypeGet() == TYP_LONG) &&
- (op2->OperGet() != GT_CNS_INT))
- {
- tree->gtFlags |= GTF_CALL;
- }
+ // Variable shifts of a long end up being helper calls, so mark the tree as such. This
+ // is potentially too conservative, since they'll get treated as having side effects.
+ // It is important to mark them as calls so if they are part of an argument list,
+ // they will get sorted and processed properly (for example, it is important to handle
+ // all nested calls before putting struct arguments in the argument registers). We
+ // could mark the trees just before argument processing, but it would require a full
+ // tree walk of the argument tree, so we just do it here, instead, even though we'll
+ // mark non-argument trees (that will still get converted to calls, anyway).
+ if (GenTree::OperIsShift(oper) && (tree->TypeGet() == TYP_LONG) && (op2->OperGet() != GT_CNS_INT))
+ {
+ tree->gtFlags |= GTF_CALL;
+ }
#endif // FEATURE_FIXED_OUT_ARGS && !_TARGET_64BIT_
- if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet()))
- && (op2 && !varTypeIsGC(op2->TypeGet())))
- {
- // The tree is really not GC but was marked as such. Now that the
- // children have been unmarked, unmark the tree too.
+ if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) &&
+ (op2 && !varTypeIsGC(op2->TypeGet())))
+ {
+ // The tree is really not GC but was marked as such. Now that the
+ // children have been unmarked, unmark the tree too.
-    // Remember that GT_COMMA inherits its type only from op2
- if (tree->gtOper == GT_COMMA)
- tree->gtType = genActualType(op2->TypeGet());
- else
- tree->gtType = genActualType(op1->TypeGet());
- }
+        // Remember that GT_COMMA inherits its type only from op2
+ if (tree->gtOper == GT_COMMA)
+ {
+ tree->gtType = genActualType(op2->TypeGet());
+ }
+ else
+ {
+ tree->gtType = genActualType(op1->TypeGet());
+ }
+ }
- GenTreePtr oldTree = tree;
+ GenTreePtr oldTree = tree;
- GenTreePtr qmarkOp1 = NULL;
- GenTreePtr qmarkOp2 = NULL;
+ GenTreePtr qmarkOp1 = nullptr;
+ GenTreePtr qmarkOp2 = nullptr;
- if ((tree->OperGet() == GT_QMARK) &&
- (tree->gtOp.gtOp2->OperGet() == GT_COLON))
- {
- qmarkOp1 = oldTree->gtOp.gtOp2->gtOp.gtOp1;
- qmarkOp2 = oldTree->gtOp.gtOp2->gtOp.gtOp2;
- }
+ if ((tree->OperGet() == GT_QMARK) && (tree->gtOp.gtOp2->OperGet() == GT_COLON))
+ {
+ qmarkOp1 = oldTree->gtOp.gtOp2->gtOp.gtOp1;
+ qmarkOp2 = oldTree->gtOp.gtOp2->gtOp.gtOp2;
+ }
- // Try to fold it, maybe we get lucky,
- tree = gtFoldExpr(tree);
+ // Try to fold it, maybe we get lucky,
+ tree = gtFoldExpr(tree);
- if (oldTree != tree)
- {
- /* if gtFoldExpr returned op1 or op2 then we are done */
- if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2))
- return tree;
+ if (oldTree != tree)
+ {
+ /* if gtFoldExpr returned op1 or op2 then we are done */
+ if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2))
+ {
+ return tree;
+ }
+
+ /* If we created a comma-throw tree then we need to morph op1 */
+ if (fgIsCommaThrow(tree))
+ {
+ tree->gtOp.gtOp1 = fgMorphTree(tree->gtOp.gtOp1);
+ fgMorphTreeDone(tree);
+ return tree;
+ }
- /* If we created a comma-throw tree then we need to morph op1 */
- if (fgIsCommaThrow(tree))
+ return tree;
+ }
+ else if (tree->OperKind() & GTK_CONST)
{
- tree->gtOp.gtOp1 = fgMorphTree(tree->gtOp.gtOp1);
- fgMorphTreeDone(tree);
return tree;
}
- return tree;
- }
- else if (tree->OperKind() & GTK_CONST)
- {
- return tree;
- }
-
- /* gtFoldExpr could have used setOper to change the oper */
- oper = tree->OperGet();
- typ = tree->TypeGet();
+ /* gtFoldExpr could have used setOper to change the oper */
+ oper = tree->OperGet();
+ typ = tree->TypeGet();
- /* gtFoldExpr could have changed op1 and op2 */
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtGetOp2();
+ /* gtFoldExpr could have changed op1 and op2 */
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtGetOp2();
- // Do we have an integer compare operation?
- //
- if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet()))
- {
- // Are we comparing against zero?
+ // Do we have an integer compare operation?
//
- if (op2->IsIntegralConst(0))
+ if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet()))
{
- // Request that the codegen for op1 sets the condition flags
- // when it generates the code for op1.
+ // Are we comparing against zero?
//
- // Codegen for op1 must set the condition flags if
- // this method returns true.
- //
- op1->gtRequestSetFlags();
+ if (op2->IsIntegralConst(0))
+ {
+ // Request that the codegen for op1 sets the condition flags
+ // when it generates the code for op1.
+ //
+ // Codegen for op1 must set the condition flags if
+ // this method returns true.
+ //
+ op1->gtRequestSetFlags();
+ }
}
- }
- /*-------------------------------------------------------------------------
- * Perform the required oper-specific postorder morphing
- */
+ /*-------------------------------------------------------------------------
+ * Perform the required oper-specific postorder morphing
+ */
- GenTreePtr temp;
- GenTreePtr cns1, cns2;
- GenTreePtr thenNode;
- GenTreePtr elseNode;
- size_t ival1, ival2;
- GenTreePtr lclVarTree;
- GenTreeLclVarCommon* lclVarCmnTree;
- FieldSeqNode* fieldSeq = NULL;
+ GenTreePtr temp;
+ GenTreePtr cns1, cns2;
+ GenTreePtr thenNode;
+ GenTreePtr elseNode;
+ size_t ival1, ival2;
+ GenTreePtr lclVarTree;
+ GenTreeLclVarCommon* lclVarCmnTree;
+ FieldSeqNode* fieldSeq = nullptr;
- switch (oper)
- {
- case GT_ASG:
-
- lclVarTree = fgIsIndirOfAddrOfLocal(op1);
- if (lclVarTree != NULL)
- {
- lclVarTree->gtFlags |= GTF_VAR_DEF;
- }
-
- /* If we are storing a small type, we might be able to omit a cast */
- if ((op1->gtOper == GT_IND) && varTypeIsSmall(op1->TypeGet()))
+ switch (oper)
{
- if (!gtIsActiveCSE_Candidate(op2) && (op2->gtOper == GT_CAST) && !op2->gtOverflow())
- {
- var_types castType = op2->CastToType();
+ case GT_ASG:
- // If we are performing a narrowing cast and
- // castType is larger or the same as op1's type
- // then we can discard the cast.
-
- if (varTypeIsSmall(castType) && (castType >= op1->TypeGet()))
+ lclVarTree = fgIsIndirOfAddrOfLocal(op1);
+ if (lclVarTree != nullptr)
{
- tree->gtOp.gtOp2 = op2 = op2->gtCast.CastOp();
+ lclVarTree->gtFlags |= GTF_VAR_DEF;
}
- }
- else if (op2->OperIsCompare() && varTypeIsByte(op1->TypeGet()))
- {
- /* We don't need to zero extend the setcc instruction */
- op2->gtType = TYP_BYTE;
- }
- }
- // If we introduced a CSE we may need to undo the optimization above
- // (i.e. " op2->gtType = TYP_BYTE;" which depends upon op1 being a GT_IND of a byte type)
-        // When we introduce the CSE we remove the GT_IND and substitute a GT_LCL_VAR in its place.
- else if (op2->OperIsCompare() && (op2->gtType == TYP_BYTE) && (op1->gtOper == GT_LCL_VAR))
- {
- unsigned varNum = op1->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &lvaTable[varNum];
- /* We again need to zero extend the setcc instruction */
- op2->gtType = varDsc->TypeGet();
- }
+ /* If we are storing a small type, we might be able to omit a cast */
+ if ((op1->gtOper == GT_IND) && varTypeIsSmall(op1->TypeGet()))
+ {
+ if (!gtIsActiveCSE_Candidate(op2) && (op2->gtOper == GT_CAST) && !op2->gtOverflow())
+ {
+ var_types castType = op2->CastToType();
- __fallthrough;
+ // If we are performing a narrowing cast and
+ // castType is larger or the same as op1's type
+ // then we can discard the cast.
- case GT_COPYOBJ:
- case GT_COPYBLK:
- case GT_INITBLK:
- fgAssignSetVarDef(tree);
+ if (varTypeIsSmall(castType) && (castType >= op1->TypeGet()))
+ {
+ tree->gtOp.gtOp2 = op2 = op2->gtCast.CastOp();
+ }
+ }
+ else if (op2->OperIsCompare() && varTypeIsByte(op1->TypeGet()))
+ {
+ /* We don't need to zero extend the setcc instruction */
+ op2->gtType = TYP_BYTE;
+ }
+ }
+ // If we introduced a CSE we may need to undo the optimization above
+ // (i.e. " op2->gtType = TYP_BYTE;" which depends upon op1 being a GT_IND of a byte type)
+                // When we introduce the CSE we remove the GT_IND and substitute a GT_LCL_VAR in its place.
+ else if (op2->OperIsCompare() && (op2->gtType == TYP_BYTE) && (op1->gtOper == GT_LCL_VAR))
+ {
+ unsigned varNum = op1->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[varNum];
- __fallthrough;
+ /* We again need to zero extend the setcc instruction */
+ op2->gtType = varDsc->TypeGet();
+ }
- case GT_ASG_ADD:
- case GT_ASG_SUB:
- case GT_ASG_MUL:
- case GT_ASG_DIV:
- case GT_ASG_MOD:
- case GT_ASG_UDIV:
- case GT_ASG_UMOD:
- case GT_ASG_OR:
- case GT_ASG_XOR:
- case GT_ASG_AND:
- case GT_ASG_LSH:
- case GT_ASG_RSH:
- case GT_ASG_RSZ:
+ __fallthrough;
- /* We can't CSE the LHS of an assignment */
- /* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */
- op1->gtFlags |= GTF_DONT_CSE;
- break;
+ case GT_COPYOBJ:
+ case GT_COPYBLK:
+ case GT_INITBLK:
+ fgAssignSetVarDef(tree);
+
+ __fallthrough;
+
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ case GT_ASG_MUL:
+ case GT_ASG_DIV:
+ case GT_ASG_MOD:
+ case GT_ASG_UDIV:
+ case GT_ASG_UMOD:
+ case GT_ASG_OR:
+ case GT_ASG_XOR:
+ case GT_ASG_AND:
+ case GT_ASG_LSH:
+ case GT_ASG_RSH:
+ case GT_ASG_RSZ:
+
+ /* We can't CSE the LHS of an assignment */
+ /* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */
+ op1->gtFlags |= GTF_DONT_CSE;
+ break;
- case GT_EQ:
- case GT_NE:
+ case GT_EQ:
+ case GT_NE:
- /* Make sure we're allowed to do this */
+ /* Make sure we're allowed to do this */
- if (optValnumCSE_phase)
- {
- // It is not safe to reorder/delete CSE's
- break;
- }
+ if (optValnumCSE_phase)
+ {
+ // It is not safe to reorder/delete CSE's
+ break;
+ }
- cns2 = op2;
+ cns2 = op2;
- /* Check for "(expr +/- icon1) ==/!= (non-zero-icon2)" */
+ /* Check for "(expr +/- icon1) ==/!= (non-zero-icon2)" */
- if (cns2->gtOper == GT_CNS_INT && cns2->gtIntCon.gtIconVal != 0)
- {
- op1 = tree->gtOp.gtOp1;
+ if (cns2->gtOper == GT_CNS_INT && cns2->gtIntCon.gtIconVal != 0)
+ {
+ op1 = tree->gtOp.gtOp1;
- /* Since this can occur repeatedly we use a while loop */
+ /* Since this can occur repeatedly we use a while loop */
- while ((op1->gtOper == GT_ADD || op1->gtOper == GT_SUB) &&
- (op1->gtOp.gtOp2->gtOper == GT_CNS_INT) &&
- (op1->gtType == TYP_INT) &&
- (op1->gtOverflow() == false))
- {
- /* Got it; change "x+icon1==icon2" to "x==icon2-icon1" */
+ while ((op1->gtOper == GT_ADD || op1->gtOper == GT_SUB) &&
+ (op1->gtOp.gtOp2->gtOper == GT_CNS_INT) && (op1->gtType == TYP_INT) &&
+ (op1->gtOverflow() == false))
+ {
+ /* Got it; change "x+icon1==icon2" to "x==icon2-icon1" */
- ival1 = op1->gtOp.gtOp2->gtIntCon.gtIconVal;
- ival2 = cns2->gtIntCon.gtIconVal;
+ ival1 = op1->gtOp.gtOp2->gtIntCon.gtIconVal;
+ ival2 = cns2->gtIntCon.gtIconVal;
- if (op1->gtOper == GT_ADD)
- {
- ival2 -= ival1;
- }
- else
- {
- ival2 += ival1;
- }
- cns2->gtIntCon.gtIconVal = ival2;
+ if (op1->gtOper == GT_ADD)
+ {
+ ival2 -= ival1;
+ }
+ else
+ {
+ ival2 += ival1;
+ }
+ cns2->gtIntCon.gtIconVal = ival2;
#ifdef _TARGET_64BIT_
- // we need to properly re-sign-extend or truncate as needed.
- cns2->AsIntCon()->TruncateOrSignExtend32();
-#endif // _TARGET_64BIT_
+ // we need to properly re-sign-extend or truncate as needed.
+ cns2->AsIntCon()->TruncateOrSignExtend32();
+#endif // _TARGET_64BIT_
- op1 = tree->gtOp.gtOp1 = op1->gtOp.gtOp1;
- }
- }
+ op1 = tree->gtOp.gtOp1 = op1->gtOp.gtOp1;
+ }
+ }
- //
- // Here we look for the following tree
- //
- // EQ/NE
- // / \
+ //
+ // Here we look for the following tree
+ //
+ // EQ/NE
+ // / \
// op1 CNS 0/1
- //
- ival2 = INT_MAX; // The value of INT_MAX for ival2 just means that the constant value is not 0 or 1
+ //
+ ival2 = INT_MAX; // The value of INT_MAX for ival2 just means that the constant value is not 0 or 1
- // cast to unsigned allows test for both 0 and 1
- if ((cns2->gtOper == GT_CNS_INT) && (((size_t) cns2->gtIntConCommon.IconValue()) <= 1U))
- {
- ival2 = (size_t) cns2->gtIntConCommon.IconValue();
- }
- else // cast to UINT64 allows test for both 0 and 1
- if ((cns2->gtOper == GT_CNS_LNG) && (((UINT64) cns2->gtIntConCommon.LngValue()) <= 1ULL))
- {
- ival2 = (size_t) cns2->gtIntConCommon.LngValue();
- }
+ // cast to unsigned allows test for both 0 and 1
+ if ((cns2->gtOper == GT_CNS_INT) && (((size_t)cns2->gtIntConCommon.IconValue()) <= 1U))
+ {
+ ival2 = (size_t)cns2->gtIntConCommon.IconValue();
+ }
+ else // cast to UINT64 allows test for both 0 and 1
+ if ((cns2->gtOper == GT_CNS_LNG) && (((UINT64)cns2->gtIntConCommon.LngValue()) <= 1ULL))
+ {
+ ival2 = (size_t)cns2->gtIntConCommon.LngValue();
+ }
- if (ival2 != INT_MAX)
- {
- // If we don't have a comma and relop, we can't do this optimization
- //
- if ((op1->gtOper == GT_COMMA) && (op1->gtOp.gtOp2->OperIsCompare()))
- {
- // Here we look for the following transformation
- //
- // EQ/NE Possible REVERSE(RELOP)
- // / \ / \
+ if (ival2 != INT_MAX)
+ {
+ // If we don't have a comma and relop, we can't do this optimization
+ //
+ if ((op1->gtOper == GT_COMMA) && (op1->gtOp.gtOp2->OperIsCompare()))
+ {
+ // Here we look for the following transformation
+ //
+ // EQ/NE Possible REVERSE(RELOP)
+ // / \ / \
// COMMA CNS 0/1 -> COMMA relop_op2
- // / \ / \
+ // / \ / \
// x RELOP x relop_op1
- // / \
+ // / \
// relop_op1 relop_op2
- //
- //
- //
- GenTreePtr comma = op1;
- GenTreePtr relop = comma->gtOp.gtOp2;
+ //
+ //
+ //
+ GenTreePtr comma = op1;
+ GenTreePtr relop = comma->gtOp.gtOp2;
- GenTreePtr relop_op1 = relop->gtOp.gtOp1;
+ GenTreePtr relop_op1 = relop->gtOp.gtOp1;
- bool reverse = ((ival2 == 0) == (oper == GT_EQ));
+ bool reverse = ((ival2 == 0) == (oper == GT_EQ));
- if (reverse)
- {
- gtReverseCond(relop);
- }
+ if (reverse)
+ {
+ gtReverseCond(relop);
+ }
- relop->gtOp.gtOp1 = comma;
- comma->gtOp.gtOp2 = relop_op1;
+ relop->gtOp.gtOp1 = comma;
+ comma->gtOp.gtOp2 = relop_op1;
- // Comma now has fewer nodes underneath it, so we need to regenerate its flags
- comma->gtFlags &= ~GTF_ALL_EFFECT;
- comma->gtFlags |= (comma->gtOp.gtOp1->gtFlags) & GTF_ALL_EFFECT;
- comma->gtFlags |= (comma->gtOp.gtOp2->gtFlags) & GTF_ALL_EFFECT;
+ // Comma now has fewer nodes underneath it, so we need to regenerate its flags
+ comma->gtFlags &= ~GTF_ALL_EFFECT;
+ comma->gtFlags |= (comma->gtOp.gtOp1->gtFlags) & GTF_ALL_EFFECT;
+ comma->gtFlags |= (comma->gtOp.gtOp2->gtFlags) & GTF_ALL_EFFECT;
- noway_assert((relop->gtFlags & GTF_RELOP_JMP_USED) == 0);
- noway_assert((relop->gtFlags & GTF_REVERSE_OPS) == 0);
- relop->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED|GTF_RELOP_QMARK|GTF_DONT_CSE|GTF_ALL_EFFECT);
+ noway_assert((relop->gtFlags & GTF_RELOP_JMP_USED) == 0);
+ noway_assert((relop->gtFlags & GTF_REVERSE_OPS) == 0);
+ relop->gtFlags |=
+ tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE | GTF_ALL_EFFECT);
- return relop;
- }
+ return relop;
+ }
- if (op1->gtOper == GT_COMMA)
- {
- // Here we look for the following tree
- // and when the LCL_VAR is a temp we can fold the tree:
- //
- // EQ/NE EQ/NE
- // / \ / \
+ if (op1->gtOper == GT_COMMA)
+ {
+ // Here we look for the following tree
+ // and when the LCL_VAR is a temp we can fold the tree:
+ //
+ // EQ/NE EQ/NE
+ // / \ / \
// COMMA CNS 0/1 -> RELOP CNS 0/1
- // / \ / \
+ // / \ / \
// ASG LCL_VAR
- // / \
+ // / \
// LCL_VAR RELOP
- // / \
+ // / \
//
- GenTreePtr asg = op1->gtOp.gtOp1;
- GenTreePtr lcl = op1->gtOp.gtOp2;
+ GenTreePtr asg = op1->gtOp.gtOp1;
+ GenTreePtr lcl = op1->gtOp.gtOp2;
- /* Make sure that the left side of the comma is the assignment of the LCL_VAR */
- if (asg->gtOper != GT_ASG)
- goto SKIP;
+ /* Make sure that the left side of the comma is the assignment of the LCL_VAR */
+ if (asg->gtOper != GT_ASG)
+ {
+ goto SKIP;
+ }
- /* The right side of the comma must be a LCL_VAR temp */
- if (lcl->gtOper != GT_LCL_VAR)
- goto SKIP;
+ /* The right side of the comma must be a LCL_VAR temp */
+ if (lcl->gtOper != GT_LCL_VAR)
+ {
+ goto SKIP;
+ }
- unsigned lclNum = lcl->gtLclVarCommon.gtLclNum; noway_assert(lclNum < lvaCount);
+ unsigned lclNum = lcl->gtLclVarCommon.gtLclNum;
+ noway_assert(lclNum < lvaCount);
- /* If the LCL_VAR is not a temp then bail, a temp has a single def */
- if (!lvaTable[lclNum].lvIsTemp)
- goto SKIP;
+ /* If the LCL_VAR is not a temp then bail, a temp has a single def */
+ if (!lvaTable[lclNum].lvIsTemp)
+ {
+ goto SKIP;
+ }
#if FEATURE_ANYCSE
- /* If the LCL_VAR is a CSE temp then bail, it could have multiple defs/uses */
- // Fix 383856 X86/ARM ILGEN
- if (lclNumIsCSE(lclNum))
- goto SKIP;
+ /* If the LCL_VAR is a CSE temp then bail, it could have multiple defs/uses */
+ // Fix 383856 X86/ARM ILGEN
+ if (lclNumIsCSE(lclNum))
+ {
+ goto SKIP;
+ }
#endif
- /* We also must be assigning the result of a RELOP */
- if (asg->gtOp.gtOp1->gtOper != GT_LCL_VAR)
- goto SKIP;
-
- /* Both of the LCL_VAR must match */
- if (asg->gtOp.gtOp1->gtLclVarCommon.gtLclNum != lclNum)
- goto SKIP;
-
- /* If right side of asg is not a RELOP then skip */
- if (!asg->gtOp.gtOp2->OperIsCompare())
- goto SKIP;
-
- LclVarDsc * varDsc = lvaTable + lclNum;
-
- /* Set op1 to the right side of asg, (i.e. the RELOP) */
- op1 = asg->gtOp.gtOp2;
+ /* We also must be assigning the result of a RELOP */
+ if (asg->gtOp.gtOp1->gtOper != GT_LCL_VAR)
+ {
+ goto SKIP;
+ }
- DEBUG_DESTROY_NODE(asg->gtOp.gtOp1);
- DEBUG_DESTROY_NODE(lcl);
+ /* Both of the LCL_VAR must match */
+ if (asg->gtOp.gtOp1->gtLclVarCommon.gtLclNum != lclNum)
+ {
+ goto SKIP;
+ }
- /* This local variable should never be used again */
- // <BUGNUM>
-                // VSW 184221: Set RefCnt to zero to indicate that this local var
-                // is not used any more. (Keep the lvType as is.)
- // Otherwise lvOnFrame will be set to true in Compiler::raMarkStkVars
- // And then emitter::emitEndCodeGen will assert in the following line:
- // noway_assert( dsc->lvTracked);
- // </BUGNUM>
- noway_assert(varDsc->lvRefCnt == 0 || // lvRefCnt may not have been set yet.
- varDsc->lvRefCnt == 2 // Or, we assume this tmp should only be used here,
- // and it only shows up twice.
- );
- lvaTable[lclNum].lvRefCnt = 0;
- lvaTable[lclNum].lvaResetSortAgainFlag(this);
- }
+ /* If right side of asg is not a RELOP then skip */
+ if (!asg->gtOp.gtOp2->OperIsCompare())
+ {
+ goto SKIP;
+ }
+ LclVarDsc* varDsc = lvaTable + lclNum;
+
+ /* Set op1 to the right side of asg, (i.e. the RELOP) */
+ op1 = asg->gtOp.gtOp2;
+
+ DEBUG_DESTROY_NODE(asg->gtOp.gtOp1);
+ DEBUG_DESTROY_NODE(lcl);
+
+ /* This local variable should never be used again */
+ // <BUGNUM>
+                        // VSW 184221: Set RefCnt to zero to indicate that this local var
+                        // is not used any more. (Keep the lvType as is.)
+ // Otherwise lvOnFrame will be set to true in Compiler::raMarkStkVars
+ // And then emitter::emitEndCodeGen will assert in the following line:
+ // noway_assert( dsc->lvTracked);
+ // </BUGNUM>
+ noway_assert(varDsc->lvRefCnt == 0 || // lvRefCnt may not have been set yet.
+ varDsc->lvRefCnt == 2 // Or, we assume this tmp should only be used here,
+ // and it only shows up twice.
+ );
+ lvaTable[lclNum].lvRefCnt = 0;
+ lvaTable[lclNum].lvaResetSortAgainFlag(this);
+ }
- if (op1->OperIsCompare())
- {
- // Here we look for the following tree
- //
- // EQ/NE -> RELOP/!RELOP
- // / \ / \
+ if (op1->OperIsCompare())
+ {
+ // Here we look for the following tree
+ //
+ // EQ/NE -> RELOP/!RELOP
+ // / \ / \
// RELOP CNS 0/1
- // / \
+ // / \
//
- // Note that we will remove/destroy the EQ/NE node and move
-            // the RELOP up into its location.
-
- /* Here we reverse the RELOP if necessary */
+ // Note that we will remove/destroy the EQ/NE node and move
+                    // the RELOP up into its location.
- bool reverse = ((ival2 == 0) == (oper == GT_EQ));
+ /* Here we reverse the RELOP if necessary */
- if (reverse)
- {
- gtReverseCond(op1);
- }
+ bool reverse = ((ival2 == 0) == (oper == GT_EQ));
- /* Propagate gtType of tree into op1 in case it is TYP_BYTE for setcc optimization */
- op1->gtType = tree->gtType;
+ if (reverse)
+ {
+ gtReverseCond(op1);
+ }
- noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0);
- op1->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED|GTF_RELOP_QMARK|GTF_DONT_CSE);
+ /* Propagate gtType of tree into op1 in case it is TYP_BYTE for setcc optimization */
+ op1->gtType = tree->gtType;
- DEBUG_DESTROY_NODE(tree);
- return op1;
+ noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0);
+ op1->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
- }
+ DEBUG_DESTROY_NODE(tree);
+ return op1;
+ }
- //
- // Now we check for a compare with the result of an '&' operator
- //
- // Here we look for the following transformation:
- //
- // EQ/NE EQ/NE
- // / \ / \
+ //
+ // Now we check for a compare with the result of an '&' operator
+ //
+ // Here we look for the following transformation:
+ //
+ // EQ/NE EQ/NE
+ // / \ / \
// AND CNS 0/1 -> AND CNS 0
- // / \ / \
+ // / \ / \
// RSZ/RSH CNS 1 x CNS (1 << y)
- // / \
+ // / \
// x CNS_INT +y
- if (op1->gtOper == GT_AND)
- {
- GenTreePtr andOp = op1;
- GenTreePtr rshiftOp = andOp->gtOp.gtOp1;
+ if (op1->gtOper == GT_AND)
+ {
+ GenTreePtr andOp = op1;
+ GenTreePtr rshiftOp = andOp->gtOp.gtOp1;
- if ((rshiftOp->gtOper != GT_RSZ) && (rshiftOp->gtOper != GT_RSH))
- goto SKIP;
+ if ((rshiftOp->gtOper != GT_RSZ) && (rshiftOp->gtOper != GT_RSH))
+ {
+ goto SKIP;
+ }
- if (!rshiftOp->gtOp.gtOp2->IsCnsIntOrI())
- goto SKIP;
+ if (!rshiftOp->gtOp.gtOp2->IsCnsIntOrI())
+ {
+ goto SKIP;
+ }
- ssize_t shiftAmount = rshiftOp->gtOp.gtOp2->gtIntCon.gtIconVal;
+ ssize_t shiftAmount = rshiftOp->gtOp.gtOp2->gtIntCon.gtIconVal;
- if (shiftAmount < 0)
- goto SKIP;
+ if (shiftAmount < 0)
+ {
+ goto SKIP;
+ }
- if (!andOp->gtOp.gtOp2->IsIntegralConst(1))
- goto SKIP;
+ if (!andOp->gtOp.gtOp2->IsIntegralConst(1))
+ {
+ goto SKIP;
+ }
- if (andOp->gtType == TYP_INT)
- {
- if (shiftAmount > 31)
- goto SKIP;
+ if (andOp->gtType == TYP_INT)
+ {
+ if (shiftAmount > 31)
+ {
+ goto SKIP;
+ }
- UINT32 newAndOperand = ((UINT32) 1) << shiftAmount;
+ UINT32 newAndOperand = ((UINT32)1) << shiftAmount;
- andOp->gtOp.gtOp2->gtIntCon.gtIconVal = newAndOperand;
+ andOp->gtOp.gtOp2->gtIntCon.gtIconVal = newAndOperand;
- // Reverse the cond if necessary
- if (ival2 == 1)
- {
- gtReverseCond(tree);
- cns2->gtIntCon.gtIconVal = 0;
- oper = tree->gtOper;
- }
+ // Reverse the cond if necessary
+ if (ival2 == 1)
+ {
+ gtReverseCond(tree);
+ cns2->gtIntCon.gtIconVal = 0;
+ oper = tree->gtOper;
+ }
+ }
+ else if (andOp->gtType == TYP_LONG)
+ {
+ if (shiftAmount > 63)
+ {
+ goto SKIP;
+ }
- }
- else if (andOp->gtType == TYP_LONG)
- {
- if (shiftAmount > 63)
- goto SKIP;
+ UINT64 newAndOperand = ((UINT64)1) << shiftAmount;
- UINT64 newAndOperand = ((UINT64) 1) << shiftAmount;
+ andOp->gtOp.gtOp2->gtIntConCommon.SetLngValue(newAndOperand);
- andOp->gtOp.gtOp2->gtIntConCommon.SetLngValue(newAndOperand);
+ // Reverse the cond if necessary
+ if (ival2 == 1)
+ {
+ gtReverseCond(tree);
+ cns2->gtIntConCommon.SetLngValue(0);
+ oper = tree->gtOper;
+ }
+ }
- // Reverse the cond if necessary
- if (ival2 == 1)
- {
- gtReverseCond(tree);
- cns2->gtIntConCommon.SetLngValue(0);
- oper = tree->gtOper;
- }
- }
+ andOp->gtOp.gtOp1 = rshiftOp->gtOp.gtOp1;
- andOp->gtOp.gtOp1 = rshiftOp->gtOp.gtOp1;
+ DEBUG_DESTROY_NODE(rshiftOp->gtOp.gtOp2);
+ DEBUG_DESTROY_NODE(rshiftOp);
+ }
+ } // END if (ival2 != INT_MAX)
- DEBUG_DESTROY_NODE(rshiftOp->gtOp.gtOp2);
- DEBUG_DESTROY_NODE(rshiftOp);
- }
- } // END if (ival2 != INT_MAX)
+ SKIP:
+ /* Now check for compares with small constant longs that can be cast to int */
-SKIP:
- /* Now check for compares with small constant longs that can be cast to int */
+ if (!cns2->OperIsConst())
+ {
+ goto COMPARE;
+ }
- if (!cns2->OperIsConst())
- goto COMPARE;
+ if (cns2->TypeGet() != TYP_LONG)
+ {
+ goto COMPARE;
+ }
- if (cns2->TypeGet() != TYP_LONG)
- goto COMPARE;
+ /* Is the constant 31 bits or smaller? */
- /* Is the constant 31 bits or smaller? */
+ if ((cns2->gtIntConCommon.LngValue() >> 31) != 0)
+ {
+ goto COMPARE;
+ }
- if ((cns2->gtIntConCommon.LngValue() >> 31) != 0)
- goto COMPARE;
+ /* Is the first comparand mask operation of type long ? */
- /* Is the first comparand mask operation of type long ? */
+ if (op1->gtOper != GT_AND)
+ {
+ /* Another interesting case: cast from int */
- if (op1->gtOper != GT_AND)
- {
- /* Another interesting case: cast from int */
+ if (op1->gtOper == GT_CAST && op1->CastFromType() == TYP_INT &&
+ !gtIsActiveCSE_Candidate(op1) && // op1 cannot be a CSE candidate
+ !op1->gtOverflow()) // cannot be an overflow checking cast
+ {
+ /* Simply make this into an integer comparison */
- if (op1->gtOper == GT_CAST &&
- op1->CastFromType() == TYP_INT &&
- !gtIsActiveCSE_Candidate(op1) && // op1 cannot be a CSE candidate
- !op1->gtOverflow()) // cannot be an overflow checking cast
- {
- /* Simply make this into an integer comparison */
+ tree->gtOp.gtOp1 = op1->gtCast.CastOp();
+ tree->gtOp.gtOp2 = gtNewIconNode((int)cns2->gtIntConCommon.LngValue(), TYP_INT);
+ }
- tree->gtOp.gtOp1 = op1->gtCast.CastOp();
- tree->gtOp.gtOp2 = gtNewIconNode((int)cns2->gtIntConCommon.LngValue(), TYP_INT);
- }
+ goto COMPARE;
+ }
- goto COMPARE;
- }
+ noway_assert(op1->TypeGet() == TYP_LONG && op1->OperGet() == GT_AND);
- noway_assert(op1->TypeGet() == TYP_LONG && op1->OperGet() == GT_AND);
+ /* Is the result of the mask effectively an INT ? */
- /* Is the result of the mask effectively an INT ? */
+ GenTreePtr andMask;
+ andMask = op1->gtOp.gtOp2;
+ if (andMask->gtOper != GT_CNS_NATIVELONG)
+ {
+ goto COMPARE;
+ }
+ if ((andMask->gtIntConCommon.LngValue() >> 32) != 0)
+ {
+ goto COMPARE;
+ }
- GenTreePtr andMask; andMask = op1->gtOp.gtOp2;
- if (andMask->gtOper != GT_CNS_NATIVELONG)
- goto COMPARE;
- if ((andMask->gtIntConCommon.LngValue() >> 32) != 0)
- goto COMPARE;
+ /* Now we know that we can cast gtOp.gtOp1 of AND to int */
- /* Now we know that we can cast gtOp.gtOp1 of AND to int */
+ op1->gtOp.gtOp1 = gtNewCastNode(TYP_INT, op1->gtOp.gtOp1, TYP_INT);
- op1->gtOp.gtOp1 = gtNewCastNode(TYP_INT,
- op1->gtOp.gtOp1,
- TYP_INT);
+ /* now replace the mask node (gtOp.gtOp2 of AND node) */
- /* now replace the mask node (gtOp.gtOp2 of AND node) */
+ noway_assert(andMask == op1->gtOp.gtOp2);
- noway_assert(andMask == op1->gtOp.gtOp2);
+ ival1 = (int)andMask->gtIntConCommon.LngValue();
+ andMask->SetOper(GT_CNS_INT);
+ andMask->gtType = TYP_INT;
+ andMask->gtIntCon.gtIconVal = ival1;
- ival1 = (int) andMask->gtIntConCommon.LngValue();
- andMask->SetOper(GT_CNS_INT);
- andMask->gtType = TYP_INT;
- andMask->gtIntCon.gtIconVal = ival1;
+ /* now change the type of the AND node */
- /* now change the type of the AND node */
+ op1->gtType = TYP_INT;
- op1->gtType = TYP_INT;
+ /* finally we replace the comparand */
- /* finally we replace the comparand */
+ ival2 = (int)cns2->gtIntConCommon.LngValue();
+ cns2->SetOper(GT_CNS_INT);
+ cns2->gtType = TYP_INT;
- ival2 = (int) cns2->gtIntConCommon.LngValue();
- cns2->SetOper(GT_CNS_INT);
- cns2->gtType = TYP_INT;
+ noway_assert(cns2 == op2);
+ cns2->gtIntCon.gtIconVal = ival2;
- noway_assert(cns2 == op2);
- cns2->gtIntCon.gtIconVal = ival2;
+ goto COMPARE;
- goto COMPARE;
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
-
- if ((tree->gtFlags & GTF_UNSIGNED) == 0)
- {
- if (op2->gtOper == GT_CNS_INT)
- {
- cns2 = op2;
- /* Check for "expr relop 1" */
- if (cns2->IsIntegralConst(1))
+ if ((tree->gtFlags & GTF_UNSIGNED) == 0)
{
- /* Check for "expr >= 1" */
- if (oper == GT_GE)
- {
- /* Change to "expr > 0" */
- oper = GT_GT;
- goto SET_OPER;
- }
- /* Check for "expr < 1" */
- else if (oper == GT_LT)
+ if (op2->gtOper == GT_CNS_INT)
{
- /* Change to "expr <= 0" */
- oper = GT_LE;
- goto SET_OPER;
- }
- }
- /* Check for "expr relop -1" */
- else if (cns2->IsIntegralConst(-1) && ((oper == GT_LE) || (oper == GT_GT)))
- {
- /* Check for "expr <= -1" */
- if (oper == GT_LE)
- {
- /* Change to "expr < 0" */
- oper = GT_LT;
- goto SET_OPER;
- }
- /* Check for "expr > -1" */
- else if (oper == GT_GT)
- {
- /* Change to "expr >= 0" */
- oper = GT_GE;
+ cns2 = op2;
+ /* Check for "expr relop 1" */
+ if (cns2->IsIntegralConst(1))
+ {
+ /* Check for "expr >= 1" */
+ if (oper == GT_GE)
+ {
+ /* Change to "expr > 0" */
+ oper = GT_GT;
+ goto SET_OPER;
+ }
+ /* Check for "expr < 1" */
+ else if (oper == GT_LT)
+ {
+ /* Change to "expr <= 0" */
+ oper = GT_LE;
+ goto SET_OPER;
+ }
+ }
+ /* Check for "expr relop -1" */
+ else if (cns2->IsIntegralConst(-1) && ((oper == GT_LE) || (oper == GT_GT)))
+ {
+ /* Check for "expr <= -1" */
+ if (oper == GT_LE)
+ {
+ /* Change to "expr < 0" */
+ oper = GT_LT;
+ goto SET_OPER;
+ }
+ /* Check for "expr > -1" */
+ else if (oper == GT_GT)
+ {
+ /* Change to "expr >= 0" */
+ oper = GT_GE;
-SET_OPER:
- // IF we get here we should be changing 'oper'
- assert(tree->OperGet() != oper);
+ SET_OPER:
+ // IF we get here we should be changing 'oper'
+ assert(tree->OperGet() != oper);
- // Keep the old ValueNumber for 'tree' as the new expr
- // will still compute the same value as before
- tree->SetOper(oper, GenTree::PRESERVE_VN);
- cns2->gtIntCon.gtIconVal = 0;
+ // Keep the old ValueNumber for 'tree' as the new expr
+ // will still compute the same value as before
+ tree->SetOper(oper, GenTree::PRESERVE_VN);
+ cns2->gtIntCon.gtIconVal = 0;
- // vnStore is null before the ValueNumber phase has run
- if (vnStore != nullptr)
- {
- // Update the ValueNumber for 'cns2', as we just changed it to 0
- fgValueNumberTreeConst(cns2);
+ // vnStore is null before the ValueNumber phase has run
+ if (vnStore != nullptr)
+ {
+ // Update the ValueNumber for 'cns2', as we just changed it to 0
+ fgValueNumberTreeConst(cns2);
+ }
+
+ op2 = tree->gtOp.gtOp2 = gtFoldExpr(op2);
+ }
}
-
- op2 = tree->gtOp.gtOp2 = gtFoldExpr(op2);
}
}
- }
- }
-COMPARE:
+ COMPARE:
- noway_assert(tree->OperKind() & GTK_RELOP);
+ noway_assert(tree->OperKind() & GTK_RELOP);
- /* Check if the result of the comparison is used for a jump.
- * If not then only the int (i.e. 32 bit) case is handled in
- * the code generator through the (x86) "set" instructions.
- * For the rest of the cases, the simplest way is to
- * "simulate" the comparison with ?:
- *
- * On ARM, we previously used the IT instruction, but the IT instructions
- * have mostly been declared obsolete and off-limits, so all cases on ARM
- * get converted to ?: */
+ /* Check if the result of the comparison is used for a jump.
+ * If not then only the int (i.e. 32 bit) case is handled in
+ * the code generator through the (x86) "set" instructions.
+ * For the rest of the cases, the simplest way is to
+ * "simulate" the comparison with ?:
+ *
+ * On ARM, we previously used the IT instruction, but the IT instructions
+ * have mostly been declared obsolete and off-limits, so all cases on ARM
+ * get converted to ?: */
- if (!(tree->gtFlags & GTF_RELOP_JMP_USED) &&
- fgMorphRelopToQmark(op1))
- {
- /* We convert it to "(CMP_TRUE) ? (1):(0)" */
-
- op1 = tree;
- op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
- op1->gtRequestSetFlags();
+ if (!(tree->gtFlags & GTF_RELOP_JMP_USED) && fgMorphRelopToQmark(op1))
+ {
+ /* We convert it to "(CMP_TRUE) ? (1):(0)" */
- op2 = new (this, GT_COLON) GenTreeColon(TYP_INT, gtNewIconNode(1), gtNewIconNode(0)
- );
- op2 = fgMorphTree(op2);
+ op1 = tree;
+ op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_RELOP_QMARK | GTF_DONT_CSE);
+ op1->gtRequestSetFlags();
- tree = gtNewQmarkNode(TYP_INT, op1, op2);
+ op2 = new (this, GT_COLON) GenTreeColon(TYP_INT, gtNewIconNode(1), gtNewIconNode(0));
+ op2 = fgMorphTree(op2);
- fgMorphTreeDone(tree);
+ tree = gtNewQmarkNode(TYP_INT, op1, op2);
- return tree;
- }
- break;
+ fgMorphTreeDone(tree);
- case GT_QMARK:
+ return tree;
+ }
+ break;
- /* If op1 is a comma throw node then we won't be keeping op2 */
- if (fgIsCommaThrow(op1))
- break;
+ case GT_QMARK:
- /* Get hold of the two branches */
+ /* If op1 is a comma throw node then we won't be keeping op2 */
+ if (fgIsCommaThrow(op1))
+ {
+ break;
+ }
- noway_assert(op2->OperGet() == GT_COLON);
- elseNode = op2->AsColon()->ElseNode();
- thenNode = op2->AsColon()->ThenNode();
+ /* Get hold of the two branches */
- /* Try to hoist assignments out of qmark colon constructs.
-           i.e. replace (cond?(x=a):(x=b)) with (x=(cond?a:b)). */
+ noway_assert(op2->OperGet() == GT_COLON);
+ elseNode = op2->AsColon()->ElseNode();
+ thenNode = op2->AsColon()->ThenNode();
- if (tree->TypeGet() == TYP_VOID &&
- thenNode->OperGet() == GT_ASG &&
- elseNode->OperGet() == GT_ASG &&
- thenNode->TypeGet() != TYP_LONG &&
- GenTree::Compare(thenNode->gtOp.gtOp1, elseNode->gtOp.gtOp1) &&
- thenNode->gtOp.gtOp2->TypeGet() == elseNode->gtOp.gtOp2->TypeGet())
- {
- noway_assert(thenNode->TypeGet() == elseNode->TypeGet());
+ /* Try to hoist assignments out of qmark colon constructs.
+               i.e. replace (cond?(x=a):(x=b)) with (x=(cond?a:b)). */
- GenTreePtr asg = thenNode;
- GenTreePtr colon = op2;
- colon->gtOp.gtOp1 = thenNode->gtOp.gtOp2;
- colon->gtOp.gtOp2 = elseNode->gtOp.gtOp2;
- tree->gtType = colon->gtType = asg->gtOp.gtOp2->gtType;
- asg->gtOp.gtOp2 = tree;
+ if (tree->TypeGet() == TYP_VOID && thenNode->OperGet() == GT_ASG && elseNode->OperGet() == GT_ASG &&
+ thenNode->TypeGet() != TYP_LONG && GenTree::Compare(thenNode->gtOp.gtOp1, elseNode->gtOp.gtOp1) &&
+ thenNode->gtOp.gtOp2->TypeGet() == elseNode->gtOp.gtOp2->TypeGet())
+ {
+ noway_assert(thenNode->TypeGet() == elseNode->TypeGet());
- // Asg will have all the flags that the QMARK had
- asg->gtFlags |= (tree->gtFlags & GTF_ALL_EFFECT);
+ GenTreePtr asg = thenNode;
+ GenTreePtr colon = op2;
+ colon->gtOp.gtOp1 = thenNode->gtOp.gtOp2;
+ colon->gtOp.gtOp2 = elseNode->gtOp.gtOp2;
+ tree->gtType = colon->gtType = asg->gtOp.gtOp2->gtType;
+ asg->gtOp.gtOp2 = tree;
- // Colon flag won't have the flags that x had.
- colon->gtFlags &= ~GTF_ALL_EFFECT;
- colon->gtFlags |= (colon->gtOp.gtOp1->gtFlags |
- colon->gtOp.gtOp2->gtFlags) & GTF_ALL_EFFECT;
+ // Asg will have all the flags that the QMARK had
+ asg->gtFlags |= (tree->gtFlags & GTF_ALL_EFFECT);
- DEBUG_DESTROY_NODE(elseNode->gtOp.gtOp1);
- DEBUG_DESTROY_NODE(elseNode);
+ // Colon flag won't have the flags that x had.
+ colon->gtFlags &= ~GTF_ALL_EFFECT;
+ colon->gtFlags |= (colon->gtOp.gtOp1->gtFlags | colon->gtOp.gtOp2->gtFlags) & GTF_ALL_EFFECT;
- return asg;
- }
+ DEBUG_DESTROY_NODE(elseNode->gtOp.gtOp1);
+ DEBUG_DESTROY_NODE(elseNode);
+ return asg;
+ }
- /* If the 'else' branch is empty swap the two branches and reverse the condition */
+ /* If the 'else' branch is empty swap the two branches and reverse the condition */
- if (elseNode->IsNothingNode())
- {
- /* This can only happen for VOID ?: */
- noway_assert(op2->gtType == TYP_VOID);
+ if (elseNode->IsNothingNode())
+ {
+ /* This can only happen for VOID ?: */
+ noway_assert(op2->gtType == TYP_VOID);
- /* If the thenNode and elseNode are both nop nodes then optimize away the QMARK */
- if (thenNode->IsNothingNode())
- {
- // We may be able to throw away op1 (unless it has side-effects)
+ /* If the thenNode and elseNode are both nop nodes then optimize away the QMARK */
+ if (thenNode->IsNothingNode())
+ {
+ // We may be able to throw away op1 (unless it has side-effects)
- if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
- {
-                    /* Just return a Nop Node */
- return thenNode;
- }
- else
- {
- /* Just return the relop, but clear the special flags. Note
- that we can't do that for longs and floats (see code under
- COMPARE label above) */
+ if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
+ {
+                            /* Just return a Nop Node */
+ return thenNode;
+ }
+ else
+ {
+ /* Just return the relop, but clear the special flags. Note
+ that we can't do that for longs and floats (see code under
+ COMPARE label above) */
- if (!fgMorphRelopToQmark(op1->gtOp.gtOp1))
+ if (!fgMorphRelopToQmark(op1->gtOp.gtOp1))
+ {
+ op1->gtFlags &= ~(GTF_RELOP_QMARK | GTF_RELOP_JMP_USED);
+ return op1;
+ }
+ }
+ }
+ else
{
- op1->gtFlags &= ~(GTF_RELOP_QMARK | GTF_RELOP_JMP_USED);
- return op1;
+ GenTreePtr tmp = elseNode;
+
+ op2->AsColon()->ElseNode() = elseNode = thenNode;
+ op2->AsColon()->ThenNode() = thenNode = tmp;
+ gtReverseCond(op1);
}
}
- }
- else
- {
- GenTreePtr tmp = elseNode;
-
- op2->AsColon()->ElseNode() = elseNode = thenNode;
- op2->AsColon()->ThenNode() = thenNode = tmp;
- gtReverseCond(op1);
- }
- }
#if !defined(_TARGET_ARM_)
- // If we have (cond)?0:1, then we just return "cond" for TYP_INTs
- //
- // Don't do this optimization for ARM: we always require assignment
- // to boolean to remain ?:, since we don't have any way to generate
- // this with straight-line code, like x86 does using setcc (at least
- // after the IT instruction is deprecated).
+ // If we have (cond)?0:1, then we just return "cond" for TYP_INTs
+ //
+ // Don't do this optimization for ARM: we always require assignment
+ // to boolean to remain ?:, since we don't have any way to generate
+ // this with straight-line code, like x86 does using setcc (at least
+ // after the IT instruction is deprecated).
- if (genActualType(op1->gtOp.gtOp1->gtType) == TYP_INT &&
- genActualType(typ) == TYP_INT &&
- thenNode->gtOper == GT_CNS_INT &&
- elseNode->gtOper == GT_CNS_INT)
- {
- ival1 = thenNode->gtIntCon.gtIconVal;
- ival2 = elseNode->gtIntCon.gtIconVal;
+ if (genActualType(op1->gtOp.gtOp1->gtType) == TYP_INT && genActualType(typ) == TYP_INT &&
+ thenNode->gtOper == GT_CNS_INT && elseNode->gtOper == GT_CNS_INT)
+ {
+ ival1 = thenNode->gtIntCon.gtIconVal;
+ ival2 = elseNode->gtIntCon.gtIconVal;
- // Is one constant 0 and the other 1?
- if ((ival1 | ival2) == 1 && (ival1 & ival2) == 0)
- {
- // If the constants are {1, 0}, reverse the condition
- if (ival1 == 1)
- gtReverseCond(op1);
+ // Is one constant 0 and the other 1?
+ if ((ival1 | ival2) == 1 && (ival1 & ival2) == 0)
+ {
+ // If the constants are {1, 0}, reverse the condition
+ if (ival1 == 1)
+ {
+ gtReverseCond(op1);
+ }
- // Unmark GTF_RELOP_JMP_USED on the condition node so it knows that it
- // needs to materialize the result as a 0 or 1.
- noway_assert(op1->gtFlags & (GTF_RELOP_QMARK | GTF_RELOP_JMP_USED));
- op1->gtFlags &= ~(GTF_RELOP_QMARK | GTF_RELOP_JMP_USED);
+ // Unmark GTF_RELOP_JMP_USED on the condition node so it knows that it
+ // needs to materialize the result as a 0 or 1.
+ noway_assert(op1->gtFlags & (GTF_RELOP_QMARK | GTF_RELOP_JMP_USED));
+ op1->gtFlags &= ~(GTF_RELOP_QMARK | GTF_RELOP_JMP_USED);
- DEBUG_DESTROY_NODE(tree);
- DEBUG_DESTROY_NODE(op2);
+ DEBUG_DESTROY_NODE(tree);
+ DEBUG_DESTROY_NODE(op2);
- return op1;
- }
- }
+ return op1;
+ }
+ }
#endif // !_TARGET_ARM_
- break; // end case GT_QMARK
-
+ break; // end case GT_QMARK
- case GT_MUL:
+ case GT_MUL:
#ifndef _TARGET_64BIT_
- if (typ == TYP_LONG)
- {
- // This must be GTF_MUL_64RSLT
- assert(tree->gtIsValid64RsltMul());
- return tree;
- }
+ if (typ == TYP_LONG)
+ {
+ // This must be GTF_MUL_64RSLT
+ assert(tree->gtIsValid64RsltMul());
+ return tree;
+ }
#endif // _TARGET_64BIT_
- goto CM_OVF_OP;
+ goto CM_OVF_OP;
- case GT_SUB:
+ case GT_SUB:
- if (tree->gtOverflow())
- goto CM_OVF_OP;
+ if (tree->gtOverflow())
+ {
+ goto CM_OVF_OP;
+ }
- /* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */
+ /* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */
- noway_assert(op2);
- if (op2->IsCnsIntOrI())
- {
- /* Negate the constant and change the node to be "+" */
+ noway_assert(op2);
+ if (op2->IsCnsIntOrI())
+ {
+ /* Negate the constant and change the node to be "+" */
- op2->gtIntConCommon.SetIconValue(-op2->gtIntConCommon.IconValue());
- oper = GT_ADD;
- tree->ChangeOper(oper);
- goto CM_ADD_OP;
- }
+ op2->gtIntConCommon.SetIconValue(-op2->gtIntConCommon.IconValue());
+ oper = GT_ADD;
+ tree->ChangeOper(oper);
+ goto CM_ADD_OP;
+ }
- /* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */
+ /* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */
- noway_assert(op1);
- if (op1->IsCnsIntOrI())
- {
- noway_assert(varTypeIsIntOrI(tree));
+ noway_assert(op1);
+ if (op1->IsCnsIntOrI())
+ {
+ noway_assert(varTypeIsIntOrI(tree));
- tree->gtOp.gtOp2 = op2 = gtNewOperNode(GT_NEG, tree->gtType, op2); // The type of the new GT_NEG node should be the same
- // as the type of the tree, i.e. tree->gtType.
- fgMorphTreeDone(op2);
+ tree->gtOp.gtOp2 = op2 =
+ gtNewOperNode(GT_NEG, tree->gtType, op2); // The type of the new GT_NEG node should be the same
+ // as the type of the tree, i.e. tree->gtType.
+ fgMorphTreeDone(op2);
- oper = GT_ADD;
- tree->ChangeOper(oper);
- goto CM_ADD_OP;
- }
+ oper = GT_ADD;
+ tree->ChangeOper(oper);
+ goto CM_ADD_OP;
+ }
- /* No match - exit */
+ /* No match - exit */
- break;
+ break;
#ifdef _TARGET_ARM64_
- case GT_DIV:
- if (!varTypeIsFloating(tree->gtType))
- {
- // Codegen for this instruction needs to be able to throw two exceptions:
- fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW, fgPtrArgCntCur);
- fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO, fgPtrArgCntCur);
- }
- break;
- case GT_UDIV:
- // Codegen for this instruction needs to be able to throw one exception:
- fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO, fgPtrArgCntCur);
- break;
+ case GT_DIV:
+ if (!varTypeIsFloating(tree->gtType))
+ {
+ // Codegen for this instruction needs to be able to throw two exceptions:
+ fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW, fgPtrArgCntCur);
+ fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO, fgPtrArgCntCur);
+ }
+ break;
+ case GT_UDIV:
+ // Codegen for this instruction needs to be able to throw one exception:
+ fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO, fgPtrArgCntCur);
+ break;
#endif
- case GT_ADD:
-
-CM_OVF_OP :
- if (tree->gtOverflow())
- {
- tree->gtRequestSetFlags();
-
- // Add the excptn-throwing basic block to jump to on overflow
+ case GT_ADD:
- fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW, fgPtrArgCntCur);
+ CM_OVF_OP:
+ if (tree->gtOverflow())
+ {
+ tree->gtRequestSetFlags();
- // We can't do any commutative morphing for overflow instructions
+ // Add the excptn-throwing basic block to jump to on overflow
- break;
- }
+ fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW, fgPtrArgCntCur);
-CM_ADD_OP:
+ // We can't do any commutative morphing for overflow instructions
- case GT_OR:
- case GT_XOR:
- case GT_AND:
+ break;
+ }
- /* Commute any non-REF constants to the right */
+ CM_ADD_OP:
- noway_assert(op1);
- if (op1->OperIsConst() && (op1->gtType != TYP_REF))
- {
- // TODO-Review: We used to assert here that
- // noway_assert(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD));
- // With modifications to AddrTaken==>AddrExposed, we did more assertion propagation,
- // and would sometimes hit this assertion. This may indicate a missed "remorph".
- // Task is to re-enable this assertion and investigate.
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
- /* Swap the operands */
- tree->gtOp.gtOp1 = op2;
- tree->gtOp.gtOp2 = op1;
+ /* Commute any non-REF constants to the right */
- op1 = op2;
- op2 = tree->gtOp.gtOp2;
- }
+ noway_assert(op1);
+ if (op1->OperIsConst() && (op1->gtType != TYP_REF))
+ {
+ // TODO-Review: We used to assert here that
+ // noway_assert(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD));
+ // With modifications to AddrTaken==>AddrExposed, we did more assertion propagation,
+ // and would sometimes hit this assertion. This may indicate a missed "remorph".
+ // Task is to re-enable this assertion and investigate.
+
+ /* Swap the operands */
+ tree->gtOp.gtOp1 = op2;
+ tree->gtOp.gtOp2 = op1;
+
+ op1 = op2;
+ op2 = tree->gtOp.gtOp2;
+ }
- /* See if we can fold GT_ADD nodes. */
+ /* See if we can fold GT_ADD nodes. */
- if (oper == GT_ADD)
- {
- /* Fold "((x+icon1)+(y+icon2)) to ((x+y)+(icon1+icon2))" */
+ if (oper == GT_ADD)
+ {
+ /* Fold "((x+icon1)+(y+icon2)) to ((x+y)+(icon1+icon2))" */
- if (op1->gtOper == GT_ADD &&
- op2->gtOper == GT_ADD &&
- !gtIsActiveCSE_Candidate(op2) &&
- op1->gtOp.gtOp2->gtOper == GT_CNS_INT &&
- op2->gtOp.gtOp2->gtOper == GT_CNS_INT &&
- !op1->gtOverflow() &&
- !op2->gtOverflow() )
- {
- cns1 = op1->gtOp.gtOp2;
- cns2 = op2->gtOp.gtOp2;
- cns1->gtIntCon.gtIconVal += cns2->gtIntCon.gtIconVal;
+ if (op1->gtOper == GT_ADD && op2->gtOper == GT_ADD && !gtIsActiveCSE_Candidate(op2) &&
+ op1->gtOp.gtOp2->gtOper == GT_CNS_INT && op2->gtOp.gtOp2->gtOper == GT_CNS_INT &&
+ !op1->gtOverflow() && !op2->gtOverflow())
+ {
+ cns1 = op1->gtOp.gtOp2;
+ cns2 = op2->gtOp.gtOp2;
+ cns1->gtIntCon.gtIconVal += cns2->gtIntCon.gtIconVal;
#ifdef _TARGET_64BIT_
- if (cns1->TypeGet() == TYP_INT)
- {
- // we need to properly re-sign-extend or truncate after adding two int constants above
- cns1->AsIntCon()->TruncateOrSignExtend32();
- }
+ if (cns1->TypeGet() == TYP_INT)
+ {
+ // we need to properly re-sign-extend or truncate after adding two int constants above
+ cns1->AsIntCon()->TruncateOrSignExtend32();
+ }
#endif //_TARGET_64BIT_
- tree->gtOp.gtOp2 = cns1;
- DEBUG_DESTROY_NODE(cns2);
+ tree->gtOp.gtOp2 = cns1;
+ DEBUG_DESTROY_NODE(cns2);
- op1->gtOp.gtOp2 = op2->gtOp.gtOp1;
- op1->gtFlags |= (op1->gtOp.gtOp2->gtFlags & GTF_ALL_EFFECT);
- DEBUG_DESTROY_NODE(op2);
- op2 = tree->gtOp.gtOp2;
- }
+ op1->gtOp.gtOp2 = op2->gtOp.gtOp1;
+ op1->gtFlags |= (op1->gtOp.gtOp2->gtFlags & GTF_ALL_EFFECT);
+ DEBUG_DESTROY_NODE(op2);
+ op2 = tree->gtOp.gtOp2;
+ }
- if (op2->IsCnsIntOrI() && varTypeIsIntegralOrI(typ))
- {
- /* Fold "((x+icon1)+icon2) to (x+(icon1+icon2))" */
+ if (op2->IsCnsIntOrI() && varTypeIsIntegralOrI(typ))
+ {
+ /* Fold "((x+icon1)+icon2) to (x+(icon1+icon2))" */
- if (op1->gtOper == GT_ADD &&
- !gtIsActiveCSE_Candidate(op1) &&
- op1->gtOp.gtOp2->IsCnsIntOrI() &&
- !op1->gtOverflow() &&
- op1->gtOp.gtOp2->OperGet() == op2->OperGet())
- {
- cns1 = op1->gtOp.gtOp2;
- op2->gtIntConCommon.SetIconValue(cns1->gtIntConCommon.IconValue() + op2->gtIntConCommon.IconValue());
+ if (op1->gtOper == GT_ADD && !gtIsActiveCSE_Candidate(op1) && op1->gtOp.gtOp2->IsCnsIntOrI() &&
+ !op1->gtOverflow() && op1->gtOp.gtOp2->OperGet() == op2->OperGet())
+ {
+ cns1 = op1->gtOp.gtOp2;
+ op2->gtIntConCommon.SetIconValue(cns1->gtIntConCommon.IconValue() +
+ op2->gtIntConCommon.IconValue());
#ifdef _TARGET_64BIT_
- if (op2->TypeGet() == TYP_INT)
- {
- // we need to properly re-sign-extend or truncate after adding two int constants above
- op2->AsIntCon()->TruncateOrSignExtend32();
- }
+ if (op2->TypeGet() == TYP_INT)
+ {
+ // we need to properly re-sign-extend or truncate after adding two int constants above
+ op2->AsIntCon()->TruncateOrSignExtend32();
+ }
#endif //_TARGET_64BIT_
- if (cns1->OperGet() == GT_CNS_INT)
- {
- op2->gtIntCon.gtFieldSeq =
- GetFieldSeqStore()->Append(cns1->gtIntCon.gtFieldSeq,
- op2->gtIntCon.gtFieldSeq);
- }
- DEBUG_DESTROY_NODE(cns1);
+ if (cns1->OperGet() == GT_CNS_INT)
+ {
+ op2->gtIntCon.gtFieldSeq =
+ GetFieldSeqStore()->Append(cns1->gtIntCon.gtFieldSeq, op2->gtIntCon.gtFieldSeq);
+ }
+ DEBUG_DESTROY_NODE(cns1);
- tree->gtOp.gtOp1 = op1->gtOp.gtOp1;
- DEBUG_DESTROY_NODE(op1);
- op1 = tree->gtOp.gtOp1;
- }
+ tree->gtOp.gtOp1 = op1->gtOp.gtOp1;
+ DEBUG_DESTROY_NODE(op1);
+ op1 = tree->gtOp.gtOp1;
+ }
- // Fold (x + 0).
+ // Fold (x + 0).
- if ((op2->gtIntConCommon.IconValue() == 0) && !gtIsActiveCSE_Candidate(tree))
- {
+ if ((op2->gtIntConCommon.IconValue() == 0) && !gtIsActiveCSE_Candidate(tree))
+ {
- // If this addition is adding an offset to a null pointer,
- // avoid the work and yield the null pointer immediately.
- // Dereferencing the pointer in either case will have the
- // same effect.
+ // If this addition is adding an offset to a null pointer,
+ // avoid the work and yield the null pointer immediately.
+ // Dereferencing the pointer in either case will have the
+ // same effect.
- if (!gtIsActiveCSE_Candidate(op1) && varTypeIsGC(op2->TypeGet()))
- {
- op2->gtType = tree->gtType;
- DEBUG_DESTROY_NODE(op1);
- DEBUG_DESTROY_NODE(tree);
- return op2;
- }
+ if (!gtIsActiveCSE_Candidate(op1) && varTypeIsGC(op2->TypeGet()))
+ {
+ op2->gtType = tree->gtType;
+ DEBUG_DESTROY_NODE(op1);
+ DEBUG_DESTROY_NODE(tree);
+ return op2;
+ }
- // Remove the addition iff it won't change the tree type
- // to TYP_REF.
+ // Remove the addition iff it won't change the tree type
+ // to TYP_REF.
- if (!gtIsActiveCSE_Candidate(op2) &&
- ((op1->TypeGet() == tree->TypeGet()) ||
- (op1->TypeGet() != TYP_REF)))
- {
- if (fgGlobalMorph &&
- (op2->OperGet() == GT_CNS_INT) &&
- (op2->gtIntCon.gtFieldSeq != NULL) &&
- (op2->gtIntCon.gtFieldSeq != FieldSeqStore::NotAField()))
- {
- fgAddFieldSeqForZeroOffset(op1, op2->gtIntCon.gtFieldSeq);
- }
+ if (!gtIsActiveCSE_Candidate(op2) &&
+ ((op1->TypeGet() == tree->TypeGet()) || (op1->TypeGet() != TYP_REF)))
+ {
+ if (fgGlobalMorph && (op2->OperGet() == GT_CNS_INT) &&
+ (op2->gtIntCon.gtFieldSeq != nullptr) &&
+ (op2->gtIntCon.gtFieldSeq != FieldSeqStore::NotAField()))
+ {
+ fgAddFieldSeqForZeroOffset(op1, op2->gtIntCon.gtFieldSeq);
+ }
- DEBUG_DESTROY_NODE(op2);
- DEBUG_DESTROY_NODE(tree);
+ DEBUG_DESTROY_NODE(op2);
+ DEBUG_DESTROY_NODE(tree);
- return op1;
+ return op1;
+ }
+ }
}
}
- }
- }
- /* See if we can fold GT_MUL by const nodes */
- else if (oper == GT_MUL && op2->IsCnsIntOrI() && !optValnumCSE_phase)
- {
+ /* See if we can fold GT_MUL by const nodes */
+ else if (oper == GT_MUL && op2->IsCnsIntOrI() && !optValnumCSE_phase)
+ {
#ifndef _TARGET_64BIT_
- noway_assert(typ <= TYP_UINT);
+ noway_assert(typ <= TYP_UINT);
#endif // _TARGET_64BIT_
- noway_assert(!tree->gtOverflow());
+ noway_assert(!tree->gtOverflow());
- ssize_t mult = op2->gtIntConCommon.IconValue();
- bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT &&
- op2->gtIntCon.gtFieldSeq != nullptr &&
- op2->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq();
+ ssize_t mult = op2->gtIntConCommon.IconValue();
+ bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->gtIntCon.gtFieldSeq != nullptr &&
+ op2->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq();
- assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->m_next == nullptr);
+ assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->m_next == nullptr);
- if (mult == 0)
- {
- // We may be able to throw away op1 (unless it has side-effects)
+ if (mult == 0)
+ {
+ // We may be able to throw away op1 (unless it has side-effects)
- if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
- {
- DEBUG_DESTROY_NODE(op1);
- DEBUG_DESTROY_NODE(tree);
- return op2; // Just return the "0" node
- }
+ if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
+ {
+ DEBUG_DESTROY_NODE(op1);
+ DEBUG_DESTROY_NODE(tree);
+ return op2; // Just return the "0" node
+ }
- // We need to keep op1 for the side-effects. Hang it off
- // a GT_COMMA node
+ // We need to keep op1 for the side-effects. Hang it off
+ // a GT_COMMA node
- tree->ChangeOper(GT_COMMA);
- return tree;
- }
+ tree->ChangeOper(GT_COMMA);
+ return tree;
+ }
- size_t abs_mult = (mult >= 0) ? mult : -mult;
- size_t lowestBit = genFindLowestBit(abs_mult);
- bool changeToShift = false;
+ size_t abs_mult = (mult >= 0) ? mult : -mult;
+ size_t lowestBit = genFindLowestBit(abs_mult);
+ bool changeToShift = false;
- // is it a power of two? (positive or negative)
- if (abs_mult == lowestBit)
- {
- // if negative negate (min-int does not need negation)
- if (mult < 0 && mult != SSIZE_T_MIN)
- {
- tree->gtOp.gtOp1 = op1 = gtNewOperNode(GT_NEG, op1->gtType, op1);
- fgMorphTreeDone(op1);
- }
+ // is it a power of two? (positive or negative)
+ if (abs_mult == lowestBit)
+ {
+ // if negative negate (min-int does not need negation)
+ if (mult < 0 && mult != SSIZE_T_MIN)
+ {
+ tree->gtOp.gtOp1 = op1 = gtNewOperNode(GT_NEG, op1->gtType, op1);
+ fgMorphTreeDone(op1);
+ }
- // If "op2" is a constant array index, the other multiplicand must be a constant.
- // Transfer the annotation to the other one.
- if (op2->OperGet() == GT_CNS_INT &&
- op2->gtIntCon.gtFieldSeq != nullptr &&
- op2->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
- {
- assert(op2->gtIntCon.gtFieldSeq->m_next == nullptr);
- GenTreePtr otherOp = op1;
- if (otherOp->OperGet() == GT_NEG)
- otherOp = otherOp->gtOp.gtOp1;
- assert(otherOp->OperGet() == GT_CNS_INT);
- assert(otherOp->gtIntCon.gtFieldSeq == FieldSeqStore::NotAField());
- otherOp->gtIntCon.gtFieldSeq = op2->gtIntCon.gtFieldSeq;
- }
+ // If "op2" is a constant array index, the other multiplicand must be a constant.
+ // Transfer the annotation to the other one.
+ if (op2->OperGet() == GT_CNS_INT && op2->gtIntCon.gtFieldSeq != nullptr &&
+ op2->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
+ {
+ assert(op2->gtIntCon.gtFieldSeq->m_next == nullptr);
+ GenTreePtr otherOp = op1;
+ if (otherOp->OperGet() == GT_NEG)
+ {
+ otherOp = otherOp->gtOp.gtOp1;
+ }
+ assert(otherOp->OperGet() == GT_CNS_INT);
+ assert(otherOp->gtIntCon.gtFieldSeq == FieldSeqStore::NotAField());
+ otherOp->gtIntCon.gtFieldSeq = op2->gtIntCon.gtFieldSeq;
+ }
- if (abs_mult == 1)
- {
- DEBUG_DESTROY_NODE(op2);
- DEBUG_DESTROY_NODE(tree);
- return op1;
- }
+ if (abs_mult == 1)
+ {
+ DEBUG_DESTROY_NODE(op2);
+ DEBUG_DESTROY_NODE(tree);
+ return op1;
+ }
- /* Change the multiplication into a shift by log2(val) bits */
- op2->gtIntConCommon.SetIconValue(genLog2(abs_mult));
- changeToShift = true;
- }
+ /* Change the multiplication into a shift by log2(val) bits */
+ op2->gtIntConCommon.SetIconValue(genLog2(abs_mult));
+ changeToShift = true;
+ }
#if LEA_AVAILABLE
- else if ((lowestBit > 1) && jitIsScaleIndexMul(lowestBit) && optAvoidIntMult())
- {
- int shift = genLog2(lowestBit);
- ssize_t factor = abs_mult >> shift;
-
- if (factor == 3 || factor == 5 || factor == 9)
- {
- // if negative negate (min-int does not need negation)
- if (mult < 0 && mult != SSIZE_T_MIN)
+ else if ((lowestBit > 1) && jitIsScaleIndexMul(lowestBit) && optAvoidIntMult())
{
- tree->gtOp.gtOp1 = op1 = gtNewOperNode(GT_NEG, op1->gtType, op1);
- fgMorphTreeDone(op1);
- }
+ int shift = genLog2(lowestBit);
+ ssize_t factor = abs_mult >> shift;
- GenTreePtr factorIcon = gtNewIconNode(factor, TYP_I_IMPL);
- if (op2IsConstIndex)
- {
- factorIcon->AsIntCon()->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
- }
+ if (factor == 3 || factor == 5 || factor == 9)
+ {
+ // if negative negate (min-int does not need negation)
+ if (mult < 0 && mult != SSIZE_T_MIN)
+ {
+ tree->gtOp.gtOp1 = op1 = gtNewOperNode(GT_NEG, op1->gtType, op1);
+ fgMorphTreeDone(op1);
+ }
+
+ GenTreePtr factorIcon = gtNewIconNode(factor, TYP_I_IMPL);
+ if (op2IsConstIndex)
+ {
+ factorIcon->AsIntCon()->gtFieldSeq =
+ GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
+ }
- // change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift
- tree->gtOp.gtOp1 = op1 = gtNewOperNode(GT_MUL, tree->gtType, op1, factorIcon);
- fgMorphTreeDone(op1);
+ // change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift
+ tree->gtOp.gtOp1 = op1 = gtNewOperNode(GT_MUL, tree->gtType, op1, factorIcon);
+ fgMorphTreeDone(op1);
- op2->gtIntConCommon.SetIconValue(shift);
- changeToShift = true;
- }
- }
+ op2->gtIntConCommon.SetIconValue(shift);
+ changeToShift = true;
+ }
+ }
#endif // LEA_AVAILABLE
- if (changeToShift)
- {
- // vnStore is null before the ValueNumber phase has run
- if (vnStore != nullptr)
- {
- // Update the ValueNumber for 'op2', as we just changed the constant
- fgValueNumberTreeConst(op2);
+ if (changeToShift)
+ {
+ // vnStore is null before the ValueNumber phase has run
+ if (vnStore != nullptr)
+ {
+ // Update the ValueNumber for 'op2', as we just changed the constant
+ fgValueNumberTreeConst(op2);
+ }
+ oper = GT_LSH;
+ // Keep the old ValueNumber for 'tree' as the new expr
+ // will still compute the same value as before
+ tree->ChangeOper(oper, GenTree::PRESERVE_VN);
+
+ goto DONE_MORPHING_CHILDREN;
+ }
}
- oper = GT_LSH;
- // Keep the old ValueNumber for 'tree' as the new expr
- // will still compute the same value as before
- tree->ChangeOper(oper, GenTree::PRESERVE_VN);
+ else if (fgOperIsBitwiseRotationRoot(oper))
+ {
+ tree = fgRecognizeAndMorphBitwiseRotation(tree);
- goto DONE_MORPHING_CHILDREN;
- }
- }
- else if (fgOperIsBitwiseRotationRoot(oper))
- {
- tree = fgRecognizeAndMorphBitwiseRotation(tree);
+ // fgRecognizeAndMorphBitwiseRotation may return a new tree
+ oper = tree->OperGet();
+ typ = tree->TypeGet();
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtOp.gtOp2;
+ }
- // fgRecognizeAndMorphBitwiseRotation may return a new tree
- oper = tree->OperGet();
- typ = tree->TypeGet();
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtOp.gtOp2;
- }
+ break;
- break;
+ case GT_CHS:
+ case GT_NOT:
+ case GT_NEG:
- case GT_CHS:
- case GT_NOT:
- case GT_NEG:
+ /* Any constant cases should have been folded earlier */
+ noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase);
+ break;
- /* Any constant cases should have been folded earlier */
- noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase);
- break;
+ case GT_CKFINITE:
- case GT_CKFINITE:
+ noway_assert(varTypeIsFloating(op1->TypeGet()));
- noway_assert(varTypeIsFloating(op1->TypeGet()));
+ fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN, fgPtrArgCntCur);
+ break;
- fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN, fgPtrArgCntCur);
- break;
+ case GT_OBJ:
+ // If we have GT_OBJ(GT_ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on
+ // the GT_OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X
+ // is a local or clsVar, even if it has been address-exposed.
+ if (op1->OperGet() == GT_ADDR)
+ {
+ tree->gtFlags |= (op1->gtGetOp1()->gtFlags & GTF_GLOB_REF);
+ }
+ break;
- case GT_OBJ:
- // If we have GT_OBJ(GT_ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on
- // the GT_OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X
- // is a local or clsVar, even if it has been address-exposed.
- if (op1->OperGet() == GT_ADDR)
- {
- tree->gtFlags |= (op1->gtGetOp1()->gtFlags & GTF_GLOB_REF);
- }
- break;
+ case GT_IND:
- case GT_IND:
+ // Can not remove a GT_IND if it is currently a CSE candidate.
+ if (gtIsActiveCSE_Candidate(tree))
+ {
+ break;
+ }
- // Can not remove a GT_IND if it is currently a CSE candidate.
- if (gtIsActiveCSE_Candidate(tree))
- break;
+ bool foldAndReturnTemp;
+ foldAndReturnTemp = false;
+ temp = nullptr;
+ ival1 = 0;
- bool foldAndReturnTemp; foldAndReturnTemp = false;
- temp = nullptr;
- ival1 = 0;
+ /* Try to Fold *(&X) into X */
+ if (op1->gtOper == GT_ADDR)
+ {
+ // Can not remove a GT_ADDR if it is currently a CSE candidate.
+ if (gtIsActiveCSE_Candidate(op1))
+ {
+ break;
+ }
- /* Try to Fold *(&X) into X */
- if (op1->gtOper == GT_ADDR)
- {
- // Can not remove a GT_ADDR if it is currently a CSE candidate.
- if (gtIsActiveCSE_Candidate(op1))
- break;
+ temp = op1->gtOp.gtOp1; // X
- temp = op1->gtOp.gtOp1; // X
+ // In the test below, if they're both TYP_STRUCT, this of course does *not* mean that
+ // they are the *same* struct type. In fact, they almost certainly aren't. If the
+ // address has an associated field sequence, that identifies this case; go through
+ // the "lcl_fld" path rather than this one.
+ FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below.
+ if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
+ {
+ foldAndReturnTemp = true;
+ }
+ else if (temp->OperIsLocal())
+ {
+ unsigned lclNum = temp->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[lclNum];
- // In the test below, if they're both TYP_STRUCT, this of course does *not* mean that
- // they are the *same* struct type. In fact, they almost certainly aren't. If the
- // address has an associated field sequence, that identifies this case; go through
- // the "lcl_fld" path rather than this one.
- FieldSeqNode* addrFieldSeq = NULL; // This is an unused out parameter below.
- if ( typ == temp->TypeGet()
- && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
- {
- foldAndReturnTemp = true;
- }
- else if (temp->OperIsLocal())
- {
- unsigned lclNum = temp->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = &lvaTable[lclNum];
+ // We will try to optimize when we have a promoted struct promoted with a zero lvFldOffset
+ if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
+ {
+ noway_assert(varTypeIsStruct(varDsc));
- // We will try to optimize when we have a promoted struct promoted with a zero lvFldOffset
- if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
- {
- noway_assert(varTypeIsStruct(varDsc));
+ // We will try to optimize when we have a single field struct that is being struct promoted
+ if (varDsc->lvFieldCnt == 1)
+ {
+ unsigned lclNumFld = varDsc->lvFieldLclStart;
+ // just grab the promoted field
+ LclVarDsc* fieldVarDsc = &lvaTable[lclNumFld];
- // We will try to optimize when we have a single field struct that is being struct promoted
- if (varDsc->lvFieldCnt == 1)
- {
- unsigned lclNumFld = varDsc->lvFieldLclStart;
- // just grab the promoted field
- LclVarDsc * fieldVarDsc = &lvaTable[lclNumFld];
+ // Also make sure that the tree type matches the fieldVarType and that it's lvFldOffset
+ // is zero
+ if (fieldVarDsc->TypeGet() == tree->TypeGet() && (fieldVarDsc->lvFldOffset == 0))
+ {
+ // We can just use the existing promoted field LclNum
+ temp->gtLclVarCommon.SetLclNum(lclNumFld);
+ temp->gtType = fieldVarDsc->TypeGet();
- // Also make sure that the tree type matches the fieldVarType and that it's lvFldOffset is zero
- if (fieldVarDsc->TypeGet() == tree->TypeGet() && (fieldVarDsc->lvFldOffset == 0))
+ foldAndReturnTemp = true;
+ }
+ }
+ }
+ // If the type of the IND (typ) is a "small int", and the type of the local has the
+ // same width, then we can reduce to just the local variable -- it will be
+ // correctly normalized, and signed/unsigned differences won't matter.
+ //
+ // The below transformation cannot be applied if the local var needs to be normalized on load.
+ else if (varTypeIsSmall(typ) && (genTypeSize(lvaTable[lclNum].lvType) == genTypeSize(typ)) &&
+ !lvaTable[lclNum].lvNormalizeOnLoad())
{
- // We can just use the existing promoted field LclNum
- temp->gtLclVarCommon.SetLclNum(lclNumFld);
- temp->gtType = fieldVarDsc->TypeGet();
-
+ tree->gtType = temp->gtType;
foldAndReturnTemp = true;
}
- }
- }
- // If the type of the IND (typ) is a "small int", and the type of the local has the
- // same width, then we can reduce to just the local variable -- it will be
- // correctly normalized, and signed/unsigned differences won't matter.
- //
- // The below transformation cannot be applied if the local var needs to be normalized on load.
- else if ( varTypeIsSmall(typ) &&
- (genTypeSize(lvaTable[lclNum].lvType) == genTypeSize(typ)) &&
- !lvaTable[lclNum].lvNormalizeOnLoad() )
- {
- tree->gtType = temp->gtType;
- foldAndReturnTemp = true;
- }
- else
- {
- // Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e. nullptr)
- assert(fieldSeq == nullptr);
- bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
- assert(b || fieldSeq == nullptr);
+ else
+ {
+ // Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
+ // nullptr)
+ assert(fieldSeq == nullptr);
+ bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
+ assert(b || fieldSeq == nullptr);
- if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
- {
- // Append the field sequence, change the type.
- temp->AsLclFld()->gtFieldSeq = GetFieldSeqStore()->Append(temp->AsLclFld()->gtFieldSeq, fieldSeq);
- temp->gtType = tree->TypeGet();
+ if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
+ {
+ // Append the field sequence, change the type.
+ temp->AsLclFld()->gtFieldSeq =
+ GetFieldSeqStore()->Append(temp->AsLclFld()->gtFieldSeq, fieldSeq);
+ temp->gtType = tree->TypeGet();
- foldAndReturnTemp = true;
+ foldAndReturnTemp = true;
+ }
+ }
+ // Otherwise will will fold this into a GT_LCL_FLD below
+ // where we check (temp != nullptr)
+ }
+ else // !temp->OperIsLocal()
+ {
+ // We don't try to fold away the GT_IND/GT_ADDR for this case
+ temp = nullptr;
}
}
- // Otherwise will will fold this into a GT_LCL_FLD below
- // where we check (temp != nullptr)
- }
- else // !temp->OperIsLocal()
- {
- // We don't try to fold away the GT_IND/GT_ADDR for this case
- temp = nullptr;
- }
- }
- else if (op1->OperGet() == GT_ADD)
- {
- /* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */
+ else if (op1->OperGet() == GT_ADD)
+ {
+ /* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */
- if (op1->gtOp.gtOp1->OperGet() == GT_ADDR &&
- op1->gtOp.gtOp2->OperGet() == GT_CNS_INT
- && (!(opts.MinOpts() || opts.compDbgCode)))
- {
- // No overflow arithmetic with pointers
- noway_assert(!op1->gtOverflow());
+ if (op1->gtOp.gtOp1->OperGet() == GT_ADDR && op1->gtOp.gtOp2->OperGet() == GT_CNS_INT &&
+ (!(opts.MinOpts() || opts.compDbgCode)))
+ {
+ // No overflow arithmetic with pointers
+ noway_assert(!op1->gtOverflow());
- temp = op1->gtOp.gtOp1->gtOp.gtOp1;
- if (!temp->OperIsLocal())
- {
- temp = nullptr;
- break;
- }
+ temp = op1->gtOp.gtOp1->gtOp.gtOp1;
+ if (!temp->OperIsLocal())
+ {
+ temp = nullptr;
+ break;
+ }
- // Can not remove the GT_ADDR if it is currently a CSE candidate.
- if (gtIsActiveCSE_Candidate(op1->gtOp.gtOp1))
- break;
+ // Can not remove the GT_ADDR if it is currently a CSE candidate.
+ if (gtIsActiveCSE_Candidate(op1->gtOp.gtOp1))
+ {
+ break;
+ }
- ival1 = op1->gtOp.gtOp2->gtIntCon.gtIconVal;
- fieldSeq = op1->gtOp.gtOp2->gtIntCon.gtFieldSeq;
+ ival1 = op1->gtOp.gtOp2->gtIntCon.gtIconVal;
+ fieldSeq = op1->gtOp.gtOp2->gtIntCon.gtFieldSeq;
- // Does the address have an associated zero-offset field sequence?
- FieldSeqNode* addrFieldSeq = NULL;
- if (GetZeroOffsetFieldMap()->Lookup(op1->gtOp.gtOp1, &addrFieldSeq))
- {
- fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq);
- }
+ // Does the address have an associated zero-offset field sequence?
+ FieldSeqNode* addrFieldSeq = nullptr;
+ if (GetZeroOffsetFieldMap()->Lookup(op1->gtOp.gtOp1, &addrFieldSeq))
+ {
+ fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq);
+ }
- if (ival1 == 0 &&
- typ == temp->TypeGet() &&
- temp->TypeGet() != TYP_STRUCT)
- {
- noway_assert(!varTypeIsGC(temp->TypeGet()));
- foldAndReturnTemp = true;
- }
- else
- {
- // The emitter can't handle large offsets
- if (ival1 != (unsigned short)ival1)
- break;
+ if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT)
+ {
+ noway_assert(!varTypeIsGC(temp->TypeGet()));
+ foldAndReturnTemp = true;
+ }
+ else
+ {
+ // The emitter can't handle large offsets
+ if (ival1 != (unsigned short)ival1)
+ {
+ break;
+ }
- // The emitter can get confused by invalid offsets
- if (ival1 >= Compiler::lvaLclSize(temp->gtLclVarCommon.gtLclNum))
- break;
+ // The emitter can get confused by invalid offsets
+ if (ival1 >= Compiler::lvaLclSize(temp->gtLclVarCommon.gtLclNum))
+ {
+ break;
+ }
#ifdef _TARGET_ARM_
- // Check for a LclVar TYP_STRUCT with misalignment on a Floating Point field
- //
- if (varTypeIsFloating(tree->TypeGet()))
- {
- if ((ival1 % emitTypeSize(tree->TypeGet())) != 0)
- {
- tree->gtFlags |= GTF_IND_UNALIGNED;
- break;
+ // Check for a LclVar TYP_STRUCT with misalignment on a Floating Point field
+ //
+ if (varTypeIsFloating(tree->TypeGet()))
+ {
+ if ((ival1 % emitTypeSize(tree->TypeGet())) != 0)
+ {
+ tree->gtFlags |= GTF_IND_UNALIGNED;
+ break;
+ }
+ }
+#endif
}
+ // Now we can fold this into a GT_LCL_FLD below
+ // where we check (temp != nullptr)
}
-#endif
}
- // Now we can fold this into a GT_LCL_FLD below
- // where we check (temp != nullptr)
- }
- }
#ifdef DEBUG
- // If we have decided to fold, then temp cannot be nullptr
- if (foldAndReturnTemp)
- {
- assert(temp != nullptr);
- }
-#endif
-
- if (temp != nullptr)
- {
- noway_assert(op1->gtOper == GT_ADD || op1->gtOper == GT_ADDR);
-
- // If we haven't already decided to fold this expression
- //
- if (!foldAndReturnTemp)
- {
- noway_assert(temp->OperIsLocal());
- LclVarDsc* varDsc = &(lvaTable[temp->AsLclVarCommon()->gtLclNum]);
- // Make sure we don't separately promote the fields of this struct.
- if (varDsc->lvRegStruct)
+ // If we have decided to fold, then temp cannot be nullptr
+ if (foldAndReturnTemp)
{
- // We can enregister, but can't promote.
- varDsc->lvPromoted = false;
- }
- else
- {
- lvaSetVarDoNotEnregister(temp->gtLclVarCommon.gtLclNum DEBUGARG(DNER_LocalField));
+ assert(temp != nullptr);
}
+#endif
- // We will turn a GT_LCL_VAR into a GT_LCL_FLD with an gtLclOffs of 'ival'
- // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival'
- // Then we change the type of the GT_LCL_FLD to match the orginal GT_IND type.
- //
- if (temp->OperGet() == GT_LCL_FLD)
+ if (temp != nullptr)
{
- temp->AsLclFld()->gtLclOffs += (unsigned short)ival1;
- temp->AsLclFld()->gtFieldSeq =
- GetFieldSeqStore()->Append(temp->AsLclFld()->gtFieldSeq, fieldSeq);
- }
- else
- {
- temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField"...
- temp->AsLclFld()->gtLclOffs = (unsigned short)ival1;
- if (fieldSeq != NULL) // If it does represent a field, note that.
- temp->AsLclFld()->gtFieldSeq = fieldSeq;
- }
- temp->gtType = tree->gtType;
- foldAndReturnTemp = true;
- }
+ noway_assert(op1->gtOper == GT_ADD || op1->gtOper == GT_ADDR);
- assert(foldAndReturnTemp == true);
+ // If we haven't already decided to fold this expression
+ //
+ if (!foldAndReturnTemp)
+ {
+ noway_assert(temp->OperIsLocal());
+ LclVarDsc* varDsc = &(lvaTable[temp->AsLclVarCommon()->gtLclNum]);
+ // Make sure we don't separately promote the fields of this struct.
+ if (varDsc->lvRegStruct)
+ {
+ // We can enregister, but can't promote.
+ varDsc->lvPromoted = false;
+ }
+ else
+ {
+ lvaSetVarDoNotEnregister(temp->gtLclVarCommon.gtLclNum DEBUGARG(DNER_LocalField));
+ }
- // Keep the DONT_CSE flag in sync
- // (i.e keep the original value of this flag from tree)
- // as it can be set for 'temp' because a GT_ADDR always marks it for it's op1
- //
- temp->gtFlags &= ~GTF_DONT_CSE;
- temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE);
+ // We will turn a GT_LCL_VAR into a GT_LCL_FLD with an gtLclOffs of 'ival'
+ // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival'
+ // Then we change the type of the GT_LCL_FLD to match the orginal GT_IND type.
+ //
+ if (temp->OperGet() == GT_LCL_FLD)
+ {
+ temp->AsLclFld()->gtLclOffs += (unsigned short)ival1;
+ temp->AsLclFld()->gtFieldSeq =
+ GetFieldSeqStore()->Append(temp->AsLclFld()->gtFieldSeq, fieldSeq);
+ }
+ else
+ {
+ temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField"...
+ temp->AsLclFld()->gtLclOffs = (unsigned short)ival1;
+ if (fieldSeq != nullptr)
+ { // If it does represent a field, note that.
+ temp->AsLclFld()->gtFieldSeq = fieldSeq;
+ }
+ }
+ temp->gtType = tree->gtType;
+ foldAndReturnTemp = true;
+ }
- noway_assert(op1->gtOper == GT_ADD || op1->gtOper == GT_ADDR);
- noway_assert(temp->gtType == tree->gtType);
+ assert(foldAndReturnTemp == true);
- if (op1->OperGet() == GT_ADD)
- {
- DEBUG_DESTROY_NODE(op1->gtOp.gtOp1); // GT_ADDR
- DEBUG_DESTROY_NODE(op1->gtOp.gtOp2); // GT_CNS_INT
- }
- DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR
- DEBUG_DESTROY_NODE(tree); // GT_IND
+ // Keep the DONT_CSE flag in sync
+ // (i.e keep the original value of this flag from tree)
+ // as it can be set for 'temp' because a GT_ADDR always marks it for it's op1
+ //
+ temp->gtFlags &= ~GTF_DONT_CSE;
+ temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE);
- return temp;
- }
+ noway_assert(op1->gtOper == GT_ADD || op1->gtOper == GT_ADDR);
+ noway_assert(temp->gtType == tree->gtType);
- // Only do this optimization when we are in the global optimizer. Doing this after value numbering
- // could result in an invalid value number for the newly generated GT_IND node.
- if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph)
- {
- // Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)).
- // TBD: this transformation is currently necessary for correctness -- it might
- // be good to analyze the failures that result if we don't do this, and fix them
- // in other ways. Ideally, this should be optional.
- GenTreePtr commaNode = op1;
- unsigned treeFlags = tree->gtFlags;
- commaNode->gtType = typ;
- commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is dangerous, clear the GTF_REVERSE_OPS at least.
+ if (op1->OperGet() == GT_ADD)
+ {
+ DEBUG_DESTROY_NODE(op1->gtOp.gtOp1); // GT_ADDR
+ DEBUG_DESTROY_NODE(op1->gtOp.gtOp2); // GT_CNS_INT
+ }
+ DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR
+ DEBUG_DESTROY_NODE(tree); // GT_IND
+
+ return temp;
+ }
+
+ // Only do this optimization when we are in the global optimizer. Doing this after value numbering
+ // could result in an invalid value number for the newly generated GT_IND node.
+ if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph)
+ {
+ // Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)).
+ // TBD: this transformation is currently necessary for correctness -- it might
+ // be good to analyze the failures that result if we don't do this, and fix them
+ // in other ways. Ideally, this should be optional.
+ GenTreePtr commaNode = op1;
+ unsigned treeFlags = tree->gtFlags;
+ commaNode->gtType = typ;
+ commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is
+ // dangerous, clear the GTF_REVERSE_OPS at
+ // least.
#ifdef DEBUG
- commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
- while (commaNode->gtOp.gtOp2->gtOper == GT_COMMA)
- {
- commaNode = commaNode->gtOp.gtOp2;
- commaNode->gtType = typ;
- commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is dangerous, clear the GTF_REVERSE_OPS at least.
+ while (commaNode->gtOp.gtOp2->gtOper == GT_COMMA)
+ {
+ commaNode = commaNode->gtOp.gtOp2;
+ commaNode->gtType = typ;
+ commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is
+ // dangerous, clear the GTF_REVERSE_OPS at
+ // least.
#ifdef DEBUG
- commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
- }
- bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0;
- ArrayInfo arrInfo;
- if (wasArrIndex)
- {
- bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
- assert(b);
- GetArrayInfoMap()->Remove(tree);
- }
- tree = op1;
- op1 = gtNewOperNode(GT_IND, typ, commaNode->gtOp.gtOp2);
- op1->gtFlags = treeFlags;
- if (wasArrIndex)
- {
- GetArrayInfoMap()->Set(op1, arrInfo);
- }
+ }
+ bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0;
+ ArrayInfo arrInfo;
+ if (wasArrIndex)
+ {
+ bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
+ assert(b);
+ GetArrayInfoMap()->Remove(tree);
+ }
+ tree = op1;
+ op1 = gtNewOperNode(GT_IND, typ, commaNode->gtOp.gtOp2);
+ op1->gtFlags = treeFlags;
+ if (wasArrIndex)
+ {
+ GetArrayInfoMap()->Set(op1, arrInfo);
+ }
#ifdef DEBUG
- op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
- commaNode->gtOp.gtOp2 = op1;
- return tree;
- }
-
- break;
+ commaNode->gtOp.gtOp2 = op1;
+ return tree;
+ }
- case GT_ADDR:
+ break;
- // Can not remove op1 if it is currently a CSE candidate.
- if (gtIsActiveCSE_Candidate(op1))
- break;
+ case GT_ADDR:
- if (op1->OperGet() == GT_IND)
- {
- if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0)
- {
- // Can not remove a GT_ADDR if it is currently a CSE candidate.
- if (gtIsActiveCSE_Candidate(tree))
+ // Can not remove op1 if it is currently a CSE candidate.
+ if (gtIsActiveCSE_Candidate(op1))
+ {
break;
+ }
- // Perform the transform ADDR(IND(...)) == (...).
- GenTreePtr addr = op1->gtOp.gtOp1;
+ if (op1->OperGet() == GT_IND)
+ {
+ if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0)
+ {
+ // Can not remove a GT_ADDR if it is currently a CSE candidate.
+ if (gtIsActiveCSE_Candidate(tree))
+ {
+ break;
+ }
- noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
+ // Perform the transform ADDR(IND(...)) == (...).
+ GenTreePtr addr = op1->gtOp.gtOp1;
- DEBUG_DESTROY_NODE(op1);
- DEBUG_DESTROY_NODE(tree);
+ noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
- return addr;
- }
- }
- else if (op1->gtOper == GT_CAST)
- {
- GenTreePtr casting = op1->gtCast.CastOp();
- if (casting->gtOper == GT_LCL_VAR || casting->gtOper == GT_CLS_VAR)
- {
- DEBUG_DESTROY_NODE(op1);
- tree->gtOp.gtOp1 = op1 = casting;
- }
- }
- else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase)
- {
- // Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)).
- // (Be sure to mark "z" as an l-value...)
- GenTreePtr commaNode = op1;
- while (commaNode->gtOp.gtOp2->gtOper == GT_COMMA)
- {
- commaNode = commaNode->gtOp.gtOp2;
- }
- // The top-level addr might be annotated with a zeroOffset field.
- FieldSeqNode* zeroFieldSeq = nullptr;
- bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq);
- tree = op1;
- commaNode->gtOp.gtOp2->gtFlags |= GTF_DONT_CSE;
+ DEBUG_DESTROY_NODE(op1);
+ DEBUG_DESTROY_NODE(tree);
- // If the node we're about to put under a GT_ADDR is a GT_IND, the indirection
- // doesn't need to be materialized, since we only want the addressing mode. Because
- // of this, this GT_IND is not a faulting indirection and we don't have to extract it
- // as a side effect.
- GenTree* commaOp2 = commaNode->gtOp.gtOp2;
- if (commaOp2->gtOper == GT_IND)
- {
- commaOp2->gtFlags |= GTF_IND_NONFAULTING;
- }
+ return addr;
+ }
+ }
+ else if (op1->gtOper == GT_CAST)
+ {
+ GenTreePtr casting = op1->gtCast.CastOp();
+ if (casting->gtOper == GT_LCL_VAR || casting->gtOper == GT_CLS_VAR)
+ {
+ DEBUG_DESTROY_NODE(op1);
+ tree->gtOp.gtOp1 = op1 = casting;
+ }
+ }
+ else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase)
+ {
+ // Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)).
+ // (Be sure to mark "z" as an l-value...)
+ GenTreePtr commaNode = op1;
+ while (commaNode->gtOp.gtOp2->gtOper == GT_COMMA)
+ {
+ commaNode = commaNode->gtOp.gtOp2;
+ }
+ // The top-level addr might be annotated with a zeroOffset field.
+ FieldSeqNode* zeroFieldSeq = nullptr;
+ bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq);
+ tree = op1;
+ commaNode->gtOp.gtOp2->gtFlags |= GTF_DONT_CSE;
+
+ // If the node we're about to put under a GT_ADDR is a GT_IND, the indirection
+ // doesn't need to be materialized, since we only want the addressing mode. Because
+ // of this, this GT_IND is not a faulting indirection and we don't have to extract it
+ // as a side effect.
+ GenTree* commaOp2 = commaNode->gtOp.gtOp2;
+ if (commaOp2->gtOper == GT_IND)
+ {
+ commaOp2->gtFlags |= GTF_IND_NONFAULTING;
+ }
- op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2);
+ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2);
- if (isZeroOffset)
- {
- // Transfer the annotation to the new GT_ADDR node.
- GetZeroOffsetFieldMap()->Set(op1, zeroFieldSeq);
- }
- commaNode->gtOp.gtOp2 = op1;
- // Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform
- // might give op1 a type different from byref (like, say, native int). So now go back and give
- // all the comma nodes the type of op1.
- // TODO: the comma flag update below is conservative and can be improved.
- // For example, if we made the ADDR(IND(x)) == x transformation, we may be able to
- // get rid of some of the the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF).
- commaNode = tree;
- while (commaNode->gtOper == GT_COMMA)
- {
- commaNode->gtType = op1->gtType; commaNode->gtFlags |= op1->gtFlags;
+ if (isZeroOffset)
+ {
+ // Transfer the annotation to the new GT_ADDR node.
+ GetZeroOffsetFieldMap()->Set(op1, zeroFieldSeq);
+ }
+ commaNode->gtOp.gtOp2 = op1;
+ // Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform
+ // might give op1 a type different from byref (like, say, native int). So now go back and give
+ // all the comma nodes the type of op1.
+ // TODO: the comma flag update below is conservative and can be improved.
+ // For example, if we made the ADDR(IND(x)) == x transformation, we may be able to
+ // get rid of some of the the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF).
+ commaNode = tree;
+ while (commaNode->gtOper == GT_COMMA)
+ {
+ commaNode->gtType = op1->gtType;
+ commaNode->gtFlags |= op1->gtFlags;
#ifdef DEBUG
- commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
- commaNode = commaNode->gtOp.gtOp2;
- }
-
- return tree;
- }
+ commaNode = commaNode->gtOp.gtOp2;
+ }
- /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
- op1->gtFlags |= GTF_DONT_CSE;
- break;
+ return tree;
+ }
- case GT_COLON:
- if (fgGlobalMorph)
- {
- /* Mark the nodes that are conditionally executed */
- fgWalkTreePre(&tree, gtMarkColonCond);
- }
- /* Since we're doing this postorder we clear this if it got set by a child */
- fgRemoveRestOfBlock = false;
- break;
+ /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
+ op1->gtFlags |= GTF_DONT_CSE;
+ break;
- case GT_COMMA:
+ case GT_COLON:
+ if (fgGlobalMorph)
+ {
+ /* Mark the nodes that are conditionally executed */
+ fgWalkTreePre(&tree, gtMarkColonCond);
+ }
+ /* Since we're doing this postorder we clear this if it got set by a child */
+ fgRemoveRestOfBlock = false;
+ break;
- /* Special case: trees that don't produce a value */
- if ((op2->OperKind() & GTK_ASGOP) ||
- (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) ||
- fgIsThrow(op2))
- {
- typ = tree->gtType = TYP_VOID;
- }
+ case GT_COMMA:
- // If we are in the Valuenum CSE phase then don't morph away anything as these
- // nodes may have CSE defs/uses in them.
- //
- if (!optValnumCSE_phase)
- {
- // Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this
- // is all we need.
+ /* Special case: trees that don't produce a value */
+ if ((op2->OperKind() & GTK_ASGOP) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) ||
+ fgIsThrow(op2))
+ {
+ typ = tree->gtType = TYP_VOID;
+ }
- GenTreePtr op1SideEffects = NULL;
- // The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example)
- // hoisted expressions in loops.
- gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE));
- if (op1SideEffects)
- {
- //Replace the left hand side with the side effect list.
- tree->gtOp.gtOp1 = op1SideEffects;
- tree->gtFlags |= (op1SideEffects->gtFlags & GTF_ALL_EFFECT);
- }
- else
- {
- /* The left operand is worthless, throw it away */
- if (lvaLocalVarRefCounted)
+ // If we are in the Valuenum CSE phase then don't morph away anything as these
+ // nodes may have CSE defs/uses in them.
+ //
+ if (!optValnumCSE_phase)
{
- lvaRecursiveDecRefCounts(op1);
+ // Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this
+ // is all we need.
+
+ GenTreePtr op1SideEffects = nullptr;
+ // The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example)
+ // hoisted expressions in loops.
+ gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE));
+ if (op1SideEffects)
+ {
+ // Replace the left hand side with the side effect list.
+ tree->gtOp.gtOp1 = op1SideEffects;
+ tree->gtFlags |= (op1SideEffects->gtFlags & GTF_ALL_EFFECT);
+ }
+ else
+ {
+ /* The left operand is worthless, throw it away */
+ if (lvaLocalVarRefCounted)
+ {
+ lvaRecursiveDecRefCounts(op1);
+ }
+ op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
+ DEBUG_DESTROY_NODE(tree);
+ DEBUG_DESTROY_NODE(op1);
+ return op2;
+ }
+
+ /* If the right operand is just a void nop node, throw it away */
+ if (op2->IsNothingNode() && op1->gtType == TYP_VOID)
+ {
+ op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
+ DEBUG_DESTROY_NODE(tree);
+ DEBUG_DESTROY_NODE(op2);
+ return op1;
+ }
}
- op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
- DEBUG_DESTROY_NODE(tree);
- DEBUG_DESTROY_NODE(op1);
- return op2;
- }
- /* If the right operand is just a void nop node, throw it away */
- if (op2->IsNothingNode() && op1->gtType == TYP_VOID)
- {
- op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
- DEBUG_DESTROY_NODE(tree);
- DEBUG_DESTROY_NODE(op2);
- return op1;
- }
- }
+ break;
- break;
+ case GT_JTRUE:
- case GT_JTRUE:
+ /* Special case if fgRemoveRestOfBlock is set to true */
+ if (fgRemoveRestOfBlock)
+ {
+ if (fgIsCommaThrow(op1, true))
+ {
+ GenTreePtr throwNode = op1->gtOp.gtOp1;
+ noway_assert(throwNode->gtType == TYP_VOID);
- /* Special case if fgRemoveRestOfBlock is set to true */
- if (fgRemoveRestOfBlock)
- {
- if (fgIsCommaThrow(op1, true))
- {
- GenTreePtr throwNode = op1->gtOp.gtOp1;
- noway_assert(throwNode->gtType == TYP_VOID);
+ return throwNode;
+ }
- return throwNode;
- }
+ noway_assert(op1->OperKind() & GTK_RELOP);
+ noway_assert(op1->gtFlags & GTF_EXCEPT);
- noway_assert(op1->OperKind() & GTK_RELOP);
- noway_assert(op1->gtFlags & GTF_EXCEPT);
+ // We need to keep op1 for the side-effects. Hang it off
+ // a GT_COMMA node
- // We need to keep op1 for the side-effects. Hang it off
- // a GT_COMMA node
+ tree->ChangeOper(GT_COMMA);
+ tree->gtOp.gtOp2 = op2 = gtNewNothingNode();
- tree->ChangeOper(GT_COMMA);
- tree->gtOp.gtOp2 = op2 = gtNewNothingNode();
+ // Additionally since we're eliminating the JTRUE
+ // codegen won't like it if op1 is a RELOP of longs, floats or doubles.
+ // So we change it into a GT_COMMA as well.
+ op1->ChangeOper(GT_COMMA);
+ op1->gtType = op1->gtOp.gtOp1->gtType;
- // Additionally since we're eliminating the JTRUE
- // codegen won't like it if op1 is a RELOP of longs, floats or doubles.
- // So we change it into a GT_COMMA as well.
- op1->ChangeOper(GT_COMMA);
- op1->gtType = op1->gtOp.gtOp1->gtType;
+ return tree;
+ }
- return tree;
+ default:
+ break;
}
- default:
- break;
- }
+ noway_assert(oper == tree->gtOper);
- noway_assert(oper == tree->gtOper);
-
- // If we are in the Valuenum CSE phase then don't morph away anything as these
- // nodes may have CSE defs/uses in them.
- //
- if (!optValnumCSE_phase && (oper != GT_ASG) && (oper != GT_COLON) && !tree->IsList())
- {
- /* Check for op1 as a GT_COMMA with a unconditional throw node */
- if (op1 && fgIsCommaThrow(op1, true))
+ // If we are in the Valuenum CSE phase then don't morph away anything as these
+ // nodes may have CSE defs/uses in them.
+ //
+ if (!optValnumCSE_phase && (oper != GT_ASG) && (oper != GT_COLON) && !tree->IsList())
{
- if ((op1->gtFlags & GTF_COLON_COND) == 0)
+ /* Check for op1 as a GT_COMMA with a unconditional throw node */
+ if (op1 && fgIsCommaThrow(op1, true))
{
- /* We can safely throw out the rest of the statements */
- fgRemoveRestOfBlock = true;
- }
+ if ((op1->gtFlags & GTF_COLON_COND) == 0)
+ {
+ /* We can safely throw out the rest of the statements */
+ fgRemoveRestOfBlock = true;
+ }
- GenTreePtr throwNode = op1->gtOp.gtOp1;
- noway_assert(throwNode->gtType == TYP_VOID);
+ GenTreePtr throwNode = op1->gtOp.gtOp1;
+ noway_assert(throwNode->gtType == TYP_VOID);
- if (oper == GT_COMMA)
- {
- /* Both tree and op1 are GT_COMMA nodes */
- /* Change the tree's op1 to the throw node: op1->gtOp.gtOp1 */
- tree->gtOp.gtOp1 = throwNode;
- return tree;
- }
- else if (oper != GT_NOP)
- {
- if (genActualType(typ) == genActualType(op1->gtType))
+ if (oper == GT_COMMA)
{
- /* The types match so, return the comma throw node as the new tree */
- return op1;
+ /* Both tree and op1 are GT_COMMA nodes */
+ /* Change the tree's op1 to the throw node: op1->gtOp.gtOp1 */
+ tree->gtOp.gtOp1 = throwNode;
+ return tree;
}
- else
+ else if (oper != GT_NOP)
{
- if (typ == TYP_VOID)
+ if (genActualType(typ) == genActualType(op1->gtType))
{
- // Return the throw node
- return throwNode;
+ /* The types match so, return the comma throw node as the new tree */
+ return op1;
}
else
{
- GenTreePtr commaOp2 = op1->gtOp.gtOp2;
-
- // need type of oper to be same as tree
- if (typ == TYP_LONG)
+ if (typ == TYP_VOID)
{
- commaOp2->ChangeOperConst(GT_CNS_NATIVELONG);
- commaOp2->gtIntConCommon.SetLngValue(0);
- /* Change the types of oper and commaOp2 to TYP_LONG */
- op1->gtType = commaOp2->gtType = TYP_LONG;
- }
- else if (varTypeIsFloating(typ))
- {
- commaOp2->ChangeOperConst(GT_CNS_DBL);
- commaOp2->gtDblCon.gtDconVal = 0.0;
- /* Change the types of oper and commaOp2 to TYP_DOUBLE */
- op1->gtType = commaOp2->gtType = TYP_DOUBLE;
+ // Return the throw node
+ return throwNode;
}
else
{
- commaOp2->ChangeOperConst(GT_CNS_INT);
- commaOp2->gtIntConCommon.SetIconValue(0);
- /* Change the types of oper and commaOp2 to TYP_INT */
- op1->gtType = commaOp2->gtType = TYP_INT;
- }
+ GenTreePtr commaOp2 = op1->gtOp.gtOp2;
- /* Return the GT_COMMA node as the new tree */
- return op1;
+ // need type of oper to be same as tree
+ if (typ == TYP_LONG)
+ {
+ commaOp2->ChangeOperConst(GT_CNS_NATIVELONG);
+ commaOp2->gtIntConCommon.SetLngValue(0);
+ /* Change the types of oper and commaOp2 to TYP_LONG */
+ op1->gtType = commaOp2->gtType = TYP_LONG;
+ }
+ else if (varTypeIsFloating(typ))
+ {
+ commaOp2->ChangeOperConst(GT_CNS_DBL);
+ commaOp2->gtDblCon.gtDconVal = 0.0;
+ /* Change the types of oper and commaOp2 to TYP_DOUBLE */
+ op1->gtType = commaOp2->gtType = TYP_DOUBLE;
+ }
+ else
+ {
+ commaOp2->ChangeOperConst(GT_CNS_INT);
+ commaOp2->gtIntConCommon.SetIconValue(0);
+ /* Change the types of oper and commaOp2 to TYP_INT */
+ op1->gtType = commaOp2->gtType = TYP_INT;
+ }
+
+ /* Return the GT_COMMA node as the new tree */
+ return op1;
+ }
}
}
}
- }
- /* Check for op2 as a GT_COMMA with a unconditional throw */
+ /* Check for op2 as a GT_COMMA with a unconditional throw */
- if (op2 && fgIsCommaThrow(op2, true))
- {
- if ((op2->gtFlags & GTF_COLON_COND) == 0)
+ if (op2 && fgIsCommaThrow(op2, true))
{
- /* We can safely throw out the rest of the statements */
- fgRemoveRestOfBlock = true;
- }
-
- // If op1 has no side-effects
- if ((op1->gtFlags & GTF_ALL_EFFECT) == 0)
- {
- // If tree is an asg node
- if (tree->OperIsAssignment())
+ if ((op2->gtFlags & GTF_COLON_COND) == 0)
{
- /* Return the throw node as the new tree */
- return op2->gtOp.gtOp1;
+ /* We can safely throw out the rest of the statements */
+ fgRemoveRestOfBlock = true;
}
- if (tree->OperGet() == GT_ARR_BOUNDS_CHECK)
+ // If op1 has no side-effects
+ if ((op1->gtFlags & GTF_ALL_EFFECT) == 0)
{
- /* Return the throw node as the new tree */
- return op2->gtOp.gtOp1;
- }
+ // If tree is an asg node
+ if (tree->OperIsAssignment())
+ {
+ /* Return the throw node as the new tree */
+ return op2->gtOp.gtOp1;
+ }
- // If tree is a comma node
- if (tree->OperGet() == GT_COMMA)
- {
- /* Return the throw node as the new tree */
- return op2->gtOp.gtOp1;
- }
+ if (tree->OperGet() == GT_ARR_BOUNDS_CHECK)
+ {
+ /* Return the throw node as the new tree */
+ return op2->gtOp.gtOp1;
+ }
- /* for the shift nodes the type of op2 can differ from the tree type */
- if ((typ == TYP_LONG) && (genActualType(op2->gtType) == TYP_INT))
- {
- noway_assert(GenTree::OperIsShiftOrRotate(oper));
+ // If tree is a comma node
+ if (tree->OperGet() == GT_COMMA)
+ {
+ /* Return the throw node as the new tree */
+ return op2->gtOp.gtOp1;
+ }
- GenTreePtr commaOp2 = op2->gtOp.gtOp2;
+ /* for the shift nodes the type of op2 can differ from the tree type */
+ if ((typ == TYP_LONG) && (genActualType(op2->gtType) == TYP_INT))
+ {
+ noway_assert(GenTree::OperIsShiftOrRotate(oper));
- commaOp2->ChangeOperConst(GT_CNS_NATIVELONG);
- commaOp2->gtIntConCommon.SetLngValue(0);
+ GenTreePtr commaOp2 = op2->gtOp.gtOp2;
- /* Change the types of oper and commaOp2 to TYP_LONG */
- op2->gtType = commaOp2->gtType = TYP_LONG;
- }
+ commaOp2->ChangeOperConst(GT_CNS_NATIVELONG);
+ commaOp2->gtIntConCommon.SetLngValue(0);
- if ((genActualType(typ) == TYP_INT) && (genActualType(op2->gtType) == TYP_LONG ||
- varTypeIsFloating(op2->TypeGet())))
- {
- // An example case is comparison (say GT_GT) of two longs or floating point values.
+ /* Change the types of oper and commaOp2 to TYP_LONG */
+ op2->gtType = commaOp2->gtType = TYP_LONG;
+ }
- GenTreePtr commaOp2 = op2->gtOp.gtOp2;
+ if ((genActualType(typ) == TYP_INT) &&
+ (genActualType(op2->gtType) == TYP_LONG || varTypeIsFloating(op2->TypeGet())))
+ {
+ // An example case is comparison (say GT_GT) of two longs or floating point values.
- commaOp2->ChangeOperConst(GT_CNS_INT);
- commaOp2->gtIntCon.gtIconVal = 0;
- /* Change the types of oper and commaOp2 to TYP_INT */
- op2->gtType = commaOp2->gtType = TYP_INT;
- }
+ GenTreePtr commaOp2 = op2->gtOp.gtOp2;
- if ((typ == TYP_BYREF) && (genActualType(op2->gtType) == TYP_I_IMPL))
- {
- noway_assert(tree->OperGet() == GT_ADD);
+ commaOp2->ChangeOperConst(GT_CNS_INT);
+ commaOp2->gtIntCon.gtIconVal = 0;
+ /* Change the types of oper and commaOp2 to TYP_INT */
+ op2->gtType = commaOp2->gtType = TYP_INT;
+ }
- GenTreePtr commaOp2 = op2->gtOp.gtOp2;
+ if ((typ == TYP_BYREF) && (genActualType(op2->gtType) == TYP_I_IMPL))
+ {
+ noway_assert(tree->OperGet() == GT_ADD);
- commaOp2->ChangeOperConst(GT_CNS_INT);
- commaOp2->gtIntCon.gtIconVal = 0;
- /* Change the types of oper and commaOp2 to TYP_BYREF */
- op2->gtType = commaOp2->gtType = TYP_BYREF;
- }
+ GenTreePtr commaOp2 = op2->gtOp.gtOp2;
+
+ commaOp2->ChangeOperConst(GT_CNS_INT);
+ commaOp2->gtIntCon.gtIconVal = 0;
+ /* Change the types of oper and commaOp2 to TYP_BYREF */
+ op2->gtType = commaOp2->gtType = TYP_BYREF;
+ }
- /* types should now match */
- noway_assert( (genActualType(typ) == genActualType(op2->gtType)));
+ /* types should now match */
+ noway_assert((genActualType(typ) == genActualType(op2->gtType)));
- /* Return the GT_COMMA node as the new tree */
- return op2;
+ /* Return the GT_COMMA node as the new tree */
+ return op2;
+ }
}
}
- }
- /*-------------------------------------------------------------------------
- * Optional morphing is done if tree transformations is permitted
- */
+ /*-------------------------------------------------------------------------
+ * Optional morphing is done if tree transformations is permitted
+ */
- if ((opts.compFlags & CLFLG_TREETRANS) == 0)
- return tree;
+ if ((opts.compFlags & CLFLG_TREETRANS) == 0)
+ {
+ return tree;
+ }
- tree = fgMorphSmpOpOptional(tree->AsOp());
+ tree = fgMorphSmpOpOptional(tree->AsOp());
} // extra scope for gcc workaround
return tree;
@@ -12458,19 +12584,18 @@ CM_ADD_OP:
#pragma warning(pop)
#endif
-
GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
{
genTreeOps oper = tree->gtOper;
- GenTree* op1 = tree->gtOp1;
- GenTree* op2 = tree->gtOp2;
- var_types typ = tree->TypeGet();
+ GenTree* op1 = tree->gtOp1;
+ GenTree* op2 = tree->gtOp2;
+ var_types typ = tree->TypeGet();
- if (GenTree::OperIsCommutative(oper))
+ if (GenTree::OperIsCommutative(oper))
{
/* Swap the operands so that the more expensive one is 'op1' */
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtOp1 = op2;
tree->gtOp2 = op1;
@@ -12490,14 +12615,13 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
/* Things are handled differently for floating-point operators */
- if (!varTypeIsFloating(tree->TypeGet()))
+ if (!varTypeIsFloating(tree->TypeGet()))
{
fgMoveOpsLeft(tree);
op1 = tree->gtOp1;
op2 = tree->gtOp2;
}
}
-
}
#if REARRANGE_ADDS
@@ -12505,15 +12629,14 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
/* Change "((x+icon)+y)" to "((x+y)+icon)"
Don't reorder floating-point operations */
- if ((oper == GT_ADD) && !tree->gtOverflow() &&
- (op1->gtOper == GT_ADD) && ! op1->gtOverflow() && varTypeIsIntegralOrI(typ))
+ if ((oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() &&
+ varTypeIsIntegralOrI(typ))
{
- GenTreePtr ad2 = op1->gtOp.gtOp2;
+ GenTreePtr ad2 = op1->gtOp.gtOp2;
- if (op2->OperIsConst() == 0 &&
- ad2->OperIsConst() != 0)
+ if (op2->OperIsConst() == 0 && ad2->OperIsConst() != 0)
{
- //This takes
+ // This takes
// + (tree)
// / \
// / \
@@ -12538,8 +12661,8 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
}
tree->gtOp2 = ad2;
- op1 ->gtOp.gtOp2 = op2;
- op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
+ op1->gtOp.gtOp2 = op2;
+ op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
op2 = tree->gtOp2;
}
@@ -12553,496 +12676,507 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
switch (oper)
{
- genTreeOps cmop;
- bool dstIsSafeLclVar;
+ genTreeOps cmop;
+ bool dstIsSafeLclVar;
- case GT_ASG:
- /* We'll convert "a = a <op> x" into "a <op>= x" */
- /* and also "a = x <op> a" into "a <op>= x" for communative ops */
- CLANG_FORMAT_COMMENT_ANCHOR;
+ case GT_ASG:
+ /* We'll convert "a = a <op> x" into "a <op>= x" */
+ /* and also "a = x <op> a" into "a <op>= x" for communative ops */
+ CLANG_FORMAT_COMMENT_ANCHOR;
#if !LONG_ASG_OPS
- if (typ == TYP_LONG)
- break;
+ if (typ == TYP_LONG)
+ {
+ break;
+ }
#endif
- /* Make sure we're allowed to do this */
+ /* Make sure we're allowed to do this */
- if (optValnumCSE_phase)
- {
- // It is not safe to reorder/delete CSE's
- break;
- }
+ if (optValnumCSE_phase)
+ {
+ // It is not safe to reorder/delete CSE's
+ break;
+ }
- /* Are we assigning to a GT_LCL_VAR ? */
+ /* Are we assigning to a GT_LCL_VAR ? */
- dstIsSafeLclVar = (op1->gtOper == GT_LCL_VAR);
+ dstIsSafeLclVar = (op1->gtOper == GT_LCL_VAR);
- /* If we have a GT_LCL_VAR, then is the address taken? */
- if (dstIsSafeLclVar)
- {
- unsigned lclNum = op1->gtLclVarCommon.gtLclNum;
- LclVarDsc * varDsc = lvaTable + lclNum;
+ /* If we have a GT_LCL_VAR, then is the address taken? */
+ if (dstIsSafeLclVar)
+ {
+ unsigned lclNum = op1->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = lvaTable + lclNum;
- noway_assert(lclNum < lvaCount);
+ noway_assert(lclNum < lvaCount);
- /* Is the address taken? */
- if (varDsc->lvAddrExposed)
- {
- dstIsSafeLclVar = false;
- }
- else if (op2->gtFlags & GTF_ASG)
- {
- break;
+ /* Is the address taken? */
+ if (varDsc->lvAddrExposed)
+ {
+ dstIsSafeLclVar = false;
+ }
+ else if (op2->gtFlags & GTF_ASG)
+ {
+ break;
+ }
}
- }
- if (!dstIsSafeLclVar)
- {
- if (op2->gtFlags & GTF_ASG)
- break;
+ if (!dstIsSafeLclVar)
+ {
+ if (op2->gtFlags & GTF_ASG)
+ {
+ break;
+ }
- if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT))
- break;
- }
+ if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT))
+ {
+ break;
+ }
+ }
- /* Special case: a cast that can be thrown away */
+ /* Special case: a cast that can be thrown away */
- if (op1->gtOper == GT_IND &&
- op2->gtOper == GT_CAST &&
- !op2->gtOverflow() )
- {
- var_types srct;
- var_types cast;
- var_types dstt;
+ if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow())
+ {
+ var_types srct;
+ var_types cast;
+ var_types dstt;
- srct = op2->gtCast.CastOp()->TypeGet();
- cast = (var_types) op2->CastToType();
- dstt = op1->TypeGet();
+ srct = op2->gtCast.CastOp()->TypeGet();
+ cast = (var_types)op2->CastToType();
+ dstt = op1->TypeGet();
- /* Make sure these are all ints and precision is not lost */
+ /* Make sure these are all ints and precision is not lost */
- if (cast >= dstt && dstt <= TYP_INT && srct <= TYP_INT)
- op2 = tree->gtOp2 = op2->gtCast.CastOp();
- }
+ if (cast >= dstt && dstt <= TYP_INT && srct <= TYP_INT)
+ {
+ op2 = tree->gtOp2 = op2->gtCast.CastOp();
+ }
+ }
- /* Make sure we have the operator range right */
+ /* Make sure we have the operator range right */
- noway_assert(GT_SUB == GT_ADD + 1);
- noway_assert(GT_MUL == GT_ADD + 2);
- noway_assert(GT_DIV == GT_ADD + 3);
- noway_assert(GT_MOD == GT_ADD + 4);
- noway_assert(GT_UDIV== GT_ADD + 5);
- noway_assert(GT_UMOD== GT_ADD + 6);
+ noway_assert(GT_SUB == GT_ADD + 1);
+ noway_assert(GT_MUL == GT_ADD + 2);
+ noway_assert(GT_DIV == GT_ADD + 3);
+ noway_assert(GT_MOD == GT_ADD + 4);
+ noway_assert(GT_UDIV == GT_ADD + 5);
+ noway_assert(GT_UMOD == GT_ADD + 6);
- noway_assert(GT_OR == GT_ADD + 7);
- noway_assert(GT_XOR == GT_ADD + 8);
- noway_assert(GT_AND == GT_ADD + 9);
+ noway_assert(GT_OR == GT_ADD + 7);
+ noway_assert(GT_XOR == GT_ADD + 8);
+ noway_assert(GT_AND == GT_ADD + 9);
- noway_assert(GT_LSH == GT_ADD + 10);
- noway_assert(GT_RSH == GT_ADD + 11);
- noway_assert(GT_RSZ == GT_ADD + 12);
+ noway_assert(GT_LSH == GT_ADD + 10);
+ noway_assert(GT_RSH == GT_ADD + 11);
+ noway_assert(GT_RSZ == GT_ADD + 12);
- /* Check for a suitable operator on the RHS */
+ /* Check for a suitable operator on the RHS */
- cmop = op2->OperGet();
+ cmop = op2->OperGet();
- switch (cmop)
- {
- case GT_NEG:
- // GT_CHS only supported for integer types
- if ( varTypeIsFloating(tree->TypeGet()))
- break;
+ switch (cmop)
+ {
+ case GT_NEG:
+ // GT_CHS only supported for integer types
+ if (varTypeIsFloating(tree->TypeGet()))
+ {
+ break;
+ }
- goto ASG_OP;
+ goto ASG_OP;
- case GT_MUL:
- // GT_ASG_MUL only supported for floating point types
- if (!varTypeIsFloating(tree->TypeGet()))
- break;
+ case GT_MUL:
+ // GT_ASG_MUL only supported for floating point types
+ if (!varTypeIsFloating(tree->TypeGet()))
+ {
+ break;
+ }
- __fallthrough;
+ __fallthrough;
- case GT_ADD:
- case GT_SUB:
- if (op2->gtOverflow())
- {
- /* Disable folding into "<op>=" if the result can be
- visible to anyone as <op> may throw an exception and
- the assignment should not proceed
- We are safe with an assignment to a local variables
- */
- if (ehBlockHasExnFlowDsc(compCurBB))
- break;
- if (!dstIsSafeLclVar)
- break;
- }
+ case GT_ADD:
+ case GT_SUB:
+ if (op2->gtOverflow())
+ {
+ /* Disable folding into "<op>=" if the result can be
+ visible to anyone as <op> may throw an exception and
+                           the assignment should not proceed.
+                           We are safe with an assignment to a local variable.
+ */
+ if (ehBlockHasExnFlowDsc(compCurBB))
+ {
+ break;
+ }
+ if (!dstIsSafeLclVar)
+ {
+ break;
+ }
+ }
#ifndef _TARGET_AMD64_
- // This is hard for byte-operations as we need to make
- // sure both operands are in RBM_BYTE_REGS.
- if (varTypeIsByte(op2->TypeGet()))
- break;
+ // This is hard for byte-operations as we need to make
+ // sure both operands are in RBM_BYTE_REGS.
+ if (varTypeIsByte(op2->TypeGet()))
+ break;
#endif // _TARGET_AMD64_
- goto ASG_OP;
+ goto ASG_OP;
- case GT_DIV:
- case GT_UDIV:
- // GT_ASG_DIV only supported for floating point types
- if (!varTypeIsFloating(tree->TypeGet()))
- break;
+ case GT_DIV:
+ case GT_UDIV:
+ // GT_ASG_DIV only supported for floating point types
+ if (!varTypeIsFloating(tree->TypeGet()))
+ {
+ break;
+ }
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
#if LONG_ASG_OPS
- if (typ == TYP_LONG)
- break;
+ if (typ == TYP_LONG)
+ break;
#endif
- case GT_OR:
- case GT_XOR:
- case GT_AND:
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
#if LONG_ASG_OPS
- /* TODO: allow non-const long assignment operators */
+ /* TODO: allow non-const long assignment operators */
- if (typ == TYP_LONG && op2->gtOp.gtOp2->gtOper != GT_CNS_LNG)
- break;
+ if (typ == TYP_LONG && op2->gtOp.gtOp2->gtOper != GT_CNS_LNG)
+ break;
#endif
-ASG_OP:
- {
- bool bReverse = false;
- bool bAsgOpFoldable = fgShouldCreateAssignOp(tree, &bReverse);
- if (bAsgOpFoldable)
+ ASG_OP:
{
- if (bReverse)
- {
- // We will transform this from "a = x <op> a" to "a <op>= x"
- // so we can now destroy the duplicate "a"
- DEBUG_DESTROY_NODE(op2->gtOp.gtOp2);
- op2->gtOp.gtOp2 = op2->gtOp.gtOp1;
- }
-
- /* Special case: "x |= -1" and "x &= 0" */
- if (((cmop == GT_AND) && op2->gtOp.gtOp2->IsIntegralConst(0)) ||
- ((cmop == GT_OR) && op2->gtOp.gtOp2->IsIntegralConst(-1)))
+ bool bReverse = false;
+ bool bAsgOpFoldable = fgShouldCreateAssignOp(tree, &bReverse);
+ if (bAsgOpFoldable)
{
- /* Simply change to an assignment */
- tree->gtOp2 = op2->gtOp.gtOp2;
- break;
- }
+ if (bReverse)
+ {
+ // We will transform this from "a = x <op> a" to "a <op>= x"
+ // so we can now destroy the duplicate "a"
+ DEBUG_DESTROY_NODE(op2->gtOp.gtOp2);
+ op2->gtOp.gtOp2 = op2->gtOp.gtOp1;
+ }
- if (cmop == GT_NEG)
- {
- /* This is "x = -x;", use the flipsign operator */
+ /* Special case: "x |= -1" and "x &= 0" */
+ if (((cmop == GT_AND) && op2->gtOp.gtOp2->IsIntegralConst(0)) ||
+ ((cmop == GT_OR) && op2->gtOp.gtOp2->IsIntegralConst(-1)))
+ {
+ /* Simply change to an assignment */
+ tree->gtOp2 = op2->gtOp.gtOp2;
+ break;
+ }
- tree->ChangeOper (GT_CHS);
+ if (cmop == GT_NEG)
+ {
+ /* This is "x = -x;", use the flipsign operator */
- if (op1->gtOper == GT_LCL_VAR)
- op1->gtFlags |= GTF_VAR_USEASG;
+ tree->ChangeOper(GT_CHS);
- tree->gtOp2 = gtNewIconNode(0, op1->TypeGet());
+ if (op1->gtOper == GT_LCL_VAR)
+ {
+ op1->gtFlags |= GTF_VAR_USEASG;
+ }
- break;
- }
+ tree->gtOp2 = gtNewIconNode(0, op1->TypeGet());
- if (cmop == GT_RSH && varTypeIsSmall(op1->TypeGet()) && varTypeIsUnsigned(op1->TypeGet()))
- {
- // Changing from x = x op y to x op= y when x is a small integer type
- // makes the op size smaller (originally the op size was 32 bits, after
- // sign or zero extension of x, and there is an implicit truncation in the
- // assignment).
- // This is ok in most cases because the upper bits were
- // lost when assigning the op result to a small type var,
- // but it may not be ok for the right shift operation where the higher bits
- // could be shifted into the lower bits and preserved.
- // Signed right shift of signed x still works (i.e. (sbyte)((int)(sbyte)x >>signed y) ==
- // (sbyte)x >>signed y)) as do unsigned right shift ((ubyte)((int)(ubyte)x >>unsigned y) ==
- // (ubyte)x >>unsigned y), but signed right shift of an unigned small type may give the wrong
- // result:
- // e.g. (ubyte)((int)(ubyte)0xf0 >>signed 4) == 0x0f,
- // but (ubyte)0xf0 >>signed 4 == 0xff which is incorrect.
- // The result becomes correct if we use >>unsigned instead of >>signed.
- noway_assert(op1->TypeGet() == op2->gtOp.gtOp1->TypeGet());
- cmop = GT_RSZ;
- }
+ break;
+ }
- /* Replace with an assignment operator */
- noway_assert(GT_ADD - GT_ADD == GT_ASG_ADD - GT_ASG_ADD);
- noway_assert(GT_SUB - GT_ADD == GT_ASG_SUB - GT_ASG_ADD);
- noway_assert(GT_OR - GT_ADD == GT_ASG_OR - GT_ASG_ADD);
- noway_assert(GT_XOR - GT_ADD == GT_ASG_XOR - GT_ASG_ADD);
- noway_assert(GT_AND - GT_ADD == GT_ASG_AND - GT_ASG_ADD);
- noway_assert(GT_LSH - GT_ADD == GT_ASG_LSH - GT_ASG_ADD);
- noway_assert(GT_RSH - GT_ADD == GT_ASG_RSH - GT_ASG_ADD);
- noway_assert(GT_RSZ - GT_ADD == GT_ASG_RSZ - GT_ASG_ADD);
+ if (cmop == GT_RSH && varTypeIsSmall(op1->TypeGet()) && varTypeIsUnsigned(op1->TypeGet()))
+ {
+ // Changing from x = x op y to x op= y when x is a small integer type
+ // makes the op size smaller (originally the op size was 32 bits, after
+ // sign or zero extension of x, and there is an implicit truncation in the
+ // assignment).
+ // This is ok in most cases because the upper bits were
+ // lost when assigning the op result to a small type var,
+ // but it may not be ok for the right shift operation where the higher bits
+ // could be shifted into the lower bits and preserved.
+ // Signed right shift of signed x still works (i.e. (sbyte)((int)(sbyte)x >>signed y) ==
+ // (sbyte)x >>signed y)) as do unsigned right shift ((ubyte)((int)(ubyte)x >>unsigned y) ==
+                            // (ubyte)x >>unsigned y), but signed right shift of an unsigned small type may give the
+                            // wrong result:
+ // e.g. (ubyte)((int)(ubyte)0xf0 >>signed 4) == 0x0f,
+ // but (ubyte)0xf0 >>signed 4 == 0xff which is incorrect.
+ // The result becomes correct if we use >>unsigned instead of >>signed.
+ noway_assert(op1->TypeGet() == op2->gtOp.gtOp1->TypeGet());
+ cmop = GT_RSZ;
+ }
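The comment above is easier to follow with concrete byte values; a small standalone check (hypothetical values, assuming a two's-complement target, not part of the JIT sources):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint8_t x = 0xF0;
        // What the IL computes: x is zero-extended to int, shifted, then truncated.
        uint8_t viaWidening = (uint8_t)(((int)x) >> 4); // 0x0F
        // What a signed shift on the narrow value would compute: sign bits are shifted in.
        uint8_t viaSignedNarrow = (uint8_t)(((int8_t)x) >> 4); // 0xFF
        assert(viaWidening == 0x0F);
        assert(viaSignedNarrow == 0xFF);
        // Hence the assignment form must use the unsigned shift, GT_RSZ.
        return 0;
    }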
- tree->SetOper((genTreeOps)(cmop - GT_ADD + GT_ASG_ADD));
- tree->gtOp2 = op2->gtOp.gtOp2;
+ /* Replace with an assignment operator */
+ noway_assert(GT_ADD - GT_ADD == GT_ASG_ADD - GT_ASG_ADD);
+ noway_assert(GT_SUB - GT_ADD == GT_ASG_SUB - GT_ASG_ADD);
+ noway_assert(GT_OR - GT_ADD == GT_ASG_OR - GT_ASG_ADD);
+ noway_assert(GT_XOR - GT_ADD == GT_ASG_XOR - GT_ASG_ADD);
+ noway_assert(GT_AND - GT_ADD == GT_ASG_AND - GT_ASG_ADD);
+ noway_assert(GT_LSH - GT_ADD == GT_ASG_LSH - GT_ASG_ADD);
+ noway_assert(GT_RSH - GT_ADD == GT_ASG_RSH - GT_ASG_ADD);
+ noway_assert(GT_RSZ - GT_ADD == GT_ASG_RSZ - GT_ASG_ADD);
+
+ tree->SetOper((genTreeOps)(cmop - GT_ADD + GT_ASG_ADD));
+ tree->gtOp2 = op2->gtOp.gtOp2;
- /* Propagate GTF_OVERFLOW */
+ /* Propagate GTF_OVERFLOW */
- if (op2->gtOverflowEx())
- {
- tree->gtType = op2->gtType;
- tree->gtFlags |= (op2->gtFlags &
- (GTF_OVERFLOW|GTF_EXCEPT|GTF_UNSIGNED));
- }
+ if (op2->gtOverflowEx())
+ {
+ tree->gtType = op2->gtType;
+ tree->gtFlags |= (op2->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT | GTF_UNSIGNED));
+ }
#if FEATURE_SET_FLAGS
- /* Propagate GTF_SET_FLAGS */
- if (op2->gtSetFlags())
- {
- tree->gtRequestSetFlags();
- }
+ /* Propagate GTF_SET_FLAGS */
+ if (op2->gtSetFlags())
+ {
+ tree->gtRequestSetFlags();
+ }
#endif // FEATURE_SET_FLAGS
- DEBUG_DESTROY_NODE(op2);
- op2 = tree->gtOp2;
-
- /* The target is used as well as being defined */
- if (op1->OperIsLocal())
- op1->gtFlags |= GTF_VAR_USEASG;
+ DEBUG_DESTROY_NODE(op2);
+ op2 = tree->gtOp2;
+ /* The target is used as well as being defined */
+ if (op1->OperIsLocal())
+ {
+ op1->gtFlags |= GTF_VAR_USEASG;
+ }
#if CPU_HAS_FP_SUPPORT
- /* Check for the special case "x += y * x;" */
+ /* Check for the special case "x += y * x;" */
- // GT_ASG_MUL only supported for floating point types
- if (cmop != GT_ADD && cmop != GT_SUB)
- break;
-
- if (op2->gtOper == GT_MUL && varTypeIsFloating(tree->TypeGet()))
- {
- if (GenTree::Compare(op1, op2->gtOp.gtOp1))
+ // GT_ASG_MUL only supported for floating point types
+ if (cmop != GT_ADD && cmop != GT_SUB)
{
- /* Change "x += x * y" into "x *= (y + 1)" */
-
- op2 = op2->gtOp.gtOp2;
+ break;
}
- else if (GenTree::Compare(op1, op2->gtOp.gtOp2))
+
+ if (op2->gtOper == GT_MUL && varTypeIsFloating(tree->TypeGet()))
{
- /* Change "x += y * x" into "x *= (y + 1)" */
+ if (GenTree::Compare(op1, op2->gtOp.gtOp1))
+ {
+ /* Change "x += x * y" into "x *= (y + 1)" */
- op2 = op2->gtOp.gtOp1;
- }
- else
- break;
+ op2 = op2->gtOp.gtOp2;
+ }
+ else if (GenTree::Compare(op1, op2->gtOp.gtOp2))
+ {
+ /* Change "x += y * x" into "x *= (y + 1)" */
- op1 = gtNewDconNode(1.0);
+ op2 = op2->gtOp.gtOp1;
+ }
+ else
+ {
+ break;
+ }
- /* Now make the "*=" node */
+ op1 = gtNewDconNode(1.0);
- if (cmop == GT_ADD)
- {
- /* Change "x += x * y" into "x *= (y + 1)" */
+ /* Now make the "*=" node */
- tree->gtOp2 = op2 = gtNewOperNode(GT_ADD,
- tree->TypeGet(),
- op2,
- op1);
- }
- else
- {
- /* Change "x -= x * y" into "x *= (1 - y)" */
+ if (cmop == GT_ADD)
+ {
+ /* Change "x += x * y" into "x *= (y + 1)" */
+
+ tree->gtOp2 = op2 = gtNewOperNode(GT_ADD, tree->TypeGet(), op2, op1);
+ }
+ else
+ {
+ /* Change "x -= x * y" into "x *= (1 - y)" */
- noway_assert(cmop == GT_SUB);
- tree->gtOp2 = op2 = gtNewOperNode(GT_SUB,
- tree->TypeGet(),
- op1,
- op2);
+ noway_assert(cmop == GT_SUB);
+ tree->gtOp2 = op2 = gtNewOperNode(GT_SUB, tree->TypeGet(), op1, op2);
+ }
+ tree->ChangeOper(GT_ASG_MUL);
}
- tree->ChangeOper(GT_ASG_MUL);
- }
#endif // CPU_HAS_FP_SUPPORT
+ }
}
- }
- break;
+ break;
- case GT_NOT:
+ case GT_NOT:
- /* Is the destination identical to the first RHS sub-operand? */
+ /* Is the destination identical to the first RHS sub-operand? */
- if (GenTree::Compare(op1, op2->gtOp.gtOp1))
- {
- /* This is "x = ~x" which is the same as "x ^= -1"
- * Transform the node into a GT_ASG_XOR */
+ if (GenTree::Compare(op1, op2->gtOp.gtOp1))
+ {
+ /* This is "x = ~x" which is the same as "x ^= -1"
+ * Transform the node into a GT_ASG_XOR */
- noway_assert(genActualType(typ) == TYP_INT ||
- genActualType(typ) == TYP_LONG);
+ noway_assert(genActualType(typ) == TYP_INT || genActualType(typ) == TYP_LONG);
- op2->gtOp.gtOp2 = (genActualType(typ) == TYP_INT)
- ? gtNewIconNode(-1)
- : gtNewLconNode(-1);
+ op2->gtOp.gtOp2 = (genActualType(typ) == TYP_INT) ? gtNewIconNode(-1) : gtNewLconNode(-1);
- cmop = GT_XOR;
- goto ASG_OP;
+ cmop = GT_XOR;
+ goto ASG_OP;
+ }
+
+ break;
+ default:
+ break;
}
break;
- default:
- break;
- }
- break;
-
- case GT_MUL:
-
- /* Check for the case "(val + icon) * icon" */
+ case GT_MUL:
- if (op2->gtOper == GT_CNS_INT &&
- op1->gtOper == GT_ADD)
- {
- GenTreePtr add = op1->gtOp.gtOp2;
+ /* Check for the case "(val + icon) * icon" */
- if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0))
+ if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD)
{
- if (tree->gtOverflow() || op1->gtOverflow())
+ GenTreePtr add = op1->gtOp.gtOp2;
+
+ if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0))
{
- break;
- }
+ if (tree->gtOverflow() || op1->gtOverflow())
+ {
+ break;
+ }
- ssize_t imul = op2->gtIntCon.gtIconVal;
- ssize_t iadd = add->gtIntCon.gtIconVal;
+ ssize_t imul = op2->gtIntCon.gtIconVal;
+ ssize_t iadd = add->gtIntCon.gtIconVal;
- /* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */
+ /* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */
- oper = GT_ADD;
- tree->ChangeOper(oper);
+ oper = GT_ADD;
+ tree->ChangeOper(oper);
- op2->gtIntCon.gtIconVal = iadd * imul;
+ op2->gtIntCon.gtIconVal = iadd * imul;
- op1->ChangeOper(GT_MUL);
+ op1->ChangeOper(GT_MUL);
- add->gtIntCon.gtIconVal = imul;
+ add->gtIntCon.gtIconVal = imul;
#ifdef _TARGET_64BIT_
- if (add->gtType == TYP_INT)
- {
- // we need to properly re-sign-extend or truncate after multiplying two int constants above
- add->AsIntCon()->TruncateOrSignExtend32();
- }
+ if (add->gtType == TYP_INT)
+ {
+ // we need to properly re-sign-extend or truncate after multiplying two int constants above
+ add->AsIntCon()->TruncateOrSignExtend32();
+ }
#endif //_TARGET_64BIT_
+ }
}
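The folding above is the distributive law applied to the two constants; a minimal check with made-up, non-overflowing values (not part of the JIT sources):

    #include <cassert>

    int main()
    {
        int val = 9;
        const int iadd = 4, imul = 8;
        // '(val + iadd) * imul' equals '(val * imul) + (iadd * imul)', so 'iadd * imul'
        // can be folded into a single constant on the outer add.
        assert((val + iadd) * imul == (val * imul) + (iadd * imul));
        return 0;
    }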
- }
- break;
-
- case GT_DIV:
+ break;
- /* For "val / 1", just return "val" */
+ case GT_DIV:
- if (op2->IsIntegralConst(1))
- {
- DEBUG_DESTROY_NODE(tree);
- return op1;
- }
+ /* For "val / 1", just return "val" */
- break;
+ if (op2->IsIntegralConst(1))
+ {
+ DEBUG_DESTROY_NODE(tree);
+ return op1;
+ }
- case GT_LSH:
+ break;
- /* Check for the case "(val + icon) << icon" */
+ case GT_LSH:
- if (op2->IsCnsIntOrI() &&
- op1->gtOper == GT_ADD && !op1->gtOverflow())
- {
- GenTreePtr cns = op1->gtOp.gtOp2;
+ /* Check for the case "(val + icon) << icon" */
- if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0))
+ if (op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow())
{
- ssize_t ishf = op2->gtIntConCommon.IconValue();
- ssize_t iadd = cns->gtIntConCommon.IconValue();
+ GenTreePtr cns = op1->gtOp.gtOp2;
+
+ if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0))
+ {
+ ssize_t ishf = op2->gtIntConCommon.IconValue();
+ ssize_t iadd = cns->gtIntConCommon.IconValue();
- // printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n");
+ // printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n");
- /* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */
+ /* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */
- tree->ChangeOper(GT_ADD);
- ssize_t result = iadd << ishf;
- op2->gtIntConCommon.SetIconValue(result);
+ tree->ChangeOper(GT_ADD);
+ ssize_t result = iadd << ishf;
+ op2->gtIntConCommon.SetIconValue(result);
#ifdef _TARGET_64BIT_
- if (op1->gtType == TYP_INT)
- {
- op2->AsIntCon()->TruncateOrSignExtend32();
- }
+ if (op1->gtType == TYP_INT)
+ {
+ op2->AsIntCon()->TruncateOrSignExtend32();
+ }
#endif // _TARGET_64BIT_
-
- // we are reusing the shift amount node here, but the type we want is that of the shift result
- op2->gtType = op1->gtType;
- if (cns->gtOper == GT_CNS_INT &&
- cns->gtIntCon.gtFieldSeq != nullptr &&
- cns->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
- {
- assert(cns->gtIntCon.gtFieldSeq->m_next == nullptr);
- op2->gtIntCon.gtFieldSeq = cns->gtIntCon.gtFieldSeq;
- }
+ // we are reusing the shift amount node here, but the type we want is that of the shift result
+ op2->gtType = op1->gtType;
- op1->ChangeOper(GT_LSH);
+ if (cns->gtOper == GT_CNS_INT && cns->gtIntCon.gtFieldSeq != nullptr &&
+ cns->gtIntCon.gtFieldSeq->IsConstantIndexFieldSeq())
+ {
+ assert(cns->gtIntCon.gtFieldSeq->m_next == nullptr);
+ op2->gtIntCon.gtFieldSeq = cns->gtIntCon.gtFieldSeq;
+ }
+
+ op1->ChangeOper(GT_LSH);
- cns->gtIntConCommon.SetIconValue(ishf);
+ cns->gtIntConCommon.SetIconValue(ishf);
+ }
}
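The shift case is the same idea with a shift standing in for the multiply; a minimal check with made-up values (not part of the JIT sources):

    #include <cassert>

    int main()
    {
        int val = 9;
        const int iadd = 4, ishf = 3;
        // '(val + iadd) << ishf' equals '(val << ishf) + (iadd << ishf)' when nothing
        // overflows, so 'iadd << ishf' becomes a constant on the outer add.
        assert(((val + iadd) << ishf) == ((val << ishf) + (iadd << ishf)));
        return 0;
    }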
- }
- break;
+ break;
- case GT_XOR:
+ case GT_XOR:
- if (!optValnumCSE_phase)
- {
- /* "x ^ -1" is "~x" */
-
- if (op2->IsIntegralConst(-1))
- {
- tree->ChangeOper(GT_NOT);
- tree->gtOp2 = NULL;
- DEBUG_DESTROY_NODE(op2);
- }
- else if (op2->IsIntegralConst(1) && op1->OperIsCompare())
+ if (!optValnumCSE_phase)
{
- /* "binaryVal ^ 1" is "!binaryVal" */
- gtReverseCond(op1);
- DEBUG_DESTROY_NODE(op2);
- DEBUG_DESTROY_NODE(tree);
- return op1;
+ /* "x ^ -1" is "~x" */
+
+ if (op2->IsIntegralConst(-1))
+ {
+ tree->ChangeOper(GT_NOT);
+ tree->gtOp2 = nullptr;
+ DEBUG_DESTROY_NODE(op2);
+ }
+ else if (op2->IsIntegralConst(1) && op1->OperIsCompare())
+ {
+ /* "binaryVal ^ 1" is "!binaryVal" */
+ gtReverseCond(op1);
+ DEBUG_DESTROY_NODE(op2);
+ DEBUG_DESTROY_NODE(tree);
+ return op1;
+ }
}
- }
- break;
+ break;
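Both GT_XOR rewrites are small bit identities; a minimal check (not part of the JIT sources, assuming two's-complement integers):

    #include <cassert>

    int main()
    {
        int x = 0x1234;
        // "x ^ -1" flips every bit, which is "~x" on two's-complement integers.
        assert((x ^ -1) == ~x);
        // "cmp ^ 1" flips a 0/1 comparison result, i.e. reverses the condition.
        assert(((3 > 2) ^ 1) == 0);
        return 0;
    }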
- case GT_INITBLK:
- return fgMorphInitBlock(tree);
- break;
+ case GT_INITBLK:
+ return fgMorphInitBlock(tree);
+ break;
- case GT_COPYOBJ:
- case GT_COPYBLK:
- return fgMorphCopyBlock(tree);
- break;
+ case GT_COPYOBJ:
+ case GT_COPYBLK:
+ return fgMorphCopyBlock(tree);
+ break;
- default:
- break;
+ default:
+ break;
}
return tree;
}
-// code to generate a magic number and shift amount for the magic number division
+// code to generate a magic number and shift amount for the magic number division
// optimization. This code was previously in UTC, where it notes it was taken from
// _The_PowerPC_Compiler_Writer's_Guide_, pages 57-58.
// The paper it is based on is "Division by invariant integers using multiplication"
// by Torbjorn Granlund and Peter L. Montgomery in PLDI 94
template <typename T>
-T GetSignedMagicNumberForDivide(T denom, int *shift /*out*/)
+T GetSignedMagicNumberForDivide(T denom, int* shift /*out*/)
{
// static SMAG smag;
- const int bits = sizeof(T) * 8;
+ const int bits = sizeof(T) * 8;
const int bits_minus_1 = bits - 1;
typedef typename jitstd::make_unsigned<T>::type UT;
@@ -13050,42 +13184,45 @@ T GetSignedMagicNumberForDivide(T denom, int *shift /*out*/)
const UT two_nminus1 = UT(1) << bits_minus_1;
int p;
- UT absDenom;
- UT absNc;
- UT delta;
- UT q1;
- UT r1;
- UT r2;
- UT q2;
- UT t;
- T result_magic;
+ UT absDenom;
+ UT absNc;
+ UT delta;
+ UT q1;
+ UT r1;
+ UT r2;
+ UT q2;
+ UT t;
+ T result_magic;
int result_shift;
int iters = 0;
absDenom = abs(denom);
- t = two_nminus1 + ((unsigned int)denom >> 31);
- absNc = t - 1 - (t % absDenom); // absolute value of nc
- p = bits_minus_1; // initialize p
- q1 = two_nminus1 / absNc; // initialize q1 = 2^p / abs(nc)
- r1 = two_nminus1 - (q1 * absNc); // initialize r1 = rem(2^p, abs(nc))
- q2 = two_nminus1 / absDenom; // initialize q1 = 2^p / abs(denom)
- r2 = two_nminus1 - (q2 * absDenom); // initialize r1 = rem(2^p, abs(denom))
-
- do {
+ t = two_nminus1 + ((unsigned int)denom >> 31);
+ absNc = t - 1 - (t % absDenom); // absolute value of nc
+ p = bits_minus_1; // initialize p
+ q1 = two_nminus1 / absNc; // initialize q1 = 2^p / abs(nc)
+ r1 = two_nminus1 - (q1 * absNc); // initialize r1 = rem(2^p, abs(nc))
+    q2       = two_nminus1 / absDenom;        // initialize q2 = 2^p / abs(denom)
+    r2       = two_nminus1 - (q2 * absDenom); // initialize r2 = rem(2^p, abs(denom))
+
+ do
+ {
iters++;
p++;
- q1 *= 2; // update q1 = 2^p / abs(nc)
- r1 *= 2; // update r1 = rem(2^p / abs(nc))
+ q1 *= 2; // update q1 = 2^p / abs(nc)
+ r1 *= 2; // update r1 = rem(2^p / abs(nc))
- if (r1 >= absNc) { // must be unsigned comparison
+ if (r1 >= absNc)
+ { // must be unsigned comparison
q1++;
r1 -= absNc;
}
- q2 *= 2; // update q2 = 2^p / abs(denom)
- r2 *= 2; // update r2 = rem(2^p / abs(denom))
+ q2 *= 2; // update q2 = 2^p / abs(denom)
+ r2 *= 2; // update r2 = rem(2^p / abs(denom))
- if (r2 >= absDenom) { // must be unsigned comparison
+ if (r2 >= absDenom)
+ { // must be unsigned comparison
q2++;
r2 -= absDenom;
}
@@ -13093,16 +13230,16 @@ T GetSignedMagicNumberForDivide(T denom, int *shift /*out*/)
delta = absDenom - r2;
} while (q1 < delta || (q1 == delta && r1 == 0));
- result_magic = q2 + 1; // resulting magic number
- if (denom < 0) {
+ result_magic = q2 + 1; // resulting magic number
+ if (denom < 0)
+ {
result_magic = -result_magic;
}
- *shift = p - bits; // resulting shift
+ *shift = p - bits; // resulting shift
return result_magic;
}
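A standalone model of how the magic number is consumed may help; the sketch below (hypothetical helper names, int32_t only, not part of the JIT sources) repeats the computation above and then mirrors the multiply-high, optional numerator add/subtract, shift, and sign fix-up that fgMorphDivByConst assembles:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-alone copy of the Granlund/Montgomery computation, int32_t only.
    static int32_t GetMagic32(int32_t denom, int* shift)
    {
        uint32_t absDenom = (denom < 0) ? (uint32_t)(-(int64_t)denom) : (uint32_t)denom;
        const uint32_t two31 = 0x80000000u;
        uint32_t t     = two31 + ((uint32_t)denom >> 31);
        uint32_t absNc = t - 1 - (t % absDenom);
        int      p     = 31;
        uint32_t q1 = two31 / absNc,    r1 = two31 - q1 * absNc;
        uint32_t q2 = two31 / absDenom, r2 = two31 - q2 * absDenom;
        uint32_t delta;
        do
        {
            p++;
            q1 *= 2; r1 *= 2;
            if (r1 >= absNc)    { q1++; r1 -= absNc; }
            q2 *= 2; r2 *= 2;
            if (r2 >= absDenom) { q2++; r2 -= absDenom; }
            delta = absDenom - r2;
        } while (q1 < delta || (q1 == delta && r1 == 0));
        int32_t magic = (int32_t)(q2 + 1);
        if (denom < 0) { magic = -magic; }
        *shift = p - 32;
        return magic;
    }

    // Mirrors the tree shape built for a signed divide by a constant.
    static int32_t MagicDivide(int32_t n, int32_t d)
    {
        int     shift;
        int32_t magic = GetMagic32(d, &shift);
        int32_t q = (int32_t)(((int64_t)magic * n) >> 32); // GT_MULHI (assumes arithmetic shift)
        if (d > 0 && magic < 0)      { q += n; }           // add the numerator back in
        else if (d < 0 && magic > 0) { q -= n; }           // subtract the numerator off
        q >>= shift;                                       // arithmetic shift by 'shift'
        q += (int32_t)((uint32_t)q >> 31);                 // add the sign bit (GT_RSZ + GT_ADD)
        return q;
    }

    int main()
    {
        const int32_t divisors[] = { 3, 7, 10, -5, -7 };
        for (int32_t n = -100; n <= 100; n++)
        {
            for (int32_t d : divisors)
            {
                assert(MagicDivide(n, d) == n / d);
            }
        }
        return 0;
    }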
-
bool Compiler::fgShouldUseMagicNumberDivide(GenTreeOp* tree)
{
#ifdef _TARGET_ARM64_
@@ -13113,13 +13250,13 @@ bool Compiler::fgShouldUseMagicNumberDivide(GenTreeOp* tree)
// During the optOptimizeValnumCSEs phase we can call fgMorph and when we do,
// if this method returns true we will introduce a new LclVar and
// a couple of new GenTree nodes, including an assignment to the new LclVar.
- // None of these new GenTree nodes will have valid ValueNumbers.
+ // None of these new GenTree nodes will have valid ValueNumbers.
// That is an invalid state for a GenTree node during the optOptimizeValnumCSEs phase.
//
- // Also during optAssertionProp when extracting side effects we can assert
+ // Also during optAssertionProp when extracting side effects we can assert
// during gtBuildCommaList if we have one tree that has Value Numbers
// and another one that does not.
- //
+ //
if (!fgGlobalMorph)
{
// We only perform the Magic Number Divide optimization during
@@ -13128,34 +13265,45 @@ bool Compiler::fgShouldUseMagicNumberDivide(GenTreeOp* tree)
}
if (tree->gtFlags & GTF_OVERFLOW)
+ {
return false;
+ }
if (tree->gtOp2->gtOper != GT_CNS_INT && tree->gtOp2->gtOper != GT_CNS_LNG)
+ {
return false;
+ }
ssize_t cons = tree->gtOp2->gtIntConCommon.IconValue();
if (cons == 0 || cons == -1 || cons == 1)
+ {
return false;
+ }
// codegen will expand these
if (cons == SSIZE_T_MIN || isPow2(abs(cons)))
+ {
return false;
+ }
// someone else will fold this away, so don't make it complicated for them
if (tree->gtOp1->IsCnsIntOrI())
+ {
return false;
+ }
// There is no technical barrier to handling unsigned, however it is quite rare
// and more work to support and test
if (tree->gtFlags & GTF_UNSIGNED)
+ {
return false;
+ }
return true;
#endif
}
-
// transform x%c -> x-((x/c)*c)
GenTree* Compiler::fgMorphModByConst(GenTreeOp* tree)
@@ -13182,7 +13330,7 @@ GenTree* Compiler::fgMorphModByConst(GenTreeOp* tree)
}
// For ARM64 we don't have a remainder instruction,
-// The architecture manual suggests the following transformation to
+// The architecture manual suggests the following transformation to
// generate code for such operator:
//
// a % b = a - (a / b) * b;
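The rewrite relies on the identity that truncating division satisfies; a minimal standalone check (made-up values, not part of the JIT sources):

    #include <cassert>

    int main()
    {
        // Truncating integer division guarantees a == (a / b) * b + (a % b), so the
        // remainder can be rebuilt as a - (a / b) * b, which is the tree shape
        // fgMorphModToSubMulDiv produces on targets without a remainder instruction.
        const int divisors[] = { -7, -3, 2, 5 };
        for (int a = -20; a <= 20; a++)
        {
            for (int b : divisors)
            {
                assert(a % b == a - (a / b) * b);
            }
        }
        return 0;
    }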
@@ -13202,7 +13350,7 @@ GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree)
{
tree->SetOper(GT_DIV);
}
- else if (tree->OperGet() == GT_UMOD)
+ else if (tree->OperGet() == GT_UMOD)
{
tree->SetOper(GT_UDIV);
}
@@ -13211,9 +13359,9 @@ GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree)
noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv");
}
- var_types type = tree->gtType;
- GenTree* denominator = tree->gtOp2;
- GenTree* numerator = tree->gtOp1;
+ var_types type = tree->gtType;
+ GenTree* denominator = tree->gtOp2;
+ GenTree* numerator = tree->gtOp1;
if (!numerator->OperIsLeaf())
{
@@ -13252,11 +13400,11 @@ GenTree* Compiler::fgMorphDivByConst(GenTreeOp* tree)
if (tree->gtType == TYP_INT)
{
- magic = GetSignedMagicNumberForDivide<int32_t>((int32_t) denominator, &shift);
+ magic = GetSignedMagicNumberForDivide<int32_t>((int32_t)denominator, &shift);
}
else
{
- magic = GetSignedMagicNumberForDivide<int64_t>((int64_t) denominator, &shift);
+ magic = GetSignedMagicNumberForDivide<int64_t>((int64_t)denominator, &shift);
}
GenTree* numerator = nullptr;
@@ -13270,26 +13418,30 @@ GenTree* Compiler::fgMorphDivByConst(GenTreeOp* tree)
}
if (type == TYP_LONG)
+ {
tree->gtOp2->gtIntConCommon.SetLngValue(magic);
+ }
else
+ {
tree->gtOp2->gtIntConCommon.SetIconValue((ssize_t)magic);
+ }
tree->SetOper(GT_MULHI);
-
- GenTree* t = tree;
+
+ GenTree* t = tree;
GenTree* mulresult = tree;
JITDUMP("Multiply Result:\n");
DISPTREE(mulresult);
-
- GenTree *adjusted = mulresult;
- if (denominator > 0 && magic < 0)
+ GenTree* adjusted = mulresult;
+
+ if (denominator > 0 && magic < 0)
{
// add the numerator back in
adjusted = gtNewOperNode(GT_ADD, type, mulresult, numerator);
- }
- else if (denominator < 0 && magic > 0)
+ }
+ else if (denominator < 0 && magic > 0)
{
// subtract the numerator off
adjusted = gtNewOperNode(GT_SUB, type, mulresult, numerator);
@@ -13308,7 +13460,6 @@ GenTree* Compiler::fgMorphDivByConst(GenTreeOp* tree)
GenTree* secondClone = fgMakeMultiUse(&result1);
GenTree* result2 = gtNewOperNode(GT_RSZ, type, secondClone, gtNewIconNode(genTypeSize(type) * 8 - 1, type));
-
GenTree* result = gtNewOperNode(GT_ADD, type, result1, result2);
JITDUMP("Final Magic Number divide:\n");
@@ -13382,7 +13533,7 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
// (x << c1) op (x >>> c2)
// (x >>> c1) op (x << c2)
//
- // where
+ // where
// c1 and c2 are const
// c1 + c2 == bitsize(x)
// N == bitsize(x)
@@ -13390,8 +13541,7 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
// M & (N - 1) == N - 1
// op is either | or ^
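A concrete instance of the pattern being matched (constant shift amounts, made-up value, not part of the JIT sources):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t x = 0x12345678u;
        // A 32-bit rotate left by 3 spelled as two shifts and an OR; c1 + c2 == 32,
        // which is the shape that gets morphed into a single GT_ROL node.
        uint32_t rotated = (x << 3) | (x >> 29);
        assert(rotated == 0x91A2B3C0u);
        return 0;
    }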
- if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) ||
- ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0))
+ if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0))
{
// We can't do anything if the tree has assignments, calls, or volatile
// reads. Note that we allow GTF_EXCEPT side effect since any exceptions
@@ -13399,22 +13549,22 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
return tree;
}
- genTreeOps oper = tree->OperGet();
+ genTreeOps oper = tree->OperGet();
assert(fgOperIsBitwiseRotationRoot(oper));
// Check if we have an LSH on one side of the OR and an RSZ on the other side.
- GenTreePtr op1 = tree->gtGetOp1();
- GenTreePtr op2 = tree->gtGetOp2();
- GenTreePtr leftShiftTree = nullptr;
+ GenTreePtr op1 = tree->gtGetOp1();
+ GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr leftShiftTree = nullptr;
GenTreePtr rightShiftTree = nullptr;
if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ))
{
- leftShiftTree = op1;
+ leftShiftTree = op1;
rightShiftTree = op2;
}
else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH))
{
- leftShiftTree = op2;
+ leftShiftTree = op2;
rightShiftTree = op1;
}
else
@@ -13426,11 +13576,11 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
// We already checked that there are no side effects above.
if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1()))
{
- GenTreePtr rotatedValue = leftShiftTree->gtGetOp1();
- var_types rotatedValueActualType = genActualType(rotatedValue->gtType);
- ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8;
+ GenTreePtr rotatedValue = leftShiftTree->gtGetOp1();
+ var_types rotatedValueActualType = genActualType(rotatedValue->gtType);
+ ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8;
noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64));
- GenTreePtr leftShiftIndex = leftShiftTree->gtGetOp2();
+ GenTreePtr leftShiftIndex = leftShiftTree->gtGetOp2();
GenTreePtr rightShiftIndex = rightShiftTree->gtGetOp2();
// The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits
@@ -13438,15 +13588,15 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
// higher bits are not masked, the transformation is still valid since the result
        // of MSIL shift instructions is unspecified if the shift amount is greater than or
        // equal to the width of the value being shifted.
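For a variable rotation the masked form looks like the sketch below (hypothetical helper, not part of the JIT sources); because the mask keeps at least the low five bits of the shift amount, the expression still describes a rotate and can be morphed:

    #include <cassert>
    #include <cstdint>

    static uint32_t RotateLeft32(uint32_t x, unsigned n)
    {
        // Both shift counts are masked to [0, 31]; this also makes n == 0 well defined,
        // since an unmasked "x >> 32" would be undefined behavior in C++.
        return (x << (n & 31)) | (x >> ((32 - n) & 31));
    }

    int main()
    {
        assert(RotateLeft32(0x80000001u, 1) == 0x00000003u);
        assert(RotateLeft32(0x12345678u, 0) == 0x12345678u);
        return 0;
    }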
- ssize_t minimalMask = rotatedValueBitSize - 1;
- ssize_t leftShiftMask = -1;
+ ssize_t minimalMask = rotatedValueBitSize - 1;
+ ssize_t leftShiftMask = -1;
ssize_t rightShiftMask = -1;
if ((leftShiftIndex->OperGet() == GT_AND))
{
if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
- leftShiftMask = leftShiftIndex->gtGetOp2()->gtIntCon.gtIconVal;
+ leftShiftMask = leftShiftIndex->gtGetOp2()->gtIntCon.gtIconVal;
leftShiftIndex = leftShiftIndex->gtGetOp1();
}
else
@@ -13459,7 +13609,7 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
{
if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
- rightShiftMask = rightShiftIndex->gtGetOp2()->gtIntCon.gtIconVal;
+ rightShiftMask = rightShiftIndex->gtGetOp2()->gtIntCon.gtIconVal;
rightShiftIndex = rightShiftIndex->gtGetOp1();
}
else
@@ -13468,8 +13618,7 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
}
}
- if (((minimalMask & leftShiftMask) != minimalMask) ||
- ((minimalMask & rightShiftMask) != minimalMask))
+ if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask))
{
// The shift index is overmasked, e.g., we have
// something like (x << y & 15) or
@@ -13478,22 +13627,22 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
return tree;
}
- GenTreePtr shiftIndexWithAdd = nullptr;
+ GenTreePtr shiftIndexWithAdd = nullptr;
GenTreePtr shiftIndexWithoutAdd = nullptr;
- genTreeOps rotateOp = GT_NONE;
- GenTreePtr rotateIndex = nullptr;
+ genTreeOps rotateOp = GT_NONE;
+ GenTreePtr rotateIndex = nullptr;
if (leftShiftIndex->OperGet() == GT_ADD)
{
- shiftIndexWithAdd = leftShiftIndex;
+ shiftIndexWithAdd = leftShiftIndex;
shiftIndexWithoutAdd = rightShiftIndex;
- rotateOp = GT_ROR;
+ rotateOp = GT_ROR;
}
else if (rightShiftIndex->OperGet() == GT_ADD)
{
- shiftIndexWithAdd = rightShiftIndex;
+ shiftIndexWithAdd = rightShiftIndex;
shiftIndexWithoutAdd = leftShiftIndex;
- rotateOp = GT_ROL;
+ rotateOp = GT_ROL;
}
if (shiftIndexWithAdd != nullptr)
@@ -13531,16 +13680,14 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
}
}
}
- else if ((leftShiftIndex->IsCnsIntOrI() &&
- rightShiftIndex->IsCnsIntOrI()))
+ else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI()))
{
- if (leftShiftIndex->gtIntCon.gtIconVal +
- rightShiftIndex->gtIntCon.gtIconVal == rotatedValueBitSize)
+ if (leftShiftIndex->gtIntCon.gtIconVal + rightShiftIndex->gtIntCon.gtIconVal == rotatedValueBitSize)
{
// We found this pattern:
// (x << c1) | (x >>> c2)
// where c1 and c2 are const and c1 + c2 == bitsize(x)
- rotateOp = GT_ROL;
+ rotateOp = GT_ROL;
rotateIndex = leftShiftIndex;
}
}
@@ -13569,7 +13716,7 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
return tree;
}
}
-#endif //LEGACY_BACKEND
+#endif // LEGACY_BACKEND
return tree;
}
@@ -13577,10 +13724,10 @@ GenTreePtr Compiler::fgRecognizeAndMorphBitwiseRotation(GenTreePtr tree)
GenTreePtr Compiler::fgMorphToEmulatedFP(GenTreePtr tree)
{
- genTreeOps oper = tree->OperGet();
- var_types typ = tree->TypeGet();
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ genTreeOps oper = tree->OperGet();
+ var_types typ = tree->TypeGet();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
/*
We have to use helper calls for all FP operations:
@@ -13590,34 +13737,32 @@ GenTreePtr Compiler::fgMorphToEmulatedFP(GenTreePtr tree)
comparisons of FP values
*/
- if (varTypeIsFloating(typ) || (op1 && varTypeIsFloating(op1->TypeGet())))
+ if (varTypeIsFloating(typ) || (op1 && varTypeIsFloating(op1->TypeGet())))
{
- int helper;
- GenTreePtr args;
- size_t argc = genTypeStSz(typ);
+ int helper;
+ GenTreePtr args;
+ size_t argc = genTypeStSz(typ);
/* Not all FP operations need helper calls */
switch (oper)
{
- case GT_ASG:
- case GT_IND:
- case GT_LIST:
- case GT_ADDR:
- case GT_COMMA:
- return tree;
+ case GT_ASG:
+ case GT_IND:
+ case GT_LIST:
+ case GT_ADDR:
+ case GT_COMMA:
+ return tree;
}
#ifdef DEBUG
/* If the result isn't FP, it better be a compare or cast */
- if (!(varTypeIsFloating(typ) ||
- tree->OperIsCompare() || oper == GT_CAST))
+ if (!(varTypeIsFloating(typ) || tree->OperIsCompare() || oper == GT_CAST))
gtDispTree(tree);
- noway_assert(varTypeIsFloating(typ) ||
- tree->OperIsCompare() || oper == GT_CAST);
+ noway_assert(varTypeIsFloating(typ) || tree->OperIsCompare() || oper == GT_CAST);
#endif
/* Keep track of how many arguments we're passing */
@@ -13626,35 +13771,56 @@ GenTreePtr Compiler::fgMorphToEmulatedFP(GenTreePtr tree)
/* Is this a binary operator? */
- if (op2)
+ if (op2)
{
/* Add the second operand to the argument count */
- fgPtrArgCntCur += argc; argc *= 2;
+ fgPtrArgCntCur += argc;
+ argc *= 2;
/* What kind of an operator do we have? */
switch (oper)
{
- case GT_ADD: helper = CPX_R4_ADD; break;
- case GT_SUB: helper = CPX_R4_SUB; break;
- case GT_MUL: helper = CPX_R4_MUL; break;
- case GT_DIV: helper = CPX_R4_DIV; break;
- // case GT_MOD: helper = CPX_R4_REM; break;
+ case GT_ADD:
+ helper = CPX_R4_ADD;
+ break;
+ case GT_SUB:
+ helper = CPX_R4_SUB;
+ break;
+ case GT_MUL:
+ helper = CPX_R4_MUL;
+ break;
+ case GT_DIV:
+ helper = CPX_R4_DIV;
+ break;
+ // case GT_MOD: helper = CPX_R4_REM; break;
- case GT_EQ : helper = CPX_R4_EQ ; break;
- case GT_NE : helper = CPX_R4_NE ; break;
- case GT_LT : helper = CPX_R4_LT ; break;
- case GT_LE : helper = CPX_R4_LE ; break;
- case GT_GE : helper = CPX_R4_GE ; break;
- case GT_GT : helper = CPX_R4_GT ; break;
+ case GT_EQ:
+ helper = CPX_R4_EQ;
+ break;
+ case GT_NE:
+ helper = CPX_R4_NE;
+ break;
+ case GT_LT:
+ helper = CPX_R4_LT;
+ break;
+ case GT_LE:
+ helper = CPX_R4_LE;
+ break;
+ case GT_GE:
+ helper = CPX_R4_GE;
+ break;
+ case GT_GT:
+ helper = CPX_R4_GT;
+ break;
- default:
+ default:
#ifdef DEBUG
- gtDispTree(tree);
+ gtDispTree(tree);
#endif
- noway_assert(!"unexpected FP binary op");
- break;
+ noway_assert(!"unexpected FP binary op");
+ break;
}
args = gtNewArgList(tree->gtOp.gtOp2, tree->gtOp.gtOp1);
@@ -13663,20 +13829,22 @@ GenTreePtr Compiler::fgMorphToEmulatedFP(GenTreePtr tree)
{
switch (oper)
{
- case GT_RETURN:
- return tree;
+ case GT_RETURN:
+ return tree;
- case GT_CAST:
- noway_assert(!"FP cast");
+ case GT_CAST:
+ noway_assert(!"FP cast");
- case GT_NEG: helper = CPX_R4_NEG; break;
+ case GT_NEG:
+ helper = CPX_R4_NEG;
+ break;
- default:
+ default:
#ifdef DEBUG
- gtDispTree(tree);
+ gtDispTree(tree);
#endif
- noway_assert(!"unexpected FP unary op");
- break;
+ noway_assert(!"unexpected FP unary op");
+ break;
}
args = gtNewArgList(tree->gtOp.gtOp1);
@@ -13684,13 +13852,13 @@ GenTreePtr Compiler::fgMorphToEmulatedFP(GenTreePtr tree)
/* If we have double result/operands, modify the helper */
- if (typ == TYP_DOUBLE)
+ if (typ == TYP_DOUBLE)
{
- noway_assert(CPX_R4_NEG+1 == CPX_R8_NEG);
- noway_assert(CPX_R4_ADD+1 == CPX_R8_ADD);
- noway_assert(CPX_R4_SUB+1 == CPX_R8_SUB);
- noway_assert(CPX_R4_MUL+1 == CPX_R8_MUL);
- noway_assert(CPX_R4_DIV+1 == CPX_R8_DIV);
+ noway_assert(CPX_R4_NEG + 1 == CPX_R8_NEG);
+ noway_assert(CPX_R4_ADD + 1 == CPX_R8_ADD);
+ noway_assert(CPX_R4_SUB + 1 == CPX_R8_SUB);
+ noway_assert(CPX_R4_MUL + 1 == CPX_R8_MUL);
+ noway_assert(CPX_R4_DIV + 1 == CPX_R8_DIV);
helper++;
}
@@ -13698,65 +13866,61 @@ GenTreePtr Compiler::fgMorphToEmulatedFP(GenTreePtr tree)
{
noway_assert(tree->OperIsCompare());
- noway_assert(CPX_R4_EQ+1 == CPX_R8_EQ);
- noway_assert(CPX_R4_NE+1 == CPX_R8_NE);
- noway_assert(CPX_R4_LT+1 == CPX_R8_LT);
- noway_assert(CPX_R4_LE+1 == CPX_R8_LE);
- noway_assert(CPX_R4_GE+1 == CPX_R8_GE);
- noway_assert(CPX_R4_GT+1 == CPX_R8_GT);
+ noway_assert(CPX_R4_EQ + 1 == CPX_R8_EQ);
+ noway_assert(CPX_R4_NE + 1 == CPX_R8_NE);
+ noway_assert(CPX_R4_LT + 1 == CPX_R8_LT);
+ noway_assert(CPX_R4_LE + 1 == CPX_R8_LE);
+ noway_assert(CPX_R4_GE + 1 == CPX_R8_GE);
+ noway_assert(CPX_R4_GT + 1 == CPX_R8_GT);
}
tree = fgMorphIntoHelperCall(tree, helper, args);
- if (fgPtrArgCntMax < fgPtrArgCntCur)
+ if (fgPtrArgCntMax < fgPtrArgCntCur)
fgPtrArgCntMax = fgPtrArgCntCur;
fgPtrArgCntCur -= argc;
return tree;
- case GT_RETURN:
+ case GT_RETURN:
- if (op1)
- {
-
- if (compCurBB == genReturnBB)
+ if (op1)
{
- /* This is the 'exitCrit' call at the exit label */
- noway_assert(op1->gtType == TYP_VOID);
- noway_assert(op2 == 0);
+ if (compCurBB == genReturnBB)
+ {
+ /* This is the 'exitCrit' call at the exit label */
- tree->gtOp.gtOp1 = op1 = fgMorphTree(op1);
+ noway_assert(op1->gtType == TYP_VOID);
+ noway_assert(op2 == 0);
- return tree;
- }
+ tree->gtOp.gtOp1 = op1 = fgMorphTree(op1);
+ return tree;
+ }
- /* This is a (real) return value -- check its type */
- CLANG_FORMAT_COMMENT_ANCHOR;
+ /* This is a (real) return value -- check its type */
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- if (genActualType(op1->TypeGet()) != genActualType(info.compRetType))
- {
- bool allowMismatch = false;
+ if (genActualType(op1->TypeGet()) != genActualType(info.compRetType))
+ {
+ bool allowMismatch = false;
- // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa
- if ((info.compRetType == TYP_BYREF &&
- genActualType(op1->TypeGet()) == TYP_I_IMPL) ||
- (op1->TypeGet() == TYP_BYREF &&
- genActualType(info.compRetType) == TYP_I_IMPL))
- allowMismatch = true;
+ // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa
+ if ((info.compRetType == TYP_BYREF && genActualType(op1->TypeGet()) == TYP_I_IMPL) ||
+ (op1->TypeGet() == TYP_BYREF && genActualType(info.compRetType) == TYP_I_IMPL))
+ allowMismatch = true;
- if (varTypeIsFloating(info.compRetType) && varTypeIsFloating(op1->TypeGet()))
- allowMismatch = true;
+ if (varTypeIsFloating(info.compRetType) && varTypeIsFloating(op1->TypeGet()))
+ allowMismatch = true;
- if (!allowMismatch)
- NO_WAY("Return type mismatch");
- }
+ if (!allowMismatch)
+ NO_WAY("Return type mismatch");
+ }
#endif
- }
- break;
-
+ }
+ break;
}
return tree;
}
@@ -13767,8 +13931,7 @@ GenTreePtr Compiler::fgMorphToEmulatedFP(GenTreePtr tree)
* Transform the given tree for code generation and return an equivalent tree.
*/
-
-GenTreePtr Compiler::fgMorphTree(GenTreePtr tree, MorphAddrContext* mac)
+GenTreePtr Compiler::fgMorphTree(GenTreePtr tree, MorphAddrContext* mac)
{
noway_assert(tree);
noway_assert(tree->gtOper != GT_STMT);
@@ -13793,21 +13956,21 @@ GenTreePtr Compiler::fgMorphTree(GenTreePtr tree, MorphAddrContext* mac
}
#endif
- /*-------------------------------------------------------------------------
- * fgMorphTree() can potentially replace a tree with another, and the
- * caller has to store the return value correctly.
- * Turn this on to always make copy of "tree" here to shake out
- * hidden/unupdated references.
- */
+/*-------------------------------------------------------------------------
+ * fgMorphTree() can potentially replace a tree with another, and the
+ * caller has to store the return value correctly.
+ * Turn this on to always make copy of "tree" here to shake out
+ * hidden/unupdated references.
+ */
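Because the morpher may hand back a different node, every call site stores the result; the toy analogue below (not the real Compiler API, just an illustration of the contract) shows why dropping the return value would leave a stale reference to the old node:

    #include <cassert>

    struct Node
    {
        int   value;
        Node* operand = nullptr;
    };

    // Analogue of fgMorphTree: the node passed in may be replaced outright,
    // so the (possibly new) node is returned and the caller must store it back.
    static Node* MorphNode(Node* node)
    {
        if (node->value == 0 && node->operand != nullptr)
        {
            return node->operand; // e.g. fold a "0 + x" style node down to "x"
        }
        return node;
    }

    int main()
    {
        Node  leaf{ 42 };
        Node  add{ 0, &leaf };
        Node* root = &add;
        root = MorphNode(root); // the assignment back is the important part
        assert(root == &leaf && root->value == 42);
        return 0;
    }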
#ifdef DEBUG
- if (compStressCompile(STRESS_GENERIC_CHECK, 0))
+ if (compStressCompile(STRESS_GENERIC_CHECK, 0))
{
- GenTreePtr copy;
+ GenTreePtr copy;
#ifdef SMALL_TREE_NODES
- if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL)
+ if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(GT_ADD, TYP_INT);
}
@@ -13819,9 +13982,9 @@ GenTreePtr Compiler::fgMorphTree(GenTreePtr tree, MorphAddrContext* mac
copy->CopyFrom(tree, this);
-#if defined (LATE_DISASM)
+#if defined(LATE_DISASM)
// GT_CNS_INT is considered small, so CopyFrom() won't copy all fields
- if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
+ if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
copy->gtIntCon.gtIconHdl.gtIconHdl1 = tree->gtIntCon.gtIconHdl.gtIconHdl1;
copy->gtIntCon.gtIconHdl.gtIconHdl2 = tree->gtIntCon.gtIconHdl.gtIconHdl2;
@@ -13847,16 +14010,16 @@ GenTreePtr Compiler::fgMorphTree(GenTreePtr tree, MorphAddrContext* mac
if (optAssertionCount > 0)
{
GenTreePtr newTree = tree;
- while (newTree != NULL)
+ while (newTree != nullptr)
{
tree = newTree;
/* newTree is non-Null if we propagated an assertion */
- newTree = optAssertionProp(apFull, tree, NULL);
+ newTree = optAssertionProp(apFull, tree, nullptr);
}
- noway_assert(tree != NULL);
+ noway_assert(tree != nullptr);
}
}
- PREFAST_ASSUME(tree != NULL);
+ PREFAST_ASSUME(tree != nullptr);
#endif
}
@@ -13870,7 +14033,7 @@ GenTreePtr Compiler::fgMorphTree(GenTreePtr tree, MorphAddrContext* mac
/* Is this a constant node? */
- if (kind & GTK_CONST)
+ if (kind & GTK_CONST)
{
tree = fgMorphConst(tree);
goto DONE;
@@ -13878,7 +14041,7 @@ GenTreePtr Compiler::fgMorphTree(GenTreePtr tree, MorphAddrContext* mac
/* Is this a leaf node? */
- if (kind & GTK_LEAF)
+ if (kind & GTK_LEAF)
{
tree = fgMorphLeaf(tree);
goto DONE;
@@ -13886,7 +14049,7 @@ GenTreePtr Compiler::fgMorphTree(GenTreePtr tree, MorphAddrContext* mac
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
tree = fgMorphSmpOp(tree, mac);
goto DONE;
@@ -13894,26 +14057,26 @@ GenTreePtr Compiler::fgMorphTree(GenTreePtr tree, MorphAddrContext* mac
/* See what kind of a special operator we have here */
- switch (tree->OperGet())
+ switch (tree->OperGet())
{
- case GT_FIELD:
- tree = fgMorphField(tree, mac);
- break;
+ case GT_FIELD:
+ tree = fgMorphField(tree, mac);
+ break;
- case GT_CALL:
- tree = fgMorphCall(tree->AsCall());
- break;
+ case GT_CALL:
+ tree = fgMorphCall(tree->AsCall());
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
{
fgSetRngChkTarget(tree);
GenTreeBoundsChk* bndsChk = tree->AsBoundsChk();
- bndsChk->gtArrLen = fgMorphTree(bndsChk->gtArrLen);
- bndsChk->gtIndex = fgMorphTree(bndsChk->gtIndex);
+ bndsChk->gtArrLen = fgMorphTree(bndsChk->gtArrLen);
+ bndsChk->gtIndex = fgMorphTree(bndsChk->gtIndex);
// If the index is a comma(throw, x), just return that.
if (!optValnumCSE_phase && fgIsCommaThrow(bndsChk->gtIndex))
{
@@ -13922,48 +14085,52 @@ GenTreePtr Compiler::fgMorphTree(GenTreePtr tree, MorphAddrContext* mac
// Propagate effects flags upwards
bndsChk->gtFlags |= (bndsChk->gtArrLen->gtFlags & GTF_ALL_EFFECT);
- bndsChk->gtFlags |= (bndsChk->gtIndex->gtFlags & GTF_ALL_EFFECT);
+ bndsChk->gtFlags |= (bndsChk->gtIndex->gtFlags & GTF_ALL_EFFECT);
// Otherwise, we don't change the tree.
}
break;
- case GT_ARR_ELEM:
- tree->gtArrElem.gtArrObj = fgMorphTree(tree->gtArrElem.gtArrObj);
- tree->gtFlags |= tree->gtArrElem.gtArrObj->gtFlags & GTF_ALL_EFFECT;
+ case GT_ARR_ELEM:
+ tree->gtArrElem.gtArrObj = fgMorphTree(tree->gtArrElem.gtArrObj);
+ tree->gtFlags |= tree->gtArrElem.gtArrObj->gtFlags & GTF_ALL_EFFECT;
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- {
- tree->gtArrElem.gtArrInds[dim] = fgMorphTree(tree->gtArrElem.gtArrInds[dim]);
- tree->gtFlags |= tree->gtArrElem.gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT;
- }
- if (fgGlobalMorph)
- fgSetRngChkTarget(tree, false);
- break;
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ tree->gtArrElem.gtArrInds[dim] = fgMorphTree(tree->gtArrElem.gtArrInds[dim]);
+ tree->gtFlags |= tree->gtArrElem.gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT;
+ }
+ if (fgGlobalMorph)
+ {
+ fgSetRngChkTarget(tree, false);
+ }
+ break;
- case GT_ARR_OFFSET:
- tree->gtArrOffs.gtOffset = fgMorphTree(tree->gtArrOffs.gtOffset);
- tree->gtFlags |= tree->gtArrOffs.gtOffset->gtFlags & GTF_ALL_EFFECT;
- tree->gtArrOffs.gtIndex = fgMorphTree(tree->gtArrOffs.gtIndex);
- tree->gtFlags |= tree->gtArrOffs.gtIndex->gtFlags & GTF_ALL_EFFECT;
- tree->gtArrOffs.gtArrObj = fgMorphTree(tree->gtArrOffs.gtArrObj);
- tree->gtFlags |= tree->gtArrOffs.gtArrObj->gtFlags & GTF_ALL_EFFECT;
- if (fgGlobalMorph)
- fgSetRngChkTarget(tree, false);
- break;
+ case GT_ARR_OFFSET:
+ tree->gtArrOffs.gtOffset = fgMorphTree(tree->gtArrOffs.gtOffset);
+ tree->gtFlags |= tree->gtArrOffs.gtOffset->gtFlags & GTF_ALL_EFFECT;
+ tree->gtArrOffs.gtIndex = fgMorphTree(tree->gtArrOffs.gtIndex);
+ tree->gtFlags |= tree->gtArrOffs.gtIndex->gtFlags & GTF_ALL_EFFECT;
+ tree->gtArrOffs.gtArrObj = fgMorphTree(tree->gtArrOffs.gtArrObj);
+ tree->gtFlags |= tree->gtArrOffs.gtArrObj->gtFlags & GTF_ALL_EFFECT;
+ if (fgGlobalMorph)
+ {
+ fgSetRngChkTarget(tree, false);
+ }
+ break;
- case GT_CMPXCHG:
- tree->gtCmpXchg.gtOpLocation = fgMorphTree(tree->gtCmpXchg.gtOpLocation);
- tree->gtCmpXchg.gtOpValue = fgMorphTree(tree->gtCmpXchg.gtOpValue);
- tree->gtCmpXchg.gtOpComparand = fgMorphTree(tree->gtCmpXchg.gtOpComparand);
- break;
+ case GT_CMPXCHG:
+ tree->gtCmpXchg.gtOpLocation = fgMorphTree(tree->gtCmpXchg.gtOpLocation);
+ tree->gtCmpXchg.gtOpValue = fgMorphTree(tree->gtCmpXchg.gtOpValue);
+ tree->gtCmpXchg.gtOpComparand = fgMorphTree(tree->gtCmpXchg.gtOpComparand);
+ break;
- default:
+ default:
#ifdef DEBUG
- gtDispTree(tree);
+ gtDispTree(tree);
#endif
- noway_assert(!"unexpected operator");
+ noway_assert(!"unexpected operator");
}
DONE:
@@ -13972,7 +14139,6 @@ DONE:
return tree;
}
-
#if LOCAL_ASSERTION_PROP
/*****************************************************************************
*
@@ -13980,10 +14146,9 @@ DONE:
*
*/
-void Compiler::fgKillDependentAssertions(unsigned lclNum
- DEBUGARG(GenTreePtr tree))
+void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTreePtr tree))
{
- LclVarDsc * varDsc = &lvaTable[lclNum];
+ LclVarDsc* varDsc = &lvaTable[lclNum];
if (varDsc->lvPromoted)
{
@@ -14004,16 +14169,15 @@ void Compiler::fgKillDependentAssertions(unsigned lclNum
if (killed)
{
- AssertionIndex index = optAssertionCount;
+ AssertionIndex index = optAssertionCount;
while (killed && (index > 0))
{
- if (BitVecOps::IsMember(apTraits, killed, index - 1))
+ if (BitVecOps::IsMember(apTraits, killed, index - 1))
{
#ifdef DEBUG
AssertionDsc* curAssertion = optGetAssertion(index);
- noway_assert((curAssertion->op1.lcl.lclNum == lclNum) ||
- ((curAssertion->op2.kind == O2K_LCLVAR_COPY) &&
- (curAssertion->op2.lcl.lclNum == lclNum)));
+ noway_assert((curAssertion->op1.lcl.lclNum == lclNum) ||
+ ((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum)));
if (verbose)
{
printf("\nThe assignment ");
@@ -14037,7 +14201,6 @@ void Compiler::fgKillDependentAssertions(unsigned lclNum
}
#endif // LOCAL_ASSERTION_PROP
-
/*****************************************************************************
*
* This function is called to complete the morphing of a tree node
@@ -14049,23 +14212,25 @@ void Compiler::fgKillDependentAssertions(unsigned lclNum
*
*/
-void Compiler::fgMorphTreeDone(GenTreePtr tree,
- GenTreePtr oldTree /* == NULL */
- DEBUGARG(int morphNum))
+void Compiler::fgMorphTreeDone(GenTreePtr tree,
+ GenTreePtr oldTree /* == NULL */
+ DEBUGARG(int morphNum))
{
#ifdef DEBUG
if (verbose && treesBeforeAfterMorph)
{
printf("\nfgMorphTree (after %d):\n", morphNum);
gtDispTree(tree);
- printf(""); // in our logic this causes a flush
+ printf(""); // in our logic this causes a flush
}
#endif
if (!fgGlobalMorph)
+ {
return;
+ }
- if ((oldTree != NULL) && (oldTree != tree))
+ if ((oldTree != nullptr) && (oldTree != tree))
{
/* Ensure that we have morphed this node */
assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!");
@@ -14076,17 +14241,21 @@ void Compiler::fgMorphTreeDone(GenTreePtr tree,
}
else
{
- // Ensure that we haven't morphed this node already
+ // Ensure that we haven't morphed this node already
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!");
}
if (tree->OperKind() & GTK_CONST)
+ {
goto DONE;
+ }
#if LOCAL_ASSERTION_PROP
if (!optLocalAssertionProp)
+ {
goto DONE;
+ }
/* Do we have any active assertions? */
@@ -14097,7 +14266,8 @@ void Compiler::fgMorphTreeDone(GenTreePtr tree,
if ((tree->OperKind() & GTK_ASGOP) &&
(tree->gtOp.gtOp1->gtOper == GT_LCL_VAR || tree->gtOp.gtOp1->gtOper == GT_LCL_FLD))
{
- unsigned op1LclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum; noway_assert(op1LclNum < lvaCount);
+ unsigned op1LclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
+ noway_assert(op1LclNum < lvaCount);
fgKillDependentAssertions(op1LclNum DEBUGARG(tree));
}
}
@@ -14115,20 +14285,21 @@ DONE:;
#endif
}
-
/*****************************************************************************
*
* Check and fold blocks of type BBJ_COND and BBJ_SWITCH on constants
* Returns true if we modified the flow graph
*/
-bool Compiler::fgFoldConditional(BasicBlock * block)
+bool Compiler::fgFoldConditional(BasicBlock* block)
{
bool result = false;
// We don't want to make any code unreachable
if (opts.compDbgCode || opts.MinOpts())
- return false;
+ {
+ return false;
+ }
if (block->bbJumpKind == BBJ_COND)
{
@@ -14136,7 +14307,7 @@ bool Compiler::fgFoldConditional(BasicBlock * block)
GenTreePtr stmt = block->bbTreeList->gtPrev;
- noway_assert(stmt->gtNext == NULL);
+ noway_assert(stmt->gtNext == nullptr);
if (stmt->gtStmt.gtStmtExpr->gtOper == GT_CALL)
{
@@ -14152,7 +14323,7 @@ bool Compiler::fgFoldConditional(BasicBlock * block)
fgRemoveRefPred(block->bbJumpDest, block);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nConditional folded at BB%02u\n", block->bbNum);
printf("BB%02u becomes a BBJ_THROW\n", block->bbNum);
@@ -14166,7 +14337,8 @@ bool Compiler::fgFoldConditional(BasicBlock * block)
/* Did we fold the conditional */
noway_assert(stmt->gtStmt.gtStmtExpr->gtOp.gtOp1);
- GenTreePtr cond; cond = stmt->gtStmt.gtStmtExpr->gtOp.gtOp1;
+ GenTreePtr cond;
+ cond = stmt->gtStmt.gtStmtExpr->gtOp.gtOp1;
if (cond->OperKind() & GTK_CONST)
{
@@ -14174,8 +14346,7 @@ bool Compiler::fgFoldConditional(BasicBlock * block)
* Remove the conditional statement */
noway_assert(cond->gtOper == GT_CNS_INT);
- noway_assert((block->bbNext->countOfInEdges() > 0) &&
- (block->bbJumpDest->countOfInEdges() > 0));
+ noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));
/* remove the statement from bbTreelist - No need to update
* the reference counts since there are no lcl vars */
@@ -14185,23 +14356,22 @@ bool Compiler::fgFoldConditional(BasicBlock * block)
// bTaken is the path that will always be taken from block
// bNotTaken is the path that will never be taken from block
//
- BasicBlock * bTaken;
- BasicBlock * bNotTaken;
+ BasicBlock* bTaken;
+ BasicBlock* bNotTaken;
if (cond->gtIntCon.gtIconVal != 0)
{
/* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
block->bbJumpKind = BBJ_ALWAYS;
- bTaken = block->bbJumpDest;
- bNotTaken = block->bbNext;
+ bTaken = block->bbJumpDest;
+ bNotTaken = block->bbNext;
}
else
{
/* Unmark the loop if we are removing a backwards branch */
/* dest block must also be marked as a loop head and */
/* We must be able to reach the backedge block */
- if ((block->bbJumpDest->isLoopHead()) &&
- (block->bbJumpDest->bbNum <= block->bbNum) &&
+ if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) &&
fgReachable(block->bbJumpDest, block))
{
optUnmarkLoopBlocks(block->bbJumpDest, block);
@@ -14220,8 +14390,8 @@ bool Compiler::fgFoldConditional(BasicBlock * block)
// and we have already computed the edge weights, so
// we will try to adjust some of the weights
//
- flowList * edgeTaken = fgGetPredForBlock(bTaken, block);
- BasicBlock * bUpdated = NULL; // non-NULL if we updated the weight of an internal block
+ flowList* edgeTaken = fgGetPredForBlock(bTaken, block);
+ BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block
// We examine the taken edge (block -> bTaken)
// if block has valid profile weight and bTaken does not we try to adjust bTaken's weight
@@ -14258,29 +14428,29 @@ bool Compiler::fgFoldConditional(BasicBlock * block)
}
}
- if (bUpdated != NULL)
+ if (bUpdated != nullptr)
{
- flowList * edge;
+ flowList* edge;
// Now fix the weights of the edges out of 'bUpdated'
- switch (bUpdated->bbJumpKind) {
- case BBJ_NONE:
- edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
- edge->flEdgeWeightMax = bUpdated->bbWeight;
- break;
- case BBJ_COND:
- edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
- edge->flEdgeWeightMax = bUpdated->bbWeight;
- __fallthrough;
- case BBJ_ALWAYS:
- edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
- edge->flEdgeWeightMax = bUpdated->bbWeight;
- break;
- default:
- // We don't handle BBJ_SWITCH
- break;
+ switch (bUpdated->bbJumpKind)
+ {
+ case BBJ_NONE:
+ edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
+ edge->flEdgeWeightMax = bUpdated->bbWeight;
+ break;
+ case BBJ_COND:
+ edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
+ edge->flEdgeWeightMax = bUpdated->bbWeight;
+ __fallthrough;
+ case BBJ_ALWAYS:
+ edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
+ edge->flEdgeWeightMax = bUpdated->bbWeight;
+ break;
+ default:
+ // We don't handle BBJ_SWITCH
+ break;
}
}
-
}
/* modify the flow graph */
@@ -14289,13 +14459,15 @@ bool Compiler::fgFoldConditional(BasicBlock * block)
fgRemoveRefPred(bNotTaken, block);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nConditional folded at BB%02u\n", block->bbNum);
printf("BB%02u becomes a %s", block->bbNum,
block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE");
- if (block->bbJumpKind == BBJ_ALWAYS)
+ if (block->bbJumpKind == BBJ_ALWAYS)
+ {
printf(" to BB%02u", block->bbJumpDest->bbNum);
+ }
printf("\n");
}
#endif
@@ -14309,41 +14481,41 @@ bool Compiler::fgFoldConditional(BasicBlock * block)
* loop unrolling or conditional folding */
if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED)
+ {
continue;
+ }
/* We are only interested in the loop bottom */
- if (optLoopTable[loopNum].lpBottom == block)
+ if (optLoopTable[loopNum].lpBottom == block)
{
- if (cond->gtIntCon.gtIconVal == 0)
+ if (cond->gtIntCon.gtIconVal == 0)
{
/* This was a bogus loop (condition always false)
* Remove the loop from the table */
optLoopTable[loopNum].lpFlags |= LPFLG_REMOVED;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("Removing loop L%02u (from BB%02u to BB%02u)\n\n",
- loopNum,
- optLoopTable[loopNum].lpFirst ->bbNum,
- optLoopTable[loopNum].lpBottom->bbNum);
+ printf("Removing loop L%02u (from BB%02u to BB%02u)\n\n", loopNum,
+ optLoopTable[loopNum].lpFirst->bbNum, optLoopTable[loopNum].lpBottom->bbNum);
}
#endif
}
}
}
-DONE_COND:
+ DONE_COND:
result = true;
}
}
- else if (block->bbJumpKind == BBJ_SWITCH)
+ else if (block->bbJumpKind == BBJ_SWITCH)
{
noway_assert(block->bbTreeList && block->bbTreeList->gtPrev);
GenTreePtr stmt = block->bbTreeList->gtPrev;
- noway_assert(stmt->gtNext == NULL);
+ noway_assert(stmt->gtNext == nullptr);
if (stmt->gtStmt.gtStmtExpr->gtOper == GT_CALL)
{
@@ -14354,23 +14526,22 @@ DONE_COND:
/* update the flow graph */
- unsigned jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock * * jumpTab = block->bbJumpSwt->bbsDstTab;
+ unsigned jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab = block->bbJumpSwt->bbsDstTab;
for (unsigned val = 0; val < jumpCnt; val++, jumpTab++)
{
- BasicBlock * curJump = *jumpTab;
+ BasicBlock* curJump = *jumpTab;
/* Remove 'block' from the predecessor list of 'curJump' */
fgRemoveRefPred(curJump, block);
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nConditional folded at BB%02u\n", block->bbNum);
printf("BB%02u becomes a BBJ_THROW\n", block->bbNum);
-
}
#endif
goto DONE_SWITCH;
@@ -14381,7 +14552,8 @@ DONE_COND:
/* Did we fold the conditional */
noway_assert(stmt->gtStmt.gtStmtExpr->gtOp.gtOp1);
- GenTreePtr cond; cond = stmt->gtStmt.gtStmtExpr->gtOp.gtOp1;
+ GenTreePtr cond;
+ cond = stmt->gtStmt.gtStmtExpr->gtOp.gtOp1;
if (cond->OperKind() & GTK_CONST)
{
@@ -14397,21 +14569,25 @@ DONE_COND:
/* modify the flow graph */
/* Find the actual jump target */
- unsigned switchVal; switchVal = (unsigned)cond->gtIntCon.gtIconVal;
- unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock * * jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab;
- bool foundVal; foundVal = false;
+ unsigned switchVal;
+ switchVal = (unsigned)cond->gtIntCon.gtIconVal;
+ unsigned jumpCnt;
+ jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab;
+ jumpTab = block->bbJumpSwt->bbsDstTab;
+ bool foundVal;
+ foundVal = false;
for (unsigned val = 0; val < jumpCnt; val++, jumpTab++)
{
- BasicBlock * curJump = *jumpTab;
+ BasicBlock* curJump = *jumpTab;
- assert (curJump->countOfInEdges() > 0);
+ assert(curJump->countOfInEdges() > 0);
// If val matches switchVal or we are at the last entry and
// we never found the switch value then set the new jump dest
- if ( (val == switchVal) || (!foundVal && (val == jumpCnt-1)))
+ if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
{
if (curJump != block->bbNext)
{
@@ -14419,9 +14595,11 @@ DONE_COND:
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = curJump;
- //if we are jumping backwards, make sure we have a GC Poll.
+ // if we are jumping backwards, make sure we have a GC Poll.
if (curJump->bbNum > block->bbNum)
+ {
block->bbFlags &= ~BBF_NEEDS_GCPOLL;
+ }
}
else
{
@@ -14438,24 +14616,25 @@ DONE_COND:
}
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nConditional folded at BB%02u\n", block->bbNum);
printf("BB%02u becomes a %s", block->bbNum,
block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE");
- if (block->bbJumpKind == BBJ_ALWAYS)
+ if (block->bbJumpKind == BBJ_ALWAYS)
+ {
printf(" to BB%02u", block->bbJumpDest->bbNum);
+ }
printf("\n");
}
#endif
-DONE_SWITCH:
+ DONE_SWITCH:
result = true;
}
}
return result;
}
-
//*****************************************************************************
//
// Morphs a single statement in a block.
@@ -14465,18 +14644,16 @@ DONE_SWITCH:
// Returns false if 'stmt' is still in the block (even if other statements were removed).
//
-bool Compiler::fgMorphBlockStmt(BasicBlock * block,
- GenTreePtr stmt
- DEBUGARG(const char * msg) )
+bool Compiler::fgMorphBlockStmt(BasicBlock* block, GenTreePtr stmt DEBUGARG(const char* msg))
{
noway_assert(stmt->gtOper == GT_STMT);
- compCurBB = block;
+ compCurBB = block;
compCurStmt = stmt;
- GenTreePtr morph = fgMorphTree(stmt->gtStmt.gtStmtExpr);
+ GenTreePtr morph = fgMorphTree(stmt->gtStmt.gtStmtExpr);
- // Bug 1106830 - During the CSE phase we can't just remove
+ // Bug 1106830 - During the CSE phase we can't just remove
// morph->gtOp.gtOp2 as it could contain CSE expressions.
// This leads to a noway_assert in OptCSE.cpp when
// searching for the removed CSE ref. (using gtFindLink)
@@ -14523,13 +14700,15 @@ bool Compiler::fgMorphBlockStmt(BasicBlock * block,
/* Or this is the last statement of a conditional branch that was just folded */
- if ((!removedStmt) && (stmt->gtNext == NULL) && !fgRemoveRestOfBlock)
+ if ((!removedStmt) && (stmt->gtNext == nullptr) && !fgRemoveRestOfBlock)
{
- if (fgFoldConditional(block))
- {
+ if (fgFoldConditional(block))
+ {
if (block->bbJumpKind != BBJ_THROW)
+ {
removedStmt = true;
- }
+ }
+ }
}
if (!removedStmt)
@@ -14571,7 +14750,8 @@ bool Compiler::fgMorphBlockStmt(BasicBlock * block,
// For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE.
// We should not convert it to a ThrowBB.
- if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0) ) {
+ if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0))
+ {
// Convert block to a throw bb
fgConvertBBToThrowBB(block);
}
@@ -14595,8 +14775,7 @@ bool Compiler::fgMorphBlockStmt(BasicBlock * block,
* for reentrant calls.
*/
-void Compiler::fgMorphStmts(BasicBlock * block,
- bool * mult, bool * lnot, bool * loadw)
+void Compiler::fgMorphStmts(BasicBlock* block, bool* mult, bool* lnot, bool* loadw)
{
fgRemoveRestOfBlock = false;
@@ -14611,10 +14790,7 @@ void Compiler::fgMorphStmts(BasicBlock * block,
fgCurrentlyInUseArgTemps = hashBv::Create(this);
GenTreePtr stmt, prev;
- for (stmt = block->bbTreeList, prev = NULL;
- stmt;
- prev = stmt->gtStmt.gtStmtExpr,
- stmt = stmt->gtNext)
+ for (stmt = block->bbTreeList, prev = nullptr; stmt; prev = stmt->gtStmt.gtStmtExpr, stmt = stmt->gtNext)
{
noway_assert(stmt->gtOper == GT_STMT);
@@ -14624,22 +14800,23 @@ void Compiler::fgMorphStmts(BasicBlock * block,
continue;
}
#ifdef FEATURE_SIMD
- if (!opts.MinOpts() &&
- stmt->gtStmt.gtStmtExpr->TypeGet() == TYP_FLOAT &&
+ if (!opts.MinOpts() && stmt->gtStmt.gtStmtExpr->TypeGet() == TYP_FLOAT &&
stmt->gtStmt.gtStmtExpr->OperGet() == GT_ASG)
{
fgMorphCombineSIMDFieldAssignments(block, stmt);
}
#endif
- fgMorphStmt = stmt;
- compCurStmt = stmt;
- GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
+ fgMorphStmt = stmt;
+ compCurStmt = stmt;
+ GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
#ifdef DEBUG
compCurStmtNum++;
if (stmt == block->bbTreeList)
- block->bbStmtNum = compCurStmtNum; // Set the block->bbStmtNum
+ {
+ block->bbStmtNum = compCurStmtNum; // Set the block->bbStmtNum
+ }
unsigned oldHash = verbose ? gtHashValue(tree) : DUMMY_INIT(~0);
@@ -14652,10 +14829,10 @@ void Compiler::fgMorphStmts(BasicBlock * block,
/* Morph this statement tree */
- GenTreePtr morph = fgMorphTree(tree);
+ GenTreePtr morph = fgMorphTree(tree);
// mark any outgoing arg temps as free so we can reuse them in the next statement.
-
+
fgCurrentlyInUseArgTemps->ZeroAll();
// Has fgMorphStmt been sneakily changed ?
@@ -14668,15 +14845,16 @@ void Compiler::fgMorphStmts(BasicBlock * block,
morph = stmt->gtStmt.gtStmtExpr;
noway_assert(compTailCallUsed);
noway_assert((morph->gtOper == GT_CALL) && morph->AsCall()->IsTailCall());
- noway_assert(stmt->gtNext == NULL);
+ noway_assert(stmt->gtNext == nullptr);
GenTreeCall* call = morph->AsCall();
- // Could either be
+ // Could either be
// - a tail call dispatched via helper in which case block will be ending with BBJ_THROW or
- // - a fast call made as jmp in which case block will be ending with BBJ_RETURN and marked as containing
+ // - a fast call made as jmp in which case block will be ending with BBJ_RETURN and marked as containing
// a jmp.
- noway_assert((call->IsTailCallViaHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) ||
- (call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && (compCurBB->bbFlags & BBF_HAS_JMP)));
+ noway_assert((call->IsTailCallViaHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) ||
+ (call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) &&
+ (compCurBB->bbFlags & BBF_HAS_JMP)));
}
else if (block != compCurBB)
{
@@ -14691,16 +14869,17 @@ void Compiler::fgMorphStmts(BasicBlock * block,
noway_assert(compTailCallUsed);
noway_assert((tree->gtOper == GT_CALL) && tree->AsCall()->IsTailCall());
- noway_assert(stmt->gtNext == NULL);
+ noway_assert(stmt->gtNext == nullptr);
GenTreeCall* call = morph->AsCall();
- // Could either be
+ // Could either be
// - a tail call dispatched via helper in which case block will be ending with BBJ_THROW or
// - a fast call made as jmp in which case block will be ending with BBJ_RETURN and marked as containing
// a jmp.
- noway_assert((call->IsTailCallViaHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) ||
- (call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && (compCurBB->bbFlags & BBF_HAS_JMP)));
+ noway_assert((call->IsTailCallViaHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) ||
+ (call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) &&
+ (compCurBB->bbFlags & BBF_HAS_JMP)));
}
#ifdef DEBUG
@@ -14752,39 +14931,46 @@ void Compiler::fgMorphStmts(BasicBlock * block,
noway_assert(fgPtrArgCntCur == 0);
if (fgRemoveRestOfBlock)
+ {
continue;
+ }
/* Has the statement been optimized away */
if (fgCheckRemoveStmt(block, stmt))
+ {
continue;
+ }
/* Check if this block ends with a conditional branch that can be folded */
if (fgFoldConditional(block))
+ {
continue;
+ }
- if (ehBlockHasExnFlowDsc(block))
+ if (ehBlockHasExnFlowDsc(block))
+ {
continue;
+ }
#if OPT_MULT_ADDSUB
/* Note whether we have two or more +=/-= operators in a row */
- if (tree->gtOper == GT_ASG_ADD ||
- tree->gtOper == GT_ASG_SUB)
+ if (tree->gtOper == GT_ASG_ADD || tree->gtOper == GT_ASG_SUB)
{
- if (prev && prev->gtOper == tree->gtOper)
+ if (prev && prev->gtOper == tree->gtOper)
+ {
*mult = true;
+ }
}
#endif
/* Note "x = a[i] & icon" followed by "x |= a[i] << 8" */
- if (tree->gtOper == GT_ASG_OR &&
- prev &&
- prev->gtOper == GT_ASG)
+ if (tree->gtOper == GT_ASG_OR && prev && prev->gtOper == GT_ASG)
{
*loadw = true;
}
@@ -14794,14 +14980,16 @@ void Compiler::fgMorphStmts(BasicBlock * block,
{
if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH))
{
- GenTreePtr first = block->bbTreeList; noway_assert(first);
- GenTreePtr last = first->gtPrev; noway_assert(last && last->gtNext == NULL);
+ GenTreePtr first = block->bbTreeList;
+ noway_assert(first);
+ GenTreePtr last = first->gtPrev;
+ noway_assert(last && last->gtNext == nullptr);
GenTreePtr lastStmt = last->gtStmt.gtStmtExpr;
- if (((block->bbJumpKind == BBJ_COND ) && (lastStmt->gtOper == GT_JTRUE )) ||
- ((block->bbJumpKind == BBJ_SWITCH) && (lastStmt->gtOper == GT_SWITCH)) )
+ if (((block->bbJumpKind == BBJ_COND) && (lastStmt->gtOper == GT_JTRUE)) ||
+ ((block->bbJumpKind == BBJ_SWITCH) && (lastStmt->gtOper == GT_SWITCH)))
{
- GenTreePtr op1 = lastStmt->gtOp.gtOp1;
+ GenTreePtr op1 = lastStmt->gtOp.gtOp1;
if (op1->OperKind() & GTK_RELOP)
{
@@ -14842,18 +15030,20 @@ void Compiler::fgMorphStmts(BasicBlock * block,
* This function should be called just once.
*/
-void Compiler::fgMorphBlocks()
+void Compiler::fgMorphBlocks()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("\n*************** In fgMorphBlocks()\n");
+ }
#endif
/* Since fgMorphTree can be called after various optimizations to re-arrange
* the nodes we need a global flag to signal if we are during the one-pass
* global morphing */
- fgGlobalMorph = true;
+ fgGlobalMorph = true;
#if LOCAL_ASSERTION_PROP
//
@@ -14882,7 +15072,8 @@ void Compiler::fgMorphBlocks()
* Process all basic blocks in the function
*/
- BasicBlock * block = fgFirstBB; noway_assert(block);
+ BasicBlock* block = fgFirstBB;
+ noway_assert(block);
#ifdef DEBUG
compCurStmtNum = 0;
@@ -14891,18 +15082,20 @@ void Compiler::fgMorphBlocks()
do
{
#if OPT_MULT_ADDSUB
- bool mult = false;
+ bool mult = false;
#endif
#if OPT_BOOL_OPS
- bool lnot = false;
+ bool lnot = false;
#endif
- bool loadw = false;
+ bool loadw = false;
#ifdef DEBUG
if (verbose)
+ {
printf("\nMorphing BB%02u of '%s'\n", block->bbNum, info.compFullName);
+ }
#endif
#if LOCAL_ASSERTION_PROP
@@ -14919,77 +15112,95 @@ void Compiler::fgMorphBlocks()
/* Process all statement trees in the basic block */
- GenTreePtr tree;
+ GenTreePtr tree;
fgMorphStmts(block, &mult, &lnot, &loadw);
#if OPT_MULT_ADDSUB
- if (mult && (opts.compFlags & CLFLG_TREETRANS) &&
- !opts.compDbgCode && !opts.MinOpts())
+ if (mult && (opts.compFlags & CLFLG_TREETRANS) && !opts.compDbgCode && !opts.MinOpts())
{
for (tree = block->bbTreeList; tree; tree = tree->gtNext)
{
noway_assert(tree->gtOper == GT_STMT);
GenTreePtr last = tree->gtStmt.gtStmtExpr;
- if (last->gtOper == GT_ASG_ADD ||
- last->gtOper == GT_ASG_SUB)
+ if (last->gtOper == GT_ASG_ADD || last->gtOper == GT_ASG_SUB)
{
- GenTreePtr temp;
- GenTreePtr next;
+ GenTreePtr temp;
+ GenTreePtr next;
- GenTreePtr dst1 = last->gtOp.gtOp1;
- GenTreePtr src1 = last->gtOp.gtOp2;
+ GenTreePtr dst1 = last->gtOp.gtOp1;
+ GenTreePtr src1 = last->gtOp.gtOp2;
- if (!last->IsCnsIntOrI())
+ if (!last->IsCnsIntOrI())
+ {
goto NOT_CAFFE;
+ }
- if (dst1->gtOper != GT_LCL_VAR)
+ if (dst1->gtOper != GT_LCL_VAR)
+ {
goto NOT_CAFFE;
- if (!src1->IsCnsIntOrI())
+ }
+ if (!src1->IsCnsIntOrI())
+ {
goto NOT_CAFFE;
+ }
for (;;)
{
- GenTreePtr dst2;
- GenTreePtr src2;
+ GenTreePtr dst2;
+ GenTreePtr src2;
/* Look at the next statement */
temp = tree->gtNext;
- if (!temp)
+ if (!temp)
+ {
goto NOT_CAFFE;
+ }
noway_assert(temp->gtOper == GT_STMT);
next = temp->gtStmt.gtStmtExpr;
- if (next->gtOper != last->gtOper)
+ if (next->gtOper != last->gtOper)
+ {
goto NOT_CAFFE;
- if (next->gtType != last->gtType)
+ }
+ if (next->gtType != last->gtType)
+ {
goto NOT_CAFFE;
+ }
dst2 = next->gtOp.gtOp1;
src2 = next->gtOp.gtOp2;
- if (dst2->gtOper != GT_LCL_VAR)
+ if (dst2->gtOper != GT_LCL_VAR)
+ {
goto NOT_CAFFE;
- if (dst2->gtLclVarCommon.gtLclNum != dst1->gtLclVarCommon.gtLclNum)
+ }
+ if (dst2->gtLclVarCommon.gtLclNum != dst1->gtLclVarCommon.gtLclNum)
+ {
goto NOT_CAFFE;
+ }
- if (!src2->IsCnsIntOrI())
+ if (!src2->IsCnsIntOrI())
+ {
goto NOT_CAFFE;
+ }
- if (last->gtOverflow() != next->gtOverflow())
+ if (last->gtOverflow() != next->gtOverflow())
+ {
goto NOT_CAFFE;
+ }
- const ssize_t i1 = src1->gtIntCon.gtIconVal;
- const ssize_t i2 = src2->gtIntCon.gtIconVal;
+ const ssize_t i1 = src1->gtIntCon.gtIconVal;
+ const ssize_t i2 = src2->gtIntCon.gtIconVal;
const ssize_t itemp = i1 + i2;
/* if the operators are checking for overflow, check for overflow of the operands */
- if (next->gtOverflow())
+ if (next->gtOverflow())
{
if (next->TypeGet() == TYP_LONG)
{
@@ -14997,26 +15208,34 @@ void Compiler::fgMorphBlocks()
{
ClrSafeInt<UINT64> si1(i1);
if ((si1 + ClrSafeInt<UINT64>(i2)).IsOverflow())
+ {
goto NOT_CAFFE;
+ }
}
else
{
ClrSafeInt<INT64> si1(i1);
if ((si1 + ClrSafeInt<INT64>(i2)).IsOverflow())
+ {
goto NOT_CAFFE;
+ }
}
}
else if (next->gtFlags & GTF_UNSIGNED)
{
ClrSafeInt<UINT32> si1(i1);
if ((si1 + ClrSafeInt<UINT32>(i2)).IsOverflow())
+ {
goto NOT_CAFFE;
+ }
}
else
{
ClrSafeInt<INT32> si1(i1);
if ((si1 + ClrSafeInt<INT32>(i2)).IsOverflow())
+ {
goto NOT_CAFFE;
+ }
}
}
@@ -15035,7 +15254,7 @@ void Compiler::fgMorphBlocks()
noway_assert(tree->gtNext == temp);
noway_assert(temp->gtPrev == tree);
- if (temp->gtNext)
+ if (temp->gtNext)
{
noway_assert(temp->gtNext->gtPrev == temp);
@@ -15044,7 +15263,7 @@ void Compiler::fgMorphBlocks()
}
else
{
- tree->gtNext = 0;
+ tree->gtNext = nullptr;
noway_assert(block->bbTreeList->gtPrev == temp);
@@ -15054,9 +15273,7 @@ void Compiler::fgMorphBlocks()
}
NOT_CAFFE:;
-
}
-
}
#endif
@@ -15065,12 +15282,10 @@ void Compiler::fgMorphBlocks()
if (block->bbJumpKind == BBJ_RETURN)
{
- if ((genReturnBB != nullptr) &&
- (genReturnBB != block) &&
- ((block->bbFlags & BBF_HAS_JMP) == 0))
- {
- /* We'll jump to the genReturnBB */
- CLANG_FORMAT_COMMENT_ANCHOR;
+ if ((genReturnBB != nullptr) && (genReturnBB != block) && ((block->bbFlags & BBF_HAS_JMP) == 0))
+ {
+ /* We'll jump to the genReturnBB */
+ CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(_TARGET_X86_)
if (info.compFlags & CORINFO_FLG_SYNCH)
@@ -15093,13 +15308,13 @@ void Compiler::fgMorphBlocks()
// It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC.
// For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal
// is BAD_VAR_NUM.
- //
+ //
// TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN.
GenTreePtr last = (block->bbTreeList != nullptr) ? block->bbTreeList->gtPrev : nullptr;
- GenTreePtr ret = (last != nullptr) ? last->gtStmt.gtStmtExpr : nullptr;
-
- //replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal.
+ GenTreePtr ret = (last != nullptr) ? last->gtStmt.gtStmtExpr : nullptr;
+
+ // replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal.
if (genReturnLocal != BAD_VAR_NUM)
{
// Method must be returning a value other than TYP_VOID.
@@ -15108,10 +15323,11 @@ void Compiler::fgMorphBlocks()
// This block must be ending with a GT_RETURN
noway_assert(last != nullptr);
noway_assert(last->gtOper == GT_STMT);
- noway_assert(last->gtNext == nullptr);
+ noway_assert(last->gtNext == nullptr);
noway_assert(ret != nullptr);
- // GT_RETURN must have non-null operand as the method is returning the value assigned to genReturnLocal
+ // GT_RETURN must have non-null operand as the method is returning the value assigned to
+ // genReturnLocal
noway_assert(ret->OperGet() == GT_RETURN);
noway_assert(ret->gtGetOp1() != nullptr);
noway_assert(ret->gtGetOp2() == nullptr);
@@ -15120,7 +15336,7 @@ void Compiler::fgMorphBlocks()
last->gtStmt.gtStmtExpr = (tree->OperIsCopyBlkOp()) ? fgMorphCopyBlock(tree) : tree;
- //make sure that copy-prop ignores this assignment.
+ // make sure that copy-prop ignores this assignment.
last->gtStmt.gtStmtExpr->gtFlags |= GTF_DONT_CSE;
}
else if (ret != nullptr && ret->OperGet() == GT_RETURN)
@@ -15129,7 +15345,7 @@ void Compiler::fgMorphBlocks()
noway_assert(last != nullptr);
noway_assert(last->gtOper == GT_STMT);
noway_assert(last->gtNext == nullptr);
-
+
// Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn block
noway_assert(ret->TypeGet() == TYP_VOID);
noway_assert(ret->gtGetOp1() == nullptr);
@@ -15141,42 +15357,38 @@ void Compiler::fgMorphBlocks()
#ifdef DEBUG
if (verbose)
{
- printf("morph BB%02u to point at onereturn. New block is\n",
- block->bbNum);
+ printf("morph BB%02u to point at onereturn. New block is\n", block->bbNum);
fgTableDispBasicBlock(block);
}
#endif
- }
+ }
}
- block = block->bbNext;
- }
- while (block);
+ block = block->bbNext;
+ } while (block);
/* We are done with the global morphing phase */
- fgGlobalMorph = false;
-
+ fgGlobalMorph = false;
#ifdef DEBUG
- if (verboseTrees)
+ if (verboseTrees)
+ {
fgDispBasicBlocks(true);
+ }
#endif
-
}
-
/*****************************************************************************
*
* Make some decisions about the kind of code to generate.
*/
-void Compiler::fgSetOptions()
+void Compiler::fgSetOptions()
{
#ifdef DEBUG
/* Should we force fully interruptible code ? */
- if (JitConfig.JitFullyInt() ||
- compStressCompile(STRESS_GENERIC_VARN, 30))
+ if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30))
{
noway_assert(!codeGen->isGCTypeFixed());
genInterruptible = true;
@@ -15187,19 +15399,20 @@ void Compiler::fgSetOptions()
if (opts.compDbgCode)
{
assert(!codeGen->isGCTypeFixed());
- genInterruptible = true; // debugging is easier this way ...
+ genInterruptible = true; // debugging is easier this way ...
}
#endif
/* Assume we won't need an explicit stack frame if this is allowed */
-
// CORINFO_HELP_TAILCALL won't work with localloc because of the restoring of
// the callee-saved registers.
noway_assert(!compTailCallUsed || !compLocallocUsed);
if (compLocallocUsed)
+ {
codeGen->setFramePointerRequired(true);
+ }
#ifdef _TARGET_X86_
@@ -15209,7 +15422,9 @@ void Compiler::fgSetOptions()
#endif // _TARGET_X86_
if (!opts.genFPopt)
+ {
codeGen->setFramePointerRequired(true);
+ }
// Assert that the EH table has been initialized by now. Note that
// compHndBBtabAllocCount never decreases; it is a high-water mark
@@ -15235,7 +15450,9 @@ void Compiler::fgSetOptions()
#else // !_TARGET_X86_
if (compHndBBtabCount > 0)
+ {
codeGen->setFramePointerRequiredEH(true);
+ }
#endif // _TARGET_X86_
@@ -15248,7 +15465,10 @@ void Compiler::fgSetOptions()
{
#ifdef DEBUG
if (verbose)
- printf("Too many pushed arguments for fully interruptible encoding, marking method as partially interruptible\n");
+ {
+ printf("Too many pushed arguments for fully interruptible encoding, marking method as partially "
+ "interruptible\n");
+ }
#endif
genInterruptible = false;
}
@@ -15256,14 +15476,16 @@ void Compiler::fgSetOptions()
{
#ifdef DEBUG
if (verbose)
+ {
printf("Too many pushed arguments for an ESP based encoding, forcing an EBP frame\n");
+ }
#endif
codeGen->setFramePointerRequiredGCInfo(true);
}
if (info.compCallUnmanaged)
{
- codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame
+ codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame
}
if (info.compPublishStubParam)
@@ -15271,7 +15493,7 @@ void Compiler::fgSetOptions()
codeGen->setFramePointerRequiredGCInfo(true);
}
- if (opts.compNeedSecurityCheck)
+ if (opts.compNeedSecurityCheck)
{
codeGen->setFramePointerRequiredGCInfo(true);
@@ -15306,10 +15528,9 @@ void Compiler::fgSetOptions()
// printf("method will %s be fully interruptible\n", genInterruptible ? " " : "not");
}
-
/*****************************************************************************/
-GenTreePtr Compiler::fgInitThisClass()
+GenTreePtr Compiler::fgInitThisClass()
{
noway_assert(!compIsForInlining());
@@ -15328,45 +15549,39 @@ GenTreePtr Compiler::fgInitThisClass()
switch (kind.runtimeLookupKind)
{
- case CORINFO_LOOKUP_THISOBJ :
- // This code takes a this pointer; but we need to pass the static method desc to get the right point in the hierarchy
- {
- GenTreePtr vtTree = gtNewLclvNode(info.compThisArg, TYP_REF);
- // Vtable pointer of this object
- vtTree = gtNewOperNode(GT_IND, TYP_I_IMPL, vtTree);
- vtTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
- GenTreePtr methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd);
-
- return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS,
- TYP_VOID, 0,
- gtNewArgList(vtTree, methodHnd));
-
- }
+ case CORINFO_LOOKUP_THISOBJ:
+ // This code takes a this pointer; but we need to pass the static method desc to get the right point in
+ // the hierarchy
+ {
+ GenTreePtr vtTree = gtNewLclvNode(info.compThisArg, TYP_REF);
+ // Vtable pointer of this object
+ vtTree = gtNewOperNode(GT_IND, TYP_I_IMPL, vtTree);
+ vtTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
+ GenTreePtr methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd);
+
+ return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, 0,
+ gtNewArgList(vtTree, methodHnd));
+ }
- case CORINFO_LOOKUP_CLASSPARAM :
- {
- GenTreePtr vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
- return gtNewHelperCallNode(CORINFO_HELP_INITCLASS,
- TYP_VOID, 0,
- gtNewArgList(vtTree));
- }
+ case CORINFO_LOOKUP_CLASSPARAM:
+ {
+ GenTreePtr vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
+ return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(vtTree));
+ }
- case CORINFO_LOOKUP_METHODPARAM :
- {
- GenTreePtr methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
- return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS,
- TYP_VOID, 0,
- gtNewArgList(gtNewIconNode(0),methHndTree));
- }
+ case CORINFO_LOOKUP_METHODPARAM:
+ {
+ GenTreePtr methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
+ return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, 0,
+ gtNewArgList(gtNewIconNode(0), methHndTree));
+ }
}
-
}
noway_assert(!"Unknown LOOKUP_KIND");
UNREACHABLE();
}
-
#ifdef DEBUG
/*****************************************************************************
*
@@ -15387,7 +15602,7 @@ void Compiler::fgCheckQmarkAllowedForm(GenTree* tree)
assert(tree->OperGet() == GT_QMARK);
#ifndef LEGACY_BACKEND
assert(!"Qmarks beyond morph disallowed.");
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
GenTreePtr colon = tree->gtOp.gtOp2;
assert(colon->gtOp.gtOp1->IsIntegralConst(0));
@@ -15414,15 +15629,15 @@ void Compiler::fgPreExpandQmarkChecks(GenTreePtr expr)
// If the top level Qmark is null, then scan the tree to make sure
// there are no qmarks within it.
- if (topQmark == NULL)
+ if (topQmark == nullptr)
{
- fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, NULL);
+ fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
else
{
// We could probably expand the cond node also, but don't think the extra effort is necessary,
// so let's just assert the cond node of a top level qmark doesn't have further top level qmarks.
- fgWalkTreePre(&topQmark->gtOp.gtOp1, Compiler::fgAssertNoQmark, NULL);
+ fgWalkTreePre(&topQmark->gtOp.gtOp1, Compiler::fgAssertNoQmark, nullptr);
fgPreExpandQmarkChecks(topQmark->gtOp.gtOp2->gtOp.gtOp1);
fgPreExpandQmarkChecks(topQmark->gtOp.gtOp2->gtOp.gtOp2);
@@ -15439,22 +15654,20 @@ void Compiler::fgPreExpandQmarkChecks(GenTreePtr expr)
*/
GenTreePtr Compiler::fgGetTopLevelQmark(GenTreePtr expr, GenTreePtr* ppDst /* = NULL */)
{
- if (ppDst != NULL)
+ if (ppDst != nullptr)
{
- *ppDst = NULL;
+ *ppDst = nullptr;
}
- GenTreePtr topQmark = NULL;
+ GenTreePtr topQmark = nullptr;
if (expr->gtOper == GT_QMARK)
{
topQmark = expr;
}
- else if (expr->gtOper == GT_ASG &&
- expr->gtOp.gtOp2->gtOper == GT_QMARK &&
- expr->gtOp.gtOp1->gtOper == GT_LCL_VAR)
+ else if (expr->gtOper == GT_ASG && expr->gtOp.gtOp2->gtOper == GT_QMARK && expr->gtOp.gtOp1->gtOper == GT_LCL_VAR)
{
topQmark = expr->gtOp.gtOp2;
- if (ppDst != NULL)
+ if (ppDst != nullptr)
{
*ppDst = expr->gtOp.gtOp1;
}
@@ -15462,7 +15675,6 @@ GenTreePtr Compiler::fgGetTopLevelQmark(GenTreePtr expr, GenTreePtr* ppDst /* =
return topQmark;
}
-
/*********************************************************************************
*
* For a castclass helper call,
@@ -15505,7 +15717,7 @@ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, GenTreePtr stmt)
GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
- GenTreePtr dst = nullptr;
+ GenTreePtr dst = nullptr;
GenTreePtr qmark = fgGetTopLevelQmark(expr, &dst);
noway_assert(dst != nullptr);
@@ -15536,8 +15748,8 @@ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, GenTreePtr stmt)
// This is a rare case that arises when we are doing minopts and encounter isinst of null
// gtFoldExpr was still is able to optimize away part of the tree (but not all).
// That means it does not match our pattern.
-
- // Rather than write code to handle this case, just fake up some nodes to make it match the common
+
+ // Rather than write code to handle this case, just fake up some nodes to make it match the common
// case. Synthesize a comparison that is always true, and for the result-on-true, use the
// entire subtree we expected to be the nested question op.
@@ -15548,8 +15760,8 @@ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, GenTreePtr stmt)
assert(false2Expr->OperGet() == trueExpr->OperGet());
// Clear flags as they are now going to be part of JTRUE.
- assert(condExpr->gtFlags & GTF_RELOP_QMARK);
- condExpr->gtFlags &= ~GTF_RELOP_QMARK;
+ assert(condExpr->gtFlags & GTF_RELOP_QMARK);
+ condExpr->gtFlags &= ~GTF_RELOP_QMARK;
// Create the chain of blocks. See method header comment.
// The order of blocks after this is the following:
@@ -15559,14 +15771,14 @@ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, GenTreePtr stmt)
// if they are going to be cleared by fgSplitBlockAfterStatement(). We currently only do this only
// for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
// remainderBlock will still be GC safe.
- unsigned propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
+ unsigned propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
- BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true);
- BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true);
- BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true);
- BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true);
+ BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true);
+ BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true);
+ BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true);
+ BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true);
remainderBlock->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL | propagateFlags;
@@ -15575,13 +15787,13 @@ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, GenTreePtr stmt)
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
helperBlock->bbFlags &= ~BBF_INTERNAL;
- cond2Block->bbFlags &= ~BBF_INTERNAL;
- cond1Block->bbFlags &= ~BBF_INTERNAL;
- asgBlock->bbFlags &= ~BBF_INTERNAL;
- helperBlock->bbFlags |= BBF_IMPORTED;
- cond2Block->bbFlags |= BBF_IMPORTED;
- cond1Block->bbFlags |= BBF_IMPORTED;
- asgBlock->bbFlags |= BBF_IMPORTED;
+ cond2Block->bbFlags &= ~BBF_INTERNAL;
+ cond1Block->bbFlags &= ~BBF_INTERNAL;
+ asgBlock->bbFlags &= ~BBF_INTERNAL;
+ helperBlock->bbFlags |= BBF_IMPORTED;
+ cond2Block->bbFlags |= BBF_IMPORTED;
+ cond1Block->bbFlags |= BBF_IMPORTED;
+ asgBlock->bbFlags |= BBF_IMPORTED;
}
// Chain the flow correctly.
@@ -15613,7 +15825,7 @@ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, GenTreePtr stmt)
fgInsertStmtAtEnd(cond2Block, jmpStmt);
// AsgBlock should get tmp = op1 assignment.
- trueExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr);
+ trueExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr);
GenTreePtr trueStmt = fgNewStmtFromTree(trueExpr, stmt->gtStmt.gtStmtILoffsx);
fgInsertStmtAtEnd(asgBlock, trueStmt);
@@ -15690,7 +15902,7 @@ void Compiler::fgExpandQmarkStmt(BasicBlock* block, GenTreePtr stmt)
GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
// Retrieve the Qmark node to be expanded.
- GenTreePtr dst = nullptr;
+ GenTreePtr dst = nullptr;
GenTreePtr qmark = fgGetTopLevelQmark(expr, &dst);
if (qmark == nullptr)
{
@@ -15721,7 +15933,7 @@ void Compiler::fgExpandQmarkStmt(BasicBlock* block, GenTreePtr stmt)
assert(!varTypeIsFloating(condExpr->TypeGet()));
- bool hasTrueExpr = (trueExpr->OperGet() != GT_NOP);
+ bool hasTrueExpr = (trueExpr->OperGet() != GT_NOP);
bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP);
assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark!
@@ -15732,12 +15944,12 @@ void Compiler::fgExpandQmarkStmt(BasicBlock* block, GenTreePtr stmt)
// if they are going to be cleared by fgSplitBlockAfterStatement(). We currently only do this only
// for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
// remainderBlock will still be GC safe.
- unsigned propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
+ unsigned propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
- BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true);
- BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true);
+ BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true);
+ BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true);
// These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
// If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
@@ -15745,8 +15957,8 @@ void Compiler::fgExpandQmarkStmt(BasicBlock* block, GenTreePtr stmt)
{
condBlock->bbFlags &= ~BBF_INTERNAL;
elseBlock->bbFlags &= ~BBF_INTERNAL;
- condBlock->bbFlags |= BBF_IMPORTED;
- elseBlock->bbFlags |= BBF_IMPORTED;
+ condBlock->bbFlags |= BBF_IMPORTED;
+ elseBlock->bbFlags |= BBF_IMPORTED;
}
remainderBlock->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL | propagateFlags;
@@ -15771,12 +15983,12 @@ void Compiler::fgExpandQmarkStmt(BasicBlock* block, GenTreePtr stmt)
gtReverseCond(condExpr);
condBlock->bbJumpDest = elseBlock;
- thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true);
+ thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true);
thenBlock->bbJumpDest = remainderBlock;
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
thenBlock->bbFlags &= ~BBF_INTERNAL;
- thenBlock->bbFlags |= BBF_IMPORTED;
+ thenBlock->bbFlags |= BBF_IMPORTED;
}
elseBlock->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
@@ -15910,7 +16122,7 @@ void Compiler::fgPostExpandQmarkChecks()
for (GenTreePtr stmt = block->bbTreeList; stmt; stmt = stmt->gtNext)
{
GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
- fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, NULL);
+ fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
}
}
@@ -15921,23 +16133,28 @@ void Compiler::fgPostExpandQmarkChecks()
* Transform all basic blocks for codegen.
*/
-void Compiler::fgMorph()
+void Compiler::fgMorph()
{
noway_assert(!compIsForInlining()); // Inlinee's compiler should never reach here.
fgOutgoingArgTemps = nullptr;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In fgMorph()\n");
- if (verboseTrees)
+ }
+ if (verboseTrees)
+ {
fgDispBasicBlocks(true);
+ }
#endif // DEBUG
// Insert call to class constructor as the first basic block if
// we were asked to do so.
- if (info.compCompHnd->initClass(NULL /* field */, info.compMethodHnd /* method */,
- impTokenLookupContextHandle /* context */) & CORINFO_INITCLASS_USE_HELPER)
+ if (info.compCompHnd->initClass(nullptr /* field */, info.compMethodHnd /* method */,
+ impTokenLookupContextHandle /* context */) &
+ CORINFO_INITCLASS_USE_HELPER)
{
fgEnsureFirstBBisScratch();
fgInsertStmtAtBeg(fgFirstBB, fgInitThisClass());
@@ -15951,9 +16168,9 @@ void Compiler::fgMorph()
if (lvaTable[i].TypeGet() == TYP_REF)
{
// confirm that the argument is a GC pointer (for debugging (GC stress))
- GenTreePtr op = gtNewLclvNode(i, TYP_REF);
+ GenTreePtr op = gtNewLclvNode(i, TYP_REF);
GenTreeArgList* args = gtNewArgList(op);
- op = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_VOID, 0, args);
+ op = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_VOID, 0, args);
fgEnsureFirstBBisScratch();
fgInsertStmtAtEnd(fgFirstBB, op);
@@ -15963,13 +16180,13 @@ void Compiler::fgMorph()
if (opts.compStackCheckOnRet)
{
- lvaReturnEspCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("ReturnEspCheck"));
+ lvaReturnEspCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("ReturnEspCheck"));
lvaTable[lvaReturnEspCheck].lvType = TYP_INT;
}
if (opts.compStackCheckOnCall)
{
- lvaCallEspCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("CallEspCheck"));
+ lvaCallEspCheck = lvaGrabTempWithImplicitUse(false DEBUGARG("CallEspCheck"));
lvaTable[lvaCallEspCheck].lvType = TYP_INT;
}
#endif // DEBUG
@@ -15998,7 +16215,7 @@ void Compiler::fgMorph()
DBEXEC(VERBOSE, fgDispBasicBlocks(true));
#endif
- RecordStateAtEndOfInlining(); // Record "start" values for post-inlining cycles and elapsed time.
+ RecordStateAtEndOfInlining(); // Record "start" values for post-inlining cycles and elapsed time.
#ifdef DEBUG
/* Inliner could add basic blocks. Check that the flowgraph data is up-to-date */
@@ -16036,27 +16253,32 @@ void Compiler::fgMorph()
fgExpandQmarkNodes();
#ifdef DEBUG
- compCurBB = 0;
+ compCurBB = nullptr;
#endif // DEBUG
}
-
/*****************************************************************************
*
* Promoting struct locals
*/
-void Compiler::fgPromoteStructs()
+void Compiler::fgPromoteStructs()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In fgPromoteStructs()\n");
+ }
#endif // DEBUG
if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE))
+ {
return;
+ }
if (fgNoStructPromotion)
+ {
return;
+ }
#if 0
// The code in this #if has been useful in debugging struct promotion issues, by
@@ -16090,29 +16312,31 @@ void Compiler::fgPromoteStructs()
#endif // 0
if (info.compIsVarArgs)
+ {
return;
+ }
if (getNeedsGSSecurityCookie())
+ {
return;
+ }
// The lvaTable might grow as we grab temps. Make a local copy here.
- unsigned startLvaCount = lvaCount;
+ unsigned startLvaCount = lvaCount;
//
// Loop through the original lvaTable. Looking for struct locals to be promoted.
//
lvaStructPromotionInfo structPromotionInfo;
- bool tooManyLocals = false;
+ bool tooManyLocals = false;
- for (unsigned lclNum = 0;
- lclNum < startLvaCount;
- lclNum++)
+ for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++)
{
// Whether this var got promoted
- bool promotedVar = false;
- LclVarDsc* varDsc = &lvaTable[lclNum];
+ bool promotedVar = false;
+ LclVarDsc* varDsc = &lvaTable[lclNum];
#ifdef FEATURE_SIMD
if (varDsc->lvSIMDType && varDsc->lvUsedInSIMDIntrinsic)
@@ -16122,9 +16346,9 @@ void Compiler::fgPromoteStructs()
varDsc->lvRegStruct = true;
}
else
-#endif //FEATURE_SIMD
- // Don't promote if we have reached the tracking limit.
- if (lvaHaveManyLocals())
+#endif // FEATURE_SIMD
+ // Don't promote if we have reached the tracking limit.
+ if (lvaHaveManyLocals())
{
// Print the message first time when we detected this condition
if (!tooManyLocals)
@@ -16146,7 +16370,7 @@ void Compiler::fgPromoteStructs()
else if (varTypeIsStruct(varDsc))
{
lvaCanPromoteStructVar(lclNum, &structPromotionInfo);
- bool canPromote = structPromotionInfo.canPromote;
+ bool canPromote = structPromotionInfo.canPromote;
// We start off with shouldPromote same as canPromote.
// Based on further profitablity checks done below, shouldPromote
@@ -16155,7 +16379,7 @@ void Compiler::fgPromoteStructs()
if (canPromote)
{
-
+
// We *can* promote; *should* we promote?
// We should only do so if promotion has potential savings. One source of savings
// is if a field of the struct is accessed, since this access will be turned into
@@ -16163,12 +16387,12 @@ void Compiler::fgPromoteStructs()
// field accesses, but only block-level operations on the whole struct, if the struct
// has only one or two fields, then doing those block operations field-wise is probably faster
// than doing a whole-variable block operation (e.g., a hardware "copy loop" on x86).
- // So if no fields are accessed independently, and there are three or more fields,
+ // So if no fields are accessed independently, and there are three or more fields,
// then do not promote.
if (structPromotionInfo.fieldCnt > 2 && !varDsc->lvFieldAccessed)
{
- JITDUMP("Not promoting promotable struct local V%02u: #fields = %d, fieldAccessed = %d.\n",
- lclNum, structPromotionInfo.fieldCnt, varDsc->lvFieldAccessed);
+ JITDUMP("Not promoting promotable struct local V%02u: #fields = %d, fieldAccessed = %d.\n", lclNum,
+ structPromotionInfo.fieldCnt, varDsc->lvFieldAccessed);
shouldPromote = false;
}
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
@@ -16177,13 +16401,14 @@ void Compiler::fgPromoteStructs()
// TODO-LSRA - Currently doesn't support the passing of floating point LCL_VARS in the integer registers
//
// For now we currently don't promote structs with a single float field
- // Promoting it can cause us to shuffle it back and forth between the int and
+ // Promoting it can cause us to shuffle it back and forth between the int and
// the float regs when it is used as a argument, which is very expensive for XARCH
//
else if ((structPromotionInfo.fieldCnt == 1) &&
- varTypeIsFloating(structPromotionInfo.fields[0].fldType))
+ varTypeIsFloating(structPromotionInfo.fields[0].fldType))
{
- JITDUMP("Not promoting promotable struct local V%02u: #fields = %d because it is a struct with single float field.\n",
+ JITDUMP("Not promoting promotable struct local V%02u: #fields = %d because it is a struct with "
+ "single float field.\n",
lclNum, structPromotionInfo.fieldCnt);
shouldPromote = false;
}
@@ -16196,42 +16421,45 @@ void Compiler::fgPromoteStructs()
//
else if (lvaIsMultiregStruct(varDsc))
{
- JITDUMP("Not promoting promotable multireg struct local V%02u (size==%d): ",
- lclNum, lvaLclExactSize(lclNum));
+ JITDUMP("Not promoting promotable multireg struct local V%02u (size==%d): ", lclNum,
+ lvaLclExactSize(lclNum));
shouldPromote = false;
}
#endif // _TARGET_ARM64_
#endif // !FEATURE_MULTIREG_STRUCT_PROMOTE
else if (varDsc->lvIsParam)
{
-#if FEATURE_MULTIREG_STRUCT_PROMOTE
- if (lvaIsMultiregStruct(varDsc) && // Is this a variable holding a value that is passed in multiple registers?
- (structPromotionInfo.fieldCnt != 2)) // Does it have exactly two fields
+#if FEATURE_MULTIREG_STRUCT_PROMOTE
+ if (lvaIsMultiregStruct(
+ varDsc) && // Is this a variable holding a value that is passed in multiple registers?
+ (structPromotionInfo.fieldCnt != 2)) // Does it have exactly two fields
{
- JITDUMP("Not promoting multireg struct local V%02u, because lvIsParam is true and #fields != 2\n",
- lclNum);
+ JITDUMP(
+ "Not promoting multireg struct local V%02u, because lvIsParam is true and #fields != 2\n",
+ lclNum);
shouldPromote = false;
}
else
-#endif // !FEATURE_MULTIREG_STRUCT_PROMOTE
+#endif // !FEATURE_MULTIREG_STRUCT_PROMOTE
- // TODO-PERF - Implement struct promotion for incoming multireg structs
- // Currently it hits assert(lvFieldCnt==1) in lclvar.cpp line 4417
+ // TODO-PERF - Implement struct promotion for incoming multireg structs
+ // Currently it hits assert(lvFieldCnt==1) in lclvar.cpp line 4417
- if (structPromotionInfo.fieldCnt != 1)
+ if (structPromotionInfo.fieldCnt != 1)
{
- JITDUMP("Not promoting promotable struct local V%02u, because lvIsParam is true and #fields = %d.\n",
+ JITDUMP("Not promoting promotable struct local V%02u, because lvIsParam is true and #fields = "
+ "%d.\n",
lclNum, structPromotionInfo.fieldCnt);
shouldPromote = false;
}
}
- //
+ //
// If the lvRefCnt is zero and we have a struct promoted parameter we can end up with an extra store of
// the the incoming register into the stack frame slot.
// In that case, we would like to avoid promortion.
// However we haven't yet computed the lvRefCnt values so we can't do that.
- //
+ //
CLANG_FORMAT_COMMENT_ANCHOR;
#if 0
@@ -16275,30 +16503,28 @@ void Compiler::fgPromoteStructs()
varDsc->lvRegStruct = true;
}
#endif // FEATURE_SIMD
-
}
}
-
-Compiler::fgWalkResult Compiler::fgMorphStructField(GenTreePtr tree, fgWalkData* fgWalkPre)
+Compiler::fgWalkResult Compiler::fgMorphStructField(GenTreePtr tree, fgWalkData* fgWalkPre)
{
noway_assert(tree->OperGet() == GT_FIELD);
noway_assert(tree->gtFlags & GTF_GLOB_REF);
- GenTreePtr objRef = tree->gtField.gtFldObj;
+ GenTreePtr objRef = tree->gtField.gtFldObj;
/* Is this an instance data member? */
- if (objRef)
+ if (objRef)
{
if (objRef->gtOper == GT_ADDR)
{
- GenTreePtr obj = objRef->gtOp.gtOp1;
+ GenTreePtr obj = objRef->gtOp.gtOp1;
if (obj->gtOper == GT_LCL_VAR)
{
- unsigned lclNum = obj->gtLclVarCommon.gtLclNum;
- LclVarDsc* varDsc = &lvaTable[lclNum];
+ unsigned lclNum = obj->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[lclNum];
if (varTypeIsStruct(obj))
{
@@ -16311,12 +16537,12 @@ Compiler::fgWalkResult Compiler::fgMorphStructField(GenTreePtr tree, fgWalk
tree->SetOper(GT_LCL_VAR);
tree->gtLclVarCommon.SetLclNum(fieldLclIndex);
- tree->gtType = lvaTable[fieldLclIndex].TypeGet();
+ tree->gtType = lvaTable[fieldLclIndex].TypeGet();
tree->gtFlags &= GTF_NODE_MASK;
tree->gtFlags &= ~GTF_GLOB_REF;
GenTreePtr parent = fgWalkPre->parentStack->Index(1);
- if ((parent->gtOper == GT_ASG) && (parent->gtOp.gtOp1 == tree))
+ if ((parent->gtOper == GT_ASG) && (parent->gtOp.gtOp1 == tree))
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
@@ -16393,19 +16619,19 @@ Compiler::fgWalkResult Compiler::fgMorphStructField(GenTreePtr tree, fgWalk
return WALK_CONTINUE;
}
-Compiler::fgWalkResult Compiler::fgMorphLocalField(GenTreePtr tree, fgWalkData* fgWalkPre)
+Compiler::fgWalkResult Compiler::fgMorphLocalField(GenTreePtr tree, fgWalkData* fgWalkPre)
{
noway_assert(tree->OperGet() == GT_LCL_FLD);
- unsigned lclNum = tree->gtLclFld.gtLclNum;
- LclVarDsc* varDsc = &lvaTable[lclNum];
+ unsigned lclNum = tree->gtLclFld.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[lclNum];
if (varTypeIsStruct(varDsc) && (varDsc->lvPromoted))
{
// Promoted struct
- unsigned fldOffset = tree->gtLclFld.gtLclOffs;
- unsigned fieldLclIndex = 0;
- LclVarDsc* fldVarDsc = NULL;
+ unsigned fldOffset = tree->gtLclFld.gtLclOffs;
+ unsigned fieldLclIndex = 0;
+ LclVarDsc* fldVarDsc = nullptr;
if (fldOffset != BAD_VAR_NUM)
{
@@ -16418,7 +16644,7 @@ Compiler::fgWalkResult Compiler::fgMorphLocalField(GenTreePtr tree, fgWalkD
#ifdef _TARGET_X86_
&& varTypeIsFloating(fldVarDsc->TypeGet()) == varTypeIsFloating(tree->gtType)
#endif
- )
+ )
{
// There is an existing sub-field we can use
tree->gtLclFld.SetLclNum(fieldLclIndex);
@@ -16447,7 +16673,7 @@ Compiler::fgWalkResult Compiler::fgMorphLocalField(GenTreePtr tree, fgWalkD
}
GenTreePtr parent = fgWalkPre->parentStack->Index(1);
- if ((parent->gtOper == GT_ASG) && (parent->gtOp.gtOp1 == tree))
+ if ((parent->gtOper == GT_ASG) && (parent->gtOp.gtOp1 == tree))
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
@@ -16477,19 +16703,21 @@ Compiler::fgWalkResult Compiler::fgMorphLocalField(GenTreePtr tree, fgWalkD
* Mark irregular parameters. For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference.
* For ARM64, this is structs larger than 16 bytes that are also not HFAs that are passed by reference.
*/
-void Compiler::fgMarkImplicitByRefArgs()
+void Compiler::fgMarkImplicitByRefArgs()
{
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("\n*************** In fgMarkImplicitByRefs()\n");
+ }
#endif // DEBUG
for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++)
{
LclVarDsc* varDsc = &lvaTable[lclNum];
- assert(!varDsc->lvPromoted); // Called in the wrong order?
+ assert(!varDsc->lvPromoted); // Called in the wrong order?
if (varDsc->lvIsParam && varTypeIsStruct(varDsc))
{
@@ -16502,10 +16730,9 @@ void Compiler::fgMarkImplicitByRefArgs()
else
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
- size = info.compCompHnd->getClassSize(typeHnd);
+ size = info.compCompHnd->getClassSize(typeHnd);
}
-
#if !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
#if defined(_TARGET_AMD64_)
if (size > REGSIZE_BYTES || (size & (size - 1)) != 0)
@@ -16521,14 +16748,14 @@ void Compiler::fgMarkImplicitByRefArgs()
varDsc->lvIsTemp = 1;
// Also marking them as BYREF will hide them from struct promotion.
- varDsc->lvType = TYP_BYREF;
+ varDsc->lvType = TYP_BYREF;
varDsc->lvRefCnt = 0;
// Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF
// make sure that the following flag is not set as these will force SSA to
// exclude tracking/enregistering these LclVars. (see fgExcludeFromSsa)
- //
- varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it.
+ //
+ varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it.
#ifdef DEBUG
// This should not be converted to a double in stress mode,
@@ -16553,7 +16780,7 @@ void Compiler::fgMarkImplicitByRefArgs()
* Morph irregular parameters
* for x64 and ARM64 this means turning them into byrefs, adding extra indirs.
*/
-bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr *pTree, fgWalkData* fgWalkPre)
+bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr* pTree, fgWalkData* fgWalkPre)
{
#if !defined(_TARGET_AMD64_) && !defined(_TARGET_ARM64_)
@@ -16562,13 +16789,12 @@ bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr *pTree, fgWalkData* fgWalkPre
#else // _TARGET_AMD64_ || _TARGET_ARM64_
GenTree* tree = *pTree;
- assert((tree->gtOper == GT_LCL_VAR) ||
- ((tree->gtOper == GT_ADDR) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)));
+ assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)));
- bool isAddr = (tree->gtOper == GT_ADDR);
- GenTreePtr lclVarTree = isAddr ? tree->gtOp.gtOp1 : tree;
- unsigned lclNum = lclVarTree->gtLclVarCommon.gtLclNum;
- LclVarDsc* lclVarDsc = &lvaTable[lclNum];
+ bool isAddr = (tree->gtOper == GT_ADDR);
+ GenTreePtr lclVarTree = isAddr ? tree->gtOp.gtOp1 : tree;
+ unsigned lclNum = lclVarTree->gtLclVarCommon.gtLclNum;
+ LclVarDsc* lclVarDsc = &lvaTable[lclNum];
if (!lvaIsImplicitByRefLocal(lclNum))
{
@@ -16615,10 +16841,10 @@ bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr *pTree, fgWalkData* fgWalkPre
// possible-modified tree back to the caller, so we modify the original lclVar node in-place
// to the GT_IND.
var_types structType = tree->gtType;
- lclVarTree = gtClone(tree);
+ lclVarTree = gtClone(tree);
// Now, set the types appropriately.
lclVarTree->gtType = TYP_BYREF;
- tree->gtType = structType;
+ tree->gtType = structType;
// Now, "insert" the GT_IND by changing the oper of the original node and setting its op1.
tree->SetOper(GT_IND);
tree->gtOp.gtOp1 = lclVarTree;
@@ -16640,10 +16866,8 @@ bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr *pTree, fgWalkData* fgWalkPre
return true;
#endif // _TARGET_AMD64_ || _TARGET_ARM64_
-
}
-
// An "AddrExposedContext" expresses the calling context in which an address expression occurs.
enum AddrExposedContext
{
@@ -16666,21 +16890,19 @@ enum AddrExposedContext
typedef ArrayStack<AddrExposedContext> AXCStack;
// We use pre-post to simulate passing an argument in a recursion, via a stack.
-Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPostCB(GenTreePtr* pTree,
- fgWalkData* fgWalkPre)
+Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPostCB(GenTreePtr* pTree, fgWalkData* fgWalkPre)
{
AXCStack* axcStack = reinterpret_cast<AXCStack*>(fgWalkPre->pCallbackData);
(void)axcStack->Pop();
return WALK_CONTINUE;
}
-Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTree,
- fgWalkData* fgWalkPre)
+Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTree, fgWalkData* fgWalkPre)
{
- GenTreePtr tree = *pTree;
- Compiler* comp = fgWalkPre->compiler;
- AXCStack* axcStack = reinterpret_cast<AXCStack*>(fgWalkPre->pCallbackData);
- AddrExposedContext axc = axcStack->Top();
+ GenTreePtr tree = *pTree;
+ Compiler* comp = fgWalkPre->compiler;
+ AXCStack* axcStack = reinterpret_cast<AXCStack*>(fgWalkPre->pCallbackData);
+ AddrExposedContext axc = axcStack->Top();
// In some situations, we have to figure out what the effective context is in which to
// evaluate the current tree, depending on which argument position it is in its parent.
@@ -16689,8 +16911,8 @@ Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTr
// and "ind" context.
switch (axc)
{
- case AXC_InitBlk:
- case AXC_CopyBlk:
+ case AXC_InitBlk:
+ case AXC_CopyBlk:
{
// In both cases, the second argument is an integer struct size. That should have a "none" context.
// The first argument is a GT_LIST. For GT_COPYBLK, both args of the list are addresses
@@ -16702,8 +16924,7 @@ Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTr
// (byte) value to fill the block with. The second argument of the GT_INITBLK is also
// an integer, the block size.
GenTreePtr parent = fgWalkPre->parentStack->Index(1);
- if (parent->gtOp.gtOp2 == tree &&
- parent->OperIsBlkOp())
+ if (parent->gtOp.gtOp2 == tree && parent->OperIsBlkOp())
{
axc = AXC_None;
}
@@ -16718,7 +16939,7 @@ Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTr
// arguments, then we have to consider that storage location (indeed, it's underlying containing
// location) to be address taken. So get the width of the initblk or copyblk.
GenTreePtr widthNode = fgWalkPre->parentStack->Index(2)->gtOp.gtOp2;
- unsigned width = UINT_MAX; // If it's not a constant, assume it's maximally big.
+ unsigned width = UINT_MAX; // If it's not a constant, assume it's maximally big.
if (widthNode->IsCnsIntOrI())
{
if (widthNode->IsIconHandle())
@@ -16769,7 +16990,7 @@ Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTr
}
break;
- case AXC_IndAdd:
+ case AXC_IndAdd:
{
GenTreePtr parent = fgWalkPre->parentStack->Index(1);
assert(parent->OperGet() == GT_ADD);
@@ -16790,126 +17011,127 @@ Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTr
}
break;
- default:
- break;
+ default:
+ break;
}
// Now recurse properly for the tree.
switch (tree->gtOper)
{
- case GT_IND:
- case GT_OBJ:
- if (axc != AXC_Addr)
- {
- axcStack->Push(AXC_Ind);
- }
- else
- {
- axcStack->Push(AXC_None);
- }
- return WALK_CONTINUE;
-
- case GT_INITBLK:
- axcStack->Push(AXC_InitBlk);
- return WALK_CONTINUE;
+ case GT_IND:
+ case GT_OBJ:
+ if (axc != AXC_Addr)
+ {
+ axcStack->Push(AXC_Ind);
+ }
+ else
+ {
+ axcStack->Push(AXC_None);
+ }
+ return WALK_CONTINUE;
- case GT_COPYOBJ:
- case GT_COPYBLK:
- axcStack->Push(AXC_CopyBlk);
- return WALK_CONTINUE;
+ case GT_INITBLK:
+ axcStack->Push(AXC_InitBlk);
+ return WALK_CONTINUE;
- case GT_LIST:
- if (axc == AXC_InitBlk || axc == AXC_CopyBlk)
- {
- axcStack->Push(axc);
- }
- else
- {
- axcStack->Push(AXC_None);
- }
- return WALK_CONTINUE;
+ case GT_COPYOBJ:
+ case GT_COPYBLK:
+ axcStack->Push(AXC_CopyBlk);
+ return WALK_CONTINUE;
- case GT_INDEX:
- // Taking the address of an array element never takes the address of a local.
- axcStack->Push(AXC_None);
- return WALK_CONTINUE;
+ case GT_LIST:
+ if (axc == AXC_InitBlk || axc == AXC_CopyBlk)
+ {
+ axcStack->Push(axc);
+ }
+ else
+ {
+ axcStack->Push(AXC_None);
+ }
+ return WALK_CONTINUE;
- case GT_ADDR:
- // If we have ADDR(lcl), and "lcl" is an implicit byref parameter, fgMorphImplicitByRefArgs will
- // convert to just "lcl". This is never an address-context use, since the local is already a
- // byref after this transformation.
- if (tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR && comp->fgMorphImplicitByRefArgs(pTree, fgWalkPre))
- {
- // Push something to keep the PostCB, which will pop it, happy.
+ case GT_INDEX:
+ // Taking the address of an array element never takes the address of a local.
axcStack->Push(AXC_None);
- // In the first case, tree may no longer be a leaf, but we're done with it; is a leaf in the second case.
- return WALK_SKIP_SUBTREES;
- }
+ return WALK_CONTINUE;
+
+ case GT_ADDR:
+ // If we have ADDR(lcl), and "lcl" is an implicit byref parameter, fgMorphImplicitByRefArgs will
+ // convert to just "lcl". This is never an address-context use, since the local is already a
+ // byref after this transformation.
+ if (tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR && comp->fgMorphImplicitByRefArgs(pTree, fgWalkPre))
+ {
+ // Push something to keep the PostCB, which will pop it, happy.
+ axcStack->Push(AXC_None);
+ // In the first case, tree may no longer be a leaf, but we're done with it; is a leaf in the second
+ // case.
+ return WALK_SKIP_SUBTREES;
+ }
#ifdef FEATURE_SIMD
- if (tree->gtOp.gtOp1->OperGet() == GT_SIMD)
- {
- axcStack->Push(AXC_None);
- }
- else
+ if (tree->gtOp.gtOp1->OperGet() == GT_SIMD)
+ {
+ axcStack->Push(AXC_None);
+ }
+ else
#endif // FEATURE_SIMD
- if (axc == AXC_Ind)
- {
- axcStack->Push(AXC_None);
- }
- else if (axc == AXC_IndWide)
- {
- axcStack->Push(AXC_AddrWide);
- }
- else
- {
- assert(axc == AXC_None);
- axcStack->Push(AXC_Addr);
- }
- return WALK_CONTINUE;
+ if (axc == AXC_Ind)
+ {
+ axcStack->Push(AXC_None);
+ }
+ else if (axc == AXC_IndWide)
+ {
+ axcStack->Push(AXC_AddrWide);
+ }
+ else
+ {
+ assert(axc == AXC_None);
+ axcStack->Push(AXC_Addr);
+ }
+ return WALK_CONTINUE;
- case GT_FIELD:
- // First, handle a couple of special cases: field of promoted struct local, field
- // of "normed" struct.
- if (comp->fgMorphStructField(tree, fgWalkPre) == WALK_SKIP_SUBTREES)
- {
- // It (may have) replaced the field with a local var or local field. If we're in an addr context,
- // label it addr-taken.
- if (tree->OperIsLocal() && (axc == AXC_Addr || axc == AXC_AddrWide))
+ case GT_FIELD:
+ // First, handle a couple of special cases: field of promoted struct local, field
+ // of "normed" struct.
+ if (comp->fgMorphStructField(tree, fgWalkPre) == WALK_SKIP_SUBTREES)
{
- unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
- comp->lvaSetVarAddrExposed(lclNum);
- if (axc == AXC_AddrWide)
+ // It (may have) replaced the field with a local var or local field. If we're in an addr context,
+ // label it addr-taken.
+ if (tree->OperIsLocal() && (axc == AXC_Addr || axc == AXC_AddrWide))
{
- LclVarDsc* varDsc = &comp->lvaTable[lclNum];
- if (varDsc->lvIsStructField)
+ unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
+ comp->lvaSetVarAddrExposed(lclNum);
+ if (axc == AXC_AddrWide)
{
- comp->lvaSetVarAddrExposed(varDsc->lvParentLcl);
+ LclVarDsc* varDsc = &comp->lvaTable[lclNum];
+ if (varDsc->lvIsStructField)
+ {
+ comp->lvaSetVarAddrExposed(varDsc->lvParentLcl);
+ }
}
}
- }
- // Push something to keep the PostCB, which will pop it, happy.
- axcStack->Push(AXC_None);
- return WALK_SKIP_SUBTREES;
- }
- else
- {
- // GT_FIELD is an implicit deref.
- if (axc == AXC_Addr)
- {
+ // Push something to keep the PostCB, which will pop it, happy.
axcStack->Push(AXC_None);
- }
- else if (axc == AXC_AddrWide)
- {
- axcStack->Push(AXC_IndWide);
+ return WALK_SKIP_SUBTREES;
}
else
{
- axcStack->Push(AXC_Ind);
+ // GT_FIELD is an implicit deref.
+ if (axc == AXC_Addr)
+ {
+ axcStack->Push(AXC_None);
+ }
+ else if (axc == AXC_AddrWide)
+ {
+ axcStack->Push(AXC_IndWide);
+ }
+ else
+ {
+ axcStack->Push(AXC_Ind);
+ }
+ return WALK_CONTINUE;
}
- return WALK_CONTINUE;
- }
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
{
assert(axc != AXC_Addr);
// This recognizes certain forms, and does all the work. In that case, returns WALK_SKIP_SUBTREES,
@@ -16935,132 +17157,133 @@ Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTr
return res;
}
- case GT_LCL_VAR:
- // On some architectures, some arguments are passed implicitly by reference.
- // Modify the trees to reflect that, if this local is one of those.
- if (comp->fgMorphImplicitByRefArgs(pTree, fgWalkPre))
- {
- // We can't be in an address context; the ADDR(lcl), where lcl is an implicit byref param, was
- // handled earlier. (And we can't have added anything to this address, since it was implicit.)
- assert(axc != AXC_Addr);
- }
- else
- {
- if (axc == AXC_Addr || axc == AXC_AddrWide)
+ case GT_LCL_VAR:
+ // On some architectures, some arguments are passed implicitly by reference.
+ // Modify the trees to reflect that, if this local is one of those.
+ if (comp->fgMorphImplicitByRefArgs(pTree, fgWalkPre))
{
- unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
- comp->lvaSetVarAddrExposed(lclNum);
- if (axc == AXC_AddrWide)
+ // We can't be in an address context; the ADDR(lcl), where lcl is an implicit byref param, was
+ // handled earlier. (And we can't have added anything to this address, since it was implicit.)
+ assert(axc != AXC_Addr);
+ }
+ else
+ {
+ if (axc == AXC_Addr || axc == AXC_AddrWide)
{
- LclVarDsc* varDsc = &comp->lvaTable[lclNum];
- if (varDsc->lvIsStructField)
+ unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
+ comp->lvaSetVarAddrExposed(lclNum);
+ if (axc == AXC_AddrWide)
{
- comp->lvaSetVarAddrExposed(varDsc->lvParentLcl);
+ LclVarDsc* varDsc = &comp->lvaTable[lclNum];
+ if (varDsc->lvIsStructField)
+ {
+ comp->lvaSetVarAddrExposed(varDsc->lvParentLcl);
+ }
}
- }
- // We may need to Quirk the storage size for this LCL_VAR
- // some PInvoke signatures incorrectly specify a ByRef to an INT32
- // when they actually write a SIZE_T or INT64
- if (axc == AXC_Addr)
- {
- comp->gtCheckQuirkAddrExposedLclVar(tree, fgWalkPre->parentStack);
+ // We may need to Quirk the storage size for this LCL_VAR
+ // some PInvoke signatures incorrectly specify a ByRef to an INT32
+ // when they actually write a SIZE_T or INT64
+ if (axc == AXC_Addr)
+ {
+ comp->gtCheckQuirkAddrExposedLclVar(tree, fgWalkPre->parentStack);
+ }
}
}
- }
- // Push something to keep the PostCB, which will pop it, happy.
- axcStack->Push(AXC_None);
- // In the first case, tree may no longer be a leaf, but we're done with it; is a leaf in the second case.
- return WALK_SKIP_SUBTREES;
-
- case GT_ADD:
- assert(axc != AXC_Addr);
- // See below about treating pointer operations as wider indirection.
- if (tree->gtOp.gtOp1->gtType == TYP_BYREF || tree->gtOp.gtOp2->gtType == TYP_BYREF)
- {
- axcStack->Push(AXC_IndWide);
- }
- else if (axc == AXC_Ind)
- {
- // Let the children know that the parent was a GT_ADD, to be evaluated in an IND context.
- // If it's an add of a constant and an address, and the constant represents a field,
- // then we'll evaluate the address argument in an Ind context; otherwise, the None context.
- axcStack->Push(AXC_IndAdd);
- }
- else
- {
- axcStack->Push(axc);
- }
- return WALK_CONTINUE;
+ // Push something to keep the PostCB, which will pop it, happy.
+ axcStack->Push(AXC_None);
+            // In the first case, tree may no longer be a leaf, but we're done with it; it is a leaf in the second case.
+ return WALK_SKIP_SUBTREES;
- // !!! Treat Pointer Operations as Wider Indirection
- //
- // If we are performing pointer operations, make sure we treat that as equivalent to a wider
- // indirection. This is because the pointers could be pointing to the address of struct fields
- // and could be used to perform operations on the whole struct or passed to another method.
- //
- // When visiting a node in this pre-order walk, we do not know if we would in the future
- // encounter a GT_ADDR of a GT_FIELD below.
- //
- // Note: GT_ADDR of a GT_FIELD is always a TYP_BYREF.
- // So let us be conservative and treat TYP_BYREF operations as AXC_IndWide and propagate a
- // wider indirection context down the expr tree.
- //
- // Example, in unsafe code,
- //
- // IL_000e 12 00 ldloca.s 0x0
- // IL_0010 7c 02 00 00 04 ldflda 0x4000002
- // IL_0015 12 00 ldloca.s 0x0
- // IL_0017 7c 01 00 00 04 ldflda 0x4000001
- // IL_001c 59 sub
- //
- // When visiting the GT_SUB node, if the types of either of the GT_SUB's operand are BYREF, then
- // consider GT_SUB to be equivalent of an AXC_IndWide.
- //
- // Similarly for pointer comparisons and pointer escaping as integers through conversions, treat
- // them as AXC_IndWide.
- //
-
- // BINOP
- case GT_SUB:
- case GT_MUL:
- case GT_DIV:
- case GT_UDIV:
- case GT_OR:
- case GT_XOR:
- case GT_AND:
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROL:
- case GT_ROR:
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GT:
- case GT_GE:
- // UNOP
- case GT_CAST:
- if ((tree->gtOp.gtOp1->gtType == TYP_BYREF) || (tree->OperIsBinary() && (tree->gtOp.gtOp2->gtType == TYP_BYREF)))
- {
- axcStack->Push(AXC_IndWide);
+ case GT_ADD:
+ assert(axc != AXC_Addr);
+ // See below about treating pointer operations as wider indirection.
+ if (tree->gtOp.gtOp1->gtType == TYP_BYREF || tree->gtOp.gtOp2->gtType == TYP_BYREF)
+ {
+ axcStack->Push(AXC_IndWide);
+ }
+ else if (axc == AXC_Ind)
+ {
+ // Let the children know that the parent was a GT_ADD, to be evaluated in an IND context.
+ // If it's an add of a constant and an address, and the constant represents a field,
+ // then we'll evaluate the address argument in an Ind context; otherwise, the None context.
+ axcStack->Push(AXC_IndAdd);
+ }
+ else
+ {
+ axcStack->Push(axc);
+ }
return WALK_CONTINUE;
- }
- __fallthrough;
- default:
- // To be safe/conservative: pass Addr through, but not Ind -- otherwise, revert to "None". We must
- // handle the "Ind" propogation explicitly above.
- if (axc == AXC_Addr || axc == AXC_AddrWide)
- {
- axcStack->Push(axc);
- }
- else
- {
- axcStack->Push(AXC_None);
- }
- return WALK_CONTINUE;
+ // !!! Treat Pointer Operations as Wider Indirection
+ //
+ // If we are performing pointer operations, make sure we treat that as equivalent to a wider
+ // indirection. This is because the pointers could be pointing to the address of struct fields
+ // and could be used to perform operations on the whole struct or passed to another method.
+ //
+ // When visiting a node in this pre-order walk, we do not know if we would in the future
+ // encounter a GT_ADDR of a GT_FIELD below.
+ //
+ // Note: GT_ADDR of a GT_FIELD is always a TYP_BYREF.
+ // So let us be conservative and treat TYP_BYREF operations as AXC_IndWide and propagate a
+ // wider indirection context down the expr tree.
+ //
+ // Example, in unsafe code,
+ //
+ // IL_000e 12 00 ldloca.s 0x0
+ // IL_0010 7c 02 00 00 04 ldflda 0x4000002
+ // IL_0015 12 00 ldloca.s 0x0
+ // IL_0017 7c 01 00 00 04 ldflda 0x4000001
+ // IL_001c 59 sub
+ //
+        // When visiting the GT_SUB node, if the type of either of the GT_SUB's operands is BYREF, then
+        // consider GT_SUB to be equivalent to an AXC_IndWide.
+ //
+ // Similarly for pointer comparisons and pointer escaping as integers through conversions, treat
+ // them as AXC_IndWide.
+ //
+
+ // BINOP
+ case GT_SUB:
+ case GT_MUL:
+ case GT_DIV:
+ case GT_UDIV:
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROL:
+ case GT_ROR:
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GT:
+ case GT_GE:
+ // UNOP
+ case GT_CAST:
+ if ((tree->gtOp.gtOp1->gtType == TYP_BYREF) ||
+ (tree->OperIsBinary() && (tree->gtOp.gtOp2->gtType == TYP_BYREF)))
+ {
+ axcStack->Push(AXC_IndWide);
+ return WALK_CONTINUE;
+ }
+ __fallthrough;
+
+ default:
+ // To be safe/conservative: pass Addr through, but not Ind -- otherwise, revert to "None". We must
+            // handle the "Ind" propagation explicitly above.
+ if (axc == AXC_Addr || axc == AXC_AddrWide)
+ {
+ axcStack->Push(axc);
+ }
+ else
+ {
+ axcStack->Push(AXC_None);
+ }
+ return WALK_CONTINUE;
}
}
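The pre-order callback above threads an address-exposure context (AXC) down the tree: each case pushes the context its children should be evaluated in, and the matching post-order callback pops that entry. The stand-alone sketch below models only that push step; the AXC names mirror the enum used here, but NodeKind, PushChildContext and the vector-based stack are simplified stand-ins for GenTree operators, the walker callback and ArrayStack, not the JIT's actual API.

#include <cassert>
#include <vector>

// Simplified address-exposure contexts; the real walker drives this from
// Compiler::fgWalkTree with an ArrayStack<AddrExposedContext>.
enum AXC { AXC_None, AXC_Addr, AXC_AddrWide, AXC_Ind, AXC_IndWide };
enum NodeKind { NK_Addr, NK_Ind, NK_ByrefArith, NK_Other };

// Pre-order step: given the context inherited from the parent and the kind of the
// current node, push (and return) the context the node's children should see.
AXC PushChildContext(std::vector<AXC>& axcStack, NodeKind kind, AXC axc)
{
    AXC child;
    switch (kind)
    {
        case NK_Addr: // GT_ADDR: an Ind context cancels out; otherwise we are taking an address
            child = (axc == AXC_Ind) ? AXC_None : (axc == AXC_IndWide) ? AXC_AddrWide : AXC_Addr;
            break;
        case NK_Ind: // GT_IND / GT_FIELD: an Addr context cancels out; otherwise this is a deref
            child = (axc == AXC_Addr) ? AXC_None : (axc == AXC_AddrWide) ? AXC_IndWide : AXC_Ind;
            break;
        case NK_ByrefArith: // arithmetic on TYP_BYREF operands is conservatively a wide indirection
            child = AXC_IndWide;
            break;
        default: // pass address contexts through; drop indirection contexts
            child = (axc == AXC_Addr || axc == AXC_AddrWide) ? axc : AXC_None;
            break;
    }
    axcStack.push_back(child); // the post-order callback pops this entry
    return child;
}

int main()
{
    std::vector<AXC> axcStack{AXC_None};
    assert(PushChildContext(axcStack, NK_Addr, axcStack.back()) == AXC_Addr);
    assert(PushChildContext(axcStack, NK_ByrefArith, axcStack.back()) == AXC_IndWide);
    return 0;
}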
@@ -17078,7 +17301,7 @@ bool Compiler::fgFitsInOrNotLoc(GenTreePtr tree, unsigned width)
}
else if (tree->OperGet() == GT_FIELD)
{
- CORINFO_CLASS_HANDLE fldClass = info.compCompHnd->getFieldClass (tree->gtField.gtFldHnd);
+ CORINFO_CLASS_HANDLE fldClass = info.compCompHnd->getFieldClass(tree->gtField.gtFldHnd);
return width <= info.compCompHnd->getClassSize(fldClass);
}
else
@@ -17087,57 +17310,56 @@ bool Compiler::fgFitsInOrNotLoc(GenTreePtr tree, unsigned width)
}
}
-
void Compiler::fgAddFieldSeqForZeroOffset(GenTreePtr op1, FieldSeqNode* fieldSeq)
{
assert(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_REF);
switch (op1->OperGet())
{
- case GT_ADDR:
- if (op1->gtOp.gtOp1->OperGet() == GT_LCL_FLD)
- {
- GenTreeLclFld* lclFld = op1->gtOp.gtOp1->AsLclFld();
- lclFld->gtFieldSeq = GetFieldSeqStore()->Append(lclFld->gtFieldSeq, fieldSeq);
- }
- break;
+ case GT_ADDR:
+ if (op1->gtOp.gtOp1->OperGet() == GT_LCL_FLD)
+ {
+ GenTreeLclFld* lclFld = op1->gtOp.gtOp1->AsLclFld();
+ lclFld->gtFieldSeq = GetFieldSeqStore()->Append(lclFld->gtFieldSeq, fieldSeq);
+ }
+ break;
- case GT_ADD:
- if (op1->gtOp.gtOp1->OperGet() == GT_CNS_INT)
- {
- FieldSeqNode* op1Fs = op1->gtOp.gtOp1->gtIntCon.gtFieldSeq;
- if (op1Fs != NULL)
+ case GT_ADD:
+ if (op1->gtOp.gtOp1->OperGet() == GT_CNS_INT)
{
- op1Fs = GetFieldSeqStore()->Append(op1Fs, fieldSeq);
- op1->gtOp.gtOp1->gtIntCon.gtFieldSeq = op1Fs;
+ FieldSeqNode* op1Fs = op1->gtOp.gtOp1->gtIntCon.gtFieldSeq;
+ if (op1Fs != nullptr)
+ {
+ op1Fs = GetFieldSeqStore()->Append(op1Fs, fieldSeq);
+ op1->gtOp.gtOp1->gtIntCon.gtFieldSeq = op1Fs;
+ }
}
- }
- else if (op1->gtOp.gtOp2->OperGet() == GT_CNS_INT)
- {
- FieldSeqNode* op2Fs = op1->gtOp.gtOp2->gtIntCon.gtFieldSeq;
- if (op2Fs != NULL)
+ else if (op1->gtOp.gtOp2->OperGet() == GT_CNS_INT)
{
- op2Fs = GetFieldSeqStore()->Append(op2Fs, fieldSeq);
- op1->gtOp.gtOp2->gtIntCon.gtFieldSeq = op2Fs;
+ FieldSeqNode* op2Fs = op1->gtOp.gtOp2->gtIntCon.gtFieldSeq;
+ if (op2Fs != nullptr)
+ {
+ op2Fs = GetFieldSeqStore()->Append(op2Fs, fieldSeq);
+ op1->gtOp.gtOp2->gtIntCon.gtFieldSeq = op2Fs;
+ }
}
- }
- break;
+ break;
- case GT_CNS_INT:
+ case GT_CNS_INT:
{
FieldSeqNode* op1Fs = op1->gtIntCon.gtFieldSeq;
- if (op1Fs != NULL)
+ if (op1Fs != nullptr)
{
- op1Fs = GetFieldSeqStore()->Append(op1Fs, fieldSeq);
+ op1Fs = GetFieldSeqStore()->Append(op1Fs, fieldSeq);
op1->gtIntCon.gtFieldSeq = op1Fs;
}
}
break;
- default:
- // Record in the general zero-offset map.
- GetZeroOffsetFieldMap()->Set(op1, fieldSeq);
- break;
+ default:
+ // Record in the general zero-offset map.
+ GetZeroOffsetFieldMap()->Set(op1, fieldSeq);
+ break;
}
}
@@ -17146,11 +17368,13 @@ void Compiler::fgAddFieldSeqForZeroOffset(GenTreePtr op1, FieldSeqNode* fieldSeq
* Mark address-taken locals.
*/
-void Compiler::fgMarkAddressExposedLocals()
+void Compiler::fgMarkAddressExposedLocals()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("\n*************** In fgMarkAddressExposedLocals()\n");
+ }
#endif // DEBUG
BasicBlock* block = fgFirstBB;
@@ -17164,17 +17388,12 @@ void Compiler::fgMarkAddressExposedLocals()
GenTreePtr stmt;
- for (stmt = block->bbTreeList;
- stmt;
- stmt = stmt->gtNext)
+ for (stmt = block->bbTreeList; stmt; stmt = stmt->gtNext)
{
// Call Compiler::fgMarkAddrTakenLocalsCB on each node
AXCStack stk(this);
stk.Push(AXC_None); // We start in neither an addr or ind context.
- fgWalkTree(&stmt->gtStmt.gtStmtExpr,
- fgMarkAddrTakenLocalsPreCB,
- fgMarkAddrTakenLocalsPostCB,
- &stk);
+ fgWalkTree(&stmt->gtStmt.gtStmtExpr, fgMarkAddrTakenLocalsPreCB, fgMarkAddrTakenLocalsPostCB, &stk);
}
block = block->bbNext;
@@ -17182,32 +17401,37 @@ void Compiler::fgMarkAddressExposedLocals()
} while (block);
}
-
// fgNodesMayInterfere:
// return true if moving nodes relative to each other can change the result of a computation
//
// args:
// read: a node which reads
-//
+//
bool Compiler::fgNodesMayInterfere(GenTree* write, GenTree* read)
{
- LclVarDsc* srcVar = nullptr;
- bool srcAliased = false;
- bool dstAliased = false;
+ LclVarDsc* srcVar = nullptr;
+ bool srcAliased = false;
+ bool dstAliased = false;
- bool readIsIndir = read->OperIsIndir() || read->OperIsImplicitIndir();
+ bool readIsIndir = read->OperIsIndir() || read->OperIsImplicitIndir();
bool writeIsIndir = write->OperIsIndir() || write->OperIsImplicitIndir();
if (read->OperIsLocal())
+ {
srcVar = &lvaTable[read->gtLclVarCommon.gtLclNum];
-
+ }
+
if (writeIsIndir)
{
if (srcVar && srcVar->lvAddrExposed)
+ {
return true;
+ }
else if (readIsIndir)
+ {
return true;
+ }
return false;
}
else if (write->OperIsLocal())
@@ -17220,7 +17444,9 @@ bool Compiler::fgNodesMayInterfere(GenTree* write, GenTree* read)
else if (read->OperIsLocal())
{
if (read->gtLclVarCommon.gtLclNum == write->gtLclVarCommon.gtLclNum)
+ {
return true;
+ }
return false;
}
else
@@ -17247,22 +17473,21 @@ bool Compiler::fgNodesMayInterfere(GenTree* write, GenTree* read)
* reverse the tree due to the fact we saw x = y <op> x and we want to fold that into
* x <op>= y because the operator property.
*/
-bool Compiler::fgShouldCreateAssignOp(GenTreePtr tree, bool* bReverse)
+bool Compiler::fgShouldCreateAssignOp(GenTreePtr tree, bool* bReverse)
{
#if CPU_LOAD_STORE_ARCH
/* In the case of a load/store architecture, there's no gain by doing any of this, we bail. */
return false;
#elif !defined(LEGACY_BACKEND)
return false;
-#else // defined(LEGACY_BACKEND)
-
+#else // defined(LEGACY_BACKEND)
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
genTreeOps cmop = op2->OperGet();
/* Is the destination identical to the first RHS sub-operand? */
- if (GenTree::Compare(op1, op2->gtOp.gtOp1))
+ if (GenTree::Compare(op1, op2->gtOp.gtOp1))
{
/*
Do not transform the following tree
@@ -17282,12 +17507,10 @@ bool Compiler::fgShouldCreateAssignOp(GenTreePtr tree, bool* bRev
, when V01 is a struct field local.
*/
- if (op1->gtOper == GT_LCL_VAR &&
- varTypeIsSmall(op1->TypeGet()) &&
- op1->TypeGet() != op2->gtOp.gtOp2->TypeGet())
+ if (op1->gtOper == GT_LCL_VAR && varTypeIsSmall(op1->TypeGet()) && op1->TypeGet() != op2->gtOp.gtOp2->TypeGet())
{
- unsigned lclNum = op1->gtLclVarCommon.gtLclNum;
- LclVarDsc* varDsc = lvaTable + lclNum;
+ unsigned lclNum = op1->gtLclVarCommon.gtLclNum;
+ LclVarDsc* varDsc = lvaTable + lclNum;
if (varDsc->lvIsStructField)
{
@@ -17303,21 +17526,20 @@ bool Compiler::fgShouldCreateAssignOp(GenTreePtr tree, bool* bRev
/* For commutative ops only, check for "a = x <op> a" */
/* Should we be doing this at all? */
- if ((opts.compFlags & CLFLG_TREETRANS) == 0)
+ if ((opts.compFlags & CLFLG_TREETRANS) == 0)
{
return false;
}
/* Can we swap the operands to cmop ... */
- if ((op2->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT) &&
- (op2->gtOp.gtOp2->gtFlags & GTF_ALL_EFFECT) )
+ if ((op2->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT) && (op2->gtOp.gtOp2->gtFlags & GTF_ALL_EFFECT))
{
// Both sides must have side effects to prevent swap
return false;
}
/* Is the destination identical to the second RHS sub-operand? */
- if (GenTree::Compare(op1, op2->gtOp.gtOp2))
+ if (GenTree::Compare(op1, op2->gtOp.gtOp2))
{
*bReverse = true;
return true;
@@ -17335,8 +17557,8 @@ Compiler::MorphAddrContext Compiler::s_CopyBlockMAC(Compiler::MACK_CopyBlock);
//-----------------------------------------------------------------------------------
// fgMorphCombineSIMDFieldAssignments:
// If the RHS of the input stmt is a read for simd vector X Field, then this function
-// will keep reading next few stmts based on the vector size(2, 3, 4).
-// If the next stmts LHS are located contiguous and RHS are also located
+// will keep reading next few stmts based on the vector size(2, 3, 4).
+// If the next stmts LHS are located contiguous and RHS are also located
// contiguous, then we replace those statements with a copyblk.
//
// Argument:
@@ -17344,7 +17566,7 @@ Compiler::MorphAddrContext Compiler::s_CopyBlockMAC(Compiler::MACK_CopyBlock);
// stmt - GenTreeStmt*. the stmt node we want to check
//
// return value:
-// if this funciton successfully optimized the stmts, then return true. Otherwise
+// if this function successfully optimized the stmts, then return true. Otherwise
// return false;
bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTreePtr stmt)
@@ -17354,26 +17576,24 @@ bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTreePtr
GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
assert(tree->OperGet() == GT_ASG);
- GenTreePtr originalLHS = tree->gtOp.gtOp1;
- GenTreePtr prevLHS = tree->gtOp.gtOp1;
- GenTreePtr prevRHS = tree->gtOp.gtOp2;
- unsigned index = 0;
- var_types baseType = TYP_UNKNOWN;
- unsigned simdSize = 0;
+ GenTreePtr originalLHS = tree->gtOp.gtOp1;
+ GenTreePtr prevLHS = tree->gtOp.gtOp1;
+ GenTreePtr prevRHS = tree->gtOp.gtOp2;
+ unsigned index = 0;
+ var_types baseType = TYP_UNKNOWN;
+ unsigned simdSize = 0;
GenTreePtr simdStructNode = getSIMDStructFromField(prevRHS, &baseType, &index, &simdSize, true);
- if (simdStructNode == nullptr ||
- index != 0 ||
- baseType != TYP_FLOAT)
+ if (simdStructNode == nullptr || index != 0 || baseType != TYP_FLOAT)
{
// if the RHS is not from a SIMD vector field X, then there is no need to check further.
return false;
}
- int assignmentsCount = simdSize / genTypeSize(baseType) - 1;
- int remainingAssignments = assignmentsCount;
- GenTreePtr curStmt = stmt->gtNext;
- GenTreePtr lastStmt = stmt;
+ int assignmentsCount = simdSize / genTypeSize(baseType) - 1;
+ int remainingAssignments = assignmentsCount;
+ GenTreePtr curStmt = stmt->gtNext;
+ GenTreePtr lastStmt = stmt;
while (curStmt != nullptr && remainingAssignments > 0)
{
@@ -17385,8 +17605,7 @@ bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTreePtr
GenTreePtr curLHS = exp->gtGetOp1();
GenTreePtr curRHS = exp->gtGetOp2();
- if (!areArgumentsContiguous(prevLHS, curLHS) ||
- !areArgumentsContiguous(prevRHS, curRHS))
+ if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS))
{
break;
}
@@ -17396,15 +17615,15 @@ bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTreePtr
prevRHS = curRHS;
lastStmt = curStmt;
- curStmt = curStmt->gtNext;
+ curStmt = curStmt->gtNext;
}
if (remainingAssignments > 0)
{
- // if the left assignments number is bigger than zero, then this means
- // that the assignments are not assgining to the contiguously memory
- // locations from same vector.
- return false;
+        // if the remaining assignments count is bigger than zero, then this means
+        // that the assignments are not assigning to contiguous memory
+        // locations from the same vector.
+ return false;
}
#ifdef DEBUG
if (verbose)
@@ -17416,14 +17635,13 @@ bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTreePtr
printTreeID(lastStmt);
printf("\n");
}
-#endif
-
+#endif
for (int i = 0; i < assignmentsCount; i++)
{
fgRemoveStmt(block, stmt->gtNext);
}
-
+
GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize);
if (simdStructNode->OperIsLocal())
{
@@ -17461,24 +17679,17 @@ bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTreePtr
printf("(before)\n");
gtDispTree(stmt);
}
-#endif
+#endif
- tree = gtNewBlkOpNode(GT_COPYBLK,
- copyBlkDst,
- simdStructAddr,
- gtNewIconNode(simdSize),
- false);
+ tree = gtNewBlkOpNode(GT_COPYBLK, copyBlkDst, simdStructAddr, gtNewIconNode(simdSize), false);
stmt->gtStmt.gtStmtExpr = tree;
-
+
// Since we generated a new address node which didn't exist before,
// we should expose this address manually here.
AXCStack stk(this);
- stk.Push(AXC_None);
- fgWalkTree(&stmt->gtStmt.gtStmtExpr,
- fgMarkAddrTakenLocalsPreCB,
- fgMarkAddrTakenLocalsPostCB,
- &stk);
+ stk.Push(AXC_None);
+ fgWalkTree(&stmt->gtStmt.gtStmtExpr, fgMarkAddrTakenLocalsPreCB, fgMarkAddrTakenLocalsPostCB, &stk);
#ifdef DEBUG
if (verbose)
@@ -17488,8 +17699,8 @@ bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTreePtr
printf("(after)\n");
gtDispTree(stmt);
}
-#endif
+#endif
return true;
}
-#endif //FEATURE_SIMD
+#endif // FEATURE_SIMD
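fgMorphCombineSIMDFieldAssignments, reformatted above, scans forward from a statement whose RHS reads field X of a SIMD vector: if the following statements store the remaining fields and both the destinations and the sources are laid out contiguously, the whole run is replaced with a single GT_COPYBLK. Roughly the following shape, shown here over plain structs rather than JIT trees; Vector4 and the two helper functions are illustrative stand-ins only.

#include <cassert>
#include <cstring>

struct Vector4 { float x, y, z, w; };

// Four contiguous float stores, all read from the same source vector...
void CopyFieldByField(Vector4& dst, const Vector4& src)
{
    dst.x = src.x;
    dst.y = src.y;
    dst.z = src.z;
    dst.w = src.w;
}

// ...are equivalent to one 16-byte block copy, which is what the morph emits.
void CopyAsBlock(Vector4& dst, const Vector4& src)
{
    std::memcpy(&dst, &src, sizeof(Vector4));
}

int main()
{
    Vector4 src{1, 2, 3, 4}, a{}, b{};
    CopyFieldByField(a, src);
    CopyAsBlock(b, src);
    assert(std::memcmp(&a, &b, sizeof(Vector4)) == 0);
    return 0;
}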
diff --git a/src/jit/nodeinfo.h b/src/jit/nodeinfo.h
index 872e9d2b0a..8373dcf29b 100644
--- a/src/jit/nodeinfo.h
+++ b/src/jit/nodeinfo.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#ifndef _NODEINFO_H_
#define _NODEINFO_H_
@@ -12,14 +11,13 @@ typedef unsigned int LsraLocation;
class TreeNodeInfo
{
public:
-
TreeNodeInfo()
{
- loc = 0;
- _dstCount = 0;
- _srcCount = 0;
- _internalIntCount = 0;
- _internalFloatCount = 0;
+ loc = 0;
+ _dstCount = 0;
+ _srcCount = 0;
+ _internalIntCount = 0;
+ _internalFloatCount = 0;
srcCandsIndex = 0;
dstCandsIndex = 0;
@@ -31,69 +29,77 @@ public:
hasDelayFreeSrc = false;
isTgtPref = false;
regOptional = false;
- definesAnyRegisters = false;
+ definesAnyRegisters = false;
#ifdef DEBUG
- isInitialized = false;
+ isInitialized = false;
#endif
}
// dst
- __declspec(property(put=setDstCount, get=getDstCount))
- int dstCount;
+ __declspec(property(put = setDstCount, get = getDstCount)) int dstCount;
void setDstCount(int count)
{
assert(count <= MAX_RET_REG_COUNT);
- _dstCount = (char) count;
+ _dstCount = (char)count;
+ }
+ int getDstCount()
+ {
+ return _dstCount;
}
- int getDstCount() { return _dstCount; }
// src
- __declspec(property(put=setSrcCount, get=getSrcCount))
- int srcCount;
+ __declspec(property(put = setSrcCount, get = getSrcCount)) int srcCount;
void setSrcCount(int count)
{
- _srcCount = (char) count;
+ _srcCount = (char)count;
assert(_srcCount == count);
}
- int getSrcCount() { return _srcCount; }
+ int getSrcCount()
+ {
+ return _srcCount;
+ }
// internalInt
- __declspec(property(put=setInternalIntCount, get=getInternalIntCount))
- int internalIntCount;
+ __declspec(property(put = setInternalIntCount, get = getInternalIntCount)) int internalIntCount;
void setInternalIntCount(int count)
{
- _internalIntCount = (char) count;
+ _internalIntCount = (char)count;
assert(_internalIntCount == count);
}
- int getInternalIntCount() { return _internalIntCount; }
+ int getInternalIntCount()
+ {
+ return _internalIntCount;
+ }
// internalFloat
- __declspec(property(put=setInternalFloatCount, get=getInternalFloatCount))
- int internalFloatCount;
+ __declspec(property(put = setInternalFloatCount, get = getInternalFloatCount)) int internalFloatCount;
void setInternalFloatCount(int count)
{
- _internalFloatCount = (char) count;
+ _internalFloatCount = (char)count;
assert(_internalFloatCount == count);
}
- int getInternalFloatCount() { return _internalFloatCount; }
+ int getInternalFloatCount()
+ {
+ return _internalFloatCount;
+ }
// SrcCandidates are constraints of the consuming (parent) operation applied to this node
// (i.e. what registers it is constrained to consume).
- regMaskTP getSrcCandidates(LinearScan *lsra);
- void setSrcCandidates(LinearScan *lsra, regMaskTP mask);
+ regMaskTP getSrcCandidates(LinearScan* lsra);
+ void setSrcCandidates(LinearScan* lsra, regMaskTP mask);
// DstCandidates are constraints of this node (i.e. what registers it is constrained to produce).
- regMaskTP getDstCandidates(LinearScan *lsra);
- void setDstCandidates(LinearScan *lsra, regMaskTP mask);
+ regMaskTP getDstCandidates(LinearScan* lsra);
+ void setDstCandidates(LinearScan* lsra, regMaskTP mask);
// InternalCandidates are constraints of the registers used as temps in the evaluation of this node.
- regMaskTP getInternalCandidates(LinearScan *lsra);
- void setInternalCandidates(LinearScan *lsra, regMaskTP mask);
- void addInternalCandidates(LinearScan *lsra, regMaskTP mask);
+ regMaskTP getInternalCandidates(LinearScan* lsra);
+ void setInternalCandidates(LinearScan* lsra, regMaskTP mask);
+ void addInternalCandidates(LinearScan* lsra, regMaskTP mask);
- LsraLocation loc;
+ LsraLocation loc;
private:
- unsigned char _dstCount;
- unsigned char _srcCount;
+ unsigned char _dstCount;
+ unsigned char _srcCount;
unsigned char _internalIntCount;
unsigned char _internalFloatCount;
@@ -102,35 +108,34 @@ public:
unsigned char dstCandsIndex;
unsigned char internalCandsIndex;
-
// isLocalDefUse identifies trees that produce a value that is not consumed elsewhere.
// Examples include stack arguments to a call (they are immediately stored), lhs of comma
// nodes, or top-level nodes that are non-void.
- unsigned char isLocalDefUse:1;
+ unsigned char isLocalDefUse : 1;
// isHelperCallWithKills is set when this is a helper call that kills more than just its in/out regs.
- unsigned char isHelperCallWithKills:1;
+ unsigned char isHelperCallWithKills : 1;
// Is this node added by LSRA, e.g. as a resolution or copy/reload move.
- unsigned char isLsraAdded:1;
+ unsigned char isLsraAdded : 1;
// isDelayFree is set when the register defined by this node will interfere with the destination
// of the consuming node, and therefore it must not be freed immediately after use.
- unsigned char isDelayFree:1;
+ unsigned char isDelayFree : 1;
// hasDelayFreeSrc is set when this node has sources that are marked "isDelayFree". This is because,
// we may eventually "contain" this node, in which case we don't want its children (which have
// already been marked "isDelayFree") to be handled that way when allocating.
- unsigned char hasDelayFreeSrc:1;
+ unsigned char hasDelayFreeSrc : 1;
// isTgtPref is set to true when we have a rmw op, where we would like the result to be allocated
// in the same register as op1.
- unsigned char isTgtPref:1;
+ unsigned char isTgtPref : 1;
// Whether a spilled second src can be treated as a contained operand
- unsigned char regOptional:1;
+ unsigned char regOptional : 1;
// Whether or not a node defines any registers, whether directly (for nodes where dstCount is non-zero)
// or indirectly (for contained nodes, which propagate the transitive closure of the registers
// defined by their inputs). Used during buildRefPositionsForNode in order to avoid unnecessary work.
- unsigned char definesAnyRegisters:1;
+ unsigned char definesAnyRegisters : 1;
#ifdef DEBUG
// isInitialized is set when the tree node is handled.
- unsigned char isInitialized:1;
+ unsigned char isInitialized : 1;
#endif
public:
@@ -138,14 +143,15 @@ public:
void Initialize(LinearScan* lsra, GenTree* node, LsraLocation location);
#ifdef DEBUG
- void dump(LinearScan *lsra);
+ void dump(LinearScan* lsra);
// This method checks to see whether the information has been initialized,
// and is in a consistent state
- bool IsValid(LinearScan *lsra)
+ bool IsValid(LinearScan* lsra)
{
return (isInitialized &&
- ((getSrcCandidates(lsra)|getInternalCandidates(lsra)|getDstCandidates(lsra)) & ~(RBM_ALLFLOAT|RBM_ALLINT)) == 0);
+ ((getSrcCandidates(lsra) | getInternalCandidates(lsra) | getDstCandidates(lsra)) &
+ ~(RBM_ALLFLOAT | RBM_ALLINT)) == 0);
}
#endif // DEBUG
};
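TreeNodeInfo packs its counts into unsigned char fields and its flags into one-bit bitfields, so each setter casts the incoming int and then asserts that the narrowed value still equals the original, catching overflow in debug builds. A stand-alone sketch of that setter pattern, leaving out the __declspec(property) plumbing; the Counts class below is illustrative, not part of the JIT.

#include <cassert>

class Counts
{
public:
    void setSrcCount(int count)
    {
        _srcCount = (unsigned char)count;
        assert(_srcCount == count); // fires in debug builds if 'count' does not fit in a byte
    }
    int getSrcCount() const
    {
        return _srcCount;
    }

private:
    unsigned char _srcCount = 0; // narrow storage, as in TreeNodeInfo::_srcCount
};

int main()
{
    Counts c;
    c.setSrcCount(3);
    assert(c.getSrcCount() == 3);
    return 0;
}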
diff --git a/src/jit/objectalloc.cpp b/src/jit/objectalloc.cpp
index 38a441c6f0..1921e20afc 100644
--- a/src/jit/objectalloc.cpp
+++ b/src/jit/objectalloc.cpp
@@ -71,9 +71,7 @@ void ObjectAllocator::MorphAllocObjNodes()
continue;
}
- for (GenTreeStmt* stmt = block->firstStmt();
- stmt;
- stmt = stmt->gtNextStmt)
+ for (GenTreeStmt* stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt)
{
GenTreePtr stmtExpr = stmt->gtStmtExpr;
GenTreePtr op2 = nullptr;
@@ -104,11 +102,11 @@ void ObjectAllocator::MorphAllocObjNodes()
assert(op1->OperGet() == GT_LCL_VAR);
assert(op1->TypeGet() == TYP_REF);
- assert(op2 != nullptr);
+ assert(op2 != nullptr);
assert(op2->OperGet() == GT_ALLOCOBJ);
GenTreeAllocObj* asAllocObj = op2->AsAllocObj();
- unsigned int lclNum = op1->AsLclVar()->GetLclNum();
+ unsigned int lclNum = op1->AsLclVar()->GetLclNum();
if (IsObjectStackAllocationEnabled() && CanAllocateLclVarOnStack(lclNum))
{
@@ -121,7 +119,7 @@ void ObjectAllocator::MorphAllocObjNodes()
// Propagate flags of op2 to its parent.
stmtExpr->gtOp.gtOp2 = op2;
- stmtExpr->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
+ stmtExpr->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
}
#ifdef DEBUG
else
@@ -153,8 +151,7 @@ GenTreePtr ObjectAllocator::MorphAllocObjNodeIntoHelperCall(GenTreeAllocObj* all
GenTreePtr op1 = allocObj->gtGetOp1();
- GenTreePtr helperCall = comp->fgMorphIntoHelperCall(
- allocObj, allocObj->gtNewHelper, comp->gtNewArgList(op1));
+ GenTreePtr helperCall = comp->fgMorphIntoHelperCall(allocObj, allocObj->gtNewHelper, comp->gtNewArgList(op1));
return helperCall;
}
@@ -173,7 +170,9 @@ GenTreePtr ObjectAllocator::MorphAllocObjNodeIntoHelperCall(GenTreeAllocObj* all
// Notes:
// Must update parents flags after this.
// This function can insert additional statements before stmt.
-GenTreePtr ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* allocObj, BasicBlock* block, GenTreeStmt* stmt)
+GenTreePtr ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* allocObj,
+ BasicBlock* block,
+ GenTreeStmt* stmt)
{
assert(allocObj != nullptr);
assert(m_AnalysisDone);
@@ -193,7 +192,7 @@ Compiler::fgWalkResult ObjectAllocator::AssertWhenAllocObjFoundVisitor(GenTreePt
{
GenTreePtr tree = *pTree;
- assert(tree != nullptr);
+ assert(tree != nullptr);
assert(tree->OperGet() != GT_ALLOCOBJ);
return Compiler::fgWalkResult::WALK_CONTINUE;
diff --git a/src/jit/objectalloc.h b/src/jit/objectalloc.h
index a9707f326d..bea6744024 100644
--- a/src/jit/objectalloc.h
+++ b/src/jit/objectalloc.h
@@ -37,8 +37,8 @@ protected:
private:
bool CanAllocateLclVarOnStack(unsigned int lclNum) const;
- void DoAnalysis();
- void MorphAllocObjNodes();
+ void DoAnalysis();
+ void MorphAllocObjNodes();
GenTreePtr MorphAllocObjNodeIntoHelperCall(GenTreeAllocObj* allocObj);
GenTreePtr MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* allocObj, BasicBlock* block, GenTreeStmt* stmt);
#ifdef DEBUG
@@ -48,22 +48,19 @@ private:
//===============================================================================
-inline
-ObjectAllocator::ObjectAllocator(Compiler* comp) :
- Phase(comp, "Allocate Objects", PHASE_ALLOCATE_OBJECTS),
- m_IsObjectStackAllocationEnabled(false),
- m_AnalysisDone(false)
+inline ObjectAllocator::ObjectAllocator(Compiler* comp)
+ : Phase(comp, "Allocate Objects", PHASE_ALLOCATE_OBJECTS)
+ , m_IsObjectStackAllocationEnabled(false)
+ , m_AnalysisDone(false)
{
}
-inline
-bool ObjectAllocator::IsObjectStackAllocationEnabled() const
+inline bool ObjectAllocator::IsObjectStackAllocationEnabled() const
{
return m_IsObjectStackAllocationEnabled;
}
-inline
-void ObjectAllocator::EnableObjectStackAllocation()
+inline void ObjectAllocator::EnableObjectStackAllocation()
{
m_IsObjectStackAllocationEnabled = true;
}
@@ -72,8 +69,7 @@ void ObjectAllocator::EnableObjectStackAllocation()
// CanAllocateLclVarOnStack: Returns true iff local variable can not
// potentially escape from the method and
// can be allocated on the stack.
-inline
-bool ObjectAllocator::CanAllocateLclVarOnStack(unsigned int lclNum) const
+inline bool ObjectAllocator::CanAllocateLclVarOnStack(unsigned int lclNum) const
{
assert(m_AnalysisDone);
// TODO-ObjectStackAllocation
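MorphAllocObjNodes walks each statement looking for an assignment whose RHS is GT_ALLOCOBJ and rewrites the allocation site either into a stack allocation (when object stack allocation is enabled and the local cannot escape) or into the usual new-object helper call. A simplified model of that decision follows; the booleans stand in for IsObjectStackAllocationEnabled() and CanAllocateLclVarOnStack(lclNum), the latter still a conservative TODO in this code, and AllocKind/MorphAllocSite are illustrative names only.

#include <cstdio>

enum class AllocKind { HelperCall, StackAlloc };

AllocKind MorphAllocSite(bool stackAllocEnabled, bool cannotEscape)
{
    if (stackAllocEnabled && cannotEscape)
    {
        return AllocKind::StackAlloc; // MorphAllocObjNodeIntoStackAlloc
    }
    return AllocKind::HelperCall;     // MorphAllocObjNodeIntoHelperCall
}

int main()
{
    // With stack allocation disabled (the constructor's default), every site
    // morphs into the helper call.
    std::printf("%d\n", MorphAllocSite(false, true) == AllocKind::HelperCall);
    return 0;
}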
diff --git a/src/jit/opcode.h b/src/jit/opcode.h
index d3189266f3..87741e97d9 100644
--- a/src/jit/opcode.h
+++ b/src/jit/opcode.h
@@ -5,7 +5,7 @@
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
-XX opcodes.h XX
+XX opcodes.h XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -17,15 +17,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "openum.h"
-extern const signed char opcodeSizes [];
-
+extern const signed char opcodeSizes[];
#if defined(DEBUG)
-extern const char * const opcodeNames [];
-extern const BYTE opcodeArgKinds [];
+extern const char* const opcodeNames[];
+extern const BYTE opcodeArgKinds[];
#endif
-
/*****************************************************************************/
#endif // _OPCODE_H_
/*****************************************************************************/
diff --git a/src/jit/optcse.cpp b/src/jit/optcse.cpp
index 424939d9a2..2075cfb282 100644
--- a/src/jit/optcse.cpp
+++ b/src/jit/optcse.cpp
@@ -21,37 +21,36 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
/* static */
-const size_t Compiler::s_optCSEhashSize = EXPSET_SZ*2;
-
+const size_t Compiler::s_optCSEhashSize = EXPSET_SZ * 2;
/*****************************************************************************
*
* We've found all the candidates, build the index for easy access.
*/
-void Compiler::optCSEstop()
+void Compiler::optCSEstop()
{
if (optCSECandidateCount == 0)
+ {
return;
+ }
- CSEdsc * dsc;
- CSEdsc * * ptr;
- unsigned cnt;
+ CSEdsc* dsc;
+ CSEdsc** ptr;
+ unsigned cnt;
optCSEtab = new (this, CMK_CSE) CSEdsc*[optCSECandidateCount]();
- for (cnt = s_optCSEhashSize, ptr = optCSEhash;
- cnt;
- cnt--, ptr++)
+ for (cnt = s_optCSEhashSize, ptr = optCSEhash; cnt; cnt--, ptr++)
{
for (dsc = *ptr; dsc; dsc = dsc->csdNextInBucket)
{
if (dsc->csdIndex)
{
noway_assert((unsigned)dsc->csdIndex <= optCSECandidateCount);
- if (optCSEtab[dsc->csdIndex-1] == 0)
+ if (optCSEtab[dsc->csdIndex - 1] == nullptr)
{
- optCSEtab[dsc->csdIndex-1] = dsc;
+ optCSEtab[dsc->csdIndex - 1] = dsc;
}
}
}
@@ -60,7 +59,7 @@ void Compiler::optCSEstop()
#ifdef DEBUG
for (cnt = 0; cnt < optCSECandidateCount; cnt++)
{
- noway_assert(optCSEtab[cnt] != NULL);
+ noway_assert(optCSEtab[cnt] != nullptr);
}
#endif
}
@@ -70,14 +69,13 @@ void Compiler::optCSEstop()
* Return the descriptor for the CSE with the given index.
*/
-inline
-Compiler::CSEdsc * Compiler::optCSEfindDsc(unsigned index)
+inline Compiler::CSEdsc* Compiler::optCSEfindDsc(unsigned index)
{
noway_assert(index);
noway_assert(index <= optCSECandidateCount);
- noway_assert(optCSEtab[index-1]);
+ noway_assert(optCSEtab[index - 1]);
- return optCSEtab[index-1];
+ return optCSEtab[index - 1];
}
/*****************************************************************************
@@ -85,7 +83,7 @@ Compiler::CSEdsc * Compiler::optCSEfindDsc(unsigned index)
* For a previously marked CSE, decrement the use counts and unmark it
*/
-void Compiler::optUnmarkCSE(GenTreePtr tree)
+void Compiler::optUnmarkCSE(GenTreePtr tree)
{
if (!IS_CSE_INDEX(tree->gtCSEnum))
{
@@ -95,18 +93,18 @@ void Compiler::optUnmarkCSE(GenTreePtr tree)
}
unsigned CSEnum = GET_CSE_INDEX(tree->gtCSEnum);
- CSEdsc * desc;
+ CSEdsc* desc;
// make sure it's been initialized
- noway_assert(optCSEweight <= BB_MAX_WEIGHT);
+ noway_assert(optCSEweight <= BB_MAX_WEIGHT);
- /* Is this a CSE use? */
- if (IS_CSE_USE(tree->gtCSEnum))
+ /* Is this a CSE use? */
+ if (IS_CSE_USE(tree->gtCSEnum))
{
- desc = optCSEfindDsc(CSEnum);
+ desc = optCSEfindDsc(CSEnum);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("Unmark CSE use #%02d at ", CSEnum);
printTreeID(tree);
@@ -118,22 +116,26 @@ void Compiler::optUnmarkCSE(GenTreePtr tree)
noway_assert(desc->csdUseCount > 0);
- if (desc->csdUseCount > 0)
+ if (desc->csdUseCount > 0)
{
desc->csdUseCount -= 1;
if (desc->csdUseWtCnt < optCSEweight)
- desc->csdUseWtCnt = 0;
+ {
+ desc->csdUseWtCnt = 0;
+ }
else
+ {
desc->csdUseWtCnt -= optCSEweight;
+ }
}
}
else
{
desc = optCSEfindDsc(CSEnum);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("Unmark CSE def #%02d at ", CSEnum);
printTreeID(tree);
@@ -145,25 +147,31 @@ void Compiler::optUnmarkCSE(GenTreePtr tree)
noway_assert(desc->csdDefCount > 0);
- if (desc->csdDefCount > 0)
+ if (desc->csdDefCount > 0)
{
desc->csdDefCount -= 1;
if (desc->csdDefWtCnt < optCSEweight)
- desc->csdDefWtCnt = 0;
+ {
+ desc->csdDefWtCnt = 0;
+ }
else
+ {
desc->csdDefWtCnt -= optCSEweight;
+ }
}
}
tree->gtCSEnum = NO_CSE;
}
-Compiler::fgWalkResult Compiler::optHasNonCSEChild(GenTreePtr * pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::optHasNonCSEChild(GenTreePtr* pTree, fgWalkData* data)
{
if (*pTree == data->pCallbackData)
+ {
return WALK_CONTINUE;
-
+ }
+
if ((*pTree)->gtFlags & GTF_DONT_CSE)
{
@@ -176,7 +184,9 @@ Compiler::fgWalkResult Compiler::optHasNonCSEChild(GenTreePtr * pTree, fgWa
// should not prevent tree's above the constant from becoming CSE's.
//
if ((*pTree)->gtOper == GT_CNS_INT)
+ {
return WALK_SKIP_SUBTREES;
+ }
return WALK_ABORT;
}
@@ -184,26 +194,30 @@ Compiler::fgWalkResult Compiler::optHasNonCSEChild(GenTreePtr * pTree, fgWa
return WALK_SKIP_SUBTREES;
}
-Compiler::fgWalkResult Compiler::optPropagateNonCSE(GenTreePtr *pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::optPropagateNonCSE(GenTreePtr* pTree, fgWalkData* data)
{
- GenTree *tree = *pTree;
+ GenTree* tree = *pTree;
Compiler* comp = data->compiler;
/* Calls get DONT_CSE implicitly */
if (tree->OperGet() == GT_CALL)
{
if (!IsSharedStaticHelper(tree))
+ {
tree->gtFlags |= GTF_DONT_CSE;
+ }
}
if ((tree->gtFlags & GTF_DONT_CSE) == 0)
{
/* Propagate the DONT_CSE flag from child to parent */
if (comp->fgWalkTreePre(&tree, optHasNonCSEChild, tree) == WALK_ABORT)
+ {
tree->gtFlags |= GTF_DONT_CSE;
+ }
}
- return WALK_CONTINUE;
+ return WALK_CONTINUE;
}
/*****************************************************************************
@@ -212,11 +226,11 @@ Compiler::fgWalkResult Compiler::optPropagateNonCSE(GenTreePtr *pTree, fgWa
*/
/* static */
-Compiler::fgWalkResult Compiler::optUnmarkCSEs(GenTreePtr *pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::optUnmarkCSEs(GenTreePtr* pTree, fgWalkData* data)
{
- GenTreePtr tree = *pTree;
- Compiler * comp = data->compiler;
- GenTreePtr keepList = (GenTreePtr)(data->pCallbackData);
+ GenTreePtr tree = *pTree;
+ Compiler* comp = data->compiler;
+ GenTreePtr keepList = (GenTreePtr)(data->pCallbackData);
// We may have a non-NULL side effect list that is being kept
//
@@ -226,15 +240,15 @@ Compiler::fgWalkResult Compiler::optUnmarkCSEs(GenTreePtr *pTree, fgWalkDat
while (keptTree->OperGet() == GT_COMMA)
{
assert(keptTree->OperKind() & GTK_SMPOP);
- GenTreePtr op1 = keptTree->gtOp.gtOp1;
+ GenTreePtr op1 = keptTree->gtOp.gtOp1;
GenTreePtr op2 = keptTree->gtGetOp2();
- // For the GT_COMMA case the op1 is part of the orginal CSE tree
+            // For the GT_COMMA case the op1 is part of the original CSE tree
// that is being kept because it contains some side-effect
//
if (tree == op1)
{
- // This tree and all of its sub trees are being kept
+ // This tree and all of its sub trees are being kept
return WALK_SKIP_SUBTREES;
}
@@ -245,7 +259,7 @@ Compiler::fgWalkResult Compiler::optUnmarkCSEs(GenTreePtr *pTree, fgWalkDat
}
if (tree == keptTree)
{
- // This tree and all of its sub trees are being kept
+ // This tree and all of its sub trees are being kept
return WALK_SKIP_SUBTREES;
}
}
@@ -257,10 +271,10 @@ Compiler::fgWalkResult Compiler::optUnmarkCSEs(GenTreePtr *pTree, fgWalkDat
/* Look for any local variable references */
- if (tree->gtOper == GT_LCL_VAR)
+ if (tree->gtOper == GT_LCL_VAR)
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
/* This variable ref is going away, decrease its ref counts */
@@ -269,21 +283,21 @@ Compiler::fgWalkResult Compiler::optUnmarkCSEs(GenTreePtr *pTree, fgWalkDat
varDsc = comp->lvaTable + lclNum;
// make sure it's been initialized
- assert(comp->optCSEweight <= BB_MAX_WEIGHT);
+ assert(comp->optCSEweight <= BB_MAX_WEIGHT);
/* Decrement its lvRefCnt and lvRefCntWtd */
varDsc->decRefCnts(comp->optCSEweight, comp);
}
- return WALK_CONTINUE;
+ return WALK_CONTINUE;
}
-Compiler::fgWalkResult Compiler::optCSE_MaskHelper(GenTreePtr *pTree, fgWalkData *walkData)
+Compiler::fgWalkResult Compiler::optCSE_MaskHelper(GenTreePtr* pTree, fgWalkData* walkData)
{
- GenTree* tree = *pTree;
- Compiler* comp = walkData->compiler;
- optCSE_MaskData* pUserData = (optCSE_MaskData*)(walkData->pCallbackData);
+ GenTree* tree = *pTree;
+ Compiler* comp = walkData->compiler;
+ optCSE_MaskData* pUserData = (optCSE_MaskData*)(walkData->pCallbackData);
if (IS_CSE_INDEX(tree->gtCSEnum))
{
@@ -299,20 +313,19 @@ Compiler::fgWalkResult Compiler::optCSE_MaskHelper(GenTreePtr *pTree, fgWal
}
}
- return WALK_CONTINUE;
+ return WALK_CONTINUE;
}
// This functions walks all the node for an given tree
// and return the mask of CSE defs and uses for the tree
//
-void Compiler::optCSE_GetMaskData(GenTreePtr tree, optCSE_MaskData* pMaskData)
+void Compiler::optCSE_GetMaskData(GenTreePtr tree, optCSE_MaskData* pMaskData)
{
pMaskData->CSE_defMask = 0;
pMaskData->CSE_useMask = 0;
fgWalkTreePre(&tree, optCSE_MaskHelper, (void*)pMaskData);
}
-
//------------------------------------------------------------------------
// optCSE_canSwap: Determine if the execution order of two nodes can be swapped.
//
@@ -327,13 +340,13 @@ void Compiler::optCSE_GetMaskData(GenTreePtr tree, optCSE_MaskDat
// Assumptions:
// 'op1' currently occurs before 'op2' in the execution order.
//
-bool Compiler::optCSE_canSwap(GenTree* op1, GenTree* op2)
+bool Compiler::optCSE_canSwap(GenTree* op1, GenTree* op2)
{
// op1 and op2 must be non-null.
assert(op1 != nullptr);
assert(op2 != nullptr);
- bool canSwap = true; // the default result unless proven otherwise.
+ bool canSwap = true; // the default result unless proven otherwise.
optCSE_MaskData op1MaskData;
optCSE_MaskData op2MaskData;
@@ -368,13 +381,13 @@ bool Compiler::optCSE_canSwap(GenTree* op1, GenTree* op2)
// Return true iff it safe to swap the execution order of the operands of 'tree',
// considering only the locations of the CSE defs and uses.
//
-bool Compiler::optCSE_canSwap(GenTreePtr tree)
+bool Compiler::optCSE_canSwap(GenTreePtr tree)
{
// We must have a binary treenode with non-null op1 and op2
assert((tree->OperKind() & GTK_SMPOP) != 0);
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
return optCSE_canSwap(op1, op2);
}
@@ -386,35 +399,41 @@ bool Compiler::optCSE_canSwap(GenTreePtr tree)
*/
/* static */
-int __cdecl Compiler::optCSEcostCmpEx(const void *op1, const void *op2)
+int __cdecl Compiler::optCSEcostCmpEx(const void* op1, const void* op2)
{
- CSEdsc * dsc1 = *(CSEdsc * *)op1;
- CSEdsc * dsc2 = *(CSEdsc * *)op2;
+ CSEdsc* dsc1 = *(CSEdsc**)op1;
+ CSEdsc* dsc2 = *(CSEdsc**)op2;
- GenTreePtr exp1 = dsc1->csdTree;
- GenTreePtr exp2 = dsc2->csdTree;
+ GenTreePtr exp1 = dsc1->csdTree;
+ GenTreePtr exp2 = dsc2->csdTree;
int diff;
-
- diff = (int) (exp2->gtCostEx - exp1->gtCostEx);
+
+ diff = (int)(exp2->gtCostEx - exp1->gtCostEx);
if (diff != 0)
+ {
return diff;
+ }
// Sort the higher Use Counts toward the top
- diff = (int) (dsc2->csdUseWtCnt - dsc1->csdUseWtCnt);
+ diff = (int)(dsc2->csdUseWtCnt - dsc1->csdUseWtCnt);
if (diff != 0)
+ {
return diff;
+ }
// With the same use count, Sort the lower Def Counts toward the top
- diff = (int) (dsc1->csdDefWtCnt - dsc2->csdDefWtCnt);
+ diff = (int)(dsc1->csdDefWtCnt - dsc2->csdDefWtCnt);
if (diff != 0)
+ {
return diff;
+ }
// In order to ensure that we have a stable sort, we break ties using the csdIndex
- return (int) (dsc1->csdIndex - dsc2->csdIndex);
+ return (int)(dsc1->csdIndex - dsc2->csdIndex);
}
/*****************************************************************************
@@ -424,32 +443,38 @@ int __cdecl Compiler::optCSEcostCmpEx(const void *op1, const void *op2)
*/
/* static */
-int __cdecl Compiler::optCSEcostCmpSz(const void *op1, const void *op2)
+int __cdecl Compiler::optCSEcostCmpSz(const void* op1, const void* op2)
{
- CSEdsc * dsc1 = *(CSEdsc * *)op1;
- CSEdsc * dsc2 = *(CSEdsc * *)op2;
+ CSEdsc* dsc1 = *(CSEdsc**)op1;
+ CSEdsc* dsc2 = *(CSEdsc**)op2;
- GenTreePtr exp1 = dsc1->csdTree;
- GenTreePtr exp2 = dsc2->csdTree;
+ GenTreePtr exp1 = dsc1->csdTree;
+ GenTreePtr exp2 = dsc2->csdTree;
int diff;
diff = (int)(exp2->gtCostSz - exp1->gtCostSz);
if (diff != 0)
+ {
return diff;
+ }
// Sort the higher Use Counts toward the top
diff = (int)(dsc2->csdUseCount - dsc1->csdUseCount);
if (diff != 0)
+ {
return diff;
+ }
// With the same use count, Sort the lower Def Counts toward the top
diff = (int)(dsc1->csdDefCount - dsc2->csdDefCount);
if (diff != 0)
+ {
return diff;
+ }
// In order to ensure that we have a stable sort, we break ties using the csdIndex
return (int)(dsc1->csdIndex - dsc2->csdIndex);
@@ -464,10 +489,10 @@ int __cdecl Compiler::optCSEcostCmpSz(const void *op1, const void *op2)
* Initialize the Value Number CSE tracking logic.
*/
-void Compiler::optValnumCSE_Init()
+void Compiler::optValnumCSE_Init()
{
-#ifdef DEBUG
- optCSEtab = NULL;
+#ifdef DEBUG
+ optCSEtab = nullptr;
#endif
/* Allocate and clear the hash bucket table */
@@ -475,7 +500,7 @@ void Compiler::optValnumCSE_Init()
optCSEhash = new (this, CMK_CSE) CSEdsc*[s_optCSEhashSize]();
optCSECandidateCount = 0;
- optDoCSE = false; // Stays false until we find duplicate CSE tree
+ optDoCSE = false; // Stays false until we find duplicate CSE tree
}
/*****************************************************************************
@@ -486,38 +511,36 @@ void Compiler::optValnumCSE_Init()
unsigned Compiler::optValnumCSE_Index(GenTreePtr tree, GenTreePtr stmt)
{
- unsigned key;
- unsigned hash;
- unsigned hval;
- CSEdsc * hashDsc;
+ unsigned key;
+ unsigned hash;
+ unsigned hval;
+ CSEdsc* hashDsc;
- ValueNum vnlib = tree->GetVN(VNK_Liberal);
+ ValueNum vnlib = tree->GetVN(VNK_Liberal);
/* Compute the hash value for the expression */
- key = (unsigned) vnlib;
+ key = (unsigned)vnlib;
- hash = key;
- hash *= (unsigned) (s_optCSEhashSize + 1);
- hash >>= 7;
+ hash = key;
+ hash *= (unsigned)(s_optCSEhashSize + 1);
+ hash >>= 7;
hval = hash % s_optCSEhashSize;
/* Look for a matching index in the hash table */
bool newCSE = false;
-
- for (hashDsc = optCSEhash[hval];
- hashDsc;
- hashDsc = hashDsc->csdNextInBucket)
+
+ for (hashDsc = optCSEhash[hval]; hashDsc; hashDsc = hashDsc->csdNextInBucket)
{
- if (hashDsc->csdHashValue == key)
+ if (hashDsc->csdHashValue == key)
{
- treeStmtLstPtr newElem;
+ treeStmtLstPtr newElem;
/* Have we started the list of matching nodes? */
- if (hashDsc->csdTreeList == 0)
+ if (hashDsc->csdTreeList == nullptr)
{
// Create the new element based upon the matching hashDsc element.
@@ -526,7 +549,7 @@ unsigned Compiler::optValnumCSE_Index(GenTreePtr tree, GenTreePtr stmt)
newElem->tslTree = hashDsc->csdTree;
newElem->tslStmt = hashDsc->csdStmt;
newElem->tslBlock = hashDsc->csdBlock;
- newElem->tslNext = 0;
+ newElem->tslNext = nullptr;
/* Start the list with the first CSE candidate recorded */
@@ -543,12 +566,12 @@ unsigned Compiler::optValnumCSE_Index(GenTreePtr tree, GenTreePtr stmt)
newElem->tslTree = tree;
newElem->tslStmt = stmt;
newElem->tslBlock = compCurBB;
- newElem->tslNext = 0;
+ newElem->tslNext = nullptr;
hashDsc->csdTreeLast->tslNext = newElem;
hashDsc->csdTreeLast = newElem;
- optDoCSE = true; // Found a duplicate CSE tree
+ optDoCSE = true; // Found a duplicate CSE tree
/* Have we assigned a CSE index? */
if (hashDsc->csdIndex == 0)
@@ -562,36 +585,36 @@ unsigned Compiler::optValnumCSE_Index(GenTreePtr tree, GenTreePtr stmt)
#endif
assert(FitsIn<signed char>(hashDsc->csdIndex));
- tree->gtCSEnum = ((signed char) hashDsc->csdIndex);
- return hashDsc->csdIndex;
+ tree->gtCSEnum = ((signed char)hashDsc->csdIndex);
+ return hashDsc->csdIndex;
}
}
- if (!newCSE)
+ if (!newCSE)
{
/* Not found, create a new entry (unless we have too many already) */
- if (optCSECandidateCount < MAX_CSE_CNT)
+ if (optCSECandidateCount < MAX_CSE_CNT)
{
hashDsc = new (this, CMK_CSE) CSEdsc;
- hashDsc->csdHashValue = key;
- hashDsc->csdIndex = 0;
+ hashDsc->csdHashValue = key;
+ hashDsc->csdIndex = 0;
hashDsc->csdLiveAcrossCall = 0;
- hashDsc->csdDefCount = 0;
- hashDsc->csdUseCount = 0;
- hashDsc->csdDefWtCnt = 0;
- hashDsc->csdUseWtCnt = 0;
-
- hashDsc->csdTree = tree;
- hashDsc->csdStmt = stmt;
- hashDsc->csdBlock = compCurBB;
- hashDsc->csdTreeList = 0;
+ hashDsc->csdDefCount = 0;
+ hashDsc->csdUseCount = 0;
+ hashDsc->csdDefWtCnt = 0;
+ hashDsc->csdUseWtCnt = 0;
+
+ hashDsc->csdTree = tree;
+ hashDsc->csdStmt = stmt;
+ hashDsc->csdBlock = compCurBB;
+ hashDsc->csdTreeList = nullptr;
/* Append the entry to the hash bucket */
-
+
hashDsc->csdNextInBucket = optCSEhash[hval];
- optCSEhash[hval] = hashDsc;
+ optCSEhash[hval] = hashDsc;
}
return 0;
}
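optValnumCSE_Index keys its lookup on the tree's liberal value number: the VN is hashed into one of s_optCSEhashSize buckets and the CSEdsc entries in a bucket are chained through csdNextInBucket, with csdHashValue compared against the key. A minimal stand-alone model of that lookup; the bucket count, CseDesc layout and function names are simplified placeholders rather than the JIT's own.

#include <cassert>

const unsigned HASH_SIZE = 128; // illustrative bucket count; the JIT uses s_optCSEhashSize = EXPSET_SZ * 2

// Same mixing steps optValnumCSE_Index applies to the liberal value number.
unsigned HashValueNumber(unsigned key)
{
    unsigned hash = key;
    hash *= (HASH_SIZE + 1);
    hash >>= 7;
    return hash % HASH_SIZE;
}

struct CseDesc
{
    unsigned key;          // the value number this candidate was hashed on (csdHashValue)
    CseDesc* nextInBucket; // hash-chain link (csdNextInBucket)
};

CseDesc* FindInBucket(CseDesc* const buckets[], unsigned key)
{
    for (CseDesc* dsc = buckets[HashValueNumber(key)]; dsc != nullptr; dsc = dsc->nextInBucket)
    {
        if (dsc->key == key)
        {
            return dsc; // an existing candidate with the same value number
        }
    }
    return nullptr; // the caller would allocate a new descriptor and link it into the bucket
}

int main()
{
    CseDesc  dsc{42, nullptr};
    CseDesc* buckets[HASH_SIZE] = {};
    buckets[HashValueNumber(42)] = &dsc;
    assert(FindInBucket(buckets, 42) == &dsc);
    assert(FindInBucket(buckets, 43) == nullptr);
    return 0;
}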
@@ -601,13 +624,15 @@ unsigned Compiler::optValnumCSE_Index(GenTreePtr tree, GenTreePtr stmt)
/* Create a new CSE (unless we have the maximum already) */
- if (optCSECandidateCount == MAX_CSE_CNT)
- return 0;
+ if (optCSECandidateCount == MAX_CSE_CNT)
+ {
+ return 0;
+ }
C_ASSERT((signed char)MAX_CSE_CNT == MAX_CSE_CNT);
- unsigned CSEindex = ++optCSECandidateCount;
- EXPSET_TP CSEmask = genCSEnum2bit(CSEindex);
+ unsigned CSEindex = ++optCSECandidateCount;
+ EXPSET_TP CSEmask = genCSEnum2bit(CSEindex);
/* Record the new CSE index in the hashDsc */
hashDsc->csdIndex = CSEindex;
@@ -616,41 +641,41 @@ unsigned Compiler::optValnumCSE_Index(GenTreePtr tree, GenTreePtr stmt)
noway_assert(hashDsc->csdTreeList->tslTree->gtCSEnum == 0);
assert(FitsIn<signed char>(CSEindex));
- hashDsc->csdTreeList->tslTree->gtCSEnum = ((signed char) CSEindex);
- noway_assert(((unsigned) hashDsc->csdTreeList->tslTree->gtCSEnum) == CSEindex);
+ hashDsc->csdTreeList->tslTree->gtCSEnum = ((signed char)CSEindex);
+ noway_assert(((unsigned)hashDsc->csdTreeList->tslTree->gtCSEnum) == CSEindex);
- tree->gtCSEnum = ((signed char) CSEindex);
+ tree->gtCSEnum = ((signed char)CSEindex);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("\nCSE candidate #%02u, vn=", CSEindex);
- vnPrint(vnlib, 0);
- printf(" cseMask=%s in BB%02u, [cost=%2u, size=%2u]: \n",
- genES2str(genCSEnum2bit(CSEindex)), compCurBB->bbNum, tree->gtCostEx, tree->gtCostSz);
+ vnPrint(vnlib, 0);
+ printf(" cseMask=%s in BB%02u, [cost=%2u, size=%2u]: \n", genES2str(genCSEnum2bit(CSEindex)),
+ compCurBB->bbNum, tree->gtCostEx, tree->gtCostSz);
gtDispTree(tree);
}
#endif // DEBUG
- return CSEindex;
+ return CSEindex;
}
}
/*****************************************************************************
*
- * Locate CSE candidates and assign indices to them
+ * Locate CSE candidates and assign indices to them
* return 0 if no CSE candidates were found
* Also initialize bbCseIn, bbCseout and bbCseGen sets for all blocks
*/
-unsigned Compiler::optValnumCSE_Locate()
+unsigned Compiler::optValnumCSE_Locate()
{
// Locate CSE candidates and assign them indices
- for (BasicBlock * block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
- GenTreePtr stmt;
- GenTreePtr tree;
+ GenTreePtr stmt;
+ GenTreePtr tree;
/* Make the block publicly available */
@@ -658,7 +683,7 @@ unsigned Compiler::optValnumCSE_Locate()
/* Ensure that the BBF_VISITED and BBF_MARKED flag are clear */
/* Everyone who uses these flags are required to clear afterwards */
- noway_assert((block->bbFlags & (BBF_VISITED|BBF_MARKED)) == 0);
+ noway_assert((block->bbFlags & (BBF_VISITED | BBF_MARKED)) == 0);
/* Walk the statement trees in this basic block */
for (stmt = block->FirstNonPhiDef(); stmt; stmt = stmt->gtNext)
@@ -669,26 +694,32 @@ unsigned Compiler::optValnumCSE_Locate()
for (tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
{
if (!optIsCSEcandidate(tree))
+ {
continue;
+ }
ValueNum vnlib = tree->GetVN(VNK_Liberal);
if (ValueNumStore::isReservedVN(vnlib))
+ {
continue;
+ }
// Don't CSE constant values, instead let the Value Number
// based Assertion Prop phase handle them.
//
if (vnStore->IsVNConstant(vnlib))
+ {
continue;
+ }
/* Assign an index to this expression */
- unsigned CSEindex = optValnumCSE_Index(tree, stmt);
-
+ unsigned CSEindex = optValnumCSE_Index(tree, stmt);
+
if (CSEindex != 0)
{
- noway_assert(((unsigned) tree->gtCSEnum ) == CSEindex);
+ noway_assert(((unsigned)tree->gtCSEnum) == CSEindex);
}
}
}
@@ -696,8 +727,10 @@ unsigned Compiler::optValnumCSE_Locate()
/* We're done if there were no interesting expressions */
- if (!optDoCSE)
+ if (!optDoCSE)
+ {
return 0;
+ }
/* We're finished building the expression lookup table */
@@ -711,12 +744,12 @@ unsigned Compiler::optValnumCSE_Locate()
* Compute each blocks bbCseGen
* This is the bitset that represents the CSEs that are generated within the block
*/
-void Compiler::optValnumCSE_InitDataFlow()
+void Compiler::optValnumCSE_InitDataFlow()
{
- for (BasicBlock * block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
- GenTreePtr stmt;
- GenTreePtr tree;
+ GenTreePtr stmt;
+ GenTreePtr tree;
/* Initialize the blocks's bbCseIn set */
@@ -728,7 +761,7 @@ void Compiler::optValnumCSE_InitDataFlow()
init_to_zero = true;
}
#if !CSE_INTO_HANDLERS
- else
+ else
{
if (bbIsHandlerBeg(block))
{
@@ -738,31 +771,32 @@ void Compiler::optValnumCSE_InitDataFlow()
}
#endif
if (init_to_zero)
- {
+ {
/* Initialize to {ZERO} prior to dataflow */
- block->bbCseIn = 0;
+ block->bbCseIn = 0;
}
else
{
/* Initialize to {ALL} prior to dataflow */
- block->bbCseIn = EXPSET_ALL;
+ block->bbCseIn = EXPSET_ALL;
}
- block->bbCseOut = EXPSET_ALL;
+ block->bbCseOut = EXPSET_ALL;
- /* Initialize to {ZERO} prior to locating the CSE candidates */
- block->bbCseGen = 0;
+ /* Initialize to {ZERO} prior to locating the CSE candidates */
+ block->bbCseGen = 0;
}
// We walk the set of CSE candidates and set the bit corresponding to the CSEindex
- // in the block's bbCseGen bitset
+ // in the block's bbCseGen bitset
//
for (unsigned cnt = 0; cnt < optCSECandidateCount; cnt++)
{
- CSEdsc* dsc = optCSEtab[cnt];
- unsigned CSEindex = dsc->csdIndex;
- treeStmtLstPtr lst = dsc->csdTreeList; noway_assert(lst);
+ CSEdsc* dsc = optCSEtab[cnt];
+ unsigned CSEindex = dsc->csdIndex;
+ treeStmtLstPtr lst = dsc->csdTreeList;
+ noway_assert(lst);
while (lst != nullptr)
{
@@ -775,7 +809,7 @@ void Compiler::optValnumCSE_InitDataFlow()
#ifdef DEBUG
// Dump out the bbCseGen information that we just created
//
- if (verbose)
+ if (verbose)
{
bool headerPrinted = false;
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
@@ -806,17 +840,19 @@ void Compiler::optValnumCSE_InitDataFlow()
class CSE_DataFlow
{
private:
- EXPSET_TP m_preMergeOut;
+ EXPSET_TP m_preMergeOut;
- Compiler* m_pCompiler;
+ Compiler* m_pCompiler;
public:
- CSE_DataFlow(Compiler* pCompiler)
- : m_pCompiler(pCompiler)
- {}
+ CSE_DataFlow(Compiler* pCompiler) : m_pCompiler(pCompiler)
+ {
+ }
Compiler* getCompiler()
- { return m_pCompiler; }
+ {
+ return m_pCompiler;
+ }
// At the start of the merge function of the dataflow equations, initialize premerge state (to detect changes.)
void StartMerge(BasicBlock* block)
@@ -834,7 +870,7 @@ public:
bool EndMerge(BasicBlock* block)
{
EXPSET_TP mergeOut = block->bbCseOut & (block->bbCseIn | block->bbCseGen);
- block->bbCseOut = mergeOut;
+ block->bbCseOut = mergeOut;
return (mergeOut != m_preMergeOut);
}
};
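CSE_DataFlow plugs into the forward dataflow driver: StartMerge snapshots bbCseOut, the merge accumulates availability from the predecessors into bbCseIn, and EndMerge narrows bbCseOut to bbCseOut & (bbCseIn | bbCseGen), reporting whether anything changed so the solver keeps iterating to a fixed point. The transfer step in isolation, over a plain 64-bit bitset; BlockSets and the free-standing EndMerge are illustrative simplifications, not the JIT's types.

#include <cstdint>

struct BlockSets
{
    uint64_t cseIn;  // CSEs available on entry to the block
    uint64_t cseOut; // CSEs available on exit; starts at the all-ones EXPSET_ALL estimate
    uint64_t cseGen; // CSEs generated within the block
};

// Mirrors CSE_DataFlow::EndMerge: returns true when the block's out set changed,
// i.e. the forward analysis has not yet reached its fixed point.
bool EndMerge(BlockSets& block)
{
    uint64_t preMergeOut = block.cseOut;
    block.cseOut &= (block.cseIn | block.cseGen);
    return block.cseOut != preMergeOut;
}

int main()
{
    BlockSets block{0x1, ~0ULL, 0x4};
    bool changed = EndMerge(block);               // out narrows from all-ones to 0x5
    return (changed && !EndMerge(block)) ? 0 : 1; // second pass is stable: no change
}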
@@ -852,7 +888,7 @@ public:
* bbCseOut - Computed CSEs that are available at exit to the block
*/
-void Compiler::optValnumCSE_DataFlow()
+void Compiler::optValnumCSE_DataFlow()
{
CSE_DataFlow cse(this);
@@ -861,15 +897,15 @@ void Compiler::optValnumCSE_DataFlow()
cse_flow.ForwardAnalysis(cse);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("\nAfter performing DataFlow for ValnumCSE's\n");
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
printf("BB%02u", block->bbNum);
- printf(" cseIn = %s", genES2str(block->bbCseIn ));
+ printf(" cseIn = %s", genES2str(block->bbCseIn));
printf(" cseOut = %s", genES2str(block->bbCseOut));
printf("\n");
}
@@ -885,7 +921,7 @@ void Compiler::optValnumCSE_DataFlow()
* CSE whether the CSE is a definition (if the CSE was not available)
* or if the CSE is a use (if the CSE was previously made available)
* The implementation iterates over all blocks setting 'available_cses'
- * to the CSEs that are available at input to the block.
+ * to the CSEs that are available at input to the block.
* When a CSE expression is encountered it is classified as either
* as a definition (if the CSE is not in the 'available_cses' set) or
* as a use (if the CSE is in the 'available_cses' set). If the CSE
@@ -893,7 +929,7 @@ void Compiler::optValnumCSE_DataFlow()
* In the Value Number based CSEs we do not need to have kill sets
*/
-void Compiler::optValnumCSE_Availablity()
+void Compiler::optValnumCSE_Availablity()
{
#ifdef DEBUG
if (verbose)
@@ -901,16 +937,16 @@ void Compiler::optValnumCSE_Availablity()
printf("Labeling the CSEs with Use/Def information\n");
}
#endif
- for (BasicBlock * block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
- GenTreePtr stmt;
- GenTreePtr tree;
+ GenTreePtr stmt;
+ GenTreePtr tree;
/* Make the block publicly available */
compCurBB = block;
- EXPSET_TP available_cses = block->bbCseIn;
+ EXPSET_TP available_cses = block->bbCseIn;
optCSEweight = block->getBBWeight(this);
@@ -923,15 +959,15 @@ void Compiler::optValnumCSE_Availablity()
/* We walk the tree in the forwards direction (bottom up) */
for (tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
{
- if (IS_CSE_INDEX(tree->gtCSEnum))
+ if (IS_CSE_INDEX(tree->gtCSEnum))
{
- EXPSET_TP mask = genCSEnum2bit(tree->gtCSEnum);
- CSEdsc * desc = optCSEfindDsc(tree->gtCSEnum);
- unsigned stmw = block->getBBWeight(this);
+ EXPSET_TP mask = genCSEnum2bit(tree->gtCSEnum);
+ CSEdsc* desc = optCSEfindDsc(tree->gtCSEnum);
+ unsigned stmw = block->getBBWeight(this);
/* Is this expression available here? */
- if (available_cses & mask)
+ if (available_cses & mask)
{
/* This is a CSE use */
@@ -951,23 +987,21 @@ void Compiler::optValnumCSE_Availablity()
desc->csdDefCount += 1;
desc->csdDefWtCnt += stmw;
-
+
/* Mark the node as a CSE definition */
-
+
tree->gtCSEnum = TO_CSE_DEF(tree->gtCSEnum);
-
+
/* This CSE will be available after this def */
-
- available_cses |= mask;
+ available_cses |= mask;
}
#ifdef DEBUG
if (verbose && IS_CSE_INDEX(tree->gtCSEnum))
{
printf("BB%02u ", block->bbNum);
printTreeID(tree);
- printf(" %s of CSE #%02u [weight=%s]\n",
- IS_CSE_USE(tree->gtCSEnum) ? "Use" : "Def",
+ printf(" %s of CSE #%02u [weight=%s]\n", IS_CSE_USE(tree->gtCSEnum) ? "Use" : "Def",
GET_CSE_INDEX(tree->gtCSEnum), refCntWtd2str(stmw));
}
#endif
@@ -977,36 +1011,38 @@ void Compiler::optValnumCSE_Availablity()
}
}
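The labeling rule implemented above amounts to: the first time a CSE index is seen while its bit is not yet in 'available_cses' it is a def and the bit becomes available, and every later sighting is a use. An illustrative sketch of just that rule, using a uint64_t bit set and a made-up sequence of indices instead of the JIT's tree walk, is:

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<unsigned> occurrences = {3, 7, 3, 3, 7}; // CSE indices in tree-walk order
    uint64_t available = 0;                              // models 'available_cses'

    for (unsigned cseIndex : occurrences)
    {
        uint64_t mask = 1ULL << cseIndex; // models genCSEnum2bit
        if (available & mask)
        {
            printf("CSE #%02u: use\n", cseIndex); // already available at this point
        }
        else
        {
            printf("CSE #%02u: def\n", cseIndex);
            available |= mask; // the CSE is available after this def
        }
    }
    return 0;
}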
-// The following class handles the CSE heuristics
-// we use a complex set of heuristic rules
-// to determine if it is likely to be profitable to perform this CSE
+// The following class handles the CSE heuristics
+// we use a complex set of heuristic rules
+// to determine if it is likely to be profitable to perform this CSE
//
class CSE_Heuristic
{
- Compiler* m_pCompiler;
- unsigned m_addCSEcount;
-
- unsigned aggressiveRefCnt;
- unsigned moderateRefCnt;
- unsigned enregCount; // count of the number of enregisterable variables
- bool largeFrame;
- bool hugeFrame;
- Compiler::codeOptimize codeOptKind;
- Compiler::CSEdsc** sortTab;
- size_t sortSiz;
+ Compiler* m_pCompiler;
+ unsigned m_addCSEcount;
+
+ unsigned aggressiveRefCnt;
+ unsigned moderateRefCnt;
+ unsigned enregCount; // count of the number of enregisterable variables
+ bool largeFrame;
+ bool hugeFrame;
+ Compiler::codeOptimize codeOptKind;
+ Compiler::CSEdsc** sortTab;
+ size_t sortSiz;
#ifdef DEBUG
- CLRRandom m_cseRNG;
- unsigned m_bias;
+ CLRRandom m_cseRNG;
+ unsigned m_bias;
#endif
public:
- CSE_Heuristic(Compiler* pCompiler)
- : m_pCompiler(pCompiler)
+ CSE_Heuristic(Compiler* pCompiler) : m_pCompiler(pCompiler)
{
codeOptKind = m_pCompiler->compCodeOpt();
}
- Compiler::codeOptimize CodeOptKind() { return codeOptKind; }
+ Compiler::codeOptimize CodeOptKind()
+ {
+ return codeOptKind;
+ }
// Perform the Initialization step for our CSE Heuristics
// determine the various cut off values to use for
@@ -1016,9 +1052,9 @@ public:
//
void Initialize()
{
- m_addCSEcount = 0; /* Count of the number of LclVars for CSEs that we added */
+ m_addCSEcount = 0; /* Count of the number of LclVars for CSEs that we added */
- // Record the weighted ref count of the last "for sure" callee saved LclVar
+ // Record the weighted ref count of the last "for sure" callee saved LclVar
aggressiveRefCnt = 0;
moderateRefCnt = 0;
enregCount = 0;
@@ -1034,23 +1070,25 @@ public:
}
#endif
- unsigned frameSize = 0;
- unsigned regAvailEstimate = ((CNT_CALLEE_ENREG * 3) + (CNT_CALLEE_TRASH * 2) + 1);
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned frameSize = 0;
+ unsigned regAvailEstimate = ((CNT_CALLEE_ENREG * 3) + (CNT_CALLEE_TRASH * 2) + 1);
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = m_pCompiler->lvaTable;
- lclNum < m_pCompiler->lvaCount;
- lclNum++, varDsc++)
+ for (lclNum = 0, varDsc = m_pCompiler->lvaTable; lclNum < m_pCompiler->lvaCount; lclNum++, varDsc++)
{
if (varDsc->lvRefCnt == 0)
+ {
continue;
+ }
- bool onStack = (regAvailEstimate == 0); // true when it is likely that this LclVar will have a stack home
+ bool onStack = (regAvailEstimate == 0); // true when it is likely that this LclVar will have a stack home
// Some LclVars always have stack homes
if ((varDsc->lvDoNotEnregister) || (varDsc->lvType == TYP_LCLBLK))
+ {
onStack = true;
+ }
#ifdef _TARGET_X86_
// Treat floating point and 64 bit integers as always on the stack
@@ -1064,13 +1102,13 @@ public:
}
else
{
- // For the purposes of estimating the frameSize we
+ // For the purposes of estimating the frameSize we
// will consider this LclVar as being enregistered.
// Now we reduce the remaining regAvailEstimate by
// an appropriate amount.
if (varDsc->lvRefCnt <= 2)
{
- // a single use single def LclVar only uses 1
+ // a single use single def LclVar only uses 1
regAvailEstimate -= 1;
}
else
@@ -1091,10 +1129,10 @@ public:
if (frameSize > 0x080)
{
// We likely have a large stack frame.
- // Thus we might need to use large displacements when loading or storing
+ // Thus we might need to use large displacements when loading or storing
// to CSE LclVars that are not enregistered
largeFrame = true;
- break; // early out, we don't need to keep increasing frameSize
+ break; // early out, we don't need to keep increasing frameSize
}
#else // _TARGET_ARM_
if (frameSize > 0x0400)
@@ -1112,11 +1150,13 @@ public:
unsigned sortNum = 0;
while (sortNum < m_pCompiler->lvaTrackedCount)
{
- LclVarDsc* varDsc = m_pCompiler->lvaRefSorted[sortNum++];
- var_types varTyp = varDsc->TypeGet();
+ LclVarDsc* varDsc = m_pCompiler->lvaRefSorted[sortNum++];
+ var_types varTyp = varDsc->TypeGet();
- if (varDsc->lvDoNotEnregister)
+ if (varDsc->lvDoNotEnregister)
+ {
continue;
+ }
if (!varTypeIsFloating(varTyp))
{
@@ -1126,22 +1166,30 @@ public:
{
varTyp = TYP_STRUCT;
}
- enregCount += genTypeStSz(varTyp);
+ enregCount += genTypeStSz(varTyp);
}
- if ((aggressiveRefCnt == 0) && (enregCount > (CNT_CALLEE_ENREG*3/2)))
+ if ((aggressiveRefCnt == 0) && (enregCount > (CNT_CALLEE_ENREG * 3 / 2)))
{
if (CodeOptKind() == Compiler::SMALL_CODE)
- aggressiveRefCnt = varDsc->lvRefCnt+BB_UNITY_WEIGHT;
+ {
+ aggressiveRefCnt = varDsc->lvRefCnt + BB_UNITY_WEIGHT;
+ }
else
- aggressiveRefCnt = varDsc->lvRefCntWtd+BB_UNITY_WEIGHT;
+ {
+ aggressiveRefCnt = varDsc->lvRefCntWtd + BB_UNITY_WEIGHT;
+ }
}
if ((moderateRefCnt == 0) && (enregCount > ((CNT_CALLEE_ENREG * 3) + (CNT_CALLEE_TRASH * 2))))
{
if (CodeOptKind() == Compiler::SMALL_CODE)
+ {
moderateRefCnt = varDsc->lvRefCnt;
+ }
else
+ {
moderateRefCnt = varDsc->lvRefCntWtd;
+ }
}
}
unsigned mult = 3;
@@ -1152,10 +1200,10 @@ public:
}
aggressiveRefCnt = max(BB_UNITY_WEIGHT * mult, aggressiveRefCnt);
- moderateRefCnt = max((BB_UNITY_WEIGHT * mult) / 2, moderateRefCnt);
-
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+ moderateRefCnt = max((BB_UNITY_WEIGHT * mult) / 2, moderateRefCnt);
+
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("\n");
printf("Aggressive CSE Promotion cutoff is %u\n", aggressiveRefCnt);
@@ -1164,49 +1212,53 @@ public:
printf("We have a %s frame\n", hugeFrame ? "huge" : (largeFrame ? "large" : "small"));
}
#endif
-
}
void SortCandidates()
{
- /* Create an expression table sorted by decreasing cost */
+ /* Create an expression table sorted by decreasing cost */
sortTab = new (m_pCompiler, CMK_CSE) Compiler::CSEdsc*[m_pCompiler->optCSECandidateCount];
sortSiz = m_pCompiler->optCSECandidateCount * sizeof(*sortTab);
memcpy(sortTab, m_pCompiler->optCSEtab, sortSiz);
if (CodeOptKind() == Compiler::SMALL_CODE)
+ {
qsort(sortTab, m_pCompiler->optCSECandidateCount, sizeof(*sortTab), m_pCompiler->optCSEcostCmpSz);
+ }
else
+ {
qsort(sortTab, m_pCompiler->optCSECandidateCount, sizeof(*sortTab), m_pCompiler->optCSEcostCmpEx);
+ }
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("\nSorted CSE candidates:\n");
/* Print out the CSE candidates */
for (unsigned cnt = 0; cnt < m_pCompiler->optCSECandidateCount; cnt++)
{
- Compiler::CSEdsc* dsc = sortTab[cnt];
- GenTreePtr expr = dsc->csdTree;
+ Compiler::CSEdsc* dsc = sortTab[cnt];
+ GenTreePtr expr = dsc->csdTree;
- unsigned def;
- unsigned use;
+ unsigned def;
+ unsigned use;
if (CodeOptKind() == Compiler::SMALL_CODE)
{
- def = dsc->csdDefCount; // def count
- use = dsc->csdUseCount; // use count (excluding the implicit uses at defs)
+ def = dsc->csdDefCount; // def count
+ use = dsc->csdUseCount; // use count (excluding the implicit uses at defs)
}
else
{
- def = dsc->csdDefWtCnt; // weighted def count
- use = dsc->csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)
+ def = dsc->csdDefWtCnt; // weighted def count
+ use = dsc->csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)
}
- printf("CSE #%02u,cseMask=%s,useCnt=%d: [def=%3u, use=%3u", dsc->csdIndex, genES2str(genCSEnum2bit(dsc->csdIndex)), dsc->csdUseCount, def, use);
+ printf("CSE #%02u,cseMask=%s,useCnt=%d: [def=%3u, use=%3u", dsc->csdIndex,
+ genES2str(genCSEnum2bit(dsc->csdIndex)), dsc->csdUseCount, def, use);
printf("] :: ");
- m_pCompiler->gtDispTree(expr, 0, nullptr, true);
+ m_pCompiler->gtDispTree(expr, nullptr, nullptr, true);
}
printf("\n");
}
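The sort above is a plain qsort over descriptor pointers with a cost comparator, so the most expensive candidates are considered first. A self-contained illustration with a simplified descriptor type (not the JIT's CSEdsc) is:

#include <cstdio>
#include <cstdlib>

struct Candidate
{
    unsigned index;
    unsigned cost;
};

// Orders candidates by decreasing cost, as the CSE heuristic wants.
static int CostCmp(const void* p1, const void* p2)
{
    const Candidate* c1 = static_cast<const Candidate*>(p1);
    const Candidate* c2 = static_cast<const Candidate*>(p2);
    return (c1->cost < c2->cost) - (c1->cost > c2->cost);
}

int main()
{
    Candidate tab[] = {{1, 4}, {2, 9}, {3, 2}};
    qsort(tab, sizeof(tab) / sizeof(tab[0]), sizeof(tab[0]), CostCmp);
    for (const Candidate& c : tab)
    {
        printf("CSE #%02u cost=%u\n", c.index, c.cost);
    }
    return 0;
}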
@@ -1221,51 +1273,73 @@ public:
//
class CSE_Candidate
{
- CSE_Heuristic* m_context;
- Compiler::CSEdsc* m_CseDsc;
+ CSE_Heuristic* m_context;
+ Compiler::CSEdsc* m_CseDsc;
- unsigned m_cseIndex;
+ unsigned m_cseIndex;
- unsigned m_defCount;
- unsigned m_useCount;
+ unsigned m_defCount;
+ unsigned m_useCount;
- unsigned m_Cost;
- unsigned m_Size;
+ unsigned m_Cost;
+ unsigned m_Size;
public:
- CSE_Candidate(CSE_Heuristic* context, Compiler::CSEdsc* cseDsc)
- : m_context(context)
- , m_CseDsc(cseDsc)
+ CSE_Candidate(CSE_Heuristic* context, Compiler::CSEdsc* cseDsc) : m_context(context), m_CseDsc(cseDsc)
{
m_cseIndex = m_CseDsc->csdIndex;
}
- Compiler::CSEdsc* CseDsc() { return m_CseDsc; }
- unsigned CseIndex() { return m_cseIndex; }
- unsigned DefCount() { return m_defCount; }
- unsigned UseCount() { return m_useCount; }
+ Compiler::CSEdsc* CseDsc()
+ {
+ return m_CseDsc;
+ }
+ unsigned CseIndex()
+ {
+ return m_cseIndex;
+ }
+ unsigned DefCount()
+ {
+ return m_defCount;
+ }
+ unsigned UseCount()
+ {
+ return m_useCount;
+ }
// TODO-CQ: With ValNum CSE's the Expr and its cost can vary.
- GenTreePtr Expr() { return m_CseDsc->csdTree; }
- unsigned Cost() { return m_Cost; }
- unsigned Size() { return m_Size; }
+ GenTreePtr Expr()
+ {
+ return m_CseDsc->csdTree;
+ }
+ unsigned Cost()
+ {
+ return m_Cost;
+ }
+ unsigned Size()
+ {
+ return m_Size;
+ }
- bool LiveAcrossCall() { return (m_CseDsc->csdLiveAcrossCall != 0); }
+ bool LiveAcrossCall()
+ {
+ return (m_CseDsc->csdLiveAcrossCall != 0);
+ }
void InitializeCounts()
{
if (m_context->CodeOptKind() == Compiler::SMALL_CODE)
{
- m_Cost = Expr()->gtCostSz; // the estimated code size
- m_Size = Expr()->gtCostSz; // always the gtCostSz
- m_defCount = m_CseDsc->csdDefCount; // def count
- m_useCount = m_CseDsc->csdUseCount; // use count (excluding the implicit uses at defs)
+ m_Cost = Expr()->gtCostSz; // the estimated code size
+ m_Size = Expr()->gtCostSz; // always the gtCostSz
+ m_defCount = m_CseDsc->csdDefCount; // def count
+ m_useCount = m_CseDsc->csdUseCount; // use count (excluding the implicit uses at defs)
}
else
{
- m_Cost = Expr()->gtCostEx; // the estimated execution cost
- m_Size = Expr()->gtCostSz; // always the gtCostSz
- m_defCount = m_CseDsc->csdDefWtCnt; // weighted def count
- m_useCount = m_CseDsc->csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)
+ m_Cost = Expr()->gtCostEx; // the estimated execution cost
+ m_Size = Expr()->gtCostSz; // always the gtCostSz
+ m_defCount = m_CseDsc->csdDefWtCnt; // weighted def count
+ m_useCount = m_CseDsc->csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)
}
}
};
@@ -1285,9 +1359,9 @@ public:
// Operation:
// A debug stress only method that returns "1" with probability (P)
// defined by:
- //
+ //
// P = (COMPlus_JitStressBiasedCSE / 100) (or)
- // P = (random(100) / 100) when COMPlus_JitStress is specified and
+ // P = (random(100) / 100) when COMPlus_JitStress is specified and
// COMPlus_JitStressBiasedCSE is unspecified.
//
// When specified, the bias is reinterpreted as a decimal number between 0
@@ -1309,8 +1383,7 @@ public:
}
// Obtain the bias value and reinterpret as decimal.
- unsigned bias = ReinterpretHexAsDecimal(
- JitConfig.JitStressBiasedCSE());
+ unsigned bias = ReinterpretHexAsDecimal(JitConfig.JitStressBiasedCSE());
// Invalid value, check if JitStress is ON.
if (bias > 100)
@@ -1327,7 +1400,7 @@ public:
// Generate a number between (0, 99) and if the generated
// number is smaller than bias, then perform CSE.
unsigned gen = m_cseRNG.Next(100);
- int ret = (gen < bias) ? 1 : -1;
+ int ret = (gen < bias) ? 1 : -1;
if (m_pCompiler->verbose)
{
@@ -1353,7 +1426,7 @@ public:
{
bool result = false;
-#ifdef DEBUG
+#ifdef DEBUG
int stressResult = optConfigBiasedCSE();
if (stressResult != 0)
{
@@ -1367,11 +1440,11 @@ public:
}
#endif
- /*
+ /*
Our calculation is based on the following cost estimate formula
Existing costs are:
-
+
(def + use) * cost
           If we introduce a CSE temp at each definition and
@@ -1381,8 +1454,8 @@ public:
We must estimate the values to use for cse-def-cost and cse-use-cost
- If we are able to enregister the CSE then the cse-use-cost is one
- and cse-def-cost is either zero or one. Zero in the case where
+ If we are able to enregister the CSE then the cse-use-cost is one
+ and cse-def-cost is either zero or one. Zero in the case where
we needed to evaluate the def into a register and we can use that
register as the CSE temp as well.
@@ -1393,10 +1466,10 @@ public:
for both cse-def-cost and cse-use-cost and then we never introduce
a CSE that could pessimize the execution time of the method.
- If we want to be more moderate we use (IND_COST_EX + 1) / 2 as the
+ If we want to be more moderate we use (IND_COST_EX + 1) / 2 as the
values for both cse-def-cost and cse-use-cost.
- If we want to be aggressive we use 1 as the values for both
+ If we want to be aggressive we use 1 as the values for both
cse-def-cost and cse-use-cost.
           If we believe that the CSE is very valuable in terms of weighted ref counts
@@ -1413,11 +1486,11 @@ public:
unsigned cse_def_cost;
unsigned cse_use_cost;
-
- unsigned no_cse_cost = 0;
- unsigned yes_cse_cost = 0;
+
+ unsigned no_cse_cost = 0;
+ unsigned yes_cse_cost = 0;
unsigned extra_yes_cost = 0;
- unsigned extra_no_cost = 0;
+ unsigned extra_no_cost = 0;
// The 'cseRefCnt' is the RefCnt that we will have if we promote this CSE into a new LclVar
        // Each CSE Def will contain two Refs and each CSE Use will have one Ref of this new LclVar
@@ -1427,121 +1500,123 @@ public:
{
if (cseRefCnt >= aggressiveRefCnt)
{
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("Aggressive CSE Promotion (%u >= %u)\n", cseRefCnt, aggressiveRefCnt);
}
#endif
cse_def_cost = 1;
- cse_use_cost = 1;
+ cse_use_cost = 1;
if (candidate->LiveAcrossCall() != 0)
{
if (largeFrame)
{
cse_def_cost++;
- cse_use_cost++;
+ cse_use_cost++;
}
if (hugeFrame)
{
cse_def_cost++;
- cse_use_cost++;
+ cse_use_cost++;
}
}
}
else if (largeFrame)
{
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("Codesize CSE Promotion (large frame)\n");
}
#endif
#ifdef _TARGET_XARCH_
/* The following formula is good choice when optimizing CSE for SMALL_CODE */
- cse_def_cost = 6; // mov [EBP-0x00001FC],reg
- cse_use_cost = 5; // [EBP-0x00001FC]
-#else // _TARGET_ARM_
+ cse_def_cost = 6; // mov [EBP-0x00001FC],reg
+ cse_use_cost = 5; // [EBP-0x00001FC]
+#else // _TARGET_ARM_
if (hugeFrame)
{
- cse_def_cost = 12; // movw/movt r10 and str reg,[sp+r10]
+ cse_def_cost = 12; // movw/movt r10 and str reg,[sp+r10]
cse_use_cost = 12;
}
else
{
- cse_def_cost = 8; // movw r10 and str reg,[sp+r10]
+ cse_def_cost = 8; // movw r10 and str reg,[sp+r10]
cse_use_cost = 8;
}
#endif
}
else // small frame
{
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("Codesize CSE Promotion (small frame)\n");
}
#endif
#ifdef _TARGET_XARCH_
/* The following formula is good choice when optimizing CSE for SMALL_CODE */
- cse_def_cost = 3; // mov [EBP-1C],reg
- cse_use_cost = 2; // [EBP-1C]
-#else // _TARGET_ARM_
- cse_def_cost = 2; // str reg,[sp+0x9c]
- cse_use_cost = 2; // ldr reg,[sp+0x9c]
+ cse_def_cost = 3; // mov [EBP-1C],reg
+ cse_use_cost = 2; // [EBP-1C]
+#else // _TARGET_ARM_
+ cse_def_cost = 2; // str reg,[sp+0x9c]
+ cse_use_cost = 2; // ldr reg,[sp+0x9c]
#endif
}
}
- else // not SMALL_CODE ...
+ else // not SMALL_CODE ...
{
if (cseRefCnt >= aggressiveRefCnt)
{
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("Aggressive CSE Promotion (%u >= %u)\n", cseRefCnt, aggressiveRefCnt);
}
#endif
cse_def_cost = 1;
- cse_use_cost = 1;
+ cse_use_cost = 1;
}
else if (cseRefCnt >= moderateRefCnt)
{
if (candidate->LiveAcrossCall() == 0)
{
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
- printf("Moderate CSE Promotion (CSE never live at call) (%u >= %u)\n", cseRefCnt, moderateRefCnt);
+ printf("Moderate CSE Promotion (CSE never live at call) (%u >= %u)\n", cseRefCnt,
+ moderateRefCnt);
}
#endif
cse_def_cost = 2;
- cse_use_cost = 1;
+ cse_use_cost = 1;
}
else // candidate is live across call
{
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("Moderate CSE Promotion (%u >= %u)\n", cseRefCnt, moderateRefCnt);
}
#endif
- cse_def_cost = 2;
- cse_use_cost = 2;
+ cse_def_cost = 2;
+ cse_use_cost = 2;
extra_yes_cost = BB_UNITY_WEIGHT * 2; // Extra cost in case we have to spill/restore a caller
// saved register
}
}
else // Conservative CSE promotion
- {
+ {
if (candidate->LiveAcrossCall() == 0)
{
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
- printf("Conservative CSE Promotion (CSE never live at call) (%u < %u)\n", cseRefCnt, moderateRefCnt);
+ printf("Conservative CSE Promotion (CSE never live at call) (%u < %u)\n", cseRefCnt,
+ moderateRefCnt);
}
#endif
cse_def_cost = 2;
@@ -1549,16 +1624,16 @@ public:
}
else // candidate is live across call
{
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("Conservative CSE Promotion (%u < %u)\n", cseRefCnt, moderateRefCnt);
}
#endif
- cse_def_cost = 3;
- cse_use_cost = 3;
- extra_yes_cost = BB_UNITY_WEIGHT * 4; // Extra cost in case we have to spill/restore a caller
- // saved register
+ cse_def_cost = 3;
+ cse_use_cost = 3;
+ extra_yes_cost = BB_UNITY_WEIGHT * 4; // Extra cost in case we have to spill/restore a caller
+ // saved register
}
// If we have maxed out lvaTrackedCount then this CSE may end up as an untracked variable
@@ -1572,12 +1647,12 @@ public:
if (largeFrame)
{
cse_def_cost++;
- cse_use_cost++;
+ cse_use_cost++;
}
if (hugeFrame)
{
cse_def_cost++;
- cse_use_cost++;
+ cse_use_cost++;
}
}
@@ -1592,7 +1667,7 @@ public:
/* no_cse_cost is the cost estimate when we decide not to make a CSE */
/* yes_cse_cost is the cost estimate when we decide to make a CSE */
-
+
no_cse_cost = candidate->UseCount() * candidate->Cost();
yes_cse_cost = (candidate->DefCount() * cse_def_cost) + (candidate->UseCount() * cse_use_cost);
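As a worked illustration of this comparison with invented numbers: with DefCount = 2, UseCount = 10, Cost = 4, cse_def_cost = 2 and cse_use_cost = 1, no_cse_cost is 10 * 4 = 40 and yes_cse_cost is 2 * 2 + 10 * 1 = 14, so the candidate comfortably passes the savings check that follows.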
@@ -1602,39 +1677,41 @@ public:
yes_cse_cost *= 2;
}
#endif
- no_cse_cost += extra_no_cost;
+ no_cse_cost += extra_no_cost;
yes_cse_cost += extra_yes_cost;
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
- printf("cseRefCnt=%d, aggressiveRefCnt=%d, moderateRefCnt=%d\n", cseRefCnt, aggressiveRefCnt, moderateRefCnt);
- printf("defCnt=%d, useCnt=%d, cost=%d, size=%d\n", candidate->DefCount(), candidate->UseCount(), candidate->Cost(), candidate->Size());
- printf("def_cost=%d, use_cost=%d, extra_no_cost=%d, extra_yes_cost=%d\n", cse_def_cost, cse_use_cost, extra_no_cost, extra_yes_cost);
-
- printf("CSE cost savings check (%u >= %u) %s\n",
- no_cse_cost, yes_cse_cost,
- (no_cse_cost >= yes_cse_cost) ? "passes" : "fails");
+ printf("cseRefCnt=%d, aggressiveRefCnt=%d, moderateRefCnt=%d\n", cseRefCnt, aggressiveRefCnt,
+ moderateRefCnt);
+ printf("defCnt=%d, useCnt=%d, cost=%d, size=%d\n", candidate->DefCount(), candidate->UseCount(),
+ candidate->Cost(), candidate->Size());
+ printf("def_cost=%d, use_cost=%d, extra_no_cost=%d, extra_yes_cost=%d\n", cse_def_cost, cse_use_cost,
+ extra_no_cost, extra_yes_cost);
+
+ printf("CSE cost savings check (%u >= %u) %s\n", no_cse_cost, yes_cse_cost,
+ (no_cse_cost >= yes_cse_cost) ? "passes" : "fails");
}
#endif
// Should we make this candidate into a CSE?
// Is the yes cost less than the no cost
//
- if (yes_cse_cost <= no_cse_cost)
+ if (yes_cse_cost <= no_cse_cost)
{
- result = true; // Yes make this a CSE
+ result = true; // Yes make this a CSE
}
else
{
/* In stress mode we will make some extra CSEs */
if (no_cse_cost > 0)
{
- int percentage = (no_cse_cost * 100) / yes_cse_cost;
+ int percentage = (no_cse_cost * 100) / yes_cse_cost;
if (m_pCompiler->compStressCompile(Compiler::STRESS_MAKE_CSE, percentage))
{
- result = true; // Yes make this a CSE
+ result = true; // Yes make this a CSE
}
}
}
@@ -1648,7 +1725,7 @@ public:
// and will replace all of the CSE uses with reads of the "cse0" LclVar
//
void PerformCSE(CSE_Candidate* successfulCandidate)
- {
+ {
unsigned cseRefCnt = (successfulCandidate->DefCount() * 2) + successfulCandidate->UseCount();
if (successfulCandidate->LiveAcrossCall() != 0)
@@ -1664,45 +1741,51 @@ public:
#endif
if (cseRefCnt > aggressiveRefCnt)
+ {
aggressiveRefCnt += incr;
+ }
if (cseRefCnt > moderateRefCnt)
- moderateRefCnt += (incr/2);
+ {
+ moderateRefCnt += (incr / 2);
+ }
}
/* Introduce a new temp for the CSE */
// we will create a long lifetime temp for the new cse LclVar
- unsigned cseLclVarNum = m_pCompiler->lvaGrabTemp(false DEBUGARG("ValNumCSE"));
- var_types cseLclVarTyp = genActualType(successfulCandidate->Expr()->TypeGet());
+ unsigned cseLclVarNum = m_pCompiler->lvaGrabTemp(false DEBUGARG("ValNumCSE"));
+ var_types cseLclVarTyp = genActualType(successfulCandidate->Expr()->TypeGet());
if (varTypeIsStruct(cseLclVarTyp))
{
m_pCompiler->lvaSetStruct(cseLclVarNum, m_pCompiler->gtGetStructHandle(successfulCandidate->Expr()), false);
}
- m_pCompiler->lvaTable[cseLclVarNum].lvType = cseLclVarTyp;
+ m_pCompiler->lvaTable[cseLclVarNum].lvType = cseLclVarTyp;
m_pCompiler->lvaTable[cseLclVarNum].lvIsCSE = true;
- m_addCSEcount++; // Record that we created a new LclVar for use as a CSE temp
+ m_addCSEcount++; // Record that we created a new LclVar for use as a CSE temp
m_pCompiler->optCSEcount++;
/* Walk all references to this CSE, adding an assignment
to the CSE temp to all defs and changing all refs to
a simple use of the CSE temp.
-
+
We also unmark nested CSE's for all uses.
*/
- Compiler::treeStmtLstPtr lst; lst = successfulCandidate->CseDsc()->csdTreeList; noway_assert(lst);
+ Compiler::treeStmtLstPtr lst;
+ lst = successfulCandidate->CseDsc()->csdTreeList;
+ noway_assert(lst);
#define QQQ_CHECK_CSE_VNS 0
#if QQQ_CHECK_CSE_VNS
assert(lst != NULL);
ValueNum firstVN = lst->tslTree->gtVN;
- lst = lst->tslNext;
- bool allSame = true;
+ lst = lst->tslNext;
+ bool allSame = true;
while (lst != NULL)
{
- if (IS_CSE_INDEX(lst->tslTree->gtCSEnum))
+ if (IS_CSE_INDEX(lst->tslTree->gtCSEnum))
{
if (lst->tslTree->gtVN != firstVN)
{
@@ -1714,14 +1797,16 @@ public:
}
if (!allSame)
{
- lst = dsc->csdTreeList;
+ lst = dsc->csdTreeList;
GenTreePtr firstTree = lst->tslTree;
printf("In %s, CSE (oper = %s, type = %s) has differing VNs: ", info.compFullName,
- GenTree::NodeName(firstTree->OperGet()), varTypeName(firstTree->TypeGet()));
- while (lst != NULL) {
- if (IS_CSE_INDEX(lst->tslTree->gtCSEnum))
+ GenTree::NodeName(firstTree->OperGet()), varTypeName(firstTree->TypeGet()));
+ while (lst != NULL)
+ {
+ if (IS_CSE_INDEX(lst->tslTree->gtCSEnum))
{
- printf("0x%x(%s,%d) ", lst->tslTree, IS_CSE_USE(lst->tslTree->gtCSEnum) ? "u" : "d", lst->tslTree->gtVN);
+ printf("0x%x(%s,%d) ", lst->tslTree, IS_CSE_USE(lst->tslTree->gtCSEnum) ? "u" : "d",
+ lst->tslTree->gtVN);
}
lst = lst->tslNext;
}
@@ -1729,24 +1814,27 @@ public:
}
lst = dsc->csdTreeList;
#endif
-
+
do
{
/* Process the next node in the list */
- GenTreePtr exp = lst->tslTree;
- GenTreePtr stm = lst->tslStmt; noway_assert(stm->gtOper == GT_STMT);
- BasicBlock * blk = lst->tslBlock;
+ GenTreePtr exp = lst->tslTree;
+ GenTreePtr stm = lst->tslStmt;
+ noway_assert(stm->gtOper == GT_STMT);
+ BasicBlock* blk = lst->tslBlock;
/* Advance to the next node in the list */
lst = lst->tslNext;
- // Assert if we used DEBUG_DESTROY_NODE on this CSE exp
+ // Assert if we used DEBUG_DESTROY_NODE on this CSE exp
assert(exp->gtOper != GT_COUNT);
-
+
/* Ignore the node if it's not been marked as a CSE */
if (!IS_CSE_INDEX(exp->gtCSEnum))
+ {
continue;
-
+ }
+
/* Make sure we update the weighted ref count correctly */
m_pCompiler->optCSEweight = blk->getBBWeight(m_pCompiler);
@@ -1757,17 +1845,17 @@ public:
// This will contain the replacement tree for exp
// It will either be the CSE def or CSE ref
//
- GenTreePtr cse = nullptr;
- bool isDef;
- FieldSeqNode* fldSeq = nullptr;
- bool hasZeroMapAnnotation = m_pCompiler->GetZeroOffsetFieldMap()->Lookup(exp, &fldSeq);
-
- if (IS_CSE_USE(exp->gtCSEnum))
+ GenTreePtr cse = nullptr;
+ bool isDef;
+ FieldSeqNode* fldSeq = nullptr;
+ bool hasZeroMapAnnotation = m_pCompiler->GetZeroOffsetFieldMap()->Lookup(exp, &fldSeq);
+
+ if (IS_CSE_USE(exp->gtCSEnum))
{
/* This is a use of the CSE */
isDef = false;
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("\nCSE #%02u use at ", exp->gtCSEnum);
Compiler::printTreeID(exp);
@@ -1776,20 +1864,20 @@ public:
#endif // DEBUG
/* check for and collect any SIDE_EFFECTS */
- GenTreePtr sideEffList = NULL;
+ GenTreePtr sideEffList = nullptr;
if (exp->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS)
{
- // Extract any side effects from exp
+ // Extract any side effects from exp
//
m_pCompiler->gtExtractSideEffList(exp, &sideEffList, GTF_PERSISTENT_SIDE_EFFECTS_IN_CSE);
}
- // We will replace the CSE ref with a new tree
+ // We will replace the CSE ref with a new tree
// this is typically just a simple use of the new CSE LclVar
//
- cse = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp);
- cse->gtVNPair = exp->gtVNPair; // assign the proper Value Numbers
+ cse = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp);
+ cse->gtVNPair = exp->gtVNPair; // assign the proper Value Numbers
#ifdef DEBUG
cse->gtDebugFlags |= GTF_DEBUG_VAR_CSE_REF;
#endif // DEBUG
@@ -1799,11 +1887,12 @@ public:
if (sideEffList)
{
noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT);
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("\nThe CSE has side effects! Extracting side effects...\n");
- m_pCompiler->gtDispTree(sideEffList); printf("\n");
+ m_pCompiler->gtDispTree(sideEffList);
+ printf("\n");
}
#endif
@@ -1814,19 +1903,19 @@ public:
while ((curSideEff->OperGet() == GT_COMMA) || (curSideEff->OperGet() == GT_ASG))
{
- GenTreePtr op1 = curSideEff->gtOp.gtOp1;
- GenTreePtr op2 = curSideEff->gtOp.gtOp2;
+ GenTreePtr op1 = curSideEff->gtOp.gtOp1;
+ GenTreePtr op2 = curSideEff->gtOp.gtOp2;
- ValueNumPair op1vnp;
- ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair op1vnp;
+ ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(op1->gtVNPair, &op1vnp, &op1Xvnp);
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op1Xvnp);
- curSideEff = op2;
+ curSideEff = op2;
}
// We may have inserted a narrowing cast during a previous remorph
- // and it will not have a value number.
+ // and it will not have a value number.
if ((curSideEff->OperGet() == GT_CAST) && !curSideEff->gtVNPair.BothDefined())
{
// The inserted cast will have no exceptional effects
@@ -1835,8 +1924,8 @@ public:
curSideEff = curSideEff->gtOp.gtOp1;
}
- ValueNumPair op2vnp;
- ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair op2vnp;
+ ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(curSideEff->gtVNPair, &op2vnp, &op2Xvnp);
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp);
@@ -1845,16 +1934,16 @@ public:
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp);
/* Create a comma node with the sideEffList as op1 */
- cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, sideEffList, cseVal);
+ cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, sideEffList, cseVal);
cse->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp);
}
- exp->gtCSEnum = NO_CSE; // clear the gtCSEnum field
+ exp->gtCSEnum = NO_CSE; // clear the gtCSEnum field
/* Unmark any nested CSE's in the sub-operands */
- // But we do need to communicate the side effect list to optUnmarkCSEs
- // as any part of the 'exp' tree that is in the sideEffList is preserved
+ // But we do need to communicate the side effect list to optUnmarkCSEs
+ // as any part of the 'exp' tree that is in the sideEffList is preserved
// and is not deleted and does not have its ref counts decremented
//
m_pCompiler->optValnumCSE_UnmarkCSEs(exp, sideEffList);
@@ -1863,33 +1952,32 @@ public:
{
/* This is a def of the CSE */
isDef = true;
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("\nCSE #%02u def at ", GET_CSE_INDEX(exp->gtCSEnum));
Compiler::printTreeID(exp);
- printf(" replaced in BB%02u with def of V%02u\n",
- blk->bbNum, cseLclVarNum);
+ printf(" replaced in BB%02u with def of V%02u\n", blk->bbNum, cseLclVarNum);
}
#endif // DEBUG
- exp->gtCSEnum = NO_CSE; // clear the gtCSEnum field
+ exp->gtCSEnum = NO_CSE; // clear the gtCSEnum field
- GenTreePtr val = exp;
+ GenTreePtr val = exp;
/* Create an assignment of the value to the temp */
- GenTreePtr asg = m_pCompiler->gtNewTempAssign(cseLclVarNum, val);
+ GenTreePtr asg = m_pCompiler->gtNewTempAssign(cseLclVarNum, val);
// assign the proper Value Numbers
- asg->gtVNPair.SetBoth(ValueNumStore::VNForVoid()); // The GT_ASG node itself is $VN.Void
- asg->gtOp.gtOp1->gtVNPair = val->gtVNPair; // The dest op is the same as 'val'
+ asg->gtVNPair.SetBoth(ValueNumStore::VNForVoid()); // The GT_ASG node itself is $VN.Void
+ asg->gtOp.gtOp1->gtVNPair = val->gtVNPair; // The dest op is the same as 'val'
noway_assert(asg->gtOp.gtOp1->gtOper == GT_LCL_VAR);
- noway_assert(asg->gtOp.gtOp2 == val);
-
+ noway_assert(asg->gtOp.gtOp2 == val);
+
/* Create a reference to the CSE temp */
- GenTreePtr ref = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp);
- ref->gtVNPair = val->gtVNPair; // The new 'ref' is the same as 'val'
+ GenTreePtr ref = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp);
+ ref->gtVNPair = val->gtVNPair; // The new 'ref' is the same as 'val'
// If it has a zero-offset field seq, copy annotation to the ref
if (hasZeroMapAnnotation)
@@ -1898,10 +1986,10 @@ public:
}
/* Create a comma node for the CSE assignment */
- cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, asg, ref);
- cse->gtVNPair = ref->gtVNPair; // The comma's value is the same as 'val'
- // as the assignment to the CSE LclVar
- // cannot add any new exceptions
+ cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, asg, ref);
+ cse->gtVNPair = ref->gtVNPair; // The comma's value is the same as 'val'
+ // as the assignment to the CSE LclVar
+ // cannot add any new exceptions
}
// Increment ref count for the CSE ref
@@ -1909,17 +1997,17 @@ public:
if (isDef)
{
- // Also increment ref count for the CSE assignment
+ // Also increment ref count for the CSE assignment
m_pCompiler->lvaTable[cseLclVarNum].incRefCnts(blk->getBBWeight(m_pCompiler), m_pCompiler);
}
- // Walk the statement 'stm' and find the pointer
+ // Walk the statement 'stm' and find the pointer
            // in the tree that points to 'exp'
//
- GenTreePtr * link = m_pCompiler->gtFindLink(stm, exp);
+ GenTreePtr* link = m_pCompiler->gtFindLink(stm, exp);
-#ifdef DEBUG
- if (link == NULL)
+#ifdef DEBUG
+ if (link == nullptr)
{
printf("\ngtFindLink failed: stm=");
Compiler::printTreeID(stm);
@@ -1927,9 +2015,11 @@ public:
Compiler::printTreeID(exp);
printf("\n");
printf("stm =");
- m_pCompiler->gtDispTree(stm); printf("\n");
+ m_pCompiler->gtDispTree(stm);
+ printf("\n");
printf("exp =");
- m_pCompiler->gtDispTree(exp); printf("\n");
+ m_pCompiler->gtDispTree(exp);
+ printf("\n");
}
#endif // DEBUG
@@ -1950,29 +2040,28 @@ public:
/* re-morph the statement */
m_pCompiler->fgMorphBlockStmt(blk, stm DEBUGARG("optValnumCSE"));
- }
- while (lst != nullptr);
+ } while (lst != nullptr);
}
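In source-level terms, the def/use rewrite performed above corresponds roughly to the textbook transformation sketched below (a hand-written illustration, not JIT output; in the JIT the def is kept in place as a GT_COMMA of the assignment and a use, so the surrounding tree does not move):

// Before: the subexpression a * b is evaluated twice.
int before(int a, int b, int c, int d)
{
    int x = (a * b) + c;
    int y = (a * b) + d;
    return x + y;
}

// After: the first occurrence defines a new temp (the CSE LclVar) and the
// later occurrence becomes a simple use of that temp.
int after(int a, int b, int c, int d)
{
    int cse0 = a * b;    // CSE def
    int x    = cse0 + c;
    int y    = cse0 + d; // CSE use
    return x + y;
}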
- // Consider each of the CSE candidates and if the CSE passes
+ // Consider each of the CSE candidates and if the CSE passes
// the PromotionCheck then transform the CSE by calling PerformCSE
//
void ConsiderCandidates()
{
/* Consider each CSE candidate, in order of decreasing cost */
- unsigned cnt = m_pCompiler->optCSECandidateCount;
- Compiler::CSEdsc* * ptr = sortTab;
+ unsigned cnt = m_pCompiler->optCSECandidateCount;
+ Compiler::CSEdsc** ptr = sortTab;
for (; (cnt > 0); cnt--, ptr++)
{
- Compiler::CSEdsc* dsc = *ptr;
- CSE_Candidate candidate(this, dsc);
+ Compiler::CSEdsc* dsc = *ptr;
+ CSE_Candidate candidate(this, dsc);
candidate.InitializeCounts();
if (candidate.UseCount() == 0)
{
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
printf("Skipped CSE #%02u because use count is 0\n", candidate.CseIndex());
}
@@ -1980,31 +2069,31 @@ public:
continue;
}
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
- printf("\nConsidering CSE #%02u [def=%2u, use=%2u, cost=%2u] CSE Expression:\n",
- candidate.CseIndex(), candidate.DefCount(), candidate.UseCount(), candidate.Cost());
+ printf("\nConsidering CSE #%02u [def=%2u, use=%2u, cost=%2u] CSE Expression:\n", candidate.CseIndex(),
+ candidate.DefCount(), candidate.UseCount(), candidate.Cost());
m_pCompiler->gtDispTree(candidate.Expr());
printf("\n");
}
#endif
- if ((dsc->csdDefCount <= 0) || (dsc->csdUseCount == 0))
+ if ((dsc->csdDefCount <= 0) || (dsc->csdUseCount == 0))
{
// If we reach this point, then the CSE def was incorrectly marked or the
// block with this use is unreachable. So skip and go to the next CSE.
// Without the "continue", we'd generate bad code in retail.
// Commented out a noway_assert(false) here due to bug: 3290124.
- // The problem is if there is sub-graph that is not reachable from the
+ // The problem is if there is sub-graph that is not reachable from the
// entry point, the CSE flags propagated, would be incorrect for it.
continue;
}
bool doCSE = PromotionCheck(&candidate);
-#ifdef DEBUG
- if (m_pCompiler->verbose)
+#ifdef DEBUG
+ if (m_pCompiler->verbose)
{
if (doCSE)
{
@@ -2041,13 +2130,13 @@ public:
* Routine for performing the Value Number based CSE using our heuristics
*/
-void Compiler::optValnumCSE_Heuristic()
+void Compiler::optValnumCSE_Heuristic()
{
#ifdef DEBUG
if (verbose)
{
printf("\n************ Trees at start of optValnumCSE_Heuristic()\n");
- fgDumpTrees(fgFirstBB, NULL);
+ fgDumpTrees(fgFirstBB, nullptr);
printf("\n");
}
#endif // DEBUG
@@ -2064,15 +2153,15 @@ void Compiler::optValnumCSE_Heuristic()
*
* Routine to unmark any CSEs contained within a tree
 *  - optionally a 'keepList' can be provided to specify a list of trees that will be kept
- *
+ *
*/
void Compiler::optValnumCSE_UnmarkCSEs(GenTreePtr deadTree, GenTreePtr keepList)
{
assert(optValnumCSE_phase);
- // We need to communicate the 'keepList' to optUnmarkCSEs
- // as any part of the 'deadTree' tree that is in the keepList is preserved
+ // We need to communicate the 'keepList' to optUnmarkCSEs
+ // as any part of the 'deadTree' tree that is in the keepList is preserved
// and is not deleted and does not have its ref counts decremented
// We communicate this value using the walkData.pCallbackData field
//
@@ -2085,17 +2174,21 @@ void Compiler::optValnumCSE_UnmarkCSEs(GenTreePtr deadTree, GenTreePtr keepList)
* Perform common sub-expression elimination.
*/
-void Compiler::optOptimizeValnumCSEs()
+void Compiler::optOptimizeValnumCSEs()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("\n*************** In optOptimizeValnumCSEs()\n");
+ }
if (optConfigDisableCSE())
- return; // Disabled by JitNoCSE
+ {
+ return; // Disabled by JitNoCSE
+ }
#endif
- optValnumCSE_phase = true;
+ optValnumCSE_phase = true;
/* Initialize the expression tracking logic */
@@ -2109,9 +2202,9 @@ void Compiler::optOptimizeValnumCSEs()
optValnumCSE_InitDataFlow();
- optValnumCSE_DataFlow();
+ optValnumCSE_DataFlow();
- optValnumCSE_Availablity();
+ optValnumCSE_Availablity();
optValnumCSE_Heuristic();
}
@@ -2120,41 +2213,42 @@ void Compiler::optOptimizeValnumCSEs()
}
/*****************************************************************************/
-#endif // FEATURE_VALNUM_CSE
+#endif // FEATURE_VALNUM_CSE
/*****************************************************************************/
-
/*****************************************************************************
*
* The following determines whether the given expression is a worthy CSE
* candidate.
*/
-bool Compiler::optIsCSEcandidate(GenTreePtr tree)
+bool Compiler::optIsCSEcandidate(GenTreePtr tree)
{
/* No good if the expression contains side effects or if it was marked as DONT CSE */
- if (tree->gtFlags & (GTF_ASG|GTF_DONT_CSE))
+ if (tree->gtFlags & (GTF_ASG | GTF_DONT_CSE))
{
- return false;
+ return false;
}
-
+
/* The only reason a TYP_STRUCT tree might occur is as an argument to
GT_ADDR. It will never be actually materialized. So ignore them.
Also TYP_VOIDs */
- var_types type = tree->TypeGet();
- genTreeOps oper = tree->OperGet();
+ var_types type = tree->TypeGet();
+ genTreeOps oper = tree->OperGet();
// TODO-1stClassStructs: Enable CSE for struct types (depends on either transforming
// to use regular assignments, or handling copyObj.
if (varTypeIsStruct(type) || type == TYP_VOID)
+ {
return false;
+ }
#ifdef _TARGET_X86_
if (type == TYP_FLOAT)
{
// TODO-X86-CQ: Revisit this
- // Don't CSE a TYP_FLOAT on x86 as we currently can only enregister doubles
+ // Don't CSE a TYP_FLOAT on x86 as we currently can only enregister doubles
return false;
}
#else
@@ -2166,129 +2260,133 @@ bool Compiler::optIsCSEcandidate(GenTreePtr tree)
}
#endif
- unsigned cost;
+ unsigned cost;
if (compCodeOpt() == SMALL_CODE)
+ {
cost = tree->gtCostSz;
+ }
else
+ {
cost = tree->gtCostEx;
+ }
/* Don't bother if the potential savings are very low */
- if (cost < MIN_CSE_COST)
+ if (cost < MIN_CSE_COST)
{
- return false;
+ return false;
}
#if !CSE_CONSTS
/* Don't bother with constants */
- if (tree->OperKind() & GTK_CONST)
- return false;
+ if (tree->OperKind() & GTK_CONST)
+ return false;
#endif
/* Check for some special cases */
switch (oper)
{
- case GT_CALL:
- // If we have a simple helper call with no other persistent side-effects
- // then we allow this tree to be a CSE candidate
- //
- if (gtTreeHasSideEffects(tree, GTF_PERSISTENT_SIDE_EFFECTS_IN_CSE) == false)
- {
+ case GT_CALL:
+ // If we have a simple helper call with no other persistent side-effects
+ // then we allow this tree to be a CSE candidate
+ //
+ if (gtTreeHasSideEffects(tree, GTF_PERSISTENT_SIDE_EFFECTS_IN_CSE) == false)
+ {
+ return true;
+ }
+ else
+ {
+ // Calls generally cannot be CSE-ed
+ return false;
+ }
+
+ case GT_IND:
+ // TODO-CQ: Review this...
+ /* We try to cse GT_ARR_ELEM nodes instead of GT_IND(GT_ARR_ELEM).
+ Doing the first allows cse to also kick in for code like
+ "GT_IND(GT_ARR_ELEM) = GT_IND(GT_ARR_ELEM) + xyz", whereas doing
+ the second would not allow it */
+
+ return (tree->gtOp.gtOp1->gtOper != GT_ARR_ELEM);
+
+ case GT_CNS_INT:
+ case GT_CNS_LNG:
+ case GT_CNS_DBL:
+ case GT_CNS_STR:
+ return true; // We reach here only when CSE_CONSTS is enabled
+
+ case GT_ARR_ELEM:
+ case GT_ARR_LENGTH:
+ case GT_CLS_VAR:
+ case GT_LCL_FLD:
return true;
- }
- else
- {
- // Calls generally cannot be CSE-ed
- return false;
- }
-
- case GT_IND:
- // TODO-CQ: Review this...
- /* We try to cse GT_ARR_ELEM nodes instead of GT_IND(GT_ARR_ELEM).
- Doing the first allows cse to also kick in for code like
- "GT_IND(GT_ARR_ELEM) = GT_IND(GT_ARR_ELEM) + xyz", whereas doing
- the second would not allow it */
-
- return (tree->gtOp.gtOp1->gtOper != GT_ARR_ELEM);
-
-
- case GT_CNS_INT:
- case GT_CNS_LNG:
- case GT_CNS_DBL:
- case GT_CNS_STR:
- return true; // We reach here only when CSE_CONSTS is enabled
-
- case GT_ARR_ELEM:
- case GT_ARR_LENGTH:
- case GT_CLS_VAR:
- case GT_LCL_FLD:
- return true;
-
- case GT_LCL_VAR:
- return false; // Can't CSE a volatile LCL_VAR
-
- case GT_NEG:
- case GT_NOT:
- case GT_CAST:
- return true; // CSE these Unary Operators
-
- case GT_SUB:
- case GT_DIV:
- case GT_MOD:
- case GT_UDIV:
- case GT_UMOD:
- case GT_OR:
- case GT_AND:
- case GT_XOR:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROL:
- case GT_ROR:
- return true; // CSE these Binary Operators
-
- case GT_ADD: // Check for ADDRMODE flag on these Binary Operators
- case GT_MUL:
- case GT_LSH:
- if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)
- return false;
-
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
- return true; // Also CSE these Comparison Operators
-
- case GT_INTRINSIC:
- return true; // Intrinsics
-
- case GT_COMMA:
- return true; // Allow GT_COMMA nodes to be CSE-ed.
-
- case GT_COLON:
- case GT_QMARK:
- case GT_NOP:
- case GT_RETURN:
- return false; // Currently the only special nodes that we hit
- // that we know that we don't want to CSE
-
- default:
- break; // Any new nodes that we might add later...
+
+ case GT_LCL_VAR:
+ return false; // Can't CSE a volatile LCL_VAR
+
+ case GT_NEG:
+ case GT_NOT:
+ case GT_CAST:
+ return true; // CSE these Unary Operators
+
+ case GT_SUB:
+ case GT_DIV:
+ case GT_MOD:
+ case GT_UDIV:
+ case GT_UMOD:
+ case GT_OR:
+ case GT_AND:
+ case GT_XOR:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROL:
+ case GT_ROR:
+ return true; // CSE these Binary Operators
+
+ case GT_ADD: // Check for ADDRMODE flag on these Binary Operators
+ case GT_MUL:
+ case GT_LSH:
+ if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)
+ {
+ return false;
+ }
+
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
+ return true; // Also CSE these Comparison Operators
+
+ case GT_INTRINSIC:
+ return true; // Intrinsics
+
+ case GT_COMMA:
+ return true; // Allow GT_COMMA nodes to be CSE-ed.
+
+ case GT_COLON:
+ case GT_QMARK:
+ case GT_NOP:
+ case GT_RETURN:
+ return false; // Currently the only special nodes that we hit
+ // that we know that we don't want to CSE
+
+ default:
+ break; // Any new nodes that we might add later...
}
return false;
}
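The early cost gate in optIsCSEcandidate picks the size-based cost under SMALL_CODE and the execution-based cost otherwise, then rejects anything below MIN_CSE_COST. The sketch below only illustrates that gate; the MIN_CSE_COST value here is an assumed placeholder, since the real constant is defined elsewhere in the JIT:

#include <cstdio>

// Placeholder value for illustration; the real MIN_CSE_COST comes from the JIT headers.
static const unsigned MIN_CSE_COST = 2;

static bool PassesCostGate(bool smallCode, unsigned costSz, unsigned costEx)
{
    unsigned cost = smallCode ? costSz : costEx; // gtCostSz vs gtCostEx
    return cost >= MIN_CSE_COST;                 // otherwise too cheap to be worth a CSE
}

int main()
{
    printf("%d\n", (int)PassesCostGate(true, 1, 5));  // small-code view: rejected
    printf("%d\n", (int)PassesCostGate(false, 1, 5)); // speed view: accepted
    return 0;
}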
-
#ifdef DEBUG
//
-// A Debug only method that allows you to control whether the CSE logic is enabled for this method.
+// A Debug only method that allows you to control whether the CSE logic is enabled for this method.
//
// If this method returns false then the CSE phase should be performed.
// If the method returns true then the CSE phase should be skipped.
//
-bool Compiler::optConfigDisableCSE()
+bool Compiler::optConfigDisableCSE()
{
// Next check if COMPlus_JitNoCSE is set and applies to this method
//
@@ -2299,29 +2397,28 @@ bool Compiler::optConfigDisableCSE()
unsigned methodCount = Compiler::jitTotalMethodCompiled;
if ((jitNoCSE & 0xF000000) == 0xF000000)
{
- unsigned methodCountMask = methodCount & 0xFFF;
+ unsigned methodCountMask = methodCount & 0xFFF;
unsigned bitsZero = (jitNoCSE >> 12) & 0xFFF;
- unsigned bitsOne = (jitNoCSE >> 0) & 0xFFF;
-
- if ((( methodCountMask & bitsOne) == bitsOne) &&
- ((~methodCountMask & bitsZero) == bitsZero))
+ unsigned bitsOne = (jitNoCSE >> 0) & 0xFFF;
+
+ if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero))
{
if (verbose)
{
printf(" Disabled by JitNoCSE methodCountMask\n");
}
- return true; // The CSE phase for this method is disabled
+ return true; // The CSE phase for this method is disabled
}
}
- else if (jitNoCSE <= (methodCount+1))
+ else if (jitNoCSE <= (methodCount + 1))
{
if (verbose)
{
printf(" Disabled by JitNoCSE > methodCount\n");
}
- return true; // The CSE phase for this method is disabled
+ return true; // The CSE phase for this method is disabled
}
}
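The ones/zeros mask form of COMPlus_JitNoCSE checked above can be exercised in isolation. The sketch below mirrors the visible predicate; the example config value is invented purely for illustration:

#include <cstdio>

// Bits 0-11 of the config select bits that must be 1 in the method number,
// bits 12-23 select bits that must be 0, and the top nibble marks the mask form.
static bool MaskDisablesCSE(unsigned jitNoCSE, unsigned methodCount)
{
    if ((jitNoCSE & 0xF000000) != 0xF000000)
    {
        return false; // not the mask form of the config value
    }
    unsigned methodCountMask = methodCount & 0xFFF;
    unsigned bitsZero        = (jitNoCSE >> 12) & 0xFFF;
    unsigned bitsOne         = (jitNoCSE >> 0) & 0xFFF;
    return ((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero);
}

int main()
{
    unsigned jitNoCSE = 0xF001002; // hypothetical value: require bit 1 set and bit 0 clear
    for (unsigned methodCount = 0; methodCount < 8; methodCount++)
    {
        printf("method %u: CSE %s\n", methodCount, MaskDisablesCSE(jitNoCSE, methodCount) ? "disabled" : "enabled");
    }
    return 0;
}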
@@ -2329,13 +2426,13 @@ bool Compiler::optConfigDisableCSE()
}
//
-// A Debug only method that allows you to control whether the CSE logic is enabled for
+// A Debug only method that allows you to control whether the CSE logic is enabled for
// a particular CSE in a method
//
// If this method returns false then the CSE should be performed.
// If the method returns true then the CSE should be skipped.
//
-bool Compiler::optConfigDisableCSE2()
+bool Compiler::optConfigDisableCSE2()
{
static unsigned totalCSEcount = 0;
@@ -2347,15 +2444,16 @@ bool Compiler::optConfigDisableCSE2()
{
if ((jitNoCSE2 & 0xF000000) == 0xF000000)
{
- unsigned totalCSEMask = totalCSEcount & 0xFFF;
+ unsigned totalCSEMask = totalCSEcount & 0xFFF;
unsigned bitsZero = (jitNoCSE2 >> 12) & 0xFFF;
- unsigned bitsOne = (jitNoCSE2 >> 0) & 0xFFF;
-
- if ((( totalCSEMask & bitsOne) == bitsOne) &&
- ((~totalCSEMask & bitsZero) == bitsZero) )
+ unsigned bitsOne = (jitNoCSE2 >> 0) & 0xFFF;
+
+ if (((totalCSEMask & bitsOne) == bitsOne) && ((~totalCSEMask & bitsZero) == bitsZero))
{
- if (verbose)
+ if (verbose)
+ {
printf(" Disabled by jitNoCSE2 Ones/Zeros mask\n");
+ }
return true;
}
}
@@ -2368,15 +2466,19 @@ bool Compiler::optConfigDisableCSE2()
if (disableMask & 1)
{
- if (verbose)
+ if (verbose)
+ {
printf(" Disabled by jitNoCSE2 rotating disable mask\n");
+ }
return true;
}
}
else if (jitNoCSE2 <= totalCSEcount)
{
- if (verbose)
+ if (verbose)
+ {
printf(" Disabled by jitNoCSE2 > totalCSEcount\n");
+ }
return true;
}
}
@@ -2384,7 +2486,7 @@ bool Compiler::optConfigDisableCSE2()
}
#endif
-void Compiler::optOptimizeCSEs()
+void Compiler::optOptimizeCSEs()
{
#ifdef DEBUG
if (verbose)
@@ -2396,14 +2498,13 @@ void Compiler::optOptimizeCSEs()
#endif // DEBUG
optCSECandidateCount = 0;
- optCSEstart = lvaCount;
+ optCSEstart = lvaCount;
#if FEATURE_VALNUM_CSE
INDEBUG(optEnsureClearCSEInfo());
optOptimizeValnumCSEs();
EndPhase(PHASE_OPTIMIZE_VALNUM_CSES);
-#endif // FEATURE_VALNUM_CSE
-
+#endif // FEATURE_VALNUM_CSE
}
/*****************************************************************************
@@ -2411,24 +2512,24 @@ void Compiler::optOptimizeCSEs()
* Cleanup after CSE to allow us to run more than once.
*/
-void Compiler::optCleanupCSEs()
+void Compiler::optCleanupCSEs()
{
- // We must clear the BBF_VISITED and BBF_MARKED flags
+ // We must clear the BBF_VISITED and BBF_MARKED flags
//
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
- unsigned blkFlags = block->bbFlags;
+ unsigned blkFlags = block->bbFlags;
// And clear all the "visited" bits on the block
//
- block->bbFlags &= ~(BBF_VISITED|BBF_MARKED);
+ block->bbFlags &= ~(BBF_VISITED | BBF_MARKED);
/* Walk the statement trees in this basic block */
- GenTreePtr stmt;
+ GenTreePtr stmt;
// Initialize 'stmt' to the first non-Phi statement
- stmt = block->FirstNonPhiDef();
+ stmt = block->FirstNonPhiDef();
for (; stmt; stmt = stmt->gtNext)
{
@@ -2451,18 +2552,18 @@ void Compiler::optCleanupCSEs()
* before running a CSE phase. This is basically an assert that optCleanupCSEs() is not needed.
*/
-void Compiler::optEnsureClearCSEInfo()
+void Compiler::optEnsureClearCSEInfo()
{
for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
- assert((block->bbFlags & (BBF_VISITED|BBF_MARKED)) == 0);
+ assert((block->bbFlags & (BBF_VISITED | BBF_MARKED)) == 0);
/* Walk the statement trees in this basic block */
- GenTreePtr stmt;
+ GenTreePtr stmt;
// Initialize 'stmt' to the first non-Phi statement
- stmt = block->FirstNonPhiDef();
+ stmt = block->FirstNonPhiDef();
for (; stmt; stmt = stmt->gtNext)
{
@@ -2479,5 +2580,5 @@ void Compiler::optEnsureClearCSEInfo()
#endif // DEBUG
/*****************************************************************************/
-#endif // FEATURE_ANYCSE
+#endif // FEATURE_ANYCSE
/*****************************************************************************/
diff --git a/src/jit/optimizer.cpp b/src/jit/optimizer.cpp
index c024d8691a..7121bafc54 100644
--- a/src/jit/optimizer.cpp
+++ b/src/jit/optimizer.cpp
@@ -14,43 +14,41 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
-#pragma warning ( disable : 4701 )
+#pragma warning(disable : 4701)
#endif
/*****************************************************************************/
#if COUNT_RANGECHECKS
/* static */
-unsigned Compiler::optRangeChkRmv = 0;
+unsigned Compiler::optRangeChkRmv = 0;
/* static */
-unsigned Compiler::optRangeChkAll = 0;
+unsigned Compiler::optRangeChkAll = 0;
#endif
/*****************************************************************************/
-void Compiler::optInit()
+void Compiler::optInit()
{
- optLoopsMarked = false;
- fgHasLoops = false;
-
+ optLoopsMarked = false;
+ fgHasLoops = false;
+
/* Initialize the # of tracked loops to 0 */
- optLoopCount = 0;
+ optLoopCount = 0;
/* Keep track of the number of calls and indirect calls made by this method */
optCallCount = 0;
optIndirectCallCount = 0;
optNativeCallCount = 0;
optAssertionCount = 0;
- optAssertionDep = NULL;
+ optAssertionDep = nullptr;
#if FEATURE_ANYCSE
optCSECandidateTotal = 0;
optCSEstart = UINT_MAX;
optCSEcount = 0;
#endif // FEATURE_ANYCSE
-
}
-DataFlow::DataFlow(Compiler* pCompiler)
- : m_pCompiler(pCompiler)
+DataFlow::DataFlow(Compiler* pCompiler) : m_pCompiler(pCompiler)
{
}
@@ -58,7 +56,7 @@ DataFlow::DataFlow(Compiler* pCompiler)
*
*/
-void Compiler::optSetBlockWeights()
+void Compiler::optSetBlockWeights()
{
noway_assert(!opts.MinOpts() && !opts.compDbgCode);
assert(fgDomsComputed);
@@ -69,13 +67,15 @@ void Compiler::optSetBlockWeights()
bool firstBBdomsRets = true;
- BasicBlock * block;
+ BasicBlock* block;
- for (block = fgFirstBB; (block != NULL); block = block->bbNext)
+ for (block = fgFirstBB; (block != nullptr); block = block->bbNext)
{
/* Blocks that can't be reached via the first block are rarely executed */
if (!fgReachable(fgFirstBB, block))
+ {
block->bbSetRunRarely();
+ }
if (block->bbWeight != BB_ZERO_WEIGHT)
{
@@ -84,8 +84,8 @@ void Compiler::optSetBlockWeights()
// o BB_UNITY_WEIGHT if we dominate all BBJ_RETURN blocks
// o otherwise BB_UNITY_WEIGHT / 2
//
- bool domsRets = true; // Assume that we will dominate
-
+ bool domsRets = true; // Assume that we will dominate
+
for (BasicBlockList* retBlocks = fgReturnBlocks; retBlocks != nullptr; retBlocks = retBlocks->next)
{
if (!fgDominate(block, retBlocks->block))
@@ -99,8 +99,8 @@ void Compiler::optSetBlockWeights()
{
firstBBdomsRets = domsRets;
}
-
- // If we are not using profile weight then we lower the weight
+
+ // If we are not using profile weight then we lower the weight
// of blocks that do not dominate a return block
//
if (firstBBdomsRets && (fgIsUsingProfileWeights() == false) && (domsRets == false))
@@ -115,7 +115,7 @@ void Compiler::optSetBlockWeights()
}
#if DEBUG
- if (changed && verbose)
+ if (changed && verbose)
{
printf("\nAfter optSetBlockWeights:\n");
fgDispBasicBlocks();
@@ -132,9 +132,7 @@ void Compiler::optSetBlockWeights()
* Marks the blocks between 'begBlk' and 'endBlk' as part of a loop.
*/
-void Compiler::optMarkLoopBlocks(BasicBlock *begBlk,
- BasicBlock *endBlk,
- bool excludeEndBlk)
+void Compiler::optMarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk, bool excludeEndBlk)
{
/* Calculate the 'loopWeight',
this is the amount to increase each block in the loop
@@ -156,8 +154,9 @@ void Compiler::optMarkLoopBlocks(BasicBlock *begBlk,
noway_assert(begBlk->isLoopHead());
noway_assert(fgReachable(begBlk, endBlk));
-#ifdef DEBUG
- if (verbose) {
+#ifdef DEBUG
+ if (verbose)
+ {
printf("\nMarking loop L%02u", begBlk->bbLoopNum);
}
#endif
@@ -165,11 +164,9 @@ void Compiler::optMarkLoopBlocks(BasicBlock *begBlk,
noway_assert(!opts.MinOpts());
/* Build list of backedges for block begBlk */
- flowList * backedgeList = NULL;
+ flowList* backedgeList = nullptr;
- for (flowList* pred = begBlk->bbPreds;
- pred != NULL;
- pred = pred->flNext)
+ for (flowList* pred = begBlk->bbPreds; pred != nullptr; pred = pred->flNext)
{
/* Is this a backedge? */
if (pred->flBlock->bbNum >= begBlk->bbNum)
@@ -177,20 +174,20 @@ void Compiler::optMarkLoopBlocks(BasicBlock *begBlk,
flowList* flow = new (this, CMK_FlowList) flowList();
#if MEASURE_BLOCK_SIZE
- genFlowNodeCnt += 1;
+ genFlowNodeCnt += 1;
genFlowNodeSize += sizeof(flowList);
#endif // MEASURE_BLOCK_SIZE
flow->flNext = backedgeList;
flow->flBlock = pred->flBlock;
- backedgeList = flow;
+ backedgeList = flow;
}
}
/* At least one backedge must have been found (the one from endBlk) */
noway_assert(backedgeList);
-
- BasicBlock * curBlk = begBlk;
+
+ BasicBlock* curBlk = begBlk;
while (true)
{
@@ -199,28 +196,28 @@ void Compiler::optMarkLoopBlocks(BasicBlock *begBlk,
// For curBlk to be part of a loop that starts at begBlk
// curBlk must be reachable from begBlk and (since this is a loop)
// likewise begBlk must be reachable from curBlk.
- //
+ //
if (fgReachable(curBlk, begBlk) && fgReachable(begBlk, curBlk))
{
/* If this block reaches any of the backedge blocks we set reachable */
/* If this block dominates any of the backedge blocks we set dominates */
- bool reachable = false;
- bool dominates = false;
+ bool reachable = false;
+ bool dominates = false;
- for (flowList* tmp = backedgeList;
- tmp != NULL;
- tmp = tmp->flNext)
+ for (flowList* tmp = backedgeList; tmp != nullptr; tmp = tmp->flNext)
{
- BasicBlock * backedge = tmp->flBlock;
+ BasicBlock* backedge = tmp->flBlock;
if (!curBlk->isRunRarely())
{
reachable |= fgReachable(curBlk, backedge);
- dominates |= fgDominate (curBlk, backedge);
+ dominates |= fgDominate(curBlk, backedge);
if (dominates && reachable)
+ {
break;
+ }
}
}
@@ -237,11 +234,13 @@ void Compiler::optMarkLoopBlocks(BasicBlock *begBlk,
}
else
{
- if (dominates) {
+ if (dominates)
+ {
weight = curBlk->bbWeight * BB_LOOP_WEIGHT;
}
- else {
- weight = curBlk->bbWeight * (BB_LOOP_WEIGHT/2);
+ else
+ {
+ weight = curBlk->bbWeight * (BB_LOOP_WEIGHT / 2);
}
//
@@ -249,36 +248,38 @@ void Compiler::optMarkLoopBlocks(BasicBlock *begBlk,
//
if (weight < curBlk->bbWeight)
{
- // The multiplication caused us to overflow
+ // The multiplication caused us to overflow
weight = BB_MAX_WEIGHT;
}
//
// Set the new weight
- //
+ //
curBlk->modifyBBWeight(weight);
}
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
- printf("\n BB%02u(wt=%s)",
- curBlk->bbNum,
- refCntWtd2str(curBlk->getBBWeight(this)));
+ printf("\n BB%02u(wt=%s)", curBlk->bbNum, refCntWtd2str(curBlk->getBBWeight(this)));
}
#endif
}
}
/* Stop if we've reached the last block in the loop */
-
- if (curBlk == endBlk)
+
+ if (curBlk == endBlk)
+ {
break;
-
+ }
+
curBlk = curBlk->bbNext;
/* If we are excluding the endBlk then stop if we've reached endBlk */
-
- if (excludeEndBlk && (curBlk == endBlk))
+
+ if (excludeEndBlk && (curBlk == endBlk))
+ {
break;
+ }
}
}
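The weight update in optMarkLoopBlocks above multiplies a block's weight by BB_LOOP_WEIGHT when the block dominates one of the collected backedge blocks and by half that otherwise, saturating if the multiplication overflows. The sketch below reproduces only that arithmetic; the constant values are assumptions, since the real BB_LOOP_WEIGHT and BB_MAX_WEIGHT are defined elsewhere in the JIT:

#include <cstdint>
#include <cstdio>

// Assumed placeholder values for illustration only.
static const unsigned BB_LOOP_WEIGHT = 8;
static const unsigned BB_MAX_WEIGHT  = UINT32_MAX;

static unsigned MarkedLoopWeight(unsigned bbWeight, bool dominatesBackedge)
{
    unsigned weight = dominatesBackedge ? (bbWeight * BB_LOOP_WEIGHT) : (bbWeight * (BB_LOOP_WEIGHT / 2));
    if (weight < bbWeight)
    {
        weight = BB_MAX_WEIGHT; // the multiplication overflowed, so saturate
    }
    return weight;
}

int main()
{
    printf("%u\n", MarkedLoopWeight(100, true));         // 800
    printf("%u\n", MarkedLoopWeight(100, false));        // 400
    printf("%u\n", MarkedLoopWeight(0x80000000u, true)); // saturates to BB_MAX_WEIGHT
    return 0;
}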
@@ -287,8 +288,7 @@ void Compiler::optMarkLoopBlocks(BasicBlock *begBlk,
* Unmark the blocks between 'begBlk' and 'endBlk' as part of a loop.
*/
-void Compiler::optUnmarkLoopBlocks(BasicBlock *begBlk,
- BasicBlock *endBlk)
+void Compiler::optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk)
{
/* A set of blocks that were previously marked as a loop are now
to be unmarked, since we have decided that for some reason this
@@ -296,31 +296,32 @@ void Compiler::optUnmarkLoopBlocks(BasicBlock *begBlk,
       Basically we are just resetting the blocks' bbWeight to their
previous values.
*/
-
+
noway_assert(begBlk->bbNum <= endBlk->bbNum);
noway_assert(begBlk->isLoopHead());
noway_assert(!opts.MinOpts());
- BasicBlock * curBlk;
- unsigned backEdgeCount = 0;
+ BasicBlock* curBlk;
+ unsigned backEdgeCount = 0;
- for (flowList * pred = begBlk->bbPreds;
- pred != NULL;
- pred = pred->flNext)
+ for (flowList* pred = begBlk->bbPreds; pred != nullptr; pred = pred->flNext)
{
curBlk = pred->flBlock;
/* is this a backward edge? (from curBlk to begBlk) */
if (begBlk->bbNum > curBlk->bbNum)
+ {
continue;
+ }
/* We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops */
- if ((curBlk->bbJumpKind != BBJ_COND) &&
- (curBlk->bbJumpKind != BBJ_ALWAYS) )
- continue;
+ if ((curBlk->bbJumpKind != BBJ_COND) && (curBlk->bbJumpKind != BBJ_ALWAYS))
+ {
+ continue;
+ }
backEdgeCount++;
}
@@ -328,18 +329,16 @@ void Compiler::optUnmarkLoopBlocks(BasicBlock *begBlk,
/* Only unmark the loop blocks if we have exactly one loop back edge */
if (backEdgeCount != 1)
{
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
if (backEdgeCount > 0)
{
- printf("\nNot removing loop L%02u, due to an additional back edge",
- begBlk->bbLoopNum);
+ printf("\nNot removing loop L%02u, due to an additional back edge", begBlk->bbLoopNum);
}
else if (backEdgeCount == 0)
{
- printf("\nNot removing loop L%02u, due to no back edge",
- begBlk->bbLoopNum);
+ printf("\nNot removing loop L%02u, due to no back edge", begBlk->bbLoopNum);
}
}
#endif
@@ -348,20 +347,22 @@ void Compiler::optUnmarkLoopBlocks(BasicBlock *begBlk,
noway_assert(backEdgeCount == 1);
noway_assert(fgReachable(begBlk, endBlk));
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
+ {
printf("\nUnmarking loop L%02u", begBlk->bbLoopNum);
+ }
#endif
curBlk = begBlk;
while (true)
- {
+ {
noway_assert(curBlk);
// For curBlk to be part of a loop that starts at begBlk
// curBlk must be reachable from begBlk and (since this is a loop)
// likewise begBlk must be reachable from curBlk.
- //
+ //
if (!curBlk->isRunRarely() && fgReachable(curBlk, begBlk) && fgReachable(begBlk, curBlk))
{
unsigned weight = curBlk->bbWeight;
@@ -380,37 +381,43 @@ void Compiler::optUnmarkLoopBlocks(BasicBlock *begBlk,
/* Merging of blocks can disturb the Dominates
information (see RAID #46649) */
if (weight < BB_LOOP_WEIGHT)
+ {
weight *= 2;
+ }
}
// We can overflow here so check for it
if (weight < curBlk->bbWeight)
+ {
weight = BB_MAX_WEIGHT;
+ }
+
+ assert(weight >= BB_LOOP_WEIGHT);
- assert (weight >= BB_LOOP_WEIGHT);
-
curBlk->modifyBBWeight(weight / BB_LOOP_WEIGHT);
}
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
- printf("\n BB%02u(wt=%s)",
- curBlk->bbNum,
- refCntWtd2str(curBlk->getBBWeight(this)));
+ printf("\n BB%02u(wt=%s)", curBlk->bbNum, refCntWtd2str(curBlk->getBBWeight(this)));
}
#endif
}
/* Stop if we've reached the last block in the loop */
-
- if (curBlk == endBlk)
+
+ if (curBlk == endBlk)
+ {
break;
+ }
curBlk = curBlk->bbNext;
/* Stop if we go past the last block in the loop, as it may have been deleted */
if (curBlk->bbNum > endBlk->bbNum)
+ {
break;
+ }
}
}
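
The restore step is roughly the inverse of the marking step; a hedged sketch with illustrative names, ignoring the profile-weight special cases in the code above:

    #include <cstdint>
    #include <limits>

    std::uint32_t unmarkLoopBlockWeight(std::uint32_t markedWeight, std::uint32_t loopMultiplier, bool blocksWereMerged)
    {
        std::uint32_t weight = markedWeight;
        // Block merging can leave the weight below the multiplier; double it once
        // so the division below cannot drop it to zero.
        if (blocksWereMerged && (weight < loopMultiplier))
        {
            weight *= 2;
        }
        if (weight < markedWeight) // the doubling wrapped around
        {
            weight = std::numeric_limits<std::uint32_t>::max();
        }
        return weight / loopMultiplier;
    }
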
@@ -419,11 +426,12 @@ void Compiler::optUnmarkLoopBlocks(BasicBlock *begBlk,
* Function called to update the loop table and bbWeight before removing a block
*/
-void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock * block,
- bool skipUnmarkLoop)
+void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop)
{
if (!optLoopsMarked)
+ {
return;
+ }
noway_assert(!opts.MinOpts());
@@ -438,16 +446,17 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock * block
* loop unrolling or conditional folding */
if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED)
+ {
continue;
+ }
- if (block == optLoopTable[loopNum].lpEntry ||
- block == optLoopTable[loopNum].lpBottom )
+ if (block == optLoopTable[loopNum].lpEntry || block == optLoopTable[loopNum].lpBottom)
{
optLoopTable[loopNum].lpFlags |= LPFLG_REMOVED;
continue;
}
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
printf("\nUpdateLoopsBeforeRemoveBlock Before: ");
@@ -463,111 +472,118 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock * block
if (optLoopTable[loopNum].lpExit == block)
{
- optLoopTable[loopNum].lpExit = NULL;
- optLoopTable[loopNum].lpFlags &= ~LPFLG_ONE_EXIT;;
+ optLoopTable[loopNum].lpExit = nullptr;
+ optLoopTable[loopNum].lpFlags &= ~LPFLG_ONE_EXIT;
+ ;
}
-
/* If this points to the actual entry in the loop
* then the whole loop may become unreachable */
switch (block->bbJumpKind)
{
- unsigned jumpCnt;
- BasicBlock * * jumpTab;
-
- case BBJ_NONE:
- case BBJ_COND:
- if (block->bbNext == optLoopTable[loopNum].lpEntry)
- {
- removeLoop = true;
- break;
- }
- if (block->bbJumpKind == BBJ_NONE)
- break;
-
- __fallthrough;
+ unsigned jumpCnt;
+ BasicBlock** jumpTab;
- case BBJ_ALWAYS:
- noway_assert(block->bbJumpDest);
- if (block->bbJumpDest == optLoopTable[loopNum].lpEntry)
- {
- removeLoop = true;
- }
- break;
+ case BBJ_NONE:
+ case BBJ_COND:
+ if (block->bbNext == optLoopTable[loopNum].lpEntry)
+ {
+ removeLoop = true;
+ break;
+ }
+ if (block->bbJumpKind == BBJ_NONE)
+ {
+ break;
+ }
- case BBJ_SWITCH:
- jumpCnt = block->bbJumpSwt->bbsCount;
- jumpTab = block->bbJumpSwt->bbsDstTab;
+ __fallthrough;
- do {
- noway_assert(*jumpTab);
- if ((*jumpTab) == optLoopTable[loopNum].lpEntry)
+ case BBJ_ALWAYS:
+ noway_assert(block->bbJumpDest);
+ if (block->bbJumpDest == optLoopTable[loopNum].lpEntry)
{
removeLoop = true;
}
- } while (++jumpTab, --jumpCnt);
- break;
+ break;
- default:
- break;
+ case BBJ_SWITCH:
+ jumpCnt = block->bbJumpSwt->bbsCount;
+ jumpTab = block->bbJumpSwt->bbsDstTab;
+
+ do
+ {
+ noway_assert(*jumpTab);
+ if ((*jumpTab) == optLoopTable[loopNum].lpEntry)
+ {
+ removeLoop = true;
+ }
+ } while (++jumpTab, --jumpCnt);
+ break;
+
+ default:
+ break;
}
- if (removeLoop)
+ if (removeLoop)
{
/* Check if the entry has other predecessors outside the loop
* TODO: Replace this when predecessors are available */
- BasicBlock * auxBlock;
+ BasicBlock* auxBlock;
for (auxBlock = fgFirstBB; auxBlock; auxBlock = auxBlock->bbNext)
{
/* Ignore blocks in the loop */
- if (auxBlock->bbNum > optLoopTable[loopNum].lpHead->bbNum &&
- auxBlock->bbNum <= optLoopTable[loopNum].lpBottom->bbNum)
+ if (auxBlock->bbNum > optLoopTable[loopNum].lpHead->bbNum &&
+ auxBlock->bbNum <= optLoopTable[loopNum].lpBottom->bbNum)
+ {
continue;
+ }
switch (auxBlock->bbJumpKind)
{
- unsigned jumpCnt;
- BasicBlock * * jumpTab;
-
- case BBJ_NONE:
- case BBJ_COND:
- if (auxBlock->bbNext == optLoopTable[loopNum].lpEntry)
- {
- removeLoop = false;
- break;
- }
- if (auxBlock->bbJumpKind == BBJ_NONE)
- break;
-
- __fallthrough;
+ unsigned jumpCnt;
+ BasicBlock** jumpTab;
- case BBJ_ALWAYS:
- noway_assert(auxBlock->bbJumpDest);
- if (auxBlock->bbJumpDest == optLoopTable[loopNum].lpEntry)
- {
- removeLoop = false;
- }
- break;
+ case BBJ_NONE:
+ case BBJ_COND:
+ if (auxBlock->bbNext == optLoopTable[loopNum].lpEntry)
+ {
+ removeLoop = false;
+ break;
+ }
+ if (auxBlock->bbJumpKind == BBJ_NONE)
+ {
+ break;
+ }
- case BBJ_SWITCH:
- jumpCnt = auxBlock->bbJumpSwt->bbsCount;
- jumpTab = auxBlock->bbJumpSwt->bbsDstTab;
+ __fallthrough;
- do
- {
- noway_assert(*jumpTab);
- if ((*jumpTab) == optLoopTable[loopNum].lpEntry)
+ case BBJ_ALWAYS:
+ noway_assert(auxBlock->bbJumpDest);
+ if (auxBlock->bbJumpDest == optLoopTable[loopNum].lpEntry)
{
removeLoop = false;
}
- } while (++jumpTab, --jumpCnt);
- break;
+ break;
- default:
- break;
+ case BBJ_SWITCH:
+ jumpCnt = auxBlock->bbJumpSwt->bbsCount;
+ jumpTab = auxBlock->bbJumpSwt->bbsDstTab;
+
+ do
+ {
+ noway_assert(*jumpTab);
+ if ((*jumpTab) == optLoopTable[loopNum].lpEntry)
+ {
+ removeLoop = false;
+ }
+ } while (++jumpTab, --jumpCnt);
+ break;
+
+ default:
+ break;
}
}
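
The predecessor scan above can be sketched as a reachability check over simplified blocks (hypothetical types, not the JIT's flow graph):

    #include <vector>

    struct FlowBlock
    {
        unsigned                      num;     // ordering number, like bbNum
        std::vector<const FlowBlock*> targets; // fall-through, jump, and switch successors
    };

    // Returns true if some block outside (headNum, bottomNum] still branches to 'entry'.
    bool loopEntryStillReached(const std::vector<const FlowBlock*>& allBlocks,
                               const FlowBlock*                     entry,
                               unsigned                             headNum,
                               unsigned                             bottomNum)
    {
        for (const FlowBlock* blk : allBlocks)
        {
            if ((blk->num > headNum) && (blk->num <= bottomNum))
            {
                continue; // inside the loop; only outside predecessors matter here
            }
            for (const FlowBlock* succ : blk->targets)
            {
                if (succ == entry)
                {
                    return true;
                }
            }
        }
        return false;
    }
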
@@ -582,27 +598,24 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock * block
optLoopTable[loopNum].lpHead = block->bbPrev;
}
-#ifdef DEBUG
- if (verbose) {
+#ifdef DEBUG
+ if (verbose)
+ {
printf("\nUpdateLoopsBeforeRemoveBlock After: ");
optPrintLoopInfo(loopNum);
}
#endif
}
- if ((skipUnmarkLoop == false) &&
- ((block->bbJumpKind == BBJ_ALWAYS) || (block->bbJumpKind == BBJ_COND)) &&
- (block->bbJumpDest->isLoopHead()) &&
- (block->bbJumpDest->bbNum <= block->bbNum) &&
- fgDomsComputed &&
- (fgCurBBEpochSize == fgDomBBcount + 1) &&
- fgReachable(block->bbJumpDest, block))
+ if ((skipUnmarkLoop == false) && ((block->bbJumpKind == BBJ_ALWAYS) || (block->bbJumpKind == BBJ_COND)) &&
+ (block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) && fgDomsComputed &&
+ (fgCurBBEpochSize == fgDomBBcount + 1) && fgReachable(block->bbJumpDest, block))
{
optUnmarkLoopBlocks(block->bbJumpDest, block);
}
}
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
@@ -613,7 +626,7 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock * block
unsigned Compiler::optFindLoopNumberFromBeginBlock(BasicBlock* begBlk)
{
unsigned lnum = 0;
-
+
for (lnum = 0; lnum < optLoopCount; lnum++)
{
if (optLoopTable[lnum].lpHead->bbNext == begBlk)
@@ -625,7 +638,7 @@ unsigned Compiler::optFindLoopNumberFromBeginBlock(BasicBlock* begBlk)
noway_assert(!"Loop number not found.");
- return optLoopCount;
+ return optLoopCount;
}
/*****************************************************************************
@@ -633,19 +646,18 @@ unsigned Compiler::optFindLoopNumberFromBeginBlock(BasicBlock* begBlk)
 * Print loop info in a uniform way.
*/
-void Compiler::optPrintLoopInfo(unsigned loopInd,
- BasicBlock * lpHead,
- BasicBlock * lpFirst,
- BasicBlock * lpTop,
- BasicBlock * lpEntry,
- BasicBlock * lpBottom,
- unsigned char lpExitCnt,
- BasicBlock * lpExit,
- unsigned parentLoop
- )
+void Compiler::optPrintLoopInfo(unsigned loopInd,
+ BasicBlock* lpHead,
+ BasicBlock* lpFirst,
+ BasicBlock* lpTop,
+ BasicBlock* lpEntry,
+ BasicBlock* lpBottom,
+ unsigned char lpExitCnt,
+ BasicBlock* lpExit,
+ unsigned parentLoop)
{
noway_assert(lpHead);
-
+
//
// NOTE: we take "loopInd" as an argument instead of using the one
// stored in begBlk->bbLoopNum because sometimes begBlk->bbLoopNum
@@ -660,14 +672,11 @@ void Compiler::optPrintLoopInfo(unsigned loopInd,
printf(" (loop top is BB%02u)", lpTop->bbNum);
}
- printf(" to BB%02u (Head=BB%02u, Entry=BB%02u, ExitCnt=%d",
- lpBottom->bbNum,
- lpHead->bbNum,
- lpEntry->bbNum,
- lpExitCnt
- );
+ printf(" to BB%02u (Head=BB%02u, Entry=BB%02u, ExitCnt=%d", lpBottom->bbNum, lpHead->bbNum, lpEntry->bbNum,
+ lpExitCnt);
- if (lpExitCnt == 1) {
+ if (lpExitCnt == 1)
+ {
printf(" at BB%02u", lpExit->bbNum);
}
@@ -687,20 +696,11 @@ void Compiler::optPrintLoopInfo(unsigned lnum)
{
noway_assert(lnum < optLoopCount);
- LoopDsc* ldsc = &optLoopTable[lnum]; // lnum is the INDEX to the loop table.
-
- optPrintLoopInfo(lnum,
- ldsc->lpHead,
- ldsc->lpFirst,
- ldsc->lpTop,
- ldsc->lpEntry,
- ldsc->lpBottom,
- ldsc->lpExitCnt,
- ldsc->lpExit,
- ldsc->lpParent
- );
-}
+ LoopDsc* ldsc = &optLoopTable[lnum]; // lnum is the INDEX to the loop table.
+ optPrintLoopInfo(lnum, ldsc->lpHead, ldsc->lpFirst, ldsc->lpTop, ldsc->lpEntry, ldsc->lpBottom, ldsc->lpExitCnt,
+ ldsc->lpExit, ldsc->lpParent);
+}
#endif
@@ -771,7 +771,8 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, GenTreePtr init, unsigned i
// "false" if the loop table could not be populated with the loop test info or
// if the test condition doesn't involve iterVar.
//
-bool Compiler::optCheckIterInLoopTest(unsigned loopInd, GenTreePtr test, BasicBlock* from, BasicBlock* to, unsigned iterVar)
+bool Compiler::optCheckIterInLoopTest(
+ unsigned loopInd, GenTreePtr test, BasicBlock* from, BasicBlock* to, unsigned iterVar)
{
// Obtain the relop from the "test" tree.
GenTreePtr relop;
@@ -779,13 +780,13 @@ bool Compiler::optCheckIterInLoopTest(unsigned loopInd, GenTreePtr test, BasicBl
{
relop = test->gtGetOp1();
}
- else
+ else
{
assert(test->gtOper == GT_ASG);
relop = test->gtGetOp2();
}
- noway_assert(relop->OperKind() & GTK_RELOP);
+ noway_assert(relop->OperKind() & GTK_RELOP);
GenTreePtr opr1 = relop->gtOp.gtOp1;
GenTreePtr opr2 = relop->gtOp.gtOp2;
@@ -796,12 +797,12 @@ bool Compiler::optCheckIterInLoopTest(unsigned loopInd, GenTreePtr test, BasicBl
// Make sure op1 or op2 is the iterVar.
if (opr1->gtOper == GT_LCL_VAR && opr1->gtLclVarCommon.gtLclNum == iterVar)
{
- iterOp = opr1;
+ iterOp = opr1;
limitOp = opr2;
}
else if (opr2->gtOper == GT_LCL_VAR && opr2->gtLclVarCommon.gtLclNum == iterVar)
{
- iterOp = opr2;
+ iterOp = opr2;
limitOp = opr1;
}
else
@@ -856,22 +857,22 @@ bool Compiler::optCheckIterInLoopTest(unsigned loopInd, GenTreePtr test, BasicBl
//
unsigned Compiler::optIsLoopIncrTree(GenTreePtr incr)
{
- GenTree* incrVal;
+ GenTree* incrVal;
genTreeOps updateOper;
- unsigned iterVar = incr->IsLclVarUpdateTree(&incrVal, &updateOper);
+ unsigned iterVar = incr->IsLclVarUpdateTree(&incrVal, &updateOper);
if (iterVar != BAD_VAR_NUM)
{
// We have v = v op y type asg node.
switch (updateOper)
{
- case GT_ADD:
- case GT_SUB:
- case GT_MUL:
- case GT_RSH:
- case GT_LSH:
- break;
- default:
- return BAD_VAR_NUM;
+ case GT_ADD:
+ case GT_SUB:
+ case GT_MUL:
+ case GT_RSH:
+ case GT_LSH:
+ break;
+ default:
+ return BAD_VAR_NUM;
}
// Increment should be by a const int.
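
The operator filter above amounts to a small whitelist; a sketch with stand-in enum values rather than the real GenTree operators:

    enum UpdateOper { OP_ADD, OP_SUB, OP_MUL, OP_RSH, OP_LSH, OP_DIV, OP_OTHER };

    // Only these update operators are accepted for an "i = i op k" loop increment.
    bool isSupportedLoopIncrOper(UpdateOper oper)
    {
        switch (oper)
        {
            case OP_ADD:
            case OP_SUB:
            case OP_MUL:
            case OP_RSH:
            case OP_LSH:
                return true;
            default:
                return false;
        }
    }
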
@@ -957,9 +958,7 @@ bool Compiler::optIsLoopTestEvalIntoTemp(GenTreePtr testStmt, GenTreePtr* newTes
GenTreePtr opr2 = relop->gtOp.gtOp2;
// Make sure we have jtrue (vtmp != 0)
- if ((relop->OperGet() == GT_NE) &&
- (opr1->OperGet() == GT_LCL_VAR) &&
- (opr2->OperGet() == GT_CNS_INT) &&
+ if ((relop->OperGet() == GT_NE) && (opr1->OperGet() == GT_LCL_VAR) && (opr2->OperGet() == GT_CNS_INT) &&
opr2->IsIntegralConst(0))
{
// Get the previous statement to get the def (rhs) of Vtmp to see
@@ -982,7 +981,7 @@ bool Compiler::optIsLoopTestEvalIntoTemp(GenTreePtr testStmt, GenTreePtr* newTes
if (rhs->OperIsCompare())
{
*newTest = prevStmt;
- return true;
+ return true;
}
}
}
@@ -1025,7 +1024,8 @@ bool Compiler::optIsLoopTestEvalIntoTemp(GenTreePtr testStmt, GenTreePtr* newTes
// This method just retrieves what it thinks is the "test" node,
// the callers are expected to verify that "iterVar" is used in the test.
//
-bool Compiler::optExtractInitTestIncr(BasicBlock* head, BasicBlock* bottom, BasicBlock* top, GenTreePtr* ppInit, GenTreePtr* ppTest, GenTreePtr* ppIncr)
+bool Compiler::optExtractInitTestIncr(
+ BasicBlock* head, BasicBlock* bottom, BasicBlock* top, GenTreePtr* ppInit, GenTreePtr* ppTest, GenTreePtr* ppIncr)
{
assert(ppInit != nullptr);
assert(ppTest != nullptr);
@@ -1076,7 +1076,7 @@ bool Compiler::optExtractInitTestIncr(BasicBlock* head, BasicBlock* bottom, Basi
}
GenTreePtr init = phdr->gtPrev;
- noway_assert(init != nullptr && (init->gtNext == 0));
+ noway_assert(init != nullptr && (init->gtNext == nullptr));
// If it is a duplicated loop condition, skip it.
if (init->gtFlags & GTF_STMT_CMPADD)
@@ -1103,18 +1103,18 @@ bool Compiler::optExtractInitTestIncr(BasicBlock* head, BasicBlock* bottom, Basi
* Record the loop in the loop table.
*/
-void Compiler::optRecordLoop(BasicBlock * head,
- BasicBlock * first,
- BasicBlock * top,
- BasicBlock * entry,
- BasicBlock * bottom,
- BasicBlock * exit,
- unsigned char exitCnt)
+void Compiler::optRecordLoop(BasicBlock* head,
+ BasicBlock* first,
+ BasicBlock* top,
+ BasicBlock* entry,
+ BasicBlock* bottom,
+ BasicBlock* exit,
+ unsigned char exitCnt)
{
// Record this loop in the table, if there's room.
assert(optLoopCount <= MAX_LOOP_NUM);
- if (optLoopCount == MAX_LOOP_NUM)
+ if (optLoopCount == MAX_LOOP_NUM)
{
#if COUNT_LOOPS
loopOverflowThisMethod = true;
@@ -1128,50 +1128,55 @@ void Compiler::optRecordLoop(BasicBlock * head,
assert(entry->bbNum <= bottom->bbNum);
assert(head->bbNum < top->bbNum || head->bbNum > bottom->bbNum);
- // If the new loop contains any existing ones, add it in the right place.
+ // If the new loop contains any existing ones, add it in the right place.
unsigned char loopInd = optLoopCount;
for (unsigned char prevPlus1 = optLoopCount; prevPlus1 > 0; prevPlus1--)
{
unsigned char prev = prevPlus1 - 1;
if (optLoopTable[prev].lpContainedBy(first, bottom))
+ {
loopInd = prev;
+ }
}
// Move up any loops if necessary.
for (unsigned j = optLoopCount; j > loopInd; j--)
{
- optLoopTable[j] = optLoopTable[j-1];
+ optLoopTable[j] = optLoopTable[j - 1];
}
-
+
#ifdef DEBUG
- for (unsigned i = loopInd+1; i < optLoopCount; i++)
+ for (unsigned i = loopInd + 1; i < optLoopCount; i++)
{
// The loop is well-formed.
assert(optLoopTable[i].lpWellFormed());
// Check for disjoint.
- if (optLoopTable[i].lpDisjoint(first, bottom)) continue;
+ if (optLoopTable[i].lpDisjoint(first, bottom))
+ {
+ continue;
+ }
// Otherwise, assert complete containment (of optLoopTable[i] in new loop).
assert(optLoopTable[i].lpContainedBy(first, bottom));
}
#endif // DEBUG
- optLoopTable[loopInd].lpHead = head;
- optLoopTable[loopInd].lpFirst = first;
- optLoopTable[loopInd].lpTop = top;
- optLoopTable[loopInd].lpBottom = bottom;
- optLoopTable[loopInd].lpEntry = entry;
- optLoopTable[loopInd].lpExit = exit;
- optLoopTable[loopInd].lpExitCnt = exitCnt;
+ optLoopTable[loopInd].lpHead = head;
+ optLoopTable[loopInd].lpFirst = first;
+ optLoopTable[loopInd].lpTop = top;
+ optLoopTable[loopInd].lpBottom = bottom;
+ optLoopTable[loopInd].lpEntry = entry;
+ optLoopTable[loopInd].lpExit = exit;
+ optLoopTable[loopInd].lpExitCnt = exitCnt;
- optLoopTable[loopInd].lpParent = BasicBlock::NOT_IN_LOOP;
- optLoopTable[loopInd].lpChild = BasicBlock::NOT_IN_LOOP;
- optLoopTable[loopInd].lpSibling = BasicBlock::NOT_IN_LOOP;
+ optLoopTable[loopInd].lpParent = BasicBlock::NOT_IN_LOOP;
+ optLoopTable[loopInd].lpChild = BasicBlock::NOT_IN_LOOP;
+ optLoopTable[loopInd].lpSibling = BasicBlock::NOT_IN_LOOP;
- optLoopTable[loopInd].lpFlags = 0;
+ optLoopTable[loopInd].lpFlags = 0;
// We haven't yet recorded any side effects.
- optLoopTable[loopInd].lpLoopHasHeapHavoc = false;
- optLoopTable[loopInd].lpFieldsModified = nullptr;
- optLoopTable[loopInd].lpArrayElemTypesModified = 0;
+ optLoopTable[loopInd].lpLoopHasHeapHavoc = false;
+ optLoopTable[loopInd].lpFieldsModified = nullptr;
+ optLoopTable[loopInd].lpArrayElemTypesModified = nullptr;
// If DO-WHILE loop mark it as such.
if (head->bbNext == entry)
@@ -1195,7 +1200,7 @@ void Compiler::optRecordLoop(BasicBlock * head,
// 3. The iterator is incremented exactly once
// 4. The loop condition must use the iterator.
//
- if (bottom->bbJumpKind == BBJ_COND)
+ if (bottom->bbJumpKind == BBJ_COND)
{
GenTreePtr init;
GenTreePtr test;
@@ -1249,8 +1254,7 @@ void Compiler::optRecordLoop(BasicBlock * head,
#endif
// Check if a constant iteration loop.
- if ((optLoopTable[loopInd].lpFlags & LPFLG_CONST_INIT) &&
- (optLoopTable[loopInd].lpFlags & LPFLG_CONST_LIMIT))
+ if ((optLoopTable[loopInd].lpFlags & LPFLG_CONST_INIT) && (optLoopTable[loopInd].lpFlags & LPFLG_CONST_LIMIT))
{
// This is a constant loop.
optLoopTable[loopInd].lpFlags |= LPFLG_CONST;
@@ -1259,7 +1263,7 @@ void Compiler::optRecordLoop(BasicBlock * head,
#endif
}
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose && 0)
{
printf("\nConstant loop initializer:\n");
@@ -1274,17 +1278,17 @@ void Compiler::optRecordLoop(BasicBlock * head,
for (GenTreeStmt* stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt)
{
if (stmt->gtStmt.gtStmtExpr == incr)
+ {
break;
+ }
printf("\n");
gtDispTree(stmt->gtStmt.gtStmtExpr);
}
- }
- while (block != bottom);
+ } while (block != bottom);
}
#endif // DEBUG
}
-
DONE_LOOP:
DBEXEC(verbose, optPrintLoopRecording(loopInd));
optLoopCount++;
@@ -1300,18 +1304,13 @@ DONE_LOOP:
void Compiler::optPrintLoopRecording(unsigned loopInd)
{
printf("Recorded loop %s", (loopInd != optLoopCount ? "(extended) " : ""));
- optPrintLoopInfo(optLoopCount, // Not necessarily the loop index, but the number of loops that have been added.
- optLoopTable[loopInd].lpHead,
- optLoopTable[loopInd].lpFirst,
- optLoopTable[loopInd].lpTop,
- optLoopTable[loopInd].lpEntry,
- optLoopTable[loopInd].lpBottom,
- optLoopTable[loopInd].lpExitCnt,
- optLoopTable[loopInd].lpExit
- );
+ optPrintLoopInfo(optLoopCount, // Not necessarily the loop index, but the number of loops that have been added.
+ optLoopTable[loopInd].lpHead, optLoopTable[loopInd].lpFirst, optLoopTable[loopInd].lpTop,
+ optLoopTable[loopInd].lpEntry, optLoopTable[loopInd].lpBottom, optLoopTable[loopInd].lpExitCnt,
+ optLoopTable[loopInd].lpExit);
// If an iterator loop print the iterator and the initialization.
- if (optLoopTable[loopInd].lpFlags & LPFLG_ITER)
+ if (optLoopTable[loopInd].lpFlags & LPFLG_ITER)
{
printf(" [over V%02u", optLoopTable[loopInd].lpIterVar());
printf(" (");
@@ -1319,19 +1318,27 @@ void Compiler::optPrintLoopRecording(unsigned loopInd)
printf(" ");
printf("%d )", optLoopTable[loopInd].lpIterConst());
- if (optLoopTable[loopInd].lpFlags & LPFLG_CONST_INIT)
+ if (optLoopTable[loopInd].lpFlags & LPFLG_CONST_INIT)
+ {
printf(" from %d", optLoopTable[loopInd].lpConstInit);
- if (optLoopTable[loopInd].lpFlags & LPFLG_VAR_INIT)
+ }
+ if (optLoopTable[loopInd].lpFlags & LPFLG_VAR_INIT)
+ {
printf(" from V%02u", optLoopTable[loopInd].lpVarInit);
+ }
// If a simple test condition print operator and the limits */
printf(GenTree::NodeName(optLoopTable[loopInd].lpTestOper()));
- if (optLoopTable[loopInd].lpFlags & LPFLG_CONST_LIMIT)
+ if (optLoopTable[loopInd].lpFlags & LPFLG_CONST_LIMIT)
+ {
printf("%d ", optLoopTable[loopInd].lpConstLimit());
+ }
- if (optLoopTable[loopInd].lpFlags & LPFLG_VAR_LIMIT)
+ if (optLoopTable[loopInd].lpFlags & LPFLG_VAR_LIMIT)
+ {
printf("V%02u ", optLoopTable[loopInd].lpVarLimit());
+ }
printf("]");
}
@@ -1339,11 +1346,11 @@ void Compiler::optPrintLoopRecording(unsigned loopInd)
printf("\n");
}
-void Compiler::optCheckPreds()
+void Compiler::optCheckPreds()
{
- BasicBlock * block;
- BasicBlock * blockPred;
- flowList * pred;
+ BasicBlock* block;
+ BasicBlock* blockPred;
+ flowList* pred;
for (block = fgFirstBB; block; block = block->bbNext)
{
@@ -1353,25 +1360,29 @@ void Compiler::optCheckPreds()
for (blockPred = fgFirstBB; blockPred; blockPred = blockPred->bbNext)
{
if (blockPred == pred->flBlock)
+ {
break;
+ }
}
noway_assert(blockPred);
switch (blockPred->bbJumpKind)
{
- case BBJ_COND:
- if (blockPred->bbJumpDest == block)
+ case BBJ_COND:
+ if (blockPred->bbJumpDest == block)
+ {
+ break;
+ }
+ __fallthrough;
+ case BBJ_NONE:
+ noway_assert(blockPred->bbNext == block);
+ break;
+ case BBJ_EHFILTERRET:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ noway_assert(blockPred->bbJumpDest == block);
+ break;
+ default:
break;
- __fallthrough;
- case BBJ_NONE:
- noway_assert(blockPred->bbNext == block);
- break;
- case BBJ_EHFILTERRET:
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- noway_assert(blockPred->bbJumpDest == block);
- break;
- default:
- break;
}
}
}
@@ -1385,23 +1396,25 @@ void Compiler::optCheckPreds()
* not done a depth first reordering of the basic blocks.
*/
-void Compiler::optFindNaturalLoops()
-{
+void Compiler::optFindNaturalLoops()
+{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In optFindNaturalLoops()\n");
+ }
#endif // DEBUG
- flowList * pred;
- flowList * predTop;
- flowList * predEntry;
+ flowList* pred;
+ flowList* predTop;
+ flowList* predEntry;
noway_assert(fgDomsComputed);
assert(fgHasLoops);
#if COUNT_LOOPS
- hasMethodLoops = false;
- loopsThisMethod = 0;
+ hasMethodLoops = false;
+ loopsThisMethod = 0;
loopOverflowThisMethod = false;
#endif
@@ -1419,7 +1432,7 @@ void Compiler::optFindNaturalLoops()
|
v
- head
+ head
|
| top/beg <--+
| | |
@@ -1437,32 +1450,33 @@ void Compiler::optFindNaturalLoops()
| | |
| v |
| bottom ---+
- |
- +------+
+ |
+ +------+
|
v
*/
- BasicBlock * head;
- BasicBlock * top;
- BasicBlock * bottom;
- BasicBlock * entry;
- BasicBlock * exit;
- unsigned char exitCount;
-
+ BasicBlock* head;
+ BasicBlock* top;
+ BasicBlock* bottom;
+ BasicBlock* entry;
+ BasicBlock* exit;
+ unsigned char exitCount;
for (head = fgFirstBB; head->bbNext; head = head->bbNext)
{
top = head->bbNext;
- exit = NULL;
+ exit = nullptr;
exitCount = 0;
// Blocks that are rarely run have a zero bbWeight and should
// never be optimized here
if (top->bbWeight == BB_ZERO_WEIGHT)
+ {
continue;
+ }
for (pred = top->bbPreds; pred; pred = pred->flNext)
{
@@ -1476,20 +1490,18 @@ void Compiler::optFindNaturalLoops()
bottom = pred->flBlock;
exitCount = 0;
- if (top->bbNum <= bottom->bbNum) // is this a backward edge? (from BOTTOM to TOP)
+ if (top->bbNum <= bottom->bbNum) // is this a backward edge? (from BOTTOM to TOP)
{
- if ((bottom->bbJumpKind == BBJ_EHFINALLYRET) ||
- (bottom->bbJumpKind == BBJ_EHFILTERRET) ||
- (bottom->bbJumpKind == BBJ_EHCATCHRET) ||
- (bottom->bbJumpKind == BBJ_CALLFINALLY) ||
- (bottom->bbJumpKind == BBJ_SWITCH) )
+ if ((bottom->bbJumpKind == BBJ_EHFINALLYRET) || (bottom->bbJumpKind == BBJ_EHFILTERRET) ||
+ (bottom->bbJumpKind == BBJ_EHCATCHRET) || (bottom->bbJumpKind == BBJ_CALLFINALLY) ||
+ (bottom->bbJumpKind == BBJ_SWITCH))
{
/* BBJ_EHFINALLYRET, BBJ_EHFILTERRET, BBJ_EHCATCHRET, and BBJ_CALLFINALLY can never form a loop.
* BBJ_SWITCH that has a backward jump appears only for labeled break. */
goto NO_LOOP;
}
- BasicBlock * loopBlock;
+ BasicBlock* loopBlock;
/* The presence of a "back edge" is an indication that a loop might be present here
*
@@ -1504,8 +1516,7 @@ void Compiler::optFindNaturalLoops()
if (head->bbJumpKind == BBJ_ALWAYS)
{
- if (head->bbJumpDest->bbNum <= bottom->bbNum &&
- head->bbJumpDest->bbNum >= top->bbNum )
+ if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum)
{
/* OK - we enter somewhere within the loop */
entry = head->bbJumpDest;
@@ -1513,12 +1524,12 @@ void Compiler::optFindNaturalLoops()
/* some useful asserts
* Cannot enter at the top - should have being caught by redundant jumps */
- assert ((entry != top) || (head->bbFlags & BBF_KEEP_BBJ_ALWAYS));
+ assert((entry != top) || (head->bbFlags & BBF_KEEP_BBJ_ALWAYS));
}
else
{
/* special case - don't consider now */
- //assert (!"Loop entered in weird way!");
+ // assert (!"Loop entered in weird way!");
goto NO_LOOP;
}
}
@@ -1528,9 +1539,9 @@ void Compiler::optFindNaturalLoops()
/* The ENTRY is at the TOP (a do-while loop) */
entry = top;
}
- else
+ else
{
- goto NO_LOOP; // head does not flow into the loop bail for now
+ goto NO_LOOP; // head does not flow into the loop bail for now
}
// Now we find the "first" block -- the earliest block reachable within the loop.
@@ -1540,12 +1551,12 @@ void Compiler::optFindNaturalLoops()
// in the loop known so far.
BasicBlock* first = top;
BasicBlock* newFirst;
- bool blocksToSearch = true;
+ bool blocksToSearch = true;
BasicBlock* validatedAfter = bottom->bbNext;
while (blocksToSearch)
{
blocksToSearch = false;
- newFirst = nullptr;
+ newFirst = nullptr;
blocksToSearch = false;
for (loopBlock = first; loopBlock != validatedAfter; loopBlock = loopBlock->bbNext)
{
@@ -1553,8 +1564,8 @@ void Compiler::optFindNaturalLoops()
for (unsigned j = 0; j < nSucc; j++)
{
BasicBlock* succ = loopBlock->GetSucc(j);
- if ( (newFirst == nullptr && succ->bbNum < first->bbNum)
- || (newFirst != nullptr && succ->bbNum < newFirst->bbNum))
+ if ((newFirst == nullptr && succ->bbNum < first->bbNum) ||
+ (newFirst != nullptr && succ->bbNum < newFirst->bbNum))
{
newFirst = succ;
}
@@ -1563,7 +1574,7 @@ void Compiler::optFindNaturalLoops()
if (newFirst != nullptr)
{
validatedAfter = first;
- first = newFirst;
+ first = newFirst;
blocksToSearch = true;
}
}
@@ -1571,8 +1582,9 @@ void Compiler::optFindNaturalLoops()
// Is "head" still before "first"? If not, we don't have a valid loop...
if (head->bbNum >= first->bbNum)
{
- JITDUMP("Extending loop [BB%02u..BB%02u] 'first' to BB%02u captures head BB%02u. Rejecting loop.\n",
- top->bbNum, bottom->bbNum, first->bbNum, head->bbNum);
+ JITDUMP(
+ "Extending loop [BB%02u..BB%02u] 'first' to BB%02u captures head BB%02u. Rejecting loop.\n",
+ top->bbNum, bottom->bbNum, first->bbNum, head->bbNum);
goto NO_LOOP;
}
@@ -1581,9 +1593,7 @@ void Compiler::optFindNaturalLoops()
* At the same time check if the loop has a single exit
* point - those loops are easier to optimize */
- for (loopBlock = top;
- loopBlock != bottom->bbNext;
- loopBlock = loopBlock->bbNext)
+ for (loopBlock = top; loopBlock != bottom->bbNext; loopBlock = loopBlock->bbNext)
{
if (!fgDominate(entry, loopBlock))
{
@@ -1603,65 +1613,64 @@ void Compiler::optFindNaturalLoops()
}
}
- BasicBlock * exitPoint;
+ BasicBlock* exitPoint;
switch (loopBlock->bbJumpKind)
{
- case BBJ_COND:
- case BBJ_CALLFINALLY:
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- assert (loopBlock->bbJumpDest);
- exitPoint = loopBlock->bbJumpDest;
-
- if (exitPoint->bbNum < top->bbNum ||
- exitPoint->bbNum > bottom->bbNum )
- {
- /* exit from a block other than BOTTOM */
- exit = loopBlock;
- exitCount++;
- }
- break;
-
- case BBJ_NONE:
- break;
+ case BBJ_COND:
+ case BBJ_CALLFINALLY:
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ assert(loopBlock->bbJumpDest);
+ exitPoint = loopBlock->bbJumpDest;
+
+ if (exitPoint->bbNum < top->bbNum || exitPoint->bbNum > bottom->bbNum)
+ {
+ /* exit from a block other than BOTTOM */
+ exit = loopBlock;
+ exitCount++;
+ }
+ break;
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- /* The "try" associated with this "finally" must be in the
- * same loop, so the finally block will return control inside the loop */
- break;
+ case BBJ_NONE:
+ break;
- case BBJ_THROW:
- case BBJ_RETURN:
- /* those are exits from the loop */
- exit = loopBlock;
- exitCount++;
- break;
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ /* The "try" associated with this "finally" must be in the
+ * same loop, so the finally block will return control inside the loop */
+ break;
- case BBJ_SWITCH:
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ /* those are exits from the loop */
+ exit = loopBlock;
+ exitCount++;
+ break;
- unsigned jumpCnt; jumpCnt = loopBlock->bbJumpSwt->bbsCount;
- BasicBlock * * jumpTab; jumpTab = loopBlock->bbJumpSwt->bbsDstTab;
+ case BBJ_SWITCH:
- do
- {
- noway_assert(*jumpTab);
- exitPoint = *jumpTab;
+ unsigned jumpCnt;
+ jumpCnt = loopBlock->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab;
+ jumpTab = loopBlock->bbJumpSwt->bbsDstTab;
- if (exitPoint->bbNum < top->bbNum ||
- exitPoint->bbNum > bottom->bbNum )
+ do
{
- exit = loopBlock;
- exitCount++;
- }
- }
- while (++jumpTab, --jumpCnt);
- break;
+ noway_assert(*jumpTab);
+ exitPoint = *jumpTab;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ if (exitPoint->bbNum < top->bbNum || exitPoint->bbNum > bottom->bbNum)
+ {
+ exit = loopBlock;
+ exitCount++;
+ }
+ } while (++jumpTab, --jumpCnt);
+ break;
+
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
}
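
A rough sketch of the exit-counting idea; this simplification counts exit blocks rather than individual switch targets, and uses stand-in types:

    #include <vector>

    struct LoopBlockInfo
    {
        bool                  exitsFunction; // return/throw style blocks always leave the loop
        std::vector<unsigned> jumpTargets;   // bbNum-style numbers of explicit jump/switch targets
    };

    // Counts the blocks in [topNum, bottomNum] that can transfer control outside that range.
    unsigned countLoopExitBlocks(const std::vector<LoopBlockInfo>& loopBlocks, unsigned topNum, unsigned bottomNum)
    {
        unsigned exitCount = 0;
        for (const LoopBlockInfo& blk : loopBlocks)
        {
            bool isExit = blk.exitsFunction;
            for (unsigned target : blk.jumpTargets)
            {
                if ((target < topNum) || (target > bottomNum))
                {
                    isExit = true;
                }
            }
            if (isExit)
            {
                exitCount++;
            }
        }
        return exitCount;
    }
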
@@ -1693,13 +1702,15 @@ void Compiler::optFindNaturalLoops()
{
switch (loopBlock->bbJumpKind)
{
- case BBJ_ALWAYS:
- case BBJ_THROW:
- case BBJ_RETURN:
- if (fgDominate(loopBlock, bottom))
- goto NO_LOOP;
- default:
- break;
+ case BBJ_ALWAYS:
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ if (fgDominate(loopBlock, bottom))
+ {
+ goto NO_LOOP;
+ }
+ default:
+ break;
}
}
@@ -1707,8 +1718,7 @@ void Compiler::optFindNaturalLoops()
for (predEntry = entry->bbPreds; predEntry; predEntry = predEntry->flNext)
{
- if (predEntry->flBlock->bbNum >= top->bbNum &&
- predEntry->flBlock->bbNum <= bottom->bbNum )
+ if (predEntry->flBlock->bbNum >= top->bbNum && predEntry->flBlock->bbNum <= bottom->bbNum)
{
canIterateLoop = true;
break;
@@ -1722,7 +1732,9 @@ void Compiler::optFindNaturalLoops()
}
if (!canIterateLoop)
+ {
goto NO_LOOP;
+ }
/* Double check - make sure that all loop blocks except ENTRY
* have no predecessors outside the loop - this ensures only one loop entry and prevents
@@ -1732,21 +1744,18 @@ void Compiler::optFindNaturalLoops()
* Loops of the form "while (a || b)" will be treated as 2 nested loops (with the same header)
*/
- for (loopBlock = top;
- loopBlock != bottom->bbNext;
- loopBlock = loopBlock->bbNext)
+ for (loopBlock = top; loopBlock != bottom->bbNext; loopBlock = loopBlock->bbNext)
{
if (loopBlock == entry)
+ {
continue;
+ }
- for (predTop = loopBlock->bbPreds;
- predTop != nullptr;
- predTop = predTop->flNext)
+ for (predTop = loopBlock->bbPreds; predTop != nullptr; predTop = predTop->flNext)
{
- if (predTop->flBlock->bbNum < top->bbNum ||
- predTop->flBlock->bbNum > bottom->bbNum )
+ if (predTop->flBlock->bbNum < top->bbNum || predTop->flBlock->bbNum > bottom->bbNum)
{
- //noway_assert(!"Found loop with multiple entries");
+ // noway_assert(!"Found loop with multiple entries");
goto NO_LOOP;
}
}
@@ -1768,13 +1777,13 @@ void Compiler::optFindNaturalLoops()
// ...
// }
//
- // Here, BB10 is more nested than BB02.
+ // Here, BB10 is more nested than BB02.
- if (bottom->hasTryIndex() &&
- !bbInTryRegions(bottom->getTryIndex(), first))
+ if (bottom->hasTryIndex() && !bbInTryRegions(bottom->getTryIndex(), first))
{
- JITDUMP("Loop 'first' BB%02u is in an outer EH region compared to loop 'bottom' BB%02u. Rejecting loop.\n",
- first->bbNum, bottom->bbNum);
+ JITDUMP("Loop 'first' BB%02u is in an outer EH region compared to loop 'bottom' BB%02u. Rejecting "
+ "loop.\n",
+ first->bbNum, bottom->bbNum);
goto NO_LOOP;
}
@@ -1790,8 +1799,7 @@ void Compiler::optFindNaturalLoops()
if ((first->bbFlags & BBF_FINALLY_TARGET) != 0)
{
- JITDUMP("Loop 'first' BB%02u is a finally target. Rejecting loop.\n",
- first->bbNum);
+ JITDUMP("Loop 'first' BB%02u is a finally target. Rejecting loop.\n", first->bbNum);
goto NO_LOOP;
}
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
@@ -1800,10 +1808,10 @@ void Compiler::optFindNaturalLoops()
* If we found only one exit, record it in the table too
* (otherwise an exit = 0 in the loop table means multiple exits) */
- assert (pred);
+ assert(pred);
if (exitCount != 1)
{
- exit = 0;
+ exit = nullptr;
}
optRecordLoop(head, first, top, entry, bottom, exit, exitCount);
@@ -1824,8 +1832,8 @@ void Compiler::optFindNaturalLoops()
#endif // COUNT_LOOPS
}
- /* current predecessor not good for a loop - continue with another one, if any */
-NO_LOOP: ;
+ /* current predecessor not good for a loop - continue with another one, if any */
+ NO_LOOP:;
}
}
@@ -1850,8 +1858,8 @@ NO_LOOP: ;
possibleParent--;
if (optLoopTable[possibleParent].lpContains(optLoopTable[loopInd]))
{
- optLoopTable[loopInd].lpParent = possibleParent;
- optLoopTable[loopInd].lpSibling = optLoopTable[possibleParent].lpChild;
+ optLoopTable[loopInd].lpParent = possibleParent;
+ optLoopTable[loopInd].lpSibling = optLoopTable[possibleParent].lpChild;
optLoopTable[possibleParent].lpChild = loopInd;
break;
}
@@ -1863,13 +1871,16 @@ NO_LOOP: ;
// this -- the innermost loop labeling will be done last.
for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++)
{
- BasicBlock* first = optLoopTable[loopInd].lpFirst;
+ BasicBlock* first = optLoopTable[loopInd].lpFirst;
BasicBlock* bottom = optLoopTable[loopInd].lpBottom;
for (BasicBlock* blk = first; blk != nullptr; blk = blk->bbNext)
{
blk->bbNatLoopNum = loopInd;
- if (blk == bottom) break;
- assert(blk->bbNext != nullptr); // We should never reach nullptr.
+ if (blk == bottom)
+ {
+ break;
+ }
+ assert(blk->bbNext != nullptr); // We should never reach nullptr.
}
}
@@ -1880,7 +1891,9 @@ NO_LOOP: ;
{
// Traverse the outermost loops as entries into the loop nest; so skip non-outermost.
if (optLoopTable[loopInd].lpParent != BasicBlock::NOT_IN_LOOP)
+ {
continue;
+ }
// Otherwise...
if (optCanonicalizeLoopNest(loopInd))
@@ -1911,27 +1924,27 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap)
BasicBlock* newJumpDest = nullptr;
switch (blk->bbJumpKind)
{
- case BBJ_THROW:
- case BBJ_RETURN:
- case BBJ_NONE:
- case BBJ_EHFILTERRET:
- case BBJ_EHFINALLYRET:
- case BBJ_EHCATCHRET:
- // These have no jump destination to update.
- break;
+ case BBJ_THROW:
+ case BBJ_RETURN:
+ case BBJ_NONE:
+ case BBJ_EHFILTERRET:
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHCATCHRET:
+ // These have no jump destination to update.
+ break;
- case BBJ_ALWAYS:
- case BBJ_LEAVE:
- case BBJ_CALLFINALLY:
- case BBJ_COND:
- // All of these have a single jump destination to update.
- if (redirectMap->Lookup(blk->bbJumpDest, &newJumpDest))
- {
- blk->bbJumpDest = newJumpDest;
- }
- break;
+ case BBJ_ALWAYS:
+ case BBJ_LEAVE:
+ case BBJ_CALLFINALLY:
+ case BBJ_COND:
+ // All of these have a single jump destination to update.
+ if (redirectMap->Lookup(blk->bbJumpDest, &newJumpDest))
+ {
+ blk->bbJumpDest = newJumpDest;
+ }
+ break;
- case BBJ_SWITCH:
+ case BBJ_SWITCH:
{
bool redirected = false;
for (unsigned i = 0; i < blk->bbJumpSwt->bbsCount; i++)
@@ -1939,17 +1952,19 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap)
if (redirectMap->Lookup(blk->bbJumpSwt->bbsDstTab[i], &newJumpDest))
{
blk->bbJumpSwt->bbsDstTab[i] = newJumpDest;
- redirected = true;
+ redirected = true;
}
}
            // If any redirections happened, invalidate the switch table map for the switch.
if (redirected)
+ {
GetSwitchDescMap()->Remove(blk);
+ }
}
break;
- default:
- unreached();
+ default:
+ unreached();
}
}
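
The redirection itself is a map lookup per jump target; a simplified sketch using std::map and a hypothetical block type in place of the JIT's own:

    #include <map>
    #include <vector>

    struct Blk;
    using BlockMap = std::map<const Blk*, Blk*>;

    struct Blk
    {
        Blk*              jumpDest = nullptr; // single jump target, if the block has one
        std::vector<Blk*> switchTargets;      // switch table targets, if any
    };

    void redirectBlock(Blk* blk, const BlockMap& redirectMap)
    {
        auto it = redirectMap.find(blk->jumpDest);
        if (it != redirectMap.end())
        {
            blk->jumpDest = it->second;
        }
        for (Blk*& target : blk->switchTargets)
        {
            auto sit = redirectMap.find(target);
            if (sit != redirectMap.end())
            {
                target = sit->second;
            }
        }
    }
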
@@ -1961,18 +1976,18 @@ void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to)
// copy the jump destination(s) from "from" to "to".
switch (to->bbJumpKind)
{
- case BBJ_ALWAYS:
- case BBJ_LEAVE:
- case BBJ_CALLFINALLY:
- case BBJ_COND:
- // All of these have a single jump destination to update.
- to->bbJumpDest = from->bbJumpDest;
- break;
+ case BBJ_ALWAYS:
+ case BBJ_LEAVE:
+ case BBJ_CALLFINALLY:
+ case BBJ_COND:
+ // All of these have a single jump destination to update.
+ to->bbJumpDest = from->bbJumpDest;
+ break;
- case BBJ_SWITCH:
+ case BBJ_SWITCH:
{
- to->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc();
- to->bbJumpSwt->bbsCount = from->bbJumpSwt->bbsCount;
+ to->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc();
+ to->bbJumpSwt->bbsCount = from->bbJumpSwt->bbsCount;
to->bbJumpSwt->bbsDstTab = new (this, CMK_BasicBlock) BasicBlock*[from->bbJumpSwt->bbsCount];
for (unsigned i = 0; i < from->bbJumpSwt->bbsCount; i++)
@@ -1982,8 +1997,8 @@ void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to)
}
break;
- default:
- break;
+ default:
+ break;
}
}
@@ -2002,9 +2017,8 @@ bool Compiler::optCanonicalizeLoopNest(unsigned char loopInd)
}
}
- for (unsigned char child = optLoopTable[loopInd].lpChild;
- child != BasicBlock::NOT_IN_LOOP;
- child = optLoopTable[child].lpSibling)
+ for (unsigned char child = optLoopTable[loopInd].lpChild; child != BasicBlock::NOT_IN_LOOP;
+ child = optLoopTable[child].lpSibling)
{
if (optCanonicalizeLoopNest(child))
{
@@ -2021,9 +2035,12 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
BasicBlock* t = optLoopTable[loopInd].lpTop;
if (t->bbNatLoopNum == loopInd)
+ {
return false;
+ }
- JITDUMP("in optCanonicalizeLoop: L%02u has top BB%02u (bottom BB%02u) with natural loop number L%02u: need to canonicalize\n",
+ JITDUMP("in optCanonicalizeLoop: L%02u has top BB%02u (bottom BB%02u) with natural loop number L%02u: need to "
+ "canonicalize\n",
loopInd, t->bbNum, optLoopTable[loopInd].lpBottom->bbNum, t->bbNatLoopNum);
// Otherwise, the top of this loop is also part of a nested loop.
@@ -2112,8 +2129,8 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
// If the bottom block is in the same "try" region, then we extend the EH
// region. Otherwise, we add the new block outside the "try" region.
- bool extendRegion = BasicBlock::sameTryRegion(f, b);
- BasicBlock* newT = fgNewBBbefore(BBJ_NONE, f, extendRegion);
+ bool extendRegion = BasicBlock::sameTryRegion(f, b);
+ BasicBlock* newT = fgNewBBbefore(BBJ_NONE, f, extendRegion);
if (!extendRegion)
{
// We need to set the EH region manually. Set it to be the same
@@ -2148,12 +2165,14 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
// outside-in, so we shouldn't encounter the new blocks at the loop boundaries, or in the predecessor lists.
if (t->bbNum <= topPredBlock->bbNum && topPredBlock->bbNum <= b->bbNum)
{
- JITDUMP("in optCanonicalizeLoop: 'top' predecessor BB%02u is in the range of L%02u (BB%02u..BB%02u); not redirecting its bottom edge\n",
+ JITDUMP("in optCanonicalizeLoop: 'top' predecessor BB%02u is in the range of L%02u (BB%02u..BB%02u); not "
+ "redirecting its bottom edge\n",
topPredBlock->bbNum, loopInd, t->bbNum, b->bbNum);
continue;
}
- JITDUMP("in optCanonicalizeLoop: redirect top predecessor BB%02u to BB%02u\n", topPredBlock->bbNum, newT->bbNum);
+ JITDUMP("in optCanonicalizeLoop: redirect top predecessor BB%02u to BB%02u\n", topPredBlock->bbNum,
+ newT->bbNum);
optRedirectBlock(topPredBlock, blockMap);
}
@@ -2169,43 +2188,40 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
// If it had been a do-while loop (top == entry), update entry, as well.
BasicBlock* origE = optLoopTable[loopInd].lpEntry;
if (optLoopTable[loopInd].lpTop == origE)
+ {
optLoopTable[loopInd].lpEntry = newT;
- optLoopTable[loopInd].lpTop = newT;
+ }
+ optLoopTable[loopInd].lpTop = newT;
optLoopTable[loopInd].lpFirst = newT;
newT->bbNatLoopNum = loopInd;
- JITDUMP("in optCanonicalizeLoop: made new block BB%02u [%p] the new unique top of loop %d.\n", newT->bbNum, dspPtr(newT), loopInd);
+ JITDUMP("in optCanonicalizeLoop: made new block BB%02u [%p] the new unique top of loop %d.\n", newT->bbNum,
+ dspPtr(newT), loopInd);
// Make sure the head block still goes to the entry...
- if (h->bbJumpKind == BBJ_NONE &&
- h->bbNext != optLoopTable[loopInd].lpEntry)
+ if (h->bbJumpKind == BBJ_NONE && h->bbNext != optLoopTable[loopInd].lpEntry)
{
h->bbJumpKind = BBJ_ALWAYS;
h->bbJumpDest = optLoopTable[loopInd].lpEntry;
}
- else if (h->bbJumpKind == BBJ_COND &&
- h->bbNext == newT &&
- newT != optLoopTable[loopInd].lpEntry)
+ else if (h->bbJumpKind == BBJ_COND && h->bbNext == newT && newT != optLoopTable[loopInd].lpEntry)
{
- BasicBlock* h2 = fgNewBBafter(BBJ_ALWAYS, h, /*extendRegion*/true);
+ BasicBlock* h2 = fgNewBBafter(BBJ_ALWAYS, h, /*extendRegion*/ true);
optLoopTable[loopInd].lpHead = h2;
- h2->bbJumpDest = optLoopTable[loopInd].lpEntry;
- h2->bbTreeList = nullptr;
+ h2->bbJumpDest = optLoopTable[loopInd].lpEntry;
+ h2->bbTreeList = nullptr;
fgInsertStmtAtEnd(h2, fgNewStmtFromTree(gtNewOperNode(GT_NOP, TYP_VOID, nullptr)));
}
// If any loops nested in "loopInd" have the same head and entry as "loopInd",
// it must be the case that they were do-while's (since "h" fell through to the entry).
// The new node "newT" becomes the head of such loops.
- for (unsigned char childLoop = optLoopTable[loopInd].lpChild;
- childLoop != BasicBlock::NOT_IN_LOOP;
- childLoop = optLoopTable[childLoop].lpSibling)
+ for (unsigned char childLoop = optLoopTable[loopInd].lpChild; childLoop != BasicBlock::NOT_IN_LOOP;
+ childLoop = optLoopTable[childLoop].lpSibling)
{
- if ( optLoopTable[childLoop].lpEntry == origE
- && optLoopTable[childLoop].lpHead == h
- && newT->bbJumpKind == BBJ_NONE
- && newT->bbNext == origE)
+ if (optLoopTable[childLoop].lpEntry == origE && optLoopTable[childLoop].lpHead == h &&
+ newT->bbJumpKind == BBJ_NONE && newT->bbNext == origE)
{
optUpdateLoopHead(childLoop, h, newT);
}
@@ -2216,21 +2232,31 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
bool Compiler::optLoopContains(unsigned l1, unsigned l2)
{
assert(l1 != BasicBlock::NOT_IN_LOOP);
- if (l1 == l2) return true;
- else if (l2 == BasicBlock::NOT_IN_LOOP) return false;
- else return optLoopContains(l1, optLoopTable[l2].lpParent);
+ if (l1 == l2)
+ {
+ return true;
+ }
+ else if (l2 == BasicBlock::NOT_IN_LOOP)
+ {
+ return false;
+ }
+ else
+ {
+ return optLoopContains(l1, optLoopTable[l2].lpParent);
+ }
}
-
+
void Compiler::optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to)
{
assert(optLoopTable[loopInd].lpHead == from);
optLoopTable[loopInd].lpHead = to;
- for (unsigned char childLoop = optLoopTable[loopInd].lpChild;
- childLoop != BasicBlock::NOT_IN_LOOP;
- childLoop = optLoopTable[childLoop].lpSibling)
+ for (unsigned char childLoop = optLoopTable[loopInd].lpChild; childLoop != BasicBlock::NOT_IN_LOOP;
+ childLoop = optLoopTable[childLoop].lpSibling)
{
if (optLoopTable[childLoop].lpHead == from)
+ {
optUpdateLoopHead(childLoop, from, to);
+ }
}
}
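
The containment test walks parent links until it hits the queried loop or runs off the top of the nest; a compact sketch using a plain parent array instead of the loop table:

    #include <vector>

    const unsigned kNotInLoop = ~0u; // stand-in for BasicBlock::NOT_IN_LOOP

    // Loop l1 contains loop l2 if l2 is l1 itself or l1 appears on l2's parent chain.
    bool loopContains(const std::vector<unsigned>& parentOf, unsigned l1, unsigned l2)
    {
        if (l1 == l2)
        {
            return true;
        }
        if (l2 == kNotInLoop)
        {
            return false;
        }
        return loopContains(parentOf, l1, parentOf[l2]);
    }
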
@@ -2238,54 +2264,82 @@ void Compiler::optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock*
 * If the "i += const" will cause an overflow exception for the small types.
*/
-bool jitIterSmallOverflow(int iterAtExit, var_types incrType)
+bool jitIterSmallOverflow(int iterAtExit, var_types incrType)
{
- int type_MAX;
+ int type_MAX;
switch (incrType)
{
- case TYP_BYTE: type_MAX = SCHAR_MAX; break;
- case TYP_UBYTE: type_MAX = UCHAR_MAX; break;
- case TYP_SHORT: type_MAX = SHRT_MAX; break;
- case TYP_CHAR: type_MAX = USHRT_MAX; break;
+ case TYP_BYTE:
+ type_MAX = SCHAR_MAX;
+ break;
+ case TYP_UBYTE:
+ type_MAX = UCHAR_MAX;
+ break;
+ case TYP_SHORT:
+ type_MAX = SHRT_MAX;
+ break;
+ case TYP_CHAR:
+ type_MAX = USHRT_MAX;
+ break;
- case TYP_UINT: // Detected by checking for 32bit ....
- case TYP_INT: return false; // ... overflow same as done for TYP_INT
+ case TYP_UINT: // Detected by checking for 32bit ....
+ case TYP_INT:
+ return false; // ... overflow same as done for TYP_INT
- default: NO_WAY("Bad type");
+ default:
+ NO_WAY("Bad type");
}
if (iterAtExit > type_MAX)
+ {
return true;
+ }
else
+ {
return false;
+ }
}
/*****************************************************************************
* If the "i -= const" will cause an underflow exception for the small types
*/
-bool jitIterSmallUnderflow(int iterAtExit, var_types decrType)
+bool jitIterSmallUnderflow(int iterAtExit, var_types decrType)
{
- int type_MIN;
+ int type_MIN;
switch (decrType)
{
- case TYP_BYTE: type_MIN = SCHAR_MIN; break;
- case TYP_SHORT: type_MIN = SHRT_MIN; break;
- case TYP_UBYTE: type_MIN = 0; break;
- case TYP_CHAR: type_MIN = 0; break;
+ case TYP_BYTE:
+ type_MIN = SCHAR_MIN;
+ break;
+ case TYP_SHORT:
+ type_MIN = SHRT_MIN;
+ break;
+ case TYP_UBYTE:
+ type_MIN = 0;
+ break;
+ case TYP_CHAR:
+ type_MIN = 0;
+ break;
- case TYP_UINT: // Detected by checking for 32bit ....
- case TYP_INT: return false; // ... underflow same as done for TYP_INT
+ case TYP_UINT: // Detected by checking for 32bit ....
+ case TYP_INT:
+ return false; // ... underflow same as done for TYP_INT
- default: NO_WAY("Bad type");
+ default:
+ NO_WAY("Bad type");
}
if (iterAtExit < type_MIN)
+ {
return true;
+ }
else
+ {
return false;
+ }
}
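
Both helpers reduce to a range check on the 32-bit value the iterator reaches at loop exit; an illustrative sketch where the narrow type's limits are passed in rather than selected by a switch:

    // typeMax would be SCHAR_MAX/UCHAR_MAX/SHRT_MAX/USHRT_MAX; typeMin would be SCHAR_MIN/SHRT_MIN/0.
    bool smallTypeIterOverflows(int iterAtExit, int typeMax)
    {
        return iterAtExit > typeMax;
    }

    bool smallTypeIterUnderflows(int iterAtExit, int typeMin)
    {
        return iterAtExit < typeMin;
    }
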
/*****************************************************************************
@@ -2294,57 +2348,82 @@ bool jitIterSmallUnderflow(int iterAtExit, var_types decrType)
* in a constant loop. If it cannot prove the number is constant returns false
*/
-bool Compiler::optComputeLoopRep(int constInit,
- int constLimit,
- int iterInc,
- genTreeOps iterOper,
- var_types iterOperType,
- genTreeOps testOper,
- bool unsTest,
- bool dupCond,
- unsigned * iterCount)
+bool Compiler::optComputeLoopRep(int constInit,
+ int constLimit,
+ int iterInc,
+ genTreeOps iterOper,
+ var_types iterOperType,
+ genTreeOps testOper,
+ bool unsTest,
+ bool dupCond,
+ unsigned* iterCount)
{
noway_assert(genActualType(iterOperType) == TYP_INT);
- __int64 constInitX;
- __int64 constLimitX;
+ __int64 constInitX;
+ __int64 constLimitX;
- unsigned loopCount;
- int iterSign;
+ unsigned loopCount;
+ int iterSign;
// Using this, we can just do a signed comparison with other 32 bit values.
- if (unsTest) constLimitX = (unsigned int)constLimit;
- else constLimitX = ( signed int)constLimit;
+ if (unsTest)
+ {
+ constLimitX = (unsigned int)constLimit;
+ }
+ else
+ {
+ constLimitX = (signed int)constLimit;
+ }
switch (iterOperType)
{
- // For small types, the iteration operator will narrow these values if big
+// For small types, the iteration operator will narrow these values if big
-#define INIT_ITER_BY_TYPE(type) constInitX = (type)constInit; iterInc = (type)iterInc;
+#define INIT_ITER_BY_TYPE(type) \
+ constInitX = (type)constInit; \
+ iterInc = (type)iterInc;
- case TYP_BYTE: INIT_ITER_BY_TYPE( signed char ); break;
- case TYP_UBYTE: INIT_ITER_BY_TYPE(unsigned char ); break;
- case TYP_SHORT: INIT_ITER_BY_TYPE( signed short); break;
- case TYP_CHAR: INIT_ITER_BY_TYPE(unsigned short); break;
+ case TYP_BYTE:
+ INIT_ITER_BY_TYPE(signed char);
+ break;
+ case TYP_UBYTE:
+ INIT_ITER_BY_TYPE(unsigned char);
+ break;
+ case TYP_SHORT:
+ INIT_ITER_BY_TYPE(signed short);
+ break;
+ case TYP_CHAR:
+ INIT_ITER_BY_TYPE(unsigned short);
+ break;
// For the big types, 32 bit arithmetic is performed
- case TYP_INT:
- case TYP_UINT: if (unsTest) constInitX = (unsigned int)constInit;
- else constInitX = ( signed int)constInit;
- break;
+ case TYP_INT:
+ case TYP_UINT:
+ if (unsTest)
+ {
+ constInitX = (unsigned int)constInit;
+ }
+ else
+ {
+ constInitX = (signed int)constInit;
+ }
+ break;
- default:
- noway_assert(!"Bad type");
- NO_WAY("Bad type");
+ default:
+ noway_assert(!"Bad type");
+ NO_WAY("Bad type");
}
/* If iterInc is zero we have an infinite loop */
if (iterInc == 0)
+ {
return false;
+ }
/* Set iterSign to +1 for positive iterInc and -1 for negative iterInc */
- iterSign = (iterInc > 0) ? +1 : -1;
+ iterSign = (iterInc > 0) ? +1 : -1;
/* Initialize loopCount to zero */
loopCount = 0;
@@ -2356,294 +2435,341 @@ bool Compiler::optComputeLoopRep(int constInit,
// always execute the loop once before performing the loop test
if (!dupCond)
{
- loopCount += 1;
- constInitX += iterInc;
+ loopCount += 1;
+ constInitX += iterInc;
}
// bail if count is based on wrap-around math
if (iterInc > 0)
{
if (constLimitX < constInitX)
+ {
return false;
+ }
}
else if (constLimitX > constInitX)
+ {
return false;
+ }
- /* Compute the number of repetitions */
+ /* Compute the number of repetitions */
switch (testOper)
{
- __int64 iterAtExitX;
+ __int64 iterAtExitX;
- case GT_EQ:
- /* something like "for (i=init; i == lim; i++)" doesn't make any sense */
- return false;
+ case GT_EQ:
+ /* something like "for (i=init; i == lim; i++)" doesn't make any sense */
+ return false;
- case GT_NE:
- /* "for (i=init; i != lim; i+=const)" - this is tricky since it may
- * have a constant number of iterations or loop forever -
- * we have to compute (lim-init) mod iterInc to see if it is zero.
- * If mod iterInc is not zero then the limit test will miss an a wrap will occur
- * which is probably not what the end user wanted, but it is legal.
- */
+ case GT_NE:
+ /* "for (i=init; i != lim; i+=const)" - this is tricky since it may
+ * have a constant number of iterations or loop forever -
+ * we have to compute (lim-init) mod iterInc to see if it is zero.
+             * If mod iterInc is not zero then the limit test will miss and a wrap will occur
+ * which is probably not what the end user wanted, but it is legal.
+ */
- if (iterInc > 0)
- {
- /* Stepping by one, i.e. Mod with 1 is always zero */
- if (iterInc != 1)
+ if (iterInc > 0)
{
- if (((constLimitX - constInitX) % iterInc) != 0)
- return false;
+ /* Stepping by one, i.e. Mod with 1 is always zero */
+ if (iterInc != 1)
+ {
+ if (((constLimitX - constInitX) % iterInc) != 0)
+ {
+ return false;
+ }
+ }
}
- }
- else
- {
- noway_assert(iterInc < 0);
- /* Stepping by -1, i.e. Mod with 1 is always zero */
- if (iterInc != -1)
+ else
+ {
+ noway_assert(iterInc < 0);
+ /* Stepping by -1, i.e. Mod with 1 is always zero */
+ if (iterInc != -1)
+ {
+ if (((constInitX - constLimitX) % (-iterInc)) != 0)
+ {
+ return false;
+ }
+ }
+ }
+
+ switch (iterOper)
{
- if (((constInitX - constLimitX) % (-iterInc)) != 0)
+ case GT_ASG_SUB:
+ case GT_SUB:
+ iterInc = -iterInc;
+ __fallthrough;
+
+ case GT_ASG_ADD:
+ case GT_ADD:
+ if (constInitX != constLimitX)
+ {
+ loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1;
+ }
+
+ iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);
+
+ if (unsTest)
+ {
+ iterAtExitX = (unsigned)iterAtExitX;
+ }
+
+ // Check if iteration incr will cause overflow for small types
+ if (jitIterSmallOverflow((int)iterAtExitX, iterOperType))
+ {
+ return false;
+ }
+
+ // iterator with 32bit overflow. Bad for TYP_(U)INT
+ if (iterAtExitX < constLimitX)
+ {
+ return false;
+ }
+
+ *iterCount = loopCount;
+ return true;
+
+ case GT_ASG_MUL:
+ case GT_MUL:
+ case GT_ASG_DIV:
+ case GT_DIV:
+ case GT_ASG_RSH:
+ case GT_RSH:
+ case GT_ASG_LSH:
+ case GT_LSH:
+ case GT_ASG_UDIV:
+ case GT_UDIV:
+ return false;
+
+ default:
+ noway_assert(!"Unknown operator for loop iterator");
return false;
}
- }
- switch (iterOper)
- {
- case GT_ASG_SUB:
- case GT_SUB:
- iterInc = -iterInc;
- __fallthrough;
+ case GT_LT:
+ switch (iterOper)
+ {
+ case GT_ASG_SUB:
+ case GT_SUB:
+ iterInc = -iterInc;
+ __fallthrough;
- case GT_ASG_ADD:
- case GT_ADD:
- if (constInitX != constLimitX)
- loopCount += (unsigned) ((constLimitX - constInitX - iterSign) / iterInc) + 1;
+ case GT_ASG_ADD:
+ case GT_ADD:
+ if (constInitX < constLimitX)
+ {
+ loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1;
+ }
- iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);
-
- if (unsTest)
- iterAtExitX = (unsigned)iterAtExitX;
-
- // Check if iteration incr will cause overflow for small types
- if (jitIterSmallOverflow((int)iterAtExitX, iterOperType))
- return false;
-
- // iterator with 32bit overflow. Bad for TYP_(U)INT
- if (iterAtExitX < constLimitX)
- return false;
+ iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);
- *iterCount = loopCount;
- return true;
+ if (unsTest)
+ {
+ iterAtExitX = (unsigned)iterAtExitX;
+ }
- case GT_ASG_MUL:
- case GT_MUL:
- case GT_ASG_DIV:
- case GT_DIV:
- case GT_ASG_RSH:
- case GT_RSH:
- case GT_ASG_LSH:
- case GT_LSH:
- case GT_ASG_UDIV:
- case GT_UDIV:
- return false;
+ // Check if iteration incr will cause overflow for small types
+ if (jitIterSmallOverflow((int)iterAtExitX, iterOperType))
+ {
+ return false;
+ }
- default:
- noway_assert(!"Unknown operator for loop iterator");
- return false;
- }
+ // iterator with 32bit overflow. Bad for TYP_(U)INT
+ if (iterAtExitX < constLimitX)
+ {
+ return false;
+ }
- case GT_LT:
- switch (iterOper)
- {
- case GT_ASG_SUB:
- case GT_SUB:
- iterInc = -iterInc;
- __fallthrough;
+ *iterCount = loopCount;
+ return true;
+
+ case GT_ASG_MUL:
+ case GT_MUL:
+ case GT_ASG_DIV:
+ case GT_DIV:
+ case GT_ASG_RSH:
+ case GT_RSH:
+ case GT_ASG_LSH:
+ case GT_LSH:
+ case GT_ASG_UDIV:
+ case GT_UDIV:
+ return false;
- case GT_ASG_ADD:
- case GT_ADD:
- if (constInitX < constLimitX)
- loopCount += (unsigned) ((constLimitX - constInitX - iterSign) / iterInc) + 1;
+ default:
+ noway_assert(!"Unknown operator for loop iterator");
+ return false;
+ }
- iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);
-
- if (unsTest)
- iterAtExitX = (unsigned)iterAtExitX;
-
- // Check if iteration incr will cause overflow for small types
- if (jitIterSmallOverflow((int)iterAtExitX, iterOperType))
- return false;
-
- // iterator with 32bit overflow. Bad for TYP_(U)INT
- if (iterAtExitX < constLimitX)
- return false;
-
- *iterCount = loopCount;
- return true;
+ case GT_LE:
+ switch (iterOper)
+ {
+ case GT_ASG_SUB:
+ case GT_SUB:
+ iterInc = -iterInc;
+ __fallthrough;
- case GT_ASG_MUL:
- case GT_MUL:
- case GT_ASG_DIV:
- case GT_DIV:
- case GT_ASG_RSH:
- case GT_RSH:
- case GT_ASG_LSH:
- case GT_LSH:
- case GT_ASG_UDIV:
- case GT_UDIV:
- return false;
+ case GT_ASG_ADD:
+ case GT_ADD:
+ if (constInitX <= constLimitX)
+ {
+ loopCount += (unsigned)((constLimitX - constInitX) / iterInc) + 1;
+ }
- default:
- noway_assert(!"Unknown operator for loop iterator");
- return false;
- }
+ iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);
- case GT_LE:
- switch (iterOper)
- {
- case GT_ASG_SUB:
- case GT_SUB:
- iterInc = -iterInc;
- __fallthrough;
+ if (unsTest)
+ {
+ iterAtExitX = (unsigned)iterAtExitX;
+ }
- case GT_ASG_ADD:
- case GT_ADD:
- if (constInitX <= constLimitX)
- loopCount += (unsigned) ((constLimitX - constInitX) / iterInc) + 1;
-
- iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);
-
- if (unsTest)
- iterAtExitX = (unsigned)iterAtExitX;
-
- // Check if iteration incr will cause overflow for small types
- if (jitIterSmallOverflow((int)iterAtExitX, iterOperType))
- return false;
-
- // iterator with 32bit overflow. Bad for TYP_(U)INT
- if (iterAtExitX <= constLimitX)
- return false;
+ // Check if iteration incr will cause overflow for small types
+ if (jitIterSmallOverflow((int)iterAtExitX, iterOperType))
+ {
+ return false;
+ }
- *iterCount = loopCount;
- return true;
+ // iterator with 32bit overflow. Bad for TYP_(U)INT
+ if (iterAtExitX <= constLimitX)
+ {
+ return false;
+ }
- case GT_ASG_MUL:
- case GT_MUL:
- case GT_ASG_DIV:
- case GT_DIV:
- case GT_ASG_RSH:
- case GT_RSH:
- case GT_ASG_LSH:
- case GT_LSH:
- case GT_ASG_UDIV:
- case GT_UDIV:
- return false;
+ *iterCount = loopCount;
+ return true;
+
+ case GT_ASG_MUL:
+ case GT_MUL:
+ case GT_ASG_DIV:
+ case GT_DIV:
+ case GT_ASG_RSH:
+ case GT_RSH:
+ case GT_ASG_LSH:
+ case GT_LSH:
+ case GT_ASG_UDIV:
+ case GT_UDIV:
+ return false;
- default:
- noway_assert(!"Unknown operator for loop iterator");
- return false;
- }
+ default:
+ noway_assert(!"Unknown operator for loop iterator");
+ return false;
+ }
- case GT_GT:
- switch (iterOper)
- {
- case GT_ASG_SUB:
- case GT_SUB:
- iterInc = -iterInc;
- __fallthrough;
+ case GT_GT:
+ switch (iterOper)
+ {
+ case GT_ASG_SUB:
+ case GT_SUB:
+ iterInc = -iterInc;
+ __fallthrough;
- case GT_ASG_ADD:
- case GT_ADD:
- if (constInitX > constLimitX)
- loopCount += (unsigned) ((constLimitX - constInitX - iterSign) / iterInc) + 1;
+ case GT_ASG_ADD:
+ case GT_ADD:
+ if (constInitX > constLimitX)
+ {
+ loopCount += (unsigned)((constLimitX - constInitX - iterSign) / iterInc) + 1;
+ }
- iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);
-
- if (unsTest)
- iterAtExitX = (unsigned)iterAtExitX;
-
- // Check if small types will underflow
- if (jitIterSmallUnderflow((int)iterAtExitX, iterOperType))
- return false;
+ iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);
- // iterator with 32bit underflow. Bad for TYP_INT and unsigneds
- if (iterAtExitX > constLimitX)
- return false;
+ if (unsTest)
+ {
+ iterAtExitX = (unsigned)iterAtExitX;
+ }
- *iterCount = loopCount;
- return true;
+ // Check if small types will underflow
+ if (jitIterSmallUnderflow((int)iterAtExitX, iterOperType))
+ {
+ return false;
+ }
- case GT_ASG_MUL:
- case GT_MUL:
- case GT_ASG_DIV:
- case GT_DIV:
- case GT_ASG_RSH:
- case GT_RSH:
- case GT_ASG_LSH:
- case GT_LSH:
- case GT_ASG_UDIV:
- case GT_UDIV:
- return false;
+ // iterator with 32bit underflow. Bad for TYP_INT and unsigneds
+ if (iterAtExitX > constLimitX)
+ {
+ return false;
+ }
- default:
- noway_assert(!"Unknown operator for loop iterator");
- return false;
- }
+ *iterCount = loopCount;
+ return true;
+
+ case GT_ASG_MUL:
+ case GT_MUL:
+ case GT_ASG_DIV:
+ case GT_DIV:
+ case GT_ASG_RSH:
+ case GT_RSH:
+ case GT_ASG_LSH:
+ case GT_LSH:
+ case GT_ASG_UDIV:
+ case GT_UDIV:
+ return false;
- case GT_GE:
- switch (iterOper)
- {
- case GT_ASG_SUB:
- case GT_SUB:
- iterInc = -iterInc;
- __fallthrough;
+ default:
+ noway_assert(!"Unknown operator for loop iterator");
+ return false;
+ }
- case GT_ASG_ADD:
- case GT_ADD:
- if (constInitX >= constLimitX)
- loopCount += (unsigned) ((constLimitX - constInitX) / iterInc) + 1;
-
- iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);
-
- if (unsTest)
- iterAtExitX = (unsigned)iterAtExitX;
-
- // Check if small types will underflow
- if (jitIterSmallUnderflow((int)iterAtExitX, iterOperType))
- return false;
-
- // iterator with 32bit underflow. Bad for TYP_INT and unsigneds
- if (iterAtExitX >= constLimitX)
- return false;
+ case GT_GE:
+ switch (iterOper)
+ {
+ case GT_ASG_SUB:
+ case GT_SUB:
+ iterInc = -iterInc;
+ __fallthrough;
- *iterCount = loopCount;
- return true;
+ case GT_ASG_ADD:
+ case GT_ADD:
+ if (constInitX >= constLimitX)
+ {
+ loopCount += (unsigned)((constLimitX - constInitX) / iterInc) + 1;
+ }
- case GT_ASG_MUL:
- case GT_MUL:
- case GT_ASG_DIV:
- case GT_DIV:
- case GT_ASG_RSH:
- case GT_RSH:
- case GT_ASG_LSH:
- case GT_LSH:
- case GT_ASG_UDIV:
- case GT_UDIV:
- return false;
+ iterAtExitX = (int)(constInitX + iterInc * (int)loopCount);
- default:
- noway_assert(!"Unknown operator for loop iterator");
- return false;
- }
+ if (unsTest)
+ {
+ iterAtExitX = (unsigned)iterAtExitX;
+ }
- default:
- noway_assert(!"Unknown operator for loop condition");
+ // Check if small types will underflow
+ if (jitIterSmallUnderflow((int)iterAtExitX, iterOperType))
+ {
+ return false;
+ }
+
+ // iterator with 32bit underflow. Bad for TYP_INT and unsigneds
+ if (iterAtExitX >= constLimitX)
+ {
+ return false;
+ }
+
+ *iterCount = loopCount;
+ return true;
+
+ case GT_ASG_MUL:
+ case GT_MUL:
+ case GT_ASG_DIV:
+ case GT_DIV:
+ case GT_ASG_RSH:
+ case GT_RSH:
+ case GT_ASG_LSH:
+ case GT_LSH:
+ case GT_ASG_UDIV:
+ case GT_UDIV:
+ return false;
+
+ default:
+ noway_assert(!"Unknown operator for loop iterator");
+ return false;
+ }
+
+ default:
+ noway_assert(!"Unknown operator for loop condition");
}
return false;
}
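
The reformatted hunks above belong to Compiler::optComputeLoopRep, which derives a constant trip count for counted loops. As a reading aid, here is a minimal standalone sketch of just the GT_LE / GT_ADD case, using a hypothetical ComputeLoopRepLE helper and plain integers instead of GenTree operators; the small-type overflow checks (jitIterSmallOverflow) are deliberately omitted.

    #include <cstdio>

    // Hypothetical, simplified trip-count helper for "for (int i = init; i <= limit; i += inc)".
    // Mirrors only the GT_LE / GT_ADD case of the code above; returns false when the loop
    // would not terminate by crossing the limit.
    static bool ComputeLoopRepLE(int init, int limit, int inc, unsigned* iterCount)
    {
        if (inc <= 0)
        {
            return false; // only counted-up loops are handled in this sketch
        }

        unsigned loopCount = 0;
        if (init <= limit)
        {
            loopCount = (unsigned)((limit - init) / inc) + 1;
        }

        // Iterator value at loop exit; if it never passes the limit, the loop never exits.
        long long iterAtExit = (long long)init + (long long)inc * loopCount;
        if (iterAtExit <= limit)
        {
            return false;
        }

        *iterCount = loopCount;
        return true;
    }

    int main()
    {
        unsigned n;
        if (ComputeLoopRepLE(0, 9, 2, &n))
        {
            printf("trip count = %u\n", n); // prints 5: i = 0, 2, 4, 6, 8
        }
        return 0;
    }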
-
/*****************************************************************************
*
* Look for loop unrolling candidates and unroll them
@@ -2651,15 +2777,19 @@ bool Compiler::optComputeLoopRep(int constInit,
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-void Compiler::optUnrollLoops()
+void Compiler::optUnrollLoops()
{
if (compCodeOpt() == SMALL_CODE)
+ {
return;
+ }
if (optLoopCount == 0)
+ {
return;
+ }
#ifdef DEBUG
if (JitConfig.JitNoUnroll())
@@ -2674,109 +2804,120 @@ void Compiler::optUnrollLoops()
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In optUnrollLoops()\n");
+ }
#endif
/* Look for loop unrolling candidates */
/* Double loop so that after unrolling an inner loop we set change to true
* and we then go back over all of the loop candidates and try to unroll
- * the next outer loop, until we don't unroll any loops,
+ * the next outer loop, until we don't unroll any loops,
* then change will be false and we are done.
*/
for (;;)
{
- bool change = false;
+ bool change = false;
for (unsigned lnum = 0; lnum < optLoopCount; lnum++)
{
- BasicBlock * block;
- BasicBlock * head;
- BasicBlock * bottom;
-
- GenTree * loop;
- GenTree * test;
- GenTree * incr;
- GenTree * phdr;
- GenTree * init;
-
- bool dupCond;
- int lval;
- int lbeg; // initial value for iterator
- int llim; // limit value for iterator
- unsigned lvar; // iterator lclVar #
- int iterInc; // value to increment the iterator
- genTreeOps iterOper; // type of iterator increment (i.e. ASG_ADD, ASG_SUB, etc.)
- var_types iterOperType; // type result of the oper (for overflow instrs)
- genTreeOps testOper; // type of loop test (i.e. GT_LE, GT_GE, etc.)
- bool unsTest; // Is the comparison u/int
-
- unsigned totalIter; // total number of iterations in the constant loop
- unsigned loopCostSz; // Cost is size of one iteration
- unsigned loopFlags; // actual lpFlags
- unsigned requiredFlags; // required lpFlags
-
- GenTree * loopList; // new stmt list of the unrolled loop
- GenTree * loopLast;
-
- static const int ITER_LIMIT[COUNT_OPT_CODE + 1] =
- {
+ BasicBlock* block;
+ BasicBlock* head;
+ BasicBlock* bottom;
+
+ GenTree* loop;
+ GenTree* test;
+ GenTree* incr;
+ GenTree* phdr;
+ GenTree* init;
+
+ bool dupCond;
+ int lval;
+ int lbeg; // initial value for iterator
+ int llim; // limit value for iterator
+ unsigned lvar; // iterator lclVar #
+ int iterInc; // value to increment the iterator
+ genTreeOps iterOper; // type of iterator increment (i.e. ASG_ADD, ASG_SUB, etc.)
+ var_types iterOperType; // type result of the oper (for overflow instrs)
+ genTreeOps testOper; // type of loop test (i.e. GT_LE, GT_GE, etc.)
+ bool unsTest; // Is the comparison u/int
+
+ unsigned totalIter; // total number of iterations in the constant loop
+ unsigned loopCostSz; // Cost is size of one iteration
+ unsigned loopFlags; // actual lpFlags
+ unsigned requiredFlags; // required lpFlags
+
+ GenTree* loopList; // new stmt list of the unrolled loop
+ GenTree* loopLast;
+
+ static const int ITER_LIMIT[COUNT_OPT_CODE + 1] = {
10, // BLENDED_CODE
0, // SMALL_CODE
20, // FAST_CODE
0 // COUNT_OPT_CODE
};
- noway_assert(ITER_LIMIT[ SMALL_CODE] == 0);
+ noway_assert(ITER_LIMIT[SMALL_CODE] == 0);
noway_assert(ITER_LIMIT[COUNT_OPT_CODE] == 0);
unsigned iterLimit = (unsigned)ITER_LIMIT[compCodeOpt()];
#ifdef DEBUG
if (compStressCompile(STRESS_UNROLL_LOOPS, 50))
+ {
iterLimit *= 10;
+ }
#endif
- static const int UNROLL_LIMIT_SZ[COUNT_OPT_CODE + 1] =
- {
+ static const int UNROLL_LIMIT_SZ[COUNT_OPT_CODE + 1] = {
30, // BLENDED_CODE
0, // SMALL_CODE
60, // FAST_CODE
0 // COUNT_OPT_CODE
};
- noway_assert(UNROLL_LIMIT_SZ[ SMALL_CODE] == 0);
+ noway_assert(UNROLL_LIMIT_SZ[SMALL_CODE] == 0);
noway_assert(UNROLL_LIMIT_SZ[COUNT_OPT_CODE] == 0);
int unrollLimitSz = (unsigned)UNROLL_LIMIT_SZ[compCodeOpt()];
#ifdef DEBUG
if (compStressCompile(STRESS_UNROLL_LOOPS, 50))
+ {
unrollLimitSz *= 10;
+ }
#endif
loopFlags = optLoopTable[lnum].lpFlags;
requiredFlags = LPFLG_DO_WHILE | LPFLG_ONE_EXIT | LPFLG_CONST;
-
/* Ignore the loop if we don't have a do-while with a single exit
that has a constant number of iterations */
- if ((loopFlags & requiredFlags) != requiredFlags)
+ if ((loopFlags & requiredFlags) != requiredFlags)
+ {
continue;
+ }
/* ignore if removed or marked as not unrollable */
- if (optLoopTable[lnum].lpFlags & (LPFLG_DONT_UNROLL | LPFLG_REMOVED))
+ if (optLoopTable[lnum].lpFlags & (LPFLG_DONT_UNROLL | LPFLG_REMOVED))
+ {
continue;
+ }
- head = optLoopTable[lnum].lpHead; noway_assert(head);
- bottom = optLoopTable[lnum].lpBottom; noway_assert(bottom);
+ head = optLoopTable[lnum].lpHead;
+ noway_assert(head);
+ bottom = optLoopTable[lnum].lpBottom;
+ noway_assert(bottom);
/* The single exit must be at the bottom of the loop */
noway_assert(optLoopTable[lnum].lpExit);
- if (optLoopTable[lnum].lpExit != bottom)
+ if (optLoopTable[lnum].lpExit != bottom)
+ {
continue;
+ }
/* Unrolling loops with jumps in them is not worth the headache
* Later we might consider unrolling loops after un-switching */
@@ -2784,15 +2925,17 @@ void Compiler::optUnrollLoops()
block = head;
do
{
- block = block->bbNext; noway_assert(block);
+ block = block->bbNext;
+ noway_assert(block);
- if (block->bbJumpKind != BBJ_NONE)
+ if (block->bbJumpKind != BBJ_NONE)
{
- if (block != bottom)
+ if (block != bottom)
+ {
goto DONE_LOOP;
+ }
}
- }
- while (block != bottom);
+ } while (block != bottom);
/* Get the loop data:
- initial constant
@@ -2803,58 +2946,74 @@ void Compiler::optUnrollLoops()
- loop test type (i.e. GT_GE, GT_LT, etc...)
*/
- lbeg = optLoopTable[lnum].lpConstInit;
- llim = optLoopTable[lnum].lpConstLimit();
- testOper = optLoopTable[lnum].lpTestOper();
+ lbeg = optLoopTable[lnum].lpConstInit;
+ llim = optLoopTable[lnum].lpConstLimit();
+ testOper = optLoopTable[lnum].lpTestOper();
- lvar = optLoopTable[lnum].lpIterVar();
- iterInc = optLoopTable[lnum].lpIterConst();
- iterOper = optLoopTable[lnum].lpIterOper();
+ lvar = optLoopTable[lnum].lpIterVar();
+ iterInc = optLoopTable[lnum].lpIterConst();
+ iterOper = optLoopTable[lnum].lpIterOper();
- iterOperType= optLoopTable[lnum].lpIterOperType();
- unsTest =(optLoopTable[lnum].lpTestTree->gtFlags & GTF_UNSIGNED) != 0;
+ iterOperType = optLoopTable[lnum].lpIterOperType();
+ unsTest = (optLoopTable[lnum].lpTestTree->gtFlags & GTF_UNSIGNED) != 0;
- if (lvaTable[lvar].lvAddrExposed) // If the loop iteration variable is address-exposed then bail
+ if (lvaTable[lvar].lvAddrExposed)
+ { // If the loop iteration variable is address-exposed then bail
continue;
- if (lvaTable[lvar].lvIsStructField) // If the loop iteration variable is a promoted field from a struct then bail
- continue;
+ }
+ if (lvaTable[lvar].lvIsStructField)
+ { // If the loop iteration variable is a promoted field from a struct then bail
+ continue;
+ }
/* Locate the pre-header and initialization and increment/test statements */
- phdr = head->bbTreeList; noway_assert(phdr);
- loop = bottom->bbTreeList; noway_assert(loop);
+ phdr = head->bbTreeList;
+ noway_assert(phdr);
+ loop = bottom->bbTreeList;
+ noway_assert(loop);
- init = head->lastStmt(); noway_assert(init && (init->gtNext == 0));
- test = bottom->lastStmt(); noway_assert(test && (test->gtNext == 0));
- incr = test->gtPrev; noway_assert(incr);
+ init = head->lastStmt();
+ noway_assert(init && (init->gtNext == nullptr));
+ test = bottom->lastStmt();
+ noway_assert(test && (test->gtNext == nullptr));
+ incr = test->gtPrev;
+ noway_assert(incr);
- if (init->gtFlags & GTF_STMT_CMPADD)
+ if (init->gtFlags & GTF_STMT_CMPADD)
{
/* Must be a duplicated loop condition */
noway_assert(init->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
dupCond = true;
- init = init->gtPrev; noway_assert(init);
+ init = init->gtPrev;
+ noway_assert(init);
}
else
+ {
dupCond = false;
+ }
/* Find the number of iterations - the function returns false if not a constant number */
- if (!optComputeLoopRep(lbeg, llim,
- iterInc, iterOper, iterOperType,
- testOper, unsTest, dupCond,
- &totalIter))
+ if (!optComputeLoopRep(lbeg, llim, iterInc, iterOper, iterOperType, testOper, unsTest, dupCond, &totalIter))
+ {
continue;
+ }
/* Forget it if there are too many repetitions or not a constant loop */
- if (totalIter > iterLimit)
+ if (totalIter > iterLimit)
+ {
continue;
+ }
- noway_assert(init->gtOper == GT_STMT); init = init->gtStmt.gtStmtExpr;
- noway_assert(test->gtOper == GT_STMT); test = test->gtStmt.gtStmtExpr;
- noway_assert(incr->gtOper == GT_STMT); incr = incr->gtStmt.gtStmtExpr;
+ noway_assert(init->gtOper == GT_STMT);
+ init = init->gtStmt.gtStmtExpr;
+ noway_assert(test->gtOper == GT_STMT);
+ test = test->gtStmt.gtStmtExpr;
+ noway_assert(incr->gtOper == GT_STMT);
+ incr = incr->gtStmt.gtStmtExpr;
// Don't unroll loops we don't understand.
if (incr->gtOper == GT_ASG)
@@ -2863,20 +3022,15 @@ void Compiler::optUnrollLoops()
}
/* Make sure everything looks ok */
- if (
- (init->gtOper != GT_ASG) ||
- (init->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
- (init->gtOp.gtOp1->gtLclVarCommon.gtLclNum != lvar) ||
- (init->gtOp.gtOp2->gtOper != GT_CNS_INT) ||
- (init->gtOp.gtOp2->gtIntCon.gtIconVal != lbeg) ||
+ if ((init->gtOper != GT_ASG) || (init->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
+ (init->gtOp.gtOp1->gtLclVarCommon.gtLclNum != lvar) || (init->gtOp.gtOp2->gtOper != GT_CNS_INT) ||
+ (init->gtOp.gtOp2->gtIntCon.gtIconVal != lbeg) ||
!((incr->gtOper == GT_ASG_ADD) || (incr->gtOper == GT_ASG_SUB)) ||
- (incr->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
- (incr->gtOp.gtOp1->gtLclVarCommon.gtLclNum != lvar) ||
- (incr->gtOp.gtOp2->gtOper != GT_CNS_INT) ||
- (incr->gtOp.gtOp2->gtIntCon.gtIconVal != iterInc) ||
+ (incr->gtOp.gtOp1->gtOper != GT_LCL_VAR) || (incr->gtOp.gtOp1->gtLclVarCommon.gtLclNum != lvar) ||
+ (incr->gtOp.gtOp2->gtOper != GT_CNS_INT) || (incr->gtOp.gtOp2->gtIntCon.gtIconVal != iterInc) ||
- (test->gtOper != GT_JTRUE) )
+ (test->gtOper != GT_JTRUE))
{
noway_assert(!"Bad precondition in Compiler::optUnrollLoops()");
continue;
@@ -2899,8 +3053,10 @@ void Compiler::optUnrollLoops()
/* Get the expression and stop if end reached */
GenTreePtr expr = stmt->gtStmtExpr;
- if (expr == incr)
+ if (expr == incr)
+ {
break;
+ }
/* Calculate gtCostSz */
gtSetStmtInfo(stmt);
@@ -2908,18 +3064,19 @@ void Compiler::optUnrollLoops()
/* Update loopCostSz */
loopCostSz += stmt->gtCostSz;
}
- }
- while (block != bottom);
+ } while (block != bottom);
/* Compute the estimated increase in code size for the unrolled loop */
- unsigned int fixedLoopCostSz; fixedLoopCostSz = 8;
+ unsigned int fixedLoopCostSz;
+ fixedLoopCostSz = 8;
- int unrollCostSz; unrollCostSz = (loopCostSz * totalIter) - (loopCostSz + fixedLoopCostSz);
+ int unrollCostSz;
+ unrollCostSz = (loopCostSz * totalIter) - (loopCostSz + fixedLoopCostSz);
/* Don't unroll if too much code duplication would result. */
- if (unrollCostSz > unrollLimitSz)
+ if (unrollCostSz > unrollLimitSz)
{
/* prevent this loop from being revisited */
optLoopTable[lnum].lpFlags |= LPFLG_DONT_UNROLL;
@@ -2929,12 +3086,14 @@ void Compiler::optUnrollLoops()
/* Looks like a good idea to unroll this loop, let's do it! */
CLANG_FORMAT_COMMENT_ANCHOR;
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
printf("\nUnrolling loop BB%02u", head->bbNext->bbNum);
if (head->bbNext->bbNum != bottom->bbNum)
+ {
printf("..BB%02u", bottom->bbNum);
+ }
printf(" over V%02u from %u to %u", lvar, lbeg, llim);
printf(" unrollCostSz = %d\n", unrollCostSz);
printf("\n");
@@ -2943,8 +3102,7 @@ void Compiler::optUnrollLoops()
/* Create the unrolled loop statement list */
- loopList =
- loopLast = 0;
+ loopList = loopLast = nullptr;
for (lval = lbeg; totalIter; totalIter--)
{
@@ -2952,10 +3110,11 @@ void Compiler::optUnrollLoops()
do
{
- GenTreeStmt * stmt;
- GenTree * expr;
+ GenTreeStmt* stmt;
+ GenTree* expr;
- block = block->bbNext; noway_assert(block);
+ block = block->bbNext;
+ noway_assert(block);
/* Visit all the statements in the block */
@@ -2963,8 +3122,10 @@ void Compiler::optUnrollLoops()
{
/* Stop if we've reached the end of the loop */
- if (stmt->gtStmtExpr == incr)
+ if (stmt->gtStmtExpr == incr)
+ {
break;
+ }
/* Clone/substitute the expression */
@@ -2972,7 +3133,7 @@ void Compiler::optUnrollLoops()
// cloneExpr doesn't handle everything
- if (!expr)
+ if (!expr)
{
optLoopTable[lnum].lpFlags |= LPFLG_DONT_UNROLL;
goto DONE_LOOP;
@@ -2980,16 +3141,19 @@ void Compiler::optUnrollLoops()
/* Append the expression to our list */
- if (loopList)
+ if (loopList)
+ {
loopLast->gtNext = expr;
+ }
else
- loopList = expr;
+ {
+ loopList = expr;
+ }
expr->gtPrev = loopLast;
- loopLast = expr;
+ loopLast = expr;
}
- }
- while (block != bottom);
+ } while (block != bottom);
/* update the new value for the unrolled iterator */
@@ -3019,7 +3183,7 @@ void Compiler::optUnrollLoops()
if (loopList)
{
loopList->gtPrev = loopLast;
- loopLast->gtNext = 0;
+ loopLast->gtNext = nullptr;
}
/* Replace the body with the unrolled one */
@@ -3028,19 +3192,19 @@ void Compiler::optUnrollLoops()
do
{
- block = block->bbNext; noway_assert(block);
- block->bbTreeList = 0;
+ block = block->bbNext;
+ noway_assert(block);
+ block->bbTreeList = nullptr;
block->bbJumpKind = BBJ_NONE;
block->bbFlags &= ~BBF_NEEDS_GCPOLL;
- }
- while (block != bottom);
+ } while (block != bottom);
- bottom->bbJumpKind = BBJ_NONE;
- bottom->bbTreeList = loopList;
+ bottom->bbJumpKind = BBJ_NONE;
+ bottom->bbTreeList = loopList;
bottom->bbFlags &= ~BBF_NEEDS_GCPOLL;
bottom->modifyBBWeight(bottom->bbWeight / BB_LOOP_WEIGHT);
- bool dummy;
+ bool dummy;
fgMorphStmts(bottom, &dummy, &dummy, &dummy);
@@ -3053,24 +3217,26 @@ void Compiler::optUnrollLoops()
* (the last value of the iterator in the loop)
* and drop the jump condition since the unrolled loop will always execute */
- init->gtOp.gtOp2->gtIntCon.gtIconVal = lval;
+ init->gtOp.gtOp2->gtIntCon.gtIconVal = lval;
/* if the HEAD is a BBJ_COND drop the condition (and make HEAD a BBJ_NONE block) */
if (head->bbJumpKind == BBJ_COND)
{
- phdr = head->bbTreeList; noway_assert(phdr);
+ phdr = head->bbTreeList;
+ noway_assert(phdr);
test = phdr->gtPrev;
- noway_assert(test && (test->gtNext == 0));
+ noway_assert(test && (test->gtNext == nullptr));
noway_assert(test->gtOper == GT_STMT);
noway_assert(test->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
- init = test->gtPrev; noway_assert(init && (init->gtNext == test));
+ init = test->gtPrev;
+ noway_assert(init && (init->gtNext == test));
noway_assert(init->gtOper == GT_STMT);
- init->gtNext = 0;
- phdr->gtPrev = init;
+ init->gtNext = nullptr;
+ phdr->gtPrev = init;
head->bbJumpKind = BBJ_NONE;
head->bbFlags &= ~BBF_NEEDS_GCPOLL;
@@ -3084,7 +3250,7 @@ void Compiler::optUnrollLoops()
noway_assert(head->bbJumpKind == BBJ_NONE);
}
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
printf("Whole unrolled loop:\n");
@@ -3114,17 +3280,18 @@ void Compiler::optUnrollLoops()
* (also make head and bottom NULL - to hit an assert or GPF) */
optLoopTable[lnum].lpFlags |= LPFLG_REMOVED;
- optLoopTable[lnum].lpHead =
- optLoopTable[lnum].lpBottom = nullptr;
+ optLoopTable[lnum].lpHead = optLoopTable[lnum].lpBottom = nullptr;
DONE_LOOP:;
}
- if (!change)
+ if (!change)
+ {
break;
+ }
}
-#ifdef DEBUG
+#ifdef DEBUG
fgDebugCheckBBlist();
#endif
}
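
The optUnrollLoops hunks above are formatting-only, but the size heuristic they touch is worth keeping in mind: unrolling duplicates the body totalIter times while removing one body plus a fixed amount of loop overhead, and the result must stay under a per-opt-level size budget. A rough sketch under those assumptions, with the BLENDED_CODE limits hard-coded and a made-up WorthUnrolling helper:

    #include <cstdio>

    // Illustrative only: mirrors the shape of the unrolling size check, not the JIT's exact code.
    static bool WorthUnrolling(unsigned loopCostSz, unsigned totalIter)
    {
        const unsigned iterLimit       = 10; // ITER_LIMIT[BLENDED_CODE]
        const int      unrollLimitSz   = 30; // UNROLL_LIMIT_SZ[BLENDED_CODE]
        const unsigned fixedLoopCostSz = 8;  // estimated size of the loop test/branch removed

        if (totalIter > iterLimit)
        {
            return false; // too many iterations to duplicate
        }

        // Estimated code growth: totalIter copies of the body minus what unrolling removes.
        int unrollCostSz = (int)(loopCostSz * totalIter) - (int)(loopCostSz + fixedLoopCostSz);
        return unrollCostSz <= unrollLimitSz;
    }

    int main()
    {
        printf("%d\n", WorthUnrolling(6, 4));  // 24 - 14 = 10   <= 30 -> 1
        printf("%d\n", WorthUnrolling(20, 8)); // 160 - 28 = 132 >  30 -> 0
        return 0;
    }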
@@ -3138,20 +3305,21 @@ void Compiler::optUnrollLoops()
* not execute a method call.
*/
-bool Compiler::optReachWithoutCall(BasicBlock *topBB,
- BasicBlock *botBB)
+bool Compiler::optReachWithoutCall(BasicBlock* topBB, BasicBlock* botBB)
{
- // TODO-Cleanup: Currently BBF_GC_SAFE_POINT is not set for helper calls,
+ // TODO-Cleanup: Currently BBF_GC_SAFE_POINT is not set for helper calls,
// as some helper calls are neither interruptible nor hijackable.
// When we can determine this, then we can set BBF_GC_SAFE_POINT for
// those helpers too.
-
+
noway_assert(topBB->bbNum <= botBB->bbNum);
// We can always check topBB and botBB for any gc safe points and early out
if ((topBB->bbFlags | botBB->bbFlags) & BBF_GC_SAFE_POINT)
+ {
return false;
+ }
// Otherwise we will need to rely upon the dominator sets
@@ -3161,7 +3329,7 @@ bool Compiler::optReachWithoutCall(BasicBlock *topBB,
return true;
}
- BasicBlock *curBB = topBB;
+ BasicBlock* curBB = topBB;
for (;;)
{
noway_assert(curBB);
@@ -3180,9 +3348,9 @@ bool Compiler::optReachWithoutCall(BasicBlock *topBB,
{
// Will this block always execute on the way to botBB ?
//
- // Since we are checking every block in [topBB .. botBB] and we are using
+ // Since we are checking every block in [topBB .. botBB] and we are using
// a lexical definition of a loop.
- // (all that we know is that is that botBB is a back-edge to topBB)
+    // (all that we know is that botBB is a back-edge to topBB)
// Thus while walking blocks in this range we may encounter some blocks
// that are not really part of the loop, and so we need to perform
// some additional checks:
@@ -3193,14 +3361,18 @@ bool Compiler::optReachWithoutCall(BasicBlock *topBB,
// will be encountered in the loop and we can return false
//
if (fgDominate(curBB, botBB) && fgReachable(topBB, curBB))
- return false;
+ {
+ return false;
+ }
}
else
{
// If we've reached the destination block, then we're done
if (curBB == botBB)
+ {
break;
+ }
}
}
@@ -3210,7 +3382,7 @@ bool Compiler::optReachWithoutCall(BasicBlock *topBB,
// If we didn't find any blocks that contained a gc safe point and
// also met the fgDominate and fgReachable criteria then we must return true
//
- return true;
+ return true;
}
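
optReachWithoutCall, reformatted above, asks whether the loop spanning [topBB .. botBB] can iterate without ever crossing a GC safe point. A toy model of that walk, with a hypothetical Block struct that collapses BBF_GC_SAFE_POINT and the fgDominate/fgReachable filtering into two booleans:

    #include <cstdio>
    #include <vector>

    // Simplified stand-in for a BasicBlock in the lexical range [topBB .. botBB].
    struct Block
    {
        bool hasGcSafePoint;   // stands in for BBF_GC_SAFE_POINT
        bool alwaysOnLoopPath; // stands in for fgDominate(cur, botBB) && fgReachable(topBB, cur)
    };

    // Returns true when no block guaranteed to execute each iteration contains a safe point.
    static bool reachWithoutCall(const std::vector<Block>& range)
    {
        for (const Block& b : range)
        {
            if (b.hasGcSafePoint && b.alwaysOnLoopPath)
            {
                return false; // every iteration passes through a GC safe point
            }
        }
        return true;
    }

    int main()
    {
        std::vector<Block> loop = {{false, true}, {true, true}, {false, false}};
        printf("%s\n", reachWithoutCall(loop) ? "no guaranteed safe point" : "safe point each iteration");
        return 0;
    }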
/*****************************************************************************
@@ -3218,18 +3390,19 @@ bool Compiler::optReachWithoutCall(BasicBlock *topBB,
* Find the loop termination test at the bottom of the loop
*/
-static
-GenTreePtr optFindLoopTermTest(BasicBlock *bottom)
+static GenTreePtr optFindLoopTermTest(BasicBlock* bottom)
{
- GenTreePtr testt = bottom->bbTreeList;
+ GenTreePtr testt = bottom->bbTreeList;
assert(testt && testt->gtOper == GT_STMT);
- GenTreePtr result = testt->gtPrev;
+ GenTreePtr result = testt->gtPrev;
#ifdef DEBUG
while (testt->gtNext)
+ {
testt = testt->gtNext;
+ }
assert(testt == result);
#endif
@@ -3241,11 +3414,11 @@ GenTreePtr optFindLoopTermTest(BasicBlock *bottom)
 * Optimize "jmp C; do{} C:while(cond);" loops to "if (cond){ do{}while(cond); }"
*/
-void Compiler::fgOptWhileLoop(BasicBlock * block)
+void Compiler::fgOptWhileLoop(BasicBlock* block)
{
noway_assert(!opts.MinOpts() && !opts.compDbgCode);
noway_assert(compCodeOpt() != SMALL_CODE);
-
+
/*
Optimize while loops into do { } while loop
Our loop hoisting logic requires do { } while loops.
@@ -3279,26 +3452,33 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
/* Does the BB end with an unconditional jump? */
- if (block->bbJumpKind != BBJ_ALWAYS ||
- (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) // It can't be one of the ones we use for our exception magic
+ if (block->bbJumpKind != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS))
+ { // It can't be one of the ones we use for our exception magic
return;
+ }
- // It has to be a forward jump
+ // It has to be a forward jump
// TODO-CQ: Check if we can also optimize the backwards jump as well.
//
if (fgIsForwardBranch(block) == false)
+ {
return;
+ }
// Get hold of the jump target
- BasicBlock * bTest = block->bbJumpDest;
-
+ BasicBlock* bTest = block->bbJumpDest;
+
// Does the block consist of 'jtrue(cond) block' ?
- if (bTest->bbJumpKind != BBJ_COND)
+ if (bTest->bbJumpKind != BBJ_COND)
+ {
return;
+ }
// bTest must be a backwards jump to block->bbNext
- if (bTest->bbJumpDest != block->bbNext)
+ if (bTest->bbJumpDest != block->bbNext)
+ {
return;
+ }
// Since test is a BBJ_COND it will have a bbNext
noway_assert(bTest->bbNext);
@@ -3306,13 +3486,17 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
// 'block' must be in the same try region as the condition, since we're going to insert
// a duplicated condition in 'block', and the condition might include exception throwing code.
if (!BasicBlock::sameTryRegion(block, bTest))
+ {
return;
+ }
// We're going to change 'block' to branch to bTest->bbNext, so that also better be in the
// same try region (or no try region) to avoid generating illegal flow.
BasicBlock* bTestNext = bTest->bbNext;
if (bTestNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bTestNext))
+ {
return;
+ }
GenTreePtr condStmt = optFindLoopTermTest(bTest);
@@ -3321,34 +3505,38 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
    // TODO-CQ: consider cloning the whole bTest block and inserting it after block.
//
if (bTest->bbTreeList != condStmt)
+ {
return;
+ }
/* Get to the condition node from the statement tree */
noway_assert(condStmt->gtOper == GT_STMT);
-
+
GenTreePtr condTree = condStmt->gtStmt.gtStmtExpr;
noway_assert(condTree->gtOper == GT_JTRUE);
-
+
condTree = condTree->gtOp.gtOp1;
// The condTree has to be a RelOp comparison
// TODO-CQ: Check if we can also optimize the backwards jump as well.
//
if (condTree->OperIsCompare() == false)
+ {
return;
-
+ }
+
/* We call gtPrepareCost to measure the cost of duplicating this tree */
-
+
gtPrepareCost(condTree);
unsigned estDupCostSz = condTree->gtCostSz;
- double loopIterations = (double) BB_LOOP_WEIGHT;
+ double loopIterations = (double)BB_LOOP_WEIGHT;
- bool allProfileWeightsAreValid = false;
- BasicBlock::weight_t weightBlock = block->bbWeight;
- BasicBlock::weight_t weightTest = bTest->bbWeight;
- BasicBlock::weight_t weightNext = block->bbNext->bbWeight;
+ bool allProfileWeightsAreValid = false;
+ BasicBlock::weight_t weightBlock = block->bbWeight;
+ BasicBlock::weight_t weightTest = bTest->bbWeight;
+ BasicBlock::weight_t weightNext = block->bbNext->bbWeight;
    // If we have profile data then we calculate the number of times
// the loop will iterate into loopIterations
@@ -3356,26 +3544,27 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
{
// Only rely upon the profile weight when all three of these blocks
// have good profile weights
- if ((block->bbFlags & BBF_PROF_WEIGHT) &&
- (bTest->bbFlags & BBF_PROF_WEIGHT) &&
+ if ((block->bbFlags & BBF_PROF_WEIGHT) && (bTest->bbFlags & BBF_PROF_WEIGHT) &&
(block->bbNext->bbFlags & BBF_PROF_WEIGHT))
{
allProfileWeightsAreValid = true;
// If this while loop never iterates then don't bother transforming
if (weightNext == 0)
+ {
return;
+ }
            // with (weightNext > 0) we should also have (weightTest >= weightBlock)
// if the profile weights are all valid.
//
            // weightNext is the number of times this loop iterates
// weightBlock is the number of times that we enter the while loop
- // loopIterations is the average number of times that this loop iterates
+ // loopIterations is the average number of times that this loop iterates
//
if (weightTest >= weightBlock)
{
- loopIterations = (double) block->bbNext->bbWeight / (double) block->bbWeight;
+ loopIterations = (double)block->bbNext->bbWeight / (double)block->bbWeight;
}
}
}
@@ -3384,17 +3573,20 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
// optFastCodeOrBlendedLoop(bTest->bbWeight) does not work here as we have not
// set loop weights yet
- if ((compCodeOpt() == FAST_CODE) ||
- compStressCompile(STRESS_DO_WHILE_LOOPS, 30))
+ if ((compCodeOpt() == FAST_CODE) || compStressCompile(STRESS_DO_WHILE_LOOPS, 30))
{
maxDupCostSz *= 4;
}
// If this loop iterates a lot then raise the maxDupCost
if (loopIterations >= 12.0)
+ {
maxDupCostSz *= 2;
+ }
if (loopIterations >= 96.0)
+ {
maxDupCostSz *= 2;
+ }
// If the loop condition has a shared static helper, we really want this loop converted
// as not converting the loop will disable loop hoisting, meaning the shared helper will
@@ -3404,26 +3596,22 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
if (countOfHelpers > 0 && compCodeOpt() != SMALL_CODE)
{
- maxDupCostSz += 24 * min(countOfHelpers, (int) (loopIterations + 1.5));
+ maxDupCostSz += 24 * min(countOfHelpers, (int)(loopIterations + 1.5));
}
- // If the compare has too high cost then we don't want to dup
+ // If the compare has too high cost then we don't want to dup
bool costIsTooHigh = (estDupCostSz > maxDupCostSz);
-#ifdef DEBUG
- if (verbose)
- {
- printf("\nDuplication of loop condition [%06u] is %s, because the cost of duplication (%i) is %s than %i,"
- "\n loopIterations = %7.3f, countOfHelpers = %d, validProfileWeights = %s\n",
- condTree->gtTreeID,
- costIsTooHigh ? "not done" : "performed",
- estDupCostSz,
- costIsTooHigh ? "greater" : "less or equal",
- maxDupCostSz,
- loopIterations, countOfHelpers,
- allProfileWeightsAreValid ? "true" : "false");
- }
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\nDuplication of loop condition [%06u] is %s, because the cost of duplication (%i) is %s than %i,"
+ "\n loopIterations = %7.3f, countOfHelpers = %d, validProfileWeights = %s\n",
+ condTree->gtTreeID, costIsTooHigh ? "not done" : "performed", estDupCostSz,
+ costIsTooHigh ? "greater" : "less or equal", maxDupCostSz, loopIterations, countOfHelpers,
+ allProfileWeightsAreValid ? "true" : "false");
+ }
#endif
if (costIsTooHigh)
@@ -3451,8 +3639,10 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
copyOfCondStmt->gtFlags |= GTF_STMT_CMPADD;
#ifdef DEBUGGING_SUPPORT
- if (opts.compDbgInfo)
+ if (opts.compDbgInfo)
+ {
copyOfCondStmt->gtStmt.gtStmtILoffsx = condStmt->gtStmt.gtStmtILoffsx;
+ }
#endif
// Flag the block that received the copy as potentially having an array/vtable
@@ -3462,9 +3652,9 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
block->bbFlags |= copyFlags;
}
- // If we have profile data for all blocks and we know that we are cloning the
+ // If we have profile data for all blocks and we know that we are cloning the
// bTest block into block and thus changing the control flow from block so
- // that it no longer goes directly to bTest anymore, we have to adjust the
+ // that it no longer goes directly to bTest anymore, we have to adjust the
// weight of bTest by subtracting out the weight of block.
//
if (allProfileWeightsAreValid)
@@ -3475,14 +3665,13 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
if ((weightNext > 0) && (weightTest >= weightBlock) && (weightTest != BB_MAX_WEIGHT))
{
// Get the two edge that flow out of bTest
- flowList * edgeToNext = fgGetPredForBlock(bTest->bbNext, bTest);
- flowList * edgeToJump = fgGetPredForBlock(bTest->bbJumpDest, bTest);
+ flowList* edgeToNext = fgGetPredForBlock(bTest->bbNext, bTest);
+ flowList* edgeToJump = fgGetPredForBlock(bTest->bbJumpDest, bTest);
// Calculate the new weight for block bTest
- BasicBlock::weight_t newWeightTest = (weightTest > weightBlock)
- ? (weightTest - weightBlock)
- : BB_ZERO_WEIGHT;
+ BasicBlock::weight_t newWeightTest =
+ (weightTest > weightBlock) ? (weightTest - weightBlock) : BB_ZERO_WEIGHT;
bTest->bbWeight = newWeightTest;
if (newWeightTest == BB_ZERO_WEIGHT)
@@ -3498,11 +3687,11 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
{
                // Update our edge weights
edgeToNext->flEdgeWeightMin = BB_ZERO_WEIGHT;
- edgeToNext->flEdgeWeightMax = min(edgeToNext->flEdgeWeightMax, newWeightTest);
+ edgeToNext->flEdgeWeightMax = min(edgeToNext->flEdgeWeightMax, newWeightTest);
edgeToJump->flEdgeWeightMin = BB_ZERO_WEIGHT;
- edgeToJump->flEdgeWeightMax = min(edgeToJump->flEdgeWeightMax, newWeightTest);
+ edgeToJump->flEdgeWeightMax = min(edgeToJump->flEdgeWeightMax, newWeightTest);
}
- }
+ }
}
/* Change the block to end with a conditional jump */
@@ -3511,7 +3700,7 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
block->bbJumpDest = bTest->bbNext;
/* Mark the jump dest block as being a jump target */
- block->bbJumpDest->bbFlags |= BBF_JMP_TARGET|BBF_HAS_LABEL;
+ block->bbJumpDest->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
/* Update bbRefs and bbPreds for 'block->bbNext' 'bTest' and 'bTest->bbNext' */
@@ -3520,11 +3709,11 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
fgRemoveRefPred(bTest, block);
fgAddRefPred(bTest->bbNext, block);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("\nDuplicating loop condition in BB%02u for loop (BB%02u - BB%02u)",
- block->bbNum, block->bbNext->bbNum, bTest->bbNum);
+ printf("\nDuplicating loop condition in BB%02u for loop (BB%02u - BB%02u)", block->bbNum, block->bbNext->bbNum,
+ bTest->bbNum);
printf("\nEstimated code size expansion is %d\n ", estDupCostSz);
gtDispTree(copyOfCondStmt);
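
For reference while reading the fgOptWhileLoop hunks above, this is roughly the source-level shape of the transformation (loop inversion): the termination test is duplicated in front of the loop, the GTF_STMT_CMPADD copy shown above, so the loop proper becomes a do-while. Purely illustrative; cond, body, and counter are made-up stand-ins.

    #include <cstdio>

    static int  counter = 3;
    static bool cond() { return counter > 0; }
    static void body() { printf("iteration %d\n", counter--); }

    // Before: top-tested loop; 'block' jumps forward to the shared test at the bottom (bTest).
    static void topTested()
    {
        while (cond())
        {
            body();
        }
    }

    // After: the test runs once up front, and the loop itself is bottom-tested,
    // which is the do-while shape the later hoisting phases expect.
    static void inverted()
    {
        if (cond())
        {
            do
            {
                body();
            } while (cond());
        }
    }

    int main()
    {
        topTested();
        counter = 3;
        inverted(); // same observable behavior, different control-flow shape
        return 0;
    }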
@@ -3538,12 +3727,12 @@ void Compiler::fgOptWhileLoop(BasicBlock * block)
* Optimize the BasicBlock layout of the method
*/
-void Compiler::optOptimizeLayout()
+void Compiler::optOptimizeLayout()
{
noway_assert(!opts.MinOpts() && !opts.compDbgCode);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** In optOptimizeLayout()\n");
fgDispHandlerTab();
@@ -3555,7 +3744,7 @@ void Compiler::optOptimizeLayout()
noway_assert(fgModified == false);
- for (BasicBlock *block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
{
/* Make sure the appropriate fields are initialized */
@@ -3565,8 +3754,8 @@ void Compiler::optOptimizeLayout()
noway_assert(block->isLoopHead() == false);
continue;
}
-
- assert(block->bbLoopNum == 0);
+
+ assert(block->bbLoopNum == 0);
if (compCodeOpt() != SMALL_CODE)
{
@@ -3576,7 +3765,7 @@ void Compiler::optOptimizeLayout()
}
}
- if (fgModified)
+ if (fgModified)
{
// Recompute the edge weight if we have modified the flow graph in fgOptWhileLoop
fgComputeEdgeWeights();
@@ -3592,20 +3781,22 @@ void Compiler::optOptimizeLayout()
* Perform loop inversion, find and classify natural loops
*/
-void Compiler::optOptimizeLoops()
+void Compiler::optOptimizeLoops()
{
noway_assert(!opts.MinOpts() && !opts.compDbgCode);
#ifdef DEBUG
- if (verbose)
+ if (verbose)
+ {
printf("*************** In optOptimizeLoops()\n");
+ }
#endif
optSetBlockWeights();
/* Were there any loops in the flow graph? */
- if (fgHasLoops)
+ if (fgHasLoops)
{
/* now that we have dominator information we can find loops */
@@ -3621,43 +3812,50 @@ void Compiler::optOptimizeLoops()
* lastBottom - used when we have multiple back-edges to the same top
*/
- flowList * pred;
+ flowList* pred;
- BasicBlock * top;
+ BasicBlock* top;
for (top = fgFirstBB; top; top = top->bbNext)
{
- BasicBlock * foundBottom = NULL;
+ BasicBlock* foundBottom = nullptr;
for (pred = top->bbPreds; pred; pred = pred->flNext)
{
/* Is this a loop candidate? - We look for "back edges" */
- BasicBlock * bottom = pred->flBlock;
+ BasicBlock* bottom = pred->flBlock;
/* is this a backward edge? (from BOTTOM to TOP) */
if (top->bbNum > bottom->bbNum)
+ {
continue;
+ }
/* 'top' also must have the BBF_LOOP_HEAD flag set */
if (top->isLoopHead() == false)
+ {
continue;
+ }
/* We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops */
- if ((bottom->bbJumpKind != BBJ_COND) &&
- (bottom->bbJumpKind != BBJ_ALWAYS) )
+ if ((bottom->bbJumpKind != BBJ_COND) && (bottom->bbJumpKind != BBJ_ALWAYS))
+ {
continue;
+ }
/* the top block must be able to reach the bottom block */
if (!fgReachable(top, bottom))
+ {
continue;
+ }
/* Found a new loop, record the longest backedge in foundBottom */
- if ((foundBottom == NULL) || (bottom->bbNum > foundBottom->bbNum))
+ if ((foundBottom == nullptr) || (bottom->bbNum > foundBottom->bbNum))
{
foundBottom = bottom;
}
@@ -3669,11 +3867,11 @@ void Compiler::optOptimizeLoops()
#ifdef DEBUG
/* Mark the loop header as such */
assert(FitsIn<unsigned char>(loopNum));
- top->bbLoopNum = (unsigned char) loopNum;
+ top->bbLoopNum = (unsigned char)loopNum;
#endif
/* Mark all blocks between 'top' and 'bottom' */
-
+
optMarkLoopBlocks(top, foundBottom, false);
}
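
The optOptimizeLoops hunks above reflow the back-edge scan that discovers natural loops. A simplified model of that scan, using a hypothetical Blk struct in place of BasicBlock and omitting the BBJ_COND/BBJ_ALWAYS and fgReachable filters the real code also applies:

    #include <cstdio>
    #include <vector>

    struct Blk
    {
        unsigned          num;        // stands in for bbNum
        bool              isLoopHead; // stands in for BBF_LOOP_HEAD
        std::vector<Blk*> preds;      // stands in for bbPreds
    };

    // A predecessor 'bottom' of 'top' forms a back-edge when it appears later in block order;
    // among several back-edges to the same head, keep the one that spans the most blocks.
    static Blk* findLongestBackEdge(Blk* top)
    {
        if (!top->isLoopHead)
        {
            return nullptr;
        }
        Blk* foundBottom = nullptr;
        for (Blk* bottom : top->preds)
        {
            if (top->num > bottom->num)
            {
                continue; // forward edge, not a back-edge
            }
            if ((foundBottom == nullptr) || (bottom->num > foundBottom->num))
            {
                foundBottom = bottom;
            }
        }
        return foundBottom;
    }

    int main()
    {
        Blk top    = {1, true, {}};
        Blk bottom = {5, false, {}};
        top.preds.push_back(&bottom); // block 5 branches back to block 1
        Blk* found = findLongestBackEdge(&top);
        printf("bottom = %u\n", found ? found->num : 0u);
        return 0;
    }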
@@ -3691,10 +3889,10 @@ void Compiler::optOptimizeLoops()
totalUnnatLoopCount += loopNum;
#endif
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- if (loopNum > 0)
+ if (loopNum > 0)
{
printf("\nFound a total of %d loops.", loopNum);
printf("\nAfter loop weight marking:\n");
@@ -3731,12 +3929,12 @@ void Compiler::optOptimizeLoops()
// Callers should assume AND operation is used i.e., if all conditions are
// true, then take the fast path.
//
-bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context)
+bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context)
{
JITDUMP("------------------------------------------------------------\n");
JITDUMP("Deriving cloning conditions for L%02u\n", loopNum);
- LoopDsc* loop = &optLoopTable[loopNum];
+ LoopDsc* loop = &optLoopTable[loopNum];
ExpandArrayStack<LcOptInfo*>* optInfos = context->GetLoopOptInfo(loopNum);
if (loop->lpTestOper() == GT_LT)
@@ -3761,9 +3959,8 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, L
else if (loop->lpFlags & LPFLG_VAR_INIT)
{
// limitVar >= 0
- LC_Condition geZero(GT_GE,
- LC_Expr(LC_Ident(loop->lpVarInit, LC_Ident::Var)),
- LC_Expr(LC_Ident(0, LC_Ident::Const)));
+ LC_Condition geZero(GT_GE, LC_Expr(LC_Ident(loop->lpVarInit, LC_Ident::Var)),
+ LC_Expr(LC_Ident(0, LC_Ident::Const)));
context->EnsureConditions(loopNum)->Push(geZero);
}
else
@@ -3787,11 +3984,9 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, L
else if (loop->lpFlags & LPFLG_VAR_LIMIT)
{
unsigned limitLcl = loop->lpVarLimit();
- ident = LC_Ident(limitLcl, LC_Ident::Var);
+ ident = LC_Ident(limitLcl, LC_Ident::Var);
- LC_Condition geZero(GT_GE,
- LC_Expr(ident),
- LC_Expr(LC_Ident(0, LC_Ident::Const)));
+ LC_Condition geZero(GT_GE, LC_Expr(ident), LC_Expr(LC_Ident(0, LC_Ident::Const)));
context->EnsureConditions(loopNum)->Push(geZero);
}
@@ -3820,7 +4015,7 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, L
LcOptInfo* optInfo = optInfos->GetRef(i);
switch (optInfo->GetOptType())
{
- case LcOptInfo::LcJaggedArray:
+ case LcOptInfo::LcJaggedArray:
{
// limit <= arrLen
LcJaggedArrayOptInfo* arrIndexInfo = optInfo->AsLcJaggedArrayOptInfo();
@@ -3835,23 +4030,21 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, L
context->EnsureDerefs(loopNum)->Push(array);
}
break;
- case LcOptInfo::LcMdArray:
+ case LcOptInfo::LcMdArray:
{
// limit <= mdArrLen
LcMdArrayOptInfo* mdArrInfo = optInfo->AsLcMdArrayOptInfo();
- LC_Condition cond(GT_LE,
- LC_Expr(ident),
- LC_Expr(LC_Ident(
- LC_Array(
- LC_Array::MdArray, mdArrInfo->GetArrIndexForDim(getAllocator()), mdArrInfo->dim, LC_Array::None)
- )));
+ LC_Condition cond(GT_LE, LC_Expr(ident),
+ LC_Expr(LC_Ident(LC_Array(LC_Array::MdArray,
+ mdArrInfo->GetArrIndexForDim(getAllocator()),
+ mdArrInfo->dim, LC_Array::None))));
context->EnsureConditions(loopNum)->Push(cond);
}
break;
- default:
- JITDUMP("Unknown opt\n");
- return false;
+ default:
+ JITDUMP("Unknown opt\n");
+ return false;
}
}
JITDUMP("Conditions: (");
@@ -3905,7 +4098,7 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, L
//
// But these conditions can be checked together with conditions
// (i < a.len) without a need for a separate block. In summary, the conditions will be:
-//
+//
// (a != null) &&
// ((i < a.len) & (x < a.len)) && <-- Note the bitwise AND here.
// (a[i] != null & a[x] != null) && <-- Note the bitwise AND here.
@@ -3914,7 +4107,7 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, L
//
// This naturally yields a tree style pattern, where the nodes of the tree are
// the array and indices respectively.
-//
+//
// Example:
// a => {
// i => {
@@ -3960,7 +4153,7 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, L
//
// (i < a.len) & (i < b.len) && // from the second level of the tree. Levels can be combined.
// (a[i] != null) & (b[i] != null) && // from the second level of the tree.
-//
+//
// (j < a[i].len) & (y < a[i].len) && // from the third level.
// (a[i][j] != null) & (a[i][y] != null) && // from the third level.
//
@@ -3970,7 +4163,7 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, L
bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context)
{
ExpandArrayStack<LC_Deref*> nodes(getAllocator());
- int maxRank = -1;
+ int maxRank = -1;
// Get the dereference-able arrays.
ExpandArrayStack<LC_Array>* deref = context->EnsureDerefs(loopNum);
@@ -3993,7 +4186,7 @@ bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* con
// For each dimension (level) for the array, populate the tree with the variable
// from that dimension.
- unsigned rank = (unsigned) array.GetDimRank();
+ unsigned rank = (unsigned)array.GetDimRank();
for (unsigned i = 0; i < rank; ++i)
{
node->EnsureChildren(getAllocator());
@@ -4009,7 +4202,7 @@ bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* con
}
// Keep the maxRank of all array dereferences.
- maxRank = max((int) rank, maxRank);
+ maxRank = max((int)rank, maxRank);
}
#ifdef DEBUG
@@ -4017,7 +4210,10 @@ bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* con
{
for (unsigned i = 0; i < nodes.Size(); ++i)
{
- if (i != 0) printf(",");
+ if (i != 0)
+ {
+ printf(",");
+ }
nodes[i]->Print();
printf("\n");
}
@@ -4032,7 +4228,7 @@ bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* con
// First level will always yield the null-check, since it is made of the array base variables.
// All other levels (dimensions) will yield two conditions ex: (i < a.length && a[i] != null)
// So add 1 after rank * 2.
- unsigned condBlocks = (unsigned) maxRank * 2 + 1;
+ unsigned condBlocks = (unsigned)maxRank * 2 + 1;
// Heuristic to not create too many blocks;
if (condBlocks > 4)
@@ -4059,14 +4255,14 @@ bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* con
// block - the block in which the helper call needs to be inserted.
// insertBefore - the tree before which the helper call will be inserted.
//
-void Compiler::optDebugLogLoopCloning(BasicBlock* block, GenTreePtr insertBefore)
+void Compiler::optDebugLogLoopCloning(BasicBlock* block, GenTreePtr insertBefore)
{
if (JitConfig.JitDebugLogLoopCloning() == 0)
{
return;
}
GenTreePtr logCall = gtNewHelperCallNode(CORINFO_HELP_DEBUG_LOG_LOOP_CLONING, TYP_VOID);
- GenTreePtr stmt = fgNewStmtFromTree(logCall);
+ GenTreePtr stmt = fgNewStmtFromTree(logCall);
fgInsertStmtBefore(block, insertBefore, stmt);
fgMorphBlockStmt(block, stmt DEBUGARG("Debug log loop cloning"));
}
@@ -4084,7 +4280,7 @@ void Compiler::optDebugLogLoopCloning(BasicBlock* block, GenTreeP
// there is no slow path.)
//
// Operation:
-// Perform the optimizations on the fast path i.e., the path in which the
+// Perform the optimizations on the fast path i.e., the path in which the
// optimization candidates were collected at the time of identifying them.
// The candidates store all the information necessary (the tree/stmt/block
// they are from) to perform the optimization.
@@ -4095,7 +4291,7 @@ void Compiler::optDebugLogLoopCloning(BasicBlock* block, GenTreeP
// performs the optimizations assuming that the path in which the candidates
// were collected is the fast path in which the optimizations will be performed.
//
-void Compiler::optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool dynamicPath))
+void Compiler::optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool dynamicPath))
{
ExpandArrayStack<LcOptInfo*>* optInfos = context->GetLoopOptInfo(loopNum);
for (unsigned i = 0; i < optInfos->Size(); ++i)
@@ -4103,26 +4299,24 @@ void Compiler::optPerformStaticOptimizations(unsigned loopNum, Lo
LcOptInfo* optInfo = optInfos->GetRef(i);
switch (optInfo->GetOptType())
{
- case LcOptInfo::LcJaggedArray:
+ case LcOptInfo::LcJaggedArray:
{
LcJaggedArrayOptInfo* arrIndexInfo = optInfo->AsLcJaggedArrayOptInfo();
- compCurBB = arrIndexInfo->arrIndex.useBlock;
- optRemoveRangeCheck(
- arrIndexInfo->arrIndex.bndsChks[arrIndexInfo->dim],
- arrIndexInfo->stmt, true, GTF_ASG, true);
+ compCurBB = arrIndexInfo->arrIndex.useBlock;
+ optRemoveRangeCheck(arrIndexInfo->arrIndex.bndsChks[arrIndexInfo->dim], arrIndexInfo->stmt, true,
+ GTF_ASG, true);
DBEXEC(dynamicPath, optDebugLogLoopCloning(arrIndexInfo->arrIndex.useBlock, arrIndexInfo->stmt));
}
break;
- case LcOptInfo::LcMdArray:
- // TODO-CQ: CLONE: Implement.
- break;
- default:
- break;
+ case LcOptInfo::LcMdArray:
+ // TODO-CQ: CLONE: Implement.
+ break;
+ default:
+ break;
}
}
}
-
//----------------------------------------------------------------------------
// optCanCloneLoops: Use the environment flag to determine whether loop
// cloning is allowed to be performed.
@@ -4131,7 +4325,7 @@ void Compiler::optPerformStaticOptimizations(unsigned loopNum, Lo
// Returns true in debug builds if COMPlus_JitCloneLoops flag is set.
// Disabled for retail for now.
//
-bool Compiler::optCanCloneLoops()
+bool Compiler::optCanCloneLoops()
{
// Enabled for retail builds now.
unsigned cloneLoopsFlag = 1;
@@ -4141,7 +4335,6 @@ bool Compiler::optCanCloneLoops()
return (cloneLoopsFlag != 0);
}
-
//----------------------------------------------------------------------------
// optIsLoopClonable: Determine whether this loop can be cloned.
//
@@ -4152,19 +4345,21 @@ bool Compiler::optCanCloneLoops()
// Returns true if the loop can be cloned. If it returns false
// prints a message in debug as why the loop can't be cloned.
//
-bool Compiler::optIsLoopClonable(unsigned loopInd)
+bool Compiler::optIsLoopClonable(unsigned loopInd)
{
// First, for now, make sure the loop doesn't have any embedded exception handling -- I don't want to tackle
// inserting new EH regions in the exception table yet.
- BasicBlock* stopAt = optLoopTable[loopInd].lpBottom->bbNext;
- unsigned loopRetCount = 0;
+ BasicBlock* stopAt = optLoopTable[loopInd].lpBottom->bbNext;
+ unsigned loopRetCount = 0;
for (BasicBlock* blk = optLoopTable[loopInd].lpFirst; blk != stopAt; blk = blk->bbNext)
{
- if (blk->bbJumpKind == BBJ_RETURN) loopRetCount++;
- if (bbIsTryBeg(blk))
+ if (blk->bbJumpKind == BBJ_RETURN)
+ {
+ loopRetCount++;
+ }
+ if (bbIsTryBeg(blk))
{
- JITDUMP("Loop cloning: rejecting loop %d in %s, because it has a try begin.\n",
- loopInd, info.compFullName);
+ JITDUMP("Loop cloning: rejecting loop %d in %s, because it has a try begin.\n", loopInd, info.compFullName);
return false;
}
}
@@ -4203,7 +4398,9 @@ bool Compiler::optIsLoopClonable(unsigned loopInd)
// heuristic tradeoff; perhaps we're just choosing to live with 4 as the limit.)
if (fgReturnCount + loopRetCount > 4)
{
- JITDUMP("Loop cloning: rejecting loop because it has %d returns; if added to previously-existing %d returns, would exceed the limit of 4.\n", loopRetCount, fgReturnCount);
+ JITDUMP("Loop cloning: rejecting loop because it has %d returns; if added to previously-existing %d returns, "
+ "would exceed the limit of 4.\n",
+ loopRetCount, fgReturnCount);
return false;
}
@@ -4219,7 +4416,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd)
* perform loop cloning, use the derived conditions to choose which
* path to take.
*/
-void Compiler::optCloneLoops()
+void Compiler::optCloneLoops()
{
JITDUMP("\n*************** In optCloneLoops()\n");
if (optLoopCount == 0 || !optCanCloneLoops())
@@ -4256,7 +4453,7 @@ void Compiler::optCloneLoops()
}
else
{
- bool allTrue = false;
+ bool allTrue = false;
bool anyFalse = false;
context.EvaluateConditions(i, &allTrue, &anyFalse DEBUGARG(verbose));
if (anyFalse)
@@ -4275,7 +4472,6 @@ void Compiler::optCloneLoops()
}
}
-
#if 0
// The code in this #if has been useful in debugging loop cloning issues, by
// enabling selective enablement of the loop cloning optimization according to
@@ -4316,25 +4512,21 @@ void Compiler::optCloneLoops()
if (verbose)
{
printf("\nAfter loop cloning:\n");
- fgDispBasicBlocks(/*dumpTrees*/true);
+ fgDispBasicBlocks(/*dumpTrees*/ true);
}
#endif
}
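
optCloneLoop, which follows, materializes the fast/slow split that optCloneLoops just decided on. As a purely illustrative source-level picture, not the JIT's actual output, here is a sketch where std::vector::at() stands in for the runtime range check and the derived conditions (limit within bounds; in managed code, array non-null as well) guard the unchecked fast path.

    #include <cstdio>
    #include <vector>

    static int sum(const std::vector<int>& a, unsigned limit)
    {
        int total = 0;
        if (limit <= a.size())
        {
            // Fast path (cloned loop): the cloning conditions prove every access is in
            // bounds, so the per-iteration range check has been removed.
            for (unsigned i = 0; i < limit; i++)
            {
                total += a[i]; // unchecked access
            }
        }
        else
        {
            // Slow path (original loop): every access is still range-checked.
            for (unsigned i = 0; i < limit; i++)
            {
                total += a.at(i); // checked access; may throw
            }
        }
        return total;
    }

    int main()
    {
        std::vector<int> a = {1, 2, 3, 4};
        printf("%d\n", sum(a, 4)); // takes the fast path; prints 10
        return 0;
    }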
-void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
+void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
{
assert(loopInd < optLoopCount);
- JITDUMP("\nCloning loop %d: [h: %d, f: %d, t: %d, e: %d, b: %d].\n",
- loopInd,
- optLoopTable[loopInd].lpHead->bbNum,
- optLoopTable[loopInd].lpFirst->bbNum,
- optLoopTable[loopInd].lpTop->bbNum,
- optLoopTable[loopInd].lpEntry->bbNum,
- optLoopTable[loopInd].lpBottom->bbNum);
+ JITDUMP("\nCloning loop %d: [h: %d, f: %d, t: %d, e: %d, b: %d].\n", loopInd, optLoopTable[loopInd].lpHead->bbNum,
+ optLoopTable[loopInd].lpFirst->bbNum, optLoopTable[loopInd].lpTop->bbNum,
+ optLoopTable[loopInd].lpEntry->bbNum, optLoopTable[loopInd].lpBottom->bbNum);
// Determine the depth of the loop, so we can properly weight blocks added (outside the cloned loop blocks).
- unsigned depth = optLoopDepth(loopInd);
+ unsigned depth = optLoopDepth(loopInd);
unsigned ambientWeight = 1;
for (unsigned j = 0; j < depth; j++)
{
@@ -4365,20 +4557,20 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* c
// We're going to make
// H --> E
- // F
- // T
+ // F
+ // T
// E
- // B ?-> T
+ // B ?-> T
// X
//
// become
//
// H ?-> E2
// H2--> E (Optional; if E == T == F, let H fall through to F/T/E)
- // F
- // T
+ // F
+ // T
// E
- // B ?-> T
+ // B ?-> T
// X2--> X
// F2
// T2
@@ -4391,28 +4583,27 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* c
{
// Make a new block to be the unique entry to the loop.
assert(h->bbJumpKind == BBJ_COND && h->bbNext == optLoopTable[loopInd].lpEntry);
- BasicBlock* newH = fgNewBBafter(BBJ_NONE,
- h,
- /*extendRegion*/true);
+ BasicBlock* newH = fgNewBBafter(BBJ_NONE, h,
+ /*extendRegion*/ true);
newH->bbWeight = (newH->isRunRarely() ? 0 : ambientWeight);
BlockSetOps::Assign(this, newH->bbReach, h->bbReach);
// This is in the scope of a surrounding loop, if one exists -- the parent of the loop we're cloning.
newH->bbNatLoopNum = ambientLoop;
- h = newH;
+ h = newH;
optUpdateLoopHead(loopInd, optLoopTable[loopInd].lpHead, h);
}
// First, make X2 after B, if necessary. (Not necessary if b is a BBJ_ALWAYS.)
// "newPred" will be the predecessor of the blocks of the cloned loop.
- BasicBlock* b = optLoopTable[loopInd].lpBottom;
+ BasicBlock* b = optLoopTable[loopInd].lpBottom;
BasicBlock* newPred = b;
if (b->bbJumpKind != BBJ_ALWAYS)
{
BasicBlock* x = b->bbNext;
if (x != nullptr)
{
- BasicBlock* x2 = fgNewBBafter(BBJ_ALWAYS, b, /*extendRegion*/true);
- x2->bbWeight = (x2->isRunRarely() ? 0 : ambientWeight);
+ BasicBlock* x2 = fgNewBBafter(BBJ_ALWAYS, b, /*extendRegion*/ true);
+ x2->bbWeight = (x2->isRunRarely() ? 0 : ambientWeight);
// This is in the scope of a surrounding loop, if one exists -- the parent of the loop we're cloning.
x2->bbNatLoopNum = ambientLoop;
@@ -4428,30 +4619,27 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* c
BasicBlock* h2 = nullptr;
if (optLoopTable[loopInd].lpHead->bbNext != optLoopTable[loopInd].lpEntry)
{
- BasicBlock* h2 = fgNewBBafter(BBJ_ALWAYS,
- optLoopTable[loopInd].lpHead,
- /*extendRegion*/true);
+ BasicBlock* h2 = fgNewBBafter(BBJ_ALWAYS, optLoopTable[loopInd].lpHead,
+ /*extendRegion*/ true);
h2->bbWeight = (h2->isRunRarely() ? 0 : ambientWeight);
// This is in the scope of a surrounding loop, if one exists -- the parent of the loop we're cloning.
h2->bbNatLoopNum = ambientLoop;
h2->bbJumpDest = optLoopTable[loopInd].lpEntry;
- optUpdateLoopHead(loopInd,optLoopTable[loopInd].lpHead, h2);
+ optUpdateLoopHead(loopInd, optLoopTable[loopInd].lpHead, h2);
}
// Now we'll clone the blocks of the loop body.
BasicBlock* newFirst = nullptr;
- BasicBlock* newBot = nullptr;
+ BasicBlock* newBot = nullptr;
BlockToBlockMap* blockMap = new (getAllocator()) BlockToBlockMap(getAllocator());
- for (BasicBlock* blk = optLoopTable[loopInd].lpFirst;
- blk != optLoopTable[loopInd].lpBottom->bbNext;
- blk = blk->bbNext)
+ for (BasicBlock* blk = optLoopTable[loopInd].lpFirst; blk != optLoopTable[loopInd].lpBottom->bbNext;
+ blk = blk->bbNext)
{
- BasicBlock* newBlk = fgNewBBafter(blk->bbJumpKind,
- newPred,
- /*extendRegion*/true);
+ BasicBlock* newBlk = fgNewBBafter(blk->bbJumpKind, newPred,
+ /*extendRegion*/ true);
BasicBlock::CloneBlockState(this, newBlk, blk);
// TODO-Cleanup: The above clones the bbNatLoopNum, which is incorrect. Eventually, we should probably insert
@@ -4459,8 +4647,11 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* c
// loop, if one exists -- the parent of the loop we're cloning.
newBlk->bbNatLoopNum = optLoopTable[loopInd].lpParent;
- if (newFirst == nullptr) newFirst = newBlk;
- newBot = newBlk; // Continually overwrite to make sure we get the last one.
+ if (newFirst == nullptr)
+ {
+ newFirst = newBlk;
+ }
+ newBot = newBlk; // Continually overwrite to make sure we get the last one.
newPred = newBlk;
blockMap->Set(blk, newBlk);
}
@@ -4469,30 +4660,29 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* c
optPerformStaticOptimizations(loopInd, context DEBUGARG(true));
// Now go through the new blocks, remapping their jump targets within the loop.
- for (BasicBlock* blk = optLoopTable[loopInd].lpFirst;
- blk != optLoopTable[loopInd].lpBottom->bbNext;
- blk = blk->bbNext)
+ for (BasicBlock* blk = optLoopTable[loopInd].lpFirst; blk != optLoopTable[loopInd].lpBottom->bbNext;
+ blk = blk->bbNext)
{
BasicBlock* newblk = nullptr;
- bool b = blockMap->Lookup(blk, &newblk);
+ bool b = blockMap->Lookup(blk, &newblk);
assert(b && newblk != nullptr);
assert(blk->bbJumpKind == newblk->bbJumpKind);
// First copy the jump destination(s) from "blk".
optCopyBlkDest(blk, newblk);
-
+
// Now redirect the new block according to "blockMap".
optRedirectBlock(newblk, blockMap);
}
- assert((h->bbJumpKind == BBJ_NONE && (h->bbNext == h2 || h->bbNext == optLoopTable[loopInd].lpEntry))
- || (h->bbJumpKind == BBJ_ALWAYS));
+ assert((h->bbJumpKind == BBJ_NONE && (h->bbNext == h2 || h->bbNext == optLoopTable[loopInd].lpEntry)) ||
+ (h->bbJumpKind == BBJ_ALWAYS));
// If all the conditions are true, go to E2.
- BasicBlock* e2 = nullptr;
- bool foundIt = blockMap->Lookup(optLoopTable[loopInd].lpEntry, &e2);
+ BasicBlock* e2 = nullptr;
+ bool foundIt = blockMap->Lookup(optLoopTable[loopInd].lpEntry, &e2);
h->bbJumpKind = BBJ_COND;
@@ -4510,10 +4700,10 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* c
assert(context->HasBlockConditions(loopInd));
// Create a unique header for the slow path.
- BasicBlock* slowHead = fgNewBBafter(BBJ_ALWAYS, h, true);
- slowHead->bbWeight = (h->isRunRarely() ? 0 : ambientWeight);
+ BasicBlock* slowHead = fgNewBBafter(BBJ_ALWAYS, h, true);
+ slowHead->bbWeight = (h->isRunRarely() ? 0 : ambientWeight);
slowHead->bbNatLoopNum = ambientLoop;
- slowHead->bbJumpDest = e2;
+ slowHead->bbJumpDest = e2;
BasicBlock* condLast = optInsertLoopChoiceConditions(context, loopInd, h, slowHead);
condLast->bbJumpDest = slowHead;
@@ -4523,7 +4713,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* c
{
optUpdateLoopHead(loopInd, optLoopTable[loopInd].lpHead, condLast);
}
- assert(foundIt && e2 != NULL);
+ assert(foundIt && e2 != nullptr);
fgUpdateChangedFlowGraph();
}
@@ -4556,12 +4746,15 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* c
//
// Insert condition 0 in 'h' and create other condition blocks and insert conditions in them.
//
-BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* head, BasicBlock* slowHead)
+BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context,
+ unsigned loopNum,
+ BasicBlock* head,
+ BasicBlock* slowHead)
{
JITDUMP("Inserting loop cloning conditions\n");
assert(context->HasBlockConditions(loopNum));
- BasicBlock* curCond = head;
+ BasicBlock* curCond = head;
ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* levelCond = context->GetBlockConditions(loopNum);
for (unsigned i = 0; i < levelCond->Size(); ++i)
{
@@ -4571,9 +4764,9 @@ BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context, u
context->CondToStmtInBlock(this, *((*levelCond)[i]), curCond, isHeaderBlock);
// Create each condition block ensuring wiring between them.
- BasicBlock* tmp = fgNewBBafter(BBJ_COND, isHeaderBlock ? slowHead : curCond, true);
+ BasicBlock* tmp = fgNewBBafter(BBJ_COND, isHeaderBlock ? slowHead : curCond, true);
curCond->bbJumpDest = isHeaderBlock ? tmp : slowHead;
- curCond = tmp;
+ curCond = tmp;
curCond->inheritWeight(head);
curCond->bbNatLoopNum = head->bbNatLoopNum;
@@ -4593,7 +4786,10 @@ void Compiler::optEnsureUniqueHead(unsigned loopInd, unsigned ambientWeight)
BasicBlock* b = optLoopTable[loopInd].lpBottom;
// If "h" dominates the entry block, then it is the unique header.
- if (fgDominate(h, e)) return;
+ if (fgDominate(h, e))
+ {
+ return;
+ }
// Otherwise, create a new empty header block, make it the pred of the entry block,
// and redirect the preds of the entry block to go to this.
@@ -4601,16 +4797,16 @@ void Compiler::optEnsureUniqueHead(unsigned loopInd, unsigned ambientWeight)
BasicBlock* beforeTop = t->bbPrev;
// Make sure that the new block is in the same region as the loop.
// (We will only create loops that are entirely within a region.)
- BasicBlock * h2 = fgNewBBafter(BBJ_ALWAYS, beforeTop, true);
+ BasicBlock* h2 = fgNewBBafter(BBJ_ALWAYS, beforeTop, true);
// This is in the containing loop.
h2->bbNatLoopNum = optLoopTable[loopInd].lpParent;
- h2->bbWeight = (h2->isRunRarely() ? 0 : ambientWeight);
+ h2->bbWeight = (h2->isRunRarely() ? 0 : ambientWeight);
// We don't care where it was put; splice it between beforeTop and top.
if (beforeTop->bbNext != h2)
{
- h2->bbPrev->setNext(h2->bbNext); // Splice h2 out.
- beforeTop->setNext(h2); // Splice h2 in, between beforeTop and t.
+ h2->bbPrev->setNext(h2->bbNext); // Splice h2 out.
+ beforeTop->setNext(h2); // Splice h2 in, between beforeTop and t.
h2->setNext(t);
}
@@ -4630,11 +4826,14 @@ void Compiler::optEnsureUniqueHead(unsigned loopInd, unsigned ambientWeight)
BasicBlock* predBlock = predEntry->flBlock;
// Skip if predBlock is in the loop.
- if (t->bbNum <= predBlock->bbNum && predBlock->bbNum <= b->bbNum) continue;
+ if (t->bbNum <= predBlock->bbNum && predBlock->bbNum <= b->bbNum)
+ {
+ continue;
+ }
optRedirectBlock(predBlock, blockMap);
}
- optUpdateLoopHead(loopInd, optLoopTable[loopInd].lpHead, h2);
+ optUpdateLoopHead(loopInd, optLoopTable[loopInd].lpHead, h2);
}
/*****************************************************************************
@@ -4642,44 +4841,45 @@ void Compiler::optEnsureUniqueHead(unsigned loopInd, unsigned ambientWeight)
* Determine the kind of interference for the call.
*/
-/* static */ inline
-Compiler::callInterf Compiler::optCallInterf(GenTreePtr call)
+/* static */ inline Compiler::callInterf Compiler::optCallInterf(GenTreePtr call)
{
assert(call->gtOper == GT_CALL);
// if not a helper, kills everything
- if (call->gtCall.gtCallType != CT_HELPER)
+ if (call->gtCall.gtCallType != CT_HELPER)
+ {
return CALLINT_ALL;
+ }
// setfield and array address store kill all indirections
switch (eeGetHelperNum(call->gtCall.gtCallMethHnd))
{
- case CORINFO_HELP_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this
- case CORINFO_HELP_CHECKED_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this
- case CORINFO_HELP_ASSIGN_BYREF: // Not strictly needed as we don't make a GT_CALL with this
- case CORINFO_HELP_SETFIELDOBJ:
- case CORINFO_HELP_ARRADDR_ST:
+ case CORINFO_HELP_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this
+ case CORINFO_HELP_CHECKED_ASSIGN_REF: // Not strictly needed as we don't make a GT_CALL with this
+ case CORINFO_HELP_ASSIGN_BYREF: // Not strictly needed as we don't make a GT_CALL with this
+ case CORINFO_HELP_SETFIELDOBJ:
+ case CORINFO_HELP_ARRADDR_ST:
- return CALLINT_REF_INDIRS;
+ return CALLINT_REF_INDIRS;
- case CORINFO_HELP_SETFIELDFLOAT:
- case CORINFO_HELP_SETFIELDDOUBLE:
- case CORINFO_HELP_SETFIELD8:
- case CORINFO_HELP_SETFIELD16:
- case CORINFO_HELP_SETFIELD32:
- case CORINFO_HELP_SETFIELD64:
+ case CORINFO_HELP_SETFIELDFLOAT:
+ case CORINFO_HELP_SETFIELDDOUBLE:
+ case CORINFO_HELP_SETFIELD8:
+ case CORINFO_HELP_SETFIELD16:
+ case CORINFO_HELP_SETFIELD32:
+ case CORINFO_HELP_SETFIELD64:
- return CALLINT_SCL_INDIRS;
+ return CALLINT_SCL_INDIRS;
- case CORINFO_HELP_ASSIGN_STRUCT: // Not strictly needed as we don't use this in Jit32
- case CORINFO_HELP_MEMSET: // Not strictly needed as we don't make a GT_CALL with this
- case CORINFO_HELP_MEMCPY: // Not strictly needed as we don't make a GT_CALL with this
- case CORINFO_HELP_SETFIELDSTRUCT:
+ case CORINFO_HELP_ASSIGN_STRUCT: // Not strictly needed as we don't use this in Jit32
+ case CORINFO_HELP_MEMSET: // Not strictly needed as we don't make a GT_CALL with this
+ case CORINFO_HELP_MEMCPY: // Not strictly needed as we don't make a GT_CALL with this
+ case CORINFO_HELP_SETFIELDSTRUCT:
- return CALLINT_ALL_INDIRS;
+ return CALLINT_ALL_INDIRS;
- default:
- break;
+ default:
+ break;
}
// other helpers kill nothing
@@ -4694,14 +4894,10 @@ Compiler::callInterf Compiler::optCallInterf(GenTreePtr call)
* get called with 'doit' being true, we actually perform the narrowing.
*/
-bool Compiler::optNarrowTree(GenTreePtr tree,
- var_types srct,
- var_types dstt,
- ValueNumPair vnpNarrow,
- bool doit)
+bool Compiler::optNarrowTree(GenTreePtr tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit)
{
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
noway_assert(tree);
noway_assert(genActualType(tree->gtType) == genActualType(srct));
@@ -4710,262 +4906,305 @@ bool Compiler::optNarrowTree(GenTreePtr tree,
noway_assert(varTypeIsIntegral(srct));
noway_assert(varTypeIsIntegral(dstt));
- unsigned srcSize = genTypeSize(srct);
- unsigned dstSize = genTypeSize(dstt);
+ unsigned srcSize = genTypeSize(srct);
+ unsigned dstSize = genTypeSize(dstt);
/* dstt must be smaller than srct to narrow */
if (dstSize >= srcSize)
+ {
return false;
+ }
/* Figure out what kind of a node we have */
oper = tree->OperGet();
kind = tree->OperKind();
- if (kind & GTK_ASGOP)
+ if (kind & GTK_ASGOP)
{
noway_assert(doit == false);
- return false;
+ return false;
}
ValueNumPair NoVNPair = ValueNumPair();
- if (kind & GTK_LEAF)
+ if (kind & GTK_LEAF)
{
switch (oper)
{
- /* Constants can usually be narrowed by changing their value */
- CLANG_FORMAT_COMMENT_ANCHOR;
+ /* Constants can usually be narrowed by changing their value */
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef _TARGET_64BIT_
- __int64 lval;
- __int64 lmask;
+ __int64 lval;
+ __int64 lmask;
- case GT_CNS_LNG:
- lval = tree->gtIntConCommon.LngValue();
- lmask = 0;
+ case GT_CNS_LNG:
+ lval = tree->gtIntConCommon.LngValue();
+ lmask = 0;
- switch (dstt)
- {
- case TYP_BYTE : lmask = 0x0000007F; break;
- case TYP_BOOL :
- case TYP_UBYTE: lmask = 0x000000FF; break;
- case TYP_SHORT: lmask = 0x00007FFF; break;
- case TYP_CHAR : lmask = 0x0000FFFF; break;
- case TYP_INT : lmask = 0x7FFFFFFF; break;
- case TYP_UINT : lmask = 0xFFFFFFFF; break;
+ switch (dstt)
+ {
+ case TYP_BYTE:
+ lmask = 0x0000007F;
+ break;
+ case TYP_BOOL:
+ case TYP_UBYTE:
+ lmask = 0x000000FF;
+ break;
+ case TYP_SHORT:
+ lmask = 0x00007FFF;
+ break;
+ case TYP_CHAR:
+ lmask = 0x0000FFFF;
+ break;
+ case TYP_INT:
+ lmask = 0x7FFFFFFF;
+ break;
+ case TYP_UINT:
+ lmask = 0xFFFFFFFF;
+ break;
- default: return false;
- }
+ default:
+ return false;
+ }
- if ((lval & lmask) != lval)
- return false;
+ if ((lval & lmask) != lval)
+ return false;
- if (doit)
- {
- tree->ChangeOperConst (GT_CNS_INT);
- tree->gtType = TYP_INT;
- tree->gtIntCon.gtIconVal = (int) lval;
- if (vnStore != nullptr)
+ if (doit)
{
- fgValueNumberTreeConst(tree);
+ tree->ChangeOperConst(GT_CNS_INT);
+ tree->gtType = TYP_INT;
+ tree->gtIntCon.gtIconVal = (int)lval;
+ if (vnStore != nullptr)
+ {
+ fgValueNumberTreeConst(tree);
+ }
}
- }
- return true;
+ return true;
#endif
- case GT_CNS_INT:
+ case GT_CNS_INT:
- ssize_t ival; ival = tree->gtIntCon.gtIconVal;
- ssize_t imask; imask = 0;
+ ssize_t ival;
+ ival = tree->gtIntCon.gtIconVal;
+ ssize_t imask;
+ imask = 0;
- switch (dstt)
- {
- case TYP_BYTE : imask = 0x0000007F; break;
- case TYP_BOOL :
- case TYP_UBYTE: imask = 0x000000FF; break;
- case TYP_SHORT: imask = 0x00007FFF; break;
- case TYP_CHAR : imask = 0x0000FFFF; break;
+ switch (dstt)
+ {
+ case TYP_BYTE:
+ imask = 0x0000007F;
+ break;
+ case TYP_BOOL:
+ case TYP_UBYTE:
+ imask = 0x000000FF;
+ break;
+ case TYP_SHORT:
+ imask = 0x00007FFF;
+ break;
+ case TYP_CHAR:
+ imask = 0x0000FFFF;
+ break;
#ifdef _TARGET_64BIT_
- case TYP_INT : imask = 0x7FFFFFFF; break;
- case TYP_UINT : imask = 0xFFFFFFFF; break;
+ case TYP_INT:
+ imask = 0x7FFFFFFF;
+ break;
+ case TYP_UINT:
+ imask = 0xFFFFFFFF;
+ break;
#endif // _TARGET_64BIT_
- default: return false;
- }
+ default:
+ return false;
+ }
- if ((ival & imask) != ival)
- return false;
+ if ((ival & imask) != ival)
+ {
+ return false;
+ }
#ifdef _TARGET_64BIT_
- if (doit)
- {
- tree->gtType = TYP_INT;
- tree->gtIntCon.gtIconVal = (int) ival;
- if (vnStore != nullptr)
+ if (doit)
{
- fgValueNumberTreeConst(tree);
+ tree->gtType = TYP_INT;
+ tree->gtIntCon.gtIconVal = (int)ival;
+ if (vnStore != nullptr)
+ {
+ fgValueNumberTreeConst(tree);
+ }
}
- }
#endif // _TARGET_64BIT_
- return true;
+ return true;
- /* Operands that are in memory can usually be narrowed
- simply by changing their gtType */
+ /* Operands that are in memory can usually be narrowed
+ simply by changing their gtType */
- case GT_LCL_VAR:
- /* We only allow narrowing long -> int for a GT_LCL_VAR */
- if (dstSize == sizeof(int))
- goto NARROW_IND;
- break;
+ case GT_LCL_VAR:
+ /* We only allow narrowing long -> int for a GT_LCL_VAR */
+ if (dstSize == sizeof(int))
+ {
+ goto NARROW_IND;
+ }
+ break;
- case GT_CLS_VAR:
- case GT_LCL_FLD:
- goto NARROW_IND;
- default:
- break;
+ case GT_CLS_VAR:
+ case GT_LCL_FLD:
+ goto NARROW_IND;
+ default:
+ break;
}
noway_assert(doit == false);
- return false;
-
+ return false;
}
- if (kind & (GTK_BINOP|GTK_UNOP))
+ if (kind & (GTK_BINOP | GTK_UNOP))
{
- GenTreePtr op1; op1 = tree->gtOp.gtOp1;
- GenTreePtr op2; op2 = tree->gtOp.gtOp2;
+ GenTreePtr op1;
+ op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2;
+ op2 = tree->gtOp.gtOp2;
switch (tree->gtOper)
{
- case GT_AND:
- noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType));
+ case GT_AND:
+ noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType));
-            // Is op2 a small constant that can be narrowed into dstt?
- // if so the result of the GT_AND will also fit into 'dstt' and can be narrowed
- if ((op2->gtOper == GT_CNS_INT) && optNarrowTree(op2, srct, dstt, NoVNPair, false))
- {
- // We will change the type of the tree and narrow op2
- //
- if (doit)
+            // Is op2 a small constant that can be narrowed into dstt?
+ // if so the result of the GT_AND will also fit into 'dstt' and can be narrowed
+ if ((op2->gtOper == GT_CNS_INT) && optNarrowTree(op2, srct, dstt, NoVNPair, false))
{
- tree->gtType = genActualType(dstt);
- tree->SetVNs(vnpNarrow);
-
- optNarrowTree(op2, srct, dstt, NoVNPair, true);
- // We may also need to cast away the upper bits of op1
- if (srcSize == 8)
+ // We will change the type of the tree and narrow op2
+ //
+ if (doit)
{
- assert(tree->gtType == TYP_INT);
- op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
+ tree->gtType = genActualType(dstt);
+ tree->SetVNs(vnpNarrow);
+
+ optNarrowTree(op2, srct, dstt, NoVNPair, true);
+ // We may also need to cast away the upper bits of op1
+ if (srcSize == 8)
+ {
+ assert(tree->gtType == TYP_INT);
+ op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
#ifdef DEBUG
- op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
+ op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
- tree->gtOp.gtOp1 = op1;
+ tree->gtOp.gtOp1 = op1;
+ }
}
+ return true;
}
- return true;
- }
-
- goto COMMON_BINOP;
- case GT_ADD:
- case GT_MUL:
+ goto COMMON_BINOP;
- if (tree->gtOverflow() || varTypeIsSmall(dstt))
- {
- noway_assert(doit == false);
- return false;
- }
- __fallthrough;
+ case GT_ADD:
+ case GT_MUL:
- case GT_OR:
- case GT_XOR:
-COMMON_BINOP:
- noway_assert(genActualType(tree->gtType) == genActualType(op1->gtType));
- noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType));
+ if (tree->gtOverflow() || varTypeIsSmall(dstt))
+ {
+ noway_assert(doit == false);
+ return false;
+ }
+ __fallthrough;
- if (gtIsActiveCSE_Candidate(op1) ||
- gtIsActiveCSE_Candidate(op2) ||
- !optNarrowTree(op1, srct, dstt, NoVNPair, doit) ||
- !optNarrowTree(op2, srct, dstt, NoVNPair, doit) )
- {
- noway_assert(doit == false);
- return false;
- }
+ case GT_OR:
+ case GT_XOR:
+ COMMON_BINOP:
+ noway_assert(genActualType(tree->gtType) == genActualType(op1->gtType));
+ noway_assert(genActualType(tree->gtType) == genActualType(op2->gtType));
- /* Simply change the type of the tree */
+ if (gtIsActiveCSE_Candidate(op1) || gtIsActiveCSE_Candidate(op2) ||
+ !optNarrowTree(op1, srct, dstt, NoVNPair, doit) || !optNarrowTree(op2, srct, dstt, NoVNPair, doit))
+ {
+ noway_assert(doit == false);
+ return false;
+ }
- if (doit)
- {
- if (tree->gtOper == GT_MUL && (tree->gtFlags & GTF_MUL_64RSLT))
- tree->gtFlags &= ~GTF_MUL_64RSLT;
+ /* Simply change the type of the tree */
- tree->gtType = genActualType(dstt);
- tree->SetVNs(vnpNarrow);
- }
+ if (doit)
+ {
+ if (tree->gtOper == GT_MUL && (tree->gtFlags & GTF_MUL_64RSLT))
+ {
+ tree->gtFlags &= ~GTF_MUL_64RSLT;
+ }
- return true;
+ tree->gtType = genActualType(dstt);
+ tree->SetVNs(vnpNarrow);
+ }
- case GT_IND:
+ return true;
-NARROW_IND:
- /* Simply change the type of the tree */
+ case GT_IND:
- if (doit && (dstSize <= genTypeSize(tree->gtType)))
- {
- tree->gtType = genSignedType(dstt);
- tree->SetVNs(vnpNarrow);
+ NARROW_IND:
+ /* Simply change the type of the tree */
- /* Make sure we don't mess up the variable type */
- if ((oper == GT_LCL_VAR) || (oper == GT_LCL_FLD))
- tree->gtFlags |= GTF_VAR_CAST;
- }
+ if (doit && (dstSize <= genTypeSize(tree->gtType)))
+ {
+ tree->gtType = genSignedType(dstt);
+ tree->SetVNs(vnpNarrow);
- return true;
+ /* Make sure we don't mess up the variable type */
+ if ((oper == GT_LCL_VAR) || (oper == GT_LCL_FLD))
+ {
+ tree->gtFlags |= GTF_VAR_CAST;
+ }
+ }
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GT:
- case GT_GE:
+ return true;
- /* These can always be narrowed since they only represent 0 or 1 */
- return true;
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GT:
+ case GT_GE:
+
+ /* These can always be narrowed since they only represent 0 or 1 */
+ return true;
- case GT_CAST:
+ case GT_CAST:
{
- var_types cast = tree->CastToType();
- var_types oprt = op1->TypeGet();
- unsigned oprSize = genTypeSize(oprt);
+ var_types cast = tree->CastToType();
+ var_types oprt = op1->TypeGet();
+ unsigned oprSize = genTypeSize(oprt);
if (cast != srct)
+ {
return false;
+ }
if (varTypeIsIntegralOrI(dstt) != varTypeIsIntegralOrI(oprt))
+ {
return false;
+ }
if (tree->gtOverflow())
+ {
return false;
+ }
/* Is this a cast from the type we're narrowing to or a smaller one? */
- if (oprSize <= dstSize)
+ if (oprSize <= dstSize)
{
/* Bash the target type of the cast */
- if (doit)
+ if (doit)
{
dstt = genSignedType(dstt);
- if (oprSize == dstSize)
+ if (oprSize == dstSize)
{
// Same size: change the CAST into a NOP
- tree->ChangeOper (GT_NOP);
- tree->gtType = dstt;
- tree->gtOp.gtOp2 = nullptr;
- tree->gtVNPair = op1->gtVNPair; // Set to op1's ValueNumber
+ tree->ChangeOper(GT_NOP);
+ tree->gtType = dstt;
+ tree->gtOp.gtOp2 = nullptr;
+ tree->gtVNPair = op1->gtVNPair; // Set to op1's ValueNumber
}
else
{
@@ -4982,68 +5221,70 @@ NARROW_IND:
}
}
- return true;
+ return true;
}
}
- return false;
-
- case GT_COMMA:
- if (!gtIsActiveCSE_Candidate(op2) &&
- optNarrowTree(op2, srct, dstt, vnpNarrow, doit))
- {
- /* Simply change the type of the tree */
+ return false;
- if (doit)
+ case GT_COMMA:
+ if (!gtIsActiveCSE_Candidate(op2) && optNarrowTree(op2, srct, dstt, vnpNarrow, doit))
{
- tree->gtType = genActualType(dstt);
- tree->SetVNs(vnpNarrow);
+ /* Simply change the type of the tree */
+
+ if (doit)
+ {
+ tree->gtType = genActualType(dstt);
+ tree->SetVNs(vnpNarrow);
+ }
+ return true;
}
- return true;
- }
- return false;
+ return false;
- default:
- noway_assert(doit == false);
- return false;
+ default:
+ noway_assert(doit == false);
+ return false;
}
-
}
- return false;
+ return false;
}
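The constant cases above boil down to one mask test: a constant can be retyped to the smaller type only when masking it with that type's representable range leaves the value unchanged. The sketch below is a standalone plain C++ illustration of that check, using ordinary integers instead of the JIT's GenTree nodes; the helper name fitsAfterNarrowing is made up for the example, while the masks mirror the lmask/imask tables in optNarrowTree.

#include <cstdint>
#include <cstdio>

// A constant fits in the destination type when masking it with that type's
// range leaves the value unchanged (mirrors the lmask/imask tables above).
static bool fitsAfterNarrowing(int64_t val, int64_t mask)
{
    return (val & mask) == val;
}

int main()
{
    const int64_t byteMask  = 0x0000007F; // TYP_BYTE
    const int64_t ubyteMask = 0x000000FF; // TYP_UBYTE / TYP_BOOL
    const int64_t shortMask = 0x00007FFF; // TYP_SHORT

    printf("%d\n", fitsAfterNarrowing(100, byteMask));    // 1: fits in TYP_BYTE
    printf("%d\n", fitsAfterNarrowing(200, byteMask));    // 0: too wide for TYP_BYTE
    printf("%d\n", fitsAfterNarrowing(200, ubyteMask));   // 1: fits in TYP_UBYTE
    printf("%d\n", fitsAfterNarrowing(40000, shortMask)); // 0: too wide for TYP_SHORT
    return 0;
}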
-
/*****************************************************************************
*
* The following logic figures out whether the given variable is assigned
* somewhere in a list of basic blocks (or in an entire loop).
*/
-
-Compiler::fgWalkResult Compiler::optIsVarAssgCB(GenTreePtr *pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::optIsVarAssgCB(GenTreePtr* pTree, fgWalkData* data)
{
GenTreePtr tree = *pTree;
- if (tree->OperKind() & GTK_ASGOP)
+ if (tree->OperKind() & GTK_ASGOP)
{
- GenTreePtr dest = tree->gtOp.gtOp1;
- genTreeOps destOper = dest->OperGet();
+ GenTreePtr dest = tree->gtOp.gtOp1;
+ genTreeOps destOper = dest->OperGet();
- isVarAssgDsc * desc = (isVarAssgDsc*)data->pCallbackData;
+ isVarAssgDsc* desc = (isVarAssgDsc*)data->pCallbackData;
assert(desc && desc->ivaSelf == desc);
- if (destOper == GT_LCL_VAR)
+ if (destOper == GT_LCL_VAR)
{
- unsigned tvar = dest->gtLclVarCommon.gtLclNum;
- if (tvar < lclMAX_ALLSET_TRACKED)
+ unsigned tvar = dest->gtLclVarCommon.gtLclNum;
+ if (tvar < lclMAX_ALLSET_TRACKED)
+ {
AllVarSetOps::AddElemD(data->compiler, desc->ivaMaskVal, tvar);
+ }
else
- desc->ivaMaskIncomplete = true;
+ {
+ desc->ivaMaskIncomplete = true;
+ }
- if (tvar == desc->ivaVar)
+ if (tvar == desc->ivaVar)
{
- if (tree != desc->ivaSkip)
- return WALK_ABORT;
+ if (tree != desc->ivaSkip)
+ {
+ return WALK_ABORT;
+ }
}
}
else if (destOper == GT_LCL_FLD)
@@ -5055,8 +5296,7 @@ Compiler::fgWalkResult Compiler::optIsVarAssgCB(GenTreePtr *pTree, fgWalkDa
// unsigned lclNum = dest->gtLclFld.gtLclNum;
// noway_assert(lvaTable[lclNum].lvAddrTaken);
- varRefKinds refs = varTypeIsGC(tree->TypeGet()) ? VR_IND_REF
- : VR_IND_SCL;
+ varRefKinds refs = varTypeIsGC(tree->TypeGet()) ? VR_IND_REF : VR_IND_SCL;
desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | refs);
}
else if (destOper == GT_CLS_VAR)
@@ -5067,35 +5307,31 @@ Compiler::fgWalkResult Compiler::optIsVarAssgCB(GenTreePtr *pTree, fgWalkDa
{
/* Set the proper indirection bits */
- varRefKinds refs = varTypeIsGC(tree->TypeGet()) ? VR_IND_REF
- : VR_IND_SCL;
+ varRefKinds refs = varTypeIsGC(tree->TypeGet()) ? VR_IND_REF : VR_IND_SCL;
desc->ivaMaskInd = varRefKinds(desc->ivaMaskInd | refs);
}
}
else if (tree->gtOper == GT_CALL)
{
- isVarAssgDsc * desc = (isVarAssgDsc*)data->pCallbackData;
+ isVarAssgDsc* desc = (isVarAssgDsc*)data->pCallbackData;
assert(desc && desc->ivaSelf == desc);
desc->ivaMaskCall = optCallInterf(tree);
}
- return WALK_CONTINUE;
+ return WALK_CONTINUE;
}
/*****************************************************************************/
-bool Compiler::optIsVarAssigned(BasicBlock * beg,
- BasicBlock * end,
- GenTreePtr skip,
- unsigned var)
+bool Compiler::optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTreePtr skip, unsigned var)
{
- bool result;
- isVarAssgDsc desc;
+ bool result;
+ isVarAssgDsc desc;
- desc.ivaSkip = skip;
+ desc.ivaSkip = skip;
#ifdef DEBUG
- desc.ivaSelf = &desc;
+ desc.ivaSelf = &desc;
#endif
desc.ivaVar = var;
desc.ivaMaskCall = CALLINT_NONE;
@@ -5108,15 +5344,17 @@ bool Compiler::optIsVarAssigned(BasicBlock * beg,
for (GenTreeStmt* stmt = beg->firstStmt(); stmt; stmt = stmt->gtNextStmt)
{
noway_assert(stmt->gtOper == GT_STMT);
- if (fgWalkTreePre(&stmt->gtStmtExpr, optIsVarAssgCB, &desc))
+ if (fgWalkTreePre(&stmt->gtStmtExpr, optIsVarAssgCB, &desc))
{
result = true;
goto DONE;
}
}
- if (beg == end)
+ if (beg == end)
+ {
break;
+ }
beg = beg->bbNext;
}
@@ -5125,15 +5363,13 @@ bool Compiler::optIsVarAssigned(BasicBlock * beg,
DONE:
- return result;
+ return result;
}
/*****************************************************************************/
-int Compiler::optIsSetAssgLoop(unsigned lnum,
- ALLVARSET_VALARG_TP vars,
- varRefKinds inds)
+int Compiler::optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds)
{
- LoopDsc * loop;
+ LoopDsc* loop;
/* Get hold of the loop descriptor */
@@ -5142,19 +5378,19 @@ int Compiler::optIsSetAssgLoop(unsigned lnum,
/* Do we already know what variables are assigned within this loop? */
- if (!(loop->lpFlags & LPFLG_ASGVARS_YES))
+ if (!(loop->lpFlags & LPFLG_ASGVARS_YES))
{
- isVarAssgDsc desc;
+ isVarAssgDsc desc;
- BasicBlock * beg;
- BasicBlock * end;
+ BasicBlock* beg;
+ BasicBlock* end;
/* Prepare the descriptor used by the tree walker call-back */
- desc.ivaVar = (unsigned)-1;
- desc.ivaSkip = NULL;
+ desc.ivaVar = (unsigned)-1;
+ desc.ivaSkip = nullptr;
#ifdef DEBUG
- desc.ivaSelf = &desc;
+ desc.ivaSelf = &desc;
#endif
AllVarSetOps::AssignNoCopy(this, desc.ivaMaskVal, AllVarSetOps::MakeEmpty(this));
desc.ivaMaskInd = VR_NONE;
@@ -5175,14 +5411,16 @@ int Compiler::optIsSetAssgLoop(unsigned lnum,
noway_assert(stmt->gtOper == GT_STMT);
fgWalkTreePre(&stmt->gtStmtExpr, optIsVarAssgCB, &desc);
- if (desc.ivaMaskIncomplete)
+ if (desc.ivaMaskIncomplete)
{
loop->lpFlags |= LPFLG_ASGVARS_INC;
}
}
- if (beg == end)
+ if (beg == end)
+ {
break;
+ }
}
AllVarSetOps::Assign(this, loop->lpAsgVars, desc.ivaMaskVal);
@@ -5195,81 +5433,88 @@ int Compiler::optIsSetAssgLoop(unsigned lnum,
}
/* Now we can finally test the caller's mask against the loop's */
- if (!AllVarSetOps::IsEmptyIntersection(this, loop->lpAsgVars, vars) ||
- (loop->lpAsgInds & inds))
+ if (!AllVarSetOps::IsEmptyIntersection(this, loop->lpAsgVars, vars) || (loop->lpAsgInds & inds))
{
- return 1;
+ return 1;
}
switch (loop->lpAsgCall)
{
- case CALLINT_ALL:
+ case CALLINT_ALL:
- /* Can't hoist if the call might have side effect on an indirection. */
+ /* Can't hoist if the call might have side effect on an indirection. */
- if (loop->lpAsgInds != VR_NONE)
- return 1;
+ if (loop->lpAsgInds != VR_NONE)
+ {
+ return 1;
+ }
- break;
+ break;
- case CALLINT_REF_INDIRS:
+ case CALLINT_REF_INDIRS:
-        /* Can't hoist if the call might have side effect on a ref indirection. */
+            /* Can't hoist if the call might have side effect on a ref indirection. */
- if (loop->lpAsgInds & VR_IND_REF)
- return 1;
+ if (loop->lpAsgInds & VR_IND_REF)
+ {
+ return 1;
+ }
- break;
+ break;
- case CALLINT_SCL_INDIRS:
+ case CALLINT_SCL_INDIRS:
-        /* Can't hoist if the call might have side effect on a non-ref indirection. */
+            /* Can't hoist if the call might have side effect on a non-ref indirection. */
- if (loop->lpAsgInds & VR_IND_SCL)
- return 1;
+ if (loop->lpAsgInds & VR_IND_SCL)
+ {
+ return 1;
+ }
- break;
+ break;
- case CALLINT_ALL_INDIRS:
+ case CALLINT_ALL_INDIRS:
- /* Can't hoist if the call might have side effect on any indirection. */
+ /* Can't hoist if the call might have side effect on any indirection. */
- if (loop->lpAsgInds & (VR_IND_REF|VR_IND_SCL))
- return 1;
+ if (loop->lpAsgInds & (VR_IND_REF | VR_IND_SCL))
+ {
+ return 1;
+ }
- break;
+ break;
- case CALLINT_NONE:
+ case CALLINT_NONE:
- /* Other helpers kill nothing */
+ /* Other helpers kill nothing */
- break;
+ break;
- default:
- noway_assert(!"Unexpected lpAsgCall value");
+ default:
+ noway_assert(!"Unexpected lpAsgCall value");
}
- return 0;
+ return 0;
}
-void Compiler::optPerformHoistExpr(GenTreePtr origExpr, unsigned lnum)
+void Compiler::optPerformHoistExpr(GenTreePtr origExpr, unsigned lnum)
{
#ifdef DEBUG
if (verbose)
{
printf("\nHoisting a copy of ");
printTreeID(origExpr);
- printf(" into PreHeader for loop L%02u <BB%02u..BB%02u>:\n",
- lnum, optLoopTable[lnum].lpFirst->bbNum, optLoopTable[lnum].lpBottom->bbNum);
+ printf(" into PreHeader for loop L%02u <BB%02u..BB%02u>:\n", lnum, optLoopTable[lnum].lpFirst->bbNum,
+ optLoopTable[lnum].lpBottom->bbNum);
gtDispTree(origExpr);
printf("\n");
}
#endif
// This loop has to be in a form that is approved for hoisting.
- assert (optLoopTable[lnum].lpFlags & LPFLG_HOISTABLE);
+ assert(optLoopTable[lnum].lpFlags & LPFLG_HOISTABLE);
- // Create a copy of the expression and mark it for CSE's.
+ // Create a copy of the expression and mark it for CSE's.
GenTreePtr hoistExpr = gtCloneExpr(origExpr, GTF_MAKE_CSE);
// At this point we should have a cloned expression, marked with the GTF_MAKE_CSE flag
@@ -5287,13 +5532,13 @@ void Compiler::optPerformHoistExpr(GenTreePtr origExpr, unsig
fgCreateLoopPreHeader(lnum);
- BasicBlock * preHead = optLoopTable[lnum].lpHead;
- assert (preHead->bbJumpKind == BBJ_NONE);
+ BasicBlock* preHead = optLoopTable[lnum].lpHead;
+ assert(preHead->bbJumpKind == BBJ_NONE);
// fgMorphTree and lvaRecursiveIncRefCounts requires that compCurBB be the block that contains
// (or in this case, will contain) the expression.
- compCurBB = preHead;
-
+ compCurBB = preHead;
+
// Increment the ref counts of any local vars appearing in "hoist".
// Note that we need to do this before fgMorphTree() as fgMorph() could constant
// fold away some of the lcl vars referenced by "hoist".
@@ -5312,12 +5557,12 @@ void Compiler::optPerformHoistExpr(GenTreePtr origExpr, unsig
{
/* append after last statement */
- GenTreePtr last = treeList->gtPrev;
- assert (last->gtNext == 0);
+ GenTreePtr last = treeList->gtPrev;
+ assert(last->gtNext == nullptr);
- last->gtNext = hoistStmt;
- hoistStmt->gtPrev = last;
- treeList->gtPrev = hoistStmt;
+ last->gtNext = hoistStmt;
+ hoistStmt->gtPrev = last;
+ treeList->gtPrev = hoistStmt;
}
else
{
@@ -5344,11 +5589,11 @@ void Compiler::optPerformHoistExpr(GenTreePtr origExpr, unsig
}
#ifdef DEBUG
- if (m_nodeTestData != NULL)
+ if (m_nodeTestData != nullptr)
{
// What is the depth of the loop "lnum"?
- ssize_t depth = 0;
+ ssize_t depth = 0;
unsigned lnumIter = lnum;
while (optLoopTable[lnumIter].lpParent != BasicBlock::NOT_IN_LOOP)
{
@@ -5372,16 +5617,18 @@ void Compiler::optPerformHoistExpr(GenTreePtr origExpr, unsig
{
printf("Node ");
printTreeID(origExpr);
- printf(" was declared as hoistable from loop at nesting depth %d; actually hoisted from loop at depth %d.\n",
+ printf(" was declared as hoistable from loop at nesting depth %d; actually hoisted from loop at depth "
+ "%d.\n",
tlAndN.m_num, depth);
assert(false);
}
else
{
- // We've correctly hoisted this, so remove the annotation. Later, we'll check for any remaining "must hoist" annotations.
+ // We've correctly hoisted this, so remove the annotation. Later, we'll check for any remaining "must
+ // hoist" annotations.
testData->Remove(origExpr);
// Now we insert an annotation to make sure that "hoistExpr" is actually CSE'd.
- tlAndN.m_tl = TL_CSE_Def;
+ tlAndN.m_tl = TL_CSE_Def;
tlAndN.m_num = m_loopHoistCSEClass++;
testData->Set(hoistExpr, tlAndN);
}
@@ -5397,13 +5644,15 @@ void Compiler::optPerformHoistExpr(GenTreePtr origExpr, unsig
}
m_totalHoistedExpressions++;
#endif // LOOP_HOIST_STATS
-}
+}
-void Compiler::optHoistLoopCode()
+void Compiler::optHoistLoopCode()
{
// If we don't have any loops in the method then take an early out now.
if (optLoopCount == 0)
+ {
return;
+ }
#ifdef DEBUG
unsigned jitNoHoist = JitConfig.JitNoHoist();
@@ -5436,11 +5685,11 @@ void Compiler::optHoistLoopCode()
if (methHash < methHashLo || methHash > methHashHi)
return;
printf("Doing loop hoisting in %s (0x%x).\n", info.compFullName, methHash);
-#endif // DEBUG
-#endif // 0 -- debugging loop cloning issues
+#endif // DEBUG
+#endif // 0 -- debugging loop cloning issues
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\n*************** In optHoistLoopCode()\n");
printf("Blocks/Trees before phase\n");
@@ -5455,7 +5704,9 @@ void Compiler::optHoistLoopCode()
for (unsigned lnum = 0; lnum < optLoopCount; lnum++)
{
if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED)
+ {
continue;
+ }
if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP)
{
@@ -5464,8 +5715,8 @@ void Compiler::optHoistLoopCode()
}
#if DEBUG
- if (fgModified)
- {
+ if (fgModified)
+ {
if (verbose)
{
printf("Blocks/Trees after optHoistLoopCode() modified flowgraph\n");
@@ -5481,17 +5732,23 @@ void Compiler::optHoistLoopCode()
#ifdef DEBUG
// Test Data stuff..
// If we have no test data, early out.
- if (m_nodeTestData == NULL) return;
+ if (m_nodeTestData == nullptr)
+ {
+ return;
+ }
NodeToTestDataMap* testData = GetNodeTestData();
for (NodeToTestDataMap::KeyIterator ki = testData->Begin(); !ki.Equal(testData->End()); ++ki)
{
TestLabelAndNum tlAndN;
- GenTreePtr node = ki.Get();
- bool b = testData->Lookup(node, &tlAndN);
+ GenTreePtr node = ki.Get();
+ bool b = testData->Lookup(node, &tlAndN);
assert(b);
- if (tlAndN.m_tl != TL_LoopHoist) continue;
+ if (tlAndN.m_tl != TL_LoopHoist)
+ {
+ continue;
+ }
// Otherwise, it is a loop hoist annotation.
- assert(tlAndN.m_num < 100); // >= 100 indicates nested static field address, should already have been moved.
+ assert(tlAndN.m_num < 100); // >= 100 indicates nested static field address, should already have been moved.
if (tlAndN.m_num >= 0)
{
printf("Node ");
@@ -5500,10 +5757,10 @@ void Compiler::optHoistLoopCode()
assert(false);
}
}
-#endif // DEBUG
+#endif // DEBUG
}
-void Compiler::optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt)
+void Compiler::optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt)
{
// Do this loop, then recursively do all nested loops.
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -5515,7 +5772,7 @@ void Compiler::optHoistLoopNest(unsigned lnum, LoopHoistConte
#endif // LOOP_HOIST_STATS
optHoistThisLoop(lnum, hoistCtxt);
-
+
VNSet* hoistedInCurLoop = hoistCtxt->ExtractHoistedInCurLoop();
if (optLoopTable[lnum].lpChild != BasicBlock::NOT_IN_LOOP)
@@ -5534,7 +5791,8 @@ void Compiler::optHoistLoopNest(unsigned lnum, LoopHoistConte
}
}
- for (unsigned child = optLoopTable[lnum].lpChild; child != BasicBlock::NOT_IN_LOOP; child = optLoopTable[child].lpSibling)
+ for (unsigned child = optLoopTable[lnum].lpChild; child != BasicBlock::NOT_IN_LOOP;
+ child = optLoopTable[child].lpSibling)
{
optHoistLoopNest(child, hoistCtxt);
}
@@ -5552,14 +5810,16 @@ void Compiler::optHoistLoopNest(unsigned lnum, LoopHoistConte
}
}
-void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt)
+void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt)
{
LoopDsc* pLoopDsc = &optLoopTable[lnum];
/* If loop was removed continue */
- if (pLoopDsc->lpFlags & LPFLG_REMOVED)
+ if (pLoopDsc->lpFlags & LPFLG_REMOVED)
+ {
return;
+ }
/* Get the head and tail of the loop */
@@ -5570,39 +5830,46 @@ void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistConte
// We must have a do-while loop
if ((pLoopDsc->lpFlags & LPFLG_DO_WHILE) == 0)
+ {
return;
+ }
// The loop-head must dominate the loop-entry.
// TODO-CQ: Couldn't we make this true if it's not?
if (!fgDominate(head, lbeg))
+ {
return;
+ }
// if lbeg is the start of a new try block then we won't be able to hoist
if (!BasicBlock::sameTryRegion(head, lbeg))
+ {
return;
+ }
// We don't bother hoisting when inside of a catch block
if ((lbeg->bbCatchTyp != BBCT_NONE) && (lbeg->bbCatchTyp != BBCT_FINALLY))
+ {
return;
+ }
pLoopDsc->lpFlags |= LPFLG_HOISTABLE;
- unsigned begn = lbeg->bbNum;
- unsigned endn = tail->bbNum;
+ unsigned begn = lbeg->bbNum;
+ unsigned endn = tail->bbNum;
// Ensure the per-loop sets/tables are empty.
hoistCtxt->m_curLoopVnInvariantCache.RemoveAll();
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("optHoistLoopCode for loop L%02u <BB%02u..BB%02u>:\n", lnum, begn, endn);
- printf(" Loop body %s a call\n", pLoopDsc->lpContainsCall ? "contains" : "does not contain");
+ printf(" Loop body %s a call\n", pLoopDsc->lpContainsCall ? "contains" : "does not contain");
}
#endif
- VARSET_TP VARSET_INIT_NOCOPY(loopVars, VarSetOps::Intersection(this, pLoopDsc->lpVarInOut,
- pLoopDsc->lpVarUseDef));
+ VARSET_TP VARSET_INIT_NOCOPY(loopVars, VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, pLoopDsc->lpVarUseDef));
pLoopDsc->lpVarInOutCount = VarSetOps::Count(this, pLoopDsc->lpVarInOut);
pLoopDsc->lpLoopVarCount = VarSetOps::Count(this, loopVars);
@@ -5616,32 +5883,32 @@ void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistConte
// Since 64-bit variables take up two registers on 32-bit targets, we increase
// the Counts such that each TYP_LONG variable counts twice.
//
- VARSET_TP VARSET_INIT_NOCOPY(loopLongVars, VarSetOps::Intersection(this, loopVars, lvaLongVars));
- VARSET_TP VARSET_INIT_NOCOPY(inOutLongVars, VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaLongVars));
+ VARSET_TP VARSET_INIT_NOCOPY(loopLongVars, VarSetOps::Intersection(this, loopVars, lvaLongVars));
+ VARSET_TP VARSET_INIT_NOCOPY(inOutLongVars, VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaLongVars));
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("\n LONGVARS(%d)=", VarSetOps::Count(this, lvaLongVars));
- lvaDispVarSet(lvaLongVars);
+ printf("\n LONGVARS(%d)=", VarSetOps::Count(this, lvaLongVars));
+ lvaDispVarSet(lvaLongVars);
}
#endif
- pLoopDsc->lpLoopVarCount += VarSetOps::Count(this, loopLongVars);
+ pLoopDsc->lpLoopVarCount += VarSetOps::Count(this, loopLongVars);
pLoopDsc->lpVarInOutCount += VarSetOps::Count(this, inOutLongVars);
}
#endif // !_TARGET_64BIT_
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("\n USEDEF (%d)=", VarSetOps::Count(this, pLoopDsc->lpVarUseDef));
- lvaDispVarSet(pLoopDsc->lpVarUseDef);
+ printf("\n USEDEF (%d)=", VarSetOps::Count(this, pLoopDsc->lpVarUseDef));
+ lvaDispVarSet(pLoopDsc->lpVarUseDef);
- printf("\n INOUT (%d)=", pLoopDsc->lpVarInOutCount);
- lvaDispVarSet(pLoopDsc->lpVarInOut);
-
- printf("\n LOOPVARS(%d)=", pLoopDsc->lpLoopVarCount);
- lvaDispVarSet(loopVars);
+ printf("\n INOUT (%d)=", pLoopDsc->lpVarInOutCount);
+ lvaDispVarSet(pLoopDsc->lpVarInOut);
+
+ printf("\n LOOPVARS(%d)=", pLoopDsc->lpLoopVarCount);
+ lvaDispVarSet(loopVars);
printf("\n");
}
#endif
@@ -5650,24 +5917,24 @@ void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistConte
if (floatVarsCount > 0)
{
- VARSET_TP VARSET_INIT_NOCOPY(loopFPVars, VarSetOps::Intersection(this, loopVars, lvaFloatVars));
- VARSET_TP VARSET_INIT_NOCOPY(inOutFPVars, VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaFloatVars));
+ VARSET_TP VARSET_INIT_NOCOPY(loopFPVars, VarSetOps::Intersection(this, loopVars, lvaFloatVars));
+ VARSET_TP VARSET_INIT_NOCOPY(inOutFPVars, VarSetOps::Intersection(this, pLoopDsc->lpVarInOut, lvaFloatVars));
pLoopDsc->lpLoopVarFPCount = VarSetOps::Count(this, loopFPVars);
pLoopDsc->lpVarInOutFPCount = VarSetOps::Count(this, inOutFPVars);
pLoopDsc->lpHoistedFPExprCount = 0;
- pLoopDsc->lpLoopVarCount -= pLoopDsc->lpLoopVarFPCount;
+ pLoopDsc->lpLoopVarCount -= pLoopDsc->lpLoopVarFPCount;
pLoopDsc->lpVarInOutCount -= pLoopDsc->lpVarInOutFPCount;
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf( " INOUT-FP(%d)=", pLoopDsc->lpVarInOutFPCount);
- lvaDispVarSet(inOutFPVars);
+ printf(" INOUT-FP(%d)=", pLoopDsc->lpVarInOutFPCount);
+ lvaDispVarSet(inOutFPVars);
- printf("\n LOOPV-FP(%d)=", pLoopDsc->lpLoopVarFPCount);
- lvaDispVarSet(loopFPVars);
+ printf("\n LOOPV-FP(%d)=", pLoopDsc->lpLoopVarFPCount);
+ lvaDispVarSet(loopFPVars);
}
#endif
}
@@ -5687,9 +5954,7 @@ void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistConte
assert(pLoopDsc->lpExit != nullptr);
BasicBlock* cur = pLoopDsc->lpExit;
// Push dominators, until we reach "entry" or exit the loop.
- while ( cur != nullptr
- && pLoopDsc->lpContains(cur)
- && cur != pLoopDsc->lpEntry)
+ while (cur != nullptr && pLoopDsc->lpContains(cur) && cur != pLoopDsc->lpEntry)
{
defExec.Push(cur);
cur = cur->bbIDom;
@@ -5701,9 +5966,9 @@ void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistConte
}
defExec.Push(pLoopDsc->lpEntry);
}
- else // More than one exit
+ else // More than one exit
{
- // We'll assume that only the entry block is definitely executed.
+ // We'll assume that only the entry block is definitely executed.
// We could in the future do better.
defExec.Push(pLoopDsc->lpEntry);
}
@@ -5717,41 +5982,35 @@ void Compiler::optHoistThisLoop(unsigned lnum, LoopHoistConte
}
// Hoist any expressions in "blk" that are invariant in loop "lnum" outside of "blk" and into a PreHead for loop "lnum".
-void Compiler::optHoistLoopExprsForBlock(BasicBlock* blk,
- unsigned lnum,
- LoopHoistContext* hoistCtxt)
+void Compiler::optHoistLoopExprsForBlock(BasicBlock* blk, unsigned lnum, LoopHoistContext* hoistCtxt)
{
- LoopDsc* pLoopDsc = &optLoopTable[lnum];
- bool firstBlockAndBeforeSideEffect = (blk == pLoopDsc->lpEntry);
- unsigned blkWeight = blk->getBBWeight(this);
+ LoopDsc* pLoopDsc = &optLoopTable[lnum];
+ bool firstBlockAndBeforeSideEffect = (blk == pLoopDsc->lpEntry);
+ unsigned blkWeight = blk->getBBWeight(this);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf(" optHoistLoopExprsForBlock BB%02u (weight=%6s) of loop L%02u <BB%02u..BB%02u>, firstBlock is %s\n",
- blk->bbNum,
- refCntWtd2str(blkWeight),
- lnum,
- pLoopDsc->lpFirst->bbNum,
- pLoopDsc->lpBottom->bbNum,
+ blk->bbNum, refCntWtd2str(blkWeight), lnum, pLoopDsc->lpFirst->bbNum, pLoopDsc->lpBottom->bbNum,
firstBlockAndBeforeSideEffect ? "true" : "false");
- if (blkWeight < (BB_UNITY_WEIGHT / 10))
+ if (blkWeight < (BB_UNITY_WEIGHT / 10))
{
printf(" block weight is too small to perform hoisting.\n");
}
}
#endif
- if (blkWeight < (BB_UNITY_WEIGHT / 10))
- {
- // Block weight is too small to perform hoisting.
- return;
- }
+ if (blkWeight < (BB_UNITY_WEIGHT / 10))
+ {
+ // Block weight is too small to perform hoisting.
+ return;
+ }
for (GenTreeStmt* stmt = blk->FirstNonPhiDef(); stmt; stmt = stmt->gtNextStmt)
{
GenTreePtr stmtTree = stmt->gtStmtExpr;
- bool hoistable;
+ bool hoistable;
(void)optHoistLoopExprsForTree(stmtTree, lnum, hoistCtxt, &firstBlockAndBeforeSideEffect, &hoistable);
if (hoistable)
{
@@ -5778,14 +6037,14 @@ bool Compiler::optIsProfitableToHoistableTree(GenTreePtr tree, unsigned lnum)
loopVarCount = pLoopDsc->lpLoopVarFPCount;
varInOutCount = pLoopDsc->lpVarInOutFPCount;
- availRegCount = CNT_CALLEE_SAVED_FLOAT;
+ availRegCount = CNT_CALLEE_SAVED_FLOAT;
if (!loopContainsCall)
{
- availRegCount += CNT_CALLEE_TRASH_FLOAT-1;
+ availRegCount += CNT_CALLEE_TRASH_FLOAT - 1;
}
#ifdef _TARGET_ARM_
- // For ARM each double takes two FP registers
- // For now on ARM we won't track singles/doubles
+ // For ARM each double takes two FP registers
+ // For now on ARM we won't track singles/doubles
// and instead just assume that we always have doubles.
//
availRegCount /= 2;
@@ -5797,16 +6056,16 @@ bool Compiler::optIsProfitableToHoistableTree(GenTreePtr tree, unsigned lnum)
loopVarCount = pLoopDsc->lpLoopVarCount;
varInOutCount = pLoopDsc->lpVarInOutCount;
- availRegCount = CNT_CALLEE_SAVED-1;
+ availRegCount = CNT_CALLEE_SAVED - 1;
if (!loopContainsCall)
{
- availRegCount += CNT_CALLEE_TRASH-1;
+ availRegCount += CNT_CALLEE_TRASH - 1;
}
#ifndef _TARGET_64BIT_
// For our 32-bit targets Long types take two registers.
if (varTypeIsLong(tree->TypeGet()))
{
- availRegCount = (availRegCount+1) / 2;
+ availRegCount = (availRegCount + 1) / 2;
}
#endif
}
@@ -5814,13 +6073,13 @@ bool Compiler::optIsProfitableToHoistableTree(GenTreePtr tree, unsigned lnum)
// decrement the availRegCount by the count of expression that we have already hoisted.
availRegCount -= hoistedExprCount;
- // the variables that are read/written inside the loop should
+ // the variables that are read/written inside the loop should
// always be a subset of the InOut variables for the loop
assert(loopVarCount <= varInOutCount);
// When loopVarCount >= availRegCount we believe that all of the
// available registers will get used to hold LclVars inside the loop.
- // This pessimistically assumes that each loopVar has a conflicting
+ // This pessimistically assumes that each loopVar has a conflicting
// lifetime with every other loopVar.
    // For this case we will hoist the expression only if it is profitable
// to place it in a stack home location (gtCostEx >= 2*IND_COST_EX)
@@ -5830,23 +6089,27 @@ bool Compiler::optIsProfitableToHoistableTree(GenTreePtr tree, unsigned lnum)
if (loopVarCount >= availRegCount)
{
// Don't hoist expressions that are not heavy: tree->gtCostEx < (2*IND_COST_EX)
- if (tree->gtCostEx < (2*IND_COST_EX))
+ if (tree->gtCostEx < (2 * IND_COST_EX))
+ {
return false;
+ }
}
-    // When varInOutCount < availRegCount we know that there are
+    // When varInOutCount < availRegCount we know that there are
// some available register(s) when we enter the loop body.
// When varInOutCount == availRegCount there often will be a register
- // available when we enter the loop body, since a loop often defines a
- // LclVar on exit or there is often at least one LclVar that is worth
+ // available when we enter the loop body, since a loop often defines a
+ // LclVar on exit or there is often at least one LclVar that is worth
// spilling to the stack to make way for this hoisted expression.
    // So we are willing to hoist an expression with gtCostEx == MIN_CSE_COST
//
if (varInOutCount > availRegCount)
{
// Don't hoist expressions that barely meet CSE cost requirements: tree->gtCostEx == MIN_CSE_COST
- if (tree->gtCostEx <= MIN_CSE_COST+1)
+ if (tree->gtCostEx <= MIN_CSE_COST + 1)
+ {
return false;
+ }
}
return true;
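The register-pressure filter described in the comments above condenses to a small predicate: under heavy pressure, only sufficiently expensive trees are hoisted. The following is a rough standalone C++ sketch; worthHoisting is an illustrative name, and indCostEx / minCseCost are passed as parameters to stand in for the JIT's IND_COST_EX and MIN_CSE_COST rather than reproducing their actual values.

#include <cstdio>

// Condensed sketch of the profitability filter above: when register pressure
// is high, only sufficiently expensive trees are worth hoisting.
// indCostEx and minCseCost stand in for the JIT's IND_COST_EX / MIN_CSE_COST.
static bool worthHoisting(int costEx, int loopVarCount, int varInOutCount,
                          int availRegCount, int indCostEx, int minCseCost)
{
    if (loopVarCount >= availRegCount)
    {
        // All registers are presumed taken by loop variables, so the hoisted
        // value likely ends up in a stack home; require a heavy expression.
        if (costEx < 2 * indCostEx)
        {
            return false;
        }
    }

    if (varInOutCount > availRegCount)
    {
        // Registers are scarce at loop entry; skip expressions that barely
        // meet the CSE cost threshold.
        if (costEx <= minCseCost + 1)
        {
            return false;
        }
    }

    return true;
}

int main()
{
    // A cheap tree under heavy register pressure is rejected.
    printf("%d\n", worthHoisting(2, 8, 10, 6, 2, 2)); // 0
    // An expensive tree under the same pressure is accepted.
    printf("%d\n", worthHoisting(9, 8, 10, 6, 2, 2)); // 1
    return 0;
}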
@@ -5856,17 +6119,14 @@ bool Compiler::optIsProfitableToHoistableTree(GenTreePtr tree, unsigned lnum)
// This function returns true if 'tree' is a loop invariant expression.
// It also sets '*pHoistable' to true if 'tree' can be hoisted into a loop PreHeader block
//
-bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
- unsigned lnum,
- LoopHoistContext* hoistCtxt,
- bool* pFirstBlockAndBeforeSideEffect,
- bool* pHoistable)
+bool Compiler::optHoistLoopExprsForTree(
+ GenTreePtr tree, unsigned lnum, LoopHoistContext* hoistCtxt, bool* pFirstBlockAndBeforeSideEffect, bool* pHoistable)
{
// First do the children.
// We must keep track of whether each child node was hoistable or not
//
unsigned nChildren = tree->NumChildren();
- bool childrenHoistable[GenTree::MAX_CHILDREN];
+ bool childrenHoistable[GenTree::MAX_CHILDREN];
// Initialize the array elements for childrenHoistable[] to false
for (unsigned i = 0; i < nChildren; i++)
@@ -5877,7 +6137,8 @@ bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
bool treeIsInvariant = true;
for (unsigned childNum = 0; childNum < nChildren; childNum++)
{
- if (!optHoistLoopExprsForTree(tree->GetChild(childNum), lnum, hoistCtxt, pFirstBlockAndBeforeSideEffect, &childrenHoistable[childNum]))
+ if (!optHoistLoopExprsForTree(tree->GetChild(childNum), lnum, hoistCtxt, pFirstBlockAndBeforeSideEffect,
+ &childrenHoistable[childNum]))
{
treeIsInvariant = false;
}
@@ -5895,7 +6156,7 @@ bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
treeIsHoistable = optIsCSEcandidate(tree);
// If it's a call, it must be a helper call, and be pure.
- // Further, if it may run a cctor, it must be labeled as "Hoistable"
+ // Further, if it may run a cctor, it must be labeled as "Hoistable"
// (meaning it won't run a cctor because the class is not precise-init).
if (treeIsHoistable && tree->OperGet() == GT_CALL)
{
@@ -5911,8 +6172,7 @@ bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
{
treeIsHoistable = false;
}
- else if ( s_helperCallProperties.MayRunCctor(helpFunc)
- && (call->gtFlags & GTF_CALL_HOISTABLE) == 0)
+ else if (s_helperCallProperties.MayRunCctor(helpFunc) && (call->gtFlags & GTF_CALL_HOISTABLE) == 0)
{
treeIsHoistable = false;
}
@@ -5925,7 +6185,7 @@ bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
{
// For now, we give up on an expression that might raise an exception if it is after the
// first possible global side effect (and we assume we're after that if we're not in the first block).
- //TODO-CQ: this is when we might do loop cloning.
+ // TODO-CQ: this is when we might do loop cloning.
//
if ((tree->gtFlags & GTF_EXCEPT) != 0)
{
@@ -5936,13 +6196,15 @@ bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
//
if (tree->OperGet() == GT_CLS_VAR)
{
- // TODO-CQ: test that fails if we hoist GT_CLS_VAR: JIT\Directed\Languages\ComponentPascal\pi_r.exe method Main
+ // TODO-CQ: test that fails if we hoist GT_CLS_VAR: JIT\Directed\Languages\ComponentPascal\pi_r.exe
+ // method Main
treeIsHoistable = false;
}
}
// Is the value of the whole tree loop invariant?
- treeIsInvariant = optVNIsLoopInvariant(tree->gtVNPair.GetLiberal(), lnum, &hoistCtxt->m_curLoopVnInvariantCache);
+ treeIsInvariant =
+ optVNIsLoopInvariant(tree->gtVNPair.GetLiberal(), lnum, &hoistCtxt->m_curLoopVnInvariantCache);
// Is the value of the whole tree loop invariant?
if (!treeIsInvariant)
@@ -5952,7 +6214,7 @@ bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
}
// Check if we need to set '*pFirstBlockAndBeforeSideEffect' to false.
- // If we encounter a tree with a call in it
+ // If we encounter a tree with a call in it
    // or if we see an assignment to a global we set it to false.
//
// If we are already set to false then we can skip these checks
@@ -5966,7 +6228,7 @@ bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
if (tree->gtFlags & GTF_CALL)
{
*pFirstBlockAndBeforeSideEffect = false;
- }
+ }
else if (tree->OperIsAssignment())
{
// If the LHS of the assignment has a global reference, then assume it's a global side effect.
@@ -5975,7 +6237,8 @@ bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
{
*pFirstBlockAndBeforeSideEffect = false;
}
- } else if (tree->OperIsCopyBlkOp())
+ }
+ else if (tree->OperIsCopyBlkOp())
{
GenTreePtr args = tree->gtOp.gtOp1;
assert(args->OperGet() == GT_LIST);
@@ -5985,7 +6248,7 @@ bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
}
}
}
-
+
// If this 'tree' is hoistable then we return and the caller will
    // decide to hoist it as part of a larger hoistable expression.
//
@@ -5997,9 +6260,11 @@ bool Compiler::optHoistLoopExprsForTree(GenTreePtr tree,
{
if (childrenHoistable[childNum])
{
- // We can't hoist the LHS of an assignment, isn't a real use.
- if (childNum == 0 && (tree->OperIsAssignment()))
+ // We can't hoist the LHS of an assignment, isn't a real use.
+ if (childNum == 0 && (tree->OperIsAssignment()))
+ {
continue;
+ }
GenTreePtr child = tree->GetChild(childNum);
@@ -6023,16 +6288,21 @@ void Compiler::optHoistCandidate(GenTreePtr tree, unsigned lnum, LoopHoistContex
// The outer loop also must be suitable for hoisting...
if ((optLoopTable[lnum].lpFlags & LPFLG_HOISTABLE) == 0)
+ {
return;
+ }
// If the hoisted expression isn't valid at this loop head then break
if (!optTreeIsValidAtLoopHead(tree, lnum))
+ {
return;
+ }
    // It must pass the hoistable profitability tests for this loop level
if (!optIsProfitableToHoistableTree(tree, lnum))
+ {
return;
-
+ }
bool b;
if (hoistCtxt->m_hoistedInParentLoops.Lookup(tree->gtVNPair.GetLiberal(), &b))
@@ -6040,53 +6310,59 @@ void Compiler::optHoistCandidate(GenTreePtr tree, unsigned lnum, LoopHoistContex
// already hoisted in a parent loop, so don't hoist this expression.
return;
}
-
+
if (hoistCtxt->GetHoistedInCurLoop(this)->Lookup(tree->gtVNPair.GetLiberal(), &b))
{
// already hoisted this expression in the current loop, so don't hoist this expression.
return;
}
- // Expression can be hoisted
+ // Expression can be hoisted
optPerformHoistExpr(tree, lnum);
- // Increment lpHoistedExprCount or lpHoistedFPExprCount
+ // Increment lpHoistedExprCount or lpHoistedFPExprCount
if (!varTypeIsFloating(tree->TypeGet()))
{
- optLoopTable[lnum].lpHoistedExprCount++;
+ optLoopTable[lnum].lpHoistedExprCount++;
#ifndef _TARGET_64BIT_
// For our 32-bit targets Long types take two registers.
if (varTypeIsLong(tree->TypeGet()))
{
- optLoopTable[lnum].lpHoistedExprCount++;
+ optLoopTable[lnum].lpHoistedExprCount++;
}
#endif
}
else // Floating point expr hoisted
{
- optLoopTable[lnum].lpHoistedFPExprCount++;
+ optLoopTable[lnum].lpHoistedFPExprCount++;
}
// Record the hoisted expression in hoistCtxt
hoistCtxt->GetHoistedInCurLoop(this)->Set(tree->gtVNPair.GetLiberal(), true);
}
-
bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNToBoolMap* loopVnInvariantCache)
{
// If it is not a VN, is not loop-invariant.
- if (vn == ValueNumStore::NoVN) return false;
+ if (vn == ValueNumStore::NoVN)
+ {
+ return false;
+ }
// We'll always short-circuit constants.
if (vnStore->IsVNConstant(vn) || vn == vnStore->VNForVoid())
+ {
return true;
+ }
// If we've done this query previously, don't repeat.
bool previousRes = false;
if (loopVnInvariantCache->Lookup(vn, &previousRes))
+ {
return previousRes;
+ }
- bool res = true;
+ bool res = true;
VNFuncApp funcApp;
if (vnStore->GetVNFunc(vn, &funcApp))
{
@@ -6094,8 +6370,7 @@ bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNToBoolMap* loo
{
// First, make sure it's a "proper" phi -- the definition is a Phi application.
VNFuncApp phiDefValFuncApp;
- if ( !vnStore->GetVNFunc(funcApp.m_args[2], &phiDefValFuncApp)
- || phiDefValFuncApp.m_func != VNF_Phi)
+ if (!vnStore->GetVNFunc(funcApp.m_args[2], &phiDefValFuncApp) || phiDefValFuncApp.m_func != VNF_Phi)
{
// It's not *really* a definition, rather a pass-through of some other VN.
// (This could occur, say if both sides of an if-then-else diamond made the
@@ -6105,16 +6380,16 @@ bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNToBoolMap* loo
else
{
// Is the definition within the loop? If so, is not loop-invariant.
- unsigned lclNum = funcApp.m_args[0];
- unsigned ssaNum = funcApp.m_args[1];
+ unsigned lclNum = funcApp.m_args[0];
+ unsigned ssaNum = funcApp.m_args[1];
LclSsaVarDsc* ssaDef = lvaTable[lclNum].GetPerSsaData(ssaNum);
- res = !optLoopContains(lnum, ssaDef->m_defLoc.m_blk->bbNatLoopNum);
+ res = !optLoopContains(lnum, ssaDef->m_defLoc.m_blk->bbNatLoopNum);
}
}
else if (funcApp.m_func == VNF_PhiHeapDef)
{
BasicBlock* defnBlk = reinterpret_cast<BasicBlock*>(vnStore->ConstantValue<ssize_t>(funcApp.m_args[0]));
- res = !optLoopContains(lnum, defnBlk->bbNatLoopNum);
+ res = !optLoopContains(lnum, defnBlk->bbNatLoopNum);
}
else
{
@@ -6136,65 +6411,72 @@ bool Compiler::optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNToBoolMap* loo
// Otherwise, assume non-function "new, unique" VN's are not loop invariant.
res = false;
}
-
+
loopVnInvariantCache->Set(vn, res);
return res;
}
-bool Compiler::optTreeIsValidAtLoopHead(GenTreePtr tree, unsigned lnum)
+bool Compiler::optTreeIsValidAtLoopHead(GenTreePtr tree, unsigned lnum)
{
if (tree->OperIsLocal())
{
GenTreeLclVarCommon* lclVar = tree->AsLclVarCommon();
- unsigned lclNum = lclVar->gtLclNum;
+ unsigned lclNum = lclVar->gtLclNum;
        // The lclVar must have an SSA tracked lifetime
if (fgExcludeFromSsa(lclNum))
+ {
return false;
+ }
        // If the loop does not contain the SSA def we can hoist it.
if (!optLoopTable[lnum].lpContains(lvaTable[lclNum].GetPerSsaData(lclVar->GetSsaNum())->m_defLoc.m_blk))
+ {
return true;
+ }
}
else if (tree->OperIsConst())
{
return true;
}
- else // If every one of the children nodes are valid at this Loop's Head.
+ else // If every one of the children nodes are valid at this Loop's Head.
{
unsigned nChildren = tree->NumChildren();
for (unsigned childNum = 0; childNum < nChildren; childNum++)
{
if (!optTreeIsValidAtLoopHead(tree->GetChild(childNum), lnum))
+ {
return false;
+ }
}
return true;
}
return false;
}
-
/*****************************************************************************
*
* Creates a pre-header block for the given loop - a preheader is a BBJ_NONE
* header. The pre-header will replace the current lpHead in the loop table.
* The loop has to be a do-while loop. Thus, all blocks dominated by lpHead
- * will also be dominated by the loop-top, lpHead->bbNext.
+ * will also be dominated by the loop-top, lpHead->bbNext.
*
*/
-void Compiler::fgCreateLoopPreHeader(unsigned lnum)
+void Compiler::fgCreateLoopPreHeader(unsigned lnum)
{
LoopDsc* pLoopDsc = &optLoopTable[lnum];
/* This loop has to be a "do-while" loop */
- assert (pLoopDsc->lpFlags & LPFLG_DO_WHILE);
-
+ assert(pLoopDsc->lpFlags & LPFLG_DO_WHILE);
+
/* Have we already created a loop-preheader block? */
if (pLoopDsc->lpFlags & LPFLG_HAS_PREHEAD)
+ {
return;
+ }
BasicBlock* head = pLoopDsc->lpHead;
BasicBlock* top = pLoopDsc->lpTop;
@@ -6202,36 +6484,39 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
// if 'entry' and 'head' are in different try regions then we won't be able to hoist
if (!BasicBlock::sameTryRegion(head, entry))
+ {
return;
+ }
// Ensure that lpHead always dominates lpEntry
noway_assert(fgDominate(head, entry));
-
+
/* Get hold of the first block of the loop body */
- assert (top == entry);
+ assert(top == entry);
/* Allocate a new basic block */
- BasicBlock * preHead = bbNewBasicBlock(BBJ_NONE);
- preHead->bbFlags |= BBF_INTERNAL | BBF_LOOP_PREHEADER;
+ BasicBlock* preHead = bbNewBasicBlock(BBJ_NONE);
+ preHead->bbFlags |= BBF_INTERNAL | BBF_LOOP_PREHEADER;
// Must set IL code offset
- preHead->bbCodeOffs = top->bbCodeOffs;
+ preHead->bbCodeOffs = top->bbCodeOffs;
- // Set the default value of the preHead weight in case we don't have
+ // Set the default value of the preHead weight in case we don't have
// valid profile data and since this blocks weight is just an estimate
// we clear any BBF_PROF_WEIGHT flag that we may have picked up from head.
//
preHead->inheritWeight(head);
preHead->bbFlags &= ~BBF_PROF_WEIGHT;
-#ifdef DEBUG
- if (verbose)
- printf("\nCreated PreHeader (BB%02u) for loop L%02u (BB%02u - BB%02u), with weight = %s\n",
- preHead->bbNum, lnum, top->bbNum, pLoopDsc->lpBottom->bbNum,
- refCntWtd2str(preHead->getBBWeight(this)));
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\nCreated PreHeader (BB%02u) for loop L%02u (BB%02u - BB%02u), with weight = %s\n", preHead->bbNum,
+ lnum, top->bbNum, pLoopDsc->lpBottom->bbNum, refCntWtd2str(preHead->getBBWeight(this)));
+ }
#endif
// The preheader block is part of the containing loop (if any).
@@ -6239,17 +6524,17 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
if (fgIsUsingProfileWeights() && (head->bbJumpKind == BBJ_COND))
{
- if ((head->bbWeight == 0) || (head->bbNext->bbWeight == 0))
+ if ((head->bbWeight == 0) || (head->bbNext->bbWeight == 0))
{
preHead->bbWeight = 0;
preHead->bbFlags |= BBF_RUN_RARELY;
}
else
{
- bool allValidProfileWeights = ((head->bbFlags & BBF_PROF_WEIGHT) != 0)
- && ((head->bbJumpDest->bbFlags & BBF_PROF_WEIGHT) != 0)
- && ((head->bbNext->bbFlags & BBF_PROF_WEIGHT) != 0);
-
+ bool allValidProfileWeights = ((head->bbFlags & BBF_PROF_WEIGHT) != 0) &&
+ ((head->bbJumpDest->bbFlags & BBF_PROF_WEIGHT) != 0) &&
+ ((head->bbNext->bbFlags & BBF_PROF_WEIGHT) != 0);
+
if (allValidProfileWeights)
{
double loopEnteredCount;
@@ -6257,27 +6542,29 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
if (fgHaveValidEdgeWeights)
{
- flowList * edgeToNext = fgGetPredForBlock(head->bbNext, head);
- flowList * edgeToJump = fgGetPredForBlock(head->bbJumpDest, head);
- noway_assert(edgeToNext != NULL);
- noway_assert(edgeToJump != NULL);
-
- loopEnteredCount = ((double) edgeToNext->flEdgeWeightMin + (double) edgeToNext->flEdgeWeightMax) / 2.0;
- loopSkippedCount = ((double) edgeToJump->flEdgeWeightMin + (double) edgeToJump->flEdgeWeightMax) / 2.0;
+ flowList* edgeToNext = fgGetPredForBlock(head->bbNext, head);
+ flowList* edgeToJump = fgGetPredForBlock(head->bbJumpDest, head);
+ noway_assert(edgeToNext != nullptr);
+ noway_assert(edgeToJump != nullptr);
+
+ loopEnteredCount =
+ ((double)edgeToNext->flEdgeWeightMin + (double)edgeToNext->flEdgeWeightMax) / 2.0;
+ loopSkippedCount =
+ ((double)edgeToJump->flEdgeWeightMin + (double)edgeToJump->flEdgeWeightMax) / 2.0;
}
else
{
- loopEnteredCount = (double) head->bbNext->bbWeight;
- loopSkippedCount = (double) head->bbJumpDest->bbWeight;
+ loopEnteredCount = (double)head->bbNext->bbWeight;
+ loopSkippedCount = (double)head->bbJumpDest->bbWeight;
}
double loopTakenRatio = loopEnteredCount / (loopEnteredCount + loopSkippedCount);
// Calculate a good approximation of the preHead's block weight
- unsigned preHeadWeight = (unsigned) (((double) head->bbWeight * loopTakenRatio) + 0.5);
+ unsigned preHeadWeight = (unsigned)(((double)head->bbWeight * loopTakenRatio) + 0.5);
preHead->setBBWeight(max(preHeadWeight, 1));
noway_assert(!preHead->isRunRarely());
- }
+ }
}
}
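The preheader weight estimate above scales the head block's weight by the fraction of its executions that enter the loop, rounds to the nearest integer, and never lets the result drop below 1. A minimal standalone illustration of that arithmetic follows; the helper name and the edge counts in main are made up, not taken from a real profile.

#include <algorithm>
#include <cstdio>

// Sketch of the preheader weight estimate above: scale the head block's
// weight by the fraction of executions that actually enter the loop.
static unsigned preHeaderWeight(double enteredCount, double skippedCount, unsigned headWeight)
{
    double   takenRatio = enteredCount / (enteredCount + skippedCount);
    unsigned weight     = (unsigned)((double)headWeight * takenRatio + 0.5);
    return std::max(weight, 1u); // never assign a zero weight
}

int main()
{
    // 90 of 100 executions of the head fall into the loop: 200 * 0.9 = 180.
    printf("%u\n", preHeaderWeight(90.0, 10.0, 200));
    // The loop is almost always skipped; the weight is clamped to 1.
    printf("%u\n", preHeaderWeight(1.0, 999.0, 100));
    return 0;
}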
@@ -6322,12 +6609,12 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
// Update the EH table to make the hoisted block part of the loop's EH block.
fgExtendEHRegionBefore(top);
- // TODO-CQ: set dominators for this block, to allow loop optimizations requiring them
+ // TODO-CQ: set dominators for this block, to allow loop optimizations requiring them
// (e.g: hoisting expression in a loop with the same 'head' as this one)
/* Update the loop entry */
- pLoopDsc->lpHead = preHead;
+ pLoopDsc->lpHead = preHead;
pLoopDsc->lpFlags |= LPFLG_HAS_PREHEAD;
/* The new block becomes the 'head' of the loop - update bbRefs and bbPreds
@@ -6338,9 +6625,9 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
fgAddRefPred(preHead, head);
bool checkNestedLoops = false;
- for (flowList * pred = top->bbPreds; pred; pred = pred->flNext)
+ for (flowList* pred = top->bbPreds; pred; pred = pred->flNext)
{
- BasicBlock * predBlock = pred->flBlock;
+ BasicBlock* predBlock = pred->flBlock;
if (fgDominate(top, predBlock))
{
@@ -6358,58 +6645,59 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
switch (predBlock->bbJumpKind)
{
- case BBJ_NONE:
- noway_assert(predBlock == head);
- break;
-
- case BBJ_COND:
- if (predBlock == head)
- {
- noway_assert(predBlock->bbJumpDest != top);
+ case BBJ_NONE:
+ noway_assert(predBlock == head);
break;
- }
- __fallthrough;
- case BBJ_ALWAYS:
- case BBJ_EHCATCHRET:
- noway_assert(predBlock->bbJumpDest == top);
- predBlock->bbJumpDest = preHead;
- preHead->bbFlags |= BBF_JMP_TARGET|BBF_HAS_LABEL;
-
- if (predBlock == head)
- {
- // This is essentially the same case of predBlock being a BBJ_NONE. We may not be
- // able to make this a BBJ_NONE if it's an internal block (for example, a leave).
- // Just break, pred will be removed after switch.
- }
- else
- {
- fgRemoveRefPred(top, predBlock);
- fgAddRefPred(preHead, predBlock);
- }
- break;
+ case BBJ_COND:
+ if (predBlock == head)
+ {
+ noway_assert(predBlock->bbJumpDest != top);
+ break;
+ }
+ __fallthrough;
- case BBJ_SWITCH:
- unsigned jumpCnt; jumpCnt = predBlock->bbJumpSwt->bbsCount;
- BasicBlock * * jumpTab; jumpTab = predBlock->bbJumpSwt->bbsDstTab;
+ case BBJ_ALWAYS:
+ case BBJ_EHCATCHRET:
+ noway_assert(predBlock->bbJumpDest == top);
+ predBlock->bbJumpDest = preHead;
+ preHead->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
- do
- {
- assert (*jumpTab);
- if ((*jumpTab) == top)
+ if (predBlock == head)
+ {
+ // This is essentially the same case of predBlock being a BBJ_NONE. We may not be
+ // able to make this a BBJ_NONE if it's an internal block (for example, a leave).
+ // Just break, pred will be removed after switch.
+ }
+ else
{
- (*jumpTab) = preHead;
-
fgRemoveRefPred(top, predBlock);
fgAddRefPred(preHead, predBlock);
- preHead->bbFlags |= BBF_JMP_TARGET|BBF_HAS_LABEL;
}
- }
- while (++jumpTab, --jumpCnt);
+ break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ case BBJ_SWITCH:
+ unsigned jumpCnt;
+ jumpCnt = predBlock->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab;
+ jumpTab = predBlock->bbJumpSwt->bbsDstTab;
+
+ do
+ {
+ assert(*jumpTab);
+ if ((*jumpTab) == top)
+ {
+ (*jumpTab) = preHead;
+
+ fgRemoveRefPred(top, predBlock);
+ fgAddRefPred(preHead, predBlock);
+ preHead->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
+ }
+ } while (++jumpTab, --jumpCnt);
+
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
}
@@ -6417,7 +6705,7 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
fgRemoveRefPred(top, head);
fgAddRefPred(top, preHead);
- /*
+ /*
If we found at least one back-edge in the flowgraph pointing to the top/entry of the loop
(other than the back-edge of the loop we are considering) then we likely have nested
do-while loops with the same entry block and inserting the preheader block changes the head
@@ -6431,21 +6719,23 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
{
if (optLoopTable[l].lpHead == head)
{
- noway_assert(l != lnum); // pLoopDsc->lpHead was already changed from 'head' to 'preHead'
+ noway_assert(l != lnum); // pLoopDsc->lpHead was already changed from 'head' to 'preHead'
noway_assert(optLoopTable[l].lpEntry == top);
optUpdateLoopHead(l, optLoopTable[l].lpHead, preHead);
optLoopTable[l].lpFlags |= LPFLG_HAS_PREHEAD;
-#ifdef DEBUG
- if (verbose)
- printf("Same PreHeader (BB%02u) can be used for loop L%02u (BB%02u - BB%02u)\n\n",
- preHead->bbNum, l, top->bbNum, optLoopTable[l].lpBottom->bbNum);
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("Same PreHeader (BB%02u) can be used for loop L%02u (BB%02u - BB%02u)\n\n", preHead->bbNum,
+ l, top->bbNum, optLoopTable[l].lpBottom->bbNum);
+ }
#endif
}
}
}
}
-bool Compiler::optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum)
+bool Compiler::optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum)
{
unsigned lnum = blk->bbNatLoopNum;
while (lnum != BasicBlock::NOT_IN_LOOP)
@@ -6460,12 +6750,12 @@ bool Compiler::optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLn
return false;
}
-void Compiler::optComputeLoopSideEffects()
+void Compiler::optComputeLoopSideEffects()
{
- unsigned lnum;
+ unsigned lnum;
for (lnum = 0; lnum < optLoopCount; lnum++)
{
- VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarInOut, VarSetOps::MakeEmpty(this));
+ VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarInOut, VarSetOps::MakeEmpty(this));
VarSetOps::AssignNoCopy(this, optLoopTable[lnum].lpVarUseDef, VarSetOps::MakeEmpty(this));
optLoopTable[lnum].lpContainsCall = false;
}
@@ -6473,15 +6763,19 @@ void Compiler::optComputeLoopSideEffects()
for (lnum = 0; lnum < optLoopCount; lnum++)
{
if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED)
+ {
continue;
+ }
- if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP) // Is outermost...
+ if (optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP)
+ { // Is outermost...
optComputeLoopNestSideEffects(lnum);
+ }
}
- VarSetOps::AssignNoCopy(this, lvaFloatVars, VarSetOps::MakeEmpty(this));
+ VarSetOps::AssignNoCopy(this, lvaFloatVars, VarSetOps::MakeEmpty(this));
#ifndef _TARGET_64BIT_
- VarSetOps::AssignNoCopy(this, lvaLongVars, VarSetOps::MakeEmpty(this));
+ VarSetOps::AssignNoCopy(this, lvaLongVars, VarSetOps::MakeEmpty(this));
#endif
for (unsigned i = 0; i < lvaCount; i++)
@@ -6503,9 +6797,9 @@ void Compiler::optComputeLoopSideEffects()
}
}
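
The hunks above reformat optComputeLoopSideEffects without changing its two-pass shape: first clear every loop's summary sets, then walk only the outermost loops, relying on the per-block walk to push facts up the lpParent chain. A condensed sketch with assumed, simplified loop-table types:

    #include <set>
    #include <vector>

    const unsigned NOT_IN_LOOP = ~0u;

    struct LoopDsc
    {
        unsigned lpParent = NOT_IN_LOOP; // enclosing loop, or NOT_IN_LOOP for outermost
        bool     removed  = false;
        bool     lpContainsCall = false;
        std::set<unsigned> lpVarInOut;
        std::set<unsigned> lpVarUseDef;
    };

    void ComputeLoopSideEffects(std::vector<LoopDsc>& loops)
    {
        for (LoopDsc& dsc : loops) // pass 1: reset every loop's summary
        {
            dsc.lpVarInOut.clear();
            dsc.lpVarUseDef.clear();
            dsc.lpContainsCall = false;
        }
        for (size_t lnum = 0; lnum < loops.size(); lnum++) // pass 2: outermost loops only
        {
            if (loops[lnum].removed || loops[lnum].lpParent != NOT_IN_LOOP)
            {
                continue;
            }
            // the real code calls optComputeLoopNestSideEffects(lnum) here, which
            // walks the blocks from lpFirst to lpBottom and fills the sets cleared above
        }
    }

    int main()
    {
        std::vector<LoopDsc> loops(2);
        loops[1].lpParent = 0; // loop 1 nested inside loop 0
        ComputeLoopSideEffects(loops);
        return 0;
    }
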
-void Compiler::optComputeLoopNestSideEffects(unsigned lnum)
+void Compiler::optComputeLoopNestSideEffects(unsigned lnum)
{
- assert(optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP); // Requires: lnum is outermost.
+ assert(optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP); // Requires: lnum is outermost.
BasicBlock* botNext = optLoopTable[lnum].lpBottom->bbNext;
for (BasicBlock* bbInLoop = optLoopTable[lnum].lpFirst; bbInLoop != botNext; bbInLoop = bbInLoop->bbNext)
{
@@ -6513,14 +6807,14 @@ void Compiler::optComputeLoopNestSideEffects(unsigned lnum)
}
}
-void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
+void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
{
unsigned mostNestedLoop = blk->bbNatLoopNum;
assert(mostNestedLoop != BasicBlock::NOT_IN_LOOP);
-
+
AddVariableLivenessAllContainingLoops(mostNestedLoop, blk);
- bool heapHavoc = false; // True ==> there's a call or a memory store that has arbitrary heap effects.
+ bool heapHavoc = false; // True ==> there's a call or a memory store that has arbitrary heap effects.
// Now iterate over the remaining statements, and their trees.
for (GenTreePtr stmts = blk->FirstNonPhiDef(); (stmts != nullptr); stmts = stmts->gtNext)
@@ -6538,8 +6832,8 @@ void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
AddContainsCallAllContainingLoops(mostNestedLoop);
}
- // If we just set lpContainsCall or it was previously set
- if (optLoopTable[mostNestedLoop].lpContainsCall)
+ // If we just set lpContainsCall or it was previously set
+ if (optLoopTable[mostNestedLoop].lpContainsCall)
{
// We can early exit after both heapHavoc and lpContainsCall are both set to true.
break;
@@ -6558,11 +6852,11 @@ void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
if (GenTree::OperIsAssignment(oper))
{
- GenTreePtr lhs = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/true);
+ GenTreePtr lhs = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/ true);
if (lhs->OperGet() == GT_IND)
{
- GenTreePtr arg = lhs->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/true);
+ GenTreePtr arg = lhs->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/ true);
FieldSeqNode* fldSeqArrElem = nullptr;
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
@@ -6579,12 +6873,15 @@ void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
GenTreeLclVar* argLcl = arg->AsLclVar();
if (!fgExcludeFromSsa(argLcl->GetLclNum()))
{
- ValueNum argVN = lvaTable[argLcl->GetLclNum()].GetPerSsaData(argLcl->GetSsaNum())->m_vnPair.GetLiberal();
+ ValueNum argVN =
+ lvaTable[argLcl->GetLclNum()].GetPerSsaData(argLcl->GetSsaNum())->m_vnPair.GetLiberal();
VNFuncApp funcApp;
- if (argVN != ValueNumStore::NoVN && vnStore->GetVNFunc(argVN, &funcApp) && funcApp.m_func == VNF_PtrToArrElem)
+ if (argVN != ValueNumStore::NoVN && vnStore->GetVNFunc(argVN, &funcApp) &&
+ funcApp.m_func == VNF_PtrToArrElem)
{
assert(vnStore->IsVNHandle(funcApp.m_args[0]));
- CORINFO_CLASS_HANDLE elemType = CORINFO_CLASS_HANDLE(vnStore->ConstantValue<size_t>(funcApp.m_args[0]));
+ CORINFO_CLASS_HANDLE elemType =
+ CORINFO_CLASS_HANDLE(vnStore->ConstantValue<size_t>(funcApp.m_args[0]));
AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemType);
// Don't set heapHavoc below.
continue;
@@ -6601,13 +6898,13 @@ void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemTypeEq);
}
- else
- {
+ else
+ {
// We are only interested in IsFieldAddr()'s fldSeq out parameter.
//
- GenTreePtr obj = nullptr; // unused
- GenTreePtr staticOffset = nullptr; // unused
- FieldSeqNode* fldSeq = nullptr;
+ GenTreePtr obj = nullptr; // unused
+ GenTreePtr staticOffset = nullptr; // unused
+ FieldSeqNode* fldSeq = nullptr;
if (arg->IsFieldAddr(this, &obj, &staticOffset, &fldSeq) &&
(fldSeq != FieldSeqStore::NotAField()))
@@ -6627,7 +6924,7 @@ void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
heapHavoc = true;
}
}
- }
+ }
else if (lhs->OperGet() == GT_CLS_VAR)
{
AddModifiedFieldAllContainingLoops(mostNestedLoop, lhs->gtClsVar.gtClsVarHnd);
@@ -6636,54 +6933,57 @@ void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
else if (lhs->OperGet() == GT_LCL_VAR)
{
GenTreeLclVar* lhsLcl = lhs->AsLclVar();
- GenTreePtr rhs = tree->gtOp.gtOp2;
- ValueNum rhsVN = rhs->gtVNPair.GetLiberal();
+ GenTreePtr rhs = tree->gtOp.gtOp2;
+ ValueNum rhsVN = rhs->gtVNPair.GetLiberal();
// If we gave the RHS a value number, propagate it.
if (rhsVN != ValueNumStore::NoVN)
{
rhsVN = vnStore->VNNormVal(rhsVN);
if (!fgExcludeFromSsa(lhsLcl->GetLclNum()))
{
- lvaTable[lhsLcl->GetLclNum()].GetPerSsaData(lhsLcl->GetSsaNum())->m_vnPair.SetLiberal(rhsVN);
+ lvaTable[lhsLcl->GetLclNum()]
+ .GetPerSsaData(lhsLcl->GetSsaNum())
+ ->m_vnPair.SetLiberal(rhsVN);
}
}
}
}
- else // not GenTree::OperIsAssignment(oper)
+ else // not GenTree::OperIsAssignment(oper)
{
switch (oper)
{
- case GT_COMMA:
- tree->gtVNPair = tree->gtOp.gtOp2->gtVNPair;
- break;
+ case GT_COMMA:
+ tree->gtVNPair = tree->gtOp.gtOp2->gtVNPair;
+ break;
- case GT_ADDR:
- // Is it an addr of an array index expression?
- {
- GenTreePtr addrArg = tree->gtOp.gtOp1;
- if (addrArg->OperGet() == GT_IND)
+ case GT_ADDR:
+ // Is it an addr of a array index expression?
{
- // Is the LHS an array index expression?
- if (addrArg->gtFlags & GTF_IND_ARR_INDEX)
+ GenTreePtr addrArg = tree->gtOp.gtOp1;
+ if (addrArg->OperGet() == GT_IND)
{
- ArrayInfo arrInfo;
- bool b = GetArrayInfoMap()->Lookup(addrArg, &arrInfo);
- assert(b);
- CORINFO_CLASS_HANDLE elemType = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
- tree->gtVNPair.SetBoth(vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem,
- vnStore->VNForHandle(ssize_t(elemType), GTF_ICON_CLASS_HDL),
- // The rest are dummy arguments.
- vnStore->VNForNull(),
- vnStore->VNForNull(),
- vnStore->VNForNull()));
+ // Is the LHS an array index expression?
+ if (addrArg->gtFlags & GTF_IND_ARR_INDEX)
+ {
+ ArrayInfo arrInfo;
+ bool b = GetArrayInfoMap()->Lookup(addrArg, &arrInfo);
+ assert(b);
+ CORINFO_CLASS_HANDLE elemType =
+ EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
+ tree->gtVNPair.SetBoth(
+ vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem,
+ vnStore->VNForHandle(ssize_t(elemType), GTF_ICON_CLASS_HDL),
+ // The rest are dummy arguments.
+ vnStore->VNForNull(), vnStore->VNForNull(),
+ vnStore->VNForNull()));
+ }
}
}
- }
- break;
+ break;
- case GT_INITBLK:
- case GT_COPYBLK:
- case GT_COPYOBJ:
+ case GT_INITBLK:
+ case GT_COPYBLK:
+ case GT_COPYOBJ:
{
GenTreeLclVarCommon* lclVarTree;
bool isEntire;
@@ -6696,16 +6996,16 @@ void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
}
break;
- case GT_LOCKADD: // Binop
- case GT_XADD: // Binop
- case GT_XCHG: // Binop
- case GT_CMPXCHG: // Specialop
+ case GT_LOCKADD: // Binop
+ case GT_XADD: // Binop
+ case GT_XCHG: // Binop
+ case GT_CMPXCHG: // Specialop
{
heapHavoc = true;
}
break;
- case GT_CALL:
+ case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
@@ -6738,9 +7038,9 @@ void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
break;
}
- default:
- // All other gtOper node kinds, leave 'heapHavoc' unchanged (i.e. false)
- break;
+ default:
+ // All other gtOper node kinds, leave 'heapHavoc' unchanged (i.e. false)
+ break;
}
}
}
@@ -6753,34 +7053,34 @@ void Compiler::optComputeLoopSideEffectsOfBlock(BasicBlock* blk)
while (lnum != BasicBlock::NOT_IN_LOOP)
{
optLoopTable[lnum].lpLoopHasHeapHavoc = true;
- lnum = optLoopTable[lnum].lpParent;
+ lnum = optLoopTable[lnum].lpParent;
}
}
}
// Marks the containsCall information to "lnum" and any parent loops.
-void Compiler::AddContainsCallAllContainingLoops(unsigned lnum)
+void Compiler::AddContainsCallAllContainingLoops(unsigned lnum)
{
assert(0 <= lnum && lnum < optLoopCount);
while (lnum != BasicBlock::NOT_IN_LOOP)
{
optLoopTable[lnum].lpContainsCall = true;
- lnum = optLoopTable[lnum].lpParent;
+ lnum = optLoopTable[lnum].lpParent;
}
}
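
AddContainsCallAllContainingLoops above, like the heap-havoc marking just before it, uses the same walk-the-parent-chain idiom: set the flag on the innermost loop, then climb lpParent until NOT_IN_LOOP. A self-contained sketch of that idiom, with the loop table reduced to a parent index and a flag:

    #include <cassert>
    #include <vector>

    const unsigned NOT_IN_LOOP = ~0u;

    struct Loop
    {
        unsigned parent;       // index of the enclosing loop, or NOT_IN_LOOP
        bool     containsCall;
    };

    void AddContainsCallAllContainingLoops(std::vector<Loop>& loops, unsigned lnum)
    {
        assert(lnum < loops.size());
        while (lnum != NOT_IN_LOOP)
        {
            loops[lnum].containsCall = true; // mark this loop ...
            lnum = loops[lnum].parent;       // ... then climb to the loop that contains it
        }
    }

    int main()
    {
        std::vector<Loop> loops = {{NOT_IN_LOOP, false}, {0, false}};
        AddContainsCallAllContainingLoops(loops, 1);
        return (loops[0].containsCall && loops[1].containsCall) ? 0 : 1;
    }
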
// Adds the variable liveness information for 'blk' to 'this' LoopDsc
-void Compiler::LoopDsc::AddVariableLiveness(Compiler* comp, BasicBlock * blk)
+void Compiler::LoopDsc::AddVariableLiveness(Compiler* comp, BasicBlock* blk)
{
- VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveIn);
- VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveOut);
+ VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveIn);
+ VarSetOps::UnionD(comp, this->lpVarInOut, blk->bbLiveOut);
VarSetOps::UnionD(comp, this->lpVarUseDef, blk->bbVarUse);
VarSetOps::UnionD(comp, this->lpVarUseDef, blk->bbVarDef);
}
// Adds the variable liveness information for 'blk' to "lnum" and any parent loops.
-void Compiler::AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock * blk)
+void Compiler::AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk)
{
assert(0 <= lnum && lnum < optLoopCount);
while (lnum != BasicBlock::NOT_IN_LOOP)
@@ -6791,7 +7091,7 @@ void Compiler::AddVariableLivenessAllContainingLoops(unsigned lnu
}
// Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops.
-void Compiler::AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd)
+void Compiler::AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd)
{
assert(0 <= lnum && lnum < optLoopCount);
while (lnum != BasicBlock::NOT_IN_LOOP)
@@ -6802,7 +7102,7 @@ void Compiler::AddModifiedFieldAllContainingLoops(unsigned lnum,
}
// Adds "elemType" to the set of modified array element types of "lnum" and any parent loops.
-void Compiler::AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemClsHnd)
+void Compiler::AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemClsHnd)
{
assert(0 <= lnum && lnum < optLoopCount);
while (lnum != BasicBlock::NOT_IN_LOOP)
@@ -6821,11 +7121,11 @@ void Compiler::AddModifiedElemTypeAllContainingLoops(unsigned lnu
*/
/* static */
-Compiler::fgWalkResult Compiler::optRemoveTreeVisitor(GenTreePtr *pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::optRemoveTreeVisitor(GenTreePtr* pTree, fgWalkData* data)
{
- GenTreePtr tree = *pTree;
- Compiler * comp = data->compiler;
- GenTreePtr keepList = (GenTreePtr)(data->pCallbackData);
+ GenTreePtr tree = *pTree;
+ Compiler* comp = data->compiler;
+ GenTreePtr keepList = (GenTreePtr)(data->pCallbackData);
// We may have a non-NULL side effect list that is being kept
//
@@ -6835,10 +7135,10 @@ Compiler::fgWalkResult Compiler::optRemoveTreeVisitor(GenTreePtr *pTree, fg
while (keptTree->OperGet() == GT_COMMA)
{
assert(keptTree->OperKind() & GTK_SMPOP);
- GenTreePtr op1 = keptTree->gtOp.gtOp1;
+ GenTreePtr op1 = keptTree->gtOp.gtOp1;
GenTreePtr op2 = keptTree->gtGetOp2();
- // For the GT_COMMA case the op1 is part of the original CSE tree
+ // For the GT_COMMA case the op1 is part of the original CSE tree
// that is being kept because it contains some side-effect
//
if (tree == op1)
@@ -6863,10 +7163,10 @@ Compiler::fgWalkResult Compiler::optRemoveTreeVisitor(GenTreePtr *pTree, fg
// Look for any local variable references
- if (tree->gtOper == GT_LCL_VAR && comp->lvaLocalVarRefCounted)
+ if (tree->gtOper == GT_LCL_VAR && comp->lvaLocalVarRefCounted)
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
/* This variable ref is going away, decrease its ref counts */
@@ -6876,7 +7176,7 @@ Compiler::fgWalkResult Compiler::optRemoveTreeVisitor(GenTreePtr *pTree, fg
// make sure it's been initialized
assert(comp->compCurBB != nullptr);
- assert(comp->compCurBB->bbWeight <= BB_MAX_WEIGHT);
+ assert(comp->compCurBB->bbWeight <= BB_MAX_WEIGHT);
/* Decrement its lvRefCnt and lvRefCntWtd */
@@ -6885,14 +7185,14 @@ Compiler::fgWalkResult Compiler::optRemoveTreeVisitor(GenTreePtr *pTree, fg
varDsc->decRefCnts(comp->compCurBB->getBBWeight(comp), comp);
}
- return WALK_CONTINUE;
+ return WALK_CONTINUE;
}
/*****************************************************************************
*
* Routine called to decrement the LclVar ref counts when removing a tree
* during the remove RangeCheck phase.
- * This method will decrement the refcounts for any LclVars used below 'deadTree',
+ * This method will decrement the refcounts for any LclVars used below 'deadTree',
* unless the node is found in the 'keepList' (which are saved side effects)
* The keepList is communicated using the walkData.pCallbackData field
* Also the compCurBB must be set to the current BasicBlock which contains
@@ -6911,36 +7211,33 @@ void Compiler::optRemoveTree(GenTreePtr deadTree, GenTreePtr keepList)
* Given an array index node, mark it as not needing a range check.
*/
-void Compiler::optRemoveRangeCheck(GenTreePtr tree,
- GenTreePtr stmt,
- bool updateCSEcounts,
- unsigned sideEffFlags,
- bool forceRemove)
+void Compiler::optRemoveRangeCheck(
+ GenTreePtr tree, GenTreePtr stmt, bool updateCSEcounts, unsigned sideEffFlags, bool forceRemove)
{
- GenTreePtr add1;
- GenTreePtr * addp;
+ GenTreePtr add1;
+ GenTreePtr* addp;
- GenTreePtr nop1;
- GenTreePtr * nopp;
+ GenTreePtr nop1;
+ GenTreePtr* nopp;
- GenTreePtr icon;
- GenTreePtr mult;
+ GenTreePtr icon;
+ GenTreePtr mult;
- GenTreePtr base;
+ GenTreePtr base;
- ssize_t ival;
+ ssize_t ival;
#if !REARRANGE_ADDS
noway_assert(!"can't remove range checks without REARRANGE_ADDS right now");
#endif
- noway_assert(stmt->gtOper == GT_STMT);
- noway_assert(tree->gtOper == GT_COMMA);
+ noway_assert(stmt->gtOper == GT_STMT);
+ noway_assert(tree->gtOper == GT_COMMA);
noway_assert(tree->gtOp.gtOp1->gtOper == GT_ARR_BOUNDS_CHECK);
noway_assert(forceRemove || optIsRangeCheckRemovable(tree->gtOp.gtOp1));
GenTreeBoundsChk* bndsChk = tree->gtOp.gtOp1->AsBoundsChk();
-
+
#ifdef DEBUG
if (verbose)
{
@@ -6956,11 +7253,11 @@ void Compiler::optRemoveRangeCheck(GenTreePtr tree,
}
// Decrement the ref counts for any LclVars that are being deleted
- //
+ //
optRemoveTree(tree->gtOp.gtOp1, sideEffList);
// Just replace the bndsChk with a NOP as an operand to the GT_COMMA, if there are no side effects.
- tree->gtOp.gtOp1 = (sideEffList != NULL) ? sideEffList : gtNewNothingNode();
+ tree->gtOp.gtOp1 = (sideEffList != nullptr) ? sideEffList : gtNewNothingNode();
// TODO-CQ: We should also remove the GT_COMMA, but in any case we can no longer CSE the GT_COMMA.
tree->gtFlags |= GTF_DONT_CSE;
@@ -6981,7 +7278,6 @@ void Compiler::optRemoveRangeCheck(GenTreePtr tree,
gtDispTree(tree);
}
#endif
-
}
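
For readers skimming the reformatted optRemoveRangeCheck: the transformation keeps the GT_COMMA, extracts any side effects of the bounds check, and replaces the check itself with either those side effects or a no-op. A toy sketch of that rewrite, with hypothetical node types standing in for GenTree:

    #include <memory>
    #include <string>

    struct Node
    {
        std::string           oper;
        std::unique_ptr<Node> op1;
        std::unique_ptr<Node> op2;
    };

    std::unique_ptr<Node> MakeNode(const std::string& oper)
    {
        std::unique_ptr<Node> n(new Node());
        n->oper = oper;
        return n;
    }

    // COMMA(bndsChk, value)  ==>  COMMA(sideEffectsOrNop, value)
    void RemoveRangeCheck(Node* comma, std::unique_ptr<Node> sideEffList)
    {
        // mirrors: tree->gtOp.gtOp1 = (sideEffList != nullptr) ? sideEffList : gtNewNothingNode();
        comma->op1 = sideEffList ? std::move(sideEffList) : MakeNode("NOP");
    }

    int main()
    {
        std::unique_ptr<Node> comma = MakeNode("COMMA");
        comma->op1 = MakeNode("ARR_BOUNDS_CHECK");
        comma->op2 = MakeNode("IND");
        RemoveRangeCheck(comma.get(), nullptr); // no side effects worth keeping
        return (comma->op1->oper == "NOP") ? 0 : 1;
    }
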
/*****************************************************************************
@@ -6989,16 +7285,18 @@ void Compiler::optRemoveRangeCheck(GenTreePtr tree,
* multiplication node.
*/
-ssize_t Compiler::optGetArrayRefScaleAndIndex(GenTreePtr mul, GenTreePtr *pIndex DEBUGARG(bool bRngChk))
+ssize_t Compiler::optGetArrayRefScaleAndIndex(GenTreePtr mul, GenTreePtr* pIndex DEBUGARG(bool bRngChk))
{
- assert (mul);
- assert (mul->gtOper == GT_MUL || mul->gtOper == GT_LSH);
- assert (mul->gtOp.gtOp2->IsCnsIntOrI());
+ assert(mul);
+ assert(mul->gtOper == GT_MUL || mul->gtOper == GT_LSH);
+ assert(mul->gtOp.gtOp2->IsCnsIntOrI());
ssize_t scale = mul->gtOp.gtOp2->gtIntConCommon.IconValue();
- if (mul->gtOper == GT_LSH)
+ if (mul->gtOper == GT_LSH)
+ {
scale = ((ssize_t)1) << scale;
+ }
GenTreePtr index = mul->gtOp.gtOp1;
@@ -7012,10 +7310,12 @@ ssize_t Compiler::optGetArrayRefScaleAndIndex(GenTreePtr mul,
index = index->gtOp.gtOp1;
}
- assert (!bRngChk || index->gtOper != GT_COMMA);
+ assert(!bRngChk || index->gtOper != GT_COMMA);
if (pIndex)
+ {
*pIndex = index;
+ }
return scale;
}
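
The optGetArrayRefScaleAndIndex hunk above only changes layout, but the scale rule it contains is easy to state: a GT_MUL by a constant uses that constant as the scale, while a GT_LSH encodes a power-of-two scale. A small sketch under that assumption:

    #include <cassert>
    #include <cstddef>

    enum Oper { OP_MUL, OP_LSH };

    struct ScaledIndex
    {
        Oper      oper;
        ptrdiff_t cns; // the constant second operand of the MUL/LSH
    };

    ptrdiff_t GetArrayRefScale(const ScaledIndex& mul)
    {
        assert(mul.oper == OP_MUL || mul.oper == OP_LSH);
        ptrdiff_t scale = mul.cns;
        if (mul.oper == OP_LSH)
        {
            scale = ptrdiff_t(1) << scale; // "index << 3" means a scale of 8
        }
        return scale;
    }

    int main()
    {
        assert(GetArrayRefScale({OP_MUL, 4}) == 4);
        assert(GetArrayRefScale({OP_LSH, 3}) == 8);
        return 0;
    }
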
@@ -7028,7 +7328,10 @@ ssize_t Compiler::optGetArrayRefScaleAndIndex(GenTreePtr mul,
*
*/
-GenTreePtr Compiler::optFindLocalInit(BasicBlock *block, GenTreePtr local, VARSET_TP* pKilledInOut, bool* pLhsRhsKilledAfterInit)
+GenTreePtr Compiler::optFindLocalInit(BasicBlock* block,
+ GenTreePtr local,
+ VARSET_TP* pKilledInOut,
+ bool* pLhsRhsKilledAfterInit)
{
assert(pKilledInOut);
assert(pLhsRhsKilledAfterInit);
@@ -7038,17 +7341,17 @@ GenTreePtr Compiler::optFindLocalInit(BasicBlock *block, GenTreePtr local, VARSE
unsigned LclNum = local->gtLclVarCommon.gtLclNum;
GenTreePtr list = block->bbTreeList;
- if (list == NULL)
+ if (list == nullptr)
{
- return NULL;
+ return nullptr;
}
- GenTreePtr rhs = NULL;
+ GenTreePtr rhs = nullptr;
GenTreePtr stmt = list;
do
{
stmt = stmt->gtPrev;
- if (stmt == NULL)
+ if (stmt == nullptr)
{
break;
}
@@ -7070,39 +7373,36 @@ GenTreePtr Compiler::optFindLocalInit(BasicBlock *block, GenTreePtr local, VARSE
else
{
*pLhsRhsKilledAfterInit = true;
- assert(rhs == NULL);
+ assert(rhs == nullptr);
}
break;
}
else
{
LclVarDsc* varDsc = optIsTrackedLocal(tree->gtOp.gtOp1);
- if (varDsc == NULL)
+ if (varDsc == nullptr)
{
- return NULL;
+ return nullptr;
}
VarSetOps::AddElemD(this, *pKilledInOut, varDsc->lvVarIndex);
}
}
- }
- while (stmt != list);
+ } while (stmt != list);
- if (rhs == NULL)
+ if (rhs == nullptr)
{
- return NULL;
+ return nullptr;
}
// If any local in the RHS is killed in intervening code, or RHS has an indirection, return NULL.
varRefKinds rhsRefs = VR_NONE;
- VARSET_TP VARSET_INIT_NOCOPY(rhsLocals, VarSetOps::UninitVal());
- bool b = lvaLclVarRefs(rhs, NULL, &rhsRefs, &rhsLocals);
- if (!b ||
- !VarSetOps::IsEmptyIntersection(this, rhsLocals, *pKilledInOut) ||
- (rhsRefs != VR_NONE))
+ VARSET_TP VARSET_INIT_NOCOPY(rhsLocals, VarSetOps::UninitVal());
+ bool b = lvaLclVarRefs(rhs, nullptr, &rhsRefs, &rhsLocals);
+ if (!b || !VarSetOps::IsEmptyIntersection(this, rhsLocals, *pKilledInOut) || (rhsRefs != VR_NONE))
{
// If RHS has been indirectly referenced, consider it a write and a kill.
*pLhsRhsKilledAfterInit = true;
- return NULL;
+ return nullptr;
}
return rhs;
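
optFindLocalInit, reformatted above, walks the block's statements backwards looking for the assignment that defines `local`, while collecting the set of locals redefined after that point; the found RHS is then rejected if any local it reads is in that kill set (the real code also rejects RHS trees that contain indirections and uses VARSET bit vectors rather than a std::set). A condensed sketch with assumed statement shapes:

    #include <cassert>
    #include <set>
    #include <vector>

    struct Stmt
    {
        unsigned              lhsLcl;  // local assigned by this statement
        std::vector<unsigned> rhsLcls; // locals read by its RHS
    };

    // Returns the index of a usable initializing assignment to lclNum, or -1.
    int FindLocalInit(const std::vector<Stmt>& block, unsigned lclNum)
    {
        std::set<unsigned> killed; // locals redefined between the init and the block end
        for (int i = (int)block.size() - 1; i >= 0; i--)
        {
            const Stmt& s = block[(size_t)i];
            if (s.lhsLcl == lclNum)
            {
                for (unsigned use : s.rhsLcls)
                {
                    if (killed.count(use) != 0)
                    {
                        return -1; // an RHS operand was overwritten after the init
                    }
                }
                return i;
            }
            killed.insert(s.lhsLcl); // any other definition kills that local
        }
        return -1;
    }

    int main()
    {
        //  s0: V01 = V02;   s1: V02 = 0;   looking for the init of V01
        std::vector<Stmt> block = {{1, {2}}, {2, {}}};
        assert(FindLocalInit(block, 1) == -1); // V02 was killed after the init
        return 0;
    }
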
@@ -7115,11 +7415,9 @@ GenTreePtr Compiler::optFindLocalInit(BasicBlock *block, GenTreePtr local, VARSE
#if FANCY_ARRAY_OPT
-bool Compiler::optIsNoMore(GenTreePtr op1, GenTreePtr op2,
- int add1, int add2)
+bool Compiler::optIsNoMore(GenTreePtr op1, GenTreePtr op2, int add1, int add2)
{
- if (op1->gtOper == GT_CNS_INT &&
- op2->gtOper == GT_CNS_INT)
+ if (op1->gtOper == GT_CNS_INT && op2->gtOper == GT_CNS_INT)
{
add1 += op1->gtIntCon.gtIconVal;
add2 += op2->gtIntCon.gtIconVal;
@@ -7128,25 +7426,25 @@ bool Compiler::optIsNoMore(GenTreePtr op1, GenTreePtr op2,
{
/* Check for +/- constant on either operand */
- if (op1->gtOper == GT_ADD && op1->gtOp.gtOp2->gtOper == GT_CNS_INT)
+ if (op1->gtOper == GT_ADD && op1->gtOp.gtOp2->gtOper == GT_CNS_INT)
{
add1 += op1->gtOp.gtOp2->gtIntCon.gtIconVal;
- op1 = op1->gtOp.gtOp1;
+ op1 = op1->gtOp.gtOp1;
}
- if (op2->gtOper == GT_ADD && op2->gtOp.gtOp2->gtOper == GT_CNS_INT)
+ if (op2->gtOper == GT_ADD && op2->gtOp.gtOp2->gtOper == GT_CNS_INT)
{
add2 += op2->gtOp.gtOp2->gtIntCon.gtIconVal;
- op2 = op2->gtOp.gtOp1;
+ op2 = op2->gtOp.gtOp1;
}
/* We only allow local variable references */
- if (op1->gtOper != GT_LCL_VAR)
+ if (op1->gtOper != GT_LCL_VAR)
return false;
- if (op2->gtOper != GT_LCL_VAR)
+ if (op2->gtOper != GT_LCL_VAR)
return false;
- if (op1->gtLclVarCommon.gtLclNum != op2->gtLclVarCommon.gtLclNum)
+ if (op1->gtLclVarCommon.gtLclNum != op2->gtLclVarCommon.gtLclNum)
return false;
/* NOTE: Caller ensures that this variable has only one def */
@@ -7154,10 +7452,9 @@ bool Compiler::optIsNoMore(GenTreePtr op1, GenTreePtr op2,
// printf("limit [%d]:\n", add1); gtDispTree(op1);
// printf("size [%d]:\n", add2); gtDispTree(op2);
// printf("\n");
-
}
- return (bool)(add1 <= add2);
+ return (bool)(add1 <= add2);
}
#endif
@@ -7170,7 +7467,7 @@ bool Compiler::optIsNoMore(GenTreePtr op1, GenTreePtr op2,
// context - data structure where all loop cloning info is kept. The
// optInfo fields of the context are updated with the
// identified optimization candidates.
-//
+//
void Compiler::optObtainLoopCloningOpts(LoopCloneContext* context)
{
for (unsigned i = 0; i < optLoopCount; i++)
@@ -7207,7 +7504,7 @@ void Compiler::optObtainLoopCloningOpts(LoopCloneContext* context)
// optimization candidates and update the "context" parameter with all the
// contextual information necessary to perform the optimization later.
//
-bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context)
+bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context)
{
noway_assert(loopNum < optLoopCount);
@@ -7227,8 +7524,8 @@ bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneC
}
BasicBlock* head = pLoop->lpHead;
- BasicBlock* end = pLoop->lpBottom;
- BasicBlock* beg = head->bbNext;
+ BasicBlock* end = pLoop->lpBottom;
+ BasicBlock* beg = head->bbNext;
if (end->bbJumpKind != BBJ_COND)
{
@@ -7236,7 +7533,7 @@ bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneC
return false;
}
- if (end->bbJumpDest != beg)
+ if (end->bbJumpDest != beg)
{
JITDUMP("> Branch at loop 'end' not looping to 'begin'.\n");
return false;
@@ -7249,16 +7546,17 @@ bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneC
return false;
}
- if ((pLoop->lpFlags & LPFLG_CONST_LIMIT) == 0 &&
- (pLoop->lpFlags & LPFLG_VAR_LIMIT) == 0 &&
+ if ((pLoop->lpFlags & LPFLG_CONST_LIMIT) == 0 && (pLoop->lpFlags & LPFLG_VAR_LIMIT) == 0 &&
(pLoop->lpFlags & LPFLG_ARRLEN_LIMIT) == 0)
{
JITDUMP("> Loop limit is neither constant, variable or array length\n");
return false;
}
- if (!(((pLoop->lpTestOper() == GT_LT || pLoop->lpTestOper() == GT_LE) && (pLoop->lpIterOper() == GT_ADD || pLoop->lpIterOper() == GT_ASG_ADD)) ||
- ((pLoop->lpTestOper() == GT_GT || pLoop->lpTestOper() == GT_GE) && (pLoop->lpIterOper() == GT_SUB || pLoop->lpIterOper() == GT_ASG_SUB))))
+ if (!(((pLoop->lpTestOper() == GT_LT || pLoop->lpTestOper() == GT_LE) &&
+ (pLoop->lpIterOper() == GT_ADD || pLoop->lpIterOper() == GT_ASG_ADD)) ||
+ ((pLoop->lpTestOper() == GT_GT || pLoop->lpTestOper() == GT_GE) &&
+ (pLoop->lpIterOper() == GT_SUB || pLoop->lpIterOper() == GT_ASG_SUB))))
{
JITDUMP("> Loop test (%s) doesn't agree with the direction (%s) of the pLoop->\n",
GenTree::NodeName(pLoop->lpTestOper()), GenTree::NodeName(pLoop->lpIterOper()));
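
The condition being re-wrapped above encodes a simple compatibility rule for loop cloning: an up-counting loop (iterator updated with ADD, or its op-assign form) must exit on < or <=, while a down-counting loop (SUB) must exit on > or >=. A one-function sketch of that rule, collapsing the op-assign variants into ADD/SUB:

    #include <cassert>

    enum Oper { OP_LT, OP_LE, OP_GT, OP_GE, OP_ADD, OP_SUB };

    bool TestAgreesWithDirection(Oper testOper, Oper iterOper)
    {
        bool countsUp   = (iterOper == OP_ADD);
        bool countsDown = (iterOper == OP_SUB);
        return (countsUp && (testOper == OP_LT || testOper == OP_LE)) ||
               (countsDown && (testOper == OP_GT || testOper == OP_GE));
    }

    int main()
    {
        assert(TestAgreesWithDirection(OP_LT, OP_ADD));
        assert(!TestAgreesWithDirection(OP_LT, OP_SUB)); // counting down but testing with <
        return 0;
    }
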
@@ -7267,7 +7565,8 @@ bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneC
if (!(pLoop->lpTestTree->OperKind() & GTK_RELOP) || !(pLoop->lpTestTree->gtFlags & GTF_RELOP_ZTT))
{
- JITDUMP("> Loop inversion NOT present, loop test [%06u] may not protect entry from head.\n", pLoop->lpTestTree->gtTreeID);
+ JITDUMP("> Loop inversion NOT present, loop test [%06u] may not protect entry from head.\n",
+ pLoop->lpTestTree->gtTreeID);
return false;
}
@@ -7276,7 +7575,8 @@ bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneC
noway_assert((op1->gtOper == GT_LCL_VAR) && (op1->gtLclVarCommon.gtLclNum == ivLclNum));
#endif
- JITDUMP("Checking blocks BB%02d..BB%02d for optimization candidates\n", beg->bbNum, end->bbNext ? end->bbNext->bbNum : 0);
+ JITDUMP("Checking blocks BB%02d..BB%02d for optimization candidates\n", beg->bbNum,
+ end->bbNext ? end->bbNext->bbNum : 0);
LoopCloneVisitorInfo info(context, loopNum, nullptr);
for (BasicBlock* block = beg; block != end->bbNext; block = block->bbNext)
@@ -7315,22 +7615,22 @@ bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneC
//
// TODO-CQ: CLONE: After morph make sure this method extracts values before morph.
//
-// [000000001AF828D8] ---XG------- indir int
+// [000000001AF828D8] ---XG------- indir int
// [000000001AF872C8] ------------ const long 16 Fseq[#FirstElem]
-// [000000001AF87340] ------------ + byref
+// [000000001AF87340] ------------ + byref
// [000000001AF87160] -------N---- const long 2
-// [000000001AF871D8] ------------ << long
+// [000000001AF871D8] ------------ << long
// [000000001AF870C0] ------------ cast long <- int
-// [000000001AF86F30] i----------- lclVar int V04 loc0
-// [000000001AF87250] ------------ + byref
-// [000000001AF86EB8] ------------ lclVar ref V01 arg1
-// [000000001AF87468] ---XG------- comma int
-// [000000001AF87020] ---X-------- arrBndsChk void
-// [000000001AF86FA8] ---X-------- arrLen int
-// [000000001AF827E8] ------------ lclVar ref V01 arg1
-// [000000001AF82860] ------------ lclVar int V04 loc0
-// [000000001AF829F0] -A-XG------- = int
-// [000000001AF82978] D------N---- lclVar int V06 tmp0
+// [000000001AF86F30] i----------- lclVar int V04 loc0
+// [000000001AF87250] ------------ + byref
+// [000000001AF86EB8] ------------ lclVar ref V01 arg1
+// [000000001AF87468] ---XG------- comma int
+// [000000001AF87020] ---X-------- arrBndsChk void
+// [000000001AF86FA8] ---X-------- arrLen int
+// [000000001AF827E8] ------------ lclVar ref V01 arg1
+// [000000001AF82860] ------------ lclVar int V04 loc0
+// [000000001AF829F0] -A-XG------- = int
+// [000000001AF82978] D------N---- lclVar int V06 tmp0
//
bool Compiler::optExtractArrIndex(GenTreePtr tree, ArrIndex* result, unsigned lhsNum)
{
@@ -7381,7 +7681,7 @@ bool Compiler::optExtractArrIndex(GenTreePtr tree, ArrIndex* result, unsigned lh
{
return false;
}
- GenTreePtr si = sib->gtGetOp2();
+ GenTreePtr si = sib->gtGetOp2();
GenTreePtr base = sib->gtGetOp1();
if (si->gtOper != GT_LSH)
{
@@ -7442,18 +7742,18 @@ bool Compiler::optExtractArrIndex(GenTreePtr tree, ArrIndex* result, unsigned lh
//
// V00[V01][V02] would be morphed as:
//
-// [000000001B366848] ---XG------- indir int
+// [000000001B366848] ---XG------- indir int
// [000000001B36BC50] ------------ V05 + (V02 << 2) + 16
-// [000000001B36C200] ---XG------- comma int
+// [000000001B36C200] ---XG------- comma int
// [000000001B36BDB8] ---X-------- arrBndsChk(V05, V02)
-// [000000001B36C278] -A-XG------- comma int
+// [000000001B36C278] -A-XG------- comma int
// [000000001B366730] R--XG------- indir ref
// [000000001B36C2F0] ------------ V00 + (V01 << 3) + 24
-// [000000001B36C818] ---XG------- comma ref
+// [000000001B36C818] ---XG------- comma ref
// [000000001B36C458] ---X-------- arrBndsChk(V00, V01)
-// [000000001B36BB60] -A-XG------- = ref
-// [000000001B36BAE8] D------N---- lclVar ref V05 tmp2
-// [000000001B36A668] -A-XG------- = int
+// [000000001B36BB60] -A-XG------- = ref
+// [000000001B36BAE8] D------N---- lclVar ref V05 tmp2
+// [000000001B36A668] -A-XG------- = int
// [000000001B36A5F0] D------N---- lclVar int V03 tmp0
//
// Assumption:
@@ -7483,8 +7783,8 @@ bool Compiler::optReconstructArrIndex(GenTreePtr tree, ArrIndex* result, unsigne
{
return false;
}
- unsigned lhsNum = lhs->gtLclVarCommon.gtLclNum;
- GenTreePtr after = tree->gtGetOp2();
+ unsigned lhsNum = lhs->gtLclVarCommon.gtLclNum;
+ GenTreePtr after = tree->gtGetOp2();
// Pass the "lhsNum", so we can verify if indeed it is used as the array base.
return optExtractArrIndex(after, result, lhsNum);
}
@@ -7494,10 +7794,9 @@ bool Compiler::optReconstructArrIndex(GenTreePtr tree, ArrIndex* result, unsigne
/* static */
Compiler::fgWalkResult Compiler::optCanOptimizeByLoopCloningVisitor(GenTreePtr* pTree, Compiler::fgWalkData* data)
{
- return data->compiler->optCanOptimizeByLoopCloning(*pTree, (LoopCloneVisitorInfo*) data->pCallbackData);
+ return data->compiler->optCanOptimizeByLoopCloning(*pTree, (LoopCloneVisitorInfo*)data->pCallbackData);
}
-
//-------------------------------------------------------------------------
// optIsStackLocalInvariant: Is stack local invariant in loop.
//
@@ -7535,7 +7834,7 @@ bool Compiler::optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum)
// If array index can be reconstructed, check if the iter var of the loop matches the
// array index var in some dim. Also ensure other index vars before the identified
// dim are loop invariant.
-//
+//
// Return Value:
// Skip sub trees if the optimization candidate is identified or else continue walking
//
@@ -7586,11 +7885,13 @@ Compiler::fgWalkResult Compiler::optCanOptimizeByLoopCloning(GenTreePtr tree, Lo
}
#endif
// Update the loop context.
- info->context->EnsureLoopOptInfo(info->loopNum)->Push(new (this, CMK_LoopOpt) LcJaggedArrayOptInfo(arrIndex, dim, info->stmt));
+ info->context->EnsureLoopOptInfo(info->loopNum)
+ ->Push(new (this, CMK_LoopOpt) LcJaggedArrayOptInfo(arrIndex, dim, info->stmt));
}
else
{
- JITDUMP("Induction V%02d is not used as index on dim %d\n", optLoopTable[info->loopNum].lpIterVar(), dim);
+ JITDUMP("Induction V%02d is not used as index on dim %d\n", optLoopTable[info->loopNum].lpIterVar(),
+ dim);
}
}
return WALK_SKIP_SUBTREES;
@@ -7612,13 +7913,12 @@ struct optRangeCheckDsc
Walk to make sure that only locals and constants are contained in the index
for a range check
*/
-Compiler::fgWalkResult Compiler::optValidRangeCheckIndex(GenTreePtr * pTree, fgWalkData *data)
+Compiler::fgWalkResult Compiler::optValidRangeCheckIndex(GenTreePtr* pTree, fgWalkData* data)
{
- GenTreePtr tree = *pTree;
- optRangeCheckDsc* pData= (optRangeCheckDsc*) data->pCallbackData;
+ GenTreePtr tree = *pTree;
+ optRangeCheckDsc* pData = (optRangeCheckDsc*)data->pCallbackData;
- if (tree->gtOper == GT_IND || tree->gtOper == GT_CLS_VAR ||
- tree->gtOper == GT_FIELD || tree->gtOper == GT_LCL_FLD)
+ if (tree->gtOper == GT_IND || tree->gtOper == GT_CLS_VAR || tree->gtOper == GT_FIELD || tree->gtOper == GT_LCL_FLD)
{
pData->bValidIndex = false;
return WALK_ABORT;
@@ -7645,19 +7945,19 @@ bool Compiler::optIsRangeCheckRemovable(GenTreePtr tree)
{
noway_assert(tree->gtOper == GT_ARR_BOUNDS_CHECK);
GenTreeBoundsChk* bndsChk = tree->AsBoundsChk();
- GenTreePtr pArray = bndsChk->GetArray();
- if (pArray == NULL && !bndsChk->gtArrLen->IsCnsIntOrI())
+ GenTreePtr pArray = bndsChk->GetArray();
+ if (pArray == nullptr && !bndsChk->gtArrLen->IsCnsIntOrI())
{
return false;
}
GenTreePtr pIndex = bndsChk->gtIndex;
-
- // The length must be a constant (the pArray == NULL case) or the array reference must be a local.
+
+ // The length must be a constant (the pArray == NULL case) or the array reference must be a local.
// Otherwise we can be targeted by malicious race-conditions.
- if (pArray != NULL)
+ if (pArray != nullptr)
{
- if ( pArray->gtOper != GT_LCL_VAR )
- {
+ if (pArray->gtOper != GT_LCL_VAR)
+ {
#ifdef DEBUG
if (verbose)
@@ -7670,7 +7970,7 @@ bool Compiler::optIsRangeCheckRemovable(GenTreePtr tree)
}
else
{
- noway_assert(pArray->gtType == TYP_REF);
+ noway_assert(pArray->gtType == TYP_REF);
noway_assert(pArray->gtLclVarCommon.gtLclNum < lvaCount);
if (lvaTable[pArray->gtLclVarCommon.gtLclNum].lvAddrExposed)
@@ -7689,27 +7989,25 @@ bool Compiler::optIsRangeCheckRemovable(GenTreePtr tree)
}
}
}
-
-
+
optRangeCheckDsc Data;
- Data.pCompiler =this;
- Data.bValidIndex=true;
+ Data.pCompiler = this;
+ Data.bValidIndex = true;
fgWalkTreePre(&pIndex, optValidRangeCheckIndex, &Data);
-
+
if (!Data.bValidIndex)
{
- #ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
printf("Can't remove range check with this index");
gtDispTree(pIndex);
}
- #endif
+#endif
return false;
}
-
return true;
}
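
The removability test reformatted above boils down to three conditions: either the bounds-check length is a constant or the array reference is a local, that local must not be address exposed, and the index expression may contain only locals and constants. A compressed sketch of those checks, with the tree walk flattened into plain flags:

    #include <cassert>
    #include <vector>

    struct IndexNode
    {
        bool isLclOrCns; // GT_LCL_VAR / GT_CNS_INT are fine; IND, FIELD, CLS_VAR, LCL_FLD are not
    };

    bool IsRangeCheckRemovable(bool lengthIsConst, bool arrayIsLocal, bool arrayAddrExposed,
                               const std::vector<IndexNode>& indexNodes)
    {
        if (!lengthIsConst)
        {
            if (!arrayIsLocal || arrayAddrExposed)
            {
                return false; // a racing thread could otherwise swap in a shorter array
            }
        }
        for (const IndexNode& node : indexNodes)
        {
            if (!node.isLclOrCns)
            {
                return false; // the index must not be able to observe memory side effects
            }
        }
        return true;
    }

    int main()
    {
        std::vector<IndexNode> idx = {{true}, {true}};
        assert(IsRangeCheckRemovable(false, true, false, idx));
        assert(!IsRangeCheckRemovable(false, false, false, idx));
        return 0;
    }
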
@@ -7722,35 +8020,41 @@ bool Compiler::optIsRangeCheckRemovable(GenTreePtr tree)
#ifdef DEBUG
-void Compiler::optOptimizeBoolsGcStress(BasicBlock * condBlock)
+void Compiler::optOptimizeBoolsGcStress(BasicBlock* condBlock)
{
if (!compStressCompile(STRESS_OPT_BOOLS_GC, 20))
+ {
return;
-
+ }
+
noway_assert(condBlock->bbJumpKind == BBJ_COND);
- GenTreePtr condStmt = condBlock->bbTreeList->gtPrev->gtStmt.gtStmtExpr;
+ GenTreePtr condStmt = condBlock->bbTreeList->gtPrev->gtStmt.gtStmtExpr;
noway_assert(condStmt->gtOper == GT_JTRUE);
- bool isBool;
- GenTreePtr relop;
+ bool isBool;
+ GenTreePtr relop;
- GenTreePtr comparand = optIsBoolCond(condStmt, &relop, &isBool);
-
- if (comparand == NULL || !varTypeIsGC(comparand->TypeGet()))
+ GenTreePtr comparand = optIsBoolCond(condStmt, &relop, &isBool);
+
+ if (comparand == nullptr || !varTypeIsGC(comparand->TypeGet()))
+ {
return;
+ }
- if (comparand->gtFlags & (GTF_ASG|GTF_CALL|GTF_ORDER_SIDEEFF))
+ if (comparand->gtFlags & (GTF_ASG | GTF_CALL | GTF_ORDER_SIDEEFF))
+ {
return;
+ }
- GenTreePtr comparandClone = gtCloneExpr(comparand);
+ GenTreePtr comparandClone = gtCloneExpr(comparand);
// Bump up the ref-counts of any variables in 'comparandClone'
compCurBB = condBlock;
- fgWalkTreePre(&comparandClone, Compiler::lvaIncRefCntsCB, (void *)this, true);
-
+ fgWalkTreePre(&comparandClone, Compiler::lvaIncRefCntsCB, (void*)this, true);
+
noway_assert(relop->gtOp.gtOp1 == comparand);
- genTreeOps oper = compStressCompile(STRESS_OPT_BOOLS_GC, 50) ? GT_OR : GT_AND;
+ genTreeOps oper = compStressCompile(STRESS_OPT_BOOLS_GC, 50) ? GT_OR : GT_AND;
relop->gtOp.gtOp1 = gtNewOperNode(oper, TYP_I_IMPL, comparand, comparandClone);
// Comparand type is already checked, and we have const int, there is no harm
@@ -7773,19 +8077,19 @@ void Compiler::optOptimizeBoolsGcStress(BasicBlock * condBlock)
* value then we morph the tree by reversing the GT_EQ/GT_NE and change the 1 to 0.
*/
-GenTree * Compiler::optIsBoolCond(GenTree * condBranch,
- GenTree * * compPtr,
- bool * boolPtr)
+GenTree* Compiler::optIsBoolCond(GenTree* condBranch, GenTree** compPtr, bool* boolPtr)
{
bool isBool = false;
noway_assert(condBranch->gtOper == GT_JTRUE);
- GenTree * cond = condBranch->gtOp.gtOp1;
+ GenTree* cond = condBranch->gtOp.gtOp1;
/* The condition must be "!= 0" or "== 0" */
if ((cond->gtOper != GT_EQ) && (cond->gtOper != GT_NE))
+ {
return nullptr;
+ }
/* Return the compare node to the caller */
@@ -7793,14 +8097,18 @@ GenTree * Compiler::optIsBoolCond(GenTree * condBranch,
/* Get hold of the comparands */
- GenTree * opr1 = cond->gtOp.gtOp1;
- GenTree * opr2 = cond->gtOp.gtOp2;
+ GenTree* opr1 = cond->gtOp.gtOp1;
+ GenTree* opr2 = cond->gtOp.gtOp2;
- if (opr2->gtOper != GT_CNS_INT)
- return nullptr;
+ if (opr2->gtOper != GT_CNS_INT)
+ {
+ return nullptr;
+ }
if (!opr2->IsIntegralConst(0) && !opr2->IsIntegralConst(1))
+ {
return nullptr;
+ }
ssize_t ival2 = opr2->gtIntCon.gtIconVal;
@@ -7808,12 +8116,11 @@ GenTree * Compiler::optIsBoolCond(GenTree * condBranch,
* We can either have a boolean expression (marked GTF_BOOLEAN) or
* a local variable that is marked as being boolean (lvIsBoolean) */
- if (opr1->gtFlags & GTF_BOOLEAN)
+ if (opr1->gtFlags & GTF_BOOLEAN)
{
isBool = true;
}
- else if ((opr1->gtOper == GT_CNS_INT) &&
- (opr1->IsIntegralConst(0) || opr1->IsIntegralConst(1)))
+ else if ((opr1->gtOper == GT_CNS_INT) && (opr1->IsIntegralConst(0) || opr1->IsIntegralConst(1)))
{
isBool = true;
}
@@ -7821,17 +8128,19 @@ GenTree * Compiler::optIsBoolCond(GenTree * condBranch,
{
/* is it a boolean local variable */
- unsigned lclNum = opr1->gtLclVarCommon.gtLclNum;
+ unsigned lclNum = opr1->gtLclVarCommon.gtLclNum;
noway_assert(lclNum < lvaCount);
if (lvaTable[lclNum].lvIsBoolean)
+ {
isBool = true;
+ }
}
/* Was our comparison against the constant 1 (i.e. true) */
- if (ival2 == 1)
+ if (ival2 == 1)
{
- // If this is a boolean expression tree we can reverse the relop
+ // If this is a boolean expression tree we can reverse the relop
// and change the true to false.
if (isBool)
{
@@ -7839,18 +8148,19 @@ GenTree * Compiler::optIsBoolCond(GenTree * condBranch,
opr2->gtIntCon.gtIconVal = 0;
}
else
- return NULL;
+ {
+ return nullptr;
+ }
}
*boolPtr = isBool;
return opr1;
}
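
optIsBoolCond, reformatted above, accepts only JTRUE(relop(x, 0/1)) shapes; when the constant is 1 and the operand is known to be boolean it reverses the relop and rewrites the constant to 0, so later folding only has to reason about comparisons with zero. A sketch of that normalization over an assumed, minimal condition record:

    #include <cassert>

    enum Oper { EQ, NE };

    struct Cond
    {
        Oper      oper;   // the relop of the JTRUE condition
        long long cns;    // its second operand, must be 0 or 1
        bool      isBool; // operand is GTF_BOOLEAN / lvIsBoolean / a 0-1 constant
    };

    // Returns false if the condition cannot be normalized to a compare against 0.
    bool NormalizeBoolCond(Cond& c)
    {
        if (c.cns != 0 && c.cns != 1)
        {
            return false;
        }
        if (c.cns == 1)
        {
            if (!c.isBool)
            {
                return false;                  // only safe when the operand is known boolean
            }
            c.oper = (c.oper == EQ) ? NE : EQ; // reverse the relop ...
            c.cns  = 0;                        // ... and compare against 0 instead
        }
        return true;
    }

    int main()
    {
        Cond c = {EQ, 1, true};
        assert(NormalizeBoolCond(c) && c.oper == NE && c.cns == 0);
        return 0;
    }
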
-
-void Compiler::optOptimizeBools()
+void Compiler::optOptimizeBools()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("*************** In optOptimizeBools()\n");
if (verboseTrees)
@@ -7860,32 +8170,38 @@ void Compiler::optOptimizeBools()
}
}
#endif
- bool change;
+ bool change;
do
{
change = false;
- for (BasicBlock * b1 = fgFirstBB; b1; b1 = b1->bbNext)
+ for (BasicBlock* b1 = fgFirstBB; b1; b1 = b1->bbNext)
{
/* We're only interested in conditional jumps here */
- if (b1->bbJumpKind != BBJ_COND)
+ if (b1->bbJumpKind != BBJ_COND)
+ {
continue;
+ }
/* If there is no next block, we're done */
- BasicBlock * b2 = b1->bbNext;
- if (!b2)
+ BasicBlock* b2 = b1->bbNext;
+ if (!b2)
+ {
break;
+ }
/* The next block must not be marked as BBF_DONT_REMOVE */
- if (b2->bbFlags & BBF_DONT_REMOVE)
+ if (b2->bbFlags & BBF_DONT_REMOVE)
+ {
continue;
+ }
/* The next block also needs to be a condition */
- if (b2->bbJumpKind != BBJ_COND)
+ if (b2->bbJumpKind != BBJ_COND)
{
#ifdef DEBUG
optOptimizeBoolsGcStress(b1);
@@ -7893,9 +8209,9 @@ void Compiler::optOptimizeBools()
continue;
}
- bool sameTarget; // Do b1 and b2 have the same bbJumpDest?
+ bool sameTarget; // Do b1 and b2 have the same bbJumpDest?
- if (b1->bbJumpDest == b2->bbJumpDest)
+ if (b1->bbJumpDest == b2->bbJumpDest)
{
/* Given the following sequence of blocks :
B1: brtrue(t1, BX)
@@ -7908,7 +8224,7 @@ void Compiler::optOptimizeBools()
sameTarget = true;
}
- else if (b1->bbJumpDest == b2->bbNext) /*b1->bbJumpDest->bbNum == n1+2*/
+ else if (b1->bbJumpDest == b2->bbNext) /*b1->bbJumpDest->bbNum == n1+2*/
{
/* Given the following sequence of blocks :
B1: brtrue(t1, B3)
@@ -7929,48 +8245,64 @@ void Compiler::optOptimizeBools()
/* The second block must contain a single statement */
GenTreePtr s2 = b2->bbTreeList;
- if (s2->gtPrev != s2)
+ if (s2->gtPrev != s2)
+ {
continue;
+ }
noway_assert(s2->gtOper == GT_STMT);
- GenTreePtr t2 = s2->gtStmt.gtStmtExpr;
+ GenTreePtr t2 = s2->gtStmt.gtStmtExpr;
noway_assert(t2->gtOper == GT_JTRUE);
/* Find the condition for the first block */
- GenTreePtr s1 = b1->bbTreeList->gtPrev;
+ GenTreePtr s1 = b1->bbTreeList->gtPrev;
noway_assert(s1->gtOper == GT_STMT);
- GenTreePtr t1 = s1->gtStmt.gtStmtExpr;
+ GenTreePtr t1 = s1->gtStmt.gtStmtExpr;
noway_assert(t1->gtOper == GT_JTRUE);
- if (b2->countOfInEdges() > 1)
+ if (b2->countOfInEdges() > 1)
+ {
continue;
+ }
/* Find the branch conditions of b1 and b2 */
- bool bool1, bool2;
+ bool bool1, bool2;
- GenTreePtr c1 = optIsBoolCond(t1, &t1, &bool1);
- if (!c1) continue;
+ GenTreePtr c1 = optIsBoolCond(t1, &t1, &bool1);
+ if (!c1)
+ {
+ continue;
+ }
- GenTreePtr c2 = optIsBoolCond(t2, &t2, &bool2);
- if (!c2) continue;
+ GenTreePtr c2 = optIsBoolCond(t2, &t2, &bool2);
+ if (!c2)
+ {
+ continue;
+ }
noway_assert(t1->gtOper == GT_EQ || t1->gtOper == GT_NE && t1->gtOp.gtOp1 == c1);
noway_assert(t2->gtOper == GT_EQ || t2->gtOper == GT_NE && t2->gtOp.gtOp1 == c2);
- // Leave out floats where the bit-representation is more complicated
+ // Leave out floats where the bit-representation is more complicated
// - there are two representations for 0.
- //
+ //
if (varTypeIsFloating(c1->TypeGet()) || varTypeIsFloating(c2->TypeGet()))
+ {
continue;
+ }
// Make sure the types involved are of the same sizes
if (genTypeSize(c1->TypeGet()) != genTypeSize(c2->TypeGet()))
+ {
continue;
+ }
if (genTypeSize(t1->TypeGet()) != genTypeSize(t2->TypeGet()))
+ {
continue;
+ }
#ifdef _TARGET_ARMARCH_
// Skip the small operand which we cannot encode.
if (varTypeIsSmall(c1->TypeGet()))
@@ -7978,19 +8310,23 @@ void Compiler::optOptimizeBools()
#endif
/* The second condition must not contain side effects */
- if (c2->gtFlags & GTF_GLOB_EFFECT)
+ if (c2->gtFlags & GTF_GLOB_EFFECT)
+ {
continue;
+ }
/* The second condition must not be too expensive */
gtPrepareCost(c2);
if (c2->gtCostEx > 12)
+ {
continue;
+ }
- genTreeOps foldOp;
- genTreeOps cmpOp;
- var_types foldType = c1->TypeGet();
+ genTreeOps foldOp;
+ genTreeOps cmpOp;
+ var_types foldType = c1->TypeGet();
if (varTypeIsGC(foldType))
{
foldType = TYP_I_IMPL;
@@ -8001,7 +8337,9 @@ void Compiler::optOptimizeBools()
/* Both conditions must be the same */
if (t1->gtOper != t2->gtOper)
+ {
continue;
+ }
if (t1->gtOper == GT_EQ)
{
@@ -8025,7 +8363,9 @@ void Compiler::optOptimizeBools()
/* The b1 condition must be the reverse of the b2 condition */
if (t1->gtOper == t2->gtOper)
+ {
continue;
+ }
if (t1->gtOper == GT_EQ)
{
@@ -8048,7 +8388,9 @@ void Compiler::optOptimizeBools()
// Anding requires both values to be 0 or 1
if ((foldOp == GT_AND) && (!bool1 || !bool2))
+ {
continue;
+ }
//
// Now update the trees
@@ -8061,7 +8403,7 @@ void Compiler::optOptimizeBools()
}
t1->SetOper(cmpOp);
- t1->gtOp.gtOp1 = cmpOp1;
+ t1->gtOp.gtOp1 = cmpOp1;
t1->gtOp.gtOp2->gtType = foldType; // Could have been varTypeIsGC()
#if FEATURE_SET_FLAGS
@@ -8079,17 +8421,17 @@ void Compiler::optOptimizeBools()
// The new top level node that we just created does feed directly into
// a comparison against zero, so set the GTF_SET_FLAGS bit so that
- // we generate an instruction that sets the flags, which allows us
+ // we generate an instruction that sets the flags, which allows us
// to omit the cmp with zero instruction.
// Request that the codegen for cmpOp1 sets the condition flags
// when it generates the code for cmpOp1.
//
cmpOp1->gtRequestSetFlags();
-#endif
+#endif
- flowList * edge1 = fgGetPredForBlock(b1->bbJumpDest, b1);
- flowList * edge2;
+ flowList* edge1 = fgGetPredForBlock(b1->bbJumpDest, b1);
+ flowList* edge2;
/* Modify the target of the conditional jump and update bbRefs and bbPreds */
@@ -8108,8 +8450,8 @@ void Compiler::optOptimizeBools()
fgAddRefPred(b2->bbJumpDest, b1);
}
- noway_assert(edge1 != NULL);
- noway_assert(edge2 != NULL);
+ noway_assert(edge1 != nullptr);
+ noway_assert(edge2 != nullptr);
BasicBlock::weight_t edgeSumMin = edge1->flEdgeWeightMin + edge2->flEdgeWeightMin;
BasicBlock::weight_t edgeSumMax = edge1->flEdgeWeightMax + edge2->flEdgeWeightMax;
@@ -8129,7 +8471,8 @@ void Compiler::optOptimizeBools()
noway_assert(b1->bbJumpKind == BBJ_COND);
noway_assert(b2->bbJumpKind == BBJ_COND);
noway_assert(b1->bbJumpDest == b2->bbJumpDest);
- noway_assert(b1->bbNext == b2); noway_assert(b2->bbNext);
+ noway_assert(b1->bbNext == b2);
+ noway_assert(b2->bbNext);
fgUnlinkBlock(b2);
b2->bbFlags |= BBF_REMOVED;
@@ -8150,30 +8493,29 @@ void Compiler::optOptimizeBools()
/* Update the block numbers and try again */
change = true;
-/*
- do
- {
- b2->bbNum = ++n1;
- b2 = b2->bbNext;
- }
- while (b2);
-*/
+ /*
+ do
+ {
+ b2->bbNum = ++n1;
+ b2 = b2->bbNext;
+ }
+ while (b2);
+ */
// Update loop table
fgUpdateLoopsAfterCompacting(b1, b2);
-
+
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
- printf("Folded %sboolean conditions of BB%02u and BB%02u to :\n",
- c2->OperIsLeaf() ? "" : "non-leaf ",
+ printf("Folded %sboolean conditions of BB%02u and BB%02u to :\n", c2->OperIsLeaf() ? "" : "non-leaf ",
b1->bbNum, b2->bbNum);
- gtDispTree(s1); printf("\n");
+ gtDispTree(s1);
+ printf("\n");
}
#endif
}
- }
- while (change);
+ } while (change);
#ifdef DEBUG
fgDebugCheckBBlist();
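
Stepping back from the formatting churn in optOptimizeBools: the whole routine derives a (foldOp, cmpOp) pair so that two adjacent JTRUE blocks collapse into a single test. The sketch below reconstructs that choice from the comments visible in the hunks; it illustrates the boolean algebra involved and is not a drop-in replacement for the JIT code, which also fixes up flow edges, edge weights, and ref counts.

    #include <cassert>

    enum Oper { EQ, NE, OR, AND };

    struct Folded
    {
        Oper foldOp; // how c1 and c2 are combined
        Oper cmpOp;  // how the combined value is compared against 0
    };

    // t1/t2 are the relops of the two JTRUEs (each compares its operand with 0).
    // sameTarget: both blocks jump to the same place, so the fold is "t1 || t2";
    // otherwise B1 jumps just past B2 and the fold is "!t1 && t2".
    bool FoldBoolConds(Oper t1, Oper t2, bool sameTarget, Folded* out)
    {
        if (sameTarget)
        {
            if (t1 != t2)
            {
                return false;
            }
            *out = (t1 == EQ) ? Folded{AND, EQ}  // (c1==0)||(c2==0) == ((c1 & c2) == 0), booleans only
                              : Folded{OR, NE};  // (c1!=0)||(c2!=0) == ((c1 | c2) != 0)
        }
        else
        {
            if (t1 == t2)
            {
                return false; // B1's condition has to be the reverse of B2's
            }
            *out = (t1 == EQ) ? Folded{AND, NE}  // (c1!=0)&&(c2!=0) == ((c1 & c2) != 0), booleans only
                              : Folded{OR, EQ};  // (c1==0)&&(c2==0) == ((c1 | c2) == 0)
        }
        return true;
    }

    int main()
    {
        Folded f;
        assert(FoldBoolConds(NE, NE, true, &f) && f.foldOp == OR && f.cmpOp == NE);
        assert(FoldBoolConds(EQ, NE, false, &f) && f.foldOp == AND && f.cmpOp == NE);
        return 0;
    }
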
diff --git a/src/jit/phase.h b/src/jit/phase.h
index 791658b8a5..d8e2940089 100644
--- a/src/jit/phase.h
+++ b/src/jit/phase.h
@@ -12,18 +12,17 @@ public:
virtual void Run();
protected:
- Phase(Compiler *_comp,
- const char *_name,
- Phases _phase=PHASE_NUMBER_OF)
- : comp(_comp), name(_name), phase(_phase) {}
+ Phase(Compiler* _comp, const char* _name, Phases _phase = PHASE_NUMBER_OF) : comp(_comp), name(_name), phase(_phase)
+ {
+ }
virtual void PrePhase();
virtual void DoPhase() = 0;
virtual void PostPhase();
- Compiler *comp;
- const char *name;
- Phases phase;
+ Compiler* comp;
+ const char* name;
+ Phases phase;
};
inline void Phase::Run()
@@ -73,7 +72,6 @@ inline void Phase::PostPhase()
comp->fgDebugCheckBBlist();
comp->fgDebugCheckLinks();
#endif // DEBUG
-
}
#endif /* End of _PHASE_H_ */
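
The phase.h hunk above is again formatting-only; the class it touches is a small template-method wrapper in which Run() brackets a derived DoPhase() with Pre/Post hooks. A stand-alone sketch of the same shape, with the Compiler dependency and the DEBUG flow-graph checks dropped:

    #include <cstdio>

    class Phase
    {
    public:
        virtual ~Phase() {}
        virtual void Run()
        {
            PrePhase();
            DoPhase();
            PostPhase();
        }

    protected:
        explicit Phase(const char* name) : name(name) {}

        virtual void PrePhase()  { std::printf("*************** Starting phase %s\n", name); }
        virtual void DoPhase() = 0;
        virtual void PostPhase() { std::printf("*************** Finished phase %s\n", name); }

        const char* name;
    };

    class RangeCheckPhase : public Phase
    {
    public:
        RangeCheckPhase() : Phase("optRangeCheck") {}

    protected:
        void DoPhase() override {}
    };

    int main()
    {
        RangeCheckPhase phase;
        phase.Run();
        return 0;
    }
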
diff --git a/src/jit/rangecheck.cpp b/src/jit/rangecheck.cpp
index 5c32f85c29..ae0c792f11 100644
--- a/src/jit/rangecheck.cpp
+++ b/src/jit/rangecheck.cpp
@@ -81,11 +81,11 @@ bool RangeCheck::BetweenBounds(Range& range, int lower, GenTreePtr upper)
#endif
ValueNum arrRefVN = ValueNumStore::NoVN;
- int arrSize = 0;
+ int arrSize = 0;
if (m_pCompiler->vnStore->IsVNConstant(uLimitVN))
{
- ssize_t constVal = -1;
+ ssize_t constVal = -1;
unsigned iconFlags = 0;
if (m_pCompiler->optIsTreeKnownIntValue(true, upper, &constVal, &iconFlags))
@@ -102,7 +102,7 @@ bool RangeCheck::BetweenBounds(Range& range, int lower, GenTreePtr upper)
}
else
{
- // If the upper limit is not length, then bail.
+ // If the upper limit is not length, then bail.
return false;
}
@@ -126,7 +126,7 @@ bool RangeCheck::BetweenBounds(Range& range, int lower, GenTreePtr upper)
}
int ucns = range.UpperLimit().GetConstant();
-
+
// Upper limit: a.Len + [0..n]
if (ucns >= 0)
{
@@ -138,13 +138,13 @@ bool RangeCheck::BetweenBounds(Range& range, int lower, GenTreePtr upper)
{
return false;
}
-
+
// Since upper limit is bounded by the array, return true if lower bound is good.
if (range.LowerLimit().IsConstant() && range.LowerLimit().GetConstant() >= 0)
{
return true;
}
-
+
// Check if we have the array size allocated by new.
if (arrSize <= 0)
{
@@ -161,7 +161,7 @@ bool RangeCheck::BetweenBounds(Range& range, int lower, GenTreePtr upper)
{
return false;
}
- return (range.LowerLimit().vn == arrRefVN && lcns <= ucns);
+ return (range.LowerLimit().vn == arrRefVN && lcns <= ucns);
}
}
// If upper limit is constant
@@ -214,17 +214,17 @@ void RangeCheck::OptimizeRangeCheck(BasicBlock* block, GenTreePtr stmt, GenTreeP
}
GenTreeBoundsChk* bndsChk = tree->AsBoundsChk();
- m_pCurBndsChk = bndsChk;
- GenTreePtr treeIndex = bndsChk->gtIndex;
+ m_pCurBndsChk = bndsChk;
+ GenTreePtr treeIndex = bndsChk->gtIndex;
// Take care of constant index first, like a[2], for example.
- ValueNum idxVn = treeIndex->gtVNPair.GetConservative();
+ ValueNum idxVn = treeIndex->gtVNPair.GetConservative();
ValueNum arrLenVn = bndsChk->gtArrLen->gtVNPair.GetConservative();
- int arrSize = 0;
+ int arrSize = 0;
if (m_pCompiler->vnStore->IsVNConstant(arrLenVn))
{
- ssize_t constVal = -1;
+ ssize_t constVal = -1;
unsigned iconFlags = 0;
if (m_pCompiler->optIsTreeKnownIntValue(true, bndsChk->gtArrLen, &constVal, &iconFlags))
@@ -240,14 +240,15 @@ void RangeCheck::OptimizeRangeCheck(BasicBlock* block, GenTreePtr stmt, GenTreeP
JITDUMP("ArrSize for lengthVN:%03X = %d\n", arrLenVn, arrSize);
if (m_pCompiler->vnStore->IsVNConstant(idxVn) && arrSize > 0)
{
- ssize_t idxVal = -1;
+ ssize_t idxVal = -1;
unsigned iconFlags = 0;
if (!m_pCompiler->optIsTreeKnownIntValue(true, treeIndex, &idxVal, &iconFlags))
{
return;
}
- JITDUMP("[RangeCheck::OptimizeRangeCheck] Is index %d in <0, arrLenVn VN%X sz:%d>.\n", idxVal, arrLenVn, arrSize);
+ JITDUMP("[RangeCheck::OptimizeRangeCheck] Is index %d in <0, arrLenVn VN%X sz:%d>.\n", idxVal, arrLenVn,
+ arrSize);
if (arrSize > 0 && idxVal < arrSize && idxVal >= 0)
{
JITDUMP("Removing range check\n");
@@ -340,17 +341,15 @@ bool RangeCheck::IsBinOpMonotonicallyIncreasing(GenTreePtr op1, GenTreePtr op2,
}
switch (op2->OperGet())
{
- case GT_LCL_VAR:
- return IsMonotonicallyIncreasing(op1, path) &&
- IsMonotonicallyIncreasing(op2, path);
+ case GT_LCL_VAR:
+ return IsMonotonicallyIncreasing(op1, path) && IsMonotonicallyIncreasing(op2, path);
- case GT_CNS_INT:
- return oper == GT_ADD && op2->AsIntConCommon()->IconValue() >= 0 &&
- IsMonotonicallyIncreasing(op1, path);
+ case GT_CNS_INT:
+ return oper == GT_ADD && op2->AsIntConCommon()->IconValue() >= 0 && IsMonotonicallyIncreasing(op1, path);
- default:
- JITDUMP("Not monotonic because expression is not recognized.\n");
- return false;
+ default:
+ JITDUMP("Not monotonic because expression is not recognized.\n");
+ return false;
}
}
@@ -363,10 +362,10 @@ bool RangeCheck::IsMonotonicallyIncreasing(GenTreePtr expr, SearchPath* path)
}
// Add hashtable entry for expr.
- path->Set(expr, NULL);
+ path->Set(expr, nullptr);
// Remove hashtable entry for expr when we exit the present scope.
- auto code = [&] { path->Remove(expr); };
+ auto code = [&] { path->Remove(expr); };
jitstd::utility::scoped_code<decltype(code)> finally(code);
// If the rhs expr is constant, then it is not part of the dependency
@@ -392,15 +391,15 @@ bool RangeCheck::IsMonotonicallyIncreasing(GenTreePtr expr, SearchPath* path)
assert(asg->OperKind() & GTK_ASGOP);
switch (asg->OperGet())
{
- case GT_ASG:
- return IsMonotonicallyIncreasing(asg->gtGetOp2(), path);
+ case GT_ASG:
+ return IsMonotonicallyIncreasing(asg->gtGetOp2(), path);
- case GT_ASG_ADD:
- return IsBinOpMonotonicallyIncreasing(asg->gtGetOp1(), asg->gtGetOp2(), GT_ADD, path);
+ case GT_ASG_ADD:
+ return IsBinOpMonotonicallyIncreasing(asg->gtGetOp1(), asg->gtGetOp2(), GT_ADD, path);
- default:
- // All other 'asg->OperGet()' kinds, return false
- break;
+ default:
+ // All other 'asg->OperGet()' kinds, return false
+ break;
}
JITDUMP("Unknown local definition type\n");
return false;
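
RangeCheck::IsMonotonicallyIncreasing, partially visible in the hunks above, recurses through SSA definitions and PHI arguments, using a path set both to terminate on cycles and to avoid revisiting nodes. A simplified, self-contained sketch of that recursion over an assumed expression type (the real code additionally dispatches on GT_ASG/GT_ASG_ADD and rejects anything it does not recognize):

    #include <set>
    #include <vector>

    struct Expr
    {
        bool isConst = false;
        long long cns = 0;
        bool isAdd = false;
        const Expr* op1 = nullptr;          // value being incremented
        const Expr* op2 = nullptr;          // constant step for the add case
        std::vector<const Expr*> phiArgs;   // non-empty for PHI nodes
    };

    bool IsMonotonicallyIncreasing(const Expr* e, std::set<const Expr*>& path)
    {
        if (path.count(e) != 0)
        {
            return true; // already being examined on this path (back-edge of a PHI cycle)
        }
        path.insert(e);
        bool result = false;
        if (e->isConst)
        {
            result = true; // constants do not break monotonicity
        }
        else if (e->isAdd)
        {
            result = e->op2->isConst && e->op2->cns >= 0 && IsMonotonicallyIncreasing(e->op1, path);
        }
        else if (!e->phiArgs.empty())
        {
            result = true;
            for (const Expr* arg : e->phiArgs)
            {
                result = result && IsMonotonicallyIncreasing(arg, path);
            }
        }
        path.erase(e);
        return result;
    }

    int main()
    {
        Expr one;  one.isConst = true; one.cns = 1;
        Expr i;                        // models a loop variable whose only def is i = i + 1
        Expr add;  add.isAdd = true; add.op1 = &i; add.op2 = &one;
        i.phiArgs = {&add};
        std::set<const Expr*> path;
        return IsMonotonicallyIncreasing(&i, path) ? 0 : 1;
    }
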
@@ -411,8 +410,7 @@ bool RangeCheck::IsMonotonicallyIncreasing(GenTreePtr expr, SearchPath* path)
}
else if (expr->OperGet() == GT_PHI)
{
- for (GenTreeArgList* args = expr->gtOp.gtOp1->AsArgList();
- args != nullptr; args = args->Rest())
+ for (GenTreeArgList* args = expr->gtOp.gtOp1->AsArgList(); args != nullptr; args = args->Rest())
{
// If the arg is already in the path, skip.
if (path->Lookup(args->Current()))
@@ -431,7 +429,6 @@ bool RangeCheck::IsMonotonicallyIncreasing(GenTreePtr expr, SearchPath* path)
return false;
}
-
UINT64 RangeCheck::HashCode(unsigned lclNum, unsigned ssaNum)
{
assert(ssaNum != SsaConfig::RESERVED_SSA_NUM);
@@ -478,14 +475,14 @@ void RangeCheck::SetDef(UINT64 hash, Location* loc)
Location* loc2;
if (m_pDefTable->Lookup(hash, &loc2))
{
- JITDUMP("Already have BB%02d, %08X, %08X for hash => %0I64X", loc2->block->bbNum, dspPtr(loc2->stmt), dspPtr(loc2->tree), hash);
+ JITDUMP("Already have BB%02d, %08X, %08X for hash => %0I64X", loc2->block->bbNum, dspPtr(loc2->stmt),
+ dspPtr(loc2->tree), hash);
assert(false);
}
#endif
m_pDefTable->Set(hash, loc);
}
-
// Merge assertions on the edge flowing into the block about a variable.
void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP assertions, Range* pRange)
{
@@ -494,14 +491,14 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
return;
}
- GenTreeLclVarCommon* lcl = (GenTreeLclVarCommon*) tree;
+ GenTreeLclVarCommon* lcl = (GenTreeLclVarCommon*)tree;
if (lcl->gtSsaNum == SsaConfig::RESERVED_SSA_NUM)
{
return;
}
// Walk through the "assertions" to check if they apply.
BitVecOps::Iter iter(m_pCompiler->apTraits, assertions);
- unsigned index = 0;
+ unsigned index = 0;
while (iter.NextElem(m_pCompiler->apTraits, &index))
{
index++;
@@ -509,9 +506,7 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
Compiler::AssertionDsc* curAssertion = m_pCompiler->optGetAssertion((Compiler::AssertionIndex)index);
// Current assertion is about array length.
- if (!curAssertion->IsArrLenArithBound() &&
- !curAssertion->IsArrLenBound() &&
- !curAssertion->IsConstantBound())
+ if (!curAssertion->IsArrLenArithBound() && !curAssertion->IsArrLenBound() && !curAssertion->IsConstantBound())
{
continue;
}
@@ -526,8 +521,8 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
assert(m_pCompiler->vnStore->IsVNArrLenArithBound(curAssertion->op1.vn) ||
m_pCompiler->vnStore->IsVNArrLenBound(curAssertion->op1.vn) ||
m_pCompiler->vnStore->IsVNConstantBound(curAssertion->op1.vn));
-
- Limit limit(Limit::keUndef);
+
+ Limit limit(Limit::keUndef);
genTreeOps cmpOper = GT_NONE;
// Current assertion is of the form (i < a.len - cns) != 0
@@ -537,25 +532,26 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
// Get i, a.len, cns and < as "info."
m_pCompiler->vnStore->GetArrLenArithBoundInfo(curAssertion->op1.vn, &info);
-
- if (m_pCompiler->lvaTable[lcl->gtLclNum].GetPerSsaData(lcl->gtSsaNum)->m_vnPair.GetConservative()
- != info.cmpOp)
+
+ if (m_pCompiler->lvaTable[lcl->gtLclNum].GetPerSsaData(lcl->gtSsaNum)->m_vnPair.GetConservative() !=
+ info.cmpOp)
{
continue;
}
switch (info.arrOper)
{
- case GT_SUB:
- case GT_ADD:
+ case GT_SUB:
+ case GT_ADD:
{
// If the operand that operates on the array is not constant, then done.
- if (!m_pCompiler->vnStore->IsVNConstant(info.arrOp) || m_pCompiler->vnStore->TypeOfVN(info.arrOp) != TYP_INT)
+ if (!m_pCompiler->vnStore->IsVNConstant(info.arrOp) ||
+ m_pCompiler->vnStore->TypeOfVN(info.arrOp) != TYP_INT)
{
break;
}
int cons = m_pCompiler->vnStore->ConstantValue<int>(info.arrOp);
- limit = Limit(Limit::keBinOpArray, info.vnArray, info.arrOper == GT_SUB ? -cons : cons);
+ limit = Limit(Limit::keBinOpArray, info.vnArray, info.arrOper == GT_SUB ? -cons : cons);
}
}
@@ -565,29 +561,31 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
else if (curAssertion->IsArrLenBound())
{
ValueNumStore::ArrLenArithBoundInfo info;
-
+
// Get the info as "i", "<" and "a.len"
m_pCompiler->vnStore->GetArrLenBoundInfo(curAssertion->op1.vn, &info);
- ValueNum lclVn = m_pCompiler->lvaTable[lcl->gtLclNum].GetPerSsaData(lcl->gtSsaNum)->m_vnPair.GetConservative();
+ ValueNum lclVn =
+ m_pCompiler->lvaTable[lcl->gtLclNum].GetPerSsaData(lcl->gtSsaNum)->m_vnPair.GetConservative();
// If we don't have the same variable we are comparing against, bail.
if (lclVn != info.cmpOp)
{
continue;
}
limit.type = Limit::keArray;
- limit.vn = info.vnArray;
- cmpOper = (genTreeOps)info.cmpOper;
+ limit.vn = info.vnArray;
+ cmpOper = (genTreeOps)info.cmpOper;
}
// Current assertion is of the form (i < 100) != 0
else if (curAssertion->IsConstantBound())
{
- ValueNumStore::ConstantBoundInfo info;
+ ValueNumStore::ConstantBoundInfo info;
// Get the info as "i", "<" and "100"
m_pCompiler->vnStore->GetConstantBoundInfo(curAssertion->op1.vn, &info);
- ValueNum lclVn = m_pCompiler->lvaTable[lcl->gtLclNum].GetPerSsaData(lcl->gtSsaNum)->m_vnPair.GetConservative();
+ ValueNum lclVn =
+ m_pCompiler->lvaTable[lcl->gtLclNum].GetPerSsaData(lcl->gtSsaNum)->m_vnPair.GetConservative();
// If we don't have the same variable we are comparing against, bail.
if (lclVn != info.cmpOpVN)
@@ -595,7 +593,7 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
continue;
}
- limit = Limit(Limit::keConstant, ValueNumStore::NoVN, info.constVal);
+ limit = Limit(Limit::keConstant, ValueNumStore::NoVN, info.constVal);
cmpOper = (genTreeOps)info.cmpOper;
}
else
@@ -614,7 +612,10 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
continue;
}
#ifdef DEBUG
- if (m_pCompiler->verbose) m_pCompiler->optPrintAssertion(curAssertion, (Compiler::AssertionIndex)index);
+ if (m_pCompiler->verbose)
+ {
+ m_pCompiler->optPrintAssertion(curAssertion, (Compiler::AssertionIndex)index);
+ }
#endif
noway_assert(limit.IsBinOpArray() || limit.IsArray() || limit.IsConstant());
@@ -629,7 +630,7 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
}
// During assertion prop we add assertions of the form:
- //
+ //
// (i < a.Length) == 0
// (i < a.Length) != 0
// (i < 100) == 0
@@ -686,7 +687,7 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
int curCns = (pRange->uLimit.IsBinOpArray()) ? pRange->uLimit.cns : 0;
int limCns = (limit.IsBinOpArray()) ? limit.cns : 0;
-
+
// Incoming limit doesn't tighten the existing upper limit.
if (limCns >= curCns)
{
@@ -706,25 +707,25 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
// cmpOp (loop index i) cmpOper a.len +/- cns
switch (cmpOper)
{
- case GT_LT:
- pRange->uLimit = limit;
- break;
+ case GT_LT:
+ pRange->uLimit = limit;
+ break;
- case GT_GT:
- pRange->lLimit = limit;
- break;
+ case GT_GT:
+ pRange->lLimit = limit;
+ break;
- case GT_GE:
- pRange->lLimit = limit;
- break;
+ case GT_GE:
+ pRange->lLimit = limit;
+ break;
- case GT_LE:
- pRange->uLimit = limit;
- break;
+ case GT_LE:
+ pRange->uLimit = limit;
+ break;
- default:
- // All other 'cmpOper' kinds leave lLimit/uLimit unchanged
- break;
+ default:
+ // All other 'cmpOper' kinds leave lLimit/uLimit unchanged
+ break;
}
JITDUMP("The range after edge merging:");
JITDUMP(pRange->ToString(m_pCompiler->getAllocatorDebugOnly()));
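For readers skimming the reformatted switch above: the net effect of merging a comparison assertion is that '<'/'<=' against a.len +/- cns tightens the range's upper limit, while '>'/'>=' tightens the lower limit. A minimal standalone C++ sketch of just that mapping, with simplified stand-in types rather than the JIT's own, might look like:

#include <cassert>

// Simplified stand-ins for the JIT's genTreeOps/Limit/Range; these names are
// illustrative only and are not the JIT's types.
enum CmpOper { LT, LE, GT, GE, NONE };

struct Limit
{
    int  cns;   // constant offset from the array length
    bool known; // whether this limit has been established
};

struct Range
{
    Limit lLimit;
    Limit uLimit;
};

// Mirrors the switch above: '<'/'<=' assertions tighten the upper bound and
// '>'/'>=' assertions tighten the lower bound; any other comparison is ignored.
void MergeComparison(Range* pRange, CmpOper cmpOper, const Limit& limit)
{
    switch (cmpOper)
    {
        case LT:
        case LE:
            pRange->uLimit = limit;
            break;
        case GT:
        case GE:
            pRange->lLimit = limit;
            break;
        default:
            break; // leave the range unchanged
    }
}

int main()
{
    Range r = {{0, true}, {0, false}};
    MergeComparison(&r, LT, Limit{-1, true}); // assertion "i < a.len - 1" tightens the upper limit
    assert(r.uLimit.known && r.uLimit.cns == -1);
    return 0;
}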
@@ -734,27 +735,31 @@ void RangeCheck::MergeEdgeAssertions(GenTreePtr tree, const ASSERT_VALARG_TP ass
// Merge assertions from the pred edges of the block, i.e., check for any assertions about "op's" value numbers for phi
// arguments. If not a phi argument, check if we have assertions about local variables.
-void RangeCheck::MergeAssertion(BasicBlock* block, GenTreePtr stmt, GenTreePtr op, SearchPath* path, Range* pRange DEBUGARG(int indent))
+void RangeCheck::MergeAssertion(
+ BasicBlock* block, GenTreePtr stmt, GenTreePtr op, SearchPath* path, Range* pRange DEBUGARG(int indent))
{
- JITDUMP("Merging assertions from pred edges of BB%02d for op(%p) $%03x\n", block->bbNum, dspPtr(op), op->gtVNPair.GetConservative());
+ JITDUMP("Merging assertions from pred edges of BB%02d for op(%p) $%03x\n", block->bbNum, dspPtr(op),
+ op->gtVNPair.GetConservative());
ASSERT_TP assertions = BitVecOps::UninitVal();
// If we have a phi arg, we can get to the block from it and use its assertion out.
if (op->gtOper == GT_PHI_ARG)
{
- GenTreePhiArg* arg = (GenTreePhiArg*) op;
- BasicBlock* pred = arg->gtPredBB;
+ GenTreePhiArg* arg = (GenTreePhiArg*)op;
+ BasicBlock* pred = arg->gtPredBB;
if (pred->bbFallsThrough() && pred->bbNext == block)
{
assertions = pred->bbAssertionOut;
- JITDUMP("Merge assertions from pred BB%02d edge: %s\n", pred->bbNum, BitVecOps::ToString(m_pCompiler->apTraits, assertions));
+ JITDUMP("Merge assertions from pred BB%02d edge: %s\n", pred->bbNum,
+ BitVecOps::ToString(m_pCompiler->apTraits, assertions));
}
else if ((pred->bbJumpKind == BBJ_COND || pred->bbJumpKind == BBJ_ALWAYS) && pred->bbJumpDest == block)
{
- if (m_pCompiler->bbJtrueAssertionOut != NULL)
+ if (m_pCompiler->bbJtrueAssertionOut != nullptr)
{
assertions = m_pCompiler->bbJtrueAssertionOut[pred->bbNum];
- JITDUMP("Merge assertions from pred BB%02d JTrue edge: %s\n", pred->bbNum, BitVecOps::ToString(m_pCompiler->apTraits, assertions));
+ JITDUMP("Merge assertions from pred BB%02d JTrue edge: %s\n", pred->bbNum,
+ BitVecOps::ToString(m_pCompiler->apTraits, assertions));
}
}
}
@@ -771,14 +776,18 @@ void RangeCheck::MergeAssertion(BasicBlock* block, GenTreePtr stmt, GenTreePtr o
}
}
-
// Compute the range for a binary operation.
-Range RangeCheck::ComputeRangeForBinOp(BasicBlock* block, GenTreePtr stmt,
- GenTreePtr op1, GenTreePtr op2, genTreeOps oper, SearchPath* path, bool monotonic DEBUGARG(int indent))
+Range RangeCheck::ComputeRangeForBinOp(BasicBlock* block,
+ GenTreePtr stmt,
+ GenTreePtr op1,
+ GenTreePtr op2,
+ genTreeOps oper,
+ SearchPath* path,
+ bool monotonic DEBUGARG(int indent))
{
- Range* op1RangeCached = NULL;
- Range op1Range = Limit(Limit::keUndef);
- bool inPath1 = path->Lookup(op1);
+ Range* op1RangeCached = nullptr;
+ Range op1Range = Limit(Limit::keUndef);
+ bool inPath1 = path->Lookup(op1);
// Check if the range value is already cached.
if (!GetRangeMap()->Lookup(op1, &op1RangeCached))
{
@@ -800,8 +809,8 @@ Range RangeCheck::ComputeRangeForBinOp(BasicBlock* block, GenTreePtr stmt,
}
Range* op2RangeCached;
- Range op2Range = Limit(Limit::keUndef);
- bool inPath2 = path->Lookup(op2);
+ Range op2Range = Limit(Limit::keUndef);
+ bool inPath2 = path->Lookup(op2);
// Check if the range value is already cached.
if (!GetRangeMap()->Lookup(op2, &op2RangeCached))
{
@@ -824,15 +833,14 @@ Range RangeCheck::ComputeRangeForBinOp(BasicBlock* block, GenTreePtr stmt,
assert(oper == GT_ADD); // For now just GT_ADD.
Range r = RangeOps::Add(op1Range, op2Range);
- JITDUMP("BinOp add ranges %s %s = %s\n",
- op1Range.ToString(m_pCompiler->getAllocatorDebugOnly()),
- op2Range.ToString(m_pCompiler->getAllocatorDebugOnly()),
- r.ToString(m_pCompiler->getAllocatorDebugOnly()));
+ JITDUMP("BinOp add ranges %s %s = %s\n", op1Range.ToString(m_pCompiler->getAllocatorDebugOnly()),
+ op2Range.ToString(m_pCompiler->getAllocatorDebugOnly()), r.ToString(m_pCompiler->getAllocatorDebugOnly()));
return r;
}
// Compute the range for a local var definition.
-Range RangeCheck::ComputeRangeForLocalDef(BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent))
+Range RangeCheck::ComputeRangeForLocalDef(
+ BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent))
{
// Get the program location of the def.
Location* loc = GetDef(expr);
@@ -854,26 +862,27 @@ Range RangeCheck::ComputeRangeForLocalDef(BasicBlock* block, GenTreePtr stmt, Ge
assert(asg->OperKind() & GTK_ASGOP);
switch (asg->OperGet())
{
- // If the operator of the definition is assignment, then compute the range of the rhs.
- case GT_ASG:
+ // If the operator of the definition is assignment, then compute the range of the rhs.
+ case GT_ASG:
{
Range range = GetRange(loc->block, loc->stmt, asg->gtGetOp2(), path, monotonic DEBUGARG(indent));
- JITDUMP("Merge assertions from BB%02d:%s for assignment about %p\n", block->bbNum, BitVecOps::ToString(m_pCompiler->apTraits, block->bbAssertionIn), dspPtr(asg->gtGetOp1()));
+ JITDUMP("Merge assertions from BB%02d:%s for assignment about %p\n", block->bbNum,
+ BitVecOps::ToString(m_pCompiler->apTraits, block->bbAssertionIn), dspPtr(asg->gtGetOp1()));
MergeEdgeAssertions(asg->gtGetOp1(), block->bbAssertionIn, &range);
JITDUMP("done merging\n");
return range;
}
- case GT_ASG_ADD:
- // If the operator of the definition is +=, then compute the range of the operands of +.
- // Note that gtGetOp1 will return op1 to be the lhs; in the formulation of ssa, we have
- // a side table for defs and the lhs of a += is considered to be a use for SSA numbering.
- return ComputeRangeForBinOp(loc->block, loc->stmt,
- asg->gtGetOp1(), asg->gtGetOp2(), GT_ADD, path, monotonic DEBUGARG(indent));
+ case GT_ASG_ADD:
+ // If the operator of the definition is +=, then compute the range of the operands of +.
+ // Note that gtGetOp1 will return op1 to be the lhs; in the formulation of ssa, we have
+ // a side table for defs and the lhs of a += is considered to be a use for SSA numbering.
+ return ComputeRangeForBinOp(loc->block, loc->stmt, asg->gtGetOp1(), asg->gtGetOp2(), GT_ADD, path,
+ monotonic DEBUGARG(indent));
- default:
- // All other 'asg->OperGet()' kinds, return Limit::keUnknown
- break;
+ default:
+ // All other 'asg->OperGet()' kinds, return Limit::keUnknown
+ break;
}
return Range(Limit(Limit::keUnknown));
}
@@ -890,11 +899,11 @@ bool RangeCheck::GetLimitMax(Limit& limit, int* pMax)
int& max1 = *pMax;
switch (limit.type)
{
- case Limit::keConstant:
- max1 = limit.GetConstant();
- break;
+ case Limit::keConstant:
+ max1 = limit.GetConstant();
+ break;
- case Limit::keBinOpArray:
+ case Limit::keBinOpArray:
{
int tmp = GetArrLength(limit.vn);
if (tmp <= 0)
@@ -909,7 +918,7 @@ bool RangeCheck::GetLimitMax(Limit& limit, int* pMax)
}
break;
- case Limit::keArray:
+ case Limit::keArray:
{
int tmp = GetArrLength(limit.vn);
if (tmp <= 0)
@@ -920,28 +929,28 @@ bool RangeCheck::GetLimitMax(Limit& limit, int* pMax)
}
break;
- case Limit::keSsaVar:
- case Limit::keBinOp:
- if (m_pCompiler->vnStore->IsVNConstant(limit.vn) && m_pCompiler->vnStore->TypeOfVN(limit.vn) == TYP_INT)
- {
- max1 = m_pCompiler->vnStore->ConstantValue<int>(limit.vn);
- }
- else
- {
- return false;
- }
- if (limit.type == Limit::keBinOp)
- {
- if (IntAddOverflows(max1, limit.GetConstant()))
+ case Limit::keSsaVar:
+ case Limit::keBinOp:
+ if (m_pCompiler->vnStore->IsVNConstant(limit.vn) && m_pCompiler->vnStore->TypeOfVN(limit.vn) == TYP_INT)
+ {
+ max1 = m_pCompiler->vnStore->ConstantValue<int>(limit.vn);
+ }
+ else
{
return false;
}
- max1 += limit.GetConstant();
- }
- break;
+ if (limit.type == Limit::keBinOp)
+ {
+ if (IntAddOverflows(max1, limit.GetConstant()))
+ {
+ return false;
+ }
+ max1 += limit.GetConstant();
+ }
+ break;
- default:
- return false;
+ default:
+ return false;
}
return true;
}
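A note on the overflow guard these cases depend on: GetLimitMax only folds the limit's constant into the maximum when IntAddOverflows says the addition is safe. A freestanding sketch of that step, using hypothetical helper names rather than the JIT's:

#include <cassert>
#include <climits>

// Hypothetical stand-in for the JIT's IntAddOverflows check used by GetLimitMax:
// detect signed overflow before performing the addition.
bool AddWouldOverflow(int a, int b)
{
    return (b > 0) ? (a > INT_MAX - b) : (a < INT_MIN - b);
}

// Mirrors the keBinOp arm above: start from the constant value behind the VN and
// fold in the limit's constant only when the addition is provably safe.
bool GetMaxForBinOp(int vnConstant, int limitCns, int* pMax)
{
    if (AddWouldOverflow(vnConstant, limitCns))
    {
        return false; // conservatively give up, as GetLimitMax does
    }
    *pMax = vnConstant + limitCns;
    return true;
}

int main()
{
    int max = 0;
    assert(GetMaxForBinOp(100, 5, &max) && max == 105);
    assert(!GetMaxForBinOp(INT_MAX, 1, &max));
    return 0;
}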
@@ -1002,8 +1011,7 @@ bool RangeCheck::DoesBinOpOverflow(BasicBlock* block, GenTreePtr stmt, GenTreePt
MergeAssertion(block, stmt, op2, path, op2Range DEBUGARG(0));
}
- JITDUMP("Checking bin op overflow %s %s\n",
- op1Range->ToString(m_pCompiler->getAllocatorDebugOnly()),
+ JITDUMP("Checking bin op overflow %s %s\n", op1Range->ToString(m_pCompiler->getAllocatorDebugOnly()),
op2Range->ToString(m_pCompiler->getAllocatorDebugOnly()));
if (!AddOverflows(op1Range->UpperLimit(), op2Range->UpperLimit()))
@@ -1027,25 +1035,23 @@ bool RangeCheck::DoesVarDefOverflow(BasicBlock* block, GenTreePtr stmt, GenTreeP
assert(asg->OperKind() & GTK_ASGOP);
switch (asg->OperGet())
{
- case GT_ASG:
- return DoesOverflow(loc->block, loc->stmt, asg->gtGetOp2(), path);
+ case GT_ASG:
+ return DoesOverflow(loc->block, loc->stmt, asg->gtGetOp2(), path);
- case GT_ASG_ADD:
-        // For GT_ASG_ADD, op2 is a use, and op1 is also a use since we use a side table for defs in the useasg case.
- return DoesBinOpOverflow(loc->block, loc->stmt, asg->gtGetOp1(), asg->gtGetOp2(), path);
+ case GT_ASG_ADD:
+            // For GT_ASG_ADD, op2 is a use, and op1 is also a use since we use a side table for defs in the useasg case.
+ return DoesBinOpOverflow(loc->block, loc->stmt, asg->gtGetOp1(), asg->gtGetOp2(), path);
- default:
- // All other 'asg->OperGet()' kinds, conservatively return true
- break;
+ default:
+ // All other 'asg->OperGet()' kinds, conservatively return true
+ break;
}
return true;
}
bool RangeCheck::DoesPhiOverflow(BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path)
{
- for (GenTreeArgList* args = expr->gtOp.gtOp1->AsArgList();
- args != nullptr;
- args = args->Rest())
+ for (GenTreeArgList* args = expr->gtOp.gtOp1->AsArgList(); args != nullptr; args = args->Rest())
{
GenTreePtr arg = args->Current();
if (path->Lookup(arg))
@@ -1078,8 +1084,8 @@ bool RangeCheck::ComputeDoesOverflow(BasicBlock* block, GenTreePtr stmt, GenTree
bool overflows = true;
// Remove hashtable entry for expr when we exit the present scope.
- Range range = Limit(Limit::keUndef);
- ValueNum vn = expr->gtVNPair.GetConservative();
+ Range range = Limit(Limit::keUndef);
+ ValueNum vn = expr->gtVNPair.GetConservative();
if (path->GetCount() > MAX_SEARCH_DEPTH)
{
overflows = true;
@@ -1113,9 +1119,9 @@ struct Node
{
Range range;
Node* next;
- Node()
- : range(Limit(Limit::keUndef)),
- next(NULL) {}
+ Node() : range(Limit(Limit::keUndef)), next(nullptr)
+ {
+ }
};
// Compute the range recursively by asking for the range of each variable in the dependency chain.
@@ -1125,10 +1131,11 @@ struct Node
// value as "dependent" (dep).
// If the loop is proven to be "monotonic", then make liberal decisions while merging phi node.
// eg.: merge((0, dep), (dep, dep)) = (0, dep)
-Range RangeCheck::ComputeRange(BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent))
+Range RangeCheck::ComputeRange(
+ BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent))
{
- bool newlyAdded = !path->Set(expr, block);
- Range range = Limit(Limit::keUndef);
+ bool newlyAdded = !path->Set(expr, block);
+ Range range = Limit(Limit::keUndef);
ValueNum vn = expr->gtVNPair.GetConservative();
// If newly added in the current search path, then reduce the budget.
@@ -1156,7 +1163,7 @@ Range RangeCheck::ComputeRange(BasicBlock* block, GenTreePtr stmt, GenTreePtr ex
JITDUMP("GetRange not tractable within max stack depth.\n");
}
// TODO-CQ: The current implementation is reliant on integer storage types
- // for constants. It could use INT64. Still, representing ULONG constants
+ // for constants. It could use INT64. Still, representing ULONG constants
// might require preserving the var_type whether it is a un/signed 64-bit.
// JIT64 doesn't do anything for "long" either. No asm diffs.
else if (expr->TypeGet() == TYP_LONG || expr->TypeGet() == TYP_ULONG)
@@ -1168,8 +1175,8 @@ Range RangeCheck::ComputeRange(BasicBlock* block, GenTreePtr stmt, GenTreePtr ex
else if (m_pCompiler->vnStore->IsVNConstant(vn))
{
range = (m_pCompiler->vnStore->TypeOfVN(vn) == TYP_INT)
- ? Range(Limit(Limit::keConstant, m_pCompiler->vnStore->ConstantValue<int>(vn)))
- : Limit(Limit::keUnknown);
+ ? Range(Limit(Limit::keConstant, m_pCompiler->vnStore->ConstantValue<int>(vn)))
+ : Limit(Limit::keUnknown);
}
// If local, find the definition from the def map and evaluate the range for rhs.
else if (expr->IsLocal())
@@ -1180,28 +1187,27 @@ Range RangeCheck::ComputeRange(BasicBlock* block, GenTreePtr stmt, GenTreePtr ex
// If add, then compute the range for the operands and add them.
else if (expr->OperGet() == GT_ADD)
{
- range = ComputeRangeForBinOp(block, stmt,
- expr->gtGetOp1(), expr->gtGetOp2(), GT_ADD, path, monotonic DEBUGARG(indent + 1));
+ range = ComputeRangeForBinOp(block, stmt, expr->gtGetOp1(), expr->gtGetOp2(), GT_ADD, path,
+ monotonic DEBUGARG(indent + 1));
}
// If phi, then compute the range for arguments, calling the result "dependent" when looping begins.
else if (expr->OperGet() == GT_PHI)
{
- Node* cur = nullptr;
+ Node* cur = nullptr;
Node* head = nullptr;
- for (GenTreeArgList* args = expr->gtOp.gtOp1->AsArgList();
- args != nullptr; args = args->Rest())
+ for (GenTreeArgList* args = expr->gtOp.gtOp1->AsArgList(); args != nullptr; args = args->Rest())
{
// Collect the range for each phi argument in a linked list.
Node* node = new (m_pCompiler->getAllocator()) Node();
if (cur != nullptr)
{
cur->next = node;
- cur = cur->next;
+ cur = cur->next;
}
else
{
head = node;
- cur = head;
+ cur = head;
}
if (path->Lookup(args->Current()))
{
@@ -1218,8 +1224,7 @@ Range RangeCheck::ComputeRange(BasicBlock* block, GenTreePtr stmt, GenTreePtr ex
{
assert(!cur->range.LowerLimit().IsUndef());
assert(!cur->range.UpperLimit().IsUndef());
- JITDUMP("Merging ranges %s %s:",
- range.ToString(m_pCompiler->getAllocatorDebugOnly()),
+ JITDUMP("Merging ranges %s %s:", range.ToString(m_pCompiler->getAllocatorDebugOnly()),
cur->range.ToString(m_pCompiler->getAllocatorDebugOnly()));
range = RangeOps::Merge(range, cur->range, monotonic);
JITDUMP("%s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly()));
@@ -1247,7 +1252,8 @@ void Indent(int indent)
#endif
// Get the range, if it is already computed, use the cached range value, else compute it.
-Range RangeCheck::GetRange(BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent))
+Range RangeCheck::GetRange(
+ BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent))
{
#ifdef DEBUG
if (m_pCompiler->verbose)
@@ -1261,18 +1267,15 @@ Range RangeCheck::GetRange(BasicBlock* block, GenTreePtr stmt, GenTreePtr expr,
#endif
Range* pRange = nullptr;
- Range range = GetRangeMap()->Lookup(expr, &pRange)
- ? *pRange
- : ComputeRange(block, stmt, expr, path, monotonic DEBUGARG(indent));
+ Range range = GetRangeMap()->Lookup(expr, &pRange) ? *pRange : ComputeRange(block, stmt, expr, path,
+ monotonic DEBUGARG(indent));
#ifdef DEBUG
if (m_pCompiler->verbose)
{
Indent(indent);
- JITDUMP(" %s Range (%08X) => %s\n",
- (pRange == nullptr) ? "Computed" : "Cached",
- dspPtr(expr),
- range.ToString(m_pCompiler->getAllocatorDebugOnly()));
+ JITDUMP(" %s Range (%08X) => %s\n", (pRange == nullptr) ? "Computed" : "Cached", dspPtr(expr),
+ range.ToString(m_pCompiler->getAllocatorDebugOnly()));
Indent(indent);
JITDUMP("}\n", expr);
}
@@ -1305,8 +1308,7 @@ void RangeCheck::MapStmtDefs(const Location& loc)
// To avoid ind(addr) use asgs
if (loc.parent->OperKind() & GTK_ASGOP)
{
- SetDef(HashCode(lclNum, ssaNum),
- new (m_pCompiler->getAllocator()) Location(loc));
+ SetDef(HashCode(lclNum, ssaNum), new (m_pCompiler->getAllocator()) Location(loc));
}
}
}
@@ -1324,18 +1326,16 @@ struct MapMethodDefsData
{
RangeCheck* rc;
BasicBlock* block;
- GenTreePtr stmt;
+ GenTreePtr stmt;
- MapMethodDefsData(RangeCheck* rc, BasicBlock* block, GenTreePtr stmt)
- : rc(rc)
- , block(block)
- , stmt(stmt)
- { }
+ MapMethodDefsData(RangeCheck* rc, BasicBlock* block, GenTreePtr stmt) : rc(rc), block(block), stmt(stmt)
+ {
+ }
};
Compiler::fgWalkResult MapMethodDefsVisitor(GenTreePtr* ptr, Compiler::fgWalkData* data)
{
- MapMethodDefsData* rcd = ((MapMethodDefsData*) data->pCallbackData);
+ MapMethodDefsData* rcd = ((MapMethodDefsData*)data->pCallbackData);
rcd->rc->MapStmtDefs(RangeCheck::Location(rcd->block, rcd->stmt, *ptr, data->parent));
return Compiler::WALK_CONTINUE;
}
@@ -1362,7 +1362,7 @@ void RangeCheck::OptimizeRangeChecks()
return;
}
#ifdef DEBUG
- if (m_pCompiler->verbose)
+ if (m_pCompiler->verbose)
{
JITDUMP("*************** In OptimizeRangeChecks()\n");
JITDUMP("Blocks/trees before phase\n");
diff --git a/src/jit/rangecheck.h b/src/jit/rangecheck.h
index c01c907eee..b00bfb8a67 100644
--- a/src/jit/rangecheck.h
+++ b/src/jit/rangecheck.h
@@ -50,7 +50,7 @@
// involving i_1 and i_2. Merge assertions from the block's edges whenever possible.
//
// **Step 4. Check if the dependency chain is monotonic.
-//
+//
// **Step 5. If monotonic is true, then perform a widening step, where we assume, the
// SSA variables that are "dependent" get their values from the definitions in the
// dependency loop and their initial values must be the definitions that are not in
@@ -86,7 +86,7 @@ struct Limit
{
enum LimitType
{
- keUndef, // The limit is yet to be computed.
+ keUndef, // The limit is yet to be computed.
keBinOp,
keBinOpArray,
keSsaVar,
@@ -94,29 +94,22 @@ struct Limit
keConstant,
keDependent, // The limit is dependent on some other value.
keUnknown, // The limit could not be determined.
- };
+ };
- Limit()
- : type(keUndef)
+ Limit() : type(keUndef)
{
}
- Limit(LimitType type)
- : type(type)
+ Limit(LimitType type) : type(type)
{
}
- Limit(LimitType type, int cns)
- : cns(cns),
- type(type)
+ Limit(LimitType type, int cns) : cns(cns), type(type)
{
assert(type == keConstant);
}
- Limit(LimitType type, ValueNum vn, int cns)
- : cns(cns)
- , vn(vn)
- , type(type)
+ Limit(LimitType type, ValueNum vn, int cns) : cns(cns), vn(vn), type(type)
{
assert(type == keBinOpArray || keBinOp);
}
@@ -161,39 +154,39 @@ struct Limit
{
switch (type)
{
- case keDependent:
- return true;
- case keBinOp:
- case keBinOpArray:
- if (IntAddOverflows(cns, i))
- {
- return false;
- }
- cns += i;
- return true;
-
- case keSsaVar:
- type = keBinOp;
- cns = i;
- return true;
-
- case keArray:
- type = keBinOpArray;
- cns = i;
- return true;
-
- case keConstant:
- if (IntAddOverflows(cns, i))
- {
- return false;
- }
- cns += i;
- return true;
-
- case keUndef:
- case keUnknown:
- // For these values of 'type', conservatively return false
- break;
+ case keDependent:
+ return true;
+ case keBinOp:
+ case keBinOpArray:
+ if (IntAddOverflows(cns, i))
+ {
+ return false;
+ }
+ cns += i;
+ return true;
+
+ case keSsaVar:
+ type = keBinOp;
+ cns = i;
+ return true;
+
+ case keArray:
+ type = keBinOpArray;
+ cns = i;
+ return true;
+
+ case keConstant:
+ if (IntAddOverflows(cns, i))
+ {
+ return false;
+ }
+ cns += i;
+ return true;
+
+ case keUndef:
+ case keUnknown:
+ // For these values of 'type', conservatively return false
+ break;
}
return false;
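For reference while reading the reindented AddConstant cases: adding a constant turns a keSsaVar limit into keBinOp and a keArray limit into keBinOpArray, while keConstant/keBinOp(Array) limits only absorb the addend when the addition cannot overflow. A compressed standalone sketch of that behavior (simplified types; keConstant and the bin-op kinds share one arm here, unlike the original):

#include <cassert>
#include <climits>

// Simplified mirror of Limit::AddConstant; the enum and fields are illustrative only.
enum LimitType { keUndef, keBinOp, keBinOpArray, keSsaVar, keArray, keConstant, keDependent, keUnknown };

struct SimpleLimit
{
    LimitType type;
    int       cns;

    bool AddConstant(int i)
    {
        switch (type)
        {
            case keDependent:
                return true;
            case keBinOp:
            case keBinOpArray:
            case keConstant:
                if ((i > 0) ? (cns > INT_MAX - i) : (cns < INT_MIN - i))
                {
                    return false; // addition would overflow
                }
                cns += i;
                return true;
            case keSsaVar:
                type = keBinOp; // VN + i
                cns  = i;
                return true;
            case keArray:
                type = keBinOpArray; // a.len + i
                cns  = i;
                return true;
            default:
                return false; // keUndef, keUnknown: conservatively fail
        }
    }
};

int main()
{
    SimpleLimit l{keArray, 0};
    assert(l.AddConstant(-1) && l.type == keBinOpArray && l.cns == -1); // a.len - 1
    return 0;
}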
@@ -203,21 +196,21 @@ struct Limit
{
switch (type)
{
- case keUndef:
- case keUnknown:
- case keDependent:
- return l.type == type;
+ case keUndef:
+ case keUnknown:
+ case keDependent:
+ return l.type == type;
- case keBinOp:
- case keBinOpArray:
- return l.type == type && l.vn == vn && l.cns == cns;
+ case keBinOp:
+ case keBinOpArray:
+ return l.type == type && l.vn == vn && l.cns == cns;
- case keSsaVar:
- case keArray:
- return l.type == type && l.vn == vn;
+ case keSsaVar:
+ case keArray:
+ return l.type == type && l.vn == vn;
- case keConstant:
- return l.type == type && l.cns == cns;
+ case keConstant:
+ return l.type == type && l.cns == cns;
}
return false;
}
@@ -225,40 +218,40 @@ struct Limit
const char* ToString(IAllocator* alloc)
{
unsigned size = 64;
- char* buf = (char*) alloc->Alloc(size);
+ char* buf = (char*)alloc->Alloc(size);
switch (type)
{
- case keUndef:
- return "Undef";
-
- case keUnknown:
- return "Unknown";
-
- case keDependent:
- return "Dependent";
-
- case keBinOp:
- case keBinOpArray:
- sprintf_s(buf, size, "VN%04X + %d", vn, cns);
- return buf;
-
- case keSsaVar:
- sprintf_s(buf, size, "VN%04X", vn);
- return buf;
-
- case keArray:
- sprintf_s(buf, size, "VN%04X", vn);
- return buf;
-
- case keConstant:
- sprintf_s(buf, size, "%d", cns);
- return buf;
+ case keUndef:
+ return "Undef";
+
+ case keUnknown:
+ return "Unknown";
+
+ case keDependent:
+ return "Dependent";
+
+ case keBinOp:
+ case keBinOpArray:
+ sprintf_s(buf, size, "VN%04X + %d", vn, cns);
+ return buf;
+
+ case keSsaVar:
+ sprintf_s(buf, size, "VN%04X", vn);
+ return buf;
+
+ case keArray:
+ sprintf_s(buf, size, "VN%04X", vn);
+ return buf;
+
+ case keConstant:
+ sprintf_s(buf, size, "%d", cns);
+ return buf;
}
unreached();
}
#endif
- int cns;
- ValueNum vn;
+ int cns;
+ ValueNum vn;
LimitType type;
};
@@ -268,15 +261,11 @@ struct Range
Limit uLimit;
Limit lLimit;
- Range(const Limit& limit)
- : uLimit(limit),
- lLimit(limit)
+ Range(const Limit& limit) : uLimit(limit), lLimit(limit)
{
}
- Range(const Limit& lLimit, const Limit& uLimit)
- : uLimit(uLimit),
- lLimit(lLimit)
+ Range(const Limit& lLimit, const Limit& uLimit) : uLimit(uLimit), lLimit(lLimit)
{
}
@@ -294,7 +283,7 @@ struct Range
char* ToString(IAllocator* alloc)
{
size_t size = 64;
- char* buf = (char*) alloc->Alloc(size);
+ char* buf = (char*)alloc->Alloc(size);
sprintf_s(buf, size, "<%s, %s>", lLimit.ToString(alloc), uLimit.ToString(alloc));
return buf;
}
@@ -433,13 +422,13 @@ struct RangeOps
// This is correct if k >= 0 and n >= k, since a.len always >= 0
// (a.len + n) could overflow, but the result (a.len + n) also
// preserves the overflow.
- if (r1hi.IsConstant() && r1hi.GetConstant() >= 0 &&
- r2hi.IsBinOpArray() && r2hi.GetConstant() >= r1hi.GetConstant())
+ if (r1hi.IsConstant() && r1hi.GetConstant() >= 0 && r2hi.IsBinOpArray() &&
+ r2hi.GetConstant() >= r1hi.GetConstant())
{
result.uLimit = r2hi;
}
- if (r2hi.IsConstant() && r2hi.GetConstant() >= 0 &&
- r1hi.IsBinOpArray() && r1hi.GetConstant() >= r2hi.GetConstant())
+ if (r2hi.IsConstant() && r2hi.GetConstant() >= 0 && r1hi.IsBinOpArray() &&
+ r1hi.GetConstant() >= r2hi.GetConstant())
{
result.uLimit = r1hi;
}
@@ -454,7 +443,6 @@ struct RangeOps
}
return result;
}
-
};
class RangeCheck
@@ -462,29 +450,29 @@ class RangeCheck
public:
// Constructor
RangeCheck(Compiler* pCompiler);
-
+
// Location information is used to map where the defs occur in the method.
struct Location
{
BasicBlock* block;
- GenTreePtr stmt;
- GenTreePtr tree;
- GenTreePtr parent;
+ GenTreePtr stmt;
+ GenTreePtr tree;
+ GenTreePtr parent;
Location(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree, GenTreePtr parent)
- : block(block)
- , stmt(stmt)
- , tree(tree)
- , parent(parent)
- { }
+ : block(block), stmt(stmt), tree(tree), parent(parent)
+ {
+ }
+
private:
Location();
- };
+ };
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, bool, JitSimplerHashBehavior> OverflowMap;
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, Range*, JitSimplerHashBehavior> RangeMap;
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, BasicBlock*, JitSimplerHashBehavior> SearchPath;
+ typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, bool, JitSimplerHashBehavior> OverflowMap;
+ typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, Range*, JitSimplerHashBehavior> RangeMap;
+ typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, BasicBlock*, JitSimplerHashBehavior> SearchPath;
typedef SimplerHashTable<INT64, LargePrimitiveKeyFuncs<INT64>, Location*, JitSimplerHashBehavior> VarToLocMap;
- typedef SimplerHashTable<INT64, LargePrimitiveKeyFuncs<INT64>, ExpandArrayStack<Location*>*, JitSimplerHashBehavior> VarToLocArrayMap;
+ typedef SimplerHashTable<INT64, LargePrimitiveKeyFuncs<INT64>, ExpandArrayStack<Location*>*, JitSimplerHashBehavior>
+ VarToLocArrayMap;
// Generate a hashcode unique for this ssa var.
UINT64 HashCode(unsigned lclNum, unsigned ssaNum);
@@ -526,21 +514,31 @@ public:
// The "path" is the path taken in the search for the rhs' range and its constituents' range.
// If "monotonic" is true, the calculations are made more liberally assuming initial values
// at phi definitions.
- Range GetRange(BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent));
+ Range GetRange(
+ BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent));
// Given the local variable, first find the definition of the local and find the range of the rhs.
// Helper for GetRange.
- Range ComputeRangeForLocalDef(BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent));
+ Range ComputeRangeForLocalDef(
+ BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent));
// Compute the range, rather than retrieve a cached value. Helper for GetRange.
- Range ComputeRange(BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent));
+ Range ComputeRange(
+ BasicBlock* block, GenTreePtr stmt, GenTreePtr expr, SearchPath* path, bool monotonic DEBUGARG(int indent));
// Compute the range for the op1 and op2 for the given binary operator.
- Range ComputeRangeForBinOp(BasicBlock* block, GenTreePtr stmt, GenTreePtr op1, GenTreePtr op2, genTreeOps oper, SearchPath* path, bool monotonic DEBUGARG(int indent));
+ Range ComputeRangeForBinOp(BasicBlock* block,
+ GenTreePtr stmt,
+ GenTreePtr op1,
+ GenTreePtr op2,
+ genTreeOps oper,
+ SearchPath* path,
+ bool monotonic DEBUGARG(int indent));
// Merge assertions from AssertionProp's flags, for the corresponding "phiArg."
// Requires "pRange" to contain range that is computed partially.
- void MergeAssertion(BasicBlock* block, GenTreePtr stmt, GenTreePtr phiArg, SearchPath* path, Range* pRange DEBUGARG(int indent));
+ void MergeAssertion(
+ BasicBlock* block, GenTreePtr stmt, GenTreePtr phiArg, SearchPath* path, Range* pRange DEBUGARG(int indent));
// Inspect the "assertions" and extract assertions about the given "phiArg" and
// refine the "pRange" value.
@@ -595,9 +593,9 @@ private:
RangeMap* GetRangeMap();
RangeMap* m_pRangeMap;
- bool m_fMappedDefs;
+ bool m_fMappedDefs;
VarToLocMap* m_pDefTable;
- Compiler* m_pCompiler;
+ Compiler* m_pCompiler;
// The number of nodes for which range is computed throughout the current method.
// When this limit is zero, we have exhausted all the budget to walk the ud-chain.
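As the last two comment lines note, the range walk is budget-limited: each node newly added to the search path spends one unit, and once the budget is gone the walk bails out with an unknown range rather than chasing an unbounded ud-chain. A small sketch of that pattern, using a hypothetical node type rather than the JIT's:

#include <cassert>
#include <vector>

// Hypothetical expression node, only for illustrating a budget-limited use-def walk.
struct Node
{
    std::vector<Node*> operands;
};

// Returns false ("range unknown") once the shared budget is exhausted, mirroring
// how RangeCheck abandons the ud-chain walk when its node budget reaches zero.
bool WalkUdChain(Node* node, int* pBudget)
{
    if (*pBudget <= 0)
    {
        return false;
    }
    --(*pBudget); // each newly visited node consumes one unit of budget

    for (Node* op : node->operands)
    {
        if (!WalkUdChain(op, pBudget))
        {
            return false;
        }
    }
    return true;
}

int main()
{
    Node leaf1, leaf2;
    Node root;
    root.operands = {&leaf1, &leaf2};

    int budget = 2;
    assert(!WalkUdChain(&root, &budget)); // three nodes, budget of two: walk gives up
    return 0;
}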
diff --git a/src/jit/rationalize.cpp b/src/jit/rationalize.cpp
index fa7e03d911..293a1d7a8a 100644
--- a/src/jit/rationalize.cpp
+++ b/src/jit/rationalize.cpp
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
@@ -10,40 +9,41 @@
#include "hashbv.h"
-
#ifdef DEBUG
void dumpMethod()
{
if (VERBOSE)
+ {
JitTls::GetCompiler()->fgDispBasicBlocks(true);
+ }
}
-void dumpTreeStack(Compiler *comp, ArrayStack<GenTree *> *stack)
+void dumpTreeStack(Compiler* comp, ArrayStack<GenTree*>* stack)
{
printf("=TOS=================\n");
- for (int i=0; i<stack->Height(); i++)
+ for (int i = 0; i < stack->Height(); i++)
{
- comp->gtDispNode(stack->Index(i), 0, "");
+ comp->gtDispNode(stack->Index(i), nullptr, "");
printf("\n");
}
printf("=====================\n");
}
-void dumpArgTable(Compiler *comp, GenTree *call)
+void dumpArgTable(Compiler* comp, GenTree* call)
{
noway_assert(call->IsCall());
fgArgInfoPtr argInfo = call->gtCall.fgArgInfo;
- noway_assert(argInfo != NULL);
+ noway_assert(argInfo != nullptr);
- unsigned argCount = argInfo->ArgCount();
- fgArgTabEntryPtr * argTable = argInfo->ArgTable();
- fgArgTabEntryPtr curArgTabEntry = NULL;
+ unsigned argCount = argInfo->ArgCount();
+ fgArgTabEntryPtr* argTable = argInfo->ArgTable();
+ fgArgTabEntryPtr curArgTabEntry = nullptr;
JITDUMP("ARG TABLE for call ");
Compiler::printTreeID(call);
JITDUMP(":\n");
- for (unsigned i=0; i < argCount; i++)
+ for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
JITDUMP("entry %d\n", i);
@@ -54,20 +54,18 @@ void dumpArgTable(Compiler *comp, GenTree *call)
#endif // DEBUG
-
-
// state carried over the tree walk, to be used in making
// a splitting decision.
struct SplitData
{
// callbacks to determine if we should split here, in pre and post order traversals
- Compiler::fgSplitPredicate *predicatePre;
- Compiler::fgSplitPredicate *predicatePost;
-
- GenTree *root; // root stmt of tree being processed
- BasicBlock *block;
- Rationalizer *thisPhase;
-
+ Compiler::fgSplitPredicate* predicatePre;
+ Compiler::fgSplitPredicate* predicatePost;
+
+ GenTree* root; // root stmt of tree being processed
+ BasicBlock* block;
+ Rationalizer* thisPhase;
+
bool continueSubtrees; // whether to continue after splitting off a tree (in pre-order)
};
@@ -75,39 +73,38 @@ struct SplitData
// isNodeCallArg - given a context (stack of parent nodes), determine if the TOS is an arg to a call
//------------------------------------------------------------------------------
-GenTree *isNodeCallArg(ArrayStack<GenTree *> *parentStack)
+GenTree* isNodeCallArg(ArrayStack<GenTree*>* parentStack)
{
for (int i = 1; // 0 is current node, so start at 1
- i < parentStack->Height();
- i++)
+ i < parentStack->Height(); i++)
{
- GenTree *node = parentStack->Index(i);
+ GenTree* node = parentStack->Index(i);
switch (node->OperGet())
{
- case GT_LIST:
- case GT_ARGPLACE:
- break;
- case GT_NOP:
- // Currently there's an issue when the rationalizer performs
- // the fixup of a call argument: the case is when we remove an
- // inserted NOP as a parent of a call introduced by fgMorph;
- // when then the rationalizer removes it, the tree stack in the
- // walk is not consistent with the node it was just deleted, so the
- // solution is just to go 1 level deeper.
- // TODO-Cleanup: This has to be fixed in a proper way: make the rationalizer
- // correctly modify the evaluation stack when removing treenodes.
- if (node->gtOp.gtOp1->gtOper == GT_CALL)
- {
- return node->gtOp.gtOp1;
- }
- break;
- case GT_CALL:
- return node;
- default:
- return NULL;
+ case GT_LIST:
+ case GT_ARGPLACE:
+ break;
+ case GT_NOP:
+ // Currently there's an issue when the rationalizer performs
+ // the fixup of a call argument: the case is when we remove an
+ // inserted NOP as a parent of a call introduced by fgMorph;
+ // when then the rationalizer removes it, the tree stack in the
+ // walk is not consistent with the node it was just deleted, so the
+ // solution is just to go 1 level deeper.
+ // TODO-Cleanup: This has to be fixed in a proper way: make the rationalizer
+ // correctly modify the evaluation stack when removing treenodes.
+ if (node->gtOp.gtOp1->gtOper == GT_CALL)
+ {
+ return node->gtOp.gtOp1;
+ }
+ break;
+ case GT_CALL:
+ return node;
+ default:
+ return nullptr;
}
}
- return NULL;
+ return nullptr;
}
//------------------------------------------------------------------------------
@@ -129,33 +126,32 @@ GenTree *isNodeCallArg(ArrayStack<GenTree *> *parentStack)
// If 'tree' is at the beginning of the linear order of 'parentStmt', it
// is made into a top-level statement.
-GenTreeStmt*
-Compiler::fgMakeEmbeddedStmt(BasicBlock* block, GenTree* tree, GenTree* parentStmt)
+GenTreeStmt* Compiler::fgMakeEmbeddedStmt(BasicBlock* block, GenTree* tree, GenTree* parentStmt)
{
assert(tree->gtOper != GT_STMT);
assert(parentStmt->gtOper == GT_STMT);
assert(fgBlockContainsStatementBounded(block, parentStmt));
- GenTreePtr newStmtFirstNode = fgGetFirstNode(tree);
+ GenTreePtr newStmtFirstNode = fgGetFirstNode(tree);
GenTreePtr parentStmtFirstNode = parentStmt->gtStmt.gtStmtList;
- GenTreePtr prevStmt = parentStmt;
- bool newTopLevelStmt = false;
- bool splitParentStmt = false;
+ GenTreePtr prevStmt = parentStmt;
+ bool newTopLevelStmt = false;
+ bool splitParentStmt = false;
if (newStmtFirstNode == parentStmtFirstNode)
{
// If this is the first node of the new statement, split them.
parentStmt->gtStmt.gtStmtList = tree->gtNext;
- prevStmt = parentStmt->gtPrev;
- splitParentStmt = true;
+ prevStmt = parentStmt->gtPrev;
+ splitParentStmt = true;
}
- GenTreeStmt* newStmt = gtNewStmt(tree, parentStmt->gtStmt.gtStmtILoffsx); // Use same IL offset as parent statement
+ GenTreeStmt* newStmt = gtNewStmt(tree, parentStmt->gtStmt.gtStmtILoffsx); // Use same IL offset as parent statement
newStmt->CopyCosts(tree);
newStmt->gtStmtList = newStmtFirstNode;
if (splitParentStmt && parentStmt->gtStmt.gtStmtIsTopLevel())
{
- newTopLevelStmt = true;
+ newTopLevelStmt = true;
tree->gtNext->gtPrev = nullptr;
- tree->gtNext = nullptr;
+ tree->gtNext = nullptr;
}
else
{
@@ -191,12 +187,14 @@ Compiler::fgMakeEmbeddedStmt(BasicBlock* block, GenTree* tree, GenTree* parentSt
while (nextLinearNode != searchNode && nextLinearNode != nextEmbeddedNode)
{
nextLinearNode = nextLinearNode->gtNext;
- assert (nextLinearNode != nullptr);
+ assert(nextLinearNode != nullptr);
}
if (nextLinearNode == searchNode)
+ {
break;
- prevStmt = nextStmt;
- nextStmt = nextStmt->gtNext;
+ }
+ prevStmt = nextStmt;
+ nextStmt = nextStmt->gtNext;
foundEmbeddedStmts = true;
}
@@ -223,9 +221,9 @@ Compiler::fgMakeEmbeddedStmt(BasicBlock* block, GenTree* tree, GenTree* parentSt
}
parentStmt->gtPrev = prevStmt;
- prevStmt->gtNext = parentStmt;
+ prevStmt->gtNext = parentStmt;
- newStmt->gtNext = firstEmbeddedStmt;
+ newStmt->gtNext = firstEmbeddedStmt;
firstEmbeddedStmt->gtPrev = newStmt;
}
}
@@ -251,11 +249,10 @@ Compiler::fgMakeEmbeddedStmt(BasicBlock* block, GenTree* tree, GenTree* parentSt
// Either the callee must ensure that 'before' is part of compCurStmt,
// or before->gtPrev must be non-null
-void
-Compiler::fgInsertLinearNodeBefore(GenTreePtr newNode, GenTreePtr before)
+void Compiler::fgInsertLinearNodeBefore(GenTreePtr newNode, GenTreePtr before)
{
GenTreePtr prevNode = before->gtPrev;
- newNode->gtPrev = prevNode;
+ newNode->gtPrev = prevNode;
if (prevNode == nullptr)
{
assert(compCurStmt->gtStmt.gtStmtList == before && compCurStmt->gtStmt.gtStmtIsTopLevel());
@@ -271,7 +268,7 @@ Compiler::fgInsertLinearNodeBefore(GenTreePtr newNode, GenTreePtr before)
compCurStmt->gtStmt.gtStmtList = newNode;
}
newNode->gtNext = before;
- before->gtPrev = newNode;
+ before->gtPrev = newNode;
}
//-----------------------------------------------------------------------------------------------
@@ -290,8 +287,7 @@ Compiler::fgInsertLinearNodeBefore(GenTreePtr newNode, GenTreePtr before)
// The caller must ensure that '*ppTree' is part of compCurStmt, and that
// compCurStmt is in compCurBB;
-GenTreeStmt*
-Compiler::fgInsertEmbeddedFormTemp(GenTree** ppTree, unsigned lclNum)
+GenTreeStmt* Compiler::fgInsertEmbeddedFormTemp(GenTree** ppTree, unsigned lclNum)
{
GenTree* subTree = *ppTree;
@@ -309,7 +305,8 @@ Compiler::fgInsertEmbeddedFormTemp(GenTree** ppTree, unsigned lclNum)
subTree->InsertAfterSelf(store);
- GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(store->TypeGet(), store->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
+ GenTree* load =
+ new (this, GT_LCL_VAR) GenTreeLclVar(store->TypeGet(), store->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
gtSetEvalOrder(load);
store->InsertAfterSelf(load);
@@ -319,7 +316,7 @@ Compiler::fgInsertEmbeddedFormTemp(GenTree** ppTree, unsigned lclNum)
JITDUMP("fgInsertEmbeddedFormTemp created store :\n");
DISPTREE(store);
- GenTreeStmt* stmt = fgMakeEmbeddedStmt(compCurBB, store, compCurStmt);
+ GenTreeStmt* stmt = fgMakeEmbeddedStmt(compCurBB, store, compCurStmt);
stmt->gtStmtILoffsx = compCurStmt->gtStmt.gtStmtILoffsx;
#ifdef DEBUG
stmt->gtStmtLastILoffs = compCurStmt->gtStmt.gtStmtLastILoffs;
@@ -333,16 +330,16 @@ genTreeOps storeForm(genTreeOps loadForm)
{
switch (loadForm)
{
- case GT_LCL_VAR:
- return GT_STORE_LCL_VAR;
- case GT_LCL_FLD:
- return GT_STORE_LCL_FLD;
- case GT_REG_VAR:
- noway_assert(!"reg vars only supported in classic backend\n");
- unreached();
- default:
- noway_assert(!"not a data load opcode\n");
- unreached();
+ case GT_LCL_VAR:
+ return GT_STORE_LCL_VAR;
+ case GT_LCL_FLD:
+ return GT_STORE_LCL_FLD;
+ case GT_REG_VAR:
+ noway_assert(!"reg vars only supported in classic backend\n");
+ unreached();
+ default:
+ noway_assert(!"not a data load opcode\n");
+ unreached();
}
}
@@ -351,24 +348,23 @@ genTreeOps addrForm(genTreeOps loadForm)
{
switch (loadForm)
{
- case GT_LCL_VAR:
- return GT_LCL_VAR_ADDR;
- case GT_LCL_FLD:
- return GT_LCL_FLD_ADDR;
- default:
- noway_assert(!"not a data load opcode\n");
- unreached();
+ case GT_LCL_VAR:
+ return GT_LCL_VAR_ADDR;
+ case GT_LCL_FLD:
+ return GT_LCL_FLD_ADDR;
+ default:
+ noway_assert(!"not a data load opcode\n");
+ unreached();
}
}
// copy the flags determined by mask from src to dst
-void copyFlags(GenTree *dst, GenTree *src, unsigned mask)
+void copyFlags(GenTree* dst, GenTree* src, unsigned mask)
{
dst->gtFlags &= ~mask;
dst->gtFlags |= (src->gtFlags & mask);
}
-
//--------------------------------------------------------------------------------------
// RewriteTopLevelComma - remove a top-level comma by creating a new preceding statement
// from its LHS and replacing the comma with its RHS (unless the
@@ -416,21 +412,20 @@ Location Rationalizer::RewriteTopLevelComma(Location loc)
// Create and insert a new preceding statement from the LHS of the comma node.
GenTreeStmt* newStatement = comp->gtNewStmt(commaOp1, commaStmt->gtStmtILoffsx);
newStatement->CopyCosts(commaOp1);
- newStatement->gtStmtList = Compiler::fgGetFirstNode(commaOp1);
+ newStatement->gtStmtList = Compiler::fgGetFirstNode(commaOp1);
newStatement->gtStmtList->gtPrev = nullptr;
- commaOp1->gtNext = nullptr;
+ commaOp1->gtNext = nullptr;
comp->fgInsertStmtBefore(loc.block, commaStmt, newStatement);
return Location(newStatement, loc.block);
}
-
//------------------------------------------------------------------------------
// MorphAsgIntoStoreLcl -
// Receives an assignment of type GT_ASG(Lhs, Rhs) where:
// -- Lhs can be GT_LCL_VAR or GT_LCL_FLD
-// -- Rhs is an arbitrary tree and converts that into its corresponding
+// -- Rhs is an arbitrary tree and converts that into its corresponding
// store local form.
//
// Returns the tree converted into GT_STORE_LCL_VAR or GT_STORE_LCL_FLD form.
@@ -483,7 +478,7 @@ void Rationalizer::MorphAsgIntoStoreLcl(GenTreeStmt* stmt, GenTreePtr pTree)
//------------------------------------------------------------------------------
// CreateTempAssignment -
-// Constructs an assignment where its left hand side is a GenTree node
+// Constructs an assignment where its left hand side is a GenTree node
// representing the given local variable number and the right hand side is
// the given tree.
//
@@ -497,24 +492,23 @@ GenTreePtr Rationalizer::CreateTempAssignment(Compiler* comp, unsigned lclNum, G
return gtAsg;
}
-
// turn "comma(lcl x, lcl x)" into "lcl x"
// this is produced by earlier transformations
-void Rationalizer::DuplicateCommaProcessOneTree(Compiler *comp, Rationalizer *irt, BasicBlock *block, GenTree *statement)
+void Rationalizer::DuplicateCommaProcessOneTree(Compiler* comp,
+ Rationalizer* irt,
+ BasicBlock* block,
+ GenTree* statement)
{
- SplitData tmpState = {0};
- tmpState.root = statement;
+ SplitData tmpState = {nullptr};
+ tmpState.root = statement;
tmpState.continueSubtrees = true;
- tmpState.thisPhase = irt;
- tmpState.block = block;
-
+ tmpState.thisPhase = irt;
+ tmpState.block = block;
+
assert(statement->IsStatement());
- comp->fgWalkTree(&(statement->gtStmt.gtStmtExpr),
- NULL,
- CommaHelper,
- &tmpState);
+ comp->fgWalkTree(&(statement->gtStmt.gtStmtExpr), nullptr, CommaHelper, &tmpState);
#if 0
JITDUMP("resulting block\n");
@@ -522,26 +516,24 @@ void Rationalizer::DuplicateCommaProcessOneTree(Compiler *comp, Rationalizer *ir
#endif
}
-// call args have other pointers to them which must be fixed up if
+// call args have other pointers to them which must be fixed up if
// they are replaced
-void Compiler::fgFixupIfCallArg(ArrayStack<GenTree *> *parentStack,
- GenTree *oldChild,
- GenTree *newChild)
+void Compiler::fgFixupIfCallArg(ArrayStack<GenTree*>* parentStack, GenTree* oldChild, GenTree* newChild)
{
- GenTree *parentCall = isNodeCallArg(parentStack);
- if (!parentCall)
+ GenTree* parentCall = isNodeCallArg(parentStack);
+ if (!parentCall)
{
DBEXEC(VERBOSE, dumpTreeStack(JitTls::GetCompiler(), parentStack));
return;
}
-
+
// we have replaced an arg, so update pointers in argtable
fgFixupArgTabEntryPtr(parentCall, oldChild, newChild);
}
//------------------------------------------------------------------------
-// fgFixupArgTabEntryPtr: Fixup the fgArgTabEntryPtr of parentCall after
-// replacing oldArg with newArg
+// fgFixupArgTabEntryPtr: Fixup the fgArgTabEntryPtr of parentCall after
+// replacing oldArg with newArg
//
// Arguments:
// parentCall - a pointer to the parent call node
@@ -549,9 +541,7 @@ void Compiler::fgFixupIfCallArg(ArrayStack<GenTree *> *parentStack,
// newArg - the replacement argument node
//
-void Compiler::fgFixupArgTabEntryPtr(GenTreePtr parentCall,
- GenTreePtr oldArg,
- GenTreePtr newArg)
+void Compiler::fgFixupArgTabEntryPtr(GenTreePtr parentCall, GenTreePtr oldArg, GenTreePtr newArg)
{
assert(parentCall != nullptr);
assert(oldArg != nullptr);
@@ -562,7 +552,7 @@ void Compiler::fgFixupArgTabEntryPtr(GenTreePtr parentCall,
JITDUMP("old child was :\n");
DISPTREE(oldArg);
-
+
if (oldArg->gtFlags & GTF_LATE_ARG)
{
newArg->gtFlags |= GTF_LATE_ARG;
@@ -593,12 +583,11 @@ void Compiler::fgFixupArgTabEntryPtr(GenTreePtr parentCall,
// Notes:
// These comma forms are produced by earlier transformations.
-bool
-Rationalizer::CommaUselessChild(GenTree **ppTree, Compiler::fgWalkData *data)
+bool Rationalizer::CommaUselessChild(GenTree** ppTree, Compiler::fgWalkData* data)
{
- GenTree *tree = *ppTree;
- GenTree *subChild1, *subChild2;
- SplitData *tmpState = (SplitData *) data->pCallbackData;
+ GenTree* tree = *ppTree;
+ GenTree * subChild1, *subChild2;
+ SplitData* tmpState = (SplitData*)data->pCallbackData;
assert(tree->OperGet() == GT_COMMA);
@@ -621,10 +610,8 @@ Rationalizer::CommaUselessChild(GenTree **ppTree, Compiler::fgWalkData *data)
data->parentStack->Pop();
}
- if (subChild1 != nullptr &&
- subChild2 != nullptr &&
- (subChild1->OperIsLocalRead() ||
- (subChild1->OperGet() == GT_NOP && subChild1->gtGetOp1() == nullptr)))
+ if (subChild1 != nullptr && subChild2 != nullptr &&
+ (subChild1->OperIsLocalRead() || (subChild1->OperGet() == GT_NOP && subChild1->gtGetOp1() == nullptr)))
{
JITDUMP("found comma subtree with useless child:\n");
DISPTREE(tree);
@@ -650,7 +637,9 @@ Rationalizer::CommaUselessChild(GenTree **ppTree, Compiler::fgWalkData *data)
// however if that local node is a last use, codegen will not count it as such, and blow up
// so get rid of those here
if (subChild2->IsLocal())
+ {
subChild2->gtBashToNOP();
+ }
}
tmpState->thisPhase->comp->fgFixupIfCallArg(data->parentStack, tree, subChild2);
@@ -661,16 +650,14 @@ Rationalizer::CommaUselessChild(GenTree **ppTree, Compiler::fgWalkData *data)
// Call CommaUselessChild() to turn "comma(lcl x, lcl x)" into "lcl x"
-Compiler::fgWalkResult Rationalizer::CommaHelper(GenTree **ppTree, Compiler::fgWalkData *data)
+Compiler::fgWalkResult Rationalizer::CommaHelper(GenTree** ppTree, Compiler::fgWalkData* data)
{
- GenTree *tree = *ppTree;
+ GenTree* tree = *ppTree;
Compiler* comp = data->compiler;
- SplitData *tmpState = (SplitData *) data->pCallbackData;
-
+ SplitData* tmpState = (SplitData*)data->pCallbackData;
- if (tree->OperGet() == GT_COMMA &&
- CommaUselessChild(ppTree, data))
+ if (tree->OperGet() == GT_COMMA && CommaUselessChild(ppTree, data))
{
return Compiler::WALK_SKIP_SUBTREES;
}
@@ -695,7 +682,7 @@ Location Rationalizer::TreeTransformRationalization(Location loc)
if (statement->gtStmtIsTopLevel())
{
- comp->compCurBB = loc.block;
+ comp->compCurBB = loc.block;
comp->compCurStmt = statement;
while (tree->OperGet() == GT_COMMA)
@@ -722,16 +709,13 @@ Location Rationalizer::TreeTransformRationalization(Location loc)
}
}
- SplitData tmpState = {0};
- tmpState.root = statement;
+ SplitData tmpState = {nullptr};
+ tmpState.root = statement;
tmpState.continueSubtrees = true;
- tmpState.thisPhase = this;
- tmpState.block = loc.block;
+ tmpState.thisPhase = this;
+ tmpState.block = loc.block;
- comp->fgWalkTree(&(statement->gtStmt.gtStmtExpr),
- SimpleTransformHelper,
- NULL,
- &tmpState);
+ comp->fgWalkTree(&(statement->gtStmt.gtStmtExpr), SimpleTransformHelper, nullptr, &tmpState);
tree = statement->gtStmt.gtStmtExpr;
if (tree->OperIsLocalRead())
@@ -758,31 +742,31 @@ Location Rationalizer::TreeTransformRationalization(Location loc)
// The degenerate case is a single comma but (?????)
//
// ppTree : pointer to a link to a comma node
-// discard: true if any value produced by the node will ultimately be discarded.
-// In a tree of commas with some non-comma expressions hanging off the terminal commas,
-// ultimately all results of those expressions will be discarded except for
+// discard: true if any value produced by the node will ultimately be discarded.
+// In a tree of commas with some non-comma expressions hanging off the terminal commas,
+// ultimately all results of those expressions will be discarded except for
// the expression reached by following the second link of all commas on a path from the base
// ex: in "comma(comma(exp1, exp2), comma(exp3, comma(exp4, exp5)))"
// the only expression whose value makes it to the root of the comma tree is exp5
// nested: true if there is another comma as the parent
-//
-void Rationalizer::RecursiveRewriteComma(GenTree **ppTree, Compiler::fgWalkData *data, bool discard, bool nested)
+//
+void Rationalizer::RecursiveRewriteComma(GenTree** ppTree, Compiler::fgWalkData* data, bool discard, bool nested)
{
GenTree* comma = *ppTree;
assert(comma->gtOper == GT_COMMA);
- GenTreePtr op2 = comma->gtOp.gtOp2;
- GenTreePtr op1 = comma->gtOp.gtOp1;
- SplitData *tmpState = (SplitData *) data->pCallbackData;
- GenTreePtr stmt = tmpState->root;
+ GenTreePtr op2 = comma->gtOp.gtOp2;
+ GenTreePtr op1 = comma->gtOp.gtOp1;
+ SplitData* tmpState = (SplitData*)data->pCallbackData;
+ GenTreePtr stmt = tmpState->root;
Compiler* comp = data->compiler;
JITDUMP("recursive rewrite comma :\n");
DISPTREE(comma);
JITDUMP("\n");
-
+
if (op1->gtOper == GT_COMMA)
{
- // embed all of the expressions reachable from op1.
+ // embed all of the expressions reachable from op1.
// Since they feed into op1, their results are discarded (not used up the tree)
RecursiveRewriteComma(&(comma->gtOp.gtOp1), data, true, true);
}
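To make the comment's example concrete: in comma(comma(exp1, exp2), comma(exp3, comma(exp4, exp5))) only exp5's value reaches the root, since a comma yields the value of its second operand; every other leaf is evaluated purely for side effects, which is why RecursiveRewriteComma can hoist those subtrees out as separate (embedded) statements. A minimal sketch with a toy tree type (not GenTree):

#include <cassert>
#include <string>

// Toy comma tree: a node is either a leaf expression or comma(op1, op2), where
// op1 is evaluated first for side effects and op2 produces the node's value.
struct Expr
{
    std::string name; // leaf name; empty for a comma node
    Expr*       op1;  // evaluated first, value discarded
    Expr*       op2;  // evaluated second, value flows upward
};

// The value of a comma tree is the leaf reached by following op2 links all the
// way down; everything hanging off an op1 link is evaluated only for its side
// effects, which is what lets the rationalizer split those subtrees into
// separate statements.
Expr* ValueOf(Expr* e)
{
    return (e->op2 == nullptr) ? e : ValueOf(e->op2);
}

int main()
{
    Expr e1{"exp1", nullptr, nullptr}, e2{"exp2", nullptr, nullptr};
    Expr e3{"exp3", nullptr, nullptr}, e4{"exp4", nullptr, nullptr}, e5{"exp5", nullptr, nullptr};
    Expr c12{"", &e1, &e2};
    Expr c45{"", &e4, &e5};
    Expr c345{"", &e3, &c45};
    Expr root{"", &c12, &c345};

    assert(ValueOf(&root)->name == "exp5"); // matches the comment's example
    return 0;
}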
@@ -802,60 +786,65 @@ void Rationalizer::RecursiveRewriteComma(GenTree **ppTree, Compiler::fgWalkData
GenTree* commaNext = comma->gtNext;
op1 = comma->gtOp.gtOp1;
-
+
// op1 of the comma will now be a new statement, either top-level or embedded
// depending on the execution order.
// The comma is simply eliminated.
GenTreePtr newStmt = comp->fgMakeEmbeddedStmt(tmpState->block, op1, tmpState->root);
if (!nested)
+ {
comp->fgFixupIfCallArg(data->parentStack, comma, *ppTree);
-
- JITDUMP("Split comma into %s statements. New statement:\n", (newStmt->gtFlags & GTF_STMT_TOP_LEVEL) ? "top-level" : "embedded");
+ }
+
+ JITDUMP("Split comma into %s statements. New statement:\n",
+ (newStmt->gtFlags & GTF_STMT_TOP_LEVEL) ? "top-level" : "embedded");
DISPTREE(newStmt);
JITDUMP("\nOld statement:\n");
DISPTREE(stmt);
JITDUMP("\n");
- (void) ((Rationalizer *)tmpState->thisPhase)->TreeTransformRationalization(Location(newStmt, tmpState->block));
+ (void)((Rationalizer*)tmpState->thisPhase)->TreeTransformRationalization(Location(newStmt, tmpState->block));
// In a sense, assignment nodes have two destinations: 1) whatever they are writing to
// and 2) they also produce the value that was written so their parent can consume it.
- // In the case where the parent is going to consume the value,
+ // In the case where the parent is going to consume the value,
// insert the assign as an embedded statement and clone the destination to replace itself in the tree.
-
+
if (op2->OperGet() == GT_ASG && !discard)
{
JITDUMP("op2 of comma was an assignment, doing additional work\n");
assert(op2->gtNext);
- GenTree* dst = op2->gtOp.gtOp1;
- GenTree* newSrc = nullptr;
+ GenTree* dst = op2->gtOp.gtOp1;
+ GenTree* newSrc = nullptr;
GenTreeStmt* newStmt;
newStmt = comp->fgMakeEmbeddedStmt(tmpState->block, op2, tmpState->root);
- // can this happen ?
+ // can this happen ?
assert(dst->OperIsLocal());
-
+
newSrc = comp->gtClone(dst);
newSrc->gtFlags &= ~GTF_VAR_DEF;
*ppTree = newSrc;
comp->fgInsertTreeInListBefore(newSrc, commaNext, stmt->AsStmt());
-
- JITDUMP("Split comma into %s statements. New statement:\n", (newStmt->gtFlags & GTF_STMT_TOP_LEVEL) ? "top-level":"embedded");
+ JITDUMP("Split comma into %s statements. New statement:\n",
+ (newStmt->gtFlags & GTF_STMT_TOP_LEVEL) ? "top-level" : "embedded");
DISPTREE(newStmt);
JITDUMP("\nOld statement:\n");
DISPTREE(stmt);
JITDUMP("\n");
- (void) ((Rationalizer *)tmpState->thisPhase)->TreeTransformRationalization(Location(newStmt, tmpState->block));
+ (void)((Rationalizer*)tmpState->thisPhase)->TreeTransformRationalization(Location(newStmt, tmpState->block));
if (!nested)
+ {
comp->fgFixupIfCallArg(data->parentStack, comma, newSrc);
+ }
- (void) ((Rationalizer *)tmpState->thisPhase)->TreeTransformRationalization(Location(newStmt, tmpState->block));
+ (void)((Rationalizer*)tmpState->thisPhase)->TreeTransformRationalization(Location(newStmt, tmpState->block));
return;
}
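The "two destinations" comment above is the crux of the GT_ASG case: when the parent still needs the assigned value, the assignment is peeled off into its own embedded statement and a clone of the destination local, with its def flag cleared, takes its place in the consuming tree. A toy sketch of that shape, using made-up types rather than the JIT's:

#include <cassert>
#include <string>
#include <vector>

// Toy local-variable node; 'isDef' plays the role of GTF_VAR_DEF for illustration.
struct ToyLocal
{
    std::string name;
    bool        isDef;
};

struct ToyAssign
{
    ToyLocal dst;
    int      rhs;
};

// Peels "use(x = rhs)" into "x = rhs; use(x)": the assignment is appended to the
// statement list and the consumer receives a clone of the destination with the
// def flag cleared, analogous in spirit to fgMakeEmbeddedStmt + gtClone above.
ToyLocal PeelAssignmentForUse(std::vector<ToyAssign>& stmts, const ToyAssign& asg)
{
    stmts.push_back(asg);   // the assignment now executes as its own statement
    ToyLocal use = asg.dst; // clone the destination
    use.isDef    = false;   // the clone is a read, not a definition
    return use;
}

int main()
{
    std::vector<ToyAssign> stmts;
    ToyLocal use = PeelAssignmentForUse(stmts, ToyAssign{{"x", true}, 42});
    assert(stmts.size() == 1 && use.name == "x" && !use.isDef);
    return 0;
}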
@@ -879,12 +868,12 @@ void Rationalizer::RecursiveRewriteComma(GenTree **ppTree, Compiler::fgWalkData
// Notes:
// If op1 of the comma is a (unused) lclVar, it is deleted by CommmaUselessChild()
-void Rationalizer::RewriteOneComma(GenTree **ppTree, Compiler::fgWalkData *data)
+void Rationalizer::RewriteOneComma(GenTree** ppTree, Compiler::fgWalkData* data)
{
GenTreePtr comma = *ppTree;
Compiler* comp = data->compiler;
- SplitData* tmpState = (SplitData *) data->pCallbackData;
- GenTreePtr stmt = tmpState->root;
+ SplitData* tmpState = (SplitData*)data->pCallbackData;
+ GenTreePtr stmt = tmpState->root;
assert(comma->gtOper == GT_COMMA);
GenTreePtr op2 = comma->gtOp.gtOp2;
@@ -892,15 +881,15 @@ void Rationalizer::RewriteOneComma(GenTree **ppTree, Compiler::fgWalkData *data)
// Remove the comma from the tree; we know it has non-null gtPrev, otherwise
// we would have handled it as a top-level comma.
- assert (comma->gtPrev != nullptr);
+ assert(comma->gtPrev != nullptr);
JITDUMP("Rationalizing comma:");
DISPNODE(comma);
if (!CommaUselessChild(ppTree, data))
{
- // Set 'discard' to true when the comma tree does not return a value
+ // Set 'discard' to true when the comma tree does not return a value
// If the comma's type is TYP_VOID then 'discard' is set to true
- // otherwise 'discard' is set to false
+ // otherwise 'discard' is set to false
bool discard = (comma->TypeGet() == TYP_VOID);
RecursiveRewriteComma(ppTree, data, discard, false);
}
@@ -919,12 +908,12 @@ void Rationalizer::RewriteOneComma(GenTree **ppTree, Compiler::fgWalkData *data)
// be required.
//
void Rationalizer::RewriteInitBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data)
-{
+{
#ifdef FEATURE_SIMD
Compiler* comp = data->compiler;
// No lowering is needed for non-SIMD nodes, so early out if featureSIMD is not enabled.
- if (!comp->featureSIMD)
+ if (!comp->featureSIMD)
{
return;
}
@@ -939,46 +928,47 @@ void Rationalizer::RewriteInitBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
return;
}
- GenTree* dst = dstAddr->gtGetOp1();
+ GenTree* dst = dstAddr->gtGetOp1();
var_types baseType = comp->getBaseTypeOfSIMDLocal(dst);
if (baseType == TYP_UNKNOWN)
{
return;
}
- CORINFO_CLASS_HANDLE typeHnd = comp->lvaTable[dst->AsLclVarCommon()->gtLclNum].lvVerTypeInfo.GetClassHandle();
- unsigned simdLocalSize = comp->getSIMDTypeSizeInBytes(typeHnd);
+ CORINFO_CLASS_HANDLE typeHnd = comp->lvaTable[dst->AsLclVarCommon()->gtLclNum].lvVerTypeInfo.GetClassHandle();
+ unsigned simdLocalSize = comp->getSIMDTypeSizeInBytes(typeHnd);
JITDUMP("Rewriting SIMD InitBlk\n");
DISPTREE(tree);
- // Get rid of the parent node in GT_ADDR(GT_LCL_VAR)
+ // Get rid of the parent node in GT_ADDR(GT_LCL_VAR)
comp->fgSnipInnerNode(dstAddr);
- assert((dst->gtFlags &GTF_VAR_USEASG) == 0);
+ assert((dst->gtFlags & GTF_VAR_USEASG) == 0);
// Remove 'size' from execution order
// There are currently only three sizes supported: 8 bytes, 16 bytes or the vector register length.
GenTreeIntConCommon* sizeNode = tree->Size()->AsIntConCommon();
- unsigned int size = (unsigned int) roundUp(sizeNode->IconValue(), TARGET_POINTER_SIZE);
- var_types simdType = comp->getSIMDTypeForSize(size);
+ unsigned int size = (unsigned int)roundUp(sizeNode->IconValue(), TARGET_POINTER_SIZE);
+ var_types simdType = comp->getSIMDTypeForSize(size);
assert(roundUp(simdLocalSize, TARGET_POINTER_SIZE) == size);
comp->fgSnipInnerNode(sizeNode);
- GenTree* initVal = tree->InitVal();
- GenTreeSIMD* simdTree = new (comp, GT_SIMD) GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, baseType, (unsigned)sizeNode->IconValue());
+ GenTree* initVal = tree->InitVal();
+ GenTreeSIMD* simdTree = new (comp, GT_SIMD)
+ GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, baseType, (unsigned)sizeNode->IconValue());
dst->SetOper(GT_STORE_LCL_VAR);
- dst->gtType = simdType;
+ dst->gtType = simdType;
dst->gtOp.gtOp1 = simdTree;
dst->gtFlags |= (simdTree->gtFlags & GTF_ALL_EFFECT);
- initVal->gtNext = simdTree;
+ initVal->gtNext = simdTree;
simdTree->gtPrev = initVal;
simdTree->gtNext = dst;
- dst->gtPrev = simdTree;
+ dst->gtPrev = simdTree;
GenTree* nextNode = tree->gtNext;
- dst->gtNext = nextNode;
+ dst->gtNext = nextNode;
if (nextNode != nullptr)
{
nextNode->gtPrev = dst;
@@ -1004,7 +994,7 @@ void Rationalizer::RewriteInitBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
//
// If either the source or the dst are known to be SIMD (a lclVar or SIMD intrinsic),
// get the simdType (TYP_DOUBLE or a SIMD type for SSE2) from the size of the SIMD node.
-//
+//
// For the source:
// - If it is a SIMD intrinsic or a lvSIMDType lclVar, change the node type to simdType.
// - Otherwise, add a GT_IND of simdType.
@@ -1016,31 +1006,31 @@ void Rationalizer::RewriteInitBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
// be required.
//
void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data)
-{
+{
#ifdef FEATURE_SIMD
Compiler* comp = data->compiler;
    // No need to transform non-SIMD nodes if featureSIMD is not enabled.
- if (!comp->featureSIMD)
+ if (!comp->featureSIMD)
{
return;
}
// See if this is a SIMD copyBlk
- GenTreeCpBlk* tree = (*ppTree)->AsCpBlk();
- genTreeOps oper = GT_NONE;
- GenTreePtr dstAddr = tree->Dest();
- GenTree* srcAddr = tree->Source();
-
+ GenTreeCpBlk* tree = (*ppTree)->AsCpBlk();
+ genTreeOps oper = GT_NONE;
+ GenTreePtr dstAddr = tree->Dest();
+ GenTree* srcAddr = tree->Source();
+
    // Do not transform if neither src nor dst is known to be a SIMD type.
    // If the src tree's type is something we cannot reason about but dst is known to be of a SIMD type,
    // we will treat the src tree as a SIMD type, and vice versa.
if (!(comp->isAddrOfSIMDType(srcAddr) || comp->isAddrOfSIMDType(dstAddr)))
{
return;
- }
+ }
- // At this point it is known to be a copyblk of SIMD vectors and we can
+ // At this point it is known to be a copyblk of SIMD vectors and we can
// start transforming the original tree. Prior to this point do not perform
// any modifications to the original tree.
JITDUMP("\nRewriting SIMD CopyBlk\n");
@@ -1049,7 +1039,7 @@ void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
// Remove 'size' from execution order
    // There are currently only a few supported sizes: 8 bytes, 12 bytes, 16 bytes, or the vector register length.
GenTreeIntConCommon* sizeNode = tree->Size()->AsIntConCommon();
- var_types simdType = comp->getSIMDTypeForSize((unsigned int) sizeNode->IconValue());
+ var_types simdType = comp->getSIMDTypeForSize((unsigned int)sizeNode->IconValue());
comp->fgSnipInnerNode(sizeNode);
// Is destination a lclVar which is not an arg?
@@ -1059,9 +1049,9 @@ void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
{
// Get rid of parent node in GT_ADDR(GT_LCL_VAR)
comp->fgSnipInnerNode(dstAddr);
- simdDst = dstAddr->gtGetOp1();
+ simdDst = dstAddr->gtGetOp1();
simdDst->gtType = simdType;
- oper = GT_STORE_LCL_VAR;
+ oper = GT_STORE_LCL_VAR;
// For structs that are padded (e.g. Vector3f, Vector3i), the morpher will have marked them
// as GTF_VAR_USEASG. Unmark them.
@@ -1071,7 +1061,7 @@ void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
{
// Address of a non-local var
simdDst = dstAddr;
- oper = GT_STOREIND;
+ oper = GT_STOREIND;
}
// Src: Get rid of parent node of GT_ADDR(..) if its child happens to be of a SIMD type.
@@ -1093,9 +1083,9 @@ void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
GenTree* indir = comp->gtNewOperNode(GT_IND, simdType, srcAddr);
indir->SetCosts(IND_COST_EX, 2);
srcAddr->InsertAfterSelf(indir);
-
+
tree->gtGetOp1()->gtOp.gtOp2 = indir;
- simdSrc = indir;
+ simdSrc = indir;
}
simdSrc->gtType = simdType;
@@ -1107,8 +1097,8 @@ void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
assert(simdDst != nullptr);
assert(simdSrc != nullptr);
- GenTree *newTree = nullptr;
- GenTree* list = tree->gtGetOp1();
+ GenTree* newTree = nullptr;
+ GenTree* list = tree->gtGetOp1();
if (oper == GT_STORE_LCL_VAR)
{
// get rid of the list node
@@ -1116,11 +1106,11 @@ void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
newTree = simdDst;
newTree->SetOper(oper);
- newTree->gtOp.gtOp1 = simdSrc;
- newTree->gtType = simdType;
+ newTree->gtOp.gtOp1 = simdSrc;
+ newTree->gtType = simdType;
newTree->gtFlags |= (simdSrc->gtFlags & GTF_ALL_EFFECT);
simdSrc->gtNext = newTree;
- newTree->gtPrev = simdSrc;
+ newTree->gtPrev = simdSrc;
}
else
{
@@ -1132,11 +1122,11 @@ void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
newTree->gtFlags |= (simdSrc->gtFlags & GTF_ALL_EFFECT);
newTree->gtOp.gtOp1 = simdDst;
newTree->gtOp.gtOp2 = simdSrc;
- }
+ }
assert(newTree != nullptr);
GenTree* nextNode = tree->gtNext;
- newTree->gtNext = nextNode;
+ newTree->gtNext = nextNode;
if (nextNode != nullptr)
{
nextNode->gtPrev = newTree;
@@ -1163,10 +1153,10 @@ void Rationalizer::RewriteCopyBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data
// be required.
//
void Rationalizer::RewriteObj(GenTreePtr* ppTree, Compiler::fgWalkData* data)
-{
+{
#ifdef FEATURE_SIMD
- Compiler* comp = data->compiler;
- GenTreeObj* obj = (*ppTree)->AsObj();
+ Compiler* comp = data->compiler;
+ GenTreeObj* obj = (*ppTree)->AsObj();
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
// For UNIX struct passing, we can have Obj nodes for arguments.
@@ -1203,7 +1193,7 @@ void Rationalizer::RewriteObj(GenTreePtr* ppTree, Compiler::fgWalkData* data)
// Vector4.Dot(default(Vector4) * 2f, Vector4.One);
if (obj->gtNext == nullptr)
{
- SplitData *tmpState = (SplitData *) data->pCallbackData;
+ SplitData* tmpState = (SplitData*)data->pCallbackData;
comp->fgSnipNode(tmpState->root->AsStmt(), obj);
}
else
@@ -1215,7 +1205,7 @@ void Rationalizer::RewriteObj(GenTreePtr* ppTree, Compiler::fgWalkData* data)
*ppTree = src;
}
- else
+ else
{
obj->SetOper(GT_IND);
obj->gtType = simdType;
@@ -1240,25 +1230,26 @@ void Rationalizer::RewriteObj(GenTreePtr* ppTree, Compiler::fgWalkData* data)
// None.
//
-void Rationalizer::RewriteNodeAsCall(GenTreePtr* ppTree, Compiler::fgWalkData* data,
- CORINFO_METHOD_HANDLE callHnd,
+void Rationalizer::RewriteNodeAsCall(GenTreePtr* ppTree,
+ Compiler::fgWalkData* data,
+ CORINFO_METHOD_HANDLE callHnd,
#ifdef FEATURE_READYTORUN_COMPILER
- CORINFO_CONST_LOOKUP entryPoint,
+ CORINFO_CONST_LOOKUP entryPoint,
#endif
- GenTreeArgList* args)
+ GenTreeArgList* args)
{
- GenTreePtr tree = *ppTree;
- Compiler* comp = data->compiler;
- SplitData* tmpState = (SplitData *)data->pCallbackData;
- GenTreePtr root = tmpState->root;
+ GenTreePtr tree = *ppTree;
+ Compiler* comp = data->compiler;
+ SplitData* tmpState = (SplitData*)data->pCallbackData;
+ GenTreePtr root = tmpState->root;
GenTreePtr treeFirstNode = comp->fgGetFirstNode(tree);
- GenTreePtr treeLastNode = tree;
- GenTreePtr treePrevNode = treeFirstNode->gtPrev;
- GenTreePtr treeNextNode = treeLastNode->gtNext;
+ GenTreePtr treeLastNode = tree;
+ GenTreePtr treePrevNode = treeFirstNode->gtPrev;
+ GenTreePtr treeNextNode = treeLastNode->gtNext;
// Create the call node
GenTreeCall* call = comp->gtNewCallNode(CT_USER_FUNC, callHnd, tree->gtType, args);
- call = comp->fgMorphArgs(call);
+ call = comp->fgMorphArgs(call);
call->CopyCosts(tree);
#ifdef FEATURE_READYTORUN_COMPILER
call->gtCall.setEntryPoint(entryPoint);
@@ -1266,7 +1257,7 @@ void Rationalizer::RewriteNodeAsCall(GenTreePtr* ppTree, Compiler::fgWalkData* d
// Replace "tree" with "call"
*ppTree = call;
-
+
// Rebuild the evaluation order.
comp->gtSetStmtInfo(root);
@@ -1276,13 +1267,13 @@ void Rationalizer::RewriteNodeAsCall(GenTreePtr* ppTree, Compiler::fgWalkData* d
// Restore linear-order Prev and Next for "call".
if (treePrevNode)
{
- treeFirstNode = comp->fgGetFirstNode(call);
+ treeFirstNode = comp->fgGetFirstNode(call);
treeFirstNode->gtPrev = treePrevNode;
- treePrevNode->gtNext = treeFirstNode;
+ treePrevNode->gtNext = treeFirstNode;
}
else
{
- // Update the linear oder start of "root" if treeFirstNode
+        // Update the linear order start of "root" if treeFirstNode
// appears to have replaced the original first node.
assert(treeFirstNode == root->gtStmt.gtStmtList);
root->gtStmt.gtStmtList = comp->fgGetFirstNode(call);
@@ -1290,18 +1281,18 @@ void Rationalizer::RewriteNodeAsCall(GenTreePtr* ppTree, Compiler::fgWalkData* d
if (treeNextNode)
{
- treeLastNode = call;
+ treeLastNode = call;
treeLastNode->gtNext = treeNextNode;
treeNextNode->gtPrev = treeLastNode;
}
-
+
comp->fgFixupIfCallArg(data->parentStack, tree, call);
// Propagate flags of "call" to its parents.
// 0 is current node, so start at 1
for (int i = 1; i < data->parentStack->Height(); i++)
{
- GenTree *node = data->parentStack->Index(i);
+ GenTree* node = data->parentStack->Index(i);
node->gtFlags |= GTF_CALL;
node->gtFlags |= call->gtFlags & GTF_ALL_EFFECT;
}
@@ -1324,15 +1315,15 @@ void Rationalizer::RewriteNodeAsCall(GenTreePtr* ppTree, Compiler::fgWalkData* d
// Return Value:
// None.
//
-// Some intrinsics, such as operation Sqrt, are rewritten back to calls, and some are not.
+// Some intrinsics, such as Sqrt, are rewritten back to calls, and some are not.
// The ones that are not rewritten here must be handled in Codegen.
// Conceptually, Lowering is the right place to do this rewrite; keeping it in rationalization is
// mainly for throughput reasons.
void Rationalizer::RewriteIntrinsicAsUserCall(GenTreePtr* ppTree, Compiler::fgWalkData* data)
-{
- GenTreePtr tree = *ppTree;
- Compiler* comp = data->compiler;
+{
+ GenTreePtr tree = *ppTree;
+ Compiler* comp = data->compiler;
GenTreeArgList* args;
assert(tree->OperGet() == GT_INTRINSIC);
@@ -1346,31 +1337,30 @@ void Rationalizer::RewriteIntrinsicAsUserCall(GenTreePtr* ppTree, Compiler::fgWa
args = comp->gtNewArgList(tree->gtOp.gtOp1, tree->gtOp.gtOp2);
}
- RewriteNodeAsCall(ppTree, data,
- tree->gtIntrinsic.gtMethodHandle,
+ RewriteNodeAsCall(ppTree, data, tree->gtIntrinsic.gtMethodHandle,
#ifdef FEATURE_READYTORUN_COMPILER
- tree->gtIntrinsic.gtEntryPoint,
+ tree->gtIntrinsic.gtEntryPoint,
#endif
- args);
+ args);
}
// tree walker callback function that rewrites ASG and ADDR nodes
-Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Compiler::fgWalkData *data)
+Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree** ppTree, Compiler::fgWalkData* data)
{
- GenTree *tree = *ppTree;
- Compiler* comp = data->compiler;
- SplitData *tmpState = (SplitData *) data->pCallbackData;
+ GenTree* tree = *ppTree;
+ Compiler* comp = data->compiler;
+ SplitData* tmpState = (SplitData*)data->pCallbackData;
while (tree->OperGet() == GT_COMMA)
{
RewriteOneComma(ppTree, data);
tree = *ppTree;
}
-
+
if (tree->OperIsAssignment())
{
- GenTree *lhs = tree->gtGetOp1();
- GenTree *dataSrc = tree->gtGetOp2();
+ GenTree* lhs = tree->gtGetOp1();
+ GenTree* dataSrc = tree->gtGetOp2();
// the other assign ops should have already been rewritten to ASG
assert(tree->OperGet() == GT_ASG);
@@ -1382,18 +1372,22 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
}
switch (lhs->OperGet())
{
- case GT_LCL_VAR:
- case GT_LCL_FLD:
- case GT_REG_VAR:
- case GT_PHI_ARG:
- MorphAsgIntoStoreLcl(tmpState->root->AsStmt(), tree);
- tree->gtFlags &= ~GTF_REVERSE_OPS;
- break;
-
- case GT_IND:
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
+ case GT_REG_VAR:
+ case GT_PHI_ARG:
+ MorphAsgIntoStoreLcl(tmpState->root->AsStmt(), tree);
+ tree->gtFlags &= ~GTF_REVERSE_OPS;
+ break;
+
+ case GT_IND:
{
- GenTreeStoreInd *store = new(comp, GT_STOREIND) GenTreeStoreInd(lhs->TypeGet(), lhs->gtGetOp1(), dataSrc);
- if (tree->IsReverseOp()) store->gtFlags |= GTF_REVERSE_OPS;
+ GenTreeStoreInd* store =
+ new (comp, GT_STOREIND) GenTreeStoreInd(lhs->TypeGet(), lhs->gtGetOp1(), dataSrc);
+ if (tree->IsReverseOp())
+ {
+ store->gtFlags |= GTF_REVERSE_OPS;
+ }
store->gtFlags |= (lhs->gtFlags & GTF_IND_FLAGS);
store->CopyCosts(tree);
@@ -1402,17 +1396,19 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
JITDUMP("\n");
// Snip out the old GT_IND node
- GenTreePtr indPrev = lhs->gtPrev;
- indPrev->gtNext = lhs->gtNext;
+ GenTreePtr indPrev = lhs->gtPrev;
+ indPrev->gtNext = lhs->gtNext;
indPrev->gtNext->gtPrev = indPrev;
// Replace "tree" with "store"
- *ppTree = store;
+ *ppTree = store;
store->gtNext = tree->gtNext;
store->gtPrev = tree->gtPrev;
if (store->gtNext != nullptr)
+ {
store->gtNext->gtPrev = store;
- assert (store->gtPrev != nullptr);
+ }
+ assert(store->gtPrev != nullptr);
store->gtPrev->gtNext = store;
// Since "tree" is replaced with "store", pop "tree" node (i.e the current node)
@@ -1427,10 +1423,10 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
}
break;
- case GT_CLS_VAR:
+ case GT_CLS_VAR:
{
- lhs->gtOper = GT_CLS_VAR_ADDR;
- lhs->gtType = TYP_BYREF;
+ lhs->gtOper = GT_CLS_VAR_ADDR;
+ lhs->gtType = TYP_BYREF;
tree->gtOper = GT_STOREIND;
JITDUMP("Rewriting GT_ASG(GT_CLS_VAR, X) to GT_STOREIND(GT_CLS_VAR_ADDR, X):\n");
@@ -1439,9 +1435,9 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
}
break;
- default:
- assert(!"unhandled op\n");
- break;
+ default:
+ assert(!"unhandled op\n");
+ break;
}
}
else if (tree->OperGet() == GT_BOX)
@@ -1457,7 +1453,7 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
}
else if (tree->gtOper == GT_ADDR)
{
- GenTree *child = tree->gtOp.gtOp1;
+ GenTree* child = tree->gtOp.gtOp1;
if (child->IsLocal())
{
// We are changing the child from GT_LCL_VAR TO GT_LCL_VAR_ADDR.
@@ -1502,11 +1498,10 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
DISPTREE(*ppTree);
JITDUMP("\n");
}
- else if (tree->gtOper == GT_NOP
- && tree->gtOp.gtOp1)
+ else if (tree->gtOper == GT_NOP && tree->gtOp.gtOp1)
{
// fgmorph sometimes inserts NOP nodes between def and use
- // supposedly 'to prevent constant folding'
+ // supposedly 'to prevent constant folding'
Compiler::fgSnipNode(tmpState->root->AsStmt(), tree);
*ppTree = tree->gtOp.gtOp1;
comp->fgFixupIfCallArg(data->parentStack, tree, *ppTree);
@@ -1526,7 +1521,7 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
{
// rewrite "clsvar" as [&clsvar] so indirs are explicit
tree->gtOper = GT_CLS_VAR_ADDR;
- GenTree *ind = comp->gtNewOperNode(GT_IND, tree->TypeGet(), tree);
+ GenTree* ind = comp->gtNewOperNode(GT_IND, tree->TypeGet(), tree);
tree->gtType = TYP_BYREF;
ind->CopyCosts(tree);
tree->InsertAfterSelf(ind, tmpState->root->AsStmt());
@@ -1538,14 +1533,14 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
JITDUMP("\n");
}
#endif // _TARGET_XARCH_
- else if ((tree->gtOper == GT_INTRINSIC) &&
+ else if ((tree->gtOper == GT_INTRINSIC) &&
Compiler::IsIntrinsicImplementedByUserCall(tree->gtIntrinsic.gtIntrinsicId))
{
RewriteIntrinsicAsUserCall(ppTree, data);
}
#ifdef FEATURE_SIMD
else
- {
+ {
assert(tree->gtOper != GT_INTRINSIC || Compiler::IsTargetIntrinsic(tree->gtIntrinsic.gtIntrinsicId));
// Transform the treeNode types for SIMD nodes.
@@ -1553,48 +1548,48 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
// set the actual type according to its size (which may be less than a full
// vector register).
unsigned simdSize = 0;
- switch(tree->gtOper)
+ switch (tree->gtOper)
{
- default:
- // Nothing to do for most nodes.
- break;
-
- case GT_INITBLK:
- RewriteInitBlk(ppTree, data);
- break;
-
- case GT_COPYBLK:
- RewriteCopyBlk(ppTree, data);
- break;
-
- case GT_OBJ:
- RewriteObj(ppTree, data);
- break;
-
- case GT_LCL_FLD:
- case GT_STORE_LCL_FLD:
- FixupIfSIMDLocal(comp, tree->AsLclVarCommon());
- break;
-
- case GT_STOREIND:
- case GT_IND:
- if (tree->gtType == TYP_STRUCT)
- {
- GenTree* addr = tree->AsIndir()->Addr();
- assert(addr->OperIsLocal() && addr->TypeGet() == TYP_BYREF);
- LclVarDsc* varDsc = &(comp->lvaTable[addr->AsLclVarCommon()->gtLclNum]);
- assert(varDsc->lvSIMDType);
- simdSize = (unsigned int) roundUp(varDsc->lvExactSize, TARGET_POINTER_SIZE);
- tree->gtType = comp->getSIMDTypeForSize(simdSize);
- }
- break;
+ default:
+ // Nothing to do for most nodes.
+ break;
+
+ case GT_INITBLK:
+ RewriteInitBlk(ppTree, data);
+ break;
+
+ case GT_COPYBLK:
+ RewriteCopyBlk(ppTree, data);
+ break;
+
+ case GT_OBJ:
+ RewriteObj(ppTree, data);
+ break;
+
+ case GT_LCL_FLD:
+ case GT_STORE_LCL_FLD:
+ FixupIfSIMDLocal(comp, tree->AsLclVarCommon());
+ break;
+
+ case GT_STOREIND:
+ case GT_IND:
+ if (tree->gtType == TYP_STRUCT)
+ {
+ GenTree* addr = tree->AsIndir()->Addr();
+ assert(addr->OperIsLocal() && addr->TypeGet() == TYP_BYREF);
+ LclVarDsc* varDsc = &(comp->lvaTable[addr->AsLclVarCommon()->gtLclNum]);
+ assert(varDsc->lvSIMDType);
+ simdSize = (unsigned int)roundUp(varDsc->lvExactSize, TARGET_POINTER_SIZE);
+ tree->gtType = comp->getSIMDTypeForSize(simdSize);
+ }
+ break;
- case GT_SIMD:
+ case GT_SIMD:
{
noway_assert(comp->featureSIMD);
GenTreeSIMD* simdTree = (*ppTree)->AsSIMD();
- simdSize = simdTree->gtSIMDSize;
- var_types simdType = comp->getSIMDTypeForSize(simdSize);
+ simdSize = simdTree->gtSIMDSize;
+ var_types simdType = comp->getSIMDTypeForSize(simdSize);
            // TODO-Cleanup: This is no longer required once we plumb SIMD types through the front-end.
if (simdTree->gtType == TYP_I_IMPL && simdTree->gtSIMDSize == TARGET_POINTER_SIZE)
{
@@ -1613,8 +1608,10 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
// Rewrite this as an explicit load.
JITDUMP("Rewriting GT_SIMD array init as an explicit load:\n");
unsigned int baseTypeSize = genTypeSize(simdTree->gtSIMDBaseType);
- GenTree* address = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, simdTree->gtOp1, simdTree->gtOp2, baseTypeSize, offsetof(CORINFO_Array, u1Elems));
- GenTree *ind = comp->gtNewOperNode(GT_IND, simdType, address);
+ GenTree* address =
+ new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, simdTree->gtOp1, simdTree->gtOp2, baseTypeSize,
+ offsetof(CORINFO_Array, u1Elems));
+ GenTree* ind = comp->gtNewOperNode(GT_IND, simdType, address);
address->CopyCosts(simdTree);
ind->CopyCosts(simdTree);
@@ -1625,14 +1622,14 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
// We don't have any top-level GT_SIMD nodes.
assert(addressPrev != nullptr);
- address->gtPrev = addressPrev;
+ address->gtPrev = addressPrev;
addressPrev->gtNext = address;
- ind->gtPrev = address;
+ ind->gtPrev = address;
address->gtNext = ind;
indNext->gtPrev = ind;
- ind->gtNext = indNext;
+ ind->gtNext = indNext;
// Replace "simdTree" with "ind"
*ppTree = ind;
@@ -1640,7 +1637,7 @@ Compiler::fgWalkResult Rationalizer::SimpleTransformHelper(GenTree **ppTree, Com
DISPTREE(tmpState->root);
JITDUMP("\n");
}
- else
+ else
{
// This code depends on the fact that NONE of the SIMD intrinsics take vector operands
// of a different width. If that assumption changes, we will EITHER have to make these type
@@ -1694,47 +1691,45 @@ void Rationalizer::FixupIfSIMDLocal(Compiler* comp, GenTreeLclVarCommon* tree)
// Don't mark byref of SIMD vector as a SIMD type.
    // Note that although struct args are marked as lvIsSIMD=true,
- // the tree node representing such an arg should not be
+ // the tree node representing such an arg should not be
// marked as a SIMD type, since it is a byref of a SIMD type.
if (!varTypeIsSIMD(varDsc))
{
return;
}
- switch(tree->OperGet())
- {
- default:
- // Nothing to do for most tree nodes.
- break;
-
- case GT_LCL_FLD:
- // We may see a lclFld used for pointer-sized structs that have been morphed, in which
- // case we can change it to GT_LCL_VAR.
- // However, we may also see a lclFld with FieldSeqStore::NotAField() for structs that can't
- // be analyzed, e.g. those with overlapping fields such as the IL implementation of Vector<T>.
- if ((tree->AsLclFld()->gtFieldSeq == FieldSeqStore::NotAField()) &&
- (tree->AsLclFld()->gtLclOffs == 0) &&
- (tree->gtType == TYP_I_IMPL) &&
- (varDsc->lvExactSize == TARGET_POINTER_SIZE))
- {
- tree->SetOper(GT_LCL_VAR);
+ switch (tree->OperGet())
+ {
+ default:
+ // Nothing to do for most tree nodes.
+ break;
+
+ case GT_LCL_FLD:
+ // We may see a lclFld used for pointer-sized structs that have been morphed, in which
+ // case we can change it to GT_LCL_VAR.
+ // However, we may also see a lclFld with FieldSeqStore::NotAField() for structs that can't
+ // be analyzed, e.g. those with overlapping fields such as the IL implementation of Vector<T>.
+ if ((tree->AsLclFld()->gtFieldSeq == FieldSeqStore::NotAField()) && (tree->AsLclFld()->gtLclOffs == 0) &&
+ (tree->gtType == TYP_I_IMPL) && (varDsc->lvExactSize == TARGET_POINTER_SIZE))
+ {
+ tree->SetOper(GT_LCL_VAR);
+ tree->gtFlags &= ~(GTF_VAR_USEASG);
+ }
+ else
+ {
+ // If we access a field of a SIMD lclVar via GT_LCL_FLD, it cannot have been
+ // independently promoted.
+ assert(comp->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT);
+ return;
+ }
+ break;
+ case GT_STORE_LCL_FLD:
+ assert(tree->gtType == TYP_I_IMPL);
+ tree->SetOper(GT_STORE_LCL_VAR);
tree->gtFlags &= ~(GTF_VAR_USEASG);
- }
- else
- {
- // If we access a field of a SIMD lclVar via GT_LCL_FLD, it cannot have been
- // independently promoted.
- assert(comp->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT);
- return;
- }
- break;
- case GT_STORE_LCL_FLD:
- assert(tree->gtType == TYP_I_IMPL);
- tree->SetOper(GT_STORE_LCL_VAR);
- tree->gtFlags &= ~(GTF_VAR_USEASG);
- break;
- }
- unsigned simdSize = (unsigned int) roundUp(varDsc->lvExactSize, TARGET_POINTER_SIZE);
- tree->gtType = comp->getSIMDTypeForSize(simdSize);
+ break;
+ }
+ unsigned simdSize = (unsigned int)roundUp(varDsc->lvExactSize, TARGET_POINTER_SIZE);
+ tree->gtType = comp->getSIMDTypeForSize(simdSize);
#endif // FEATURE_SIMD
}
@@ -1745,7 +1740,7 @@ void Rationalizer::ValidateStatement(Location loc)
ValidateStatement(loc.tree, loc.block);
}
-void Rationalizer::ValidateStatement(GenTree *tree, BasicBlock *block)
+void Rationalizer::ValidateStatement(GenTree* tree, BasicBlock* block)
{
assert(tree->gtOper == GT_STMT);
DBEXEC(TRUE, JitTls::GetCompiler()->fgDebugCheckNodeLinks(block, tree));
@@ -1754,18 +1749,14 @@ void Rationalizer::ValidateStatement(GenTree *tree, BasicBlock *block)
// sanity checks that apply to all kinds of IR
void Rationalizer::SanityCheck()
{
- BasicBlock * block;
+ BasicBlock* block;
foreach_block(comp, block)
{
- for (GenTree* statement = block->bbTreeList;
- statement != nullptr;
- statement = statement->gtNext)
+ for (GenTree* statement = block->bbTreeList; statement != nullptr; statement = statement->gtNext)
{
ValidateStatement(statement, block);
- for (GenTree *tree = statement->gtStmt.gtStmtList;
- tree;
- tree = tree->gtNext)
+ for (GenTree* tree = statement->gtStmt.gtStmtList; tree; tree = tree->gtNext)
{
// QMARK nodes should have been removed before this phase.
assert(tree->OperGet() != GT_QMARK);
@@ -1799,8 +1790,8 @@ void Rationalizer::DoPhase()
{
DBEXEC(TRUE, SanityCheck());
- comp->compCurBB = NULL;
- comp->fgOrder = Compiler::FGOrderLinear;
+ comp->compCurBB = nullptr;
+ comp->fgOrder = Compiler::FGOrderLinear;
// break up the trees at side effects, etc
Location loc(comp->fgFirstBB);
diff --git a/src/jit/rationalize.h b/src/jit/rationalize.h
index e1e7df140c..fe8118b429 100644
--- a/src/jit/rationalize.h
+++ b/src/jit/rationalize.h
@@ -11,10 +11,12 @@
class Location
{
public:
- GenTree* tree;
+ GenTree* tree;
BasicBlock* block;
- Location() : tree(nullptr), block(nullptr) {}
+ Location() : tree(nullptr), block(nullptr)
+ {
+ }
Location(GenTree* t, BasicBlock* b) : tree(t), block(b)
{
@@ -62,7 +64,7 @@ public:
void Reset(Compiler* comp)
{
block = comp->fgFirstBB;
- tree = nullptr;
+ tree = nullptr;
Initialize();
}
@@ -77,7 +79,7 @@ private:
if (block == nullptr)
{
block = nullptr;
- tree = nullptr;
+ tree = nullptr;
break;
}
tree = block->bbTreeList;
@@ -92,7 +94,7 @@ class Rationalizer : public Phase
// Methods
public:
Rationalizer(Compiler* comp);
- Location TreeTransformRationalization (Location loc);
+ Location TreeTransformRationalization(Location loc);
#ifdef DEBUG
@@ -100,37 +102,33 @@ public:
static void ValidateStatement(GenTree* tree, BasicBlock* block);
// general purpose sanity checking of de facto standard GenTree
- void SanityCheck();
+ void SanityCheck();
// sanity checking of rationalized IR
- void SanityCheckRational();
+ void SanityCheckRational();
#endif // DEBUG
- virtual void DoPhase();
- typedef ArrayStack<GenTree*> GenTreeStack;
- static void MorphAsgIntoStoreLcl (GenTreeStmt* stmt, GenTreePtr pTree);
+ virtual void DoPhase();
+ typedef ArrayStack<GenTree*> GenTreeStack;
+ static void MorphAsgIntoStoreLcl(GenTreeStmt* stmt, GenTreePtr pTree);
private:
- static Compiler::fgWalkResult CommaHelper (GenTree** ppTree, Compiler::fgWalkData* data);
- static void RewriteOneComma (GenTree** ppTree, Compiler::fgWalkData* data);
- static bool CommaUselessChild (GenTree** ppTree, Compiler::fgWalkData* data);
- static void RecursiveRewriteComma(GenTree** ppTree, Compiler::fgWalkData* data, bool discard, bool nested);
- static bool RewriteArrElem (GenTree** ppTree, Compiler::fgWalkData* data);
+ static Compiler::fgWalkResult CommaHelper(GenTree** ppTree, Compiler::fgWalkData* data);
+ static void RewriteOneComma(GenTree** ppTree, Compiler::fgWalkData* data);
+ static bool CommaUselessChild(GenTree** ppTree, Compiler::fgWalkData* data);
+ static void RecursiveRewriteComma(GenTree** ppTree, Compiler::fgWalkData* data, bool discard, bool nested);
+ static bool RewriteArrElem(GenTree** ppTree, Compiler::fgWalkData* data);
static Compiler::fgWalkResult SimpleTransformHelper(GenTree** ppTree, Compiler::fgWalkData* data);
- static void DuplicateCommaProcessOneTree (Compiler* comp, Rationalizer* irt, BasicBlock* block, GenTree* tree);
+ static void DuplicateCommaProcessOneTree(Compiler* comp, Rationalizer* irt, BasicBlock* block, GenTree* tree);
- static void FixupIfCallArg (GenTreeStack* parentStack,
- GenTree* oldChild,
- GenTree* newChild);
+ static void FixupIfCallArg(GenTreeStack* parentStack, GenTree* oldChild, GenTree* newChild);
- static void FixupIfSIMDLocal (Compiler* comp, GenTreeLclVarCommon* tree);
+ static void FixupIfSIMDLocal(Compiler* comp, GenTreeLclVarCommon* tree);
- static GenTreePtr CreateTempAssignment (Compiler* comp,
- unsigned lclNum,
- GenTreePtr rhs);
+ static GenTreePtr CreateTempAssignment(Compiler* comp, unsigned lclNum, GenTreePtr rhs);
Location RewriteTopLevelComma(Location loc);
@@ -140,17 +138,17 @@ private:
static void RewriteInitBlk(GenTreePtr* ppTree, Compiler::fgWalkData* data);
// Intrinsic related
- static void RewriteNodeAsCall(GenTreePtr* ppTree, Compiler::fgWalkData* data,
- CORINFO_METHOD_HANDLE callHnd,
+ static void RewriteNodeAsCall(GenTreePtr* ppTree,
+ Compiler::fgWalkData* data,
+ CORINFO_METHOD_HANDLE callHnd,
#ifdef FEATURE_READYTORUN_COMPILER
- CORINFO_CONST_LOOKUP entryPoint,
+ CORINFO_CONST_LOOKUP entryPoint,
#endif
- GenTreeArgList* args);
+ GenTreeArgList* args);
static void RewriteIntrinsicAsUserCall(GenTreePtr* ppTree, Compiler::fgWalkData* data);
};
-inline Rationalizer::Rationalizer(Compiler* _comp)
- : Phase(_comp, "IR Rationalize", PHASE_RATIONALIZE)
+inline Rationalizer::Rationalizer(Compiler* _comp) : Phase(_comp, "IR Rationalize", PHASE_RATIONALIZE)
{
#ifdef DEBUG
comp->compNumStatementLinksTraversed = 0;
diff --git a/src/jit/regalloc.cpp b/src/jit/regalloc.cpp
index ec040d7751..0b5aaad460 100644
--- a/src/jit/regalloc.cpp
+++ b/src/jit/regalloc.cpp
@@ -24,7 +24,7 @@ Compiler::enumConfigRegisterFP Compiler::raConfigRegisterFP()
{
DWORD val = JitConfig.JitRegisterFP();
- return (enumConfigRegisterFP) (val & 0x3);
+ return (enumConfigRegisterFP)(val & 0x3);
}
#endif // FEATURE_FP_REGALLOC
@@ -33,19 +33,20 @@ regMaskTP Compiler::raConfigRestrictMaskFP()
regMaskTP result = RBM_NONE;
#if FEATURE_FP_REGALLOC
- switch (raConfigRegisterFP()) {
- case CONFIG_REGISTER_FP_NONE:
- result = RBM_NONE;
- break;
- case CONFIG_REGISTER_FP_CALLEE_TRASH:
- result = RBM_FLT_CALLEE_TRASH;
- break;
- case CONFIG_REGISTER_FP_CALLEE_SAVED:
- result = RBM_FLT_CALLEE_SAVED;
- break;
- case CONFIG_REGISTER_FP_FULL:
- result = RBM_ALLFLOAT;
- break;
+ switch (raConfigRegisterFP())
+ {
+ case CONFIG_REGISTER_FP_NONE:
+ result = RBM_NONE;
+ break;
+ case CONFIG_REGISTER_FP_CALLEE_TRASH:
+ result = RBM_FLT_CALLEE_TRASH;
+ break;
+ case CONFIG_REGISTER_FP_CALLEE_SAVED:
+ result = RBM_FLT_CALLEE_SAVED;
+ break;
+ case CONFIG_REGISTER_FP_FULL:
+ result = RBM_ALLFLOAT;
+ break;
}
#endif
@@ -68,8 +69,7 @@ DWORD Compiler::getCanDoubleAlign()
}
#endif // DOUBLE_ALIGN
-
-void Compiler::raInit()
+void Compiler::raInit()
{
#if FEATURE_STACK_FP_X87
/* We have not assigned any FP variables to registers yet */
@@ -87,15 +87,15 @@ void Compiler::raInit()
{
rpPassesMax++;
}
- rpStkPredict = (unsigned) -1;
- rpFrameType = FT_NOT_SET;
- rpLostEnreg = false;
- rpMustCreateEBPCalled = false;
- rpRegAllocDone = false;
+ rpStkPredict = (unsigned)-1;
+ rpFrameType = FT_NOT_SET;
+ rpLostEnreg = false;
+ rpMustCreateEBPCalled = false;
+ rpRegAllocDone = false;
rpMaskPInvokeEpilogIntf = RBM_NONE;
- rpPredictMap[PREDICT_NONE] = RBM_NONE;
- rpPredictMap[PREDICT_ADDR] = RBM_NONE;
+ rpPredictMap[PREDICT_NONE] = RBM_NONE;
+ rpPredictMap[PREDICT_ADDR] = RBM_NONE;
#if FEATURE_FP_REGALLOC
rpPredictMap[PREDICT_REG] = RBM_ALLINT | RBM_ALLFLOAT;
@@ -105,14 +105,14 @@ void Compiler::raInit()
rpPredictMap[PREDICT_SCRATCH_REG] = RBM_ALLINT;
#endif
-#define REGDEF(name, rnum, mask, sname) rpPredictMap[PREDICT_REG_ ## name ] = RBM_ ## name;
+#define REGDEF(name, rnum, mask, sname) rpPredictMap[PREDICT_REG_##name] = RBM_##name;
#include "register.h"
#if defined(_TARGET_ARM_)
- rpPredictMap[PREDICT_PAIR_R0R1] = RBM_R0 | RBM_R1;
- rpPredictMap[PREDICT_PAIR_R2R3] = RBM_R2 | RBM_R3;
- rpPredictMap[PREDICT_REG_SP] = RBM_ILLEGAL;
+ rpPredictMap[PREDICT_PAIR_R0R1] = RBM_R0 | RBM_R1;
+ rpPredictMap[PREDICT_PAIR_R2R3] = RBM_R2 | RBM_R3;
+ rpPredictMap[PREDICT_REG_SP] = RBM_ILLEGAL;
#elif defined(_TARGET_AMD64_)
@@ -139,33 +139,33 @@ void Compiler::raInit()
* for variables to live in
*/
-const regNumber* Compiler::raGetRegVarOrder(var_types regType, unsigned* wbVarOrderSize)
+const regNumber* Compiler::raGetRegVarOrder(var_types regType, unsigned* wbVarOrderSize)
{
#if FEATURE_FP_REGALLOC
- if (varTypeIsFloating(regType))
- {
- static const regNumber raRegVarOrderFlt[] = { REG_VAR_ORDER_FLT };
- const unsigned raRegVarOrderFltSize = sizeof(raRegVarOrderFlt)/sizeof(raRegVarOrderFlt[0]);
+ if (varTypeIsFloating(regType))
+ {
+ static const regNumber raRegVarOrderFlt[] = {REG_VAR_ORDER_FLT};
+ const unsigned raRegVarOrderFltSize = sizeof(raRegVarOrderFlt) / sizeof(raRegVarOrderFlt[0]);
- if (wbVarOrderSize != NULL)
- *wbVarOrderSize = raRegVarOrderFltSize;
+ if (wbVarOrderSize != NULL)
+ *wbVarOrderSize = raRegVarOrderFltSize;
- return &raRegVarOrderFlt[0];
- }
- else
+ return &raRegVarOrderFlt[0];
+ }
+ else
#endif
- {
- static const regNumber raRegVarOrder[] = { REG_VAR_ORDER };
- const unsigned raRegVarOrderSize = sizeof(raRegVarOrder)/sizeof(raRegVarOrder[0]);
+ {
+ static const regNumber raRegVarOrder[] = {REG_VAR_ORDER};
+ const unsigned raRegVarOrderSize = sizeof(raRegVarOrder) / sizeof(raRegVarOrder[0]);
- if (wbVarOrderSize != NULL)
- *wbVarOrderSize = raRegVarOrderSize;
+ if (wbVarOrderSize != NULL)
+ *wbVarOrderSize = raRegVarOrderSize;
- return &raRegVarOrder[0];
- }
+ return &raRegVarOrder[0];
+ }
}
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
@@ -173,32 +173,30 @@ const regNumber* Compiler::raGetRegVarOrder(var_types regType, unsigned* wbVarO
*
*/
-void Compiler::raDumpVarIntf()
+void Compiler::raDumpVarIntf()
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
printf("Var. interference graph for %s\n", info.compFullName);
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
/* Ignore the variable if it's not tracked */
- if (!varDsc->lvTracked)
+ if (!varDsc->lvTracked)
continue;
/* Get hold of the index and the interference mask for the variable */
- unsigned varIndex = varDsc->lvVarIndex;
+ unsigned varIndex = varDsc->lvVarIndex;
printf(" V%02u,T%02u and ", lclNum, varIndex);
- unsigned refIndex;
+ unsigned refIndex;
for (refIndex = 0; refIndex < lvaTrackedCount; refIndex++)
{
- if (VarSetOps::IsMember(this, lvaVarIntf[varIndex], refIndex))
+ if (VarSetOps::IsMember(this, lvaVarIntf[varIndex], refIndex))
printf("T%02u ", refIndex);
else
printf(" ");
@@ -215,22 +213,20 @@ void Compiler::raDumpVarIntf()
* Dump out the register interference graph
*
*/
-void Compiler::raDumpRegIntf()
+void Compiler::raDumpRegIntf()
{
printf("Reg. interference graph for %s\n", info.compFullName);
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
- unsigned varNum;
+ unsigned varNum;
/* Ignore the variable if it's not tracked */
- if (!varDsc->lvTracked)
+ if (!varDsc->lvTracked)
continue;
/* Get hold of the index and the interference mask for the variable */
@@ -239,13 +235,13 @@ void Compiler::raDumpRegIntf()
printf(" V%02u,T%02u and ", lclNum, varNum);
- if (varDsc->IsFloatRegType())
+ if (varDsc->IsFloatRegType())
{
#if !FEATURE_STACK_FP_X87
for (regNumber regNum = REG_FP_FIRST; regNum <= REG_FP_LAST; regNum = REG_NEXT(regNum))
{
- if (VarSetOps::IsMember(this, raLclRegIntf[regNum], varNum))
- printf("%3s ", getRegName(regNum, true));
+ if (VarSetOps::IsMember(this, raLclRegIntf[regNum], varNum))
+ printf("%3s ", getRegName(regNum, true));
else
printf(" ");
}
@@ -255,7 +251,7 @@ void Compiler::raDumpRegIntf()
{
for (regNumber regNum = REG_INT_FIRST; regNum <= REG_INT_LAST; regNum = REG_NEXT(regNum))
{
- if (VarSetOps::IsMember(this, raLclRegIntf[regNum], varNum))
+ if (VarSetOps::IsMember(this, raLclRegIntf[regNum], varNum))
printf("%3s ", getRegName(regNum));
else
printf(" ");
@@ -275,7 +271,7 @@ void Compiler::raDumpRegIntf()
*
*/
-void Compiler::raAdjustVarIntf()
+void Compiler::raAdjustVarIntf()
{
// This method was not correct and has been disabled.
return;
@@ -286,8 +282,7 @@ void Compiler::raAdjustVarIntf()
/* Determine register mask for a call/return from type.
*/
-inline
-regMaskTP Compiler::genReturnRegForTree(GenTreePtr tree)
+inline regMaskTP Compiler::genReturnRegForTree(GenTreePtr tree)
{
var_types type = tree->TypeGet();
@@ -297,38 +292,36 @@ regMaskTP Compiler::genReturnRegForTree(GenTreePtr tree)
return ((1 << retSlots) - 1) << REG_FLOATRET;
}
- const static
- regMaskTP returnMap[TYP_COUNT] =
- {
- RBM_ILLEGAL, // TYP_UNDEF,
- RBM_NONE, // TYP_VOID,
- RBM_INTRET, // TYP_BOOL,
- RBM_INTRET, // TYP_CHAR,
- RBM_INTRET, // TYP_BYTE,
- RBM_INTRET, // TYP_UBYTE,
- RBM_INTRET, // TYP_SHORT,
- RBM_INTRET, // TYP_USHORT,
- RBM_INTRET, // TYP_INT,
- RBM_INTRET, // TYP_UINT,
- RBM_LNGRET, // TYP_LONG,
- RBM_LNGRET, // TYP_ULONG,
+ const static regMaskTP returnMap[TYP_COUNT] = {
+ RBM_ILLEGAL, // TYP_UNDEF,
+ RBM_NONE, // TYP_VOID,
+ RBM_INTRET, // TYP_BOOL,
+ RBM_INTRET, // TYP_CHAR,
+ RBM_INTRET, // TYP_BYTE,
+ RBM_INTRET, // TYP_UBYTE,
+ RBM_INTRET, // TYP_SHORT,
+ RBM_INTRET, // TYP_USHORT,
+ RBM_INTRET, // TYP_INT,
+ RBM_INTRET, // TYP_UINT,
+ RBM_LNGRET, // TYP_LONG,
+ RBM_LNGRET, // TYP_ULONG,
RBM_FLOATRET, // TYP_FLOAT,
RBM_DOUBLERET, // TYP_DOUBLE,
- RBM_INTRET, // TYP_REF,
- RBM_INTRET, // TYP_BYREF,
- RBM_INTRET, // TYP_ARRAY,
- RBM_ILLEGAL, // TYP_STRUCT,
- RBM_ILLEGAL, // TYP_BLK,
- RBM_ILLEGAL, // TYP_LCLBLK,
- RBM_ILLEGAL, // TYP_PTR,
- RBM_ILLEGAL, // TYP_FNC,
- RBM_ILLEGAL, // TYP_UNKNOWN,
+ RBM_INTRET, // TYP_REF,
+ RBM_INTRET, // TYP_BYREF,
+ RBM_INTRET, // TYP_ARRAY,
+ RBM_ILLEGAL, // TYP_STRUCT,
+ RBM_ILLEGAL, // TYP_BLK,
+ RBM_ILLEGAL, // TYP_LCLBLK,
+ RBM_ILLEGAL, // TYP_PTR,
+ RBM_ILLEGAL, // TYP_FNC,
+ RBM_ILLEGAL, // TYP_UNKNOWN,
};
- assert((unsigned)type < sizeof(returnMap)/sizeof(returnMap[0]));
- assert(returnMap[TYP_LONG] == RBM_LNGRET);
+ assert((unsigned)type < sizeof(returnMap) / sizeof(returnMap[0]));
+ assert(returnMap[TYP_LONG] == RBM_LNGRET);
assert(returnMap[TYP_DOUBLE] == RBM_DOUBLERET);
- assert(returnMap[TYP_REF] == RBM_INTRET);
+ assert(returnMap[TYP_REF] == RBM_INTRET);
assert(returnMap[TYP_STRUCT] == RBM_ILLEGAL);
regMaskTP result = returnMap[type];
@@ -336,31 +329,26 @@ regMaskTP Compiler::genReturnRegForTree(GenTreePtr tree)
return result;
}
-
/*****************************************************************************/
-
/****************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
-static
-void dispLifeSet(Compiler *comp, VARSET_VALARG_TP mask, VARSET_VALARG_TP life)
+static void dispLifeSet(Compiler* comp, VARSET_VALARG_TP mask, VARSET_VALARG_TP life)
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = comp->lvaTable;
- lclNum < comp->lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = comp->lvaTable; lclNum < comp->lvaCount; lclNum++, varDsc++)
{
- if (!varDsc->lvTracked)
+ if (!varDsc->lvTracked)
continue;
- if (!VarSetOps::IsMember(comp, mask, varDsc->lvVarIndex))
+ if (!VarSetOps::IsMember(comp, mask, varDsc->lvVarIndex))
continue;
- if (VarSetOps::IsMember(comp, life, varDsc->lvVarIndex))
+ if (VarSetOps::IsMember(comp, life, varDsc->lvVarIndex))
printf("V%02u ", lclNum);
}
}
@@ -368,51 +356,45 @@ void dispLifeSet(Compiler *comp, VARSET_VALARG_TP mask, VARSET_VA
#endif
/*****************************************************************************/
-#ifdef DEBUG
+#ifdef DEBUG
/*****************************************************************************
*
* Debugging helpers - display variables liveness info.
*/
-void dispFPvarsInBBlist(BasicBlock * beg,
- BasicBlock * end,
- VARSET_TP mask,
- Compiler * comp)
+void dispFPvarsInBBlist(BasicBlock* beg, BasicBlock* end, VARSET_TP mask, Compiler* comp)
{
do
{
printf("BB%02u: ", beg->bbNum);
printf(" in = [ ");
- dispLifeSet(comp, mask, beg->bbLiveIn );
+ dispLifeSet(comp, mask, beg->bbLiveIn);
printf("] ,");
printf(" out = [ ");
dispLifeSet(comp, mask, beg->bbLiveOut);
printf("]");
- if (beg->bbFlags & BBF_VISITED)
+ if (beg->bbFlags & BBF_VISITED)
printf(" inner=%u", beg->bbFPinVars);
printf("\n");
beg = beg->bbNext;
- if (!beg)
+ if (!beg)
return;
- }
- while (beg != end);
+ } while (beg != end);
}
#if FEATURE_STACK_FP_X87
-void Compiler::raDispFPlifeInfo()
+void Compiler::raDispFPlifeInfo()
{
- BasicBlock * block;
+ BasicBlock* block;
- for (block = fgFirstBB;
- block;
- block = block->bbNext)
+ for (block = fgFirstBB; block; block = block->bbNext)
{
- GenTreePtr stmt;
+ GenTreePtr stmt;
printf("BB%02u: in = [ ", block->bbNum);
dispLifeSet(this, optAllFloatVars, block->bbLiveIn);
@@ -421,13 +403,11 @@ void Compiler::raDispFPlifeInfo()
VARSET_TP VARSET_INIT(this, life, block->bbLiveIn);
for (stmt = block->bbTreeList; stmt; stmt = stmt->gtNext)
{
- GenTreePtr tree;
+ GenTreePtr tree;
noway_assert(stmt->gtOper == GT_STMT);
- for (tree = stmt->gtStmt.gtStmtList;
- tree;
- tree = tree->gtNext)
+ for (tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
{
VarSetOps::AssignNoCopy(this, life, fgUpdateLiveSet(life, tree));
@@ -444,25 +424,21 @@ void Compiler::raDispFPlifeInfo()
printf("]\n\n");
}
}
-#endif // FEATURE_STACK_FP_X87
+#endif // FEATURE_STACK_FP_X87
/*****************************************************************************/
-#endif//DEBUG
+#endif // DEBUG
/*****************************************************************************/
-
/*****************************************************************************/
-void Compiler::raSetRegVarOrder(var_types regType,
- regNumber * customVarOrder,
- unsigned * customVarOrderSize,
- regMaskTP prefReg,
- regMaskTP avoidReg)
+void Compiler::raSetRegVarOrder(
+ var_types regType, regNumber* customVarOrder, unsigned* customVarOrderSize, regMaskTP prefReg, regMaskTP avoidReg)
{
- unsigned normalVarOrderSize;
- const regNumber * normalVarOrder = raGetRegVarOrder(regType, &normalVarOrderSize);
- unsigned index;
- unsigned listIndex = 0;
- regMaskTP usedReg = avoidReg;
+ unsigned normalVarOrderSize;
+ const regNumber* normalVarOrder = raGetRegVarOrder(regType, &normalVarOrderSize);
+ unsigned index;
+ unsigned listIndex = 0;
+ regMaskTP usedReg = avoidReg;
noway_assert(*customVarOrderSize >= normalVarOrderSize);
@@ -470,12 +446,10 @@ void Compiler::raSetRegVarOrder(var_types regType,
{
/* First place the preferred registers at the start of customVarOrder */
- regMaskTP regBit;
- regNumber regNum;
+ regMaskTP regBit;
+ regNumber regNum;
- for (index = 0;
- index < normalVarOrderSize;
- index++)
+ for (index = 0; index < normalVarOrderSize; index++)
{
regNum = normalVarOrder[index];
regBit = genRegMask(regNum);
@@ -492,7 +466,6 @@ void Compiler::raSetRegVarOrder(var_types regType,
if (prefReg == 0)
break;
}
-
}
#if CPU_HAS_BYTE_REGS
@@ -500,9 +473,7 @@ void Compiler::raSetRegVarOrder(var_types regType,
if (prefReg & RBM_BYTE_REG_FLAG)
{
- for (index = 0;
- index < normalVarOrderSize;
- index++)
+ for (index = 0; index < normalVarOrderSize; index++)
{
regNum = normalVarOrder[index];
regBit = genRegMask(regNum);
@@ -520,14 +491,11 @@ void Compiler::raSetRegVarOrder(var_types regType,
}
#endif // CPU_HAS_BYTE_REGS
-
}
/* Now place all the non-preferred registers */
- for (index = 0;
- index < normalVarOrderSize;
- index++)
+ for (index = 0; index < normalVarOrderSize; index++)
{
regNumber regNum = normalVarOrder[index];
regMaskTP regBit = genRegMask(regNum);
@@ -544,9 +512,7 @@ void Compiler::raSetRegVarOrder(var_types regType,
{
/* Now place the "avoid" registers */
- for (index = 0;
- index < normalVarOrderSize;
- index++)
+ for (index = 0; index < normalVarOrderSize; index++)
{
regNumber regNum = normalVarOrder[index];
regMaskTP regBit = genRegMask(regNum);
@@ -571,18 +537,18 @@ void Compiler::raSetRegVarOrder(var_types regType,
* Setup the raAvoidArgRegMask and rsCalleeRegArgMaskLiveIn
*/
-void Compiler::raSetupArgMasks(RegState *regState)
+void Compiler::raSetupArgMasks(RegState* regState)
{
/* Determine the registers holding incoming register arguments */
/* and setup raAvoidArgRegMask to the set of registers that we */
/* may want to avoid when enregistering the locals. */
regState->rsCalleeRegArgMaskLiveIn = RBM_NONE;
- raAvoidArgRegMask = RBM_NONE;
+ raAvoidArgRegMask = RBM_NONE;
- LclVarDsc * argsEnd = lvaTable + info.compArgsCount;
+ LclVarDsc* argsEnd = lvaTable + info.compArgsCount;
- for (LclVarDsc * argDsc = lvaTable; argDsc < argsEnd; argDsc++)
+ for (LclVarDsc* argDsc = lvaTable; argDsc < argsEnd; argDsc++)
{
noway_assert(argDsc->lvIsParam);
@@ -601,17 +567,15 @@ void Compiler::raSetupArgMasks(RegState *regState)
// or when we have a generic type context arg that we must report
// then the arguments have to be kept alive throughout the prolog.
// So we have to consider it as live on entry.
- //
+ //
bool keepArgAlive = compJmpOpUsed;
- if ( (unsigned(info.compTypeCtxtArg) != BAD_VAR_NUM) &&
- lvaReportParamTypeArg() &&
- ((lvaTable + info.compTypeCtxtArg) == argDsc) )
+ if ((unsigned(info.compTypeCtxtArg) != BAD_VAR_NUM) && lvaReportParamTypeArg() &&
+ ((lvaTable + info.compTypeCtxtArg) == argDsc))
{
keepArgAlive = true;
}
- if (!keepArgAlive && argDsc->lvTracked &&
- !VarSetOps::IsMember(this, fgFirstBB->bbLiveIn, argDsc->lvVarIndex))
+ if (!keepArgAlive && argDsc->lvTracked && !VarSetOps::IsMember(this, fgFirstBB->bbLiveIn, argDsc->lvVarIndex))
{
continue;
}
@@ -623,7 +587,8 @@ void Compiler::raSetupArgMasks(RegState *regState)
// Do we need to try to avoid this incoming arg registers?
// If it's not tracked, don't do the stuff below.
- if (!argDsc->lvTracked) continue;
+ if (!argDsc->lvTracked)
+ continue;
        // If the incoming arg is used after a call it is live across
// a call and will have to be allocated to a caller saved
@@ -632,13 +597,13 @@ void Compiler::raSetupArgMasks(RegState *regState)
        // In this case it is pointless to ask the higher ref count
// locals to avoid using the incoming arg register
- unsigned argVarIndex = argDsc->lvVarIndex;
+ unsigned argVarIndex = argDsc->lvVarIndex;
/* Does the incoming register and the arg variable interfere? */
- if (!VarSetOps::IsMember(this, raLclRegIntf[inArgReg], argVarIndex))
+ if (!VarSetOps::IsMember(this, raLclRegIntf[inArgReg], argVarIndex))
{
- // No they do not interfere,
+            // No, they do not interfere,
// so we add inArgReg to raAvoidArgRegMask
raAvoidArgRegMask |= genRegMask(inArgReg);
@@ -660,7 +625,7 @@ void Compiler::raSetupArgMasks(RegState *regState)
// The code to set the regState for each arg is outlined for shared use
// by linear scan. (It is not shared for System V AMD64 platform.)
-regNumber Compiler::raUpdateRegStateForArg(RegState *regState, LclVarDsc *argDsc)
+regNumber Compiler::raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc)
{
regNumber inArgReg = argDsc->lvArgReg;
regMaskTP inArgMask = genRegMask(inArgReg);
@@ -678,9 +643,9 @@ regNumber Compiler::raUpdateRegStateForArg(RegState *regState, LclVarDsc *ar
// We should have a TYP_BYREF or TYP_I_IMPL arg and not a TYP_STRUCT arg
noway_assert(argDsc->lvType == TYP_BYREF || argDsc->lvType == TYP_I_IMPL);
// We should have recorded the variable number for the return buffer arg
- noway_assert(info.compRetBuffArg != BAD_VAR_NUM);
+ noway_assert(info.compRetBuffArg != BAD_VAR_NUM);
}
- else // we have a regular arg
+ else // we have a regular arg
{
noway_assert(inArgMask & RBM_ARG_REGS);
}
@@ -701,14 +666,13 @@ regNumber Compiler::raUpdateRegStateForArg(RegState *regState, LclVarDsc *ar
assert(regState->rsIsFloat);
assert(emitter::isDoubleReg(inArgReg));
}
- regState->rsCalleeRegArgMaskLiveIn |= genRegMask((regNumber)(inArgReg+1));
+ regState->rsCalleeRegArgMaskLiveIn |= genRegMask((regNumber)(inArgReg + 1));
}
else if (argDsc->lvType == TYP_LONG)
{
assert((inArgReg == REG_R0) || (inArgReg == REG_R2));
assert(!regState->rsIsFloat);
- regState->rsCalleeRegArgMaskLiveIn |= genRegMask((regNumber)(inArgReg+1));
-
+ regState->rsCalleeRegArgMaskLiveIn |= genRegMask((regNumber)(inArgReg + 1));
}
#endif // _TARGET_ARM_
@@ -728,7 +692,7 @@ regNumber Compiler::raUpdateRegStateForArg(RegState *regState, LclVarDsc *ar
else
{
unsigned cSlots = argDsc->lvSize() / TARGET_POINTER_SIZE;
- for (unsigned i=1; i < cSlots; i++)
+ for (unsigned i = 1; i < cSlots; i++)
{
regNumber nextArgReg = (regNumber)(inArgReg + i);
if (nextArgReg > REG_ARG_LAST)
@@ -752,10 +716,10 @@ regNumber Compiler::raUpdateRegStateForArg(RegState *regState, LclVarDsc *ar
* Assign variables to live in registers, etc.
*/
-void Compiler::raAssignVars()
+void Compiler::raAssignVars()
{
#ifdef DEBUG
- if (verbose)
+ if (verbose)
printf("*************** In raAssignVars()\n");
#endif
/* We need to keep track of which registers we ever touch */
@@ -769,21 +733,19 @@ void Compiler::raAssignVars()
#endif
/* Predict registers used by code generation */
- rpPredictRegUse(); // New reg predictor/allocator
+ rpPredictRegUse(); // New reg predictor/allocator
- // Change all unused promoted non-argument struct locals to a non-GC type (in this case TYP_INT)
+ // Change all unused promoted non-argument struct locals to a non-GC type (in this case TYP_INT)
// so that the gc tracking logic and lvMustInit logic will ignore them.
-
- unsigned lclNum;
- LclVarDsc * varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ unsigned lclNum;
+ LclVarDsc* varDsc;
+
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
if (varDsc->lvType != TYP_STRUCT)
continue;
-
+
if (!varDsc->lvPromoted)
continue;
@@ -794,22 +756,21 @@ void Compiler::raAssignVars()
continue;
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("Mark unused struct local V%02u\n", lclNum);
}
- lvaPromotionType promotionType = lvaGetPromotionType(varDsc);
+ lvaPromotionType promotionType = lvaGetPromotionType(varDsc);
if (promotionType == PROMOTION_TYPE_DEPENDENT)
{
// This should only happen when all its field locals are unused as well.
-
- for (unsigned varNum = varDsc->lvFieldLclStart;
- varNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
+
+ for (unsigned varNum = varDsc->lvFieldLclStart; varNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
varNum++)
- {
- noway_assert(lvaTable[varNum].lvRefCnt == 0);
+ {
+ noway_assert(lvaTable[varNum].lvRefCnt == 0);
}
}
else
@@ -821,13 +782,13 @@ void Compiler::raAssignVars()
#endif
// Change such struct locals to ints
-
- varDsc->lvType = TYP_INT; // Bash to a non-gc type.
- noway_assert(!varDsc->lvTracked);
- noway_assert(!varDsc->lvRegister);
- varDsc->lvOnFrame = false; // Force it not to be onstack.
- varDsc->lvMustInit = false; // Force not to init it.
- varDsc->lvStkOffs = 0; // Set it to anything other than BAD_STK_OFFS to make genSetScopeInfo() happy
+
+ varDsc->lvType = TYP_INT; // Bash to a non-gc type.
+ noway_assert(!varDsc->lvTracked);
+ noway_assert(!varDsc->lvRegister);
+ varDsc->lvOnFrame = false; // Force it not to be onstack.
+ varDsc->lvMustInit = false; // Force not to init it.
+ varDsc->lvStkOffs = 0; // Set it to anything other than BAD_STK_OFFS to make genSetScopeInfo() happy
}
}
@@ -841,7 +802,7 @@ void Compiler::raAssignVars()
inline static rpPredictReg rpGetPredictForReg(regNumber reg)
{
- return (rpPredictReg) ( ((int) reg) + ((int) PREDICT_REG_FIRST) );
+ return (rpPredictReg)(((int)reg) + ((int)PREDICT_REG_FIRST));
}
/*****************************************************************************
@@ -851,7 +812,7 @@ inline static rpPredictReg rpGetPredictForReg(regNumber reg)
inline static rpPredictReg rpGetPredictForVarIndex(unsigned varIndex)
{
- return (rpPredictReg) ( varIndex + ((int) PREDICT_REG_VAR_T00) );
+ return (rpPredictReg)(varIndex + ((int)PREDICT_REG_VAR_T00));
}
/*****************************************************************************
@@ -861,7 +822,7 @@ inline static rpPredictReg rpGetPredictForVarIndex(unsigned varIndex)
inline static unsigned rpGetVarIndexForPredict(rpPredictReg predict)
{
- return (unsigned) predict - (unsigned) PREDICT_REG_VAR_T00;
+ return (unsigned)predict - (unsigned)PREDICT_REG_VAR_T00;
}
/*****************************************************************************
@@ -885,9 +846,9 @@ inline static bool rpHasVarIndexForPredict(rpPredictReg predict)
static rpPredictReg rpGetPredictForMask(regMaskTP regmask)
{
rpPredictReg result = PREDICT_NONE;
- if (regmask != 0) /* Check if regmask has zero bits set */
+    if (regmask != 0) /* Check if regmask has any bits set */
{
- if (((regmask-1) & regmask) == 0) /* Check if regmask has one bit set */
+ if (((regmask - 1) & regmask) == 0) /* Check if regmask has one bit set */
{
DWORD reg = 0;
assert(FitsIn<DWORD>(regmask));
@@ -897,14 +858,26 @@ static rpPredictReg rpGetPredictForMask(regMaskTP regmask)
#if defined(_TARGET_ARM_)
/* It has multiple bits set */
- else if (regmask == (RBM_R0 | RBM_R1)) { result = PREDICT_PAIR_R0R1; }
- else if (regmask == (RBM_R2 | RBM_R3)) { result = PREDICT_PAIR_R2R3; }
+ else if (regmask == (RBM_R0 | RBM_R1))
+ {
+ result = PREDICT_PAIR_R0R1;
+ }
+ else if (regmask == (RBM_R2 | RBM_R3))
+ {
+ result = PREDICT_PAIR_R2R3;
+ }
#elif defined(_TARGET_X86_)
/* It has multiple bits set */
- else if (regmask == (RBM_EAX | RBM_EDX)) { result = PREDICT_PAIR_EAXEDX; }
- else if (regmask == (RBM_ECX | RBM_EBX)) { result = PREDICT_PAIR_ECXEBX; }
+ else if (regmask == (RBM_EAX | RBM_EDX))
+ {
+ result = PREDICT_PAIR_EAXEDX;
+ }
+ else if (regmask == (RBM_ECX | RBM_EBX))
+ {
+ result = PREDICT_PAIR_ECXEBX;
+ }
#endif
- else /* It doesn't match anything */
+ else /* It doesn't match anything */
{
result = PREDICT_NONE;
assert(!"unreachable");
@@ -919,9 +892,7 @@ static rpPredictReg rpGetPredictForMask(regMaskTP regmask)
* Record a variable to register(s) interference
*/
-bool Compiler::rpRecordRegIntf(regMaskTP regMask,
- VARSET_VALARG_TP life
- DEBUGARG( const char * msg))
+bool Compiler::rpRecordRegIntf(regMaskTP regMask, VARSET_VALARG_TP life DEBUGARG(const char* msg))
{
bool addedIntf = false;
@@ -930,21 +901,21 @@ bool Compiler::rpRecordRegIntf(regMaskTP regMask,
{
for (regNumber regNum = REG_FIRST; regNum < REG_COUNT; regNum = REG_NEXT(regNum))
{
- regMaskTP regBit = genRegMask(regNum);
+ regMaskTP regBit = genRegMask(regNum);
if (regMask & regBit)
{
VARSET_TP VARSET_INIT_NOCOPY(newIntf, VarSetOps::Diff(this, life, raLclRegIntf[regNum]));
if (!VarSetOps::IsEmpty(this, newIntf))
{
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
VARSET_ITER_INIT(this, newIntfIter, newIntf, varNum);
while (newIntfIter.NextElem(this, &varNum))
{
- unsigned lclNum = lvaTrackedToVarNum[varNum];
- LclVarDsc * varDsc = &lvaTable[varNum];
+ unsigned lclNum = lvaTrackedToVarNum[varNum];
+ LclVarDsc* varDsc = &lvaTable[varNum];
#if FEATURE_FP_REGALLOC
// Only print the useful interferences
// i.e. floating point LclVar interference with floating point registers
@@ -952,8 +923,8 @@ bool Compiler::rpRecordRegIntf(regMaskTP regMask,
if (varTypeIsFloating(varDsc->TypeGet()) == genIsValidFloatReg(regNum))
#endif
{
- printf("Record interference between V%02u,T%02u and %s -- %s\n",
- lclNum, varNum, getRegName(regNum), msg);
+ printf("Record interference between V%02u,T%02u and %s -- %s\n", lclNum, varNum,
+ getRegName(regNum), msg);
}
}
}
@@ -971,15 +942,12 @@ bool Compiler::rpRecordRegIntf(regMaskTP regMask,
return addedIntf;
}
-
/*****************************************************************************
*
* Record a new variable to variable(s) interference
*/
-bool Compiler::rpRecordVarIntf(unsigned varNum,
- VARSET_VALARG_TP intfVar
- DEBUGARG( const char * msg))
+bool Compiler::rpRecordVarIntf(unsigned varNum, VARSET_VALARG_TP intfVar DEBUGARG(const char* msg))
{
noway_assert((varNum >= 0) && (varNum < lvaTrackedCount));
noway_assert(!VarSetOps::IsEmpty(this, intfVar));
@@ -992,8 +960,8 @@ bool Compiler::rpRecordVarIntf(unsigned varNum,
if (newIntf)
rpAddedVarIntf = true;
-#ifdef DEBUG
- if (verbose && newIntf)
+#ifdef DEBUG
+ if (verbose && newIntf)
{
for (unsigned oneNum = 0; oneNum < lvaTrackedCount; oneNum++)
{
@@ -1001,8 +969,8 @@ bool Compiler::rpRecordVarIntf(unsigned varNum,
{
unsigned lclNum = lvaTrackedToVarNum[varNum];
unsigned lclOne = lvaTrackedToVarNum[oneNum];
- printf("Record interference between V%02u,T%02u and V%02u,T%02u -- %s\n",
- lclNum, varNum, lclOne, oneNum, msg);
+ printf("Record interference between V%02u,T%02u and V%02u,T%02u -- %s\n", lclNum, varNum, lclOne,
+ oneNum, msg);
}
}
}
@@ -1016,12 +984,12 @@ bool Compiler::rpRecordVarIntf(unsigned varNum,
* Determine preferred register mask for a given predictReg value
*/
-inline regMaskTP Compiler::rpPredictRegMask(rpPredictReg predictReg, var_types type)
+inline regMaskTP Compiler::rpPredictRegMask(rpPredictReg predictReg, var_types type)
{
if (rpHasVarIndexForPredict(predictReg))
predictReg = PREDICT_REG;
- noway_assert((unsigned)predictReg < sizeof(rpPredictMap)/sizeof(rpPredictMap[0]));
+ noway_assert((unsigned)predictReg < sizeof(rpPredictMap) / sizeof(rpPredictMap[0]));
noway_assert(rpPredictMap[predictReg] != RBM_ILLEGAL);
regMaskTP regAvailForType = rpPredictMap[predictReg];
@@ -1041,7 +1009,7 @@ inline regMaskTP Compiler::rpPredictRegMask(rpPredictReg predictReg, var_types
// Fix 388433 ARM JitStress WP7
if ((regAvailForType & RBM_DBL_REGS) != 0)
{
- regAvailForType |= (regAvailForType<<1);
+ regAvailForType |= (regAvailForType << 1);
}
else
{
@@ -1059,13 +1027,11 @@ inline regMaskTP Compiler::rpPredictRegMask(rpPredictReg predictReg, var_types
*
* Adds the predicted registers to rsModifiedRegsMask.
*/
-regMaskTP Compiler::rpPredictRegPick(var_types type,
- rpPredictReg predictReg,
- regMaskTP lockedRegs)
+regMaskTP Compiler::rpPredictRegPick(var_types type, rpPredictReg predictReg, regMaskTP lockedRegs)
{
- regMaskTP preferReg = rpPredictRegMask(predictReg, type);
- regNumber regNum;
- regMaskTP regBits;
+ regMaskTP preferReg = rpPredictRegMask(predictReg, type);
+ regNumber regNum;
+ regMaskTP regBits;
// Add any reserved register to the lockedRegs
lockedRegs |= codeGen->regSet.rsMaskResvd;
@@ -1078,7 +1044,7 @@ regMaskTP Compiler::rpPredictRegPick(var_types type,
noway_assert((rpAsgVarNum >= 0) && (rpAsgVarNum < (int)lclMAX_TRACKED));
/* Don't pick the register used by rpAsgVarNum either */
- LclVarDsc * tgtVar = lvaTable + lvaTrackedToVarNum[rpAsgVarNum];
+ LclVarDsc* tgtVar = lvaTable + lvaTrackedToVarNum[rpAsgVarNum];
noway_assert(tgtVar->lvRegNum != REG_STK);
preferReg &= ~genRegMask(tgtVar->lvRegNum);
@@ -1086,190 +1052,189 @@ regMaskTP Compiler::rpPredictRegPick(var_types type,
switch (type)
{
- case TYP_BOOL:
- case TYP_BYTE:
- case TYP_UBYTE:
- case TYP_SHORT:
- case TYP_CHAR:
- case TYP_INT:
- case TYP_UINT:
- case TYP_REF:
- case TYP_BYREF:
+ case TYP_BOOL:
+ case TYP_BYTE:
+ case TYP_UBYTE:
+ case TYP_SHORT:
+ case TYP_CHAR:
+ case TYP_INT:
+ case TYP_UINT:
+ case TYP_REF:
+ case TYP_BYREF:
#ifdef _TARGET_AMD64_
- case TYP_LONG:
+ case TYP_LONG:
#endif // _TARGET_AMD64_
- // expand preferReg to all non-locked registers if no bits set
- preferReg = codeGen->regSet.rsUseIfZero(preferReg & RBM_ALLINT, RBM_ALLINT & ~lockedRegs);
+ // expand preferReg to all non-locked registers if no bits set
+ preferReg = codeGen->regSet.rsUseIfZero(preferReg & RBM_ALLINT, RBM_ALLINT & ~lockedRegs);
- if (preferReg == 0) // no bits set?
- {
- // Add one predefined spill choice register if no bits set.
- // (The jit will introduce one spill temp)
- preferReg |= RBM_SPILL_CHOICE;
- rpPredictSpillCnt++;
+ if (preferReg == 0) // no bits set?
+ {
+ // Add one predefined spill choice register if no bits set.
+ // (The jit will introduce one spill temp)
+ preferReg |= RBM_SPILL_CHOICE;
+ rpPredictSpillCnt++;
-#ifdef DEBUG
- if (verbose)
- printf("Predict one spill temp\n");
+#ifdef DEBUG
+ if (verbose)
+ printf("Predict one spill temp\n");
#endif
- }
-
- if (preferReg != 0)
- {
- /* Iterate the registers in the order specified by rpRegTmpOrder */
+ }
- for (unsigned index = 0;
- index < REG_TMP_ORDER_COUNT;
- index++)
+ if (preferReg != 0)
{
- regNum = rpRegTmpOrder[index];
- regBits = genRegMask(regNum);
+ /* Iterate the registers in the order specified by rpRegTmpOrder */
- if ((preferReg & regBits) == regBits)
+ for (unsigned index = 0; index < REG_TMP_ORDER_COUNT; index++)
{
- goto RET;
+ regNum = rpRegTmpOrder[index];
+ regBits = genRegMask(regNum);
+
+ if ((preferReg & regBits) == regBits)
+ {
+ goto RET;
+ }
}
}
- }
- /* Otherwise we have allocated all registers, so do nothing */
- break;
+ /* Otherwise we have allocated all registers, so do nothing */
+ break;
#ifndef _TARGET_AMD64_
- case TYP_LONG:
-
- if (( preferReg == 0) || // no bits set?
- ((preferReg & (preferReg-1)) == 0) ) // or only one bit set?
- {
- // expand preferReg to all non-locked registers
- preferReg = RBM_ALLINT & ~lockedRegs;
- }
+ case TYP_LONG:
- if (preferReg == 0) // no bits set?
- {
- // Add EAX:EDX to the registers
- // (The jit will introduce two spill temps)
- preferReg = RBM_PAIR_TMP;
- rpPredictSpillCnt += 2;
-#ifdef DEBUG
- if (verbose)
- printf("Predict two spill temps\n");
-#endif
- }
- else if ((preferReg & (preferReg-1)) == 0) // only one bit set?
- {
- if ((preferReg & RBM_PAIR_TMP_LO) == 0)
+ if ((preferReg == 0) || // no bits set?
+ ((preferReg & (preferReg - 1)) == 0)) // or only one bit set?
{
- // Add EAX to the registers
- // (The jit will introduce one spill temp)
- preferReg |= RBM_PAIR_TMP_LO;
+ // expand preferReg to all non-locked registers
+ preferReg = RBM_ALLINT & ~lockedRegs;
}
- else
+
+ if (preferReg == 0) // no bits set?
{
- // Add EDX to the registers
- // (The jit will introduce one spill temp)
- preferReg |= RBM_PAIR_TMP_HI;
+ // Add EAX:EDX to the registers
+ // (The jit will introduce two spill temps)
+ preferReg = RBM_PAIR_TMP;
+ rpPredictSpillCnt += 2;
+#ifdef DEBUG
+ if (verbose)
+ printf("Predict two spill temps\n");
+#endif
}
- rpPredictSpillCnt++;
-#ifdef DEBUG
- if (verbose)
- printf("Predict one spill temp\n");
+ else if ((preferReg & (preferReg - 1)) == 0) // only one bit set?
+ {
+ if ((preferReg & RBM_PAIR_TMP_LO) == 0)
+ {
+ // Add EAX to the registers
+ // (The jit will introduce one spill temp)
+ preferReg |= RBM_PAIR_TMP_LO;
+ }
+ else
+ {
+ // Add EDX to the registers
+ // (The jit will introduce one spill temp)
+ preferReg |= RBM_PAIR_TMP_HI;
+ }
+ rpPredictSpillCnt++;
+#ifdef DEBUG
+ if (verbose)
+ printf("Predict one spill temp\n");
#endif
- }
+ }
- regPairNo regPair;
- regPair = codeGen->regSet.rsFindRegPairNo(preferReg);
- if (regPair != REG_PAIR_NONE)
- {
- regBits = genRegPairMask(regPair);
- goto RET;
- }
+ regPairNo regPair;
+ regPair = codeGen->regSet.rsFindRegPairNo(preferReg);
+ if (regPair != REG_PAIR_NONE)
+ {
+ regBits = genRegPairMask(regPair);
+ goto RET;
+ }
- /* Otherwise we have allocated all registers, so do nothing */
- break;
+ /* Otherwise we have allocated all registers, so do nothing */
+ break;
#endif // _TARGET_AMD64_
#ifdef _TARGET_ARM_
- case TYP_STRUCT:
+ case TYP_STRUCT:
#endif
- case TYP_FLOAT:
- case TYP_DOUBLE:
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
#if FEATURE_FP_REGALLOC
- regMaskTP restrictMask; restrictMask = (raConfigRestrictMaskFP() | RBM_FLT_CALLEE_TRASH);
- assert((restrictMask & RBM_SPILL_CHOICE_FLT) == RBM_SPILL_CHOICE_FLT);
+ regMaskTP restrictMask;
+ restrictMask = (raConfigRestrictMaskFP() | RBM_FLT_CALLEE_TRASH);
+ assert((restrictMask & RBM_SPILL_CHOICE_FLT) == RBM_SPILL_CHOICE_FLT);
- // expand preferReg to all available non-locked registers if no bits set
- preferReg = codeGen->regSet.rsUseIfZero(preferReg & restrictMask, restrictMask & ~lockedRegs);
- regMaskTP preferDouble; preferDouble = preferReg & (preferReg>>1);
+ // expand preferReg to all available non-locked registers if no bits set
+ preferReg = codeGen->regSet.rsUseIfZero(preferReg & restrictMask, restrictMask & ~lockedRegs);
+ regMaskTP preferDouble;
+ preferDouble = preferReg & (preferReg >> 1);
- if ( (preferReg == 0) // no bits set?
+ if ((preferReg == 0) // no bits set?
#ifdef _TARGET_ARM_
- || ((type == TYP_DOUBLE) && ((preferReg & (preferReg>>1)) == 0) ) // or two consecutive bits set for TYP_DOUBLE
+ || ((type == TYP_DOUBLE) &&
+ ((preferReg & (preferReg >> 1)) == 0)) // or two consecutive bits set for TYP_DOUBLE
#endif
- )
- {
- // Add one predefined spill choice register if no bits set.
- // (The jit will introduce one spill temp)
- preferReg |= RBM_SPILL_CHOICE_FLT;
- rpPredictSpillCnt++;
+ )
+ {
+ // Add one predefined spill choice register if no bits set.
+ // (The jit will introduce one spill temp)
+ preferReg |= RBM_SPILL_CHOICE_FLT;
+ rpPredictSpillCnt++;
-#ifdef DEBUG
- if (verbose)
- printf("Predict one spill temp (float)\n");
+#ifdef DEBUG
+ if (verbose)
+ printf("Predict one spill temp (float)\n");
#endif
- }
-
- assert(preferReg != 0);
+ }
- /* Iterate the registers in the order specified by raRegFltTmpOrder */
+ assert(preferReg != 0);
- for (unsigned index = 0;
- index < REG_FLT_TMP_ORDER_COUNT;
- index++)
- {
- regNum = raRegFltTmpOrder[index];
- regBits = genRegMask(regNum);
+ /* Iterate the registers in the order specified by raRegFltTmpOrder */
- if (varTypeIsFloating(type))
+ for (unsigned index = 0; index < REG_FLT_TMP_ORDER_COUNT; index++)
{
-#ifdef _TARGET_ARM_
- if (type == TYP_DOUBLE)
+ regNum = raRegFltTmpOrder[index];
+ regBits = genRegMask(regNum);
+
+ if (varTypeIsFloating(type))
{
- if ((regBits & RBM_DBL_REGS) == 0)
- {
- continue; // We must restrict the set to the double registers
- }
- else
+#ifdef _TARGET_ARM_
+ if (type == TYP_DOUBLE)
{
- // TYP_DOUBLE use two consecutive registers
- regBits |= genRegMask(REG_NEXT(regNum));
+ if ((regBits & RBM_DBL_REGS) == 0)
+ {
+ continue; // We must restrict the set to the double registers
+ }
+ else
+ {
+ // TYP_DOUBLE uses two consecutive registers
+ regBits |= genRegMask(REG_NEXT(regNum));
+ }
}
- }
#endif
- // See if COMPlus_JitRegisterFP is restricting this FP register
- //
- if ((restrictMask & regBits) != regBits)
- continue;
- }
+ // See if COMPlus_JitRegisterFP is restricting this FP register
+ //
+ if ((restrictMask & regBits) != regBits)
+ continue;
+ }
- if ((preferReg & regBits) == regBits)
- {
- goto RET;
+ if ((preferReg & regBits) == regBits)
+ {
+ goto RET;
+ }
}
- }
- /* Otherwise we have allocated all registers, so do nothing */
- break;
+ /* Otherwise we have allocated all registers, so do nothing */
+ break;
-#else // !FEATURE_FP_REGALLOC
+#else // !FEATURE_FP_REGALLOC
- return RBM_NONE;
+ return RBM_NONE;
#endif
- default:
- noway_assert(!"unexpected type in reg use prediction");
+ default:
+ noway_assert(!"unexpected type in reg use prediction");
}
/* Abnormal return */
@@ -1289,32 +1254,33 @@ RET:
// Add a register interference to each of the last use variables
if (!VarSetOps::IsEmpty(this, rpLastUseVars) || !VarSetOps::IsEmpty(this, rpUseInPlace))
{
- VARSET_TP VARSET_INIT_NOCOPY(lastUse, VarSetOps::MakeEmpty(this)); VarSetOps::Assign(this, lastUse, rpLastUseVars);
- VARSET_TP VARSET_INIT_NOCOPY(inPlaceUse, VarSetOps::MakeEmpty(this)); VarSetOps::Assign(this, inPlaceUse, rpUseInPlace);
+ VARSET_TP VARSET_INIT_NOCOPY(lastUse, VarSetOps::MakeEmpty(this));
+ VarSetOps::Assign(this, lastUse, rpLastUseVars);
+ VARSET_TP VARSET_INIT_NOCOPY(inPlaceUse, VarSetOps::MakeEmpty(this));
+ VarSetOps::Assign(this, inPlaceUse, rpUseInPlace);
// While we still have any lastUse or inPlaceUse bits
- VARSET_TP VARSET_INIT_NOCOPY(useUnion, VarSetOps::Union(this, lastUse, inPlaceUse));
+ VARSET_TP VARSET_INIT_NOCOPY(useUnion, VarSetOps::Union(this, lastUse, inPlaceUse));
- VARSET_TP VARSET_INIT_NOCOPY(varAsSet, VarSetOps::MakeEmpty(this));
+ VARSET_TP VARSET_INIT_NOCOPY(varAsSet, VarSetOps::MakeEmpty(this));
VARSET_ITER_INIT(this, iter, useUnion, varNum);
while (iter.NextElem(this, &varNum))
{
// We'll need this for one of the calls...
- VarSetOps::ClearD(this, varAsSet); VarSetOps::AddElemD(this, varAsSet, varNum);
+ VarSetOps::ClearD(this, varAsSet);
+ VarSetOps::AddElemD(this, varAsSet, varNum);
// If this varBit and lastUse?
if (VarSetOps::IsMember(this, lastUse, varNum))
{
// Record a register to variable interference
- rpRecordRegIntf(regBits, varAsSet
- DEBUGARG( "last use RegPick"));
+ rpRecordRegIntf(regBits, varAsSet DEBUGARG("last use RegPick"));
}
// If this varBit and inPlaceUse?
if (VarSetOps::IsMember(this, inPlaceUse, varNum))
{
// Record a register to variable interference
- rpRecordRegIntf(regBits, varAsSet
- DEBUGARG( "used in place RegPick"));
+ rpRecordRegIntf(regBits, varAsSet DEBUGARG("used in place RegPick"));
}
}
}
@@ -1343,25 +1309,22 @@ RET:
* to form an address expression)
*/
-regMaskTP Compiler::rpPredictAddressMode(GenTreePtr tree,
- var_types type,
- regMaskTP lockedRegs,
- regMaskTP rsvdRegs,
- GenTreePtr lenCSE)
+regMaskTP Compiler::rpPredictAddressMode(
+ GenTreePtr tree, var_types type, regMaskTP lockedRegs, regMaskTP rsvdRegs, GenTreePtr lenCSE)
{
- GenTreePtr op1;
- GenTreePtr op2;
- GenTreePtr opTemp;
- genTreeOps oper = tree->OperGet();
- regMaskTP op1Mask;
- regMaskTP op2Mask;
- regMaskTP regMask;
- ssize_t sh;
- ssize_t cns = 0;
- bool rev;
- bool hasTwoAddConst = false;
- bool restoreLastUseVars = false;
- VARSET_TP VARSET_INIT_NOCOPY(oldLastUseVars, VarSetOps::MakeEmpty(this));
+ GenTreePtr op1;
+ GenTreePtr op2;
+ GenTreePtr opTemp;
+ genTreeOps oper = tree->OperGet();
+ regMaskTP op1Mask;
+ regMaskTP op2Mask;
+ regMaskTP regMask;
+ ssize_t sh;
+ ssize_t cns = 0;
+ bool rev;
+ bool hasTwoAddConst = false;
+ bool restoreLastUseVars = false;
+ VARSET_TP VARSET_INIT_NOCOPY(oldLastUseVars, VarSetOps::MakeEmpty(this));
/* do we need to save and restore the rpLastUseVars set ? */
if ((rsvdRegs & RBM_LASTUSE) && (lenCSE == NULL))
@@ -1406,7 +1369,7 @@ regMaskTP Compiler::rpPredictAddressMode(GenTreePtr tree,
rev = ((op1->gtFlags & GTF_REVERSE_OPS) != 0);
op2 = op1->gtOp.gtOp2;
- op1 = op1->gtOp.gtOp1; // Overwrite op1 last!!
+ op1 = op1->gtOp.gtOp1; // Overwrite op1 last!!
}
/* Check for CNS_INT or LSH of CNS_INT in op2 slot */
@@ -1441,7 +1404,7 @@ regMaskTP Compiler::rpPredictAddressMode(GenTreePtr tree,
{
// Compute the new cns value that Codegen will end up using
cns += (opTemp->gtIntCon.gtIconVal << sh);
-
+
goto ONE_ADDR_EXPR;
}
}
@@ -1490,12 +1453,12 @@ TWO_ADDR_EXPR:
/* Evaluate op1 and op2 in the correct order */
if (rev)
{
- op2Mask = rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
+ op2Mask = rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
op1Mask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs | op2Mask, rsvdRegs);
}
else
{
- op1Mask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
+ op1Mask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
op2Mask = rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs | op1Mask, rsvdRegs);
}
@@ -1516,11 +1479,11 @@ TWO_ADDR_EXPR:
//
if (hasTwoAddConst && (sh != 0) && (op1Mask == RBM_NONE))
{
- op1Mask |= rpPredictRegPick(TYP_INT, PREDICT_REG, (lockedRegs | op1Mask | op2Mask));
+ op1Mask |= rpPredictRegPick(TYP_INT, PREDICT_REG, (lockedRegs | op1Mask | op2Mask));
}
//
- // On the ARM we will need at least one scratch register for trees that have this form:
+ // On the ARM we will need at least one scratch register for trees that have this form:
// [op1 + op2 + cns] or [op1 + op2<<sh + cns]
// or for a float/double or long when we have both op1 and op2
// or when we have an 'cns' that is too large for the ld/st instruction
@@ -1539,19 +1502,18 @@ TWO_ADDR_EXPR:
opTemp = op2->gtOp.gtOp2;
if (opTemp->OperGet() == GT_LCL_VAR)
{
- unsigned varNum = opTemp->gtLclVar.gtLclNum;
- LclVarDsc * varDsc = &lvaTable[varNum];
+ unsigned varNum = opTemp->gtLclVar.gtLclNum;
+ LclVarDsc* varDsc = &lvaTable[varNum];
if (varDsc->lvTracked && !VarSetOps::IsMember(this, compCurLife, varDsc->lvVarIndex))
{
- rpRecordRegIntf(RBM_TMP_0, VarSetOps::MakeSingleton(this, varDsc->lvVarIndex)
- DEBUGARG( "dead CSE (gt_ind)"));
-
+ rpRecordRegIntf(RBM_TMP_0,
+ VarSetOps::MakeSingleton(this, varDsc->lvVarIndex) DEBUGARG("dead CSE (gt_ind)"));
}
}
}
#endif
-
+
regMask = (op1Mask | op2Mask);
tree->gtUsedRegs = (regMaskSmall)regMask;
goto DONE;
@@ -1565,7 +1527,8 @@ ONE_ADDR_EXPR:
#ifdef _TARGET_ARM_
//
- // On the ARM we will need another scratch register when we have an 'cns' that is too large for the ld/st instruction
+ // On the ARM we will need another scratch register when we have a 'cns' that is too large for the ld/st
+ // instruction
//
if (!codeGen->validDispForLdSt(cns, type))
{
@@ -1593,7 +1556,7 @@ NO_ADDR_EXPR:
}
DONE:
- regMaskTP regUse = tree->gtUsedRegs;
+ regMaskTP regUse = tree->gtUsedRegs;
if (!VarSetOps::IsEmpty(this, compCurLife))
{
@@ -1601,8 +1564,7 @@ DONE:
// the set of temporary registers need to evaluate the sub tree
if (regUse)
{
- rpRecordRegIntf(regUse, compCurLife
- DEBUGARG( "tmp use (gt_ind)"));
+ rpRecordRegIntf(regUse, compCurLife DEBUGARG("tmp use (gt_ind)"));
}
}
@@ -1617,8 +1579,7 @@ DONE:
if (!VarSetOps::Equal(this, rpLastUseVars, oldLastUseVars) && rpAsgVarNum != -1)
{
rpRecordVarIntf(rpAsgVarNum,
- VarSetOps::Diff(this, rpLastUseVars, oldLastUseVars)
- DEBUGARG( "asgn conflict (gt_ind)"));
+ VarSetOps::Diff(this, rpLastUseVars, oldLastUseVars) DEBUGARG("asgn conflict (gt_ind)"));
}
VarSetOps::Assign(this, rpLastUseVars, oldLastUseVars);
}
@@ -1633,21 +1594,21 @@ DONE:
void Compiler::rpPredictRefAssign(unsigned lclNum)
{
- LclVarDsc * varDsc = lvaTable + lclNum;
+ LclVarDsc* varDsc = lvaTable + lclNum;
varDsc->lvRefAssign = 1;
#if NOGC_WRITE_BARRIERS
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- if (!VarSetOps::IsMember(this, raLclRegIntf[REG_EDX], varDsc->lvVarIndex))
- printf("Record interference between V%02u,T%02u and REG WRITE BARRIER -- ref assign\n",
- lclNum, varDsc->lvVarIndex);
+ if (!VarSetOps::IsMember(this, raLclRegIntf[REG_EDX], varDsc->lvVarIndex))
+ printf("Record interference between V%02u,T%02u and REG WRITE BARRIER -- ref assign\n", lclNum,
+ varDsc->lvVarIndex);
}
#endif
- /* Make sure that write barrier pointer variables never land in EDX */
+ /* Make sure that write barrier pointer variables never land in EDX */
VarSetOps::AddElemD(this, raLclRegIntf[REG_EDX], varDsc->lvVarIndex);
#endif // NOGC_WRITE_BARRIERS
}
@@ -1659,8 +1620,8 @@ void Compiler::rpPredictRefAssign(unsigned lclNum)
* Records the internal temp physical register usage for this tree.
* Returns a mask of interfering registers for this tree.
*
- * Each of the switch labels in this function updates regMask and assigns tree->gtUsedRegs
- * to the set of scratch registers needed when evaluating the tree.
+ * Each of the switch labels in this function updates regMask and assigns tree->gtUsedRegs
+ * to the set of scratch registers needed when evaluating the tree.
* Generally tree->gtUsedRegs and the return value retMask are the same, except when the
* parameter "lockedRegs" conflicts with the computed tree->gtUsedRegs, in which case we
* predict additional internal temp physical registers to spill into.
@@ -1675,26 +1636,26 @@ void Compiler::rpPredictRefAssign(unsigned lclNum)
* the rpLastUseVars set should be saved and restored
* so that we don't add any new variables to rpLastUseVars.
*/
-regMaskTP Compiler::rpPredictBlkAsgRegUse(GenTreePtr tree,
- rpPredictReg predictReg,
- regMaskTP lockedRegs,
- regMaskTP rsvdRegs)
+regMaskTP Compiler::rpPredictBlkAsgRegUse(GenTreePtr tree,
+ rpPredictReg predictReg,
+ regMaskTP lockedRegs,
+ regMaskTP rsvdRegs)
{
- regMaskTP regMask = RBM_NONE;
- regMaskTP interferingRegs = RBM_NONE;
+ regMaskTP regMask = RBM_NONE;
+ regMaskTP interferingRegs = RBM_NONE;
- bool hasGCpointer = false;
- bool dstIsOnStack = false;
- bool useMemHelper = false;
- bool useBarriers = false;
+ bool hasGCpointer = false;
+ bool dstIsOnStack = false;
+ bool useMemHelper = false;
+ bool useBarriers = false;
- GenTreeBlkOp* blkNode = tree->AsBlkOp();
- GenTreePtr dstAddr = blkNode->Dest();
- GenTreePtr op1 = blkNode->gtGetOp1();
- GenTreePtr srcAddrOrFill = op1->gtGetOp2();
- GenTreePtr sizeNode = blkNode->gtGetOp2();
+ GenTreeBlkOp* blkNode = tree->AsBlkOp();
+ GenTreePtr dstAddr = blkNode->Dest();
+ GenTreePtr op1 = blkNode->gtGetOp1();
+ GenTreePtr srcAddrOrFill = op1->gtGetOp2();
+ GenTreePtr sizeNode = blkNode->gtGetOp2();
- size_t blkSize = 0;
+ size_t blkSize = 0;
hasGCpointer = (blkNode->HasGCPtr());
@@ -1711,27 +1672,27 @@ regMaskTP Compiler::rpPredictBlkAsgRegUse(GenTreePtr tree,
dstIsOnStack = (dstAddr->gtOper == GT_ADDR && (dstAddr->gtFlags & GTF_ADDR_ONSTACK));
}
- CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE) sizeNode->gtIntCon.gtIconVal;
- blkSize = roundUp(info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
+ CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)sizeNode->gtIntCon.gtIconVal;
+ blkSize = roundUp(info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
}
- else // gtIconVal contains amount to copy
+ else // gtIconVal contains amount to copy
{
- blkSize = (unsigned) sizeNode->gtIntCon.gtIconVal;
+ blkSize = (unsigned)sizeNode->gtIntCon.gtIconVal;
}
if (isInitBlk)
{
if (srcAddrOrFill->OperGet() != GT_CNS_INT)
{
- useMemHelper = true;
+ useMemHelper = true;
}
}
}
else
{
- useMemHelper = true;
+ useMemHelper = true;
}
-
+
if (hasGCpointer && !dstIsOnStack)
{
useBarriers = true;
@@ -1745,31 +1706,32 @@ regMaskTP Compiler::rpPredictBlkAsgRegUse(GenTreePtr tree,
{
bool useLoop = false;
unsigned fullStoreCount = blkSize / TARGET_POINTER_SIZE;
-
+
// A mask to use to force the predictor to choose low registers (to reduce code size)
- regMaskTP avoidReg = (RBM_R12|RBM_LR);
+ regMaskTP avoidReg = (RBM_R12 | RBM_LR);
// Allow the src and dst to be used in place, unless we use a loop, in which
// case we will need scratch registers as we will be writing to them.
rpPredictReg srcAndDstPredict = PREDICT_REG;
// Will we be using a loop to implement this INITBLK/COPYBLK?
- if ((isCopyBlk && (fullStoreCount >= 8)) ||
- (isInitBlk && (fullStoreCount >= 16)))
+ if ((isCopyBlk && (fullStoreCount >= 8)) || (isInitBlk && (fullStoreCount >= 16)))
{
- useLoop = true;
- avoidReg = RBM_NONE;
+ useLoop = true;
+ avoidReg = RBM_NONE;
srcAndDstPredict = PREDICT_SCRATCH_REG;
}
if (op1->gtFlags & GTF_REVERSE_OPS)
{
- regMask |= rpPredictTreeRegUse(srcAddrOrFill, srcAndDstPredict, lockedRegs, dstAddr->gtRsvdRegs | avoidReg | RBM_LASTUSE);
+ regMask |= rpPredictTreeRegUse(srcAddrOrFill, srcAndDstPredict, lockedRegs,
+ dstAddr->gtRsvdRegs | avoidReg | RBM_LASTUSE);
regMask |= rpPredictTreeRegUse(dstAddr, srcAndDstPredict, lockedRegs | regMask, avoidReg);
}
else
{
- regMask |= rpPredictTreeRegUse(dstAddr, srcAndDstPredict, lockedRegs, srcAddrOrFill->gtRsvdRegs | avoidReg | RBM_LASTUSE);
+ regMask |= rpPredictTreeRegUse(dstAddr, srcAndDstPredict, lockedRegs,
+ srcAddrOrFill->gtRsvdRegs | avoidReg | RBM_LASTUSE);
regMask |= rpPredictTreeRegUse(srcAddrOrFill, srcAndDstPredict, lockedRegs | regMask, avoidReg);
}
@@ -1793,21 +1755,17 @@ regMaskTP Compiler::rpPredictBlkAsgRegUse(GenTreePtr tree,
regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | regMask);
}
- tree->gtUsedRegs = dstAddr->gtUsedRegs |
- srcAddrOrFill->gtUsedRegs |
- (regMaskSmall)regMask;
+ tree->gtUsedRegs = dstAddr->gtUsedRegs | srcAddrOrFill->gtUsedRegs | (regMaskSmall)regMask;
return interferingRegs;
}
#endif
// What order should the Dest, Val/Src, and Size be calculated
- GenTreePtr opsPtr [3];
- regMaskTP regsPtr[3];
+ GenTreePtr opsPtr[3];
+ regMaskTP regsPtr[3];
#if defined(_TARGET_XARCH_)
- fgOrderBlockOps(tree,
- RBM_EDI, (isInitBlk) ? RBM_EAX : RBM_ESI, RBM_ECX,
- opsPtr, regsPtr);
+ fgOrderBlockOps(tree, RBM_EDI, (isInitBlk) ? RBM_EAX : RBM_ESI, RBM_ECX, opsPtr, regsPtr);
// We're going to use these, might as well make them available now
@@ -1825,7 +1783,7 @@ regMaskTP Compiler::rpPredictBlkAsgRegUse(GenTreePtr tree,
interferingRegs |= RBM_CALLEE_TRASH;
#ifdef DEBUG
if (verbose)
- printf("Adding interference with RBM_CALLEE_TRASH for memcpy/memset\n");
+ printf("Adding interference with RBM_CALLEE_TRASH for memcpy/memset\n");
#endif
}
else // useBarriers
@@ -1839,41 +1797,29 @@ regMaskTP Compiler::rpPredictBlkAsgRegUse(GenTreePtr tree,
interferingRegs |= RBM_CALLEE_TRASH_NOGC;
#ifdef DEBUG
if (verbose)
- printf("Adding interference with RBM_CALLEE_TRASH_NOGC for Byref WriteBarrier\n");
+ printf("Adding interference with RBM_CALLEE_TRASH_NOGC for Byref WriteBarrier\n");
#endif
}
-#else // !_TARGET_X86_ && !_TARGET_ARM_
+#else // !_TARGET_X86_ && !_TARGET_ARM_
#error "Non-ARM or x86 _TARGET_ in RegPredict for INITBLK/COPYBLK"
#endif // !_TARGET_X86_ && !_TARGET_ARM_
- regMask |= rpPredictTreeRegUse(opsPtr[0],
- rpGetPredictForMask(regsPtr[0]),
- lockedRegs,
+ regMask |= rpPredictTreeRegUse(opsPtr[0], rpGetPredictForMask(regsPtr[0]), lockedRegs,
opsPtr[1]->gtRsvdRegs | opsPtr[2]->gtRsvdRegs | RBM_LASTUSE);
regMask |= regsPtr[0];
opsPtr[0]->gtUsedRegs |= regsPtr[0];
- rpRecordRegIntf(regsPtr[0], compCurLife
- DEBUGARG("movsd dest"));
+ rpRecordRegIntf(regsPtr[0], compCurLife DEBUGARG("movsd dest"));
- regMask |= rpPredictTreeRegUse(opsPtr[1],
- rpGetPredictForMask(regsPtr[1]),
- lockedRegs | regMask,
+ regMask |= rpPredictTreeRegUse(opsPtr[1], rpGetPredictForMask(regsPtr[1]), lockedRegs | regMask,
opsPtr[2]->gtRsvdRegs | RBM_LASTUSE);
regMask |= regsPtr[1];
opsPtr[1]->gtUsedRegs |= regsPtr[1];
- rpRecordRegIntf(regsPtr[1], compCurLife
- DEBUGARG("movsd src"));
+ rpRecordRegIntf(regsPtr[1], compCurLife DEBUGARG("movsd src"));
- regMask |= rpPredictTreeRegUse(opsPtr[2],
- rpGetPredictForMask(regsPtr[2]),
- lockedRegs | regMask,
- RBM_NONE);
+ regMask |= rpPredictTreeRegUse(opsPtr[2], rpGetPredictForMask(regsPtr[2]), lockedRegs | regMask, RBM_NONE);
regMask |= regsPtr[2];
opsPtr[2]->gtUsedRegs |= regsPtr[2];
- tree->gtUsedRegs = opsPtr[0]->gtUsedRegs |
- opsPtr[1]->gtUsedRegs |
- opsPtr[2]->gtUsedRegs |
- (regMaskSmall)regMask;
+ tree->gtUsedRegs = opsPtr[0]->gtUsedRegs | opsPtr[1]->gtUsedRegs | opsPtr[2]->gtUsedRegs | (regMaskSmall)regMask;
return interferingRegs;
}
@@ -1882,8 +1828,8 @@ regMaskTP Compiler::rpPredictBlkAsgRegUse(GenTreePtr tree,
* Predict the internal temp physical register usage for a tree by setting tree->gtUsedRegs.
* Returns a regMask with the internal temp physical register usage for this tree.
*
- * Each of the switch labels in this function updates regMask and assigns tree->gtUsedRegs
- * to the set of scratch registers needed when evaluating the tree.
+ * Each of the switch labels in this function updates regMask and assigns tree->gtUsedRegs
+ * to the set of scratch registers needed when evaluating the tree.
* Generally tree->gtUsedRegs and the return value retMask are the same, except when the
* parameter "lockedRegs" conflicts with the computed tree->gtUsedRegs, in which case we
* predict additional internal temp physical registers to spill into.
@@ -1899,30 +1845,30 @@ regMaskTP Compiler::rpPredictBlkAsgRegUse(GenTreePtr tree,
* so that we don't add any new variables to rpLastUseVars.
*/
-#pragma warning(disable:4701)
+#pragma warning(disable : 4701)
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
-regMaskTP Compiler::rpPredictTreeRegUse(GenTreePtr tree,
- rpPredictReg predictReg,
- regMaskTP lockedRegs,
- regMaskTP rsvdRegs)
+regMaskTP Compiler::rpPredictTreeRegUse(GenTreePtr tree,
+ rpPredictReg predictReg,
+ regMaskTP lockedRegs,
+ regMaskTP rsvdRegs)
{
- regMaskTP regMask = DUMMY_INIT(RBM_ILLEGAL);
- regMaskTP op2Mask;
- regMaskTP tmpMask;
- rpPredictReg op1PredictReg;
- rpPredictReg op2PredictReg;
- LclVarDsc * varDsc = NULL;
- VARSET_TP VARSET_INIT_NOCOPY(oldLastUseVars, VarSetOps::UninitVal());
+ regMaskTP regMask = DUMMY_INIT(RBM_ILLEGAL);
+ regMaskTP op2Mask;
+ regMaskTP tmpMask;
+ rpPredictReg op1PredictReg;
+ rpPredictReg op2PredictReg;
+ LclVarDsc* varDsc = NULL;
+ VARSET_TP VARSET_INIT_NOCOPY(oldLastUseVars, VarSetOps::UninitVal());
- VARSET_TP VARSET_INIT_NOCOPY(varBits, VarSetOps::UninitVal());
- VARSET_TP VARSET_INIT_NOCOPY(lastUseVarBits, VarSetOps::MakeEmpty(this));
+ VARSET_TP VARSET_INIT_NOCOPY(varBits, VarSetOps::UninitVal());
+ VARSET_TP VARSET_INIT_NOCOPY(lastUseVarBits, VarSetOps::MakeEmpty(this));
- bool restoreLastUseVars = false;
- regMaskTP interferingRegs = RBM_NONE;
+ bool restoreLastUseVars = false;
+ regMaskTP interferingRegs = RBM_NONE;
#ifdef DEBUG
// if (verbose) printf("rpPredictTreeRegUse() [%08x]\n", tree);
@@ -1936,12 +1882,12 @@ regMaskTP Compiler::rpPredictTreeRegUse(GenTreePtr tree,
/* Figure out what kind of a node we have */
- genTreeOps oper = tree->OperGet();
- var_types type = tree->TypeGet();
- unsigned kind = tree->OperKind();
+ genTreeOps oper = tree->OperGet();
+ var_types type = tree->TypeGet();
+ unsigned kind = tree->OperKind();
// In the comma case, we care about whether this is "effectively" ADDR(IND(...))
- genTreeOps effectiveOper = tree->gtEffectiveVal()->OperGet();
+ genTreeOps effectiveOper = tree->gtEffectiveVal()->OperGet();
if ((predictReg == PREDICT_ADDR) && (effectiveOper != GT_IND))
predictReg = PREDICT_NONE;
else if (rpHasVarIndexForPredict(predictReg))
@@ -1950,12 +1896,10 @@ regMaskTP Compiler::rpPredictTreeRegUse(GenTreePtr tree,
// assignment case where varIndex is the var being assigned to.
// We need to check whether the variable is used between here and
// its redefinition.
- unsigned varIndex = rpGetVarIndexForPredict(predictReg);
- unsigned lclNum = lvaTrackedToVarNum[varIndex];
- bool found = false;
- for (GenTreePtr nextTree = tree->gtNext;
- nextTree != NULL && !found;
- nextTree = nextTree->gtNext)
+ unsigned varIndex = rpGetVarIndexForPredict(predictReg);
+ unsigned lclNum = lvaTrackedToVarNum[varIndex];
+ bool found = false;
+ for (GenTreePtr nextTree = tree->gtNext; nextTree != NULL && !found; nextTree = nextTree->gtNext)
{
if (nextTree->gtOper == GT_LCL_VAR && nextTree->gtLclVarCommon.gtLclNum == lclNum)
{
@@ -1968,19 +1912,19 @@ regMaskTP Compiler::rpPredictTreeRegUse(GenTreePtr tree,
break;
}
}
- assert (found);
+ assert(found);
}
if (rsvdRegs & RBM_LASTUSE)
{
- restoreLastUseVars = true;
+ restoreLastUseVars = true;
VarSetOps::Assign(this, oldLastUseVars, rpLastUseVars);
- rsvdRegs &= ~RBM_LASTUSE;
+ rsvdRegs &= ~RBM_LASTUSE;
}
/* Is this a constant or leaf node? */
- if (kind & (GTK_CONST | GTK_LEAF))
+ if (kind & (GTK_CONST | GTK_LEAF))
{
bool lastUse = false;
regMaskTP enregMask = RBM_NONE;
@@ -1988,397 +1932,395 @@ regMaskTP Compiler::rpPredictTreeRegUse(GenTreePtr tree,
switch (oper)
{
#ifdef _TARGET_ARM_
- case GT_CNS_DBL:
- // Codegen for floating point constants on the ARM is currently
- // movw/movt rT1, <lo32 bits>
- // movw/movt rT2, <hi32 bits>
- // vmov.i2d dT0, rT1,rT2
- //
- // For TYP_FLOAT one integer register is required
- //
- // These integer register(s) immediately die
- tmpMask = rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs);
- if (type == TYP_DOUBLE)
- {
- // For TYP_DOUBLE a second integer register is required
+ case GT_CNS_DBL:
+ // Codegen for floating point constants on the ARM is currently
+ // movw/movt rT1, <lo32 bits>
+ // movw/movt rT2, <hi32 bits>
+ // vmov.i2d dT0, rT1,rT2
//
- tmpMask |= rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs | tmpMask);
- }
+ // For TYP_FLOAT one integer register is required
+ //
+ // These integer register(s) immediately die
+ tmpMask = rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs);
+ if (type == TYP_DOUBLE)
+ {
+ // For TYP_DOUBLE a second integer register is required
+ //
+ tmpMask |= rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs | tmpMask);
+ }
- // We also need a floating point register that we keep
- //
- if (predictReg == PREDICT_NONE)
- predictReg = PREDICT_SCRATCH_REG;
+ // We also need a floating point register that we keep
+ //
+ if (predictReg == PREDICT_NONE)
+ predictReg = PREDICT_SCRATCH_REG;
- regMask = rpPredictRegPick(type, predictReg, lockedRegs | rsvdRegs);
- tree->gtUsedRegs = regMask | tmpMask;
- goto RETURN_CHECK;
+ regMask = rpPredictRegPick(type, predictReg, lockedRegs | rsvdRegs);
+ tree->gtUsedRegs = regMask | tmpMask;
+ goto RETURN_CHECK;
#endif
- case GT_CNS_INT:
- case GT_CNS_LNG:
+ case GT_CNS_INT:
+ case GT_CNS_LNG:
- if (rpHasVarIndexForPredict(predictReg))
- {
- unsigned tgtIndex = rpGetVarIndexForPredict(predictReg);
- rpAsgVarNum = tgtIndex;
-
- // We don't need any register as we plan on writing to the rpAsgVarNum register
- predictReg = PREDICT_NONE;
+ if (rpHasVarIndexForPredict(predictReg))
+ {
+ unsigned tgtIndex = rpGetVarIndexForPredict(predictReg);
+ rpAsgVarNum = tgtIndex;
- LclVarDsc * tgtVar = lvaTable + lvaTrackedToVarNum[tgtIndex];
- tgtVar->lvDependReg = true;
+ // We don't need any register as we plan on writing to the rpAsgVarNum register
+ predictReg = PREDICT_NONE;
- if (type == TYP_LONG)
- {
- assert(oper == GT_CNS_LNG);
+ LclVarDsc* tgtVar = lvaTable + lvaTrackedToVarNum[tgtIndex];
+ tgtVar->lvDependReg = true;
- if (tgtVar->lvOtherReg == REG_STK)
+ if (type == TYP_LONG)
{
- // Well we do need one register for a partially enregistered
- type = TYP_INT;
- predictReg = PREDICT_SCRATCH_REG;
+ assert(oper == GT_CNS_LNG);
+
+ if (tgtVar->lvOtherReg == REG_STK)
+ {
+ // Well we do need one register for a partially enregistered long
+ type = TYP_INT;
+ predictReg = PREDICT_SCRATCH_REG;
+ }
}
}
- }
- else
- {
+ else
+ {
#if !CPU_LOAD_STORE_ARCH
- /* If the constant is a handle then it will need to have a relocation
- applied to it. It will need to be loaded into a register.
- But never throw away an existing hint.
- */
- if (opts.compReloc && tree->IsCnsIntOrI() && tree->IsIconHandle())
+ /* If the constant is a handle then it will need to have a relocation
+ applied to it. It will need to be loaded into a register.
+ But never throw away an existing hint.
+ */
+ if (opts.compReloc && tree->IsCnsIntOrI() && tree->IsIconHandle())
#endif
- {
- if (predictReg == PREDICT_NONE)
- predictReg = PREDICT_SCRATCH_REG;
+ {
+ if (predictReg == PREDICT_NONE)
+ predictReg = PREDICT_SCRATCH_REG;
+ }
}
- }
- break;
+ break;
- case GT_NO_OP:
- break;
+ case GT_NO_OP:
+ break;
- case GT_CLS_VAR:
- if ((predictReg == PREDICT_NONE) &&
- (genActualType(type) == TYP_INT) &&
- (genTypeSize(type) < sizeof(int)) )
- {
- predictReg = PREDICT_SCRATCH_REG;
- }
+ case GT_CLS_VAR:
+ if ((predictReg == PREDICT_NONE) && (genActualType(type) == TYP_INT) &&
+ (genTypeSize(type) < sizeof(int)))
+ {
+ predictReg = PREDICT_SCRATCH_REG;
+ }
#ifdef _TARGET_ARM_
- // Unaligned loads/stores for floating point values must first be loaded into integer register(s)
- //
- if ((tree->gtFlags & GTF_IND_UNALIGNED) && varTypeIsFloating(type))
- {
- // These integer register(s) immediately die
- tmpMask = rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs);
- // Two integer registers are required for a TYP_DOUBLE
- if (type == TYP_DOUBLE)
- tmpMask |= rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs | tmpMask);
- }
- // We need a temp register in some cases of loads/stores to a class var
- if (predictReg == PREDICT_NONE)
- {
- predictReg = PREDICT_SCRATCH_REG;
- }
+ // Unaligned loads/stores for floating point values must first be loaded into integer register(s)
+ //
+ if ((tree->gtFlags & GTF_IND_UNALIGNED) && varTypeIsFloating(type))
+ {
+ // These integer register(s) immediately die
+ tmpMask = rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs);
+ // Two integer registers are required for a TYP_DOUBLE
+ if (type == TYP_DOUBLE)
+ tmpMask |= rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs | tmpMask);
+ }
+ // We need a temp register in some cases of loads/stores to a class var
+ if (predictReg == PREDICT_NONE)
+ {
+ predictReg = PREDICT_SCRATCH_REG;
+ }
#endif
- if (rpHasVarIndexForPredict(predictReg))
- {
- unsigned tgtIndex = rpGetVarIndexForPredict(predictReg);
- rpAsgVarNum = tgtIndex;
+ if (rpHasVarIndexForPredict(predictReg))
+ {
+ unsigned tgtIndex = rpGetVarIndexForPredict(predictReg);
+ rpAsgVarNum = tgtIndex;
- // We don't need any register as we plan on writing to the rpAsgVarNum register
- predictReg = PREDICT_NONE;
+ // We don't need any register as we plan on writing to the rpAsgVarNum register
+ predictReg = PREDICT_NONE;
- LclVarDsc * tgtVar = lvaTable + lvaTrackedToVarNum[tgtIndex];
- tgtVar->lvDependReg = true;
+ LclVarDsc* tgtVar = lvaTable + lvaTrackedToVarNum[tgtIndex];
+ tgtVar->lvDependReg = true;
- if (type == TYP_LONG)
- {
- if (tgtVar->lvOtherReg == REG_STK)
+ if (type == TYP_LONG)
{
- // Well we do need one register for a partially enregistered
- type = TYP_INT;
- predictReg = PREDICT_SCRATCH_REG;
+ if (tgtVar->lvOtherReg == REG_STK)
+ {
+ // Well we do need one register for a partially enregistered long
+ type = TYP_INT;
+ predictReg = PREDICT_SCRATCH_REG;
+ }
}
}
- }
- break;
+ break;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
#ifdef _TARGET_ARM_
- // Check for a misalignment on a Floating Point field
- //
- if (varTypeIsFloating(type))
- {
- if ((tree->gtLclFld.gtLclOffs % emitTypeSize(tree->TypeGet())) != 0)
+ // Check for a misalignment on a Floating Point field
+ //
+ if (varTypeIsFloating(type))
{
- // These integer register(s) immediately die
- tmpMask = rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs);
- // Two integer registers are required for a TYP_DOUBLE
- if (type == TYP_DOUBLE)
- tmpMask |= rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs | tmpMask);
+ if ((tree->gtLclFld.gtLclOffs % emitTypeSize(tree->TypeGet())) != 0)
+ {
+ // These integer register(s) immediately die
+ tmpMask = rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs);
+ // Two integer registers are required for a TYP_DOUBLE
+ if (type == TYP_DOUBLE)
+ tmpMask |= rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | rsvdRegs | tmpMask);
+ }
}
- }
#endif
- __fallthrough;
+ __fallthrough;
- case GT_LCL_VAR:
- case GT_REG_VAR:
+ case GT_LCL_VAR:
+ case GT_REG_VAR:
- varDsc = lvaTable + tree->gtLclVarCommon.gtLclNum;
+ varDsc = lvaTable + tree->gtLclVarCommon.gtLclNum;
- VarSetOps::Assign(this, varBits, fgGetVarBits(tree));
- compUpdateLifeVar</*ForCodeGen*/false>(tree, &lastUseVarBits);
- lastUse = !VarSetOps::IsEmpty(this, lastUseVarBits);
+ VarSetOps::Assign(this, varBits, fgGetVarBits(tree));
+ compUpdateLifeVar</*ForCodeGen*/ false>(tree, &lastUseVarBits);
+ lastUse = !VarSetOps::IsEmpty(this, lastUseVarBits);
#if FEATURE_STACK_FP_X87
- // If it's a floating point var, there's nothing to do
- if (varTypeIsFloating(type))
- {
- tree->gtUsedRegs = RBM_NONE;
- regMask = RBM_NONE;
- goto RETURN_CHECK;
- }
+ // If it's a floating point var, there's nothing to do
+ if (varTypeIsFloating(type))
+ {
+ tree->gtUsedRegs = RBM_NONE;
+ regMask = RBM_NONE;
+ goto RETURN_CHECK;
+ }
#endif
- // If the variable is already a register variable, no need to go further.
- if (oper == GT_REG_VAR)
- break;
-
- /* Apply the type of predictReg to the LCL_VAR */
-
- if (predictReg == PREDICT_REG)
- {
-PREDICT_REG_COMMON:
- if (varDsc->lvRegNum == REG_STK)
+ // If the variable is already a register variable, no need to go further.
+ if (oper == GT_REG_VAR)
break;
- goto GRAB_COUNT;
- }
- else if (predictReg == PREDICT_SCRATCH_REG)
- {
- noway_assert(predictReg == PREDICT_SCRATCH_REG);
+ /* Apply the type of predictReg to the LCL_VAR */
- /* Is this the last use of a local var? */
- if (lastUse)
+ if (predictReg == PREDICT_REG)
{
- if (VarSetOps::IsEmptyIntersection(this, rpUseInPlace, lastUseVarBits))
- goto PREDICT_REG_COMMON;
+ PREDICT_REG_COMMON:
+ if (varDsc->lvRegNum == REG_STK)
+ break;
+
+ goto GRAB_COUNT;
}
- }
- else if (rpHasVarIndexForPredict(predictReg))
- {
- /* Get the tracked local variable that has an lvVarIndex of tgtIndex1 */
+ else if (predictReg == PREDICT_SCRATCH_REG)
+ {
+ noway_assert(predictReg == PREDICT_SCRATCH_REG);
+
+ /* Is this the last use of a local var? */
+ if (lastUse)
+ {
+ if (VarSetOps::IsEmptyIntersection(this, rpUseInPlace, lastUseVarBits))
+ goto PREDICT_REG_COMMON;
+ }
+ }
+ else if (rpHasVarIndexForPredict(predictReg))
{
- unsigned tgtIndex1 = rpGetVarIndexForPredict(predictReg);
- LclVarDsc * tgtVar = lvaTable + lvaTrackedToVarNum[tgtIndex1];
- VarSetOps::MakeSingleton(this, tgtIndex1);
+ /* Get the tracked local variable that has an lvVarIndex of tgtIndex1 */
+ {
+ unsigned tgtIndex1 = rpGetVarIndexForPredict(predictReg);
+ LclVarDsc* tgtVar = lvaTable + lvaTrackedToVarNum[tgtIndex1];
+ VarSetOps::MakeSingleton(this, tgtIndex1);
- noway_assert(tgtVar->lvVarIndex == tgtIndex1);
- noway_assert(tgtVar->lvRegNum != REG_STK); /* Must have been enregistered */
+ noway_assert(tgtVar->lvVarIndex == tgtIndex1);
+ noway_assert(tgtVar->lvRegNum != REG_STK); /* Must have been enregistered */
#ifndef _TARGET_AMD64_
- // On amd64 we have the occasional spec-allowed implicit conversion from TYP_I_IMPL to TYP_INT
- // so this assert is meaningless
- noway_assert((type != TYP_LONG) || (tgtVar->TypeGet() == TYP_LONG));
+ // On amd64 we have the occasional spec-allowed implicit conversion from TYP_I_IMPL to TYP_INT
+ // so this assert is meaningless
+ noway_assert((type != TYP_LONG) || (tgtVar->TypeGet() == TYP_LONG));
#endif // !_TARGET_AMD64_
-
- if (varDsc->lvTracked)
- {
- unsigned srcIndex; srcIndex = varDsc->lvVarIndex;
-
- // If this register has it's last use here then we will prefer
- // to color to the same register as tgtVar.
- if (lastUse)
- {
- /*
- * Add an entry in the lvaVarPref graph to indicate
- * that it would be worthwhile to color these two variables
- * into the same physical register.
- * This will help us avoid having an extra copy instruction
- */
- VarSetOps::AddElemD(this, lvaVarPref[srcIndex], tgtIndex1);
- VarSetOps::AddElemD(this, lvaVarPref[tgtIndex1], srcIndex);
- }
-
- // Add a variable interference from srcIndex to each of the last use variables
- if (!VarSetOps::IsEmpty(this, rpLastUseVars))
+
+ if (varDsc->lvTracked)
{
- rpRecordVarIntf(srcIndex, rpLastUseVars
- DEBUGARG( "src reg conflict"));
+ unsigned srcIndex;
+ srcIndex = varDsc->lvVarIndex;
+
+ // If this register has its last use here then we will prefer
+ // to color to the same register as tgtVar.
+ if (lastUse)
+ {
+ /*
+ * Add an entry in the lvaVarPref graph to indicate
+ * that it would be worthwhile to color these two variables
+ * into the same physical register.
+ * This will help us avoid having an extra copy instruction
+ */
+ VarSetOps::AddElemD(this, lvaVarPref[srcIndex], tgtIndex1);
+ VarSetOps::AddElemD(this, lvaVarPref[tgtIndex1], srcIndex);
+ }
+
+ // Add a variable interference from srcIndex to each of the last use variables
+ if (!VarSetOps::IsEmpty(this, rpLastUseVars))
+ {
+ rpRecordVarIntf(srcIndex, rpLastUseVars DEBUGARG("src reg conflict"));
+ }
}
+ rpAsgVarNum = tgtIndex1;
+
+ /* We will rely on the target enregistered variable from the GT_ASG */
+ varDsc = tgtVar;
}
- rpAsgVarNum = tgtIndex1;
-
- /* We will rely on the target enregistered variable from the GT_ASG */
- varDsc = tgtVar;
- }
-GRAB_COUNT:
- unsigned grabCount; grabCount = 0;
+ GRAB_COUNT:
+ unsigned grabCount;
+ grabCount = 0;
- if (genIsValidFloatReg(varDsc->lvRegNum))
- {
- enregMask = genRegMaskFloat(varDsc->lvRegNum, varDsc->TypeGet());
- }
- else
- {
- enregMask = genRegMask(varDsc->lvRegNum);
- }
+ if (genIsValidFloatReg(varDsc->lvRegNum))
+ {
+ enregMask = genRegMaskFloat(varDsc->lvRegNum, varDsc->TypeGet());
+ }
+ else
+ {
+ enregMask = genRegMask(varDsc->lvRegNum);
+ }
#ifdef _TARGET_ARM_
- if ((type == TYP_DOUBLE) && (varDsc->TypeGet() == TYP_FLOAT))
- {
- // We need to compute the intermediate value using a TYP_DOUBLE
- // but we storing the result in a TYP_SINGLE enregistered variable
- //
- grabCount++;
- }
- else
-#endif
- {
- /* We can't trust a prediction of rsvdRegs or lockedRegs sets */
- if (enregMask & (rsvdRegs | lockedRegs))
+ if ((type == TYP_DOUBLE) && (varDsc->TypeGet() == TYP_FLOAT))
{
+ // We need to compute the intermediate value using a TYP_DOUBLE
+ // but we are storing the result in a TYP_SINGLE enregistered variable
+ //
grabCount++;
}
-#ifndef _TARGET_64BIT_
- if (type == TYP_LONG)
+ else
+#endif
{
- if (varDsc->lvOtherReg != REG_STK)
+ /* We can't trust a prediction of rsvdRegs or lockedRegs sets */
+ if (enregMask & (rsvdRegs | lockedRegs))
{
- tmpMask = genRegMask(varDsc->lvOtherReg);
- enregMask |= tmpMask;
-
- /* We can't trust a prediction of rsvdRegs or lockedRegs sets */
- if (tmpMask & (rsvdRegs | lockedRegs))
- grabCount++;
+ grabCount++;
}
- else // lvOtherReg == REG_STK
+#ifndef _TARGET_64BIT_
+ if (type == TYP_LONG)
{
- grabCount++;
+ if (varDsc->lvOtherReg != REG_STK)
+ {
+ tmpMask = genRegMask(varDsc->lvOtherReg);
+ enregMask |= tmpMask;
+
+ /* We can't trust a prediction of rsvdRegs or lockedRegs sets */
+ if (tmpMask & (rsvdRegs | lockedRegs))
+ grabCount++;
+ }
+ else // lvOtherReg == REG_STK
+ {
+ grabCount++;
+ }
}
- }
#endif // _TARGET_64BIT_
- }
+ }
- varDsc->lvDependReg = true;
+ varDsc->lvDependReg = true;
- if (grabCount == 0)
- {
- /* Does not need a register */
- predictReg = PREDICT_NONE;
- //noway_assert(!VarSetOps::IsEmpty(this, varBits));
- VarSetOps::UnionD(this, rpUseInPlace, varBits);
- }
- else // (grabCount > 0)
- {
-#ifndef _TARGET_64BIT_
- /* For TYP_LONG and we only need one register then change the type to TYP_INT */
- if ((type == TYP_LONG) && (grabCount == 1))
+ if (grabCount == 0)
{
- /* We will need to pick one register */
- type = TYP_INT;
- //noway_assert(!VarSetOps::IsEmpty(this, varBits));
+ /* Does not need a register */
+ predictReg = PREDICT_NONE;
+ // noway_assert(!VarSetOps::IsEmpty(this, varBits));
VarSetOps::UnionD(this, rpUseInPlace, varBits);
}
- noway_assert((type == TYP_DOUBLE) || (grabCount == (genTypeSize(genActualType(type)) / REGSIZE_BYTES)));
-#else // !_TARGET_64BIT_
- noway_assert(grabCount == 1);
+ else // (grabCount > 0)
+ {
+#ifndef _TARGET_64BIT_
+ /* For TYP_LONG, when we only need one register, change the type to TYP_INT */
+ if ((type == TYP_LONG) && (grabCount == 1))
+ {
+ /* We will need to pick one register */
+ type = TYP_INT;
+ // noway_assert(!VarSetOps::IsEmpty(this, varBits));
+ VarSetOps::UnionD(this, rpUseInPlace, varBits);
+ }
+ noway_assert((type == TYP_DOUBLE) ||
+ (grabCount == (genTypeSize(genActualType(type)) / REGSIZE_BYTES)));
+#else // !_TARGET_64BIT_
+ noway_assert(grabCount == 1);
#endif // !_TARGET_64BIT_
-
- }
- }
- else if (type == TYP_STRUCT)
- {
+ }
+ }
+ else if (type == TYP_STRUCT)
+ {
#ifdef _TARGET_ARM_
- // TODO-ARM-Bug?: Passing structs in registers on ARM hits an assert here when
- // predictReg is PREDICT_REG_R0 to PREDICT_REG_R3
- // As a workaround we just bash it to PREDICT_NONE here
- //
- if (predictReg != PREDICT_NONE)
- predictReg = PREDICT_NONE;
+ // TODO-ARM-Bug?: Passing structs in registers on ARM hits an assert here when
+ // predictReg is PREDICT_REG_R0 to PREDICT_REG_R3
+ // As a workaround we just bash it to PREDICT_NONE here
+ //
+ if (predictReg != PREDICT_NONE)
+ predictReg = PREDICT_NONE;
#endif
- // Currently predictReg is saying that we will not need any scratch registers
- noway_assert(predictReg == PREDICT_NONE);
-
- /* We may need to sign or zero extend a small type when pushing a struct */
- if (varDsc->lvPromoted && !varDsc->lvAddrExposed)
- {
- for (unsigned varNum = varDsc->lvFieldLclStart;
- varNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
- varNum++)
- {
- LclVarDsc * fldVar = lvaTable + varNum;
+ // Currently predictReg is saying that we will not need any scratch registers
+ noway_assert(predictReg == PREDICT_NONE);
- if (fldVar->lvStackAligned())
+ /* We may need to sign or zero extend a small type when pushing a struct */
+ if (varDsc->lvPromoted && !varDsc->lvAddrExposed)
+ {
+ for (unsigned varNum = varDsc->lvFieldLclStart;
+ varNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; varNum++)
{
- // When we are stack aligned Codegen will just use
- // a push instruction and thus doesn't need any register
- // since we can push both a register or a stack frame location
- continue;
- }
+ LclVarDsc* fldVar = lvaTable + varNum;
- if (varTypeIsByte(fldVar->TypeGet()))
- {
- // We will need to reserve one byteable register,
- //
- type = TYP_BYTE;
- predictReg = PREDICT_SCRATCH_REG;
-#if CPU_HAS_BYTE_REGS
- // It is best to enregister this fldVar in a byteable register
- //
- fldVar->addPrefReg(RBM_BYTE_REG_FLAG, this);
-#endif
- }
- else if (varTypeIsShort(fldVar->TypeGet()))
- {
- bool isEnregistered = fldVar->lvTracked && (fldVar->lvRegNum != REG_STK);
- // If fldVar is not enregistered then we will need a scratch register
- //
- if (!isEnregistered)
+ if (fldVar->lvStackAligned())
{
- // We will need either an int register or a byte register
- // If we are not requesting a byte register we will request an int register
+ // When we are stack aligned Codegen will just use
+ // a push instruction and thus doesn't need any register
+ // since we can push either a register or a stack frame location
+ continue;
+ }
+
+ if (varTypeIsByte(fldVar->TypeGet()))
+ {
+ // We will need to reserve one byteable register,
//
- if (type != TYP_BYTE)
- type = TYP_INT;
+ type = TYP_BYTE;
predictReg = PREDICT_SCRATCH_REG;
+#if CPU_HAS_BYTE_REGS
+ // It is best to enregister this fldVar in a byteable register
+ //
+ fldVar->addPrefReg(RBM_BYTE_REG_FLAG, this);
+#endif
+ }
+ else if (varTypeIsShort(fldVar->TypeGet()))
+ {
+ bool isEnregistered = fldVar->lvTracked && (fldVar->lvRegNum != REG_STK);
+ // If fldVar is not enregistered then we will need a scratch register
+ //
+ if (!isEnregistered)
+ {
+ // We will need either an int register or a byte register
+ // If we are not requesting a byte register we will request an int register
+ //
+ if (type != TYP_BYTE)
+ type = TYP_INT;
+ predictReg = PREDICT_SCRATCH_REG;
+ }
}
}
}
}
- }
- else
- {
- regMaskTP preferReg = rpPredictRegMask(predictReg, type);
- if (preferReg != 0)
+ else
{
- if ( (genTypeStSz(type) == 1) ||
- (genCountBits(preferReg) <= genTypeStSz(type)) )
+ regMaskTP preferReg = rpPredictRegMask(predictReg, type);
+ if (preferReg != 0)
{
- varDsc->addPrefReg(preferReg, this);
+ if ((genTypeStSz(type) == 1) || (genCountBits(preferReg) <= genTypeStSz(type)))
+ {
+ varDsc->addPrefReg(preferReg, this);
+ }
}
}
- }
- break; /* end of case GT_LCL_VAR */
+ break; /* end of case GT_LCL_VAR */
- case GT_JMP:
- tree->gtUsedRegs = RBM_NONE;
- regMask = RBM_NONE;
+ case GT_JMP:
+ tree->gtUsedRegs = RBM_NONE;
+ regMask = RBM_NONE;
#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED)
- // Mark the registers required to emit a tailcall profiler callback
- if (compIsProfilerHookNeeded())
- {
- tree->gtUsedRegs |= RBM_PROFILER_JMP_USED;
- }
+ // Mark the registers required to emit a tailcall profiler callback
+ if (compIsProfilerHookNeeded())
+ {
+ tree->gtUsedRegs |= RBM_PROFILER_JMP_USED;
+ }
#endif
- goto RETURN_CHECK;
+ goto RETURN_CHECK;
- default:
- break;
+ default:
+ break;
} /* end of switch (oper) */
/* If we don't need to evaluate to register, regmask is the empty set */
@@ -2395,18 +2337,18 @@ GRAB_COUNT:
/* We need to sign or zero extend a small type when pushing a struct */
noway_assert((type == TYP_INT) || (type == TYP_BYTE));
- varDsc = lvaTable + tree->gtLclVarCommon.gtLclNum;
+ varDsc = lvaTable + tree->gtLclVarCommon.gtLclNum;
noway_assert(varDsc->lvPromoted && !varDsc->lvAddrExposed);
- for (unsigned varNum = varDsc->lvFieldLclStart;
- varNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
+ for (unsigned varNum = varDsc->lvFieldLclStart; varNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt;
varNum++)
- {
- LclVarDsc * fldVar = lvaTable + varNum;
+ {
+ LclVarDsc* fldVar = lvaTable + varNum;
if (fldVar->lvTracked)
{
- VARSET_TP VARSET_INIT_NOCOPY(fldBit, VarSetOps::MakeSingleton(this, fldVar->lvVarIndex));
- rpRecordRegIntf(regMask, fldBit DEBUGARG( "need scratch register when pushing a small field of a struct"));
+ VARSET_TP VARSET_INIT_NOCOPY(fldBit, VarSetOps::MakeSingleton(this, fldVar->lvVarIndex));
+ rpRecordRegIntf(regMask, fldBit DEBUGARG(
+ "need scratch register when pushing a small field of a struct"));
}
}
}
@@ -2423,16 +2365,14 @@ GRAB_COUNT:
*/
if (lockedRegs)
{
- rpRecordRegIntf(lockedRegs, varAsSet
- DEBUGARG("last use Predict lockedRegs"));
+ rpRecordRegIntf(lockedRegs, varAsSet DEBUGARG("last use Predict lockedRegs"));
}
/*
* Add interference from any reserved temps into this last use variable.
*/
if (rsvdRegs)
{
- rpRecordRegIntf(rsvdRegs, varAsSet
- DEBUGARG("last use Predict rsvdRegs"));
+ rpRecordRegIntf(rsvdRegs, varAsSet DEBUGARG("last use Predict rsvdRegs"));
}
/*
* For partially enregistered longs add an interference with the
@@ -2440,8 +2380,7 @@ GRAB_COUNT:
*/
if ((type == TYP_INT) && (tree->TypeGet() == TYP_LONG))
{
- rpRecordRegIntf(regMask, varAsSet
- DEBUGARG("last use with partial enreg"));
+ rpRecordRegIntf(regMask, varAsSet DEBUGARG("last use with partial enreg"));
}
}
@@ -2451,753 +2390,746 @@ GRAB_COUNT:
/* Is it a 'simple' unary/binary operator? */
- if (kind & GTK_SMPOP)
+ if (kind & GTK_SMPOP)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
- GenTreePtr opsPtr [3];
- regMaskTP regsPtr[3];
+ GenTreePtr opsPtr[3];
+ regMaskTP regsPtr[3];
VARSET_TP VARSET_INIT_NOCOPY(startAsgUseInPlaceVars, VarSetOps::UninitVal());
switch (oper)
{
- case GT_ASG:
+ case GT_ASG:
- /* Is the value being assigned into a LCL_VAR? */
- if (op1->gtOper == GT_LCL_VAR)
- {
- varDsc = lvaTable + op1->gtLclVarCommon.gtLclNum;
-
- /* Are we assigning a LCL_VAR the result of a call? */
- if (op2->gtOper == GT_CALL)
+ /* Is the value being assigned into a LCL_VAR? */
+ if (op1->gtOper == GT_LCL_VAR)
{
- /* Set a preferred register for the LCL_VAR */
- if (isRegPairType(varDsc->TypeGet()))
- varDsc->addPrefReg(RBM_LNGRET, this);
- else if (!varTypeIsFloating(varDsc->TypeGet()))
- varDsc->addPrefReg(RBM_INTRET, this);
+ varDsc = lvaTable + op1->gtLclVarCommon.gtLclNum;
+
+ /* Are we assigning a LCL_VAR the result of a call? */
+ if (op2->gtOper == GT_CALL)
+ {
+ /* Set a preferred register for the LCL_VAR */
+ if (isRegPairType(varDsc->TypeGet()))
+ varDsc->addPrefReg(RBM_LNGRET, this);
+ else if (!varTypeIsFloating(varDsc->TypeGet()))
+ varDsc->addPrefReg(RBM_INTRET, this);
#ifdef _TARGET_AMD64_
- else
- varDsc->addPrefReg(RBM_FLOATRET, this);
+ else
+ varDsc->addPrefReg(RBM_FLOATRET, this);
#endif
- /*
- * When assigning the result of a call we don't
- * bother trying to target the right side of the
- * assignment, since we have a fixed calling convention.
- */
- }
- else if (varDsc->lvTracked)
- {
- // We interfere with uses in place
- if (!VarSetOps::IsEmpty(this, rpUseInPlace))
- {
- rpRecordVarIntf(varDsc->lvVarIndex, rpUseInPlace
- DEBUGARG( "Assign UseInPlace conflict"));
+ /*
+ * When assigning the result of a call we don't
+ * bother trying to target the right side of the
+ * assignment, since we have a fixed calling convention.
+ */
}
-
-
- // Did we predict that this local will be fully enregistered?
- // and the assignment type is the same as the expression type?
- // and it is dead on the right side of the assignment?
- // and we current have no other rpAsgVarNum active?
- //
- if ((varDsc->lvRegNum != REG_STK) &&
- ((type != TYP_LONG) || (varDsc->lvOtherReg != REG_STK)) &&
- (type == op2->TypeGet()) &&
- (op1->gtFlags & GTF_VAR_DEF) &&
- (rpAsgVarNum == -1))
+ else if (varDsc->lvTracked)
{
+ // We interfere with uses in place
+ if (!VarSetOps::IsEmpty(this, rpUseInPlace))
+ {
+ rpRecordVarIntf(varDsc->lvVarIndex, rpUseInPlace DEBUGARG("Assign UseInPlace conflict"));
+ }
+
+ // Did we predict that this local will be fully enregistered?
+ // and the assignment type is the same as the expression type?
+ // and it is dead on the right side of the assignment?
+ // and we currently have no other rpAsgVarNum active?
//
- // Yes, we should try to target the right side (op2) of this
- // assignment into the (enregistered) tracked variable.
- //
-
- op1PredictReg = PREDICT_NONE; /* really PREDICT_REG, but we've already done the check */
- op2PredictReg = rpGetPredictForVarIndex(varDsc->lvVarIndex);
-
- // Remember that this is a new use in place
-
- // We've added "new UseInPlace"; remove from the global set.
- VarSetOps::RemoveElemD(this, rpUseInPlace, varDsc->lvVarIndex);
-
- // Note that later when we walk down to the leaf node for op2
- // if we decide to actually use the register for the 'varDsc'
- // to enregister the operand, the we will set rpAsgVarNum to
- // varDsc->lvVarIndex, by extracting this value using
- // rpGetVarIndexForPredict()
- //
- // Also we reset rpAsgVarNum back to -1 after we have finished
- // predicting the current GT_ASG node
- //
- goto ASG_COMMON;
+ if ((varDsc->lvRegNum != REG_STK) && ((type != TYP_LONG) || (varDsc->lvOtherReg != REG_STK)) &&
+ (type == op2->TypeGet()) && (op1->gtFlags & GTF_VAR_DEF) && (rpAsgVarNum == -1))
+ {
+ //
+ // Yes, we should try to target the right side (op2) of this
+ // assignment into the (enregistered) tracked variable.
+ //
+
+ op1PredictReg = PREDICT_NONE; /* really PREDICT_REG, but we've already done the check */
+ op2PredictReg = rpGetPredictForVarIndex(varDsc->lvVarIndex);
+
+ // Remember that this is a new use in place
+
+ // We've added "new UseInPlace"; remove from the global set.
+ VarSetOps::RemoveElemD(this, rpUseInPlace, varDsc->lvVarIndex);
+
+ // Note that later when we walk down to the leaf node for op2
+ // if we decide to actually use the register for the 'varDsc'
+ // to enregister the operand, then we will set rpAsgVarNum to
+ // varDsc->lvVarIndex, by extracting this value using
+ // rpGetVarIndexForPredict()
+ //
+ // Also we reset rpAsgVarNum back to -1 after we have finished
+ // predicting the current GT_ASG node
+ //
+ goto ASG_COMMON;
+ }
}
}
- }
- __fallthrough;
+ __fallthrough;
- case GT_CHS:
+ case GT_CHS:
- case GT_ASG_OR:
- case GT_ASG_XOR:
- case GT_ASG_AND:
- case GT_ASG_SUB:
- case GT_ASG_ADD:
- case GT_ASG_MUL:
- case GT_ASG_DIV:
- case GT_ASG_UDIV:
+ case GT_ASG_OR:
+ case GT_ASG_XOR:
+ case GT_ASG_AND:
+ case GT_ASG_SUB:
+ case GT_ASG_ADD:
+ case GT_ASG_MUL:
+ case GT_ASG_DIV:
+ case GT_ASG_UDIV:
- /* We can't use "reg <op>= addr" for TYP_LONG or if op2 is a short type */
- if ((type != TYP_LONG) && !varTypeIsSmall(op2->gtType))
- {
- /* Is the value being assigned into an enregistered LCL_VAR? */
- /* For debug code we only allow a simple op2 to be assigned */
- if ((op1->gtOper == GT_LCL_VAR) &&
- (!opts.compDbgCode || rpCanAsgOperWithoutReg(op2, false)))
+ /* We can't use "reg <op>= addr" for TYP_LONG or if op2 is a short type */
+ if ((type != TYP_LONG) && !varTypeIsSmall(op2->gtType))
{
- varDsc = lvaTable + op1->gtLclVarCommon.gtLclNum;
- /* Did we predict that this local will be enregistered? */
- if (varDsc->lvRegNum != REG_STK)
+ /* Is the value being assigned into an enregistered LCL_VAR? */
+ /* For debug code we only allow a simple op2 to be assigned */
+ if ((op1->gtOper == GT_LCL_VAR) && (!opts.compDbgCode || rpCanAsgOperWithoutReg(op2, false)))
{
- /* Yes, we can use "reg <op>= addr" */
+ varDsc = lvaTable + op1->gtLclVarCommon.gtLclNum;
+ /* Did we predict that this local will be enregistered? */
+ if (varDsc->lvRegNum != REG_STK)
+ {
+ /* Yes, we can use "reg <op>= addr" */
- op1PredictReg = PREDICT_NONE; /* really PREDICT_REG, but we've already done the check */
- op2PredictReg = PREDICT_NONE;
+ op1PredictReg = PREDICT_NONE; /* really PREDICT_REG, but we've already done the check */
+ op2PredictReg = PREDICT_NONE;
- goto ASG_COMMON;
+ goto ASG_COMMON;
+ }
}
}
- }
#if CPU_LOAD_STORE_ARCH
- if (oper != GT_ASG)
- {
- op1PredictReg = PREDICT_REG;
- op2PredictReg = PREDICT_REG;
- }
- else
+ if (oper != GT_ASG)
+ {
+ op1PredictReg = PREDICT_REG;
+ op2PredictReg = PREDICT_REG;
+ }
+ else
#endif
- {
- /*
- * Otherwise, initialize the normal forcing of operands:
- * "addr <op>= reg"
- */
- op1PredictReg = PREDICT_ADDR;
- op2PredictReg = PREDICT_REG;
- }
+ {
+ /*
+ * Otherwise, initialize the normal forcing of operands:
+ * "addr <op>= reg"
+ */
+ op1PredictReg = PREDICT_ADDR;
+ op2PredictReg = PREDICT_REG;
+ }
-ASG_COMMON:
+ ASG_COMMON:
#if !CPU_LOAD_STORE_ARCH
- if (op2PredictReg != PREDICT_NONE)
- {
- /* Is the value being assigned a simple one? */
- if (rpCanAsgOperWithoutReg(op2, false))
- op2PredictReg = PREDICT_NONE;
- }
+ if (op2PredictReg != PREDICT_NONE)
+ {
+ /* Is the value being assigned a simple one? */
+ if (rpCanAsgOperWithoutReg(op2, false))
+ op2PredictReg = PREDICT_NONE;
+ }
#endif
- bool simpleAssignment;
- simpleAssignment = false;
+ bool simpleAssignment;
+ simpleAssignment = false;
- if ((oper == GT_ASG) &&
- (op1->gtOper == GT_LCL_VAR))
- {
- // Add a variable interference from the assign target
- // to each of the last use variables
- if (!VarSetOps::IsEmpty(this, rpLastUseVars))
+ if ((oper == GT_ASG) && (op1->gtOper == GT_LCL_VAR))
{
- varDsc = lvaTable + op1->gtLclVarCommon.gtLclNum;
-
- if (varDsc->lvTracked)
+ // Add a variable interference from the assign target
+ // to each of the last use variables
+ if (!VarSetOps::IsEmpty(this, rpLastUseVars))
{
- unsigned varIndex = varDsc->lvVarIndex;
-
- rpRecordVarIntf(varIndex, rpLastUseVars
- DEBUGARG( "Assign conflict"));
- }
- }
+ varDsc = lvaTable + op1->gtLclVarCommon.gtLclNum;
- /* Record whether this tree is a simple assignment to a local */
+ if (varDsc->lvTracked)
+ {
+ unsigned varIndex = varDsc->lvVarIndex;
- simpleAssignment = ((type != TYP_LONG) || !opts.compDbgCode);
- }
+ rpRecordVarIntf(varIndex, rpLastUseVars DEBUGARG("Assign conflict"));
+ }
+ }
- bool requireByteReg; requireByteReg = false;
+ /* Record whether this tree is a simple assignment to a local */
-#if CPU_HAS_BYTE_REGS
- /* Byte-assignments need the byte registers, unless op1 is an enregistered local */
+ simpleAssignment = ((type != TYP_LONG) || !opts.compDbgCode);
+ }
- if (varTypeIsByte(type) &&
- ((op1->gtOper != GT_LCL_VAR) || (lvaTable[op1->gtLclVarCommon.gtLclNum].lvRegNum == REG_STK)))
+ bool requireByteReg;
+ requireByteReg = false;
- {
- // Byte-assignments typically need a byte register
- requireByteReg = true;
+#if CPU_HAS_BYTE_REGS
+ /* Byte-assignments need the byte registers, unless op1 is an enregistered local */
+
+ if (varTypeIsByte(type) &&
+ ((op1->gtOper != GT_LCL_VAR) || (lvaTable[op1->gtLclVarCommon.gtLclNum].lvRegNum == REG_STK)))
- if (op1->gtOper == GT_LCL_VAR)
{
- varDsc = lvaTable + op1->gtLclVar.gtLclNum;
+ // Byte-assignments typically need a byte register
+ requireByteReg = true;
- // Did we predict that this local will be enregistered?
- if (varDsc->lvTracked && (varDsc->lvRegNum != REG_STK) && (oper != GT_CHS))
+ if (op1->gtOper == GT_LCL_VAR)
{
-                    // We don't require a byte register when op1 is an enregistered local
- requireByteReg = false;
- }
+ varDsc = lvaTable + op1->gtLclVar.gtLclNum;
- // Is op1 part of an Assign-Op or is the RHS a simple memory indirection?
- if ((oper != GT_ASG) || (op2->gtOper == GT_IND) || (op2->gtOper == GT_CLS_VAR))
- {
-                // We should try to put op1 in a byte register
- varDsc->addPrefReg(RBM_BYTE_REG_FLAG, this);
+ // Did we predict that this local will be enregistered?
+ if (varDsc->lvTracked && (varDsc->lvRegNum != REG_STK) && (oper != GT_CHS))
+ {
+                        // We don't require a byte register when op1 is an enregistered local
+ requireByteReg = false;
+ }
+
+ // Is op1 part of an Assign-Op or is the RHS a simple memory indirection?
+ if ((oper != GT_ASG) || (op2->gtOper == GT_IND) || (op2->gtOper == GT_CLS_VAR))
+ {
+                        // We should try to put op1 in a byte register
+ varDsc->addPrefReg(RBM_BYTE_REG_FLAG, this);
+ }
}
}
- }
#endif
- VarSetOps::Assign(this, startAsgUseInPlaceVars, rpUseInPlace);
+ VarSetOps::Assign(this, startAsgUseInPlaceVars, rpUseInPlace);
- bool isWriteBarrierAsgNode;
- isWriteBarrierAsgNode = codeGen->gcInfo.gcIsWriteBarrierAsgNode(tree);
+ bool isWriteBarrierAsgNode;
+ isWriteBarrierAsgNode = codeGen->gcInfo.gcIsWriteBarrierAsgNode(tree);
#ifdef DEBUG
- GCInfo::WriteBarrierForm wbf;
- if (isWriteBarrierAsgNode)
- wbf = codeGen->gcInfo.gcIsWriteBarrierCandidate(tree->gtOp.gtOp1, tree->gtOp.gtOp2);
- else
- wbf = GCInfo::WBF_NoBarrier;
+ GCInfo::WriteBarrierForm wbf;
+ if (isWriteBarrierAsgNode)
+ wbf = codeGen->gcInfo.gcIsWriteBarrierCandidate(tree->gtOp.gtOp1, tree->gtOp.gtOp2);
+ else
+ wbf = GCInfo::WBF_NoBarrier;
#endif // DEBUG
- regMaskTP wbaLockedRegs; wbaLockedRegs = lockedRegs;
- if (isWriteBarrierAsgNode)
- {
+ regMaskTP wbaLockedRegs;
+ wbaLockedRegs = lockedRegs;
+ if (isWriteBarrierAsgNode)
+ {
#if defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS
#ifdef DEBUG
- if (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug)
- {
+ if (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug)
+ {
#endif // DEBUG
- wbaLockedRegs |= RBM_WRITE_BARRIER;
- op1->gtRsvdRegs |= RBM_WRITE_BARRIER; // This will steer op2 away from REG_WRITE_BARRIER
- assert(REG_WRITE_BARRIER == REG_EDX);
- op1PredictReg = PREDICT_REG_EDX;
+ wbaLockedRegs |= RBM_WRITE_BARRIER;
+ op1->gtRsvdRegs |= RBM_WRITE_BARRIER; // This will steer op2 away from REG_WRITE_BARRIER
+ assert(REG_WRITE_BARRIER == REG_EDX);
+ op1PredictReg = PREDICT_REG_EDX;
#ifdef DEBUG
- }
- else
+ }
+ else
#endif // DEBUG
#endif // defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS
#if defined(DEBUG) || !(defined(_TARGET_X86_) && NOGC_WRITE_BARRIERS)
- {
+ {
#ifdef _TARGET_X86_
- op1PredictReg = PREDICT_REG_ECX;
- op2PredictReg = PREDICT_REG_EDX;
+ op1PredictReg = PREDICT_REG_ECX;
+ op2PredictReg = PREDICT_REG_EDX;
#elif defined(_TARGET_ARM_)
- op1PredictReg = PREDICT_REG_R0;
- op2PredictReg = PREDICT_REG_R1;
+ op1PredictReg = PREDICT_REG_R0;
+ op2PredictReg = PREDICT_REG_R1;
- // This is my best guess as to what the previous code meant by checking "gtRngChkLen() == NULL".
- if ((op1->OperGet() == GT_IND) && (op1->gtOp.gtOp1->OperGet() != GT_ARR_BOUNDS_CHECK))
- {
- op1 = op1->gtOp.gtOp1;
- }
+ // This is my best guess as to what the previous code meant by checking "gtRngChkLen() == NULL".
+ if ((op1->OperGet() == GT_IND) && (op1->gtOp.gtOp1->OperGet() != GT_ARR_BOUNDS_CHECK))
+ {
+ op1 = op1->gtOp.gtOp1;
+ }
#else // !_TARGET_X86_ && !_TARGET_ARM_
#error "Non-ARM or x86 _TARGET_ in RegPredict for WriteBarrierAsg"
#endif
- }
+ }
#endif
- }
+ }
- /* Are we supposed to evaluate RHS first? */
+ /* Are we supposed to evaluate RHS first? */
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- op2Mask = rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs,
- rsvdRegs | op1->gtRsvdRegs);
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ op2Mask = rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
#if CPU_HAS_BYTE_REGS
-            // Should we ensure that op2 gets evaluated into a byte register?
- if (requireByteReg && ((op2Mask & RBM_BYTE_REGS) == 0))
- {
- // We need to grab a byte-able register, (i.e. EAX, EDX, ECX, EBX)
- // and we can't select one that is already reserved (i.e. lockedRegs)
- //
- op2Mask |= rpPredictRegPick(type, PREDICT_SCRATCH_REG, (lockedRegs | RBM_NON_BYTE_REGS));
- op2->gtUsedRegs |= op2Mask;
+                    // Should we ensure that op2 gets evaluated into a byte register?
+ if (requireByteReg && ((op2Mask & RBM_BYTE_REGS) == 0))
+ {
+ // We need to grab a byte-able register, (i.e. EAX, EDX, ECX, EBX)
+ // and we can't select one that is already reserved (i.e. lockedRegs)
+ //
+ op2Mask |= rpPredictRegPick(type, PREDICT_SCRATCH_REG, (lockedRegs | RBM_NON_BYTE_REGS));
+ op2->gtUsedRegs |= op2Mask;
- // No longer a simple assignment because we're using extra registers and might
- // have interference between op1 and op2. See DevDiv #136681
- simpleAssignment = false;
- }
+ // No longer a simple assignment because we're using extra registers and might
+ // have interference between op1 and op2. See DevDiv #136681
+ simpleAssignment = false;
+ }
#endif
- /*
- * For a simple assignment we don't want the op2Mask to be
-             * marked as interfering with the LCL_VAR, since it is likely
- * that we will want to enregister the LCL_VAR in exactly
- * the register that is used to compute op2
- */
- tmpMask = lockedRegs;
+ /*
+ * For a simple assignment we don't want the op2Mask to be
+                     * marked as interfering with the LCL_VAR, since it is likely
+ * that we will want to enregister the LCL_VAR in exactly
+ * the register that is used to compute op2
+ */
+ tmpMask = lockedRegs;
- if (!simpleAssignment)
- tmpMask |= op2Mask;
+ if (!simpleAssignment)
+ tmpMask |= op2Mask;
- regMask = rpPredictTreeRegUse(op1, op1PredictReg, tmpMask, RBM_NONE);
+ regMask = rpPredictTreeRegUse(op1, op1PredictReg, tmpMask, RBM_NONE);
- // Did we relax the register prediction for op1 and op2 above ?
- // - because we are depending upon op1 being enregistered
- //
- if ((op1PredictReg == PREDICT_NONE) &&
- ((op2PredictReg == PREDICT_NONE) || rpHasVarIndexForPredict(op2PredictReg)))
- {
- /* We must be assigning into an enregistered LCL_VAR */
- noway_assert(op1->gtOper == GT_LCL_VAR);
- varDsc = lvaTable + op1->gtLclVar.gtLclNum;
- noway_assert(varDsc->lvRegNum != REG_STK);
+ // Did we relax the register prediction for op1 and op2 above ?
+ // - because we are depending upon op1 being enregistered
+ //
+ if ((op1PredictReg == PREDICT_NONE) &&
+ ((op2PredictReg == PREDICT_NONE) || rpHasVarIndexForPredict(op2PredictReg)))
+ {
+ /* We must be assigning into an enregistered LCL_VAR */
+ noway_assert(op1->gtOper == GT_LCL_VAR);
+ varDsc = lvaTable + op1->gtLclVar.gtLclNum;
+ noway_assert(varDsc->lvRegNum != REG_STK);
- /* We need to set lvDependReg, in case we lose the enregistration of op1 */
- varDsc->lvDependReg = true;
+ /* We need to set lvDependReg, in case we lose the enregistration of op1 */
+ varDsc->lvDependReg = true;
+ }
}
- }
- else
- {
- // For the case of simpleAssignments op2 should always be evaluated first
- noway_assert(!simpleAssignment);
-
- regMask = rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
- if (isWriteBarrierAsgNode)
+ else
{
- wbaLockedRegs |= op1->gtUsedRegs;
- }
- op2Mask = rpPredictTreeRegUse(op2, op2PredictReg, wbaLockedRegs | regMask, RBM_NONE);
+ // For the case of simpleAssignments op2 should always be evaluated first
+ noway_assert(!simpleAssignment);
+
+ regMask = rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
+ if (isWriteBarrierAsgNode)
+ {
+ wbaLockedRegs |= op1->gtUsedRegs;
+ }
+ op2Mask = rpPredictTreeRegUse(op2, op2PredictReg, wbaLockedRegs | regMask, RBM_NONE);
#if CPU_HAS_BYTE_REGS
-            // Should we ensure that op2 gets evaluated into a byte register?
- if (requireByteReg && ((op2Mask & RBM_BYTE_REGS) == 0))
- {
- // We need to grab a byte-able register, (i.e. EAX, EDX, ECX, EBX)
- // and we can't select one that is already reserved (i.e. lockedRegs or regMask)
- //
- op2Mask |= rpPredictRegPick(type, PREDICT_SCRATCH_REG, (lockedRegs | regMask | RBM_NON_BYTE_REGS));
- op2->gtUsedRegs |= op2Mask;
- }
+                    // Should we ensure that op2 gets evaluated into a byte register?
+ if (requireByteReg && ((op2Mask & RBM_BYTE_REGS) == 0))
+ {
+ // We need to grab a byte-able register, (i.e. EAX, EDX, ECX, EBX)
+ // and we can't select one that is already reserved (i.e. lockedRegs or regMask)
+ //
+ op2Mask |=
+ rpPredictRegPick(type, PREDICT_SCRATCH_REG, (lockedRegs | regMask | RBM_NON_BYTE_REGS));
+ op2->gtUsedRegs |= op2Mask;
+ }
#endif
- }
+ }
- if (rpHasVarIndexForPredict(op2PredictReg))
- {
- rpAsgVarNum = -1;
- }
+ if (rpHasVarIndexForPredict(op2PredictReg))
+ {
+ rpAsgVarNum = -1;
+ }
- if (isWriteBarrierAsgNode)
- {
+ if (isWriteBarrierAsgNode)
+ {
#if NOGC_WRITE_BARRIERS
#ifdef DEBUG
- if (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug)
- {
+ if (wbf != GCInfo::WBF_NoBarrier_CheckNotHeapInDebug)
+ {
#endif // DEBUG
- /* Steer computation away from REG_WRITE_BARRIER as the pointer is
- passed to the write-barrier call in REG_WRITE_BARRIER */
+ /* Steer computation away from REG_WRITE_BARRIER as the pointer is
+ passed to the write-barrier call in REG_WRITE_BARRIER */
- regMask = op2Mask;
+ regMask = op2Mask;
- if (op1->gtOper == GT_IND)
- {
- GenTreePtr rv1, rv2;
- unsigned mul, cns;
- bool rev;
+ if (op1->gtOper == GT_IND)
+ {
+ GenTreePtr rv1, rv2;
+ unsigned mul, cns;
+ bool rev;
- /* Special handling of indirect assigns for write barrier */
+ /* Special handling of indirect assigns for write barrier */
- bool yes = codeGen->genCreateAddrMode(op1->gtOp.gtOp1, -1, true, RBM_NONE, &rev, &rv1, &rv2, &mul, &cns);
+ bool yes = codeGen->genCreateAddrMode(op1->gtOp.gtOp1, -1, true, RBM_NONE, &rev, &rv1, &rv2,
+ &mul, &cns);
- /* Check address mode for enregisterable locals */
+ /* Check address mode for enregisterable locals */
- if (yes)
- {
- if (rv1 != NULL && rv1->gtOper == GT_LCL_VAR)
- {
- rpPredictRefAssign(rv1->gtLclVarCommon.gtLclNum);
+ if (yes)
+ {
+ if (rv1 != NULL && rv1->gtOper == GT_LCL_VAR)
+ {
+ rpPredictRefAssign(rv1->gtLclVarCommon.gtLclNum);
+ }
+ if (rv2 != NULL && rv2->gtOper == GT_LCL_VAR)
+ {
+ rpPredictRefAssign(rv2->gtLclVarCommon.gtLclNum);
+ }
+ }
}
- if (rv2 != NULL && rv2->gtOper == GT_LCL_VAR)
+
+ if (op2->gtOper == GT_LCL_VAR)
{
- rpPredictRefAssign(rv2->gtLclVarCommon.gtLclNum);
+ rpPredictRefAssign(op2->gtLclVarCommon.gtLclNum);
}
- }
- }
- if (op2->gtOper == GT_LCL_VAR)
- {
- rpPredictRefAssign(op2->gtLclVarCommon.gtLclNum);
- }
-
- // Add a register interference for REG_WRITE_BARRIER to each of the last use variables
- if (!VarSetOps::IsEmpty(this, rpLastUseVars))
- {
- rpRecordRegIntf(RBM_WRITE_BARRIER, rpLastUseVars
- DEBUGARG( "WriteBarrier and rpLastUseVars conflict"));
- }
- tree->gtUsedRegs |= RBM_WRITE_BARRIER;
+ // Add a register interference for REG_WRITE_BARRIER to each of the last use variables
+ if (!VarSetOps::IsEmpty(this, rpLastUseVars))
+ {
+ rpRecordRegIntf(RBM_WRITE_BARRIER,
+ rpLastUseVars DEBUGARG("WriteBarrier and rpLastUseVars conflict"));
+ }
+ tree->gtUsedRegs |= RBM_WRITE_BARRIER;
#ifdef DEBUG
- }
- else
+ }
+ else
#endif // DEBUG
#endif // NOGC_WRITE_BARRIERS
#if defined(DEBUG) || !NOGC_WRITE_BARRIERS
- {
+ {
#ifdef _TARGET_ARM_
#ifdef DEBUG
- if (verbose)
- printf("Adding interference with RBM_CALLEE_TRASH_NOGC for NoGC WriteBarrierAsg\n");
+ if (verbose)
+ printf("Adding interference with RBM_CALLEE_TRASH_NOGC for NoGC WriteBarrierAsg\n");
#endif
- //
- // For the ARM target we have an optimized JIT Helper
- // that only trashes a subset of the callee saved registers
- //
+ //
+ // For the ARM target we have an optimized JIT Helper
+ // that only trashes a subset of the callee saved registers
+ //
- // NOTE: Adding it to the gtUsedRegs will cause the interference to
- // be added appropriately
+ // NOTE: Adding it to the gtUsedRegs will cause the interference to
+ // be added appropriately
- // the RBM_CALLEE_TRASH_NOGC set is killed. We will record this in interferingRegs
- // instead of gtUsedRegs, because the latter will be modified later, but we need
- // to remember to add the interference.
+ // the RBM_CALLEE_TRASH_NOGC set is killed. We will record this in interferingRegs
+ // instead of gtUsedRegs, because the latter will be modified later, but we need
+ // to remember to add the interference.
- interferingRegs |= RBM_CALLEE_TRASH_NOGC;
+ interferingRegs |= RBM_CALLEE_TRASH_NOGC;
- op1->gtUsedRegs |= RBM_R0;
- op2->gtUsedRegs |= RBM_R1;
+ op1->gtUsedRegs |= RBM_R0;
+ op2->gtUsedRegs |= RBM_R1;
#else // _TARGET_ARM_
#ifdef DEBUG
- if (verbose)
- printf("Adding interference with RBM_CALLEE_TRASH for NoGC WriteBarrierAsg\n");
+ if (verbose)
+ printf("Adding interference with RBM_CALLEE_TRASH for NoGC WriteBarrierAsg\n");
#endif
- // We have to call a normal JIT helper to perform the Write Barrier Assignment
- // It will trash the callee saved registers
+ // We have to call a normal JIT helper to perform the Write Barrier Assignment
+ // It will trash the callee saved registers
- tree->gtUsedRegs |= RBM_CALLEE_TRASH;
+ tree->gtUsedRegs |= RBM_CALLEE_TRASH;
#endif // _TARGET_ARM_
+ }
+#endif // defined(DEBUG) || !NOGC_WRITE_BARRIERS
}
-#endif // defined(DEBUG) || !NOGC_WRITE_BARRIERS
- }
- if (simpleAssignment)
- {
- /*
- * Consider a simple assignment to a local:
- *
- * lcl = expr;
- *
- * Since the "=" node is visited after the variable
- * is marked live (assuming it's live after the
- * assignment), we don't want to use the register
- * use mask of the "=" node but rather that of the
- * variable itself.
- */
- tree->gtUsedRegs = op1->gtUsedRegs;
- }
- else
- {
- tree->gtUsedRegs = op1->gtUsedRegs | op2->gtUsedRegs;
- }
- VarSetOps::Assign(this, rpUseInPlace, startAsgUseInPlaceVars);
- goto RETURN_CHECK;
+ if (simpleAssignment)
+ {
+ /*
+ * Consider a simple assignment to a local:
+ *
+ * lcl = expr;
+ *
+ * Since the "=" node is visited after the variable
+ * is marked live (assuming it's live after the
+ * assignment), we don't want to use the register
+ * use mask of the "=" node but rather that of the
+ * variable itself.
+ */
+ tree->gtUsedRegs = op1->gtUsedRegs;
+ }
+ else
+ {
+ tree->gtUsedRegs = op1->gtUsedRegs | op2->gtUsedRegs;
+ }
+ VarSetOps::Assign(this, rpUseInPlace, startAsgUseInPlaceVars);
+ goto RETURN_CHECK;
- case GT_ASG_LSH:
- case GT_ASG_RSH:
- case GT_ASG_RSZ:
- /* assigning shift operators */
+ case GT_ASG_LSH:
+ case GT_ASG_RSH:
+ case GT_ASG_RSZ:
+ /* assigning shift operators */
- noway_assert(type != TYP_LONG);
+ noway_assert(type != TYP_LONG);
#if CPU_LOAD_STORE_ARCH
- predictReg = PREDICT_ADDR;
+ predictReg = PREDICT_ADDR;
#else
- predictReg = PREDICT_NONE;
+ predictReg = PREDICT_NONE;
#endif
- /* shift count is handled same as ordinary shift */
- goto HANDLE_SHIFT_COUNT;
+ /* shift count is handled same as ordinary shift */
+ goto HANDLE_SHIFT_COUNT;
- case GT_ADDR:
- regMask = rpPredictTreeRegUse(op1, PREDICT_ADDR, lockedRegs, RBM_LASTUSE);
+ case GT_ADDR:
+ regMask = rpPredictTreeRegUse(op1, PREDICT_ADDR, lockedRegs, RBM_LASTUSE);
- if ((regMask == RBM_NONE) && (predictReg >= PREDICT_REG))
- {
- // We need a scratch register for the LEA instruction
- regMask = rpPredictRegPick(TYP_INT, predictReg, lockedRegs | rsvdRegs);
- }
-
- tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
- goto RETURN_CHECK;
+ if ((regMask == RBM_NONE) && (predictReg >= PREDICT_REG))
+ {
+ // We need a scratch register for the LEA instruction
+ regMask = rpPredictRegPick(TYP_INT, predictReg, lockedRegs | rsvdRegs);
+ }
+ tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
+ goto RETURN_CHECK;
- case GT_CAST:
+ case GT_CAST:
- /* Cannot cast to VOID */
- noway_assert(type != TYP_VOID);
+ /* Cannot cast to VOID */
+ noway_assert(type != TYP_VOID);
- /* cast to long is special */
- if (type == TYP_LONG && op1->gtType <= TYP_INT)
- {
- noway_assert(tree->gtCast.gtCastType==TYP_LONG || tree->gtCast.gtCastType==TYP_ULONG);
+ /* cast to long is special */
+ if (type == TYP_LONG && op1->gtType <= TYP_INT)
+ {
+ noway_assert(tree->gtCast.gtCastType == TYP_LONG || tree->gtCast.gtCastType == TYP_ULONG);
#if CPU_LONG_USES_REGPAIR
- rpPredictReg predictRegHi = PREDICT_SCRATCH_REG;
+ rpPredictReg predictRegHi = PREDICT_SCRATCH_REG;
- if (rpHasVarIndexForPredict(predictReg))
- {
- unsigned tgtIndex = rpGetVarIndexForPredict(predictReg);
- rpAsgVarNum = tgtIndex;
+ if (rpHasVarIndexForPredict(predictReg))
+ {
+ unsigned tgtIndex = rpGetVarIndexForPredict(predictReg);
+ rpAsgVarNum = tgtIndex;
- // We don't need any register as we plan on writing to the rpAsgVarNum register
- predictReg = PREDICT_NONE;
+ // We don't need any register as we plan on writing to the rpAsgVarNum register
+ predictReg = PREDICT_NONE;
- LclVarDsc * tgtVar = lvaTable + lvaTrackedToVarNum[tgtIndex];
- tgtVar->lvDependReg = true;
+ LclVarDsc* tgtVar = lvaTable + lvaTrackedToVarNum[tgtIndex];
+ tgtVar->lvDependReg = true;
- if (tgtVar->lvOtherReg != REG_STK)
- {
- predictRegHi = PREDICT_NONE;
+ if (tgtVar->lvOtherReg != REG_STK)
+ {
+ predictRegHi = PREDICT_NONE;
+ }
}
- }
- else
+ else
#endif
- if (predictReg == PREDICT_NONE)
- {
- predictReg = PREDICT_SCRATCH_REG;
- }
+ if (predictReg == PREDICT_NONE)
+ {
+ predictReg = PREDICT_SCRATCH_REG;
+ }
#ifdef _TARGET_ARM_
- // If we are widening an int into a long using a targeted register pair we
-            // should retarget so that the low part gets loaded into the appropriate register
- else if (predictReg == PREDICT_PAIR_R0R1)
- {
- predictReg = PREDICT_REG_R0;
- predictRegHi = PREDICT_REG_R1;
- }
- else if (predictReg == PREDICT_PAIR_R2R3)
- {
- predictReg = PREDICT_REG_R2;
- predictRegHi = PREDICT_REG_R3;
- }
+ // If we are widening an int into a long using a targeted register pair we
+                    // should retarget so that the low part gets loaded into the appropriate register
+ else if (predictReg == PREDICT_PAIR_R0R1)
+ {
+ predictReg = PREDICT_REG_R0;
+ predictRegHi = PREDICT_REG_R1;
+ }
+ else if (predictReg == PREDICT_PAIR_R2R3)
+ {
+ predictReg = PREDICT_REG_R2;
+ predictRegHi = PREDICT_REG_R3;
+ }
#endif
#ifdef _TARGET_X86_
- // If we are widening an int into a long using a targeted register pair we
-            // should retarget so that the low part gets loaded into the appropriate register
- else if (predictReg == PREDICT_PAIR_EAXEDX)
- {
- predictReg = PREDICT_REG_EAX;
- predictRegHi = PREDICT_REG_EDX;
- }
- else if (predictReg == PREDICT_PAIR_ECXEBX)
- {
- predictReg = PREDICT_REG_ECX;
- predictRegHi = PREDICT_REG_EBX;
- }
+ // If we are widening an int into a long using a targeted register pair we
+                    // should retarget so that the low part gets loaded into the appropriate register
+ else if (predictReg == PREDICT_PAIR_EAXEDX)
+ {
+ predictReg = PREDICT_REG_EAX;
+ predictRegHi = PREDICT_REG_EDX;
+ }
+ else if (predictReg == PREDICT_PAIR_ECXEBX)
+ {
+ predictReg = PREDICT_REG_ECX;
+ predictRegHi = PREDICT_REG_EBX;
+ }
#endif
- regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
+ regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
#if CPU_LONG_USES_REGPAIR
- if (predictRegHi != PREDICT_NONE)
- {
- // Now get one more reg for the upper part
- regMask |= rpPredictRegPick(TYP_INT, predictRegHi, lockedRegs | rsvdRegs | regMask);
- }
+ if (predictRegHi != PREDICT_NONE)
+ {
+ // Now get one more reg for the upper part
+ regMask |= rpPredictRegPick(TYP_INT, predictRegHi, lockedRegs | rsvdRegs | regMask);
+ }
#endif
- tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
- goto RETURN_CHECK;
- }
+ tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
+ goto RETURN_CHECK;
+ }
- /* cast from long is special - it frees a register */
- if (type <= TYP_INT // nice. this presumably is intended to mean "signed int and shorter types"
- && op1->gtType == TYP_LONG)
- {
- if ((predictReg == PREDICT_NONE) || rpHasVarIndexForPredict(predictReg))
- predictReg = PREDICT_REG;
+ /* cast from long is special - it frees a register */
+ if (type <= TYP_INT // nice. this presumably is intended to mean "signed int and shorter types"
+ && op1->gtType == TYP_LONG)
+ {
+ if ((predictReg == PREDICT_NONE) || rpHasVarIndexForPredict(predictReg))
+ predictReg = PREDICT_REG;
- regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
+ regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
- // If we have 2 or more regs, free one of them
- if (!genMaxOneBit(regMask))
- {
- /* Clear the 2nd lowest bit in regMask */
- /* First set tmpMask to the lowest bit in regMask */
- tmpMask = genFindLowestBit(regMask);
- /* Next find the second lowest bit in regMask */
- tmpMask = genFindLowestBit(regMask & ~tmpMask);
- /* Clear this bit from regmask */
- regMask &= ~tmpMask;
+ // If we have 2 or more regs, free one of them
+ if (!genMaxOneBit(regMask))
+ {
+ /* Clear the 2nd lowest bit in regMask */
+ /* First set tmpMask to the lowest bit in regMask */
+ tmpMask = genFindLowestBit(regMask);
+ /* Next find the second lowest bit in regMask */
+ tmpMask = genFindLowestBit(regMask & ~tmpMask);
+ /* Clear this bit from regmask */
+ regMask &= ~tmpMask;
+ }
+ tree->gtUsedRegs = op1->gtUsedRegs;
+ goto RETURN_CHECK;
}
- tree->gtUsedRegs = op1->gtUsedRegs;
- goto RETURN_CHECK;
- }
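A minimal sketch of the bit trick in the long-to-int cast path above, assuming genFindLowestBit(x) isolates the lowest set bit as (x & -x) (the helper's name and its use are taken from the code above; the standalone function below is illustrative only, not the JIT's definition):

    // Drop the second-lowest set bit from a register mask, keeping the lowest one,
    // mirroring the "free one of the two registers" step in the cast-from-long case.
    unsigned dropSecondLowestBit(unsigned regMask)
    {
        unsigned lowest    = regMask & (0 - regMask);      // lowest set bit (genFindLowestBit)
        unsigned remaining = regMask & ~lowest;            // mask without that bit
        unsigned second    = remaining & (0 - remaining);  // lowest bit of the remainder
        return regMask & ~second;                          // clear only the second-lowest bit
    }

For example, a two-register mask 0b1010 comes back as 0b0010: the upper of the two registers is released and only the lower one is kept for the narrowed value.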
#if CPU_HAS_BYTE_REGS
- /* cast from signed-byte is special - it uses byteable registers */
- if (type == TYP_INT)
- {
- var_types smallType;
+ /* cast from signed-byte is special - it uses byteable registers */
+ if (type == TYP_INT)
+ {
+ var_types smallType;
- if (genTypeSize(tree->gtCast.CastOp()->TypeGet()) < genTypeSize(tree->gtCast.gtCastType))
- smallType = tree->gtCast.CastOp()->TypeGet();
- else
- smallType = tree->gtCast.gtCastType;
+ if (genTypeSize(tree->gtCast.CastOp()->TypeGet()) < genTypeSize(tree->gtCast.gtCastType))
+ smallType = tree->gtCast.CastOp()->TypeGet();
+ else
+ smallType = tree->gtCast.gtCastType;
- if (smallType == TYP_BYTE)
- {
- regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
+ if (smallType == TYP_BYTE)
+ {
+ regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
- if ((regMask & RBM_BYTE_REGS) == 0)
- regMask = rpPredictRegPick(type, PREDICT_SCRATCH_REG, RBM_NON_BYTE_REGS);
+ if ((regMask & RBM_BYTE_REGS) == 0)
+ regMask = rpPredictRegPick(type, PREDICT_SCRATCH_REG, RBM_NON_BYTE_REGS);
- tree->gtUsedRegs = (regMaskSmall)regMask;
- goto RETURN_CHECK;
+ tree->gtUsedRegs = (regMaskSmall)regMask;
+ goto RETURN_CHECK;
+ }
}
- }
#endif
#if FEATURE_STACK_FP_X87
- /* cast to float/double is special */
- if (varTypeIsFloating(type))
- {
- switch (op1->TypeGet())
+ /* cast to float/double is special */
+ if (varTypeIsFloating(type))
{
- /* uses fild, so don't need to be loaded to reg */
- case TYP_INT:
- case TYP_LONG:
- rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, rsvdRegs);
- tree->gtUsedRegs = op1->gtUsedRegs;
- regMask = 0;
- goto RETURN_CHECK;
- default:
- break;
+ switch (op1->TypeGet())
+ {
+ /* uses fild, so don't need to be loaded to reg */
+ case TYP_INT:
+ case TYP_LONG:
+ rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, rsvdRegs);
+ tree->gtUsedRegs = op1->gtUsedRegs;
+ regMask = 0;
+ goto RETURN_CHECK;
+ default:
+ break;
+ }
}
- }
- /* Casting from integral type to floating type is special */
- if (!varTypeIsFloating(type) && varTypeIsFloating(op1->TypeGet()))
- {
- if (opts.compCanUseSSE2)
+ /* Casting from integral type to floating type is special */
+ if (!varTypeIsFloating(type) && varTypeIsFloating(op1->TypeGet()))
{
- // predict for SSE2 based casting
- if (predictReg <= PREDICT_REG)
- predictReg = PREDICT_SCRATCH_REG;
- regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
+ if (opts.compCanUseSSE2)
+ {
+ // predict for SSE2 based casting
+ if (predictReg <= PREDICT_REG)
+ predictReg = PREDICT_SCRATCH_REG;
+ regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
- // Get one more int reg to hold cast result
- regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs|rsvdRegs|regMask);
- tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
- goto RETURN_CHECK;
+ // Get one more int reg to hold cast result
+ regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | rsvdRegs | regMask);
+ tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
+ goto RETURN_CHECK;
+ }
}
- }
#endif
#if FEATURE_FP_REGALLOC
- // Are we casting between int to float or float to int
- // Fix 388428 ARM JitStress WP7
- if (varTypeIsFloating(type) != varTypeIsFloating(op1->TypeGet()))
- {
- // op1 needs to go into a register
- regMask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, rsvdRegs);
+ // Are we casting between int to float or float to int
+ // Fix 388428 ARM JitStress WP7
+ if (varTypeIsFloating(type) != varTypeIsFloating(op1->TypeGet()))
+ {
+ // op1 needs to go into a register
+ regMask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, rsvdRegs);
#ifdef _TARGET_ARM_
- if (varTypeIsFloating(op1->TypeGet()))
- {
- // We also need a fp scratch register for the convert operation
- regMask |= rpPredictRegPick((genTypeStSz(type) == 1) ? TYP_FLOAT : TYP_DOUBLE,
- PREDICT_SCRATCH_REG, regMask|lockedRegs|rsvdRegs);
- }
+ if (varTypeIsFloating(op1->TypeGet()))
+ {
+ // We also need a fp scratch register for the convert operation
+ regMask |= rpPredictRegPick((genTypeStSz(type) == 1) ? TYP_FLOAT : TYP_DOUBLE,
+ PREDICT_SCRATCH_REG, regMask | lockedRegs | rsvdRegs);
+ }
#endif
- // We also need a register to hold the result
- regMask |= rpPredictRegPick(type, PREDICT_SCRATCH_REG, regMask|lockedRegs|rsvdRegs);
- tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
+ // We also need a register to hold the result
+ regMask |= rpPredictRegPick(type, PREDICT_SCRATCH_REG, regMask | lockedRegs | rsvdRegs);
+ tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
goto RETURN_CHECK;
- }
+ }
#endif
- /* otherwise must load op1 into a register */
- goto GENERIC_UNARY;
+ /* otherwise must load op1 into a register */
+ goto GENERIC_UNARY;
- case GT_INTRINSIC:
+ case GT_INTRINSIC:
#ifdef _TARGET_XARCH_
- if (tree->gtIntrinsic.gtIntrinsicId==CORINFO_INTRINSIC_Round &&
- tree->TypeGet()==TYP_INT)
- {
- // This is a special case to handle the following
- // optimization: conv.i4(round.d(d)) -> round.i(d)
- // if flowgraph 3186
+ if (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round && tree->TypeGet() == TYP_INT)
+ {
+ // This is a special case to handle the following
+ // optimization: conv.i4(round.d(d)) -> round.i(d)
+ // if flowgraph 3186
- if (predictReg <= PREDICT_REG)
- predictReg = PREDICT_SCRATCH_REG;
+ if (predictReg <= PREDICT_REG)
+ predictReg = PREDICT_SCRATCH_REG;
- rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
+ rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
- regMask = rpPredictRegPick(TYP_INT, predictReg, lockedRegs | rsvdRegs);
+ regMask = rpPredictRegPick(TYP_INT, predictReg, lockedRegs | rsvdRegs);
- tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
- goto RETURN_CHECK;
- }
+ tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
+ goto RETURN_CHECK;
+ }
#endif
- __fallthrough;
+ __fallthrough;
- case GT_NEG:
+ case GT_NEG:
#ifdef _TARGET_ARM_
- if (tree->TypeGet() == TYP_LONG)
- {
- // On ARM this consumes an extra register for the '0' value
- if (predictReg <= PREDICT_REG)
- predictReg = PREDICT_SCRATCH_REG;
+ if (tree->TypeGet() == TYP_LONG)
+ {
+ // On ARM this consumes an extra register for the '0' value
+ if (predictReg <= PREDICT_REG)
+ predictReg = PREDICT_SCRATCH_REG;
- regMaskTP op1Mask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
-
- regMask = rpPredictRegPick(TYP_INT, predictReg, lockedRegs | op1Mask | rsvdRegs);
+ regMaskTP op1Mask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
- tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
- goto RETURN_CHECK;
- }
+ regMask = rpPredictRegPick(TYP_INT, predictReg, lockedRegs | op1Mask | rsvdRegs);
+
+ tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
+ goto RETURN_CHECK;
+ }
#endif // _TARGET_ARM_
- __fallthrough;
+ __fallthrough;
- case GT_NOT:
+ case GT_NOT:
// these unary operators will write new values
// and thus will need a scratch register
-GENERIC_UNARY:
- /* generic unary operators */
+ GENERIC_UNARY:
+ /* generic unary operators */
- if (predictReg <= PREDICT_REG)
- predictReg = PREDICT_SCRATCH_REG;
+ if (predictReg <= PREDICT_REG)
+ predictReg = PREDICT_SCRATCH_REG;
- __fallthrough;
+ __fallthrough;
- case GT_NOP:
- // these unary operators do not write new values
- // and thus won't need a scratch register
- CLANG_FORMAT_COMMENT_ANCHOR;
+ case GT_NOP:
+ // these unary operators do not write new values
+ // and thus won't need a scratch register
+ CLANG_FORMAT_COMMENT_ANCHOR;
#if OPT_BOOL_OPS
- if (!op1)
- {
- tree->gtUsedRegs = 0;
- regMask = 0;
- goto RETURN_CHECK;
- }
+ if (!op1)
+ {
+ tree->gtUsedRegs = 0;
+ regMask = 0;
+ goto RETURN_CHECK;
+ }
#endif
- regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
- tree->gtUsedRegs = op1->gtUsedRegs;
- goto RETURN_CHECK;
+ regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
+ tree->gtUsedRegs = op1->gtUsedRegs;
+ goto RETURN_CHECK;
- case GT_IND:
- case GT_NULLCHECK: // At this point, nullcheck is just like an IND...
+ case GT_IND:
+ case GT_NULLCHECK: // At this point, nullcheck is just like an IND...
{
- bool intoReg = true;
+ bool intoReg = true;
VARSET_TP VARSET_INIT(this, startIndUseInPlaceVars, rpUseInPlace);
if (fgIsIndirOfAddrOfLocal(tree) != NULL)
{
- compUpdateLifeVar</*ForCodeGen*/false>(tree);
+ compUpdateLifeVar</*ForCodeGen*/ false>(tree);
}
if (predictReg == PREDICT_ADDR)
@@ -3222,8 +3154,8 @@ GENERIC_UNARY:
rsvdRegs |= RBM_LASTUSE;
}
- GenTreePtr lenCSE; lenCSE = NULL;
-
+ GenTreePtr lenCSE;
+ lenCSE = NULL;
/* check for address mode */
regMask = rpPredictAddressMode(op1, type, lockedRegs, rsvdRegs, lenCSE);
@@ -3239,7 +3171,7 @@ GENERIC_UNARY:
#endif // CPU_LOAD_STORE_ARCH
#ifdef _TARGET_ARM_
- // Unaligned loads/stores for floating point values must first be loaded into integer register(s)
+ // Unaligned loads/stores for floating point values must first be loaded into integer register(s)
//
if ((tree->gtFlags & GTF_IND_UNALIGNED) && varTypeIsFloating(type))
{
@@ -3247,14 +3179,15 @@ GENERIC_UNARY:
tmpMask = rpPredictRegPick(TYP_INT, PREDICT_REG, op1->gtUsedRegs | lockedRegs | rsvdRegs);
// Two integer registers are required for a TYP_DOUBLE
if (type == TYP_DOUBLE)
- tmpMask |= rpPredictRegPick(TYP_INT, PREDICT_REG, op1->gtUsedRegs | lockedRegs | rsvdRegs | tmpMask);
+ tmpMask |=
+ rpPredictRegPick(TYP_INT, PREDICT_REG, op1->gtUsedRegs | lockedRegs | rsvdRegs | tmpMask);
}
#endif
/* forcing to register? */
if (intoReg)
{
- regMaskTP lockedMask = lockedRegs | rsvdRegs;
+ regMaskTP lockedMask = lockedRegs | rsvdRegs;
tmpMask |= regMask;
// We will compute a new regMask that holds the register(s)
@@ -3266,14 +3199,14 @@ GENERIC_UNARY:
if (type == TYP_LONG)
{
// We need to use multiple load instructions here:
- // For the first register we can not choose
- // any registers that are being used in place or
- // any register in the current regMask
+ // For the first register we can not choose
+ // any registers that are being used in place or
+ // any register in the current regMask
//
regMask = rpPredictRegPick(TYP_INT, predictReg, regMask | lockedMask);
// For the second register we can choose a register that was
- // used in place or any register in the old now overwritten regMask
+ // used in place or any register in the old now overwritten regMask
// but not the same register that we picked above in 'regMask'
//
VarSetOps::Assign(this, rpUseInPlace, startIndUseInPlaceVars);
@@ -3283,7 +3216,7 @@ GENERIC_UNARY:
#endif
{
// We will use one load instruction here:
- // The load target register can be a register that was used in place
+ // The load target register can be a register that was used in place
                        // or one of the registers from the original regMask.
//
VarSetOps::Assign(this, rpUseInPlace, startIndUseInPlaceVars);
@@ -3299,1046 +3232,1051 @@ GENERIC_UNARY:
}
}
- tree->gtUsedRegs = (regMaskSmall)(regMask | tmpMask);
+ tree->gtUsedRegs = (regMaskSmall)(regMask | tmpMask);
- goto RETURN_CHECK;
+ goto RETURN_CHECK;
- case GT_EQ:
- case GT_NE:
- case GT_LT:
- case GT_LE:
- case GT_GE:
- case GT_GT:
+ case GT_EQ:
+ case GT_NE:
+ case GT_LT:
+ case GT_LE:
+ case GT_GE:
+ case GT_GT:
#ifdef _TARGET_X86_
- /* Floating point comparison uses EAX for flags */
- if (varTypeIsFloating(op1->TypeGet()))
- {
- regMask = RBM_EAX;
- }
- else
-#endif
- if (!(tree->gtFlags & GTF_RELOP_JMP_USED))
- {
- // Some comparisons are converted to ?:
- noway_assert(!fgMorphRelopToQmark(op1));
-
- if (predictReg <= PREDICT_REG)
- predictReg = PREDICT_SCRATCH_REG;
-
- // The set instructions need a byte register
- regMask = rpPredictRegPick(TYP_BYTE, predictReg, lockedRegs | rsvdRegs);
- }
- else
- {
- regMask = RBM_NONE;
-#ifdef _TARGET_XARCH_
- tmpMask = RBM_NONE;
- // Optimize the compare with a constant cases for xarch
- if (op1->gtOper == GT_CNS_INT)
+ /* Floating point comparison uses EAX for flags */
+ if (varTypeIsFloating(op1->TypeGet()))
{
- if (op2->gtOper == GT_CNS_INT)
- tmpMask = rpPredictTreeRegUse(op1, PREDICT_SCRATCH_REG, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
- rpPredictTreeRegUse(op2, PREDICT_NONE, lockedRegs | tmpMask, RBM_LASTUSE);
- tree->gtUsedRegs = op2->gtUsedRegs;
- goto RETURN_CHECK;
+ regMask = RBM_EAX;
}
- else if (op2->gtOper == GT_CNS_INT)
+ else
+#endif
+ if (!(tree->gtFlags & GTF_RELOP_JMP_USED))
{
- rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, rsvdRegs);
- tree->gtUsedRegs = op1->gtUsedRegs;
- goto RETURN_CHECK;
+ // Some comparisons are converted to ?:
+ noway_assert(!fgMorphRelopToQmark(op1));
+
+ if (predictReg <= PREDICT_REG)
+ predictReg = PREDICT_SCRATCH_REG;
+
+ // The set instructions need a byte register
+ regMask = rpPredictRegPick(TYP_BYTE, predictReg, lockedRegs | rsvdRegs);
}
- else if (op2->gtOper == GT_CNS_LNG)
+ else
{
- regMaskTP op1Mask = rpPredictTreeRegUse(op1, PREDICT_ADDR, lockedRegs, rsvdRegs);
+ regMask = RBM_NONE;
+#ifdef _TARGET_XARCH_
+ tmpMask = RBM_NONE;
+ // Optimize the compare with a constant cases for xarch
+ if (op1->gtOper == GT_CNS_INT)
+ {
+ if (op2->gtOper == GT_CNS_INT)
+ tmpMask =
+ rpPredictTreeRegUse(op1, PREDICT_SCRATCH_REG, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
+ rpPredictTreeRegUse(op2, PREDICT_NONE, lockedRegs | tmpMask, RBM_LASTUSE);
+ tree->gtUsedRegs = op2->gtUsedRegs;
+ goto RETURN_CHECK;
+ }
+ else if (op2->gtOper == GT_CNS_INT)
+ {
+ rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, rsvdRegs);
+ tree->gtUsedRegs = op1->gtUsedRegs;
+ goto RETURN_CHECK;
+ }
+ else if (op2->gtOper == GT_CNS_LNG)
+ {
+ regMaskTP op1Mask = rpPredictTreeRegUse(op1, PREDICT_ADDR, lockedRegs, rsvdRegs);
#ifdef _TARGET_X86_
- // We also need one extra register to read values from
- tmpMask = rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | op1Mask | rsvdRegs);
+ // We also need one extra register to read values from
+ tmpMask = rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | op1Mask | rsvdRegs);
#endif // _TARGET_X86_
- tree->gtUsedRegs = (regMaskSmall)tmpMask | op1->gtUsedRegs;
- goto RETURN_CHECK;
- }
+ tree->gtUsedRegs = (regMaskSmall)tmpMask | op1->gtUsedRegs;
+ goto RETURN_CHECK;
+ }
#endif // _TARGET_XARCH_
- }
+ }
- unsigned op1TypeSize;
- unsigned op2TypeSize;
+ unsigned op1TypeSize;
+ unsigned op2TypeSize;
- op1TypeSize = genTypeSize(op1->TypeGet());
- op2TypeSize = genTypeSize(op2->TypeGet());
+ op1TypeSize = genTypeSize(op1->TypeGet());
+ op2TypeSize = genTypeSize(op2->TypeGet());
- op1PredictReg = PREDICT_REG;
- op2PredictReg = PREDICT_REG;
+ op1PredictReg = PREDICT_REG;
+ op2PredictReg = PREDICT_REG;
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
#ifdef _TARGET_XARCH_
- if (op1TypeSize == sizeof(int))
- op1PredictReg = PREDICT_NONE;
+ if (op1TypeSize == sizeof(int))
+ op1PredictReg = PREDICT_NONE;
#endif
- tmpMask = rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
- rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs | tmpMask, RBM_LASTUSE);
- }
- else
- {
-#ifdef _TARGET_XARCH_
- // For full DWORD compares we can have
- //
- // op1 is an address mode and op2 is a register
- // or
- // op1 is a register and op2 is an address mode
- //
- if ((op2TypeSize == sizeof(int)) &&
- (op1TypeSize == op2TypeSize))
+ tmpMask = rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
+ rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs | tmpMask, RBM_LASTUSE);
+ }
+ else
{
- if (op2->gtOper == GT_LCL_VAR)
+#ifdef _TARGET_XARCH_
+ // For full DWORD compares we can have
+ //
+ // op1 is an address mode and op2 is a register
+ // or
+ // op1 is a register and op2 is an address mode
+ //
+ if ((op2TypeSize == sizeof(int)) && (op1TypeSize == op2TypeSize))
{
- unsigned lclNum = op2->gtLclVar.gtLclNum;
- varDsc = lvaTable + lclNum;
- /* Did we predict that this local will be enregistered? */
- if (varDsc->lvTracked && (varDsc->lvRegNum != REG_STK))
+ if (op2->gtOper == GT_LCL_VAR)
{
- op1PredictReg = PREDICT_ADDR;
+ unsigned lclNum = op2->gtLclVar.gtLclNum;
+ varDsc = lvaTable + lclNum;
+ /* Did we predict that this local will be enregistered? */
+ if (varDsc->lvTracked && (varDsc->lvRegNum != REG_STK))
+ {
+ op1PredictReg = PREDICT_ADDR;
+ }
}
}
- }
- // Codegen will generate cmp reg,[mem] for 4 or 8-byte types, but not for 1 or 2 byte types
- if ((op1PredictReg != PREDICT_ADDR) && (op2TypeSize >= sizeof(int)))
- op2PredictReg = PREDICT_ADDR;
+ // Codegen will generate cmp reg,[mem] for 4 or 8-byte types, but not for 1 or 2 byte types
+ if ((op1PredictReg != PREDICT_ADDR) && (op2TypeSize >= sizeof(int)))
+ op2PredictReg = PREDICT_ADDR;
#endif // _TARGET_XARCH_
- tmpMask = rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
+ tmpMask = rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
#ifdef _TARGET_ARM_
- if ((op2->gtOper != GT_CNS_INT) || !codeGen->validImmForAlu(op2->gtIntCon.gtIconVal))
+ if ((op2->gtOper != GT_CNS_INT) || !codeGen->validImmForAlu(op2->gtIntCon.gtIconVal))
#endif
- {
- rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs | tmpMask, RBM_LASTUSE);
+ {
+ rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs | tmpMask, RBM_LASTUSE);
+ }
}
- }
#ifdef _TARGET_XARCH_
- // In some cases in genCondSetFlags(), we need to use a temporary register (via rsPickReg())
- // to generate a sign/zero extension before doing a compare. Save a register for this purpose
- // if one of the registers is small and the types aren't equal.
+ // In some cases in genCondSetFlags(), we need to use a temporary register (via rsPickReg())
+ // to generate a sign/zero extension before doing a compare. Save a register for this purpose
+ // if one of the registers is small and the types aren't equal.
- if (regMask == RBM_NONE)
- {
- rpPredictReg op1xPredictReg, op2xPredictReg;
- GenTreePtr op1x, op2x;
- if (tree->gtFlags & GTF_REVERSE_OPS) // TODO: do we really need to handle this case?
- {
- op1xPredictReg = op2PredictReg;
- op2xPredictReg = op1PredictReg;
- op1x = op2;
- op2x = op1;
- }
- else
- {
- op1xPredictReg = op1PredictReg;
- op2xPredictReg = op2PredictReg;
- op1x = op1;
- op2x = op2;
- }
- if ((op1xPredictReg < PREDICT_REG) && // op1 doesn't get a register (probably an indir)
- (op2xPredictReg >= PREDICT_REG) && // op2 gets a register
- varTypeIsSmall(op1x->TypeGet())) // op1 is smaller than an int
+ if (regMask == RBM_NONE)
{
- bool needTmp = false;
-
- // If op1x is a byte, and op2x is not a byteable register, we'll need a temp.
- // We could predict a byteable register for op2x, but what if we don't get it?
- // So, be conservative and always ask for a temp. There are a couple small CQ losses as a result.
- if (varTypeIsByte(op1x->TypeGet()))
+ rpPredictReg op1xPredictReg, op2xPredictReg;
+ GenTreePtr op1x, op2x;
+ if (tree->gtFlags & GTF_REVERSE_OPS) // TODO: do we really need to handle this case?
{
- needTmp = true;
+ op1xPredictReg = op2PredictReg;
+ op2xPredictReg = op1PredictReg;
+ op1x = op2;
+ op2x = op1;
}
else
{
- if (op2x->gtOper == GT_LCL_VAR) // this will be a GT_REG_VAR during code generation
+ op1xPredictReg = op1PredictReg;
+ op2xPredictReg = op2PredictReg;
+ op1x = op1;
+ op2x = op2;
+ }
+ if ((op1xPredictReg < PREDICT_REG) && // op1 doesn't get a register (probably an indir)
+ (op2xPredictReg >= PREDICT_REG) && // op2 gets a register
+ varTypeIsSmall(op1x->TypeGet())) // op1 is smaller than an int
+ {
+ bool needTmp = false;
+
+ // If op1x is a byte, and op2x is not a byteable register, we'll need a temp.
+ // We could predict a byteable register for op2x, but what if we don't get it?
+ // So, be conservative and always ask for a temp. There are a couple small CQ losses as a
+ // result.
+ if (varTypeIsByte(op1x->TypeGet()))
{
- if (genActualType(op1x->TypeGet()) != lvaGetActualType(op2x->gtLclVar.gtLclNum))
- needTmp = true;
+ needTmp = true;
}
else
{
- if (op1x->TypeGet() != op2x->TypeGet())
- needTmp = true;
+ if (op2x->gtOper == GT_LCL_VAR) // this will be a GT_REG_VAR during code generation
+ {
+ if (genActualType(op1x->TypeGet()) != lvaGetActualType(op2x->gtLclVar.gtLclNum))
+ needTmp = true;
+ }
+ else
+ {
+ if (op1x->TypeGet() != op2x->TypeGet())
+ needTmp = true;
+ }
+ }
+ if (needTmp)
+ {
+ regMask = rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | rsvdRegs);
}
}
- if (needTmp)
- {
- regMask = rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | rsvdRegs);
- }
}
- }
#endif // _TARGET_XARCH_
- tree->gtUsedRegs = (regMaskSmall)regMask | op1->gtUsedRegs | op2->gtUsedRegs;
- goto RETURN_CHECK;
+ tree->gtUsedRegs = (regMaskSmall)regMask | op1->gtUsedRegs | op2->gtUsedRegs;
+ goto RETURN_CHECK;
- case GT_MUL:
+ case GT_MUL:
#ifndef _TARGET_AMD64_
- if (type == TYP_LONG)
- {
- assert(tree->gtIsValid64RsltMul());
+ if (type == TYP_LONG)
+ {
+ assert(tree->gtIsValid64RsltMul());
- /* Strip out the cast nodes */
+ /* Strip out the cast nodes */
- noway_assert(op1->gtOper == GT_CAST && op2->gtOper == GT_CAST);
- op1 = op1->gtCast.CastOp();
- op2 = op2->gtCast.CastOp();
+ noway_assert(op1->gtOper == GT_CAST && op2->gtOper == GT_CAST);
+ op1 = op1->gtCast.CastOp();
+ op2 = op2->gtCast.CastOp();
#else
- if (false)
- {
+ if (false)
+ {
#endif // !_TARGET_AMD64_
-USE_MULT_EAX:
+ USE_MULT_EAX:
#if defined(_TARGET_X86_)
-            // This will be done by a 64-bit imul "imul eax, reg"
- // (i.e. EDX:EAX = EAX * reg)
+                    // This will be done by a 64-bit imul "imul eax, reg"
+ // (i.e. EDX:EAX = EAX * reg)
- /* Are we supposed to evaluate op2 first? */
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- rpPredictTreeRegUse(op2, PREDICT_PAIR_TMP_LO, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
- rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs | RBM_PAIR_TMP_LO, RBM_LASTUSE);
- }
- else
- {
- rpPredictTreeRegUse(op1, PREDICT_PAIR_TMP_LO, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
- rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs | RBM_PAIR_TMP_LO, RBM_LASTUSE);
- }
+ /* Are we supposed to evaluate op2 first? */
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ rpPredictTreeRegUse(op2, PREDICT_PAIR_TMP_LO, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
+ rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs | RBM_PAIR_TMP_LO, RBM_LASTUSE);
+ }
+ else
+ {
+ rpPredictTreeRegUse(op1, PREDICT_PAIR_TMP_LO, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
+ rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs | RBM_PAIR_TMP_LO, RBM_LASTUSE);
+ }
- /* set gtUsedRegs to EAX, EDX and the registers needed by op1 and op2 */
+ /* set gtUsedRegs to EAX, EDX and the registers needed by op1 and op2 */
- tree->gtUsedRegs = RBM_PAIR_TMP | op1->gtUsedRegs | op2->gtUsedRegs;
+ tree->gtUsedRegs = RBM_PAIR_TMP | op1->gtUsedRegs | op2->gtUsedRegs;
- /* set regMask to the set of held registers */
+ /* set regMask to the set of held registers */
- regMask = RBM_PAIR_TMP_LO;
+ regMask = RBM_PAIR_TMP_LO;
- if (type == TYP_LONG)
- regMask |= RBM_PAIR_TMP_HI;
+ if (type == TYP_LONG)
+ regMask |= RBM_PAIR_TMP_HI;
#elif defined(_TARGET_ARM_)
-            // This will be done by a 4 operand multiply
+                    // This will be done by a 4 operand multiply
- // Are we supposed to evaluate op2 first?
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
- rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs , RBM_LASTUSE);
- }
- else
- {
- rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
- rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs, RBM_LASTUSE);
- }
-
- // set regMask to the set of held registers,
- // the two scratch register we need to compute the mul result
+ // Are we supposed to evaluate op2 first?
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
+ rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, RBM_LASTUSE);
+ }
+ else
+ {
+ rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
+ rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs, RBM_LASTUSE);
+ }
+
+ // set regMask to the set of held registers,
+ // the two scratch register we need to compute the mul result
- regMask = rpPredictRegPick(TYP_LONG, PREDICT_SCRATCH_REG, lockedRegs | rsvdRegs);
+ regMask = rpPredictRegPick(TYP_LONG, PREDICT_SCRATCH_REG, lockedRegs | rsvdRegs);
-            // set gtUsedRegs to regMask and the registers needed by op1 and op2
+                    // set gtUsedRegs to regMask and the registers needed by op1 and op2
- tree->gtUsedRegs = regMask | op1->gtUsedRegs | op2->gtUsedRegs;
+ tree->gtUsedRegs = regMask | op1->gtUsedRegs | op2->gtUsedRegs;
#else // !_TARGET_X86_ && !_TARGET_ARM_
#error "Non-ARM or x86 _TARGET_ in RegPredict for 64-bit imul"
#endif
- goto RETURN_CHECK;
- }
- else
- {
- /* We use imulEAX for most unsigned multiply operations */
- if (tree->gtOverflow())
- {
- if ((tree->gtFlags & GTF_UNSIGNED) ||
- varTypeIsSmall(tree->TypeGet()) )
+ goto RETURN_CHECK;
+ }
+ else
{
- goto USE_MULT_EAX;
+ /* We use imulEAX for most unsigned multiply operations */
+ if (tree->gtOverflow())
+ {
+ if ((tree->gtFlags & GTF_UNSIGNED) || varTypeIsSmall(tree->TypeGet()))
+ {
+ goto USE_MULT_EAX;
+ }
+ }
}
- }
- }
- __fallthrough;
+ __fallthrough;
- case GT_OR:
- case GT_XOR:
- case GT_AND:
+ case GT_OR:
+ case GT_XOR:
+ case GT_AND:
- case GT_SUB:
- case GT_ADD:
- tree->gtUsedRegs = 0;
+ case GT_SUB:
+ case GT_ADD:
+ tree->gtUsedRegs = 0;
- if (predictReg <= PREDICT_REG)
- predictReg = PREDICT_SCRATCH_REG;
+ if (predictReg <= PREDICT_REG)
+ predictReg = PREDICT_SCRATCH_REG;
-GENERIC_BINARY:
+ GENERIC_BINARY:
- noway_assert(op2);
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- op1PredictReg = PREDICT_REG;
+ noway_assert(op2);
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ op1PredictReg = PREDICT_REG;
#if !CPU_LOAD_STORE_ARCH
- if (genTypeSize(op1->gtType) >= sizeof(int))
- op1PredictReg = PREDICT_NONE;
+ if (genTypeSize(op1->gtType) >= sizeof(int))
+ op1PredictReg = PREDICT_NONE;
#endif
- regMask = rpPredictTreeRegUse(op2, predictReg, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
- rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs | regMask, RBM_LASTUSE);
- }
- else
- {
- op2PredictReg = PREDICT_REG;
+ regMask = rpPredictTreeRegUse(op2, predictReg, lockedRegs, rsvdRegs | op1->gtRsvdRegs);
+ rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs | regMask, RBM_LASTUSE);
+ }
+ else
+ {
+ op2PredictReg = PREDICT_REG;
#if !CPU_LOAD_STORE_ARCH
- if (genTypeSize(op2->gtType) >= sizeof(int))
- op2PredictReg = PREDICT_NONE;
+ if (genTypeSize(op2->gtType) >= sizeof(int))
+ op2PredictReg = PREDICT_NONE;
#endif
- regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
+ regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
#ifdef _TARGET_ARM_
- // For most ALU operations we can generate a single instruction that encodes
- // a small immediate integer constant value. (except for multiply)
- //
- if ((op2->gtOper == GT_CNS_INT) && (oper != GT_MUL))
- {
- ssize_t ival = op2->gtIntCon.gtIconVal;
- if (codeGen->validImmForAlu(ival))
+ // For most ALU operations we can generate a single instruction that encodes
+ // a small immediate integer constant value. (except for multiply)
+ //
+ if ((op2->gtOper == GT_CNS_INT) && (oper != GT_MUL))
{
- op2PredictReg = PREDICT_NONE;
+ ssize_t ival = op2->gtIntCon.gtIconVal;
+ if (codeGen->validImmForAlu(ival))
+ {
+ op2PredictReg = PREDICT_NONE;
+ }
+ else if (codeGen->validImmForAdd(ival, INS_FLAGS_DONT_CARE) &&
+ ((oper == GT_ADD) || (oper == GT_SUB)))
+ {
+ op2PredictReg = PREDICT_NONE;
+ }
}
- else if (codeGen->validImmForAdd(ival, INS_FLAGS_DONT_CARE) &&
- ((oper == GT_ADD) || (oper == GT_SUB)))
+ if (op2PredictReg == PREDICT_NONE)
{
- op2PredictReg = PREDICT_NONE;
+ op2->gtUsedRegs = RBM_NONE;
}
- }
- if (op2PredictReg == PREDICT_NONE)
- {
- op2->gtUsedRegs = RBM_NONE;
- }
- else
+ else
#endif
- {
- rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs | regMask, RBM_LASTUSE);
+ {
+ rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs | regMask, RBM_LASTUSE);
+ }
}
- }
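The ARM branch just above skips loading op2 into a register when the constant can be encoded directly in the instruction. A rough sketch of the rule that validImmForAlu embodies (assumption: classic ARM32 modified immediates, i.e. an 8-bit value rotated right by an even amount; this ignores the extra forms that validImmForAdd additionally accepts, and it is not the JIT's actual implementation):

    // Returns true if 'imm' fits an ARM32 data-processing immediate: some even
    // rotation of the value fits in 8 bits. Assumes 'unsigned' is 32 bits wide.
    bool armAluImmEncodable(unsigned imm)
    {
        for (unsigned rot = 0; rot < 32; rot += 2)
        {
            // rotate left by 'rot' bits (undoes a rotate-right-by-rot encoding)
            unsigned rotated = (rot == 0) ? imm : ((imm << rot) | (imm >> (32 - rot)));
            if (rotated <= 0xFF)
                return true;
        }
        return false;
    }

Values such as 0xFF, 0x3FC or 0xFF000000 pass and leave op2PredictReg at PREDICT_NONE; a constant like 0x1234 fails and op2 is predicted into a register instead.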
- tree->gtUsedRegs = (regMaskSmall)regMask | op1->gtUsedRegs | op2->gtUsedRegs;
+ tree->gtUsedRegs = (regMaskSmall)regMask | op1->gtUsedRegs | op2->gtUsedRegs;
#if CPU_HAS_BYTE_REGS
- /* We have special register requirements for byte operations */
+ /* We have special register requirements for byte operations */
- if (varTypeIsByte(tree->TypeGet()))
- {
-                /* For 8 bit arithmetic, one operand has to be in a
-                   byte-addressable register, and the other has to be
-                   in a byte-addressable reg or in memory. Assume it's in a reg */
+ if (varTypeIsByte(tree->TypeGet()))
+ {
+                    /* For 8 bit arithmetic, one operand has to be in a
+                       byte-addressable register, and the other has to be
+                       in a byte-addressable reg or in memory. Assume it's in a reg */
- regMaskTP regByteMask = 0;
- regMaskTP op1ByteMask = op1->gtUsedRegs;
+ regMaskTP regByteMask = 0;
+ regMaskTP op1ByteMask = op1->gtUsedRegs;
- if (!(op1->gtUsedRegs & RBM_BYTE_REGS))
- {
- // Pick a Byte register to use for op1
- regByteMask = rpPredictRegPick(TYP_BYTE, PREDICT_REG, lockedRegs | rsvdRegs);
- op1ByteMask = regByteMask;
- }
+ if (!(op1->gtUsedRegs & RBM_BYTE_REGS))
+ {
+ // Pick a Byte register to use for op1
+ regByteMask = rpPredictRegPick(TYP_BYTE, PREDICT_REG, lockedRegs | rsvdRegs);
+ op1ByteMask = regByteMask;
+ }
- if (!(op2->gtUsedRegs & RBM_BYTE_REGS))
- {
- // Pick a Byte register to use for op2, avoiding the one used by op1
- regByteMask |= rpPredictRegPick(TYP_BYTE, PREDICT_REG, lockedRegs | rsvdRegs | op1ByteMask);
- }
+ if (!(op2->gtUsedRegs & RBM_BYTE_REGS))
+ {
+ // Pick a Byte register to use for op2, avoiding the one used by op1
+ regByteMask |= rpPredictRegPick(TYP_BYTE, PREDICT_REG, lockedRegs | rsvdRegs | op1ByteMask);
+ }
- if (regByteMask)
- {
- tree->gtUsedRegs |= regByteMask;
- regMask = regByteMask;
+ if (regByteMask)
+ {
+ tree->gtUsedRegs |= regByteMask;
+ regMask = regByteMask;
+ }
}
- }
#endif
- goto RETURN_CHECK;
+ goto RETURN_CHECK;
- case GT_DIV:
- case GT_MOD:
+ case GT_DIV:
+ case GT_MOD:
- case GT_UDIV:
- case GT_UMOD:
+ case GT_UDIV:
+ case GT_UMOD:
- /* non-integer division handled in generic way */
- if (!varTypeIsIntegral(type))
- {
- tree->gtUsedRegs = 0;
- if (predictReg <= PREDICT_REG)
- predictReg = PREDICT_SCRATCH_REG;
- goto GENERIC_BINARY;
- }
+ /* non-integer division handled in generic way */
+ if (!varTypeIsIntegral(type))
+ {
+ tree->gtUsedRegs = 0;
+ if (predictReg <= PREDICT_REG)
+ predictReg = PREDICT_SCRATCH_REG;
+ goto GENERIC_BINARY;
+ }
#ifndef _TARGET_64BIT_
- if (type == TYP_LONG && (oper == GT_MOD || oper == GT_UMOD))
- {
- /* Special case: a mod with an int op2 is done inline using idiv or div
- to avoid a costly call to the helper */
+ if (type == TYP_LONG && (oper == GT_MOD || oper == GT_UMOD))
+ {
+ /* Special case: a mod with an int op2 is done inline using idiv or div
+ to avoid a costly call to the helper */
- noway_assert((op2->gtOper == GT_CNS_LNG) &&
- (op2->gtLngCon.gtLconVal == int(op2->gtLngCon.gtLconVal)));
+ noway_assert((op2->gtOper == GT_CNS_LNG) &&
+ (op2->gtLngCon.gtLconVal == int(op2->gtLngCon.gtLconVal)));
#if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- tmpMask = rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs | RBM_PAIR_TMP, rsvdRegs | op1->gtRsvdRegs);
- tmpMask |= rpPredictTreeRegUse(op1, PREDICT_PAIR_TMP, lockedRegs | tmpMask, RBM_LASTUSE);
- }
- else
- {
- tmpMask = rpPredictTreeRegUse(op1, PREDICT_PAIR_TMP, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
- tmpMask |= rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs | tmpMask | RBM_PAIR_TMP, RBM_LASTUSE);
- }
- regMask = RBM_PAIR_TMP;
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ tmpMask = rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs | RBM_PAIR_TMP,
+ rsvdRegs | op1->gtRsvdRegs);
+ tmpMask |= rpPredictTreeRegUse(op1, PREDICT_PAIR_TMP, lockedRegs | tmpMask, RBM_LASTUSE);
+ }
+ else
+ {
+ tmpMask = rpPredictTreeRegUse(op1, PREDICT_PAIR_TMP, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
+ tmpMask |=
+ rpPredictTreeRegUse(op2, PREDICT_REG, lockedRegs | tmpMask | RBM_PAIR_TMP, RBM_LASTUSE);
+ }
+ regMask = RBM_PAIR_TMP;
#else // !_TARGET_X86_ && !_TARGET_ARM_
#error "Non-ARM or x86 _TARGET_ in RegPredict for 64-bit MOD"
#endif // !_TARGET_X86_ && !_TARGET_ARM_
- tree->gtUsedRegs = (regMaskSmall)(regMask |
- op1->gtUsedRegs |
- op2->gtUsedRegs |
- rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, regMask | tmpMask));
+ tree->gtUsedRegs =
+ (regMaskSmall)(regMask | op1->gtUsedRegs | op2->gtUsedRegs |
+ rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, regMask | tmpMask));
- goto RETURN_CHECK;
- }
+ goto RETURN_CHECK;
+ }
#endif // _TARGET_64BIT_
- /* no divide immediate, so force integer constant which is not
- * a power of two to register
- */
+ /* no divide immediate, so force integer constant which is not
+ * a power of two to register
+ */
- if (op2->OperKind() & GTK_CONST)
- {
- ssize_t ival = op2->gtIntConCommon.IconValue();
+ if (op2->OperKind() & GTK_CONST)
+ {
+ ssize_t ival = op2->gtIntConCommon.IconValue();
- /* Is the divisor a power of 2 ? */
+ /* Is the divisor a power of 2 ? */
- if (ival > 0 && genMaxOneBit(size_t(ival)))
- {
- goto GENERIC_UNARY;
+ if (ival > 0 && genMaxOneBit(size_t(ival)))
+ {
+ goto GENERIC_UNARY;
+ }
+ else
+ op2PredictReg = PREDICT_SCRATCH_REG;
}
else
- op2PredictReg = PREDICT_SCRATCH_REG;
- }
- else
- {
- /* Non integer constant also must be enregistered */
- op2PredictReg = PREDICT_REG;
- }
+ {
+ /* Non integer constant also must be enregistered */
+ op2PredictReg = PREDICT_REG;
+ }
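For the divide/mod path just above, the constant-divisor test is the standard "at most one bit set" check (assumption: genMaxOneBit(x) is true when x has no more than one bit set; the helper below is a self-contained restatement for illustration, not the JIT's definition):

    #include <cstdint>

    // True for divisors like 1, 2, 8, 4096: these are strength-reduced (typically to
    // shifts), so the constant never needs its own register and the code above jumps
    // to GENERIC_UNARY instead of forcing op2 into a scratch register.
    bool isPositivePowerOfTwoDivisor(int64_t ival)
    {
        uint64_t u = uint64_t(ival);
        return (ival > 0) && ((u & (u - 1)) == 0);
    }

A divisor of 6, by contrast, fails the test and op2 is predicted into a scratch register.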
- regMaskTP trashedMask; trashedMask = DUMMY_INIT(RBM_ILLEGAL);
- regMaskTP op1ExcludeMask; op1ExcludeMask = DUMMY_INIT(RBM_ILLEGAL);
- regMaskTP op2ExcludeMask; op2ExcludeMask = DUMMY_INIT(RBM_ILLEGAL);
+ regMaskTP trashedMask;
+ trashedMask = DUMMY_INIT(RBM_ILLEGAL);
+ regMaskTP op1ExcludeMask;
+ op1ExcludeMask = DUMMY_INIT(RBM_ILLEGAL);
+ regMaskTP op2ExcludeMask;
+ op2ExcludeMask = DUMMY_INIT(RBM_ILLEGAL);
#ifdef _TARGET_XARCH_
- /* Consider the case "a / b" - we'll need to trash EDX (via "CDQ") before
- * we can safely allow the "b" value to die. Unfortunately, if we simply
- * mark the node "b" as using EDX, this will not work if "b" is a register
- * variable that dies with this particular reference. Thus, if we want to
- * avoid this situation (where we would have to spill the variable from
- * EDX to someplace else), we need to explicitly mark the interference
- * of the variable at this point.
- */
+ /* Consider the case "a / b" - we'll need to trash EDX (via "CDQ") before
+ * we can safely allow the "b" value to die. Unfortunately, if we simply
+ * mark the node "b" as using EDX, this will not work if "b" is a register
+ * variable that dies with this particular reference. Thus, if we want to
+ * avoid this situation (where we would have to spill the variable from
+ * EDX to someplace else), we need to explicitly mark the interference
+ * of the variable at this point.
+ */
- if (op2->gtOper == GT_LCL_VAR)
- {
- unsigned lclNum = op2->gtLclVarCommon.gtLclNum;
- varDsc = lvaTable + lclNum;
- if (varDsc->lvTracked)
+ if (op2->gtOper == GT_LCL_VAR)
{
-#ifdef DEBUG
- if (verbose)
+ unsigned lclNum = op2->gtLclVarCommon.gtLclNum;
+ varDsc = lvaTable + lclNum;
+ if (varDsc->lvTracked)
{
- if (!VarSetOps::IsMember(this, raLclRegIntf[REG_EAX], varDsc->lvVarIndex))
- printf("Record interference between V%02u,T%02u and EAX -- int divide\n",
- lclNum, varDsc->lvVarIndex);
- if (!VarSetOps::IsMember(this, raLclRegIntf[REG_EDX], varDsc->lvVarIndex))
- printf("Record interference between V%02u,T%02u and EDX -- int divide\n",
- lclNum, varDsc->lvVarIndex);
- }
+#ifdef DEBUG
+ if (verbose)
+ {
+ if (!VarSetOps::IsMember(this, raLclRegIntf[REG_EAX], varDsc->lvVarIndex))
+ printf("Record interference between V%02u,T%02u and EAX -- int divide\n", lclNum,
+ varDsc->lvVarIndex);
+ if (!VarSetOps::IsMember(this, raLclRegIntf[REG_EDX], varDsc->lvVarIndex))
+ printf("Record interference between V%02u,T%02u and EDX -- int divide\n", lclNum,
+ varDsc->lvVarIndex);
+ }
#endif
- VarSetOps::AddElemD(this, raLclRegIntf[REG_EAX], varDsc->lvVarIndex);
- VarSetOps::AddElemD(this, raLclRegIntf[REG_EDX], varDsc->lvVarIndex);
+ VarSetOps::AddElemD(this, raLclRegIntf[REG_EAX], varDsc->lvVarIndex);
+ VarSetOps::AddElemD(this, raLclRegIntf[REG_EDX], varDsc->lvVarIndex);
+ }
}
- }
- /* set the held register based on opcode */
- if (oper == GT_DIV || oper == GT_UDIV)
- regMask = RBM_EAX;
- else
- regMask = RBM_EDX;
- trashedMask = (RBM_EAX | RBM_EDX);
- op1ExcludeMask = 0;
- op2ExcludeMask = (RBM_EAX | RBM_EDX);
+ /* set the held register based on opcode */
+ if (oper == GT_DIV || oper == GT_UDIV)
+ regMask = RBM_EAX;
+ else
+ regMask = RBM_EDX;
+ trashedMask = (RBM_EAX | RBM_EDX);
+ op1ExcludeMask = 0;
+ op2ExcludeMask = (RBM_EAX | RBM_EDX);
#endif // _TARGET_XARCH_
#ifdef _TARGET_ARM_
- trashedMask = RBM_NONE;
- op1ExcludeMask = RBM_NONE;
- op2ExcludeMask = RBM_NONE;
+ trashedMask = RBM_NONE;
+ op1ExcludeMask = RBM_NONE;
+ op2ExcludeMask = RBM_NONE;
#endif
- /* set the lvPref reg if possible */
- GenTreePtr dest;
- /*
- * Walking the gtNext link twice from here should get us back
- * to our parent node, if this is an simple assignment tree.
- */
- dest = tree->gtNext;
- if (dest && (dest->gtOper == GT_LCL_VAR) &&
- dest->gtNext && (dest->gtNext->OperKind() & GTK_ASGOP) &&
- dest->gtNext->gtOp.gtOp2 == tree)
- {
- varDsc = lvaTable + dest->gtLclVarCommon.gtLclNum;
- varDsc->addPrefReg(regMask, this);
- }
+ /* set the lvPref reg if possible */
+ GenTreePtr dest;
+ /*
+ * Walking the gtNext link twice from here should get us back
+                     * to our parent node, if this is a simple assignment tree.
+ */
+ dest = tree->gtNext;
+ if (dest && (dest->gtOper == GT_LCL_VAR) && dest->gtNext && (dest->gtNext->OperKind() & GTK_ASGOP) &&
+ dest->gtNext->gtOp.gtOp2 == tree)
+ {
+ varDsc = lvaTable + dest->gtLclVarCommon.gtLclNum;
+ varDsc->addPrefReg(regMask, this);
+ }
#ifdef _TARGET_XARCH_
- op1PredictReg = PREDICT_REG_EDX; /* Normally target op1 into EDX */
+ op1PredictReg = PREDICT_REG_EDX; /* Normally target op1 into EDX */
#else
- op1PredictReg = PREDICT_SCRATCH_REG;
+ op1PredictReg = PREDICT_SCRATCH_REG;
#endif
- /* are we supposed to evaluate op2 first? */
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- tmpMask = rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs | op2ExcludeMask, rsvdRegs | op1->gtRsvdRegs);
- rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs | tmpMask | op1ExcludeMask, RBM_LASTUSE);
- }
- else
- {
- tmpMask = rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs | op1ExcludeMask, rsvdRegs | op2->gtRsvdRegs);
- rpPredictTreeRegUse(op2, op2PredictReg, tmpMask | lockedRegs | op2ExcludeMask, RBM_LASTUSE);
- }
+ /* are we supposed to evaluate op2 first? */
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ tmpMask = rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs | op2ExcludeMask,
+ rsvdRegs | op1->gtRsvdRegs);
+ rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs | tmpMask | op1ExcludeMask, RBM_LASTUSE);
+ }
+ else
+ {
+ tmpMask = rpPredictTreeRegUse(op1, op1PredictReg, lockedRegs | op1ExcludeMask,
+ rsvdRegs | op2->gtRsvdRegs);
+ rpPredictTreeRegUse(op2, op2PredictReg, tmpMask | lockedRegs | op2ExcludeMask, RBM_LASTUSE);
+ }
#ifdef _TARGET_ARM_
- regMask = tmpMask;
+ regMask = tmpMask;
#endif
- /* grab EAX, EDX for this tree node */
- tree->gtUsedRegs = (regMaskSmall)trashedMask | op1->gtUsedRegs | op2->gtUsedRegs;
+ /* grab EAX, EDX for this tree node */
+ tree->gtUsedRegs = (regMaskSmall)trashedMask | op1->gtUsedRegs | op2->gtUsedRegs;
- goto RETURN_CHECK;
+ goto RETURN_CHECK;
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
- if (predictReg <= PREDICT_REG)
- predictReg = PREDICT_SCRATCH_REG;
+ if (predictReg <= PREDICT_REG)
+ predictReg = PREDICT_SCRATCH_REG;
#ifndef _TARGET_64BIT_
- if (type == TYP_LONG)
- {
- if (op2->IsCnsIntOrI())
- {
- regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
- // no register used by op2
- op2->gtUsedRegs = 0;
- tree->gtUsedRegs = op1->gtUsedRegs;
- }
- else
+ if (type == TYP_LONG)
{
- // since RBM_LNGARG_0 and RBM_SHIFT_LNG are hardwired we can't have them in the locked registers
- tmpMask = lockedRegs;
- tmpMask &= ~RBM_LNGARG_0;
- tmpMask &= ~RBM_SHIFT_LNG;
-
- // op2 goes to RBM_SHIFT, op1 to the RBM_LNGARG_0 pair
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (op2->IsCnsIntOrI())
{
- rpPredictTreeRegUse(op2, PREDICT_REG_SHIFT_LNG, tmpMask, RBM_NONE);
- tmpMask |= RBM_SHIFT_LNG;
- // Ensure that the RBM_SHIFT_LNG register interfere with op2's compCurLife
- // Fix 383843 X86/ARM ILGEN
- rpRecordRegIntf(RBM_SHIFT_LNG, compCurLife
- DEBUGARG("SHIFT_LNG arg setup"));
- rpPredictTreeRegUse(op1, PREDICT_PAIR_LNGARG_0, tmpMask, RBM_LASTUSE);
+ regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
+ // no register used by op2
+ op2->gtUsedRegs = 0;
+ tree->gtUsedRegs = op1->gtUsedRegs;
}
else
{
- rpPredictTreeRegUse(op1, PREDICT_PAIR_LNGARG_0, tmpMask, RBM_NONE);
- tmpMask |= RBM_LNGARG_0;
- // Ensure that the RBM_LNGARG_0 registers interfere with op1's compCurLife
- // Fix 383839 ARM ILGEN
- rpRecordRegIntf(RBM_LNGARG_0, compCurLife
- DEBUGARG("LNGARG_0 arg setup"));
- rpPredictTreeRegUse(op2, PREDICT_REG_SHIFT_LNG, tmpMask, RBM_LASTUSE);
- }
- regMask = RBM_LNGRET; // function return registers
- op1->gtUsedRegs |= RBM_LNGARG_0;
- op2->gtUsedRegs |= RBM_SHIFT_LNG;
+ // since RBM_LNGARG_0 and RBM_SHIFT_LNG are hardwired we can't have them in the locked registers
+ tmpMask = lockedRegs;
+ tmpMask &= ~RBM_LNGARG_0;
+ tmpMask &= ~RBM_SHIFT_LNG;
+
+ // op2 goes to RBM_SHIFT, op1 to the RBM_LNGARG_0 pair
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ rpPredictTreeRegUse(op2, PREDICT_REG_SHIFT_LNG, tmpMask, RBM_NONE);
+ tmpMask |= RBM_SHIFT_LNG;
+                            // Ensure that the RBM_SHIFT_LNG register interferes with op2's compCurLife
+ // Fix 383843 X86/ARM ILGEN
+ rpRecordRegIntf(RBM_SHIFT_LNG, compCurLife DEBUGARG("SHIFT_LNG arg setup"));
+ rpPredictTreeRegUse(op1, PREDICT_PAIR_LNGARG_0, tmpMask, RBM_LASTUSE);
+ }
+ else
+ {
+ rpPredictTreeRegUse(op1, PREDICT_PAIR_LNGARG_0, tmpMask, RBM_NONE);
+ tmpMask |= RBM_LNGARG_0;
+ // Ensure that the RBM_LNGARG_0 registers interfere with op1's compCurLife
+ // Fix 383839 ARM ILGEN
+ rpRecordRegIntf(RBM_LNGARG_0, compCurLife DEBUGARG("LNGARG_0 arg setup"));
+ rpPredictTreeRegUse(op2, PREDICT_REG_SHIFT_LNG, tmpMask, RBM_LASTUSE);
+ }
+ regMask = RBM_LNGRET; // function return registers
+ op1->gtUsedRegs |= RBM_LNGARG_0;
+ op2->gtUsedRegs |= RBM_SHIFT_LNG;
- tree->gtUsedRegs = op1->gtUsedRegs | op2->gtUsedRegs;
+ tree->gtUsedRegs = op1->gtUsedRegs | op2->gtUsedRegs;
- // We are using a helper function to do shift:
- //
- tree->gtUsedRegs |= RBM_CALLEE_TRASH;
+                    // We are using a helper function to do the shift:
+ //
+ tree->gtUsedRegs |= RBM_CALLEE_TRASH;
+ }
}
- }
- else
+ else
#endif // _TARGET_64BIT_
- {
+ {
#ifdef _TARGET_XARCH_
- if (!op2->IsCnsIntOrI())
- predictReg = PREDICT_NOT_REG_ECX;
+ if (!op2->IsCnsIntOrI())
+ predictReg = PREDICT_NOT_REG_ECX;
#endif
-HANDLE_SHIFT_COUNT:
- // Note that this code is also used by assigning shift operators (i.e. GT_ASG_LSH)
-
- regMaskTP tmpRsvdRegs;
+ HANDLE_SHIFT_COUNT:
+ // Note that this code is also used by assigning shift operators (i.e. GT_ASG_LSH)
- if ((tree->gtFlags & GTF_REVERSE_OPS) == 0)
- {
- regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
- rsvdRegs = RBM_LASTUSE;
- tmpRsvdRegs = RBM_NONE;
+ regMaskTP tmpRsvdRegs;
- }
- else
- {
- regMask = RBM_NONE;
- // Special case op1 of a constant
- if (op1->IsCnsIntOrI())
- tmpRsvdRegs = RBM_LASTUSE; // Allow a last use to occur in op2; See System.Xml.Schema.BitSet:Get(int):bool
- else
- tmpRsvdRegs = op1->gtRsvdRegs;
- }
-
- op2Mask = RBM_NONE;
- if (!op2->IsCnsIntOrI())
- {
- if ((REG_SHIFT != REG_NA) && ((RBM_SHIFT & tmpRsvdRegs) == 0))
+ if ((tree->gtFlags & GTF_REVERSE_OPS) == 0)
{
- op2PredictReg = PREDICT_REG_SHIFT;
+ regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
+ rsvdRegs = RBM_LASTUSE;
+ tmpRsvdRegs = RBM_NONE;
}
else
{
- op2PredictReg = PREDICT_REG;
+ regMask = RBM_NONE;
+                        // Special case when op1 is a constant
+ if (op1->IsCnsIntOrI())
+ tmpRsvdRegs = RBM_LASTUSE; // Allow a last use to occur in op2; See
+ // System.Xml.Schema.BitSet:Get(int):bool
+ else
+ tmpRsvdRegs = op1->gtRsvdRegs;
}
- /* evaluate shift count into a register, likely the PREDICT_REG_SHIFT register */
- op2Mask = rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs | regMask, tmpRsvdRegs);
-
- // If our target arch has a REG_SHIFT register then
- // we set the PrefReg when we have a LclVar for op2
- // we add an interference with REG_SHIFT for any other LclVars alive at op2
- if (REG_SHIFT != REG_NA)
+ op2Mask = RBM_NONE;
+ if (!op2->IsCnsIntOrI())
{
- VARSET_TP VARSET_INIT(this, liveSet, compCurLife);
-
- while (op2->gtOper == GT_COMMA)
+ if ((REG_SHIFT != REG_NA) && ((RBM_SHIFT & tmpRsvdRegs) == 0))
{
- op2 = op2->gtOp.gtOp2;
+ op2PredictReg = PREDICT_REG_SHIFT;
+ }
+ else
+ {
+ op2PredictReg = PREDICT_REG;
}
- if (op2->gtOper == GT_LCL_VAR)
+ /* evaluate shift count into a register, likely the PREDICT_REG_SHIFT register */
+ op2Mask = rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs | regMask, tmpRsvdRegs);
+
+ // If our target arch has a REG_SHIFT register then
+                    // we set the PrefReg when we have a LclVar for op2 and
+ // we add an interference with REG_SHIFT for any other LclVars alive at op2
+ if (REG_SHIFT != REG_NA)
{
- varDsc = lvaTable + op2->gtLclVarCommon.gtLclNum;
- varDsc->setPrefReg(REG_SHIFT, this);
- if (varDsc->lvTracked)
+ VARSET_TP VARSET_INIT(this, liveSet, compCurLife);
+
+ while (op2->gtOper == GT_COMMA)
{
- VarSetOps::RemoveElemD(this, liveSet, varDsc->lvVarIndex);
+ op2 = op2->gtOp.gtOp2;
}
- }
- // Ensure that we have a register interference with the LclVar in tree's LiveSet,
- // excluding the LclVar that was used for the shift amount as it is read-only
- // and can be kept alive through the shift operation
- //
- rpRecordRegIntf(RBM_SHIFT, liveSet
- DEBUGARG("Variable Shift Register"));
- // In case op2Mask doesn't contain the required shift register,
- // we will or it in now.
- op2Mask |= RBM_SHIFT;
+ if (op2->gtOper == GT_LCL_VAR)
+ {
+ varDsc = lvaTable + op2->gtLclVarCommon.gtLclNum;
+ varDsc->setPrefReg(REG_SHIFT, this);
+ if (varDsc->lvTracked)
+ {
+ VarSetOps::RemoveElemD(this, liveSet, varDsc->lvVarIndex);
+ }
+ }
+
+ // Ensure that we have a register interference with the LclVar in tree's LiveSet,
+ // excluding the LclVar that was used for the shift amount as it is read-only
+ // and can be kept alive through the shift operation
+ //
+ rpRecordRegIntf(RBM_SHIFT, liveSet DEBUGARG("Variable Shift Register"));
+ // In case op2Mask doesn't contain the required shift register,
+ // we will or it in now.
+ op2Mask |= RBM_SHIFT;
+ }
}
- }
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- assert(regMask == RBM_NONE);
- regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs | op2Mask, rsvdRegs | RBM_LASTUSE);
- }
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ assert(regMask == RBM_NONE);
+ regMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs | op2Mask, rsvdRegs | RBM_LASTUSE);
+ }
#if CPU_HAS_BYTE_REGS
- if (varTypeIsByte(type))
- {
- // Fix 383789 X86 ILGEN
- // Fix 383813 X86 ILGEN
- // Fix 383828 X86 ILGEN
- if (op1->gtOper == GT_LCL_VAR)
+ if (varTypeIsByte(type))
{
- varDsc = lvaTable + op1->gtLclVar.gtLclNum;
- if (varDsc->lvTracked)
+ // Fix 383789 X86 ILGEN
+ // Fix 383813 X86 ILGEN
+ // Fix 383828 X86 ILGEN
+ if (op1->gtOper == GT_LCL_VAR)
{
- VARSET_TP VARSET_INIT_NOCOPY(op1VarBit, VarSetOps::MakeSingleton(this, varDsc->lvVarIndex));
+ varDsc = lvaTable + op1->gtLclVar.gtLclNum;
+ if (varDsc->lvTracked)
+ {
+ VARSET_TP VARSET_INIT_NOCOPY(op1VarBit,
+ VarSetOps::MakeSingleton(this, varDsc->lvVarIndex));
- // Ensure that we don't assign a Non-Byteable register for op1's LCL_VAR
- rpRecordRegIntf(RBM_NON_BYTE_REGS, op1VarBit
- DEBUGARG("Non Byte Register"));
+ // Ensure that we don't assign a Non-Byteable register for op1's LCL_VAR
+ rpRecordRegIntf(RBM_NON_BYTE_REGS, op1VarBit DEBUGARG("Non Byte Register"));
+ }
+ }
+ if ((regMask & RBM_BYTE_REGS) == 0)
+ {
+ // We need to grab a byte-able register, (i.e. EAX, EDX, ECX, EBX)
+ // and we can't select one that is already reserved (i.e. lockedRegs or regMask)
+ //
+ regMask |=
+ rpPredictRegPick(type, PREDICT_SCRATCH_REG, (lockedRegs | regMask | RBM_NON_BYTE_REGS));
}
}
- if ((regMask & RBM_BYTE_REGS) == 0)
- {
- // We need to grab a byte-able register, (i.e. EAX, EDX, ECX, EBX)
- // and we can't select one that is already reserved (i.e. lockedRegs or regMask)
- //
- regMask |= rpPredictRegPick(type, PREDICT_SCRATCH_REG, (lockedRegs | regMask | RBM_NON_BYTE_REGS));
- }
- }
#endif
- tree->gtUsedRegs = (regMaskSmall)(regMask | op2Mask);
- }
+ tree->gtUsedRegs = (regMaskSmall)(regMask | op2Mask);
+ }
- goto RETURN_CHECK;
+ goto RETURN_CHECK;
- case GT_COMMA:
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- if (predictReg == PREDICT_NONE)
+ case GT_COMMA:
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
- predictReg = PREDICT_REG;
+ if (predictReg == PREDICT_NONE)
+ {
+ predictReg = PREDICT_REG;
+ }
+ else if (rpHasVarIndexForPredict(predictReg))
+ {
+ /* Don't propagate the use of tgt reg use in a GT_COMMA */
+ predictReg = PREDICT_SCRATCH_REG;
+ }
+
+ regMask = rpPredictTreeRegUse(op2, predictReg, lockedRegs, rsvdRegs);
+ rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs | regMask, RBM_LASTUSE);
}
- else if (rpHasVarIndexForPredict(predictReg))
+ else
{
- /* Don't propagate the use of tgt reg use in a GT_COMMA */
- predictReg = PREDICT_SCRATCH_REG;
- }
+ rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
- regMask = rpPredictTreeRegUse(op2, predictReg, lockedRegs, rsvdRegs);
- rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs | regMask, RBM_LASTUSE);
- }
- else
- {
- rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
+ /* CodeGen will enregister the op2 side of a GT_COMMA */
+ if (predictReg == PREDICT_NONE)
+ {
+ predictReg = PREDICT_REG;
+ }
+ else if (rpHasVarIndexForPredict(predictReg))
+ {
+ /* Don't propagate the use of tgt reg use in a GT_COMMA */
+ predictReg = PREDICT_SCRATCH_REG;
+ }
- /* CodeGen will enregister the op2 side of a GT_COMMA */
- if (predictReg == PREDICT_NONE)
- {
- predictReg = PREDICT_REG;
+ regMask = rpPredictTreeRegUse(op2, predictReg, lockedRegs, rsvdRegs);
}
- else if (rpHasVarIndexForPredict(predictReg))
+ // tree should only accumulate the used registers from the op2 side of the GT_COMMA
+ //
+ tree->gtUsedRegs = op2->gtUsedRegs;
+ if ((op2->gtOper == GT_LCL_VAR) && (rsvdRegs != 0))
{
- /* Don't propagate the use of tgt reg use in a GT_COMMA */
- predictReg = PREDICT_SCRATCH_REG;
- }
-
- regMask = rpPredictTreeRegUse(op2, predictReg, lockedRegs, rsvdRegs);
- }
- // tree should only accumulate the used registers from the op2 side of the GT_COMMA
- //
- tree->gtUsedRegs = op2->gtUsedRegs;
- if ((op2->gtOper == GT_LCL_VAR) && (rsvdRegs != 0))
- {
- LclVarDsc * op2VarDsc = lvaTable + op2->gtLclVarCommon.gtLclNum;
+ LclVarDsc* op2VarDsc = lvaTable + op2->gtLclVarCommon.gtLclNum;
- if (op2VarDsc->lvTracked)
- {
- VARSET_TP VARSET_INIT_NOCOPY(op2VarBit, VarSetOps::MakeSingleton(this, op2VarDsc->lvVarIndex));
- rpRecordRegIntf(rsvdRegs, op2VarBit DEBUGARG( "comma use"));
+ if (op2VarDsc->lvTracked)
+ {
+ VARSET_TP VARSET_INIT_NOCOPY(op2VarBit, VarSetOps::MakeSingleton(this, op2VarDsc->lvVarIndex));
+ rpRecordRegIntf(rsvdRegs, op2VarBit DEBUGARG("comma use"));
+ }
}
- }
- goto RETURN_CHECK;
+ goto RETURN_CHECK;
- case GT_QMARK:
+ case GT_QMARK:
{
- noway_assert(op1 != NULL && op2 != NULL);
-
- /*
- * If the gtUsedRegs conflicts with lockedRegs
- * then we going to have to spill some registers
- * into the non-trashed register set to keep it alive
- */
- unsigned spillCnt; spillCnt = 0;
- regMaskTP spillRegs; spillRegs = lockedRegs & tree->gtUsedRegs;
+ noway_assert(op1 != NULL && op2 != NULL);
- while (spillRegs)
- {
- /* Find the next register that needs to be spilled */
- tmpMask = genFindLowestBit(spillRegs);
+ /*
+ * If the gtUsedRegs conflicts with lockedRegs
+                 * then we are going to have to spill some registers
+                 * into the non-trashed register set to keep them alive
+ */
+ unsigned spillCnt;
+ spillCnt = 0;
+ regMaskTP spillRegs;
+ spillRegs = lockedRegs & tree->gtUsedRegs;
-#ifdef DEBUG
- if (verbose)
+ while (spillRegs)
{
- printf("Predict spill of %s before: ",
- getRegName(genRegNumFromMask(tmpMask)));
- gtDispTree(tree, 0, NULL, true);
- }
+ /* Find the next register that needs to be spilled */
+ tmpMask = genFindLowestBit(spillRegs);
+
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("Predict spill of %s before: ", getRegName(genRegNumFromMask(tmpMask)));
+ gtDispTree(tree, 0, NULL, true);
+ }
#endif
- /* In Codegen it will typically introduce a spill temp here */
- /* rather than relocating the register to a non trashed reg */
- rpPredictSpillCnt++;
- spillCnt++;
+ /* In Codegen it will typically introduce a spill temp here */
+ /* rather than relocating the register to a non trashed reg */
+ rpPredictSpillCnt++;
+ spillCnt++;
- /* Remove it from the spillRegs and lockedRegs*/
- spillRegs &= ~tmpMask;
- lockedRegs &= ~tmpMask;
- }
- {
- VARSET_TP VARSET_INIT(this, startQmarkCondUseInPlaceVars, rpUseInPlace);
+ /* Remove it from the spillRegs and lockedRegs*/
+ spillRegs &= ~tmpMask;
+ lockedRegs &= ~tmpMask;
+ }
+ {
+ VARSET_TP VARSET_INIT(this, startQmarkCondUseInPlaceVars, rpUseInPlace);
- /* Evaluate the <cond> subtree */
- rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
- VarSetOps::Assign(this, rpUseInPlace, startQmarkCondUseInPlaceVars);
- tree->gtUsedRegs = op1->gtUsedRegs;
+ /* Evaluate the <cond> subtree */
+ rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
+ VarSetOps::Assign(this, rpUseInPlace, startQmarkCondUseInPlaceVars);
+ tree->gtUsedRegs = op1->gtUsedRegs;
- noway_assert(op2->gtOper == GT_COLON);
- if (rpHasVarIndexForPredict(predictReg) && ((op2->gtFlags & (GTF_ASG|GTF_CALL)) != 0))
- {
- // Don't try to target the register specified in predictReg when we have complex subtrees
- //
- predictReg = PREDICT_SCRATCH_REG;
- }
- GenTreePtr elseTree = op2->AsColon()->ElseNode();
- GenTreePtr thenTree = op2->AsColon()->ThenNode();
+ noway_assert(op2->gtOper == GT_COLON);
+ if (rpHasVarIndexForPredict(predictReg) && ((op2->gtFlags & (GTF_ASG | GTF_CALL)) != 0))
+ {
+ // Don't try to target the register specified in predictReg when we have complex subtrees
+ //
+ predictReg = PREDICT_SCRATCH_REG;
+ }
+ GenTreePtr elseTree = op2->AsColon()->ElseNode();
+ GenTreePtr thenTree = op2->AsColon()->ThenNode();
- noway_assert(thenTree != NULL && elseTree != NULL);
+ noway_assert(thenTree != NULL && elseTree != NULL);
- // Update compCurLife to only those vars live on the <then> subtree
+ // Update compCurLife to only those vars live on the <then> subtree
- VarSetOps::Assign(this, compCurLife, tree->gtQmark.gtThenLiveSet);
+ VarSetOps::Assign(this, compCurLife, tree->gtQmark.gtThenLiveSet);
- if (type == TYP_VOID)
- {
- /* Evaluate the <then> subtree */
- rpPredictTreeRegUse(thenTree, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
- regMask = RBM_NONE;
- predictReg = PREDICT_NONE;
- }
- else
- {
- // A mask to use to force the predictor to choose low registers (to reduce code size)
- regMaskTP avoidRegs = RBM_NONE;
+ if (type == TYP_VOID)
+ {
+ /* Evaluate the <then> subtree */
+ rpPredictTreeRegUse(thenTree, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
+ regMask = RBM_NONE;
+ predictReg = PREDICT_NONE;
+ }
+ else
+ {
+ // A mask to use to force the predictor to choose low registers (to reduce code size)
+ regMaskTP avoidRegs = RBM_NONE;
#ifdef _TARGET_ARM_
- avoidRegs = (RBM_R12|RBM_LR);
+ avoidRegs = (RBM_R12 | RBM_LR);
#endif
- if (predictReg <= PREDICT_REG)
- predictReg = PREDICT_SCRATCH_REG;
+ if (predictReg <= PREDICT_REG)
+ predictReg = PREDICT_SCRATCH_REG;
- /* Evaluate the <then> subtree */
- regMask = rpPredictTreeRegUse(thenTree, predictReg, lockedRegs, rsvdRegs | avoidRegs | RBM_LASTUSE);
+ /* Evaluate the <then> subtree */
+ regMask =
+ rpPredictTreeRegUse(thenTree, predictReg, lockedRegs, rsvdRegs | avoidRegs | RBM_LASTUSE);
- if (regMask)
- {
- rpPredictReg op1PredictReg = rpGetPredictForMask(regMask);
- if (op1PredictReg != PREDICT_NONE)
- predictReg = op1PredictReg;
- }
- }
+ if (regMask)
+ {
+ rpPredictReg op1PredictReg = rpGetPredictForMask(regMask);
+ if (op1PredictReg != PREDICT_NONE)
+ predictReg = op1PredictReg;
+ }
+ }
- VarSetOps::Assign(this, rpUseInPlace, startQmarkCondUseInPlaceVars);
+ VarSetOps::Assign(this, rpUseInPlace, startQmarkCondUseInPlaceVars);
- /* Evaluate the <else> subtree */
- // First record the post-then liveness, and reset the current liveness to the else
- // branch liveness.
- CLANG_FORMAT_COMMENT_ANCHOR;
+ /* Evaluate the <else> subtree */
+ // First record the post-then liveness, and reset the current liveness to the else
+ // branch liveness.
+ CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
- VARSET_TP VARSET_INIT(this, postThenLive, compCurLife);
+ VARSET_TP VARSET_INIT(this, postThenLive, compCurLife);
#endif
- VarSetOps::Assign(this, compCurLife, tree->gtQmark.gtElseLiveSet);
+ VarSetOps::Assign(this, compCurLife, tree->gtQmark.gtElseLiveSet);
- rpPredictTreeRegUse(elseTree, predictReg, lockedRegs, rsvdRegs | RBM_LASTUSE);
- tree->gtUsedRegs |= thenTree->gtUsedRegs | elseTree->gtUsedRegs;
+ rpPredictTreeRegUse(elseTree, predictReg, lockedRegs, rsvdRegs | RBM_LASTUSE);
+ tree->gtUsedRegs |= thenTree->gtUsedRegs | elseTree->gtUsedRegs;
- // The then and the else are "virtual basic blocks" that form a control-flow diamond.
- // They each have only one successor, which they share. Their live-out sets must equal the
- // live-in set of this virtual successor block, and thus must be the same. We can assert
- // that equality here.
- assert(VarSetOps::Equal(this, compCurLife, postThenLive));
+ // The then and the else are "virtual basic blocks" that form a control-flow diamond.
+ // They each have only one successor, which they share. Their live-out sets must equal the
+ // live-in set of this virtual successor block, and thus must be the same. We can assert
+ // that equality here.
+ assert(VarSetOps::Equal(this, compCurLife, postThenLive));
- if (spillCnt > 0)
- {
- regMaskTP reloadMask = RBM_NONE;
+ if (spillCnt > 0)
+ {
+ regMaskTP reloadMask = RBM_NONE;
- while (spillCnt)
- {
- regMaskTP reloadReg;
+ while (spillCnt)
+ {
+ regMaskTP reloadReg;
- /* Get an extra register to hold it */
- reloadReg = rpPredictRegPick(TYP_INT, PREDICT_REG,
- lockedRegs | regMask | reloadMask);
-#ifdef DEBUG
- if (verbose)
- {
- printf("Predict reload into %s after : ",
- getRegName(genRegNumFromMask(reloadReg)));
- gtDispTree(tree, 0, NULL, true);
- }
+ /* Get an extra register to hold it */
+ reloadReg = rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | regMask | reloadMask);
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("Predict reload into %s after : ", getRegName(genRegNumFromMask(reloadReg)));
+ gtDispTree(tree, 0, NULL, true);
+ }
#endif
- reloadMask |= reloadReg;
+ reloadMask |= reloadReg;
- spillCnt--;
- }
+ spillCnt--;
+ }
- /* update the gtUsedRegs mask */
- tree->gtUsedRegs |= reloadMask;
- }
- }
+ /* update the gtUsedRegs mask */
+ tree->gtUsedRegs |= reloadMask;
+ }
+ }
- goto RETURN_CHECK;
+ goto RETURN_CHECK;
}
- case GT_RETURN:
- tree->gtUsedRegs = RBM_NONE;
- regMask = RBM_NONE;
+ case GT_RETURN:
+ tree->gtUsedRegs = RBM_NONE;
+ regMask = RBM_NONE;
- /* Is there a return value? */
- if (op1 != NULL)
- {
-#if FEATURE_FP_REGALLOC
- if (varTypeIsFloating(type))
+ /* Is there a return value? */
+ if (op1 != NULL)
{
- predictReg = PREDICT_FLTRET;
- if (type == TYP_FLOAT)
- regMask = RBM_FLOATRET;
+#if FEATURE_FP_REGALLOC
+ if (varTypeIsFloating(type))
+ {
+ predictReg = PREDICT_FLTRET;
+ if (type == TYP_FLOAT)
+ regMask = RBM_FLOATRET;
+ else
+ regMask = RBM_DOUBLERET;
+ }
else
- regMask = RBM_DOUBLERET;
- }
- else
#endif
- if (isRegPairType(type))
- {
- predictReg = PREDICT_LNGRET;
- regMask = RBM_LNGRET;
- }
- else
- {
- predictReg = PREDICT_INTRET;
- regMask = RBM_INTRET;
- }
- if (info.compCallUnmanaged)
- {
- lockedRegs |= (RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME);
+ if (isRegPairType(type))
+ {
+ predictReg = PREDICT_LNGRET;
+ regMask = RBM_LNGRET;
+ }
+ else
+ {
+ predictReg = PREDICT_INTRET;
+ regMask = RBM_INTRET;
+ }
+ if (info.compCallUnmanaged)
+ {
+ lockedRegs |= (RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME);
+ }
+ rpPredictTreeRegUse(op1, predictReg, lockedRegs, RBM_LASTUSE);
+ tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
}
- rpPredictTreeRegUse(op1, predictReg, lockedRegs, RBM_LASTUSE);
- tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
- }
#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED)
- // When on Arm under profiler, to emit Leave callback we would need RBM_PROFILER_RETURN_USED.
- // We could optimize on registers based on int/long or no return value. But to
- // keep it simple we will mark entire RBM_PROFILER_RETURN_USED as used regs here.
- if (compIsProfilerHookNeeded())
- {
- tree->gtUsedRegs |= RBM_PROFILER_RET_USED;
- }
+            // When on Arm under the profiler, to emit the Leave callback we would need RBM_PROFILER_RETURN_USED.
+            // We could optimize on registers based on int/long or no return value. But to
+            // keep it simple we will mark the entire RBM_PROFILER_RETURN_USED as used regs here.
+ if (compIsProfilerHookNeeded())
+ {
+ tree->gtUsedRegs |= RBM_PROFILER_RET_USED;
+ }
#endif
- goto RETURN_CHECK;
-
- case GT_RETFILT:
- if (op1 != NULL)
- {
- rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
- regMask = genReturnRegForTree(tree);
- tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
goto RETURN_CHECK;
- }
- tree->gtUsedRegs = 0;
- regMask = 0;
- goto RETURN_CHECK;
+ case GT_RETFILT:
+ if (op1 != NULL)
+ {
+ rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
+ regMask = genReturnRegForTree(tree);
+ tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)regMask;
+ goto RETURN_CHECK;
+ }
+ tree->gtUsedRegs = 0;
+ regMask = 0;
+
+ goto RETURN_CHECK;
- case GT_JTRUE:
- /* This must be a test of a relational operator */
+ case GT_JTRUE:
+ /* This must be a test of a relational operator */
- noway_assert(op1->OperIsCompare());
+ noway_assert(op1->OperIsCompare());
- /* Only condition code set by this operation */
+ /* Only condition code set by this operation */
- rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, RBM_NONE);
+ rpPredictTreeRegUse(op1, PREDICT_NONE, lockedRegs, RBM_NONE);
- tree->gtUsedRegs = op1->gtUsedRegs;
- regMask = 0;
+ tree->gtUsedRegs = op1->gtUsedRegs;
+ regMask = 0;
- goto RETURN_CHECK;
+ goto RETURN_CHECK;
- case GT_SWITCH:
- noway_assert(type <= TYP_INT);
- noway_assert(compCurBB->bbJumpKind == BBJ_SWITCH);
+ case GT_SWITCH:
+ noway_assert(type <= TYP_INT);
+ noway_assert(compCurBB->bbJumpKind == BBJ_SWITCH);
#ifdef _TARGET_ARM_
- {
- regMask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, RBM_NONE);
- unsigned jumpCnt = compCurBB->bbJumpSwt->bbsCount;
- if (jumpCnt > 2)
{
- // Table based switch requires an extra register for the table base
- regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | regMask);
+ regMask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, RBM_NONE);
+ unsigned jumpCnt = compCurBB->bbJumpSwt->bbsCount;
+ if (jumpCnt > 2)
+ {
+ // Table based switch requires an extra register for the table base
+ regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | regMask);
+ }
+ tree->gtUsedRegs = op1->gtUsedRegs | regMask;
}
- tree->gtUsedRegs = op1->gtUsedRegs | regMask;
- }
-#else // !_TARGET_ARM_
- rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, RBM_NONE);
- tree->gtUsedRegs = op1->gtUsedRegs;
+#else // !_TARGET_ARM_
+ rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, RBM_NONE);
+ tree->gtUsedRegs = op1->gtUsedRegs;
#endif // _TARGET_ARM_
- regMask = 0;
- goto RETURN_CHECK;
+ regMask = 0;
+ goto RETURN_CHECK;
- case GT_CKFINITE:
- if (predictReg <= PREDICT_REG)
- predictReg = PREDICT_SCRATCH_REG;
+ case GT_CKFINITE:
+ if (predictReg <= PREDICT_REG)
+ predictReg = PREDICT_SCRATCH_REG;
- rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
- // Need a reg to load exponent into
- regMask = rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | rsvdRegs);
- tree->gtUsedRegs = (regMaskSmall)regMask | op1->gtUsedRegs;
- goto RETURN_CHECK;
+ rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs);
+ // Need a reg to load exponent into
+ regMask = rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | rsvdRegs);
+ tree->gtUsedRegs = (regMaskSmall)regMask | op1->gtUsedRegs;
+ goto RETURN_CHECK;
- case GT_LCLHEAP:
- regMask = rpPredictTreeRegUse(op1, PREDICT_SCRATCH_REG, lockedRegs, rsvdRegs);
- op2Mask = 0;
+ case GT_LCLHEAP:
+ regMask = rpPredictTreeRegUse(op1, PREDICT_SCRATCH_REG, lockedRegs, rsvdRegs);
+ op2Mask = 0;
#ifdef _TARGET_ARM_
- if (info.compInitMem)
- {
- // We zero out two registers in the ARM codegen path
- op2Mask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | rsvdRegs | regMask | op2Mask);
- }
+ if (info.compInitMem)
+ {
+ // We zero out two registers in the ARM codegen path
+ op2Mask |=
+ rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | rsvdRegs | regMask | op2Mask);
+ }
#endif
- op1->gtUsedRegs |= (regMaskSmall)regMask;
- tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)op2Mask;
+ op1->gtUsedRegs |= (regMaskSmall)regMask;
+ tree->gtUsedRegs = op1->gtUsedRegs | (regMaskSmall)op2Mask;
- // The result will be put in the reg we picked for the size
- // regMask = <already set as we want it to be>
+ // The result will be put in the reg we picked for the size
+ // regMask = <already set as we want it to be>
- goto RETURN_CHECK;
+ goto RETURN_CHECK;
- case GT_COPYOBJ:
- case GT_COPYBLK:
- case GT_INITBLK:
- interferingRegs |= rpPredictBlkAsgRegUse(tree, predictReg,lockedRegs,rsvdRegs);
- regMask = 0;
- goto RETURN_CHECK;
+ case GT_COPYOBJ:
+ case GT_COPYBLK:
+ case GT_INITBLK:
+ interferingRegs |= rpPredictBlkAsgRegUse(tree, predictReg, lockedRegs, rsvdRegs);
+ regMask = 0;
+ goto RETURN_CHECK;
- case GT_OBJ:
+ case GT_OBJ:
{
#ifdef _TARGET_ARM_
if (predictReg <= PREDICT_REG)
predictReg = PREDICT_SCRATCH_REG;
- regMaskTP avoidRegs = (RBM_R12|RBM_LR); // A mask to use to force the predictor to choose low registers (to reduce code size)
+ regMaskTP avoidRegs = (RBM_R12 | RBM_LR); // A mask to use to force the predictor to choose low
+ // registers (to reduce code size)
regMask = RBM_NONE;
tmpMask = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs | avoidRegs);
#endif
if (fgIsIndirOfAddrOfLocal(tree) != NULL)
{
- compUpdateLifeVar</*ForCodeGen*/false>(tree);
+ compUpdateLifeVar</*ForCodeGen*/ false>(tree);
}
#ifdef _TARGET_ARM_
- unsigned objSize = info.compCompHnd->getClassSize(tree->gtObj.gtClass);
+ unsigned objSize = info.compCompHnd->getClassSize(tree->gtObj.gtClass);
regMaskTP preferReg = rpPredictRegMask(predictReg, TYP_I_IMPL);
// If it has one bit set, and that's an arg reg...
if (preferReg != RBM_NONE && genMaxOneBit(preferReg) && ((preferReg & RBM_ARG_REGS) != 0))
@@ -4348,7 +4286,7 @@ HANDLE_SHIFT_COUNT:
regNumber rn = genRegNumFromMask(preferReg);
// Add the registers used to pass the 'obj' to regMask.
- for (unsigned i = 0; i < objSize/4; i++)
+ for (unsigned i = 0; i < objSize / 4; i++)
{
if (rn == MAX_REG_ARG)
break;
@@ -4367,23 +4305,23 @@ HANDLE_SHIFT_COUNT:
regMask = rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | tmpMask | avoidRegs);
}
}
- tree->gtUsedRegs = (regMaskSmall)(regMask | tmpMask);
+ tree->gtUsedRegs = (regMaskSmall)(regMask | tmpMask);
goto RETURN_CHECK;
#else // !_TARGET_ARM
goto GENERIC_UNARY;
#endif // _TARGET_ARM_
}
- case GT_MKREFANY:
+ case GT_MKREFANY:
{
#ifdef _TARGET_ARM_
regMaskTP preferReg = rpPredictRegMask(predictReg, TYP_I_IMPL);
- regMask = RBM_NONE;
- if ((((preferReg-1) & preferReg) == 0) && ((preferReg & RBM_ARG_REGS) != 0))
+ regMask = RBM_NONE;
+ if ((((preferReg - 1) & preferReg) == 0) && ((preferReg & RBM_ARG_REGS) != 0))
{
// A MKREFANY takes up two registers.
regNumber rn = genRegNumFromMask(preferReg);
- regMask = RBM_NONE;
+ regMask = RBM_NONE;
if (rn < MAX_REG_ARG)
{
regMask |= genRegMask(rn);
@@ -4396,11 +4334,11 @@ HANDLE_SHIFT_COUNT:
{
// Condensation of GENERIC_BINARY path.
assert((tree->gtFlags & GTF_REVERSE_OPS) == 0);
- op2PredictReg = PREDICT_REG;
+ op2PredictReg = PREDICT_REG;
regMaskTP regMaskOp1 = rpPredictTreeRegUse(op1, predictReg, lockedRegs, rsvdRegs | op2->gtRsvdRegs);
rpPredictTreeRegUse(op2, op2PredictReg, lockedRegs | regMaskOp1, RBM_LASTUSE);
regMask |= op1->gtUsedRegs | op2->gtUsedRegs;
- tree->gtUsedRegs = (regMaskSmall)regMask;
+ tree->gtUsedRegs = (regMaskSmall)regMask;
goto RETURN_CHECK;
}
tree->gtUsedRegs = op1->gtUsedRegs;
@@ -4408,46 +4346,46 @@ HANDLE_SHIFT_COUNT:
goto GENERIC_BINARY;
}
- case GT_BOX:
- goto GENERIC_UNARY;
+ case GT_BOX:
+ goto GENERIC_UNARY;
- case GT_LOCKADD:
- goto GENERIC_BINARY;
+ case GT_LOCKADD:
+ goto GENERIC_BINARY;
- case GT_XADD:
- case GT_XCHG:
- //Ensure we can write to op2. op2 will hold the output.
- if (predictReg < PREDICT_SCRATCH_REG)
- predictReg = PREDICT_SCRATCH_REG;
+ case GT_XADD:
+ case GT_XCHG:
+ // Ensure we can write to op2. op2 will hold the output.
+ if (predictReg < PREDICT_SCRATCH_REG)
+ predictReg = PREDICT_SCRATCH_REG;
- if (tree->gtFlags & GTF_REVERSE_OPS)
- {
- op2Mask = rpPredictTreeRegUse(op2, predictReg, lockedRegs, rsvdRegs);
- regMask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, rsvdRegs|op2Mask);
- }
- else
- {
- regMask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, rsvdRegs);
- op2Mask = rpPredictTreeRegUse(op2, PREDICT_SCRATCH_REG, lockedRegs, rsvdRegs|regMask);
- }
- tree->gtUsedRegs = (regMaskSmall)(regMask | op2Mask);
- goto RETURN_CHECK;
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ {
+ op2Mask = rpPredictTreeRegUse(op2, predictReg, lockedRegs, rsvdRegs);
+ regMask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, rsvdRegs | op2Mask);
+ }
+ else
+ {
+ regMask = rpPredictTreeRegUse(op1, PREDICT_REG, lockedRegs, rsvdRegs);
+ op2Mask = rpPredictTreeRegUse(op2, PREDICT_SCRATCH_REG, lockedRegs, rsvdRegs | regMask);
+ }
+ tree->gtUsedRegs = (regMaskSmall)(regMask | op2Mask);
+ goto RETURN_CHECK;
- case GT_ARR_LENGTH:
- goto GENERIC_UNARY;
+ case GT_ARR_LENGTH:
+ goto GENERIC_UNARY;
- default:
-#ifdef DEBUG
- gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ gtDispTree(tree);
#endif
- noway_assert(!"unexpected simple operator in reg use prediction");
- break;
+ noway_assert(!"unexpected simple operator in reg use prediction");
+ break;
}
}
/* See what kind of a special operator we have here */
- switch (oper)
+ switch (oper)
{
GenTreePtr args;
GenTreeArgList* list;
@@ -4457,586 +4395,585 @@ HANDLE_SHIFT_COUNT:
regMaskTP regArgMask;
regMaskTP curArgMask;
- case GT_CALL:
+ case GT_CALL:
{
- /* initialize so we can just or in various bits */
- tree->gtUsedRegs = RBM_NONE;
+ /* initialize so we can just or in various bits */
+ tree->gtUsedRegs = RBM_NONE;
#if GTF_CALL_REG_SAVE
- /*
- * Unless the GTF_CALL_REG_SAVE flag is set,
- * we can't preserve the RBM_CALLEE_TRASH registers.
- * (likewise we can't preserve the return registers)
- * So we remove them from the lockedRegs set and
- * record any of them in the keepMask
- */
+ /*
+ * Unless the GTF_CALL_REG_SAVE flag is set,
+ * we can't preserve the RBM_CALLEE_TRASH registers.
+ * (likewise we can't preserve the return registers)
+ * So we remove them from the lockedRegs set and
+ * record any of them in the keepMask
+ */
- if (tree->gtFlags & GTF_CALL_REG_SAVE)
- {
- regMaskTP trashMask = genReturnRegForTree(tree);
+ if (tree->gtFlags & GTF_CALL_REG_SAVE)
+ {
+ regMaskTP trashMask = genReturnRegForTree(tree);
- keepMask = lockedRegs & trashMask;
- lockedRegs &= ~trashMask;
- }
- else
+ keepMask = lockedRegs & trashMask;
+ lockedRegs &= ~trashMask;
+ }
+ else
#endif
- {
- keepMask = lockedRegs & RBM_CALLEE_TRASH;
- lockedRegs &= ~RBM_CALLEE_TRASH;
- }
-
- regArgsNum = 0;
- regIndex = 0;
+ {
+ keepMask = lockedRegs & RBM_CALLEE_TRASH;
+ lockedRegs &= ~RBM_CALLEE_TRASH;
+ }
- /* Is there an object pointer? */
- if (tree->gtCall.gtCallObjp)
- {
- /* Evaluate the instance pointer first */
+ regArgsNum = 0;
+ regIndex = 0;
- args = tree->gtCall.gtCallObjp;
+ /* Is there an object pointer? */
+ if (tree->gtCall.gtCallObjp)
+ {
+ /* Evaluate the instance pointer first */
- /* the objPtr always goes to an integer register (through temp or directly) */
- noway_assert(regArgsNum == 0);
- regArgsNum++;
+ args = tree->gtCall.gtCallObjp;
- /* Must be passed in a register */
+ /* the objPtr always goes to an integer register (through temp or directly) */
+ noway_assert(regArgsNum == 0);
+ regArgsNum++;
- noway_assert(args->gtFlags & GTF_LATE_ARG);
+ /* Must be passed in a register */
- /* Must be either a deferred reg arg node or a GT_ASG node */
+ noway_assert(args->gtFlags & GTF_LATE_ARG);
- noway_assert( args->IsArgPlaceHolderNode() ||
- args->IsNothingNode() ||
- (args->gtOper == GT_ASG) ||
- args->OperIsCopyBlkOp() ||
- (args->gtOper == GT_COMMA));
+ /* Must be either a deferred reg arg node or a GT_ASG node */
- if (!args->IsArgPlaceHolderNode())
- {
- rpPredictTreeRegUse(args, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
- }
- }
- VARSET_TP VARSET_INIT_NOCOPY(startArgUseInPlaceVars, VarSetOps::UninitVal());
- VarSetOps::Assign(this, startArgUseInPlaceVars, rpUseInPlace);
-
- /* process argument list */
- for (list = tree->gtCall.gtCallArgs; list; list = list->Rest())
- {
- args = list->Current();
-
- if (args->gtFlags & GTF_LATE_ARG)
- {
- /* Must be either a Placeholder/NOP node or a GT_ASG node */
-
- noway_assert( args->IsArgPlaceHolderNode() ||
- args->IsNothingNode() ||
- (args->gtOper == GT_ASG) ||
- args->OperIsCopyBlkOp() ||
- (args->gtOper == GT_COMMA));
+ noway_assert(args->IsArgPlaceHolderNode() || args->IsNothingNode() || (args->gtOper == GT_ASG) ||
+ args->OperIsCopyBlkOp() || (args->gtOper == GT_COMMA));
if (!args->IsArgPlaceHolderNode())
{
rpPredictTreeRegUse(args, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
}
-
- regArgsNum++;
}
- else
+ VARSET_TP VARSET_INIT_NOCOPY(startArgUseInPlaceVars, VarSetOps::UninitVal());
+ VarSetOps::Assign(this, startArgUseInPlaceVars, rpUseInPlace);
+
+ /* process argument list */
+ for (list = tree->gtCall.gtCallArgs; list; list = list->Rest())
{
-#ifdef FEATURE_FIXED_OUT_ARGS
- // We'll store this argument into the outgoing argument area
- // It needs to be in a register to be stored.
- //
- predictReg = PREDICT_REG;
+ args = list->Current();
-#else // !FEATURE_FIXED_OUT_ARGS
- // We'll generate a push for this argument
- //
- predictReg = PREDICT_NONE;
- if (varTypeIsSmall(args->TypeGet()))
+ if (args->gtFlags & GTF_LATE_ARG)
{
- /* We may need to sign or zero extend a small type using a register */
- predictReg = PREDICT_SCRATCH_REG;
+ /* Must be either a Placeholder/NOP node or a GT_ASG node */
+
+ noway_assert(args->IsArgPlaceHolderNode() || args->IsNothingNode() || (args->gtOper == GT_ASG) ||
+ args->OperIsCopyBlkOp() || (args->gtOper == GT_COMMA));
+
+ if (!args->IsArgPlaceHolderNode())
+ {
+ rpPredictTreeRegUse(args, PREDICT_NONE, lockedRegs, RBM_LASTUSE);
+ }
+
+ regArgsNum++;
}
+ else
+ {
+#ifdef FEATURE_FIXED_OUT_ARGS
+ // We'll store this argument into the outgoing argument area
+ // It needs to be in a register to be stored.
+ //
+ predictReg = PREDICT_REG;
+
+#else // !FEATURE_FIXED_OUT_ARGS
+ // We'll generate a push for this argument
+ //
+ predictReg = PREDICT_NONE;
+ if (varTypeIsSmall(args->TypeGet()))
+ {
+ /* We may need to sign or zero extend a small type using a register */
+ predictReg = PREDICT_SCRATCH_REG;
+ }
#endif
- rpPredictTreeRegUse(args, predictReg, lockedRegs, RBM_LASTUSE);
+ rpPredictTreeRegUse(args, predictReg, lockedRegs, RBM_LASTUSE);
+ }
+ VarSetOps::Assign(this, rpUseInPlace, startArgUseInPlaceVars);
+ tree->gtUsedRegs |= args->gtUsedRegs;
}
- VarSetOps::Assign(this, rpUseInPlace, startArgUseInPlaceVars);
- tree->gtUsedRegs |= args->gtUsedRegs;
- }
- /* Is there a late argument list */
+ /* Is there a late argument list */
- regIndex = 0;
- regArgMask = RBM_NONE; // Set of argument registers that have already been setup.
- args = NULL;
+ regIndex = 0;
+ regArgMask = RBM_NONE; // Set of argument registers that have already been setup.
+ args = NULL;
- /* process the late argument list */
- for (list = tree->gtCall.gtCallLateArgs; list; regIndex++)
- {
- // If the current argument being copied is a promoted struct local, set this pointer to its description.
- LclVarDsc* promotedStructLocal = NULL;
+ /* process the late argument list */
+ for (list = tree->gtCall.gtCallLateArgs; list; regIndex++)
+ {
+ // If the current argument being copied is a promoted struct local, set this pointer to its description.
+ LclVarDsc* promotedStructLocal = NULL;
- curArgMask = RBM_NONE; // Set of argument registers that are going to be setup by this arg
- tmpMask = RBM_NONE; // Set of additional temp registers that are need only to setup the current arg
+                curArgMask = RBM_NONE; // Set of argument registers that are going to be set up by this arg
+                tmpMask    = RBM_NONE; // Set of additional temp registers that are needed only to set up the current arg
- assert(list->IsList());
+ assert(list->IsList());
- args = list->Current();
- list = list->Rest();
+ args = list->Current();
+ list = list->Rest();
- assert(!args->IsArgPlaceHolderNode()); // No place holders nodes are in gtCallLateArgs;
+                assert(!args->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
- fgArgTabEntryPtr curArgTabEntry = gtArgEntryByNode(tree, args);
- assert(curArgTabEntry);
+ fgArgTabEntryPtr curArgTabEntry = gtArgEntryByNode(tree, args);
+ assert(curArgTabEntry);
- regNumber regNum = curArgTabEntry->regNum; // first register use to pass this argument
- unsigned numSlots = curArgTabEntry->numSlots; // number of outgoing arg stack slots used by this argument
+                regNumber regNum = curArgTabEntry->regNum; // first register used to pass this argument
+ unsigned numSlots =
+ curArgTabEntry->numSlots; // number of outgoing arg stack slots used by this argument
- rpPredictReg argPredictReg;
- regMaskTP avoidReg = RBM_NONE;
+ rpPredictReg argPredictReg;
+ regMaskTP avoidReg = RBM_NONE;
- if (regNum != REG_STK)
- {
- argPredictReg = rpGetPredictForReg(regNum);
- curArgMask |= genRegMask(regNum);
- }
- else
- {
- assert(numSlots> 0);
- argPredictReg = PREDICT_NONE;
+ if (regNum != REG_STK)
+ {
+ argPredictReg = rpGetPredictForReg(regNum);
+ curArgMask |= genRegMask(regNum);
+ }
+ else
+ {
+ assert(numSlots > 0);
+ argPredictReg = PREDICT_NONE;
#ifdef _TARGET_ARM_
- // Force the predictor to choose a low register when regNum is REG_STK to reduce code bloat
- avoidReg = (RBM_R12|RBM_LR);
+ // Force the predictor to choose a low register when regNum is REG_STK to reduce code bloat
+ avoidReg = (RBM_R12 | RBM_LR);
#endif
- }
+ }
#ifdef _TARGET_ARM_
- // For TYP_LONG or TYP_DOUBLE register arguments we need to add the second argument register
- //
- if ((regNum != REG_STK) && ((args->TypeGet() == TYP_LONG) || (args->TypeGet() == TYP_DOUBLE)))
- {
- // 64-bit longs and doubles require 2 consecutive argument registers
- curArgMask |= genRegMask(REG_NEXT(regNum));
- }
- else if (args->TypeGet() == TYP_STRUCT)
- {
- GenTreePtr argx = args;
- GenTreePtr lclVarTree = NULL;
-
- /* The GT_OBJ may be be a child of a GT_COMMA */
- while (argx->gtOper == GT_COMMA)
+ // For TYP_LONG or TYP_DOUBLE register arguments we need to add the second argument register
+ //
+ if ((regNum != REG_STK) && ((args->TypeGet() == TYP_LONG) || (args->TypeGet() == TYP_DOUBLE)))
{
- argx = argx->gtOp.gtOp2;
+ // 64-bit longs and doubles require 2 consecutive argument registers
+ curArgMask |= genRegMask(REG_NEXT(regNum));
}
- unsigned originalSize = 0;
+ else if (args->TypeGet() == TYP_STRUCT)
+ {
+ GenTreePtr argx = args;
+ GenTreePtr lclVarTree = NULL;
+
+                    /* The GT_OBJ may be a child of a GT_COMMA */
+ while (argx->gtOper == GT_COMMA)
+ {
+ argx = argx->gtOp.gtOp2;
+ }
+ unsigned originalSize = 0;
- if (argx->gtOper == GT_OBJ)
- {
- originalSize = info.compCompHnd->getClassSize(argx->gtObj.gtClass);
+ if (argx->gtOper == GT_OBJ)
+ {
+ originalSize = info.compCompHnd->getClassSize(argx->gtObj.gtClass);
- // Is it the address of a promoted struct local?
- if (argx->gtObj.gtOp1->gtOper == GT_ADDR &&
- argx->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
+ // Is it the address of a promoted struct local?
+ if (argx->gtObj.gtOp1->gtOper == GT_ADDR && argx->gtObj.gtOp1->gtOp.gtOp1->gtOper == GT_LCL_VAR)
+ {
+ lclVarTree = argx->gtObj.gtOp1->gtOp.gtOp1;
+ LclVarDsc* varDsc = &lvaTable[lclVarTree->gtLclVarCommon.gtLclNum];
+ if (varDsc->lvPromoted)
+ promotedStructLocal = varDsc;
+ }
+ }
+ else if (argx->gtOper == GT_LCL_VAR)
{
- lclVarTree = argx->gtObj.gtOp1->gtOp.gtOp1;
- LclVarDsc* varDsc = &lvaTable[lclVarTree->gtLclVarCommon.gtLclNum];
+ varDsc = lvaTable + argx->gtLclVarCommon.gtLclNum;
+ originalSize = varDsc->lvSize();
+
+ // Is it a promoted struct local?
if (varDsc->lvPromoted)
promotedStructLocal = varDsc;
}
- }
- else if (argx->gtOper == GT_LCL_VAR)
- {
- varDsc = lvaTable + argx->gtLclVarCommon.gtLclNum;
- originalSize = varDsc->lvSize();
+ else if (argx->gtOper == GT_MKREFANY)
+ {
+ originalSize = 2 * TARGET_POINTER_SIZE;
+ }
+ else
+ {
+ noway_assert(!"Can't predict unsupported TYP_STRUCT arg kind");
+ }
- // Is it a promoted struct local?
- if (varDsc->lvPromoted)
- promotedStructLocal = varDsc;
- }
- else if (argx->gtOper == GT_MKREFANY)
- {
- originalSize = 2 * TARGET_POINTER_SIZE;
- }
- else
- {
- noway_assert(!"Can't predict unsupported TYP_STRUCT arg kind");
- }
+                    // We only pass arguments differently if it is a struct local "independently" promoted, which
+                    // allows the field locals to be independently enregistered.
+ if (promotedStructLocal != NULL)
+ {
+ if (lvaGetPromotionType(promotedStructLocal) != PROMOTION_TYPE_INDEPENDENT)
+ promotedStructLocal = NULL;
+ }
- // We only pass arguments differently if it a struct local "independently" promoted, which
- // allows the field locals can be independently enregistered.
- if (promotedStructLocal != NULL)
- {
- if (lvaGetPromotionType(promotedStructLocal) != PROMOTION_TYPE_INDEPENDENT)
- promotedStructLocal = NULL;
- }
-
- unsigned slots = ((unsigned)(roundUp(originalSize, TARGET_POINTER_SIZE))) / REGSIZE_BYTES;
+ unsigned slots = ((unsigned)(roundUp(originalSize, TARGET_POINTER_SIZE))) / REGSIZE_BYTES;
- // Are we passing a TYP_STRUCT in multiple integer registers?
- // if so set up curArgMask to reflect this
- // Also slots is updated to reflect the number of outgoing arg slots that we will write
- if (regNum != REG_STK)
- {
- regNumber regLast = (curArgTabEntry->isHfaRegArg) ? LAST_FP_ARGREG : REG_ARG_LAST;
- assert(genIsValidReg(regNum));
- regNumber nextReg = REG_NEXT(regNum);
- slots--;
- while (slots > 0 && nextReg <= regLast)
+ // Are we passing a TYP_STRUCT in multiple integer registers?
+ // if so set up curArgMask to reflect this
+ // Also slots is updated to reflect the number of outgoing arg slots that we will write
+ if (regNum != REG_STK)
{
- curArgMask |= genRegMask(nextReg);
- nextReg = REG_NEXT(nextReg);
+ regNumber regLast = (curArgTabEntry->isHfaRegArg) ? LAST_FP_ARGREG : REG_ARG_LAST;
+ assert(genIsValidReg(regNum));
+ regNumber nextReg = REG_NEXT(regNum);
slots--;
+ while (slots > 0 && nextReg <= regLast)
+ {
+ curArgMask |= genRegMask(nextReg);
+ nextReg = REG_NEXT(nextReg);
+ slots--;
+ }
}
- }
-
- if ((promotedStructLocal != NULL) && (curArgMask != RBM_NONE))
- {
- // All or a portion of this struct will be placed in the argument registers indicated by
- // "curArgMask". We build in knowledge of the order in which the code is generated here, so
- // that the second arg to be evaluated interferes with the reg for the first, the third with
- // the regs for the first and second, etc. But since we always place the stack slots before
- // placing the register slots we do not add inteferences for any part of the struct that gets
- // passed on the stack.
- argPredictReg = PREDICT_NONE; // We will target the indivual fields into registers but not the whole struct
- regMaskTP prevArgMask = RBM_NONE;
- for (unsigned i = 0; i < promotedStructLocal->lvFieldCnt; i++)
+ if ((promotedStructLocal != NULL) && (curArgMask != RBM_NONE))
{
- LclVarDsc* fieldVarDsc = &lvaTable[promotedStructLocal->lvFieldLclStart + i];
- if (fieldVarDsc->lvTracked)
+ // All or a portion of this struct will be placed in the argument registers indicated by
+ // "curArgMask". We build in knowledge of the order in which the code is generated here, so
+ // that the second arg to be evaluated interferes with the reg for the first, the third with
+ // the regs for the first and second, etc. But since we always place the stack slots before
+                    // placing the register slots we do not add interferences for any part of the struct that gets
+ // passed on the stack.
+
+ argPredictReg =
+                        PREDICT_NONE; // We will target the individual fields into registers but not the whole struct
+ regMaskTP prevArgMask = RBM_NONE;
+ for (unsigned i = 0; i < promotedStructLocal->lvFieldCnt; i++)
{
- assert (lclVarTree != NULL);
- if (prevArgMask != RBM_NONE)
+ LclVarDsc* fieldVarDsc = &lvaTable[promotedStructLocal->lvFieldLclStart + i];
+ if (fieldVarDsc->lvTracked)
{
- rpRecordRegIntf(prevArgMask, VarSetOps::MakeSingleton(this, fieldVarDsc->lvVarIndex)
- DEBUGARG("fieldVar/argReg"));
+ assert(lclVarTree != NULL);
+ if (prevArgMask != RBM_NONE)
+ {
+ rpRecordRegIntf(prevArgMask, VarSetOps::MakeSingleton(this, fieldVarDsc->lvVarIndex)
+ DEBUGARG("fieldVar/argReg"));
+ }
+ }
+                        // Now see how many registers this uses up.
+ unsigned firstRegOffset = fieldVarDsc->lvFldOffset / TARGET_POINTER_SIZE;
+ unsigned nextAfterLastRegOffset =
+ (fieldVarDsc->lvFldOffset + fieldVarDsc->lvExactSize + TARGET_POINTER_SIZE - 1) /
+ TARGET_POINTER_SIZE;
+ unsigned nextAfterLastArgRegOffset =
+ min(nextAfterLastRegOffset,
+ genIsValidIntReg(regNum) ? REG_NEXT(REG_ARG_LAST) : REG_NEXT(LAST_FP_ARGREG));
+
+ for (unsigned regOffset = firstRegOffset; regOffset < nextAfterLastArgRegOffset;
+ regOffset++)
+ {
+ prevArgMask |= genRegMask(regNumber(regNum + regOffset));
}
- }
- // Now see many registers this uses up.
- unsigned firstRegOffset = fieldVarDsc->lvFldOffset / TARGET_POINTER_SIZE;
- unsigned nextAfterLastRegOffset = (fieldVarDsc->lvFldOffset + fieldVarDsc->lvExactSize + TARGET_POINTER_SIZE - 1) / TARGET_POINTER_SIZE;
- unsigned nextAfterLastArgRegOffset = min(nextAfterLastRegOffset, genIsValidIntReg(regNum) ? REG_NEXT(REG_ARG_LAST) : REG_NEXT(LAST_FP_ARGREG));
-
- for (unsigned regOffset = firstRegOffset; regOffset < nextAfterLastArgRegOffset; regOffset++)
- {
- prevArgMask |= genRegMask(regNumber(regNum + regOffset));
- }
- if (nextAfterLastRegOffset > nextAfterLastArgRegOffset)
- {
- break;
- }
+ if (nextAfterLastRegOffset > nextAfterLastArgRegOffset)
+ {
+ break;
+ }
- if ((fieldVarDsc->lvFldOffset % TARGET_POINTER_SIZE) == 0)
- {
- // Add the argument register used here as a preferred register for this fieldVarDsc
- //
- regNumber firstRegUsed = regNumber(regNum + firstRegOffset);
- fieldVarDsc->setPrefReg(firstRegUsed, this);
+ if ((fieldVarDsc->lvFldOffset % TARGET_POINTER_SIZE) == 0)
+ {
+ // Add the argument register used here as a preferred register for this fieldVarDsc
+ //
+ regNumber firstRegUsed = regNumber(regNum + firstRegOffset);
+ fieldVarDsc->setPrefReg(firstRegUsed, this);
+ }
}
+ compUpdateLifeVar</*ForCodeGen*/ false>(argx);
}
- compUpdateLifeVar</*ForCodeGen*/false>(argx);
- }
- // If slots is greater than zero then part or all of this TYP_STRUCT
- // argument is passed in the outgoing argument area. (except HFA arg)
- //
- if ((slots > 0) && !curArgTabEntry->isHfaRegArg)
- {
- // We will need a register to address the TYP_STRUCT
- // Note that we can use an argument register in curArgMask as in
- // codegen we pass the stack portion of the argument before we
- // setup the register part.
+ // If slots is greater than zero then part or all of this TYP_STRUCT
+ // argument is passed in the outgoing argument area. (except HFA arg)
//
+ if ((slots > 0) && !curArgTabEntry->isHfaRegArg)
+ {
+ // We will need a register to address the TYP_STRUCT
+ // Note that we can use an argument register in curArgMask as in
+ // codegen we pass the stack portion of the argument before we
+ // setup the register part.
+ //
- // Force the predictor to choose a LOW_REG here to reduce code bloat
- avoidReg = (RBM_R12|RBM_LR);
+ // Force the predictor to choose a LOW_REG here to reduce code bloat
+ avoidReg = (RBM_R12 | RBM_LR);
- assert(tmpMask == RBM_NONE);
- tmpMask = rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | regArgMask | avoidReg);
+ assert(tmpMask == RBM_NONE);
+ tmpMask = rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | regArgMask | avoidReg);
- // If slots > 1 then we will need a second register to perform the load/store into the outgoing arg area
- if (slots > 1)
- {
- tmpMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | regArgMask | tmpMask | avoidReg);
+ // If slots > 1 then we will need a second register to perform the load/store into the outgoing
+ // arg area
+ if (slots > 1)
+ {
+ tmpMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG,
+ lockedRegs | regArgMask | tmpMask | avoidReg);
+ }
}
+ } // (args->TypeGet() == TYP_STRUCT)
+#endif // _TARGET_ARM_
+ // If we have a promotedStructLocal we don't need to call rpPredictTreeRegUse(args, ...
+ // as we have already calculated the correct tmpMask and curArgMask values and
+                // by calling rpPredictTreeRegUse we would just add unnecessary register interferences.
+ //
+ if (promotedStructLocal == NULL)
+ {
+ /* Target the appropriate argument register */
+ tmpMask |= rpPredictTreeRegUse(args, argPredictReg, lockedRegs | regArgMask, RBM_LASTUSE);
}
- } // (args->TypeGet() == TYP_STRUCT)
-#endif // _TARGET_ARM_
-
- // If we have a promotedStructLocal we don't need to call rpPredictTreeRegUse(args, ...
- // as we have already calculated the correct tmpMask and curArgMask values and
- // by calling rpPredictTreeRegUse we would just add unnecessary register inteferences.
- //
- if (promotedStructLocal == NULL)
- {
- /* Target the appropriate argument register */
- tmpMask |= rpPredictTreeRegUse(args, argPredictReg, lockedRegs | regArgMask, RBM_LASTUSE);
- }
- // We mark OBJ(ADDR(LOCAL)) with GTF_VAR_DEATH since the local is required to live
- // for the duration of the OBJ.
- if (args->OperGet() == GT_OBJ && (args->gtFlags & GTF_VAR_DEATH))
- {
- GenTreePtr lclVarTree = fgIsIndirOfAddrOfLocal(args);
- assert(lclVarTree != NULL); // Or else would not be marked with GTF_VAR_DEATH.
- compUpdateLifeVar</*ForCodeGen*/false>(lclVarTree);
- }
+ // We mark OBJ(ADDR(LOCAL)) with GTF_VAR_DEATH since the local is required to live
+ // for the duration of the OBJ.
+ if (args->OperGet() == GT_OBJ && (args->gtFlags & GTF_VAR_DEATH))
+ {
+ GenTreePtr lclVarTree = fgIsIndirOfAddrOfLocal(args);
+ assert(lclVarTree != NULL); // Or else would not be marked with GTF_VAR_DEATH.
+ compUpdateLifeVar</*ForCodeGen*/ false>(lclVarTree);
+ }
- regArgMask |= curArgMask;
- args->gtUsedRegs |= (tmpMask | regArgMask);
- tree->gtUsedRegs |= args->gtUsedRegs;
- tree->gtCall.gtCallLateArgs->gtUsedRegs |= args->gtUsedRegs;
+ regArgMask |= curArgMask;
+ args->gtUsedRegs |= (tmpMask | regArgMask);
+ tree->gtUsedRegs |= args->gtUsedRegs;
+ tree->gtCall.gtCallLateArgs->gtUsedRegs |= args->gtUsedRegs;
- if (args->gtUsedRegs != RBM_NONE)
- {
- // Add register interference with the set of registers used or in use when we evaluated
- // the current arg, with whatever is alive after the current arg
- //
- rpRecordRegIntf(args->gtUsedRegs, compCurLife
- DEBUGARG("register arg setup"));
+ if (args->gtUsedRegs != RBM_NONE)
+ {
+ // Add register interference with the set of registers used or in use when we evaluated
+ // the current arg, with whatever is alive after the current arg
+ //
+ rpRecordRegIntf(args->gtUsedRegs, compCurLife DEBUGARG("register arg setup"));
+ }
+ VarSetOps::Assign(this, rpUseInPlace, startArgUseInPlaceVars);
}
- VarSetOps::Assign(this, rpUseInPlace, startArgUseInPlaceVars);
- }
- assert(list == NULL);
+ assert(list == NULL);
- regMaskTP callAddrMask;
- callAddrMask = RBM_NONE;
+ regMaskTP callAddrMask;
+ callAddrMask = RBM_NONE;
#if CPU_LOAD_STORE_ARCH
- predictReg = PREDICT_SCRATCH_REG;
+ predictReg = PREDICT_SCRATCH_REG;
#else
- predictReg = PREDICT_NONE;
+ predictReg = PREDICT_NONE;
#endif
- switch (tree->gtFlags & GTF_CALL_VIRT_KIND_MASK)
- {
- case GTF_CALL_VIRT_STUB:
+ switch (tree->gtFlags & GTF_CALL_VIRT_KIND_MASK)
+ {
+ case GTF_CALL_VIRT_STUB:
- // We only want to record an interference between the virtual stub
- // param reg and anything that's live AFTER the call, but we've not
- // yet processed the indirect target. So add RBM_VIRTUAL_STUB_PARAM
- // to interferingRegs.
- interferingRegs |= RBM_VIRTUAL_STUB_PARAM;
+ // We only want to record an interference between the virtual stub
+ // param reg and anything that's live AFTER the call, but we've not
+ // yet processed the indirect target. So add RBM_VIRTUAL_STUB_PARAM
+ // to interferingRegs.
+ interferingRegs |= RBM_VIRTUAL_STUB_PARAM;
#ifdef DEBUG
- if (verbose)
- printf("Adding interference with Virtual Stub Param\n");
+ if (verbose)
+ printf("Adding interference with Virtual Stub Param\n");
#endif
- codeGen->regSet.rsSetRegsModified(RBM_VIRTUAL_STUB_PARAM);
+ codeGen->regSet.rsSetRegsModified(RBM_VIRTUAL_STUB_PARAM);
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- {
- predictReg = PREDICT_REG_VIRTUAL_STUB_PARAM;
- }
- break;
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
+ {
+ predictReg = PREDICT_REG_VIRTUAL_STUB_PARAM;
+ }
+ break;
- case GTF_CALL_VIRT_VTABLE:
- predictReg = PREDICT_SCRATCH_REG;
- break;
+ case GTF_CALL_VIRT_VTABLE:
+ predictReg = PREDICT_SCRATCH_REG;
+ break;
- case GTF_CALL_NONVIRT:
- predictReg = PREDICT_SCRATCH_REG;
- break;
- }
+ case GTF_CALL_NONVIRT:
+ predictReg = PREDICT_SCRATCH_REG;
+ break;
+ }
- if (tree->gtCall.gtCallType == CT_INDIRECT)
- {
-#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
- if (tree->gtCall.gtCallCookie)
+ if (tree->gtCall.gtCallType == CT_INDIRECT)
{
- codeGen->regSet.rsSetRegsModified(RBM_PINVOKE_COOKIE_PARAM | RBM_PINVOKE_TARGET_PARAM);
+#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
+ if (tree->gtCall.gtCallCookie)
+ {
+ codeGen->regSet.rsSetRegsModified(RBM_PINVOKE_COOKIE_PARAM | RBM_PINVOKE_TARGET_PARAM);
- callAddrMask |= rpPredictTreeRegUse(tree->gtCall.gtCallCookie, PREDICT_REG_PINVOKE_COOKIE_PARAM,
- lockedRegs | regArgMask, RBM_LASTUSE);
+ callAddrMask |= rpPredictTreeRegUse(tree->gtCall.gtCallCookie, PREDICT_REG_PINVOKE_COOKIE_PARAM,
+ lockedRegs | regArgMask, RBM_LASTUSE);
- // Just in case we predict some other registers, force interference with our two special
- // parameters: PINVOKE_COOKIE_PARAM & PINVOKE_TARGET_PARAM
- callAddrMask |= (RBM_PINVOKE_COOKIE_PARAM | RBM_PINVOKE_TARGET_PARAM);
-
- predictReg = PREDICT_REG_PINVOKE_TARGET_PARAM;
- }
+ // Just in case we predict some other registers, force interference with our two special
+ // parameters: PINVOKE_COOKIE_PARAM & PINVOKE_TARGET_PARAM
+ callAddrMask |= (RBM_PINVOKE_COOKIE_PARAM | RBM_PINVOKE_TARGET_PARAM);
+
+ predictReg = PREDICT_REG_PINVOKE_TARGET_PARAM;
+ }
#endif
- callAddrMask |= rpPredictTreeRegUse(tree->gtCall.gtCallAddr, predictReg,
- lockedRegs | regArgMask, RBM_LASTUSE);
- }
- else if (predictReg != PREDICT_NONE)
- {
- callAddrMask |= rpPredictRegPick(TYP_I_IMPL, predictReg,
- lockedRegs | regArgMask);
- }
+ callAddrMask |=
+ rpPredictTreeRegUse(tree->gtCall.gtCallAddr, predictReg, lockedRegs | regArgMask, RBM_LASTUSE);
+ }
+ else if (predictReg != PREDICT_NONE)
+ {
+ callAddrMask |= rpPredictRegPick(TYP_I_IMPL, predictReg, lockedRegs | regArgMask);
+ }
- if (tree->gtFlags & GTF_CALL_UNMANAGED)
- {
- // Need a register for tcbReg
- callAddrMask |= rpPredictRegPick(TYP_I_IMPL, PREDICT_SCRATCH_REG, lockedRegs | regArgMask | callAddrMask);
+ if (tree->gtFlags & GTF_CALL_UNMANAGED)
+ {
+ // Need a register for tcbReg
+ callAddrMask |=
+ rpPredictRegPick(TYP_I_IMPL, PREDICT_SCRATCH_REG, lockedRegs | regArgMask | callAddrMask);
#if CPU_LOAD_STORE_ARCH
- // Need an extra register for tmpReg
- callAddrMask |= rpPredictRegPick(TYP_I_IMPL, PREDICT_SCRATCH_REG, lockedRegs | regArgMask | callAddrMask);
+ // Need an extra register for tmpReg
+ callAddrMask |=
+ rpPredictRegPick(TYP_I_IMPL, PREDICT_SCRATCH_REG, lockedRegs | regArgMask | callAddrMask);
#endif
- }
+ }
- tree->gtUsedRegs |= callAddrMask;
+ tree->gtUsedRegs |= callAddrMask;
- /* After the call restore the orginal value of lockedRegs */
- lockedRegs |= keepMask;
+ /* After the call restore the original value of lockedRegs */
+ lockedRegs |= keepMask;
- /* set the return register */
- regMask = genReturnRegForTree(tree);
+ /* set the return register */
+ regMask = genReturnRegForTree(tree);
- if (regMask & rsvdRegs)
- {
- // We will need to relocate the return register value
- regMaskTP intRegMask = (regMask & RBM_ALLINT);
+ if (regMask & rsvdRegs)
+ {
+ // We will need to relocate the return register value
+ regMaskTP intRegMask = (regMask & RBM_ALLINT);
#if FEATURE_FP_REGALLOC
- regMaskTP floatRegMask = (regMask & RBM_ALLFLOAT);
+ regMaskTP floatRegMask = (regMask & RBM_ALLFLOAT);
#endif
- regMask = RBM_NONE;
+ regMask = RBM_NONE;
- if (intRegMask)
- {
- if (intRegMask == RBM_INTRET)
- {
- regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, rsvdRegs | regMask);
- }
- else if (intRegMask == RBM_LNGRET)
- {
- regMask |= rpPredictRegPick(TYP_LONG, PREDICT_SCRATCH_REG, rsvdRegs | regMask);
- }
- else
+ if (intRegMask)
{
- noway_assert(!"unexpected return regMask");
+ if (intRegMask == RBM_INTRET)
+ {
+ regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, rsvdRegs | regMask);
+ }
+ else if (intRegMask == RBM_LNGRET)
+ {
+ regMask |= rpPredictRegPick(TYP_LONG, PREDICT_SCRATCH_REG, rsvdRegs | regMask);
+ }
+ else
+ {
+ noway_assert(!"unexpected return regMask");
+ }
}
- }
#if FEATURE_FP_REGALLOC
- if (floatRegMask)
- {
- if (floatRegMask == RBM_FLOATRET)
+ if (floatRegMask)
{
- regMask |= rpPredictRegPick(TYP_FLOAT, PREDICT_SCRATCH_REG, rsvdRegs | regMask);
- }
- else if (floatRegMask == RBM_DOUBLERET)
- {
- regMask |= rpPredictRegPick(TYP_DOUBLE, PREDICT_SCRATCH_REG, rsvdRegs | regMask);
- }
- else // HFA return case
- {
- for (unsigned f=0; f<genCountBits(floatRegMask); f++)
+ if (floatRegMask == RBM_FLOATRET)
{
regMask |= rpPredictRegPick(TYP_FLOAT, PREDICT_SCRATCH_REG, rsvdRegs | regMask);
}
+ else if (floatRegMask == RBM_DOUBLERET)
+ {
+ regMask |= rpPredictRegPick(TYP_DOUBLE, PREDICT_SCRATCH_REG, rsvdRegs | regMask);
+ }
+ else // HFA return case
+ {
+ for (unsigned f = 0; f < genCountBits(floatRegMask); f++)
+ {
+ regMask |= rpPredictRegPick(TYP_FLOAT, PREDICT_SCRATCH_REG, rsvdRegs | regMask);
+ }
+ }
}
- }
#endif
- }
+ }
- /* the return registers (if any) are killed */
- tree->gtUsedRegs |= regMask;
+ /* the return registers (if any) are killed */
+ tree->gtUsedRegs |= regMask;
#if GTF_CALL_REG_SAVE
- if (!(tree->gtFlags & GTF_CALL_REG_SAVE))
+ if (!(tree->gtFlags & GTF_CALL_REG_SAVE))
#endif
- {
- /* the RBM_CALLEE_TRASH set are killed (i.e. EAX,ECX,EDX) */
- tree->gtUsedRegs |= RBM_CALLEE_TRASH;
- }
+ {
+ /* the RBM_CALLEE_TRASH set are killed (i.e. EAX,ECX,EDX) */
+ tree->gtUsedRegs |= RBM_CALLEE_TRASH;
+ }
}
#if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED)
- // Mark required registers for emitting tailcall profiler callback as used
- if (compIsProfilerHookNeeded() &&
- tree->gtCall.IsTailCall() &&
- (tree->gtCall.gtCallType == CT_USER_FUNC))
- {
- tree->gtUsedRegs |= RBM_PROFILER_TAIL_USED;
- }
+ // Mark required registers for emitting tailcall profiler callback as used
+ if (compIsProfilerHookNeeded() && tree->gtCall.IsTailCall() && (tree->gtCall.gtCallType == CT_USER_FUNC))
+ {
+ tree->gtUsedRegs |= RBM_PROFILER_TAIL_USED;
+ }
#endif
- break;
+ break;
- case GT_ARR_ELEM:
+ case GT_ARR_ELEM:
- // Figure out which registers can't be touched
- unsigned dim;
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- rsvdRegs |= tree->gtArrElem.gtArrInds[dim]->gtRsvdRegs;
+ // Figure out which registers can't be touched
+ unsigned dim;
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ rsvdRegs |= tree->gtArrElem.gtArrInds[dim]->gtRsvdRegs;
- regMask = rpPredictTreeRegUse(tree->gtArrElem.gtArrObj, PREDICT_REG, lockedRegs, rsvdRegs);
+ regMask = rpPredictTreeRegUse(tree->gtArrElem.gtArrObj, PREDICT_REG, lockedRegs, rsvdRegs);
- regMaskTP dimsMask; dimsMask = 0;
+ regMaskTP dimsMask;
+ dimsMask = 0;
#if CPU_LOAD_STORE_ARCH
- // We need a register to load the bounds of the MD array
- regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs|regMask);
+ // We need a register to load the bounds of the MD array
+ regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | regMask);
#endif
- for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
- {
- /* We need scratch registers to compute index-lower_bound.
- Also, gtArrInds[0]'s register will be used as the second
- addressability register (besides gtArrObj's) */
+ for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++)
+ {
+ /* We need scratch registers to compute index-lower_bound.
+ Also, gtArrInds[0]'s register will be used as the second
+ addressability register (besides gtArrObj's) */
- regMaskTP dimMask = rpPredictTreeRegUse(tree->gtArrElem.gtArrInds[dim],
- PREDICT_SCRATCH_REG, lockedRegs|regMask|dimsMask, rsvdRegs);
- if (dim == 0)
- regMask |= dimMask;
+ regMaskTP dimMask = rpPredictTreeRegUse(tree->gtArrElem.gtArrInds[dim], PREDICT_SCRATCH_REG,
+ lockedRegs | regMask | dimsMask, rsvdRegs);
+ if (dim == 0)
+ regMask |= dimMask;
- dimsMask |= dimMask;
- }
+ dimsMask |= dimMask;
+ }
#ifdef _TARGET_XARCH_
- // INS_imul doesnt have an immediate constant.
- if (!jitIsScaleIndexMul(tree->gtArrElem.gtArrElemSize))
- regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs|regMask|dimsMask);
+ // INS_imul doesn't have an immediate constant.
+ if (!jitIsScaleIndexMul(tree->gtArrElem.gtArrElemSize))
+ regMask |= rpPredictRegPick(TYP_INT, PREDICT_SCRATCH_REG, lockedRegs | regMask | dimsMask);
#endif
- tree->gtUsedRegs = (regMaskSmall)(regMask | dimsMask);
- break;
+ tree->gtUsedRegs = (regMaskSmall)(regMask | dimsMask);
+ break;
- case GT_CMPXCHG:
+ case GT_CMPXCHG:
{
#ifdef _TARGET_XARCH_
rsvdRegs |= RBM_EAX;
#endif
if (tree->gtCmpXchg.gtOpLocation->OperGet() == GT_LCL_VAR)
{
- regMask = rpPredictTreeRegUse(tree->gtCmpXchg.gtOpLocation, PREDICT_REG, lockedRegs, rsvdRegs);
+ regMask = rpPredictTreeRegUse(tree->gtCmpXchg.gtOpLocation, PREDICT_REG, lockedRegs, rsvdRegs);
}
else
{
regMask = rpPredictTreeRegUse(tree->gtCmpXchg.gtOpLocation, PREDICT_ADDR, lockedRegs, rsvdRegs);
}
- op2Mask = rpPredictTreeRegUse(tree->gtCmpXchg.gtOpValue, PREDICT_REG, lockedRegs, rsvdRegs|regMask);
+ op2Mask = rpPredictTreeRegUse(tree->gtCmpXchg.gtOpValue, PREDICT_REG, lockedRegs, rsvdRegs | regMask);
#ifdef _TARGET_XARCH_
rsvdRegs &= ~RBM_EAX;
tmpMask = rpPredictTreeRegUse(tree->gtCmpXchg.gtOpComparand, PREDICT_REG_EAX, lockedRegs,
- rsvdRegs|regMask|op2Mask);
+ rsvdRegs | regMask | op2Mask);
tree->gtUsedRegs = (regMaskSmall)(RBM_EAX | regMask | op2Mask | tmpMask);
- predictReg = PREDICT_REG_EAX; //When this is done the result is always in EAX.
+ predictReg = PREDICT_REG_EAX; // When this is done the result is always in EAX.
#else
- tmpMask = 0;
- tree->gtUsedRegs = (regMaskSmall) (regMask | op2Mask | tmpMask);
+ tmpMask = 0;
+ tree->gtUsedRegs = (regMaskSmall)(regMask | op2Mask | tmpMask);
#endif
}
break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
{
regMaskTP opArrLenRsvd = rsvdRegs | tree->gtBoundsChk.gtIndex->gtRsvdRegs;
regMask = rpPredictTreeRegUse(tree->gtBoundsChk.gtArrLen, PREDICT_REG, lockedRegs, opArrLenRsvd);
rpPredictTreeRegUse(tree->gtBoundsChk.gtIndex, PREDICT_REG, lockedRegs | regMask, RBM_LASTUSE);
- tree->gtUsedRegs = (regMaskSmall)regMask
- | tree->gtBoundsChk.gtArrLen->gtUsedRegs
- | tree->gtBoundsChk.gtIndex->gtUsedRegs;
+ tree->gtUsedRegs =
+ (regMaskSmall)regMask | tree->gtBoundsChk.gtArrLen->gtUsedRegs | tree->gtBoundsChk.gtIndex->gtUsedRegs;
}
break;
- default:
- NO_WAY("unexpected special operator in reg use prediction");
- break;
+ default:
+ NO_WAY("unexpected special operator in reg use prediction");
+ break;
}
RETURN_CHECK:
@@ -5066,16 +5003,14 @@ RETURN_CHECK:
/* Find the next register that needs to be spilled */
tmpMask = genFindLowestBit(spillMask);
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
- printf("Predict spill of %s before: ",
- getRegName(genRegNumFromMask(tmpMask)));
+ printf("Predict spill of %s before: ", getRegName(genRegNumFromMask(tmpMask)));
gtDispTree(tree, 0, NULL, true);
if ((tmpMask & regMask) == 0)
{
- printf("Predict reload of %s after : ",
- getRegName(genRegNumFromMask(tmpMask)));
+ printf("Predict reload of %s after : ", getRegName(genRegNumFromMask(tmpMask)));
gtDispTree(tree, 0, NULL, true);
}
}
@@ -5111,13 +5046,11 @@ RETURN_CHECK:
while (spillMask)
{
/* Get an extra register to hold it */
- regMaskTP reloadReg = rpPredictRegPick(TYP_INT, PREDICT_REG,
- lockedRegs | regMask | reloadMask);
-#ifdef DEBUG
+ regMaskTP reloadReg = rpPredictRegPick(TYP_INT, PREDICT_REG, lockedRegs | regMask | reloadMask);
+#ifdef DEBUG
if (verbose)
{
- printf("Predict reload into %s after : ",
- getRegName(genRegNumFromMask(reloadReg)));
+ printf("Predict reload into %s after : ", getRegName(genRegNumFromMask(reloadReg)));
gtDispTree(tree, 0, NULL, true);
}
#endif
@@ -5134,8 +5067,7 @@ RETURN_CHECK:
tree->gtUsedRegs |= (regMaskSmall)regMask;
}
-
- regMaskTP regUse = tree->gtUsedRegs;
+ regMaskTP regUse = tree->gtUsedRegs;
regUse |= interferingRegs;
if (!VarSetOps::IsEmpty(this, compCurLife))
@@ -5144,7 +5076,7 @@ RETURN_CHECK:
// the set of temporary registers need to evaluate the sub tree
if (regUse)
{
- rpRecordRegIntf(regUse, compCurLife DEBUGARG( "tmp use"));
+ rpRecordRegIntf(regUse, compCurLife DEBUGARG("tmp use"));
}
}
@@ -5154,19 +5086,17 @@ RETURN_CHECK:
// and the assignment target variable
if (regUse)
{
- rpRecordRegIntf(regUse, VarSetOps::MakeSingleton(this, rpAsgVarNum)
- DEBUGARG("tgt var tmp use"));
+ rpRecordRegIntf(regUse, VarSetOps::MakeSingleton(this, rpAsgVarNum) DEBUGARG("tgt var tmp use"));
}
// Add a variable interference from rpAsgVarNum (i.e. the enregistered left hand
// side of the assignment passed here using PREDICT_REG_VAR_Txx)
// to the set of currently live variables. This new interference will prevent us
- // from using the register value used here for enregistering different live variable
+ // from using the register value used here for enregistering a different live variable
//
if (!VarSetOps::IsEmpty(this, compCurLife))
{
- rpRecordVarIntf(rpAsgVarNum, compCurLife
- DEBUGARG( "asg tgt live conflict"));
+ rpRecordVarIntf(rpAsgVarNum, compCurLife DEBUGARG("asg tgt live conflict"));
}
}
@@ -5179,9 +5109,8 @@ RETURN_CHECK:
*/
if (!VarSetOps::Equal(this, rpLastUseVars, oldLastUseVars) && rpAsgVarNum != -1)
{
- rpRecordVarIntf(rpAsgVarNum,
- VarSetOps::Diff(this, rpLastUseVars, oldLastUseVars)
- DEBUGARG( "asgn tgt last use conflict"));
+ rpRecordVarIntf(rpAsgVarNum, VarSetOps::Diff(this, rpLastUseVars, oldLastUseVars)
+ DEBUGARG("asgn tgt last use conflict"));
}
VarSetOps::Assign(this, rpLastUseVars, oldLastUseVars);
}
@@ -5194,18 +5123,17 @@ RETURN_CHECK:
#endif // LEGACY_BACKEND
-
/****************************************************************************/
/* Returns true when we must create an EBP frame
This is used to force most managed methods to have EBP based frames
which allows the ETW kernel stackwalker to walk the stacks of managed code
this allows the kernel to perform light weight profiling
*/
-bool Compiler::rpMustCreateEBPFrame(INDEBUG( const char ** wbReason))
+bool Compiler::rpMustCreateEBPFrame(INDEBUG(const char** wbReason))
{
- bool result = false;
+ bool result = false;
#ifdef DEBUG
- const char * reason = NULL;
+ const char* reason = nullptr;
#endif
#if ETW_EBP_FRAMED
@@ -5214,27 +5142,27 @@ bool Compiler::rpMustCreateEBPFrame(INDEBUG( const char ** wbReason))
INDEBUG(reason = "Debug Code");
result = true;
}
- if (!result && (info.compMethodInfo->ILCodeSize > DEFAULT_MAX_INLINE_SIZE))
+ if (!result && (info.compMethodInfo->ILCodeSize > DEFAULT_MAX_INLINE_SIZE))
{
INDEBUG(reason = "IL Code Size");
result = true;
}
- if (!result && (fgBBcount > 3))
+ if (!result && (fgBBcount > 3))
{
INDEBUG(reason = "BasicBlock Count");
result = true;
}
- if (!result && fgHasLoops)
+ if (!result && fgHasLoops)
{
INDEBUG(reason = "Method has Loops");
result = true;
}
- if (!result && (optCallCount >= 2))
+ if (!result && (optCallCount >= 2))
{
INDEBUG(reason = "Call Count");
result = true;
}
- if (!result && (optIndirectCallCount >= 1))
+ if (!result && (optIndirectCallCount >= 1))
{
INDEBUG(reason = "Indirect Call");
result = true;
@@ -5243,15 +5171,16 @@ bool Compiler::rpMustCreateEBPFrame(INDEBUG( const char ** wbReason))
// VM wants to identify the containing frame of an InlinedCallFrame always
// via the frame register never the stack register so we need a frame.
- if (!result && (optNativeCallCount != 0))
+ if (!result && (optNativeCallCount != 0))
{
INDEBUG(reason = "Uses PInvoke");
result = true;
}
#ifdef _TARGET_ARM64_
- // TODO-ARM64-NYI: This is temporary: force a frame pointer-based frame until genFnProlog can handle non-frame pointer frames.
- if (!result)
+ // TODO-ARM64-NYI: This is temporary: force a frame pointer-based frame until genFnProlog can handle non-frame
+ // pointer frames.
+ if (!result)
{
INDEBUG(reason = "Temporary ARM64 force frame pointer");
result = true;
@@ -5259,7 +5188,7 @@ bool Compiler::rpMustCreateEBPFrame(INDEBUG( const char ** wbReason))
#endif // _TARGET_ARM64_
#ifdef DEBUG
- if ((result == true) && (wbReason != NULL))
+ if ((result == true) && (wbReason != nullptr))
{
*wbReason = reason;
}
@@ -5283,11 +5212,11 @@ bool Compiler::rpMustCreateEBPFrame(INDEBUG( const char ** wbReason))
#ifdef _PREFAST_
#pragma warning(push)
-#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
{
- unsigned regInx;
+ unsigned regInx;
if (rpPasses <= rpPassesPessimize)
{
@@ -5326,12 +5255,10 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
if (regAvail == RBM_NONE)
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++, varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
#if FEATURE_STACK_FP_X87
if (!varDsc->IsFloatRegType())
@@ -5344,12 +5271,13 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
}
}
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
printf("\nCompiler::rpPredictAssignRegVars pass #%d", rpPasses);
- printf("\n Available registers = ");
- dspRegMask(regAvail); printf("\n");
+ printf("\n Available registers = ");
+ dspRegMask(regAvail);
+ printf("\n");
}
#endif
@@ -5363,7 +5291,7 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
/* Which will change the order in which we select the */
/* locals for enregistering. */
- assert(lvaTrackedFixed); // We should have already set this to prevent us from adding any new tracked variables.
+ assert(lvaTrackedFixed); // We should have already set this to prevent us from adding any new tracked variables.
// Should not be set unless optimizing
noway_assert((lvaSortAgain == false) || (opts.MinOpts() == false));
@@ -5377,12 +5305,12 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
/* Initialize the weighted count of variables that could have */
/* been enregistered but weren't */
- unsigned refCntStk = 0; // sum of ref counts for all stack based variables
- unsigned refCntEBP = 0; // sum of ref counts for EBP enregistered variables
- unsigned refCntWtdEBP = 0; // sum of wtd ref counts for EBP enregistered variables
+ unsigned refCntStk = 0; // sum of ref counts for all stack based variables
+ unsigned refCntEBP = 0; // sum of ref counts for EBP enregistered variables
+ unsigned refCntWtdEBP = 0; // sum of wtd ref counts for EBP enregistered variables
#if DOUBLE_ALIGN
- unsigned refCntStkParam ; // sum of ref counts for all stack based parameters
- unsigned refCntWtdStkDbl ; // sum of wtd ref counts for stack based doubles
+ unsigned refCntStkParam; // sum of ref counts for all stack based parameters
+ unsigned refCntWtdStkDbl; // sum of wtd ref counts for stack based doubles
#if FEATURE_STACK_FP_X87
refCntStkParam = raCntStkParamDblStackFP;
@@ -5397,7 +5325,7 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
#endif // DOUBLE_ALIGN
/* Set of registers used to enregister variables in the prediction */
- regMaskTP regUsed = RBM_NONE;
+ regMaskTP regUsed = RBM_NONE;
/*-------------------------------------------------------------------------
*
@@ -5408,36 +5336,36 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
VARSET_TP VARSET_INIT_NOCOPY(unprocessedVars, VarSetOps::MakeFull(this));
unsigned FPRegVarLiveInCnt;
- FPRegVarLiveInCnt = 0; // How many enregistered doubles are live on entry to the method
+ FPRegVarLiveInCnt = 0; // How many enregistered doubles are live on entry to the method
- LclVarDsc * varDsc;
+ LclVarDsc* varDsc;
for (unsigned sortNum = 0; sortNum < lvaCount; sortNum++)
{
bool notWorthy = false;
-
- unsigned varIndex;
- bool isDouble;
+
+ unsigned varIndex;
+ bool isDouble;
regMaskTP regAvailForType;
var_types regType;
- regMaskTP avoidReg;
- unsigned customVarOrderSize;
- regNumber customVarOrder[MAX_VAR_ORDER_SIZE];
+ regMaskTP avoidReg;
+ unsigned customVarOrderSize;
+ regNumber customVarOrder[MAX_VAR_ORDER_SIZE];
bool firstHalf;
regNumber saveOtherReg;
- varDsc = lvaRefSorted[sortNum];
+ varDsc = lvaRefSorted[sortNum];
-#if FEATURE_STACK_FP_X87
+#if FEATURE_STACK_FP_X87
if (varTypeIsFloating(varDsc->TypeGet()))
{
-#ifdef DEBUG
- if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
+#ifdef DEBUG
+ if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
// Field local of a PROMOTION_TYPE_DEPENDENT struct should not
// be en-registered.
noway_assert(!varDsc->lvRegister);
- }
-#endif
+ }
+#endif
continue;
}
#endif
@@ -5445,9 +5373,9 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
/* Check the set of invariant things that would prevent enregistration */
/* Ignore the variable if it's not tracked */
- if (!varDsc->lvTracked)
+ if (!varDsc->lvTracked)
goto CANT_REG;
-
+
/* Get hold of the index and the interference mask for the variable */
varIndex = varDsc->lvVarIndex;
@@ -5456,13 +5384,13 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
// Skip the variable if it's marked as DoNotEnregister.
- if (varDsc->lvDoNotEnregister)
+ if (varDsc->lvDoNotEnregister)
goto CANT_REG;
/* TODO: For now if we have JMP all register args go to stack
* TODO: Later consider extending the life of the argument or make a copy of it */
- if (compJmpOpUsed && varDsc->lvIsRegArg)
+ if (compJmpOpUsed && varDsc->lvIsRegArg)
goto CANT_REG;
/* Skip the variable if the ref count is zero */
@@ -5472,15 +5400,15 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
/* Ignore field of PROMOTION_TYPE_DEPENDENT type of promoted struct */
- if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
+ if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
goto CANT_REG;
}
/* Is the unweighted ref count too low to be interesting? */
- if (!varDsc->lvIsStructField && // We do encourage enregistering field locals.
- (varDsc->lvRefCnt <= 1))
+ if (!varDsc->lvIsStructField && // We do encourage enregistering field locals.
+ (varDsc->lvRefCnt <= 1))
{
/* Sometimes it's useful to enregister a variable with only one use */
/* arguments referenced in loops are one example */
@@ -5496,16 +5424,16 @@ regMaskTP Compiler::rpPredictAssignRegVars(regMaskTP regAvail)
goto CANT_REG;
}
-OK_TO_ENREGISTER:
+ OK_TO_ENREGISTER:
if (varTypeIsFloating(varDsc->TypeGet()))
{
- regType = varDsc->TypeGet();
+ regType = varDsc->TypeGet();
regAvailForType = regAvail & RBM_ALLFLOAT;
}
else
{
- regType = TYP_INT;
+ regType = TYP_INT;
regAvailForType = regAvail & RBM_ALLINT;
}
@@ -5514,7 +5442,7 @@ OK_TO_ENREGISTER:
if (isDouble)
{
- regAvailForType &= RBM_DBL_REGS; // We must restrict the set to the double registers
+ regAvailForType &= RBM_DBL_REGS; // We must restrict the set to the double registers
}
#endif
@@ -5523,12 +5451,12 @@ OK_TO_ENREGISTER:
goto NO_REG;
// On the pessimize passes don't even try to enregister LONGS
- if (isRegPairType(varDsc->lvType))
+ if (isRegPairType(varDsc->lvType))
{
if (rpPasses > rpPassesPessimize)
- goto NO_REG;
+ goto NO_REG;
else if (rpLostEnreg && (rpPasses == rpPassesPessimize))
- goto NO_REG;
+ goto NO_REG;
}
// Set of registers to avoid when performing register allocation
@@ -5542,17 +5470,17 @@ OK_TO_ENREGISTER:
if (raAvoidArgRegMask != 0)
{
- LclVarDsc * argDsc;
- LclVarDsc * argsEnd = lvaTable + info.compArgsCount;
+ LclVarDsc* argDsc;
+ LclVarDsc* argsEnd = lvaTable + info.compArgsCount;
for (argDsc = lvaTable; argDsc < argsEnd; argDsc++)
{
if (!argDsc->lvIsRegArg)
continue;
- bool isFloat = argDsc->IsFloatRegType();
- regNumber inArgReg = argDsc->lvArgReg;
- regMaskTP inArgBit = genRegMask(inArgReg);
+ bool isFloat = argDsc->IsFloatRegType();
+ regNumber inArgReg = argDsc->lvArgReg;
+ regMaskTP inArgBit = genRegMask(inArgReg);
// Is this inArgReg in the raAvoidArgRegMask set?
@@ -5562,11 +5490,11 @@ OK_TO_ENREGISTER:
noway_assert(argDsc->lvIsParam);
noway_assert(inArgBit & (isFloat ? RBM_FLTARG_REGS : RBM_ARG_REGS));
- unsigned locVarIndex = varDsc->lvVarIndex;
- unsigned argVarIndex = argDsc->lvVarIndex;
+ unsigned locVarIndex = varDsc->lvVarIndex;
+ unsigned argVarIndex = argDsc->lvVarIndex;
/* Does this variable interfere with the arg variable ? */
- if (VarSetOps::IsMember(this, lvaVarIntf[locVarIndex], argVarIndex))
+ if (VarSetOps::IsMember(this, lvaVarIntf[locVarIndex], argVarIndex))
{
noway_assert(VarSetOps::IsMember(this, lvaVarIntf[argVarIndex], locVarIndex));
/* Yes, so try to avoid the incoming arg reg */
@@ -5590,20 +5518,18 @@ OK_TO_ENREGISTER:
firstHalf = false;
saveOtherReg = DUMMY_INIT(REG_NA);
- for (regInx = 0;
- regInx < customVarOrderSize;
- regInx++)
+ for (regInx = 0; regInx < customVarOrderSize; regInx++)
{
- regNumber regNum = customVarOrder[regInx];
- regMaskTP regBits = genRegMask(regNum);
+ regNumber regNum = customVarOrder[regInx];
+ regMaskTP regBits = genRegMask(regNum);
/* Skip this register if it isn't available */
- if ((regAvailForType & regBits) == 0)
+ if ((regAvailForType & regBits) == 0)
continue;
/* Skip this register if it interferes with the variable */
- if (VarSetOps::IsMember(this, raLclRegIntf[regNum], varIndex))
+ if (VarSetOps::IsMember(this, raLclRegIntf[regNum], varIndex))
continue;
if (varTypeIsFloating(regType))
@@ -5615,7 +5541,7 @@ OK_TO_ENREGISTER:
regBits |= genRegMask(regNext);
/* Skip if regNext interferes with the variable */
- if (VarSetOps::IsMember(this, raLclRegIntf[regNext], varIndex))
+ if (VarSetOps::IsMember(this, raLclRegIntf[regNext], varIndex))
continue;
}
#endif
@@ -5628,20 +5554,19 @@ OK_TO_ENREGISTER:
/* Skip this register if the weighted ref count is less than two
and we are considering an unused callee saved register */
- if (lessThanTwoRefWtd && // less than two references (weighted)
- firstUseOfReg && // first use of this register
- calleeSavedReg) // callee saved register
+ if (lessThanTwoRefWtd && // less than two references (weighted)
+ firstUseOfReg && // first use of this register
+ calleeSavedReg) // callee saved register
{
unsigned int totalRefCntWtd = varDsc->lvRefCntWtd;
// psc is an abbreviation for possibleSameColor
- VARSET_TP VARSET_INIT_NOCOPY(pscVarSet, VarSetOps::Diff(this, unprocessedVars,
- lvaVarIntf[varIndex]));
-
+ VARSET_TP VARSET_INIT_NOCOPY(pscVarSet, VarSetOps::Diff(this, unprocessedVars, lvaVarIntf[varIndex]));
+
VARSET_ITER_INIT(this, pscIndexIter, pscVarSet, pscIndex);
while (pscIndexIter.NextElem(this, &pscIndex))
{
- LclVarDsc * pscVar = lvaTable + lvaTrackedToVarNum[pscIndex];
+ LclVarDsc* pscVar = lvaTable + lvaTrackedToVarNum[pscIndex];
totalRefCntWtd += pscVar->lvRefCntWtd;
if (totalRefCntWtd > (2 * BB_UNITY_WEIGHT))
break;
@@ -5650,20 +5575,19 @@ OK_TO_ENREGISTER:
if (totalRefCntWtd <= (2 * BB_UNITY_WEIGHT))
{
notWorthy = true;
- continue; // not worth spilling a callee saved register
+ continue; // not worth spilling a callee saved register
}
// otherwise we will spill this callee saved register,
- // because its uses when combined with the uses of
+ // because its uses when combined with the uses of
// other yet to be processed candidates exceed our threshold.
// totalRefCntWtd = totalRefCntWtd;
}
-
/* Looks good - mark the variable as living in the register */
- if (isRegPairType(varDsc->lvType))
+ if (isRegPairType(varDsc->lvType))
{
- if (firstHalf == false)
+ if (firstHalf == false)
{
/* Enregister the first half of the long */
varDsc->lvRegNum = regNum;
@@ -5676,7 +5600,7 @@ OK_TO_ENREGISTER:
/* Ensure 'well-formed' register pairs */
/* (those returned by gen[Pick|Grab]RegPair) */
- if (regNum < varDsc->lvRegNum)
+ if (regNum < varDsc->lvRegNum)
{
varDsc->lvOtherReg = varDsc->lvRegNum;
varDsc->lvRegNum = regNum;
@@ -5696,12 +5620,12 @@ OK_TO_ENREGISTER:
{
varDsc->lvOtherReg = REG_NEXT(regNum);
}
-#endif
+#endif
}
if (regNum == REG_FPBASE)
{
- refCntEBP += varDsc->lvRefCnt;
+ refCntEBP += varDsc->lvRefCnt;
refCntWtdEBP += varDsc->lvRefCntWtd;
#if DOUBLE_ALIGN
if (varDsc->lvIsParam)
@@ -5770,7 +5694,7 @@ OK_TO_ENREGISTER:
goto ENREG_VAR;
}
-NO_REG:;
+ NO_REG:;
if (varDsc->lvDependReg)
{
rpLostEnreg = true;
@@ -5780,12 +5704,12 @@ NO_REG:;
{
/* Weighted count of variables that could have been enregistered but weren't */
raAddToStkPredict(varDsc->lvRefCntWtd);
-
+
if (isRegPairType(varDsc->lvType) && (varDsc->lvOtherReg == REG_STK))
raAddToStkPredict(varDsc->lvRefCntWtd);
}
-CANT_REG:;
+ CANT_REG:;
varDsc->lvRegister = false;
varDsc->lvRegNum = REG_STK;
@@ -5811,8 +5735,8 @@ CANT_REG:;
}
}
#endif
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("; ");
gtDispLclVar((unsigned)(varDsc - lvaTable));
@@ -5820,17 +5744,15 @@ CANT_REG:;
printf("T%02u", varDsc->lvVarIndex);
else
printf(" ");
- printf(" (refcnt=%2u,refwtd=%s) not enregistered",
- varDsc->lvRefCnt,
- refCntWtd2str(varDsc->lvRefCntWtd));
- if (varDsc->lvDoNotEnregister)
+ printf(" (refcnt=%2u,refwtd=%s) not enregistered", varDsc->lvRefCnt, refCntWtd2str(varDsc->lvRefCntWtd));
+ if (varDsc->lvDoNotEnregister)
printf(", do-not-enregister");
printf("\n");
}
#endif
continue;
-ENREG_VAR:;
+ ENREG_VAR:;
varDsc->lvRegister = true;
@@ -5844,19 +5766,18 @@ ENREG_VAR:;
}
}
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
printf("; ");
gtDispLclVar((unsigned)(varDsc - lvaTable));
- printf("T%02u (refcnt=%2u,refwtd=%s) predicted to be assigned to ",
- varIndex, varDsc->lvRefCnt,
+ printf("T%02u (refcnt=%2u,refwtd=%s) predicted to be assigned to ", varIndex, varDsc->lvRefCnt,
refCntWtd2str(varDsc->lvRefCntWtd));
varDsc->PrintVarReg();
#ifdef _TARGET_ARM_
if (isDouble)
{
- printf(":%s", getRegName(varDsc->lvOtherReg));
+ printf(":%s", getRegName(varDsc->lvOtherReg));
}
#endif
printf("\n");
@@ -5936,8 +5857,8 @@ ENREG_VAR:;
// We also pay 7 extra bytes for the MOV EBP,ESP,
// LEA ESP,[EBP-0x10] and the AND ESP,-8 to double align ESP
const unsigned DBL_ALIGN_SETUP_SIZE = 7;
-
- unsigned bytesUsed = refCntStk + refCntEBP - refCntStkParam + DBL_ALIGN_SETUP_SIZE;
+
+ unsigned bytesUsed = refCntStk + refCntEBP - refCntStkParam + DBL_ALIGN_SETUP_SIZE;
unsigned misaligned_weight = 4;
if (compCodeOpt() == SMALL_CODE)
@@ -5983,12 +5904,12 @@ ENREG_VAR:;
// <BUGNUM>
// VSW 346717: On P4 2 Proc XEON's, SciMark.FFT degrades if SciMark.FFT.transform_internal is
// not double aligned.
- // Here are the numbers that make this not double-aligned.
+ // Here are the numbers that make this not double-aligned.
// refCntWtdStkDbl = 0x164
// refCntWtdEBP = 0x1a4
// We think we do need to change the heuristic to be in favor of double-align.
// </BUGNUM>
-
+
if (refCntWtdEBP > refCntWtdStkDbl * 2)
{
/* It's probably better to use EBP to enregister integer variables */
@@ -6002,7 +5923,7 @@ ENREG_VAR:;
}
#ifdef DEBUG
- if (verbose)
+ if (verbose)
printf("; Predicting to create a double-aligned frame\n");
#endif
/*
@@ -6016,12 +5937,12 @@ ENREG_VAR:;
}
NO_DOUBLE_ALIGN:
-#endif // DOUBLE_ALIGN
+#endif // DOUBLE_ALIGN
- if (!codeGen->isFramePointerRequired() && !codeGen->isFrameRequired())
+ if (!codeGen->isFramePointerRequired() && !codeGen->isFrameRequired())
{
#ifdef _TARGET_XARCH_
- // clang-format off
+// clang-format off
/* If we are using EBP to enregister variables then
will we actually save bytes by setting up an EBP frame?
@@ -6048,7 +5969,7 @@ NO_DOUBLE_ALIGN:
if (refCntStk > (refCntEBP + EBP_FRAME_SETUP_SIZE))
{
- unsigned bytesSaved = refCntStk - (refCntEBP + EBP_FRAME_SETUP_SIZE);
+ unsigned bytesSaved = refCntStk - (refCntEBP + EBP_FRAME_SETUP_SIZE);
unsigned mem_access_weight = 3;
if (compCodeOpt() == SMALL_CODE)
@@ -6060,7 +5981,7 @@ NO_DOUBLE_ALIGN:
{
/* It's not a good idea to use EBP in our predictions */
CLANG_FORMAT_COMMENT_ANCHOR;
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose && (refCntEBP > 0))
printf("; Predicting that it's not worth using EBP to enregister variables\n");
#endif
@@ -6073,9 +5994,9 @@ NO_DOUBLE_ALIGN:
if ((rpFrameType == FT_NOT_SET) || (rpFrameType == FT_ESP_FRAME))
{
#ifdef DEBUG
- const char * reason;
+ const char* reason;
#endif
- if (rpMustCreateEBPCalled == false)
+ if (rpMustCreateEBPCalled == false)
{
rpMustCreateEBPCalled = true;
if (rpMustCreateEBPFrame(INDEBUG(&reason)))
@@ -6111,21 +6032,18 @@ REVERSE_EBP_ENREG:
/* variables that were enregistered in EBP become stack based variables */
raAddToStkPredict(refCntWtdEBP);
- unsigned lclNum;
+ unsigned lclNum;
/* We're going to have to undo some predicted enregistered variables */
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
/* Is this a register variable? */
- if (varDsc->lvRegNum != REG_STK)
+ if (varDsc->lvRegNum != REG_STK)
{
if (isRegPairType(varDsc->lvType))
{
/* Only one can be EBP */
- if (varDsc->lvRegNum == REG_FPBASE ||
- varDsc->lvOtherReg == REG_FPBASE)
+ if (varDsc->lvRegNum == REG_FPBASE || varDsc->lvOtherReg == REG_FPBASE)
{
if (varDsc->lvRegNum == REG_FPBASE)
varDsc->lvRegNum = varDsc->lvOtherReg;
@@ -6156,10 +6074,10 @@ REVERSE_EBP_ENREG:
#ifdef DEBUG
if (verbose)
{
-DUMP_MSG:
- printf("; reversing enregisteration of V%02u,T%02u (refcnt=%2u,refwtd=%4u%s)\n",
- lclNum, varDsc->lvVarIndex, varDsc->lvRefCnt,
- varDsc->lvRefCntWtd/2, (varDsc->lvRefCntWtd & 1) ? ".5" : "");
+ DUMP_MSG:
+ printf("; reversing enregistration of V%02u,T%02u (refcnt=%2u,refwtd=%4u%s)\n", lclNum,
+ varDsc->lvVarIndex, varDsc->lvRefCnt, varDsc->lvRefCntWtd / 2,
+ (varDsc->lvRefCntWtd & 1) ? ".5" : "");
}
#endif
}
@@ -6172,25 +6090,21 @@ DUMP_MSG:
EXIT:;
unsigned lclNum;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++, varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
/* Clear the lvDependReg flag for next iteration of the predictor */
varDsc->lvDependReg = false;
// If we set rpLostEnreg and this is the first pessimize pass
// then reverse the enreg of all TYP_LONG
- if (rpLostEnreg &&
- isRegPairType(varDsc->lvType) &&
- (rpPasses == rpPassesPessimize))
+ if (rpLostEnreg && isRegPairType(varDsc->lvType) && (rpPasses == rpPassesPessimize))
{
varDsc->lvRegNum = REG_STK;
varDsc->lvOtherReg = REG_STK;
}
}
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose && raNewBlocks)
{
printf("\nAdded FP register killing blocks:\n");
@@ -6213,9 +6127,9 @@ EXIT:;
* at different times (not to mention in a totally different way) for x86 vs
* RISC targets.
*/
-void Compiler::rpPredictRegUse()
+void Compiler::rpPredictRegUse()
{
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
raDumpVarIntf();
#endif
@@ -6225,11 +6139,11 @@ void Compiler::rpPredictRegUse()
regMaskTP allAcceptableRegs = RBM_ALLINT;
-#if FEATURE_FP_REGALLOC
+#if FEATURE_FP_REGALLOC
allAcceptableRegs |= raConfigRestrictMaskFP();
#endif
- allAcceptableRegs &= ~codeGen->regSet.rsMaskResvd; // Remove any register reserved for special purposes
+ allAcceptableRegs &= ~codeGen->regSet.rsMaskResvd; // Remove any register reserved for special purposes
/* For debuggable code, genJumpToThrowHlpBlk() generates an inline call
to acdHelper(). This is done implicitly, without creating a GT_CALL
@@ -6276,7 +6190,7 @@ void Compiler::rpPredictRegUse()
regAvail &= ~RBM_FPBASE;
#endif
-#ifdef DEBUG
+#ifdef DEBUG
BOOL fJitNoRegLoc = JitConfig.JitNoRegLoc();
if (fJitNoRegLoc)
regAvail = RBM_NONE;
@@ -6292,21 +6206,19 @@ void Compiler::rpPredictRegUse()
// Calculate the set of all tracked FP/non-FP variables
// into optAllFloatVars and optAllNonFPvars
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
/* Ignore the variable if it's not tracked */
- if (!varDsc->lvTracked)
+ if (!varDsc->lvTracked)
continue;
/* Get hold of the index and the interference mask for the variable */
- unsigned varNum = varDsc->lvVarIndex;
+ unsigned varNum = varDsc->lvVarIndex;
/* add to the set of all tracked FP/non-FP variables */
@@ -6330,10 +6242,10 @@ void Compiler::rpPredictRegUse()
rpPredictAssignAgain = false;
rpPasses = 0;
- bool mustPredict = true;
- unsigned stmtNum = 0;
- unsigned oldStkPredict = DUMMY_INIT(~0);
- VARSET_TP oldLclRegIntf[REG_COUNT];
+ bool mustPredict = true;
+ unsigned stmtNum = 0;
+ unsigned oldStkPredict = DUMMY_INIT(~0);
+ VARSET_TP oldLclRegIntf[REG_COUNT];
for (unsigned i = 0; i < REG_COUNT; i++)
{
@@ -6355,8 +6267,8 @@ void Compiler::rpPredictRegUse()
if ((rpPasses == 0) && (codeGen->regSet.rsMaskResvd & RBM_OPT_RSVD))
{
if (compRsvdRegCheck(REGALLOC_FRAME_LAYOUT))
- {
- // We must keep reserving R10 in this case
+ {
+ // We must keep reserving R10 in this case
codeGen->regSet.rsMaskResvd |= RBM_OPT_RSVD;
}
else
@@ -6377,8 +6289,7 @@ void Compiler::rpPredictRegUse()
/* of the caller saved registers. */
/* This fixes RAID perf bug 43440 VB Ackerman function */
- if ((rpPasses == 1) && (stmtNum <= 12) &&
- (regUsed & RBM_CALLEE_SAVED))
+ if ((rpPasses == 1) && (stmtNum <= 12) && (regUsed & RBM_CALLEE_SAVED))
{
goto EXTRA_PASS;
}
@@ -6395,26 +6306,26 @@ void Compiler::rpPredictRegUse()
noway_assert(oldStkPredict != (unsigned)DUMMY_INIT(~0));
// Be careful about overflow
- unsigned highStkPredict = (rpStkPredict*2 < rpStkPredict) ? ULONG_MAX : rpStkPredict*2;
+ unsigned highStkPredict = (rpStkPredict * 2 < rpStkPredict) ? ULONG_MAX : rpStkPredict * 2;
if (oldStkPredict < highStkPredict)
goto ALL_DONE;
if (rpStkPredict < rpPasses * 8)
goto ALL_DONE;
- if (rpPasses >= (rpPassesMax-1))
+ if (rpPasses >= (rpPassesMax - 1))
goto ALL_DONE;
}
EXTRA_PASS:
- /* We will do another pass */ ;
+ /* We will do another pass */;
}
#ifdef DEBUG
if (JitConfig.JitAssertOnMaxRAPasses())
{
noway_assert(rpPasses < rpPassesMax &&
- "This may not a bug, but dev team should look and see what is happening");
+ "This may not be a bug, but the dev team should look and see what is happening");
}
#endif
@@ -6426,7 +6337,6 @@ void Compiler::rpPredictRegUse()
NO_WAY("we seem to be stuck in an infinite loop. breaking out");
}
-
#ifdef DEBUG
if (verbose)
{
@@ -6439,7 +6349,7 @@ void Compiler::rpPredictRegUse()
if ((rpPasses == 1) && rpPredictAssignAgain)
printf("\n; Another pass due to rpPredictAssignAgain");
}
- printf("\n; Register predicting pass# %d\n", rpPasses+1);
+ printf("\n; Register predicting pass# %d\n", rpPasses + 1);
}
#endif
@@ -6456,28 +6366,28 @@ void Compiler::rpPredictRegUse()
assert(!opts.ShouldUsePInvokeHelpers());
noway_assert(info.compLvFrameListRoot < lvaCount);
- LclVarDsc * pinvokeVarDsc = &lvaTable[info.compLvFrameListRoot];
+ LclVarDsc* pinvokeVarDsc = &lvaTable[info.compLvFrameListRoot];
if (pinvokeVarDsc->lvTracked)
{
rpRecordRegIntf(RBM_CALLEE_TRASH, VarSetOps::MakeSingleton(this, pinvokeVarDsc->lvVarIndex)
- DEBUGARG("compLvFrameListRoot"));
+ DEBUGARG("compLvFrameListRoot"));
// We would prefer to have this be enregister in the PINVOKE_TCB register
pinvokeVarDsc->addPrefReg(RBM_PINVOKE_TCB, this);
}
- //If we're using a single return block, the p/invoke epilog code trashes ESI and EDI (in the
- //worst case). Make sure that the return value compiler temp that we create for the single
- //return block knows about this interference.
+ // If we're using a single return block, the p/invoke epilog code trashes ESI and EDI (in the
+ // worst case). Make sure that the return value compiler temp that we create for the single
+ // return block knows about this interference.
if (genReturnLocal != BAD_VAR_NUM)
{
noway_assert(genReturnBB);
- LclVarDsc * localTmp = &lvaTable[genReturnLocal];
+ LclVarDsc* localTmp = &lvaTable[genReturnLocal];
if (localTmp->lvTracked)
{
- rpRecordRegIntf(RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME, VarSetOps::MakeSingleton(this, localTmp->lvVarIndex)
- DEBUGARG( "genReturnLocal"));
+ rpRecordRegIntf(RBM_PINVOKE_TCB | RBM_PINVOKE_FRAME,
+ VarSetOps::MakeSingleton(this, localTmp->lvVarIndex) DEBUGARG("genReturnLocal"));
}
}
}
@@ -6487,17 +6397,15 @@ void Compiler::rpPredictRegUse()
{
bool hasMustInitFloat = false;
- // if we have any must-init floating point LclVars then we will add register interferences
- // for the arguments with RBM_SCRATCH
+ // if we have any must-init floating point LclVars then we will add register interferences
+ // for the arguments with RBM_SCRATCH
// this is so that if we need to reset the initReg to REG_SCRATCH in Compiler::genFnProlog()
// we won't home the arguments into REG_SCRATCH
-
- unsigned lclNum;
- LclVarDsc * varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ unsigned lclNum;
+ LclVarDsc* varDsc;
+
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
if (varDsc->lvMustInit && varTypeIsFloating(varDsc->TypeGet()))
{
@@ -6508,42 +6416,35 @@ void Compiler::rpPredictRegUse()
if (hasMustInitFloat)
{
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
// If it is an incoming argument that is tracked and not floating-point
if (varDsc->lvIsParam && varDsc->lvTracked && !varTypeIsFloating(varDsc->TypeGet()))
{
rpRecordRegIntf(RBM_SCRATCH, VarSetOps::MakeSingleton(this, varDsc->lvVarIndex)
- DEBUGARG( "arg home with must-init fp"));
+ DEBUGARG("arg home with must-init fp"));
}
}
}
}
#endif
- stmtNum = 0;
- rpAddedVarIntf = false;
- rpLostEnreg = false;
-
+ stmtNum = 0;
+ rpAddedVarIntf = false;
+ rpLostEnreg = false;
/* Walk the basic blocks and predict reg use for each tree */
- for (BasicBlock * block = fgFirstBB;
- block != NULL;
- block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block != NULL; block = block->bbNext)
{
- GenTreePtr stmt;
+ GenTreePtr stmt;
compCurBB = block;
compCurLifeTree = NULL;
VarSetOps::Assign(this, compCurLife, block->bbLiveIn);
compCurBB = block;
- for (stmt = block->FirstNonPhiDef();
- stmt != NULL;
- stmt = stmt->gtNext)
+ for (stmt = block->FirstNonPhiDef(); stmt != NULL; stmt = stmt->gtNext)
{
noway_assert(stmt->gtOper == GT_STMT);
@@ -6553,11 +6454,10 @@ void Compiler::rpPredictRegUse()
GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
stmtNum++;
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose && 1)
{
- printf("\nRegister predicting BB%02u, stmt %d\n",
- block->bbNum, stmtNum);
+ printf("\nRegister predicting BB%02u, stmt %d\n", block->bbNum, stmtNum);
gtDispTree(tree);
printf("\n");
}
@@ -6578,7 +6478,7 @@ void Compiler::rpPredictRegUse()
if (rpAddedVarIntf)
{
mustPredict = true;
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
raDumpVarIntf();
#endif
@@ -6591,7 +6491,7 @@ void Compiler::rpPredictRegUse()
if (rpPredictAssignAgain)
mustPredict = true;
-#ifdef DEBUG
+#ifdef DEBUG
if (fJitNoRegLoc)
goto ALL_DONE;
#endif
@@ -6623,7 +6523,7 @@ void Compiler::rpPredictRegUse()
regAvail &= ~RBM_FPBASE;
}
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
raDumpRegIntf();
#endif
@@ -6634,7 +6534,7 @@ void Compiler::rpPredictRegUse()
VarSetOps::Assign(this, oldLclRegIntf[i], raLclRegIntf[i]);
}
oldStkPredict = rpStkPredict;
- } // end of while (true)
+ } // end of while (true)
ALL_DONE:;
@@ -6647,25 +6547,25 @@ ALL_DONE:;
switch (rpFrameType)
{
- default:
- noway_assert(!"rpFrameType not set correctly!");
- break;
- case FT_ESP_FRAME:
- noway_assert(!codeGen->isFramePointerRequired());
- noway_assert(!codeGen->isFrameRequired());
- codeGen->setFramePointerUsed(false);
- break;
- case FT_EBP_FRAME:
- noway_assert((regUsed & RBM_FPBASE) == 0);
- codeGen->setFramePointerUsed(true);
- break;
+ default:
+ noway_assert(!"rpFrameType not set correctly!");
+ break;
+ case FT_ESP_FRAME:
+ noway_assert(!codeGen->isFramePointerRequired());
+ noway_assert(!codeGen->isFrameRequired());
+ codeGen->setFramePointerUsed(false);
+ break;
+ case FT_EBP_FRAME:
+ noway_assert((regUsed & RBM_FPBASE) == 0);
+ codeGen->setFramePointerUsed(true);
+ break;
#if DOUBLE_ALIGN
- case FT_DOUBLE_ALIGN_FRAME:
- noway_assert((regUsed & RBM_FPBASE) == 0);
- noway_assert(!codeGen->isFramePointerRequired());
- codeGen->setFramePointerUsed(false);
- codeGen->setDoubleAlign(true);
- break;
+ case FT_DOUBLE_ALIGN_FRAME:
+ noway_assert((regUsed & RBM_FPBASE) == 0);
+ noway_assert(!codeGen->isFramePointerRequired());
+ codeGen->setFramePointerUsed(false);
+ codeGen->setDoubleAlign(true);
+ break;
#endif
}
@@ -6685,7 +6585,7 @@ ALL_DONE:;
raMarkStkVars();
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("# rpPasses was %u for %s\n", rpPasses, info.compFullName);
printf(" rpStkPredict was %u\n", rpStkPredict);
@@ -6702,14 +6602,12 @@ ALL_DONE:;
* (part or whole), and if so what the base is (FP or SP).
*/
-void Compiler::raMarkStkVars()
+void Compiler::raMarkStkVars()
{
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
// For RyuJIT, lvOnFrame is set by LSRA, except in the case of zero-ref, which is set below.
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -6718,43 +6616,44 @@ void Compiler::raMarkStkVars()
varDsc->lvOnFrame = false;
#endif // LEGACY_BACKEND
- if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
+ if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
noway_assert(!varDsc->lvRegister);
goto ON_STK;
}
-
+
/* Fully enregistered variables don't need any frame space */
- if (varDsc->lvRegister)
+ if (varDsc->lvRegister)
{
- if (!isRegPairType(varDsc->TypeGet()))
+ if (!isRegPairType(varDsc->TypeGet()))
+ {
goto NOT_STK;
+ }
/* For "large" variables make sure both halves are enregistered */
- if (varDsc->lvRegNum != REG_STK &&
- varDsc->lvOtherReg != REG_STK)
+ if (varDsc->lvRegNum != REG_STK && varDsc->lvOtherReg != REG_STK)
{
goto NOT_STK;
}
}
/* Unused variables typically don't get any frame space */
- else if (varDsc->lvRefCnt == 0)
+ else if (varDsc->lvRefCnt == 0)
{
- bool needSlot = false;
+ bool needSlot = false;
- bool stkFixedArgInVarArgs = info.compIsVarArgs &&
- varDsc->lvIsParam &&
- !varDsc->lvIsRegArg &&
- lclNum != lvaVarargsHandleArg;
+ bool stkFixedArgInVarArgs =
+ info.compIsVarArgs && varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg;
// If its address has been exposed, ignore lvRefCnt. However, exclude
// fixed arguments in varargs method as lvOnFrame shouldn't be set
// for them as we don't want to explicitly report them to GC.
if (!stkFixedArgInVarArgs)
+ {
needSlot |= varDsc->lvAddrExposed;
+ }
#if FEATURE_FIXED_OUT_ARGS
@@ -6774,7 +6673,9 @@ void Compiler::raMarkStkVars()
if (opts.compDbgCode && !varDsc->lvIsParam && varDsc->lvTracked)
{
for (unsigned scopeNum = 0; scopeNum < info.compVarScopesCount; scopeNum++)
+ {
noway_assert(info.compVarScopes[scopeNum].vsdVarNum != lclNum);
+ }
}
#endif
/*
@@ -6783,20 +6684,18 @@ void Compiler::raMarkStkVars()
So we set lvMustInit and artificially bump up the ref-cnt.
*/
- if (opts.compDbgCode && !stkFixedArgInVarArgs &&
- lclNum < info.compLocalsCount)
+ if (opts.compDbgCode && !stkFixedArgInVarArgs && lclNum < info.compLocalsCount)
{
- needSlot |= true;
+ needSlot |= true;
if (lvaTypeIsGC(lclNum))
{
- varDsc->lvRefCnt = 1;
-
+ varDsc->lvRefCnt = 1;
}
if (!varDsc->lvIsParam)
{
- varDsc->lvMustInit = true;
+ varDsc->lvMustInit = true;
}
}
#endif // DEBUGGING_SUPPORT
@@ -6823,12 +6722,10 @@ void Compiler::raMarkStkVars()
ON_STK:
/* The variable (or part of it) lives on the stack frame */
- noway_assert((varDsc->lvType != TYP_UNDEF) &&
- (varDsc->lvType != TYP_VOID) &&
- (varDsc->lvType != TYP_UNKNOWN) );
+ noway_assert((varDsc->lvType != TYP_UNDEF) && (varDsc->lvType != TYP_VOID) && (varDsc->lvType != TYP_UNKNOWN));
#if FEATURE_FIXED_OUT_ARGS
noway_assert((lclNum == lvaOutgoingArgSpaceVar) || lvaLclSize(lclNum) != 0);
-#else // FEATURE_FIXED_OUT_ARGS
+#else // FEATURE_FIXED_OUT_ARGS
noway_assert(lvaLclSize(lclNum) != 0);
#endif // FEATURE_FIXED_OUT_ARGS
@@ -6840,13 +6737,13 @@ void Compiler::raMarkStkVars()
#if DOUBLE_ALIGN
- if (codeGen->doDoubleAlign())
+ if (codeGen->doDoubleAlign())
{
noway_assert(codeGen->isFramePointerUsed() == false);
/* All arguments are off of EBP with double-aligned frames */
- if (varDsc->lvIsParam && !varDsc->lvIsRegArg)
+ if (varDsc->lvIsParam && !varDsc->lvIsRegArg)
{
varDsc->lvFramePointerBased = true;
}
@@ -6858,18 +6755,15 @@ void Compiler::raMarkStkVars()
// It must be in a register, on frame, or have zero references.
- noway_assert( varDsc->lvIsInReg() ||
- varDsc->lvOnFrame ||
- varDsc->lvRefCnt == 0);
+ noway_assert(varDsc->lvIsInReg() || varDsc->lvOnFrame || varDsc->lvRefCnt == 0);
#ifndef LEGACY_BACKEND
// We can't have both lvRegister and lvOnFrame for RyuJIT
noway_assert(!varDsc->lvRegister || !varDsc->lvOnFrame);
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
/* If both lvRegister and lvOnFrame are set, it must be partially enregistered */
- noway_assert(!varDsc->lvRegister ||
- !varDsc->lvOnFrame ||
+ noway_assert(!varDsc->lvRegister || !varDsc->lvOnFrame ||
(varDsc->lvType == TYP_LONG && varDsc->lvOtherReg == REG_STK));
#endif // LEGACY_BACKEND
@@ -6886,11 +6780,9 @@ void Compiler::raMarkStkVars()
if (varDsc->lvIsParam && raIsVarargsStackArg(lclNum))
{
- if (!varDsc->lvPromoted && !varDsc->lvIsStructField)
- {
- noway_assert( varDsc->lvRefCnt == 0 &&
- !varDsc->lvRegister &&
- !varDsc->lvOnFrame );
+ if (!varDsc->lvPromoted && !varDsc->lvIsStructField)
+ {
+ noway_assert(varDsc->lvRefCnt == 0 && !varDsc->lvRegister && !varDsc->lvOnFrame);
}
}
#endif
@@ -6900,12 +6792,12 @@ void Compiler::raMarkStkVars()
#ifdef LEGACY_BACKEND
void Compiler::rpRecordPrediction()
{
- if ( rpBestRecordedPrediction == NULL
- || rpStkPredict < rpBestRecordedStkPredict)
+ if (rpBestRecordedPrediction == NULL || rpStkPredict < rpBestRecordedStkPredict)
{
if (rpBestRecordedPrediction == NULL)
{
- rpBestRecordedPrediction = reinterpret_cast<VarRegPrediction*>(compGetMemArrayA(lvaCount, sizeof(VarRegPrediction)));
+ rpBestRecordedPrediction =
+ reinterpret_cast<VarRegPrediction*>(compGetMemArrayA(lvaCount, sizeof(VarRegPrediction)));
}
for (unsigned k = 0; k < lvaCount; k++)
{
@@ -6920,11 +6812,12 @@ void Compiler::rpRecordPrediction()
void Compiler::rpUseRecordedPredictionIfBetter()
{
- JITDUMP("rpStkPredict is %d; previous feasible reg prediction is %d.\n", rpStkPredict, rpBestRecordedPrediction != NULL ? rpBestRecordedStkPredict : 0);
- if ( rpBestRecordedPrediction != NULL
- && rpStkPredict > rpBestRecordedStkPredict)
+ JITDUMP("rpStkPredict is %d; previous feasible reg prediction is %d.\n", rpStkPredict,
+ rpBestRecordedPrediction != NULL ? rpBestRecordedStkPredict : 0);
+ if (rpBestRecordedPrediction != NULL && rpStkPredict > rpBestRecordedStkPredict)
{
- JITDUMP("Reverting to a previously-recorded feasible reg prediction with weighted stack use count %d.\n", rpBestRecordedStkPredict);
+ JITDUMP("Reverting to a previously-recorded feasible reg prediction with weighted stack use count %d.\n",
+ rpBestRecordedStkPredict);
for (unsigned k = 0; k < lvaCount; k++)
{
diff --git a/src/jit/regalloc.h b/src/jit/regalloc.h
index f8d572dec6..7e2d7c7eb1 100644
--- a/src/jit/regalloc.h
+++ b/src/jit/regalloc.h
@@ -9,11 +9,11 @@
enum FrameType
{
- FT_NOT_SET,
- FT_ESP_FRAME,
- FT_EBP_FRAME,
+ FT_NOT_SET,
+ FT_ESP_FRAME,
+ FT_EBP_FRAME,
#if DOUBLE_ALIGN
- FT_DOUBLE_ALIGN_FRAME,
+ FT_DOUBLE_ALIGN_FRAME,
#endif
};
@@ -27,80 +27,80 @@ enum FrameType
// This enumeration specifies register restrictions for the predictor
enum rpPredictReg
{
- PREDICT_NONE, // any subtree
- PREDICT_ADDR, // subtree is left side of an assignment
- PREDICT_REG, // subtree must be any register
- PREDICT_SCRATCH_REG, // subtree must be any writable register
+ PREDICT_NONE, // any subtree
+ PREDICT_ADDR, // subtree is left side of an assignment
+ PREDICT_REG, // subtree must be any register
+ PREDICT_SCRATCH_REG, // subtree must be any writable register
#if defined(_TARGET_ARM_)
- PREDICT_PAIR_R0R1, // subtree will write R0 and R1
- PREDICT_PAIR_R2R3, // subtree will write R2 and R3
+ PREDICT_PAIR_R0R1, // subtree will write R0 and R1
+ PREDICT_PAIR_R2R3, // subtree will write R2 and R3
#elif defined(_TARGET_AMD64_)
- PREDICT_NOT_REG_EAX, // subtree must be any writable register, except EAX
- PREDICT_NOT_REG_ECX, // subtree must be any writable register, except ECX
+ PREDICT_NOT_REG_EAX, // subtree must be any writable register, except EAX
+ PREDICT_NOT_REG_ECX, // subtree must be any writable register, except ECX
#elif defined(_TARGET_X86_)
- PREDICT_NOT_REG_EAX, // subtree must be any writable register, except EAX
- PREDICT_NOT_REG_ECX, // subtree must be any writable register, except ECX
+ PREDICT_NOT_REG_EAX, // subtree must be any writable register, except EAX
+ PREDICT_NOT_REG_ECX, // subtree must be any writable register, except ECX
- PREDICT_PAIR_EAXEDX, // subtree will write EAX and EDX
- PREDICT_PAIR_ECXEBX, // subtree will write ECX and EBX
+ PREDICT_PAIR_EAXEDX, // subtree will write EAX and EDX
+ PREDICT_PAIR_ECXEBX, // subtree will write ECX and EBX
#else
#error "Unknown Target!"
-#endif // _TARGET_
+#endif // _TARGET_
-#define REGDEF(name, rnum, mask, sname) PREDICT_REG_ ## name ,
+#define REGDEF(name, rnum, mask, sname) PREDICT_REG_##name,
#include "register.h"
- // The following are use whenever we have a ASG node into a LCL_VAR that
- // we predict to be enregistered. This flags indicates that we can expect
- // to use the register that is being assigned into as the temporary to
- // compute the right side of the ASGN node.
+ // The following are used whenever we have an ASG node into a LCL_VAR that
+ // we predict to be enregistered. This flag indicates that we can expect
+ // to use the register that is being assigned into as the temporary to
+ // compute the right side of the ASGN node.
- PREDICT_REG_VAR_T00, // write the register used by tracked varable 00
- PREDICT_REG_VAR_MAX = PREDICT_REG_VAR_T00 + lclMAX_TRACKED - 1,
+ PREDICT_REG_VAR_T00, // write the register used by tracked variable 00
+ PREDICT_REG_VAR_MAX = PREDICT_REG_VAR_T00 + lclMAX_TRACKED - 1,
- PREDICT_COUNT = PREDICT_REG_VAR_T00,
+ PREDICT_COUNT = PREDICT_REG_VAR_T00,
#define REGDEF(name, rnum, mask, sname)
-#define REGALIAS(alias, realname) PREDICT_REG_ ## alias = PREDICT_REG_ ## realname ,
+#define REGALIAS(alias, realname) PREDICT_REG_##alias = PREDICT_REG_##realname,
#include "register.h"
#if defined(_TARGET_ARM_)
- PREDICT_REG_FIRST = PREDICT_REG_R0,
- PREDICT_INTRET = PREDICT_REG_R0,
- PREDICT_LNGRET = PREDICT_PAIR_R0R1,
- PREDICT_FLTRET = PREDICT_REG_F0,
+ PREDICT_REG_FIRST = PREDICT_REG_R0,
+ PREDICT_INTRET = PREDICT_REG_R0,
+ PREDICT_LNGRET = PREDICT_PAIR_R0R1,
+ PREDICT_FLTRET = PREDICT_REG_F0,
#elif defined(_TARGET_AMD64_)
- PREDICT_REG_FIRST = PREDICT_REG_RAX,
- PREDICT_INTRET = PREDICT_REG_EAX,
- PREDICT_LNGRET = PREDICT_REG_RAX,
+ PREDICT_REG_FIRST = PREDICT_REG_RAX,
+ PREDICT_INTRET = PREDICT_REG_EAX,
+ PREDICT_LNGRET = PREDICT_REG_RAX,
#elif defined(_TARGET_X86_)
- PREDICT_REG_FIRST = PREDICT_REG_EAX,
- PREDICT_INTRET = PREDICT_REG_EAX,
- PREDICT_LNGRET = PREDICT_PAIR_EAXEDX,
+ PREDICT_REG_FIRST = PREDICT_REG_EAX,
+ PREDICT_INTRET = PREDICT_REG_EAX,
+ PREDICT_LNGRET = PREDICT_PAIR_EAXEDX,
#else
#error "Unknown _TARGET_"
-#endif // _TARGET_
+#endif // _TARGET_
};
#if DOUBLE_ALIGN
enum CanDoubleAlign
{
- CANT_DOUBLE_ALIGN,
- CAN_DOUBLE_ALIGN,
- MUST_DOUBLE_ALIGN,
- COUNT_DOUBLE_ALIGN,
+ CANT_DOUBLE_ALIGN,
+ CAN_DOUBLE_ALIGN,
+ MUST_DOUBLE_ALIGN,
+ COUNT_DOUBLE_ALIGN,
DEFAULT_DOUBLE_ALIGN = CAN_DOUBLE_ALIGN
};
@@ -108,5 +108,4 @@ enum CanDoubleAlign
#endif // LEGACY_BACKEND
-#endif // REGALLOC_H_
-
+#endif // REGALLOC_H_
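Aside: the PREDICT_REG_VAR_T00 .. PREDICT_REG_VAR_MAX range in the enum above reserves one prediction value per tracked local, so tracked local N lands at PREDICT_REG_VAR_T00 + N. A minimal self-contained sketch of that mapping, using stand-in constants and a hypothetical helper name rather than the JIT's real definitions:

#include <cassert>

// Stand-in values; the real constants come from the JIT's target headers.
const unsigned lclMAX_TRACKED_SKETCH   = 128;
const unsigned PREDICT_REG_VAR_T00_VAL = 100;
const unsigned PREDICT_REG_VAR_MAX_VAL = PREDICT_REG_VAR_T00_VAL + lclMAX_TRACKED_SKETCH - 1;

// Tracked local N maps to prediction value PREDICT_REG_VAR_T00 + N, which is
// the convention the enum comments above describe (illustrative helper).
unsigned predictForTrackedVar(unsigned lvVarIndex)
{
    assert(lvVarIndex < lclMAX_TRACKED_SKETCH);
    return PREDICT_REG_VAR_T00_VAL + lvVarIndex;
}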
diff --git a/src/jit/register_arg_convention.cpp b/src/jit/register_arg_convention.cpp
index 429c585f8d..4678cdec41 100644
--- a/src/jit/register_arg_convention.cpp
+++ b/src/jit/register_arg_convention.cpp
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
@@ -15,14 +14,14 @@ unsigned InitVarDscInfo::allocRegArg(var_types type, unsigned numRegs /* = 1 */)
assert(numRegs > 0);
unsigned resultArgNum = regArgNum(type);
- bool isBackFilled = false;
+ bool isBackFilled = false;
#ifdef _TARGET_ARM_
// Check for back-filling
- if (varTypeIsFloating(type) && // We only back-fill the float registers
- !anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
- (numRegs == 1) && // Is there a possibility we could back-fill?
- (fltArgSkippedRegMask != RBM_NONE)) // Is there an available back-fill slot?
+ if (varTypeIsFloating(type) && // We only back-fill the float registers
+ !anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
+ (numRegs == 1) && // Is there a possibility we could back-fill?
+ (fltArgSkippedRegMask != RBM_NONE)) // Is there an available back-fill slot?
{
// We will never back-fill something greater than a single register
// (TYP_FLOAT, or TYP_STRUCT HFA with a single float). This is because
@@ -31,7 +30,7 @@ unsigned InitVarDscInfo::allocRegArg(var_types type, unsigned numRegs /* = 1 */)
// Back-fill the register
regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask);
- fltArgSkippedRegMask &= ~backFillBitMask; // Remove the back-filled register(s) from the skipped mask
+ fltArgSkippedRegMask &= ~backFillBitMask; // Remove the back-filled register(s) from the skipped mask
resultArgNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask));
assert(resultArgNum < MAX_FLOAT_REG_ARG);
isBackFilled = true;
@@ -61,10 +60,10 @@ bool InitVarDscInfo::enoughAvailRegs(var_types type, unsigned numRegs /* = 1 */)
#ifdef _TARGET_ARM_
// Check for back-filling
- if (varTypeIsFloating(type) && // We only back-fill the float registers
- !anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
- (numRegs == 1) && // Is there a possibility we could back-fill?
- (fltArgSkippedRegMask != RBM_NONE)) // Is there an available back-fill slot?
+ if (varTypeIsFloating(type) && // We only back-fill the float registers
+ !anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
+ (numRegs == 1) && // Is there a possibility we could back-fill?
+ (fltArgSkippedRegMask != RBM_NONE)) // Is there an available back-fill slot?
{
backFillCount = 1;
}
@@ -79,13 +78,17 @@ unsigned InitVarDscInfo::alignReg(var_types type, unsigned requiredRegAlignment)
assert(requiredRegAlignment > 0);
if (requiredRegAlignment == 1)
- return 0; // Everything is always "1" aligned
+ {
+ return 0; // Everything is always "1" aligned
+ }
assert(requiredRegAlignment == 2); // we don't expect anything else right now
int alignMask = regArgNum(type) & (requiredRegAlignment - 1);
if (alignMask == 0)
- return 0; // We're already aligned
+ {
+ return 0; // We're already aligned
+ }
unsigned cAlignSkipped = requiredRegAlignment - alignMask;
assert(cAlignSkipped == 1); // Alignment is currently only 1 or 2, so misalignment can only be 1.
@@ -97,8 +100,8 @@ unsigned InitVarDscInfo::alignReg(var_types type, unsigned requiredRegAlignment)
}
#endif // _TARGET_ARM_
- assert(regArgNum(type) + cAlignSkipped <= maxRegArgNum(type)); // if equal, then we aligned the last slot, and the
- // arg can't be enregistered
+ assert(regArgNum(type) + cAlignSkipped <= maxRegArgNum(type)); // if equal, then we aligned the last slot, and the
+ // arg can't be enregistered
regArgNum(type) += cAlignSkipped;
return cAlignSkipped;
@@ -107,10 +110,14 @@ unsigned InitVarDscInfo::alignReg(var_types type, unsigned requiredRegAlignment)
bool InitVarDscInfo::canEnreg(var_types type, unsigned numRegs /* = 1 */)
{
if (!isRegParamType(type))
+ {
return false;
+ }
if (!enoughAvailRegs(type, numRegs))
+ {
return false;
+ }
return true;
}
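Aside: the alignReg hunk above skips argument registers until the next candidate satisfies the required alignment. A standalone sketch of just that arithmetic, where the function name is illustrative and 'nextRegArgNum' stands in for regArgNum(type):

#include <cassert>

// Sketch of the alignment-skip computation shown in InitVarDscInfo::alignReg.
// The mask trick assumes a power-of-two alignment; the JIT only passes 1 or 2.
unsigned alignmentSkipCount(unsigned nextRegArgNum, unsigned requiredRegAlignment)
{
    assert(requiredRegAlignment > 0);
    if (requiredRegAlignment == 1)
    {
        return 0; // everything is trivially "1" aligned
    }

    unsigned alignMask = nextRegArgNum & (requiredRegAlignment - 1);
    if (alignMask == 0)
    {
        return 0; // already aligned, nothing to skip
    }

    return requiredRegAlignment - alignMask; // registers to skip (1 when alignment is 2)
}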
diff --git a/src/jit/register_arg_convention.h b/src/jit/register_arg_convention.h
index 5114843c1c..5073732a3e 100644
--- a/src/jit/register_arg_convention.h
+++ b/src/jit/register_arg_convention.h
@@ -2,16 +2,15 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#ifndef __register_arg_convention__
#define __register_arg_convention__
class LclVarDsc;
-struct InitVarDscInfo
+struct InitVarDscInfo
{
- LclVarDsc * varDsc;
- unsigned varNum;
+ LclVarDsc* varDsc;
+ unsigned varNum;
unsigned intRegArgNum;
unsigned floatRegArgNum;
@@ -24,17 +23,16 @@ struct InitVarDscInfo
// Support back-filling of FP parameters. This is similar to code in gtMorphArgs() that
// handles arguments.
regMaskTP fltArgSkippedRegMask;
- bool anyFloatStackArgs;
+ bool anyFloatStackArgs;
#endif // _TARGET_ARM_
public:
-
// set to initial values
- void Init(LclVarDsc *lvaTable, bool _hasRetBufArg)
+ void Init(LclVarDsc* lvaTable, bool _hasRetBufArg)
{
hasRetBufArg = _hasRetBufArg;
- varDsc = &lvaTable[0]; // the first argument LclVar 0
- varNum = 0; // the first argument varNum 0
+ varDsc = &lvaTable[0]; // the first argument LclVar 0
+ varNum = 0; // the first argument varNum 0
intRegArgNum = 0;
floatRegArgNum = 0;
maxIntRegArgNum = MAX_REG_ARG;
@@ -42,14 +40,14 @@ public:
#ifdef _TARGET_ARM_
fltArgSkippedRegMask = RBM_NONE;
- anyFloatStackArgs = false;
+ anyFloatStackArgs = false;
#endif // _TARGET_ARM_
}
// return ref to current register arg for this type
- unsigned& regArgNum(var_types type)
- {
- return varTypeIsFloating(type) ? floatRegArgNum : intRegArgNum;
+ unsigned& regArgNum(var_types type)
+ {
+ return varTypeIsFloating(type) ? floatRegArgNum : intRegArgNum;
}
// Allocate a set of contiguous argument registers. "type" is either an integer
@@ -96,11 +94,10 @@ public:
#endif // _TARGET_ARM_
private:
-
// return max register arg for this type
- unsigned maxRegArgNum(var_types type)
- {
- return varTypeIsFloating(type) ? maxFloatRegArgNum : maxIntRegArgNum;
+ unsigned maxRegArgNum(var_types type)
+ {
+ return varTypeIsFloating(type) ? maxFloatRegArgNum : maxIntRegArgNum;
}
bool enoughAvailRegs(var_types type, unsigned numRegs = 1);
diff --git a/src/jit/registerfp.cpp b/src/jit/registerfp.cpp
index 1774b8a9e7..997c223ed4 100644
--- a/src/jit/registerfp.cpp
+++ b/src/jit/registerfp.cpp
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
@@ -19,33 +18,31 @@
#endif // !_TARGET_ARM_
// get the next argument register which is aligned to 'alignment' # of bytes
-regNumber alignFloatArgReg(regNumber argReg, int alignment)
+regNumber alignFloatArgReg(regNumber argReg, int alignment)
{
assert(isValidFloatArgReg(argReg));
int regsize_alignment = alignment /= REGSIZE_BYTES;
if (genMapFloatRegNumToRegArgNum(argReg) % regsize_alignment)
argReg = genRegArgNext(argReg);
-
- // technically the above should be a 'while' so make sure
+
+ // technically the above should be a 'while' so make sure
// we never should have incremented more than once
assert(!(genMapFloatRegNumToRegArgNum(argReg) % regsize_alignment));
return argReg;
}
-
// Instruction list
// N=normal, R=reverse, P=pop
-void CodeGen::genFloatConst(GenTree *tree, RegSet::RegisterPreference *pref)
+void CodeGen::genFloatConst(GenTree* tree, RegSet::RegisterPreference* pref)
{
assert(tree->gtOper == GT_CNS_DBL);
- var_types type = tree->gtType;
- double constValue = tree->gtDblCon.gtDconVal;
- size_t* cv = (size_t*)&constValue;
-
-
+ var_types type = tree->gtType;
+ double constValue = tree->gtDblCon.gtDconVal;
+ size_t* cv = (size_t*)&constValue;
+
regNumber dst = regSet.PickRegFloat(type, pref);
if (type == TYP_FLOAT)
@@ -67,15 +64,14 @@ void CodeGen::genFloatConst(GenTree *tree, RegSet::RegisterPreference *pref)
genSetRegToIcon(reg2, cv[1]);
regSet.rsUnlockReg(genRegMask(reg1));
- getEmitter()->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE,
- dst, reg1, reg2);
+ getEmitter()->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, dst, reg1, reg2);
}
genMarkTreeInReg(tree, dst);
return;
}
-void CodeGen::genFloatMath(GenTree *tree, RegSet::RegisterPreference *pref)
+void CodeGen::genFloatMath(GenTree* tree, RegSet::RegisterPreference* pref)
{
assert(tree->OperGet() == GT_INTRINSIC);
@@ -88,32 +84,31 @@ void CodeGen::genFloatMath(GenTree *tree, RegSet::RegisterPreference *pref)
switch (tree->gtIntrinsic.gtIntrinsicId)
{
- case CORINFO_INTRINSIC_Sin:
- ins = INS_invalid;
- break;
- case CORINFO_INTRINSIC_Cos:
- ins = INS_invalid;
- break;
- case CORINFO_INTRINSIC_Sqrt:
- ins = INS_vsqrt;
- break;
- case CORINFO_INTRINSIC_Abs:
- ins = INS_vabs;
+ case CORINFO_INTRINSIC_Sin:
+ ins = INS_invalid;
+ break;
+ case CORINFO_INTRINSIC_Cos:
+ ins = INS_invalid;
+ break;
+ case CORINFO_INTRINSIC_Sqrt:
+ ins = INS_vsqrt;
+ break;
+ case CORINFO_INTRINSIC_Abs:
+ ins = INS_vabs;
+ break;
+ case CORINFO_INTRINSIC_Round:
+ {
+ regNumber reg = regSet.PickRegFloat(tree->TypeGet(), pref);
+ genMarkTreeInReg(tree, reg);
+ // convert it to a long and back
+ inst_RV_RV(ins_FloatConv(TYP_LONG, tree->TypeGet()), reg, op1->gtRegNum, tree->TypeGet());
+ inst_RV_RV(ins_FloatConv(tree->TypeGet(), TYP_LONG), reg, reg);
+ genCodeForTreeFloat_DONE(tree, op1->gtRegNum);
+ return;
+ }
break;
- case CORINFO_INTRINSIC_Round:
- {
- regNumber reg = regSet.PickRegFloat(tree->TypeGet(), pref);
- genMarkTreeInReg(tree, reg);
- // convert it to a long and back
- inst_RV_RV(ins_FloatConv(TYP_LONG,tree->TypeGet()),
- reg, op1->gtRegNum, tree->TypeGet());
- inst_RV_RV(ins_FloatConv(tree->TypeGet(), TYP_LONG), reg, reg);
- genCodeForTreeFloat_DONE(tree, op1->gtRegNum);
- return;
- }
- break;
- default:
- unreached();
+ default:
+ unreached();
}
if (ins != INS_invalid)
@@ -130,11 +125,11 @@ void CodeGen::genFloatMath(GenTree *tree, RegSet::RegisterPreference *pref)
// If unreached is removed, mark register that holds tree
// genCodeForTreeFloat_DONE(tree, op1->gtRegNum);
}
-
- return;
+
+ return;
}
-void CodeGen::genFloatSimple(GenTree *tree, RegSet::RegisterPreference *pref)
+void CodeGen::genFloatSimple(GenTree* tree, RegSet::RegisterPreference* pref)
{
assert(tree->OperKind() & GTK_SMPOP);
var_types type = tree->TypeGet();
@@ -149,7 +144,7 @@ void CodeGen::genFloatSimple(GenTree *tree, RegSet::RegisterPreference *pref)
{
// Assignment
case GT_ASG:
- {
+ {
genFloatAssign(tree);
break;
}
@@ -163,7 +158,7 @@ void CodeGen::genFloatSimple(GenTree *tree, RegSet::RegisterPreference *pref)
genFloatArith(tree, pref);
break;
}
-
+
case GT_NEG:
{
GenTreePtr op1 = tree->gtOp.gtOp1;
@@ -174,9 +169,8 @@ void CodeGen::genFloatSimple(GenTree *tree, RegSet::RegisterPreference *pref)
// change the sign
regNumber reg = regSet.PickRegFloat(type, pref);
genMarkTreeInReg(tree, reg);
- inst_RV_RV(ins_MathOp(tree->OperGet(), type),
- reg, op1->gtRegNum, type);
-
+ inst_RV_RV(ins_MathOp(tree->OperGet(), type), reg, op1->gtRegNum, type);
+
// mark register that holds tree
genCodeForTreeFloat_DONE(tree, reg);
return;
@@ -184,12 +178,12 @@ void CodeGen::genFloatSimple(GenTree *tree, RegSet::RegisterPreference *pref)
case GT_IND:
{
- regMaskTP addrReg;
-
+ regMaskTP addrReg;
+
// Make sure the address value is 'addressable' */
addrReg = genMakeAddressable(tree, 0, RegSet::FREE_REG);
- // Load the value onto the FP stack
+ // Load the value onto the FP stack
regNumber reg = regSet.PickRegFloat(type, pref);
genLoadFloat(tree, reg);
@@ -223,7 +217,7 @@ void CodeGen::genFloatSimple(GenTree *tree, RegSet::RegisterPreference *pref)
GenTreePtr op1 = tree->gtOp.gtOp1;
assert(op1);
- pref->best = (type==TYP_DOUBLE) ? RBM_DOUBLERET : RBM_FLOATRET;
+ pref->best = (type == TYP_DOUBLE) ? RBM_DOUBLERET : RBM_FLOATRET;
// Compute the result
genCodeForTreeFloat(op1, pref);
@@ -250,7 +244,7 @@ void CodeGen::genFloatSimple(GenTree *tree, RegSet::RegisterPreference *pref)
{
GenTreePtr op1 = tree->gtOp.gtOp1;
GenTreePtr op2 = tree->gtGetOp2();
-
+
if (tree->gtFlags & GTF_REVERSE_OPS)
{
genCodeForTreeFloat(op2, pref);
@@ -264,7 +258,7 @@ void CodeGen::genFloatSimple(GenTree *tree, RegSet::RegisterPreference *pref)
genEvalSideEffects(op1);
genCodeForTreeFloat(op2, pref);
}
-
+
genCodeForTreeFloat_DONE(tree, op2->gtRegNum);
break;
}
@@ -279,11 +273,11 @@ void CodeGen::genFloatSimple(GenTree *tree, RegSet::RegisterPreference *pref)
}
// generate code for ckfinite tree/instruction
-void CodeGen::genFloatCheckFinite(GenTree *tree, RegSet::RegisterPreference *pref)
+void CodeGen::genFloatCheckFinite(GenTree* tree, RegSet::RegisterPreference* pref)
{
- TempDsc * temp;
- int offs;
-
+ TempDsc* temp;
+ int offs;
+
GenTreePtr op1 = tree->gtOp.gtOp1;
// Offset of the DWord containing the exponent
@@ -303,9 +297,9 @@ void CodeGen::genFloatCheckFinite(GenTree *tree, RegSet::RegisterPreference *pre
else // double
{
assert(op1->gtType == TYP_DOUBLE);
- getEmitter()->emitIns_R_R(INS_vmov_f2i, EA_4BYTE, reg,
- REG_NEXT(op1->gtRegNum)); // the high 32 bits of the double register
- expMask = 0x7FF00000;
+ getEmitter()->emitIns_R_R(INS_vmov_f2i, EA_4BYTE, reg,
+ REG_NEXT(op1->gtRegNum)); // the high 32 bits of the double register
+ expMask = 0x7FF00000;
}
regTracker.rsTrackRegTrash(reg);
@@ -320,21 +314,21 @@ void CodeGen::genFloatCheckFinite(GenTree *tree, RegSet::RegisterPreference *pre
genCodeForTreeFloat_DONE(tree, op1->gtRegNum);
}
-void CodeGen::genFloatAssign(GenTree *tree)
+void CodeGen::genFloatAssign(GenTree* tree)
{
- var_types type = tree->TypeGet();
- GenTreePtr op1 = tree->gtGetOp1();
- GenTreePtr op2 = tree->gtGetOp2();
+ var_types type = tree->TypeGet();
+ GenTreePtr op1 = tree->gtGetOp1();
+ GenTreePtr op2 = tree->gtGetOp2();
- regMaskTP needRegOp1 = RBM_ALLINT;
- regMaskTP addrReg = RBM_NONE;
- bool volat = false; // Is this a volatile store
- bool unaligned = false; // Is this an unaligned store
- regNumber op2reg = REG_NA;
+ regMaskTP needRegOp1 = RBM_ALLINT;
+ regMaskTP addrReg = RBM_NONE;
+ bool volat = false; // Is this a volatile store
+ bool unaligned = false; // Is this an unaligned store
+ regNumber op2reg = REG_NA;
#ifdef DEBUGGING_SUPPORT
- unsigned lclVarNum = compiler->lvaCount;
- unsigned lclILoffs = DUMMY_INIT(0);
+ unsigned lclVarNum = compiler->lvaCount;
+ unsigned lclILoffs = DUMMY_INIT(0);
#endif
noway_assert(tree->OperGet() == GT_ASG);
@@ -344,150 +338,152 @@ void CodeGen::genFloatAssign(GenTree *tree)
//
switch (op1->gtOper)
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- case GT_LCL_FLD:
- // Check for a misalignment on a Floating Point field
- //
- if (varTypeIsFloating(op1->TypeGet()))
- {
- if ((op1->gtLclFld.gtLclOffs % emitTypeSize(op1->TypeGet())) != 0)
+ case GT_LCL_FLD:
+ // Check for a misalignment on a Floating Point field
+ //
+ if (varTypeIsFloating(op1->TypeGet()))
{
- unaligned = true;
+ if ((op1->gtLclFld.gtLclOffs % emitTypeSize(op1->TypeGet())) != 0)
+ {
+ unaligned = true;
+ }
}
- }
- break;
-
- case GT_LCL_VAR:
- varNum = op1->gtLclVarCommon.gtLclNum;
- noway_assert(varNum < compiler->lvaCount);
- varDsc = compiler->lvaTable + varNum;
-
- #ifdef DEBUGGING_SUPPORT
- // For non-debuggable code, every definition of a lcl-var has
- // to be checked to see if we need to open a new scope for it.
- // Remember the local var info to call siCheckVarScope
- // AFTER code generation of the assignment.
- //
- if (compiler->opts.compScopeInfo && !compiler->opts.compDbgCode && (compiler->info.compVarScopesCount > 0))
- {
- lclVarNum = varNum;
- lclILoffs = op1->gtLclVar.gtLclILoffs;
- }
- #endif
-
- // Dead Store assert (with min opts we may have dead stores)
- //
- noway_assert(!varDsc->lvTracked || compiler->opts.MinOpts() || !(op1->gtFlags & GTF_VAR_DEATH));
-
- // Does this variable live in a register?
- //
- if (genMarkLclVar(op1))
- {
- noway_assert(!compiler->opts.compDbgCode); // We don't enregister any floats with debug codegen
-
- // Get hold of the target register
- //
- regNumber op1Reg = op1->gtRegVar.gtRegNum;
+ break;
- // the variable being assigned should be dead in op2
- assert(!varDsc->lvTracked || !VarSetOps::IsMember(compiler, genUpdateLiveSetForward(op2), varDsc->lvVarIndex));
+ case GT_LCL_VAR:
+ varNum = op1->gtLclVarCommon.gtLclNum;
+ noway_assert(varNum < compiler->lvaCount);
+ varDsc = compiler->lvaTable + varNum;
- // Setup register preferencing, so that we try to target the op1 enregistered variable
+#ifdef DEBUGGING_SUPPORT
+ // For non-debuggable code, every definition of a lcl-var has
+ // to be checked to see if we need to open a new scope for it.
+ // Remember the local var info to call siCheckVarScope
+ // AFTER code generation of the assignment.
//
- regMaskTP bestMask = genRegMask(op1Reg);
- if (type==TYP_DOUBLE)
+ if (compiler->opts.compScopeInfo && !compiler->opts.compDbgCode && (compiler->info.compVarScopesCount > 0))
{
- assert((bestMask & RBM_DBL_REGS) != 0);
- bestMask |= genRegMask(REG_NEXT(op1Reg));
+ lclVarNum = varNum;
+ lclILoffs = op1->gtLclVar.gtLclILoffs;
}
- RegSet::RegisterPreference pref(RBM_ALLFLOAT, bestMask);
+#endif
- // Evaluate op2 into a floating point register
+ // Dead Store assert (with min opts we may have dead stores)
//
- genCodeForTreeFloat(op2, &pref);
-
- noway_assert(op2->gtFlags & GTF_REG_VAL);
+ noway_assert(!varDsc->lvTracked || compiler->opts.MinOpts() || !(op1->gtFlags & GTF_VAR_DEATH));
- // Make sure the value ends up in the right place ...
- // For example if op2 is a call that returns a result
- // in REG_F0, we will need to do a move instruction here
+ // Does this variable live in a register?
//
- if ((op2->gtRegNum != op1Reg) || (op2->TypeGet() != type))
+ if (genMarkLclVar(op1))
{
- regMaskTP spillRegs = regSet.rsMaskUsed & genRegMaskFloat(op1Reg, op1->TypeGet());
- if (spillRegs != 0)
- regSet.rsSpillRegs(spillRegs);
+ noway_assert(!compiler->opts.compDbgCode); // We don't enregister any floats with debug codegen
+
+ // Get hold of the target register
+ //
+ regNumber op1Reg = op1->gtRegVar.gtRegNum;
- assert(type == op1->TypeGet());
+ // the variable being assigned should be dead in op2
+ assert(!varDsc->lvTracked ||
+ !VarSetOps::IsMember(compiler, genUpdateLiveSetForward(op2), varDsc->lvVarIndex));
- inst_RV_RV(ins_FloatConv(type, op2->TypeGet()), op1Reg, op2->gtRegNum, type);
+ // Setup register preferencing, so that we try to target the op1 enregistered variable
+ //
+ regMaskTP bestMask = genRegMask(op1Reg);
+ if (type == TYP_DOUBLE)
+ {
+ assert((bestMask & RBM_DBL_REGS) != 0);
+ bestMask |= genRegMask(REG_NEXT(op1Reg));
+ }
+ RegSet::RegisterPreference pref(RBM_ALLFLOAT, bestMask);
+
+ // Evaluate op2 into a floating point register
+ //
+ genCodeForTreeFloat(op2, &pref);
+
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
+
+ // Make sure the value ends up in the right place ...
+ // For example if op2 is a call that returns a result
+ // in REG_F0, we will need to do a move instruction here
+ //
+ if ((op2->gtRegNum != op1Reg) || (op2->TypeGet() != type))
+ {
+ regMaskTP spillRegs = regSet.rsMaskUsed & genRegMaskFloat(op1Reg, op1->TypeGet());
+ if (spillRegs != 0)
+ regSet.rsSpillRegs(spillRegs);
+
+ assert(type == op1->TypeGet());
+
+ inst_RV_RV(ins_FloatConv(type, op2->TypeGet()), op1Reg, op2->gtRegNum, type);
+ }
+ genUpdateLife(op1);
+ goto DONE_ASG;
}
- genUpdateLife(op1);
- goto DONE_ASG;
- }
- break;
+ break;
- case GT_CLS_VAR:
- case GT_IND:
- // Check for a volatile/unaligned store
- //
- assert((op1->OperGet() == GT_CLS_VAR) || (op1->OperGet() == GT_IND)); // Required for GTF_IND_VOLATILE flag to be valid
- if (op1->gtFlags & GTF_IND_VOLATILE)
- volat = true;
- if (op1->gtFlags & GTF_IND_UNALIGNED)
- unaligned = true;
- break;
+ case GT_CLS_VAR:
+ case GT_IND:
+ // Check for a volatile/unaligned store
+ //
+ assert((op1->OperGet() == GT_CLS_VAR) ||
+ (op1->OperGet() == GT_IND)); // Required for GTF_IND_VOLATILE flag to be valid
+ if (op1->gtFlags & GTF_IND_VOLATILE)
+ volat = true;
+ if (op1->gtFlags & GTF_IND_UNALIGNED)
+ unaligned = true;
+ break;
- default:
- break;
+ default:
+ break;
}
// Is the value being assigned an enregistered floating-point local variable?
//
switch (op2->gtOper)
{
- case GT_LCL_VAR:
+ case GT_LCL_VAR:
- if (!genMarkLclVar(op2))
- break;
+ if (!genMarkLclVar(op2))
+ break;
- __fallthrough;
+ __fallthrough;
- case GT_REG_VAR:
+ case GT_REG_VAR:
- // We must honor the order evalauation in case op1 reassigns our op2 register
- //
- if (tree->gtFlags & GTF_REVERSE_OPS)
- break;
+ // We must honor the order evalauation in case op1 reassigns our op2 register
+ //
+ if (tree->gtFlags & GTF_REVERSE_OPS)
+ break;
- // Is there an implicit conversion that we have to insert?
- // Handle this case with the normal cases below.
- //
- if (type != op2->TypeGet())
- break;
+ // Is there an implicit conversion that we have to insert?
+ // Handle this case with the normal cases below.
+ //
+ if (type != op2->TypeGet())
+ break;
- // Make the target addressable
- //
- addrReg = genMakeAddressable(op1, needRegOp1, RegSet::KEEP_REG, true);
+ // Make the target addressable
+ //
+ addrReg = genMakeAddressable(op1, needRegOp1, RegSet::KEEP_REG, true);
- noway_assert(op2->gtFlags & GTF_REG_VAL);
- noway_assert(op2->IsRegVar());
-
- op2reg = op2->gtRegVar.gtRegNum;
- genUpdateLife(op2);
+ noway_assert(op2->gtFlags & GTF_REG_VAL);
+ noway_assert(op2->IsRegVar());
- goto CHK_VOLAT_UNALIGN;
- default:
- break;
+ op2reg = op2->gtRegVar.gtRegNum;
+ genUpdateLife(op2);
+
+ goto CHK_VOLAT_UNALIGN;
+ default:
+ break;
}
// Is the op2 (RHS) more complex than op1 (LHS)?
//
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
- regMaskTP bestRegs = regSet.rsNarrowHint(RBM_ALLFLOAT, ~op1->gtRsvdRegs);
+ regMaskTP bestRegs = regSet.rsNarrowHint(RBM_ALLFLOAT, ~op1->gtRsvdRegs);
RegSet::RegisterPreference pref(RBM_ALLFLOAT, bestRegs);
// Generate op2 (RHS) into a floating point register
@@ -497,9 +493,7 @@ void CodeGen::genFloatAssign(GenTree *tree)
// Make the target addressable
//
- addrReg = genMakeAddressable(op1,
- needRegOp1,
- RegSet::KEEP_REG, true);
+ addrReg = genMakeAddressable(op1, needRegOp1, RegSet::KEEP_REG, true);
genRecoverReg(op2, RBM_ALLFLOAT, RegSet::KEEP_REG);
noway_assert(op2->gtFlags & GTF_REG_VAL);
@@ -511,14 +505,10 @@ void CodeGen::genFloatAssign(GenTree *tree)
// Make the target addressable
//
- addrReg = genMakeAddressable(op1,
- needRegOp1,
- RegSet::KEEP_REG, true);
-
+ addrReg = genMakeAddressable(op1, needRegOp1, RegSet::KEEP_REG, true);
// Generate the RHS into any floating point register
genCodeForTreeFloat(op2);
-
}
noway_assert(op2->gtFlags & GTF_REG_VAL);
@@ -529,7 +519,7 @@ void CodeGen::genFloatAssign(GenTree *tree)
if (type != op2->TypeGet())
{
regMaskTP bestMask = genRegMask(op2reg);
- if (type==TYP_DOUBLE)
+ if (type == TYP_DOUBLE)
{
if (bestMask & RBM_DBL_REGS)
{
@@ -551,9 +541,9 @@ void CodeGen::genFloatAssign(GenTree *tree)
addrReg = genKeepAddressable(op1, addrReg);
CHK_VOLAT_UNALIGN:
-
- regSet.rsLockUsedReg(addrReg); // Must prevent unaligned regSet.rsGrabReg from choosing an addrReg
-
+
+ regSet.rsLockUsedReg(addrReg); // Must prevent unaligned regSet.rsGrabReg from choosing an addrReg
+
if (volat)
{
// Emit a memory barrier instruction before the store
@@ -565,46 +555,46 @@ CHK_VOLAT_UNALIGN:
assert(storeType == TYP_DOUBLE || storeType == TYP_FLOAT);
// Unaligned Floating-Point Stores must be done using the integer register(s)
- regNumber intRegLo = regSet.rsGrabReg(RBM_ALLINT);
- regNumber intRegHi = REG_NA;
- regMaskTP tmpLockMask = genRegMask(intRegLo);
+ regNumber intRegLo = regSet.rsGrabReg(RBM_ALLINT);
+ regNumber intRegHi = REG_NA;
+ regMaskTP tmpLockMask = genRegMask(intRegLo);
if (storeType == TYP_DOUBLE)
{
- intRegHi = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(intRegLo));
+ intRegHi = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(intRegLo));
tmpLockMask |= genRegMask(intRegHi);
}
// move the FP register over to the integer register(s)
//
- if (storeType == TYP_DOUBLE)
+ if (storeType == TYP_DOUBLE)
{
getEmitter()->emitIns_R_R_R(INS_vmov_d2i, EA_8BYTE, intRegLo, intRegHi, op2reg);
regTracker.rsTrackRegTrash(intRegHi);
}
- else
+ else
{
getEmitter()->emitIns_R_R(INS_vmov_f2i, EA_4BYTE, intRegLo, op2reg);
}
regTracker.rsTrackRegTrash(intRegLo);
- regSet.rsLockReg(tmpLockMask); // Temporarily lock the intRegs
- op1->gtType = TYP_INT; // Temporarily change the type to TYP_INT
-
+ regSet.rsLockReg(tmpLockMask); // Temporarily lock the intRegs
+ op1->gtType = TYP_INT; // Temporarily change the type to TYP_INT
+
inst_TT_RV(ins_Store(TYP_INT), op1, intRegLo);
if (storeType == TYP_DOUBLE)
{
inst_TT_RV(ins_Store(TYP_INT), op1, intRegHi, 4);
}
-
- op1->gtType = storeType; // Change the type back to the floating point type
- regSet.rsUnlockReg(tmpLockMask); // Unlock the intRegs
+
+ op1->gtType = storeType; // Change the type back to the floating point type
+ regSet.rsUnlockReg(tmpLockMask); // Unlock the intRegs
}
else
{
// Move the value into the target
- //
+ //
inst_TT_RV(ins_Store(op1->TypeGet()), op1, op2reg);
}
@@ -626,11 +616,10 @@ DONE_ASG:
#endif
}
-void CodeGen::genCodeForTreeFloat(GenTreePtr tree,
- RegSet::RegisterPreference *pref)
+void CodeGen::genCodeForTreeFloat(GenTreePtr tree, RegSet::RegisterPreference* pref)
{
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
assert(tree);
assert(tree->gtOper != GT_STMT);
@@ -639,7 +628,7 @@ void CodeGen::genCodeForTreeFloat(GenTreePtr tree,
oper = tree->OperGet();
kind = tree->OperKind();
- if (kind & GTK_CONST)
+ if (kind & GTK_CONST)
{
genFloatConst(tree, pref);
}
@@ -655,46 +644,46 @@ void CodeGen::genCodeForTreeFloat(GenTreePtr tree,
{
assert(oper == GT_CALL);
genCodeForCall(tree, true);
- }
+ }
}
-void CodeGen::genFloatLeaf(GenTree *tree, RegSet::RegisterPreference *pref)
+void CodeGen::genFloatLeaf(GenTree* tree, RegSet::RegisterPreference* pref)
{
regNumber reg = REG_NA;
switch (tree->OperGet())
{
- case GT_LCL_VAR:
- // Does the variable live in a register?
- //
- if (!genMarkLclVar(tree))
- goto MEM_LEAF;
- __fallthrough;
+ case GT_LCL_VAR:
+ // Does the variable live in a register?
+ //
+ if (!genMarkLclVar(tree))
+ goto MEM_LEAF;
+ __fallthrough;
- case GT_REG_VAR:
- noway_assert(tree->gtFlags & GTF_REG_VAL);
- reg = tree->gtRegVar.gtRegNum;
- break;
+ case GT_REG_VAR:
+ noway_assert(tree->gtFlags & GTF_REG_VAL);
+ reg = tree->gtRegVar.gtRegNum;
+ break;
- case GT_LCL_FLD:
- // We only use GT_LCL_FLD for lvAddrTaken vars, so we don't have
- // to worry about it being enregistered.
- noway_assert(compiler->lvaTable[tree->gtLclFld.gtLclNum].lvRegister == 0);
- __fallthrough;
+ case GT_LCL_FLD:
+ // We only use GT_LCL_FLD for lvAddrTaken vars, so we don't have
+ // to worry about it being enregistered.
+ noway_assert(compiler->lvaTable[tree->gtLclFld.gtLclNum].lvRegister == 0);
+ __fallthrough;
- case GT_CLS_VAR:
+ case GT_CLS_VAR:
-MEM_LEAF:
- reg = regSet.PickRegFloat(tree->TypeGet(), pref);
- genLoadFloat(tree, reg);
- break;
+ MEM_LEAF:
+ reg = regSet.PickRegFloat(tree->TypeGet(), pref);
+ genLoadFloat(tree, reg);
+ break;
- default:
- DISPTREE(tree);
- assert(!"unexpected leaf");
+ default:
+ DISPTREE(tree);
+ assert(!"unexpected leaf");
}
- genCodeForTreeFloat_DONE (tree, reg);
+ genCodeForTreeFloat_DONE(tree, reg);
return;
}
@@ -703,7 +692,7 @@ void CodeGen::genLoadFloat(GenTreePtr tree, regNumber reg)
if (tree->IsRegVar())
{
// if it has been spilled, unspill it.%
- LclVarDsc * varDsc = &compiler->lvaTable[tree->gtLclVarCommon.gtLclNum];
+ LclVarDsc* varDsc = &compiler->lvaTable[tree->gtLclVarCommon.gtLclNum];
if (varDsc->lvSpilled)
{
UnspillFloat(varDsc);
@@ -716,50 +705,50 @@ void CodeGen::genLoadFloat(GenTreePtr tree, regNumber reg)
bool unalignedLoad = false;
switch (tree->OperGet())
{
- case GT_IND:
- case GT_CLS_VAR:
- if (tree->gtFlags & GTF_IND_UNALIGNED)
- unalignedLoad = true;
- break;
- case GT_LCL_FLD:
- // Check for a misalignment on a Floating Point field
- //
- if (varTypeIsFloating(tree->TypeGet()))
- {
- if ((tree->gtLclFld.gtLclOffs % emitTypeSize(tree->TypeGet())) != 0)
- {
+ case GT_IND:
+ case GT_CLS_VAR:
+ if (tree->gtFlags & GTF_IND_UNALIGNED)
unalignedLoad = true;
+ break;
+ case GT_LCL_FLD:
+ // Check for a misalignment on a Floating Point field
+ //
+ if (varTypeIsFloating(tree->TypeGet()))
+ {
+ if ((tree->gtLclFld.gtLclOffs % emitTypeSize(tree->TypeGet())) != 0)
+ {
+ unalignedLoad = true;
+ }
}
- }
- break;
- default:
- break;
+ break;
+ default:
+ break;
}
if (unalignedLoad)
{
// Make the target addressable
//
- regMaskTP addrReg = genMakeAddressable(tree, 0, RegSet::KEEP_REG, true);
- regSet.rsLockUsedReg(addrReg); // Must prevent regSet.rsGrabReg from choosing an addrReg
+ regMaskTP addrReg = genMakeAddressable(tree, 0, RegSet::KEEP_REG, true);
+ regSet.rsLockUsedReg(addrReg); // Must prevent regSet.rsGrabReg from choosing an addrReg
var_types loadType = tree->TypeGet();
assert(loadType == TYP_DOUBLE || loadType == TYP_FLOAT);
// Unaligned Floating-Point Loads must be loaded into integer register(s)
// and then moved over to the Floating-Point register
- regNumber intRegLo = regSet.rsGrabReg(RBM_ALLINT);
- regNumber intRegHi = REG_NA;
- regMaskTP tmpLockMask = genRegMask(intRegLo);
+ regNumber intRegLo = regSet.rsGrabReg(RBM_ALLINT);
+ regNumber intRegHi = REG_NA;
+ regMaskTP tmpLockMask = genRegMask(intRegLo);
if (loadType == TYP_DOUBLE)
{
- intRegHi = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(intRegLo));
+ intRegHi = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(intRegLo));
tmpLockMask |= genRegMask(intRegHi);
}
-
- regSet.rsLockReg(tmpLockMask); // Temporarily lock the intRegs
- tree->gtType = TYP_INT; // Temporarily change the type to TYP_INT
+
+ regSet.rsLockReg(tmpLockMask); // Temporarily lock the intRegs
+ tree->gtType = TYP_INT; // Temporarily change the type to TYP_INT
inst_RV_TT(ins_Load(TYP_INT), intRegLo, tree);
regTracker.rsTrackRegTrash(intRegLo);
@@ -770,14 +759,14 @@ void CodeGen::genLoadFloat(GenTreePtr tree, regNumber reg)
regTracker.rsTrackRegTrash(intRegHi);
}
- tree->gtType = loadType; // Change the type back to the floating point type
- regSet.rsUnlockReg(tmpLockMask); // Unlock the intRegs
+ tree->gtType = loadType; // Change the type back to the floating point type
+ regSet.rsUnlockReg(tmpLockMask); // Unlock the intRegs
// move the integer register(s) over to the FP register
//
- if (loadType == TYP_DOUBLE)
+ if (loadType == TYP_DOUBLE)
getEmitter()->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, reg, intRegLo, intRegHi);
- else
+ else
getEmitter()->emitIns_R_R(INS_vmov_i2f, EA_4BYTE, reg, intRegLo);
// Free up anything that was tied up by genMakeAddressable
@@ -789,35 +778,32 @@ void CodeGen::genLoadFloat(GenTreePtr tree, regNumber reg)
{
inst_RV_TT(ins_FloatLoad(tree->TypeGet()), reg, tree);
}
- if (((tree->OperGet() == GT_CLS_VAR) || (tree->OperGet() == GT_IND)) &&
- (tree->gtFlags & GTF_IND_VOLATILE))
+ if (((tree->OperGet() == GT_CLS_VAR) || (tree->OperGet() == GT_IND)) && (tree->gtFlags & GTF_IND_VOLATILE))
{
- // Emit a memory barrier instruction after the load
+ // Emit a memory barrier instruction after the load
instGen_MemoryBarrier();
}
}
}
-void CodeGen::genCodeForTreeFloat_DONE (GenTreePtr tree, regNumber reg)
+void CodeGen::genCodeForTreeFloat_DONE(GenTreePtr tree, regNumber reg)
{
return genCodeForTree_DONE(tree, reg);
}
-void CodeGen::genFloatAsgArith (GenTreePtr tree)
+void CodeGen::genFloatAsgArith(GenTreePtr tree)
{
// Set Flowgraph.cpp, line 13750
// arm VFP has tons of regs, 3-op instructions, and no addressing modes
// so asg ops are kind of pointless
noway_assert(!"Not Reachable for _TARGET_ARM_");
-
}
-regNumber CodeGen::genAssignArithFloat(genTreeOps oper,
- GenTreePtr dst, regNumber dstreg,
- GenTreePtr src, regNumber srcreg)
+regNumber CodeGen::genAssignArithFloat(
+ genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg)
{
regNumber result;
-
+
// dst should be a regvar or memory
if (dst->IsRegVar())
@@ -825,7 +811,7 @@ regNumber CodeGen::genAssignArithFloat(genTreeOps oper,
regNumber reg = dst->gtRegNum;
if (src->IsRegVar())
- {
+ {
inst_RV_RV(ins_MathOp(oper, dst->gtType), reg, src->gtRegNum, dst->gtType);
}
else
@@ -834,20 +820,20 @@ regNumber CodeGen::genAssignArithFloat(genTreeOps oper,
}
result = reg;
}
- else // dst in memory
+ else // dst in memory
{
// since this is an asgop the ACTUAL destination is memory
// but it is also one of the sources and SSE ops do not allow mem dests
// so we have loaded it into a reg, and that is what dstreg represents
assert(dstreg != REG_NA);
- if ( (src->InReg()))
+ if ((src->InReg()))
{
inst_RV_RV(ins_MathOp(oper, dst->gtType), dstreg, src->gtRegNum, dst->gtType);
}
else
{
- //mem mem operation
+ // mem mem operation
inst_RV_TT(ins_MathOp(oper, dst->gtType), dstreg, src, 0, EmitSize(dst));
}
@@ -861,23 +847,19 @@ regNumber CodeGen::genAssignArithFloat(genTreeOps oper,
return result;
}
-void CodeGen::genFloatArith (GenTreePtr tree,
- RegSet::RegisterPreference *tgtPref)
+void CodeGen::genFloatArith(GenTreePtr tree, RegSet::RegisterPreference* tgtPref)
{
- var_types type = tree->TypeGet();
- genTreeOps oper = tree->OperGet();
- GenTreePtr op1 = tree->gtGetOp1();
- GenTreePtr op2 = tree->gtGetOp2();
+ var_types type = tree->TypeGet();
+ genTreeOps oper = tree->OperGet();
+ GenTreePtr op1 = tree->gtGetOp1();
+ GenTreePtr op2 = tree->gtGetOp2();
- regNumber tgtReg;
- unsigned varNum;
- LclVarDsc * varDsc;
- VARSET_TP varBit;
+ regNumber tgtReg;
+ unsigned varNum;
+ LclVarDsc* varDsc;
+ VARSET_TP varBit;
- assert(oper == GT_ADD ||
- oper == GT_SUB ||
- oper == GT_MUL ||
- oper == GT_DIV);
+ assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL || oper == GT_DIV);
RegSet::RegisterPreference defaultPref(RBM_ALLFLOAT, RBM_NONE);
if (tgtPref == NULL)
@@ -887,23 +869,23 @@ void CodeGen::genFloatArith (GenTreePtr tree,
// Is the op2 (RHS)more complex than op1 (LHS)?
//
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
- regMaskTP bestRegs = regSet.rsNarrowHint(RBM_ALLFLOAT, ~op1->gtRsvdRegs);
+ regMaskTP bestRegs = regSet.rsNarrowHint(RBM_ALLFLOAT, ~op1->gtRsvdRegs);
RegSet::RegisterPreference pref(RBM_ALLFLOAT, bestRegs);
- // Evaluate op2 into a floating point register
+ // Evaluate op2 into a floating point register
//
genCodeForTreeFloat(op2, &pref);
regSet.SetUsedRegFloat(op2, true);
- // Evaluate op1 into any floating point register
+ // Evaluate op1 into any floating point register
//
genCodeForTreeFloat(op1);
regSet.SetUsedRegFloat(op1, true);
- regNumber op1Reg = op1->gtRegNum;
- regMaskTP op1Mask = genRegMaskFloat(op1Reg, type);
+ regNumber op1Reg = op1->gtRegNum;
+ regMaskTP op1Mask = genRegMaskFloat(op1Reg, type);
// Fix 388445 ARM JitStress WP7
regSet.rsLockUsedReg(op1Mask);
@@ -916,21 +898,21 @@ void CodeGen::genFloatArith (GenTreePtr tree,
}
else
{
- regMaskTP bestRegs = regSet.rsNarrowHint(RBM_ALLFLOAT, ~op2->gtRsvdRegs);
+ regMaskTP bestRegs = regSet.rsNarrowHint(RBM_ALLFLOAT, ~op2->gtRsvdRegs);
RegSet::RegisterPreference pref(RBM_ALLFLOAT, bestRegs);
- // Evaluate op1 into a floating point register
+ // Evaluate op1 into a floating point register
//
genCodeForTreeFloat(op1, &pref);
regSet.SetUsedRegFloat(op1, true);
- // Evaluate op2 into any floating point register
+ // Evaluate op2 into any floating point register
//
genCodeForTreeFloat(op2);
regSet.SetUsedRegFloat(op2, true);
-
- regNumber op2Reg = op2->gtRegNum;
- regMaskTP op2Mask = genRegMaskFloat(op2Reg, type);
+
+ regNumber op2Reg = op2->gtRegNum;
+ regMaskTP op2Mask = genRegMaskFloat(op2Reg, type);
// Fix 388445 ARM JitStress WP7
regSet.rsLockUsedReg(op2Mask);
@@ -938,7 +920,7 @@ void CodeGen::genFloatArith (GenTreePtr tree,
noway_assert(op1->gtFlags & GTF_REG_VAL);
regSet.rsUnlockUsedReg(op2Mask);
- regSet.SetUsedRegFloat(op2, false);
+ regSet.SetUsedRegFloat(op2, false);
regSet.SetUsedRegFloat(op1, false);
}
@@ -952,29 +934,27 @@ void CodeGen::genFloatArith (GenTreePtr tree,
genCodeForTreeFloat_DONE(tree, tgtReg);
}
-regNumber CodeGen::genArithmFloat(genTreeOps oper,
- GenTreePtr dst, regNumber dstreg,
- GenTreePtr src, regNumber srcreg,
- bool bReverse)
-{
+regNumber CodeGen::genArithmFloat(
+ genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg, bool bReverse)
+{
regNumber result = REG_NA;
assert(dstreg != REG_NA);
if (bReverse)
{
- GenTree *temp = src;
+ GenTree* temp = src;
regNumber tempreg = srcreg;
- src = dst;
- srcreg = dstreg;
- dst = temp;
- dstreg = tempreg;
+ src = dst;
+ srcreg = dstreg;
+ dst = temp;
+ dstreg = tempreg;
}
if (srcreg == REG_NA)
{
if (src->IsRegVar())
- {
+ {
inst_RV_RV(ins_MathOp(oper, dst->gtType), dst->gtRegNum, src->gtRegNum, dst->gtType);
}
else
@@ -989,11 +969,11 @@ regNumber CodeGen::genArithmFloat(genTreeOps oper,
result = dstreg;
- assert (result != REG_NA);
+ assert(result != REG_NA);
return result;
}
-void CodeGen::genKeepAddressableFloat(GenTreePtr tree, regMaskTP * regMaskIntPtr, regMaskTP * regMaskFltPtr)
+void CodeGen::genKeepAddressableFloat(GenTreePtr tree, regMaskTP* regMaskIntPtr, regMaskTP* regMaskFltPtr)
{
regMaskTP regMaskInt, regMaskFlt;
@@ -1001,57 +981,57 @@ void CodeGen::genKeepAddressableFloat(GenTreePtr tree, regMaskTP * regMaskIntPt
regMaskFlt = *regMaskFltPtr;
*regMaskIntPtr = *regMaskFltPtr = 0;
-
+
switch (tree->OperGet())
{
- case GT_REG_VAR:
- // If register has been spilled, unspill it
- if (tree->gtFlags & GTF_SPILLED)
- {
- UnspillFloat(&compiler->lvaTable[tree->gtLclVarCommon.gtLclNum]);
- }
- break;
+ case GT_REG_VAR:
+ // If register has been spilled, unspill it
+ if (tree->gtFlags & GTF_SPILLED)
+ {
+ UnspillFloat(&compiler->lvaTable[tree->gtLclVarCommon.gtLclNum]);
+ }
+ break;
- case GT_CNS_DBL:
- if (tree->gtFlags & GTF_SPILLED)
- {
- UnspillFloat(tree);
- }
- *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
- break;
+ case GT_CNS_DBL:
+ if (tree->gtFlags & GTF_SPILLED)
+ {
+ UnspillFloat(tree);
+ }
+ *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
+ break;
- case GT_LCL_FLD:
- case GT_LCL_VAR:
- case GT_CLS_VAR:
- break;
+ case GT_LCL_FLD:
+ case GT_LCL_VAR:
+ case GT_CLS_VAR:
+ break;
- case GT_IND:
- if (regMaskFlt == RBM_NONE)
- {
- *regMaskIntPtr = genKeepAddressable(tree, regMaskInt, 0);
- *regMaskFltPtr = 0;
- return;
- }
- __fallthrough;
+ case GT_IND:
+ if (regMaskFlt == RBM_NONE)
+ {
+ *regMaskIntPtr = genKeepAddressable(tree, regMaskInt, 0);
+ *regMaskFltPtr = 0;
+ return;
+ }
+ __fallthrough;
- default:
- *regMaskIntPtr = 0;
- if (tree->gtFlags & GTF_SPILLED)
- {
- UnspillFloat(tree);
- }
- *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
- break;
+ default:
+ *regMaskIntPtr = 0;
+ if (tree->gtFlags & GTF_SPILLED)
+ {
+ UnspillFloat(tree);
+ }
+ *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
+ break;
}
}
-void CodeGen::genComputeAddressableFloat(GenTreePtr tree,
- regMaskTP addrRegInt,
- regMaskTP addrRegFlt,
- RegSet::KeepReg keptReg,
- regMaskTP needReg,
- RegSet::KeepReg keepReg,
- bool freeOnly /* = false */)
+void CodeGen::genComputeAddressableFloat(GenTreePtr tree,
+ regMaskTP addrRegInt,
+ regMaskTP addrRegFlt,
+ RegSet::KeepReg keptReg,
+ regMaskTP needReg,
+ RegSet::KeepReg keepReg,
+ bool freeOnly /* = false */)
{
noway_assert(genStillAddressable(tree));
noway_assert(varTypeIsFloating(tree->TypeGet()));
@@ -1069,10 +1049,10 @@ void CodeGen::genComputeAddressableFloat(GenTreePtr tree,
}
else
{
- LOAD_REG:
- RegSet::RegisterPreference pref(needReg, RBM_NONE);
- reg = regSet.PickRegFloat(tree->TypeGet(), &pref);
- genLoadFloat(tree, reg);
+ LOAD_REG:
+ RegSet::RegisterPreference pref(needReg, RBM_NONE);
+ reg = regSet.PickRegFloat(tree->TypeGet(), &pref);
+ genLoadFloat(tree, reg);
}
genMarkTreeInReg(tree, reg);
@@ -1083,7 +1063,10 @@ void CodeGen::genComputeAddressableFloat(GenTreePtr tree,
}
}
-void CodeGen::genDoneAddressableFloat(GenTreePtr tree, regMaskTP addrRegInt, regMaskTP addrRegFlt, RegSet::KeepReg keptReg)
+void CodeGen::genDoneAddressableFloat(GenTreePtr tree,
+ regMaskTP addrRegInt,
+ regMaskTP addrRegFlt,
+ RegSet::KeepReg keptReg)
{
assert(!(addrRegInt && addrRegFlt));
@@ -1092,7 +1075,7 @@ void CodeGen::genDoneAddressableFloat(GenTreePtr tree, regMaskTP addrRegInt, reg
return genDoneAddressable(tree, addrRegInt, keptReg);
}
else if (addrRegFlt)
- {
+ {
if (keptReg == RegSet::KEEP_REG)
{
for (regNumber r = REG_FP_FIRST; r != REG_NA; r = regNextOfType(r, tree->TypeGet()))
@@ -1104,73 +1087,67 @@ void CodeGen::genDoneAddressableFloat(GenTreePtr tree, regMaskTP addrRegInt, reg
regSet.SetUsedRegFloat(tree, false);
}
}
- }
+ }
}
}
-GenTreePtr CodeGen::genMakeAddressableFloat(GenTreePtr tree,
- regMaskTP * regMaskIntPtr,
- regMaskTP * regMaskFltPtr,
- bool bCollapseConstantDoubles)
-{
+GenTreePtr CodeGen::genMakeAddressableFloat(GenTreePtr tree,
+ regMaskTP* regMaskIntPtr,
+ regMaskTP* regMaskFltPtr,
+ bool bCollapseConstantDoubles)
+{
*regMaskIntPtr = *regMaskFltPtr = 0;
switch (tree->OperGet())
- {
-
- case GT_LCL_VAR:
- genMarkLclVar(tree);
- __fallthrough;
-
- case GT_REG_VAR:
- case GT_LCL_FLD:
- case GT_CLS_VAR:
- return tree;
-
- case GT_IND:
- // Try to make the address directly addressable
-
- if (genMakeIndAddrMode(tree->gtOp.gtOp1,
- tree,
- false,
- RBM_ALLFLOAT,
- RegSet::KEEP_REG,
- regMaskIntPtr,
- false))
- {
- genUpdateLife(tree);
+ {
+
+ case GT_LCL_VAR:
+ genMarkLclVar(tree);
+ __fallthrough;
+
+ case GT_REG_VAR:
+ case GT_LCL_FLD:
+ case GT_CLS_VAR:
return tree;
- }
- else
- {
- GenTreePtr addr = tree;
- tree = tree->gtOp.gtOp1;
- genCodeForTree(tree, 0);
- regSet.rsMarkRegUsed(tree, addr);
- *regMaskIntPtr = genRegMask(tree->gtRegNum);
- return addr;
- }
+ case GT_IND:
+ // Try to make the address directly addressable
+
+ if (genMakeIndAddrMode(tree->gtOp.gtOp1, tree, false, RBM_ALLFLOAT, RegSet::KEEP_REG, regMaskIntPtr, false))
+ {
+ genUpdateLife(tree);
+ return tree;
+ }
+ else
+ {
+ GenTreePtr addr = tree;
+ tree = tree->gtOp.gtOp1;
+ genCodeForTree(tree, 0);
+ regSet.rsMarkRegUsed(tree, addr);
+
+ *regMaskIntPtr = genRegMask(tree->gtRegNum);
+ return addr;
+ }
// fall through
- default:
- genCodeForTreeFloat(tree);
- regSet.SetUsedRegFloat(tree, true);
+ default:
+ genCodeForTreeFloat(tree);
+ regSet.SetUsedRegFloat(tree, true);
- // update mask
- *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
+ // update mask
+ *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
- return tree;
- break;
- }
+ return tree;
+ break;
+ }
}
-void CodeGen::genCodeForTreeCastFloat(GenTree *tree, RegSet::RegisterPreference *pref)
+void CodeGen::genCodeForTreeCastFloat(GenTree* tree, RegSet::RegisterPreference* pref)
{
- GenTreePtr op1 = tree->gtOp.gtOp1;
- var_types from = op1->gtType;
- var_types to = tree->gtType;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ var_types from = op1->gtType;
+ var_types to = tree->gtType;
if (varTypeIsFloating(from))
genCodeForTreeCastFromFloat(tree, pref);
@@ -1178,8 +1155,7 @@ void CodeGen::genCodeForTreeCastFloat(GenTree *tree, RegSet::RegisterPreference
genCodeForTreeCastToFloat(tree, pref);
}
-
-void CodeGen::genCodeForTreeCastFromFloat(GenTree *tree, RegSet::RegisterPreference *pref)
+void CodeGen::genCodeForTreeCastFromFloat(GenTree* tree, RegSet::RegisterPreference* pref)
{
GenTreePtr op1 = tree->gtOp.gtOp1;
var_types from = op1->gtType;
@@ -1191,7 +1167,7 @@ void CodeGen::genCodeForTreeCastFromFloat(GenTree *tree, RegSet::RegisterPrefere
assert(varTypeIsFloating(from));
- // Evaluate op1 into a floating point register
+ // Evaluate op1 into a floating point register
//
if (varTypeIsFloating(final))
{
@@ -1207,10 +1183,10 @@ void CodeGen::genCodeForTreeCastFromFloat(GenTree *tree, RegSet::RegisterPrefere
if (varTypeIsFloating(final))
{
- // float => double or
+ // float => double or
// double => float
- dstReg = regSet.PickRegFloat(final, pref);
+ dstReg = regSet.PickRegFloat(final, pref);
instruction ins = ins_FloatConv(final, from);
if (!isMoveIns(ins) || (srcReg != dstReg))
@@ -1220,13 +1196,13 @@ void CodeGen::genCodeForTreeCastFromFloat(GenTree *tree, RegSet::RegisterPrefere
}
else
{
- // float => int or
+ // float => int or
// double => int
dstReg = regSet.rsPickReg(pref->ok, pref->best);
RegSet::RegisterPreference defaultPref(RBM_ALLFLOAT, genRegMask(srcReg));
- regNumber intermediateReg = regSet.PickRegFloat(TYP_FLOAT, &defaultPref);
+ regNumber intermediateReg = regSet.PickRegFloat(TYP_FLOAT, &defaultPref);
if ((intermediate == TYP_UINT) && (final == TYP_INT))
{
@@ -1245,7 +1221,6 @@ void CodeGen::genCodeForTreeCastFromFloat(GenTree *tree, RegSet::RegisterPrefere
// the integer result is now in the FP register, move it to the integer ones
getEmitter()->emitIns_R_R(INS_vmov_f2i, EA_4BYTE, dstReg, intermediateReg);
-
regTracker.rsTrackRegTrash(dstReg);
// handle things like int <- short <- double
@@ -1261,19 +1236,18 @@ void CodeGen::genCodeForTreeCastFromFloat(GenTree *tree, RegSet::RegisterPrefere
genCodeForTree_DONE(tree, dstReg);
}
-void CodeGen::genCodeForTreeCastToFloat(GenTreePtr tree, RegSet::RegisterPreference *pref)
+void CodeGen::genCodeForTreeCastToFloat(GenTreePtr tree, RegSet::RegisterPreference* pref)
{
- regNumber srcReg;
- regNumber dstReg;
- regNumber vmovReg;
+ regNumber srcReg;
+ regNumber dstReg;
+ regNumber vmovReg;
- regMaskTP addrReg;
-
+ regMaskTP addrReg;
GenTreePtr op1 = tree->gtOp.gtOp1;
- op1 = genCodeForCommaTree(op1); // Trim off any comma expressions.
+ op1 = genCodeForCommaTree(op1); // Trim off any comma expressions.
var_types from = op1->gtType;
- var_types to = tree->gtType;
+ var_types to = tree->gtType;
switch (from)
{
@@ -1293,7 +1267,7 @@ void CodeGen::genCodeForTreeCastToFloat(GenTreePtr tree, RegSet::RegisterPrefere
__fallthrough;
case TYP_INT:
- {
+ {
if (op1->gtOper == GT_LCL_FLD)
{
genComputeReg(op1, 0, RegSet::ANY_REG, RegSet::FREE_REG);
@@ -1309,13 +1283,13 @@ void CodeGen::genCodeForTreeCastToFloat(GenTreePtr tree, RegSet::RegisterPrefere
// float type that is same size as the int we are coming from
var_types vmovType = TYP_FLOAT;
- regNumber vmovReg = regSet.PickRegFloat(vmovType);
+ regNumber vmovReg = regSet.PickRegFloat(vmovType);
if (tree->gtFlags & GTF_UNSIGNED)
from = TYP_UINT;
// Is the value a constant, or now sitting in a register?
- if (op1->InReg() || op1->IsCnsIntOrI())
+ if (op1->InReg() || op1->IsCnsIntOrI())
{
if (op1->IsCnsIntOrI())
{
@@ -1333,10 +1307,10 @@ void CodeGen::genCodeForTreeCastToFloat(GenTreePtr tree, RegSet::RegisterPrefere
}
else
{
- // Load the value from its address
+ // Load the value from its address
inst_RV_TT(ins_FloatLoad(vmovType), vmovReg, op1);
inst_RV_RV(ins_FloatConv(to, from), dstReg, vmovReg, to);
- }
+ }
if (addrReg)
{
@@ -1363,19 +1337,17 @@ void CodeGen::genCodeForTreeCastToFloat(GenTreePtr tree, RegSet::RegisterPrefere
// Assign reg to tree
genMarkTreeInReg(tree, dstReg);
-
- break;
+
+ break;
}
default:
{
assert(!"unsupported cast");
break;
- }
+ }
}
}
-
-
void CodeGen::genRoundFloatExpression(GenTreePtr op, var_types type)
{
// Do nothing with memory resident opcodes - these are the right precision
@@ -1384,27 +1356,27 @@ void CodeGen::genRoundFloatExpression(GenTreePtr op, var_types type)
switch (op->gtOper)
{
- case GT_LCL_VAR:
- genMarkLclVar(op);
- __fallthrough;
-
- case GT_LCL_FLD:
- case GT_CLS_VAR:
- case GT_CNS_DBL:
- case GT_IND:
- if (type == op->TypeGet())
- return;
+ case GT_LCL_VAR:
+ genMarkLclVar(op);
+ __fallthrough;
- default:
- break;
+ case GT_LCL_FLD:
+ case GT_CLS_VAR:
+ case GT_CNS_DBL:
+ case GT_IND:
+ if (type == op->TypeGet())
+ return;
+
+ default:
+ break;
}
}
-#ifdef DEBUG
+#ifdef DEBUG
regMaskTP CodeGenInterface::genStressLockedMaskFloat()
-{
- return 0;
+{
+ return 0;
}
#endif // DEBUG
@@ -1437,7 +1409,7 @@ void CodeGenInterface::SpillFloat(regNumber reg, bool bIsCall /* = false */)
regSet.rsSpillReg(reg);
}
-void CodeGen::UnspillFloatMachineDep(RegSet::SpillDsc *spillDsc)
+void CodeGen::UnspillFloatMachineDep(RegSet::SpillDsc* spillDsc)
{
// Do actual unspill
regNumber reg;
@@ -1468,7 +1440,7 @@ void CodeGen::UnspillFloatMachineDep(RegSet::SpillDsc* spillDsc, bool useSameReg
reg = spillDsc->spillTree->gtRegNum;
regMaskTP maskPref = genRegMask(reg);
- if (type==TYP_DOUBLE)
+ if (type == TYP_DOUBLE)
{
assert((maskPref & RBM_DBL_REGS) != 0);
maskPref |= genRegMask(REG_NEXT(reg));
@@ -1489,46 +1461,43 @@ void CodeGen::UnspillFloatMachineDep(RegSet::SpillDsc* spillDsc, bool useSameReg
regSet.SetUsedRegFloat(spillDsc->spillTree, true);
}
-//
-instruction genFloatJumpInstr(genTreeOps cmp,
- bool isUnordered)
+//
+instruction genFloatJumpInstr(genTreeOps cmp, bool isUnordered)
{
switch (cmp)
{
- case GT_EQ:
- return INS_beq;
- case GT_NE:
- return INS_bne;
- case GT_LT:
- return isUnordered ? INS_blt : INS_blo;
- case GT_LE:
- return isUnordered ? INS_ble : INS_bls;
- case GT_GE:
- return isUnordered ? INS_bpl : INS_bge;
- case GT_GT:
- return isUnordered ? INS_bhi : INS_bgt;
- default:
- unreached();
+ case GT_EQ:
+ return INS_beq;
+ case GT_NE:
+ return INS_bne;
+ case GT_LT:
+ return isUnordered ? INS_blt : INS_blo;
+ case GT_LE:
+ return isUnordered ? INS_ble : INS_bls;
+ case GT_GE:
+ return isUnordered ? INS_bpl : INS_bge;
+ case GT_GT:
+ return isUnordered ? INS_bhi : INS_bgt;
+ default:
+ unreached();
}
}
-void CodeGen::genCondJumpFloat(GenTreePtr cond,
- BasicBlock * jumpTrue,
- BasicBlock * jumpFalse)
+void CodeGen::genCondJumpFloat(GenTreePtr cond, BasicBlock* jumpTrue, BasicBlock* jumpFalse)
{
assert(jumpTrue && jumpFalse);
assert(!(cond->gtFlags & GTF_REVERSE_OPS)); // Done in genCondJump()
assert(varTypeIsFloating(cond->gtOp.gtOp1->gtType));
- GenTreePtr op1 = cond->gtOp.gtOp1;
- GenTreePtr op2 = cond->gtOp.gtOp2;
- genTreeOps cmp = cond->OperGet();
- bool isUnordered = cond->gtFlags & GTF_RELOP_NAN_UN ? true : false;
+ GenTreePtr op1 = cond->gtOp.gtOp1;
+ GenTreePtr op2 = cond->gtOp.gtOp2;
+ genTreeOps cmp = cond->OperGet();
+ bool isUnordered = cond->gtFlags & GTF_RELOP_NAN_UN ? true : false;
- regMaskTP bestRegs = regSet.rsNarrowHint(RBM_ALLFLOAT, ~op2->gtRsvdRegs);
+ regMaskTP bestRegs = regSet.rsNarrowHint(RBM_ALLFLOAT, ~op2->gtRsvdRegs);
RegSet::RegisterPreference pref(RBM_ALLFLOAT, bestRegs);
- // Prepare operands.
+ // Prepare operands.
genCodeForTreeFloat(op1, &pref);
regSet.SetUsedRegFloat(op1, true);
@@ -1539,8 +1508,7 @@ void CodeGen::genCondJumpFloat(GenTreePtr cond,
noway_assert(op1->gtFlags & GTF_REG_VAL);
// cmp here
- getEmitter()->emitIns_R_R(INS_vcmp, EmitSize(op1),
- op1->gtRegNum, op2->gtRegNum);
+ getEmitter()->emitIns_R_R(INS_vcmp, EmitSize(op1), op1->gtRegNum, op2->gtRegNum);
// vmrs with register 0xf has special meaning of transferring flags
getEmitter()->emitIns_R(INS_vmrs, EA_4BYTE, REG_R15);
@@ -1551,5 +1519,4 @@ void CodeGen::genCondJumpFloat(GenTreePtr cond,
getEmitter()->emitIns_J(genFloatJumpInstr(cmp, isUnordered), jumpTrue);
}
-
#endif // LEGACY_BACKEND
diff --git a/src/jit/registerfp.h b/src/jit/registerfp.h
index 7dddf13ca4..4c3ecb6050 100644
--- a/src/jit/registerfp.h
+++ b/src/jit/registerfp.h
@@ -5,23 +5,22 @@
/*****************************************************************************/
/*****************************************************************************/
#ifndef REGDEF
-#error Must define REGDEF macro before including this file
+#error Must define REGDEF macro before including this file
#endif
/*****************************************************************************/
/* The following is x86 specific */
/*****************************************************************************/
/*
REGDEF(name, rnum, mask, sname) */
-REGDEF(FPV0, 0, 0x01, "FPV0" )
-REGDEF(FPV1, 1, 0x02, "FPV1" )
-REGDEF(FPV2, 2, 0x04, "FPV2" )
-REGDEF(FPV3, 3, 0x08, "FPV3" )
-REGDEF(FPV4, 4, 0x10, "FPV4" )
-REGDEF(FPV5, 5, 0x20, "FPV5" )
-REGDEF(FPV6, 6, 0x40, "FPV6" )
-REGDEF(FPV7, 7, 0x80, "FPV7" )
-
+REGDEF(FPV0, 0, 0x01, "FPV0")
+REGDEF(FPV1, 1, 0x02, "FPV1")
+REGDEF(FPV2, 2, 0x04, "FPV2")
+REGDEF(FPV3, 3, 0x08, "FPV3")
+REGDEF(FPV4, 4, 0x10, "FPV4")
+REGDEF(FPV5, 5, 0x20, "FPV5")
+REGDEF(FPV6, 6, 0x40, "FPV6")
+REGDEF(FPV7, 7, 0x80, "FPV7")
/*****************************************************************************/
-#undef REGDEF
+#undef REGDEF
/*****************************************************************************/
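Aside: registerfp.h, like register.h above, is an X-macro table: each includer defines REGDEF to the expansion it needs, includes the file, then undefines it. A minimal sketch of the pattern under illustrative names (the table is inlined here instead of living in a separate header):

// The table; in the real code this is the body of registerfp.h / register.h.
#define REG_TABLE_SKETCH(REGDEF) \
    REGDEF(FPV0, 0, 0x01, "FPV0") \
    REGDEF(FPV1, 1, 0x02, "FPV1")

// Expansion 1: an enum of register numbers.
#define REGDEF_ENUM(name, rnum, mask, sname) REG_##name = rnum,
enum RegNumSketch
{
    REG_TABLE_SKETCH(REGDEF_ENUM)
    REG_COUNT_SKETCH
};
#undef REGDEF_ENUM

// Expansion 2: the printable names, in the same order as the enum.
#define REGDEF_NAME(name, rnum, mask, sname) sname,
const char* const regNamesSketch[] = { REG_TABLE_SKETCH(REGDEF_NAME) };
#undef REGDEF_NAME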
diff --git a/src/jit/regpair.h b/src/jit/regpair.h
index cf31897524..cfc109b882 100644
--- a/src/jit/regpair.h
+++ b/src/jit/regpair.h
@@ -9,11 +9,11 @@
#endif
#ifndef PAIRDEF
-#define PAIRDEF(r1,r2)
+#define PAIRDEF(r1, r2)
#endif
#ifndef PAIRSTK
-#define PAIRSTK(r1,r2) PAIRDEF(r1,r2)
+#define PAIRSTK(r1, r2) PAIRDEF(r1, r2)
#endif
#if defined(_TARGET_X86_)
@@ -23,77 +23,77 @@
// rlo rhi
-PAIRBEG(EAX )
-PAIRDEF(EAX,ECX)
-PAIRDEF(EAX,EDX)
-PAIRDEF(EAX,EBX)
-PAIRDEF(EAX,EBP)
-PAIRDEF(EAX,ESI)
-PAIRDEF(EAX,EDI)
-PAIRSTK(EAX,STK)
-
-PAIRBEG(ECX )
-PAIRDEF(ECX,EAX)
-PAIRDEF(ECX,EDX)
-PAIRDEF(ECX,EBX)
-PAIRDEF(ECX,EBP)
-PAIRDEF(ECX,ESI)
-PAIRDEF(ECX,EDI)
-PAIRSTK(ECX,STK)
-
-PAIRBEG(EDX )
-PAIRDEF(EDX,EAX)
-PAIRDEF(EDX,ECX)
-PAIRDEF(EDX,EBX)
-PAIRDEF(EDX,EBP)
-PAIRDEF(EDX,ESI)
-PAIRDEF(EDX,EDI)
-PAIRSTK(EDX,STK)
-
-PAIRBEG(EBX )
-PAIRDEF(EBX,EAX)
-PAIRDEF(EBX,EDX)
-PAIRDEF(EBX,ECX)
-PAIRDEF(EBX,EBP)
-PAIRDEF(EBX,ESI)
-PAIRDEF(EBX,EDI)
-PAIRSTK(EBX,STK)
-
-PAIRBEG(EBP )
-PAIRDEF(EBP,EAX)
-PAIRDEF(EBP,EDX)
-PAIRDEF(EBP,ECX)
-PAIRDEF(EBP,EBX)
-PAIRDEF(EBP,ESI)
-PAIRDEF(EBP,EDI)
-PAIRSTK(EBP,STK)
-
-PAIRBEG(ESI )
-PAIRDEF(ESI,EAX)
-PAIRDEF(ESI,EDX)
-PAIRDEF(ESI,ECX)
-PAIRDEF(ESI,EBX)
-PAIRDEF(ESI,EBP)
-PAIRDEF(ESI,EDI)
-PAIRSTK(ESI,STK)
-
-PAIRBEG(EDI )
-PAIRDEF(EDI,EAX)
-PAIRDEF(EDI,EDX)
-PAIRDEF(EDI,ECX)
-PAIRDEF(EDI,EBX)
-PAIRDEF(EDI,EBP)
-PAIRDEF(EDI,ESI)
-PAIRSTK(EDI,STK)
-
-PAIRBEG(STK )
-PAIRSTK(STK,EAX)
-PAIRSTK(STK,EDX)
-PAIRSTK(STK,ECX)
-PAIRSTK(STK,EBX)
-PAIRSTK(STK,EBP)
-PAIRSTK(STK,ESI)
-PAIRSTK(STK,EDI)
+PAIRBEG(EAX)
+PAIRDEF(EAX, ECX)
+PAIRDEF(EAX, EDX)
+PAIRDEF(EAX, EBX)
+PAIRDEF(EAX, EBP)
+PAIRDEF(EAX, ESI)
+PAIRDEF(EAX, EDI)
+PAIRSTK(EAX, STK)
+
+PAIRBEG(ECX)
+PAIRDEF(ECX, EAX)
+PAIRDEF(ECX, EDX)
+PAIRDEF(ECX, EBX)
+PAIRDEF(ECX, EBP)
+PAIRDEF(ECX, ESI)
+PAIRDEF(ECX, EDI)
+PAIRSTK(ECX, STK)
+
+PAIRBEG(EDX)
+PAIRDEF(EDX, EAX)
+PAIRDEF(EDX, ECX)
+PAIRDEF(EDX, EBX)
+PAIRDEF(EDX, EBP)
+PAIRDEF(EDX, ESI)
+PAIRDEF(EDX, EDI)
+PAIRSTK(EDX, STK)
+
+PAIRBEG(EBX)
+PAIRDEF(EBX, EAX)
+PAIRDEF(EBX, EDX)
+PAIRDEF(EBX, ECX)
+PAIRDEF(EBX, EBP)
+PAIRDEF(EBX, ESI)
+PAIRDEF(EBX, EDI)
+PAIRSTK(EBX, STK)
+
+PAIRBEG(EBP)
+PAIRDEF(EBP, EAX)
+PAIRDEF(EBP, EDX)
+PAIRDEF(EBP, ECX)
+PAIRDEF(EBP, EBX)
+PAIRDEF(EBP, ESI)
+PAIRDEF(EBP, EDI)
+PAIRSTK(EBP, STK)
+
+PAIRBEG(ESI)
+PAIRDEF(ESI, EAX)
+PAIRDEF(ESI, EDX)
+PAIRDEF(ESI, ECX)
+PAIRDEF(ESI, EBX)
+PAIRDEF(ESI, EBP)
+PAIRDEF(ESI, EDI)
+PAIRSTK(ESI, STK)
+
+PAIRBEG(EDI)
+PAIRDEF(EDI, EAX)
+PAIRDEF(EDI, EDX)
+PAIRDEF(EDI, ECX)
+PAIRDEF(EDI, EBX)
+PAIRDEF(EDI, EBP)
+PAIRDEF(EDI, ESI)
+PAIRSTK(EDI, STK)
+
+PAIRBEG(STK)
+PAIRSTK(STK, EAX)
+PAIRSTK(STK, EDX)
+PAIRSTK(STK, ECX)
+PAIRSTK(STK, EBX)
+PAIRSTK(STK, EBP)
+PAIRSTK(STK, ESI)
+PAIRSTK(STK, EDI)
#endif
@@ -106,245 +106,245 @@ PAIRSTK(STK,EDI)
// rlo rhi
-PAIRBEG(R0 )
-PAIRDEF(R0,R1)
-PAIRDEF(R0,R2)
-PAIRDEF(R0,R3)
-PAIRDEF(R0,R4)
-PAIRDEF(R0,R5)
-PAIRDEF(R0,R6)
-PAIRDEF(R0,R7)
-PAIRDEF(R0,R8)
-PAIRDEF(R0,R9)
-PAIRDEF(R0,R10)
-PAIRDEF(R0,R11)
-PAIRDEF(R0,R12)
-PAIRDEF(R0,LR)
-PAIRSTK(R0,STK)
-
-PAIRBEG(R1 )
-PAIRDEF(R1,R0)
-PAIRDEF(R1,R2)
-PAIRDEF(R1,R3)
-PAIRDEF(R1,R4)
-PAIRDEF(R1,R5)
-PAIRDEF(R1,R6)
-PAIRDEF(R1,R7)
-PAIRDEF(R1,R8)
-PAIRDEF(R1,R9)
-PAIRDEF(R1,R10)
-PAIRDEF(R1,R11)
-PAIRDEF(R1,R12)
-PAIRDEF(R1,LR)
-PAIRSTK(R1,STK)
-
-PAIRBEG(R2 )
-PAIRDEF(R2,R0)
-PAIRDEF(R2,R1)
-PAIRDEF(R2,R3)
-PAIRDEF(R2,R4)
-PAIRDEF(R2,R5)
-PAIRDEF(R2,R6)
-PAIRDEF(R2,R7)
-PAIRDEF(R2,R8)
-PAIRDEF(R2,R9)
-PAIRDEF(R2,R10)
-PAIRDEF(R2,R11)
-PAIRDEF(R2,R12)
-PAIRDEF(R2,LR)
-PAIRSTK(R2,STK)
-
-PAIRBEG(R3 )
-PAIRDEF(R3,R0)
-PAIRDEF(R3,R1)
-PAIRDEF(R3,R2)
-PAIRDEF(R3,R4)
-PAIRDEF(R3,R5)
-PAIRDEF(R3,R6)
-PAIRDEF(R3,R7)
-PAIRDEF(R3,R8)
-PAIRDEF(R3,R9)
-PAIRDEF(R3,R10)
-PAIRDEF(R3,R11)
-PAIRDEF(R3,R12)
-PAIRDEF(R3,LR)
-PAIRSTK(R3,STK)
-
-PAIRBEG(R4 )
-PAIRDEF(R4,R0)
-PAIRDEF(R4,R1)
-PAIRDEF(R4,R2)
-PAIRDEF(R4,R3)
-PAIRDEF(R4,R5)
-PAIRDEF(R4,R6)
-PAIRDEF(R4,R7)
-PAIRDEF(R4,R8)
-PAIRDEF(R4,R9)
-PAIRDEF(R4,R10)
-PAIRDEF(R4,R11)
-PAIRDEF(R4,R12)
-PAIRDEF(R4,LR)
-PAIRSTK(R4,STK)
-
-PAIRBEG(R5 )
-PAIRDEF(R5,R0)
-PAIRDEF(R5,R1)
-PAIRDEF(R5,R2)
-PAIRDEF(R5,R3)
-PAIRDEF(R5,R4)
-PAIRDEF(R5,R6)
-PAIRDEF(R5,R7)
-PAIRDEF(R5,R8)
-PAIRDEF(R5,R9)
-PAIRDEF(R5,R10)
-PAIRDEF(R5,R11)
-PAIRDEF(R5,R12)
-PAIRDEF(R5,LR)
-PAIRSTK(R5,STK)
-
-PAIRBEG(R6 )
-PAIRDEF(R6,R0)
-PAIRDEF(R6,R1)
-PAIRDEF(R6,R2)
-PAIRDEF(R6,R3)
-PAIRDEF(R6,R4)
-PAIRDEF(R6,R5)
-PAIRDEF(R6,R7)
-PAIRDEF(R6,R8)
-PAIRDEF(R6,R9)
-PAIRDEF(R6,R10)
-PAIRDEF(R6,R11)
-PAIRDEF(R6,R12)
-PAIRDEF(R6,LR)
-PAIRSTK(R6,STK)
-
-PAIRBEG(R7 )
-PAIRDEF(R7,R0)
-PAIRDEF(R7,R1)
-PAIRDEF(R7,R2)
-PAIRDEF(R7,R3)
-PAIRDEF(R7,R4)
-PAIRDEF(R7,R5)
-PAIRDEF(R7,R6)
-PAIRDEF(R7,R8)
-PAIRDEF(R7,R9)
-PAIRDEF(R7,R10)
-PAIRDEF(R7,R11)
-PAIRDEF(R7,R12)
-PAIRDEF(R7,LR)
-PAIRSTK(R7,STK)
-
-PAIRBEG(R8 )
-PAIRDEF(R8,R0)
-PAIRDEF(R8,R1)
-PAIRDEF(R8,R2)
-PAIRDEF(R8,R3)
-PAIRDEF(R8,R4)
-PAIRDEF(R8,R5)
-PAIRDEF(R8,R6)
-PAIRDEF(R8,R7)
-PAIRDEF(R8,R9)
-PAIRDEF(R8,R10)
-PAIRDEF(R8,R11)
-PAIRDEF(R8,R12)
-PAIRDEF(R8,LR)
-PAIRSTK(R8,STK)
-
-PAIRBEG(R9 )
-PAIRDEF(R9,R0)
-PAIRDEF(R9,R1)
-PAIRDEF(R9,R2)
-PAIRDEF(R9,R3)
-PAIRDEF(R9,R4)
-PAIRDEF(R9,R5)
-PAIRDEF(R9,R6)
-PAIRDEF(R9,R7)
-PAIRDEF(R9,R8)
-PAIRDEF(R9,R10)
-PAIRDEF(R9,R11)
-PAIRDEF(R9,R12)
-PAIRDEF(R9,LR)
-PAIRSTK(R9,STK)
-
-PAIRBEG(R10 )
-PAIRDEF(R10,R0)
-PAIRDEF(R10,R1)
-PAIRDEF(R10,R2)
-PAIRDEF(R10,R3)
-PAIRDEF(R10,R4)
-PAIRDEF(R10,R5)
-PAIRDEF(R10,R6)
-PAIRDEF(R10,R7)
-PAIRDEF(R10,R8)
-PAIRDEF(R10,R9)
-PAIRDEF(R10,R11)
-PAIRDEF(R10,R12)
-PAIRDEF(R10,LR)
-PAIRSTK(R10,STK)
-
-PAIRBEG(R11 )
-PAIRDEF(R11,R0)
-PAIRDEF(R11,R1)
-PAIRDEF(R11,R2)
-PAIRDEF(R11,R3)
-PAIRDEF(R11,R4)
-PAIRDEF(R11,R5)
-PAIRDEF(R11,R6)
-PAIRDEF(R11,R7)
-PAIRDEF(R11,R8)
-PAIRDEF(R11,R9)
-PAIRDEF(R11,R10)
-PAIRDEF(R11,R12)
-PAIRDEF(R11,LR)
-PAIRSTK(R11,STK)
-
-PAIRBEG(R12 )
-PAIRDEF(R12,R0)
-PAIRDEF(R12,R1)
-PAIRDEF(R12,R2)
-PAIRDEF(R12,R3)
-PAIRDEF(R12,R4)
-PAIRDEF(R12,R5)
-PAIRDEF(R12,R6)
-PAIRDEF(R12,R7)
-PAIRDEF(R12,R8)
-PAIRDEF(R12,R9)
-PAIRDEF(R12,R10)
-PAIRDEF(R12,R11)
-PAIRDEF(R12,LR)
-PAIRSTK(R12,STK)
-
-PAIRBEG(LR )
-PAIRDEF(LR ,R0)
-PAIRDEF(LR ,R1)
-PAIRDEF(LR ,R2)
-PAIRDEF(LR ,R3)
-PAIRDEF(LR ,R4)
-PAIRDEF(LR ,R5)
-PAIRDEF(LR ,R6)
-PAIRDEF(LR ,R7)
-PAIRDEF(LR ,R8)
-PAIRDEF(LR ,R9)
-PAIRDEF(LR ,R10)
-PAIRDEF(LR ,R11)
-PAIRDEF(LR ,R12)
-PAIRSTK(LR ,STK)
-
-PAIRBEG(STK )
-PAIRSTK(STK,R0)
-PAIRSTK(STK,R1)
-PAIRSTK(STK,R2)
-PAIRSTK(STK,R3)
-PAIRSTK(STK,R4)
-PAIRSTK(STK,R5)
-PAIRSTK(STK,R6)
-PAIRSTK(STK,R7)
-PAIRSTK(STK,R8)
-PAIRSTK(STK,R9)
-PAIRSTK(STK,R10)
-PAIRSTK(STK,R11)
-PAIRSTK(STK,R12)
-PAIRSTK(STK,LR)
+PAIRBEG(R0)
+PAIRDEF(R0, R1)
+PAIRDEF(R0, R2)
+PAIRDEF(R0, R3)
+PAIRDEF(R0, R4)
+PAIRDEF(R0, R5)
+PAIRDEF(R0, R6)
+PAIRDEF(R0, R7)
+PAIRDEF(R0, R8)
+PAIRDEF(R0, R9)
+PAIRDEF(R0, R10)
+PAIRDEF(R0, R11)
+PAIRDEF(R0, R12)
+PAIRDEF(R0, LR)
+PAIRSTK(R0, STK)
+
+PAIRBEG(R1)
+PAIRDEF(R1, R0)
+PAIRDEF(R1, R2)
+PAIRDEF(R1, R3)
+PAIRDEF(R1, R4)
+PAIRDEF(R1, R5)
+PAIRDEF(R1, R6)
+PAIRDEF(R1, R7)
+PAIRDEF(R1, R8)
+PAIRDEF(R1, R9)
+PAIRDEF(R1, R10)
+PAIRDEF(R1, R11)
+PAIRDEF(R1, R12)
+PAIRDEF(R1, LR)
+PAIRSTK(R1, STK)
+
+PAIRBEG(R2)
+PAIRDEF(R2, R0)
+PAIRDEF(R2, R1)
+PAIRDEF(R2, R3)
+PAIRDEF(R2, R4)
+PAIRDEF(R2, R5)
+PAIRDEF(R2, R6)
+PAIRDEF(R2, R7)
+PAIRDEF(R2, R8)
+PAIRDEF(R2, R9)
+PAIRDEF(R2, R10)
+PAIRDEF(R2, R11)
+PAIRDEF(R2, R12)
+PAIRDEF(R2, LR)
+PAIRSTK(R2, STK)
+
+PAIRBEG(R3)
+PAIRDEF(R3, R0)
+PAIRDEF(R3, R1)
+PAIRDEF(R3, R2)
+PAIRDEF(R3, R4)
+PAIRDEF(R3, R5)
+PAIRDEF(R3, R6)
+PAIRDEF(R3, R7)
+PAIRDEF(R3, R8)
+PAIRDEF(R3, R9)
+PAIRDEF(R3, R10)
+PAIRDEF(R3, R11)
+PAIRDEF(R3, R12)
+PAIRDEF(R3, LR)
+PAIRSTK(R3, STK)
+
+PAIRBEG(R4)
+PAIRDEF(R4, R0)
+PAIRDEF(R4, R1)
+PAIRDEF(R4, R2)
+PAIRDEF(R4, R3)
+PAIRDEF(R4, R5)
+PAIRDEF(R4, R6)
+PAIRDEF(R4, R7)
+PAIRDEF(R4, R8)
+PAIRDEF(R4, R9)
+PAIRDEF(R4, R10)
+PAIRDEF(R4, R11)
+PAIRDEF(R4, R12)
+PAIRDEF(R4, LR)
+PAIRSTK(R4, STK)
+
+PAIRBEG(R5)
+PAIRDEF(R5, R0)
+PAIRDEF(R5, R1)
+PAIRDEF(R5, R2)
+PAIRDEF(R5, R3)
+PAIRDEF(R5, R4)
+PAIRDEF(R5, R6)
+PAIRDEF(R5, R7)
+PAIRDEF(R5, R8)
+PAIRDEF(R5, R9)
+PAIRDEF(R5, R10)
+PAIRDEF(R5, R11)
+PAIRDEF(R5, R12)
+PAIRDEF(R5, LR)
+PAIRSTK(R5, STK)
+
+PAIRBEG(R6)
+PAIRDEF(R6, R0)
+PAIRDEF(R6, R1)
+PAIRDEF(R6, R2)
+PAIRDEF(R6, R3)
+PAIRDEF(R6, R4)
+PAIRDEF(R6, R5)
+PAIRDEF(R6, R7)
+PAIRDEF(R6, R8)
+PAIRDEF(R6, R9)
+PAIRDEF(R6, R10)
+PAIRDEF(R6, R11)
+PAIRDEF(R6, R12)
+PAIRDEF(R6, LR)
+PAIRSTK(R6, STK)
+
+PAIRBEG(R7)
+PAIRDEF(R7, R0)
+PAIRDEF(R7, R1)
+PAIRDEF(R7, R2)
+PAIRDEF(R7, R3)
+PAIRDEF(R7, R4)
+PAIRDEF(R7, R5)
+PAIRDEF(R7, R6)
+PAIRDEF(R7, R8)
+PAIRDEF(R7, R9)
+PAIRDEF(R7, R10)
+PAIRDEF(R7, R11)
+PAIRDEF(R7, R12)
+PAIRDEF(R7, LR)
+PAIRSTK(R7, STK)
+
+PAIRBEG(R8)
+PAIRDEF(R8, R0)
+PAIRDEF(R8, R1)
+PAIRDEF(R8, R2)
+PAIRDEF(R8, R3)
+PAIRDEF(R8, R4)
+PAIRDEF(R8, R5)
+PAIRDEF(R8, R6)
+PAIRDEF(R8, R7)
+PAIRDEF(R8, R9)
+PAIRDEF(R8, R10)
+PAIRDEF(R8, R11)
+PAIRDEF(R8, R12)
+PAIRDEF(R8, LR)
+PAIRSTK(R8, STK)
+
+PAIRBEG(R9)
+PAIRDEF(R9, R0)
+PAIRDEF(R9, R1)
+PAIRDEF(R9, R2)
+PAIRDEF(R9, R3)
+PAIRDEF(R9, R4)
+PAIRDEF(R9, R5)
+PAIRDEF(R9, R6)
+PAIRDEF(R9, R7)
+PAIRDEF(R9, R8)
+PAIRDEF(R9, R10)
+PAIRDEF(R9, R11)
+PAIRDEF(R9, R12)
+PAIRDEF(R9, LR)
+PAIRSTK(R9, STK)
+
+PAIRBEG(R10)
+PAIRDEF(R10, R0)
+PAIRDEF(R10, R1)
+PAIRDEF(R10, R2)
+PAIRDEF(R10, R3)
+PAIRDEF(R10, R4)
+PAIRDEF(R10, R5)
+PAIRDEF(R10, R6)
+PAIRDEF(R10, R7)
+PAIRDEF(R10, R8)
+PAIRDEF(R10, R9)
+PAIRDEF(R10, R11)
+PAIRDEF(R10, R12)
+PAIRDEF(R10, LR)
+PAIRSTK(R10, STK)
+
+PAIRBEG(R11)
+PAIRDEF(R11, R0)
+PAIRDEF(R11, R1)
+PAIRDEF(R11, R2)
+PAIRDEF(R11, R3)
+PAIRDEF(R11, R4)
+PAIRDEF(R11, R5)
+PAIRDEF(R11, R6)
+PAIRDEF(R11, R7)
+PAIRDEF(R11, R8)
+PAIRDEF(R11, R9)
+PAIRDEF(R11, R10)
+PAIRDEF(R11, R12)
+PAIRDEF(R11, LR)
+PAIRSTK(R11, STK)
+
+PAIRBEG(R12)
+PAIRDEF(R12, R0)
+PAIRDEF(R12, R1)
+PAIRDEF(R12, R2)
+PAIRDEF(R12, R3)
+PAIRDEF(R12, R4)
+PAIRDEF(R12, R5)
+PAIRDEF(R12, R6)
+PAIRDEF(R12, R7)
+PAIRDEF(R12, R8)
+PAIRDEF(R12, R9)
+PAIRDEF(R12, R10)
+PAIRDEF(R12, R11)
+PAIRDEF(R12, LR)
+PAIRSTK(R12, STK)
+
+PAIRBEG(LR)
+PAIRDEF(LR, R0)
+PAIRDEF(LR, R1)
+PAIRDEF(LR, R2)
+PAIRDEF(LR, R3)
+PAIRDEF(LR, R4)
+PAIRDEF(LR, R5)
+PAIRDEF(LR, R6)
+PAIRDEF(LR, R7)
+PAIRDEF(LR, R8)
+PAIRDEF(LR, R9)
+PAIRDEF(LR, R10)
+PAIRDEF(LR, R11)
+PAIRDEF(LR, R12)
+PAIRSTK(LR, STK)
+
+PAIRBEG(STK)
+PAIRSTK(STK, R0)
+PAIRSTK(STK, R1)
+PAIRSTK(STK, R2)
+PAIRSTK(STK, R3)
+PAIRSTK(STK, R4)
+PAIRSTK(STK, R5)
+PAIRSTK(STK, R6)
+PAIRSTK(STK, R7)
+PAIRSTK(STK, R8)
+PAIRSTK(STK, R9)
+PAIRSTK(STK, R10)
+PAIRSTK(STK, R11)
+PAIRSTK(STK, R12)
+PAIRSTK(STK, LR)
#endif
diff --git a/src/jit/regset.cpp b/src/jit/regset.cpp
index b74b101896..2980f96813 100644
--- a/src/jit/regset.cpp
+++ b/src/jit/regset.cpp
@@ -25,24 +25,21 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
#ifdef _TARGET_ARM64_
-const regMaskSmall regMasks[] =
-{
- #define REGDEF(name, rnum, mask, xname, wname) mask,
- #include "register.h"
+const regMaskSmall regMasks[] = {
+#define REGDEF(name, rnum, mask, xname, wname) mask,
+#include "register.h"
};
#else // !_TARGET_ARM64_
-const regMaskSmall regMasks[] =
-{
- #define REGDEF(name, rnum, mask, sname) mask,
- #include "register.h"
+const regMaskSmall regMasks[] = {
+#define REGDEF(name, rnum, mask, sname) mask,
+#include "register.h"
};
#endif
#ifdef _TARGET_X86_
-const regMaskSmall regFPMasks[] =
-{
- #define REGDEF(name, rnum, mask, sname) mask,
- #include "registerfp.h"
+const regMaskSmall regFPMasks[] = {
+#define REGDEF(name, rnum, mask, sname) mask,
+#include "registerfp.h"
};
#endif // _TARGET_X86_
@@ -55,7 +52,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-void RegSet::rsClearRegsModified()
+void RegSet::rsClearRegsModified()
{
#ifndef LEGACY_BACKEND
assert(m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT);
@@ -72,7 +69,7 @@ void RegSet::rsClearRegsModified()
rsModifiedRegsMask = RBM_NONE;
}
-void RegSet::rsSetRegsModified(regMaskTP mask DEBUGARG(bool suppressDump))
+void RegSet::rsSetRegsModified(regMaskTP mask DEBUGARG(bool suppressDump))
{
assert(mask != RBM_NONE);
assert(rsModifiedRegsMaskInitialized);
@@ -84,8 +81,7 @@ void RegSet::rsSetRegsModified(regMaskTP mask DEBUGARG(bool suppr
// code generation isn't actually adding to set of modified registers.
// Frame layout is only affected by callee-saved registers, so only ensure that callee-saved
// registers aren't modified after final frame layout.
- assert((m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT) ||
- m_rsCompiler->compGeneratingProlog ||
+ assert((m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT) || m_rsCompiler->compGeneratingProlog ||
m_rsCompiler->compGeneratingEpilog ||
(((rsModifiedRegsMask | mask) & RBM_CALLEE_SAVED) == (rsModifiedRegsMask & RBM_CALLEE_SAVED)));
#endif // !LEGACY_BACKEND
@@ -109,15 +105,14 @@ void RegSet::rsSetRegsModified(regMaskTP mask DEBUGARG(bool suppr
rsModifiedRegsMask |= mask;
}
-void RegSet::rsRemoveRegsModified(regMaskTP mask)
+void RegSet::rsRemoveRegsModified(regMaskTP mask)
{
assert(mask != RBM_NONE);
assert(rsModifiedRegsMaskInitialized);
#ifndef LEGACY_BACKEND
// See comment in rsSetRegsModified().
- assert((m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT) ||
- m_rsCompiler->compGeneratingProlog ||
+ assert((m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT) || m_rsCompiler->compGeneratingProlog ||
m_rsCompiler->compGeneratingEpilog ||
(((rsModifiedRegsMask & ~mask) & RBM_CALLEE_SAVED) == (rsModifiedRegsMask & RBM_CALLEE_SAVED)));
#endif // !LEGACY_BACKEND
@@ -173,13 +168,14 @@ void RegSet::SetMaskVars(regMaskTP newMaskVars)
#ifdef DEBUG
-RegSet::rsStressRegsType RegSet::rsStressRegs()
+RegSet::rsStressRegsType RegSet::rsStressRegs()
{
#ifndef LEGACY_BACKEND
return RS_STRESS_NONE;
-#else // LEGACY_BACKEND
- rsStressRegsType val = (rsStressRegsType) JitConfig.JitStressRegs();
- if (val == RS_STRESS_NONE && m_rsCompiler->compStressCompile(Compiler::STRESS_REGS, 15)) val = RS_PICK_BAD_REG;
+#else // LEGACY_BACKEND
+ rsStressRegsType val = (rsStressRegsType)JitConfig.JitStressRegs();
+ if (val == RS_STRESS_NONE && m_rsCompiler->compStressCompile(Compiler::STRESS_REGS, 15))
+ val = RS_PICK_BAD_REG;
return val;
#endif // LEGACY_BACKEND
}
@@ -190,7 +186,7 @@ RegSet::rsStressRegsType RegSet::rsStressRegs()
* Includes 'includeHint' if 'regs' is empty
*/
-regMaskTP RegSet::rsUseIfZero(regMaskTP regs, regMaskTP includeHint)
+regMaskTP RegSet::rsUseIfZero(regMaskTP regs, regMaskTP includeHint)
{
return regs ? regs : includeHint;
}
@@ -199,9 +195,9 @@ regMaskTP RegSet::rsUseIfZero(regMaskTP regs, regMaskTP includeHint)
* Excludes 'excludeHint' if it results in a non-empty mask
*/
-regMaskTP RegSet::rsExcludeHint(regMaskTP regs, regMaskTP excludeHint)
+regMaskTP RegSet::rsExcludeHint(regMaskTP regs, regMaskTP excludeHint)
{
- regMaskTP OKmask = regs & ~excludeHint;
+ regMaskTP OKmask = regs & ~excludeHint;
return OKmask ? OKmask : regs;
}
@@ -209,9 +205,9 @@ regMaskTP RegSet::rsExcludeHint(regMaskTP regs, regMaskTP excludeHint)
* Narrows choice by 'narrowHint' if it results in a non-empty mask
*/
-regMaskTP RegSet::rsNarrowHint(regMaskTP regs, regMaskTP narrowHint)
+regMaskTP RegSet::rsNarrowHint(regMaskTP regs, regMaskTP narrowHint)
{
- regMaskTP narrowed = regs & narrowHint;
+ regMaskTP narrowed = regs & narrowHint;
return narrowed ? narrowed : regs;
}
@@ -219,10 +215,10 @@ regMaskTP RegSet::rsNarrowHint(regMaskTP regs, regMaskTP narrowHint)
* Excludes 'exclude' from regs if non-zero, or from RBM_ALLINT
*/
-regMaskTP RegSet::rsMustExclude(regMaskTP regs, regMaskTP exclude)
+regMaskTP RegSet::rsMustExclude(regMaskTP regs, regMaskTP exclude)
{
// Try to exclude from current set
- regMaskTP OKmask = regs & ~exclude;
+ regMaskTP OKmask = regs & ~exclude;
// If current set wont work, exclude from RBM_ALLINT
if (OKmask == RBM_NONE)
@@ -239,7 +235,7 @@ regMaskTP RegSet::rsMustExclude(regMaskTP regs, regMaskTP exclude)
*/
// inline
-regMaskTP RegSet::rsRegMaskFree()
+regMaskTP RegSet::rsRegMaskFree()
{
/* Any register that is locked must also be marked as 'used' */
@@ -247,7 +243,7 @@ regMaskTP RegSet::rsRegMaskFree()
/* Any register that isn't used and doesn't hold a variable is free */
- return RBM_ALLINT & ~(rsMaskUsed|rsMaskVars|rsMaskResvd);
+ return RBM_ALLINT & ~(rsMaskUsed | rsMaskVars | rsMaskResvd);
}
/*****************************************************************************
@@ -256,7 +252,7 @@ regMaskTP RegSet::rsRegMaskFree()
*/
// inline
-regMaskTP RegSet::rsRegMaskCanGrab()
+regMaskTP RegSet::rsRegMaskCanGrab()
{
/* Any register that is locked must also be marked as 'used' */
@@ -264,17 +260,17 @@ regMaskTP RegSet::rsRegMaskCanGrab()
/* Any register that isn't locked and doesn't hold a var can be grabbed */
- regMaskTP result = (RBM_ALLINT & ~(rsMaskLock|rsMaskVars));
+ regMaskTP result = (RBM_ALLINT & ~(rsMaskLock | rsMaskVars));
#ifdef _TARGET_ARM_
- // On the ARM when we pass structs in registers we set the rsUsedTree[]
+ // On the ARM when we pass structs in registers we set the rsUsedTree[]
// to be the full TYP_STRUCT tree, which doesn't allow us to spill/unspill
- // these argument registers. To fix JitStress issues that can occur
+ // these argument registers. To fix JitStress issues that can occur
// when rsPickReg tries to spill one of these registers we just remove them
// from the set of registers that we can grab
//
- regMaskTP structArgMask = RBM_NONE;
+ regMaskTP structArgMask = RBM_NONE;
// Load all the variable arguments in registers back to their registers.
for (regNumber reg = REG_ARG_FIRST; reg <= REG_ARG_LAST; reg = REG_NEXT(reg))
{
@@ -282,7 +278,7 @@ regMaskTP RegSet::rsRegMaskCanGrab()
if ((regHolds != NULL) && (regHolds->TypeGet() == TYP_STRUCT))
{
structArgMask |= genRegMask(reg);
- }
+ }
}
result &= ~structArgMask;
#endif
@@ -297,7 +293,7 @@ regMaskTP RegSet::rsRegMaskCanGrab()
*/
// inline
-regNumber RegSet::rsPickFreeReg(regMaskTP regMaskHint)
+regNumber RegSet::rsPickFreeReg(regMaskTP regMaskHint)
{
regMaskTP freeRegs = rsRegMaskFree();
assert(freeRegs != RBM_NONE);
@@ -313,14 +309,14 @@ regNumber RegSet::rsPickFreeReg(regMaskTP regMaskHint)
*/
// inline
-void RegSet::rsLockReg(regMaskTP regMask)
+void RegSet::rsLockReg(regMaskTP regMask)
{
/* Must not be already marked as either used or locked */
- assert((rsMaskUsed & regMask) == 0);
- rsMaskUsed |= regMask;
- assert((rsMaskLock & regMask) == 0);
- rsMaskLock |= regMask;
+ assert((rsMaskUsed & regMask) == 0);
+ rsMaskUsed |= regMask;
+ assert((rsMaskLock & regMask) == 0);
+ rsMaskLock |= regMask;
}
/*****************************************************************************
@@ -329,14 +325,14 @@ void RegSet::rsLockReg(regMaskTP regMask)
*/
// inline
-void RegSet::rsLockUsedReg(regMaskTP regMask)
+void RegSet::rsLockUsedReg(regMaskTP regMask)
{
/* Must not be already marked as locked. Must be already marked as used. */
- assert((rsMaskLock & regMask) == 0);
- assert((rsMaskUsed & regMask) == regMask);
+ assert((rsMaskLock & regMask) == 0);
+ assert((rsMaskUsed & regMask) == regMask);
- rsMaskLock |= regMask;
+ rsMaskLock |= regMask;
}
/*****************************************************************************
@@ -345,14 +341,14 @@ void RegSet::rsLockUsedReg(regMaskTP regMask)
*/
// inline
-void RegSet::rsUnlockReg(regMaskTP regMask)
+void RegSet::rsUnlockReg(regMaskTP regMask)
{
/* Must be currently marked as both used and locked */
- assert((rsMaskUsed & regMask) == regMask);
- rsMaskUsed -= regMask;
- assert((rsMaskLock & regMask) == regMask);
- rsMaskLock -= regMask;
+ assert((rsMaskUsed & regMask) == regMask);
+ rsMaskUsed -= regMask;
+ assert((rsMaskLock & regMask) == regMask);
+ rsMaskLock -= regMask;
}
/*****************************************************************************
@@ -361,13 +357,13 @@ void RegSet::rsUnlockReg(regMaskTP regMask)
*/
// inline
-void RegSet::rsUnlockUsedReg(regMaskTP regMask)
+void RegSet::rsUnlockUsedReg(regMaskTP regMask)
{
/* Must be currently marked as both used and locked */
- assert((rsMaskUsed & regMask) == regMask);
- assert((rsMaskLock & regMask) == regMask);
- rsMaskLock -= regMask;
+ assert((rsMaskUsed & regMask) == regMask);
+ assert((rsMaskLock & regMask) == regMask);
+ rsMaskLock -= regMask;
}
/*****************************************************************************
@@ -377,14 +373,14 @@ void RegSet::rsUnlockUsedReg(regMaskTP regMask)
*/
// inline
-void RegSet::rsLockReg(regMaskTP regMask, regMaskTP * usedMask)
+void RegSet::rsLockReg(regMaskTP regMask, regMaskTP* usedMask)
{
/* Is it already marked as used? */
- regMaskTP used = (rsMaskUsed & regMask);
+ regMaskTP used = (rsMaskUsed & regMask);
regMaskTP unused = (regMask & ~used);
- if ( used)
+ if (used)
rsLockUsedReg(used);
if (unused)
@@ -399,7 +395,7 @@ void RegSet::rsLockReg(regMaskTP regMask, regMaskTP * usedMask)
*/
// inline
-void RegSet::rsUnlockReg(regMaskTP regMask, regMaskTP usedMask)
+void RegSet::rsUnlockReg(regMaskTP regMask, regMaskTP usedMask)
{
regMaskTP unused = (regMask & ~usedMask);
@@ -418,7 +414,7 @@ void RegSet::rsUnlockReg(regMaskTP regMask, regMaskTP usedMask)
*/
// inline
-void RegTracker::rsTrackRegClr()
+void RegTracker::rsTrackRegClr()
{
assert(RV_TRASH == 0);
memset(rsRegValues, 0, sizeof(rsRegValues));
@@ -430,7 +426,7 @@ void RegTracker::rsTrackRegClr()
*/
// inline
-void RegTracker::rsTrackRegTrash(regNumber reg)
+void RegTracker::rsTrackRegTrash(regNumber reg)
{
/* Keep track of which registers we ever touch */
@@ -447,27 +443,28 @@ void RegTracker::rsTrackRegTrash(regNumber reg)
*/
// inline
-void RegTracker::rsTrackRegMaskTrash(regMaskTP regMask)
+void RegTracker::rsTrackRegMaskTrash(regMaskTP regMask)
{
- regMaskTP regBit = 1;
+ regMaskTP regBit = 1;
for (regNumber regNum = REG_FIRST; regNum < REG_COUNT; regNum = REG_NEXT(regNum), regBit <<= 1)
{
- if (regBit > regMask)
+ if (regBit > regMask)
+ {
break;
+ }
- if (regBit & regMask)
+ if (regBit & regMask)
{
rsTrackRegTrash(regNum);
}
}
}
-
/*****************************************************************************/
// inline
-void RegTracker::rsTrackRegIntCns(regNumber reg, ssize_t val)
+void RegTracker::rsTrackRegIntCns(regNumber reg, ssize_t val)
{
assert(genIsValidIntReg(reg));
@@ -481,16 +478,17 @@ void RegTracker::rsTrackRegIntCns(regNumber reg, ssize_t val)
rsRegValues[reg].rvdIntCnsVal = val;
}
-
/*****************************************************************************/
// inline
-void RegTracker::rsTrackRegLclVarLng(regNumber reg, unsigned var, bool low)
+void RegTracker::rsTrackRegLclVarLng(regNumber reg, unsigned var, bool low)
{
assert(genIsValidIntReg(reg));
if (compiler->lvaTable[var].lvAddrExposed)
+ {
return;
+ }
/* Keep track of which registers we ever touch */
@@ -505,52 +503,57 @@ void RegTracker::rsTrackRegLclVarLng(regNumber reg, unsigned var,
/*****************************************************************************/
// inline
-bool RegTracker::rsTrackIsLclVarLng(regValKind rvKind)
+bool RegTracker::rsTrackIsLclVarLng(regValKind rvKind)
{
- if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
- return false;
+ if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ {
+ return false;
+ }
- if (rvKind == RV_LCL_VAR_LNG_LO ||
- rvKind == RV_LCL_VAR_LNG_HI)
- return true;
+ if (rvKind == RV_LCL_VAR_LNG_LO || rvKind == RV_LCL_VAR_LNG_HI)
+ {
+ return true;
+ }
else
- return false;
+ {
+ return false;
+ }
}
/*****************************************************************************/
// inline
-void RegTracker::rsTrackRegClsVar(regNumber reg, GenTreePtr clsVar)
+void RegTracker::rsTrackRegClsVar(regNumber reg, GenTreePtr clsVar)
{
- rsTrackRegTrash(reg);
+ rsTrackRegTrash(reg);
}
/*****************************************************************************/
// inline
-void RegTracker::rsTrackRegAssign(GenTree *op1, GenTree *op2)
+void RegTracker::rsTrackRegAssign(GenTree* op1, GenTree* op2)
{
/* Constant/bitvalue has precedence over local */
switch (rsRegValues[op2->gtRegNum].rvdKind)
{
- case RV_INT_CNS:
- break;
+ case RV_INT_CNS:
+ break;
- default:
+ default:
- /* Mark RHS register as containing the value */
+ /* Mark RHS register as containing the value */
- switch (op1->gtOper)
- {
- case GT_LCL_VAR:
- rsTrackRegLclVar(op2->gtRegNum, op1->gtLclVarCommon.gtLclNum);
- break;
- case GT_CLS_VAR:
- rsTrackRegClsVar(op2->gtRegNum, op1);
- break;
- default:
- break;
- }
+ switch (op1->gtOper)
+ {
+ case GT_LCL_VAR:
+ rsTrackRegLclVar(op2->gtRegNum, op1->gtLclVarCommon.gtLclNum);
+ break;
+ case GT_CLS_VAR:
+ rsTrackRegClsVar(op2->gtRegNum, op1);
+ break;
+ default:
+ break;
+ }
}
}
@@ -562,9 +565,9 @@ void RegTracker::rsTrackRegAssign(GenTree *op1, GenTree *op2)
* or return REG_PAIR_NONE if no register pair can be formed
*/
-regPairNo RegSet::rsFindRegPairNo (regMaskTP regAllowedMask)
+regPairNo RegSet::rsFindRegPairNo(regMaskTP regAllowedMask)
{
- regPairNo regPair;
+ regPairNo regPair;
// Remove any special purpose registers such as SP, EBP, etc...
regMaskTP specialUseMask = (rsMaskResvd | RBM_SPBASE);
@@ -578,68 +581,152 @@ regPairNo RegSet::rsFindRegPairNo (regMaskTP regAllowedMask)
regAllowedMask &= ~specialUseMask;
/* Check if regAllowedMask has zero or one bits set */
- if ((regAllowedMask & (regAllowedMask-1)) == 0)
+ if ((regAllowedMask & (regAllowedMask - 1)) == 0)
{
/* If so we won't be able to find a reg pair */
return REG_PAIR_NONE;
}
#ifdef _TARGET_X86_
- if (regAllowedMask & RBM_EAX)
+ if (regAllowedMask & RBM_EAX)
{
/* EAX is available, see if we can pair it with another reg */
- if (regAllowedMask & RBM_EDX) { regPair = REG_PAIR_EAXEDX; goto RET; }
- if (regAllowedMask & RBM_ECX) { regPair = REG_PAIR_EAXECX; goto RET; }
- if (regAllowedMask & RBM_EBX) { regPair = REG_PAIR_EAXEBX; goto RET; }
- if (regAllowedMask & RBM_ESI) { regPair = REG_PAIR_EAXESI; goto RET; }
- if (regAllowedMask & RBM_EDI) { regPair = REG_PAIR_EAXEDI; goto RET; }
- if (regAllowedMask & RBM_EBP) { regPair = REG_PAIR_EAXEBP; goto RET; }
+ if (regAllowedMask & RBM_EDX)
+ {
+ regPair = REG_PAIR_EAXEDX;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_ECX)
+ {
+ regPair = REG_PAIR_EAXECX;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EBX)
+ {
+ regPair = REG_PAIR_EAXEBX;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_ESI)
+ {
+ regPair = REG_PAIR_EAXESI;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EDI)
+ {
+ regPair = REG_PAIR_EAXEDI;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EBP)
+ {
+ regPair = REG_PAIR_EAXEBP;
+ goto RET;
+ }
}
- if (regAllowedMask & RBM_ECX)
+ if (regAllowedMask & RBM_ECX)
{
/* ECX is available, see if we can pair it with another reg */
- if (regAllowedMask & RBM_EDX) { regPair = REG_PAIR_ECXEDX; goto RET; }
- if (regAllowedMask & RBM_EBX) { regPair = REG_PAIR_ECXEBX; goto RET; }
- if (regAllowedMask & RBM_ESI) { regPair = REG_PAIR_ECXESI; goto RET; }
- if (regAllowedMask & RBM_EDI) { regPair = REG_PAIR_ECXEDI; goto RET; }
- if (regAllowedMask & RBM_EBP) { regPair = REG_PAIR_ECXEBP; goto RET; }
+ if (regAllowedMask & RBM_EDX)
+ {
+ regPair = REG_PAIR_ECXEDX;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EBX)
+ {
+ regPair = REG_PAIR_ECXEBX;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_ESI)
+ {
+ regPair = REG_PAIR_ECXESI;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EDI)
+ {
+ regPair = REG_PAIR_ECXEDI;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EBP)
+ {
+ regPair = REG_PAIR_ECXEBP;
+ goto RET;
+ }
}
- if (regAllowedMask & RBM_EDX)
+ if (regAllowedMask & RBM_EDX)
{
/* EDX is available, see if we can pair it with another reg */
- if (regAllowedMask & RBM_EBX) { regPair = REG_PAIR_EDXEBX; goto RET; }
- if (regAllowedMask & RBM_ESI) { regPair = REG_PAIR_EDXESI; goto RET; }
- if (regAllowedMask & RBM_EDI) { regPair = REG_PAIR_EDXEDI; goto RET; }
- if (regAllowedMask & RBM_EBP) { regPair = REG_PAIR_EDXEBP; goto RET; }
+ if (regAllowedMask & RBM_EBX)
+ {
+ regPair = REG_PAIR_EDXEBX;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_ESI)
+ {
+ regPair = REG_PAIR_EDXESI;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EDI)
+ {
+ regPair = REG_PAIR_EDXEDI;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EBP)
+ {
+ regPair = REG_PAIR_EDXEBP;
+ goto RET;
+ }
}
- if (regAllowedMask & RBM_EBX)
+ if (regAllowedMask & RBM_EBX)
{
/* EBX is available, see if we can pair it with another reg */
- if (regAllowedMask & RBM_ESI) { regPair = REG_PAIR_EBXESI; goto RET; }
- if (regAllowedMask & RBM_EDI) { regPair = REG_PAIR_EBXEDI; goto RET; }
- if (regAllowedMask & RBM_EBP) { regPair = REG_PAIR_EBXEBP; goto RET; }
+ if (regAllowedMask & RBM_ESI)
+ {
+ regPair = REG_PAIR_EBXESI;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EDI)
+ {
+ regPair = REG_PAIR_EBXEDI;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EBP)
+ {
+ regPair = REG_PAIR_EBXEBP;
+ goto RET;
+ }
}
- if (regAllowedMask & RBM_ESI)
+ if (regAllowedMask & RBM_ESI)
{
/* ESI is available, see if we can pair it with another reg */
- if (regAllowedMask & RBM_EDI) { regPair = REG_PAIR_ESIEDI; goto RET; }
- if (regAllowedMask & RBM_EBP) { regPair = REG_PAIR_EBPESI; goto RET; }
+ if (regAllowedMask & RBM_EDI)
+ {
+ regPair = REG_PAIR_ESIEDI;
+ goto RET;
+ }
+ if (regAllowedMask & RBM_EBP)
+ {
+ regPair = REG_PAIR_EBPESI;
+ goto RET;
+ }
}
- if (regAllowedMask & RBM_EDI)
+ if (regAllowedMask & RBM_EDI)
{
/* EDI is available, see if we can pair it with another reg */
- if (regAllowedMask & RBM_EBP) { regPair = REG_PAIR_EBPEDI; goto RET; }
+ if (regAllowedMask & RBM_EBP)
+ {
+ regPair = REG_PAIR_EBPEDI;
+ goto RET;
+ }
}
#endif
@@ -648,9 +735,7 @@ regPairNo RegSet::rsFindRegPairNo (regMaskTP regAllowedMask)
//
// Iterate the registers in the order specified by rpRegTmpOrder/raRegTmpOrder
- for (unsigned index1 = 0;
- index1 < REG_TMP_ORDER_COUNT;
- index1++)
+ for (unsigned index1 = 0; index1 < REG_TMP_ORDER_COUNT; index1++)
{
regNumber reg1;
if (m_rsCompiler->rpRegAllocDone)
@@ -663,9 +748,7 @@ regPairNo RegSet::rsFindRegPairNo (regMaskTP regAllowedMask)
if ((regAllowedMask & reg1Mask) == 0)
continue;
- for (unsigned index2 = index1+1;
- index2 < REG_TMP_ORDER_COUNT;
- index2++)
+ for (unsigned index2 = index1 + 1; index2 < REG_TMP_ORDER_COUNT; index2++)
{
regNumber reg2;
if (m_rsCompiler->rpRegAllocDone)
@@ -680,37 +763,35 @@ regPairNo RegSet::rsFindRegPairNo (regMaskTP regAllowedMask)
regMaskTP pairMask = genRegMask(reg1) | genRegMask(reg2);
- // if reg1 is larger than reg2 then swap the registers
+ // if reg1 is larger than reg2 then swap the registers
if (reg1 > reg2)
{
regNumber regT = reg1;
- reg1 = reg2;
- reg2 = regT;
+ reg1 = reg2;
+ reg2 = regT;
}
regPair = gen2regs2pair(reg1, reg2);
- return regPair;
+ return regPair;
}
}
#endif
assert(!"Unreachable code");
regPair = REG_PAIR_NONE;
-
+
#ifdef _TARGET_X86_
RET:
#endif
- return regPair;
+ return regPair;
}
#endif // LEGACY_BACKEND
/*****************************************************************************/
-RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) :
- m_rsCompiler(compiler),
- m_rsGCInfo(gcInfo)
+RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) : m_rsCompiler(compiler), m_rsGCInfo(gcInfo)
{
/* Initialize the spill logic */
@@ -722,16 +803,16 @@ RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) :
// intRegState.rsCurRegArgNum = 0;
// loatRegState.rsCurRegArgNum = 0;
- rsMaskResvd = RBM_NONE;
+ rsMaskResvd = RBM_NONE;
#ifdef LEGACY_BACKEND
- rsMaskMult = RBM_NONE;
- rsMaskUsed = RBM_NONE;
- rsMaskLock = RBM_NONE;
+ rsMaskMult = RBM_NONE;
+ rsMaskUsed = RBM_NONE;
+ rsMaskLock = RBM_NONE;
#endif // LEGACY_BACKEND
#ifdef _TARGET_ARMARCH_
- rsMaskCalleeSaved = RBM_NONE;
+ rsMaskCalleeSaved = RBM_NONE;
#endif // _TARGET_ARMARCH_
#ifdef _TARGET_ARM_
@@ -752,27 +833,27 @@ RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) :
* be marked if the register is ever spilled.
*/
-void RegSet::rsMarkRegUsed(GenTreePtr tree, GenTreePtr addr)
+void RegSet::rsMarkRegUsed(GenTreePtr tree, GenTreePtr addr)
{
- var_types type;
- regNumber regNum;
- regMaskTP regMask;
+ var_types type;
+ regNumber regNum;
+ regMaskTP regMask;
/* The value must be sitting in a register */
assert(tree);
assert(tree->gtFlags & GTF_REG_VAL);
- type = tree->TypeGet();
- regNum = tree->gtRegNum;
+ type = tree->TypeGet();
+ regNum = tree->gtRegNum;
if (isFloatRegType(type))
regMask = genRegMaskFloat(regNum, type);
else
regMask = genRegMask(regNum);
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
printf("\t\t\t\t\t\t\tThe register %s currently holds ", m_rsCompiler->compRegVarName(regNum));
Compiler::printTreeID(tree);
@@ -802,7 +883,7 @@ void RegSet::rsMarkRegUsed(GenTreePtr tree, GenTreePtr addr)
/* Is the register used by two different values simultaneously? */
- if (regMask & rsMaskUsed)
+ if (regMask & rsMaskUsed)
{
/* Save the preceding use information */
@@ -815,13 +896,15 @@ void RegSet::rsMarkRegUsed(GenTreePtr tree, GenTreePtr addr)
/* Remember what values are in what registers, in case we have to spill */
assert(regNum != REG_SPBASE);
- assert(rsUsedTree[regNum] == NULL); rsUsedTree[regNum] = tree;
- assert(rsUsedAddr[regNum] == NULL); rsUsedAddr[regNum] = addr;
+ assert(rsUsedTree[regNum] == NULL);
+ rsUsedTree[regNum] = tree;
+ assert(rsUsedAddr[regNum] == NULL);
+ rsUsedAddr[regNum] = addr;
}
-void RegSet::rsMarkArgRegUsedByPromotedFieldArg(GenTreePtr promotedStructArg, regNumber regNum, bool isGCRef)
+void RegSet::rsMarkArgRegUsedByPromotedFieldArg(GenTreePtr promotedStructArg, regNumber regNum, bool isGCRef)
{
- regMaskTP regMask;
+ regMaskTP regMask;
/* The value must be sitting in a register */
@@ -832,8 +915,8 @@ void RegSet::rsMarkArgRegUsedByPromotedFieldArg(GenTreePtr promot
regMask = genRegMask(regNum);
assert((regMask & RBM_ARG_REGS) != RBM_NONE);
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
printf("\t\t\t\t\t\t\tThe register %s currently holds ", m_rsCompiler->compRegVarName(regNum));
Compiler::printTreeID(promotedStructArg);
@@ -858,11 +941,11 @@ void RegSet::rsMarkArgRegUsedByPromotedFieldArg(GenTreePtr promot
/* Is the register used by two different values simultaneously? */
- if (regMask & rsMaskUsed)
+ if (regMask & rsMaskUsed)
{
/* Save the preceding use information */
- assert(isValidIntArgReg(regNum)); // We are expecting only integer argument registers here
+ assert(isValidIntArgReg(regNum)); // We are expecting only integer argument registers here
rsRecMultiReg(regNum, TYP_I_IMPL);
}
@@ -872,7 +955,8 @@ void RegSet::rsMarkArgRegUsedByPromotedFieldArg(GenTreePtr promot
/* Remember what values are in what registers, in case we have to spill */
assert(regNum != REG_SPBASE);
- assert(rsUsedTree[regNum] == 0); rsUsedTree[regNum] = promotedStructArg;
+ assert(rsUsedTree[regNum] == 0);
+ rsUsedTree[regNum] = promotedStructArg;
}
/*****************************************************************************
@@ -880,12 +964,12 @@ void RegSet::rsMarkArgRegUsedByPromotedFieldArg(GenTreePtr promot
* Marks the register pair that holds the given operand value as 'used'.
*/
-void RegSet::rsMarkRegPairUsed(GenTreePtr tree)
+void RegSet::rsMarkRegPairUsed(GenTreePtr tree)
{
- regNumber regLo;
- regNumber regHi;
- regPairNo regPair;
- regMaskTP regMask;
+ regNumber regLo;
+ regNumber regHi;
+ regPairNo regPair;
+ regMaskTP regMask;
/* The value must be sitting in a register */
@@ -900,18 +984,16 @@ void RegSet::rsMarkRegPairUsed(GenTreePtr tree)
regPair = tree->gtRegPair;
regMask = genRegPairMask(regPair);
- regLo = genRegPairLo(regPair);
- regHi = genRegPairHi(regPair);
+ regLo = genRegPairLo(regPair);
+ regHi = genRegPairHi(regPair);
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
- printf("\t\t\t\t\t\t\tThe register %s currently holds \n",
- m_rsCompiler->compRegVarName(regLo));
+ printf("\t\t\t\t\t\t\tThe register %s currently holds \n", m_rsCompiler->compRegVarName(regLo));
Compiler::printTreeID(tree);
printf("/lo32\n");
- printf("\t\t\t\t\t\t\tThe register %s currently holds \n",
- m_rsCompiler->compRegVarName(regHi));
+ printf("\t\t\t\t\t\t\tThe register %s currently holds \n", m_rsCompiler->compRegVarName(regHi));
Compiler::printTreeID(tree);
printf("/hi32\n");
}
@@ -927,14 +1009,14 @@ void RegSet::rsMarkRegPairUsed(GenTreePtr tree)
/* Are the registers used by two different values simultaneously? */
- if (rsMaskUsed & genRegMask(regLo))
+ if (rsMaskUsed & genRegMask(regLo))
{
/* Save the preceding use information */
rsRecMultiReg(regLo, TYP_INT);
}
- if (rsMaskUsed & genRegMask(regHi))
+ if (rsMaskUsed & genRegMask(regHi))
{
/* Save the preceding use information */
@@ -951,14 +1033,14 @@ void RegSet::rsMarkRegPairUsed(GenTreePtr tree)
/* Remember what values are in what registers, in case we have to spill */
- if (regLo != REG_STK)
+ if (regLo != REG_STK)
{
assert(rsUsedTree[regLo] == 0);
assert(regLo != REG_SPBASE);
rsUsedTree[regLo] = tree;
}
- if (regHi != REG_STK)
+ if (regHi != REG_STK)
{
assert(rsUsedTree[regHi] == 0);
assert(regHi != REG_SPBASE);
@@ -973,7 +1055,7 @@ void RegSet::rsMarkRegPairUsed(GenTreePtr tree)
* to search rsMultiDesc[reg].
*/
-bool RegSet::rsIsTreeInReg(regNumber reg, GenTreePtr tree)
+bool RegSet::rsIsTreeInReg(regNumber reg, GenTreePtr tree)
{
/* First do the trivial check */
@@ -985,7 +1067,7 @@ bool RegSet::rsIsTreeInReg(regNumber reg, GenTreePtr tree)
if (genRegMask(reg) & rsMaskMult)
{
- SpillDsc * multiDesc = rsMultiDesc[reg];
+ SpillDsc* multiDesc = rsMultiDesc[reg];
assert(multiDesc);
for (/**/; multiDesc; multiDesc = multiDesc->spillNext)
@@ -1008,13 +1090,14 @@ bool RegSet::rsIsTreeInReg(regNumber reg, GenTreePtr tree)
* Finds the SpillDsc corresponding to 'tree' assuming it was spilled from 'reg'.
*/
-RegSet::SpillDsc * RegSet::rsGetSpillInfo(GenTreePtr tree,
- regNumber reg,
- SpillDsc** pPrevDsc
+RegSet::SpillDsc* RegSet::rsGetSpillInfo(GenTreePtr tree,
+ regNumber reg,
+ SpillDsc** pPrevDsc
#ifdef LEGACY_BACKEND
- , SpillDsc** pMultiDsc
+ ,
+ SpillDsc** pMultiDsc
#endif // LEGACY_BACKEND
- )
+ )
{
/* Normally, trees are unspilled in the order of being spilled due to
the post-order walking of trees during code-gen. However, this will
@@ -1027,9 +1110,7 @@ RegSet::SpillDsc * RegSet::rsGetSpillInfo(GenTreePtr tree,
SpillDsc* prev;
SpillDsc* dsc;
- for (prev = nullptr, dsc = rsSpillDesc[reg];
- dsc != nullptr;
- prev = dsc , dsc = dsc->spillNext)
+ for (prev = nullptr, dsc = rsSpillDesc[reg]; dsc != nullptr; prev = dsc, dsc = dsc->spillNext)
{
#ifdef LEGACY_BACKEND
if (prev && !prev->spillMoreMultis)
@@ -1037,12 +1118,18 @@ RegSet::SpillDsc * RegSet::rsGetSpillInfo(GenTreePtr tree,
#endif // LEGACY_BACKEND
if (dsc->spillTree == tree)
+ {
break;
+ }
}
- if (pPrevDsc) *pPrevDsc = prev;
+ if (pPrevDsc)
+ {
+ *pPrevDsc = prev;
+ }
#ifdef LEGACY_BACKEND
- if (pMultiDsc) *pMultiDsc = multi;
+ if (pMultiDsc)
+ *pMultiDsc = multi;
#endif // LEGACY_BACKEND
return dsc;
@@ -1054,11 +1141,11 @@ RegSet::SpillDsc * RegSet::rsGetSpillInfo(GenTreePtr tree,
* Mark the register set given by the register mask as not used.
*/
-void RegSet::rsMarkRegFree(regMaskTP regMask)
+void RegSet::rsMarkRegFree(regMaskTP regMask)
{
/* Are we freeing any multi-use registers? */
- if (regMask & rsMaskMult)
+ if (regMask & rsMaskMult)
{
rsMultRegFree(regMask);
return;
@@ -1066,20 +1153,19 @@ void RegSet::rsMarkRegFree(regMaskTP regMask)
m_rsGCInfo.gcMarkRegSetNpt(regMask);
- regMaskTP regBit = 1;
+ regMaskTP regBit = 1;
for (regNumber regNum = REG_FIRST; regNum < REG_COUNT; regNum = REG_NEXT(regNum), regBit <<= 1)
{
- if (regBit > regMask)
+ if (regBit > regMask)
break;
- if (regBit & regMask)
+ if (regBit & regMask)
{
#ifdef DEBUG
- if (m_rsCompiler->verbose)
+ if (m_rsCompiler->verbose)
{
- printf("\t\t\t\t\t\t\tThe register %s no longer holds ",
- m_rsCompiler->compRegVarName(regNum));
+ printf("\t\t\t\t\t\t\tThe register %s no longer holds ", m_rsCompiler->compRegVarName(regNum));
Compiler::printTreeID(rsUsedTree[regNum]);
Compiler::printTreeID(rsUsedAddr[regNum]);
printf("\n");
@@ -1102,10 +1188,10 @@ void RegSet::rsMarkRegFree(regMaskTP regMask)
#endif
}
}
-
+
/* Remove the register set from the 'used' set */
- assert((regMask & rsMaskUsed) == regMask);
+ assert((regMask & rsMaskUsed) == regMask);
rsMaskUsed -= regMask;
/* No locked register may ever be marked as free */
@@ -1119,7 +1205,7 @@ void RegSet::rsMarkRegFree(regMaskTP regMask)
* it will still be marked as used, else it will be completely free.
*/
-void RegSet::rsMarkRegFree(regNumber reg, GenTreePtr tree)
+void RegSet::rsMarkRegFree(regNumber reg, GenTreePtr tree)
{
assert(rsIsTreeInReg(reg, tree));
regMaskTP regMask = genRegMask(reg);
@@ -1135,7 +1221,7 @@ void RegSet::rsMarkRegFree(regNumber reg, GenTreePtr tree)
/* The tree is multi-used. We just have to free it off the given tree but
leave other trees which use the register as they are. The register may
not be multi-used after freeing it from the given tree */
-
+
/* Is the tree in rsUsedTree[] or in rsMultiDesc[]?
If it is in rsUsedTree[], update rsUsedTree[] */
@@ -1147,9 +1233,8 @@ void RegSet::rsMarkRegFree(regNumber reg, GenTreePtr tree)
/* The tree is in rsMultiDesc[] instead of in rsUsedTree[]. Find the desc
corresponding to the tree and just remove it from there */
-
- for (SpillDsc * multiDesc = rsMultiDesc[reg], *prevDesc = NULL;
- multiDesc;
+
+ for (SpillDsc *multiDesc = rsMultiDesc[reg], *prevDesc = NULL; multiDesc;
prevDesc = multiDesc, multiDesc = multiDesc->spillNext)
{
/* If we find the descriptor with the tree we are looking for,
@@ -1182,8 +1267,8 @@ void RegSet::rsMarkRegFree(regNumber reg, GenTreePtr tree)
SpillDsc::freeDsc(this, multiDesc);
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
printf("\t\t\t\t\t\t\tRegister %s multi-use dec for ", m_rsCompiler->compRegVarName(reg));
Compiler::printTreeID(tree);
@@ -1199,32 +1284,29 @@ void RegSet::rsMarkRegFree(regNumber reg, GenTreePtr tree)
assert(!"Didn't find the spilled tree in rsMultiDesc[]");
}
-
/*****************************************************************************
*
* Mark the register set given by the register mask as not used; there may
* be some 'multiple-use' registers in the set.
*/
-void RegSet::rsMultRegFree(regMaskTP regMask)
+void RegSet::rsMultRegFree(regMaskTP regMask)
{
/* Free any multiple-use registers first */
regMaskTP nonMultMask = regMask & ~rsMaskMult;
regMaskTP myMultMask = regMask & rsMaskMult;
- if (myMultMask)
+ if (myMultMask)
{
- regNumber regNum;
- regMaskTP regBit;
+ regNumber regNum;
+ regMaskTP regBit;
- for (regNum = REG_FIRST , regBit = 1;
- regNum < REG_COUNT;
- regNum = REG_NEXT(regNum), regBit <<= 1)
+ for (regNum = REG_FIRST, regBit = 1; regNum < REG_COUNT; regNum = REG_NEXT(regNum), regBit <<= 1)
{
if (regBit > myMultMask)
break;
- if (regBit & myMultMask)
+ if (regBit & myMultMask)
{
/* Free the multi-use register 'regNum' */
var_types type = rsRmvMultiReg(regNum);
@@ -1242,7 +1324,7 @@ void RegSet::rsMultRegFree(regMaskTP regMask)
/* If there are any single-use registers, free them */
- if (nonMultMask)
+ if (nonMultMask)
rsMarkRegFree(nonMultMask);
}
@@ -1251,10 +1333,10 @@ void RegSet::rsMultRegFree(regMaskTP regMask)
* Returns the number of registers that are currently free which appear in needReg.
*/
-unsigned RegSet::rsFreeNeededRegCount(regMaskTP needReg)
+unsigned RegSet::rsFreeNeededRegCount(regMaskTP needReg)
{
- regMaskTP regNeededFree = rsRegMaskFree() & needReg;
- unsigned cntFree = 0;
+ regMaskTP regNeededFree = rsRegMaskFree() & needReg;
+ unsigned cntFree = 0;
/* While some registers are free ... */
@@ -1278,9 +1360,9 @@ unsigned RegSet::rsFreeNeededRegCount(regMaskTP needReg)
* variable.
*/
-void RegTracker::rsTrackRegLclVar(regNumber reg, unsigned var)
+void RegTracker::rsTrackRegLclVar(regNumber reg, unsigned var)
{
- LclVarDsc * varDsc = &compiler->lvaTable[var];
+ LclVarDsc* varDsc = &compiler->lvaTable[var];
assert(reg != REG_STK);
#if CPU_HAS_FP_SUPPORT
assert(varTypeIsFloating(varDsc->TypeGet()) == false);
@@ -1290,7 +1372,9 @@ void RegTracker::rsTrackRegLclVar(regNumber reg, unsigned var)
rsRegValues[reg].rvdKind = RV_TRASH;
if (compiler->lvaTable[var].lvAddrExposed)
+ {
return;
+ }
/* Keep track of which registers we ever touch */
@@ -1305,7 +1389,9 @@ void RegTracker::rsTrackRegLclVar(regNumber reg, unsigned var)
/* Don't track pointer register vars */
if (varDsc->lvRegister)
+ {
return;
+ }
/* Don't track when fully interruptible */
@@ -1321,16 +1407,18 @@ void RegTracker::rsTrackRegLclVar(regNumber reg, unsigned var)
#endif
-#ifdef DEBUG
- if (compiler->verbose)
+#ifdef DEBUG
+ if (compiler->verbose)
+ {
printf("\t\t\t\t\t\t\tThe register %s now holds V%02u\n", compiler->compRegVarName(reg), var);
+ }
#endif
/* Record the new value for the register. ptr var needed for
* lifetime extension
*/
- rsRegValues[reg].rvdKind = RV_LCL_VAR;
+ rsRegValues[reg].rvdKind = RV_LCL_VAR;
// If this is a cast of a 64 bit int, then we must have the low 32 bits.
if (genActualType(varDsc->TypeGet()) == TYP_LONG)
@@ -1343,16 +1431,16 @@ void RegTracker::rsTrackRegLclVar(regNumber reg, unsigned var)
/*****************************************************************************/
-void RegTracker::rsTrackRegSwap(regNumber reg1, regNumber reg2)
+void RegTracker::rsTrackRegSwap(regNumber reg1, regNumber reg2)
{
- RegValDsc tmp;
+ RegValDsc tmp;
- tmp = rsRegValues[reg1];
- rsRegValues[reg1] = rsRegValues[reg2];
- rsRegValues[reg2] = tmp;
+ tmp = rsRegValues[reg1];
+ rsRegValues[reg1] = rsRegValues[reg2];
+ rsRegValues[reg2] = tmp;
}
-void RegTracker::rsTrackRegCopy(regNumber reg1, regNumber reg2)
+void RegTracker::rsTrackRegCopy(regNumber reg1, regNumber reg2)
{
/* Keep track of which registers we ever touch */
@@ -1364,19 +1452,18 @@ void RegTracker::rsTrackRegCopy(regNumber reg1, regNumber reg2)
rsRegValues[reg1] = rsRegValues[reg2];
}
-
#ifdef LEGACY_BACKEND
/*****************************************************************************
* One of the operands of this complex address mode has been spilled
*/
-void rsAddrSpillOper(GenTreePtr addr)
+void rsAddrSpillOper(GenTreePtr addr)
{
- if (addr)
+ if (addr)
{
- assert (addr->gtOper == GT_IND || addr->gtOper == GT_ARR_ELEM || addr->gtOper == GT_LEA
- || addr->gtOper == GT_CMPXCHG);
+ assert(addr->gtOper == GT_IND || addr->gtOper == GT_ARR_ELEM || addr->gtOper == GT_LEA ||
+ addr->gtOper == GT_CMPXCHG);
// GTF_SPILLED_OP2 says "both operands have been spilled"
assert((addr->gtFlags & GTF_SPILLED_OP2) == 0);
@@ -1388,24 +1475,24 @@ void rsAddrSpillOper(GenTreePtr addr)
}
}
-void rsAddrUnspillOper(GenTreePtr addr)
+void rsAddrUnspillOper(GenTreePtr addr)
{
if (addr)
{
- assert (addr->gtOper == GT_IND || addr->gtOper == GT_ARR_ELEM || addr->gtOper == GT_LEA
- || addr->gtOper == GT_CMPXCHG);
+ assert(addr->gtOper == GT_IND || addr->gtOper == GT_ARR_ELEM || addr->gtOper == GT_LEA ||
+ addr->gtOper == GT_CMPXCHG);
- assert((addr->gtFlags & GTF_SPILLED_OPER) != 0);
+ assert((addr->gtFlags & GTF_SPILLED_OPER) != 0);
// Both operands spilled? */
- if ((addr->gtFlags & GTF_SPILLED_OP2 ) != 0)
- addr->gtFlags &= ~GTF_SPILLED_OP2 ;
+ if ((addr->gtFlags & GTF_SPILLED_OP2) != 0)
+ addr->gtFlags &= ~GTF_SPILLED_OP2;
else
- addr->gtFlags &= ~GTF_SPILLED_OPER;
+ addr->gtFlags &= ~GTF_SPILLED_OPER;
}
}
-void RegSet::rsSpillRegIfUsed(regNumber reg)
+void RegSet::rsSpillRegIfUsed(regNumber reg)
{
if (rsMaskUsed & genRegMask(reg))
{
@@ -1415,14 +1502,13 @@ void RegSet::rsSpillRegIfUsed(regNumber reg)
#endif // LEGACY_BACKEND
-
//------------------------------------------------------------
// rsSpillTree: Spill the tree held in 'reg'.
//
// Arguments:
// reg - Register of tree node that is to be spilled
// tree - GenTree node that is being spilled
-// regIdx - Register index identifying the specific result
+// regIdx - Register index identifying the specific result
// register of a multi-reg call node. For single-reg
// producing tree nodes its value is zero.
//
@@ -1435,21 +1521,19 @@ void RegSet::rsSpillRegIfUsed(regNumber reg)
// caller of this method is expected to clear GTF_SPILL flag on call
// node after all of its registers marked for spilling are spilled.
//
-void RegSet::rsSpillTree(regNumber reg,
- GenTreePtr tree,
- unsigned regIdx /* =0 */)
-{
+void RegSet::rsSpillTree(regNumber reg, GenTreePtr tree, unsigned regIdx /* =0 */)
+{
assert(tree != nullptr);
-
+
GenTreeCall* call = nullptr;
var_types treeType;
#ifndef LEGACY_BACKEND
if (tree->IsMultiRegCall())
{
- call = tree->AsCall();
+ call = tree->AsCall();
ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
- treeType = retTypeDesc->GetReturnRegType(regIdx);
+ treeType = retTypeDesc->GetReturnRegType(regIdx);
}
else
#endif
@@ -1457,14 +1541,14 @@ void RegSet::rsSpillTree(regNumber reg,
treeType = tree->TypeGet();
}
- var_types tempType = Compiler::tmpNormalizeType(treeType);
- regMaskTP mask;
- bool floatSpill = false;
+ var_types tempType = Compiler::tmpNormalizeType(treeType);
+ regMaskTP mask;
+ bool floatSpill = false;
if (isFloatRegType(treeType))
{
floatSpill = true;
- mask = genRegMaskFloat(reg, treeType);
+ mask = genRegMaskFloat(reg, treeType);
}
else
{
@@ -1475,7 +1559,7 @@ void RegSet::rsSpillTree(regNumber reg,
#ifdef LEGACY_BACKEND
// The register we're spilling must be used but not locked
- // or an enregistered variable.
+ // or an enregistered variable.
assert((mask & rsMaskUsed) == mask);
assert((mask & rsMaskLock) == 0);
@@ -1504,16 +1588,15 @@ void RegSet::rsSpillTree(regNumber reg,
{
assert(!varTypeIsMultiReg(tree));
tree->gtFlags &= ~GTF_SPILL;
- }
+ }
#endif // !LEGACY_BACKEND
#if CPU_LONG_USES_REGPAIR
- // Are we spilling a part of a register pair?
- if (treeType == TYP_LONG)
+ // Are we spilling a part of a register pair?
+ if (treeType == TYP_LONG)
{
tempType = TYP_I_IMPL;
- assert(genRegPairLo(tree->gtRegPair) == reg ||
- genRegPairHi(tree->gtRegPair) == reg);
+ assert(genRegPairLo(tree->gtRegPair) == reg || genRegPairHi(tree->gtRegPair) == reg);
}
else
{
@@ -1529,9 +1612,9 @@ void RegSet::rsSpillTree(regNumber reg,
SpillDsc* spill = SpillDsc::alloc(m_rsCompiler, this, tempType);
// Grab a temp to store the spilled value
- TempDsc* temp = m_rsCompiler->tmpGetTemp(tempType);
+ TempDsc* temp = m_rsCompiler->tmpGetTemp(tempType);
spill->spillTemp = temp;
- tempType = temp->tdTempType();
+ tempType = temp->tdTempType();
// Remember what it is we have spilled
spill->spillTree = tree;
@@ -1539,11 +1622,10 @@ void RegSet::rsSpillTree(regNumber reg,
spill->spillAddr = rsUsedAddr[reg];
#endif // LEGACY_BACKEND
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
- printf("\t\t\t\t\t\t\tThe register %s spilled with ",
- m_rsCompiler->compRegVarName(reg));
+ printf("\t\t\t\t\t\t\tThe register %s spilled with ", m_rsCompiler->compRegVarName(reg));
Compiler::printTreeID(spill->spillTree);
#ifdef LEGACY_BACKEND
printf("/");
@@ -1559,10 +1641,10 @@ void RegSet::rsSpillTree(regNumber reg,
// 'lastDsc' is 'spill' for simple cases, and will point to the last
// multi-use descriptor if 'reg' is being multi-used
- SpillDsc* lastDsc = spill;
+ SpillDsc* lastDsc = spill;
#ifdef LEGACY_BACKEND
- if ((rsMaskMult & mask) == 0)
+ if ((rsMaskMult & mask) == 0)
{
spill->spillMoreMultis = false;
}
@@ -1585,14 +1667,14 @@ void RegSet::rsSpillTree(regNumber reg,
// Is this multi-use part of a complex address mode?
rsAddrSpillOper(nextDsc->spillAddr);
- // Mark the tree node as having been spilled
+ // Mark the tree node as having been spilled
rsMarkSpill(nextDsc->spillTree, reg);
// lastDsc points to the last of the multi-spill descrs for 'reg'
nextDsc->spillTemp = temp;
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
printf(", ");
Compiler::printTreeID(nextDsc->spillTree);
@@ -1602,30 +1684,32 @@ void RegSet::rsSpillTree(regNumber reg,
#endif
lastDsc->spillNext = nextDsc;
- lastDsc = nextDsc;
+ lastDsc = nextDsc;
nextDsc = nextDsc->spillNext;
- }
- while (lastDsc->spillMoreMultis);
+ } while (lastDsc->spillMoreMultis);
rsMultiDesc[reg] = nextDsc;
// 'reg' is no longer considered to be multi-used. We will set this
- // mask again when this value gets unspilled
+ // mask again when this value gets unspilled
rsMaskMult &= ~mask;
}
#endif // LEGACY_BACKEND
// Insert the spill descriptor(s) in the list
lastDsc->spillNext = rsSpillDesc[reg];
- rsSpillDesc[reg] = spill;
+ rsSpillDesc[reg] = spill;
-#ifdef DEBUG
- if (m_rsCompiler->verbose) printf("\n");
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
+ {
+ printf("\n");
+ }
#endif
// Generate the code to spill the register
- var_types storeType = floatSpill ? treeType : tempType;
+ var_types storeType = floatSpill ? treeType : tempType;
m_rsCompiler->codeGen->spillReg(storeType, temp, reg);
@@ -1643,7 +1727,7 @@ void RegSet::rsSpillTree(regNumber reg,
regFlags |= GTF_SPILLED;
call->SetRegSpillFlagByIdx(regFlags, regIdx);
}
-#endif //!LEGACY_BACKEND
+#endif //! LEGACY_BACKEND
}
#if defined(_TARGET_X86_) && !FEATURE_STACK_FP_X87
@@ -1651,11 +1735,11 @@ void RegSet::rsSpillTree(regNumber reg,
*
* Spill the top of the FP x87 stack.
*/
-void RegSet::rsSpillFPStack(GenTreePtr tree)
+void RegSet::rsSpillFPStack(GenTreePtr tree)
{
- SpillDsc * spill;
- TempDsc * temp;
- var_types treeType = tree->TypeGet();
+ SpillDsc* spill;
+ TempDsc* temp;
+ var_types treeType = tree->TypeGet();
assert(tree->OperGet() == GT_CALL);
spill = SpillDsc::alloc(m_rsCompiler, this, treeType);
@@ -1666,21 +1750,19 @@ void RegSet::rsSpillFPStack(GenTreePtr tree)
/* Remember what it is we have spilled */
- spill->spillTree = tree;
- SpillDsc * lastDsc = spill;
+ spill->spillTree = tree;
+ SpillDsc* lastDsc = spill;
- regNumber reg = tree->gtRegNum;
+ regNumber reg = tree->gtRegNum;
lastDsc->spillNext = rsSpillDesc[reg];
- rsSpillDesc[reg] = spill;
+ rsSpillDesc[reg] = spill;
-#ifdef DEBUG
- if (m_rsCompiler->verbose) printf("\n");
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
+ printf("\n");
#endif
// m_rsCompiler->codeGen->inst_FS_ST(INS_fstp, emitActualTypeSize(treeType), temp, 0);
- m_rsCompiler->codeGen->getEmitter()->emitIns_S(INS_fstp,
- emitActualTypeSize(treeType),
- temp->tdTempNum(),
- 0);
+ m_rsCompiler->codeGen->getEmitter()->emitIns_S(INS_fstp, emitActualTypeSize(treeType), temp->tdTempNum(), 0);
/* Mark the tree node as having been spilled */
@@ -1688,7 +1770,6 @@ void RegSet::rsSpillFPStack(GenTreePtr tree)
}
#endif // defined(_TARGET_X86_) && !FEATURE_STACK_FP_X87
-
#ifdef LEGACY_BACKEND
/*****************************************************************************
@@ -1696,10 +1777,10 @@ void RegSet::rsSpillFPStack(GenTreePtr tree)
* Spill the given register (which we assume to be currently marked as used).
*/
-void RegSet::rsSpillReg(regNumber reg)
+void RegSet::rsSpillReg(regNumber reg)
{
/* We must know the value in the register that we are spilling */
- GenTreePtr tree = rsUsedTree[reg];
+ GenTreePtr tree = rsUsedTree[reg];
#ifdef _TARGET_ARM_
if (tree == NULL && genIsValidFloatReg(reg) && !genIsValidDoubleReg(reg))
@@ -1723,7 +1804,7 @@ void RegSet::rsSpillReg(regNumber reg)
* Spill all registers in 'regMask' that are currently marked as used.
*/
-void RegSet::rsSpillRegs(regMaskTP regMask)
+void RegSet::rsSpillRegs(regMaskTP regMask)
{
/* The registers we're spilling must not be locked,
or enregistered variables */
@@ -1733,20 +1814,21 @@ void RegSet::rsSpillRegs(regMaskTP regMask)
/* Only spill what's currently marked as used */
- regMask &= rsMaskUsed; assert(regMask);
+ regMask &= rsMaskUsed;
+ assert(regMask);
- regNumber regNum;
- regMaskTP regBit;
+ regNumber regNum;
+ regMaskTP regBit;
for (regNum = REG_FIRST, regBit = 1; regNum < REG_COUNT; regNum = REG_NEXT(regNum), regBit <<= 1)
{
- if (regMask & regBit)
+ if (regMask & regBit)
{
rsSpillReg(regNum);
regMask &= rsMaskUsed;
- if (!regMask)
+ if (!regMask)
break;
}
}
@@ -1758,10 +1840,10 @@ void RegSet::rsSpillRegs(regMaskTP regMask)
* for internal tree temps to live in
*/
-extern const regNumber raRegTmpOrder[] = { REG_TMP_ORDER };
-extern const regNumber rpRegTmpOrder[] = { REG_PREDICT_ORDER };
+extern const regNumber raRegTmpOrder[] = {REG_TMP_ORDER};
+extern const regNumber rpRegTmpOrder[] = {REG_PREDICT_ORDER};
#if FEATURE_FP_REGALLOC
-extern const regNumber raRegFltTmpOrder[] = { REG_FLT_TMP_ORDER };
+extern const regNumber raRegFltTmpOrder[] = {REG_FLT_TMP_ORDER};
#endif
/*****************************************************************************
@@ -1770,24 +1852,23 @@ extern const regNumber raRegFltTmpOrder[] = { REG_FLT_TMP_ORDER };
* if no registers are in the set return REG_STK.
*/
-regNumber RegSet::rsPickRegInTmpOrder(regMaskTP regMask)
+regNumber RegSet::rsPickRegInTmpOrder(regMaskTP regMask)
{
if (regMask == RBM_NONE)
return REG_STK;
- bool firstPass = true;
- regMaskTP avoidMask = ~rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; // We want to avoid using any new callee saved register
+ bool firstPass = true;
+ regMaskTP avoidMask =
+ ~rsGetModifiedRegsMask() & RBM_CALLEE_SAVED; // We want to avoid using any new callee saved register
while (true)
{
/* Iterate the registers in the order specified by raRegTmpOrder */
- for (unsigned index = 0;
- index < REG_TMP_ORDER_COUNT;
- index++)
+ for (unsigned index = 0; index < REG_TMP_ORDER_COUNT; index++)
{
- regNumber candidateReg = raRegTmpOrder[index];
- regMaskTP candidateMask = genRegMask(candidateReg);
+ regNumber candidateReg = raRegTmpOrder[index];
+ regMaskTP candidateMask = genRegMask(candidateReg);
// For a FP base frame, don't use FP register.
if (m_rsCompiler->codeGen->isFramePointerUsed() && (candidateMask == RBM_FPBASE))
@@ -1802,7 +1883,7 @@ regNumber RegSet::rsPickRegInTmpOrder(regMaskTP regMask)
}
if (firstPass == true)
- firstPass = false; // OK, now we are willing to select a never used register
+ firstPass = false; // OK, now we are willing to select a never used register
else
break;
}
@@ -1820,11 +1901,11 @@ regNumber RegSet::rsPickRegInTmpOrder(regMaskTP regMask)
* rsModifiedRegsMask is modified to include the returned register.
*/
-regNumber RegSet::rsGrabReg(regMaskTP regMask)
+regNumber RegSet::rsGrabReg(regMaskTP regMask)
{
- regMaskTP OKmask;
- regNumber regNum;
- regMaskTP regBit;
+ regMaskTP OKmask;
+ regNumber regNum;
+ regMaskTP regBit;
assert(regMask);
regMask &= ~rsMaskLock;
@@ -1835,7 +1916,8 @@ regNumber RegSet::rsGrabReg(regMaskTP regMask)
OKmask = regMask & rsRegMaskFree();
regNum = rsPickRegInTmpOrder(OKmask);
- if (REG_STK != regNum) {
+ if (REG_STK != regNum)
+ {
goto RET;
}
@@ -1844,11 +1926,10 @@ regNumber RegSet::rsGrabReg(regMaskTP regMask)
OKmask = regMask & rsRegMaskCanGrab();
assert(OKmask);
- for (regNum = REG_FIRST, regBit = 1;
- (regBit & OKmask) == 0;
- regNum = REG_NEXT(regNum), regBit <<= 1)
+ for (regNum = REG_FIRST, regBit = 1; (regBit & OKmask) == 0; regNum = REG_NEXT(regNum), regBit <<= 1)
{
- if (regNum >= REG_COUNT) {
+ if (regNum >= REG_COUNT)
+ {
assert(!"no register to grab!");
NO_WAY("Could not grab a register, Predictor should have prevented this!");
}
@@ -1863,10 +1944,9 @@ regNumber RegSet::rsGrabReg(regMaskTP regMask)
RET:
/* Keep track of which registers we ever touch */
rsSetRegsModified(genRegMask(regNum));
- return regNum;
+ return regNum;
}
-
/*****************************************************************************
* Find a register to use and return it, spilling if necessary.
*
@@ -1889,50 +1969,49 @@ RET:
* rsModifiedRegsMask is modified to include the returned register.
*/
-regNumber RegSet::rsPickReg(regMaskTP regMask,
- regMaskTP regBest)
+regNumber RegSet::rsPickReg(regMaskTP regMask, regMaskTP regBest)
{
- regNumber regNum;
- regMaskTP spillMask;
- regMaskTP canGrabMask;
+ regNumber regNum;
+ regMaskTP spillMask;
+ regMaskTP canGrabMask;
#ifdef DEBUG
- if (rsStressRegs() >= 1 )
+ if (rsStressRegs() >= 1)
{
/* 'regMask' is purely a recommendation, and callers should be
able to handle the case where it is not satisfied.
The logic here tries to return ~regMask to check that all callers
are prepared to handle such a case */
- regMaskTP badRegs = rsMaskMult & rsRegMaskCanGrab();
+ regMaskTP badRegs = rsMaskMult & rsRegMaskCanGrab();
badRegs = rsUseIfZero(badRegs, rsMaskUsed & rsRegMaskCanGrab());
badRegs = rsUseIfZero(badRegs, rsRegMaskCanGrab());
badRegs = rsExcludeHint(badRegs, regMask);
assert(badRegs != RBM_NONE);
-
+
return rsGrabReg(badRegs);
}
-
+
#endif
- regMaskTP freeMask = rsRegMaskFree();
+ regMaskTP freeMask = rsRegMaskFree();
AGAIN:
/* By default we'd prefer to accept all available registers */
- regMaskTP OKmask = freeMask;
+ regMaskTP OKmask = freeMask;
// OKmask = rsNarrowHint(OKmask, rsUselessRegs());
/* Is there a 'best' register set? */
- if (regBest)
+ if (regBest)
{
OKmask &= regBest;
- if (OKmask)
+ if (OKmask)
goto TRY_REG;
else
goto TRY_ALL;
@@ -1940,10 +2019,10 @@ AGAIN:
/* Was a register set recommended by the caller? */
- if (regMask)
+ if (regMask)
{
OKmask &= regMask;
- if (!OKmask)
+ if (!OKmask)
goto TRY_ALL;
}
@@ -1952,7 +2031,8 @@ TRY_REG:
/* Iterate the registers in the order specified by raRegTmpOrder */
regNum = rsPickRegInTmpOrder(OKmask);
- if (REG_STK != regNum) {
+ if (REG_STK != regNum)
+ {
goto RET;
}
@@ -1960,7 +2040,7 @@ TRY_ALL:
/* Were we considering 'regBest' ? */
- if (regBest)
+ if (regBest)
{
/* 'regBest' is no good -- ignore it and try 'regMask' instead */
@@ -1972,7 +2052,7 @@ TRY_ALL:
/* Were we limited in our consideration? */
- if (!regMask)
+ if (!regMask)
{
/* We need to spill one of the free registers */
@@ -1982,7 +2062,7 @@ TRY_ALL:
{
/* Did we not consider all free registers? */
- if ((regMask & freeMask) != freeMask)
+ if ((regMask & freeMask) != freeMask)
{
/* The recommended regset didn't work, so try all available regs */
@@ -1999,19 +2079,19 @@ TRY_ALL:
/* Make sure we can spill some register. */
canGrabMask = rsRegMaskCanGrab();
- if ((spillMask & canGrabMask) == 0)
+ if ((spillMask & canGrabMask) == 0)
spillMask = canGrabMask;
assert(spillMask);
/* We have no choice but to spill one of the regs */
- return rsGrabReg(spillMask);
+ return rsGrabReg(spillMask);
RET:
rsSetRegsModified(genRegMask(regNum));
- return regNum;
+ return regNum;
}
#endif // LEGACY_BACKEND
@@ -2022,9 +2102,7 @@ RET:
* spill descriptor while we're at it). Returns the temp (i.e. local var)
*/
-TempDsc * RegSet::rsGetSpillTempWord(regNumber reg,
- SpillDsc* dsc,
- SpillDsc* prevDsc)
+TempDsc* RegSet::rsGetSpillTempWord(regNumber reg, SpillDsc* dsc, SpillDsc* prevDsc)
{
assert((prevDsc == nullptr) || (prevDsc->spillNext == dsc));
@@ -2041,7 +2119,7 @@ TempDsc * RegSet::rsGetSpillTempWord(regNumber reg,
/* Remember which temp the value is in */
- TempDsc * temp = dsc->spillTemp;
+ TempDsc* temp = dsc->spillTemp;
SpillDsc::freeDsc(this, dsc);
@@ -2063,24 +2141,21 @@ TempDsc * RegSet::rsGetSpillTempWord(regNumber reg,
* again as needed.
*/
-regNumber RegSet::rsUnspillOneReg(GenTreePtr tree,
- regNumber oldReg,
- KeepReg willKeepNewReg,
- regMaskTP needReg)
+regNumber RegSet::rsUnspillOneReg(GenTreePtr tree, regNumber oldReg, KeepReg willKeepNewReg, regMaskTP needReg)
{
/* Was oldReg multi-used when it was spilled? */
- SpillDsc * prevDsc, * multiDsc;
- SpillDsc * spillDsc = rsGetSpillInfo(tree, oldReg, &prevDsc, &multiDsc);
+ SpillDsc *prevDsc, *multiDsc;
+ SpillDsc* spillDsc = rsGetSpillInfo(tree, oldReg, &prevDsc, &multiDsc);
noway_assert((spillDsc != NULL) && (multiDsc != NULL));
- bool multiUsed = multiDsc->spillMoreMultis;
+ bool multiUsed = multiDsc->spillMoreMultis;
/* We will use multiDsc to walk the rest of the spill list (if it's
multiUsed). As we're going to remove spillDsc from the multiDsc
- list in the rsGetSpillTempWord() call we have to take care of the
+ list in the rsGetSpillTempWord() call we have to take care of the
case where multiDsc==spillDsc. We will set multiDsc as spillDsc->spillNext */
- if (multiUsed && multiDsc==spillDsc)
+ if (multiUsed && multiDsc == spillDsc)
{
assert(spillDsc->spillNext);
multiDsc = spillDsc->spillNext;
@@ -2088,20 +2163,20 @@ regNumber RegSet::rsUnspillOneReg(GenTreePtr tree,
/* Get the temp and free the spill-descriptor */
- TempDsc * temp = rsGetSpillTempWord(oldReg, spillDsc, prevDsc);
+ TempDsc* temp = rsGetSpillTempWord(oldReg, spillDsc, prevDsc);
// Pick a new home for the value:
// This must be a register matching the 'needReg' mask, if it is non-zero.
// Additionally, if 'oldReg' is in 'needMask' and it is free we will select oldReg.
// Also note that the rsGrabReg() call below may cause the chosen register to be spilled.
//
- regMaskTP prefMask;
- regMaskTP freeMask;
- regNumber newReg;
- var_types regType;
- var_types loadType;
+ regMaskTP prefMask;
+ regMaskTP freeMask;
+ regNumber newReg;
+ var_types regType;
+ var_types loadType;
- bool floatUnspill = false;
+ bool floatUnspill = false;
#if FEATURE_FP_REGALLOC
floatUnspill = genIsValidFloatReg(oldReg);
@@ -2113,9 +2188,9 @@ regNumber RegSet::rsUnspillOneReg(GenTreePtr tree,
regType = TYP_DOUBLE;
else
regType = TYP_FLOAT;
- loadType = regType;
- prefMask = genRegMaskFloat(oldReg, regType);
- freeMask = RegFreeFloat();
+ loadType = regType;
+ prefMask = genRegMaskFloat(oldReg, regType);
+ freeMask = RegFreeFloat();
}
else
{
@@ -2125,8 +2200,7 @@ regNumber RegSet::rsUnspillOneReg(GenTreePtr tree,
freeMask = rsRegMaskFree();
}
- if ( (((prefMask & needReg) != 0) || (needReg == 0)) &&
- ((prefMask & freeMask) != 0) )
+ if ((((prefMask & needReg) != 0) || (needReg == 0)) && ((prefMask & freeMask) != 0))
{
needReg = prefMask;
}
@@ -2159,7 +2233,7 @@ regNumber RegSet::rsUnspillOneReg(GenTreePtr tree,
since someone up the call chain may have a different idea about
what registers are used to form the complex address mode (the
addrReg return value from genMakeAddressable).
-
+
Also, it is not safe to unspill all the multi-uses with a TYP_LONG.
Finally, it is not safe to unspill into a different register, because
@@ -2170,11 +2244,9 @@ regNumber RegSet::rsUnspillOneReg(GenTreePtr tree,
in rsMarkRegFree via genDoneAddressable.
*/
- for (SpillDsc * dsc = multiDsc; /**/; dsc = dsc->spillNext)
+ for (SpillDsc* dsc = multiDsc; /**/; dsc = dsc->spillNext)
{
- if ((oldReg != newReg) ||
- (dsc->spillAddr != NULL) ||
- (dsc->spillTree->gtType == TYP_LONG))
+ if ((oldReg != newReg) || (dsc->spillAddr != NULL) || (dsc->spillTree->gtType == TYP_LONG))
{
return newReg;
}
@@ -2187,24 +2259,23 @@ regNumber RegSet::rsUnspillOneReg(GenTreePtr tree,
}
}
-
- bool bFound=false;
+ bool bFound = false;
SpillDsc* pDsc;
SpillDsc** ppPrev;
- for (pDsc=rsSpillDesc[oldReg], ppPrev=&rsSpillDesc[oldReg] ; ; pDsc=pDsc->spillNext)
+ for (pDsc = rsSpillDesc[oldReg], ppPrev = &rsSpillDesc[oldReg];; pDsc = pDsc->spillNext)
{
- if (pDsc==multiDsc)
+ if (pDsc == multiDsc)
{
// We've found the sequence we were searching for
- bFound=true;
+ bFound = true;
}
if (bFound)
- {
+ {
rsAddrUnspillOper(pDsc->spillAddr);
- // Mark the tree node as having been unspilled into newReg
+ // Mark the tree node as having been unspilled into newReg
rsMarkUnspill(pDsc->spillTree, newReg);
}
@@ -2215,14 +2286,14 @@ regNumber RegSet::rsUnspillOneReg(GenTreePtr tree,
// End of sequence
// We link remaining sides of list
- *ppPrev=pDsc->spillNext;
+ *ppPrev = pDsc->spillNext;
// Exit walk
break;
}
else
{
- ppPrev=&(pDsc->spillNext);
+ ppPrev = &(pDsc->spillNext);
}
}
}
@@ -2230,8 +2301,8 @@ regNumber RegSet::rsUnspillOneReg(GenTreePtr tree,
/* pDsc points to the last multi-used descriptor from the spill-list
for the current value (pDsc->spillMoreMultis == false) */
- pDsc->spillNext = rsMultiDesc[newReg];
- rsMultiDesc[newReg] = multiDsc;
+ pDsc->spillNext = rsMultiDesc[newReg];
+ rsMultiDesc[newReg] = multiDsc;
if (floatUnspill)
rsMaskMult |= genRegMaskFloat(newReg, regType);
@@ -2260,19 +2331,17 @@ regNumber RegSet::rsUnspillOneReg(GenTreePtr tree,
//
// Assumptions:
// 1. It is the responsibility of the caller to free the spill temp.
-// 2. RyuJIT backend specific: In case of multi-reg call node
+// 2. RyuJIT backend specific: In case of multi-reg call node
// GTF_SPILLED flag associated with reg is cleared. It is the
// responsibility of caller to clear GTF_SPILLED flag on call node
// itself after ensuring there are no outstanding regs in GTF_SPILLED
// state.
//
-TempDsc* RegSet::rsUnspillInPlace(GenTreePtr tree,
- regNumber oldReg,
- unsigned regIdx /* =0 */)
+TempDsc* RegSet::rsUnspillInPlace(GenTreePtr tree, regNumber oldReg, unsigned regIdx /* =0 */)
{
assert(!isRegPairType(tree->gtType));
- // Get the tree's SpillDsc
+ // Get the tree's SpillDsc
SpillDsc* prevDsc;
SpillDsc* spillDsc = rsGetSpillInfo(tree, oldReg, &prevDsc);
PREFIX_ASSUME(spillDsc != nullptr);
@@ -2283,8 +2352,8 @@ TempDsc* RegSet::rsUnspillInPlace(GenTreePtr tree,
// The value is now unspilled
if (tree->IsMultiRegCall())
{
- GenTreeCall* call = tree->AsCall();
- unsigned flags = call->GetRegSpillFlagByIdx(regIdx);
+ GenTreeCall* call = tree->AsCall();
+ unsigned flags = call->GetRegSpillFlagByIdx(regIdx);
flags &= ~GTF_SPILLED;
call->SetRegSpillFlagByIdx(flags, regIdx);
}
@@ -2293,8 +2362,8 @@ TempDsc* RegSet::rsUnspillInPlace(GenTreePtr tree,
tree->gtFlags &= ~GTF_SPILLED;
}
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
printf("\t\t\t\t\t\t\tTree-Node marked unspilled from ");
Compiler::printTreeID(tree);
@@ -2314,27 +2383,25 @@ TempDsc* RegSet::rsUnspillInPlace(GenTreePtr tree,
* is set to KEEP_REG, we'll mark the new register as used.
*/
-void RegSet::rsUnspillReg(GenTreePtr tree,
- regMaskTP needReg,
- KeepReg keepReg)
+void RegSet::rsUnspillReg(GenTreePtr tree, regMaskTP needReg, KeepReg keepReg)
{
assert(!isRegPairType(tree->gtType)); // use rsUnspillRegPair()
- regNumber oldReg = tree->gtRegNum;
+ regNumber oldReg = tree->gtRegNum;
/* Get the SpillDsc for the tree */
- SpillDsc * spillDsc = rsGetSpillInfo(tree, oldReg);
+ SpillDsc* spillDsc = rsGetSpillInfo(tree, oldReg);
PREFIX_ASSUME(spillDsc != NULL);
/* Before spillDsc is stomped on by rsUnspillOneReg(), note whether
* the reg was part of an address mode
*/
- GenTreePtr unspillAddr = spillDsc->spillAddr;
+ GenTreePtr unspillAddr = spillDsc->spillAddr;
/* Pick a new home for the value */
- regNumber newReg = rsUnspillOneReg(tree, oldReg, keepReg, needReg);
+ regNumber newReg = rsUnspillOneReg(tree, oldReg, keepReg, needReg);
/* Mark the tree node as having been unspilled into newReg */
@@ -2345,8 +2412,8 @@ void RegSet::rsUnspillReg(GenTreePtr tree,
rsAddrUnspillOper(unspillAddr);
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
printf("\t\t\t\t\t\t\tThe register %s unspilled from ", m_rsCompiler->compRegVarName(newReg));
Compiler::printTreeID(tree);
@@ -2356,28 +2423,28 @@ void RegSet::rsUnspillReg(GenTreePtr tree,
/* Mark the new value as used, if the caller desires so */
- if (keepReg == KEEP_REG)
+ if (keepReg == KEEP_REG)
rsMarkRegUsed(tree, unspillAddr);
}
#endif // LEGACY_BACKEND
-void RegSet::rsMarkSpill(GenTreePtr tree, regNumber reg)
+void RegSet::rsMarkSpill(GenTreePtr tree, regNumber reg)
{
- tree->gtFlags &= ~GTF_REG_VAL;
- tree->gtFlags |= GTF_SPILLED;
+ tree->gtFlags &= ~GTF_REG_VAL;
+ tree->gtFlags |= GTF_SPILLED;
}
#ifdef LEGACY_BACKEND
-void RegSet::rsMarkUnspill(GenTreePtr tree, regNumber reg)
+void RegSet::rsMarkUnspill(GenTreePtr tree, regNumber reg)
{
#ifndef _TARGET_AMD64_
assert(tree->gtType != TYP_LONG);
#endif // _TARGET_AMD64_
- tree->gtFlags |= GTF_REG_VAL;
- tree->gtFlags &= ~GTF_SPILLED;
- tree->gtRegNum = reg;
+ tree->gtFlags |= GTF_REG_VAL;
+ tree->gtFlags &= ~GTF_SPILLED;
+ tree->gtRegNum = reg;
}
/*****************************************************************************
@@ -2386,12 +2453,12 @@ void RegSet::rsMarkUnspill(GenTreePtr tree, regNumber reg)
* given set will be considered).
*/
-regPairNo RegSet::rsGrabRegPair(regMaskTP regMask)
+regPairNo RegSet::rsGrabRegPair(regMaskTP regMask)
{
- regPairNo regPair;
- regMaskTP OKmask;
- regNumber reg1;
- regNumber reg2;
+ regPairNo regPair;
+ regMaskTP OKmask;
+ regNumber reg1;
+ regNumber reg2;
assert(regMask);
regMask &= ~rsMaskLock;
@@ -2404,7 +2471,7 @@ regPairNo RegSet::rsGrabRegPair(regMaskTP regMask)
/* Any takers in the recommended/free set? */
regPair = rsFindRegPairNo(OKmask);
-
+
if (regPair != REG_PAIR_NONE)
{
// The normal early exit
@@ -2417,7 +2484,7 @@ regPairNo RegSet::rsGrabRegPair(regMaskTP regMask)
/* We have no choice but to spill one or two used regs */
- if (OKmask)
+ if (OKmask)
{
/* One (and only one) register is free and acceptable - grab it */
@@ -2425,7 +2492,7 @@ regPairNo RegSet::rsGrabRegPair(regMaskTP regMask)
for (reg1 = REG_INT_FIRST; reg1 <= REG_INT_LAST; reg1 = REG_NEXT(reg1))
{
- if (OKmask & genRegMask(reg1))
+ if (OKmask & genRegMask(reg1))
break;
}
assert(OKmask & genRegMask(reg1));
@@ -2451,12 +2518,12 @@ regPairNo RegSet::rsGrabRegPair(regMaskTP regMask)
/* Convert the two register numbers into a pair */
- if (reg1 < reg2)
+ if (reg1 < reg2)
regPair = gen2regs2pair(reg1, reg2);
- else
+ else
regPair = gen2regs2pair(reg2, reg1);
- return regPair;
+ return regPair;
}
/*****************************************************************************
@@ -2465,18 +2532,18 @@ regPairNo RegSet::rsGrabRegPair(regMaskTP regMask)
* currently available registers (if 'regMask' is zero).
*/
-regPairNo RegSet::rsPickRegPair(regMaskTP regMask)
+regPairNo RegSet::rsPickRegPair(regMaskTP regMask)
{
- regMaskTP OKmask;
- regPairNo regPair;
+ regMaskTP OKmask;
+ regPairNo regPair;
- int repeat = 0;
+ int repeat = 0;
/* By default we'd prefer to accept all available registers */
OKmask = rsRegMaskFree();
- if (regMask)
+ if (regMask)
{
/* A register set was recommended by the caller */
@@ -2486,10 +2553,10 @@ regPairNo RegSet::rsPickRegPair(regMaskTP regMask)
AGAIN:
regPair = rsFindRegPairNo(OKmask);
-
+
if (regPair != REG_PAIR_NONE)
{
- return regPair; // Normal early exit
+ return regPair; // Normal early exit
}
regMaskTP freeMask;
@@ -2501,7 +2568,7 @@ AGAIN:
/* Were we limited in our consideration? */
- if (!regMask)
+ if (!regMask)
{
/* We need to spill two of the free registers */
@@ -2511,7 +2578,7 @@ AGAIN:
{
/* Did we not consider all free registers? */
- if ((regMask & freeMask) != freeMask && repeat == 0)
+ if ((regMask & freeMask) != freeMask && repeat == 0)
{
/* The recommended regset didn't work, so try all available regs */
@@ -2534,10 +2601,9 @@ AGAIN:
/* We have no choice but to spill 1/2 of the regs */
- return rsGrabRegPair(spillMask);
+ return rsGrabRegPair(spillMask);
}
-
/*****************************************************************************
*
* The given tree operand has been spilled; reload it into a register pair
@@ -2547,23 +2613,21 @@ AGAIN:
* any spillage, of course).
*/
-void RegSet::rsUnspillRegPair(GenTreePtr tree,
- regMaskTP needReg,
- KeepReg keepReg)
+void RegSet::rsUnspillRegPair(GenTreePtr tree, regMaskTP needReg, KeepReg keepReg)
{
assert(isRegPairType(tree->gtType));
- regPairNo regPair = tree->gtRegPair;
- regNumber regLo = genRegPairLo(regPair);
- regNumber regHi = genRegPairHi(regPair);
+ regPairNo regPair = tree->gtRegPair;
+ regNumber regLo = genRegPairLo(regPair);
+ regNumber regHi = genRegPairHi(regPair);
/* Has the register holding the lower half been spilled? */
- if (!rsIsTreeInReg(regLo, tree))
+ if (!rsIsTreeInReg(regLo, tree))
{
/* Is the upper half already in the right place? */
- if (rsIsTreeInReg(regHi, tree))
+ if (rsIsTreeInReg(regHi, tree))
{
/* Temporarily lock the high part */
@@ -2594,40 +2658,40 @@ void RegSet::rsUnspillRegPair(GenTreePtr tree,
if (regHi != REG_STK)
{
/* Has the register holding the upper half been spilled? */
-
- if (!rsIsTreeInReg(regHi, tree))
+
+ if (!rsIsTreeInReg(regHi, tree))
{
- regMaskTP regLoUsed;
-
+ regMaskTP regLoUsed;
+
            /* Temporarily lock the low part so it doesn't get spilled */
-
+
rsLockReg(genRegMask(regLo), &regLoUsed);
-
+
/* Pick a new home for the upper half */
-
+
regHi = rsUnspillOneReg(tree, regHi, keepReg, needReg);
-
+
/* We can unlock the low register now */
-
+
rsUnlockReg(genRegMask(regLo), regLoUsed);
}
else
{
/* Free the register holding the upper half */
-
+
rsMarkRegFree(genRegMask(regHi));
}
}
/* The value is now residing in the new register */
- tree->gtFlags |= GTF_REG_VAL;
- tree->gtFlags &= ~GTF_SPILLED;
- tree->gtRegPair = gen2regs2pair(regLo, regHi);
+ tree->gtFlags |= GTF_REG_VAL;
+ tree->gtFlags &= ~GTF_SPILLED;
+ tree->gtRegPair = gen2regs2pair(regLo, regHi);
/* Mark the new value as used, if the caller desires so */
- if (keepReg == KEEP_REG)
+ if (keepReg == KEEP_REG)
rsMarkRegPairUsed(tree);
}
@@ -2642,18 +2706,18 @@ void RegSet::rsUnspillRegPair(GenTreePtr tree,
* appear unused.
*/
-void RegSet::rsRecMultiReg(regNumber reg, var_types type)
+void RegSet::rsRecMultiReg(regNumber reg, var_types type)
{
- SpillDsc * spill;
- regMaskTP regMask;
+ SpillDsc* spill;
+ regMaskTP regMask;
if (genIsValidFloatReg(reg) && isFloatRegType(type))
regMask = genRegMaskFloat(reg, type);
else
regMask = genRegMask(reg);
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
printf("\t\t\t\t\t\t\tRegister %s multi-use inc for ", m_rsCompiler->compRegVarName(reg));
Compiler::printTreeID(rsUsedTree[reg]);
@@ -2673,17 +2737,19 @@ void RegSet::rsRecMultiReg(regNumber reg, var_types type)
/* Record the current 'use' info in the spill descriptor */
- spill->spillTree = rsUsedTree[reg]; rsUsedTree[reg] = 0;
- spill->spillAddr = rsUsedAddr[reg]; rsUsedAddr[reg] = 0;
+ spill->spillTree = rsUsedTree[reg];
+ rsUsedTree[reg] = 0;
+ spill->spillAddr = rsUsedAddr[reg];
+ rsUsedAddr[reg] = 0;
/* Remember whether the register is already 'multi-use' */
spill->spillMoreMultis = ((rsMaskMult & regMask) != 0);
-
+
/* Insert the new multi-use record in the list for the register */
spill->spillNext = rsMultiDesc[reg];
- rsMultiDesc[reg] = spill;
+ rsMultiDesc[reg] = spill;
/* This register is now 'multi-use' */
@@ -2695,14 +2761,14 @@ void RegSet::rsRecMultiReg(regNumber reg, var_types type)
* Free the given register, which is known to have multiple uses.
*/
-var_types RegSet::rsRmvMultiReg(regNumber reg)
+var_types RegSet::rsRmvMultiReg(regNumber reg)
{
- SpillDsc * dsc;
+ SpillDsc* dsc;
assert(rsMaskMult & genRegMask(reg));
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
printf("\t\t\t\t\t\t\tRegister %s multi-use dec for ", m_rsCompiler->compRegVarName(reg));
Compiler::printTreeID(rsUsedTree[reg]);
@@ -2712,8 +2778,9 @@ var_types RegSet::rsRmvMultiReg(regNumber reg)
/* Get hold of the spill descriptor for the register */
- dsc = rsMultiDesc[reg]; assert(dsc);
- rsMultiDesc[reg] = dsc->spillNext;
+ dsc = rsMultiDesc[reg];
+ assert(dsc);
+ rsMultiDesc[reg] = dsc->spillNext;
/* Copy the previous 'use' info from the descriptor */
@@ -2724,8 +2791,8 @@ var_types RegSet::rsRmvMultiReg(regNumber reg)
if (!(dsc->spillTree->gtFlags & GTF_SPILLED))
m_rsGCInfo.gcMarkRegPtrVal(reg, dsc->spillTree->TypeGet());
- var_types type = dsc->spillTree->TypeGet();
- regMaskTP regMask;
+ var_types type = dsc->spillTree->TypeGet();
+ regMaskTP regMask;
if (genIsValidFloatReg(reg) && isFloatRegType(type))
regMask = genRegMaskFloat(reg, type);
@@ -2734,13 +2801,13 @@ var_types RegSet::rsRmvMultiReg(regNumber reg)
/* Is only one use of the register left? */
- if (!dsc->spillMoreMultis)
+ if (!dsc->spillMoreMultis)
{
rsMaskMult -= regMask;
}
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
printf("\t\t\t\t\t\t\tRegister %s multi-use dec - now ", m_rsCompiler->compRegVarName(reg));
Compiler::printTreeID(rsUsedTree[reg]);
@@ -2769,23 +2836,26 @@ var_types RegSet::rsRmvMultiReg(regNumber reg)
* contains the close integer constant.
*/
-regNumber RegTracker::rsIconIsInReg(ssize_t val,
- ssize_t* closeDelta /* = NULL */)
+regNumber RegTracker::rsIconIsInReg(ssize_t val, ssize_t* closeDelta /* = NULL */)
{
- regNumber closeReg = REG_NA;
+ regNumber closeReg = REG_NA;
- if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ {
return REG_NA;
+ }
for (regNumber reg = REG_INT_FIRST; reg <= REG_INT_LAST; reg = REG_NEXT(reg))
- {
+ {
if (rsRegValues[reg].rvdKind == RV_INT_CNS)
{
ssize_t regCnsVal = rsRegValues[reg].rvdIntCnsVal;
if (regCnsVal == val)
{
if (closeDelta)
+ {
*closeDelta = 0;
+ }
return reg;
}
if (closeDelta)
@@ -2795,11 +2865,11 @@ regNumber RegTracker::rsIconIsInReg(ssize_t val,
// TODO-CQ: find the smallest delta from a low register?
// That is, is it better to return a high register with a
// small constant delta, or a low register with
- // a larger offset? It's better to have a low register with an offset within the low register range, or a high register otherwise...
+ // a larger offset? It's better to have a low register with an offset within the low register
+ // range, or a high register otherwise...
ssize_t regCnsDelta = val - regCnsVal;
- if ((closeReg == REG_NA) ||
- (unsigned_abs(regCnsDelta) < unsigned_abs(*closeDelta)))
+ if ((closeReg == REG_NA) || (unsigned_abs(regCnsDelta) < unsigned_abs(*closeDelta)))
{
closeReg = reg;
*closeDelta = regCnsDelta;
@@ -2822,7 +2892,7 @@ regNumber RegTracker::rsIconIsInReg(ssize_t val,
/* There was not an exact match */
- return closeReg; /* will always be REG_NA when closeDelta is NULL */
+ return closeReg; /* will always be REG_NA when closeDelta is NULL */
}
/*****************************************************************************
@@ -2833,26 +2903,30 @@ regNumber RegTracker::rsIconIsInReg(ssize_t val,
* out of date).
*/
-void RegTracker::rsTrackRegClrPtr()
+void RegTracker::rsTrackRegClrPtr()
{
for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg))
{
/* Preserve constant values */
- if (rsRegValues[reg].rvdKind == RV_INT_CNS)
+ if (rsRegValues[reg].rvdKind == RV_INT_CNS)
{
/* Make sure we don't preserve NULL (it's a pointer) */
- if (rsRegValues[reg].rvdIntCnsVal != NULL)
+ if (rsRegValues[reg].rvdIntCnsVal != NULL)
+ {
continue;
+ }
}
/* Preserve variables known to not be pointers */
- if (rsRegValues[reg].rvdKind == RV_LCL_VAR)
+ if (rsRegValues[reg].rvdKind == RV_LCL_VAR)
{
- if (!varTypeIsGC(compiler->lvaTable[rsRegValues[reg].rvdLclVarNum].TypeGet()))
+ if (!varTypeIsGC(compiler->lvaTable[rsRegValues[reg].rvdLclVarNum].TypeGet()))
+ {
continue;
+ }
}
rsRegValues[reg].rvdKind = RV_TRASH;
@@ -2869,17 +2943,19 @@ void RegTracker::rsTrackRegClrPtr()
*
*/
-regMaskTP RegTracker::rsTrashRegsForGCInterruptability()
+regMaskTP RegTracker::rsTrashRegsForGCInterruptability()
{
regMaskTP result = RBM_NONE;
for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg))
{
- if (rsRegValues[reg].rvdKind == RV_LCL_VAR)
+ if (rsRegValues[reg].rvdKind == RV_LCL_VAR)
{
- LclVarDsc * varDsc = &compiler->lvaTable[rsRegValues[reg].rvdLclVarNum];
-
- if (!varTypeIsGC(varDsc->TypeGet()))
+ LclVarDsc* varDsc = &compiler->lvaTable[rsRegValues[reg].rvdLclVarNum];
+
+ if (!varTypeIsGC(varDsc->TypeGet()))
+ {
continue;
+ }
// Only stack locals got tracked.
assert(!varDsc->lvRegister);
@@ -2901,22 +2977,25 @@ regMaskTP RegTracker::rsTrashRegsForGCInterruptability()
* can get bungled with respect to pointer tracking.
*/
-regNumber RegTracker::rsLclIsInReg(unsigned var)
+regNumber RegTracker::rsLclIsInReg(unsigned var)
{
assert(var < compiler->lvaCount);
- if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ {
return REG_NA;
+ }
/* return false if register var so genMarkLclVar can do its job */
if (compiler->lvaTable[var].lvRegister)
+ {
return REG_NA;
+ }
for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg))
{
- if (rsRegValues[reg].rvdLclVarNum == var &&
- rsRegValues[reg].rvdKind == RV_LCL_VAR)
+ if (rsRegValues[reg].rvdLclVarNum == var && rsRegValues[reg].rvdKind == RV_LCL_VAR)
{
return reg;
}
@@ -2927,27 +3006,28 @@ regNumber RegTracker::rsLclIsInReg(unsigned var)
/*****************************************************************************/
-regPairNo RegTracker::rsLclIsInRegPair(unsigned var)
+regPairNo RegTracker::rsLclIsInRegPair(unsigned var)
{
assert(var < compiler->lvaCount);
- if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ {
return REG_PAIR_NONE;
+ }
- regValKind rvKind = RV_TRASH;
- regNumber regNo = DUMMY_INIT(REG_NA);
+ regValKind rvKind = RV_TRASH;
+ regNumber regNo = DUMMY_INIT(REG_NA);
for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg))
{
- if (rvKind != rsRegValues[reg].rvdKind &&
- rsTrackIsLclVarLng(rsRegValues[reg].rvdKind) &&
- rsRegValues[reg].rvdLclVarNum == var)
+ if (rvKind != rsRegValues[reg].rvdKind && rsTrackIsLclVarLng(rsRegValues[reg].rvdKind) &&
+ rsRegValues[reg].rvdLclVarNum == var)
{
/* first occurrence of this variable ? */
- if (rvKind == RV_TRASH)
+ if (rvKind == RV_TRASH)
{
- regNo = reg;
+ regNo = reg;
rvKind = rsRegValues[reg].rvdKind;
}
else if (rvKind == RV_LCL_VAR_LNG_HI)
@@ -2971,19 +3051,20 @@ regPairNo RegTracker::rsLclIsInRegPair(unsigned var)
/*****************************************************************************/
-void RegTracker::rsTrashLclLong(unsigned var)
+void RegTracker::rsTrashLclLong(unsigned var)
{
- if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ {
return;
+ }
- for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg))
+ for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg))
{
- if (rsTrackIsLclVarLng(rsRegValues[reg].rvdKind) &&
- rsRegValues[reg].rvdLclVarNum == var)
+ if (rsTrackIsLclVarLng(rsRegValues[reg].rvdKind) && rsRegValues[reg].rvdLclVarNum == var)
{
rsRegValues[reg].rvdKind = RV_TRASH;
}
- }
+ }
}
/*****************************************************************************
@@ -2991,15 +3072,16 @@ void RegTracker::rsTrashLclLong(unsigned var)
* Local's value has changed, mark all regs which contained it as trash.
*/
-void RegTracker::rsTrashLcl(unsigned var)
+void RegTracker::rsTrashLcl(unsigned var)
{
- if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ {
return;
+ }
for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg))
{
- if (rsRegValues[reg].rvdKind == RV_LCL_VAR &&
- rsRegValues[reg].rvdLclVarNum == var)
+ if (rsRegValues[reg].rvdKind == RV_LCL_VAR && rsRegValues[reg].rvdLclVarNum == var)
{
rsRegValues[reg].rvdKind = RV_TRASH;
}
@@ -3012,14 +3094,16 @@ void RegTracker::rsTrashLcl(unsigned var)
* Usually used after a call has been generated.
*/
-void RegTracker::rsTrashRegSet(regMaskTP regMask)
+void RegTracker::rsTrashRegSet(regMaskTP regMask)
{
- if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ {
return;
- regMaskTP regBit = 1;
+ }
+ regMaskTP regBit = 1;
for (regNumber regNum = REG_FIRST; regMask != 0; regNum = REG_NEXT(regNum), regBit <<= 1)
{
- if (regBit & regMask)
+ if (regBit & regMask)
{
rsTrackRegTrash(regNum);
regMask -= regBit;
@@ -3032,28 +3116,29 @@ void RegTracker::rsTrashRegSet(regMaskTP regMask)
* Return a mask of registers that hold no useful value.
*/
-regMaskTP RegTracker::rsUselessRegs()
+regMaskTP RegTracker::rsUselessRegs()
{
- if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
- return RBM_ALLINT;
+ if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ {
+ return RBM_ALLINT;
+ }
- regMaskTP mask = RBM_NONE;
+ regMaskTP mask = RBM_NONE;
for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg))
{
- if (rsRegValues[reg].rvdKind == RV_TRASH)
+ if (rsRegValues[reg].rvdKind == RV_TRASH)
+ {
mask |= genRegMask(reg);
+ }
}
- return mask;
+ return mask;
}
/*****************************************************************************/
-#endif//REDUNDANT_LOAD
+#endif // REDUNDANT_LOAD
/*****************************************************************************/
-
-
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -3066,16 +3151,15 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
-
-void Compiler::tmpInit()
+void Compiler::tmpInit()
{
#ifdef LEGACY_BACKEND
tmpDoubleSpillMax = 0;
tmpIntSpillMax = 0;
#endif // LEGACY_BACKEND
- tmpCount = 0;
- tmpSize = 0;
+ tmpCount = 0;
+ tmpSize = 0;
#ifdef DEBUG
tmpGetCount = 0;
#endif
@@ -3091,19 +3175,19 @@ var_types Compiler::tmpNormalizeType(var_types type)
type = genActualType(type);
-#else // LEGACY_BACKEND
+#else // LEGACY_BACKEND
if (!varTypeIsGC(type))
{
switch (genTypeStSz(type))
{
- case 1:
- type = TYP_INT; // Maps all 4-byte non-GC types to TYP_INT temps
- break;
- case 2:
- type = TYP_DOUBLE; // Maps all 8-byte types to TYP_DOUBLE temps
- break;
- default:
- assert(!"unexpected type");
+ case 1:
+ type = TYP_INT; // Maps all 4-byte non-GC types to TYP_INT temps
+ break;
+ case 2:
+ type = TYP_DOUBLE; // Maps all 8-byte types to TYP_DOUBLE temps
+ break;
+ default:
+ assert(!"unexpected type");
}
}
#endif // LEGACY_BACKEND
@@ -3117,9 +3201,9 @@ var_types Compiler::tmpNormalizeType(var_types type)
* the garbage collector).
*/
-TempDsc * Compiler::tmpGetTemp(var_types type)
+TempDsc* Compiler::tmpGetTemp(var_types type)
{
- type = tmpNormalizeType(type);
+ type = tmpNormalizeType(type);
unsigned size = genTypeSize(type);
// If TYP_STRUCT ever gets in here we do bad things (tmpSlot returns -1)
@@ -3127,18 +3211,18 @@ TempDsc * Compiler::tmpGetTemp(var_types type)
/* Find the slot to search for a free temp of the right size */
- unsigned slot = tmpSlot(size);
+ unsigned slot = tmpSlot(size);
/* Look for a temp with a matching type */
- TempDsc * * last = &tmpFree[slot];
- TempDsc * temp;
+ TempDsc** last = &tmpFree[slot];
+ TempDsc* temp;
for (temp = *last; temp; last = &temp->tdNext, temp = *last)
{
/* Does the type match? */
- if (temp->tdTempType() == type)
+ if (temp->tdTempType() == type)
{
/* We have a match -- remove it from the free list */
@@ -3149,7 +3233,7 @@ TempDsc * Compiler::tmpGetTemp(var_types type)
#ifdef DEBUG
/* Do we need to allocate a new temp */
- bool isNewTemp = false;
+ bool isNewTemp = false;
#endif // DEBUG
#ifndef LEGACY_BACKEND
@@ -3158,41 +3242,40 @@ TempDsc * Compiler::tmpGetTemp(var_types type)
#else // LEGACY_BACKEND
- if (temp == nullptr)
+ if (temp == nullptr)
{
#ifdef DEBUG
isNewTemp = true;
#endif // DEBUG
tmpCount++;
- tmpSize += (unsigned) size;
+ tmpSize += (unsigned)size;
#ifdef _TARGET_ARM_
if (type == TYP_DOUBLE)
{
// Adjust tmpSize in case it needs alignment
- tmpSize += TARGET_POINTER_SIZE;
+ tmpSize += TARGET_POINTER_SIZE;
}
#endif // _TARGET_ARM_
genEmitter->emitTmpSizeChanged(tmpSize);
- temp = new (this, CMK_Unknown) TempDsc(-((int) tmpCount), size, type);
+ temp = new (this, CMK_Unknown) TempDsc(-((int)tmpCount), size, type);
}
#endif // LEGACY_BACKEND
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("%s temp #%u, slot %u, size = %u\n",
- isNewTemp ? "created" : "reused",
- -temp->tdTempNum(), slot, temp->tdTempSize());
+ printf("%s temp #%u, slot %u, size = %u\n", isNewTemp ? "created" : "reused", -temp->tdTempNum(), slot,
+ temp->tdTempSize());
}
tmpGetCount++;
#endif // DEBUG
- temp->tdNext = tmpUsed[slot];
- tmpUsed[slot] = temp;
+ temp->tdNext = tmpUsed[slot];
+ tmpUsed[slot] = temp;
return temp;
}
@@ -3228,19 +3311,18 @@ void Compiler::tmpPreAllocateTemps(var_types type, unsigned count)
tmpCount++;
tmpSize += size;
- TempDsc* temp = new (this, CMK_Unknown) TempDsc(-((int) tmpCount), size, type);
+ TempDsc* temp = new (this, CMK_Unknown) TempDsc(-((int)tmpCount), size, type);
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("pre-allocated temp #%u, slot %u, size = %u\n",
- -temp->tdTempNum(), slot, temp->tdTempSize());
+ printf("pre-allocated temp #%u, slot %u, size = %u\n", -temp->tdTempNum(), slot, temp->tdTempSize());
}
#endif // DEBUG
// Add it to the front of the appropriate slot list.
- temp->tdNext = tmpFree[slot];
- tmpFree[slot] = temp;
+ temp->tdNext = tmpFree[slot];
+ tmpFree[slot] = temp;
}
}
@@ -3251,21 +3333,20 @@ void Compiler::tmpPreAllocateTemps(var_types type, unsigned count)
* Release the given temp.
*/
-void Compiler::tmpRlsTemp(TempDsc* temp)
+void Compiler::tmpRlsTemp(TempDsc* temp)
{
assert(temp != nullptr);
- unsigned slot;
+ unsigned slot;
/* Add the temp to the 'free' list */
slot = tmpSlot(temp->tdTempSize());
-#ifdef DEBUG
- if (verbose)
+#ifdef DEBUG
+ if (verbose)
{
- printf("release temp #%u, slot %u, size = %u\n",
- -temp->tdTempNum(), slot, temp->tdTempSize());
+ printf("release temp #%u, slot %u, size = %u\n", -temp->tdTempNum(), slot, temp->tdTempSize());
}
assert(tmpGetCount);
tmpGetCount--;
@@ -3289,8 +3370,8 @@ void Compiler::tmpRlsTemp(TempDsc* temp)
// Add it to the free list.
- temp->tdNext = tmpFree[slot];
- tmpFree[slot] = temp;
+ temp->tdNext = tmpFree[slot];
+ tmpFree[slot] = temp;
}
/*****************************************************************************
@@ -3302,28 +3383,29 @@ void Compiler::tmpRlsTemp(TempDsc* temp)
*
* When looking for temps on the "used" list, this can be used any time.
*/
-TempDsc* Compiler::tmpFindNum(int tnum, TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const
+TempDsc* Compiler::tmpFindNum(int tnum, TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const
{
assert(tnum < 0); // temp numbers are negative
for (TempDsc* temp = tmpListBeg(usageType); temp != nullptr; temp = tmpListNxt(temp, usageType))
{
if (temp->tdTempNum() == tnum)
+ {
return temp;
+ }
}
return nullptr;
}
-
/*****************************************************************************
*
* A helper function is used to iterate over all the temps.
*/
-TempDsc* Compiler::tmpListBeg(TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const
+TempDsc* Compiler::tmpListBeg(TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const
{
- TempDsc* const * tmpLists;
+ TempDsc* const* tmpLists;
if (usageType == TEMP_USAGE_FREE)
{
tmpLists = tmpFree;
@@ -3335,7 +3417,7 @@ TempDsc* Compiler::tmpListBeg(TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE
// Return the first temp in the slot for the smallest size
unsigned slot = 0;
- while (slot < (TEMP_SLOT_COUNT-1) && tmpLists[slot] == nullptr)
+ while (slot < (TEMP_SLOT_COUNT - 1) && tmpLists[slot] == nullptr)
{
slot++;
}
@@ -3348,7 +3430,7 @@ TempDsc* Compiler::tmpListBeg(TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE
* Used with tmpListBeg() to iterate over the list of temps.
*/
-TempDsc* Compiler::tmpListNxt(TempDsc* curTemp, TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const
+TempDsc* Compiler::tmpListNxt(TempDsc* curTemp, TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const
{
assert(curTemp != nullptr);
@@ -3360,7 +3442,7 @@ TempDsc* Compiler::tmpListNxt(TempDsc* curTemp, TEMP_USAGE_TYPE usageType /
// If there are no more temps in the list, check if there are more
// slots (for bigger sized temps) to walk.
- TempDsc* const * tmpLists;
+ TempDsc* const* tmpLists;
if (usageType == TEMP_USAGE_FREE)
{
tmpLists = tmpFree;
@@ -3374,7 +3456,7 @@ TempDsc* Compiler::tmpListNxt(TempDsc* curTemp, TEMP_USAGE_TYPE usageType /
{
size += sizeof(int);
unsigned slot = tmpSlot(size);
- temp = tmpLists[slot];
+ temp = tmpLists[slot];
}
assert((temp == nullptr) || (temp->tdTempSize() == size));
@@ -3383,12 +3465,11 @@ TempDsc* Compiler::tmpListNxt(TempDsc* curTemp, TEMP_USAGE_TYPE usageType /
return temp;
}
-
#ifdef DEBUG
/*****************************************************************************
* Return 'true' if all allocated temps are free (not in use).
*/
-bool Compiler::tmpAllFree() const
+bool Compiler::tmpAllFree() const
{
// The 'tmpGetCount' should equal the number of things in the 'tmpUsed' lists. This is a convenient place
// to assert that.
@@ -3417,7 +3498,6 @@ bool Compiler::tmpAllFree() const
#endif // DEBUG
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -3435,37 +3515,42 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* In debug it also asserts that reg1 and reg2 are not the same.
*/
-bool genIsProperRegPair(regPairNo regPair)
+bool genIsProperRegPair(regPairNo regPair)
{
regNumber rlo = genRegPairLo(regPair);
regNumber rhi = genRegPairHi(regPair);
- assert(regPair >= REG_PAIR_FIRST &&
- regPair <= REG_PAIR_LAST);
+ assert(regPair >= REG_PAIR_FIRST && regPair <= REG_PAIR_LAST);
- if (rlo == rhi)
+ if (rlo == rhi)
+ {
return false;
+ }
- if (rlo == REG_L_STK || rhi == REG_L_STK)
+ if (rlo == REG_L_STK || rhi == REG_L_STK)
+ {
return false;
+ }
- if (rlo >= REG_COUNT || rhi >= REG_COUNT)
+ if (rlo >= REG_COUNT || rhi >= REG_COUNT)
+ {
return false;
+ }
return (rlo != REG_STK && rhi != REG_STK);
}
/*****************************************************************************
*
- * Given a register that is an argument register
- * returns the next argument register
+ * Given a register that is an argument register
+ * returns the next argument register
*
- * Note: that this method will return a non arg register
+ * Note: that this method will return a non arg register
* when given REG_ARG_LAST
*
*/
-regNumber genRegArgNext(regNumber argReg)
+regNumber genRegArgNext(regNumber argReg)
{
regNumber result = REG_NA;
@@ -3483,27 +3568,27 @@ regNumber genRegArgNext(regNumber argReg)
// Windows X64 ABI:
// REG_EDI, REG_ESI, REG_ECX, REG_EDX, REG_R8, REG_R9
//
- if (argReg == REG_ARG_1) // REG_ESI
+ if (argReg == REG_ARG_1) // REG_ESI
{
- result = REG_ARG_2; // REG_ECX
+ result = REG_ARG_2; // REG_ECX
}
- else if (argReg == REG_ARG_3) // REG_EDX
+ else if (argReg == REG_ARG_3) // REG_EDX
{
- result = REG_ARG_4; // REG_R8
+ result = REG_ARG_4; // REG_R8
}
-#else // Windows ABI
+#else // Windows ABI
// Windows X64 ABI:
// REG_ECX, REG_EDX, REG_R8, REG_R9
//
- if (argReg == REG_ARG_1) // REG_EDX
+ if (argReg == REG_ARG_1) // REG_EDX
{
- result = REG_ARG_2; // REG_R8
+ result = REG_ARG_2; // REG_R8
}
#endif // UNIX or Windows ABI
#endif // _TARGET_AMD64_
-
- // If we didn't set 'result' to valid register above
- // then we will just iterate 'argReg' using REG_NEXT
+
+ // If we didn't set 'result' to valid register above
+ // then we will just iterate 'argReg' using REG_NEXT
//
if (result == REG_NA)
{
@@ -3523,14 +3608,14 @@ regNumber genRegArgNext(regNumber argReg)
* register numbers and corresponding bitmaps.
*/
-const regNumber raRegCalleeSaveOrder[] = { REG_CALLEE_SAVED_ORDER };
-const regMaskTP raRbmCalleeSaveOrder[] = { RBM_CALLEE_SAVED_ORDER };
-
+const regNumber raRegCalleeSaveOrder[] = {REG_CALLEE_SAVED_ORDER};
+const regMaskTP raRbmCalleeSaveOrder[] = {RBM_CALLEE_SAVED_ORDER};
-regMaskSmall genRegMaskFromCalleeSavedMask(unsigned short calleeSaveMask)
+regMaskSmall genRegMaskFromCalleeSavedMask(unsigned short calleeSaveMask)
{
regMaskSmall res = 0;
- for (int i = 0; i < CNT_CALLEE_SAVED; i++) {
+ for (int i = 0; i < CNT_CALLEE_SAVED; i++)
+ {
if ((calleeSaveMask & ((regMaskTP)1 << i)) != 0)
{
res |= raRbmCalleeSaveOrder[i];
@@ -3545,15 +3630,15 @@ regMaskSmall genRegMaskFromCalleeSavedMask(unsigned short calleeSaveMask)
*/
// inline
-void RegSet::rsSpillInit()
+void RegSet::rsSpillInit()
{
/* Clear out the spill and multi-use tables */
memset(rsSpillDesc, 0, sizeof(rsSpillDesc));
#ifdef LEGACY_BACKEND
- memset(rsUsedTree, 0, sizeof(rsUsedTree) );
- memset(rsUsedAddr, 0, sizeof(rsUsedAddr) );
+ memset(rsUsedTree, 0, sizeof(rsUsedTree));
+ memset(rsUsedAddr, 0, sizeof(rsUsedAddr));
memset(rsMultiDesc, 0, sizeof(rsMultiDesc));
rsSpillFloat = nullptr;
#endif // LEGACY_BACKEND
@@ -3562,7 +3647,7 @@ void RegSet::rsSpillInit()
/* We don't have any descriptors allocated */
- rsSpillFree = nullptr;
+ rsSpillFree = nullptr;
}
/*****************************************************************************
@@ -3571,7 +3656,7 @@ void RegSet::rsSpillInit()
*/
// inline
-void RegSet::rsSpillDone()
+void RegSet::rsSpillDone()
{
rsSpillChk();
}
@@ -3583,7 +3668,7 @@ void RegSet::rsSpillDone()
*/
// inline
-void RegSet::rsSpillBeg()
+void RegSet::rsSpillBeg()
{
rsSpillChk();
}
@@ -3595,7 +3680,7 @@ void RegSet::rsSpillBeg()
*/
// inline
-void RegSet::rsSpillEnd()
+void RegSet::rsSpillEnd()
{
rsSpillChk();
}
@@ -3605,22 +3690,22 @@ void RegSet::rsSpillEnd()
//
// inline
-RegSet::SpillDsc * RegSet::SpillDsc::alloc(Compiler * pComp, RegSet *regSet, var_types type)
+RegSet::SpillDsc* RegSet::SpillDsc::alloc(Compiler* pComp, RegSet* regSet, var_types type)
{
- RegSet::SpillDsc *spill;
- RegSet::SpillDsc **pSpill;
+ RegSet::SpillDsc* spill;
+ RegSet::SpillDsc** pSpill;
pSpill = &(regSet->rsSpillFree);
// Allocate spill structure
- if (*pSpill)
+ if (*pSpill)
{
- spill = *pSpill;
+ spill = *pSpill;
*pSpill = spill->spillNext;
}
else
{
- spill = (RegSet::SpillDsc *)pComp->compGetMem(sizeof(SpillDsc));
+ spill = (RegSet::SpillDsc*)pComp->compGetMem(sizeof(SpillDsc));
}
return spill;
}
@@ -3630,10 +3715,10 @@ RegSet::SpillDsc * RegSet::SpillDsc::alloc(Compiler * pComp, RegSet *regSet, va
//
// inline
-void RegSet::SpillDsc::freeDsc (RegSet *regSet, RegSet::SpillDsc *spillDsc)
+void RegSet::SpillDsc::freeDsc(RegSet* regSet, RegSet::SpillDsc* spillDsc)
{
spillDsc->spillNext = regSet->rsSpillFree;
- regSet->rsSpillFree = spillDsc;
+ regSet->rsSpillFree = spillDsc;
}
/*****************************************************************************
@@ -3645,17 +3730,17 @@ void RegSet::SpillDsc::freeDsc (RegSet *regSet, RegSet::SpillDsc *spillDsc)
#ifdef DEBUG
// inline
-void RegSet::rsSpillChk()
+void RegSet::rsSpillChk()
{
// All grabbed temps should have been released
assert(m_rsCompiler->tmpGetCount == 0);
for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg))
{
- assert(rsSpillDesc[reg] == NULL);
+ assert(rsSpillDesc[reg] == nullptr);
#ifdef LEGACY_BACKEND
- assert(rsUsedTree [reg] == NULL);
+ assert(rsUsedTree[reg] == NULL);
assert(rsMultiDesc[reg] == NULL);
#endif // LEGACY_BACKEND
}
@@ -3664,25 +3749,26 @@ void RegSet::rsSpillChk()
#else
// inline
-void RegSet::rsSpillChk(){}
+void RegSet::rsSpillChk()
+{
+}
#endif
-
-
/*****************************************************************************/
#if REDUNDANT_LOAD
// inline
-bool RegTracker::rsIconIsInReg(ssize_t val, regNumber reg)
+bool RegTracker::rsIconIsInReg(ssize_t val, regNumber reg)
{
- if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ if (compiler->opts.MinOpts() || compiler->opts.compDbgCode)
+ {
return false;
+ }
- if (rsRegValues[reg].rvdKind == RV_INT_CNS &&
- rsRegValues[reg].rvdIntCnsVal == val)
+ if (rsRegValues[reg].rvdKind == RV_INT_CNS && rsRegValues[reg].rvdIntCnsVal == val)
{
- return true;
+ return true;
}
return false;
}
diff --git a/src/jit/regset.h b/src/jit/regset.h
index 11a824cbe2..cdfbb1502a 100644
--- a/src/jit/regset.h
+++ b/src/jit/regset.h
@@ -11,7 +11,7 @@
class LclVarDsc;
class TempDsc;
-typedef struct GenTree * GenTreePtr;
+typedef struct GenTree* GenTreePtr;
class Compiler;
class CodeGen;
class GCInfo;
@@ -37,16 +37,15 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* thing it does is note which registers we use in each method.
*/
-enum regValKind
+enum regValKind
{
- RV_TRASH, // random unclassified garbage
- RV_INT_CNS, // integer constant
- RV_LCL_VAR, // local variable value
- RV_LCL_VAR_LNG_LO, // lower half of long local variable
+ RV_TRASH, // random unclassified garbage
+ RV_INT_CNS, // integer constant
+ RV_LCL_VAR, // local variable value
+ RV_LCL_VAR_LNG_LO, // lower half of long local variable
RV_LCL_VAR_LNG_HI,
};
-
/*****************************************************************************/
class RegSet
@@ -55,227 +54,215 @@ class RegSet
friend class CodeGenInterface;
private:
- Compiler *m_rsCompiler;
- GCInfo &m_rsGCInfo;
-
-public :
+ Compiler* m_rsCompiler;
+ GCInfo& m_rsGCInfo;
- RegSet(Compiler* compiler, GCInfo &gcInfo);
+public:
+ RegSet(Compiler* compiler, GCInfo& gcInfo);
#ifdef _TARGET_ARM_
- regMaskTP rsMaskPreSpillRegs(bool includeAlignment)
- {
- return includeAlignment ?
- (rsMaskPreSpillRegArg | rsMaskPreSpillAlign) : rsMaskPreSpillRegArg;
- }
+ regMaskTP rsMaskPreSpillRegs(bool includeAlignment)
+ {
+ return includeAlignment ? (rsMaskPreSpillRegArg | rsMaskPreSpillAlign) : rsMaskPreSpillRegArg;
+ }
#endif // _TARGET_ARM_
private:
// The same descriptor is also used for 'multi-use' register tracking, BTW.
- struct SpillDsc
+ struct SpillDsc
{
- SpillDsc* spillNext; // next spilled value of same reg
+ SpillDsc* spillNext; // next spilled value of same reg
- union
- {
- GenTreePtr spillTree; // the value that was spilled
+ union {
+ GenTreePtr spillTree; // the value that was spilled
#ifdef LEGACY_BACKEND
- LclVarDsc* spillVarDsc; // variable if it's an enregistered variable
-#endif // LEGACY_BACKEND
+ LclVarDsc* spillVarDsc; // variable if it's an enregistered variable
+#endif // LEGACY_BACKEND
};
- TempDsc* spillTemp; // the temp holding the spilled value
+ TempDsc* spillTemp; // the temp holding the spilled value
#ifdef LEGACY_BACKEND
- GenTreePtr spillAddr; // owning complex address mode or nullptr
+ GenTreePtr spillAddr; // owning complex address mode or nullptr
- union
- {
- bool spillMoreMultis;
- bool bEnregisteredVariable; // For FP. Indicates that what was spilled was
- // an enregistered variable
+ union {
+ bool spillMoreMultis;
+ bool bEnregisteredVariable; // For FP. Indicates that what was spilled was
+ // an enregistered variable
};
#endif // LEGACY_BACKEND
- static SpillDsc* alloc (Compiler* pComp, RegSet* regSet, var_types type);
- static void freeDsc (RegSet *regSet, SpillDsc* spillDsc);
+ static SpillDsc* alloc(Compiler* pComp, RegSet* regSet, var_types type);
+ static void freeDsc(RegSet* regSet, SpillDsc* spillDsc);
};
#ifdef LEGACY_BACKEND
public:
- regMaskTP rsUseIfZero(regMaskTP regs, regMaskTP includeHint);
+ regMaskTP rsUseIfZero(regMaskTP regs, regMaskTP includeHint);
#endif // LEGACY_BACKEND
-
- //-------------------------------------------------------------------------
- //
- // Track the status of the registers
- //
+//-------------------------------------------------------------------------
+//
+// Track the status of the registers
+//
#ifdef LEGACY_BACKEND
-public: // TODO-Cleanup: Should be private, but Compiler uses it
- GenTreePtr rsUsedTree[REG_COUNT]; // trees currently sitting in the registers
+public: // TODO-Cleanup: Should be private, but Compiler uses it
+ GenTreePtr rsUsedTree[REG_COUNT]; // trees currently sitting in the registers
private:
- GenTreePtr rsUsedAddr[REG_COUNT]; // addr for which rsUsedTree[reg] is a part of the addressing mode
- SpillDsc * rsMultiDesc[REG_COUNT]; // keeps track of 'multiple-use' registers.
-#endif // LEGACY_BACKEND
+ GenTreePtr rsUsedAddr[REG_COUNT]; // addr for which rsUsedTree[reg] is a part of the addressing mode
+ SpillDsc* rsMultiDesc[REG_COUNT]; // keeps track of 'multiple-use' registers.
+#endif // LEGACY_BACKEND
private:
-
- bool rsNeededSpillReg; // true if this method needed to spill any registers
- regMaskTP rsModifiedRegsMask; // mask of the registers modified by the current function.
+ bool rsNeededSpillReg; // true if this method needed to spill any registers
+ regMaskTP rsModifiedRegsMask; // mask of the registers modified by the current function.
#ifdef DEBUG
- bool rsModifiedRegsMaskInitialized; // Has rsModifiedRegsMask been initialized? Guards against illegal use.
-#endif // DEBUG
+ bool rsModifiedRegsMaskInitialized; // Has rsModifiedRegsMask been initialized? Guards against illegal use.
+#endif // DEBUG
public:
-
- regMaskTP rsGetModifiedRegsMask() const
+ regMaskTP rsGetModifiedRegsMask() const
{
assert(rsModifiedRegsMaskInitialized);
return rsModifiedRegsMask;
}
- void rsClearRegsModified();
+ void rsClearRegsModified();
- void rsSetRegsModified(regMaskTP mask DEBUGARG(bool suppressDump = false));
+ void rsSetRegsModified(regMaskTP mask DEBUGARG(bool suppressDump = false));
- void rsRemoveRegsModified(regMaskTP mask);
+ void rsRemoveRegsModified(regMaskTP mask);
- bool rsRegsModified(regMaskTP mask) const
+ bool rsRegsModified(regMaskTP mask) const
{
assert(rsModifiedRegsMaskInitialized);
return (rsModifiedRegsMask & mask) != 0;
}
public: // TODO-Cleanup: Should be private, but GCInfo uses them
-
#ifdef LEGACY_BACKEND
- regMaskTP rsMaskUsed; // currently 'used' registers mask
-#endif // LEGACY_BACKEND
+ regMaskTP rsMaskUsed; // currently 'used' registers mask
+#endif // LEGACY_BACKEND
- __declspec(property(get=GetMaskVars,put=SetMaskVars))
- regMaskTP rsMaskVars; // mask of registers currently allocated to variables
+ __declspec(property(get = GetMaskVars, put = SetMaskVars)) regMaskTP rsMaskVars; // mask of registers currently
+ // allocated to variables
- regMaskTP GetMaskVars() const // 'get' property function for rsMaskVars property
+ regMaskTP GetMaskVars() const // 'get' property function for rsMaskVars property
{
return _rsMaskVars;
}
- void SetMaskVars(regMaskTP newMaskVars); // 'put' property function for rsMaskVars property
+ void SetMaskVars(regMaskTP newMaskVars); // 'put' property function for rsMaskVars property
- void AddMaskVars(regMaskTP addMaskVars) // union 'addMaskVars' with the rsMaskVars set
+ void AddMaskVars(regMaskTP addMaskVars) // union 'addMaskVars' with the rsMaskVars set
{
SetMaskVars(_rsMaskVars | addMaskVars);
}
- void RemoveMaskVars(regMaskTP removeMaskVars) // remove 'removeMaskVars' from the rsMaskVars set (like bitset DiffD)
+ void RemoveMaskVars(regMaskTP removeMaskVars) // remove 'removeMaskVars' from the rsMaskVars set (like bitset DiffD)
{
SetMaskVars(_rsMaskVars & ~removeMaskVars);
}
- void ClearMaskVars() // Like SetMaskVars(RBM_NONE), but without any debug output.
+ void ClearMaskVars() // Like SetMaskVars(RBM_NONE), but without any debug output.
{
_rsMaskVars = RBM_NONE;
}
private:
-
- regMaskTP _rsMaskVars; // backing store for rsMaskVars property
+ regMaskTP _rsMaskVars; // backing store for rsMaskVars property
#ifdef LEGACY_BACKEND
- regMaskTP rsMaskLock; // currently 'locked' registers mask
- regMaskTP rsMaskMult; // currently 'multiply used' registers mask
-#endif // LEGACY_BACKEND
+ regMaskTP rsMaskLock; // currently 'locked' registers mask
+ regMaskTP rsMaskMult; // currently 'multiply used' registers mask
+#endif // LEGACY_BACKEND
#ifdef _TARGET_ARMARCH_
- regMaskTP rsMaskCalleeSaved; // mask of the registers pushed/popped in the prolog/epilog
-#endif // _TARGET_ARM_
-
-public: // TODO-Cleanup: Should be private, but Compiler uses it
+ regMaskTP rsMaskCalleeSaved; // mask of the registers pushed/popped in the prolog/epilog
+#endif // _TARGET_ARM_
- regMaskTP rsMaskResvd; // mask of the registers that are reserved for special purposes (typically empty)
+public: // TODO-Cleanup: Should be private, but Compiler uses it
+ regMaskTP rsMaskResvd; // mask of the registers that are reserved for special purposes (typically empty)
public: // The PreSpill masks are used in LclVars.cpp
-
#ifdef _TARGET_ARM_
- regMaskTP rsMaskPreSpillAlign; // Mask of alignment padding added to prespill to keep double aligned args
- // at aligned stack addresses.
- regMaskTP rsMaskPreSpillRegArg; // mask of incoming registers that are spilled at the start of the prolog
- // This includes registers used to pass a struct (or part of a struct)
- // and all enregistered user arguments in a varargs call
-#endif // _TARGET_ARM_
+ regMaskTP rsMaskPreSpillAlign; // Mask of alignment padding added to prespill to keep double aligned args
+ // at aligned stack addresses.
+ regMaskTP rsMaskPreSpillRegArg; // mask of incoming registers that are spilled at the start of the prolog
+ // This includes registers used to pass a struct (or part of a struct)
+ // and all enregistered user arguments in a varargs call
+#endif // _TARGET_ARM_
#ifdef LEGACY_BACKEND
private:
-
// These getters/setters are ifdef here so that the accesses to these values in sharedfloat.cpp are redirected
- // to the appropriate value.
- // With FEATURE_STACK_FP_X87 (x86 FP codegen) we have separate register mask that just handle FP registers.
+ // to the appropriate value.
+ // With FEATURE_STACK_FP_X87 (x86 FP codegen) we have separate register mask that just handle FP registers.
// For all other platforms (and eventually on x86) we use unified register masks that handle both kinds.
//
- regMaskTP rsGetMaskUsed (); // Getter for rsMaskUsed or rsMaskUsedFloat
- regMaskTP rsGetMaskVars (); // Getter for rsMaskVars or rsMaskRegVarFloat
- regMaskTP rsGetMaskLock (); // Getter for rsMaskLock or rsMaskLockedFloat
- regMaskTP rsGetMaskMult (); // Getter for rsMaskMult or 0
+ regMaskTP rsGetMaskUsed(); // Getter for rsMaskUsed or rsMaskUsedFloat
+ regMaskTP rsGetMaskVars(); // Getter for rsMaskVars or rsMaskRegVarFloat
+ regMaskTP rsGetMaskLock(); // Getter for rsMaskLock or rsMaskLockedFloat
+ regMaskTP rsGetMaskMult(); // Getter for rsMaskMult or 0
- void rsSetMaskUsed (regMaskTP maskUsed); // Setter for rsMaskUsed or rsMaskUsedFloat
- void rsSetMaskVars (regMaskTP maskVars); // Setter for rsMaskVars or rsMaskRegVarFloat
- void rsSetMaskLock (regMaskTP maskLock); // Setter for rsMaskLock or rsMaskLockedFloat
+ void rsSetMaskUsed(regMaskTP maskUsed); // Setter for rsMaskUsed or rsMaskUsedFloat
+ void rsSetMaskVars(regMaskTP maskVars); // Setter for rsMaskVars or rsMaskRegVarFloat
+ void rsSetMaskLock(regMaskTP maskLock); // Setter for rsMaskLock or rsMaskLockedFloat
- void rsSetUsedTree (regNumber regNum, GenTreePtr tree); // Setter for rsUsedTree[]/genUsedRegsFloat[]
- void rsFreeUsedTree (regNumber regNum, GenTreePtr tree); // Free for rsUsedTree[]/genUsedRegsFloat[]
+ void rsSetUsedTree(regNumber regNum, GenTreePtr tree); // Setter for rsUsedTree[]/genUsedRegsFloat[]
+ void rsFreeUsedTree(regNumber regNum, GenTreePtr tree); // Free for rsUsedTree[]/genUsedRegsFloat[]
public:
-
- regPairNo rsFindRegPairNo (regMaskTP regMask);
+ regPairNo rsFindRegPairNo(regMaskTP regMask);
private:
- bool rsIsTreeInReg (regNumber reg, GenTreePtr tree);
+ bool rsIsTreeInReg(regNumber reg, GenTreePtr tree);
- regMaskTP rsExcludeHint(regMaskTP regs, regMaskTP excludeHint);
- regMaskTP rsNarrowHint(regMaskTP regs, regMaskTP narrowHint);
- regMaskTP rsMustExclude(regMaskTP regs, regMaskTP exclude);
- regMaskTP rsRegMaskFree ();
- regMaskTP rsRegMaskCanGrab ();
+ regMaskTP rsExcludeHint(regMaskTP regs, regMaskTP excludeHint);
+ regMaskTP rsNarrowHint(regMaskTP regs, regMaskTP narrowHint);
+ regMaskTP rsMustExclude(regMaskTP regs, regMaskTP exclude);
+ regMaskTP rsRegMaskFree();
+ regMaskTP rsRegMaskCanGrab();
- void rsMarkRegUsed (GenTreePtr tree, GenTreePtr addr = 0);
+ void rsMarkRegUsed(GenTreePtr tree, GenTreePtr addr = 0);
// A special case of "rsMarkRegUsed": the register used is an argument register, used to hold part of
    // the given argument node "promotedStructArg". (The name suggests that we're likely to use this
// for register holding a promoted struct argument, but the implementation doesn't depend on that.) The
// "isGCRef" argument indicates whether the register contains a GC reference.
- void rsMarkArgRegUsedByPromotedFieldArg(GenTreePtr promotedStructArg, regNumber regNum, bool isGCRef);
+ void rsMarkArgRegUsedByPromotedFieldArg(GenTreePtr promotedStructArg, regNumber regNum, bool isGCRef);
- void rsMarkRegPairUsed (GenTreePtr tree);
+ void rsMarkRegPairUsed(GenTreePtr tree);
- void rsMarkRegFree (regMaskTP regMask);
- void rsMarkRegFree (regNumber reg, GenTreePtr tree);
- void rsMultRegFree (regMaskTP regMask);
- unsigned rsFreeNeededRegCount(regMaskTP needReg);
+ void rsMarkRegFree(regMaskTP regMask);
+ void rsMarkRegFree(regNumber reg, GenTreePtr tree);
+ void rsMultRegFree(regMaskTP regMask);
+ unsigned rsFreeNeededRegCount(regMaskTP needReg);
- void rsLockReg (regMaskTP regMask);
- void rsUnlockReg (regMaskTP regMask);
- void rsLockUsedReg (regMaskTP regMask);
- void rsUnlockUsedReg (regMaskTP regMask);
- void rsLockReg (regMaskTP regMask, regMaskTP * usedMask);
- void rsUnlockReg (regMaskTP regMask, regMaskTP usedMask);
+ void rsLockReg(regMaskTP regMask);
+ void rsUnlockReg(regMaskTP regMask);
+ void rsLockUsedReg(regMaskTP regMask);
+ void rsUnlockUsedReg(regMaskTP regMask);
+ void rsLockReg(regMaskTP regMask, regMaskTP* usedMask);
+ void rsUnlockReg(regMaskTP regMask, regMaskTP usedMask);
- regMaskTP rsRegExclMask (regMaskTP regMask, regMaskTP rmvMask);
-
- regNumber rsPickRegInTmpOrder(regMaskTP regMask);
+ regMaskTP rsRegExclMask(regMaskTP regMask, regMaskTP rmvMask);
+ regNumber rsPickRegInTmpOrder(regMaskTP regMask);
public: // used by emitter (!)
- regNumber rsGrabReg (regMaskTP regMask);
+ regNumber rsGrabReg(regMaskTP regMask);
+
private:
- regNumber rsPickReg (regMaskTP regMask = RBM_NONE,
- regMaskTP regBest = RBM_NONE);
+ regNumber rsPickReg(regMaskTP regMask = RBM_NONE, regMaskTP regBest = RBM_NONE);
+
public: // used by emitter (!)
- regNumber rsPickFreeReg (regMaskTP regMaskHint = RBM_ALLINT);
+ regNumber rsPickFreeReg(regMaskTP regMaskHint = RBM_ALLINT);
+
private:
- regPairNo rsGrabRegPair (regMaskTP regMask);
- regPairNo rsPickRegPair (regMaskTP regMask);
+ regPairNo rsGrabRegPair(regMaskTP regMask);
+ regPairNo rsPickRegPair(regMaskTP regMask);
class RegisterPreference
{
@@ -284,32 +271,29 @@ private:
regMaskTP best;
RegisterPreference(regMaskTP _ok, regMaskTP _best)
{
- ok = _ok;
+ ok = _ok;
best = _best;
}
};
- regNumber PickRegFloat (GenTreePtr tree,
- var_types type = TYP_DOUBLE,
- RegisterPreference *pref = NULL,
- bool bUsed = true);
- regNumber PickRegFloat (var_types type = TYP_DOUBLE,
- RegisterPreference *pref = NULL,
- bool bUsed = true);
- regNumber PickRegFloatOtherThan (GenTreePtr tree, var_types type, regNumber reg);
- regNumber PickRegFloatOtherThan (var_types type, regNumber reg);
-
- regMaskTP RegFreeFloat ();
-
- void SetUsedRegFloat (GenTreePtr tree, bool bValue);
- void SetLockedRegFloat (GenTreePtr tree, bool bValue);
- bool IsLockedRegFloat (GenTreePtr tree);
-
- var_types rsRmvMultiReg (regNumber reg);
- void rsRecMultiReg (regNumber reg, var_types type);
+ regNumber PickRegFloat(GenTreePtr tree,
+ var_types type = TYP_DOUBLE,
+ RegisterPreference* pref = NULL,
+ bool bUsed = true);
+ regNumber PickRegFloat(var_types type = TYP_DOUBLE, RegisterPreference* pref = NULL, bool bUsed = true);
+ regNumber PickRegFloatOtherThan(GenTreePtr tree, var_types type, regNumber reg);
+ regNumber PickRegFloatOtherThan(var_types type, regNumber reg);
+
+ regMaskTP RegFreeFloat();
+
+ void SetUsedRegFloat(GenTreePtr tree, bool bValue);
+ void SetLockedRegFloat(GenTreePtr tree, bool bValue);
+ bool IsLockedRegFloat(GenTreePtr tree);
+
+ var_types rsRmvMultiReg(regNumber reg);
+ void rsRecMultiReg(regNumber reg, var_types type);
#endif // LEGACY_BACKEND
public:
-
#ifdef DEBUG
/*****************************************************************************
* Should we stress register tracking logic ?
@@ -321,11 +305,13 @@ public:
* 1 = rsPickReg() picks 'bad' registers.
* 2 = codegen spills at safe points. This is still flaky
*/
- enum rsStressRegsType { RS_STRESS_NONE = 0,
- RS_PICK_BAD_REG = 01,
- RS_SPILL_SAFE = 02,
- };
- rsStressRegsType rsStressRegs ();
+ enum rsStressRegsType
+ {
+ RS_STRESS_NONE = 0,
+ RS_PICK_BAD_REG = 01,
+ RS_SPILL_SAFE = 02,
+ };
+ rsStressRegsType rsStressRegs();
#endif // DEBUG
private:
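
To make the stress knob above concrete: a minimal, hypothetical sketch of how a setting such as RS_PICK_BAD_REG is typically consumed by a register picker (this helper is not part of the change; only the enum names come from the header above). Under stress the picker deliberately returns the least-preferred candidate.

    // Hypothetical helper for illustration only.
    // Returns a single-bit mask: normally the lowest-numbered candidate,
    // but under RS_PICK_BAD_REG the highest-numbered one.
    static regMaskTP pickWorstOrBest(regMaskTP candidates, RegSet::rsStressRegsType stress)
    {
        assert(candidates != RBM_NONE);
        if (stress == RegSet::RS_PICK_BAD_REG)
        {
            while ((candidates & (candidates - 1)) != 0)
            {
                candidates &= (candidates - 1); // drop the lowest bit until one remains
            }
            return candidates;
        }
        return candidates & (~candidates + 1); // isolate the lowest set bit
    }

rsStressRegs() decides per compilation whether such a path is exercised at all.
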
@@ -335,86 +321,78 @@ private:
//
// When a register gets spilled, the old information is stored here
- SpillDsc* rsSpillDesc[REG_COUNT];
- SpillDsc* rsSpillFree; // list of unused spill descriptors
+ SpillDsc* rsSpillDesc[REG_COUNT];
+ SpillDsc* rsSpillFree; // list of unused spill descriptors
#ifdef LEGACY_BACKEND
- SpillDsc* rsSpillFloat;
+ SpillDsc* rsSpillFloat;
#endif // LEGACY_BACKEND
+ void rsSpillChk();
+ void rsSpillInit();
+ void rsSpillDone();
+ void rsSpillBeg();
+ void rsSpillEnd();
- void rsSpillChk ();
- void rsSpillInit ();
- void rsSpillDone ();
- void rsSpillBeg ();
- void rsSpillEnd ();
-
- void rsSpillTree (regNumber reg,
- GenTreePtr tree,
- unsigned regIdx = 0);
+ void rsSpillTree(regNumber reg, GenTreePtr tree, unsigned regIdx = 0);
#if defined(_TARGET_X86_) && !FEATURE_STACK_FP_X87
- void rsSpillFPStack(GenTreePtr tree);
+ void rsSpillFPStack(GenTreePtr tree);
#endif // defined(_TARGET_X86_) && !FEATURE_STACK_FP_X87
#ifdef LEGACY_BACKEND
- void rsSpillReg (regNumber reg);
- void rsSpillRegIfUsed(regNumber reg);
- void rsSpillRegs (regMaskTP regMask);
+ void rsSpillReg(regNumber reg);
+ void rsSpillRegIfUsed(regNumber reg);
+ void rsSpillRegs(regMaskTP regMask);
#endif // LEGACY_BACKEND
- SpillDsc * rsGetSpillInfo (GenTreePtr tree,
- regNumber reg,
- SpillDsc ** pPrevDsc = NULL
+ SpillDsc* rsGetSpillInfo(GenTreePtr tree,
+ regNumber reg,
+ SpillDsc** pPrevDsc = nullptr
#ifdef LEGACY_BACKEND
- , SpillDsc ** pMultiDsc = NULL
+ ,
+ SpillDsc** pMultiDsc = NULL
#endif // LEGACY_BACKEND
- );
+ );
- TempDsc * rsGetSpillTempWord(regNumber oldReg,
- SpillDsc * dsc,
- SpillDsc * prevDsc);
+ TempDsc* rsGetSpillTempWord(regNumber oldReg, SpillDsc* dsc, SpillDsc* prevDsc);
#ifdef LEGACY_BACKEND
- enum ExactReg { ANY_REG, EXACT_REG };
- enum KeepReg { FREE_REG, KEEP_REG };
+ enum ExactReg
+ {
+ ANY_REG,
+ EXACT_REG
+ };
+ enum KeepReg
+ {
+ FREE_REG,
+ KEEP_REG
+ };
- regNumber rsUnspillOneReg (GenTreePtr tree,
- regNumber oldReg,
- KeepReg willKeepNewReg,
- regMaskTP needReg);
+ regNumber rsUnspillOneReg(GenTreePtr tree, regNumber oldReg, KeepReg willKeepNewReg, regMaskTP needReg);
#endif // LEGACY_BACKEND
- TempDsc* rsUnspillInPlace(GenTreePtr tree,
- regNumber oldReg,
- unsigned regIdx = 0);
+ TempDsc* rsUnspillInPlace(GenTreePtr tree, regNumber oldReg, unsigned regIdx = 0);
#ifdef LEGACY_BACKEND
- void rsUnspillReg (GenTreePtr tree,
- regMaskTP needReg,
- KeepReg keepReg);
+ void rsUnspillReg(GenTreePtr tree, regMaskTP needReg, KeepReg keepReg);
- void rsUnspillRegPair(GenTreePtr tree,
- regMaskTP needReg,
- KeepReg keepReg);
+ void rsUnspillRegPair(GenTreePtr tree, regMaskTP needReg, KeepReg keepReg);
#endif // LEGACY_BACKEND
- void rsMarkSpill (GenTreePtr tree,
- regNumber reg);
+ void rsMarkSpill(GenTreePtr tree, regNumber reg);
#ifdef LEGACY_BACKEND
- void rsMarkUnspill (GenTreePtr tree,
- regNumber reg);
+ void rsMarkUnspill(GenTreePtr tree, regNumber reg);
#endif // LEGACY_BACKEND
#if FEATURE_STACK_FP_X87
- regMaskTP rsMaskUsedFloat;
- regMaskTP rsMaskRegVarFloat;
- regMaskTP rsMaskLockedFloat;
- GenTreePtr genUsedRegsFloat[REG_FPCOUNT];
- LclVarDsc* genRegVarsFloat[REG_FPCOUNT];
+ regMaskTP rsMaskUsedFloat;
+ regMaskTP rsMaskRegVarFloat;
+ regMaskTP rsMaskLockedFloat;
+ GenTreePtr genUsedRegsFloat[REG_FPCOUNT];
+ LclVarDsc* genRegVarsFloat[REG_FPCOUNT];
#endif // FEATURE_STACK_FP_X87
-
};
//-------------------------------------------------------------------------
@@ -425,61 +403,58 @@ private:
// Only integer registers are tracked.
//
-struct RegValDsc
+struct RegValDsc
{
- regValKind rvdKind;
- union
- {
- ssize_t rvdIntCnsVal; // for rvdKind == RV_INT_CNS
- unsigned rvdLclVarNum; // for rvdKind == RV_LCL_VAR, RV_LCL_VAR_LNG_LO, RV_LCL_VAR_LNG_HI
+ regValKind rvdKind;
+ union {
+ ssize_t rvdIntCnsVal; // for rvdKind == RV_INT_CNS
+ unsigned rvdLclVarNum; // for rvdKind == RV_LCL_VAR, RV_LCL_VAR_LNG_LO, RV_LCL_VAR_LNG_HI
};
};
-class RegTracker
+class RegTracker
{
- Compiler* compiler;
- RegSet* regSet;
- RegValDsc rsRegValues[REG_COUNT];
+ Compiler* compiler;
+ RegSet* regSet;
+ RegValDsc rsRegValues[REG_COUNT];
public:
-
- void rsTrackInit(Compiler* comp, RegSet* rs)
+ void rsTrackInit(Compiler* comp, RegSet* rs)
{
compiler = comp;
- regSet = rs;
+ regSet = rs;
rsTrackRegClr();
}
- void rsTrackRegClr ();
- void rsTrackRegClrPtr ();
- void rsTrackRegTrash (regNumber reg);
- void rsTrackRegMaskTrash(regMaskTP regMask);
- regMaskTP rsTrashRegsForGCInterruptability();
- void rsTrackRegIntCns (regNumber reg, ssize_t val);
- void rsTrackRegLclVar (regNumber reg, unsigned var);
- void rsTrackRegLclVarLng(regNumber reg, unsigned var, bool low);
- bool rsTrackIsLclVarLng(regValKind rvKind);
- void rsTrackRegClsVar (regNumber reg, GenTreePtr clsVar);
- void rsTrackRegCopy (regNumber reg1, regNumber reg2);
- void rsTrackRegSwap (regNumber reg1, regNumber reg2);
- void rsTrackRegAssign (GenTree *op1, GenTree *op2);
-
- regNumber rsIconIsInReg (ssize_t val, ssize_t* closeDelta = NULL);
- bool rsIconIsInReg (ssize_t val, regNumber reg);
- regNumber rsLclIsInReg (unsigned var);
- regPairNo rsLclIsInRegPair (unsigned var);
-
- //---------------------- Load suppression ---------------------------------
+ void rsTrackRegClr();
+ void rsTrackRegClrPtr();
+ void rsTrackRegTrash(regNumber reg);
+ void rsTrackRegMaskTrash(regMaskTP regMask);
+ regMaskTP rsTrashRegsForGCInterruptability();
+ void rsTrackRegIntCns(regNumber reg, ssize_t val);
+ void rsTrackRegLclVar(regNumber reg, unsigned var);
+ void rsTrackRegLclVarLng(regNumber reg, unsigned var, bool low);
+ bool rsTrackIsLclVarLng(regValKind rvKind);
+ void rsTrackRegClsVar(regNumber reg, GenTreePtr clsVar);
+ void rsTrackRegCopy(regNumber reg1, regNumber reg2);
+ void rsTrackRegSwap(regNumber reg1, regNumber reg2);
+ void rsTrackRegAssign(GenTree* op1, GenTree* op2);
+
+ regNumber rsIconIsInReg(ssize_t val, ssize_t* closeDelta = nullptr);
+ bool rsIconIsInReg(ssize_t val, regNumber reg);
+ regNumber rsLclIsInReg(unsigned var);
+ regPairNo rsLclIsInRegPair(unsigned var);
+
+//---------------------- Load suppression ---------------------------------
#if REDUNDANT_LOAD
- void rsTrashLclLong (unsigned var);
- void rsTrashLcl (unsigned var);
- void rsTrashRegSet (regMaskTP regMask);
+ void rsTrashLclLong(unsigned var);
+ void rsTrashLcl(unsigned var);
+ void rsTrashRegSet(regMaskTP regMask);
- regMaskTP rsUselessRegs ();
+ regMaskTP rsUselessRegs();
#endif // REDUNDANT_LOAD
-
};
#endif // _REGSET_H
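
The RegTracker declared above exists to support this load suppression: codegen records what each integer register currently holds and consults the tracker before re-materializing a value. A hedged sketch of that pattern follows; materializeConstant and emitLoadConstant are hypothetical names, and it assumes rsIconIsInReg returns REG_NA when no register currently holds the constant.

    // Illustration only -- not code from this change.
    void emitLoadConstant(regNumber reg, ssize_t val); // hypothetical emit helper

    regNumber materializeConstant(RegTracker& tracker, RegSet& regSet, ssize_t val)
    {
        regNumber reg = tracker.rsIconIsInReg(val); // already live in some register?
        if (reg != REG_NA)
        {
            return reg; // reuse it; no load needs to be emitted
        }
        reg = regSet.rsPickFreeReg();       // otherwise pick a free integer register
        emitLoadConstant(reg, val);         // materialize the constant
        tracker.rsTrackRegIntCns(reg, val); // record what the register now holds
        return reg;
    }
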
diff --git a/src/jit/scopeinfo.cpp b/src/jit/scopeinfo.cpp
index f873dc7b9c..f2a7902317 100644
--- a/src/jit/scopeinfo.cpp
+++ b/src/jit/scopeinfo.cpp
@@ -17,36 +17,36 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/******************************************************************************
* Debuggable code
- *
- * We break up blocks at the start and end IL ranges of the local variables.
- * This is because IL offsets do not correspond exactly to native offsets
- * except at block boundaries. No basic-blocks are deleted (not even
- * unreachable), so there will not be any missing address-ranges, though the
+ *
+ * We break up blocks at the start and end IL ranges of the local variables.
+ * This is because IL offsets do not correspond exactly to native offsets
+ * except at block boundaries. No basic-blocks are deleted (not even
+ * unreachable), so there will not be any missing address-ranges, though the
* blocks themselves may not be ordered. (Also, internal blocks may be added).
- * o At the start of each basic block, siBeginBlock() checks if any variables
+ * o At the start of each basic block, siBeginBlock() checks if any variables
* are coming in scope, and adds an open scope to siOpenScopeList if needed.
- * o At the end of each basic block, siEndBlock() checks if any variables
- * are going out of scope and moves the open scope from siOpenScopeLast
+ * o At the end of each basic block, siEndBlock() checks if any variables
+ * are going out of scope and moves the open scope from siOpenScopeLast
* to siScopeList.
- *
+ *
* Optimized code
- *
- * We cannot break up the blocks as this will produce different code under
+ *
+ * We cannot break up the blocks as this will produce different code under
* the debugger. Instead we try to do a best effort.
- * o At the start of each basic block, siBeginBlock() adds open scopes
- * corresponding to block->bbLiveIn to siOpenScopeList. Also siUpdate()
+ * o At the start of each basic block, siBeginBlock() adds open scopes
+ * corresponding to block->bbLiveIn to siOpenScopeList. Also siUpdate()
* is called to close scopes for variables which are not live anymore.
- * o siEndBlock() closes scopes for any variables which go out of range
+ * o siEndBlock() closes scopes for any variables which go out of range
* before bbCodeOffsEnd.
- * o siCloseAllOpenScopes() closes any open scopes after all the blocks.
- * This should only be needed if some basic blocks are deleted/out of order,
+ * o siCloseAllOpenScopes() closes any open scopes after all the blocks.
+ * This should only be needed if some basic blocks are deleted/out of order,
* etc.
* Also,
- * o At every assignment to a variable, siCheckVarScope() adds an open scope
+ * o At every assignment to a variable, siCheckVarScope() adds an open scope
* for the variable being assigned to.
- * o genChangeLife() calls siUpdate() which closes scopes for variables which
+ * o genChangeLife() calls siUpdate() which closes scopes for variables which
* are not live anymore.
- *
+ *
******************************************************************************
*/
@@ -62,66 +62,75 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#ifdef DEBUGGING_SUPPORT
/*****************************************************************************/
-bool Compiler::siVarLoc::vlIsInReg(regNumber reg)
+bool Compiler::siVarLoc::vlIsInReg(regNumber reg)
{
switch (vlType)
{
- case VLT_REG: return ( vlReg.vlrReg == reg);
- case VLT_REG_REG: return ((vlRegReg.vlrrReg1 == reg) ||
- (vlRegReg.vlrrReg2 == reg));
- case VLT_REG_STK: return ( vlRegStk.vlrsReg == reg);
- case VLT_STK_REG: return ( vlStkReg.vlsrReg == reg);
-
- case VLT_STK:
- case VLT_STK2:
- case VLT_FPSTK: return false;
-
- default: assert(!"Bad locType");
- return false;
+ case VLT_REG:
+ return (vlReg.vlrReg == reg);
+ case VLT_REG_REG:
+ return ((vlRegReg.vlrrReg1 == reg) || (vlRegReg.vlrrReg2 == reg));
+ case VLT_REG_STK:
+ return (vlRegStk.vlrsReg == reg);
+ case VLT_STK_REG:
+ return (vlStkReg.vlsrReg == reg);
+
+ case VLT_STK:
+ case VLT_STK2:
+ case VLT_FPSTK:
+ return false;
+
+ default:
+ assert(!"Bad locType");
+ return false;
}
}
-bool Compiler::siVarLoc::vlIsOnStk(regNumber reg,
- signed offset)
+bool Compiler::siVarLoc::vlIsOnStk(regNumber reg, signed offset)
{
regNumber actualReg;
-
+
switch (vlType)
{
- case VLT_REG_STK: actualReg = vlRegStk.vlrsStk.vlrssBaseReg;
- if ((int) actualReg == (int) ICorDebugInfo::REGNUM_AMBIENT_SP) {
- actualReg = REG_SPBASE;
- }
- return ((actualReg == reg) &&
- (vlRegStk.vlrsStk.vlrssOffset == offset));
- case VLT_STK_REG: actualReg = vlStkReg.vlsrStk.vlsrsBaseReg;
- if ((int) actualReg == (int) ICorDebugInfo::REGNUM_AMBIENT_SP) {
- actualReg = REG_SPBASE;
- }
- return ((actualReg == reg) &&
- (vlStkReg.vlsrStk.vlsrsOffset == offset));
- case VLT_STK: actualReg = vlStk.vlsBaseReg;
- if ((int) actualReg == (int) ICorDebugInfo::REGNUM_AMBIENT_SP) {
- actualReg = REG_SPBASE;
- }
- return ((actualReg == reg) &&
- (vlStk.vlsOffset == offset));
- case VLT_STK2: actualReg = vlStk2.vls2BaseReg;
- if ((int) actualReg == (int) ICorDebugInfo::REGNUM_AMBIENT_SP) {
- actualReg = REG_SPBASE;
- }
- return ((actualReg == reg) &&
- ((vlStk2.vls2Offset == offset) ||
- (vlStk2.vls2Offset == (offset - 4))));
+ case VLT_REG_STK:
+ actualReg = vlRegStk.vlrsStk.vlrssBaseReg;
+ if ((int)actualReg == (int)ICorDebugInfo::REGNUM_AMBIENT_SP)
+ {
+ actualReg = REG_SPBASE;
+ }
+ return ((actualReg == reg) && (vlRegStk.vlrsStk.vlrssOffset == offset));
+ case VLT_STK_REG:
+ actualReg = vlStkReg.vlsrStk.vlsrsBaseReg;
+ if ((int)actualReg == (int)ICorDebugInfo::REGNUM_AMBIENT_SP)
+ {
+ actualReg = REG_SPBASE;
+ }
+ return ((actualReg == reg) && (vlStkReg.vlsrStk.vlsrsOffset == offset));
+ case VLT_STK:
+ actualReg = vlStk.vlsBaseReg;
+ if ((int)actualReg == (int)ICorDebugInfo::REGNUM_AMBIENT_SP)
+ {
+ actualReg = REG_SPBASE;
+ }
+ return ((actualReg == reg) && (vlStk.vlsOffset == offset));
+ case VLT_STK2:
+ actualReg = vlStk2.vls2BaseReg;
+ if ((int)actualReg == (int)ICorDebugInfo::REGNUM_AMBIENT_SP)
+ {
+ actualReg = REG_SPBASE;
+ }
+ return ((actualReg == reg) && ((vlStk2.vls2Offset == offset) || (vlStk2.vls2Offset == (offset - 4))));
- case VLT_REG:
- case VLT_REG_FP:
- case VLT_REG_REG:
- case VLT_FPSTK: return false;
+ case VLT_REG:
+ case VLT_REG_FP:
+ case VLT_REG_REG:
+ case VLT_FPSTK:
+ return false;
- default: assert(!"Bad locType");
- return false;
+ default:
+ assert(!"Bad locType");
+ return false;
}
}
@@ -139,40 +148,37 @@ bool Compiler::siVarLoc::vlIsOnStk(regNumber reg,
*============================================================================
*/
-
/*****************************************************************************
* siNewScope
*
* Creates a new scope and adds it to the Open scope list.
*/
-CodeGen::siScope * CodeGen::siNewScope( unsigned LVnum,
- unsigned varNum)
+CodeGen::siScope* CodeGen::siNewScope(unsigned LVnum, unsigned varNum)
{
- bool tracked = compiler->lvaTable[varNum].lvTracked;
- unsigned varIndex = compiler->lvaTable[varNum].lvVarIndex;
+ bool tracked = compiler->lvaTable[varNum].lvTracked;
+ unsigned varIndex = compiler->lvaTable[varNum].lvVarIndex;
if (tracked)
{
siEndTrackedScope(varIndex);
}
-
- siScope * newScope = (siScope*) compiler->compGetMem(sizeof(*newScope), CMK_SiScope);
+ siScope* newScope = (siScope*)compiler->compGetMem(sizeof(*newScope), CMK_SiScope);
newScope->scStartLoc.CaptureLocation(getEmitter());
assert(newScope->scStartLoc.Valid());
newScope->scEndLoc.Init();
- newScope->scLVnum = LVnum;
- newScope->scVarNum = varNum;
- newScope->scNext = NULL;
- newScope->scStackLevel = genStackLevel; // used only by stack vars
+ newScope->scLVnum = LVnum;
+ newScope->scVarNum = varNum;
+ newScope->scNext = nullptr;
+ newScope->scStackLevel = genStackLevel; // used only by stack vars
- siOpenScopeLast->scNext = newScope;
- newScope->scPrev = siOpenScopeLast;
- siOpenScopeLast = newScope;
+ siOpenScopeLast->scNext = newScope;
+ newScope->scPrev = siOpenScopeLast;
+ siOpenScopeLast = newScope;
if (tracked)
{
@@ -182,77 +188,73 @@ CodeGen::siScope * CodeGen::siNewScope( unsigned LVnum,
return newScope;
}
-
-
/*****************************************************************************
* siRemoveFromOpenScopeList
*
* Removes a scope from the open-scope list and puts it into the done-scope list
*/
-void CodeGen::siRemoveFromOpenScopeList(CodeGen::siScope * scope)
+void CodeGen::siRemoveFromOpenScopeList(CodeGen::siScope* scope)
{
assert(scope);
assert(scope->scEndLoc.Valid());
// Remove from open-scope list
- scope->scPrev->scNext = scope->scNext;
+ scope->scPrev->scNext = scope->scNext;
if (scope->scNext)
{
- scope->scNext->scPrev = scope->scPrev;
+ scope->scNext->scPrev = scope->scPrev;
}
else
{
- siOpenScopeLast = scope->scPrev;
+ siOpenScopeLast = scope->scPrev;
}
// Add to the finished scope list. (Try to) filter out scopes of length 0.
if (scope->scStartLoc != scope->scEndLoc)
{
- siScopeLast->scNext = scope;
- siScopeLast = scope;
+ siScopeLast->scNext = scope;
+ siScopeLast = scope;
siScopeCnt++;
}
}
-
-
/*----------------------------------------------------------------------------
* These functions end scopes given different types of parameters
*----------------------------------------------------------------------------
*/
-
/*****************************************************************************
* For tracked vars, we don't need to search for the scope in the list as we
* have a pointer to the open scopes of all tracked variables.
*/
-void CodeGen::siEndTrackedScope(unsigned varIndex)
+void CodeGen::siEndTrackedScope(unsigned varIndex)
{
- siScope * scope = siLatestTrackedScopes[varIndex];
+ siScope* scope = siLatestTrackedScopes[varIndex];
if (!scope)
+ {
return;
+ }
scope->scEndLoc.CaptureLocation(getEmitter());
assert(scope->scEndLoc.Valid());
siRemoveFromOpenScopeList(scope);
- siLatestTrackedScopes[varIndex] = NULL;
+ siLatestTrackedScopes[varIndex] = nullptr;
}
-
/*****************************************************************************
* If we don't know that the variable is tracked, this function handles both
* cases.
*/
-void CodeGen::siEndScope(unsigned varNum)
+void CodeGen::siEndScope(unsigned varNum)
{
- for (siScope * scope = siOpenScopeList.scNext; scope; scope = scope->scNext)
+ for (siScope* scope = siOpenScopeList.scNext; scope; scope = scope->scNext)
{
if (scope->scVarNum == varNum)
{
@@ -273,28 +275,26 @@ void CodeGen::siEndScope(unsigned varNum)
}
}
-
/*****************************************************************************
* If we have a handle to the siScope structure, we handle ending this scope
* differently than if we just had a variable number. This saves us searching
* the open-scope list again.
*/
-void CodeGen::siEndScope(siScope * scope)
+void CodeGen::siEndScope(siScope* scope)
{
scope->scEndLoc.CaptureLocation(getEmitter());
assert(scope->scEndLoc.Valid());
siRemoveFromOpenScopeList(scope);
- LclVarDsc & lclVarDsc1 = compiler->lvaTable[scope->scVarNum];
+ LclVarDsc& lclVarDsc1 = compiler->lvaTable[scope->scVarNum];
if (lclVarDsc1.lvTracked)
{
- siLatestTrackedScopes[lclVarDsc1.lvVarIndex] = NULL;
+ siLatestTrackedScopes[lclVarDsc1.lvVarIndex] = nullptr;
}
}
-
/*****************************************************************************
* siVerifyLocalVarTab
*
@@ -304,23 +304,23 @@ void CodeGen::siEndScope(siScope * scope)
#ifdef DEBUG
-bool CodeGen::siVerifyLocalVarTab()
+bool CodeGen::siVerifyLocalVarTab()
{
// No entries with overlapping lives should have the same slot.
for (unsigned i = 0; i < compiler->info.compVarScopesCount; i++)
{
- for (unsigned j = i+1; j < compiler->info.compVarScopesCount; j++)
+ for (unsigned j = i + 1; j < compiler->info.compVarScopesCount; j++)
{
- unsigned slot1 = compiler->info.compVarScopes[i].vsdVarNum;
- unsigned beg1 = compiler->info.compVarScopes[i].vsdLifeBeg;
- unsigned end1 = compiler->info.compVarScopes[i].vsdLifeEnd;
+ unsigned slot1 = compiler->info.compVarScopes[i].vsdVarNum;
+ unsigned beg1 = compiler->info.compVarScopes[i].vsdLifeBeg;
+ unsigned end1 = compiler->info.compVarScopes[i].vsdLifeEnd;
- unsigned slot2 = compiler->info.compVarScopes[j].vsdVarNum;
- unsigned beg2 = compiler->info.compVarScopes[j].vsdLifeBeg;
- unsigned end2 = compiler->info.compVarScopes[j].vsdLifeEnd;
+ unsigned slot2 = compiler->info.compVarScopes[j].vsdVarNum;
+ unsigned beg2 = compiler->info.compVarScopes[j].vsdLifeBeg;
+ unsigned end2 = compiler->info.compVarScopes[j].vsdLifeEnd;
- if (slot1==slot2 && (end1>beg2 && beg1<end2))
+ if (slot1 == slot2 && (end1 > beg2 && beg1 < end2))
{
return false;
}
@@ -332,37 +332,34 @@ bool CodeGen::siVerifyLocalVarTab()
#endif
-
-
/*============================================================================
* INTERFACE (public) Functions for ScopeInfo
*============================================================================
*/
-
-void CodeGen::siInit()
+void CodeGen::siInit()
{
#ifdef _TARGET_X86_
- assert((unsigned)ICorDebugInfo::REGNUM_EAX == REG_EAX);
- assert((unsigned)ICorDebugInfo::REGNUM_ECX == REG_ECX);
- assert((unsigned)ICorDebugInfo::REGNUM_EDX == REG_EDX);
- assert((unsigned)ICorDebugInfo::REGNUM_EBX == REG_EBX);
- assert((unsigned)ICorDebugInfo::REGNUM_ESP == REG_ESP);
- assert((unsigned)ICorDebugInfo::REGNUM_EBP == REG_EBP);
- assert((unsigned)ICorDebugInfo::REGNUM_ESI == REG_ESI);
- assert((unsigned)ICorDebugInfo::REGNUM_EDI == REG_EDI);
+ assert((unsigned)ICorDebugInfo::REGNUM_EAX == REG_EAX);
+ assert((unsigned)ICorDebugInfo::REGNUM_ECX == REG_ECX);
+ assert((unsigned)ICorDebugInfo::REGNUM_EDX == REG_EDX);
+ assert((unsigned)ICorDebugInfo::REGNUM_EBX == REG_EBX);
+ assert((unsigned)ICorDebugInfo::REGNUM_ESP == REG_ESP);
+ assert((unsigned)ICorDebugInfo::REGNUM_EBP == REG_EBP);
+ assert((unsigned)ICorDebugInfo::REGNUM_ESI == REG_ESI);
+ assert((unsigned)ICorDebugInfo::REGNUM_EDI == REG_EDI);
#endif
- assert((unsigned)ICorDebugInfo::VLT_REG == Compiler::VLT_REG );
- assert((unsigned)ICorDebugInfo::VLT_STK == Compiler::VLT_STK );
- assert((unsigned)ICorDebugInfo::VLT_REG_REG == Compiler::VLT_REG_REG );
- assert((unsigned)ICorDebugInfo::VLT_REG_STK == Compiler::VLT_REG_STK );
- assert((unsigned)ICorDebugInfo::VLT_STK_REG == Compiler::VLT_STK_REG );
- assert((unsigned)ICorDebugInfo::VLT_STK2 == Compiler::VLT_STK2 );
- assert((unsigned)ICorDebugInfo::VLT_FPSTK == Compiler::VLT_FPSTK );
- assert((unsigned)ICorDebugInfo::VLT_FIXED_VA == Compiler::VLT_FIXED_VA );
- assert((unsigned)ICorDebugInfo::VLT_COUNT == Compiler::VLT_COUNT );
- assert((unsigned)ICorDebugInfo::VLT_INVALID == Compiler::VLT_INVALID );
+ assert((unsigned)ICorDebugInfo::VLT_REG == Compiler::VLT_REG);
+ assert((unsigned)ICorDebugInfo::VLT_STK == Compiler::VLT_STK);
+ assert((unsigned)ICorDebugInfo::VLT_REG_REG == Compiler::VLT_REG_REG);
+ assert((unsigned)ICorDebugInfo::VLT_REG_STK == Compiler::VLT_REG_STK);
+ assert((unsigned)ICorDebugInfo::VLT_STK_REG == Compiler::VLT_STK_REG);
+ assert((unsigned)ICorDebugInfo::VLT_STK2 == Compiler::VLT_STK2);
+ assert((unsigned)ICorDebugInfo::VLT_FPSTK == Compiler::VLT_FPSTK);
+ assert((unsigned)ICorDebugInfo::VLT_FIXED_VA == Compiler::VLT_FIXED_VA);
+ assert((unsigned)ICorDebugInfo::VLT_COUNT == Compiler::VLT_COUNT);
+ assert((unsigned)ICorDebugInfo::VLT_INVALID == Compiler::VLT_INVALID);
/* ICorDebugInfo::VarLoc and siVarLoc should overlap exactly as we cast
* one to the other in eeSetLVinfo()
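
The runtime asserts above pin the enum values; the comment's layout requirement could equally be expressed as a compile-time check. The following guard is hypothetical and not part of this change:

    // Hypothetical compile-time guard for the cast performed in eeSetLVinfo().
    static_assert(sizeof(Compiler::siVarLoc) == sizeof(ICorDebugInfo::VarLoc),
                  "siVarLoc must overlap ICorDebugInfo::VarLoc exactly");
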
@@ -373,31 +370,32 @@ void CodeGen::siInit()
assert(compiler->opts.compScopeInfo);
- siOpenScopeList.scNext = NULL;
- siOpenScopeLast = & siOpenScopeList;
- siScopeLast = & siScopeList;
+ siOpenScopeList.scNext = nullptr;
+ siOpenScopeLast = &siOpenScopeList;
+ siScopeLast = &siScopeList;
- siScopeCnt = 0;
+ siScopeCnt = 0;
VarSetOps::AssignNoCopy(compiler, siLastLife, VarSetOps::MakeEmpty(compiler));
- siLastEndOffs = 0;
+ siLastEndOffs = 0;
if (compiler->info.compVarScopesCount == 0)
+ {
return;
+ }
#if FEATURE_EH_FUNCLETS
- siInFuncletRegion = false;
+ siInFuncletRegion = false;
#endif // FEATURE_EH_FUNCLETS
- for (unsigned i=0; i<lclMAX_TRACKED; i++)
+ for (unsigned i = 0; i < lclMAX_TRACKED; i++)
{
- siLatestTrackedScopes[i] = NULL;
+ siLatestTrackedScopes[i] = nullptr;
}
compiler->compResetScopeLists();
}
-
/*****************************************************************************
* siBeginBlock
*
@@ -405,19 +403,25 @@ void CodeGen::siInit()
* need to be opened.
*/
-void CodeGen::siBeginBlock(BasicBlock* block)
+void CodeGen::siBeginBlock(BasicBlock* block)
{
assert(block != nullptr);
if (!compiler->opts.compScopeInfo)
+ {
return;
-
+ }
+
if (compiler->info.compVarScopesCount == 0)
+ {
return;
+ }
#if FEATURE_EH_FUNCLETS
if (siInFuncletRegion)
+ {
return;
+ }
if (block->bbFlags & BBF_FUNCLET_BEG)
{
@@ -425,14 +429,14 @@ void CodeGen::siBeginBlock(BasicBlock* block)
siInFuncletRegion = true;
JITDUMP("Scope info: found beginning of funclet region at block BB%02u; ignoring following blocks\n",
- block->bbNum);
+ block->bbNum);
return;
}
#endif // FEATURE_EH_FUNCLETS
#ifdef DEBUG
- if (verbose)
+ if (verbose)
{
printf("\nScope info: begin block BB%02u, IL range ", block->bbNum);
block->dspBlockILRange();
@@ -451,7 +455,7 @@ void CodeGen::siBeginBlock(BasicBlock* block)
if (!compiler->opts.compDbgCode)
{
/* For non-debuggable code */
-
+
// End scope of variables which are not live for this block
siUpdate();
@@ -488,23 +492,22 @@ void CodeGen::siBeginBlock(BasicBlock* block)
// Ignore the enter/exit scope changes of the missing scopes, which for
// funclets must be matched.
- if (siLastEndOffs != beginOffs)
+ if (siLastEndOffs != beginOffs)
{
assert(beginOffs > 0);
assert(siLastEndOffs < beginOffs);
- JITDUMP("Scope info: found offset hole. lastOffs=%u, currOffs=%u\n",
- siLastEndOffs, beginOffs);
+ JITDUMP("Scope info: found offset hole. lastOffs=%u, currOffs=%u\n", siLastEndOffs, beginOffs);
// Skip enter scopes
- while ((varScope = compiler->compGetNextEnterScope(beginOffs - 1, true)) != NULL)
+ while ((varScope = compiler->compGetNextEnterScope(beginOffs - 1, true)) != nullptr)
{
/* do nothing */
JITDUMP("Scope info: skipping enter scope, LVnum=%u\n", varScope->vsdLVnum);
}
// Skip exit scopes
- while ((varScope = compiler->compGetNextExitScope(beginOffs - 1, true)) != NULL)
+ while ((varScope = compiler->compGetNextExitScope(beginOffs - 1, true)) != nullptr)
{
/* do nothing */
JITDUMP("Scope info: skipping exit scope, LVnum=%u\n", varScope->vsdLVnum);
@@ -513,7 +516,7 @@ void CodeGen::siBeginBlock(BasicBlock* block)
#else // FEATURE_EH_FUNCLETS
- if (siLastEndOffs != beginOffs)
+ if (siLastEndOffs != beginOffs)
{
assert(siLastEndOffs < beginOffs);
return;
@@ -521,10 +524,11 @@ void CodeGen::siBeginBlock(BasicBlock* block)
#endif // FEATURE_EH_FUNCLETS
- while ((varScope = compiler->compGetNextEnterScope(beginOffs)) != NULL)
+ while ((varScope = compiler->compGetNextEnterScope(beginOffs)) != nullptr)
{
// brace-matching editor workaround for following line: (
- JITDUMP("Scope info: opening scope, LVnum=%u [%03X..%03X)\n", varScope->vsdLVnum, varScope->vsdLifeBeg, varScope->vsdLifeEnd);
+ JITDUMP("Scope info: opening scope, LVnum=%u [%03X..%03X)\n", varScope->vsdLVnum, varScope->vsdLifeBeg,
+ varScope->vsdLifeEnd);
siNewScope(varScope->vsdLVnum, varScope->vsdVarNum);
@@ -533,9 +537,7 @@ void CodeGen::siBeginBlock(BasicBlock* block)
if (VERBOSE)
{
printf("Scope info: >> new scope, VarNum=%u, tracked? %s, VarIndex=%u, bbLiveIn=%s ",
- varScope->vsdVarNum,
- lclVarDsc1->lvTracked ? "yes" : "no",
- lclVarDsc1->lvVarIndex,
+ varScope->vsdVarNum, lclVarDsc1->lvTracked ? "yes" : "no", lclVarDsc1->lvVarIndex,
VarSetOps::ToString(compiler, block->bbLiveIn));
dumpConvertedVarSet(compiler, block->bbLiveIn);
printf("\n");
@@ -547,9 +549,10 @@ void CodeGen::siBeginBlock(BasicBlock* block)
#ifdef DEBUG
if (verbose)
+ {
siDispOpenScopes();
+ }
#endif
-
}
/*****************************************************************************
@@ -560,13 +563,15 @@ void CodeGen::siBeginBlock(BasicBlock* block)
* only begin or end at block boundaries for debuggable code.
*/
-void CodeGen::siEndBlock(BasicBlock* block)
+void CodeGen::siEndBlock(BasicBlock* block)
{
assert(compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0));
#if FEATURE_EH_FUNCLETS
if (siInFuncletRegion)
+ {
return;
+ }
#endif // FEATURE_EH_FUNCLETS
#ifdef DEBUG
@@ -591,13 +596,14 @@ void CodeGen::siEndBlock(BasicBlock* block)
// boundaries.
VarScopeDsc* varScope;
- while ((varScope = compiler->compGetNextExitScope(endOffs, !compiler->opts.compDbgCode)) != NULL)
+ while ((varScope = compiler->compGetNextExitScope(endOffs, !compiler->opts.compDbgCode)) != nullptr)
{
// brace-matching editor workaround for following line: (
- JITDUMP("Scope info: ending scope, LVnum=%u [%03X..%03X)\n", varScope->vsdLVnum, varScope->vsdLifeBeg, varScope->vsdLifeEnd);
+ JITDUMP("Scope info: ending scope, LVnum=%u [%03X..%03X)\n", varScope->vsdLVnum, varScope->vsdLifeBeg,
+ varScope->vsdLifeEnd);
- unsigned varNum = varScope->vsdVarNum;
- LclVarDsc * lclVarDsc1 = &compiler->lvaTable[varNum];
+ unsigned varNum = varScope->vsdVarNum;
+ LclVarDsc* lclVarDsc1 = &compiler->lvaTable[varNum];
assert(lclVarDsc1);
@@ -615,9 +621,10 @@ void CodeGen::siEndBlock(BasicBlock* block)
#ifdef DEBUG
if (verbose)
+ {
siDispOpenScopes();
+ }
#endif
-
}
/*****************************************************************************
@@ -629,20 +636,28 @@ void CodeGen::siEndBlock(BasicBlock* block)
* live over their entire scope, and so they go live or dead only on
* block boundaries.
*/
-void CodeGen::siUpdate ()
-{
+void CodeGen::siUpdate()
+{
if (!compiler->opts.compScopeInfo)
+ {
return;
+ }
if (compiler->opts.compDbgCode)
+ {
return;
+ }
if (compiler->info.compVarScopesCount == 0)
+ {
return;
+ }
- #if FEATURE_EH_FUNCLETS
+#if FEATURE_EH_FUNCLETS
if (siInFuncletRegion)
+ {
return;
+ }
#endif // FEATURE_EH_FUNCLETS
VARSET_TP VARSET_INIT_NOCOPY(killed, VarSetOps::Diff(compiler, siLastLife, compiler->compCurLife));
@@ -652,12 +667,12 @@ void CodeGen::siUpdate ()
while (iter.NextElem(compiler, &i))
{
#ifdef DEBUG
- unsigned lclNum = compiler->lvaTrackedToVarNum[i];
- LclVarDsc * lclVar = &compiler->lvaTable[lclNum];
+ unsigned lclNum = compiler->lvaTrackedToVarNum[i];
+ LclVarDsc* lclVar = &compiler->lvaTable[lclNum];
assert(lclVar->lvTracked);
#endif
- siScope * scope = siLatestTrackedScopes[i];
+ siScope* scope = siLatestTrackedScopes[i];
siEndTrackedScope(i);
}
@@ -679,21 +694,24 @@ void CodeGen::siUpdate ()
* variable has an open scope. Also, check if it has the correct LVnum.
*/
-void CodeGen::siCheckVarScope (unsigned varNum,
- IL_OFFSET offs)
+void CodeGen::siCheckVarScope(unsigned varNum, IL_OFFSET offs)
{
assert(compiler->opts.compScopeInfo && !compiler->opts.compDbgCode && (compiler->info.compVarScopesCount > 0));
#if FEATURE_EH_FUNCLETS
if (siInFuncletRegion)
+ {
return;
+ }
#endif // FEATURE_EH_FUNCLETS
if (offs == BAD_IL_OFFSET)
+ {
return;
+ }
- siScope * scope;
- LclVarDsc * lclVarDsc1 = &compiler->lvaTable[varNum];
+ siScope* scope;
+ LclVarDsc* lclVarDsc1 = &compiler->lvaTable[varNum];
// If there is an open scope corresponding to varNum, find it
@@ -706,7 +724,9 @@ void CodeGen::siCheckVarScope (unsigned varNum,
for (scope = siOpenScopeList.scNext; scope; scope = scope->scNext)
{
if (scope->scVarNum == varNum)
+ {
break;
+ }
}
}
@@ -724,30 +744,31 @@ void CodeGen::siCheckVarScope (unsigned varNum,
{
if (scope->scLVnum != varScope->vsdLVnum)
{
- siEndScope (scope);
- siNewScope (varScope->vsdLVnum, varScope->vsdVarNum);
+ siEndScope(scope);
+ siNewScope(varScope->vsdLVnum, varScope->vsdVarNum);
}
}
else
{
- siNewScope (varScope->vsdLVnum, varScope->vsdVarNum);
+ siNewScope(varScope->vsdLVnum, varScope->vsdVarNum);
}
}
-
/*****************************************************************************
* siCloseAllOpenScopes
*
- * For unreachable code, or optimized code with blocks reordered, there may be
+ * For unreachable code, or optimized code with blocks reordered, there may be
* scopes left open at the end. Simply close them.
*/
-void CodeGen::siCloseAllOpenScopes()
+void CodeGen::siCloseAllOpenScopes()
{
assert(siOpenScopeList.scNext);
while (siOpenScopeList.scNext)
+ {
siEndScope(siOpenScopeList.scNext);
+ }
}
/*****************************************************************************
@@ -758,7 +779,7 @@ void CodeGen::siCloseAllOpenScopes()
#ifdef DEBUG
-void CodeGen::siDispOpenScopes()
+void CodeGen::siDispOpenScopes()
{
assert(compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0));
@@ -780,11 +801,8 @@ void CodeGen::siDispOpenScopes()
{
const char* name = compiler->VarNameToStr(localVars->vsdName);
// brace-matching editor workaround for following line: (
- printf(" %u (%s) [%03X..%03X)\n",
- localVars->vsdLVnum,
- name == nullptr ? "UNKNOWN" : name,
- localVars->vsdLifeBeg,
- localVars->vsdLifeEnd);
+ printf(" %u (%s) [%03X..%03X)\n", localVars->vsdLVnum, name == nullptr ? "UNKNOWN" : name,
+ localVars->vsdLifeBeg, localVars->vsdLifeEnd);
break;
}
}
@@ -794,8 +812,6 @@ void CodeGen::siDispOpenScopes()
#endif // DEBUG
-
-
/*============================================================================
*
* Implementation for PrologScopeInfo
@@ -803,28 +819,25 @@ void CodeGen::siDispOpenScopes()
*============================================================================
*/
-
/*****************************************************************************
* psiNewPrologScope
*
* Creates a new scope and adds it to the Open scope list.
*/
-CodeGen::psiScope *
- CodeGen::psiNewPrologScope(unsigned LVnum,
- unsigned slotNum)
+CodeGen::psiScope* CodeGen::psiNewPrologScope(unsigned LVnum, unsigned slotNum)
{
- psiScope * newScope = (psiScope *) compiler->compGetMem(sizeof(*newScope), CMK_SiScope);
+ psiScope* newScope = (psiScope*)compiler->compGetMem(sizeof(*newScope), CMK_SiScope);
newScope->scStartLoc.CaptureLocation(getEmitter());
assert(newScope->scStartLoc.Valid());
newScope->scEndLoc.Init();
- newScope->scLVnum = LVnum;
- newScope->scSlotNum = slotNum;
+ newScope->scLVnum = LVnum;
+ newScope->scSlotNum = slotNum;
- newScope->scNext = NULL;
+ newScope->scNext = nullptr;
psiOpenScopeLast->scNext = newScope;
newScope->scPrev = psiOpenScopeLast;
psiOpenScopeLast = newScope;
@@ -832,8 +845,6 @@ CodeGen::psiScope *
return newScope;
}
-
-
/*****************************************************************************
* psiEndPrologScope
*
@@ -841,20 +852,20 @@ CodeGen::psiScope *
* list if its length is non-zero
*/
-void CodeGen::psiEndPrologScope(psiScope * scope)
+void CodeGen::psiEndPrologScope(psiScope* scope)
{
scope->scEndLoc.CaptureLocation(getEmitter());
assert(scope->scEndLoc.Valid());
// Remove from open-scope list
- scope->scPrev->scNext = scope->scNext;
+ scope->scPrev->scNext = scope->scNext;
if (scope->scNext)
{
- scope->scNext->scPrev = scope->scPrev;
+ scope->scNext->scPrev = scope->scPrev;
}
else
{
- psiOpenScopeLast = scope->scPrev;
+ psiOpenScopeLast = scope->scPrev;
}
// Add to the finished scope list.
@@ -862,8 +873,7 @@ void CodeGen::psiEndPrologScope(psiScope * scope)
// CodeGen::genSetScopeInfo will report the liveness of all arguments
// as spanning the first instruction in the method, so that they can
// at least be inspected on entry to the method.
- if (scope->scStartLoc != scope->scEndLoc ||
- scope->scStartLoc.IsOffsetZero())
+ if (scope->scStartLoc != scope->scEndLoc || scope->scStartLoc.IsOffsetZero())
{
psiScopeLast->scNext = scope;
psiScopeLast = scope;
@@ -871,8 +881,6 @@ void CodeGen::psiEndPrologScope(psiScope * scope)
}
}
-
-
/*============================================================================
* INTERFACE (protected) Functions for PrologScopeInfo
*============================================================================
@@ -886,16 +894,17 @@ void CodeGen::psiEndPrologScope(psiScope * scope)
// 'lclVarDsc' is an op that will now be contained by its parent.
//
//
-void CodeGen::psSetScopeOffset(psiScope* newScope, LclVarDsc * lclVarDsc)
+void CodeGen::psSetScopeOffset(psiScope* newScope, LclVarDsc* lclVarDsc)
{
- newScope->scRegister = false;
+ newScope->scRegister = false;
newScope->u2.scBaseReg = REG_SPBASE;
-#ifdef _TARGET_AMD64_
+#ifdef _TARGET_AMD64_
// scOffset = offset from caller SP - REGSIZE_BYTES
// TODO-Cleanup - scOffset needs to be understood. For now just matching with the existing definition.
- newScope->u2.scOffset = compiler->lvaToCallerSPRelativeOffset(lclVarDsc->lvStkOffs, lclVarDsc->lvFramePointerBased) + REGSIZE_BYTES;
-#else // !_TARGET_AMD64_
+ newScope->u2.scOffset =
+ compiler->lvaToCallerSPRelativeOffset(lclVarDsc->lvStkOffs, lclVarDsc->lvFramePointerBased) + REGSIZE_BYTES;
+#else // !_TARGET_AMD64_
if (doubleAlignOrFramePointerUsed())
{
// REGSIZE_BYTES - for the pushed value of EBP
@@ -920,28 +929,29 @@ void CodeGen::psSetScopeOffset(psiScope* newScope, LclVarDsc * lc
* parameters of the method.
*/
-void CodeGen::psiBegProlog()
+void CodeGen::psiBegProlog()
{
assert(compiler->compGeneratingProlog);
VarScopeDsc* varScope;
- psiOpenScopeList.scNext = NULL;
- psiOpenScopeLast = &psiOpenScopeList;
- psiScopeLast = &psiScopeList;
- psiScopeCnt = 0;
+ psiOpenScopeList.scNext = nullptr;
+ psiOpenScopeLast = &psiOpenScopeList;
+ psiScopeLast = &psiScopeList;
+ psiScopeCnt = 0;
compiler->compResetScopeLists();
- while ((varScope = compiler->compGetNextEnterScope(0)) != NULL)
+ while ((varScope = compiler->compGetNextEnterScope(0)) != nullptr)
{
- LclVarDsc * lclVarDsc1 = &compiler->lvaTable[varScope->vsdVarNum];
+ LclVarDsc* lclVarDsc1 = &compiler->lvaTable[varScope->vsdVarNum];
if (!lclVarDsc1->lvIsParam)
+ {
continue;
+ }
- psiScope * newScope = psiNewPrologScope(varScope->vsdLVnum,
- varScope->vsdVarNum);
+ psiScope* newScope = psiNewPrologScope(varScope->vsdLVnum, varScope->vsdVarNum);
if (lclVarDsc1->lvIsRegArg)
{
@@ -955,11 +965,11 @@ void CodeGen::psiBegProlog()
compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc);
if (structDesc.passedInRegisters)
{
- regNumber regNum = REG_NA;
+ regNumber regNum = REG_NA;
regNumber otherRegNum = REG_NA;
for (unsigned nCnt = 0; nCnt < structDesc.eightByteCount; nCnt++)
{
- unsigned len = structDesc.eightByteSizes[nCnt];
+ unsigned len = structDesc.eightByteSizes[nCnt];
var_types regType = TYP_UNDEF;
if (nCnt == 0)
@@ -982,8 +992,8 @@ void CodeGen::psiBegProlog()
#endif // DEBUG
}
- newScope->scRegister = true;
- newScope->u1.scRegNum = (regNumberSmall)regNum;
+ newScope->scRegister = true;
+ newScope->u1.scRegNum = (regNumberSmall)regNum;
newScope->u1.scOtherReg = (regNumberSmall)otherRegNum;
}
else
@@ -1006,7 +1016,7 @@ void CodeGen::psiBegProlog()
assert(genMapRegNumToRegArgNum(lclVarDsc1->lvArgReg, regType) != (unsigned)-1);
#endif // DEBUG
- newScope->scRegister = true;
+ newScope->scRegister = true;
newScope->u1.scRegNum = (regNumberSmall)lclVarDsc1->lvArgReg;
}
}
@@ -1019,12 +1029,12 @@ void CodeGen::psiBegProlog()
/*****************************************************************************
Enable this macro to get accurate prolog information for every instruction
- in the prolog. However, this is overkill as nobody steps through the
+ in the prolog. However, this is overkill as nobody steps through the
disassembly of the prolog. Even if they do they will not expect rich debug info.
We still report all the arguments at the very start of the method so that
the user can see the arguments at the very start of the method (offset=0).
-
+
Disabling this decreased the debug maps in mscorlib by 10% (01/2003)
*/
@@ -1038,43 +1048,43 @@ void CodeGen::psiBegProlog()
* When ESP changes, all scopes relative to ESP have to be updated.
*/
-void CodeGen::psiAdjustStackLevel(unsigned size)
+void CodeGen::psiAdjustStackLevel(unsigned size)
{
#ifdef DEBUGGING_SUPPORT
if (!compiler->opts.compScopeInfo || (compiler->info.compVarScopesCount == 0))
+ {
return;
+ }
assert(compiler->compGeneratingProlog);
#ifdef ACCURATE_PROLOG_DEBUG_INFO
- psiScope * scope;
+ psiScope* scope;
// walk the list backwards
// Works as psiEndPrologScope does not change scPrev
for (scope = psiOpenScopeLast; scope != &psiOpenScopeList; scope = scope->scPrev)
{
- if (scope->scRegister)
+ if (scope->scRegister)
{
assert(compiler->lvaTable[scope->scSlotNum].lvIsRegArg);
continue;
}
assert(scope->u2.scBaseReg == REG_SPBASE);
- psiScope * newScope = psiNewPrologScope(scope->scLVnum, scope->scSlotNum);
- newScope->scRegister = false;
- newScope->u2.scBaseReg = REG_SPBASE;
- newScope->u2.scOffset = scope->u2.scOffset + size;
+ psiScope* newScope = psiNewPrologScope(scope->scLVnum, scope->scSlotNum);
+ newScope->scRegister = false;
+ newScope->u2.scBaseReg = REG_SPBASE;
+ newScope->u2.scOffset = scope->u2.scOffset + size;
- psiEndPrologScope (scope);
+ psiEndPrologScope(scope);
}
-
+
#endif // ACCURATE_PROLOG_DEBUG_INFO
#endif // DEBUGGING_SUPPORT
}
-
-
/*****************************************************************************
* psiMoveESPtoEBP
*
@@ -1082,65 +1092,67 @@ void CodeGen::psiAdjustStackLevel(unsigned size)
* but via EBP right after a "mov ebp,esp" instruction
*/
-void CodeGen::psiMoveESPtoEBP()
+void CodeGen::psiMoveESPtoEBP()
{
#ifdef DEBUGGING_SUPPORT
if (!compiler->opts.compScopeInfo || (compiler->info.compVarScopesCount == 0))
+ {
return;
+ }
assert(compiler->compGeneratingProlog);
assert(doubleAlignOrFramePointerUsed());
#ifdef ACCURATE_PROLOG_DEBUG_INFO
- psiScope * scope;
+ psiScope* scope;
// walk the list backwards
// Works as psiEndPrologScope does not change scPrev
for (scope = psiOpenScopeLast; scope != &psiOpenScopeList; scope = scope->scPrev)
{
- if (scope->scRegister)
+ if (scope->scRegister)
{
assert(compiler->lvaTable[scope->scSlotNum].lvIsRegArg);
continue;
}
assert(scope->u2.scBaseReg == REG_SPBASE);
- psiScope * newScope = psiNewPrologScope(scope->scLVnum, scope->scSlotNum);
- newScope->scRegister = false;
- newScope->u2.scBaseReg = REG_FPBASE;
- newScope->u2.scOffset = scope->u2.scOffset;
+ psiScope* newScope = psiNewPrologScope(scope->scLVnum, scope->scSlotNum);
+ newScope->scRegister = false;
+ newScope->u2.scBaseReg = REG_FPBASE;
+ newScope->u2.scOffset = scope->u2.scOffset;
- psiEndPrologScope (scope);
+ psiEndPrologScope(scope);
}
-
+
#endif // ACCURATE_PROLOG_DEBUG_INFO
#endif // DEBUGGING_SUPPORT
}
-
-
/*****************************************************************************
* psiMoveToReg
*
* Called when a parameter is loaded into its assigned register from the stack,
 * or when parameters are moved around due to circular dependency.
- * If reg != REG_NA, then the parameter is being moved into its assigned
+ * If reg != REG_NA, then the parameter is being moved into its assigned
* register, else it may be being moved to a temp register.
*/
-void CodeGen::psiMoveToReg (unsigned varNum,
- regNumber reg,
- regNumber otherReg)
+void CodeGen::psiMoveToReg(unsigned varNum, regNumber reg, regNumber otherReg)
{
#ifdef DEBUGGING_SUPPORT
assert(compiler->compGeneratingProlog);
if (!compiler->opts.compScopeInfo)
+ {
return;
-
+ }
+
if (compiler->info.compVarScopesCount == 0)
+ {
return;
+ }
assert((int)varNum >= 0); // It's not a spill temp number.
assert(compiler->lvaTable[varNum].lvIsInReg());
@@ -1151,7 +1163,7 @@ void CodeGen::psiMoveToReg (unsigned varNum,
* being moved through temp register "reg".
* If reg==REG_NA, it is being moved to its assigned register.
*/
- if (reg == REG_NA)
+ if (reg == REG_NA)
{
// Grab the assigned registers.
@@ -1159,7 +1171,7 @@ void CodeGen::psiMoveToReg (unsigned varNum,
otherReg = compiler->lvaTable[varNum].lvOtherReg;
}
- psiScope * scope;
+ psiScope* scope;
// walk the list backwards
// Works as psiEndPrologScope does not change scPrev
@@ -1168,25 +1180,24 @@ void CodeGen::psiMoveToReg (unsigned varNum,
if (scope->scSlotNum != compiler->lvaTable[varNum].lvSlotNum)
continue;
- psiScope * newScope = psiNewPrologScope(scope->scLVnum, scope->scSlotNum);
+ psiScope* newScope = psiNewPrologScope(scope->scLVnum, scope->scSlotNum);
newScope->scRegister = true;
newScope->u1.scRegNum = reg;
newScope->u1.scOtherReg = otherReg;
- psiEndPrologScope (scope);
+ psiEndPrologScope(scope);
return;
}
// May happen if a parameter does not have an entry in the LocalVarTab
// But assert() just in case it is because of something else.
- assert(varNum == compiler->info.compRetBuffArg ||
+ assert(varNum == compiler->info.compRetBuffArg ||
!"Parameter scope not found (Assert doesnt always indicate error)");
#endif // ACCURATE_PROLOG_DEBUG_INFO
#endif // DEBUGGING_SUPPORT
}
-
/*****************************************************************************
* CodeGen::psiMoveToStack
*
@@ -1194,19 +1205,21 @@ void CodeGen::psiMoveToReg (unsigned varNum,
 * (i.e., all adjustments to {F/S}PBASE have been made)
*/
-void CodeGen::psiMoveToStack(unsigned varNum)
+void CodeGen::psiMoveToStack(unsigned varNum)
{
#ifdef DEBUGGING_SUPPORT
if (!compiler->opts.compScopeInfo || (compiler->info.compVarScopesCount == 0))
+ {
return;
+ }
assert(compiler->compGeneratingProlog);
- assert( compiler->lvaTable[varNum].lvIsRegArg);
+ assert(compiler->lvaTable[varNum].lvIsRegArg);
assert(!compiler->lvaTable[varNum].lvRegister);
#ifdef ACCURATE_PROLOG_DEBUG_INFO
- psiScope * scope;
+ psiScope* scope;
// walk the list backwards
// Works as psiEndPrologScope does not change scPrev
@@ -1220,22 +1233,21 @@ void CodeGen::psiMoveToStack(unsigned varNum)
assert(scope->scRegister);
assert(scope->u1.scRegNum == compiler->lvaTable[varNum].lvArgReg);
- psiScope * newScope = psiNewPrologScope(scope->scLVnum, scope->scSlotNum);
- newScope->scRegister = false;
- newScope->u2.scBaseReg = (compiler->lvaTable[varNum].lvFramePointerBased) ? REG_FPBASE
- : REG_SPBASE;
- newScope->u2.scOffset = compiler->lvaTable[varNum].lvStkOffs;
+ psiScope* newScope = psiNewPrologScope(scope->scLVnum, scope->scSlotNum);
+ newScope->scRegister = false;
+ newScope->u2.scBaseReg = (compiler->lvaTable[varNum].lvFramePointerBased) ? REG_FPBASE : REG_SPBASE;
+ newScope->u2.scOffset = compiler->lvaTable[varNum].lvStkOffs;
- psiEndPrologScope (scope);
+ psiEndPrologScope(scope);
return;
}
// May happen if a parameter does not have an entry in the LocalVarTab
// But assert() just in case it is because of something else.
- assert(varNum == compiler->info.compRetBuffArg ||
+ assert(varNum == compiler->info.compRetBuffArg ||
!"Parameter scope not found (Assert doesnt always indicate error)");
-#endif // ACCURATE_PROLOG_DEBUG_INFO
+#endif // ACCURATE_PROLOG_DEBUG_INFO
#endif // DEBUGGING_SUPPORT
}
@@ -1243,10 +1255,10 @@ void CodeGen::psiMoveToStack(unsigned varNum)
* psiEndProlog
*/
-void CodeGen::psiEndProlog()
+void CodeGen::psiEndProlog()
{
assert(compiler->compGeneratingProlog);
- psiScope * scope;
+ psiScope* scope;
for (scope = psiOpenScopeList.scNext; scope; scope = psiOpenScopeList.scNext)
{
@@ -1254,14 +1266,6 @@ void CodeGen::psiEndProlog()
}
}
-
-
-
-
/*****************************************************************************/
#endif // DEBUGGING_SUPPORT
/*****************************************************************************/
-
-
-
-
diff --git a/src/jit/sharedfloat.cpp b/src/jit/sharedfloat.cpp
index 16f2de2b09..0dbbac4862 100644
--- a/src/jit/sharedfloat.cpp
+++ b/src/jit/sharedfloat.cpp
@@ -17,59 +17,101 @@
#ifdef LEGACY_BACKEND
#if FEATURE_STACK_FP_X87
- regMaskTP RegSet::rsGetMaskUsed() { return rsMaskUsedFloat; }
- regMaskTP RegSet::rsGetMaskVars() { return rsMaskRegVarFloat; }
- regMaskTP RegSet::rsGetMaskLock() { return rsMaskLockedFloat; }
- regMaskTP RegSet::rsGetMaskMult() { return 0; }
-
- void RegSet::rsSetMaskUsed(regMaskTP maskUsed) { rsMaskUsedFloat = maskUsed; }
- void RegSet::rsSetMaskVars(regMaskTP maskVars) { rsMaskRegVarFloat = maskVars; }
- void RegSet::rsSetMaskLock(regMaskTP maskLock) { rsMaskLockedFloat = maskLock; }
-
- void RegSet::rsSetUsedTree(regNumber regNum, GenTreePtr tree)
- {
- assert(genUsedRegsFloat[regNum] == 0);
- genUsedRegsFloat[regNum] = tree;
- }
- void RegSet::rsFreeUsedTree(regNumber regNum, GenTreePtr tree)
- {
- assert(genUsedRegsFloat[regNum] == tree);
- genUsedRegsFloat[regNum] = 0;
- }
+regMaskTP RegSet::rsGetMaskUsed()
+{
+ return rsMaskUsedFloat;
+}
+regMaskTP RegSet::rsGetMaskVars()
+{
+ return rsMaskRegVarFloat;
+}
+regMaskTP RegSet::rsGetMaskLock()
+{
+ return rsMaskLockedFloat;
+}
+regMaskTP RegSet::rsGetMaskMult()
+{
+ return 0;
+}
-#else // !FEATURE_STACK_FP_X87
- regMaskTP RegSet::rsGetMaskUsed() { return rsMaskUsed; }
- regMaskTP RegSet::rsGetMaskVars() { return rsMaskVars; }
- regMaskTP RegSet::rsGetMaskLock() { return rsMaskLock; }
- regMaskTP RegSet::rsGetMaskMult() { return rsMaskMult; }
+void RegSet::rsSetMaskUsed(regMaskTP maskUsed)
+{
+ rsMaskUsedFloat = maskUsed;
+}
+void RegSet::rsSetMaskVars(regMaskTP maskVars)
+{
+ rsMaskRegVarFloat = maskVars;
+}
+void RegSet::rsSetMaskLock(regMaskTP maskLock)
+{
+ rsMaskLockedFloat = maskLock;
+}
- void RegSet::rsSetMaskUsed(regMaskTP maskUsed) { rsMaskUsed = maskUsed; }
- void RegSet::rsSetMaskVars(regMaskTP maskVars) { rsMaskVars = maskVars; }
- void RegSet::rsSetMaskLock(regMaskTP maskLock) { rsMaskLock = maskLock; }
+void RegSet::rsSetUsedTree(regNumber regNum, GenTreePtr tree)
+{
+ assert(genUsedRegsFloat[regNum] == 0);
+ genUsedRegsFloat[regNum] = tree;
+}
+void RegSet::rsFreeUsedTree(regNumber regNum, GenTreePtr tree)
+{
+ assert(genUsedRegsFloat[regNum] == tree);
+ genUsedRegsFloat[regNum] = 0;
+}
- void RegSet::rsSetUsedTree(regNumber regNum, GenTreePtr tree)
- {
- assert(rsUsedTree[regNum] == 0);
- rsUsedTree[regNum] = tree;
- }
- void RegSet::rsFreeUsedTree(regNumber regNum, GenTreePtr tree)
- {
- assert(rsUsedTree[regNum] == tree);
- rsUsedTree[regNum] = 0;
- }
+#else // !FEATURE_STACK_FP_X87
+regMaskTP RegSet::rsGetMaskUsed()
+{
+ return rsMaskUsed;
+}
+regMaskTP RegSet::rsGetMaskVars()
+{
+ return rsMaskVars;
+}
+regMaskTP RegSet::rsGetMaskLock()
+{
+ return rsMaskLock;
+}
+regMaskTP RegSet::rsGetMaskMult()
+{
+ return rsMaskMult;
+}
+
+void RegSet::rsSetMaskUsed(regMaskTP maskUsed)
+{
+ rsMaskUsed = maskUsed;
+}
+void RegSet::rsSetMaskVars(regMaskTP maskVars)
+{
+ rsMaskVars = maskVars;
+}
+void RegSet::rsSetMaskLock(regMaskTP maskLock)
+{
+ rsMaskLock = maskLock;
+}
+
+void RegSet::rsSetUsedTree(regNumber regNum, GenTreePtr tree)
+{
+ assert(rsUsedTree[regNum] == 0);
+ rsUsedTree[regNum] = tree;
+}
+void RegSet::rsFreeUsedTree(regNumber regNum, GenTreePtr tree)
+{
+ assert(rsUsedTree[regNum] == tree);
+ rsUsedTree[regNum] = 0;
+}
#endif // !FEATURE_STACK_FP_X87
// float stress mode. Will lock out registers to stress high register pressure.
// This implies setting interferences in register allocator and pushing regs in
// the prolog and popping them before a ret.
-#ifdef DEBUG
+#ifdef DEBUG
int CodeGenInterface::genStressFloat()
{
- return compiler->compStressCompile(Compiler::STRESS_FLATFP, 40)?1:JitConfig.JitStressFP();
+ return compiler->compStressCompile(Compiler::STRESS_FLATFP, 40) ? 1 : JitConfig.JitStressFP();
}
#endif
-regMaskTP RegSet::RegFreeFloat()
+regMaskTP RegSet::RegFreeFloat()
{
regMaskTP mask = RBM_ALLFLOAT;
#if FEATURE_FP_REGALLOC
@@ -80,7 +122,7 @@ regMaskTP RegSet::RegFreeFloat()
mask &= ~rsGetMaskLock();
mask &= ~rsGetMaskVars();
-#ifdef DEBUG
+#ifdef DEBUG
if (m_rsCompiler->codeGen->genStressFloat())
{
mask &= ~(m_rsCompiler->codeGen->genStressLockedMaskFloat());
@@ -90,30 +132,23 @@ regMaskTP RegSet::RegFreeFloat()
}
#ifdef _TARGET_ARM_
-// order registers are picked
+// order registers are picked
// go in reverse order to minimize chance of spilling with calls
-static const regNumber pickOrder[] = {REG_F15, REG_F14, REG_F13, REG_F12,
- REG_F11, REG_F10, REG_F9, REG_F8,
- REG_F7, REG_F6, REG_F5, REG_F4,
- REG_F3, REG_F2, REG_F1, REG_F0,
+static const regNumber pickOrder[] = {REG_F15, REG_F14, REG_F13, REG_F12, REG_F11, REG_F10, REG_F9, REG_F8,
+ REG_F7, REG_F6, REG_F5, REG_F4, REG_F3, REG_F2, REG_F1, REG_F0,
- REG_F16, REG_F17, REG_F18, REG_F19,
- REG_F20, REG_F21, REG_F22, REG_F23,
- REG_F24, REG_F25, REG_F26, REG_F27,
- REG_F28, REG_F29, REG_F30, REG_F31
-};
+ REG_F16, REG_F17, REG_F18, REG_F19, REG_F20, REG_F21, REG_F22, REG_F23,
+ REG_F24, REG_F25, REG_F26, REG_F27, REG_F28, REG_F29, REG_F30, REG_F31};
#elif _TARGET_AMD64_
-// order registers are picked
-static const regNumber pickOrder[] = {REG_XMM0, REG_XMM1, REG_XMM2, REG_XMM3,
- REG_XMM4, REG_XMM5, REG_XMM6, REG_XMM7,
- REG_XMM8, REG_XMM9, REG_XMM10, REG_XMM11,
+// order registers are picked
+static const regNumber pickOrder[] = {REG_XMM0, REG_XMM1, REG_XMM2, REG_XMM3, REG_XMM4, REG_XMM5,
+ REG_XMM6, REG_XMM7, REG_XMM8, REG_XMM9, REG_XMM10, REG_XMM11,
REG_XMM12, REG_XMM13, REG_XMM14, REG_XMM15};
#elif _TARGET_X86_
-// order registers are picked
-static const regNumber pickOrder[] = {REG_FPV0, REG_FPV1, REG_FPV2, REG_FPV3,
- REG_FPV4, REG_FPV5, REG_FPV6, REG_FPV7};
+// order registers are picked
+static const regNumber pickOrder[] = {REG_FPV0, REG_FPV1, REG_FPV2, REG_FPV3, REG_FPV4, REG_FPV5, REG_FPV6, REG_FPV7};
#endif
// picks a reg other than the one specified
@@ -128,18 +163,18 @@ regNumber RegSet::PickRegFloatOtherThan(var_types type, regNumber reg)
return PickRegFloat(type, &pref);
}
-regNumber RegSet::PickRegFloat(GenTreePtr tree, var_types type, RegisterPreference * pref, bool bUsed)
+regNumber RegSet::PickRegFloat(GenTreePtr tree, var_types type, RegisterPreference* pref, bool bUsed)
{
return PickRegFloat(type, pref, bUsed);
}
-regNumber RegSet::PickRegFloat(var_types type, RegisterPreference *pref, bool bUsed)
+regNumber RegSet::PickRegFloat(var_types type, RegisterPreference* pref, bool bUsed)
{
regMaskTP wantedMask;
bool tryBest = true;
bool tryOk = true;
bool bSpill = false;
- regNumber reg = REG_NA;
+ regNumber reg = REG_NA;
while (tryOk)
{
@@ -148,27 +183,27 @@ regNumber RegSet::PickRegFloat(var_types type, RegisterPreference *pref, bool bU
if (tryBest)
{
wantedMask = pref->best;
- tryBest = false;
+ tryBest = false;
}
else
{
assert(tryOk);
wantedMask = pref->ok;
- tryOk = false;
+ tryOk = false;
}
}
- else// pref is NULL
+ else // pref is NULL
{
wantedMask = RBM_ALLFLOAT;
- tryBest = false;
- tryOk = false;
+ tryBest = false;
+ tryOk = false;
}
-
+
// better not have asked for a non-fp register
assert((wantedMask & ~RBM_ALLFLOAT) == 0);
-
+
regMaskTP availMask = RegFreeFloat();
- regMaskTP OKmask = availMask & wantedMask;
+ regMaskTP OKmask = availMask & wantedMask;
if (OKmask == 0)
{
@@ -189,7 +224,7 @@ regNumber RegSet::PickRegFloat(var_types type, RegisterPreference *pref, bool bU
regMaskTP restrictMask = (m_rsCompiler->raConfigRestrictMaskFP() | RBM_FLT_CALLEE_TRASH);
#endif
- for (unsigned i=0; i<ArrLen(pickOrder); i++)
+ for (unsigned i = 0; i < ArrLen(pickOrder); i++)
{
regNumber r = pickOrder[i];
if (!floatRegCanHoldType(r, type))
@@ -230,14 +265,13 @@ RET:
return reg;
}
-
void RegSet::SetUsedRegFloat(GenTreePtr tree, bool bValue)
{
/* The value must be sitting in a register */
assert(tree);
assert(tree->gtFlags & GTF_REG_VAL);
- var_types type = tree->TypeGet();
+ var_types type = tree->TypeGet();
#ifdef _TARGET_ARM_
if (type == TYP_STRUCT)
{
@@ -245,16 +279,15 @@ void RegSet::SetUsedRegFloat(GenTreePtr tree, bool bValue)
type = TYP_FLOAT;
}
#endif
- regNumber regNum = tree->gtRegNum;
- regMaskTP regMask = genRegMaskFloat(regNum, type);
+ regNumber regNum = tree->gtRegNum;
+ regMaskTP regMask = genRegMaskFloat(regNum, type);
if (bValue)
{
-#ifdef DEBUG
- if (m_rsCompiler->verbose)
+#ifdef DEBUG
+ if (m_rsCompiler->verbose)
{
- printf("\t\t\t\t\t\t\tThe register %s currently holds ",
- getRegNameFloat(regNum, type));
+ printf("\t\t\t\t\t\t\tThe register %s currently holds ", getRegNameFloat(regNum, type));
Compiler::printTreeID(tree);
printf("\n");
}
@@ -268,7 +301,7 @@ void RegSet::SetUsedRegFloat(GenTreePtr tree, bool bValue)
#else
/* Is the register used by two different values simultaneously? */
- if (regMask & rsGetMaskUsed())
+ if (regMask & rsGetMaskUsed())
{
/* Save the preceding use information */
@@ -277,7 +310,7 @@ void RegSet::SetUsedRegFloat(GenTreePtr tree, bool bValue)
#endif
/* Set the register's bit in the 'used' bitset */
- rsSetMaskUsed( (rsGetMaskUsed() | regMask) );
+ rsSetMaskUsed((rsGetMaskUsed() | regMask));
// Assign slot
rsSetUsedTree(regNum, tree);
@@ -285,10 +318,9 @@ void RegSet::SetUsedRegFloat(GenTreePtr tree, bool bValue)
else
{
#ifdef DEBUG
- if (m_rsCompiler->verbose)
+ if (m_rsCompiler->verbose)
{
- printf("\t\t\t\t\t\t\tThe register %s no longer holds ",
- getRegNameFloat(regNum, type));
+ printf("\t\t\t\t\t\t\tThe register %s no longer holds ", getRegNameFloat(regNum, type));
Compiler::printTreeID(tree);
printf("\n");
}
@@ -299,25 +331,26 @@ void RegSet::SetUsedRegFloat(GenTreePtr tree, bool bValue)
// Are we freeing a multi-use registers?
- if (regMask & rsGetMaskMult())
+ if (regMask & rsGetMaskMult())
{
// Free any multi-use registers
rsMultRegFree(regMask);
return;
}
- rsSetMaskUsed( (rsGetMaskUsed() & ~regMask) );
+ rsSetMaskUsed((rsGetMaskUsed() & ~regMask));
// Free slot
rsFreeUsedTree(regNum, tree);
}
}
-void RegSet::SetLockedRegFloat(GenTree * tree, bool bValue)
+void RegSet::SetLockedRegFloat(GenTree* tree, bool bValue)
{
- regNumber reg = tree->gtRegNum;
- var_types type = tree->TypeGet(); assert(varTypeIsFloating(type));
- regMaskTP regMask = genRegMaskFloat(reg, tree->TypeGet());
+ regNumber reg = tree->gtRegNum;
+ var_types type = tree->TypeGet();
+ assert(varTypeIsFloating(type));
+ regMaskTP regMask = genRegMaskFloat(reg, tree->TypeGet());
if (bValue)
{
@@ -326,16 +359,16 @@ void RegSet::SetLockedRegFloat(GenTree * tree, bool bValue)
assert((rsGetMaskUsed() & regMask) == regMask);
assert((rsGetMaskLock() & regMask) == 0);
- rsSetMaskLock( (rsGetMaskLock() | regMask) );
+ rsSetMaskLock((rsGetMaskLock() | regMask));
}
else
{
JITDUMP("unlocking register %s\n", getRegNameFloat(reg, type));
- assert((rsGetMaskUsed() & regMask) == regMask);
+ assert((rsGetMaskUsed() & regMask) == regMask);
assert((rsGetMaskLock() & regMask) == regMask);
- rsSetMaskLock( (rsGetMaskLock() & ~regMask) );
+ rsSetMaskLock((rsGetMaskLock() & ~regMask));
}
}
@@ -346,7 +379,7 @@ bool RegSet::IsLockedRegFloat(GenTreePtr tree)
assert(tree->gtFlags & GTF_REG_VAL);
assert(varTypeIsFloating(tree->TypeGet()));
- regMaskTP regMask = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
+ regMaskTP regMask = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
return (rsGetMaskLock() & regMask) == regMask;
}
@@ -361,8 +394,8 @@ void CodeGen::UnspillFloat(GenTreePtr tree)
}
#endif // DEBUG
- RegSet::SpillDsc* cur = regSet.rsSpillFloat;
- assert(cur);
+ RegSet::SpillDsc* cur = regSet.rsSpillFloat;
+ assert(cur);
while (cur->spillTree != tree)
cur = cur->spillNext;
@@ -370,12 +403,12 @@ void CodeGen::UnspillFloat(GenTreePtr tree)
UnspillFloat(cur);
}
-void CodeGen::UnspillFloat(LclVarDsc * varDsc)
+void CodeGen::UnspillFloat(LclVarDsc* varDsc)
{
JITDUMP("UnspillFloat() for var [%08p]\n", dspPtr(varDsc));
- RegSet::SpillDsc* cur = regSet.rsSpillFloat;
- assert(cur);
+ RegSet::SpillDsc* cur = regSet.rsSpillFloat;
+ assert(cur);
while (cur->spillVarDsc != varDsc)
cur = cur->spillNext;
@@ -388,8 +421,7 @@ void CodeGen::RemoveSpillDsc(RegSet::SpillDsc* spillDsc)
RegSet::SpillDsc* cur;
RegSet::SpillDsc** prev;
- for (cur = regSet.rsSpillFloat, prev = &regSet.rsSpillFloat;
- cur != spillDsc ;
+ for (cur = regSet.rsSpillFloat, prev = &regSet.rsSpillFloat; cur != spillDsc;
prev = &cur->spillNext, cur = cur->spillNext)
; // EMPTY LOOP
@@ -399,11 +431,11 @@ void CodeGen::RemoveSpillDsc(RegSet::SpillDsc* spillDsc)
*prev = cur->spillNext;
}
-void CodeGen::UnspillFloat(RegSet::SpillDsc *spillDsc)
+void CodeGen::UnspillFloat(RegSet::SpillDsc* spillDsc)
{
JITDUMP("UnspillFloat() for SpillDsc [%08p]\n", dspPtr(spillDsc));
- RemoveSpillDsc(spillDsc);
+ RemoveSpillDsc(spillDsc);
UnspillFloatMachineDep(spillDsc);
RegSet::SpillDsc::freeDsc(&regSet, spillDsc);
@@ -412,32 +444,29 @@ void CodeGen::UnspillFloat(RegSet::SpillDsc *spillDsc)
#if FEATURE_STACK_FP_X87
-Compiler::fgWalkResult CodeGen::genRegVarDiesInSubTreeWorker(GenTreePtr * pTree, Compiler::fgWalkData *data)
+Compiler::fgWalkResult CodeGen::genRegVarDiesInSubTreeWorker(GenTreePtr* pTree, Compiler::fgWalkData* data)
{
- GenTreePtr tree = *pTree;
- genRegVarDiesInSubTreeData* pData = (genRegVarDiesInSubTreeData*) data->pCallbackData;
+ GenTreePtr tree = *pTree;
+ genRegVarDiesInSubTreeData* pData = (genRegVarDiesInSubTreeData*)data->pCallbackData;
// if it's dying, just rename the register, else load it normally
- if (tree->IsRegVar() &&
- tree->IsRegVarDeath() &&
- tree->gtRegVar.gtRegNum == pData->reg)
+ if (tree->IsRegVar() && tree->IsRegVarDeath() && tree->gtRegVar.gtRegNum == pData->reg)
{
pData->result = true;
return Compiler::WALK_ABORT;
}
-
+
return Compiler::WALK_CONTINUE;
}
-
-bool CodeGen::genRegVarDiesInSubTree (GenTreePtr tree, regNumber reg)
+bool CodeGen::genRegVarDiesInSubTree(GenTreePtr tree, regNumber reg)
{
genRegVarDiesInSubTreeData Data;
- Data.reg = reg;
+ Data.reg = reg;
Data.result = false;
compiler->fgWalkTreePre(&tree, genRegVarDiesInSubTreeWorker, (void*)&Data);
-
+
return Data.result;
}
@@ -451,8 +480,7 @@ bool CodeGen::genRegVarDiesInSubTree (GenTreePtr tree, regNumb
 * If type!=TYP_UNDEF, that is the desired precision, else it is op->gtType
*/
-void CodeGen::genRoundFpExpression(GenTreePtr op,
- var_types type)
+void CodeGen::genRoundFpExpression(GenTreePtr op, var_types type)
{
#if FEATURE_STACK_FP_X87
return genRoundFpExpressionStackFP(op, type);
@@ -461,13 +489,10 @@ void CodeGen::genRoundFpExpression(GenTreePtr op,
#endif
}
-void CodeGen::genCodeForTreeFloat(GenTreePtr tree,
- regMaskTP needReg,
- regMaskTP bestReg)
+void CodeGen::genCodeForTreeFloat(GenTreePtr tree, regMaskTP needReg, regMaskTP bestReg)
{
RegSet::RegisterPreference pref(needReg, bestReg);
genCodeForTreeFloat(tree, &pref);
}
#endif // LEGACY_BACKEND
-
diff --git a/src/jit/simd.cpp b/src/jit/simd.cpp
index cb8dc90f23..c418a6344f 100644
--- a/src/jit/simd.cpp
+++ b/src/jit/simd.cpp
@@ -13,7 +13,7 @@
// Nodes of SIMD types will be typed as TYP_SIMD* (e.g. TYP_SIMD8, TYP_SIMD16, etc.).
//
// Note that currently the "reference implementation" is the same as the runtime dll. As such, it is currently
-// providing implementations for those methods not currently supported by the JIT as intrinsics.
+// providing implementations for those methods not currently supported by the JIT as intrinsics.
//
// These are currently recognized using string compares, in order to provide an implementation in the JIT
// without taking a dependency on the VM.
@@ -23,8 +23,6 @@
// This has been addressed for RTM by doing the assembly recognition in the VM.
// --------------------------------------------------------------------------------------
-
-
#include "jitpch.h"
#include "simd.h"
@@ -36,13 +34,12 @@
#ifdef FEATURE_SIMD
-
// Intrinsic Id to intrinsic info map
-const SIMDIntrinsicInfo simdIntrinsicInfoArray[] =
-{
- #define SIMD_INTRINSIC(mname, inst, id, name, retType, argCount, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) \
- {SIMDIntrinsic##id, mname, inst, retType, argCount, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10},
- #include "simdintrinsiclist.h"
+const SIMDIntrinsicInfo simdIntrinsicInfoArray[] = {
+#define SIMD_INTRINSIC(mname, inst, id, name, retType, argCount, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, \
+ t10) \
+ {SIMDIntrinsic##id, mname, inst, retType, argCount, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10},
+#include "simdintrinsiclist.h"
};
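simdIntrinsicInfoArray is populated with the X-macro pattern: including simdintrinsiclist.h under a local SIMD_INTRINSIC definition expands to one table row per intrinsic. A self-contained sketch of the same technique (hypothetical color table; not the JIT's actual macro arguments):

#include <cstdio>

// Stand-in for the included list file, inlined here for the sketch:
#define COLOR_LIST(X) X(Red, 0xFF0000) X(Green, 0x00FF00) X(Blue, 0x0000FF)

struct ColorInfo
{
    const char* name;
    unsigned    rgb;
};

// One macro expansion builds the whole table, mirroring how simdIntrinsicInfoArray is built.
static const ColorInfo colorInfoArray[] = {
#define X(name, rgb) {#name, rgb},
    COLOR_LIST(X)
#undef X
};

int main()
{
    for (const ColorInfo& c : colorInfoArray)
    {
        printf("%s = %06X\n", c.name, c.rgb);
    }
    return 0;
}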
//------------------------------------------------------------------------
@@ -67,8 +64,8 @@ int Compiler::getSIMDVectorLength(unsigned simdSize, var_types baseType)
//
int Compiler::getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd)
{
- unsigned sizeBytes = 0;
- var_types baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
+ unsigned sizeBytes = 0;
+ var_types baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
return getSIMDVectorLength(sizeBytes, baseType);
}
@@ -87,10 +84,12 @@ int Compiler::getSIMDTypeAlignment(var_types simdType)
unsigned size = genTypeSize(simdType);
// preferred alignment for SSE2 128-bit vectors is 16-bytes
- if (size == 8)
+ if (size == 8)
+ {
return 8;
+ }
- // As per Intel manual, AVX vectors preferred alignment is 32-bytes but on Amd64
+ // As per Intel manual, AVX vectors preferred alignment is 32-bytes but on Amd64
// RSP/EBP is aligned at 16-bytes, therefore to align SIMD types at 32-bytes we need even
// RSP/EBP to be 32-byte aligned. It is not clear whether additional stack space used in
// aligning stack is worth the benefit and for now will use 16-byte alignment for AVX
@@ -102,26 +101,24 @@ int Compiler::getSIMDTypeAlignment(var_types simdType)
#endif
}
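getSIMDTypeAlignment encodes the policy described in the comments: 8-byte vectors align to 8, and larger vectors are capped at 16 on AMD64 because RSP/EBP are only 16-byte aligned. A standalone sketch of that policy (assuming the remaining sizes fall through to 16, as the surrounding code indicates; not the shipped implementation):

#include <cassert>

// Mirrors the AMD64 policy sketched above: Vector2 (8 bytes) gets 8-byte alignment,
// while 12-, 16- and 32-byte vectors are all aligned to 16 bytes.
static int simdAlignmentAmd64(unsigned sizeBytes)
{
    if (sizeBytes == 8)
    {
        return 8;
    }
    return 16;
}

int main()
{
    assert(simdAlignmentAmd64(8) == 8);   // Vector2
    assert(simdAlignmentAmd64(12) == 16); // Vector3
    assert(simdAlignmentAmd64(16) == 16); // Vector4 / Vector<T> with SSE2
    assert(simdAlignmentAmd64(32) == 16); // Vector<T> with AVX, per the comment above
    return 0;
}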
-
//----------------------------------------------------------------------------------
// Return the base type and size of SIMD vector type given its type handle.
//
// Arguments:
// typeHnd - The handle of the type we're interested in.
-// sizeBytes - out param
+// sizeBytes - out param
//
// Return Value:
// base type of SIMD vector.
// sizeBytes if non-null is set to size in bytes.
//
-// TODO-Throughput: current implementation parses class name to find base type. Change
+// TODO-Throughput: current implementation parses class name to find base type. Change
// this when we implement SIMD intrinsic identification for the final
// product.
//
-var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
- unsigned *sizeBytes /*= nullptr */)
+var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes /*= nullptr */)
{
- assert(featureSIMD);
+ assert(featureSIMD);
if (typeHnd == nullptr)
{
return TYP_UNKNOWN;
@@ -129,7 +126,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
// fast path search using cached type handles of important types
var_types simdBaseType = TYP_UNKNOWN;
- unsigned size = 0;
+ unsigned size = 0;
// Early return if it is not a SIMD module.
if (!isSIMDClass(typeHnd))
@@ -152,21 +149,21 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
else if (typeHnd == SIMDVector2Handle)
{
simdBaseType = TYP_FLOAT;
- size = 2*genTypeSize(TYP_FLOAT);
+ size = 2 * genTypeSize(TYP_FLOAT);
assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE));
JITDUMP(" Known type Vector2\n");
}
else if (typeHnd == SIMDVector3Handle)
{
simdBaseType = TYP_FLOAT;
- size = 3*genTypeSize(TYP_FLOAT);
+ size = 3 * genTypeSize(TYP_FLOAT);
assert(size == info.compCompHnd->getClassSize(typeHnd));
JITDUMP(" Known type Vector3\n");
}
else if (typeHnd == SIMDVector4Handle)
{
simdBaseType = TYP_FLOAT;
- size = 4*genTypeSize(TYP_FLOAT);
+ size = 4 * genTypeSize(TYP_FLOAT);
assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE));
JITDUMP(" Known type Vector4\n");
}
@@ -222,10 +219,10 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
// Obtain base type by parsing fully qualified class name.
//
// TODO-Throughput: implement product shipping solution to query base type.
- WCHAR className[256] = {0};
- WCHAR *pbuf = &className[0];
- int len = sizeof(className)/sizeof(className[0]);
- info.compCompHnd->appendClassName(&pbuf, &len, typeHnd, TRUE, FALSE, FALSE);
+ WCHAR className[256] = {0};
+ WCHAR* pbuf = &className[0];
+ int len = sizeof(className) / sizeof(className[0]);
+ info.compCompHnd->appendClassName(&pbuf, &len, typeHnd, TRUE, FALSE, FALSE);
noway_assert(pbuf < &className[256]);
JITDUMP("SIMD Candidate Type %S\n", className);
@@ -236,61 +233,61 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
if (wcsncmp(&(className[25]), W("System.Single"), 13) == 0)
{
SIMDFloatHandle = typeHnd;
- simdBaseType = TYP_FLOAT;
+ simdBaseType = TYP_FLOAT;
JITDUMP(" Found type SIMD Vector<Float>\n");
}
else if (wcsncmp(&(className[25]), W("System.Int32"), 12) == 0)
{
SIMDIntHandle = typeHnd;
- simdBaseType = TYP_INT;
+ simdBaseType = TYP_INT;
JITDUMP(" Found type SIMD Vector<Int>\n");
}
else if (wcsncmp(&(className[25]), W("System.UInt16"), 13) == 0)
{
SIMDUShortHandle = typeHnd;
- simdBaseType = TYP_CHAR;
+ simdBaseType = TYP_CHAR;
JITDUMP(" Found type SIMD Vector<ushort>\n");
}
else if (wcsncmp(&(className[25]), W("System.Byte"), 11) == 0)
{
SIMDUByteHandle = typeHnd;
- simdBaseType = TYP_UBYTE;
+ simdBaseType = TYP_UBYTE;
JITDUMP(" Found type SIMD Vector<ubyte>\n");
}
else if (wcsncmp(&(className[25]), W("System.Double"), 13) == 0)
{
SIMDDoubleHandle = typeHnd;
- simdBaseType = TYP_DOUBLE;
+ simdBaseType = TYP_DOUBLE;
JITDUMP(" Found type SIMD Vector<Double>\n");
}
else if (wcsncmp(&(className[25]), W("System.Int64"), 12) == 0)
{
SIMDLongHandle = typeHnd;
- simdBaseType = TYP_LONG;
+ simdBaseType = TYP_LONG;
JITDUMP(" Found type SIMD Vector<Long>\n");
}
else if (wcsncmp(&(className[25]), W("System.Int16"), 12) == 0)
{
SIMDShortHandle = typeHnd;
- simdBaseType = TYP_SHORT;
+ simdBaseType = TYP_SHORT;
JITDUMP(" Found type SIMD Vector<short>\n");
}
else if (wcsncmp(&(className[25]), W("System.SByte"), 12) == 0)
{
SIMDByteHandle = typeHnd;
- simdBaseType = TYP_BYTE;
+ simdBaseType = TYP_BYTE;
JITDUMP(" Found type SIMD Vector<byte>\n");
}
else if (wcsncmp(&(className[25]), W("System.UInt32"), 13) == 0)
{
SIMDUIntHandle = typeHnd;
- simdBaseType = TYP_UINT;
+ simdBaseType = TYP_UINT;
JITDUMP(" Found type SIMD Vector<uint>\n");
}
else if (wcsncmp(&(className[25]), W("System.UInt64"), 13) == 0)
{
SIMDULongHandle = typeHnd;
- simdBaseType = TYP_ULONG;
+ simdBaseType = TYP_ULONG;
JITDUMP(" Found type SIMD Vector<ulong>\n");
}
else
@@ -298,12 +295,12 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
JITDUMP(" Unknown SIMD Vector<T>\n");
}
}
- else if (wcsncmp(&(className[16]), W("Vector2"), 8) == 0)
+ else if (wcsncmp(&(className[16]), W("Vector2"), 8) == 0)
{
SIMDVector2Handle = typeHnd;
simdBaseType = TYP_FLOAT;
- size = 2*genTypeSize(TYP_FLOAT);
+ size = 2 * genTypeSize(TYP_FLOAT);
assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE));
JITDUMP(" Found Vector2\n");
}
@@ -312,7 +309,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
SIMDVector3Handle = typeHnd;
simdBaseType = TYP_FLOAT;
- size = 3*genTypeSize(TYP_FLOAT);
+ size = 3 * genTypeSize(TYP_FLOAT);
assert(size == info.compCompHnd->getClassSize(typeHnd));
JITDUMP(" Found Vector3\n");
}
@@ -321,7 +318,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
SIMDVector4Handle = typeHnd;
simdBaseType = TYP_FLOAT;
- size = 4*genTypeSize(TYP_FLOAT);
+ size = 4 * genTypeSize(TYP_FLOAT);
assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE));
JITDUMP(" Found Vector4\n");
}
@@ -337,8 +334,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
}
}
- if (simdBaseType != TYP_UNKNOWN &&
- sizeBytes != nullptr)
+ if (simdBaseType != TYP_UNKNOWN && sizeBytes != nullptr)
{
// If not a fixed size vector then its size is same as SIMD vector
// register length in bytes
@@ -367,7 +363,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
//
// Return Value:
// SIMDIntrinsicInfo struct initialized corresponding to methodHnd.
-// Sets SIMDIntrinsicInfo.id to SIMDIntrinsicInvalid if methodHnd doesn't correspond
+// Sets SIMDIntrinsicInfo.id to SIMDIntrinsicInvalid if methodHnd doesn't correspond
// to any SIMD intrinsic. Also, sets the out params inOutTypeHnd, argCount, baseType and
// sizeBytes.
//
@@ -379,9 +375,9 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd,
// Although we now have type identification from the VM, the parsing of intrinsic names
// could be made more efficient.
//
-const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* inOutTypeHnd,
+const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* inOutTypeHnd,
CORINFO_METHOD_HANDLE methodHnd,
- CORINFO_SIG_INFO * sig,
+ CORINFO_SIG_INFO* sig,
bool isNewObj,
unsigned* argCount,
var_types* baseType,
@@ -390,10 +386,10 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* i
assert(featureSIMD);
assert(baseType != nullptr);
assert(sizeBytes != nullptr);
-
+
// get baseType and size of the type
CORINFO_CLASS_HANDLE typeHnd = *inOutTypeHnd;
- *baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, sizeBytes);
+ *baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, sizeBytes);
bool isHWAcceleratedIntrinsic = false;
if (typeHnd == SIMDVectorHandle)
@@ -402,7 +398,7 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* i
// which determines the baseType.
// The exception is the IsHardwareAccelerated property, which is handled as a special case.
assert(*baseType == TYP_UNKNOWN);
- if(sig->numArgs == 0)
+ if (sig->numArgs == 0)
{
const SIMDIntrinsicInfo* hwAccelIntrinsicInfo = &(simdIntrinsicInfoArray[SIMDIntrinsicHWAccel]);
if ((strcmp(eeGetMethodName(methodHnd, nullptr), hwAccelIntrinsicInfo->methodName) == 0) &&
@@ -416,9 +412,9 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* i
}
else
{
- typeHnd = info.compCompHnd->getArgClass(sig, sig->args);
+ typeHnd = info.compCompHnd->getArgClass(sig, sig->args);
*inOutTypeHnd = typeHnd;
- *baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, sizeBytes);
+ *baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, sizeBytes);
}
}
@@ -440,20 +436,22 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* i
// TODO-Throughput: replace sequential search by binary search by arranging entries
// sorted by method name.
SIMDIntrinsicID intrinsicId = SIMDIntrinsicInvalid;
- const char* methodName = eeGetMethodName(methodHnd, nullptr);
- for (int i = SIMDIntrinsicNone+1; i < SIMDIntrinsicInvalid; ++i)
+ const char* methodName = eeGetMethodName(methodHnd, nullptr);
+ for (int i = SIMDIntrinsicNone + 1; i < SIMDIntrinsicInvalid; ++i)
{
if (strcmp(methodName, simdIntrinsicInfoArray[i].methodName) == 0)
{
// Found an entry for the method; further check whether it is one of
// the supported base types.
bool found = false;
- for (int j=0; j < SIMD_INTRINSIC_MAX_BASETYPE_COUNT; ++j)
+ for (int j = 0; j < SIMD_INTRINSIC_MAX_BASETYPE_COUNT; ++j)
{
// Convention: if there are fewer base types supported than MAX_BASETYPE_COUNT,
- // the end of the list is marked by TYP_UNDEF.
+ // the end of the list is marked by TYP_UNDEF.
if (simdIntrinsicInfoArray[i].supportedBaseTypes[j] == TYP_UNDEF)
+ {
break;
+ }
if (simdIntrinsicInfoArray[i].supportedBaseTypes[j] == *baseType)
{
@@ -468,7 +466,7 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* i
}
// Now, check the arguments.
- unsigned int fixedArgCnt = simdIntrinsicInfoArray[i].argCount;
+ unsigned int fixedArgCnt = simdIntrinsicInfoArray[i].argCount;
unsigned int expectedArgCnt = fixedArgCnt;
// First handle SIMDIntrinsicInitN, where the arg count depends on the type.
@@ -516,9 +514,9 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* i
// Track the arguments from the signature - we currently only use this to distinguish
    // integral and pointer types, both of which will be TYP_I_IMPL on the importer stack.
- CORINFO_ARG_LIST_HANDLE argLst = sig->args;
+ CORINFO_ARG_LIST_HANDLE argLst = sig->args;
- CORINFO_CLASS_HANDLE argClass;
+ CORINFO_CLASS_HANDLE argClass;
for (unsigned int argIndex = 0; found == true && argIndex < expectedArgCnt; argIndex++)
{
bool isThisPtr = ((argIndex == 0) && sig->hasThis());
@@ -540,7 +538,7 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* i
assert(expectedArgType != TYP_UNDEF);
if (expectedArgType == TYP_UNKNOWN)
{
- // The type of the argument will be genActualType(*baseType).
+ // The type of the argument will be genActualType(*baseType).
expectedArgType = genActualType(*baseType);
}
}
@@ -582,15 +580,16 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* i
}
// Cross check return type and static vs. instance is what we are expecting.
- // If not, don't consider it as an intrinsic.
+ // If not, don't consider it as an intrinsic.
// Note that ret type of TYP_UNKNOWN means that it is not known apriori and must be same as baseType
if (found)
{
var_types expectedRetType = simdIntrinsicInfoArray[i].retType;
if (expectedRetType == TYP_UNKNOWN)
{
- // JIT maps uint/ulong type vars to TYP_INT/TYP_LONG.
- expectedRetType = (*baseType == TYP_UINT || *baseType == TYP_ULONG) ? genActualType(*baseType) : *baseType;
+ // JIT maps uint/ulong type vars to TYP_INT/TYP_LONG.
+ expectedRetType =
+ (*baseType == TYP_UINT || *baseType == TYP_ULONG) ? genActualType(*baseType) : *baseType;
}
if (JITtype2varType(sig->retType) != expectedRetType ||
@@ -602,7 +601,7 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* i
if (found)
{
- intrinsicId = (SIMDIntrinsicID) i;
+ intrinsicId = (SIMDIntrinsicID)i;
break;
}
}
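The loop above is a linear probe of simdIntrinsicInfoArray: match the method name, then one of the supported base types (list terminated by TYP_UNDEF), then the argument count and return type. A standalone sketch of that style of lookup (the table entries and type names here are invented, not the JIT's):

#include <cstring>
#include <cstdio>

enum BaseType { BT_FLOAT, BT_INT, BT_UNDEF };

struct IntrinsicDesc
{
    const char* name;
    int         argCount;
    BaseType    supported[3]; // BT_UNDEF marks the end of the list, like TYP_UNDEF above
};

static const IntrinsicDesc table[] = {
    {"op_Addition", 2, {BT_FLOAT, BT_INT, BT_UNDEF}},
    {"Abs",         1, {BT_FLOAT, BT_UNDEF, BT_UNDEF}},
};

static const IntrinsicDesc* lookup(const char* name, int argCount, BaseType baseType)
{
    for (const IntrinsicDesc& d : table)
    {
        if (strcmp(d.name, name) != 0 || d.argCount != argCount)
        {
            continue;
        }
        for (BaseType bt : d.supported)
        {
            if (bt == BT_UNDEF)
            {
                break; // fewer supported base types than the maximum
            }
            if (bt == baseType)
            {
                return &d; // name, arg count and base type all match
            }
        }
    }
    return nullptr;
}

int main()
{
    printf("Abs<float> found: %s\n", lookup("Abs", 1, BT_FLOAT) ? "yes" : "no");
    printf("Abs<int> found:   %s\n", lookup("Abs", 1, BT_INT) ? "yes" : "no");
    return 0;
}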
@@ -634,9 +633,9 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* i
GenTreePtr Compiler::impSIMDPopStack(var_types type, bool expectAddr)
{
- StackEntry se = impPopStack();
- typeInfo ti = se.seTypeInfo;
- GenTreePtr tree = se.val;
+ StackEntry se = impPopStack();
+ typeInfo ti = se.seTypeInfo;
+ GenTreePtr tree = se.val;
// If expectAddr is true implies what we have on stack is address and we need
// SIMD type struct that it points to.
@@ -652,7 +651,7 @@ GenTreePtr Compiler::impSIMDPopStack(var_types type, bool expectAddr)
tree = gtNewOperNode(GT_IND, type, tree);
}
}
-
+
bool isParam = false;
// If we have a ldobj of a SIMD local we need to transform it.
@@ -667,9 +666,9 @@ GenTreePtr Compiler::impSIMDPopStack(var_types type, bool expectAddr)
if (tree->OperGet() == GT_LCL_VAR)
{
- unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
- LclVarDsc* lclVarDsc = &lvaTable[lclNum];
- isParam = lclVarDsc->lvIsParam;
+ unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
+ LclVarDsc* lclVarDsc = &lvaTable[lclNum];
+ isParam = lclVarDsc->lvIsParam;
}
// normalize TYP_STRUCT value
@@ -677,7 +676,7 @@ GenTreePtr Compiler::impSIMDPopStack(var_types type, bool expectAddr)
{
assert(ti.IsType(TI_STRUCT));
CORINFO_CLASS_HANDLE structType = ti.GetClassHandleForValueClass();
- tree = impNormStructVal(tree, structType, (unsigned)CHECK_SPILL_ALL);
+ tree = impNormStructVal(tree, structType, (unsigned)CHECK_SPILL_ALL);
}
// Now set the type of the tree to the specialized SIMD struct type, if applicable.
@@ -688,8 +687,7 @@ GenTreePtr Compiler::impSIMDPopStack(var_types type, bool expectAddr)
}
else if (tree->gtType == TYP_BYREF)
{
- assert(tree->IsLocal() ||
- (tree->gtOper == GT_ADDR) && varTypeIsSIMD(tree->gtGetOp1()));
+ assert(tree->IsLocal() || (tree->gtOper == GT_ADDR) && varTypeIsSIMD(tree->gtGetOp1()));
}
return tree;
@@ -705,23 +703,20 @@ GenTreePtr Compiler::impSIMDPopStack(var_types type, bool expectAddr)
// Return Value:
// Returns a GT_SIMD node with the SIMDIntrinsicGetItem intrinsic id.
//
-GenTreeSIMD* Compiler::impSIMDGetFixed(var_types simdType,
- var_types baseType,
- unsigned simdSize,
- int index)
+GenTreeSIMD* Compiler::impSIMDGetFixed(var_types simdType, var_types baseType, unsigned simdSize, int index)
{
assert(simdSize >= ((index + 1) * genTypeSize(baseType)));
// op1 is a SIMD source.
GenTree* op1 = impSIMDPopStack(simdType, true);
- GenTree* op2 = gtNewIconNode(index);
+ GenTree* op2 = gtNewIconNode(index);
GenTreeSIMD* simdTree = gtNewSIMDNode(baseType, op1, op2, SIMDIntrinsicGetItem, baseType, simdSize);
return simdTree;
}
#ifdef _TARGET_AMD64_
-// impSIMDLongRelOpEqual: transforms operands and returns the SIMD intrinsic to be applied on
+// impSIMDLongRelOpEqual: transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain == comparison result.
//
// Arguments:
@@ -734,16 +729,16 @@ GenTreeSIMD* Compiler::impSIMDGetFixed(var_types simdType,
// Modifies in-out params op1, op2 and returns intrinsic ID to be applied to modified operands
//
SIMDIntrinsicID Compiler::impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
- unsigned size,
- GenTree** pOp1,
- GenTree** pOp2)
+ unsigned size,
+ GenTree** pOp1,
+ GenTree** pOp2)
{
var_types simdType = (*pOp1)->TypeGet();
assert(varTypeIsSIMD(simdType) && ((*pOp2)->TypeGet() == simdType));
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
- //
+ //
// Equality(v1, v2):
// tmp = (v1 == v2) i.e. compare for equality as if v1 and v2 are vector<int>
//    result = BitwiseAnd(tmp, shuffle(tmp, (2, 3, 1, 0)))
@@ -751,7 +746,7 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
// Compare vector<long> as if they were vector<int> and assign the result to a temp
GenTree* compResult = gtNewSIMDNode(simdType, *pOp1, *pOp2, SIMDIntrinsicEqual, TYP_INT, size);
- unsigned lclNum = lvaGrabTemp(true DEBUGARG("SIMD Long =="));
+ unsigned lclNum = lvaGrabTemp(true DEBUGARG("SIMD Long =="));
lvaSetStruct(lclNum, typeHnd, false);
GenTree* tmp = gtNewLclvNode(lclNum, simdType);
GenTree* asg = gtNewTempAssign(lclNum, compResult);
@@ -760,11 +755,12 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
// op2 = Shuffle(tmp, 0xB1)
// IntrinsicId = BitwiseAnd
*pOp1 = gtNewOperNode(GT_COMMA, simdType, asg, tmp);
- *pOp2 = gtNewSIMDNode(simdType, gtNewLclvNode(lclNum, simdType), gtNewIconNode(SHUFFLE_ZWYX, TYP_INT), SIMDIntrinsicShuffleSSE2, TYP_INT, size);
+ *pOp2 = gtNewSIMDNode(simdType, gtNewLclvNode(lclNum, simdType), gtNewIconNode(SHUFFLE_ZWYX, TYP_INT),
+ SIMDIntrinsicShuffleSSE2, TYP_INT, size);
return SIMDIntrinsicBitwiseAnd;
}
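The comment above relies on a simple identity: a 64-bit lane is equal exactly when both of its 32-bit halves are equal, so the vector<long> equality is synthesized from a vector<int> compare, a pair-swapping shuffle, and a bitwise AND of the lane masks. A standalone sketch (not part of this patch; plain scalar C++ standing in for the SSE2 lanes) that checks the identity:

#include <cstdint>
#include <cassert>

static uint32_t eqMask32(uint32_t a, uint32_t b)
{
    return (a == b) ? 0xFFFFFFFFu : 0u; // what a per-lane SSE2 equality compare produces
}

static bool longEqualViaIntLanes(uint64_t a, uint64_t b)
{
    uint32_t loMask = eqMask32((uint32_t)a, (uint32_t)b);
    uint32_t hiMask = eqMask32((uint32_t)(a >> 32), (uint32_t)(b >> 32));
    // The shuffle swaps the lo/hi lane results within each 64-bit element; ANDing a lane
    // mask with its swapped partner leaves all-ones only if both halves matched.
    uint32_t combined = loMask & hiMask;
    return combined == 0xFFFFFFFFu;
}

int main()
{
    assert(longEqualViaIntLanes(0x1122334455667788ull, 0x1122334455667788ull));
    assert(!longEqualViaIntLanes(0x1122334455667788ull, 0x1122334455667789ull));
    assert(!longEqualViaIntLanes(0x1122334455667788ull, 0xFF22334455667788ull));
    return 0;
}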
-// impSIMDLongRelOpGreaterThan: transforms operands and returns the SIMD intrinsic to be applied on
+// impSIMDLongRelOpGreaterThan: transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain > comparison result.
//
// Arguments:
@@ -777,18 +773,20 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
// Modifies in-out params pOp1, pOp2 and returns intrinsic ID to be applied to modified operands
//
SIMDIntrinsicID Compiler::impSIMDLongRelOpGreaterThan(CORINFO_CLASS_HANDLE typeHnd,
- unsigned size,
- GenTree** pOp1,
- GenTree** pOp2)
+ unsigned size,
+ GenTree** pOp1,
+ GenTree** pOp2)
{
var_types simdType = (*pOp1)->TypeGet();
assert(varTypeIsSIMD(simdType) && ((*pOp2)->TypeGet() == simdType));
// GreaterThan(v1, v2) where v1 and v2 are vector long.
// Let us consider the case of single long element comparison.
- // say L1 = (x1, y1) and L2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit integers that comprise the longs L1 and L2.
+ // say L1 = (x1, y1) and L2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit integers that comprise the longs L1 and
+ // L2.
//
- // GreaterThan(L1, L2) can be expressed in terms of > relationship between 32-bit integers that comprise L1 and L2 as
+ // GreaterThan(L1, L2) can be expressed in terms of > relationship between 32-bit integers that comprise L1 and L2
+ // as
// = (x1, y1) > (x2, y2)
// = (x1 > x2) || [(x1 == x2) && (y1 > y2)] - eq (1)
//
@@ -803,30 +801,30 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpGreaterThan(CORINFO_CLASS_HANDLE typeH
// Result = BitwiseOr(z, w)
// Since op1 and op2 gets used multiple times, make sure side effects are computed.
- GenTree* dupOp1 = nullptr;
- GenTree* dupOp2 = nullptr;
+ GenTree* dupOp1 = nullptr;
+ GenTree* dupOp2 = nullptr;
GenTree* dupDupOp1 = nullptr;
GenTree* dupDupOp2 = nullptr;
if (((*pOp1)->gtFlags & GTF_SIDE_EFFECT) != 0)
{
- dupOp1 = fgInsertCommaFormTemp(pOp1, typeHnd);
+ dupOp1 = fgInsertCommaFormTemp(pOp1, typeHnd);
dupDupOp1 = gtNewLclvNode(dupOp1->AsLclVarCommon()->GetLclNum(), simdType);
}
else
{
- dupOp1 = gtCloneExpr(*pOp1);
+ dupOp1 = gtCloneExpr(*pOp1);
dupDupOp1 = gtCloneExpr(*pOp1);
}
if (((*pOp2)->gtFlags & GTF_SIDE_EFFECT) != 0)
{
- dupOp2 = fgInsertCommaFormTemp(pOp2, typeHnd);
+ dupOp2 = fgInsertCommaFormTemp(pOp2, typeHnd);
dupDupOp2 = gtNewLclvNode(dupOp2->AsLclVarCommon()->GetLclNum(), simdType);
}
else
{
- dupOp2 = gtCloneExpr(*pOp2);
+ dupOp2 = gtCloneExpr(*pOp2);
dupDupOp2 = gtCloneExpr(*pOp2);
}
@@ -841,13 +839,16 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpGreaterThan(CORINFO_CLASS_HANDLE typeH
GenTree* v1Equalsv2 = gtNewSIMDNode(simdType, dupOp1, dupOp2, SIMDIntrinsicEqual, TYP_INT, size);
// v1GreaterThanv2Unsigned - unsigned 32-bit comparison
- var_types tempBaseType = TYP_UINT;
+ var_types tempBaseType = TYP_UINT;
SIMDIntrinsicID sid = impSIMDRelOp(SIMDIntrinsicGreaterThan, typeHnd, size, &tempBaseType, &dupDupOp1, &dupDupOp2);
- GenTree* v1GreaterThanv2Unsigned = gtNewSIMDNode(simdType, dupDupOp1, dupDupOp2, sid, tempBaseType, size);
-
- GenTree* z = gtNewSIMDNode(simdType, v1GreaterThanv2Signed, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), SIMDIntrinsicShuffleSSE2, TYP_FLOAT, size);
- GenTree* t1 = gtNewSIMDNode(simdType, v1GreaterThanv2Unsigned, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), SIMDIntrinsicShuffleSSE2, TYP_FLOAT, size);
- GenTree* u1 = gtNewSIMDNode(simdType, v1Equalsv2, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), SIMDIntrinsicShuffleSSE2, TYP_FLOAT, size);
+ GenTree* v1GreaterThanv2Unsigned = gtNewSIMDNode(simdType, dupDupOp1, dupDupOp2, sid, tempBaseType, size);
+
+ GenTree* z = gtNewSIMDNode(simdType, v1GreaterThanv2Signed, gtNewIconNode(SHUFFLE_WWYY, TYP_INT),
+ SIMDIntrinsicShuffleSSE2, TYP_FLOAT, size);
+ GenTree* t1 = gtNewSIMDNode(simdType, v1GreaterThanv2Unsigned, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT),
+ SIMDIntrinsicShuffleSSE2, TYP_FLOAT, size);
+ GenTree* u1 = gtNewSIMDNode(simdType, v1Equalsv2, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), SIMDIntrinsicShuffleSSE2,
+ TYP_FLOAT, size);
GenTree* w = gtNewSIMDNode(simdType, u1, t1, SIMDIntrinsicBitwiseAnd, TYP_INT, size);
*pOp1 = z;
@@ -855,7 +856,7 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpGreaterThan(CORINFO_CLASS_HANDLE typeH
return SIMDIntrinsicBitwiseOr;
}
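Equation (1) above is the whole trick: a signed 64-bit greater-than is rebuilt from a signed compare of the high halves, an equality test of the high halves, and an unsigned compare of the low halves. A standalone scalar sketch (not part of this patch) that verifies the identity against the native compare:

#include <cstdint>
#include <cassert>

static bool longGreaterViaIntLanes(int64_t a, int64_t b)
{
    int32_t  hiA = (int32_t)(a >> 32), hiB = (int32_t)(b >> 32); // x1, x2: high halves, signed
    uint32_t loA = (uint32_t)a,        loB = (uint32_t)b;        // y1, y2: low halves, unsigned

    bool z = hiA > hiB;    // signed compare of high halves
    bool u = hiA == hiB;   // high halves equal?
    bool t = loA > loB;    // unsigned compare of low halves
    return z || (u && t);  // eq (1): (x1 > x2) || ((x1 == x2) && (y1 > y2))
}

int main()
{
    int64_t samples[] = {0, 1, -1, 0x100000000LL, -0x100000000LL,
                         0x1FFFFFFFFLL, INT64_MIN, INT64_MAX};
    for (int64_t a : samples)
    {
        for (int64_t b : samples)
        {
            assert(longGreaterViaIntLanes(a, b) == (a > b)); // matches the native compare
        }
    }
    return 0;
}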
-// impSIMDLongRelOpGreaterThanOrEqual: transforms operands and returns the SIMD intrinsic to be applied on
+// impSIMDLongRelOpGreaterThanOrEqual: transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain >= comparison result.
//
// Arguments:
@@ -868,9 +869,9 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpGreaterThan(CORINFO_CLASS_HANDLE typeH
// Modifies in-out params pOp1, pOp2 and returns intrinsic ID to be applied to modified operands
//
SIMDIntrinsicID Compiler::impSIMDLongRelOpGreaterThanOrEqual(CORINFO_CLASS_HANDLE typeHnd,
- unsigned size,
- GenTree** pOp1,
- GenTree** pOp2)
+ unsigned size,
+ GenTree** pOp1,
+ GenTree** pOp2)
{
var_types simdType = (*pOp1)->TypeGet();
assert(varTypeIsSIMD(simdType) && ((*pOp2)->TypeGet() == simdType));
@@ -902,16 +903,16 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpGreaterThanOrEqual(CORINFO_CLASS_HANDL
// (a==b)
SIMDIntrinsicID id = impSIMDLongRelOpEqual(typeHnd, size, pOp1, pOp2);
- *pOp1 = gtNewSIMDNode(simdType, *pOp1, *pOp2, id, TYP_LONG, size);
+ *pOp1 = gtNewSIMDNode(simdType, *pOp1, *pOp2, id, TYP_LONG, size);
// (a > b)
- id = impSIMDLongRelOpGreaterThan(typeHnd, size, &dupOp1, &dupOp2);
+ id = impSIMDLongRelOpGreaterThan(typeHnd, size, &dupOp1, &dupOp2);
*pOp2 = gtNewSIMDNode(simdType, dupOp1, dupOp2, id, TYP_LONG, size);
return SIMDIntrinsicBitwiseOr;
}
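The function above simply OR-combines the two building blocks already defined: (a >= b) is (a == b) || (a > b), which is also why both operand trees have to be duplicated before use. A minimal scalar sketch (not part of this patch) of that decomposition:

#include <cstdint>
#include <cassert>

static bool geViaEqOrGt(int64_t a, int64_t b)
{
    bool eq = (a == b);
    bool gt = (a > b);
    return eq || gt; // the two partial results are OR'ed, as in the code above
}

int main()
{
    assert(geViaEqOrGt(5, 5));
    assert(geViaEqOrGt(6, 5));
    assert(!geViaEqOrGt(4, 5));
    assert(geViaEqOrGt(-3, -7));
    return 0;
}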
-// impSIMDInt32OrSmallIntRelOpGreaterThanOrEqual: transforms operands and returns the SIMD intrinsic to be applied on
+// impSIMDInt32OrSmallIntRelOpGreaterThanOrEqual: transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain >= comparison result in case of integer base type vectors
//
// Arguments:
@@ -924,19 +925,15 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpGreaterThanOrEqual(CORINFO_CLASS_HANDL
// Return Value:
// Modifies in-out params pOp1, pOp2 and returns intrinsic ID to be applied to modified operands
//
-SIMDIntrinsicID Compiler::impSIMDIntegralRelOpGreaterThanOrEqual(CORINFO_CLASS_HANDLE typeHnd,
- unsigned size,
- var_types baseType,
- GenTree** pOp1,
- GenTree** pOp2)
+SIMDIntrinsicID Compiler::impSIMDIntegralRelOpGreaterThanOrEqual(
+ CORINFO_CLASS_HANDLE typeHnd, unsigned size, var_types baseType, GenTree** pOp1, GenTree** pOp2)
{
var_types simdType = (*pOp1)->TypeGet();
assert(varTypeIsSIMD(simdType) && ((*pOp2)->TypeGet() == simdType));
// This routine should be used only for integer base type vectors
assert(varTypeIsIntegral(baseType));
- if ((getSIMDInstructionSet() == InstructionSet_SSE2) &&
- ((baseType == TYP_LONG) || baseType == TYP_UBYTE))
+ if ((getSIMDInstructionSet() == InstructionSet_SSE2) && ((baseType == TYP_LONG) || baseType == TYP_UBYTE))
{
return impSIMDLongRelOpGreaterThanOrEqual(typeHnd, size, pOp1, pOp2);
}
@@ -976,7 +973,7 @@ SIMDIntrinsicID Compiler::impSIMDIntegralRelOpGreaterThanOrEqual(CORINFO_CLASS_H
}
#endif //_TARGET_AMD64_
-// Transforms operands and returns the SIMD intrinsic to be applied on
+// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain given relop result.
//
// Arguments:
@@ -990,12 +987,12 @@ SIMDIntrinsicID Compiler::impSIMDIntegralRelOpGreaterThanOrEqual(CORINFO_CLASS_H
// Return Value:
// Modifies in-out params pOp1, pOp2, inOutBaseType and returns intrinsic ID to be applied to modified operands
//
-SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
+SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
CORINFO_CLASS_HANDLE typeHnd,
- unsigned size,
- var_types* inOutBaseType,
- GenTree** pOp1,
- GenTree** pOp2)
+ unsigned size,
+ var_types* inOutBaseType,
+ GenTree** pOp1,
+ GenTree** pOp2)
{
var_types simdType = (*pOp1)->TypeGet();
assert(varTypeIsSIMD(simdType) && ((*pOp2)->TypeGet() == simdType));
@@ -1004,36 +1001,34 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
#ifdef _TARGET_AMD64_
SIMDIntrinsicID intrinsicID = relOpIntrinsicId;
- var_types baseType = *inOutBaseType;
+ var_types baseType = *inOutBaseType;
if (varTypeIsFloating(baseType))
{
- // SSE2/AVX doesn't support > and >= on vector float/double.
+ // SSE2/AVX doesn't support > and >= on vector float/double.
// Therefore, we need to use < and <= with swapped operands
- if (relOpIntrinsicId == SIMDIntrinsicGreaterThan ||
- relOpIntrinsicId == SIMDIntrinsicGreaterThanOrEqual)
+ if (relOpIntrinsicId == SIMDIntrinsicGreaterThan || relOpIntrinsicId == SIMDIntrinsicGreaterThanOrEqual)
{
GenTree* tmp = *pOp1;
- *pOp1 = *pOp2;
- *pOp2 = tmp;
+ *pOp1 = *pOp2;
+ *pOp2 = tmp;
- intrinsicID = (relOpIntrinsicId == SIMDIntrinsicGreaterThan) ? SIMDIntrinsicLessThan :
- SIMDIntrinsicLessThanOrEqual;
+ intrinsicID =
+ (relOpIntrinsicId == SIMDIntrinsicGreaterThan) ? SIMDIntrinsicLessThan : SIMDIntrinsicLessThanOrEqual;
}
}
else if (varTypeIsIntegral(baseType))
{
// SSE/AVX doesn't support < and <= on integer base type vectors.
- // Therefore, we need to use > and >= with swapped operands.
- if (intrinsicID == SIMDIntrinsicLessThan ||
- intrinsicID == SIMDIntrinsicLessThanOrEqual)
+ // Therefore, we need to use > and >= with swapped operands.
+ if (intrinsicID == SIMDIntrinsicLessThan || intrinsicID == SIMDIntrinsicLessThanOrEqual)
{
GenTree* tmp = *pOp1;
- *pOp1 = *pOp2;
- *pOp2 = tmp;
+ *pOp1 = *pOp2;
+ *pOp2 = tmp;
- intrinsicID = (relOpIntrinsicId == SIMDIntrinsicLessThan) ? SIMDIntrinsicGreaterThan :
- SIMDIntrinsicGreaterThanOrEqual;
+ intrinsicID = (relOpIntrinsicId == SIMDIntrinsicLessThan) ? SIMDIntrinsicGreaterThan
+ : SIMDIntrinsicGreaterThanOrEqual;
}
if ((getSIMDInstructionSet() == InstructionSet_SSE2) && baseType == TYP_LONG)
@@ -1041,7 +1036,7 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
// There is no direct SSE2 support for comparing TYP_LONG vectors.
            // These have to be implemented in terms of TYP_INT vector comparison operations.
if (intrinsicID == SIMDIntrinsicEqual)
- {
+ {
intrinsicID = impSIMDLongRelOpEqual(typeHnd, size, pOp1, pOp2);
}
else if (intrinsicID == SIMDIntrinsicGreaterThan)
@@ -1069,7 +1064,7 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
{
// Vector<byte>, Vector<ushort>, Vector<uint> and Vector<ulong>:
// SSE2 supports > for signed comparison. Therefore, to use it for
- // comparing unsigned numbers, we subtract a constant from both the
+ // comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using SSE2 signed
// comparison.
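The comment above describes the classic bias trick: SSE2 only has signed compares, but subtracting 2^(N-1) from both operands (with wraparound) maps unsigned order onto signed order. A standalone sketch (not part of this patch; assumes two's complement narrowing) that exhaustively checks the byte case, matching the per-lane 0x80 of the 0x80808080 constant chosen below:

#include <cstdint>
#include <cassert>

static bool unsignedGreaterViaSignedCompare(uint8_t a, uint8_t b)
{
    int8_t biasedA = (int8_t)(uint8_t)(a - 0x80); // subtract 2^7 per lane, with wraparound
    int8_t biasedB = (int8_t)(uint8_t)(b - 0x80);
    return biasedA > biasedB;                     // signed compare on the biased values
}

int main()
{
    for (int a = 0; a < 256; a++)
    {
        for (int b = 0; b < 256; b++)
        {
            assert(unsignedGreaterViaSignedCompare((uint8_t)a, (uint8_t)b) == (a > b));
        }
    }
    return 0;
}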
@@ -1082,37 +1077,37 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
ssize_t constVal = 0;
- switch(baseType)
+ switch (baseType)
{
- case TYP_UBYTE:
- constVal = 0x80808080;
- *inOutBaseType = TYP_BYTE;
- break;
- case TYP_CHAR:
- constVal = 0x80008000;
- *inOutBaseType = TYP_SHORT;
- break;
- case TYP_UINT:
- constVal = 0x80000000;
- *inOutBaseType = TYP_INT;
- break;
- case TYP_ULONG:
- constVal = 0x8000000000000000LL;
- *inOutBaseType = TYP_LONG;
- break;
- default:
- unreached();
- break;
+ case TYP_UBYTE:
+ constVal = 0x80808080;
+ *inOutBaseType = TYP_BYTE;
+ break;
+ case TYP_CHAR:
+ constVal = 0x80008000;
+ *inOutBaseType = TYP_SHORT;
+ break;
+ case TYP_UINT:
+ constVal = 0x80000000;
+ *inOutBaseType = TYP_INT;
+ break;
+ case TYP_ULONG:
+ constVal = 0x8000000000000000LL;
+ *inOutBaseType = TYP_LONG;
+ break;
+ default:
+ unreached();
+ break;
}
assert(constVal != 0);
// This transformation is not required for equality.
if (intrinsicID != SIMDIntrinsicEqual)
{
- // For constructing const vector use either long or int base type.
+ // For constructing const vector use either long or int base type.
var_types tempBaseType = (baseType == TYP_ULONG) ? TYP_LONG : TYP_INT;
- GenTree* initVal = gtNewIconNode(constVal);
- initVal->gtType = tempBaseType;
+ GenTree* initVal = gtNewIconNode(constVal);
+ initVal->gtType = tempBaseType;
GenTree* constVector = gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, tempBaseType, size);
// Assign constVector to a temp, since we intend to use it more than once
@@ -1151,19 +1146,14 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
// Return Value:
// Returns GT_SIMD tree that computes Select(vc, va, vb)
//
-GenTreePtr Compiler::impSIMDSelect(CORINFO_CLASS_HANDLE typeHnd,
- var_types baseType,
- unsigned size,
- GenTree* op1,
- GenTree* op2,
- GenTree* op3)
+GenTreePtr Compiler::impSIMDSelect(
+ CORINFO_CLASS_HANDLE typeHnd, var_types baseType, unsigned size, GenTree* op1, GenTree* op2, GenTree* op3)
{
assert(varTypeIsSIMD(op1));
var_types simdType = op1->TypeGet();
assert(op2->TypeGet() == simdType);
assert(op3->TypeGet() == simdType);
-
// Select(BitVector vc, va, vb) = (va & vc) | (vb & !vc)
// Select(op1, op2, op3) = (op2 & op1) | (op3 & !op1)
// = SIMDIntrinsicBitwiseOr(SIMDIntrinsicBitwiseAnd(op2, op1),
@@ -1181,15 +1171,15 @@ GenTreePtr Compiler::impSIMDSelect(CORINFO_CLASS_HANDLE typeHnd,
}
GenTree* andExpr = gtNewSIMDNode(simdType, op2, tmp, SIMDIntrinsicBitwiseAnd, baseType, size);
- GenTree* dupOp1 = gtCloneExpr(tmp);
+ GenTree* dupOp1 = gtCloneExpr(tmp);
assert(dupOp1 != nullptr);
GenTree* andNotExpr = gtNewSIMDNode(simdType, dupOp1, op3, SIMDIntrinsicBitwiseAndNot, baseType, size);
- GenTree* simdTree = gtNewSIMDNode(simdType, andExpr, andNotExpr, SIMDIntrinsicBitwiseOr, baseType, size);
+ GenTree* simdTree = gtNewSIMDNode(simdType, andExpr, andNotExpr, SIMDIntrinsicBitwiseOr, baseType, size);
// If asg not null, create a GT_COMMA tree.
if (asg != nullptr)
{
- simdTree = gtNewOperNode(GT_COMMA, simdTree->TypeGet(), asg, simdTree);
+ simdTree = gtNewOperNode(GT_COMMA, simdTree->TypeGet(), asg, simdTree);
}
return simdTree;
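impSIMDSelect builds the standard bit-select expression (va & vc) | (vb & ~vc) out of BitwiseAnd/BitwiseAndNot/BitwiseOr nodes. A standalone sketch (not part of this patch; plain integers standing in for vector lanes) of the identity:

#include <cstdint>
#include <cassert>

static uint32_t bitSelect(uint32_t mask, uint32_t a, uint32_t b)
{
    return (a & mask) | (b & ~mask); // (b & ~mask) is the AndNot term in the SIMD form above
}

int main()
{
    uint32_t a = 0xAAAAAAAAu;
    uint32_t b = 0x55555555u;
    assert(bitSelect(0xFFFFFFFFu, a, b) == a);            // all-ones mask picks a
    assert(bitSelect(0x00000000u, a, b) == b);            // all-zeros mask picks b
    assert(bitSelect(0xFFFF0000u, a, b) == 0xAAAA5555u);  // per-bit mix of the two
    return 0;
}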
@@ -1208,24 +1198,24 @@ GenTreePtr Compiler::impSIMDSelect(CORINFO_CLASS_HANDLE typeHnd,
// Return Value:
// Returns GT_SIMD tree that computes Max(va, vb)
//
-GenTreePtr Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId,
- CORINFO_CLASS_HANDLE typeHnd,
- var_types baseType,
- unsigned size,
- GenTree* op1,
- GenTree* op2)
+GenTreePtr Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId,
+ CORINFO_CLASS_HANDLE typeHnd,
+ var_types baseType,
+ unsigned size,
+ GenTree* op1,
+ GenTree* op2)
{
assert(intrinsicId == SIMDIntrinsicMin || intrinsicId == SIMDIntrinsicMax);
assert(varTypeIsSIMD(op1));
var_types simdType = op1->TypeGet();
assert(op2->TypeGet() == simdType);
-
+
#ifdef _TARGET_AMD64_
// SSE2 has direct support for float/double/signed word/unsigned byte.
// For other integer types we compute min/max as follows
//
// int32/uint32/int64/uint64:
- // compResult = (op1 < op2) in case of Min
+ // compResult = (op1 < op2) in case of Min
// (op1 > op2) in case of Max
// Min/Max(op1, op2) = Select(compResult, op1, op2)
//
@@ -1236,11 +1226,11 @@ GenTreePtr Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId,
// result = result + 2^15 ; readjust it back
//
// signed byte:
- // op1 = op1 + 2^7 ; to make it unsigned
+ // op1 = op1 + 2^7 ; to make it unsigned
// op1 = op1 + 2^7 ; to make it unsigned
// result = SSE2 unsigned byte Min/Max(op1, op2)
// result = result - 2^15 ; readjust it back
-
+
GenTree* simdTree = nullptr;
if (varTypeIsFloating(baseType) || baseType == TYP_SHORT || baseType == TYP_UBYTE)
@@ -1250,27 +1240,27 @@ GenTreePtr Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId,
}
else if (baseType == TYP_CHAR || baseType == TYP_BYTE)
{
- int constVal;
+ int constVal;
SIMDIntrinsicID operIntrinsic;
SIMDIntrinsicID adjustIntrinsic;
- var_types minMaxOperBaseType;
+ var_types minMaxOperBaseType;
if (baseType == TYP_CHAR)
{
- constVal = 0x80008000;
- operIntrinsic = SIMDIntrinsicSub;
- adjustIntrinsic = SIMDIntrinsicAdd;
+ constVal = 0x80008000;
+ operIntrinsic = SIMDIntrinsicSub;
+ adjustIntrinsic = SIMDIntrinsicAdd;
minMaxOperBaseType = TYP_SHORT;
}
else
{
assert(baseType == TYP_BYTE);
- constVal = 0x80808080;
- operIntrinsic = SIMDIntrinsicAdd;
- adjustIntrinsic = SIMDIntrinsicSub;
+ constVal = 0x80808080;
+ operIntrinsic = SIMDIntrinsicAdd;
+ adjustIntrinsic = SIMDIntrinsicSub;
minMaxOperBaseType = TYP_UBYTE;
}
- GenTree* initVal = gtNewIconNode(constVal);
+ GenTree* initVal = gtNewIconNode(constVal);
GenTree* constVector = gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, TYP_INT, size);
// Assign constVector to a temp, since we intend to use it more than once
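For TYP_CHAR/TYP_BYTE the code above re-biases the operands so that the one min/max SSE2 does have for small types (signed word, unsigned byte) can be reused, then shifts the result back with the opposite operation. A standalone scalar sketch (not part of this patch; assumes two's complement narrowing) checking the signed-byte case, which corresponds to the 0x80808080 constant and the Add/Sub adjust pair chosen above:

#include <cstdint>
#include <algorithm>
#include <cassert>

static int8_t signedMinViaUnsignedMin(int8_t a, int8_t b)
{
    uint8_t ua = (uint8_t)(a + 0x80);    // bias: -128..127 maps to 0..255, order preserved
    uint8_t ub = (uint8_t)(b + 0x80);
    uint8_t um = std::min(ua, ub);       // unsigned byte min, the operation SSE2 provides
    return (int8_t)(uint8_t)(um - 0x80); // readjust back to the signed range
}

int main()
{
    for (int a = -128; a <= 127; a++)
    {
        for (int b = -128; b <= 127; b++)
        {
            assert(signedMinViaUnsignedMin((int8_t)a, (int8_t)b) == std::min(a, b));
        }
    }
    return 0;
}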
@@ -1288,13 +1278,13 @@ GenTreePtr Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId,
simdTree = gtNewSIMDNode(simdType, op1, op2, intrinsicId, minMaxOperBaseType, size);
// re-adjust the value by adding or subtracting constVector
- tmp = gtNewLclvNode(tmp->AsLclVarCommon()->GetLclNum(), tmp->TypeGet());
+ tmp = gtNewLclvNode(tmp->AsLclVarCommon()->GetLclNum(), tmp->TypeGet());
simdTree = gtNewSIMDNode(simdType, simdTree, tmp, adjustIntrinsic, baseType, size);
}
else
{
- GenTree* dupOp1 = nullptr;
- GenTree* dupOp2 = nullptr;
+ GenTree* dupOp1 = nullptr;
+ GenTree* dupOp2 = nullptr;
GenTree* op1Assign = nullptr;
GenTree* op2Assign = nullptr;
unsigned op1LclNum;
@@ -1303,10 +1293,10 @@ GenTreePtr Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId,
if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0)
{
op1LclNum = lvaGrabTemp(true DEBUGARG("SIMD Min/Max"));
- dupOp1 = gtNewLclvNode(op1LclNum, op1->TypeGet());
+ dupOp1 = gtNewLclvNode(op1LclNum, op1->TypeGet());
lvaSetStruct(op1LclNum, typeHnd, false);
op1Assign = gtNewTempAssign(op1LclNum, op1);
- op1 = gtNewLclvNode(op1LclNum, op1->TypeGet());
+ op1 = gtNewLclvNode(op1LclNum, op1->TypeGet());
}
else
{
@@ -1316,41 +1306,42 @@ GenTreePtr Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId,
if ((op2->gtFlags & GTF_SIDE_EFFECT) != 0)
{
op2LclNum = lvaGrabTemp(true DEBUGARG("SIMD Min/Max"));
- dupOp2 = gtNewLclvNode(op2LclNum, op2->TypeGet());
+ dupOp2 = gtNewLclvNode(op2LclNum, op2->TypeGet());
lvaSetStruct(op2LclNum, typeHnd, false);
op2Assign = gtNewTempAssign(op2LclNum, op2);
- op2 = gtNewLclvNode(op2LclNum, op2->TypeGet());
+ op2 = gtNewLclvNode(op2LclNum, op2->TypeGet());
}
else
{
dupOp2 = gtCloneExpr(op2);
}
- SIMDIntrinsicID relOpIntrinsic = (intrinsicId == SIMDIntrinsicMin) ? SIMDIntrinsicLessThan : SIMDIntrinsicGreaterThan;
+ SIMDIntrinsicID relOpIntrinsic =
+ (intrinsicId == SIMDIntrinsicMin) ? SIMDIntrinsicLessThan : SIMDIntrinsicGreaterThan;
var_types relOpBaseType = baseType;
// compResult = op1 relOp op2
// simdTree = Select(compResult, op1, op2);
assert(dupOp1 != nullptr);
assert(dupOp2 != nullptr);
- relOpIntrinsic = impSIMDRelOp(relOpIntrinsic, typeHnd, size, &relOpBaseType, &dupOp1, &dupOp2);
- GenTree* compResult = gtNewSIMDNode(simdType, dupOp1, dupOp2, relOpIntrinsic, relOpBaseType, size);
+ relOpIntrinsic = impSIMDRelOp(relOpIntrinsic, typeHnd, size, &relOpBaseType, &dupOp1, &dupOp2);
+ GenTree* compResult = gtNewSIMDNode(simdType, dupOp1, dupOp2, relOpIntrinsic, relOpBaseType, size);
unsigned compResultLclNum = lvaGrabTemp(true DEBUGARG("SIMD Min/Max"));
lvaSetStruct(compResultLclNum, typeHnd, false);
GenTree* compResultAssign = gtNewTempAssign(compResultLclNum, compResult);
- compResult = gtNewLclvNode(compResultLclNum, compResult->TypeGet());
- simdTree = impSIMDSelect(typeHnd, baseType, size, compResult, op1, op2);
- simdTree = gtNewOperNode(GT_COMMA, simdTree->TypeGet(), compResultAssign, simdTree);
-
+ compResult = gtNewLclvNode(compResultLclNum, compResult->TypeGet());
+ simdTree = impSIMDSelect(typeHnd, baseType, size, compResult, op1, op2);
+ simdTree = gtNewOperNode(GT_COMMA, simdTree->TypeGet(), compResultAssign, simdTree);
+
// Now create comma trees if we have created assignments of op1/op2 to temps
if (op2Assign != nullptr)
{
- simdTree = gtNewOperNode(GT_COMMA, simdTree->TypeGet(), op2Assign, simdTree);
+ simdTree = gtNewOperNode(GT_COMMA, simdTree->TypeGet(), op2Assign, simdTree);
}
if (op1Assign != nullptr)
{
- simdTree = gtNewOperNode(GT_COMMA, simdTree->TypeGet(), op1Assign, simdTree);
+ simdTree = gtNewOperNode(GT_COMMA, simdTree->TypeGet(), op1Assign, simdTree);
}
}
@@ -1376,16 +1367,13 @@ GenTreePtr Compiler::impSIMDMinMax(SIMDIntrinsicID intrinsicId,
// Notes:
// This method handles the differences between the CEE_NEWOBJ and constructor cases.
//
-GenTreePtr Compiler::getOp1ForConstructor(OPCODE opcode,
- GenTreePtr newobjThis,
- CORINFO_CLASS_HANDLE clsHnd)
+GenTreePtr Compiler::getOp1ForConstructor(OPCODE opcode, GenTreePtr newobjThis, CORINFO_CLASS_HANDLE clsHnd)
{
GenTree* op1;
if (opcode == CEE_NEWOBJ)
{
op1 = newobjThis;
- assert(newobjThis->gtOper == GT_ADDR &&
- newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
+ assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
// push newobj result on type stack
unsigned tmp = op1->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
@@ -1400,25 +1388,25 @@ GenTreePtr Compiler::getOp1ForConstructor(OPCODE opcode,
}
//-------------------------------------------------------------------
-// Set the flag that indicates that the lclVar referenced by this tree
+// Set the flag that indicates that the lclVar referenced by this tree
// is used in a SIMD intrinsic.
// Arguments:
// tree - GenTreePtr
void Compiler::setLclRelatedToSIMDIntrinsic(GenTreePtr tree)
{
- assert(tree->OperIsLocal());
- unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
- LclVarDsc* lclVarDsc = &lvaTable[lclNum];
- lclVarDsc->lvUsedInSIMDIntrinsic = true;
+ assert(tree->OperIsLocal());
+ unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
+ LclVarDsc* lclVarDsc = &lvaTable[lclNum];
+ lclVarDsc->lvUsedInSIMDIntrinsic = true;
}
//-------------------------------------------------------------
// Check if two field nodes reference the same memory location.
-// Notice that this check is just based on pattern matching.
+// Notice that this check is just based on pattern matching.
// Arguments:
-// op1 - GenTreePtr.
-// op2 - GenTreePtr.
+// op1 - GenTreePtr.
+// op2 - GenTreePtr.
// Return Value:
// If op1's parent node and op2's parent node are at the same location, return true. Otherwise, return false.
@@ -1426,13 +1414,13 @@ bool areFieldsParentsLocatedSame(GenTreePtr op1, GenTreePtr op2)
{
assert(op1->OperGet() == GT_FIELD);
assert(op2->OperGet() == GT_FIELD);
-
+
GenTreePtr op1ObjRef = op1->gtField.gtFldObj;
GenTreePtr op2ObjRef = op2->gtField.gtFldObj;
while (op1ObjRef != nullptr && op2ObjRef != nullptr)
{
- if(op1ObjRef->OperGet() != op2ObjRef->OperGet())
+ if (op1ObjRef->OperGet() != op2ObjRef->OperGet())
{
break;
}
@@ -1442,14 +1430,12 @@ bool areFieldsParentsLocatedSame(GenTreePtr op1, GenTreePtr op2)
op2ObjRef = op2ObjRef->gtOp.gtOp1;
}
- if (op1ObjRef->OperIsLocal() &&
- op2ObjRef->OperIsLocal() &&
+ if (op1ObjRef->OperIsLocal() && op2ObjRef->OperIsLocal() &&
op1ObjRef->AsLclVarCommon()->GetLclNum() == op2ObjRef->AsLclVarCommon()->GetLclNum())
{
return true;
}
- else if (op1ObjRef->OperGet() == GT_FIELD &&
- op2ObjRef->OperGet() == GT_FIELD &&
+ else if (op1ObjRef->OperGet() == GT_FIELD && op2ObjRef->OperGet() == GT_FIELD &&
op1ObjRef->gtField.gtFldHnd == op2ObjRef->gtField.gtFldHnd)
{
op1ObjRef = op1ObjRef->gtField.gtFldObj;
@@ -1457,8 +1443,8 @@ bool areFieldsParentsLocatedSame(GenTreePtr op1, GenTreePtr op2)
continue;
}
else
- {
- break;
+ {
+ break;
}
}
@@ -1466,7 +1452,7 @@ bool areFieldsParentsLocatedSame(GenTreePtr op1, GenTreePtr op2)
}
//----------------------------------------------------------------------
-// Check whether two field are contiguous
+// Check whether two fields are contiguous
// Arguments:
// first - GenTreePtr. The Type of the node should be TYP_FLOAT
// second - GenTreePtr. The Type of the node should be TYP_FLOAT
@@ -1477,17 +1463,16 @@ bool areFieldsParentsLocatedSame(GenTreePtr op1, GenTreePtr op2)
bool Compiler::areFieldsContiguous(GenTreePtr first, GenTreePtr second)
{
assert(first->OperGet() == GT_FIELD);
- assert(second->OperGet() == GT_FIELD);
+ assert(second->OperGet() == GT_FIELD);
assert(first->gtType == TYP_FLOAT);
assert(second->gtType == TYP_FLOAT);
-
- var_types firstFieldType = first->gtType;
+
+ var_types firstFieldType = first->gtType;
var_types secondFieldType = second->gtType;
-
+
unsigned firstFieldEndOffset = first->gtField.gtFldOffset + genTypeSize(firstFieldType);
- unsigned secondFieldOffset = second->gtField.gtFldOffset;
- if (firstFieldEndOffset == secondFieldOffset &&
- firstFieldType == secondFieldType &&
+ unsigned secondFieldOffset = second->gtField.gtFldOffset;
+ if (firstFieldEndOffset == secondFieldOffset && firstFieldType == secondFieldType &&
areFieldsParentsLocatedSame(first, second))
{
return true;
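areFieldsContiguous reduces to offset arithmetic: the first field's end offset must equal the second field's start offset, with the same element type and the same parent. A standalone sketch (not part of this patch; the Vector3-style offsets are made up) of that check:

#include <cassert>

struct FieldInfo
{
    unsigned offset;
    unsigned size;
};

static bool fieldsContiguous(const FieldInfo& first, const FieldInfo& second)
{
    return first.offset + first.size == second.offset && first.size == second.size;
}

int main()
{
    FieldInfo x = {0, 4}; // float X at offset 0
    FieldInfo y = {4, 4}; // float Y at offset 4
    FieldInfo z = {8, 4}; // float Z at offset 8
    assert(fieldsContiguous(x, y));
    assert(fieldsContiguous(y, z));
    assert(!fieldsContiguous(x, z)); // skipping a field breaks the chain
    return 0;
}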
@@ -1504,8 +1489,8 @@ bool Compiler::areFieldsContiguous(GenTreePtr first, GenTreePtr second)
// Return Value:
// if the array element op1 is located before array element op2, and they are contiguous,
// then return true. Otherwise, return false.
-// TODO-CQ:
-// Right this can only check array element with const number as index. In future,
+// TODO-CQ:
+// Right now this can only check an array element with a constant index. In future,
// we should consider allowing this function to check the index using an expression.
bool Compiler::areArrayElementsContiguous(GenTreePtr op1, GenTreePtr op2)
@@ -1525,18 +1510,16 @@ bool Compiler::areArrayElementsContiguous(GenTreePtr op1, GenTreePtr op2)
if ((op1IndexNode->OperGet() == GT_CNS_INT && op2IndexNode->OperGet() == GT_CNS_INT) &&
op1IndexNode->gtIntCon.gtIconVal + 1 == op2IndexNode->gtIntCon.gtIconVal)
{
- if (op1ArrayRef->OperGet() == GT_FIELD &&
- op2ArrayRef->OperGet() == GT_FIELD &&
+ if (op1ArrayRef->OperGet() == GT_FIELD && op2ArrayRef->OperGet() == GT_FIELD &&
areFieldsParentsLocatedSame(op1ArrayRef, op2ArrayRef))
{
return true;
}
else if (op1ArrayRef->OperIsLocal() && op2ArrayRef->OperIsLocal() &&
- op1ArrayRef->AsLclVarCommon()->GetLclNum() == op2ArrayRef->AsLclVarCommon()->GetLclNum())
+ op1ArrayRef->AsLclVarCommon()->GetLclNum() == op2ArrayRef->AsLclVarCommon()->GetLclNum())
{
return true;
- }
-
+ }
}
return false;
}
@@ -1544,30 +1527,31 @@ bool Compiler::areArrayElementsContiguous(GenTreePtr op1, GenTreePtr op2)
//-------------------------------------------------------------------------------
// Check whether two argument nodes are contiguous or not.
// Arguments:
-// op1 - GenTreePtr.
-// op2 - GenTreePtr.
+// op1 - GenTreePtr.
+// op2 - GenTreePtr.
// Return Value:
// if the argument node op1 is located before argument node op2, and they are located contiguously,
// then return true. Otherwise, return false.
-// TODO-CQ:
+// TODO-CQ:
// Right now this can only check field and array. In future we should add more cases.
-//
+//
bool Compiler::areArgumentsContiguous(GenTreePtr op1, GenTreePtr op2)
{
- if(op1->OperGet() == GT_INDEX && op2->OperGet() == GT_INDEX)
+ if (op1->OperGet() == GT_INDEX && op2->OperGet() == GT_INDEX)
{
return areArrayElementsContiguous(op1, op2);
}
- else if(op1->OperGet() == GT_FIELD && op2->OperGet() == GT_FIELD)
+ else if (op1->OperGet() == GT_FIELD && op2->OperGet() == GT_FIELD)
{
return areFieldsContiguous(op1, op2);
}
return false;
-}
+}
//--------------------------------------------------------------------------------------------------------
-// createAddressNodeForSIMDInit: Generate the address node(GT_LEA) if we want to intialize vector2, vector3 or vector4 from first argument's address.
+// createAddressNodeForSIMDInit: Generate the address node (GT_LEA) if we want to initialize vector2, vector3 or vector4
+// from first argument's address.
//
// Arguments:
// tree - GenTreePtr. This the tree node which is used to get the address for indir.
@@ -1577,7 +1561,7 @@ bool Compiler::areArgumentsContiguous(GenTreePtr op1, GenTreePtr op2)
// Return value:
// return the address node.
//
-// TODO-CQ:
+// TODO-CQ:
// 1. Currently just support for GT_FIELD and GT_INDEX, because we can only verify the GT_INDEX node or GT_Field
// are located contiguously or not. In future we should support more cases.
// 2. Though it happens to just work fine front-end phases are not aware of GT_LEA node. Therefore, convert these
@@ -1585,20 +1569,20 @@ bool Compiler::areArgumentsContiguous(GenTreePtr op1, GenTreePtr op2)
GenTreePtr Compiler::createAddressNodeForSIMDInit(GenTreePtr tree, unsigned simdSize)
{
assert(tree->OperGet() == GT_FIELD || tree->OperGet() == GT_INDEX);
- GenTreePtr byrefNode = nullptr;
+ GenTreePtr byrefNode = nullptr;
GenTreePtr startIndex = nullptr;
- unsigned offset = 0;
- var_types baseType = tree->gtType;
-
+ unsigned offset = 0;
+ var_types baseType = tree->gtType;
+
if (tree->OperGet() == GT_FIELD)
{
GenTreePtr objRef = tree->gtField.gtFldObj;
- if(objRef != nullptr && objRef->gtOper == GT_ADDR)
+ if (objRef != nullptr && objRef->gtOper == GT_ADDR)
{
GenTreePtr obj = objRef->gtOp.gtOp1;
// If the field is directly from a struct, then in this case,
- // we should set this struct's lvUsedInSIMDIntrinsic as true,
+ // we should set this struct's lvUsedInSIMDIntrinsic as true,
            // so that this struct won't be promoted.
            // e.g. for s.x, where x is a field and s is a struct, we should set s's lvUsedInSIMDIntrinsic to true,
// so that s won't be promoted.
@@ -1607,8 +1591,8 @@ GenTreePtr Compiler::createAddressNodeForSIMDInit(GenTreePtr tree, unsigned simd
            // promoted, then this will affect the other optimizations which depend on s1's struct promotion.
// TODO-CQ:
// In future, we should optimize this case so that if there is a nested field like s1.s2.x and s1.s2.x's
- // address is used for initializing the vector, then s1 can be promoted but s2 can't.
- if(varTypeIsSIMD(obj) && obj->OperIsLocal())
+ // address is used for initializing the vector, then s1 can be promoted but s2 can't.
+ if (varTypeIsSIMD(obj) && obj->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(obj);
}
@@ -1617,43 +1601,44 @@ GenTreePtr Compiler::createAddressNodeForSIMDInit(GenTreePtr tree, unsigned simd
byrefNode = gtCloneExpr(tree->gtField.gtFldObj);
assert(byrefNode != nullptr);
offset = tree->gtField.gtFldOffset;
-
}
- else if(tree->OperGet() == GT_INDEX)
+ else if (tree->OperGet() == GT_INDEX)
{
GenTreePtr index = tree->AsIndex()->Index();
assert(index->OperGet() == GT_CNS_INT);
-
+
GenTreePtr checkIndexExpr = nullptr;
- unsigned indexVal = (unsigned)(index->gtIntCon.gtIconVal);
- offset = indexVal * genTypeSize(tree->TypeGet());
- GenTreePtr arrayRef = tree->AsIndex()->Arr();
-
+ unsigned indexVal = (unsigned)(index->gtIntCon.gtIconVal);
+ offset = indexVal * genTypeSize(tree->TypeGet());
+ GenTreePtr arrayRef = tree->AsIndex()->Arr();
+
// Generate the boundary check exception.
// The length for boundary check should be the maximum index number which should be
- // (first argument's index number) + (how many array arguments we have) - 1
+ // (first argument's index number) + (how many array arguments we have) - 1
// = indexVal + arrayElementsCount - 1
unsigned arrayElementsCount = simdSize / genTypeSize(baseType);
- checkIndexExpr = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, indexVal + arrayElementsCount - 1);
- GenTreeArrLen* arrLen = new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, arrayRef, (int)offsetof(CORINFO_Array, length));
- GenTreeBoundsChk* arrBndsChk = new (this, GT_ARR_BOUNDS_CHECK) GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, arrLen, checkIndexExpr, SCK_RNGCHK_FAIL);
+ checkIndexExpr = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, indexVal + arrayElementsCount - 1);
+ GenTreeArrLen* arrLen =
+ new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, arrayRef, (int)offsetof(CORINFO_Array, length));
+ GenTreeBoundsChk* arrBndsChk = new (this, GT_ARR_BOUNDS_CHECK)
+ GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, arrLen, checkIndexExpr, SCK_RNGCHK_FAIL);
offset += offsetof(CORINFO_Array, u1Elems);
byrefNode = gtNewOperNode(GT_COMMA, arrayRef->TypeGet(), arrBndsChk, gtCloneExpr(arrayRef));
-
}
else
{
unreached();
}
- GenTreePtr address = new (this, GT_LEA) GenTreeAddrMode(TYP_BYREF, byrefNode, startIndex, genTypeSize(tree->TypeGet()), offset);
+ GenTreePtr address =
+ new (this, GT_LEA) GenTreeAddrMode(TYP_BYREF, byrefNode, startIndex, genTypeSize(tree->TypeGet()), offset);
return address;
}
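As a side note on the arithmetic performed above for the GT_INDEX case: the routine checks the last element it will touch and folds the element offset into the GT_LEA. Below is a stand-alone sketch of that math in plain C++ rather than JIT trees; the array data offset is a placeholder value, not the real offsetof(CORINFO_Array, u1Elems).

#include <cstdio>

// Illustrative model of the offset/bounds computation in
// createAddressNodeForSIMDInit when initializing a Vector4 of float from arr[2].
int main()
{
    const unsigned simdSize   = 16; // Vector4 of float
    const unsigned elemSize   = 4;  // genTypeSize(TYP_FLOAT)
    const unsigned indexVal   = 2;  // constant index of the first argument
    const unsigned dataOffset = 16; // placeholder for offsetof(CORINFO_Array, u1Elems)

    unsigned arrayElementsCount = simdSize / elemSize;               // 4 elements
    unsigned lastIndexChecked   = indexVal + arrayElementsCount - 1; // bounds check against index 5
    unsigned byteOffset         = indexVal * elemSize + dataOffset;  // offset folded into the GT_LEA

    printf("check index %u, load from byte offset %u\n", lastIndexChecked, byteOffset);
    return 0;
}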
//-------------------------------------------------------------------------------
-// impMarkContiguousSIMDFieldAssignments: Try to identify if there are contiguous
-// assignments from SIMD field to memory. If there are, then mark the related
+// impMarkContiguousSIMDFieldAssignments: Try to identify if there are contiguous
+// assignments from SIMD field to memory. If there are, then mark the related
// lclvar so that it won't be promoted.
//
// Arguments:
@@ -1666,17 +1651,15 @@ void Compiler::impMarkContiguousSIMDFieldAssignments(GenTreePtr stmt)
return;
}
GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
- if (expr->OperGet() == GT_ASG &&
- expr->TypeGet() == TYP_FLOAT)
- {
- GenTreePtr curDst = expr->gtOp.gtOp1;
- GenTreePtr curSrc = expr->gtOp.gtOp2;
- unsigned index = 0;
- var_types baseType = TYP_UNKNOWN;
- unsigned simdSize = 0;
+ if (expr->OperGet() == GT_ASG && expr->TypeGet() == TYP_FLOAT)
+ {
+ GenTreePtr curDst = expr->gtOp.gtOp1;
+ GenTreePtr curSrc = expr->gtOp.gtOp2;
+ unsigned index = 0;
+ var_types baseType = TYP_UNKNOWN;
+ unsigned simdSize = 0;
GenTreePtr srcSimdStructNode = getSIMDStructFromField(curSrc, &baseType, &index, &simdSize, true);
- if (srcSimdStructNode == nullptr ||
- baseType != TYP_FLOAT)
+ if (srcSimdStructNode == nullptr || baseType != TYP_FLOAT)
{
fgPreviousCandidateSIMDFieldAsgStmt = nullptr;
}
@@ -1688,10 +1671,9 @@ void Compiler::impMarkContiguousSIMDFieldAssignments(GenTreePtr stmt)
{
assert(index > 0);
GenTreePtr prevAsgExpr = fgPreviousCandidateSIMDFieldAsgStmt->gtStmt.gtStmtExpr;
- GenTreePtr prevDst = prevAsgExpr->gtOp.gtOp1;
- GenTreePtr prevSrc = prevAsgExpr->gtOp.gtOp2;
- if (!areArgumentsContiguous(prevDst, curDst) ||
- !areArgumentsContiguous(prevSrc, curSrc))
+ GenTreePtr prevDst = prevAsgExpr->gtOp.gtOp1;
+ GenTreePtr prevSrc = prevAsgExpr->gtOp.gtOp2;
+ if (!areArgumentsContiguous(prevDst, curDst) || !areArgumentsContiguous(prevSrc, curSrc))
{
fgPreviousCandidateSIMDFieldAsgStmt = nullptr;
}
@@ -1722,7 +1704,6 @@ void Compiler::impMarkContiguousSIMDFieldAssignments(GenTreePtr stmt)
{
fgPreviousCandidateSIMDFieldAsgStmt = stmt;
}
-
}
}
}
@@ -1748,12 +1729,12 @@ void Compiler::impMarkContiguousSIMDFieldAssignments(GenTreePtr stmt)
// implemented as an intrinsic in the JIT, then return the tree that implements
// it.
//
-GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
- GenTreePtr newobjThis,
- CORINFO_CLASS_HANDLE clsHnd,
- CORINFO_METHOD_HANDLE methodHnd,
- CORINFO_SIG_INFO * sig,
- int memberRef)
+GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
+ GenTreePtr newobjThis,
+ CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_METHOD_HANDLE methodHnd,
+ CORINFO_SIG_INFO* sig,
+ int memberRef)
{
assert(featureSIMD);
@@ -1763,17 +1744,18 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
}
// Get base type and intrinsic Id
- var_types baseType = TYP_UNKNOWN;
- unsigned size = 0;
- unsigned argCount = 0;
- const SIMDIntrinsicInfo* intrinsicInfo = getSIMDIntrinsicInfo(&clsHnd, methodHnd, sig, (opcode == CEE_NEWOBJ), &argCount, &baseType, &size);
+ var_types baseType = TYP_UNKNOWN;
+ unsigned size = 0;
+ unsigned argCount = 0;
+ const SIMDIntrinsicInfo* intrinsicInfo =
+ getSIMDIntrinsicInfo(&clsHnd, methodHnd, sig, (opcode == CEE_NEWOBJ), &argCount, &baseType, &size);
if (intrinsicInfo == nullptr || intrinsicInfo->id == SIMDIntrinsicInvalid)
{
return nullptr;
}
SIMDIntrinsicID simdIntrinsicID = intrinsicInfo->id;
- var_types simdType;
+ var_types simdType;
if (baseType != TYP_UNKNOWN)
{
simdType = getSIMDTypeForSize(size);
@@ -1783,8 +1765,8 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
assert(simdIntrinsicID == SIMDIntrinsicHWAccel);
simdType = TYP_UNKNOWN;
}
- bool instMethod = intrinsicInfo->isInstMethod;
- var_types callType = JITtype2varType(sig->retType);
+ bool instMethod = intrinsicInfo->isInstMethod;
+ var_types callType = JITtype2varType(sig->retType);
if (callType == TYP_STRUCT)
{
// Note that here we are assuming that, if the call returns a struct, that it is the same size as the
@@ -1794,41 +1776,41 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
callType = simdType;
}
- GenTree* simdTree = nullptr;
- GenTree* op1 = nullptr;
- GenTree* op2 = nullptr;
- GenTree* op3 = nullptr;
- GenTree* retVal = nullptr;
- GenTree* copyBlkDst = nullptr;
- bool doCopyBlk = false;
+ GenTree* simdTree = nullptr;
+ GenTree* op1 = nullptr;
+ GenTree* op2 = nullptr;
+ GenTree* op3 = nullptr;
+ GenTree* retVal = nullptr;
+ GenTree* copyBlkDst = nullptr;
+ bool doCopyBlk = false;
- switch(simdIntrinsicID)
+ switch (simdIntrinsicID)
{
- case SIMDIntrinsicGetCount:
+ case SIMDIntrinsicGetCount:
{
- int length = getSIMDVectorLength(clsHnd);
+ int length = getSIMDVectorLength(clsHnd);
GenTreeIntCon* intConstTree = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, length);
- retVal = intConstTree;
+ retVal = intConstTree;
}
break;
- case SIMDIntrinsicGetZero:
+ case SIMDIntrinsicGetZero:
{
- baseType = genActualType(baseType);
- GenTree *initVal = gtNewZeroConNode(baseType);
- initVal->gtType = baseType;
- simdTree = gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, baseType, size);
- retVal = simdTree;
+ baseType = genActualType(baseType);
+ GenTree* initVal = gtNewZeroConNode(baseType);
+ initVal->gtType = baseType;
+ simdTree = gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, baseType, size);
+ retVal = simdTree;
}
break;
- case SIMDIntrinsicGetOne:
+ case SIMDIntrinsicGetOne:
{
- GenTree *initVal;
+ GenTree* initVal;
if (varTypeIsSmallInt(baseType))
{
unsigned baseSize = genTypeSize(baseType);
- int val;
+ int val;
if (baseSize == 1)
{
val = 0x01010101;
@@ -1844,18 +1826,18 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
initVal = gtNewOneConNode(baseType);
}
- baseType = genActualType(baseType);
+ baseType = genActualType(baseType);
initVal->gtType = baseType;
- simdTree = gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, baseType, size);
- retVal = simdTree;
+ simdTree = gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, baseType, size);
+ retVal = simdTree;
}
break;
- case SIMDIntrinsicGetAllOnes:
+ case SIMDIntrinsicGetAllOnes:
{
// Equivalent to (Vector<T>) new Vector<int>(0xffffffff);
- GenTree *initVal = gtNewIconNode(0xffffffff, TYP_INT);
- simdTree = gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, TYP_INT, size);
+ GenTree* initVal = gtNewIconNode(0xffffffff, TYP_INT);
+ simdTree = gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, TYP_INT, size);
if (baseType != TYP_INT)
{
// cast it to required baseType if different from TYP_INT
@@ -1865,8 +1847,8 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
}
break;
- case SIMDIntrinsicInit:
- case SIMDIntrinsicInitN:
+ case SIMDIntrinsicInit:
+ case SIMDIntrinsicInitN:
{
// SIMDIntrinsicInit:
// op2 - the initializer value
@@ -1878,55 +1860,55 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
bool initFromFirstArgIndir = false;
if (simdIntrinsicID == SIMDIntrinsicInit)
{
- op2 = impSIMDPopStack(baseType);
+ op2 = impSIMDPopStack(baseType);
}
- else
+ else
{
assert(simdIntrinsicID == SIMDIntrinsicInitN);
assert(baseType == TYP_FLOAT);
- unsigned initCount = argCount - 1;
+ unsigned initCount = argCount - 1;
unsigned elementCount = getSIMDVectorLength(size, baseType);
noway_assert(initCount == elementCount);
GenTree* nextArg = op2;
-
+
// Build a GT_LIST with the N values.
// We must maintain left-to-right order of the args, but we will pop
// them off in reverse order (the Nth arg was pushed onto the stack last).
-
- GenTree* list = nullptr;
- GenTreePtr firstArg = nullptr;
- GenTreePtr prevArg = nullptr;
- int offset = 0;
- bool areArgsContiguous = true;
+
+ GenTree* list = nullptr;
+ GenTreePtr firstArg = nullptr;
+ GenTreePtr prevArg = nullptr;
+ int offset = 0;
+ bool areArgsContiguous = true;
for (unsigned i = 0; i < initCount; i++)
- {
+ {
GenTree* nextArg = impSIMDPopStack(baseType);
if (areArgsContiguous)
- {
+ {
GenTreePtr curArg = nextArg;
- firstArg = curArg;
-
- if(prevArg != nullptr)
+ firstArg = curArg;
+
+ if (prevArg != nullptr)
{
// Recall that we are popping the args off the stack in reverse order.
areArgsContiguous = areArgumentsContiguous(curArg, prevArg);
}
- prevArg = curArg;
+ prevArg = curArg;
}
-
- list = new (this, GT_LIST) GenTreeOp(GT_LIST, baseType, nextArg, list);
+
+ list = new (this, GT_LIST) GenTreeOp(GT_LIST, baseType, nextArg, list);
}
if (areArgsContiguous && baseType == TYP_FLOAT)
{
- // Since Vector2, Vector3 and Vector4's arguments type are only float,
- // we intialize the vector from first argument address, only when
+ // Since Vector2, Vector3 and Vector4's argument types are only float,
+ // we initialize the vector from the first argument's address, only when
// the baseType is TYP_FLOAT and the arguments are located contiguously in memory
initFromFirstArgIndir = true;
GenTreePtr op2Address = createAddressNodeForSIMDInit(firstArg, size);
- var_types simdType = getSIMDTypeForSize(size);
- op2 = gtNewOperNode(GT_IND, simdType, op2Address);
+ var_types simdType = getSIMDTypeForSize(size);
+ op2 = gtNewOperNode(GT_IND, simdType, op2Address);
}
else
{
@@ -1937,7 +1919,7 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd);
assert(op1->TypeGet() == TYP_BYREF);
- assert(genActualType(op2->TypeGet()) == genActualType(baseType)||initFromFirstArgIndir);
+ assert(genActualType(op2->TypeGet()) == genActualType(baseType) || initFromFirstArgIndir);
#if AVX_WITHOUT_AVX2
// NOTE: This #define, AVX_WITHOUT_AVX2, is never defined. This code is kept here
@@ -1954,11 +1936,11 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
// address-taken and ineligible for register allocation.
//
// op2 = GT_COMMA(tmp=op2, GT_ADDR(tmp))
- unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Val addr for vector Init"));
- GenTreePtr asg = gtNewTempAssign(tmpNum, op2);
- GenTreePtr tmp = gtNewLclvNode(tmpNum, op2->TypeGet());
- tmp = gtNewOperNode(GT_ADDR, TYP_BYREF,tmp);
- op2 = gtNewOperNode(GT_COMMA, TYP_BYREF, asg, tmp);
+ unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Val addr for vector Init"));
+ GenTreePtr asg = gtNewTempAssign(tmpNum, op2);
+ GenTreePtr tmp = gtNewLclvNode(tmpNum, op2->TypeGet());
+ tmp = gtNewOperNode(GT_ADDR, TYP_BYREF, tmp);
+ op2 = gtNewOperNode(GT_COMMA, TYP_BYREF, asg, tmp);
}
}
#endif
@@ -1970,7 +1952,7 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
assert(simdIntrinsicID == SIMDIntrinsicInit);
unsigned baseSize = genTypeSize(baseType);
- int multiplier;
+ int multiplier;
if (baseSize == 1)
{
multiplier = 0x01010101;
@@ -2003,10 +1985,10 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
assert(baseType == TYP_UBYTE || baseType == TYP_CHAR);
t1 = gtNewCastNode(TYP_INT, op2, TYP_INT);
}
-
+
assert(t1 != nullptr);
GenTree* t2 = gtNewIconNode(multiplier, TYP_INT);
- op2 = gtNewOperNode(GT_MUL, TYP_INT, t1, t2);
+ op2 = gtNewOperNode(GT_MUL, TYP_INT, t1, t2);
// Construct a vector of TYP_INT with the new initializer and cast it back to vector of baseType
simdTree = gtNewSIMDNode(simdType, op2, nullptr, simdIntrinsicID, TYP_INT, size);
@@ -2032,14 +2014,14 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
}
copyBlkDst = op1;
- doCopyBlk = true;
+ doCopyBlk = true;
}
break;
- case SIMDIntrinsicInitArray:
- case SIMDIntrinsicInitArrayX:
- case SIMDIntrinsicCopyToArray:
- case SIMDIntrinsicCopyToArrayX:
+ case SIMDIntrinsicInitArray:
+ case SIMDIntrinsicInitArrayX:
+ case SIMDIntrinsicCopyToArray:
+ case SIMDIntrinsicCopyToArrayX:
{
// op3 - index into array in case of SIMDIntrinsicCopyToArrayX and SIMDIntrinsicInitArrayX
// op2 - array itself
@@ -2051,7 +2033,7 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
// 1. If we have an index, we must do a check on that first.
// We can't combine it with the index + vectorLength check because
// a. It might be negative, and b. It may need to raise a different exception
- // (captured as SCK_ARG_RNG_EXCPN for CopyTo and SCK_RNGCHK_FAIL for Init).
+ // (captured as SCK_ARG_RNG_EXCPN for CopyTo and SCK_RNGCHK_FAIL for Init).
// 2. We need to generate a check (SCK_ARG_EXCPN for CopyTo and SCK_RNGCHK_FAIL for Init)
// for the last array element we will access.
// We'll either check against (vectorLength - 1) or (index + vectorLength - 1).
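Restated as ordinary control flow, the two checks described above look roughly like the sketch below. The function name and signature are invented for illustration; only the ordering of the checks and the exception kinds come from the comment.

#include <stdexcept>

// Hypothetical restatement of the CopyTo/Init argument checks.
void checkVectorArrayArgs(int index, unsigned arrayLength, unsigned vectorLength, bool isCopyTo)
{
    // Check 1: the raw index, which may be negative
    // (SCK_ARG_RNG_EXCPN for CopyTo, SCK_RNGCHK_FAIL for Init).
    if (index < 0 || (unsigned)index >= arrayLength)
    {
        throw std::out_of_range(isCopyTo ? "ArgumentOutOfRange" : "IndexOutOfRange");
    }

    // Check 2: the last element that will be accessed
    // (SCK_ARG_EXCPN for CopyTo, SCK_RNGCHK_FAIL for Init).
    if ((unsigned)index + vectorLength - 1 >= arrayLength)
    {
        throw std::length_error(isCopyTo ? "Argument" : "IndexOutOfRange");
    }
}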
@@ -2080,8 +2062,8 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
op2 = impSIMDPopStack(TYP_REF);
assert(op2->TypeGet() == TYP_REF);
GenTree* arrayRefForArgChk = op2;
- GenTree* argRngChk = nullptr;
- GenTree* asg = nullptr;
+ GenTree* argRngChk = nullptr;
+ GenTree* asg = nullptr;
if ((arrayRefForArgChk->gtFlags & GTF_SIDE_EFFECT) != 0)
{
op2 = fgInsertCommaFormTemp(&arrayRefForArgChk);
@@ -2120,8 +2102,10 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
op3 = gtCloneExpr(index);
}
- GenTreeArrLen* arrLen = new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, arrayRefForArgRngChk, (int)offsetof(CORINFO_Array, length));
- argRngChk = new (this, GT_ARR_BOUNDS_CHECK) GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, arrLen, index, op3CheckKind);
+ GenTreeArrLen* arrLen = new (this, GT_ARR_LENGTH)
+ GenTreeArrLen(TYP_INT, arrayRefForArgRngChk, (int)offsetof(CORINFO_Array, length));
+ argRngChk = new (this, GT_ARR_BOUNDS_CHECK)
+ GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, arrLen, index, op3CheckKind);
// Now, clone op3 to create another node for the argChk
GenTree* index2 = gtCloneExpr(op3);
assert(index != nullptr);
@@ -2139,8 +2123,10 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
{
op2CheckKind = SCK_ARG_EXCPN;
}
- GenTreeArrLen* arrLen = new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, arrayRefForArgChk, (int)offsetof(CORINFO_Array, length));
- GenTreeBoundsChk* argChk = new (this, GT_ARR_BOUNDS_CHECK) GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, arrLen, checkIndexExpr, op2CheckKind);
+ GenTreeArrLen* arrLen = new (this, GT_ARR_LENGTH)
+ GenTreeArrLen(TYP_INT, arrayRefForArgChk, (int)offsetof(CORINFO_Array, length));
+ GenTreeBoundsChk* argChk = new (this, GT_ARR_BOUNDS_CHECK)
+ GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, arrLen, checkIndexExpr, op2CheckKind);
// Create a GT_COMMA tree for the bounds check(s).
op2 = gtNewOperNode(GT_COMMA, op2->TypeGet(), argChk, op2);
@@ -2151,30 +2137,33 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
if (simdIntrinsicID == SIMDIntrinsicInitArray || simdIntrinsicID == SIMDIntrinsicInitArrayX)
{
- op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd);
- simdTree = gtNewSIMDNode(simdType, op2, op3, SIMDIntrinsicInitArray, baseType, size);
+ op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd);
+ simdTree = gtNewSIMDNode(simdType, op2, op3, SIMDIntrinsicInitArray, baseType, size);
copyBlkDst = op1;
- doCopyBlk = true;
+ doCopyBlk = true;
}
else
{
assert(simdIntrinsicID == SIMDIntrinsicCopyToArray || simdIntrinsicID == SIMDIntrinsicCopyToArrayX);
- op1 = impSIMDPopStack(simdType, instMethod);
+ op1 = impSIMDPopStack(simdType, instMethod);
assert(op1->TypeGet() == simdType);
// copy vector (op1) to array (op2) starting at index (op3)
simdTree = op1;
-
- // TODO-Cleanup: Though it happens to just work fine front-end phases are not aware of GT_LEA node. Therefore, convert these to use GT_ADDR .
- copyBlkDst = new (this, GT_LEA) GenTreeAddrMode(TYP_BYREF, op2, op3, genTypeSize(baseType), offsetof(CORINFO_Array, u1Elems));
- doCopyBlk = true;
+
+ // TODO-Cleanup: Though it happens to just work fine, front-end phases are not aware of the GT_LEA node.
+ // Therefore, convert these to use GT_ADDR.
+ copyBlkDst = new (this, GT_LEA)
+ GenTreeAddrMode(TYP_BYREF, op2, op3, genTypeSize(baseType), offsetof(CORINFO_Array, u1Elems));
+ doCopyBlk = true;
}
}
break;
- case SIMDIntrinsicInitFixed:
+ case SIMDIntrinsicInitFixed:
{
- // We are initializing a fixed-length vector VLarge with a smaller fixed-length vector VSmall, plus 1 or 2 additional floats.
+ // We are initializing a fixed-length vector VLarge with a smaller fixed-length vector VSmall, plus 1 or 2
+ // additional floats.
// op4 (optional) - float value for VLarge.W, if VLarge is Vector4, and VSmall is Vector2
// op3 - float value for VLarge.Z or VLarge.W
// op2 - VSmall
@@ -2200,7 +2189,8 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd);
// We are going to redefine the operands so that:
- // - op3 is the value that's going into the Z position, or null if it's a Vector4 constructor with a single operand, and
+ // - op3 is the value that's going into the Z position, or null if it's a Vector4 constructor with a single
+ // operand, and
// - op4 is the W position value, or null if this is a Vector3 constructor.
if (size == 16 && argCount == 3)
{
@@ -2219,12 +2209,12 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
}
copyBlkDst = op1;
- doCopyBlk = true;
+ doCopyBlk = true;
}
break;
- case SIMDIntrinsicOpEquality:
- case SIMDIntrinsicInstEquals:
+ case SIMDIntrinsicOpEquality:
+ case SIMDIntrinsicInstEquals:
{
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType, instMethod);
@@ -2233,44 +2223,44 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
assert(op2->TypeGet() == simdType);
simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, SIMDIntrinsicOpEquality, baseType, size);
- retVal = simdTree;
+ retVal = simdTree;
}
break;
-
- case SIMDIntrinsicOpInEquality:
+
+ case SIMDIntrinsicOpInEquality:
{
// op1 is the first operand
// op2 is the second operand
- op2 = impSIMDPopStack(simdType);
- op1 = impSIMDPopStack(simdType, instMethod);
+ op2 = impSIMDPopStack(simdType);
+ op1 = impSIMDPopStack(simdType, instMethod);
simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, SIMDIntrinsicOpInEquality, baseType, size);
- retVal = simdTree;
+ retVal = simdTree;
}
- break;
-
- case SIMDIntrinsicEqual:
- case SIMDIntrinsicLessThan:
- case SIMDIntrinsicLessThanOrEqual:
- case SIMDIntrinsicGreaterThan:
- case SIMDIntrinsicGreaterThanOrEqual:
- {
+ break;
+
+ case SIMDIntrinsicEqual:
+ case SIMDIntrinsicLessThan:
+ case SIMDIntrinsicLessThanOrEqual:
+ case SIMDIntrinsicGreaterThan:
+ case SIMDIntrinsicGreaterThanOrEqual:
+ {
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType, instMethod);
SIMDIntrinsicID intrinsicID = impSIMDRelOp(simdIntrinsicID, clsHnd, size, &baseType, &op1, &op2);
- simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, intrinsicID, baseType, size);
- retVal = simdTree;
+ simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, intrinsicID, baseType, size);
+ retVal = simdTree;
}
break;
- case SIMDIntrinsicAdd:
- case SIMDIntrinsicSub:
- case SIMDIntrinsicMul:
- case SIMDIntrinsicDiv:
- case SIMDIntrinsicBitwiseAnd:
- case SIMDIntrinsicBitwiseAndNot:
- case SIMDIntrinsicBitwiseOr:
- case SIMDIntrinsicBitwiseXor:
+ case SIMDIntrinsicAdd:
+ case SIMDIntrinsicSub:
+ case SIMDIntrinsicMul:
+ case SIMDIntrinsicDiv:
+ case SIMDIntrinsicBitwiseAnd:
+ case SIMDIntrinsicBitwiseAndNot:
+ case SIMDIntrinsicBitwiseOr:
+ case SIMDIntrinsicBitwiseXor:
{
#if defined(_TARGET_AMD64_) && defined(DEBUG)
// check for the cases where we don't support intrinsics.
@@ -2291,7 +2281,7 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
return nullptr;
}
}
-
+
// common to all integer type vectors
if (simdIntrinsicID == SIMDIntrinsicDiv)
{
@@ -2308,11 +2298,11 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
op1 = impSIMDPopStack(simdType, instMethod);
simdTree = gtNewSIMDNode(simdType, op1, op2, simdIntrinsicID, baseType, size);
- retVal = simdTree;
+ retVal = simdTree;
}
break;
- case SIMDIntrinsicSelect:
+ case SIMDIntrinsicSelect:
{
// op3 is a SIMD variable that is the second source
// op2 is a SIMD variable that is the first source
@@ -2325,8 +2315,8 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
}
break;
- case SIMDIntrinsicMin:
- case SIMDIntrinsicMax:
+ case SIMDIntrinsicMin:
+ case SIMDIntrinsicMax:
{
// op1 is the first operand; if instance method, op1 is "this" arg
// op2 is the second operand
@@ -2337,12 +2327,12 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
}
break;
- case SIMDIntrinsicGetItem:
+ case SIMDIntrinsicGetItem:
{
// op1 is a SIMD variable that is "this" arg
// op2 is an index of TYP_INT
- op2 = impSIMDPopStack(TYP_INT);
- op1 = impSIMDPopStack(simdType, instMethod);
+ op2 = impSIMDPopStack(TYP_INT);
+ op1 = impSIMDPopStack(simdType, instMethod);
unsigned int vectorLength = getSIMDVectorLength(size, baseType);
if (!op2->IsCnsIntOrI() || op2->AsIntCon()->gtIconVal >= vectorLength)
{
@@ -2358,8 +2348,9 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
op2 = gtCloneExpr(index);
}
- GenTree* lengthNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, vectorLength);
- GenTreeBoundsChk* simdChk = new (this, GT_SIMD_CHK) GenTreeBoundsChk(GT_SIMD_CHK, TYP_VOID, lengthNode, index, SCK_RNGCHK_FAIL);
+ GenTree* lengthNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, vectorLength);
+ GenTreeBoundsChk* simdChk =
+ new (this, GT_SIMD_CHK) GenTreeBoundsChk(GT_SIMD_CHK, TYP_VOID, lengthNode, index, SCK_RNGCHK_FAIL);
// Create a GT_COMMA tree for the bounds check.
op2 = gtNewOperNode(GT_COMMA, op2->TypeGet(), simdChk, op2);
@@ -2369,16 +2360,16 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
assert(op2->TypeGet() == TYP_INT);
simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, simdIntrinsicID, baseType, size);
- retVal = simdTree;
+ retVal = simdTree;
}
break;
- case SIMDIntrinsicDotProduct:
+ case SIMDIntrinsicDotProduct:
{
#if defined(_TARGET_AMD64_) && defined(DEBUG)
// Right now dot product is supported only for float vectors.
// See SIMDIntrinsicList.h for supported base types for this intrinsic.
- if (!varTypeIsFloating(baseType))
+ if (!varTypeIsFloating(baseType))
{
assert(!"Dot product on integer type vectors not supported");
return nullptr;
@@ -2391,21 +2382,21 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
op1 = impSIMDPopStack(simdType, instMethod);
simdTree = gtNewSIMDNode(baseType, op1, op2, simdIntrinsicID, baseType, size);
- retVal = simdTree;
+ retVal = simdTree;
}
break;
- case SIMDIntrinsicSqrt:
+ case SIMDIntrinsicSqrt:
{
#if defined(_TARGET_AMD64_) && defined(DEBUG)
- // SSE/AVX doesn't support sqrt on integer type vectors and hence
- // should never be seen as an intrinsic here. See SIMDIntrinsicList.h
- // for supported base types for this intrinsic.
- if (!varTypeIsFloating(baseType))
- {
- assert(!"Sqrt not supported on integer vectors\n");
- return nullptr;
- }
+ // SSE/AVX doesn't support sqrt on integer type vectors and hence
+ // should never be seen as an intrinsic here. See SIMDIntrinsicList.h
+ // for supported base types for this intrinsic.
+ if (!varTypeIsFloating(baseType))
+ {
+ assert(!"Sqrt not supported on integer vectors\n");
+ return nullptr;
+ }
#endif // _TARGET_AMD64_ && DEBUG
op1 = impSIMDPopStack(simdType);
@@ -2414,8 +2405,8 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
}
break;
- case SIMDIntrinsicAbs:
- {
+ case SIMDIntrinsicAbs:
+ {
op1 = impSIMDPopStack(simdType);
#ifdef _TARGET_AMD64_
@@ -2428,21 +2419,21 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
{
float f;
static_assert_no_msg(sizeof(float) == sizeof(int));
- *((int *)&f) = 0x7fffffff;
- bitMask = gtNewDconNode(f);
+ *((int*)&f) = 0x7fffffff;
+ bitMask = gtNewDconNode(f);
}
else if (baseType == TYP_DOUBLE)
{
double d;
static_assert_no_msg(sizeof(double) == sizeof(__int64));
*((__int64*)&d) = 0x7fffffffffffffffLL;
- bitMask = gtNewDconNode(d);
+ bitMask = gtNewDconNode(d);
}
assert(bitMask != nullptr);
- bitMask->gtType = baseType;
+ bitMask->gtType = baseType;
GenTree* bitMaskVector = gtNewSIMDNode(simdType, bitMask, SIMDIntrinsicInit, baseType, size);
- retVal = gtNewSIMDNode(simdType, op1, bitMaskVector, SIMDIntrinsicBitwiseAnd, baseType, size);
+ retVal = gtNewSIMDNode(simdType, op1, bitMaskVector, SIMDIntrinsicBitwiseAnd, baseType, size);
}
else if (baseType == TYP_CHAR || baseType == TYP_UBYTE || baseType == TYP_UINT || baseType == TYP_ULONG)
{
@@ -2457,40 +2448,40 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
unreached();
}
-#else //!_TARGET_AMD64_
+#else //!_TARGET_AMD64_
assert(!"Abs intrinsic on non-Amd64 target not implemented");
unreached();
-#endif //!_TARGET_AMD64_
+#endif //!_TARGET_AMD64_
}
break;
- case SIMDIntrinsicGetW:
- retVal = impSIMDGetFixed(simdType, baseType, size, 3);
- break;
+ case SIMDIntrinsicGetW:
+ retVal = impSIMDGetFixed(simdType, baseType, size, 3);
+ break;
- case SIMDIntrinsicGetZ:
- retVal = impSIMDGetFixed(simdType, baseType, size, 2);
- break;
+ case SIMDIntrinsicGetZ:
+ retVal = impSIMDGetFixed(simdType, baseType, size, 2);
+ break;
- case SIMDIntrinsicGetY:
- retVal = impSIMDGetFixed(simdType, baseType, size, 1);
- break;
+ case SIMDIntrinsicGetY:
+ retVal = impSIMDGetFixed(simdType, baseType, size, 1);
+ break;
- case SIMDIntrinsicGetX:
- retVal = impSIMDGetFixed(simdType, baseType, size, 0);
- break;
+ case SIMDIntrinsicGetX:
+ retVal = impSIMDGetFixed(simdType, baseType, size, 0);
+ break;
- case SIMDIntrinsicSetW:
- case SIMDIntrinsicSetZ:
- case SIMDIntrinsicSetY:
- case SIMDIntrinsicSetX:
- {
+ case SIMDIntrinsicSetW:
+ case SIMDIntrinsicSetZ:
+ case SIMDIntrinsicSetY:
+ case SIMDIntrinsicSetX:
+ {
// op2 is the value to be set at indexTemp position
// op1 is SIMD vector that is going to be modified, which is a byref
// If op1 has a side-effect, then don't make it an intrinsic.
// It would be inefficient to read the entire vector into an xmm reg,
- // modify it and write back entire xmm reg.
+ // modify it and write back entire xmm reg.
//
// TODO-CQ: revisit this later.
op1 = impStackTop(1).val;
@@ -2505,32 +2496,32 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
GenTree* src = gtCloneExpr(op1);
assert(src != nullptr);
simdTree = gtNewSIMDNode(simdType, src, op2, simdIntrinsicID, baseType, size);
-
+
copyBlkDst = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
- doCopyBlk = true;
+ doCopyBlk = true;
}
break;
- // Unary operators that take and return a Vector.
- case SIMDIntrinsicCast:
+ // Unary operators that take and return a Vector.
+ case SIMDIntrinsicCast:
{
op1 = impSIMDPopStack(simdType, instMethod);
simdTree = gtNewSIMDNode(simdType, op1, nullptr, simdIntrinsicID, baseType, size);
- retVal = simdTree;
+ retVal = simdTree;
}
break;
- case SIMDIntrinsicHWAccel:
+ case SIMDIntrinsicHWAccel:
{
GenTreeIntCon* intConstTree = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, 1);
- retVal = intConstTree;
+ retVal = intConstTree;
}
break;
- default:
- assert(!"Unimplemented SIMD Intrinsic");
- return nullptr;
+ default:
+ assert(!"Unimplemented SIMD Intrinsic");
+ return nullptr;
}
#ifdef _TARGET_AMD64_
@@ -2548,11 +2539,8 @@ GenTreePtr Compiler::impSIMDIntrinsic(OPCODE opcode,
// be a simple store or assignment.
if (doCopyBlk)
{
- retVal = gtNewBlkOpNode(GT_COPYBLK,
- copyBlkDst,
- gtNewOperNode(GT_ADDR, TYP_BYREF, simdTree),
- gtNewIconNode(getSIMDTypeSizeInBytes(clsHnd)),
- false);
+ retVal = gtNewBlkOpNode(GT_COPYBLK, copyBlkDst, gtNewOperNode(GT_ADDR, TYP_BYREF, simdTree),
+ gtNewIconNode(getSIMDTypeSizeInBytes(clsHnd)), false);
retVal->gtFlags |= ((simdTree->gtFlags | copyBlkDst->gtFlags) & GTF_ALL_EFFECT);
}
diff --git a/src/jit/simd.h b/src/jit/simd.h
index ffa5f07e3f..c68899e412 100644
--- a/src/jit/simd.h
+++ b/src/jit/simd.h
@@ -2,43 +2,42 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#ifndef _SIMD_H_
#define _SIMD_H_
#ifdef FEATURE_SIMD
#ifdef DEBUG
-extern const char * const simdIntrinsicNames [];
+extern const char* const simdIntrinsicNames[];
#endif
-enum SIMDIntrinsicID
+enum SIMDIntrinsicID
{
- #define SIMD_INTRINSIC(m, i, id, n, r, ac, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) SIMDIntrinsic##id ,
- #include "simdintrinsiclist.h"
+#define SIMD_INTRINSIC(m, i, id, n, r, ac, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) SIMDIntrinsic##id,
+#include "simdintrinsiclist.h"
};
// Static info about a SIMD intrinsic
struct SIMDIntrinsicInfo
{
SIMDIntrinsicID id;
- const char *methodName;
- bool isInstMethod;
- var_types retType;
- unsigned char argCount;
- var_types argType[SIMD_INTRINSIC_MAX_MODELED_PARAM_COUNT];
- var_types supportedBaseTypes[SIMD_INTRINSIC_MAX_BASETYPE_COUNT];
+ const char* methodName;
+ bool isInstMethod;
+ var_types retType;
+ unsigned char argCount;
+ var_types argType[SIMD_INTRINSIC_MAX_MODELED_PARAM_COUNT];
+ var_types supportedBaseTypes[SIMD_INTRINSIC_MAX_BASETYPE_COUNT];
};
#ifdef _TARGET_AMD64_
-//SSE2 Shuffle control byte to shuffle vector <W, Z, Y, X>
-//These correspond to shuffle immediate byte in shufps SSE2 instruction.
-#define SHUFFLE_XXXX 0x00
-#define SHUFFLE_ZWYX 0xB1
-#define SHUFFLE_WWYY 0xF5
-#define SHUFFLE_ZZXX 0xA0
+// SSE2 Shuffle control byte to shuffle vector <W, Z, Y, X>
+// These correspond to the shuffle immediate byte in the shufps SSE2 instruction.
+#define SHUFFLE_XXXX 0x00
+#define SHUFFLE_ZWYX 0xB1
+#define SHUFFLE_WWYY 0xF5
+#define SHUFFLE_ZZXX 0xA0
#endif
-#endif //FEATURE_SIMD
+#endif // FEATURE_SIMD
#endif //_SIMD_H_
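For readers unfamiliar with the shufps encoding, the constants above pack four 2-bit source-lane selectors into a single byte. The sketch below only verifies that packing; the mapping of macro names to lanes follows the <W, Z, Y, X> display convention mentioned in the comment and is not modeled here.

#include <cstdint>

// Packs four 2-bit lane selectors into a shufps immediate:
// selectors occupy imm[7:6], imm[5:4], imm[3:2] and imm[1:0].
constexpr uint8_t PackShuffle(unsigned s3, unsigned s2, unsigned s1, unsigned s0)
{
    return (uint8_t)((s3 << 6) | (s2 << 4) | (s1 << 2) | s0);
}

static_assert(PackShuffle(0, 0, 0, 0) == 0x00, "matches SHUFFLE_XXXX");
static_assert(PackShuffle(2, 3, 0, 1) == 0xB1, "matches SHUFFLE_ZWYX");
static_assert(PackShuffle(3, 3, 1, 1) == 0xF5, "matches SHUFFLE_WWYY");
static_assert(PackShuffle(2, 2, 0, 0) == 0xA0, "matches SHUFFLE_ZZXX");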
diff --git a/src/jit/simdcodegenxarch.cpp b/src/jit/simdcodegenxarch.cpp
index 2c22d48267..14c4493f07 100644
--- a/src/jit/simdcodegenxarch.cpp
+++ b/src/jit/simdcodegenxarch.cpp
@@ -32,9 +32,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// - bits 6 and 7 of the immediate indicate which source item to select (0..3)
// - bits 4 and 5 of the immediate indicate which target item to insert into (0..3)
// - bits 0 to 3 of the immediate indicate which target item to zero
-#define INSERTPS_SOURCE_SELECT(i) (i<<6)
-#define INSERTPS_TARGET_SELECT(i) (i<<4)
-#define INSERTPS_ZERO(i) (1<<i)
+#define INSERTPS_SOURCE_SELECT(i) (i << 6)
+#define INSERTPS_TARGET_SELECT(i) (i << 4)
+#define INSERTPS_ZERO(i) (1 << i)
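As an illustration of how these macros compose, the immediate used later in genSIMDScalarMove for the SMT_ZeroInitUpper case works out as follows. This is a stand-alone sketch, not JIT code.

#include <cstdio>

// insertps immediate layout per the comment above: bits 7:6 select the source
// element, bits 5:4 select the target element, bits 3:0 zero target elements.
int main()
{
    unsigned imm = (0u << 4)  // INSERTPS_TARGET_SELECT(0)
                 | (1u << 1)  // INSERTPS_ZERO(1)
                 | (1u << 2)  // INSERTPS_ZERO(2)
                 | (1u << 3); // INSERTPS_ZERO(3)

    // Copies source element 0 into target element 0 and zeroes elements 1..3.
    printf("insertps immediate = 0x%02X\n", imm); // prints 0x0E
    return 0;
}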
// getOpForSIMDIntrinsic: return the opcode for the given SIMD Intrinsic
//
@@ -42,21 +42,18 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// intrinsicId - SIMD intrinsic Id
// baseType - Base type of the SIMD vector
// immed - Out param. Any immediate byte operand that needs to be passed to SSE2 opcode
-//
+//
//
// Return Value:
// Instruction (op) to be used, and immed is set if instruction requires an immediate operand.
//
-instruction
-CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId,
- var_types baseType,
- unsigned *ival /*=nullptr*/)
+instruction CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival /*=nullptr*/)
{
// Minimal required instruction set is SSE2.
assert(compiler->canUseSSE2());
instruction result = INS_invalid;
- switch(intrinsicId)
+ switch (intrinsicId)
{
case SIMDIntrinsicInit:
if (compiler->canUseAVX())
@@ -70,17 +67,34 @@ CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId,
}
switch (baseType)
{
- case TYP_FLOAT: result = INS_vbroadcastss; break;
- case TYP_DOUBLE: result = INS_vbroadcastsd; break;
- case TYP_ULONG: __fallthrough;
- case TYP_LONG: result = INS_vpbroadcastq; break;
- case TYP_UINT: __fallthrough;
- case TYP_INT: result = INS_vpbroadcastd; break;
- case TYP_CHAR: __fallthrough;
- case TYP_SHORT: result = INS_vpbroadcastw; break;
- case TYP_UBYTE: __fallthrough;
- case TYP_BYTE: result = INS_vpbroadcastb; break;
- default: unreached();
+ case TYP_FLOAT:
+ result = INS_vbroadcastss;
+ break;
+ case TYP_DOUBLE:
+ result = INS_vbroadcastsd;
+ break;
+ case TYP_ULONG:
+ __fallthrough;
+ case TYP_LONG:
+ result = INS_vpbroadcastq;
+ break;
+ case TYP_UINT:
+ __fallthrough;
+ case TYP_INT:
+ result = INS_vpbroadcastd;
+ break;
+ case TYP_CHAR:
+ __fallthrough;
+ case TYP_SHORT:
+ result = INS_vpbroadcastw;
+ break;
+ case TYP_UBYTE:
+ __fallthrough;
+ case TYP_BYTE:
+ result = INS_vpbroadcastb;
+ break;
+ default:
+ unreached();
}
break;
}
@@ -102,7 +116,7 @@ CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId,
else if (baseType == TYP_LONG || baseType == TYP_ULONG)
{
// We don't have a separate SSE2 instruction and will
- // use the instruction meant for doubles since it is
+ // use the instruction meant for doubles since it is
// of the same size as a long.
result = INS_shufpd;
}
@@ -117,7 +131,7 @@ CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId,
{
result = INS_sqrtpd;
}
- else
+ else
{
unreached();
}
@@ -296,13 +310,13 @@ CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId,
assert(baseType != TYP_INT);
if (baseType == TYP_FLOAT)
- {
+ {
result = INS_cmpps;
assert(ival != nullptr);
*ival = 1;
}
else if (baseType == TYP_DOUBLE)
- {
+ {
result = INS_cmppd;
assert(ival != nullptr);
*ival = 1;
@@ -314,13 +328,13 @@ CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId,
assert(baseType != TYP_INT);
if (baseType == TYP_FLOAT)
- {
+ {
result = INS_cmpps;
assert(ival != nullptr);
*ival = 2;
}
else if (baseType == TYP_DOUBLE)
- {
+ {
result = INS_cmppd;
assert(ival != nullptr);
*ival = 2;
@@ -335,7 +349,7 @@ CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId,
if (baseType == TYP_INT)
{
result = INS_pcmpgtd;
- }
+ }
else if (baseType == TYP_SHORT)
{
result = INS_pcmpgtw;
@@ -460,8 +474,7 @@ CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId,
// Notes:
// This is currently only supported for floating point types.
//
-void
-CodeGen::genSIMDScalarMove(var_types type, regNumber targetReg, regNumber srcReg, SIMDScalarMoveType moveType)
+void CodeGen::genSIMDScalarMove(var_types type, regNumber targetReg, regNumber srcReg, SIMDScalarMoveType moveType)
{
var_types targetType = compiler->getSIMDVectorType();
assert(varTypeIsFloating(type));
@@ -470,45 +483,46 @@ CodeGen::genSIMDScalarMove(var_types type, regNumber targetReg, regNumber srcReg
{
switch (moveType)
{
- case SMT_PreserveUpper:
- if (srcReg != targetReg)
- {
- instruction ins = ins_Store(type);
- if (getEmitter()->IsThreeOperandMoveAVXInstruction(ins))
- {
- // In general, when we use a three-operands move instruction, we want to merge the src with
- // itself. This is an exception in that we actually want the "merge" behavior, so we must
- // specify it with all 3 operands.
- inst_RV_RV_RV(ins, targetReg, targetReg, srcReg, emitTypeSize(targetType));
- }
- else
+ case SMT_PreserveUpper:
+ if (srcReg != targetReg)
{
- inst_RV_RV(ins, targetReg, srcReg, targetType, emitTypeSize(targetType));
+ instruction ins = ins_Store(type);
+ if (getEmitter()->IsThreeOperandMoveAVXInstruction(ins))
+ {
+ // In general, when we use a three-operand move instruction, we want to merge the src with
+ // itself. This is an exception in that we actually want the "merge" behavior, so we must
+ // specify it with all 3 operands.
+ inst_RV_RV_RV(ins, targetReg, targetReg, srcReg, emitTypeSize(targetType));
+ }
+ else
+ {
+ inst_RV_RV(ins, targetReg, srcReg, targetType, emitTypeSize(targetType));
+ }
}
- }
- break;
+ break;
- case SMT_ZeroInitUpper:
+ case SMT_ZeroInitUpper:
{
// insertps is a 128-bit only instruction, and clears the upper 128 bits, which is what we want.
// The insertpsImm selects which fields are copied and zero'd of the lower 128 bits, so we choose
// to zero all but the lower bits.
- unsigned int insertpsImm = (INSERTPS_TARGET_SELECT(0) | INSERTPS_ZERO(1) | INSERTPS_ZERO(2) | INSERTPS_ZERO(3));
+ unsigned int insertpsImm =
+ (INSERTPS_TARGET_SELECT(0) | INSERTPS_ZERO(1) | INSERTPS_ZERO(2) | INSERTPS_ZERO(3));
inst_RV_RV_IV(INS_insertps, EA_16BYTE, targetReg, srcReg, insertpsImm);
break;
}
- case SMT_ZeroInitUpper_SrcHasUpperZeros:
- if (srcReg != targetReg)
- {
- instruction ins = ins_Copy(type);
- assert(!getEmitter()->IsThreeOperandMoveAVXInstruction(ins));
- inst_RV_RV(ins, targetReg, srcReg, targetType, emitTypeSize(targetType));
- }
- break;
+ case SMT_ZeroInitUpper_SrcHasUpperZeros:
+ if (srcReg != targetReg)
+ {
+ instruction ins = ins_Copy(type);
+ assert(!getEmitter()->IsThreeOperandMoveAVXInstruction(ins));
+ inst_RV_RV(ins, targetReg, srcReg, targetType, emitTypeSize(targetType));
+ }
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
}
else
@@ -518,45 +532,44 @@ CodeGen::genSIMDScalarMove(var_types type, regNumber targetReg, regNumber srcReg
switch (moveType)
{
- case SMT_PreserveUpper:
- if (srcReg != targetReg)
- {
- inst_RV_RV(ins_Store(type), targetReg, srcReg, targetType, emitTypeSize(targetType));
- }
- break;
+ case SMT_PreserveUpper:
+ if (srcReg != targetReg)
+ {
+ inst_RV_RV(ins_Store(type), targetReg, srcReg, targetType, emitTypeSize(targetType));
+ }
+ break;
- case SMT_ZeroInitUpper:
- if (srcReg == targetReg)
- {
- // There is no guarantee that upper bits of op1Reg are zero.
- // We achieve this by using left logical shift 12-bytes and right logical shift 12 bytes.
- instruction ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftLeftInternal, type);
- getEmitter()->emitIns_R_I(ins, EA_16BYTE, srcReg, 12);
- ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftRightInternal, type);
- getEmitter()->emitIns_R_I(ins, EA_16BYTE, srcReg, 12);
- }
- else
- {
- genSIMDZero(targetType, TYP_FLOAT, targetReg);
- inst_RV_RV(ins_Store(type), targetReg, srcReg);
- }
- break;
+ case SMT_ZeroInitUpper:
+ if (srcReg == targetReg)
+ {
+ // There is no guarantee that upper bits of op1Reg are zero.
+ // We achieve this by using a left logical shift of 12 bytes followed by a right logical shift of 12 bytes.
+ instruction ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftLeftInternal, type);
+ getEmitter()->emitIns_R_I(ins, EA_16BYTE, srcReg, 12);
+ ins = getOpForSIMDIntrinsic(SIMDIntrinsicShiftRightInternal, type);
+ getEmitter()->emitIns_R_I(ins, EA_16BYTE, srcReg, 12);
+ }
+ else
+ {
+ genSIMDZero(targetType, TYP_FLOAT, targetReg);
+ inst_RV_RV(ins_Store(type), targetReg, srcReg);
+ }
+ break;
- case SMT_ZeroInitUpper_SrcHasUpperZeros:
- if (srcReg != targetReg)
- {
- inst_RV_RV(ins_Copy(type), targetReg, srcReg, targetType, emitTypeSize(targetType));
- }
- break;
+ case SMT_ZeroInitUpper_SrcHasUpperZeros:
+ if (srcReg != targetReg)
+ {
+ inst_RV_RV(ins_Copy(type), targetReg, srcReg, targetType, emitTypeSize(targetType));
+ }
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
}
}
-void
-CodeGen::genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg)
+void CodeGen::genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg)
{
// pxor reg, reg
instruction ins = getOpForSIMDIntrinsic(SIMDIntrinsicBitwiseXor, baseType);
@@ -572,18 +585,17 @@ CodeGen::genSIMDZero(var_types targetType, var_types baseType, regNumber targetR
// Return Value:
// None.
//
-void
-CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
{
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicInit);
- GenTree* op1 = simdNode->gtGetOp1();
- var_types baseType = simdNode->gtSIMDBaseType;
+ GenTree* op1 = simdNode->gtGetOp1();
+ var_types baseType = simdNode->gtSIMDBaseType;
regNumber targetReg = simdNode->gtRegNum;
assert(targetReg != REG_NA);
- var_types targetType = simdNode->TypeGet();
- InstructionSet iset = compiler->getSIMDInstructionSet();
- unsigned size = simdNode->gtSIMDSize;
+ var_types targetType = simdNode->TypeGet();
+ InstructionSet iset = compiler->getSIMDInstructionSet();
+ unsigned size = simdNode->gtSIMDSize;
// Should never see small int base type vectors except for zero initialization.
noway_assert(!varTypeIsSmallInt(baseType) || op1->IsIntegralConst(0));
@@ -592,7 +604,7 @@ CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
if (op1->isContained())
{
if (op1->IsIntegralConst(0) || op1->IsFPZero())
- {
+ {
genSIMDZero(targetType, baseType, targetReg);
}
else if (varTypeIsIntegral(baseType) && op1->IsIntegralConst(-1))
@@ -614,7 +626,8 @@ CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
else if (op1->OperIsLocalAddr())
{
unsigned offset = (op1->OperGet() == GT_LCL_FLD_ADDR) ? op1->gtLclFld.gtLclOffs : 0;
- getEmitter()->emitIns_R_S(ins, emitTypeSize(targetType), targetReg, op1->gtLclVarCommon.gtLclNum, offset);
+ getEmitter()->emitIns_R_S(ins, emitTypeSize(targetType), targetReg, op1->gtLclVarCommon.gtLclNum,
+ offset);
}
else
{
@@ -626,8 +639,7 @@ CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
else if (iset == InstructionSet_AVX && ((size == 32) || (size == 16)))
{
regNumber srcReg = genConsumeReg(op1);
- if (baseType == TYP_INT || baseType == TYP_UINT ||
- baseType == TYP_LONG || baseType == TYP_ULONG)
+ if (baseType == TYP_INT || baseType == TYP_UINT || baseType == TYP_LONG || baseType == TYP_ULONG)
{
ins = ins_CopyIntToFloat(baseType, TYP_FLOAT);
assert(ins != INS_invalid);
@@ -643,26 +655,25 @@ CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
// If we reach here, op1 is not contained and we are using SSE or it is a SubRegisterSIMDType.
// In either case we are going to use the SSE2 shuffle instruction.
- regNumber op1Reg = genConsumeReg(op1);
- unsigned shuffleControl = 0;
+ regNumber op1Reg = genConsumeReg(op1);
+ unsigned shuffleControl = 0;
if (compiler->isSubRegisterSIMDType(simdNode))
{
- assert(baseType == TYP_FLOAT);
+ assert(baseType == TYP_FLOAT);
// We cannot assume that the upper bits of op1Reg or targetReg are zero.
// Therefore we need to explicitly zero out upper bits. This is
// essential for the shuffle operation performed below.
//
- // If op1 is a float/double constant, we would have loaded it from
+ // If op1 is a float/double constant, we would have loaded it from
// data section using movss/sd. Similarly if op1 is a memory op we
// would have loaded it using movss/sd. Movss/sd when loading a xmm reg
// from memory would zero-out upper bits. In these cases we can
// avoid explicitly zero'ing out targetReg if targetReg and op1Reg are the same or do it more efficiently
// if they are not the same.
- SIMDScalarMoveType moveType = op1->IsCnsFltOrDbl() || op1->isMemoryOp()
- ? SMT_ZeroInitUpper_SrcHasUpperZeros
- : SMT_ZeroInitUpper;
+ SIMDScalarMoveType moveType =
+ op1->IsCnsFltOrDbl() || op1->isMemoryOp() ? SMT_ZeroInitUpper_SrcHasUpperZeros : SMT_ZeroInitUpper;
genSIMDScalarMove(TYP_FLOAT, targetReg, op1Reg, moveType);
@@ -675,20 +686,19 @@ CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
shuffleControl = 0x40;
}
else
- {
+ {
noway_assert(!"Unexpected size for SIMD type");
}
}
else // Vector<T>
- {
+ {
if (op1Reg != targetReg)
- {
+ {
if (varTypeIsFloating(baseType))
{
ins = ins_Copy(targetType);
}
- else if (baseType == TYP_INT || baseType == TYP_UINT ||
- baseType == TYP_LONG || baseType == TYP_ULONG)
+ else if (baseType == TYP_INT || baseType == TYP_UINT || baseType == TYP_LONG || baseType == TYP_ULONG)
{
ins = ins_CopyIntToFloat(baseType, TYP_FLOAT);
}
@@ -715,10 +725,9 @@ CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
// Return Value:
// None.
//
-void
-CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
{
- assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicInitN);
+ assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicInitN);
// Right now this intrinsic is supported only on TYP_FLOAT vectors
var_types baseType = simdNode->gtSIMDBaseType;
@@ -729,8 +738,8 @@ CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
var_types targetType = simdNode->TypeGet();
- // Note that we cannot use targetReg before consumed all source operands. Therefore,
- // Need an internal register to stitch together all the values into a single vector
+ // Note that we cannot use targetReg until all source operands have been consumed. Therefore,
+ // we need an internal register to stitch together all the values into a single vector
// in an XMM reg.
assert(simdNode->gtRsvdRegs != RBM_NONE);
assert(genCountBits(simdNode->gtRsvdRegs) == 1);
@@ -744,19 +753,19 @@ CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
}
unsigned int baseTypeSize = genTypeSize(baseType);
- instruction insLeftShift = getOpForSIMDIntrinsic(SIMDIntrinsicShiftLeftInternal, baseType);
+ instruction insLeftShift = getOpForSIMDIntrinsic(SIMDIntrinsicShiftLeftInternal, baseType);
// We will first consume the list items in execution (left to right) order,
// and record the registers.
regNumber operandRegs[SIMD_INTRINSIC_MAX_PARAM_COUNT];
- unsigned initCount = 0;
+ unsigned initCount = 0;
for (GenTree* list = simdNode->gtGetOp1(); list != nullptr; list = list->gtGetOp2())
{
assert(list->OperGet() == GT_LIST);
GenTree* listItem = list->gtGetOp1();
assert(listItem->TypeGet() == baseType);
assert(!listItem->isContained());
- regNumber operandReg = genConsumeReg(listItem);
+ regNumber operandReg = genConsumeReg(listItem);
operandRegs[initCount] = operandReg;
initCount++;
}
@@ -773,7 +782,7 @@ CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
regNumber operandReg = operandRegs[initCount - i - 1];
if (offset != 0)
- {
+ {
getEmitter()->emitIns_R_I(insLeftShift, EA_16BYTE, vectorReg, baseTypeSize);
}
genSIMDScalarMove(baseType, vectorReg, operandReg, SMT_PreserveUpper);
@@ -782,7 +791,7 @@ CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
}
noway_assert(offset == simdNode->gtSIMDSize);
-
+
// Load the initialized value.
if (targetReg != vectorReg)
{
@@ -800,19 +809,18 @@ CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
// Return Value:
// None.
//
-void
-CodeGen::genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode)
{
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicSqrt || simdNode->gtSIMDIntrinsicID == SIMDIntrinsicCast);
- GenTree* op1 = simdNode->gtGetOp1();
- var_types baseType = simdNode->gtSIMDBaseType;
+ GenTree* op1 = simdNode->gtGetOp1();
+ var_types baseType = simdNode->gtSIMDBaseType;
regNumber targetReg = simdNode->gtRegNum;
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
- regNumber op1Reg = genConsumeReg(op1);
- instruction ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType);
+ regNumber op1Reg = genConsumeReg(op1);
+ instruction ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType);
if (simdNode->gtSIMDIntrinsicID != SIMDIntrinsicCast || targetReg != op1Reg)
{
inst_RV_RV(ins, targetReg, op1Reg, targetType, emitActualTypeSize(targetType));
@@ -830,41 +838,34 @@ CodeGen::genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode)
// Return Value:
// None.
//
-void
-CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
{
- assert( simdNode->gtSIMDIntrinsicID == SIMDIntrinsicAdd ||
- simdNode->gtSIMDIntrinsicID == SIMDIntrinsicSub ||
- simdNode->gtSIMDIntrinsicID == SIMDIntrinsicMul ||
- simdNode->gtSIMDIntrinsicID == SIMDIntrinsicDiv ||
- simdNode->gtSIMDIntrinsicID == SIMDIntrinsicBitwiseAnd ||
- simdNode->gtSIMDIntrinsicID == SIMDIntrinsicBitwiseAndNot ||
- simdNode->gtSIMDIntrinsicID == SIMDIntrinsicBitwiseOr ||
- simdNode->gtSIMDIntrinsicID == SIMDIntrinsicBitwiseXor ||
- simdNode->gtSIMDIntrinsicID == SIMDIntrinsicMin ||
- simdNode->gtSIMDIntrinsicID == SIMDIntrinsicMax
- );
-
- GenTree* op1 = simdNode->gtGetOp1();
- GenTree* op2 = simdNode->gtGetOp2();
- var_types baseType = simdNode->gtSIMDBaseType;
+ assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicAdd || simdNode->gtSIMDIntrinsicID == SIMDIntrinsicSub ||
+ simdNode->gtSIMDIntrinsicID == SIMDIntrinsicMul || simdNode->gtSIMDIntrinsicID == SIMDIntrinsicDiv ||
+ simdNode->gtSIMDIntrinsicID == SIMDIntrinsicBitwiseAnd ||
+ simdNode->gtSIMDIntrinsicID == SIMDIntrinsicBitwiseAndNot ||
+ simdNode->gtSIMDIntrinsicID == SIMDIntrinsicBitwiseOr ||
+ simdNode->gtSIMDIntrinsicID == SIMDIntrinsicBitwiseXor || simdNode->gtSIMDIntrinsicID == SIMDIntrinsicMin ||
+ simdNode->gtSIMDIntrinsicID == SIMDIntrinsicMax);
+
+ GenTree* op1 = simdNode->gtGetOp1();
+ GenTree* op2 = simdNode->gtGetOp2();
+ var_types baseType = simdNode->gtSIMDBaseType;
regNumber targetReg = simdNode->gtRegNum;
assert(targetReg != REG_NA);
- var_types targetType = simdNode->TypeGet();
- InstructionSet iset = compiler->getSIMDInstructionSet();
+ var_types targetType = simdNode->TypeGet();
+ InstructionSet iset = compiler->getSIMDInstructionSet();
genConsumeOperands(simdNode);
- regNumber op1Reg = op1->gtRegNum;
- regNumber op2Reg = op2->gtRegNum;
+ regNumber op1Reg = op1->gtRegNum;
+ regNumber op2Reg = op2->gtRegNum;
regNumber otherReg = op2Reg;
// Vector<Int>.Mul:
// SSE2 doesn't have an instruction to perform this operation directly
// whereas SSE4.1 does (pmulld). This is special cased and computed
// as follows.
- if (simdNode->gtSIMDIntrinsicID == SIMDIntrinsicMul &&
- baseType == TYP_INT &&
- iset == InstructionSet_SSE2)
+ if (simdNode->gtSIMDIntrinsicID == SIMDIntrinsicMul && baseType == TYP_INT && iset == InstructionSet_SSE2)
{
// We need a temporary register that is NOT the same as the target,
// and we MAY need another.
@@ -874,14 +875,13 @@ CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
regMaskTP tmpRegsMask = simdNode->gtRsvdRegs;
regMaskTP tmpReg1Mask = genFindLowestBit(tmpRegsMask);
tmpRegsMask &= ~tmpReg1Mask;
- regNumber tmpReg = genRegNumFromMask(tmpReg1Mask);
+ regNumber tmpReg = genRegNumFromMask(tmpReg1Mask);
regNumber tmpReg2 = genRegNumFromMask(tmpRegsMask);
// The register allocator guarantees the following conditions:
// - the only registers that may be the same among op1Reg, op2Reg, tmpReg
// and tmpReg2 are op1Reg and op2Reg.
// Let's be extra-careful and assert that now.
- assert((op1Reg != tmpReg) && (op1Reg != tmpReg2) &&
- (op2Reg != tmpReg) && (op2Reg != tmpReg2) &&
+ assert((op1Reg != tmpReg) && (op1Reg != tmpReg2) && (op2Reg != tmpReg) && (op2Reg != tmpReg2) &&
(tmpReg != tmpReg2));
// We will start by setting things up so that:
@@ -956,8 +956,8 @@ CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
getEmitter()->emitIns_R_I(INS_psrldq, emitActualTypeSize(targetType), tmpReg, 4);
// tmp = unsigned double word multiply of targetReg and tmpReg. Essentially
- // tmpReg[63:0] = op1[1] * op2[1]
- // tmpReg[127:64] = op1[3] * op2[3]
+ // tmpReg[63:0] = op1[1] * op2[1]
+ // tmpReg[127:64] = op1[3] * op2[3]
inst_RV_RV(INS_pmuludq, tmpReg, targetReg, targetType, emitActualTypeSize(targetType));
// Extract first and third double word results from tmpReg
@@ -965,7 +965,7 @@ CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
getEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(targetType), tmpReg, tmpReg, 0x08);
// targetReg[63:0] = op1[0] * op2[0]
- // targetReg[127:64] = op1[2] * op2[2]
+ // targetReg[127:64] = op1[2] * op2[2]
inst_RV_RV(INS_movaps, targetReg, op1Reg, targetType, emitActualTypeSize(targetType));
inst_RV_RV(INS_pmuludq, targetReg, op2Reg, targetType, emitActualTypeSize(targetType));
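To make the SSE2 lowering above easier to follow, here is a scalar model of its data flow. The lane values are made up, and the final interleave of the partial results (not shown in this hunk) is summarized as a single array write.

#include <cstdint>
#include <cstdio>

// Scalar model of Vector<int> multiply on SSE2: pmuludq multiplies the even
// 32-bit lanes into 64-bit products, the odd lanes are reached by shifting the
// operands right by 4 bytes, and the low 32 bits of each product are kept.
int main()
{
    uint32_t op1[4] = {7, 11, 13, 17};
    uint32_t op2[4] = {3, 5, 2, 4};

    // "targetReg" after pmuludq: products of lanes 0 and 2.
    uint64_t evenLo = (uint64_t)op1[0] * op2[0];
    uint64_t evenHi = (uint64_t)op1[2] * op2[2];

    // "tmpReg" after psrldq 4 + pmuludq: products of lanes 1 and 3.
    uint64_t oddLo = (uint64_t)op1[1] * op2[1];
    uint64_t oddHi = (uint64_t)op1[3] * op2[3];

    // pshufd/unpack steps recombine the low dwords of the four products.
    uint32_t result[4] = {(uint32_t)evenLo, (uint32_t)oddLo, (uint32_t)evenHi, (uint32_t)oddHi};

    for (int i = 0; i < 4; i++)
    {
        printf("result[%d] = %u\n", i, result[i]);
    }
    return 0;
}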
@@ -980,11 +980,9 @@ CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
{
instruction ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType);
- //Currently AVX doesn't support integer.
- //if the ins is INS_cvtsi2ss or INS_cvtsi2sd, we won't use AVX.
- if (op1Reg != targetReg &&
- compiler->canUseAVX() &&
- !(ins == INS_cvtsi2ss || ins == INS_cvtsi2sd) &&
+ // Currently AVX doesn't support integer types.
+ // If the ins is INS_cvtsi2ss or INS_cvtsi2sd, we won't use AVX.
+ if (op1Reg != targetReg && compiler->canUseAVX() && !(ins == INS_cvtsi2ss || ins == INS_cvtsi2sd) &&
getEmitter()->IsThreeOperandAVXInstruction(ins))
{
inst_RV_RV_RV(ins, targetReg, op1Reg, op2Reg, emitActualTypeSize(targetType));
@@ -1032,30 +1030,28 @@ CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
// Return Value:
// None.
//
-void
-CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
{
- GenTree* op1 = simdNode->gtGetOp1();
- GenTree* op2 = simdNode->gtGetOp2();
- var_types baseType = simdNode->gtSIMDBaseType;
+ GenTree* op1 = simdNode->gtGetOp1();
+ GenTree* op2 = simdNode->gtGetOp2();
+ var_types baseType = simdNode->gtSIMDBaseType;
regNumber targetReg = simdNode->gtRegNum;
assert(targetReg != REG_NA);
- var_types targetType = simdNode->TypeGet();
- InstructionSet iset = compiler->getSIMDInstructionSet();
+ var_types targetType = simdNode->TypeGet();
+ InstructionSet iset = compiler->getSIMDInstructionSet();
genConsumeOperands(simdNode);
- regNumber op1Reg = op1->gtRegNum;
- regNumber op2Reg = op2->gtRegNum;
+ regNumber op1Reg = op1->gtRegNum;
+ regNumber op2Reg = op2->gtRegNum;
regNumber otherReg = op2Reg;
- switch(simdNode->gtSIMDIntrinsicID)
+ switch (simdNode->gtSIMDIntrinsicID)
{
- case SIMDIntrinsicEqual:
- case SIMDIntrinsicGreaterThan:
+ case SIMDIntrinsicEqual:
+ case SIMDIntrinsicGreaterThan:
{
// SSE2: vector<(u)long> relation op should be implemented in terms of TYP_INT comparison operations
- assert(((iset == InstructionSet_AVX) || (baseType != TYP_LONG)) &&
- (baseType != TYP_ULONG));
+ assert(((iset == InstructionSet_AVX) || (baseType != TYP_LONG)) && (baseType != TYP_ULONG));
// Greater-than: Floating point vectors use "<" with swapped operands
if (simdNode->gtSIMDIntrinsicID == SIMDIntrinsicGreaterThan)
@@ -1063,8 +1059,8 @@ CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
assert(!varTypeIsFloating(baseType));
}
- unsigned ival = 0;
- instruction ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType, &ival);
+ unsigned ival = 0;
+ instruction ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType, &ival);
// targetReg = op1reg > op2reg
// Therefore, we can optimize if op1Reg == targetReg
@@ -1093,14 +1089,14 @@ CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
}
break;
- case SIMDIntrinsicLessThan:
- case SIMDIntrinsicLessThanOrEqual:
+ case SIMDIntrinsicLessThan:
+ case SIMDIntrinsicLessThanOrEqual:
{
// Int vectors use ">" and ">=" with swapped operands
assert(varTypeIsFloating(baseType));
// Get the instruction opcode for compare operation
- unsigned ival;
+ unsigned ival;
instruction ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType, &ival);
// targetReg = op1reg RelOp op2reg
@@ -1114,9 +1110,9 @@ CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
}
break;
- // (In)Equality that produces bool result instead of a bit vector
- case SIMDIntrinsicOpEquality:
- case SIMDIntrinsicOpInEquality:
+ // (In)Equality that produces bool result instead of a bit vector
+ case SIMDIntrinsicOpEquality:
+ case SIMDIntrinsicOpInEquality:
{
assert(genIsValidIntReg(targetReg));
@@ -1127,12 +1123,14 @@ CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
regMaskTP tmpRegsMask = simdNode->gtRsvdRegs;
regMaskTP tmpReg1Mask = genFindLowestBit(tmpRegsMask);
tmpRegsMask &= ~tmpReg1Mask;
- regNumber tmpReg1 = genRegNumFromMask(tmpReg1Mask);
- regNumber tmpReg2 = genRegNumFromMask(tmpRegsMask);
+ regNumber tmpReg1 = genRegNumFromMask(tmpReg1Mask);
+ regNumber tmpReg2 = genRegNumFromMask(tmpRegsMask);
var_types simdType = op1->TypeGet();
// TODO-1stClassStructs: Temporary to minimize asmDiffs
if (simdType == TYP_DOUBLE)
+ {
simdType = TYP_SIMD8;
+ }
// Here we should consider TYP_SIMD12 operands as if they were TYP_SIMD16
// since both the operands will be in XMM registers.
@@ -1157,8 +1155,9 @@ CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
}
// For all integer types we can use TYP_INT comparison.
- unsigned ival = 0;
- instruction ins = getOpForSIMDIntrinsic(SIMDIntrinsicEqual, varTypeIsFloating(baseType) ? baseType : TYP_INT, &ival);
+ unsigned ival = 0;
+ instruction ins =
+ getOpForSIMDIntrinsic(SIMDIntrinsicEqual, varTypeIsFloating(baseType) ? baseType : TYP_INT, &ival);
if (varTypeIsFloating(baseType))
{
@@ -1168,7 +1167,7 @@ CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
{
inst_RV_RV(ins, tmpReg1, otherReg, simdType, emitActualTypeSize(simdType));
}
-
+
// If we have 32 bytes, start by anding the two 16-byte halves to get a 16-byte result.
if (compiler->canUseAVX() && (simdType == TYP_SIMD32))
{
@@ -1179,7 +1178,7 @@ CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
// tmpReg2[128..255] <- 0
// tmpReg2[0..127] <- tmpReg1[128..255]
// - vandps tmpReg1, tmpReg2
- // This will zero-out upper portion of tmpReg1 and
+ // This will zero-out upper portion of tmpReg1 and
// lower portion of tmpReg1 is and of upper and lower 128-bit comparison result.
getEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, tmpReg2, tmpReg1, 0x01);
inst_RV_RV(INS_andps, tmpReg1, tmpReg2, simdType, emitActualTypeSize(simdType));
@@ -1225,22 +1224,22 @@ CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
// movzx targetReg, targetReg
//
getEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, targetReg, 0xFFFFFFFF);
- inst_RV((simdNode->gtSIMDIntrinsicID == SIMDIntrinsicOpEquality) ? INS_sete : INS_setne, targetReg, TYP_INT, EA_1BYTE);
+ inst_RV((simdNode->gtSIMDIntrinsicID == SIMDIntrinsicOpEquality) ? INS_sete : INS_setne, targetReg, TYP_INT,
+ EA_1BYTE);
assert(simdNode->TypeGet() == TYP_INT);
// Set the higher bytes to 0
inst_RV_RV(ins_Move_Extend(TYP_UBYTE, true), targetReg, targetReg, TYP_UBYTE, emitTypeSize(TYP_UBYTE));
}
break;
- default:
- noway_assert(!"Unimplemented SIMD relational operation.");
- unreached();
+ default:
+ noway_assert(!"Unimplemented SIMD relational operation.");
+ unreached();
}
genProduceReg(simdNode);
}
-
//--------------------------------------------------------------------------------
// genSIMDIntrinsicDotProduct: Generate code for SIMD Intrinsic Dot Product.
//
@@ -1250,20 +1249,21 @@ CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
// Return Value:
// None.
//
-void
-CodeGen::genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode)
{
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicDotProduct);
- GenTree* op1 = simdNode->gtGetOp1();
- GenTree* op2 = simdNode->gtGetOp2();
+ GenTree* op1 = simdNode->gtGetOp1();
+ GenTree* op2 = simdNode->gtGetOp2();
var_types baseType = simdNode->gtSIMDBaseType;
var_types simdType = op1->TypeGet();
// TODO-1stClassStructs: Temporary to minimize asmDiffs
if (simdType == TYP_DOUBLE)
+ {
simdType = TYP_SIMD8;
+ }
var_types simdEvalType = (simdType == TYP_SIMD12) ? TYP_SIMD16 : simdType;
- regNumber targetReg = simdNode->gtRegNum;
+ regNumber targetReg = simdNode->gtRegNum;
assert(targetReg != REG_NA);
// DotProduct is only supported on floating point types.
@@ -1295,7 +1295,7 @@ CodeGen::genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode)
{
tmpReg = tmpReg1;
}
- else
+ else
{
assert(targetReg != tmpReg2);
tmpReg = tmpReg2;
@@ -1306,7 +1306,7 @@ CodeGen::genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode)
if (compiler->getSIMDInstructionSet() == InstructionSet_SSE2)
{
- // We avoid reg move if either op1Reg == targetReg or op2Reg == targetReg
+ // We avoid reg move if either op1Reg == targetReg or op2Reg == targetReg
if (op1Reg == targetReg)
{
// Best case
@@ -1330,7 +1330,7 @@ CodeGen::genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode)
// // position
// tmp = shuffle(tmp, tmp, Shuffle(2,3,0,1)) // tmp = (2, 3, 0, 1)
// v0 = v0 + tmp // v0 = (3+2, 2+3, 1+0, 0+1)
- // tmp = v0
+ // tmp = v0
// tmp = shuffle(tmp, tmp, Shuffle(0,1,2,3)) // tmp = (0+1, 1+0, 2+3, 3+2)
// v0 = v0 + tmp // v0 = (0+1+2+3, 0+1+2+3, 0+1+2+3, 0+1+2+3)
// // Essentially horizontal addition of all elements.
@@ -1427,24 +1427,23 @@ CodeGen::genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode)
// Return Value:
// None.
//
-void
-CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
{
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicGetItem);
- GenTree* op1 = simdNode->gtGetOp1();
- GenTree* op2 = simdNode->gtGetOp2();
- var_types simdType = op1->TypeGet();
+ GenTree* op1 = simdNode->gtGetOp1();
+ GenTree* op2 = simdNode->gtGetOp2();
+ var_types simdType = op1->TypeGet();
assert(varTypeIsSIMD(simdType));
- // op1 of TYP_SIMD12 should be considered as TYP_SIMD16,
+ // op1 of TYP_SIMD12 should be considered as TYP_SIMD16,
// since it is in XMM register.
if (simdType == TYP_SIMD12)
{
simdType = TYP_SIMD16;
}
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->gtSIMDBaseType;
regNumber targetReg = simdNode->gtRegNum;
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -1455,7 +1454,7 @@ CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
// - the index of the value to be returned.
genConsumeOperands(simdNode);
regNumber srcReg = op1->gtRegNum;
-
+
// SSE2 doesn't have an instruction to implement this intrinsic if the index is not a constant.
// For the non-constant case, we will use the SIMD temp location to store the vector, and
// the load the desired element.
@@ -1465,28 +1464,28 @@ CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
{
unsigned simdInitTempVarNum = compiler->lvaSIMDInitTempVarNum;
noway_assert(simdInitTempVarNum != BAD_VAR_NUM);
- bool isEBPbased;
- unsigned offs = compiler->lvaFrameAddress(simdInitTempVarNum, &isEBPbased);
+ bool isEBPbased;
+ unsigned offs = compiler->lvaFrameAddress(simdInitTempVarNum, &isEBPbased);
regNumber indexReg = op2->gtRegNum;
// Store the vector to the temp location.
- getEmitter()->emitIns_S_R(ins_Store(simdType, compiler->isSIMDTypeLocalAligned(simdInitTempVarNum)),
- emitTypeSize(simdType), srcReg, simdInitTempVarNum, 0);
+ getEmitter()->emitIns_S_R(ins_Store(simdType, compiler->isSIMDTypeLocalAligned(simdInitTempVarNum)),
+ emitTypeSize(simdType), srcReg, simdInitTempVarNum, 0);
// Now, load the desired element.
- getEmitter()->emitIns_R_ARX(ins_Move_Extend(baseType, false), // Load
- emitTypeSize(baseType), // Of the vector baseType
- targetReg, // To targetReg
- (isEBPbased) ? REG_EBP : REG_ESP, // Stack-based
- indexReg, // Indexed
- genTypeSize(baseType), // by the size of the baseType
+ getEmitter()->emitIns_R_ARX(ins_Move_Extend(baseType, false), // Load
+ emitTypeSize(baseType), // Of the vector baseType
+ targetReg, // To targetReg
+ (isEBPbased) ? REG_EBP : REG_ESP, // Stack-based
+ indexReg, // Indexed
+ genTypeSize(baseType), // by the size of the baseType
offs);
genProduceReg(simdNode);
return;
}
noway_assert(op2->isContained());
- unsigned int index = (unsigned int) op2->gtIntCon.gtIconVal;
+ unsigned int index = (unsigned int)op2->gtIntCon.gtIconVal;
unsigned int byteShiftCnt = index * genTypeSize(baseType);
// In general we shouldn't have an index greater than or equal to the length of the vector.
@@ -1507,8 +1506,7 @@ CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
}
else
{
- assert((byteShiftCnt == 0) ||
- varTypeIsFloating(baseType) ||
+ assert((byteShiftCnt == 0) || varTypeIsFloating(baseType) ||
(varTypeIsSmallInt(baseType) && (byteShiftCnt < 16)));
}
@@ -1536,7 +1534,7 @@ CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
// 1) baseType is floating point
// movaps targetReg, srcReg
// psrldq targetReg, byteShiftCnt <-- not generated if accessing zero'th element
- //
+ //
// 2) baseType is not floating point
// movaps tmpReg, srcReg <-- not generated if accessing zero'th element
// OR if tmpReg == srcReg
@@ -1555,8 +1553,8 @@ CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
getEmitter()->emitIns_R_I(ins, emitActualTypeSize(simdType), targetReg, byteShiftCnt);
}
}
- else
- {
+ else
+ {
if (varTypeIsSmallInt(baseType))
{
// Note that pextrw extracts 16-bit value by index and zero extends it to 32-bits.
@@ -1582,7 +1580,7 @@ CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
bool ZeroOrSignExtnReqd = true;
if (baseSize == 1)
- {
+ {
if ((op2->gtIntCon.gtIconVal % 2) == 1)
{
// Right shift the extracted word by 8 bits if the index is odd, since we are extracting a byte-sized element.
@@ -1593,13 +1591,13 @@ CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
}
// else - we just need to zero/sign extend the byte since pextrw extracted 16-bits
}
- else
+ else
{
// Since Pextrw zero extends to 32-bits, we need sign extension in case of TYP_SHORT
assert(baseSize == 2);
ZeroOrSignExtnReqd = (baseType == TYP_SHORT);
}
-
+
if (ZeroOrSignExtnReqd)
{
// Zero/sign extend the byte/short to 32-bits
@@ -1609,7 +1607,7 @@ CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
else
{
// We need a temp xmm register if the baseType is not floating point and
- // accessing non-zero'th element.
+ // accessing non-zero'th element.
instruction ins;
if (byteShiftCnt != 0)
@@ -1650,28 +1648,27 @@ CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
//
// TODO-CQ: Use SIMDIntrinsicShuffleSSE2 for the SSE2 case.
//
-void
-CodeGen::genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode)
{
// Determine index based on intrinsic ID
int index = -1;
- switch(simdNode->gtSIMDIntrinsicID)
+ switch (simdNode->gtSIMDIntrinsicID)
{
- case SIMDIntrinsicSetX:
- index = 0;
- break;
- case SIMDIntrinsicSetY:
- index = 1;
- break;
- case SIMDIntrinsicSetZ:
- index = 2;
- break;
- case SIMDIntrinsicSetW:
- index = 3;
- break;
+ case SIMDIntrinsicSetX:
+ index = 0;
+ break;
+ case SIMDIntrinsicSetY:
+ index = 1;
+ break;
+ case SIMDIntrinsicSetZ:
+ index = 2;
+ break;
+ case SIMDIntrinsicSetW:
+ index = 3;
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
assert(index != -1);
@@ -1680,7 +1677,7 @@ CodeGen::genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode)
GenTree* op1 = simdNode->gtGetOp1();
GenTree* op2 = simdNode->gtGetOp2();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->gtSIMDBaseType;
regNumber targetReg = simdNode->gtRegNum;
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -1708,7 +1705,7 @@ CodeGen::genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode)
noway_assert(baseType == TYP_FLOAT);
if (compiler->getSIMDInstructionSet() == InstructionSet_SSE2)
- {
+ {
// We need one additional int register as scratch
assert(simdNode->gtRsvdRegs != RBM_NONE);
assert(genCountBits(simdNode->gtRsvdRegs) == 1);
@@ -1722,15 +1719,15 @@ CodeGen::genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode)
// First insert the lower 16-bits of tmpReg in targetReg at 2*index position
// since every float has two 16-bit words.
- getEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), targetReg, tmpReg, 2*index);
+ getEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), targetReg, tmpReg, 2 * index);
// Logical right shift tmpReg by 16-bits and insert in targetReg at 2*index + 1 position
inst_RV_SH(INS_SHIFT_RIGHT_LOGICAL, EA_4BYTE, tmpReg, 16);
- getEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), targetReg, tmpReg, 2*index+1);
+ getEmitter()->emitIns_R_R_I(INS_pinsrw, emitTypeSize(TYP_INT), targetReg, tmpReg, 2 * index + 1);
}
else
{
- unsigned int insertpsImm = (INSERTPS_SOURCE_SELECT(0)|INSERTPS_TARGET_SELECT(index));
+ unsigned int insertpsImm = (INSERTPS_SOURCE_SELECT(0) | INSERTPS_TARGET_SELECT(index));
inst_RV_RV_IV(INS_insertps, EA_16BYTE, targetReg, op2Reg, insertpsImm);
}
@@ -1746,8 +1743,7 @@ CodeGen::genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode)
// Return Value:
// None.
//
-void
-CodeGen::genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode)
{
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicShuffleSSE2);
noway_assert(compiler->getSIMDInstructionSet() == InstructionSet_SSE2);
@@ -1756,10 +1752,10 @@ CodeGen::genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode)
GenTree* op2 = simdNode->gtGetOp2();
assert(op2->isContained());
assert(op2->IsCnsIntOrI());
- int shuffleControl = (int) op2->AsIntConCommon()->IconValue();
- var_types baseType = simdNode->gtSIMDBaseType;
- var_types targetType = simdNode->TypeGet();
- regNumber targetReg = simdNode->gtRegNum;
+ int shuffleControl = (int)op2->AsIntConCommon()->IconValue();
+ var_types baseType = simdNode->gtSIMDBaseType;
+ var_types targetType = simdNode->TypeGet();
+ regNumber targetReg = simdNode->gtRegNum;
assert(targetReg != REG_NA);
regNumber op1Reg = genConsumeReg(op1);
@@ -1780,19 +1776,18 @@ CodeGen::genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode)
//
// Arguments:
// treeNode - tree node that is attempting to store indirect
-//
+//
//
// Return Value:
// None.
//
-void
-CodeGen::genStoreIndTypeSIMD12(GenTree* treeNode)
+void CodeGen::genStoreIndTypeSIMD12(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_STOREIND);
GenTree* addr = treeNode->gtOp.gtOp1;
GenTree* data = treeNode->gtOp.gtOp2;
-
+
// addr and data should not be contained.
assert(!data->isContained());
assert(!addr->isContained());
@@ -1827,18 +1822,17 @@ CodeGen::genStoreIndTypeSIMD12(GenTree* treeNode)
//
// Arguments:
// treeNode - tree node of GT_IND
-//
+//
//
// Return Value:
// None.
//
-void
-CodeGen::genLoadIndTypeSIMD12(GenTree* treeNode)
+void CodeGen::genLoadIndTypeSIMD12(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_IND);
- regNumber targetReg = treeNode->gtRegNum;
- GenTreePtr op1 = treeNode->gtOp.gtOp1;
+ regNumber targetReg = treeNode->gtRegNum;
+ GenTreePtr op1 = treeNode->gtOp.gtOp1;
assert(!op1->isContained());
regNumber operandReg = genConsumeReg(op1);
@@ -1846,7 +1840,7 @@ CodeGen::genLoadIndTypeSIMD12(GenTree* treeNode)
assert(treeNode->gtRsvdRegs != RBM_NONE);
assert(genCountBits(treeNode->gtRsvdRegs) == 2);
- regNumber tmpReg = REG_NA;
+ regNumber tmpReg = REG_NA;
regMaskTP tmpRegsMask = treeNode->gtRsvdRegs;
regMaskTP tmpReg1Mask = genFindLowestBit(tmpRegsMask);
tmpRegsMask &= ~tmpReg1Mask;
@@ -1858,7 +1852,7 @@ CodeGen::genLoadIndTypeSIMD12(GenTree* treeNode)
{
tmpReg = tmpReg1;
}
- else
+ else
{
assert(targetReg != tmpReg2);
tmpReg = tmpReg2;
@@ -1885,16 +1879,15 @@ CodeGen::genLoadIndTypeSIMD12(GenTree* treeNode)
//
// Arguments:
// treeNode - tree node that is attempting to store TYP_SIMD12 field
-//
+//
// Return Value:
// None.
//
-void
-CodeGen::genStoreLclFldTypeSIMD12(GenTree* treeNode)
+void CodeGen::genStoreLclFldTypeSIMD12(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_STORE_LCL_FLD);
- unsigned offs = treeNode->gtLclFld.gtLclOffs;
+ unsigned offs = treeNode->gtLclFld.gtLclOffs;
unsigned varNum = treeNode->gtLclVarCommon.gtLclNum;
assert(varNum < compiler->lvaCount);
@@ -1914,7 +1907,7 @@ CodeGen::genStoreLclFldTypeSIMD12(GenTree* treeNode)
getEmitter()->emitIns_R_R_I(INS_pshufd, emitActualTypeSize(TYP_SIMD16), tmpReg, operandReg, 0x02);
// Store upper 4 bytes
- getEmitter()->emitIns_S_R(ins_Store(TYP_FLOAT), EA_4BYTE, tmpReg, varNum, offs+8);
+ getEmitter()->emitIns_S_R(ins_Store(TYP_FLOAT), EA_4BYTE, tmpReg, varNum, offs + 8);
}
//-----------------------------------------------------------------------------
@@ -1924,25 +1917,24 @@ CodeGen::genStoreLclFldTypeSIMD12(GenTree* treeNode)
//
// Arguments:
// treeNode - tree node that is attempting to load TYP_SIMD12 field
-//
+//
// Return Value:
// None.
//
-void
-CodeGen::genLoadLclFldTypeSIMD12(GenTree* treeNode)
+void CodeGen::genLoadLclFldTypeSIMD12(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_LCL_FLD);
- regNumber targetReg = treeNode->gtRegNum;
- unsigned offs = treeNode->gtLclFld.gtLclOffs;
- unsigned varNum = treeNode->gtLclVarCommon.gtLclNum;
+ regNumber targetReg = treeNode->gtRegNum;
+ unsigned offs = treeNode->gtLclFld.gtLclOffs;
+ unsigned varNum = treeNode->gtLclVarCommon.gtLclNum;
assert(varNum < compiler->lvaCount);
// Need an additional Xmm register to read upper 4 bytes
assert(treeNode->gtRsvdRegs != RBM_NONE);
assert(genCountBits(treeNode->gtRsvdRegs) == 2);
- regNumber tmpReg = REG_NA;
+ regNumber tmpReg = REG_NA;
regMaskTP tmpRegsMask = treeNode->gtRsvdRegs;
regMaskTP tmpReg1Mask = genFindLowestBit(tmpRegsMask);
tmpRegsMask &= ~tmpReg1Mask;
@@ -1954,7 +1946,7 @@ CodeGen::genLoadLclFldTypeSIMD12(GenTree* treeNode)
{
tmpReg = tmpReg1;
}
- else
+ else
{
assert(targetReg != tmpReg2);
tmpReg = tmpReg2;
@@ -1963,9 +1955,9 @@ CodeGen::genLoadLclFldTypeSIMD12(GenTree* treeNode)
assert(tmpReg != targetReg);
// Read upper 4 bytes to tmpReg
- getEmitter()->emitIns_R_S(ins_Move_Extend(TYP_FLOAT, false), EA_4BYTE, tmpReg, varNum, offs+8);
+ getEmitter()->emitIns_R_S(ins_Move_Extend(TYP_FLOAT, false), EA_4BYTE, tmpReg, varNum, offs + 8);
- // Read lower 8 bytes to targetReg
+ // Read lower 8 bytes to targetReg
getEmitter()->emitIns_R_S(ins_Move_Extend(TYP_DOUBLE, false), EA_8BYTE, targetReg, varNum, offs);
// combine upper 4 bytes and lower 8 bytes in targetReg
@@ -1993,15 +1985,14 @@ CodeGen::genLoadLclFldTypeSIMD12(GenTree* treeNode)
// value to the stack. (Note that if there are no caller-save registers available, the entire 32 byte
// value will be spilled to the stack.)
//
-void
-CodeGen::genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode)
{
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicUpperSave);
GenTree* op1 = simdNode->gtGetOp1();
assert(op1->IsLocal() && op1->TypeGet() == TYP_SIMD32);
regNumber targetReg = simdNode->gtRegNum;
- regNumber op1Reg = genConsumeReg(op1);
+ regNumber op1Reg = genConsumeReg(op1);
assert(op1Reg != REG_NA);
assert(targetReg != REG_NA);
getEmitter()->emitIns_R_R_I(INS_vextractf128, EA_32BYTE, targetReg, op1Reg, 0x01);
@@ -2030,16 +2021,15 @@ CodeGen::genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode)
// spilled tree (saveNode) in order to perform the reload. We can easily find that tree,
// as it is in the spill descriptor for the register from which it was saved.
//
-void
-CodeGen::genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode)
{
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicUpperRestore);
GenTree* op1 = simdNode->gtGetOp1();
assert(op1->IsLocal() && op1->TypeGet() == TYP_SIMD32);
- regNumber srcReg = simdNode->gtRegNum;
+ regNumber srcReg = simdNode->gtRegNum;
regNumber lclVarReg = genConsumeReg(op1);
- unsigned varNum = op1->AsLclVarCommon()->gtLclNum;
+ unsigned varNum = op1->AsLclVarCommon()->gtLclNum;
assert(lclVarReg != REG_NA);
assert(srcReg != REG_NA);
if (simdNode->gtFlags & GTF_SPILLED)
@@ -2065,92 +2055,85 @@ CodeGen::genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode)
// Currently, we only recognize SIMDVector<float> and SIMDVector<int>, and
// a limited set of methods.
//
-void
-CodeGen::genSIMDIntrinsic(GenTreeSIMD* simdNode)
+void CodeGen::genSIMDIntrinsic(GenTreeSIMD* simdNode)
{
// NYI for unsupported base types
- if (simdNode->gtSIMDBaseType != TYP_INT &&
- simdNode->gtSIMDBaseType != TYP_LONG &&
- simdNode->gtSIMDBaseType != TYP_FLOAT &&
- simdNode->gtSIMDBaseType != TYP_DOUBLE &&
- simdNode->gtSIMDBaseType != TYP_CHAR &&
- simdNode->gtSIMDBaseType != TYP_UBYTE &&
- simdNode->gtSIMDBaseType != TYP_SHORT &&
- simdNode->gtSIMDBaseType != TYP_BYTE &&
- simdNode->gtSIMDBaseType != TYP_UINT &&
- simdNode->gtSIMDBaseType != TYP_ULONG
- )
+ if (simdNode->gtSIMDBaseType != TYP_INT && simdNode->gtSIMDBaseType != TYP_LONG &&
+ simdNode->gtSIMDBaseType != TYP_FLOAT && simdNode->gtSIMDBaseType != TYP_DOUBLE &&
+ simdNode->gtSIMDBaseType != TYP_CHAR && simdNode->gtSIMDBaseType != TYP_UBYTE &&
+ simdNode->gtSIMDBaseType != TYP_SHORT && simdNode->gtSIMDBaseType != TYP_BYTE &&
+ simdNode->gtSIMDBaseType != TYP_UINT && simdNode->gtSIMDBaseType != TYP_ULONG)
{
noway_assert(!"SIMD intrinsic with unsupported base type.");
}
- switch(simdNode->gtSIMDIntrinsicID)
+ switch (simdNode->gtSIMDIntrinsicID)
{
- case SIMDIntrinsicInit:
- genSIMDIntrinsicInit(simdNode);
- break;
+ case SIMDIntrinsicInit:
+ genSIMDIntrinsicInit(simdNode);
+ break;
- case SIMDIntrinsicInitN:
- genSIMDIntrinsicInitN(simdNode);
- break;
+ case SIMDIntrinsicInitN:
+ genSIMDIntrinsicInitN(simdNode);
+ break;
- case SIMDIntrinsicSqrt:
- case SIMDIntrinsicCast:
- genSIMDIntrinsicUnOp(simdNode);
- break;
+ case SIMDIntrinsicSqrt:
+ case SIMDIntrinsicCast:
+ genSIMDIntrinsicUnOp(simdNode);
+ break;
- case SIMDIntrinsicAdd:
- case SIMDIntrinsicSub:
- case SIMDIntrinsicMul:
- case SIMDIntrinsicDiv:
- case SIMDIntrinsicBitwiseAnd:
- case SIMDIntrinsicBitwiseAndNot:
- case SIMDIntrinsicBitwiseOr:
- case SIMDIntrinsicBitwiseXor:
- case SIMDIntrinsicMin:
- case SIMDIntrinsicMax:
- genSIMDIntrinsicBinOp(simdNode);
- break;
+ case SIMDIntrinsicAdd:
+ case SIMDIntrinsicSub:
+ case SIMDIntrinsicMul:
+ case SIMDIntrinsicDiv:
+ case SIMDIntrinsicBitwiseAnd:
+ case SIMDIntrinsicBitwiseAndNot:
+ case SIMDIntrinsicBitwiseOr:
+ case SIMDIntrinsicBitwiseXor:
+ case SIMDIntrinsicMin:
+ case SIMDIntrinsicMax:
+ genSIMDIntrinsicBinOp(simdNode);
+ break;
- case SIMDIntrinsicOpEquality:
- case SIMDIntrinsicOpInEquality:
- case SIMDIntrinsicEqual:
- case SIMDIntrinsicLessThan:
- case SIMDIntrinsicGreaterThan:
- case SIMDIntrinsicLessThanOrEqual:
- case SIMDIntrinsicGreaterThanOrEqual:
- genSIMDIntrinsicRelOp(simdNode);
- break;
+ case SIMDIntrinsicOpEquality:
+ case SIMDIntrinsicOpInEquality:
+ case SIMDIntrinsicEqual:
+ case SIMDIntrinsicLessThan:
+ case SIMDIntrinsicGreaterThan:
+ case SIMDIntrinsicLessThanOrEqual:
+ case SIMDIntrinsicGreaterThanOrEqual:
+ genSIMDIntrinsicRelOp(simdNode);
+ break;
- case SIMDIntrinsicDotProduct:
- genSIMDIntrinsicDotProduct(simdNode);
- break;
+ case SIMDIntrinsicDotProduct:
+ genSIMDIntrinsicDotProduct(simdNode);
+ break;
- case SIMDIntrinsicGetItem:
- genSIMDIntrinsicGetItem(simdNode);
- break;
+ case SIMDIntrinsicGetItem:
+ genSIMDIntrinsicGetItem(simdNode);
+ break;
- case SIMDIntrinsicShuffleSSE2:
- genSIMDIntrinsicShuffleSSE2(simdNode);
- break;
+ case SIMDIntrinsicShuffleSSE2:
+ genSIMDIntrinsicShuffleSSE2(simdNode);
+ break;
- case SIMDIntrinsicSetX:
- case SIMDIntrinsicSetY:
- case SIMDIntrinsicSetZ:
- case SIMDIntrinsicSetW:
- genSIMDIntrinsicSetItem(simdNode);
- break;
+ case SIMDIntrinsicSetX:
+ case SIMDIntrinsicSetY:
+ case SIMDIntrinsicSetZ:
+ case SIMDIntrinsicSetW:
+ genSIMDIntrinsicSetItem(simdNode);
+ break;
- case SIMDIntrinsicUpperSave:
- genSIMDIntrinsicUpperSave(simdNode);
- break;
- case SIMDIntrinsicUpperRestore:
- genSIMDIntrinsicUpperRestore(simdNode);
- break;
+ case SIMDIntrinsicUpperSave:
+ genSIMDIntrinsicUpperSave(simdNode);
+ break;
+ case SIMDIntrinsicUpperRestore:
+ genSIMDIntrinsicUpperRestore(simdNode);
+ break;
- default:
- noway_assert(!"Unimplemented SIMD intrinsic.");
- unreached();
+ default:
+ noway_assert(!"Unimplemented SIMD intrinsic.");
+ unreached();
}
}
diff --git a/src/jit/sm.cpp b/src/jit/sm.cpp
index 0154c11b0c..859b238ec8 100644
--- a/src/jit/sm.cpp
+++ b/src/jit/sm.cpp
@@ -21,16 +21,14 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
//
// The array to map from EE opcodes (i.e. CEE_ ) to state machine opcodes (i.e. SM_ )
//
-const SM_OPCODE smOpcodeMap[] =
-{
- #define OPCODEMAP(eename,eestring,smname) smname,
- #include "smopcodemap.def"
- #undef OPCODEMAP
+const SM_OPCODE smOpcodeMap[] = {
+#define OPCODEMAP(eename, eestring, smname) smname,
+#include "smopcodemap.def"
+#undef OPCODEMAP
};
-
// ????????? How to make this method inlinable, since it refers to smOpcodeMap????
-/* static */ SM_OPCODE CodeSeqSM::MapToSMOpcode(OPCODE opcode)
+/* static */ SM_OPCODE CodeSeqSM::MapToSMOpcode(OPCODE opcode)
{
assert(opcode < CEE_COUNT);
@@ -39,29 +37,28 @@ const SM_OPCODE smOpcodeMap[] =
return smOpcode;
}
-void CodeSeqSM::Start(Compiler * comp)
-{
- pComp = comp;
- States = gp_SMStates;
- JumpTableCells = gp_SMJumpTableCells;
- StateWeights = gp_StateWeights;
- NativeSize = 0;
+void CodeSeqSM::Start(Compiler* comp)
+{
+ pComp = comp;
+ States = gp_SMStates;
+ JumpTableCells = gp_SMJumpTableCells;
+ StateWeights = gp_StateWeights;
+ NativeSize = 0;
- Reset();
+ Reset();
}
-void CodeSeqSM::Reset()
+void CodeSeqSM::Reset()
{
- curState = SM_STATE_ID_START;
+ curState = SM_STATE_ID_START;
#ifdef DEBUG
// Reset the state occurrence counts
memset(StateMatchedCounts, 0, sizeof(StateMatchedCounts));
#endif
-
}
-void CodeSeqSM::End()
+void CodeSeqSM::End()
{
if (States[curState].term)
{
@@ -69,124 +66,125 @@ void CodeSeqSM::End()
}
}
-void CodeSeqSM::Run(SM_OPCODE opcode DEBUGARG(int level))
-{
+void CodeSeqSM::Run(SM_OPCODE opcode DEBUGARG(int level))
+{
SM_STATE_ID nextState;
SM_STATE_ID rollbackState;
- SM_OPCODE opcodesToRevisit[MAX_CODE_SEQUENCE_LENGTH];
+ SM_OPCODE opcodesToRevisit[MAX_CODE_SEQUENCE_LENGTH];
- assert(level<=MAX_CODE_SEQUENCE_LENGTH);
+ assert(level <= MAX_CODE_SEQUENCE_LENGTH);
_Next:
- nextState = GetDestState(curState, opcode);
+ nextState = GetDestState(curState, opcode);
if (nextState != 0)
{
// This is easy, Just go to the next state.
curState = nextState;
- return;
+ return;
}
- assert(curState != SM_STATE_ID_START);
-
+ assert(curState != SM_STATE_ID_START);
+
if (States[curState].term)
{
TermStateMatch(curState DEBUGARG(pComp->verbose));
curState = SM_STATE_ID_START;
goto _Next;
}
-
+
// This is hard. We need to rollback to the longest matched term state and restart from there.
rollbackState = States[curState].longestTermState;
TermStateMatch(rollbackState DEBUGARG(pComp->verbose));
assert(States[curState].length > States[rollbackState].length);
-
+
unsigned numOfOpcodesToRevisit = States[curState].length - States[rollbackState].length + 1;
- assert(numOfOpcodesToRevisit > 1 &&
+ assert(numOfOpcodesToRevisit > 1 &&
numOfOpcodesToRevisit <= MAX_CODE_SEQUENCE_LENGTH); // So it can fit in the local array opcodesToRevisit[]
- SM_OPCODE * p = opcodesToRevisit + (numOfOpcodesToRevisit - 1);
+ SM_OPCODE* p = opcodesToRevisit + (numOfOpcodesToRevisit - 1);
*p = opcode;
// Fill in the local array:
- for (unsigned i = 0; i<numOfOpcodesToRevisit-1; ++i)
+ for (unsigned i = 0; i < numOfOpcodesToRevisit - 1; ++i)
{
- * (--p) = States[curState].opc;
+ *(--p) = States[curState].opc;
curState = States[curState].prevState;
}
- assert(curState == rollbackState);
+ assert(curState == rollbackState);
// Now revisit these opcodes, starting from SM_STATE_ID_START.
curState = SM_STATE_ID_START;
- for (p = opcodesToRevisit; p< opcodesToRevisit + numOfOpcodesToRevisit; ++p)
+ for (p = opcodesToRevisit; p < opcodesToRevisit + numOfOpcodesToRevisit; ++p)
{
- Run(*p DEBUGARG(level+1));
- }
+ Run(*p DEBUGARG(level + 1));
+ }
}
-
-SM_STATE_ID CodeSeqSM::GetDestState(SM_STATE_ID srcState, SM_OPCODE opcode)
+SM_STATE_ID CodeSeqSM::GetDestState(SM_STATE_ID srcState, SM_OPCODE opcode)
{
assert(opcode < SM_COUNT);
-
- JumpTableCell * pThisJumpTable = (JumpTableCell * )(((PBYTE)JumpTableCells) + States[srcState].jumpTableByteOffset);
- JumpTableCell * cell = pThisJumpTable+opcode;
+ JumpTableCell* pThisJumpTable = (JumpTableCell*)(((PBYTE)JumpTableCells) + States[srcState].jumpTableByteOffset);
+
+ JumpTableCell* cell = pThisJumpTable + opcode;
if (cell->srcState != srcState)
{
- assert(cell->srcState == 0 || cell->srcState != srcState); // Either way means there is not outgoing edge from srcState.
+ assert(cell->srcState == 0 ||
+               cell->srcState != srcState); // Either way means there is no outgoing edge from srcState.
return 0;
}
else
{
- return cell->destState;
+ return cell->destState;
}
}
#ifdef DEBUG
-const char * CodeSeqSM::StateDesc(SM_STATE_ID stateID)
-{
+const char* CodeSeqSM::StateDesc(SM_STATE_ID stateID)
+{
static char s_StateDesc[500];
static SM_OPCODE s_StateDescOpcodes[MAX_CODE_SEQUENCE_LENGTH];
if (stateID == 0)
+ {
return "invalid";
-
+ }
if (stateID == SM_STATE_ID_START)
- return "start";
-
+ {
+ return "start";
+ }
unsigned i = 0;
-
- SM_STATE_ID b = stateID;
-
+
+ SM_STATE_ID b = stateID;
+
while (States[b].prevState != 0)
{
s_StateDescOpcodes[i] = States[b].opc;
- b = States[b].prevState;
+ b = States[b].prevState;
++i;
}
- assert(i == States[stateID].length && i>0);
+ assert(i == States[stateID].length && i > 0);
- * s_StateDesc = 0;
-
- while (--i>0)
+ *s_StateDesc = 0;
+
+ while (--i > 0)
{
strcat(s_StateDesc, smOpcodeNames[s_StateDescOpcodes[i]]);
strcat(s_StateDesc, " -> ");
}
- strcat(s_StateDesc, smOpcodeNames[s_StateDescOpcodes[0]]);
+ strcat(s_StateDesc, smOpcodeNames[s_StateDescOpcodes[0]]);
return s_StateDesc;
}
#endif // DEBUG
-
diff --git a/src/jit/sm.h b/src/jit/sm.h
index 738c14d9e2..33d65092bb 100644
--- a/src/jit/sm.h
+++ b/src/jit/sm.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
//
// State machine header used ONLY in the JIT.
//
@@ -12,47 +11,46 @@
#include "smcommon.h"
-extern const SMState * gp_SMStates;
-extern const JumpTableCell * gp_SMJumpTableCells;
-extern const short * gp_StateWeights;
+extern const SMState* gp_SMStates;
+extern const JumpTableCell* gp_SMJumpTableCells;
+extern const short* gp_StateWeights;
-class CodeSeqSM // Represent a particualr run of the state machine
- // For example, it maintains the array of counts for the terminated states.
- // These counts should be stored in per method based for them to be correct
- // under multithreadeded environment.
+class CodeSeqSM // Represents a particular run of the state machine.
+                // For example, it maintains the array of counts for the terminated states.
+                // These counts should be stored per method for them to be correct
+                // under a multithreaded environment.
{
-public :
+public:
+ Compiler* pComp;
- Compiler * pComp;
+ const SMState* States;
+ const JumpTableCell* JumpTableCells;
+ const short* StateWeights; // Weight for each state. Including non-terminate states.
- const SMState * States;
- const JumpTableCell * JumpTableCells;
- const short * StateWeights; // Weight for each state. Including non-terminate states.
+ SM_STATE_ID curState;
- SM_STATE_ID curState;
+ int NativeSize; // This is a signed integer!
- int NativeSize; // This is a signed integer!
-
- void Start(Compiler * comp);
- void Reset();
- void End();
- void Run(SM_OPCODE opcode DEBUGARG(int level));
+ void Start(Compiler* comp);
+ void Reset();
+ void End();
+ void Run(SM_OPCODE opcode DEBUGARG(int level));
- SM_STATE_ID GetDestState(SM_STATE_ID srcState, SM_OPCODE opcode);
+ SM_STATE_ID GetDestState(SM_STATE_ID srcState, SM_OPCODE opcode);
// Matched a termination state
- inline void TermStateMatch(SM_STATE_ID stateID DEBUGARG(bool verbose))
+ inline void TermStateMatch(SM_STATE_ID stateID DEBUGARG(bool verbose))
{
- assert(States[stateID].term);
+ assert(States[stateID].term);
assert(StateMatchedCounts[stateID] < _UI16_MAX);
-#ifdef DEBUG
- ++StateMatchedCounts[stateID];
-#ifndef SMGEN_COMPILE
+#ifdef DEBUG
+ ++StateMatchedCounts[stateID];
+#ifndef SMGEN_COMPILE
if (verbose)
{
- printf("weight=%3d : state %3d [ %s ]\n", StateWeights[stateID], stateID, StateDesc(stateID));
+ printf("weight=%3d : state %3d [ %s ]\n", StateWeights[stateID], stateID, StateDesc(stateID));
}
-#endif // SMGEN_COMPILE
+#endif // SMGEN_COMPILE
#endif // DEBUG
NativeSize += StateWeights[stateID];
@@ -60,9 +58,9 @@ public :
// Given an SM opcode retrieve the weight for this single opcode state.
// For example, ID for single opcode state SM_NOSHOW is 2.
- inline short GetWeightForOpcode(SM_OPCODE opcode)
+ inline short GetWeightForOpcode(SM_OPCODE opcode)
{
- SM_STATE_ID stateID = ((SM_STATE_ID)opcode) + SM_STATE_ID_START + 1;
+ SM_STATE_ID stateID = ((SM_STATE_ID)opcode) + SM_STATE_ID_START + 1;
return StateWeights[stateID];
}
@@ -71,9 +69,7 @@ public :
const char* StateDesc(SM_STATE_ID stateID);
#endif
- static SM_OPCODE MapToSMOpcode(OPCODE opcode);
+ static SM_OPCODE MapToSMOpcode(OPCODE opcode);
};
#endif /* __sm_h__ */
-
-
diff --git a/src/jit/smallhash.h b/src/jit/smallhash.h
index e2ccf20cd1..7a3b6d32e9 100644
--- a/src/jit/smallhash.h
+++ b/src/jit/smallhash.h
@@ -9,7 +9,7 @@
// HashTableInfo: a concept that provides equality and hashing methods for
// a particular key type. Used by HashTableBase and its
// subclasses.
-template<typename TKey>
+template <typename TKey>
struct HashTableInfo
{
// static bool Equals(const TKey& x, const TKey& y);
@@ -19,7 +19,7 @@ struct HashTableInfo
//------------------------------------------------------------------------
// HashTableInfo<TKey*>: specialized version of HashTableInfo for pointer-
// typed keys.
-template<typename TKey>
+template <typename TKey>
struct HashTableInfo<TKey*>
{
static bool Equals(const TKey* x, const TKey* y)
@@ -65,7 +65,7 @@ struct HashTableInfo<TKey*>
// TKey - The type of the table's keys.
// TValue - The type of the table's values.
// TKeyInfo - A type that conforms to the HashTableInfo<TKey> concept.
-template<typename TKey, typename TValue, typename TKeyInfo = HashTableInfo<TKey>>
+template <typename TKey, typename TValue, typename TKeyInfo = HashTableInfo<TKey>>
class HashTableBase
{
friend class KeyValuePair;
@@ -96,21 +96,21 @@ protected:
// bucket in a chain must be occupied (i.e. `m_isFull` will be true).
struct Bucket
{
- bool m_isFull; // True if the bucket is occupied; false otherwise.
+ bool m_isFull; // True if the bucket is occupied; false otherwise.
unsigned m_firstOffset; // The offset to the first node in the chain for this bucket index.
unsigned m_nextOffset; // The offset to the next node in the chain for this bucket index.
- unsigned m_hash; // The hash code for the element stored in this bucket.
- TKey m_key; // The key for the element stored in this bucket.
- TValue m_value; // The value for the element stored in this bucket.
+ unsigned m_hash; // The hash code for the element stored in this bucket.
+ TKey m_key; // The key for the element stored in this bucket.
+ TValue m_value; // The value for the element stored in this bucket.
};
private:
- Compiler* m_compiler; // The compiler context to use for allocations.
- Bucket* m_buckets; // The bucket array.
- unsigned m_numBuckets; // The number of buckets in the bucket array.
- unsigned m_numFullBuckets; // The number of occupied buckets.
+ Compiler* m_compiler; // The compiler context to use for allocations.
+ Bucket* m_buckets; // The bucket array.
+ unsigned m_numBuckets; // The number of buckets in the bucket array.
+ unsigned m_numFullBuckets; // The number of occupied buckets.
//------------------------------------------------------------------------
// HashTableBase::Insert: inserts a key-value pair into a bucket array.
@@ -127,8 +127,8 @@ private:
// otherwise.
static bool Insert(Bucket* buckets, unsigned numBuckets, unsigned hash, const TKey& key, const TValue& value)
{
- const unsigned mask = numBuckets - 1;
- unsigned homeIndex = hash & mask;
+ const unsigned mask = numBuckets - 1;
+ unsigned homeIndex = hash & mask;
Bucket* home = &buckets[homeIndex];
if (!home->m_isFull)
@@ -137,26 +137,26 @@ private:
//
// Note that the next offset does not need to be updated: whether or not it is non-zero,
// it is already correct, since we're inserting at the head of the list.
- home->m_isFull = true;
+ home->m_isFull = true;
home->m_firstOffset = 0;
- home->m_hash = hash;
- home->m_key = key;
- home->m_value = value;
+ home->m_hash = hash;
+ home->m_key = key;
+ home->m_value = value;
return true;
}
// If the home bucket is full, probe to find the next empty bucket.
unsigned precedingIndexInChain = homeIndex;
- unsigned nextIndexInChain = (homeIndex + home->m_firstOffset) & mask;
+ unsigned nextIndexInChain = (homeIndex + home->m_firstOffset) & mask;
for (unsigned j = 1; j < numBuckets; j++)
{
unsigned bucketIndex = (homeIndex + j) & mask;
- Bucket* bucket = &buckets[bucketIndex];
+ Bucket* bucket = &buckets[bucketIndex];
if (bucketIndex == nextIndexInChain)
{
assert(bucket->m_isFull);
precedingIndexInChain = bucketIndex;
- nextIndexInChain = (bucketIndex + bucket->m_nextOffset) & mask;
+ nextIndexInChain = (bucketIndex + bucket->m_nextOffset) & mask;
}
else if (!bucket->m_isFull)
{
@@ -181,8 +181,8 @@ private:
buckets[precedingIndexInChain].m_nextOffset = offset;
}
- bucket->m_hash = hash;
- bucket->m_key = key;
+ bucket->m_hash = hash;
+ bucket->m_key = key;
bucket->m_value = value;
return true;
}
@@ -215,14 +215,14 @@ private:
return false;
}
- const unsigned mask = m_numBuckets - 1;
- unsigned index = hash & mask;
+ const unsigned mask = m_numBuckets - 1;
+ unsigned index = hash & mask;
Bucket* bucket = &m_buckets[index];
if (bucket->m_isFull && bucket->m_hash == hash && TKeyInfo::Equals(bucket->m_key, key))
{
*precedingIndex = index;
- *bucketIndex = index;
+ *bucketIndex = index;
return true;
}
@@ -230,14 +230,14 @@ private:
{
unsigned precedingIndexInChain = index;
- index = (index + offset) & mask;
+ index = (index + offset) & mask;
bucket = &m_buckets[index];
assert(bucket->m_isFull);
if (bucket->m_hash == hash && TKeyInfo::Equals(bucket->m_key, key))
{
*precedingIndex = precedingIndexInChain;
- *bucketIndex = index;
+ *bucketIndex = index;
return true;
}
}
@@ -254,7 +254,7 @@ private:
Bucket* currentBuckets = m_buckets;
unsigned newNumBuckets = m_numBuckets == 0 ? InitialNumBuckets : m_numBuckets * 2;
- size_t allocSize = sizeof(Bucket) * newNumBuckets;
+ size_t allocSize = sizeof(Bucket) * newNumBuckets;
assert((sizeof(Bucket) * m_numBuckets) < allocSize);
auto* newBuckets = reinterpret_cast<Bucket*>(m_compiler->compGetMem(allocSize));
@@ -268,20 +268,18 @@ private:
continue;
}
- bool inserted = Insert(newBuckets, newNumBuckets, currentBucket->m_hash, currentBucket->m_key, currentBucket->m_value);
+ bool inserted =
+ Insert(newBuckets, newNumBuckets, currentBucket->m_hash, currentBucket->m_key, currentBucket->m_value);
(assert(inserted), (void)inserted);
}
m_numBuckets = newNumBuckets;
- m_buckets = newBuckets;
+ m_buckets = newBuckets;
}
protected:
HashTableBase(Compiler* compiler, Bucket* buckets, unsigned numBuckets)
- : m_compiler(compiler)
- , m_buckets(buckets)
- , m_numBuckets(numBuckets)
- , m_numFullBuckets(0)
+ : m_compiler(compiler), m_buckets(buckets), m_numBuckets(numBuckets), m_numFullBuckets(0)
{
assert(compiler != nullptr);
@@ -304,15 +302,13 @@ public:
Bucket* m_bucket;
- KeyValuePair(Bucket* bucket)
- : m_bucket(bucket)
+ KeyValuePair(Bucket* bucket) : m_bucket(bucket)
{
assert(m_bucket != nullptr);
}
public:
- KeyValuePair()
- : m_bucket(nullptr)
+ KeyValuePair() : m_bucket(nullptr)
{
}
@@ -334,14 +330,12 @@ public:
{
friend class HashTableBase<TKey, TValue, TKeyInfo>;
- Bucket* m_buckets;
+ Bucket* m_buckets;
unsigned m_numBuckets;
unsigned m_index;
Iterator(Bucket* buckets, unsigned numBuckets, unsigned index)
- : m_buckets(buckets)
- , m_numBuckets(numBuckets)
- , m_index(index)
+ : m_buckets(buckets), m_numBuckets(numBuckets), m_index(index)
{
assert((buckets != nullptr) || (numBuckets == 0));
assert(index <= numBuckets);
@@ -354,10 +348,7 @@ public:
}
public:
- Iterator()
- : m_buckets(nullptr)
- , m_numBuckets(0)
- , m_index(0)
+ Iterator() : m_buckets(nullptr), m_numBuckets(0), m_index(0)
{
}
@@ -429,7 +420,7 @@ public:
// the key does not already exist in the
// table, or updates the value if the key
// already exists.
- //
+ //
// Arguments:
// key - The key for which to add or update a value.
// value - The value.
@@ -482,13 +473,13 @@ public:
return false;
}
- Bucket* bucket = &m_buckets[bucketIndex];
+ Bucket* bucket = &m_buckets[bucketIndex];
bucket->m_isFull = false;
if (precedingIndexInChain != bucketIndex)
{
- const unsigned mask = m_numBuckets - 1;
- unsigned homeIndex = hash & mask;
+ const unsigned mask = m_numBuckets - 1;
+ unsigned homeIndex = hash & mask;
unsigned nextOffset;
if (bucket->m_nextOffset == 0)
@@ -498,7 +489,7 @@ public:
else
{
unsigned nextIndexInChain = (bucketIndex + bucket->m_nextOffset) & mask;
- nextOffset = (nextIndexInChain - precedingIndexInChain) & mask;
+ nextOffset = (nextIndexInChain - precedingIndexInChain) & mask;
}
if (precedingIndexInChain == homeIndex)
@@ -543,7 +534,7 @@ public:
//------------------------------------------------------------------------
// HashTable: a simple subclass of `HashTableBase` that always uses heap
// storage for its bucket array.
-template<typename TKey, typename TValue, typename TKeyInfo = HashTableInfo<TKey>>
+template <typename TKey, typename TValue, typename TKeyInfo = HashTableInfo<TKey>>
class HashTable final : public HashTableBase<TKey, TValue, TKeyInfo>
{
typedef HashTableBase<TKey, TValue, TKeyInfo> TBase;
@@ -554,15 +545,15 @@ class HashTable final : public HashTableBase<TKey, TValue, TKeyInfo>
}
public:
- HashTable(Compiler* compiler)
- : TBase(compiler, nullptr, 0)
+ HashTable(Compiler* compiler) : TBase(compiler, nullptr, 0)
{
}
HashTable(Compiler* compiler, unsigned initialSize)
: TBase(compiler,
- reinterpret_cast<typename TBase::Bucket*>(compiler->compGetMem(RoundUp(initialSize) * sizeof(typename TBase::Bucket))),
- RoundUp(initialSize))
+ reinterpret_cast<typename TBase::Bucket*>(
+ compiler->compGetMem(RoundUp(initialSize) * sizeof(typename TBase::Bucket))),
+ RoundUp(initialSize))
{
}
};
@@ -574,7 +565,7 @@ public:
// the map at any given time falls below a certain
// threshold. Switches to heap storage once the initial
// inline storage is exhausted.
-template<typename TKey, typename TValue, unsigned NumInlineBuckets = 8, typename TKeyInfo = HashTableInfo<TKey>>
+template <typename TKey, typename TValue, unsigned NumInlineBuckets = 8, typename TKeyInfo = HashTableInfo<TKey>>
class SmallHashTable final : public HashTableBase<TKey, TValue, TKeyInfo>
{
typedef HashTableBase<TKey, TValue, TKeyInfo> TBase;
@@ -587,8 +578,7 @@ class SmallHashTable final : public HashTableBase<TKey, TValue, TKeyInfo>
typename TBase::Bucket m_inlineBuckets[RoundedNumInlineBuckets];
public:
- SmallHashTable(Compiler* compiler)
- : TBase(compiler, m_inlineBuckets, RoundedNumInlineBuckets)
+ SmallHashTable(Compiler* compiler) : TBase(compiler, m_inlineBuckets, RoundedNumInlineBuckets)
{
}
};
diff --git a/src/jit/smcommon.cpp b/src/jit/smcommon.cpp
index da5f271fc9..d17e21b874 100644
--- a/src/jit/smcommon.cpp
+++ b/src/jit/smcommon.cpp
@@ -2,178 +2,165 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#if defined(DEBUG) || defined(SMGEN_COMPILE)
-
-//
+
+//
// The array of state-machine-opcode names
//
-const char * const smOpcodeNames[] =
-{
- #define SMOPDEF(smname,string) string,
- #include "smopcode.def"
- #undef SMOPDEF
+const char* const smOpcodeNames[] = {
+#define SMOPDEF(smname, string) string,
+#include "smopcode.def"
+#undef SMOPDEF
};
//
// The code sequences the state machine will look for.
//
-const SM_OPCODE s_CodeSeqs[][MAX_CODE_SEQUENCE_LENGTH] =
-{
+const SM_OPCODE s_CodeSeqs[][MAX_CODE_SEQUENCE_LENGTH] = {
-#define SMOPDEF(smname,string) {smname, CODE_SEQUENCE_END},
-// ==== Single opcode states ====
+#define SMOPDEF(smname, string) {smname, CODE_SEQUENCE_END},
+// ==== Single opcode states ====
#include "smopcode.def"
-#undef SMOPDEF
-
+#undef SMOPDEF
+
// ==== Legal prefixed opcode sequences ====
- {SM_CONSTRAINED, SM_CALLVIRT, CODE_SEQUENCE_END},
-
+ {SM_CONSTRAINED, SM_CALLVIRT, CODE_SEQUENCE_END},
+
// ==== Interesting patterns ====
// Fetching of object field
- {SM_LDARG_0, SM_LDFLD, CODE_SEQUENCE_END},
- {SM_LDARG_1, SM_LDFLD, CODE_SEQUENCE_END},
- {SM_LDARG_2, SM_LDFLD, CODE_SEQUENCE_END},
- {SM_LDARG_3, SM_LDFLD, CODE_SEQUENCE_END},
+ {SM_LDARG_0, SM_LDFLD, CODE_SEQUENCE_END},
+ {SM_LDARG_1, SM_LDFLD, CODE_SEQUENCE_END},
+ {SM_LDARG_2, SM_LDFLD, CODE_SEQUENCE_END},
+ {SM_LDARG_3, SM_LDFLD, CODE_SEQUENCE_END},
// Fetching of struct field
- {SM_LDARGA_S, SM_LDFLD, CODE_SEQUENCE_END},
- {SM_LDLOCA_S, SM_LDFLD, CODE_SEQUENCE_END},
-
+ {SM_LDARGA_S, SM_LDFLD, CODE_SEQUENCE_END},
+ {SM_LDLOCA_S, SM_LDFLD, CODE_SEQUENCE_END},
+
// Fetching of struct field from a normed struct
- {SM_LDARGA_S_NORMED, SM_LDFLD, CODE_SEQUENCE_END},
- {SM_LDLOCA_S_NORMED, SM_LDFLD, CODE_SEQUENCE_END},
+ {SM_LDARGA_S_NORMED, SM_LDFLD, CODE_SEQUENCE_END},
+ {SM_LDLOCA_S_NORMED, SM_LDFLD, CODE_SEQUENCE_END},
// stloc/ldloc --> dup
- {SM_STLOC_0, SM_LDLOC_0, CODE_SEQUENCE_END},
- {SM_STLOC_1, SM_LDLOC_1, CODE_SEQUENCE_END},
- {SM_STLOC_2, SM_LDLOC_2, CODE_SEQUENCE_END},
- {SM_STLOC_3, SM_LDLOC_3, CODE_SEQUENCE_END},
+ {SM_STLOC_0, SM_LDLOC_0, CODE_SEQUENCE_END},
+ {SM_STLOC_1, SM_LDLOC_1, CODE_SEQUENCE_END},
+ {SM_STLOC_2, SM_LDLOC_2, CODE_SEQUENCE_END},
+ {SM_STLOC_3, SM_LDLOC_3, CODE_SEQUENCE_END},
// FPU operations
- {SM_LDC_R4, SM_ADD, CODE_SEQUENCE_END},
- {SM_LDC_R4, SM_SUB, CODE_SEQUENCE_END},
- {SM_LDC_R4, SM_MUL, CODE_SEQUENCE_END},
- {SM_LDC_R4, SM_DIV, CODE_SEQUENCE_END},
+ {SM_LDC_R4, SM_ADD, CODE_SEQUENCE_END},
+ {SM_LDC_R4, SM_SUB, CODE_SEQUENCE_END},
+ {SM_LDC_R4, SM_MUL, CODE_SEQUENCE_END},
+ {SM_LDC_R4, SM_DIV, CODE_SEQUENCE_END},
- {SM_LDC_R8, SM_ADD, CODE_SEQUENCE_END},
- {SM_LDC_R8, SM_SUB, CODE_SEQUENCE_END},
- {SM_LDC_R8, SM_MUL, CODE_SEQUENCE_END},
- {SM_LDC_R8, SM_DIV, CODE_SEQUENCE_END},
+ {SM_LDC_R8, SM_ADD, CODE_SEQUENCE_END},
+ {SM_LDC_R8, SM_SUB, CODE_SEQUENCE_END},
+ {SM_LDC_R8, SM_MUL, CODE_SEQUENCE_END},
+ {SM_LDC_R8, SM_DIV, CODE_SEQUENCE_END},
- {SM_CONV_R4, SM_ADD, CODE_SEQUENCE_END},
- {SM_CONV_R4, SM_SUB, CODE_SEQUENCE_END},
- {SM_CONV_R4, SM_MUL, CODE_SEQUENCE_END},
- {SM_CONV_R4, SM_DIV, CODE_SEQUENCE_END},
+ {SM_CONV_R4, SM_ADD, CODE_SEQUENCE_END},
+ {SM_CONV_R4, SM_SUB, CODE_SEQUENCE_END},
+ {SM_CONV_R4, SM_MUL, CODE_SEQUENCE_END},
+ {SM_CONV_R4, SM_DIV, CODE_SEQUENCE_END},
// {SM_CONV_R8, SM_ADD, CODE_SEQUENCE_END}, // Removed since it collides with ldelem.r8 in
- // Math.InternalRound
+ // Math.InternalRound
// {SM_CONV_R8, SM_SUB, CODE_SEQUENCE_END}, // Just remove the SM_SUB as well.
- {SM_CONV_R8, SM_MUL, CODE_SEQUENCE_END},
- {SM_CONV_R8, SM_DIV, CODE_SEQUENCE_END},
-
+ {SM_CONV_R8, SM_MUL, CODE_SEQUENCE_END},
+ {SM_CONV_R8, SM_DIV, CODE_SEQUENCE_END},
-/* Constant init constructor:
- L_0006: ldarg.0
- L_0007: ldc.r8 0
- L_0010: stfld float64 raytracer.Vec::x
-*/
+ /* Constant init constructor:
+ L_0006: ldarg.0
+ L_0007: ldc.r8 0
+ L_0010: stfld float64 raytracer.Vec::x
+ */
{SM_LDARG_0, SM_LDC_I4_0, SM_STFLD, CODE_SEQUENCE_END},
- {SM_LDARG_0, SM_LDC_R4, SM_STFLD, CODE_SEQUENCE_END},
- {SM_LDARG_0, SM_LDC_R8, SM_STFLD, CODE_SEQUENCE_END},
-
-/* Copy constructor:
- L_0006: ldarg.0
- L_0007: ldarg.1
- L_0008: ldfld float64 raytracer.Vec::x
- L_000d: stfld float64 raytracer.Vec::x
-*/
+ {SM_LDARG_0, SM_LDC_R4, SM_STFLD, CODE_SEQUENCE_END},
+ {SM_LDARG_0, SM_LDC_R8, SM_STFLD, CODE_SEQUENCE_END},
+
+ /* Copy constructor:
+ L_0006: ldarg.0
+ L_0007: ldarg.1
+ L_0008: ldfld float64 raytracer.Vec::x
+ L_000d: stfld float64 raytracer.Vec::x
+ */
{SM_LDARG_0, SM_LDARG_1, SM_LDFLD, SM_STFLD, CODE_SEQUENCE_END},
-/* Field setter:
+ /* Field setter:
+
+ [DebuggerNonUserCode]
+ private void CtorClosed(object target, IntPtr methodPtr)
+ {
+ if (target == null)
+ {
+ this.ThrowNullThisInDelegateToInstance();
+ }
+ base._target = target;
+ base._methodPtr = methodPtr;
+ }
+
- [DebuggerNonUserCode]
- private void CtorClosed(object target, IntPtr methodPtr)
- {
- if (target == null)
+ .method private hidebysig instance void CtorClosed(object target, native int methodPtr) cil managed
{
- this.ThrowNullThisInDelegateToInstance();
+ .custom instance void System.Diagnostics.DebuggerNonUserCodeAttribute::.ctor()
+ .maxstack 8
+ L_0000: ldarg.1
+ L_0001: brtrue.s L_0009
+ L_0003: ldarg.0
+ L_0004: call instance void System.MulticastDelegate::ThrowNullThisInDelegateToInstance()
+
+ L_0009: ldarg.0
+ L_000a: ldarg.1
+ L_000b: stfld object System.Delegate::_target
+
+ L_0010: ldarg.0
+ L_0011: ldarg.2
+ L_0012: stfld native int System.Delegate::_methodPtr
+
+ L_0017: ret
}
- base._target = target;
- base._methodPtr = methodPtr;
- }
-
-
- .method private hidebysig instance void CtorClosed(object target, native int methodPtr) cil managed
- {
- .custom instance void System.Diagnostics.DebuggerNonUserCodeAttribute::.ctor()
- .maxstack 8
- L_0000: ldarg.1
- L_0001: brtrue.s L_0009
- L_0003: ldarg.0
- L_0004: call instance void System.MulticastDelegate::ThrowNullThisInDelegateToInstance()
-
- L_0009: ldarg.0
- L_000a: ldarg.1
- L_000b: stfld object System.Delegate::_target
-
- L_0010: ldarg.0
- L_0011: ldarg.2
- L_0012: stfld native int System.Delegate::_methodPtr
-
- L_0017: ret
- }
-*/
+ */
{SM_LDARG_0, SM_LDARG_1, SM_STFLD, CODE_SEQUENCE_END},
{SM_LDARG_0, SM_LDARG_2, SM_STFLD, CODE_SEQUENCE_END},
{SM_LDARG_0, SM_LDARG_3, SM_STFLD, CODE_SEQUENCE_END},
-
-
-/* Scale operator:
-
- L_0000: ldarg.0
- L_0001: dup
- L_0002: ldfld float64 raytracer.Vec::x
- L_0007: ldarg.1
- L_0008: mul
- L_0009: stfld float64 raytracer.Vec::x
-*/
+
+ /* Scale operator:
+
+ L_0000: ldarg.0
+ L_0001: dup
+ L_0002: ldfld float64 raytracer.Vec::x
+ L_0007: ldarg.1
+ L_0008: mul
+ L_0009: stfld float64 raytracer.Vec::x
+ */
{SM_LDARG_0, SM_DUP, SM_LDFLD, SM_LDARG_1, SM_ADD, SM_STFLD, CODE_SEQUENCE_END},
{SM_LDARG_0, SM_DUP, SM_LDFLD, SM_LDARG_1, SM_SUB, SM_STFLD, CODE_SEQUENCE_END},
{SM_LDARG_0, SM_DUP, SM_LDFLD, SM_LDARG_1, SM_MUL, SM_STFLD, CODE_SEQUENCE_END},
{SM_LDARG_0, SM_DUP, SM_LDFLD, SM_LDARG_1, SM_DIV, SM_STFLD, CODE_SEQUENCE_END},
-/* Add operator
- L_0000: ldarg.0
- L_0001: ldfld float64 raytracer.Vec::x
- L_0006: ldarg.1
- L_0007: ldfld float64 raytracer.Vec::x
- L_000c: add
-*/
+ /* Add operator
+ L_0000: ldarg.0
+ L_0001: ldfld float64 raytracer.Vec::x
+ L_0006: ldarg.1
+ L_0007: ldfld float64 raytracer.Vec::x
+ L_000c: add
+ */
{SM_LDARG_0, SM_LDFLD, SM_LDARG_1, SM_LDFLD, SM_ADD, CODE_SEQUENCE_END},
{SM_LDARG_0, SM_LDFLD, SM_LDARG_1, SM_LDFLD, SM_SUB, CODE_SEQUENCE_END},
// No need for mul and div since there is no mathematical meaning for them.
-
+
{SM_LDARGA_S, SM_LDFLD, SM_LDARGA_S, SM_LDFLD, SM_ADD, CODE_SEQUENCE_END},
{SM_LDARGA_S, SM_LDFLD, SM_LDARGA_S, SM_LDFLD, SM_SUB, CODE_SEQUENCE_END},
// No need for mul and div since there is no mathematical meaning for them.
// The end:
- {CODE_SEQUENCE_END}
-};
-
-#endif // defined(DEBUG) || defined(SMGEN_COMPILE)
-
-
-
-
-
-
+ {CODE_SEQUENCE_END}};
+#endif // defined(DEBUG) || defined(SMGEN_COMPILE)
diff --git a/src/jit/smcommon.h b/src/jit/smcommon.h
index cbfa34c882..0c33e05a7b 100644
--- a/src/jit/smcommon.h
+++ b/src/jit/smcommon.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
//
// Common headers used both in smgen.exe and the JIT.
//
@@ -12,44 +11,40 @@
#include "smopenum.h"
-#define NUM_SM_STATES 250
+#define NUM_SM_STATES 250
typedef BYTE SM_STATE_ID;
static_assert_no_msg(sizeof(SM_STATE_ID) == 1); // To conserve memory, we don't want to have more than 256 states.
-#define SM_STATE_ID_START 1
+#define SM_STATE_ID_START 1
-static_assert_no_msg(SM_STATE_ID_START == 1); // Make sure nobody changes it. We rely on this to map the SM_OPCODE
- // to single-opcode states. For example, in GetWeightForOpcode().
+static_assert_no_msg(SM_STATE_ID_START == 1); // Make sure nobody changes it. We rely on this to map the SM_OPCODE
+ // to single-opcode states. For example, in GetWeightForOpcode().
struct JumpTableCell
{
- SM_STATE_ID srcState;
- SM_STATE_ID destState;
+ SM_STATE_ID srcState;
+ SM_STATE_ID destState;
};
-struct SMState
+struct SMState
{
- bool term; // does this state terminate a code sequence?
- BYTE length; // the length of currently matched opcodes
- SM_STATE_ID longestTermState; // the ID of the longest matched terminate state
+ bool term; // does this state terminate a code sequence?
+ BYTE length; // the length of currently matched opcodes
+ SM_STATE_ID longestTermState; // the ID of the longest matched terminate state
- SM_STATE_ID prevState; // previous state
- SM_OPCODE opc; // opcode that leads from the previous state to current state
+ SM_STATE_ID prevState; // previous state
+ SM_OPCODE opc; // opcode that leads from the previous state to current state
unsigned short jumpTableByteOffset;
};
-
//
// Code sequences
//
-#define MAX_CODE_SEQUENCE_LENGTH 7
-#define CODE_SEQUENCE_END ((SM_OPCODE)(SM_COUNT+1))
-
+#define MAX_CODE_SEQUENCE_LENGTH 7
+#define CODE_SEQUENCE_END ((SM_OPCODE)(SM_COUNT + 1))
#endif /* __sm_common_h__ */
-
-
diff --git a/src/jit/smdata.cpp b/src/jit/smdata.cpp
index 8cf86e59c1..9fe00d4984 100644
--- a/src/jit/smdata.cpp
+++ b/src/jit/smdata.cpp
@@ -4,8 +4,8 @@
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
-// Automatically generated code. DO NOT MODIFY!
-// To generate this file. Do "smgen.exe > SMData.cpp"
+// Automatically generated code. DO NOT MODIFY!
+// To generate this file. Do "smgen.exe > SMData.cpp"
//
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -270,9 +270,9 @@ const SMState g_SMStates[] =
};
// clang-format on
-static_assert_no_msg(NUM_SM_STATES == sizeof(g_SMStates)/sizeof(g_SMStates[0]));
+static_assert_no_msg(NUM_SM_STATES == sizeof(g_SMStates) / sizeof(g_SMStates[0]));
-const SMState * gp_SMStates = g_SMStates;
+const SMState* gp_SMStates = g_SMStates;
//
// JumpTableCells in the state machine
@@ -702,5 +702,4 @@ const JumpTableCell g_SMJumpTableCells[] =
};
// clang-format on
-const JumpTableCell * gp_SMJumpTableCells = g_SMJumpTableCells;
-
+const JumpTableCell* gp_SMJumpTableCells = g_SMJumpTableCells;
diff --git a/src/jit/smopenum.h b/src/jit/smopenum.h
index 53bf843584..978bbc2c3b 100644
--- a/src/jit/smopenum.h
+++ b/src/jit/smopenum.h
@@ -2,20 +2,16 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#ifndef __smopenum_h__
#define __smopenum_h__
-typedef enum smopcode_t
-{
-#define SMOPDEF(smname,string) smname,
+typedef enum smopcode_t {
+#define SMOPDEF(smname, string) smname,
#include "smopcode.def"
-#undef SMOPDEF
-
- SM_COUNT, /* number of state machine opcodes */
-
-} SM_OPCODE;
+#undef SMOPDEF
-#endif /* __smopenum_h__ */
+ SM_COUNT, /* number of state machine opcodes */
+} SM_OPCODE;
+#endif /* __smopenum_h__ */
diff --git a/src/jit/smweights.cpp b/src/jit/smweights.cpp
index 13d4913ea9..f93d739b61 100644
--- a/src/jit/smweights.cpp
+++ b/src/jit/smweights.cpp
@@ -6,271 +6,269 @@
//
// Automatically generated code. DO NOT MODIFY!
// To generate this file, do
-// "WeightsArrayGen.pl matrix.txt results.txt > SMWeights.cpp"
+// "WeightsArrayGen.pl matrix.txt results.txt > SMWeights.cpp"
//
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#include "jitpch.h"
-#define DEFAULT_WEIGHT_VALUE 65 // This is the average of all the weights.
+#define DEFAULT_WEIGHT_VALUE 65 // This is the average of all the weights.
-#define NA 9999
+#define NA 9999
-const short g_StateWeights[] =
-{
- NA, // state 0
- NA, // state 1
- DEFAULT_WEIGHT_VALUE, // state 2 [noshow]
- 10, // state 3 [ldarg.0]
- 16, // state 4 [ldarg.1]
- 35, // state 5 [ldarg.2]
- 28, // state 6 [ldarg.3]
- 12, // state 7 [ldloc.0]
- 9, // state 8 [ldloc.1]
- 22, // state 9 [ldloc.2]
- 24, // state 10 [ldloc.3]
- 6, // state 11 [stloc.0]
- 34, // state 12 [stloc.1]
- 4, // state 13 [stloc.2]
- 49, // state 14 [stloc.3]
- 32, // state 15 [ldarg.s]
- 77, // state 16 [ldarga.s]
- 21, // state 17 [starg.s]
- 32, // state 18 [ldloc.s]
- 61, // state 19 [ldloca.s]
- -45, // state 20 [stloc.s]
- 7, // state 21 [ldnull]
- 22, // state 22 [ldc.i4.m1]
- 15, // state 23 [ldc.i4.0]
- 28, // state 24 [ldc.i4.1]
- 34, // state 25 [ldc.i4.2]
- -6, // state 26 [ldc.i4.3]
- 20, // state 27 [ldc.i4.4]
- 4, // state 28 [ldc.i4.5]
- 10, // state 29 [ldc.i4.6]
- 56, // state 30 [ldc.i4.7]
- 42, // state 31 [ldc.i4.8]
- 41, // state 32 [ldc.i4.s]
- 38, // state 33 [ldc.i4]
- 160, // state 34 [ldc.i8]
- 33, // state 35 [ldc.r4]
- 113, // state 36 [ldc.r8]
- DEFAULT_WEIGHT_VALUE, // state 37 [unused]
- 11, // state 38 [dup]
- -24, // state 39 [pop]
- 79, // state 40 [call]
- DEFAULT_WEIGHT_VALUE, // state 41 [calli]
- 19, // state 42 [ret]
- 44, // state 43 [br.s]
- 27, // state 44 [brfalse.s]
- 25, // state 45 [brtrue.s]
- 6, // state 46 [beq.s]
- 20, // state 47 [bge.s]
- 33, // state 48 [bgt.s]
- 53, // state 49 [ble.s]
- 28, // state 50 [blt.s]
- 12, // state 51 [bne.un.s]
- 85, // state 52 [bge.un.s]
- -52, // state 53 [bgt.un.s]
- 147, // state 54 [ble.un.s]
- -63, // state 55 [blt.un.s]
- DEFAULT_WEIGHT_VALUE, // state 56 [long.branch]
- 116, // state 57 [switch]
- -19, // state 58 [ldind.i1]
- 17, // state 59 [ldind.u1]
- -18, // state 60 [ldind.i2]
- 10, // state 61 [ldind.u2]
- -11, // state 62 [ldind.i4]
- -33, // state 63 [ldind.u4]
- 41, // state 64 [ldind.i8]
- -110, // state 65 [ldind.i]
- 31, // state 66 [ldind.r4]
- 45, // state 67 [ldind.r8]
- 1, // state 68 [ldind.ref]
- 60, // state 69 [stind.ref]
- 36, // state 70 [stind.i1]
- 40, // state 71 [stind.i2]
- 11, // state 72 [stind.i4]
- 84, // state 73 [stind.i8]
- 50, // state 74 [stind.r4]
- 73, // state 75 [stind.r8]
- -12, // state 76 [add]
- -15, // state 77 [sub]
- -9, // state 78 [mul]
- 35, // state 79 [div]
- 89, // state 80 [div.un]
- 89, // state 81 [rem]
- 82, // state 82 [rem.un]
- -5, // state 83 [and]
- -7, // state 84 [or]
- 35, // state 85 [xor]
- 0, // state 86 [shl]
- 17, // state 87 [shr]
- 27, // state 88 [shr.un]
- 58, // state 89 [neg]
- 19, // state 90 [not]
- 78, // state 91 [conv.i1]
- 54, // state 92 [conv.i2]
- 2, // state 93 [conv.i4]
- 99, // state 94 [conv.i8]
- 273, // state 95 [conv.r4]
- 197, // state 96 [conv.r8]
- 45, // state 97 [conv.u4]
- 55, // state 98 [conv.u8]
- 83, // state 99 [callvirt]
- DEFAULT_WEIGHT_VALUE, // state 100 [cpobj]
- 29, // state 101 [ldobj]
- 66, // state 102 [ldstr]
- 227, // state 103 [newobj]
- 261, // state 104 [castclass]
- 166, // state 105 [isinst]
- 209, // state 106 [conv.r.un]
- DEFAULT_WEIGHT_VALUE, // state 107 [unbox]
- 210, // state 108 [throw]
- 18, // state 109 [ldfld]
- 17, // state 110 [ldflda]
- 31, // state 111 [stfld]
- 159, // state 112 [ldsfld]
- 177, // state 113 [ldsflda]
- 125, // state 114 [stsfld]
- 36, // state 115 [stobj]
- 148, // state 116 [ovf.notype.un]
- 247, // state 117 [box]
- 152, // state 118 [newarr]
- 7, // state 119 [ldlen]
- 145, // state 120 [ldelema]
- 103, // state 121 [ldelem.i1]
- 91, // state 122 [ldelem.u1]
- 267, // state 123 [ldelem.i2]
- 148, // state 124 [ldelem.u2]
- 92, // state 125 [ldelem.i4]
- 213, // state 126 [ldelem.u4]
- 223, // state 127 [ldelem.i8]
- DEFAULT_WEIGHT_VALUE, // state 128 [ldelem.i]
- DEFAULT_WEIGHT_VALUE, // state 129 [ldelem.r4]
- 549, // state 130 [ldelem.r8]
- 81, // state 131 [ldelem.ref]
- DEFAULT_WEIGHT_VALUE, // state 132 [stelem.i]
- 14, // state 133 [stelem.i1]
- 23, // state 134 [stelem.i2]
- 66, // state 135 [stelem.i4]
- 254, // state 136 [stelem.i8]
- DEFAULT_WEIGHT_VALUE, // state 137 [stelem.r4]
- DEFAULT_WEIGHT_VALUE, // state 138 [stelem.r8]
- 94, // state 139 [stelem.ref]
- DEFAULT_WEIGHT_VALUE, // state 140 [ldelem]
- DEFAULT_WEIGHT_VALUE, // state 141 [stelem]
- 274, // state 142 [unbox.any]
- DEFAULT_WEIGHT_VALUE, // state 143 [conv.ovf.i1]
- DEFAULT_WEIGHT_VALUE, // state 144 [conv.ovf.u1]
- DEFAULT_WEIGHT_VALUE, // state 145 [conv.ovf.i2]
- DEFAULT_WEIGHT_VALUE, // state 146 [conv.ovf.u2]
- 242, // state 147 [conv.ovf.i4]
- DEFAULT_WEIGHT_VALUE, // state 148 [conv.ovf.u4]
- 293, // state 149 [conv.ovf.i8]
- 293, // state 150 [conv.ovf.u8]
- DEFAULT_WEIGHT_VALUE, // state 151 [refanyval]
- DEFAULT_WEIGHT_VALUE, // state 152 [ckfinite]
- -17, // state 153 [mkrefany]
- 32, // state 154 [ldtoken]
- 25, // state 155 [conv.u2]
- 50, // state 156 [conv.u1]
- -0, // state 157 [conv.i]
- 178, // state 158 [conv.ovf.i]
- DEFAULT_WEIGHT_VALUE, // state 159 [conv.ovf.u]
- DEFAULT_WEIGHT_VALUE, // state 160 [add.ovf]
- DEFAULT_WEIGHT_VALUE, // state 161 [mul.ovf]
- DEFAULT_WEIGHT_VALUE, // state 162 [sub.ovf]
- -17, // state 163 [leave.s]
- 182, // state 164 [stind.i]
- -36, // state 165 [conv.u]
- DEFAULT_WEIGHT_VALUE, // state 166 [prefix.n]
- 120, // state 167 [arglist]
- 20, // state 168 [ceq]
- -1, // state 169 [cgt]
- 47, // state 170 [cgt.un]
- 26, // state 171 [clt]
- 85, // state 172 [clt.un]
- 102, // state 173 [ldftn]
- 234, // state 174 [ldvirtftn]
- DEFAULT_WEIGHT_VALUE, // state 175 [long.loc.arg]
- 347, // state 176 [localloc]
- DEFAULT_WEIGHT_VALUE, // state 177 [unaligned]
- -44, // state 178 [volatile]
- DEFAULT_WEIGHT_VALUE, // state 179 [tailcall]
- 55, // state 180 [initobj]
- DEFAULT_WEIGHT_VALUE, // state 181 [constrained]
- DEFAULT_WEIGHT_VALUE, // state 182 [cpblk]
- DEFAULT_WEIGHT_VALUE, // state 183 [initblk]
- DEFAULT_WEIGHT_VALUE, // state 184 [rethrow]
- 38, // state 185 [sizeof]
- -68, // state 186 [refanytype]
- DEFAULT_WEIGHT_VALUE, // state 187 [readonly]
- 55, // state 188 [ldarga.s.normed]
- 35, // state 189 [ldloca.s.normed]
- 161, // state 190 [constrained -> callvirt]
- 31, // state 191 [ldarg.0 -> ldfld]
- 29, // state 192 [ldarg.1 -> ldfld]
- 22, // state 193 [ldarg.2 -> ldfld]
- 321, // state 194 [ldarg.3 -> ldfld]
- 46, // state 195 [ldarga.s -> ldfld]
- 8, // state 196 [ldloca.s -> ldfld]
- 19, // state 197 [ldarga.s.normed -> ldfld]
- -35, // state 198 [ldloca.s.normed -> ldfld]
- 20, // state 199 [stloc.0 -> ldloc.0]
- -7, // state 200 [stloc.1 -> ldloc.1]
- -10, // state 201 [stloc.2 -> ldloc.2]
- -4, // state 202 [stloc.3 -> ldloc.3]
- DEFAULT_WEIGHT_VALUE, // state 203 [ldc.r4 -> add]
- DEFAULT_WEIGHT_VALUE, // state 204 [ldc.r4 -> sub]
- DEFAULT_WEIGHT_VALUE, // state 205 [ldc.r4 -> mul]
- DEFAULT_WEIGHT_VALUE, // state 206 [ldc.r4 -> div]
- 52, // state 207 [ldc.r8 -> add]
- DEFAULT_WEIGHT_VALUE, // state 208 [ldc.r8 -> sub]
- -169, // state 209 [ldc.r8 -> mul]
- -17, // state 210 [ldc.r8 -> div]
- DEFAULT_WEIGHT_VALUE, // state 211 [conv.r4 -> add]
- DEFAULT_WEIGHT_VALUE, // state 212 [conv.r4 -> sub]
- DEFAULT_WEIGHT_VALUE, // state 213 [conv.r4 -> mul]
- DEFAULT_WEIGHT_VALUE, // state 214 [conv.r4 -> div]
- 358, // state 215 [conv.r8 -> mul]
- DEFAULT_WEIGHT_VALUE, // state 216 [conv.r8 -> div]
- NA, // state 217
- 32, // state 218 [ldarg.0 -> ldc.i4.0 -> stfld]
- NA, // state 219
- DEFAULT_WEIGHT_VALUE, // state 220 [ldarg.0 -> ldc.r4 -> stfld]
- NA, // state 221
- 38, // state 222 [ldarg.0 -> ldc.r8 -> stfld]
- NA, // state 223
- NA, // state 224
- 64, // state 225 [ldarg.0 -> ldarg.1 -> ldfld -> stfld]
- 69, // state 226 [ldarg.0 -> ldarg.1 -> stfld]
- NA, // state 227
- 98, // state 228 [ldarg.0 -> ldarg.2 -> stfld]
- NA, // state 229
- 97, // state 230 [ldarg.0 -> ldarg.3 -> stfld]
- NA, // state 231
- NA, // state 232
- NA, // state 233
- NA, // state 234
- 34, // state 235 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add -> stfld]
- NA, // state 236
- -10, // state 237 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub -> stfld]
- NA, // state 238
- DEFAULT_WEIGHT_VALUE, // state 239 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul -> stfld]
- NA, // state 240
- DEFAULT_WEIGHT_VALUE, // state 241 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div -> stfld]
- NA, // state 242
- NA, // state 243
- DEFAULT_WEIGHT_VALUE, // state 244 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> add]
- DEFAULT_WEIGHT_VALUE, // state 245 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> sub]
- NA, // state 246
- NA, // state 247
- DEFAULT_WEIGHT_VALUE, // state 248 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> add]
- DEFAULT_WEIGHT_VALUE, // state 249 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> sub]
+const short g_StateWeights[] = {
+ NA, // state 0
+ NA, // state 1
+ DEFAULT_WEIGHT_VALUE, // state 2 [noshow]
+ 10, // state 3 [ldarg.0]
+ 16, // state 4 [ldarg.1]
+ 35, // state 5 [ldarg.2]
+ 28, // state 6 [ldarg.3]
+ 12, // state 7 [ldloc.0]
+ 9, // state 8 [ldloc.1]
+ 22, // state 9 [ldloc.2]
+ 24, // state 10 [ldloc.3]
+ 6, // state 11 [stloc.0]
+ 34, // state 12 [stloc.1]
+ 4, // state 13 [stloc.2]
+ 49, // state 14 [stloc.3]
+ 32, // state 15 [ldarg.s]
+ 77, // state 16 [ldarga.s]
+ 21, // state 17 [starg.s]
+ 32, // state 18 [ldloc.s]
+ 61, // state 19 [ldloca.s]
+ -45, // state 20 [stloc.s]
+ 7, // state 21 [ldnull]
+ 22, // state 22 [ldc.i4.m1]
+ 15, // state 23 [ldc.i4.0]
+ 28, // state 24 [ldc.i4.1]
+ 34, // state 25 [ldc.i4.2]
+ -6, // state 26 [ldc.i4.3]
+ 20, // state 27 [ldc.i4.4]
+ 4, // state 28 [ldc.i4.5]
+ 10, // state 29 [ldc.i4.6]
+ 56, // state 30 [ldc.i4.7]
+ 42, // state 31 [ldc.i4.8]
+ 41, // state 32 [ldc.i4.s]
+ 38, // state 33 [ldc.i4]
+ 160, // state 34 [ldc.i8]
+ 33, // state 35 [ldc.r4]
+ 113, // state 36 [ldc.r8]
+ DEFAULT_WEIGHT_VALUE, // state 37 [unused]
+ 11, // state 38 [dup]
+ -24, // state 39 [pop]
+ 79, // state 40 [call]
+ DEFAULT_WEIGHT_VALUE, // state 41 [calli]
+ 19, // state 42 [ret]
+ 44, // state 43 [br.s]
+ 27, // state 44 [brfalse.s]
+ 25, // state 45 [brtrue.s]
+ 6, // state 46 [beq.s]
+ 20, // state 47 [bge.s]
+ 33, // state 48 [bgt.s]
+ 53, // state 49 [ble.s]
+ 28, // state 50 [blt.s]
+ 12, // state 51 [bne.un.s]
+ 85, // state 52 [bge.un.s]
+ -52, // state 53 [bgt.un.s]
+ 147, // state 54 [ble.un.s]
+ -63, // state 55 [blt.un.s]
+ DEFAULT_WEIGHT_VALUE, // state 56 [long.branch]
+ 116, // state 57 [switch]
+ -19, // state 58 [ldind.i1]
+ 17, // state 59 [ldind.u1]
+ -18, // state 60 [ldind.i2]
+ 10, // state 61 [ldind.u2]
+ -11, // state 62 [ldind.i4]
+ -33, // state 63 [ldind.u4]
+ 41, // state 64 [ldind.i8]
+ -110, // state 65 [ldind.i]
+ 31, // state 66 [ldind.r4]
+ 45, // state 67 [ldind.r8]
+ 1, // state 68 [ldind.ref]
+ 60, // state 69 [stind.ref]
+ 36, // state 70 [stind.i1]
+ 40, // state 71 [stind.i2]
+ 11, // state 72 [stind.i4]
+ 84, // state 73 [stind.i8]
+ 50, // state 74 [stind.r4]
+ 73, // state 75 [stind.r8]
+ -12, // state 76 [add]
+ -15, // state 77 [sub]
+ -9, // state 78 [mul]
+ 35, // state 79 [div]
+ 89, // state 80 [div.un]
+ 89, // state 81 [rem]
+ 82, // state 82 [rem.un]
+ -5, // state 83 [and]
+ -7, // state 84 [or]
+ 35, // state 85 [xor]
+ 0, // state 86 [shl]
+ 17, // state 87 [shr]
+ 27, // state 88 [shr.un]
+ 58, // state 89 [neg]
+ 19, // state 90 [not]
+ 78, // state 91 [conv.i1]
+ 54, // state 92 [conv.i2]
+ 2, // state 93 [conv.i4]
+ 99, // state 94 [conv.i8]
+ 273, // state 95 [conv.r4]
+ 197, // state 96 [conv.r8]
+ 45, // state 97 [conv.u4]
+ 55, // state 98 [conv.u8]
+ 83, // state 99 [callvirt]
+ DEFAULT_WEIGHT_VALUE, // state 100 [cpobj]
+ 29, // state 101 [ldobj]
+ 66, // state 102 [ldstr]
+ 227, // state 103 [newobj]
+ 261, // state 104 [castclass]
+ 166, // state 105 [isinst]
+ 209, // state 106 [conv.r.un]
+ DEFAULT_WEIGHT_VALUE, // state 107 [unbox]
+ 210, // state 108 [throw]
+ 18, // state 109 [ldfld]
+ 17, // state 110 [ldflda]
+ 31, // state 111 [stfld]
+ 159, // state 112 [ldsfld]
+ 177, // state 113 [ldsflda]
+ 125, // state 114 [stsfld]
+ 36, // state 115 [stobj]
+ 148, // state 116 [ovf.notype.un]
+ 247, // state 117 [box]
+ 152, // state 118 [newarr]
+ 7, // state 119 [ldlen]
+ 145, // state 120 [ldelema]
+ 103, // state 121 [ldelem.i1]
+ 91, // state 122 [ldelem.u1]
+ 267, // state 123 [ldelem.i2]
+ 148, // state 124 [ldelem.u2]
+ 92, // state 125 [ldelem.i4]
+ 213, // state 126 [ldelem.u4]
+ 223, // state 127 [ldelem.i8]
+ DEFAULT_WEIGHT_VALUE, // state 128 [ldelem.i]
+ DEFAULT_WEIGHT_VALUE, // state 129 [ldelem.r4]
+ 549, // state 130 [ldelem.r8]
+ 81, // state 131 [ldelem.ref]
+ DEFAULT_WEIGHT_VALUE, // state 132 [stelem.i]
+ 14, // state 133 [stelem.i1]
+ 23, // state 134 [stelem.i2]
+ 66, // state 135 [stelem.i4]
+ 254, // state 136 [stelem.i8]
+ DEFAULT_WEIGHT_VALUE, // state 137 [stelem.r4]
+ DEFAULT_WEIGHT_VALUE, // state 138 [stelem.r8]
+ 94, // state 139 [stelem.ref]
+ DEFAULT_WEIGHT_VALUE, // state 140 [ldelem]
+ DEFAULT_WEIGHT_VALUE, // state 141 [stelem]
+ 274, // state 142 [unbox.any]
+ DEFAULT_WEIGHT_VALUE, // state 143 [conv.ovf.i1]
+ DEFAULT_WEIGHT_VALUE, // state 144 [conv.ovf.u1]
+ DEFAULT_WEIGHT_VALUE, // state 145 [conv.ovf.i2]
+ DEFAULT_WEIGHT_VALUE, // state 146 [conv.ovf.u2]
+ 242, // state 147 [conv.ovf.i4]
+ DEFAULT_WEIGHT_VALUE, // state 148 [conv.ovf.u4]
+ 293, // state 149 [conv.ovf.i8]
+ 293, // state 150 [conv.ovf.u8]
+ DEFAULT_WEIGHT_VALUE, // state 151 [refanyval]
+ DEFAULT_WEIGHT_VALUE, // state 152 [ckfinite]
+ -17, // state 153 [mkrefany]
+ 32, // state 154 [ldtoken]
+ 25, // state 155 [conv.u2]
+ 50, // state 156 [conv.u1]
+ -0, // state 157 [conv.i]
+ 178, // state 158 [conv.ovf.i]
+ DEFAULT_WEIGHT_VALUE, // state 159 [conv.ovf.u]
+ DEFAULT_WEIGHT_VALUE, // state 160 [add.ovf]
+ DEFAULT_WEIGHT_VALUE, // state 161 [mul.ovf]
+ DEFAULT_WEIGHT_VALUE, // state 162 [sub.ovf]
+ -17, // state 163 [leave.s]
+ 182, // state 164 [stind.i]
+ -36, // state 165 [conv.u]
+ DEFAULT_WEIGHT_VALUE, // state 166 [prefix.n]
+ 120, // state 167 [arglist]
+ 20, // state 168 [ceq]
+ -1, // state 169 [cgt]
+ 47, // state 170 [cgt.un]
+ 26, // state 171 [clt]
+ 85, // state 172 [clt.un]
+ 102, // state 173 [ldftn]
+ 234, // state 174 [ldvirtftn]
+ DEFAULT_WEIGHT_VALUE, // state 175 [long.loc.arg]
+ 347, // state 176 [localloc]
+ DEFAULT_WEIGHT_VALUE, // state 177 [unaligned]
+ -44, // state 178 [volatile]
+ DEFAULT_WEIGHT_VALUE, // state 179 [tailcall]
+ 55, // state 180 [initobj]
+ DEFAULT_WEIGHT_VALUE, // state 181 [constrained]
+ DEFAULT_WEIGHT_VALUE, // state 182 [cpblk]
+ DEFAULT_WEIGHT_VALUE, // state 183 [initblk]
+ DEFAULT_WEIGHT_VALUE, // state 184 [rethrow]
+ 38, // state 185 [sizeof]
+ -68, // state 186 [refanytype]
+ DEFAULT_WEIGHT_VALUE, // state 187 [readonly]
+ 55, // state 188 [ldarga.s.normed]
+ 35, // state 189 [ldloca.s.normed]
+ 161, // state 190 [constrained -> callvirt]
+ 31, // state 191 [ldarg.0 -> ldfld]
+ 29, // state 192 [ldarg.1 -> ldfld]
+ 22, // state 193 [ldarg.2 -> ldfld]
+ 321, // state 194 [ldarg.3 -> ldfld]
+ 46, // state 195 [ldarga.s -> ldfld]
+ 8, // state 196 [ldloca.s -> ldfld]
+ 19, // state 197 [ldarga.s.normed -> ldfld]
+ -35, // state 198 [ldloca.s.normed -> ldfld]
+ 20, // state 199 [stloc.0 -> ldloc.0]
+ -7, // state 200 [stloc.1 -> ldloc.1]
+ -10, // state 201 [stloc.2 -> ldloc.2]
+ -4, // state 202 [stloc.3 -> ldloc.3]
+ DEFAULT_WEIGHT_VALUE, // state 203 [ldc.r4 -> add]
+ DEFAULT_WEIGHT_VALUE, // state 204 [ldc.r4 -> sub]
+ DEFAULT_WEIGHT_VALUE, // state 205 [ldc.r4 -> mul]
+ DEFAULT_WEIGHT_VALUE, // state 206 [ldc.r4 -> div]
+ 52, // state 207 [ldc.r8 -> add]
+ DEFAULT_WEIGHT_VALUE, // state 208 [ldc.r8 -> sub]
+ -169, // state 209 [ldc.r8 -> mul]
+ -17, // state 210 [ldc.r8 -> div]
+ DEFAULT_WEIGHT_VALUE, // state 211 [conv.r4 -> add]
+ DEFAULT_WEIGHT_VALUE, // state 212 [conv.r4 -> sub]
+ DEFAULT_WEIGHT_VALUE, // state 213 [conv.r4 -> mul]
+ DEFAULT_WEIGHT_VALUE, // state 214 [conv.r4 -> div]
+ 358, // state 215 [conv.r8 -> mul]
+ DEFAULT_WEIGHT_VALUE, // state 216 [conv.r8 -> div]
+ NA, // state 217
+ 32, // state 218 [ldarg.0 -> ldc.i4.0 -> stfld]
+ NA, // state 219
+ DEFAULT_WEIGHT_VALUE, // state 220 [ldarg.0 -> ldc.r4 -> stfld]
+ NA, // state 221
+ 38, // state 222 [ldarg.0 -> ldc.r8 -> stfld]
+ NA, // state 223
+ NA, // state 224
+ 64, // state 225 [ldarg.0 -> ldarg.1 -> ldfld -> stfld]
+ 69, // state 226 [ldarg.0 -> ldarg.1 -> stfld]
+ NA, // state 227
+ 98, // state 228 [ldarg.0 -> ldarg.2 -> stfld]
+ NA, // state 229
+ 97, // state 230 [ldarg.0 -> ldarg.3 -> stfld]
+ NA, // state 231
+ NA, // state 232
+ NA, // state 233
+ NA, // state 234
+ 34, // state 235 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> add -> stfld]
+ NA, // state 236
+ -10, // state 237 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> sub -> stfld]
+ NA, // state 238
+ DEFAULT_WEIGHT_VALUE, // state 239 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> mul -> stfld]
+ NA, // state 240
+ DEFAULT_WEIGHT_VALUE, // state 241 [ldarg.0 -> dup -> ldfld -> ldarg.1 -> div -> stfld]
+ NA, // state 242
+ NA, // state 243
+ DEFAULT_WEIGHT_VALUE, // state 244 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> add]
+ DEFAULT_WEIGHT_VALUE, // state 245 [ldarg.0 -> ldfld -> ldarg.1 -> ldfld -> sub]
+ NA, // state 246
+ NA, // state 247
+ DEFAULT_WEIGHT_VALUE, // state 248 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> add]
+ DEFAULT_WEIGHT_VALUE, // state 249 [ldarga.s -> ldfld -> ldarga.s -> ldfld -> sub]
};
-static_assert_no_msg(NUM_SM_STATES == sizeof(g_StateWeights)/sizeof(g_StateWeights[0]));
-
-const short * gp_StateWeights = g_StateWeights;
+static_assert_no_msg(NUM_SM_STATES == sizeof(g_StateWeights) / sizeof(g_StateWeights[0]));
+const short* gp_StateWeights = g_StateWeights;
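
The regenerated table above keeps the same semantics it had before the reformat: each state of the JIT's opcode state machine gets a signed weight, DEFAULT_WEIGHT_VALUE (65) is the table average used where no measurement exists, and NA marks states that should never be queried. Below is a minimal standalone sketch of how a consumer might fold such weights over a sequence of states; the trimmed table, the EstimateSequenceWeight helper, and the fallback policy for NA states are illustrative assumptions, not code from this patch.

#include <cstdio>

static const short NA_WEIGHT      = 9999; // same sentinel value as NA above
static const short kDefaultWeight = 65;   // same value as DEFAULT_WEIGHT_VALUE

// A tiny stand-in table; the real g_StateWeights has NUM_SM_STATES entries.
static const short exampleWeights[] = {NA_WEIGHT, NA_WEIGHT, kDefaultWeight, 10, 16, 35};

// Hypothetical helper: sum the per-state weights for a sequence of states.
static int EstimateSequenceWeight(const short* weights, const int* states, int n)
{
    int total = 0;
    for (int i = 0; i < n; i++)
    {
        short w = weights[states[i]];
        // Unreachable states carry the NA sentinel; fall back to the table average.
        total += (w == NA_WEIGHT) ? kDefaultWeight : w;
    }
    return total;
}

int main()
{
    const int states[] = {3, 4, 5}; // e.g. ldarg.0, ldarg.1, ldarg.2
    printf("estimated weight: %d\n", EstimateSequenceWeight(exampleWeights, states, 3));
    return 0;
}
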
diff --git a/src/jit/ssabuilder.cpp b/src/jit/ssabuilder.cpp
index 9d7762ee8f..3cdf659cce 100644
--- a/src/jit/ssabuilder.cpp
+++ b/src/jit/ssabuilder.cpp
@@ -40,7 +40,7 @@ static void TopologicalSortHelper(BasicBlock* block, Compiler* comp, bool* visit
{
visited[block->bbNum] = true;
- ArrayStack<BasicBlock *> blocks(comp);
+ ArrayStack<BasicBlock*> blocks(comp);
ArrayStack<AllSuccessorIter> iterators(comp);
ArrayStack<AllSuccessorIter> ends(comp);
@@ -57,7 +57,7 @@ static void TopologicalSortHelper(BasicBlock* block, Compiler* comp, bool* visit
block = blocks.Top();
#ifdef DEBUG
- if (comp->verboseSsa)
+ if (comp->verboseSsa)
{
printf("[SsaBuilder::TopologicalSortHelper] Visiting BB%02u: ", block->bbNum);
printf("[");
@@ -67,7 +67,7 @@ static void TopologicalSortHelper(BasicBlock* block, Compiler* comp, bool* visit
printf("BB%02u, ", block->GetSucc(i, comp)->bbNum);
}
EHSuccessorIter end = block->GetEHSuccs(comp).end();
- for (EHSuccessorIter ehsi = block->GetEHSuccs(comp).begin(); ehsi != end; ++ehsi)
+ for (EHSuccessorIter ehsi = block->GetEHSuccs(comp).begin(); ehsi != end; ++ehsi)
{
printf("[EH]BB%02u, ", (*ehsi)->bbNum);
}
@@ -79,7 +79,7 @@ static void TopologicalSortHelper(BasicBlock* block, Compiler* comp, bool* visit
{
// if the block on TOS still has unreached successors, visit them
AllSuccessorIter& iter = iterators.TopRef();
- BasicBlock* succ = *iter;
+ BasicBlock* succ = *iter;
++iter;
// push the child
@@ -98,7 +98,7 @@ static void TopologicalSortHelper(BasicBlock* block, Compiler* comp, bool* visit
iterators.Pop();
ends.Pop();
- postOrder[*count] = block;
+ postOrder[*count] = block;
block->bbPostOrderNum = *count;
*count += 1;
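
TopologicalSortHelper, touched in the hunks above, drives an iterative depth-first search with parallel stacks of blocks and successor iterators and hands out post-order numbers as blocks are popped. A standalone sketch of that traversal over a plain adjacency list follows; the Graph/PostOrderSort names are hypothetical, and std::vector stands in for the JIT's ArrayStack and BasicBlock types.

#include <cstdio>
#include <vector>

struct Graph
{
    std::vector<std::vector<int>> succs; // successor list per node
};

static void PostOrderSort(const Graph& g, int root, std::vector<int>& postOrder)
{
    std::vector<bool>   visited(g.succs.size(), false);
    std::vector<int>    nodeStack; // mirrors the 'blocks' stack
    std::vector<size_t> iterStack; // mirrors the successor-iterator stack

    visited[root] = true;
    nodeStack.push_back(root);
    iterStack.push_back(0);

    while (!nodeStack.empty())
    {
        int node = nodeStack.back();
        if (iterStack.back() < g.succs[node].size())
        {
            // The node on top still has unvisited successors: advance and push.
            int succ = g.succs[node][iterStack.back()];
            iterStack.back() += 1;
            if (!visited[succ])
            {
                visited[succ] = true;
                nodeStack.push_back(succ);
                iterStack.push_back(0);
            }
        }
        else
        {
            // All successors done: this node gets the next post-order slot.
            nodeStack.pop_back();
            iterStack.pop_back();
            postOrder.push_back(node);
        }
    }
}

int main()
{
    Graph g;
    g.succs = {{1, 2}, {3}, {3}, {}}; // diamond: 0 -> {1, 2} -> 3
    std::vector<int> postOrder;
    PostOrderSort(g, 0, postOrder);
    for (int n : postOrder)
    {
        printf("%d ", n); // prints a valid post order, here "3 1 2 0"
    }
    printf("\n");
    return 0;
}
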
@@ -109,8 +109,8 @@ static void TopologicalSortHelper(BasicBlock* block, Compiler* comp, bool* visit
/**
* Method that finds a common IDom parent, much like least common ancestor.
- *
- * @param finger1 A basic block that might share IDom ancestor with finger2.
+ *
+ * @param finger1 A basic block that might share IDom ancestor with finger2.
* @param finger2 A basic block that might share IDom ancestor with finger1.
*
* @see "A simple, fast dominance algorithm" by Keith D. Cooper, Timothy J. Harvey, Ken Kennedy.
@@ -126,13 +126,19 @@ static inline BasicBlock* IntersectDom(BasicBlock* finger1, BasicBlock* finger2)
{
while (finger1 != finger2)
{
- if (finger1 == NULL || finger2 == NULL) return NULL;
- while (finger1 != NULL && finger1->bbPostOrderNum < finger2->bbPostOrderNum)
+ if (finger1 == nullptr || finger2 == nullptr)
+ {
+ return nullptr;
+ }
+ while (finger1 != nullptr && finger1->bbPostOrderNum < finger2->bbPostOrderNum)
{
finger1 = finger1->bbIDom;
}
- if (finger1 == NULL) return NULL;
- while (finger2 != NULL && finger2->bbPostOrderNum < finger1->bbPostOrderNum)
+ if (finger1 == nullptr)
+ {
+ return nullptr;
+ }
+ while (finger2 != nullptr && finger2->bbPostOrderNum < finger1->bbPostOrderNum)
{
finger2 = finger2->bbIDom;
}
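
IntersectDom, reformatted above, is the "intersect" step of Cooper, Harvey and Kennedy's dominance algorithm: the finger with the smaller post-order number climbs its bbIDom chain until both fingers meet. A standalone worked example follows; the Node struct and its fields are stand-ins for BasicBlock::bbIDom and bbPostOrderNum.

#include <cstdio>

struct Node
{
    int   postOrderNum;
    Node* idom;
};

static Node* Intersect(Node* finger1, Node* finger2)
{
    while (finger1 != finger2)
    {
        if (finger1 == nullptr || finger2 == nullptr)
        {
            return nullptr;
        }
        while (finger1 != nullptr && finger1->postOrderNum < finger2->postOrderNum)
        {
            finger1 = finger1->idom; // climb toward the entry (higher post-order number)
        }
        if (finger1 == nullptr)
        {
            return nullptr;
        }
        while (finger2 != nullptr && finger2->postOrderNum < finger1->postOrderNum)
        {
            finger2 = finger2->idom;
        }
    }
    return finger1;
}

int main()
{
    // Diamond CFG: the entry dominates both arms and carries the highest
    // post-order number, so the two walks converge on it.
    Node  entry  = {3, nullptr};
    Node  left   = {1, &entry};
    Node  right  = {2, &entry};
    Node* common = Intersect(&left, &right);
    printf("common idom post-order: %d\n", common->postOrderNum); // prints 3
    return 0;
}
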
@@ -152,7 +158,9 @@ void Compiler::fgSsaBuild()
// If this is not the first invocation, reset data structures for SSA.
if (fgSsaPassesCompleted > 0)
+ {
fgResetForSsa();
+ }
SsaBuilder builder(this, pIAllocator);
builder.Build();
@@ -165,7 +173,7 @@ void Compiler::fgSsaBuild()
if (verbose)
{
JITDUMP("\nAfter fgSsaBuild:\n");
- fgDispBasicBlocks(/*dumpTrees*/true);
+ fgDispBasicBlocks(/*dumpTrees*/ true);
}
#endif // DEBUG
}
@@ -174,7 +182,7 @@ void Compiler::fgResetForSsa()
{
for (unsigned i = 0; i < lvaCount; ++i)
{
- lvaTable[i].lvPerSsaData.Reset();
+ lvaTable[i].lvPerSsaData.Reset();
}
for (BasicBlock* blk = fgFirstBB; blk != nullptr; blk = blk->bbNext)
{
@@ -185,7 +193,9 @@ void Compiler::fgResetForSsa()
GenTreePtr last = blk->bbTreeList->gtPrev;
blk->bbTreeList = blk->FirstNonPhiDef();
if (blk->bbTreeList != nullptr)
+ {
blk->bbTreeList->gtPrev = last;
+ }
}
}
}
@@ -223,7 +233,7 @@ SsaBuilder::SsaBuilder(Compiler* pCompiler, IAllocator* pIAllocator)
int SsaBuilder::TopologicalSort(BasicBlock** postOrder, int count)
{
// Allocate and initialize visited flags.
- bool* visited = (bool*) alloca(count * sizeof(bool));
+ bool* visited = (bool*)alloca(count * sizeof(bool));
memset(visited, 0, count * sizeof(bool));
// Display basic blocks.
@@ -254,14 +264,14 @@ void SsaBuilder::ComputeImmediateDom(BasicBlock** postOrder, int count)
// TODO-Cleanup: We currently have two dominance computations happening. We should unify them; for
// now, at least forget the results of the first.
- for (BasicBlock* blk = m_pCompiler->fgFirstBB; blk != NULL; blk = blk->bbNext)
+ for (BasicBlock* blk = m_pCompiler->fgFirstBB; blk != nullptr; blk = blk->bbNext)
{
- blk->bbIDom = NULL;
+ blk->bbIDom = nullptr;
}
// Add entry point to processed as its IDom is NULL.
BitVecTraits traits(m_pCompiler->fgBBNumMax + 1, m_pCompiler);
- BitVec BITVEC_INIT_NOCOPY(processed, BitVecOps::MakeEmpty(&traits));
+ BitVec BITVEC_INIT_NOCOPY(processed, BitVecOps::MakeEmpty(&traits));
BitVecOps::AddElemD(&traits, processed, m_pCompiler->fgFirstBB->bbNum);
assert(postOrder[count - 1] == m_pCompiler->fgFirstBB);
@@ -279,7 +289,7 @@ void SsaBuilder::ComputeImmediateDom(BasicBlock** postOrder, int count)
DBG_SSA_JITDUMP("Visiting in reverse post order: BB%02u.\n", block->bbNum);
// Find the first processed predecessor block.
- BasicBlock* predBlock = NULL;
+ BasicBlock* predBlock = nullptr;
for (flowList* pred = m_pCompiler->BlockPredsWithEH(block); pred; pred = pred->flNext)
{
if (BitVecOps::IsMember(&traits, processed, pred->flBlock->bbNum))
@@ -288,9 +298,9 @@ void SsaBuilder::ComputeImmediateDom(BasicBlock** postOrder, int count)
break;
}
}
-
+
// There could just be a single basic block, so just check if there were any preds.
- if (predBlock != NULL)
+ if (predBlock != nullptr)
{
DBG_SSA_JITDUMP("Pred block is BB%02u.\n", predBlock->bbNum);
}
@@ -308,7 +318,10 @@ void SsaBuilder::ComputeImmediateDom(BasicBlock** postOrder, int count)
// been computed. But that's OK -- if they're in a cycle, they share the same immediate
// dominator, so the contribution of "pred->flBlock" is not necessary to compute
// the result.
- if (domAncestor != NULL) bbIDom = domAncestor;
+ if (domAncestor != nullptr)
+ {
+ bbIDom = domAncestor;
+ }
}
}
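
ComputeImmediateDom, whose hunks end here, applies that intersect step while walking blocks in reverse post order: every block starts with no idom, the entry is seeded as processed, and each block's idom is folded together from its processed predecessors. The standalone sketch below uses an index-based idom array and repeats the pass to a fixed point; the helper names and the shape of the outer loop are an assumed simplification of the full routine, which also walks EH predecessors via BlockPredsWithEH.

#include <cstdio>
#include <vector>

static const int kNoIdom = -1;

// Intersect over an idom[] array indexed by post-order number (the entry has
// the largest number). Both arguments must already have an idom chain that
// reaches the entry.
static int Intersect(const std::vector<int>& idom, int b1, int b2)
{
    while (b1 != b2)
    {
        while (b1 < b2)
        {
            b1 = idom[b1];
        }
        while (b2 < b1)
        {
            b2 = idom[b2];
        }
    }
    return b1;
}

int main()
{
    // Nodes named by post-order number: 3 = entry, 1 and 2 = diamond arms, 0 = join.
    std::vector<std::vector<int>> preds = {{1, 2}, {3}, {3}, {}};
    std::vector<int>              idom(4, kNoIdom);
    idom[3] = 3; // the entry is its own dominator

    bool changed = true;
    while (changed)
    {
        changed = false;
        for (int b = 2; b >= 0; b--) // reverse post order, skipping the entry
        {
            int newIdom = kNoIdom;
            for (int p : preds[b])
            {
                if (idom[p] == kNoIdom)
                {
                    continue; // unprocessed predecessor; ignore it this pass
                }
                newIdom = (newIdom == kNoIdom) ? p : Intersect(idom, newIdom, p);
            }
            if (newIdom != idom[b])
            {
                idom[b] = newIdom;
                changed = true;
            }
        }
    }
    for (int b = 0; b < 4; b++)
    {
        printf("idom[%d] = %d\n", b, idom[b]); // the join and both arms report 3
    }
    return 0;
}
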
@@ -381,7 +394,7 @@ void SsaBuilder::ConstructDomTreeForBlock(Compiler* pCompiler, BasicBlock* block
BasicBlock* bbIDom = block->bbIDom;
// bbIDom for (only) fgFirstBB will be NULL.
- if (bbIDom == NULL)
+ if (bbIDom == nullptr)
{
return;
}
@@ -414,7 +427,7 @@ void SsaBuilder::ComputeDominators(Compiler* pCompiler, BlkToBlkSetMap* domTree)
JITDUMP("*************** In SsaBuilder::ComputeDominators(Compiler*, ...)\n");
// Construct the DOM tree from bbIDom
- for (BasicBlock* block = pCompiler->fgFirstBB; block != NULL; block = block->bbNext)
+ for (BasicBlock* block = pCompiler->fgFirstBB; block != nullptr; block = block->bbNext)
{
ConstructDomTreeForBlock(pCompiler, block, domTree);
}
@@ -449,11 +462,11 @@ void SsaBuilder::ComputeDominators(BasicBlock** postOrder, int count, BlkToBlkSe
// Allocate space for constant time computation of (a DOM b?) query.
unsigned bbArrSize = m_pCompiler->fgBBNumMax + 1; // We will use 1-based bbNums as indices into these arrays, so
// add 1.
- m_pDomPreOrder = jitstd::utility::allocate<int>(m_allocator, bbArrSize);
+ m_pDomPreOrder = jitstd::utility::allocate<int>(m_allocator, bbArrSize);
m_pDomPostOrder = jitstd::utility::allocate<int>(m_allocator, bbArrSize);
// Initial counters.
- int preIndex = 0;
+ int preIndex = 0;
int postIndex = 0;
// Populate the pre and post order of the tree.
@@ -513,15 +526,15 @@ BlkToBlkSetMap* SsaBuilder::ComputeIteratedDominanceFrontier(BasicBlock** postOr
// In that definition, we're considering "block" to be B3, and trying
// to find B1's. To do so, first we consider the predecessors of "block",
// searching for candidate B2's -- "block" is obviously an immediate successor
- // of its immediate predecessors. If there are zero or one preds, then there
+ // of its immediate predecessors. If there are zero or one preds, then there
// is no pred, or else the single pred dominates "block", so no B2 exists.
flowList* blockPreds = m_pCompiler->BlockPredsWithEH(block);
// If block has more 0/1 predecessor, skip.
- if (blockPreds == NULL || blockPreds->flNext == NULL)
+ if (blockPreds == nullptr || blockPreds->flNext == nullptr)
{
- DBG_SSA_JITDUMP(" Has %d preds; skipping.\n", blockPreds == NULL ? 0 : 1);
+ DBG_SSA_JITDUMP(" Has %d preds; skipping.\n", blockPreds == nullptr ? 0 : 1);
continue;
}
@@ -540,9 +553,8 @@ BlkToBlkSetMap* SsaBuilder::ComputeIteratedDominanceFrontier(BasicBlock** postOr
// Along this way, make "block"/B3 part of the dom frontier of the B1.
// When we reach this immediate dominator, the definition no longer applies, since this
// potential B1 *does* dominate "block"/B3, so we stop.
- for (BasicBlock* b1 = pred->flBlock;
- (b1 != NULL) && (b1 != block->bbIDom); // !root && !loop
- b1 = b1->bbIDom)
+ for (BasicBlock* b1 = pred->flBlock; (b1 != nullptr) && (b1 != block->bbIDom); // !root && !loop
+ b1 = b1->bbIDom)
{
DBG_SSA_JITDUMP(" Adding BB%02u to dom frontier of pred dom BB%02u.\n", block->bbNum, b1->bbNum);
BlkSet* pBlkSet;
@@ -565,7 +577,7 @@ BlkToBlkSetMap* SsaBuilder::ComputeIteratedDominanceFrontier(BasicBlock** postOr
BasicBlock* block = postOrder[i];
printf("Block BB%02u := {", block->bbNum);
- bool first = true;
+ bool first = true;
BlkSet* blkDf;
if (frontier->Lookup(block, &blkDf))
{
@@ -584,11 +596,11 @@ BlkToBlkSetMap* SsaBuilder::ComputeIteratedDominanceFrontier(BasicBlock** postOr
}
#endif
- // Now do the closure operation to make the dominance frontier into an IDF.
+ // Now do the closure operation to make the dominance frontier into an IDF.
// There's probably a better way to do this...
BlkToBlkSetMap* idf = new (m_pCompiler->getAllocator()) BlkToBlkSetMap(m_pCompiler->getAllocator());
- for (BlkToBlkSetMap::KeyIterator kiFrontBlks = frontier->Begin();
- !kiFrontBlks.Equal(frontier->End()); kiFrontBlks++)
+ for (BlkToBlkSetMap::KeyIterator kiFrontBlks = frontier->Begin(); !kiFrontBlks.Equal(frontier->End());
+ kiFrontBlks++)
{
// Create IDF(b)
BlkSet* blkIdf = new (m_pCompiler->getAllocator()) BlkSet(m_pCompiler->getAllocator());
@@ -602,8 +614,8 @@ BlkToBlkSetMap* SsaBuilder::ComputeIteratedDominanceFrontier(BasicBlock** postOr
while (delta->GetCount() > 0)
{
// Extract a block x to be worked on.
- BlkSet::KeyIterator ki = delta->Begin();
- BasicBlock* curBlk = ki.Get();
+ BlkSet::KeyIterator ki = delta->Begin();
+ BasicBlock* curBlk = ki.Get();
// TODO-Cleanup: Remove(ki) doesn't work correctly in SimplerHash.
delta->Remove(curBlk);
@@ -633,7 +645,7 @@ BlkToBlkSetMap* SsaBuilder::ComputeIteratedDominanceFrontier(BasicBlock** postOr
BasicBlock* block = postOrder[i];
printf("Block BB%02u := {", block->bbNum);
- bool first = true;
+ bool first = true;
BlkSet* blkIdf;
if (idf->Lookup(block, &blkIdf))
{
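
ComputeIteratedDominanceFrontier, covered by the hunks above, works in two steps: it builds the dominance frontier by walking each join block's predecessors up their bbIDom chains, then closes that frontier into the iterated dominance frontier with a delta worklist. The standalone sketch below uses std::set in place of the JIT's BlkSet maps; the diamond CFG and block numbering are illustrative only.

#include <cstdio>
#include <set>
#include <vector>

int main()
{
    // Diamond CFG named by post-order number: 3 = entry, 1/2 = arms, 0 = join.
    std::vector<std::vector<int>> preds = {{1, 2}, {3}, {3}, {}};
    std::vector<int>              idom  = {3, 3, 3, 3}; // from the dominator pass above

    // Dominance frontier: a join block belongs to DF(b1) for every b1 on the
    // path pred -> ... -> (but not including) idom(join).
    std::vector<std::set<int>> df(4);
    for (int block = 0; block < 4; block++)
    {
        if (preds[block].size() < 2)
        {
            continue; // zero or one predecessor: not a join, nothing to add
        }
        for (int pred : preds[block])
        {
            for (int b1 = pred; b1 != idom[block]; b1 = idom[b1])
            {
                df[b1].insert(block);
            }
        }
    }

    // Iterated dominance frontier: start from DF(b) and keep folding in the
    // frontier of everything already in the set until it stops growing.
    std::vector<std::set<int>> idf(4);
    for (int b = 0; b < 4; b++)
    {
        idf[b]              = df[b];
        std::set<int> delta = df[b];
        while (!delta.empty())
        {
            int cur = *delta.begin();
            delta.erase(delta.begin());
            for (int d : df[cur])
            {
                if (idf[b].insert(d).second)
                {
                    delta.insert(d); // newly added: its frontier must be folded in too
                }
            }
        }
    }

    for (int b = 0; b < 4; b++)
    {
        printf("IDF(%d) = {", b);
        for (int d : idf[b])
        {
            printf(" %d", d);
        }
        printf(" }\n"); // both arms report { 0 }; the entry and the join report {}
    }
    return 0;
}
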
@@ -670,7 +682,10 @@ static GenTree* GetPhiNode(BasicBlock* block, unsigned lclNum)
{
// A prefix of the statements of the block are phi definition nodes. If we complete processing
// that prefix, exit.
- if (!stmt->IsPhiDefnStmt()) break;
+ if (!stmt->IsPhiDefnStmt())
+ {
+ break;
+ }
GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
@@ -681,7 +696,7 @@ static GenTree* GetPhiNode(BasicBlock* block, unsigned lclNum)
return tree->gtOp.gtOp2;
}
}
- return NULL;
+ return nullptr;
}
/**
@@ -702,7 +717,7 @@ void SsaBuilder::InsertPhiFunctions(BasicBlock** postOrder, int count)
m_pCompiler->fgLocalVarLiveness();
EndPhase(PHASE_BUILD_SSA_LIVENESS);
- // Compute dominance frontier.
+ // Compute dominance frontier.
BlkToBlkSetMap* frontier = ComputeIteratedDominanceFrontier(postOrder, count);
EndPhase(PHASE_BUILD_SSA_IDF);
@@ -733,12 +748,12 @@ void SsaBuilder::InsertPhiFunctions(BasicBlock** postOrder, int count)
continue;
}
-
// For each block "bbInDomFront" that is in the dominance frontier of "block"...
for (BlkSet::KeyIterator iterBlk = blkIdf->Begin(); !iterBlk.Equal(blkIdf->End()); ++iterBlk)
{
BasicBlock* bbInDomFront = iterBlk.Get();
- DBG_SSA_JITDUMP(" Considering BB%02u in dom frontier of BB%02u:\n", bbInDomFront->bbNum, block->bbNum);
+ DBG_SSA_JITDUMP(" Considering BB%02u in dom frontier of BB%02u:\n", bbInDomFront->bbNum,
+ block->bbNum);
// Check if variable "lclNum" is live in block "*iterBlk".
if (!VarSetOps::IsMember(m_pCompiler, bbInDomFront->bbLiveIn, varIndex))
@@ -747,20 +762,21 @@ void SsaBuilder::InsertPhiFunctions(BasicBlock** postOrder, int count)
}
// Check if we've already inserted a phi node.
- if (GetPhiNode(bbInDomFront, lclNum) == NULL)
+ if (GetPhiNode(bbInDomFront, lclNum) == nullptr)
{
// We have a variable i that is defined in block j and live at l, and l belongs to dom frontier of
// j. So insert a phi node at l.
JITDUMP("Inserting phi definition for V%02u at start of BB%02u.\n", lclNum, bbInDomFront->bbNum);
- GenTreePtr phiLhs = m_pCompiler->gtNewLclvNode(lclNum, m_pCompiler->lvaTable[lclNum].TypeGet());
+ GenTreePtr phiLhs = m_pCompiler->gtNewLclvNode(lclNum, m_pCompiler->lvaTable[lclNum].TypeGet());
// Create 'phiRhs' as a GT_PHI node for 'lclNum', it will eventually hold a GT_LIST of GT_PHI_ARG
// nodes. However we have to construct this list so for now the gtOp1 of 'phiRhs' is a nullptr.
// It will get replaced with a GT_LIST of GT_PHI_ARG nodes in
// SsaBuilder::AssignPhiNodeRhsVariables() and in SsaBuilder::AddDefToHandlerPhis()
- GenTreePtr phiRhs = m_pCompiler->gtNewOperNode(GT_PHI, m_pCompiler->lvaTable[lclNum].TypeGet(), nullptr);
+ GenTreePtr phiRhs =
+ m_pCompiler->gtNewOperNode(GT_PHI, m_pCompiler->lvaTable[lclNum].TypeGet(), nullptr);
GenTreePtr phiAsg = m_pCompiler->gtNewAssignNode(phiLhs, phiRhs);
@@ -778,14 +794,17 @@ void SsaBuilder::InsertPhiFunctions(BasicBlock** postOrder, int count)
for (BlkSet::KeyIterator iterBlk = blkIdf->Begin(); !iterBlk.Equal(blkIdf->End()); ++iterBlk)
{
BasicBlock* bbInDomFront = iterBlk.Get();
- DBG_SSA_JITDUMP(" Considering BB%02u in dom frontier of BB%02u for Heap phis:\n", bbInDomFront->bbNum, block->bbNum);
+ DBG_SSA_JITDUMP(" Considering BB%02u in dom frontier of BB%02u for Heap phis:\n",
+ bbInDomFront->bbNum, block->bbNum);
// Check if Heap is live into block "*iterBlk".
- if (!bbInDomFront->bbHeapLiveIn)
+ if (!bbInDomFront->bbHeapLiveIn)
+ {
continue;
+ }
// Check if we've already inserted a phi node.
- if (bbInDomFront->bbHeapSsaPhiFunc == NULL)
+ if (bbInDomFront->bbHeapSsaPhiFunc == nullptr)
{
// We have a variable i that is defined in block j and live at l, and l belongs to dom frontier of
// j. So insert a phi node at l.
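
InsertPhiFunctions, whose hunks end here, applies the classical placement rule: a variable defined in block b needs a phi at the start of every block in IDF(b) where it is live on entry, unless one has already been inserted. The standalone sketch below shows just that decision loop with plain std:: containers; the block numbers and the single variable are made up for illustration.

#include <cstdio>
#include <set>
#include <vector>

int main()
{
    const int numBlocks = 4;
    // From the IDF sketch above: block 0 is the join, blocks 1 and 2 are the arms.
    std::vector<std::set<int>> idf(numBlocks);
    idf[1] = {0};
    idf[2] = {0};

    // One variable "v": defined in both arms, live on entry to the join.
    std::set<int> defBlocks = {1, 2};
    std::set<int> liveIn    = {0};

    std::set<int> hasPhi;
    for (int def : defBlocks)
    {
        for (int frontierBlock : idf[def])
        {
            if (liveIn.count(frontierBlock) == 0)
            {
                continue; // dead on entry to the frontier block: no phi needed
            }
            if (hasPhi.insert(frontierBlock).second)
            {
                printf("insert phi for v at start of block %d\n", frontierBlock);
            }
        }
    }
    return 0; // prints the insertion once, even though v has two defining blocks
}
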
@@ -811,7 +830,7 @@ void SsaBuilder::InsertPhiFunctions(BasicBlock** postOrder, int count)
void SsaBuilder::AddUsePoint(GenTree* tree)
{
assert(tree->IsLocal());
- SsaVarName key(tree->gtLclVarCommon.gtLclNum, tree->gtLclVarCommon.gtSsaNum);
+ SsaVarName key(tree->gtLclVarCommon.gtLclNum, tree->gtLclVarCommon.gtSsaNum);
VarToUses::iterator iter = m_uses.find(key);
if (iter == m_uses.end())
{
@@ -840,14 +859,14 @@ void SsaBuilder::AddDefPoint(GenTree* tree, BasicBlock* blk)
unsigned defSsaNum;
if (tree->IsLocal())
{
- lclNum = tree->gtLclVarCommon.gtLclNum;
+ lclNum = tree->gtLclVarCommon.gtLclNum;
defSsaNum = m_pCompiler->GetSsaNumForLocalVarDef(tree);
}
else
{
bool b = m_pCompiler->GetIndirAssignMap()->Lookup(tree, &pIndirAnnot);
assert(b);
- lclNum = pIndirAnnot->m_lclNum;
+ lclNum = pIndirAnnot->m_lclNum;
defSsaNum = pIndirAnnot->m_defSsaNum;
}
#ifdef DEBUG
@@ -855,12 +874,12 @@ void SsaBuilder::AddDefPoint(GenTree* tree, BasicBlock* blk)
m_pCompiler->lvaTable[lclNum].lvNumSsaNames++;
#endif
// Record where the defn happens.
- LclSsaVarDsc* ssaDef = m_pCompiler->lvaTable[lclNum].GetPerSsaData(defSsaNum);
- ssaDef->m_defLoc.m_blk = blk;
+ LclSsaVarDsc* ssaDef = m_pCompiler->lvaTable[lclNum].GetPerSsaData(defSsaNum);
+ ssaDef->m_defLoc.m_blk = blk;
ssaDef->m_defLoc.m_tree = tree;
#ifdef SSA_FEATURE_USEDEF
- SsaVarName key(lclNum, defSsaNum);
+ SsaVarName key(lclNum, defSsaNum);
VarToDef::iterator iter = m_defs.find(key);
if (iter == m_defs.end())
{
@@ -874,7 +893,8 @@ void SsaBuilder::AddDefPoint(GenTree* tree, BasicBlock* blk)
bool SsaBuilder::IsIndirectAssign(GenTreePtr tree, Compiler::IndirectAssignmentAnnotation** ppIndirAssign)
{
- return tree->OperGet() == GT_ASG && m_pCompiler->m_indirAssignMap != NULL && m_pCompiler->GetIndirAssignMap()->Lookup(tree, ppIndirAssign);
+ return tree->OperGet() == GT_ASG && m_pCompiler->m_indirAssignMap != nullptr &&
+ m_pCompiler->GetIndirAssignMap()->Lookup(tree, ppIndirAssign);
}
/**
@@ -895,8 +915,8 @@ void SsaBuilder::TreeRenameVariables(GenTree* tree, BasicBlock* block, SsaRename
// can skip these during (at least) value numbering.
if (tree->OperIsAssignment())
{
- GenTreePtr lhs = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/true);
- GenTreePtr trueLhs = lhs->gtEffectiveVal(/*commaOnly*/true);
+ GenTreePtr lhs = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/ true);
+ GenTreePtr trueLhs = lhs->gtEffectiveVal(/*commaOnly*/ true);
if (trueLhs->OperGet() == GT_IND)
{
trueLhs->gtFlags |= GTF_IND_ASG_LHS;
@@ -910,8 +930,7 @@ void SsaBuilder::TreeRenameVariables(GenTree* tree, BasicBlock* block, SsaRename
// Figure out if "tree" may make a new heap state (if we care for this block).
if (!block->bbHeapHavoc)
{
- if (tree->OperIsAssignment() ||
- tree->OperIsBlkOp())
+ if (tree->OperIsAssignment() || tree->OperIsBlkOp())
{
if (m_pCompiler->ehBlockHasExnFlowDsc(block))
{
@@ -923,7 +942,7 @@ void SsaBuilder::TreeRenameVariables(GenTree* tree, BasicBlock* block, SsaRename
pRenameState->PushHeap(block, count);
m_pCompiler->GetHeapSsaMap()->Set(tree, count);
#ifdef DEBUG
- if (JitTls::GetCompiler()->verboseSsa)
+ if (JitTls::GetCompiler()->verboseSsa)
{
printf("Node ");
Compiler::printTreeID(tree);
@@ -938,13 +957,13 @@ void SsaBuilder::TreeRenameVariables(GenTree* tree, BasicBlock* block, SsaRename
}
}
- Compiler::IndirectAssignmentAnnotation* pIndirAssign = NULL;
+ Compiler::IndirectAssignmentAnnotation* pIndirAssign = nullptr;
if (!tree->IsLocal() && !IsIndirectAssign(tree, &pIndirAssign))
{
return;
}
- if (pIndirAssign != NULL)
+ if (pIndirAssign != nullptr)
{
unsigned lclNum = pIndirAssign->m_lclNum;
// Is this a variable we exclude from SSA?
@@ -958,7 +977,7 @@ void SsaBuilder::TreeRenameVariables(GenTree* tree, BasicBlock* block, SsaRename
{
pIndirAssign->m_useSsaNum = pRenameState->CountForUse(lclNum);
}
- unsigned count = pRenameState->CountForDef(lclNum);
+ unsigned count = pRenameState->CountForDef(lclNum);
pIndirAssign->m_defSsaNum = count;
pRenameState->Push(block, lclNum, count);
AddDefPoint(tree, block);
@@ -972,7 +991,7 @@ void SsaBuilder::TreeRenameVariables(GenTree* tree, BasicBlock* block, SsaRename
tree->gtLclVarCommon.SetSsaNum(SsaConfig::RESERVED_SSA_NUM);
return;
}
-
+
if (tree->gtFlags & GTF_VAR_DEF)
{
if (tree->gtFlags & GTF_VAR_USEASG)
@@ -1004,7 +1023,9 @@ void SsaBuilder::TreeRenameVariables(GenTree* tree, BasicBlock* block, SsaRename
// handlers for try blocks that "block" is within. (But only do this for "real" definitions,
// not phi definitions.)
if (!isPhiDefn)
- AddDefToHandlerPhis(block, lclNum, count);
+ {
+ AddDefToHandlerPhis(block, lclNum, count);
+ }
}
else if (!isPhiDefn) // Phi args already have ssa numbers.
{
@@ -1038,13 +1059,15 @@ void SsaBuilder::TreeRenameVariables(GenTree* tree, BasicBlock* block, SsaRename
void SsaBuilder::AddDefToHandlerPhis(BasicBlock* block, unsigned lclNum, unsigned count)
{
- assert(m_pCompiler->lvaTable[lclNum].lvTracked); // Precondition.
+ assert(m_pCompiler->lvaTable[lclNum].lvTracked); // Precondition.
unsigned lclIndex = m_pCompiler->lvaTable[lclNum].lvVarIndex;
EHblkDsc* tryBlk = m_pCompiler->ehGetBlockExnFlowDsc(block);
if (tryBlk != nullptr)
{
- DBG_SSA_JITDUMP("Definition of local V%02u/d:%d in block BB%02u has exn handler; adding as phi arg to handlers.\n", lclNum, count, block->bbNum);
+ DBG_SSA_JITDUMP(
+ "Definition of local V%02u/d:%d in block BB%02u has exn handler; adding as phi arg to handlers.\n", lclNum,
+ count, block->bbNum);
while (true)
{
BasicBlock* handler = tryBlk->ExFlowBlock();
@@ -1059,7 +1082,10 @@ void SsaBuilder::AddDefToHandlerPhis(BasicBlock* block, unsigned lclNum, unsigne
for (GenTreePtr stmt = handler->bbTreeList; stmt; stmt = stmt->gtNext)
{
// If the tree is not an SSA def, break out of the loop: we're done.
- if (!stmt->IsPhiDefnStmt()) break;
+ if (!stmt->IsPhiDefnStmt())
+ {
+ break;
+ }
GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
@@ -1068,28 +1094,32 @@ void SsaBuilder::AddDefToHandlerPhis(BasicBlock* block, unsigned lclNum, unsigne
if (tree->gtOp.gtOp1->gtLclVar.gtLclNum == lclNum)
{
// It's the definition for the right local. Add "count" to the RHS.
- GenTreePtr phi = tree->gtOp.gtOp2;
- GenTreeArgList* args = NULL;
- if (phi->gtOp.gtOp1 != NULL) args = phi->gtOp.gtOp1->AsArgList();
+ GenTreePtr phi = tree->gtOp.gtOp2;
+ GenTreeArgList* args = nullptr;
+ if (phi->gtOp.gtOp1 != nullptr)
+ {
+ args = phi->gtOp.gtOp1->AsArgList();
+ }
#ifdef DEBUG
// Make sure it isn't already present: we should only add each definition once.
- for (GenTreeArgList* curArgs = args; curArgs != NULL; curArgs = curArgs->Rest())
+ for (GenTreeArgList* curArgs = args; curArgs != nullptr; curArgs = curArgs->Rest())
{
GenTreePhiArg* phiArg = curArgs->Current()->AsPhiArg();
assert(phiArg->gtSsaNum != count);
}
#endif
- var_types typ = m_pCompiler->lvaTable[lclNum].TypeGet();
- GenTreePhiArg* newPhiArg =
+ var_types typ = m_pCompiler->lvaTable[lclNum].TypeGet();
+ GenTreePhiArg* newPhiArg =
new (m_pCompiler, GT_PHI_ARG) GenTreePhiArg(typ, lclNum, count, block);
- phi->gtOp.gtOp1 = new (m_pCompiler, GT_LIST) GenTreeArgList(newPhiArg, args );
+ phi->gtOp.gtOp1 = new (m_pCompiler, GT_LIST) GenTreeArgList(newPhiArg, args);
m_pCompiler->gtSetStmtInfo(stmt);
m_pCompiler->fgSetStmtSeq(stmt);
#ifdef DEBUG
phiFound = true;
#endif
- DBG_SSA_JITDUMP(" Added phi arg u:%d for V%02u to phi defn in handler block BB%02u.\n", count, lclNum, handler->bbNum);
+ DBG_SSA_JITDUMP(" Added phi arg u:%d for V%02u to phi defn in handler block BB%02u.\n", count,
+ lclNum, handler->bbNum);
break;
}
}
@@ -1112,13 +1142,14 @@ void SsaBuilder::AddHeapDefToHandlerPhis(BasicBlock* block, unsigned count)
if (m_pCompiler->ehBlockHasExnFlowDsc(block))
{
// Don't do anything for a compiler-inserted BBJ_ALWAYS that is a "leave helper".
- if ( block->bbJumpKind == BBJ_ALWAYS
- && (block->bbFlags & BBF_INTERNAL)
- && (block->bbPrev->isBBCallAlwaysPair()))
+ if (block->bbJumpKind == BBJ_ALWAYS && (block->bbFlags & BBF_INTERNAL) && (block->bbPrev->isBBCallAlwaysPair()))
+ {
return;
+ }
// Otherwise...
- DBG_SSA_JITDUMP("Definition of Heap/d:%d in block BB%02u has exn handler; adding as phi arg to handlers.\n", count, block->bbNum);
+ DBG_SSA_JITDUMP("Definition of Heap/d:%d in block BB%02u has exn handler; adding as phi arg to handlers.\n",
+ count, block->bbNum);
EHblkDsc* tryBlk = m_pCompiler->ehGetBlockExnFlowDsc(block);
while (true)
{
@@ -1127,8 +1158,8 @@ void SsaBuilder::AddHeapDefToHandlerPhis(BasicBlock* block, unsigned count)
// Is Heap live on entry to the handler?
if (handler->bbHeapLiveIn)
{
- assert(handler->bbHeapSsaPhiFunc != NULL);
-
+ assert(handler->bbHeapSsaPhiFunc != nullptr);
+
// Add "count" to the phi args of Heap.
if (handler->bbHeapSsaPhiFunc == BasicBlock::EmptyHeapPhiDef)
{
@@ -1138,16 +1169,18 @@ void SsaBuilder::AddHeapDefToHandlerPhis(BasicBlock* block, unsigned count)
{
#ifdef DEBUG
BasicBlock::HeapPhiArg* curArg = handler->bbHeapSsaPhiFunc;
- while (curArg != NULL)
+ while (curArg != nullptr)
{
assert(curArg->GetSsaNum() != count);
curArg = curArg->m_nextArg;
}
#endif // DEBUG
- handler->bbHeapSsaPhiFunc = new (m_pCompiler) BasicBlock::HeapPhiArg(count, handler->bbHeapSsaPhiFunc);
+ handler->bbHeapSsaPhiFunc =
+ new (m_pCompiler) BasicBlock::HeapPhiArg(count, handler->bbHeapSsaPhiFunc);
}
- DBG_SSA_JITDUMP(" Added phi arg u:%d for Heap to phi defn in handler block BB%02u.\n", count, handler->bbNum);
+ DBG_SSA_JITDUMP(" Added phi arg u:%d for Heap to phi defn in handler block BB%02u.\n", count,
+ handler->bbNum);
}
unsigned tryInd = tryBlk->ebdEnclosingTryIndex;
if (tryInd == EHblkDsc::NO_ENCLOSING_INDEX)
@@ -1174,7 +1207,7 @@ void SsaBuilder::BlockRenameVariables(BasicBlock* block, SsaRenameState* pRename
// First handle the incoming Heap state.
// Is there an Phi definition for heap at the start of this block?
- if (block->bbHeapSsaPhiFunc != NULL)
+ if (block->bbHeapSsaPhiFunc != nullptr)
{
unsigned count = pRenameState->CountForHeapDef();
pRenameState->PushHeap(block, count);
@@ -1185,15 +1218,17 @@ void SsaBuilder::BlockRenameVariables(BasicBlock* block, SsaRenameState* pRename
// Record the "in" Ssa # for Heap.
block->bbHeapSsaNumIn = pRenameState->CountForHeapUse();
-
// We need to iterate over phi definitions, to give them SSA names, but we need
// to know which are which, so we don't add phi definitions to handler phi arg lists.
// Statements are phi defns until they aren't.
- bool isPhiDefn = true;
+ bool isPhiDefn = true;
GenTreePtr firstNonPhi = block->FirstNonPhiDef();
for (GenTreePtr stmt = block->bbTreeList; stmt; stmt = stmt->gtNext)
{
- if (stmt == firstNonPhi) isPhiDefn = false;
+ if (stmt == firstNonPhi)
+ {
+ isPhiDefn = false;
+ }
for (GenTreePtr tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
{
@@ -1205,7 +1240,7 @@ void SsaBuilder::BlockRenameVariables(BasicBlock* block, SsaRenameState* pRename
// If the block defines Heap, allocate an SSA variable for the final heap state in the block.
// (This may be redundant with the last SSA var explicitly created, but there's no harm in that.)
- if (block->bbHeapDef)
+ if (block->bbHeapDef)
{
unsigned count = pRenameState->CountForHeapDef();
pRenameState->PushHeap(block, count);
@@ -1215,8 +1250,8 @@ void SsaBuilder::BlockRenameVariables(BasicBlock* block, SsaRenameState* pRename
// Record the "out" Ssa" # for Heap.
block->bbHeapSsaNumOut = pRenameState->CountForHeapUse();
- DBG_SSA_JITDUMP("Ssa # for Heap on entry to BB%02u is %d; on exit is %d.\n",
- block->bbNum, block->bbHeapSsaNumIn, block->bbHeapSsaNumOut);
+ DBG_SSA_JITDUMP("Ssa # for Heap on entry to BB%02u is %d; on exit is %d.\n", block->bbNum, block->bbHeapSsaNumIn,
+ block->bbHeapSsaNumOut);
}
/**
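
The renaming pass reformatted above relies on SsaRenameState keeping a stack of SSA numbers per local: a definition pushes a fresh number, a use reads the top of its variable's stack, and BlockPopStacks unwinds whatever a block pushed once its dominator-tree subtree is done. A standalone sketch of that discipline follows; the RenameState struct and its method names are hypothetical stand-ins, not the JIT's API.

#include <cstdio>
#include <vector>

struct RenameState
{
    std::vector<std::vector<int>> stacks; // one stack of SSA numbers per variable
    std::vector<int>              counts; // next fresh SSA number per variable

    explicit RenameState(int numVars) : stacks(numVars), counts(numVars, 1)
    {
    }

    int PushDef(int var)
    {
        int ssaNum = counts[var]++;
        stacks[var].push_back(ssaNum);
        return ssaNum;
    }

    int TopForUse(int var) const
    {
        return stacks[var].back();
    }

    void Pop(int var)
    {
        stacks[var].pop_back();
    }
};

int main()
{
    RenameState state(1); // a single variable V00

    int d1 = state.PushDef(0); // def in the entry block
    int d2 = state.PushDef(0); // redefinition in a dominated child block
    printf("defs got V00/%d and V00/%d\n", d1, d2);
    printf("use sees V00/%d\n", state.TopForUse(0)); // uses below the second def see 2
    state.Pop(0);                                    // leaving the child block
    printf("use sees V00/%d\n", state.TopForUse(0)); // a sibling block sees 1 again
    return 0;
}
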
@@ -1230,8 +1265,8 @@ void SsaBuilder::BlockRenameVariables(BasicBlock* block, SsaRenameState* pRename
*/
void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pRenameState)
{
- BasicBlock::AllSuccs allSuccs = block->GetAllSuccs(m_pCompiler);
- AllSuccessorIter allSuccsEnd = allSuccs.end();
+ BasicBlock::AllSuccs allSuccs = block->GetAllSuccs(m_pCompiler);
+ AllSuccessorIter allSuccsEnd = allSuccs.end();
for (AllSuccessorIter allSuccsIter = allSuccs.begin(); allSuccsIter != allSuccsEnd; ++allSuccsIter)
{
BasicBlock* succ = (*allSuccsIter);
@@ -1243,7 +1278,7 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
// Get the phi node from GT_ASG.
GenTreePtr phiNode = tree->gtOp.gtOp2;
- assert(phiNode->gtOp.gtOp1 == NULL || phiNode->gtOp.gtOp1->OperGet() == GT_LIST);
+ assert(phiNode->gtOp.gtOp1 == nullptr || phiNode->gtOp.gtOp1->OperGet() == GT_LIST);
unsigned lclNum = tree->gtOp.gtOp1->gtLclVar.gtLclNum;
unsigned ssaNum = pRenameState->CountForUse(lclNum);
@@ -1252,30 +1287,32 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
// during renaming for a definition that occurs within a try, and then that's the last
// value of the var within that basic block.)
GenTreeArgList* argList = (phiNode->gtOp.gtOp1 == nullptr ? nullptr : phiNode->gtOp.gtOp1->AsArgList());
- bool found = false;
+ bool found = false;
while (argList != nullptr)
{
if (argList->Current()->AsLclVarCommon()->GetSsaNum() == ssaNum)
{
- found = true;
+ found = true;
break;
}
argList = argList->Rest();
}
if (!found)
{
- GenTreePtr newPhiArg = new (m_pCompiler, GT_PHI_ARG) GenTreePhiArg(tree->gtOp.gtOp1->TypeGet(), lclNum, ssaNum, block);
- argList = (phiNode->gtOp.gtOp1 == nullptr ? nullptr : phiNode->gtOp.gtOp1->AsArgList());
+ GenTreePtr newPhiArg =
+ new (m_pCompiler, GT_PHI_ARG) GenTreePhiArg(tree->gtOp.gtOp1->TypeGet(), lclNum, ssaNum, block);
+ argList = (phiNode->gtOp.gtOp1 == nullptr ? nullptr : phiNode->gtOp.gtOp1->AsArgList());
phiNode->gtOp.gtOp1 = new (m_pCompiler, GT_LIST) GenTreeArgList(newPhiArg, argList);
- DBG_SSA_JITDUMP(" Added phi arg u:%d for V%02u from BB%02u in BB%02u.\n", ssaNum, lclNum, block->bbNum, succ->bbNum);
+ DBG_SSA_JITDUMP(" Added phi arg u:%d for V%02u from BB%02u in BB%02u.\n", ssaNum, lclNum, block->bbNum,
+ succ->bbNum);
}
-
+
m_pCompiler->gtSetStmtInfo(stmt);
m_pCompiler->fgSetStmtSeq(stmt);
}
// Now handle Heap.
- if (succ->bbHeapSsaPhiFunc != NULL)
+ if (succ->bbHeapSsaPhiFunc != nullptr)
{
if (succ->bbHeapSsaPhiFunc == BasicBlock::EmptyHeapPhiDef)
{
@@ -1284,10 +1321,10 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
else
{
BasicBlock::HeapPhiArg* curArg = succ->bbHeapSsaPhiFunc;
- bool found = false;
+ bool found = false;
// This is a quadratic algorithm. We might need to consider some switch over to a hash table
// representation for the arguments of a phi node, to make this linear.
- while (curArg != NULL)
+ while (curArg != nullptr)
{
if (curArg->m_predBB == block)
{
@@ -1305,7 +1342,7 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
}
// If "succ" is the first block of a try block (and "block" is not also in that try block)
- // then we must look at the vars that have phi defs in the corresponding handler;
+ // then we must look at the vars that have phi defs in the corresponding handler;
// the current SSA name for such vars must be included as an argument to that phi.
if (m_pCompiler->bbIsTryBeg(succ))
{
@@ -1317,9 +1354,8 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
// Check if the predecessor "block" is within the same try block.
if (block->hasTryIndex())
{
- for (unsigned blockTryInd = block->getTryIndex();
- blockTryInd != EHblkDsc::NO_ENCLOSING_INDEX;
- blockTryInd = m_pCompiler->ehGetEnclosingTryIndex(blockTryInd))
+ for (unsigned blockTryInd = block->getTryIndex(); blockTryInd != EHblkDsc::NO_ENCLOSING_INDEX;
+ blockTryInd = m_pCompiler->ehGetEnclosingTryIndex(blockTryInd))
{
if (blockTryInd == tryInd)
{
@@ -1331,18 +1367,21 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
// The loop just above found that the predecessor "block" is within the same
// try block as "succ." So we don't need to process this try, or any
- // further outer try blocks here, since they would also contain both "succ"
+ // further outer try blocks here, since they would also contain both "succ"
// and "block".
if (tryInd == EHblkDsc::NO_ENCLOSING_INDEX)
+ {
break;
+ }
}
-
EHblkDsc* succTry = m_pCompiler->ehGetDsc(tryInd);
// This is necessarily true on the first iteration, but not
// necessarily on the second and subsequent.
if (succTry->ebdTryBeg != succ)
+ {
break;
+ }
// succ is the first block of this try. Look at phi defs in the handler.
// For a filter, we consider the filter to be the "real" handler.
@@ -1353,24 +1392,28 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
// Check if the first n of the statements are phi nodes. If not, exit.
- if (tree->OperGet() != GT_ASG ||
- tree->gtOp.gtOp2 == NULL || tree->gtOp.gtOp2->OperGet() != GT_PHI)
+ if (tree->OperGet() != GT_ASG || tree->gtOp.gtOp2 == nullptr ||
+ tree->gtOp.gtOp2->OperGet() != GT_PHI)
{
break;
}
// Get the phi node from GT_ASG.
GenTreePtr lclVar = tree->gtOp.gtOp1;
- unsigned lclNum = lclVar->gtLclVar.gtLclNum;
+ unsigned lclNum = lclVar->gtLclVar.gtLclNum;
// If the variable is live-out of "blk", and is therefore live on entry to the try-block-start
// "succ", then we make sure the current SSA name for the
// var is one of the args of the phi node. If not, go on.
LclVarDsc* lclVarDsc = &m_pCompiler->lvaTable[lclNum];
- if (!lclVarDsc->lvTracked || !VarSetOps::IsMember(m_pCompiler, block->bbLiveOut, lclVarDsc->lvVarIndex)) continue;
+ if (!lclVarDsc->lvTracked ||
+ !VarSetOps::IsMember(m_pCompiler, block->bbLiveOut, lclVarDsc->lvVarIndex))
+ {
+ continue;
+ }
GenTreePtr phiNode = tree->gtOp.gtOp2;
- assert(phiNode->gtOp.gtOp1 == NULL || phiNode->gtOp.gtOp1->OperGet() == GT_LIST);
+ assert(phiNode->gtOp.gtOp1 == nullptr || phiNode->gtOp.gtOp1->OperGet() == GT_LIST);
GenTreeArgList* argList = reinterpret_cast<GenTreeArgList*>(phiNode->gtOp.gtOp1);
// What is the current SSAName from the predecessor for this local?
@@ -1378,7 +1421,7 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
// See if this ssaNum is already an arg to the phi.
bool alreadyArg = false;
- for (GenTreeArgList* curArgs = argList; curArgs != NULL; curArgs = curArgs->Rest())
+ for (GenTreeArgList* curArgs = argList; curArgs != nullptr; curArgs = curArgs->Rest())
{
if (curArgs->Current()->gtPhiArg.gtSsaNum == ssaNum)
{
@@ -1391,18 +1434,18 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
// Add the new argument.
GenTreePtr newPhiArg =
new (m_pCompiler, GT_PHI_ARG) GenTreePhiArg(lclVar->TypeGet(), lclNum, ssaNum, block);
- phiNode->gtOp.gtOp1 =
- new (m_pCompiler, GT_LIST) GenTreeArgList(newPhiArg, argList );
+ phiNode->gtOp.gtOp1 = new (m_pCompiler, GT_LIST) GenTreeArgList(newPhiArg, argList);
- DBG_SSA_JITDUMP(" Added phi arg u:%d for V%02u from BB%02u in BB%02u.\n", ssaNum, lclNum, block->bbNum, handlerStart->bbNum);
+ DBG_SSA_JITDUMP(" Added phi arg u:%d for V%02u from BB%02u in BB%02u.\n", ssaNum, lclNum,
+ block->bbNum, handlerStart->bbNum);
m_pCompiler->gtSetStmtInfo(stmt);
m_pCompiler->fgSetStmtSeq(stmt);
}
}
- // Now handle Heap.
- if (handlerStart->bbHeapSsaPhiFunc != NULL)
+ // Now handle Heap.
+ if (handlerStart->bbHeapSsaPhiFunc != nullptr)
{
if (handlerStart->bbHeapSsaPhiFunc == BasicBlock::EmptyHeapPhiDef)
{
@@ -1412,15 +1455,17 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
{
#ifdef DEBUG
BasicBlock::HeapPhiArg* curArg = handlerStart->bbHeapSsaPhiFunc;
- while (curArg != NULL)
+ while (curArg != nullptr)
{
assert(curArg->m_predBB != block);
curArg = curArg->m_nextArg;
}
#endif // DEBUG
- handlerStart->bbHeapSsaPhiFunc = new (m_pCompiler) BasicBlock::HeapPhiArg(block, handlerStart->bbHeapSsaPhiFunc);
+ handlerStart->bbHeapSsaPhiFunc =
+ new (m_pCompiler) BasicBlock::HeapPhiArg(block, handlerStart->bbHeapSsaPhiFunc);
}
- DBG_SSA_JITDUMP(" Added phi arg for Heap from BB%02u in BB%02u.\n", block->bbNum, handlerStart->bbNum);
+ DBG_SSA_JITDUMP(" Added phi arg for Heap from BB%02u in BB%02u.\n", block->bbNum,
+ handlerStart->bbNum);
}
tryInd = succTry->ebdEnclosingTryIndex;
@@ -1439,7 +1484,7 @@ void SsaBuilder::AssignPhiNodeRhsVariables(BasicBlock* block, SsaRenameState* pR
void SsaBuilder::BlockPopStacks(BasicBlock* block, SsaRenameState* pRenameState)
{
// Pop the names given to the non-phi nodes.
- pRenameState->PopBlockStacks(block);
+ pRenameState->PopBlockStacks(block);
// And for Heap.
pRenameState->PopBlockHeapStack(block);
@@ -1469,16 +1514,17 @@ void SsaBuilder::RenameVariables(BlkToBlkSetMap* domTree, SsaRenameState* pRenam
// The first thing we do is treat parameters and must-init variables as if they have a
// virtual definition before entry -- they start out at SSA name 1.
- for (unsigned i = 0; i < m_pCompiler->lvaCount; i++)
+ for (unsigned i = 0; i < m_pCompiler->lvaCount; i++)
{
LclVarDsc* varDsc = &m_pCompiler->lvaTable[i];
#ifdef DEBUG
- varDsc->lvNumSsaNames = SsaConfig::UNINIT_SSA_NUM; // Start off fresh...
+ varDsc->lvNumSsaNames = SsaConfig::UNINIT_SSA_NUM; // Start off fresh...
#endif
if (varDsc->lvIsParam || m_pCompiler->info.compInitMem || varDsc->lvMustInit ||
- (varDsc->lvTracked && VarSetOps::IsMember(m_pCompiler, m_pCompiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex)))
+ (varDsc->lvTracked &&
+ VarSetOps::IsMember(m_pCompiler, m_pCompiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex)))
{
unsigned count = pRenameState->CountForDef(i);
@@ -1487,7 +1533,7 @@ void SsaBuilder::RenameVariables(BlkToBlkSetMap* domTree, SsaRenameState* pRenam
#ifdef DEBUG
varDsc->lvNumSsaNames++;
#endif
- pRenameState->Push(NULL, i, count);
+ pRenameState->Push(nullptr, i, count);
}
}
// In ValueNum we'd assume un-inited heap gets FIRST_SSA_NUM.
@@ -1500,9 +1546,9 @@ void SsaBuilder::RenameVariables(BlkToBlkSetMap* domTree, SsaRenameState* pRenam
// heap ssa numbers to have some intitial value.
for (BasicBlock* block = m_pCompiler->fgFirstBB; block; block = block->bbNext)
{
- if (block->bbIDom == NULL)
+ if (block->bbIDom == nullptr)
{
- block->bbHeapSsaNumIn = initHeapCount;
+ block->bbHeapSsaNumIn = initHeapCount;
block->bbHeapSsaNumOut = initHeapCount;
}
}
@@ -1510,21 +1556,25 @@ void SsaBuilder::RenameVariables(BlkToBlkSetMap* domTree, SsaRenameState* pRenam
struct BlockWork
{
BasicBlock* m_blk;
- bool m_processed; // Whether the this block have already been processed: its var renamed, and children processed.
- // If so, awaiting only BlockPopStacks.
- BlockWork(BasicBlock* blk, bool processed = false) : m_blk(blk), m_processed(processed) {}
+ bool m_processed; // Whether the this block have already been processed: its var renamed, and children
+ // processed.
+ // If so, awaiting only BlockPopStacks.
+ BlockWork(BasicBlock* blk, bool processed = false) : m_blk(blk), m_processed(processed)
+ {
+ }
};
typedef jitstd::vector<BlockWork> BlockWorkStack;
- BlockWorkStack* blocksToDo = new (jitstd::utility::allocate<BlockWorkStack>(m_allocator), jitstd::placement_t()) BlockWorkStack(m_allocator);
+ BlockWorkStack* blocksToDo =
+ new (jitstd::utility::allocate<BlockWorkStack>(m_allocator), jitstd::placement_t()) BlockWorkStack(m_allocator);
- blocksToDo->push_back(BlockWork(m_pCompiler->fgFirstBB)); // Probably have to include other roots of dom tree.
+ blocksToDo->push_back(BlockWork(m_pCompiler->fgFirstBB)); // Probably have to include other roots of dom tree.
while (blocksToDo->size() != 0)
{
BlockWork blockWrk = blocksToDo->back();
blocksToDo->pop_back();
BasicBlock* block = blockWrk.m_blk;
-
+
DBG_SSA_JITDUMP("[SsaBuilder::RenameVariables](BB%02u, processed = %d)\n", block->bbNum, blockWrk.m_processed);
if (!blockWrk.m_processed)
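
RenameVariables, touched above, avoids recursion over the dominator tree by giving each BlockWork entry a processed flag: a block is first popped unprocessed (rename, re-push as processed, push its dominator-tree children), then popped a second time to unwind its rename stacks. A standalone sketch of that two-phase worklist follows; the children array and the print statements stand in for the real renaming and BlockPopStacks calls.

#include <cstdio>
#include <vector>

struct BlockWork
{
    int  blockNum;
    bool processed; // false: rename and enqueue children; true: pop the block's names

    BlockWork(int blockNum, bool processed = false) : blockNum(blockNum), processed(processed)
    {
    }
};

int main()
{
    // Dominator tree: block 0 is the root with children 1 and 2.
    std::vector<std::vector<int>> domChildren = {{1, 2}, {}, {}};

    std::vector<BlockWork> work;
    work.push_back(BlockWork(0));
    while (!work.empty())
    {
        BlockWork cur = work.back();
        work.pop_back();
        if (!cur.processed)
        {
            printf("rename block %d\n", cur.blockNum);
            // Re-push as processed so the pop happens after all children finish.
            work.push_back(BlockWork(cur.blockNum, true));
            for (int child : domChildren[cur.blockNum])
            {
                work.push_back(BlockWork(child));
            }
        }
        else
        {
            printf("pop stacks for block %d\n", cur.blockNum);
        }
    }
    return 0; // each block is renamed before its children and popped after them
}
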
@@ -1571,7 +1621,7 @@ void SsaBuilder::RenameVariables(BlkToBlkSetMap* domTree, SsaRenameState* pRenam
* N001 ( 1, 1) [0027CB70] ----------- const int 23
* N003 ( 3, 3) [0027CBD8] -A------R-- = int
* N002 ( 1, 1) [0027CBA4] D------N--- lclVar int V01 arg1 d:5
- *
+ *
* After SSA BB04:
* [0027D530] ----------- stmtExpr void (IL ???... ???)
* N002 ( 0, 0) [0027D4C8] ----------- phi int
@@ -1598,7 +1648,7 @@ void SsaBuilder::Print(BasicBlock** postOrder, int count)
*
* Identifies each block's immediate dominator.
* - Computes this in bbIDom of each BasicBlock.
- *
+ *
* Computes DOM tree relation.
* - Computes domTree as block -> set of blocks.
* - Computes pre/post order traversal of the DOM tree.
@@ -1636,7 +1686,7 @@ void SsaBuilder::Build()
JITDUMP("[SsaBuilder] Max block count is %d.\n", blockCount);
// Allocate the postOrder array for the graph.
- BasicBlock** postOrder = (BasicBlock**) alloca(blockCount * sizeof(BasicBlock*));
+ BasicBlock** postOrder = (BasicBlock**)alloca(blockCount * sizeof(BasicBlock*));
// Topologically sort the graph.
int count = TopologicalSort(postOrder, blockCount);
@@ -1655,13 +1705,17 @@ void SsaBuilder::Build()
InsertPhiFunctions(postOrder, count);
// Rename local variables and collect UD information for each ssa var.
- SsaRenameState* pRenameState = new (jitstd::utility::allocate<SsaRenameState>(m_allocator), jitstd::placement_t()) SsaRenameState(m_allocator, m_pCompiler->lvaCount);
+ SsaRenameState* pRenameState = new (jitstd::utility::allocate<SsaRenameState>(m_allocator), jitstd::placement_t())
+ SsaRenameState(m_allocator, m_pCompiler->lvaCount);
RenameVariables(domTree, pRenameState);
EndPhase(PHASE_BUILD_SSA_RENAME);
#ifdef DEBUG
// At this point we are in SSA form. Print the SSA form.
- if (m_pCompiler->verboseSsa) Print(postOrder, count);
+ if (m_pCompiler->verboseSsa)
+ {
+ Print(postOrder, count);
+ }
#endif
}
@@ -1671,11 +1725,12 @@ void SsaBuilder::SetupBBRoot()
// We need a unique block to be the root of the dominator tree.
// This can be violated if the first block is in a try, or if it is the first block of
// a loop (which would necessarily be an infinite loop) -- i.e., it has a predecessor.
-
+
// If neither condition holds, no reason to make a new block.
- if (!m_pCompiler->fgFirstBB->hasTryIndex()
- && m_pCompiler->fgFirstBB->bbPreds == NULL)
+ if (!m_pCompiler->fgFirstBB->hasTryIndex() && m_pCompiler->fgFirstBB->bbPreds == nullptr)
+ {
return;
+ }
BasicBlock* bbRoot = m_pCompiler->bbNewBasicBlock(BBJ_NONE);
bbRoot->bbFlags |= BBF_INTERNAL;
@@ -1712,11 +1767,11 @@ void SsaBuilder::SetupBBRoot()
// This method asserts that SSA name constraints specified are satisfied.
void Compiler::JitTestCheckSSA()
{
- struct SSAName
- {
- unsigned m_lvNum;
+ struct SSAName
+ {
+ unsigned m_lvNum;
unsigned m_ssaNum;
-
+
static unsigned GetHashCode(SSAName ssaNm)
{
return ssaNm.m_lvNum << 16 | ssaNm.m_ssaNum;
@@ -1728,11 +1783,15 @@ void Compiler::JitTestCheckSSA()
}
};
- typedef SimplerHashTable<ssize_t, SmallPrimitiveKeyFuncs<ssize_t>, SSAName, JitSimplerHashBehavior> LabelToSSANameMap;
+ typedef SimplerHashTable<ssize_t, SmallPrimitiveKeyFuncs<ssize_t>, SSAName, JitSimplerHashBehavior>
+ LabelToSSANameMap;
typedef SimplerHashTable<SSAName, SSAName, ssize_t, JitSimplerHashBehavior> SSANameToLabelMap;
// If we have no test data, early out.
- if (m_nodeTestData == NULL) return;
+ if (m_nodeTestData == nullptr)
+ {
+ return;
+ }
NodeToTestDataMap* testData = GetNodeTestData();
@@ -1749,8 +1808,8 @@ void Compiler::JitTestCheckSSA()
for (NodeToTestDataMap::KeyIterator ki = testData->Begin(); !ki.Equal(testData->End()); ++ki)
{
TestLabelAndNum tlAndN;
- GenTreePtr node = ki.Get();
- bool b = testData->Lookup(node, &tlAndN);
+ GenTreePtr node = ki.Get();
+ bool b = testData->Lookup(node, &tlAndN);
assert(b);
if (tlAndN.m_tl == TL_SsaName)
{
@@ -1768,7 +1827,8 @@ void Compiler::JitTestCheckSSA()
{
printf("Node ");
printTreeID(lcl);
- printf(" had a test constraint declared, but has become unreachable at the time the constraint is tested.\n"
+ printf(" had a test constraint declared, but has become unreachable at the time the constraint is "
+ "tested.\n"
"(This is probably as a result of some optimization -- \n"
"you may need to modify the test case to defeat this opt.)\n");
unreached();
@@ -1778,8 +1838,7 @@ void Compiler::JitTestCheckSSA()
{
printf(" Node: ");
printTreeID(lcl);
- printf(", SSA name = <%d, %d> -- SSA name class %d.\n",
- lcl->gtLclNum, lcl->gtSsaNum, tlAndN.m_num);
+ printf(", SSA name = <%d, %d> -- SSA name class %d.\n", lcl->gtLclNum, lcl->gtSsaNum, tlAndN.m_num);
}
SSAName ssaNm;
if (labelToSSA->Lookup(tlAndN.m_num, &ssaNm))
@@ -1790,26 +1849,26 @@ void Compiler::JitTestCheckSSA()
}
// The mapping(s) must be one-to-one: if the label has a mapping, then the ssaNm must, as well.
ssize_t num2;
- bool b = ssaToLabel->Lookup(ssaNm, &num2);
+ bool b = ssaToLabel->Lookup(ssaNm, &num2);
// And the mappings must be the same.
if (tlAndN.m_num != num2)
{
printf("Node: ");
printTreeID(lcl);
- printf(", SSA name = <%d, %d> was declared in SSA name class %d,\n",
- lcl->gtLclNum, lcl->gtSsaNum, tlAndN.m_num);
- printf("but this SSA name <%d,%d> has already been associated with a different SSA name class: %d.\n",
- ssaNm.m_lvNum, ssaNm.m_ssaNum, num2);
+ printf(", SSA name = <%d, %d> was declared in SSA name class %d,\n", lcl->gtLclNum, lcl->gtSsaNum,
+ tlAndN.m_num);
+ printf(
+ "but this SSA name <%d,%d> has already been associated with a different SSA name class: %d.\n",
+ ssaNm.m_lvNum, ssaNm.m_ssaNum, num2);
unreached();
}
// And the current node must be of the specified SSA family.
- if (!(lcl->gtLclNum == ssaNm.m_lvNum
- && lcl->gtSsaNum == ssaNm.m_ssaNum))
+ if (!(lcl->gtLclNum == ssaNm.m_lvNum && lcl->gtSsaNum == ssaNm.m_ssaNum))
{
printf("Node: ");
printTreeID(lcl);
- printf(", SSA name = <%d, %d> was declared in SSA name class %d,\n",
- lcl->gtLclNum, lcl->gtSsaNum, tlAndN.m_num);
+ printf(", SSA name = <%d, %d> was declared in SSA name class %d,\n", lcl->gtLclNum, lcl->gtSsaNum,
+ tlAndN.m_num);
printf("but that name class was previously bound to a different SSA name: <%d,%d>.\n",
ssaNm.m_lvNum, ssaNm.m_ssaNum);
unreached();
@@ -1817,7 +1876,7 @@ void Compiler::JitTestCheckSSA()
}
else
{
- ssaNm.m_lvNum = lcl->gtLclNum;
+ ssaNm.m_lvNum = lcl->gtLclNum;
ssaNm.m_ssaNum = lcl->gtSsaNum;
ssize_t num;
// The mapping(s) must be one-to-one: if the label has no mapping, then the ssaNm may not, either.
@@ -1825,14 +1884,14 @@ void Compiler::JitTestCheckSSA()
{
printf("Node: ");
printTreeID(lcl);
- printf(", SSA name = <%d, %d> was declared in SSA name class %d,\n",
- lcl->gtLclNum, lcl->gtSsaNum, tlAndN.m_num);
+ printf(", SSA name = <%d, %d> was declared in SSA name class %d,\n", lcl->gtLclNum, lcl->gtSsaNum,
+ tlAndN.m_num);
printf("but this SSA name has already been associated with a different name class: %d.\n", num);
unreached();
}
// Add to both mappings.
labelToSSA->Set(tlAndN.m_num, ssaNm);
- ssaToLabel->Set(ssaNm, tlAndN.m_num);
+ ssaToLabel->Set(ssaNm, tlAndN.m_num);
if (verbose)
{
printf(" added to hash tables.\n");
diff --git a/src/jit/ssabuilder.h b/src/jit/ssabuilder.h
index cfffe58208..2fff06573e 100644
--- a/src/jit/ssabuilder.h
+++ b/src/jit/ssabuilder.h
@@ -20,7 +20,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#pragma once
-#pragma warning(disable:4503) // 'identifier' : decorated name length exceeded, name was truncated
+#pragma warning(disable : 4503) // 'identifier' : decorated name length exceeded, name was truncated
#undef SSA_FEATURE_USEDEF
#undef SSA_FEATURE_DOMARR
@@ -53,7 +53,7 @@ private:
// Used to maintain a map of a given SSA numbering to its use or def.
typedef jitstd::unordered_map<SsaVarName, jitstd::vector<GenTree*>, SsaVarNameHasher> VarToUses;
- typedef jitstd::unordered_map<SsaVarName, GenTree*, SsaVarNameHasher> VarToDef;
+ typedef jitstd::unordered_map<SsaVarName, GenTree*, SsaVarNameHasher> VarToDef;
inline void EndPhase(Phases phase)
{
@@ -61,7 +61,6 @@ private:
}
public:
-
// Constructor
SsaBuilder(Compiler* pCompiler, IAllocator* pIAllocator);
@@ -80,11 +79,9 @@ public:
// Using IDom of each basic block, compute the whole domTree. If a block "b" has IDom "i",
// then, block "b" is dominated by "i". The mapping then is i -> { ..., b, ... }, in
// other words, "domTree" is a tree represented by nodes mapped to their children.
- static
- void ComputeDominators(Compiler* pCompiler, BlkToBlkSetMap* domTree);
+ static void ComputeDominators(Compiler* pCompiler, BlkToBlkSetMap* domTree);
private:
-
// Ensures that the basic block graph has a root for the dominator graph, by ensuring
// that there is a first block that is not in a try region (adding an empty block for that purpose
// if necessary). Eventually should move to Compiler.
@@ -113,9 +110,8 @@ private:
// Requires all blocks to have computed "bbIDom." Requires "domTree" to be a preallocated BlkToBlkSetMap.
// Helper to compute "domTree" from the pre-computed bbIDom of the basic blocks.
- static
- void ConstructDomTreeForBlock(Compiler* pCompiler, BasicBlock* block, BlkToBlkSetMap* domTree);
-
+ static void ConstructDomTreeForBlock(Compiler* pCompiler, BasicBlock* block, BlkToBlkSetMap* domTree);
+
// Requires "postOrder" to hold the blocks of the flowgraph in topologically sorted order. Requires
// count to be the valid entries in the "postOrder" array. Computes "domTree" as a adjacency list
// like object, i.e., a set of blocks with a set of blocks as children defining the DOM relation.
@@ -123,8 +119,7 @@ private:
#ifdef DEBUG
// Display the dominator tree.
- static
- void DisplayDominators(BlkToBlkSetMap* domTree);
+ static void DisplayDominators(BlkToBlkSetMap* domTree);
#endif // DEBUG
// Requires "postOrder" to hold the blocks of the flowgraph in topologically sorted order. Requires
@@ -142,7 +137,7 @@ private:
// GT_ASG(GT_LCL_VAR, GT_PHI(GT_PHI_ARG(GT_LCL_VAR, Block*), GT_LIST(GT_PHI_ARG(GT_LCL_VAR, Block*), NULL));
void InsertPhiFunctions(BasicBlock** postOrder, int count);
- // Requires "domTree" to be the dominator tree relation defined by a DOM b.
+ // Requires "domTree" to be the dominator tree relation defined by a DOM b.
// Requires "pRenameState" to have counts and stacks at their initial state.
// Assigns gtSsaNames to all variables.
void RenameVariables(BlkToBlkSetMap* domTree, SsaRenameState* pRenameState);
@@ -195,13 +190,12 @@ private:
#endif
private:
-
#ifdef SSA_FEATURE_USEDEF
// Use Def information after SSA. To query the uses and def of a given ssa var,
// probe these data structures.
// Do not move these outside of this class, use accessors/interface methods.
VarToUses m_uses;
- VarToDef m_defs;
+ VarToDef m_defs;
#endif
#ifdef SSA_FEATURE_DOMARR
diff --git a/src/jit/ssaconfig.h b/src/jit/ssaconfig.h
index 2a655ca60f..6e81ad9fd6 100644
--- a/src/jit/ssaconfig.h
+++ b/src/jit/ssaconfig.h
@@ -21,28 +21,29 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#pragma once
-
#ifdef DEBUG
- #define DBG_SSA_JITDUMP(...) if (JitTls::GetCompiler()->verboseSsa) JitDump(__VA_ARGS__)
+#define DBG_SSA_JITDUMP(...) \
+ if (JitTls::GetCompiler()->verboseSsa) \
+ JitDump(__VA_ARGS__)
#else
- #define DBG_SSA_JITDUMP(...)
+#define DBG_SSA_JITDUMP(...)
#endif
// DBG_SSA_JITDUMP prints only if DEBUG, DEBUG_SSA, and tlsCompiler->verbose are all set.
namespace SsaConfig
{
- // FIRST ssa num is given to the first definition of a variable which can either be:
- // 1. A regular definition in the program.
- // 2. Or initialization by compInitMem.
- static const int FIRST_SSA_NUM = 2;
-
- // UNINIT ssa num is given to variables whose definitions were never encountered:
- // 1. Neither by SsaBuilder
- // 2. Nor were they initialized using compInitMem.
- static const int UNINIT_SSA_NUM = 1;
-
- // Sentinel value to indicate variable not touched by SSA.
- static const int RESERVED_SSA_NUM = 0;
+// FIRST ssa num is given to the first definition of a variable which can either be:
+// 1. A regular definition in the program.
+// 2. Or initialization by compInitMem.
+static const int FIRST_SSA_NUM = 2;
+
+// UNINIT ssa num is given to variables whose definitions were never encountered:
+// 1. Neither by SsaBuilder
+// 2. Nor were they initialized using compInitMem.
+static const int UNINIT_SSA_NUM = 1;
+
+// Sentinel value to indicate variable not touched by SSA.
+static const int RESERVED_SSA_NUM = 0;
} // end of namespace SsaConfig
diff --git a/src/jit/ssarenamestate.cpp b/src/jit/ssarenamestate.cpp
index 03bc8974a5..a1e05f192f 100644
--- a/src/jit/ssarenamestate.cpp
+++ b/src/jit/ssarenamestate.cpp
@@ -29,8 +29,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* @params alloc The allocator class used to allocate jitstd data.
*/
SsaRenameState::SsaRenameState(const jitstd::allocator<int>& alloc, unsigned lvaCount)
- : counts(NULL)
- , stacks(NULL)
+ : counts(nullptr)
+ , stacks(nullptr)
, definedLocs(alloc)
, heapStack(alloc)
, heapCount(0)
@@ -46,7 +46,7 @@ SsaRenameState::SsaRenameState(const jitstd::allocator<int>& alloc, unsigned lva
*/
void SsaRenameState::EnsureCounts()
{
- if (counts == NULL)
+ if (counts == nullptr)
{
counts = jitstd::utility::allocate<unsigned>(m_alloc, lvaCount);
for (unsigned i = 0; i < lvaCount; ++i)
@@ -63,17 +63,16 @@ void SsaRenameState::EnsureCounts()
*/
void SsaRenameState::EnsureStacks()
{
- if (stacks == NULL)
+ if (stacks == nullptr)
{
stacks = jitstd::utility::allocate<Stack*>(m_alloc, lvaCount);
for (unsigned i = 0; i < lvaCount; ++i)
{
- stacks[i] = NULL;
+ stacks[i] = nullptr;
}
}
}
-
/**
* Returns a SSA count number for a local variable and does a post increment.
*
@@ -109,9 +108,9 @@ unsigned SsaRenameState::CountForUse(unsigned lclNum)
{
EnsureStacks();
DBG_SSA_JITDUMP("[SsaRenameState::CountForUse] V%02u\n", lclNum);
-
+
Stack* stack = stacks[lclNum];
- if (stack == NULL || stack->empty())
+ if (stack == nullptr || stack->empty())
{
return SsaConfig::UNINIT_SSA_NUM;
}
@@ -133,15 +132,15 @@ void SsaRenameState::Push(BasicBlock* bb, unsigned lclNum, unsigned count)
// We'll use BB00 here to indicate the "block before any real blocks..."
DBG_SSA_JITDUMP("[SsaRenameState::Push] BB%02u, V%02u, count = %d\n", bb != nullptr ? bb->bbNum : 0, lclNum, count);
-
+
Stack* stack = stacks[lclNum];
- if (stack == NULL)
+ if (stack == nullptr)
{
DBG_SSA_JITDUMP("\tCreating a new stack\n");
stack = stacks[lclNum] = new (jitstd::utility::allocate<Stack>(m_alloc), jitstd::placement_t()) Stack(m_alloc);
}
-
+
if (stack->empty() || stack->back().m_bb != bb)
{
stack->push_back(SsaRenameStateForBlock(bb, count));
@@ -160,7 +159,7 @@ void SsaRenameState::Push(BasicBlock* bb, unsigned lclNum, unsigned count)
printf("\tContents of the stack: [");
for (Stack::iterator iter2 = stack->begin(); iter2 != stack->end(); iter2++)
{
- printf("<BB%02u, %d>", ((*iter2).m_bb != nullptr ? (*iter2).m_bb->bbNum : 0) , (*iter2).m_count);
+ printf("<BB%02u, %d>", ((*iter2).m_bb != nullptr ? (*iter2).m_bb->bbNum : 0), (*iter2).m_count);
}
printf("]\n");
@@ -177,9 +176,9 @@ void SsaRenameState::PopBlockStacks(BasicBlock* block)
while (!definedLocs.empty() && definedLocs.back().m_bb == block)
{
unsigned lclNum = definedLocs.back().m_lclNum;
- assert(stacks != NULL); // Cannot be empty because definedLocs is not empty.
+ assert(stacks != nullptr); // Cannot be empty because definedLocs is not empty.
Stack* stack = stacks[lclNum];
- assert(stack != NULL);
+ assert(stack != nullptr);
assert(stack->back().m_bb == block);
stack->pop_back();
definedLocs.pop_back();
@@ -189,7 +188,7 @@ void SsaRenameState::PopBlockStacks(BasicBlock* block)
// the loop above popped them all.
for (unsigned i = 0; i < lvaCount; ++i)
{
- if (stacks != NULL && stacks[i] != NULL && !stacks[i]->empty())
+ if (stacks != nullptr && stacks[i] != nullptr && !stacks[i]->empty())
{
assert(stacks[i]->back().m_bb != block);
}
@@ -209,7 +208,6 @@ void SsaRenameState::PopBlockHeapStack(BasicBlock* block)
}
}
-
#ifdef DEBUG
/**
* Print the stack data for each variable in a loop.
@@ -228,7 +226,7 @@ void SsaRenameState::DumpStacks()
{
Stack* stack = stacks[i];
printf("V%02u:\t", i);
- if (stack != NULL)
+ if (stack != nullptr)
{
for (Stack::iterator iter2 = stack->begin(); iter2 != stack->end(); ++iter2)
{
diff --git a/src/jit/ssarenamestate.h b/src/jit/ssarenamestate.h
index f567cefc85..1db36c5b37 100644
--- a/src/jit/ssarenamestate.h
+++ b/src/jit/ssarenamestate.h
@@ -28,8 +28,12 @@ struct SsaRenameStateForBlock
BasicBlock* m_bb;
unsigned m_count;
- SsaRenameStateForBlock(BasicBlock* bb, unsigned count) : m_bb(bb), m_count(count) {}
- SsaRenameStateForBlock() : m_bb(NULL), m_count(0) {}
+ SsaRenameStateForBlock(BasicBlock* bb, unsigned count) : m_bb(bb), m_count(count)
+ {
+ }
+ SsaRenameStateForBlock() : m_bb(nullptr), m_count(0)
+ {
+ }
};
// A record indicating that local "m_loc" was defined in block "m_bb".
@@ -38,15 +42,17 @@ struct SsaRenameStateLocDef
BasicBlock* m_bb;
unsigned m_lclNum;
- SsaRenameStateLocDef(BasicBlock* bb, unsigned lclNum) : m_bb(bb), m_lclNum(lclNum) {}
+ SsaRenameStateLocDef(BasicBlock* bb, unsigned lclNum) : m_bb(bb), m_lclNum(lclNum)
+ {
+ }
};
struct SsaRenameState
{
typedef jitstd::list<SsaRenameStateForBlock> Stack;
- typedef Stack** Stacks;
- typedef unsigned* Counts;
- typedef jitstd::list<SsaRenameStateLocDef> DefStack;
+ typedef Stack** Stacks;
+ typedef unsigned* Counts;
+ typedef jitstd::list<SsaRenameStateLocDef> DefStack;
SsaRenameState(const jitstd::allocator<int>& allocator, unsigned lvaCount);
@@ -64,7 +70,7 @@ struct SsaRenameState
// Requires "lclNum" to be a variable number, and requires "count" to represent
// an ssa number, that needs to be pushed on to the stack corresponding to the lclNum.
void Push(BasicBlock* bb, unsigned lclNum, unsigned count);
-
+
// Pop all stacks that have an entry for "bb" on top.
void PopBlockStacks(BasicBlock* bb);
@@ -72,7 +78,9 @@ struct SsaRenameState
unsigned CountForHeapDef()
{
if (heapCount == 0)
+ {
heapCount = SsaConfig::FIRST_SSA_NUM;
+ }
unsigned res = heapCount;
heapCount++;
return res;
@@ -89,7 +97,10 @@ struct SsaRenameState
void PopBlockHeapStack(BasicBlock* bb);
- unsigned HeapCount() { return heapCount; }
+ unsigned HeapCount()
+ {
+ return heapCount;
+ }
#ifdef DEBUG
// Debug interface
@@ -97,7 +108,6 @@ struct SsaRenameState
#endif
private:
-
// Map of lclNum -> count.
Counts counts;
@@ -108,13 +118,12 @@ private:
DefStack definedLocs;
// Same state for the special implicit Heap variable.
- Stack heapStack;
- unsigned heapCount;
+ Stack heapStack;
+ unsigned heapCount;
// Number of stacks/counts to allocate.
- unsigned lvaCount;
+ unsigned lvaCount;
// Allocator to allocate stacks.
jitstd::allocator<void> m_alloc;
};
-
diff --git a/src/jit/stackfp.cpp b/src/jit/stackfp.cpp
index a1788b7a02..f975822740 100644
--- a/src/jit/stackfp.cpp
+++ b/src/jit/stackfp.cpp
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
@@ -21,12 +20,12 @@
// Instruction list
// N=normal, R=reverse, P=pop
#if FEATURE_STACK_FP_X87
-const static instruction FPmathNN[] = { INS_fadd , INS_fsub , INS_fmul , INS_fdiv };
-const static instruction FPmathNP[] = { INS_faddp, INS_fsubp , INS_fmulp, INS_fdivp };
-const static instruction FPmathRN[] = { INS_fadd , INS_fsubr , INS_fmul , INS_fdivr };
-const static instruction FPmathRP[] = { INS_faddp, INS_fsubrp, INS_fmulp, INS_fdivrp };
+const static instruction FPmathNN[] = {INS_fadd, INS_fsub, INS_fmul, INS_fdiv};
+const static instruction FPmathNP[] = {INS_faddp, INS_fsubp, INS_fmulp, INS_fdivp};
+const static instruction FPmathRN[] = {INS_fadd, INS_fsubr, INS_fmul, INS_fdivr};
+const static instruction FPmathRP[] = {INS_faddp, INS_fsubrp, INS_fmulp, INS_fdivrp};
-FlatFPStateX87* CodeGenInterface::FlatFPAllocFPState(FlatFPStateX87* pInitFrom)
+FlatFPStateX87* CodeGenInterface::FlatFPAllocFPState(FlatFPStateX87* pInitFrom)
{
FlatFPStateX87* pNewState;
@@ -39,19 +38,19 @@ FlatFPStateX87* CodeGenInterface::FlatFPAllocFPState(FlatFPStateX87* pInitFrom)
bool CodeGen::FlatFPSameRegisters(FlatFPStateX87* pState, regMaskTP mask)
{
int i;
- for (i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (i = REG_FPV0; i < REG_FPCOUNT; i++)
{
if (pState->Mapped(i))
{
- regMaskTP regmask = genRegMaskFloat((regNumber) i);
- if ( (mask & regmask) == 0)
+ regMaskTP regmask = genRegMaskFloat((regNumber)i);
+ if ((mask & regmask) == 0)
{
return false;
}
mask &= ~regmask;
}
- }
+ }
return mask ? false : true;
}
@@ -66,13 +65,11 @@ void FlatFPStateX87::Unmap(unsigned uEntry)
assert(Mapped(uEntry));
m_uVirtualMap[uEntry] = (unsigned)FP_VRNOTMAPPED;
}
-
-
bool FlatFPStateX87::AreEqual(FlatFPStateX87* pA, FlatFPStateX87* pB)
{
unsigned i;
-
+
assert(pA->IsConsistent());
assert(pB->IsConsistent());
@@ -81,30 +78,28 @@ bool FlatFPStateX87::AreEqual(FlatFPStateX87* pA, FlatFPStateX87* pB)
return false;
}
- for (i = 0 ; i < pA->m_uStackSize ; i++)
+ for (i = 0; i < pA->m_uStackSize; i++)
{
if (pA->m_uStack[i] != pB->m_uStack[i])
{
return false;
}
}
-
+
return true;
}
-
#ifdef DEBUG
bool FlatFPStateX87::IsValidEntry(unsigned uEntry)
{
- return (Mapped(uEntry) && (m_uVirtualMap[uEntry] >= 0 && m_uVirtualMap[uEntry]<m_uStackSize)) ||
- !Mapped(uEntry);
+ return (Mapped(uEntry) && (m_uVirtualMap[uEntry] >= 0 && m_uVirtualMap[uEntry] < m_uStackSize)) || !Mapped(uEntry);
}
bool FlatFPStateX87::IsConsistent()
{
unsigned i;
- for (i = 0 ; i < FP_VIRTUALREGISTERS ; i++)
+ for (i = 0; i < FP_VIRTUALREGISTERS; i++)
{
if (!IsValidEntry(i))
{
@@ -120,7 +115,7 @@ bool FlatFPStateX87::IsConsistent()
}
}
- for (i = 0 ; i < m_uStackSize ; i++)
+ for (i = 0; i < m_uStackSize; i++)
{
if (m_uVirtualMap[m_uStack[i]] != i)
{
@@ -148,7 +143,7 @@ void FlatFPStateX87::Dump()
if (m_uStackSize > 0)
{
printf("Virtual stack state: ");
- for (i = 0 ; i < m_uStackSize ; i++)
+ for (i = 0; i < m_uStackSize; i++)
{
printf("ST(%i): FPV%i | ", StackToST(i), m_uStack[i]);
}
@@ -159,23 +154,21 @@ void FlatFPStateX87::Dump()
void FlatFPStateX87::UpdateMappingFromStack()
{
memset(m_uVirtualMap, -1, sizeof(m_uVirtualMap));
-
+
unsigned i;
- for (i = 0 ; i < m_uStackSize ; i++)
+ for (i = 0; i < m_uStackSize; i++)
{
m_uVirtualMap[m_uStack[i]] = i;
}
}
-
-
#endif
unsigned FlatFPStateX87::StackToST(unsigned uEntry)
{
assert(IsValidEntry(uEntry));
- return m_uStackSize-1-uEntry;
+ return m_uStackSize - 1 - uEntry;
}
unsigned FlatFPStateX87::VirtualToST(unsigned uEntry)
@@ -183,14 +176,13 @@ unsigned FlatFPStateX87::VirtualToST(unsigned uEntry)
assert(Mapped(uEntry));
return StackToST(m_uVirtualMap[uEntry]);
-
}
unsigned FlatFPStateX87::STToVirtual(unsigned uST)
{
assert(uST < m_uStackSize);
- return m_uStack[m_uStackSize-1-uST];
+ return m_uStack[m_uStackSize - 1 - uST];
}
void FlatFPStateX87::Init(FlatFPStateX87* pFrom)
@@ -202,49 +194,47 @@ void FlatFPStateX87::Init(FlatFPStateX87* pFrom)
else
{
memset(m_uVirtualMap, -1, sizeof(m_uVirtualMap));
-
- #ifdef DEBUG
+
+#ifdef DEBUG
memset(m_uStack, -1, sizeof(m_uStack));
- #endif
- m_uStackSize = 0;
+#endif
+ m_uStackSize = 0;
}
- #ifdef DEBUG
+#ifdef DEBUG
m_bIgnoreConsistencyChecks = false;
- #endif
+#endif
}
void FlatFPStateX87::Associate(unsigned uEntry, unsigned uStack)
{
assert(uStack < m_uStackSize);
- m_uStack[uStack] = uEntry;
+ m_uStack[uStack] = uEntry;
m_uVirtualMap[uEntry] = uStack;
}
unsigned FlatFPStateX87::TopIndex()
{
- return m_uStackSize-1;
+ return m_uStackSize - 1;
}
unsigned FlatFPStateX87::TopVirtual()
{
assert(m_uStackSize > 0);
- return m_uStack[m_uStackSize-1];
+ return m_uStack[m_uStackSize - 1];
}
void FlatFPStateX87::Rename(unsigned uVirtualTo, unsigned uVirtualFrom)
{
assert(!Mapped(uVirtualTo));
- unsigned uSlot=m_uVirtualMap[uVirtualFrom];
+ unsigned uSlot = m_uVirtualMap[uVirtualFrom];
Unmap(uVirtualFrom);
Associate(uVirtualTo, uSlot);
-
}
-
void FlatFPStateX87::Push(unsigned uEntry)
{
assert(m_uStackSize <= FP_PHYSICREGISTERS);
@@ -252,19 +242,19 @@ void FlatFPStateX87::Push(unsigned uEntry)
m_uStackSize++;
Associate(uEntry, TopIndex());
-
+
assert(IsConsistent());
}
unsigned FlatFPStateX87::Pop()
{
assert(m_uStackSize != 0);
-
+
unsigned uVirtual = m_uStack[--m_uStackSize];
- #ifdef DEBUG
+#ifdef DEBUG
m_uStack[m_uStackSize] = (unsigned)-1;
- #endif
+#endif
Unmap(uVirtual);
@@ -280,49 +270,48 @@ void CodeGen::genCodeForTransitionStackFP(FlatFPStateX87* pSrc, FlatFPStateX87*
{
FlatFPStateX87 fpState;
FlatFPStateX87* pTmp;
- int i;
+ int i;
// Make a temp copy
memcpy(&fpState, pSrc, sizeof(FlatFPStateX87));
pTmp = &fpState;
// Make sure everything seems consistent.
- assert(pSrc->m_uStackSize >= pDst->m_uStackSize);
- #ifdef DEBUG
- for (i = 0 ; i < FP_VIRTUALREGISTERS ; i++)
+ assert(pSrc->m_uStackSize >= pDst->m_uStackSize);
+#ifdef DEBUG
+ for (i = 0; i < FP_VIRTUALREGISTERS; i++)
{
if (!pTmp->Mapped(i) && pDst->Mapped(i))
{
assert(!"Dst stack state can't have a virtual register live if Src target has it dead");
}
}
- #endif
+#endif
-
// First we need to get rid of the stuff that's dead in pDst
- for (i = 0 ; i < FP_VIRTUALREGISTERS ; i++)
+ for (i = 0; i < FP_VIRTUALREGISTERS; i++)
{
if (pTmp->Mapped(i) && !pDst->Mapped(i))
{
// We have to get rid of this one
JITDUMP("Removing virtual register V%i from stack\n", i);
- // Don't need this virtual register any more
- FlatFPX87_Unload(pTmp, i);
+ // Don't need this virtual register any more
+ FlatFPX87_Unload(pTmp, i);
}
}
- assert(pTmp->m_uStackSize == pDst->m_uStackSize);
+ assert(pTmp->m_uStackSize == pDst->m_uStackSize);
// Extract cycles
- int iProcessed = 0;
+ int iProcessed = 0;
- // We start with the top of the stack so that we can
+ // We start with the top of the stack so that we can
// easily recognize the cycle that contains it
- for (i = pTmp->m_uStackSize-1 ; i >= 0 ; i--)
+ for (i = pTmp->m_uStackSize - 1; i >= 0; i--)
{
// Have we processed this stack element yet?
- if ( ( (1<<i) & iProcessed ) == 0)
+ if (((1 << i) & iProcessed) == 0)
{
// Extract cycle
int iCycle[FP_VIRTUALREGISTERS];
@@ -333,22 +322,21 @@ void CodeGen::genCodeForTransitionStackFP(FlatFPStateX87* pSrc, FlatFPStateX87*
do
{
// Mark current stack element as processed
- iProcessed |= (1<<iCurrent);
-
+ iProcessed |= (1 << iCurrent);
+
// Update cycle
iCycle[iCycleLength++] = iCurrent;
// Next element in cycle
iCurrent = pDst->m_uVirtualMap[pTmp->m_uStack[iCurrent]];
-
- }
- while ( (iProcessed & (1<<iCurrent)) == 0);
- #ifdef DEBUG
+ } while ((iProcessed & (1 << iCurrent)) == 0);
+
+#ifdef DEBUG
if (verbose)
{
printf("Cycle: (");
- for (int l = 0 ; l < iCycleLength ; l++)
+ for (int l = 0; l < iCycleLength; l++)
{
printf("%i", pTmp->StackToST(iCycle[l]));
if (l + 1 < iCycleLength)
@@ -356,7 +344,7 @@ void CodeGen::genCodeForTransitionStackFP(FlatFPStateX87* pSrc, FlatFPStateX87*
}
printf(")\n");
}
- #endif
+#endif
// Extract cycle
if (iCycleLength == 1)
@@ -370,7 +358,7 @@ void CodeGen::genCodeForTransitionStackFP(FlatFPStateX87* pSrc, FlatFPStateX87*
// Cycle includes stack element 0
int j;
- for (j = 1 ; j < iCycleLength ; j++)
+ for (j = 1; j < iCycleLength; j++)
{
FlatFPX87_SwapStack(pTmp, iCycle[j], iTOS);
}
@@ -380,7 +368,7 @@ void CodeGen::genCodeForTransitionStackFP(FlatFPStateX87* pSrc, FlatFPStateX87*
// Cycle doesn't include stack element 0
int j;
- for (j = 0 ; j < iCycleLength ; j++)
+ for (j = 0; j < iCycleLength; j++)
{
FlatFPX87_SwapStack(pTmp, iCycle[j], iTOS);
}
@@ -397,18 +385,19 @@ void CodeGen::genCodeForTransitionStackFP(FlatFPStateX87* pSrc, FlatFPStateX87*
void CodeGen::genCodeForTransitionFromMask(FlatFPStateX87* pSrc, regMaskTP mask, bool bEmitCode)
{
unsigned i;
- for (i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (i = REG_FPV0; i < REG_FPCOUNT; i++)
{
if (pSrc->Mapped(i))
{
if ((mask & genRegMaskFloat((regNumber)i)) == 0)
{
- FlatFPX87_Unload(pSrc, i, bEmitCode);
+ FlatFPX87_Unload(pSrc, i, bEmitCode);
}
}
else
{
- assert( (mask & genRegMaskFloat((regNumber) i)) == 0 && "A register marked as incoming live in the target block isnt live in the current block");
+ assert((mask & genRegMaskFloat((regNumber)i)) == 0 &&
+ "A register marked as incoming live in the target block isnt live in the current block");
}
}
}
@@ -419,42 +408,38 @@ void CodeGen::genCodeForPrologStackFP()
assert(compiler->fgFirstBB);
FlatFPStateX87* pState = compiler->fgFirstBB->bbFPStateX87;
-
+
if (pState && pState->m_uStackSize)
{
- VARSET_TP VARSET_INIT_NOCOPY(liveEnregIn, VarSetOps::Intersection(compiler, compiler->fgFirstBB->bbLiveIn, compiler->optAllFPregVars));
+ VARSET_TP VARSET_INIT_NOCOPY(liveEnregIn, VarSetOps::Intersection(compiler, compiler->fgFirstBB->bbLiveIn,
+ compiler->optAllFPregVars));
unsigned i;
- #ifdef DEBUG
+#ifdef DEBUG
unsigned uLoads = 0;
- #endif
+#endif
assert(pState->m_uStackSize <= FP_VIRTUALREGISTERS);
- for (i = 0 ; i < pState->m_uStackSize ; i++)
+ for (i = 0; i < pState->m_uStackSize; i++)
{
- // Get the virtual register that matches
- unsigned iVirtual = pState->STToVirtual(pState->m_uStackSize-i-1);
+ // Get the virtual register that matches
+ unsigned iVirtual = pState->STToVirtual(pState->m_uStackSize - i - 1);
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
- if (varDsc->IsFloatRegType() && varDsc->lvRegister && varDsc->lvRegNum == iVirtual)
+ if (varDsc->IsFloatRegType() && varDsc->lvRegister && varDsc->lvRegNum == iVirtual)
{
- unsigned varIndex = varDsc->lvVarIndex;
+ unsigned varIndex = varDsc->lvVarIndex;
// Is this variable live on entry?
- if (VarSetOps::IsMember(compiler, liveEnregIn, varIndex))
+ if (VarSetOps::IsMember(compiler, liveEnregIn, varIndex))
{
if (varDsc->lvIsParam)
{
- getEmitter()->emitIns_S(INS_fld,
- EmitSize(varDsc->TypeGet()),
- varNum,
- 0);
+ getEmitter()->emitIns_S(INS_fld, EmitSize(varDsc->TypeGet()), varNum, 0);
}
else
{
@@ -462,9 +447,9 @@ void CodeGen::genCodeForPrologStackFP()
getEmitter()->emitIns(INS_fldz);
}
- #ifdef DEBUG
+#ifdef DEBUG
uLoads++;
- #endif
+#endif
break;
}
}
@@ -481,68 +466,65 @@ void CodeGen::genCodeForEndBlockTransitionStackFP(BasicBlock* block)
{
switch (block->bbJumpKind)
{
- case BBJ_EHFINALLYRET:
- case BBJ_EHFILTERRET:
- case BBJ_EHCATCHRET:
- // Nothing to do
- assert(compCurFPState.m_uStackSize == 0);
- break;
- case BBJ_THROW:
- break;
- case BBJ_RETURN:
- // Nothing to do
- assert(
- (varTypeIsFloating(compiler->info.compRetType) && compCurFPState.m_uStackSize == 1)||
- compCurFPState.m_uStackSize == 0);
- break;
- case BBJ_COND:
- case BBJ_NONE:
- genCodeForBBTransitionStackFP(block->bbNext);
- break;
- case BBJ_ALWAYS:
- genCodeForBBTransitionStackFP(block->bbJumpDest);
- break;
- case BBJ_LEAVE:
- assert(!"BBJ_LEAVE blocks shouldn't get here");
- break;
- case BBJ_CALLFINALLY:
- assert(compCurFPState.IsEmpty() && "we don't enregister variables live on entry to finallys");
- genCodeForBBTransitionStackFP(block->bbJumpDest);
- break;
- case BBJ_SWITCH:
- // Nothing to do here
- break;
- default:
- noway_assert(!"Unexpected bbJumpKind");
- break;
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ case BBJ_EHCATCHRET:
+ // Nothing to do
+ assert(compCurFPState.m_uStackSize == 0);
+ break;
+ case BBJ_THROW:
+ break;
+ case BBJ_RETURN:
+ // Nothing to do
+ assert((varTypeIsFloating(compiler->info.compRetType) && compCurFPState.m_uStackSize == 1) ||
+ compCurFPState.m_uStackSize == 0);
+ break;
+ case BBJ_COND:
+ case BBJ_NONE:
+ genCodeForBBTransitionStackFP(block->bbNext);
+ break;
+ case BBJ_ALWAYS:
+ genCodeForBBTransitionStackFP(block->bbJumpDest);
+ break;
+ case BBJ_LEAVE:
+ assert(!"BBJ_LEAVE blocks shouldn't get here");
+ break;
+ case BBJ_CALLFINALLY:
+ assert(compCurFPState.IsEmpty() && "we don't enregister variables live on entry to finallys");
+ genCodeForBBTransitionStackFP(block->bbJumpDest);
+ break;
+ case BBJ_SWITCH:
+ // Nothing to do here
+ break;
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
}
}
regMaskTP CodeGen::genRegMaskFromLivenessStackFP(VARSET_VALARG_TP varset)
{
- unsigned varNum;
- LclVarDsc * varDsc;
- regMaskTP result = 0;
+ unsigned varNum;
+ LclVarDsc* varDsc;
+ regMaskTP result = 0;
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
- if (varDsc->IsFloatRegType() && varDsc->lvRegister)
+ if (varDsc->IsFloatRegType() && varDsc->lvRegister)
{
- unsigned varIndex = varDsc->lvVarIndex;
+ unsigned varIndex = varDsc->lvVarIndex;
/* Is this variable live on entry? */
- if (VarSetOps::IsMember(compiler, varset, varIndex))
+ if (VarSetOps::IsMember(compiler, varset, varIndex))
{
// We should only call this function doing a transition
// To a block which hasn't state yet. All incoming live enregistered variables
// should have been already initialized.
assert(varDsc->lvRegNum != REG_FPNONE);
-
- result |= genRegMaskFloat(varDsc->lvRegNum);
+
+ result |= genRegMaskFloat(varDsc->lvRegNum);
}
}
}
@@ -567,42 +549,41 @@ void CodeGen::genCodeForBBTransitionStackFP(BasicBlock* pDst)
// Copy current state
pDst->bbFPStateX87 = FlatFPAllocFPState(&compCurFPState);
- regMaskTP liveRegIn = genRegMaskFromLivenessStackFP(VarSetOps::Intersection(compiler, pDst->bbLiveIn, compiler->optAllFPregVars));
+ regMaskTP liveRegIn =
+ genRegMaskFromLivenessStackFP(VarSetOps::Intersection(compiler, pDst->bbLiveIn, compiler->optAllFPregVars));
// Match to live vars
- genCodeForTransitionFromMask(pDst->bbFPStateX87, liveRegIn);
+ genCodeForTransitionFromMask(pDst->bbFPStateX87, liveRegIn);
}
}
-
void CodeGen::SpillTempsStackFP(regMaskTP canSpillMask)
{
- unsigned i;
- regMaskTP spillMask = 0;
- regNumber reg;
+ unsigned i;
+ regMaskTP spillMask = 0;
+ regNumber reg;
// First pass we determine which registers we spill
- for (i = 0 ; i < compCurFPState.m_uStackSize ; i++)
+ for (i = 0; i < compCurFPState.m_uStackSize; i++)
{
- reg = (regNumber) compCurFPState.m_uStack[i];
+ reg = (regNumber)compCurFPState.m_uStack[i];
regMaskTP regMask = genRegMaskFloat(reg);
- if ((regMask & canSpillMask) &&
- (regMask & regSet.rsMaskRegVarFloat) == 0)
+ if ((regMask & canSpillMask) && (regMask & regSet.rsMaskRegVarFloat) == 0)
{
spillMask |= regMask;
}
}
// Second pass we do the actual spills
- for (i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (i = REG_FPV0; i < REG_FPCOUNT; i++)
{
- if ((genRegMaskFloat((regNumber) i) & spillMask) )
- {
- JITDUMP("spilling temp in register %s\n", regVarNameStackFP((regNumber) i));
- SpillFloat((regNumber) i, true);
+ if ((genRegMaskFloat((regNumber)i) & spillMask))
+ {
+ JITDUMP("spilling temp in register %s\n", regVarNameStackFP((regNumber)i));
+ SpillFloat((regNumber)i, true);
}
- }
+ }
}
// Spills all the fp stack. We need this to spill
@@ -612,21 +593,21 @@ void CodeGen::SpillForCallStackFP()
unsigned i;
unsigned uSize = compCurFPState.m_uStackSize;
- for (i = 0 ; i < uSize ; i++)
+ for (i = 0; i < uSize; i++)
{
- SpillFloat((regNumber) compCurFPState.m_uStack[compCurFPState.TopIndex()], true);
- }
-}
+ SpillFloat((regNumber)compCurFPState.m_uStack[compCurFPState.TopIndex()], true);
+ }
+}
void CodeGenInterface::SpillFloat(regNumber reg, bool bIsCall)
{
- #ifdef DEBUG
- regMaskTP mask = genRegMaskFloat(reg);
+#ifdef DEBUG
+ regMaskTP mask = genRegMaskFloat(reg);
// We can allow spilling regvars, but we don't need it at the moment, and we're
// missing code in setupopforflatfp, so assert.
- assert(bIsCall || (mask & (regSet.rsMaskLockedFloat | regSet.rsMaskRegVarFloat)) == 0);
- #endif
+ assert(bIsCall || (mask & (regSet.rsMaskLockedFloat | regSet.rsMaskRegVarFloat)) == 0);
+#endif
JITDUMP("SpillFloat spilling register %s\n", regVarNameStackFP(reg));
@@ -634,15 +615,15 @@ void CodeGenInterface::SpillFloat(regNumber reg, bool bIsCall)
FlatFPX87_MoveToTOS(&compCurFPState, reg);
// Allocate spill structure
- RegSet::SpillDsc *spill = RegSet::SpillDsc::alloc(compiler, &regSet, TYP_FLOAT);
+ RegSet::SpillDsc* spill = RegSet::SpillDsc::alloc(compiler, &regSet, TYP_FLOAT);
- // Fill out spill structure
+ // Fill out spill structure
var_types type;
if (regSet.genUsedRegsFloat[reg])
{
JITDUMP("will spill tree [%08p]\n", dspPtr(regSet.genUsedRegsFloat[reg]));
- // register used for temp stack
- spill->spillTree = regSet.genUsedRegsFloat[reg];
+ // register used for temp stack
+ spill->spillTree = regSet.genUsedRegsFloat[reg];
spill->bEnregisteredVariable = false;
regSet.genUsedRegsFloat[reg]->gtFlags |= GTF_SPILLED;
@@ -657,27 +638,27 @@ void CodeGenInterface::SpillFloat(regNumber reg, bool bIsCall)
JITDUMP("will spill varDsc [%08p]\n", dspPtr(regSet.genRegVarsFloat[reg]));
// enregistered variable
- spill->spillVarDsc = regSet.genRegVarsFloat[reg];
+ spill->spillVarDsc = regSet.genRegVarsFloat[reg];
assert(spill->spillVarDsc);
-
+
spill->bEnregisteredVariable = true;
// Mark as spilled
spill->spillVarDsc->lvSpilled = true;
- type = genActualType(regSet.genRegVarsFloat[reg]->TypeGet());
+ type = genActualType(regSet.genRegVarsFloat[reg]->TypeGet());
// Clear register flag
SetRegVarFloat(reg, type, 0);
}
-
+
// Add to spill list
- spill->spillNext = regSet.rsSpillFloat;
+ spill->spillNext = regSet.rsSpillFloat;
regSet.rsSpillFloat = spill;
- // Obtain space
- TempDsc * temp = spill->spillTemp = compiler->tmpGetTemp(type);
- emitAttr size = EmitSize(type);
-
+ // Obtain space
+ TempDsc* temp = spill->spillTemp = compiler->tmpGetTemp(type);
+ emitAttr size = EmitSize(type);
+
getEmitter()->emitIns_S(INS_fstp, size, temp->tdTempNum(), 0);
compCurFPState.Pop();
}
@@ -696,7 +677,7 @@ void CodeGen::UnspillFloatMachineDep(RegSet::SpillDsc* spillDsc)
// Do the logic as it was a regvar birth
genRegVarBirthStackFP(spillDsc->spillVarDsc);
-
+
// Mark as not spilled any more
spillDsc->spillVarDsc->lvSpilled = false;
@@ -708,47 +689,45 @@ void CodeGen::UnspillFloatMachineDep(RegSet::SpillDsc* spillDsc)
assert(spillDsc->spillTree->gtFlags & GTF_SPILLED);
spillDsc->spillTree->gtFlags &= ~GTF_SPILLED;
-
+
regNumber reg = regSet.PickRegFloat();
genMarkTreeInReg(spillDsc->spillTree, reg);
regSet.SetUsedRegFloat(spillDsc->spillTree, true);
-
+
compCurFPState.Push(reg);
}
// load from spilled spot
- emitAttr size = EmitSize(spillDsc->spillTemp->tdTempType());
+ emitAttr size = EmitSize(spillDsc->spillTemp->tdTempType());
getEmitter()->emitIns_S(INS_fld, size, spillDsc->spillTemp->tdTempNum(), 0);
}
-
-// unspills any reg var that we have in the spill list. We need this
+// unspills any reg var that we have in the spill list. We need this
// because we can't have any spilled vars across basic blocks
void CodeGen::UnspillRegVarsStackFp()
{
- RegSet::SpillDsc* cur;
- RegSet::SpillDsc* next;
+ RegSet::SpillDsc* cur;
+ RegSet::SpillDsc* next;
- for (cur = regSet.rsSpillFloat ; cur ; cur = next)
+ for (cur = regSet.rsSpillFloat; cur; cur = next)
{
next = cur->spillNext;
if (cur->bEnregisteredVariable)
{
- UnspillFloat(cur);
- }
+ UnspillFloat(cur);
+ }
}
}
-#ifdef DEBUG
-const char* regNamesFP[] =
-{
- #define REGDEF(name, rnum, mask, sname) sname,
- #include "registerfp.h"
+#ifdef DEBUG
+const char* regNamesFP[] = {
+#define REGDEF(name, rnum, mask, sname) sname,
+#include "registerfp.h"
};
// static
-const char* CodeGenInterface::regVarNameStackFP(regNumber reg)
+const char* CodeGenInterface::regVarNameStackFP(regNumber reg)
{
return regNamesFP[reg];
}
@@ -760,7 +739,7 @@ bool CodeGen::ConsistentAfterStatementStackFP()
return false;
}
- if (regSet.rsMaskUsedFloat != 0)
+ if (regSet.rsMaskUsedFloat != 0)
{
assert(!"FP register marked as used after statement");
return false;
@@ -772,10 +751,10 @@ bool CodeGen::ConsistentAfterStatementStackFP()
}
if (genCountBits(regSet.rsMaskRegVarFloat) > compCurFPState.m_uStackSize)
{
- assert(!"number of FP regvars in regSet.rsMaskRegVarFloat doesnt match current FP state");
+ assert(!"number of FP regvars in regSet.rsMaskRegVarFloat doesnt match current FP state");
return false;
}
-
+
return true;
}
@@ -790,24 +769,24 @@ void CodeGen::genDiscardStackFP(GenTreePtr tree)
{
assert(tree->InReg());
assert(varTypeIsFloating(tree));
-
+
FlatFPX87_Unload(&compCurFPState, tree->gtRegNum, true);
}
- void CodeGen::genRegRenameWithMasks(regNumber dstReg, regNumber srcReg)
- {
+void CodeGen::genRegRenameWithMasks(regNumber dstReg, regNumber srcReg)
+{
regMaskTP dstregmask = genRegMaskFloat(dstReg);
regMaskTP srcregmask = genRegMaskFloat(srcReg);
-
+
// rename use register
- compCurFPState.Rename(dstReg , srcReg);
-
+ compCurFPState.Rename(dstReg, srcReg);
+
regSet.rsMaskUsedFloat &= ~srcregmask;
regSet.rsMaskUsedFloat |= dstregmask;
if (srcregmask & regSet.rsMaskLockedFloat)
{
- assert( (dstregmask & regSet.rsMaskLockedFloat) == 0);
+ assert((dstregmask & regSet.rsMaskLockedFloat) == 0);
// We will set the new one as locked
regSet.rsMaskLockedFloat &= ~srcregmask;
regSet.rsMaskLockedFloat |= dstregmask;
@@ -815,11 +794,11 @@ void CodeGen::genDiscardStackFP(GenTreePtr tree)
// Updated used tree
assert(!regSet.genUsedRegsFloat[dstReg]);
- regSet.genUsedRegsFloat[dstReg] = regSet.genUsedRegsFloat[srcReg];
- regSet.genUsedRegsFloat[dstReg]->gtRegNum = dstReg;
- regSet.genUsedRegsFloat[srcReg] = NULL;
- }
-
+ regSet.genUsedRegsFloat[dstReg] = regSet.genUsedRegsFloat[srcReg];
+ regSet.genUsedRegsFloat[dstReg]->gtRegNum = dstReg;
+ regSet.genUsedRegsFloat[srcReg] = NULL;
+}
+
void CodeGen::genRegVarBirthStackFP(LclVarDsc* varDsc)
{
// Mark the virtual register we're assigning to this local;
@@ -832,8 +811,8 @@ void CodeGen::genRegVarBirthStackFP(LclVarDsc* varDsc)
assert(varDsc->lvTracked && varDsc->lvRegister && reg != REG_FPNONE);
if (regSet.genUsedRegsFloat[reg])
{
-
- // Register was marked as used... will have to rename it so we can put the
+
+ // Register was marked as used... will have to rename it so we can put the
// regvar where it belongs.
JITDUMP("Renaming used register %s\n", regVarNameStackFP(reg));
@@ -842,19 +821,18 @@ void CodeGen::genRegVarBirthStackFP(LclVarDsc* varDsc)
newreg = regSet.PickRegFloat();
#ifdef DEBUG
- regMaskTP newregmask = genRegMaskFloat(newreg);
+ regMaskTP newregmask = genRegMaskFloat(newreg);
#endif
// Update used mask
- assert((regSet.rsMaskUsedFloat & regmask) &&
- (regSet.rsMaskUsedFloat & newregmask) == 0);
-
+ assert((regSet.rsMaskUsedFloat & regmask) && (regSet.rsMaskUsedFloat & newregmask) == 0);
+
genRegRenameWithMasks(newreg, reg);
}
// Mark the reg as holding a regvar
varDsc->lvSpilled = false;
- SetRegVarFloat(reg, varDsc->TypeGet(), varDsc);
+ SetRegVarFloat(reg, varDsc->TypeGet(), varDsc);
}
void CodeGen::genRegVarBirthStackFP(GenTreePtr tree)
@@ -870,20 +848,19 @@ void CodeGen::genRegVarBirthStackFP(GenTreePtr tree)
// Update register in local var
LclVarDsc* varDsc = compiler->lvaTable + tree->gtLclVarCommon.gtLclNum;
-
+
genRegVarBirthStackFP(varDsc);
- assert(tree->gtRegNum == tree->gtRegVar.gtRegNum &&
- tree->gtRegNum == varDsc->lvRegNum);
+ assert(tree->gtRegNum == tree->gtRegVar.gtRegNum && tree->gtRegNum == varDsc->lvRegNum);
}
void CodeGen::genRegVarDeathStackFP(LclVarDsc* varDsc)
{
regNumber reg = varDsc->lvRegNum;
-
+
assert(varDsc->lvTracked && varDsc->lvRegister && reg != REG_FPNONE);
SetRegVarFloat(reg, varDsc->TypeGet(), 0);
}
-
+
void CodeGen::genRegVarDeathStackFP(GenTreePtr tree)
{
#ifdef DEBUG
@@ -896,8 +873,8 @@ void CodeGen::genRegVarDeathStackFP(GenTreePtr tree)
#endif // DEBUG
LclVarDsc* varDsc = compiler->lvaTable + tree->gtLclVarCommon.gtLclNum;
- genRegVarDeathStackFP(varDsc);
-}
+ genRegVarDeathStackFP(varDsc);
+}
void CodeGen::genLoadStackFP(GenTreePtr tree, regNumber reg)
{
@@ -913,7 +890,7 @@ void CodeGen::genLoadStackFP(GenTreePtr tree, regNumber reg)
if (tree->IsRegVar())
{
// if it has been spilled, unspill it.%
- LclVarDsc * varDsc = &compiler->lvaTable[tree->gtLclVarCommon.gtLclNum];
+ LclVarDsc* varDsc = &compiler->lvaTable[tree->gtLclVarCommon.gtLclNum];
if (varDsc->lvSpilled)
{
UnspillFloat(varDsc);
@@ -927,8 +904,8 @@ void CodeGen::genLoadStackFP(GenTreePtr tree, regNumber reg)
}
else
{
- assert(tree->gtRegNum == tree->gtRegVar.gtRegNum);
- inst_FN(INS_fld, compCurFPState.VirtualToST( tree->gtRegVar.gtRegNum ));
+ assert(tree->gtRegNum == tree->gtRegVar.gtRegNum);
+ inst_FN(INS_fld, compCurFPState.VirtualToST(tree->gtRegVar.gtRegNum));
FlatFPX87_PushVirtual(&compCurFPState, reg);
}
}
@@ -938,7 +915,7 @@ void CodeGen::genLoadStackFP(GenTreePtr tree, regNumber reg)
inst_FS_TT(INS_fld, tree);
}
}
-
+
void CodeGen::genMovStackFP(GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg)
{
if (dstreg == REG_FPNONE && !dst->IsRegVar())
@@ -948,45 +925,43 @@ void CodeGen::genMovStackFP(GenTreePtr dst, regNumber dstreg, GenTreePtr src, re
// reg to mem path
if (srcreg == REG_FPNONE)
{
- assert (src->IsRegVar());
+ assert(src->IsRegVar());
reg = src->gtRegNum;
}
else
{
reg = srcreg;
}
-
+
// Mov src to top of the stack
FlatFPX87_MoveToTOS(&compCurFPState, reg);
- if (srcreg != REG_FPNONE ||
- (src->IsRegVar() && src->IsRegVarDeath()))
+ if (srcreg != REG_FPNONE || (src->IsRegVar() && src->IsRegVarDeath()))
{
// Emit instruction
inst_FS_TT(INS_fstp, dst);
// Update stack
compCurFPState.Pop();
- }
+ }
else
{
inst_FS_TT(INS_fst, dst);
}
}
else
- {
+ {
if (dstreg == REG_FPNONE)
{
- assert (dst->IsRegVar());
+ assert(dst->IsRegVar());
dstreg = dst->gtRegNum;
}
-
+
if (srcreg == REG_FPNONE && !src->IsRegVar())
{
// mem to reg
- assert( dst->IsRegVar() &&
- dst->IsRegVarBirth());
-
+ assert(dst->IsRegVar() && dst->IsRegVarBirth());
+
FlatFPX87_PushVirtual(&compCurFPState, dstreg);
FlatFPX87_MoveToTOS(&compCurFPState, dstreg);
@@ -1002,24 +977,21 @@ void CodeGen::genMovStackFP(GenTreePtr dst, regNumber dstreg, GenTreePtr src, re
else
{
// disposable reg to reg, use renaming
- assert( dst->IsRegVar() &&
- dst->IsRegVarBirth());
+ assert(dst->IsRegVar() && dst->IsRegVarBirth());
assert(src->IsRegVar() || (src->InReg()));
assert(src->gtRegNum != REG_FPNONE);
-
if ((src->InReg()) || (src->IsRegVar() && src->IsRegVarDeath()))
{
// src is disposable and dst is a regvar, so we'll rename src to dst
// SetupOp should have masked out the regvar
- assert(!src->IsRegVar() ||
- !src->IsRegVarDeath() ||
+ assert(!src->IsRegVar() || !src->IsRegVarDeath() ||
!(genRegMaskFloat(src->gtRegVar.gtRegNum) & regSet.rsMaskRegVarFloat));
-
+
// get slot that holds the value
- unsigned uStack = compCurFPState.m_uVirtualMap[src->gtRegNum];
-
+ unsigned uStack = compCurFPState.m_uVirtualMap[src->gtRegNum];
+
// unlink the slot that holds the value
compCurFPState.Unmap(src->gtRegNum);
@@ -1028,20 +1000,20 @@ void CodeGen::genMovStackFP(GenTreePtr dst, regNumber dstreg, GenTreePtr src, re
compCurFPState.IgnoreConsistencyChecks(true);
if (regSet.genUsedRegsFloat[tgtreg])
- {
+ {
// tgtreg is used, we move it to src reg. We do this here as src reg won't be
// marked as used, if tgtreg is used it srcreg will be a candidate for moving
// which is something we don't want, so we do the renaming here.
- genRegRenameWithMasks(src->gtRegNum, tgtreg);
+ genRegRenameWithMasks(src->gtRegNum, tgtreg);
}
compCurFPState.IgnoreConsistencyChecks(false);
// Birth of FP var
genRegVarBirthStackFP(dst);
-
+
// Associate target reg with source physical register
- compCurFPState.Associate(tgtreg, uStack);
+ compCurFPState.Associate(tgtreg, uStack);
}
else
{
@@ -1049,20 +1021,20 @@ void CodeGen::genMovStackFP(GenTreePtr dst, regNumber dstreg, GenTreePtr src, re
{
// regvar that isnt dying to regvar
assert(!src->IsRegVarDeath());
-
+
// Birth of FP var
genRegVarBirthStackFP(dst);
// Load register
- inst_FN(INS_fld, compCurFPState.VirtualToST( src->gtRegVar.gtRegNum ));
+ inst_FN(INS_fld, compCurFPState.VirtualToST(src->gtRegVar.gtRegNum));
// update our logic stack
- FlatFPX87_PushVirtual(&compCurFPState, dst->gtRegVar.gtRegNum);
+ FlatFPX87_PushVirtual(&compCurFPState, dst->gtRegVar.gtRegNum);
}
else
{
// memory to regvar
-
+
// Birth of FP var
genRegVarBirthStackFP(dst);
@@ -1070,15 +1042,14 @@ void CodeGen::genMovStackFP(GenTreePtr dst, regNumber dstreg, GenTreePtr src, re
inst_FS_TT(INS_fld, src);
// update our logic stack
- FlatFPX87_PushVirtual(&compCurFPState, dst->gtRegVar.gtRegNum);
- }
+ FlatFPX87_PushVirtual(&compCurFPState, dst->gtRegVar.gtRegNum);
+ }
}
}
- }
+ }
}
-
-void CodeGen::genCodeForTreeStackFP_DONE (GenTreePtr tree, regNumber reg)
+void CodeGen::genCodeForTreeStackFP_DONE(GenTreePtr tree, regNumber reg)
{
return genCodeForTree_DONE(tree, reg);
}
@@ -1095,24 +1066,23 @@ void CodeGen::genSetupStateStackFP(BasicBlock* block)
}
// Update liveset and lock enregistered live vars on entry
- VARSET_TP VARSET_INIT_NOCOPY(liveSet, VarSetOps::Intersection(compiler, block->bbLiveIn, compiler->optAllFPregVars));
+ VARSET_TP VARSET_INIT_NOCOPY(liveSet,
+ VarSetOps::Intersection(compiler, block->bbLiveIn, compiler->optAllFPregVars));
if (!VarSetOps::IsEmpty(compiler, liveSet))
{
- unsigned varNum;
- LclVarDsc * varDsc;
+ unsigned varNum;
+ LclVarDsc* varDsc;
- for (varNum = 0, varDsc = compiler->lvaTable;
- varNum < compiler->lvaCount;
- varNum++ , varDsc++)
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
{
- if (varDsc->IsFloatRegType() && varDsc->lvRegister)
+ if (varDsc->IsFloatRegType() && varDsc->lvRegister)
{
- unsigned varIndex = varDsc->lvVarIndex;
+ unsigned varIndex = varDsc->lvVarIndex;
// Is this variable live on entry?
- if (VarSetOps::IsMember(compiler, liveSet, varIndex))
+ if (VarSetOps::IsMember(compiler, liveSet, varIndex))
{
JITDUMP("genSetupStateStackFP(): enregistered variable V%i is live on entry to block\n", varNum);
@@ -1138,35 +1108,33 @@ void CodeGen::genSetupStateStackFP(BasicBlock* block)
regMaskTP CodeGen::genPushArgumentStackFP(GenTreePtr args)
{
- regMaskTP addrReg = 0;
- unsigned opsz = genTypeSize(genActualType(args->TypeGet()));
-
+ regMaskTP addrReg = 0;
+ unsigned opsz = genTypeSize(genActualType(args->TypeGet()));
switch (args->gtOper)
{
- GenTreePtr temp;
- GenTreePtr fval;
- size_t flopsz;
-
+ GenTreePtr temp;
+ GenTreePtr fval;
+ size_t flopsz;
case GT_CNS_DBL:
{
- float f = 0.0;
- int* addr = NULL;
- if (args->TypeGet() == TYP_FLOAT)
+ float f = 0.0;
+ int* addr = NULL;
+ if (args->TypeGet() == TYP_FLOAT)
{
- f = (float) args->gtDblCon.gtDconVal;
+ f = (float)args->gtDblCon.gtDconVal;
// *(long*) (&f) used instead of *addr because of of strict
// pointer aliasing optimization. According to the ISO C/C++
// standard, an optimizer can assume two pointers of
// non-compatible types do not point to the same memory.
- inst_IV(INS_push, *((int*) (&f)));
+ inst_IV(INS_push, *((int*)(&f)));
genSinglePush();
addrReg = 0;
}
- else
+ else
{
- addr = (int *)&args->gtDblCon.gtDconVal;
+ addr = (int*)&args->gtDblCon.gtDconVal;
// store forwarding fix for pentium 4 and Centrino
// (even for down level CPUs as we don't care about their perf any more)
@@ -1177,19 +1145,17 @@ regMaskTP CodeGen::genPushArgumentStackFP(GenTreePtr args)
getEmitter()->emitIns_AR_R(INS_fstp, EA_ATTR(flopsz), REG_NA, REG_ESP, 0);
genSinglePush();
genSinglePush();
-
+
addrReg = 0;
}
-
break;
}
-
+
case GT_CAST:
{
- // Is the value a cast from double ?
- if ((args->gtOper == GT_CAST ) &&
- (args->CastFromType() == TYP_DOUBLE) )
+ // Is the value a cast from double ?
+ if ((args->gtOper == GT_CAST) && (args->CastFromType() == TYP_DOUBLE))
{
/* Load the value onto the FP stack */
@@ -1200,20 +1166,20 @@ regMaskTP CodeGen::genPushArgumentStackFP(GenTreePtr args)
addrReg = 0;
goto PUSH_FLT;
- }
+ }
// Fall through to default case....
- }
+ }
default:
{
temp = genMakeAddrOrFPstk(args, &addrReg, false);
- if (temp)
+ if (temp)
{
- unsigned offs;
+ unsigned offs;
// We have the address of the float operand, push its bytes
- offs = opsz; assert(offs % sizeof(int) == 0);
+ offs = opsz;
+ assert(offs % sizeof(int) == 0);
-
if (offs == 4)
{
assert(args->gtType == temp->gtType);
@@ -1222,8 +1188,7 @@ regMaskTP CodeGen::genPushArgumentStackFP(GenTreePtr args)
offs -= sizeof(int);
inst_TT(INS_push, temp, offs);
genSinglePush();
- }
- while (offs);
+ } while (offs);
}
else
{
@@ -1238,14 +1203,14 @@ regMaskTP CodeGen::genPushArgumentStackFP(GenTreePtr args)
}
else
{
- // The argument is on the FP stack -- pop it into [ESP-4/8]
+ // The argument is on the FP stack -- pop it into [ESP-4/8]
PUSH_FLT:
inst_RV_IV(INS_sub, REG_ESP, opsz, EA_PTRSIZE);
genSinglePush();
- if (opsz == 2*sizeof(unsigned))
+ if (opsz == 2 * sizeof(unsigned))
genSinglePush();
// Take reg to top of stack
@@ -1273,16 +1238,16 @@ void CodeGen::genRoundFpExpressionStackFP(GenTreePtr op, var_types type)
switch (op->gtOper)
{
- case GT_LCL_VAR:
- case GT_LCL_FLD:
- case GT_CLS_VAR:
- case GT_CNS_DBL:
- case GT_IND:
- case GT_LEA:
- if (type == op->TypeGet())
- return;
- default:
- break;
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
+ case GT_CLS_VAR:
+ case GT_CNS_DBL:
+ case GT_IND:
+ case GT_LEA:
+ if (type == op->TypeGet())
+ return;
+ default:
+ break;
}
assert(op->gtRegNum != REG_FPNONE);
@@ -1291,13 +1256,13 @@ void CodeGen::genRoundFpExpressionStackFP(GenTreePtr op, var_types type)
FlatFPX87_MoveToTOS(&compCurFPState, op->gtRegNum);
// Allocate a temp for the expression
- TempDsc * temp = compiler->tmpGetTemp(type);
-
- // Store the FP value into the temp
+ TempDsc* temp = compiler->tmpGetTemp(type);
+
+ // Store the FP value into the temp
inst_FS_ST(INS_fstp, EmitSize(type), temp, 0);
- // Load the value back onto the FP stack
- inst_FS_ST(INS_fld , EmitSize(type), temp, 0);
+ // Load the value back onto the FP stack
+ inst_FS_ST(INS_fld, EmitSize(type), temp, 0);
// We no longer need the temp
compiler->tmpRlsTemp(temp);
@@ -1313,14 +1278,14 @@ void CodeGen::genCodeForTreeStackFP_Const(GenTreePtr tree)
printf("\n");
}
#endif // DEBUG
-
+
#ifdef DEBUG
- if (tree->OperGet() != GT_CNS_DBL)
+ if (tree->OperGet() != GT_CNS_DBL)
{
compiler->gtDispTree(tree);
assert(!"bogus float const");
}
-#endif
+#endif
// Pick register
regNumber reg = regSet.PickRegFloat();
@@ -1329,7 +1294,7 @@ void CodeGen::genCodeForTreeStackFP_Const(GenTreePtr tree)
// Push register to virtual stack
FlatFPX87_PushVirtual(&compCurFPState, reg);
-
+
// Update tree
genCodeForTreeStackFP_DONE(tree, reg);
}
@@ -1347,8 +1312,8 @@ void CodeGen::genCodeForTreeStackFP_Leaf(GenTreePtr tree)
switch (tree->OperGet())
{
- case GT_LCL_VAR:
- case GT_LCL_FLD:
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
{
assert(!compiler->lvaTable[tree->gtLclVarCommon.gtLclNum].lvRegister);
@@ -1356,48 +1321,48 @@ void CodeGen::genCodeForTreeStackFP_Leaf(GenTreePtr tree)
regNumber reg = regSet.PickRegFloat();
// Load it
- genLoadStackFP(tree, reg);
+ genLoadStackFP(tree, reg);
- genCodeForTreeStackFP_DONE (tree, reg);
+ genCodeForTreeStackFP_DONE(tree, reg);
break;
}
- case GT_REG_VAR:
+ case GT_REG_VAR:
{
regNumber reg = regSet.PickRegFloat();
genLoadStackFP(tree, reg);
- genCodeForTreeStackFP_DONE (tree, reg);
-
+ genCodeForTreeStackFP_DONE(tree, reg);
+
break;
}
- case GT_CLS_VAR:
+ case GT_CLS_VAR:
{
// Pick register
regNumber reg = regSet.PickRegFloat();
// Load it
- genLoadStackFP(tree, reg);
+ genLoadStackFP(tree, reg);
- genCodeForTreeStackFP_DONE (tree, reg);
+ genCodeForTreeStackFP_DONE(tree, reg);
break;
}
- default:
-#ifdef DEBUG
- compiler->gtDispTree(tree);
+ default:
+#ifdef DEBUG
+ compiler->gtDispTree(tree);
#endif
- assert(!"unexpected leaf");
+ assert(!"unexpected leaf");
}
genUpdateLife(tree);
}
-void CodeGen::genCodeForTreeStackFP_Asg(GenTreePtr tree)
+void CodeGen::genCodeForTreeStackFP_Asg(GenTreePtr tree)
{
#ifdef DEBUG
if (compiler->verbose)
@@ -1408,17 +1373,15 @@ void CodeGen::genCodeForTreeStackFP_Asg(GenTreePtr tree)
}
#endif // DEBUG
- emitAttr size;
- unsigned offs;
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
+ emitAttr size;
+ unsigned offs;
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
assert(tree->OperGet() == GT_ASG);
- if (!op1->IsRegVar() &&
- (op2->gtOper == GT_CAST) &&
- (op1->gtType == op2->gtType) &&
- varTypeIsFloating(op2->gtCast.CastOp()))
+ if (!op1->IsRegVar() && (op2->gtOper == GT_CAST) && (op1->gtType == op2->gtType) &&
+ varTypeIsFloating(op2->gtCast.CastOp()))
{
/* We can discard the cast */
op2 = op2->gtCast.CastOp();
@@ -1435,10 +1398,10 @@ void CodeGen::genCodeForTreeStackFP_Asg(GenTreePtr tree)
}
GenTreePtr op1NonCom = op1->gtEffectiveVal();
- if (op1NonCom->gtOper == GT_LCL_VAR)
+ if (op1NonCom->gtOper == GT_LCL_VAR)
{
#ifdef DEBUG
- LclVarDsc * varDsc = &compiler->lvaTable[op1NonCom->gtLclVarCommon.gtLclNum];
+ LclVarDsc* varDsc = &compiler->lvaTable[op1NonCom->gtLclVarCommon.gtLclNum];
// No dead stores
assert(!varDsc->lvTracked || compiler->opts.MinOpts() || !(op1NonCom->gtFlags & GTF_VAR_DEATH));
#endif
@@ -1449,136 +1412,136 @@ void CodeGen::genCodeForTreeStackFP_Asg(GenTreePtr tree)
* to be checked to see if we need to open a new scope for it.
*/
- if (compiler->opts.compScopeInfo && !compiler->opts.compDbgCode && (compiler->info.compVarScopesCount > 0))
+ if (compiler->opts.compScopeInfo && !compiler->opts.compDbgCode && (compiler->info.compVarScopesCount > 0))
{
- siCheckVarScope(op1NonCom->gtLclVarCommon.gtLclNum,
- op1NonCom->gtLclVar.gtLclILoffs);
+ siCheckVarScope(op1NonCom->gtLclVarCommon.gtLclNum, op1NonCom->gtLclVar.gtLclILoffs);
}
-#endif
+#endif
}
assert(op2);
switch (op2->gtOper)
{
- case GT_CNS_DBL:
-
- assert(compCurFPState.m_uStackSize <= FP_PHYSICREGISTERS);
+ case GT_CNS_DBL:
- regMaskTP addrRegInt; addrRegInt = 0;
- regMaskTP addrRegFlt; addrRegFlt = 0;
+ assert(compCurFPState.m_uStackSize <= FP_PHYSICREGISTERS);
- // op2 is already "evaluated," so doesn't matter if they're reversed or not...
- op1 = genCodeForCommaTree(op1);
- op1 = genMakeAddressableStackFP(op1, &addrRegInt, &addrRegFlt);
+ regMaskTP addrRegInt;
+ addrRegInt = 0;
+ regMaskTP addrRegFlt;
+ addrRegFlt = 0;
- // We want to 'cast' the constant to the op1'a type
- double constantValue; constantValue = op2->gtDblCon.gtDconVal;
- if (op1->gtType == TYP_FLOAT)
- {
- float temp = forceCastToFloat(constantValue);
- constantValue = (double) temp;
- }
-
- GenTreePtr constantTree; constantTree = compiler->gtNewDconNode(constantValue);
- if (genConstantLoadStackFP(constantTree, true))
- {
- if (op1->IsRegVar())
- {
- // regvar birth
- genRegVarBirthStackFP(op1);
+ // op2 is already "evaluated," so doesn't matter if they're reversed or not...
+ op1 = genCodeForCommaTree(op1);
+ op1 = genMakeAddressableStackFP(op1, &addrRegInt, &addrRegFlt);
- // Update
- compCurFPState.Push(op1->gtRegNum);
- }
- else
+ // We want to 'cast' the constant to the op1'a type
+ double constantValue;
+ constantValue = op2->gtDblCon.gtDconVal;
+ if (op1->gtType == TYP_FLOAT)
{
- // store in target
- inst_FS_TT(INS_fstp, op1);
+ float temp = forceCastToFloat(constantValue);
+ constantValue = (double)temp;
}
- }
- else
- {
- // Standard constant
- if (op1->IsRegVar())
- {
- // Load constant to fp stack.
- GenTreePtr cnsaddr;
-
- // Create slot for constant
- if (op1->gtType == TYP_FLOAT ||
- StackFPIsSameAsFloat(op2->gtDblCon.gtDconVal))
+ GenTreePtr constantTree;
+ constantTree = compiler->gtNewDconNode(constantValue);
+ if (genConstantLoadStackFP(constantTree, true))
+ {
+ if (op1->IsRegVar())
{
- // We're going to use that double as a float, so recompute addr
- float f = forceCastToFloat(op2->gtDblCon.gtDconVal);
- cnsaddr = genMakeConst(&f, TYP_FLOAT, tree, true);
+ // regvar birth
+ genRegVarBirthStackFP(op1);
+
+ // Update
+ compCurFPState.Push(op1->gtRegNum);
}
else
{
- cnsaddr = genMakeConst(&op2->gtDblCon.gtDconVal, TYP_DOUBLE, tree, true);
+ // store in target
+ inst_FS_TT(INS_fstp, op1);
}
-
- // Load into stack
- inst_FS_TT(INS_fld, cnsaddr);
-
- // regvar birth
- genRegVarBirthStackFP(op1);
-
- // Update
- compCurFPState.Push(op1->gtRegNum);
}
else
{
- if (size == 4)
+ // Standard constant
+ if (op1->IsRegVar())
{
+ // Load constant to fp stack.
- float f = forceCastToFloat(op2->gtDblCon.gtDconVal);
- int* addr = (int*) &f;
-
- do
- {
- inst_TT_IV(INS_mov, op1, *addr++, offs);
- offs += sizeof(int);
- }
- while (offs < size);
- }
- else
- {
- // store forwarding fix for pentium 4 and centrino and also
- // fld for doubles that can be represented as floats, saving
- // 4 bytes of load
GenTreePtr cnsaddr;
// Create slot for constant
- if (op1->gtType == TYP_FLOAT ||
- StackFPIsSameAsFloat(op2->gtDblCon.gtDconVal))
+ if (op1->gtType == TYP_FLOAT || StackFPIsSameAsFloat(op2->gtDblCon.gtDconVal))
{
// We're going to use that double as a float, so recompute addr
- float f = forceCastToFloat(op2->gtDblCon.gtDconVal);
+ float f = forceCastToFloat(op2->gtDblCon.gtDconVal);
cnsaddr = genMakeConst(&f, TYP_FLOAT, tree, true);
}
else
{
- assert(tree->gtType == TYP_DOUBLE);
cnsaddr = genMakeConst(&op2->gtDblCon.gtDconVal, TYP_DOUBLE, tree, true);
}
-
- inst_FS_TT(INS_fld, cnsaddr);
- inst_FS_TT(INS_fstp, op1);
+
+ // Load into stack
+ inst_FS_TT(INS_fld, cnsaddr);
+
+ // regvar birth
+ genRegVarBirthStackFP(op1);
+
+ // Update
+ compCurFPState.Push(op1->gtRegNum);
+ }
+ else
+ {
+ if (size == 4)
+ {
+
+ float f = forceCastToFloat(op2->gtDblCon.gtDconVal);
+ int* addr = (int*)&f;
+
+ do
+ {
+ inst_TT_IV(INS_mov, op1, *addr++, offs);
+ offs += sizeof(int);
+ } while (offs < size);
+ }
+ else
+ {
+ // store forwarding fix for pentium 4 and centrino and also
+ // fld for doubles that can be represented as floats, saving
+ // 4 bytes of load
+ GenTreePtr cnsaddr;
+
+ // Create slot for constant
+ if (op1->gtType == TYP_FLOAT || StackFPIsSameAsFloat(op2->gtDblCon.gtDconVal))
+ {
+ // We're going to use that double as a float, so recompute addr
+ float f = forceCastToFloat(op2->gtDblCon.gtDconVal);
+ cnsaddr = genMakeConst(&f, TYP_FLOAT, tree, true);
+ }
+ else
+ {
+ assert(tree->gtType == TYP_DOUBLE);
+ cnsaddr = genMakeConst(&op2->gtDblCon.gtDconVal, TYP_DOUBLE, tree, true);
+ }
+
+ inst_FS_TT(INS_fld, cnsaddr);
+ inst_FS_TT(INS_fstp, op1);
+ }
}
}
- }
- genDoneAddressableStackFP(op1, addrRegInt, addrRegFlt, RegSet::KEEP_REG);
- genUpdateLife(op1);
- return;
+ genDoneAddressableStackFP(op1, addrRegInt, addrRegFlt, RegSet::KEEP_REG);
+ genUpdateLife(op1);
+ return;
- default:
- break;
+ default:
+ break;
}
// Not one of the easy optimizations. Proceed normally
- if (tree->gtFlags & GTF_REVERSE_OPS)
+ if (tree->gtFlags & GTF_REVERSE_OPS)
{
/* Evaluate the RHS onto the FP stack.
We don't need to round it as we will be doing a spill for
@@ -1587,12 +1550,7 @@ void CodeGen::genCodeForTreeStackFP_Asg(GenTreePtr tree)
genSetupForOpStackFP(op1, op2, true, true, false, true);
// Do the move
- genMovStackFP(
- op1,
- REG_FPNONE,
- op2,
- (op2->InReg())?op2->gtRegNum:REG_FPNONE);
-
+ genMovStackFP(op1, REG_FPNONE, op2, (op2->InReg()) ? op2->gtRegNum : REG_FPNONE);
}
else
{
@@ -1600,20 +1558,16 @@ void CodeGen::genCodeForTreeStackFP_Asg(GenTreePtr tree)
// This should never happen
assert(!op1->IsRegVar());
-
+
genSetupForOpStackFP(op1, op2, false, true, false, true);
- // Do the actual move
- genMovStackFP(op1, REG_FPNONE, op2, (op2->InReg())?op2->gtRegNum:REG_FPNONE);
+ // Do the actual move
+ genMovStackFP(op1, REG_FPNONE, op2, (op2->InReg()) ? op2->gtRegNum : REG_FPNONE);
}
}
-void CodeGen::genSetupForOpStackFP(GenTreePtr& op1,
- GenTreePtr& op2,
- bool bReverse,
- bool bMakeOp1Addressable,
- bool bOp1ReadOnly,
- bool bOp2ReadOnly)
+void CodeGen::genSetupForOpStackFP(
+ GenTreePtr& op1, GenTreePtr& op2, bool bReverse, bool bMakeOp1Addressable, bool bOp1ReadOnly, bool bOp2ReadOnly)
{
if (bMakeOp1Addressable)
{
@@ -1623,15 +1577,13 @@ void CodeGen::genSetupForOpStackFP(GenTreePtr& op1,
}
else
{
- regMaskTP addrRegInt = 0;
- regMaskTP addrRegFlt = 0;
+ regMaskTP addrRegInt = 0;
+ regMaskTP addrRegFlt = 0;
op1 = genCodeForCommaTree(op1);
-
+
// Evaluate RHS on FP stack
- if (bOp2ReadOnly &&
- op2->IsRegVar() &&
- !op2->IsRegVarDeath())
+ if (bOp2ReadOnly && op2->IsRegVar() && !op2->IsRegVarDeath())
{
// read only and not dying, so just make addressable
op1 = genMakeAddressableStackFP(op1, &addrRegInt, &addrRegFlt);
@@ -1657,23 +1609,22 @@ void CodeGen::genSetupForOpStackFP(GenTreePtr& op1,
regSet.SetUsedRegFloat(op2, false);
}
-
/* Free up anything that was tied up by the target address */
- genDoneAddressableStackFP(op1, addrRegInt, addrRegFlt, RegSet::KEEP_REG);
- }
+ genDoneAddressableStackFP(op1, addrRegInt, addrRegFlt, RegSet::KEEP_REG);
+ }
}
else
{
- assert(!bReverse || !"Can't do this. if op2 is a reg var and dies in op1, we have a serious problem. For the moment, handle this in the caller");
+ assert(!bReverse ||
+ !"Can't do this. if op2 is a reg var and dies in op1, we have a serious problem. For the "
+ "moment, handle this in the caller");
- regMaskTP addrRegInt = 0;
- regMaskTP addrRegFlt = 0;
+ regMaskTP addrRegInt = 0;
+ regMaskTP addrRegFlt = 0;
op1 = genCodeForCommaTree(op1);
-
- if (bOp1ReadOnly &&
- op1->IsRegVar() &&
- !op1->IsRegVarDeath() &&
+
+ if (bOp1ReadOnly && op1->IsRegVar() && !op1->IsRegVarDeath() &&
!genRegVarDiesInSubTree(op2, op1->gtRegVar.gtRegNum)) // regvar can't die in op2 either
{
// First update liveness for op1, since we're "evaluating" it here
@@ -1682,7 +1633,7 @@ void CodeGen::genSetupForOpStackFP(GenTreePtr& op1,
op2 = genCodeForCommaTree(op2);
// read only and not dying, we dont have to do anything.
- op2 = genMakeAddressableStackFP(op2, &addrRegInt, &addrRegFlt);
+ op2 = genMakeAddressableStackFP(op2, &addrRegInt, &addrRegFlt);
genKeepAddressableStackFP(op2, &addrRegInt, &addrRegFlt);
}
else
@@ -1694,17 +1645,17 @@ void CodeGen::genSetupForOpStackFP(GenTreePtr& op1,
op2 = genCodeForCommaTree(op2);
op2 = genMakeAddressableStackFP(op2, &addrRegInt, &addrRegFlt);
-
+
// Restore op1 if necessary
- if (op1->gtFlags & GTF_SPILLED)
+ if (op1->gtFlags & GTF_SPILLED)
{
UnspillFloat(op1);
}
-
+
// Lock op1
regSet.SetLockedRegFloat(op1, true);
-
- genKeepAddressableStackFP(op2, &addrRegInt, &addrRegFlt);
+
+ genKeepAddressableStackFP(op2, &addrRegInt, &addrRegFlt);
// unlock op1
regSet.SetLockedRegFloat(op1, false);
@@ -1712,13 +1663,12 @@ void CodeGen::genSetupForOpStackFP(GenTreePtr& op1,
// mark as free
regSet.SetUsedRegFloat(op1, false);
}
-
- genDoneAddressableStackFP(op2, addrRegInt, addrRegFlt, RegSet::KEEP_REG);
-
+
+ genDoneAddressableStackFP(op2, addrRegInt, addrRegFlt, RegSet::KEEP_REG);
}
}
-void CodeGen::genCodeForTreeStackFP_Arithm (GenTreePtr tree)
+void CodeGen::genCodeForTreeStackFP_Arithm(GenTreePtr tree)
{
#ifdef DEBUG
if (compiler->verbose)
@@ -1729,68 +1679,54 @@ void CodeGen::genCodeForTreeStackFP_Arithm (GenTreePtr tree)
}
#endif // DEBUG
- assert(tree->OperGet() == GT_ADD ||
- tree->OperGet() == GT_SUB ||
- tree->OperGet() == GT_MUL ||
+ assert(tree->OperGet() == GT_ADD || tree->OperGet() == GT_SUB || tree->OperGet() == GT_MUL ||
tree->OperGet() == GT_DIV);
-
-
// We handle the reverse here instead of leaving setupop to do it. As for this case
//
// + with reverse
- // op1 regvar
+ // op1 regvar
//
// and in regvar dies in op1, we would need a load of regvar, instead of a noop. So we handle this
// here and tell genArithmStackFP to do the reverse operation
bool bReverse;
GenTreePtr op1, op2;
-
+
if (tree->gtFlags & GTF_REVERSE_OPS)
{
bReverse = true;
- op1 = tree->gtGetOp2();
- op2 = tree->gtOp.gtOp1;
+ op1 = tree->gtGetOp2();
+ op2 = tree->gtOp.gtOp1;
}
else
{
bReverse = false;
- op1 = tree->gtOp.gtOp1;
- op2 = tree->gtGetOp2();
+ op1 = tree->gtOp.gtOp1;
+ op2 = tree->gtGetOp2();
}
regNumber result;
-
+
// Fast paths
genTreeOps oper = tree->OperGet();
- if ( op1->IsRegVar() &&
- op2->IsRegVar() &&
- !op1->IsRegVarDeath() &&
- op2->IsRegVarDeath())
+ if (op1->IsRegVar() && op2->IsRegVar() && !op1->IsRegVarDeath() && op2->IsRegVarDeath())
{
// In this fastpath, we will save a load by doing the operation directly on the op2
// register, as it's dying.
// Mark op2 as dead
genRegVarDeathStackFP(op2);
-
+
// Do operation
- result =
- genArithmStackFP(
- oper,
- op2,
- op2->gtRegVar.gtRegNum,
- op1,
- REG_FPNONE,
- !bReverse);
+ result = genArithmStackFP(oper, op2, op2->gtRegVar.gtRegNum, op1, REG_FPNONE, !bReverse);
genUpdateLife(op1);
genUpdateLife(op2);
}
- else if ( !op1->IsRegVar() && // We don't do this for regvars, as we'll need a scratch reg
- ((tree->gtFlags & GTF_SIDE_EFFECT) == 0) && // No side effects
- GenTree::Compare(op1, op2)) // op1 and op2 are the same
+ else if (!op1->IsRegVar() && // We don't do this for regvars, as we'll need a scratch reg
+ ((tree->gtFlags & GTF_SIDE_EFFECT) == 0) && // No side effects
+ GenTree::Compare(op1, op2)) // op1 and op2 are the same
{
// op1 is same thing as op2. Ideal for CSEs that werent optimized
// due to their low cost.
@@ -1801,100 +1737,85 @@ void CodeGen::genCodeForTreeStackFP_Arithm (GenTreePtr tree)
genCodeForTreeFloat(op2);
- result =
- genArithmStackFP(
- oper,
- op2,
- op2->gtRegNum,
- op2,
- op2->gtRegNum,
- bReverse);
+ result = genArithmStackFP(oper, op2, op2->gtRegNum, op2, op2->gtRegNum, bReverse);
}
else
{
genSetupForOpStackFP(op1, op2, false, false, false, true);
- result =
- genArithmStackFP(
- oper,
- op1,
- (op1->InReg())?op1->gtRegNum:REG_FPNONE,
- op2,
- (op2->InReg())?op2->gtRegNum:REG_FPNONE,
- bReverse);
+ result = genArithmStackFP(oper, op1, (op1->InReg()) ? op1->gtRegNum : REG_FPNONE, op2,
+ (op2->InReg()) ? op2->gtRegNum : REG_FPNONE, bReverse);
}
-
genCodeForTreeStackFP_DONE(tree, result);
}
-regNumber CodeGen::genArithmStackFP(genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg, bool bReverse)
+regNumber CodeGen::genArithmStackFP(
+ genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg, bool bReverse)
{
- #ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("genArithmStackFP() dst: ");
Compiler::printTreeID(dst);
printf(" src: ");
Compiler::printTreeID(src);
- printf(" dstreg: %s srcreg: %s\n",
- dstreg==REG_FPNONE ? "NONE" : regVarNameStackFP(dstreg),
- srcreg==REG_FPNONE ? "NONE" : regVarNameStackFP(srcreg));
+ printf(" dstreg: %s srcreg: %s\n", dstreg == REG_FPNONE ? "NONE" : regVarNameStackFP(dstreg),
+ srcreg == REG_FPNONE ? "NONE" : regVarNameStackFP(srcreg));
}
#endif // DEBUG
-
+
// Select instruction depending on oper and bReverseOp
-
- instruction ins_NN;
- instruction ins_RN;
- instruction ins_RP;
- instruction ins_NP;
+
+ instruction ins_NN;
+ instruction ins_RN;
+ instruction ins_RP;
+ instruction ins_NP;
switch (oper)
{
- default:
- assert(!"Unexpected oper");
- case GT_ADD:
- case GT_SUB:
- case GT_MUL:
- case GT_DIV:
-
- /* Make sure the instruction tables look correctly ordered */
- assert(FPmathNN[GT_ADD - GT_ADD] == INS_fadd );
- assert(FPmathNN[GT_SUB - GT_ADD] == INS_fsub );
- assert(FPmathNN[GT_MUL - GT_ADD] == INS_fmul );
- assert(FPmathNN[GT_DIV - GT_ADD] == INS_fdiv );
-
- assert(FPmathNP[GT_ADD - GT_ADD] == INS_faddp );
- assert(FPmathNP[GT_SUB - GT_ADD] == INS_fsubp );
- assert(FPmathNP[GT_MUL - GT_ADD] == INS_fmulp );
- assert(FPmathNP[GT_DIV - GT_ADD] == INS_fdivp );
-
- assert(FPmathRN[GT_ADD - GT_ADD] == INS_fadd );
- assert(FPmathRN[GT_SUB - GT_ADD] == INS_fsubr );
- assert(FPmathRN[GT_MUL - GT_ADD] == INS_fmul );
- assert(FPmathRN[GT_DIV - GT_ADD] == INS_fdivr );
-
- assert(FPmathRP[GT_ADD - GT_ADD] == INS_faddp );
- assert(FPmathRP[GT_SUB - GT_ADD] == INS_fsubrp);
- assert(FPmathRP[GT_MUL - GT_ADD] == INS_fmulp );
- assert(FPmathRP[GT_DIV - GT_ADD] == INS_fdivrp);
-
-
- if (bReverse)
- {
- ins_NN = FPmathRN[oper - GT_ADD];
- ins_NP = FPmathRP[oper - GT_ADD];
- ins_RN = FPmathNN[oper - GT_ADD];
- ins_RP = FPmathNP[oper - GT_ADD];
- }
- else
- {
- ins_NN = FPmathNN[oper - GT_ADD];
- ins_NP = FPmathNP[oper - GT_ADD];
- ins_RN = FPmathRN[oper - GT_ADD];
- ins_RP = FPmathRP[oper - GT_ADD];
- }
+ default:
+ assert(!"Unexpected oper");
+ case GT_ADD:
+ case GT_SUB:
+ case GT_MUL:
+ case GT_DIV:
+
+ /* Make sure the instruction tables look correctly ordered */
+ assert(FPmathNN[GT_ADD - GT_ADD] == INS_fadd);
+ assert(FPmathNN[GT_SUB - GT_ADD] == INS_fsub);
+ assert(FPmathNN[GT_MUL - GT_ADD] == INS_fmul);
+ assert(FPmathNN[GT_DIV - GT_ADD] == INS_fdiv);
+
+ assert(FPmathNP[GT_ADD - GT_ADD] == INS_faddp);
+ assert(FPmathNP[GT_SUB - GT_ADD] == INS_fsubp);
+ assert(FPmathNP[GT_MUL - GT_ADD] == INS_fmulp);
+ assert(FPmathNP[GT_DIV - GT_ADD] == INS_fdivp);
+
+ assert(FPmathRN[GT_ADD - GT_ADD] == INS_fadd);
+ assert(FPmathRN[GT_SUB - GT_ADD] == INS_fsubr);
+ assert(FPmathRN[GT_MUL - GT_ADD] == INS_fmul);
+ assert(FPmathRN[GT_DIV - GT_ADD] == INS_fdivr);
+
+ assert(FPmathRP[GT_ADD - GT_ADD] == INS_faddp);
+ assert(FPmathRP[GT_SUB - GT_ADD] == INS_fsubrp);
+ assert(FPmathRP[GT_MUL - GT_ADD] == INS_fmulp);
+ assert(FPmathRP[GT_DIV - GT_ADD] == INS_fdivrp);
+
+ if (bReverse)
+ {
+ ins_NN = FPmathRN[oper - GT_ADD];
+ ins_NP = FPmathRP[oper - GT_ADD];
+ ins_RN = FPmathNN[oper - GT_ADD];
+ ins_RP = FPmathNP[oper - GT_ADD];
+ }
+ else
+ {
+ ins_NN = FPmathNN[oper - GT_ADD];
+ ins_NP = FPmathNP[oper - GT_ADD];
+ ins_RN = FPmathRN[oper - GT_ADD];
+ ins_RP = FPmathRP[oper - GT_ADD];
+ }
}
regNumber result = REG_FPNONE;
@@ -1904,10 +1825,10 @@ regNumber CodeGen::genArithmStackFP(genTreeOps oper, GenTreePtr dst, regNumber d
if (srcreg == REG_FPNONE)
{
if (src->IsRegVar())
- {
+ {
if (src->IsRegVarDeath())
{
- if (compCurFPState.TopVirtual() == (unsigned) dst->gtRegNum)
+ if (compCurFPState.TopVirtual() == (unsigned)dst->gtRegNum)
{
// Do operation and store in srcreg
inst_FS(ins_RP, compCurFPState.VirtualToST(src->gtRegNum));
@@ -1925,17 +1846,15 @@ regNumber CodeGen::genArithmStackFP(genTreeOps oper, GenTreePtr dst, regNumber d
inst_FS(ins_NP, compCurFPState.VirtualToST(dstreg));
// Kill the register
- FlatFPX87_Kill(&compCurFPState, src->gtRegNum);
+ FlatFPX87_Kill(&compCurFPState, src->gtRegNum);
}
-
- assert(!src->IsRegVar() ||
- !src->IsRegVarDeath() ||
- !(genRegMaskFloat(src->gtRegVar.gtRegNum) & regSet.rsMaskRegVarFloat));
+ assert(!src->IsRegVar() || !src->IsRegVarDeath() ||
+ !(genRegMaskFloat(src->gtRegVar.gtRegNum) & regSet.rsMaskRegVarFloat));
}
else
{
- if (compCurFPState.TopVirtual() == (unsigned) src->gtRegNum)
+ if (compCurFPState.TopVirtual() == (unsigned)src->gtRegNum)
{
inst_FS(ins_RN, compCurFPState.VirtualToST(dst->gtRegNum));
}
@@ -1962,7 +1881,7 @@ regNumber CodeGen::genArithmStackFP(genTreeOps oper, GenTreePtr dst, regNumber d
}
else
{
- if (compCurFPState.TopVirtual() == (unsigned) dst->gtRegNum)
+ if (compCurFPState.TopVirtual() == (unsigned)dst->gtRegNum)
{
// Do operation and store in srcreg
inst_FS(ins_RP, compCurFPState.VirtualToST(srcreg));
@@ -1972,7 +1891,7 @@ regNumber CodeGen::genArithmStackFP(genTreeOps oper, GenTreePtr dst, regNumber d
compCurFPState.Rename(dstreg, srcreg);
}
else
- {
+ {
FlatFPX87_MoveToTOS(&compCurFPState, srcreg);
// do reverse and pop operation
@@ -1991,12 +1910,11 @@ regNumber CodeGen::genArithmStackFP(genTreeOps oper, GenTreePtr dst, regNumber d
assert(!"if we get here it means we didnt load op1 into a temp. Investigate why");
}
- assert (result != REG_FPNONE);
+ assert(result != REG_FPNONE);
return result;
}
-
-void CodeGen::genCodeForTreeStackFP_AsgArithm (GenTreePtr tree)
+void CodeGen::genCodeForTreeStackFP_AsgArithm(GenTreePtr tree)
{
#ifdef DEBUG
if (compiler->verbose)
@@ -2007,182 +1925,175 @@ void CodeGen::genCodeForTreeStackFP_AsgArithm (GenTreePtr tree)
}
#endif // DEBUG
- assert(tree->OperGet() == GT_ASG_ADD ||
- tree->OperGet() == GT_ASG_SUB ||
- tree->OperGet() == GT_ASG_MUL ||
+ assert(tree->OperGet() == GT_ASG_ADD || tree->OperGet() == GT_ASG_SUB || tree->OperGet() == GT_ASG_MUL ||
tree->OperGet() == GT_ASG_DIV);
GenTreePtr op1, op2;
-
+
op1 = tree->gtOp.gtOp1;
op2 = tree->gtGetOp2();
-
- genSetupForOpStackFP(op1, op2, (tree->gtFlags & GTF_REVERSE_OPS)?true:false, true, false, true);
- regNumber result =
- genAsgArithmStackFP(
- tree->OperGet(),
- op1,
- (op1->InReg())?op1->gtRegNum:REG_FPNONE,
- op2,
- (op2->InReg())?op2->gtRegNum:REG_FPNONE);
+ genSetupForOpStackFP(op1, op2, (tree->gtFlags & GTF_REVERSE_OPS) ? true : false, true, false, true);
+
+ regNumber result = genAsgArithmStackFP(tree->OperGet(), op1, (op1->InReg()) ? op1->gtRegNum : REG_FPNONE, op2,
+ (op2->InReg()) ? op2->gtRegNum : REG_FPNONE);
genCodeForTreeStackFP_DONE(tree, result);
}
-regNumber CodeGen::genAsgArithmStackFP(genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg)
+regNumber CodeGen::genAsgArithmStackFP(
+ genTreeOps oper, GenTreePtr dst, regNumber dstreg, GenTreePtr src, regNumber srcreg)
{
regNumber result = REG_FPNONE;
- #ifdef DEBUG
+#ifdef DEBUG
if (compiler->verbose)
{
printf("genAsgArithmStackFP() dst: ");
Compiler::printTreeID(dst);
printf(" src: ");
Compiler::printTreeID(src);
- printf(" dstreg: %s srcreg: %s\n",
- dstreg==REG_FPNONE ? "NONE" : regVarNameStackFP(dstreg),
- srcreg==REG_FPNONE ? "NONE" : regVarNameStackFP(srcreg));
+ printf(" dstreg: %s srcreg: %s\n", dstreg == REG_FPNONE ? "NONE" : regVarNameStackFP(dstreg),
+ srcreg == REG_FPNONE ? "NONE" : regVarNameStackFP(srcreg));
}
#endif // DEBUG
-
- instruction ins_NN;
- instruction ins_RN;
- instruction ins_RP;
- instruction ins_NP;
+
+ instruction ins_NN;
+ instruction ins_RN;
+ instruction ins_RP;
+ instruction ins_NP;
switch (oper)
{
- default:
- assert(!"Unexpected oper");
- break;
- case GT_ASG_ADD:
- case GT_ASG_SUB:
- case GT_ASG_MUL:
- case GT_ASG_DIV:
+ default:
+ assert(!"Unexpected oper");
+ break;
+ case GT_ASG_ADD:
+ case GT_ASG_SUB:
+ case GT_ASG_MUL:
+ case GT_ASG_DIV:
- assert(FPmathRN[GT_ASG_ADD - GT_ASG_ADD] == INS_fadd );
- assert(FPmathRN[GT_ASG_SUB - GT_ASG_ADD] == INS_fsubr );
- assert(FPmathRN[GT_ASG_MUL - GT_ASG_ADD] == INS_fmul );
- assert(FPmathRN[GT_ASG_DIV - GT_ASG_ADD] == INS_fdivr );
+ assert(FPmathRN[GT_ASG_ADD - GT_ASG_ADD] == INS_fadd);
+ assert(FPmathRN[GT_ASG_SUB - GT_ASG_ADD] == INS_fsubr);
+ assert(FPmathRN[GT_ASG_MUL - GT_ASG_ADD] == INS_fmul);
+ assert(FPmathRN[GT_ASG_DIV - GT_ASG_ADD] == INS_fdivr);
- assert(FPmathRP[GT_ASG_ADD - GT_ASG_ADD] == INS_faddp );
- assert(FPmathRP[GT_ASG_SUB - GT_ASG_ADD] == INS_fsubrp);
- assert(FPmathRP[GT_ASG_MUL - GT_ASG_ADD] == INS_fmulp );
- assert(FPmathRP[GT_ASG_DIV - GT_ASG_ADD] == INS_fdivrp);
+ assert(FPmathRP[GT_ASG_ADD - GT_ASG_ADD] == INS_faddp);
+ assert(FPmathRP[GT_ASG_SUB - GT_ASG_ADD] == INS_fsubrp);
+ assert(FPmathRP[GT_ASG_MUL - GT_ASG_ADD] == INS_fmulp);
+ assert(FPmathRP[GT_ASG_DIV - GT_ASG_ADD] == INS_fdivrp);
- ins_NN = FPmathNN[oper - GT_ASG_ADD];
- ins_NP = FPmathNP[oper - GT_ASG_ADD];
+ ins_NN = FPmathNN[oper - GT_ASG_ADD];
+ ins_NP = FPmathNP[oper - GT_ASG_ADD];
- ins_RN = FPmathRN[oper - GT_ASG_ADD];
- ins_RP = FPmathRP[oper - GT_ASG_ADD];
+ ins_RN = FPmathRN[oper - GT_ASG_ADD];
+ ins_RP = FPmathRP[oper - GT_ASG_ADD];
- if (dstreg != REG_FPNONE)
- {
- assert(!"dst should be a regvar or memory");
- }
- else
- {
- if (dst->IsRegVar())
- {
- if (src->IsRegVar())
+ if (dstreg != REG_FPNONE)
{
- if (src->IsRegVarDeath())
+ assert(!"dst should be a regvar or memory");
+ }
+ else
+ {
+ if (dst->IsRegVar())
{
- // Take src to top of stack
- FlatFPX87_MoveToTOS(&compCurFPState, src->gtRegNum);
+ if (src->IsRegVar())
+ {
+ if (src->IsRegVarDeath())
+ {
+ // Take src to top of stack
+ FlatFPX87_MoveToTOS(&compCurFPState, src->gtRegNum);
- // Do op
- inst_FS(ins_NP, compCurFPState.VirtualToST(dst->gtRegNum));
+ // Do op
+ inst_FS(ins_NP, compCurFPState.VirtualToST(dst->gtRegNum));
- // Kill the register
- FlatFPX87_Kill(&compCurFPState, src->gtRegNum);
+ // Kill the register
+ FlatFPX87_Kill(&compCurFPState, src->gtRegNum);
- // SetupOp should mark the regvar as dead
- assert( (genRegMaskFloat(src->gtRegVar.gtRegNum) & regSet.rsMaskRegVarFloat) == 0);
- }
- else
- {
- assert(src->gtRegNum == src->gtRegVar.gtRegNum && "We shoudnt be loading regvar src on the stack as src is readonly");
-
- // Take src to top of stack
- FlatFPX87_MoveToTOS(&compCurFPState, src->gtRegNum);
+ // SetupOp should mark the regvar as dead
+ assert((genRegMaskFloat(src->gtRegVar.gtRegNum) & regSet.rsMaskRegVarFloat) == 0);
+ }
+ else
+ {
+ assert(src->gtRegNum == src->gtRegVar.gtRegNum &&
+ "We shoudnt be loading regvar src on the stack as src is readonly");
- // Do op
- inst_FS(ins_RN, compCurFPState.VirtualToST(dst->gtRegNum));
- }
- }
- else
- {
- if (srcreg == REG_FPNONE)
- {
- // take enregistered variable to top of stack
- FlatFPX87_MoveToTOS(&compCurFPState, dst->gtRegNum);
+ // Take src to top of stack
+ FlatFPX87_MoveToTOS(&compCurFPState, src->gtRegNum);
- // Do operation with mem
- inst_FS_TT(ins_NN, src);
+ // Do op
+ inst_FS(ins_RN, compCurFPState.VirtualToST(dst->gtRegNum));
+ }
+ }
+ else
+ {
+ if (srcreg == REG_FPNONE)
+ {
+ // take enregistered variable to top of stack
+ FlatFPX87_MoveToTOS(&compCurFPState, dst->gtRegNum);
+
+ // Do operation with mem
+ inst_FS_TT(ins_NN, src);
+ }
+ else
+ {
+ // take enregistered variable to top of stack
+ FlatFPX87_MoveToTOS(&compCurFPState, src->gtRegNum);
+
+ // do op
+ inst_FS(ins_NP, compCurFPState.VirtualToST(dst->gtRegNum));
+
+ // Kill the register
+ FlatFPX87_Kill(&compCurFPState, src->gtRegNum);
+ }
+ }
}
else
{
- // take enregistered variable to top of stack
- FlatFPX87_MoveToTOS(&compCurFPState, src->gtRegNum);
-
- // do op
- inst_FS(ins_NP, compCurFPState.VirtualToST(dst->gtRegNum));
-
- // Kill the register
- FlatFPX87_Kill(&compCurFPState, src->gtRegNum);
- }
- }
- }
- else
- {
- // To memory
- if ( (src->IsRegVar()) &&
- !src->IsRegVarDeath() )
- {
- // We set src as read only, but as dst is in memory, we will need
- // an extra physical register (which we should have, as we have a
- // spare one for transitions).
- //
- // There used to be an assertion: assert(src->gtRegNum == src->gtRegVar.gtRegNum, ...)
- // here, but there's actually no reason to assume that. AFAICT, for FP vars under stack FP,
- // src->gtRegVar.gtRegNum is the allocated stack pseudo-register, but src->gtRegNum is the
- // FP stack position into which that is loaded to represent a particular use of the variable.
- inst_FN(INS_fld, compCurFPState.VirtualToST( src->gtRegNum ));
-
- // Do operation with mem
- inst_FS_TT(ins_RN, dst);
-
- // store back
- inst_FS_TT(INS_fstp, dst);
- }
- else
- {
- // put src in top of stack
- FlatFPX87_MoveToTOS(&compCurFPState, srcreg);
+ // To memory
+ if ((src->IsRegVar()) && !src->IsRegVarDeath())
+ {
+ // We set src as read only, but as dst is in memory, we will need
+ // an extra physical register (which we should have, as we have a
+ // spare one for transitions).
+ //
+ // There used to be an assertion: assert(src->gtRegNum == src->gtRegVar.gtRegNum, ...)
+ // here, but there's actually no reason to assume that. AFAICT, for FP vars under stack FP,
+ // src->gtRegVar.gtRegNum is the allocated stack pseudo-register, but src->gtRegNum is the
+ // FP stack position into which that is loaded to represent a particular use of the variable.
+ inst_FN(INS_fld, compCurFPState.VirtualToST(src->gtRegNum));
+
+ // Do operation with mem
+ inst_FS_TT(ins_RN, dst);
+
+ // store back
+ inst_FS_TT(INS_fstp, dst);
+ }
+ else
+ {
+ // put src in top of stack
+ FlatFPX87_MoveToTOS(&compCurFPState, srcreg);
- // Do operation with mem
- inst_FS_TT(ins_RN, dst);
+ // Do operation with mem
+ inst_FS_TT(ins_RN, dst);
- // store back
- inst_FS_TT(INS_fstp, dst);
+ // store back
+ inst_FS_TT(INS_fstp, dst);
- // SetupOp should have marked the regvar as dead in tat case
- assert( !src->IsRegVar() || !src->IsRegVarDeath() ||
- (genRegMaskFloat(src->gtRegVar.gtRegNum) & regSet.rsMaskRegVarFloat) == 0);
+ // SetupOp should have marked the regvar as dead in that case
+ assert(!src->IsRegVar() || !src->IsRegVarDeath() ||
+ (genRegMaskFloat(src->gtRegVar.gtRegNum) & regSet.rsMaskRegVarFloat) == 0);
- FlatFPX87_Kill(&compCurFPState, srcreg);
+ FlatFPX87_Kill(&compCurFPState, srcreg);
+ }
+ }
}
- }
- }
}
return result;
}
-void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
+void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
{
#ifdef DEBUG
if (compiler->verbose)
@@ -2199,7 +2110,7 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
{
// Assignment
case GT_ASG:
- {
+ {
genCodeForTreeStackFP_Asg(tree);
break;
}
@@ -2223,17 +2134,17 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
genCodeForTreeStackFP_AsgArithm(tree);
break;
}
-
+
case GT_IND:
case GT_LEA:
{
- regMaskTP addrReg;
-
+ regMaskTP addrReg;
+
// Make sure the address value is 'addressable' */
addrReg = genMakeAddressable(tree, 0, RegSet::FREE_REG);
- // Load the value onto the FP stack
- regNumber reg = regSet.PickRegFloat();
+ // Load the value onto the FP stack
+ regNumber reg = regSet.PickRegFloat();
genLoadStackFP(tree, reg);
genDoneAddressable(tree, addrReg, RegSet::FREE_REG);
@@ -2248,34 +2159,34 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
GenTreePtr op1 = tree->gtOp.gtOp1;
assert(op1);
- // Compute the result onto the FP stack
+ // Compute the result onto the FP stack
if (op1->gtType == TYP_FLOAT)
{
#if ROUND_FLOAT
- bool roundOp1 = false;
+ bool roundOp1 = false;
switch (getRoundFloatLevel())
{
- case ROUND_NEVER:
- /* No rounding at all */
- break;
+ case ROUND_NEVER:
+ /* No rounding at all */
+ break;
- case ROUND_CMP_CONST:
- break;
+ case ROUND_CMP_CONST:
+ break;
- case ROUND_CMP:
- /* Round all comparands and return values*/
- roundOp1 = true;
- break;
+ case ROUND_CMP:
+ /* Round all comparands and return values*/
+ roundOp1 = true;
+ break;
- case ROUND_ALWAYS:
- /* Round everything */
- roundOp1 = true;
- break;
+ case ROUND_ALWAYS:
+ /* Round everything */
+ roundOp1 = true;
+ break;
- default:
- assert(!"Unsupported Round Level");
- break;
+ default:
+ assert(!"Unsupported Round Level");
+ break;
}
#endif
genCodeForTreeFlt(op1);
@@ -2301,7 +2212,7 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
{
GenTreePtr op1 = tree->gtOp.gtOp1;
GenTreePtr op2 = tree->gtGetOp2();
-
+
if (tree->gtFlags & GTF_REVERSE_OPS)
{
genCodeForTreeFloat(op2);
@@ -2309,8 +2220,8 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
regSet.SetUsedRegFloat(op2, true);
genEvalSideEffects(op1);
-
- if (op2->gtFlags & GTF_SPILLED)
+
+ if (op2->gtFlags & GTF_SPILLED)
{
UnspillFloat(op2);
}
@@ -2322,7 +2233,7 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
genEvalSideEffects(op1);
genCodeForTreeFloat(op2);
}
-
+
genCodeForTreeStackFP_DONE(tree, op2->gtRegNum);
break;
}
@@ -2331,7 +2242,7 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
genCodeForTreeStackFP_Cast(tree);
break;
}
-
+
case GT_NEG:
{
GenTreePtr op1 = tree->gtOp.gtOp1;
@@ -2340,11 +2251,11 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
genCodeForTreeFloat(op1);
// Take reg to top of stack
- FlatFPX87_MoveToTOS(&compCurFPState, op1->gtRegNum);
-
+ FlatFPX87_MoveToTOS(&compCurFPState, op1->gtRegNum);
+
// change the sign
instGen(INS_fchs);
-
+
// mark register that holds tree
genCodeForTreeStackFP_DONE(tree, op1->gtRegNum);
return;
@@ -2359,36 +2270,30 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
genCodeForTreeFloat(op1);
// Take reg to top of stack
- FlatFPX87_MoveToTOS(&compCurFPState, op1->gtRegNum);
-
-
- static const instruction mathIns[] =
- {
- INS_fsin,
- INS_fcos,
- INS_fsqrt,
- INS_fabs,
- INS_frndint,
+ FlatFPX87_MoveToTOS(&compCurFPState, op1->gtRegNum);
+
+ static const instruction mathIns[] = {
+ INS_fsin, INS_fcos, INS_fsqrt, INS_fabs, INS_frndint,
};
- assert(mathIns[CORINFO_INTRINSIC_Sin ] == INS_fsin );
- assert(mathIns[CORINFO_INTRINSIC_Cos ] == INS_fcos );
- assert(mathIns[CORINFO_INTRINSIC_Sqrt] == INS_fsqrt);
- assert(mathIns[CORINFO_INTRINSIC_Abs ] == INS_fabs );
+ assert(mathIns[CORINFO_INTRINSIC_Sin] == INS_fsin);
+ assert(mathIns[CORINFO_INTRINSIC_Cos] == INS_fcos);
+ assert(mathIns[CORINFO_INTRINSIC_Sqrt] == INS_fsqrt);
+ assert(mathIns[CORINFO_INTRINSIC_Abs] == INS_fabs);
assert(mathIns[CORINFO_INTRINSIC_Round] == INS_frndint);
- assert((unsigned)(tree->gtIntrinsic.gtIntrinsicId) < sizeof(mathIns)/sizeof(mathIns[0]));
+ assert((unsigned)(tree->gtIntrinsic.gtIntrinsicId) < sizeof(mathIns) / sizeof(mathIns[0]));
instGen(mathIns[tree->gtIntrinsic.gtIntrinsicId]);
// mark register that holds tree
genCodeForTreeStackFP_DONE(tree, op1->gtRegNum);
-
- return;
+
+ return;
}
case GT_CKFINITE:
{
- TempDsc * temp;
- int offs;
-
+ TempDsc* temp;
+ int offs;
+
GenTreePtr op1 = tree->gtOp.gtOp1;
// Offset of the DWord containing the exponent
@@ -2400,12 +2305,12 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
// Take reg to top of stack
FlatFPX87_MoveToTOS(&compCurFPState, op1->gtRegNum);
- temp = compiler->tmpGetTemp (op1->TypeGet());
+ temp = compiler->tmpGetTemp(op1->TypeGet());
emitAttr size = EmitSize(op1);
- // Store the value from the FP stack into the temp
+ // Store the value from the FP stack into the temp
getEmitter()->emitIns_S(INS_fst, size, temp->tdTempNum(), 0);
-
+
regNumber reg = regSet.rsPickReg();
// Load the DWord containing the exponent into a general reg.
@@ -2418,8 +2323,8 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
// Mask of exponent with all 1's - appropriate for given type
int expMask;
- expMask = (op1->gtType == TYP_FLOAT) ? 0x7F800000 // TYP_FLOAT
- : 0x7FF00000; // TYP_DOUBLE
+ expMask = (op1->gtType == TYP_FLOAT) ? 0x7F800000 // TYP_FLOAT
+ : 0x7FF00000; // TYP_DOUBLE
// Check if the exponent is all 1's
@@ -2429,7 +2334,7 @@ void CodeGen::genCodeForTreeStackFP_SmpOp(GenTreePtr tree)
// If exponent was all 1's, we need to throw ArithExcep
genJumpToThrowHlpBlk(EJ_je, SCK_ARITH_EXCPN);
- genUpdateLife(tree);
+ genUpdateLife(tree);
genCodeForTreeStackFP_DONE(tree, op1->gtRegNum);
break;
@@ -2450,14 +2355,13 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
}
#endif // DEBUG
- #if ROUND_FLOAT
+#if ROUND_FLOAT
bool roundResult = true;
- #endif
+#endif
regMaskTP addrReg;
- TempDsc * temp;
- emitAttr size;
-
+ TempDsc* temp;
+ emitAttr size;
GenTreePtr op1 = tree->gtOp.gtOp1;
@@ -2476,10 +2380,10 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
// Operand too small for 'fild', load it into a register
genCodeForTree(op1, 0);
- #if ROUND_FLOAT
- // no need to round, can't overflow float or dbl
+#if ROUND_FLOAT
+ // no need to round, can't overflow float or dbl
roundResult = false;
- #endif
+#endif
// fall through
}
@@ -2487,21 +2391,21 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
case TYP_BYREF:
case TYP_LONG:
{
- // Can't 'fild' a constant, it has to be loaded from memory
+ // Can't 'fild' a constant, it has to be loaded from memory
switch (op1->gtOper)
{
- case GT_CNS_INT:
- op1 = genMakeConst(&op1->gtIntCon.gtIconVal, TYP_INT , tree, false);
- break;
-
- case GT_CNS_LNG:
- // Our encoder requires fild on m64int to be 64-bit aligned.
- op1 = genMakeConst(&op1->gtLngCon.gtLconVal, TYP_LONG, tree, true);
- break;
- default:
- break;
+ case GT_CNS_INT:
+ op1 = genMakeConst(&op1->gtIntCon.gtIconVal, TYP_INT, tree, false);
+ break;
+
+ case GT_CNS_LNG:
+ // Our encoder requires fild on m64int to be 64-bit aligned.
+ op1 = genMakeConst(&op1->gtLngCon.gtLconVal, TYP_LONG, tree, true);
+ break;
+ default:
+ break;
}
-
+
addrReg = genMakeAddressable(op1, 0, RegSet::FREE_REG);
// Grab register for the cast
@@ -2510,20 +2414,20 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
compCurFPState.Push(reg);
// Is the value now sitting in a register?
- if (op1->InReg())
+ if (op1->InReg())
{
// We'll have to store the value into the stack */
size = EA_ATTR(roundUp(genTypeSize(op1->gtType)));
temp = compiler->tmpGetTemp(op1->TypeGet());
- // Move the value into the temp
- if (op1->gtType == TYP_LONG)
+ // Move the value into the temp
+ if (op1->gtType == TYP_LONG)
{
- regPairNo regPair = op1->gtRegPair;
+ regPairNo regPair = op1->gtRegPair;
// This code is pretty ugly, but straightforward
- if (genRegPairLo(regPair) == REG_STK)
+ if (genRegPairLo(regPair) == REG_STK)
{
regNumber rg1 = genRegPairHi(regPair);
@@ -2541,9 +2445,8 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
/* Reload transfer register */
inst_RV_ST(INS_mov, rg1, temp, 4, TYP_LONG);
-
}
- else if (genRegPairHi(regPair) == REG_STK)
+ else if (genRegPairHi(regPair) == REG_STK)
{
regNumber rg1 = genRegPairLo(regPair);
@@ -2561,7 +2464,6 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
/* Reload transfer register */
inst_RV_ST(INS_mov, rg1, temp, 0, TYP_LONG);
-
}
else
{
@@ -2569,7 +2471,6 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
inst_ST_RV(INS_mov, temp, 0, genRegPairLo(regPair), TYP_LONG);
inst_ST_RV(INS_mov, temp, 4, genRegPairHi(regPair), TYP_LONG);
-
}
genDoneAddressable(op1, addrReg, RegSet::FREE_REG);
@@ -2581,36 +2482,35 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
{
/* Move the value into the temp */
- inst_ST_RV(INS_mov , temp, 0, op1->gtRegNum, TYP_INT);
+ inst_ST_RV(INS_mov, temp, 0, op1->gtRegNum, TYP_INT);
genDoneAddressable(op1, addrReg, RegSet::FREE_REG);
/* Load the integer from the temp */
- inst_FS_ST(INS_fild , size, temp, 0);
+ inst_FS_ST(INS_fild, size, temp, 0);
}
-
- // We no longer need the temp
+
+ // We no longer need the temp
compiler->tmpRlsTemp(temp);
}
else
{
- // Load the value from its address
- if (op1->gtType == TYP_LONG)
+ // Load the value from its address
+ if (op1->gtType == TYP_LONG)
inst_TT(INS_fildl, op1);
else
- inst_TT(INS_fild , op1);
+ inst_TT(INS_fild, op1);
genDoneAddressable(op1, addrReg, RegSet::FREE_REG);
- }
+ }
#if ROUND_FLOAT
/* integer to fp conversions can overflow. roundResult
* is cleared above in cases where it can't
*/
- if (roundResult &&
- ((tree->gtType == TYP_FLOAT) ||
- ((tree->gtType == TYP_DOUBLE) && (op1->gtType == TYP_LONG))))
+ if (roundResult &&
+ ((tree->gtType == TYP_FLOAT) || ((tree->gtType == TYP_DOUBLE) && (op1->gtType == TYP_LONG))))
genRoundFpExpression(tree);
#endif
@@ -2618,26 +2518,23 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
}
case TYP_FLOAT:
{
- // This is a cast from float to double.
+ // This is a cast from float to double.
// Note that conv.r(r4/r8) and conv.r8(r4/r9) are indistinguishable
// as we will generate GT_CAST-TYP_DOUBLE for both. This would
// cause us to truncate precision in either case. However,
// conv.r was needless in the first place, and should have
- // been removed */
- genCodeForTreeFloat(op1); // Trucate its precision
-
- if (op1->gtOper == GT_LCL_VAR ||
- op1->gtOper == GT_LCL_FLD ||
- op1->gtOper == GT_CLS_VAR ||
- op1->gtOper == GT_IND ||
- op1->gtOper == GT_LEA)
+ // been removed */
+ genCodeForTreeFloat(op1); // Truncate its precision
+
+ if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_LCL_FLD || op1->gtOper == GT_CLS_VAR ||
+ op1->gtOper == GT_IND || op1->gtOper == GT_LEA)
{
// We take advantage here of the fact that we know that our
// codegen will have just loaded this from memory, and that
// therefore, no cast is really needed.
// Ideally we wouldn't do this optimization here, but in
// morphing, however, we need to do this after regalloc, as
- // this optimization doesnt apply if what we're loading is a
+ // this optimization doesn't apply if what we're loading is a
// regvar
}
else
@@ -2647,27 +2544,25 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
// Assign reg to tree
genMarkTreeInReg(tree, op1->gtRegNum);
-
- break;
+
+ break;
}
case TYP_DOUBLE:
{
- // This is a cast from double to float or double
- // Load the value, store as destType, load back
+ // This is a cast from double to float or double
+ // Load the value, store as destType, load back
genCodeForTreeFlt(op1);
- if ( (op1->gtOper == GT_LCL_VAR ||
- op1->gtOper == GT_LCL_FLD ||
- op1->gtOper == GT_CLS_VAR ||
- op1->gtOper == GT_IND ||
- op1->gtOper == GT_LEA) && tree->TypeGet() == TYP_DOUBLE)
+ if ((op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_LCL_FLD || op1->gtOper == GT_CLS_VAR ||
+ op1->gtOper == GT_IND || op1->gtOper == GT_LEA) &&
+ tree->TypeGet() == TYP_DOUBLE)
{
// We take advantage here of the fact that we know that our
// codegen will have just loaded this from memory, and that
// therefore, no cast is really needed.
// Ideally we wouldn't do this optimization here, but in
// morphing. However, we need to do this after regalloc, as
- // this optimization doesnt apply if what we're loading is a
+ // this optimization doesn't apply if what we're loading is a
// regvar
}
else
@@ -2677,18 +2572,18 @@ void CodeGen::genCodeForTreeStackFP_Cast(GenTreePtr tree)
// Assign reg to tree
genMarkTreeInReg(tree, op1->gtRegNum);
-
+
break;
}
default:
{
assert(!"unsupported cast");
break;
- }
+ }
}
}
-void CodeGen::genCodeForTreeStackFP_Special(GenTreePtr tree)
+void CodeGen::genCodeForTreeStackFP_Special(GenTreePtr tree)
{
#ifdef DEBUG
if (compiler->verbose)
@@ -2701,23 +2596,22 @@ void CodeGen::genCodeForTreeStackFP_Special(GenTreePtr tree)
switch (tree->OperGet())
{
- case GT_CALL:
- {
- genCodeForCall(tree, true);
- break;
- }
- default:
- NYI("genCodeForTreeStackFP_Special");
- break;
+ case GT_CALL:
+ {
+ genCodeForCall(tree, true);
+ break;
+ }
+ default:
+ NYI("genCodeForTreeStackFP_Special");
+ break;
}
}
-void CodeGen::genCodeForTreeFloat(GenTreePtr tree,
- RegSet::RegisterPreference *pref)
+void CodeGen::genCodeForTreeFloat(GenTreePtr tree, RegSet::RegisterPreference* pref)
{
// TestTransitions();
- genTreeOps oper;
- unsigned kind;
+ genTreeOps oper;
+ unsigned kind;
assert(tree);
assert(tree->gtOper != GT_STMT);
@@ -2727,7 +2621,7 @@ void CodeGen::genCodeForTreeFloat(GenTreePtr tree,
oper = tree->OperGet();
kind = tree->OperKind();
- if (kind & GTK_CONST)
+ if (kind & GTK_CONST)
{
genCodeForTreeStackFP_Const(tree);
}
@@ -2751,22 +2645,20 @@ void CodeGen::genCodeForTreeFloat(GenTreePtr tree,
}
assert(compCurFPState.IsConsistent());
#endif
-
}
-
bool CodeGen::genCompInsStackFP(GenTreePtr tos, GenTreePtr other)
{
// assume gensetupop done
-
+
bool bUseFcomip = genUse_fcomip();
- bool bReverse = false;
+ bool bReverse = false;
// Take op1 to top of the stack
FlatFPX87_MoveToTOS(&compCurFPState, tos->gtRegNum);
// We pop top of stack if it's not a live regvar
- bool bPopTos = !(tos->IsRegVar() && !tos->IsRegVarDeath()) || (tos->InReg());
+ bool bPopTos = !(tos->IsRegVar() && !tos->IsRegVarDeath()) || (tos->InReg());
bool bPopOther = !(other->IsRegVar() && !other->IsRegVarDeath()) || (other->InReg());
assert(tos->IsRegVar() || (tos->InReg()));
@@ -2779,8 +2671,8 @@ bool CodeGen::genCompInsStackFP(GenTreePtr tos, GenTreePtr other)
if (bUseFcomip)
{
// We should have space for a load
- assert(compCurFPState.m_uStackSize < FP_PHYSICREGISTERS);
-
+ assert(compCurFPState.m_uStackSize < FP_PHYSICREGISTERS);
+
// load from mem, now the comparison will be the other way around
inst_FS_TT(INS_fld, other);
inst_FN(INS_fcomip, 1);
@@ -2788,15 +2680,15 @@ bool CodeGen::genCompInsStackFP(GenTreePtr tos, GenTreePtr other)
// pop if we've been asked to do so
if (bPopTos)
{
- inst_FS(INS_fstp, 0);
+ inst_FS(INS_fstp, 0);
FlatFPX87_Kill(&compCurFPState, tos->gtRegNum);
}
-
+
bReverse = true;
}
else
{
- // compare directly with memory
+ // compare directly with memory
if (bPopTos)
{
inst_FS_TT(INS_fcomp, other);
@@ -2825,7 +2717,7 @@ bool CodeGen::genCompInsStackFP(GenTreePtr tos, GenTreePtr other)
if (bPopOther)
{
FlatFPX87_Unload(&compCurFPState, other->gtRegNum);
- }
+ }
}
else
{
@@ -2842,18 +2734,18 @@ bool CodeGen::genCompInsStackFP(GenTreePtr tos, GenTreePtr other)
if (bPopOther)
{
FlatFPX87_Unload(&compCurFPState, other->gtRegNum);
- }
+ }
}
}
if (!bUseFcomip)
{
// oops, we have to put result of compare in eflags
-
- // Grab EAX for the result of the fnstsw
+
+ // Grab EAX for the result of the fnstsw
regSet.rsGrabReg(RBM_EAX);
- // Generate the 'fnstsw' and test its result
+ // Generate the 'fnstsw' and test its result
inst_RV(INS_fnstsw, REG_EAX, TYP_INT);
regTracker.rsTrackRegTrash(REG_EAX);
instGen(INS_sahf);
@@ -2862,37 +2754,35 @@ bool CodeGen::genCompInsStackFP(GenTreePtr tos, GenTreePtr other)
return bReverse;
}
-void CodeGen::genCondJumpFltStackFP(GenTreePtr cond, BasicBlock * jumpTrue, BasicBlock * jumpFalse, bool bDoTransition)
+void CodeGen::genCondJumpFltStackFP(GenTreePtr cond, BasicBlock* jumpTrue, BasicBlock* jumpFalse, bool bDoTransition)
{
assert(jumpTrue && jumpFalse);
assert(!(cond->gtFlags & GTF_REVERSE_OPS)); // Done in genCondJump()
assert(varTypeIsFloating(cond->gtOp.gtOp1));
- GenTreePtr op1 = cond->gtOp.gtOp1;
- GenTreePtr op2 = cond->gtOp.gtOp2;
- genTreeOps cmp = cond->OperGet();
+ GenTreePtr op1 = cond->gtOp.gtOp1;
+ GenTreePtr op2 = cond->gtOp.gtOp2;
+ genTreeOps cmp = cond->OperGet();
-
- // Prepare operands.
- genSetupForOpStackFP(op1, op2, false, false, true, false);
+ // Prepare operands.
+ genSetupForOpStackFP(op1, op2, false, false, true, false);
GenTreePtr tos;
- GenTreePtr other;
- bool bReverseCmp = false;
+ GenTreePtr other;
+ bool bReverseCmp = false;
-
- if ( (op2->IsRegVar() || (op2->InReg())) && // op2 is in a reg
- (compCurFPState.TopVirtual() == (unsigned) op2->gtRegNum && // Is it already at the top of the stack?
- (!op2->IsRegVar() || op2->IsRegVarDeath()))) // are we going to pop it off?
+ if ((op2->IsRegVar() || (op2->InReg())) && // op2 is in a reg
+ (compCurFPState.TopVirtual() == (unsigned)op2->gtRegNum && // Is it already at the top of the stack?
+ (!op2->IsRegVar() || op2->IsRegVarDeath()))) // are we going to pop it off?
{
- tos = op2;
- other = op1;
+ tos = op2;
+ other = op1;
bReverseCmp = true;
}
else
{
- tos = op1;
- other = op2;
+ tos = op1;
+ other = op2;
bReverseCmp = false;
}
@@ -2900,7 +2790,7 @@ void CodeGen::genCondJumpFltStackFP(GenTreePtr cond, BasicBlock * jumpTrue
{
bReverseCmp = !bReverseCmp;
}
-
+
// do .un comparison
if (cond->gtFlags & GTF_RELOP_NAN_UN)
{
@@ -2909,22 +2799,20 @@ void CodeGen::genCondJumpFltStackFP(GenTreePtr cond, BasicBlock * jumpTrue
}
else
{
- jumpFalse->bbFlags |= BBF_JMP_TARGET|BBF_HAS_LABEL;
+ jumpFalse->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
- // Generate the first jump (NaN check)
+ // Generate the first jump (NaN check)
genCondJmpInsStackFP(EJ_jpe, jumpFalse, NULL, bDoTransition);
}
/* Generate the second jump (comparison) */
- const static
- BYTE dblCmpTstJmp2[] =
- {
- EJ_je , // GT_EQ
- EJ_jne , // GT_NE
- EJ_jb , // GT_LT
- EJ_jbe , // GT_LE
- EJ_jae , // GT_GE
- EJ_ja , // GT_GT
+ const static BYTE dblCmpTstJmp2[] = {
+ EJ_je, // GT_EQ
+ EJ_jne, // GT_NE
+ EJ_jb, // GT_LT
+ EJ_jbe, // GT_LE
+ EJ_jae, // GT_GE
+ EJ_ja, // GT_GT
};
// Swap comp order if necessary
@@ -2933,53 +2821,48 @@ void CodeGen::genCondJumpFltStackFP(GenTreePtr cond, BasicBlock * jumpTrue
cmp = GenTree::SwapRelop(cmp);
}
- genCondJmpInsStackFP((emitJumpKind)dblCmpTstJmp2[cmp - GT_EQ],
- jumpTrue,
- jumpFalse,
- bDoTransition);
+ genCondJmpInsStackFP((emitJumpKind)dblCmpTstJmp2[cmp - GT_EQ], jumpTrue, jumpFalse, bDoTransition);
}
BasicBlock* CodeGen::genTransitionBlockStackFP(FlatFPStateX87* pState, BasicBlock* pFrom, BasicBlock* pTarget)
{
// Fast paths where a transition block is not necessary
- if (pTarget->bbFPStateX87 && FlatFPStateX87::AreEqual(pState, pTarget->bbFPStateX87) ||
- pState->IsEmpty())
+ if (pTarget->bbFPStateX87 && FlatFPStateX87::AreEqual(pState, pTarget->bbFPStateX87) || pState->IsEmpty())
{
return pTarget;
}
-
- // We shouldn't have any handlers if we're generating transition blocks, as we don't know
+
+ // We shouldn't have any handlers if we're generating transition blocks, as we don't know
// how to recover them
assert(compiler->compMayHaveTransitionBlocks);
assert(compiler->compHndBBtabCount == 0);
-
- #ifdef DEBUG
+
+#ifdef DEBUG
compiler->fgSafeBasicBlockCreation = true;
- #endif
-
+#endif
+
// Create a temp block
BasicBlock* pBlock = compiler->bbNewBasicBlock(BBJ_ALWAYS);
- #ifdef DEBUG
+#ifdef DEBUG
compiler->fgSafeBasicBlockCreation = false;
- #endif
+#endif
VarSetOps::Assign(compiler, pBlock->bbLiveIn, pFrom->bbLiveOut);
VarSetOps::Assign(compiler, pBlock->bbLiveOut, pFrom->bbLiveOut);
pBlock->bbJumpDest = pTarget;
- pBlock->bbFlags |= BBF_JMP_TARGET;
+ pBlock->bbFlags |= BBF_JMP_TARGET;
//
- // If either pFrom or pTarget are cold blocks then
+ // If either pFrom or pTarget are cold blocks then
// the transition block also must be cold
//
- pBlock->bbFlags |= (pFrom->bbFlags & BBF_COLD);
- pBlock->bbFlags |= (pTarget->bbFlags & BBF_COLD);
+ pBlock->bbFlags |= (pFrom->bbFlags & BBF_COLD);
+ pBlock->bbFlags |= (pTarget->bbFlags & BBF_COLD);
// The FP state for the block is the same as the current one
pBlock->bbFPStateX87 = FlatFPAllocFPState(pState);
-
if ((pBlock->bbFlags & BBF_COLD) || (compiler->fgFirstColdBlock == NULL))
{
//
@@ -3009,14 +2892,12 @@ BasicBlock* CodeGen::genTransitionBlockStackFP(FlatFPStateX87* pState, BasicBloc
return pBlock;
}
-void CodeGen::genCondJumpLngStackFP(GenTreePtr cond,
- BasicBlock* jumpTrue,
- BasicBlock* jumpFalse)
+void CodeGen::genCondJumpLngStackFP(GenTreePtr cond, BasicBlock* jumpTrue, BasicBlock* jumpFalse)
{
// For the moment, and so we don't have to deal with the amount of special cases
- // we have, will insert a dummy block for jumpTrue (if necessary) that will do the
+ // we have, will insert a dummy block for jumpTrue (if necessary) that will do the
// transition for us. For the jumpFalse case, we play a trick. For the false case ,
- // a Long conditional has a fallthrough (least significant DWORD check is false) and
+ // a Long conditional has a fallthrough (least significant DWORD check is false) and
// also has a jump to the fallthrough (bbNext) if the most significant DWORD check
// fails. However, we do want to make an FP transition if we're in the later case,
// So what we do is create a label and make jumpFalse go there. This label is defined
@@ -3024,8 +2905,8 @@ void CodeGen::genCondJumpLngStackFP(GenTreePtr cond,
// for false condition will go through the transition and then fall through to bbnext.
assert(jumpFalse == compiler->compCurBB->bbNext);
- BasicBlock* pTransition = genCreateTempLabel();
-
+ BasicBlock* pTransition = genCreateTempLabel();
+
genCondJumpLng(cond, jumpTrue, pTransition, true);
genDefineTempLabel(pTransition);
@@ -3036,30 +2917,30 @@ void CodeGen::genQMarkRegVarTransition(GenTreePtr nextNode, VARSET_VALARG_TP liv
// Kill any vars that may die in the transition
VARSET_TP VARSET_INIT_NOCOPY(newLiveSet, VarSetOps::Intersection(compiler, liveset, compiler->optAllFPregVars));
- regMaskTP liveRegIn = genRegMaskFromLivenessStackFP(newLiveSet );
+ regMaskTP liveRegIn = genRegMaskFromLivenessStackFP(newLiveSet);
genCodeForTransitionFromMask(&compCurFPState, liveRegIn);
unsigned i;
// Kill all regvars
- for (i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (i = REG_FPV0; i < REG_FPCOUNT; i++)
{
- if ((genRegMaskFloat((regNumber) i) & regSet.rsMaskRegVarFloat) )
+ if ((genRegMaskFloat((regNumber)i) & regSet.rsMaskRegVarFloat))
{
-
- genRegVarDeathStackFP(regSet.genRegVarsFloat[i]);
+
+ genRegVarDeathStackFP(regSet.genRegVarsFloat[i]);
}
}
// Born necessary regvars
- for (i = 0 ; i < compiler->lvaTrackedCount ; i++)
+ for (i = 0; i < compiler->lvaTrackedCount; i++)
{
- unsigned lclVar = compiler->lvaTrackedToVarNum[i];
- LclVarDsc * varDsc = compiler->lvaTable+lclVar;
+ unsigned lclVar = compiler->lvaTrackedToVarNum[i];
+ LclVarDsc* varDsc = compiler->lvaTable + lclVar;
assert(varDsc->lvTracked);
- if (varDsc->lvRegister && VarSetOps::IsMember(compiler, newLiveSet, i))
+ if (varDsc->lvRegister && VarSetOps::IsMember(compiler, newLiveSet, i))
{
genRegVarBirthStackFP(varDsc);
}
@@ -3069,10 +2950,9 @@ void CodeGen::genQMarkRegVarTransition(GenTreePtr nextNode, VARSET_VALARG_TP liv
void CodeGen::genQMarkBeforeElseStackFP(QmarkStateStackFP* pState, VARSET_VALARG_TP varsetCond, GenTreePtr nextNode)
{
assert(regSet.rsMaskLockedFloat == 0);
-
+
// Save current state at colon
pState->stackState.Init(&compCurFPState);
-
// Kill any vars that may die in the transition to then
genQMarkRegVarTransition(nextNode, varsetCond);
@@ -3081,26 +2961,25 @@ void CodeGen::genQMarkBeforeElseStackFP(QmarkStateStackFP* pState, VARSET_VALARG
void CodeGen::genQMarkAfterElseBlockStackFP(QmarkStateStackFP* pState, VARSET_VALARG_TP varsetCond, GenTreePtr nextNode)
{
assert(regSet.rsMaskLockedFloat == 0);
-
+
FlatFPStateX87 tempSwap;
// Save current state. Now tempFPState will store the target state for the else block
tempSwap.Init(&compCurFPState);
-
+
compCurFPState.Init(&pState->stackState);
-
+
pState->stackState.Init(&tempSwap);
// Did any regvars die in the then block that are live on entry to the else block?
unsigned i;
- for (i = 0 ; i < compiler->lvaTrackedCount ; i++)
+ for (i = 0; i < compiler->lvaTrackedCount; i++)
{
- if (VarSetOps::IsMember(compiler, varsetCond, i) &&
- VarSetOps::IsMember(compiler, compiler->optAllFPregVars, i))
+ if (VarSetOps::IsMember(compiler, varsetCond, i) && VarSetOps::IsMember(compiler, compiler->optAllFPregVars, i))
{
// This variable should be live
- unsigned lclnum = compiler->lvaTrackedToVarNum[i];
- LclVarDsc * varDsc = compiler->lvaTable + lclnum;
+ unsigned lclnum = compiler->lvaTrackedToVarNum[i];
+ LclVarDsc* varDsc = compiler->lvaTable + lclnum;
if (regSet.genRegVarsFloat[varDsc->lvRegNum] != varDsc)
{
@@ -3108,13 +2987,13 @@ void CodeGen::genQMarkAfterElseBlockStackFP(QmarkStateStackFP* pState, VARSET_VA
if (regSet.genRegVarsFloat[varDsc->lvRegNum])
{
genRegVarDeathStackFP(regSet.genRegVarsFloat[varDsc->lvRegNum]);
- }
+ }
genRegVarBirthStackFP(varDsc);
}
}
}
-
+
// Kill any vars that may die in the transition
genQMarkRegVarTransition(nextNode, varsetCond);
}
@@ -3123,7 +3002,7 @@ void CodeGen::genQMarkAfterThenBlockStackFP(QmarkStateStackFP* pState)
{
JITDUMP("genQMarkAfterThenBlockStackFP()\n");
assert(regSet.rsMaskLockedFloat == 0);
-
+
// Generate transition to the previous one set by the then block
genCodeForTransitionStackFP(&compCurFPState, &pState->stackState);
@@ -3131,9 +3010,8 @@ void CodeGen::genQMarkAfterThenBlockStackFP(QmarkStateStackFP* pState)
compCurFPState.Init(&pState->stackState);
}
-
void CodeGenInterface::SetRegVarFloat(regNumber reg, var_types type, LclVarDsc* varDsc)
-{
+{
regMaskTP mask = genRegMaskFloat(reg, type);
if (varDsc)
@@ -3157,31 +3035,27 @@ void CodeGenInterface::SetRegVarFloat(regNumber reg, var_types type, LclVarDsc*
regSet.genRegVarsFloat[reg] = varDsc;
}
-
// Generates a conditional jump. It will do the appropiate stack matching for the jmpTrue.
-// We don't use jumpFalse anywhere and the integer codebase assumes that it will be bbnext, and that is
+// We don't use jumpFalse anywhere and the integer codebase assumes that it will be bbnext, and that is
// taken care of at the end of the bb code generation.
void CodeGen::genCondJmpInsStackFP(emitJumpKind jumpKind,
- BasicBlock * jumpTrue,
- BasicBlock * jumpFalse,
- bool bDoTransition)
+ BasicBlock* jumpTrue,
+ BasicBlock* jumpFalse,
+ bool bDoTransition)
{
// Assert the condition above.
assert(!jumpFalse || jumpFalse == compiler->compCurBB->bbNext || !bDoTransition);
- // Do the fp stack matching.
- if (bDoTransition &&
- !jumpTrue->bbFPStateX87 &&
- FlatFPSameRegisters(&compCurFPState,
- genRegMaskFromLivenessStackFP(jumpTrue->bbLiveIn)))
+ // Do the fp stack matching.
+ if (bDoTransition && !jumpTrue->bbFPStateX87 &&
+ FlatFPSameRegisters(&compCurFPState, genRegMaskFromLivenessStackFP(jumpTrue->bbLiveIn)))
{
// Target block doesn't have state yet, but has the same registers, so
// we allocate the block and generate the normal jump
genCodeForBBTransitionStackFP(jumpTrue);
inst_JMP(jumpKind, jumpTrue);
}
- else if (!bDoTransition ||
- compCurFPState.IsEmpty() || // If it's empty, target has to be empty too.
+ else if (!bDoTransition || compCurFPState.IsEmpty() || // If it's empty, target has to be empty too.
(jumpTrue->bbFPStateX87 && FlatFPStateX87::AreEqual(&compCurFPState, jumpTrue->bbFPStateX87)))
{
// Nothing to do here. Proceed normally and generate the jump
@@ -3198,9 +3072,9 @@ void CodeGen::genCondJmpInsStackFP(emitJumpKind jumpKind,
// do a forward conditional jump, generate the transition and jump to the target
// The payload is an aditional jump instruction, but both jumps will be correctly
// predicted by the processor in the loop case.
- BasicBlock* endLabel = NULL;
+ BasicBlock* endLabel = NULL;
- endLabel = genCreateTempLabel();
+ endLabel = genCreateTempLabel();
inst_JMP(emitter::emitReverseJumpKind(jumpKind), endLabel);
@@ -3212,17 +3086,14 @@ void CodeGen::genCondJmpInsStackFP(emitJumpKind jumpKind,
}
}
-
-void CodeGen::genTableSwitchStackFP(regNumber reg,
- unsigned jumpCnt,
- BasicBlock ** jumpTab)
+void CodeGen::genTableSwitchStackFP(regNumber reg, unsigned jumpCnt, BasicBlock** jumpTab)
{
// Only come here when we have to do something special for the FPU stack!
//
assert(!compCurFPState.IsEmpty());
VARSET_TP VARSET_INIT_NOCOPY(liveInFP, VarSetOps::MakeEmpty(compiler));
VARSET_TP VARSET_INIT_NOCOPY(liveOutFP, VarSetOps::MakeEmpty(compiler));
- for (unsigned i = 0 ; i < jumpCnt ; i++)
+ for (unsigned i = 0; i < jumpCnt; i++)
{
VarSetOps::Assign(compiler, liveInFP, jumpTab[i]->bbLiveIn);
VarSetOps::IntersectionD(compiler, liveInFP, compiler->optAllFPregVars);
@@ -3232,10 +3103,9 @@ void CodeGen::genTableSwitchStackFP(regNumber reg,
if (!jumpTab[i]->bbFPStateX87 && VarSetOps::Equal(compiler, liveInFP, liveOutFP))
{
// Hasn't state yet and regvar set is the same, so just copy state and don't change the jump
- jumpTab[i]->bbFPStateX87 = FlatFPAllocFPState(&compCurFPState);
+ jumpTab[i]->bbFPStateX87 = FlatFPAllocFPState(&compCurFPState);
}
- else if (jumpTab[i]->bbFPStateX87 &&
- FlatFPStateX87::AreEqual(&compCurFPState, jumpTab[i]->bbFPStateX87))
+ else if (jumpTab[i]->bbFPStateX87 && FlatFPStateX87::AreEqual(&compCurFPState, jumpTab[i]->bbFPStateX87))
{
// Same state, don't change the jump
}
@@ -3243,11 +3113,11 @@ void CodeGen::genTableSwitchStackFP(regNumber reg,
{
// We have to do a transition. First check if we can reuse another one
unsigned j;
- for (j = 0 ; j < i ; j++)
+ for (j = 0; j < i; j++)
{
// Has to be already forwarded. If not it can't be targetting the same block
if (jumpTab[j]->bbFlags & BBF_FORWARD_SWITCH)
- {
+ {
if (jumpTab[i] == jumpTab[j]->bbJumpDest)
{
// yipee, we can reuse this transition block
@@ -3260,16 +3130,15 @@ void CodeGen::genTableSwitchStackFP(regNumber reg,
if (j == i)
{
// We will have to create a new transition block
- jumpTab[i] =
- genTransitionBlockStackFP(&compCurFPState, compiler->compCurBB, jumpTab[i]);
-
+ jumpTab[i] = genTransitionBlockStackFP(&compCurFPState, compiler->compCurBB, jumpTab[i]);
+
jumpTab[i]->bbFlags |= BBF_FORWARD_SWITCH;
}
- }
- }
+ }
+ }
// Clear flag
- for (unsigned i = 0 ; i < jumpCnt ; i++)
+ for (unsigned i = 0; i < jumpCnt; i++)
{
jumpTab[i]->bbFlags &= ~BBF_FORWARD_SWITCH;
}
@@ -3277,93 +3146,91 @@ void CodeGen::genTableSwitchStackFP(regNumber reg,
// everything's fixed now, so go down the normal path
return genTableSwitch(reg, jumpCnt, jumpTab);
}
-
+
bool CodeGen::genConstantLoadStackFP(GenTreePtr tree, bool bOnlyNoMemAccess)
{
assert(tree->gtOper == GT_CNS_DBL);
- bool bFastConstant = false;
+ bool bFastConstant = false;
instruction ins_ConstantNN = INS_fldz; // keep compiler happy
-
// Both positive 0 and 1 are represnetable in float and double, beware if we add other constants
- switch (*((__int64 *)&(tree->gtDblCon.gtDconVal)))
- {
- case 0:
- // CAREFUL here!, -0 is different than +0, a -0 shouldn't issue a fldz.
- ins_ConstantNN = INS_fldz;
- bFastConstant = true;
- break;
- case I64(0x3ff0000000000000):
- ins_ConstantNN = INS_fld1;
- bFastConstant = true;
- }
-
+ switch (*((__int64*)&(tree->gtDblCon.gtDconVal)))
+ {
+ case 0:
+ // CAREFUL here! -0 is different than +0; a -0 shouldn't issue a fldz.
+ ins_ConstantNN = INS_fldz;
+ bFastConstant = true;
+ break;
+ case I64(0x3ff0000000000000):
+ ins_ConstantNN = INS_fld1;
+ bFastConstant = true;
+ }
+
if (bFastConstant == false && bOnlyNoMemAccess)
- {
+ {
// Caller asked only to generate instructions if it didn't involve memory accesses
- return false;
+ return false;
}
-
+
if (bFastConstant)
{
assert(compCurFPState.m_uStackSize <= FP_PHYSICREGISTERS);
instGen(ins_ConstantNN);
}
- else
- {
+ else
+ {
GenTreePtr addr;
- if (tree->gtType == TYP_FLOAT || StackFPIsSameAsFloat(tree->gtDblCon.gtDconVal))
+ if (tree->gtType == TYP_FLOAT || StackFPIsSameAsFloat(tree->gtDblCon.gtDconVal))
{
float f = forceCastToFloat(tree->gtDblCon.gtDconVal);
- addr = genMakeConst(&f, TYP_FLOAT, tree, false);
+ addr = genMakeConst(&f, TYP_FLOAT, tree, false);
}
else
{
addr = genMakeConst(&tree->gtDblCon.gtDconVal, tree->gtType, tree, true);
}
-
- inst_FS_TT(INS_fld, addr);
+
+ inst_FS_TT(INS_fld, addr);
}
return true;
}
-// Function called at the end of every statement. For stack based x87 its mission is to
+// Function called at the end of every statement. For stack based x87 its mission is to
// remove any remaining temps on the stack.
void CodeGen::genEndOfStatement()
{
unsigned i;
- #ifdef DEBUG
+#ifdef DEBUG
// Sanity check
unsigned uTemps = 0;
- for (i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (i = REG_FPV0; i < REG_FPCOUNT; i++)
{
- if (compCurFPState.Mapped(i) && // register is mapped
- (genRegMaskFloat((regNumber) i) & regSet.rsMaskRegVarFloat) == 0) // but not enregistered
+ if (compCurFPState.Mapped(i) && // register is mapped
+ (genRegMaskFloat((regNumber)i) & regSet.rsMaskRegVarFloat) == 0) // but not enregistered
{
uTemps++;
- }
+ }
}
assert(uTemps <= 1);
- #endif
+#endif
- for (i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (i = REG_FPV0; i < REG_FPCOUNT; i++)
{
- if (compCurFPState.Mapped(i) && // register is mapped
- (genRegMaskFloat((regNumber) i) & regSet.rsMaskRegVarFloat) == 0) // but not enregistered
+ if (compCurFPState.Mapped(i) && // register is mapped
+ (genRegMaskFloat((regNumber)i) & regSet.rsMaskRegVarFloat) == 0) // but not enregistered
{
// remove register from stacks
FlatFPX87_Unload(&compCurFPState, i);
- }
+ }
}
assert(ConsistentAfterStatementStackFP());
}
-
-bool CodeGen::StackFPIsSameAsFloat(double d)
+bool CodeGen::StackFPIsSameAsFloat(double d)
{
if (forceCastToFloat(d) == d)
{
@@ -3374,114 +3241,105 @@ bool CodeGen::StackFPIsSameAsFloat(double d)
{
JITDUMP("StackFPIsSameAsFloat is false for value %lf\n", d);
}
-
+
return false;
}
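
(StackFPIsSameAsFloat above is a round-trip test: a double can share a float-sized constant only if casting it to float and back is lossless. A minimal standalone equivalent, assuming forceCastToFloat is a plain narrowing cast:)

// Illustrative equivalent of the forceCastToFloat(d) == d check above.
static bool FitsInFloatExactly(double d)
{
    return static_cast<double>(static_cast<float>(d)) == d;
}
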
-GenTreePtr CodeGen::genMakeAddressableStackFP (GenTreePtr tree, regMaskTP * regMaskIntPtr, regMaskTP * regMaskFltPtr, bool bCollapseConstantDoubles)
-{
+GenTreePtr CodeGen::genMakeAddressableStackFP(GenTreePtr tree,
+ regMaskTP* regMaskIntPtr,
+ regMaskTP* regMaskFltPtr,
+ bool bCollapseConstantDoubles)
+{
*regMaskIntPtr = *regMaskFltPtr = 0;
switch (tree->OperGet())
- {
- case GT_CNS_DBL:
- if (tree->gtDblCon.gtDconVal == 0.0 ||
- tree->gtDblCon.gtDconVal == 1.0)
- {
- // For constants like 0 or 1 don't waste memory
- genCodeForTree(tree, 0);
- regSet.SetUsedRegFloat(tree, true);
-
- *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum);
- return tree;
- }
- else
- {
- GenTreePtr addr;
- if (tree->gtType == TYP_FLOAT || (bCollapseConstantDoubles && StackFPIsSameAsFloat(tree->gtDblCon.gtDconVal)))
+ {
+ case GT_CNS_DBL:
+ if (tree->gtDblCon.gtDconVal == 0.0 || tree->gtDblCon.gtDconVal == 1.0)
{
- float f = forceCastToFloat(tree->gtDblCon.gtDconVal);
- addr = genMakeConst(&f, TYP_FLOAT, tree, true);
+ // For constants like 0 or 1 don't waste memory
+ genCodeForTree(tree, 0);
+ regSet.SetUsedRegFloat(tree, true);
+
+ *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum);
+ return tree;
}
else
{
- addr = genMakeConst(&tree->gtDblCon.gtDconVal, tree->gtType, tree, true);
- }
+ GenTreePtr addr;
+ if (tree->gtType == TYP_FLOAT ||
+ (bCollapseConstantDoubles && StackFPIsSameAsFloat(tree->gtDblCon.gtDconVal)))
+ {
+ float f = forceCastToFloat(tree->gtDblCon.gtDconVal);
+ addr = genMakeConst(&f, TYP_FLOAT, tree, true);
+ }
+ else
+ {
+ addr = genMakeConst(&tree->gtDblCon.gtDconVal, tree->gtType, tree, true);
+ }
#ifdef DEBUG
- if (compiler->verbose)
- {
- printf("Generated new constant in tree ");
- Compiler::printTreeID(addr);
- printf(" with value %lf\n", tree->gtDblCon.gtDconVal);
- }
+ if (compiler->verbose)
+ {
+ printf("Generated new constant in tree ");
+ Compiler::printTreeID(addr);
+ printf(" with value %lf\n", tree->gtDblCon.gtDconVal);
+ }
#endif // DEBUG
- tree->CopyFrom(addr, compiler);
+ tree->CopyFrom(addr, compiler);
+ return tree;
+ }
+ break;
+ case GT_REG_VAR:
+            // We take care of this in genKeepAddressableStackFP
return tree;
- }
- break;
- case GT_REG_VAR:
- // We take care about this in genKeepAddressableStackFP
- return tree;
- case GT_LCL_VAR:
- case GT_LCL_FLD:
- case GT_CLS_VAR:
- return tree;
-
- case GT_LEA:
- if (!genMakeIndAddrMode(tree,
- tree,
- false,
- 0,
- RegSet::KEEP_REG,
- regMaskIntPtr,
- false))
- {
- assert (false);
- }
- genUpdateLife(tree);
- return tree;
-
- case GT_IND:
- // Try to make the address directly addressable
-
- if (genMakeIndAddrMode(tree->gtOp.gtOp1,
- tree,
- false,
- 0,
- RegSet::KEEP_REG,
- regMaskIntPtr,
- false))
- {
+ case GT_LCL_VAR:
+ case GT_LCL_FLD:
+ case GT_CLS_VAR:
+ return tree;
+
+ case GT_LEA:
+ if (!genMakeIndAddrMode(tree, tree, false, 0, RegSet::KEEP_REG, regMaskIntPtr, false))
+ {
+ assert(false);
+ }
genUpdateLife(tree);
return tree;
- }
- else
- {
- GenTreePtr addr = tree;
- tree = tree->gtOp.gtOp1;
-
- genCodeForTree(tree, 0);
- regSet.rsMarkRegUsed(tree, addr);
-
- *regMaskIntPtr = genRegMask(tree->gtRegNum);
- return addr;
- }
+
+ case GT_IND:
+ // Try to make the address directly addressable
+
+ if (genMakeIndAddrMode(tree->gtOp.gtOp1, tree, false, 0, RegSet::KEEP_REG, regMaskIntPtr, false))
+ {
+ genUpdateLife(tree);
+ return tree;
+ }
+ else
+ {
+ GenTreePtr addr = tree;
+ tree = tree->gtOp.gtOp1;
+
+ genCodeForTree(tree, 0);
+ regSet.rsMarkRegUsed(tree, addr);
+
+ *regMaskIntPtr = genRegMask(tree->gtRegNum);
+ return addr;
+ }
// fall through
- default:
- genCodeForTreeFloat(tree);
- regSet.SetUsedRegFloat(tree, true);
+ default:
+ genCodeForTreeFloat(tree);
+ regSet.SetUsedRegFloat(tree, true);
- // update mask
- *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum);
+ // update mask
+ *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum);
- return tree;
- break;
- }
+ return tree;
+ break;
+ }
}
-void CodeGen::genKeepAddressableStackFP(GenTreePtr tree, regMaskTP * regMaskIntPtr, regMaskTP * regMaskFltPtr)
+void CodeGen::genKeepAddressableStackFP(GenTreePtr tree, regMaskTP* regMaskIntPtr, regMaskTP* regMaskFltPtr)
{
regMaskTP regMaskInt, regMaskFlt;
@@ -3489,66 +3347,68 @@ void CodeGen::genKeepAddressableStackFP(GenTreePtr tree, regMaskTP * regMaskInt
regMaskFlt = *regMaskFltPtr;
*regMaskIntPtr = *regMaskFltPtr = 0;
-
+
switch (tree->OperGet())
{
- case GT_REG_VAR:
- // If register has been spilled, unspill it
- if (tree->gtFlags & GTF_SPILLED)
- {
- UnspillFloat(&compiler->lvaTable[tree->gtLclVarCommon.gtLclNum]);
- }
+ case GT_REG_VAR:
+ // If register has been spilled, unspill it
+ if (tree->gtFlags & GTF_SPILLED)
+ {
+ UnspillFloat(&compiler->lvaTable[tree->gtLclVarCommon.gtLclNum]);
+ }
- // If regvar is dying, take it out of the regvar mask
- if (tree->IsRegVarDeath())
- {
- genRegVarDeathStackFP(tree);
- }
- genUpdateLife(tree);
-
- return ;
- case GT_CNS_DBL:
- {
- if (tree->gtFlags & GTF_SPILLED)
- {
- UnspillFloat(tree);
- }
+ // If regvar is dying, take it out of the regvar mask
+ if (tree->IsRegVarDeath())
+ {
+ genRegVarDeathStackFP(tree);
+ }
+ genUpdateLife(tree);
- *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum);
-
- return;
- }
- case GT_LCL_FLD:
- case GT_LCL_VAR:
- case GT_CLS_VAR:
- genUpdateLife(tree);
- return;
- case GT_IND:
- case GT_LEA:
- if (regMaskFlt)
+ return;
+ case GT_CNS_DBL:
{
- // fall through
+ if (tree->gtFlags & GTF_SPILLED)
+ {
+ UnspillFloat(tree);
+ }
+
+ *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum);
+
+ return;
}
- else
- {
- *regMaskIntPtr = genKeepAddressable(tree, regMaskInt, 0);
- *regMaskFltPtr = 0;
+ case GT_LCL_FLD:
+ case GT_LCL_VAR:
+ case GT_CLS_VAR:
+ genUpdateLife(tree);
return;
- }
- default:
+ case GT_IND:
+ case GT_LEA:
+ if (regMaskFlt)
+ {
+ // fall through
+ }
+ else
+ {
+ *regMaskIntPtr = genKeepAddressable(tree, regMaskInt, 0);
+ *regMaskFltPtr = 0;
+ return;
+ }
+ default:
- *regMaskIntPtr = 0;
- if (tree->gtFlags & GTF_SPILLED)
- {
- UnspillFloat(tree);
- }
- *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum);
- return;
- }
+ *regMaskIntPtr = 0;
+ if (tree->gtFlags & GTF_SPILLED)
+ {
+ UnspillFloat(tree);
+ }
+ *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum);
+ return;
+ }
}
-
-void CodeGen::genDoneAddressableStackFP(GenTreePtr tree, regMaskTP addrRegInt, regMaskTP addrRegFlt, RegSet::KeepReg keptReg)
+void CodeGen::genDoneAddressableStackFP(GenTreePtr tree,
+ regMaskTP addrRegInt,
+ regMaskTP addrRegFlt,
+ RegSet::KeepReg keptReg)
{
assert(!(addrRegInt && addrRegFlt));
@@ -3557,57 +3417,57 @@ void CodeGen::genDoneAddressableStackFP(GenTreePtr tree, regMaskTP addrRegInt, r
return genDoneAddressable(tree, addrRegInt, keptReg);
}
else if (addrRegFlt)
- {
+ {
if (keptReg == RegSet::KEEP_REG)
{
- for (unsigned i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (unsigned i = REG_FPV0; i < REG_FPCOUNT; i++)
{
- if (genRegMaskFloat((regNumber) i) & addrRegFlt)
+ if (genRegMaskFloat((regNumber)i) & addrRegFlt)
{
regSet.SetUsedRegFloat(tree, false);
}
}
- }
+ }
}
}
-void CodeGen::FlatFPX87_Kill (FlatFPStateX87* pState, unsigned uVirtual)
+void CodeGen::FlatFPX87_Kill(FlatFPStateX87* pState, unsigned uVirtual)
{
- JITDUMP("Killing %s\n", regVarNameStackFP((regNumber) uVirtual));
+ JITDUMP("Killing %s\n", regVarNameStackFP((regNumber)uVirtual));
assert(pState->TopVirtual() == uVirtual);
pState->Pop();
}
-void CodeGen::FlatFPX87_PushVirtual (FlatFPStateX87* pState, unsigned uRegister, bool bEmitCode)
+void CodeGen::FlatFPX87_PushVirtual(FlatFPStateX87* pState, unsigned uRegister, bool bEmitCode)
{
- JITDUMP("Pushing %s to stack\n", regVarNameStackFP((regNumber) uRegister));
+ JITDUMP("Pushing %s to stack\n", regVarNameStackFP((regNumber)uRegister));
pState->Push(uRegister);
}
-unsigned CodeGen::FlatFPX87_Pop (FlatFPStateX87* pState, bool bEmitCode)
+unsigned CodeGen::FlatFPX87_Pop(FlatFPStateX87* pState, bool bEmitCode)
{
assert(pState->m_uStackSize > 0);
// Update state
unsigned uVirtual = pState->Pop();
-
+
// Emit instruction
if (bEmitCode)
{
inst_FS(INS_fstp, 0);
}
-
+
return (uVirtual);
}
-unsigned CodeGen::FlatFPX87_Top (FlatFPStateX87* pState, bool bEmitCode)
+unsigned CodeGen::FlatFPX87_Top(FlatFPStateX87* pState, bool bEmitCode)
{
return pState->TopVirtual();
}
-void CodeGen::FlatFPX87_Unload(FlatFPStateX87* pState, unsigned uVirtual, bool bEmitCode)
+void CodeGen::FlatFPX87_Unload(FlatFPStateX87* pState, unsigned uVirtual, bool bEmitCode)
{
if (uVirtual != pState->TopVirtual())
{
@@ -3621,16 +3481,15 @@ void CodeGen::FlatFPX87_Unload(FlatFPStateX87* pState, unsigned uVirt
pState->Associate(pState->TopVirtual(), uStack);
pState->m_uStackSize--;
-
- #ifdef DEBUG
+#ifdef DEBUG
pState->m_uStack[pState->m_uStackSize] = (unsigned)-1;
- #endif
-
+#endif
+
// Emit instruction
if (bEmitCode)
{
- inst_FS(INS_fstp, uPhysic);
+ inst_FS(INS_fstp, uPhysic);
}
}
else
@@ -3642,11 +3501,11 @@ void CodeGen::FlatFPX87_Unload(FlatFPStateX87* pState, unsigned uVirt
assert(pState->IsConsistent());
}
-void CodeGenInterface::FlatFPX87_MoveToTOS (FlatFPStateX87* pState, unsigned uVirtual, bool bEmitCode)
+void CodeGenInterface::FlatFPX87_MoveToTOS(FlatFPStateX87* pState, unsigned uVirtual, bool bEmitCode)
{
assert(!IsUninitialized(uVirtual));
-
- JITDUMP("Moving %s to top of stack\n", regVarNameStackFP((regNumber) uVirtual));
+
+ JITDUMP("Moving %s to top of stack\n", regVarNameStackFP((regNumber)uVirtual));
if (uVirtual != pState->TopVirtual())
{
@@ -3654,19 +3513,18 @@ void CodeGenInterface::FlatFPX87_MoveToTOS (FlatFPStateX87* pState
}
else
{
- JITDUMP("%s already on the top of stack\n", regVarNameStackFP((regNumber) uVirtual));
+ JITDUMP("%s already on the top of stack\n", regVarNameStackFP((regNumber)uVirtual));
}
assert(pState->IsConsistent());
}
-
-void CodeGenInterface::FlatFPX87_SwapStack (FlatFPStateX87* pState, unsigned i, unsigned j, bool bEmitCode)
+void CodeGenInterface::FlatFPX87_SwapStack(FlatFPStateX87* pState, unsigned i, unsigned j, bool bEmitCode)
{
assert(i != j);
assert(i < pState->m_uStackSize);
assert(j < pState->m_uStackSize);
-
+
JITDUMP("Exchanging ST(%i) and ST(%i)\n", pState->StackToST(i), pState->StackToST(j));
// issue actual swaps
@@ -3677,8 +3535,8 @@ void CodeGenInterface::FlatFPX87_SwapStack (FlatFPStateX87* pSt
{
if (iPhysic == 0 || jPhysic == 0)
{
- inst_FN(INS_fxch, iPhysic?iPhysic:jPhysic);
- }
+ inst_FN(INS_fxch, iPhysic ? iPhysic : jPhysic);
+ }
else
{
inst_FN(INS_fxch, iPhysic);
@@ -3695,7 +3553,7 @@ void CodeGenInterface::FlatFPX87_SwapStack (FlatFPStateX87* pSt
// Swap stack
int temp;
- temp = pState->m_uStack[i];
+ temp = pState->m_uStack[i];
pState->m_uStack[i] = pState->m_uStack[j];
pState->m_uStack[j] = temp;
@@ -3712,9 +3570,9 @@ void CodeGen::JitDumpFPState()
{
printf("FPSTATE\n");
printf("Used virtual registers: ");
- for (i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (i = REG_FPV0; i < REG_FPCOUNT; i++)
{
- if (genRegMaskFloat((regNumber) i) & regSet.rsMaskUsedFloat)
+ if (genRegMaskFloat((regNumber)i) & regSet.rsMaskUsedFloat)
{
printf("FPV%i ", i);
}
@@ -3722,9 +3580,9 @@ void CodeGen::JitDumpFPState()
printf("\n");
printf("virtual registers holding reg vars: ");
- for (i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (i = REG_FPV0; i < REG_FPCOUNT; i++)
{
- if (genRegMaskFloat((regNumber) i) & regSet.rsMaskRegVarFloat)
+ if (genRegMaskFloat((regNumber)i) & regSet.rsMaskRegVarFloat)
{
printf("FPV%i ", i);
}
@@ -3735,7 +3593,7 @@ void CodeGen::JitDumpFPState()
}
#endif
-//
+//
//
// Register allocation
//
@@ -3745,45 +3603,42 @@ struct ChangeToRegVarCallback
regNumber reg;
};
-
-void Compiler::raInitStackFP ()
+void Compiler::raInitStackFP()
{
// Reset local/reg interference
for (int i = 0; i < REG_FPCOUNT; i++)
{
VarSetOps::AssignNoCopy(this, raLclRegIntfFloat[i], VarSetOps::MakeEmpty(this));
}
-
+
VarSetOps::AssignNoCopy(this, optAllFPregVars, VarSetOps::MakeEmpty(this));
VarSetOps::AssignNoCopy(this, optAllNonFPvars, VarSetOps::MakeEmpty(this));
VarSetOps::AssignNoCopy(this, optAllFloatVars, VarSetOps::MakeEmpty(this));
- raCntStkStackFP = 0;
- raCntWtdStkDblStackFP = 0;
- raCntStkParamDblStackFP = 0;
+ raCntStkStackFP = 0;
+ raCntWtdStkDblStackFP = 0;
+ raCntStkParamDblStackFP = 0;
VarSetOps::AssignNoCopy(this, raMaskDontEnregFloat, VarSetOps::MakeEmpty(this));
// Calculate the set of all tracked FP/non-FP variables
// into compiler->optAllFloatVars and compiler->optAllNonFPvars
- unsigned lclNum;
- LclVarDsc * varDsc;
+ unsigned lclNum;
+ LclVarDsc* varDsc;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
/* Ignore the variable if it's not tracked */
-
- if (!varDsc->lvTracked)
+
+ if (!varDsc->lvTracked)
continue;
/* Get hold of the index and the interference mask for the variable */
-
- unsigned varNum = varDsc->lvVarIndex;
+
+ unsigned varNum = varDsc->lvVarIndex;
/* add to the set of all tracked FP/non-FP variables */
-
+
if (varDsc->IsFloatRegType())
VarSetOps::AddElemD(this, optAllFloatVars, varNum);
else
@@ -3797,40 +3652,39 @@ void Compiler::raDumpVariableRegIntfFloat()
unsigned i;
unsigned j;
- for (i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (i = REG_FPV0; i < REG_FPCOUNT; i++)
{
if (!VarSetOps::IsEmpty(this, raLclRegIntfFloat[i]))
{
JITDUMP("FPV%u interferes with ", i);
- for (j = 0 ; j < lvaTrackedCount ; j++)
+ for (j = 0; j < lvaTrackedCount; j++)
{
- assert( VarSetOps::IsEmpty(this, VarSetOps::Diff(this, raLclRegIntfFloat[i], optAllFloatVars)) );
+ assert(VarSetOps::IsEmpty(this, VarSetOps::Diff(this, raLclRegIntfFloat[i], optAllFloatVars)));
if (VarSetOps::IsMember(this, raLclRegIntfFloat[i], j))
{
JITDUMP("T%02u/V%02u, ", j, lvaTrackedToVarNum[j]);
}
- }
+ }
JITDUMP("\n");
}
}
}
#endif
-
 // Returns the regnum for the variable passed as a param, taking into account
 // the fpvar-to-register interference mask. If we can't find anything, we
// will return REG_FPNONE
regNumber Compiler::raRegForVarStackFP(unsigned varTrackedIndex)
{
- for (unsigned i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ for (unsigned i = REG_FPV0; i < REG_FPCOUNT; i++)
{
if (!VarSetOps::IsMember(this, raLclRegIntfFloat[i], varTrackedIndex))
{
- return (regNumber) i;
+ return (regNumber)i;
}
}
-
+
return REG_FPNONE;
}
@@ -3842,41 +3696,38 @@ void Compiler::raAddPayloadStackFP(VARSET_VALARG_TP maskArg, unsigned weight)
return;
}
- for (unsigned i = 0 ; i < lvaTrackedCount ; i++)
+ for (unsigned i = 0; i < lvaTrackedCount; i++)
{
if (VarSetOps::IsMember(this, mask, i))
- {
+ {
raPayloadStackFP[i] += weight;
}
}
-
}
-bool Compiler::raVarIsGreaterValueStackFP(LclVarDsc *lv1, LclVarDsc *lv2)
+bool Compiler::raVarIsGreaterValueStackFP(LclVarDsc* lv1, LclVarDsc* lv2)
{
assert(lv1->lvTracked);
assert(lv2->lvTracked);
bool bSmall = (compCodeOpt() == SMALL_CODE);
-
- double weight1 = double(bSmall?lv1->lvRefCnt:lv1->lvRefCntWtd) -
- double(raPayloadStackFP[lv1->lvVarIndex])-
+
+ double weight1 = double(bSmall ? lv1->lvRefCnt : lv1->lvRefCntWtd) - double(raPayloadStackFP[lv1->lvVarIndex]) -
double(raHeightsStackFP[lv1->lvVarIndex][FP_VIRTUALREGISTERS]);
- double weight2 = double(bSmall?lv2->lvRefCnt:lv2->lvRefCntWtd) -
- double(raPayloadStackFP[lv2->lvVarIndex])-
+ double weight2 = double(bSmall ? lv2->lvRefCnt : lv2->lvRefCntWtd) - double(raPayloadStackFP[lv2->lvVarIndex]) -
double(raHeightsStackFP[lv2->lvVarIndex][FP_VIRTUALREGISTERS]);
double diff = weight1 - weight2;
if (diff)
{
- return diff > 0 ? true:false;
+ return diff > 0 ? true : false;
}
else
{
- return int(lv1->lvRefCnt - lv2->lvRefCnt)?true:false;
- }
+ return int(lv1->lvRefCnt - lv2->lvRefCnt) ? true : false;
+ }
}
#ifdef DEBUG
@@ -3889,21 +3740,20 @@ void Compiler::raDumpHeightsStackFP()
JITDUMP("raDumpHeightsStackFP():\n");
JITDUMP("--------------------------------------------------------\n");
JITDUMP("Weighted Height Table Dump\n ");
- for (i = 0 ; i < FP_VIRTUALREGISTERS ; i++)
+ for (i = 0; i < FP_VIRTUALREGISTERS; i++)
{
- JITDUMP(" %i ", i+1);
+ JITDUMP(" %i ", i + 1);
}
JITDUMP("OVF\n");
- for (i = 0 ; i < lvaTrackedCount ; i++)
+ for (i = 0; i < lvaTrackedCount; i++)
{
- if (VarSetOps::IsMember(this, optAllFloatVars, i) &&
- !VarSetOps::IsMember(this, optAllFPregVars, i))
+ if (VarSetOps::IsMember(this, optAllFloatVars, i) && !VarSetOps::IsMember(this, optAllFPregVars, i))
{
JITDUMP("V%02u/T%02u: ", lvaTrackedToVarNum[i], i);
- for (j = 0 ; j <= FP_VIRTUALREGISTERS ; j++)
+ for (j = 0; j <= FP_VIRTUALREGISTERS; j++)
{
JITDUMP("%5u ", raHeightsStackFP[i][j]);
}
@@ -3912,21 +3762,20 @@ void Compiler::raDumpHeightsStackFP()
}
JITDUMP("\nNonweighted Height Table Dump\n ");
- for (i = 0 ; i < FP_VIRTUALREGISTERS ; i++)
+ for (i = 0; i < FP_VIRTUALREGISTERS; i++)
{
- JITDUMP(" %i ", i+1);
+ JITDUMP(" %i ", i + 1);
}
JITDUMP("OVF\n");
-
- for (i = 0 ; i < lvaTrackedCount ; i++)
+
+ for (i = 0; i < lvaTrackedCount; i++)
{
- if (VarSetOps::IsMember(this, optAllFloatVars, i) &&
- !VarSetOps::IsMember(this, optAllFPregVars, i))
+ if (VarSetOps::IsMember(this, optAllFloatVars, i) && !VarSetOps::IsMember(this, optAllFPregVars, i))
{
JITDUMP("V%02u/T%02u: ", lvaTrackedToVarNum[i], i);
- for (j = 0 ; j <= FP_VIRTUALREGISTERS ; j++)
+ for (j = 0; j <= FP_VIRTUALREGISTERS; j++)
{
JITDUMP("%5u ", raHeightsNonWeightedStackFP[i][j]);
}
@@ -3942,25 +3791,25 @@ void Compiler::raDumpHeightsStackFP()
// shift one place to the right.
void Compiler::raUpdateHeightsForVarsStackFP(VARSET_VALARG_TP mask)
{
- assert( VarSetOps::IsSubset(this, mask, optAllFloatVars) );
-
- for (unsigned i = 0 ; i < lvaTrackedCount ; i++)
+ assert(VarSetOps::IsSubset(this, mask, optAllFloatVars));
+
+ for (unsigned i = 0; i < lvaTrackedCount; i++)
{
if (VarSetOps::IsMember(this, mask, i))
{
- for (unsigned j = FP_VIRTUALREGISTERS ; j > 0 ; j--)
+ for (unsigned j = FP_VIRTUALREGISTERS; j > 0; j--)
{
- raHeightsStackFP[i][j] = raHeightsStackFP[i][j-1];
+ raHeightsStackFP[i][j] = raHeightsStackFP[i][j - 1];
#ifdef DEBUG
- raHeightsNonWeightedStackFP[i][j] = raHeightsNonWeightedStackFP[i][j-1];
+ raHeightsNonWeightedStackFP[i][j] = raHeightsNonWeightedStackFP[i][j - 1];
#endif
}
-
+
raHeightsStackFP[i][0] = 0;
#ifdef DEBUG
raHeightsNonWeightedStackFP[i][0] = 0;
-#endif
+#endif
}
}
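
(The loop above shifts each live variable's height row one slot to the right and zeroes slot 0; index FP_VIRTUALREGISTERS is the overflow ("OVF") bucket seen in raDumpHeightsStackFP. A simplified sketch of that shift; the table dimensions and names below are stand-ins, not the JIT's fields:)

// Hypothetical stand-ins for the fixed-size height table updated above.
const unsigned kVirtualRegs = 8;                    // plays the role of FP_VIRTUALREGISTERS
static unsigned gHeights[64][kVirtualRegs + 1];     // per tracked var: one bucket per depth, plus overflow

static void ShiftHeightsRight(unsigned varIndex)
{
    for (unsigned j = kVirtualRegs; j > 0; j--)
    {
        gHeights[varIndex][j] = gHeights[varIndex][j - 1]; // slide every bucket one depth deeper
    }
    gHeights[varIndex][0] = 0; // the new shallowest depth starts with no accumulated cost
}
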
@@ -3971,12 +3820,12 @@ void Compiler::raUpdateHeightsForVarsStackFP(VARSET_VALARG_TP mask)
// This is the prepass we do to adjust refcounts across calls and
// create the height structure.
-void Compiler::raEnregisterVarsPrePassStackFP ()
+void Compiler::raEnregisterVarsPrePassStackFP()
{
BasicBlock* block;
assert(!VarSetOps::IsEmpty(this, optAllFloatVars));
-
+
// Initialization of the height table
memset(raHeightsStackFP, 0, sizeof(raHeightsStackFP));
@@ -3989,42 +3838,41 @@ void Compiler::raEnregisterVarsPrePassStackFP ()
// We will have a quick table with the pointers to the interesting varDscs
// so that we don't have to scan for them for each tree.
- unsigned FPVars[lclMAX_TRACKED];
- unsigned numFPVars = 0;
- for (unsigned i = 0 ; i < lvaTrackedCount ; i++)
+ unsigned FPVars[lclMAX_TRACKED];
+ unsigned numFPVars = 0;
+ for (unsigned i = 0; i < lvaTrackedCount; i++)
{
if (VarSetOps::IsMember(this, optAllFloatVars, i))
{
FPVars[numFPVars++] = i;
}
}
-
+
assert(numFPVars == VarSetOps::Count(this, optAllFloatVars));
-
-
+
// Things we check here:
//
     // We subtract 2 for each FP variable that's live across a call, as we will
     // have 2 memory accesses to spill and unspill around it.
//
- //
- //
+ //
+ //
VARSET_TP VARSET_INIT_NOCOPY(blockLiveOutFloats, VarSetOps::MakeEmpty(this));
for (block = fgFirstBB; block; block = block->bbNext)
{
compCurBB = block;
/*
- This opt fails in the case of a variable that has it's entire lifetime contained in the 'then' of
- a qmark. The use mask for the whole qmark won't contain that variable as it variable's value comes
- from a def in the else, and the def can't be set for the qmark if the else side of
+        This opt fails in the case of a variable that has its entire lifetime contained in the 'then' of
+        a qmark. The use mask for the whole qmark won't contain that variable as the variable's value comes
+ from a def in the else, and the def can't be set for the qmark if the else side of
the qmark doesn't do a def.
See VSW# 354454 for more info. Leaving the comment and code here just in case we try to be
'smart' again in the future
- if (((block->bbVarUse |
- block->bbVarDef |
+ if (((block->bbVarUse |
+ block->bbVarDef |
block->bbLiveIn ) & optAllFloatVars) == 0)
{
// Fast way out
@@ -4041,95 +3889,96 @@ void Compiler::raEnregisterVarsPrePassStackFP ()
{
case BBJ_COND:
{
- GenTreePtr stmt;
+ GenTreePtr stmt;
stmt = block->bbTreeList->gtPrev;
- assert(stmt->gtNext == NULL &&
- stmt->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
+ assert(stmt->gtNext == NULL && stmt->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
assert(stmt->gtStmt.gtStmtExpr->gtOp.gtOp1);
- GenTreePtr cond = stmt->gtStmt.gtStmtExpr->gtOp.gtOp1;
+ GenTreePtr cond = stmt->gtStmt.gtStmtExpr->gtOp.gtOp1;
assert(cond->OperIsCompare());
-
+
if (cond->gtOp.gtOp1->TypeGet() == TYP_LONG)
{
if (compHndBBtabCount > 0)
{
- // If we have any handlers we won't enregister whatever is live out of this block
- JITDUMP("PERF Warning: Taking out FP candidates due to transition blocks + exception handlers.\n");
- VarSetOps::UnionD(this, raMaskDontEnregFloat, VarSetOps::Intersection(this, block->bbLiveOut, optAllFloatVars));
- }
+ // If we have any handlers we won't enregister whatever is live out of this block
+ JITDUMP("PERF Warning: Taking out FP candidates due to transition blocks + exception "
+ "handlers.\n");
+ VarSetOps::UnionD(this, raMaskDontEnregFloat,
+ VarSetOps::Intersection(this, block->bbLiveOut, optAllFloatVars));
+ }
else
- {
+ {
                         // long conditional jumps can generate transition blocks
compMayHaveTransitionBlocks = true;
}
}
-
+
break;
}
case BBJ_SWITCH:
{
if (compHndBBtabCount > 0)
- {
+ {
// If we have any handlers we won't enregister whatever is live out of this block
- JITDUMP("PERF Warning: Taking out FP candidates due to transition blocks + exception handlers.\n");
- VarSetOps::UnionD(this, raMaskDontEnregFloat, VarSetOps::Intersection(this, block->bbLiveOut, optAllFloatVars));
+ JITDUMP(
+ "PERF Warning: Taking out FP candidates due to transition blocks + exception handlers.\n");
+ VarSetOps::UnionD(this, raMaskDontEnregFloat,
+ VarSetOps::Intersection(this, block->bbLiveOut, optAllFloatVars));
}
else
- {
+ {
// fp vars are live out of the switch, so we may have transition blocks
compMayHaveTransitionBlocks = true;
}
break;
- default:
- break;
+ default:
+ break;
}
}
}
-
+
VARSET_TP VARSET_INIT(this, liveSet, block->bbLiveIn);
for (GenTreePtr stmt = block->FirstNonPhiDef(); stmt; stmt = stmt->gtNext)
{
assert(stmt->gtOper == GT_STMT);
unsigned prevHeight = stmt->gtStmt.gtStmtList->gtFPlvl;
- for (GenTreePtr tree = stmt->gtStmt.gtStmtList;
- tree;
- tree = tree->gtNext)
+ for (GenTreePtr tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
{
VarSetOps::AssignNoCopy(this, liveSet, fgUpdateLiveSet(liveSet, tree));
switch (tree->gtOper)
{
- case GT_CALL:
- raAddPayloadStackFP(liveSet, block->getBBWeight(this)*2);
- break;
- case GT_CAST:
- // For cast from long local var to double, decrement the ref count of the long
- // to avoid store forwarding stall
- if (tree->gtType == TYP_DOUBLE)
- {
- GenTreePtr op1 = tree->gtOp.gtOp1;
- if (op1->gtOper == GT_LCL_VAR && op1->gtType == TYP_LONG)
+ case GT_CALL:
+ raAddPayloadStackFP(liveSet, block->getBBWeight(this) * 2);
+ break;
+ case GT_CAST:
+ // For cast from long local var to double, decrement the ref count of the long
+ // to avoid store forwarding stall
+ if (tree->gtType == TYP_DOUBLE)
{
- unsigned int lclNum = op1->gtLclVarCommon.gtLclNum;
- assert( lclNum < lvaCount);
- LclVarDsc * varDsc = lvaTable + lclNum;
- unsigned int weightedRefCnt = varDsc->lvRefCntWtd;
- unsigned int refCntDecrement = 2*block->getBBWeight(this);
- if (refCntDecrement > weightedRefCnt)
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ if (op1->gtOper == GT_LCL_VAR && op1->gtType == TYP_LONG)
{
- varDsc->lvRefCntWtd = 0;
- }
- else
- {
- varDsc->lvRefCntWtd = weightedRefCnt - refCntDecrement;
+ unsigned int lclNum = op1->gtLclVarCommon.gtLclNum;
+ assert(lclNum < lvaCount);
+ LclVarDsc* varDsc = lvaTable + lclNum;
+ unsigned int weightedRefCnt = varDsc->lvRefCntWtd;
+ unsigned int refCntDecrement = 2 * block->getBBWeight(this);
+ if (refCntDecrement > weightedRefCnt)
+ {
+ varDsc->lvRefCntWtd = 0;
+ }
+ else
+ {
+ varDsc->lvRefCntWtd = weightedRefCnt - refCntDecrement;
+ }
}
}
- }
- break;
- default:
- break;
+ break;
+ default:
+ break;
}
// Update heights
@@ -4139,7 +3988,7 @@ void Compiler::raEnregisterVarsPrePassStackFP ()
{
if (height > prevHeight && height < FP_VIRTUALREGISTERS)
{
- for (unsigned i = 0 ; i < numFPVars ; i++)
+ for (unsigned i = 0; i < numFPVars; i++)
{
if (VarSetOps::IsMember(this, liveSet, FPVars[i]))
{
@@ -4150,16 +3999,16 @@ void Compiler::raEnregisterVarsPrePassStackFP ()
// (even if we op directly with the spill)
if (compCodeOpt() == SMALL_CODE)
{
- raHeightsStackFP[FPVars[i]][height-1] += 2;
+ raHeightsStackFP[FPVars[i]][height - 1] += 2;
}
else
{
- raHeightsStackFP[FPVars[i]][height-1] += 2*block->getBBWeight(this);
+ raHeightsStackFP[FPVars[i]][height - 1] += 2 * block->getBBWeight(this);
}
- #ifdef DEBUG
- raHeightsNonWeightedStackFP[FPVars[i]][height-1]++;
- #endif
+#ifdef DEBUG
+ raHeightsNonWeightedStackFP[FPVars[i]][height - 1]++;
+#endif
}
}
}
@@ -4167,7 +4016,7 @@ void Compiler::raEnregisterVarsPrePassStackFP ()
prevHeight = height;
}
}
- }
+ }
}
compCurBB = NULL;
@@ -4178,28 +4027,26 @@ void Compiler::raEnregisterVarsPrePassStackFP ()
// The problem with FP enreg vars is that the returning block is marked with having
// all variables live on exit. This works for integer vars, but for FP vars we must
// do the work to unload them. This is fairly straightforward to do, but I'm worried
- // by the coverage, so I'll take the conservative aproach of disabling FP enregistering
+    // by the coverage, so I'll take the conservative approach of disabling FP enregistering
// and we will fix it if there is demand
JITDUMP("PERF Warning: Disabling FP enregistering due to JMP op!!!!!!!.\n");
VarSetOps::UnionD(this, raMaskDontEnregFloat, optAllFloatVars);
}
-
+
#ifdef DEBUG
raDumpHeightsStackFP();
#endif
}
-
void Compiler::raSetRegLclBirthDeath(GenTreePtr tree, VARSET_VALARG_TP lastlife, bool fromLDOBJ)
{
assert(tree->gtOper == GT_LCL_VAR);
-
- unsigned lclnum = tree->gtLclVarCommon.gtLclNum;
+
+ unsigned lclnum = tree->gtLclVarCommon.gtLclNum;
assert(lclnum < lvaCount);
+ LclVarDsc* varDsc = lvaTable + lclnum;
- LclVarDsc * varDsc = lvaTable + lclnum;
-
if (!varDsc->lvTracked)
{
// Not tracked, can't be one of the enreg fp vars
@@ -4216,16 +4063,16 @@ void Compiler::raSetRegLclBirthDeath(GenTreePtr tree, VARSET_VALARG_TP lastlife,
assert(varDsc->lvRegNum != REG_FPNONE);
assert(!VarSetOps::IsMember(this, raMaskDontEnregFloat, varIndex));
-
+
unsigned livenessFlags = (tree->gtFlags & GTF_LIVENESS_MASK);
tree->ChangeOper(GT_REG_VAR);
tree->gtFlags |= livenessFlags;
- tree->gtRegNum = varDsc->lvRegNum;
- tree->gtRegVar.gtRegNum = varDsc->lvRegNum;
+ tree->gtRegNum = varDsc->lvRegNum;
+ tree->gtRegVar.gtRegNum = varDsc->lvRegNum;
tree->gtRegVar.SetLclNum(lclnum);
- // A liveset can change in a lclvar even if the lclvar itself is not
- // changing its life. This can happen for lclvars inside qmarks,
+ // A liveset can change in a lclvar even if the lclvar itself is not
+ // changing its life. This can happen for lclvars inside qmarks,
// where lclvars die across the colon edge.
// SO, either
// it is marked GTF_VAR_DEATH (already set by fgComputeLife)
@@ -4240,17 +4087,16 @@ void Compiler::raSetRegLclBirthDeath(GenTreePtr tree, VARSET_VALARG_TP lastlife,
tree->gtFlags |= GTF_REG_BIRTH;
}
}
-
+
#ifdef DEBUG
if (verbose)
gtDispTree(tree);
#endif
-
}
 // In this pass we set the regvars and set the birth and death flags. We do it
// for all enregistered variables at once.
-void Compiler::raEnregisterVarsPostPassStackFP ()
+void Compiler::raEnregisterVarsPostPassStackFP()
{
if (VarSetOps::IsEmpty(this, optAllFPregVars))
{
@@ -4260,15 +4106,15 @@ void Compiler::raEnregisterVarsPostPassStackFP ()
BasicBlock* block;
JITDUMP("raEnregisterVarsPostPassStackFP:\n");
-
+
for (block = fgFirstBB; block; block = block->bbNext)
{
compCurBB = block;
/*
- This opt fails in the case of a variable that has it's entire lifetime contained in the 'then' of
- a qmark. The use mask for the whole qmark won't contain that variable as it variable's value comes
- from a def in the else, and the def can't be set for the qmark if the else side of
+        This opt fails in the case of a variable that has its entire lifetime contained in the 'then' of
+        a qmark. The use mask for the whole qmark won't contain that variable as the variable's value comes
+ from a def in the else, and the def can't be set for the qmark if the else side of
the qmark doesn't do a def.
See VSW# 354454 for more info. Leaving the comment and code here just in case we try to be
@@ -4276,8 +4122,8 @@ void Compiler::raEnregisterVarsPostPassStackFP ()
- if (((block->bbVarUse |
- block->bbVarDef |
+ if (((block->bbVarUse |
+ block->bbVarDef |
block->bbLiveIn ) & optAllFPregVars) == 0)
{
// Fast way out
@@ -4285,29 +4131,25 @@ void Compiler::raEnregisterVarsPostPassStackFP ()
}
*/
-
VARSET_TP VARSET_INIT(this, lastlife, block->bbLiveIn);
for (GenTreePtr stmt = block->FirstNonPhiDef(); stmt; stmt = stmt->gtNext)
{
assert(stmt->gtOper == GT_STMT);
-
- for (GenTreePtr tree = stmt->gtStmt.gtStmtList;
- tree;
- VarSetOps::AssignNoCopy(this, lastlife, fgUpdateLiveSet(lastlife, tree)),
- tree = tree->gtNext)
+
+ for (GenTreePtr tree = stmt->gtStmt.gtStmtList; tree;
+ VarSetOps::AssignNoCopy(this, lastlife, fgUpdateLiveSet(lastlife, tree)), tree = tree->gtNext)
{
if (tree->gtOper == GT_LCL_VAR)
{
- raSetRegLclBirthDeath(tree, lastlife, false);
- }
+ raSetRegLclBirthDeath(tree, lastlife, false);
+ }
}
- }
+ }
assert(VarSetOps::Equal(this, lastlife, block->bbLiveOut));
- }
+ }
compCurBB = NULL;
}
-
void Compiler::raGenerateFPRefCounts()
{
// Update ref counts to stack
@@ -4315,15 +4157,13 @@ void Compiler::raGenerateFPRefCounts()
assert(raCntStkParamDblStackFP == 0);
assert(raCntStkStackFP == 0);
- LclVarDsc * varDsc;
- unsigned lclNum;
- for (lclNum = 0, varDsc = lvaTable;
- lclNum < lvaCount;
- lclNum++ , varDsc++)
+ LclVarDsc* varDsc;
+ unsigned lclNum;
+ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
- if (varDsc->lvType == TYP_DOUBLE ||
+ if (varDsc->lvType == TYP_DOUBLE ||
varDsc->lvStructDoubleAlign) // Account for structs (A bit over aggressive here, we should
- // account for field accesses, but should be a reasonable
+ // account for field accesses, but should be a reasonable
// heuristic).
{
if (varDsc->lvRegister)
@@ -4334,7 +4174,7 @@ void Compiler::raGenerateFPRefCounts()
{
// Increment tmp access
raCntStkStackFP += varDsc->lvRefCnt;
-
+
if (varDsc->lvIsParam)
{
// Why is this not weighted?
@@ -4349,17 +4189,17 @@ void Compiler::raGenerateFPRefCounts()
}
#ifdef DEBUG
- if ((raCntWtdStkDblStackFP> 0) || (raCntStkParamDblStackFP > 0))
+ if ((raCntWtdStkDblStackFP > 0) || (raCntStkParamDblStackFP > 0))
{
- JITDUMP("StackFP double stack weighted ref count: %u ; param ref count: %u\n",
- raCntWtdStkDblStackFP, raCntStkParamDblStackFP);
+ JITDUMP("StackFP double stack weighted ref count: %u ; param ref count: %u\n", raCntWtdStkDblStackFP,
+ raCntStkParamDblStackFP);
}
#endif
}
void Compiler::raEnregisterVarsStackFP()
-{
- const int FPENREGTHRESHOLD = 1;
+{
+ const int FPENREGTHRESHOLD = 1;
const unsigned int FPENREGTHRESHOLD_WEIGHTED = FPENREGTHRESHOLD;
// Do init
@@ -4370,7 +4210,7 @@ void Compiler::raEnregisterVarsStackFP()
// no enregistering for these options.
return;
}
-
+
if (VarSetOps::IsEmpty(this, optAllFloatVars))
{
// No floating point vars. bail out
@@ -4379,9 +4219,9 @@ void Compiler::raEnregisterVarsStackFP()
// Do additional pass updating weights and generating height table
raEnregisterVarsPrePassStackFP();
-
+
// Vars are ordered by weight
- LclVarDsc * varDsc;
+ LclVarDsc* varDsc;
     // Set an interference with V0 and V1, which we reserve as temp registers.
     // We need only one temp, but we will take the easy way, as by using
@@ -4393,75 +4233,74 @@ void Compiler::raEnregisterVarsStackFP()
if (codeGen->genStressFloat())
{
// Lock out registers for stress.
- regMaskTP locked = codeGen->genStressLockedMaskFloat();
- for (unsigned i = REG_FPV0 ; i < REG_FPCOUNT ; i++)
+ regMaskTP locked = codeGen->genStressLockedMaskFloat();
+ for (unsigned i = REG_FPV0; i < REG_FPCOUNT; i++)
{
- if (locked & genRegMaskFloat((regNumber) i))
+ if (locked & genRegMaskFloat((regNumber)i))
{
VarSetOps::Assign(this, raLclRegIntfFloat[i], optAllFloatVars);
}
}
}
#endif
-
-
+
// Build the interesting FP var table
- LclVarDsc * fpLclFPVars[lclMAX_TRACKED];
+ LclVarDsc* fpLclFPVars[lclMAX_TRACKED];
unsigned numFPVars = 0;
- for (unsigned i = 0 ; i < lvaTrackedCount ; i++)
+ for (unsigned i = 0; i < lvaTrackedCount; i++)
{
if (VarSetOps::IsMember(this, raMaskDontEnregFloat, i))
{
- JITDUMP("Won't enregister V%02i (T%02i) because it's marked as dont enregister\n", lvaTrackedToVarNum[i], i);
+ JITDUMP("Won't enregister V%02i (T%02i) because it's marked as dont enregister\n", lvaTrackedToVarNum[i],
+ i);
continue;
}
-
+
if (VarSetOps::IsMember(this, optAllFloatVars, i))
{
- varDsc = lvaTable+lvaTrackedToVarNum[i];
+ varDsc = lvaTable + lvaTrackedToVarNum[i];
assert(varDsc->lvTracked);
if (varDsc->lvDoNotEnregister)
{
- JITDUMP("Won't enregister V%02i (T%02i) because it's marked as DoNotEnregister\n", lvaTrackedToVarNum[i], i);
+ JITDUMP("Won't enregister V%02i (T%02i) because it's marked as DoNotEnregister\n",
+ lvaTrackedToVarNum[i], i);
continue;
}
-#if !FEATURE_X87_DOUBLES
+#if !FEATURE_X87_DOUBLES
if (varDsc->TypeGet() == TYP_FLOAT)
{
- JITDUMP("Won't enregister V%02i (T%02i) because it's a TYP_FLOAT and we have disabled FEATURE_X87_DOUBLES\n", lvaTrackedToVarNum[i], i);
+ JITDUMP("Won't enregister V%02i (T%02i) because it's a TYP_FLOAT and we have disabled "
+ "FEATURE_X87_DOUBLES\n",
+ lvaTrackedToVarNum[i], i);
continue;
}
#endif
- fpLclFPVars[numFPVars++] = lvaTable+lvaTrackedToVarNum[i];
+ fpLclFPVars[numFPVars++] = lvaTable + lvaTrackedToVarNum[i];
}
}
- unsigned maxRegVars = 0; // Max num of regvars at one time
-
+ unsigned maxRegVars = 0; // Max num of regvars at one time
+
for (unsigned sortNum = 0; sortNum < numFPVars; sortNum++)
- {
+ {
#ifdef DEBUG
{
JITDUMP("\n");
JITDUMP("FP regvar candidates:\n");
-
- for (unsigned i = sortNum ; i < numFPVars ; i++)
+
+ for (unsigned i = sortNum; i < numFPVars; i++)
{
- varDsc = fpLclFPVars[i];
+ varDsc = fpLclFPVars[i];
unsigned lclNum = varDsc - lvaTable;
- unsigned varIndex; varIndex = varDsc->lvVarIndex;
-
- JITDUMP("V%02u/T%02u RefCount: %u Weight: %u ; Payload: %u ; Overflow: %u\n",
- lclNum,
- varIndex,
- varDsc->lvRefCnt,
- varDsc->lvRefCntWtd,
- raPayloadStackFP[varIndex],
- raHeightsStackFP[varIndex][FP_VIRTUALREGISTERS]
- );
+ unsigned varIndex;
+ varIndex = varDsc->lvVarIndex;
+
+ JITDUMP("V%02u/T%02u RefCount: %u Weight: %u ; Payload: %u ; Overflow: %u\n", lclNum, varIndex,
+ varDsc->lvRefCnt, varDsc->lvRefCntWtd, raPayloadStackFP[varIndex],
+ raHeightsStackFP[varIndex][FP_VIRTUALREGISTERS]);
}
JITDUMP("\n");
}
@@ -4470,7 +4309,7 @@ void Compiler::raEnregisterVarsStackFP()
unsigned min = sortNum;
// Find the one that will save us most
- for (unsigned i = sortNum + 1; i < numFPVars ; i++)
+ for (unsigned i = sortNum + 1; i < numFPVars; i++)
{
if (raVarIsGreaterValueStackFP(fpLclFPVars[i], fpLclFPVars[sortNum]))
{
@@ -4480,12 +4319,11 @@ void Compiler::raEnregisterVarsStackFP()
// Put it at the top of the array
LclVarDsc* temp;
- temp = fpLclFPVars[min];
- fpLclFPVars[min] = fpLclFPVars[sortNum];
- fpLclFPVars[sortNum] = temp;
+ temp = fpLclFPVars[min];
+ fpLclFPVars[min] = fpLclFPVars[sortNum];
+ fpLclFPVars[sortNum] = temp;
-
- varDsc = fpLclFPVars[sortNum];
+ varDsc = fpLclFPVars[sortNum];
#ifdef DEBUG
unsigned lclNum = varDsc - lvaTable;
@@ -4495,70 +4333,55 @@ void Compiler::raEnregisterVarsStackFP()
assert(VarSetOps::IsMember(this, optAllFloatVars, varIndex));
JITDUMP("Candidate for enregistering: V%02u/T%02u RefCount: %u Weight: %u ; Payload: %u ; Overflow: %u\n",
- lclNum,
- varIndex,
- varDsc->lvRefCnt,
- varDsc->lvRefCntWtd,
- raPayloadStackFP[varIndex],
- raHeightsStackFP[varIndex][FP_VIRTUALREGISTERS]
- );
-
+ lclNum, varIndex, varDsc->lvRefCnt, varDsc->lvRefCntWtd, raPayloadStackFP[varIndex],
+ raHeightsStackFP[varIndex][FP_VIRTUALREGISTERS]);
bool bMeetsThreshold = true;
- if (varDsc->lvRefCnt < FPENREGTHRESHOLD ||
- varDsc->lvRefCntWtd < FPENREGTHRESHOLD_WEIGHTED)
+ if (varDsc->lvRefCnt < FPENREGTHRESHOLD || varDsc->lvRefCntWtd < FPENREGTHRESHOLD_WEIGHTED)
{
bMeetsThreshold = false;
}
// We don't want to enregister arguments with only one use, as they will be
         // loaded in the prolog. Just don't enregister them and load them lazily.
- if (varDsc->lvIsParam && ( varDsc->lvRefCnt <= FPENREGTHRESHOLD ||
- varDsc->lvRefCntWtd <= FPENREGTHRESHOLD_WEIGHTED))
+ if (varDsc->lvIsParam &&
+ (varDsc->lvRefCnt <= FPENREGTHRESHOLD || varDsc->lvRefCntWtd <= FPENREGTHRESHOLD_WEIGHTED))
{
bMeetsThreshold = false;
}
-
if (!bMeetsThreshold
#ifdef DEBUG
&& codeGen->genStressFloat() != 1
-#endif
+#endif
)
{
// Doesn't meet bar, do next
- JITDUMP("V%02u/T%02u doesnt meet threshold. Won't enregister\n",
- lclNum,
- varIndex);
+ JITDUMP("V%02u/T%02u doesnt meet threshold. Won't enregister\n", lclNum, varIndex);
continue;
}
// We don't want to have problems with overflow (we now have 2 unsigned counters
// that can possibly go to their limits), so we just promote to double here.
// diff
- double balance = double(varDsc->lvRefCntWtd) -
- double(raPayloadStackFP[varIndex]) - // Additional costs of enregistering variable
- double(raHeightsStackFP[varIndex][FP_VIRTUALREGISTERS])- // Spilling costs of enregistering variable
- double(FPENREGTHRESHOLD_WEIGHTED);
-
- JITDUMP("balance = %d - %d - %d - %d\n",
- varDsc->lvRefCntWtd,
- raPayloadStackFP[varIndex],
- raHeightsStackFP[varIndex][FP_VIRTUALREGISTERS],
- FPENREGTHRESHOLD_WEIGHTED
- );
-
- if (balance < 0.0
+ double balance =
+ double(varDsc->lvRefCntWtd) -
+ double(raPayloadStackFP[varIndex]) - // Additional costs of enregistering variable
+ double(raHeightsStackFP[varIndex][FP_VIRTUALREGISTERS]) - // Spilling costs of enregistering variable
+ double(FPENREGTHRESHOLD_WEIGHTED);
+
+ JITDUMP("balance = %d - %d - %d - %d\n", varDsc->lvRefCntWtd, raPayloadStackFP[varIndex],
+ raHeightsStackFP[varIndex][FP_VIRTUALREGISTERS], FPENREGTHRESHOLD_WEIGHTED);
+
+ if (balance < 0.0
#ifdef DEBUG
&& codeGen->genStressFloat() != 1
-#endif
+#endif
)
{
// Doesn't meet bar, do next
- JITDUMP("V%02u/T%02u doesnt meet threshold. Won't enregister\n",
- lclNum,
- varIndex);
+ JITDUMP("V%02u/T%02u doesnt meet threshold. Won't enregister\n", lclNum, varIndex);
continue;
}
@@ -4566,9 +4389,7 @@ void Compiler::raEnregisterVarsStackFP()
if (reg == REG_FPNONE)
{
         // Didn't make it (interferes with other regvars), do next
- JITDUMP("V%02u/T%02u interferes with other enreg vars. Won't enregister\n",
- lclNum,
- varIndex);
+ JITDUMP("V%02u/T%02u interferes with other enreg vars. Won't enregister\n", lclNum, varIndex);
continue;
}
@@ -4579,29 +4400,29 @@ void Compiler::raEnregisterVarsStackFP()
// promotion type PROMOTION_TYPE_DEPENDENT.
continue;
}
-
+
// Yipee, we will enregister var.
varDsc->lvRegister = true;
varDsc->lvRegNum = reg;
VarSetOps::AddElemD(this, optAllFPregVars, varIndex);
-#ifdef DEBUG
+#ifdef DEBUG
raDumpVariableRegIntfFloat();
- if (verbose) {
+ if (verbose)
+ {
printf("; ");
gtDispLclVar(lclNum);
- printf("V%02u/T%02u (refcnt=%2u,refwtd=%4u%s) enregistered in %s\n",
- varIndex, varDsc->lvVarIndex, varDsc->lvRefCnt,
- varDsc->lvRefCntWtd/2, (varDsc->lvRefCntWtd & 1) ? ".5" : "",
- CodeGen::regVarNameStackFP(varDsc->lvRegNum));
- }
+ printf("V%02u/T%02u (refcnt=%2u,refwtd=%4u%s) enregistered in %s\n", varIndex, varDsc->lvVarIndex,
+ varDsc->lvRefCnt, varDsc->lvRefCntWtd / 2, (varDsc->lvRefCntWtd & 1) ? ".5" : "",
+ CodeGen::regVarNameStackFP(varDsc->lvRegNum));
+ }
JITDUMP("\n");
#endif
// Create interferences with other variables.
- assert( VarSetOps::IsEmpty(this, VarSetOps::Diff(this, raLclRegIntfFloat[(int) reg], optAllFloatVars)) );
+ assert(VarSetOps::IsEmpty(this, VarSetOps::Diff(this, raLclRegIntfFloat[(int)reg], optAllFloatVars)));
VARSET_TP VARSET_INIT_NOCOPY(intfFloats, VarSetOps::Intersection(this, lvaVarIntf[varIndex], optAllFloatVars));
VarSetOps::UnionD(this, raLclRegIntfFloat[reg], intfFloats);
@@ -4613,38 +4434,33 @@ void Compiler::raEnregisterVarsStackFP()
maxRegVars = min(REG_FPCOUNT, max(maxRegVars, VarSetOps::Count(this, intfFloats)));
}
- assert( VarSetOps::IsSubset(this, optAllFPregVars, optAllFloatVars) );
- assert( VarSetOps::IsEmpty(this, VarSetOps::Intersection(this, optAllFPregVars, raMaskDontEnregFloat)) );
+ assert(VarSetOps::IsSubset(this, optAllFPregVars, optAllFloatVars));
+ assert(VarSetOps::IsEmpty(this, VarSetOps::Intersection(this, optAllFPregVars, raMaskDontEnregFloat)));
// This is a bit conservative, as they may not all go through a call.
// If we have to, we can fix this.
tmpDoubleSpillMax += maxRegVars;
     // Do pass marking trees as regvars
- raEnregisterVarsPostPassStackFP();
-
+ raEnregisterVarsPostPassStackFP();
#ifdef DEBUG
{
JITDUMP("FP enregistration summary\n");
-
+
unsigned i;
- for (i = 0 ; i < numFPVars ; i++)
+ for (i = 0; i < numFPVars; i++)
{
- varDsc = fpLclFPVars[i];
+ varDsc = fpLclFPVars[i];
if (varDsc->lvRegister)
{
unsigned lclNum = varDsc - lvaTable;
- unsigned varIndex; varIndex = varDsc->lvVarIndex;
-
- JITDUMP("Enregistered V%02u/T%02u in FPV%i RefCount: %u Weight: %u \n",
- lclNum,
- varIndex,
- varDsc->lvRegNum,
- varDsc->lvRefCnt,
- varDsc->lvRefCntWtd
- );
+ unsigned varIndex;
+ varIndex = varDsc->lvVarIndex;
+
+ JITDUMP("Enregistered V%02u/T%02u in FPV%i RefCount: %u Weight: %u \n", lclNum, varIndex,
+ varDsc->lvRegNum, varDsc->lvRefCnt, varDsc->lvRefCntWtd);
}
}
JITDUMP("End of FP enregistration summary\n\n");
@@ -4661,20 +4477,14 @@ regMaskTP CodeGenInterface::genStressLockedMaskFloat()
// Don't use REG_FPV0 or REG_FPV1, they're reserved
if (genStressFloat() == 1)
{
- return genRegMaskFloat(REG_FPV4) |
- genRegMaskFloat(REG_FPV5) |
- genRegMaskFloat(REG_FPV6) |
- genRegMaskFloat(REG_FPV7) ;
+ return genRegMaskFloat(REG_FPV4) | genRegMaskFloat(REG_FPV5) | genRegMaskFloat(REG_FPV6) |
+ genRegMaskFloat(REG_FPV7);
}
else
{
- return genRegMaskFloat(REG_FPV2) |
- genRegMaskFloat(REG_FPV3) |
- genRegMaskFloat(REG_FPV4) |
- genRegMaskFloat(REG_FPV5) |
- genRegMaskFloat(REG_FPV6) |
- genRegMaskFloat(REG_FPV7) ;
- }
+ return genRegMaskFloat(REG_FPV2) | genRegMaskFloat(REG_FPV3) | genRegMaskFloat(REG_FPV4) |
+ genRegMaskFloat(REG_FPV5) | genRegMaskFloat(REG_FPV6) | genRegMaskFloat(REG_FPV7);
+ }
}
#endif
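
(Taken together, raEnregisterVarsStackFP above enregisters a candidate only when its weighted ref count outweighs the payload and spill-height costs and a non-interfering virtual register is still free. A standalone sketch of just the cost test follows; every name below is a stand-in and only the arithmetic mirrors the code in this diff:)

// Illustrative cost/benefit check modeled on raEnregisterVarsStackFP.
struct FPCandidate
{
    unsigned refCnt;         // lvRefCnt
    unsigned refCntWtd;      // lvRefCntWtd
    unsigned payload;        // raPayloadStackFP[varIndex]
    unsigned overflowHeight; // raHeightsStackFP[varIndex][FP_VIRTUALREGISTERS]
    bool     isParam;
};

static bool WorthEnregistering(const FPCandidate& c)
{
    const unsigned threshold = 1; // FPENREGTHRESHOLD / FPENREGTHRESHOLD_WEIGHTED

    if (c.refCnt < threshold || c.refCntWtd < threshold)
        return false;
    if (c.isParam && (c.refCnt <= threshold || c.refCntWtd <= threshold))
        return false; // a one-use parameter would just be reloaded in the prolog

    // Promote to double so the unsigned counters cannot overflow, as the source notes.
    double balance = double(c.refCntWtd) - double(c.payload) - double(c.overflowHeight) - double(threshold);
    return balance >= 0.0;
}

A candidate that passes this test still needs raRegForVarStackFP to find a virtual register whose interference set does not already contain it.
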
diff --git a/src/jit/target.h b/src/jit/target.h
index 0d1a6f26c6..6413e087b4 100644
--- a/src/jit/target.h
+++ b/src/jit/target.h
@@ -19,41 +19,41 @@
#endif
#if (defined(FEATURE_CORECLR) && defined(PLATFORM_UNIX))
-#define FEATURE_VARARG 0
+#define FEATURE_VARARG 0
#else // !(defined(FEATURE_CORECLR) && defined(PLATFORM_UNIX))
-#define FEATURE_VARARG 1
+#define FEATURE_VARARG 1
#endif // !(defined(FEATURE_CORECLR) && defined(PLATFORM_UNIX))
/*****************************************************************************/
// The following are human readable names for the target architectures
#if defined(_TARGET_X86_)
- #define TARGET_READABLE_NAME "X86"
+#define TARGET_READABLE_NAME "X86"
#elif defined(_TARGET_AMD64_)
- #define TARGET_READABLE_NAME "AMD64"
+#define TARGET_READABLE_NAME "AMD64"
#elif defined(_TARGET_ARM_)
- #define TARGET_READABLE_NAME "ARM"
+#define TARGET_READABLE_NAME "ARM"
#elif defined(_TARGET_ARM64_)
- #define TARGET_READABLE_NAME "ARM64"
+#define TARGET_READABLE_NAME "ARM64"
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif
/*****************************************************************************/
// The following are intended to capture only those #defines that cannot be replaced
// with static const members of Target
#if defined(_TARGET_X86_) && defined(LEGACY_BACKEND)
- #define REGMASK_BITS 8 // number of bits used to represent register mask
+#define REGMASK_BITS 8 // number of bits used to represent register mask
#elif defined(_TARGET_XARCH_)
- #define REGMASK_BITS 32
+#define REGMASK_BITS 32
#elif defined(_TARGET_ARM_)
- #define REGMASK_BITS 64
+#define REGMASK_BITS 64
#elif defined(_TARGET_ARM64_)
- #define REGMASK_BITS 64
+#define REGMASK_BITS 64
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif
/*****************************************************************************/
@@ -61,22 +61,20 @@
#if defined(_TARGET_ARM_)
DECLARE_TYPED_ENUM(_regNumber_enum, unsigned)
{
- #define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
- #define REGALIAS(alias, realname) REG_##alias = REG_##realname,
- #include "register.h"
+#define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
+#define REGALIAS(alias, realname) REG_##alias = REG_##realname,
+#include "register.h"
- REG_COUNT,
- REG_NA = REG_COUNT,
- ACTUAL_REG_COUNT = REG_COUNT-1 // everything but REG_STK (only real regs)
+ REG_COUNT, REG_NA = REG_COUNT, ACTUAL_REG_COUNT = REG_COUNT - 1 // everything but REG_STK (only real regs)
}
END_DECLARE_TYPED_ENUM(_regNumber_enum, unsigned)
DECLARE_TYPED_ENUM(_regMask_enum, unsigned __int64)
{
RBM_NONE = 0,
- #define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
- #define REGALIAS(alias, realname) RBM_##alias = RBM_##realname,
- #include "register.h"
+#define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
+#define REGALIAS(alias, realname) RBM_##alias = RBM_##realname,
+#include "register.h"
}
END_DECLARE_TYPED_ENUM(_regMask_enum, unsigned __int64)
@@ -84,22 +82,20 @@ END_DECLARE_TYPED_ENUM(_regMask_enum, unsigned __int64)
DECLARE_TYPED_ENUM(_regNumber_enum, unsigned)
{
- #define REGDEF(name, rnum, mask, xname, wname) REG_##name = rnum,
- #define REGALIAS(alias, realname) REG_##alias = REG_##realname,
- #include "register.h"
+#define REGDEF(name, rnum, mask, xname, wname) REG_##name = rnum,
+#define REGALIAS(alias, realname) REG_##alias = REG_##realname,
+#include "register.h"
- REG_COUNT,
- REG_NA = REG_COUNT,
- ACTUAL_REG_COUNT = REG_COUNT-1 // everything but REG_STK (only real regs)
+ REG_COUNT, REG_NA = REG_COUNT, ACTUAL_REG_COUNT = REG_COUNT - 1 // everything but REG_STK (only real regs)
}
END_DECLARE_TYPED_ENUM(_regNumber_enum, unsigned)
DECLARE_TYPED_ENUM(_regMask_enum, unsigned __int64)
{
RBM_NONE = 0,
- #define REGDEF(name, rnum, mask, xname, wname) RBM_##name = mask,
- #define REGALIAS(alias, realname) RBM_##alias = RBM_##realname,
- #include "register.h"
+#define REGDEF(name, rnum, mask, xname, wname) RBM_##name = mask,
+#define REGALIAS(alias, realname) RBM_##alias = RBM_##realname,
+#include "register.h"
}
END_DECLARE_TYPED_ENUM(_regMask_enum, unsigned __int64)
@@ -107,13 +103,11 @@ END_DECLARE_TYPED_ENUM(_regMask_enum, unsigned __int64)
DECLARE_TYPED_ENUM(_regNumber_enum, unsigned)
{
- #define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
- #define REGALIAS(alias, realname) REG_##alias = REG_##realname,
- #include "register.h"
+#define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
+#define REGALIAS(alias, realname) REG_##alias = REG_##realname,
+#include "register.h"
- REG_COUNT,
- REG_NA = REG_COUNT,
- ACTUAL_REG_COUNT = REG_COUNT-1 // everything but REG_STK (only real regs)
+ REG_COUNT, REG_NA = REG_COUNT, ACTUAL_REG_COUNT = REG_COUNT - 1 // everything but REG_STK (only real regs)
}
END_DECLARE_TYPED_ENUM(_regNumber_enum, unsigned)
@@ -121,9 +115,9 @@ DECLARE_TYPED_ENUM(_regMask_enum, unsigned)
{
RBM_NONE = 0,
- #define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
- #define REGALIAS(alias, realname) RBM_##alias = RBM_##realname,
- #include "register.h"
+#define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
+#define REGALIAS(alias, realname) RBM_##alias = RBM_##realname,
+#include "register.h"
}
END_DECLARE_TYPED_ENUM(_regMask_enum, unsigned)
@@ -132,13 +126,11 @@ END_DECLARE_TYPED_ENUM(_regMask_enum, unsigned)
#ifndef LEGACY_BACKEND
DECLARE_TYPED_ENUM(_regNumber_enum, unsigned)
{
- #define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
- #define REGALIAS(alias, realname) REG_##alias = REG_##realname,
- #include "register.h"
+#define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
+#define REGALIAS(alias, realname) REG_##alias = REG_##realname,
+#include "register.h"
- REG_COUNT,
- REG_NA = REG_COUNT,
- ACTUAL_REG_COUNT = REG_COUNT-1 // everything but REG_STK (only real regs)
+ REG_COUNT, REG_NA = REG_COUNT, ACTUAL_REG_COUNT = REG_COUNT - 1 // everything but REG_STK (only real regs)
}
END_DECLARE_TYPED_ENUM(_regNumber_enum, unsigned)
@@ -146,32 +138,30 @@ DECLARE_TYPED_ENUM(_regMask_enum, unsigned)
{
RBM_NONE = 0,
- #define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
- #define REGALIAS(alias, realname) RBM_##alias = RBM_##realname,
- #include "register.h"
+#define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
+#define REGALIAS(alias, realname) RBM_##alias = RBM_##realname,
+#include "register.h"
}
END_DECLARE_TYPED_ENUM(_regMask_enum, unsigned)
#else // LEGACY_BACKEND
DECLARE_TYPED_ENUM(_regNumber_enum, unsigned)
{
- #define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
- #define REGALIAS(alias, realname) REG_##alias = REG_##realname,
- #include "register.h"
+#define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
+#define REGALIAS(alias, realname) REG_##alias = REG_##realname,
+#include "register.h"
- REG_COUNT,
- REG_NA = REG_COUNT,
- ACTUAL_REG_COUNT = REG_COUNT-1, // everything but REG_STK (only real regs)
-
- #define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
- #include "registerfp.h"
+ REG_COUNT, REG_NA = REG_COUNT,
+ ACTUAL_REG_COUNT = REG_COUNT - 1, // everything but REG_STK (only real regs)
- REG_FPCOUNT,
- REG_FPNONE = REG_FPCOUNT,
+#define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
+#include "registerfp.h"
- #define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
- #include "registerxmm.h"
+ REG_FPCOUNT, REG_FPNONE = REG_FPCOUNT,
- REG_XMMCOUNT
+#define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
+#include "registerxmm.h"
+
+ REG_XMMCOUNT
}
END_DECLARE_TYPED_ENUM(_regNumber_enum, unsigned)
@@ -179,15 +169,15 @@ DECLARE_TYPED_ENUM(_regMask_enum, unsigned)
{
RBM_NONE = 0,
- #define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
- #define REGALIAS(alias, realname) RBM_##alias = RBM_##realname,
- #include "register.h"
+#define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
+#define REGALIAS(alias, realname) RBM_##alias = RBM_##realname,
+#include "register.h"
- #define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
- #include "registerfp.h"
+#define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
+#include "registerfp.h"
- #define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
- #include "registerxmm.h"
+#define REGDEF(name, rnum, mask, sname) RBM_##name = mask,
+#include "registerxmm.h"
}
END_DECLARE_TYPED_ENUM(_regMask_enum, unsigned)
@@ -204,42 +194,44 @@ END_DECLARE_TYPED_ENUM(_regMask_enum, unsigned)
*/
#ifdef _TARGET_ARM_
-#define REG_PAIR_NBITS 6
+#define REG_PAIR_NBITS 6
#else
-#define REG_PAIR_NBITS 4
+#define REG_PAIR_NBITS 4
#endif
-#define REG_PAIR_NMASK ((1<<REG_PAIR_NBITS)-1)
+#define REG_PAIR_NMASK ((1 << REG_PAIR_NBITS) - 1)
#ifdef DEBUG
// Under DEBUG, we want to make sure that code doesn't accidentally confuse a reg pair value
// with a simple register number. Thus, we offset the reg pair numbers so they are distinct
// from all register numbers. Note that this increases the minimum size of a regPairNoSmall
// type due to the additional bits used for this offset.
-#define REG_PAIR_FIRST (7 << REG_PAIR_NBITS)
-#define REG_PAIR_NBITS_DEBUG (REG_PAIR_NBITS + 3) // extra bits needed by the debug shifting (3 instead of 0 because we shift "7", not "1", above).
-C_ASSERT(REG_COUNT < REG_PAIR_FIRST); // make sure the register numbers (including REG_NA, ignoring fp/xmm regs on x86/x64) are distinct from the pair numbers
+#define REG_PAIR_FIRST (7 << REG_PAIR_NBITS)
+#define REG_PAIR_NBITS_DEBUG \
+ (REG_PAIR_NBITS + \
+ 3) // extra bits needed by the debug shifting (3 instead of 0 because we shift "7", not "1", above).
+C_ASSERT(REG_COUNT < REG_PAIR_FIRST); // make sure the register numbers (including REG_NA, ignoring fp/xmm regs on
+ // x86/x64) are distinct from the pair numbers
#else
-#define REG_PAIR_FIRST 0
+#define REG_PAIR_FIRST 0
#endif
DECLARE_TYPED_ENUM(_regPairNo_enum, unsigned)
{
- #define PAIRDEF(rlo,rhi) REG_PAIR_##rlo##rhi = REG_##rlo + (REG_##rhi << REG_PAIR_NBITS) + REG_PAIR_FIRST,
- #include "regpair.h"
+#define PAIRDEF(rlo, rhi) REG_PAIR_##rlo##rhi = REG_##rlo + (REG_##rhi << REG_PAIR_NBITS) + REG_PAIR_FIRST,
+#include "regpair.h"
- REG_PAIR_LAST = (REG_COUNT - 1) + ((REG_COUNT - 1) << REG_PAIR_NBITS) + REG_PAIR_FIRST,
+ REG_PAIR_LAST = (REG_COUNT - 1) + ((REG_COUNT - 1) << REG_PAIR_NBITS) + REG_PAIR_FIRST,
- REG_PAIR_NONE = REG_PAIR_LAST + 1
+ REG_PAIR_NONE = REG_PAIR_LAST + 1
}
END_DECLARE_TYPED_ENUM(_regPairNo_enum, unsigned)
enum regPairMask
{
- #define PAIRDEF(rlo,rhi) RBM_PAIR_##rlo##rhi = (RBM_##rlo|RBM_##rhi),
- #include "regpair.h"
+#define PAIRDEF(rlo, rhi) RBM_PAIR_##rlo##rhi = (RBM_##rlo | RBM_##rhi),
+#include "regpair.h"
};
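
(The PAIRDEF expansion above packs two register numbers into one value: the low register in the bottom REG_PAIR_NBITS bits, the high register shifted above it, plus the REG_PAIR_FIRST debug offset. A small illustrative encode/decode pair under those definitions; the helper names are not JIT functions:)

// Illustrative packing per the PAIRDEF scheme above.
const unsigned kPairBits  = 4;                     // REG_PAIR_NBITS on x86/x64 (6 on ARM)
const unsigned kPairMask  = (1u << kPairBits) - 1; // REG_PAIR_NMASK
const unsigned kPairFirst = 0;                     // REG_PAIR_FIRST outside DEBUG builds

static unsigned EncodePair(unsigned regLo, unsigned regHi)
{
    return regLo + (regHi << kPairBits) + kPairFirst;
}

static void DecodePair(unsigned pair, unsigned* regLo, unsigned* regHi)
{
    unsigned raw = pair - kPairFirst;
    *regLo       = raw & kPairMask;
    *regHi       = (raw >> kPairBits) & kPairMask;
}
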
-
/*****************************************************************************/
// TODO-Cleanup: The types defined below are mildly confusing: why are there both?
@@ -252,36 +244,36 @@ enum regPairMask
// be lost.
#ifdef _TARGET_ARMARCH_
-typedef unsigned __int64 regMaskTP;
+typedef unsigned __int64 regMaskTP;
#else
-typedef unsigned regMaskTP;
+typedef unsigned regMaskTP;
#endif
#if REGMASK_BITS == 8
-typedef unsigned char regMaskSmall;
-#define REG_MASK_INT_FMT "%02X"
-#define REG_MASK_ALL_FMT "%02X"
+typedef unsigned char regMaskSmall;
+#define REG_MASK_INT_FMT "%02X"
+#define REG_MASK_ALL_FMT "%02X"
#elif REGMASK_BITS == 16
-typedef unsigned short regMaskSmall;
-#define REG_MASK_INT_FMT "%04X"
-#define REG_MASK_ALL_FMT "%04X"
+typedef unsigned short regMaskSmall;
+#define REG_MASK_INT_FMT "%04X"
+#define REG_MASK_ALL_FMT "%04X"
#elif REGMASK_BITS == 32
-typedef unsigned regMaskSmall;
-#define REG_MASK_INT_FMT "%08X"
-#define REG_MASK_ALL_FMT "%08X"
+typedef unsigned regMaskSmall;
+#define REG_MASK_INT_FMT "%08X"
+#define REG_MASK_ALL_FMT "%08X"
#else
-typedef unsigned __int64 regMaskSmall;
-#define REG_MASK_INT_FMT "%04llX"
-#define REG_MASK_ALL_FMT "%016llX"
+typedef unsigned __int64 regMaskSmall;
+#define REG_MASK_INT_FMT "%04llX"
+#define REG_MASK_ALL_FMT "%016llX"
#endif
-typedef _regNumber_enum regNumber;
-typedef _regPairNo_enum regPairNo;
+typedef _regNumber_enum regNumber;
+typedef _regPairNo_enum regPairNo;
// LSRA currently converts freely between regNumber and regPairNo, so make sure they are the same size.
C_ASSERT(sizeof(regPairNo) == sizeof(regNumber));
-typedef unsigned char regNumberSmall;
+typedef unsigned char regNumberSmall;
#ifdef DEBUG
@@ -290,35 +282,35 @@ typedef unsigned char regNumberSmall;
#if ((2 * REG_PAIR_NBITS) + REG_PAIR_NBITS_DEBUG) <= 16
C_ASSERT(((2 * REG_PAIR_NBITS) + REG_PAIR_NBITS_DEBUG) > 8); // assert that nobody fits in 8 bits
-typedef unsigned short regPairNoSmall; // x86/x64: need 15 bits
+typedef unsigned short regPairNoSmall; // x86/x64: need 15 bits
#else
C_ASSERT(((2 * REG_PAIR_NBITS) + REG_PAIR_NBITS_DEBUG) <= 32);
-typedef unsigned regPairNoSmall; // arm: need 21 bits
+typedef unsigned regPairNoSmall; // arm: need 21 bits
#endif
#else // DEBUG
#if (2 * REG_PAIR_NBITS) <= 8
-typedef unsigned char regPairNoSmall; // x86/x64: need 8 bits
+typedef unsigned char regPairNoSmall; // x86/x64: need 8 bits
#else
-C_ASSERT((2 * REG_PAIR_NBITS) <= 16); // assert that nobody needs more than 16 bits
-typedef unsigned short regPairNoSmall; // arm: need 12 bits
+C_ASSERT((2 * REG_PAIR_NBITS) <= 16); // assert that nobody needs more than 16 bits
+typedef unsigned short regPairNoSmall; // arm: need 12 bits
#endif
#endif // DEBUG
/*****************************************************************************/
-#define LEA_AVAILABLE 1
-#define SCALED_ADDR_MODES 1
+#define LEA_AVAILABLE 1
+#define SCALED_ADDR_MODES 1
/*****************************************************************************/
-#ifdef DEBUG
-#define DSP_SRC_OPER_LEFT 0
-#define DSP_SRC_OPER_RIGHT 1
-#define DSP_DST_OPER_LEFT 1
-#define DSP_DST_OPER_RIGHT 0
+#ifdef DEBUG
+#define DSP_SRC_OPER_LEFT 0
+#define DSP_SRC_OPER_RIGHT 1
+#define DSP_DST_OPER_LEFT 1
+#define DSP_DST_OPER_RIGHT 0
#endif
/*****************************************************************************/
@@ -1846,11 +1838,15 @@ C_ASSERT((FEATURE_TAILCALL_OPT == 0) || (FEATURE_FASTTAILCALL == 1));
class Target
{
public:
- static const char * g_tgtCPUName;
- static const char * g_tgtPlatformName;
+ static const char* g_tgtCPUName;
+ static const char* g_tgtPlatformName;
- enum ArgOrder { ARG_ORDER_R2L, ARG_ORDER_L2R };
- static const enum ArgOrder g_tgtArgOrder;
+ enum ArgOrder
+ {
+ ARG_ORDER_R2L,
+ ARG_ORDER_L2R
+ };
+ static const enum ArgOrder g_tgtArgOrder;
#if NOGC_WRITE_BARRIERS
static regMaskTP exclude_WriteBarrierReg(regMaskTP mask)
@@ -1865,46 +1861,52 @@ public:
};
#if defined(DEBUG) || defined(LATE_DISASM)
- const char * getRegName(unsigned reg, bool isFloat = false); // this is for gcencode.cpp and disasm.cpp that don't use the regNumber type
- const char * getRegName(regNumber reg, bool isFloat = false);
+const char* getRegName(unsigned reg, bool isFloat = false); // this is for gcencode.cpp and disasm.cpp that don't use
+ // the regNumber type
+const char* getRegName(regNumber reg, bool isFloat = false);
#endif // defined(DEBUG) || defined(LATE_DISASM)
#ifdef DEBUG
- const char * getRegNameFloat(regNumber reg, var_types type);
- extern void dspRegMask(regMaskTP regMask, size_t minSiz = 0);
+const char* getRegNameFloat(regNumber reg, var_types type);
+extern void dspRegMask(regMaskTP regMask, size_t minSiz = 0);
#endif
#if CPU_HAS_BYTE_REGS
- inline BOOL isByteReg(regNumber reg) { return (reg <= REG_EBX); }
+inline BOOL isByteReg(regNumber reg)
+{
+ return (reg <= REG_EBX);
+}
#else
- inline BOOL isByteReg(regNumber reg) { return true; }
+inline BOOL isByteReg(regNumber reg)
+{
+ return true;
+}
#endif
#ifdef LEGACY_BACKEND
-extern const regNumber raRegTmpOrder[REG_TMP_ORDER_COUNT];
-extern const regNumber rpRegTmpOrder[REG_TMP_ORDER_COUNT];
+extern const regNumber raRegTmpOrder[REG_TMP_ORDER_COUNT];
+extern const regNumber rpRegTmpOrder[REG_TMP_ORDER_COUNT];
#if FEATURE_FP_REGALLOC
-extern const regNumber raRegFltTmpOrder[REG_FLT_TMP_ORDER_COUNT];
+extern const regNumber raRegFltTmpOrder[REG_FLT_TMP_ORDER_COUNT];
#endif
#endif // LEGACY_BACKEND
-inline regMaskTP genRegMask(regNumber reg);
-inline regMaskTP genRegMaskFloat(regNumber reg, var_types type = TYP_DOUBLE);
-
+inline regMaskTP genRegMask(regNumber reg);
+inline regMaskTP genRegMaskFloat(regNumber reg, var_types type = TYP_DOUBLE);
/*****************************************************************************
* Return true if the register number is valid
*/
-inline bool genIsValidReg(regNumber reg)
+inline bool genIsValidReg(regNumber reg)
{
/* It's safest to perform an unsigned comparison in case reg is negative */
- return ((unsigned) reg < (unsigned) REG_COUNT);
+ return ((unsigned)reg < (unsigned)REG_COUNT);
}
/*****************************************************************************
* Return true if the register is a valid integer register
*/
-inline bool genIsValidIntReg(regNumber reg)
+inline bool genIsValidIntReg(regNumber reg)
{
return reg >= REG_INT_FIRST && reg <= REG_INT_LAST;
}
@@ -1912,7 +1914,7 @@ inline bool genIsValidIntReg(regNumber reg)
/*****************************************************************************
* Return true if the register is a valid floating point register
*/
-inline bool genIsValidFloatReg(regNumber reg)
+inline bool genIsValidFloatReg(regNumber reg)
{
return reg >= REG_FP_FIRST && reg <= REG_FP_LAST;
}
@@ -1922,7 +1924,7 @@ inline bool genIsValidFloatReg(regNumber reg)
/*****************************************************************************
* Return true if the register is a valid floating point double register
*/
-inline bool genIsValidDoubleReg(regNumber reg)
+inline bool genIsValidDoubleReg(regNumber reg)
{
return genIsValidFloatReg(reg) && (((reg - REG_FP_FIRST) & 0x1) == 0);
}
@@ -1930,10 +1932,10 @@ inline bool genIsValidDoubleReg(regNumber reg)
#endif // defined(LEGACY_BACKEND) && defined(_TARGET_ARM_)
//-------------------------------------------------------------------------------------------
-// hasFixedRetBuffReg:
+// hasFixedRetBuffReg:
// Returns true if our target architecture uses a fixed return buffer register
//
-inline bool hasFixedRetBuffReg()
+inline bool hasFixedRetBuffReg()
{
#ifdef _TARGET_ARM64_
return true;
@@ -1943,12 +1945,12 @@ inline bool hasFixedRetBuffReg()
}
//-------------------------------------------------------------------------------------------
-// theFixedRetBuffReg:
-// Returns the regNumber to use for the fixed return buffer
-//
-inline regNumber theFixedRetBuffReg()
+// theFixedRetBuffReg:
+// Returns the regNumber to use for the fixed return buffer
+//
+inline regNumber theFixedRetBuffReg()
{
- assert(hasFixedRetBuffReg()); // This predicate should be checked before calling this method
+ assert(hasFixedRetBuffReg()); // This predicate should be checked before calling this method
#ifdef _TARGET_ARM64_
return REG_ARG_RET_BUFF;
#else
@@ -1957,12 +1959,12 @@ inline regNumber theFixedRetBuffReg()
}
//-------------------------------------------------------------------------------------------
-// theFixedRetBuffMask:
-// Returns the regNumber to use for the fixed return buffer
-//
-inline regMaskTP theFixedRetBuffMask()
+// theFixedRetBuffMask:
+// Returns the regNumber to use for the fixed return buffer
+//
+inline regMaskTP theFixedRetBuffMask()
{
- assert(hasFixedRetBuffReg()); // This predicate should be checked before calling this method
+ assert(hasFixedRetBuffReg()); // This predicate should be checked before calling this method
#ifdef _TARGET_ARM64_
return RBM_ARG_RET_BUFF;
#else
@@ -1971,12 +1973,12 @@ inline regMaskTP theFixedRetBuffMask()
}
//-------------------------------------------------------------------------------------------
-// theFixedRetBuffArgNum:
-// Returns the argNum to use for the fixed return buffer
-//
-inline unsigned theFixedRetBuffArgNum()
+// theFixedRetBuffArgNum:
+// Returns the argNum to use for the fixed return buffer
+//
+inline unsigned theFixedRetBuffArgNum()
{
- assert(hasFixedRetBuffReg()); // This predicate should be checked before calling this method
+ assert(hasFixedRetBuffReg()); // This predicate should be checked before calling this method
#ifdef _TARGET_ARM64_
return RET_BUFF_ARGNUM;
#else
@@ -1985,12 +1987,12 @@ inline unsigned theFixedRetBuffArgNum()
}
//-------------------------------------------------------------------------------------------
-// fullIntArgRegMask:
+// fullIntArgRegMask:
// Returns the full mask of all possible integer registers
-// Note this includes the fixed return buffer register on Arm64
+// Note this includes the fixed return buffer register on Arm64
//
-inline regMaskTP fullIntArgRegMask()
-{
+inline regMaskTP fullIntArgRegMask()
+{
if (hasFixedRetBuffReg())
{
return RBM_ARG_REGS | theFixedRetBuffMask();
@@ -1998,31 +2000,31 @@ inline regMaskTP fullIntArgRegMask()
else
{
return RBM_ARG_REGS;
- }
+ }
}
//-------------------------------------------------------------------------------------------
-// isValidIntArgReg:
-// Returns true if the register is a valid integer argument register
-// Note this method also returns true on Arm64 when 'reg' is the RetBuff register
+// isValidIntArgReg:
+// Returns true if the register is a valid integer argument register
+// Note this method also returns true on Arm64 when 'reg' is the RetBuff register
//
-inline bool isValidIntArgReg(regNumber reg)
+inline bool isValidIntArgReg(regNumber reg)
{
return (genRegMask(reg) & fullIntArgRegMask()) != 0;
}
//-------------------------------------------------------------------------------------------
// genRegArgNext:
-// Given a register that is an integer or floating point argument register
-// returns the next argument register
+// Given a register that is an integer or floating point argument register
+// returns the next argument register
//
-regNumber genRegArgNext(regNumber argReg);
+regNumber genRegArgNext(regNumber argReg);
//-------------------------------------------------------------------------------------------
// isValidFloatArgReg:
-// Returns true if the register is a valid floating-point argument register
+// Returns true if the register is a valid floating-point argument register
//
-inline bool isValidFloatArgReg(regNumber reg)
+inline bool isValidFloatArgReg(regNumber reg)
{
if (reg == REG_NA)
{
@@ -2061,7 +2063,10 @@ inline bool floatRegCanHoldType(regNumber reg, var_types type)
// AMD64: xmm registers can hold any float type
// x86: FP stack can hold any float type
// ARM64: Floating-point/SIMD registers can hold any type.
-inline bool floatRegCanHoldType(regNumber reg, var_types type) { return true; }
+inline bool floatRegCanHoldType(regNumber reg, var_types type)
+{
+ return true;
+}
#endif
/*****************************************************************************
@@ -2069,9 +2074,9 @@ inline bool floatRegCanHoldType(regNumber reg, var_types type) { return true; }
* Map a register number to a register mask.
*/
-extern const regMaskSmall regMasks[REG_COUNT];
+extern const regMaskSmall regMasks[REG_COUNT];
-inline regMaskTP genRegMask(regNumber reg)
+inline regMaskTP genRegMask(regNumber reg)
{
assert((unsigned)reg < ArrLen(regMasks));
#ifdef _TARGET_AMD64_
@@ -2093,10 +2098,10 @@ inline regMaskTP genRegMask(regNumber reg)
*/
#if defined(_TARGET_X86_) && defined(LEGACY_BACKEND)
-extern const regMaskSmall regFPMasks[REG_FPCOUNT];
+extern const regMaskSmall regFPMasks[REG_FPCOUNT];
#endif // defined(_TARGET_X86_) && defined(LEGACY_BACKEND)
-inline regMaskTP genRegMaskFloat(regNumber reg, var_types type /* = TYP_DOUBLE */)
+inline regMaskTP genRegMaskFloat(regNumber reg, var_types type /* = TYP_DOUBLE */)
{
#if defined(_TARGET_X86_) && defined(LEGACY_BACKEND)
assert(reg >= REG_FPV0 && reg < REG_FPCOUNT);
@@ -2110,23 +2115,23 @@ inline regMaskTP genRegMaskFloat(regNumber reg, var_types type /* = TYP_DOUBL
assert(floatRegCanHoldType(reg, type));
assert(reg >= REG_F0 && reg <= REG_F31);
- if (type==TYP_DOUBLE)
+ if (type == TYP_DOUBLE)
{
- return regMasks[reg] | regMasks[reg+1];
+ return regMasks[reg] | regMasks[reg + 1];
}
else
{
return regMasks[reg];
}
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif
}
-//------------------------------------------------------------------------
+//------------------------------------------------------------------------
// genRegMask: Given a register, and its type, generate the appropriate regMask
-//
-// Arguments:
+//
+// Arguments:
// regNum - the register of interest
// type - the type of regNum (i.e. the type it is being used as)
//
@@ -2142,11 +2147,11 @@ inline regMaskTP genRegMaskFloat(regNumber reg, var_types type /* = TYP_DOUBL
// For registers that are used in pairs, the caller will be handling
// each member of the pair separately.
//
-inline regMaskTP genRegMask(regNumber regNum, var_types type)
+inline regMaskTP genRegMask(regNumber regNum, var_types type)
{
#ifndef _TARGET_ARM_
return genRegMask(regNum);
-#else
+#else
regMaskTP regMask = RBM_NONE;
if (varTypeIsFloating(type))
@@ -2163,39 +2168,35 @@ inline regMaskTP genRegMask(regNumber regNum, var_types type)
/*****************************************************************************
*
- * These arrays list the callee-saved register numbers (and bitmaps, respectively) for
+ * These arrays list the callee-saved register numbers (and bitmaps, respectively) for
* the current architecture.
*/
-extern const regNumber raRegCalleeSaveOrder[CNT_CALLEE_SAVED];
-extern const regMaskTP raRbmCalleeSaveOrder[CNT_CALLEE_SAVED];
+extern const regNumber raRegCalleeSaveOrder[CNT_CALLEE_SAVED];
+extern const regMaskTP raRbmCalleeSaveOrder[CNT_CALLEE_SAVED];
// This method takes a "compact" bitset of the callee-saved registers, and "expands" it to a full register mask.
-regMaskSmall genRegMaskFromCalleeSavedMask(unsigned short);
-
+regMaskSmall genRegMaskFromCalleeSavedMask(unsigned short);
/*****************************************************************************
*
* Returns the register that holds the low 32 bits of the long value given
* by the register pair 'regPair'.
*/
-inline regNumber genRegPairLo(regPairNo regPair)
+inline regNumber genRegPairLo(regPairNo regPair)
{
- assert(regPair >= REG_PAIR_FIRST &&
- regPair <= REG_PAIR_LAST);
+ assert(regPair >= REG_PAIR_FIRST && regPair <= REG_PAIR_LAST);
- return (regNumber)((regPair - REG_PAIR_FIRST) & REG_PAIR_NMASK);
+ return (regNumber)((regPair - REG_PAIR_FIRST) & REG_PAIR_NMASK);
}
-
/*****************************************************************************
*
* Returns the register that holds the high 32 bits of the long value given
* by the register pair 'regPair'.
*/
-inline regNumber genRegPairHi(regPairNo regPair)
+inline regNumber genRegPairHi(regPairNo regPair)
{
- assert(regPair >= REG_PAIR_FIRST &&
- regPair <= REG_PAIR_LAST);
+ assert(regPair >= REG_PAIR_FIRST && regPair <= REG_PAIR_LAST);
return (regNumber)(((regPair - REG_PAIR_FIRST) >> REG_PAIR_NBITS) & REG_PAIR_NMASK);
}
@@ -2207,8 +2208,7 @@ inline regNumber genRegPairHi(regPairNo regPair)
*
* In debug it also asserts that reg1 and reg2 are not the same.
*/
-bool genIsProperRegPair(regPairNo regPair);
-
+bool genIsProperRegPair(regPairNo regPair);
/*****************************************************************************
*
@@ -2228,12 +2228,10 @@ inline regPairNo gen2regs2pair(regNumber regLo, regNumber regHi)
return regPair;
}
-
/*****************************************************************************/
-inline regMaskTP genRegPairMask(regPairNo regPair)
+inline regMaskTP genRegPairMask(regPairNo regPair)
{
- assert(regPair >= REG_PAIR_FIRST &&
- regPair <= REG_PAIR_LAST);
+ assert(regPair >= REG_PAIR_FIRST && regPair <= REG_PAIR_LAST);
return genRegMask(genRegPairLo(regPair)) | genRegMask(genRegPairHi(regPair));
}
@@ -2244,7 +2242,7 @@ inline regMaskTP genRegPairMask(regPairNo regPair)
* of this type, else REG_NA if there are no more.
*/
-inline regNumber regNextOfType(regNumber reg, var_types type)
+inline regNumber regNextOfType(regNumber reg, var_types type)
{
regNumber regReturn;
@@ -2266,42 +2264,43 @@ inline regNumber regNextOfType(regNumber reg, var_types type)
if (varTypeIsFloating(type))
{
if (regReturn > REG_FP_LAST)
+ {
regReturn = REG_NA;
+ }
}
else
{
if (regReturn > REG_INT_LAST)
+ {
regReturn = REG_NA;
+ }
}
return regReturn;
}
-
/*****************************************************************************
*
* Type checks
*/
-inline
-bool isRegPairType(int /* s/b "var_types" */ type)
+inline bool isRegPairType(int /* s/b "var_types" */ type)
{
#ifdef _TARGET_64BIT_
return false;
#elif CPU_HAS_FP_SUPPORT
- return type == TYP_LONG;
+ return type == TYP_LONG;
#else
- return type == TYP_LONG || type == TYP_DOUBLE;
+ return type == TYP_LONG || type == TYP_DOUBLE;
#endif
}
-inline
-bool isFloatRegType(int /* s/b "var_types" */ type)
+inline bool isFloatRegType(int /* s/b "var_types" */ type)
{
#if CPU_HAS_FP_SUPPORT
- return type == TYP_DOUBLE || type == TYP_FLOAT;
+ return type == TYP_DOUBLE || type == TYP_FLOAT;
#else
- return false;
+ return false;
#endif
}
@@ -2326,5 +2325,5 @@ C_ASSERT((RBM_INT_CALLEE_SAVED & RBM_FPBASE) == RBM_NONE);
/*****************************************************************************/
/*****************************************************************************/
-#endif // _TARGET_H_
+#endif // _TARGET_H_
/*****************************************************************************/
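The PAIRDEF macro, genRegPairLo and genRegPairHi above encode two register numbers in a single regPairNo value: the low register sits in the low REG_PAIR_NBITS bits, the high register in the next REG_PAIR_NBITS bits, and REG_PAIR_FIRST biases the whole value. A minimal standalone C++ sketch of that pack/unpack arithmetic follows; the concrete constant values below are placeholders for illustration, not the JIT's real definitions.

    // Sketch of the regPairNo encoding used by PAIRDEF / genRegPairLo / genRegPairHi.
    // REG_PAIR_NBITS, REG_PAIR_NMASK and REG_PAIR_FIRST are assumed values here;
    // the real ones come from the JIT target headers.
    #include <cassert>

    const unsigned REG_PAIR_NBITS = 6;                          // bits reserved per register number (assumed)
    const unsigned REG_PAIR_NMASK = (1u << REG_PAIR_NBITS) - 1; // mask for one register number
    const unsigned REG_PAIR_FIRST = 1u << (2 * REG_PAIR_NBITS); // bias keeping pairs out of the plain-reg range (assumed)

    unsigned packRegPair(unsigned regLo, unsigned regHi)
    {
        // Mirrors PAIRDEF: lo in the low bits, hi shifted up by REG_PAIR_NBITS, plus the bias.
        return regLo + (regHi << REG_PAIR_NBITS) + REG_PAIR_FIRST;
    }

    unsigned pairLo(unsigned regPair)
    {
        return (regPair - REG_PAIR_FIRST) & REG_PAIR_NMASK;
    }

    unsigned pairHi(unsigned regPair)
    {
        return ((regPair - REG_PAIR_FIRST) >> REG_PAIR_NBITS) & REG_PAIR_NMASK;
    }

    int main()
    {
        unsigned pair = packRegPair(3, 7);
        assert(pairLo(pair) == 3 && pairHi(pair) == 7);
        return 0;
    }

gen2regs2pair and genRegPairMask in the header build on the same encoding: the mask is simply the OR of the masks of the two halves recovered by genRegPairLo and genRegPairHi.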
diff --git a/src/jit/targetamd64.cpp b/src/jit/targetamd64.cpp
index f8413257bc..0cb302ae34 100644
--- a/src/jit/targetamd64.cpp
+++ b/src/jit/targetamd64.cpp
@@ -13,7 +13,7 @@
#include "target.h"
-const char * Target::g_tgtCPUName = "x64";
-const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
+const char* Target::g_tgtCPUName = "x64";
+const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
#endif // _TARGET_AMD64_
diff --git a/src/jit/targetarm.cpp b/src/jit/targetarm.cpp
index 86b494698d..f0ea5ca534 100644
--- a/src/jit/targetarm.cpp
+++ b/src/jit/targetarm.cpp
@@ -13,7 +13,7 @@
#include "target.h"
-const char * Target::g_tgtCPUName = "arm";
-const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
+const char* Target::g_tgtCPUName = "arm";
+const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
#endif // _TARGET_ARM_
diff --git a/src/jit/targetarm64.cpp b/src/jit/targetarm64.cpp
index 738039ea30..2acbe1a050 100644
--- a/src/jit/targetarm64.cpp
+++ b/src/jit/targetarm64.cpp
@@ -13,7 +13,7 @@
#include "target.h"
-const char * Target::g_tgtCPUName = "arm64";
-const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
+const char* Target::g_tgtCPUName = "arm64";
+const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
#endif // _TARGET_ARM64_
diff --git a/src/jit/targetx86.cpp b/src/jit/targetx86.cpp
index 5f2f1391ff..500f4e0651 100644
--- a/src/jit/targetx86.cpp
+++ b/src/jit/targetx86.cpp
@@ -13,7 +13,7 @@
#include "target.h"
-const char * Target::g_tgtCPUName = "x86";
-const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_L2R;
+const char* Target::g_tgtCPUName = "x86";
+const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_L2R;
#endif // _TARGET_X86_
diff --git a/src/jit/tinyarray.h b/src/jit/tinyarray.h
index 33f1ba4886..17d7e044b2 100644
--- a/src/jit/tinyarray.h
+++ b/src/jit/tinyarray.h
@@ -11,13 +11,13 @@
// storagetype is the type (integral) which your array is going to be packed into
// itemtype is the type of array elements
// bits_per_element is size of the elements in bits
-template<class storageType, class itemType, int bits_per_element>
+template <class storageType, class itemType, int bits_per_element>
class TinyArray
{
public:
// operator[] returns a 'ref' (usually a ref to the element type)
- // This presents a problem if you wanted to implement something like a
- // bitvector via this packed array, because you cannot make a ref to
+ // This presents a problem if you wanted to implement something like a
+ // bitvector via this packed array, because you cannot make a ref to
// the element type.
// The trick is you define something that acts like a ref (TinyArrayRef in this case)
// which for our purposes means you can assign to and from it and our chosen
@@ -26,19 +26,19 @@ public:
{
public:
// this is really the getter for the array.
- operator itemType()
+ operator itemType()
{
- storageType mask = ((1 << bits_per_element) - 1);
- int shift = bits_per_element * index;
+ storageType mask = ((1 << bits_per_element) - 1);
+ int shift = bits_per_element * index;
itemType result = (itemType)((*data >> shift) & mask);
return result;
}
- void operator =(const itemType b)
+ void operator=(const itemType b)
{
storageType mask = ((1 << bits_per_element) - 1);
- assert(itemType(b&mask) == b);
+ assert(itemType(b & mask) == b);
mask <<= bits_per_element * index;
@@ -46,28 +46,32 @@ public:
*data |= b << (bits_per_element * index);
}
friend class TinyArray;
+
protected:
- TinyArrayRef(storageType *d, int idx) : data(d), index(idx) {}
+ TinyArrayRef(storageType* d, int idx) : data(d), index(idx)
+ {
+ }
- storageType *data;
- int index;
-
+ storageType* data;
+ int index;
};
-
storageType data;
- void clear() { data = 0; }
+ void clear()
+ {
+ data = 0;
+ }
- TinyArrayRef operator [](unsigned int n)
+ TinyArrayRef operator[](unsigned int n)
{
- assert((n+1) * bits_per_element <= sizeof(itemType) * 8);
+ assert((n + 1) * bits_per_element <= sizeof(itemType) * 8);
return TinyArrayRef(&data, n);
}
- // only use this for clearing it
- void operator=(void *rhs)
+ // only use this for clearing it
+ void operator=(void* rhs)
{
- assert(rhs==NULL);
+ assert(rhs == NULL);
data = 0;
}
};
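TinyArray above packs small fixed-width elements into a single integral value and hands out TinyArrayRef proxies so that operator[] can be both read and written. The proxy's getter and setter reduce to shift-and-mask arithmetic; a self-contained sketch of that arithmetic is shown below, with an assumed element width and storage type standing in for the template parameters (the clear-then-set step is the standard reconstruction of the setter, stated here as an assumption since the hunk elides one line of it).

    // Sketch of the packing arithmetic behind TinyArray/TinyArrayRef, written as
    // plain functions instead of the proxy-reference class.
    #include <cassert>
    #include <cstdint>

    const int bitsPerElement = 4;   // assumed width; TinyArray takes this as a template argument
    typedef uint32_t storageType;   // assumed storage; TinyArray takes this as a template argument

    storageType setElement(storageType data, int index, storageType value)
    {
        storageType mask  = (storageType(1) << bitsPerElement) - 1;
        int         shift = bitsPerElement * index;
        assert((value & mask) == value); // value must fit in bitsPerElement bits
        data &= ~(mask << shift);        // clear the old element
        data |= value << shift;          // write the new one
        return data;
    }

    storageType getElement(storageType data, int index)
    {
        storageType mask  = (storageType(1) << bitsPerElement) - 1;
        int         shift = bitsPerElement * index;
        return (data >> shift) & mask;
    }

    int main()
    {
        storageType data = 0;
        data = setElement(data, 3, 0xA);
        assert(getElement(data, 3) == 0xA);
        return 0;
    }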
diff --git a/src/jit/titypes.h b/src/jit/titypes.h
index 46fe96b87b..a659320709 100644
--- a/src/jit/titypes.h
+++ b/src/jit/titypes.h
@@ -2,15 +2,14 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
-DEF_TI(TI_ERROR, "<ERROR>")
-DEF_TI(TI_REF, "Ref")
+DEF_TI(TI_ERROR, "<ERROR>")
+DEF_TI(TI_REF, "Ref")
DEF_TI(TI_STRUCT, "Struct")
DEF_TI(TI_METHOD, "Method")
-DEF_TI(TI_BYTE, "Byte")
-DEF_TI(TI_SHORT, "Short")
-DEF_TI(TI_INT, "Int")
-DEF_TI(TI_LONG, "Long")
-DEF_TI(TI_FLOAT, "Float")
+DEF_TI(TI_BYTE, "Byte")
+DEF_TI(TI_SHORT, "Short")
+DEF_TI(TI_INT, "Int")
+DEF_TI(TI_LONG, "Long")
+DEF_TI(TI_FLOAT, "Float")
DEF_TI(TI_DOUBLE, "Double")
-DEF_TI(TI_NULL, "Null")
+DEF_TI(TI_NULL, "Null")
diff --git a/src/jit/typeinfo.cpp b/src/jit/typeinfo.cpp
index 097ad10cdf..51429cca38 100644
--- a/src/jit/typeinfo.cpp
+++ b/src/jit/typeinfo.cpp
@@ -19,13 +19,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "_typeinfo.h"
-BOOL Compiler::tiCompatibleWith(const typeInfo& child,
- const typeInfo& parent,
- bool normalisedForStack) const
+BOOL Compiler::tiCompatibleWith(const typeInfo& child, const typeInfo& parent, bool normalisedForStack) const
{
#ifdef DEBUG
#if VERBOSE_VERIFY
- if(VERBOSE && tiVerificationNeeded)
+ if (VERBOSE && tiVerificationNeeded)
{
printf("\n");
printf(TI_DUMP_PADDING);
@@ -37,14 +35,11 @@ BOOL Compiler::tiCompatibleWith(const typeInfo& child,
#endif // VERBOSE_VERIFY
#endif // DEBUG
- BOOL compatible = typeInfo::tiCompatibleWith(info.compCompHnd,
- child,
- parent,
- normalisedForStack);
+ BOOL compatible = typeInfo::tiCompatibleWith(info.compCompHnd, child, parent, normalisedForStack);
#ifdef DEBUG
#if VERBOSE_VERIFY
- if(VERBOSE && tiVerificationNeeded)
+ if (VERBOSE && tiVerificationNeeded)
{
printf(compatible ? " [YES]" : " [NO]");
}
@@ -54,23 +49,19 @@ BOOL Compiler::tiCompatibleWith(const typeInfo& child,
return compatible;
}
-
-BOOL Compiler::tiMergeCompatibleWith(const typeInfo& child,
- const typeInfo& parent,
- bool normalisedForStack) const
+BOOL Compiler::tiMergeCompatibleWith(const typeInfo& child, const typeInfo& parent, bool normalisedForStack) const
{
return typeInfo::tiMergeCompatibleWith(info.compCompHnd, child, parent, normalisedForStack);
}
-BOOL Compiler::tiMergeToCommonParent(typeInfo *pDest,
- const typeInfo *pSrc,
- bool* changed) const
+BOOL Compiler::tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const
{
#ifdef DEBUG
#if VERBOSE_VERIFY
- if(VERBOSE && tiVerificationNeeded)
+ if (VERBOSE && tiVerificationNeeded)
{
- printf("\n"); printf(TI_DUMP_PADDING);
+ printf("\n");
+ printf(TI_DUMP_PADDING);
printf("Attempting to merge types: ");
pDest->Dump();
printf(" and ");
@@ -84,11 +75,11 @@ BOOL Compiler::tiMergeToCommonParent(typeInfo *pDest,
#ifdef DEBUG
#if VERBOSE_VERIFY
- if(VERBOSE && tiVerificationNeeded)
+ if (VERBOSE && tiVerificationNeeded)
{
printf(TI_DUMP_PADDING);
printf((mergeable == TRUE) ? "Merge successful" : "Couldn't merge types");
- if(*changed)
+ if (*changed)
{
assert(mergeable);
printf(", destination type changed to: ");
@@ -102,23 +93,28 @@ BOOL Compiler::tiMergeToCommonParent(typeInfo *pDest,
return mergeable;
}
-
static BOOL tiCompatibleWithByRef(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent)
{
assert(parent.IsByRef());
if (!child.IsByRef())
+ {
return FALSE;
+ }
if (child.IsReadonlyByRef() && !parent.IsReadonlyByRef())
+ {
return FALSE;
+ }
// Byrefs are compatible if the underlying types are equivalent
- typeInfo childTarget = ::DereferenceByRef(child);
+ typeInfo childTarget = ::DereferenceByRef(child);
typeInfo parentTarget = ::DereferenceByRef(parent);
if (typeInfo::AreEquivalent(childTarget, parentTarget))
+ {
return TRUE;
+ }
// Make sure that both types have a valid m_cls
if ((childTarget.IsType(TI_REF) || childTarget.IsType(TI_STRUCT)) &&
@@ -130,19 +126,18 @@ static BOOL tiCompatibleWithByRef(COMP_HANDLE CompHnd, const typeInfo& child, co
return FALSE;
}
-
/*****************************************************************************
- * Verify child is compatible with the template parent. Basically, that
- * child is a "subclass" of parent -it can be substituted for parent
+ * Verify child is compatible with the template parent. Basically, that
+ * child is a "subclass" of parent -it can be substituted for parent
* anywhere. Note that if parent contains fancy flags, such as "uninitialized"
- * , "is this ptr", or "has byref local/field" info, then child must also
+ * , "is this ptr", or "has byref local/field" info, then child must also
* contain those flags, otherwise FALSE will be returned !
*
* Rules for determining compatibility:
*
- * If parent is a primitive type or value class, then child must be the
- * same primitive type or value class. The exception is that the built in
- * value classes System/Boolean etc. are treated as synonyms for
+ * If parent is a primitive type or value class, then child must be the
+ * same primitive type or value class. The exception is that the built in
+ * value classes System/Boolean etc. are treated as synonyms for
* TI_BYTE etc.
*
* If parent is a byref of a primitive type or value class, then child
@@ -150,29 +145,27 @@ static BOOL tiCompatibleWithByRef(COMP_HANDLE CompHnd, const typeInfo& child, co
*
* Byrefs are compatible only with byrefs.
*
- * If parent is an object, child must be a subclass of it, implement it
+ * If parent is an object, child must be a subclass of it, implement it
* (if it is an interface), or be null.
*
* If parent is an array, child must be the same or subclassed array.
*
* If parent is a null objref, only null is compatible with it.
*
- * If the "uninitialized", "by ref local/field", "this pointer" or other flags
+ * If the "uninitialized", "by ref local/field", "this pointer" or other flags
* are different, the items are incompatible.
*
* parent CANNOT be an undefined (dead) item.
*
*/
-BOOL typeInfo::tiCompatibleWith (COMP_HANDLE CompHnd,
- const typeInfo& child,
- const typeInfo& parent,
- bool normalisedForStack)
+BOOL typeInfo::tiCompatibleWith(COMP_HANDLE CompHnd,
+ const typeInfo& child,
+ const typeInfo& parent,
+ bool normalisedForStack)
{
- assert(child.IsDead() || !normalisedForStack ||
- typeInfo::AreEquivalent(::NormaliseForStack(child), child));
- assert(parent.IsDead() || !normalisedForStack ||
- typeInfo::AreEquivalent(::NormaliseForStack(parent), parent));
+ assert(child.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(child), child));
+ assert(parent.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(parent), parent));
if (typeInfo::AreEquivalent(child, parent))
{
@@ -181,33 +174,43 @@ BOOL typeInfo::tiCompatibleWith (COMP_HANDLE CompHnd,
if (parent.IsUnboxedGenericTypeVar() || child.IsUnboxedGenericTypeVar())
{
- return (FALSE); // need to have had child == parent
+ return (FALSE); // need to have had child == parent
}
else if (parent.IsType(TI_REF))
{
// An uninitialized objRef is not compatible to initialized.
if (child.IsUninitialisedObjRef() && !parent.IsUninitialisedObjRef())
+ {
return FALSE;
+ }
- if (child.IsNullObjRef()) // NULL can be any reference type
+ if (child.IsNullObjRef())
+ { // NULL can be any reference type
return TRUE;
+ }
if (!child.IsType(TI_REF))
+ {
return FALSE;
+ }
return CompHnd->canCast(child.m_cls, parent.m_cls);
}
else if (parent.IsType(TI_METHOD))
{
if (!child.IsType(TI_METHOD))
+ {
return FALSE;
+ }
- // Right now we don't bother merging method handles
+ // Right now we don't bother merging method handles
return FALSE;
}
else if (parent.IsType(TI_STRUCT))
{
if (!child.IsType(TI_STRUCT))
+ {
return FALSE;
+ }
// Structures are compatible if they are equivalent
return CompHnd->areTypesEquivalent(child.m_cls, parent.m_cls);
@@ -218,7 +221,7 @@ BOOL typeInfo::tiCompatibleWith (COMP_HANDLE CompHnd,
}
#ifdef _TARGET_64BIT_
// On 64-bit targets we have precise representation for native int, so these rules
- // represent the fact that the ECMA spec permits the implicit conversion
+ // represent the fact that the ECMA spec permits the implicit conversion
// between an int32 and a native int.
else if (parent.IsType(TI_INT) && typeInfo::AreEquivalent(nativeInt(), child))
{
@@ -232,10 +235,10 @@ BOOL typeInfo::tiCompatibleWith (COMP_HANDLE CompHnd,
return FALSE;
}
-BOOL typeInfo::tiMergeCompatibleWith (COMP_HANDLE CompHnd,
- const typeInfo& child,
- const typeInfo& parent,
- bool normalisedForStack)
+BOOL typeInfo::tiMergeCompatibleWith(COMP_HANDLE CompHnd,
+ const typeInfo& child,
+ const typeInfo& parent,
+ bool normalisedForStack)
{
if (!child.IsPermanentHomeByRef() && parent.IsPermanentHomeByRef())
{
@@ -245,7 +248,6 @@ BOOL typeInfo::tiMergeCompatibleWith (COMP_HANDLE CompHnd,
return typeInfo::tiCompatibleWith(CompHnd, child, parent, normalisedForStack);
}
-
/*****************************************************************************
* Merge pDest and pSrc to find some commonality (e.g. a common parent).
* Copy the result to pDest, marking it dead if no commonality can be found.
@@ -274,14 +276,12 @@ BOOL typeInfo::tiMergeCompatibleWith (COMP_HANDLE CompHnd,
*
* Also, System/Int32 and I4 merge -> I4, etc.
*
- * Returns FALSE if the merge was completely incompatible (i.e. the item became
+ * Returns FALSE if the merge was completely incompatible (i.e. the item became
* dead).
*
*/
-BOOL typeInfo::tiMergeToCommonParent (COMP_HANDLE CompHnd, typeInfo *pDest,
- const typeInfo *pSrc,
- bool* changed)
+BOOL typeInfo::tiMergeToCommonParent(COMP_HANDLE CompHnd, typeInfo* pDest, const typeInfo* pSrc, bool* changed)
{
assert(pSrc->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pSrc), *pSrc));
assert(pDest->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pDest), *pDest));
@@ -289,7 +289,7 @@ BOOL typeInfo::tiMergeToCommonParent (COMP_HANDLE CompHnd, typeInfo
// Merge the auxiliary information like "this" pointer tracking, etc...
// Remember the pre-state, so we can tell if it changed.
- *changed = false;
+ *changed = false;
DWORD destFlagsBefore = pDest->m_flags;
// This bit is only set if both pDest and pSrc have it set
@@ -304,11 +304,16 @@ BOOL typeInfo::tiMergeToCommonParent (COMP_HANDLE CompHnd, typeInfo
// If the byref wasn't permanent home in both sides, then merge won't have the bit set
pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_BYREF_PERMANENT_HOME);
- if (pDest->m_flags != destFlagsBefore) *changed = true;
+ if (pDest->m_flags != destFlagsBefore)
+ {
+ *changed = true;
+ }
// OK the main event. Merge the main types
if (typeInfo::AreEquivalent(*pDest, *pSrc))
- return(TRUE);
+ {
+ return (TRUE);
+ }
if (pDest->IsUnboxedGenericTypeVar() || pSrc->IsUnboxedGenericTypeVar())
{
@@ -317,22 +322,30 @@ BOOL typeInfo::tiMergeToCommonParent (COMP_HANDLE CompHnd, typeInfo
}
if (pDest->IsType(TI_REF))
{
- if (pSrc->IsType(TI_NULL)) // NULL can be any reference type
+ if (pSrc->IsType(TI_NULL))
+ { // NULL can be any reference type
return TRUE;
+ }
if (!pSrc->IsType(TI_REF))
+ {
goto FAIL;
+ }
// Ask the EE to find the common parent, This always succeeds since System.Object always works
CORINFO_CLASS_HANDLE pDestClsBefore = pDest->m_cls;
- pDest->m_cls = CompHnd->mergeClasses(pDest->GetClassHandle(), pSrc->GetClassHandle());
- if (pDestClsBefore != pDest->m_cls) *changed = true;
+ pDest->m_cls = CompHnd->mergeClasses(pDest->GetClassHandle(), pSrc->GetClassHandle());
+ if (pDestClsBefore != pDest->m_cls)
+ {
+ *changed = true;
+ }
return TRUE;
}
else if (pDest->IsType(TI_NULL))
{
- if (pSrc->IsType(TI_REF)) // NULL can be any reference type
+ if (pSrc->IsType(TI_REF)) // NULL can be any reference type
{
- *pDest = *pSrc; *changed = true;
+ *pDest = *pSrc;
+ *changed = true;
return TRUE;
}
goto FAIL;
@@ -340,7 +353,9 @@ BOOL typeInfo::tiMergeToCommonParent (COMP_HANDLE CompHnd, typeInfo
else if (pDest->IsType(TI_STRUCT))
{
if (pSrc->IsType(TI_STRUCT) && CompHnd->areTypesEquivalent(pDest->GetClassHandle(), pSrc->GetClassHandle()))
+ {
return TRUE;
+ }
goto FAIL;
}
else if (pDest->IsByRef())
@@ -349,7 +364,7 @@ BOOL typeInfo::tiMergeToCommonParent (COMP_HANDLE CompHnd, typeInfo
}
#ifdef _TARGET_64BIT_
// On 64-bit targets we have precise representation for native int, so these rules
- // represent the fact that the ECMA spec permits the implicit conversion
+ // represent the fact that the ECMA spec permits the implicit conversion
// between an int32 and a native int.
else if (typeInfo::AreEquivalent(*pDest, typeInfo::nativeInt()) && pSrc->IsType(TI_INT))
{
@@ -357,7 +372,7 @@ BOOL typeInfo::tiMergeToCommonParent (COMP_HANDLE CompHnd, typeInfo
}
else if (typeInfo::AreEquivalent(*pSrc, typeInfo::nativeInt()) && pDest->IsType(TI_INT))
{
- *pDest = *pSrc;
+ *pDest = *pSrc;
*changed = true;
return TRUE;
}
@@ -375,13 +390,13 @@ void typeInfo::Dump() const
{
char flagsStr[8];
- flagsStr[0] = ((m_flags & TI_FLAG_UNINIT_OBJREF) != 0) ? 'U' : '-';
- flagsStr[1] = ((m_flags & TI_FLAG_BYREF) != 0) ? 'B' : '-';
- flagsStr[2] = ((m_flags & TI_FLAG_BYREF_READONLY) != 0) ? 'R' : '-';
- flagsStr[3] = ((m_flags & TI_FLAG_NATIVE_INT) != 0) ? 'N' : '-';
- flagsStr[4] = ((m_flags & TI_FLAG_THIS_PTR) != 0) ? 'T' : '-';
+ flagsStr[0] = ((m_flags & TI_FLAG_UNINIT_OBJREF) != 0) ? 'U' : '-';
+ flagsStr[1] = ((m_flags & TI_FLAG_BYREF) != 0) ? 'B' : '-';
+ flagsStr[2] = ((m_flags & TI_FLAG_BYREF_READONLY) != 0) ? 'R' : '-';
+ flagsStr[3] = ((m_flags & TI_FLAG_NATIVE_INT) != 0) ? 'N' : '-';
+ flagsStr[4] = ((m_flags & TI_FLAG_THIS_PTR) != 0) ? 'T' : '-';
flagsStr[5] = ((m_flags & TI_FLAG_BYREF_PERMANENT_HOME) != 0) ? 'P' : '-';
- flagsStr[6] = ((m_flags & TI_FLAG_GENERIC_TYPE_VAR) != 0) ? 'G' : '-';
+ flagsStr[6] = ((m_flags & TI_FLAG_GENERIC_TYPE_VAR) != 0) ? 'G' : '-';
flagsStr[7] = '\0';
printf("[%s(%X) {%s}]", tiType2Str(m_bits.type), m_cls, flagsStr);
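In tiMergeToCommonParent above, a flag that must hold on both sides survives the merge only when both pDest and pSrc carry it; the TI_FLAG_BYREF_PERMANENT_HOME line shows the pattern, and *changed records whether the destination flags moved. A small sketch of that single step follows, using a placeholder flag value rather than the real one from _typeinfo.h.

    // Sketch of the "both sides must agree" flag merge from tiMergeToCommonParent.
    #include <cassert>

    typedef unsigned DWORD;
    const DWORD TI_FLAG_BYREF_PERMANENT_HOME = 0x100; // placeholder bit value for illustration

    DWORD mergeFlags(DWORD destFlags, DWORD srcFlags, bool* changed)
    {
        DWORD before = destFlags;
        // Keep every other dest bit, but drop PERMANENT_HOME unless src also has it.
        destFlags &= (srcFlags | ~TI_FLAG_BYREF_PERMANENT_HOME);
        *changed = (destFlags != before);
        return destFlags;
    }

    int main()
    {
        bool changed = false;
        // Dest byref was permanent-home, src byref was not: the merged result loses the bit.
        DWORD merged = mergeFlags(TI_FLAG_BYREF_PERMANENT_HOME | 0x1, 0x1, &changed);
        assert(merged == 0x1 && changed);
        return 0;
    }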
diff --git a/src/jit/typelist.h b/src/jit/typelist.h
index 7722dcb1b4..ed5884359d 100644
--- a/src/jit/typelist.h
+++ b/src/jit/typelist.h
@@ -2,17 +2,17 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-#define GCS EA_GCREF
-#define BRS EA_BYREF
-#define PS EA_PTRSIZE
-#define PST (sizeof(void*)/sizeof(int))
+#define GCS EA_GCREF
+#define BRS EA_BYREF
+#define PS EA_PTRSIZE
+#define PST (sizeof(void*) / sizeof(int))
#ifdef _TARGET_64BIT_
-# define VTF_I32 0
-# define VTF_I64 VTF_I
+#define VTF_I32 0
+#define VTF_I64 VTF_I
#else
-# define VTF_I32 VTF_I
-# define VTF_I64 0
+#define VTF_I32 VTF_I
+#define VTF_I64 0
#endif
/* tn - TYP_name
@@ -73,9 +73,9 @@ DEF_TP(SIMD32 ,"simd32" , TYP_SIMD32, TI_STRUCT,32,32, 32, 8,16, VTF_S,
DEF_TP(UNKNOWN ,"unknown" ,TYP_UNKNOWN, TI_ERROR, 0, 0, 0, 0, 0, VTF_ANY, 0 )
// clang-format on
-#undef GCS
-#undef BRS
-#undef PS
-#undef PST
-#undef VTF_I32
-#undef VTF_I64
+#undef GCS
+#undef BRS
+#undef PS
+#undef PST
+#undef VTF_I32
+#undef VTF_I64
diff --git a/src/jit/unwind.cpp b/src/jit/unwind.cpp
index cf4945c14e..4568fed75a 100644
--- a/src/jit/unwind.cpp
+++ b/src/jit/unwind.cpp
@@ -49,7 +49,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// A start location of nullptr means the beginning of the code.
// An end location of nullptr means the end of the code.
//
-void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc)
+void Compiler::unwindGetFuncLocations(FuncInfoDsc* func,
+ bool getHotSectionData,
+ /* OUT */ emitLocation** ppStartLoc,
+ /* OUT */ emitLocation** ppEndLoc)
{
if (func->funKind == FUNC_ROOT)
{
@@ -59,7 +62,8 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData,
if (getHotSectionData)
{
- *ppStartLoc = nullptr; // nullptr emit location means the beginning of the code. This is to handle the first fragment prolog.
+ *ppStartLoc = nullptr; // nullptr emit location means the beginning of the code. This is to handle the first
+ // fragment prolog.
if (fgFirstColdBlock != nullptr)
{
@@ -86,7 +90,7 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData,
assert(fgFirstColdBlock != nullptr); // There better be a cold section!
*ppStartLoc = new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(fgFirstColdBlock));
- *ppEndLoc = nullptr; // nullptr end location means the end of the code
+ *ppEndLoc = nullptr; // nullptr end location means the end of the code
}
}
else
@@ -105,7 +109,9 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData,
{
assert(func->funKind == FUNC_HANDLER);
*ppStartLoc = new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndBeg));
- *ppEndLoc = (HBtab->ebdHndLast->bbNext == nullptr) ? nullptr : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->bbNext));
+ *ppEndLoc = (HBtab->ebdHndLast->bbNext == nullptr)
+ ? nullptr
+ : new (this, CMK_UnwindInfo) emitLocation(ehEmitCookie(HBtab->ebdHndLast->bbNext));
}
}
}
@@ -127,16 +133,36 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData,
#elif defined(_TARGET_X86_)
// Stub routines that do nothing
-void Compiler::unwindBegProlog() { }
-void Compiler::unwindEndProlog() { }
-void Compiler::unwindBegEpilog() { }
-void Compiler::unwindEndEpilog() { }
-void Compiler::unwindReserve() { }
-void Compiler::unwindEmit(void* pHotCode, void* pColdCode) { }
-void Compiler::unwindPush(regNumber reg) { }
-void Compiler::unwindAllocStack(unsigned size) { }
-void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset) { }
-void Compiler::unwindSaveReg(regNumber reg, unsigned offset) { }
+void Compiler::unwindBegProlog()
+{
+}
+void Compiler::unwindEndProlog()
+{
+}
+void Compiler::unwindBegEpilog()
+{
+}
+void Compiler::unwindEndEpilog()
+{
+}
+void Compiler::unwindReserve()
+{
+}
+void Compiler::unwindEmit(void* pHotCode, void* pColdCode)
+{
+}
+void Compiler::unwindPush(regNumber reg)
+{
+}
+void Compiler::unwindAllocStack(unsigned size)
+{
+}
+void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset)
+{
+}
+void Compiler::unwindSaveReg(regNumber reg, unsigned offset)
+{
+}
#else // _TARGET_*
diff --git a/src/jit/unwind.h b/src/jit/unwind.h
index c773193a96..27d23b1b54 100644
--- a/src/jit/unwind.h
+++ b/src/jit/unwind.h
@@ -22,29 +22,29 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#if defined(_TARGET_ARM_)
const unsigned MAX_PROLOG_SIZE_BYTES = 40;
const unsigned MAX_EPILOG_SIZE_BYTES = 40;
-#define UWC_END 0xFF // "end" unwind code
-#define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 19)
-#define UW_MAX_CODE_WORDS_COUNT 15 // Max number that can be encoded in the "Code Words" field of the .pdata record
-#define UW_MAX_EPILOG_START_INDEX 0xFFU // Max number that can be encoded in the "Epilog Start Index" field
- // of the .pdata record
+#define UWC_END 0xFF // "end" unwind code
+#define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 19)
+#define UW_MAX_CODE_WORDS_COUNT 15 // Max number that can be encoded in the "Code Words" field of the .pdata record
+#define UW_MAX_EPILOG_START_INDEX 0xFFU // Max number that can be encoded in the "Epilog Start Index" field
+ // of the .pdata record
#elif defined(_TARGET_ARM64_)
const unsigned MAX_PROLOG_SIZE_BYTES = 100;
const unsigned MAX_EPILOG_SIZE_BYTES = 100;
-#define UWC_END 0xE4 // "end" unwind code
-#define UWC_END_C 0xE5 // "end_c" unwind code
-#define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20)
-#define UW_MAX_CODE_WORDS_COUNT 31
-#define UW_MAX_EPILOG_START_INDEX 0x3FFU
+#define UWC_END 0xE4 // "end" unwind code
+#define UWC_END_C 0xE5 // "end_c" unwind code
+#define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20)
+#define UW_MAX_CODE_WORDS_COUNT 31
+#define UW_MAX_EPILOG_START_INDEX 0x3FFU
#endif // _TARGET_ARM64_
-#define UW_MAX_EPILOG_COUNT 31 // Max number that can be encoded in the "Epilog count" field
- // of the .pdata record
-#define UW_MAX_EXTENDED_CODE_WORDS_COUNT 0xFFU // Max number that can be encoded in the "Extended Code Words"
- // field of the .pdata record
-#define UW_MAX_EXTENDED_EPILOG_COUNT 0xFFFFU // Max number that can be encoded in the "Extended Epilog Count"
- // field of the .pdata record
-#define UW_MAX_EPILOG_START_OFFSET 0x3FFFFU // Max number that can be encoded in the "Epilog Start Offset"
- // field of the .pdata record
+#define UW_MAX_EPILOG_COUNT 31 // Max number that can be encoded in the "Epilog count" field
+ // of the .pdata record
+#define UW_MAX_EXTENDED_CODE_WORDS_COUNT 0xFFU // Max number that can be encoded in the "Extended Code Words"
+ // field of the .pdata record
+#define UW_MAX_EXTENDED_EPILOG_COUNT 0xFFFFU // Max number that can be encoded in the "Extended Epilog Count"
+ // field of the .pdata record
+#define UW_MAX_EPILOG_START_OFFSET 0x3FFFFU // Max number that can be encoded in the "Epilog Start Offset"
+ // field of the .pdata record
//
// Forward declaration of class defined in emit.h
@@ -63,29 +63,33 @@ class UnwindEpilogInfo;
class UnwindFragmentInfo;
class UnwindInfo;
-
 // UnwindBase: A base class shared by the unwind classes that require
// a Compiler* for memory allocation.
class UnwindBase
{
protected:
+ UnwindBase(Compiler* comp) : uwiComp(comp)
+ {
+ }
- UnwindBase(Compiler* comp) : uwiComp(comp) {}
-
- UnwindBase() { }
- ~UnwindBase() { }
+ UnwindBase()
+ {
+ }
+ ~UnwindBase()
+ {
+ }
// TODO: How do we get the ability to access uwiComp without error on Clang?
#if defined(DEBUG) && !defined(__GNUC__)
- template<typename T>
+ template <typename T>
T dspPtr(T p)
{
return uwiComp->dspPtr(p);
}
- template<typename T>
+ template <typename T>
T dspOffset(T o)
{
return uwiComp->dspOffset(o);
@@ -102,29 +106,27 @@ protected:
// Data
//
- Compiler* uwiComp;
+ Compiler* uwiComp;
};
-
 // UnwindCodesBase: A base class shared by the classes used to represent the prolog
// and epilog unwind codes.
class UnwindCodesBase
{
public:
-
// Add a single unwind code.
- virtual void AddCode(BYTE b1) = 0;
- virtual void AddCode(BYTE b1, BYTE b2) = 0;
- virtual void AddCode(BYTE b1, BYTE b2, BYTE b3) = 0;
- virtual void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4) = 0;
+ virtual void AddCode(BYTE b1) = 0;
+ virtual void AddCode(BYTE b1, BYTE b2) = 0;
+ virtual void AddCode(BYTE b1, BYTE b2, BYTE b3) = 0;
+ virtual void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4) = 0;
// Get access to the unwind codes
- virtual BYTE* GetCodes() = 0;
+ virtual BYTE* GetCodes() = 0;
- bool IsEndCode(BYTE b)
+ bool IsEndCode(BYTE b)
{
#if defined(_TARGET_ARM_)
return b >= 0xFD;
@@ -135,13 +137,11 @@ public:
#ifdef DEBUG
- unsigned GetCodeSizeFromUnwindCodes(bool isProlog);
+ unsigned GetCodeSizeFromUnwindCodes(bool isProlog);
#endif // DEBUG
-
};
-
// UnwindPrologCodes: represents the unwind codes for a prolog sequence.
// Prolog unwind codes arrive in reverse order from how they will be emitted.
// Store them as a stack, storing from the end of an array towards the beginning.
@@ -161,7 +161,6 @@ class UnwindPrologCodes : public UnwindBase, public UnwindCodesBase
static const int UPC_LOCAL_COUNT = 24;
public:
-
UnwindPrologCodes(Compiler* comp)
: UnwindBase(comp)
, upcMem(upcMemLocal)
@@ -184,25 +183,25 @@ public:
// Implementation of UnwindCodesBase
//
- virtual void AddCode(BYTE b1)
+ virtual void AddCode(BYTE b1)
{
PushByte(b1);
}
- virtual void AddCode(BYTE b1, BYTE b2)
+ virtual void AddCode(BYTE b1, BYTE b2)
{
PushByte(b2);
PushByte(b1);
}
- virtual void AddCode(BYTE b1, BYTE b2, BYTE b3)
+ virtual void AddCode(BYTE b1, BYTE b2, BYTE b3)
{
PushByte(b3);
PushByte(b2);
PushByte(b1);
}
- virtual void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4)
+ virtual void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4)
{
PushByte(b4);
PushByte(b3);
@@ -211,22 +210,22 @@ public:
}
// Return a pointer to the first unwind code byte
- virtual BYTE* GetCodes()
+ virtual BYTE* GetCodes()
{
- assert(upcCodeSlot < upcMemSize); // There better be at least one code!
+ assert(upcCodeSlot < upcMemSize); // There better be at least one code!
return &upcMem[upcCodeSlot];
}
///////////////////////////////////////////////////////////////////////////
- BYTE GetByte(int index)
+ BYTE GetByte(int index)
{
assert(upcCodeSlot <= index && index < upcMemSize);
return upcMem[index];
}
// Push a single byte on the unwind code stack
- void PushByte(BYTE b)
+ void PushByte(BYTE b)
{
if (upcCodeSlot == 0)
{
@@ -242,37 +241,40 @@ public:
// Return the size of the unwind codes, in bytes. The size is the exact size, not an aligned size.
// The size includes exactly one "end" code.
- int Size()
+ int Size()
{
// -3 because we put 4 "end" codes at the end in the constructor, and we shouldn't count that here
return upcMemSize - upcCodeSlot - 3;
}
- void SetFinalSize(int headerBytes, int epilogBytes);
+ void SetFinalSize(int headerBytes, int epilogBytes);
- void AddHeaderWord(DWORD d);
+ void AddHeaderWord(DWORD d);
- void GetFinalInfo(/* OUT */ BYTE** ppUnwindBlock, /* OUT */ ULONG* pUnwindBlockSize);
+ void GetFinalInfo(/* OUT */ BYTE** ppUnwindBlock, /* OUT */ ULONG* pUnwindBlockSize);
// AppendEpilog: copy the epilog bytes to the next epilog bytes slot
- void AppendEpilog(UnwindEpilogInfo* pEpi);
+ void AppendEpilog(UnwindEpilogInfo* pEpi);
// Match the prolog codes to a set of epilog codes
- int Match(UnwindEpilogInfo* pEpi);
+ int Match(UnwindEpilogInfo* pEpi);
// Copy the prolog codes from another prolog
- void CopyFrom(UnwindPrologCodes* pCopyFrom);
+ void CopyFrom(UnwindPrologCodes* pCopyFrom);
- UnwindPrologCodes() { }
- ~UnwindPrologCodes() { }
+ UnwindPrologCodes()
+ {
+ }
+ ~UnwindPrologCodes()
+ {
+ }
#ifdef DEBUG
- void Dump(int indent = 0);
+ void Dump(int indent = 0);
#endif // DEBUG
private:
-
- void EnsureSize(int requiredSize);
+ void EnsureSize(int requiredSize);
// No copy constructor or operator=
UnwindPrologCodes(const UnwindPrologCodes& info);
@@ -284,30 +286,29 @@ private:
// To store the unwind codes, we first use a local array that should satisfy almost all cases.
// If there are more unwind codes, we dynamically allocate memory.
- BYTE upcMemLocal[UPC_LOCAL_COUNT];
- BYTE* upcMem;
+ BYTE upcMemLocal[UPC_LOCAL_COUNT];
+ BYTE* upcMem;
// upcMemSize is the number of bytes in upcMem. This is equal to UPC_LOCAL_COUNT unless
// we've dynamically allocated memory to store the codes.
- int upcMemSize;
+ int upcMemSize;
// upcCodeSlot points to the last unwind code added to the array. The array is filled in from
// the end, so it starts pointing one beyond the array end.
- int upcCodeSlot;
+ int upcCodeSlot;
// upcHeaderSlot points to the last header byte prepended to the array. Headers bytes are
// filled in from the beginning, and only after SetFinalSize() is called.
- int upcHeaderSlot;
+ int upcHeaderSlot;
// upcEpilogSlot points to the next epilog location to fill
- int upcEpilogSlot;
+ int upcEpilogSlot;
// upcUnwindBlockSlot is only set after SetFinalSize() is called. It is the index of the first
// byte of the final unwind data, namely the first byte of the header.
- int upcUnwindBlockSlot;
+ int upcUnwindBlockSlot;
};
-
// UnwindEpilogCodes: represents the unwind codes for a single epilog sequence.
// Epilog unwind codes arrive in the order they will be emitted. Store them as an array,
// adding new ones to the end of the array.
@@ -319,13 +320,8 @@ class UnwindEpilogCodes : public UnwindBase, public UnwindCodesBase
static const int UEC_LOCAL_COUNT = 4;
public:
-
UnwindEpilogCodes(Compiler* comp)
- : UnwindBase(comp)
- , uecMem(uecMemLocal)
- , uecMemSize(UEC_LOCAL_COUNT)
- , uecCodeSlot(-1)
- , uecFinalized(false)
+ : UnwindBase(comp), uecMem(uecMemLocal), uecMemSize(UEC_LOCAL_COUNT), uecCodeSlot(-1), uecFinalized(false)
{
}
@@ -333,25 +329,25 @@ public:
// Implementation of UnwindCodesBase
//
- virtual void AddCode(BYTE b1)
+ virtual void AddCode(BYTE b1)
{
AppendByte(b1);
}
- virtual void AddCode(BYTE b1, BYTE b2)
+ virtual void AddCode(BYTE b1, BYTE b2)
{
AppendByte(b1);
AppendByte(b2);
}
- virtual void AddCode(BYTE b1, BYTE b2, BYTE b3)
+ virtual void AddCode(BYTE b1, BYTE b2, BYTE b3)
{
AppendByte(b1);
AppendByte(b2);
AppendByte(b3);
}
- virtual void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4)
+ virtual void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4)
{
AppendByte(b1);
AppendByte(b2);
@@ -360,7 +356,7 @@ public:
}
// Return a pointer to the first unwind code byte
- virtual BYTE* GetCodes()
+ virtual BYTE* GetCodes()
{
assert(uecFinalized);
@@ -370,14 +366,14 @@ public:
///////////////////////////////////////////////////////////////////////////
- BYTE GetByte(int index)
+ BYTE GetByte(int index)
{
assert(0 <= index && index <= uecCodeSlot);
return uecMem[index];
}
// Add a single byte on the unwind code array
- void AppendByte(BYTE b)
+ void AppendByte(BYTE b)
{
if (uecCodeSlot == uecMemSize - 1)
{
@@ -392,7 +388,7 @@ public:
}
// Return the size of the unwind codes, in bytes. The size is the exact size, not an aligned size.
- int Size()
+ int Size()
{
if (uecFinalized)
{
@@ -406,16 +402,16 @@ public:
}
}
- void FinalizeCodes()
+ void FinalizeCodes()
{
assert(!uecFinalized);
- noway_assert(0 <= uecCodeSlot && uecCodeSlot < uecMemSize); // There better be at least one code!
+ noway_assert(0 <= uecCodeSlot && uecCodeSlot < uecMemSize); // There better be at least one code!
BYTE lastCode = uecMem[uecCodeSlot];
if (!IsEndCode(lastCode)) // If the last code is an end code, we don't need to append one.
{
AppendByte(UWC_END); // Add a default "end" code to the end of the array of unwind codes
}
- uecFinalized = true; // With the "end" code in place, now we're done
+ uecFinalized = true; // With the "end" code in place, now we're done
#ifdef DEBUG
unsigned codeSize = GetCodeSizeFromUnwindCodes(false);
@@ -423,16 +419,19 @@ public:
#endif // DEBUG
}
- UnwindEpilogCodes() { }
- ~UnwindEpilogCodes() { }
+ UnwindEpilogCodes()
+ {
+ }
+ ~UnwindEpilogCodes()
+ {
+ }
#ifdef DEBUG
- void Dump(int indent = 0);
+ void Dump(int indent = 0);
#endif // DEBUG
private:
-
- void EnsureSize(int requiredSize);
+ void EnsureSize(int requiredSize);
// No destructor, copy constructor or operator=
UnwindEpilogCodes(const UnwindEpilogCodes& info);
@@ -444,22 +443,21 @@ private:
// To store the unwind codes, we first use a local array that should satisfy almost all cases.
// If there are more unwind codes, we dynamically allocate memory.
- BYTE uecMemLocal[UEC_LOCAL_COUNT];
- BYTE* uecMem;
+ BYTE uecMemLocal[UEC_LOCAL_COUNT];
+ BYTE* uecMem;
// uecMemSize is the number of bytes/slots in uecMem. This is equal to UEC_LOCAL_COUNT unless
// we've dynamically allocated memory to store the codes.
- int uecMemSize;
+ int uecMemSize;
// uecCodeSlot points to the last unwind code added to the array. The array is filled in from
// the beginning, so it starts at -1.
- int uecCodeSlot;
+ int uecCodeSlot;
// Is the unwind information finalized? Finalized info has an end code appended.
- bool uecFinalized;
+ bool uecFinalized;
};
-
// UnwindEpilogInfo: represents the unwind information for a single epilog sequence. Epilogs for a
// single function/funclet are in a linked list.
@@ -470,7 +468,6 @@ class UnwindEpilogInfo : public UnwindBase
static const unsigned EPI_ILLEGAL_OFFSET = 0xFFFFFFFF;
public:
-
UnwindEpilogInfo(Compiler* comp)
: UnwindBase(comp)
, epiNext(NULL)
@@ -482,67 +479,70 @@ public:
{
}
- void CaptureEmitLocation();
+ void CaptureEmitLocation();
- void FinalizeOffset();
+ void FinalizeOffset();
- void FinalizeCodes()
+ void FinalizeCodes()
{
epiCodes.FinalizeCodes();
}
- UNATIVE_OFFSET GetStartOffset()
+ UNATIVE_OFFSET GetStartOffset()
{
assert(epiStartOffset != EPI_ILLEGAL_OFFSET);
return epiStartOffset;
}
- int GetStartIndex()
+ int GetStartIndex()
{
assert(epiStartIndex != -1);
- return epiStartIndex; // The final "Epilog Start Index" of this epilog's unwind codes
+ return epiStartIndex; // The final "Epilog Start Index" of this epilog's unwind codes
}
- void SetStartIndex(int index)
+ void SetStartIndex(int index)
{
assert(epiStartIndex == -1);
epiStartIndex = (int)index;
}
- void SetMatches()
+ void SetMatches()
{
epiMatches = true;
}
- bool Matches()
+ bool Matches()
{
return epiMatches;
}
// Size of epilog unwind codes in bytes
- int Size()
+ int Size()
{
return epiCodes.Size();
}
// Return a pointer to the first unwind code byte
- BYTE* GetCodes()
+ BYTE* GetCodes()
{
return epiCodes.GetCodes();
}
// Match the codes to a set of epilog codes
- int Match(UnwindEpilogInfo* pEpi);
+ int Match(UnwindEpilogInfo* pEpi);
- UnwindEpilogInfo() { }
- ~UnwindEpilogInfo() { }
+ UnwindEpilogInfo()
+ {
+ }
+ ~UnwindEpilogInfo()
+ {
+ }
#ifdef DEBUG
- void Dump(int indent = 0);
+ void Dump(int indent = 0);
#endif // DEBUG
private:
-
// No copy constructor or operator=
UnwindEpilogInfo(const UnwindEpilogInfo& info);
UnwindEpilogInfo& operator=(const UnwindEpilogInfo&);
@@ -551,15 +551,16 @@ private:
// Data
//
- UnwindEpilogInfo* epiNext;
- emitLocation* epiEmitLocation; // The emitter location of the beginning of the epilog
- UnwindEpilogCodes epiCodes;
- UNATIVE_OFFSET epiStartOffset; // Actual offset of the epilog, in bytes, from the start of the function. Set in FinalizeOffset().
- bool epiMatches; // Do the epilog unwind codes match some other set of codes? If so, we don't copy these to the final set; we just point to another set.
- int epiStartIndex; // The final "Epilog Start Index" of this epilog's unwind codes
+ UnwindEpilogInfo* epiNext;
+ emitLocation* epiEmitLocation; // The emitter location of the beginning of the epilog
+ UnwindEpilogCodes epiCodes;
+ UNATIVE_OFFSET epiStartOffset; // Actual offset of the epilog, in bytes, from the start of the function. Set in
+ // FinalizeOffset().
+ bool epiMatches; // Do the epilog unwind codes match some other set of codes? If so, we don't copy these to the
+ // final set; we just point to another set.
+ int epiStartIndex; // The final "Epilog Start Index" of this epilog's unwind codes
};
-
// UnwindFragmentInfo: represents all the unwind information for a single fragment of a function or funclet.
// A fragment is a section with a code size less than the maximum unwind code size: either 512K bytes, or
// that specified by COMPlus_JitSplitFunctionSize. In most cases, there will be exactly one fragment.
@@ -571,12 +572,11 @@ class UnwindFragmentInfo : public UnwindBase
static const unsigned UFI_ILLEGAL_OFFSET = 0xFFFFFFFF;
public:
-
UnwindFragmentInfo(Compiler* comp, emitLocation* emitLoc, bool hasPhantomProlog);
- void FinalizeOffset();
+ void FinalizeOffset();
- UNATIVE_OFFSET GetStartOffset()
+ UNATIVE_OFFSET GetStartOffset()
{
assert(ufiStartOffset != UFI_ILLEGAL_OFFSET);
return ufiStartOffset;
@@ -585,31 +585,31 @@ public:
// Add an unwind code. It could be for a prolog, or for the current epilog.
// A single unwind code can be from 1 to 4 bytes.
- void AddCode(BYTE b1)
+ void AddCode(BYTE b1)
{
assert(ufiInitialized == UFI_INITIALIZED_PATTERN);
ufiCurCodes->AddCode(b1);
}
- void AddCode(BYTE b1, BYTE b2)
+ void AddCode(BYTE b1, BYTE b2)
{
assert(ufiInitialized == UFI_INITIALIZED_PATTERN);
ufiCurCodes->AddCode(b1, b2);
}
- void AddCode(BYTE b1, BYTE b2, BYTE b3)
+ void AddCode(BYTE b1, BYTE b2, BYTE b3)
{
assert(ufiInitialized == UFI_INITIALIZED_PATTERN);
ufiCurCodes->AddCode(b1, b2, b3);
}
- void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4)
+ void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4)
{
assert(ufiInitialized == UFI_INITIALIZED_PATTERN);
ufiCurCodes->AddCode(b1, b2, b3, b4);
}
- unsigned EpilogCount()
+ unsigned EpilogCount()
{
unsigned count = 0;
for (UnwindEpilogInfo* pEpi = ufiEpilogList; pEpi != NULL; pEpi = pEpi->epiNext)
@@ -619,46 +619,50 @@ public:
return count;
}
- void AddEpilog();
+ void AddEpilog();
- void MergeCodes();
+ void MergeCodes();
- void CopyPrologCodes(UnwindFragmentInfo* pCopyFrom);
+ void CopyPrologCodes(UnwindFragmentInfo* pCopyFrom);
- void SplitEpilogCodes(emitLocation* emitLoc, UnwindFragmentInfo* pSplitFrom);
+ void SplitEpilogCodes(emitLocation* emitLoc, UnwindFragmentInfo* pSplitFrom);
- bool IsAtFragmentEnd(UnwindEpilogInfo* pEpi);
+ bool IsAtFragmentEnd(UnwindEpilogInfo* pEpi);
// Return the full, final size of unwind block. This will be used to allocate memory for
// the unwind block. This is called before the code offsets are finalized.
// Size is in bytes.
- ULONG Size()
+ ULONG Size()
{
assert(ufiSize != 0);
return ufiSize;
}
- void Finalize(UNATIVE_OFFSET functionLength);
+ void Finalize(UNATIVE_OFFSET functionLength);
// GetFinalInfo: return a pointer to the final unwind info to hand to the VM, and the size of this info in bytes
- void GetFinalInfo(/* OUT */ BYTE** ppUnwindBlock, /* OUT */ ULONG* pUnwindBlockSize)
+ void GetFinalInfo(/* OUT */ BYTE** ppUnwindBlock, /* OUT */ ULONG* pUnwindBlockSize)
{
ufiPrologCodes.GetFinalInfo(ppUnwindBlock, pUnwindBlockSize);
}
- void Reserve(BOOL isFunclet, bool isHotCode);
+ void Reserve(BOOL isFunclet, bool isHotCode);
- void Allocate(CorJitFuncKind funKind, void* pHotCode, void* pColdCode, UNATIVE_OFFSET funcEndOffset, bool isHotCode);
+ void Allocate(
+ CorJitFuncKind funKind, void* pHotCode, void* pColdCode, UNATIVE_OFFSET funcEndOffset, bool isHotCode);
- UnwindFragmentInfo() { }
- ~UnwindFragmentInfo() { }
+ UnwindFragmentInfo()
+ {
+ }
+ ~UnwindFragmentInfo()
+ {
+ }
#ifdef DEBUG
- void Dump(int indent = 0);
+ void Dump(int indent = 0);
#endif // DEBUG
private:
-
// No copy constructor or operator=
UnwindFragmentInfo(const UnwindFragmentInfo& info);
UnwindFragmentInfo& operator=(const UnwindFragmentInfo&);
@@ -667,71 +671,68 @@ private:
// Data
//
- UnwindFragmentInfo* ufiNext; // The next fragment
- emitLocation* ufiEmitLoc; // Emitter location for start of fragment
- bool ufiHasPhantomProlog; // Are the prolog codes for a phantom prolog, or a real prolog?
- // (For a phantom prolog, this code fragment represents a fragment in
- // the sense of the unwind info spec; something without a real prolog.)
- UnwindPrologCodes ufiPrologCodes; // The unwind codes for the prolog
- UnwindEpilogInfo ufiEpilogFirst; // In-line the first epilog to avoid separate memory allocation, since
- // almost all functions will have at least one epilog. It is pointed
- // to by ufiEpilogList when the first epilog is added.
- UnwindEpilogInfo* ufiEpilogList; // The head of the epilog list
- UnwindEpilogInfo* ufiEpilogLast; // The last entry in the epilog list (the last epilog added)
- UnwindCodesBase* ufiCurCodes; // Pointer to current unwind codes, either prolog or epilog
+ UnwindFragmentInfo* ufiNext; // The next fragment
+ emitLocation* ufiEmitLoc; // Emitter location for start of fragment
+ bool ufiHasPhantomProlog; // Are the prolog codes for a phantom prolog, or a real prolog?
+ // (For a phantom prolog, this code fragment represents a fragment in
+ // the sense of the unwind info spec; something without a real prolog.)
+ UnwindPrologCodes ufiPrologCodes; // The unwind codes for the prolog
+ UnwindEpilogInfo ufiEpilogFirst; // In-line the first epilog to avoid separate memory allocation, since
+ // almost all functions will have at least one epilog. It is pointed
+ // to by ufiEpilogList when the first epilog is added.
+ UnwindEpilogInfo* ufiEpilogList; // The head of the epilog list
+ UnwindEpilogInfo* ufiEpilogLast; // The last entry in the epilog list (the last epilog added)
+ UnwindCodesBase* ufiCurCodes; // Pointer to current unwind codes, either prolog or epilog
// Some data computed when merging the unwind codes, and used when finalizing the
// unwind block for emission.
- unsigned ufiSize; // The size of the unwind data for this fragment, in bytes
- bool ufiSetEBit;
- bool ufiNeedExtendedCodeWordsEpilogCount;
- unsigned ufiCodeWords;
- unsigned ufiEpilogScopes;
- UNATIVE_OFFSET ufiStartOffset;
+ unsigned ufiSize; // The size of the unwind data for this fragment, in bytes
+ bool ufiSetEBit;
+ bool ufiNeedExtendedCodeWordsEpilogCount;
+ unsigned ufiCodeWords;
+ unsigned ufiEpilogScopes;
+ UNATIVE_OFFSET ufiStartOffset;
#ifdef DEBUG
- unsigned ufiNum;
+ unsigned ufiNum;
// Are we processing the prolog? The prolog must come first, followed by a (possibly empty)
// set of epilogs, for this function/funclet.
- bool ufiInProlog;
+ bool ufiInProlog;
- static const unsigned UFI_INITIALIZED_PATTERN = 0x0FACADE0; // Something unlikely to be the fill pattern for
- // uninitialized memory
- unsigned ufiInitialized;
+ static const unsigned UFI_INITIALIZED_PATTERN = 0x0FACADE0; // Something unlikely to be the fill pattern for
+ // uninitialized memory
+ unsigned ufiInitialized;
#endif // DEBUG
-
};
-
// UnwindInfo: represents all the unwind information for a single function or funclet
class UnwindInfo : public UnwindBase
{
public:
+ void InitUnwindInfo(Compiler* comp, emitLocation* startLoc, emitLocation* endLoc);
- void InitUnwindInfo(Compiler* comp, emitLocation* startLoc, emitLocation* endLoc);
-
- void HotColdSplitCodes(UnwindInfo* puwi);
+ void HotColdSplitCodes(UnwindInfo* puwi);
// The following act on all the fragments that make up the unwind info for this function or funclet.
- void Split();
+ void Split();
- static void EmitSplitCallback(void* context, emitLocation* emitLoc);
+ static void EmitSplitCallback(void* context, emitLocation* emitLoc);
- void Reserve(BOOL isFunclet, bool isHotCode);
+ void Reserve(BOOL isFunclet, bool isHotCode);
- void Allocate(CorJitFuncKind funKind, void* pHotCode, void* pColdCode, bool isHotCode);
+ void Allocate(CorJitFuncKind funKind, void* pHotCode, void* pColdCode, bool isHotCode);
// The following act on the current fragment (the one pointed to by 'uwiFragmentLast').
// Add an unwind code. It could be for a prolog, or for the current epilog.
// A single unwind code can be from 1 to 4 bytes.
- void AddCode(BYTE b1)
+ void AddCode(BYTE b1)
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
assert(uwiFragmentLast != NULL);
@@ -741,7 +742,7 @@ public:
CaptureLocation();
}
- void AddCode(BYTE b1, BYTE b2)
+ void AddCode(BYTE b1, BYTE b2)
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
assert(uwiFragmentLast != NULL);
@@ -751,7 +752,7 @@ public:
CaptureLocation();
}
- void AddCode(BYTE b1, BYTE b2, BYTE b3)
+ void AddCode(BYTE b1, BYTE b2, BYTE b3)
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
assert(uwiFragmentLast != NULL);
@@ -761,7 +762,7 @@ public:
CaptureLocation();
}
- void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4)
+ void AddCode(BYTE b1, BYTE b2, BYTE b3, BYTE b4)
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
assert(uwiFragmentLast != NULL);
@@ -771,41 +772,46 @@ public:
CaptureLocation();
}
- void AddEpilog();
+ void AddEpilog();
- emitLocation* GetCurrentEmitterLocation()
+ emitLocation* GetCurrentEmitterLocation()
{
return uwiCurLoc;
}
#if defined(_TARGET_ARM_)
- unsigned GetInstructionSize();
+ unsigned GetInstructionSize();
#endif // defined(_TARGET_ARM_)
- void CaptureLocation();
+ void CaptureLocation();
- UnwindInfo() { }
- ~UnwindInfo() { }
+ UnwindInfo()
+ {
+ }
+ ~UnwindInfo()
+ {
+ }
#ifdef DEBUG
#if defined(_TARGET_ARM_)
// Given the first byte of the unwind code, check that its opsize matches
// the last instruction added in the emitter.
- void CheckOpsize(BYTE b1);
+ void CheckOpsize(BYTE b1);
#elif defined(_TARGET_ARM64_)
- void CheckOpsize(BYTE b1) {} // nothing to do; all instructions are 4 bytes
+ void CheckOpsize(BYTE b1)
+ {
+ } // nothing to do; all instructions are 4 bytes
#endif // defined(_TARGET_ARM64_)
- void Dump(bool isHotCode, int indent = 0);
+ void Dump(bool isHotCode, int indent = 0);
- bool uwiAddingNOP;
+ bool uwiAddingNOP;
#endif // DEBUG
private:
-
- void AddFragment(emitLocation* emitLoc);
+ void AddFragment(emitLocation* emitLoc);
// No copy constructor or operator=
UnwindInfo(const UnwindInfo& info);
@@ -815,25 +821,31 @@ private:
// Data
//
- UnwindFragmentInfo uwiFragmentFirst; // The first fragment is directly here, so it doesn't need to be separately allocated.
- UnwindFragmentInfo* uwiFragmentLast; // The last entry in the fragment list (the last fragment added)
- emitLocation* uwiEndLoc; // End emitter location of this function/funclet (NULL == end of all code)
- emitLocation* uwiCurLoc; // The current emitter location (updated after an unwind code is added), used for NOP padding, and asserts.
+ UnwindFragmentInfo uwiFragmentFirst; // The first fragment is directly here, so it doesn't need to be separately
+ // allocated.
+ UnwindFragmentInfo* uwiFragmentLast; // The last entry in the fragment list (the last fragment added)
+ emitLocation* uwiEndLoc; // End emitter location of this function/funclet (NULL == end of all code)
+ emitLocation* uwiCurLoc; // The current emitter location (updated after an unwind code is added), used for NOP
+ // padding, and asserts.
#ifdef DEBUG
- static const unsigned UWI_INITIALIZED_PATTERN = 0x0FACADE1; // Something unlikely to be the fill pattern for
- // uninitialized memory
- unsigned uwiInitialized;
+ static const unsigned UWI_INITIALIZED_PATTERN = 0x0FACADE1; // Something unlikely to be the fill pattern for
+ // uninitialized memory
+ unsigned uwiInitialized;
#endif // DEBUG
-
};
#ifdef DEBUG
// Forward declaration
-void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, const BYTE * const pHeader, ULONG unwindBlockSize);
+void DumpUnwindInfo(Compiler* comp,
+ bool isHotCode,
+ UNATIVE_OFFSET startOffset,
+ UNATIVE_OFFSET endOffset,
+ const BYTE* const pHeader,
+ ULONG unwindBlockSize);
#endif // DEBUG
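
A minimal sketch (illustrative only, not code from this change) of how the classes declared above fit together: bytes added through UnwindInfo::AddCode land in the last fragment, which routes them either to its prolog codes or to the epilog currently being built. The code bytes below are assumptions, not encodings the JIT actually emits.

    // Hedged sketch; AddCode/AddEpilog are the members declared above, the bytes are made up.
    void SketchAddUnwindCodes(UnwindInfo* uwi)
    {
        uwi->AddCode(0x04); // a prolog code: stored in the last fragment's prolog codes
        uwi->AddEpilog();   // start an epilog; subsequent codes go to that epilog's buffer
        uwi->AddCode(0xFF); // an 'end' code terminating the epilog's unwind codes
    }
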
diff --git a/src/jit/unwindamd64.cpp b/src/jit/unwindamd64.cpp
index 6c8833bfc0..89abdff2b3 100644
--- a/src/jit/unwindamd64.cpp
+++ b/src/jit/unwindamd64.cpp
@@ -24,40 +24,104 @@ int Compiler::mapRegNumToDwarfReg(regNumber reg)
switch (reg)
{
- case REG_RAX: dwarfReg = 0; break;
- case REG_RCX: dwarfReg = 2; break;
- case REG_RDX: dwarfReg = 1; break;
- case REG_RBX: dwarfReg = 3; break;
- case REG_RSP: dwarfReg = 7; break;
- case REG_RBP: dwarfReg = 6; break;
- case REG_RSI: dwarfReg = 4; break;
- case REG_RDI: dwarfReg = 5; break;
- case REG_R8: dwarfReg = 8; break;
- case REG_R9: dwarfReg = 9; break;
- case REG_R10: dwarfReg = 10; break;
- case REG_R11: dwarfReg = 11; break;
- case REG_R12: dwarfReg = 12; break;
- case REG_R13: dwarfReg = 13; break;
- case REG_R14: dwarfReg = 14; break;
- case REG_R15: dwarfReg = 15; break;
- case REG_XMM0: dwarfReg = 17; break;
- case REG_XMM1: dwarfReg = 18; break;
- case REG_XMM2: dwarfReg = 19; break;
- case REG_XMM3: dwarfReg = 20; break;
- case REG_XMM4: dwarfReg = 21; break;
- case REG_XMM5: dwarfReg = 22; break;
- case REG_XMM6: dwarfReg = 23; break;
- case REG_XMM7: dwarfReg = 24; break;
- case REG_XMM8: dwarfReg = 25; break;
- case REG_XMM9: dwarfReg = 26; break;
- case REG_XMM10:dwarfReg = 27; break;
- case REG_XMM11:dwarfReg = 28; break;
- case REG_XMM12:dwarfReg = 29; break;
- case REG_XMM13:dwarfReg = 30; break;
- case REG_XMM14:dwarfReg = 31; break;
- case REG_XMM15:dwarfReg = 32; break;
- default:
- noway_assert(!"unexpected REG_NUM");
+ case REG_RAX:
+ dwarfReg = 0;
+ break;
+ case REG_RCX:
+ dwarfReg = 2;
+ break;
+ case REG_RDX:
+ dwarfReg = 1;
+ break;
+ case REG_RBX:
+ dwarfReg = 3;
+ break;
+ case REG_RSP:
+ dwarfReg = 7;
+ break;
+ case REG_RBP:
+ dwarfReg = 6;
+ break;
+ case REG_RSI:
+ dwarfReg = 4;
+ break;
+ case REG_RDI:
+ dwarfReg = 5;
+ break;
+ case REG_R8:
+ dwarfReg = 8;
+ break;
+ case REG_R9:
+ dwarfReg = 9;
+ break;
+ case REG_R10:
+ dwarfReg = 10;
+ break;
+ case REG_R11:
+ dwarfReg = 11;
+ break;
+ case REG_R12:
+ dwarfReg = 12;
+ break;
+ case REG_R13:
+ dwarfReg = 13;
+ break;
+ case REG_R14:
+ dwarfReg = 14;
+ break;
+ case REG_R15:
+ dwarfReg = 15;
+ break;
+ case REG_XMM0:
+ dwarfReg = 17;
+ break;
+ case REG_XMM1:
+ dwarfReg = 18;
+ break;
+ case REG_XMM2:
+ dwarfReg = 19;
+ break;
+ case REG_XMM3:
+ dwarfReg = 20;
+ break;
+ case REG_XMM4:
+ dwarfReg = 21;
+ break;
+ case REG_XMM5:
+ dwarfReg = 22;
+ break;
+ case REG_XMM6:
+ dwarfReg = 23;
+ break;
+ case REG_XMM7:
+ dwarfReg = 24;
+ break;
+ case REG_XMM8:
+ dwarfReg = 25;
+ break;
+ case REG_XMM9:
+ dwarfReg = 26;
+ break;
+ case REG_XMM10:
+ dwarfReg = 27;
+ break;
+ case REG_XMM11:
+ dwarfReg = 28;
+ break;
+ case REG_XMM12:
+ dwarfReg = 29;
+ break;
+ case REG_XMM13:
+ dwarfReg = 30;
+ break;
+ case REG_XMM14:
+ dwarfReg = 31;
+ break;
+ case REG_XMM15:
+ dwarfReg = 32;
+ break;
+ default:
+ noway_assert(!"unexpected REG_NUM");
}
return dwarfReg;
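
The switch above follows the System V AMD64 DWARF register numbering: RDX maps to 1 and RCX to 2 (the reverse of their x86 encoding order), and the XMM registers start at 17. A hypothetical spot-check of the mapping, where comp is an assumed Compiler*, would be:

    // Hypothetical spot-check (not part of this change) of the mapping above.
    assert(comp->mapRegNumToDwarfReg(REG_RDX) == 1);
    assert(comp->mapRegNumToDwarfReg(REG_RCX) == 2);
    assert(comp->mapRegNumToDwarfReg(REG_XMM0) == 17);
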
@@ -131,18 +195,17 @@ void Compiler::unwindBegPrologWindows()
unwindGetFuncLocations(func, false, &func->coldStartLoc, &func->coldEndLoc);
}
- func->unwindCodeSlot = sizeof(func->unwindCodes);
- func->unwindHeader.Version = 1;
- func->unwindHeader.Flags = 0;
+ func->unwindCodeSlot = sizeof(func->unwindCodes);
+ func->unwindHeader.Version = 1;
+ func->unwindHeader.Flags = 0;
func->unwindHeader.CountOfUnwindCodes = 0;
- func->unwindHeader.FrameRegister = 0;
- func->unwindHeader.FrameOffset = 0;
+ func->unwindHeader.FrameRegister = 0;
+ func->unwindHeader.FrameOffset = 0;
}
#ifdef UNIX_AMD64_ABI
template <typename T>
-inline
-static T* allocate_any(jitstd::allocator<void>& alloc, size_t count = 5)
+inline static T* allocate_any(jitstd::allocator<void>& alloc, size_t count = 5)
{
return jitstd::allocator<T>(alloc).allocate(count);
}
@@ -174,7 +237,7 @@ void Compiler::unwindBegPrologCFI()
// Compiler::unwindEndProlog: Called at the end of main function or funclet
// prolog generation to indicate there is no more unwind information for this prolog.
//
-void Compiler::unwindEndProlog()
+void Compiler::unwindEndProlog()
{
assert(compGeneratingProlog);
}
@@ -223,10 +286,10 @@ void Compiler::unwindPushWindows(regNumber reg)
FuncInfoDsc* func = funCurrentFunc();
- assert(func->unwindHeader.Version == 1); // Can't call this before unwindBegProlog
+ assert(func->unwindHeader.Version == 1); // Can't call this before unwindBegProlog
assert(func->unwindHeader.CountOfUnwindCodes == 0); // Can't call this after unwindReserve
assert(func->unwindCodeSlot > sizeof(UNWIND_CODE));
- UNWIND_CODE * code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
+ UNWIND_CODE* code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
unsigned int cbProlog = unwindGetCurrentOffset(func);
noway_assert((BYTE)cbProlog == cbProlog);
code->CodeOffset = (BYTE)cbProlog;
@@ -239,16 +302,16 @@ void Compiler::unwindPushWindows(regNumber reg)
// since it is pushed as a frame register.
|| (reg == REG_FPBASE)
#endif // ETW_EBP_FRAMED
- )
+ )
{
code->UnwindOp = UWOP_PUSH_NONVOL;
- code->OpInfo = (BYTE)reg;
+ code->OpInfo = (BYTE)reg;
}
else
{
// Push of a volatile register is just a small stack allocation
code->UnwindOp = UWOP_ALLOC_SMALL;
- code->OpInfo = 0;
+ code->OpInfo = 0;
}
}
@@ -271,7 +334,7 @@ void Compiler::unwindPushCFI(regNumber reg)
// since it is pushed as a frame register.
|| (reg == REG_FPBASE)
#endif // ETW_EBP_FRAMED
- )
+ )
{
createCfiCode(func, cbProlog, CFI_REL_OFFSET, mapRegNumToDwarfReg(reg));
}
@@ -304,34 +367,34 @@ void Compiler::unwindAllocStackWindows(unsigned size)
FuncInfoDsc* func = funCurrentFunc();
- assert(func->unwindHeader.Version == 1); // Can't call this before unwindBegProlog
+ assert(func->unwindHeader.Version == 1); // Can't call this before unwindBegProlog
assert(func->unwindHeader.CountOfUnwindCodes == 0); // Can't call this after unwindReserve
- assert(size % 8 == 0); // Stack size is *always* 8 byte aligned
- UNWIND_CODE * code;
+ assert(size % 8 == 0); // Stack size is *always* 8 byte aligned
+ UNWIND_CODE* code;
if (size <= 128)
{
assert(func->unwindCodeSlot > sizeof(UNWIND_CODE));
- code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
+ code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
code->UnwindOp = UWOP_ALLOC_SMALL;
- code->OpInfo = (size - 8) / 8;
+ code->OpInfo = (size - 8) / 8;
}
else if (size <= 0x7FFF8)
{
assert(func->unwindCodeSlot > (sizeof(UNWIND_CODE) + sizeof(USHORT)));
- USHORT * codedSize = (USHORT*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(USHORT)];
- *codedSize = (USHORT)(size / 8);
- code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
- code->UnwindOp = UWOP_ALLOC_LARGE;
- code->OpInfo = 0;
+ USHORT* codedSize = (USHORT*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(USHORT)];
+ *codedSize = (USHORT)(size / 8);
+ code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
+ code->UnwindOp = UWOP_ALLOC_LARGE;
+ code->OpInfo = 0;
}
else
{
assert(func->unwindCodeSlot > (sizeof(UNWIND_CODE) + sizeof(ULONG)));
- ULONG * codedSize = (ULONG*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(ULONG)];
- *codedSize = size;
- code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
- code->UnwindOp = UWOP_ALLOC_LARGE;
- code->OpInfo = 1;
+ ULONG* codedSize = (ULONG*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(ULONG)];
+ *codedSize = size;
+ code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
+ code->UnwindOp = UWOP_ALLOC_LARGE;
+ code->OpInfo = 1;
}
unsigned int cbProlog = unwindGetCurrentOffset(func);
noway_assert((BYTE)cbProlog == cbProlog);
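
The three branches above choose the encoding purely by allocation size. A standalone sketch of that choice (illustrative only, with assumed string return values; the real code writes UNWIND_CODE slots backwards into func->unwindCodes):

    #include <cassert>

    // Hedged sketch of the size-class selection used by unwindAllocStackWindows.
    inline const char* AllocStackUnwindForm(unsigned size)
    {
        assert(size % 8 == 0); // stack allocation sizes are always 8-byte aligned
        if (size <= 128)
        {
            return "UWOP_ALLOC_SMALL"; // OpInfo holds (size - 8) / 8, no extra slots
        }
        else if (size <= 0x7FFF8)
        {
            return "UWOP_ALLOC_LARGE, OpInfo = 0"; // one extra USHORT slot holds size / 8
        }
        else
        {
            return "UWOP_ALLOC_LARGE, OpInfo = 1"; // one extra ULONG slot holds the unscaled size
        }
    }

For example, size 64 encodes as UWOP_ALLOC_SMALL with OpInfo 7, size 0x2000 as UWOP_ALLOC_LARGE with a coded USHORT of 0x400, and size 0x100000 as UWOP_ALLOC_LARGE carrying the unscaled ULONG 0x100000.
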
@@ -378,7 +441,7 @@ void Compiler::unwindSetFrameRegWindows(regNumber reg, unsigned offset)
FuncInfoDsc* func = funCurrentFunc();
- assert(func->unwindHeader.Version == 1); // Can't call this before unwindBegProlog
+ assert(func->unwindHeader.Version == 1); // Can't call this before unwindBegProlog
assert(func->unwindHeader.CountOfUnwindCodes == 0); // Can't call this after unwindReserve
unsigned int cbProlog = unwindGetCurrentOffset(func);
noway_assert((BYTE)cbProlog == cbProlog);
@@ -397,10 +460,10 @@ void Compiler::unwindSetFrameRegWindows(regNumber reg, unsigned offset)
assert(offset % 16 == 0);
*codedSize = offset / 16;
- UNWIND_CODE* code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
- code->CodeOffset = (BYTE)cbProlog;
- code->OpInfo = 0;
- code->UnwindOp = UWOP_SET_FPREG_LARGE;
+ UNWIND_CODE* code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
+ code->CodeOffset = (BYTE)cbProlog;
+ code->OpInfo = 0;
+ code->UnwindOp = UWOP_SET_FPREG_LARGE;
func->unwindHeader.FrameOffset = 15;
}
else
@@ -408,9 +471,9 @@ void Compiler::unwindSetFrameRegWindows(regNumber reg, unsigned offset)
{
assert(func->unwindCodeSlot > sizeof(UNWIND_CODE));
UNWIND_CODE* code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
- code->CodeOffset = (BYTE)cbProlog;
- code->OpInfo = 0;
- code->UnwindOp = UWOP_SET_FPREG;
+ code->CodeOffset = (BYTE)cbProlog;
+ code->OpInfo = 0;
+ code->UnwindOp = UWOP_SET_FPREG;
assert(offset <= 240);
assert(offset % 16 == 0);
func->unwindHeader.FrameOffset = offset / 16;
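
Worked example of the two branches above, with assumed frame-pointer offsets: an offset of 224 fits the small form, so UWOP_SET_FPREG is emitted and unwindHeader.FrameOffset becomes 224 / 16 = 14; an offset of 0x200 takes the large form, so UWOP_SET_FPREG_LARGE is emitted, the extra coded slots hold 0x200 / 16 = 0x20, and unwindHeader.FrameOffset is set to 15 as in the code above.
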
@@ -461,38 +524,38 @@ void Compiler::unwindSaveRegWindows(regNumber reg, unsigned offset)
FuncInfoDsc* func = funCurrentFunc();
- assert(func->unwindHeader.Version == 1); // Can't call this before unwindBegProlog
+ assert(func->unwindHeader.Version == 1); // Can't call this before unwindBegProlog
assert(func->unwindHeader.CountOfUnwindCodes == 0); // Can't call this after unwindReserve
if (RBM_CALLEE_SAVED & genRegMask(reg))
{
- UNWIND_CODE * code;
+ UNWIND_CODE* code;
if (offset < 0x80000)
{
assert(func->unwindCodeSlot > (sizeof(UNWIND_CODE) + sizeof(USHORT)));
- USHORT * codedSize = (USHORT*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(USHORT)];
- code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
+ USHORT* codedSize = (USHORT*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(USHORT)];
+ code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
             // As per the AMD64 ABI, if saving an entire xmm reg, the offset needs to be scaled by 16.
if (genIsValidFloatReg(reg))
{
- *codedSize = (USHORT) (offset/16);
+ *codedSize = (USHORT)(offset / 16);
code->UnwindOp = UWOP_SAVE_XMM128;
}
else
{
- *codedSize = (USHORT) (offset/8);
+ *codedSize = (USHORT)(offset / 8);
code->UnwindOp = UWOP_SAVE_NONVOL;
}
}
else
{
assert(func->unwindCodeSlot > (sizeof(UNWIND_CODE) + sizeof(ULONG)));
- ULONG * codedSize = (ULONG*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(ULONG)];
- *codedSize = offset;
- code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
- code->UnwindOp = (genIsValidFloatReg(reg)) ? UWOP_SAVE_XMM128_FAR : UWOP_SAVE_NONVOL_FAR;
+ ULONG* codedSize = (ULONG*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(ULONG)];
+ *codedSize = offset;
+ code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot -= sizeof(UNWIND_CODE)];
+ code->UnwindOp = (genIsValidFloatReg(reg)) ? UWOP_SAVE_XMM128_FAR : UWOP_SAVE_NONVOL_FAR;
}
- code->OpInfo = (BYTE)reg;
+ code->OpInfo = (BYTE)reg;
unsigned int cbProlog = unwindGetCurrentOffset(func);
noway_assert((BYTE)cbProlog == cbProlog);
code->CodeOffset = (BYTE)cbProlog;
@@ -526,7 +589,10 @@ void Compiler::unwindSaveRegCFI(regNumber reg, unsigned offset)
// endOffset - byte offset of the code end that this unwind data represents.
// pHeader - pointer to the unwind data blob.
//
-void DumpUnwindInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, const UNWIND_INFO * const pHeader)
+void DumpUnwindInfo(bool isHotCode,
+ UNATIVE_OFFSET startOffset,
+ UNATIVE_OFFSET endOffset,
+ const UNWIND_INFO* const pHeader)
{
printf("Unwind Info%s:\n", isHotCode ? "" : " COLD");
printf(" >> Start offset : 0x%06x (not in unwind data)\n", dspOffset(startOffset));
@@ -546,135 +612,133 @@ void DumpUnwindInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET e
const UCHAR flags = pHeader->Flags;
printf(" (");
if (flags & UNW_FLAG_EHANDLER)
+ {
printf(" UNW_FLAG_EHANDLER");
+ }
if (flags & UNW_FLAG_UHANDLER)
+ {
printf(" UNW_FLAG_UHANDLER");
+ }
if (flags & UNW_FLAG_CHAININFO)
+ {
printf(" UNW_FLAG_CHAININFO");
+ }
printf(")");
}
printf("\n");
printf(" SizeOfProlog : 0x%02X\n", pHeader->SizeOfProlog);
printf(" CountOfUnwindCodes: %u\n", pHeader->CountOfUnwindCodes);
- printf(" FrameRegister : %s (%u)\n", (pHeader->FrameRegister == 0) ? "none" : getRegName(pHeader->FrameRegister), pHeader->FrameRegister); // RAX (0) is not allowed as a frame register
+ printf(" FrameRegister : %s (%u)\n",
+ (pHeader->FrameRegister == 0) ? "none" : getRegName(pHeader->FrameRegister),
+ pHeader->FrameRegister); // RAX (0) is not allowed as a frame register
if (pHeader->FrameRegister == 0)
{
- printf(" FrameOffset : N/A (no FrameRegister) (Value=%u)\n", pHeader->FrameOffset);
+ printf(" FrameOffset : N/A (no FrameRegister) (Value=%u)\n", pHeader->FrameOffset);
}
else
{
- printf(" FrameOffset : %u * 16 = 0x%02X\n", pHeader->FrameOffset, pHeader->FrameOffset * 16);
+ printf(" FrameOffset : %u * 16 = 0x%02X\n", pHeader->FrameOffset, pHeader->FrameOffset * 16);
}
printf(" UnwindCodes :\n");
for (unsigned i = 0; i < pHeader->CountOfUnwindCodes; i++)
{
- unsigned offset;
- const UNWIND_CODE * const pCode = &(pHeader->UnwindCode[i]);
- switch (pCode->UnwindOp)
+ unsigned offset;
+ const UNWIND_CODE* const pCode = &(pHeader->UnwindCode[i]);
+ switch (pCode->UnwindOp)
{
- case UWOP_PUSH_NONVOL:
- printf(" CodeOffset: 0x%02X UnwindOp: UWOP_PUSH_NONVOL (%u) OpInfo: %s (%u)\n",
- pCode->CodeOffset, pCode->UnwindOp, getRegName(pCode->OpInfo), pCode->OpInfo);
- break;
+ case UWOP_PUSH_NONVOL:
+ printf(" CodeOffset: 0x%02X UnwindOp: UWOP_PUSH_NONVOL (%u) OpInfo: %s (%u)\n",
+ pCode->CodeOffset, pCode->UnwindOp, getRegName(pCode->OpInfo), pCode->OpInfo);
+ break;
- case UWOP_ALLOC_LARGE:
- printf(" CodeOffset: 0x%02X UnwindOp: UWOP_ALLOC_LARGE (%u) OpInfo: %u - ",
- pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo);
- if (pCode->OpInfo == 0)
- {
- i++;
- printf("Scaled small \n Size: %u * 8 = %u = 0x%05X\n",
- pHeader->UnwindCode[i].FrameOffset,
- pHeader->UnwindCode[i].FrameOffset * 8,
- pHeader->UnwindCode[i].FrameOffset * 8);
- }
- else if (pCode->OpInfo == 1)
- {
- i++;
- printf("Unscaled large\n Size: %u = 0x%08X\n\n",
- *(ULONG*)&(pHeader->UnwindCode[i]),
- *(ULONG*)&(pHeader->UnwindCode[i]));
- i++;
- }
- else
- {
- printf("Unknown\n");
- }
- break;
+ case UWOP_ALLOC_LARGE:
+ printf(" CodeOffset: 0x%02X UnwindOp: UWOP_ALLOC_LARGE (%u) OpInfo: %u - ", pCode->CodeOffset,
+ pCode->UnwindOp, pCode->OpInfo);
+ if (pCode->OpInfo == 0)
+ {
+ i++;
+ printf("Scaled small \n Size: %u * 8 = %u = 0x%05X\n", pHeader->UnwindCode[i].FrameOffset,
+ pHeader->UnwindCode[i].FrameOffset * 8, pHeader->UnwindCode[i].FrameOffset * 8);
+ }
+ else if (pCode->OpInfo == 1)
+ {
+ i++;
+ printf("Unscaled large\n Size: %u = 0x%08X\n\n", *(ULONG*)&(pHeader->UnwindCode[i]),
+ *(ULONG*)&(pHeader->UnwindCode[i]));
+ i++;
+ }
+ else
+ {
+ printf("Unknown\n");
+ }
+ break;
- case UWOP_ALLOC_SMALL:
- printf(" CodeOffset: 0x%02X UnwindOp: UWOP_ALLOC_SMALL (%u) OpInfo: %u * 8 + 8 = %u = 0x%02X\n",
- pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo, pCode->OpInfo * 8 + 8, pCode->OpInfo * 8 + 8);
- break;
+ case UWOP_ALLOC_SMALL:
+ printf(" CodeOffset: 0x%02X UnwindOp: UWOP_ALLOC_SMALL (%u) OpInfo: %u * 8 + 8 = %u = 0x%02X\n",
+ pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo, pCode->OpInfo * 8 + 8, pCode->OpInfo * 8 + 8);
+ break;
- case UWOP_SET_FPREG:
- printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SET_FPREG (%u) OpInfo: Unused (%u)\n",
- pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo); // This should be zero
- break;
+ case UWOP_SET_FPREG:
+ printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SET_FPREG (%u) OpInfo: Unused (%u)\n",
+ pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo); // This should be zero
+ break;
#ifdef PLATFORM_UNIX
- case UWOP_SET_FPREG_LARGE:
- printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SET_FPREG_LARGE (%u) OpInfo: Unused (%u)\n",
- pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo); // This should be zero
- i++;
- offset = *(ULONG*)&(pHeader->UnwindCode[i]);
- i++;
- printf(" Scaled Offset: %u * 16 = %u = 0x%08X\n",
- offset,
- offset * 16,
- offset * 16);
- if ((offset & 0xF0000000) != 0)
- {
- printf(" Illegal unscaled offset: too large\n");
- }
- break;
+ case UWOP_SET_FPREG_LARGE:
+ printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SET_FPREG_LARGE (%u) OpInfo: Unused (%u)\n",
+ pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo); // This should be zero
+ i++;
+ offset = *(ULONG*)&(pHeader->UnwindCode[i]);
+ i++;
+ printf(" Scaled Offset: %u * 16 = %u = 0x%08X\n", offset, offset * 16, offset * 16);
+ if ((offset & 0xF0000000) != 0)
+ {
+ printf(" Illegal unscaled offset: too large\n");
+ }
+ break;
#endif // PLATFORM_UNIX
- case UWOP_SAVE_NONVOL:
- printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SAVE_NONVOL (%u) OpInfo: %s (%u)\n",
- pCode->CodeOffset, pCode->UnwindOp, getRegName(pCode->OpInfo), pCode->OpInfo);
- i++;
- printf(" Scaled Small Offset: %u * 8 = %u = 0x%05X\n",
- pHeader->UnwindCode[i].FrameOffset,
- pHeader->UnwindCode[i].FrameOffset * 8,
- pHeader->UnwindCode[i].FrameOffset * 8);
- break;
-
- case UWOP_SAVE_NONVOL_FAR:
- printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SAVE_NONVOL_FAR (%u) OpInfo: %s (%u)\n",
- pCode->CodeOffset, pCode->UnwindOp, getRegName(pCode->OpInfo), pCode->OpInfo);
- i++;
- printf(" Unscaled Large Offset: 0x%08X\n\n", *(ULONG*)&(pHeader->UnwindCode[i]));
- i++;
- break;
-
- case UWOP_SAVE_XMM128:
- printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SAVE_XMM128 (%u) OpInfo: XMM%u (%u)\n",
- pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo, pCode->OpInfo);
- i++;
- printf(" Scaled Small Offset: %u * 16 = %u = 0x%05X\n",
- pHeader->UnwindCode[i].FrameOffset,
- pHeader->UnwindCode[i].FrameOffset * 16,
- pHeader->UnwindCode[i].FrameOffset * 16);
- break;
-
- case UWOP_SAVE_XMM128_FAR:
- printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SAVE_XMM128_FAR (%u) OpInfo: XMM%u (%u)\n",
- pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo, pCode->OpInfo);
- i++;
- printf(" Unscaled Large Offset: 0x%08X\n\n", *(ULONG*)&(pHeader->UnwindCode[i]));
- i++;
- break;
-
- case UWOP_EPILOG:
- case UWOP_SPARE_CODE:
- case UWOP_PUSH_MACHFRAME:
- default:
- printf(" Unrecognized UNWIND_CODE: 0x%04X\n", *(USHORT*)pCode);
- break;
+ case UWOP_SAVE_NONVOL:
+ printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SAVE_NONVOL (%u) OpInfo: %s (%u)\n",
+ pCode->CodeOffset, pCode->UnwindOp, getRegName(pCode->OpInfo), pCode->OpInfo);
+ i++;
+ printf(" Scaled Small Offset: %u * 8 = %u = 0x%05X\n", pHeader->UnwindCode[i].FrameOffset,
+ pHeader->UnwindCode[i].FrameOffset * 8, pHeader->UnwindCode[i].FrameOffset * 8);
+ break;
+
+ case UWOP_SAVE_NONVOL_FAR:
+ printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SAVE_NONVOL_FAR (%u) OpInfo: %s (%u)\n",
+ pCode->CodeOffset, pCode->UnwindOp, getRegName(pCode->OpInfo), pCode->OpInfo);
+ i++;
+ printf(" Unscaled Large Offset: 0x%08X\n\n", *(ULONG*)&(pHeader->UnwindCode[i]));
+ i++;
+ break;
+
+ case UWOP_SAVE_XMM128:
+ printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SAVE_XMM128 (%u) OpInfo: XMM%u (%u)\n",
+ pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo, pCode->OpInfo);
+ i++;
+ printf(" Scaled Small Offset: %u * 16 = %u = 0x%05X\n", pHeader->UnwindCode[i].FrameOffset,
+ pHeader->UnwindCode[i].FrameOffset * 16, pHeader->UnwindCode[i].FrameOffset * 16);
+ break;
+
+ case UWOP_SAVE_XMM128_FAR:
+ printf(" CodeOffset: 0x%02X UnwindOp: UWOP_SAVE_XMM128_FAR (%u) OpInfo: XMM%u (%u)\n",
+ pCode->CodeOffset, pCode->UnwindOp, pCode->OpInfo, pCode->OpInfo);
+ i++;
+ printf(" Unscaled Large Offset: 0x%08X\n\n", *(ULONG*)&(pHeader->UnwindCode[i]));
+ i++;
+ break;
+
+ case UWOP_EPILOG:
+ case UWOP_SPARE_CODE:
+ case UWOP_PUSH_MACHFRAME:
+ default:
+ printf(" Unrecognized UNWIND_CODE: 0x%04X\n", *(USHORT*)pCode);
+ break;
}
}
}
@@ -689,7 +753,11 @@ void DumpUnwindInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET e
// endOffset - byte offset of the code end that this cfi data represents.
 // pCfiCode - pointer to the cfi data blob.
//
-void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, DWORD cfiCodeBytes, const CFI_CODE * const pCfiCode)
+void DumpCfiInfo(bool isHotCode,
+ UNATIVE_OFFSET startOffset,
+ UNATIVE_OFFSET endOffset,
+ DWORD cfiCodeBytes,
+ const CFI_CODE* const pCfiCode)
{
printf("Cfi Info%s:\n", isHotCode ? "" : " COLD");
printf(" >> Start offset : 0x%06x \n", dspOffset(startOffset));
@@ -697,16 +765,17 @@ void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endO
for (int i = 0; i < cfiCodeBytes / sizeof(CFI_CODE); i++)
{
- const CFI_CODE * const pCode = &(pCfiCode[i]);
+ const CFI_CODE* const pCode = &(pCfiCode[i]);
UCHAR codeOffset = pCode->CodeOffset;
- SHORT dwarfReg = pCode->DwarfReg;
- INT offset = pCode->Offset;
+ SHORT dwarfReg = pCode->DwarfReg;
+ INT offset = pCode->Offset;
switch (pCode->CfiOpCode)
{
case CFI_REL_OFFSET:
- printf(" CodeOffset: 0x%02X Op: RelOffset DwarfReg:0x%x Offset:0x%X\n", codeOffset, dwarfReg, offset);
+ printf(" CodeOffset: 0x%02X Op: RelOffset DwarfReg:0x%x Offset:0x%X\n", codeOffset, dwarfReg,
+ offset);
break;
case CFI_DEF_CFA_REGISTER:
assert(offset == 0);
@@ -725,7 +794,6 @@ void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endO
#endif // UNIX_AMD64_ABI
#endif // DEBUG
-
//------------------------------------------------------------------------
// Compiler::unwindReserve: Ask the VM to reserve space for the unwind information
// for the function and all its funclets. Called once, just before asking the VM
@@ -782,39 +850,39 @@ void Compiler::unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode)
else
#endif // UNIX_AMD64_ABI
{
- assert(func->unwindHeader.Version == 1); // Can't call this before unwindBegProlog
+ assert(func->unwindHeader.Version == 1); // Can't call this before unwindBegProlog
assert(func->unwindHeader.CountOfUnwindCodes == 0); // Only call this once per prolog
// Set the size of the prolog to be the last encoded action
if (func->unwindCodeSlot < sizeof(func->unwindCodes))
{
- UNWIND_CODE * code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot];
+ UNWIND_CODE* code = (UNWIND_CODE*)&func->unwindCodes[func->unwindCodeSlot];
func->unwindHeader.SizeOfProlog = code->CodeOffset;
}
else
{
func->unwindHeader.SizeOfProlog = 0;
}
- func->unwindHeader.CountOfUnwindCodes = (BYTE)((sizeof(func->unwindCodes) - func->unwindCodeSlot) / sizeof(UNWIND_CODE));
+ func->unwindHeader.CountOfUnwindCodes =
+ (BYTE)((sizeof(func->unwindCodes) - func->unwindCodeSlot) / sizeof(UNWIND_CODE));
// Prepend the unwindHeader onto the unwind codes
assert(func->unwindCodeSlot >= offsetof(UNWIND_INFO, UnwindCode));
func->unwindCodeSlot -= offsetof(UNWIND_INFO, UnwindCode);
- UNWIND_INFO * pHeader = (UNWIND_INFO*)&func->unwindCodes[func->unwindCodeSlot];
+ UNWIND_INFO* pHeader = (UNWIND_INFO*)&func->unwindCodes[func->unwindCodeSlot];
memcpy(pHeader, &func->unwindHeader, offsetof(UNWIND_INFO, UnwindCode));
unwindCodeBytes = sizeof(func->unwindCodes) - func->unwindCodeSlot;
}
}
- BOOL isFunclet = (func->funKind != FUNC_ROOT);
+ BOOL isFunclet = (func->funKind != FUNC_ROOT);
BOOL isColdCode = isHotCode ? FALSE : TRUE;
eeReserveUnwindInfo(isFunclet, isColdCode, unwindCodeBytes);
}
-
//------------------------------------------------------------------------
// Compiler::unwindEmit: Report all the unwind information to the VM.
//
@@ -849,8 +917,8 @@ void Compiler::unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pCo
{
UNATIVE_OFFSET startOffset;
UNATIVE_OFFSET endOffset;
- DWORD unwindCodeBytes = 0;
- BYTE* pUnwindBlock = nullptr;
+ DWORD unwindCodeBytes = 0;
+ BYTE* pUnwindBlock = nullptr;
if (isHotCode)
{
@@ -879,7 +947,7 @@ void Compiler::unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pCo
if (size > 0)
{
unwindCodeBytes = size * sizeof(CFI_CODE);
- pUnwindBlock = (BYTE*)&(*func->cfiCodes)[0];
+ pUnwindBlock = (BYTE*)&(*func->cfiCodes)[0];
}
}
else
@@ -888,8 +956,11 @@ void Compiler::unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pCo
unwindCodeBytes = sizeof(func->unwindCodes) - func->unwindCodeSlot;
#ifdef DEBUG
- UNWIND_INFO * pUnwindInfo = (UNWIND_INFO *)(&func->unwindCodes[func->unwindCodeSlot]);
- DWORD unwindCodeBytesSpecified = offsetof(UNWIND_INFO, UnwindCode) + pUnwindInfo->CountOfUnwindCodes * sizeof(UNWIND_CODE); // This is what the unwind codes themselves say; it better match what we tell the VM.
+ UNWIND_INFO* pUnwindInfo = (UNWIND_INFO*)(&func->unwindCodes[func->unwindCodeSlot]);
+ DWORD unwindCodeBytesSpecified =
+ offsetof(UNWIND_INFO, UnwindCode) +
+ pUnwindInfo->CountOfUnwindCodes * sizeof(UNWIND_CODE); // This is what the unwind codes themselves say;
+ // it better match what we tell the VM.
assert(unwindCodeBytes == unwindCodeBytesSpecified);
#endif // DEBUG
@@ -926,12 +997,12 @@ void Compiler::unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pCo
#ifdef UNIX_AMD64_ABI
if (generateCFIUnwindCodes())
{
- DumpCfiInfo(isHotCode, startOffset, endOffset, unwindCodeBytes, (const CFI_CODE * const)pUnwindBlock);
+ DumpCfiInfo(isHotCode, startOffset, endOffset, unwindCodeBytes, (const CFI_CODE* const)pUnwindBlock);
}
else
#endif // UNIX_AMD64_ABI
{
- DumpUnwindInfo(isHotCode, startOffset, endOffset, (const UNWIND_INFO * const)pUnwindBlock);
+ DumpUnwindInfo(isHotCode, startOffset, endOffset, (const UNWIND_INFO* const)pUnwindBlock);
}
}
#endif // DEBUG
@@ -950,15 +1021,10 @@ void Compiler::unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pCo
{
assert(startOffset >= info.compTotalHotCodeSize);
startOffset -= info.compTotalHotCodeSize;
- endOffset -= info.compTotalHotCodeSize;
+ endOffset -= info.compTotalHotCodeSize;
}
- eeAllocUnwindInfo((BYTE*)pHotCode,
- (BYTE*)pColdCode,
- startOffset,
- endOffset,
- unwindCodeBytes,
- pUnwindBlock,
+ eeAllocUnwindInfo((BYTE*)pHotCode, (BYTE*)pColdCode, startOffset, endOffset, unwindCodeBytes, pUnwindBlock,
(CorJitFuncKind)func->funKind);
}
@@ -975,9 +1041,9 @@ void Compiler::unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pCo
void Compiler::unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode)
{
// Verify that the JIT enum is in sync with the JIT-EE interface enum
- static_assert_no_msg(FUNC_ROOT == (FuncKind)CORJIT_FUNC_ROOT);
- static_assert_no_msg(FUNC_HANDLER == (FuncKind)CORJIT_FUNC_HANDLER);
- static_assert_no_msg(FUNC_FILTER == (FuncKind)CORJIT_FUNC_FILTER);
+ static_assert_no_msg(FUNC_ROOT == (FuncKind)CORJIT_FUNC_ROOT);
+ static_assert_no_msg(FUNC_HANDLER == (FuncKind)CORJIT_FUNC_HANDLER);
+ static_assert_no_msg(FUNC_FILTER == (FuncKind)CORJIT_FUNC_FILTER);
unwindEmitFuncHelper(func, pHotCode, pColdCode, true);
diff --git a/src/jit/unwindarm.cpp b/src/jit/unwindarm.cpp
index 1dcfd06f6b..b537bef4a3 100644
--- a/src/jit/unwindarm.cpp
+++ b/src/jit/unwindarm.cpp
@@ -69,19 +69,19 @@ void Compiler::unwindPushPopMaskInt(regMaskTP maskInt, bool useOpsize16)
// floating point registers cannot be specified in 'maskInt'
assert((maskInt & RBM_ALLFLOAT) == 0);
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
if (useOpsize16)
{
         // The 16-bit opcode only encodes R0-R7 and LR
- assert((maskInt & ~(RBM_R0|RBM_R1|RBM_R2|RBM_R3|RBM_R4|RBM_R5|RBM_R6|RBM_R7|RBM_LR)) == 0);
+ assert((maskInt & ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_LR)) == 0);
bool shortFormat = false;
BYTE val = 0;
- if ((maskInt & (RBM_R0|RBM_R1|RBM_R2|RBM_R3)) == 0)
+ if ((maskInt & (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3)) == 0)
{
- regMaskTP matchMask = maskInt & (RBM_R4|RBM_R5|RBM_R6|RBM_R7);
+ regMaskTP matchMask = maskInt & (RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7);
regMaskTP valMask = RBM_R4;
while (val < 4)
{
@@ -92,7 +92,7 @@ void Compiler::unwindPushPopMaskInt(regMaskTP maskInt, bool useOpsize16)
}
valMask <<= 1;
- valMask |= RBM_R4;
+ valMask |= RBM_R4;
val++;
}
@@ -104,24 +104,25 @@ void Compiler::unwindPushPopMaskInt(regMaskTP maskInt, bool useOpsize16)
pu->AddCode(0xD0 | ((maskInt >> 12) & 0x4) | val);
}
else
- {
+ {
// EC-ED : pop {r0-r7,lr} (opsize 16)
- pu->AddCode(0xEC | ((maskInt >> 14) & 0x1),
- (BYTE)maskInt);
+ pu->AddCode(0xEC | ((maskInt >> 14) & 0x1), (BYTE)maskInt);
}
}
else
{
- assert((maskInt & ~(RBM_R0|RBM_R1|RBM_R2|RBM_R3|RBM_R4|RBM_R5|RBM_R6|RBM_R7|RBM_R8|RBM_R9|RBM_R10|RBM_R11|RBM_R12|RBM_LR)) == 0);
+ assert((maskInt &
+ ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 |
+ RBM_R11 | RBM_R12 | RBM_LR)) == 0);
bool shortFormat = false;
BYTE val = 0;
- if (((maskInt & (RBM_R0|RBM_R1|RBM_R2|RBM_R3)) == 0) &&
- ((maskInt & (RBM_R4|RBM_R5|RBM_R6|RBM_R7|RBM_R8)) == (RBM_R4|RBM_R5|RBM_R6|RBM_R7|RBM_R8)))
+ if (((maskInt & (RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3)) == 0) &&
+ ((maskInt & (RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8)) == (RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8)))
{
- regMaskTP matchMask = maskInt & (RBM_R4|RBM_R5|RBM_R6|RBM_R7|RBM_R8|RBM_R9|RBM_R10|RBM_R11);
- regMaskTP valMask = RBM_R4|RBM_R5|RBM_R6|RBM_R7|RBM_R8;
+ regMaskTP matchMask = maskInt & (RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 | RBM_R11);
+ regMaskTP valMask = RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8;
while (val < 4)
{
if (matchMask == valMask)
@@ -131,7 +132,7 @@ void Compiler::unwindPushPopMaskInt(regMaskTP maskInt, bool useOpsize16)
}
valMask <<= 1;
- valMask |= RBM_R4;
+ valMask |= RBM_R4;
val++;
}
@@ -145,8 +146,7 @@ void Compiler::unwindPushPopMaskInt(regMaskTP maskInt, bool useOpsize16)
else
{
// 80-BF : pop {r0-r12,lr} (opsize 32)
- pu->AddCode(0x80 | ((maskInt >> 8) & 0x1F) | ((maskInt >> 9) & 0x20),
- (BYTE)maskInt);
+ pu->AddCode(0x80 | ((maskInt >> 8) & 0x1F) | ((maskInt >> 9) & 0x20), (BYTE)maskInt);
}
}
}
@@ -163,15 +163,15 @@ void Compiler::unwindPushPopMaskFloat(regMaskTP maskFloat)
return;
}
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
BYTE val = 0;
- regMaskTP valMask = (RBM_F16|RBM_F17);
+ regMaskTP valMask = (RBM_F16 | RBM_F17);
while (maskFloat != valMask)
{
valMask <<= 2;
- valMask |= (RBM_F16|RBM_F17);
+ valMask |= (RBM_F16 | RBM_F17);
val++;
@@ -189,9 +189,11 @@ void Compiler::unwindPushPopMaskFloat(regMaskTP maskFloat)
void Compiler::unwindPushMaskInt(regMaskTP maskInt)
{
// Only r0-r12 and lr are supported
- assert((maskInt & ~(RBM_R0|RBM_R1|RBM_R2|RBM_R3|RBM_R4|RBM_R5|RBM_R6|RBM_R7|RBM_R8|RBM_R9|RBM_R10|RBM_R11|RBM_R12|RBM_LR)) == 0);
+ assert((maskInt &
+ ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 |
+ RBM_R11 | RBM_R12 | RBM_LR)) == 0);
- bool useOpsize16 = ((maskInt & (RBM_LOW_REGS | RBM_LR)) == maskInt); // Can PUSH use the 16-bit encoding?
+ bool useOpsize16 = ((maskInt & (RBM_LOW_REGS | RBM_LR)) == maskInt); // Can PUSH use the 16-bit encoding?
unwindPushPopMaskInt(maskInt, useOpsize16);
}
@@ -205,9 +207,11 @@ void Compiler::unwindPushMaskFloat(regMaskTP maskFloat)
void Compiler::unwindPopMaskInt(regMaskTP maskInt)
{
// Only r0-r12 and lr and pc are supported (pc is mapped to lr when encoding)
- assert((maskInt & ~(RBM_R0|RBM_R1|RBM_R2|RBM_R3|RBM_R4|RBM_R5|RBM_R6|RBM_R7|RBM_R8|RBM_R9|RBM_R10|RBM_R11|RBM_R12|RBM_LR|RBM_PC)) == 0);
+ assert((maskInt &
+ ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 |
+ RBM_R11 | RBM_R12 | RBM_LR | RBM_PC)) == 0);
- bool useOpsize16 = ((maskInt & (RBM_LOW_REGS | RBM_PC)) == maskInt); // Can POP use the 16-bit encoding?
+ bool useOpsize16 = ((maskInt & (RBM_LOW_REGS | RBM_PC)) == maskInt); // Can POP use the 16-bit encoding?
// If we are popping PC, then we'll return from the function. In this case, we assume
// the first thing the prolog did was push LR, so give the unwind codes in terms of
@@ -230,7 +234,7 @@ void Compiler::unwindPopMaskFloat(regMaskTP maskFloat)
void Compiler::unwindAllocStack(unsigned size)
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
assert(size % 4 == 0);
size /= 4;
@@ -243,8 +247,7 @@ void Compiler::unwindAllocStack(unsigned size)
else if (size <= 0x3FF)
{
// E8-EB : addw sp, sp, #X*4 (opsize 32)
- pu->AddCode(0xE8 | (BYTE)(size >> 8),
- (BYTE)size);
+ pu->AddCode(0xE8 | (BYTE)(size >> 8), (BYTE)size);
}
else if (size <= 0xFFFF)
{
@@ -254,10 +257,10 @@ void Compiler::unwindAllocStack(unsigned size)
// For large stack size, the most significant bits
// are stored first (and next to the opCode (F9)) per the unwind spec.
unsigned instrSizeInBytes = pu->GetInstructionSize();
- BYTE b1 = (instrSizeInBytes == 2) ? 0xF7 : 0xF9;
+ BYTE b1 = (instrSizeInBytes == 2) ? 0xF7 : 0xF9;
pu->AddCode(b1,
- (BYTE)(size >> 8), // msb
- (BYTE)size); // lsb
+ (BYTE)(size >> 8), // msb
+ (BYTE)size); // lsb
}
else
{
@@ -267,17 +270,14 @@ void Compiler::unwindAllocStack(unsigned size)
// For large stack size, the most significant bits
// are stored first (and next to the opCode (FA)) per the unwind spec.
unsigned instrSizeInBytes = pu->GetInstructionSize();
- BYTE b1 = (instrSizeInBytes == 2) ? 0xF8 : 0xFA;
- pu->AddCode(b1,
- (BYTE)(size >> 16),
- (BYTE)(size >> 8),
- (BYTE)size);
+ BYTE b1 = (instrSizeInBytes == 2) ? 0xF8 : 0xFA;
+ pu->AddCode(b1, (BYTE)(size >> 16), (BYTE)(size >> 8), (BYTE)size);
}
}
-void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset)
+void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset)
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
// Arm unwind info does not allow offset
assert(offset == 0);
@@ -294,16 +294,16 @@ void Compiler::unwindSaveReg(regNumber reg, unsigned offset)
void Compiler::unwindBranch16()
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
- // TODO-CQ: need to handle changing the exit code from 0xFF to 0xFD. Currently, this will waste an extra 0xFF at the end, automatically added.
+ // TODO-CQ: need to handle changing the exit code from 0xFF to 0xFD. Currently, this will waste an extra 0xFF at the
+ // end, automatically added.
pu->AddCode(0xFD);
}
-
-void Compiler::unwindNop(unsigned codeSizeInBytes) // codeSizeInBytes is 2 or 4 bytes for Thumb2 instruction
+void Compiler::unwindNop(unsigned codeSizeInBytes) // codeSizeInBytes is 2 or 4 bytes for Thumb2 instruction
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
#ifdef DEBUG
if (verbose)
@@ -337,11 +337,10 @@ void Compiler::unwindNop(unsigned codeSizeInBytes) // codeSizeInBytes is 2 or
// for them.
void Compiler::unwindPadding()
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
genEmitter->emitUnwindNopPadding(pu->GetCurrentEmitterLocation(), this);
}
-
// Ask the VM to reserve space for the unwind information for the function and
// all its funclets.
void Compiler::unwindReserve()
@@ -355,7 +354,7 @@ void Compiler::unwindReserve()
void Compiler::unwindReserveFunc(FuncInfoDsc* func)
{
- BOOL isFunclet = (func->funKind == FUNC_ROOT) ? FALSE : TRUE;
+ BOOL isFunclet = (func->funKind == FUNC_ROOT) ? FALSE : TRUE;
bool funcHasColdSection = false;
// If there is cold code, split the unwind data between the hot section and the
@@ -395,7 +394,6 @@ void Compiler::unwindReserveFunc(FuncInfoDsc* func)
}
}
-
// unwindEmit: Report all the unwind information to the VM.
// Arguments:
// pHotCode: Pointer to the beginning of the memory with the function and funclet hot code
@@ -413,9 +411,9 @@ void Compiler::unwindEmit(void* pHotCode, void* pColdCode)
void Compiler::unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode)
{
// Verify that the JIT enum is in sync with the JIT-EE interface enum
- static_assert_no_msg(FUNC_ROOT == (FuncKind)CORJIT_FUNC_ROOT);
- static_assert_no_msg(FUNC_HANDLER == (FuncKind)CORJIT_FUNC_HANDLER);
- static_assert_no_msg(FUNC_FILTER == (FuncKind)CORJIT_FUNC_FILTER);
+ static_assert_no_msg(FUNC_ROOT == (FuncKind)CORJIT_FUNC_ROOT);
+ static_assert_no_msg(FUNC_HANDLER == (FuncKind)CORJIT_FUNC_HANDLER);
+ static_assert_no_msg(FUNC_FILTER == (FuncKind)CORJIT_FUNC_FILTER);
func->uwi.Allocate((CorJitFuncKind)func->funKind, pHotCode, pColdCode, true);
@@ -425,7 +423,6 @@ void Compiler::unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode
}
}
-
#if defined(_TARGET_ARM_)
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -444,52 +441,54 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
unsigned GetOpcodeSizeFromUnwindHeader(BYTE b1)
{
- static BYTE s_UnwindOpsize[256] = { // array of opsizes, in bytes (as specified in the ARM unwind specification)
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 00-0F
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 10-1F
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 20-2F
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 30-3F
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 40-4F
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 50-5F
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 60-6F
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 70-7F
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // 80-8F
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // 90-9F
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // A0-AF
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // B0-BF
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0-CF
- 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, // D0-DF
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 4, // E0-EF
- 0, 0, 0, 0, 0, 4, 4, 2, 2, 4, 4, 2, 4, 2, 4, 0 // F0-FF
+ static BYTE s_UnwindOpsize[256] = {
+ // array of opsizes, in bytes (as specified in the ARM unwind specification)
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 00-0F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 10-1F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 20-2F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 30-3F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 40-4F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 50-5F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 60-6F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 70-7F
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // 80-8F
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // 90-9F
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // A0-AF
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // B0-BF
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0-CF
+ 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, // D0-DF
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 4, // E0-EF
+ 0, 0, 0, 0, 0, 4, 4, 2, 2, 4, 4, 2, 4, 2, 4, 0 // F0-FF
};
BYTE opsize = s_UnwindOpsize[b1];
- assert(opsize == 2 || opsize == 4); // We shouldn't get a code with no opsize (the 0xFF end code is handled specially)
+ assert(opsize == 2 ||
+ opsize == 4); // We shouldn't get a code with no opsize (the 0xFF end code is handled specially)
return opsize;
}
-
// Return the size of the unwind code (from 1 to 4 bytes), given the first byte of the unwind bytes
unsigned GetUnwindSizeFromUnwindHeader(BYTE b1)
{
- static BYTE s_UnwindSize[256] = { // array of unwind sizes, in bytes (as specified in the ARM unwind specification)
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 00-0F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 10-1F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 20-2F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 30-3F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 40-4F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 50-5F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 60-6F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 70-7F
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 80-8F
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 90-9F
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // A0-AF
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // B0-BF
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C0-CF
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D0-DF
- 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, // E0-EF
- 1, 1, 1, 1, 1, 2, 2, 3, 4, 3, 4, 1, 1, 1, 1, 1 // F0-FF
+ static BYTE s_UnwindSize[256] = {
+ // array of unwind sizes, in bytes (as specified in the ARM unwind specification)
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 00-0F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 10-1F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 20-2F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 30-3F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 40-4F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 50-5F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 60-6F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 70-7F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 80-8F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 90-9F
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // A0-AF
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // B0-BF
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // C0-CF
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // D0-DF
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, // E0-EF
+ 1, 1, 1, 1, 1, 2, 2, 3, 4, 3, 4, 1, 1, 1, 1, 1 // F0-FF
};
unsigned size = s_UnwindSize[b1];
@@ -499,7 +498,6 @@ unsigned GetUnwindSizeFromUnwindHeader(BYTE b1)
#endif // DEBUG
-
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
@@ -521,11 +519,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// The 0xFD and 0xFE "end + NOP" codes need to be handled differently between
// the prolog and epilog. They count as pure "end" codes in a prolog, but they
// count as 16 and 32 bit NOPs (respectively), as well as an "end", in an epilog.
-unsigned UnwindCodesBase::GetCodeSizeFromUnwindCodes(bool isProlog)
+unsigned UnwindCodesBase::GetCodeSizeFromUnwindCodes(bool isProlog)
{
- BYTE* pCodesStart = GetCodes();
- BYTE* pCodes = pCodesStart;
- unsigned size = 0;
+ BYTE* pCodesStart = GetCodes();
+ BYTE* pCodes = pCodesStart;
+ unsigned size = 0;
for (;;)
{
BYTE b1 = *pCodes;
@@ -539,9 +537,9 @@ unsigned UnwindCodesBase::GetCodeSizeFromUnwindCodes(bool isProlog)
size += GetOpcodeSizeFromUnwindHeader(b1);
}
- break; // We hit an "end" code; we're done
+ break; // We hit an "end" code; we're done
}
- size += GetOpcodeSizeFromUnwindHeader(b1);
+ size += GetOpcodeSizeFromUnwindHeader(b1);
pCodes += GetUnwindSizeFromUnwindHeader(b1);
assert(pCodes - pCodesStart < 256); // 255 is the absolute maximum number of code bytes allowed
}
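
Combining the two tables above with this loop, a short prolog code stream can be sized by hand (the bytes are assumed, chosen from ranges documented in this file):

    // Assumed prolog code stream: 0x04, 0xEC 0x10, 0xFF
    //   0x04      (small "add sp, sp, #4*4")       -> opsize 2, occupies 1 unwind byte
    //   0xEC 0x10 (16-bit pop of a low-reg mask)   -> opsize 2, occupies 2 unwind bytes
    //   0xFF      (end)                            -> end code; contributes no opsize in a prolog
    // GetCodeSizeFromUnwindCodes(true) would therefore return 2 + 2 = 4 bytes of code.
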
@@ -552,7 +550,6 @@ unsigned UnwindCodesBase::GetCodeSizeFromUnwindCodes(bool isProlog)
#endif // defined(_TARGET_ARM_)
-
///////////////////////////////////////////////////////////////////////////////
//
// UnwindPrologCodes
@@ -566,7 +563,7 @@ unsigned UnwindCodesBase::GetCodeSizeFromUnwindCodes(bool isProlog)
// is updated when a header byte is added), and remember the index that points
// to the beginning of the header.
-void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes)
+void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes)
{
#ifdef DEBUG
// We're done adding codes. Check that we didn't accidentally create a bigger prolog.
@@ -576,9 +573,9 @@ void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes
int prologBytes = Size();
- EnsureSize(headerBytes + prologBytes + epilogBytes + 3); // 3 = padding bytes for alignment
+ EnsureSize(headerBytes + prologBytes + epilogBytes + 3); // 3 = padding bytes for alignment
- upcUnwindBlockSlot = upcCodeSlot - headerBytes - epilogBytes; // Index of the first byte of the unwind header
+ upcUnwindBlockSlot = upcCodeSlot - headerBytes - epilogBytes; // Index of the first byte of the unwind header
assert(upcMemSize == upcUnwindBlockSlot + headerBytes + prologBytes + epilogBytes + 3);
@@ -590,8 +587,8 @@ void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes
// The prolog codes that are already at the end of the array need to get moved to the middle,
// with space for the non-matching epilog codes to follow.
-
- memmove_s(&upcMem[upcUnwindBlockSlot + headerBytes], upcMemSize - (upcUnwindBlockSlot + headerBytes), &upcMem[upcCodeSlot], prologBytes);
+ memmove_s(&upcMem[upcUnwindBlockSlot + headerBytes], upcMemSize - (upcUnwindBlockSlot + headerBytes),
+ &upcMem[upcCodeSlot], prologBytes);
// Note that the three UWC_END padding bytes still exist at the end of the array.
CLANG_FORMAT_COMMENT_ANCHOR;
@@ -601,7 +598,8 @@ void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes
memset(&upcMem[upcUnwindBlockSlot + headerBytes + prologBytes], 0, epilogBytes);
#endif // DEBUG
- upcEpilogSlot = upcUnwindBlockSlot + headerBytes + prologBytes; // upcEpilogSlot points to the next epilog location to fill
+ upcEpilogSlot =
+ upcUnwindBlockSlot + headerBytes + prologBytes; // upcEpilogSlot points to the next epilog location to fill
// Update upcCodeSlot to point at the new beginning of the prolog codes
upcCodeSlot = upcUnwindBlockSlot + headerBytes;
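
Summarizing the bookkeeping above with assumed sizes (headerBytes = 8, prologBytes = 10, epilogBytes = 6), the buffer ends up laid out as:

    // upcMem layout after SetFinalSize (assumed sizes):
    //   [ unused ][ header (8) ][ prolog codes (10) ][ epilog codes (6) ][ 3 'end' padding bytes ]
    //   upcUnwindBlockSlot -> start of the header
    //   upcCodeSlot        -> start of the prolog codes (after the move)
    //   upcEpilogSlot      -> start of the epilog codes
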
@@ -610,10 +608,10 @@ void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes
// Add a header word. Header words are added starting at the beginning, in order: first to last.
// This is in contrast to the prolog unwind codes, which are added in reverse order.
-void UnwindPrologCodes::AddHeaderWord(DWORD d)
+void UnwindPrologCodes::AddHeaderWord(DWORD d)
{
assert(-1 <= upcHeaderSlot);
- assert(upcHeaderSlot + 4 < upcCodeSlot); // Don't collide with the unwind codes that are already there!
+ assert(upcHeaderSlot + 4 < upcCodeSlot); // Don't collide with the unwind codes that are already there!
// Store it byte-by-byte in little-endian format. We've already ensured there is enough space
// in SetFinalSize().
@@ -624,25 +622,26 @@ void UnwindPrologCodes::AddHeaderWord(DWORD d)
}
// AppendEpilog: copy the epilog bytes to the next epilog bytes slot
-void UnwindPrologCodes::AppendEpilog(UnwindEpilogInfo* pEpi)
+void UnwindPrologCodes::AppendEpilog(UnwindEpilogInfo* pEpi)
{
assert(upcEpilogSlot != -1);
int epiSize = pEpi->Size();
- memcpy_s(&upcMem[upcEpilogSlot], upcMemSize - upcEpilogSlot - 3, pEpi->GetCodes(), epiSize); // -3 to avoid writing to the alignment padding
- assert(pEpi->GetStartIndex() == upcEpilogSlot - upcCodeSlot); // Make sure we copied it where we expected to copy it.
+ memcpy_s(&upcMem[upcEpilogSlot], upcMemSize - upcEpilogSlot - 3, pEpi->GetCodes(),
+ epiSize); // -3 to avoid writing to the alignment padding
+ assert(pEpi->GetStartIndex() ==
+ upcEpilogSlot - upcCodeSlot); // Make sure we copied it where we expected to copy it.
upcEpilogSlot += epiSize;
assert(upcEpilogSlot <= upcMemSize - 3);
}
-
// GetFinalInfo: return a pointer to the final unwind info to hand to the VM, and the size of this info in bytes
-void UnwindPrologCodes::GetFinalInfo(/* OUT */ BYTE** ppUnwindBlock, /* OUT */ ULONG* pUnwindBlockSize)
+void UnwindPrologCodes::GetFinalInfo(/* OUT */ BYTE** ppUnwindBlock, /* OUT */ ULONG* pUnwindBlockSize)
{
- assert(upcHeaderSlot + 1 == upcCodeSlot); // We better have filled in the header before asking for the final data!
+ assert(upcHeaderSlot + 1 == upcCodeSlot); // We better have filled in the header before asking for the final data!
- *ppUnwindBlock = &upcMem[upcUnwindBlockSlot];
+ *ppUnwindBlock = &upcMem[upcUnwindBlockSlot];
// We put 4 'end' codes at the end for padding, so we can ensure we have an
// unwind block that is a multiple of 4 bytes in size. Subtract off three 'end'
@@ -650,7 +649,6 @@ void UnwindPrologCodes::GetFinalInfo(/* OUT */ BYTE** ppUnwindBlock,
*pUnwindBlockSize = AlignUp((UINT)(upcMemSize - upcUnwindBlockSlot - 3), sizeof(DWORD));
}
-
// Do the argument unwind codes match our unwind codes?
// If they don't match, return -1. If they do, return the offset into
// our codes at which they match. Note that this means that the
@@ -664,7 +662,7 @@ void UnwindPrologCodes::GetFinalInfo(/* OUT */ BYTE** ppUnwindBlock,
// an existing 0xFF code to one of those, we might do that here.
#endif // defined(_TARGET_ARM_)
-int UnwindPrologCodes::Match(UnwindEpilogInfo* pEpi)
+int UnwindPrologCodes::Match(UnwindEpilogInfo* pEpi)
{
if (Size() < pEpi->Size())
{
@@ -681,16 +679,15 @@ int UnwindPrologCodes::Match(UnwindEpilogInfo* pEpi)
return -1;
}
-
// Copy the prolog codes from another prolog. The only time this is legal is
// if we are at the initial state and no prolog codes have been added.
// This is used to create the 'phantom' prolog for non-first fragments.
-void UnwindPrologCodes::CopyFrom(UnwindPrologCodes* pCopyFrom)
+void UnwindPrologCodes::CopyFrom(UnwindPrologCodes* pCopyFrom)
{
- assert(uwiComp == pCopyFrom->uwiComp);
- assert(upcMem == upcMemLocal);
- assert(upcMemSize == UPC_LOCAL_COUNT);
+ assert(uwiComp == pCopyFrom->uwiComp);
+ assert(upcMem == upcMemLocal);
+ assert(upcMemSize == UPC_LOCAL_COUNT);
assert(upcHeaderSlot == -1);
assert(upcEpilogSlot == -1);
@@ -706,8 +703,7 @@ void UnwindPrologCodes::CopyFrom(UnwindPrologCodes* pCopyFrom)
upcUnwindBlockSlot = pCopyFrom->upcUnwindBlockSlot;
}
-
-void UnwindPrologCodes::EnsureSize(int requiredSize)
+void UnwindPrologCodes::EnsureSize(int requiredSize)
{
if (requiredSize > upcMemSize)
{
@@ -721,31 +717,31 @@ void UnwindPrologCodes::EnsureSize(int requiredSize)
// do nothing
}
- BYTE * newUnwindCodes = new (uwiComp, CMK_UnwindInfo) BYTE[newSize];
- memcpy_s(newUnwindCodes + newSize - upcMemSize, upcMemSize, upcMem, upcMemSize); // copy the existing data to the end
+ BYTE* newUnwindCodes = new (uwiComp, CMK_UnwindInfo) BYTE[newSize];
+ memcpy_s(newUnwindCodes + newSize - upcMemSize, upcMemSize, upcMem,
+ upcMemSize); // copy the existing data to the end
#ifdef DEBUG
// Clear the old unwind codes; nobody should be looking at them
memset(upcMem, 0xFF, upcMemSize);
-#endif // DEBUG
+#endif // DEBUG
upcMem = newUnwindCodes; // we don't free anything that used to be there since we have a no-release allocator
upcCodeSlot += newSize - upcMemSize;
upcMemSize = newSize;
}
}
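Because prolog codes are appended from the back of the buffer toward the front, the grow path above copies the old contents to the tail of the larger buffer and shifts upcCodeSlot by the growth amount. A small sketch of that offset math, with assumed sizes:

    // Sketch only: mirrors the memcpy_s destination and the upcCodeSlot adjustment above.
    #include <cstdio>
    #include <cstring>

    int main()
    {
        unsigned char oldBuf[8]  = {0, 0, 0, 0, 1, 2, 3, 4}; // existing codes occupy the tail
        unsigned char newBuf[16] = {0};
        int oldSize = 8, newSize = 16, codeSlot = 4;          // assumed example values

        memcpy(newBuf + (newSize - oldSize), oldBuf, oldSize); // copy the existing data to the end
        codeSlot += newSize - oldSize;                          // slot indices shift by the growth amount

        printf("codeSlot moved to %d; last code byte is newBuf[%d] = %d\n", codeSlot, newSize - 1, newBuf[newSize - 1]);
        return 0;
    }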
-
#ifdef DEBUG
-void UnwindPrologCodes::Dump(int indent)
+void UnwindPrologCodes::Dump(int indent)
{
printf("%*sUnwindPrologCodes @0x%08p, size:%d:\n", indent, "", dspPtr(this), sizeof(*this));
- printf("%*s uwiComp: 0x%08p\n", indent, "", dspPtr(uwiComp));
+ printf("%*s uwiComp: 0x%08p\n", indent, "", dspPtr(uwiComp));
printf("%*s &upcMemLocal[0]: 0x%08p\n", indent, "", dspPtr(&upcMemLocal[0]));
- printf("%*s upcMem: 0x%08p\n", indent, "", dspPtr(upcMem));
- printf("%*s upcMemSize: %d\n", indent, "", upcMemSize);
- printf("%*s upcCodeSlot: %d\n", indent, "", upcCodeSlot);
- printf("%*s upcHeaderSlot: %d\n", indent, "", upcHeaderSlot);
- printf("%*s upcEpilogSlot: %d\n", indent, "", upcEpilogSlot);
- printf("%*s upcUnwindBlockSlot: %d\n", indent, "", upcUnwindBlockSlot);
+ printf("%*s upcMem: 0x%08p\n", indent, "", dspPtr(upcMem));
+ printf("%*s upcMemSize: %d\n", indent, "", upcMemSize);
+ printf("%*s upcCodeSlot: %d\n", indent, "", upcCodeSlot);
+ printf("%*s upcHeaderSlot: %d\n", indent, "", upcHeaderSlot);
+ printf("%*s upcEpilogSlot: %d\n", indent, "", upcEpilogSlot);
+ printf("%*s upcUnwindBlockSlot: %d\n", indent, "", upcUnwindBlockSlot);
if (upcMemSize > 0)
{
@@ -767,14 +763,13 @@ void UnwindPrologCodes::Dump(int indent)
}
#endif // DEBUG
-
///////////////////////////////////////////////////////////////////////////////
//
// UnwindEpilogCodes
//
///////////////////////////////////////////////////////////////////////////////
-void UnwindEpilogCodes::EnsureSize(int requiredSize)
+void UnwindEpilogCodes::EnsureSize(int requiredSize)
{
if (requiredSize > uecMemSize)
{
@@ -788,29 +783,28 @@ void UnwindEpilogCodes::EnsureSize(int requiredSize)
// do nothing
}
- BYTE * newUnwindCodes = new (uwiComp, CMK_UnwindInfo) BYTE[newSize];
+ BYTE* newUnwindCodes = new (uwiComp, CMK_UnwindInfo) BYTE[newSize];
memcpy_s(newUnwindCodes, newSize, uecMem, uecMemSize);
#ifdef DEBUG
// Clear the old unwind codes; nobody should be looking at them
memset(uecMem, 0xFF, uecMemSize);
-#endif // DEBUG
+#endif // DEBUG
uecMem = newUnwindCodes; // we don't free anything that used to be there since we have a no-release allocator
// uecCodeSlot stays the same
uecMemSize = newSize;
}
}
-
#ifdef DEBUG
-void UnwindEpilogCodes::Dump(int indent)
+void UnwindEpilogCodes::Dump(int indent)
{
printf("%*sUnwindEpilogCodes @0x%08p, size:%d:\n", indent, "", dspPtr(this), sizeof(*this));
- printf("%*s uwiComp: 0x%08p\n", indent, "", dspPtr(uwiComp));
+ printf("%*s uwiComp: 0x%08p\n", indent, "", dspPtr(uwiComp));
printf("%*s &uecMemLocal[0]: 0x%08p\n", indent, "", dspPtr(&uecMemLocal[0]));
- printf("%*s uecMem: 0x%08p\n", indent, "", dspPtr(uecMem));
- printf("%*s uecMemSize: %d\n", indent, "", uecMemSize);
- printf("%*s uecCodeSlot: %d\n", indent, "", uecCodeSlot);
- printf("%*s uecFinalized: %s\n", indent, "", dspBool(uecFinalized));
+ printf("%*s uecMem: 0x%08p\n", indent, "", dspPtr(uecMem));
+ printf("%*s uecMemSize: %d\n", indent, "", uecMemSize);
+ printf("%*s uecCodeSlot: %d\n", indent, "", uecCodeSlot);
+ printf("%*s uecFinalized: %s\n", indent, "", dspBool(uecFinalized));
if (uecMemSize > 0)
{
@@ -819,14 +813,13 @@ void UnwindEpilogCodes::Dump(int indent)
{
printf(" %02x", uecMem[i]);
if (i == uecCodeSlot)
- printf(" <-C"); // Indicate the current pointer
+ printf(" <-C"); // Indicate the current pointer
}
printf("\n");
}
}
#endif // DEBUG
-
///////////////////////////////////////////////////////////////////////////////
//
// UnwindEpilogInfo
@@ -842,7 +835,7 @@ void UnwindEpilogCodes::Dump(int indent)
// Note that if we wanted to handle 0xFD and 0xFE codes, by converting
// an existing 0xFF code to one of those, we might do that here.
-int UnwindEpilogInfo::Match(UnwindEpilogInfo* pEpi)
+int UnwindEpilogInfo::Match(UnwindEpilogInfo* pEpi)
{
if (Matches())
{
@@ -865,37 +858,33 @@ int UnwindEpilogInfo::Match(UnwindEpilogInfo* pEpi)
return -1;
}
-
-void UnwindEpilogInfo::CaptureEmitLocation()
+void UnwindEpilogInfo::CaptureEmitLocation()
{
- noway_assert(epiEmitLocation == NULL); // This function is only called once per epilog
+ noway_assert(epiEmitLocation == NULL); // This function is only called once per epilog
epiEmitLocation = new (uwiComp, CMK_UnwindInfo) emitLocation();
epiEmitLocation->CaptureLocation(uwiComp->genEmitter);
}
-
-void UnwindEpilogInfo::FinalizeOffset()
+void UnwindEpilogInfo::FinalizeOffset()
{
epiStartOffset = epiEmitLocation->CodeOffset(uwiComp->genEmitter);
}
-
#ifdef DEBUG
-void UnwindEpilogInfo::Dump(int indent)
+void UnwindEpilogInfo::Dump(int indent)
{
printf("%*sUnwindEpilogInfo @0x%08p, size:%d:\n", indent, "", dspPtr(this), sizeof(*this));
- printf("%*s uwiComp: 0x%08p\n", indent, "", dspPtr(uwiComp));
- printf("%*s epiNext: 0x%08p\n", indent, "", dspPtr(epiNext));
+ printf("%*s uwiComp: 0x%08p\n", indent, "", dspPtr(uwiComp));
+ printf("%*s epiNext: 0x%08p\n", indent, "", dspPtr(epiNext));
printf("%*s epiEmitLocation: 0x%08p\n", indent, "", dspPtr(epiEmitLocation));
- printf("%*s epiStartOffset: 0x%x\n", indent, "", epiStartOffset);
- printf("%*s epiMatches: %s\n", indent, "", dspBool(epiMatches));
- printf("%*s epiStartIndex: %d\n", indent, "", epiStartIndex);
+ printf("%*s epiStartOffset: 0x%x\n", indent, "", epiStartOffset);
+ printf("%*s epiMatches: %s\n", indent, "", dspBool(epiMatches));
+ printf("%*s epiStartIndex: %d\n", indent, "", epiStartIndex);
epiCodes.Dump(indent + 2);
}
#endif // DEBUG
-
///////////////////////////////////////////////////////////////////////////////
//
// UnwindFragmentInfo
@@ -903,27 +892,26 @@ void UnwindEpilogInfo::Dump(int indent)
///////////////////////////////////////////////////////////////////////////////
UnwindFragmentInfo::UnwindFragmentInfo(Compiler* comp, emitLocation* emitLoc, bool hasPhantomProlog)
- :
- UnwindBase(comp),
- ufiNext(NULL),
- ufiEmitLoc(emitLoc),
- ufiHasPhantomProlog(hasPhantomProlog),
- ufiPrologCodes(comp),
- ufiEpilogFirst(comp),
- ufiEpilogList(NULL),
- ufiEpilogLast(NULL),
- ufiCurCodes(&ufiPrologCodes),
- ufiSize(0),
- ufiStartOffset(UFI_ILLEGAL_OFFSET)
+ : UnwindBase(comp)
+ , ufiNext(NULL)
+ , ufiEmitLoc(emitLoc)
+ , ufiHasPhantomProlog(hasPhantomProlog)
+ , ufiPrologCodes(comp)
+ , ufiEpilogFirst(comp)
+ , ufiEpilogList(NULL)
+ , ufiEpilogLast(NULL)
+ , ufiCurCodes(&ufiPrologCodes)
+ , ufiSize(0)
+ , ufiStartOffset(UFI_ILLEGAL_OFFSET)
{
#ifdef DEBUG
- ufiNum = 1;
- ufiInProlog = true;
+ ufiNum = 1;
+ ufiInProlog = true;
ufiInitialized = UFI_INITIALIZED_PATTERN;
#endif // DEBUG
}
-void UnwindFragmentInfo::FinalizeOffset()
+void UnwindFragmentInfo::FinalizeOffset()
{
if (ufiEmitLoc == NULL)
{
@@ -941,7 +929,7 @@ void UnwindFragmentInfo::FinalizeOffset()
}
}
-void UnwindFragmentInfo::AddEpilog()
+void UnwindFragmentInfo::AddEpilog()
{
assert(ufiInitialized == UFI_INITIALIZED_PATTERN);
@@ -991,12 +979,11 @@ void UnwindFragmentInfo::AddEpilog()
ufiCurCodes = &newepi->epiCodes;
}
-
// Copy the prolog codes from the 'pCopyFrom' fragment. These prolog codes will
// become 'phantom' prolog codes in this fragment. Note that this fragment should
// not have any prolog codes currently; it is at the initial state.
-void UnwindFragmentInfo::CopyPrologCodes(UnwindFragmentInfo* pCopyFrom)
+void UnwindFragmentInfo::CopyPrologCodes(UnwindFragmentInfo* pCopyFrom)
{
ufiPrologCodes.CopyFrom(&pCopyFrom->ufiPrologCodes);
#ifdef _TARGET_ARM64_
@@ -1009,18 +996,16 @@ void UnwindFragmentInfo::CopyPrologCodes(UnwindFragmentInfo* pCopyFro
// from 'pSplitFrom' and moved to this fragment. Note that this fragment should not have
// any epilog codes currently; it is at the initial state.
-void UnwindFragmentInfo::SplitEpilogCodes(emitLocation* emitLoc, UnwindFragmentInfo* pSplitFrom)
+void UnwindFragmentInfo::SplitEpilogCodes(emitLocation* emitLoc, UnwindFragmentInfo* pSplitFrom)
{
UnwindEpilogInfo* pEpiPrev;
UnwindEpilogInfo* pEpi;
UNATIVE_OFFSET splitOffset = emitLoc->CodeOffset(uwiComp->genEmitter);
- for (pEpiPrev = NULL, pEpi = pSplitFrom->ufiEpilogList;
- pEpi != NULL;
- pEpiPrev = pEpi, pEpi = pEpi->epiNext)
+ for (pEpiPrev = NULL, pEpi = pSplitFrom->ufiEpilogList; pEpi != NULL; pEpiPrev = pEpi, pEpi = pEpi->epiNext)
{
- pEpi->FinalizeOffset(); // Get the offset of the epilog from the emitter so we can compare it
+ pEpi->FinalizeOffset(); // Get the offset of the epilog from the emitter so we can compare it
if (pEpi->GetStartOffset() >= splitOffset)
{
// This epilog and all following epilogs, which must be in order of increasing offsets,
@@ -1028,7 +1013,7 @@ void UnwindFragmentInfo::SplitEpilogCodes(emitLocation* emitLoc, Unwi
// Splice in the epilogs to this fragment. Set the head of the epilog
// list to this epilog.
- ufiEpilogList = pEpi; // In this case, don't use 'ufiEpilogFirst'
+ ufiEpilogList = pEpi; // In this case, don't use 'ufiEpilogFirst'
ufiEpilogLast = pSplitFrom->ufiEpilogLast;
// Splice out the tail of the list from the 'pSplitFrom' epilog list
@@ -1044,39 +1029,37 @@ void UnwindFragmentInfo::SplitEpilogCodes(emitLocation* emitLoc, Unwi
// No more codes should be added once we start splitting
pSplitFrom->ufiCurCodes = NULL;
- ufiCurCodes = NULL;
+ ufiCurCodes = NULL;
break;
}
}
}
-
// Is this epilog at the end of an unwind fragment? Ask the emitter.
// Note that we need to know this before all code offsets are finalized,
// so we can determine whether we can omit an epilog scope word for a
// single matching epilog.
-bool UnwindFragmentInfo::IsAtFragmentEnd(UnwindEpilogInfo* pEpi)
+bool UnwindFragmentInfo::IsAtFragmentEnd(UnwindEpilogInfo* pEpi)
{
return uwiComp->genEmitter->emitIsFuncEnd(pEpi->epiEmitLocation, (ufiNext == NULL) ? NULL : ufiNext->ufiEmitLoc);
}
-
// Merge the unwind codes as much as possible.
// This function is called before all offsets are final.
// Also, compute the size of the final unwind block. Store this
// and some other data for later, when we actually emit the
// unwind block.
-void UnwindFragmentInfo::MergeCodes()
+void UnwindFragmentInfo::MergeCodes()
{
assert(ufiInitialized == UFI_INITIALIZED_PATTERN);
- unsigned epilogCount = 0;
- unsigned epilogCodeBytes = 0; // The total number of unwind code bytes used by epilogs that don't match the
- // prolog codes
- unsigned epilogIndex = ufiPrologCodes.Size(); // The "Epilog Start Index" for the next non-matching epilog codes
+ unsigned epilogCount = 0;
+ unsigned epilogCodeBytes = 0; // The total number of unwind code bytes used by epilogs that don't match the
+ // prolog codes
+ unsigned epilogIndex = ufiPrologCodes.Size(); // The "Epilog Start Index" for the next non-matching epilog codes
UnwindEpilogInfo* pEpi;
for (pEpi = ufiEpilogList; pEpi != NULL; pEpi = pEpi->epiNext)
@@ -1086,7 +1069,8 @@ void UnwindFragmentInfo::MergeCodes()
pEpi->FinalizeCodes();
// Does this epilog match the prolog?
- // NOTE: for the purpose of matching, we don't handle the 0xFD and 0xFE end codes that allow slightly unequal prolog and epilog codes.
+ // NOTE: for the purpose of matching, we don't handle the 0xFD and 0xFE end codes that allow slightly unequal
+ // prolog and epilog codes.
int matchIndex;
@@ -1094,7 +1078,7 @@ void UnwindFragmentInfo::MergeCodes()
if (matchIndex != -1)
{
pEpi->SetMatches();
- pEpi->SetStartIndex(matchIndex); // Prolog codes start at zero, so matchIndex is exactly the start index
+ pEpi->SetStartIndex(matchIndex); // Prolog codes start at zero, so matchIndex is exactly the start index
}
else
{
@@ -1109,7 +1093,8 @@ void UnwindFragmentInfo::MergeCodes()
{
// Use the same epilog index as the one we matched, as it has already been set.
pEpi->SetMatches();
- pEpi->SetStartIndex(pEpi2->GetStartIndex() + matchIndex); // We might match somewhere inside pEpi2's codes, in which case matchIndex > 0
+ pEpi->SetStartIndex(pEpi2->GetStartIndex() + matchIndex); // We might match somewhere inside pEpi2's
+ // codes, in which case matchIndex > 0
matched = true;
break;
}
@@ -1117,52 +1102,51 @@ void UnwindFragmentInfo::MergeCodes()
if (!matched)
{
- pEpi->SetStartIndex(epilogIndex); // We'll copy these codes to the next available location
+ pEpi->SetStartIndex(epilogIndex); // We'll copy these codes to the next available location
epilogCodeBytes += pEpi->Size();
- epilogIndex += pEpi->Size();
+ epilogIndex += pEpi->Size();
}
}
}
DWORD codeBytes = ufiPrologCodes.Size() + epilogCodeBytes;
- codeBytes = AlignUp(codeBytes, sizeof(DWORD));
+ codeBytes = AlignUp(codeBytes, sizeof(DWORD));
- DWORD codeWords = codeBytes / sizeof(DWORD); // This is how many words we need to store all the unwind codes in the unwind block
+ DWORD codeWords =
+ codeBytes / sizeof(DWORD); // This is how many words we need to store all the unwind codes in the unwind block
// Do we need the 2nd header word for "Extended Code Words" or "Extended Epilog Count"?
- bool needExtendedCodeWordsEpilogCount = (codeWords > UW_MAX_CODE_WORDS_COUNT) || (epilogCount > UW_MAX_EPILOG_COUNT);
+ bool needExtendedCodeWordsEpilogCount =
+ (codeWords > UW_MAX_CODE_WORDS_COUNT) || (epilogCount > UW_MAX_EPILOG_COUNT);
// How many epilog scope words do we need?
- bool setEBit = false; // do we need to set the E bit?
- unsigned epilogScopes = epilogCount; // Note that this could be zero if we have no epilogs!
+ bool setEBit = false; // do we need to set the E bit?
+ unsigned epilogScopes = epilogCount; // Note that this could be zero if we have no epilogs!
if (epilogCount == 1)
{
assert(ufiEpilogList != NULL);
assert(ufiEpilogList->epiNext == NULL);
- if (ufiEpilogList->Matches() &&
- (ufiEpilogList->GetStartIndex() == 0) && // The match is with the prolog
- !needExtendedCodeWordsEpilogCount &&
- IsAtFragmentEnd(ufiEpilogList))
+ if (ufiEpilogList->Matches() && (ufiEpilogList->GetStartIndex() == 0) && // The match is with the prolog
+ !needExtendedCodeWordsEpilogCount && IsAtFragmentEnd(ufiEpilogList))
{
epilogScopes = 0; // Don't need any epilog scope words
- setEBit = true;
+ setEBit = true;
}
}
- DWORD headerBytes = (
- 1 // Always need first header DWORD
- + (needExtendedCodeWordsEpilogCount ? 1 : 0) // Do we need the 2nd DWORD for Extended Code Words or Extended Epilog Count?
- + epilogScopes // One DWORD per epilog scope, for EBit = 0
- ) * sizeof(DWORD); // convert it to bytes
+ DWORD headerBytes = (1 // Always need first header DWORD
+ + (needExtendedCodeWordsEpilogCount ? 1 : 0) // Do we need the 2nd DWORD for Extended Code
+ // Words or Extended Epilog Count?
+ + epilogScopes // One DWORD per epilog scope, for EBit = 0
+ ) *
+ sizeof(DWORD); // convert it to bytes
- DWORD finalSize =
- headerBytes
- + codeBytes; // Size of actual unwind codes, aligned up to 4-byte words,
- // including end padding if necessary
+ DWORD finalSize = headerBytes + codeBytes; // Size of actual unwind codes, aligned up to 4-byte words,
+ // including end padding if necessary
// Construct the final unwind information.
@@ -1191,29 +1175,28 @@ void UnwindFragmentInfo::MergeCodes()
// Save some data for later
- ufiSize = finalSize;
- ufiSetEBit = setEBit;
+ ufiSize = finalSize;
+ ufiSetEBit = setEBit;
ufiNeedExtendedCodeWordsEpilogCount = needExtendedCodeWordsEpilogCount;
- ufiCodeWords = codeWords;
- ufiEpilogScopes = epilogScopes;
+ ufiCodeWords = codeWords;
+ ufiEpilogScopes = epilogScopes;
}
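As a worked example of the sizing above (illustrative numbers, not from a real compilation): suppose the prolog plus non-matching epilog codes total 21 bytes and there are 2 epilog scopes that cannot use the E bit.

    // Sketch only: reproduces the codeWords / headerBytes / finalSize arithmetic above.
    #include <cstdio>

    int main()
    {
        unsigned codeBytes    = 21;    // prolog + non-matching epilog code bytes (assumed)
        unsigned epilogScopes = 2;     // assumed
        bool     needExtended = false; // code words and epilog count fit in the first header word

        codeBytes = (codeBytes + 3) & ~3u;            // AlignUp to DWORD: 21 -> 24
        unsigned codeWords   = codeBytes / 4;         // 6 words of unwind codes
        unsigned headerBytes = (1                     // first header DWORD
                                + (needExtended ? 1 : 0)
                                + epilogScopes)       // one DWORD per epilog scope when the E bit is not set
                               * 4;                   // -> 12 bytes
        unsigned finalSize = headerBytes + codeBytes; // -> 36 bytes handed to the VM
        printf("codeWords=%u headerBytes=%u finalSize=%u\n", codeWords, headerBytes, finalSize);
        return 0;
    }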
-
// Finalize: Prepare the unwind information for the VM. Compute and prepend the unwind header.
-void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength)
+void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength)
{
assert(ufiInitialized == UFI_INITIALIZED_PATTERN);
#ifdef DEBUG
- if (0&&uwiComp->verbose)
+ if (0 && uwiComp->verbose)
{
printf("*************** Before fragment #%d finalize\n", ufiNum);
Dump();
}
#endif
- // Compute the header
+// Compute the header
#if defined(_TARGET_ARM_)
noway_assert((functionLength & 1) == 0);
@@ -1223,20 +1206,22 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength)
DWORD headerFunctionLength = functionLength / 4;
#endif // _TARGET_ARM64_
- DWORD headerVers = 0; // Version of the unwind info is zero. No other version number is currently defined.
- DWORD headerXBit = 0; // We never generate "exception data", but the VM might add some.
+ DWORD headerVers = 0; // Version of the unwind info is zero. No other version number is currently defined.
+ DWORD headerXBit = 0; // We never generate "exception data", but the VM might add some.
DWORD headerEBit;
#if defined(_TARGET_ARM_)
- DWORD headerFBit = ufiHasPhantomProlog ? 1 : 0; // Is this data a fragment in the sense of the unwind data specification? That is, do the prolog codes represent a real prolog or not?
-#endif // defined(_TARGET_ARM_)
- DWORD headerEpilogCount; // This depends on how we set headerEBit.
+ DWORD headerFBit = ufiHasPhantomProlog ? 1 : 0; // Is this data a fragment in the sense of the unwind data
+ // specification? That is, do the prolog codes represent a real
+ // prolog or not?
+#endif // defined(_TARGET_ARM_)
+ DWORD headerEpilogCount; // This depends on how we set headerEBit.
DWORD headerCodeWords;
- DWORD headerExtendedEpilogCount = 0; // This depends on how we set headerEBit.
+ DWORD headerExtendedEpilogCount = 0; // This depends on how we set headerEBit.
DWORD headerExtendedCodeWords = 0;
if (ufiSetEBit)
{
- headerEBit = 1;
+ headerEBit = 1;
headerEpilogCount = ufiEpilogList->GetStartIndex(); // probably zero -- the start of the prolog codes!
headerCodeWords = ufiCodeWords;
}
@@ -1246,8 +1231,8 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength)
if (ufiNeedExtendedCodeWordsEpilogCount)
{
- headerEpilogCount = 0;
- headerCodeWords = 0;
+ headerEpilogCount = 0;
+ headerCodeWords = 0;
headerExtendedEpilogCount = ufiEpilogScopes;
headerExtendedCodeWords = ufiCodeWords;
}
@@ -1260,31 +1245,20 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength)
// Start writing the header
- noway_assert(headerFunctionLength <= 0x3FFFFU); // We create fragments to prevent this from firing, so if it hits, we have an internal error
+ noway_assert(headerFunctionLength <=
+ 0x3FFFFU); // We create fragments to prevent this from firing, so if it hits, we have an internal error
- if ((headerEpilogCount > UW_MAX_EPILOG_COUNT) ||
- (headerCodeWords > UW_MAX_CODE_WORDS_COUNT))
+ if ((headerEpilogCount > UW_MAX_EPILOG_COUNT) || (headerCodeWords > UW_MAX_CODE_WORDS_COUNT))
{
IMPL_LIMITATION("unwind data too large");
}
#if defined(_TARGET_ARM_)
- DWORD header =
- headerFunctionLength |
- (headerVers << 18) |
- (headerXBit << 20) |
- (headerEBit << 21) |
- (headerFBit << 22) |
- (headerEpilogCount << 23) |
- (headerCodeWords << 28) ;
+ DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) |
+ (headerFBit << 22) | (headerEpilogCount << 23) | (headerCodeWords << 28);
#elif defined(_TARGET_ARM64_)
- DWORD header =
- headerFunctionLength |
- (headerVers << 18) |
- (headerXBit << 20) |
- (headerEBit << 21) |
- (headerEpilogCount << 22) |
- (headerCodeWords << 27) ;
+ DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) |
+ (headerEpilogCount << 22) | (headerCodeWords << 27);
#endif // defined(_TARGET_ARM64_)
ufiPrologCodes.AddHeaderWord(header);
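To make the bit packing above concrete, here is a sketch that assembles the ARM64 first header word from sample field values (the inputs are assumptions for illustration; the shift positions are the ones used in the expression above):

    // Sketch only: packs the ARM64 first header word exactly as the expression above does.
    #include <cstdio>
    typedef unsigned int DWORD;

    int main()
    {
        DWORD headerFunctionLength = 0x40; // function length / 4, assumed example value
        DWORD headerVers        = 0;       // unwind info version is always zero here
        DWORD headerXBit        = 0;       // no exception data emitted by the JIT
        DWORD headerEBit        = 1;       // single epilog that matches the prolog
        DWORD headerEpilogCount = 0;       // with E == 1 this holds the epilog start index
        DWORD headerCodeWords   = 6;       // number of 4-byte unwind code words

        DWORD header = headerFunctionLength | (headerVers << 18) | (headerXBit << 20) | (headerEBit << 21) |
                       (headerEpilogCount << 22) | (headerCodeWords << 27);
        printf("header = 0x%08X\n", header); // 0x30200040 for these inputs
        return 0;
    }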
@@ -1296,17 +1270,16 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength)
noway_assert(headerEBit == 0);
noway_assert(headerEpilogCount == 0);
noway_assert(headerCodeWords == 0);
- noway_assert((headerExtendedEpilogCount > UW_MAX_EPILOG_COUNT) || (headerExtendedCodeWords > UW_MAX_CODE_WORDS_COUNT));
+ noway_assert((headerExtendedEpilogCount > UW_MAX_EPILOG_COUNT) ||
+ (headerExtendedCodeWords > UW_MAX_CODE_WORDS_COUNT));
- if ((headerExtendedEpilogCount > UW_MAX_EXTENDED_EPILOG_COUNT) ||
+ if ((headerExtendedEpilogCount > UW_MAX_EXTENDED_EPILOG_COUNT) ||
(headerExtendedCodeWords > UW_MAX_EXTENDED_CODE_WORDS_COUNT))
{
IMPL_LIMITATION("unwind data too large");
}
- DWORD header2 =
- headerExtendedEpilogCount |
- (headerExtendedCodeWords << 16) ;
+ DWORD header2 = headerExtendedEpilogCount | (headerExtendedCodeWords << 16);
ufiPrologCodes.AddHeaderWord(header2);
}
@@ -1318,8 +1291,8 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength)
for (UnwindEpilogInfo* pEpi = ufiEpilogList; pEpi != NULL; pEpi = pEpi->epiNext)
{
#if defined(_TARGET_ARM_)
- DWORD headerCondition = 0xE; // The epilog is unconditional. We don't have epilogs under the IT instruction.
-#endif // defined(_TARGET_ARM_)
+ DWORD headerCondition = 0xE; // The epilog is unconditional. We don't have epilogs under the IT instruction.
+#endif // defined(_TARGET_ARM_)
// The epilog must strictly follow the prolog. The prolog is in the first fragment of
// the hot section. If this epilog is at the start of a fragment, it can't be the
@@ -1334,29 +1307,26 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength)
#if defined(_TARGET_ARM_)
noway_assert((headerEpilogStartOffset & 1) == 0);
- headerEpilogStartOffset /= 2; // The unwind data stores the actual offset divided by 2 (since the low bit of the actual offset is always zero)
+ headerEpilogStartOffset /= 2; // The unwind data stores the actual offset divided by 2 (since the low bit of
+ // the actual offset is always zero)
#elif defined(_TARGET_ARM64_)
noway_assert((headerEpilogStartOffset & 3) == 0);
- headerEpilogStartOffset /= 4; // The unwind data stores the actual offset divided by 4 (since the low 2 bits of the actual offset is always zero)
+ headerEpilogStartOffset /= 4; // The unwind data stores the actual offset divided by 4 (since the low 2 bits
+ // of the actual offset is always zero)
#endif // defined(_TARGET_ARM64_)
DWORD headerEpilogStartIndex = pEpi->GetStartIndex();
- if ((headerEpilogStartOffset > UW_MAX_EPILOG_START_OFFSET) ||
+ if ((headerEpilogStartOffset > UW_MAX_EPILOG_START_OFFSET) ||
(headerEpilogStartIndex > UW_MAX_EPILOG_START_INDEX))
{
IMPL_LIMITATION("unwind data too large");
}
#if defined(_TARGET_ARM_)
- DWORD epilogScopeWord =
- headerEpilogStartOffset |
- (headerCondition << 20) |
- (headerEpilogStartIndex << 24) ;
+ DWORD epilogScopeWord = headerEpilogStartOffset | (headerCondition << 20) | (headerEpilogStartIndex << 24);
#elif defined(_TARGET_ARM64_)
- DWORD epilogScopeWord =
- headerEpilogStartOffset |
- (headerEpilogStartIndex << 22) ;
+ DWORD epilogScopeWord = headerEpilogStartOffset | (headerEpilogStartIndex << 22);
#endif // defined(_TARGET_ARM64_)
ufiPrologCodes.AddHeaderWord(epilogScopeWord);
@@ -1366,10 +1336,9 @@ void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength)
// The unwind code words are already here, following the header, so we're done!
}
-
-void UnwindFragmentInfo::Reserve(BOOL isFunclet, bool isHotCode)
+void UnwindFragmentInfo::Reserve(BOOL isFunclet, bool isHotCode)
{
- assert(isHotCode || !isFunclet); // TODO-CQ: support hot/cold splitting in functions with EH
+ assert(isHotCode || !isFunclet); // TODO-CQ: support hot/cold splitting in functions with EH
MergeCodes();
@@ -1388,7 +1357,6 @@ void UnwindFragmentInfo::Reserve(BOOL isFunclet, bool isHotCode)
uwiComp->eeReserveUnwindInfo(isFunclet, isColdCode, unwindSize);
}
-
// Allocate the unwind info for a fragment with the VM.
// Arguments:
// funKind: funclet kind
@@ -1398,7 +1366,8 @@ void UnwindFragmentInfo::Reserve(BOOL isFunclet, bool isHotCode)
// function/funclet.
// isHotCode: are we allocating the unwind info for the hot code section?
-void UnwindFragmentInfo::Allocate(CorJitFuncKind funKind, void* pHotCode, void* pColdCode, UNATIVE_OFFSET funcEndOffset, bool isHotCode)
+void UnwindFragmentInfo::Allocate(
+ CorJitFuncKind funKind, void* pHotCode, void* pColdCode, UNATIVE_OFFSET funcEndOffset, bool isHotCode)
{
UNATIVE_OFFSET startOffset;
UNATIVE_OFFSET endOffset;
@@ -1463,7 +1432,7 @@ void UnwindFragmentInfo::Allocate(CorJitFuncKind funKind, void* pHotC
{
assert(startOffset >= uwiComp->info.compTotalHotCodeSize);
startOffset -= uwiComp->info.compTotalHotCodeSize;
- endOffset -= uwiComp->info.compTotalHotCodeSize;
+ endOffset -= uwiComp->info.compTotalHotCodeSize;
}
#ifdef DEBUG
@@ -1474,20 +1443,14 @@ void UnwindFragmentInfo::Allocate(CorJitFuncKind funKind, void* pHotC
}
#endif // DEBUG
- uwiComp->eeAllocUnwindInfo((BYTE*)pHotCode,
- (BYTE*)pColdCode,
- startOffset,
- endOffset,
- unwindBlockSize,
- pUnwindBlock,
+ uwiComp->eeAllocUnwindInfo((BYTE*)pHotCode, (BYTE*)pColdCode, startOffset, endOffset, unwindBlockSize, pUnwindBlock,
funKind);
}
-
#ifdef DEBUG
-void UnwindFragmentInfo::Dump(int indent)
+void UnwindFragmentInfo::Dump(int indent)
{
- unsigned count;
+ unsigned count;
UnwindEpilogInfo* pEpi;
count = 0;
@@ -1497,22 +1460,22 @@ void UnwindFragmentInfo::Dump(int indent)
}
printf("%*sUnwindFragmentInfo #%d, @0x%08p, size:%d:\n", indent, "", ufiNum, dspPtr(this), sizeof(*this));
- printf("%*s uwiComp: 0x%08p\n", indent, "", dspPtr(uwiComp));
- printf("%*s ufiNext: 0x%08p\n", indent, "", dspPtr(ufiNext));
- printf("%*s ufiEmitLoc: 0x%08p\n", indent, "", dspPtr(ufiEmitLoc));
+ printf("%*s uwiComp: 0x%08p\n", indent, "", dspPtr(uwiComp));
+ printf("%*s ufiNext: 0x%08p\n", indent, "", dspPtr(ufiNext));
+ printf("%*s ufiEmitLoc: 0x%08p\n", indent, "", dspPtr(ufiEmitLoc));
printf("%*s ufiHasPhantomProlog: %s\n", indent, "", dspBool(ufiHasPhantomProlog));
- printf("%*s %d epilog%s\n", indent, "", count, (count != 1) ? "s" : "");
- printf("%*s ufiEpilogList: 0x%08p\n", indent, "", dspPtr(ufiEpilogList));
- printf("%*s ufiEpilogLast: 0x%08p\n", indent, "", dspPtr(ufiEpilogLast));
- printf("%*s ufiCurCodes: 0x%08p\n", indent, "", dspPtr(ufiCurCodes));
- printf("%*s ufiSize: %u\n", indent, "", ufiSize);
- printf("%*s ufiSetEBit: %s\n", indent, "", dspBool(ufiSetEBit));
+ printf("%*s %d epilog%s\n", indent, "", count, (count != 1) ? "s" : "");
+ printf("%*s ufiEpilogList: 0x%08p\n", indent, "", dspPtr(ufiEpilogList));
+ printf("%*s ufiEpilogLast: 0x%08p\n", indent, "", dspPtr(ufiEpilogLast));
+ printf("%*s ufiCurCodes: 0x%08p\n", indent, "", dspPtr(ufiCurCodes));
+ printf("%*s ufiSize: %u\n", indent, "", ufiSize);
+ printf("%*s ufiSetEBit: %s\n", indent, "", dspBool(ufiSetEBit));
printf("%*s ufiNeedExtendedCodeWordsEpilogCount: %s\n", indent, "", dspBool(ufiNeedExtendedCodeWordsEpilogCount));
- printf("%*s ufiCodeWords: %u\n", indent, "", ufiCodeWords);
- printf("%*s ufiEpilogScopes: %u\n", indent, "", ufiEpilogScopes);
- printf("%*s ufiStartOffset: 0x%x\n", indent, "", ufiStartOffset);
- printf("%*s ufiInProlog: %s\n", indent, "", dspBool(ufiInProlog));
- printf("%*s ufiInitialized: 0x%08x\n", indent, "", ufiInitialized);
+ printf("%*s ufiCodeWords: %u\n", indent, "", ufiCodeWords);
+ printf("%*s ufiEpilogScopes: %u\n", indent, "", ufiEpilogScopes);
+ printf("%*s ufiStartOffset: 0x%x\n", indent, "", ufiStartOffset);
+ printf("%*s ufiInProlog: %s\n", indent, "", dspBool(ufiInProlog));
+ printf("%*s ufiInitialized: 0x%08x\n", indent, "", ufiInitialized);
ufiPrologCodes.Dump(indent + 2);
@@ -1523,14 +1486,13 @@ void UnwindFragmentInfo::Dump(int indent)
}
#endif // DEBUG
-
///////////////////////////////////////////////////////////////////////////////
//
// UnwindInfo
//
///////////////////////////////////////////////////////////////////////////////
-void UnwindInfo::InitUnwindInfo(Compiler* comp, emitLocation* startLoc, emitLocation* endLoc)
+void UnwindInfo::InitUnwindInfo(Compiler* comp, emitLocation* startLoc, emitLocation* endLoc)
{
uwiComp = comp;
@@ -1553,16 +1515,15 @@ void UnwindInfo::InitUnwindInfo(Compiler* comp, emitLocation* startLo
#ifdef DEBUG
uwiInitialized = UWI_INITIALIZED_PATTERN;
- uwiAddingNOP = false;
+ uwiAddingNOP = false;
#endif // DEBUG
}
-
// Split the unwind codes in 'puwi' into those that are in the hot section (leave them in 'puwi')
// and those that are in the cold section (move them to 'this'). There is exactly one fragment
// in each UnwindInfo; the fragments haven't been split for size, yet.
-void UnwindInfo::HotColdSplitCodes(UnwindInfo* puwi)
+void UnwindInfo::HotColdSplitCodes(UnwindInfo* puwi)
{
// Ensure that there is exactly a single fragment in both the hot and the cold sections
assert(&uwiFragmentFirst == uwiFragmentLast);
@@ -1578,7 +1539,6 @@ void UnwindInfo::HotColdSplitCodes(UnwindInfo* puwi)
uwiFragmentLast->SplitEpilogCodes(uwiFragmentLast->ufiEmitLoc, puwi->uwiFragmentLast);
}
-
// Split the function or funclet into fragments that are no larger than 512K,
// so the fragment size will fit in the unwind data "Function Length" field.
// The ARM Exception Data specification "Function Fragments" section describes this.
@@ -1590,20 +1550,20 @@ void UnwindInfo::HotColdSplitCodes(UnwindInfo* puwi)
// actually occur on ARM), so we don't finalize actual sizes or offsets.
//
// ARM64 has very similar limitations, except functions can be up to 1MB. TODO-ARM64-Bug?: make sure this works!
-//
+//
// We don't split any prolog or epilog. Ideally, we might not split an instruction,
// although that doesn't matter because the unwind at any point would still be
// well-defined.
-void UnwindInfo::Split()
+void UnwindInfo::Split()
{
- UNATIVE_OFFSET maxFragmentSize; // The maximum size of a code fragment in bytes
+ UNATIVE_OFFSET maxFragmentSize; // The maximum size of a code fragment in bytes
maxFragmentSize = UW_MAX_FRAGMENT_SIZE_BYTES;
#ifdef DEBUG
// Consider COMPlus_JitSplitFunctionSize
- unsigned splitFunctionSize = (unsigned) JitConfig.JitSplitFunctionSize();
+ unsigned splitFunctionSize = (unsigned)JitConfig.JitSplitFunctionSize();
if (splitFunctionSize != 0)
if (splitFunctionSize < maxFragmentSize)
@@ -1639,7 +1599,8 @@ void UnwindInfo::Split()
// compNativeCodeSize is precise, but is only set after instructions are issued, which is too late
// for us, since we need to decide how many fragments we need before the code memory is allocated
// (which is before instruction issuing).
- UNATIVE_OFFSET estimatedTotalCodeSize = uwiComp->info.compTotalHotCodeSize + uwiComp->info.compTotalColdCodeSize;
+ UNATIVE_OFFSET estimatedTotalCodeSize =
+ uwiComp->info.compTotalHotCodeSize + uwiComp->info.compTotalColdCodeSize;
assert(estimatedTotalCodeSize != 0);
endOffset = estimatedTotalCodeSize;
}
@@ -1648,13 +1609,13 @@ void UnwindInfo::Split()
endOffset = uwiEndLoc->CodeOffset(uwiComp->genEmitter);
}
- assert(endOffset > startOffset); // there better be at least 1 byte of code
+ assert(endOffset > startOffset); // there better be at least 1 byte of code
codeSize = endOffset - startOffset;
// Now that we know the code size for this section (main function hot or cold, or funclet),
// figure out how many fragments we're going to need.
- UNATIVE_OFFSET numberOfFragments = (codeSize + maxFragmentSize - 1) / maxFragmentSize; // round up
+ UNATIVE_OFFSET numberOfFragments = (codeSize + maxFragmentSize - 1) / maxFragmentSize; // round up
assert(numberOfFragments > 0);
if (numberOfFragments == 1)
@@ -1675,14 +1636,13 @@ void UnwindInfo::Split()
if (uwiComp->verbose)
{
printf("Split unwind info into %d fragments (function/funclet size: %d, maximum fragment size: %d)\n",
- numberOfFragments,
- codeSize,
- maxFragmentSize);
+ numberOfFragments, codeSize, maxFragmentSize);
}
#endif // DEBUG
// Call the emitter to do the split, and call us back for every split point it chooses.
- uwiComp->genEmitter->emitSplit(uwiFragmentLast->ufiEmitLoc, uwiEndLoc, maxFragmentSize, (void*)this, EmitSplitCallback);
+ uwiComp->genEmitter->emitSplit(uwiFragmentLast->ufiEmitLoc, uwiEndLoc, maxFragmentSize, (void*)this,
+ EmitSplitCallback);
#ifdef DEBUG
// Did the emitter split the function/funclet into as many fragments as we asked for?
@@ -1698,9 +1658,7 @@ void UnwindInfo::Split()
{
if (uwiComp->verbose)
{
- printf("WARNING: asked the emitter for %d fragments, but only got %d\n",
- numberOfFragments,
- fragCount);
+ printf("WARNING: asked the emitter for %d fragments, but only got %d\n", numberOfFragments, fragCount);
}
// If this fires, then we split into fewer fragments than we asked for, and we are using
@@ -1721,7 +1679,7 @@ void UnwindInfo::Split()
// Reserve space for the unwind info for all fragments
-void UnwindInfo::Reserve(BOOL isFunclet, bool isHotCode)
+void UnwindInfo::Reserve(BOOL isFunclet, bool isHotCode)
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
assert(isHotCode || !isFunclet);
@@ -1732,10 +1690,9 @@ void UnwindInfo::Reserve(BOOL isFunclet, bool isHotCode)
}
}
-
// Allocate and populate VM unwind info for all fragments
-void UnwindInfo::Allocate(CorJitFuncKind funKind, void* pHotCode, void* pColdCode, bool isHotCode)
+void UnwindInfo::Allocate(CorJitFuncKind funKind, void* pHotCode, void* pColdCode, bool isHotCode)
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
@@ -1767,8 +1724,7 @@ void UnwindInfo::Allocate(CorJitFuncKind funKind, void* pHotCode, voi
}
}
-
-void UnwindInfo::AddEpilog()
+void UnwindInfo::AddEpilog()
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
assert(uwiFragmentLast != NULL);
@@ -1776,10 +1732,9 @@ void UnwindInfo::AddEpilog()
CaptureLocation();
}
-
#if defined(_TARGET_ARM_)
-unsigned UnwindInfo::GetInstructionSize()
+unsigned UnwindInfo::GetInstructionSize()
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
return uwiComp->genEmitter->emitGetInstructionSize(uwiCurLoc);
@@ -1787,16 +1742,14 @@ unsigned UnwindInfo::GetInstructionSize()
#endif // defined(_TARGET_ARM_)
-
-void UnwindInfo::CaptureLocation()
+void UnwindInfo::CaptureLocation()
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
assert(uwiCurLoc != NULL);
uwiCurLoc->CaptureLocation(uwiComp->genEmitter);
}
-
-void UnwindInfo::AddFragment(emitLocation* emitLoc)
+void UnwindInfo::AddFragment(emitLocation* emitLoc)
{
assert(uwiInitialized == UWI_INITIALIZED_PATTERN);
assert(uwiFragmentLast != NULL);
@@ -1812,17 +1765,16 @@ void UnwindInfo::AddFragment(emitLocation* emitLoc)
// Link the new fragment in at the end of the fragment list
uwiFragmentLast->ufiNext = newFrag;
- uwiFragmentLast = newFrag;
+ uwiFragmentLast = newFrag;
}
-
#ifdef DEBUG
#if defined(_TARGET_ARM_)
// Given the first byte of the unwind code, check that its opsize matches
// the last instruction added in the emitter.
-void UnwindInfo::CheckOpsize(BYTE b1)
+void UnwindInfo::CheckOpsize(BYTE b1)
{
// Adding NOP padding goes through the same path, but doesn't update the location to indicate
// the correct location of the instruction for which we are adding a NOP, so just skip the
@@ -1831,17 +1783,16 @@ void UnwindInfo::CheckOpsize(BYTE b1)
if (uwiAddingNOP)
return;
- unsigned opsizeInBytes = GetOpcodeSizeFromUnwindHeader(b1);
+ unsigned opsizeInBytes = GetOpcodeSizeFromUnwindHeader(b1);
unsigned instrSizeInBytes = GetInstructionSize();
assert(opsizeInBytes == instrSizeInBytes);
}
#endif // defined(_TARGET_ARM_)
-
-void UnwindInfo::Dump(bool isHotCode, int indent)
+void UnwindInfo::Dump(bool isHotCode, int indent)
{
- unsigned count;
+ unsigned count;
UnwindFragmentInfo* pFrag;
count = 0;
@@ -1850,12 +1801,12 @@ void UnwindInfo::Dump(bool isHotCode, int indent)
++count;
}
- printf("%*sUnwindInfo %s@0x%08p, size:%d:\n", indent, "", isHotCode ? "" : "COLD ", dspPtr(this), sizeof(*this));
- printf("%*s uwiComp: 0x%08p\n", indent, "", dspPtr(uwiComp));
- printf("%*s %d fragment%s\n", indent, "", count, (count != 1) ? "s" : "");
+ printf("%*sUnwindInfo %s@0x%08p, size:%d:\n", indent, "", isHotCode ? "" : "COLD ", dspPtr(this), sizeof(*this));
+ printf("%*s uwiComp: 0x%08p\n", indent, "", dspPtr(uwiComp));
+ printf("%*s %d fragment%s\n", indent, "", count, (count != 1) ? "s" : "");
printf("%*s uwiFragmentLast: 0x%08p\n", indent, "", dspPtr(uwiFragmentLast));
- printf("%*s uwiEndLoc: 0x%08p\n", indent, "", dspPtr(uwiEndLoc));
- printf("%*s uwiInitialized: 0x%08x\n", indent, "", uwiInitialized);
+ printf("%*s uwiEndLoc: 0x%08p\n", indent, "", dspPtr(uwiEndLoc));
+ printf("%*s uwiInitialized: 0x%08x\n", indent, "", uwiInitialized);
for (pFrag = &uwiFragmentFirst; pFrag != NULL; pFrag = pFrag->ufiNext)
{
@@ -1896,7 +1847,7 @@ DWORD DumpIntRegSet(DWORD x, DWORD lr)
printf("{");
++printed;
- bool first = true;
+ bool first = true;
DWORD bitMask = 1;
for (DWORD bitNum = 0; bitNum < 12; bitNum++)
{
@@ -1937,7 +1888,7 @@ DWORD DumpIntRegSet(DWORD x, DWORD lr)
DWORD DumpRegSetRange(const char* const rtype, DWORD start, DWORD end, DWORD lr)
{
assert(start <= end);
- DWORD printed = 0;
+ DWORD printed = 0;
DWORD rtypeLen = strlen(rtype);
printf("{");
@@ -1971,7 +1922,7 @@ DWORD DumpRegSetRange(const char* const rtype, DWORD start, DWORD end, DWORD lr)
DWORD DumpOpsize(DWORD padding, DWORD opsize)
{
if (padding > 100) // underflow?
- padding = 4;
+ padding = 4;
DWORD printed = padding;
for (; padding > 0; padding--)
printf(" ");
@@ -1987,13 +1938,18 @@ DWORD DumpOpsize(DWORD padding, DWORD opsize)
// pHeader: pointer to the unwind data blob
// unwindBlockSize: size in bytes of the unwind data blob
-void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, const BYTE * const pHeader, ULONG unwindBlockSize)
+void DumpUnwindInfo(Compiler* comp,
+ bool isHotCode,
+ UNATIVE_OFFSET startOffset,
+ UNATIVE_OFFSET endOffset,
+ const BYTE* const pHeader,
+ ULONG unwindBlockSize)
{
printf("Unwind Info%s:\n", isHotCode ? "" : " COLD");
// pHeader is not guaranteed to be aligned. We put four 0xFF end codes at the end
// to provide padding, and round down to get a multiple of 4 bytes in size.
- DWORD UNALIGNED* pdw = (DWORD UNALIGNED *)pHeader;
+ DWORD UNALIGNED* pdw = (DWORD UNALIGNED*)pHeader;
DWORD dw;
dw = *pdw++;
@@ -2014,9 +1970,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
printf(" E bit : %u\n", EBit);
printf(" X bit : %u\n", XBit);
printf(" Vers : %u\n", Vers);
- printf(" Function Length : %u (0x%05x) Actual length = %u (0x%06x)\n",
- functionLength, functionLength,
- functionLength * 2, functionLength * 2);
+ printf(" Function Length : %u (0x%05x) Actual length = %u (0x%06x)\n", functionLength, functionLength,
+ functionLength * 2, functionLength * 2);
assert(functionLength * 2 == endOffset - startOffset);
@@ -2027,8 +1982,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
dw = *pdw++;
- codeWords = ExtractBits(dw, 16, 8);
- epilogCount = ExtractBits(dw, 0, 16);
+ codeWords = ExtractBits(dw, 16, 8);
+ epilogCount = ExtractBits(dw, 0, 16);
assert((dw & 0xF0000000) == 0); // reserved field should be zero
printf(" ---- Extension word ----\n");
@@ -2067,11 +2022,14 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
assert(res == 0);
printf(" ---- Scope %d\n", scope);
- printf(" Epilog Start Offset : %u (0x%05x) Actual offset = %u (0x%06x) Offset from main function begin = %u (0x%06x)\n",
- comp->dspOffset(epilogStartOffset), comp->dspOffset(epilogStartOffset),
- comp->dspOffset(epilogStartOffset * 2), comp->dspOffset(epilogStartOffset * 2),
- comp->dspOffset(epilogStartOffsetFromMainFunctionBegin), comp->dspOffset(epilogStartOffsetFromMainFunctionBegin));
- printf(" Condition : %u (0x%x)%s\n", condition, condition, (condition == 0xE) ? " (always)" : "");
+ printf(" Epilog Start Offset : %u (0x%05x) Actual offset = %u (0x%06x) Offset from main "
+ "function begin = %u (0x%06x)\n",
+ comp->dspOffset(epilogStartOffset), comp->dspOffset(epilogStartOffset),
+ comp->dspOffset(epilogStartOffset * 2), comp->dspOffset(epilogStartOffset * 2),
+ comp->dspOffset(epilogStartOffsetFromMainFunctionBegin),
+ comp->dspOffset(epilogStartOffsetFromMainFunctionBegin));
+ printf(" Condition : %u (0x%x)%s\n", condition, condition,
+ (condition == 0xE) ? " (always)" : "");
printf(" Epilog Start Index : %u (0x%02x)\n", epilogStartIndex, epilogStartIndex);
epilogStartAt[epilogStartIndex] = true; // an epilog starts at this offset in the unwind codes
@@ -2095,8 +2053,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
printf(" ---- Unwind codes ----\n");
DWORD countOfUnwindCodes = codeWords * 4;
- PBYTE pUnwindCode = (PBYTE)pdw;
- BYTE b1, b2, b3, b4;
+ PBYTE pUnwindCode = (PBYTE)pdw;
+ BYTE b1, b2, b3, b4;
DWORD x, y;
DWORD opsize;
DWORD opCol = 52;
@@ -2126,7 +2084,7 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
i++;
DWORD LBit = ExtractBits(b1, 5, 1);
- x = ((DWORD)(b1 & 0x1F) << 8) | (DWORD)b2;
+ x = ((DWORD)(b1 & 0x1F) << 8) | (DWORD)b2;
printf(" %02X %02X pop ", b1, b2);
printed = 20;
@@ -2144,7 +2102,7 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
else if ((b1 & 0xF8) == 0xD0)
{
// D0-D7 : pop {r4-rX,lr} (X=4-7) (opsize 16)
- x = b1 & 0x3;
+ x = b1 & 0x3;
DWORD LBit = b1 & 0x4;
printf(" %02X pop ", b1);
printed = 20;
@@ -2154,7 +2112,7 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
else if ((b1 & 0xF8) == 0xD8)
{
// D8-DF : pop {r4-rX,lr} (X=8-11) (opsize 32)
- x = b1 & 0x3;
+ x = b1 & 0x3;
DWORD LBit = b1 & 0x4;
printf(" %02X pop ", b1);
printed = 20;
@@ -2190,7 +2148,7 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
i++;
DWORD LBit = ExtractBits(b1, 0, 1);
- x = (DWORD)b2;
+ x = (DWORD)b2;
printf(" %02X %02X pop ", b1, b2);
printed = 20;
diff --git a/src/jit/unwindarm64.cpp b/src/jit/unwindarm64.cpp
index b23797c56b..21e2a36b2a 100644
--- a/src/jit/unwindarm64.cpp
+++ b/src/jit/unwindarm64.cpp
@@ -25,7 +25,7 @@ void Compiler::unwindPush(regNumber reg)
void Compiler::unwindAllocStack(unsigned size)
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
assert(size % 16 == 0);
unsigned x = size / 16;
@@ -42,8 +42,7 @@ void Compiler::unwindAllocStack(unsigned size)
// alloc_m: 11000xxx | xxxxxxxx: allocate large stack with size < 16k (2^11 * 16)
// TODO-Review: should say size < 32K
- pu->AddCode(0xC0 | (BYTE)(x >> 8),
- (BYTE)x);
+ pu->AddCode(0xC0 | (BYTE)(x >> 8), (BYTE)x);
}
else
{
@@ -52,16 +51,13 @@ void Compiler::unwindAllocStack(unsigned size)
// For large stack size, the most significant bits
// are stored first (and next to the opCode) per the unwind spec.
- pu->AddCode(0xE0,
- (BYTE)(x >> 16),
- (BYTE)(x >> 8),
- (BYTE)x);
+ pu->AddCode(0xE0, (BYTE)(x >> 16), (BYTE)(x >> 8), (BYTE)x);
}
}
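A worked example of the two encodings visible above (the sizes are illustrative assumptions): a 4096-byte frame fits the two-byte 0xC0 form, while a 128 KB frame needs the four-byte 0xE0 form with the most significant bits stored first, per the unwind spec.

    // Sketch only: computes the bytes the AddCode calls above would emit for two sample sizes.
    #include <cstdio>
    typedef unsigned char BYTE;

    int main()
    {
        // alloc_m: 11000xxx | xxxxxxxx, allocation size = x * 16
        unsigned x = 4096 / 16;                        // 0x100
        BYTE m1 = 0xC0 | (BYTE)(x >> 8), m2 = (BYTE)x; // C1 00

        // 0xE0 form: opcode followed by three size bytes, most significant first, size = y * 16
        unsigned y = (128 * 1024) / 16;                // 0x2000
        BYTE l1 = 0xE0, l2 = (BYTE)(y >> 16), l3 = (BYTE)(y >> 8), l4 = (BYTE)y; // E0 00 20 00

        printf("alloc_m: %02X %02X   large alloc: %02X %02X %02X %02X\n", m1, m2, l1, l2, l3, l4);
        return 0;
    }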
void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset)
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
if (offset == 0)
{
@@ -91,7 +87,7 @@ void Compiler::unwindSaveReg(regNumber reg, unsigned offset)
void Compiler::unwindNop()
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
#ifdef DEBUG
if (verbose)
@@ -115,13 +111,12 @@ void Compiler::unwindNop()
// which we should do instead).
void Compiler::unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset)
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
// stp reg1, reg2, [sp, #offset]
// offset for store pair in prolog must be positive and a multiple of 8.
- assert(0 <= offset &&
- offset <= 504);
+ assert(0 <= offset && offset <= 504);
assert((offset % 8) == 0);
int z = offset / 8;
@@ -139,52 +134,50 @@ void Compiler::unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset)
{
// save_lrpair: 1101011x | xxzzzzzz: save pair <r19 + 2 * #X, lr> at [sp + #Z * 8], offset <= 504
- assert(REG_R19 <= reg1 && // first legal pair: R19, LR
- reg1 <= REG_R27); // last legal pair: R27, LR
+ assert(REG_R19 <= reg1 && // first legal pair: R19, LR
+ reg1 <= REG_R27); // last legal pair: R27, LR
BYTE x = (BYTE)(reg1 - REG_R19);
- assert((x % 2) == 0); // only legal reg1: R19, R21, R23, R25, R27
+ assert((x % 2) == 0); // only legal reg1: R19, R21, R23, R25, R27
x /= 2;
assert(0 <= x && x <= 0x7);
- pu->AddCode(0xD6 | (BYTE)(x >> 2),
- (BYTE)(x << 6) | (BYTE)z);
+ pu->AddCode(0xD6 | (BYTE)(x >> 2), (BYTE)(x << 6) | (BYTE)z);
}
else if (emitter::isGeneralRegister(reg1))
{
// save_regp: 110010xx | xxzzzzzz: save r(19 + #X) pair at [sp + #Z * 8], offset <= 504
assert(REG_NEXT(reg1) == reg2);
- assert(REG_R19 <= reg1 && // first legal pair: R19, R20
- reg1 <= REG_R27); // last legal pair: R27, R28 (FP is never saved without LR)
+ assert(REG_R19 <= reg1 && // first legal pair: R19, R20
+ reg1 <= REG_R27); // last legal pair: R27, R28 (FP is never saved without LR)
BYTE x = (BYTE)(reg1 - REG_R19);
assert(0 <= x && x <= 0xF);
- pu->AddCode(0xC8 | (BYTE)(x >> 2),
- (BYTE)(x << 6) | (BYTE)z);
+ pu->AddCode(0xC8 | (BYTE)(x >> 2), (BYTE)(x << 6) | (BYTE)z);
}
else
{
// save_fregp: 1101100x | xxzzzzzz : save pair d(8 + #X) at [sp + #Z * 8], offset <= 504
assert(REG_NEXT(reg1) == reg2);
- assert(REG_V8 <= reg1 && // first legal pair: V8, V9
- reg1 <= REG_V14); // last legal pair: V14, V15
+ assert(REG_V8 <= reg1 && // first legal pair: V8, V9
+ reg1 <= REG_V14); // last legal pair: V14, V15
BYTE x = (BYTE)(reg1 - REG_V8);
assert(0 <= x && x <= 0x7);
- pu->AddCode(0xD8 | (BYTE)(x >> 2),
- (BYTE)(x << 6) | (BYTE)z);
+ pu->AddCode(0xD8 | (BYTE)(x >> 2), (BYTE)(x << 6) | (BYTE)z);
}
}
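As a concrete instance of the save_regp branch above (register and offset chosen for illustration): saving the pair <r21,r22> at [sp, #16] gives x = 2 and z = 2, which pack into the two bytes computed below.

    // Sketch only: reproduces the save_regp byte packing from the branch above.
    #include <cstdio>
    typedef unsigned char BYTE;

    int main()
    {
        // save_regp: 110010xx | xxzzzzzz : save r(19 + #X) pair at [sp + #Z * 8], offset <= 504
        int regNum = 21, offset = 16;    // assumed example: stp x21, x22, [sp, #16]
        BYTE x  = (BYTE)(regNum - 19);   // 2
        BYTE z  = (BYTE)(offset / 8);    // 2
        BYTE b1 = 0xC8 | (BYTE)(x >> 2); // 0xC8
        BYTE b2 = (BYTE)(x << 6) | z;    // 0x82
        printf("save_regp: %02X %02X\n", b1, b2);
        return 0;
    }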
// unwindSaveRegPairPreindexed: save a register pair to the stack at the specified byte offset (which must be negative,
-// a multiple of 8 from -512 to -8). Note that for ARM64 unwind codes, reg2 must be exactly one register higher than reg1.
+// a multiple of 8 from -512 to -8). Note that for ARM64 unwind codes, reg2 must be exactly one register higher than
+// reg1.
void Compiler::unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset)
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
// stp reg1, reg2, [sp, #offset]!
@@ -204,10 +197,12 @@ void Compiler::unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int o
pu->AddCode(0x80 | (BYTE)z);
}
- else if ((reg1 == REG_R19) && (-256 <= offset)) // If the offset is between -512 and -256, we use the save_regp_x unwind code.
+ else if ((reg1 == REG_R19) &&
+ (-256 <= offset)) // If the offset is between -512 and -256, we use the save_regp_x unwind code.
{
// save_r19r20_x: 001zzzzz: save <r19,r20> pair at [sp-#Z*8]!, pre-indexed offset >= -248
- // NOTE: I'm not sure why we allow Z==0 here; seems useless, and the calculation of offset is different from the other cases.
+ // NOTE: I'm not sure why we allow Z==0 here; seems useless, and the calculation of offset is different from the
+ // other cases.
int z = (-offset) / 8;
assert(0 <= z && z <= 0x1F);
@@ -225,14 +220,13 @@ void Compiler::unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int o
assert(0 <= z && z <= 0x3F);
assert(REG_NEXT(reg1) == reg2);
- assert(REG_R19 <= reg1 && // first legal pair: R19, R20
- reg1 <= REG_R27); // last legal pair: R27, R28 (FP is never saved without LR)
+ assert(REG_R19 <= reg1 && // first legal pair: R19, R20
+ reg1 <= REG_R27); // last legal pair: R27, R28 (FP is never saved without LR)
BYTE x = (BYTE)(reg1 - REG_R19);
assert(0 <= x && x <= 0xF);
- pu->AddCode(0xCC | (BYTE)(x >> 2),
- (BYTE)(x << 6) | (BYTE)z);
+ pu->AddCode(0xCC | (BYTE)(x >> 2), (BYTE)(x << 6) | (BYTE)z);
}
else
{
@@ -243,26 +237,24 @@ void Compiler::unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int o
assert(0 <= z && z <= 0x3F);
assert(REG_NEXT(reg1) == reg2);
- assert(REG_V8 <= reg1 && // first legal pair: V8, V9
- reg1 <= REG_V14); // last legal pair: V14, V15
+ assert(REG_V8 <= reg1 && // first legal pair: V8, V9
+ reg1 <= REG_V14); // last legal pair: V14, V15
BYTE x = (BYTE)(reg1 - REG_V8);
assert(0 <= x && x <= 0x7);
- pu->AddCode(0xDA | (BYTE)(x >> 2),
- (BYTE)(x << 6) | (BYTE)z);
+ pu->AddCode(0xDA | (BYTE)(x >> 2), (BYTE)(x << 6) | (BYTE)z);
}
}
void Compiler::unwindSaveReg(regNumber reg, int offset)
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
// str reg, [sp, #offset]
// offset for store in prolog must be positive and a multiple of 8.
- assert(0 <= offset &&
- offset <= 504);
+ assert(0 <= offset && offset <= 504);
assert((offset % 8) == 0);
int z = offset / 8;
@@ -272,39 +264,36 @@ void Compiler::unwindSaveReg(regNumber reg, int offset)
{
// save_reg: 110100xx | xxzzzzzz: save reg r(19 + #X) at [sp + #Z * 8], offset <= 504
- assert(REG_R19 <= reg && // first legal register: R19
- reg <= REG_LR); // last legal register: LR
+ assert(REG_R19 <= reg && // first legal register: R19
+ reg <= REG_LR); // last legal register: LR
BYTE x = (BYTE)(reg - REG_R19);
assert(0 <= x && x <= 0xF);
- pu->AddCode(0xD0 | (BYTE)(x >> 2),
- (BYTE)(x << 6) | (BYTE)z);
+ pu->AddCode(0xD0 | (BYTE)(x >> 2), (BYTE)(x << 6) | (BYTE)z);
}
else
{
// save_freg: 1101110x | xxzzzzzz : save reg d(8 + #X) at [sp + #Z * 8], offset <= 504
- assert(REG_V8 <= reg && // first legal register: V8
- reg <= REG_V15); // last legal register: V15
+ assert(REG_V8 <= reg && // first legal register: V8
+ reg <= REG_V15); // last legal register: V15
BYTE x = (BYTE)(reg - REG_V8);
assert(0 <= x && x <= 0x7);
- pu->AddCode(0xDC | (BYTE)(x >> 2),
- (BYTE)(x << 6) | (BYTE)z);
+ pu->AddCode(0xDC | (BYTE)(x >> 2), (BYTE)(x << 6) | (BYTE)z);
}
}
void Compiler::unwindSaveRegPreindexed(regNumber reg, int offset)
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
// str reg, [sp, #offset]!
// pre-indexed offset in prolog must be negative and a multiple of 8.
- assert(-256 <= offset &&
- offset < 0);
+ assert(-256 <= offset && offset < 0);
assert((offset % 8) == 0);
int z = (-offset) / 8 - 1;
@@ -314,33 +303,31 @@ void Compiler::unwindSaveRegPreindexed(regNumber reg, int offset)
{
// save_reg_x: 1101010x | xxxzzzzz: save reg r(19 + #X) at [sp - (#Z + 1) * 8]!, pre-indexed offset >= -256
- assert(REG_R19 <= reg && // first legal register: R19
- reg <= REG_LR); // last legal register: LR
+ assert(REG_R19 <= reg && // first legal register: R19
+ reg <= REG_LR); // last legal register: LR
BYTE x = (BYTE)(reg - REG_R19);
assert(0 <= x && x <= 0xF);
- pu->AddCode(0xD4 | (BYTE)(x >> 3),
- (BYTE)(x << 5) | (BYTE)z);
+ pu->AddCode(0xD4 | (BYTE)(x >> 3), (BYTE)(x << 5) | (BYTE)z);
}
else
{
// save_freg_x: 11011110 | xxxzzzzz : save reg d(8 + #X) at [sp - (#Z + 1) * 8]!, pre - indexed offset >= -256
- assert(REG_V8 <= reg && // first legal register: V8
- reg <= REG_V15); // last legal register: V15
+ assert(REG_V8 <= reg && // first legal register: V8
+ reg <= REG_V15); // last legal register: V15
BYTE x = (BYTE)(reg - REG_V8);
assert(0 <= x && x <= 0x7);
- pu->AddCode(0xDE,
- (BYTE)(x << 5) | (BYTE)z);
+ pu->AddCode(0xDE, (BYTE)(x << 5) | (BYTE)z);
}
}
void Compiler::unwindSaveNext()
{
- UnwindInfo * pu = &funCurrentFunc()->uwi;
+ UnwindInfo* pu = &funCurrentFunc()->uwi;
// We're saving the next register pair. The caller is responsible for ensuring this is correct!
@@ -368,23 +355,24 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
unsigned GetUnwindSizeFromUnwindHeader(BYTE b1)
{
- static BYTE s_UnwindSize[256] = { // array of unwind sizes, in bytes (as specified in the ARM unwind specification)
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 00-0F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 10-1F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 20-2F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 30-3F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 40-4F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 50-5F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 60-6F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 70-7F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 80-8F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 90-9F
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A0-AF
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B0-BF
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0-CF
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, // D0-DF
- 4, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E0-EF
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // F0-FF
+ static BYTE s_UnwindSize[256] = {
+ // array of unwind sizes, in bytes (as specified in the ARM unwind specification)
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 00-0F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 10-1F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 20-2F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 30-3F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 40-4F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 50-5F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 60-6F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 70-7F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 80-8F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 90-9F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // A0-AF
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // B0-BF
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0-CF
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, // D0-DF
+ 4, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // E0-EF
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // F0-FF
};
unsigned size = s_UnwindSize[b1];
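The table above gives the byte length of each unwind code from its first byte; a walker steps through a packed code stream by adding the looked-up size each time, as GetCodeSizeFromUnwindCodes does further down. A self-contained sketch of that idea (the CodeSize helper is a simplified stand-in, not the real table):

    // Sketch only: a walker that advances through packed unwind codes using a size lookup like the table above.
    #include <cstdio>
    typedef unsigned char BYTE;

    // Assumed stand-in with the same shape as s_UnwindSize above:
    // 1 byte for codes below 0xC0, 2 bytes for 0xC0-0xDE, 4 bytes for 0xE0, 1 byte otherwise (2 for 0xE2).
    static unsigned CodeSize(BYTE b1)
    {
        if (b1 < 0xC0)  return 1;
        if (b1 <= 0xDE) return 2;
        if (b1 == 0xE0) return 4;
        if (b1 == 0xE2) return 2;
        return 1;
    }

    int main()
    {
        BYTE codes[] = {0xC1, 0x00, 0x81, 0x21}; // alloc_m, save_fplr_x, save_r19r20_x (illustrative sequence)
        unsigned offset = 0;
        while (offset < sizeof(codes))
        {
            printf("code at byte %u starts with %02X\n", offset, codes[offset]);
            offset += CodeSize(codes[offset]);
        }
        return 0;
    }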
@@ -394,7 +382,6 @@ unsigned GetUnwindSizeFromUnwindHeader(BYTE b1)
#endif // DEBUG
-
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
@@ -413,19 +400,19 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#ifdef DEBUG
// Walk the prolog codes and calculate the size of the prolog or epilog, in bytes.
-unsigned UnwindCodesBase::GetCodeSizeFromUnwindCodes(bool isProlog)
+unsigned UnwindCodesBase::GetCodeSizeFromUnwindCodes(bool isProlog)
{
- BYTE* pCodesStart = GetCodes();
- BYTE* pCodes = pCodesStart;
- unsigned size = 0;
+ BYTE* pCodesStart = GetCodes();
+ BYTE* pCodes = pCodesStart;
+ unsigned size = 0;
for (;;)
{
BYTE b1 = *pCodes;
if (IsEndCode(b1))
{
- break; // We hit an "end" code; we're done
+ break; // We hit an "end" code; we're done
}
- size += 4; // All codes represent 4 byte instructions.
+ size += 4; // All codes represent 4 byte instructions.
pCodes += GetUnwindSizeFromUnwindHeader(b1);
assert(pCodes - pCodesStart < 256); // 255 is the absolute maximum number of code bytes allowed
}
@@ -459,13 +446,18 @@ DWORD ExtractBits(DWORD dw, DWORD start, DWORD length)
// pHeader: pointer to the unwind data blob
// unwindBlockSize: size in bytes of the unwind data blob
-void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, const BYTE * const pHeader, ULONG unwindBlockSize)
+void DumpUnwindInfo(Compiler* comp,
+ bool isHotCode,
+ UNATIVE_OFFSET startOffset,
+ UNATIVE_OFFSET endOffset,
+ const BYTE* const pHeader,
+ ULONG unwindBlockSize)
{
printf("Unwind Info%s:\n", isHotCode ? "" : " COLD");
// pHeader is not guaranteed to be aligned. We put four 0xFF end codes at the end
// to provide padding, and round down to get a multiple of 4 bytes in size.
- DWORD UNALIGNED* pdw = (DWORD UNALIGNED *)pHeader;
+ DWORD UNALIGNED* pdw = (DWORD UNALIGNED*)pHeader;
DWORD dw;
dw = *pdw++;
@@ -484,9 +476,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
printf(" E bit : %u\n", EBit);
printf(" X bit : %u\n", XBit);
printf(" Vers : %u\n", Vers);
- printf(" Function Length : %u (0x%05x) Actual length = %u (0x%06x)\n",
- functionLength, functionLength,
- functionLength * 4, functionLength * 4);
+ printf(" Function Length : %u (0x%05x) Actual length = %u (0x%06x)\n", functionLength, functionLength,
+ functionLength * 4, functionLength * 4);
assert(functionLength * 4 == endOffset - startOffset);
@@ -497,8 +488,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
dw = *pdw++;
- codeWords = ExtractBits(dw, 16, 8);
- epilogCount = ExtractBits(dw, 0, 16);
+ codeWords = ExtractBits(dw, 16, 8);
+ epilogCount = ExtractBits(dw, 0, 16);
assert((dw & 0xF0000000) == 0); // reserved field should be zero
printf(" ---- Extension word ----\n");
@@ -536,10 +527,12 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
assert(res == 0);
printf(" ---- Scope %d\n", scope);
- printf(" Epilog Start Offset : %u (0x%05x) Actual offset = %u (0x%06x) Offset from main function begin = %u (0x%06x)\n",
- comp->dspOffset(epilogStartOffset), comp->dspOffset(epilogStartOffset),
- comp->dspOffset(epilogStartOffset * 4), comp->dspOffset(epilogStartOffset * 4),
- comp->dspOffset(epilogStartOffsetFromMainFunctionBegin), comp->dspOffset(epilogStartOffsetFromMainFunctionBegin));
+ printf(" Epilog Start Offset : %u (0x%05x) Actual offset = %u (0x%06x) Offset from main "
+ "function begin = %u (0x%06x)\n",
+ comp->dspOffset(epilogStartOffset), comp->dspOffset(epilogStartOffset),
+ comp->dspOffset(epilogStartOffset * 4), comp->dspOffset(epilogStartOffset * 4),
+ comp->dspOffset(epilogStartOffsetFromMainFunctionBegin),
+ comp->dspOffset(epilogStartOffsetFromMainFunctionBegin));
printf(" Epilog Start Index : %u (0x%02x)\n", epilogStartIndex, epilogStartIndex);
epilogStartAt[epilogStartIndex] = true; // an epilog starts at this offset in the unwind codes
@@ -558,8 +551,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
printf(" ---- Unwind codes ----\n");
DWORD countOfUnwindCodes = codeWords * 4;
- PBYTE pUnwindCode = (PBYTE)pdw;
- BYTE b1, b2, b3, b4;
+ PBYTE pUnwindCode = (PBYTE)pdw;
+ BYTE b1, b2, b3, b4;
DWORD x, z;
for (DWORD i = 0; i < countOfUnwindCodes; i++)
{
@@ -582,19 +575,22 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
{
// save_r19r20_x: 001zzzzz: save <r19,r20> pair at [sp-#Z*8]!, pre-indexed offset >= -248
z = b1 & 0x1F;
- printf(" %02X save_r19r20_x #%u (0x%02X); stp %s, %s, [sp, #-%u]!\n", b1, z, z, getRegName(REG_R19), getRegName(REG_R20), z * 8);
+ printf(" %02X save_r19r20_x #%u (0x%02X); stp %s, %s, [sp, #-%u]!\n", b1, z, z,
+ getRegName(REG_R19), getRegName(REG_R20), z * 8);
}
else if ((b1 & 0xC0) == 0x40)
{
// save_fplr: 01zzzzzz: save <r29,lr> pair at [sp+#Z*8], offset <= 504
z = b1 & 0x3F;
- printf(" %02X save_fplr #%u (0x%02X); stp %s, %s, [sp, #%u]\n", b1, z, z, getRegName(REG_FP), getRegName(REG_LR), z * 8);
+ printf(" %02X save_fplr #%u (0x%02X); stp %s, %s, [sp, #%u]\n", b1, z, z, getRegName(REG_FP),
+ getRegName(REG_LR), z * 8);
}
else if ((b1 & 0xC0) == 0x80)
{
// save_fplr_x: 10zzzzzz: save <r29,lr> pair at [sp-(#Z+1)*8]!, pre-indexed offset >= -512
z = b1 & 0x3F;
- printf(" %02X save_fplr_x #%u (0x%02X); stp %s, %s, [sp, #-%u]!\n", b1, z, z, getRegName(REG_FP), getRegName(REG_LR), (z + 1) * 8);
+ printf(" %02X save_fplr_x #%u (0x%02X); stp %s, %s, [sp, #-%u]!\n", b1, z, z,
+ getRegName(REG_FP), getRegName(REG_LR), (z + 1) * 8);
}
else if ((b1 & 0xF8) == 0xC0)
{
@@ -606,7 +602,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = ((DWORD)(b1 & 0x7) << 8) | (DWORD)b2;
- printf(" %02X %02X alloc_m #%u (0x%03X); sub sp, sp, #%u (0x%04X)\n", b1, b2, x, x, x * 16, x * 16);
+ printf(" %02X %02X alloc_m #%u (0x%03X); sub sp, sp, #%u (0x%04X)\n", b1, b2, x, x, x * 16,
+ x * 16);
}
else if ((b1 & 0xFC) == 0xC8)
{
@@ -618,11 +615,13 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = ((DWORD)(b1 & 0x3) << 2) | (DWORD)(b2 >> 6);
z = (DWORD)(b2 & 0x3F);
- printf(" %02X %02X save_regp X#%u Z#%u (0x%02X); stp %s, %s, [sp, #%u]\n", b1, b2, x, z, z, getRegName(REG_R19 + x), getRegName(REG_R19 + x + 1), z * 8);
+ printf(" %02X %02X save_regp X#%u Z#%u (0x%02X); stp %s, %s, [sp, #%u]\n", b1, b2, x, z, z,
+ getRegName(REG_R19 + x), getRegName(REG_R19 + x + 1), z * 8);
}
else if ((b1 & 0xFC) == 0xCC)
{
- // save_regp_x: 110011xx | xxzzzzzz: save pair r(19 + #X) at [sp - (#Z + 1) * 8]!, pre-indexed offset >= -512
+ // save_regp_x: 110011xx | xxzzzzzz: save pair r(19 + #X) at [sp - (#Z + 1) * 8]!, pre-indexed offset >=
+ // -512
assert(i + 1 < countOfUnwindCodes);
b2 = *pUnwindCode++;
i++;
@@ -630,7 +629,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = ((DWORD)(b1 & 0x3) << 2) | (DWORD)(b2 >> 6);
z = (DWORD)(b2 & 0x3F);
- printf(" %02X %02X save_regp_x X#%u Z#%u (0x%02X); stp %s, %s, [sp, #-%u]!\n", b1, b2, x, z, z, getRegName(REG_R19 + x), getRegName(REG_R19 + x + 1), (z + 1) * 8);
+ printf(" %02X %02X save_regp_x X#%u Z#%u (0x%02X); stp %s, %s, [sp, #-%u]!\n", b1, b2, x, z, z,
+ getRegName(REG_R19 + x), getRegName(REG_R19 + x + 1), (z + 1) * 8);
}
else if ((b1 & 0xFC) == 0xD0)
{
@@ -642,7 +642,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = ((DWORD)(b1 & 0x3) << 2) | (DWORD)(b2 >> 6);
z = (DWORD)(b2 & 0x3F);
- printf(" %02X %02X save_reg X#%u Z#%u (0x%02X); str %s, [sp, #%u]\n", b1, b2, x, z, z, getRegName(REG_R19 + x), z * 8);
+ printf(" %02X %02X save_reg X#%u Z#%u (0x%02X); str %s, [sp, #%u]\n", b1, b2, x, z, z,
+ getRegName(REG_R19 + x), z * 8);
}
else if ((b1 & 0xFE) == 0xD4)
{
@@ -654,7 +655,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = ((DWORD)(b1 & 0x1) << 3) | (DWORD)(b2 >> 5);
z = (DWORD)(b2 & 0x1F);
- printf(" %02X %02X save_reg_x X#%u Z#%u (0x%02X); str %s, [sp, #-%u]!\n", b1, b2, x, z, z, getRegName(REG_R19 + x), (z + 1) * 8);
+ printf(" %02X %02X save_reg_x X#%u Z#%u (0x%02X); str %s, [sp, #-%u]!\n", b1, b2, x, z, z,
+ getRegName(REG_R19 + x), (z + 1) * 8);
}
else if ((b1 & 0xFE) == 0xD6)
{
@@ -666,7 +668,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = ((DWORD)(b1 & 0x1) << 2) | (DWORD)(b2 >> 6);
z = (DWORD)(b2 & 0x3F);
- printf(" %02X %02X save_lrpair X#%u Z#%u (0x%02X); stp %s, %s, [sp, #%u]\n", b1, b2, x, z, z, getRegName(REG_R19 + 2 * x), getRegName(REG_LR), z * 8);
+ printf(" %02X %02X save_lrpair X#%u Z#%u (0x%02X); stp %s, %s, [sp, #%u]\n", b1, b2, x, z, z,
+ getRegName(REG_R19 + 2 * x), getRegName(REG_LR), z * 8);
}
else if ((b1 & 0xFE) == 0xD8)
{
@@ -678,11 +681,13 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = ((DWORD)(b1 & 0x1) << 2) | (DWORD)(b2 >> 6);
z = (DWORD)(b2 & 0x3F);
- printf(" %02X %02X save_fregp X#%u Z#%u (0x%02X); stp %s, %s, [sp, #%u]\n", b1, b2, x, z, z, getRegName(REG_V8 + x, true), getRegName(REG_V8 + x + 1, true), z * 8);
+ printf(" %02X %02X save_fregp X#%u Z#%u (0x%02X); stp %s, %s, [sp, #%u]\n", b1, b2, x, z, z,
+ getRegName(REG_V8 + x, true), getRegName(REG_V8 + x + 1, true), z * 8);
}
else if ((b1 & 0xFE) == 0xDA)
{
- // save_fregp_x: 1101101x | xxzzzzzz : save pair d(8 + #X), at [sp - (#Z + 1) * 8]!, pre-indexed offset >= -512
+ // save_fregp_x: 1101101x | xxzzzzzz : save pair d(8 + #X), at [sp - (#Z + 1) * 8]!, pre-indexed offset >=
+ // -512
assert(i + 1 < countOfUnwindCodes);
b2 = *pUnwindCode++;
i++;
@@ -690,7 +695,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = ((DWORD)(b1 & 0x1) << 2) | (DWORD)(b2 >> 6);
z = (DWORD)(b2 & 0x3F);
- printf(" %02X %02X save_fregp_x X#%u Z#%u (0x%02X); stp %s, %s, [sp, #-%u]!\n", b1, b2, x, z, z, getRegName(REG_V8 + x, true), getRegName(REG_V8 + x + 1, true), (z + 1) * 8);
+ printf(" %02X %02X save_fregp_x X#%u Z#%u (0x%02X); stp %s, %s, [sp, #-%u]!\n", b1, b2, x, z, z,
+ getRegName(REG_V8 + x, true), getRegName(REG_V8 + x + 1, true), (z + 1) * 8);
}
else if ((b1 & 0xFE) == 0xDC)
{
@@ -702,11 +708,13 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = ((DWORD)(b1 & 0x1) << 2) | (DWORD)(b2 >> 6);
z = (DWORD)(b2 & 0x3F);
- printf(" %02X %02X save_freg X#%u Z#%u (0x%02X); str %s, [sp, #%u]\n", b1, b2, x, z, z, getRegName(REG_V8 + x, true), z * 8);
+ printf(" %02X %02X save_freg X#%u Z#%u (0x%02X); str %s, [sp, #%u]\n", b1, b2, x, z, z,
+ getRegName(REG_V8 + x, true), z * 8);
}
else if (b1 == 0xDE)
{
- // save_freg_x: 11011110 | xxxzzzzz : save reg d(8 + #X) at [sp - (#Z + 1) * 8]!, pre - indexed offset >= -256
+ // save_freg_x: 11011110 | xxxzzzzz : save reg d(8 + #X) at [sp - (#Z + 1) * 8]!, pre - indexed offset >=
+ // -256
assert(i + 1 < countOfUnwindCodes);
b2 = *pUnwindCode++;
i++;
@@ -714,7 +722,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = (DWORD)(b2 >> 5);
z = (DWORD)(b2 & 0x1F);
- printf(" %02X %02X save_freg_x X#%u Z#%u (0x%02X); str %s, [sp, #-%u]!\n", b1, b2, x, z, z, getRegName(REG_V8 + x, true), (z + 1) * 8);
+ printf(" %02X %02X save_freg_x X#%u Z#%u (0x%02X); str %s, [sp, #-%u]!\n", b1, b2, x, z, z,
+ getRegName(REG_V8 + x, true), (z + 1) * 8);
}
else if (b1 == 0xE0)
{
@@ -727,7 +736,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = ((DWORD)b2 << 16) | ((DWORD)b3 << 8) | (DWORD)b4;
- printf(" %02X %02X %02X %02X alloc_l %u (0x%06X); sub sp, sp, #%u (%06X)\n", b1, b2, b3, b4, x, x, x * 16, x * 16);
+ printf(" %02X %02X %02X %02X alloc_l %u (0x%06X); sub sp, sp, #%u (%06X)\n", b1, b2, b3, b4, x, x,
+ x * 16, x * 16);
}
else if (b1 == 0xE1)
{
@@ -744,7 +754,8 @@ void DumpUnwindInfo(Compiler* comp, bool isHotCode, UNATIVE_OFFSET startOffset,
x = (DWORD)b2;
- printf(" %02X %02X add_fp %u (0x%02X); add %s, sp, #%u\n", b1, b2, x, x, getRegName(REG_FP), x * 8);
+ printf(" %02X %02X add_fp %u (0x%02X); add %s, sp, #%u\n", b1, b2, x, x, getRegName(REG_FP),
+ x * 8);
}
else if (b1 == 0xE3)
{
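
Every branch of the decode loop above follows the same shape: match the leading byte against a mask, pull the operand bits out of the low bits (plus a second byte for the two-byte codes), then print the instruction the code stands for. A stand-alone sketch of that pattern for the two save_fplr forms shown earlier (not the JIT's code):

    #include <cstdint>
    #include <cstdio>

    void DecodeSaveFplr(uint8_t b1)
    {
        if ((b1 & 0xC0) == 0x40) // save_fplr: 01zzzzzz -> stp fp, lr, [sp, #Z*8]
        {
            unsigned z = b1 & 0x3F;
            printf("stp fp, lr, [sp, #%u]\n", z * 8);
        }
        else if ((b1 & 0xC0) == 0x80) // save_fplr_x: 10zzzzzz -> stp fp, lr, [sp, #-(Z+1)*8]!
        {
            unsigned z = b1 & 0x3F;
            printf("stp fp, lr, [sp, #-%u]!\n", (z + 1) * 8);
        }
    }

    int main()
    {
        DecodeSaveFplr(0x42); // prints "stp fp, lr, [sp, #16]"
        DecodeSaveFplr(0x81); // prints "stp fp, lr, [sp, #-16]!"
        return 0;
    }
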
diff --git a/src/jit/utils.cpp b/src/jit/utils.cpp
index e3eb978a6d..8329fac48d 100644
--- a/src/jit/utils.cpp
+++ b/src/jit/utils.cpp
@@ -28,9 +28,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#ifdef PLATFORM_UNIX
// Should we distinguish Mac? Can we?
// Should we distinguish flavors of Unix? Can we?
-const char * Target::g_tgtPlatformName = "Unix";
-#else // !PLATFORM_UNIX
-const char * Target::g_tgtPlatformName = "Windows";
+const char* Target::g_tgtPlatformName = "Unix";
+#else // !PLATFORM_UNIX
+const char* Target::g_tgtPlatformName = "Windows";
#endif // !PLATFORM_UNIX
/*****************************************************************************/
@@ -88,48 +88,41 @@ const signed char opcodeSizes[] =
};
// clang-format on
-const BYTE varTypeClassification[] =
-{
- #define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) tf,
- #include "typelist.h"
- #undef DEF_TP
+const BYTE varTypeClassification[] = {
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) tf,
+#include "typelist.h"
+#undef DEF_TP
};
/*****************************************************************************/
/*****************************************************************************/
#ifdef DEBUG
-extern
-const char * const opcodeNames[] =
-{
- #define OPDEF(name,string,pop,push,oprType,opcType,l,s1,s2,ctrl) string,
- #include "opcode.def"
- #undef OPDEF
+extern const char* const opcodeNames[] = {
+#define OPDEF(name, string, pop, push, oprType, opcType, l, s1, s2, ctrl) string,
+#include "opcode.def"
+#undef OPDEF
};
-extern
-const BYTE opcodeArgKinds[] =
-{
- #define OPDEF(name,string,pop,push,oprType,opcType,l,s1,s2,ctrl) (BYTE) oprType,
- #include "opcode.def"
- #undef OPDEF
+extern const BYTE opcodeArgKinds[] = {
+#define OPDEF(name, string, pop, push, oprType, opcType, l, s1, s2, ctrl) (BYTE) oprType,
+#include "opcode.def"
+#undef OPDEF
};
#endif
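
The varTypeClassification, opcodeNames, and opcodeArgKinds tables above are all built with the same "X macro" trick: typelist.h and opcode.def expand a caller-supplied DEF_TP/OPDEF macro once per entry, so every parallel array is generated from one master list and cannot drift out of sync. A self-contained illustration of the pattern (the three-opcode list here is made up, not the real opcode.def):

    #include <cstdio>

    // Master list: one X(...) invocation per entry.
    #define MY_OPCODES      \
        X(Nop, "nop", 1)    \
        X(Ldc, "ldc", 5)    \
        X(Ret, "ret", 1)

    // First expansion: the name column.
    #define X(name, str, size) str,
    static const char* const myOpcodeNames[] = {MY_OPCODES};
    #undef X

    // Second expansion: the size column, kept in sync automatically.
    #define X(name, str, size) size,
    static const unsigned char myOpcodeSizes[] = {MY_OPCODES};
    #undef X

    int main()
    {
        printf("%s is %u byte(s) long\n", myOpcodeNames[1], (unsigned)myOpcodeSizes[1]); // "ldc is 5 byte(s) long"
        return 0;
    }
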
/*****************************************************************************/
-const char * varTypeName(var_types vt)
+const char* varTypeName(var_types vt)
{
- static
- const char * const varTypeNames[] =
- {
- #define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) nm,
- #include "typelist.h"
- #undef DEF_TP
+ static const char* const varTypeNames[] = {
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) nm,
+#include "typelist.h"
+#undef DEF_TP
};
- assert((unsigned)vt < sizeof(varTypeNames)/sizeof(varTypeNames[0]));
+ assert((unsigned)vt < sizeof(varTypeNames) / sizeof(varTypeNames[0]));
- return varTypeNames[vt];
+ return varTypeNames[vt];
}
#if defined(DEBUG) || defined(LATE_DISASM)
@@ -138,7 +131,7 @@ const char * varTypeName(var_types vt)
* Return the name of the given register.
*/
-const char * getRegName(regNumber reg, bool isFloat)
+const char* getRegName(regNumber reg, bool isFloat)
{
// Special-case REG_NA; it's not in the regNames array, but we might want to print it.
if (reg == REG_NA)
@@ -146,20 +139,16 @@ const char * getRegName(regNumber reg, bool isFloat)
return "NA";
}
#if defined(_TARGET_X86_) && defined(LEGACY_BACKEND)
- static
- const char * const regNames[] =
- {
- #define REGDEF(name, rnum, mask, sname) sname,
- #include "register.h"
+ static const char* const regNames[] = {
+#define REGDEF(name, rnum, mask, sname) sname,
+#include "register.h"
};
- static
- const char * const floatRegNames[] =
- {
- #define REGDEF(name, rnum, mask, sname) sname,
- #include "registerxmm.h"
+ static const char* const floatRegNames[] = {
+#define REGDEF(name, rnum, mask, sname) sname,
+#include "registerxmm.h"
};
- if (isFloat)
+ if (isFloat)
{
assert(reg < ArrLen(floatRegNames));
return floatRegNames[reg];
@@ -170,28 +159,24 @@ const char * getRegName(regNumber reg, bool isFloat)
return regNames[reg];
}
#elif defined(_TARGET_ARM64_)
- static
- const char * const regNames[] =
- {
- #define REGDEF(name, rnum, mask, xname, wname) xname,
- #include "register.h"
+ static const char* const regNames[] = {
+#define REGDEF(name, rnum, mask, xname, wname) xname,
+#include "register.h"
};
assert(reg < ArrLen(regNames));
return regNames[reg];
#else
- static
- const char * const regNames[] =
- {
- #define REGDEF(name, rnum, mask, sname) sname,
- #include "register.h"
+ static const char* const regNames[] = {
+#define REGDEF(name, rnum, mask, sname) sname,
+#include "register.h"
};
assert(reg < ArrLen(regNames));
return regNames[reg];
#endif
-
}
-const char * getRegName(unsigned reg, bool isFloat) // this is for gcencode.cpp and disasm.cpp that dont use the regNumber type
+const char* getRegName(unsigned reg,
+ bool isFloat) // this is for gcencode.cpp and disasm.cpp that dont use the regNumber type
{
return getRegName((regNumber)reg, isFloat);
}
@@ -205,57 +190,73 @@ const char* getRegNameFloat(regNumber reg, var_types type)
assert(genIsValidFloatReg(reg));
if (type == TYP_FLOAT)
return getRegName(reg);
- else
+ else
{
const char* regName;
- switch (reg) {
- default:
- assert(!"Bad double register");
- regName="d??";
- break;
- case REG_F0:
- regName = "d0"; break;
- case REG_F2:
- regName = "d2"; break;
- case REG_F4:
- regName = "d4"; break;
- case REG_F6:
- regName = "d6"; break;
- case REG_F8:
- regName = "d8"; break;
- case REG_F10:
- regName = "d10"; break;
- case REG_F12:
- regName = "d12"; break;
- case REG_F14:
- regName = "d14"; break;
- case REG_F16:
- regName = "d16"; break;
- case REG_F18:
- regName = "d18"; break;
- case REG_F20:
- regName = "d20"; break;
- case REG_F22:
- regName = "d22"; break;
- case REG_F24:
- regName = "d24"; break;
- case REG_F26:
- regName = "d26"; break;
- case REG_F28:
- regName = "d28"; break;
- case REG_F30:
- regName = "d30"; break;
+ switch (reg)
+ {
+ default:
+ assert(!"Bad double register");
+ regName = "d??";
+ break;
+ case REG_F0:
+ regName = "d0";
+ break;
+ case REG_F2:
+ regName = "d2";
+ break;
+ case REG_F4:
+ regName = "d4";
+ break;
+ case REG_F6:
+ regName = "d6";
+ break;
+ case REG_F8:
+ regName = "d8";
+ break;
+ case REG_F10:
+ regName = "d10";
+ break;
+ case REG_F12:
+ regName = "d12";
+ break;
+ case REG_F14:
+ regName = "d14";
+ break;
+ case REG_F16:
+ regName = "d16";
+ break;
+ case REG_F18:
+ regName = "d18";
+ break;
+ case REG_F20:
+ regName = "d20";
+ break;
+ case REG_F22:
+ regName = "d22";
+ break;
+ case REG_F24:
+ regName = "d24";
+ break;
+ case REG_F26:
+ regName = "d26";
+ break;
+ case REG_F28:
+ regName = "d28";
+ break;
+ case REG_F30:
+ regName = "d30";
+ break;
}
return regName;
}
#elif defined(_TARGET_X86_) && defined(LEGACY_BACKEND)
- static const char* regNamesFloat[] =
- {
- #define REGDEF(name, rnum, mask, sname) sname,
- #include "registerxmm.h"
+ static const char* regNamesFloat[] = {
+#define REGDEF(name, rnum, mask, sname) sname,
+#include "registerxmm.h"
};
assert((unsigned)reg < ArrLen(regNamesFloat));
@@ -263,26 +264,23 @@ const char* getRegNameFloat(regNumber reg, var_types type)
#elif defined(_TARGET_ARM64_)
- static const char* regNamesFloat[] =
- {
- #define REGDEF(name, rnum, mask, xname, wname) xname,
- #include "register.h"
+ static const char* regNamesFloat[] = {
+#define REGDEF(name, rnum, mask, xname, wname) xname,
+#include "register.h"
};
assert((unsigned)reg < ArrLen(regNamesFloat));
return regNamesFloat[reg];
#else
- static const char* regNamesFloat[] =
- {
- #define REGDEF(name, rnum, mask, sname) "x" sname,
- #include "register.h"
+ static const char* regNamesFloat[] = {
+#define REGDEF(name, rnum, mask, sname) "x" sname,
+#include "register.h"
};
#ifdef FEATURE_AVX_SUPPORT
- static const char* regNamesYMM[] =
- {
- #define REGDEF(name, rnum, mask, sname) "y" sname,
- #include "register.h"
+ static const char* regNamesYMM[] = {
+#define REGDEF(name, rnum, mask, sname) "y" sname,
+#include "register.h"
};
#endif // FEATURE_AVX_SUPPORT
assert((unsigned)reg < ArrLen(regNamesFloat));
@@ -304,19 +302,20 @@ const char* getRegNameFloat(regNumber reg, var_types type)
* TODO-ARM64-Cleanup: don't allow ip0, ip1 as part of a range.
*/
-void dspRegMask(regMaskTP regMask, size_t minSiz)
+void dspRegMask(regMaskTP regMask, size_t minSiz)
{
const char* sep = "";
printf("[");
bool inRegRange = false;
- regNumber regPrev = REG_NA;
- regNumber regHead = REG_NA; // When we start a range, remember the first register of the range, so we don't use range notation if the range contains just a single register.
+ regNumber regPrev = REG_NA;
+ regNumber regHead = REG_NA; // When we start a range, remember the first register of the range, so we don't use
+ // range notation if the range contains just a single register.
for (regNumber regNum = REG_INT_FIRST; regNum <= REG_INT_LAST; regNum = REG_NEXT(regNum))
{
- regMaskTP regBit = genRegMask(regNum);
-
+ regMaskTP regBit = genRegMask(regNum);
+
if ((regMask & regBit) != 0)
{
// We have a register to display. It gets displayed now if:
@@ -341,28 +340,27 @@ void dspRegMask(regMaskTP regMask, size_t minSiz)
// For AMD64, create ranges for int registers R8 through R15, but not the "old" registers.
if (regNum >= REG_R8)
{
- regHead = regNum;
+ regHead = regNum;
inRegRange = true;
- sep = "-";
+ sep = "-";
}
#elif defined(_TARGET_ARM64_)
// R17 and R28 can't be the start of a range, since the range would include TEB or FP
- if ((regNum < REG_R17) ||
- ((REG_R19 <= regNum) && (regNum < REG_R28)))
+ if ((regNum < REG_R17) || ((REG_R19 <= regNum) && (regNum < REG_R28)))
{
- regHead = regNum;
+ regHead = regNum;
inRegRange = true;
- sep = "-";
+ sep = "-";
}
#elif defined(_TARGET_ARM_)
if (regNum < REG_R12)
{
- regHead = regNum;
+ regHead = regNum;
inRegRange = true;
- sep = "-";
+ sep = "-";
}
#elif defined(_TARGET_X86_)
- // No register ranges
+// No register ranges
#else // _TARGET_*
#error Unsupported or unset target architecture
#endif // _TARGET_*
@@ -370,22 +368,21 @@ void dspRegMask(regMaskTP regMask, size_t minSiz)
#if defined(_TARGET_ARM64_)
// We've already printed a register. Is this the end of a range?
- else if ((regNum == REG_INT_LAST)
- || (regNum == REG_R17) // last register before TEB
- || (regNum == REG_R28)) // last register before FP
-#else // _TARGET_ARM64_
+ else if ((regNum == REG_INT_LAST) || (regNum == REG_R17) // last register before TEB
+ || (regNum == REG_R28)) // last register before FP
+#else // _TARGET_ARM64_
// We've already printed a register. Is this the end of a range?
else if (regNum == REG_INT_LAST)
-#endif // _TARGET_ARM64_
+#endif // _TARGET_ARM64_
{
const char* nam = getRegName(regNum);
printf("%s%s", sep, nam);
minSiz -= strlen(sep) + strlen(nam);
inRegRange = false; // No longer in the middle of a register range
- regHead = REG_NA;
- sep = " ";
+ regHead = REG_NA;
+ sep = " ";
}
- }
+ }
else // ((regMask & regBit) == 0)
{
if (inRegRange)
@@ -398,14 +395,16 @@ void dspRegMask(regMaskTP regMask, size_t minSiz)
printf("%s%s", sep, nam);
minSiz -= strlen(sep) + strlen(nam);
}
- sep = " ";
+ sep = " ";
inRegRange = false;
- regHead = REG_NA;
+ regHead = REG_NA;
}
}
if (regBit > regMask)
+ {
break;
+ }
regPrev = regNum;
}
@@ -413,9 +412,9 @@ void dspRegMask(regMaskTP regMask, size_t minSiz)
#if CPU_HAS_BYTE_REGS
if (regMask & RBM_BYTE_REG_FLAG)
{
- const char * nam = "BYTE";
+ const char* nam = "BYTE";
printf("%s%s", sep, nam);
- minSiz -= (strlen(sep) + strlen(nam));
+ minSiz -= (strlen(sep) + strlen(nam));
}
#endif
@@ -426,11 +425,11 @@ void dspRegMask(regMaskTP regMask, size_t minSiz)
sep = " ";
}
inRegRange = false;
- regPrev = REG_NA;
- regHead = REG_NA;
+ regPrev = REG_NA;
+ regHead = REG_NA;
for (regNumber regNum = REG_FP_FIRST; regNum <= REG_FP_LAST; regNum = REG_NEXT(regNum))
{
- regMaskTP regBit = genRegMask(regNum);
+ regMaskTP regBit = genRegMask(regNum);
if (regMask & regBit)
{
@@ -439,7 +438,7 @@ void dspRegMask(regMaskTP regMask, size_t minSiz)
const char* nam = getRegName(regNum);
printf("%s%s", sep, nam);
minSiz -= strlen(sep) + strlen(nam);
- sep = "-";
+ sep = "-";
regHead = regNum;
}
inRegRange = true;
@@ -450,7 +449,7 @@ void dspRegMask(regMaskTP regMask, size_t minSiz)
{
if (regPrev != regHead)
{
- const char * nam = getRegName(regPrev);
+ const char* nam = getRegName(regPrev);
printf("%s%s", sep, nam);
minSiz -= (strlen(sep) + strlen(nam));
}
@@ -460,7 +459,9 @@ void dspRegMask(regMaskTP regMask, size_t minSiz)
}
if (regBit > regMask)
+ {
break;
+ }
regPrev = regNum;
}
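
dspRegMask's bookkeeping (regHead, regPrev, inRegRange) exists so that a run of consecutive registers prints as "r1-r4" rather than each name separately. A simplified stand-alone sketch of that idea, using a hypothetical eight-register file rather than regMaskTP/genRegMask:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const char* names[] = {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7"};
        uint32_t mask = 0x5E; // bits 1-4 and 6 set, i.e. r1-r4 and r6
        int runStart = -1;
        const char* sep = "";
        printf("[");
        for (int reg = 0; reg <= 8; reg++) // one extra iteration to flush a trailing run
        {
            bool set = (reg < 8) && ((mask >> reg) & 1);
            if (set && runStart < 0)
            {
                runStart = reg; // a run of set registers starts here
            }
            else if (!set && runStart >= 0)
            {
                int last = reg - 1;
                if (last == runStart)
                    printf("%s%s", sep, names[runStart]); // lone register
                else
                    printf("%s%s-%s", sep, names[runStart], names[last]); // collapsed range
                sep = " ";
                runStart = -1;
            }
        }
        printf("]\n"); // prints "[r1-r4 r6]"
        return 0;
    }
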
@@ -483,11 +484,10 @@ void dspRegMask(regMaskTP regMask, size_t minSiz)
// codeAddr - Pointer to IL byte stream to display.
// codeSize - Number of bytes of IL byte stream to display.
// alignSize - Pad out to this many characters, if fewer than this were written.
-//
-void
-dumpILBytes(const BYTE* const codeAddr,
- unsigned codeSize,
- unsigned alignSize) // number of characters to write, for alignment
+//
+void dumpILBytes(const BYTE* const codeAddr,
+ unsigned codeSize,
+ unsigned alignSize) // number of characters to write, for alignment
{
for (IL_OFFSET offs = 0; offs < codeSize; ++offs)
{
@@ -508,37 +508,38 @@ dumpILBytes(const BYTE* const codeAddr,
// codeAddr - Base pointer to a stream of IL instructions.
// offs - Offset from codeAddr of the IL instruction to display.
// prefix - Optional string to prefix the IL instruction with (if nullptr, no prefix is output).
-//
+//
// Return Value:
// Size of the displayed IL instruction in the instruction stream, in bytes. (Add this to 'offs' to
// get to the next instruction.)
-//
-unsigned
-dumpSingleInstr(const BYTE* const codeAddr, IL_OFFSET offs, const char* prefix)
+//
+unsigned dumpSingleInstr(const BYTE* const codeAddr, IL_OFFSET offs, const char* prefix)
{
- const BYTE * opcodePtr = codeAddr + offs;
- const BYTE * startOpcodePtr = opcodePtr;
- const unsigned ALIGN_WIDTH = 3 * 6; // assume 3 characters * (1 byte opcode + 4 bytes data + 1 prefix byte) for
- // most things
+ const BYTE* opcodePtr = codeAddr + offs;
+ const BYTE* startOpcodePtr = opcodePtr;
+ const unsigned ALIGN_WIDTH = 3 * 6; // assume 3 characters * (1 byte opcode + 4 bytes data + 1 prefix byte) for
+ // most things
- if (prefix != NULL)
+ if (prefix != nullptr)
+ {
printf("%s", prefix);
+ }
- OPCODE opcode = (OPCODE) getU1LittleEndian(opcodePtr);
+ OPCODE opcode = (OPCODE)getU1LittleEndian(opcodePtr);
opcodePtr += sizeof(__int8);
DECODE_OPCODE:
if (opcode >= CEE_COUNT)
{
- printf("\nIllegal opcode: %02X\n", (int) opcode);
+ printf("\nIllegal opcode: %02X\n", (int)opcode);
return (IL_OFFSET)(opcodePtr - startOpcodePtr);
}
/* Get the size of additional parameters */
- size_t sz = opcodeSizes [opcode];
- unsigned argKind = opcodeArgKinds[opcode];
+ size_t sz = opcodeSizes[opcode];
+ unsigned argKind = opcodeArgKinds[opcode];
/* See what kind of an opcode we have, then */
@@ -551,73 +552,88 @@ DECODE_OPCODE:
default:
{
- __int64 iOp;
- double dOp;
- int jOp;
- DWORD jOp2;
+ __int64 iOp;
+ double dOp;
+ int jOp;
+ DWORD jOp2;
switch (argKind)
{
- case InlineNone :
- dumpILBytes(startOpcodePtr, (unsigned)(opcodePtr - startOpcodePtr), ALIGN_WIDTH);
- printf(" %-12s", opcodeNames[opcode]);
- break;
-
- case ShortInlineVar : iOp = getU1LittleEndian(opcodePtr); goto INT_OP;
- case ShortInlineI : iOp = getI1LittleEndian(opcodePtr); goto INT_OP;
- case InlineVar : iOp = getU2LittleEndian(opcodePtr); goto INT_OP;
- case InlineTok :
- case InlineMethod :
- case InlineField :
- case InlineType :
- case InlineString :
- case InlineSig :
- case InlineI : iOp = getI4LittleEndian(opcodePtr); goto INT_OP;
- case InlineI8 : iOp = getU4LittleEndian(opcodePtr);
- iOp |= (__int64)getU4LittleEndian(opcodePtr + 4) << 32;
- goto INT_OP;
-
- INT_OP:
- dumpILBytes(startOpcodePtr, (unsigned)((opcodePtr - startOpcodePtr) + sz), ALIGN_WIDTH);
- printf(" %-12s 0x%X", opcodeNames[opcode], iOp);
- break;
-
- case ShortInlineR : dOp = getR4LittleEndian(opcodePtr); goto FLT_OP;
- case InlineR : dOp = getR8LittleEndian(opcodePtr); goto FLT_OP;
-
- FLT_OP:
- dumpILBytes(startOpcodePtr, (unsigned)((opcodePtr - startOpcodePtr) + sz), ALIGN_WIDTH);
- printf(" %-12s %f", opcodeNames[opcode], dOp);
- break;
-
- case ShortInlineBrTarget: jOp = getI1LittleEndian(opcodePtr); goto JMP_OP;
- case InlineBrTarget: jOp = getI4LittleEndian(opcodePtr); goto JMP_OP;
-
- JMP_OP:
- dumpILBytes(startOpcodePtr, (unsigned)((opcodePtr - startOpcodePtr) + sz), ALIGN_WIDTH);
- printf(" %-12s %d (IL_%04x)",
- opcodeNames[opcode],
- jOp,
- (int)(opcodePtr + sz - codeAddr) + jOp);
- break;
-
- case InlineSwitch:
- jOp2 = getU4LittleEndian(opcodePtr);
- opcodePtr += 4;
- opcodePtr += jOp2 * 4; // Jump over the table
- dumpILBytes(startOpcodePtr, (unsigned)(opcodePtr - startOpcodePtr), ALIGN_WIDTH);
- printf(" %-12s", opcodeNames[opcode]);
- break;
-
- case InlinePhi:
- jOp2 = getU1LittleEndian(opcodePtr);
- opcodePtr += 1;
- opcodePtr += jOp2 * 2; // Jump over the table
- dumpILBytes(startOpcodePtr, (unsigned)(opcodePtr - startOpcodePtr), ALIGN_WIDTH);
- printf(" %-12s", opcodeNames[opcode]);
- break;
-
- default : assert(!"Bad argKind");
+ case InlineNone:
+ dumpILBytes(startOpcodePtr, (unsigned)(opcodePtr - startOpcodePtr), ALIGN_WIDTH);
+ printf(" %-12s", opcodeNames[opcode]);
+ break;
+
+ case ShortInlineVar:
+ iOp = getU1LittleEndian(opcodePtr);
+ goto INT_OP;
+ case ShortInlineI:
+ iOp = getI1LittleEndian(opcodePtr);
+ goto INT_OP;
+ case InlineVar:
+ iOp = getU2LittleEndian(opcodePtr);
+ goto INT_OP;
+ case InlineTok:
+ case InlineMethod:
+ case InlineField:
+ case InlineType:
+ case InlineString:
+ case InlineSig:
+ case InlineI:
+ iOp = getI4LittleEndian(opcodePtr);
+ goto INT_OP;
+ case InlineI8:
+ iOp = getU4LittleEndian(opcodePtr);
+ iOp |= (__int64)getU4LittleEndian(opcodePtr + 4) << 32;
+ goto INT_OP;
+
+ INT_OP:
+ dumpILBytes(startOpcodePtr, (unsigned)((opcodePtr - startOpcodePtr) + sz), ALIGN_WIDTH);
+ printf(" %-12s 0x%X", opcodeNames[opcode], iOp);
+ break;
+
+ case ShortInlineR:
+ dOp = getR4LittleEndian(opcodePtr);
+ goto FLT_OP;
+ case InlineR:
+ dOp = getR8LittleEndian(opcodePtr);
+ goto FLT_OP;
+
+ FLT_OP:
+ dumpILBytes(startOpcodePtr, (unsigned)((opcodePtr - startOpcodePtr) + sz), ALIGN_WIDTH);
+ printf(" %-12s %f", opcodeNames[opcode], dOp);
+ break;
+
+ case ShortInlineBrTarget:
+ jOp = getI1LittleEndian(opcodePtr);
+ goto JMP_OP;
+ case InlineBrTarget:
+ jOp = getI4LittleEndian(opcodePtr);
+ goto JMP_OP;
+
+ JMP_OP:
+ dumpILBytes(startOpcodePtr, (unsigned)((opcodePtr - startOpcodePtr) + sz), ALIGN_WIDTH);
+ printf(" %-12s %d (IL_%04x)", opcodeNames[opcode], jOp, (int)(opcodePtr + sz - codeAddr) + jOp);
+ break;
+
+ case InlineSwitch:
+ jOp2 = getU4LittleEndian(opcodePtr);
+ opcodePtr += 4;
+ opcodePtr += jOp2 * 4; // Jump over the table
+ dumpILBytes(startOpcodePtr, (unsigned)(opcodePtr - startOpcodePtr), ALIGN_WIDTH);
+ printf(" %-12s", opcodeNames[opcode]);
+ break;
+
+ case InlinePhi:
+ jOp2 = getU1LittleEndian(opcodePtr);
+ opcodePtr += 1;
+ opcodePtr += jOp2 * 2; // Jump over the table
+ dumpILBytes(startOpcodePtr, (unsigned)(opcodePtr - startOpcodePtr), ALIGN_WIDTH);
+ printf(" %-12s", opcodeNames[opcode]);
+ break;
+
+ default:
+ assert(!"Bad argKind");
}
opcodePtr += sz;
@@ -636,11 +652,9 @@ DECODE_OPCODE:
// codeAddr - Pointer to IL byte stream to display.
// codeSize - Number of bytes of IL byte stream to display.
//
-void
-dumpILRange(const BYTE* const codeAddr,
- unsigned codeSize) // in bytes
+void dumpILRange(const BYTE* const codeAddr, unsigned codeSize) // in bytes
{
- for (IL_OFFSET offs = 0; offs < codeSize; )
+ for (IL_OFFSET offs = 0; offs < codeSize;)
{
char prefix[100];
sprintf(prefix, "IL_%04x ", offs);
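
dumpILRange above is a thin driver: dumpSingleInstr reports how many bytes the instruction at 'offs' occupied (opcode plus operands, sized by the opcodeSizes/opcodeArgKinds tables), and the loop advances by that amount. A miniature stand-alone version with a hypothetical three-entry table (the opcode byte values are real ECMA-335 encodings, but the table and helper names are illustrative):

    #include <cstdint>
    #include <cstdio>

    struct OpInfo { const char* name; unsigned operandBytes; };

    static unsigned dumpOne(const uint8_t* code, unsigned offs)
    {
        // Hypothetical mini-table: 0x02 ldarg.0, 0x1F ldc.i4.s <int8>, 0x2A ret
        OpInfo info;
        switch (code[offs])
        {
            case 0x02: info = {"ldarg.0", 0}; break;
            case 0x1F: info = {"ldc.i4.s", 1}; break;
            case 0x2A: info = {"ret", 0}; break;
            default:   info = {"??", 0}; break;
        }
        printf("IL_%04x %s", offs, info.name);
        if (info.operandBytes == 1)
            printf(" %d", (int)(int8_t)code[offs + 1]);
        printf("\n");
        return 1 + info.operandBytes; // opcode byte plus operand bytes consumed
    }

    int main()
    {
        const uint8_t il[] = {0x02, 0x1F, 0x0A, 0x2A}; // ldarg.0; ldc.i4.s 10; ret
        for (unsigned offs = 0; offs < sizeof(il);)
            offs += dumpOne(il, offs); // advance by whatever the instruction consumed
        return 0;
    }
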
@@ -655,23 +669,18 @@ dumpILRange(const BYTE* const codeAddr,
* one or two of these can be used at once.
*/
-
-const char * genES2str(EXPSET_TP set)
+const char* genES2str(EXPSET_TP set)
{
- const int bufSize = 17;
- static
- char num1[bufSize];
+ const int bufSize = 17;
+ static char num1[bufSize];
- static
- char num2[bufSize];
+ static char num2[bufSize];
- static
- char * nump = num1;
+ static char* nump = num1;
- char * temp = nump;
+ char* temp = nump;
- nump = (nump == num1) ? num2
- : num1;
+ nump = (nump == num1) ? num2 : num1;
#if EXPSET_SZ == 32
sprintf_s(temp, bufSize, "%08X", set);
@@ -679,39 +688,35 @@ const char * genES2str(EXPSET_TP set)
sprintf_s(temp, bufSize, "%08X%08X", (int)(set >> 32), (int)set);
#endif
- return temp;
+ return temp;
}
-
-const char * refCntWtd2str(unsigned refCntWtd)
+const char* refCntWtd2str(unsigned refCntWtd)
{
- const int bufSize = 17;
- static
- char num1[bufSize];
+ const int bufSize = 17;
+ static char num1[bufSize];
- static
- char num2[bufSize];
+ static char num2[bufSize];
- static
- char * nump = num1;
+ static char* nump = num1;
- char * temp = nump;
+ char* temp = nump;
- nump = (nump == num1) ? num2
- : num1;
+ nump = (nump == num1) ? num2 : num1;
unsigned valueInt = refCntWtd / BB_UNITY_WEIGHT;
unsigned valueFrac = refCntWtd % BB_UNITY_WEIGHT;
if (valueFrac == 0)
{
- sprintf_s(temp, bufSize, "%2u ", valueInt);
+ sprintf_s(temp, bufSize, "%2u ", valueInt);
}
- else {
- sprintf_s(temp, bufSize, "%2u.%1u", valueInt, (valueFrac*10/BB_UNITY_WEIGHT));
+ else
+ {
+ sprintf_s(temp, bufSize, "%2u.%1u", valueInt, (valueFrac * 10 / BB_UNITY_WEIGHT));
}
- return temp;
+ return temp;
}
#endif // DEBUG
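
genES2str and refCntWtd2str above share one small trick worth calling out: they ping-pong between two static buffers, so two results can appear in a single printf call without the second invocation overwriting the first. A stand-alone sketch of the same idea (hex16 is an illustrative name, not a JIT function):

    #include <cstdio>

    const char* hex16(unsigned value)
    {
        static char  buf1[16];
        static char  buf2[16];
        static char* next = buf1;

        char* out = next;
        next = (next == buf1) ? buf2 : buf1; // rotate so the following call uses the other buffer
        snprintf(out, sizeof(buf1), "%08X", value);
        return out;
    }

    int main()
    {
        // Both results survive because each call returned a different static buffer.
        printf("%s vs %s\n", hex16(0xDEAD), hex16(0xBEEF));
        return 0;
    }
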
@@ -744,7 +749,7 @@ bool ConfigMethodRange::Contains(ICorJitInfo* info, CORINFO_METHOD_HANDLE method
if ((m_ranges[i].m_low <= hash) && (hash <= m_ranges[i].m_high))
{
return true;
- }
+ }
}
return false;
@@ -782,12 +787,12 @@ void ConfigMethodRange::InitRanges(const wchar_t* rangeStr, unsigned capacity)
// Allocate some persistent memory
ICorJitHost* jitHost = JitHost::getJitHost();
- m_ranges = (Range*)jitHost->allocateMemory(capacity * sizeof(Range));
- m_entries = capacity;
+ m_ranges = (Range*)jitHost->allocateMemory(capacity * sizeof(Range));
+ m_entries = capacity;
- const wchar_t* p = rangeStr;
- unsigned lastRange = 0;
- bool setHighPart = false;
+ const wchar_t* p = rangeStr;
+ unsigned lastRange = 0;
+ bool setHighPart = false;
while ((*p != 0) && (lastRange < m_entries))
{
@@ -818,8 +823,7 @@ void ConfigMethodRange::InitRanges(const wchar_t* rangeStr, unsigned capacity)
m_ranges[lastRange].m_high = i;
// Sanity check that range is proper
- if ((m_badChar != 0) &&
- (m_ranges[lastRange].m_high < m_ranges[lastRange].m_low))
+ if ((m_badChar != 0) && (m_ranges[lastRange].m_high < m_ranges[lastRange].m_low))
{
m_badChar = (p - rangeStr) + 1;
}
@@ -867,7 +871,7 @@ void ConfigMethodRange::InitRanges(const wchar_t* rangeStr, unsigned capacity)
assert(lastRange <= m_entries);
m_lastRange = lastRange;
- m_inited = 1;
+ m_inited = 1;
}
#endif // defined(DEBUG) || defined(INLINE_DATA)
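
ConfigMethodRange turns a config string of method-hash ranges into an array of {low, high} pairs, and Contains (shown in an earlier hunk) is a linear scan over them. A simplified stand-alone sketch of that shape, parsing decimal tokens rather than whatever format the JIT's own parser accepts:

    #include <cstdio>
    #include <sstream>
    #include <string>
    #include <vector>

    struct Range { unsigned low, high; };

    std::vector<Range> parseRanges(const std::string& s)
    {
        std::vector<Range> ranges;
        std::istringstream in(s);
        std::string tok;
        while (in >> tok)
        {
            size_t dash = tok.find('-');
            Range r;
            r.low  = (unsigned)std::stoul(tok.substr(0, dash));
            r.high = (dash == std::string::npos) ? r.low : (unsigned)std::stoul(tok.substr(dash + 1));
            ranges.push_back(r); // a lone number becomes a one-element range
        }
        return ranges;
    }

    bool contains(const std::vector<Range>& ranges, unsigned hash)
    {
        for (const Range& r : ranges)
            if (r.low <= hash && hash <= r.high)
                return true;
        return false;
    }

    int main()
    {
        auto ranges = parseRanges("10-20 37 4000-5000");
        printf("%d %d\n", contains(ranges, 15), contains(ranges, 21)); // prints "1 0"
        return 0;
    }
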
@@ -879,16 +883,13 @@ void ConfigMethodRange::InitRanges(const wchar_t* rangeStr, unsigned capacity)
*/
Histogram::Histogram(IAllocator* allocator, const unsigned* const sizeTable)
- : m_allocator(allocator)
- , m_sizeTable(sizeTable)
- , m_counts(nullptr)
+ : m_allocator(allocator), m_sizeTable(sizeTable), m_counts(nullptr)
{
unsigned sizeCount = 0;
do
{
sizeCount++;
- }
- while ((sizeTable[sizeCount] != 0) && (sizeCount < 1000));
+ } while ((sizeTable[sizeCount] != 0) && (sizeCount < 1000));
m_sizeCount = sizeCount;
}
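
The Histogram constructor above just counts how many bucket bounds the zero-terminated m_sizeTable holds. What record() presumably does with them (its body is outside this hunk) is bump the first bucket whose bound covers the value, with a final catch-all bucket at the end; a stand-alone sketch under that assumption:

    #include <cstdio>

    int main()
    {
        const unsigned bounds[]  = {8, 16, 32, 0}; // zero-terminated bucket bounds, as above
        unsigned       counts[4] = {};             // one extra catch-all bucket for larger values
        const unsigned samples[] = {3, 9, 100, 17, 5};
        for (unsigned s : samples)
        {
            unsigned i = 0;
            while (bounds[i] != 0 && s > bounds[i]) // find the first bound that covers the sample
                i++;
            counts[i]++;
        }
        printf("<=8:%u <=16:%u <=32:%u >32:%u\n", counts[0], counts[1], counts[2], counts[3]);
        // prints "<=8:2 <=16:1 <=32:1 >32:1"
        return 0;
    }
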
@@ -973,34 +974,34 @@ void Histogram::record(unsigned size)
*/
// bitChunkSize() - Returns number of bits in a bitVect chunk
-inline UINT FixedBitVect::bitChunkSize()
-{
- return sizeof(UINT) * 8;
+inline UINT FixedBitVect::bitChunkSize()
+{
+ return sizeof(UINT) * 8;
}
// bitNumToBit() - Returns a bit mask of the given bit number
-inline UINT FixedBitVect::bitNumToBit(UINT bitNum)
-{
+inline UINT FixedBitVect::bitNumToBit(UINT bitNum)
+{
assert(bitNum < bitChunkSize());
assert(bitChunkSize() <= sizeof(int) * 8);
- return 1 << bitNum;
+ return 1 << bitNum;
}
// bitVectInit() - Initializes a bit vector of a given size
-FixedBitVect * FixedBitVect::bitVectInit(UINT size, Compiler *comp)
+FixedBitVect* FixedBitVect::bitVectInit(UINT size, Compiler* comp)
{
- UINT bitVectMemSize, numberOfChunks;
- FixedBitVect *bv;
-
+ UINT bitVectMemSize, numberOfChunks;
+ FixedBitVect* bv;
+
assert(size != 0);
numberOfChunks = (size - 1) / bitChunkSize() + 1;
- bitVectMemSize = numberOfChunks * (bitChunkSize() / 8); // size in bytes
-
+ bitVectMemSize = numberOfChunks * (bitChunkSize() / 8); // size in bytes
+
assert(bitVectMemSize * bitChunkSize() >= size);
-
- bv = (FixedBitVect *)comp->compGetMemA(sizeof(FixedBitVect) + bitVectMemSize, CMK_FixedBitVect);
+
+ bv = (FixedBitVect*)comp->compGetMemA(sizeof(FixedBitVect) + bitVectMemSize, CMK_FixedBitVect);
memset(bv->bitVect, 0, bitVectMemSize);
bv->bitVectSize = size;
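
bitVectInit above carves out ceil(size/32) UINT chunks; bitChunkSize and bitNumToBit then let the other operations split a bit number into a chunk index plus a mask within the chunk. The set/test bodies fall outside this hunk, but a hedged stand-alone sketch of how such a chunked bit vector typically implements them:

    #include <cassert>
    #include <cstdint>

    struct BitVectSketch
    {
        static constexpr unsigned kChunkBits = 32; // like bitChunkSize()
        uint32_t chunks[4];                        // enough for 128 bits in this sketch

        void set(unsigned bitNum)
        {
            chunks[bitNum / kChunkBits] |= 1u << (bitNum % kChunkBits); // mask as in bitNumToBit()
        }
        bool test(unsigned bitNum) const
        {
            return (chunks[bitNum / kChunkBits] >> (bitNum % kChunkBits)) & 1;
        }
    };

    int main()
    {
        BitVectSketch bv = {};
        bv.set(5);
        bv.set(70);
        assert(bv.test(70) && !bv.test(69)); // bit 70 lives in chunk 2, bit position 6
        return 0;
    }
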
@@ -1035,10 +1036,10 @@ bool FixedBitVect::bitVectTest(UINT bitNum)
}
// bitVectOr() - Or in the given bit vector
-void FixedBitVect::bitVectOr(FixedBitVect *bv)
+void FixedBitVect::bitVectOr(FixedBitVect* bv)
{
UINT bitChunkCnt = (bitVectSize - 1) / bitChunkSize() + 1;
-
+
assert(bitVectSize == bv->bitVectSize);
// Or each chunks
@@ -1049,14 +1050,14 @@ void FixedBitVect::bitVectOr(FixedBitVect *bv)
}
// bitVectAnd() - And with passed in bit vector
-void FixedBitVect::bitVectAnd(FixedBitVect &bv)
+void FixedBitVect::bitVectAnd(FixedBitVect& bv)
{
UINT bitChunkCnt = (bitVectSize - 1) / bitChunkSize() + 1;
-
+
assert(bitVectSize == bv.bitVectSize);
// And each chunks
- for (UINT i = 0; i < bitChunkCnt ; i++)
+ for (UINT i = 0; i < bitChunkCnt; i++)
{
bitVect[i] &= bv.bitVect[i];
}
@@ -1066,7 +1067,7 @@ void FixedBitVect::bitVectAnd(FixedBitVect &bv)
// Return -1 if no bits found.
UINT FixedBitVect::bitVectGetFirst()
{
- return bitVectGetNext((UINT) -1);
+ return bitVectGetNext((UINT)-1);
}
// bitVectGetNext() - Find the next bit on given previous position and return bit num.
@@ -1081,7 +1082,7 @@ UINT FixedBitVect::bitVectGetNext(UINT bitNumPrev)
if (bitNumPrev == (UINT)-1)
{
- index = 0;
+ index = 0;
bitMask = (UINT)-1;
}
else
@@ -1090,13 +1091,12 @@ UINT FixedBitVect::bitVectGetNext(UINT bitNumPrev)
index = bitNumPrev / bitChunkSize();
bitNumPrev -= index * bitChunkSize();
- bit = bitNumToBit(bitNumPrev);
+ bit = bitNumToBit(bitNumPrev);
bitMask = ~(bit | (bit - 1));
}
-
// Find first bit
- for (i = index; i < bitChunkCnt ; i++)
+ for (i = index; i < bitChunkCnt; i++)
{
UINT bitChunk = bitVect[i] & bitMask;
@@ -1111,7 +1111,9 @@ UINT FixedBitVect::bitVectGetNext(UINT bitNumPrev)
// Empty bit vector?
if (bitNum == (UINT)-1)
+ {
return (UINT)-1;
+ }
bitNum += i * bitChunkSize();
@@ -1124,12 +1126,12 @@ UINT FixedBitVect::bitVectGetNext(UINT bitNumPrev)
// Return -1 if no bits found.
UINT FixedBitVect::bitVectGetNextAndClear()
{
- UINT bitNum = (UINT)-1;
+ UINT bitNum = (UINT)-1;
UINT bitChunkCnt = (bitVectSize - 1) / bitChunkSize() + 1;
UINT i;
// Find first bit
- for (i = 0; i < bitChunkCnt ; i++)
+ for (i = 0; i < bitChunkCnt; i++)
{
if (bitVect[i] != 0)
{
@@ -1140,7 +1142,9 @@ UINT FixedBitVect::bitVectGetNextAndClear()
// Empty bit vector?
if (bitNum == (UINT)-1)
+ {
return (UINT)-1;
+ }
// Clear the bit in the right chunk
bitVect[i] &= ~bitNumToBit(bitNum);
@@ -1152,9 +1156,11 @@ UINT FixedBitVect::bitVectGetNextAndClear()
return bitNum;
}
-int SimpleSprintf_s(__in_ecount(cbBufSize - (pWriteStart- pBufStart)) char * pWriteStart,
- __in_ecount(cbBufSize) char * pBufStart, size_t cbBufSize,
- __in_z const char * fmt, ...)
+int SimpleSprintf_s(__in_ecount(cbBufSize - (pWriteStart - pBufStart)) char* pWriteStart,
+ __in_ecount(cbBufSize) char* pBufStart,
+ size_t cbBufSize,
+ __in_z const char* fmt,
+ ...)
{
assert(fmt);
assert(pBufStart);
@@ -1162,25 +1168,31 @@ int SimpleSprintf_s(__in_ecount(cbBufSize - (pWriteStart- pBufStart)) char * pWr
assert((size_t)pBufStart <= (size_t)pWriteStart);
int ret;
- //compute the space left in the buffer.
+ // compute the space left in the buffer.
if ((pBufStart + cbBufSize) < pWriteStart)
+ {
NO_WAY("pWriteStart is past end of buffer");
- size_t cbSpaceLeft = (size_t)((pBufStart + cbBufSize) - pWriteStart);
+ }
+ size_t cbSpaceLeft = (size_t)((pBufStart + cbBufSize) - pWriteStart);
va_list args;
va_start(args, fmt);
ret = vsprintf_s(pWriteStart, cbSpaceLeft, const_cast<char*>(fmt), args);
va_end(args);
if (ret < 0)
+ {
NO_WAY("vsprintf_s failed.");
+ }
return ret;
}
-#ifdef DEBUG
+#ifdef DEBUG
-void hexDump(FILE* dmpf, const char* name, BYTE* addr, size_t size)
+void hexDump(FILE* dmpf, const char* name, BYTE* addr, size_t size)
{
- if (!size)
+ if (!size)
+ {
return;
+ }
assert(addr);
@@ -1188,7 +1200,7 @@ void hexDump(FILE* dmpf, const char* name, BYTE* addr, size_t siz
for (unsigned i = 0; i < size; i++)
{
- if ((i % 16) == 0)
+ if ((i % 16) == 0)
{
fprintf(dmpf, "\n %04X: ", i);
}
@@ -1202,295 +1214,294 @@ void hexDump(FILE* dmpf, const char* name, BYTE* addr, size_t siz
#endif // DEBUG
void HelperCallProperties::init()
-{
- for (CorInfoHelpFunc helper=CORINFO_HELP_UNDEF; // initialize helper
- (helper < CORINFO_HELP_COUNT); // test helper for loop exit
- helper = CorInfoHelpFunc( int(helper) + 1 ) ) // update helper to next
+{
+ for (CorInfoHelpFunc helper = CORINFO_HELP_UNDEF; // initialize helper
+ (helper < CORINFO_HELP_COUNT); // test helper for loop exit
+ helper = CorInfoHelpFunc(int(helper) + 1)) // update helper to next
{
// Generally you want initialize these to their most typical/safest result
//
- bool isPure = false; // true if the result only depends upon input args and not any global state
- bool noThrow = false; // true if the helper will never throw
- bool nonNullReturn = false; // true if the result will never be null or zero
- bool isAllocator = false; // true if the result is usually a newly created heap item, or may throw OutOfMemory
- bool mutatesHeap = false; // true if any previous heap objects [are|can be] modified
- bool mayRunCctor = false; // true if the helper call may cause a static constructor to be run.
- bool mayFinalize = false; // true if the helper call allocates an object that may need to run a finalizer
-
+ bool isPure = false; // true if the result only depends upon input args and not any global state
+ bool noThrow = false; // true if the helper will never throw
+ bool nonNullReturn = false; // true if the result will never be null or zero
+ bool isAllocator = false; // true if the result is usually a newly created heap item, or may throw OutOfMemory
+ bool mutatesHeap = false; // true if any previous heap objects [are|can be] modified
+ bool mayRunCctor = false; // true if the helper call may cause a static constructor to be run.
+ bool mayFinalize = false; // true if the helper call allocates an object that may need to run a finalizer
+
switch (helper)
{
// Arithmetic helpers that cannot throw
- case CORINFO_HELP_LLSH:
- case CORINFO_HELP_LRSH:
- case CORINFO_HELP_LRSZ:
- case CORINFO_HELP_LMUL:
- case CORINFO_HELP_ULDIV:
- case CORINFO_HELP_ULMOD:
- case CORINFO_HELP_LNG2DBL:
- case CORINFO_HELP_ULNG2DBL:
- case CORINFO_HELP_DBL2INT:
- case CORINFO_HELP_DBL2LNG:
- case CORINFO_HELP_DBL2UINT:
- case CORINFO_HELP_DBL2ULNG:
- case CORINFO_HELP_FLTREM:
- case CORINFO_HELP_DBLREM:
- case CORINFO_HELP_FLTROUND:
- case CORINFO_HELP_DBLROUND:
-
- isPure = true;
- noThrow = true;
- break;
+ case CORINFO_HELP_LLSH:
+ case CORINFO_HELP_LRSH:
+ case CORINFO_HELP_LRSZ:
+ case CORINFO_HELP_LMUL:
+ case CORINFO_HELP_ULDIV:
+ case CORINFO_HELP_ULMOD:
+ case CORINFO_HELP_LNG2DBL:
+ case CORINFO_HELP_ULNG2DBL:
+ case CORINFO_HELP_DBL2INT:
+ case CORINFO_HELP_DBL2LNG:
+ case CORINFO_HELP_DBL2UINT:
+ case CORINFO_HELP_DBL2ULNG:
+ case CORINFO_HELP_FLTREM:
+ case CORINFO_HELP_DBLREM:
+ case CORINFO_HELP_FLTROUND:
+ case CORINFO_HELP_DBLROUND:
+
+ isPure = true;
+ noThrow = true;
+ break;
// Arithmetic helpers that *can* throw.
-
// This (or these) are not pure, in that they have "VM side effects"...but they don't mutate the heap.
- case CORINFO_HELP_ENDCATCH:
- break;
-
+ case CORINFO_HELP_ENDCATCH:
+ break;
+
// Arithmetic helpers that may throw
- case CORINFO_HELP_LMOD: // Mods throw div-by zero, and signed mods have problems with the smallest integer mod -1,
- case CORINFO_HELP_MOD: // which is not representable as a positive integer.
- case CORINFO_HELP_UMOD:
-
- case CORINFO_HELP_UDIV: // Divs throw divide-by-zero.
- case CORINFO_HELP_LDIV:
-
- case CORINFO_HELP_LMUL_OVF:
- case CORINFO_HELP_ULMUL_OVF:
- case CORINFO_HELP_DBL2INT_OVF:
- case CORINFO_HELP_DBL2LNG_OVF:
- case CORINFO_HELP_DBL2UINT_OVF:
- case CORINFO_HELP_DBL2ULNG_OVF:
-
- isPure = true;
- break;
-
+ case CORINFO_HELP_LMOD: // Mods throw div-by zero, and signed mods have problems with the smallest integer
+ // mod -1,
+ case CORINFO_HELP_MOD: // which is not representable as a positive integer.
+ case CORINFO_HELP_UMOD:
+
+ case CORINFO_HELP_UDIV: // Divs throw divide-by-zero.
+ case CORINFO_HELP_LDIV:
+
+ case CORINFO_HELP_LMUL_OVF:
+ case CORINFO_HELP_ULMUL_OVF:
+ case CORINFO_HELP_DBL2INT_OVF:
+ case CORINFO_HELP_DBL2LNG_OVF:
+ case CORINFO_HELP_DBL2UINT_OVF:
+ case CORINFO_HELP_DBL2ULNG_OVF:
+
+ isPure = true;
+ break;
+
// Heap Allocation helpers, these all never return null
- case CORINFO_HELP_NEWSFAST:
- case CORINFO_HELP_NEWSFAST_ALIGN8:
+ case CORINFO_HELP_NEWSFAST:
+ case CORINFO_HELP_NEWSFAST_ALIGN8:
- isAllocator = true;
- nonNullReturn = true;
- noThrow = true; // only can throw OutOfMemory
- break;
+ isAllocator = true;
+ nonNullReturn = true;
+ noThrow = true; // only can throw OutOfMemory
+ break;
- case CORINFO_HELP_NEW_CROSSCONTEXT:
- case CORINFO_HELP_NEWFAST:
- case CORINFO_HELP_READYTORUN_NEW:
+ case CORINFO_HELP_NEW_CROSSCONTEXT:
+ case CORINFO_HELP_NEWFAST:
+ case CORINFO_HELP_READYTORUN_NEW:
+
+ mayFinalize = true; // These may run a finalizer
+ isAllocator = true;
+ nonNullReturn = true;
+ noThrow = true; // only can throw OutOfMemory
+ break;
- mayFinalize = true; // These may run a finalizer
- isAllocator = true;
- nonNullReturn = true;
- noThrow = true; // only can throw OutOfMemory
- break;
-
// These allocation helpers do some checks on the size (and lower bound) inputs,
// and can throw exceptions other than OOM.
- case CORINFO_HELP_NEWARR_1_VC:
- case CORINFO_HELP_NEWARR_1_ALIGN8:
+ case CORINFO_HELP_NEWARR_1_VC:
+ case CORINFO_HELP_NEWARR_1_ALIGN8:
- isAllocator = true;
- nonNullReturn = true;
- break;
+ isAllocator = true;
+ nonNullReturn = true;
+ break;
// These allocation helpers do some checks on the size (and lower bound) inputs,
// and can throw exceptions other than OOM.
- case CORINFO_HELP_NEW_MDARR:
- case CORINFO_HELP_NEWARR_1_DIRECT:
- case CORINFO_HELP_NEWARR_1_OBJ:
- case CORINFO_HELP_READYTORUN_NEWARR_1:
-
- mayFinalize = true; // These may run a finalizer
- isAllocator = true;
- nonNullReturn = true;
- break;
-
+ case CORINFO_HELP_NEW_MDARR:
+ case CORINFO_HELP_NEWARR_1_DIRECT:
+ case CORINFO_HELP_NEWARR_1_OBJ:
+ case CORINFO_HELP_READYTORUN_NEWARR_1:
+
+ mayFinalize = true; // These may run a finalizer
+ isAllocator = true;
+ nonNullReturn = true;
+ break;
+
// Heap Allocation helpers that are also pure
- case CORINFO_HELP_STRCNS:
-
- isPure = true;
- isAllocator = true;
- nonNullReturn = true;
- noThrow = true; // only can throw OutOfMemory
- break;
+ case CORINFO_HELP_STRCNS:
- case CORINFO_HELP_BOX:
- nonNullReturn = true;
- isAllocator = true;
- noThrow = true; // only can throw OutOfMemory
- break;
+ isPure = true;
+ isAllocator = true;
+ nonNullReturn = true;
+ noThrow = true; // only can throw OutOfMemory
+ break;
- case CORINFO_HELP_BOX_NULLABLE:
- // Box Nullable is not a 'pure' function
- // It has a Byref argument that it reads the contents of.
- //
- // So two calls to Box Nullable that pass the same address (with the same Value Number)
- // will produce different results when the contents of the memory pointed to by the Byref changes
- //
- isAllocator = true;
- noThrow = true; // only can throw OutOfMemory
- break;
-
- case CORINFO_HELP_RUNTIMEHANDLE_METHOD:
- case CORINFO_HELP_RUNTIMEHANDLE_CLASS:
- case CORINFO_HELP_RUNTIMEHANDLE_METHOD_LOG:
- case CORINFO_HELP_RUNTIMEHANDLE_CLASS_LOG:
- // logging helpers are not technically pure but can be optimized away
- isPure = true;
- noThrow = true;
- nonNullReturn = true;
- break;
+ case CORINFO_HELP_BOX:
+ nonNullReturn = true;
+ isAllocator = true;
+ noThrow = true; // only can throw OutOfMemory
+ break;
+
+ case CORINFO_HELP_BOX_NULLABLE:
+ // Box Nullable is not a 'pure' function
+ // It has a Byref argument that it reads the contents of.
+ //
+ // So two calls to Box Nullable that pass the same address (with the same Value Number)
+ // will produce different results when the contents of the memory pointed to by the Byref changes
+ //
+ isAllocator = true;
+ noThrow = true; // only can throw OutOfMemory
+ break;
+
+ case CORINFO_HELP_RUNTIMEHANDLE_METHOD:
+ case CORINFO_HELP_RUNTIMEHANDLE_CLASS:
+ case CORINFO_HELP_RUNTIMEHANDLE_METHOD_LOG:
+ case CORINFO_HELP_RUNTIMEHANDLE_CLASS_LOG:
+ // logging helpers are not technically pure but can be optimized away
+ isPure = true;
+ noThrow = true;
+ nonNullReturn = true;
+ break;
// type casting helpers
- case CORINFO_HELP_ISINSTANCEOFINTERFACE:
- case CORINFO_HELP_ISINSTANCEOFARRAY:
- case CORINFO_HELP_ISINSTANCEOFCLASS:
- case CORINFO_HELP_ISINSTANCEOFANY:
- case CORINFO_HELP_READYTORUN_ISINSTANCEOF:
-
- isPure = true;
- noThrow = true; // These return null for a failing cast
- break;
-
+ case CORINFO_HELP_ISINSTANCEOFINTERFACE:
+ case CORINFO_HELP_ISINSTANCEOFARRAY:
+ case CORINFO_HELP_ISINSTANCEOFCLASS:
+ case CORINFO_HELP_ISINSTANCEOFANY:
+ case CORINFO_HELP_READYTORUN_ISINSTANCEOF:
+
+ isPure = true;
+ noThrow = true; // These return null for a failing cast
+ break;
+
// type casting helpers that throw
- case CORINFO_HELP_CHKCASTINTERFACE:
- case CORINFO_HELP_CHKCASTARRAY:
- case CORINFO_HELP_CHKCASTCLASS:
- case CORINFO_HELP_CHKCASTANY:
- case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
- case CORINFO_HELP_READYTORUN_CHKCAST:
-
- // These throw for a failing cast
- // But if given a null input arg will return null
- isPure = true;
- break;
-
+ case CORINFO_HELP_CHKCASTINTERFACE:
+ case CORINFO_HELP_CHKCASTARRAY:
+ case CORINFO_HELP_CHKCASTCLASS:
+ case CORINFO_HELP_CHKCASTANY:
+ case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
+ case CORINFO_HELP_READYTORUN_CHKCAST:
+
+ // These throw for a failing cast
+ // But if given a null input arg will return null
+ isPure = true;
+ break;
+
// helpers returning addresses, these can also throw
- case CORINFO_HELP_UNBOX:
- case CORINFO_HELP_GETREFANY:
- case CORINFO_HELP_LDELEMA_REF:
-
- isPure = true;
- break;
-
+ case CORINFO_HELP_UNBOX:
+ case CORINFO_HELP_GETREFANY:
+ case CORINFO_HELP_LDELEMA_REF:
+
+ isPure = true;
+ break;
+
// helpers that return internal handle
// TODO-ARM64-Bug?: Can these throw or not?
- case CORINFO_HELP_GETCLASSFROMMETHODPARAM:
- case CORINFO_HELP_GETSYNCFROMCLASSHANDLE:
-
- isPure = true;
- break;
+ case CORINFO_HELP_GETCLASSFROMMETHODPARAM:
+ case CORINFO_HELP_GETSYNCFROMCLASSHANDLE:
+
+ isPure = true;
+ break;
// Helpers that load the base address for static variables.
// We divide these between those that may and may not invoke
// static class constructors.
- case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
- case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE:
- case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
- case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS:
- case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
- case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
- case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
- case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE:
- case CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS:
- case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
- case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS:
- case CORINFO_HELP_GETSTATICFIELDADDR_CONTEXT:
- case CORINFO_HELP_GETSTATICFIELDADDR_TLS:
- case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
- case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
- case CORINFO_HELP_READYTORUN_STATIC_BASE:
-
- // These may invoke static class constructors
- // These can throw InvalidProgram exception if the class can not be constructed
- //
- isPure = true;
- nonNullReturn = true;
- mayRunCctor = true;
- break;
+ case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
+ case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE:
+ case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
+ case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS:
+ case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
+ case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
+ case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
+ case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE:
+ case CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS:
+ case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
+ case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS:
+ case CORINFO_HELP_GETSTATICFIELDADDR_CONTEXT:
+ case CORINFO_HELP_GETSTATICFIELDADDR_TLS:
+ case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
+ case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
+ case CORINFO_HELP_READYTORUN_STATIC_BASE:
+
+ // These may invoke static class constructors
+ // These can throw InvalidProgram exception if the class can not be constructed
+ //
+ isPure = true;
+ nonNullReturn = true;
+ mayRunCctor = true;
+ break;
- case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
- case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR:
- case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
- case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR:
+ case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
+ case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR:
+ case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
+ case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR:
- // These do not invoke static class constructors
- //
- isPure = true;
- noThrow = true;
- nonNullReturn = true;
- break;
+ // These do not invoke static class constructors
+ //
+ isPure = true;
+ noThrow = true;
+ nonNullReturn = true;
+ break;
// GC Write barrier support
// TODO-ARM64-Bug?: Can these throw or not?
- case CORINFO_HELP_ASSIGN_REF:
- case CORINFO_HELP_CHECKED_ASSIGN_REF:
- case CORINFO_HELP_ASSIGN_REF_ENSURE_NONHEAP:
- case CORINFO_HELP_ASSIGN_BYREF:
- case CORINFO_HELP_ASSIGN_STRUCT:
-
- mutatesHeap = true;
- break;
-
+ case CORINFO_HELP_ASSIGN_REF:
+ case CORINFO_HELP_CHECKED_ASSIGN_REF:
+ case CORINFO_HELP_ASSIGN_REF_ENSURE_NONHEAP:
+ case CORINFO_HELP_ASSIGN_BYREF:
+ case CORINFO_HELP_ASSIGN_STRUCT:
+
+ mutatesHeap = true;
+ break;
+
// Accessing fields (write)
- case CORINFO_HELP_SETFIELD32:
- case CORINFO_HELP_SETFIELD64:
- case CORINFO_HELP_SETFIELDOBJ:
- case CORINFO_HELP_SETFIELDSTRUCT:
- case CORINFO_HELP_SETFIELDFLOAT:
- case CORINFO_HELP_SETFIELDDOUBLE:
- case CORINFO_HELP_ARRADDR_ST:
-
- mutatesHeap = true;
- break;
+ case CORINFO_HELP_SETFIELD32:
+ case CORINFO_HELP_SETFIELD64:
+ case CORINFO_HELP_SETFIELDOBJ:
+ case CORINFO_HELP_SETFIELDSTRUCT:
+ case CORINFO_HELP_SETFIELDFLOAT:
+ case CORINFO_HELP_SETFIELDDOUBLE:
+ case CORINFO_HELP_ARRADDR_ST:
+
+ mutatesHeap = true;
+ break;
// These helper calls always throw an exception
- case CORINFO_HELP_OVERFLOW:
- case CORINFO_HELP_VERIFICATION:
- case CORINFO_HELP_RNGCHKFAIL:
- case CORINFO_HELP_THROWDIVZERO:
+ case CORINFO_HELP_OVERFLOW:
+ case CORINFO_HELP_VERIFICATION:
+ case CORINFO_HELP_RNGCHKFAIL:
+ case CORINFO_HELP_THROWDIVZERO:
#if COR_JIT_EE_VERSION > 460
- case CORINFO_HELP_THROWNULLREF:
+ case CORINFO_HELP_THROWNULLREF:
#endif // COR_JIT_EE_VERSION
- case CORINFO_HELP_THROW:
- case CORINFO_HELP_RETHROW:
+ case CORINFO_HELP_THROW:
+ case CORINFO_HELP_RETHROW:
- break;
+ break;
// These helper calls may throw an exception
- case CORINFO_HELP_METHOD_ACCESS_CHECK:
- case CORINFO_HELP_FIELD_ACCESS_CHECK:
- case CORINFO_HELP_CLASS_ACCESS_CHECK:
- case CORINFO_HELP_DELEGATE_SECURITY_CHECK:
-
- break;
+ case CORINFO_HELP_METHOD_ACCESS_CHECK:
+ case CORINFO_HELP_FIELD_ACCESS_CHECK:
+ case CORINFO_HELP_CLASS_ACCESS_CHECK:
+ case CORINFO_HELP_DELEGATE_SECURITY_CHECK:
+
+ break;
// This is a debugging aid; it simply returns a constant address.
- case CORINFO_HELP_LOOP_CLONE_CHOICE_ADDR:
- isPure = true;
- noThrow = true;
- break;
+ case CORINFO_HELP_LOOP_CLONE_CHOICE_ADDR:
+ isPure = true;
+ noThrow = true;
+ break;
// Not sure how to handle optimization involving the rest of these helpers
- default:
-
- // The most pessimistic results are returned for these helpers
- mutatesHeap = true;
- break;
+ default:
+
+ // The most pessimistic results are returned for these helpers
+ mutatesHeap = true;
+ break;
}
-
- m_isPure [helper] = isPure;
- m_noThrow [helper] = noThrow;
+
+ m_isPure[helper] = isPure;
+ m_noThrow[helper] = noThrow;
m_nonNullReturn[helper] = nonNullReturn;
- m_isAllocator [helper] = isAllocator;
- m_mutatesHeap [helper] = mutatesHeap;
- m_mayRunCctor [helper] = mayRunCctor;
- m_mayFinalize [helper] = mayFinalize;
+ m_isAllocator[helper] = isAllocator;
+ m_mutatesHeap[helper] = mutatesHeap;
+ m_mayRunCctor[helper] = mayRunCctor;
+ m_mayFinalize[helper] = mayFinalize;
}
}
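
The per-helper flags filled in above are what the optimizer consults elsewhere: a pure, no-throw helper call is a candidate for CSE or hoisting, a heap-mutating one kills memory state, and so on. A stand-alone miniature of the same fill-then-lookup pattern (the three helpers here are hypothetical, not CorInfoHelpFunc values):

    #include <cstdio>

    enum MiniHelper { HELPER_DIV, HELPER_NEWOBJ, HELPER_THROW, HELPER_COUNT };

    struct MiniHelperProps
    {
        bool isPure[HELPER_COUNT];
        bool noThrow[HELPER_COUNT];

        void init()
        {
            for (int h = 0; h < HELPER_COUNT; h++)
            {
                bool pure = false, cannotThrow = false; // pessimistic defaults, as above
                switch (h)
                {
                    case HELPER_DIV:    pure = true; break; // pure, but can throw divide-by-zero
                    case HELPER_NEWOBJ: break;              // allocator: can throw OutOfMemory
                    case HELPER_THROW:  break;              // always throws
                }
                isPure[h]  = pure;
                noThrow[h] = cannotThrow;
            }
        }
    };

    int main()
    {
        MiniHelperProps props;
        props.init();
        // A pure, no-throw helper could be CSE'd; HELPER_DIV fails the no-throw half:
        printf("cse-able: %d\n", props.isPure[HELPER_DIV] && props.noThrow[HELPER_DIV]); // prints 0
        return 0;
    }
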
-
//=============================================================================
// AssemblyNamesList2
//=============================================================================
@@ -1499,41 +1510,43 @@ void HelperCallProperties::init()
// MyAssembly;mscorlib;System
// MyAssembly;mscorlib System
-AssemblyNamesList2::AssemblyNamesList2(const wchar_t* list, IAllocator* alloc)
- : m_alloc(alloc)
+AssemblyNamesList2::AssemblyNamesList2(const wchar_t* list, IAllocator* alloc) : m_alloc(alloc)
{
assert(m_alloc != nullptr);
- WCHAR prevChar = '?'; // dummy
- LPWSTR nameStart = nullptr; // start of the name currently being processed. nullptr if no current name
+ WCHAR prevChar = '?'; // dummy
+ LPWSTR nameStart = nullptr; // start of the name currently being processed. nullptr if no current name
AssemblyName** ppPrevLink = &m_pNames;
-
+
for (LPWSTR listWalk = const_cast<LPWSTR>(list); prevChar != '\0'; prevChar = *listWalk, listWalk++)
{
WCHAR curChar = *listWalk;
-
- if (iswspace(curChar) || curChar == W(';') || curChar == W('\0') )
+
+ if (iswspace(curChar) || curChar == W(';') || curChar == W('\0'))
{
//
// Found white-space
//
-
+
if (nameStart)
{
// Found the end of the current name; add a new assembly name to the list.
-
+
AssemblyName* newName = new (m_alloc) AssemblyName();
-
+
// Null out the current character so we can do zero-terminated string work; we'll restore it later.
*listWalk = W('\0');
// How much space do we need?
- int convertedNameLenBytes = WszWideCharToMultiByte(CP_UTF8, 0, nameStart, -1, NULL, 0, NULL, NULL);
- newName->m_assemblyName = new (m_alloc) char[convertedNameLenBytes]; // convertedNameLenBytes includes the trailing null character
- if (WszWideCharToMultiByte(CP_UTF8, 0, nameStart, -1, newName->m_assemblyName, convertedNameLenBytes, NULL, NULL) != 0)
+ int convertedNameLenBytes =
+ WszWideCharToMultiByte(CP_UTF8, 0, nameStart, -1, nullptr, 0, nullptr, nullptr);
+ newName->m_assemblyName = new (m_alloc) char[convertedNameLenBytes]; // convertedNameLenBytes includes
+ // the trailing null character
+ if (WszWideCharToMultiByte(CP_UTF8, 0, nameStart, -1, newName->m_assemblyName, convertedNameLenBytes,
+ nullptr, nullptr) != 0)
{
*ppPrevLink = newName;
- ppPrevLink = &newName->m_next;
+ ppPrevLink = &newName->m_next;
}
else
{
@@ -1551,13 +1564,13 @@ AssemblyNamesList2::AssemblyNamesList2(const wchar_t* list, IAllocator* alloc)
//
// Found the start of a new name
//
-
+
nameStart = listWalk;
}
}
assert(nameStart == nullptr); // cannot be in the middle of a name
- *ppPrevLink = nullptr; // Terminate the last element of the list.
+ *ppPrevLink = nullptr; // Terminate the last element of the list.
}
AssemblyNamesList2::~AssemblyNamesList2()
@@ -1565,7 +1578,7 @@ AssemblyNamesList2::~AssemblyNamesList2()
for (AssemblyName* pName = m_pNames; pName != nullptr; /**/)
{
AssemblyName* cur = pName;
- pName = pName->m_next;
+ pName = pName->m_next;
m_alloc->Free(cur->m_assemblyName);
m_alloc->Free(cur);
@@ -1577,15 +1590,16 @@ bool AssemblyNamesList2::IsInList(const char* assemblyName)
for (AssemblyName* pName = m_pNames; pName != nullptr; pName = pName->m_next)
{
if (_stricmp(pName->m_assemblyName, assemblyName) == 0)
+ {
return true;
+ }
}
return false;
}
#ifdef FEATURE_JIT_METHOD_PERF
-CycleCount::CycleCount()
- : cps(CycleTimer::CyclesPerSecond())
+CycleCount::CycleCount() : cps(CycleTimer::CyclesPerSecond())
{
}
@@ -1602,8 +1616,8 @@ bool CycleCount::Start()
double CycleCount::ElapsedTime()
{
unsigned __int64 nowCycles;
- (void) GetCycles(&nowCycles);
- return ((double) (nowCycles - beginCycles) / cps) * 1000.0;
+ (void)GetCycles(&nowCycles);
+ return ((double)(nowCycles - beginCycles) / cps) * 1000.0;
}
bool PerfCounter::Start()
@@ -1613,8 +1627,8 @@ bool PerfCounter::Start()
{
return result;
}
- freq = (double) beg.QuadPart / 1000.0;
- (void) QueryPerformanceCounter(&beg);
+ freq = (double)beg.QuadPart / 1000.0;
+ (void)QueryPerformanceCounter(&beg);
return result;
}
@@ -1622,20 +1636,19 @@ bool PerfCounter::Start()
double PerfCounter::ElapsedTime()
{
LARGE_INTEGER li;
- (void) QueryPerformanceCounter(&li);
- return (double) (li.QuadPart - beg.QuadPart) / freq;
+ (void)QueryPerformanceCounter(&li);
+ return (double)(li.QuadPart - beg.QuadPart) / freq;
}
#endif
-
#ifdef DEBUG
/*****************************************************************************
* Return the number of digits in a number of the given base (default base 10).
* Used when outputting strings.
*/
-unsigned CountDigits(unsigned num, unsigned base /* = 10 */)
+unsigned CountDigits(unsigned num, unsigned base /* = 10 */)
{
assert(2 <= base && base <= 16); // sanity check
unsigned count = 1;
@@ -1649,51 +1662,56 @@ unsigned CountDigits(unsigned num, unsigned base /* = 10 */)
#endif // DEBUG
-
-double FloatingPointUtils::convertUInt64ToDouble(unsigned __int64 uIntVal) {
- __int64 s64 = uIntVal;
- double d;
- if (s64 < 0) {
+double FloatingPointUtils::convertUInt64ToDouble(unsigned __int64 uIntVal)
+{
+ __int64 s64 = uIntVal;
+ double d;
+ if (s64 < 0)
+ {
#if defined(_TARGET_XARCH_)
- // RyuJIT codegen and clang (or gcc) may produce different results for casting uint64 to
+ // RyuJIT codegen and clang (or gcc) may produce different results for casting uint64 to
// double, and the clang result is more accurate. For example,
// 1) (double)0x84595161401484A0UL --> 43e08b2a2c280290 (RyuJIT codegen or VC++)
// 2) (double)0x84595161401484A0UL --> 43e08b2a2c280291 (clang or gcc)
// If the folding optimization below is implemented by simple casting of (double)uint64_val
// and it is compiled by clang, casting result can be inconsistent, depending on whether
- // the folding optimization is triggered or the codegen generates instructions for casting. //
+ // the folding optimization is triggered or the codegen generates instructions for casting. //
// The current solution is to force the same math as the codegen does, so that casting
// result is always consistent.
// d = (double)(int64_t)uint64 + 0x1p64
uint64_t adjHex = 0x43F0000000000000UL;
- d = (double)s64 + *(double*)&adjHex;
+ d = (double)s64 + *(double*)&adjHex;
#else
- d = (double)uIntVal;
+ d = (double)uIntVal;
#endif
}
- else
+ else
{
d = (double)uIntVal;
}
return d;
}
-float FloatingPointUtils::convertUInt64ToFloat(unsigned __int64 u64) {
+float FloatingPointUtils::convertUInt64ToFloat(unsigned __int64 u64)
+{
double d = convertUInt64ToDouble(u64);
return (float)d;
}
-unsigned __int64 FloatingPointUtils::convertDoubleToUInt64(double d) {
+unsigned __int64 FloatingPointUtils::convertDoubleToUInt64(double d)
+{
unsigned __int64 u64;
if (d >= 0.0)
{
// Work around a C++ issue where it doesn't properly convert large positive doubles
const double two63 = 2147483648.0 * 4294967296.0;
- if (d < two63) {
+ if (d < two63)
+ {
u64 = UINT64(d);
}
- else {
+ else
+ {
// subtract 0x8000000000000000, do the convert then add it back again
u64 = INT64(d - two63) + I64(0x8000000000000000);
}
@@ -1713,7 +1731,7 @@ unsigned __int64 FloatingPointUtils::convertDoubleToUInt64(double d) {
u64 = UINT64(INT64(d));
#else
- u64 = UINT64(d);
+ u64 = UINT64(d);
#endif // _TARGET_XARCH_
return u64;
@@ -1728,17 +1746,19 @@ double FloatingPointUtils::round(double x)
{
// If the number has no fractional part do nothing
// This shortcut is necessary to workaround precision loss in borderline cases on some platforms
- if (x == ((double)((__int64)x))) {
+ if (x == ((double)((__int64)x)))
+ {
return x;
}
// We had a number that was equally close to 2 integers.
// We need to return the even one.
- double tempVal = (x + 0.5);
+ double tempVal = (x + 0.5);
double flrTempVal = floor(tempVal);
- if ((flrTempVal == tempVal) && (fmod(tempVal, 2.0) != 0)) {
+ if ((flrTempVal == tempVal) && (fmod(tempVal, 2.0) != 0))
+ {
flrTempVal -= 1.0;
}
diff --git a/src/jit/utils.h b/src/jit/utils.h
index f9fb39131e..1cd35903dd 100644
--- a/src/jit/utils.h
+++ b/src/jit/utils.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
@@ -29,28 +28,29 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#define BitScanForwardPtr BitScanForward
#endif
-template<typename T, int size>
-unsigned ArrLen(T(&)[size]){return size;}
+template <typename T, int size>
+unsigned ArrLen(T (&)[size])
+{
+ return size;
+}
// return true if arg is a power of 2
-template<typename T>
+template <typename T>
inline bool isPow2(T i)
{
- return (i > 0 && ((i-1)&i) == 0);
+ return (i > 0 && ((i - 1) & i) == 0);
}
// Adapter for iterators to a type that is compatible with C++11
// range-based for loops.
-template<typename TIterator>
+template <typename TIterator>
class IteratorPair
{
TIterator m_begin;
TIterator m_end;
public:
- IteratorPair(TIterator begin, TIterator end)
- : m_begin(begin)
- , m_end(end)
+ IteratorPair(TIterator begin, TIterator end) : m_begin(begin), m_end(end)
{
}
@@ -65,7 +65,7 @@ public:
}
};
-template<typename TIterator>
+template <typename TIterator>
inline IteratorPair<TIterator> MakeIteratorPair(TIterator begin, TIterator end)
{
return IteratorPair<TIterator>(begin, end);
@@ -74,13 +74,31 @@ inline IteratorPair<TIterator> MakeIteratorPair(TIterator begin, TIterator end)
// Recursive template definition to calculate the base-2 logarithm
// of a constant value.
template <unsigned val, unsigned acc = 0>
-struct ConstLog2 { enum { value = ConstLog2<val / 2, acc + 1>::value }; };
+struct ConstLog2
+{
+ enum
+ {
+ value = ConstLog2<val / 2, acc + 1>::value
+ };
+};
template <unsigned acc>
-struct ConstLog2<0, acc> { enum { value = acc }; };
+struct ConstLog2<0, acc>
+{
+ enum
+ {
+ value = acc
+ };
+};
template <unsigned acc>
-struct ConstLog2<1, acc> { enum { value = acc }; };
+struct ConstLog2<1, acc>
+{
+ enum
+ {
+ value = acc
+ };
+};
inline const char* dspBool(bool b)
{
@@ -97,23 +115,30 @@ inline int64_t abs(int64_t t)
#endif
#endif // FEATURE_CORECLR
-template <typename T> int signum(T val)
+template <typename T>
+int signum(T val)
{
if (val < T(0))
+ {
return -1;
+ }
else if (val > T(0))
+ {
return 1;
- else
+ }
+ else
+ {
return 0;
+ }
}
class JitSimplerHashBehavior
{
public:
- static const unsigned s_growth_factor_numerator = 3;
+ static const unsigned s_growth_factor_numerator = 3;
static const unsigned s_growth_factor_denominator = 2;
- static const unsigned s_density_factor_numerator = 3;
+ static const unsigned s_density_factor_numerator = 3;
static const unsigned s_density_factor_denominator = 4;
static const unsigned s_minimum_allocation = 7;
@@ -149,7 +174,6 @@ class ConfigMethodRange
{
public:
-
// Default capacity
enum
{
@@ -173,11 +197,16 @@ public:
}
// Error checks
- bool Error() const { return m_badChar != 0; }
- size_t BadCharIndex() const { return m_badChar - 1; }
+ bool Error() const
+ {
+ return m_badChar != 0;
+ }
+ size_t BadCharIndex() const
+ {
+ return m_badChar - 1;
+ }
private:
-
struct Range
{
unsigned m_low;
@@ -186,18 +215,17 @@ private:
void InitRanges(const wchar_t* rangeStr, unsigned capacity);
- unsigned m_entries; // number of entries in the range array
- unsigned m_lastRange; // count of low-high pairs
- unsigned m_inited; // 1 if range string has been parsed
- size_t m_badChar; // index + 1 of any bad character in range string
- Range* m_ranges; // ranges of functions to include
+ unsigned m_entries; // number of entries in the range array
+ unsigned m_lastRange; // count of low-high pairs
+ unsigned m_inited; // 1 if range string has been parsed
+ size_t m_badChar; // index + 1 of any bad character in range string
+ Range* m_ranges; // ranges of functions to include
};
#endif // defined(DEBUG) || defined(INLINE_DATA)
class Compiler;
-
/*****************************************************************************
* Fixed bit vector class
*/
@@ -214,9 +242,8 @@ private:
static UINT bitNumToBit(UINT bitNum);
public:
-
// bitVectInit() - Initializes a bit vector of a given size
- static FixedBitVect *bitVectInit(UINT size, Compiler *comp);
+ static FixedBitVect* bitVectInit(UINT size, Compiler* comp);
// bitVectSet() - Sets the given bit
void bitVectSet(UINT bitNum);
@@ -225,10 +252,10 @@ public:
bool bitVectTest(UINT bitNum);
// bitVectOr() - Or in the given bit vector
- void bitVectOr(FixedBitVect *bv);
+ void bitVectOr(FixedBitVect* bv);
// bitVectAnd() - And with passed in bit vector
- void bitVectAnd(FixedBitVect &bv);
+ void bitVectAnd(FixedBitVect& bv);
// bitVectGetFirst() - Find the first bit on and return the bit num.
// Return -1 if no bits found.
@@ -259,15 +286,16 @@ public:
* returns -> number of bytes successfully written, not including the null
* terminator. Calls NO_WAY on error.
*/
-int SimpleSprintf_s(__in_ecount(cbBufSize-(pWriteStart - pBufStart)) char * pWriteStart,
- __in_ecount(cbBufSize) char * pBufStart, size_t cbBufSize,
- __in_z const char * fmt, ...);
+int SimpleSprintf_s(__in_ecount(cbBufSize - (pWriteStart - pBufStart)) char* pWriteStart,
+ __in_ecount(cbBufSize) char* pBufStart,
+ size_t cbBufSize,
+ __in_z const char* fmt,
+ ...);
#ifdef DEBUG
void hexDump(FILE* dmpf, const char* name, BYTE* addr, size_t size);
#endif // DEBUG
-
/******************************************************************************
* ScopedSetVariable: A simple class to set and restore a variable within a scope.
* For example, it can be used to set a 'bool' flag to 'true' at the beginning of a
@@ -280,10 +308,9 @@ template <typename T>
class ScopedSetVariable
{
public:
- ScopedSetVariable(T* pVariable, T value)
- : m_pVariable(pVariable)
+ ScopedSetVariable(T* pVariable, T value) : m_pVariable(pVariable)
{
- m_oldValue = *m_pVariable;
+ m_oldValue = *m_pVariable;
*m_pVariable = value;
INDEBUG(m_value = value;)
}
@@ -294,16 +321,14 @@ public:
*m_pVariable = m_oldValue;
}
-
private:
#ifdef DEBUG
T m_value; // The value we set the variable to (used for assert).
-#endif // DEBUG
- T m_oldValue; // The old value, to restore the variable to.
+#endif // DEBUG
+ T m_oldValue; // The old value, to restore the variable to.
T* m_pVariable; // Address of the variable to change
};
-
/******************************************************************************
* PhasedVar: A class to represent a variable that has phases, in particular,
* a write phase where the variable is computed, and a read phase where the
@@ -321,8 +346,7 @@ class PhasedVar
public:
PhasedVar()
#ifdef DEBUG
- : m_initialized(false)
- , m_writePhase(true)
+ : m_initialized(false), m_writePhase(true)
#endif // DEBUG
{
}
@@ -340,7 +364,7 @@ public:
{
#ifdef DEBUG
m_initialized = false;
- m_writePhase = true;
+ m_writePhase = true;
#endif // DEBUG
}
@@ -410,7 +434,6 @@ public:
}
private:
-
// Don't allow a copy constructor. (This could be allowed, but only add it once it is actually needed.)
PhasedVar(const PhasedVar& o)
@@ -418,29 +441,32 @@ private:
unreached();
}
-
T m_value;
#ifdef DEBUG
bool m_initialized; // true once the variable has been initialized, that is, written once.
- bool m_writePhase; // true if we are in the (initial) "write" phase. Once the value is read, this changes to false, and can't be changed back.
-#endif // DEBUG
+ bool m_writePhase; // true if we are in the (initial) "write" phase. Once the value is read, this changes to false,
+ // and can't be changed back.
+#endif // DEBUG
};
class HelperCallProperties
{
private:
- bool m_isPure [CORINFO_HELP_COUNT];
- bool m_noThrow [CORINFO_HELP_COUNT];
+ bool m_isPure[CORINFO_HELP_COUNT];
+ bool m_noThrow[CORINFO_HELP_COUNT];
bool m_nonNullReturn[CORINFO_HELP_COUNT];
- bool m_isAllocator [CORINFO_HELP_COUNT];
- bool m_mutatesHeap [CORINFO_HELP_COUNT];
- bool m_mayRunCctor [CORINFO_HELP_COUNT];
- bool m_mayFinalize [CORINFO_HELP_COUNT];
+ bool m_isAllocator[CORINFO_HELP_COUNT];
+ bool m_mutatesHeap[CORINFO_HELP_COUNT];
+ bool m_mayRunCctor[CORINFO_HELP_COUNT];
+ bool m_mayFinalize[CORINFO_HELP_COUNT];
- void init();
+ void init();
public:
- HelperCallProperties() { init(); }
+ HelperCallProperties()
+ {
+ init();
+ }
bool IsPure(CorInfoHelpFunc helperId)
{
@@ -492,11 +518,10 @@ public:
}
};
-
//*****************************************************************************
// AssemblyNamesList2: Parses and stores a list of Assembly names, and provides
// a function for determining whether a given assembly name is part of the list.
-//
+//
// This is a clone of the AssemblyNamesList class that exists in the VM's utilcode,
// modified to use the JIT's memory allocator and throw on out of memory behavior.
// It is named AssemblyNamesList2 to avoid a name conflict with the VM version.
@@ -510,15 +535,14 @@ class AssemblyNamesList2
{
struct AssemblyName
{
- char* m_assemblyName;
- AssemblyName* m_next;
+ char* m_assemblyName;
+ AssemblyName* m_next;
};
- AssemblyName* m_pNames; // List of names
- IAllocator* m_alloc; // IAllocator to use in this class
+ AssemblyName* m_pNames; // List of names
+ IAllocator* m_alloc; // IAllocator to use in this class
public:
-
// Take a Unicode string list of assembly names, parse it, and store it.
AssemblyNamesList2(const wchar_t* list, __in IAllocator* alloc);
@@ -541,9 +565,9 @@ public:
class CycleCount
{
private:
- double cps; // cycles per second
- unsigned __int64 beginCycles; // cycles at stop watch construction
-public:
+ double cps; // cycles per second
+ unsigned __int64 beginCycles; // cycles at stop watch construction
+public:
CycleCount();
// Kick off the counter, and if re-entrant will use the latest cycles as starting point.
@@ -558,15 +582,13 @@ private:
bool GetCycles(unsigned __int64* time);
};
-
// Uses win API QueryPerformanceCounter/QueryPerformanceFrequency.
class PerfCounter
{
LARGE_INTEGER beg;
- double freq;
+ double freq;
public:
-
// If the method returns false, any other query yield unpredictable results.
bool Start();
@@ -576,41 +598,39 @@ public:
#endif // FEATURE_JIT_METHOD_PERF
-
-
#ifdef DEBUG
/*****************************************************************************
* Return the number of digits in a number of the given base (default base 10).
* Used when outputting strings.
*/
-unsigned CountDigits(unsigned num, unsigned base = 10);
+unsigned CountDigits(unsigned num, unsigned base = 10);
#endif // DEBUG
// Utility class for lists.
-template<typename T>
-struct ListNode
+template <typename T>
+struct ListNode
{
- T data;
+ T data;
ListNode<T>* next;
// Create the class without using constructors.
static ListNode<T>* Create(T value, IAllocator* alloc)
{
ListNode<T>* node = new (alloc) ListNode<T>;
- node->data = value;
- node->next = nullptr;
+ node->data = value;
+ node->next = nullptr;
return node;
}
};
/*****************************************************************************
-* Floating point utility class
+* Floating point utility class
*/
-class FloatingPointUtils {
+class FloatingPointUtils
+{
public:
-
static double convertUInt64ToDouble(unsigned __int64 u64);
static float convertUInt64ToFloat(unsigned __int64 u64);
@@ -620,7 +640,6 @@ public:
static double round(double x);
};
-
// The CLR requires that critical section locks be initialized via its ClrCreateCriticalSection API...but
// that can't be called until the CLR is initialized. If we have static data that we'd like to protect by a
// lock, and we have a statically allocated lock to protect that data, there's an issue in how to initialize
@@ -635,20 +654,19 @@ public:
class CritSecObject
{
public:
-
CritSecObject()
{
- m_pCs = NULL;
+ m_pCs = nullptr;
}
CRITSEC_COOKIE Val()
{
- if (m_pCs == NULL)
+ if (m_pCs == nullptr)
{
// CompareExchange-based lazy init.
- CRITSEC_COOKIE newCs = ClrCreateCriticalSection(CrstLeafLock, CRST_DEFAULT);
+ CRITSEC_COOKIE newCs = ClrCreateCriticalSection(CrstLeafLock, CRST_DEFAULT);
CRITSEC_COOKIE observed = InterlockedCompareExchangeT(&m_pCs, newCs, NULL);
- if (observed != NULL)
+ if (observed != nullptr)
{
ClrDeleteCriticalSection(newCs);
}
@@ -657,7 +675,6 @@ public:
}
private:
-
// CRITSEC_COOKIE is an opaque pointer type.
CRITSEC_COOKIE m_pCs;
@@ -672,9 +689,7 @@ private:
class CritSecHolder
{
public:
-
- CritSecHolder(CritSecObject& critSec)
- : m_CritSec(critSec)
+ CritSecHolder(CritSecObject& critSec) : m_CritSec(critSec)
{
ClrEnterCriticalSection(m_CritSec.Val());
}
@@ -685,7 +700,6 @@ public:
}
private:
-
CritSecObject& m_CritSec;
// No copying or assignment allowed.
@@ -693,5 +707,4 @@ private:
CritSecHolder& operator=(const CritSecHolder&) = delete;
};
-
#endif // _UTILS_H_
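
The utils.h hunk above reshapes the ConstLog2 recursive template without changing its meaning: the primary template divides val by two and bumps the accumulator until a terminating specialization (val of 0 or 1) returns the accumulated count. The following usage sketch repeats that definition so it compiles on its own; the static_assert values follow directly from the recursion and are illustrative only.

// Base-2 logarithm of a compile-time constant, as defined in the hunk above.
template <unsigned val, unsigned acc = 0>
struct ConstLog2
{
    enum
    {
        value = ConstLog2<val / 2, acc + 1>::value
    };
};

template <unsigned acc>
struct ConstLog2<0, acc>
{
    enum
    {
        value = acc
    };
};

template <unsigned acc>
struct ConstLog2<1, acc>
{
    enum
    {
        value = acc
    };
};

static_assert(ConstLog2<1>::value == 0, "log2(1) == 0");
static_assert(ConstLog2<8>::value == 3, "log2(8) == 3");
static_assert(ConstLog2<64>::value == 6, "log2(64) == 6");
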
diff --git a/src/jit/valuenum.cpp b/src/jit/valuenum.cpp
index 5ebbc48a01..fcf4702a78 100644
--- a/src/jit/valuenum.cpp
+++ b/src/jit/valuenum.cpp
@@ -27,42 +27,56 @@ VNFunc GetVNFuncForOper(genTreeOps oper, bool isUnsigned)
}
switch (oper)
{
- case GT_LT:
- return VNF_LT_UN;
- case GT_LE:
- return VNF_LE_UN;
- case GT_GE:
- return VNF_GT_UN;
- case GT_GT:
- return VNF_GT_UN;
- case GT_ADD:
- return VNF_ADD_UN;
- case GT_SUB:
- return VNF_SUB_UN;
- case GT_MUL:
- return VNF_MUL_UN;
- case GT_DIV:
- return VNF_DIV_UN;
- case GT_MOD:
- return VNF_MOD_UN;
-
- case GT_NOP:
- case GT_COMMA:
- return VNFunc(oper);
- default:
- unreached();
+ case GT_LT:
+ return VNF_LT_UN;
+ case GT_LE:
+ return VNF_LE_UN;
+ case GT_GE:
+ return VNF_GT_UN;
+ case GT_GT:
+ return VNF_GT_UN;
+ case GT_ADD:
+ return VNF_ADD_UN;
+ case GT_SUB:
+ return VNF_SUB_UN;
+ case GT_MUL:
+ return VNF_MUL_UN;
+ case GT_DIV:
+ return VNF_DIV_UN;
+ case GT_MOD:
+ return VNF_MOD_UN;
+
+ case GT_NOP:
+ case GT_COMMA:
+ return VNFunc(oper);
+ default:
+ unreached();
}
}
-ValueNumStore::ValueNumStore(Compiler* comp, IAllocator* alloc)
- : m_pComp(comp), m_alloc(alloc),
+ValueNumStore::ValueNumStore(Compiler* comp, IAllocator* alloc)
+ : m_pComp(comp)
+ , m_alloc(alloc)
+ ,
#ifdef DEBUG
- m_numMapSels(0),
+ m_numMapSels(0)
+ ,
#endif
- m_nextChunkBase(0), m_fixedPointMapSels(alloc, 8), m_chunks(alloc, 8),
- m_intCnsMap(NULL), m_longCnsMap(NULL), m_handleMap(NULL), m_floatCnsMap(NULL), m_doubleCnsMap(NULL), m_byrefCnsMap(NULL),
- m_VNFunc0Map(NULL), m_VNFunc1Map(NULL), m_VNFunc2Map(NULL), m_VNFunc3Map(NULL), m_VNFunc4Map(NULL),
- m_uPtrToLocNotAFieldCount(1)
+ m_nextChunkBase(0)
+ , m_fixedPointMapSels(alloc, 8)
+ , m_chunks(alloc, 8)
+ , m_intCnsMap(nullptr)
+ , m_longCnsMap(nullptr)
+ , m_handleMap(nullptr)
+ , m_floatCnsMap(nullptr)
+ , m_doubleCnsMap(nullptr)
+ , m_byrefCnsMap(nullptr)
+ , m_VNFunc0Map(nullptr)
+ , m_VNFunc1Map(nullptr)
+ , m_VNFunc2Map(nullptr)
+ , m_VNFunc3Map(nullptr)
+ , m_VNFunc4Map(nullptr)
+ , m_uPtrToLocNotAFieldCount(1)
{
// We have no current allocation chunks.
for (unsigned i = 0; i < TYP_COUNT; i++)
@@ -77,17 +91,19 @@ ValueNumStore::ValueNumStore(Compiler* comp, IAllocator* alloc)
{
m_VNsForSmallIntConsts[i] = NoVN;
}
- // We will reserve chunk 0 to hold some special constants, like the constant NULL, the "exception" value, and the "zero map."
+ // We will reserve chunk 0 to hold some special constants, like the constant NULL, the "exception" value, and the
+ // "zero map."
Chunk* specialConstChunk = new (m_alloc) Chunk(m_alloc, &m_nextChunkBase, TYP_REF, CEA_Const);
- specialConstChunk->m_numUsed += SRC_NumSpecialRefConsts; // Implicitly allocate 0 ==> NULL, and 1 ==> Exception, 2 ==> ZeroMap.
+ specialConstChunk->m_numUsed +=
+ SRC_NumSpecialRefConsts; // Implicitly allocate 0 ==> NULL, and 1 ==> Exception, 2 ==> ZeroMap.
ChunkNum cn = m_chunks.Push(specialConstChunk);
assert(cn == 0);
- m_mapSelectBudget = JitConfig.JitVNMapSelBudget();
+ m_mapSelectBudget = JitConfig.JitVNMapSelBudget();
}
// static.
-template<typename T>
+template <typename T>
T ValueNumStore::EvalOp(VNFunc vnf, T v0)
{
genTreeOps oper = genTreeOps(vnf);
@@ -95,15 +111,15 @@ T ValueNumStore::EvalOp(VNFunc vnf, T v0)
// Here we handle those unary ops that are the same for integral and floating-point types.
switch (oper)
{
- case GT_NEG:
- return -v0;
- default:
- // Must be int-specific
- return EvalOpIntegral(vnf, v0);
+ case GT_NEG:
+ return -v0;
+ default:
+ // Must be int-specific
+ return EvalOpIntegral(vnf, v0);
}
}
-template<typename T>
+template <typename T>
T ValueNumStore::EvalOpIntegral(VNFunc vnf, T v0)
{
genTreeOps oper = genTreeOps(vnf);
@@ -111,15 +127,15 @@ T ValueNumStore::EvalOpIntegral(VNFunc vnf, T v0)
// Here we handle unary ops that are the same for all integral types.
switch (oper)
{
- case GT_NOT:
- return ~v0;
- default:
- unreached();
+ case GT_NOT:
+ return ~v0;
+ default:
+ unreached();
}
}
// static
-template<typename T>
+template <typename T>
T ValueNumStore::EvalOp(VNFunc vnf, T v0, T v1, ValueNum* pExcSet)
{
if (vnf < VNF_Boundary)
@@ -128,65 +144,65 @@ T ValueNumStore::EvalOp(VNFunc vnf, T v0, T v1, ValueNum* pExcSet)
// Here we handle those that are the same for integral and floating-point types.
switch (oper)
{
- case GT_ADD:
- return v0 + v1;
- case GT_SUB:
- return v0 - v1;
- case GT_MUL:
- return v0 * v1;
- case GT_DIV:
- if (IsIntZero(v1))
- {
- *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_DivideByZeroExc));
- return (T)0;
- }
- if (IsOverflowIntDiv(v0, v1))
- {
- *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_ArithmeticExc));
- return (T)0;
- }
- else
- {
- return v0 / v1;
- }
+ case GT_ADD:
+ return v0 + v1;
+ case GT_SUB:
+ return v0 - v1;
+ case GT_MUL:
+ return v0 * v1;
+ case GT_DIV:
+ if (IsIntZero(v1))
+ {
+ *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_DivideByZeroExc));
+ return (T)0;
+ }
+ if (IsOverflowIntDiv(v0, v1))
+ {
+ *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_ArithmeticExc));
+ return (T)0;
+ }
+ else
+ {
+ return v0 / v1;
+ }
- default:
- // Must be int-specific
- return EvalOpIntegral(vnf, v0, v1, pExcSet);
+ default:
+ // Must be int-specific
+ return EvalOpIntegral(vnf, v0, v1, pExcSet);
}
}
- else // must be a VNF_ function
+ else // must be a VNF_ function
{
typedef typename jitstd::make_unsigned<T>::type UT;
switch (vnf)
{
- case VNF_GT_UN:
- return T(UT(v0) > UT(v1));
- case VNF_GE_UN:
- return T(UT(v0) >= UT(v1));
- case VNF_LT_UN:
- return T(UT(v0) < UT(v1));
- case VNF_LE_UN:
- return T(UT(v0) <= UT(v1));
- case VNF_ADD_UN:
- return T(UT(v0) + UT(v1));
- case VNF_SUB_UN:
- return T(UT(v0) - UT(v1));
- case VNF_MUL_UN:
- return T(UT(v0) * UT(v1));
- case VNF_DIV_UN:
- if (IsIntZero(v1))
- {
- *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_DivideByZeroExc));
- return (T)0;
- }
- else
- {
- return T(UT(v0) / UT(v1));
- }
- default:
- // Must be int-specific
- return EvalOpIntegral(vnf, v0, v1, pExcSet);
+ case VNF_GT_UN:
+ return T(UT(v0) > UT(v1));
+ case VNF_GE_UN:
+ return T(UT(v0) >= UT(v1));
+ case VNF_LT_UN:
+ return T(UT(v0) < UT(v1));
+ case VNF_LE_UN:
+ return T(UT(v0) <= UT(v1));
+ case VNF_ADD_UN:
+ return T(UT(v0) + UT(v1));
+ case VNF_SUB_UN:
+ return T(UT(v0) - UT(v1));
+ case VNF_MUL_UN:
+ return T(UT(v0) * UT(v1));
+ case VNF_DIV_UN:
+ if (IsIntZero(v1))
+ {
+ *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_DivideByZeroExc));
+ return (T)0;
+ }
+ else
+ {
+ return T(UT(v0) / UT(v1));
+ }
+ default:
+ // Must be int-specific
+ return EvalOpIntegral(vnf, v0, v1, pExcSet);
}
}
}
@@ -199,24 +215,23 @@ double ValueNumStore::EvalOp<double>(VNFunc vnf, double v0, double v1, ValueNum*
// Here we handle those that are the same for floating-point types.
switch (oper)
{
- case GT_ADD:
- return v0 + v1;
- case GT_SUB:
- return v0 - v1;
- case GT_MUL:
- return v0 * v1;
- case GT_DIV:
- return v0 / v1;
- case GT_MOD:
- return fmod(v0, v1);
+ case GT_ADD:
+ return v0 + v1;
+ case GT_SUB:
+ return v0 - v1;
+ case GT_MUL:
+ return v0 * v1;
+ case GT_DIV:
+ return v0 / v1;
+ case GT_MOD:
+ return fmod(v0, v1);
- default:
- unreached();
+ default:
+ unreached();
}
}
-
-template<typename T>
+template <typename T>
int ValueNumStore::EvalComparison(VNFunc vnf, T v0, T v1)
{
if (vnf < VNF_Boundary)
@@ -225,36 +240,36 @@ int ValueNumStore::EvalComparison(VNFunc vnf, T v0, T v1)
// Here we handle those that are the same for floating-point types.
switch (oper)
{
- case GT_EQ:
- return v0 == v1;
- case GT_NE:
- return v0 != v1;
- case GT_GT:
- return v0 > v1;
- case GT_GE:
- return v0 >= v1;
- case GT_LT:
- return v0 < v1;
- case GT_LE:
- return v0 <= v1;
- default:
- unreached();
+ case GT_EQ:
+ return v0 == v1;
+ case GT_NE:
+ return v0 != v1;
+ case GT_GT:
+ return v0 > v1;
+ case GT_GE:
+ return v0 >= v1;
+ case GT_LT:
+ return v0 < v1;
+ case GT_LE:
+ return v0 <= v1;
+ default:
+ unreached();
}
}
- else // must be a VNF_ function
+ else // must be a VNF_ function
{
switch (vnf)
{
- case VNF_GT_UN:
- return unsigned(v0) > unsigned(v1);
- case VNF_GE_UN:
- return unsigned(v0) >= unsigned(v1);
- case VNF_LT_UN:
- return unsigned(v0) < unsigned(v1);
- case VNF_LE_UN:
- return unsigned(v0) <= unsigned(v1);
- default:
- unreached();
+ case VNF_GT_UN:
+ return unsigned(v0) > unsigned(v1);
+ case VNF_GE_UN:
+ return unsigned(v0) >= unsigned(v1);
+ case VNF_LT_UN:
+ return unsigned(v0) < unsigned(v1);
+ case VNF_LE_UN:
+ return unsigned(v0) <= unsigned(v1);
+ default:
+ unreached();
}
}
}
@@ -264,12 +279,12 @@ template <typename T>
int ValueNumStore::EvalOrderedComparisonFloat(VNFunc vnf, T v0, T v1)
{
// !! NOTE !!
- //
+ //
// All comparisons below are ordered comparisons.
//
// We should guard this function from unordered comparisons
// identified by the GTF_RELOP_NAN_UN flag. Either the flag
- // should be bubbled (similar to GTF_UNSIGNED for ints)
+ // should be bubbled (similar to GTF_UNSIGNED for ints)
// to this point or we should bail much earlier if any of
// the operands are NaN.
//
@@ -277,20 +292,20 @@ int ValueNumStore::EvalOrderedComparisonFloat(VNFunc vnf, T v0, T v1)
// Here we handle those that are the same for floating-point types.
switch (oper)
{
- case GT_EQ:
- return v0 == v1;
- case GT_NE:
- return v0 != v1;
- case GT_GT:
- return v0 > v1;
- case GT_GE:
- return v0 >= v1;
- case GT_LT:
- return v0 < v1;
- case GT_LE:
- return v0 <= v1;
- default:
- unreached();
+ case GT_EQ:
+ return v0 == v1;
+ case GT_NE:
+ return v0 != v1;
+ case GT_GT:
+ return v0 > v1;
+ case GT_GE:
+ return v0 >= v1;
+ case GT_LT:
+ return v0 < v1;
+ case GT_LE:
+ return v0 <= v1;
+ default:
+ unreached();
}
}
@@ -306,114 +321,113 @@ int ValueNumStore::EvalComparison<float>(VNFunc vnf, float v0, float v1)
return EvalOrderedComparisonFloat(vnf, v0, v1);
}
-template<typename T>
+template <typename T>
T ValueNumStore::EvalOpIntegral(VNFunc vnf, T v0, T v1, ValueNum* pExcSet)
{
genTreeOps oper = genTreeOps(vnf);
switch (oper)
{
- case GT_EQ:
- return v0 == v1;
- case GT_NE:
- return v0 != v1;
- case GT_GT:
- return v0 > v1;
- case GT_GE:
- return v0 >= v1;
- case GT_LT:
- return v0 < v1;
- case GT_LE:
- return v0 <= v1;
- case GT_OR:
- return v0 | v1;
- case GT_XOR:
- return v0 ^ v1;
- case GT_AND:
- return v0 & v1;
- case GT_LSH:
- return v0 << v1;
- case GT_RSH:
- return v0 >> v1;
- case GT_RSZ:
- if (sizeof(T) == 8)
- {
- return UINT64(v0) >> v1;
- }
- else
- {
- return UINT32(v0) >> v1;
- }
- case GT_ROL:
- if (sizeof(T) == 8)
- {
- return (v0 << v1) | (UINT64(v0) >> (64 - v1));
- }
- else
- {
- return (v0 << v1) | (UINT32(v0) >> (32 - v1));
- }
-
- case GT_ROR:
- if (sizeof(T) == 8)
- {
- return (v0 << (64 - v1)) | (UINT64(v0) >> v1);
- }
- else
- {
- return (v0 << (32 - v1)) | (UINT32(v0) >> v1);
- }
+ case GT_EQ:
+ return v0 == v1;
+ case GT_NE:
+ return v0 != v1;
+ case GT_GT:
+ return v0 > v1;
+ case GT_GE:
+ return v0 >= v1;
+ case GT_LT:
+ return v0 < v1;
+ case GT_LE:
+ return v0 <= v1;
+ case GT_OR:
+ return v0 | v1;
+ case GT_XOR:
+ return v0 ^ v1;
+ case GT_AND:
+ return v0 & v1;
+ case GT_LSH:
+ return v0 << v1;
+ case GT_RSH:
+ return v0 >> v1;
+ case GT_RSZ:
+ if (sizeof(T) == 8)
+ {
+ return UINT64(v0) >> v1;
+ }
+ else
+ {
+ return UINT32(v0) >> v1;
+ }
+ case GT_ROL:
+ if (sizeof(T) == 8)
+ {
+ return (v0 << v1) | (UINT64(v0) >> (64 - v1));
+ }
+ else
+ {
+ return (v0 << v1) | (UINT32(v0) >> (32 - v1));
+ }
- case GT_DIV:
- case GT_MOD:
- if (v1 == 0)
- {
- *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_DivideByZeroExc));
- }
- else if (IsOverflowIntDiv(v0, v1))
- {
- *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_ArithmeticExc));
- return 0;
- }
- else // We are not dividing by Zero, so we can calculate the exact result.
- {
- // Perform the appropriate operation.
- if (oper == GT_DIV)
+ case GT_ROR:
+ if (sizeof(T) == 8)
{
- return v0 / v1;
+ return (v0 << (64 - v1)) | (UINT64(v0) >> v1);
}
- else // Must be GT_MOD
+ else
{
- return v0 % v1;
+ return (v0 << (32 - v1)) | (UINT32(v0) >> v1);
}
- }
- case GT_UDIV:
- case GT_UMOD:
- if (v1 == 0)
- {
- *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_DivideByZeroExc));
- return 0;
- }
- else // We are not dividing by Zero, so we can calculate the exact result.
- {
- typedef typename jitstd::make_unsigned<T>::type UT;
- // We need for force the source operands for the divide or mod operation
- // to be considered unsigned.
- //
- if (oper == GT_UDIV)
+ case GT_DIV:
+ case GT_MOD:
+ if (v1 == 0)
{
- // This is return unsigned(v0) / unsigned(v1) for both sizes of integers
- return T(UT(v0) / UT(v1));
+ *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_DivideByZeroExc));
+ }
+ else if (IsOverflowIntDiv(v0, v1))
+ {
+ *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_ArithmeticExc));
+ return 0;
+ }
+ else // We are not dividing by Zero, so we can calculate the exact result.
+ {
+ // Perform the appropriate operation.
+ if (oper == GT_DIV)
+ {
+ return v0 / v1;
+ }
+ else // Must be GT_MOD
+ {
+ return v0 % v1;
+ }
+ }
+ case GT_UDIV:
+ case GT_UMOD:
+ if (v1 == 0)
+ {
+ *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_DivideByZeroExc));
+ return 0;
}
- else // Must be GT_UMOD
+ else // We are not dividing by Zero, so we can calculate the exact result.
{
- // This is return unsigned(v0) % unsigned(v1) for both sizes of integers
- return T(UT(v0) % UT(v1));
+ typedef typename jitstd::make_unsigned<T>::type UT;
+                    // We need to force the source operands for the divide or mod operation
+ // to be considered unsigned.
+ //
+ if (oper == GT_UDIV)
+ {
+ // This is return unsigned(v0) / unsigned(v1) for both sizes of integers
+ return T(UT(v0) / UT(v1));
+ }
+ else // Must be GT_UMOD
+ {
+ // This is return unsigned(v0) % unsigned(v1) for both sizes of integers
+ return T(UT(v0) % UT(v1));
+ }
}
- }
- default:
- unreached(); // NYI?
+ default:
+ unreached(); // NYI?
}
}
@@ -433,31 +447,33 @@ ValueNum ValueNumStore::VNExcSetSingleton(ValueNum x)
ValueNumPair ValueNumStore::VNPExcSetSingleton(ValueNumPair xp)
{
- return ValueNumPair(VNExcSetSingleton(xp.GetLiberal()),
- VNExcSetSingleton(xp.GetConservative()));
+ return ValueNumPair(VNExcSetSingleton(xp.GetLiberal()), VNExcSetSingleton(xp.GetConservative()));
}
ValueNum ValueNumStore::VNExcSetUnion(ValueNum xs0, ValueNum xs1 DEBUGARG(bool topLevel))
{
if (xs0 == VNForEmptyExcSet())
+ {
return xs1;
+ }
else if (xs1 == VNForEmptyExcSet())
+ {
return xs0;
+ }
else
{
- VNFuncApp funcXs0;
- bool b0 = GetVNFunc(xs0, &funcXs0);
+ VNFuncApp funcXs0;
+ bool b0 = GetVNFunc(xs0, &funcXs0);
assert(b0 && funcXs0.m_func == VNF_ExcSetCons); // Precondition: xs0 is an exception set.
- VNFuncApp funcXs1;
- bool b1 = GetVNFunc(xs1, &funcXs1);
+ VNFuncApp funcXs1;
+ bool b1 = GetVNFunc(xs1, &funcXs1);
assert(b1 && funcXs1.m_func == VNF_ExcSetCons); // Precondition: xs1 is an exception set.
ValueNum res = NoVN;
if (funcXs0.m_args[0] < funcXs1.m_args[0])
{
- res = VNForFunc(TYP_REF, VNF_ExcSetCons,
- funcXs0.m_args[0],
+ res = VNForFunc(TYP_REF, VNF_ExcSetCons, funcXs0.m_args[0],
VNExcSetUnion(funcXs0.m_args[1], xs1 DEBUGARG(false)));
- }
+ }
else if (funcXs0.m_args[0] == funcXs1.m_args[0])
{
// Equal elements; only add one to the result.
@@ -466,8 +482,7 @@ ValueNum ValueNumStore::VNExcSetUnion(ValueNum xs0, ValueNum xs1 DEBUGARG(bool t
else
{
assert(funcXs0.m_args[0] > funcXs1.m_args[0]);
- res = VNForFunc(TYP_REF, VNF_ExcSetCons,
- funcXs1.m_args[0],
+ res = VNForFunc(TYP_REF, VNF_ExcSetCons, funcXs1.m_args[0],
VNExcSetUnion(xs0, funcXs1.m_args[1] DEBUGARG(false)));
}
@@ -481,15 +496,14 @@ ValueNumPair ValueNumStore::VNPExcSetUnion(ValueNumPair xs0vnp, ValueNumPair xs1
VNExcSetUnion(xs0vnp.GetConservative(), xs1vnp.GetConservative()));
}
-
-
void ValueNumStore::VNUnpackExc(ValueNum vnWx, ValueNum* pvn, ValueNum* pvnx)
{
assert(vnWx != NoVN);
VNFuncApp funcApp;
if (GetVNFunc(vnWx, &funcApp) && funcApp.m_func == VNF_ValWithExc)
{
- *pvn = funcApp.m_args[0]; *pvnx = funcApp.m_args[1];
+ *pvn = funcApp.m_args[0];
+ *pvnx = funcApp.m_args[1];
}
else
{
@@ -503,13 +517,12 @@ void ValueNumStore::VNPUnpackExc(ValueNumPair vnWx, ValueNumPair* pvn, ValueNumP
VNUnpackExc(vnWx.GetConservative(), pvn->GetConservativeAddr(), pvnx->GetConservativeAddr());
}
-
ValueNum ValueNumStore::VNNormVal(ValueNum vn)
{
VNFuncApp funcApp;
if (GetVNFunc(vn, &funcApp) && funcApp.m_func == VNF_ValWithExc)
{
- return funcApp.m_args[0];
+ return funcApp.m_args[0];
}
else
{
@@ -527,7 +540,7 @@ ValueNum ValueNumStore::VNExcVal(ValueNum vn)
VNFuncApp funcApp;
if (GetVNFunc(vn, &funcApp) && funcApp.m_func == VNF_ValWithExc)
{
- return funcApp.m_args[1];
+ return funcApp.m_args[1];
}
else
{
@@ -540,16 +553,18 @@ ValueNumPair ValueNumStore::VNPExcVal(ValueNumPair vnp)
return ValueNumPair(VNExcVal(vnp.GetLiberal()), VNExcVal(vnp.GetConservative()));
}
-
// If vn "excSet" is not "VNForEmptyExcSet()", return "VNF_ValWithExc(vn, excSet)". Otherwise,
// just return "vn".
ValueNum ValueNumStore::VNWithExc(ValueNum vn, ValueNum excSet)
{
if (excSet == VNForEmptyExcSet())
+ {
return vn;
+ }
else
{
- ValueNum vnNorm; ValueNum vnX = VNForEmptyExcSet();
+ ValueNum vnNorm;
+ ValueNum vnX = VNForEmptyExcSet();
VNUnpackExc(vn, &vnNorm, &vnX);
return VNForFunc(TypeOfVN(vnNorm), VNF_ValWithExc, vnNorm, VNExcSetUnion(vnX, excSet));
}
@@ -561,7 +576,6 @@ ValueNumPair ValueNumStore::VNPWithExc(ValueNumPair vnp, ValueNumPair excSetVNP)
VNWithExc(vnp.GetConservative(), excSetVNP.GetConservative()));
}
-
bool ValueNumStore::IsKnownNonNull(ValueNum vn)
{
if (vn == NoVN)
@@ -582,70 +596,71 @@ bool ValueNumStore::IsSharedStatic(ValueNum vn)
return GetVNFunc(vn, &funcAttr) && (s_vnfOpAttribs[funcAttr.m_func] & VNFOA_SharedStatic) != 0;
}
-ValueNumStore::Chunk::Chunk(IAllocator* alloc, ValueNum* pNextBaseVN, var_types typ, ChunkExtraAttribs attribs) : m_defs(NULL), m_numUsed(0), m_baseVN(*pNextBaseVN), m_typ(typ), m_attribs(attribs)
+ValueNumStore::Chunk::Chunk(IAllocator* alloc, ValueNum* pNextBaseVN, var_types typ, ChunkExtraAttribs attribs)
+ : m_defs(nullptr), m_numUsed(0), m_baseVN(*pNextBaseVN), m_typ(typ), m_attribs(attribs)
{
// Allocate "m_defs" here, according to the typ/attribs pair.
switch (attribs)
{
- case CEA_None:
- break; // Nothing to do.
- case CEA_Const:
- switch (typ)
- {
- case TYP_INT:
- m_defs = new (alloc) Alloc<TYP_INT>::Type[ChunkSize];
+ case CEA_None:
+ break; // Nothing to do.
+ case CEA_Const:
+ switch (typ)
+ {
+ case TYP_INT:
+ m_defs = new (alloc) Alloc<TYP_INT>::Type[ChunkSize];
+ break;
+ case TYP_FLOAT:
+ m_defs = new (alloc) Alloc<TYP_FLOAT>::Type[ChunkSize];
+ break;
+ case TYP_LONG:
+ m_defs = new (alloc) Alloc<TYP_LONG>::Type[ChunkSize];
+ break;
+ case TYP_DOUBLE:
+ m_defs = new (alloc) Alloc<TYP_DOUBLE>::Type[ChunkSize];
+ break;
+ case TYP_BYREF:
+ m_defs = new (alloc) Alloc<TYP_BYREF>::Type[ChunkSize];
+ break;
+ case TYP_REF:
+ // We allocate space for a single REF constant, NULL, so we can access these values uniformly.
+ // Since this value is always the same, we represent it as a static.
+ m_defs = &s_specialRefConsts[0];
+ break; // Nothing to do.
+ default:
+ assert(false); // Should not reach here.
+ }
break;
- case TYP_FLOAT:
- m_defs = new (alloc) Alloc<TYP_FLOAT>::Type[ChunkSize];
+
+ case CEA_Handle:
+ m_defs = new (alloc) VNHandle[ChunkSize];
break;
- case TYP_LONG:
- m_defs = new (alloc) Alloc<TYP_LONG>::Type[ChunkSize];
+
+ case CEA_Func0:
+ m_defs = new (alloc) VNFunc[ChunkSize];
break;
- case TYP_DOUBLE:
- m_defs = new (alloc) Alloc<TYP_DOUBLE>::Type[ChunkSize];
+
+ case CEA_Func1:
+ m_defs = new (alloc) VNDefFunc1Arg[ChunkSize];
break;
- case TYP_BYREF:
- m_defs = new (alloc) Alloc<TYP_BYREF>::Type[ChunkSize];
+ case CEA_Func2:
+ m_defs = new (alloc) VNDefFunc2Arg[ChunkSize];
+ break;
+ case CEA_Func3:
+ m_defs = new (alloc) VNDefFunc3Arg[ChunkSize];
+ break;
+ case CEA_Func4:
+ m_defs = new (alloc) VNDefFunc4Arg[ChunkSize];
break;
- case TYP_REF:
- // We allocate space for a single REF constant, NULL, so we can access these values uniformly.
- // Since this value is always the same, we represent it as a static.
- m_defs = &s_specialRefConsts[0];
- break; // Nothing to do.
default:
- assert(false); // Should not reach here.
- }
- break;
-
- case CEA_Handle:
- m_defs = new (alloc) VNHandle[ChunkSize];
- break;
-
- case CEA_Func0:
- m_defs = new (alloc) VNFunc[ChunkSize];
- break;
-
- case CEA_Func1:
- m_defs = new (alloc) VNDefFunc1Arg[ChunkSize];
- break;
- case CEA_Func2:
- m_defs = new (alloc) VNDefFunc2Arg[ChunkSize];
- break;
- case CEA_Func3:
- m_defs = new (alloc) VNDefFunc3Arg[ChunkSize];
- break;
- case CEA_Func4:
- m_defs = new (alloc) VNDefFunc4Arg[ChunkSize];
- break;
- default:
- unreached();
+ unreached();
}
*pNextBaseVN += ChunkSize;
}
ValueNumStore::Chunk* ValueNumStore::GetAllocChunk(var_types typ, ValueNumStore::ChunkExtraAttribs attribs)
{
- Chunk* res;
+ Chunk* res;
ChunkNum cn = m_curAllocChunk[typ][attribs];
if (cn != NoChunk)
{
@@ -656,8 +671,8 @@ ValueNumStore::Chunk* ValueNumStore::GetAllocChunk(var_types typ, ValueNumStore:
}
}
// Otherwise, must allocate a new one.
- res = new (m_alloc) Chunk(m_alloc, &m_nextChunkBase, typ, attribs);
- cn = m_chunks.Push(res);
+ res = new (m_alloc) Chunk(m_alloc, &m_nextChunkBase, typ, attribs);
+ cn = m_chunks.Push(res);
m_curAllocChunk[typ][attribs] = cn;
return res;
}
@@ -667,9 +682,12 @@ ValueNum ValueNumStore::VNForIntCon(INT32 cnsVal)
if (IsSmallIntConst(cnsVal))
{
unsigned ind = cnsVal - SmallIntConstMin;
- ValueNum vn = m_VNsForSmallIntConsts[ind];
- if (vn != NoVN) return vn;
- vn = GetVNForIntCon(cnsVal);
+ ValueNum vn = m_VNsForSmallIntConsts[ind];
+ if (vn != NoVN)
+ {
+ return vn;
+ }
+ vn = GetVNForIntCon(cnsVal);
m_VNsForSmallIntConsts[ind] = vn;
return vn;
}
@@ -688,9 +706,9 @@ ValueNum ValueNumStore::VNForLongCon(INT64 cnsVal)
}
else
{
- Chunk* c = GetAllocChunk(TYP_LONG, CEA_Const);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(TYP_LONG, CEA_Const);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<INT64*>(c->m_defs)[offsetWithinChunk] = cnsVal;
GetLongCnsMap()->Set(cnsVal, res);
return res;
@@ -706,9 +724,9 @@ ValueNum ValueNumStore::VNForFloatCon(float cnsVal)
}
else
{
- Chunk* c = GetAllocChunk(TYP_FLOAT, CEA_Const);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(TYP_FLOAT, CEA_Const);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<float*>(c->m_defs)[offsetWithinChunk] = cnsVal;
GetFloatCnsMap()->Set(cnsVal, res);
return res;
@@ -724,9 +742,9 @@ ValueNum ValueNumStore::VNForDoubleCon(double cnsVal)
}
else
{
- Chunk* c = GetAllocChunk(TYP_DOUBLE, CEA_Const);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(TYP_DOUBLE, CEA_Const);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<double*>(c->m_defs)[offsetWithinChunk] = cnsVal;
GetDoubleCnsMap()->Set(cnsVal, res);
return res;
@@ -742,9 +760,9 @@ ValueNum ValueNumStore::VNForByrefCon(INT64 cnsVal)
}
else
{
- Chunk* c = GetAllocChunk(TYP_BYREF, CEA_Const);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(TYP_BYREF, CEA_Const);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<INT64*>(c->m_defs)[offsetWithinChunk] = cnsVal;
GetByrefCnsMap()->Set(cnsVal, res);
return res;
@@ -759,16 +777,16 @@ ValueNum ValueNumStore::VNForCastOper(var_types castToType, bool srcIsUnsigned /
if (srcIsUnsigned)
{
- // We record the srcIsUnsigned by or-ing a 0x01
+ // We record the srcIsUnsigned by or-ing a 0x01
cnsVal |= INT32(VCA_UnsignedSrc);
-
}
ValueNum result = VNForIntCon(cnsVal);
#ifdef DEBUG
if (m_pComp->verbose)
{
- printf(" VNForCastOper(%s%s) is " STR_VN "%x\n", varTypeName(castToType), srcIsUnsigned ? ", unsignedSrc" : "", result);
+ printf(" VNForCastOper(%s%s) is " STR_VN "%x\n", varTypeName(castToType),
+ srcIsUnsigned ? ", unsignedSrc" : "", result);
}
#endif
@@ -788,9 +806,9 @@ ValueNum ValueNumStore::VNForHandle(ssize_t cnsVal, unsigned handleFlags)
}
else
{
- Chunk* c = GetAllocChunk(TYP_I_IMPL, CEA_Handle);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(TYP_I_IMPL, CEA_Handle);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<VNHandle*>(c->m_defs)[offsetWithinChunk] = handle;
GetHandleMap()->Set(handle, res);
return res;
@@ -803,42 +821,42 @@ ValueNum ValueNumStore::VNZeroForType(var_types typ)
{
switch (typ)
{
- case TYP_BOOL:
- case TYP_BYTE:
- case TYP_UBYTE:
- case TYP_CHAR:
- case TYP_SHORT:
- case TYP_USHORT:
- case TYP_INT:
- case TYP_UINT:
- return VNForIntCon(0);
- case TYP_LONG:
- case TYP_ULONG:
- return VNForLongCon(0);
- case TYP_FLOAT:
+ case TYP_BOOL:
+ case TYP_BYTE:
+ case TYP_UBYTE:
+ case TYP_CHAR:
+ case TYP_SHORT:
+ case TYP_USHORT:
+ case TYP_INT:
+ case TYP_UINT:
+ return VNForIntCon(0);
+ case TYP_LONG:
+ case TYP_ULONG:
+ return VNForLongCon(0);
+ case TYP_FLOAT:
#if FEATURE_X87_DOUBLES
- return VNForDoubleCon(0.0);
+ return VNForDoubleCon(0.0);
#else
- return VNForFloatCon(0.0f);
+ return VNForFloatCon(0.0f);
#endif
- case TYP_DOUBLE:
- return VNForDoubleCon(0.0);
- case TYP_REF:
- case TYP_ARRAY:
- return VNForNull();
- case TYP_STRUCT:
+ case TYP_DOUBLE:
+ return VNForDoubleCon(0.0);
+ case TYP_REF:
+ case TYP_ARRAY:
+ return VNForNull();
+ case TYP_STRUCT:
#ifdef FEATURE_SIMD
- // TODO-CQ: Improve value numbering for SIMD types.
- case TYP_SIMD8:
- case TYP_SIMD12:
- case TYP_SIMD16:
- case TYP_SIMD32:
-#endif // FEATURE_SIMD
- return VNForZeroMap(); // Recursion!
+ // TODO-CQ: Improve value numbering for SIMD types.
+ case TYP_SIMD8:
+ case TYP_SIMD12:
+ case TYP_SIMD16:
+ case TYP_SIMD32:
+#endif // FEATURE_SIMD
+ return VNForZeroMap(); // Recursion!
// These should be unreached.
- default:
- unreached(); // Should handle all types.
+ default:
+ unreached(); // Should handle all types.
}
}
@@ -848,29 +866,29 @@ ValueNum ValueNumStore::VNOneForType(var_types typ)
{
switch (typ)
{
- case TYP_BOOL:
- case TYP_BYTE:
- case TYP_UBYTE:
- case TYP_CHAR:
- case TYP_SHORT:
- case TYP_USHORT:
- case TYP_INT:
- case TYP_UINT:
- return VNForIntCon(1);
- case TYP_LONG:
- case TYP_ULONG:
- return VNForLongCon(1);
- case TYP_FLOAT:
- return VNForFloatCon(1.0f);
- case TYP_DOUBLE:
- return VNForDoubleCon(1.0);
-
- default:
- return NoVN;
+ case TYP_BOOL:
+ case TYP_BYTE:
+ case TYP_UBYTE:
+ case TYP_CHAR:
+ case TYP_SHORT:
+ case TYP_USHORT:
+ case TYP_INT:
+ case TYP_UINT:
+ return VNForIntCon(1);
+ case TYP_LONG:
+ case TYP_ULONG:
+ return VNForLongCon(1);
+ case TYP_FLOAT:
+ return VNForFloatCon(1.0f);
+ case TYP_DOUBLE:
+ return VNForDoubleCon(1.0);
+
+ default:
+ return NoVN;
}
}
-class Object* ValueNumStore::s_specialRefConsts[] = {NULL, NULL, NULL};
+class Object* ValueNumStore::s_specialRefConsts[] = {nullptr, nullptr, nullptr};
// Nullary operators (i.e., symbolic constants).
ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func)
@@ -882,12 +900,12 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func)
if (GetVNFunc0Map()->Lookup(func, &res))
{
return res;
- }
+ }
else
{
- Chunk* c = GetAllocChunk(typ, CEA_Func0);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(typ, CEA_Func0);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<VNFunc*>(c->m_defs)[offsetWithinChunk] = func;
GetVNFunc0Map()->Set(func, res);
return res;
@@ -896,9 +914,9 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func)
ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN)
{
- assert(arg0VN == VNNormVal(arg0VN)); // Arguments don't carry exceptions.
+ assert(arg0VN == VNNormVal(arg0VN)); // Arguments don't carry exceptions.
- ValueNum res;
+ ValueNum res;
VNDefFunc1Arg fstruct(func, arg0VN);
// Do constant-folding.
@@ -914,9 +932,9 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN)
else
{
// Otherwise, create a new VN for this application.
- Chunk* c = GetAllocChunk(typ, CEA_Func1);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(typ, CEA_Func1);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<VNDefFunc1Arg*>(c->m_defs)[offsetWithinChunk] = fstruct;
GetVNFunc1Map()->Set(fstruct, res);
return res;
@@ -926,19 +944,19 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN)
ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN)
{
assert(arg0VN != NoVN && arg1VN != NoVN);
- assert(arg0VN == VNNormVal(arg0VN)); // Arguments carry no exceptions.
- assert(arg1VN == VNNormVal(arg1VN)); // Arguments carry no exceptions.
+ assert(arg0VN == VNNormVal(arg0VN)); // Arguments carry no exceptions.
+ assert(arg1VN == VNNormVal(arg1VN)); // Arguments carry no exceptions.
assert(VNFuncArity(func) == 2);
- assert(func != VNF_MapSelect); // Precondition: use the special function VNForMapSelect defined for that.
+ assert(func != VNF_MapSelect); // Precondition: use the special function VNForMapSelect defined for that.
ValueNum res;
// Do constant-folding.
if (CanEvalForConstantArgs(func) && IsVNConstant(arg0VN) && IsVNConstant(arg1VN))
{
- bool canFold = true; // Normally we will be able to fold this 'func'
+ bool canFold = true; // Normally we will be able to fold this 'func'
- // Special case for VNF_Cast of constant handles
+ // Special case for VNF_Cast of constant handles
// Don't allow eval/fold of a GT_CAST(non-I_IMPL, Handle)
//
if ((func == VNF_Cast) && (typ != TYP_I_IMPL) && IsVNHandle(arg0VN))
@@ -992,128 +1010,156 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, V
// We have ways of evaluating some binary functions.
if (func < VNF_Boundary)
{
- if (typ != TYP_BYREF) // We don't want/need to optimize a zero byref
- {
+ if (typ != TYP_BYREF) // We don't want/need to optimize a zero byref
+ {
genTreeOps oper = genTreeOps(func);
- ValueNum ZeroVN, OneVN; // We may need to create one of these in the switch below.
+ ValueNum ZeroVN, OneVN; // We may need to create one of these in the switch below.
switch (oper)
{
- case GT_ADD:
- // This identity does not apply for floating point (when x == -0.0)
- if (!varTypeIsFloating(typ))
- {
- // (x + 0) == (0 + x) => x
+ case GT_ADD:
+ // This identity does not apply for floating point (when x == -0.0)
+ if (!varTypeIsFloating(typ))
+ {
+ // (x + 0) == (0 + x) => x
+ ZeroVN = VNZeroForType(typ);
+ if (arg0VN == ZeroVN)
+ {
+ return arg1VN;
+ }
+ else if (arg1VN == ZeroVN)
+ {
+ return arg0VN;
+ }
+ }
+ break;
+
+ case GT_SUB:
+ // (x - 0) => x
ZeroVN = VNZeroForType(typ);
- if (arg0VN == ZeroVN)
- return arg1VN;
- else if (arg1VN == ZeroVN)
+ if (arg1VN == ZeroVN)
+ {
return arg0VN;
- }
- break;
+ }
+ break;
- case GT_SUB:
- // (x - 0) => x
- ZeroVN = VNZeroForType(typ);
- if (arg1VN == ZeroVN)
- return arg0VN;
- break;
+ case GT_MUL:
+ // (x * 1) == (1 * x) => x
+ OneVN = VNOneForType(typ);
+ if (OneVN != NoVN)
+ {
+ if (arg0VN == OneVN)
+ {
+ return arg1VN;
+ }
+ else if (arg1VN == OneVN)
+ {
+ return arg0VN;
+ }
+ }
- case GT_MUL:
- // (x * 1) == (1 * x) => x
- OneVN = VNOneForType(typ);
- if (OneVN != NoVN)
- {
- if (arg0VN == OneVN)
+ if (!varTypeIsFloating(typ))
+ {
+ // (x * 0) == (0 * x) => 0 (unless x is NaN, which we must assume a fp value may be)
+ ZeroVN = VNZeroForType(typ);
+ if (arg0VN == ZeroVN)
+ {
+ return ZeroVN;
+ }
+ else if (arg1VN == ZeroVN)
+ {
+ return ZeroVN;
+ }
+ }
+ break;
+
+ case GT_DIV:
+ case GT_UDIV:
+ // (x / 1) => x
+ OneVN = VNOneForType(typ);
+ if (OneVN != NoVN)
+ {
+ if (arg1VN == OneVN)
+ {
+ return arg0VN;
+ }
+ }
+ break;
+
+ case GT_OR:
+ case GT_XOR:
+ // (x | 0) == (0 | x) => x
+ // (x ^ 0) == (0 ^ x) => x
+ ZeroVN = VNZeroForType(typ);
+ if (arg0VN == ZeroVN)
+ {
return arg1VN;
- else if (arg1VN == OneVN)
+ }
+ else if (arg1VN == ZeroVN)
+ {
return arg0VN;
- }
+ }
+ break;
- if (!varTypeIsFloating(typ))
- {
- // (x * 0) == (0 * x) => 0 (unless x is NaN, which we must assume a fp value may be)
+ case GT_AND:
+ // (x & 0) == (0 & x) => 0
ZeroVN = VNZeroForType(typ);
if (arg0VN == ZeroVN)
+ {
return ZeroVN;
+ }
else if (arg1VN == ZeroVN)
+ {
return ZeroVN;
- }
- break;
+ }
+ break;
- case GT_DIV:
- case GT_UDIV:
- // (x / 1) => x
- OneVN = VNOneForType(typ);
- if (OneVN != NoVN)
- {
- if (arg1VN == OneVN)
+ case GT_LSH:
+ case GT_RSH:
+ case GT_RSZ:
+ case GT_ROL:
+ case GT_ROR:
+ // (x << 0) => x
+ // (x >> 0) => x
+ // (x rol 0) => x
+ // (x ror 0) => x
+ ZeroVN = VNZeroForType(typ);
+ if (arg1VN == ZeroVN)
+ {
return arg0VN;
- }
- break;
-
- case GT_OR:
- case GT_XOR:
- // (x | 0) == (0 | x) => x
- // (x ^ 0) == (0 ^ x) => x
- ZeroVN = VNZeroForType(typ);
- if (arg0VN == ZeroVN)
- return arg1VN;
- else if (arg1VN == ZeroVN)
- return arg0VN;
- break;
-
- case GT_AND:
- // (x & 0) == (0 & x) => 0
- ZeroVN = VNZeroForType(typ);
- if (arg0VN == ZeroVN)
- return ZeroVN;
- else if (arg1VN == ZeroVN)
- return ZeroVN;
- break;
-
- case GT_LSH:
- case GT_RSH:
- case GT_RSZ:
- case GT_ROL:
- case GT_ROR:
- // (x << 0) => x
- // (x >> 0) => x
- // (x rol 0) => x
- // (x ror 0) => x
- ZeroVN = VNZeroForType(typ);
- if (arg1VN == ZeroVN)
- return arg0VN;
- break;
+ }
+ break;
- case GT_EQ:
- // (x == x) => true (unless x is NaN)
- if (!varTypeIsFloating(TypeOfVN(arg0VN)) && (arg0VN != NoVN) && (arg0VN == arg1VN))
- {
- return VNOneForType(typ);
- }
- if ((arg0VN == VNForNull() && IsKnownNonNull(arg1VN)) || (arg1VN == VNForNull() && IsKnownNonNull(arg0VN)))
- {
- return VNZeroForType(typ);
- }
- break;
- case GT_NE:
- // (x != x) => false (unless x is NaN)
- if (!varTypeIsFloating(TypeOfVN(arg0VN)) && (arg0VN != NoVN) && (arg0VN == arg1VN))
- {
- return VNZeroForType(typ);
- }
- if ((arg0VN == VNForNull() && IsKnownNonNull(arg1VN)) || (arg1VN == VNForNull() && IsKnownNonNull(arg0VN)))
- {
- return VNOneForType(typ);
- }
- break;
+ case GT_EQ:
+ // (x == x) => true (unless x is NaN)
+ if (!varTypeIsFloating(TypeOfVN(arg0VN)) && (arg0VN != NoVN) && (arg0VN == arg1VN))
+ {
+ return VNOneForType(typ);
+ }
+ if ((arg0VN == VNForNull() && IsKnownNonNull(arg1VN)) ||
+ (arg1VN == VNForNull() && IsKnownNonNull(arg0VN)))
+ {
+ return VNZeroForType(typ);
+ }
+ break;
+ case GT_NE:
+ // (x != x) => false (unless x is NaN)
+ if (!varTypeIsFloating(TypeOfVN(arg0VN)) && (arg0VN != NoVN) && (arg0VN == arg1VN))
+ {
+ return VNZeroForType(typ);
+ }
+ if ((arg0VN == VNForNull() && IsKnownNonNull(arg1VN)) ||
+ (arg1VN == VNForNull() && IsKnownNonNull(arg0VN)))
+ {
+ return VNOneForType(typ);
+ }
+ break;
- default:
- break;
+ default:
+ break;
}
}
}
- else // must be a VNF_ function
+ else // must be a VNF_ function
{
if (func == VNF_CastClass)
{
@@ -1124,9 +1170,9 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, V
}
// Otherwise, assign a new VN for the function application.
- Chunk* c = GetAllocChunk(typ, CEA_Func2);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(typ, CEA_Func2);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<VNDefFunc2Arg*>(c->m_defs)[offsetWithinChunk] = fstruct;
GetVNFunc2Map()->Set(fstruct, res);
return res;
@@ -1152,10 +1198,7 @@ ValueNum ValueNumStore::VNForMapStore(var_types typ, ValueNum arg0VN, ValueNum a
#ifdef DEBUG
if (m_pComp->verbose)
{
- printf(" VNForMapStore(" STR_VN "%x, " STR_VN "%x, " STR_VN "%x):%s returns ",
- arg0VN,
- arg1VN,
- arg2VN,
+ printf(" VNForMapStore(" STR_VN "%x, " STR_VN "%x, " STR_VN "%x):%s returns ", arg0VN, arg1VN, arg2VN,
varTypeName(typ));
m_pComp->vnPrint(result, 1);
printf("\n");
@@ -1182,12 +1225,11 @@ ValueNum ValueNumStore::VNForMapStore(var_types typ, ValueNum arg0VN, ValueNum a
// "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number
// (liberal/conservative) to read from the SSA def referenced in the phi argument.
-
ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types typ, ValueNum arg0VN, ValueNum arg1VN)
{
- unsigned budget = m_mapSelectBudget;
- bool usedRecursiveVN = false;
- ValueNum result = VNForMapSelectWork(vnk, typ, arg0VN, arg1VN, &budget, &usedRecursiveVN);
+ unsigned budget = m_mapSelectBudget;
+ bool usedRecursiveVN = false;
+ ValueNum result = VNForMapSelectWork(vnk, typ, arg0VN, arg1VN, &budget, &usedRecursiveVN);
#ifdef DEBUG
if (m_pComp->verbose)
{
@@ -1220,18 +1262,14 @@ ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types typ, ValueNum
// "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number
// (liberal/conservative) to read from the SSA def referenced in the phi argument.
-ValueNum ValueNumStore::VNForMapSelectWork(ValueNumKind vnk,
- var_types typ,
- ValueNum arg0VN,
- ValueNum arg1VN,
- unsigned* pBudget,
- bool* pUsedRecursiveVN)
+ValueNum ValueNumStore::VNForMapSelectWork(
+ ValueNumKind vnk, var_types typ, ValueNum arg0VN, ValueNum arg1VN, unsigned* pBudget, bool* pUsedRecursiveVN)
{
TailCall:
// This label allows us to directly implement a tail call by setting up the arguments, and doing a goto to here.
assert(arg0VN != NoVN && arg1VN != NoVN);
- assert(arg0VN == VNNormVal(arg0VN)); // Arguments carry no exceptions.
- assert(arg1VN == VNNormVal(arg1VN)); // Arguments carry no exceptions.
+ assert(arg0VN == VNNormVal(arg0VN)); // Arguments carry no exceptions.
+ assert(arg1VN == VNNormVal(arg1VN)); // Arguments carry no exceptions.
*pUsedRecursiveVN = false;
@@ -1239,8 +1277,8 @@ TailCall:
// Provide a mechanism for writing tests that ensure we don't call this ridiculously often.
m_numMapSels++;
#if 1
- // This printing is sometimes useful in debugging.
- // if ((m_numMapSels % 1000) == 0) printf("%d VNF_MapSelect applications.\n", m_numMapSels);
+// This printing is sometimes useful in debugging.
+// if ((m_numMapSels % 1000) == 0) printf("%d VNF_MapSelect applications.\n", m_numMapSels);
#endif
unsigned selLim = JitConfig.JitVNMapSelLimit();
assert(selLim == 0 || m_numMapSels < selLim);
@@ -1269,7 +1307,7 @@ TailCall:
*pUsedRecursiveVN = true;
return RecursiveVN;
}
-
+
if (arg0VN == VNForZeroMap())
{
return VNZeroForType(typ);
@@ -1284,22 +1322,22 @@ TailCall:
if (funcApp.m_args[1] == arg1VN)
{
#if FEATURE_VN_TRACE_APPLY_SELECTORS
- JITDUMP(" AX1: select([" STR_VN "%x]store(" STR_VN "%x, " STR_VN "%x, " STR_VN "%x), " STR_VN "%x) ==> " STR_VN "%x.\n",
+ JITDUMP(" AX1: select([" STR_VN "%x]store(" STR_VN "%x, " STR_VN "%x, " STR_VN "%x), " STR_VN
+ "%x) ==> " STR_VN "%x.\n",
funcApp.m_args[0], arg0VN, funcApp.m_args[1], funcApp.m_args[2], arg1VN, funcApp.m_args[2]);
#endif
return funcApp.m_args[2];
}
// i # j ==> select(store(m, i, v), j) == select(m, j)
// Currently the only source of distinctions is when both indices are constants.
- else if (IsVNConstant(arg1VN)
- && IsVNConstant(funcApp.m_args[1]))
+ else if (IsVNConstant(arg1VN) && IsVNConstant(funcApp.m_args[1]))
{
assert(funcApp.m_args[1] != arg1VN); // we already checked this above.
#if FEATURE_VN_TRACE_APPLY_SELECTORS
- JITDUMP(" AX2: " STR_VN "%x != " STR_VN "%x ==> select([" STR_VN "%x]store(" STR_VN "%x, " STR_VN "%x, " STR_VN "%x), " STR_VN "%x) ==> select(" STR_VN "%x, " STR_VN "%x).\n",
- arg1VN, funcApp.m_args[1],
- arg0VN, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2], arg1VN,
- funcApp.m_args[0], arg1VN);
+ JITDUMP(" AX2: " STR_VN "%x != " STR_VN "%x ==> select([" STR_VN "%x]store(" STR_VN
+ "%x, " STR_VN "%x, " STR_VN "%x), " STR_VN "%x) ==> select(" STR_VN "%x, " STR_VN "%x).\n",
+ arg1VN, funcApp.m_args[1], arg0VN, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2],
+ arg1VN, funcApp.m_args[0], arg1VN);
#endif
// This is the equivalent of the recursive tail call:
// return VNForMapSelect(vnk, typ, funcApp.m_args[0], arg1VN);
@@ -1310,19 +1348,19 @@ TailCall:
}
else if (funcApp.m_func == VNF_PhiDef || funcApp.m_func == VNF_PhiHeapDef)
{
- unsigned lclNum = BAD_VAR_NUM;
- bool isHeap = false;
+ unsigned lclNum = BAD_VAR_NUM;
+ bool isHeap = false;
VNFuncApp phiFuncApp;
- bool defArgIsFunc = false;
+ bool defArgIsFunc = false;
if (funcApp.m_func == VNF_PhiDef)
{
- lclNum = unsigned(funcApp.m_args[0]);
+ lclNum = unsigned(funcApp.m_args[0]);
defArgIsFunc = GetVNFunc(funcApp.m_args[2], &phiFuncApp);
}
else
{
assert(funcApp.m_func == VNF_PhiHeapDef);
- isHeap = true;
+ isHeap = true;
defArgIsFunc = GetVNFunc(funcApp.m_args[1], &phiFuncApp);
}
if (defArgIsFunc && phiFuncApp.m_func == VNF_Phi)
@@ -1346,26 +1384,22 @@ TailCall:
}
if (phiArgVN != ValueNumStore::NoVN)
{
- bool allSame = true;
+ bool allSame = true;
ValueNum argRest = phiFuncApp.m_args[1];
- ValueNum sameSelResult = VNForMapSelectWork(vnk,
- typ,
- phiArgVN,
- arg1VN,
- pBudget,
- pUsedRecursiveVN);
+ ValueNum sameSelResult =
+ VNForMapSelectWork(vnk, typ, phiArgVN, arg1VN, pBudget, pUsedRecursiveVN);
while (allSame && argRest != ValueNumStore::NoVN)
{
- ValueNum cur = argRest;
+ ValueNum cur = argRest;
VNFuncApp phiArgFuncApp;
if (GetVNFunc(argRest, &phiArgFuncApp) && phiArgFuncApp.m_func == VNF_Phi)
{
- cur = phiArgFuncApp.m_args[0];
+ cur = phiArgFuncApp.m_args[0];
argRest = phiArgFuncApp.m_args[1];
}
else
{
- argRest = ValueNumStore::NoVN; // Cause the loop to terminate.
+ argRest = ValueNumStore::NoVN; // Cause the loop to terminate.
}
assert(IsVNConstant(cur));
phiArgSsaNum = ConstantValue<unsigned>(cur);
@@ -1383,18 +1417,18 @@ TailCall:
}
else
{
- bool usedRecursiveVN = false;
- ValueNum curResult = VNForMapSelectWork(vnk,
- typ,
- phiArgVN,
- arg1VN,
- pBudget,
- &usedRecursiveVN);
+ bool usedRecursiveVN = false;
+ ValueNum curResult =
+ VNForMapSelectWork(vnk, typ, phiArgVN, arg1VN, pBudget, &usedRecursiveVN);
*pUsedRecursiveVN |= usedRecursiveVN;
if (sameSelResult == ValueNumStore::RecursiveVN)
+ {
sameSelResult = curResult;
+ }
if (curResult != ValueNumStore::RecursiveVN && curResult != sameSelResult)
+ {
allSame = false;
+ }
}
}
if (allSame && sameSelResult != ValueNumStore::RecursiveVN)
@@ -1424,9 +1458,9 @@ TailCall:
}
// Otherwise, assign a new VN for the function application.
- Chunk* c = GetAllocChunk(typ, CEA_Func2);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(typ, CEA_Func2);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<VNDefFunc2Arg*>(c->m_defs)[offsetWithinChunk] = fstruct;
GetVNFunc2Map()->Set(fstruct, res);
return res;
@@ -1439,31 +1473,31 @@ ValueNum ValueNumStore::EvalFuncForConstantArgs(var_types typ, VNFunc func, Valu
assert(IsVNConstant(arg0VN));
switch (TypeOfVN(arg0VN))
{
- case TYP_INT:
+ case TYP_INT:
{
int resVal = EvalOp(func, ConstantValue<int>(arg0VN));
// Unary op on a handle results in a handle.
return IsVNHandle(arg0VN) ? VNForHandle(ssize_t(resVal), GetHandleFlags(arg0VN)) : VNForIntCon(resVal);
}
- case TYP_LONG:
+ case TYP_LONG:
{
INT64 resVal = EvalOp(func, ConstantValue<INT64>(arg0VN));
// Unary op on a handle results in a handle.
return IsVNHandle(arg0VN) ? VNForHandle(ssize_t(resVal), GetHandleFlags(arg0VN)) : VNForLongCon(resVal);
}
- case TYP_FLOAT:
- return VNForFloatCon(EvalOp(func, ConstantValue<float>(arg0VN)));
- case TYP_DOUBLE:
- return VNForDoubleCon(EvalOp(func, ConstantValue<double>(arg0VN)));
- case TYP_REF:
- // If arg0 has a possible exception, it wouldn't have been constant.
- assert(!VNHasExc(arg0VN));
- // Otherwise...
- assert(arg0VN == VNForNull()); // Only other REF constant.
- assert(func == VNFunc(GT_ARR_LENGTH)); // Only function we can apply to a REF constant!
- return VNWithExc(VNForVoid(), VNExcSetSingleton(VNForFunc(TYP_REF, VNF_NullPtrExc, VNForNull())));
- default:
- unreached();
+ case TYP_FLOAT:
+ return VNForFloatCon(EvalOp(func, ConstantValue<float>(arg0VN)));
+ case TYP_DOUBLE:
+ return VNForDoubleCon(EvalOp(func, ConstantValue<double>(arg0VN)));
+ case TYP_REF:
+ // If arg0 has a possible exception, it wouldn't have been constant.
+ assert(!VNHasExc(arg0VN));
+ // Otherwise...
+ assert(arg0VN == VNForNull()); // Only other REF constant.
+ assert(func == VNFunc(GT_ARR_LENGTH)); // Only function we can apply to a REF constant!
+ return VNWithExc(VNForVoid(), VNExcSetSingleton(VNForFunc(TYP_REF, VNF_NullPtrExc, VNForNull())));
+ default:
+ unreached();
}
}
@@ -1473,7 +1507,10 @@ bool ValueNumStore::SelectIsBeingEvaluatedRecursively(ValueNum map, ValueNum ind
{
VNDefFunc2Arg& elem = m_fixedPointMapSels.GetRef(i);
assert(elem.m_func == VNF_MapSelect);
- if (elem.m_arg0 == map && elem.m_arg1 == ind) return true;
+ if (elem.m_arg0 == map && elem.m_arg1 == ind)
+ {
+ return true;
+ }
}
return false;
}
@@ -1481,15 +1518,16 @@ bool ValueNumStore::SelectIsBeingEvaluatedRecursively(ValueNum map, ValueNum ind
#ifdef DEBUG
bool ValueNumStore::FixedPointMapSelsTopHasValue(ValueNum map, ValueNum index)
{
- if (m_fixedPointMapSels.Size() == 0) return false;
+ if (m_fixedPointMapSels.Size() == 0)
+ {
+ return false;
+ }
VNDefFunc2Arg& top = m_fixedPointMapSels.TopRef();
- return top.m_func == VNF_MapSelect
- && top.m_arg0 == map
- && top.m_arg1 == index;
+ return top.m_func == VNF_MapSelect && top.m_arg0 == map && top.m_arg1 == index;
}
#endif
-// Given an integer constant value number return its value as an int.
+// Given an integer constant value number return its value as an int.
//
int ValueNumStore::GetConstantInt32(ValueNum argVN)
{
@@ -1500,22 +1538,22 @@ int ValueNumStore::GetConstantInt32(ValueNum argVN)
switch (argVNtyp)
{
- case TYP_INT:
- result = ConstantValue<int>(argVN);
- break;
+ case TYP_INT:
+ result = ConstantValue<int>(argVN);
+ break;
#ifndef _TARGET_64BIT_
- case TYP_REF:
- case TYP_BYREF:
- result = (int) ConstantValue<size_t>(argVN);
- break;
+ case TYP_REF:
+ case TYP_BYREF:
+ result = (int)ConstantValue<size_t>(argVN);
+ break;
#endif
- default:
- unreached();
+ default:
+ unreached();
}
return result;
}
-// Given an integer constant value number return its value as an INT64.
+// Given an integer constant value number return its value as an INT64.
//
INT64 ValueNumStore::GetConstantInt64(ValueNum argVN)
{
@@ -1526,23 +1564,23 @@ INT64 ValueNumStore::GetConstantInt64(ValueNum argVN)
switch (argVNtyp)
{
- case TYP_INT:
- result = (INT64) ConstantValue<int>(argVN);
- break;
- case TYP_LONG:
- result = ConstantValue<INT64>(argVN);
- break;
- case TYP_REF:
- case TYP_BYREF:
- result = (INT64) ConstantValue<size_t>(argVN);
- break;
- default:
- unreached();
+ case TYP_INT:
+ result = (INT64)ConstantValue<int>(argVN);
+ break;
+ case TYP_LONG:
+ result = ConstantValue<INT64>(argVN);
+ break;
+ case TYP_REF:
+ case TYP_BYREF:
+ result = (INT64)ConstantValue<size_t>(argVN);
+ break;
+ default:
+ unreached();
}
return result;
}
-// Given a float or a double constant value number return its value as a double.
+// Given a float or a double constant value number return its value as a double.
//
double ValueNumStore::GetConstantDouble(ValueNum argVN)
{
@@ -1553,14 +1591,14 @@ double ValueNumStore::GetConstantDouble(ValueNum argVN)
switch (argVNtyp)
{
- case TYP_FLOAT:
- result = (double) ConstantValue<float>(argVN);
- break;
- case TYP_DOUBLE:
- result = ConstantValue<double>(argVN);
- break;
- default:
- unreached();
+ case TYP_FLOAT:
+ result = (double)ConstantValue<float>(argVN);
+ break;
+ case TYP_DOUBLE:
+ result = ConstantValue<double>(argVN);
+ break;
+ default:
+ unreached();
}
return result;
}
@@ -1582,21 +1620,21 @@ ValueNum ValueNumStore::EvalFuncForConstantArgs(var_types typ, VNFunc func, Valu
if (typ == TYP_BYREF)
{
- // We don't want to fold expressions that produce TYP_BYREF
+ // We don't want to fold expressions that produce TYP_BYREF
return false;
}
var_types arg0VNtyp = TypeOfVN(arg0VN);
var_types arg1VNtyp = TypeOfVN(arg1VN);
- // When both arguments are floating point types
+ // When both arguments are floating point types
// We defer to the EvalFuncForConstantFPArgs()
if (varTypeIsFloating(arg0VNtyp) && varTypeIsFloating(arg1VNtyp))
{
return EvalFuncForConstantFPArgs(typ, func, arg0VN, arg1VN);
}
-
- // after this we shouldn't have to deal with floating point types for arg0VN or arg1VN
+
+ // after this we shouldn't have to deal with floating point types for arg0VN or arg1VN
assert(!varTypeIsFloating(arg0VNtyp));
assert(!varTypeIsFloating(arg1VNtyp));
@@ -1606,10 +1644,10 @@ ValueNum ValueNumStore::EvalFuncForConstantArgs(var_types typ, VNFunc func, Valu
typ = TYP_INT;
}
- ValueNum result; // left uninitialized, we are required to initialize it on all paths below.
+ ValueNum result; // left uninitialized, we are required to initialize it on all paths below.
ValueNum excSet = VNForEmptyExcSet();
- // Are both args of the same type?
+ // Are both args of the same type?
if (arg0VNtyp == arg1VNtyp)
{
if (arg0VNtyp == TYP_INT)
@@ -1622,8 +1660,8 @@ ValueNum ValueNumStore::EvalFuncForConstantArgs(var_types typ, VNFunc func, Valu
// Bin op on a handle results in a handle.
ValueNum handleVN = IsVNHandle(arg0VN) ? arg0VN : IsVNHandle(arg1VN) ? arg1VN : NoVN;
ValueNum resultVN = (handleVN != NoVN)
- ? VNForHandle(ssize_t(resultVal), GetHandleFlags(handleVN)) // Use VN for Handle
- : VNForIntCon(resultVal);
+ ? VNForHandle(ssize_t(resultVal), GetHandleFlags(handleVN)) // Use VN for Handle
+ : VNForIntCon(resultVal);
result = VNWithExc(resultVN, excSet);
}
else if (arg0VNtyp == TYP_LONG)
@@ -1637,36 +1675,36 @@ ValueNum ValueNumStore::EvalFuncForConstantArgs(var_types typ, VNFunc func, Valu
result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
}
else
- {
+ {
assert(typ == TYP_LONG);
- INT64 resultVal = EvalOp(func, arg0Val, arg1Val, &excSet);
- ValueNum handleVN = IsVNHandle(arg0VN) ? arg0VN : IsVNHandle(arg1VN) ? arg1VN : NoVN;
- ValueNum resultVN = (handleVN != NoVN)
- ? VNForHandle(ssize_t(resultVal), GetHandleFlags(handleVN)) // Use VN for Handle
- : VNForLongCon(resultVal);
+ INT64 resultVal = EvalOp(func, arg0Val, arg1Val, &excSet);
+ ValueNum handleVN = IsVNHandle(arg0VN) ? arg0VN : IsVNHandle(arg1VN) ? arg1VN : NoVN;
+ ValueNum resultVN = (handleVN != NoVN)
+ ? VNForHandle(ssize_t(resultVal), GetHandleFlags(handleVN)) // Use VN for Handle
+ : VNForLongCon(resultVal);
result = VNWithExc(resultVN, excSet);
}
}
else // both args are TYP_REF or both args are TYP_BYREF
{
- INT64 arg0Val = ConstantValue<size_t>(arg0VN); // We represent ref/byref constants as size_t's.
- INT64 arg1Val = ConstantValue<size_t>(arg1VN); // Also we consider null to be zero.
+ INT64 arg0Val = ConstantValue<size_t>(arg0VN); // We represent ref/byref constants as size_t's.
+ INT64 arg1Val = ConstantValue<size_t>(arg1VN); // Also we consider null to be zero.
if (VNFuncIsComparison(func))
{
assert(typ == TYP_INT);
result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
}
- else if (typ == TYP_INT) // We could see GT_OR of a constant ByRef and Null
+ else if (typ == TYP_INT) // We could see GT_OR of a constant ByRef and Null
{
- int resultVal = (int) EvalOp(func, arg0Val, arg1Val, &excSet);
- result = VNWithExc(VNForIntCon(resultVal), excSet);
+ int resultVal = (int)EvalOp(func, arg0Val, arg1Val, &excSet);
+ result = VNWithExc(VNForIntCon(resultVal), excSet);
}
- else // We could see GT_OR of a constant ByRef and Null
+ else // We could see GT_OR of a constant ByRef and Null
{
assert((typ == TYP_BYREF) || (typ == TYP_LONG));
INT64 resultVal = EvalOp(func, arg0Val, arg1Val, &excSet);
- result = VNWithExc(VNForByrefCon(resultVal), excSet);
+ result = VNWithExc(VNForByrefCon(resultVal), excSet);
}
}
}
@@ -1683,38 +1721,38 @@ ValueNum ValueNumStore::EvalFuncForConstantArgs(var_types typ, VNFunc func, Valu
assert(typ == TYP_INT);
result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
}
- else if (typ == TYP_INT) // We could see GT_OR of an int and constant ByRef or Null
+ else if (typ == TYP_INT) // We could see GT_OR of an int and constant ByRef or Null
{
- int resultVal = (int) EvalOp(func, arg0Val, arg1Val, &excSet);
- result = VNWithExc(VNForIntCon(resultVal), excSet);
+ int resultVal = (int)EvalOp(func, arg0Val, arg1Val, &excSet);
+ result = VNWithExc(VNForIntCon(resultVal), excSet);
}
else
{
assert(typ != TYP_INT);
ValueNum resultValx = VNForEmptyExcSet();
- INT64 resultVal = EvalOp(func, arg0Val, arg1Val, &resultValx);
+ INT64 resultVal = EvalOp(func, arg0Val, arg1Val, &resultValx);
- // check for the Exception case
- if (resultValx != VNForEmptyExcSet())
+ // check for the Exception case
+ if (resultValx != VNForEmptyExcSet())
{
result = VNWithExc(VNForVoid(), resultValx);
}
else
- {
+ {
switch (typ)
{
- case TYP_BYREF:
- result = VNForByrefCon(resultVal);
- break;
- case TYP_LONG:
- result = VNForLongCon(resultVal);
- break;
- case TYP_REF:
- assert(resultVal == 0); // Only valid REF constant
- result = VNForNull();
- break;
- default:
- unreached();
+ case TYP_BYREF:
+ result = VNForByrefCon(resultVal);
+ break;
+ case TYP_LONG:
+ result = VNForLongCon(resultVal);
+ break;
+ case TYP_REF:
+ assert(resultVal == 0); // Only valid REF constant
+ result = VNForNull();
+ break;
+ default:
+ unreached();
}
}
}
@@ -1741,7 +1779,7 @@ ValueNum ValueNumStore::EvalFuncForConstantFPArgs(var_types typ, VNFunc func, Va
double arg0Val = GetConstantDouble(arg0VN);
double arg1Val = GetConstantDouble(arg1VN);
- ValueNum result; // left uninitialized, we are required to initialize it on all paths below.
+ ValueNum result; // left uninitialized, we are required to initialize it on all paths below.
if (VNFuncIsComparison(func))
{
@@ -1750,17 +1788,17 @@ ValueNum ValueNumStore::EvalFuncForConstantFPArgs(var_types typ, VNFunc func, Va
}
else
{
- assert(varTypeIsFloating(typ)); // We must be computing a floating point result
+ assert(varTypeIsFloating(typ)); // We must be computing a floating point result
// We always compute the result using a double
- ValueNum exception = VNForEmptyExcSet();
- double doubleResultVal = EvalOp(func, arg0Val, arg1Val, &exception);
+ ValueNum exception = VNForEmptyExcSet();
+ double doubleResultVal = EvalOp(func, arg0Val, arg1Val, &exception);
assert(exception == VNForEmptyExcSet()); // Floating point ops don't throw.
if (typ == TYP_FLOAT)
{
float floatResultVal = float(doubleResultVal);
- result = VNForFloatCon(floatResultVal);
+ result = VNForFloatCon(floatResultVal);
}
else
{
@@ -1802,12 +1840,12 @@ ValueNum ValueNumStore::EvalCastForConstantArgs(var_types typ, VNFunc func, Valu
// We previously encoded the castToType operation using vnForCastOper()
//
- bool srcIsUnsigned = ((arg1Val & INT32(VCA_UnsignedSrc)) != 0);
+ bool srcIsUnsigned = ((arg1Val & INT32(VCA_UnsignedSrc)) != 0);
var_types castToType = var_types(arg1Val >> INT32(VCA_BitCount));
var_types castFromType = arg0VNtyp;
- switch (castFromType) // GT_CAST source type
+ switch (castFromType) // GT_CAST source type
{
#ifndef _TARGET_64BIT_
case TYP_REF:
@@ -1820,14 +1858,14 @@ ValueNum ValueNumStore::EvalCastForConstantArgs(var_types typ, VNFunc func, Valu
{
case TYP_BYTE:
assert(typ == TYP_INT);
- return VNForIntCon(INT8(arg0Val));
- case TYP_BOOL:
+ return VNForIntCon(INT8(arg0Val));
+ case TYP_BOOL:
case TYP_UBYTE:
assert(typ == TYP_INT);
- return VNForIntCon(UINT8(arg0Val));
+ return VNForIntCon(UINT8(arg0Val));
case TYP_SHORT:
assert(typ == TYP_INT);
- return VNForIntCon(INT16(arg0Val));
+ return VNForIntCon(INT16(arg0Val));
case TYP_CHAR:
case TYP_USHORT:
assert(typ == TYP_INT);
@@ -1843,90 +1881,114 @@ ValueNum ValueNumStore::EvalCastForConstantArgs(var_types typ, VNFunc func, Valu
if (typ == TYP_LONG)
{
if (srcIsUnsigned)
- return VNForLongCon(INT64(unsigned(arg0Val)));
+ {
+ return VNForLongCon(INT64(unsigned(arg0Val)));
+ }
else
+ {
return VNForLongCon(INT64(arg0Val));
+ }
}
else
{
assert(typ == TYP_BYREF);
if (srcIsUnsigned)
- return VNForByrefCon(INT64(unsigned(arg0Val)));
+ {
+ return VNForByrefCon(INT64(unsigned(arg0Val)));
+ }
else
+ {
return VNForByrefCon(INT64(arg0Val));
+ }
}
#else // TARGET_32BIT
if (srcIsUnsigned)
- return VNForLongCon(INT64(unsigned(arg0Val)));
+ return VNForLongCon(INT64(unsigned(arg0Val)));
else
return VNForLongCon(INT64(arg0Val));
#endif
case TYP_FLOAT:
- assert(typ == TYP_FLOAT);
- if (srcIsUnsigned)
- return VNForFloatCon(float(unsigned(arg0Val)));
- else
- return VNForFloatCon(float(arg0Val));
+ assert(typ == TYP_FLOAT);
+ if (srcIsUnsigned)
+ {
+ return VNForFloatCon(float(unsigned(arg0Val)));
+ }
+ else
+ {
+ return VNForFloatCon(float(arg0Val));
+ }
case TYP_DOUBLE:
- assert(typ == TYP_DOUBLE);
- if (srcIsUnsigned)
- return VNForDoubleCon(double(unsigned(arg0Val)));
- else
- return VNForDoubleCon(double(arg0Val));
+ assert(typ == TYP_DOUBLE);
+ if (srcIsUnsigned)
+ {
+ return VNForDoubleCon(double(unsigned(arg0Val)));
+ }
+ else
+ {
+ return VNForDoubleCon(double(arg0Val));
+ }
default:
unreached();
}
break;
}
- {
+ {
#ifdef _TARGET_64BIT_
- case TYP_REF:
+ case TYP_REF:
#endif
- case TYP_LONG:
- INT64 arg0Val = GetConstantInt64(arg0VN);
-
- switch (castToType)
- {
- case TYP_BYTE:
- assert(typ == TYP_INT);
- return VNForIntCon(INT8(arg0Val));
- case TYP_BOOL:
- case TYP_UBYTE:
- assert(typ == TYP_INT);
- return VNForIntCon(UINT8(arg0Val));
- case TYP_SHORT:
- assert(typ == TYP_INT);
- return VNForIntCon(INT16(arg0Val));
- case TYP_CHAR:
- case TYP_USHORT:
- assert(typ == TYP_INT);
- return VNForIntCon(UINT16(arg0Val));
- case TYP_INT:
- assert(typ == TYP_INT);
- return VNForIntCon(INT32(arg0Val));
- case TYP_UINT:
- assert(typ == TYP_INT);
- return VNForIntCon(UINT32(arg0Val));
case TYP_LONG:
- case TYP_ULONG:
- assert(typ == TYP_LONG);
- return arg0VN;
- case TYP_FLOAT:
- assert(typ == TYP_FLOAT);
- if (srcIsUnsigned)
- return VNForFloatCon(FloatingPointUtils::convertUInt64ToFloat(UINT64(arg0Val)));
- else
- return VNForFloatCon(float(arg0Val));
- case TYP_DOUBLE:
- assert(typ == TYP_DOUBLE);
- if (srcIsUnsigned)
- return VNForDoubleCon(FloatingPointUtils::convertUInt64ToDouble(UINT64(arg0Val)));
- else
- return VNForDoubleCon(double(arg0Val));
- default:
- unreached();
+ INT64 arg0Val = GetConstantInt64(arg0VN);
+
+ switch (castToType)
+ {
+ case TYP_BYTE:
+ assert(typ == TYP_INT);
+ return VNForIntCon(INT8(arg0Val));
+ case TYP_BOOL:
+ case TYP_UBYTE:
+ assert(typ == TYP_INT);
+ return VNForIntCon(UINT8(arg0Val));
+ case TYP_SHORT:
+ assert(typ == TYP_INT);
+ return VNForIntCon(INT16(arg0Val));
+ case TYP_CHAR:
+ case TYP_USHORT:
+ assert(typ == TYP_INT);
+ return VNForIntCon(UINT16(arg0Val));
+ case TYP_INT:
+ assert(typ == TYP_INT);
+ return VNForIntCon(INT32(arg0Val));
+ case TYP_UINT:
+ assert(typ == TYP_INT);
+ return VNForIntCon(UINT32(arg0Val));
+ case TYP_LONG:
+ case TYP_ULONG:
+ assert(typ == TYP_LONG);
+ return arg0VN;
+ case TYP_FLOAT:
+ assert(typ == TYP_FLOAT);
+ if (srcIsUnsigned)
+ {
+ return VNForFloatCon(FloatingPointUtils::convertUInt64ToFloat(UINT64(arg0Val)));
+ }
+ else
+ {
+ return VNForFloatCon(float(arg0Val));
+ }
+ case TYP_DOUBLE:
+ assert(typ == TYP_DOUBLE);
+ if (srcIsUnsigned)
+ {
+ return VNForDoubleCon(FloatingPointUtils::convertUInt64ToDouble(UINT64(arg0Val)));
+ }
+ else
+ {
+ return VNForDoubleCon(double(arg0Val));
+ }
+ default:
+ unreached();
+ }
}
- }
case TYP_FLOAT:
case TYP_DOUBLE:
{
@@ -1936,27 +1998,27 @@ ValueNum ValueNumStore::EvalCastForConstantArgs(var_types typ, VNFunc func, Valu
{
case TYP_BYTE:
assert(typ == TYP_INT);
- return VNForIntCon(INT8(arg0Val));
- case TYP_BOOL:
+ return VNForIntCon(INT8(arg0Val));
+ case TYP_BOOL:
case TYP_UBYTE:
assert(typ == TYP_INT);
- return VNForIntCon(UINT8(arg0Val));
+ return VNForIntCon(UINT8(arg0Val));
case TYP_SHORT:
assert(typ == TYP_INT);
- return VNForIntCon(INT16(arg0Val));
+ return VNForIntCon(INT16(arg0Val));
case TYP_CHAR:
case TYP_USHORT:
assert(typ == TYP_INT);
return VNForIntCon(UINT16(arg0Val));
case TYP_INT:
assert(typ == TYP_INT);
- return VNForIntCon(INT32(arg0Val));
+ return VNForIntCon(INT32(arg0Val));
case TYP_UINT:
assert(typ == TYP_INT);
- return VNForIntCon(UINT32(arg0Val));
+ return VNForIntCon(UINT32(arg0Val));
case TYP_LONG:
assert(typ == TYP_LONG);
- return VNForLongCon(INT64(arg0Val));
+ return VNForLongCon(INT64(arg0Val));
case TYP_ULONG:
assert(typ == TYP_LONG);
return VNForLongCon(UINT64(arg0Val));
@@ -1974,7 +2036,7 @@ ValueNum ValueNumStore::EvalCastForConstantArgs(var_types typ, VNFunc func, Valu
unreached();
}
}
-
+
bool ValueNumStore::CanEvalForConstantArgs(VNFunc vnf)
{
if (vnf < VNF_Boundary)
@@ -1986,16 +2048,16 @@ bool ValueNumStore::CanEvalForConstantArgs(VNFunc vnf)
// Some exceptions...
switch (oper)
{
- case GT_MKREFANY: // We can't evaluate these.
- case GT_RETFILT:
- case GT_LIST:
- case GT_ARR_LENGTH:
- return false;
- case GT_MULHI:
- // should be rare, not worth the complexity and risk of getting it wrong
- return false;
- default:
- return true;
+ case GT_MKREFANY: // We can't evaluate these.
+ case GT_RETFILT:
+ case GT_LIST:
+ case GT_ARR_LENGTH:
+ return false;
+ case GT_MULHI:
+ // should be rare, not worth the complexity and risk of getting it wrong
+ return false;
+ default:
+ return true;
}
}
else
@@ -2003,12 +2065,12 @@ bool ValueNumStore::CanEvalForConstantArgs(VNFunc vnf)
// some VNF_ that we can evaluate
switch (vnf)
{
- case VNF_Cast: // We can evaluate these.
- return true;
- case VNF_ObjGetType:
- return false;
- default:
- return false;
+ case VNF_Cast: // We can evaluate these.
+ return true;
+ case VNF_ObjGetType:
+ return false;
+ default:
+ return false;
}
}
}
@@ -2019,89 +2081,89 @@ unsigned ValueNumStore::VNFuncArity(VNFunc vnf)
return (s_vnfOpAttribs[vnf] & VNFOA_ArityMask) >> VNFOA_ArityShift;
}
-template<>
+template <>
bool ValueNumStore::IsOverflowIntDiv(int v0, int v1)
{
- return (v1 == -1) && (v0 == INT32_MIN);
+ return (v1 == -1) && (v0 == INT32_MIN);
}
-template<>
+template <>
bool ValueNumStore::IsOverflowIntDiv(INT64 v0, INT64 v1)
{
- return (v1 == -1) && (v0 == INT64_MIN);
+ return (v1 == -1) && (v0 == INT64_MIN);
}
-template<typename T>
+template <typename T>
bool ValueNumStore::IsOverflowIntDiv(T v0, T v1)
{
return false;
}
-template<>
+template <>
bool ValueNumStore::IsIntZero(int v)
{
- return v == 0;
+ return v == 0;
}
-template<>
+template <>
bool ValueNumStore::IsIntZero(unsigned v)
{
- return v == 0;
+ return v == 0;
}
-template<>
+template <>
bool ValueNumStore::IsIntZero(INT64 v)
{
- return v == 0;
+ return v == 0;
}
-template<>
+template <>
bool ValueNumStore::IsIntZero(UINT64 v)
{
- return v == 0;
+ return v == 0;
}
-template<typename T>
+template <typename T>
bool ValueNumStore::IsIntZero(T v)
{
return false;
}
-template<>
+template <>
float ValueNumStore::EvalOpIntegral<float>(VNFunc vnf, float v0)
{
assert(!"EvalOpIntegral<float>");
return 0.0f;
}
-template<>
+template <>
double ValueNumStore::EvalOpIntegral<double>(VNFunc vnf, double v0)
{
assert(!"EvalOpIntegral<double>");
return 0.0;
}
-template<>
+template <>
float ValueNumStore::EvalOpIntegral<float>(VNFunc vnf, float v0, float v1, ValueNum* pExcSet)
{
genTreeOps oper = genTreeOps(vnf);
switch (oper)
{
- case GT_MOD:
- return fmodf(v0, v1);
- default:
- // For any other values of 'oper', we will assert and return 0.0f
- break;
+ case GT_MOD:
+ return fmodf(v0, v1);
+ default:
+ // For any other values of 'oper', we will assert and return 0.0f
+ break;
}
assert(!"EvalOpIntegral<float> with pExcSet");
return 0.0f;
}
-template<>
+template <>
double ValueNumStore::EvalOpIntegral<double>(VNFunc vnf, double v0, double v1, ValueNum* pExcSet)
{
genTreeOps oper = genTreeOps(vnf);
switch (oper)
{
- case GT_MOD:
- return fmod(v0, v1);
- default:
- // For any other value of 'oper', we will assert and return 0.0
- break;
+ case GT_MOD:
+ return fmod(v0, v1);
+ default:
+ // For any other value of 'oper', we will assert and return 0.0
+ break;
}
assert(!"EvalOpIntegral<double> with pExcSet");
return 0.0;
@@ -2132,7 +2194,7 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, V
#endif
assert(VNFuncArity(func) == 3);
- ValueNum res;
+ ValueNum res;
VNDefFunc3Arg fstruct(func, arg0VN, arg1VN, arg2VN);
if (GetVNFunc3Map()->Lookup(fstruct, &res))
{
@@ -2140,16 +2202,17 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, V
}
else
{
- Chunk* c = GetAllocChunk(typ, CEA_Func3);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(typ, CEA_Func3);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<VNDefFunc3Arg*>(c->m_defs)[offsetWithinChunk] = fstruct;
GetVNFunc3Map()->Set(fstruct, res);
return res;
}
}
-ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN, ValueNum arg2VN, ValueNum arg3VN)
+ValueNum ValueNumStore::VNForFunc(
+ var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN, ValueNum arg2VN, ValueNum arg3VN)
{
assert(arg0VN != NoVN && arg1VN != NoVN && arg2VN != NoVN && arg3VN != NoVN);
// Function arguments carry no exceptions.
@@ -2159,7 +2222,7 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, V
assert(arg3VN == VNNormVal(arg3VN));
assert(VNFuncArity(func) == 4);
- ValueNum res;
+ ValueNum res;
VNDefFunc4Arg fstruct(func, arg0VN, arg1VN, arg2VN, arg3VN);
if (GetVNFunc4Map()->Lookup(fstruct, &res))
{
@@ -2167,9 +2230,9 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, V
}
else
{
- Chunk* c = GetAllocChunk(typ, CEA_Func4);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(typ, CEA_Func4);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<VNDefFunc4Arg*>(c->m_defs)[offsetWithinChunk] = fstruct;
GetVNFunc4Map()->Set(fstruct, res);
return res;
@@ -2178,18 +2241,21 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, V
ValueNum ValueNumStore::VNForExpr(var_types typ)
{
- // We always allocate a new, unique VN in this call.
+ // We always allocate a new, unique VN in this call.
// The 'typ' is used to partition the allocation of VNs into different chunks.
//
- Chunk* c = GetAllocChunk(typ, CEA_None);
+ Chunk* c = GetAllocChunk(typ, CEA_None);
unsigned offsetWithinChunk = c->AllocVN();
- ValueNum result = c->m_baseVN + offsetWithinChunk;
+ ValueNum result = c->m_baseVN + offsetWithinChunk;
return result;
}
-ValueNum ValueNumStore::VNApplySelectors(ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, size_t* wbFinalStructSize)
+ValueNum ValueNumStore::VNApplySelectors(ValueNumKind vnk,
+ ValueNum map,
+ FieldSeqNode* fieldSeq,
+ size_t* wbFinalStructSize)
{
- if (fieldSeq == NULL)
+ if (fieldSeq == nullptr)
{
return map;
}
@@ -2204,9 +2270,9 @@ ValueNum ValueNumStore::VNApplySelectors(ValueNumKind vnk, ValueNum map, FieldSe
}
// Otherwise, is a real field handle.
- CORINFO_FIELD_HANDLE fldHnd = fieldSeq->m_fieldHnd;
+ CORINFO_FIELD_HANDLE fldHnd = fieldSeq->m_fieldHnd;
CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
- ValueNum fldHndVN = VNForHandle(ssize_t(fldHnd), GTF_ICON_FIELD_HDL);
+ ValueNum fldHndVN = VNForHandle(ssize_t(fldHnd), GTF_ICON_FIELD_HDL);
noway_assert(fldHnd != nullptr);
CorInfoType fieldCit = m_pComp->info.compCompHnd->getFieldType(fldHnd, &structHnd);
var_types fieldType = JITtype2varType(fieldCit);
@@ -2224,7 +2290,7 @@ ValueNum ValueNumStore::VNApplySelectors(ValueNumKind vnk, ValueNum map, FieldSe
}
if (wbFinalStructSize != nullptr)
{
- *wbFinalStructSize = structSize;
+ *wbFinalStructSize = structSize;
}
#ifdef DEBUG
@@ -2233,7 +2299,8 @@ ValueNum ValueNumStore::VNApplySelectors(ValueNumKind vnk, ValueNum map, FieldSe
printf(" VNApplySelectors:\n");
const char* modName;
const char* fldName = m_pComp->eeGetFieldName(fldHnd, &modName);
- printf(" VNForHandle(Fseq[%s]) is " STR_VN "%x, fieldType is %s", fldName, fldHndVN, varTypeName(fieldType));
+ printf(" VNForHandle(Fseq[%s]) is " STR_VN "%x, fieldType is %s", fldName, fldHndVN,
+ varTypeName(fieldType));
if (varTypeIsStruct(fieldType))
{
printf(", size = %d", structSize);
@@ -2247,14 +2314,14 @@ ValueNum ValueNumStore::VNApplySelectors(ValueNumKind vnk, ValueNum map, FieldSe
ValueNum newMap = VNForMapSelect(vnk, fieldType, map, fldHndVN);
return VNApplySelectors(vnk, newMap, fieldSeq->m_next, wbFinalStructSize);
}
- else // end of fieldSeq
+ else // end of fieldSeq
{
- return VNForMapSelect(vnk, fieldType, map, fldHndVN);
+ return VNForMapSelect(vnk, fieldType, map, fldHndVN);
}
}
}
-ValueNum ValueNumStore::VNApplySelectorsTypeCheck(ValueNum elem, var_types indType, size_t elemStructSize)
+ValueNum ValueNumStore::VNApplySelectorsTypeCheck(ValueNum elem, var_types indType, size_t elemStructSize)
{
var_types elemTyp = TypeOfVN(elem);
@@ -2310,7 +2377,7 @@ ValueNum ValueNumStore::VNApplySelectorsTypeCheck(ValueNum elem, var_types i
return elem;
}
-ValueNum ValueNumStore::VNApplySelectorsAssignTypeCoerce(ValueNum elem, var_types indType)
+ValueNum ValueNumStore::VNApplySelectorsAssignTypeCoerce(ValueNum elem, var_types indType)
{
var_types elemTyp = TypeOfVN(elem);
@@ -2345,9 +2412,10 @@ ValueNum ValueNumStore::VNApplySelectorsAssignTypeCoerce(ValueNum elem, var_
return elem;
}
-ValueNum ValueNumStore::VNApplySelectorsAssign(ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum elem, var_types indType)
+ValueNum ValueNumStore::VNApplySelectorsAssign(
+ ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum elem, var_types indType)
{
- if (fieldSeq == NULL)
+ if (fieldSeq == nullptr)
{
return VNApplySelectorsAssignTypeCoerce(elem, indType);
}
@@ -2363,8 +2431,8 @@ ValueNum ValueNumStore::VNApplySelectorsAssign(ValueNumKind vnk, ValueNum map, F
}
// Otherwise, fldHnd is a real field handle.
- CORINFO_FIELD_HANDLE fldHnd = fieldSeq->m_fieldHnd;
- CORINFO_CLASS_HANDLE structType = NULL;
+ CORINFO_FIELD_HANDLE fldHnd = fieldSeq->m_fieldHnd;
+ CORINFO_CLASS_HANDLE structType = nullptr;
noway_assert(fldHnd != nullptr);
CorInfoType fieldCit = m_pComp->info.compCompHnd->getFieldType(fldHnd, &structType);
var_types fieldType = JITtype2varType(fieldCit);
@@ -2381,8 +2449,8 @@ ValueNum ValueNumStore::VNApplySelectorsAssign(ValueNumKind vnk, ValueNum map, F
ValueNum seqNextVN = VNForFieldSeq(fieldSeq->m_next);
ValueNum fieldSeqVN = VNForFunc(TYP_REF, VNF_FieldSeq, fieldHndVN, seqNextVN);
- printf(" fieldSeq " STR_VN "%x is ", fieldSeqVN);
- vnDump(m_pComp, fieldSeqVN);
+ printf(" fieldSeq " STR_VN "%x is ", fieldSeqVN);
+ vnDump(m_pComp, fieldSeqVN);
printf("\n");
}
#endif
@@ -2391,7 +2459,7 @@ ValueNum ValueNumStore::VNApplySelectorsAssign(ValueNumKind vnk, ValueNum map, F
if (fieldSeq->m_next)
{
ValueNum fseqMap = VNForMapSelect(vnk, fieldType, map, fieldHndVN);
- elemAfter = VNApplySelectorsAssign(vnk, fseqMap, fieldSeq->m_next, elem, indType);
+ elemAfter = VNApplySelectorsAssign(vnk, fseqMap, fieldSeq->m_next, elem, indType);
}
else
{
@@ -2405,20 +2473,20 @@ ValueNum ValueNumStore::VNApplySelectorsAssign(ValueNumKind vnk, ValueNum map, F
ValueNumPair ValueNumStore::VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType)
{
- size_t structSize = 0;
- ValueNum liberalVN = VNApplySelectors(VNK_Liberal, map.GetLiberal(), fieldSeq, &structSize);
- liberalVN = VNApplySelectorsTypeCheck(liberalVN, indType, structSize);
+ size_t structSize = 0;
+ ValueNum liberalVN = VNApplySelectors(VNK_Liberal, map.GetLiberal(), fieldSeq, &structSize);
+ liberalVN = VNApplySelectorsTypeCheck(liberalVN, indType, structSize);
- structSize = 0;
+ structSize = 0;
ValueNum conservVN = VNApplySelectors(VNK_Conservative, map.GetConservative(), fieldSeq, &structSize);
- conservVN = VNApplySelectorsTypeCheck(conservVN, indType, structSize);
+ conservVN = VNApplySelectorsTypeCheck(conservVN, indType, structSize);
return ValueNumPair(liberalVN, conservVN);
}
ValueNum ValueNumStore::VNForFieldSeq(FieldSeqNode* fieldSeq)
{
- if (fieldSeq == NULL)
+ if (fieldSeq == nullptr)
{
return VNForNull();
}
@@ -2434,29 +2502,27 @@ ValueNum ValueNumStore::VNForFieldSeq(FieldSeqNode* fieldSeq)
ValueNum fieldSeqVN = VNForFunc(TYP_REF, VNF_FieldSeq, fieldHndVN, seqNextVN);
#ifdef DEBUG
- if (m_pComp->verbose)
- {
- printf(" fieldHnd " STR_VN "%x is ", fieldHndVN);
- vnDump(m_pComp, fieldHndVN);
- printf("\n");
+ if (m_pComp->verbose)
+ {
+ printf(" fieldHnd " STR_VN "%x is ", fieldHndVN);
+ vnDump(m_pComp, fieldHndVN);
+ printf("\n");
- printf(" fieldSeq " STR_VN "%x is ", fieldSeqVN);
- vnDump(m_pComp, fieldSeqVN);
- printf("\n");
- }
+ printf(" fieldSeq " STR_VN "%x is ", fieldSeqVN);
+ vnDump(m_pComp, fieldSeqVN);
+ printf("\n");
+ }
#endif
return fieldSeqVN;
}
}
-
-
FieldSeqNode* ValueNumStore::FieldSeqVNToFieldSeq(ValueNum vn)
{
if (vn == VNForNull())
{
- return NULL;
+ return nullptr;
}
else if (vn == VNForNotAField())
{
@@ -2468,8 +2534,9 @@ FieldSeqNode* ValueNumStore::FieldSeqVNToFieldSeq(ValueNum vn)
VNFuncApp funcApp;
GetVNFunc(vn, &funcApp);
assert(funcApp.m_func == VNF_FieldSeq);
- ssize_t fieldHndVal = ConstantValue<ssize_t>(funcApp.m_args[0]);
- FieldSeqNode* head = m_pComp->GetFieldSeqStore()->CreateSingleton(reinterpret_cast<CORINFO_FIELD_HANDLE>(fieldHndVal));
+ ssize_t fieldHndVal = ConstantValue<ssize_t>(funcApp.m_args[0]);
+ FieldSeqNode* head =
+ m_pComp->GetFieldSeqStore()->CreateSingleton(reinterpret_cast<CORINFO_FIELD_HANDLE>(fieldHndVal));
FieldSeqNode* tail = FieldSeqVNToFieldSeq(funcApp.m_args[1]);
return m_pComp->GetFieldSeqStore()->Append(head, tail);
}
@@ -2491,14 +2558,14 @@ ValueNum ValueNumStore::FieldSeqVNAppend(ValueNum fsVN1, ValueNum fsVN2)
VNFuncApp funcApp1;
GetVNFunc(fsVN1, &funcApp1);
assert(funcApp1.m_func == VNF_FieldSeq);
- ValueNum tailRes = FieldSeqVNAppend(funcApp1.m_args[1], fsVN2);
+ ValueNum tailRes = FieldSeqVNAppend(funcApp1.m_args[1], fsVN2);
ValueNum fieldSeqVN = VNForFunc(TYP_REF, VNF_FieldSeq, funcApp1.m_args[0], tailRes);
#ifdef DEBUG
if (m_pComp->verbose)
{
- printf(" fieldSeq " STR_VN "%x is ", fieldSeqVN);
- vnDump(m_pComp, fieldSeqVN);
+ printf(" fieldSeq " STR_VN "%x is ", fieldSeqVN);
+ vnDump(m_pComp, fieldSeqVN);
printf("\n");
}
#endif
@@ -2522,7 +2589,7 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTreePtr opA, GenTreePtr opB)
if (opB->OperGet() == GT_CNS_INT)
{
FieldSeqNode* fldSeq = opB->gtIntCon.gtFieldSeq;
- if ((fldSeq != NULL) && (fldSeq != FieldSeqStore::NotAField()))
+ if ((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField()))
{
return ExtendPtrVN(opA, opB->gtIntCon.gtFieldSeq);
}
@@ -2537,41 +2604,36 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTreePtr opA, FieldSeqNode* fldSeq)
ValueNum opAvnWx = opA->gtVNPair.GetLiberal();
assert(VNIsValid(opAvnWx));
- ValueNum opAvn; ValueNum opAvnx = VNForEmptyExcSet();
+ ValueNum opAvn;
+ ValueNum opAvnx = VNForEmptyExcSet();
VNUnpackExc(opAvnWx, &opAvn, &opAvnx);
assert(VNIsValid(opAvn) && VNIsValid(opAvnx));
VNFuncApp funcApp;
- if (!GetVNFunc(opAvn, &funcApp)) return res;
+ if (!GetVNFunc(opAvn, &funcApp))
+ {
+ return res;
+ }
if (funcApp.m_func == VNF_PtrToLoc)
{
#ifdef DEBUG
// For PtrToLoc, lib == cons.
VNFuncApp consFuncApp;
- assert( GetVNFunc(VNNormVal(opA->GetVN(VNK_Conservative)), &consFuncApp)
- && consFuncApp.Equals(funcApp));
+ assert(GetVNFunc(VNNormVal(opA->GetVN(VNK_Conservative)), &consFuncApp) && consFuncApp.Equals(funcApp));
#endif
ValueNum fldSeqVN = VNForFieldSeq(fldSeq);
- res = VNForPtrToLoc(TYP_BYREF,
- funcApp.m_args[0],
- FieldSeqVNAppend(funcApp.m_args[1], fldSeqVN));
+ res = VNForPtrToLoc(TYP_BYREF, funcApp.m_args[0], FieldSeqVNAppend(funcApp.m_args[1], fldSeqVN));
}
else if (funcApp.m_func == VNF_PtrToStatic)
{
ValueNum fldSeqVN = VNForFieldSeq(fldSeq);
- res = VNForFunc(TYP_BYREF,
- VNF_PtrToStatic,
- FieldSeqVNAppend(funcApp.m_args[0], fldSeqVN));
+ res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, FieldSeqVNAppend(funcApp.m_args[0], fldSeqVN));
}
else if (funcApp.m_func == VNF_PtrToArrElem)
{
ValueNum fldSeqVN = VNForFieldSeq(fldSeq);
- res = VNForFunc(TYP_BYREF,
- VNF_PtrToArrElem,
- funcApp.m_args[0],
- funcApp.m_args[1],
- funcApp.m_args[2],
+ res = VNForFunc(TYP_BYREF, VNF_PtrToArrElem, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2],
FieldSeqVNAppend(funcApp.m_args[3], fldSeqVN));
}
if (res != NoVN)
@@ -2582,30 +2644,26 @@ ValueNum ValueNumStore::ExtendPtrVN(GenTreePtr opA, FieldSeqNode* fldSeq)
}
void Compiler::fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
- ValueNum arrVN,
- ValueNum inxVN,
- FieldSeqNode* fldSeq,
- ValueNum rhsVN,
- var_types indType)
-{
- bool invalidateArray = false;
- ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
- var_types arrElemType = DecodeElemType(elemTypeEq);
- ValueNum hAtArrType =
- vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, fgCurHeapVN, elemTypeEqVN);
- ValueNum hAtArrTypeAtArr =
- vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, hAtArrType, arrVN);
- ValueNum hAtArrTypeAtArrAtInx =
- vnStore->VNForMapSelect(VNK_Liberal, arrElemType, hAtArrTypeAtArr, inxVN);
+ ValueNum arrVN,
+ ValueNum inxVN,
+ FieldSeqNode* fldSeq,
+ ValueNum rhsVN,
+ var_types indType)
+{
+ bool invalidateArray = false;
+ ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
+ var_types arrElemType = DecodeElemType(elemTypeEq);
+ ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, fgCurHeapVN, elemTypeEqVN);
+ ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, hAtArrType, arrVN);
+ ValueNum hAtArrTypeAtArrAtInx = vnStore->VNForMapSelect(VNK_Liberal, arrElemType, hAtArrTypeAtArr, inxVN);
ValueNum newValAtInx = ValueNumStore::NoVN;
ValueNum newValAtArr = ValueNumStore::NoVN;
ValueNum newValAtArrType = ValueNumStore::NoVN;
-
if (fldSeq == FieldSeqStore::NotAField())
{
- // This doesn't represent a proper array access
+ // This doesn't represent a proper array access
JITDUMP(" *** NotAField sequence encountered in fgValueNumberArrIndexAssign\n");
// Store a new unique value for newValAtArrType
@@ -2618,7 +2676,7 @@ void Compiler::fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
// This is the value that should be stored at "arr[inx]".
newValAtInx = vnStore->VNApplySelectorsAssign(VNK_Liberal, hAtArrTypeAtArrAtInx, fldSeq, rhsVN, indType);
- var_types arrElemFldType = arrElemType; // Uses arrElemType unless we have a non-null fldSeq
+ var_types arrElemFldType = arrElemType; // Uses arrElemType unless we have a non-null fldSeq
if (vnStore->IsVNFunc(newValAtInx))
{
VNFuncApp funcApp;
@@ -2644,15 +2702,14 @@ void Compiler::fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
if (!invalidateArray)
{
- newValAtArr = vnStore->VNForMapStore(indType, hAtArrTypeAtArr, inxVN, newValAtInx);
+ newValAtArr = vnStore->VNForMapStore(indType, hAtArrTypeAtArr, inxVN, newValAtInx);
newValAtArrType = vnStore->VNForMapStore(TYP_REF, hAtArrType, arrVN, newValAtArr);
}
-
+
#ifdef DEBUG
if (verbose)
{
- printf(" hAtArrType " STR_VN "%x is MapSelect(curHeap(" STR_VN "%x), ",
- hAtArrType, fgCurHeapVN);
+ printf(" hAtArrType " STR_VN "%x is MapSelect(curHeap(" STR_VN "%x), ", hAtArrType, fgCurHeapVN);
if (arrElemType == TYP_STRUCT)
{
@@ -2692,24 +2749,22 @@ void Compiler::fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
fgCurHeapVN = vnStore->VNForMapStore(TYP_REF, fgCurHeapVN, elemTypeEqVN, newValAtArrType);
}
-ValueNum Compiler::fgValueNumberArrIndexVal(GenTreePtr tree,
- VNFuncApp* pFuncApp,
- ValueNum addrXvn)
+ValueNum Compiler::fgValueNumberArrIndexVal(GenTreePtr tree, VNFuncApp* pFuncApp, ValueNum addrXvn)
{
assert(vnStore->IsVNHandle(pFuncApp->m_args[0]));
CORINFO_CLASS_HANDLE arrElemTypeEQ = CORINFO_CLASS_HANDLE(vnStore->ConstantValue<ssize_t>(pFuncApp->m_args[0]));
- ValueNum arrVN = pFuncApp->m_args[1];
- ValueNum inxVN = pFuncApp->m_args[2];
- FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(pFuncApp->m_args[3]);
+ ValueNum arrVN = pFuncApp->m_args[1];
+ ValueNum inxVN = pFuncApp->m_args[2];
+ FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(pFuncApp->m_args[3]);
return fgValueNumberArrIndexVal(tree, arrElemTypeEQ, arrVN, inxVN, addrXvn, fldSeq);
}
-ValueNum Compiler::fgValueNumberArrIndexVal(GenTreePtr tree,
+ValueNum Compiler::fgValueNumberArrIndexVal(GenTreePtr tree,
CORINFO_CLASS_HANDLE elemTypeEq,
- ValueNum arrVN,
- ValueNum inxVN,
- ValueNum excVN,
- FieldSeqNode* fldSeq)
+ ValueNum arrVN,
+ ValueNum inxVN,
+ ValueNum excVN,
+ FieldSeqNode* fldSeq)
{
assert(tree == nullptr || tree->OperGet() == GT_IND);
@@ -2723,7 +2778,7 @@ ValueNum Compiler::fgValueNumberArrIndexVal(GenTreePtr tree,
if (fldSeq == FieldSeqStore::NotAField())
{
- // This doesn't represent a proper array access
+ // This doesn't represent a proper array access
JITDUMP(" *** NotAField sequence encountered in fgValueNumberArrIndexVal\n");
// a new unique value number
@@ -2731,7 +2786,7 @@ ValueNum Compiler::fgValueNumberArrIndexVal(GenTreePtr tree,
#ifdef DEBUG
if (verbose)
- {
+ {
printf(" IND of PtrToArrElem is unique VN " STR_VN "%x.\n", selectedElem);
}
#endif // DEBUG
@@ -2751,8 +2806,7 @@ ValueNum Compiler::fgValueNumberArrIndexVal(GenTreePtr tree,
#ifdef DEBUG
if (verbose)
{
- printf(" hAtArrType " STR_VN "%x is MapSelect(curHeap(" STR_VN "%x), ",
- hAtArrType, fgCurHeapVN);
+ printf(" hAtArrType " STR_VN "%x is MapSelect(curHeap(" STR_VN "%x), ", hAtArrType, fgCurHeapVN);
if (elemTyp == TYP_STRUCT)
{
printf("%s[]).\n", eeGetClassName(elemTypeEq));
@@ -2764,25 +2818,25 @@ ValueNum Compiler::fgValueNumberArrIndexVal(GenTreePtr tree,
printf(" hAtArrTypeAtArr " STR_VN "%x is MapSelect(hAtArrType(" STR_VN "%x), arr=" STR_VN "%x).\n",
hAtArrTypeAtArr, hAtArrType, arrVN);
-
+
printf(" wholeElem " STR_VN "%x is MapSelect(hAtArrTypeAtArr(" STR_VN "%x), ind=" STR_VN "%x).\n",
wholeElem, hAtArrTypeAtArr, inxVN);
}
-#endif // DEBUG
-
- selectedElem = wholeElem;
+#endif // DEBUG
+
+ selectedElem = wholeElem;
size_t elemStructSize = 0;
if (fldSeq)
{
selectedElem = vnStore->VNApplySelectors(VNK_Liberal, wholeElem, fldSeq, &elemStructSize);
- elemTyp = vnStore->TypeOfVN(selectedElem);
+ elemTyp = vnStore->TypeOfVN(selectedElem);
}
selectedElem = vnStore->VNApplySelectorsTypeCheck(selectedElem, indType, elemStructSize);
selectedElem = vnStore->VNWithExc(selectedElem, excVN);
#ifdef DEBUG
if (verbose && (selectedElem != wholeElem))
- {
+ {
printf(" selectedElem is " STR_VN "%x after applying selectors.\n", selectedElem);
}
#endif // DEBUG
@@ -2807,11 +2861,14 @@ var_types ValueNumStore::TypeOfVN(ValueNum vn)
bool ValueNumStore::IsVNConstant(ValueNum vn)
{
- if (vn == NoVN) return false;
+ if (vn == NoVN)
+ {
+ return false;
+ }
Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
if (c->m_attribs == CEA_Const)
{
- return vn != VNForVoid(); // Void is not a "real" constant -- in the sense that it represents no value.
+ return vn != VNForVoid(); // Void is not a "real" constant -- in the sense that it represents no value.
}
else
{
@@ -2822,7 +2879,9 @@ bool ValueNumStore::IsVNConstant(ValueNum vn)
bool ValueNumStore::IsVNInt32Constant(ValueNum vn)
{
if (!IsVNConstant(vn))
+ {
return false;
+ }
return TypeOfVN(vn) == TYP_INT;
}
@@ -2830,15 +2889,18 @@ bool ValueNumStore::IsVNInt32Constant(ValueNum vn)
unsigned ValueNumStore::GetHandleFlags(ValueNum vn)
{
assert(IsVNHandle(vn));
- Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
- unsigned offset = ChunkOffset(vn);
+ Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
+ unsigned offset = ChunkOffset(vn);
VNHandle* handle = &reinterpret_cast<VNHandle*>(c->m_defs)[offset];
return handle->m_flags;
}
bool ValueNumStore::IsVNHandle(ValueNum vn)
{
- if (vn == NoVN) return false;
+ if (vn == NoVN)
+ {
+ return false;
+ }
Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
return c->m_attribs == CEA_Handle;
@@ -2847,15 +2909,18 @@ bool ValueNumStore::IsVNHandle(ValueNum vn)
bool ValueNumStore::IsVNConstantBound(ValueNum vn)
{
// Do we have "var < 100"?
- if (vn == NoVN) return false;
+ if (vn == NoVN)
+ {
+ return false;
+ }
VNFuncApp funcAttr;
if (!GetVNFunc(vn, &funcAttr))
{
return false;
}
- if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE &&
- funcAttr.m_func != (VNFunc)GT_LT && funcAttr.m_func != (VNFunc)GT_GT)
+ if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE && funcAttr.m_func != (VNFunc)GT_LT &&
+ funcAttr.m_func != (VNFunc)GT_GT)
{
return false;
}
@@ -2873,17 +2938,17 @@ void ValueNumStore::GetConstantBoundInfo(ValueNum vn, ConstantBoundInfo* info)
GetVNFunc(vn, &funcAttr);
bool isOp1Const = IsVNInt32Constant(funcAttr.m_args[1]);
-
+
if (isOp1Const)
{
- info->cmpOper = funcAttr.m_func;
- info->cmpOpVN = funcAttr.m_args[0];
+ info->cmpOper = funcAttr.m_func;
+ info->cmpOpVN = funcAttr.m_args[0];
info->constVal = GetConstantInt32(funcAttr.m_args[1]);
}
else
{
- info->cmpOper = GenTree::SwapRelop((genTreeOps)funcAttr.m_func);
- info->cmpOpVN = funcAttr.m_args[1];
+ info->cmpOper = GenTree::SwapRelop((genTreeOps)funcAttr.m_func);
+ info->cmpOpVN = funcAttr.m_args[1];
info->constVal = GetConstantInt32(funcAttr.m_args[0]);
}
}
@@ -2891,20 +2956,22 @@ void ValueNumStore::GetConstantBoundInfo(ValueNum vn, ConstantBoundInfo* info)
bool ValueNumStore::IsVNArrLenBound(ValueNum vn)
{
// Do we have "var < a.len"?
- if (vn == NoVN) return false;
+ if (vn == NoVN)
+ {
+ return false;
+ }
VNFuncApp funcAttr;
if (!GetVNFunc(vn, &funcAttr))
{
return false;
}
- if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE &&
- funcAttr.m_func != (VNFunc)GT_LT && funcAttr.m_func != (VNFunc)GT_GT)
+ if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE && funcAttr.m_func != (VNFunc)GT_LT &&
+ funcAttr.m_func != (VNFunc)GT_GT)
{
return false;
}
- if (!IsVNArrLen(funcAttr.m_args[0]) &&
- !IsVNArrLen(funcAttr.m_args[1]))
+ if (!IsVNArrLen(funcAttr.m_args[0]) && !IsVNArrLen(funcAttr.m_args[1]))
{
return false;
}
@@ -2924,13 +2991,13 @@ void ValueNumStore::GetArrLenBoundInfo(ValueNum vn, ArrLenArithBoundInfo* info)
if (isOp1ArrLen)
{
info->cmpOper = funcAttr.m_func;
- info->cmpOp = funcAttr.m_args[0];
+ info->cmpOp = funcAttr.m_args[0];
info->vnArray = GetArrForLenVn(funcAttr.m_args[1]);
}
else
{
- info->cmpOper = GenTree::SwapRelop((genTreeOps) funcAttr.m_func);
- info->cmpOp = funcAttr.m_args[1];
+ info->cmpOper = GenTree::SwapRelop((genTreeOps)funcAttr.m_func);
+ info->cmpOp = funcAttr.m_args[1];
info->vnArray = GetArrForLenVn(funcAttr.m_args[0]);
}
}
@@ -2938,13 +3005,16 @@ void ValueNumStore::GetArrLenBoundInfo(ValueNum vn, ArrLenArithBoundInfo* info)
bool ValueNumStore::IsVNArrLenArith(ValueNum vn)
{
// Do we have "a.len +or- var"
- if (vn == NoVN) return false;
+ if (vn == NoVN)
+ {
+ return false;
+ }
VNFuncApp funcAttr;
- return GetVNFunc(vn, &funcAttr) && // vn is a func.
- (funcAttr.m_func == (VNFunc)GT_ADD || funcAttr.m_func == (VNFunc)GT_SUB) && // the func is +/-
- (IsVNArrLen(funcAttr.m_args[0]) || IsVNArrLen(funcAttr.m_args[1])); // either op1 or op2 is a.len
+ return GetVNFunc(vn, &funcAttr) && // vn is a func.
+ (funcAttr.m_func == (VNFunc)GT_ADD || funcAttr.m_func == (VNFunc)GT_SUB) && // the func is +/-
+ (IsVNArrLen(funcAttr.m_args[0]) || IsVNArrLen(funcAttr.m_args[1])); // either op1 or op2 is a.len
}
void ValueNumStore::GetArrLenArithInfo(ValueNum vn, ArrLenArithBoundInfo* info)
@@ -2958,13 +3028,13 @@ void ValueNumStore::GetArrLenArithInfo(ValueNum vn, ArrLenArithBoundInfo* info)
if (isOp1ArrLen)
{
info->arrOper = funcArith.m_func;
- info->arrOp = funcArith.m_args[0];
+ info->arrOp = funcArith.m_args[0];
info->vnArray = GetArrForLenVn(funcArith.m_args[1]);
}
else
{
info->arrOper = funcArith.m_func;
- info->arrOp = funcArith.m_args[1];
+ info->arrOp = funcArith.m_args[1];
info->vnArray = GetArrForLenVn(funcArith.m_args[0]);
}
}
@@ -2972,7 +3042,10 @@ void ValueNumStore::GetArrLenArithInfo(ValueNum vn, ArrLenArithBoundInfo* info)
bool ValueNumStore::IsVNArrLenArithBound(ValueNum vn)
{
// Do we have: "var < a.len - var"
- if (vn == NoVN) return false;
+ if (vn == NoVN)
+ {
+ return false;
+ }
VNFuncApp funcAttr;
if (!GetVNFunc(vn, &funcAttr))
@@ -2981,15 +3054,14 @@ bool ValueNumStore::IsVNArrLenArithBound(ValueNum vn)
}
// Suitable comparator.
- if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE &&
- funcAttr.m_func != (VNFunc)GT_LT && funcAttr.m_func != (VNFunc)GT_GT)
+ if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE && funcAttr.m_func != (VNFunc)GT_LT &&
+ funcAttr.m_func != (VNFunc)GT_GT)
{
return false;
}
// Either the op0 or op1 is arr len arithmetic.
- if (!IsVNArrLenArith(funcAttr.m_args[0]) &&
- !IsVNArrLenArith(funcAttr.m_args[1]))
+ if (!IsVNArrLenArith(funcAttr.m_args[0]) && !IsVNArrLenArith(funcAttr.m_args[1]))
{
return false;
}
@@ -3009,20 +3081,23 @@ void ValueNumStore::GetArrLenArithBoundInfo(ValueNum vn, ArrLenArithBoundInfo* i
if (isOp1ArrLenArith)
{
info->cmpOper = funcAttr.m_func;
- info->cmpOp = funcAttr.m_args[0];
+ info->cmpOp = funcAttr.m_args[0];
GetArrLenArithInfo(funcAttr.m_args[1], info);
}
else
{
- info->cmpOper = GenTree::SwapRelop((genTreeOps) funcAttr.m_func);
- info->cmpOp = funcAttr.m_args[1];
+ info->cmpOper = GenTree::SwapRelop((genTreeOps)funcAttr.m_func);
+ info->cmpOp = funcAttr.m_args[1];
GetArrLenArithInfo(funcAttr.m_args[0], info);
}
}
ValueNum ValueNumStore::GetArrForLenVn(ValueNum vn)
{
- if (vn == NoVN) return NoVN;
+ if (vn == NoVN)
+ {
+ return NoVN;
+ }
VNFuncApp funcAttr;
if (GetVNFunc(vn, &funcAttr) && funcAttr.m_func == (VNFunc)GT_ARR_LENGTH)
@@ -3034,12 +3109,14 @@ ValueNum ValueNumStore::GetArrForLenVn(ValueNum vn)
bool ValueNumStore::IsVNNewArr(ValueNum vn, VNFuncApp* funcApp)
{
- if (vn == NoVN) return false;
+ if (vn == NoVN)
+ {
+ return false;
+ }
bool result = false;
if (GetVNFunc(vn, funcApp))
{
- result = (funcApp->m_func == VNF_JitNewArr) ||
- (funcApp->m_func == VNF_JitReadyToRunNewArr);
+ result = (funcApp->m_func == VNF_JitNewArr) || (funcApp->m_func == VNF_JitReadyToRunNewArr);
}
return result;
}
@@ -3060,10 +3137,12 @@ int ValueNumStore::GetNewArrSize(ValueNum vn)
bool ValueNumStore::IsVNArrLen(ValueNum vn)
{
- if (vn == NoVN) return false;
+ if (vn == NoVN)
+ {
+ return false;
+ }
VNFuncApp funcAttr;
return (GetVNFunc(vn, &funcAttr) && funcAttr.m_func == (VNFunc)GT_ARR_LENGTH);
-
}
ValueNum ValueNumStore::EvalMathFuncUnary(var_types typ, CorInfoIntrinsics gtMathFN, ValueNum arg0VN)
@@ -3079,23 +3158,23 @@ ValueNum ValueNumStore::EvalMathFuncUnary(var_types typ, CorInfoIntrinsics gtMat
double res = 0.0;
switch (gtMathFN)
{
- case CORINFO_INTRINSIC_Sin:
- res = sin(arg0Val);
- break;
- case CORINFO_INTRINSIC_Cos:
- res = cos(arg0Val);
- break;
- case CORINFO_INTRINSIC_Sqrt:
- res = sqrt(arg0Val);
- break;
- case CORINFO_INTRINSIC_Abs:
- res = fabs(arg0Val); // The result and params are doubles.
- break;
- case CORINFO_INTRINSIC_Round:
- res = FloatingPointUtils::round(arg0Val);
- break;
- default:
- unreached(); // the above are the only math intrinsics at the time of this writing.
+ case CORINFO_INTRINSIC_Sin:
+ res = sin(arg0Val);
+ break;
+ case CORINFO_INTRINSIC_Cos:
+ res = cos(arg0Val);
+ break;
+ case CORINFO_INTRINSIC_Sqrt:
+ res = sqrt(arg0Val);
+ break;
+ case CORINFO_INTRINSIC_Abs:
+ res = fabs(arg0Val); // The result and params are doubles.
+ break;
+ case CORINFO_INTRINSIC_Round:
+ res = FloatingPointUtils::round(arg0Val);
+ break;
+ default:
+ unreached(); // the above are the only math intrinsics at the time of this writing.
}
if (typ == TYP_DOUBLE)
{
@@ -3115,9 +3194,7 @@ ValueNum ValueNumStore::EvalMathFuncUnary(var_types typ, CorInfoIntrinsics gtMat
}
else
{
- assert(typ == TYP_DOUBLE
- || typ == TYP_FLOAT
- || (typ == TYP_INT && gtMathFN == CORINFO_INTRINSIC_Round));
+ assert(typ == TYP_DOUBLE || typ == TYP_FLOAT || (typ == TYP_INT && gtMathFN == CORINFO_INTRINSIC_Round));
VNFunc vnf = VNF_Boundary;
switch (gtMathFN)
@@ -3126,13 +3203,13 @@ ValueNum ValueNumStore::EvalMathFuncUnary(var_types typ, CorInfoIntrinsics gtMat
vnf = VNF_Sin;
break;
case CORINFO_INTRINSIC_Cos:
- vnf = VNF_Cos;
+ vnf = VNF_Cos;
break;
case CORINFO_INTRINSIC_Sqrt:
- vnf = VNF_Sqrt;
+ vnf = VNF_Sqrt;
break;
case CORINFO_INTRINSIC_Abs:
- vnf = VNF_Abs;
+ vnf = VNF_Abs;
break;
case CORINFO_INTRINSIC_Round:
if (typ == TYP_DOUBLE)
@@ -3202,20 +3279,20 @@ ValueNum ValueNumStore::EvalMathFuncBinary(var_types typ, CorInfoIntrinsics gtMa
VNFunc vnf = VNF_Boundary;
// Currently, none of the binary math intrinsics are implemented by target-specific instructions.
- // To minimize precision loss, do not do constant folding on them.
+ // To minimize precision loss, do not do constant folding on them.
switch (gtMathFN)
{
- case CORINFO_INTRINSIC_Atan2:
- vnf = VNF_Atan2;
- break;
+ case CORINFO_INTRINSIC_Atan2:
+ vnf = VNF_Atan2;
+ break;
- case CORINFO_INTRINSIC_Pow:
- vnf = VNF_Pow;
- break;
+ case CORINFO_INTRINSIC_Pow:
+ vnf = VNF_Pow;
+ break;
- default:
- unreached(); // the above are the only binary math intrinsics at the time of this writing.
+ default:
+ unreached(); // the above are the only binary math intrinsics at the time of this writing.
}
return VNForFunc(typ, vnf, arg0VN, arg1VN);
@@ -3230,88 +3307,96 @@ bool ValueNumStore::IsVNFunc(ValueNum vn)
Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
switch (c->m_attribs)
{
- case CEA_Func0: case CEA_Func1: case CEA_Func2: case CEA_Func3: case CEA_Func4:
- return true;
- default:
- return false;
+ case CEA_Func0:
+ case CEA_Func1:
+ case CEA_Func2:
+ case CEA_Func3:
+ case CEA_Func4:
+ return true;
+ default:
+ return false;
}
}
bool ValueNumStore::GetVNFunc(ValueNum vn, VNFuncApp* funcApp)
{
- Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
+ Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
unsigned offset = ChunkOffset(vn);
assert(offset < c->m_numUsed);
switch (c->m_attribs)
{
- case CEA_Func4:
+ case CEA_Func4:
{
VNDefFunc4Arg* farg4 = &reinterpret_cast<VNDefFunc4Arg*>(c->m_defs)[offset];
- funcApp->m_func = farg4->m_func;
- funcApp->m_arity = 4;
- funcApp->m_args[0] = farg4->m_arg0;
- funcApp->m_args[1] = farg4->m_arg1;
- funcApp->m_args[2] = farg4->m_arg2;
- funcApp->m_args[3] = farg4->m_arg3;
+ funcApp->m_func = farg4->m_func;
+ funcApp->m_arity = 4;
+ funcApp->m_args[0] = farg4->m_arg0;
+ funcApp->m_args[1] = farg4->m_arg1;
+ funcApp->m_args[2] = farg4->m_arg2;
+ funcApp->m_args[3] = farg4->m_arg3;
}
- return true;
- case CEA_Func3:
+ return true;
+ case CEA_Func3:
{
VNDefFunc3Arg* farg3 = &reinterpret_cast<VNDefFunc3Arg*>(c->m_defs)[offset];
- funcApp->m_func = farg3->m_func;
- funcApp->m_arity = 3;
- funcApp->m_args[0] = farg3->m_arg0;
- funcApp->m_args[1] = farg3->m_arg1;
- funcApp->m_args[2] = farg3->m_arg2;
+ funcApp->m_func = farg3->m_func;
+ funcApp->m_arity = 3;
+ funcApp->m_args[0] = farg3->m_arg0;
+ funcApp->m_args[1] = farg3->m_arg1;
+ funcApp->m_args[2] = farg3->m_arg2;
}
- return true;
- case CEA_Func2:
+ return true;
+ case CEA_Func2:
{
VNDefFunc2Arg* farg2 = &reinterpret_cast<VNDefFunc2Arg*>(c->m_defs)[offset];
- funcApp->m_func = farg2->m_func;
- funcApp->m_arity = 2;
- funcApp->m_args[0] = farg2->m_arg0;
- funcApp->m_args[1] = farg2->m_arg1;
+ funcApp->m_func = farg2->m_func;
+ funcApp->m_arity = 2;
+ funcApp->m_args[0] = farg2->m_arg0;
+ funcApp->m_args[1] = farg2->m_arg1;
}
- return true;
- case CEA_Func1:
+ return true;
+ case CEA_Func1:
{
VNDefFunc1Arg* farg1 = &reinterpret_cast<VNDefFunc1Arg*>(c->m_defs)[offset];
- funcApp->m_func = farg1->m_func;
- funcApp->m_arity = 1;
- funcApp->m_args[0] = farg1->m_arg0;
+ funcApp->m_func = farg1->m_func;
+ funcApp->m_arity = 1;
+ funcApp->m_args[0] = farg1->m_arg0;
}
- return true;
- case CEA_Func0:
+ return true;
+ case CEA_Func0:
{
VNDefFunc0Arg* farg0 = &reinterpret_cast<VNDefFunc0Arg*>(c->m_defs)[offset];
- funcApp->m_func = farg0->m_func;
- funcApp->m_arity = 0;
+ funcApp->m_func = farg0->m_func;
+ funcApp->m_arity = 0;
}
- return true;
- default:
- return false;
+ return true;
+ default:
+ return false;
}
}
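The lookup above follows one idiom: each chunk records the layout of its definition records, and GetVNFunc reinterprets the chunk's storage accordingly before copying the function and its arguments into the caller's VNFuncApp. A minimal, self-contained sketch of that tagged-chunk pattern, using simplified stand-in types rather than the real ValueNumStore chunks:

    // Sketch only: Attribs/Def1/Def2/FuncApp/Chunk are stand-ins for CEA_Func*,
    // VNDefFunc*Arg, VNFuncApp and Chunk; the dispatch shape is the same.
    #include <cassert>
    #include <cstdio>

    enum Attribs { Func1, Func2 };
    struct Def1 { int func; int arg0; };
    struct Def2 { int func; int arg0, arg1; };
    struct FuncApp { int func; unsigned arity; int args[2]; };

    struct Chunk
    {
        Attribs attribs;
        void*   defs; // points at an array of Def1 or Def2, depending on attribs
    };

    bool GetFunc(const Chunk& c, unsigned offset, FuncApp* out)
    {
        switch (c.attribs)
        {
            case Func2:
            {
                const Def2& d = static_cast<const Def2*>(c.defs)[offset];
                out->func = d.func; out->arity = 2;
                out->args[0] = d.arg0; out->args[1] = d.arg1;
                return true;
            }
            case Func1:
            {
                const Def1& d = static_cast<const Def1*>(c.defs)[offset];
                out->func = d.func; out->arity = 1;
                out->args[0] = d.arg0;
                return true;
            }
            default:
                return false; // not a function application chunk
        }
    }

    int main()
    {
        Def2 defs[] = {{7, 10, 20}};
        Chunk c{Func2, defs};
        FuncApp fa;
        assert(GetFunc(c, 0, &fa) && fa.arity == 2 && fa.args[1] == 20);
        printf("func %d(%d, %d)\n", fa.func, fa.args[0], fa.args[1]);
        return 0;
    }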
ValueNum ValueNumStore::VNForRefInAddr(ValueNum vn)
{
var_types vnType = TypeOfVN(vn);
- if (vnType == TYP_REF) return vn;
+ if (vnType == TYP_REF)
+ {
+ return vn;
+ }
// Otherwise...
assert(vnType == TYP_BYREF);
VNFuncApp funcApp;
if (GetVNFunc(vn, &funcApp))
{
- assert(funcApp.m_arity == 2 &&
- (funcApp.m_func == VNFunc(GT_ADD) || funcApp.m_func == VNFunc(GT_SUB)));
+ assert(funcApp.m_arity == 2 && (funcApp.m_func == VNFunc(GT_ADD) || funcApp.m_func == VNFunc(GT_SUB)));
var_types vnArg0Type = TypeOfVN(funcApp.m_args[0]);
- if (vnArg0Type == TYP_REF || vnArg0Type == TYP_BYREF)
+ if (vnArg0Type == TYP_REF || vnArg0Type == TYP_BYREF)
+ {
return VNForRefInAddr(funcApp.m_args[0]);
+ }
else
{
assert(funcApp.m_func == VNFunc(GT_ADD) &&
- (TypeOfVN(funcApp.m_args[1]) == TYP_REF || TypeOfVN(funcApp.m_args[1]) == TYP_BYREF));
+ (TypeOfVN(funcApp.m_args[1]) == TYP_REF || TypeOfVN(funcApp.m_args[1]) == TYP_BYREF));
return VNForRefInAddr(funcApp.m_args[1]);
}
}
@@ -3325,7 +3410,10 @@ ValueNum ValueNumStore::VNForRefInAddr(ValueNum vn)
bool ValueNumStore::VNIsValid(ValueNum vn)
{
ChunkNum cn = GetChunkNum(vn);
- if (cn >= m_chunks.Size()) return false;
+ if (cn >= m_chunks.Size())
+ {
+ return false;
+ }
// Otherwise...
Chunk* c = m_chunks.GetNoExpand(cn);
return ChunkOffset(vn) < c->m_numUsed;
@@ -3350,14 +3438,14 @@ void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr)
var_types vnt = TypeOfVN(vn);
switch (vnt)
{
- case TYP_BOOL:
- case TYP_BYTE:
- case TYP_UBYTE:
- case TYP_CHAR:
- case TYP_SHORT:
- case TYP_USHORT:
- case TYP_INT:
- case TYP_UINT:
+ case TYP_BOOL:
+ case TYP_BYTE:
+ case TYP_UBYTE:
+ case TYP_CHAR:
+ case TYP_SHORT:
+ case TYP_USHORT:
+ case TYP_INT:
+ case TYP_UINT:
{
int val = ConstantValue<int>(vn);
if (isPtr)
@@ -3368,14 +3456,18 @@ void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr)
{
printf("IntCns");
if ((val > -1000) && (val < 1000))
- printf(" %ld", val);
- else
+ {
+ printf(" %ld", val);
+ }
+ else
+ {
printf(" 0x%X", val);
+ }
}
}
break;
- case TYP_LONG:
- case TYP_ULONG:
+ case TYP_LONG:
+ case TYP_ULONG:
{
INT64 val = ConstantValue<INT64>(vn);
if (isPtr)
@@ -3386,52 +3478,58 @@ void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr)
{
printf("LngCns: ");
if ((val > -1000) && (val < 1000))
- printf(" %ld", val);
+ {
+ printf(" %ld", val);
+ }
else if ((val & 0xFFFFFFFF00000000LL) == 0)
+ {
printf(" 0x%X", val);
+ }
else
- printf(" 0x%llx", val);
+ {
+ printf(" 0x%llx", val);
+ }
}
}
break;
- case TYP_FLOAT:
- printf("FltCns[%f]", ConstantValue<float>(vn));
- break;
- case TYP_DOUBLE:
- printf("DblCns[%f]", ConstantValue<double>(vn));
- break;
- case TYP_REF:
- case TYP_ARRAY:
- if (vn == VNForNull())
- {
- printf("null");
- }
- else if (vn == VNForVoid())
- {
- printf("void");
- }
- else
- {
- assert(vn == VNForZeroMap());
- printf("zeroMap");
- }
- break;
- case TYP_BYREF:
- printf("byrefVal");
- break;
- case TYP_STRUCT:
+ case TYP_FLOAT:
+ printf("FltCns[%f]", ConstantValue<float>(vn));
+ break;
+ case TYP_DOUBLE:
+ printf("DblCns[%f]", ConstantValue<double>(vn));
+ break;
+ case TYP_REF:
+ case TYP_ARRAY:
+ if (vn == VNForNull())
+ {
+ printf("null");
+ }
+ else if (vn == VNForVoid())
+ {
+ printf("void");
+ }
+ else
+ {
+ assert(vn == VNForZeroMap());
+ printf("zeroMap");
+ }
+ break;
+ case TYP_BYREF:
+ printf("byrefVal");
+ break;
+ case TYP_STRUCT:
#ifdef FEATURE_SIMD
- case TYP_SIMD8:
- case TYP_SIMD12:
- case TYP_SIMD16:
- case TYP_SIMD32:
+ case TYP_SIMD8:
+ case TYP_SIMD12:
+ case TYP_SIMD16:
+ case TYP_SIMD32:
#endif // FEATURE_SIMD
- printf("structVal");
- break;
+ printf("structVal");
+ break;
// These should be unreached.
- default:
- unreached();
+ default:
+ unreached();
}
}
else if (IsVNArrLenBound(vn))
@@ -3453,29 +3551,32 @@ void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr)
// A few special cases...
switch (funcApp.m_func)
{
- case VNF_FieldSeq:
- vnDumpFieldSeq(comp, &funcApp, true);
- break;
- case VNF_MapSelect:
- vnDumpMapSelect(comp, &funcApp);
- break;
- case VNF_MapStore:
- vnDumpMapStore(comp, &funcApp);
- break;
- default:
- printf("%s(", VNFuncName(funcApp.m_func));
- for (unsigned i = 0; i < funcApp.m_arity; i++)
- {
- if (i > 0) printf(", ");
+ case VNF_FieldSeq:
+ vnDumpFieldSeq(comp, &funcApp, true);
+ break;
+ case VNF_MapSelect:
+ vnDumpMapSelect(comp, &funcApp);
+ break;
+ case VNF_MapStore:
+ vnDumpMapStore(comp, &funcApp);
+ break;
+ default:
+ printf("%s(", VNFuncName(funcApp.m_func));
+ for (unsigned i = 0; i < funcApp.m_arity; i++)
+ {
+ if (i > 0)
+ {
+ printf(", ");
+ }
- printf(STR_VN "%x", funcApp.m_args[i]);
+ printf(STR_VN "%x", funcApp.m_args[i]);
#if FEATURE_VN_DUMP_FUNC_ARGS
- printf("=");
- vnDump(comp, funcApp.m_args[i]);
+ printf("=");
+ vnDump(comp, funcApp.m_args[i]);
#endif
- }
- printf(")");
+ }
+ printf(")");
}
}
else
@@ -3492,10 +3593,12 @@ void ValueNumStore::vnDumpFieldSeq(Compiler* comp, VNFuncApp* fieldSeq, bool isH
// First arg is the field handle VN.
assert(IsVNConstant(fieldSeq->m_args[0]) && TypeOfVN(fieldSeq->m_args[0]) == TYP_I_IMPL);
ssize_t fieldHndVal = ConstantValue<ssize_t>(fieldSeq->m_args[0]);
- bool hasTail = (fieldSeq->m_args[1] != VNForNull());
+ bool hasTail = (fieldSeq->m_args[1] != VNForNull());
if (isHead && hasTail)
+ {
printf("(");
+ }
CORINFO_FIELD_HANDLE fldHnd = CORINFO_FIELD_HANDLE(fieldHndVal);
if (fldHnd == FieldSeqStore::FirstElemPseudoField)
@@ -3523,15 +3626,17 @@ void ValueNumStore::vnDumpFieldSeq(Compiler* comp, VNFuncApp* fieldSeq, bool isH
}
if (isHead && hasTail)
+ {
printf(")");
+ }
}
void ValueNumStore::vnDumpMapSelect(Compiler* comp, VNFuncApp* mapSelect)
{
- assert(mapSelect->m_func == VNF_MapSelect); // Precondition.
+ assert(mapSelect->m_func == VNF_MapSelect); // Precondition.
- ValueNum mapVN = mapSelect->m_args[0]; // First arg is the map id
- ValueNum indexVN = mapSelect->m_args[1]; // Second arg is the index
+ ValueNum mapVN = mapSelect->m_args[0]; // First arg is the map id
+ ValueNum indexVN = mapSelect->m_args[1]; // Second arg is the index
comp->vnPrint(mapVN, 0);
printf("[");
@@ -3541,11 +3646,11 @@ void ValueNumStore::vnDumpMapSelect(Compiler* comp, VNFuncApp* mapSelect)
void ValueNumStore::vnDumpMapStore(Compiler* comp, VNFuncApp* mapStore)
{
- assert(mapStore->m_func == VNF_MapStore); // Precondition.
+ assert(mapStore->m_func == VNF_MapStore); // Precondition.
- ValueNum mapVN = mapStore->m_args[0]; // First arg is the map id
- ValueNum indexVN = mapStore->m_args[1]; // Second arg is the index
- ValueNum newValVN = mapStore->m_args[2]; // Third arg is the new value
+ ValueNum mapVN = mapStore->m_args[0]; // First arg is the map id
+ ValueNum indexVN = mapStore->m_args[1]; // Second arg is the index
+ ValueNum newValVN = mapStore->m_args[2]; // Third arg is the new value
comp->vnPrint(mapVN, 0);
printf("[");
@@ -3554,40 +3659,26 @@ void ValueNumStore::vnDumpMapStore(Compiler* comp, VNFuncApp* mapStore)
comp->vnPrint(newValVN, 0);
printf("]");
}
-#endif // DEBUG
+#endif // DEBUG
// Static fields, methods.
-static UINT8 vnfOpAttribs[VNF_COUNT];
-static genTreeOps genTreeOpsIllegalAsVNFunc[] = {
- GT_IND, // When we do heap memory.
- GT_NULLCHECK,
- GT_QMARK,
- GT_COLON,
- GT_LOCKADD,
- GT_XADD,
- GT_XCHG,
- GT_CMPXCHG,
- GT_LCLHEAP,
- GT_BOX,
-
- // These need special semantics:
- GT_COMMA, // == second argument (but with exception(s) from first).
- GT_INITBLK, // A kind of assignment.
- GT_COPYBLK, // A kind of assignment.
- GT_COPYOBJ, // A kind of assignment.
- GT_ADDR,
- GT_ARR_BOUNDS_CHECK,
- GT_OBJ, // May reference heap memory.
-
- // These control-flow operations need no values.
- GT_JTRUE,
- GT_RETURN,
- GT_SWITCH,
- GT_RETFILT,
- GT_CKFINITE
-};
+static UINT8 vnfOpAttribs[VNF_COUNT];
+static genTreeOps genTreeOpsIllegalAsVNFunc[] = {GT_IND, // When we do heap memory.
+ GT_NULLCHECK, GT_QMARK, GT_COLON, GT_LOCKADD, GT_XADD, GT_XCHG,
+ GT_CMPXCHG, GT_LCLHEAP, GT_BOX,
+
+ // These need special semantics:
+ GT_COMMA, // == second argument (but with exception(s) from first).
+ GT_INITBLK, // A kind of assignment.
+ GT_COPYBLK, // A kind of assignment.
+ GT_COPYOBJ, // A kind of assignment.
+ GT_ADDR, GT_ARR_BOUNDS_CHECK,
+ GT_OBJ, // May reference heap memory.
-UINT8* ValueNumStore::s_vnfOpAttribs = NULL;
+ // These control-flow operations need no values.
+ GT_JTRUE, GT_RETURN, GT_SWITCH, GT_RETFILT, GT_CKFINITE};
+
+UINT8* ValueNumStore::s_vnfOpAttribs = nullptr;
void ValueNumStore::InitValueNumStoreStatics()
{
@@ -3599,7 +3690,7 @@ void ValueNumStore::InitValueNumStoreStatics()
for (unsigned i = 0; i < GT_COUNT; i++)
{
genTreeOps gtOper = static_cast<genTreeOps>(i);
- unsigned arity = 0;
+ unsigned arity = 0;
if (GenTree::OperIsUnary(gtOper))
{
arity = 1;
@@ -3623,25 +3714,26 @@ void ValueNumStore::InitValueNumStoreStatics()
// I so wish this wasn't the best way to do this...
- int vnfNum = VNF_Boundary + 1; // The macro definition below will update this after using it.
+ int vnfNum = VNF_Boundary + 1; // The macro definition below will update this after using it.
-#define ValueNumFuncDef(vnf, arity, commute, knownNonNull, sharedStatic) \
- if (commute) vnfOpAttribs[vnfNum] |= VNFOA_Commutative; \
- if (knownNonNull) vnfOpAttribs[vnfNum] |= VNFOA_KnownNonNull; \
- if (sharedStatic) vnfOpAttribs[vnfNum] |= VNFOA_SharedStatic; \
- vnfOpAttribs[vnfNum] |= (arity << VNFOA_ArityShift); \
+#define ValueNumFuncDef(vnf, arity, commute, knownNonNull, sharedStatic) \
+ if (commute) \
+ vnfOpAttribs[vnfNum] |= VNFOA_Commutative; \
+ if (knownNonNull) \
+ vnfOpAttribs[vnfNum] |= VNFOA_KnownNonNull; \
+ if (sharedStatic) \
+ vnfOpAttribs[vnfNum] |= VNFOA_SharedStatic; \
+ vnfOpAttribs[vnfNum] |= (arity << VNFOA_ArityShift); \
vnfNum++;
#include "valuenumfuncs.h"
#undef ValueNumFuncDef
- unsigned n = sizeof(genTreeOpsIllegalAsVNFunc)/sizeof(genTreeOps);
+ unsigned n = sizeof(genTreeOpsIllegalAsVNFunc) / sizeof(genTreeOps);
for (unsigned i = 0; i < n; i++)
{
vnfOpAttribs[genTreeOpsIllegalAsVNFunc[i]] |= VNFOA_IllegalGenTreeOp;
}
-
-
}
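The #include of valuenumfuncs.h above is the usual x-macro trick: the same list is expanded once to declare the functions and again to fill vnfOpAttribs with commutativity and arity bits. A small, self-contained sketch of the idiom with a hypothetical three-entry list (the names and bit layout below are illustrative, not the real valuenumfuncs.h contents):

    #include <cstdio>

    enum FuncAttribs : unsigned char
    {
        FA_Commutative = 0x01,
        FA_ArityShift  = 2,   // arity stored in the upper bits, as with VNFOA_ArityShift
    };

    // Hypothetical function list; the real one lives in valuenumfuncs.h.
    #define FUNC_LIST(def)    \
        def(HypAdd, 2, true)  \
        def(HypNeg, 1, false) \
        def(HypPi,  0, false)

    // First expansion: build an enum of function ids.
    enum HypFunc
    {
    #define DEF_ENUM(name, arity, commute) name,
        FUNC_LIST(DEF_ENUM)
    #undef DEF_ENUM
        HypFuncCount
    };

    static unsigned char g_attribs[HypFuncCount];

    void InitAttribs()
    {
        int num = 0;
        // Second expansion: fill the attribute table, one statement block per entry.
    #define DEF_ATTR(name, arity, commute)                            \
        if (commute)                                                  \
            g_attribs[num] |= FA_Commutative;                         \
        g_attribs[num] |= (unsigned char)((arity) << FA_ArityShift);  \
        num++;
        FUNC_LIST(DEF_ATTR)
    #undef DEF_ATTR
    }

    int main()
    {
        InitAttribs();
        printf("HypAdd: commutative=%d arity=%d\n",
               (g_attribs[HypAdd] & FA_Commutative) != 0,
               g_attribs[HypAdd] >> FA_ArityShift);
        return 0;
    }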
#ifdef DEBUG
@@ -3662,11 +3754,10 @@ const char* ValueNumStore::VNFuncName(VNFunc vnf)
}
else
{
- return VNFuncNameArr[vnf-(VNF_Boundary+1)];
+ return VNFuncNameArr[vnf - (VNF_Boundary + 1)];
}
}
-
static const char* s_reservedNameArr[] = {
"$VN.Recursive", // -2 RecursiveVN
"$VN.No", // -1 NoVN
@@ -3675,34 +3766,34 @@ static const char* s_reservedNameArr[] = {
"$VN.NotAField", // 2 VNForNotAField()
"$VN.ReadOnlyHeap", // 3 VNForROH()
"$VN.Void", // 4 VNForVoid()
- "$VN.EmptyExcSet" // 5 VNForEmptyExcSet()
+ "$VN.EmptyExcSet" // 5 VNForEmptyExcSet()
};
-// Returns the string name of "vn" when it is a reserved value number, nullptr otherwise
+// Returns the string name of "vn" when it is a reserved value number, nullptr otherwise
// static
const char* ValueNumStore::reservedName(ValueNum vn)
{
int val = vn - ValueNumStore::RecursiveVN; // Add two, making 'RecursiveVN' equal to zero
int max = ValueNumStore::SRC_NumSpecialRefConsts - ValueNumStore::RecursiveVN;
- if ((val >= 0) && (val < max))
+ if ((val >= 0) && (val < max))
{
return s_reservedNameArr[val];
}
return nullptr;
}
-#endif // DEBUG
+#endif // DEBUG
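The index arithmetic in reservedName and isReservedVN works because the reserved value numbers form one contiguous run starting at RecursiveVN, so subtracting RecursiveVN maps them onto [0, max) and indexes the name table directly. A self-contained sketch with assumed constants (RecursiveVN = -2 and six special ref constants, as suggested by the comments in s_reservedNameArr):

    #include <cstdio>

    static const int RecursiveVN         = -2; // assumption for illustration
    static const int NumSpecialRefConsts = 6;  // assumption for illustration
    static const char* names[] = {"$VN.Recursive", "$VN.No",           "$VN.Null", "$VN.ZeroMap",
                                  "$VN.NotAField", "$VN.ReadOnlyHeap", "$VN.Void", "$VN.EmptyExcSet"};

    const char* reservedName(int vn)
    {
        int val = vn - RecursiveVN;                  // RecursiveVN maps to 0
        int max = NumSpecialRefConsts - RecursiveVN; // 6 - (-2) = 8 table entries
        return (val >= 0 && val < max) ? names[val] : nullptr;
    }

    int main()
    {
        printf("%s\n", reservedName(-2));              // "$VN.Recursive"
        printf("%s\n", reservedName(4));               // "$VN.Void"
        printf("%p\n", (const void*)reservedName(42)); // not reserved -> null
        return 0;
    }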
-// Returns true if "vn" is a reserved value number
+// Returns true if "vn" is a reserved value number
// static
-bool ValueNumStore::isReservedVN(ValueNum vn)
+bool ValueNumStore::isReservedVN(ValueNum vn)
{
int val = vn - ValueNumStore::RecursiveVN; // Adding two, making 'RecursiveVN' equal to zero
int max = ValueNumStore::SRC_NumSpecialRefConsts - ValueNumStore::RecursiveVN;
- if ((val >= 0) && (val < max))
+ if ((val >= 0) && (val < max))
{
return true;
}
@@ -3714,8 +3805,8 @@ void ValueNumStore::RunTests(Compiler* comp)
{
VNFunc VNF_Add = GenTreeOpToVNFunc(GT_ADD);
- ValueNumStore* vns = new (comp->getAllocatorDebugOnly()) ValueNumStore(comp, comp->getAllocatorDebugOnly());
- ValueNum vnNull = VNForNull();
+ ValueNumStore* vns = new (comp->getAllocatorDebugOnly()) ValueNumStore(comp, comp->getAllocatorDebugOnly());
+ ValueNum vnNull = VNForNull();
assert(vnNull == VNForNull());
ValueNum vnFor1 = vns->VNForIntCon(1);
@@ -3745,7 +3836,7 @@ void ValueNumStore::RunTests(Compiler* comp)
assert(vns->IsVNConstant(vnFor1D));
assert(vns->ConstantValue<double>(vnFor1D) == 1.0);
- ValueNum vnRandom1 = vns->VNForExpr(TYP_INT);
+ ValueNum vnRandom1 = vns->VNForExpr(TYP_INT);
ValueNum vnForFunc2a = vns->VNForFunc(TYP_INT, VNF_Add, vnFor1, vnRandom1);
assert(vnForFunc2a == vns->VNForFunc(TYP_INT, VNF_Add, vnFor1, vnRandom1));
assert(vnForFunc2a != vnFor1D && vnForFunc2a != vnFor1F && vnForFunc2a != vnFor1 && vnForFunc2a != vnRandom1);
@@ -3753,7 +3844,7 @@ void ValueNumStore::RunTests(Compiler* comp)
assert(!vns->IsVNConstant(vnForFunc2a));
assert(vns->IsVNFunc(vnForFunc2a));
VNFuncApp fa2a;
- bool b = vns->GetVNFunc(vnForFunc2a, &fa2a);
+ bool b = vns->GetVNFunc(vnForFunc2a, &fa2a);
assert(b);
assert(fa2a.m_func == VNF_Add && fa2a.m_arity == 2 && fa2a.m_args[0] == vnFor1 && fa2a.m_args[1] == vnRandom1);
@@ -3773,7 +3864,7 @@ typedef ExpandArrayStack<BasicBlock*> BlockStack;
// This represents the "to do" state of the value number computation.
struct ValueNumberState
{
- // These two stacks collectively represent the set of blocks that are candidates for
+ // These two stacks collectively represent the set of blocks that are candidates for
// processing, because at least one predecessor has been processed. Blocks on "m_toDoAllPredsDone"
// have had *all* predecessors processed, and thus are candidates for some extra optimizations.
// Blocks on "m_toDoNotAllPredsDone" have at least one predecessor that has not been processed.
@@ -3785,31 +3876,33 @@ struct ValueNumberState
Compiler* m_comp;
// TBD: This should really be a bitset...
- // For now:
+ // For now:
// first bit indicates completed,
// second bit indicates that it's been pushed on all-done stack,
// third bit indicates that it's been pushed on not-all-done stack.
- BYTE* m_visited;
+ BYTE* m_visited;
enum BlockVisitBits
{
- BVB_complete = 0x1,
- BVB_onAllDone = 0x2,
+ BVB_complete = 0x1,
+ BVB_onAllDone = 0x2,
BVB_onNotAllDone = 0x4,
};
- bool GetVisitBit(unsigned bbNum, BlockVisitBits bvb) {
+ bool GetVisitBit(unsigned bbNum, BlockVisitBits bvb)
+ {
return (m_visited[bbNum] & bvb) != 0;
}
- void SetVisitBit(unsigned bbNum, BlockVisitBits bvb) {
+ void SetVisitBit(unsigned bbNum, BlockVisitBits bvb)
+ {
m_visited[bbNum] |= bvb;
}
- ValueNumberState(Compiler* comp) :
- m_toDoAllPredsDone(comp->getAllocator(), /*minSize*/4),
- m_toDoNotAllPredsDone(comp->getAllocator(), /*minSize*/4),
- m_comp(comp),
- m_visited(new (comp, CMK_ValueNumber) BYTE[comp->fgBBNumMax + 1]())
+ ValueNumberState(Compiler* comp)
+ : m_toDoAllPredsDone(comp->getAllocator(), /*minSize*/ 4)
+ , m_toDoNotAllPredsDone(comp->getAllocator(), /*minSize*/ 4)
+ , m_comp(comp)
+ , m_visited(new (comp, CMK_ValueNumber) BYTE[comp->fgBBNumMax + 1]())
{
}
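The visited array described above packs three independent facts per block into one byte, so membership tests and updates are single mask operations. A self-contained sketch of the same bit scheme with illustrative names:

    #include <cassert>
    #include <vector>

    enum BlockVisitBits : unsigned char
    {
        BVB_complete     = 0x1,
        BVB_onAllDone    = 0x2,
        BVB_onNotAllDone = 0x4,
    };

    struct VisitState
    {
        std::vector<unsigned char> m_visited;

        explicit VisitState(unsigned maxBlockNum) : m_visited(maxBlockNum + 1, 0) {}

        bool GetVisitBit(unsigned bbNum, BlockVisitBits bvb) const
        {
            return (m_visited[bbNum] & bvb) != 0;
        }
        void SetVisitBit(unsigned bbNum, BlockVisitBits bvb)
        {
            m_visited[bbNum] |= bvb;
        }
    };

    int main()
    {
        VisitState vs(10);
        vs.SetVisitBit(3, BVB_onNotAllDone);
        vs.SetVisitBit(3, BVB_complete);
        assert(vs.GetVisitBit(3, BVB_complete) && !vs.GetVisitBit(3, BVB_onAllDone));
        return 0;
    }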
@@ -3830,7 +3923,7 @@ struct ValueNumberState
// keep the array compact.
while (GetVisitBit(cand->bbNum, BVB_complete))
{
- if (i+1 < m_toDoNotAllPredsDone.Size())
+ if (i + 1 < m_toDoNotAllPredsDone.Size())
{
cand = m_toDoNotAllPredsDone.Pop();
m_toDoNotAllPredsDone.Set(i, cand);
@@ -3843,7 +3936,10 @@ struct ValueNumberState
}
}
// We may have run out of non-complete candidates above. If so, we're done.
- if (i == m_toDoNotAllPredsDone.Size()) break;
+ if (i == m_toDoNotAllPredsDone.Size())
+ {
+ break;
+ }
// See if "cand" is a loop entry.
unsigned lnum;
@@ -3871,7 +3967,9 @@ struct ValueNumberState
if (!m_comp->optLoopTable[lnum].lpContains(predBlock))
{
if (!GetVisitBit(predBlock->bbNum, BVB_complete))
+ {
allNonLoopPredsDone = false;
+ }
}
}
if (allNonLoopPredsDone)
@@ -3881,11 +3979,16 @@ struct ValueNumberState
}
}
- // If we didn't find a loop entry block with all non-loop preds done above, then return a random member (if there is one).
+ // If we didn't find a loop entry block with all non-loop preds done above, then return a random member (if
+ // there is one).
if (m_toDoNotAllPredsDone.Size() == 0)
+ {
return nullptr;
+ }
else
+ {
return m_toDoNotAllPredsDone.Pop();
+ }
}
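ChooseFromNotAllPredsDone prefers a loop entry whose non-loop predecessors are all complete, and only falls back to an arbitrary remaining candidate when no such entry exists. A simplified, self-contained sketch of that selection policy (illustrative data structures, without the swap-and-pop compaction of the real worklist):

    #include <cstdio>
    #include <vector>

    struct Block
    {
        int              num;
        bool             complete;
        bool             isLoopEntry;
        std::vector<int> nonLoopPreds; // indices into the same block table
    };

    Block* ChooseCandidate(std::vector<Block>& blocks, std::vector<int>& notAllPredsDone)
    {
        // First pass: look for a loop entry whose non-loop predecessors are all done.
        for (int idx : notAllPredsDone)
        {
            Block& cand = blocks[idx];
            if (cand.complete || !cand.isLoopEntry)
                continue;
            bool allNonLoopPredsDone = true;
            for (int p : cand.nonLoopPreds)
                if (!blocks[p].complete)
                    allNonLoopPredsDone = false;
            if (allNonLoopPredsDone)
                return &cand;
        }
        // No such loop entry: return any remaining non-complete candidate, if there is one.
        while (!notAllPredsDone.empty())
        {
            int idx = notAllPredsDone.back();
            notAllPredsDone.pop_back();
            if (!blocks[idx].complete)
                return &blocks[idx];
        }
        return nullptr;
    }

    int main()
    {
        std::vector<Block> blocks = {{0, true, false, {}}, {1, false, true, {0}}, {2, false, false, {}}};
        std::vector<int>   todo   = {2, 1};
        Block* b = ChooseCandidate(blocks, todo);
        printf("chose BB%02d\n", b ? b->num : -1); // prefers BB01, the loop entry
        return 0;
    }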
// Debugging output that is too detailed for a normal JIT dump...
@@ -3908,7 +4011,10 @@ struct ValueNumberState
JITDUMP(" Succ(BB%02u).\n", succ->bbNum);
#endif // DEBUG_VN_VISIT
- if (GetVisitBit(succ->bbNum, BVB_complete)) continue;
+ if (GetVisitBit(succ->bbNum, BVB_complete))
+ {
+ continue;
+ }
#ifdef DEBUG_VN_VISIT
JITDUMP(" Not yet completed.\n");
#endif // DEBUG_VN_VISIT
@@ -3930,7 +4036,8 @@ struct ValueNumberState
JITDUMP(" All preds complete, adding to allDone.\n");
#endif // DEBUG_VN_VISIT
- assert(!GetVisitBit(succ->bbNum, BVB_onAllDone)); // Only last completion of last succ should add to this.
+ assert(!GetVisitBit(succ->bbNum, BVB_onAllDone)); // Only last completion of last succ should add to
+ // this.
m_toDoAllPredsDone.Push(succ);
SetVisitBit(succ->bbNum, BVB_onAllDone);
}
@@ -3952,8 +4059,8 @@ struct ValueNumberState
}
}
- bool ToDoExists()
- {
+ bool ToDoExists()
+ {
return m_toDoAllPredsDone.Size() > 0 || m_toDoNotAllPredsDone.Size() > 0;
}
};
@@ -3969,14 +4076,17 @@ void Compiler::fgValueNumber()
#endif
// If we skipped SSA, skip VN as well.
- if (fgSsaPassesCompleted == 0) return;
+ if (fgSsaPassesCompleted == 0)
+ {
+ return;
+ }
// Allocate the value number store.
- assert(fgVNPassesCompleted > 0 || vnStore == NULL);
+ assert(fgVNPassesCompleted > 0 || vnStore == nullptr);
if (fgVNPassesCompleted == 0)
{
- CompAllocator *allocator = new(this, CMK_ValueNumber) CompAllocator(this, CMK_ValueNumber);
- vnStore = new (this, CMK_ValueNumber) ValueNumStore(this, allocator);
+ CompAllocator* allocator = new (this, CMK_ValueNumber) CompAllocator(this, CMK_ValueNumber);
+ vnStore = new (this, CMK_ValueNumber) ValueNumStore(this, allocator);
}
else
{
@@ -3989,7 +4099,7 @@ void Compiler::fgValueNumber()
for (BasicBlock* blk = fgFirstBB; blk != nullptr; blk = blk->bbNext)
{
// Now iterate over the block's statements, and their trees.
- for (GenTreePtr stmts = blk->FirstNonPhiDef(); stmts != NULL; stmts = stmts->gtNext)
+ for (GenTreePtr stmts = blk->FirstNonPhiDef(); stmts != nullptr; stmts = stmts->gtNext)
{
assert(stmts->IsStatement());
for (GenTreePtr tree = stmts->gtStmt.gtStmtList; tree; tree = tree->gtNext)
@@ -4000,7 +4110,6 @@ void Compiler::fgValueNumber()
}
}
-
// Compute the side effects of loops.
optComputeLoopSideEffects();
@@ -4015,7 +4124,7 @@ void Compiler::fgValueNumber()
// Start by giving incoming arguments value numbers.
// Also give must-init vars a zero of their type.
- for (unsigned i = 0; i < lvaCount; i++)
+ for (unsigned i = 0; i < lvaCount; i++)
{
LclVarDsc* varDsc = &lvaTable[i];
if (varDsc->lvIsParam)
@@ -4026,58 +4135,58 @@ void Compiler::fgValueNumber()
// SSA numbers always start from FIRST_SSA_NUM, and we give the value number to SSA name FIRST_SSA_NUM.
// We use the VNF_InitVal(i) from here so we know that this value is loop-invariant
// in all loops.
- ValueNum initVal = vnStore->VNForFunc(varDsc->TypeGet(), VNF_InitVal, vnStore->VNForIntCon(i));
- LclSsaVarDsc* ssaDef = varDsc->GetPerSsaData(SsaConfig::FIRST_SSA_NUM);
+ ValueNum initVal = vnStore->VNForFunc(varDsc->TypeGet(), VNF_InitVal, vnStore->VNForIntCon(i));
+ LclSsaVarDsc* ssaDef = varDsc->GetPerSsaData(SsaConfig::FIRST_SSA_NUM);
ssaDef->m_vnPair.SetBoth(initVal);
ssaDef->m_defLoc.m_blk = fgFirstBB;
}
else if (info.compInitMem || varDsc->lvMustInit ||
(varDsc->lvTracked && VarSetOps::IsMember(this, fgFirstBB->bbLiveIn, varDsc->lvVarIndex)))
{
- // The last clause covers the use-before-def variables (the ones that are live-in to the the first block),
+ // The last clause covers the use-before-def variables (the ones that are live-in to the first block),
// these are variables that are read before being initialized (at least on some control flow paths)
// if they are not must-init, then they get VNF_InitVal(i), as with the param case.)
- bool isZeroed = (info.compInitMem || varDsc->lvMustInit);
- ValueNum initVal = ValueNumStore::NoVN; // We must assign a new value to initVal
- var_types typ = varDsc->TypeGet();
+ bool isZeroed = (info.compInitMem || varDsc->lvMustInit);
+ ValueNum initVal = ValueNumStore::NoVN; // We must assign a new value to initVal
+ var_types typ = varDsc->TypeGet();
switch (typ)
{
- case TYP_LCLBLK: // The outgoing args area for arm and x64
- case TYP_BLK: // A blob of memory
- // TYP_BLK is used for the EHSlots LclVar on x86 (aka shadowSPslotsVar)
- // and for the lvaInlinedPInvokeFrameVar on x64, arm and x86
- // The stack associated with these LclVars are not zero initialized
- // thus we set 'initVN' to a new, unique VN.
- //
- initVal = vnStore->VNForExpr();
- break;
+ case TYP_LCLBLK: // The outgoing args area for arm and x64
+ case TYP_BLK: // A blob of memory
+ // TYP_BLK is used for the EHSlots LclVar on x86 (aka shadowSPslotsVar)
+ // and for the lvaInlinedPInvokeFrameVar on x64, arm and x86
+ // The stack associated with these LclVars is not zero initialized
+ // thus we set 'initVN' to a new, unique VN.
+ //
+ initVal = vnStore->VNForExpr();
+ break;
- case TYP_BYREF:
- if (isZeroed)
- {
- // LclVars of TYP_BYREF can be zero-inited.
- initVal = vnStore->VNForByrefCon(0);
- }
- else
- {
- // Here we have uninitialized TYP_BYREF
- initVal = vnStore->VNForFunc(typ, VNF_InitVal, vnStore->VNForIntCon(i));
- }
- break;
+ case TYP_BYREF:
+ if (isZeroed)
+ {
+ // LclVars of TYP_BYREF can be zero-inited.
+ initVal = vnStore->VNForByrefCon(0);
+ }
+ else
+ {
+ // Here we have uninitialized TYP_BYREF
+ initVal = vnStore->VNForFunc(typ, VNF_InitVal, vnStore->VNForIntCon(i));
+ }
+ break;
- default:
- if (isZeroed)
- {
- // By default we will zero init these LclVars
- initVal = vnStore->VNZeroForType(typ);
- }
- else
- {
- initVal = vnStore->VNForFunc(typ, VNF_InitVal, vnStore->VNForIntCon(i));
- }
- break;
+ default:
+ if (isZeroed)
+ {
+ // By default we will zero init these LclVars
+ initVal = vnStore->VNZeroForType(typ);
+ }
+ else
+ {
+ initVal = vnStore->VNForFunc(typ, VNF_InitVal, vnStore->VNForIntCon(i));
+ }
+ break;
}
#ifdef _TARGET_X86_
bool isVarargParam = (i == lvaVarargsBaseOfStkArgs || i == lvaVarargsHandleArg);
@@ -4092,7 +4201,7 @@ void Compiler::fgValueNumber()
}
}
// Give "Heap" an initial value number (about which we know nothing).
- ValueNum heapInitVal = vnStore->VNForFunc(TYP_REF, VNF_InitVal, vnStore->VNForIntCon(-1)); // Use -1 for the heap.
+ ValueNum heapInitVal = vnStore->VNForFunc(TYP_REF, VNF_InitVal, vnStore->VNForIntCon(-1)); // Use -1 for the heap.
GetHeapPerSsaData(SsaConfig::FIRST_SSA_NUM)->m_vnPair.SetBoth(heapInitVal);
#ifdef DEBUG
if (verbose)
@@ -4111,7 +4220,7 @@ void Compiler::fgValueNumber()
while (vs.m_toDoAllPredsDone.Size() > 0)
{
BasicBlock* toDo = vs.m_toDoAllPredsDone.Pop();
- fgValueNumberBlock(toDo, /*newVNsForPhis*/false);
+ fgValueNumberBlock(toDo, /*newVNsForPhis*/ false);
// Record that we've visited "toDo", and add successors to the right sets.
vs.FinishVisit(toDo);
}
@@ -4120,10 +4229,13 @@ void Compiler::fgValueNumber()
// note that this is an "if", not a "while" loop.
if (vs.m_toDoNotAllPredsDone.Size() > 0)
{
- BasicBlock* toDo = vs.ChooseFromNotAllPredsDone();
- if (toDo == nullptr) continue; // We may have run out, because of completed blocks on the not-all-preds done list.
+ BasicBlock* toDo = vs.ChooseFromNotAllPredsDone();
+ if (toDo == nullptr)
+ {
+ continue; // We may have run out, because of completed blocks on the not-all-preds done list.
+ }
- fgValueNumberBlock(toDo, /*newVNsForPhis*/true);
+ fgValueNumberBlock(toDo, /*newVNsForPhis*/ true);
// Record that we've visited "toDo", and add successors to the right sets.
vs.FinishVisit(toDo);
}
@@ -4140,8 +4252,8 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
{
compCurBB = blk;
-#ifdef DEBUG
- compCurStmtNum = blk->bbStmtNum-1; // Set compCurStmtNum
+#ifdef DEBUG
+ compCurStmtNum = blk->bbStmtNum - 1; // Set compCurStmtNum
#endif
unsigned outerLoopNum = BasicBlock::NOT_IN_LOOP;
@@ -4174,17 +4286,18 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
noway_assert(phiArgs->Rest() != nullptr);
GenTreeLclVarCommon* phiArg = phiArgs->Current()->AsLclVarCommon();
- phiArgs = phiArgs->Rest();
+ phiArgs = phiArgs->Rest();
phiAppVNP.SetBoth(vnStore->VNForIntCon(phiArg->gtSsaNum));
- bool allSameLib = true;
+ bool allSameLib = true;
bool allSameCons = true;
- sameVNPair = lvaTable[phiArg->gtLclNum].GetPerSsaData(phiArg->gtSsaNum)->m_vnPair;
+ sameVNPair = lvaTable[phiArg->gtLclNum].GetPerSsaData(phiArg->gtSsaNum)->m_vnPair;
if (!sameVNPair.BothDefined())
{
- allSameLib = false; allSameCons = false;
+ allSameLib = false;
+ allSameCons = false;
}
- while (phiArgs != NULL)
+ while (phiArgs != nullptr)
{
phiArg = phiArgs->Current()->AsLclVarCommon();
// Set the VN of the phi arg.
@@ -4202,12 +4315,13 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
}
else
{
- allSameLib = false; allSameCons = false;
+ allSameLib = false;
+ allSameCons = false;
}
ValueNumPair phiArgSsaVNP;
phiArgSsaVNP.SetBoth(vnStore->VNForIntCon(phiArg->gtSsaNum));
phiAppVNP = vnStore->VNPairForFunc(newSsaVar->TypeGet(), VNF_Phi, phiArgSsaVNP, phiAppVNP);
- phiArgs = phiArgs->Rest();
+ phiArgs = phiArgs->Rest();
}
ValueNumPair newVNPair;
@@ -4237,7 +4351,8 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
#ifdef DEBUG
if (verbose)
{
- printf("In SSA definition, incoming phi args all same, set VN of local %d/%d to ", newSsaVar->GetLclNum(), newSsaVar->GetSsaNum());
+ printf("In SSA definition, incoming phi args all same, set VN of local %d/%d to ",
+ newSsaVar->GetLclNum(), newSsaVar->GetSsaNum());
vnpPrint(newVNPair, 1);
printf(".\n");
}
@@ -4246,13 +4361,12 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
else
{
// They were not the same; we need to create a phi definition.
- ValueNumPair lclNumVNP; lclNumVNP.SetBoth(ValueNum(newSsaVar->GetLclNum()));
- ValueNumPair ssaNumVNP; ssaNumVNP.SetBoth(ValueNum(newSsaVar->GetSsaNum()));
- ValueNumPair vnPhiDef = vnStore->VNPairForFunc(newSsaVar->TypeGet(),
- VNF_PhiDef,
- lclNumVNP,
- ssaNumVNP,
- phiAppVNP);
+ ValueNumPair lclNumVNP;
+ lclNumVNP.SetBoth(ValueNum(newSsaVar->GetLclNum()));
+ ValueNumPair ssaNumVNP;
+ ssaNumVNP.SetBoth(ValueNum(newSsaVar->GetSsaNum()));
+ ValueNumPair vnPhiDef =
+ vnStore->VNPairForFunc(newSsaVar->TypeGet(), VNF_PhiDef, lclNumVNP, ssaNumVNP, phiAppVNP);
newSsaVarDsc->m_vnPair = vnPhiDef;
#ifdef DEBUG
if (verbose)
@@ -4267,7 +4381,7 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
// Now do the same for "Heap".
// Is there a phi for this block?
- if (blk->bbHeapSsaPhiFunc == NULL)
+ if (blk->bbHeapSsaPhiFunc == nullptr)
{
fgCurHeapVN = GetHeapPerSsaData(blk->bbHeapSsaNumIn)->m_vnPair.GetLiberal();
assert(fgCurHeapVN != ValueNumStore::NoVN);
@@ -4280,7 +4394,7 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
{
newHeapVN = fgHeapVNForLoopSideEffects(blk, loopNum);
}
- else
+ else
{
// Are all the VN's the same?
BasicBlock::HeapPhiArg* phiArgs = blk->bbHeapSsaPhiFunc;
@@ -4289,14 +4403,14 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
assert(phiArgs->m_nextArg != nullptr);
ValueNum phiAppVN = vnStore->VNForIntCon(phiArgs->GetSsaNum());
JITDUMP(" Building phi application: $%x = SSA# %d.\n", phiAppVN, phiArgs->GetSsaNum());
- bool allSame = true;
- ValueNum sameVN = GetHeapPerSsaData(phiArgs->GetSsaNum())->m_vnPair.GetLiberal();
+ bool allSame = true;
+ ValueNum sameVN = GetHeapPerSsaData(phiArgs->GetSsaNum())->m_vnPair.GetLiberal();
if (sameVN == ValueNumStore::NoVN)
{
allSame = false;
}
phiArgs = phiArgs->m_nextArg;
- while (phiArgs != NULL)
+ while (phiArgs != nullptr)
{
ValueNum phiArgVN = GetHeapPerSsaData(phiArgs->GetSsaNum())->m_vnPair.GetLiberal();
if (phiArgVN == ValueNumStore::NoVN || phiArgVN != sameVN)
@@ -4306,7 +4420,7 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
#ifdef DEBUG
ValueNum oldPhiAppVN = phiAppVN;
#endif
- unsigned phiArgSSANum = phiArgs->GetSsaNum();
+ unsigned phiArgSSANum = phiArgs->GetSsaNum();
ValueNum phiArgSSANumVN = vnStore->VNForIntCon(phiArgSSANum);
JITDUMP(" Building phi application: $%x = SSA# %d.\n", phiArgSSANumVN, phiArgSSANum);
phiAppVN = vnStore->VNForFunc(TYP_REF, VNF_Phi, phiArgSSANumVN, phiAppVN);
@@ -4319,9 +4433,8 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
}
else
{
- newHeapVN = vnStore->VNForFunc(TYP_REF, VNF_PhiHeapDef,
- vnStore->VNForHandle(ssize_t(blk), 0),
- phiAppVN);
+ newHeapVN =
+ vnStore->VNForFunc(TYP_REF, VNF_PhiHeapDef, vnStore->VNForHandle(ssize_t(blk), 0), phiAppVN);
}
}
GetHeapPerSsaData(blk->bbHeapSsaNumIn)->m_vnPair.SetLiberal(newHeapVN);
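The heap-phi handling above collapses the phi when every incoming heap value number is the same defined value, and otherwise materializes a fresh PhiHeapDef so the block gets its own definition point. A self-contained sketch of that collapse-or-fresh rule with illustrative value-number integers:

    #include <cstdio>
    #include <vector>

    const int NoVN = -1;

    int HeapVNAtBlockEntry(const std::vector<int>& incomingHeapVNs, int freshPhiDefVN)
    {
        bool allSame = true;
        int  sameVN  = incomingHeapVNs.empty() ? NoVN : incomingHeapVNs[0];
        for (int vn : incomingHeapVNs)
        {
            if (vn == NoVN || vn != sameVN)
            {
                allSame = false;
            }
        }
        // Reuse the common value if all args agree; otherwise make a fresh per-block def.
        return allSame ? sameVN : freshPhiDefVN;
    }

    int main()
    {
        printf("%d\n", HeapVNAtBlockEntry({42, 42, 42}, 100)); // all agree -> 42
        printf("%d\n", HeapVNAtBlockEntry({42, 7}, 100));      // disagree  -> fresh phi def 100
        return 0;
    }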
@@ -4337,11 +4450,11 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
#endif // DEBUG
// Now iterate over the remaining statements, and their trees.
- for (GenTreePtr stmt = firstNonPhi; stmt != NULL; stmt = stmt->gtNext)
+ for (GenTreePtr stmt = firstNonPhi; stmt != nullptr; stmt = stmt->gtNext)
{
assert(stmt->IsStatement());
-#ifdef DEBUG
+#ifdef DEBUG
compCurStmtNum++;
if (verbose)
{
@@ -4356,14 +4469,16 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk, bool newVNsForPhis)
fgValueNumberTree(tree);
}
-#ifdef DEBUG
+#ifdef DEBUG
if (verbose)
{
printf("\n***** BB%02u, stmt %d (after)\n", blk->bbNum, compCurStmtNum);
gtDispTree(stmt->gtStmt.gtStmtExpr);
printf("\n");
if (stmt->gtNext)
+ {
printf("---------\n");
+ }
}
#endif
}
@@ -4381,19 +4496,22 @@ ValueNum Compiler::fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned i
// "loopNum" is the innermost loop for which "blk" is the entry; find the outermost one.
assert(innermostLoopNum != BasicBlock::NOT_IN_LOOP);
unsigned loopsInNest = innermostLoopNum;
- unsigned loopNum = innermostLoopNum;
+ unsigned loopNum = innermostLoopNum;
while (loopsInNest != BasicBlock::NOT_IN_LOOP)
{
- if (optLoopTable[loopsInNest].lpEntry != entryBlock) break;
- loopNum = loopsInNest;
+ if (optLoopTable[loopsInNest].lpEntry != entryBlock)
+ {
+ break;
+ }
+ loopNum = loopsInNest;
loopsInNest = optLoopTable[loopsInNest].lpParent;
}
#ifdef DEBUG
if (verbose)
{
- printf("Computing heap state for block BB%02u, entry block for loops %d to %d:\n",
- entryBlock->bbNum, innermostLoopNum, loopNum);
+ printf("Computing heap state for block BB%02u, entry block for loops %d to %d:\n", entryBlock->bbNum,
+ innermostLoopNum, loopNum);
}
#endif // DEBUG
@@ -4404,8 +4522,7 @@ ValueNum Compiler::fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned i
#ifdef DEBUG
if (verbose)
{
- printf(" Loop %d has heap havoc effect; heap state is new fresh $%x.\n",
- loopNum, res);
+ printf(" Loop %d has heap havoc effect; heap state is new fresh $%x.\n", loopNum, res);
}
#endif // DEBUG
return res;
@@ -4414,8 +4531,8 @@ ValueNum Compiler::fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned i
// Otherwise, find the predecessors of the entry block that are not in the loop.
// If there is only one such, use its heap value as the "base." If more than one,
// use a new unique heap VN.
- BasicBlock* nonLoopPred = nullptr;
- bool multipleNonLoopPreds = false;
+ BasicBlock* nonLoopPred = nullptr;
+ bool multipleNonLoopPreds = false;
for (flowList* pred = BlockPredsWithEH(entryBlock); pred != nullptr; pred = pred->flNext)
{
BasicBlock* predBlock = pred->flBlock;
@@ -4430,7 +4547,8 @@ ValueNum Compiler::fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned i
#ifdef DEBUG
if (verbose)
{
- printf(" Entry block has >1 non-loop preds: (at least) BB%02u and BB%02u.\n", nonLoopPred->bbNum, predBlock->bbNum);
+ printf(" Entry block has >1 non-loop preds: (at least) BB%02u and BB%02u.\n", nonLoopPred->bbNum,
+ predBlock->bbNum);
}
#endif // DEBUG
multipleNonLoopPreds = true;
@@ -4453,7 +4571,8 @@ ValueNum Compiler::fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned i
assert(nonLoopPred != nullptr);
// What is its heap post-state?
ValueNum newHeapVN = GetHeapPerSsaData(nonLoopPred->bbHeapSsaNumOut)->m_vnPair.GetLiberal();
- assert(newHeapVN != ValueNumStore::NoVN); // We must have processed the single non-loop pred before reaching the loop entry.
+ assert(newHeapVN !=
+ ValueNumStore::NoVN); // We must have processed the single non-loop pred before reaching the loop entry.
#ifdef DEBUG
if (verbose)
@@ -4469,8 +4588,8 @@ ValueNum Compiler::fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned i
{
for (Compiler::LoopDsc::FieldHandleSet::KeyIterator ki = fieldsMod->Begin(); !ki.Equal(fieldsMod->End()); ++ki)
{
- CORINFO_FIELD_HANDLE fldHnd = ki.Get();
- ValueNum fldHndVN = vnStore->VNForHandle(ssize_t(fldHnd), GTF_ICON_FIELD_HDL);
+ CORINFO_FIELD_HANDLE fldHnd = ki.Get();
+ ValueNum fldHndVN = vnStore->VNForHandle(ssize_t(fldHnd), GTF_ICON_FIELD_HDL);
#ifdef DEBUG
if (verbose)
@@ -4488,9 +4607,10 @@ ValueNum Compiler::fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned i
}
// Now do the array maps.
Compiler::LoopDsc::ClassHandleSet* elemTypesMod = optLoopTable[loopNum].lpArrayElemTypesModified;
- if (elemTypesMod != nullptr)
+ if (elemTypesMod != nullptr)
{
- for (Compiler::LoopDsc::ClassHandleSet::KeyIterator ki = elemTypesMod->Begin(); !ki.Equal(elemTypesMod->End()); ++ki)
+ for (Compiler::LoopDsc::ClassHandleSet::KeyIterator ki = elemTypesMod->Begin(); !ki.Equal(elemTypesMod->End());
+ ++ki)
{
CORINFO_CLASS_HANDLE elemClsHnd = ki.Get();
@@ -4511,8 +4631,8 @@ ValueNum Compiler::fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned i
#endif // DEBUG
ValueNum elemTypeVN = vnStore->VNForHandle(ssize_t(elemClsHnd), GTF_ICON_CLASS_HDL);
- ValueNum uniqueVN = vnStore->VNForExpr(TYP_REF);
- newHeapVN = vnStore->VNForMapStore(TYP_REF, newHeapVN, elemTypeVN, uniqueVN);
+ ValueNum uniqueVN = vnStore->VNForExpr(TYP_REF);
+ newHeapVN = vnStore->VNForMapStore(TYP_REF, newHeapVN, elemTypeVN, uniqueVN);
}
}
@@ -4525,8 +4645,7 @@ ValueNum Compiler::fgHeapVNForLoopSideEffects(BasicBlock* entryBlock, unsigned i
return newHeapVN;
}
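fgHeapVNForLoopSideEffects starts from the heap state of the (single) non-loop predecessor and then conservatively overwrites every field and array-element type the loop may modify with a fresh value. A self-contained sketch of that summary step, using a plain map in place of the real map-store value numbers:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    using Heap = std::map<std::string, int>; // selector -> value id (illustrative)

    int FreshValue()
    {
        static int next = 1000;
        return next++; // stands in for a new, unique VN
    }

    Heap HeapAtLoopEntry(const Heap& predHeap, const std::vector<std::string>& modifiedSelectors)
    {
        Heap entryHeap = predHeap;             // "base" state from the non-loop predecessor
        for (const std::string& sel : modifiedSelectors)
            entryHeap[sel] = FreshValue();     // modeled as a store of an unknown value
        return entryHeap;
    }

    int main()
    {
        Heap pred  = {{"Field:S.x", 1}, {"Elem:int[]", 2}};
        Heap entry = HeapAtLoopEntry(pred, {"Elem:int[]"});
        printf("S.x=%d int[]=%d\n", entry["Field:S.x"], entry["Elem:int[]"]);
        return 0;
    }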
-void Compiler::fgMutateHeap(GenTreePtr tree
- DEBUGARG(const char * msg) )
+void Compiler::fgMutateHeap(GenTreePtr tree DEBUGARG(const char* msg))
{
ValueNum newHeapVal = vnStore->VNForExpr(TYP_REF);
@@ -4534,9 +4653,7 @@ void Compiler::fgMutateHeap(GenTreePtr tree
assert(compCurBB->bbHeapDef);
// We make this a phi definition (of only one value), so we can tell what block the definition occurred in.
- fgCurHeapVN = vnStore->VNForFunc(TYP_REF, VNF_PhiHeapDef,
- vnStore->VNForHandle(ssize_t(compCurBB), 0),
- newHeapVal);
+ fgCurHeapVN = vnStore->VNForFunc(TYP_REF, VNF_PhiHeapDef, vnStore->VNForHandle(ssize_t(compCurBB), 0), newHeapVal);
// If we're tracking the heap SSA # caused by this node, record it.
fgValueNumberRecordHeapSsa(tree);
@@ -4577,57 +4694,67 @@ void Compiler::fgValueNumberTreeConst(GenTreePtr tree)
genTreeOps oper = tree->OperGet();
var_types typ = tree->TypeGet();
assert(GenTree::OperIsConst(oper));
-
+
switch (typ)
{
- case TYP_LONG: case TYP_ULONG:
- case TYP_INT: case TYP_UINT:
- case TYP_CHAR: case TYP_SHORT: case TYP_BYTE: case TYP_UBYTE: case TYP_BOOL:
- if (tree->IsCnsIntOrI() && tree->IsIconHandle())
- {
- tree->gtVNPair.SetBoth(vnStore->VNForHandle(ssize_t(tree->gtIntConCommon.IconValue()), tree->GetIconHandleFlag()));
- }
- else if ((typ == TYP_LONG) || (typ == TYP_ULONG))
- {
- tree->gtVNPair.SetBoth(vnStore->VNForLongCon(INT64(tree->gtIntConCommon.LngValue())));
- }
- else
- {
- tree->gtVNPair.SetBoth(vnStore->VNForIntCon(int(tree->gtIntConCommon.IconValue())));
- }
- break;
-
- case TYP_FLOAT:
- tree->gtVNPair.SetBoth(vnStore->VNForFloatCon((float) tree->gtDblCon.gtDconVal));
- break;
- case TYP_DOUBLE:
- tree->gtVNPair.SetBoth(vnStore->VNForDoubleCon(tree->gtDblCon.gtDconVal));
- break;
- case TYP_REF:
- // Null is the only constant. (Except maybe for String?)
- tree->gtVNPair.SetBoth(ValueNumStore::VNForNull());
- break;
+ case TYP_LONG:
+ case TYP_ULONG:
+ case TYP_INT:
+ case TYP_UINT:
+ case TYP_CHAR:
+ case TYP_SHORT:
+ case TYP_BYTE:
+ case TYP_UBYTE:
+ case TYP_BOOL:
+ if (tree->IsCnsIntOrI() && tree->IsIconHandle())
+ {
+ tree->gtVNPair.SetBoth(
+ vnStore->VNForHandle(ssize_t(tree->gtIntConCommon.IconValue()), tree->GetIconHandleFlag()));
+ }
+ else if ((typ == TYP_LONG) || (typ == TYP_ULONG))
+ {
+ tree->gtVNPair.SetBoth(vnStore->VNForLongCon(INT64(tree->gtIntConCommon.LngValue())));
+ }
+ else
+ {
+ tree->gtVNPair.SetBoth(vnStore->VNForIntCon(int(tree->gtIntConCommon.IconValue())));
+ }
+ break;
- case TYP_BYREF:
- if (tree->gtIntConCommon.IconValue() == 0)
+ case TYP_FLOAT:
+ tree->gtVNPair.SetBoth(vnStore->VNForFloatCon((float)tree->gtDblCon.gtDconVal));
+ break;
+ case TYP_DOUBLE:
+ tree->gtVNPair.SetBoth(vnStore->VNForDoubleCon(tree->gtDblCon.gtDconVal));
+ break;
+ case TYP_REF:
+ // Null is the only constant. (Except maybe for String?)
tree->gtVNPair.SetBoth(ValueNumStore::VNForNull());
- else
- {
- assert(tree->IsCnsIntOrI());
+ break;
- if (tree->IsIconHandle())
+ case TYP_BYREF:
+ if (tree->gtIntConCommon.IconValue() == 0)
{
- tree->gtVNPair.SetBoth(vnStore->VNForHandle(ssize_t(tree->gtIntConCommon.IconValue()), tree->GetIconHandleFlag()));
+ tree->gtVNPair.SetBoth(ValueNumStore::VNForNull());
}
else
{
- tree->gtVNPair.SetBoth(vnStore->VNForByrefCon(tree->gtIntConCommon.IconValue()));
+ assert(tree->IsCnsIntOrI());
+
+ if (tree->IsIconHandle())
+ {
+ tree->gtVNPair.SetBoth(
+ vnStore->VNForHandle(ssize_t(tree->gtIntConCommon.IconValue()), tree->GetIconHandleFlag()));
+ }
+ else
+ {
+ tree->gtVNPair.SetBoth(vnStore->VNForByrefCon(tree->gtIntConCommon.IconValue()));
+ }
}
- }
- break;
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
}
@@ -4670,17 +4797,18 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
{
unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree);
- ValueNum initBlkVN = ValueNumStore::NoVN;
+ ValueNum initBlkVN = ValueNumStore::NoVN;
GenTreePtr initConst = tree->gtGetOp1()->gtGetOp2();
if (isEntire && initConst->OperGet() == GT_CNS_INT)
{
- unsigned initVal = 0xFF & (unsigned) initConst->AsIntConCommon()->IconValue();
+ unsigned initVal = 0xFF & (unsigned)initConst->AsIntConCommon()->IconValue();
if (initVal == 0)
{
initBlkVN = vnStore->VNZeroForType(lclVarTree->TypeGet());
}
}
- ValueNum lclVarVN = (initBlkVN != ValueNumStore::NoVN) ? initBlkVN
+ ValueNum lclVarVN = (initBlkVN != ValueNumStore::NoVN)
+ ? initBlkVN
: vnStore->VNForExpr(var_types(lvaTable[lclNum].lvType));
lvaTable[lclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair.SetBoth(lclVarVN);
@@ -4691,7 +4819,7 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
Compiler::printTreeID(tree);
printf(" ");
gtDispNodeName(tree);
- printf(" V%02u/%d => ",lclNum, lclDefSsaNum);
+ printf(" V%02u/%d => ", lclNum, lclDefSsaNum);
vnPrint(lclVarVN, 1);
printf("\n");
}
@@ -4713,8 +4841,8 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
assert(tree->OperIsCopyBlkOp());
// TODO-Cleanup: We should factor things so that we uniformly rely on "PtrTo" VN's, and
// the heap cases can be shared with assignments.
- GenTreeLclVarCommon* lclVarTree = NULL;
- bool isEntire = false;
+ GenTreeLclVarCommon* lclVarTree = nullptr;
+ bool isEntire = false;
// Note that we don't care about exceptions here, since we're only using the values
// to perform an assignment (which happens after any exceptions are raised...)
@@ -4723,8 +4851,8 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
// Should not have been recorded as updating the heap.
assert(!GetHeapSsaMap()->Lookup(tree, &heapSsaNum));
- unsigned lhsLclNum = lclVarTree->GetLclNum();
- LclVarDsc* rhsVarDsc = &lvaTable[lhsLclNum];
+ unsigned lhsLclNum = lclVarTree->GetLclNum();
+ LclVarDsc* rhsVarDsc = &lvaTable[lhsLclNum];
// If it's excluded from SSA, don't need to do anything.
if (!fgExcludeFromSsa(lhsLclNum))
{
@@ -4739,28 +4867,30 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
VNFuncApp lhsAddrFuncApp;
vnStore->GetVNFunc(lhsAddrVN, &lhsAddrFuncApp);
assert(lhsAddrFuncApp.m_func == VNF_PtrToLoc);
- assert(vnStore->IsVNConstant(lhsAddrFuncApp.m_args[0]) && vnStore->ConstantValue<unsigned>(lhsAddrFuncApp.m_args[0]) == lhsLclNum);
+ assert(vnStore->IsVNConstant(lhsAddrFuncApp.m_args[0]) &&
+ vnStore->ConstantValue<unsigned>(lhsAddrFuncApp.m_args[0]) == lhsLclNum);
FieldSeqNode* lhsFldSeq = vnStore->FieldSeqVNToFieldSeq(lhsAddrFuncApp.m_args[1]);
// Now we need to get the proper RHS.
- GenTreePtr srcAddr = tree->gtOp.gtOp1->gtOp.gtOp2;
- VNFuncApp srcAddrFuncApp;
+ GenTreePtr srcAddr = tree->gtOp.gtOp1->gtOp.gtOp2;
+ VNFuncApp srcAddrFuncApp;
GenTreeLclVarCommon* rhsLclVarTree = nullptr;
- FieldSeqNode* rhsFldSeq = nullptr;
- ValueNumPair rhsVNPair;
- bool isNewUniq = false;
+ FieldSeqNode* rhsFldSeq = nullptr;
+ ValueNumPair rhsVNPair;
+ bool isNewUniq = false;
if (srcAddr->IsLocalAddrExpr(this, &rhsLclVarTree, &rhsFldSeq))
{
- unsigned rhsLclNum = rhsLclVarTree->GetLclNum();
- LclVarDsc* rhsVarDsc = &lvaTable[rhsLclNum];
+ unsigned rhsLclNum = rhsLclVarTree->GetLclNum();
+ LclVarDsc* rhsVarDsc = &lvaTable[rhsLclNum];
if (fgExcludeFromSsa(rhsLclNum) || rhsFldSeq == FieldSeqStore::NotAField())
{
isNewUniq = true;
}
else
{
- rhsVNPair = lvaTable[rhsLclVarTree->GetLclNum()].GetPerSsaData(rhsLclVarTree->GetSsaNum())->m_vnPair;
+ rhsVNPair =
+ lvaTable[rhsLclVarTree->GetLclNum()].GetPerSsaData(rhsLclVarTree->GetSsaNum())->m_vnPair;
var_types indType = rhsLclVarTree->TypeGet();
rhsVNPair = vnStore->VNPairApplySelectors(rhsVNPair, rhsFldSeq, indType);
@@ -4770,13 +4900,14 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
{
if (srcAddrFuncApp.m_func == VNF_PtrToStatic)
{
- var_types indType = lclVarTree->TypeGet();
+ var_types indType = lclVarTree->TypeGet();
ValueNum fieldSeqVN = srcAddrFuncApp.m_args[0];
FieldSeqNode* zeroOffsetFldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(srcAddr, &zeroOffsetFldSeq))
{
- fieldSeqVN = vnStore->FieldSeqVNAppend(fieldSeqVN, vnStore->VNForFieldSeq(zeroOffsetFldSeq));
+ fieldSeqVN =
+ vnStore->FieldSeqVNAppend(fieldSeqVN, vnStore->VNForFieldSeq(zeroOffsetFldSeq));
}
FieldSeqNode* fldSeqForStaticVar = vnStore->FieldSeqVNToFieldSeq(fieldSeqVN);
@@ -4784,10 +4915,12 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
if (fldSeqForStaticVar != FieldSeqStore::NotAField())
{
// We model statics as indices into the heap variable.
- ValueNum selectedStaticVar;
- size_t structSize = 0;
- selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurHeapVN, fldSeqForStaticVar, &structSize);
- selectedStaticVar = vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, indType, structSize);
+ ValueNum selectedStaticVar;
+ size_t structSize = 0;
+ selectedStaticVar =
+ vnStore->VNApplySelectors(VNK_Liberal, fgCurHeapVN, fldSeqForStaticVar, &structSize);
+ selectedStaticVar =
+ vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, indType, structSize);
rhsVNPair.SetLiberal(selectedStaticVar);
rhsVNPair.SetConservative(vnStore->VNForExpr(indType));
@@ -4800,7 +4933,8 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
}
else if (srcAddrFuncApp.m_func == VNF_PtrToArrElem)
{
- ValueNum elemLib = fgValueNumberArrIndexVal(nullptr, &srcAddrFuncApp, vnStore->VNForEmptyExcSet());
+ ValueNum elemLib =
+ fgValueNumberArrIndexVal(nullptr, &srcAddrFuncApp, vnStore->VNForEmptyExcSet());
rhsVNPair.SetLiberal(elemLib);
rhsVNPair.SetConservative(vnStore->VNForExpr(lclVarTree->TypeGet()));
}
@@ -4811,7 +4945,7 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
}
else
{
- isNewUniq = true;
+ isNewUniq = true;
}
if (lhsFldSeq == FieldSeqStore::NotAField())
@@ -4833,14 +4967,15 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
else if (!isNewUniq)
{
ValueNumPair oldLhsVNPair = lvaTable[lhsLclNum].GetPerSsaData(lclVarTree->GetSsaNum())->m_vnPair;
- rhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, lhsFldSeq, rhsVNPair, lclVarTree->TypeGet());
+ rhsVNPair =
+ vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, lhsFldSeq, rhsVNPair, lclVarTree->TypeGet());
}
if (isNewUniq)
{
rhsVNPair.SetBoth(vnStore->VNForExpr(lclVarTree->TypeGet()));
}
-
+
lvaTable[lhsLclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = vnStore->VNPNormVal(rhsVNPair);
#ifdef DEBUG
@@ -4850,14 +4985,15 @@ void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
Compiler::printTreeID(tree);
printf(" assigned VN to local var V%02u/%d: ", lhsLclNum, lclDefSsaNum);
if (isNewUniq)
+ {
printf("new uniq ");
+ }
vnpPrint(rhsVNPair, 1);
printf("\n");
}
#endif // DEBUG
-
- }
- }
+ }
+ }
else
{
// For now, arbitrary side effect on Heap.
@@ -4876,7 +5012,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
#ifdef FEATURE_SIMD
// TODO-CQ: For now TYP_SIMD values are not handled by value numbering to be amenable for CSE'ing.
- if (oper == GT_SIMD)
+ if (oper == GT_SIMD)
{
tree->gtVNPair.SetBoth(vnStore->VNForExpr(TYP_UNKNOWN));
return;
@@ -4892,13 +5028,14 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
{
switch (oper)
{
- case GT_LCL_VAR:
- case GT_REG_VAR:
+ case GT_LCL_VAR:
+ case GT_REG_VAR:
{
- GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
- unsigned lclNum = lcl->gtLclNum;
+ GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
+ unsigned lclNum = lcl->gtLclNum;
- if ((lcl->gtFlags & GTF_VAR_DEF) == 0 || (lcl->gtFlags & GTF_VAR_USEASG)) // If it is a "pure" def, will handled as part of the assignment.
+ if ((lcl->gtFlags & GTF_VAR_DEF) == 0 ||
+ (lcl->gtFlags & GTF_VAR_USEASG)) // If it is a "pure" def, it will be handled as part of the assignment.
{
LclVarDsc* varDsc = &lvaTable[lcl->gtLclNum];
if (varDsc->lvPromoted && varDsc->lvFieldCnt == 1)
@@ -4917,8 +5054,8 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
else
{
- var_types varType = varDsc->TypeGet();
- ValueNumPair wholeLclVarVNP = varDsc->GetPerSsaData(lcl->gtSsaNum)->m_vnPair;
+ var_types varType = varDsc->TypeGet();
+ ValueNumPair wholeLclVarVNP = varDsc->GetPerSsaData(lcl->gtSsaNum)->m_vnPair;
// Check for mismatched LclVar size
//
@@ -4931,24 +5068,24 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
else // mismatched LclVar definition and LclVar use size
{
- if (typSize < varSize)
+ if (typSize < varSize)
{
// the indirection is reading less than the whole LclVar
// create a new VN that represent the partial value
//
ValueNumPair partialLclVarVNP = vnStore->VNPairForCast(wholeLclVarVNP, typ, varType);
- lcl->gtVNPair = partialLclVarVNP;
+ lcl->gtVNPair = partialLclVarVNP;
}
else
{
assert(typSize > varSize);
// the indirection is reading beyond the end of the field
//
- lcl->gtVNPair.SetBoth(vnStore->VNForExpr(typ)); // return a new unique value number
+ lcl->gtVNPair.SetBoth(vnStore->VNForExpr(typ)); // return a new unique value number
}
}
}
- // Temporary, to make progress.
+ // Temporary, to make progress.
// TODO-CQ: This should become an assert again...
if (lcl->gtVNPair.GetLiberal() == ValueNumStore::NoVN)
{
@@ -4963,7 +5100,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// Case 2) Local variables that represent structs which are assigned using CpBlk.
GenTree* nextNode = lcl->gtNext;
assert((nextNode->gtOper == GT_ADDR && nextNode->gtOp.gtOp1 == lcl) ||
- varTypeIsStruct(lcl->TypeGet()));
+ varTypeIsStruct(lcl->TypeGet()));
lcl->gtVNPair.SetBoth(vnStore->VNForExpr(lcl->TypeGet()));
}
assert(lcl->gtVNPair.BothDefined());
@@ -4979,41 +5116,44 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
LclVarDsc* varDsc = &lvaTable[lcl->gtLclNum];
if (lcl->gtSsaNum != SsaConfig::RESERVED_SSA_NUM)
{
- lvaTable[lclNum].GetPerSsaData(lcl->gtSsaNum)->m_vnPair.SetBoth(vnStore->VNForExpr(lcl->TypeGet()));
+ lvaTable[lclNum]
+ .GetPerSsaData(lcl->gtSsaNum)
+ ->m_vnPair.SetBoth(vnStore->VNForExpr(lcl->TypeGet()));
}
lcl->gtVNPair = ValueNumPair(); // Avoid confusion -- we don't set the VN of a lcl being defined.
}
}
break;
- case GT_FTN_ADDR:
- // Use the value of the function pointer (actually, a method handle.)
- tree->gtVNPair.SetBoth(vnStore->VNForHandle(ssize_t(tree->gtFptrVal.gtFptrMethod), GTF_ICON_METHOD_HDL));
- break;
+ case GT_FTN_ADDR:
+ // Use the value of the function pointer (actually, a method handle.)
+ tree->gtVNPair.SetBoth(
+ vnStore->VNForHandle(ssize_t(tree->gtFptrVal.gtFptrMethod), GTF_ICON_METHOD_HDL));
+ break;
// This group passes through a value from a child node.
- case GT_RET_EXPR:
- tree->SetVNsFromNode(tree->gtRetExpr.gtInlineCandidate);
- break;
+ case GT_RET_EXPR:
+ tree->SetVNsFromNode(tree->gtRetExpr.gtInlineCandidate);
+ break;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
{
GenTreeLclFld* lclFld = tree->AsLclFld();
- assert(fgExcludeFromSsa(lclFld->GetLclNum()) || lclFld->gtFieldSeq != NULL);
+ assert(fgExcludeFromSsa(lclFld->GetLclNum()) || lclFld->gtFieldSeq != nullptr);
// If this is a (full) def, then the variable will be labeled with the new SSA number,
// which will not have a value. We skip; it will be handled by one of the assignment-like
// forms (assignment, or initBlk or copyBlk).
if (((lclFld->gtFlags & GTF_VAR_DEF) == 0) || (lclFld->gtFlags & GTF_VAR_USEASG))
{
- unsigned lclNum = lclFld->GetLclNum();
- unsigned ssaNum = lclFld->GetSsaNum();
- LclVarDsc* varDsc = &lvaTable[lclNum];
+ unsigned lclNum = lclFld->GetLclNum();
+ unsigned ssaNum = lclFld->GetSsaNum();
+ LclVarDsc* varDsc = &lvaTable[lclNum];
if (ssaNum == SsaConfig::UNINIT_SSA_NUM)
{
if (varDsc->GetPerSsaData(ssaNum)->m_vnPair.GetLiberal() == ValueNumStore::NoVN)
{
- ValueNum vnForLcl = vnStore->VNForExpr(lclFld->TypeGet());
+ ValueNum vnForLcl = vnStore->VNForExpr(lclFld->TypeGet());
varDsc->GetPerSsaData(ssaNum)->m_vnPair = ValueNumPair(vnForLcl, vnForLcl);
}
}
@@ -5028,115 +5168,120 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
else
{
ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair;
- tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, lclFld->gtFieldSeq, indType);
+ tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, lclFld->gtFieldSeq, indType);
}
}
}
break;
// The ones below here all get a new unique VN -- but for various reasons, explained after each.
- case GT_CATCH_ARG:
- // We know nothing about the value of a caught expression.
- tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
- break;
-
- case GT_CLS_VAR:
- // Skip GT_CLS_VAR nodes that are the LHS of an assignment. (We labeled these earlier.)
- // We will "evaluate" this as part of the assignment. (Unless we're explicitly told by
- // the caller to evaluate anyway -- perhaps the assignment is an "op=" assignment.)
- //
- if (((tree->gtFlags & GTF_CLS_VAR_ASG_LHS) == 0) || evalAsgLhsInd)
- {
- bool isVolatile = (tree->gtFlags & GTF_FLD_VOLATILE) != 0;
+ case GT_CATCH_ARG:
+ // We know nothing about the value of a caught expression.
+ tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
+ break;
- if (isVolatile)
+ case GT_CLS_VAR:
+ // Skip GT_CLS_VAR nodes that are the LHS of an assignment. (We labeled these earlier.)
+ // We will "evaluate" this as part of the assignment. (Unless we're explicitly told by
+ // the caller to evaluate anyway -- perhaps the assignment is an "op=" assignment.)
+ //
+ if (((tree->gtFlags & GTF_CLS_VAR_ASG_LHS) == 0) || evalAsgLhsInd)
{
- // For Volatile indirection, first mutate the global heap
- fgMutateHeap(tree DEBUGARG("GTF_FLD_VOLATILE - read"));
- }
+ bool isVolatile = (tree->gtFlags & GTF_FLD_VOLATILE) != 0;
- // We just mutate the heap if isVolatile is true, and then do the read as normal.
- //
- // This allows:
- // 1: read s;
- // 2: volatile read s;
- // 3: read s;
- //
- // We should never assume that the values read by 1 and 2 are the same (because the heap was mutated
- // in between them)... but we *should* be able to prove that the values read in 2 and 3 are the same.
- //
+ if (isVolatile)
+ {
+ // For Volatile indirection, first mutate the global heap
+ fgMutateHeap(tree DEBUGARG("GTF_FLD_VOLATILE - read"));
+ }
- ValueNumPair clsVarVNPair;
+ // We just mutate the heap if isVolatile is true, and then do the read as normal.
+ //
+ // This allows:
+ // 1: read s;
+ // 2: volatile read s;
+ // 3: read s;
+ //
+ // We should never assume that the values read by 1 and 2 are the same (because the heap was mutated
+ // in between them)... but we *should* be able to prove that the values read in 2 and 3 are the
+ // same.
+ //
- // If the static field handle is for a struct type field, then the value of the static
- // is a "ref" to the boxed struct -- treat it as the address of the static (we assume that a
- // first element offset will be added to get to the actual struct...)
- GenTreeClsVar* clsVar = tree->AsClsVar();
- FieldSeqNode* fldSeq = clsVar->gtFieldSeq;
- assert(fldSeq != nullptr); // We need to have one.
- ValueNum selectedStaticVar = ValueNumStore::NoVN;
- if (gtIsStaticFieldPtrToBoxedStruct(clsVar->TypeGet(), fldSeq->m_fieldHnd))
- {
- clsVarVNPair.SetBoth(vnStore->VNForFunc(TYP_BYREF, VNF_PtrToStatic, vnStore->VNForFieldSeq(fldSeq)));
- }
- else
- {
- // This is a reference to heap memory.
- // We model statics as indices into the heap variable.
+ ValueNumPair clsVarVNPair;
- FieldSeqNode* fldSeqForStaticVar = GetFieldSeqStore()->CreateSingleton(tree->gtClsVar.gtClsVarHnd);
- size_t structSize = 0;
- selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurHeapVN, fldSeqForStaticVar, &structSize);
- selectedStaticVar = vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, tree->TypeGet(), structSize);
+ // If the static field handle is for a struct type field, then the value of the static
+ // is a "ref" to the boxed struct -- treat it as the address of the static (we assume that a
+ // first element offset will be added to get to the actual struct...)
+ GenTreeClsVar* clsVar = tree->AsClsVar();
+ FieldSeqNode* fldSeq = clsVar->gtFieldSeq;
+ assert(fldSeq != nullptr); // We need to have one.
+ ValueNum selectedStaticVar = ValueNumStore::NoVN;
+ if (gtIsStaticFieldPtrToBoxedStruct(clsVar->TypeGet(), fldSeq->m_fieldHnd))
+ {
+ clsVarVNPair.SetBoth(
+ vnStore->VNForFunc(TYP_BYREF, VNF_PtrToStatic, vnStore->VNForFieldSeq(fldSeq)));
+ }
+ else
+ {
+ // This is a reference to heap memory.
+ // We model statics as indices into the heap variable.
- clsVarVNPair.SetLiberal(selectedStaticVar);
- // The conservative interpretation always gets a new, unique VN.
- clsVarVNPair.SetConservative(vnStore->VNForExpr(tree->TypeGet()));
- }
+ FieldSeqNode* fldSeqForStaticVar =
+ GetFieldSeqStore()->CreateSingleton(tree->gtClsVar.gtClsVarHnd);
+ size_t structSize = 0;
+ selectedStaticVar =
+ vnStore->VNApplySelectors(VNK_Liberal, fgCurHeapVN, fldSeqForStaticVar, &structSize);
+ selectedStaticVar =
+ vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, tree->TypeGet(), structSize);
+
+ clsVarVNPair.SetLiberal(selectedStaticVar);
+ // The conservative interpretation always gets a new, unique VN.
+ clsVarVNPair.SetConservative(vnStore->VNForExpr(tree->TypeGet()));
+ }
- // The ValueNum returned must represent the full-sized IL-Stack value
- // If we need to widen this value then we need to introduce a VNF_Cast here to represent
- // the widened value. This is necessary since the CSE package can replace all occurances
- // of a given ValueNum with a LclVar that is a full-sized IL-Stack value
- //
- if (varTypeIsSmall(tree->TypeGet()))
- {
- var_types castToType = tree->TypeGet();
- clsVarVNPair = vnStore->VNPairForCast(clsVarVNPair, castToType, castToType);
+ // The ValueNum returned must represent the full-sized IL-Stack value
+ // If we need to widen this value then we need to introduce a VNF_Cast here to represent
+ // the widened value. This is necessary since the CSE package can replace all occurrences
+ // of a given ValueNum with a LclVar that is a full-sized IL-Stack value
+ //
+ if (varTypeIsSmall(tree->TypeGet()))
+ {
+ var_types castToType = tree->TypeGet();
+ clsVarVNPair = vnStore->VNPairForCast(clsVarVNPair, castToType, castToType);
+ }
+ tree->gtVNPair = clsVarVNPair;
}
- tree->gtVNPair = clsVarVNPair;
- }
- break;
+ break;
- case GT_MEMORYBARRIER: // Leaf
- // For MEMORYBARRIER add an arbitrary side effect on Heap.
- fgMutateHeap(tree DEBUGARG("MEMORYBARRIER"));
- break;
+ case GT_MEMORYBARRIER: // Leaf
+ // For MEMORYBARRIER add an arbitrary side effect on Heap.
+ fgMutateHeap(tree DEBUGARG("MEMORYBARRIER"));
+ break;
// These do not represent values.
- case GT_NO_OP:
- case GT_JMP: // Control flow
- case GT_LABEL: // Control flow
+ case GT_NO_OP:
+ case GT_JMP: // Control flow
+ case GT_LABEL: // Control flow
#if !FEATURE_EH_FUNCLETS
- case GT_END_LFIN: // Control flow
+ case GT_END_LFIN: // Control flow
#endif
- case GT_ARGPLACE:
- // This node is a standin for an argument whose value will be computed later. (Perhaps it's
- // a register argument, and we don't want to preclude use of the register in arg evaluation yet.)
- // We give this a "fake" value number now; if the call in which it occurs cares about the
- // value (e.g., it's a helper call whose result is a function of argument values) we'll reset
- // this later, when the later args have been assigned VNs.
- tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
- break;
+ case GT_ARGPLACE:
+ // This node is a stand-in for an argument whose value will be computed later. (Perhaps it's
+ // a register argument, and we don't want to preclude use of the register in arg evaluation yet.)
+ // We give this a "fake" value number now; if the call in which it occurs cares about the
+ // value (e.g., it's a helper call whose result is a function of argument values) we'll reset
+ // this later, when the later args have been assigned VNs.
+ tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
+ break;
- case GT_PHI_ARG:
- // This one is special because we should never process it in this method: it should
- // always be taken care of, when needed, during pre-processing of a blocks phi definitions.
- assert(false);
- break;
-
- default:
- unreached();
+ case GT_PHI_ARG:
+ // This one is special because we should never process it in this method: it should
+ // always be taken care of, when needed, during pre-processing of a block's phi definitions.
+ assert(false);
+ break;
+
+ default:
+ unreached();
}
}
else if (GenTree::OperIsSimple(oper))
@@ -5157,38 +5302,49 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
{
rhsVNPair = rhs->gtVNPair;
}
- else // Must be an "op="
+ else // Must be an "op="
{
// If the LHS is an IND, we didn't evaluate it when we visited it previously.
// But we didn't know that the parent was an op=. We do now, so go back and evaluate it.
// (We actually check if the effective val is the IND. We will have evaluated any non-last
// args of an LHS comma already -- including their heap effects.)
- GenTreePtr lhsVal = lhs->gtEffectiveVal(/*commaOnly*/true);
- if ((lhsVal->OperGet() == GT_IND) ||
- (lhsVal->OperGet() == GT_CLS_VAR) )
+ GenTreePtr lhsVal = lhs->gtEffectiveVal(/*commaOnly*/ true);
+ if ((lhsVal->OperGet() == GT_IND) || (lhsVal->OperGet() == GT_CLS_VAR))
{
- fgValueNumberTree(lhsVal, /*evalAsgLhsInd*/true);
+ fgValueNumberTree(lhsVal, /*evalAsgLhsInd*/ true);
}
// Now we can make this assertion:
assert(lhsVal->gtVNPair.BothDefined());
genTreeOps op = GenTree::OpAsgToOper(oper);
if (GenTree::OperIsBinary(op))
{
- ValueNumPair lhsNormVNP; ValueNumPair lhsExcVNP; lhsExcVNP.SetBoth(ValueNumStore::VNForEmptyExcSet());
+ ValueNumPair lhsNormVNP;
+ ValueNumPair lhsExcVNP;
+ lhsExcVNP.SetBoth(ValueNumStore::VNForEmptyExcSet());
vnStore->VNPUnpackExc(lhsVal->gtVNPair, &lhsNormVNP, &lhsExcVNP);
assert(rhs->gtVNPair.BothDefined());
- ValueNumPair rhsNormVNP; ValueNumPair rhsExcVNP; rhsExcVNP.SetBoth(ValueNumStore::VNForEmptyExcSet());
+ ValueNumPair rhsNormVNP;
+ ValueNumPair rhsExcVNP;
+ rhsExcVNP.SetBoth(ValueNumStore::VNForEmptyExcSet());
vnStore->VNPUnpackExc(rhs->gtVNPair, &rhsNormVNP, &rhsExcVNP);
- rhsVNPair = vnStore->VNPWithExc(vnStore->VNPairForFunc(tree->TypeGet(), GetVNFuncForOper(op, (tree->gtFlags & GTF_UNSIGNED) != 0), lhsNormVNP, rhsNormVNP),
+ rhsVNPair = vnStore->VNPWithExc(vnStore->VNPairForFunc(tree->TypeGet(),
+ GetVNFuncForOper(op, (tree->gtFlags &
+ GTF_UNSIGNED) != 0),
+ lhsNormVNP, rhsNormVNP),
vnStore->VNPExcSetUnion(lhsExcVNP, rhsExcVNP));
}
else
{
// As of now, GT_CHS ==> GT_NEG is the only pattern fitting this.
assert(GenTree::OperIsUnary(op));
- ValueNumPair lhsNormVNP; ValueNumPair lhsExcVNP; lhsExcVNP.SetBoth(ValueNumStore::VNForEmptyExcSet());
+ ValueNumPair lhsNormVNP;
+ ValueNumPair lhsExcVNP;
+ lhsExcVNP.SetBoth(ValueNumStore::VNForEmptyExcSet());
vnStore->VNPUnpackExc(lhsVal->gtVNPair, &lhsNormVNP, &lhsExcVNP);
- rhsVNPair = vnStore->VNPWithExc(vnStore->VNPairForFunc(tree->TypeGet(), GetVNFuncForOper(op, (tree->gtFlags & GTF_UNSIGNED) != 0), lhsNormVNP),
+ rhsVNPair = vnStore->VNPWithExc(vnStore->VNPairForFunc(tree->TypeGet(),
+ GetVNFuncForOper(op, (tree->gtFlags &
+ GTF_UNSIGNED) != 0),
+ lhsNormVNP),
lhsExcVNP);
}
}
@@ -5201,7 +5357,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// Now that we've labeled the assignment as a whole, we don't care about exceptions.
rhsVNPair = vnStore->VNPNormVal(rhsVNPair);
- // If the types of the rhs and lhs are different then we
+ // If the types of the rhs and lhs are different then we
// may want to change the ValueNumber assigned to the lhs.
//
if (rhs->TypeGet() != lhs->TypeGet())
@@ -5230,10 +5386,11 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// LHS will come before the assignment in evaluation order.
switch (lhs->OperGet())
{
- case GT_LCL_VAR: case GT_REG_VAR:
+ case GT_LCL_VAR:
+ case GT_REG_VAR:
{
- GenTreeLclVarCommon* lcl = lhs->AsLclVarCommon();
- unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lcl);
+ GenTreeLclVarCommon* lcl = lhs->AsLclVarCommon();
+ unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lcl);
// Should not have been recorded as updating the heap.
assert(!GetHeapSsaMap()->Lookup(tree, &heapSsaNum));
@@ -5242,7 +5399,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
{
assert(rhsVNPair.GetLiberal() != ValueNumStore::NoVN);
- lhs->gtVNPair = rhsVNPair;
+ lhs->gtVNPair = rhsVNPair;
lvaTable[lcl->gtLclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = rhsVNPair;
#ifdef DEBUG
@@ -5252,7 +5409,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
Compiler::printTreeID(lhs);
printf(" ");
gtDispNodeName(lhs);
- gtDispLeaf(lhs, 0);
+ gtDispLeaf(lhs, nullptr);
printf(" => ");
vnpPrint(lhs->gtVNPair, 1);
printf("\n");
@@ -5266,16 +5423,17 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
{
JITDUMP("Tree ");
Compiler::printTreeID(tree);
- printf(" assigns to local var V%02u; excluded from SSA, so value not tracked.\n", lcl->GetLclNum());
+ printf(" assigns to local var V%02u; excluded from SSA, so value not tracked.\n",
+ lcl->GetLclNum());
}
}
#endif // DEBUG
}
break;
- case GT_LCL_FLD:
+ case GT_LCL_FLD:
{
- GenTreeLclFld* lclFld = lhs->AsLclFld();
- unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclFld);
+ GenTreeLclFld* lclFld = lhs->AsLclFld();
+ unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclFld);
// Should not have been recorded as updating the heap.
assert(!GetHeapSsaMap()->Lookup(tree, &heapSsaNum));
@@ -5293,7 +5451,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
else
{
// We should never have a null field sequence here.
- assert(lclFld->gtFieldSeq != NULL);
+ assert(lclFld->gtFieldSeq != nullptr);
if (lclFld->gtFieldSeq == FieldSeqStore::NotAField())
{
// We don't know what field this represents. Assign a new VN to the whole variable
@@ -5305,15 +5463,15 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// We do know the field sequence.
// The "lclFld" node will be labeled with the SSA number of its "use" identity
// (we looked in a side table above for its "def" identity). Look up that value.
- ValueNumPair oldLhsVNPair = lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclFld->GetSsaNum())->m_vnPair;
- newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair,
- lclFld->gtFieldSeq,
- rhsVNPair, // Pre-value.
- lvaGetActualType(lclFld->gtLclNum));
+ ValueNumPair oldLhsVNPair =
+ lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclFld->GetSsaNum())->m_vnPair;
+ newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, lclFld->gtFieldSeq,
+ rhsVNPair, // Pre-value.
+ lvaGetActualType(lclFld->gtLclNum));
}
}
lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair;
- lhs->gtVNPair = newLhsVNPair;
+ lhs->gtVNPair = newLhsVNPair;
#ifdef DEBUG
if (verbose)
{
@@ -5323,7 +5481,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
Compiler::printTreeID(lhs);
printf(" ");
gtDispNodeName(lhs);
- gtDispLeaf(lhs, 0);
+ gtDispLeaf(lhs, nullptr);
printf(" => ");
vnpPrint(lhs->gtVNPair, 1);
printf("\n");
@@ -5334,12 +5492,12 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
break;
- case GT_PHI_ARG:
- assert(false); // Phi arg cannot be LHS.
+ case GT_PHI_ARG:
+ assert(false); // Phi arg cannot be LHS.
- case GT_IND:
+ case GT_IND:
{
- bool isVolatile = (lhs->gtFlags & GTF_IND_VOLATILE) != 0;
+ bool isVolatile = (lhs->gtFlags & GTF_IND_VOLATILE) != 0;
if (isVolatile)
{
@@ -5349,14 +5507,14 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
GenTreePtr arg = lhs->gtOp.gtOp1;
-
+
// Indicates whether the argument of the IND is the address of a local.
bool wasLocal = false;
lhs->gtVNPair = rhsVNPair;
VNFuncApp funcApp;
- ValueNum argVN = arg->gtVNPair.GetLiberal();
+ ValueNum argVN = arg->gtVNPair.GetLiberal();
bool argIsVNFunc = vnStore->GetVNFunc(vnStore->VNNormVal(argVN), &funcApp);
@@ -5364,11 +5522,12 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// If it is a PtrToLoc, lib and cons VNs will be the same.
if (argIsVNFunc)
{
- IndirectAssignmentAnnotation* pIndirAnnot = NULL; // This will be used if "tree" is an "indirect assignment",
- // explained below.
+ IndirectAssignmentAnnotation* pIndirAnnot =
+ nullptr; // This will be used if "tree" is an "indirect assignment",
+ // explained below.
if (funcApp.m_func == VNF_PtrToLoc)
{
- assert(arg->gtVNPair.BothEqual()); // If it's a PtrToLoc, lib/cons shouldn't differ.
+ assert(arg->gtVNPair.BothEqual()); // If it's a PtrToLoc, lib/cons shouldn't differ.
assert(vnStore->IsVNConstant(funcApp.m_args[0]));
unsigned lclNum = vnStore->ConstantValue<unsigned>(funcApp.m_args[0]);
@@ -5383,11 +5542,11 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// local to a temp, and that temp is our lhs, and we recorded this in a table when we
// made the indirect assignment...or else we have a "rogue" PtrToLoc, one that should
// have made the local in question address-exposed. Assert on that.
- GenTreeLclVarCommon* lclVarTree = NULL;
- bool isEntire = false;
+ GenTreeLclVarCommon* lclVarTree = nullptr;
+ bool isEntire = false;
unsigned lclDefSsaNum = SsaConfig::RESERVED_SSA_NUM;
ValueNumPair newLhsVNPair;
-
+
if (arg->DefinesLocalAddr(this, genTypeSize(lhs->TypeGet()), &lclVarTree, &isEntire))
{
// The local #'s should agree.
@@ -5410,16 +5569,16 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
{
// Don't use the lclVarTree's VN: if it's a local field, it will
// already be dereferenced by its field sequence.
- ValueNumPair oldLhsVNPair = lvaTable[lclVarTree->GetLclNum()].GetPerSsaData(lclVarTree->GetSsaNum())->m_vnPair;
+ ValueNumPair oldLhsVNPair = lvaTable[lclVarTree->GetLclNum()]
+ .GetPerSsaData(lclVarTree->GetSsaNum())
+ ->m_vnPair;
lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree);
- newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair,
- fieldSeq,
- rhsVNPair,
- lhs->TypeGet());
+ newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, fieldSeq,
+ rhsVNPair, lhs->TypeGet());
}
lvaTable[lclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair;
}
- else if (m_indirAssignMap != NULL && GetIndirAssignMap()->Lookup(tree, &pIndirAnnot))
+ else if (m_indirAssignMap != nullptr && GetIndirAssignMap()->Lookup(tree, &pIndirAnnot))
{
// The local #'s should agree.
assert(lclNum == pIndirAnnot->m_lclNum);
@@ -5437,14 +5596,16 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
assert(pIndirAnnot->m_useSsaNum != SsaConfig::RESERVED_SSA_NUM);
assert(!pIndirAnnot->m_isEntire);
assert(pIndirAnnot->m_fieldSeq == fieldSeq);
- ValueNumPair oldLhsVNPair = lvaTable[lclNum].GetPerSsaData(pIndirAnnot->m_useSsaNum)->m_vnPair;
- newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, fieldSeq, rhsVNPair, lhs->TypeGet());
+ ValueNumPair oldLhsVNPair =
+ lvaTable[lclNum].GetPerSsaData(pIndirAnnot->m_useSsaNum)->m_vnPair;
+ newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, fieldSeq,
+ rhsVNPair, lhs->TypeGet());
}
lvaTable[lclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair;
}
else
{
- unreached(); // "Rogue" PtrToLoc, as discussed above.
+ unreached(); // "Rogue" PtrToLoc, as discussed above.
}
#ifdef DEBUG
if (verbose)
@@ -5463,20 +5624,21 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// Was the argument of the GT_IND the address of a local, handled above?
if (!wasLocal)
{
- GenTreePtr obj = nullptr;
- GenTreePtr staticOffset = nullptr;
- FieldSeqNode* fldSeq = nullptr;
+ GenTreePtr obj = nullptr;
+ GenTreePtr staticOffset = nullptr;
+ FieldSeqNode* fldSeq = nullptr;
// Is the LHS an array index expression?
if (argIsVNFunc && funcApp.m_func == VNF_PtrToArrElem)
{
- CORINFO_CLASS_HANDLE elemTypeEq = CORINFO_CLASS_HANDLE(vnStore->ConstantValue<ssize_t>(funcApp.m_args[0]));
- ValueNum arrVN = funcApp.m_args[1];
- ValueNum inxVN = funcApp.m_args[2];
+ CORINFO_CLASS_HANDLE elemTypeEq =
+ CORINFO_CLASS_HANDLE(vnStore->ConstantValue<ssize_t>(funcApp.m_args[0]));
+ ValueNum arrVN = funcApp.m_args[1];
+ ValueNum inxVN = funcApp.m_args[2];
FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[3]);
// Does the child of the GT_IND 'arg' have an associated zero-offset field sequence?
- FieldSeqNode* addrFieldSeq = NULL;
+ FieldSeqNode* addrFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(arg, &addrFieldSeq))
{
fldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fldSeq);
@@ -5491,19 +5653,20 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
#endif // DEBUG
- fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq, rhsVNPair.GetLiberal(), lhs->TypeGet());
+ fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq, rhsVNPair.GetLiberal(),
+ lhs->TypeGet());
fgValueNumberRecordHeapSsa(tree);
}
// It may be that we haven't parsed it yet. Try.
else if (lhs->gtFlags & GTF_IND_ARR_INDEX)
{
ArrayInfo arrInfo;
- bool b = GetArrayInfoMap()->Lookup(lhs, &arrInfo);
+ bool b = GetArrayInfoMap()->Lookup(lhs, &arrInfo);
assert(b);
- ValueNum arrVN = ValueNumStore::NoVN;
- ValueNum inxVN = ValueNumStore::NoVN;
+ ValueNum arrVN = ValueNumStore::NoVN;
+ ValueNum inxVN = ValueNumStore::NoVN;
FieldSeqNode* fldSeq = nullptr;
-
+
// Try to parse it.
GenTreePtr arr = nullptr;
arg->ParseArrayAddress(this, &arrInfo, &arr, &inxVN, &fldSeq);
@@ -5517,7 +5680,8 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// Need to form H[arrType][arr][ind][fldSeq] = rhsVNPair.GetLiberal()
// Get the element type equivalence class representative.
- CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
+ CORINFO_CLASS_HANDLE elemTypeEq =
+ EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
arrVN = arr->gtVNPair.GetLiberal();
FieldSeqNode* zeroOffsetFldSeq = nullptr;
@@ -5526,12 +5690,13 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
fldSeq = GetFieldSeqStore()->Append(fldSeq, zeroOffsetFldSeq);
}
- fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq, rhsVNPair.GetLiberal(), lhs->TypeGet());
+ fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq, rhsVNPair.GetLiberal(),
+ lhs->TypeGet());
fgValueNumberRecordHeapSsa(tree);
}
else if (arg->IsFieldAddr(this, &obj, &staticOffset, &fldSeq))
{
- if (fldSeq == FieldSeqStore::NotAField())
+ if (fldSeq == FieldSeqStore::NotAField())
{
fgMutateHeap(tree DEBUGARG("NotAField"));
}
@@ -5542,12 +5707,14 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
CORINFO_CLASS_HANDLE fldCls = info.compCompHnd->getFieldClass(fldSeq->m_fieldHnd);
if (obj != nullptr)
{
- // Make sure that the class containing it is not a value class (as we are expecting an instance field)
+ // Make sure that the class containing it is not a value class (as we are expecting
+ // an instance field)
assert((info.compCompHnd->getClassAttribs(fldCls) & CORINFO_FLG_VALUECLASS) == 0);
assert(staticOffset == nullptr);
}
#endif // DEBUG
- // Get the first (instance or static) field from field seq. Heap[field] will yield the "field map".
+ // Get the first (instance or static) field from field seq. Heap[field] will yield the
+ // "field map".
if (fldSeq->IsFirstElemFieldSeq())
{
fldSeq = fldSeq->m_next;
@@ -5566,11 +5733,13 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// otherwise it is the type returned from VNApplySelectors above.
var_types firstFieldType = vnStore->TypeOfVN(fldMapVN);
- ValueNum storeVal = rhsVNPair.GetLiberal(); // The value number from the rhs of the assignment
+ ValueNum storeVal =
+ rhsVNPair.GetLiberal(); // The value number from the rhs of the assignment
ValueNum newFldMapVN = ValueNumStore::NoVN;
// when (obj != nullptr) we have an instance field, otherwise a static field
- // when (staticOffset != nullptr) it represents a offset into a static or the call to Shared Static Base
+ // when (staticOffset != nullptr) it represents an offset into a static or the call to
+ // Shared Static Base
if ((obj != nullptr) || (staticOffset != nullptr))
{
ValueNum valAtAddr = fldMapVN;
@@ -5580,37 +5749,43 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
{
// construct the ValueNumber for 'fldMap at obj'
normVal = vnStore->VNNormVal(obj->GetVN(VNK_Liberal));
- valAtAddr = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, normVal);
+ valAtAddr =
+ vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, normVal);
}
else // (staticOffset != nullptr)
{
// construct the ValueNumber for 'fldMap at staticOffset'
normVal = vnStore->VNNormVal(staticOffset->GetVN(VNK_Liberal));
- valAtAddr = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, normVal);
+ valAtAddr =
+ vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, normVal);
}
- // Now get rid of any remaining struct field dereferences. (if they exist)
+ // Now get rid of any remaining struct field dereferences. (if they exist)
if (fldSeq->m_next)
{
- storeVal = vnStore->VNApplySelectorsAssign(VNK_Liberal, valAtAddr, fldSeq->m_next, storeVal, indType);
+ storeVal = vnStore->VNApplySelectorsAssign(VNK_Liberal, valAtAddr,
+ fldSeq->m_next, storeVal, indType);
}
// From which we can construct the new ValueNumber for 'fldMap at normVal'
- newFldMapVN = vnStore->VNForMapStore(vnStore->TypeOfVN(fldMapVN), fldMapVN, normVal, storeVal);
+ newFldMapVN = vnStore->VNForMapStore(vnStore->TypeOfVN(fldMapVN), fldMapVN, normVal,
+ storeVal);
}
else
{
// plain static field
- // Now get rid of any remaining struct field dereferences. (if they exist)
+ // Now get rid of any remaining struct field dereferences. (if they exist)
if (fldSeq->m_next)
{
- storeVal = vnStore->VNApplySelectorsAssign(VNK_Liberal, fldMapVN, fldSeq->m_next, storeVal, indType);
+ storeVal = vnStore->VNApplySelectorsAssign(VNK_Liberal, fldMapVN,
+ fldSeq->m_next, storeVal, indType);
}
- newFldMapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurHeapVN, fldSeq, storeVal, indType);
+ newFldMapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurHeapVN, fldSeq,
+ storeVal, indType);
}
- // It is not strictly necessary to set the lhs value number,
+ // It is not strictly necessary to set the lhs value number,
// but the dumps read better with it set to the 'storeVal' that we just computed
lhs->gtVNPair.SetBoth(storeVal);
@@ -5624,7 +5799,8 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
assert(compCurBB->bbHeapDef);
// Update the field map for firstField in Heap to this new value.
- fgCurHeapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurHeapVN, firstFieldOnly, newFldMapVN, indType);
+ fgCurHeapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurHeapVN, firstFieldOnly,
+ newFldMapVN, indType);
fgValueNumberRecordHeapSsa(tree);
}
@@ -5636,7 +5812,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
{
// If it doesn't define a local, then it might update the heap.
fgMutateHeap(tree DEBUGARG("assign-of-IND"));
- }
+ }
}
}
@@ -5645,24 +5821,25 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
break;
- case GT_CLS_VAR:
+ case GT_CLS_VAR:
{
- bool isVolatile = (lhs->gtFlags & GTF_FLD_VOLATILE) != 0;
+ bool isVolatile = (lhs->gtFlags & GTF_FLD_VOLATILE) != 0;
if (isVolatile)
{
// For Volatile store indirection, first mutate the global heap
- fgMutateHeap(lhs DEBUGARG("GTF_CLS_VAR - store")); // always change fgCurHeapVN
+ fgMutateHeap(lhs DEBUGARG("GTF_CLS_VAR - store")); // always change fgCurHeapVN
}
// We model statics as indices into the heap variable.
FieldSeqNode* fldSeqForStaticVar = GetFieldSeqStore()->CreateSingleton(lhs->gtClsVar.gtClsVarHnd);
assert(fldSeqForStaticVar != FieldSeqStore::NotAField());
- ValueNum storeVal = rhsVNPair.GetLiberal(); // The value number from the rhs of the assignment
- storeVal = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurHeapVN, fldSeqForStaticVar, storeVal, lhs->TypeGet());
+ ValueNum storeVal = rhsVNPair.GetLiberal(); // The value number from the rhs of the assignment
+ storeVal = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurHeapVN, fldSeqForStaticVar, storeVal,
+ lhs->TypeGet());
- // It is not strictly necessary to set the lhs value number,
+ // It is not strictly necessary to set the lhs value number,
// but the dumps read better with it set to the 'storeVal' that we just computed
lhs->gtVNPair.SetBoth(storeVal);
#ifdef DEBUG
@@ -5680,12 +5857,12 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
break;
- default:
- assert(!"Unknown node for lhs of assignment!");
+ default:
+ assert(!"Unknown node for lhs of assignment!");
- // For Unknown stores, mutate the global heap
- fgMutateHeap(lhs DEBUGARG("Unkwown Assignment - store")); // always change fgCurHeapVN
- break;
+ // For Unknown stores, mutate the global heap
+ fgMutateHeap(lhs DEBUGARG("Unknown Assignment - store")); // always change fgCurHeapVN
+ break;
}
}
// Other kinds of assignment: initblk and copyblk.
@@ -5700,7 +5877,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
if (arg->OperIsLocal())
{
FieldSeqNode* fieldSeq = nullptr;
- ValueNum newVN = ValueNumStore::NoVN;
+ ValueNum newVN = ValueNumStore::NoVN;
if (fgExcludeFromSsa(arg->gtLclVarCommon.GetLclNum()))
{
newVN = vnStore->VNForExpr(TYP_BYREF);
@@ -5717,9 +5894,8 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
if (newVN == ValueNumStore::NoVN)
{
assert(arg->gtLclVarCommon.GetSsaNum() != ValueNumStore::NoVN);
- newVN = vnStore->VNForPtrToLoc(TYP_BYREF,
- vnStore->VNForIntCon(arg->gtLclVarCommon.GetLclNum()),
- vnStore->VNForFieldSeq(fieldSeq));
+ newVN = vnStore->VNForPtrToLoc(TYP_BYREF, vnStore->VNForIntCon(arg->gtLclVarCommon.GetLclNum()),
+ vnStore->VNForFieldSeq(fieldSeq));
}
tree->gtVNPair.SetBoth(newVN);
}
@@ -5729,17 +5905,17 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// except when this GT_ADDR has a valid zero-offset field sequence
//
FieldSeqNode* zeroOffsetFieldSeq = nullptr;
- if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFieldSeq) &&
+ if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFieldSeq) &&
(zeroOffsetFieldSeq != FieldSeqStore::NotAField()))
- {
+ {
ValueNum addrExtended = vnStore->ExtendPtrVN(arg->gtOp.gtOp1, zeroOffsetFieldSeq);
if (addrExtended != ValueNumStore::NoVN)
{
- tree->gtVNPair.SetBoth(addrExtended); // We don't care about lib/cons differences for addresses.
+ tree->gtVNPair.SetBoth(addrExtended); // We don't care about lib/cons differences for addresses.
}
else
{
- // ExtendPtrVN returned a failure result
+ // ExtendPtrVN returned a failure result
// So give this address a new unique value
tree->gtVNPair.SetBoth(vnStore->VNForExpr(TYP_BYREF));
}
@@ -5765,26 +5941,28 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
{
// So far, we handle cases in which the address is a ptr-to-local, or if it's
// a pointer to an object field.
- GenTreePtr addr = tree->gtOp.gtOp1;
- GenTreeLclVarCommon* lclVarTree = nullptr;
- FieldSeqNode * fldSeq1 = nullptr;
- FieldSeqNode * fldSeq2 = nullptr;
- GenTreePtr obj = nullptr;
+ GenTreePtr addr = tree->gtOp.gtOp1;
+ GenTreeLclVarCommon* lclVarTree = nullptr;
+ FieldSeqNode* fldSeq1 = nullptr;
+ FieldSeqNode* fldSeq2 = nullptr;
+ GenTreePtr obj = nullptr;
GenTreePtr staticOffset = nullptr;
- bool isVolatile = (tree->gtFlags & GTF_IND_VOLATILE) != 0;
+ bool isVolatile = (tree->gtFlags & GTF_IND_VOLATILE) != 0;
// See if the addr has any exceptional part.
- ValueNumPair addrNvnp; ValueNumPair addrXvnp = ValueNumPair(ValueNumStore::VNForEmptyExcSet(),
- ValueNumStore::VNForEmptyExcSet());
+ ValueNumPair addrNvnp;
+ ValueNumPair addrXvnp = ValueNumPair(ValueNumStore::VNForEmptyExcSet(), ValueNumStore::VNForEmptyExcSet());
vnStore->VNPUnpackExc(addr->gtVNPair, &addrNvnp, &addrXvnp);
// Is the dereference immutable? If so, model it as referencing the read-only heap.
if (tree->gtFlags & GTF_IND_INVARIANT)
{
- assert(!isVolatile); // We don't expect both volatile and invariant
- tree->gtVNPair =
- ValueNumPair(vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, ValueNumStore::VNForROH(), addrNvnp.GetLiberal()),
- vnStore->VNForMapSelect(VNK_Conservative, TYP_REF, ValueNumStore::VNForROH(), addrNvnp.GetConservative()));
+ assert(!isVolatile); // We don't expect both volatile and invariant
+ tree->gtVNPair =
+ ValueNumPair(vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, ValueNumStore::VNForROH(),
+ addrNvnp.GetLiberal()),
+ vnStore->VNForMapSelect(VNK_Conservative, TYP_REF, ValueNumStore::VNForROH(),
+ addrNvnp.GetConservative()));
tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp);
}
else if (isVolatile)
@@ -5794,17 +5972,17 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// The value read by the GT_IND can immediately change
ValueNum newUniq = vnStore->VNForExpr(tree->TypeGet());
- tree->gtVNPair = vnStore->VNPWithExc(ValueNumPair(newUniq, newUniq), addrXvnp);
+ tree->gtVNPair = vnStore->VNPWithExc(ValueNumPair(newUniq, newUniq), addrXvnp);
}
// We always want to evaluate the LHS when the GT_IND node is marked with GTF_IND_ARR_INDEX
// as this will relabel the GT_IND child correctly using the VNF_PtrToArrElem
else if ((tree->gtFlags & GTF_IND_ARR_INDEX) != 0)
{
ArrayInfo arrInfo;
- bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
+ bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
assert(b);
- ValueNum inxVN = ValueNumStore::NoVN;
+ ValueNum inxVN = ValueNumStore::NoVN;
FieldSeqNode* fldSeq = nullptr;
// GenTreePtr addr = tree->gtOp.gtOp1;
@@ -5823,17 +6001,18 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// Otherwise...
// Need to form H[arrType][arr][ind][fldSeq]
// Get the array element type equivalence class rep.
- CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
- ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
+ CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
+ ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
// We take the "VNNormVal"s here, because if either has exceptional outcomes, they will be captured
// as part of the value of the composite "addr" operation...
ValueNum arrVN = vnStore->VNNormVal(arr->gtVNPair.GetLiberal());
- inxVN = vnStore->VNNormVal(inxVN);
-
+ inxVN = vnStore->VNNormVal(inxVN);
+
// Additionally, relabel the address with a PtrToArrElem value number.
ValueNum fldSeqVN = vnStore->VNForFieldSeq(fldSeq);
- ValueNum elemAddr = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN, arrVN, inxVN, fldSeqVN);
+ ValueNum elemAddr =
+ vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN, arrVN, inxVN, fldSeqVN);
// The aggregate "addr" VN should have had all the exceptions bubble up...
elemAddr = vnStore->VNWithExc(elemAddr, addrXvnp.GetLiberal());
@@ -5870,16 +6049,16 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// the caller to evaluate anyway -- perhaps the assignment is an "op=" assignment.)
else if (((tree->gtFlags & GTF_IND_ASG_LHS) == 0) || evalAsgLhsInd)
{
- FieldSeqNode * localFldSeq = nullptr;
- VNFuncApp funcApp;
+ FieldSeqNode* localFldSeq = nullptr;
+ VNFuncApp funcApp;
// Is it a local or a heap address?
- if ( addr->IsLocalAddrExpr(this, &lclVarTree, &localFldSeq)
- && !fgExcludeFromSsa(lclVarTree->GetLclNum()))
+ if (addr->IsLocalAddrExpr(this, &lclVarTree, &localFldSeq) &&
+ !fgExcludeFromSsa(lclVarTree->GetLclNum()))
{
- unsigned lclNum = lclVarTree->GetLclNum();
- unsigned ssaNum = lclVarTree->GetSsaNum();
- LclVarDsc* varDsc = &lvaTable[lclNum];
+ unsigned lclNum = lclVarTree->GetLclNum();
+ unsigned ssaNum = lclVarTree->GetSsaNum();
+ LclVarDsc* varDsc = &lvaTable[lclNum];
if ((localFldSeq == FieldSeqStore::NotAField()) || (localFldSeq == nullptr))
{
@@ -5887,25 +6066,27 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
else
{
- var_types indType = tree->TypeGet();
+ var_types indType = tree->TypeGet();
ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair;
- tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, localFldSeq, indType);;
+ tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, localFldSeq, indType);
+ ;
}
tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp);
}
else if (vnStore->GetVNFunc(addrNvnp.GetLiberal(), &funcApp) && funcApp.m_func == VNF_PtrToStatic)
- {
- var_types indType = tree->TypeGet();
+ {
+ var_types indType = tree->TypeGet();
ValueNum fieldSeqVN = funcApp.m_args[0];
FieldSeqNode* fldSeqForStaticVar = vnStore->FieldSeqVNToFieldSeq(fieldSeqVN);
if (fldSeqForStaticVar != FieldSeqStore::NotAField())
{
- ValueNum selectedStaticVar;
+ ValueNum selectedStaticVar;
// We model statics as indices into the heap variable.
size_t structSize = 0;
- selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurHeapVN, fldSeqForStaticVar, &structSize);
+ selectedStaticVar =
+ vnStore->VNApplySelectors(VNK_Liberal, fgCurHeapVN, fldSeqForStaticVar, &structSize);
selectedStaticVar = vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, indType, structSize);
tree->gtVNPair.SetLiberal(selectedStaticVar);
@@ -5930,14 +6111,16 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
else if (fldSeq2 != nullptr)
{
- // Get the first (instance or static) field from field seq. Heap[field] will yield the "field map".
+ // Get the first (instance or static) field from field seq. Heap[field] will yield the "field
+ // map".
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
CORINFO_CLASS_HANDLE fldCls = info.compCompHnd->getFieldClass(fldSeq2->m_fieldHnd);
if (obj != nullptr)
{
- // Make sure that the class containing it is not a value class (as we are expecting an instance field)
+ // Make sure that the class containing it is not a value class (as we are expecting an
+ // instance field)
assert((info.compCompHnd->getClassAttribs(fldCls) & CORINFO_FLG_VALUECLASS) == 0);
assert(staticOffset == nullptr);
}
@@ -5945,8 +6128,9 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// Get a field sequence for just the first field in the sequence
//
FieldSeqNode* firstFieldOnly = GetFieldSeqStore()->CreateSingleton(fldSeq2->m_fieldHnd);
- size_t structSize = 0;
- ValueNum fldMapVN = vnStore->VNApplySelectors(VNK_Liberal, fgCurHeapVN, firstFieldOnly, &structSize);
+ size_t structSize = 0;
+ ValueNum fldMapVN =
+ vnStore->VNApplySelectors(VNK_Liberal, fgCurHeapVN, firstFieldOnly, &structSize);
// The final field in the sequence will need to match the 'indType'
var_types indType = tree->TypeGet();
@@ -5968,7 +6152,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
ValueNum offsetNormVal = vnStore->VNNormVal(staticOffset->GetVN(VNK_Liberal));
valAtAddr = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, offsetNormVal);
}
-
+
// Now get rid of any remaining struct field dereferences.
if (fldSeq2->m_next)
{
@@ -6009,7 +6193,7 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
{
if (GenTree::OperIsUnary(oper))
{
- if (tree->gtOp.gtOp1 != NULL)
+ if (tree->gtOp.gtOp1 != nullptr)
{
if (tree->OperGet() == GT_NOP)
{
@@ -6018,16 +6202,22 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
else
{
- ValueNumPair op1VNP; ValueNumPair op1VNPx = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair op1VNP;
+ ValueNumPair op1VNPx = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(tree->gtOp.gtOp1->gtVNPair, &op1VNP, &op1VNPx);
- tree->gtVNPair = vnStore->VNPWithExc(vnStore->VNPairForFunc(tree->TypeGet(), GetVNFuncForOper(oper, (tree->gtFlags & GTF_UNSIGNED) != 0), op1VNP),
- op1VNPx);
+ tree->gtVNPair =
+ vnStore->VNPWithExc(vnStore->VNPairForFunc(tree->TypeGet(),
+ GetVNFuncForOper(oper, (tree->gtFlags &
+ GTF_UNSIGNED) != 0),
+ op1VNP),
+ op1VNPx);
}
}
else // Is actually nullary.
{
// Mostly we'll leave these without a value number, assuming we'll detect these as VN failures
- // if they actually need to have values. With the exception of NOPs, which can sometimes have meaning.
+ // if they actually need to have values. With the exception of NOPs, which can sometimes have
+ // meaning.
if (tree->OperGet() == GT_NOP)
{
tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
@@ -6036,11 +6226,11 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
else
{
- assert(!GenTree::OperIsAssignment(oper)); // We handled assignments earlier.
+ assert(!GenTree::OperIsAssignment(oper)); // We handled assignments earlier.
assert(GenTree::OperIsBinary(oper));
// Standard binary operator.
ValueNumPair op2VNPair;
- if (tree->gtOp.gtOp2 == NULL)
+ if (tree->gtOp.gtOp2 == nullptr)
{
op2VNPair.SetBoth(ValueNumStore::VNForNull());
}
@@ -6051,9 +6241,11 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// A few special cases: if we add a field offset constant to a PtrToXXX, we get back a new PtrToXXX.
ValueNum newVN = ValueNumStore::NoVN;
- ValueNumPair op1vnp; ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair op1vnp;
+ ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(tree->gtOp.gtOp1->gtVNPair, &op1vnp, &op1Xvnp);
- ValueNumPair op2vnp; ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair op2vnp;
+ ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(op2VNPair, &op2vnp, &op2Xvnp);
ValueNumPair excSet = vnStore->VNPExcSetUnion(op1Xvnp, op2Xvnp);
@@ -6073,36 +6265,42 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
else
{
-
- ValueNumPair normalRes =
- vnStore->VNPairForFunc(tree->TypeGet(), GetVNFuncForOper(oper, (tree->gtFlags & GTF_UNSIGNED) != 0), op1vnp, op2vnp);
+
+ ValueNumPair normalRes =
+ vnStore->VNPairForFunc(tree->TypeGet(),
+ GetVNFuncForOper(oper, (tree->gtFlags & GTF_UNSIGNED) != 0), op1vnp,
+ op2vnp);
// Overflow-checking operations add an overflow exception
if (tree->gtOverflowEx())
{
- ValueNum overflowExcSet = vnStore->VNExcSetSingleton(vnStore->VNForFunc(TYP_REF, VNF_OverflowExc));
+ ValueNum overflowExcSet =
+ vnStore->VNExcSetSingleton(vnStore->VNForFunc(TYP_REF, VNF_OverflowExc));
excSet = vnStore->VNPExcSetUnion(excSet, ValueNumPair(overflowExcSet, overflowExcSet));
}
tree->gtVNPair = vnStore->VNPWithExc(normalRes, excSet);
}
}
}
- else // ValueNumStore::VNFuncIsLegal returns false
+ else // ValueNumStore::VNFuncIsLegal returns false
{
// Some of the genTreeOps that aren't legal VNFuncs so they get special handling.
switch (oper)
{
- case GT_COMMA:
+ case GT_COMMA:
{
- ValueNumPair op1vnp; ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair op1vnp;
+ ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(tree->gtOp.gtOp1->gtVNPair, &op1vnp, &op1Xvnp);
- ValueNumPair op2vnp; ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair op2vnp;
+ ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
if ((tree->gtOp.gtOp2->OperGet() == GT_IND) && (tree->gtOp.gtOp2->gtFlags & GTF_IND_ASG_LHS))
{
// If op2 represents the lhs of an assignment then we give a VNForVoid for the lhs
op2vnp = ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid());
}
- else if ((tree->gtOp.gtOp2->OperGet() == GT_CLS_VAR) && (tree->gtOp.gtOp2->gtFlags & GTF_CLS_VAR_ASG_LHS))
+ else if ((tree->gtOp.gtOp2->OperGet() == GT_CLS_VAR) &&
+ (tree->gtOp.gtOp2->gtFlags & GTF_CLS_VAR_ASG_LHS))
{
// If op2 represents the lhs of an assignment then we give a VNForVoid for the lhs
op2vnp = ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid());
@@ -6116,66 +6314,74 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
break;
- case GT_NULLCHECK:
- // Explicit null check.
- tree->gtVNPair = vnStore->VNPWithExc(ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()),
- vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_NullPtrExc, tree->gtOp.gtOp1->gtVNPair)));
- break;
-
- case GT_IND:
- if (tree->gtFlags & GTF_IND_ARR_LEN)
- {
- // It's an array length. The argument is the sum of an array ref with some integer values...
- ValueNum arrRefLib = vnStore->VNForRefInAddr(tree->gtOp.gtOp1->gtVNPair.GetLiberal());
- ValueNum arrRefCons = vnStore->VNForRefInAddr(tree->gtOp.gtOp1->gtVNPair.GetConservative());
+ case GT_NULLCHECK:
+ // Explicit null check.
+ tree->gtVNPair =
+ vnStore->VNPWithExc(ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()),
+ vnStore->VNPExcSetSingleton(
+ vnStore->VNPairForFunc(TYP_REF, VNF_NullPtrExc,
+ tree->gtOp.gtOp1->gtVNPair)));
+ break;
- assert(vnStore->TypeOfVN(arrRefLib) == TYP_REF || vnStore->TypeOfVN(arrRefLib) == TYP_BYREF);
- if (vnStore->IsVNConstant(arrRefLib))
- {
- // (or in weird cases, a REF or BYREF constant, in which case the result is an exception).
- tree->gtVNPair.SetLiberal(vnStore->VNWithExc(ValueNumStore::VNForVoid(),
- vnStore->VNExcSetSingleton(vnStore->VNForFunc(TYP_REF, VNF_NullPtrExc, arrRefLib))));
- }
- else
+ case GT_IND:
+ if (tree->gtFlags & GTF_IND_ARR_LEN)
{
- tree->gtVNPair.SetLiberal(vnStore->VNForFunc(TYP_INT, VNFunc(GT_ARR_LENGTH), arrRefLib));
- }
- assert(vnStore->TypeOfVN(arrRefCons) == TYP_REF || vnStore->TypeOfVN(arrRefCons) == TYP_BYREF);
- if (vnStore->IsVNConstant(arrRefCons))
- {
- // (or in weird cases, a REF or BYREF constant, in which case the result is an exception).
- tree->gtVNPair.SetConservative(vnStore->VNWithExc(ValueNumStore::VNForVoid(),
- vnStore->VNExcSetSingleton(vnStore->VNForFunc(TYP_REF, VNF_NullPtrExc, arrRefCons))));
+ // It's an array length. The argument is the sum of an array ref with some integer values...
+ ValueNum arrRefLib = vnStore->VNForRefInAddr(tree->gtOp.gtOp1->gtVNPair.GetLiberal());
+ ValueNum arrRefCons = vnStore->VNForRefInAddr(tree->gtOp.gtOp1->gtVNPair.GetConservative());
+
+ assert(vnStore->TypeOfVN(arrRefLib) == TYP_REF || vnStore->TypeOfVN(arrRefLib) == TYP_BYREF);
+ if (vnStore->IsVNConstant(arrRefLib))
+ {
+ // (or in weird cases, a REF or BYREF constant, in which case the result is an exception).
+ tree->gtVNPair.SetLiberal(
+ vnStore->VNWithExc(ValueNumStore::VNForVoid(),
+ vnStore->VNExcSetSingleton(
+ vnStore->VNForFunc(TYP_REF, VNF_NullPtrExc, arrRefLib))));
+ }
+ else
+ {
+ tree->gtVNPair.SetLiberal(vnStore->VNForFunc(TYP_INT, VNFunc(GT_ARR_LENGTH), arrRefLib));
+ }
+ assert(vnStore->TypeOfVN(arrRefCons) == TYP_REF || vnStore->TypeOfVN(arrRefCons) == TYP_BYREF);
+ if (vnStore->IsVNConstant(arrRefCons))
+ {
+ // (or in weird cases, a REF or BYREF constant, in which case the result is an exception).
+ tree->gtVNPair.SetConservative(
+ vnStore->VNWithExc(ValueNumStore::VNForVoid(),
+ vnStore->VNExcSetSingleton(
+ vnStore->VNForFunc(TYP_REF, VNF_NullPtrExc, arrRefCons))));
+ }
+ else
+ {
+ tree->gtVNPair.SetConservative(
+ vnStore->VNForFunc(TYP_INT, VNFunc(GT_ARR_LENGTH), arrRefCons));
+ }
}
else
{
- tree->gtVNPair.SetConservative(vnStore->VNForFunc(TYP_INT, VNFunc(GT_ARR_LENGTH), arrRefCons));
+ tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
}
- }
- else
- {
+ break;
+
+ case GT_LOCKADD: // Binop
+ case GT_XADD: // Binop
+ case GT_XCHG: // Binop
+ // For CMPXCHG and other intrinsics add an arbitrary side effect on Heap.
+ fgMutateHeap(tree DEBUGARG("Interlocked intrinsic"));
tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
- }
- break;
-
- case GT_LOCKADD: // Binop
- case GT_XADD: // Binop
- case GT_XCHG: // Binop
- // For CMPXCHG and other intrinsics add an arbitrary side effect on Heap.
- fgMutateHeap(tree DEBUGARG("Interlocked intrinsic"));
- tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
- break;
+ break;
- case GT_JTRUE:
- case GT_LIST:
- // These nodes never need to have a ValueNumber
- tree->gtVNPair.SetBoth(ValueNumStore::NoVN);
- break;
+ case GT_JTRUE:
+ case GT_LIST:
+ // These nodes never need to have a ValueNumber
+ tree->gtVNPair.SetBoth(ValueNumStore::NoVN);
+ break;
- default:
- // The default action is to give the node a new, unique VN.
- tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
- break;
+ default:
+ // The default action is to give the node a new, unique VN.
+ tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
+ break;
}
}
}
@@ -6186,20 +6392,20 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
// TBD: We must handle these individually. For now:
switch (oper)
{
- case GT_CALL:
- fgValueNumberCall(tree->AsCall());
- break;
+ case GT_CALL:
+ fgValueNumberCall(tree->AsCall());
+ break;
- case GT_ARR_BOUNDS_CHECK:
+ case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
- case GT_SIMD_CHK:
+ case GT_SIMD_CHK:
#endif // FEATURE_SIMD
{
// A bounds check node has no value, but may throw exceptions.
- ValueNumPair excSet =
- vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_IndexOutOfRangeExc,
- vnStore->VNPNormVal(tree->AsBoundsChk()->gtArrLen->gtVNPair),
- vnStore->VNPNormVal(tree->AsBoundsChk()->gtIndex->gtVNPair)));
+ ValueNumPair excSet = vnStore->VNPExcSetSingleton(
+ vnStore->VNPairForFunc(TYP_REF, VNF_IndexOutOfRangeExc,
+ vnStore->VNPNormVal(tree->AsBoundsChk()->gtArrLen->gtVNPair),
+ vnStore->VNPNormVal(tree->AsBoundsChk()->gtIndex->gtVNPair)));
excSet = vnStore->VNPExcSetUnion(excSet, vnStore->VNPExcVal(tree->AsBoundsChk()->gtArrLen->gtVNPair));
excSet = vnStore->VNPExcSetUnion(excSet, vnStore->VNPExcVal(tree->AsBoundsChk()->gtIndex->gtVNPair));
@@ -6207,14 +6413,14 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
}
break;
- case GT_CMPXCHG: // Specialop
- // For CMPXCHG and other intrinsics add an arbitrary side effect on Heap.
- fgMutateHeap(tree DEBUGARG("Interlocked intrinsic"));
- tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
- break;
-
- default:
- tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
+ case GT_CMPXCHG: // Specialop
+ // For CMPXCHG and other intrinsics add an arbitrary side effect on Heap.
+ fgMutateHeap(tree DEBUGARG("Interlocked intrinsic"));
+ tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
+ break;
+
+ default:
+ tree->gtVNPair.SetBoth(vnStore->VNForExpr(tree->TypeGet()));
}
}
#ifdef DEBUG
@@ -6226,9 +6432,9 @@ void Compiler::fgValueNumberTree(GenTreePtr tree, bool evalAsgLhsInd)
printTreeID(tree);
printf(" ");
gtDispNodeName(tree);
- if (tree->OperIsLeaf()|| tree->OperIsLocalStore()) // local stores used to be leaves
+ if (tree->OperIsLeaf() || tree->OperIsLocalStore()) // local stores used to be leaves
{
- gtDispLeaf(tree, 0);
+ gtDispLeaf(tree, nullptr);
}
printf(" => ");
vnpPrint(tree->gtVNPair, 1);
@@ -6242,10 +6448,10 @@ void Compiler::fgValueNumberIntrinsic(GenTreePtr tree)
{
assert(tree->OperGet() == GT_INTRINSIC);
GenTreeIntrinsic* intrinsic = tree->AsIntrinsic();
- ValueNumPair arg0VNP, arg1VNP;
- ValueNumPair arg0VNPx = ValueNumStore::VNPForEmptyExcSet();
- ValueNumPair arg1VNPx = ValueNumStore::VNPForEmptyExcSet();
-
+ ValueNumPair arg0VNP, arg1VNP;
+ ValueNumPair arg0VNPx = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair arg1VNPx = ValueNumStore::VNPForEmptyExcSet();
+
vnStore->VNPUnpackExc(intrinsic->gtOp.gtOp1->gtVNPair, &arg0VNP, &arg0VNPx);
if (intrinsic->gtOp.gtOp2 != nullptr)
@@ -6255,52 +6461,51 @@ void Compiler::fgValueNumberIntrinsic(GenTreePtr tree)
switch (intrinsic->gtIntrinsicId)
{
- case CORINFO_INTRINSIC_Sin:
- case CORINFO_INTRINSIC_Sqrt:
- case CORINFO_INTRINSIC_Abs:
- case CORINFO_INTRINSIC_Cos:
- case CORINFO_INTRINSIC_Round:
- case CORINFO_INTRINSIC_Cosh:
- case CORINFO_INTRINSIC_Sinh:
- case CORINFO_INTRINSIC_Tan:
- case CORINFO_INTRINSIC_Tanh:
- case CORINFO_INTRINSIC_Asin:
- case CORINFO_INTRINSIC_Acos:
- case CORINFO_INTRINSIC_Atan:
- case CORINFO_INTRINSIC_Atan2:
- case CORINFO_INTRINSIC_Log10:
- case CORINFO_INTRINSIC_Pow:
- case CORINFO_INTRINSIC_Exp:
- case CORINFO_INTRINSIC_Ceiling:
- case CORINFO_INTRINSIC_Floor:
-
- // GT_INTRINSIC is a currently a subtype of binary operators. But most of
- // the math intrinsics are actually unary operations.
-
- if (intrinsic->gtOp.gtOp2 == nullptr)
- {
- intrinsic->gtVNPair = vnStore->VNPWithExc(
- vnStore->EvalMathFuncUnary(tree->TypeGet(),
- intrinsic->gtIntrinsicId,
- arg0VNP),
- arg0VNPx);
- }
- else
- {
- ValueNumPair newVNP = vnStore->EvalMathFuncBinary(tree->TypeGet(), intrinsic->gtIntrinsicId, arg0VNP, arg1VNP);
- ValueNumPair excSet = vnStore->VNPExcSetUnion(arg0VNPx, arg1VNPx);
- intrinsic->gtVNPair = vnStore->VNPWithExc(newVNP, excSet);
- }
+ case CORINFO_INTRINSIC_Sin:
+ case CORINFO_INTRINSIC_Sqrt:
+ case CORINFO_INTRINSIC_Abs:
+ case CORINFO_INTRINSIC_Cos:
+ case CORINFO_INTRINSIC_Round:
+ case CORINFO_INTRINSIC_Cosh:
+ case CORINFO_INTRINSIC_Sinh:
+ case CORINFO_INTRINSIC_Tan:
+ case CORINFO_INTRINSIC_Tanh:
+ case CORINFO_INTRINSIC_Asin:
+ case CORINFO_INTRINSIC_Acos:
+ case CORINFO_INTRINSIC_Atan:
+ case CORINFO_INTRINSIC_Atan2:
+ case CORINFO_INTRINSIC_Log10:
+ case CORINFO_INTRINSIC_Pow:
+ case CORINFO_INTRINSIC_Exp:
+ case CORINFO_INTRINSIC_Ceiling:
+ case CORINFO_INTRINSIC_Floor:
+
+ // GT_INTRINSIC is currently a subtype of binary operators. But most of
+ // the math intrinsics are actually unary operations.
+
+ if (intrinsic->gtOp.gtOp2 == nullptr)
+ {
+ intrinsic->gtVNPair =
+ vnStore->VNPWithExc(vnStore->EvalMathFuncUnary(tree->TypeGet(), intrinsic->gtIntrinsicId, arg0VNP),
+ arg0VNPx);
+ }
+ else
+ {
+ ValueNumPair newVNP =
+ vnStore->EvalMathFuncBinary(tree->TypeGet(), intrinsic->gtIntrinsicId, arg0VNP, arg1VNP);
+ ValueNumPair excSet = vnStore->VNPExcSetUnion(arg0VNPx, arg1VNPx);
+ intrinsic->gtVNPair = vnStore->VNPWithExc(newVNP, excSet);
+ }
- break;
+ break;
- case CORINFO_INTRINSIC_Object_GetType:
- intrinsic->gtVNPair = vnStore->VNPWithExc(
- vnStore->VNPairForFunc(intrinsic->TypeGet(), VNF_ObjGetType, arg0VNP), arg0VNPx);
- break;
+ case CORINFO_INTRINSIC_Object_GetType:
+ intrinsic->gtVNPair =
+ vnStore->VNPWithExc(vnStore->VNPairForFunc(intrinsic->TypeGet(), VNF_ObjGetType, arg0VNP), arg0VNPx);
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
}
@@ -6314,34 +6519,34 @@ void Compiler::fgValueNumberCastTree(GenTreePtr tree)
bool srcIsUnsigned = ((tree->gtFlags & GTF_UNSIGNED) != 0);
bool hasOverflowCheck = tree->gtOverflowEx();
- assert(genActualType(castToType) == tree->TypeGet()); // Insure that the resultType is correct
+ assert(genActualType(castToType) == tree->TypeGet()); // Ensure that the resultType is correct
- tree->gtVNPair = vnStore->VNPairForCast(srcVNPair, castToType, castFromType,
- srcIsUnsigned, hasOverflowCheck);
+ tree->gtVNPair = vnStore->VNPairForCast(srcVNPair, castToType, castFromType, srcIsUnsigned, hasOverflowCheck);
}
-
// Compute the normal ValueNumber for a cast operation with no exceptions
-ValueNum ValueNumStore::VNForCast(ValueNum srcVN, var_types castToType, var_types castFromType,
- bool srcIsUnsigned /* = false */ )
+ValueNum ValueNumStore::VNForCast(ValueNum srcVN,
+ var_types castToType,
+ var_types castFromType,
+ bool srcIsUnsigned /* = false */)
{
// The resulting type after performing the cast is always widened to a supported IL stack size
- var_types resultType = genActualType(castToType);
+ var_types resultType = genActualType(castToType);
- // When we're considering actual value returned by a non-checking cast whether or not the source is
+ // When we're considering actual value returned by a non-checking cast whether or not the source is
// unsigned does *not* matter for non-widening casts. That is, if we cast an int or a uint to short,
// we just extract the first two bytes from the source bit pattern, not worrying about the interpretation.
// The same is true in casting between signed/unsigned types of the same width. Only when we're doing
// a widening cast do we care about whether the source was unsigned,so we know whether to sign or zero extend it.
- //
- bool srcIsUnsignedNorm = srcIsUnsigned;
+ //
+ bool srcIsUnsignedNorm = srcIsUnsigned;
if (genTypeSize(castToType) <= genTypeSize(castFromType))
{
srcIsUnsignedNorm = false;
}
- ValueNum castTypeVN = VNForCastOper(castToType, srcIsUnsigned);
- ValueNum resultVN = VNForFunc(resultType, VNF_Cast, srcVN, castTypeVN);
+ ValueNum castTypeVN = VNForCastOper(castToType, srcIsUnsigned);
+ ValueNum resultVN = VNForFunc(resultType, VNF_Cast, srcVN, castTypeVN);
#ifdef DEBUG
if (m_pComp->verbose)
@@ -6355,29 +6560,30 @@ ValueNum ValueNumStore::VNForCast(ValueNum srcVN, var_types castToType, var_type
return resultVN;
}
-
// Compute the ValueNumberPair for a cast operation
-ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair, var_types castToType, var_types castFromType,
- bool srcIsUnsigned, /* = false */
- bool hasOverflowCheck) /* = false */
+ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair,
+ var_types castToType,
+ var_types castFromType,
+ bool srcIsUnsigned, /* = false */
+ bool hasOverflowCheck) /* = false */
{
// The resulting type after performing the cast is always widened to a supported IL stack size
- var_types resultType = genActualType(castToType);
+ var_types resultType = genActualType(castToType);
- ValueNumPair castArgVNP;
+ ValueNumPair castArgVNP;
ValueNumPair castArgxVNP = ValueNumStore::VNPForEmptyExcSet();
VNPUnpackExc(srcVNPair, &castArgVNP, &castArgxVNP);
- // When we're considering actual value returned by a non-checking cast (or a checking cast that succeeds),
- // whether or not the source is unsigned does *not* matter for non-widening casts.
- // That is, if we cast an int or a uint to short, we just extract the first two bytes from the source
- // bit pattern, not worrying about the interpretation. The same is true in casting between signed/unsigned
- // types of the same width. Only when we're doing a widening cast do we care about whether the source
+ // When we're considering actual value returned by a non-checking cast (or a checking cast that succeeds),
+ // whether or not the source is unsigned does *not* matter for non-widening casts.
+ // That is, if we cast an int or a uint to short, we just extract the first two bytes from the source
+ // bit pattern, not worrying about the interpretation. The same is true in casting between signed/unsigned
+ // types of the same width. Only when we're doing a widening cast do we care about whether the source
// was unsigned, so we know whether to sign or zero extend it.
//
// Important: Casts to floating point cannot be optimized in this fashion. (bug 946768)
//
- bool srcIsUnsignedNorm = srcIsUnsigned;
+ bool srcIsUnsignedNorm = srcIsUnsigned;
if (genTypeSize(castToType) <= genTypeSize(castFromType) && !varTypeIsFloating(castToType))
{
srcIsUnsignedNorm = false;
@@ -6394,8 +6600,9 @@ ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair, var_types cast
{
// For overflow checking, we always need to know whether the source is unsigned.
castTypeVNPair.SetBoth(VNForCastOper(castToType, srcIsUnsigned));
- ValueNumPair excSet = VNPExcSetSingleton(VNPairForFunc(TYP_REF, VNF_ConvOverflowExc, castArgVNP, castTypeVNPair));
- excSet = VNPExcSetUnion(excSet, castArgxVNP);
+ ValueNumPair excSet =
+ VNPExcSetSingleton(VNPairForFunc(TYP_REF, VNF_ConvOverflowExc, castArgVNP, castTypeVNPair));
+ excSet = VNPExcSetUnion(excSet, castArgxVNP);
resultVNP = VNPWithExc(castNormRes, excSet);
}
@@ -6406,22 +6613,22 @@ void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueN
{
unsigned nArgs = ValueNumStore::VNFuncArity(vnf);
assert(vnf != VNF_Boundary);
- GenTreeArgList* args = call->gtCallArgs;
- bool generateUniqueVN = false;
- bool useEntryPointAddrAsArg0 = false;
+ GenTreeArgList* args = call->gtCallArgs;
+ bool generateUniqueVN = false;
+ bool useEntryPointAddrAsArg0 = false;
switch (vnf)
{
- case VNF_JitNew:
+ case VNF_JitNew:
{
generateUniqueVN = true;
- vnpExc = ValueNumStore::VNPForEmptyExcSet();
+ vnpExc = ValueNumStore::VNPForEmptyExcSet();
}
break;
- case VNF_JitNewArr:
+ case VNF_JitNewArr:
{
- generateUniqueVN = true;
+ generateUniqueVN = true;
ValueNumPair vnp1 = vnStore->VNPNormVal(args->Rest()->Current()->gtVNPair);
// The New Array helper may throw an overflow exception
@@ -6429,29 +6636,29 @@ void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueN
}
break;
- case VNF_BoxNullable:
+ case VNF_BoxNullable:
{
// Generate a unique VN so VNForFunc generates a unique value number for box nullable.
- // Alternatively instead of using vnpUniq below in VNPairForFunc(...),
+ // Alternatively instead of using vnpUniq below in VNPairForFunc(...),
// we could use the value number of what the byref arg0 points to.
- //
+ //
// But retrieving the value number of what the byref arg0 points to is quite a bit more work
// and doing so only very rarely allows for an additional optimization.
generateUniqueVN = true;
}
break;
- case VNF_JitReadyToRunNew:
+ case VNF_JitReadyToRunNew:
{
- generateUniqueVN = true;
- vnpExc = ValueNumStore::VNPForEmptyExcSet();
+ generateUniqueVN = true;
+ vnpExc = ValueNumStore::VNPForEmptyExcSet();
useEntryPointAddrAsArg0 = true;
}
break;
- case VNF_JitReadyToRunNewArr:
+ case VNF_JitReadyToRunNewArr:
{
- generateUniqueVN = true;
+ generateUniqueVN = true;
ValueNumPair vnp1 = vnStore->VNPNormVal(args->Current()->gtVNPair);
// The New Array helper may throw an overflow exception
@@ -6460,15 +6667,15 @@ void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueN
}
break;
- case VNF_ReadyToRunStaticBase:
- case VNF_ReadyToRunIsInstanceOf:
- case VNF_ReadyToRunCastClass:
+ case VNF_ReadyToRunStaticBase:
+ case VNF_ReadyToRunIsInstanceOf:
+ case VNF_ReadyToRunCastClass:
{
useEntryPointAddrAsArg0 = true;
}
break;
- default:
+ default:
{
assert(s_helperCallProperties.IsPure(eeGetHelperNum(call->gtCallMethHnd)));
}
@@ -6501,12 +6708,13 @@ void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueN
else
{
// Has at least one argument.
- ValueNumPair vnp0; ValueNumPair vnp0x = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair vnp0;
+ ValueNumPair vnp0x = ValueNumStore::VNPForEmptyExcSet();
#ifdef FEATURE_READYTORUN_COMPILER
if (useEntryPointAddrAsArg0)
{
ValueNum callAddrVN = vnStore->VNForPtrSizeIntCon((ssize_t)call->gtCall.gtEntryPoint.addr);
- vnp0 = ValueNumPair(callAddrVN, callAddrVN);
+ vnp0 = ValueNumPair(callAddrVN, callAddrVN);
}
else
#endif
@@ -6535,7 +6743,8 @@ void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueN
{
// Has at least two arguments.
ValueNumPair vnp1wx = args->Current()->gtVNPair;
- ValueNumPair vnp1; ValueNumPair vnp1x = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair vnp1;
+ ValueNumPair vnp1x = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(vnp1wx, &vnp1, &vnp1x);
vnpExc = vnStore->VNPExcSetUnion(vnpExc, vnp1x);
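The hunks in fgValueNumberHelperCallFunc repeat one idiom: each argument's value-number pair is unpacked into a normal value plus an exception set, and the exception sets are unioned into the call's overall exception set. A simplified, self-contained sketch of that unpack-and-union idiom follows; it uses a plain std::set purely for illustration, whereas the JIT encodes exception sets as value numbers.

    // Simplified illustration of the VNPUnpackExc / VNPExcSetUnion idiom:
    // split each argument into (normal value, exception set), then union the
    // exception sets into the call's set. Names and types are illustrative.
    #include <cassert>
    #include <set>
    #include <string>

    typedef std::set<std::string> ExcSet;

    struct ValueWithExc
    {
        int    normalValue;
        ExcSet excSet;
    };

    // "Unpack" an argument into its normal value and its exception set.
    void UnpackExc(const ValueWithExc& arg, int* normal, ExcSet* exc)
    {
        *normal = arg.normalValue;
        *exc    = arg.excSet;
    }

    ExcSet ExcSetUnion(const ExcSet& a, const ExcSet& b)
    {
        ExcSet result = a;
        result.insert(b.begin(), b.end());
        return result;
    }

    int main()
    {
        ValueWithExc arg0 = {42, {"NullRef"}};
        ValueWithExc arg1 = {7, {"Overflow"}};

        ExcSet callExc; // starts as the empty exception set
        int    v0, v1;
        ExcSet e0, e1;

        UnpackExc(arg0, &v0, &e0);
        callExc = ExcSetUnion(callExc, e0);
        UnpackExc(arg1, &v1, &e1);
        callExc = ExcSetUnion(callExc, e1);

        assert(callExc.size() == 2); // both argument exception sets accumulated
        return 0;
    }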
@@ -6554,13 +6763,14 @@ void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueN
else
{
ValueNumPair vnp2wx = args->Current()->gtVNPair;
- ValueNumPair vnp2; ValueNumPair vnp2x = ValueNumStore::VNPForEmptyExcSet();
+ ValueNumPair vnp2;
+ ValueNumPair vnp2x = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(vnp2wx, &vnp2, &vnp2x);
vnpExc = vnStore->VNPExcSetUnion(vnpExc, vnp2x);
args = args->Rest();
- assert(nArgs == 3); // Our current maximum.
- assert(args == NULL);
+ assert(nArgs == 3); // Our current maximum.
+ assert(args == nullptr);
if (generateUniqueVN)
{
call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnp1, vnp2, vnpUniq);
@@ -6580,16 +6790,16 @@ void Compiler::fgValueNumberCall(GenTreeCall* call)
{
// First: do value numbering of any argument placeholder nodes in the argument list
// (by transferring from the VN of the late arg that they are standing in for...)
- unsigned i = 0;
- GenTreeArgList* args = call->gtCallArgs;
- bool updatedArgPlace = false;
- while (args != NULL)
+ unsigned i = 0;
+ GenTreeArgList* args = call->gtCallArgs;
+ bool updatedArgPlace = false;
+ while (args != nullptr)
{
GenTreePtr arg = args->Current();
if (arg->OperGet() == GT_ARGPLACE)
{
// Find the corresponding late arg.
- GenTreePtr lateArg = NULL;
+ GenTreePtr lateArg = nullptr;
for (unsigned j = 0; j < call->fgArgInfo->ArgCount(); j++)
{
if (call->fgArgInfo->ArgTable()[j]->argNum == i)
@@ -6598,9 +6808,9 @@ void Compiler::fgValueNumberCall(GenTreeCall* call)
break;
}
}
- assert(lateArg != NULL);
+ assert(lateArg != nullptr);
assert(lateArg->gtVNPair.BothDefined());
- arg->gtVNPair = lateArg->gtVNPair;
+ arg->gtVNPair = lateArg->gtVNPair;
updatedArgPlace = true;
#ifdef DEBUG
if (verbose)
@@ -6636,9 +6846,13 @@ void Compiler::fgValueNumberCall(GenTreeCall* call)
else
{
if (call->TypeGet() == TYP_VOID)
+ {
call->gtVNPair.SetBoth(ValueNumStore::VNForVoid());
+ }
else
+ {
call->gtVNPair.SetBoth(vnStore->VNForExpr(call->TypeGet()));
+ }
// For now, arbitrary side effect on Heap.
fgMutateHeap(call DEBUGARG("CALL"));
@@ -6647,7 +6861,10 @@ void Compiler::fgValueNumberCall(GenTreeCall* call)
void Compiler::fgUpdateArgListVNs(GenTreeArgList* args)
{
- if (args == nullptr) return;
+ if (args == nullptr)
+ {
+ return;
+ }
// Otherwise...
fgUpdateArgListVNs(args->Rest());
fgValueNumberTree(args);
@@ -6655,208 +6872,268 @@ void Compiler::fgUpdateArgListVNs(GenTreeArgList* args)
VNFunc Compiler::fgValueNumberHelperMethVNFunc(CorInfoHelpFunc helpFunc)
{
- assert( s_helperCallProperties.IsPure(helpFunc)
- || s_helperCallProperties.IsAllocator(helpFunc));
+ assert(s_helperCallProperties.IsPure(helpFunc) || s_helperCallProperties.IsAllocator(helpFunc));
- VNFunc vnf = VNF_Boundary; // An illegal value...
+ VNFunc vnf = VNF_Boundary; // An illegal value...
switch (helpFunc)
{
// These translate to other function symbols:
- case CORINFO_HELP_DIV:
- vnf = VNFunc(GT_DIV); break;
- case CORINFO_HELP_MOD:
- vnf = VNFunc(GT_MOD); break;
- case CORINFO_HELP_UDIV:
- vnf = VNFunc(GT_UDIV); break;
- case CORINFO_HELP_UMOD:
- vnf = VNFunc(GT_UMOD); break;
- case CORINFO_HELP_LLSH:
- vnf = VNFunc(GT_LSH); break;
- case CORINFO_HELP_LRSH:
- vnf = VNFunc(GT_RSH); break;
- case CORINFO_HELP_LRSZ:
- vnf = VNFunc(GT_RSZ); break;
- case CORINFO_HELP_LMUL:
- case CORINFO_HELP_LMUL_OVF:
- vnf = VNFunc(GT_MUL); break;
- case CORINFO_HELP_ULMUL_OVF:
- vnf = VNFunc(GT_MUL); break; // Is this the right thing?
- case CORINFO_HELP_LDIV:
- vnf = VNFunc(GT_DIV); break;
- case CORINFO_HELP_LMOD:
- vnf = VNFunc(GT_MOD); break;
- case CORINFO_HELP_ULDIV:
- vnf = VNFunc(GT_DIV); break; // Is this the right thing?
- case CORINFO_HELP_ULMOD:
- vnf = VNFunc(GT_MOD); break; // Is this the right thing?
-
- case CORINFO_HELP_LNG2DBL:
- vnf = VNF_Lng2Dbl; break;
- case CORINFO_HELP_ULNG2DBL:
- vnf = VNF_ULng2Dbl; break;
- case CORINFO_HELP_DBL2INT:
- vnf = VNF_Dbl2Int; break;
- case CORINFO_HELP_DBL2INT_OVF:
- vnf = VNF_Dbl2Int; break;
- case CORINFO_HELP_DBL2LNG:
- vnf = VNF_Dbl2Lng; break;
- case CORINFO_HELP_DBL2LNG_OVF:
- vnf = VNF_Dbl2Lng; break;
- case CORINFO_HELP_DBL2UINT:
- vnf = VNF_Dbl2UInt; break;
- case CORINFO_HELP_DBL2UINT_OVF:
- vnf = VNF_Dbl2UInt; break;
- case CORINFO_HELP_DBL2ULNG:
- vnf = VNF_Dbl2ULng; break;
- case CORINFO_HELP_DBL2ULNG_OVF:
- vnf = VNF_Dbl2ULng; break;
- case CORINFO_HELP_FLTREM:
- vnf = VNFunc(GT_MOD); break;
- case CORINFO_HELP_DBLREM:
- vnf = VNFunc(GT_MOD); break;
- case CORINFO_HELP_FLTROUND:
- vnf = VNF_FltRound; break; // Is this the right thing?
- case CORINFO_HELP_DBLROUND:
- vnf = VNF_DblRound; break; // Is this the right thing?
+ case CORINFO_HELP_DIV:
+ vnf = VNFunc(GT_DIV);
+ break;
+ case CORINFO_HELP_MOD:
+ vnf = VNFunc(GT_MOD);
+ break;
+ case CORINFO_HELP_UDIV:
+ vnf = VNFunc(GT_UDIV);
+ break;
+ case CORINFO_HELP_UMOD:
+ vnf = VNFunc(GT_UMOD);
+ break;
+ case CORINFO_HELP_LLSH:
+ vnf = VNFunc(GT_LSH);
+ break;
+ case CORINFO_HELP_LRSH:
+ vnf = VNFunc(GT_RSH);
+ break;
+ case CORINFO_HELP_LRSZ:
+ vnf = VNFunc(GT_RSZ);
+ break;
+ case CORINFO_HELP_LMUL:
+ case CORINFO_HELP_LMUL_OVF:
+ vnf = VNFunc(GT_MUL);
+ break;
+ case CORINFO_HELP_ULMUL_OVF:
+ vnf = VNFunc(GT_MUL);
+ break; // Is this the right thing?
+ case CORINFO_HELP_LDIV:
+ vnf = VNFunc(GT_DIV);
+ break;
+ case CORINFO_HELP_LMOD:
+ vnf = VNFunc(GT_MOD);
+ break;
+ case CORINFO_HELP_ULDIV:
+ vnf = VNFunc(GT_DIV);
+ break; // Is this the right thing?
+ case CORINFO_HELP_ULMOD:
+ vnf = VNFunc(GT_MOD);
+ break; // Is this the right thing?
+
+ case CORINFO_HELP_LNG2DBL:
+ vnf = VNF_Lng2Dbl;
+ break;
+ case CORINFO_HELP_ULNG2DBL:
+ vnf = VNF_ULng2Dbl;
+ break;
+ case CORINFO_HELP_DBL2INT:
+ vnf = VNF_Dbl2Int;
+ break;
+ case CORINFO_HELP_DBL2INT_OVF:
+ vnf = VNF_Dbl2Int;
+ break;
+ case CORINFO_HELP_DBL2LNG:
+ vnf = VNF_Dbl2Lng;
+ break;
+ case CORINFO_HELP_DBL2LNG_OVF:
+ vnf = VNF_Dbl2Lng;
+ break;
+ case CORINFO_HELP_DBL2UINT:
+ vnf = VNF_Dbl2UInt;
+ break;
+ case CORINFO_HELP_DBL2UINT_OVF:
+ vnf = VNF_Dbl2UInt;
+ break;
+ case CORINFO_HELP_DBL2ULNG:
+ vnf = VNF_Dbl2ULng;
+ break;
+ case CORINFO_HELP_DBL2ULNG_OVF:
+ vnf = VNF_Dbl2ULng;
+ break;
+ case CORINFO_HELP_FLTREM:
+ vnf = VNFunc(GT_MOD);
+ break;
+ case CORINFO_HELP_DBLREM:
+ vnf = VNFunc(GT_MOD);
+ break;
+ case CORINFO_HELP_FLTROUND:
+ vnf = VNF_FltRound;
+ break; // Is this the right thing?
+ case CORINFO_HELP_DBLROUND:
+ vnf = VNF_DblRound;
+ break; // Is this the right thing?
// These allocation operations probably require some augmentation -- perhaps allocSiteId,
// something about array length...
- case CORINFO_HELP_NEW_CROSSCONTEXT:
- case CORINFO_HELP_NEWFAST:
- case CORINFO_HELP_NEWSFAST:
- case CORINFO_HELP_NEWSFAST_ALIGN8:
- vnf = VNF_JitNew;
- break;
+ case CORINFO_HELP_NEW_CROSSCONTEXT:
+ case CORINFO_HELP_NEWFAST:
+ case CORINFO_HELP_NEWSFAST:
+ case CORINFO_HELP_NEWSFAST_ALIGN8:
+ vnf = VNF_JitNew;
+ break;
- case CORINFO_HELP_READYTORUN_NEW:
- vnf = VNF_JitReadyToRunNew;
- break;
+ case CORINFO_HELP_READYTORUN_NEW:
+ vnf = VNF_JitReadyToRunNew;
+ break;
- case CORINFO_HELP_NEWARR_1_DIRECT:
- case CORINFO_HELP_NEWARR_1_OBJ:
- case CORINFO_HELP_NEWARR_1_VC:
- case CORINFO_HELP_NEWARR_1_ALIGN8:
- vnf = VNF_JitNewArr;
- break;
+ case CORINFO_HELP_NEWARR_1_DIRECT:
+ case CORINFO_HELP_NEWARR_1_OBJ:
+ case CORINFO_HELP_NEWARR_1_VC:
+ case CORINFO_HELP_NEWARR_1_ALIGN8:
+ vnf = VNF_JitNewArr;
+ break;
- case CORINFO_HELP_READYTORUN_NEWARR_1:
- vnf = VNF_JitReadyToRunNewArr;
- break;
+ case CORINFO_HELP_READYTORUN_NEWARR_1:
+ vnf = VNF_JitReadyToRunNewArr;
+ break;
- case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
- vnf = VNF_GetgenericsGcstaticBase; break;
- case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
- vnf = VNF_GetgenericsNongcstaticBase; break;
- case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
- vnf = VNF_GetsharedGcstaticBase; break;
- case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE:
- vnf = VNF_GetsharedNongcstaticBase; break;
- case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
- vnf = VNF_GetsharedGcstaticBaseNoctor; break;
- case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR:
- vnf = VNF_GetsharedNongcstaticBaseNoctor; break;
- case CORINFO_HELP_READYTORUN_STATIC_BASE:
- vnf = VNF_ReadyToRunStaticBase; break;
- case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
- vnf = VNF_GetsharedGcstaticBaseDynamicclass; break;
- case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS:
- vnf = VNF_GetsharedNongcstaticBaseDynamicclass; break;
- case CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS:
- vnf = VNF_ClassinitSharedDynamicclass; break;
- case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
- vnf = VNF_GetgenericsGcthreadstaticBase; break;
- case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
- vnf = VNF_GetgenericsNongcthreadstaticBase; break;
- case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
- vnf = VNF_GetsharedGcthreadstaticBase; break;
- case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE:
- vnf = VNF_GetsharedNongcthreadstaticBase; break;
- case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
- vnf = VNF_GetsharedGcthreadstaticBaseNoctor; break;
- case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR:
- vnf = VNF_GetsharedNongcthreadstaticBaseNoctor; break;
- case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
- vnf = VNF_GetsharedGcthreadstaticBaseDynamicclass; break;
- case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS:
- vnf = VNF_GetsharedNongcthreadstaticBaseDynamicclass; break;
- case CORINFO_HELP_GETSTATICFIELDADDR_CONTEXT:
- vnf = VNF_GetStaticAddrContext; break;
- case CORINFO_HELP_GETSTATICFIELDADDR_TLS:
- vnf = VNF_GetStaticAddrTLS; break;
-
- case CORINFO_HELP_RUNTIMEHANDLE_METHOD:
- case CORINFO_HELP_RUNTIMEHANDLE_METHOD_LOG:
- vnf = VNF_RuntimeHandleMethod; break;
-
- case CORINFO_HELP_RUNTIMEHANDLE_CLASS:
- case CORINFO_HELP_RUNTIMEHANDLE_CLASS_LOG:
- vnf = VNF_RuntimeHandleClass; break;
-
- case CORINFO_HELP_STRCNS:
- vnf = VNF_StrCns; break;
-
- case CORINFO_HELP_CHKCASTCLASS:
- case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
- case CORINFO_HELP_CHKCASTARRAY:
- case CORINFO_HELP_CHKCASTINTERFACE:
- case CORINFO_HELP_CHKCASTANY:
- vnf = VNF_CastClass; break;
-
- case CORINFO_HELP_READYTORUN_CHKCAST:
- vnf = VNF_ReadyToRunCastClass; break;
-
- case CORINFO_HELP_ISINSTANCEOFCLASS:
- case CORINFO_HELP_ISINSTANCEOFINTERFACE:
- case CORINFO_HELP_ISINSTANCEOFARRAY:
- case CORINFO_HELP_ISINSTANCEOFANY:
- vnf = VNF_IsInstanceOf; break;
-
- case CORINFO_HELP_READYTORUN_ISINSTANCEOF:
- vnf = VNF_ReadyToRunIsInstanceOf; break;
-
- case CORINFO_HELP_LDELEMA_REF:
- vnf = VNF_LdElemA; break;
-
- case CORINFO_HELP_UNBOX:
- vnf = VNF_Unbox; break;
+ case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
+ vnf = VNF_GetgenericsGcstaticBase;
+ break;
+ case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
+ vnf = VNF_GetgenericsNongcstaticBase;
+ break;
+ case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
+ vnf = VNF_GetsharedGcstaticBase;
+ break;
+ case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE:
+ vnf = VNF_GetsharedNongcstaticBase;
+ break;
+ case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
+ vnf = VNF_GetsharedGcstaticBaseNoctor;
+ break;
+ case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR:
+ vnf = VNF_GetsharedNongcstaticBaseNoctor;
+ break;
+ case CORINFO_HELP_READYTORUN_STATIC_BASE:
+ vnf = VNF_ReadyToRunStaticBase;
+ break;
+ case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
+ vnf = VNF_GetsharedGcstaticBaseDynamicclass;
+ break;
+ case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS:
+ vnf = VNF_GetsharedNongcstaticBaseDynamicclass;
+ break;
+ case CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS:
+ vnf = VNF_ClassinitSharedDynamicclass;
+ break;
+ case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
+ vnf = VNF_GetgenericsGcthreadstaticBase;
+ break;
+ case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
+ vnf = VNF_GetgenericsNongcthreadstaticBase;
+ break;
+ case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
+ vnf = VNF_GetsharedGcthreadstaticBase;
+ break;
+ case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE:
+ vnf = VNF_GetsharedNongcthreadstaticBase;
+ break;
+ case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
+ vnf = VNF_GetsharedGcthreadstaticBaseNoctor;
+ break;
+ case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR:
+ vnf = VNF_GetsharedNongcthreadstaticBaseNoctor;
+ break;
+ case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
+ vnf = VNF_GetsharedGcthreadstaticBaseDynamicclass;
+ break;
+ case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS:
+ vnf = VNF_GetsharedNongcthreadstaticBaseDynamicclass;
+ break;
+ case CORINFO_HELP_GETSTATICFIELDADDR_CONTEXT:
+ vnf = VNF_GetStaticAddrContext;
+ break;
+ case CORINFO_HELP_GETSTATICFIELDADDR_TLS:
+ vnf = VNF_GetStaticAddrTLS;
+ break;
+
+ case CORINFO_HELP_RUNTIMEHANDLE_METHOD:
+ case CORINFO_HELP_RUNTIMEHANDLE_METHOD_LOG:
+ vnf = VNF_RuntimeHandleMethod;
+ break;
+
+ case CORINFO_HELP_RUNTIMEHANDLE_CLASS:
+ case CORINFO_HELP_RUNTIMEHANDLE_CLASS_LOG:
+ vnf = VNF_RuntimeHandleClass;
+ break;
+
+ case CORINFO_HELP_STRCNS:
+ vnf = VNF_StrCns;
+ break;
+
+ case CORINFO_HELP_CHKCASTCLASS:
+ case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
+ case CORINFO_HELP_CHKCASTARRAY:
+ case CORINFO_HELP_CHKCASTINTERFACE:
+ case CORINFO_HELP_CHKCASTANY:
+ vnf = VNF_CastClass;
+ break;
+
+ case CORINFO_HELP_READYTORUN_CHKCAST:
+ vnf = VNF_ReadyToRunCastClass;
+ break;
+
+ case CORINFO_HELP_ISINSTANCEOFCLASS:
+ case CORINFO_HELP_ISINSTANCEOFINTERFACE:
+ case CORINFO_HELP_ISINSTANCEOFARRAY:
+ case CORINFO_HELP_ISINSTANCEOFANY:
+ vnf = VNF_IsInstanceOf;
+ break;
+
+ case CORINFO_HELP_READYTORUN_ISINSTANCEOF:
+ vnf = VNF_ReadyToRunIsInstanceOf;
+ break;
+
+ case CORINFO_HELP_LDELEMA_REF:
+ vnf = VNF_LdElemA;
+ break;
+
+ case CORINFO_HELP_UNBOX:
+ vnf = VNF_Unbox;
+ break;
// A constant within any method.
- case CORINFO_HELP_GETCURRENTMANAGEDTHREADID:
- vnf = VNF_ManagedThreadId; break;
+ case CORINFO_HELP_GETCURRENTMANAGEDTHREADID:
+ vnf = VNF_ManagedThreadId;
+ break;
- case CORINFO_HELP_GETREFANY:
- // TODO-CQ: This should really be interpreted as just a struct field reference, in terms of values.
- vnf = VNF_GetRefanyVal; break;
-
- case CORINFO_HELP_GETCLASSFROMMETHODPARAM:
- vnf = VNF_GetClassFromMethodParam; break;
+ case CORINFO_HELP_GETREFANY:
+ // TODO-CQ: This should really be interpreted as just a struct field reference, in terms of values.
+ vnf = VNF_GetRefanyVal;
+ break;
- case CORINFO_HELP_GETSYNCFROMCLASSHANDLE:
- vnf = VNF_GetSyncFromClassHandle; break;
+ case CORINFO_HELP_GETCLASSFROMMETHODPARAM:
+ vnf = VNF_GetClassFromMethodParam;
+ break;
- case CORINFO_HELP_LOOP_CLONE_CHOICE_ADDR:
- vnf = VNF_LoopCloneChoiceAddr; break;
+ case CORINFO_HELP_GETSYNCFROMCLASSHANDLE:
+ vnf = VNF_GetSyncFromClassHandle;
+ break;
- case CORINFO_HELP_BOX_NULLABLE:
- vnf = VNF_BoxNullable; break;
+ case CORINFO_HELP_LOOP_CLONE_CHOICE_ADDR:
+ vnf = VNF_LoopCloneChoiceAddr;
+ break;
+
+ case CORINFO_HELP_BOX_NULLABLE:
+ vnf = VNF_BoxNullable;
+ break;
- default:
- unreached();
+ default:
+ unreached();
}
assert(vnf != VNF_Boundary);
return vnf;
}
-
bool Compiler::fgValueNumberHelperCall(GenTreeCall* call)
{
- CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd);
- bool pure = s_helperCallProperties.IsPure(helpFunc);
- bool isAlloc = s_helperCallProperties.IsAllocator(helpFunc);
- bool modHeap = s_helperCallProperties.MutatesHeap(helpFunc);
- bool mayRunCctor = s_helperCallProperties.MayRunCctor(helpFunc);
- bool noThrow = s_helperCallProperties.NoThrow(helpFunc);
+ CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd);
+ bool pure = s_helperCallProperties.IsPure(helpFunc);
+ bool isAlloc = s_helperCallProperties.IsAllocator(helpFunc);
+ bool modHeap = s_helperCallProperties.MutatesHeap(helpFunc);
+ bool mayRunCctor = s_helperCallProperties.MayRunCctor(helpFunc);
+ bool noThrow = s_helperCallProperties.NoThrow(helpFunc);
ValueNumPair vnpExc = ValueNumStore::VNPForEmptyExcSet();
@@ -6870,21 +7147,21 @@ bool Compiler::fgValueNumberHelperCall(GenTreeCall* call)
//
switch (helpFunc)
{
- case CORINFO_HELP_OVERFLOW:
- // This helper always throws the VNF_OverflowExc exception
- vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc));
- break;
+ case CORINFO_HELP_OVERFLOW:
+ // This helper always throws the VNF_OverflowExc exception
+ vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc));
+ break;
- default:
- // Setup vnpExc with the information that multiple different exceptions
- // could be generated by this helper
- vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_HelperMultipleExc));
+ default:
+ // Setup vnpExc with the information that multiple different exceptions
+ // could be generated by this helper
+ vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_HelperMultipleExc));
}
}
ValueNumPair vnpNorm;
- if (call->TypeGet() == TYP_VOID)
+ if (call->TypeGet() == TYP_VOID)
{
vnpNorm = ValueNumStore::VNPForVoid();
}
@@ -6895,12 +7172,12 @@ bool Compiler::fgValueNumberHelperCall(GenTreeCall* call)
bool needsFurtherWork = false;
switch (helpFunc)
{
- case CORINFO_HELP_NEW_MDARR:
- // This is a varargs helper. We need to represent the array shape in the VN world somehow.
- needsFurtherWork = true;
- break;
- default:
- break;
+ case CORINFO_HELP_NEW_MDARR:
+ // This is a varargs helper. We need to represent the array shape in the VN world somehow.
+ needsFurtherWork = true;
+ break;
+ default:
+ break;
}
if (!needsFurtherWork && (pure || isAlloc))
@@ -6914,7 +7191,7 @@ bool Compiler::fgValueNumberHelperCall(GenTreeCall* call)
modHeap = true;
}
}
-
+
fgValueNumberHelperCallFunc(call, vnf, vnpExc);
return modHeap;
}
@@ -6934,11 +7211,14 @@ bool Compiler::fgValueNumberHelperCall(GenTreeCall* call)
// TODO-Cleanup: new JitTestLabels for lib vs cons vs both VN classes?
void Compiler::JitTestCheckVN()
{
- typedef SimplerHashTable<ssize_t, SmallPrimitiveKeyFuncs<ssize_t>, ValueNum, JitSimplerHashBehavior> LabelToVNMap;
+ typedef SimplerHashTable<ssize_t, SmallPrimitiveKeyFuncs<ssize_t>, ValueNum, JitSimplerHashBehavior> LabelToVNMap;
typedef SimplerHashTable<ValueNum, SmallPrimitiveKeyFuncs<ValueNum>, ssize_t, JitSimplerHashBehavior> VNToLabelMap;
// If we have no test data, early out.
- if (m_nodeTestData == NULL) return;
+ if (m_nodeTestData == nullptr)
+ {
+ return;
+ }
NodeToTestDataMap* testData = GetNodeTestData();
@@ -6956,8 +7236,8 @@ void Compiler::JitTestCheckVN()
for (NodeToTestDataMap::KeyIterator ki = testData->Begin(); !ki.Equal(testData->End()); ++ki)
{
TestLabelAndNum tlAndN;
- GenTreePtr node = ki.Get();
- ValueNum nodeVN = node->GetVN(VNK_Liberal);
+ GenTreePtr node = ki.Get();
+ ValueNum nodeVN = node->GetVN(VNK_Liberal);
bool b = testData->Lookup(node, &tlAndN);
assert(b);
@@ -6968,7 +7248,8 @@ void Compiler::JitTestCheckVN()
{
printf("Node ");
Compiler::printTreeID(node);
- printf(" had a test constraint declared, but has become unreachable at the time the constraint is tested.\n"
+ printf(" had a test constraint declared, but has become unreachable at the time the constraint is "
+ "tested.\n"
"(This is probably as a result of some optimization -- \n"
"you may need to modify the test case to defeat this opt.)\n");
assert(false);
@@ -6982,7 +7263,9 @@ void Compiler::JitTestCheckVN()
}
if (tlAndN.m_tl == TL_VNNorm)
+ {
nodeVN = vnStore->VNNormVal(nodeVN);
+ }
ValueNum vn;
if (labelToVN->Lookup(tlAndN.m_num, &vn))
@@ -6993,15 +7276,15 @@ void Compiler::JitTestCheckVN()
}
// The mapping(s) must be one-to-one: if the label has a mapping, then the ssaNm must, as well.
ssize_t num2;
- bool b = vnToLabel->Lookup(vn, &num2);
+ bool b = vnToLabel->Lookup(vn, &num2);
// And the mappings must be the same.
if (tlAndN.m_num != num2)
{
printf("Node: ");
Compiler::printTreeID(node);
- printf(", with value number " STR_VN "%x, was declared in VN class %d,\n",
- nodeVN, tlAndN.m_num);
- printf("but this value number " STR_VN "%x has already been associated with a different SSA name class: %d.\n",
+ printf(", with value number " STR_VN "%x, was declared in VN class %d,\n", nodeVN, tlAndN.m_num);
+ printf("but this value number " STR_VN
+ "%x has already been associated with a different SSA name class: %d.\n",
vn, num2);
assert(false);
}
@@ -7010,10 +7293,8 @@ void Compiler::JitTestCheckVN()
{
printf("Node: ");
Compiler::printTreeID(node);
- printf(", " STR_VN "%x was declared in SSA name class %d,\n",
- nodeVN, tlAndN.m_num);
- printf("but that name class was previously bound to a different value number: " STR_VN "%x.\n",
- vn);
+ printf(", " STR_VN "%x was declared in SSA name class %d,\n", nodeVN, tlAndN.m_num);
+ printf("but that name class was previously bound to a different value number: " STR_VN "%x.\n", vn);
assert(false);
}
}
@@ -7025,14 +7306,15 @@ void Compiler::JitTestCheckVN()
{
printf("Node: ");
Compiler::printTreeID(node);
- printf(", " STR_VN "%x was declared in value number class %d,\n",
- nodeVN, tlAndN.m_num);
- printf("but this value number has already been associated with a different value number class: %d.\n", num);
+ printf(", " STR_VN "%x was declared in value number class %d,\n", nodeVN, tlAndN.m_num);
+ printf(
+ "but this value number has already been associated with a different value number class: %d.\n",
+ num);
assert(false);
}
// Add to both mappings.
labelToVN->Set(tlAndN.m_num, nodeVN);
- vnToLabel->Set(nodeVN, tlAndN.m_num);
+ vnToLabel->Set(nodeVN, tlAndN.m_num);
if (verbose)
{
printf(" added to hash tables.\n");
@@ -7060,7 +7342,7 @@ void Compiler::vnpPrint(ValueNumPair vnp, unsigned level)
void Compiler::vnPrint(ValueNum vn, unsigned level)
{
-
+
if (ValueNumStore::isReservedVN(vn))
{
printf(ValueNumStore::reservedName(vn));
@@ -7070,7 +7352,7 @@ void Compiler::vnPrint(ValueNum vn, unsigned level)
printf(STR_VN "%x", vn);
if (level > 0)
{
- vnStore->vnDump(this, vn);
+ vnStore->vnDump(this, vn);
}
}
}
@@ -7078,7 +7360,9 @@ void Compiler::vnPrint(ValueNum vn, unsigned level)
#endif // DEBUG
// Methods of ValueNumPair.
-ValueNumPair::ValueNumPair() : m_liberal(ValueNumStore::NoVN), m_conservative(ValueNumStore::NoVN) {}
+ValueNumPair::ValueNumPair() : m_liberal(ValueNumStore::NoVN), m_conservative(ValueNumStore::NoVN)
+{
+}
bool ValueNumPair::BothDefined() const
{
diff --git a/src/jit/valuenum.h b/src/jit/valuenum.h
index 49eb845694..f792d281ac 100644
--- a/src/jit/valuenum.h
+++ b/src/jit/valuenum.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
// Defines the class "ValueNumStore", which maintains value numbers for a compilation.
// Recall that "value numbering" assigns an integer value number to each expression. The "value
@@ -21,7 +20,6 @@
// after a dereference (since control flow continued because no exception was thrown); that an integer value
// is restricted to some subrange after a comparison test; etc.
-
/*****************************************************************************/
#ifndef _VALUENUM_H_
#define _VALUENUM_H_
@@ -62,21 +60,29 @@ struct VNFuncApp
bool Equals(const VNFuncApp& funcApp)
{
- if (m_func != funcApp.m_func) return false;
- if (m_arity != funcApp.m_arity) return false;
+ if (m_func != funcApp.m_func)
+ {
+ return false;
+ }
+ if (m_arity != funcApp.m_arity)
+ {
+ return false;
+ }
for (unsigned i = 0; i < m_arity; i++)
{
- if (m_args[i] != funcApp.m_args[i]) return false;
+ if (m_args[i] != funcApp.m_args[i])
+ {
+ return false;
+ }
}
return true;
}
};
// A unique prefix character to use when dumping a tree's gtVN in the tree dumps
-// We use this together with string concatenation to put this in printf format strings
+// We use this together with string concatenation to put this in printf format strings
// static const char* const VN_DumpPrefix = "$";
-#define STR_VN "$"
-
+#define STR_VN "$"
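As the comment above says, STR_VN is glued onto printf format strings through adjacent string-literal concatenation (the .cpp hunks above use exactly this pattern, e.g. printf(STR_VN "%x", vn)). A minimal standalone sketch, assuming nothing beyond the macro itself; the vn value is made up for illustration:

    // STR_VN composes with printf formats via string-literal concatenation.
    #include <cstdio>

    #define STR_VN "$"

    int main()
    {
        unsigned vn = 0x2a;                              // illustrative value number
        printf("tree has gtVN " STR_VN "%x\n", vn);      // prints: tree has gtVN $2a
        return 0;
    }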
class ValueNumStore
{
@@ -85,28 +91,30 @@ public:
// We will reserve "max unsigned" to represent "not a value number", for maps that might start uninitialized.
static const ValueNum NoVN = UINT32_MAX;
// A second special value, used to indicate that a function evaluation would cause infinite recursion.
- static const ValueNum RecursiveVN = UINT32_MAX-1;
+ static const ValueNum RecursiveVN = UINT32_MAX - 1;
// ==================================================================================================
// VNMap - map from something to ValueNum, where something is typically a constant value or a VNFunc
// This class has two purposes - to abstract the implementation and to validate the ValueNums
// being stored or retrieved.
- template <class fromType, class keyfuncs=LargePrimitiveKeyFuncs<fromType>>
- class VNMap : public SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>
+ template <class fromType, class keyfuncs = LargePrimitiveKeyFuncs<fromType>>
+ class VNMap : public SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>
{
public:
- VNMap(IAllocator* alloc) : SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>(alloc) {}
+ VNMap(IAllocator* alloc) : SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>(alloc)
+ {
+ }
~VNMap()
{
~VNMap<fromType, keyfuncs>::SimplerHashTable();
}
-
+
bool Set(fromType k, ValueNum val)
{
assert(val != RecursiveVN);
return SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>::Set(k, val);
}
- bool Lookup(fromType k, ValueNum* pVal = NULL) const
+ bool Lookup(fromType k, ValueNum* pVal = nullptr) const
{
bool result = SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>::Lookup(k, pVal);
assert(!result || *pVal != RecursiveVN);
@@ -114,8 +122,6 @@ public:
}
};
-
-
private:
Compiler* m_pComp;
@@ -124,31 +130,30 @@ private:
// TODO-Cleanup: should transform "attribs" into a struct with bit fields. That would be simpler...
- enum VNFOpAttrib
+ enum VNFOpAttrib
{
- VNFOA_IllegalGenTreeOp = 0x1, // corresponds to a genTreeOps value that is not a legal VN func.
- VNFOA_Commutative = 0x2, // 1 iff the function is commutative.
- VNFOA_Arity = 0x4, // Bits 2..3 encode the arity.
- VNFOA_AfterArity = 0x20, // Makes it clear what value the next flag(s) after Arity should have.
- VNFOA_KnownNonNull = 0x20, // 1 iff the result is known to be non-null.
- VNFOA_SharedStatic = 0x40, // 1 iff this VNF is represent one of the shared static jit helpers
+ VNFOA_IllegalGenTreeOp = 0x1, // corresponds to a genTreeOps value that is not a legal VN func.
+ VNFOA_Commutative = 0x2, // 1 iff the function is commutative.
+ VNFOA_Arity = 0x4, // Bits 2..3 encode the arity.
+ VNFOA_AfterArity = 0x20, // Makes it clear what value the next flag(s) after Arity should have.
+ VNFOA_KnownNonNull = 0x20, // 1 iff the result is known to be non-null.
+ VNFOA_SharedStatic = 0x40, // 1 iff this VNF represents one of the shared static jit helpers
};
static const unsigned VNFOA_ArityShift = 2;
- static const unsigned VNFOA_ArityBits = 3;
- static const unsigned VNFOA_MaxArity = (1 << VNFOA_ArityBits) - 1; // Max arity we can represent.
- static const unsigned VNFOA_ArityMask = VNFOA_AfterArity - VNFOA_Arity;
+ static const unsigned VNFOA_ArityBits = 3;
+ static const unsigned VNFOA_MaxArity = (1 << VNFOA_ArityBits) - 1; // Max arity we can represent.
+ static const unsigned VNFOA_ArityMask = VNFOA_AfterArity - VNFOA_Arity;
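The constants above pack a VN function's arity into the attribute byte: the arity occupies the bits selected by VNFOA_ArityMask, starting at VNFOA_ArityShift. A minimal sketch of reading the packed value back, using only the constants shown in this hunk; the GetArity helper is hypothetical, not part of the JIT:

    // Hypothetical accessor for the arity encoding implied by the constants above.
    #include <cassert>

    enum VNFOpAttrib
    {
        VNFOA_IllegalGenTreeOp = 0x1,
        VNFOA_Commutative      = 0x2,
        VNFOA_Arity            = 0x4,
        VNFOA_AfterArity       = 0x20,
    };

    static const unsigned VNFOA_ArityShift = 2;
    static const unsigned VNFOA_ArityBits  = 3;
    static const unsigned VNFOA_MaxArity   = (1 << VNFOA_ArityBits) - 1;        // 7
    static const unsigned VNFOA_ArityMask  = VNFOA_AfterArity - VNFOA_Arity;    // 0x1C

    unsigned GetArity(unsigned char attribs)
    {
        return (attribs & VNFOA_ArityMask) >> VNFOA_ArityShift;
    }

    int main()
    {
        // A commutative binary function: arity 2 packed into the arity field.
        unsigned char attribs = VNFOA_Commutative | (2 << VNFOA_ArityShift);
        assert(GetArity(attribs) == 2);
        assert(VNFOA_MaxArity == 7);
        return 0;
    }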
// These enum constants are used to encode the cast operation in the lowest bits by VNForCastOper
enum VNFCastAttrib
{
- VCA_UnsignedSrc = 0x01,
+ VCA_UnsignedSrc = 0x01,
- VCA_BitCount = 1, // the number of reserved bits
- VCA_ReservedBits = 0x01, // i.e. (VCA_UnsignedSrc)
+ VCA_BitCount = 1, // the number of reserved bits
+ VCA_ReservedBits = 0x01, // i.e. (VCA_UnsignedSrc)
};
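The comment above this enum says VNForCastOper packs the cast operation into the low bits, but the packing itself is not shown in this diff. The sketch below is therefore only an assumed shape of that encoding (PackCastOper/UnpackCastOper are hypothetical names, not the JIT's actual code): the srcIsUnsigned flag sits in the single reserved low bit, and the target type is shifted above it by VCA_BitCount.

    // Assumed packing; NOT taken from the actual VNForCastOper implementation.
    #include <cassert>

    enum VNFCastAttrib
    {
        VCA_UnsignedSrc  = 0x01,
        VCA_BitCount     = 1,    // the number of reserved bits
        VCA_ReservedBits = 0x01, // i.e. (VCA_UnsignedSrc)
    };

    int PackCastOper(int castToType, bool srcIsUnsigned)
    {
        return (castToType << VCA_BitCount) | (srcIsUnsigned ? VCA_UnsignedSrc : 0);
    }

    void UnpackCastOper(int packed, int* castToType, bool* srcIsUnsigned)
    {
        *srcIsUnsigned = (packed & VCA_UnsignedSrc) != 0;
        *castToType    = packed >> VCA_BitCount;
    }

    int main()
    {
        int  ty;
        bool uns;
        UnpackCastOper(PackCastOper(7 /* hypothetical type code */, true), &ty, &uns);
        assert(ty == 7 && uns);
        return 0;
    }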
-
// An array of length GT_COUNT, mapping genTreeOp values to their VNFOpAttrib.
static UINT8* s_vnfOpAttribs;
@@ -167,39 +172,38 @@ private:
static bool CanEvalForConstantArgs(VNFunc vnf);
// return vnf(v0)
- template<typename T>
+ template <typename T>
static T EvalOp(VNFunc vnf, T v0);
// If vnf(v0, v1) would raise an exception, sets *pExcSet to the singleton set containing the exception, and
// returns (T)0. Otherwise, returns vnf(v0, v1).
- template<typename T>
+ template <typename T>
T EvalOp(VNFunc vnf, T v0, T v1, ValueNum* pExcSet);
- template<typename T>
+ template <typename T>
static int EvalComparison(VNFunc vnf, T v0, T v1);
- template<typename T>
+ template <typename T>
static int EvalOrderedComparisonFloat(VNFunc vnf, T v0, T v1);
// return vnf(v0) or vnf(v0, v1), respectively (must, of course be unary/binary ops, respectively.)
// Should only be instantiated for integral types.
- template<typename T>
+ template <typename T>
static T EvalOpIntegral(VNFunc vnf, T v0);
- template<typename T>
+ template <typename T>
T EvalOpIntegral(VNFunc vnf, T v0, T v1, ValueNum* pExcSet);
// Should only instantiate (in a non-trivial way) for "int" and "INT64". Returns true iff dividing "v0" by "v1"
// would produce integer overflow (an ArithmeticException -- *not* division by zero, which is separate.)
- template<typename T>
+ template <typename T>
static bool IsOverflowIntDiv(T v0, T v1);
- // Should only instantiate (in a non-trivial way) for integral types (signed/unsigned int32/int64).
+ // Should only instantiate (in a non-trivial way) for integral types (signed/unsigned int32/int64).
// Returns true iff v is the zero of the appropriate type.
- template<typename T>
+ template <typename T>
static bool IsIntZero(T v);
-
// Given an constant value number return its value.
- int GetConstantInt32(ValueNum argVN);
- INT64 GetConstantInt64(ValueNum argVN);
+ int GetConstantInt32(ValueNum argVN);
+ INT64 GetConstantInt64(ValueNum argVN);
double GetConstantDouble(ValueNum argVN);
// Assumes that all the ValueNum arguments of each of these functions have been shown to represent constants.
@@ -228,7 +232,7 @@ public:
// Initialize an empty ValueNumStore.
ValueNumStore(Compiler* comp, IAllocator* allocator);
-
+
// Returns "true" iff "vnf" (which may have been created by a cast from an integral value) represents
// a legal value number function.
// (Requires InitValueNumStoreStatics to have been run.)
@@ -257,17 +261,23 @@ public:
ValueNum VNForIntCon(INT32 cnsVal);
ValueNum VNForLongCon(INT64 cnsVal);
- ValueNum VNForFloatCon(float cnsVal);
+ ValueNum VNForFloatCon(float cnsVal);
ValueNum VNForDoubleCon(double cnsVal);
ValueNum VNForByrefCon(INT64 byrefVal);
#ifdef _TARGET_64BIT_
- ValueNum VNForPtrSizeIntCon(INT64 cnsVal) { return VNForLongCon(cnsVal); }
+ ValueNum VNForPtrSizeIntCon(INT64 cnsVal)
+ {
+ return VNForLongCon(cnsVal);
+ }
#else
- ValueNum VNForPtrSizeIntCon(INT32 cnsVal) { return VNForIntCon(cnsVal); }
+ ValueNum VNForPtrSizeIntCon(INT32 cnsVal)
+ {
+ return VNForIntCon(cnsVal);
+ }
#endif
- ValueNum VNForCastOper(var_types castToType, bool srcIsUnsigned = false);
+ ValueNum VNForCastOper(var_types castToType, bool srcIsUnsigned = false);
// We keep handle values in a separate pool, so we don't confuse a handle with an int constant
// that happens to be the same...
@@ -283,7 +293,7 @@ public:
// The zero map is the map that returns a zero "for the appropriate type" when indexed at any index.
static ValueNum VNForZeroMap()
{
- // We reserve Chunk 0 for "special" VNs. Let SRC_ZeroMap (== 1) be the zero map.
+ // We reserve Chunk 0 for "special" VNs. Let SRC_ZeroMap (== 1) be the zero map.
return ValueNum(SRC_ZeroMap);
}
@@ -298,7 +308,7 @@ public:
// has the same value number.
static ValueNum VNForROH()
{
- // We reserve Chunk 0 for "special" VNs. Let SRC_ReadOnlyHeap (== 3) be the read-only heap.
+ // We reserve Chunk 0 for "special" VNs. Let SRC_ReadOnlyHeap (== 3) be the read-only heap.
return ValueNum(SRC_ReadOnlyHeap);
}
@@ -306,7 +316,7 @@ public:
// GT_LIST, and we want the args to be non-NoVN.
static ValueNum VNForVoid()
{
- // We reserve Chunk 0 for "special" VNs. Let SRC_Void (== 4) be the value for "void".
+ // We reserve Chunk 0 for "special" VNs. Let SRC_Void (== 4) be the value for "void".
return ValueNum(SRC_Void);
}
static ValueNumPair VNPForVoid()
@@ -317,7 +327,8 @@ public:
// A special value number for the empty set of exceptions.
static ValueNum VNForEmptyExcSet()
{
- // We reserve Chunk 0 for "special" VNs. Let SRC_EmptyExcSet (== 5) be the value for the empty set of exceptions.
+ // We reserve Chunk 0 for "special" VNs. Let SRC_EmptyExcSet (== 5) be the value for the empty set of
+ // exceptions.
return ValueNum(SRC_EmptyExcSet);
}
static ValueNumPair VNPForEmptyExcSet()
@@ -378,9 +389,9 @@ public:
bool IsKnownNonNull(ValueNum vn);
// True "iff" vn is a value returned by a call to a shared static helper.
- bool IsSharedStatic(ValueNum vn);
+ bool IsSharedStatic(ValueNum vn);
- // VN's for functions of other values.
+ // VN's for functions of other values.
// Four overloads, for arities 0, 1, 2, and 3. If we need other arities, we'll consider it.
ValueNum VNForFunc(var_types typ, VNFunc func);
ValueNum VNForFunc(var_types typ, VNFunc func, ValueNum opVNwx);
@@ -389,7 +400,8 @@ public:
ValueNum VNForFunc(var_types typ, VNFunc func, ValueNum op1VNwx, ValueNum op2VNwx, ValueNum op3VNwx);
// The following four op VNForFunc is only used for VNF_PtrToArrElem, elemTypeEqVN, arrVN, inxVN, fldSeqVN
- ValueNum VNForFunc(var_types typ, VNFunc func, ValueNum op1VNwx, ValueNum op2VNwx, ValueNum op3VNwx, ValueNum op4VNwx);
+ ValueNum VNForFunc(
+ var_types typ, VNFunc func, ValueNum op1VNwx, ValueNum op2VNwx, ValueNum op3VNwx, ValueNum op4VNwx);
// This requires a "ValueNumKind" because it will attempt, given "select(phi(m1, ..., mk), ind)", to evaluate
// "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number
@@ -397,12 +409,8 @@ public:
ValueNum VNForMapSelect(ValueNumKind vnk, var_types typ, ValueNum op1VN, ValueNum op2VN);
// A method that does the work for VNForMapSelect and may call itself recursively.
- ValueNum VNForMapSelectWork(ValueNumKind vnk,
- var_types typ,
- ValueNum op1VN,
- ValueNum op2VN,
- unsigned* pBudget,
- bool* pUsedRecursiveVN);
+ ValueNum VNForMapSelectWork(
+ ValueNumKind vnk, var_types typ, ValueNum op1VN, ValueNum op2VN, unsigned* pBudget, bool* pUsedRecursiveVN);
// A specialized version of VNForFunc that is used for VNF_MapStore and provides some logging when verbose is set
ValueNum VNForMapStore(var_types typ, ValueNum arg0VN, ValueNum arg1VN, ValueNum arg2VN);
@@ -418,8 +426,7 @@ public:
}
ValueNumPair VNPairForFunc(var_types typ, VNFunc func, ValueNumPair opVN)
{
- return ValueNumPair(VNForFunc(typ, func, opVN.GetLiberal()),
- VNForFunc(typ, func, opVN.GetConservative()));
+ return ValueNumPair(VNForFunc(typ, func, opVN.GetLiberal()), VNForFunc(typ, func, opVN.GetConservative()));
}
ValueNumPair VNPairForFunc(var_types typ, VNFunc func, ValueNumPair op1VN, ValueNumPair op2VN)
{
@@ -429,12 +436,16 @@ public:
ValueNumPair VNPairForFunc(var_types typ, VNFunc func, ValueNumPair op1VN, ValueNumPair op2VN, ValueNumPair op3VN)
{
return ValueNumPair(VNForFunc(typ, func, op1VN.GetLiberal(), op2VN.GetLiberal(), op3VN.GetLiberal()),
- VNForFunc(typ, func, op1VN.GetConservative(), op2VN.GetConservative(), op3VN.GetConservative()));
+ VNForFunc(typ, func, op1VN.GetConservative(), op2VN.GetConservative(),
+ op3VN.GetConservative()));
}
- ValueNumPair VNPairForFunc(var_types typ, VNFunc func, ValueNumPair op1VN, ValueNumPair op2VN, ValueNumPair op3VN, ValueNumPair op4VN)
+ ValueNumPair VNPairForFunc(
+ var_types typ, VNFunc func, ValueNumPair op1VN, ValueNumPair op2VN, ValueNumPair op3VN, ValueNumPair op4VN)
{
- return ValueNumPair(VNForFunc(typ, func, op1VN.GetLiberal(), op2VN.GetLiberal(), op3VN.GetLiberal(), op4VN.GetLiberal()),
- VNForFunc(typ, func, op1VN.GetConservative(), op2VN.GetConservative(), op3VN.GetConservative(), op4VN.GetConservative()));
+ return ValueNumPair(VNForFunc(typ, func, op1VN.GetLiberal(), op2VN.GetLiberal(), op3VN.GetLiberal(),
+ op4VN.GetLiberal()),
+ VNForFunc(typ, func, op1VN.GetConservative(), op2VN.GetConservative(),
+ op3VN.GetConservative(), op4VN.GetConservative()));
}
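All of the VNPairForFunc overloads being rewrapped here follow one pattern: a ValueNumPair carries a liberal and a conservative value number, and the pair-level function applies the scalar VNForFunc to each half independently. A stripped-down sketch of that componentwise pattern, with simplified stand-in types rather than the JIT's:

    // Liberal/conservative "apply componentwise" pattern; names simplified.
    #include <cassert>

    typedef unsigned ValueNum;

    struct ValueNumPair
    {
        ValueNum m_liberal;
        ValueNum m_conservative;
    };

    // Stand-in for the scalar VNForFunc.
    ValueNum ScalarFunc(ValueNum vn)
    {
        return vn + 1;
    }

    ValueNumPair PairForFunc(ValueNumPair op)
    {
        ValueNumPair result;
        result.m_liberal      = ScalarFunc(op.m_liberal);      // liberal half
        result.m_conservative = ScalarFunc(op.m_conservative); // conservative half
        return result;
    }

    int main()
    {
        ValueNumPair p = {10, 20};
        ValueNumPair q = PairForFunc(p);
        assert(q.m_liberal == 11 && q.m_conservative == 21);
        return 0;
    }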
// Get a new, unique value number for an expression that we're not equating to some function.
@@ -445,14 +456,17 @@ public:
// Return the value number corresponding to constructing "MapSelect(map, f0)", where "f0" is the
// (value number of) the first field in "fieldSeq". (The type of this application will be the type of "f0".)
- // If there are no remaining fields in "fieldSeq", return that value number; otherwise, return VNApplySelectors
- // applied to that value number and the remainder of "fieldSeq". When the 'fieldSeq' specifies a TYP_STRUCT
+ // If there are no remaining fields in "fieldSeq", return that value number; otherwise, return VNApplySelectors
+ // applied to that value number and the remainder of "fieldSeq". When the 'fieldSeq' specifies a TYP_STRUCT
// then the size of the struct is returned by 'wbFinalStructSize' (when it is non-null)
- ValueNum VNApplySelectors(ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, size_t* wbFinalStructSize = nullptr);
+ ValueNum VNApplySelectors(ValueNumKind vnk,
+ ValueNum map,
+ FieldSeqNode* fieldSeq,
+ size_t* wbFinalStructSize = nullptr);
// Used after VNApplySelectors has determined that "selectedVN" is contained in a Map using VNForMapSelect
// It determines whether the 'selectedVN' is of an appropriate type to be read using an indirection of 'indType'
- // If it is appropriate type then 'selectedVN' is returned, otherwise it may insert a cast to indType
+ // If it is an appropriate type then 'selectedVN' is returned, otherwise it may insert a cast to indType
// or return a unique value number for an incompatible indType.
ValueNum VNApplySelectorsTypeCheck(ValueNum selectedVN, var_types indType, size_t structSize);
@@ -460,7 +474,8 @@ public:
// to a value of the type of "rhs". Returns an expression for the RHS of an assignment to a location
// containing value "map" that will change the field addressed by "fieldSeq" to "rhs", leaving all other
// indices in "map" the same.
- ValueNum VNApplySelectorsAssign(ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum rhs, var_types indType);
+ ValueNum VNApplySelectorsAssign(
+ ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum rhs, var_types indType);
// Used after VNApplySelectorsAssign has determined that "elem" is to be written into a Map using VNForMapStore
// It determines whether the 'elem' is of an appropriate type to be written using an indirection of 'indType'
@@ -469,20 +484,25 @@ public:
ValueNumPair VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType);
- ValueNumPair VNPairApplySelectorsAssign(ValueNumPair map, FieldSeqNode* fieldSeq, ValueNumPair rhs, var_types indType)
+ ValueNumPair VNPairApplySelectorsAssign(ValueNumPair map,
+ FieldSeqNode* fieldSeq,
+ ValueNumPair rhs,
+ var_types indType)
{
return ValueNumPair(VNApplySelectorsAssign(VNK_Liberal, map.GetLiberal(), fieldSeq, rhs.GetLiberal(), indType),
- VNApplySelectorsAssign(VNK_Conservative, map.GetConservative(), fieldSeq, rhs.GetConservative(), indType));
+ VNApplySelectorsAssign(VNK_Conservative, map.GetConservative(), fieldSeq,
+ rhs.GetConservative(), indType));
}
// Compute the normal ValueNumber for a cast with no exceptions
- ValueNum VNForCast(ValueNum srcVN, var_types castToType, var_types castFromType,
- bool srcIsUnsigned = false);
+ ValueNum VNForCast(ValueNum srcVN, var_types castToType, var_types castFromType, bool srcIsUnsigned = false);
- // Compute the ValueNumberPair for a cast
- ValueNumPair VNPairForCast(ValueNumPair srcVNPair, var_types castToType, var_types castFromType,
- bool srcIsUnsigned = false,
- bool hasOverflowCheck = false);
+ // Compute the ValueNumberPair for a cast
+ ValueNumPair VNPairForCast(ValueNumPair srcVNPair,
+ var_types castToType,
+ var_types castFromType,
+ bool srcIsUnsigned = false,
+ bool hasOverflowCheck = false);
// PtrToLoc values need to express a field sequence as one of their arguments. VN for null represents
// empty sequence, otherwise, "FieldSeq(VN(FieldHandle), restOfSeq)".
@@ -532,12 +552,7 @@ public:
ValueNum arrOp;
unsigned cmpOper;
ValueNum cmpOp;
- ArrLenArithBoundInfo()
- : vnArray(NoVN)
- , arrOper(GT_NONE)
- , arrOp(NoVN)
- , cmpOper(GT_NONE)
- , cmpOp(NoVN)
+ ArrLenArithBoundInfo() : vnArray(NoVN), arrOper(GT_NONE), arrOp(NoVN), cmpOper(GT_NONE), cmpOp(NoVN)
{
}
#ifdef DEBUG
@@ -545,12 +560,12 @@ public:
{
vnStore->vnDump(vnStore->m_pComp, cmpOp);
printf(" ");
- printf(vnStore->VNFuncName((VNFunc) cmpOper));
- printf(" ");
+ printf(vnStore->VNFuncName((VNFunc)cmpOper));
+ printf(" ");
vnStore->vnDump(vnStore->m_pComp, vnArray);
if (arrOper != GT_NONE)
{
- printf(vnStore->VNFuncName((VNFunc) arrOper));
+ printf(vnStore->VNFuncName((VNFunc)arrOper));
vnStore->vnDump(vnStore->m_pComp, arrOp);
}
}
@@ -564,10 +579,7 @@ public:
unsigned cmpOper;
ValueNum cmpOpVN;
- ConstantBoundInfo()
- : constVal(0)
- , cmpOper(GT_NONE)
- , cmpOpVN(NoVN)
+ ConstantBoundInfo() : constVal(0), cmpOper(GT_NONE), cmpOpVN(NoVN)
{
}
@@ -595,7 +607,7 @@ public:
// If "vn" is VN(a.len) then return VN(a); NoVN if VN(a) can't be determined.
ValueNum GetArrForLenVn(ValueNum vn);
- // Return true with any Relop except for == and != and one operand has to be a 32-bit integer constant.
+ // Returns true for any relop except == and !=, where one operand has to be a 32-bit integer constant.
bool IsVNConstantBound(ValueNum vn);
// If "vn" is constant bound, then populate the "info" fields for constVal, cmpOp, cmpOper.
@@ -629,7 +641,9 @@ public:
// For example, ValueNum of type TYP_LONG are stored in a map of INT64 variables.
// Lang is the language (C++) type for the corresponding vartype_t.
template <int N>
- struct VarTypConv { };
+ struct VarTypConv
+ {
+ };
private:
struct Chunk;
@@ -641,72 +655,72 @@ private:
template <typename T>
FORCEINLINE T SafeGetConstantValue(Chunk* c, unsigned offset);
- template<typename T>
+ template <typename T>
T ConstantValueInternal(ValueNum vn DEBUGARG(bool coerce))
{
Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
assert(c->m_attribs == CEA_Const || c->m_attribs == CEA_Handle);
-
unsigned offset = ChunkOffset(vn);
switch (c->m_typ)
{
- case TYP_REF:
- assert(0 <= offset && offset <= 1); // Null or exception.
- __fallthrough;
+ case TYP_REF:
+ assert(0 <= offset && offset <= 1); // Null or exception.
+ __fallthrough;
- case TYP_BYREF:
+ case TYP_BYREF:
#ifndef PLATFORM_UNIX
- assert(&typeid(T) == &typeid(size_t)); // We represent ref/byref constants as size_t's.
-#endif // PLATFORM_UNIX
- __fallthrough;
-
- case TYP_INT:
- case TYP_LONG:
- case TYP_FLOAT:
- case TYP_DOUBLE:
- if (c->m_attribs == CEA_Handle)
- {
- C_ASSERT(offsetof(VNHandle, m_cnsVal) == 0);
- return (T) reinterpret_cast<VNHandle*>(c->m_defs)[offset].m_cnsVal;
- }
-#ifdef DEBUG
- if (!coerce)
- {
- T val1 = reinterpret_cast<T*>(c->m_defs)[offset];
- T val2 = SafeGetConstantValue<T>(c, offset);
-
- // Detect if there is a mismatch between the VN storage type and explicitly
- // passed-in type T.
- bool mismatch = false;
- if (varTypeIsFloating(c->m_typ))
- {
- mismatch = (memcmp(&val1, &val2, sizeof(val1)) != 0);
- }
- else
+ assert(&typeid(T) == &typeid(size_t)); // We represent ref/byref constants as size_t's.
+#endif // PLATFORM_UNIX
+ __fallthrough;
+
+ case TYP_INT:
+ case TYP_LONG:
+ case TYP_FLOAT:
+ case TYP_DOUBLE:
+ if (c->m_attribs == CEA_Handle)
{
- mismatch = (val1 != val2);
+ C_ASSERT(offsetof(VNHandle, m_cnsVal) == 0);
+ return (T) reinterpret_cast<VNHandle*>(c->m_defs)[offset].m_cnsVal;
}
-
- if (mismatch)
+#ifdef DEBUG
+ if (!coerce)
{
- assert(!"Called ConstantValue<T>(vn), but type(T) != type(vn); Use CoercedConstantValue instead.");
+ T val1 = reinterpret_cast<T*>(c->m_defs)[offset];
+ T val2 = SafeGetConstantValue<T>(c, offset);
+
+ // Detect if there is a mismatch between the VN storage type and explicitly
+ // passed-in type T.
+ bool mismatch = false;
+ if (varTypeIsFloating(c->m_typ))
+ {
+ mismatch = (memcmp(&val1, &val2, sizeof(val1)) != 0);
+ }
+ else
+ {
+ mismatch = (val1 != val2);
+ }
+
+ if (mismatch)
+ {
+ assert(
+ !"Called ConstantValue<T>(vn), but type(T) != type(vn); Use CoercedConstantValue instead.");
+ }
}
- }
#endif
- return SafeGetConstantValue<T>(c, offset);
+ return SafeGetConstantValue<T>(c, offset);
- default:
- assert(false); // We do not record constants of this typ.
- return (T)0;
+ default:
+ assert(false); // We do not record constants of this typ.
+ return (T)0;
}
}
public:
// Requires that "vn" is a constant, and that its type is compatible with the explicitly passed
// type "T". Also, note that "T" has to have an accurate storage size of the TypeOfVN(vn).
- template<typename T>
+ template <typename T>
T ConstantValue(ValueNum vn)
{
return ConstantValueInternal<T>(vn DEBUGARG(false));
@@ -714,7 +728,7 @@ public:
// Requires that "vn" is a constant, and that its type can be coerced to the explicitly passed
// type "T".
- template<typename T>
+ template <typename T>
T CoercedConstantValue(ValueNum vn)
{
return ConstantValueInternal<T>(vn DEBUGARG(true));
@@ -725,21 +739,24 @@ public:
bool IsHandle(ValueNum vn);
// Requires "mthFunc" to be an intrinsic math function (one of the allowable values for the "gtMath" field
- // of a GenTreeMath node). For unary ops, return the value number for the application of this function to
- // "arg0VN". For binary ops, return the value number for the application of this function to "arg0VN" and
- // "arg1VN".
-
+ // of a GenTreeMath node). For unary ops, return the value number for the application of this function to
+ // "arg0VN". For binary ops, return the value number for the application of this function to "arg0VN" and
+ // "arg1VN".
+
ValueNum EvalMathFuncUnary(var_types typ, CorInfoIntrinsics mthFunc, ValueNum arg0VN);
-
+
ValueNum EvalMathFuncBinary(var_types typ, CorInfoIntrinsics mthFunc, ValueNum arg0VN, ValueNum arg1VN);
-
+
ValueNumPair EvalMathFuncUnary(var_types typ, CorInfoIntrinsics mthFunc, ValueNumPair arg0VNP)
{
return ValueNumPair(EvalMathFuncUnary(typ, mthFunc, arg0VNP.GetLiberal()),
EvalMathFuncUnary(typ, mthFunc, arg0VNP.GetConservative()));
}
-
- ValueNumPair EvalMathFuncBinary(var_types typ, CorInfoIntrinsics mthFunc, ValueNumPair arg0VNP, ValueNumPair arg1VNP)
+
+ ValueNumPair EvalMathFuncBinary(var_types typ,
+ CorInfoIntrinsics mthFunc,
+ ValueNumPair arg0VNP,
+ ValueNumPair arg1VNP)
{
return ValueNumPair(EvalMathFuncBinary(typ, mthFunc, arg0VNP.GetLiberal(), arg1VNP.GetLiberal()),
EvalMathFuncBinary(typ, mthFunc, arg0VNP.GetConservative(), arg1VNP.GetConservative()));
@@ -766,15 +783,15 @@ public:
// Prints, to standard out, a representation of "vn".
void vnDump(Compiler* comp, ValueNum vn, bool isPtr = false);
- // Requires "fieldSeq" to be a field sequence VNFuncApp.
+ // Requires "fieldSeq" to be a field sequence VNFuncApp.
// Prints a representation (comma-separated list of field names) on standard out.
void vnDumpFieldSeq(Compiler* comp, VNFuncApp* fieldSeq, bool isHead);
- // Requires "mapSelect" to be a map select VNFuncApp.
+ // Requires "mapSelect" to be a map select VNFuncApp.
// Prints a representation of a MapSelect operation on standard out.
void vnDumpMapSelect(Compiler* comp, VNFuncApp* mapSelect);
- // Requires "mapStore" to be a map store VNFuncApp.
+ // Requires "mapStore" to be a map store VNFuncApp.
// Prints a representation of a MapStore operation on standard out.
void vnDumpMapStore(Compiler* comp, VNFuncApp* mapStore);
@@ -783,13 +800,13 @@ public:
// Used in the implementation of the above.
static const char* VNFuncNameArr[];
- // Returns the string name of "vn" when it is a reserved value number, nullptr otherwise
+ // Returns the string name of "vn" when it is a reserved value number, nullptr otherwise
static const char* reservedName(ValueNum vn);
#endif // DEBUG
- // Returns true if "vn" is a reserved value number
- static bool isReservedVN(ValueNum);
+ // Returns true if "vn" is a reserved value number
+ static bool isReservedVN(ValueNum);
#define VALUENUM_SUPPORT_MERGE 0
#if VALUENUM_SUPPORT_MERGE
@@ -803,16 +820,14 @@ public:
void MergeVNs(ValueNum vn1, ValueNum vn2);
#endif
-
private:
-
// We will allocate value numbers in "chunks". Each chunk will have the same type and "constness".
- static const unsigned LogChunkSize = 6;
- static const unsigned ChunkSize = 1 << LogChunkSize;
+ static const unsigned LogChunkSize = 6;
+ static const unsigned ChunkSize = 1 << LogChunkSize;
static const unsigned ChunkOffsetMask = ChunkSize - 1;
// A "ChunkNum" is a zero-based index naming a chunk in the Store, or else the special "NoChunk" value.
- typedef UINT32 ChunkNum;
+ typedef UINT32 ChunkNum;
static const ChunkNum NoChunk = UINT32_MAX;
// Returns the ChunkNum of the Chunk that holds "vn" (which is required to be a valid
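The chunk constants above imply a simple decomposition of a value number into a chunk index and an offset within that chunk. The helper bodies are not visible in this hunk, so the arithmetic below is the natural reading of those constants rather than code copied from the JIT:

    // Sketch of the vn -> (chunk, offset) decomposition implied by the constants.
    #include <cassert>
    #include <cstdint>

    typedef uint32_t ValueNum;
    typedef uint32_t ChunkNum;

    static const unsigned LogChunkSize    = 6;
    static const unsigned ChunkSize       = 1 << LogChunkSize; // 64 VNs per chunk
    static const unsigned ChunkOffsetMask = ChunkSize - 1;

    ChunkNum GetChunkNum(ValueNum vn)
    {
        return vn >> LogChunkSize; // which chunk the VN lives in
    }

    unsigned ChunkOffset(ValueNum vn)
    {
        return vn & ChunkOffsetMask; // index of the VN within its chunk
    }

    int main()
    {
        ValueNum vn = 130; // 2 * 64 + 2
        assert(GetChunkNum(vn) == 2);
        assert(ChunkOffset(vn) == 2);
        return 0;
    }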
@@ -831,19 +846,19 @@ private:
// The base VN of the next chunk to be allocated. Should always be a multiple of ChunkSize.
ValueNum m_nextChunkBase;
- DECLARE_TYPED_ENUM(ChunkExtraAttribs,BYTE)
- {
- CEA_None, // No extra attributes.
- CEA_Const, // This chunk contains constant values.
- CEA_Handle, // This chunk contains handle constants.
- CEA_Func0, // Represents functions of arity 0.
- CEA_Func1, // ...arity 1.
- CEA_Func2, // ...arity 2.
- CEA_Func3, // ...arity 3.
- CEA_Func4, // ...arity 4.
- CEA_Count
+ DECLARE_TYPED_ENUM(ChunkExtraAttribs, BYTE)
+ {
+ CEA_None, // No extra attributes.
+ CEA_Const, // This chunk contains constant values.
+ CEA_Handle, // This chunk contains handle constants.
+ CEA_Func0, // Represents functions of arity 0.
+ CEA_Func1, // ...arity 1.
+ CEA_Func2, // ...arity 2.
+ CEA_Func3, // ...arity 3.
+ CEA_Func4, // ...arity 4.
+ CEA_Count
}
- END_DECLARE_TYPED_ENUM(ChunkExtraAttribs,BYTE);
+ END_DECLARE_TYPED_ENUM(ChunkExtraAttribs, BYTE);
// A "Chunk" holds "ChunkSize" value numbers, starting at "m_baseVN". All of these share the same
// "m_typ" and "m_attribs". These properties determine the interpretation of "m_defs", as discussed below.
@@ -856,7 +871,7 @@ private:
unsigned m_numUsed;
// The value number of the first VN in the chunk.
- ValueNum m_baseVN;
+ ValueNum m_baseVN;
// The common attributes of this chunk.
var_types m_typ;
@@ -881,15 +896,15 @@ private:
};
};
- struct VNHandle: public KeyFuncsDefEquals<VNHandle>
+ struct VNHandle : public KeyFuncsDefEquals<VNHandle>
{
- ssize_t m_cnsVal;
+ ssize_t m_cnsVal;
unsigned m_flags;
// Don't use a constructor, so that the default copy constructor is used for hashtable rehash.
static void Initialize(VNHandle* handle, ssize_t m_cnsVal, unsigned m_flags)
{
handle->m_cnsVal = m_cnsVal;
- handle->m_flags = m_flags;
+ handle->m_flags = m_flags;
}
bool operator==(const VNHandle& y) const
{
@@ -903,10 +918,14 @@ private:
struct VNDefFunc0Arg
{
- VNFunc m_func;
- VNDefFunc0Arg(VNFunc func) : m_func(func) {}
+ VNFunc m_func;
+ VNDefFunc0Arg(VNFunc func) : m_func(func)
+ {
+ }
- VNDefFunc0Arg(): m_func(VNF_COUNT) {}
+ VNDefFunc0Arg() : m_func(VNF_COUNT)
+ {
+ }
bool operator==(const VNDefFunc0Arg& y) const
{
@@ -914,12 +933,16 @@ private:
}
};
- struct VNDefFunc1Arg: public VNDefFunc0Arg
+ struct VNDefFunc1Arg : public VNDefFunc0Arg
{
ValueNum m_arg0;
- VNDefFunc1Arg(VNFunc func, ValueNum arg0) : VNDefFunc0Arg(func), m_arg0(arg0) {}
+ VNDefFunc1Arg(VNFunc func, ValueNum arg0) : VNDefFunc0Arg(func), m_arg0(arg0)
+ {
+ }
- VNDefFunc1Arg(): VNDefFunc0Arg(), m_arg0(ValueNumStore::NoVN) {}
+ VNDefFunc1Arg() : VNDefFunc0Arg(), m_arg0(ValueNumStore::NoVN)
+ {
+ }
bool operator==(const VNDefFunc1Arg& y) const
{
@@ -930,9 +953,13 @@ private:
struct VNDefFunc2Arg : public VNDefFunc1Arg
{
ValueNum m_arg1;
- VNDefFunc2Arg(VNFunc func, ValueNum arg0, ValueNum arg1) : VNDefFunc1Arg(func, arg0), m_arg1(arg1) {}
+ VNDefFunc2Arg(VNFunc func, ValueNum arg0, ValueNum arg1) : VNDefFunc1Arg(func, arg0), m_arg1(arg1)
+ {
+ }
- VNDefFunc2Arg(): m_arg1(ValueNumStore::NoVN) {}
+ VNDefFunc2Arg() : m_arg1(ValueNumStore::NoVN)
+ {
+ }
bool operator==(const VNDefFunc2Arg& y) const
{
@@ -943,8 +970,13 @@ private:
struct VNDefFunc3Arg : public VNDefFunc2Arg
{
ValueNum m_arg2;
- VNDefFunc3Arg(VNFunc func, ValueNum arg0, ValueNum arg1, ValueNum arg2) : VNDefFunc2Arg(func, arg0, arg1), m_arg2(arg2) {}
- VNDefFunc3Arg() : m_arg2(ValueNumStore::NoVN) {}
+ VNDefFunc3Arg(VNFunc func, ValueNum arg0, ValueNum arg1, ValueNum arg2)
+ : VNDefFunc2Arg(func, arg0, arg1), m_arg2(arg2)
+ {
+ }
+ VNDefFunc3Arg() : m_arg2(ValueNumStore::NoVN)
+ {
+ }
bool operator==(const VNDefFunc3Arg& y) const
{
@@ -955,8 +987,13 @@ private:
struct VNDefFunc4Arg : public VNDefFunc3Arg
{
ValueNum m_arg3;
- VNDefFunc4Arg(VNFunc func, ValueNum arg0, ValueNum arg1, ValueNum arg2, ValueNum arg3) : VNDefFunc3Arg(func, arg0, arg1, arg2), m_arg3(arg3) {}
- VNDefFunc4Arg() : m_arg3(ValueNumStore::NoVN) {}
+ VNDefFunc4Arg(VNFunc func, ValueNum arg0, ValueNum arg1, ValueNum arg2, ValueNum arg3)
+ : VNDefFunc3Arg(func, arg0, arg1, arg2), m_arg3(arg3)
+ {
+ }
+ VNDefFunc4Arg() : m_arg3(ValueNumStore::NoVN)
+ {
+ }
bool operator==(const VNDefFunc4Arg& y) const
{
@@ -998,27 +1035,35 @@ private:
// First, we need mechanisms for mapping from constants to value numbers.
// For small integers, we'll use an array.
- static const int SmallIntConstMin = -1;
- static const int SmallIntConstMax = 10;
+ static const int SmallIntConstMin = -1;
+ static const int SmallIntConstMax = 10;
static const unsigned SmallIntConstNum = SmallIntConstMax - SmallIntConstMin + 1;
- static bool IsSmallIntConst(int i) { return SmallIntConstMin <= i && i <= SmallIntConstMax; }
+ static bool IsSmallIntConst(int i)
+ {
+ return SmallIntConstMin <= i && i <= SmallIntConstMax;
+ }
ValueNum m_VNsForSmallIntConsts[SmallIntConstNum];
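The declarations above set up a small cache: integer constants in [-1, 10] get their value numbers from a fixed array instead of the hash map. The lookup indexing is not shown in this hunk, so the index arithmetic below (i - SmallIntConstMin) is the natural assumption, not quoted code:

    // Sketch of the small-integer constant cache implied by the declarations above.
    #include <cassert>
    #include <cstdint>

    typedef uint32_t ValueNum;

    static const int      SmallIntConstMin = -1;
    static const int      SmallIntConstMax = 10;
    static const unsigned SmallIntConstNum = SmallIntConstMax - SmallIntConstMin + 1; // 12 slots

    static bool IsSmallIntConst(int i)
    {
        return SmallIntConstMin <= i && i <= SmallIntConstMax;
    }

    ValueNum m_VNsForSmallIntConsts[SmallIntConstNum];

    // Hypothetical lookup: small constants index directly into the array;
    // everything else would go through the IntToValueNumMap instead.
    ValueNum LookupSmallIntConst(int cnsVal)
    {
        assert(IsSmallIntConst(cnsVal));
        return m_VNsForSmallIntConsts[cnsVal - SmallIntConstMin];
    }

    int main()
    {
        for (unsigned i = 0; i < SmallIntConstNum; i++)
        {
            m_VNsForSmallIntConsts[i] = 100 + i; // fill with dummy VNs
        }
        assert(LookupSmallIntConst(-1) == 100);
        assert(LookupSmallIntConst(10) == 111);
        return 0;
    }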
struct ValueNumList
{
- ValueNum vn;
+ ValueNum vn;
ValueNumList* next;
- ValueNumList(const ValueNum& v, ValueNumList* n = NULL) : vn(v), next(n) { }
+ ValueNumList(const ValueNum& v, ValueNumList* n = nullptr) : vn(v), next(n)
+ {
+ }
};
-
+
// Keeps track of value numbers that are integer constants and also handles (GTF_ICON_HDL_MASK).
ValueNumList* m_intConHandles;
typedef VNMap<INT32> IntToValueNumMap;
- IntToValueNumMap* m_intCnsMap;
- IntToValueNumMap* GetIntCnsMap()
+ IntToValueNumMap* m_intCnsMap;
+ IntToValueNumMap* GetIntCnsMap()
{
- if (m_intCnsMap == NULL) m_intCnsMap = new (m_alloc) IntToValueNumMap(m_alloc);
+ if (m_intCnsMap == nullptr)
+ {
+ m_intCnsMap = new (m_alloc) IntToValueNumMap(m_alloc);
+ }
return m_intCnsMap;
}
@@ -1031,9 +1076,9 @@ private:
}
else
{
- Chunk* c = GetAllocChunk(TYP_INT, CEA_Const);
- unsigned offsetWithinChunk = c->AllocVN();
- res = c->m_baseVN + offsetWithinChunk;
+ Chunk* c = GetAllocChunk(TYP_INT, CEA_Const);
+ unsigned offsetWithinChunk = c->AllocVN();
+ res = c->m_baseVN + offsetWithinChunk;
reinterpret_cast<INT32*>(c->m_defs)[offsetWithinChunk] = cnsVal;
GetIntCnsMap()->Set(cnsVal, res);
return res;
@@ -1041,18 +1086,24 @@ private:
}
typedef VNMap<INT64> LongToValueNumMap;
- LongToValueNumMap* m_longCnsMap;
- LongToValueNumMap* GetLongCnsMap()
+ LongToValueNumMap* m_longCnsMap;
+ LongToValueNumMap* GetLongCnsMap()
{
- if (m_longCnsMap == NULL) m_longCnsMap = new (m_alloc) LongToValueNumMap(m_alloc);
+ if (m_longCnsMap == nullptr)
+ {
+ m_longCnsMap = new (m_alloc) LongToValueNumMap(m_alloc);
+ }
return m_longCnsMap;
}
typedef VNMap<VNHandle, VNHandle> HandleToValueNumMap;
HandleToValueNumMap* m_handleMap;
- HandleToValueNumMap* GetHandleMap()
+ HandleToValueNumMap* GetHandleMap()
{
- if (m_handleMap == NULL) m_handleMap = new (m_alloc) HandleToValueNumMap(m_alloc);
+ if (m_handleMap == nullptr)
+ {
+ m_handleMap = new (m_alloc) HandleToValueNumMap(m_alloc);
+ }
return m_handleMap;
}
@@ -1060,15 +1111,18 @@ private:
{
static bool Equals(float x, float y)
{
- return *(unsigned*) &x == *(unsigned*) &y;
+ return *(unsigned*)&x == *(unsigned*)&y;
}
};
typedef VNMap<float, LargePrimitiveKeyFuncsFloat> FloatToValueNumMap;
FloatToValueNumMap* m_floatCnsMap;
- FloatToValueNumMap* GetFloatCnsMap()
+ FloatToValueNumMap* GetFloatCnsMap()
{
- if (m_floatCnsMap == NULL) m_floatCnsMap = new (m_alloc) FloatToValueNumMap(m_alloc);
+ if (m_floatCnsMap == nullptr)
+ {
+ m_floatCnsMap = new (m_alloc) FloatToValueNumMap(m_alloc);
+ }
return m_floatCnsMap;
}
@@ -1077,27 +1131,32 @@ private:
{
static bool Equals(double x, double y)
{
- return *(__int64*) &x == *(__int64*) &y;
+ return *(__int64*)&x == *(__int64*)&y;
}
};
typedef VNMap<double, LargePrimitiveKeyFuncsDouble> DoubleToValueNumMap;
DoubleToValueNumMap* m_doubleCnsMap;
- DoubleToValueNumMap* GetDoubleCnsMap()
+ DoubleToValueNumMap* GetDoubleCnsMap()
{
- if (m_doubleCnsMap == NULL) m_doubleCnsMap = new (m_alloc) DoubleToValueNumMap(m_alloc);
+ if (m_doubleCnsMap == nullptr)
+ {
+ m_doubleCnsMap = new (m_alloc) DoubleToValueNumMap(m_alloc);
+ }
return m_doubleCnsMap;
}
LongToValueNumMap* m_byrefCnsMap;
- LongToValueNumMap* GetByrefCnsMap()
+ LongToValueNumMap* GetByrefCnsMap()
{
- if (m_byrefCnsMap == NULL) m_byrefCnsMap = new (m_alloc) LongToValueNumMap(m_alloc);
+ if (m_byrefCnsMap == nullptr)
+ {
+ m_byrefCnsMap = new (m_alloc) LongToValueNumMap(m_alloc);
+ }
return m_byrefCnsMap;
}
-
- struct VNDefFunc0ArgKeyFuncs: public KeyFuncsDefEquals<VNDefFunc1Arg>
+ struct VNDefFunc0ArgKeyFuncs : public KeyFuncsDefEquals<VNDefFunc1Arg>
{
static unsigned GetHashCode(VNDefFunc1Arg val)
{
@@ -1106,13 +1165,16 @@ private:
};
typedef VNMap<VNFunc> VNFunc0ToValueNumMap;
VNFunc0ToValueNumMap* m_VNFunc0Map;
- VNFunc0ToValueNumMap* GetVNFunc0Map()
+ VNFunc0ToValueNumMap* GetVNFunc0Map()
{
- if (m_VNFunc0Map == NULL) m_VNFunc0Map = new (m_alloc) VNFunc0ToValueNumMap(m_alloc);
+ if (m_VNFunc0Map == nullptr)
+ {
+ m_VNFunc0Map = new (m_alloc) VNFunc0ToValueNumMap(m_alloc);
+ }
return m_VNFunc0Map;
}
- struct VNDefFunc1ArgKeyFuncs: public KeyFuncsDefEquals<VNDefFunc1Arg>
+ struct VNDefFunc1ArgKeyFuncs : public KeyFuncsDefEquals<VNDefFunc1Arg>
{
static unsigned GetHashCode(VNDefFunc1Arg val)
{
@@ -1121,13 +1183,16 @@ private:
};
typedef VNMap<VNDefFunc1Arg, VNDefFunc1ArgKeyFuncs> VNFunc1ToValueNumMap;
VNFunc1ToValueNumMap* m_VNFunc1Map;
- VNFunc1ToValueNumMap* GetVNFunc1Map()
+ VNFunc1ToValueNumMap* GetVNFunc1Map()
{
- if (m_VNFunc1Map == NULL) m_VNFunc1Map = new (m_alloc) VNFunc1ToValueNumMap(m_alloc);
+ if (m_VNFunc1Map == nullptr)
+ {
+ m_VNFunc1Map = new (m_alloc) VNFunc1ToValueNumMap(m_alloc);
+ }
return m_VNFunc1Map;
}
- struct VNDefFunc2ArgKeyFuncs: public KeyFuncsDefEquals<VNDefFunc2Arg>
+ struct VNDefFunc2ArgKeyFuncs : public KeyFuncsDefEquals<VNDefFunc2Arg>
{
static unsigned GetHashCode(VNDefFunc2Arg val)
{
@@ -1136,39 +1201,48 @@ private:
};
typedef VNMap<VNDefFunc2Arg, VNDefFunc2ArgKeyFuncs> VNFunc2ToValueNumMap;
VNFunc2ToValueNumMap* m_VNFunc2Map;
- VNFunc2ToValueNumMap* GetVNFunc2Map()
+ VNFunc2ToValueNumMap* GetVNFunc2Map()
{
- if (m_VNFunc2Map == NULL) m_VNFunc2Map = new (m_alloc) VNFunc2ToValueNumMap(m_alloc);
+ if (m_VNFunc2Map == nullptr)
+ {
+ m_VNFunc2Map = new (m_alloc) VNFunc2ToValueNumMap(m_alloc);
+ }
return m_VNFunc2Map;
}
- struct VNDefFunc3ArgKeyFuncs: public KeyFuncsDefEquals<VNDefFunc3Arg>
+ struct VNDefFunc3ArgKeyFuncs : public KeyFuncsDefEquals<VNDefFunc3Arg>
{
static unsigned GetHashCode(VNDefFunc3Arg val)
{
- return (val.m_func << 24) + (val.m_arg0 << 16) + (val.m_arg1 << 8) + val.m_arg2;
+ return (val.m_func << 24) + (val.m_arg0 << 16) + (val.m_arg1 << 8) + val.m_arg2;
}
};
typedef VNMap<VNDefFunc3Arg, VNDefFunc3ArgKeyFuncs> VNFunc3ToValueNumMap;
VNFunc3ToValueNumMap* m_VNFunc3Map;
- VNFunc3ToValueNumMap* GetVNFunc3Map()
+ VNFunc3ToValueNumMap* GetVNFunc3Map()
{
- if (m_VNFunc3Map == NULL) m_VNFunc3Map = new (m_alloc) VNFunc3ToValueNumMap(m_alloc);
+ if (m_VNFunc3Map == nullptr)
+ {
+ m_VNFunc3Map = new (m_alloc) VNFunc3ToValueNumMap(m_alloc);
+ }
return m_VNFunc3Map;
}
- struct VNDefFunc4ArgKeyFuncs: public KeyFuncsDefEquals<VNDefFunc4Arg>
+ struct VNDefFunc4ArgKeyFuncs : public KeyFuncsDefEquals<VNDefFunc4Arg>
{
static unsigned GetHashCode(VNDefFunc4Arg val)
{
- return (val.m_func << 24) + (val.m_arg0 << 16) + (val.m_arg1 << 8) + val.m_arg2 + (val.m_arg3 << 12);
+ return (val.m_func << 24) + (val.m_arg0 << 16) + (val.m_arg1 << 8) + val.m_arg2 + (val.m_arg3 << 12);
}
};
typedef VNMap<VNDefFunc4Arg, VNDefFunc4ArgKeyFuncs> VNFunc4ToValueNumMap;
VNFunc4ToValueNumMap* m_VNFunc4Map;
- VNFunc4ToValueNumMap* GetVNFunc4Map()
+ VNFunc4ToValueNumMap* GetVNFunc4Map()
{
- if (m_VNFunc4Map == NULL) m_VNFunc4Map = new (m_alloc) VNFunc4ToValueNumMap(m_alloc);
+ if (m_VNFunc4Map == nullptr)
+ {
+ m_VNFunc4Map = new (m_alloc) VNFunc4ToValueNumMap(m_alloc);
+ }
return m_VNFunc4Map;
}
@@ -1184,7 +1258,6 @@ private:
SRC_NumSpecialRefConsts
};
-
// Counter to keep track of all the unique not a field sequences that have been assigned to
// PtrToLoc, because the ptr was added to an offset that was not a field.
unsigned m_uPtrToLocNotAFieldCount;
@@ -1196,17 +1269,41 @@ private:
};
template <>
-struct ValueNumStore::VarTypConv<TYP_INT> { typedef INT32 Type; typedef int Lang; };
+struct ValueNumStore::VarTypConv<TYP_INT>
+{
+ typedef INT32 Type;
+ typedef int Lang;
+};
template <>
-struct ValueNumStore::VarTypConv<TYP_FLOAT> { typedef INT32 Type; typedef float Lang; };
+struct ValueNumStore::VarTypConv<TYP_FLOAT>
+{
+ typedef INT32 Type;
+ typedef float Lang;
+};
template <>
-struct ValueNumStore::VarTypConv<TYP_LONG> { typedef INT64 Type; typedef INT64 Lang; };
+struct ValueNumStore::VarTypConv<TYP_LONG>
+{
+ typedef INT64 Type;
+ typedef INT64 Lang;
+};
template <>
-struct ValueNumStore::VarTypConv<TYP_DOUBLE> { typedef INT64 Type; typedef double Lang; };
+struct ValueNumStore::VarTypConv<TYP_DOUBLE>
+{
+ typedef INT64 Type;
+ typedef double Lang;
+};
template <>
-struct ValueNumStore::VarTypConv<TYP_BYREF> { typedef INT64 Type; typedef void* Lang; };
+struct ValueNumStore::VarTypConv<TYP_BYREF>
+{
+ typedef INT64 Type;
+ typedef void* Lang;
+};
template <>
-struct ValueNumStore::VarTypConv<TYP_REF> { typedef class Object* Type; typedef class Object* Lang; };
+struct ValueNumStore::VarTypConv<TYP_REF>
+{
+ typedef class Object* Type;
+ typedef class Object* Lang;
+};
// Get the actual value and coerce the actual type c->m_typ to the wanted type T.
template <typename T>
@@ -1214,21 +1311,21 @@ FORCEINLINE T ValueNumStore::SafeGetConstantValue(Chunk* c, unsigned offset)
{
switch (c->m_typ)
{
- case TYP_REF:
- return CoerceTypRefToT<T>(c, offset);
- case TYP_BYREF:
- return static_cast<T>(reinterpret_cast<VarTypConv<TYP_BYREF>::Type*>(c->m_defs)[offset]);
- case TYP_INT:
- return static_cast<T>(reinterpret_cast<VarTypConv<TYP_INT>::Type*>(c->m_defs)[offset]);
- case TYP_LONG:
- return static_cast<T>(reinterpret_cast<VarTypConv<TYP_LONG>::Type*>(c->m_defs)[offset]);
- case TYP_FLOAT:
- return static_cast<T>(reinterpret_cast<VarTypConv<TYP_FLOAT>::Lang*>(c->m_defs)[offset]);
- case TYP_DOUBLE:
- return static_cast<T>(reinterpret_cast<VarTypConv<TYP_DOUBLE>::Lang*>(c->m_defs)[offset]);
- default:
- assert(false);
- return (T)0;
+ case TYP_REF:
+ return CoerceTypRefToT<T>(c, offset);
+ case TYP_BYREF:
+ return static_cast<T>(reinterpret_cast<VarTypConv<TYP_BYREF>::Type*>(c->m_defs)[offset]);
+ case TYP_INT:
+ return static_cast<T>(reinterpret_cast<VarTypConv<TYP_INT>::Type*>(c->m_defs)[offset]);
+ case TYP_LONG:
+ return static_cast<T>(reinterpret_cast<VarTypConv<TYP_LONG>::Type*>(c->m_defs)[offset]);
+ case TYP_FLOAT:
+ return static_cast<T>(reinterpret_cast<VarTypConv<TYP_FLOAT>::Lang*>(c->m_defs)[offset]);
+ case TYP_DOUBLE:
+ return static_cast<T>(reinterpret_cast<VarTypConv<TYP_DOUBLE>::Lang*>(c->m_defs)[offset]);
+ default:
+ assert(false);
+ return (T)0;
}
}
@@ -1248,7 +1345,10 @@ inline bool ValueNumStore::VNFuncIsCommutative(VNFunc vnf)
inline bool ValueNumStore::VNFuncIsComparison(VNFunc vnf)
{
- if (vnf >= VNF_Boundary) return false;
+ if (vnf >= VNF_Boundary)
+ {
+ return false;
+ }
genTreeOps gtOp = genTreeOps(vnf);
return GenTree::OperIsCompare(gtOp) != 0;
}
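
The valuenum.h hunks above expand the one-line lazily-created constant maps (GetIntCnsMap, GetLongCnsMap, GetHandleMap, and so on) into braced form without changing their behavior. As a minimal, self-contained C++ sketch of that lazy-accessor pattern, using std::unordered_map and plain new in place of the JIT's VNMap and arena allocator (all names below are illustrative, not CoreCLR code):

    #include <cstdint>
    #include <cassert>
    #include <unordered_map>

    typedef uint32_t ValueNum;

    class MiniValueNumStore
    {
        typedef std::unordered_map<int32_t, ValueNum> IntToValueNumMap;

        IntToValueNumMap* m_intCnsMap = nullptr; // created on first use, like the JIT's maps
        ValueNum          m_nextVN    = 1;       // 0 reserved here as "no value number"

        IntToValueNumMap* GetIntCnsMap()
        {
            // Same shape as the reformatted accessors: allocate lazily, then return.
            if (m_intCnsMap == nullptr)
            {
                m_intCnsMap = new IntToValueNumMap();
            }
            return m_intCnsMap;
        }

    public:
        ~MiniValueNumStore()
        {
            delete m_intCnsMap;
        }

        // Returns the same ValueNum every time the same constant is requested.
        ValueNum VNForIntCon(int32_t cnsVal)
        {
            IntToValueNumMap* map = GetIntCnsMap();
            auto              it  = map->find(cnsVal);
            if (it != map->end())
            {
                return it->second;
            }
            ValueNum res  = m_nextVN++;
            (*map)[cnsVal] = res;
            return res;
        }
    };

    int main()
    {
        MiniValueNumStore vns;
        assert(vns.VNForIntCon(42) == vns.VNForIntCon(42)); // identical constants share a VN
        assert(vns.VNForIntCon(42) != vns.VNForIntCon(43)); // distinct constants do not
        return 0;
    }
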
diff --git a/src/jit/valuenumfuncs.h b/src/jit/valuenumfuncs.h
index c374f421ae..064a33707b 100644
--- a/src/jit/valuenumfuncs.h
+++ b/src/jit/valuenumfuncs.h
@@ -2,9 +2,9 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
// Defines the functions understood by the value-numbering system.
-// ValueNumFuncDef(<name of function>, <arity (1-4)>, <is-commutative (for arity = 2)>, <non-null (for gc functions)>, <is-shared-static>)
+// ValueNumFuncDef(<name of function>, <arity (1-4)>, <is-commutative (for arity = 2)>, <non-null (for gc functions)>,
+// <is-shared-static>)
// clang-format off
ValueNumFuncDef(MapStore, 3, false, false, false)
@@ -138,5 +138,4 @@ ValueNumFuncDef(StrCns, 2, false, true, false)
ValueNumFuncDef(Unbox, 2, false, true, false)
// clang-format on
-
#undef ValueNumFuncDef
diff --git a/src/jit/valuenumtype.h b/src/jit/valuenumtype.h
index 5f88894017..f898d87532 100644
--- a/src/jit/valuenumtype.h
+++ b/src/jit/valuenumtype.h
@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
// Defines the type "ValueNum".
// This file exists only to break an include file cycle -- had been in ValueNum.h. But that
@@ -20,52 +19,83 @@ typedef UINT32 ValueNum;
// There are two "kinds" of value numbers, which differ in their modeling of the actions of other threads.
// "Liberal" value numbers assume that the other threads change contents of heap locations only at
// synchronization points. Liberal VNs are appropriate, for example, in identifying CSE opportunities.
-// "Conservative" value numbers assume that the contents of heap locations change arbitrarily between
+// "Conservative" value numbers assume that the contents of heap locations change arbitrarily between
// every two accesses. Conservative VNs are appropriate, for example, in assertion prop, where an observation
// of a property of the value in some storage location is used to perform an optimization downstream on
// an operation involving the contents of that storage location. If other threads may modify the storage
// location between the two accesses, the observed property may no longer hold -- and conservative VNs make
// it clear that the values need not be the same.
//
-enum ValueNumKind { VNK_Liberal, VNK_Conservative };
+enum ValueNumKind
+{
+ VNK_Liberal,
+ VNK_Conservative
+};
struct ValueNumPair
{
private:
ValueNum m_liberal;
ValueNum m_conservative;
+
public:
- ValueNum GetLiberal() const { return m_liberal; }
- void SetLiberal(ValueNum vn) { m_liberal = vn; }
- ValueNum GetConservative() const { return m_conservative; }
- void SetConservative(ValueNum vn) { m_conservative = vn; }
+ ValueNum GetLiberal() const
+ {
+ return m_liberal;
+ }
+ void SetLiberal(ValueNum vn)
+ {
+ m_liberal = vn;
+ }
+ ValueNum GetConservative() const
+ {
+ return m_conservative;
+ }
+ void SetConservative(ValueNum vn)
+ {
+ m_conservative = vn;
+ }
- ValueNum* GetLiberalAddr() { return &m_liberal; }
- ValueNum* GetConservativeAddr() { return &m_conservative; }
+ ValueNum* GetLiberalAddr()
+ {
+ return &m_liberal;
+ }
+ ValueNum* GetConservativeAddr()
+ {
+ return &m_conservative;
+ }
- ValueNum Get(ValueNumKind vnk) { return vnk == VNK_Liberal ? m_liberal : m_conservative; }
+ ValueNum Get(ValueNumKind vnk)
+ {
+ return vnk == VNK_Liberal ? m_liberal : m_conservative;
+ }
void SetBoth(ValueNum vn)
{
- m_liberal = vn;
+ m_liberal = vn;
m_conservative = vn;
}
void operator=(const ValueNumPair& vn2)
{
- m_liberal = vn2.m_liberal;
+ m_liberal = vn2.m_liberal;
m_conservative = vn2.m_conservative;
}
// Initializes both elements to "NoVN". Defined in ValueNum.cpp.
ValueNumPair();
- ValueNumPair(ValueNum lib, ValueNum cons) : m_liberal(lib), m_conservative(cons) {}
+ ValueNumPair(ValueNum lib, ValueNum cons) : m_liberal(lib), m_conservative(cons)
+ {
+ }
// True iff neither element is "NoVN". Defined in ValueNum.cpp.
bool BothDefined() const;
- bool BothEqual() const { return m_liberal == m_conservative; }
+ bool BothEqual() const
+ {
+ return m_liberal == m_conservative;
+ }
};
#endif // _VALUENUMTYPE_H_
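
The valuenumtype.h hunks above only reflow ValueNumPair's accessors into braced form; the liberal/conservative split described in the comment is unchanged. A small standalone sketch of how such a pair might be consumed, with illustrative names rather than the JIT's API:

    #include <cstdint>
    #include <cassert>

    typedef uint32_t ValueNum;

    enum ValueNumKind
    {
        VNK_Liberal,     // assumes other threads write only at synchronization points
        VNK_Conservative // assumes heap contents may change between any two accesses
    };

    struct ValueNumPair
    {
        ValueNum m_liberal;
        ValueNum m_conservative;

        ValueNum Get(ValueNumKind vnk) const
        {
            return vnk == VNK_Liberal ? m_liberal : m_conservative;
        }
        bool BothEqual() const
        {
            return m_liberal == m_conservative;
        }
    };

    // Illustrative consumers: CSE-style reasoning may use the liberal number,
    // while assertion-style reasoning must use the conservative one.
    bool SameValueForCSE(const ValueNumPair& a, const ValueNumPair& b)
    {
        return a.Get(VNK_Liberal) == b.Get(VNK_Liberal);
    }

    bool SameValueForAssertions(const ValueNumPair& a, const ValueNumPair& b)
    {
        return a.Get(VNK_Conservative) == b.Get(VNK_Conservative);
    }

    int main()
    {
        ValueNumPair x{10, 11}; // liberal and conservative numbers may differ
        ValueNumPair y{10, 12};
        assert(SameValueForCSE(x, y));
        assert(!SameValueForAssertions(x, y));
        assert(!x.BothEqual());
        return 0;
    }
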
diff --git a/src/jit/varset.h b/src/jit/varset.h
index a84dc166ad..6a2c37ed40 100644
--- a/src/jit/varset.h
+++ b/src/jit/varset.h
@@ -11,7 +11,7 @@
// A VARSET_TP is a set of (small) integers representing local variables.
// We implement varsets using the BitSet abstraction, which supports
// several different implementations.
-//
+//
// The set of tracked variables may change during a compilation, and variables may be
// re-sorted, so the tracked variable index of a variable is decidedly *not* stable. The
// bitset abstraction supports labeling of bitsets with "epochs", and supports a
@@ -45,11 +45,11 @@ const unsigned UInt64Bits = sizeof(UINT64) * 8;
#include "bitsetasuint64.h"
-typedef BitSetOps</*BitSetType*/UINT64,
- /*Brand*/VARSET_REP,
- /*Env*/Compiler*,
- /*BitSetTraits*/TrackedVarBitSetTraits>
- VarSetOpsRaw;
+typedef BitSetOps</*BitSetType*/ UINT64,
+ /*Brand*/ VARSET_REP,
+ /*Env*/ Compiler*,
+ /*BitSetTraits*/ TrackedVarBitSetTraits>
+ VarSetOpsRaw;
typedef UINT64 VARSET_TP;
@@ -61,40 +61,39 @@ const unsigned lclMAX_TRACKED = UInt64Bits;
#include "bitsetasshortlong.h"
-typedef BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/VARSET_REP,
- /*Env*/Compiler*,
- /*BitSetTraits*/TrackedVarBitSetTraits>
- VarSetOpsRaw;
+typedef BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ VARSET_REP,
+ /*Env*/ Compiler*,
+ /*BitSetTraits*/ TrackedVarBitSetTraits>
+ VarSetOpsRaw;
-typedef BitSetShortLongRep VARSET_TP;
+typedef BitSetShortLongRep VARSET_TP;
// Tested various sizes for max tracked locals. The largest value for which no throughput regression
// could be measured was 512. Going to 1024 showed the first throughput regressions.
// We anticipate the larger size will be needed to support better inlining.
// There were a number of failures when 512 was used for legacy, so we just retain the 128 value
// for legacy backend.
-
+
#if !defined(LEGACY_BACKEND)
-const unsigned lclMAX_TRACKED = 512;
+const unsigned lclMAX_TRACKED = 512;
#else
const unsigned lclMAX_TRACKED = 128;
#endif
-
#define VARSET_REP_IS_CLASS 0
#elif VARSET_REP == BSUInt64Class
#include "bitsetasuint64inclass.h"
-typedef BitSetOps</*BitSetType*/BitSetUint64<Compiler*, TrackedVarBitSetTraits>,
- /*Brand*/VARSET_REP,
- /*Env*/Compiler*,
- /*BitSetTraits*/TrackedVarBitSetTraits>
- VarSetOpsRaw;
+typedef BitSetOps</*BitSetType*/ BitSetUint64<Compiler*, TrackedVarBitSetTraits>,
+ /*Brand*/ VARSET_REP,
+ /*Env*/ Compiler*,
+ /*BitSetTraits*/ TrackedVarBitSetTraits>
+ VarSetOpsRaw;
-typedef BitSetUint64<Compiler*, TrackedVarBitSetTraits> VARSET_TP;
+typedef BitSetUint64<Compiler*, TrackedVarBitSetTraits> VARSET_TP;
const unsigned lclMAX_TRACKED = UInt64Bits;
@@ -107,12 +106,19 @@ const unsigned lclMAX_TRACKED = UInt64Bits;
#endif
// These types should be used as the types for VARSET_TP arguments and return values, respectively.
-typedef VarSetOpsRaw::ValArgType VARSET_VALARG_TP;
-typedef VarSetOpsRaw::RetValType VARSET_VALRET_TP;
+typedef VarSetOpsRaw::ValArgType VARSET_VALARG_TP;
+typedef VarSetOpsRaw::RetValType VARSET_VALRET_TP;
#define VARSET_COUNTOPS 0
#if VARSET_COUNTOPS
-typedef BitSetOpsWithCounter<VARSET_TP, VARSET_REP, Compiler*, TrackedVarBitSetTraits, VARSET_VALARG_TP, VARSET_VALRET_TP, VarSetOpsRaw::Iter> VarSetOps;
+typedef BitSetOpsWithCounter<VARSET_TP,
+ VARSET_REP,
+ Compiler*,
+ TrackedVarBitSetTraits,
+ VARSET_VALARG_TP,
+ VARSET_VALRET_TP,
+ VarSetOpsRaw::Iter>
+ VarSetOps;
#else
typedef VarSetOpsRaw VarSetOps;
#endif
@@ -123,15 +129,15 @@ typedef VarSetOpsRaw VarSetOps;
#include "bitsetasuint64.h"
-typedef BitSetOps</*BitSetType*/UINT64,
- /*Brand*/ALLVARSET_REP,
- /*Env*/Compiler*,
- /*BitSetTraits*/AllVarBitSetTraits>
- AllVarSetOps;
+typedef BitSetOps</*BitSetType*/ UINT64,
+ /*Brand*/ ALLVARSET_REP,
+ /*Env*/ Compiler*,
+ /*BitSetTraits*/ AllVarBitSetTraits>
+ AllVarSetOps;
-typedef UINT64 ALLVARSET_TP;
+typedef UINT64 ALLVARSET_TP;
-const unsigned lclMAX_ALLSET_TRACKED = UInt64Bits;
+const unsigned lclMAX_ALLSET_TRACKED = UInt64Bits;
#define ALLVARSET_REP_IS_CLASS 0
@@ -139,15 +145,15 @@ const unsigned lclMAX_ALLSET_TRACKED = UInt64Bits;
#include "bitsetasshortlong.h"
-typedef BitSetOps</*BitSetType*/BitSetShortLongRep,
- /*Brand*/ALLVARSET_REP,
- /*Env*/Compiler*,
- /*BitSetTraits*/AllVarBitSetTraits>
- AllVarSetOps;
+typedef BitSetOps</*BitSetType*/ BitSetShortLongRep,
+ /*Brand*/ ALLVARSET_REP,
+ /*Env*/ Compiler*,
+ /*BitSetTraits*/ AllVarBitSetTraits>
+ AllVarSetOps;
-typedef BitSetShortLongRep ALLVARSET_TP;
+typedef BitSetShortLongRep ALLVARSET_TP;
-const unsigned lclMAX_ALLSET_TRACKED = lclMAX_TRACKED;
+const unsigned lclMAX_ALLSET_TRACKED = lclMAX_TRACKED;
#define ALLVARSET_REP_IS_CLASS 0
@@ -155,15 +161,15 @@ const unsigned lclMAX_ALLSET_TRACKED = lclMAX_TRACKED;
#include "bitsetasuint64inclass.h"
-typedef BitSetOps</*BitSetType*/BitSetUint64<Compiler*, AllVarBitSetTraits>,
- /*Brand*/ALLVARSET_REP,
- /*Env*/Compiler*,
- /*BitSetTraits*/AllVarBitSetTraits>
- AllVarSetOps;
+typedef BitSetOps</*BitSetType*/ BitSetUint64<Compiler*, AllVarBitSetTraits>,
+ /*Brand*/ ALLVARSET_REP,
+ /*Env*/ Compiler*,
+ /*BitSetTraits*/ AllVarBitSetTraits>
+ AllVarSetOps;
-typedef BitSetUint64<Compiler*, AllVarBitSetTraits> ALLVARSET_TP;
+typedef BitSetUint64<Compiler*, AllVarBitSetTraits> ALLVARSET_TP;
-const unsigned lclMAX_ALLSET_TRACKED = UInt64Bits;
+const unsigned lclMAX_ALLSET_TRACKED = UInt64Bits;
#define ALLVARSET_REP_IS_CLASS 1
@@ -172,8 +178,8 @@ const unsigned lclMAX_ALLSET_TRACKED = UInt64Bits;
#endif
// These types should be used as the types for VARSET_TP arguments and return values, respectively.
-typedef AllVarSetOps::ValArgType ALLVARSET_VALARG_TP;
-typedef AllVarSetOps::RetValType ALLVARSET_VALRET_TP;
+typedef AllVarSetOps::ValArgType ALLVARSET_VALARG_TP;
+typedef AllVarSetOps::RetValType ALLVARSET_VALRET_TP;
// Initialize "varName" to "initVal." Copies contents, not references; if "varName" is uninitialized, allocates a var
// set for it (using "comp" for any necessary allocation), and copies the contents of "initVal" into it.
@@ -194,13 +200,12 @@ typedef AllVarSetOps::RetValType ALLVARSET_VALRET_TP;
#define ALLVARSET_INIT_NOCOPY(varName, initVal) varName(initVal)
#endif
-
// The iterator pattern.
// Use this to initialize an iterator "iterName" to iterate over a VARSET_TP "vs".
// "varIndex" will be an unsigned variable to which we assign the elements of "vs".
-#define VARSET_ITER_INIT(comp, iterName, vs, varIndex) \
- unsigned varIndex = 0; \
+#define VARSET_ITER_INIT(comp, iterName, vs, varIndex) \
+ unsigned varIndex = 0; \
VarSetOps::Iter iterName(comp, vs)
#endif // _VARSET_INCLUDED_
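
The varset.h hunks above are alignment and wrapping changes to the BitSetOps typedefs and the VARSET_ITER_INIT macro. As a rough standalone sketch of the idea behind them (a set of tracked-variable indices stored as bits, iterated lowest index first), using a plain uint64_t rather than the JIT's BitSet machinery; names are illustrative:

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t VarSet64; // one bit per tracked variable index (0..63)

    inline VarSet64 AddElem(VarSet64 s, unsigned varIndex)
    {
        return s | (uint64_t(1) << varIndex);
    }

    inline bool IsMember(VarSet64 s, unsigned varIndex)
    {
        return ((s >> varIndex) & 1) != 0;
    }

    // Iterator in the spirit of VARSET_ITER_INIT: yields each set bit's index in order.
    struct VarSetIter
    {
        VarSet64 m_remaining;

        explicit VarSetIter(VarSet64 s) : m_remaining(s)
        {
        }

        bool NextElem(unsigned* varIndex)
        {
            if (m_remaining == 0)
            {
                return false;
            }
            // Find the index of the lowest set bit.
            unsigned index = 0;
            while (((m_remaining >> index) & 1) == 0)
            {
                index++;
            }
            m_remaining &= m_remaining - 1; // clear the lowest set bit
            *varIndex = index;
            return true;
        }
    };

    int main()
    {
        VarSet64 live = 0;
        live = AddElem(live, 3);
        live = AddElem(live, 17);
        live = AddElem(live, 40);

        unsigned   varIndex = 0;
        VarSetIter iter(live);
        while (iter.NextElem(&varIndex))
        {
            printf("tracked var %u is live\n", varIndex); // prints 3, 17, 40
        }
        return IsMember(live, 17) ? 0 : 1;
    }
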
diff --git a/src/jit/vartype.h b/src/jit/vartype.h
index 82ddd51f39..550aeb9c5b 100644
--- a/src/jit/vartype.h
+++ b/src/jit/vartype.h
@@ -8,29 +8,29 @@
/*****************************************************************************/
#include "error.h"
-enum var_types_classification
+enum var_types_classification
{
VTF_ANY = 0x0000,
VTF_INT = 0x0001,
- VTF_UNS = 0x0002, // type is unsigned
+ VTF_UNS = 0x0002, // type is unsigned
VTF_FLT = 0x0004,
- VTF_GCR = 0x0008, // type is GC ref
- VTF_BYR = 0x0010, // type is Byref
- VTF_I = 0x0020, // is machine sized
- VTF_S = 0x0040, // is a struct type
+ VTF_GCR = 0x0008, // type is GC ref
+ VTF_BYR = 0x0010, // type is Byref
+ VTF_I = 0x0020, // is machine sized
+ VTF_S = 0x0040, // is a struct type
};
-DECLARE_TYPED_ENUM(var_types,BYTE)
+DECLARE_TYPED_ENUM(var_types, BYTE)
{
- #define DEF_TP(tn,nm,jitType,verType,sz,sze,asze,st,al,tf,howUsed) TYP_##tn,
- #include "typelist.h"
- #undef DEF_TP
+#define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) TYP_##tn,
+#include "typelist.h"
+#undef DEF_TP
TYP_COUNT,
- TYP_lastIntrins = TYP_DOUBLE
+ TYP_lastIntrins = TYP_DOUBLE
}
-END_DECLARE_TYPED_ENUM(var_types,BYTE)
+END_DECLARE_TYPED_ENUM(var_types, BYTE)
/*****************************************************************************
* C-style pointers are implemented as TYP_INT or TYP_LONG depending on the
@@ -38,98 +38,107 @@ END_DECLARE_TYPED_ENUM(var_types,BYTE)
*/
#ifdef _TARGET_64BIT_
-#define TYP_I_IMPL TYP_LONG
-#define TYP_U_IMPL TYP_ULONG
-#define TYPE_REF_IIM TYPE_REF_LNG
+#define TYP_I_IMPL TYP_LONG
+#define TYP_U_IMPL TYP_ULONG
+#define TYPE_REF_IIM TYPE_REF_LNG
#else
-#define TYP_I_IMPL TYP_INT
-#define TYP_U_IMPL TYP_UINT
-#define TYPE_REF_IIM TYPE_REF_INT
+#define TYP_I_IMPL TYP_INT
+#define TYP_U_IMPL TYP_UINT
+#define TYPE_REF_IIM TYPE_REF_INT
#ifdef _PREFAST_
// We silence this in the 32-bit build because for portability, we like to have asserts like this:
// assert(op2->gtType == TYP_INT || op2->gtType == TYP_I_IMPL);
// This is obviously redundant for 32-bit builds, but we don't want to have ifdefs and different
// asserts just for 64-bit builds, so for now just silence the assert
-#pragma warning(disable: 6287) // warning 6287: the left and right sub-expressions are identical
-#endif //_PREFAST_
+#pragma warning(disable : 6287) // warning 6287: the left and right sub-expressions are identical
+#endif //_PREFAST_
#endif
-
/*****************************************************************************/
-const extern BYTE varTypeClassification[TYP_COUNT];
+const extern BYTE varTypeClassification[TYP_COUNT];
// make any class with a TypeGet member also have a function TypeGet() that does the same thing
-template<class T>
-inline var_types TypeGet(T * t) { return t->TypeGet(); }
+template <class T>
+inline var_types TypeGet(T* t)
+{
+ return t->TypeGet();
+}
// make a TypeGet function which is the identity function for var_types
-// the point of this and the preceding template is now you can make template functions
+// the point of this and the preceding template is now you can make template functions
// that work on var_types as well as any object that exposes a TypeGet method.
// such as all of these varTypeIs* functions
-inline var_types TypeGet(var_types v) { return v; }
+inline var_types TypeGet(var_types v)
+{
+ return v;
+}
#ifdef FEATURE_SIMD
template <class T>
-inline bool varTypeIsSIMD(T vt)
+inline bool varTypeIsSIMD(T vt)
{
- switch(TypeGet(vt))
+ switch (TypeGet(vt))
{
- case TYP_SIMD8:
- case TYP_SIMD12:
- case TYP_SIMD16:
+ case TYP_SIMD8:
+ case TYP_SIMD12:
+ case TYP_SIMD16:
#ifdef FEATURE_AVX_SUPPORT
- case TYP_SIMD32:
+ case TYP_SIMD32:
#endif // FEATURE_AVX_SUPPORT
- return true;
- default:
- return false;
+ return true;
+ default:
+ return false;
}
}
-#else // FEATURE_SIMD
+#else // FEATURE_SIMD
// Always return false if FEATURE_SIMD is not enabled
template <class T>
-inline bool varTypeIsSIMD(T vt)
+inline bool varTypeIsSIMD(T vt)
{
return false;
-}
+}
#endif // !FEATURE_SIMD
template <class T>
-inline bool varTypeIsIntegral(T vt)
+inline bool varTypeIsIntegral(T vt)
{
- return ((varTypeClassification[TypeGet(vt)] & (VTF_INT )) != 0);
+ return ((varTypeClassification[TypeGet(vt)] & (VTF_INT)) != 0);
}
template <class T>
-inline bool varTypeIsIntegralOrI(T vt)
+inline bool varTypeIsIntegralOrI(T vt)
{
- return ((varTypeClassification[TypeGet(vt)] & (VTF_INT|VTF_I )) != 0);
+ return ((varTypeClassification[TypeGet(vt)] & (VTF_INT | VTF_I)) != 0);
}
template <class T>
-inline bool varTypeIsUnsigned (T vt)
+inline bool varTypeIsUnsigned(T vt)
{
- return ((varTypeClassification[TypeGet(vt)] & (VTF_UNS )) != 0);
+ return ((varTypeClassification[TypeGet(vt)] & (VTF_UNS)) != 0);
}
// If "vt" is an unsigned integral type, returns the corresponding signed integral type, otherwise
// return "vt".
-inline var_types varTypeUnsignedToSigned(var_types vt)
+inline var_types varTypeUnsignedToSigned(var_types vt)
{
if (varTypeIsUnsigned(vt))
{
switch (vt)
{
- case TYP_BOOL:
- case TYP_UBYTE: return TYP_BYTE;
- case TYP_USHORT:
- case TYP_CHAR: return TYP_SHORT;
- case TYP_UINT: return TYP_INT;
- case TYP_ULONG: return TYP_LONG;
- default:
- unreached();
+ case TYP_BOOL:
+ case TYP_UBYTE:
+ return TYP_BYTE;
+ case TYP_USHORT:
+ case TYP_CHAR:
+ return TYP_SHORT;
+ case TYP_UINT:
+ return TYP_INT;
+ case TYP_ULONG:
+ return TYP_LONG;
+ default:
+ unreached();
}
}
else
@@ -139,107 +148,107 @@ inline var_types varTypeUnsignedToSigned(var_types vt)
}
template <class T>
-inline bool varTypeIsFloating (T vt)
+inline bool varTypeIsFloating(T vt)
{
- return ((varTypeClassification[TypeGet(vt)] & (VTF_FLT )) != 0);
+ return ((varTypeClassification[TypeGet(vt)] & (VTF_FLT)) != 0);
}
template <class T>
-inline bool varTypeIsArithmetic(T vt)
+inline bool varTypeIsArithmetic(T vt)
{
- return ((varTypeClassification[TypeGet(vt)] & (VTF_INT|VTF_FLT)) != 0);
+ return ((varTypeClassification[TypeGet(vt)] & (VTF_INT | VTF_FLT)) != 0);
}
template <class T>
-inline unsigned varTypeGCtype (T vt)
+inline unsigned varTypeGCtype(T vt)
{
- return (unsigned)(varTypeClassification[TypeGet(vt)] & (VTF_GCR|VTF_BYR));
+ return (unsigned)(varTypeClassification[TypeGet(vt)] & (VTF_GCR | VTF_BYR));
}
template <class T>
-inline bool varTypeIsGC (T vt)
+inline bool varTypeIsGC(T vt)
{
- return (varTypeGCtype(vt) != 0);
+ return (varTypeGCtype(vt) != 0);
}
template <class T>
-inline bool varTypeIsI (T vt)
+inline bool varTypeIsI(T vt)
{
- return ((varTypeClassification[TypeGet(vt)] & VTF_I) != 0);
+ return ((varTypeClassification[TypeGet(vt)] & VTF_I) != 0);
}
template <class T>
-inline bool varTypeCanReg (T vt)
+inline bool varTypeCanReg(T vt)
{
- return ((varTypeClassification[TypeGet(vt)] & (VTF_INT|VTF_I|VTF_FLT)) != 0);
+ return ((varTypeClassification[TypeGet(vt)] & (VTF_INT | VTF_I | VTF_FLT)) != 0);
}
template <class T>
-inline bool varTypeIsByte (T vt)
+inline bool varTypeIsByte(T vt)
{
- return (TypeGet(vt) >= TYP_BOOL) && (TypeGet(vt) <= TYP_UBYTE);
+ return (TypeGet(vt) >= TYP_BOOL) && (TypeGet(vt) <= TYP_UBYTE);
}
template <class T>
-inline bool varTypeIsShort (T vt)
+inline bool varTypeIsShort(T vt)
{
- return (TypeGet(vt) >= TYP_CHAR) && (TypeGet(vt) <= TYP_USHORT);
+ return (TypeGet(vt) >= TYP_CHAR) && (TypeGet(vt) <= TYP_USHORT);
}
template <class T>
-inline bool varTypeIsSmall (T vt)
+inline bool varTypeIsSmall(T vt)
{
- return (TypeGet(vt) >= TYP_BOOL) && (TypeGet(vt) <= TYP_USHORT);
+ return (TypeGet(vt) >= TYP_BOOL) && (TypeGet(vt) <= TYP_USHORT);
}
template <class T>
-inline bool varTypeIsSmallInt (T vt)
+inline bool varTypeIsSmallInt(T vt)
{
- return (TypeGet(vt) >= TYP_BYTE) && (TypeGet(vt) <= TYP_USHORT);
+ return (TypeGet(vt) >= TYP_BYTE) && (TypeGet(vt) <= TYP_USHORT);
}
template <class T>
-inline bool varTypeIsIntOrI (T vt)
+inline bool varTypeIsIntOrI(T vt)
{
- return ((TypeGet(vt) == TYP_INT)
+ return ((TypeGet(vt) == TYP_INT)
#ifdef _TARGET_64BIT_
- || (TypeGet(vt) == TYP_I_IMPL)
+ || (TypeGet(vt) == TYP_I_IMPL)
#endif // _TARGET_64BIT_
- );
+ );
}
template <class T>
-inline bool genActualTypeIsIntOrI (T vt)
+inline bool genActualTypeIsIntOrI(T vt)
{
- return ((TypeGet(vt) >= TYP_BOOL) && (TypeGet(vt) <= TYP_U_IMPL));
+ return ((TypeGet(vt) >= TYP_BOOL) && (TypeGet(vt) <= TYP_U_IMPL));
}
template <class T>
-inline bool varTypeIsLong (T vt)
+inline bool varTypeIsLong(T vt)
{
- return (TypeGet(vt) >= TYP_LONG) && (TypeGet(vt) <= TYP_ULONG);
+ return (TypeGet(vt) >= TYP_LONG) && (TypeGet(vt) <= TYP_ULONG);
}
template <class T>
-inline bool varTypeIsMultiReg (T vt)
+inline bool varTypeIsMultiReg(T vt)
{
#ifdef _TARGET_64BIT_
return false;
#else
- return (TypeGet(vt) == TYP_LONG);
+ return (TypeGet(vt) == TYP_LONG);
#endif
}
template <class T>
-inline bool varTypeIsSingleReg (T vt)
+inline bool varTypeIsSingleReg(T vt)
{
return !varTypeIsMultiReg(vt);
}
template <class T>
-inline bool varTypeIsComposite(T vt)
+inline bool varTypeIsComposite(T vt)
{
- return (!varTypeIsArithmetic(TypeGet(vt)) && TypeGet(vt) != TYP_VOID);
+ return (!varTypeIsArithmetic(TypeGet(vt)) && TypeGet(vt) != TYP_VOID);
}
// Is this type promotable?
@@ -250,26 +259,25 @@ inline bool varTypeIsComposite(T vt)
// handled as if they are structs with two integer fields.
template <class T>
-inline bool varTypeIsPromotable(T vt)
+inline bool varTypeIsPromotable(T vt)
{
- return (varTypeIsStruct(vt)
- || (TypeGet(vt) == TYP_BLK)
+ return (varTypeIsStruct(vt) || (TypeGet(vt) == TYP_BLK)
#if !defined(_TARGET_64BIT_)
|| varTypeIsLong(vt)
#endif // !defined(_TARGET_64BIT_)
- );
+ );
}
template <class T>
-inline bool varTypeIsStruct(T vt)
+inline bool varTypeIsStruct(T vt)
{
- return ((varTypeClassification[TypeGet(vt)] & VTF_S) != 0);
+ return ((varTypeClassification[TypeGet(vt)] & VTF_S) != 0);
}
template <class T>
-inline bool varTypeIsEnregisterableStruct(T vt)
+inline bool varTypeIsEnregisterableStruct(T vt)
{
- return (TypeGet(vt) != TYP_STRUCT);
+ return (TypeGet(vt) != TYP_STRUCT);
}
/*****************************************************************************/
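
The vartype.h hunks above reformat the varTypeIs* helpers without changing their logic: each type maps to a byte of VTF_* flags, and the predicates simply test flag bits. A small standalone sketch of that table-of-flags approach, with a made-up three-entry type list rather than the JIT's typelist.h:

    #include <cstdint>
    #include <cassert>

    enum var_types_classification
    {
        VTF_ANY = 0x0000,
        VTF_INT = 0x0001,
        VTF_UNS = 0x0002, // type is unsigned
        VTF_FLT = 0x0004,
        VTF_I   = 0x0020, // is machine sized
    };

    enum var_types : uint8_t
    {
        TYP_INT,
        TYP_UINT,
        TYP_FLOAT,
        TYP_COUNT
    };

    // One flags byte per type, indexed by the enum value
    // (the JIT builds the real table from typelist.h).
    const uint8_t varTypeClassification[TYP_COUNT] = {
        VTF_INT | VTF_I,           // TYP_INT
        VTF_INT | VTF_UNS | VTF_I, // TYP_UINT
        VTF_FLT,                   // TYP_FLOAT
    };

    inline bool varTypeIsIntegral(var_types vt)
    {
        return (varTypeClassification[vt] & VTF_INT) != 0;
    }

    inline bool varTypeIsUnsigned(var_types vt)
    {
        return (varTypeClassification[vt] & VTF_UNS) != 0;
    }

    inline bool varTypeIsArithmetic(var_types vt)
    {
        return (varTypeClassification[vt] & (VTF_INT | VTF_FLT)) != 0;
    }

    int main()
    {
        assert(varTypeIsIntegral(TYP_INT) && !varTypeIsIntegral(TYP_FLOAT));
        assert(varTypeIsUnsigned(TYP_UINT) && !varTypeIsUnsigned(TYP_INT));
        assert(varTypeIsArithmetic(TYP_FLOAT));
        return 0;
    }
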
diff --git a/src/jit/x86_instrs.h b/src/jit/x86_instrs.h
index 49199a5eea..1c3489d3b4 100644
--- a/src/jit/x86_instrs.h
+++ b/src/jit/x86_instrs.h
@@ -2,10 +2,9 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-
//
-// This is a temporary file which defined the x86 instructions that
+// This is a temporary file which defined the x86 instructions that
// are currently still referenced when building the Arm Jit compiler
//
- INS_lea,
+INS_lea,